repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
cole945/qemu | hw/char/serial-isa.c | 87 | 4291 | /*
* QEMU 16550A UART emulation
*
* Copyright (c) 2003-2004 Fabrice Bellard
* Copyright (c) 2008 Citrix Systems, Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include "hw/char/serial.h"
#include "hw/isa/isa.h"
#define ISA_SERIAL(obj) OBJECT_CHECK(ISASerialState, (obj), TYPE_ISA_SERIAL)
typedef struct ISASerialState {
ISADevice parent_obj;
uint32_t index;
uint32_t iobase;
uint32_t isairq;
SerialState state;
} ISASerialState;
/* Legacy PC COM1..COM4 I/O port bases, indexed by serial port index. */
static const int isa_serial_io[MAX_SERIAL_PORTS] = {
    0x3f8, 0x2f8, 0x3e8, 0x2e8
};
/* Legacy PC COM1..COM4 IRQ lines; COM1/COM3 share IRQ 4, COM2/COM4 share 3. */
static const int isa_serial_irq[MAX_SERIAL_PORTS] = {
    4, 3, 4, 3
};
/*
 * Realize an ISA 16550A serial device: resolve index/iobase/IRQ defaults
 * from the legacy COM-port tables, initialise the UART core, and map the
 * 8-byte register window on the ISA bus.
 */
static void serial_isa_realizefn(DeviceState *dev, Error **errp)
{
    static int index;               /* next auto-assigned port index */
    ISADevice *isadev = ISA_DEVICE(dev);
    ISASerialState *isa = ISA_SERIAL(dev);
    SerialState *s = &isa->state;
    Error *local_err = NULL;

    if (isa->index == -1) {
        isa->index = index;
    }
    if (isa->index >= MAX_SERIAL_PORTS) {
        error_setg(errp, "Max. supported number of ISA serial ports is %d.",
                   MAX_SERIAL_PORTS);
        return;
    }
    /* Fall back to the legacy resources for this COM index. */
    if (isa->iobase == -1) {
        isa->iobase = isa_serial_io[isa->index];
    }
    if (isa->isairq == -1) {
        isa->isairq = isa_serial_irq[isa->index];
    }
    index++;

    s->baudbase = 115200;
    isa_init_irq(isadev, &s->irq, isa->isairq);
    serial_realize_core(s, &local_err);
    if (local_err) {
        /* BUG FIX: previously the error was ignored and the I/O window was
         * registered for a half-initialised UART core. */
        error_propagate(errp, local_err);
        return;
    }
    qdev_set_legacy_instance_id(dev, isa->iobase, 3);

    memory_region_init_io(&s->io, OBJECT(isa), &serial_io_ops, s, "serial", 8);
    isa_register_ioport(isadev, &s->io, isa->iobase);
}
/* Migration state: only the embedded SerialState is transferred. */
static const VMStateDescription vmstate_isa_serial = {
    .name = "serial",
    .version_id = 3,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        VMSTATE_STRUCT(state, ISASerialState, 0, vmstate_serial, SerialState),
        VMSTATE_END_OF_LIST()
    }
};
/* User-configurable properties; -1 means "pick the legacy default". */
static Property serial_isa_properties[] = {
    DEFINE_PROP_UINT32("index", ISASerialState, index, -1),
    DEFINE_PROP_UINT32("iobase", ISASerialState, iobase, -1),
    DEFINE_PROP_UINT32("irq", ISASerialState, isairq, -1),
    DEFINE_PROP_CHR("chardev", ISASerialState, state.chr),
    DEFINE_PROP_UINT32("wakeup", ISASerialState, state.wakeup, 0),
    DEFINE_PROP_END_OF_LIST(),
};
/* QOM class init: wire realize, migration description and properties. */
static void serial_isa_class_initfn(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = serial_isa_realizefn;
    dc->vmsd = &vmstate_isa_serial;
    dc->props = serial_isa_properties;
    set_bit(DEVICE_CATEGORY_INPUT, dc->categories);
}
/* QOM type registration for the ISA serial device. */
static const TypeInfo serial_isa_info = {
    .name = TYPE_ISA_SERIAL,
    .parent = TYPE_ISA_DEVICE,
    .instance_size = sizeof(ISASerialState),
    .class_init = serial_isa_class_initfn,
};

static void serial_register_types(void)
{
    type_register_static(&serial_isa_info);
}

type_init(serial_register_types)
/*
 * Convenience helper used by board code: create and initialise one ISA
 * serial port for the given COM index, backed by the given chardev.
 * Returns false if the device could not be created or initialised.
 */
bool serial_isa_init(ISABus *bus, int index, CharDriverState *chr)
{
    DeviceState *dev;
    ISADevice *isadev;

    /* isa_try_create (unlike isa_create) returns NULL instead of aborting. */
    isadev = isa_try_create(bus, TYPE_ISA_SERIAL);
    if (!isadev) {
        return false;
    }
    dev = DEVICE(isadev);
    qdev_prop_set_uint32(dev, "index", index);
    qdev_prop_set_chr(dev, "chardev", chr);
    if (qdev_init(dev) < 0) {
        return false;
    }
    return true;
}
| gpl-2.0 |
InES-HPMM/linux-l4t | drivers/edp/sysedp_sysfs.c | 87 | 9645 | /*
* Copyright (c) 2013-2014, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/sysedp.h>
#include <linux/err.h>
#include <trace/events/sysedp.h>
#include "sysedp_internal.h"
/* Root sysfs kobject for the sysedp subsystem (under /sys/power if PM). */
struct kobject sysedp_kobj;
/* kset that groups every registered consumer's kobject. */
static struct kset *consumers_kset;

/* Per-consumer sysfs attribute with typed show/store callbacks. */
struct sysedp_consumer_attribute {
    struct attribute attr;
    ssize_t (*show)(struct sysedp_consumer *c, char *buf);
    ssize_t (*store)(struct sysedp_consumer *c,
             const char *buf, size_t count);
};
/*
 * Show the consumer's power-state table as a space-separated list.
 * Uses scnprintf so a pathological table can never write past the
 * PAGE_SIZE sysfs buffer (the old sprintf relied solely on the loop
 * bound estimate).
 */
static ssize_t states_show(struct sysedp_consumer *c, char *s)
{
	unsigned int i;
	int cnt = 0;
	/* worst-case printed width of one entry plus separator */
	const int sz = sizeof(*c->states) * 3 + 2;

	for (i = 0; i < c->num_states && (cnt + sz) < PAGE_SIZE; i++)
		cnt += scnprintf(s + cnt, PAGE_SIZE - cnt, "%s%u",
				 i ? " " : "", c->states[i]);

	cnt += scnprintf(s + cnt, PAGE_SIZE - cnt, "\n");
	return cnt;
}
/*
 * Show the consumer's over-current peak table; falls back to the state
 * table when no dedicated peak table was registered.  Bounded with
 * scnprintf so the PAGE_SIZE sysfs buffer cannot be overrun.
 */
static ssize_t ocpeaks_show(struct sysedp_consumer *c, char *s)
{
	unsigned int i;
	int cnt = 0;
	unsigned int *p;
	/* worst-case printed width of one entry plus separator */
	const int sz = sizeof(*p) * 3 + 2;

	p = c->ocpeaks ? c->ocpeaks : c->states;
	for (i = 0; i < c->num_states && (cnt + sz) < PAGE_SIZE; i++)
		cnt += scnprintf(s + cnt, PAGE_SIZE - cnt, "%s%u",
				 i ? " " : "", p[i]);

	cnt += scnprintf(s + cnt, PAGE_SIZE - cnt, "\n");
	return cnt;
}
/* Show the power value (mW) of the consumer's currently selected state. */
static ssize_t current_show(struct sysedp_consumer *c, char *s)
{
    return sprintf(s, "%u\n", c->states[c->state]);
}

/* Show the index of the consumer's currently selected state. */
static ssize_t state_show(struct sysedp_consumer *c, char *s)
{
    return sprintf(s, "%u\n", c->state);
}
/*
 * Select a new power state for the consumer.  Uses kstrtouint (the
 * kernel-preferred parser) which, unlike sscanf("%u"), rejects input
 * with trailing garbage; sysedp_set_state clamps out-of-range values.
 */
static ssize_t state_store(struct sysedp_consumer *c, const char *s,
			   size_t count)
{
	unsigned int new_state;

	if (kstrtouint(s, 10, &new_state))
		return -EINVAL;

	sysedp_set_state(c, new_state);
	return count;
}
/* "current" clashes with the kernel's current macro, so it cannot use
 * __ATTR() and is spelled out by hand (read-only, 0444). */
static struct sysedp_consumer_attribute attr_current = {
    .attr = { .name = "current", .mode = 0444 },
    .show = current_show
};
static struct sysedp_consumer_attribute attr_state = __ATTR(state, 0660,
                                state_show,
                                state_store);
static struct sysedp_consumer_attribute attr_states = __ATTR_RO(states);
static struct sysedp_consumer_attribute attr_ocpeaks = __ATTR_RO(ocpeaks);

/* Default attributes instantiated for every consumer kobject. */
static struct attribute *consumer_attrs[] = {
    &attr_current.attr,
    &attr_state.attr,
    &attr_states.attr,
    &attr_ocpeaks.attr,
    NULL
};

/* Map a consumer kobject back to its enclosing sysedp_consumer. */
static struct sysedp_consumer *to_consumer(struct kobject *kobj)
{
    return container_of(kobj, struct sysedp_consumer, kobj);
}
/*
 * sysfs show dispatcher: recover the typed consumer and attribute from
 * the generic kobject/attribute pair and forward to the show callback.
 */
static ssize_t consumer_attr_show(struct kobject *kobj,
				  struct attribute *attr, char *buf)
{
	struct sysedp_consumer *consumer = to_consumer(kobj);
	struct sysedp_consumer_attribute *cattr =
		container_of(attr, struct sysedp_consumer_attribute, attr);

	if (!consumer || !cattr || !cattr->show)
		return -EINVAL;

	return cattr->show(consumer, buf);
}
/*
 * sysfs store dispatcher: recover the typed consumer and attribute from
 * the generic kobject/attribute pair and forward to the store callback.
 */
static ssize_t consumer_attr_store(struct kobject *kobj,
				   struct attribute *attr, const char *buf,
				   size_t count)
{
	struct sysedp_consumer *consumer = to_consumer(kobj);
	struct sysedp_consumer_attribute *cattr =
		container_of(attr, struct sysedp_consumer_attribute, attr);

	if (!consumer || !cattr || !cattr->store)
		return -EINVAL;

	return cattr->store(consumer, buf, count);
}
/* Generic sysfs ops routed through the typed dispatchers above. */
static const struct sysfs_ops consumer_sysfs_ops = {
    .show = consumer_attr_show,
    .store = consumer_attr_store
};

/* kobject type for consumer entries; attributes created automatically. */
static struct kobj_type ktype_consumer = {
    .sysfs_ops = &consumer_sysfs_ops,
    .default_attrs = consumer_attrs
};
/*
 * Create the sysfs entry for a consumer under the consumers kset and
 * announce it with a KOBJ_ADD uevent.  Returns 0 on success or a
 * negative errno; on any failure the reference taken by kobject_init()
 * is dropped (the kobject API requires kobject_put after a failed
 * kobject_add, otherwise the object and its name leak).
 */
int sysedp_consumer_add_kobject(struct sysedp_consumer *consumer)
{
	int ret;

	consumer->kobj.kset = consumers_kset;
	kobject_init(&consumer->kobj, &ktype_consumer);
	ret = kobject_add(&consumer->kobj, NULL, consumer->name);
	if (ret) {
		pr_err("%s: failed to add sysfs consumer entry\n",
		       consumer->name);
		/* BUG FIX: release the init reference on add failure too */
		kobject_put(&consumer->kobj);
		return ret;
	}

	ret = kobject_uevent(&consumer->kobj, KOBJ_ADD);
	if (ret) {
		pr_err("%s: failed to send uevent\n",
		       consumer->name);
		kobject_put(&consumer->kobj);
		return ret;
	}

	return 0;
}
/* Drop the final reference; ktype release tears down the sysfs entry. */
void sysedp_consumer_remove_kobject(struct sysedp_consumer *consumer)
{
    kobject_put(&consumer->kobj);
}
/* Top-level (subsystem-wide) sysfs attribute; no per-object context. */
struct sysedp_attribute {
    struct attribute attr;
    ssize_t (*show)(char *buf);
    ssize_t (*store)(const char *buf, size_t count);
};
/*
 * Parse a comma-separated list of unsigned ints from buf into a freshly
 * kmalloc'ed array.  Parsing stops at the first ';', which separates the
 * optional second table in consumer_register_store.  On success the
 * array is returned and *num_tokens is set; the caller owns (and must
 * kfree) the buffer.  On failure an ERR_PTR is returned — never NULL.
 */
static unsigned int *get_tokenized_data(const char *buf,
                    unsigned int *num_tokens)
{
    const char *cp;
    int i;
    unsigned int ntokens = 1;
    unsigned int *tokenized_data;
    int err = -EINVAL;

    if (!buf || *buf == 0)
        goto err;

    /* Count commas up to the terminating ';' to size the array. */
    cp = buf;
    while ((cp = strpbrk(cp + 1, ",;")))
        if (*cp == ';')
            break;
        else
            ntokens++;

    tokenized_data = kmalloc(ntokens * sizeof(unsigned int),
                 GFP_KERNEL);
    if (!tokenized_data) {
        err = -ENOMEM;
        goto err;
    }

    cp = buf;
    i = 0;
    while (i < ntokens) {
        if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
            goto err_kfree;

        cp = strpbrk(cp, ",;");
        if (!cp || *cp == ';')
            break;
        cp++;
    }

    /* Fewer values than separators implied: malformed input. */
    if (i != ntokens)
        goto err_kfree;

    *num_tokens = ntokens;
    return tokenized_data;

err_kfree:
    kfree(tokenized_data);
err:
    return ERR_PTR(err);
}
/*
 * Register a new removable consumer from sysfs input of the form:
 *   "<name> <state0>,<state1>,...[;<peak0>,<peak1>,...]"
 * The optional second table supplies over-current peak values and must
 * contain exactly as many entries as the state table.  Returns count on
 * success or a negative errno.
 */
static ssize_t consumer_register_store(const char *s, size_t count)
{
	size_t name_len;
	unsigned int *states = NULL;
	unsigned int *ocpeaks = NULL;
	unsigned int num_states;
	unsigned int num_ocpeaks;
	struct sysedp_consumer *consumer = NULL;
	const char *s2;
	int err;

	name_len = strcspn(s, " \n");
	if (name_len > SYSEDP_NAME_LEN - 1)
		return -EINVAL;

	/* get_tokenized_data returns an ERR_PTR on failure, never NULL */
	states = get_tokenized_data(s + name_len, &num_states);
	if (IS_ERR(states))
		return PTR_ERR(states);

	/* Parse for optional 2nd table (peak values) */
	s2 = strpbrk(s + name_len, ";");
	if (s2) {
		ocpeaks = get_tokenized_data(s2 + 1, &num_ocpeaks);
		if (IS_ERR(ocpeaks)) {
			err = PTR_ERR(ocpeaks);
			ocpeaks = NULL;
			goto err_kfree;
		}
		if (num_states != num_ocpeaks) {
			err = -EINVAL;
			goto err_kfree;
		}
	}

	consumer = kzalloc(sizeof(*consumer), GFP_KERNEL);
	if (!consumer) {
		/* BUG FIX: kzalloc returns NULL on failure, not an ERR_PTR;
		 * the old PTR_ERR(NULL) evaluated to 0, making an OOM look
		 * like a successful zero-byte write to the caller. */
		err = -ENOMEM;
		goto err_kfree;
	}

	memcpy(consumer->name, s, name_len);
	consumer->name[name_len] = 0;
	consumer->states = states;
	consumer->ocpeaks = ocpeaks;
	consumer->num_states = num_states;
	consumer->removable = 1;

	err = sysedp_register_consumer(consumer);
	if (err)
		goto err_kfree;

	return count;

err_kfree:
	kfree(states);
	kfree(ocpeaks);
	kfree(consumer);
	return err;
}
/*
 * Unregister (and free) a previously registered consumer by name.
 * Only consumers created through consumer_register (marked removable)
 * may be removed this way; built-in consumers return -EINVAL.
 */
static ssize_t consumer_unregister_store(const char *s, size_t count)
{
    char name[SYSEDP_NAME_LEN];
    size_t n;
    struct sysedp_consumer *consumer;

    /* Copy at most SYSEDP_NAME_LEN-1 bytes and NUL-terminate. */
    n = count > SYSEDP_NAME_LEN - 1 ? SYSEDP_NAME_LEN - 1 : count;
    strncpy(name, s, n);
    name[n] = 0;

    /* strim drops the trailing newline sysfs writes usually carry. */
    consumer = sysedp_get_consumer(strim(name));
    if (!consumer)
        return -EINVAL;

    if (!consumer->removable)
        return -EINVAL;

    sysedp_unregister_consumer(consumer);
    kfree(consumer->states);
    kfree(consumer->ocpeaks);
    kfree(consumer);
    return count;
}
/* Show the global power-budget margin (signed, may be negative). */
static ssize_t margin_show(char *s)
{
    return sprintf(s, "%d\n", margin);
}
/*
 * Set the global power-budget margin and recompute the budget under
 * sysedp_lock.  Uses kstrtoint (kernel-preferred parser) which, unlike
 * sscanf("%d"), rejects input with trailing garbage.
 */
static ssize_t margin_store(const char *s, size_t count)
{
	int val;

	if (kstrtoint(s, 10, &val))
		return -EINVAL;

	mutex_lock(&sysedp_lock);
	margin = val;
	_sysedp_refresh();
	mutex_unlock(&sysedp_lock);
	return count;
}
/* Show the currently available (unconsumed) power budget. */
static ssize_t avail_budget_show(char *s)
{
    return sprintf(s, "%u\n", avail_budget);
}
/* Subsystem-level attributes: register/unregister are write-only (0220),
 * margin is read-write, avail_budget read-only. */
static struct sysedp_attribute attr_consumer_register =
    __ATTR(consumer_register, 0220, NULL, consumer_register_store);
static struct sysedp_attribute attr_consumer_unregister =
    __ATTR(consumer_unregister, 0220, NULL, consumer_unregister_store);
static struct sysedp_attribute attr_margin =
    __ATTR(margin, 0660, margin_show, margin_store);
static struct sysedp_attribute attr_avail_budget = __ATTR_RO(avail_budget);

static struct attribute *sysedp_attrs[] = {
    &attr_consumer_register.attr,
    &attr_consumer_unregister.attr,
    &attr_margin.attr,
    &attr_avail_budget.attr,
    NULL
};
/*
 * Show dispatcher for subsystem-level attributes; the kobject carries
 * no per-object state, so only the attribute is recovered.
 */
static ssize_t sysedp_attr_show(struct kobject *kobj,
				struct attribute *_attr, char *buf)
{
	struct sysedp_attribute *sattr =
		container_of(_attr, struct sysedp_attribute, attr);

	if (!sattr || !sattr->show)
		return -EINVAL;

	return sattr->show(buf);
}
/*
 * Store dispatcher for subsystem-level attributes; forwards the raw
 * user buffer to the attribute's store callback if one exists.
 */
static ssize_t sysedp_attr_store(struct kobject *kobj, struct attribute *_attr,
				 const char *buf, size_t count)
{
	struct sysedp_attribute *sattr =
		container_of(_attr, struct sysedp_attribute, attr);

	if (!sattr || !sattr->store)
		return -EINVAL;

	return sattr->store(buf, count);
}
/* Generic sysfs ops for the top-level sysedp kobject. */
static const struct sysfs_ops sysedp_sysfs_ops = {
    .show = sysedp_attr_show,
    .store = sysedp_attr_store
};

static struct kobj_type ktype_sysedp = {
    .sysfs_ops = &sysedp_sysfs_ops,
    .default_attrs = sysedp_attrs
};

/* Empty uevent ops: consumers kset emits default uevents only. */
static const struct kset_uevent_ops sysedp_uevent_ops = {
};
int sysedp_init_sysfs(void)
{
int ret;
struct kobject *parent = NULL;
#ifdef CONFIG_PM
parent = power_kobj;
#endif
ret = kobject_init_and_add(&sysedp_kobj, &ktype_sysedp,
parent, "sysedp");
if (ret) {
pr_err("sysedp_init_sysfs: initialization failed\n");
return ret;
}
consumers_kset = kset_create_and_add("consumers", &sysedp_uevent_ops,
&sysedp_kobj);
if (!consumers_kset) {
pr_err("sysedp_init_sysfs: consumers kset init failed\n");
return -EFAULT;
}
return 0;
}
| gpl-2.0 |
arter97/android_kernel_nvidia_shieldtablet | drivers/net/wireless/sd8897/mlan/mlan_11h.c | 87 | 107519 | /** @file mlan_11h.c
*
* @brief This file contains functions for 802.11H.
*
* Copyright (C) 2008-2013, Marvell International Ltd.
*
* This software file (the "File") is distributed by Marvell International
* Ltd. under the terms of the GNU General Public License Version 2, June 1991
* (the "License"). You may use, redistribute and/or modify this File in
* accordance with the terms and conditions of the License, a copy of which
* is available by writing to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the
* worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
*
* THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE
* IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE
* ARE EXPRESSLY DISCLAIMED. The License provides additional details about
* this warranty disclaimer.
*
*/
/*************************************************************
Change Log:
03/26/2009: initial version
************************************************************/
#include "mlan.h"
#include "mlan_join.h"
#include "mlan_util.h"
#include "mlan_fw.h"
#include "mlan_main.h"
#include "mlan_ioctl.h"
#include "mlan_11h.h"
#ifdef UAP_SUPPORT
#include "mlan_uap.h"
#endif
/********************************************************
Local Variables
********************************************************/
/** Default IBSS DFS recovery interval (in TBTTs); used for adhoc start */
#define WLAN_11H_DEFAULT_DFS_RECOVERY_INTERVAL 100
/** Default 11h power constraint used to offset the maximum transmit power */
#define WLAN_11H_TPC_POWERCONSTRAINT 0
/** 11h TPC Power capability minimum setting, sent in TPC_INFO command to fw */
#define WLAN_11H_TPC_POWERCAPABILITY_MIN 5
/** 11h TPC Power capability maximum setting, sent in TPC_INFO command to fw */
#define WLAN_11H_TPC_POWERCAPABILITY_MAX 20
/** Regulatory requirement for the duration of a channel availability check */
#define WLAN_11H_CHANNEL_AVAIL_CHECK_DURATION 60000 /* in ms */
/** Starting Frequency for 11A band */
#define START_FREQ_11A_BAND 5000 /* in MHz */
/** DFS Channel Move Time */
#define DFS_CHAN_MOVE_TIME 10 /* in sec */
/** Regulatory requirement for the duration of a non-occupancy period */
#define WLAN_11H_NON_OCCUPANCY_PERIOD 1800 /* in sec (30mins) */
/** Maximum allowable age (seconds) on DFS report data */
#define MAX_DFS_REPORT_USABLE_AGE_SEC (120) /* 2 minutes */
/** Minimum delay for CHAN_SW IE to broadcast by FW */
#define MIN_RDH_CHAN_SW_IE_PERIOD_MSEC (500) /* 5 beacons @ 100ms */
/** Maximum delay for CHAN_SW IE to broadcast by FW */
#define MAX_RDH_CHAN_SW_IE_PERIOD_MSEC (3000) /* 5 beacons @ 600ms */
/** Maximum retries on selecting new random channel */
#define MAX_RANDOM_CHANNEL_RETRIES (20)
/** Maximum retries on selecting new random non-dfs channel */
#define MAX_SWITCH_CHANNEL_RETRIES (30)
/** Value for undetermined priv_curr_idx on first entry to new RDH stage */
#define RDH_STAGE_FIRST_ENTRY_PRIV_IDX (0xff)
/** Region codes 0x10, 0x20: channels 1 thru 11 supported */
static const
IEEEtypes_SupportChan_Subband_t wlan_11h_2_4G_region_FCC = { 1, 11 };
/** Region codes 0x30, 0x32, 0x41, 0x50: channels 1 thru 13 supported */
static const
IEEEtypes_SupportChan_Subband_t wlan_11h_2_4G_region_EU = { 1, 13 };
/** Region code 0x40: only channel 14 supported */
static const
IEEEtypes_SupportChan_Subband_t wlan_11h_2_4G_region_JPN40 = { 14, 1 };
/** JPN sub-band config : Start Channel = 8, NumChans = 3 */
static const
IEEEtypes_SupportChan_Subband_t wlan_11h_JPN_bottom_band = { 8, 3 };
/** U-NII sub-band config : Start Channel = 36, NumChans = 4 */
static const
IEEEtypes_SupportChan_Subband_t wlan_11h_unii_lower_band = { 36, 4 };
/** U-NII sub-band config : Start Channel = 52, NumChans = 4 */
static const
IEEEtypes_SupportChan_Subband_t wlan_11h_unii_middle_band = { 52, 4 };
/** U-NII sub-band config : Start Channel = 100, NumChans = 11 */
static const
IEEEtypes_SupportChan_Subband_t wlan_11h_unii_mid_upper_band = { 100, 11 };
/** U-NII sub-band config : Start Channel = 149, NumChans = 5 */
static const
IEEEtypes_SupportChan_Subband_t wlan_11h_unii_upper_band = { 149, 5 };
/** Internally passed structure used to send a CMD_802_11_TPC_INFO command */
typedef struct {
t_u8 chan; /**< Channel to which the power constraint applies */
t_u8 power_constraint; /**< Local power constraint to send to firmware */
} wlan_11h_tpc_info_param_t;
/********************************************************
Global Variables
********************************************************/
/********************************************************
Local Functions
********************************************************/
/**
* @brief Utility function to get a random number based on the underlying OS
*
* @param pmadapter Pointer to mlan_adapter
* @return random integer
*/
static t_u32
wlan_11h_get_random_num(pmlan_adapter pmadapter)
{
	t_u32 sec_fold, usec_fold;

	ENTER();
	/* Derive a pseudo-random value from the system clock: fold each
	 * 32-bit time component into 16 bits, then pack the two halves. */
	pmadapter->callbacks.moal_get_system_time(pmadapter->pmoal_handle,
						  &sec_fold, &usec_fold);
	sec_fold = (sec_fold & 0xFFFF) + (sec_fold >> 16);
	usec_fold = (usec_fold & 0xFFFF) + (usec_fold >> 16);

	LEAVE();
	return (usec_fold << 16) | sec_fold;
}
/**
* @brief Convert an IEEE formatted IE to 16-bit ID/Len Marvell
* proprietary format
*
* @param pmadapter Pointer to mlan_adapter
* @param pout_buf Output parameter: Buffer to output Marvell formatted IE
* @param pin_ie Pointer to IEEE IE to be converted to Marvell format
*
* @return Number of bytes output to pout_buf parameter return
*/
static t_u32
wlan_11h_convert_ieee_to_mrvl_ie(mlan_adapter * pmadapter,
                 t_u8 * pout_buf, const t_u8 * pin_ie)
{
    MrvlIEtypesHeader_t mrvl_ie_hdr;
    t_u8 *ptmp_buf = pout_buf;

    ENTER();
    /* Assign the Element Id and Len to the Marvell struct attributes.
     * IEEE IE layout is [id:1][len:1][data]; Marvell TLV widens both
     * header fields to little-endian 16 bits. */
    mrvl_ie_hdr.type = wlan_cpu_to_le16(pin_ie[0]);
    mrvl_ie_hdr.len = wlan_cpu_to_le16(pin_ie[1]);

    /* If the element ID is zero, return without doing any copying */
    if (!mrvl_ie_hdr.type) {
        LEAVE();
        return 0;
    }

    /* Copy the header to the buffer pointer */
    memcpy(pmadapter, ptmp_buf, &mrvl_ie_hdr, sizeof(mrvl_ie_hdr));

    /* Increment the temp buffer pointer by the size appended */
    ptmp_buf += sizeof(mrvl_ie_hdr);

    /* Append the data section of the IE; length given by the IEEE IE
       length */
    memcpy(pmadapter, ptmp_buf, pin_ie + 2, pin_ie[1]);

    LEAVE();
    /* Return the number of bytes appended to pout_buf */
    return sizeof(mrvl_ie_hdr) + pin_ie[1];
}
#ifdef STA_SUPPORT
/**
* @brief Setup the IBSS DFS element passed to the firmware in adhoc start
* and join commands
*
* The DFS Owner and recovery fields are set to be our MAC address and
* a predetermined constant recovery value. If we are joining an adhoc
* network, these values are replaced with the existing IBSS values.
* They are valid only when starting a new IBSS.
*
* The IBSS DFS Element is variable in size based on the number of
* channels supported in our current region.
*
* @param priv Private driver information structure
* @param pdfs Output parameter: Pointer to the IBSS DFS element setup by
* this function.
*
* @return
* - Length of the returned element in pdfs output parameter
* - 0 if returned element is not setup
*/
static t_u32
wlan_11h_set_ibss_dfs_ie(mlan_private * priv, IEEEtypes_IBSS_DFS_t * pdfs)
{
    t_u8 num_chans = 0;
    MeasRptBasicMap_t initial_map;
    mlan_adapter *adapter = priv->adapter;

    ENTER();
    memset(adapter, pdfs, 0x00, sizeof(IEEEtypes_IBSS_DFS_t));

    /*
     * A basic measurement report is included with each channel in the
     * map field. Initial value for the map for each supported channel
     * is with only the unmeasured bit set.
     */
    memset(adapter, &initial_map, 0x00, sizeof(initial_map));
    initial_map.unmeasured = 1;

    /* Set the DFS Owner and recovery interval fields */
    memcpy(adapter, pdfs->dfs_owner, priv->curr_addr,
           sizeof(pdfs->dfs_owner));
    pdfs->dfs_recovery_interval = WLAN_11H_DEFAULT_DFS_RECOVERY_INTERVAL;

    /* Fill one channel-map entry per channel in the parsed region table,
     * capped at the IE's maximum channel count. */
    for (; (num_chans < adapter->parsed_region_chan.no_of_chan)
         && (num_chans < WLAN_11H_MAX_IBSS_DFS_CHANNELS); num_chans++) {
        pdfs->channel_map[num_chans].channel_number =
            adapter->parsed_region_chan.chan_pwr[num_chans].chan;

        /*
         * Set the initial map field with a basic measurement
         */
        pdfs->channel_map[num_chans].rpt_map = initial_map;
    }

    /*
     * If we have an established channel map, include it and return
     * a valid DFS element
     */
    if (num_chans) {
        PRINTM(MINFO, "11h: Added %d channels to IBSS DFS Map\n",
               num_chans);

        pdfs->element_id = IBSS_DFS;
        /* Element length = fixed fields + one map entry per channel. */
        pdfs->len =
            (sizeof(pdfs->dfs_owner) +
             sizeof(pdfs->dfs_recovery_interval)
             + num_chans * sizeof(IEEEtypes_ChannelMap_t));

        LEAVE();
        return pdfs->len + sizeof(pdfs->len) + sizeof(pdfs->element_id);
    }

    /* Ensure the element is zeroed out for an invalid return */
    memset(adapter, pdfs, 0x00, sizeof(IEEEtypes_IBSS_DFS_t));

    LEAVE();
    return 0;
}
#endif
/**
* @brief Setup the Supported Channel IE sent in association requests
*
* The Supported Channels IE is required to be sent when the spectrum
* management capability (11h) is enabled. The element contains a
* starting channel and number of channels tuple for each sub-band
* the STA supports. This information is based on the operating region.
*
* @param priv Private driver information structure
* @param band Band in use
* @param psup_chan Output parameter: Pointer to the Supported Chan element
* setup by this function.
*
* @return
* - Length of the returned element in psup_chan output parameter
* - 0 if returned element is not setup
*/
static
t_u16
wlan_11h_set_supp_channels_ie(mlan_private * priv,
                  t_u8 band,
                  IEEEtypes_SupportedChannels_t * psup_chan)
{
    t_u16 num_subbands = 0;
    t_u16 ret_len = 0;
    t_u8 cfp_bg, cfp_a;

    ENTER();
    memset(priv->adapter, psup_chan, 0x00,
           sizeof(IEEEtypes_SupportedChannels_t));

    /* Region code selects the sub-band tables; fall back to the
     * per-band CFP codes when no region code is configured. */
    cfp_bg = cfp_a = priv->adapter->region_code;
    if (!priv->adapter->region_code) {
        /* Invalid region code, use CFP code */
        cfp_bg = priv->adapter->cfp_code_bg;
        cfp_a = priv->adapter->cfp_code_a;
    }

    if ((band & BAND_B) || (band & BAND_G)) {
        /*
         * Channels are contiguous in 2.4GHz, usually only one subband.
         */
        switch (cfp_bg) {
        case 0x10:     /* USA FCC */
        case 0x20:     /* Canada IC */
        default:
            psup_chan->subband[num_subbands++] =
                wlan_11h_2_4G_region_FCC;
            break;
        case 0x30:     /* Europe ETSI */
        case 0x32:     /* France */
        case 0x41:     /* Japan */
        case 0x50:     /* China */
            psup_chan->subband[num_subbands++] =
                wlan_11h_2_4G_region_EU;
            break;
        case 0x40:     /* Japan */
            psup_chan->subband[num_subbands++] =
                wlan_11h_2_4G_region_JPN40;
            break;
        case 0xff:     /* Japan special */
            psup_chan->subband[num_subbands++] =
                wlan_11h_2_4G_region_EU;
            psup_chan->subband[num_subbands++] =
                wlan_11h_2_4G_region_JPN40;
            break;
        }
    } else if (band & BAND_A) {
        /*
         * Set the supported channel elements based on the region code,
         * incrementing num_subbands for each sub-band we append to the
         * element.
         */
        switch (cfp_a) {
        case 0x10:     /* USA FCC */
        case 0x20:     /* Canada IC */
        case 0x32:     /* France */
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_lower_band;
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_middle_band;
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_mid_upper_band;
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_upper_band;
            break;
        case 0x30:     /* Europe ETSI */
        default:
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_lower_band;
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_middle_band;
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_mid_upper_band;
            break;
        case 0x50:     /* China */
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_upper_band;
            break;
        case 0x40:     /* Japan */
        case 0x41:     /* Japan */
        case 0xff:     /* Japan special */
            psup_chan->subband[num_subbands++] =
                wlan_11h_JPN_bottom_band;
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_lower_band;
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_middle_band;
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_mid_upper_band;
            break;
        case 0x1:      /* Low band (5150-5250 MHz) channels */
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_lower_band;
            break;
        case 0x2:      /* Lower middle band (5250-5350 MHz) channels */
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_middle_band;
            break;
        case 0x3:      /* Upper middle band (5470-5725 MHz) channels */
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_mid_upper_band;
            break;
        case 0x4:      /* High band (5725-5850 MHz) channels */
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_upper_band;
            break;
        case 0x5:      /* Low band (5150-5250 MHz) and High band
                          (5725-5850 MHz) channels */
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_lower_band;
            psup_chan->subband[num_subbands++] =
                wlan_11h_unii_upper_band;
            break;
        }
    }

    /*
     * If we have setup any supported subbands in the element, return a
     * valid IE along with its size, else return 0.
     */
    if (num_subbands) {
        psup_chan->element_id = SUPPORTED_CHANNELS;
        psup_chan->len =
            num_subbands * sizeof(IEEEtypes_SupportChan_Subband_t);
        ret_len = (t_u16) (psup_chan->len
                   + sizeof(psup_chan->len) +
                   sizeof(psup_chan->element_id));
        HEXDUMP("11h: SupChan", (t_u8 *) psup_chan, ret_len);
    }

    LEAVE();
    return ret_len;
}
/**
* @brief Prepare CMD_802_11_TPC_ADAPT_REQ firmware command
*
* @param priv Private driver information structure
* @param pcmd_ptr Output parameter: Pointer to the command being prepared
* for the firmware
* @param pinfo_buf HostCmd_DS_802_11_TPC_ADAPT_REQ passed as void data block
*
* @return MLAN_STATUS_SUCCESS
*/
static mlan_status
wlan_11h_cmd_tpc_request(mlan_private * priv,
             HostCmd_DS_COMMAND * pcmd_ptr,
             const t_void * pinfo_buf)
{
    ENTER();

    /* Copy the caller-supplied request verbatim into the command body. */
    memcpy(priv->adapter, &pcmd_ptr->params.tpc_req, pinfo_buf,
           sizeof(HostCmd_DS_802_11_TPC_ADAPT_REQ));

    /* Only the timeout needs byte-swapping here; the command header
     * size field is converted later in wlan_11h_cmd_process. */
    pcmd_ptr->params.tpc_req.req.timeout =
        wlan_cpu_to_le16(pcmd_ptr->params.tpc_req.req.timeout);

    /* Converted to little endian in wlan_11h_cmd_process */
    pcmd_ptr->size = sizeof(HostCmd_DS_802_11_TPC_ADAPT_REQ) + S_DS_GEN;

    HEXDUMP("11h: 11_TPC_ADAPT_REQ:", (t_u8 *) pcmd_ptr,
            (t_u32) pcmd_ptr->size);

    LEAVE();
    return MLAN_STATUS_SUCCESS;
}
/**
* @brief Prepare CMD_802_11_TPC_INFO firmware command
*
* @param priv Private driver information structure
* @param pcmd_ptr Output parameter: Pointer to the command being prepared
* for the firmware
* @param pinfo_buf wlan_11h_tpc_info_param_t passed as void data block
*
* @return MLAN_STATUS_SUCCESS
*/
static mlan_status
wlan_11h_cmd_tpc_info(mlan_private * priv,
              HostCmd_DS_COMMAND * pcmd_ptr, const t_void * pinfo_buf)
{
    HostCmd_DS_802_11_TPC_INFO *ptpc_info = &pcmd_ptr->params.tpc_info;
    MrvlIEtypes_LocalPowerConstraint_t *pconstraint =
        &ptpc_info->local_constraint;
    MrvlIEtypes_PowerCapability_t *pcap = &ptpc_info->power_cap;

    wlan_11h_device_state_t *pstate = &priv->adapter->state_11h;
    const wlan_11h_tpc_info_param_t *ptpc_info_param =
        (wlan_11h_tpc_info_param_t *) pinfo_buf;

    ENTER();

    /* Power capability TLV: device min/max TX power from 11h state. */
    pcap->min_power = pstate->min_tx_power_capability;
    pcap->max_power = pstate->max_tx_power_capability;
    pcap->header.len = wlan_cpu_to_le16(2);
    pcap->header.type = wlan_cpu_to_le16(TLV_TYPE_POWER_CAPABILITY);

    /* Local power constraint TLV: per-channel constraint from caller. */
    pconstraint->chan = ptpc_info_param->chan;
    pconstraint->constraint = ptpc_info_param->power_constraint;
    pconstraint->header.type = wlan_cpu_to_le16(TLV_TYPE_POWER_CONSTRAINT);
    pconstraint->header.len = wlan_cpu_to_le16(2);

    /* Converted to little endian in wlan_11h_cmd_process */
    pcmd_ptr->size = sizeof(HostCmd_DS_802_11_TPC_INFO) + S_DS_GEN;

    HEXDUMP("11h: TPC INFO", (t_u8 *) pcmd_ptr, (t_u32) pcmd_ptr->size);

    LEAVE();
    return MLAN_STATUS_SUCCESS;
}
/**
* @brief Prepare CMD_802_11_CHAN_SW_ANN firmware command
*
* @param priv Private driver information structure
* @param pcmd_ptr Output parameter: Pointer to the command being
* prepared to for firmware
* @param pinfo_buf HostCmd_DS_802_11_CHAN_SW_ANN passed as void data block
*
* @return MLAN_STATUS_SUCCESS
*/
static mlan_status
wlan_11h_cmd_chan_sw_ann(mlan_private * priv,
             HostCmd_DS_COMMAND * pcmd_ptr,
             const t_void * pinfo_buf)
{
    const HostCmd_DS_802_11_CHAN_SW_ANN *pch_sw_ann =
        (HostCmd_DS_802_11_CHAN_SW_ANN *) pinfo_buf;

    ENTER();

    /* Converted to little endian in wlan_11h_cmd_process */
    pcmd_ptr->size = sizeof(HostCmd_DS_802_11_CHAN_SW_ANN) + S_DS_GEN;

    /* Caller-built announcement is copied into the command unchanged. */
    memcpy(priv->adapter, &pcmd_ptr->params.chan_sw_ann, pch_sw_ann,
           sizeof(HostCmd_DS_802_11_CHAN_SW_ANN));

    PRINTM(MINFO, "11h: ChSwAnn: %#x-%u, Seq=%u, Ret=%u\n",
           pcmd_ptr->command, pcmd_ptr->size, pcmd_ptr->seq_num,
           pcmd_ptr->result);
    PRINTM(MINFO, "11h: ChSwAnn: Ch=%d, Cnt=%d, Mode=%d\n",
           pch_sw_ann->new_chan, pch_sw_ann->switch_count,
           pch_sw_ann->switch_mode);

    LEAVE();
    return MLAN_STATUS_SUCCESS;
}
/**
* @brief Prepare CMD_CHAN_REPORT_REQUEST firmware command
*
* @param priv Private driver information structure
* @param pcmd_ptr Output parameter: Pointer to the command being
* prepared to for firmware
* @param pinfo_buf HostCmd_DS_CHAN_RPT_REQ passed as void data block
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_PENDING
*/
static mlan_status
wlan_11h_cmd_chan_rpt_req(mlan_private * priv,
              HostCmd_DS_COMMAND * pcmd_ptr,
              const t_void * pinfo_buf)
{
    const HostCmd_DS_CHAN_RPT_REQ *pchan_rpt_req =
        (HostCmd_DS_CHAN_RPT_REQ *) pinfo_buf;
    wlan_dfs_device_state_t *pstate_dfs = &priv->adapter->state_dfs;
    MrvlIEtypes_ChanRpt11hBasic_t *ptlv_basic;

    ENTER();

    /* Only one channel report may be outstanding at a time; drop this
     * request if a previous one has not yet produced its
     * EVENT_CHANNEL_READY result. */
    if (pstate_dfs->dfs_check_pending) {
        PRINTM(MERROR,
               "11h: ChanRptReq - previous CMD_CHAN_REPORT_REQUEST has"
               " not returned its result yet (as EVENT_CHANNEL_READY)."
               " This command will be dropped.\n");
        LEAVE();
        return MLAN_STATUS_PENDING;
    }

    /* Converted to little endian in wlan_11h_cmd_process */
    pcmd_ptr->size = sizeof(HostCmd_DS_CHAN_RPT_REQ) + S_DS_GEN;

    memcpy(priv->adapter, &pcmd_ptr->params.chan_rpt_req, pchan_rpt_req,
           sizeof(HostCmd_DS_CHAN_RPT_REQ));

    /* if DFS channel, add BASIC report TLV, and set radar bit */
    if (wlan_11h_radar_detect_required
        (priv, pchan_rpt_req->chan_desc.chanNum)) {
        /* Append the TLV immediately after the fixed command body. */
        ptlv_basic =
            (MrvlIEtypes_ChanRpt11hBasic_t *) (((t_u8 *) (pcmd_ptr))
                               + pcmd_ptr->size);
        ptlv_basic->Header.type =
            wlan_cpu_to_le16(TLV_TYPE_CHANRPT_11H_BASIC);
        ptlv_basic->Header.len =
            wlan_cpu_to_le16(sizeof(MeasRptBasicMap_t));
        memset(priv->adapter, &ptlv_basic->map, 0,
               sizeof(MeasRptBasicMap_t));
        ptlv_basic->map.radar = 1;
        pcmd_ptr->size += sizeof(MrvlIEtypes_ChanRpt11hBasic_t);
    }

    /* Reset DFS tracking state here; dfs_check_pending is raised later
       when the firmware acknowledges the command (CMD_RESP == SUCCESS). */
    pstate_dfs->dfs_check_pending = MFALSE;
    pstate_dfs->dfs_radar_found = MFALSE;
    pstate_dfs->dfs_check_channel = pchan_rpt_req->chan_desc.chanNum;

    LEAVE();
    return MLAN_STATUS_SUCCESS;
}
/**
* @brief Set the local power capability and constraint TLV
*
* @param ppbuffer The buffer to add these two TLVs
* @param channel Channel to which the power constraint applies
* @param power_constraint Power constraint to be applied on the channel
* @param min_tx_power_capability Min. Tx Power in Power Capability IE
* @param max_tx_power_capability Max. Tx Power in Power Capability IE
*
* @return The len increased
*/
/*
 * Append the Power Capability TLV and the Local Power Constraint TLV to
 * the supplied buffer, advancing *ppbuffer past both TLVs.
 *
 * Returns the number of bytes written (0 if the buffer pointer is NULL).
 */
static t_u32
wlan_11h_set_local_power_constraint_tlv(t_u8 ** ppbuffer,
                                        t_u8 channel,
                                        t_u8 power_constraint,
                                        t_u8 min_tx_power_capability,
                                        t_u8 max_tx_power_capability)
{
    MrvlIEtypes_PowerCapability_t *ppwr_cap_tlv;
    MrvlIEtypes_LocalPowerConstraint_t *pconst_tlv;
    t_u8 *pbuf_start;

    ENTER();

    /* Nothing to do without a valid output buffer. */
    if ((ppbuffer == MNULL) || (((t_u8 *) (*ppbuffer)) == MNULL)) {
        LEAVE();
        return 0;
    }

    pbuf_start = (t_u8 *) (*ppbuffer);
    PRINTM(MINFO,
           "11h: Set local power constraint = %d channel=%d min_tx_pwr=%d max_tx_pwr=%d\n",
           power_constraint, channel, min_tx_power_capability,
           max_tx_power_capability);

    /* Power Capability TLV: 2-byte payload = { min power, max power } */
    ppwr_cap_tlv = (MrvlIEtypes_PowerCapability_t *) pbuf_start;
    ppwr_cap_tlv->header.type = wlan_cpu_to_le16(TLV_TYPE_POWER_CAPABILITY);
    ppwr_cap_tlv->header.len = wlan_cpu_to_le16(2);
    ppwr_cap_tlv->min_power = min_tx_power_capability;
    ppwr_cap_tlv->max_power = max_tx_power_capability;
    *ppbuffer += sizeof(MrvlIEtypesHeader_t) + 2;

    /* Local Power Constraint TLV: 2-byte payload = { channel, constraint } */
    pconst_tlv = (MrvlIEtypes_LocalPowerConstraint_t *) * ppbuffer;
    pconst_tlv->header.type = wlan_cpu_to_le16(TLV_TYPE_POWER_CONSTRAINT);
    pconst_tlv->header.len = wlan_cpu_to_le16(2);
    pconst_tlv->chan = channel;
    pconst_tlv->constraint = power_constraint;
    *ppbuffer += sizeof(MrvlIEtypesHeader_t) + 2;

    LEAVE();
    return (t_u32) (*ppbuffer - pbuf_start);
}
/**
* @brief Utility function to process a join to an infrastructure BSS
*
* @param priv Private driver information structure
* @param ppbuffer Output parameter: Pointer to the TLV output buffer,
* modified on return to point after the appended 11h TLVs
* @param band Band on which we are joining the BSS
* @param channel Channel on which we are joining the BSS
* @param p11h_bss_info Pointer to the 11h BSS information for this network
* that was parsed out of the scan response.
*
* @return Integer number of bytes appended to the TLV output
* buffer (ppbuffer)
*/
/*
 * Process a join to an infrastructure BSS: append the 11h TLVs
 * (power capability, local power constraint, and a passthrough-wrapped
 * Supported Channels IE) to the association TLV buffer.
 *
 * priv          - private driver information structure
 * ppbuffer      - in/out: TLV output buffer, advanced past appended TLVs
 * band          - band on which we are joining the BSS
 * channel       - channel on which we are joining the BSS
 * p11h_bss_info - 11h BSS information parsed from the scan response
 *
 * Returns the number of bytes appended to the TLV buffer.
 */
static t_u32
wlan_11h_process_infra_join(mlan_private * priv,
                            t_u8 ** ppbuffer,
                            t_u8 band,
                            t_u32 channel, wlan_11h_bss_info_t * p11h_bss_info)
{
    MrvlIEtypesHeader_t ie_header;
    IEEEtypes_SupportedChannels_t sup_chan_ie;
    t_u32 ret_len = 0;
    t_u16 sup_chan_len = 0;

    ENTER();
    /* Null Checks */
    if ((ppbuffer == MNULL) || (((t_u8 *) (*ppbuffer)) == MNULL)) {
        LEAVE();
        return 0;
    }
    /* Append Power Capability + Local Power Constraint TLVs */
    ret_len +=
        wlan_11h_set_local_power_constraint_tlv(ppbuffer,
                                                (t_u8) channel,
                                                (t_u8) p11h_bss_info->
                                                power_constraint.
                                                local_constraint,
                                                (t_u8) priv->adapter->
                                                state_11h.
                                                min_tx_power_capability,
                                                (t_u8) priv->adapter->
                                                state_11h.
                                                max_tx_power_capability);
    /* Setup the Supported Channels IE */
    sup_chan_len = wlan_11h_set_supp_channels_ie(priv, band, &sup_chan_ie);
    /* If we returned a valid Supported Channels IE, wrap and append it */
    if (sup_chan_len) {
        /* Wrap the supported channels IE with a passthrough TLV type */
        ie_header.type = wlan_cpu_to_le16(TLV_TYPE_PASSTHROUGH);
        /* BUG FIX: the TLV header length must be little-endian like every
           other TLV header written into the command buffer; it was
           previously stored in host byte order, which is wrong on
           big-endian hosts. */
        ie_header.len = wlan_cpu_to_le16(sup_chan_len);
        memcpy(priv->adapter, *ppbuffer, &ie_header, sizeof(ie_header));
        /* Increment the return size and the return buffer pointer param */
        *ppbuffer += sizeof(ie_header);
        ret_len += sizeof(ie_header);
        /* Copy the supported channels IE to the output buf, advance
           pointer */
        memcpy(priv->adapter, *ppbuffer, &sup_chan_ie, sup_chan_len);
        *ppbuffer += sup_chan_len;
        ret_len += sup_chan_len;
    }
    LEAVE();
    return ret_len;
}
/**
* @brief Utility function to process a start or join to an adhoc network
*
* Add the elements to the TLV buffer needed in the start/join adhoc commands:
* - IBSS DFS IE
* - Quiet IE
*
* Also send the local constraint to the firmware in a TPC_INFO command.
*
* @param priv Private driver information structure
* @param ppbuffer Output parameter: Pointer to the TLV output buffer,
* modified on return to point after the appended 11h TLVs
* @param channel Channel on which we are starting/joining the IBSS
* @param p11h_bss_info Pointer to the 11h BSS information for this network
* that was parsed out of the scan response. NULL
* indicates we are starting the adhoc network
*
* @return Integer number of bytes appended to the TLV output
* buffer (ppbuffer)
*/
/*
 * Process a start of, or join to, an adhoc network.
 *
 * Appends to the TLV buffer the elements needed in the start/join adhoc
 * commands: the IBSS DFS IE and (when applicable) the Quiet IE, followed
 * by the power capability / local power constraint TLVs.
 *
 * priv          - private driver information structure
 * ppbuffer      - in/out: TLV output buffer, advanced past appended TLVs
 * channel       - channel on which we are starting/joining the IBSS
 * p11h_bss_info - 11h BSS info parsed from the scan response; MNULL
 *                 indicates we are STARTING the adhoc network
 *
 * Returns the number of bytes appended to the TLV buffer.
 */
static t_u32
wlan_11h_process_adhoc(mlan_private * priv,
                       t_u8 ** ppbuffer,
                       t_u32 channel, wlan_11h_bss_info_t * p11h_bss_info)
{
    IEEEtypes_IBSS_DFS_t dfs_elem;
    t_u32 size_appended;
    t_u32 ret_len = 0;
    t_s8 local_constraint = 0;
    mlan_adapter *adapter = priv->adapter;

    ENTER();
    /* ROBUSTNESS FIX: zero the element first so no stack garbage is
       converted/appended below if wlan_11h_set_ibss_dfs_ie() is
       compiled out (STA_SUPPORT undefined). */
    memset(adapter, &dfs_elem, 0, sizeof(dfs_elem));
#ifdef STA_SUPPORT
    /* Format our own IBSS DFS Element.  Include our channel map fields */
    wlan_11h_set_ibss_dfs_ie(priv, &dfs_elem);
#endif
    if (p11h_bss_info) {
        /* Copy the DFS Owner/Recovery Interval from the BSS we are
           joining */
        memcpy(adapter, dfs_elem.dfs_owner,
               p11h_bss_info->ibss_dfs.dfs_owner,
               sizeof(dfs_elem.dfs_owner));
        dfs_elem.dfs_recovery_interval =
            p11h_bss_info->ibss_dfs.dfs_recovery_interval;
    }
    /* Append the dfs element to the TLV buffer */
    size_appended = wlan_11h_convert_ieee_to_mrvl_ie(adapter,
                                                     (t_u8 *) * ppbuffer,
                                                     (t_u8 *) & dfs_elem);
    HEXDUMP("11h: IBSS-DFS", (t_u8 *) * ppbuffer, size_appended);
    *ppbuffer += size_appended;
    ret_len += size_appended;
    /* Join is indicated by the BSS Info pointer being valid (not NULL) */
    if (p11h_bss_info) {
        /* If there was a quiet element, include it in the adhoc join
           command */
        if (p11h_bss_info->quiet.element_id == QUIET) {
            size_appended =
                wlan_11h_convert_ieee_to_mrvl_ie(adapter,
                                                 (t_u8 *) * ppbuffer,
                                                 (t_u8 *) &
                                                 p11h_bss_info->quiet);
            HEXDUMP("11h: Quiet", (t_u8 *) * ppbuffer, size_appended);
            *ppbuffer += size_appended;
            ret_len += size_appended;
        }
        /* Copy the local constraint from the network */
        local_constraint =
            p11h_bss_info->power_constraint.local_constraint;
    } else {
        /* If we are the adhoc starter, we can add a quiet element */
        if (adapter->state_11h.quiet_ie.quiet_period) {
            size_appended =
                wlan_11h_convert_ieee_to_mrvl_ie(adapter,
                                                 (t_u8 *) * ppbuffer,
                                                 (t_u8 *) &
                                                 adapter->state_11h.
                                                 quiet_ie);
            HEXDUMP("11h: Quiet", (t_u8 *) * ppbuffer, size_appended);
            *ppbuffer += size_appended;
            ret_len += size_appended;
        }
        /* Use the local_constraint configured in the driver state */
        local_constraint = adapter->state_11h.usr_def_power_constraint;
    }
    /* CLEANUP: replaced leftover developer debug messages ("WEILIE 1/2")
       with descriptive trace output. */
    PRINTM(MINFO, "11h: process_adhoc: ppbuffer before constraint TLVs = %p\n",
           *ppbuffer);
    ret_len += wlan_11h_set_local_power_constraint_tlv(ppbuffer,
                                                       (t_u8) channel,
                                                       (t_u8)
                                                       local_constraint,
                                                       (t_u8) priv->
                                                       adapter->state_11h.
                                                       min_tx_power_capability,
                                                       (t_u8) priv->
                                                       adapter->state_11h.
                                                       max_tx_power_capability);
    PRINTM(MINFO, "11h: process_adhoc: ppbuffer after constraint TLVs = %p\n",
           *ppbuffer);
    LEAVE();
    return ret_len;
}
/**
* @brief Return whether the driver has enabled 11h for the interface
*
* Association/Join commands are dynamic in that they enable 11h in the
* driver/firmware when they are detected in the existing BSS.
*
* @param priv Private driver information structure
*
* @return
* - MTRUE if 11h is enabled
* - MFALSE otherwise
*/
/* Report whether 11h is enabled in the driver for this interface. */
static t_bool
wlan_11h_is_enabled(mlan_private * priv)
{
    t_bool enabled;
    ENTER();
    enabled = priv->intf_state_11h.is_11h_enabled;
    LEAVE();
    return enabled;
}
/**
* @brief Return whether the device has activated slave radar detection.
*
* @param priv Private driver information structure
*
* @return
* - MTRUE if slave radar detection is enabled in firmware
* - MFALSE otherwise
*/
/* Report whether slave radar detection is currently active in firmware. */
static t_bool
wlan_11h_is_slave_radar_det_active(mlan_private * priv)
{
    t_bool active;
    ENTER();
    active = priv->adapter->state_11h.is_slave_radar_det_active;
    LEAVE();
    return active;
}
/**
* @brief Return whether the slave interface is active, and on DFS channel.
* priv is assumed to already be a dfs slave interface, doesn't check this.
*
* @param priv Private driver information structure
*
* @return
* - MTRUE if priv is slave, and meets both conditions
* - MFALSE otherwise
*/
/*
 * Report whether this (assumed DFS-slave) interface is connected on the
 * A band to a channel that requires radar detection.
 */
static t_bool
wlan_11h_is_slave_active_on_dfs_chan(mlan_private * priv)
{
    t_bool on_dfs_chan;
    ENTER();
    /* connected + A band + radar-detect channel */
    on_dfs_chan = ((priv->media_connected == MTRUE) &&
                   (priv->curr_bss_params.band & BAND_A) &&
                   wlan_11h_radar_detect_required(priv,
                                                  priv->curr_bss_params.
                                                  bss_descriptor.channel))
        ? MTRUE : MFALSE;
    LEAVE();
    return on_dfs_chan;
}
/**
* @brief Return whether the master interface is active, and on DFS channel.
* priv is assumed to already be a dfs master interface, doesn't check this.
*
* @param priv Private driver information structure
*
* @return
* - MTRUE if priv is master, and meets both conditions
* - MFALSE otherwise
*/
/*
 * Report whether this (assumed DFS-master) interface is active on a
 * channel that requires radar detection.
 */
static t_bool
wlan_11h_is_master_active_on_dfs_chan(mlan_private * priv)
{
    t_bool active = MFALSE;
    ENTER();
    if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_STA) {
        /* Ad-hoc creator: connected or mid-start, on the A band, and on
           a radar-detect channel */
        if (((priv->media_connected == MTRUE)
             || (priv->adhoc_state == ADHOC_STARTING)) &&
            (priv->adapter->adhoc_start_band & BAND_A) &&
            wlan_11h_radar_detect_required(priv, priv->adhoc_channel))
            active = MTRUE;
    } else if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_UAP) {
#ifdef UAP_SUPPORT
        /* UAP: BSS started, 5 GHz band config, radar-detect channel */
        if ((priv->uap_bss_started == MTRUE) &&
            (priv->uap_state_chan_cb.band_config & BAND_CONFIG_5GHZ) &&
            wlan_11h_radar_detect_required(priv,
                                           priv->uap_state_chan_cb.
                                           channel))
            active = MTRUE;
#endif
    }
    LEAVE();
    return active;
}
/**
* @brief Determine if priv is DFS Master interface
*
* @param priv Pointer to mlan_private
*
* @return MTRUE or MFALSE
*/
/*
 * Determine whether priv is a DFS master interface.
 * UAP interfaces are always masters; a STA is a master only when it
 * created (or is creating) an IBSS.  Everything else is a slave.
 */
static t_bool
wlan_11h_is_dfs_master(mlan_private * priv)
{
    t_bool is_master = MFALSE;
    ENTER();
    if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_UAP) {
        /* UAP: all are master */
        is_master = MTRUE;
    } else if (GET_BSS_ROLE(priv) == MLAN_BSS_ROLE_STA) {
        /* STA: only the ad-hoc creator is master */
        if ((priv->bss_mode == MLAN_BSS_MODE_IBSS) &&
            (priv->adhoc_state == ADHOC_STARTED ||
             priv->adhoc_state == ADHOC_STARTING))
            is_master = MTRUE;
    }
    LEAVE();
    return is_master;
}
/* Need this as function to pass to wlan_count_priv_cond() */
/**
* @brief Determine if priv is DFS Slave interface
*
* @param priv Pointer to mlan_private
*
* @return MTRUE or MFALSE
*/
/* Determine whether priv is a DFS slave interface (i.e. not a master). */
static t_bool
wlan_11h_is_dfs_slave(mlan_private * priv)
{
    t_bool is_slave;
    ENTER();
    is_slave = !wlan_11h_is_dfs_master(priv);
    LEAVE();
    return is_slave;
}
/**
* @brief This function checks if interface is active.
*
* @param pmpriv A pointer to mlan_private structure
*
* @return MTRUE or MFALSE
*/
/*
 * Report whether the interface is active: BSS started for UAP,
 * media connected for STA.
 */
static t_bool
wlan_is_intf_active(mlan_private * pmpriv)
{
    t_bool active = MFALSE;
    ENTER();
#ifdef UAP_SUPPORT
    if (GET_BSS_ROLE(pmpriv) == MLAN_BSS_ROLE_UAP) {
        /* NOTE: UAP's media_connected == true only after the first STA
           associates, so use uap_bss_started to tell whether the UAP
           has been started. */
        active = pmpriv->uap_bss_started;
        LEAVE();
        return active;
    }
#endif
    if (GET_BSS_ROLE(pmpriv) == MLAN_BSS_ROLE_STA)
        active = pmpriv->media_connected;
    LEAVE();
    return active;
}
/**
* @brief This function gets current radar detect flags
*
* @param pmadapter A pointer to mlan_adapter structure
*
* @return 11H MIB setting for radar detect
*/
/*
 * Build the current radar-detect flag word (11H MIB setting) from the
 * master/slave active states.
 */
static t_u32
wlan_11h_get_current_radar_detect_flags(mlan_adapter * pmadapter)
{
    t_u32 flags = 0;
    ENTER();
    flags |= pmadapter->state_11h.is_master_radar_det_active
        ? MASTER_RADAR_DET_MASK : 0;
    flags |= pmadapter->state_11h.is_slave_radar_det_active
        ? SLAVE_RADAR_DET_MASK : 0;
    PRINTM(MINFO, "%s: radar_det_state_curr=0x%x\n",
           __func__, flags);
    LEAVE();
    return flags;
}
/**
* @brief This function checks if radar detect flags have/should be changed.
*
* @param pmadapter A pointer to mlan_adapter structure
* @param pnew_state Output param with new state, if return MTRUE.
*
* @return MTRUE (need update) or MFALSE (no change in flags)
*/
/*
 * Evaluate the desired radar-detect flags from interface states and
 * pending enable requests, and compare them with the current flags.
 *
 * Writes the desired flags to *pnew_state and returns MTRUE when an
 * update is needed; returns MFALSE when nothing changed.
 */
static t_bool
wlan_11h_check_radar_det_state(mlan_adapter * pmadapter, OUT t_u32 * pnew_state)
{
    t_u32 det_state = 0;
    t_bool changed;
    ENTER();
    PRINTM(MINFO, "%s: master_radar_det_pending=%d, "
           " slave_radar_det_pending=%d\n", __func__,
           pmadapter->state_11h.master_radar_det_enable_pending,
           pmadapter->state_11h.slave_radar_det_enable_pending);
    /* master bit: explicit pending enable, or any master interface
       currently active on a DFS channel */
    if (pmadapter->state_11h.master_radar_det_enable_pending ||
        (wlan_count_priv_cond(pmadapter,
                              wlan_11h_is_master_active_on_dfs_chan,
                              wlan_11h_is_dfs_master) > 0))
        det_state |= MASTER_RADAR_DET_MASK;
    /* slave bit: same evaluation for slave interfaces */
    if (pmadapter->state_11h.slave_radar_det_enable_pending ||
        (wlan_count_priv_cond(pmadapter,
                              wlan_11h_is_slave_active_on_dfs_chan,
                              wlan_11h_is_dfs_slave) > 0))
        det_state |= SLAVE_RADAR_DET_MASK;
    PRINTM(MINFO, "%s: radar_det_state_new=0x%x\n",
           __func__, det_state);
    /* report only when the new flags differ from the current ones */
    changed = (wlan_11h_get_current_radar_detect_flags(pmadapter)
               != det_state) ? MTRUE : MFALSE;
    if (changed)
        *pnew_state = det_state;
    LEAVE();
    return changed;
}
/**
* @brief Determine if mlan_private list only contains UAP interface(s)
*
* @param priv_list List of mlan_private pointers
* @param priv_list_count Number of mlan_privates in above list
*
* @return MTRUE or MFALSE
*/
/*
 * Determine whether the mlan_private list contains only UAP interfaces
 * (at least one UAP and zero STAs).
 */
static t_bool
wlan_only_uap_priv_in_list(mlan_private ** priv_list, t_u8 priv_list_count)
{
#if defined(STA_SUPPORT) && !defined(UAP_SUPPORT)
    return MFALSE;
#else
    t_u8 num_uap = 0;
    t_u8 num_sta = 0;
    t_u8 idx;
    ENTER();
    /* Tally roles over the whole list */
    for (idx = 0; idx < priv_list_count; idx++) {
        if (GET_BSS_ROLE(priv_list[idx]) == MLAN_BSS_ROLE_UAP)
            num_uap++;
        else
            num_sta++;
    }
    LEAVE();
    return ((num_uap > 0) && (num_sta == 0)) ? MTRUE : MFALSE;
#endif
}
/**
* @brief generate the channel center frequency index
*
* @param channel_num channel number
*
* @return frenquency index
*/
/*
 * Map a 5 GHz channel number to its VHT80 channel center frequency index.
 *
 * Per IEEE 802.11ac, center index 42 covers channels 36-48, 58 covers
 * 52-64, 106 covers 100-112, 122 covers 116-128, 138 covers 132-144,
 * and 155 covers 149-161.
 *
 * channel_num - channel number
 *
 * Returns the center frequency index, or 0 if the channel matches no
 * known 80 MHz segment.
 */
static t_u8
wlan_11h_get_channel_freq_idx(IN t_u8 channel_num)
{
    t_u8 index;
    t_u8 center_freq[] = { 42, 58, 106, 122, 138, 155 };
    t_u8 ret = 0;
    /* BUG FIX: compare the channel number itself against each segment.
       The previous code subtracted 100 first ("chan_idx = channel_num -
       100"), which is in the wrong units (center indices are channel
       numbers) and wraps for channels below 100 since t_u8 is unsigned,
       so valid 5 GHz channels either missed every segment or mapped to
       the wrong one. */
    for (index = 0; index < sizeof(center_freq); index++) {
        if ((channel_num >= (center_freq[index] - 6)) &&
            (channel_num <= (center_freq[index] + 6))) {
            ret = center_freq[index];
            break;
        }
    }
    return ret;
}
/**
* @brief Prepare ioctl for add/remove CHAN_SW IE - RADAR_DETECTED event handling
*
* @param pmadapter Pointer to mlan_adapter
 * @param ppioctl_req  Output parameter: pointer to the mlan_ioctl_req allocated inside this function
* @param is_adding_ie CHAN_SW IE is to be added (MTRUE), or removed (MFALSE)
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
/*
 * Allocate and fill an MLAN_OID_MISC_CUSTOM_IE ioctl that adds (or
 * removes) a Channel Switch Announcement IE to beacons/probe responses,
 * used for RADAR_DETECTED event handling.  When the first AP interface
 * in the RDH priv list is 11ac-enabled, a wrapper IE containing a Wide
 * Bandwidth Channel Switch sub-IE and a VHT Transmit Power Envelope
 * sub-IE is appended after the CSA IE.
 *
 * The allocated ioctl is returned via *ppioctl_req; it is freed later
 * by the command-response handler.
 */
static mlan_status
wlan_11h_prepare_custom_ie_chansw(IN mlan_adapter * pmadapter,
                                  OUT mlan_ioctl_req ** ppioctl_req,
                                  IN t_bool is_adding_ie)
{
    mlan_ioctl_req *pioctl_req = MNULL;
    mlan_ds_misc_cfg *pds_misc_cfg = MNULL;
    custom_ie *pcust_chansw_ie = MNULL;
    IEEEtypes_ChanSwitchAnn_t *pchansw_ie = MNULL;
    mlan_status ret;
    IEEEtypes_Header_t *pChanSwWrap_ie = MNULL;
    IEEEtypes_WideBWChanSwitch_t *pbwchansw_ie = MNULL;
    IEEEtypes_VhtTpcEnvelope_t *pvhttpcEnv_ie = MNULL;
    t_u8 index;
    mlan_private *pmpriv = MNULL;
    ENTER();
    if (pmadapter == MNULL || ppioctl_req == MNULL) {
        LEAVE();
        return MLAN_STATUS_FAILURE;
    }
    /* allocate buffer for mlan_ioctl_req and mlan_ds_misc_cfg */
    /* FYI - will be freed as part of cmd_response handler */
    ret = pmadapter->callbacks.moal_malloc(pmadapter->pmoal_handle,
                                           sizeof(mlan_ioctl_req) +
                                           sizeof(mlan_ds_misc_cfg),
                                           MLAN_MEM_DEF,
                                           (t_u8 **) & pioctl_req);
    if ((ret != MLAN_STATUS_SUCCESS) || !pioctl_req) {
        PRINTM(MERROR, "%s(): Could not allocate ioctl req\n",
               __func__);
        LEAVE();
        return MLAN_STATUS_FAILURE;
    }
    /* the misc_cfg payload lives immediately after the ioctl struct in
       the single allocation above */
    pds_misc_cfg = (mlan_ds_misc_cfg *) ((t_u8 *) pioctl_req +
                                         sizeof(mlan_ioctl_req));
    /* prepare mlan_ioctl_req */
    memset(pmadapter, pioctl_req, 0x00, sizeof(mlan_ioctl_req));
    pioctl_req->req_id = MLAN_IOCTL_MISC_CFG;
    pioctl_req->action = MLAN_ACT_SET;
    pioctl_req->pbuf = (t_u8 *) pds_misc_cfg;
    pioctl_req->buf_len = sizeof(mlan_ds_misc_cfg);
    /* prepare mlan_ds_misc_cfg */
    memset(pmadapter, pds_misc_cfg, 0x00, sizeof(mlan_ds_misc_cfg));
    pds_misc_cfg->sub_command = MLAN_OID_MISC_CUSTOM_IE;
    pds_misc_cfg->param.cust_ie.type = TLV_TYPE_MGMT_IE;
    pds_misc_cfg->param.cust_ie.len = (sizeof(custom_ie) - MAX_IE_SIZE);
    /* configure custom_ie api settings */
    pcust_chansw_ie =
        (custom_ie *) & pds_misc_cfg->param.cust_ie.ie_data_list[0];
    pcust_chansw_ie->ie_index = 0xffff; /* Auto index */
    pcust_chansw_ie->ie_length = sizeof(IEEEtypes_ChanSwitchAnn_t);
    pcust_chansw_ie->mgmt_subtype_mask = (is_adding_ie)
        ? MBIT(8) | MBIT(5)     /* add IE for BEACON | PROBE_RSP */
        : 0;                    /* remove IE */
    /* prepare CHAN_SW IE inside ioctl */
    pchansw_ie = (IEEEtypes_ChanSwitchAnn_t *) pcust_chansw_ie->ie_buffer;
    pchansw_ie->element_id = CHANNEL_SWITCH_ANN;
    pchansw_ie->len =
        sizeof(IEEEtypes_ChanSwitchAnn_t) - sizeof(IEEEtypes_Header_t);
    pchansw_ie->chan_switch_mode = 1;   /* STA should not transmit */
    pchansw_ie->new_channel_num = pmadapter->state_rdh.new_channel;
    pchansw_ie->chan_switch_count = 0;  /* simplification */
    for (index = 0; index < pmadapter->state_rdh.priv_list_count; index++) {
        pmpriv = pmadapter->state_rdh.priv_list[index];
        /* find the first AP interface */
        if (GET_BSS_ROLE(pmpriv) == MLAN_BSS_ROLE_UAP) {
            if (pmpriv->is_11ac_enabled) {
                /* wrapper IE placed directly after the CSA IE */
                pChanSwWrap_ie =
                    (IEEEtypes_Header_t *) ((t_u8 *)
                                            pchansw_ie +
                                            sizeof
                                            (IEEEtypes_ChanSwitchAnn_t));
                /* NOTE(review): wrapper uses EXT_POWER_CONSTR as its
                   element ID - confirm this matches the intended
                   Channel Switch Wrapper element. */
                pChanSwWrap_ie->element_id = EXT_POWER_CONSTR;
                /* will have multiple sub IEs */
                pChanSwWrap_ie->len = 0;
                /* prepare the Wide Bandwidth Channel Switch IE */
                pbwchansw_ie =
                    (IEEEtypes_WideBWChanSwitch_t
                     *) ((t_u8 *) pChanSwWrap_ie +
                         sizeof(IEEEtypes_Header_t));
                pbwchansw_ie->ieee_hdr.element_id =
                    BW_CHANNEL_SWITCH;
                pbwchansw_ie->ieee_hdr.len =
                    sizeof(IEEEtypes_WideBWChanSwitch_t) -
                    sizeof(IEEEtypes_Header_t);
                pbwchansw_ie->new_channel_width = VHT_OPER_CHWD_80MHZ;  /* fix
                                                                           80MHZ
                                                                           now */
                pbwchansw_ie->new_channel_center_freq0 =
                    wlan_11h_get_channel_freq_idx
                    (pmadapter->state_rdh.new_channel);
                pbwchansw_ie->new_channel_center_freq1 =
                    wlan_11h_get_channel_freq_idx
                    (pmadapter->state_rdh.new_channel);
                pChanSwWrap_ie->len +=
                    sizeof(IEEEtypes_WideBWChanSwitch_t);
                /* prepare the VHT Transmit Power Envelope IE */
                pvhttpcEnv_ie =
                    (IEEEtypes_VhtTpcEnvelope_t *) ((t_u8 *)
                                                    pChanSwWrap_ie
                                                    +
                                                    sizeof
                                                    (IEEEtypes_Header_t)
                                                    +
                                                    sizeof
                                                    (IEEEtypes_WideBWChanSwitch_t));
                pvhttpcEnv_ie->ieee_hdr.element_id =
                    VHT_TX_POWER_ENV;
                pvhttpcEnv_ie->ieee_hdr.len =
                    sizeof(IEEEtypes_VhtTpcEnvelope_t) -
                    sizeof(IEEEtypes_Header_t);
                pvhttpcEnv_ie->tpc_info = 3;    /* Local Max TX
                                                   Power Count=
                                                   3, Local TX
                                                   Power Unit
                                                   Inter=EIP(0) */
                pvhttpcEnv_ie->local_max_tp_20mhz = 0xff;
                pvhttpcEnv_ie->local_max_tp_40mhz = 0xff;
                pvhttpcEnv_ie->local_max_tp_80mhz = 0xff;
                pChanSwWrap_ie->len +=
                    sizeof(IEEEtypes_VhtTpcEnvelope_t);
                /* account for wrapper header + both sub-IEs */
                pcust_chansw_ie->ie_length +=
                    sizeof(IEEEtypes_WideBWChanSwitch_t)
                    + sizeof(IEEEtypes_VhtTpcEnvelope_t)
                    + sizeof(IEEEtypes_Header_t);
                PRINTM(MINFO,
                       "Append Wide Bandwidth Channel Switch IE\n");
                break;
            }
        }
    }
    pds_misc_cfg->param.cust_ie.len += pcust_chansw_ie->ie_length;
    DBG_HEXDUMP(MCMD_D, "11h: custom_ie containing CHAN_SW IE",
                (t_u8 *) pcust_chansw_ie, pds_misc_cfg->param.cust_ie.len);
    /* assign output pointer before returning */
    *ppioctl_req = pioctl_req;
    LEAVE();
    return MLAN_STATUS_SUCCESS;
}
#ifdef UAP_SUPPORT
/**
* @brief Retrieve a randomly selected starting channel if needed for 11h
*
* If 11h is enabled and 5GHz band is selected in band_config
* return a random channel in A band, else one from BG band.
*
* @param priv Private driver information structure
 * @param uap_band_cfg  UAP band configuration (checked for the 5GHz band bit)
*
* @return Starting channel
*/
/*
 * Retrieve a randomly selected starting channel if needed for 11h.
 *
 * If 11h is enabled and the 5 GHz band is selected in uap_band_cfg,
 * return a random channel from the region's A-band table; otherwise
 * return the BG-band default channel.
 *
 * priv         - private driver information structure
 * uap_band_cfg - UAP band configuration (checked for 5 GHz)
 *
 * Returns the starting channel.
 */
static t_u8
wlan_11h_get_uap_start_channel(mlan_private * priv, t_u8 uap_band_cfg)
{
    t_u8 start_chn;
    mlan_adapter *adapter = priv->adapter;
    t_u32 region;
    t_u32 rand_entry;
    region_chan_t *chn_tbl;
    t_u8 rand_tries = 0;

    /* TODO: right now mostly a copy of wlan_11h_get_adhoc_start_channel.
       Improve to be more specfic to UAP, e.g. 1. take into account
       COUNTRY_CODE -> region_code 2. check domain_info for value channels */
    ENTER();

    /* Default channel: used if 11h is disabled or the band does not
       require 11h support. */
    start_chn = DEFAULT_AD_HOC_CHANNEL;

    /* Check that we are looking for a channel in the A Band */
    if (uap_band_cfg & UAP_BAND_CONFIG_5GHZ) {
        /* A-band default: used if random selection fails or if 11h is
           not enabled. */
        start_chn = DEFAULT_AD_HOC_CHANNEL_A;
        if (wlan_11h_is_enabled(priv)) {
            /* Search the region_channel tables for a valid table that
               matches the region code and is marked for the A band. */
            for (region = 0; (region < MAX_REGION_CHANNEL_NUM);
                 region++) {
                chn_tbl = &adapter->region_channel[region];
                /* ROBUSTNESS FIX: also require a non-empty channel list
                   (num_cfp != 0) so the modulo below cannot divide by
                   zero. */
                if (chn_tbl->valid
                    && chn_tbl->region == adapter->region_code
                    && (chn_tbl->band & BAND_A)
                    && chn_tbl->num_cfp) {
                    /* Pick random table entries until the chosen channel
                       is not under a non-occupancy period and - while in
                       the RDH info stage - does not itself require radar
                       detection, or until the retry budget runs out. */
                    do {
                        rand_entry =
                            wlan_11h_get_random_num(adapter) %
                            chn_tbl->num_cfp;
                        start_chn =
                            (t_u8) chn_tbl->pcfp[rand_entry].channel;
                    } while ((wlan_11h_is_channel_under_nop
                              (adapter, start_chn) ||
                              ((adapter->state_rdh.stage ==
                                RDH_GET_INFO_CHANNEL) &&
                               wlan_11h_radar_detect_required
                               (priv, start_chn)))
                             && (++rand_tries <
                                 MAX_RANDOM_CHANNEL_RETRIES));
                }
            }
        }
    }
    PRINTM(MCMD_D, "11h: UAP Get Start Channel %d\n", start_chn);
    LEAVE();
    return start_chn;
}
#endif /* UAP_SUPPORT */
#ifdef DEBUG_LEVEL1
/* Debug-only names for wlan_dfs_timestamp_t 'represents' values;
   index 0 is unused.  Referenced from PRINTM trace output. */
static const char *DFS_TS_REPR_STRINGS[] = { "",
    "NOP_start",
    "CAC_completed"
};
#endif
/**
* @brief Search for a dfs timestamp in the list with desired channel.
*
* Assumes there will only be one timestamp per channel in the list.
*
* @param pmadapter Pointer to mlan_adapter
* @param channel Channel number
*
* @return Pointer to timestamp if found, or MNULL
*/
/*
 * Search the DFS timestamp list for the entry matching 'channel'.
 * Assumes at most one timestamp per channel.  Returns the entry, or
 * MNULL when no timestamp exists for that channel.
 */
static wlan_dfs_timestamp_t *
wlan_11h_find_dfs_timestamp(mlan_adapter * pmadapter, t_u8 channel)
{
    wlan_dfs_timestamp_t *pcur = MNULL;
    wlan_dfs_timestamp_t *pmatch = MNULL;
    ENTER();
    pcur = (wlan_dfs_timestamp_t *) util_peek_list(pmadapter->pmoal_handle,
                                                   &pmadapter->state_dfs.
                                                   dfs_ts_head, MNULL,
                                                   MNULL);
    /* walk until we wrap back around to the list head sentinel */
    while (pcur &&
           pcur !=
           (wlan_dfs_timestamp_t *) & pmadapter->state_dfs.dfs_ts_head) {
        PRINTM(MINFO,
               "dfs_timestamp(@ %p) - chan=%d, repr=%d(%s),"
               " time(sec.usec)=%lu.%06lu\n", pcur, pcur->channel,
               pcur->represents, DFS_TS_REPR_STRINGS[pcur->represents],
               pcur->ts_sec, pcur->ts_usec);
        if (pcur->channel == channel) {
            pmatch = pcur;
            break;
        }
        pcur = pcur->pnext;
    }
    LEAVE();
    return pmatch;
}
/**
* @brief Removes dfs timestamp from list.
*
* @param pmadapter Pointer to mlan_adapter
* @param pdfs_ts Pointer to dfs_timestamp to remove
*/
/*
 * Remove a DFS timestamp from the list and free it.
 *
 * pmadapter - pointer to mlan_adapter
 * pdfs_ts   - the dfs_timestamp entry to remove (must be on the list)
 */
static t_void
wlan_11h_remove_dfs_timestamp(mlan_adapter * pmadapter,
                              wlan_dfs_timestamp_t * pdfs_ts)
{
    ENTER();
    /* dequeue and delete timestamp */
    util_unlink_list(pmadapter->pmoal_handle,
                     &pmadapter->state_dfs.dfs_ts_head,
                     (pmlan_linked_list) pdfs_ts, MNULL, MNULL);
    pmadapter->callbacks.moal_mfree(pmadapter->pmoal_handle,
                                    (t_u8 *) pdfs_ts);
    LEAVE();
}
/**
* @brief Add a dfs timestamp to the list
*
* Assumes there will only be one timestamp per channel in the list,
* and that timestamp modes (represents) are mutually exclusive.
*
* @param pmadapter Pointer to mlan_adapter
* @param repr Timestamp 'represents' value (see _dfs_timestamp_repr_e)
* @param channel Channel number
*
 * @return MLAN_STATUS_SUCCESS, or MLAN_STATUS_FAILURE on allocation failure
*/
/*
 * Add or update the DFS timestamp for a channel.
 *
 * Assumes at most one timestamp per channel and that timestamp modes
 * ('represents') are mutually exclusive, so an existing entry for the
 * channel is simply stamped with the current time and new mode.
 *
 * pmadapter - pointer to mlan_adapter
 * repr      - timestamp 'represents' value (see _dfs_timestamp_repr_e)
 * channel   - channel number
 *
 * Returns MLAN_STATUS_SUCCESS, or MLAN_STATUS_FAILURE on allocation
 * failure.
 */
static mlan_status
wlan_11h_add_dfs_timestamp(mlan_adapter * pmadapter, t_u8 repr, t_u8 channel)
{
    wlan_dfs_timestamp_t *pdfs_ts = MNULL;
    mlan_status ret = MLAN_STATUS_SUCCESS;
    ENTER();
    pdfs_ts = wlan_11h_find_dfs_timestamp(pmadapter, channel);
    if (!pdfs_ts) {
        /* need to allocate new timestamp */
        ret = pmadapter->callbacks.moal_malloc(pmadapter->pmoal_handle,
                                               sizeof
                                               (wlan_dfs_timestamp_t),
                                               MLAN_MEM_DEF,
                                               (t_u8 **) & pdfs_ts);
        if ((ret != MLAN_STATUS_SUCCESS) || !pdfs_ts) {
            PRINTM(MERROR, "%s(): Could not allocate dfs_ts\n",
                   __func__);
            LEAVE();
            return MLAN_STATUS_FAILURE;
        }
        memset(pmadapter, (t_u8 *) pdfs_ts, 0,
               sizeof(wlan_dfs_timestamp_t));
        util_enqueue_list_tail(pmadapter->pmoal_handle,
                               &pmadapter->state_dfs.dfs_ts_head,
                               (pmlan_linked_list) pdfs_ts, MNULL,
                               MNULL);
        pdfs_ts->channel = channel;
    }
    /* (else, use existing timestamp for channel; see assumptions above) */
    /* stamp with the current system time and the new mode */
    pmadapter->callbacks.moal_get_system_time(pmadapter->pmoal_handle,
                                              &pdfs_ts->ts_sec,
                                              &pdfs_ts->ts_usec);
    pdfs_ts->represents = repr;
    PRINTM(MCMD_D, "11h: add/update dfs_timestamp - chan=%d, repr=%d(%s),"
           " time(sec.usec)=%lu.%06lu\n", pdfs_ts->channel,
           pdfs_ts->represents, DFS_TS_REPR_STRINGS[pdfs_ts->represents],
           pdfs_ts->ts_sec, pdfs_ts->ts_usec);
    LEAVE();
    return ret;
}
/********************************************************
Global functions
********************************************************/
/**
* @brief Return whether the device has activated master radar detection.
*
* @param priv Private driver information structure
*
* @return
* - MTRUE if master radar detection is enabled in firmware
* - MFALSE otherwise
*/
/* Report whether master radar detection is currently active in firmware. */
t_bool
wlan_11h_is_master_radar_det_active(mlan_private * priv)
{
    t_bool active;
    ENTER();
    active = priv->adapter->state_11h.is_master_radar_det_active;
    LEAVE();
    return active;
}
/**
* @brief Configure master radar detection.
* Call wlan_11h_check_update_radar_det_state() afterwards
* to push this to firmware.
*
* @param priv Private driver information structure
* @param enable Whether to enable or disable master radar detection
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*
* @sa wlan_11h_check_update_radar_det_state
*/
/*
 * Request enabling/disabling of master radar detection.  The change is
 * recorded as pending; call wlan_11h_check_update_radar_det_state()
 * afterwards to push it to firmware.
 *
 * Returns MLAN_STATUS_SUCCESS, or MLAN_STATUS_FAILURE when priv is not
 * a DFS master or master radar detection is disabled by init parameter.
 */
mlan_status
wlan_11h_config_master_radar_det(mlan_private * priv, t_bool enable)
{
    mlan_status status = MLAN_STATUS_FAILURE;
    ENTER();
    /* only meaningful on a DFS master, and only when allowed by the
       init-time dfs_master_radar_det_en parameter */
    if (wlan_11h_is_dfs_master(priv) &&
        priv->adapter->init_para.dfs_master_radar_det_en) {
        priv->adapter->state_11h.master_radar_det_enable_pending = enable;
        status = MLAN_STATUS_SUCCESS;
    }
    LEAVE();
    return status;
}
/**
* @brief Configure slave radar detection.
* Call wlan_11h_check_update_radar_det_state() afterwards
* to push this to firmware.
*
* @param priv Private driver information structure
* @param enable Whether to enable or disable slave radar detection
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*
* @sa wlan_11h_check_update_radar_det_state
*/
/*
 * Request enabling/disabling of slave radar detection.  The change is
 * recorded as pending; call wlan_11h_check_update_radar_det_state()
 * afterwards to push it to firmware.
 *
 * Returns MLAN_STATUS_SUCCESS, or MLAN_STATUS_FAILURE when priv is not
 * a DFS slave or slave radar detection is disabled by init parameter.
 */
mlan_status
wlan_11h_config_slave_radar_det(mlan_private * priv, t_bool enable)
{
    mlan_status status = MLAN_STATUS_FAILURE;
    ENTER();
    /* only meaningful on a DFS slave, and only when allowed by the
       init-time dfs_slave_radar_det_en parameter */
    if (wlan_11h_is_dfs_slave(priv) &&
        priv->adapter->init_para.dfs_slave_radar_det_en) {
        priv->adapter->state_11h.slave_radar_det_enable_pending = enable;
        status = MLAN_STATUS_SUCCESS;
    }
    LEAVE();
    return status;
}
/**
* @brief Checks all interfaces and determines if radar_detect flag states
* have/should be changed. If so, sends SNMP_MIB 11H command to FW.
* Call this function on any interface enable/disable/channel change.
*
* @param pmpriv Pointer to mlan_private structure
*
* @return MLAN_STATUS_SUCCESS (update or not)
* or MLAN_STATUS_FAILURE (cmd failure)
*
* @sa wlan_11h_check_radar_det_state
*/
/*
 * Check all interfaces and, if the radar-detect flags should change,
 * send an SNMP_MIB 11H command to firmware with the new flags (keeping
 * this interface's existing 11h-active bit).  Always clears the pending
 * master/slave enable requests on return.
 *
 * Returns MLAN_STATUS_SUCCESS (whether or not an update was needed),
 * or MLAN_STATUS_FAILURE if the command could not be prepared.
 */
mlan_status
wlan_11h_check_update_radar_det_state(mlan_private * pmpriv)
{
    t_u32 new_state = 0;
    t_u32 mib_value = 0;
    mlan_status status = MLAN_STATUS_SUCCESS;
    ENTER();
    if (wlan_11h_check_radar_det_state(pmpriv->adapter, &new_state)) {
        PRINTM(MCMD_D, "%s: radar_det_state being updated.\n",
               __func__);
        mib_value |= new_state;
        /* keep priv's existing 11h state */
        if (pmpriv->intf_state_11h.is_11h_active)
            mib_value |= ENABLE_11H_MASK;
        /* push the new flags to firmware via SNMP_MIB */
        status = wlan_prepare_cmd(pmpriv,
                                  HostCmd_CMD_802_11_SNMP_MIB,
                                  HostCmd_ACT_GEN_SET,
                                  Dot11H_i, MNULL, &mib_value);
        if (status)
            status = MLAN_STATUS_FAILURE;
    }
    /* updated state sent OR no change, thus no longer pending */
    pmpriv->adapter->state_11h.master_radar_det_enable_pending = MFALSE;
    pmpriv->adapter->state_11h.slave_radar_det_enable_pending = MFALSE;
    LEAVE();
    return status;
}
/**
* @brief Query 11h firmware enabled state.
*
* Return whether the firmware currently has 11h extensions enabled
*
* @param priv Private driver information structure
*
* @return
* - MTRUE if 11h has been activated in the firmware
* - MFALSE otherwise
*
* @sa wlan_11h_activate
*/
/* Report whether 11h extensions are currently activated in firmware. */
t_bool
wlan_11h_is_active(mlan_private * priv)
{
    t_bool active;
    ENTER();
    active = priv->intf_state_11h.is_11h_active;
    LEAVE();
    return active;
}
/**
* @brief Enable the transmit interface and record the state.
*
* @param priv Private driver information structure
*
* @return N/A
*/
/* Re-enable transmit (if it was disabled) and record the state. */
t_void
wlan_11h_tx_enable(mlan_private * priv)
{
    ENTER();
    if (!priv->intf_state_11h.tx_disabled) {
        /* already enabled - nothing to do */
        LEAVE();
        return;
    }
    /* notify moal first, then clear the flag (same order as before) */
    wlan_recv_event(priv, MLAN_EVENT_ID_FW_START_TX, MNULL);
    priv->intf_state_11h.tx_disabled = MFALSE;
    LEAVE();
}
/**
* @brief Disable the transmit interface and record the state.
*
* @param priv Private driver information structure
*
* @return N/A
*/
/* Disable transmit (if it was enabled) and record the state. */
t_void
wlan_11h_tx_disable(mlan_private * priv)
{
    ENTER();
    if (priv->intf_state_11h.tx_disabled) {
        /* already disabled - nothing to do */
        LEAVE();
        return;
    }
    /* set the flag first, then notify moal (same order as before) */
    priv->intf_state_11h.tx_disabled = MTRUE;
    wlan_recv_event(priv, MLAN_EVENT_ID_FW_STOP_TX, MNULL);
    LEAVE();
}
/**
* @brief Enable or Disable the 11h extensions in the firmware
*
* @param priv Private driver information structure
* @param pioctl_buf A pointer to MLAN IOCTL Request buffer
* @param flag Enable 11h if MTRUE, disable otherwise
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
/*
 * Enable or disable the 11h extensions in firmware via SNMP_MIB,
 * folding in the currently-active master/slave radar-detect bits.
 * On success the driver's per-interface 11h-active flag is updated.
 *
 * Returns MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE.
 */
mlan_status
wlan_11h_activate(mlan_private * priv, t_void * pioctl_buf, t_bool flag)
{
    t_u32 mib_value = flag & ENABLE_11H_MASK;
    mlan_status status;
    ENTER();
    /* add bits for master/slave radar detect into the MIB value */
    mib_value |= wlan_11h_get_current_radar_detect_flags(priv->adapter);
    /* Send cmd to FW to enable/disable 11h function in firmware */
    status = wlan_prepare_cmd(priv,
                              HostCmd_CMD_802_11_SNMP_MIB,
                              HostCmd_ACT_GEN_SET,
                              Dot11H_i, (t_void *) pioctl_buf, &mib_value);
    if (status)
        status = MLAN_STATUS_FAILURE;
    else
        /* Set boolean flag in driver 11h state */
        priv->intf_state_11h.is_11h_active = flag;
    PRINTM(MINFO, "11h: %s\n", flag ? "Activate" : "Deactivate");
    LEAVE();
    return status;
}
/**
* @brief Initialize the 11h parameters and enable 11h when starting an IBSS
*
* @param adapter mlan_adapter structure
*
* @return N/A
*/
/*
 * Initialize the adapter-wide 11h state: TPC defaults, the template
 * Quiet IE, the DFS tracking state (including the timestamp list), the
 * radar-detect handling (RDH) state machine, and - when built in - the
 * DFS testing overrides.
 */
t_void
wlan_11h_init(mlan_adapter * adapter)
{
    wlan_11h_device_state_t *pstate_11h = &adapter->state_11h;
    IEEEtypes_Quiet_t *pquiet = &adapter->state_11h.quiet_ie;
    wlan_dfs_device_state_t *pstate_dfs = &adapter->state_dfs;
    wlan_radar_det_hndlg_state_t *pstate_rdh = &adapter->state_rdh;
#ifdef DFS_TESTING_SUPPORT
    wlan_dfs_testing_settings_t *pdfs_test = &adapter->dfs_test_params;
#endif
    ENTER();
    /* Initialize 11H struct: TPC defaults and radar-detect flags */
    pstate_11h->usr_def_power_constraint = WLAN_11H_TPC_POWERCONSTRAINT;
    pstate_11h->min_tx_power_capability = WLAN_11H_TPC_POWERCAPABILITY_MIN;
    pstate_11h->max_tx_power_capability = WLAN_11H_TPC_POWERCAPABILITY_MAX;
    pstate_11h->recvd_chanswann_event = MFALSE;
    pstate_11h->master_radar_det_enable_pending = MFALSE;
    pstate_11h->slave_radar_det_enable_pending = MFALSE;
    pstate_11h->is_master_radar_det_active = MFALSE;
    pstate_11h->is_slave_radar_det_active = MFALSE;
    /* Initialize quiet_ie template (count/period/duration/offset stay 0) */
    memset(adapter, pquiet, 0, sizeof(IEEEtypes_Quiet_t));
    pquiet->element_id = QUIET;
    pquiet->len =
        (sizeof(pquiet->quiet_count) + sizeof(pquiet->quiet_period)
         + sizeof(pquiet->quiet_duration)
         + sizeof(pquiet->quiet_offset));
    /* Initialize DFS struct, including the empty timestamp list */
    pstate_dfs->dfs_check_pending = MFALSE;
    pstate_dfs->dfs_radar_found = MFALSE;
    pstate_dfs->dfs_check_channel = 0;
    pstate_dfs->dfs_report_time_sec = 0;
    util_init_list((pmlan_linked_list) & pstate_dfs->dfs_ts_head);
    /* Initialize RDH (radar-detect handling) state machine */
    pstate_rdh->stage = RDH_OFF;
    pstate_rdh->priv_list_count = 0;
    pstate_rdh->priv_curr_idx = 0;
    pstate_rdh->curr_channel = 0;
    pstate_rdh->new_channel = 0;
    pstate_rdh->uap_band_cfg = 0;
    pstate_rdh->max_bcn_dtim_ms = 0;
    memset(adapter, pstate_rdh->priv_list, 0,
           sizeof(pstate_rdh->priv_list));
#ifdef DFS_TESTING_SUPPORT
    /* Initialize DFS testing struct (all overrides disabled) */
    pdfs_test->user_cac_period_msec = 0;
    pdfs_test->user_nop_period_sec = 0;
    pdfs_test->no_channel_change_on_radar = MFALSE;
    pdfs_test->fixed_new_channel_on_radar = 0;
#endif
    LEAVE();
}
/**
* @brief Cleanup for the 11h parameters that allocated memory, etc.
*
* @param adapter mlan_adapter structure
*
* @return N/A
*/
t_void
wlan_11h_cleanup(mlan_adapter * adapter)
{
    wlan_dfs_device_state_t *pdfs = &adapter->state_dfs;
    wlan_dfs_timestamp_t *pentry;

    ENTER();
    /* Drain the DFS timestamp list: unlink and free each entry until
       the list is empty. */
    while ((pentry = (wlan_dfs_timestamp_t *)
            util_peek_list(adapter->pmoal_handle,
                           &pdfs->dfs_ts_head, MNULL, MNULL)) != MNULL) {
        util_unlink_list(adapter->pmoal_handle,
                         &pdfs->dfs_ts_head,
                         (pmlan_linked_list) pentry, MNULL, MNULL);
        adapter->callbacks.moal_mfree(adapter->pmoal_handle,
                                      (t_u8 *) pentry);
    }
    LEAVE();
}
/**
* @brief Initialize the 11h parameters and enable 11h when starting an IBSS
*
* @param pmpriv Pointer to mlan_private structure
*
* @return N/A
*/
t_void
wlan_11h_priv_init(mlan_private * pmpriv)
{
    wlan_11h_interface_state_t *pstate = &pmpriv->intf_state_11h;

    ENTER();
    /* Per-interface defaults: 11h enabled but not yet active, ad-hoc
       channel auto-selection on, tx not blocked, no CSA pending. */
    pstate->is_11h_enabled = MTRUE;
    pstate->is_11h_active = MFALSE;
    pstate->adhoc_auto_sel_chan = MTRUE;
    pstate->tx_disabled = MFALSE;
    pstate->dfs_slave_csa_chan = 0;
    pstate->dfs_slave_csa_expire_at_sec = 0;
    LEAVE();
}
/**
* @brief Retrieve a randomly selected starting channel if needed for 11h
*
* If 11h is enabled and an A-Band channel start band preference is
* configured in the driver, the start channel must be chosen at random
* in order to meet the 11h channel-selection requirements.
*
* @param priv Private driver information structure
*
* @return Starting channel
*/
/**
 *  Select the ad-hoc start channel, randomizing within the region's
 *  A-band channel table when 11h is enabled.
 *
 *  @param priv  Private driver information structure
 *
 *  @return Starting channel number
 */
t_u8
wlan_11h_get_adhoc_start_channel(mlan_private * priv)
{
    t_u8 start_chn;
    mlan_adapter *adapter = priv->adapter;
    t_u32 region;
    t_u32 rand_entry;
    region_chan_t *chn_tbl;
    t_u8 rand_tries = 0;

    ENTER();
    /* Default: used if 11h is disabled or the band doesn't need 11h. */
    start_chn = DEFAULT_AD_HOC_CHANNEL;

    /* Random selection only applies to A-band ad-hoc starts. */
    if ((adapter->adhoc_start_band & BAND_A)
        || (adapter->adhoc_start_band & BAND_AN)) {
        /* A-band default: used if random selection fails or 11h is
           not enabled. */
        start_chn = DEFAULT_AD_HOC_CHANNEL_A;
        if (wlan_11h_is_enabled(priv)) {
            /* Search the region_channel tables for one that is valid,
               matches the current region code, and covers the A band. */
            for (region = 0; region < MAX_REGION_CHANNEL_NUM; region++) {
                chn_tbl = &adapter->region_channel[region];
                /* BUGFIX: also require a non-empty table (num_cfp > 0);
                   the modulo below would otherwise divide by zero. */
                if (chn_tbl->valid
                    && chn_tbl->region == adapter->region_code
                    && (chn_tbl->band & BAND_A)
                    && chn_tbl->num_cfp > 0) {
                    /* Pick a random entry in [0, num_cfp).  Retry if the
                       channel is under NOP, or needs radar detection while
                       the RDH state machine is querying channels, up to
                       MAX_RANDOM_CHANNEL_RETRIES times. */
                    do {
                        rand_entry =
                            wlan_11h_get_random_num(adapter) %
                            chn_tbl->num_cfp;
                        start_chn =
                            (t_u8) chn_tbl->pcfp[rand_entry].channel;
                    } while ((wlan_11h_is_channel_under_nop
                              (adapter, start_chn) ||
                              ((adapter->state_rdh.stage ==
                                RDH_GET_INFO_CHANNEL) &&
                               wlan_11h_radar_detect_required
                               (priv, start_chn)))
                             && (++rand_tries <
                                 MAX_RANDOM_CHANNEL_RETRIES));
                }
            }
        }
    }
    PRINTM(MINFO, "11h: %s: AdHoc Channel set to %u\n",
           wlan_11h_is_enabled(priv) ? "Enabled" : "Disabled", start_chn);
    LEAVE();
    return start_chn;
}
/**
* @brief Retrieve channel closed for operation by Channel Switch Announcement
*
* After receiving CSA, we must not transmit in any form on the original
* channel for a certain duration. This checks the time, and returns
* the channel if valid.
*
* @param priv Private driver information structure
*
* @return Closed channel, else 0
*/
t_u8
wlan_11h_get_csa_closed_channel(mlan_private * priv)
{
    wlan_11h_interface_state_t *pstate = &priv->intf_state_11h;
    t_u32 now_sec, now_usec;

    ENTER();
    /* Nothing to report if no CSA channel is on record. */
    if (!pstate->dfs_slave_csa_chan) {
        LEAVE();
        return 0;
    }

    /* A CSA channel is recorded; expire it once its hold time passes. */
    priv->adapter->callbacks.moal_get_system_time(priv->adapter->
                                                  pmoal_handle,
                                                  &now_sec, &now_usec);
    if (now_sec > pstate->dfs_slave_csa_expire_at_sec) {
        /* Expired: remove the channel from the blacklist table and
           clear the record. */
        wlan_set_chan_blacklist(priv, BAND_A,
                                pstate->dfs_slave_csa_chan, MFALSE);
        pstate->dfs_slave_csa_chan = 0;
        pstate->dfs_slave_csa_expire_at_sec = 0;
    }
    LEAVE();
    return pstate->dfs_slave_csa_chan;
}
/**
* @brief Check if the current region's regulations require the input channel
* to be scanned for radar.
*
* Based on statically defined requirements for sub-bands per regulatory
* agency requirements.
*
* Used in adhoc start to determine if channel availability check is required
*
* @param priv Private driver information structure
* @param channel Channel to determine radar detection requirements
*
* @return
* - MTRUE if radar detection is required
* - MFALSE otherwise
*/
/** @sa wlan_11h_issue_radar_detect
*/
t_bool
wlan_11h_radar_detect_required(mlan_private * priv, t_u8 channel)
{
    t_bool required;
    const char *req_str;

    ENTER();
    /* Regulatory requirements exist whether or not 11h/measurement
       support is enabled, so no such checks are made here. */
    required = wlan_get_cfp_radar_detect(priv, channel);
    req_str = required ? "" : "not ";
    if (!priv->adapter->region_code) {
        PRINTM(MINFO, "11h: Radar detection in CFP code[BG:%#x, A:%#x] "
               "is %srequired for channel %d\n",
               priv->adapter->cfp_code_bg, priv->adapter->cfp_code_a,
               req_str, channel);
    } else {
        PRINTM(MINFO, "11h: Radar detection in region %#02x "
               "is %srequired for channel %d\n",
               priv->adapter->region_code, req_str, channel);
    }
    /* Already operating on this very channel: no new check needed. */
    if (required == MTRUE && priv->media_connected == MTRUE
        && priv->curr_bss_params.bss_descriptor.channel == channel) {
        required = MFALSE;
        PRINTM(MINFO, "11h: Radar detection not required. "
               "Already operating on the channel\n");
    }
    LEAVE();
    return required;
}
/**
* @brief Perform a radar measurement if required on given channel
*
* Check to see if the provided channel requires a channel availability
* check (60 second radar detection measurement). If required, perform
* measurement, stalling calling thread until the measurement completes
* and then report result.
*
* Used when starting an adhoc or AP network.
*
* @param priv Private driver information structure
* @param pioctl_req Pointer to IOCTL request buffer
* @param channel Channel on which to perform radar measurement
*
* @return
* - MTRUE if radar measurement request was successfully issued
* - MFALSE if radar detection is not required
* - < 0 for error during radar detection (if performed)
*
* @sa wlan_11h_radar_detect_required
*/
t_s32
wlan_11h_issue_radar_detect(mlan_private * priv,
                            pmlan_ioctl_req pioctl_req, t_u8 channel)
{
    t_s32 ret;
    HostCmd_DS_CHAN_RPT_REQ chan_rpt_req;

    ENTER();
    ret = wlan_11h_radar_detect_required(priv, channel);
    if (!ret) {
        /* No radar measurement needed on this channel. */
        LEAVE();
        return ret;
    }

    /* Build a CHAN_RPT_REQ for the channel availability check.
       (memset here is the driver's 4-argument wrapper.) */
    memset(priv->adapter, &chan_rpt_req, 0x00, sizeof(chan_rpt_req));
    chan_rpt_req.chan_desc.startFreq = START_FREQ_11A_BAND;
    chan_rpt_req.chan_desc.chanWidth = 0;       /* 1 for 40Mhz */
    chan_rpt_req.chan_desc.chanNum = channel;
    chan_rpt_req.millisec_dwell_time = WLAN_11H_CHANNEL_AVAIL_CHECK_DURATION;
#ifdef DFS_TESTING_SUPPORT
    /* Test hook: a user-configured CAC period overrides the default. */
    if (priv->adapter->dfs_test_params.user_cac_period_msec) {
        PRINTM(MCMD_D,
               "dfs_testing - user CAC period=%d (msec)\n",
               priv->adapter->dfs_test_params.user_cac_period_msec);
        chan_rpt_req.millisec_dwell_time =
            priv->adapter->dfs_test_params.user_cac_period_msec;
    }
#endif
    PRINTM(MMSG, "11h: issuing DFS Radar check for channel=%d."
           " Please wait for response...\n", channel);
    ret = wlan_prepare_cmd(priv, HostCmd_CMD_CHAN_REPORT_REQUEST,
                           HostCmd_ACT_GEN_SET, 0,
                           (t_void *) pioctl_req,
                           (t_void *) & chan_rpt_req);
    LEAVE();
    return ret;
}
/**
* @brief Checks if a radar measurement was performed on channel,
* and if so, whether radar was detected on it.
*
* Used when starting an adhoc network.
*
* @param priv Private driver information structure
* @param chan Channel to check upon
*
* @return
* - MLAN_STATUS_SUCCESS if no radar on channel
* - MLAN_STATUS_FAILURE if radar was found on channel
* - (TBD??) MLAN_STATUS_PENDING if radar report NEEDS TO BE REISSUED
*
* @sa wlan_11h_issue_radar_detect
* @sa wlan_11h_process_start
*/
mlan_status
wlan_11h_check_chan_report(mlan_private * priv, t_u8 chan)
{
    mlan_status status = MLAN_STATUS_SUCCESS;
    wlan_dfs_device_state_t *pdfs = &priv->adapter->state_dfs;
    t_u32 now_sec, now_usec;
    t_bool report_usable;

    ENTER();
    /* Check whether the report we hold is valid for this channel. */
    priv->adapter->callbacks.moal_get_system_time(priv->adapter->
                                                  pmoal_handle,
                                                  &now_sec, &now_usec);
    PRINTM(MINFO, "11h: %s()\n", __func__);
    PRINTM(MINFO, "- sec_now=%d, sec_report=%d.\n",
           now_sec, pdfs->dfs_report_time_sec);
    PRINTM(MINFO, "- rpt_channel=%d, rpt_radar=%d.\n",
           pdfs->dfs_check_channel, pdfs->dfs_radar_found);

    /* Usable iff the check completed, matches this channel, and the
       report is recent enough. */
    report_usable = (!pdfs->dfs_check_pending)
        && (chan == pdfs->dfs_check_channel)
        && ((now_sec - pdfs->dfs_report_time_sec) <
            MAX_DFS_REPORT_USABLE_AGE_SEC);

    if (!report_usable) {
        /* TODO: reissue report request if not pending. BUT HOW to
           make the code wait for it??? For now, just fail since we
           don't have the info. */
        status = MLAN_STATUS_PENDING;
    } else if (pdfs->dfs_radar_found) {
        PRINTM(MMSG, "Radar was detected on channel %d.\n", chan);
        status = MLAN_STATUS_FAILURE;
    }
    LEAVE();
    return status;
}
/**
* @brief Process an TLV buffer for a pending BSS Adhoc start command.
*
* Activate 11h functionality in the firmware if driver has is enabled
* for 11h (configured by the application via IOCTL).
*
* @param priv Private driver information structure
* @param ppbuffer Output parameter: Pointer to the TLV output buffer,
* modified on return to point after the appended 11h TLVs
* @param pcap_info Pointer to the capability info for the BSS to join
* @param channel Channel on which we are starting the IBSS
* @param p11h_bss_info Input/Output parameter: Pointer to the 11h BSS
* information for this network that we are establishing.
* 11h sensed flag set on output if warranted.
*
* @return
* - MLAN_STATUS_SUCCESS if 11h is disabled
* - Integer number of bytes appended to the TLV output buffer (ppbuffer)
* - < 0 for error (e.g. radar detected on channel)
*/
t_s32
wlan_11h_process_start(mlan_private * priv,
                       t_u8 ** ppbuffer,
                       IEEEtypes_CapInfo_t * pcap_info,
                       t_u32 channel, wlan_11h_bss_info_t * p11h_bss_info)
{
    mlan_adapter *adapter = priv->adapter;
    t_s32 ret = MLAN_STATUS_SUCCESS;
    t_bool is_dfs_chan = MFALSE;

    ENTER();
    /* 11h processing only applies when it is enabled and the ad-hoc
       start band includes the A band; otherwise 11h is deactivated. */
    if (wlan_11h_is_enabled(priv)
        && ((adapter->adhoc_start_band & BAND_A)
            || (adapter->adhoc_start_band & BAND_AN)
        )
        ) {
        if (!wlan_11d_is_enabled(priv)) {
            /* No use having 11h enabled without 11d enabled */
            wlan_11d_enable(priv, MNULL, ENABLE_11D);
#ifdef STA_SUPPORT
            wlan_11d_create_dnld_countryinfo(priv,
                                             adapter->adhoc_start_band);
#endif
        }
        /* Activate 11h functions in firmware, turns on capability bit */
        wlan_11h_activate(priv, MNULL, MTRUE);
        pcap_info->spectrum_mgmt = MTRUE;
        /* If using a DFS channel, enable radar detection. */
        is_dfs_chan = wlan_11h_radar_detect_required(priv, channel);
        if (is_dfs_chan) {
            if (!wlan_11h_is_master_radar_det_active(priv))
                wlan_11h_config_master_radar_det(priv, MTRUE);
        }
        wlan_11h_check_update_radar_det_state(priv);
        /* Set flag indicating this BSS we are starting is using 11h */
        p11h_bss_info->sensed_11h = MTRUE;
        if (is_dfs_chan) {
            /* check if this channel is under NOP */
            if (wlan_11h_is_channel_under_nop(adapter, channel))
                ret = MLAN_STATUS_FAILURE;
            /* check last channel report, if this channel is free of
               radar */
            if (ret == MLAN_STATUS_SUCCESS)
                ret = wlan_11h_check_chan_report(priv, channel);
        }
        /* NOTE: on success, ret switches meaning here - it becomes the
           byte count appended by wlan_11h_process_adhoc(); any earlier
           failure (NOP or radar) is collapsed to MLAN_STATUS_FAILURE. */
        if (ret == MLAN_STATUS_SUCCESS)
            ret = wlan_11h_process_adhoc(priv, ppbuffer, channel,
                                         MNULL);
        else
            ret = MLAN_STATUS_FAILURE;
    } else {
        /* Deactivate 11h functions in the firmware */
        wlan_11h_activate(priv, MNULL, MFALSE);
        pcap_info->spectrum_mgmt = MFALSE;
        wlan_11h_check_update_radar_det_state(priv);
    }
    LEAVE();
    return ret;
}
/**
* @brief Process an TLV buffer for a pending BSS Join command for
* both adhoc and infra networks
*
* The TLV command processing for a BSS join for either adhoc or
* infrastructure network is performed with this function. The
* capability bits are inspected for the IBSS flag and the appropriate
* local routines are called to setup the necessary TLVs.
*
* Activate 11h functionality in the firmware if the spectrum management
* capability bit is found in the network information for the BSS we are
* joining.
*
* @param priv Private driver information structure
* @param ppbuffer Output parameter: Pointer to the TLV output buffer,
* modified on return to point after the appended 11h TLVs
* @param pcap_info Pointer to the capability info for the BSS to join
* @param band Band on which we are joining the BSS
* @param channel Channel on which we are joining the BSS
* @param p11h_bss_info Pointer to the 11h BSS information for this
* network that was parsed out of the scan response.
*
* @return Integer number of bytes appended to the TLV output
* buffer (ppbuffer), MLAN_STATUS_FAILURE (-1),
* or MLAN_STATUS_SUCCESS (0)
*/
t_s32
wlan_11h_process_join(mlan_private * priv,
                      t_u8 ** ppbuffer,
                      IEEEtypes_CapInfo_t * pcap_info,
                      t_u8 band,
                      t_u32 channel, wlan_11h_bss_info_t * p11h_bss_info)
{
    t_s32 ret = 0;

    ENTER();
    /* Roaming case: already connected, so no TLVs are appended; only
       verify that the 11h state matches the target BSS. */
    if (priv->media_connected == MTRUE) {
        if (wlan_11h_is_active(priv) == p11h_bss_info->sensed_11h) {
            /* Assume DFS parameters are the same for roaming as long
               as the current & next APs have the same spectrum mgmt
               capability bit setting */
            ret = MLAN_STATUS_SUCCESS;
        } else {
            /* No support for roaming between DFS/non-DFS yet */
            ret = MLAN_STATUS_FAILURE;
        }
        LEAVE();
        return ret;
    }
    /* Fresh join: mirror the target BSS's 11h (spectrum mgmt) setting. */
    if (p11h_bss_info->sensed_11h) {
        if (!wlan_11d_is_enabled(priv)) {
            /* No use having 11h enabled without 11d enabled */
            wlan_11d_enable(priv, MNULL, ENABLE_11D);
#ifdef STA_SUPPORT
            wlan_11d_parse_dnld_countryinfo(priv,
                                            priv->pattempted_bss_desc);
#endif
        }
        /* Activate 11h functions in firmware, turns on capability bit */
        wlan_11h_activate(priv, MNULL, MTRUE);
        pcap_info->spectrum_mgmt = MTRUE;
        /* If using a DFS channel, enable radar detection. */
        if ((band & BAND_A) &&
            wlan_11h_radar_detect_required(priv, channel)) {
            if (!wlan_11h_is_slave_radar_det_active(priv))
                wlan_11h_config_slave_radar_det(priv, MTRUE);
        }
        wlan_11h_check_update_radar_det_state(priv);
        /* The IBSS capability bit selects ad-hoc vs. infrastructure
           TLV construction; ret becomes the appended byte count. */
        if (pcap_info->ibss) {
            PRINTM(MINFO, "11h: Adhoc join: Sensed\n");
            ret = wlan_11h_process_adhoc(priv, ppbuffer, channel,
                                         p11h_bss_info);
        } else {
            PRINTM(MINFO, "11h: Infra join: Sensed\n");
            ret = wlan_11h_process_infra_join(priv, ppbuffer, band,
                                              channel, p11h_bss_info);
        }
    } else {
        /* Deactivate 11h functions in the firmware */
        wlan_11h_activate(priv, MNULL, MFALSE);
        pcap_info->spectrum_mgmt = MFALSE;
        wlan_11h_check_update_radar_det_state(priv);
    }
    LEAVE();
    return ret;
}
/**
*
* @brief Prepare the HostCmd_DS_Command structure for an 11h command.
*
* Use the Command field to determine if the command being set up is for
* 11h and call one of the local command handlers accordingly for:
*
* - HostCmd_CMD_802_11_TPC_ADAPT_REQ
* - HostCmd_CMD_802_11_TPC_INFO
* - HostCmd_CMD_802_11_CHAN_SW_ANN
*/
/** - HostCmd_CMD_CHAN_REPORT_REQUEST
*/
/**
* @param priv Private driver information structure
* @param pcmd_ptr Output parameter: Pointer to the command being prepared
* for the firmware
* @param pinfo_buf Void buffer pass through with data necessary for a
* specific command type
*/
/** @return MLAN_STATUS_SUCCESS, MLAN_STATUS_FAILURE or MLAN_STATUS_PENDING
*/
/** @sa wlan_11h_cmd_tpc_request
* @sa wlan_11h_cmd_tpc_info
* @sa wlan_11h_cmd_chan_sw_ann
*/
/** @sa wlan_11h_cmd_chan_report_req
*/
mlan_status
wlan_11h_cmd_process(mlan_private * priv,
                     HostCmd_DS_COMMAND * pcmd_ptr, const t_void * pinfo_buf)
{
    mlan_status status;

    ENTER();
    /* Dispatch to the per-command 11h setup routine. */
    switch (pcmd_ptr->command) {
    case HostCmd_CMD_802_11_TPC_ADAPT_REQ:
        status = wlan_11h_cmd_tpc_request(priv, pcmd_ptr, pinfo_buf);
        break;
    case HostCmd_CMD_802_11_TPC_INFO:
        status = wlan_11h_cmd_tpc_info(priv, pcmd_ptr, pinfo_buf);
        break;
    case HostCmd_CMD_802_11_CHAN_SW_ANN:
        status = wlan_11h_cmd_chan_sw_ann(priv, pcmd_ptr, pinfo_buf);
        break;
    case HostCmd_CMD_CHAN_REPORT_REQUEST:
        status = wlan_11h_cmd_chan_rpt_req(priv, pcmd_ptr, pinfo_buf);
        break;
    default:
        status = MLAN_STATUS_FAILURE;
        break;
    }
    /* Convert the command header to firmware (little-endian) byte
       order; done unconditionally, including the failure path. */
    pcmd_ptr->command = wlan_cpu_to_le16(pcmd_ptr->command);
    pcmd_ptr->size = wlan_cpu_to_le16(pcmd_ptr->size);
    LEAVE();
    return status;
}
/**
* @brief Handle the command response from the firmware if from an 11h command
*
* Use the Command field to determine if the command response being
* is for 11h. Call the local command response handler accordingly for:
*
* - HostCmd_CMD_802_11_TPC_ADAPT_REQ
* - HostCmd_CMD_802_11_TPC_INFO
* - HostCmd_CMD_802_11_CHAN_SW_ANN
*/
/** - HostCmd_CMD_CHAN_REPORT_REQUEST
*/
/**
* @param priv Private driver information structure
* @param resp HostCmd_DS_COMMAND struct returned from the firmware
* command
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
mlan_status
wlan_11h_cmdresp_process(mlan_private * priv, const HostCmd_DS_COMMAND * resp)
{
    mlan_status status = MLAN_STATUS_SUCCESS;

    ENTER();
    /* Handle responses for the 11h command set only; anything else
       is reported as a failure to the caller. */
    switch (resp->command) {
    case HostCmd_CMD_802_11_TPC_ADAPT_REQ:
        HEXDUMP("11h: TPC REQUEST Rsp:", (t_u8 *) resp,
                (t_u32) resp->size);
        /* Copy the TPC response into the requester's data buffer.
           (memcpy here is the driver's 4-argument wrapper.) */
        memcpy(priv->adapter, priv->adapter->curr_cmd->pdata_buf,
               &resp->params.tpc_req,
               sizeof(HostCmd_DS_802_11_TPC_ADAPT_REQ));
        break;
    case HostCmd_CMD_802_11_TPC_INFO:
        HEXDUMP("11h: TPC INFO Rsp Data:", (t_u8 *) resp,
                (t_u32) resp->size);
        break;
    case HostCmd_CMD_802_11_CHAN_SW_ANN:
        PRINTM(MINFO, "11h: Ret ChSwAnn: Sz=%u, Seq=%u, Ret=%u\n",
               resp->size, resp->seq_num, resp->result);
        break;
    case HostCmd_CMD_CHAN_REPORT_REQUEST:
        PRINTM(MINFO,
               "11h: Ret ChanRptReq. Set dfs_check_pending and wait"
               " for EVENT_CHANNEL_REPORT.\n");
        /* Report data arrives asynchronously via EVENT_CHANNEL_REPORT. */
        priv->adapter->state_dfs.dfs_check_pending = MTRUE;
        break;
    default:
        status = MLAN_STATUS_FAILURE;
        break;
    }
    LEAVE();
    return status;
}
/**
* @brief Process an element from a scan response, copy relevant info for 11h
*
* @param pmadapter Pointer to mlan_adapter
* @param p11h_bss_info Output parameter: Pointer to the 11h BSS information
* for the network that is being processed
* @param pelement Pointer to the current IE we are inspecting for 11h
* relevance
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
mlan_status
wlan_11h_process_bss_elem(mlan_adapter * pmadapter,
                          wlan_11h_bss_info_t * p11h_bss_info,
                          const t_u8 * pelement)
{
    mlan_status ret = MLAN_STATUS_SUCCESS;
    /* IE layout: byte [0] = element id, byte [1] = payload length. */
    t_u8 element_len = *((t_u8 *) pelement + 1);

    ENTER();
    /* For each recognized 11h IE: flag the BSS as 11h-sensed, copy the
       IE into the matching fixed-size destination field - clamped to
       the smaller of the advertised IE size and the destination struct
       size - then store a length clamped to the destination's payload
       capacity.  (memcpy here is the driver's 4-argument wrapper.) */
    switch (*pelement) {
    case POWER_CONSTRAINT:
        PRINTM(MINFO, "11h: Power Constraint IE Found\n");
        p11h_bss_info->sensed_11h = MTRUE;
        memcpy(pmadapter, &p11h_bss_info->power_constraint, pelement,
               MIN((element_len + sizeof(IEEEtypes_Header_t)),
                   sizeof(IEEEtypes_PowerConstraint_t)));
        p11h_bss_info->power_constraint.len =
            MIN(element_len, (sizeof(IEEEtypes_PowerConstraint_t)
                              - sizeof(IEEEtypes_Header_t)));
        break;
    case POWER_CAPABILITY:
        PRINTM(MINFO, "11h: Power Capability IE Found\n");
        p11h_bss_info->sensed_11h = MTRUE;
        memcpy(pmadapter, &p11h_bss_info->power_capability, pelement,
               MIN((element_len + sizeof(IEEEtypes_Header_t)),
                   sizeof(IEEEtypes_PowerCapability_t)));
        p11h_bss_info->power_capability.len =
            MIN(element_len, (sizeof(IEEEtypes_PowerCapability_t)
                              - sizeof(IEEEtypes_Header_t)));
        break;
    case TPC_REPORT:
        PRINTM(MINFO, "11h: Tpc Report IE Found\n");
        p11h_bss_info->sensed_11h = MTRUE;
        memcpy(pmadapter, &p11h_bss_info->tpc_report, pelement,
               MIN((element_len + sizeof(IEEEtypes_Header_t)),
                   sizeof(IEEEtypes_TPCReport_t)));
        p11h_bss_info->tpc_report.len =
            MIN(element_len, (sizeof(IEEEtypes_TPCReport_t)
                              - sizeof(IEEEtypes_Header_t)));
        break;
    case CHANNEL_SWITCH_ANN:
        PRINTM(MINFO, "11h: Channel Switch Ann IE Found\n");
        p11h_bss_info->sensed_11h = MTRUE;
        memcpy(pmadapter, &p11h_bss_info->chan_switch_ann, pelement,
               MIN((element_len + sizeof(IEEEtypes_Header_t)),
                   sizeof(IEEEtypes_ChanSwitchAnn_t)));
        p11h_bss_info->chan_switch_ann.len =
            MIN(element_len, (sizeof(IEEEtypes_ChanSwitchAnn_t)
                              - sizeof(IEEEtypes_Header_t)));
        break;
    case QUIET:
        PRINTM(MINFO, "11h: Quiet IE Found\n");
        p11h_bss_info->sensed_11h = MTRUE;
        memcpy(pmadapter, &p11h_bss_info->quiet, pelement,
               MIN((element_len + sizeof(IEEEtypes_Header_t)),
                   sizeof(IEEEtypes_Quiet_t)));
        p11h_bss_info->quiet.len =
            MIN(element_len, (sizeof(IEEEtypes_Quiet_t)
                              - sizeof(IEEEtypes_Header_t)));
        break;
    case IBSS_DFS:
        PRINTM(MINFO, "11h: Ibss Dfs IE Found\n");
        p11h_bss_info->sensed_11h = MTRUE;
        memcpy(pmadapter, &p11h_bss_info->ibss_dfs, pelement,
               MIN((element_len + sizeof(IEEEtypes_Header_t)),
                   sizeof(IEEEtypes_IBSS_DFS_t)));
        p11h_bss_info->ibss_dfs.len =
            MIN(element_len, (sizeof(IEEEtypes_IBSS_DFS_t)
                              - sizeof(IEEEtypes_Header_t)));
        break;
    case SUPPORTED_CHANNELS:
    case TPC_REQUEST:
        /*
         * These elements are not in beacons/probe responses. Included here
         * to cover set of enumerated 11h elements.
         */
        break;
    default:
        /* Not an 11h element. */
        ret = MLAN_STATUS_FAILURE;
    }
    LEAVE();
    return ret;
}
/**
* @brief Driver handling for CHANNEL_SWITCH_ANN event
*
* @param priv Pointer to mlan_private
*
* @return MLAN_STATUS_SUCCESS, MLAN_STATUS_FAILURE or MLAN_STATUS_PENDING
*/
mlan_status
wlan_11h_handle_event_chanswann(mlan_private * priv)
{
    mlan_status status = MLAN_STATUS_SUCCESS;
    t_u32 now_sec, now_usec;

    ENTER();
    priv->adapter->state_11h.recvd_chanswann_event = MTRUE;

    /* Unlikely: a previous CSA is still outstanding - unblacklist its
       channel before recording the new one. */
    if (priv->intf_state_11h.dfs_slave_csa_chan) {
        wlan_set_chan_blacklist(priv, BAND_A,
                                priv->intf_state_11h.dfs_slave_csa_chan,
                                MFALSE);
    }

    /* Record the CSA channel and its time of occurrence. */
    priv->intf_state_11h.dfs_slave_csa_chan =
        priv->curr_bss_params.bss_descriptor.channel;
    priv->adapter->callbacks.moal_get_system_time(priv->adapter->
                                                  pmoal_handle,
                                                  &now_sec, &now_usec);
    priv->intf_state_11h.dfs_slave_csa_expire_at_sec =
        now_sec + DFS_CHAN_MOVE_TIME;

#ifdef STA_SUPPORT
    /* Directed deauth; the recvd_chanswann_event flag makes the deauth
       use a different reason code. */
    PRINTM(MINFO, "11h: handle_event_chanswann() - sending deauth\n");
    status = wlan_disconnect(priv, MNULL,
                             &priv->curr_bss_params.bss_descriptor.
                             mac_address);
    /* Clear the region table so the next scan is all-passive. */
    PRINTM(MINFO, "11h: handle_event_chanswann() - clear region table\n");
    wlan_11d_clear_parsedtable(priv);
    /* Blacklist the CSA channel for scanning. */
    PRINTM(MINFO,
           "11h: handle_event_chanswann() - scan blacklist csa channel\n");
    wlan_set_chan_blacklist(priv, BAND_A,
                            priv->intf_state_11h.dfs_slave_csa_chan, MTRUE);
#endif
    priv->adapter->state_11h.recvd_chanswann_event = MFALSE;
    LEAVE();
    return status;
}
#ifdef DFS_TESTING_SUPPORT
/**
* @brief 802.11h DFS Testing configuration
*
* @param pmadapter Pointer to mlan_adapter
* @param pioctl_req Pointer to mlan_ioctl_req
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
mlan_status
wlan_11h_ioctl_dfs_testing(pmlan_adapter pmadapter, pmlan_ioctl_req pioctl_req)
{
    mlan_ds_11h_cfg *pcfg = (mlan_ds_11h_cfg *) pioctl_req->pbuf;
    mlan_ds_11h_dfs_testing *pusr = &pcfg->param.dfs_testing;
    wlan_dfs_testing_settings_t *pdrv = &pmadapter->dfs_test_params;

    ENTER();
    if (pioctl_req->action == MLAN_ACT_GET) {
        /* GET: report the driver's current DFS test settings. */
        pusr->usr_cac_period_msec = pdrv->user_cac_period_msec;
        pusr->usr_nop_period_sec = pdrv->user_nop_period_sec;
        pusr->usr_no_chan_change = pdrv->no_channel_change_on_radar;
        pusr->usr_fixed_new_chan = pdrv->fixed_new_channel_on_radar;
    } else {
        /* SET: adopt the user's DFS test settings. */
        pdrv->user_cac_period_msec = pusr->usr_cac_period_msec;
        pdrv->user_nop_period_sec = pusr->usr_nop_period_sec;
        pdrv->no_channel_change_on_radar = pusr->usr_no_chan_change;
        pdrv->fixed_new_channel_on_radar = pusr->usr_fixed_new_chan;
    }
    LEAVE();
    return MLAN_STATUS_SUCCESS;
}
#endif /* DFS_TESTING_SUPPORT */
/**
* @brief Check if channel is under NOP (Non-Occupancy Period)
* If so, the channel should not be used until the period expires.
*
* @param pmadapter Pointer to mlan_adapter
* @param channel Channel number
*
* @return MTRUE or MFALSE
*/
t_bool
wlan_11h_is_channel_under_nop(mlan_adapter * pmadapter, t_u8 channel)
{
    wlan_dfs_timestamp_t *pdfs_ts;
    t_u32 now_sec, now_usec;
    t_u32 nop_period_sec;
    t_bool under_nop = MFALSE;

    ENTER();
    pdfs_ts = wlan_11h_find_dfs_timestamp(pmadapter, channel);
    if (pdfs_ts && (pdfs_ts->channel == channel)
        && (pdfs_ts->represents == DFS_TS_REPR_NOP_START)) {
        /* Found a NOP-start timestamp entry for this channel; compare
           its age against the applicable non-occupancy period. */
        pmadapter->callbacks.moal_get_system_time(pmadapter->
                                                  pmoal_handle,
                                                  &now_sec, &now_usec);
        nop_period_sec = WLAN_11H_NON_OCCUPANCY_PERIOD;
#ifdef DFS_TESTING_SUPPORT
        /* Test hook: user-configured NOP period overrides the default. */
        if (pmadapter->dfs_test_params.user_nop_period_sec) {
            PRINTM(MCMD_D,
                   "dfs_testing - user NOP period=%d (sec)\n",
                   pmadapter->dfs_test_params.user_nop_period_sec);
            nop_period_sec =
                pmadapter->dfs_test_params.user_nop_period_sec;
        }
#endif
        if ((now_sec - pdfs_ts->ts_sec) <= nop_period_sec)
            under_nop = MTRUE;
        if (!under_nop) {
            /* NOP expired: drop the stale timestamp entry. */
            wlan_11h_remove_dfs_timestamp(pmadapter, pdfs_ts);
        } else {
            PRINTM(MMSG,
                   "11h: channel %d is under NOP - can't use.\n",
                   channel);
        }
    }
    LEAVE();
    return under_nop;
}
/**
* @brief Driver handling for CHANNEL_REPORT_RDY event
* This event will have the channel report data appended.
*
* @param priv Pointer to mlan_private
* @param pevent Pointer to mlan_event
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
/**
 *  Parse the CHANNEL_REPORT_RDY event: walk the appended TLVs and, if a
 *  basic measurement report shows radar, record it and put the channel
 *  on the NOP list.  Always stamps the report time and clears the
 *  pending flag.
 *
 *  @param priv    Pointer to mlan_private
 *  @param pevent  Pointer to mlan_event carrying the report
 *
 *  @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
 */
mlan_status
wlan_11h_handle_event_chanrpt_ready(mlan_private * priv, mlan_event * pevent)
{
    mlan_status ret = MLAN_STATUS_SUCCESS;
    HostCmd_DS_CHAN_RPT_RSP *pchan_rpt_rsp;
    MrvlIEtypes_Data_t *ptlv;
    MeasRptBasicMap_t *pmeas_rpt_basic;
    t_u8 *pbuffer;
    t_s32 evt_len;
    t_u16 tlv_len;
    t_u32 sec, usec;
    wlan_dfs_device_state_t *pstate_dfs = &priv->adapter->state_dfs;

    ENTER();
    pchan_rpt_rsp = (HostCmd_DS_CHAN_RPT_RSP *) & pevent->event_buf;
    DBG_HEXDUMP(MCMD_D, "11h: Event ChanRptReady (HostCmd_DS_CHAN_RPT_RSP)",
                (t_u8 *) pchan_rpt_rsp,
                wlan_le32_to_cpu(pevent->event_len));
    if (wlan_le32_to_cpu(pchan_rpt_rsp->cmd_result) ==
        MLAN_CMD_RESULT_SUCCESS) {
        pbuffer = (t_u8 *) & pchan_rpt_rsp->tlv_buffer;
        evt_len = wlan_le32_to_cpu(pevent->event_len);
        evt_len -=
            sizeof(HostCmd_DS_CHAN_RPT_RSP) -
            sizeof(pchan_rpt_rsp->tlv_buffer);
        /* BUGFIX: cast sizeof to t_s32.  evt_len is signed and goes
           negative on a short/malformed event; comparing against an
           unsigned sizeof would promote evt_len to a huge unsigned
           value and walk past the end of the event buffer. */
        while (evt_len >= (t_s32) sizeof(MrvlIEtypesHeader_t)) {
            ptlv = (MrvlIEtypes_Data_t *) pbuffer;
            tlv_len = wlan_le16_to_cpu(ptlv->header.len);
            /* BUGFIX: stop if this TLV claims more bytes than remain
               in the event - prevents reading past the buffer. */
            if ((t_s32) (tlv_len + sizeof(ptlv->header)) > evt_len)
                break;
            switch (wlan_le16_to_cpu(ptlv->header.type)) {
            case TLV_TYPE_CHANRPT_11H_BASIC:
                pmeas_rpt_basic = (MeasRptBasicMap_t *) & ptlv->data;
                if (pmeas_rpt_basic->radar) {
                    pstate_dfs->dfs_radar_found = MTRUE;
                    PRINTM(MMSG,
                           "RADAR Detected on channel %d!\n",
                           pstate_dfs->dfs_check_channel);
                    /* add channel to NOP list */
                    wlan_11h_add_dfs_timestamp(priv->adapter,
                                               DFS_TS_REPR_NOP_START,
                                               pstate_dfs->
                                               dfs_check_channel);
                }
                break;
            default:
                break;
            }
            pbuffer += (tlv_len + sizeof(ptlv->header));
            evt_len -= (tlv_len + sizeof(ptlv->header));
        }
    } else {
        ret = MLAN_STATUS_FAILURE;
    }
    /* Update the DFS state regardless of the result: record when the
       report arrived and mark the check as complete. */
    priv->adapter->callbacks.moal_get_system_time(priv->adapter->
                                                  pmoal_handle, &sec,
                                                  &usec);
    pstate_dfs->dfs_report_time_sec = sec;
    pstate_dfs->dfs_check_pending = MFALSE;
    LEAVE();
    return ret;
}
/**
* @brief Check if RADAR_DETECTED handling is blocking data tx
*
* @param pmadapter Pointer to mlan_adapter
*
* @return MTRUE or MFALSE
*/
t_bool
wlan_11h_radar_detected_tx_blocked(mlan_adapter * pmadapter)
{
    /* Tx is still allowed only in the early RDH stages; once the
       handler progresses past RDH_STOP_TRAFFIC, tx is blocked. */
    if (pmadapter->state_rdh.stage == RDH_OFF ||
        pmadapter->state_rdh.stage == RDH_CHK_INTFS ||
        pmadapter->state_rdh.stage == RDH_STOP_TRAFFIC)
        return MFALSE;
    return MTRUE;
}
/**
* @brief Callback for RADAR_DETECTED event driver handling
*
* @param priv Void pointer to mlan_private
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
*/
mlan_status
wlan_11h_radar_detected_callback(t_void * priv)
{
    mlan_status status;
    mlan_private *pmpriv = (mlan_private *) priv;

    ENTER();
    /* Re-enter the RDH state machine on behalf of this interface. */
    status = wlan_11h_radar_detected_handling(pmpriv->adapter);
    LEAVE();
    return status;
}
/**
* @brief Driver handling for RADAR_DETECTED event
*
* @param pmadapter Pointer to mlan_adapter
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE or MLAN_STATUS_PENDING
*/
mlan_status
wlan_11h_radar_detected_handling(mlan_adapter * pmadapter)
{
#ifdef DEBUG_LEVEL1
const char *rdh_stage_str[] = {
"RDH_OFF",
"RDH_CHK_INTFS",
"RDH_STOP_TRAFFIC",
"RDH_GET_INFO_CHANNEL",
"RDH_GET_INFO_BEACON_DTIM",
"RDH_SET_CUSTOM_IE",
"RDH_REM_CUSTOM_IE",
"RDH_STOP_INTFS",
"RDH_SET_NEW_CHANNEL",
"RDH_RESTART_INTFS",
"RDH_RESTART_TRAFFIC"
};
#endif
mlan_status ret = MLAN_STATUS_SUCCESS;
mlan_private *pmpriv = MNULL;
t_u32 i;
wlan_radar_det_hndlg_state_t *pstate_rdh = &pmadapter->state_rdh;
ENTER();
switch (pstate_rdh->stage) {
case RDH_CHK_INTFS:
PRINTM(MCMD_D, "%s(): stage(%d)=%s\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage]);
/* get active interfaces */
memset(pmadapter, pstate_rdh->priv_list, 0x00,
sizeof(pstate_rdh->priv_list));
pstate_rdh->priv_list_count = wlan_get_privs_by_cond(pmadapter,
wlan_is_intf_active,
pstate_rdh->
priv_list);
PRINTM(MCMD_D, "%s(): priv_list_count = %d\n", __func__,
pstate_rdh->priv_list_count);
for (i = 0; i < pstate_rdh->priv_list_count; i++)
PRINTM(MINFO, "%s(): priv_list[%d] = %p\n",
__func__, i, pstate_rdh->priv_list[i]);
if (pstate_rdh->priv_list_count == 0) {
/* no interfaces active... nothing to do */
PRINTM(MMSG, "11h: Radar Detected - no active priv's,"
" skip event handling.\n");
pstate_rdh->stage = RDH_OFF;
PRINTM(MCMD_D, "%s(): finished - stage(%d)=%s\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage]);
break; /* EXIT CASE */
}
/* else: start handling */
pstate_rdh->curr_channel = 0;
pstate_rdh->new_channel = 0;
pstate_rdh->uap_band_cfg = 0;
pstate_rdh->max_bcn_dtim_ms = 0;
pstate_rdh->priv_curr_idx = RDH_STAGE_FIRST_ENTRY_PRIV_IDX;
pstate_rdh->stage = RDH_STOP_TRAFFIC;
/* FALL THROUGH TO NEXT STAGE */
case RDH_STOP_TRAFFIC:
PRINTM(MCMD_D, "%s(): stage(%d)=%s\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage]);
PRINTM(MMSG,
"11h: Radar Detected - stopping host tx traffic.\n");
for (i = 0; i < pstate_rdh->priv_list_count; i++)
wlan_11h_tx_disable(pstate_rdh->priv_list[i]);
pstate_rdh->priv_curr_idx = RDH_STAGE_FIRST_ENTRY_PRIV_IDX;
pstate_rdh->stage = RDH_GET_INFO_CHANNEL;
/* FALL THROUGH TO NEXT STAGE */
case RDH_GET_INFO_CHANNEL:
PRINTM(MCMD_D, "%s(): stage(%d)=%s, priv_idx=%d\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage],
pstate_rdh->priv_curr_idx);
/* here, prefer STA info over UAP info - one less CMD to send */
if (pstate_rdh->priv_curr_idx == RDH_STAGE_FIRST_ENTRY_PRIV_IDX) {
if (wlan_only_uap_priv_in_list(pstate_rdh->priv_list,
pstate_rdh->
priv_list_count)) {
#ifdef UAP_SUPPORT
/* Assume all UAPs on same channel, use first
UAP */
pmpriv = pstate_rdh->priv_list[0];
pstate_rdh->priv_curr_idx = 0;
/* send cmd to get first UAP's info */
pmpriv->uap_state_chan_cb.pioctl_req_curr =
MNULL;
pmpriv->uap_state_chan_cb.get_chan_callback =
wlan_11h_radar_detected_callback;
ret = wlan_uap_get_channel(pmpriv);
break; /* EXIT CASE */
#endif
} else {
/* Assume all STAs on same channel, find first
STA */
MASSERT(pstate_rdh->priv_list_count > 0);
for (i = 0; i < pstate_rdh->priv_list_count;
i++) {
pmpriv = pstate_rdh->priv_list[i];
if (GET_BSS_ROLE(pmpriv) ==
MLAN_BSS_ROLE_STA)
break;
}
/* STA info kept in driver, just copy */
pstate_rdh->curr_channel =
pmpriv->curr_bss_params.bss_descriptor.
channel;
}
}
#ifdef UAP_SUPPORT
else if (pstate_rdh->priv_curr_idx <
pstate_rdh->priv_list_count) {
/* repeat entry: UAP return with info */
pmpriv = pstate_rdh->priv_list[pstate_rdh->
priv_curr_idx];
pstate_rdh->curr_channel =
pmpriv->uap_state_chan_cb.channel;
pstate_rdh->uap_band_cfg =
pmpriv->uap_state_chan_cb.band_config;
PRINTM(MCMD_D, "%s(): uap_band_cfg=0x%02x\n", __func__,
pstate_rdh->uap_band_cfg);
}
#endif
/* add channel to NOP list */
wlan_11h_add_dfs_timestamp(pmadapter, DFS_TS_REPR_NOP_START,
pstate_rdh->curr_channel);
/* choose new channel (!= curr channel) and move on */
i = 0;
do {
#ifdef UAP_SUPPORT
if (GET_BSS_ROLE(pmpriv) == MLAN_BSS_ROLE_UAP)
pstate_rdh->new_channel =
wlan_11h_get_uap_start_channel(pmpriv,
pmpriv->
uap_state_chan_cb.
band_config);
else
#endif
pstate_rdh->new_channel =
wlan_11h_get_adhoc_start_channel
(pmpriv);
} while ((pstate_rdh->new_channel == pstate_rdh->curr_channel) && (++i < MAX_RANDOM_CHANNEL_RETRIES)); /* avoid
deadloop
*/
if (i >= MAX_RANDOM_CHANNEL_RETRIES) /* report error */
PRINTM(MERROR,
"%s(): ERROR - could not choose new_chan"
" (!= curr_chan) !!\n", __func__);
#ifdef DFS_TESTING_SUPPORT
if (pmadapter->dfs_test_params.fixed_new_channel_on_radar) {
PRINTM(MCMD_D, "dfs_testing - user fixed new_chan=%d\n",
pmadapter->dfs_test_params.
fixed_new_channel_on_radar);
pstate_rdh->new_channel =
pmadapter->dfs_test_params.
fixed_new_channel_on_radar;
}
#endif
PRINTM(MCMD_D, "%s(): curr_chan=%d, new_chan=%d\n",
__func__, pstate_rdh->curr_channel,
pstate_rdh->new_channel);
pstate_rdh->priv_curr_idx = RDH_STAGE_FIRST_ENTRY_PRIV_IDX;
pstate_rdh->stage = RDH_GET_INFO_BEACON_DTIM;
/* FALL THROUGH TO NEXT STAGE */
case RDH_GET_INFO_BEACON_DTIM:
PRINTM(MCMD_D, "%s(): stage(%d)=%s, priv_idx=%d\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage],
pstate_rdh->priv_curr_idx);
#ifdef UAP_SUPPORT
/* check all intfs in this stage to find longest period */
/* UAP intf callback returning with info */
if (pstate_rdh->priv_curr_idx < pstate_rdh->priv_list_count) {
t_u16 bcn_dtim_msec;
pmpriv = pstate_rdh->priv_list[pstate_rdh->
priv_curr_idx];
PRINTM(MCMD_D, "%s(): uap.bcn_pd=%d, uap.dtim_pd=%d\n",
__func__,
pmpriv->uap_state_chan_cb.beacon_period,
pmpriv->uap_state_chan_cb.dtim_period);
bcn_dtim_msec =
(pmpriv->uap_state_chan_cb.beacon_period *
pmpriv->uap_state_chan_cb.dtim_period);
if (bcn_dtim_msec > pstate_rdh->max_bcn_dtim_ms)
pstate_rdh->max_bcn_dtim_ms = bcn_dtim_msec;
}
#endif
/* check next intf */
while ((++pstate_rdh->priv_curr_idx) <
pstate_rdh->priv_list_count) {
pmpriv = pstate_rdh->priv_list[pstate_rdh->
priv_curr_idx];
#ifdef UAP_SUPPORT
if (GET_BSS_ROLE(pmpriv) == MLAN_BSS_ROLE_UAP) {
pmpriv->uap_state_chan_cb.pioctl_req_curr =
MNULL;
pmpriv->uap_state_chan_cb.get_chan_callback =
wlan_11h_radar_detected_callback;
ret = wlan_uap_get_beacon_dtim(pmpriv);
break; /* leads to exit case */
} else
#endif
{ /* get STA info from driver and compare here */
t_u16 bcn_pd_msec = 100;
t_u16 dtim_pd_msec = 1;
t_u16 bcn_dtim_msec;
if (wlan_11h_is_dfs_master(pmpriv)) { /* adhoc
creator
*/
bcn_pd_msec = pmpriv->beacon_period;
} else {
bcn_pd_msec =
pmpriv->curr_bss_params.
bss_descriptor.beacon_period;
/* if (priv->bss_mode !=
MLAN_BSS_MODE_IBSS) */
/* TODO: mlan_scan.c needs to parse TLV
0x05 (TIM) for dtim_period */
}
PRINTM(MCMD_D,
"%s(): sta.bcn_pd=%d, sta.dtim_pd=%d\n",
__func__, bcn_pd_msec, dtim_pd_msec);
bcn_dtim_msec = (bcn_pd_msec * dtim_pd_msec);
if (bcn_dtim_msec > pstate_rdh->max_bcn_dtim_ms)
pstate_rdh->max_bcn_dtim_ms =
bcn_dtim_msec;
}
}
if (pstate_rdh->priv_curr_idx < pstate_rdh->priv_list_count)
break; /* EXIT CASE (for UAP) */
/* else */
pstate_rdh->priv_curr_idx = RDH_STAGE_FIRST_ENTRY_PRIV_IDX;
pstate_rdh->stage = RDH_SET_CUSTOM_IE;
/* FALL THROUGH TO NEXT STAGE */
case RDH_SET_CUSTOM_IE:
PRINTM(MCMD_D, "%s(): stage(%d)=%s, priv_idx=%d\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage],
pstate_rdh->priv_curr_idx);
/* add CHAN_SW IE - firmware will accept on any interface, and
apply to all */
if (pstate_rdh->priv_curr_idx == RDH_STAGE_FIRST_ENTRY_PRIV_IDX) {
mlan_ioctl_req *pioctl_req = MNULL;
ret = wlan_11h_prepare_custom_ie_chansw(pmadapter,
&pioctl_req,
MTRUE);
if ((ret != MLAN_STATUS_SUCCESS) || !pioctl_req) {
PRINTM(MERROR,
"%s(): Error in preparing CHAN_SW IE.\n",
__func__);
break; /* EXIT CASE */
}
PRINTM(MMSG,
"11h: Radar Detected - adding CHAN_SW IE to interfaces.\n");
pmpriv = pstate_rdh->priv_list[0];
pstate_rdh->priv_curr_idx = 0;
pioctl_req->bss_index = pmpriv->bss_index;
ret = wlan_misc_ioctl_custom_ie_list(pmadapter,
pioctl_req,
MFALSE);
if (ret != MLAN_STATUS_SUCCESS &&
ret != MLAN_STATUS_PENDING) {
PRINTM(MERROR,
"%s(): Could not set IE for priv=%p [priv_bss_idx=%d]!\n",
__func__, pmpriv, pmpriv->bss_index);
/* TODO: how to handle this error case?? ignore
& continue? */
}
/* free ioctl buffer memory before we leave */
pmadapter->callbacks.moal_mfree(pmadapter->pmoal_handle,
(t_u8 *) pioctl_req);
break; /* EXIT CASE */
}
/* else */
pstate_rdh->priv_curr_idx = RDH_STAGE_FIRST_ENTRY_PRIV_IDX;
pstate_rdh->stage = RDH_REM_CUSTOM_IE;
/* FALL THROUGH TO NEXT STAGE */
case RDH_REM_CUSTOM_IE:
PRINTM(MCMD_D, "%s(): stage(%d)=%s, priv_idx=%d\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage],
pstate_rdh->priv_curr_idx);
/* remove CHAN_SW IE - firmware will accept on any interface,
and apply to all */
if (pstate_rdh->priv_curr_idx == RDH_STAGE_FIRST_ENTRY_PRIV_IDX) {
mlan_ioctl_req *pioctl_req = MNULL;
/* first entry to this stage, do delay DFS requires a
minimum of 5 chances for clients to hear this IE.
Use delay: 5 beacons <= (BCN_DTIM_MSEC*5) <= 3
seconds). */
t_u16 delay_ms = MAX(MIN_RDH_CHAN_SW_IE_PERIOD_MSEC,
MIN((5 *
pstate_rdh->max_bcn_dtim_ms),
MAX_RDH_CHAN_SW_IE_PERIOD_MSEC));
PRINTM(MMSG,
"11h: Radar Detected - delay %d ms for FW to"
" broadcast CHAN_SW IE.\n", delay_ms);
wlan_mdelay(pmadapter, delay_ms);
PRINTM(MMSG,
"11h: Radar Detected - delay over, removing"
" CHAN_SW IE from interfaces.\n");
ret = wlan_11h_prepare_custom_ie_chansw(pmadapter,
&pioctl_req,
MFALSE);
if ((ret != MLAN_STATUS_SUCCESS) || !pioctl_req) {
PRINTM(MERROR,
"%s(): Error in preparing CHAN_SW IE.\n",
__func__);
break; /* EXIT CASE */
}
pmpriv = pstate_rdh->priv_list[0];
pstate_rdh->priv_curr_idx = 0;
pioctl_req->bss_index = pmpriv->bss_index;
ret = wlan_misc_ioctl_custom_ie_list(pmadapter,
pioctl_req,
MFALSE);
if (ret != MLAN_STATUS_SUCCESS &&
ret != MLAN_STATUS_PENDING) {
PRINTM(MERROR,
"%s(): Could not set IE for priv=%p [priv_bss_idx=%d]!\n",
__func__, pmpriv, pmpriv->bss_index);
/* TODO: hiow to handle this error case??
ignore & continue? */
}
/* free ioctl buffer memory before we leave */
pmadapter->callbacks.moal_mfree(pmadapter->pmoal_handle,
(t_u8 *) pioctl_req);
break; /* EXIT CASE */
}
/* else */
pstate_rdh->priv_curr_idx = RDH_STAGE_FIRST_ENTRY_PRIV_IDX;
pstate_rdh->stage = RDH_STOP_INTFS;
/* FALL THROUGH TO NEXT STAGE */
case RDH_STOP_INTFS:
PRINTM(MCMD_D, "%s(): stage(%d)=%s, priv_idx=%d\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage],
pstate_rdh->priv_curr_idx);
/* issues one cmd (DEAUTH/ADHOC_STOP/BSS_STOP) to each intf */
while ((++pstate_rdh->priv_curr_idx) <
pstate_rdh->priv_list_count) {
pmpriv = pstate_rdh->priv_list[pstate_rdh->
priv_curr_idx];
#ifdef UAP_SUPPORT
if (GET_BSS_ROLE(pmpriv) == MLAN_BSS_ROLE_UAP) {
ret = wlan_prepare_cmd(pmpriv,
HOST_CMD_APCMD_BSS_STOP,
HostCmd_ACT_GEN_SET, 0,
MNULL, MNULL);
break; /* leads to exit case */
}
#endif
#ifdef STA_SUPPORT
if (GET_BSS_ROLE(pmpriv) == MLAN_BSS_ROLE_STA) {
if (wlan_11h_is_dfs_master(pmpriv)) {
/* Save ad-hoc creator state before
stop clears it */
pmpriv->adhoc_state_prev =
pmpriv->adhoc_state;
}
if (pmpriv->media_connected == MTRUE) {
wlan_disconnect(pmpriv, MNULL, MNULL);
break; /* leads to exit case */
}
}
#endif
}
if (pstate_rdh->priv_curr_idx < pstate_rdh->priv_list_count ||
ret == MLAN_STATUS_FAILURE)
break; /* EXIT CASE */
/* else */
pstate_rdh->priv_curr_idx = RDH_STAGE_FIRST_ENTRY_PRIV_IDX;
pstate_rdh->stage = RDH_SET_NEW_CHANNEL;
#ifdef DFS_TESTING_SUPPORT
if (pmadapter->dfs_test_params.no_channel_change_on_radar) {
PRINTM(MCMD_D,
"dfs_testing - no channel change on radar."
" Overwrite new_chan = curr_chan.\n");
pstate_rdh->new_channel = pstate_rdh->curr_channel;
pstate_rdh->priv_curr_idx =
RDH_STAGE_FIRST_ENTRY_PRIV_IDX;
pstate_rdh->stage = RDH_RESTART_INTFS;
goto rdh_restart_intfs; /* skip next stage */
}
#endif
/* FALL THROUGH TO NEXT STAGE */
case RDH_SET_NEW_CHANNEL:
PRINTM(MCMD_D, "%s(): stage(%d)=%s, priv_idx=%d\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage],
pstate_rdh->priv_curr_idx);
/* only set new channel for UAP intfs */
while ((++pstate_rdh->priv_curr_idx) <
pstate_rdh->priv_list_count) {
pmpriv = pstate_rdh->priv_list[pstate_rdh->
priv_curr_idx];
#ifdef UAP_SUPPORT
if (GET_BSS_ROLE(pmpriv) == MLAN_BSS_ROLE_UAP) {
pmpriv->uap_state_chan_cb.pioctl_req_curr =
MNULL;
pmpriv->uap_state_chan_cb.get_chan_callback =
wlan_11h_radar_detected_callback;
pstate_rdh->uap_band_cfg |= UAP_BAND_CONFIG_5GHZ; /* DFS
only
in
5GHz
*/
ret = wlan_uap_set_channel(pmpriv,
pstate_rdh->
uap_band_cfg,
pstate_rdh->
new_channel);
break; /* leads to exit case */
}
#endif
}
if (pstate_rdh->priv_curr_idx < pstate_rdh->priv_list_count ||
ret == MLAN_STATUS_FAILURE)
break; /* EXIT CASE (for UAP) */
/* else */
pstate_rdh->priv_curr_idx = RDH_STAGE_FIRST_ENTRY_PRIV_IDX;
pstate_rdh->stage = RDH_RESTART_INTFS;
/* FALL THROUGH TO NEXT STAGE */
case RDH_RESTART_INTFS:
#ifdef DFS_TESTING_SUPPORT
rdh_restart_intfs:
#endif
PRINTM(MCMD_D, "%s(): stage(%d)=%s, priv_idx=%d\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage],
pstate_rdh->priv_curr_idx);
/* can only restart master intfs */
while ((++pstate_rdh->priv_curr_idx) <
pstate_rdh->priv_list_count) {
pmpriv = pstate_rdh->priv_list[pstate_rdh->
priv_curr_idx];
#ifdef UAP_SUPPORT
if (GET_BSS_ROLE(pmpriv) == MLAN_BSS_ROLE_UAP) {
if (wlan_11h_radar_detect_required(pmpriv,
pstate_rdh->
new_channel))
{
/* Radar detection is required for this
channel, make sure 11h is activated
in the firmware */
ret = wlan_11h_activate(pmpriv, MNULL,
MTRUE);
ret = wlan_11h_config_master_radar_det
(pmpriv, MTRUE);
ret = wlan_11h_check_update_radar_det_state(pmpriv);
}
ret = wlan_prepare_cmd(pmpriv,
HOST_CMD_APCMD_BSS_START,
HostCmd_ACT_GEN_SET, 0,
MNULL, MNULL);
break; /* leads to exit case */
}
#endif
#ifdef STA_SUPPORT
if (GET_BSS_ROLE(pmpriv) == MLAN_BSS_ROLE_STA) {
/* Check previous state to find former Ad-hoc
creator interface. Set new state to
Starting, so it'll be seen as a DFS master. */
if (pmpriv->adhoc_state_prev == ADHOC_STARTED) {
pmpriv->adhoc_state = ADHOC_STARTING;
pmpriv->adhoc_state_prev = ADHOC_IDLE;
}
if (wlan_11h_is_dfs_master(pmpriv)) {
/* set new adhoc channel here */
pmpriv->adhoc_channel =
pstate_rdh->new_channel;
if (wlan_11h_radar_detect_required
(pmpriv, pstate_rdh->new_channel)) {
/* Radar detection is required
for this channel, make sure
11h is activated in the
firmware */
ret = wlan_11h_activate(pmpriv,
MNULL,
MTRUE);
if (ret)
break;
ret = wlan_11h_config_master_radar_det(pmpriv, MTRUE);
if (ret)
break;
ret = wlan_11h_check_update_radar_det_state(pmpriv);
if (ret)
break;
}
ret = wlan_prepare_cmd(pmpriv,
HostCmd_CMD_802_11_AD_HOC_START,
HostCmd_ACT_GEN_SET,
0, MNULL,
&pmpriv->
adhoc_last_start_ssid);
break; /* leads to exit case */
}
/* NOTE: DON'T reconnect slave STA intfs -
infra/adhoc_joiner Do we want to return to
same AP/network (on radar channel)? If want
to connect back, depend on either: 1.
driver's reassoc thread 2. wpa_supplicant,
or other user-space app */
}
#endif
}
if (pstate_rdh->priv_curr_idx < pstate_rdh->priv_list_count ||
ret == MLAN_STATUS_FAILURE)
break; /* EXIT CASE (for UAP) */
/* else */
pstate_rdh->priv_curr_idx = RDH_STAGE_FIRST_ENTRY_PRIV_IDX;
pstate_rdh->stage = RDH_RESTART_TRAFFIC;
/* FALL THROUGH TO NEXT STAGE */
case RDH_RESTART_TRAFFIC:
PRINTM(MCMD_D, "%s(): stage(%d)=%s\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage]);
/* continue traffic for reactivated interfaces */
PRINTM(MMSG,
"11h: Radar Detected - restarting host tx traffic.\n");
for (i = 0; i < pstate_rdh->priv_list_count; i++)
wlan_11h_tx_enable(pstate_rdh->priv_list[i]);
pstate_rdh->stage = RDH_OFF; /* DONE! */
PRINTM(MCMD_D, "%s(): finished - stage(%d)=%s\n",
__func__, pstate_rdh->stage,
rdh_stage_str[pstate_rdh->stage]);
break;
default:
pstate_rdh->stage = RDH_OFF; /* cancel RDH to unblock Tx
packets */
break;
}
LEAVE();
return ret;
}
/**
* @brief DFS Event Preprocessing.
* Operates directly on pmadapter variables.
*
* 1. EVENT_RADAR_DETECTED comes from firmware without specific
* bss_num/bss_type. Find it an appropriate interface and
* update event_cause field in event_buf.
*
* @param pmadapter Pointer to mlan_adapter
*
* @return MLAN_STATUS_SUCCESS (update successful)
* or MLAN_STATUS_FAILURE (no change)
*/
mlan_status
wlan_11h_dfs_event_preprocessing(mlan_adapter * pmadapter)
{
	mlan_status ret = MLAN_STATUS_FAILURE;
	mlan_private *pmpriv = MNULL;
	mlan_private *priv_list[MLAN_MAX_BSS_NUM];
	t_u32 event_id;

	ENTER();

	event_id = pmadapter->event_cause & EVENT_ID_MASK;
	if (event_id == EVENT_RADAR_DETECTED) {
		/* The firmware event carries no bss_num/bss_type; pick an
		 * active interface for it, preferring a DFS master over a
		 * DFS slave.
		 */
		if (wlan_get_privs_by_two_cond(pmadapter,
					       wlan_11h_is_master_active_on_dfs_chan,
					       wlan_11h_is_dfs_master,
					       MTRUE, priv_list)) {
			pmpriv = priv_list[0];
			PRINTM(MINFO, "%s: found dfs_master priv=%p\n",
			       __func__, pmpriv);
		} else if (wlan_get_privs_by_two_cond(pmadapter,
						      wlan_11h_is_slave_active_on_dfs_chan,
						      wlan_11h_is_dfs_slave,
						      MTRUE, priv_list)) {
			pmpriv = priv_list[0];
			PRINTM(MINFO, "%s: found dfs_slave priv=%p\n",
			       __func__, pmpriv);
		}

		/* Found a suitable priv: encode its bss_num/bss_type into
		 * the event_cause word stored in the event buffer.
		 */
		if (pmpriv) {
			pmlan_buffer pmevbuf = pmadapter->pmlan_buffer_event;
			t_u32 new_event_cause = event_id;

			new_event_cause |=
				((GET_BSS_NUM(pmpriv) & 0xff) << 16) |
				((pmpriv->bss_type & 0xff) << 24);
			PRINTM(MINFO, "%s: priv - bss_num=%d, bss_type=%d\n",
			       __func__, GET_BSS_NUM(pmpriv), pmpriv->bss_type);
			memcpy(pmadapter, pmevbuf->pbuf + pmevbuf->data_offset,
			       &new_event_cause, sizeof(new_event_cause));
			ret = MLAN_STATUS_SUCCESS;
		}
	}

	LEAVE();
	return ret;
}
/**
* @brief try to switch to a non-dfs channel
*
* @param priv Void pointer to mlan_private
*
* @param chan pointer to channel
*
* @return MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE or MLAN_STATUS_PENDING
*/
mlan_status
wlan_11h_switch_non_dfs_chan(mlan_private * priv, t_u8 * chan)
{
	mlan_status ret = MLAN_STATUS_FAILURE;
	t_u32 i;
	t_u32 rand_entry;
	t_u8 def_chan;
	t_u8 rand_tries = 0;
	region_chan_t *chn_tbl = MNULL;
	pmlan_adapter pmadapter = priv->adapter;

	ENTER();

	/* get the channel table first: DFS applies to BAND_A (5 GHz) only */
	for (i = 0; i < MAX_REGION_CHANNEL_NUM; i++) {
		if (pmadapter->region_channel[i].band == BAND_A
		    && pmadapter->region_channel[i].valid) {
			chn_tbl = &pmadapter->region_channel[i];
			break;
		}
	}
	if (!chn_tbl || !chn_tbl->pcfp)
		goto done;

	/* Randomly pick entries until the channel is neither under a
	 * non-occupancy period (NOP) nor flagged for passive scan /
	 * radar detection, up to MAX_SWITCH_CHANNEL_RETRIES tries.
	 */
	do {
		rand_entry =
			wlan_11h_get_random_num(pmadapter) % chn_tbl->num_cfp;
		def_chan = (t_u8) chn_tbl->pcfp[rand_entry].channel;
		rand_tries++;
	} while ((wlan_11h_is_channel_under_nop(pmadapter, def_chan) ||
		  chn_tbl->pcfp[rand_entry].passive_scan_or_radar_detect ==
		  MTRUE) && (rand_tries < MAX_SWITCH_CHANNEL_RETRIES));

	/* meet max retries, use the lowest non-dfs channel */
	/* NOTE(review): if the final random try did find a valid channel,
	 * rand_tries still equals MAX_SWITCH_CHANNEL_RETRIES and the random
	 * pick is replaced by the lowest usable channel below.  The result
	 * is still valid, just not random - confirm this is intended.
	 */
	if (rand_tries == MAX_SWITCH_CHANNEL_RETRIES) {
		for (i = 0; i < chn_tbl->num_cfp; i++) {
			if (chn_tbl->pcfp[i].passive_scan_or_radar_detect ==
			    MFALSE &&
			    !wlan_11h_is_channel_under_nop(pmadapter,
							   (t_u8) chn_tbl->
							   pcfp[i].channel)) {
				def_chan = (t_u8) chn_tbl->pcfp[i].channel;
				break;
			}
		}
		/* no usable (non-DFS, non-NOP) channel exists at all */
		if (i == chn_tbl->num_cfp)
			goto done;
	}

	*chan = def_chan;
	ret = MLAN_STATUS_SUCCESS;
done:
	LEAVE();
	return ret;
}
| gpl-2.0 |
linux4hach/linux-at91 | sound/isa/ad1816a/ad1816a.c | 599 | 8971 |
/*
card-ad1816a.c - driver for ADI SoundPort AD1816A based soundcards.
Copyright (C) 2000 by Massimo Piccioni <dafastidio@libero.it>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/time.h>
#include <linux/wait.h>
#include <linux/pnp.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/initval.h>
#include <sound/ad1816a.h>
#include <sound/mpu401.h>
#include <sound/opl3.h>
#define PFX "ad1816a: "
MODULE_AUTHOR("Massimo Piccioni <dafastidio@libero.it>");
MODULE_DESCRIPTION("AD1816A, AD1815");
MODULE_LICENSE("GPL");
MODULE_SUPPORTED_DEVICE("{{Highscreen,Sound-Boostar 16 3D},"
"{Analog Devices,AD1815},"
"{Analog Devices,AD1816A},"
"{TerraTec,Base 64},"
"{TerraTec,AudioSystem EWS64S},"
"{Aztech/Newcom SC-16 3D},"
"{Shark Predator ISA}}");
/* Module parameters: one slot per possible card.  The port/irq/dma arrays
 * are filled from PnP resources in snd_card_ad1816a_pnp() at probe time,
 * which is why they are not exported via module_param_array().
 */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;	/* Index 1-MAX */
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;	/* ID for this card */
static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_ISAPNP;	/* Enable this card */
static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* PnP setup */
static long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* PnP setup */
static long fm_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT;	/* PnP setup */
static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;	/* Pnp setup */
static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ;	/* Pnp setup */
static int dma1[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* PnP setup */
static int dma2[SNDRV_CARDS] = SNDRV_DEFAULT_DMA;	/* PnP setup */
static int clockfreq[SNDRV_CARDS];

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for ad1816a based soundcard.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for ad1816a based soundcard.");
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable ad1816a based soundcard.");
module_param_array(clockfreq, int, NULL, 0444);
MODULE_PARM_DESC(clockfreq, "Clock frequency for ad1816a driver (default = 0).");
/* Supported PnP cards: each entry pairs a card-level ID with the two
 * logical-device IDs (audio, MPU401) requested in snd_card_ad1816a_pnp().
 */
static struct pnp_card_device_id snd_ad1816a_pnpids[] = {
	/* Analog Devices AD1815 */
	{ .id = "ADS7150", .devs = { { .id = "ADS7150" }, { .id = "ADS7151" } } },
	/* Analog Device AD1816? */
	{ .id = "ADS7180", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } },
	/* Analog Devices AD1816A - added by Kenneth Platz <kxp@atl.hp.com> */
	{ .id = "ADS7181", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } },
	/* Analog Devices AD1816A - Aztech/Newcom SC-16 3D */
	{ .id = "AZT1022", .devs = { { .id = "AZT1018" }, { .id = "AZT2002" } } },
	/* Highscreen Sound-Boostar 16 3D - added by Stefan Behnel */
	{ .id = "LWC1061", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } },
	/* Highscreen Sound-Boostar 16 3D */
	{ .id = "MDK1605", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } },
	/* Shark Predator ISA - added by Ken Arromdee */
	{ .id = "SMM7180", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } },
	/* Analog Devices AD1816A - Terratec AudioSystem EWS64 S */
	{ .id = "TER1112", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } },
	/* Analog Devices AD1816A - Terratec AudioSystem EWS64 S */
	{ .id = "TER1112", .devs = { { .id = "TER1100" }, { .id = "TER1101" } } },
	/* Analog Devices AD1816A - Terratec Base 64 */
	{ .id = "TER1411", .devs = { { .id = "ADS7180" }, { .id = "ADS7181" } } },
	/* end */
	{ .id = "" }
};
MODULE_DEVICE_TABLE(pnp_card, snd_ad1816a_pnpids);
#define DRIVER_NAME "snd-card-ad1816a"
/* Activate the card's PnP functions and record the assigned resources in
 * the per-card parameter slots.  The audio function is mandatory (failure
 * returns -EBUSY); the MPU401 function is optional (failure just disables
 * MIDI by setting mpu_port[dev] = -1).
 */
static int snd_card_ad1816a_pnp(int dev, struct pnp_card_link *card,
				const struct pnp_card_device_id *id)
{
	struct pnp_dev *pdev;
	int err;

	/* audio function first */
	pdev = pnp_request_card_device(card, id->devs[0].id, NULL);
	if (!pdev)
		return -EBUSY;

	err = pnp_activate_dev(pdev);
	if (err < 0) {
		printk(KERN_ERR PFX "AUDIO PnP configure failure\n");
		return -EBUSY;
	}

	/* pull the assigned resources out of the activated device */
	port[dev] = pnp_port_start(pdev, 2);
	fm_port[dev] = pnp_port_start(pdev, 1);
	irq[dev] = pnp_irq(pdev, 0);
	dma1[dev] = pnp_dma(pdev, 0);
	dma2[dev] = pnp_dma(pdev, 1);

	/* MPU401 function: best effort only */
	pdev = pnp_request_card_device(card, id->devs[1].id, NULL);
	if (!pdev) {
		mpu_port[dev] = -1;
		snd_printk(KERN_WARNING PFX "MPU401 device busy, skipping.\n");
		return 0;
	}

	err = pnp_activate_dev(pdev);
	if (err < 0) {
		printk(KERN_ERR PFX "MPU401 PnP configure failure\n");
		mpu_port[dev] = -1;
		return 0;
	}

	mpu_port[dev] = pnp_port_start(pdev, 0);
	mpu_irq[dev] = pnp_irq(pdev, 0);
	return 0;
}
static int snd_card_ad1816a_probe(int dev, struct pnp_card_link *pcard,
const struct pnp_card_device_id *pid)
{
int error;
struct snd_card *card;
struct snd_ad1816a *chip;
struct snd_opl3 *opl3;
struct snd_timer *timer;
error = snd_card_new(&pcard->card->dev,
index[dev], id[dev], THIS_MODULE,
sizeof(struct snd_ad1816a), &card);
if (error < 0)
return error;
chip = card->private_data;
if ((error = snd_card_ad1816a_pnp(dev, pcard, pid))) {
snd_card_free(card);
return error;
}
if ((error = snd_ad1816a_create(card, port[dev],
irq[dev],
dma1[dev],
dma2[dev],
chip)) < 0) {
snd_card_free(card);
return error;
}
if (clockfreq[dev] >= 5000 && clockfreq[dev] <= 100000)
chip->clock_freq = clockfreq[dev];
strcpy(card->driver, "AD1816A");
strcpy(card->shortname, "ADI SoundPort AD1816A");
sprintf(card->longname, "%s, SS at 0x%lx, irq %d, dma %d&%d",
card->shortname, chip->port, irq[dev], dma1[dev], dma2[dev]);
if ((error = snd_ad1816a_pcm(chip, 0, NULL)) < 0) {
snd_card_free(card);
return error;
}
if ((error = snd_ad1816a_mixer(chip)) < 0) {
snd_card_free(card);
return error;
}
error = snd_ad1816a_timer(chip, 0, &timer);
if (error < 0) {
snd_card_free(card);
return error;
}
if (mpu_port[dev] > 0) {
if (snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401,
mpu_port[dev], 0, mpu_irq[dev],
NULL) < 0)
printk(KERN_ERR PFX "no MPU-401 device at 0x%lx.\n", mpu_port[dev]);
}
if (fm_port[dev] > 0) {
if (snd_opl3_create(card,
fm_port[dev], fm_port[dev] + 2,
OPL3_HW_AUTO, 0, &opl3) < 0) {
printk(KERN_ERR PFX "no OPL device at 0x%lx-0x%lx.\n", fm_port[dev], fm_port[dev] + 2);
} else {
error = snd_opl3_hwdep_new(opl3, 0, 1, NULL);
if (error < 0) {
snd_card_free(card);
return error;
}
}
}
if ((error = snd_card_register(card)) < 0) {
snd_card_free(card);
return error;
}
pnp_set_card_drvdata(pcard, card);
return 0;
}
/* count of successfully probed cards, checked at module init */
static unsigned int ad1816a_devices;

/* PnP match callback: probe the next enabled parameter slot for this card.
 * The static 'dev' cursor persists across calls so each detected card
 * consumes one slot.
 */
static int snd_ad1816a_pnp_detect(struct pnp_card_link *card,
				  const struct pnp_card_device_id *id)
{
	static int dev;		/* next slot to try; survives across calls */

	while (dev < SNDRV_CARDS) {
		int res;

		if (!enable[dev]) {
			dev++;
			continue;
		}
		res = snd_card_ad1816a_probe(dev, card, id);
		if (res < 0)
			return res;
		dev++;
		ad1816a_devices++;
		return 0;
	}
	return -ENODEV;
}
/* PnP remove callback: free the card and clear the card link's drvdata. */
static void snd_ad1816a_pnp_remove(struct pnp_card_link *pcard)
{
	struct snd_card *card = pnp_get_card_drvdata(pcard);

	snd_card_free(card);
	pnp_set_card_drvdata(pcard, NULL);
}
#ifdef CONFIG_PM
/* Power-management suspend: mark the card D3hot, then suspend the chip. */
static int snd_ad1816a_pnp_suspend(struct pnp_card_link *pcard,
				   pm_message_t state)
{
	struct snd_card *card = pnp_get_card_drvdata(pcard);
	struct snd_ad1816a *chip = card->private_data;

	snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
	snd_ad1816a_suspend(chip);
	return 0;
}
/* Power-management resume: resume the chip, then mark the card D0. */
static int snd_ad1816a_pnp_resume(struct pnp_card_link *pcard)
{
	struct snd_card *card = pnp_get_card_drvdata(pcard);
	struct snd_ad1816a *chip = card->private_data;

	snd_ad1816a_resume(chip);
	snd_power_change_state(card, SNDRV_CTL_POWER_D0);
	return 0;
}
#endif
/* Driver operations and ID table registered with the PnP card layer. */
static struct pnp_card_driver ad1816a_pnpc_driver = {
	.flags = PNP_DRIVER_RES_DISABLE,
	.name = "ad1816a",
	.id_table = snd_ad1816a_pnpids,
	.probe = snd_ad1816a_pnp_detect,
	.remove = snd_ad1816a_pnp_remove,
#ifdef CONFIG_PM
	.suspend = snd_ad1816a_pnp_suspend,
	.resume = snd_ad1816a_pnp_resume,
#endif
};
/* Module init: register the PnP card driver; fail with -ENODEV (and
 * unregister) when no card was detected during registration.
 */
static int __init alsa_card_ad1816a_init(void)
{
	int err = pnp_register_card_driver(&ad1816a_pnpc_driver);

	if (err)
		return err;
	if (ad1816a_devices)
		return 0;

	/* no card probed successfully: undo the registration */
	pnp_unregister_card_driver(&ad1816a_pnpc_driver);
#ifdef MODULE
	printk(KERN_ERR "no AD1816A based soundcards found.\n");
#endif	/* MODULE */
	return -ENODEV;
}
/* Module exit: unregister the PnP card driver (cards are freed through
 * the driver's .remove callback).
 */
static void __exit alsa_card_ad1816a_exit(void)
{
	pnp_unregister_card_driver(&ad1816a_pnpc_driver);
}
module_init(alsa_card_ad1816a_init)
module_exit(alsa_card_ad1816a_exit)
| gpl-2.0 |
blumak2000/blumak_kernel_s6_Lollipop | net/netfilter/ipvs/ip_vs_xmit.c | 599 | 32628 | /*
* ip_vs_xmit.c: various packet transmitters for IPVS
*
* Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
* Julian Anastasov <ja@ssi.bg>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Changes:
*
* Description of forwarding methods:
* - all transmitters are called from LOCAL_IN (remote clients) and
* LOCAL_OUT (local clients) but for ICMP can be called from FORWARD
* - not all connections have destination server, for example,
* connections in backup server when fwmark is used
* - bypass connections use daddr from packet
* - we can use dst without ref while sending in RCU section, we use
* ref when returning NF_ACCEPT for NAT-ed packet via loopback
* LOCAL_OUT rules:
* - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
* - skb->pkt_type is not set yet
* - the only place where we can see skb->sk != NULL
*/
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/tcp.h> /* for tcphdr */
#include <net/ip.h>
#include <net/tcp.h> /* for csum_tcpudp_magic */
#include <net/udp.h>
#include <net/icmp.h> /* for icmp_send */
#include <net/route.h> /* for ip_route_output */
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <net/ip_vs.h>
/* rt_mode flag bits: control which route destinations a transmitter
 * accepts and how the route lookup binds to a source address.
 */
enum {
	IP_VS_RT_MODE_LOCAL = 1, /* Allow local dest */
	IP_VS_RT_MODE_NON_LOCAL = 2, /* Allow non-local dest */
	IP_VS_RT_MODE_RDR = 4, /* Allow redirect from remote daddr to
				* local
				*/
	IP_VS_RT_MODE_CONNECT = 8, /* Always bind route to saddr */
	IP_VS_RT_MODE_KNOWN_NH = 16,/* Route via remote addr */
	IP_VS_RT_MODE_TUNNEL = 32,/* Tunnel mode */
};
/* Allocate a destination route-cache entry (atomic context safe). */
static inline struct ip_vs_dest_dst *ip_vs_dest_dst_alloc(void)
{
	struct ip_vs_dest_dst *dest_dst;

	dest_dst = kmalloc(sizeof(*dest_dst), GFP_ATOMIC);
	return dest_dst;
}
/* Free an entry obtained from ip_vs_dest_dst_alloc(). */
static inline void ip_vs_dest_dst_free(struct ip_vs_dest_dst *dest_dst)
{
	kfree(dest_dst);
}
/*
* Destination cache to speed up outgoing route lookup
*/
/* Publish a new cached route (dest_dst may be NULL to just clear) for a
 * destination and schedule RCU freeing of the previous entry.  Caller must
 * hold dest->dst_lock (asserted via lockdep below).
 */
static inline void
__ip_vs_dst_set(struct ip_vs_dest *dest, struct ip_vs_dest_dst *dest_dst,
		struct dst_entry *dst, u32 dst_cookie)
{
	struct ip_vs_dest_dst *old;

	old = rcu_dereference_protected(dest->dest_dst,
					lockdep_is_held(&dest->dst_lock));

	/* fill the entry before rcu_assign_pointer() makes it visible */
	if (dest_dst) {
		dest_dst->dst_cache = dst;
		dest_dst->dst_cookie = dst_cookie;
	}
	rcu_assign_pointer(dest->dest_dst, dest_dst);

	/* defer freeing of the replaced entry until readers are done */
	if (old)
		call_rcu(&old->rcu_head, ip_vs_dest_dst_rcu_free);
}
/* Return the destination's cached route entry if still usable, else NULL.
 * The pointer is read with rcu_dereference(), so callers run inside an RCU
 * read-side section.  An obsolete dst is revalidated through its
 * ops->check() hook with the stored cookie.
 */
static inline struct ip_vs_dest_dst *
__ip_vs_dst_check(struct ip_vs_dest *dest)
{
	struct ip_vs_dest_dst *dest_dst = rcu_dereference(dest->dest_dst);
	struct dst_entry *dst;

	if (!dest_dst)
		return NULL;
	dst = dest_dst->dst_cache;
	if (dst->obsolete &&
	    dst->ops->check(dst, dest_dst->dst_cookie) == NULL)
		return NULL;	/* stale: caller must refill the cache */
	return dest_dst;
}
/* Decide whether an IPv6 skb violates the given MTU.  When netfilter
 * conntrack defragmented the packet, frag_max_size holds the largest
 * original fragment and is checked instead of skb->len; otherwise a
 * non-GSO packet longer than the MTU is too big.
 */
static inline bool
__mtu_check_toobig_v6(const struct sk_buff *skb, u32 mtu)
{
	u32 frag_max = IP6CB(skb)->frag_max_size;

	if (frag_max)
		return frag_max > mtu;	/* largest fragment violates MTU */

	return skb->len > mtu && !skb_is_gso(skb);
}
/* Get route to daddr, update *saddr, optionally bind route to saddr */
/* Resolve an IPv4 output route to daddr.  *saddr is both input (binding
 * source in CONNECT mode) and output (the source address finally chosen).
 * Returns the route or NULL on failure.
 */
static struct rtable *do_output_route4(struct net *net, __be32 daddr,
				       int rt_mode, __be32 *saddr)
{
	struct flowi4 fl4;
	struct rtable *rt;
	int loop = 0;	/* limits the saddr-binding re-lookup to one pass */

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
	fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
			   FLOWI_FLAG_KNOWN_NH : 0;

retry:
	rt = ip_route_output_key(net, &fl4);
	if (IS_ERR(rt)) {
		/* Invalid saddr ? */
		/* retry once without the (possibly stale) bound saddr */
		if (PTR_ERR(rt) == -EINVAL && *saddr &&
		    rt_mode & IP_VS_RT_MODE_CONNECT && !loop) {
			*saddr = 0;
			flowi4_update_output(&fl4, 0, 0, daddr, 0);
			goto retry;
		}
		IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n", &daddr);
		return NULL;
	} else if (!*saddr && rt_mode & IP_VS_RT_MODE_CONNECT && fl4.saddr) {
		/* first lookup selected a source address: redo the lookup
		 * bound to it so the route matches that source exactly */
		ip_rt_put(rt);
		*saddr = fl4.saddr;
		flowi4_update_output(&fl4, 0, 0, daddr, fl4.saddr);
		loop++;
		goto retry;
	}
	*saddr = fl4.saddr;
	return rt;
}
/* Get route to destination or remote server */
/* Get route to destination or remote server (IPv4).
 *
 * dest:	real server whose per-dest route cache is used; NULL means
 *		route directly to daddr without caching (bypass case)
 * daddr:	destination address (overridden by dest->addr.ip if dest)
 * rt_mode:	IP_VS_RT_MODE_* flags selecting acceptable destinations
 * ret_saddr:	optional output for the chosen source address
 *
 * Returns 1 for a local destination, 0 for non-local (skb's dst replaced
 * with the new route), or -1 on error.
 */
static int
__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
		   __be32 daddr, int rt_mode, __be32 *ret_saddr)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct netns_ipvs *ipvs = net_ipvs(net);
	struct ip_vs_dest_dst *dest_dst;
	struct rtable *rt;			/* Route to the other host */
	struct rtable *ort;			/* Original route */
	struct iphdr *iph;
	__be16 df;
	int mtu;
	int local, noref = 1;

	if (dest) {
		/* use the cached route, or refill the cache under dst_lock */
		dest_dst = __ip_vs_dst_check(dest);
		if (likely(dest_dst))
			rt = (struct rtable *) dest_dst->dst_cache;
		else {
			dest_dst = ip_vs_dest_dst_alloc();
			spin_lock_bh(&dest->dst_lock);
			if (!dest_dst) {
				__ip_vs_dst_set(dest, NULL, NULL, 0);
				spin_unlock_bh(&dest->dst_lock);
				goto err_unreach;
			}
			rt = do_output_route4(net, dest->addr.ip, rt_mode,
					      &dest_dst->dst_saddr.ip);
			if (!rt) {
				__ip_vs_dst_set(dest, NULL, NULL, 0);
				spin_unlock_bh(&dest->dst_lock);
				ip_vs_dest_dst_free(dest_dst);
				goto err_unreach;
			}
			__ip_vs_dst_set(dest, dest_dst, &rt->dst, 0);
			spin_unlock_bh(&dest->dst_lock);
			IP_VS_DBG(10, "new dst %pI4, src %pI4, refcnt=%d\n",
				  &dest->addr.ip, &dest_dst->dst_saddr.ip,
				  atomic_read(&rt->dst.__refcnt));
		}
		daddr = dest->addr.ip;
		if (ret_saddr)
			*ret_saddr = dest_dst->dst_saddr.ip;
	} else {
		__be32 saddr = htonl(INADDR_ANY);

		/* no cache here, so we hold a real dst reference */
		noref = 0;

		/* For such unconfigured boxes avoid many route lookups
		 * for performance reasons because we do not remember saddr
		 */
		rt_mode &= ~IP_VS_RT_MODE_CONNECT;
		rt = do_output_route4(net, daddr, rt_mode, &saddr);
		if (!rt)
			goto err_unreach;
		if (ret_saddr)
			*ret_saddr = saddr;
	}

	/* reject destinations the caller's rt_mode does not allow */
	local = (rt->rt_flags & RTCF_LOCAL) ? 1 : 0;
	if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
	      rt_mode)) {
		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
			     (rt->rt_flags & RTCF_LOCAL) ?
			     "local":"non-local", &daddr);
		goto err_put;
	}
	iph = ip_hdr(skb);
	if (likely(!local)) {
		/* never forward loopback-sourced packets off the host */
		if (unlikely(ipv4_is_loopback(iph->saddr))) {
			IP_VS_DBG_RL("Stopping traffic from loopback address "
				     "%pI4 to non-local address, dest: %pI4\n",
				     &iph->saddr, &daddr);
			goto err_put;
		}
	} else {
		ort = skb_rtable(skb);
		if (!(rt_mode & IP_VS_RT_MODE_RDR) &&
		    !(ort->rt_flags & RTCF_LOCAL)) {
			IP_VS_DBG_RL("Redirect from non-local address %pI4 to "
				     "local requires NAT method, dest: %pI4\n",
			             &iph->daddr, &daddr);
			goto err_put;
		}
		/* skb to local stack, preserve old route */
		if (!noref)
			ip_rt_put(rt);
		return local;
	}

	if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL))) {
		mtu = dst_mtu(&rt->dst);
		df = iph->frag_off & htons(IP_DF);
	} else {
		struct sock *sk = skb->sk;

		/* tunnel mode: reserve room for the outer IPv4 header */
		mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
		if (mtu < 68) {
			IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
			goto err_put;
		}
		ort = skb_rtable(skb);
		if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT)
			ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
		/* MTU check allowed? */
		df = sysctl_pmtu_disc(ipvs) ? iph->frag_off & htons(IP_DF) : 0;
	}

	/* MTU checking */
	if (unlikely(df && skb->len > mtu && !skb_is_gso(skb))) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		IP_VS_DBG(1, "frag needed for %pI4\n", &iph->saddr);
		goto err_put;
	}

	/* swap skb's route for the route to the real server */
	skb_dst_drop(skb);
	if (noref) {
		if (!local)
			skb_dst_set_noref_force(skb, &rt->dst);
		else
			skb_dst_set(skb, dst_clone(&rt->dst));
	} else
		skb_dst_set(skb, &rt->dst);

	return local;

err_put:
	if (!noref)
		ip_rt_put(rt);
	return -1;

err_unreach:
	dst_link_failure(skb);
	return -1;
}
#ifdef CONFIG_IP_VS_IPV6
static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
{
return rt->dst.dev && rt->dst.dev->flags & IFF_LOOPBACK;
}
/* Resolve an IPv6 output route to daddr.  When ret_saddr is given, a
 * source address is also selected (and, if do_xfrm, the route is passed
 * through xfrm_lookup()).  Returns the dst entry or NULL on failure.
 */
static struct dst_entry *
__ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
			struct in6_addr *ret_saddr, int do_xfrm)
{
	struct dst_entry *dst;
	struct flowi6 fl6 = {
		.daddr = *daddr,
	};

	dst = ip6_route_output(net, NULL, &fl6);
	if (dst->error)
		goto out_err;
	if (!ret_saddr)
		return dst;	/* caller does not need a source address */

	/* pick a source address on the output device if none was chosen */
	if (ipv6_addr_any(&fl6.saddr) &&
	    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
			       &fl6.daddr, 0, &fl6.saddr) < 0)
		goto out_err;
	if (do_xfrm) {
		dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
		if (IS_ERR(dst)) {
			/* xfrm_lookup consumed the dst on error */
			dst = NULL;
			goto out_err;
		}
	}
	*ret_saddr = fl6.saddr;
	return dst;

out_err:
	dst_release(dst);
	IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
	return NULL;
}
/*
 * Get route to destination or remote server (IPv6).
 *
 * Finds (or reuses the cached) route for @daddr, validates it against
 * @rt_mode (which of local / non-local / redirect destinations are
 * acceptable), performs MTU checking, and finally attaches the new
 * route to @skb.  Returns 1 if the destination is a local address,
 * 0 if it is remote, or -1 on error.  skb_dst is only replaced on the
 * success path.
 */
static int
__ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
		      struct in6_addr *daddr, struct in6_addr *ret_saddr,
		      struct ip_vs_iphdr *ipvsh, int do_xfrm, int rt_mode)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct ip_vs_dest_dst *dest_dst;
	struct rt6_info *rt;			/* Route to the other host */
	struct rt6_info *ort;			/* Original route */
	struct dst_entry *dst;
	int mtu;
	int local, noref = 1;

	if (dest) {
		/* Known real server: try the per-destination cache first */
		dest_dst = __ip_vs_dst_check(dest);
		if (likely(dest_dst))
			rt = (struct rt6_info *) dest_dst->dst_cache;
		else {
			u32 cookie;

			dest_dst = ip_vs_dest_dst_alloc();
			spin_lock_bh(&dest->dst_lock);
			if (!dest_dst) {
				__ip_vs_dst_set(dest, NULL, NULL, 0);
				spin_unlock_bh(&dest->dst_lock);
				goto err_unreach;
			}
			dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
						      &dest_dst->dst_saddr.in6,
						      do_xfrm);
			if (!dst) {
				__ip_vs_dst_set(dest, NULL, NULL, 0);
				spin_unlock_bh(&dest->dst_lock);
				ip_vs_dest_dst_free(dest_dst);
				goto err_unreach;
			}
			rt = (struct rt6_info *) dst;
			/* fib node serial number stored as a validity cookie
			 * for the cached route
			 */
			cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
			__ip_vs_dst_set(dest, dest_dst, &rt->dst, cookie);
			spin_unlock_bh(&dest->dst_lock);
			IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
				  &dest->addr.in6, &dest_dst->dst_saddr.in6,
				  atomic_read(&rt->dst.__refcnt));
		}
		if (ret_saddr)
			*ret_saddr = dest_dst->dst_saddr.in6;
	} else {
		/* No real server: resolve directly and hold our own
		 * reference (noref = 0)
		 */
		noref = 0;
		dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
		if (!dst)
			goto err_unreach;
		rt = (struct rt6_info *) dst;
	}

	/* Reject destinations the caller's rt_mode does not allow */
	local = __ip_vs_is_local_route6(rt);
	if (!((local ? IP_VS_RT_MODE_LOCAL : IP_VS_RT_MODE_NON_LOCAL) &
	      rt_mode)) {
		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6c\n",
			     local ? "local":"non-local", daddr);
		goto err_put;
	}
	if (likely(!local)) {
		/* Do not forward loopback-sourced packets to remote hosts */
		if (unlikely((!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
			     ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
					    IPV6_ADDR_LOOPBACK)) {
			IP_VS_DBG_RL("Stopping traffic from loopback address "
				     "%pI6c to non-local address, "
				     "dest: %pI6c\n",
				     &ipv6_hdr(skb)->saddr, daddr);
			goto err_put;
		}
	} else {
		ort = (struct rt6_info *) skb_dst(skb);
		if (!(rt_mode & IP_VS_RT_MODE_RDR) &&
		    !__ip_vs_is_local_route6(ort)) {
			IP_VS_DBG_RL("Redirect from non-local address %pI6c "
				     "to local requires NAT method, "
				     "dest: %pI6c\n",
				     &ipv6_hdr(skb)->daddr, daddr);
			goto err_put;
		}
		/* skb to local stack, preserve old route */
		if (!noref)
			dst_release(&rt->dst);
		return local;
	}

	/* MTU checking */
	if (likely(!(rt_mode & IP_VS_RT_MODE_TUNNEL)))
		mtu = dst_mtu(&rt->dst);
	else {
		struct sock *sk = skb->sk;

		/* Leave room for the outer tunnel header */
		mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
		if (mtu < IPV6_MIN_MTU) {
			IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
				     IPV6_MIN_MTU);
			goto err_put;
		}
		ort = (struct rt6_info *) skb_dst(skb);
		if (!skb->dev && sk && sk->sk_state != TCP_TIME_WAIT)
			ort->dst.ops->update_pmtu(&ort->dst, sk, NULL, mtu);
	}

	if (unlikely(__mtu_check_toobig_v6(skb, mtu))) {
		if (!skb->dev)
			skb->dev = net->loopback_dev;
		/* only send ICMP too big on first fragment */
		if (!ipvsh->fragoffs)
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP_VS_DBG(1, "frag needed for %pI6c\n", &ipv6_hdr(skb)->saddr);
		goto err_put;
	}

	/* Install the new route on the skb */
	skb_dst_drop(skb);
	if (noref) {
		if (!local)
			skb_dst_set_noref_force(skb, &rt->dst);
		else
			skb_dst_set(skb, dst_clone(&rt->dst));
	} else
		skb_dst_set(skb, &rt->dst);

	return local;

err_put:
	if (!noref)
		dst_release(&rt->dst);
	return -1;

err_unreach:
	dst_link_failure(skb);
	return -1;
}
#endif
/* return NF_ACCEPT to allow forwarding or other NF_xxx on error */
static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
					    struct ip_vs_conn *cp)
{
	int ret = NF_ACCEPT;

	/* flag the skb as IPVS-owned */
	skb->ipvs_property = 1;
	if (unlikely(cp->flags & IP_VS_CONN_F_NFCT))
		ret = ip_vs_confirm_conntrack(skb);
	if (ret == NF_ACCEPT) {
		/* drop the conntrack reference and checksum state before
		 * the packet is re-injected
		 */
		nf_reset(skb);
		skb_forward_csum(skb);
	}
	return ret;
}
/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent)
 *
 * NAT flavour: keeps (and updates) conntrack state when the connection
 * uses netfilter conntrack, otherwise marks the skb untracked.
 */
static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
					 struct ip_vs_conn *cp, int local)
{
	int ret = NF_STOLEN;

	skb->ipvs_property = 1;
	if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
		ip_vs_notrack(skb);
	else
		ip_vs_update_conntrack(skb, cp, 1);
	if (!local) {
		/* re-inject via LOCAL_OUT so the packet is transmitted */
		skb_forward_csum(skb);
		NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
			dst_output);
	} else
		ret = NF_ACCEPT;
	return ret;
}
/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent)
 *
 * Like ip_vs_nat_send_or_cont() but never updates conntrack state.
 */
static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
				     struct ip_vs_conn *cp, int local)
{
	int ret = NF_STOLEN;

	skb->ipvs_property = 1;
	if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
		ip_vs_notrack(skb);
	if (!local) {
		/* re-inject via LOCAL_OUT so the packet is transmitted */
		skb_forward_csum(skb);
		NF_HOOK(pf, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
			dst_output);
	} else
		ret = NF_ACCEPT;
	return ret;
}
/*
 *      NULL transmitter (do nothing except return NF_ACCEPT)
 */
int
ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	/* we do not touch skb and do not need pskb ptr */
	return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
}
/*
 *      Bypass transmitter
 *      Let packets bypass the destination when the destination is not
 *      available, it may be only used in transparent cache cluster.
 *
 *      Routes the packet to its original (non-local) destination and
 *      sends it; always returns NF_STOLEN.
 */
int
ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	struct iphdr  *iph = ip_hdr(skb);

	EnterFunction(10);

	rcu_read_lock();
	/* only non-local destinations are acceptable when bypassing */
	if (__ip_vs_get_out_rt(skb, NULL, iph->daddr, IP_VS_RT_MODE_NON_LOCAL,
			       NULL) < 0)
		goto tx_error;

	ip_send_check(iph);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
	rcu_read_unlock();

	LeaveFunction(10);
	return NF_STOLEN;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
/* IPv6 variant of the bypass transmitter; see ip_vs_bypass_xmit(). */
int
ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	EnterFunction(10);

	rcu_read_lock();
	/* only non-local destinations are acceptable when bypassing */
	if (__ip_vs_get_out_rt_v6(skb, NULL, &ipvsh->daddr.in6, NULL,
				  ipvsh, 0, IP_VS_RT_MODE_NON_LOCAL) < 0)
		goto tx_error;

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
	rcu_read_unlock();

	LeaveFunction(10);
	return NF_STOLEN;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
#endif
/*
 *      NAT transmitter (only for outside-to-inside nat forwarding)
 *      Not used for related ICMP
 *
 *      Rewrites the destination address/port to the real server and
 *      re-injects the packet.  Returns a netfilter verdict.
 */
int
ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
	       struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	struct rtable *rt;		/* Route to the other host */
	int local, rc, was_input;

	EnterFunction(10);

	rcu_read_lock();
	/* check if it is a connection of no-client-port */
	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
		__be16 _pt, *p;

		/* learn the client port from the transport header */
		p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
		if (p == NULL)
			goto tx_error;
		ip_vs_conn_fill_cport(cp, *p);
		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
	}

	was_input = rt_is_input_route(skb_rtable(skb));
	local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
				   IP_VS_RT_MODE_LOCAL |
				   IP_VS_RT_MODE_NON_LOCAL |
				   IP_VS_RT_MODE_RDR, NULL);
	if (local < 0)
		goto tx_error;
	rt = skb_rtable(skb);
	/*
	 * Avoid duplicate tuple in reply direction for NAT traffic
	 * to local address when connection is sync-ed
	 */
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
		enum ip_conntrack_info ctinfo;
		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

		if (ct && !nf_ct_is_untracked(ct)) {
			IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
					 "ip_vs_nat_xmit(): "
					 "stopping DNAT to local address");
			goto tx_error;
		}
	}
#endif

	/* From world but DNAT to loopback address? */
	if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) {
		IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
				 "stopping DNAT to loopback address");
		goto tx_error;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, sizeof(struct iphdr)))
		goto tx_error;

	if (skb_cow(skb, rt->dst.dev->hard_header_len))
		goto tx_error;

	/* mangle the packet */
	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
		goto tx_error;
	ip_hdr(skb)->daddr = cp->daddr.ip;
	ip_send_check(ip_hdr(skb));

	IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT");

	/* FIXME: when application helper enlarges the packet and the length
	   is larger than the MTU of outgoing device, there will be still
	   MTU problem. */

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
	rcu_read_unlock();

	LeaveFunction(10);
	return rc;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
/*
 * IPv6 NAT transmitter (only for outside-to-inside NAT forwarding).
 * Not used for related ICMP.
 *
 * Rewrites the destination address to the real server and re-injects
 * the packet.  Returns the verdict from ip_vs_nat_send_or_cont() on
 * success, or NF_STOLEN after freeing the skb on error.
 */
int
ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	struct rt6_info *rt;		/* Route to the other host */
	int local, rc;

	EnterFunction(10);

	rcu_read_lock();
	/* check if it is a connection of no-client-port */
	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !ipvsh->fragoffs)) {
		__be16 _pt, *p;

		/* learn the client port from the transport header */
		p = skb_header_pointer(skb, ipvsh->len, sizeof(_pt), &_pt);
		if (p == NULL)
			goto tx_error;
		ip_vs_conn_fill_cport(cp, *p);
		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
	}

	local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
				      ipvsh, 0,
				      IP_VS_RT_MODE_LOCAL |
				      IP_VS_RT_MODE_NON_LOCAL |
				      IP_VS_RT_MODE_RDR);
	if (local < 0)
		goto tx_error;
	rt = (struct rt6_info *) skb_dst(skb);
	/*
	 * Avoid duplicate tuple in reply direction for NAT traffic
	 * to local address when connection is sync-ed
	 */
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
		enum ip_conntrack_info ctinfo;
		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

		if (ct && !nf_ct_is_untracked(ct)) {
			IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
					 "ip_vs_nat_xmit_v6(): "
					 "stopping DNAT to local address");
			goto tx_error;
		}
	}
#endif

	/* From world but DNAT to loopback address? */
	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
		IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
				 "ip_vs_nat_xmit_v6(): "
				 "stopping DNAT to loopback address");
		goto tx_error;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
		goto tx_error;

	if (skb_cow(skb, rt->dst.dev->hard_header_len))
		goto tx_error;

	/* mangle the packet */
	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp, ipvsh))
		goto tx_error;
	ipv6_hdr(skb)->daddr = cp->daddr.in6;

	IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT");

	/* FIXME: when application helper enlarges the packet and the length
	   is larger than the MTU of outgoing device, there will be still
	   MTU problem. */

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
	rcu_read_unlock();

	LeaveFunction(10);
	return rc;

tx_error:
	/* Match the cleanup order used by every other transmitter in this
	 * file (and by ip_vs_nat_xmit): free the skb and drop the RCU read
	 * lock before emitting the function-exit trace.
	 */
	kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
#endif
/*
 *   IP Tunneling transmitter
 *
 *   This function encapsulates the packet in a new IP packet, its
 *   destination will be set to cp->daddr. Most code of this function
 *   is taken from ipip.c.
 *
 *   It is used in VS/TUN cluster. The load balancer selects a real
 *   server from a cluster based on a scheduling algorithm,
 *   encapsulates the request packet and forwards it to the selected
 *   server. For example, all real servers are configured with
 *   "ifconfig tunl0 <Virtual IP Address> up". When the server receives
 *   the encapsulated packet, it will decapsulate the packet, processe
 *   the request and return the response packets directly to the client
 *   without passing the load balancer. This can greatly increase the
 *   scalability of virtual server.
 *
 *   Used for ANY protocol
 */
int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	struct netns_ipvs *ipvs = net_ipvs(skb_net(skb));
	struct rtable *rt;			/* Route to the other host */
	__be32 saddr;				/* Source for tunnel */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr  *old_iph = ip_hdr(skb);
	u8     tos = old_iph->tos;
	__be16 df;
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int ret, local;

	EnterFunction(10);

	rcu_read_lock();
	local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
				   IP_VS_RT_MODE_LOCAL |
				   IP_VS_RT_MODE_NON_LOCAL |
				   IP_VS_RT_MODE_CONNECT |
				   IP_VS_RT_MODE_TUNNEL, &saddr);
	if (local < 0)
		goto tx_error;
	if (local) {
		/* destination is the local host: deliver without tunnelling */
		rcu_read_unlock();
		return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
	}

	rt = skb_rtable(skb);
	tdev = rt->dst.dev;

	/* Copy DF, reset fragment offset and MF */
	df = sysctl_pmtu_disc(ipvs) ? old_iph->frag_off & htons(IP_DF) : 0;

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);

	/* reallocate if there is no room for the outer header */
	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, max_headroom);

		if (!new_skb)
			goto tx_error;
		consume_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb->transport_header = skb->network_header;

	/* fix old IP header checksum */
	ip_send_check(old_iph);

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/*
	 * Push down and install the IPIP header.
	 */
	iph			= ip_hdr(skb);
	iph->version		= 4;
	iph->ihl		= sizeof(struct iphdr)>>2;
	iph->frag_off		= df;
	iph->protocol		= IPPROTO_IPIP;
	iph->tos		= tos;
	iph->daddr		= cp->daddr.ip;
	iph->saddr		= saddr;
	iph->ttl		= old_iph->ttl;
	ip_select_ident(skb, NULL);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
	if (ret == NF_ACCEPT)
		ip_local_out(skb);
	else if (ret == NF_DROP)
		kfree_skb(skb);
	rcu_read_unlock();

	LeaveFunction(10);
	return NF_STOLEN;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
/* IPv6-in-IPv6 tunnel transmitter; see ip_vs_tunnel_xmit() above for
 * the general VS/TUN description.
 */
int
ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	struct rt6_info *rt;		/* Route to the other host */
	struct in6_addr saddr;		/* Source for tunnel */
	struct net_device *tdev;	/* Device to other host */
	struct ipv6hdr  *old_iph = ipv6_hdr(skb);
	struct ipv6hdr  *iph;		/* Our new IP header */
	unsigned int max_headroom;	/* The extra header space needed */
	int ret, local;

	EnterFunction(10);

	rcu_read_lock();
	local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
				      &saddr, ipvsh, 1,
				      IP_VS_RT_MODE_LOCAL |
				      IP_VS_RT_MODE_NON_LOCAL |
				      IP_VS_RT_MODE_TUNNEL);
	if (local < 0)
		goto tx_error;
	if (local) {
		/* destination is the local host: deliver without tunnelling */
		rcu_read_unlock();
		return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
	}

	rt = (struct rt6_info *) skb_dst(skb);
	tdev = rt->dst.dev;

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);

	/* reallocate if there is no room for the outer header */
	if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, max_headroom);

		if (!new_skb)
			goto tx_error;
		consume_skb(skb);
		skb = new_skb;
		old_iph = ipv6_hdr(skb);
	}

	skb->transport_header = skb->network_header;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	/* NOTE(review): IPCB() is the IPv4 control block; clearing it on an
	 * IPv6 skb mirrors the v4 path but looks suspect — confirm.
	 */
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/*
	 * Push down and install the IPIP header.
	 */
	iph			= ipv6_hdr(skb);
	iph->version		= 6;
	iph->nexthdr		= IPPROTO_IPV6;
	/* payload covers the whole original packet incl. its IPv6 header */
	iph->payload_len	= old_iph->payload_len;
	be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
	ipv6_change_dsfield(iph, 0, ipv6_get_dsfield(old_iph));
	iph->daddr		= cp->daddr.in6;
	iph->saddr		= saddr;
	iph->hop_limit		= old_iph->hop_limit;

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ret = ip_vs_tunnel_xmit_prepare(skb, cp);
	if (ret == NF_ACCEPT)
		ip6_local_out(skb);
	else if (ret == NF_DROP)
		kfree_skb(skb);
	rcu_read_unlock();

	LeaveFunction(10);
	return NF_STOLEN;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
#endif
/*
 *      Direct Routing transmitter
 *      Used for ANY protocol
 *
 *      Forwards the packet unmodified to the real server (which must
 *      be reachable on the same link); always returns NF_STOLEN.
 */
int
ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
	      struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	int local;

	EnterFunction(10);

	rcu_read_lock();
	local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
				   IP_VS_RT_MODE_LOCAL |
				   IP_VS_RT_MODE_NON_LOCAL |
				   IP_VS_RT_MODE_KNOWN_NH, NULL);
	if (local < 0)
		goto tx_error;
	if (local) {
		/* destination is the local host: continue in the stack */
		rcu_read_unlock();
		return ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 1);
	}

	ip_send_check(ip_hdr(skb));

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0);
	rcu_read_unlock();

	LeaveFunction(10);
	return NF_STOLEN;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
/* IPv6 variant of the direct-routing transmitter; see ip_vs_dr_xmit(). */
int
ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		 struct ip_vs_protocol *pp, struct ip_vs_iphdr *ipvsh)
{
	int local;

	EnterFunction(10);

	rcu_read_lock();
	local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
				      ipvsh, 0,
				      IP_VS_RT_MODE_LOCAL |
				      IP_VS_RT_MODE_NON_LOCAL);
	if (local < 0)
		goto tx_error;
	if (local) {
		/* destination is the local host: continue in the stack */
		rcu_read_unlock();
		return ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 1);
	}

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0);
	rcu_read_unlock();

	LeaveFunction(10);
	return NF_STOLEN;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	LeaveFunction(10);
	return NF_STOLEN;
}
#endif
/*
 *      ICMP packet transmitter
 *      called by the ip_vs_in_icmp
 *
 *      For non-NAT forwarding methods the embedded ICMP is forwarded
 *      via the connection's packet_xmit; for VS/NAT it is mangled in
 *      place and re-injected.  Returns a netfilter verdict.
 */
int
ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
		struct ip_vs_iphdr *iph)
{
	struct rtable	*rt;	/* Route to the other host */
	int rc;
	int local;
	int rt_mode, was_input;

	EnterFunction(10);

	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
	   forwarded directly here, because there is no need to
	   translate address/port back */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
		if (cp->packet_xmit)
			rc = cp->packet_xmit(skb, cp, pp, iph);
		else
			rc = NF_ACCEPT;
		/* do not touch skb anymore */
		atomic_inc(&cp->in_pkts);
		goto out;
	}

	/*
	 * mangle and send the packet here (only for VS/NAT)
	 */
	was_input = rt_is_input_route(skb_rtable(skb));

	/* LOCALNODE from FORWARD hook is not supported */
	rt_mode = (hooknum != NF_INET_FORWARD) ?
		  IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
		  IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
	rcu_read_lock();
	local = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip, rt_mode, NULL);
	if (local < 0)
		goto tx_error;
	rt = skb_rtable(skb);

	/*
	 * Avoid duplicate tuple in reply direction for NAT traffic
	 * to local address when connection is sync-ed
	 */
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
		enum ip_conntrack_info ctinfo;
		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

		if (ct && !nf_ct_is_untracked(ct)) {
			IP_VS_DBG(10, "%s(): "
				  "stopping DNAT to local address %pI4\n",
				  __func__, &cp->daddr.ip);
			goto tx_error;
		}
	}
#endif

	/* From world but DNAT to loopback address? */
	if (local && ipv4_is_loopback(cp->daddr.ip) && was_input) {
		IP_VS_DBG(1, "%s(): "
			  "stopping DNAT to loopback %pI4\n",
			  __func__, &cp->daddr.ip);
		goto tx_error;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, offset))
		goto tx_error;

	if (skb_cow(skb, rt->dst.dev->hard_header_len))
		goto tx_error;

	ip_vs_nat_icmp(skb, pp, cp, 0);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local);
	rcu_read_unlock();
	goto out;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	rc = NF_STOLEN;
out:
	LeaveFunction(10);
	return rc;
}
#ifdef CONFIG_IP_VS_IPV6
/* IPv6 variant of the ICMP transmitter; see ip_vs_icmp_xmit(). */
int
ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		   struct ip_vs_protocol *pp, int offset, unsigned int hooknum,
		   struct ip_vs_iphdr *ipvsh)
{
	struct rt6_info	*rt;	/* Route to the other host */
	int rc;
	int local;
	int rt_mode;

	EnterFunction(10);

	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
	   forwarded directly here, because there is no need to
	   translate address/port back */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
		if (cp->packet_xmit)
			rc = cp->packet_xmit(skb, cp, pp, ipvsh);
		else
			rc = NF_ACCEPT;
		/* do not touch skb anymore */
		atomic_inc(&cp->in_pkts);
		goto out;
	}

	/*
	 * mangle and send the packet here (only for VS/NAT)
	 */

	/* LOCALNODE from FORWARD hook is not supported */
	rt_mode = (hooknum != NF_INET_FORWARD) ?
		  IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL |
		  IP_VS_RT_MODE_RDR : IP_VS_RT_MODE_NON_LOCAL;
	rcu_read_lock();
	local = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
				      ipvsh, 0, rt_mode);
	if (local < 0)
		goto tx_error;
	rt = (struct rt6_info *) skb_dst(skb);

	/*
	 * Avoid duplicate tuple in reply direction for NAT traffic
	 * to local address when connection is sync-ed
	 */
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
		enum ip_conntrack_info ctinfo;
		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);

		if (ct && !nf_ct_is_untracked(ct)) {
			IP_VS_DBG(10, "%s(): "
				  "stopping DNAT to local address %pI6\n",
				  __func__, &cp->daddr.in6);
			goto tx_error;
		}
	}
#endif

	/* From world but DNAT to loopback address? */
	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
		IP_VS_DBG(1, "%s(): "
			  "stopping DNAT to loopback %pI6\n",
			  __func__, &cp->daddr.in6);
		goto tx_error;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, offset))
		goto tx_error;

	if (skb_cow(skb, rt->dst.dev->hard_header_len))
		goto tx_error;

	ip_vs_nat_icmp_v6(skb, pp, cp, 0);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local);
	rcu_read_unlock();
	goto out;

tx_error:
	kfree_skb(skb);
	rcu_read_unlock();
	rc = NF_STOLEN;
out:
	LeaveFunction(10);
	return rc;
}
#endif
| gpl-2.0 |
romracer/sgs2sr-kernel | arch/arm/mach-davinci/board-dm355-leopard.c | 855 | 7190 | /*
* DM355 leopard board support
*
* Based on board-dm355-evm.c
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include <linux/i2c.h>
#include <linux/gpio.h>
#include <linux/clk.h>
#include <linux/spi/spi.h>
#include <linux/spi/eeprom.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/dm355.h>
#include <mach/i2c.h>
#include <mach/serial.h>
#include <mach/nand.h>
#include <mach/mmc.h>
#include <mach/usb.h>
/* NOTE: this is geared for the standard config, with a socketed
 * 2 GByte Micron NAND (MT29F16G08FAA) using 128KB sectors.  If you
 * swap chips, maybe with a different block size, partitioning may
 * need to be changed.
 */
#define NAND_BLOCK_SIZE		SZ_128K

/* NAND layout: read-only bootloader, then U-Boot env, kernel and two
 * filesystem partitions; the final partition takes the rest of the chip.
 */
static struct mtd_partition davinci_nand_partitions[] = {
	{
		/* UBL (a few copies) plus U-Boot */
		.name		= "bootloader",
		.offset		= 0,
		.size		= 15 * NAND_BLOCK_SIZE,
		.mask_flags	= MTD_WRITEABLE, /* force read-only */
	}, {
		/* U-Boot environment */
		.name		= "params",
		.offset		= MTDPART_OFS_APPEND,
		.size		= 1 * NAND_BLOCK_SIZE,
		.mask_flags	= 0,
	}, {
		.name		= "kernel",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_4M,
		.mask_flags	= 0,
	}, {
		.name		= "filesystem1",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_512M,
		.mask_flags	= 0,
	}, {
		.name		= "filesystem2",
		.offset		= MTDPART_OFS_APPEND,
		.size		= MTDPART_SIZ_FULL,
		.mask_flags	= 0,
	}
	/* two blocks with bad block table (and mirror) at the end */
};

/* NAND controller config: hardware syndrome ECC, BBT kept in flash */
static struct davinci_nand_pdata davinci_nand_data = {
	.mask_chipsel		= BIT(14),
	.parts			= davinci_nand_partitions,
	.nr_parts		= ARRAY_SIZE(davinci_nand_partitions),
	.ecc_mode		= NAND_ECC_HW_SYNDROME,
	.options		= NAND_USE_FLASH_BBT,
};

/* AEMIF chip-select 0 data window plus the EMIF control registers */
static struct resource davinci_nand_resources[] = {
	{
		.start		= DM355_ASYNC_EMIF_DATA_CE0_BASE,
		.end		= DM355_ASYNC_EMIF_DATA_CE0_BASE + SZ_32M - 1,
		.flags		= IORESOURCE_MEM,
	}, {
		.start		= DM355_ASYNC_EMIF_CONTROL_BASE,
		.end		= DM355_ASYNC_EMIF_CONTROL_BASE + SZ_4K - 1,
		.flags		= IORESOURCE_MEM,
	},
};

static struct platform_device davinci_nand_device = {
	.name			= "davinci_nand",
	.id			= 0,
	.num_resources		= ARRAY_SIZE(davinci_nand_resources),
	.resource		= davinci_nand_resources,
	.dev			= {
		.platform_data	= &davinci_nand_data,
	},
};

/* I2C bus runs at 400 kHz with no extra bus delay */
static struct davinci_i2c_platform_data i2c_pdata = {
	.bus_freq	= 400	/* kHz */,
	.bus_delay	= 0	/* usec */,
};
/* Base of the four MMC status GPIOs; stays -EINVAL (invalid) until
 * dm355leopard_mmcsd_gpios() is called.
 */
static int leopard_mmc_gpio = -EINVAL;

/* Claim the MMC card-detect / write-protect GPIOs and record their base.
 * Installed as platform_data of the "dm355leopard_msp" I2C device, so it
 * is presumably invoked by that driver once its GPIOs are available —
 * TODO confirm against the msp driver.
 */
static void dm355leopard_mmcsd_gpios(unsigned gpio)
{
	gpio_request(gpio + 0, "mmc0_ro");
	gpio_request(gpio + 1, "mmc0_cd");
	gpio_request(gpio + 2, "mmc1_ro");
	gpio_request(gpio + 3, "mmc1_cd");

	/* we "know" these are input-only so we don't
	 * need to call gpio_direction_input()
	 */

	leopard_mmc_gpio = gpio;
}

static struct i2c_board_info dm355leopard_i2c_info[] = {
	{ I2C_BOARD_INFO("dm355leopard_msp", 0x25),
		.platform_data = dm355leopard_mmcsd_gpios,
		/* plus irq */ },
	/* { I2C_BOARD_INFO("tlv320aic3x", 0x1b), }, */
	/* { I2C_BOARD_INFO("tvp5146", 0x5d), }, */
};

/* Register the I2C adapter and board devices; the MSP430 companion
 * chip interrupts via GPIO 5.
 */
static void __init leopard_init_i2c(void)
{
	davinci_init_i2c(&i2c_pdata);

	gpio_request(5, "dm355leopard_msp");
	gpio_direction_input(5);
	dm355leopard_i2c_info[0].irq = gpio_to_irq(5);

	i2c_register_board_info(1, dm355leopard_i2c_info,
			ARRAY_SIZE(dm355leopard_i2c_info));
}
/* DM9000 Ethernet: address/data windows in async memory space; the IRQ
 * resource is filled in from GPIO 9 in dm355_leopard_init().
 */
static struct resource dm355leopard_dm9000_rsrc[] = {
	{
		/* addr */
		.start	= 0x04000000,
		.end	= 0x04000001,
		.flags	= IORESOURCE_MEM,
	}, {
		/* data */
		.start	= 0x04000016,
		.end	= 0x04000017,
		.flags	= IORESOURCE_MEM,
	}, {
		.flags	= IORESOURCE_IRQ
			| IORESOURCE_IRQ_HIGHEDGE /* rising (active high) */,
	},
};

static struct platform_device dm355leopard_dm9000 = {
	.name		= "dm9000",
	.id		= -1,
	.resource	= dm355leopard_dm9000_rsrc,
	.num_resources	= ARRAY_SIZE(dm355leopard_dm9000_rsrc),
};

/* Devices registered in dm355_leopard_init() */
static struct platform_device *davinci_leopard_devices[] __initdata = {
	&dm355leopard_dm9000,
	&davinci_nand_device,
};

/* Enable UART0 only */
static struct davinci_uart_config uart_config __initdata = {
	.enabled_uarts = (1 << 0),
};
/* Early SoC setup hook: initialise the DM355 core support */
static void __init dm355_leopard_map_io(void)
{
	dm355_init();
}
/* MMC slot status callbacks.  GPIO mapping (from the msp callback):
 * base + 2*module + 0 is write-protect, base + 2*module + 1 is
 * card-detect.  Both return -ENXIO until the GPIO base is known.
 */
static int dm355leopard_mmc_get_cd(int module)
{
	if (!gpio_is_valid(leopard_mmc_gpio))
		return -ENXIO;
	/* low == card present */
	return !gpio_get_value_cansleep(leopard_mmc_gpio + 2 * module + 1);
}

static int dm355leopard_mmc_get_ro(int module)
{
	if (!gpio_is_valid(leopard_mmc_gpio))
		return -ENXIO;
	/* high == card's write protect switch active */
	return gpio_get_value_cansleep(leopard_mmc_gpio + 2 * module + 0);
}
/* MMC host config shared by both controllers: 4-bit bus, 50 MHz max,
 * high-speed MMC and SD supported.
 */
static struct davinci_mmc_config dm355leopard_mmc_config = {
	.get_cd		= dm355leopard_mmc_get_cd,
	.get_ro		= dm355leopard_mmc_get_ro,
	.wires		= 4,
	.max_freq	= 50000000,
	.caps		= MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED,
};

/* Don't connect anything to J10 unless you're only using USB host
 * mode *and* have to do so with some kind of gender-bender.  If
 * you have proper Mini-B or Mini-A cables (or Mini-A adapters)
 * the ID pin won't need any help.
 */
#ifdef CONFIG_USB_MUSB_PERIPHERAL
#define USB_ID_VALUE	0	/* ID pulled high; *should* float */
#else
#define USB_ID_VALUE	1	/* ID pulled low */
#endif

/* 64 Kbit SPI EEPROM, 32-byte pages, two address bytes */
static struct spi_eeprom at25640a = {
	.byte_len	= SZ_64K / 8,
	.name		= "at25640a",
	.page_size	= 32,
	.flags		= EE_ADDR2,
};

static struct spi_board_info dm355_leopard_spi_info[] __initconst = {
	{
		.modalias	= "at25",
		.platform_data	= &at25640a,
		.max_speed_hz	= 10 * 1000 * 1000,	/* at 3v3 */
		.bus_num	= 0,
		.chip_select	= 0,
		.mode		= SPI_MODE_0,
	},
};
/* Board init: wire up the DM9000 interrupt and AEMIF clock, register
 * platform devices, then bring up I2C, serial, USB, MMC and SPI.
 */
static __init void dm355_leopard_init(void)
{
	struct clk *aemif;

	/* DM9000 interrupts via GPIO 9 */
	gpio_request(9, "dm9000");
	gpio_direction_input(9);
	dm355leopard_dm9000_rsrc[2].start = gpio_to_irq(9);

	/* the DM9000 and NAND sit behind the async EMIF; keep it clocked */
	aemif = clk_get(&dm355leopard_dm9000.dev, "aemif");
	if (IS_ERR(aemif))
		WARN("%s: unable to get AEMIF clock\n", __func__);
	else
		clk_enable(aemif);

	platform_add_devices(davinci_leopard_devices,
			     ARRAY_SIZE(davinci_leopard_devices));
	leopard_init_i2c();
	davinci_serial_init(&uart_config);

	/* NOTE:  NAND flash timings set by the UBL are slower than
	 * needed by MT29F16G08FAA chips ... EMIF.A1CR is 0x40400204
	 * but could be 0x0400008c for about 25% faster page reads.
	 */

	gpio_request(2, "usb_id_toggle");
	gpio_direction_output(2, USB_ID_VALUE);
	/* irlml6401 switches over 1A in under 8 msec */
	davinci_setup_usb(1000, 8);

	davinci_setup_mmc(0, &dm355leopard_mmc_config);
	davinci_setup_mmc(1, &dm355leopard_mmc_config);

	dm355_init_spi0(BIT(0), dm355_leopard_spi_info,
			ARRAY_SIZE(dm355_leopard_spi_info));
}
/* Machine descriptor for the DaVinci DM355 leopard board */
MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
	.phys_io	= IO_PHYS,
	.io_pg_offst	= (__IO_ADDRESS(IO_PHYS) >> 18) & 0xfffc,
	.boot_params	= (0x80000100),
	.map_io		= dm355_leopard_map_io,
	.init_irq	= davinci_irq_init,
	.timer		= &davinci_timer,
	.init_machine	= dm355_leopard_init,
MACHINE_END
| gpl-2.0 |
andreyloktev/linux | arch/arm/mach-mvebu/kirkwood-pm.c | 1623 | 1849 | /*
* Power Management driver for Marvell Kirkwood SoCs
*
* Copyright (C) 2013 Ezequiel Garcia <ezequiel@free-electrons.com>
* Copyright (C) 2010 Simon Guinot <sguinot@lacie.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License,
* version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/suspend.h>
#include <linux/io.h>
#include "kirkwood.h"
/* MMIO mappings set up by kirkwood_pm_init() */
static void __iomem *ddr_operation_base;
static void __iomem *memory_pm_ctrl;

/*
 * Enter standby: put peripherals and DDR into low-power states, idle
 * the CPU, then restore the saved memory PM control value on wake-up.
 */
static void kirkwood_low_power(void)
{
	u32 mem_pm_ctrl;

	/* save current value so it can be restored after wake-up */
	mem_pm_ctrl = readl(memory_pm_ctrl);

	/* Set peripherals to low-power mode */
	writel_relaxed(~0, memory_pm_ctrl);

	/* Set DDR in self-refresh */
	writel_relaxed(0x7, ddr_operation_base);

	/*
	 * Set CPU in wait-for-interrupt state.
	 * This disables the CPU core clocks,
	 * the array clocks, and also the L2 controller.
	 */
	cpu_do_idle();

	writel_relaxed(mem_pm_ctrl, memory_pm_ctrl);
}
/*
 * Platform suspend entry point.  Only standby is implemented; any
 * other state yields -EINVAL.
 */
static int kirkwood_suspend_enter(suspend_state_t state)
{
	if (state != PM_SUSPEND_STANDBY)
		return -EINVAL;

	kirkwood_low_power();
	return 0;
}
/* Only the standby suspend state is supported by this driver */
static int kirkwood_pm_valid_standby(suspend_state_t state)
{
	return state == PM_SUSPEND_STANDBY;
}

static const struct platform_suspend_ops kirkwood_suspend_ops = {
	.enter = kirkwood_suspend_enter,
	.valid = kirkwood_pm_valid_standby,
};
/*
 * Map the DDR-operation and memory power-management control registers
 * and register the platform suspend operations.
 *
 * Returns 0 on success, or -ENOMEM when either register window cannot
 * be mapped (in which case no suspend ops are registered, avoiding a
 * NULL dereference in kirkwood_low_power() at suspend time).
 */
int __init kirkwood_pm_init(void)
{
	ddr_operation_base = ioremap(DDR_OPERATION_BASE, 4);
	if (!ddr_operation_base)
		return -ENOMEM;

	memory_pm_ctrl = ioremap(MEMORY_PM_CTRL_PHYS, 4);
	if (!memory_pm_ctrl) {
		iounmap(ddr_operation_base);
		ddr_operation_base = NULL;
		return -ENOMEM;
	}

	suspend_set_ops(&kirkwood_suspend_ops);
	return 0;
}
| gpl-2.0 |
rogersb11/android_kernel_samsung_smdk4x12 | net/tipc/bearer.c | 2391 | 17548 | /*
* net/tipc/bearer.c: TIPC bearer code
*
* Copyright (c) 1996-2006, Ericsson AB
* Copyright (c) 2004-2006, 2010-2011, Wind River Systems
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the names of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
#include "core.h"
#include "config.h"
#include "bearer.h"
#include "discover.h"
#define MAX_ADDR_STR 32
static struct media media_list[MAX_MEDIA];
static u32 media_count;
struct tipc_bearer tipc_bearers[MAX_BEARERS];
static void bearer_disable(struct tipc_bearer *b_ptr);
/**
* media_name_valid - validate media name
*
* Returns 1 if media name is valid, otherwise 0.
*/
/*
 * media_name_valid - check that a media name fits the limit and only
 * uses characters from tipc_alphabet.
 *
 * Returns 1 if media name is valid, otherwise 0.
 */
static int media_name_valid(const char *name)
{
	u32 len = strlen(name);

	/* name plus its terminating NUL must fit in TIPC_MAX_MEDIA_NAME */
	if (len >= TIPC_MAX_MEDIA_NAME)
		return 0;

	/* every character must belong to the TIPC alphabet */
	return strspn(name, tipc_alphabet) == len;
}
/**
* media_find - locates specified media object by name
*/
/*
 * media_find - locate a registered media object by name, or NULL.
 */
static struct media *media_find(const char *name)
{
	u32 i;

	for (i = 0; i < media_count; i++) {
		if (strcmp(media_list[i].name, name) == 0)
			return &media_list[i];
	}
	return NULL;
}
/**
* tipc_register_media - register a media type
*
* Bearers for this media type must be activated separately at a later stage.
*/
int tipc_register_media(u32 media_type,
			char *name,
			int (*enable)(struct tipc_bearer *),
			void (*disable)(struct tipc_bearer *),
			int (*send_msg)(struct sk_buff *,
					struct tipc_bearer *,
					struct tipc_media_addr *),
			char *(*addr2str)(struct tipc_media_addr *a,
					  char *str_buf, int str_size),
			struct tipc_media_addr *bcast_addr,
			const u32 bearer_priority,
			const u32 link_tolerance, /* [ms] */
			const u32 send_window_limit)
{
	struct media *m_ptr;
	u32 media_id;
	u32 i;
	int res = -EINVAL;

	write_lock_bh(&tipc_net_lock);

	/* registration is only accepted once the node is in network mode */
	if (tipc_mode != TIPC_NET_MODE) {
		warn("Media <%s> rejected, not in networked mode yet\n", name);
		goto exit;
	}
	if (!media_name_valid(name)) {
		warn("Media <%s> rejected, illegal name\n", name);
		goto exit;
	}
	if (!bcast_addr) {
		warn("Media <%s> rejected, no broadcast address\n", name);
		goto exit;
	}
	if ((bearer_priority < TIPC_MIN_LINK_PRI) ||
	    (bearer_priority > TIPC_MAX_LINK_PRI)) {
		warn("Media <%s> rejected, illegal priority (%u)\n", name,
		     bearer_priority);
		goto exit;
	}
	if ((link_tolerance < TIPC_MIN_LINK_TOL) ||
	    (link_tolerance > TIPC_MAX_LINK_TOL)) {
		warn("Media <%s> rejected, illegal tolerance (%u)\n", name,
		     link_tolerance);
		goto exit;
	}

	/* claim the next media_list slot; undo the claim on any failure */
	media_id = media_count++;
	if (media_id >= MAX_MEDIA) {
		warn("Media <%s> rejected, media limit reached (%u)\n", name,
		     MAX_MEDIA);
		media_count--;
		goto exit;
	}
	/* neither the type id nor the name may already be registered */
	for (i = 0; i < media_id; i++) {
		if (media_list[i].type_id == media_type) {
			warn("Media <%s> rejected, duplicate type (%u)\n", name,
			     media_type);
			media_count--;
			goto exit;
		}
		if (!strcmp(name, media_list[i].name)) {
			warn("Media <%s> rejected, duplicate name\n", name);
			media_count--;
			goto exit;
		}
	}

	/* fill in the claimed slot */
	m_ptr = &media_list[media_id];
	m_ptr->type_id = media_type;
	m_ptr->send_msg = send_msg;
	m_ptr->enable_bearer = enable;
	m_ptr->disable_bearer = disable;
	m_ptr->addr2str = addr2str;
	memcpy(&m_ptr->bcast_addr, bcast_addr, sizeof(*bcast_addr));
	strcpy(m_ptr->name, name);
	m_ptr->priority = bearer_priority;
	m_ptr->tolerance = link_tolerance;
	m_ptr->window = send_window_limit;
	res = 0;
exit:
	write_unlock_bh(&tipc_net_lock);
	return res;
}
/**
* tipc_media_addr_printf - record media address in print buffer
*/
void tipc_media_addr_printf(struct print_buf *pb, struct tipc_media_addr *a)
{
	struct media *m_ptr;
	u32 media_type;
	u32 i;

	media_type = ntohl(a->type);

	/* find the media that owns this address type */
	for (i = 0, m_ptr = media_list; i < media_count; i++, m_ptr++) {
		if (m_ptr->type_id == media_type)
			break;
	}

	if ((i < media_count) && (m_ptr->addr2str != NULL)) {
		/* media provides its own address formatter */
		char addr_str[MAX_ADDR_STR];

		tipc_printf(pb, "%s(%s)", m_ptr->name,
			    m_ptr->addr2str(a, addr_str, sizeof(addr_str)));
	} else {
		/* unknown media type: dump the raw address bytes in hex */
		unchar *addr = (unchar *)&a->dev_addr;

		tipc_printf(pb, "UNKNOWN(%u)", media_type);
		for (i = 0; i < (sizeof(*a) - sizeof(a->type)); i++)
			tipc_printf(pb, "-%02x", addr[i]);
	}
}
/**
* tipc_media_get_names - record names of registered media in buffer
*/
/*
 * tipc_media_get_names - build a reply buffer holding one TLV per
 * registered media name (NUL terminator included in each TLV).
 */
struct sk_buff *tipc_media_get_names(void)
{
	struct sk_buff *buf;
	int i;

	buf = tipc_cfg_reply_alloc(MAX_MEDIA * TLV_SPACE(TIPC_MAX_MEDIA_NAME));
	if (buf == NULL)
		return NULL;

	read_lock_bh(&tipc_net_lock);
	for (i = 0; i < media_count; i++)
		tipc_cfg_append_tlv(buf, TIPC_TLV_MEDIA_NAME,
				    media_list[i].name,
				    strlen(media_list[i].name) + 1);
	read_unlock_bh(&tipc_net_lock);

	return buf;
}
/**
* bearer_name_validate - validate & (optionally) deconstruct bearer name
* @name - ptr to bearer name string
* @name_parts - ptr to area for bearer name components (or NULL if not needed)
*
* Returns 1 if bearer name is valid, otherwise 0.
*/
static int bearer_name_validate(const char *name,
				struct bearer_name *name_parts)
{
	char name_copy[TIPC_MAX_BEARER_NAME];
	char *media_name;
	char *if_name;
	u32 media_len;
	u32 if_len;

	/* copy bearer name & ensure length is OK */
	name_copy[TIPC_MAX_BEARER_NAME - 1] = 0;
	/* need above in case non-Posix strncpy() doesn't pad with nulls */
	strncpy(name_copy, name, TIPC_MAX_BEARER_NAME);
	if (name_copy[TIPC_MAX_BEARER_NAME - 1] != 0)
		return 0;

	/* ensure all component parts of bearer name are present */
	media_name = name_copy;
	if_name = strchr(media_name, ':');
	if (if_name == NULL)
		return 0;
	/* split "media:interface" in place at the colon */
	*(if_name++) = 0;
	media_len = if_name - media_name;	/* includes terminating NUL */
	if_len = strlen(if_name) + 1;		/* includes terminating NUL */

	/* validate component parts of bearer name */
	if ((media_len <= 1) || (media_len > TIPC_MAX_MEDIA_NAME) ||
	    (if_len <= 1) || (if_len > TIPC_MAX_IF_NAME) ||
	    (strspn(media_name, tipc_alphabet) != (media_len - 1)) ||
	    (strspn(if_name, tipc_alphabet) != (if_len - 1)))
		return 0;

	/* return bearer name components, if necessary */
	if (name_parts) {
		strcpy(name_parts->media_name, media_name);
		strcpy(name_parts->if_name, if_name);
	}

	return 1;
}
/**
* bearer_find - locates bearer object with matching bearer name
*/
/*
 * bearer_find - locate an active bearer with the given name, or NULL.
 */
static struct tipc_bearer *bearer_find(const char *name)
{
	u32 i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_bearer *b = &tipc_bearers[i];

		if (b->active && (strcmp(b->name, name) == 0))
			return b;
	}
	return NULL;
}
/**
* tipc_bearer_find_interface - locates bearer object with matching interface name
*/
/*
 * tipc_bearer_find_interface - locate an active bearer whose interface
 * component (the part after the ':') matches @if_name, or NULL.
 */
struct tipc_bearer *tipc_bearer_find_interface(const char *if_name)
{
	u32 i;

	for (i = 0; i < MAX_BEARERS; i++) {
		struct tipc_bearer *b = &tipc_bearers[i];
		char *iface;

		if (!b->active)
			continue;
		/* bearer names have the form "media:interface" */
		iface = strchr(b->name, ':') + 1;
		if (strcmp(iface, if_name) == 0)
			return b;
	}
	return NULL;
}
/**
* tipc_bearer_get_names - record names of bearers in buffer
*/
/*
 * tipc_bearer_get_names - build a reply buffer of active bearer names,
 * grouped by the media they run over (media registration order).
 */
struct sk_buff *tipc_bearer_get_names(void)
{
	struct sk_buff *buf;
	int i, j;

	buf = tipc_cfg_reply_alloc(MAX_BEARERS * TLV_SPACE(TIPC_MAX_BEARER_NAME));
	if (buf == NULL)
		return NULL;

	read_lock_bh(&tipc_net_lock);
	for (i = 0; i < media_count; i++) {
		for (j = 0; j < MAX_BEARERS; j++) {
			struct tipc_bearer *b = &tipc_bearers[j];

			if (!b->active || (b->media != &media_list[i]))
				continue;
			tipc_cfg_append_tlv(buf, TIPC_TLV_BEARER_NAME,
					    b->name,
					    strlen(b->name) + 1);
		}
	}
	read_unlock_bh(&tipc_net_lock);

	return buf;
}
/* Add 'dest' to the bearer's node map and propagate the change to the
 * broadcast bearer ordering and the discovery object. */
void tipc_bearer_add_dest(struct tipc_bearer *b_ptr, u32 dest)
{
	tipc_nmap_add(&b_ptr->nodes, dest);
	tipc_bcbearer_sort();
	tipc_disc_add_dest(b_ptr->link_req);
}
/* Remove 'dest' from the bearer's node map and propagate the change to
 * the broadcast bearer ordering and the discovery object. */
void tipc_bearer_remove_dest(struct tipc_bearer *b_ptr, u32 dest)
{
	tipc_nmap_remove(&b_ptr->nodes, dest);
	tipc_bcbearer_sort();
	tipc_disc_remove_dest(b_ptr->link_req);
}
/*
* bearer_push(): Resolve bearer congestion. Force the waiting
* links to push out their unsent packets, one packet per link
* per iteration, until all packets are gone or congestion reoccurs.
* 'tipc_net_lock' is read_locked when this function is called
* bearer.lock must be taken before calling
* Returns binary true(1) ore false(0)
*/
static int bearer_push(struct tipc_bearer *b_ptr)
{
	u32 res = 0;
	struct link *ln, *tln;

	if (b_ptr->blocked)
		return 0;

	/* Round-robin over the congested links: one packet per link per
	 * pass, until all are drained or the bearer congests again
	 * (PUSH_FAILED from tipc_link_push_packet()). */
	while (!list_empty(&b_ptr->cong_links) && (res != PUSH_FAILED)) {
		list_for_each_entry_safe(ln, tln, &b_ptr->cong_links, link_list) {
			res = tipc_link_push_packet(ln);
			if (res == PUSH_FAILED)
				break;
			if (res == PUSH_FINISHED)
				/* link fully drained: back to active list */
				list_move_tail(&ln->link_list, &b_ptr->links);
		}
	}
	/* true iff no congested links remain */
	return list_empty(&b_ptr->cong_links);
}
/*
 * tipc_bearer_lock_push - push queued traffic under the bearer lock;
 * when the bearer is fully drained, also push the broadcast bearer.
 */
void tipc_bearer_lock_push(struct tipc_bearer *b_ptr)
{
	int all_sent;

	spin_lock_bh(&b_ptr->lock);
	all_sent = bearer_push(b_ptr);
	spin_unlock_bh(&b_ptr->lock);

	if (all_sent)
		tipc_bcbearer_push();
}
/*
* Interrupt enabling new requests after bearer congestion or blocking:
* See bearer_send().
*/
void tipc_continue(struct tipc_bearer *b_ptr)
{
	spin_lock_bh(&b_ptr->lock);
	b_ptr->continue_count++;
	/* schedule an asynchronous push of any traffic queued while the
	 * bearer was congested/blocked */
	if (!list_empty(&b_ptr->cong_links))
		tipc_k_signal((Handler)tipc_bearer_lock_push, (unsigned long)b_ptr);
	b_ptr->blocked = 0;
	spin_unlock_bh(&b_ptr->lock);
}
/*
* Schedule link for sending of messages after the bearer
* has been deblocked by 'continue()'. This method is called
* when somebody tries to send a message via this link while
* the bearer is congested. 'tipc_net_lock' is in read_lock here
* bearer.lock is busy
*/
/* Park the link on the bearer's congestion queue; caller must already
 * hold b_ptr->lock. */
static void tipc_bearer_schedule_unlocked(struct tipc_bearer *b_ptr, struct link *l_ptr)
{
	list_move_tail(&l_ptr->link_list, &b_ptr->cong_links);
}
/*
* Schedule link for sending of messages after the bearer
* has been deblocked by 'continue()'. This method is called
* when somebody tries to send a message via this link while
* the bearer is congested. 'tipc_net_lock' is in read_lock here,
* bearer.lock is free
*/
/* Locked wrapper around tipc_bearer_schedule_unlocked(). */
void tipc_bearer_schedule(struct tipc_bearer *b_ptr, struct link *l_ptr)
{
	spin_lock_bh(&b_ptr->lock);
	tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
	spin_unlock_bh(&b_ptr->lock);
}
/*
* tipc_bearer_resolve_congestion(): Check if there is bearer congestion,
* and if there is, try to resolve it before returning.
* 'tipc_net_lock' is read_locked when this function is called
*/
/*
 * tipc_bearer_resolve_congestion - try to drain a congested bearer.
 * Returns 1 when the bearer is (now) uncongested; otherwise queues
 * @l_ptr on the congestion list and returns 0.
 * 'tipc_net_lock' is read_locked when this function is called.
 */
int tipc_bearer_resolve_congestion(struct tipc_bearer *b_ptr, struct link *l_ptr)
{
	int uncongested;

	if (list_empty(&b_ptr->cong_links))
		return 1;

	spin_lock_bh(&b_ptr->lock);
	uncongested = bearer_push(b_ptr);
	if (!uncongested)
		tipc_bearer_schedule_unlocked(b_ptr, l_ptr);
	spin_unlock_bh(&b_ptr->lock);

	return uncongested ? 1 : 0;
}
/**
* tipc_bearer_congested - determines if bearer is currently congested
*/
/*
 * tipc_bearer_congested - determines if bearer is currently congested.
 * A blocked bearer always counts as congested; otherwise congestion is
 * re-checked (and possibly resolved) via the congestion list.
 */
int tipc_bearer_congested(struct tipc_bearer *b_ptr, struct link *l_ptr)
{
	if (unlikely(b_ptr->blocked))
		return 1;
	if (!list_empty(&b_ptr->cong_links))
		return tipc_bearer_resolve_congestion(b_ptr, l_ptr) ? 0 : 1;
	return 0;
}
/**
* tipc_enable_bearer - enable bearer with the given name
*/
int tipc_enable_bearer(const char *name, u32 disc_domain, u32 priority)
{
	struct tipc_bearer *b_ptr;
	struct media *m_ptr;
	struct bearer_name b_name;
	char addr_string[16];
	u32 bearer_id;
	u32 with_this_prio;
	u32 i;
	int res = -EINVAL;

	if (tipc_mode != TIPC_NET_MODE) {
		warn("Bearer <%s> rejected, not supported in standalone mode\n",
		     name);
		return -ENOPROTOOPT;
	}
	if (!bearer_name_validate(name, &b_name)) {
		warn("Bearer <%s> rejected, illegal name\n", name);
		return -EINVAL;
	}
	/* The discovery domain must either be a scope containing this node
	 * (any node in own cluster) or a specific node in own cluster;
	 * 'res' is cleared to 0 only when the domain is acceptable. */
	if (tipc_addr_domain_valid(disc_domain) &&
	    (disc_domain != tipc_own_addr)) {
		if (tipc_in_scope(disc_domain, tipc_own_addr)) {
			disc_domain = tipc_own_addr & TIPC_CLUSTER_MASK;
			res = 0;   /* accept any node in own cluster */
		} else if (in_own_cluster(disc_domain))
			res = 0;   /* accept specified node in own cluster */
	}
	if (res) {
		warn("Bearer <%s> rejected, illegal discovery domain\n", name);
		return -EINVAL;
	}
	if ((priority < TIPC_MIN_LINK_PRI ||
	     priority > TIPC_MAX_LINK_PRI) &&
	    (priority != TIPC_MEDIA_LINK_PRI)) {
		warn("Bearer <%s> rejected, illegal priority\n", name);
		return -EINVAL;
	}

	/* Re-arm the failure code: the domain check above cleared 'res' to
	 * 0, so without this every 'goto exit' rejection path below would
	 * report success (0) to the caller. */
	res = -EINVAL;

	write_lock_bh(&tipc_net_lock);

	m_ptr = media_find(b_name.media_name);
	if (!m_ptr) {
		warn("Bearer <%s> rejected, media <%s> not registered\n", name,
		     b_name.media_name);
		goto exit;
	}

	if (priority == TIPC_MEDIA_LINK_PRI)
		priority = m_ptr->priority;

restart:
	/* Find a free slot, scanning from the top so the lowest free index
	 * wins, while also checking for name clashes and making sure no
	 * more than two bearers share the same priority. */
	bearer_id = MAX_BEARERS;
	with_this_prio = 1;
	for (i = MAX_BEARERS; i-- != 0; ) {
		if (!tipc_bearers[i].active) {
			bearer_id = i;
			continue;
		}
		if (!strcmp(name, tipc_bearers[i].name)) {
			warn("Bearer <%s> rejected, already enabled\n", name);
			goto exit;
		}
		if ((tipc_bearers[i].priority == priority) &&
		    (++with_this_prio > 2)) {
			/* too many at this priority: retry one level lower */
			if (priority-- == 0) {
				warn("Bearer <%s> rejected, duplicate priority\n",
				     name);
				goto exit;
			}
			warn("Bearer <%s> priority adjustment required %u->%u\n",
			     name, priority + 1, priority);
			goto restart;
		}
	}
	if (bearer_id >= MAX_BEARERS) {
		warn("Bearer <%s> rejected, bearer limit reached (%u)\n",
		     name, MAX_BEARERS);
		goto exit;
	}

	b_ptr = &tipc_bearers[bearer_id];
	strcpy(b_ptr->name, name);
	res = m_ptr->enable_bearer(b_ptr);
	if (res) {
		warn("Bearer <%s> rejected, enable failure (%d)\n", name, -res);
		goto exit;
	}

	b_ptr->identity = bearer_id;
	b_ptr->media = m_ptr;
	b_ptr->net_plane = bearer_id + 'A';
	b_ptr->active = 1;
	b_ptr->priority = priority;
	INIT_LIST_HEAD(&b_ptr->cong_links);
	INIT_LIST_HEAD(&b_ptr->links);
	spin_lock_init(&b_ptr->lock);

	res = tipc_disc_create(b_ptr, &m_ptr->bcast_addr, disc_domain);
	if (res) {
		bearer_disable(b_ptr);
		warn("Bearer <%s> rejected, discovery object creation failed\n",
		     name);
		goto exit;
	}
	info("Enabled bearer <%s>, discovery domain %s, priority %u\n",
	     name, tipc_addr_string_fill(addr_string, disc_domain), priority);
exit:
	write_unlock_bh(&tipc_net_lock);
	return res;
}
/**
* tipc_block_bearer(): Block the bearer with the given name,
* and reset all its links
*/
int tipc_block_bearer(const char *name)
{
	struct tipc_bearer *b_ptr = NULL;
	struct link *l_ptr;
	struct link *temp_l_ptr;

	read_lock_bh(&tipc_net_lock);
	b_ptr = bearer_find(name);
	if (!b_ptr) {
		warn("Attempt to block unknown bearer <%s>\n", name);
		read_unlock_bh(&tipc_net_lock);
		return -EINVAL;
	}

	info("Blocking bearer <%s>\n", name);
	spin_lock_bh(&b_ptr->lock);
	b_ptr->blocked = 1;
	/* reset every link running over this bearer, taking each link's
	 * owning node lock around the reset */
	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
		struct tipc_node *n_ptr = l_ptr->owner;

		spin_lock_bh(&n_ptr->lock);
		tipc_link_reset(l_ptr);
		spin_unlock_bh(&n_ptr->lock);
	}
	spin_unlock_bh(&b_ptr->lock);
	read_unlock_bh(&tipc_net_lock);
	return 0;
}
/**
* bearer_disable -
*
* Note: This routine assumes caller holds tipc_net_lock.
*/
static void bearer_disable(struct tipc_bearer *b_ptr)
{
	struct link *l_ptr;
	struct link *temp_l_ptr;

	info("Disabling bearer <%s>\n", b_ptr->name);
	spin_lock_bh(&b_ptr->lock);
	b_ptr->blocked = 1;
	b_ptr->media->disable_bearer(b_ptr);
	/* delete every link that was running over this bearer */
	list_for_each_entry_safe(l_ptr, temp_l_ptr, &b_ptr->links, link_list) {
		tipc_link_delete(l_ptr);
	}
	if (b_ptr->link_req)
		tipc_disc_delete(b_ptr->link_req);
	spin_unlock_bh(&b_ptr->lock);
	/* wipe the slot so tipc_enable_bearer() can reuse it */
	memset(b_ptr, 0, sizeof(struct tipc_bearer));
}
/*
 * tipc_disable_bearer - disable the named bearer; -EINVAL if unknown.
 */
int tipc_disable_bearer(const char *name)
{
	struct tipc_bearer *b_ptr;
	int res = 0;

	write_lock_bh(&tipc_net_lock);
	b_ptr = bearer_find(name);
	if (b_ptr != NULL) {
		bearer_disable(b_ptr);
	} else {
		warn("Attempt to disable unknown bearer <%s>\n", name);
		res = -EINVAL;
	}
	write_unlock_bh(&tipc_net_lock);

	return res;
}
void tipc_bearer_stop(void)
{
u32 i;
for (i = 0; i < MAX_BEARERS; i++) {
if (tipc_bearers[i].active)
bearer_disable(&tipc_bearers[i]);
}
media_count = 0;
}
| gpl-2.0 |
Nyks45/Veno-M | lib/atomic64_test.c | 2903 | 3483 | /*
* Testsuite for atomic64_t functions
*
* Copyright © 2010 Luca Barbieri
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/atomic.h>
#define INIT(c) do { atomic64_set(&v, c); r = c; } while (0)
static __init int test_atomic64(void)
{
	/* Distinctive 64-bit patterns whose upper and lower halves differ,
	 * so truncation or half-swap bugs fail the equality checks below. */
	long long v0 = 0xaaa31337c001d00dLL;
	long long v1 = 0xdeadbeefdeafcafeLL;
	long long v2 = 0xfaceabadf00df001LL;
	long long onestwos = 0x1111111122222222LL;
	long long one = 1LL;

	atomic64_t v = ATOMIC64_INIT(v0);
	/* 'r' mirrors the expected counter value throughout the test */
	long long r = v0;
	BUG_ON(v.counter != r);

	/* set / read */
	atomic64_set(&v, v1);
	r = v1;
	BUG_ON(v.counter != r);
	BUG_ON(atomic64_read(&v) != r);

	/* add / add_return, with positive and negative deltas */
	INIT(v0);
	atomic64_add(onestwos, &v);
	r += onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_add(-one, &v);
	r += -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += onestwos;
	BUG_ON(atomic64_add_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r += -one;
	BUG_ON(atomic64_add_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	/* sub / sub_return */
	INIT(v0);
	atomic64_sub(onestwos, &v);
	r -= onestwos;
	BUG_ON(v.counter != r);

	INIT(v0);
	atomic64_sub(-one, &v);
	r -= -one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= onestwos;
	BUG_ON(atomic64_sub_return(onestwos, &v) != r);
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= -one;
	BUG_ON(atomic64_sub_return(-one, &v) != r);
	BUG_ON(v.counter != r);

	/* inc / inc_return */
	INIT(v0);
	atomic64_inc(&v);
	r += one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r += one;
	BUG_ON(atomic64_inc_return(&v) != r);
	BUG_ON(v.counter != r);

	/* dec / dec_return */
	INIT(v0);
	atomic64_dec(&v);
	r -= one;
	BUG_ON(v.counter != r);

	INIT(v0);
	r -= one;
	BUG_ON(atomic64_dec_return(&v) != r);
	BUG_ON(v.counter != r);

	/* xchg, and cmpxchg hit and miss cases */
	INIT(v0);
	BUG_ON(atomic64_xchg(&v, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(atomic64_cmpxchg(&v, v0, v1) != v0);
	r = v1;
	BUG_ON(v.counter != r);

	INIT(v0);
	/* compare value v2 does not match, so the counter must not change */
	BUG_ON(atomic64_cmpxchg(&v, v2, v1) != v0);
	BUG_ON(v.counter != r);

	/* add_unless: no-op (returns 0) when the counter equals 'u' */
	INIT(v0);
	BUG_ON(atomic64_add_unless(&v, one, v0));
	BUG_ON(v.counter != r);

	INIT(v0);
	BUG_ON(!atomic64_add_unless(&v, one, v1));
	r += one;
	BUG_ON(v.counter != r);

#ifdef CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
	/* dec_if_positive: always returns old-1, only stores when old > 0 */
	INIT(onestwos);
	BUG_ON(atomic64_dec_if_positive(&v) != (onestwos - 1));
	r -= one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_dec_if_positive(&v) != -one);
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(atomic64_dec_if_positive(&v) != (-one - one));
	BUG_ON(v.counter != r);
#else
#warning Please implement atomic64_dec_if_positive for your architecture and select the above Kconfig symbol
#endif

	/* inc_not_zero: increments unless the counter is exactly zero */
	INIT(onestwos);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

	INIT(0);
	BUG_ON(atomic64_inc_not_zero(&v));
	BUG_ON(v.counter != r);

	INIT(-one);
	BUG_ON(!atomic64_inc_not_zero(&v));
	r += one;
	BUG_ON(v.counter != r);

#ifdef CONFIG_X86
	printk(KERN_INFO "atomic64 test passed for %s platform %s CX8 and %s SSE\n",
#ifdef CONFIG_X86_64
	       "x86-64",
#elif defined(CONFIG_X86_CMPXCHG64)
	       "i586+",
#else
	       "i386+",
#endif
	       boot_cpu_has(X86_FEATURE_CX8) ? "with" : "without",
	       boot_cpu_has(X86_FEATURE_XMM) ? "with" : "without");
#else
	printk(KERN_INFO "atomic64 test passed\n");
#endif
	return 0;
}
core_initcall(test_atomic64);
| gpl-2.0 |
darkobas/android_kernel_lge_msm8974 | drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 3671 | 55348 | /**************************************************************************
*
* Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
**************************************************************************/
#include "vmwgfx_kms.h"
/* Might need a hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
struct vmw_clip_rect {
int x1, x2, y1, y2;
};
/**
* Clip @num_rects number of @rects against @clip storing the
* results in @out_rects and the number of passed rects in @out_num.
*/
void vmw_clip_cliprects(struct drm_clip_rect *rects,
			int num_rects,
			struct vmw_clip_rect clip,
			SVGASignedRect *out_rects,
			int *out_num)
{
	int i;
	int k = 0;

	for (i = 0; i < num_rects; i++) {
		/* intersect the rect with the clip region */
		int left   = max_t(int, clip.x1, rects[i].x1);
		int top    = max_t(int, clip.y1, rects[i].y1);
		int right  = min_t(int, clip.x2, rects[i].x2);
		int bottom = min_t(int, clip.y2, rects[i].y2);

		/* drop rects that became empty after clipping */
		if (left >= right || top >= bottom)
			continue;

		out_rects[k].left = left;
		out_rects[k].top = top;
		out_rects[k].right = right;
		out_rects[k].bottom = bottom;
		k++;
	}

	*out_num = k;
}
void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
	/* drop any cursor backing (surface or dma buffer) still attached */
	if (du->cursor_surface)
		vmw_surface_unreference(&du->cursor_surface);
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);
	drm_crtc_cleanup(&du->crtc);
	drm_encoder_cleanup(&du->encoder);
	drm_connector_cleanup(&du->connector);
}
/*
* Display Unit Cursor functions
*/
int vmw_cursor_update_image(struct vmw_private *dev_priv,
			    u32 *image, u32 width, u32 height,
			    u32 hotspotX, u32 hotspotY)
{
	/* fifo command immediately followed by the pixel payload */
	struct {
		u32 cmd;
		SVGAFifoCmdDefineAlphaCursor cursor;
	} *cmd;
	u32 image_size = width * height * 4;	/* 4 bytes per pixel */
	u32 cmd_size = sizeof(*cmd) + image_size;

	if (!image)
		return -EINVAL;

	cmd = vmw_fifo_reserve(dev_priv, cmd_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, sizeof(*cmd));

	/* pixel data lives directly after the command struct */
	memcpy(&cmd[1], image, image_size);

	cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
	cmd->cursor.id = cpu_to_le32(0);
	cmd->cursor.width = cpu_to_le32(width);
	cmd->cursor.height = cpu_to_le32(height);
	cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
	cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

	vmw_fifo_commit(dev_priv, cmd_size);

	return 0;
}
int vmw_cursor_update_dmabuf(struct vmw_private *dev_priv,
			     struct vmw_dma_buffer *dmabuf,
			     u32 width, u32 height,
			     u32 hotspotX, u32 hotspotY)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	void *virtual;
	bool dummy;
	int ret;

	kmap_offset = 0;
	/* number of pages covering width*height pixels at 4 bytes each */
	kmap_num = (width*height*4 + PAGE_SIZE - 1) >> PAGE_SHIFT;

	ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return -EINVAL;
	}

	ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	/* map the buffer and hand the pixels to the image-update path */
	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	ret = vmw_cursor_update_image(dev_priv, virtual, width, height,
				      hotspotX, hotspotY);

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(&dmabuf->base);

	return ret;
}
/*
 * vmw_cursor_update_position - write cursor visibility and position to
 * the device fifo registers and bump the cursor update counter.
 */
void vmw_cursor_update_position(struct vmw_private *dev_priv,
				bool show, int x, int y)
{
	__le32 __iomem *fifo_mem = dev_priv->mmio_virt;
	uint32_t count;

	iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
	iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
	iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);

	/* increment the counter so the device notices the change */
	count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT) + 1;
	iowrite32(count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}
int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
			   uint32_t handle, uint32_t width, uint32_t height)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *dmabuf = NULL;
	int ret;

	/* A lot of the code assumes this */
	if (handle && (width != 64 || height != 64))
		return -EINVAL;

	if (handle) {
		/* handle may name either a surface or a dma buffer */
		ret = vmw_user_lookup_handle(dev_priv, tfile,
					     handle, &surface, &dmabuf);
		if (ret) {
			DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
			return -EINVAL;
		}
	}

	/* need to do this before taking down old image */
	if (surface && !surface->snooper.image) {
		DRM_ERROR("surface not suitable for cursor\n");
		vmw_surface_unreference(&surface);
		return -EINVAL;
	}

	/* takedown old cursor */
	if (du->cursor_surface) {
		du->cursor_surface->snooper.crtc = NULL;
		vmw_surface_unreference(&du->cursor_surface);
	}
	if (du->cursor_dmabuf)
		vmw_dmabuf_unreference(&du->cursor_dmabuf);

	/* setup new image */
	if (surface) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_surface = surface;

		du->cursor_surface->snooper.crtc = crtc;
		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv, surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	} else if (dmabuf) {
		/* vmw_user_surface_lookup takes one reference */
		du->cursor_dmabuf = dmabuf;

		ret = vmw_cursor_update_dmabuf(dev_priv, dmabuf, width, height,
					       du->hotspot_x, du->hotspot_y);
	} else {
		/* handle == 0: hide the cursor */
		vmw_cursor_update_position(dev_priv, false, 0, 0);
		return 0;
	}

	vmw_cursor_update_position(dev_priv, true,
				   du->cursor_x + du->hotspot_x,
				   du->cursor_y + du->hotspot_y);

	return 0;
}
int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
struct vmw_private *dev_priv = vmw_priv(crtc->dev);
struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;
du->cursor_x = x + crtc->x;
du->cursor_y = y + crtc->y;
vmw_cursor_update_position(dev_priv, shown,
du->cursor_x + du->hotspot_x,
du->cursor_y + du->hotspot_y);
return 0;
}
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
			  struct ttm_object_file *tfile,
			  struct ttm_buffer_object *bo,
			  SVGA3dCmdHeader *header)
{
	struct ttm_bo_kmap_obj map;
	unsigned long kmap_offset;
	unsigned long kmap_num;
	SVGA3dCopyBox *box;
	unsigned box_count;
	void *virtual;
	bool dummy;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int i, ret;

	cmd = container_of(header, struct vmw_dma_cmd, header);

	/* No snooper installed */
	if (!srf->snooper.image)
		return;

	if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
		DRM_ERROR("face and mipmap for cursors should never != 0\n");
		return;
	}

	if (cmd->header.size < 64) {
		DRM_ERROR("at least one full copy box must be given\n");
		return;
	}

	/* the copy boxes follow the DMA command in the stream */
	box = (SVGA3dCopyBox *)&cmd[1];
	box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
			sizeof(SVGA3dCopyBox);

	/* only the simple case is snoopable: one page-aligned, unshifted,
	 * single-depth copy box */
	if (cmd->dma.guest.ptr.offset % PAGE_SIZE ||
	    box->x != 0 || box->y != 0 || box->z != 0 ||
	    box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
	    box->d != 1 || box_count != 1) {
		/* TODO handle none page aligned offsets */
		/* TODO handle more dst & src != 0 */
		/* TODO handle more then one copy */
		DRM_ERROR("Cant snoop dma request for cursor!\n");
		DRM_ERROR("(%u, %u, %u) (%u, %u, %u) (%ux%ux%u) %u %u\n",
			  box->srcx, box->srcy, box->srcz,
			  box->x, box->y, box->z,
			  box->w, box->h, box->d, box_count,
			  cmd->dma.guest.ptr.offset);
		return;
	}

	kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
	kmap_num = (64*64*4) >> PAGE_SHIFT;	/* 64x64 pixels, 4 bytes each */

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0)) {
		DRM_ERROR("reserve failed\n");
		return;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0))
		goto err_unreserve;

	virtual = ttm_kmap_obj_virtual(&map, &dummy);

	if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
		/* tightly packed: copy the whole image in one go */
		memcpy(srf->snooper.image, virtual, 64*64*4);
	} else {
		/* Image is unsigned pointer. */
		for (i = 0; i < box->h; i++)
			memcpy(srf->snooper.image + i * 64,
			       virtual + i * cmd->dma.guest.pitch,
			       box->w * 4);
	}

	srf->snooper.age++;

	/* we can't call this function from this function since execbuf has
	 * reserved fifo space.
	 *
	 * if (srf->snooper.crtc)
	 *	vmw_ldu_crtc_cursor_update_image(dev_priv,
	 *					 srf->snooper.image, 64, 64,
	 *					 du->hotspot_x, du->hotspot_y);
	 */

	ttm_bo_kunmap(&map);
err_unreserve:
	ttm_bo_unreserve(bo);
}
/*
 * vmw_kms_cursor_post_execbuf - refresh the device cursor image on any
 * display unit whose snooped cursor surface changed during execbuf.
 */
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;
	struct drm_crtc *crtc;

	mutex_lock(&dev->mode_config.mutex);
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		struct vmw_display_unit *du = vmw_crtc_to_du(crtc);

		if (!du->cursor_surface)
			continue;
		/* unchanged age means the snooped image was not rewritten */
		if (du->cursor_age == du->cursor_surface->snooper.age)
			continue;

		du->cursor_age = du->cursor_surface->snooper.age;
		vmw_cursor_update_image(dev_priv,
					du->cursor_surface->snooper.image,
					64, 64, du->hotspot_x, du->hotspot_y);
	}
	mutex_unlock(&dev->mode_config.mutex);
}
/*
* Generic framebuffer code
*/
int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
				  struct drm_file *file_priv,
				  unsigned int *handle)
{
	/* always reports handle 0; never fails */
	if (handle)
		*handle = 0;

	return 0;
}
/*
* Surface framebuffer code
*/
#define vmw_framebuffer_to_vfbs(x) \
container_of(x, struct vmw_framebuffer_surface, base.base)
struct vmw_framebuffer_surface {
struct vmw_framebuffer base;
struct vmw_surface *surface;
struct vmw_dma_buffer *buffer;
struct list_head head;
struct drm_master *master;
};
void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct vmw_master *vmaster = vmw_master(vfbs->master);

	/* unlink from the owning master's surface-fb list before teardown */
	mutex_lock(&vmaster->fb_surf_mutex);
	list_del(&vfbs->head);
	mutex_unlock(&vmaster->fb_surf_mutex);

	/* release all references held by the framebuffer object */
	drm_master_put(&vfbs->master);
	drm_framebuffer_cleanup(framebuffer);
	vmw_surface_unreference(&vfbs->surface);
	ttm_base_object_unref(&vfbs->base.user_obj);

	kfree(vfbs);
}
static int do_surface_dirty_sou(struct vmw_private *dev_priv,
				struct drm_file *file_priv,
				struct vmw_framebuffer *framebuffer,
				unsigned flags, unsigned color,
				struct drm_clip_rect *clips,
				unsigned num_clips, int inc,
				struct vmw_fence_obj **out_fence)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *clips_ptr;
	struct drm_clip_rect *tmp;
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, num_units;
	int ret = 0; /* silence warning */
	int left, right, top, bottom;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;
	SVGASignedRect *blits;

	/* collect all display units scanning out this framebuffer */
	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list,
			    head) {
		if (crtc->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	BUG_ON(!clips || !num_clips);

	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
	if (unlikely(tmp == NULL)) {
		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
		return -ENOMEM;
	}

	/* one command plus one blit rect per clip */
	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
	cmd = kzalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Temporary fifo memory alloc failed.\n");
		ret = -ENOMEM;
		goto out_free_tmp;
	}

	/* setup blits pointer */
	blits = (SVGASignedRect *)&cmd[1];

	/* initial clip region */
	left = clips->x1;
	right = clips->x2;
	top = clips->y1;
	bottom = clips->y2;

	/* skip the first clip rect */
	for (i = 1, clips_ptr = clips + inc;
	     i < num_clips; i++, clips_ptr += inc) {
		/* grow the bounding box to cover every clip rect */
		left = min_t(int, left, (int)clips_ptr->x1);
		right = max_t(int, right, (int)clips_ptr->x2);
		top = min_t(int, top, (int)clips_ptr->y1);
		bottom = max_t(int, bottom, (int)clips_ptr->y2);
	}

	/* only need to do this once */
	memset(cmd, 0, fifo_size);
	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));

	/* source rect is the bounding box of all clips */
	cmd->body.srcRect.left = left;
	cmd->body.srcRect.right = right;
	cmd->body.srcRect.top = top;
	cmd->body.srcRect.bottom = bottom;

	/* translate the clip rects to bounding-box-relative coordinates */
	clips_ptr = clips;
	for (i = 0; i < num_clips; i++, clips_ptr += inc) {
		tmp[i].x1 = clips_ptr->x1 - left;
		tmp[i].x2 = clips_ptr->x2 - left;
		tmp[i].y1 = clips_ptr->y1 - top;
		tmp[i].y2 = clips_ptr->y2 - top;
	}

	/* do per unit writing, reuse fifo for each */
	for (i = 0; i < num_units; i++) {
		struct vmw_display_unit *unit = units[i];
		struct vmw_clip_rect clip;
		int num;

		/* bounding box in this crtc's coordinates */
		clip.x1 = left - unit->crtc.x;
		clip.y1 = top - unit->crtc.y;
		clip.x2 = right - unit->crtc.x;
		clip.y2 = bottom - unit->crtc.y;

		/* skip any crtcs that misses the clip region */
		if (clip.x1 >= unit->crtc.mode.hdisplay ||
		    clip.y1 >= unit->crtc.mode.vdisplay ||
		    clip.x2 <= 0 || clip.y2 <= 0)
			continue;

		/*
		 * In order for the clip rects to be correctly scaled
		 * the src and dest rects needs to be the same size.
		 */
		cmd->body.destRect.left = clip.x1;
		cmd->body.destRect.right = clip.x2;
		cmd->body.destRect.top = clip.y1;
		cmd->body.destRect.bottom = clip.y2;

		/* create a clip rect of the crtc in dest coords */
		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
		clip.x1 = 0 - clip.x1;
		clip.y1 = 0 - clip.y1;

		/* need to reset sid as it is changed by execbuf */
		cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
		cmd->body.destScreenId = unit->unit;

		/* clip and write blits to cmd stream */
		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);

		/* if no cliprects hit skip this */
		if (num == 0)
			continue;

		/* only return the last fence */
		if (out_fence && *out_fence)
			vmw_fence_obj_unreference(out_fence);

		/* recalculate package length */
		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
					  fifo_size, 0, NULL, out_fence);

		if (unlikely(ret != 0))
			break;
	}

	kfree(cmd);
out_free_tmp:
	kfree(tmp);

	return ret;
}
/*
 * vmw_framebuffer_surface_dirty - .dirty callback for surface-backed fbs.
 *
 * Normalizes the clip-rect list (whole-fb rect when none is given,
 * dst-only stride for annotated copies) and hands it to
 * do_surface_dirty_sou() under the ttm read lock.
 *
 * Returns 0 on success or a negative error code.
 */
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
				  struct drm_file *file_priv,
				  unsigned flags, unsigned color,
				  struct drm_clip_rect *clips,
				  unsigned num_clips)
{
	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	struct vmw_framebuffer_surface *vfbs =
		vmw_framebuffer_to_vfbs(framebuffer);
	struct drm_clip_rect norect;
	int ret, inc = 1;

	/* Only the master that created this fb may flush it. */
	if (unlikely(vfbs->master != file_priv->master))
		return -EINVAL;

	/* Require ScreenObject support for 3D */
	if (!dev_priv->sou_priv)
		return -EINVAL;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!num_clips) {
		/* No rects supplied: dirty the whole framebuffer. */
		num_clips = 1;
		clips = &norect;
		norect.x1 = norect.y1 = 0;
		norect.x2 = framebuffer->width;
		norect.y2 = framebuffer->height;
	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
		/* Rects come in (src, dst) pairs; use only the dst rects. */
		num_clips /= 2;
		inc = 2; /* skip source rects */
	}

	ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base,
				   flags, color,
				   clips, num_clips, inc, NULL);

	ttm_read_unlock(&vmaster->lock);

	/* Fix: propagate errors from the blit instead of returning 0. */
	return ret;
}
/* drm_framebuffer_funcs vtable for surface-backed framebuffers. */
static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
	.destroy = vmw_framebuffer_surface_destroy,
	.dirty = vmw_framebuffer_surface_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_surface *surface,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd
*mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_surface *vfbs;
enum SVGA3dSurfaceFormat format;
struct vmw_master *vmaster = vmw_master(file_priv->master);
int ret;
/* 3D is only supported on HWv8 hosts which supports screen objects */
if (!dev_priv->sou_priv)
return -ENOSYS;
/*
* Sanity checks.
*/
/* Surface must be marked as a scanout. */
if (unlikely(!surface->scanout))
return -EINVAL;
if (unlikely(surface->mip_levels[0] != 1 ||
surface->num_sizes != 1 ||
surface->sizes[0].width < mode_cmd->width ||
surface->sizes[0].height < mode_cmd->height ||
surface->sizes[0].depth != 1)) {
DRM_ERROR("Incompatible surface dimensions "
"for requested mode.\n");
return -EINVAL;
}
switch (mode_cmd->depth) {
case 32:
format = SVGA3D_A8R8G8B8;
break;
case 24:
format = SVGA3D_X8R8G8B8;
break;
case 16:
format = SVGA3D_R5G6B5;
break;
case 15:
format = SVGA3D_A1R5G5B5;
break;
case 8:
format = SVGA3D_LUMINANCE8;
break;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
}
if (unlikely(format != surface->format)) {
DRM_ERROR("Invalid surface format for requested mode.\n");
return -EINVAL;
}
vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
if (!vfbs) {
ret = -ENOMEM;
goto out_err1;
}
ret = drm_framebuffer_init(dev, &vfbs->base.base,
&vmw_framebuffer_surface_funcs);
if (ret)
goto out_err2;
if (!vmw_surface_reference(surface)) {
DRM_ERROR("failed to reference surface %p\n", surface);
goto out_err3;
}
/* XXX get the first 3 from the surface info */
vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
vfbs->base.base.pitches[0] = mode_cmd->pitch;
vfbs->base.base.depth = mode_cmd->depth;
vfbs->base.base.width = mode_cmd->width;
vfbs->base.base.height = mode_cmd->height;
vfbs->surface = surface;
vfbs->base.user_handle = mode_cmd->handle;
vfbs->master = drm_master_get(file_priv->master);
mutex_lock(&vmaster->fb_surf_mutex);
list_add_tail(&vfbs->head, &vmaster->fb_surf);
mutex_unlock(&vmaster->fb_surf_mutex);
*out = &vfbs->base;
return 0;
out_err3:
drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
kfree(vfbs);
out_err1:
return ret;
}
/*
* Dmabuf framebuffer code
*/
/* Cast a struct drm_framebuffer back to its containing dmabuf fb. */
#define vmw_framebuffer_to_vfbd(x) \
	container_of(x, struct vmw_framebuffer_dmabuf, base.base)

/* Framebuffer backed by a vmw dma buffer (unreferenced on destroy). */
struct vmw_framebuffer_dmabuf {
	struct vmw_framebuffer base;
	struct vmw_dma_buffer *buffer;
};
/*
 * vmw_framebuffer_dmabuf_destroy - Tear down a dmabuf-backed fb.
 *
 * Unregisters the framebuffer from DRM, drops the references on the
 * backing dma buffer and the user object, then frees the wrapper.
 */
void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
	struct vmw_framebuffer_dmabuf *fb_dmabuf =
		vmw_framebuffer_to_vfbd(framebuffer);

	drm_framebuffer_cleanup(framebuffer);
	vmw_dmabuf_unreference(&fb_dmabuf->buffer);
	ttm_base_object_unref(&fb_dmabuf->base.user_obj);

	kfree(fb_dmabuf);
}
/*
 * do_dmabuf_dirty_ldu - Flush dirty rects on the legacy display unit.
 *
 * Reserves FIFO space for one SVGA_CMD_UPDATE per clip rectangle,
 * fills the commands in and commits them in a single batch.
 *
 * Returns 0 on success, -ENOMEM if the FIFO reservation fails.
 */
static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
			       struct vmw_framebuffer *framebuffer,
			       unsigned flags, unsigned color,
			       struct drm_clip_rect *clips,
			       unsigned num_clips, int increment)
{
	struct {
		uint32_t header;
		SVGAFifoCmdUpdate body;
	} *update;
	size_t bytes;
	int idx;

	/* One UPDATE command per clip rect, reserved in one go. */
	bytes = sizeof(*update) * num_clips;
	update = vmw_fifo_reserve(dev_priv, bytes);
	if (unlikely(update == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		return -ENOMEM;
	}

	memset(update, 0, bytes);
	for (idx = 0; idx < num_clips; idx++, clips += increment) {
		update[idx].header = cpu_to_le32(SVGA_CMD_UPDATE);
		update[idx].body.x = cpu_to_le32(clips->x1);
		update[idx].body.y = cpu_to_le32(clips->y1);
		update[idx].body.width = cpu_to_le32(clips->x2 - clips->x1);
		update[idx].body.height = cpu_to_le32(clips->y2 - clips->y1);
	}

	vmw_fifo_commit(dev_priv, bytes);

	return 0;
}
/*
 * do_dmabuf_define_gmrfb - Emit a DEFINE_GMRFB command describing the
 * dmabuf-backed framebuffer so subsequent blit commands can reference it.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, or the error
 * returned by vmw_execbuf_process().
 */
static int do_dmabuf_define_gmrfb(struct drm_file *file_priv,
				  struct vmw_private *dev_priv,
				  struct vmw_framebuffer *framebuffer)
{
	int depth = framebuffer->base.depth;
	size_t fifo_size;
	int ret;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;

	/* Emulate RGBA support, contrary to svga_reg.h this is not
	 * supported by hosts. This is only a problem if we are reading
	 * this value later and expecting what we uploaded back.
	 */
	if (depth == 32)
		depth = 24;

	fifo_size = sizeof(*cmd);
	/* kzalloc replaces the former kmalloc + memset pair. */
	cmd = kzalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
		return -ENOMEM;
	}

	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
	cmd->body.format.colorDepth = depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = framebuffer->base.pitches[0];
	cmd->body.ptr.gmrId = framebuffer->user_handle;
	cmd->body.ptr.offset = 0;

	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
				  fifo_size, 0, NULL, NULL);

	kfree(cmd);

	return ret;
}
/*
 * do_dmabuf_dirty_sou - Flush dirty rects of a dmabuf fb to screen objects.
 *
 * Defines the GMRFB once, then for each display unit scanning out
 * @framebuffer builds a batch of BLIT_GMRFB_TO_SCREEN commands (one
 * blit per clip rect that intersects that unit) and submits the batch
 * through the execbuf path.
 *
 * @out_fence: if non-NULL, receives the fence of the last submitted
 *             batch; fences from earlier units are unreferenced first.
 */
static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_framebuffer *framebuffer,
			       unsigned flags, unsigned color,
			       struct drm_clip_rect *clips,
			       unsigned num_clips, int increment,
			       struct vmw_fence_obj **out_fence)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *clips_ptr;
	int i, k, num_units, ret;
	struct drm_crtc *crtc;
	size_t fifo_size;

	struct {
		uint32_t header;
		SVGAFifoCmdBlitGMRFBToScreen body;
	} *blits;

	ret = do_dmabuf_define_gmrfb(file_priv, dev_priv, framebuffer);
	if (unlikely(ret != 0))
		return ret; /* define_gmrfb prints warnings */

	/* Worst case: every clip hits the unit. Buffer is reused per unit. */
	fifo_size = sizeof(*blits) * num_clips;
	blits = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(blits == NULL)) {
		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
		return -ENOMEM;
	}

	/* Collect display units currently scanning out this framebuffer. */
	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->fb != &framebuffer->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		int hit_num = 0;

		clips_ptr = clips;
		for (i = 0; i < num_clips; i++, clips_ptr += increment) {
			/* Clip coords relative to this crtc's origin. */
			int clip_x1 = clips_ptr->x1 - unit->crtc.x;
			int clip_y1 = clips_ptr->y1 - unit->crtc.y;
			int clip_x2 = clips_ptr->x2 - unit->crtc.x;
			int clip_y2 = clips_ptr->y2 - unit->crtc.y;
			int move_x, move_y;

			/* skip any crtcs that misses the clip region */
			if (clip_x1 >= unit->crtc.mode.hdisplay ||
			    clip_y1 >= unit->crtc.mode.vdisplay ||
			    clip_x2 <= 0 || clip_y2 <= 0)
				continue;

			/* clip size to crtc size */
			clip_x2 = min_t(int, clip_x2, unit->crtc.mode.hdisplay);
			clip_y2 = min_t(int, clip_y2, unit->crtc.mode.vdisplay);

			/* translate both src and dest to bring clip into screen */
			move_x = min_t(int, clip_x1, 0);
			move_y = min_t(int, clip_y1, 0);

			/* actual translate done here */
			blits[hit_num].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
			blits[hit_num].body.destScreenId = unit->unit;
			blits[hit_num].body.srcOrigin.x = clips_ptr->x1 - move_x;
			blits[hit_num].body.srcOrigin.y = clips_ptr->y1 - move_y;
			blits[hit_num].body.destRect.left = clip_x1 - move_x;
			blits[hit_num].body.destRect.top = clip_y1 - move_y;
			blits[hit_num].body.destRect.right = clip_x2;
			blits[hit_num].body.destRect.bottom = clip_y2;
			hit_num++;
		}

		/* no clips hit the crtc */
		if (hit_num == 0)
			continue;

		/* only return the last fence */
		if (out_fence && *out_fence)
			vmw_fence_obj_unreference(out_fence);

		/* Shrink the submission to the blits actually written. */
		fifo_size = sizeof(*blits) * hit_num;
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, blits,
					  fifo_size, 0, NULL, out_fence);

		if (unlikely(ret != 0))
			break;
	}

	kfree(blits);

	return ret;
}
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv,
unsigned flags, unsigned color,
struct drm_clip_rect *clips,
unsigned num_clips)
{
struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
struct vmw_master *vmaster = vmw_master(file_priv->master);
struct vmw_framebuffer_dmabuf *vfbd =
vmw_framebuffer_to_vfbd(framebuffer);
struct drm_clip_rect norect;
int ret, increment = 1;
ret = ttm_read_lock(&vmaster->lock, true);
if (unlikely(ret != 0))
return ret;
if (!num_clips) {
num_clips = 1;
clips = &norect;
norect.x1 = norect.y1 = 0;
norect.x2 = framebuffer->width;
norect.y2 = framebuffer->height;
} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
num_clips /= 2;
increment = 2;
}
if (dev_priv->ldu_priv) {
ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base,
flags, color,
clips, num_clips, increment);
} else {
ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
flags, color,
clips, num_clips, increment, NULL);
}
ttm_read_unlock(&vmaster->lock);
return ret;
}
/* drm_framebuffer_funcs vtable for dmabuf-backed framebuffers. */
static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
	.destroy = vmw_framebuffer_dmabuf_destroy,
	.dirty = vmw_framebuffer_dmabuf_dirty,
	.create_handle = vmw_framebuffer_create_handle,
};
/**
 * Pin the dmabuffer to the start of vram.
 *
 * Legacy display unit path only (BUG_ON if screen objects are active).
 * Overlays are paused around the move so they don't compete for VRAM
 * placement while the scanout buffer is relocated to offset 0.
 *
 * NOTE(review): a failed pin is only WARNed about and 0 is still
 * returned — presumably intentional best-effort; confirm with callers.
 */
static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	int ret;

	/* This code should not be used with screen objects */
	BUG_ON(dev_priv->sou_priv);

	vmw_overlay_pause_all(dev_priv);

	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);

	vmw_overlay_resume_all(dev_priv);

	WARN_ON(ret != 0);

	return 0;
}
/*
 * vmw_framebuffer_dmabuf_unpin - Release the pin taken by
 * vmw_framebuffer_dmabuf_pin().
 *
 * Returns 0 (with a warning) if there is no backing buffer, otherwise
 * the result of vmw_dmabuf_unpin().
 */
static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);

	/* Idiom fix: WARN_ON() returns the tested condition, so the old
	 * check-then-WARN pair collapses into a single statement. */
	if (WARN_ON(!vfbd->buffer))
		return 0;

	return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
}
static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
struct vmw_dma_buffer *dmabuf,
struct vmw_framebuffer **out,
const struct drm_mode_fb_cmd
*mode_cmd)
{
struct drm_device *dev = dev_priv->dev;
struct vmw_framebuffer_dmabuf *vfbd;
unsigned int requested_size;
int ret;
requested_size = mode_cmd->height * mode_cmd->pitch;
if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
DRM_ERROR("Screen buffer object size is too small "
"for requested mode.\n");
return -EINVAL;
}
/* Limited framebuffer color depth support for screen objects */
if (dev_priv->sou_priv) {
switch (mode_cmd->depth) {
case 32:
case 24:
/* Only support 32 bpp for 32 and 24 depth fbs */
if (mode_cmd->bpp == 32)
break;
DRM_ERROR("Invalid color depth/bbp: %d %d\n",
mode_cmd->depth, mode_cmd->bpp);
return -EINVAL;
case 16:
case 15:
/* Only support 16 bpp for 16 and 15 depth fbs */
if (mode_cmd->bpp == 16)
break;
DRM_ERROR("Invalid color depth/bbp: %d %d\n",
mode_cmd->depth, mode_cmd->bpp);
return -EINVAL;
default:
DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
return -EINVAL;
}
}
vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
if (!vfbd) {
ret = -ENOMEM;
goto out_err1;
}
ret = drm_framebuffer_init(dev, &vfbd->base.base,
&vmw_framebuffer_dmabuf_funcs);
if (ret)
goto out_err2;
if (!vmw_dmabuf_reference(dmabuf)) {
DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
goto out_err3;
}
vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
vfbd->base.base.pitches[0] = mode_cmd->pitch;
vfbd->base.base.depth = mode_cmd->depth;
vfbd->base.base.width = mode_cmd->width;
vfbd->base.base.height = mode_cmd->height;
if (!dev_priv->sou_priv) {
vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
}
vfbd->base.dmabuf = true;
vfbd->buffer = dmabuf;
vfbd->base.user_handle = mode_cmd->handle;
*out = &vfbd->base;
return 0;
out_err3:
drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
kfree(vfbd);
out_err1:
return ret;
}
/*
* Generic Kernel modesetting functions
*/
/*
 * vmw_kms_fb_create - DRM .fb_create hook.
 *
 * Resolves the handle in @mode_cmd2 to either a dma buffer or a
 * surface and wraps it in the matching vmw framebuffer type. A
 * reference on the underlying user object is held by the fb for its
 * lifetime so execbuf handle lookups keep working.
 *
 * Returns the new framebuffer or an ERR_PTR on failure.
 */
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
						 struct drm_file *file_priv,
						 struct drm_mode_fb_cmd2 *mode_cmd2)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_framebuffer *vfb = NULL;
	struct vmw_surface *surface = NULL;
	struct vmw_dma_buffer *bo = NULL;
	struct ttm_base_object *user_obj;
	struct drm_mode_fb_cmd mode_cmd;
	int ret;

	/* Translate the new-style fb cmd into the legacy layout used below. */
	mode_cmd.width = mode_cmd2->width;
	mode_cmd.height = mode_cmd2->height;
	mode_cmd.pitch = mode_cmd2->pitches[0];
	mode_cmd.handle = mode_cmd2->handles[0];
	drm_fb_get_bpp_depth(mode_cmd2->pixel_format, &mode_cmd.depth,
			     &mode_cmd.bpp);

	/**
	 * This code should be conditioned on Screen Objects not being used.
	 * If screen objects are used, we can allocate a GMR to hold the
	 * requested framebuffer.
	 */
	if (!vmw_kms_validate_mode_vram(dev_priv,
					mode_cmd.pitch,
					mode_cmd.height)) {
		DRM_ERROR("VRAM size is too small for requested mode.\n");
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * Take a reference on the user object of the resource
	 * backing the kms fb. This ensures that user-space handle
	 * lookups on that resource will always work as long as
	 * it's registered with a kms framebuffer. This is important,
	 * since vmw_execbuf_process identifies resources in the
	 * command stream using user-space handles.
	 */
	user_obj = ttm_base_object_lookup(tfile, mode_cmd.handle);
	if (unlikely(user_obj == NULL)) {
		DRM_ERROR("Could not locate requested kms frame buffer.\n");
		return ERR_PTR(-ENOENT);
	}

	/**
	 * End conditioned code.
	 */

	/* returns either a dmabuf or surface */
	ret = vmw_user_lookup_handle(dev_priv, tfile,
				     mode_cmd.handle,
				     &surface, &bo);
	if (ret)
		goto err_out;

	/* Create the new framebuffer depending one what we got back */
	if (bo)
		ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
						     &mode_cmd);
	else if (surface)
		ret = vmw_kms_new_framebuffer_surface(dev_priv, file_priv,
						      surface, &vfb, &mode_cmd);
	else
		BUG();

err_out:
	/* vmw_user_lookup_handle takes one ref so does new_fb */
	if (bo)
		vmw_dmabuf_unreference(&bo);
	if (surface)
		vmw_surface_unreference(&surface);

	if (ret) {
		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
		ttm_base_object_unref(&user_obj);
		return ERR_PTR(ret);
	} else
		vfb->user_obj = user_obj;

	return &vfb->base;
}
/* Mode-config vtable: routes userspace fb creation to vmw_kms_fb_create. */
static struct drm_mode_config_funcs vmw_kms_funcs = {
	.fb_create = vmw_kms_fb_create,
};
/*
 * vmw_kms_present - Present a surface on every screen-object display
 * unit currently scanning out @vfb.
 *
 * Computes the bounding box of @clips, translates the clips into
 * bounding-box-relative coordinates, then for each display unit clips
 * them against the crtc and submits one BLIT_SURFACE_TO_SCREEN command
 * (with trailing SVGASignedRect blits) via execbuf.
 *
 * NOTE(review): units[] is filled without checking num_units against
 * VMWGFX_NUM_DISPLAY_UNITS — assumes the crtc list never exceeds it.
 */
int vmw_kms_present(struct vmw_private *dev_priv,
		    struct drm_file *file_priv,
		    struct vmw_framebuffer *vfb,
		    struct vmw_surface *surface,
		    uint32_t sid,
		    int32_t destX, int32_t destY,
		    struct drm_vmw_rect *clips,
		    uint32_t num_clips)
{
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_clip_rect *tmp;
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, k, num_units;
	int ret = 0; /* silence warning */
	int left, right, top, bottom;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;
	SVGASignedRect *blits;

	/* Collect display units currently scanning out this framebuffer. */
	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->fb != &vfb->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	BUG_ON(surface == NULL);
	BUG_ON(!clips || !num_clips);

	tmp = kzalloc(sizeof(*tmp) * num_clips, GFP_KERNEL);
	if (unlikely(tmp == NULL)) {
		DRM_ERROR("Temporary cliprect memory alloc failed.\n");
		return -ENOMEM;
	}

	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
	cmd = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
		ret = -ENOMEM;
		goto out_free_tmp;
	}

	/* Bounding box of all supplied clip rects. */
	left = clips->x;
	right = clips->x + clips->w;
	top = clips->y;
	bottom = clips->y + clips->h;

	for (i = 1; i < num_clips; i++) {
		left = min_t(int, left, (int)clips[i].x);
		right = max_t(int, right, (int)clips[i].x + clips[i].w);
		top = min_t(int, top, (int)clips[i].y);
		bottom = max_t(int, bottom, (int)clips[i].y + clips[i].h);
	}

	/* only need to do this once */
	memset(cmd, 0, fifo_size);
	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);

	blits = (SVGASignedRect *)&cmd[1];

	cmd->body.srcRect.left = left;
	cmd->body.srcRect.right = right;
	cmd->body.srcRect.top = top;
	cmd->body.srcRect.bottom = bottom;

	/* Translate clips to bounding-box-relative coordinates. */
	for (i = 0; i < num_clips; i++) {
		tmp[i].x1 = clips[i].x - left;
		tmp[i].x2 = clips[i].x + clips[i].w - left;
		tmp[i].y1 = clips[i].y - top;
		tmp[i].y2 = clips[i].y + clips[i].h - top;
	}

	for (k = 0; k < num_units; k++) {
		struct vmw_display_unit *unit = units[k];
		struct vmw_clip_rect clip;
		int num;

		clip.x1 = left + destX - unit->crtc.x;
		clip.y1 = top + destY - unit->crtc.y;
		clip.x2 = right + destX - unit->crtc.x;
		clip.y2 = bottom + destY - unit->crtc.y;

		/* skip any crtcs that misses the clip region */
		if (clip.x1 >= unit->crtc.mode.hdisplay ||
		    clip.y1 >= unit->crtc.mode.vdisplay ||
		    clip.x2 <= 0 || clip.y2 <= 0)
			continue;

		/*
		 * In order for the clip rects to be correctly scaled
		 * the src and dest rects needs to be the same size.
		 */
		cmd->body.destRect.left = clip.x1;
		cmd->body.destRect.right = clip.x2;
		cmd->body.destRect.top = clip.y1;
		cmd->body.destRect.bottom = clip.y2;

		/* create a clip rect of the crtc in dest coords */
		clip.x2 = unit->crtc.mode.hdisplay - clip.x1;
		clip.y2 = unit->crtc.mode.vdisplay - clip.y1;
		clip.x1 = 0 - clip.x1;
		clip.y1 = 0 - clip.y1;

		/* need to reset sid as it is changed by execbuf */
		cmd->body.srcImage.sid = sid;
		cmd->body.destScreenId = unit->unit;

		/* clip and write blits to cmd stream */
		vmw_clip_cliprects(tmp, num_clips, clip, blits, &num);

		/* if no cliprects hit skip this */
		if (num == 0)
			continue;

		/* recalculate package length */
		fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num;
		cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
		ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
					  fifo_size, 0, NULL, NULL);

		if (unlikely(ret != 0))
			break;
	}

	kfree(cmd);
out_free_tmp:
	kfree(tmp);

	return ret;
}
/*
 * vmw_kms_readback - Read screen contents back into the dmabuf backing
 * @vfb.
 *
 * Emits one DEFINE_GMRFB followed by BLIT_SCREEN_TO_GMRFB commands —
 * one per (unit, clip) pair that actually intersects the unit — in a
 * single execbuf submission. @user_fence_rep, if supplied, receives
 * the resulting fence info.
 *
 * NOTE(review): units[] is filled without checking num_units against
 * VMWGFX_NUM_DISPLAY_UNITS — assumes the crtc list never exceeds it.
 */
int vmw_kms_readback(struct vmw_private *dev_priv,
		     struct drm_file *file_priv,
		     struct vmw_framebuffer *vfb,
		     struct drm_vmw_fence_rep __user *user_fence_rep,
		     struct drm_vmw_rect *clips,
		     uint32_t num_clips)
{
	struct vmw_framebuffer_dmabuf *vfbd =
		vmw_framebuffer_to_vfbd(&vfb->base);
	struct vmw_dma_buffer *dmabuf = vfbd->buffer;
	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
	struct drm_crtc *crtc;
	size_t fifo_size;
	int i, k, ret, num_units, blits_pos;

	struct {
		uint32_t header;
		SVGAFifoCmdDefineGMRFB body;
	} *cmd;
	struct {
		uint32_t header;
		SVGAFifoCmdBlitScreenToGMRFB body;
	} *blits;

	/* Collect display units currently scanning out this framebuffer. */
	num_units = 0;
	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
		if (crtc->fb != &vfb->base)
			continue;
		units[num_units++] = vmw_crtc_to_du(crtc);
	}

	BUG_ON(dmabuf == NULL);
	BUG_ON(!clips || !num_clips);

	/* take a safe guess at fifo size */
	fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
	cmd = kmalloc(fifo_size, GFP_KERNEL);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
		return -ENOMEM;
	}

	memset(cmd, 0, fifo_size);
	cmd->header = SVGA_CMD_DEFINE_GMRFB;
	cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
	cmd->body.format.colorDepth = vfb->base.depth;
	cmd->body.format.reserved = 0;
	cmd->body.bytesPerLine = vfb->base.pitches[0];
	cmd->body.ptr.gmrId = vfb->user_handle;
	cmd->body.ptr.offset = 0;

	/* Blit commands follow immediately after the GMRFB definition. */
	blits = (void *)&cmd[1];
	blits_pos = 0;
	for (i = 0; i < num_units; i++) {
		struct drm_vmw_rect *c = clips;
		for (k = 0; k < num_clips; k++, c++) {
			/* transform clip coords to crtc origin based coords */
			int clip_x1 = c->x - units[i]->crtc.x;
			int clip_x2 = c->x - units[i]->crtc.x + c->w;
			int clip_y1 = c->y - units[i]->crtc.y;
			int clip_y2 = c->y - units[i]->crtc.y + c->h;
			int dest_x = c->x;
			int dest_y = c->y;

			/* compensate for clipping, we negate
			 * a negative number and add that.
			 */
			if (clip_x1 < 0)
				dest_x += -clip_x1;
			if (clip_y1 < 0)
				dest_y += -clip_y1;

			/* clip */
			clip_x1 = max(clip_x1, 0);
			clip_y1 = max(clip_y1, 0);
			clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
			clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);

			/* and cull any rects that misses the crtc */
			if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
			    clip_y1 >= units[i]->crtc.mode.vdisplay ||
			    clip_x2 <= 0 || clip_y2 <= 0)
				continue;

			blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
			blits[blits_pos].body.srcScreenId = units[i]->unit;
			blits[blits_pos].body.destOrigin.x = dest_x;
			blits[blits_pos].body.destOrigin.y = dest_y;
			blits[blits_pos].body.srcRect.left = clip_x1;
			blits[blits_pos].body.srcRect.top = clip_y1;
			blits[blits_pos].body.srcRect.right = clip_x2;
			blits[blits_pos].body.srcRect.bottom = clip_y2;
			blits_pos++;
		}
	}
	/* reset size here and use calculated exact size from loops */
	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;

	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
				  0, user_fence_rep, NULL);

	kfree(cmd);

	return ret;
}
/*
 * vmw_kms_init - Set up DRM mode configuration.
 *
 * Initializes mode-config limits and tries the screen-object display
 * system first, falling back to the legacy display system if that
 * fails. Always returns 0.
 */
int vmw_kms_init(struct vmw_private *dev_priv)
{
	struct drm_device *dev = dev_priv->dev;

	drm_mode_config_init(dev);
	dev->mode_config.funcs = &vmw_kms_funcs;
	dev->mode_config.min_width = 1;
	dev->mode_config.min_height = 1;
	/* assumed largest fb size */
	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	/* Prefer screen objects; fall back to the legacy display path. */
	if (vmw_kms_init_screen_object_display(dev_priv) != 0)
		(void)vmw_kms_init_legacy_display_system(dev_priv);

	return 0;
}
/*
 * vmw_kms_close - Tear down the mode configuration and whichever
 * display system (screen objects or legacy) is active.
 */
int vmw_kms_close(struct vmw_private *dev_priv)
{
	/*
	 * Docs says we should take the lock before calling this function
	 * but since it destroys encoders and our destructor calls
	 * drm_encoder_cleanup which takes the lock we deadlock.
	 */
	drm_mode_config_cleanup(dev_priv->dev);

	if (!dev_priv->sou_priv)
		vmw_kms_close_legacy_display_system(dev_priv);
	else
		vmw_kms_close_screen_object_display(dev_priv);

	return 0;
}
/*
 * vmw_kms_cursor_bypass_ioctl - Set the cursor hotspot for one crtc or,
 * with DRM_VMW_CURSOR_BYPASS_ALL, for every display unit.
 *
 * Returns 0 on success, -EINVAL if the requested crtc does not exist.
 */
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_vmw_cursor_bypass_arg *arg = data;
	struct drm_mode_object *mode_obj;
	struct vmw_display_unit *du;
	struct drm_crtc *crtc;
	int ret = 0;

	mutex_lock(&dev->mode_config.mutex);

	if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {
		/* Apply the hotspot to every display unit. */
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			du = vmw_crtc_to_du(crtc);
			du->hotspot_x = arg->xhot;
			du->hotspot_y = arg->yhot;
		}
		goto out;
	}

	mode_obj = drm_mode_object_find(dev, arg->crtc_id,
					DRM_MODE_OBJECT_CRTC);
	if (!mode_obj) {
		ret = -EINVAL;
		goto out;
	}

	du = vmw_crtc_to_du(obj_to_crtc(mode_obj));
	du->hotspot_x = arg->xhot;
	du->hotspot_y = arg->yhot;

out:
	mutex_unlock(&dev->mode_config.mutex);

	return ret;
}
/*
 * vmw_kms_write_svga - Program the SVGA device for a new mode.
 *
 * Writes the pitch (via register or FIFO pitchlock, whichever is
 * available), then width, height and bpp, and reads SVGA_REG_DEPTH
 * back to verify the device accepted the requested depth.
 *
 * Returns 0 on success, -EINVAL if the read-back depth differs.
 */
int vmw_kms_write_svga(struct vmw_private *vmw_priv,
		       unsigned width, unsigned height, unsigned pitch,
		       unsigned bpp, unsigned depth)
{
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
	vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bpp);

	/* Verify the device accepted the requested depth. */
	if (vmw_read(vmw_priv, SVGA_REG_DEPTH) != depth) {
		DRM_ERROR("Invalid depth %u for %u bpp, host expects %u\n",
			  depth, bpp, vmw_read(vmw_priv, SVGA_REG_DEPTH));
		return -EINVAL;
	}

	return 0;
}
/*
 * vmw_kms_save_vga - Snapshot the SVGA register state (resolution, bpp,
 * pitchlock and, when supported, the per-display topology) so that
 * vmw_kms_restore_vga() can put it back later. Always returns 0.
 */
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
	vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
	vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_priv->vga_pitchlock =
			vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
						   SVGA_FIFO_PITCHLOCK);

	/* Without display topology support there is nothing more to save. */
	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	vmw_priv->num_displays = vmw_read(vmw_priv,
					  SVGA_REG_NUM_GUEST_DISPLAYS);

	if (vmw_priv->num_displays == 0)
		vmw_priv->num_displays = 1;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		/* Select display i, read its state, then deselect. */
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
		save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
		save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
		save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
		save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
		if (i == 0 && vmw_priv->num_displays == 1 &&
		    save->width == 0 && save->height == 0) {

			/*
			 * It should be fairly safe to assume that these
			 * values are uninitialized.
			 */
			save->width = vmw_priv->vga_width - save->pos_x;
			save->height = vmw_priv->vga_height - save->pos_y;
		}
	}

	return 0;
}
/*
 * vmw_kms_restore_vga - Write back the register state captured by
 * vmw_kms_save_vga(), including per-display topology when supported.
 * Always returns 0.
 */
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
	struct vmw_vga_topology_state *save;
	uint32_t i;

	vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
	vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
	vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
	if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
		vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
			  vmw_priv->vga_pitchlock);
	else if (vmw_fifo_have_pitchlock(vmw_priv))
		iowrite32(vmw_priv->vga_pitchlock,
			  vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

	/* Without display topology support there is nothing more to restore. */
	if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
		return 0;

	for (i = 0; i < vmw_priv->num_displays; ++i) {
		save = &vmw_priv->vga_save[i];
		/* Select display i, restore its state, then deselect. */
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
		vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
	}

	return 0;
}
/*
 * vmw_kms_validate_mode_vram - Check that a mode's scanout buffer
 * (pitch * height bytes) fits in the device's VRAM.
 *
 * The multiplication is done in 64 bits to avoid overflow.
 */
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
				uint32_t pitch,
				uint32_t height)
{
	u64 required = (u64)pitch * (u64)height;

	return required < (u64)dev_priv->vram_size;
}
/**
 * Function called by DRM code called with vbl_lock held.
 *
 * No vblank counter is implemented; always reports 0.
 */
u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
	return 0;
}
/**
 * Function called by DRM code called with vbl_lock held.
 *
 * Vblank interrupts are not supported; always returns -ENOSYS.
 */
int vmw_enable_vblank(struct drm_device *dev, int crtc)
{
	return -ENOSYS;
}
/**
 * Function called by DRM code called with vbl_lock held.
 *
 * No-op: there is no vblank machinery to disable.
 */
void vmw_disable_vblank(struct drm_device *dev, int crtc)
{
}
/*
 * Small shared kms functions.
 */

/*
 * vmw_du_update_layout - Apply a new GUI layout to all connectors.
 *
 * For every connector whose display unit index falls within @num,
 * copies the preferred size/position from @rects and marks it active;
 * all other connectors get an 800x600 default and are marked inactive.
 * Each connector's status is refreshed afterwards. Always returns 0.
 */
int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
			 struct drm_vmw_rect *rects)
{
	struct drm_device *dev = dev_priv->dev;
	struct vmw_display_unit *du;
	struct drm_connector *con;

	mutex_lock(&dev->mode_config.mutex);

	/* Note: the former "#if 0" debug dump of the layout was removed. */
	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		du = vmw_connector_to_du(con);
		if (num > du->unit) {
			/* This unit has an entry in the new layout. */
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
			du->gui_x = rects[du->unit].x;
			du->gui_y = rects[du->unit].y;
		} else {
			/* Not covered by the layout: fall back to defaults. */
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
		}
		con->status = vmw_du_connector_detect(con, true);
	}

	mutex_unlock(&dev->mode_config.mutex);

	return 0;
}
/*
 * vmw_du_page_flip - Page-flip implementation for screen-object crtcs.
 *
 * Points the crtc at @fb, issues a full-framebuffer dirty blit (dmabuf
 * or surface path as appropriate) and queues @event on the resulting
 * fence so userspace is signalled once the flip has taken effect.
 * On failure the previous framebuffer is restored.
 */
int vmw_du_page_flip(struct drm_crtc *crtc,
		     struct drm_framebuffer *fb,
		     struct drm_pending_vblank_event *event)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	struct drm_framebuffer *old_fb = crtc->fb;
	struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(fb);
	struct drm_file *file_priv = event->base.file_priv;
	struct vmw_fence_obj *fence = NULL;
	struct drm_clip_rect clips;
	int ret;

	/* require ScreenObject support for page flipping */
	if (!dev_priv->sou_priv)
		return -ENOSYS;

	if (!vmw_kms_screen_object_flippable(dev_priv, crtc))
		return -EINVAL;

	crtc->fb = fb;

	/* do a full screen dirty update */
	clips.x1 = clips.y1 = 0;
	clips.x2 = fb->width;
	clips.y2 = fb->height;

	if (vfb->dmabuf)
		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, vfb,
					  0, 0, &clips, 1, 1, &fence);
	else
		ret = do_surface_dirty_sou(dev_priv, file_priv, vfb,
					   0, 0, &clips, 1, 1, &fence);

	if (ret != 0)
		goto out_no_fence;
	/* A dirty update without a fence leaves nothing to signal on. */
	if (!fence) {
		ret = -EINVAL;
		goto out_no_fence;
	}

	ret = vmw_event_fence_action_queue(file_priv, fence,
					   &event->base,
					   &event->event.tv_sec,
					   &event->event.tv_usec,
					   true);

	/*
	 * No need to hold on to this now. The only cleanup
	 * we need to do if we fail is unref the fence.
	 */
	vmw_fence_obj_unreference(&fence);

	if (vmw_crtc_to_du(crtc)->is_implicit)
		vmw_kms_screen_object_update_implicit_fb(dev_priv, crtc);

	return ret;

out_no_fence:
	crtc->fb = old_fb;
	return ret;
}
/* No-op stub for the crtc .save hook. */
void vmw_du_crtc_save(struct drm_crtc *crtc)
{
}

/* No-op stub for the crtc .restore hook. */
void vmw_du_crtc_restore(struct drm_crtc *crtc)
{
}
/*
 * vmw_du_crtc_gamma_set - Upload a gamma ramp to the device palette.
 *
 * Writes the high byte of each 16-bit r/g/b entry into three
 * consecutive palette registers per slot, starting at
 * SVGA_PALETTE_BASE.
 */
void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
			   u16 *r, u16 *g, u16 *b,
			   uint32_t start, uint32_t size)
{
	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
	int idx;

	for (idx = 0; idx < size; idx++) {
		u32 base = SVGA_PALETTE_BASE + idx * 3;

		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", idx,
			  r[idx], g[idx], b[idx]);

		vmw_write(dev_priv, base + 0, r[idx] >> 8);
		vmw_write(dev_priv, base + 1, g[idx] >> 8);
		vmw_write(dev_priv, base + 2, b[idx] >> 8);
	}
}
/* No-op stub for the connector .dpms hook. */
void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
{
}

/* No-op stub for the connector .save hook. */
void vmw_du_connector_save(struct drm_connector *connector)
{
}

/* No-op stub for the connector .restore hook. */
void vmw_du_connector_restore(struct drm_connector *connector)
{
}
/*
 * vmw_du_connector_detect - Report a connector as connected when its
 * display unit index is below the device's display count and the unit
 * is marked as preferred-active.
 */
enum drm_connector_status
vmw_du_connector_detect(struct drm_connector *connector, bool force)
{
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	uint32_t num_displays;

	/* Register access is serialized by the hardware mutex. */
	mutex_lock(&dev_priv->hw_mutex);
	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
	mutex_unlock(&dev_priv->hw_mutex);

	if (du->unit < num_displays && du->pref_active)
		return connector_status_connected;

	return connector_status_disconnected;
}
/*
 * Built-in fallback mode list (zero-terminated). Offered to userspace
 * in addition to the preferred mode, filtered by max size and VRAM.
 */
static struct drm_display_mode vmw_kms_connector_builtin[] = {
	/* 640x480@60Hz */
	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
		   752, 800, 0, 480, 489, 492, 525, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 800x600@60Hz */
	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
		   968, 1056, 0, 600, 601, 605, 628, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1024x768@60Hz */
	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
		   1184, 1344, 0, 768, 771, 777, 806, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1152x864@75Hz */
	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
		   1344, 1600, 0, 864, 865, 868, 900, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x768@60Hz */
	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
		   1472, 1664, 0, 768, 771, 778, 798, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x800@60Hz */
	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
		   1480, 1680, 0, 800, 803, 809, 831, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
	/* 1280x960@60Hz */
	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
		   1488, 1800, 0, 960, 961, 964, 1000, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1280x1024@60Hz */
	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1360x768@60Hz */
	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
		   1536, 1792, 0, 768, 771, 777, 795, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1400x1050@60Hz (comment fixed: entry is 1400x1050, not 1440x1050) */
	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1440x900@60Hz */
	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
		   1672, 1904, 0, 900, 903, 909, 934, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1600x1200@60Hz */
	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1680x1050@60Hz */
	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1792x1344@60Hz */
	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1856x1392@60Hz (comment fixed: entry is 1856x1392, not 1853x1392) */
	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1200@60Hz */
	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 1920x1440@60Hz */
	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* 2560x1600@60Hz */
	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
	/* Terminate */
	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
};
/**
 * vmw_guess_mode_timing - Provide fake timings for a
 * 60Hz vrefresh mode.
 *
 * @mode: Pointer to a struct drm_display_mode with hdisplay and vdisplay
 * members filled in.
 */
static void vmw_guess_mode_timing(struct drm_display_mode *mode)
{
	/* Fabricate blanking intervals as fixed 50-unit steps. */
	const int step = 50;

	mode->hsync_start = mode->hdisplay + step;
	mode->hsync_end = mode->hsync_start + step;
	mode->htotal = mode->hsync_end + step;

	mode->vsync_start = mode->vdisplay + step;
	mode->vsync_end = mode->vsync_start + step;
	mode->vtotal = mode->vsync_end + step;

	/* Pixel clock (kHz) chosen so total * 60Hz matches: total * 6 / 100. */
	mode->clock = (u32)mode->htotal * (u32)mode->vtotal / 100 * 6;
	mode->vrefresh = drm_mode_vrefresh(mode);
}
/*
 * Populate the connector's probed-mode list: a "preferred" mode sized to
 * the display unit's preferred width/height plus any built-in modes that
 * fit within @max_width/@max_height and available VRAM. Returns 1 on
 * success, 0 on allocation failure.
 */
int vmw_du_connector_fill_modes(struct drm_connector *connector,
				uint32_t max_width, uint32_t max_height)
{
	struct vmw_display_unit *du = vmw_connector_to_du(connector);
	struct drm_device *dev = connector->dev;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_display_mode *mode = NULL;
	struct drm_display_mode *bmode;
	struct drm_display_mode prefmode = { DRM_MODE("preferred",
		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
	};
	int i;

	/* Add preferred mode */
	{
		mode = drm_mode_duplicate(dev, &prefmode);
		if (!mode)
			return 0;
		mode->hdisplay = du->pref_width;
		mode->vdisplay = du->pref_height;
		vmw_guess_mode_timing(mode);

		/* hdisplay * 2: presumably accounts for 16bpp-equivalent
		 * sizing in the VRAM check — confirm against the helper. */
		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
					       mode->vdisplay)) {
			drm_mode_probed_add(connector, mode);
		} else {
			drm_mode_destroy(dev, mode);
			mode = NULL;
		}

		/* Drop the previously cached preferred mode, if any. */
		if (du->pref_mode) {
			list_del_init(&du->pref_mode->head);
			drm_mode_destroy(dev, du->pref_mode);
		}

		/* mode might be null here, this is intended */
		du->pref_mode = mode;
	}

	/* Append built-in modes that pass the size and VRAM filters. */
	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
		bmode = &vmw_kms_connector_builtin[i];
		if (bmode->hdisplay > max_width ||
		    bmode->vdisplay > max_height)
			continue;

		if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
						bmode->vdisplay))
			continue;

		mode = drm_mode_duplicate(dev, bmode);
		if (!mode)
			return 0;
		mode->vrefresh = drm_mode_vrefresh(mode);

		drm_mode_probed_add(connector, mode);
	}

	/* Move the prefered mode first, help apps pick the right mode. */
	if (du->pref_mode)
		list_move(&du->pref_mode->head, &connector->probed_modes);

	drm_mode_connector_list_update(connector);

	return 1;
}
/* No connector properties are supported; accept and ignore all sets. */
int vmw_du_connector_set_property(struct drm_connector *connector,
				  struct drm_property *property,
				  uint64_t val)
{
	return 0;
}
/*
 * DRM_VMW_UPDATE_LAYOUT ioctl: update the GUI layout rectangles.
 *
 * With zero outputs a default 800x600 rect is applied. Otherwise the
 * user-supplied rects are copied in and validated against the mode
 * config limits before being handed to vmw_du_update_layout().
 *
 * Returns 0 on success, -ENOMEM/-EFAULT/-EINVAL on failure; runs under
 * the master's ttm read lock.
 */
int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_update_layout_arg *arg =
		(struct drm_vmw_update_layout_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	void __user *user_rects;
	struct drm_vmw_rect *rects;
	unsigned rects_size;
	int ret;
	int i;
	struct drm_mode_config *mode_config = &dev->mode_config;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	if (!arg->num_outputs) {
		/* No outputs requested: fall back to a single default rect. */
		struct drm_vmw_rect def_rect = {0, 0, 800, 600};
		vmw_du_update_layout(dev_priv, 1, &def_rect);
		goto out_unlock;
	}

	rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
	/* kcalloc checks the num_outputs * size multiplication for overflow. */
	rects = kcalloc(arg->num_outputs, sizeof(struct drm_vmw_rect),
			GFP_KERNEL);
	if (unlikely(!rects)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	user_rects = (void __user *)(unsigned long)arg->rects;
	ret = copy_from_user(rects, user_rects, rects_size);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to get rects.\n");
		ret = -EFAULT;
		goto out_free;
	}

	/* Reject any rect extending outside the mode-config bounds. */
	for (i = 0; i < arg->num_outputs; ++i) {
		if (rects[i].x < 0 ||
		    rects[i].y < 0 ||
		    rects[i].x + rects[i].w > mode_config->max_width ||
		    rects[i].y + rects[i].h > mode_config->max_height) {
			DRM_ERROR("Invalid GUI layout.\n");
			ret = -EINVAL;
			goto out_free;
		}
	}

	vmw_du_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
	kfree(rects);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
| gpl-2.0 |
thicklizard/ge-patches | arch/x86/kernel/apic/bigsmp_32.c | 4695 | 6594 | /*
* APIC driver for "bigsmp" xAPIC machines with more than 8 virtual CPUs.
*
* Drives the local APIC in "clustered mode".
*/
#include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/smp.h>
#include <asm/apicdef.h>
#include <asm/fixmap.h>
#include <asm/mpspec.h>
#include <asm/apic.h>
#include <asm/ipi.h>
/* Extract the physical APIC ID from bits 31:24 of the APIC ID register. */
static unsigned bigsmp_get_apic_id(unsigned long x)
{
	unsigned id = (unsigned)(x >> 24);

	return id & 0xFFu;
}
/* bigsmp always reports the APIC ID as registered. */
static int bigsmp_apic_id_registered(void)
{
	return 1;
}
/* Default interrupt-target mask: all online CPUs (or CPU 0 on UP). */
static const struct cpumask *bigsmp_target_cpus(void)
{
#ifdef CONFIG_SMP
	return cpu_online_mask;
#else
	return cpumask_of(0);
#endif
}
/* No APIC ID is ever considered "used" under bigsmp. */
static unsigned long bigsmp_check_apicid_used(physid_mask_t *map, int apicid)
{
	return 0;
}
/* Every APIC ID is treated as present under bigsmp. */
static unsigned long bigsmp_check_apicid_present(int bit)
{
	return 1;
}
static int bigsmp_early_logical_apicid(int cpu)
{
	/* on bigsmp, logical apicid is the same as physical */
	return early_per_cpu(x86_cpu_to_apicid, cpu);
}
/*
 * Build the new APIC_LDR value for @cpu: keep the non-ID bits of the
 * current LDR and splice in the CPU's BIOS-reported APIC ID.
 */
static inline unsigned long calculate_ldr(int cpu)
{
	unsigned long val, id;

	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
	id = per_cpu(x86_bios_cpu_apicid, cpu);
	val |= SET_APIC_LOGICAL_ID(id);

	return val;
}
/*
* Set up the logical destination ID.
*
* Intel recommends to set DFR, LDR and TPR before enabling
* an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
* document number 292116). So here it goes...
*/
/* Program DFR (flat model) and then LDR for the current CPU. */
static void bigsmp_init_apic_ldr(void)
{
	unsigned long val;
	int cpu = smp_processor_id();

	/* DFR must be set before LDR per Intel's init ordering (see above). */
	apic_write(APIC_DFR, APIC_DFR_FLAT);
	val = calculate_ldr(cpu);
	apic_write(APIC_LDR, val);
}
/* Log the routing mode chosen by this driver. */
static void bigsmp_setup_apic_routing(void)
{
	printk(KERN_INFO
		"Enabling APIC mode: Physflat. Using %d I/O APICs\n",
		nr_ioapics);
}
/* Map an MPS CPU number to its BIOS APIC ID, or BAD_APICID if out of range. */
static int bigsmp_cpu_present_to_apicid(int mps_cpu)
{
	if (mps_cpu < nr_cpu_ids)
		return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);

	return BAD_APICID;
}
static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
{
	/* For clustered we don't have a good way to do this yet - hack */
	physids_promote(0xFFL, retmap);
}
/* All physical APIC IDs are assumed present. */
static int bigsmp_check_phys_apicid_present(int phys_apicid)
{
	return 1;
}
/* As we are using single CPU as destination, pick only one CPU here */
static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
{
	int cpu = cpumask_first(cpumask);

	if (cpu < nr_cpu_ids)
		return cpu_physical_id(cpu);
	/* Empty mask (cpumask_first returned nr_cpu_ids): no valid target. */
	return BAD_APICID;
}
/*
 * Pick the APIC ID of the first online CPU in the intersection of
 * @cpumask and @andmask, or BAD_APICID if the intersection is empty.
 */
static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
			      const struct cpumask *andmask)
{
	int cpu;

	/*
	 * We're using fixed IRQ delivery, can only return one phys APIC ID.
	 * May as well be the first.
	 */
	for_each_cpu_and(cpu, cpumask, andmask) {
		if (cpumask_test_cpu(cpu, cpu_online_mask))
			return cpu_physical_id(cpu);
	}
	return BAD_APICID;
}
/* Derive the physical package ID by dropping the low index_msb ID bits. */
static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
{
	int pkg_id = cpuid_apic >> index_msb;

	return pkg_id;
}
/* Send @vector to each CPU in @mask, one physical-mode IPI at a time. */
static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
{
	default_send_IPI_mask_sequence_phys(mask, vector);
}
/* Send @vector to every online CPU except the sender. */
static void bigsmp_send_IPI_allbutself(int vector)
{
	default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
}
/* Send @vector to every online CPU, including the sender. */
static void bigsmp_send_IPI_all(int vector)
{
	bigsmp_send_IPI_mask(cpu_online_mask, vector);
}
static int dmi_bigsmp; /* can be set by dmi scanners */

/* DMI match callback: force bigsmp mode on matching HP systems. */
static int hp_ht_bigsmp(const struct dmi_system_id *d)
{
	printk(KERN_NOTICE "%s detected: force use of apic=bigsmp\n", d->ident);
	dmi_bigsmp = 1;

	return 0;
}
/* Systems known to need bigsmp; matched by BIOS vendor/version. */
static const struct dmi_system_id bigsmp_dmi_table[] = {
	{ hp_ht_bigsmp, "HP ProLiant DL760 G2",
		{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P44-"),
		}
	},

	{ hp_ht_bigsmp, "HP ProLiant DL740",
		{ DMI_MATCH(DMI_BIOS_VENDOR, "HP"),
			DMI_MATCH(DMI_BIOS_VERSION, "P47-"),
		}
	},
	{ } /* NULL entry stops DMI scanning */
};
/* Each CPU gets its own vector allocation domain (physical delivery). */
static void bigsmp_vector_allocation_domain(int cpu, struct cpumask *retmask)
{
	cpumask_clear(retmask);
	cpumask_set_cpu(cpu, retmask);
}
/*
 * Decide whether bigsmp should be used: either forced via def_to_bigsmp
 * or detected through the DMI quirk table.
 */
static int probe_bigsmp(void)
{
	if (def_to_bigsmp)
		dmi_bigsmp = 1;
	else
		dmi_check_system(bigsmp_dmi_table);

	return dmi_bigsmp;
}
/* The bigsmp APIC driver: physical, fixed-delivery routing. */
static struct apic apic_bigsmp = {

	.name				= "bigsmp",
	.probe				= probe_bigsmp,
	.acpi_madt_oem_check		= NULL,
	.apic_id_valid			= default_apic_id_valid,
	.apic_id_registered		= bigsmp_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	/* phys delivery to target CPU: */
	.irq_dest_mode			= 0,

	.target_cpus			= bigsmp_target_cpus,
	.disable_esr			= 1,
	.dest_logical			= 0,
	.check_apicid_used		= bigsmp_check_apicid_used,
	.check_apicid_present		= bigsmp_check_apicid_present,

	.vector_allocation_domain	= bigsmp_vector_allocation_domain,
	.init_apic_ldr			= bigsmp_init_apic_ldr,

	.ioapic_phys_id_map		= bigsmp_ioapic_phys_id_map,
	.setup_apic_routing		= bigsmp_setup_apic_routing,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= bigsmp_cpu_present_to_apicid,
	.apicid_to_cpu_present		= physid_set_mask_of_physid,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= bigsmp_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= bigsmp_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= bigsmp_get_apic_id,
	.set_apic_id			= NULL,
	.apic_id_mask			= 0xFF << 24,

	.cpu_mask_to_apicid		= bigsmp_cpu_mask_to_apicid,
	.cpu_mask_to_apicid_and		= bigsmp_cpu_mask_to_apicid_and,

	.send_IPI_mask			= bigsmp_send_IPI_mask,
	.send_IPI_mask_allbutself	= NULL,
	.send_IPI_allbutself		= bigsmp_send_IPI_allbutself,
	.send_IPI_all			= bigsmp_send_IPI_all,
	.send_IPI_self			= default_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,

	.wait_for_init_deassert		= default_wait_for_init_deassert,

	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= default_inquire_remote_apic,

	.read				= native_apic_mem_read,
	.write				= native_apic_mem_write,
	.icr_read			= native_apic_icr_read,
	.icr_write			= native_apic_icr_write,
	.wait_icr_idle			= native_apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_apic_wait_icr_idle,

	.x86_32_early_logical_apicid	= bigsmp_early_logical_apicid,
};
/*
 * Early boot hook: if bigsmp applies, install the driver and rewrite the
 * per-CPU logical APIC IDs that were recorded by the previous driver.
 */
void __init generic_bigsmp_probe(void)
{
	unsigned int cpu;

	if (!probe_bigsmp())
		return;

	apic = &apic_bigsmp;

	for_each_possible_cpu(cpu) {
		/* Skip CPUs whose logical APIC ID was never assigned. */
		if (early_per_cpu(x86_cpu_to_logical_apicid,
				  cpu) == BAD_APICID)
			continue;
		early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
			bigsmp_early_logical_apicid(cpu);
	}

	pr_info("Overriding APIC driver with %s\n", apic_bigsmp.name);
}
apic_driver(apic_bigsmp);
| gpl-2.0 |
TeamHorizon/android_kernel_samsung_hlte | drivers/net/ethernet/mellanox/mlx4/icm.c | 4951 | 10813 | /*
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
#include "icm.h"
#include "fw.h"
/*
* We allocate in as big chunks as we can, up to a maximum of 256 KB
* per chunk.
*/
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,	/* 256 KB max per allocation */
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18	/* 256 KB per table chunk */
};
/* Unmap (if mapped) and free the pages backing a non-coherent chunk. */
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}
/* Release the DMA-coherent buffers backing a coherent chunk. */
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}
/*
 * Free an entire ICM area: every chunk on its list (via the coherent or
 * page-based path, per @coherent) and then the icm struct itself.
 * A NULL @icm is a no-op.
 */
void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}
/*
 * Allocate 2^order pages and record them in @mem.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}
/*
 * Allocate a DMA-coherent buffer of 2^order pages and record it in @mem,
 * including its bus address and DMA length.
 * Returns 0 on success, -ENOMEM on failure.
 */
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	/* dma_alloc_coherent returns page-aligned memory; offset must be 0. */
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}
/*
 * Allocate @npages of ICM as a list of chunks, each chunk holding up to
 * MLX4_ICM_CHUNK_LEN scatterlist entries. Allocation starts at the
 * largest order (MLX4_ICM_ALLOC_SIZE) and backs off on failure.
 * Non-coherent chunks are DMA-mapped once full (or at the end).
 * Returns the new icm on success, NULL on failure (everything freed).
 */
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return NULL;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		/* Start a new chunk when the previous one filled up. */
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		/* Don't allocate more than what's still needed. */
		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask);

		if (ret) {
			/* Back off to a smaller order and retry. */
			if (--cur_order < 0)
				goto fail;
			else
				continue;
		}

		++chunk->npages;

		if (coherent)
			++chunk->nsg;
		else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
			/* Chunk full: DMA-map its scatterlist now. */
			chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
						chunk->npages,
						PCI_DMA_BIDIRECTIONAL);

			if (chunk->nsg <= 0)
				goto fail;
		}

		if (chunk->npages == MLX4_ICM_CHUNK_LEN)
			chunk = NULL;

		npages -= 1 << cur_order;
	}

	/* Map the final, partially-filled non-coherent chunk. */
	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}
/* Firmware command: map @icm into the device at virtual address @virt. */
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}
/* Firmware command: unmap @page_count ICM pages starting at @virt. */
static int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
/* Firmware command: map the auxiliary ICM area (no virtual address). */
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}
/* Firmware command: unmap the auxiliary ICM area. */
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX,
			MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
}
/*
 * Take a reference on the ICM chunk backing table entry @obj,
 * allocating and device-mapping the chunk on first use.
 * Returns 0 on success, -ENOMEM on failure; serialized by table->mutex.
 */
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
	/* Chunk index that covers @obj (num_obj assumed a power of two). */
	int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		/* Already allocated: just bump the refcount. */
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		/* Mapping failed: free the chunk we just allocated. */
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}
/*
 * Drop a reference on the ICM chunk backing table entry @obj; when the
 * count hits zero the chunk is unmapped from the device and freed.
 */
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
	int i;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}
/*
 * Return the kernel virtual address of table entry @obj (and optionally
 * its bus address via @dma_handle), or NULL if the backing chunk is not
 * allocated. Only valid for lowmem tables.
 */
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	/* Byte offset of @obj within the whole table, then split into
	 * chunk index and offset within the chunk. */
	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}
/*
 * Take references on all ICM chunks covering objects [start, end],
 * stepping one chunk at a time. On failure every reference taken so
 * far is released and the error from mlx4_table_get() is returned.
 */
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 int start, int end)
{
	int step = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int obj;
	int ret;

	for (obj = start; obj <= end; obj += step) {
		ret = mlx4_table_get(dev, table, obj);
		if (ret) {
			/* Unwind: drop every chunk acquired before this one. */
			for (obj -= step; obj >= start; obj -= step)
				mlx4_table_put(dev, table, obj);
			return ret;
		}
	}

	return 0;
}
/* Drop references on all ICM chunks covering objects [start, end]. */
void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  int start, int end)
{
	int i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}
/*
 * Initialize an ICM table of @nobj objects of @obj_size bytes, mapped
 * at device virtual address @virt. Chunks covering the first @reserved
 * objects are allocated, mapped, and pinned permanently (their refcount
 * never drops to zero). Returns 0 on success, -ENOMEM on failure with
 * everything unwound.
 */
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, int nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	/* Pre-allocate and pin the chunks covering the reserved objects. */
	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		/* The last chunk may cover less than a full chunk's worth. */
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	/* Unwind every chunk allocated/mapped so far. */
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	return -ENOMEM;
}
/* Unmap and free every remaining chunk of the table, then the chunk array. */
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table->icm);
}
| gpl-2.0 |
rutvik95/android_kernel_samsung_g7102 | fs/xfs/xfs_dir2_data.c | 5463 | 25278 | /*
* Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it would be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_da_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_dinode.h"
#include "xfs_inode.h"
#include "xfs_dir2_format.h"
#include "xfs_dir2_priv.h"
#include "xfs_error.h"
STATIC xfs_dir2_data_free_t *
xfs_dir2_data_freefind(xfs_dir2_data_hdr_t *hdr, xfs_dir2_data_unused_t *dup);
#ifdef DEBUG
/*
* Check the consistency of the data block.
* The input can also be a block-format directory.
* Pop an assert if we find anything bad.
*/
void
xfs_dir2_data_check(
	xfs_inode_t		*dp,		/* incore inode pointer */
	xfs_dabuf_t		*bp)		/* data block's buffer */
{
	xfs_dir2_dataptr_t	addr;		/* addr for leaf lookup */
	xfs_dir2_data_free_t	*bf;		/* bestfree table */
	xfs_dir2_block_tail_t	*btp=NULL;	/* block tail */
	int			count;		/* count of entries found */
	xfs_dir2_data_hdr_t	*hdr;		/* data block header */
	xfs_dir2_data_entry_t	*dep;		/* data entry */
	xfs_dir2_data_free_t	*dfp;		/* bestfree entry */
	xfs_dir2_data_unused_t	*dup;		/* unused entry */
	char			*endp;		/* end of useful data */
	int			freeseen;	/* mask of bestfrees seen */
	xfs_dahash_t		hash;		/* hash of current name */
	int			i;		/* leaf index */
	int			lastfree;	/* last entry was unused */
	xfs_dir2_leaf_entry_t	*lep=NULL;	/* block leaf entries */
	xfs_mount_t		*mp;		/* filesystem mount point */
	char			*p;		/* current data position */
	int			stale;		/* count of stale leaves */
	struct xfs_name		name;

	mp = dp->i_mount;
	hdr = bp->data;
	bf = hdr->bestfree;
	p = (char *)(hdr + 1);

	/* Block-format blocks carry the leaf entries in a tail section;
	 * data-format blocks use the whole directory block. */
	if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
		btp = xfs_dir2_block_tail_p(mp, hdr);
		lep = xfs_dir2_block_leaf_p(btp);
		endp = (char *)lep;
	} else {
		ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC));
		endp = (char *)hdr + mp->m_dirblksize;
	}

	count = lastfree = freeseen = 0;
	/*
	 * Account for zero bestfree entries.
	 */
	if (!bf[0].length) {
		ASSERT(!bf[0].offset);
		freeseen |= 1 << 0;
	}
	if (!bf[1].length) {
		ASSERT(!bf[1].offset);
		freeseen |= 1 << 1;
	}
	if (!bf[2].length) {
		ASSERT(!bf[2].offset);
		freeseen |= 1 << 2;
	}
	/* The bestfree table is kept sorted by descending length. */
	ASSERT(be16_to_cpu(bf[0].length) >= be16_to_cpu(bf[1].length));
	ASSERT(be16_to_cpu(bf[1].length) >= be16_to_cpu(bf[2].length));
	/*
	 * Loop over the data/unused entries.
	 */
	while (p < endp) {
		dup = (xfs_dir2_data_unused_t *)p;
		/*
		 * If it's unused, look for the space in the bestfree table.
		 * If we find it, account for that, else make sure it
		 * doesn't need to be there.
		 */
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			ASSERT(lastfree == 0);
			ASSERT(be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)) ==
			       (char *)dup - (char *)hdr);
			dfp = xfs_dir2_data_freefind(hdr, dup);
			if (dfp) {
				i = (int)(dfp - bf);
				ASSERT((freeseen & (1 << i)) == 0);
				freeseen |= 1 << i;
			} else {
				ASSERT(be16_to_cpu(dup->length) <=
				       be16_to_cpu(bf[2].length));
			}
			p += be16_to_cpu(dup->length);
			lastfree = 1;
			continue;
		}
		/*
		 * It's a real entry.  Validate the fields.
		 * If this is a block directory then make sure it's
		 * in the leaf section of the block.
		 * The linear search is crude but this is DEBUG code.
		 */
		dep = (xfs_dir2_data_entry_t *)p;
		ASSERT(dep->namelen != 0);
		ASSERT(xfs_dir_ino_validate(mp, be64_to_cpu(dep->inumber)) == 0);
		ASSERT(be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep)) ==
		       (char *)dep - (char *)hdr);
		count++;
		lastfree = 0;
		if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
			addr = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
				(xfs_dir2_data_aoff_t)
				((char *)dep - (char *)hdr));
			name.name = dep->name;
			name.len = dep->namelen;
			hash = mp->m_dirnameops->hashname(&name);
			for (i = 0; i < be32_to_cpu(btp->count); i++) {
				if (be32_to_cpu(lep[i].address) == addr &&
				    be32_to_cpu(lep[i].hashval) == hash)
					break;
			}
			ASSERT(i < be32_to_cpu(btp->count));
		}
		p += xfs_dir2_data_entsize(dep->namelen);
	}
	/*
	 * Need to have seen all the entries and all the bestfree slots.
	 */
	ASSERT(freeseen == 7);
	if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
		/* Leaf entries must be sorted by hash; count the stale ones. */
		for (i = stale = 0; i < be32_to_cpu(btp->count); i++) {
			if (lep[i].address ==
			    cpu_to_be32(XFS_DIR2_NULL_DATAPTR))
				stale++;
			if (i > 0)
				ASSERT(be32_to_cpu(lep[i].hashval) >= be32_to_cpu(lep[i - 1].hashval));
		}
		ASSERT(count == be32_to_cpu(btp->count) - be32_to_cpu(btp->stale));
		ASSERT(stale == be32_to_cpu(btp->stale));
	}
}
#endif
/*
* Given a data block and an unused entry from that block,
* return the bestfree entry if any that corresponds to it.
*/
STATIC xfs_dir2_data_free_t *
xfs_dir2_data_freefind(
	xfs_dir2_data_hdr_t	*hdr,		/* data block */
	xfs_dir2_data_unused_t	*dup)		/* data unused entry */
{
	xfs_dir2_data_free_t	*dfp;		/* bestfree entry */
	xfs_dir2_data_aoff_t	off;		/* offset value needed */
#if defined(DEBUG) && defined(__KERNEL__)
	int			matched;	/* matched the value */
	int			seenzero;	/* saw a 0 bestfree entry */
#endif

	off = (xfs_dir2_data_aoff_t)((char *)dup - (char *)hdr);
#if defined(DEBUG) && defined(__KERNEL__)
	/*
	 * Validate some consistency in the bestfree table.
	 * Check order, non-overlapping entries, and if we find the
	 * one we're looking for it has to be exact.
	 */
	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
	for (dfp = &hdr->bestfree[0], seenzero = matched = 0;
	     dfp < &hdr->bestfree[XFS_DIR2_DATA_FD_COUNT];
	     dfp++) {
		if (!dfp->offset) {
			/* Zero entries must come last and be fully zero. */
			ASSERT(!dfp->length);
			seenzero = 1;
			continue;
		}
		ASSERT(seenzero == 0);
		if (be16_to_cpu(dfp->offset) == off) {
			matched = 1;
			ASSERT(dfp->length == dup->length);
		} else if (off < be16_to_cpu(dfp->offset))
			ASSERT(off + be16_to_cpu(dup->length) <= be16_to_cpu(dfp->offset));
		else
			ASSERT(be16_to_cpu(dfp->offset) + be16_to_cpu(dfp->length) <= off);
		ASSERT(matched || be16_to_cpu(dfp->length) >= be16_to_cpu(dup->length));
		if (dfp > &hdr->bestfree[0])
			ASSERT(be16_to_cpu(dfp[-1].length) >= be16_to_cpu(dfp[0].length));
	}
#endif
	/*
	 * If this is smaller than the smallest bestfree entry,
	 * it can't be there since they're sorted.
	 */
	if (be16_to_cpu(dup->length) <
	    be16_to_cpu(hdr->bestfree[XFS_DIR2_DATA_FD_COUNT - 1].length))
		return NULL;
	/*
	 * Look at the three bestfree entries for our guy.
	 */
	for (dfp = &hdr->bestfree[0];
	     dfp < &hdr->bestfree[XFS_DIR2_DATA_FD_COUNT];
	     dfp++) {
		if (!dfp->offset)
			return NULL;
		if (be16_to_cpu(dfp->offset) == off)
			return dfp;
	}
	/*
	 * Didn't find it.  This only happens if there are duplicate lengths.
	 */
	return NULL;
}
/*
* Insert an unused-space entry into the bestfree table.
*/
xfs_dir2_data_free_t *			/* entry inserted */
xfs_dir2_data_freeinsert(
	xfs_dir2_data_hdr_t	*hdr,		/* data block pointer */
	xfs_dir2_data_unused_t	*dup,		/* unused space */
	int			*loghead)	/* log the data header (out) */
{
	xfs_dir2_data_free_t	*dfp;		/* bestfree table pointer */
	xfs_dir2_data_free_t	new;		/* new bestfree entry */

#ifdef __KERNEL__
	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
#endif
	dfp = hdr->bestfree;
	new.length = dup->length;
	new.offset = cpu_to_be16((char *)dup - (char *)hdr);

	/*
	 * Insert at position 0, 1, or 2; or not at all.
	 * Existing entries shift down to keep descending-length order;
	 * the smallest entry falls off the end of the 3-slot table.
	 */
	if (be16_to_cpu(new.length) > be16_to_cpu(dfp[0].length)) {
		dfp[2] = dfp[1];
		dfp[1] = dfp[0];
		dfp[0] = new;
		*loghead = 1;
		return &dfp[0];
	}
	if (be16_to_cpu(new.length) > be16_to_cpu(dfp[1].length)) {
		dfp[2] = dfp[1];
		dfp[1] = new;
		*loghead = 1;
		return &dfp[1];
	}
	if (be16_to_cpu(new.length) > be16_to_cpu(dfp[2].length)) {
		dfp[2] = new;
		*loghead = 1;
		return &dfp[2];
	}
	/* Too small for the table: caller gets NULL, nothing logged. */
	return NULL;
}
/*
* Remove a bestfree entry from the table.
*/
STATIC void
xfs_dir2_data_freeremove(
	xfs_dir2_data_hdr_t	*hdr,		/* data block header */
	xfs_dir2_data_free_t	*dfp,		/* bestfree entry pointer */
	int			*loghead)	/* out: log data header */
{
#ifdef __KERNEL__
	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
#endif
	/*
	 * It's the first entry, slide the next 2 up.
	 */
	if (dfp == &hdr->bestfree[0]) {
		hdr->bestfree[0] = hdr->bestfree[1];
		hdr->bestfree[1] = hdr->bestfree[2];
	}
	/*
	 * It's the second entry, slide the 3rd entry up.
	 */
	else if (dfp == &hdr->bestfree[1])
		hdr->bestfree[1] = hdr->bestfree[2];
	/*
	 * Must be the last entry.
	 */
	else
		ASSERT(dfp == &hdr->bestfree[2]);
	/*
	 * Clear the 3rd entry, must be zero now.
	 */
	hdr->bestfree[2].length = 0;
	hdr->bestfree[2].offset = 0;
	*loghead = 1;
}
/*
 * Given a data block, reconstruct its bestfree map.
 *
 * Clears hdr->bestfree and walks every entry in the block, re-inserting
 * each unused entry.  Used when incremental bestfree maintenance is no
 * longer reliable.  *loghead is set so the caller logs the header.
 *
 * @mp:      filesystem mount point (block size / block-tail lookup)
 * @hdr:     data block header to scan
 * @loghead: out: set to 1, the rewritten header must be logged
 */
void
xfs_dir2_data_freescan(
	xfs_mount_t		*mp,		/* filesystem mount point */
	xfs_dir2_data_hdr_t	*hdr,		/* data block header */
	int			*loghead)	/* out: log data header */
{
	xfs_dir2_block_tail_t	*btp;		/* block tail */
	xfs_dir2_data_entry_t	*dep;		/* active data entry */
	xfs_dir2_data_unused_t	*dup;		/* unused data entry */
	char			*endp;		/* end of block's data */
	char			*p;		/* current entry pointer */
#ifdef __KERNEL__
	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
#endif
	/*
	 * Start by clearing the table.
	 */
	memset(hdr->bestfree, 0, sizeof(hdr->bestfree));
	*loghead = 1;
	/*
	 * Set up pointers.  In a block-format directory the leaf array
	 * at the tail is not part of the data area.
	 */
	p = (char *)(hdr + 1);
	if (hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC)) {
		btp = xfs_dir2_block_tail_p(mp, hdr);
		endp = (char *)xfs_dir2_block_leaf_p(btp);
	} else
		endp = (char *)hdr + mp->m_dirblksize;
	/*
	 * Loop over the block's entries.
	 */
	while (p < endp) {
		dup = (xfs_dir2_data_unused_t *)p;
		/*
		 * If it's a free entry, insert it.
		 */
		if (be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG) {
			ASSERT((char *)dup - (char *)hdr ==
			       be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
			xfs_dir2_data_freeinsert(hdr, dup, loghead);
			p += be16_to_cpu(dup->length);
		}
		/*
		 * For active entries, check their tags and skip them.
		 */
		else {
			dep = (xfs_dir2_data_entry_t *)p;
			ASSERT((char *)dep - (char *)hdr ==
			       be16_to_cpu(*xfs_dir2_data_entry_tag_p(dep)));
			p += xfs_dir2_data_entsize(dep->namelen);
		}
	}
}
/*
 * Initialize a data block at the given block number in the directory.
 * The whole area past the header becomes one unused entry, recorded
 * in bestfree[0].  Give back the buffer for the created block.
 */
int						/* error */
xfs_dir2_data_init(
	xfs_da_args_t		*args,		/* directory operation args */
	xfs_dir2_db_t		blkno,		/* logical dir block number */
	xfs_dabuf_t		**bpp)		/* output block buffer */
{
	xfs_dabuf_t		*bp;		/* block buffer */
	xfs_dir2_data_hdr_t	*hdr;		/* data block header */
	xfs_inode_t		*dp;		/* incore directory inode */
	xfs_dir2_data_unused_t	*dup;		/* unused entry pointer */
	xfs_mount_t		*mp;		/* filesystem mount point */
	xfs_trans_t		*tp;		/* transaction pointer */
	int			error;		/* error return value */
	int			slot;		/* bestfree index */
	int			freelen;	/* length of free body */
	dp = args->dp;
	mp = dp->i_mount;
	tp = args->trans;
	/*
	 * Get the buffer set up for the block.
	 */
	error = xfs_da_get_buf(tp, dp, xfs_dir2_db_to_da(mp, blkno), -1, &bp,
		XFS_DATA_FORK);
	if (error)
		return error;
	ASSERT(bp != NULL);
	/*
	 * Initialize the header; bestfree[0] covers the whole body,
	 * the remaining slots start out empty.
	 */
	hdr = bp->data;
	hdr->magic = cpu_to_be32(XFS_DIR2_DATA_MAGIC);
	hdr->bestfree[0].offset = cpu_to_be16(sizeof(*hdr));
	for (slot = 1; slot < XFS_DIR2_DATA_FD_COUNT; slot++) {
		hdr->bestfree[slot].length = 0;
		hdr->bestfree[slot].offset = 0;
	}
	/*
	 * Set up a single unused entry spanning the block's body,
	 * with its tail tag pointing back at its own offset.
	 */
	freelen = mp->m_dirblksize - (uint)sizeof(*hdr);
	hdr->bestfree[0].length = cpu_to_be16(freelen);
	dup = (xfs_dir2_data_unused_t *)(hdr + 1);
	dup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
	dup->length = cpu_to_be16(freelen);
	*xfs_dir2_data_unused_tag_p(dup) = cpu_to_be16((char *)dup - (char *)hdr);
	/*
	 * Log it and return it.
	 */
	xfs_dir2_data_log_header(tp, bp);
	xfs_dir2_data_log_unused(tp, bp, dup);
	*bpp = bp;
	return 0;
}
/*
 * Log an active data entry from the block: everything from the start
 * of the entry through its tail tag word.
 */
void
xfs_dir2_data_log_entry(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_dabuf_t		*bp,		/* block buffer */
	xfs_dir2_data_entry_t	*dep)		/* data entry pointer */
{
	xfs_dir2_data_hdr_t	*hdr = bp->data;
	uint			first;		/* first logged byte */
	uint			last;		/* last logged byte */
	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
	first = (uint)((char *)dep - (char *)hdr);
	last = (uint)((char *)(xfs_dir2_data_entry_tag_p(dep) + 1) -
		      (char *)hdr - 1);
	xfs_da_log_buf(tp, bp, first, last);
}
/*
 * Log a data block header.  The header always sits at offset 0.
 */
void
xfs_dir2_data_log_header(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_dabuf_t		*bp)		/* block buffer */
{
	xfs_dir2_data_hdr_t	*hdr = bp->data;
	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
	xfs_da_log_buf(tp, bp, 0, (uint)(sizeof(*hdr) - 1));
}
/*
 * Log a data unused entry.  Only the head (freetag + length) and the
 * tail tag word are meaningful; the bytes between them are not logged.
 */
void
xfs_dir2_data_log_unused(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_dabuf_t		*bp,		/* block buffer */
	xfs_dir2_data_unused_t	*dup)		/* data unused pointer */
{
	xfs_dir2_data_hdr_t	*hdr = bp->data;
	uint			start;		/* first byte of a range */
	uint			end;		/* last byte of a range */
	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
	/*
	 * Log the first part of the unused entry.
	 */
	start = (uint)((char *)dup - (char *)hdr);
	end = (uint)((char *)&dup->length + sizeof(dup->length) -
		     1 - (char *)hdr);
	xfs_da_log_buf(tp, bp, start, end);
	/*
	 * Log the end (tag) of the unused entry.
	 */
	start = (uint)((char *)xfs_dir2_data_unused_tag_p(dup) - (char *)hdr);
	end = start + (uint)(sizeof(xfs_dir2_data_off_t) - 1);
	xfs_da_log_buf(tp, bp, start, end);
}
/*
 * Make a byte range in the data block unused.
 * Its current contents are unimportant.
 *
 * The freed range is coalesced with any adjacent unused entries and
 * the bestfree table is updated in place when that can be done
 * reliably; otherwise *needscanp is set and the caller must rebuild
 * the table with xfs_dir2_data_freescan().
 */
void
xfs_dir2_data_make_free(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_dabuf_t		*bp,		/* block buffer */
	xfs_dir2_data_aoff_t	offset,		/* starting byte offset */
	xfs_dir2_data_aoff_t	len,		/* length in bytes */
	int			*needlogp,	/* out: log header */
	int			*needscanp)	/* out: regen bestfree */
{
	xfs_dir2_data_hdr_t	*hdr;		/* data block pointer */
	xfs_dir2_data_free_t	*dfp;		/* bestfree pointer */
	char			*endptr;	/* end of data area */
	xfs_mount_t		*mp;		/* filesystem mount point */
	int			needscan;	/* need to regen bestfree */
	xfs_dir2_data_unused_t	*newdup;	/* new unused entry */
	xfs_dir2_data_unused_t	*postdup;	/* unused entry after us */
	xfs_dir2_data_unused_t	*prevdup;	/* unused entry before us */
	mp = tp->t_mountp;
	hdr = bp->data;
	/*
	 * Figure out where the end of the data area is.
	 */
	if (hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC))
		endptr = (char *)hdr + mp->m_dirblksize;
	else {
		xfs_dir2_block_tail_t	*btp;	/* block tail */
		ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
		btp = xfs_dir2_block_tail_p(mp, hdr);
		endptr = (char *)xfs_dir2_block_leaf_p(btp);
	}
	/*
	 * If this isn't the start of the block, then back up to
	 * the previous entry and see if it's free.  The tag word just
	 * ahead of us holds that entry's offset within the block.
	 */
	if (offset > sizeof(*hdr)) {
		__be16			*tagp;	/* tag just before us */
		tagp = (__be16 *)((char *)hdr + offset) - 1;
		prevdup = (xfs_dir2_data_unused_t *)((char *)hdr + be16_to_cpu(*tagp));
		if (be16_to_cpu(prevdup->freetag) != XFS_DIR2_DATA_FREE_TAG)
			prevdup = NULL;
	} else
		prevdup = NULL;
	/*
	 * If this isn't the end of the block, see if the entry after
	 * us is free.
	 */
	if ((char *)hdr + offset + len < endptr) {
		postdup =
			(xfs_dir2_data_unused_t *)((char *)hdr + offset + len);
		if (be16_to_cpu(postdup->freetag) != XFS_DIR2_DATA_FREE_TAG)
			postdup = NULL;
	} else
		postdup = NULL;
	ASSERT(*needscanp == 0);
	needscan = 0;
	/*
	 * Previous and following entries are both free,
	 * merge everything into a single free entry.
	 */
	if (prevdup && postdup) {
		xfs_dir2_data_free_t	*dfp2;	/* another bestfree pointer */
		/*
		 * See if prevdup and/or postdup are in bestfree table.
		 */
		dfp = xfs_dir2_data_freefind(hdr, prevdup);
		dfp2 = xfs_dir2_data_freefind(hdr, postdup);
		/*
		 * We need a rescan unless there are exactly 2 free entries
		 * namely our two.  Then we know what's happening, otherwise
		 * since the third bestfree is there, there might be more
		 * entries.
		 */
		needscan = (hdr->bestfree[2].length != 0);
		/*
		 * Fix up the new big freespace.
		 */
		be16_add_cpu(&prevdup->length, len + be16_to_cpu(postdup->length));
		*xfs_dir2_data_unused_tag_p(prevdup) =
			cpu_to_be16((char *)prevdup - (char *)hdr);
		xfs_dir2_data_log_unused(tp, bp, prevdup);
		if (!needscan) {
			/*
			 * Has to be the case that entries 0 and 1 are
			 * dfp and dfp2 (don't know which is which), and
			 * entry 2 is empty.
			 * Remove entry 1 first then entry 0.
			 */
			ASSERT(dfp && dfp2);
			if (dfp == &hdr->bestfree[1]) {
				dfp = &hdr->bestfree[0];
				ASSERT(dfp2 == dfp);
				dfp2 = &hdr->bestfree[1];
			}
			xfs_dir2_data_freeremove(hdr, dfp2, needlogp);
			xfs_dir2_data_freeremove(hdr, dfp, needlogp);
			/*
			 * Now insert the new entry.
			 */
			dfp = xfs_dir2_data_freeinsert(hdr, prevdup, needlogp);
			ASSERT(dfp == &hdr->bestfree[0]);
			ASSERT(dfp->length == prevdup->length);
			ASSERT(!dfp[1].length);
			ASSERT(!dfp[2].length);
		}
	}
	/*
	 * The entry before us is free, merge with it.
	 */
	else if (prevdup) {
		dfp = xfs_dir2_data_freefind(hdr, prevdup);
		be16_add_cpu(&prevdup->length, len);
		*xfs_dir2_data_unused_tag_p(prevdup) =
			cpu_to_be16((char *)prevdup - (char *)hdr);
		xfs_dir2_data_log_unused(tp, bp, prevdup);
		/*
		 * If the previous entry was in the table, the new entry
		 * is longer, so it will be in the table too.  Remove
		 * the old one and add the new one.
		 */
		if (dfp) {
			xfs_dir2_data_freeremove(hdr, dfp, needlogp);
			xfs_dir2_data_freeinsert(hdr, prevdup, needlogp);
		}
		/*
		 * Otherwise we need a scan if the new entry is big enough.
		 */
		else {
			needscan = be16_to_cpu(prevdup->length) >
				   be16_to_cpu(hdr->bestfree[2].length);
		}
	}
	/*
	 * The following entry is free, merge with it.
	 */
	else if (postdup) {
		dfp = xfs_dir2_data_freefind(hdr, postdup);
		newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
		newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
		newdup->length = cpu_to_be16(len + be16_to_cpu(postdup->length));
		*xfs_dir2_data_unused_tag_p(newdup) =
			cpu_to_be16((char *)newdup - (char *)hdr);
		xfs_dir2_data_log_unused(tp, bp, newdup);
		/*
		 * If the following entry was in the table, the new entry
		 * is longer, so it will be in the table too.  Remove
		 * the old one and add the new one.
		 */
		if (dfp) {
			xfs_dir2_data_freeremove(hdr, dfp, needlogp);
			xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
		}
		/*
		 * Otherwise we need a scan if the new entry is big enough.
		 */
		else {
			needscan = be16_to_cpu(newdup->length) >
				   be16_to_cpu(hdr->bestfree[2].length);
		}
	}
	/*
	 * Neither neighbor is free.  Make a new entry.
	 */
	else {
		newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset);
		newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
		newdup->length = cpu_to_be16(len);
		*xfs_dir2_data_unused_tag_p(newdup) =
			cpu_to_be16((char *)newdup - (char *)hdr);
		xfs_dir2_data_log_unused(tp, bp, newdup);
		xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
	}
	*needscanp = needscan;
}
/*
 * Take a byte range out of an existing unused space and make it un-free.
 *
 * The range must lie entirely inside *dup.  Depending on how it lines
 * up with the ends of the old entry, the entry is removed, trimmed at
 * either end, or split in two.  The bestfree table is fixed up in
 * place when possible, otherwise *needscanp tells the caller to
 * rebuild it with xfs_dir2_data_freescan().
 */
void
xfs_dir2_data_use_free(
	xfs_trans_t		*tp,		/* transaction pointer */
	xfs_dabuf_t		*bp,		/* data block buffer */
	xfs_dir2_data_unused_t	*dup,		/* unused entry */
	xfs_dir2_data_aoff_t	offset,		/* starting offset to use */
	xfs_dir2_data_aoff_t	len,		/* length to use */
	int			*needlogp,	/* out: need to log header */
	int			*needscanp)	/* out: need regen bestfree */
{
	xfs_dir2_data_hdr_t	*hdr;		/* data block header */
	xfs_dir2_data_free_t	*dfp;		/* bestfree pointer */
	int			matchback;	/* matches end of freespace */
	int			matchfront;	/* matches start of freespace */
	int			needscan;	/* need to regen bestfree */
	xfs_dir2_data_unused_t	*newdup;	/* new unused entry */
	xfs_dir2_data_unused_t	*newdup2;	/* another new unused entry */
	int			oldlen;		/* old unused entry's length */
	hdr = bp->data;
	ASSERT(hdr->magic == cpu_to_be32(XFS_DIR2_DATA_MAGIC) ||
	       hdr->magic == cpu_to_be32(XFS_DIR2_BLOCK_MAGIC));
	ASSERT(be16_to_cpu(dup->freetag) == XFS_DIR2_DATA_FREE_TAG);
	ASSERT(offset >= (char *)dup - (char *)hdr);
	ASSERT(offset + len <= (char *)dup + be16_to_cpu(dup->length) - (char *)hdr);
	ASSERT((char *)dup - (char *)hdr == be16_to_cpu(*xfs_dir2_data_unused_tag_p(dup)));
	/*
	 * Look up the entry in the bestfree table.
	 */
	dfp = xfs_dir2_data_freefind(hdr, dup);
	oldlen = be16_to_cpu(dup->length);
	ASSERT(dfp || oldlen <= be16_to_cpu(hdr->bestfree[2].length));
	/*
	 * Check for alignment with front and back of the entry.
	 */
	matchfront = (char *)dup - (char *)hdr == offset;
	matchback = (char *)dup + oldlen - (char *)hdr == offset + len;
	ASSERT(*needscanp == 0);
	needscan = 0;
	/*
	 * If we matched it exactly we just need to get rid of it from
	 * the bestfree table.  A populated third slot means there may be
	 * untracked free regions deserving promotion, so rescan instead.
	 */
	if (matchfront && matchback) {
		if (dfp) {
			needscan = (hdr->bestfree[2].offset != 0);
			if (!needscan)
				xfs_dir2_data_freeremove(hdr, dfp, needlogp);
		}
	}
	/*
	 * We match the first part of the entry.
	 * Make a new entry with the remaining freespace.
	 */
	else if (matchfront) {
		newdup = (xfs_dir2_data_unused_t *)((char *)hdr + offset + len);
		newdup->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
		newdup->length = cpu_to_be16(oldlen - len);
		*xfs_dir2_data_unused_tag_p(newdup) =
			cpu_to_be16((char *)newdup - (char *)hdr);
		xfs_dir2_data_log_unused(tp, bp, newdup);
		/*
		 * If it was in the table, remove it and add the new one.
		 */
		if (dfp) {
			xfs_dir2_data_freeremove(hdr, dfp, needlogp);
			dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
			ASSERT(dfp != NULL);
			ASSERT(dfp->length == newdup->length);
			ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr);
			/*
			 * If we got inserted at the last slot,
			 * that means we don't know if there was a better
			 * choice for the last slot, or not.  Rescan.
			 */
			needscan = dfp == &hdr->bestfree[2];
		}
	}
	/*
	 * We match the last part of the entry.
	 * Trim the allocated space off the tail of the entry.
	 */
	else if (matchback) {
		newdup = dup;
		newdup->length = cpu_to_be16(((char *)hdr + offset) - (char *)newdup);
		*xfs_dir2_data_unused_tag_p(newdup) =
			cpu_to_be16((char *)newdup - (char *)hdr);
		xfs_dir2_data_log_unused(tp, bp, newdup);
		/*
		 * If it was in the table, remove it and add the new one.
		 */
		if (dfp) {
			xfs_dir2_data_freeremove(hdr, dfp, needlogp);
			dfp = xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
			ASSERT(dfp != NULL);
			ASSERT(dfp->length == newdup->length);
			ASSERT(be16_to_cpu(dfp->offset) == (char *)newdup - (char *)hdr);
			/*
			 * If we got inserted at the last slot,
			 * that means we don't know if there was a better
			 * choice for the last slot, or not.  Rescan.
			 */
			needscan = dfp == &hdr->bestfree[2];
		}
	}
	/*
	 * Poking out the middle of an entry.
	 * Make two new entries.
	 */
	else {
		newdup = dup;
		newdup->length = cpu_to_be16(((char *)hdr + offset) - (char *)newdup);
		*xfs_dir2_data_unused_tag_p(newdup) =
			cpu_to_be16((char *)newdup - (char *)hdr);
		xfs_dir2_data_log_unused(tp, bp, newdup);
		newdup2 = (xfs_dir2_data_unused_t *)((char *)hdr + offset + len);
		newdup2->freetag = cpu_to_be16(XFS_DIR2_DATA_FREE_TAG);
		newdup2->length = cpu_to_be16(oldlen - len - be16_to_cpu(newdup->length));
		*xfs_dir2_data_unused_tag_p(newdup2) =
			cpu_to_be16((char *)newdup2 - (char *)hdr);
		xfs_dir2_data_log_unused(tp, bp, newdup2);
		/*
		 * If the old entry was in the table, we need to scan
		 * if the 3rd entry was valid, since these entries
		 * are smaller than the old one.
		 * If we don't need to scan that means there were 1 or 2
		 * entries in the table, and removing the old and adding
		 * the 2 new will work.
		 */
		if (dfp) {
			needscan = (hdr->bestfree[2].length != 0);
			if (!needscan) {
				xfs_dir2_data_freeremove(hdr, dfp, needlogp);
				xfs_dir2_data_freeinsert(hdr, newdup, needlogp);
				xfs_dir2_data_freeinsert(hdr, newdup2,
							 needlogp);
			}
		}
	}
	*needscanp = needscan;
}
| gpl-2.0 |
mostafa-z/Gabriel_MM | drivers/staging/comedi/drivers/comedi_parport.c | 8279 | 9626 | /*
comedi/drivers/comedi_parport.c
hardware driver for standard parallel port
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1998,2001 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: comedi_parport
Description: Standard PC parallel port
Author: ds
Status: works in immediate mode
Devices: [standard] parallel port (comedi_parport)
Updated: Tue, 30 Apr 2002 21:11:45 -0700
A cheap and easy way to get a few more digital I/O lines. Steal
additional parallel ports from old computers or your neighbors'
computers.
Option list:
0: I/O port base for the parallel port.
1: IRQ
Parallel Port Lines:
pin subdev chan aka
--- ------ ---- ---
1 2 0 strobe
2 0 0 data 0
3 0 1 data 1
4 0 2 data 2
5 0 3 data 3
6 0 4 data 4
7 0 5 data 5
8 0 6 data 6
9 0 7 data 7
10 1 3 acknowledge
11 1 4 busy
12 1 2 output
13 1 1 printer selected
14 2 1 auto LF
15 1 0 error
16 2 2 init
17 2 3 select printer
18-25 ground
Notes:
Subdevice 0 is digital I/O, subdevice 1 is digital input, and
subdevice 2 is digital output. Unlike other Comedi devices,
subdevice 0 defaults to output.
Pins 13 and 14 are inverted once by Comedi and once by the
hardware, thus cancelling the effect.
Pin 1 is a strobe, thus acts like one. There's no way in software
to change this, at least on a standard parallel port.
Subdevice 3 pretends to be a digital input subdevice, but it always
returns 0 when read. However, if you run a command with
scan_begin_src=TRIG_EXT, it uses pin 10 as an external triggering
pin, which can be used to wake up tasks.
*/
/*
see http://www.beyondlogic.org/ for information.
or http://www.linux-magazin.de/ausgabe/1999/10/IO/io.html
*/
#include "../comedidev.h"
#include <linux/interrupt.h>
#include <linux/ioport.h>
#define PARPORT_SIZE 3
#define PARPORT_A 0
#define PARPORT_B 1
#define PARPORT_C 2
/* Forward declarations for the driver entry points. */
static int parport_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it);
static int parport_detach(struct comedi_device *dev);
/* Registration record for the legacy (manually configured) comedi API. */
static struct comedi_driver driver_parport = {
	.driver_name = "comedi_parport",
	.module = THIS_MODULE,
	.attach = parport_attach,
	.detach = parport_detach,
};
/* Register the driver with the comedi core at module load. */
static int __init driver_parport_init_module(void)
{
	return comedi_driver_register(&driver_parport);
}
/* Unregister the driver from the comedi core at module unload. */
static void __exit driver_parport_cleanup_module(void)
{
	comedi_driver_unregister(&driver_parport);
}
module_init(driver_parport_init_module);
module_exit(driver_parport_cleanup_module);
/* Per-device state: software shadows of the write-only port registers. */
struct parport_private {
	unsigned int a_data;	/* last value written to the data port */
	unsigned int c_data;	/* last value written to the control port */
	int enable_irq;		/* nonzero while an async command is running */
};
#define devpriv ((struct parport_private *)(dev->private))
/*
 * Data register (subdevice 0): rewrite any bits selected by the mask
 * in data[0] with the values in data[1], then read back the port.
 */
static int parport_insn_a(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	unsigned int mask = data[0];	/* bits the caller wants to change */
	unsigned int bits = data[1];	/* new values for those bits */
	if (mask) {
		devpriv->a_data = (devpriv->a_data & ~mask) | (bits & mask);
		outb(devpriv->a_data, dev->iobase + PARPORT_A);
	}
	/* Return the live register contents. */
	data[1] = inb(dev->iobase + PARPORT_A);
	return 2;
}
/*
 * Configure the direction of the data register (subdevice 0): nonzero
 * data[0] selects output, zero selects input.  All 8 lines switch as a
 * group via control-register bit 5.
 */
static int parport_insn_config_a(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn, unsigned int *data)
{
	if (data[0] == 0) {
		s->io_bits = 0;
		devpriv->c_data |= (1 << 5);
	} else {
		s->io_bits = 0xff;
		devpriv->c_data &= ~(1 << 5);
	}
	outb(devpriv->c_data, dev->iobase + PARPORT_C);
	return 1;
}
/*
 * Status register (subdevice 1): read-only.  Writes (a nonzero mask in
 * data[0]) are silently ignored, matching the original behavior.
 */
static int parport_insn_b(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	/* Shift the 5 status lines down to channels 0..4. */
	data[1] = (inb(dev->iobase + PARPORT_B) >> 3);
	return 2;
}
/*
 * Control register (subdevice 2): update any of the 4 control lines
 * selected by the mask in data[0], then report the shadow copy (the
 * register itself is not read back).
 */
static int parport_insn_c(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	unsigned int mask = data[0] & 0x0f;	/* only 4 control lines exist */
	data[0] = mask;
	if (mask) {
		devpriv->c_data = (devpriv->c_data & ~mask) | (data[1] & mask);
		outb(devpriv->c_data, dev->iobase + PARPORT_C);
	}
	data[1] = devpriv->c_data & 0xf;
	return 2;
}
/*
 * Read the dummy interrupt subdevice (subdevice 3): it has no real
 * data lines, so a read always yields 0.
 */
static int parport_intr_insn(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn, unsigned int *data)
{
	if (insn->n >= 1) {
		data[1] = 0;
		return 2;
	}
	return -EINVAL;
}
/*
 * Validate an async command for the interrupt subdevice.
 *
 * The only supported command shape is: start TRIG_NOW, scan_begin
 * TRIG_EXT (the ACK pin), convert TRIG_FOLLOW, scan_end TRIG_COUNT of
 * 1, stop TRIG_NONE, with all arguments fixed.  Per the comedi
 * cmdtest protocol, invalid fields are corrected in place and the
 * step number (1-4) at which a problem was found is returned; 0 means
 * the command is acceptable.
 */
static int parport_intr_cmdtest(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;
	/* step 1: trigger sources must belong to the supported sets */
	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;
	tmp = cmd->scan_begin_src;
	cmd->scan_begin_src &= TRIG_EXT;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;
	tmp = cmd->convert_src;
	cmd->convert_src &= TRIG_FOLLOW;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;
	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;
	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;
	if (err)
		return 1;
	/* step 2 (unique source combinations): nothing to check here */
	if (err)
		return 2;
	/* step 3: argument values are all fixed for this subdevice */
	if (cmd->start_arg != 0) {
		cmd->start_arg = 0;
		err++;
	}
	if (cmd->scan_begin_arg != 0) {
		cmd->scan_begin_arg = 0;
		err++;
	}
	if (cmd->convert_arg != 0) {
		cmd->convert_arg = 0;
		err++;
	}
	if (cmd->scan_end_arg != 1) {
		cmd->scan_end_arg = 1;
		err++;
	}
	if (cmd->stop_arg != 0) {
		cmd->stop_arg = 0;
		err++;
	}
	if (err)
		return 3;
	/* step 4 (argument fixups): nothing to adjust */
	if (err)
		return 4;
	return 0;
}
/*
 * Start the async command: set control-register bit 4 (0x10, the
 * port's interrupt-enable line) and arm the ISR via enable_irq.
 */
static int parport_intr_cmd(struct comedi_device *dev,
			    struct comedi_subdevice *s)
{
	devpriv->c_data |= 0x10;
	outb(devpriv->c_data, dev->iobase + PARPORT_C);
	devpriv->enable_irq = 1;
	return 0;
}
/*
 * Stop the async command: clear the interrupt-enable bit and tell the
 * ISR to treat any further interrupts as spurious.
 */
static int parport_intr_cancel(struct comedi_device *dev,
			       struct comedi_subdevice *s)
{
	printk(KERN_DEBUG "parport_intr_cancel()\n");
	devpriv->c_data &= ~0x10;
	outb(devpriv->c_data, dev->iobase + PARPORT_C);
	devpriv->enable_irq = 0;
	return 0;
}
/*
 * IRQ handler: each interrupt produces one scan (a single zero sample)
 * on subdevice 3 and wakes up any waiting reader.
 */
static irqreturn_t parport_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *intr_sd = &dev->subdevices[3];
	/* No command is running: this interrupt is not ours to handle. */
	if (!devpriv->enable_irq) {
		printk(KERN_ERR "comedi_parport: bogus irq, ignored\n");
		return IRQ_NONE;
	}
	comedi_buf_put(intr_sd->async, 0);
	intr_sd->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
	comedi_event(dev, intr_sd);
	return IRQ_HANDLED;
}
/*
 * Attach routine: claim the I/O region given in option 0 (and the
 * optional IRQ in option 1), then set up the four subdevices laid out
 * in the driver header comment.  Returns 1 on success (legacy comedi
 * convention) or a negative errno.
 */
static int parport_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it)
{
	int ret;
	unsigned int irq;
	unsigned long iobase;
	struct comedi_subdevice *s;
	/* option 0: base address of the parallel port */
	iobase = it->options[0];
	printk(KERN_INFO "comedi%d: parport: 0x%04lx ", dev->minor, iobase);
	if (!request_region(iobase, PARPORT_SIZE, "parport (comedi)")) {
		printk(KERN_ERR "I/O port conflict\n");
		return -EIO;
	}
	dev->iobase = iobase;
	/* option 1: IRQ; 0 disables the interrupt subdevice */
	irq = it->options[1];
	if (irq) {
		printk(KERN_INFO " irq=%u", irq);
		ret = request_irq(irq, parport_interrupt, 0, "comedi_parport",
				  dev);
		if (ret < 0) {
			/*
			 * NOTE(review): the I/O region claimed above is not
			 * released on this path; this assumes the comedi
			 * core runs parport_detach() after a failed attach
			 * -- confirm against the core's attach code.
			 */
			printk(KERN_ERR " irq not available\n");
			return -EINVAL;
		}
		dev->irq = irq;
	}
	dev->board_name = "parport";
	ret = alloc_subdevices(dev, 4);
	if (ret < 0)
		return ret;
	ret = alloc_private(dev, sizeof(struct parport_private));
	if (ret < 0)
		return ret;
	/* subdevice 0: the 8 data lines, direction switched as a group */
	s = dev->subdevices + 0;
	s->type = COMEDI_SUBD_DIO;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
	s->n_chan = 8;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = parport_insn_a;
	s->insn_config = parport_insn_config_a;
	/* subdevice 1: the 5 status lines, input only */
	s = dev->subdevices + 1;
	s->type = COMEDI_SUBD_DI;
	s->subdev_flags = SDF_READABLE;
	s->n_chan = 5;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = parport_insn_b;
	/* subdevice 2: the 4 control lines, output only */
	s = dev->subdevices + 2;
	s->type = COMEDI_SUBD_DO;
	s->subdev_flags = SDF_WRITABLE;
	s->n_chan = 4;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = parport_insn_c;
	/* subdevice 3: interrupt-driven dummy input, only if we got an IRQ */
	s = dev->subdevices + 3;
	if (irq) {
		dev->read_subdev = s;
		s->type = COMEDI_SUBD_DI;
		s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
		s->n_chan = 1;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_bits = parport_intr_insn;
		s->do_cmdtest = parport_intr_cmdtest;
		s->do_cmd = parport_intr_cmd;
		s->cancel = parport_intr_cancel;
	} else {
		s->type = COMEDI_SUBD_UNUSED;
	}
	/* Drive both writable ports to a known all-zero state. */
	devpriv->a_data = 0;
	outb(devpriv->a_data, dev->iobase + PARPORT_A);
	devpriv->c_data = 0;
	outb(devpriv->c_data, dev->iobase + PARPORT_C);
	printk(KERN_INFO "\n");
	return 1;
}
/*
 * Detach: release the I/O region and IRQ claimed in parport_attach().
 * Each is checked individually because attach may have failed partway.
 */
static int parport_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: parport: remove\n", dev->minor);
	if (dev->iobase)
		release_region(dev->iobase, PARPORT_SIZE);
	if (dev->irq)
		free_irq(dev->irq, dev);
	return 0;
}
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
garwynn/L900_NE2_Kernel | drivers/staging/comedi/drivers/comedi_parport.c | 8279 | 9626 | /*
comedi/drivers/comedi_parport.c
hardware driver for standard parallel port
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1998,2001 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: comedi_parport
Description: Standard PC parallel port
Author: ds
Status: works in immediate mode
Devices: [standard] parallel port (comedi_parport)
Updated: Tue, 30 Apr 2002 21:11:45 -0700
A cheap and easy way to get a few more digital I/O lines. Steal
additional parallel ports from old computers or your neighbors'
computers.
Option list:
0: I/O port base for the parallel port.
1: IRQ
Parallel Port Lines:
pin subdev chan aka
--- ------ ---- ---
1 2 0 strobe
2 0 0 data 0
3 0 1 data 1
4 0 2 data 2
5 0 3 data 3
6 0 4 data 4
7 0 5 data 5
8 0 6 data 6
9 0 7 data 7
10 1 3 acknowledge
11 1 4 busy
12 1 2 output
13 1 1 printer selected
14 2 1 auto LF
15 1 0 error
16 2 2 init
17 2 3 select printer
18-25 ground
Notes:
Subdevice 0 is digital I/O, subdevice 1 is digital input, and
subdevice 2 is digital output. Unlike other Comedi devices,
subdevice 0 defaults to output.
Pins 13 and 14 are inverted once by Comedi and once by the
hardware, thus cancelling the effect.
Pin 1 is a strobe, thus acts like one. There's no way in software
to change this, at least on a standard parallel port.
Subdevice 3 pretends to be a digital input subdevice, but it always
returns 0 when read. However, if you run a command with
scan_begin_src=TRIG_EXT, it uses pin 10 as an external triggering
pin, which can be used to wake up tasks.
*/
/*
see http://www.beyondlogic.org/ for information.
or http://www.linux-magazin.de/ausgabe/1999/10/IO/io.html
*/
#include "../comedidev.h"
#include <linux/interrupt.h>
#include <linux/ioport.h>
#define PARPORT_SIZE 3
#define PARPORT_A 0
#define PARPORT_B 1
#define PARPORT_C 2
/* Forward declarations for the driver entry points. */
static int parport_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it);
static int parport_detach(struct comedi_device *dev);
/* Registration record for the legacy (manually configured) comedi API. */
static struct comedi_driver driver_parport = {
	.driver_name = "comedi_parport",
	.module = THIS_MODULE,
	.attach = parport_attach,
	.detach = parport_detach,
};
/* Register the driver with the comedi core at module load. */
static int __init driver_parport_init_module(void)
{
	return comedi_driver_register(&driver_parport);
}
/* Unregister the driver from the comedi core at module unload. */
static void __exit driver_parport_cleanup_module(void)
{
	comedi_driver_unregister(&driver_parport);
}
module_init(driver_parport_init_module);
module_exit(driver_parport_cleanup_module);
/* Per-device state: software shadows of the write-only port registers. */
struct parport_private {
	unsigned int a_data;	/* last value written to the data port */
	unsigned int c_data;	/* last value written to the control port */
	int enable_irq;		/* nonzero while an async command is running */
};
#define devpriv ((struct parport_private *)(dev->private))
/*
 * Data register (subdevice 0): rewrite any bits selected by the mask
 * in data[0] with the values in data[1], then read back the port.
 */
static int parport_insn_a(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	unsigned int mask = data[0];	/* bits the caller wants to change */
	unsigned int bits = data[1];	/* new values for those bits */
	if (mask) {
		devpriv->a_data = (devpriv->a_data & ~mask) | (bits & mask);
		outb(devpriv->a_data, dev->iobase + PARPORT_A);
	}
	/* Return the live register contents. */
	data[1] = inb(dev->iobase + PARPORT_A);
	return 2;
}
/*
 * Configure the direction of the data register (subdevice 0): nonzero
 * data[0] selects output, zero selects input.  All 8 lines switch as a
 * group via control-register bit 5.
 */
static int parport_insn_config_a(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn, unsigned int *data)
{
	if (data[0] == 0) {
		s->io_bits = 0;
		devpriv->c_data |= (1 << 5);
	} else {
		s->io_bits = 0xff;
		devpriv->c_data &= ~(1 << 5);
	}
	outb(devpriv->c_data, dev->iobase + PARPORT_C);
	return 1;
}
/*
 * Status register (subdevice 1): read-only.  Writes (a nonzero mask in
 * data[0]) are silently ignored, matching the original behavior.
 */
static int parport_insn_b(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	/* Shift the 5 status lines down to channels 0..4. */
	data[1] = (inb(dev->iobase + PARPORT_B) >> 3);
	return 2;
}
/*
 * Control register (subdevice 2): update any of the 4 control lines
 * selected by the mask in data[0], then report the shadow copy (the
 * register itself is not read back).
 */
static int parport_insn_c(struct comedi_device *dev, struct comedi_subdevice *s,
			  struct comedi_insn *insn, unsigned int *data)
{
	unsigned int mask = data[0] & 0x0f;	/* only 4 control lines exist */
	data[0] = mask;
	if (mask) {
		devpriv->c_data = (devpriv->c_data & ~mask) | (data[1] & mask);
		outb(devpriv->c_data, dev->iobase + PARPORT_C);
	}
	data[1] = devpriv->c_data & 0xf;
	return 2;
}
/*
 * Read the dummy interrupt subdevice (subdevice 3): it has no real
 * data lines, so a read always yields 0.
 */
static int parport_intr_insn(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_insn *insn, unsigned int *data)
{
	if (insn->n >= 1) {
		data[1] = 0;
		return 2;
	}
	return -EINVAL;
}
/*
 * Validate an async command for the interrupt subdevice.
 *
 * The only supported command shape is: start TRIG_NOW, scan_begin
 * TRIG_EXT (the ACK pin), convert TRIG_FOLLOW, scan_end TRIG_COUNT of
 * 1, stop TRIG_NONE, with all arguments fixed.  Per the comedi
 * cmdtest protocol, invalid fields are corrected in place and the
 * step number (1-4) at which a problem was found is returned; 0 means
 * the command is acceptable.
 */
static int parport_intr_cmdtest(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;
	/* step 1: trigger sources must belong to the supported sets */
	tmp = cmd->start_src;
	cmd->start_src &= TRIG_NOW;
	if (!cmd->start_src || tmp != cmd->start_src)
		err++;
	tmp = cmd->scan_begin_src;
	cmd->scan_begin_src &= TRIG_EXT;
	if (!cmd->scan_begin_src || tmp != cmd->scan_begin_src)
		err++;
	tmp = cmd->convert_src;
	cmd->convert_src &= TRIG_FOLLOW;
	if (!cmd->convert_src || tmp != cmd->convert_src)
		err++;
	tmp = cmd->scan_end_src;
	cmd->scan_end_src &= TRIG_COUNT;
	if (!cmd->scan_end_src || tmp != cmd->scan_end_src)
		err++;
	tmp = cmd->stop_src;
	cmd->stop_src &= TRIG_NONE;
	if (!cmd->stop_src || tmp != cmd->stop_src)
		err++;
	if (err)
		return 1;
	/* step 2 (unique source combinations): nothing to check here */
	if (err)
		return 2;
	/* step 3: argument values are all fixed for this subdevice */
	if (cmd->start_arg != 0) {
		cmd->start_arg = 0;
		err++;
	}
	if (cmd->scan_begin_arg != 0) {
		cmd->scan_begin_arg = 0;
		err++;
	}
	if (cmd->convert_arg != 0) {
		cmd->convert_arg = 0;
		err++;
	}
	if (cmd->scan_end_arg != 1) {
		cmd->scan_end_arg = 1;
		err++;
	}
	if (cmd->stop_arg != 0) {
		cmd->stop_arg = 0;
		err++;
	}
	if (err)
		return 3;
	/* step 4 (argument fixups): nothing to adjust */
	if (err)
		return 4;
	return 0;
}
/*
 * Start the async command: set control-register bit 4 (0x10, the
 * port's interrupt-enable line) and arm the ISR via enable_irq.
 */
static int parport_intr_cmd(struct comedi_device *dev,
			    struct comedi_subdevice *s)
{
	devpriv->c_data |= 0x10;
	outb(devpriv->c_data, dev->iobase + PARPORT_C);
	devpriv->enable_irq = 1;
	return 0;
}
/*
 * Stop the async command: clear the interrupt-enable bit and tell the
 * ISR to treat any further interrupts as spurious.
 */
static int parport_intr_cancel(struct comedi_device *dev,
			       struct comedi_subdevice *s)
{
	printk(KERN_DEBUG "parport_intr_cancel()\n");
	devpriv->c_data &= ~0x10;
	outb(devpriv->c_data, dev->iobase + PARPORT_C);
	devpriv->enable_irq = 0;
	return 0;
}
/*
 * IRQ handler: each interrupt produces one scan (a single zero sample)
 * on subdevice 3 and wakes up any waiting reader.
 */
static irqreturn_t parport_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *intr_sd = &dev->subdevices[3];
	/* No command is running: this interrupt is not ours to handle. */
	if (!devpriv->enable_irq) {
		printk(KERN_ERR "comedi_parport: bogus irq, ignored\n");
		return IRQ_NONE;
	}
	comedi_buf_put(intr_sd->async, 0);
	intr_sd->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOS;
	comedi_event(dev, intr_sd);
	return IRQ_HANDLED;
}
/*
 * Attach routine: claim the I/O region given in option 0 (and the
 * optional IRQ in option 1), then set up the four subdevices laid out
 * in the driver header comment.  Returns 1 on success (legacy comedi
 * convention) or a negative errno.
 */
static int parport_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it)
{
	int ret;
	unsigned int irq;
	unsigned long iobase;
	struct comedi_subdevice *s;
	/* option 0: base address of the parallel port */
	iobase = it->options[0];
	printk(KERN_INFO "comedi%d: parport: 0x%04lx ", dev->minor, iobase);
	if (!request_region(iobase, PARPORT_SIZE, "parport (comedi)")) {
		printk(KERN_ERR "I/O port conflict\n");
		return -EIO;
	}
	dev->iobase = iobase;
	/* option 1: IRQ; 0 disables the interrupt subdevice */
	irq = it->options[1];
	if (irq) {
		printk(KERN_INFO " irq=%u", irq);
		ret = request_irq(irq, parport_interrupt, 0, "comedi_parport",
				  dev);
		if (ret < 0) {
			/*
			 * NOTE(review): the I/O region claimed above is not
			 * released on this path; this assumes the comedi
			 * core runs parport_detach() after a failed attach
			 * -- confirm against the core's attach code.
			 */
			printk(KERN_ERR " irq not available\n");
			return -EINVAL;
		}
		dev->irq = irq;
	}
	dev->board_name = "parport";
	ret = alloc_subdevices(dev, 4);
	if (ret < 0)
		return ret;
	ret = alloc_private(dev, sizeof(struct parport_private));
	if (ret < 0)
		return ret;
	/* subdevice 0: the 8 data lines, direction switched as a group */
	s = dev->subdevices + 0;
	s->type = COMEDI_SUBD_DIO;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
	s->n_chan = 8;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = parport_insn_a;
	s->insn_config = parport_insn_config_a;
	/* subdevice 1: the 5 status lines, input only */
	s = dev->subdevices + 1;
	s->type = COMEDI_SUBD_DI;
	s->subdev_flags = SDF_READABLE;
	s->n_chan = 5;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = parport_insn_b;
	/* subdevice 2: the 4 control lines, output only */
	s = dev->subdevices + 2;
	s->type = COMEDI_SUBD_DO;
	s->subdev_flags = SDF_WRITABLE;
	s->n_chan = 4;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = parport_insn_c;
	/* subdevice 3: interrupt-driven dummy input, only if we got an IRQ */
	s = dev->subdevices + 3;
	if (irq) {
		dev->read_subdev = s;
		s->type = COMEDI_SUBD_DI;
		s->subdev_flags = SDF_READABLE | SDF_CMD_READ;
		s->n_chan = 1;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_bits = parport_intr_insn;
		s->do_cmdtest = parport_intr_cmdtest;
		s->do_cmd = parport_intr_cmd;
		s->cancel = parport_intr_cancel;
	} else {
		s->type = COMEDI_SUBD_UNUSED;
	}
	/* Drive both writable ports to a known all-zero state. */
	devpriv->a_data = 0;
	outb(devpriv->a_data, dev->iobase + PARPORT_A);
	devpriv->c_data = 0;
	outb(devpriv->c_data, dev->iobase + PARPORT_C);
	printk(KERN_INFO "\n");
	return 1;
}
/*
 * Detach handler: release whatever parport_attach() managed to
 * acquire (IRQ line and/or I/O region).  Always succeeds.
 */
static int parport_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: parport: remove\n", dev->minor);

	if (dev->irq)
		free_irq(dev->irq, dev);
	if (dev->iobase)
		release_region(dev->iobase, PARPORT_SIZE);

	return 0;
}
/* Module metadata reported via modinfo. */
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CyanogenMod/zte-kernel-msm7x27 | drivers/video/i810/i810_dvt.c | 14679 | 11794 | /*-*- linux-c -*-
* linux/drivers/video/i810_dvt.c -- Intel 810 Discrete Video Timings (Intel)
*
* Copyright (C) 2001 Antonino Daplas<adaplas@pol.net>
* All Rights Reserved
*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/kernel.h>
#include "i810_regs.h"
#include "i810.h"
/*
 * Pre-computed register settings for every discrete video timing the
 * driver supports (4:3 modes from 640x480 up to 1600x1200 at several
 * refresh rates).  Each entry starts with the pixel clock (apparently
 * in kHz) and PLL divider words, followed by CRT controller register
 * values and per-depth watermark words for the two memory clocks —
 * see struct mode_registers in i810.h for the exact field layout.
 */
struct mode_registers std_modes[] = {
	/* 640x480 @ 60Hz */
	{ 25000, 0x0013, 0x0003, 0x40, 0x5F, 0x4F, 0x50, 0x82, 0x51, 0x9D,
	  0x0B, 0x10, 0x40, 0xE9, 0x0B, 0xDF, 0x50, 0xE7, 0x04, 0x02,
	  0x01, 0x01, 0x01, 0x00, 0x01, 0x22002000, 0x22004000, 0x22006000,
	  0x22002000, 0x22004000, 0x22006000, 0xC0 },
	/* 640x480 @ 70Hz */
	{ 28000, 0x0053, 0x0010, 0x40, 0x61, 0x4F, 0x4F, 0x85, 0x52, 0x9A,
	  0xF2, 0x10, 0x40, 0xE0, 0x03, 0xDF, 0x50, 0xDF, 0xF3, 0x01,
	  0x01, 0x01, 0x01, 0x00, 0x01, 0x22002000, 0x22004000, 0x22005000,
	  0x22002000, 0x22004000, 0x22005000, 0xC0 },
	/* 640x480 @ 72Hz */
	{ 31000, 0x0013, 0x0002, 0x40, 0x63, 0x4F, 0x4F, 0x87, 0x52, 0x97,
	  0x06, 0x0F, 0x40, 0xE8, 0x0B, 0xDF, 0x50, 0xDF, 0x07, 0x02,
	  0x01, 0x01, 0x01, 0x00, 0x01, 0x22003000, 0x22005000, 0x22007000,
	  0x22003000, 0x22005000, 0x22007000, 0xC0 },
	/* 640x480 @ 75Hz */
	{ 31000, 0x0013, 0x0002, 0x40, 0x64, 0x4F, 0x4F, 0x88, 0x51, 0x99,
	  0xF2, 0x10, 0x40, 0xE0, 0x03, 0xDF, 0x50, 0xDF, 0xF3, 0x01,
	  0x01, 0x01, 0x01, 0x00, 0x01, 0x22003000, 0x22005000, 0x22007000,
	  0x22003000, 0x22005000, 0x22007000, 0xC0 },
	/* 640x480 @ 85Hz */
	{ 36000, 0x0010, 0x0001, 0x40, 0x63, 0x4F, 0x4F, 0x87, 0x56, 0x9D,
	  0xFB, 0x10, 0x40, 0xE0, 0x03, 0xDF, 0x50, 0xDF, 0xFC, 0x01,
	  0x01, 0x01, 0x01, 0x00, 0x01, 0x22003000, 0x22005000, 0x22107000,
	  0x22003000, 0x22005000, 0x22107000, 0xC0 },
	/* 800x600 @ 56Hz */
	{ 36000, 0x0010, 0x0001, 0x40, 0x7B, 0x63, 0x63, 0x9F, 0x66, 0x8F,
	  0x6F, 0x10, 0x40, 0x58, 0x0A, 0x57, 0xC8, 0x57, 0x70, 0x02,
	  0x02, 0x02, 0x02, 0x00, 0x01, 0x22003000, 0x22005000, 0x22107000,
	  0x22003000, 0x22005000, 0x22107000, 0x00 },
	/* 800x600 @ 60Hz */
	{ 40000, 0x0008, 0x0001, 0x30, 0x7F, 0x63, 0x63, 0x83, 0x68, 0x18,
	  0x72, 0x10, 0x40, 0x58, 0x0C, 0x57, 0xC8, 0x57, 0x73, 0x02,
	  0x02, 0x02, 0x02, 0x00, 0x00, 0x22003000, 0x22006000, 0x22108000,
	  0x22003000, 0x22006000, 0x22108000, 0x00 },
	/* 800x600 @ 70Hz */
	{ 45000, 0x0054, 0x0015, 0x30, 0x7D, 0x63, 0x63, 0x81, 0x68, 0x12,
	  0x6f, 0x10, 0x40, 0x58, 0x0b, 0x57, 0x64, 0x57, 0x70, 0x02,
	  0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22007000, 0x2210A000,
	  0x22004000, 0x22007000, 0x2210A000, 0x00 },
	/* 800x600 @ 72Hz */
	{ 50000, 0x0017, 0x0004, 0x30, 0x7D, 0x63, 0x63, 0x81, 0x6A, 0x19,
	  0x98, 0x10, 0x40, 0x7C, 0x02, 0x57, 0xC8, 0x57, 0x99, 0x02,
	  0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22007000, 0x2210A000,
	  0x22004000, 0x22007000, 0x2210A000, 0x00 },
	/* 800x600 @ 75Hz */
	{ 49000, 0x001F, 0x0006, 0x30, 0x7F, 0x63, 0x63, 0x83, 0x65, 0x0F,
	  0x6F, 0x10, 0x40, 0x58, 0x0B, 0x57, 0xC8, 0x57, 0x70, 0x02,
	  0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22007000, 0x2210B000,
	  0x22004000, 0x22007000, 0x2210B000, 0x00 },
	/* 800x600 @ 85Hz */
	{ 56000, 0x0049, 0x000E, 0x30, 0x7E, 0x63, 0x63, 0x82, 0x67, 0x0F,
	  0x75, 0x10, 0x40, 0x58, 0x0B, 0x57, 0xC8, 0x57, 0x76, 0x02,
	  0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22108000, 0x2210b000,
	  0x22004000, 0x22108000, 0x2210b000, 0x00 },
	/* 1024x768 @ 60Hz */
	{ 65000, 0x003F, 0x000A, 0x30, 0xA3, 0x7F, 0x7F, 0x87, 0x83, 0x94,
	  0x24, 0x10, 0x40, 0x02, 0x08, 0xFF, 0x80, 0xFF, 0x25, 0x03,
	  0x02, 0x03, 0x02, 0x00, 0x00, 0x22005000, 0x22109000, 0x2220D000,
	  0x22005000, 0x22109000, 0x2220D000, 0xC0 },
	/* 1024x768 @ 70Hz */
	{ 75000, 0x0017, 0x0002, 0x30, 0xA1, 0x7F, 0x7F, 0x85, 0x82, 0x93,
	  0x24, 0x10, 0x40, 0x02, 0x08, 0xFF, 0x80, 0xFF, 0x25, 0x03,
	  0x02, 0x03, 0x02, 0x00, 0x00, 0x22005000, 0x2210A000, 0x2220F000,
	  0x22005000, 0x2210A000, 0x2220F000, 0xC0 },
	/* 1024x768 @ 75Hz */
	{ 78000, 0x0050, 0x0017, 0x20, 0x9F, 0x7F, 0x7F, 0x83, 0x81, 0x8D,
	  0x1E, 0x10, 0x40, 0x00, 0x03, 0xFF, 0x80, 0xFF, 0x1F, 0x03,
	  0x02, 0x03, 0x02, 0x00, 0x00, 0x22006000, 0x2210B000, 0x22210000,
	  0x22006000, 0x2210B000, 0x22210000, 0x00 },
	/* 1024x768 @ 85Hz */
	{ 94000, 0x003D, 0x000E, 0x20, 0xA7, 0x7F, 0x7F, 0x8B, 0x85, 0x91,
	  0x26, 0x10, 0x40, 0x00, 0x03, 0xFF, 0x80, 0xFF, 0x27, 0x03,
	  0x02, 0x03, 0x02, 0x00, 0x00, 0x22007000, 0x2220E000, 0x22212000,
	  0x22007000, 0x2220E000, 0x22212000, 0x00 },
	/* 1152x864 @ 60Hz */
	{ 80000, 0x0008, 0x0001, 0x20, 0xB3, 0x8F, 0x8F, 0x97, 0x93, 0x9f,
	  0x87, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5f, 0x88, 0x03,
	  0x03, 0x03, 0x03, 0x00, 0x00, 0x2220C000, 0x22210000, 0x22415000,
	  0x2220C000, 0x22210000, 0x22415000, 0x00 },
	/* 1152x864 @ 70Hz */
	{ 96000, 0x000a, 0x0001, 0x20, 0xbb, 0x8F, 0x8F, 0x9f, 0x98, 0x87,
	  0x82, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x83, 0x03,
	  0x03, 0x03, 0x03, 0x00, 0x00, 0x22107000, 0x22210000, 0x22415000,
	  0x22107000, 0x22210000, 0x22415000, 0x00 },
	/* 1152x864 @ 72Hz */
	{ 99000, 0x001f, 0x0006, 0x20, 0xbb, 0x8F, 0x8F, 0x9f, 0x98, 0x87,
	  0x83, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x84, 0x03,
	  0x03, 0x03, 0x03, 0x00, 0x00, 0x22107000, 0x22210000, 0x22415000,
	  0x22107000, 0x22210000, 0x22415000, 0x00 },
	/* 1152x864 @ 75Hz */
	{ 108000, 0x0010, 0x0002, 0x20, 0xC3, 0x8F, 0x8F, 0x87, 0x97, 0x07,
	  0x82, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x83, 0x03,
	  0x03, 0x03, 0x03, 0x00, 0x01, 0x22107000, 0x22210000, 0x22415000,
	  0x22107000, 0x22210000, 0x22415000, 0x00 },
	/* 1152x864 @ 85Hz */
	{ 121000, 0x006D, 0x0014, 0x20, 0xc0, 0x8F, 0x8F, 0x84, 0x97, 0x07,
	  0x93, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x94, 0x03,
	  0x03, 0x03, 0x03, 0x00, 0x01, 0x2220C000, 0x22210000, 0x22415000,
	  0x2220C000, 0x22210000, 0x22415000, 0x0 },
	/* 1280x960 @ 60Hz */
	{ 108000, 0x0010, 0x0002, 0x20, 0xDC, 0x9F, 0x9F, 0x80, 0xAB, 0x99,
	  0xE6, 0x10, 0x40, 0xC0, 0x03, 0xBF, 0xA0, 0xBF, 0xE7, 0x03,
	  0x03, 0x03, 0x03, 0x00, 0x01, 0x2210A000, 0x22210000, 0x22415000,
	  0x2210A000, 0x22210000, 0x22415000, 0x00 },
	/* 1280x960 @ 75Hz */
	{ 129000, 0x0029, 0x0006, 0x20, 0xD3, 0x9F, 0x9F, 0x97, 0xaa, 0x1b,
	  0xE8, 0x10, 0x40, 0xC0, 0x03, 0xBF, 0xA0, 0xBF, 0xE9, 0x03,
	  0x03, 0x03, 0x03, 0x00, 0x01, 0x2210A000, 0x22210000, 0x2241B000,
	  0x2210A000, 0x22210000, 0x2241B000, 0x00 },
	/* 1280x960 @ 85Hz */
	{ 148000, 0x0042, 0x0009, 0x20, 0xD3, 0x9F, 0x9F, 0x97, 0xA7, 0x1B,
	  0xF1, 0x10, 0x40, 0xC0, 0x03, 0xBF, 0xA0, 0xBF, 0xF2, 0x03,
	  0x03, 0x03, 0x03, 0x00, 0x01, 0x2210A000, 0x22220000, 0x2241D000,
	  0x2210A000, 0x22220000, 0x2241D000, 0x00 },
	/* 1600x1200 @ 60Hz */
	{ 162000, 0x0019, 0x0006, 0x10, 0x09, 0xC7, 0xC7, 0x8D, 0xcf, 0x07,
	  0xE0, 0x10, 0x40, 0xB0, 0x03, 0xAF, 0xC8, 0xAF, 0xE1, 0x04,
	  0x04, 0x04, 0x04, 0x01, 0x00, 0x2210b000, 0x22416000, 0x44419000,
	  0x2210b000, 0x22416000, 0x44419000, 0x00 },
	/* 1600x1200 @ 65 Hz */
	{ 175000, 0x005d, 0x0018, 0x10, 0x09, 0xC7, 0xC7, 0x8D, 0xcf, 0x07,
	  0xE0, 0x10, 0x40, 0xB0, 0x03, 0xAF, 0xC8, 0xAF, 0xE1, 0x04,
	  0x04, 0x04, 0x04, 0x01, 0x00, 0x2210c000, 0x22416000, 0x44419000,
	  0x2210c000, 0x22416000, 0x44419000, 0x00 },
	/* 1600x1200 @ 70 Hz */
	{ 189000, 0x003D, 0x000e, 0x10, 0x09, 0xC7, 0xC7, 0x8d, 0xcf, 0x07,
	  0xE0, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xE1, 0x04,
	  0x04, 0x04, 0x04, 0x01, 0x00, 0x2220e000, 0x22416000, 0x44419000,
	  0x2220e000, 0x22416000, 0x44419000, 0x00 },
	/* 1600x1200 @ 72 Hz */
	{ 195000, 0x003f, 0x000e, 0x10, 0x0b, 0xC7, 0xC7, 0x8f, 0xd5, 0x0b,
	  0xE1, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xe2, 0x04, 0x04,
	  0x04, 0x04, 0x01, 0x00, 0x2220e000, 0x22416000, 0x44419000,
	  0x2220e000, 0x22416000, 0x44419000, 0x00 },
	/* 1600x1200 @ 75 Hz */
	{ 202000, 0x0024, 0x0007, 0x10, 0x09, 0xC7, 0xC7, 0x8d, 0xcf, 0x07,
	  0xE0, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xE1, 0x04, 0x04,
	  0x04, 0x04, 0x01, 0x00, 0x2220e000, 0x22416000, 0x44419000,
	  0x2220e000, 0x22416000, 0x44419000, 0x00 },
	/* 1600x1200 @ 85 Hz */
	{ 229000, 0x0029, 0x0007, 0x10, 0x09, 0xC7, 0xC7, 0x8d, 0xcf, 0x07,
	  0xE0, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xE1, 0x04, 0x04,
	  0x04, 0x04, 0x01, 0x00, 0x22210000, 0x22416000, 0x0,
	  0x22210000, 0x22416000, 0x0, 0x00 },
};
/*
 * Snap a requested horizontal resolution up to the nearest width the
 * hardware mode table supports (640, 800, 1024, 1152, 1280 or 1600).
 */
void round_off_xres(u32 *xres)
{
	static const u32 widths[] = { 640, 800, 1024, 1152, 1280 };
	int i;

	for (i = 0; i < 5; i++) {
		if (*xres <= widths[i]) {
			*xres = widths[i];
			return;
		}
	}
	*xres = 1600;
}
/* Derive the vertical resolution as 3/4 of the width (4:3 aspect). */
inline void round_off_yres(u32 *xres, u32 *yres)
{
	*yres = (*xres * 3) / 4;	/* same as >> 2: u32 is unsigned */
}
/*
 * Find the entry in std_modes[] that best matches the requested
 * geometry and pixel clock.
 *
 * Candidates are the modes whose horizontal-display register (cr01)
 * matches the requested width; among those, the one whose pixel clock
 * is closest to @pixclock wins.  @yres is currently unused.  Returns
 * the index of the best match, or 0 when no mode matches the width.
 *
 * Bug fixed: previously @diff was only assigned when the mode's
 * pixel clock did not exceed the request, so faster modes were scored
 * with a stale value from an earlier iteration (or the initial 0,
 * which looked like a perfect match).  The distance is now computed
 * unconditionally as an absolute difference.
 */
static int i810fb_find_best_mode(u32 xres, u32 yres, u32 pixclock)
{
	u32 diff, diff_best = 0xFFFFFFFF, i, i_best = 0;
	u8 hfl = (u8) ((xres >> 3) - 1);

	for (i = 0; i < ARRAY_SIZE(std_modes); i++) {
		if (std_modes[i].cr01 != hfl)
			continue;
		if (std_modes[i].pixclock <= pixclock)
			diff = pixclock - std_modes[i].pixclock;
		else
			diff = std_modes[i].pixclock - pixclock;
		if (diff < diff_best) {
			i_best = i;
			diff_best = diff;
		}
	}
	return i_best;
}
/*
 * Copy the timing registers of the best-matching standard mode into
 * @par and derive the overlay activation register from the horizontal
 * timings in @var.
 */
void i810fb_encode_registers(const struct fb_var_screeninfo *var,
			     struct i810fb_par *par, u32 xres, u32 yres)
{
	u32 best;

	/* par->regs.pixclock must be read before regs is overwritten */
	best = i810fb_find_best_mode(xres, yres, par->regs.pixclock);
	par->regs = std_modes[best];

	/* overlay */
	par->ovract = ((xres + var->right_margin + var->hsync_len +
			var->left_margin - 32) | ((xres - 32) << 16));
}
/*
 * Replace the timings in @var with those of the closest supported
 * standard mode, converting the raw CRTC register values back into
 * fbdev margin/sync-length fields (register units are 8-pixel
 * characters horizontally, hence the << 3 shifts).
 */
void i810fb_fill_var_timings(struct fb_var_screeninfo *var)
{
	u32 total, xres, yres;
	u32 mode, pixclock;

	xres = var->xres;
	yres = var->yres;

	/* fbdev pixclock is in picoseconds; the table works in kHz */
	pixclock = 1000000000 / var->pixclock;
	mode = i810fb_find_best_mode(xres, yres, pixclock);

	/* horizontal timings */
	total = (std_modes[mode].cr00 | (std_modes[mode].cr35 & 1) << 8) + 3;
	total <<= 3;
	var->pixclock = 1000000000 / std_modes[mode].pixclock;
	var->right_margin = (std_modes[mode].cr04 << 3) - xres;
	var->hsync_len = ((std_modes[mode].cr05 & 0x1F) -
			  (std_modes[mode].cr04 & 0x1F)) << 3;
	var->left_margin = (total - (xres + var->right_margin +
				     var->hsync_len));

	/*
	 * Sync polarity from MSR bits 6/7 (a set bit selects negative
	 * sync).  Bug fixed: the original tested "~(msr & bit)", which
	 * is non-zero regardless of the bit's state, so both HIGH_ACT
	 * flags were always set; logical "!" is the intended test.
	 */
	var->sync = FB_SYNC_ON_GREEN;
	if (!(std_modes[mode].msr & (1 << 6)))
		var->sync |= FB_SYNC_HOR_HIGH_ACT;
	if (!(std_modes[mode].msr & (1 << 7)))
		var->sync |= FB_SYNC_VERT_HIGH_ACT;

	/* vertical timings */
	total = (std_modes[mode].cr06 | (std_modes[mode].cr30 & 0xF) << 8) + 2;
	var->lower_margin = (std_modes[mode].cr10 |
			     (std_modes[mode].cr32 & 0x0F) << 8) - yres;
	var->vsync_len = (std_modes[mode].cr11 & 0x0F) -
			 (var->lower_margin & 0x0F);
	var->upper_margin = total - (yres + var->lower_margin + var->vsync_len);
}
/*
 * Return the FIFO watermark for the current depth, picked from the
 * mode's pre-computed values according to the memory clock (the 100 MHz
 * set when mem_freq == 100, otherwise the 133 MHz set).  Depths other
 * than 8/16/24/32 bpp yield 0.
 */
u32 i810_get_watermark(struct fb_var_screeninfo *var,
		       struct i810fb_par *par)
{
	struct mode_registers *params = &par->regs;
	int at_100 = (par->mem_freq == 100);

	switch (var->bits_per_pixel) {
	case 8:
		return at_100 ? params->bpp8_100 : params->bpp8_133;
	case 16:
		return at_100 ? params->bpp16_100 : params->bpp16_133;
	case 24:
	case 32:
		return at_100 ? params->bpp24_100 : params->bpp24_133;
	default:
		return 0;
	}
}
| gpl-2.0 |
anarsoul/ipaq-s3c24xx-u-boot | board/omap5912osk/omap5912osk.c | 88 | 9075 | /*
* (C) Copyright 2002
* Sysgo Real-Time Solutions, GmbH <www.elinos.com>
* Marius Groeger <mgroeger@sysgo.de>
*
* (C) Copyright 2002
* David Mueller, ELSOFT AG, <d.mueller@elsoft.ch>
*
* (C) Copyright 2003
* Texas Instruments, <www.ti.com>
* Kshitij Gupta <Kshitij@ti.com>
*
* (C) Copyright 2004
* Texas Instruments, <www.ti.com>
* Rishi Bhattacharya <rishi@ti.com>
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#if defined(CONFIG_OMAP1610)
#include <./configs/omap1510.h>
#endif
DECLARE_GLOBAL_DATA_PTR;
void flash__init (void);
void ether__init (void);
void set_muxconf_regs (void);
void peripheral_power_enable (void);
#define COMP_MODE_ENABLE ((unsigned int)0x0000EAEF)
/*
 * Busy-wait for roughly @loops iterations of a two-instruction ARM
 * subs/bne loop.  Purely CPU-bound: the real time depends on the
 * core clock, not on any timer.
 */
static inline void delay (unsigned long loops)
{
	__asm__ volatile ("1:\n"
		"subs %0, %1, #1\n"
		"bne 1b":"=r" (loops):"0" (loops));
}
/*
* Miscellaneous platform dependent initialisations
*/
int board_init (void)
{
	/* Machine type passed to the kernel via ATAGs. */
	gd->bd->bi_arch_number = MACH_TYPE_OMAP_OSK;

	/* address of boot parameters */
	gd->bd->bi_boot_params = 0x10000100;

	/* Configure MUX settings */
	set_muxconf_regs ();
	peripheral_power_enable ();

	/* this speeds up your boot a quite a bit. However to make it
	 * work, you need make sure your kernel startup flush bug is fixed.
	 * ... rkw ...
	 */
	icache_enable ();

	flash__init ();
	ether__init ();
	return 0;
}
/* Late (post-relocation) init hook; nothing to do on this board. */
int misc_init_r (void)
{
	return 0;
}
/******************************
Routine:
Description:
******************************/
void flash__init (void)
{
#define EMIFS_GlB_Config_REG 0xfffecc0c
	volatile unsigned int *gconf =
	    (volatile unsigned int *) EMIFS_GlB_Config_REG;

	/* Turn off write protection for flash devices (set bit 0 of
	 * the EMIFS global configuration register). */
	*gconf |= 0x0001;
}
/*************************************************************
Routine:ether__init
Description: take the Ethernet controller out of reset and wait
for the EEPROM load to complete.
*************************************************************/
void ether__init (void)
{
#define ETH_CONTROL_REG 0x0480000b
int i;
*((volatile unsigned short *) 0xfffece08) = 0x03FF;
*((volatile unsigned short *) 0xfffb3824) = 0x8000;
*((volatile unsigned short *) 0xfffb3830) = 0x0000;
*((volatile unsigned short *) 0xfffb3834) = 0x0009;
*((volatile unsigned short *) 0xfffb3838) = 0x0009;
*((volatile unsigned short *) 0xfffb3818) = 0x0002;
*((volatile unsigned short *) 0xfffb382C) = 0x0048;
*((volatile unsigned short *) 0xfffb3824) = 0x8603;
udelay (3);
for (i=0;i<2000;i++);
*((volatile unsigned short *) 0xfffb381C) = 0x6610;
udelay (30);
for (i=0;i<10000;i++);
*((volatile unsigned char *) ETH_CONTROL_REG) &= ~0x01;
udelay (3);
}
/******************************
Routine:
Description:
******************************/
int dram_init (void)
{
	/* Report the single fixed SDRAM bank in the board info struct. */
	gd->bd->bi_dram[0].start = PHYS_SDRAM_1;
	gd->bd->bi_dram[0].size = PHYS_SDRAM_1_SIZE;
	return 0;
}
/******************************************************
Routine: set_muxconf_regs
Description: Setting up the configuration Mux registers
specific to the hardware
*******************************************************/
void set_muxconf_regs (void)
{
	volatile unsigned int *MuxConfReg;

	/* set each registers to its reset value; */
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_0);
	/* setup for UART1 */
	*MuxConfReg &= ~(0x02000000);	/* bit 25 */
	/* setup for UART2 */
	*MuxConfReg &= ~(0x01000000);	/* bit 24 */
	/* Disable Uwire CS Hi-Z */
	*MuxConfReg |= 0x08000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_3);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_4);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_5);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_6);
	/*setup mux for UART3 */
	*MuxConfReg |= 0x00000001;	/* bit3, 1, 0 (mux0 5,5,26) */
	*MuxConfReg &= ~0x0000003e;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_7);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_8);
	/* Disable Uwire CS Hi-Z */
	*MuxConfReg |= 0x00001200;	/*bit 9 for CS0 12 for CS3 */
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_9);
	/* Need to turn on bits 21 and 12 in FUNC_MUX_CTRL_9 so the */
	/* hardware will actually use TX and RTS based on bit 25 in */
	/* FUNC_MUX_CTRL_0. I told you this thing was screwy! */
	*MuxConfReg |= 0x00201000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_A);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_B);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_C);
	/* setup for UART2 */
	/* Need to turn on bits 27 and 24 in FUNC_MUX_CTRL_C so the */
	/* hardware will actually use TX and RTS based on bit 24 in */
	/* FUNC_MUX_CTRL_0. */
	*MuxConfReg |= 0x09000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_D);
	*MuxConfReg |= 0x00000020;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) PULL_DWN_CTRL_0);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) PULL_DWN_CTRL_1);
	*MuxConfReg = 0x00000000;
	/* mux setup for SD/MMC driver */
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) PULL_DWN_CTRL_2);
	*MuxConfReg &= 0xFFFE0FFF;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) PULL_DWN_CTRL_3);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) MOD_CONF_CTRL_0);
	/* bit 13 for MMC2 XOR_CLK */
	*MuxConfReg &= ~(0x00002000);
	/* bit 29 for UART 1 */
	/* NOTE(review): the comment says bit 29, but the mask below
	 * clears bit 13 (0x00002000) a second time; bit 29 would be
	 * 0x20000000 — confirm the intended mask. */
	*MuxConfReg &= ~(0x00002000);
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) FUNC_MUX_CTRL_0);
	/* Configure for USB. Turn on VBUS_CTRL and VBUS_MODE. */
	*MuxConfReg |= 0x000C0000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int)USB_TRANSCEIVER_CTRL);
	*MuxConfReg &= ~(0x00000070);
	*MuxConfReg &= ~(0x00000008);
	*MuxConfReg |= 0x00000003;
	*MuxConfReg |= 0x00000180;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) MOD_CONF_CTRL_0);
	/* bit 17, software controls VBUS */
	*MuxConfReg &= ~(0x00020000);
	/* Enable USB 48 and 12M clocks */
	*MuxConfReg |= 0x00000200;
	*MuxConfReg &= ~(0x00000180);
	/*2.75V for MMCSDIO1 */
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) VOLTAGE_CTRL_0);
	*MuxConfReg = 0x00001FE7;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) PU_PD_SEL_0);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) PU_PD_SEL_1);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) PU_PD_SEL_2);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) PU_PD_SEL_3);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) PU_PD_SEL_4);
	*MuxConfReg = 0x00000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) PULL_DWN_CTRL_4);
	*MuxConfReg = 0x00000000;
	/* Turn on UART2 48 MHZ clock */
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) MOD_CONF_CTRL_0);
	*MuxConfReg |= 0x40000000;
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) USB_OTG_CTRL);
	/* setup for USB VBus detection OMAP161x */
	*MuxConfReg |= 0x00040000;	/* bit 18 */
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int) PU_PD_SEL_2);
	/* PullUps for SD/MMC driver */
	*MuxConfReg |= ~(0xFFFE0FFF);
	MuxConfReg =
	    (volatile unsigned int *) ((unsigned int)COMP_MODE_CTRL_0);
	*MuxConfReg = COMP_MODE_ENABLE;
}
/******************************************************
Routine: peripheral_power_enable
Description: Enable the power for UART1
*******************************************************/
void peripheral_power_enable (void)
{
#define UART1_48MHZ_ENABLE ((unsigned short)0x0200)
#define SW_CLOCK_REQUEST ((volatile unsigned short *)0xFFFE0834)
	/* Request the 48 MHz clock for UART1 via SW_CLOCK_REQUEST. */
	*SW_CLOCK_REQUEST |= UART1_48MHZ_ENABLE;
}
/*
* Check Board Identity
*/
int checkboard(void)
{
	/* Print the board name, plus the serial number when the
	 * "serial#" environment variable is set. */
	char *serial = getenv("serial#");

	puts("Board: OSK5912");
	if (serial) {
		puts(", serial# ");
		puts(serial);
	}
	putc('\n');

	return 0;
}
| gpl-2.0 |
shesselba/linux-berlin | drivers/staging/dgap/dgap.c | 88 | 175618 | /*
* Copyright 2003 Digi International (www.digi.com)
* Scott H Kilau <Scott_Kilau at digi dot com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY, EXPRESS OR IMPLIED; without even the
* implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
* PURPOSE. See the GNU General Public License for more details.
*
*/
/*
* In the original out of kernel Digi dgap driver, firmware
* loading was done via user land to driver handshaking.
*
* For cards that support a concentrator (port expander),
* I believe the concentrator its self told the card which
* concentrator is actually attached and then that info
* was used to tell user land which concentrator firmware
* image was to be downloaded. I think even the BIOS or
* FEP images required could change with the connection
* of a particular concentrator.
*
* Since I have no access to any of these cards or
* concentrators, I cannot put the correct concentrator
* firmware file names into the firmware_info structure
* as is now done for the BIOS and FEP images.
*
* I think, but am not certain, that the cards supporting
* concentrators will function without them. So support
* of these cards has been left in this driver.
*
* In order to fully support those cards, they would
* either have to be acquired for dissection or maybe
* Digi International could provide some assistance.
*/
#undef DIGI_CONCENTRATORS_SUPPORTED
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h> /* For udelay */
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/interrupt.h> /* For tasklet and interrupt structs/defines */
#include <linux/ctype.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/serial_reg.h>
#include <linux/io.h> /* For read[bwl]/write[bwl] */
#include <linux/string.h>
#include <linux/device.h>
#include <linux/kdev_t.h>
#include <linux/firmware.h>
#include "dgap.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Digi International, http://www.digi.com");
MODULE_DESCRIPTION("Driver for the Digi International EPCA PCI based product line");
MODULE_SUPPORTED_DEVICE("dgap");
static int dgap_start(void);
static void dgap_init_globals(void);
static struct board_t *dgap_found_board(struct pci_dev *pdev, int id,
int boardnum);
static void dgap_cleanup_board(struct board_t *brd);
static void dgap_poll_handler(ulong dummy);
static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
static void dgap_remove_one(struct pci_dev *dev);
static int dgap_do_remap(struct board_t *brd);
static void dgap_release_remap(struct board_t *brd);
static irqreturn_t dgap_intr(int irq, void *voidbrd);
static int dgap_tty_open(struct tty_struct *tty, struct file *file);
static void dgap_tty_close(struct tty_struct *tty, struct file *file);
static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
struct channel_t *ch);
static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg);
static int dgap_tty_digigeta(struct channel_t *ch, struct digi_t __user *retinfo);
static int dgap_tty_digiseta(struct channel_t *ch, struct board_t *bd,
struct un_t *un, struct digi_t __user *new_info);
static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo);
static int dgap_tty_digisetedelay(struct channel_t *ch, struct board_t *bd,
struct un_t *un, int __user *new_info);
static int dgap_tty_write_room(struct tty_struct *tty);
static int dgap_tty_chars_in_buffer(struct tty_struct *tty);
static void dgap_tty_start(struct tty_struct *tty);
static void dgap_tty_stop(struct tty_struct *tty);
static void dgap_tty_throttle(struct tty_struct *tty);
static void dgap_tty_unthrottle(struct tty_struct *tty);
static void dgap_tty_flush_chars(struct tty_struct *tty);
static void dgap_tty_flush_buffer(struct tty_struct *tty);
static void dgap_tty_hangup(struct tty_struct *tty);
static int dgap_wait_for_drain(struct tty_struct *tty);
static int dgap_set_modem_info(struct channel_t *ch, struct board_t *bd, struct un_t *un,
unsigned int command, unsigned int __user *value);
static int dgap_get_modem_info(struct channel_t *ch,
unsigned int __user *value);
static int dgap_tty_digisetcustombaud(struct channel_t *ch, struct board_t *bd,
struct un_t *un, int __user *new_info);
static int dgap_tty_digigetcustombaud(struct channel_t *ch, struct un_t *un,
int __user *retinfo);
static int dgap_tty_tiocmget(struct tty_struct *tty);
static int dgap_tty_tiocmset(struct tty_struct *tty, unsigned int set,
unsigned int clear);
static int dgap_tty_send_break(struct tty_struct *tty, int msec);
static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout);
static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
int count);
static void dgap_tty_set_termios(struct tty_struct *tty,
struct ktermios *old_termios);
static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c);
static void dgap_tty_send_xchar(struct tty_struct *tty, char ch);
static int dgap_tty_register(struct board_t *brd);
static void dgap_tty_unregister(struct board_t *brd);
static int dgap_tty_init(struct board_t *);
static void dgap_tty_free(struct board_t *);
static void dgap_cleanup_tty(struct board_t *);
static void dgap_carrier(struct channel_t *ch);
static void dgap_input(struct channel_t *ch);
/*
* Our function prototypes from dgap_fep5
*/
static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds);
static int dgap_event(struct board_t *bd);
static void dgap_poll_tasklet(unsigned long data);
static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1,
u8 byte2, uint ncmds);
static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds);
static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt);
static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type);
static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
unsigned char *fbuf, int *len);
static uint dgap_get_custom_baud(struct channel_t *ch);
static void dgap_firmware_reset_port(struct channel_t *ch);
/*
* Function prototypes from dgap_parse.c.
*/
static int dgap_gettok(char **in);
static char *dgap_getword(char **in);
static int dgap_checknode(struct cnode *p);
static void dgap_err(char *s);
/*
* Function prototypes from dgap_sysfs.h
*/
struct board_t;
struct channel_t;
struct un_t;
struct pci_driver;
struct class_device;
static void dgap_create_ports_sysfiles(struct board_t *bd);
static void dgap_remove_ports_sysfiles(struct board_t *bd);
static int dgap_create_driver_sysfiles(struct pci_driver *);
static void dgap_remove_driver_sysfiles(struct pci_driver *);
static void dgap_create_tty_sysfs(struct un_t *un, struct device *c);
static void dgap_remove_tty_sysfs(struct device *c);
/*
* Function prototypes from dgap_parse.h
*/
static int dgap_parsefile(char **in);
static struct cnode *dgap_find_config(int type, int bus, int slot);
static uint dgap_config_get_num_prts(struct board_t *bd);
static char *dgap_create_config_string(struct board_t *bd, char *string);
static uint dgap_config_get_useintr(struct board_t *bd);
static uint dgap_config_get_altpin(struct board_t *bd);
static int dgap_ms_sleep(ulong ms);
static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len);
static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len);
#ifdef DIGI_CONCENTRATORS_SUPPORTED
static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len);
#endif
static int dgap_alloc_flipbuf(struct board_t *brd);
static void dgap_free_flipbuf(struct board_t *brd);
static int dgap_request_irq(struct board_t *brd);
static void dgap_free_irq(struct board_t *brd);
static void dgap_get_vpd(struct board_t *brd);
static void dgap_do_reset_board(struct board_t *brd);
static int dgap_test_bios(struct board_t *brd);
static int dgap_test_fep(struct board_t *brd);
static int dgap_tty_register_ports(struct board_t *brd);
static int dgap_firmware_load(struct pci_dev *pdev, int card_type,
struct board_t *brd);
static void dgap_cleanup_module(void);
module_exit(dgap_cleanup_module);
/*
* File operations permitted on Control/Management major.
*/
static const struct file_operations dgap_board_fops = {
	.owner = THIS_MODULE,	/* no fops implemented yet; only pins the module */
};
static uint dgap_numboards;			/* boards discovered so far */
static struct board_t *dgap_board[MAXBOARDS];	/* per-board state, by index */
static ulong dgap_poll_counter;			/* number of polls performed */
static int dgap_driver_state = DRIVER_INITIALIZED;
static int dgap_poll_tick = 20;	/* Poll interval - 20 ms */

static struct class *dgap_class;		/* sysfs device class */

static struct board_t *dgap_boards_by_major[256];	/* tty major -> board */
static uint dgap_count = 500;

/*
 * Poller stuff
 */
static DEFINE_SPINLOCK(dgap_poll_lock);	/* Poll scheduling lock */
static ulong dgap_poll_time;	/* Time of next poll */
static uint dgap_poll_stop;	/* Used to tell poller to stop */
static struct timer_list dgap_poll_timer;
/*
SUPPORTED PRODUCTS
Card Model Number of Ports Interface
----------------------------------------------------------------
Acceleport Xem 4 - 64 (EIA232 & EIA422)
Acceleport Xr 4 & 8 (EIA232)
Acceleport Xr 920 4 & 8 (EIA232)
Acceleport C/X 8 - 128 (EIA232)
Acceleport EPC/X 8 - 224 (EIA232)
Acceleport Xr/422 4 & 8 (EIA422)
Acceleport 2r/920 2 (EIA232)
Acceleport 4r/920 4 (EIA232)
Acceleport 8r/920 8 (EIA232)
IBM 8-Port Asynchronous PCI Adapter (EIA232)
IBM 128-Port Asynchronous PCI Adapter (EIA232 & EIA422)
*/
/*
 * PCI IDs this driver binds to.  The final (driver_data) field is a
 * sequence number that appears to index the matching entries of
 * dgap_ids[] and fw_info[] below.
 */
static struct pci_device_id dgap_pci_tbl[] = {
	{ DIGI_VID, PCI_DEV_XEM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
	{ DIGI_VID, PCI_DEV_CX_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
	{ DIGI_VID, PCI_DEV_CX_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 2 },
	{ DIGI_VID, PCI_DEV_EPCJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 3 },
	{ DIGI_VID, PCI_DEV_920_2_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 },
	{ DIGI_VID, PCI_DEV_920_4_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 },
	{ DIGI_VID, PCI_DEV_920_8_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 },
	{ DIGI_VID, PCI_DEV_XR_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 7 },
	{ DIGI_VID, PCI_DEV_XRJ_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 8 },
	{ DIGI_VID, PCI_DEV_XR_422_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 9 },
	{ DIGI_VID, PCI_DEV_XR_IBM_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 10 },
	{ DIGI_VID, PCI_DEV_XR_SAIP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 11 },
	{ DIGI_VID, PCI_DEV_XR_BULL_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 12 },
	{ DIGI_VID, PCI_DEV_920_8_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 13 },
	{ DIGI_VID, PCI_DEV_XEM_HP_DID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
	{0,}					/* 0 terminated list. */
};
MODULE_DEVICE_TABLE(pci, dgap_pci_tbl);
/*
* A generic list of Product names, PCI Vendor ID, and PCI Device ID.
*/
struct board_id {
	uint config_type;	/* board model id (PPCM, PCX, PEPC, ...) */
	u8 *name;		/* printable product name */
	uint maxports;		/* maximum number of ports on the board */
	uint dpatype;		/* board/bus capability flags (T_*) */
};
/* Board descriptions, in the same order as dgap_pci_tbl's driver_data. */
static struct board_id dgap_ids[] = {
	{ PPCM, PCI_DEV_XEM_NAME, 64, (T_PCXM|T_PCLITE|T_PCIBUS) },
	{ PCX, PCI_DEV_CX_NAME, 128, (T_CX|T_PCIBUS) },
	{ PCX, PCI_DEV_CX_IBM_NAME, 128, (T_CX|T_PCIBUS) },
	{ PEPC, PCI_DEV_EPCJ_NAME, 224, (T_EPC|T_PCIBUS) },
	{ APORT2_920P, PCI_DEV_920_2_NAME, 2, (T_PCXR|T_PCLITE|T_PCIBUS) },
	{ APORT4_920P, PCI_DEV_920_4_NAME, 4, (T_PCXR|T_PCLITE|T_PCIBUS) },
	{ APORT8_920P, PCI_DEV_920_8_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
	{ PAPORT8, PCI_DEV_XR_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
	{ PAPORT8, PCI_DEV_XRJ_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
	{ PAPORT8, PCI_DEV_XR_422_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
	{ PAPORT8, PCI_DEV_XR_IBM_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
	{ PAPORT8, PCI_DEV_XR_SAIP_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
	{ PAPORT8, PCI_DEV_XR_BULL_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
	{ APORT8_920P, PCI_DEV_920_8_HP_NAME, 8, (T_PCXR|T_PCLITE|T_PCIBUS) },
	{ PPCM, PCI_DEV_XEM_HP_NAME, 64, (T_PCXM|T_PCLITE|T_PCIBUS) },
	{0,}					/* 0 terminated list. */
};
/* PCI driver registration: probe/remove hooks and the ID table above. */
static struct pci_driver dgap_driver = {
	.name = "dgap",
	.probe = dgap_init_one,
	.id_table = dgap_pci_tbl,
	.remove = dgap_remove_one,
};
/* Firmware file names for one card type, requested via request_firmware(). */
struct firmware_info {
	u8 *conf_name;		/* dgap.conf */
	u8 *bios_name;		/* BIOS filename */
	u8 *fep_name;		/* FEP filename */
	u8 *con_name;		/* Concentrator filename FIXME*/
	int num;		/* sequence number */
};
/*
 * Firmware - BIOS, FEP, and CONC filenames
 *
 * Indexed by card type (the PCI table's driver_data), so the order
 * must stay in sync with dgap_pci_tbl / dgap_ids.
 */
static struct firmware_info fw_info[] = {
	{ "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", NULL, 0 },
	{ "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", NULL, 1 },
	{ "dgap/dgap.conf", "dgap/cxpbios.bin", "dgap/cxpfep.bin", NULL, 2 },
	{ "dgap/dgap.conf", "dgap/pcibios.bin", "dgap/pcifep.bin", NULL, 3 },
	{ "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 4 },
	{ "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 5 },
	{ "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 6 },
	{ "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 7 },
	{ "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 8 },
	{ "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 9 },
	{ "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 10 },
	{ "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 11 },
	{ "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 12 },
	{ "dgap/dgap.conf", "dgap/xrbios.bin", "dgap/xrfep.bin", NULL, 13 },
	{ "dgap/dgap.conf", "dgap/sxbios.bin", "dgap/sxfep.bin", NULL, 14 },
	{NULL,}
};
/*
 * Default transparent print information.
 * Copied into each channel's ch_digi at channel-init time.
 */
static struct digi_t dgap_digi_init = {
	.digi_flags =	DIGI_COOK,	/* Flags			*/
	.digi_maxcps =	100,		/* Max CPS			*/
	.digi_maxchar =	50,		/* Max chars in print queue	*/
	.digi_bufsize =	100,		/* Printer buffer size		*/
	.digi_onlen =	4,		/* size of printer on string	*/
	.digi_offlen =	4,		/* size of printer off string	*/
	.digi_onstr =	"\033[5i",	/* ANSI printer on string ]	*/
	.digi_offstr =	"\033[4i",	/* ANSI printer off string ]	*/
	.digi_term =	"ansi"		/* default terminal type	*/
};
/*
 * Define a local default termios struct. All ports will be created
 * with this termios initially.
 *
 * This defines a raw port at 9600 baud, 8 data bits, no parity,
 * 1 stop bit.
 */
static struct ktermios dgap_default_termios = {
	.c_iflag =	(DEFAULT_IFLAGS),	/* iflags */
	.c_oflag =	(DEFAULT_OFLAGS),	/* oflags */
	.c_cflag =	(DEFAULT_CFLAGS),	/* cflags */
	.c_lflag =	(DEFAULT_LFLAGS),	/* lflags */
	.c_cc =		INIT_C_CC,
	.c_line =	0,
};
/* tty_operations shared by the serial and transparent-print drivers
 * (both are bound to this table in dgap_tty_register()). */
static const struct tty_operations dgap_tty_ops = {
	.open = dgap_tty_open,
	.close = dgap_tty_close,
	.write = dgap_tty_write,
	.write_room = dgap_tty_write_room,
	.flush_buffer = dgap_tty_flush_buffer,
	.chars_in_buffer = dgap_tty_chars_in_buffer,
	.flush_chars = dgap_tty_flush_chars,
	.ioctl = dgap_tty_ioctl,
	.set_termios = dgap_tty_set_termios,
	.stop = dgap_tty_stop,
	.start = dgap_tty_start,
	.throttle = dgap_tty_throttle,
	.unthrottle = dgap_tty_unthrottle,
	.hangup = dgap_tty_hangup,
	.put_char = dgap_tty_put_char,
	.tiocmget = dgap_tty_tiocmget,
	.tiocmset = dgap_tty_tiocmset,
	.break_ctl = dgap_tty_send_break,
	.wait_until_sent = dgap_tty_wait_until_sent,
	.send_xchar = dgap_tty_send_xchar
};
/*
 * Our needed internal static variables from dgap_parse.c
 */
static struct cnode dgap_head;		/* head of the parsed config tree */
#define MAXCWORD 200
static char dgap_cword[MAXCWORD];	/* scratch word buffer for the parser */

/* Maps a config-file keyword string to its parser token. */
struct toklist {
	int token;
	char *string;
};
/* Keyword table for the dgap.conf parser; terminated by a NULL string.
 * Note some tokens have more than one accepted spelling (e.g. "conc"
 * and "concentrator"). */
static struct toklist dgap_tlist[] = {
	{ BEGIN, "config_begin" },
	{ END, "config_end" },
	{ BOARD, "board" },
	{ PCX, "Digi_AccelePort_C/X_PCI" },
	{ PEPC, "Digi_AccelePort_EPC/X_PCI" },
	{ PPCM, "Digi_AccelePort_Xem_PCI" },
	{ APORT2_920P, "Digi_AccelePort_2r_920_PCI" },
	{ APORT4_920P, "Digi_AccelePort_4r_920_PCI" },
	{ APORT8_920P, "Digi_AccelePort_8r_920_PCI" },
	{ PAPORT4, "Digi_AccelePort_4r_PCI(EIA-232/RS-422)" },
	{ PAPORT8, "Digi_AccelePort_8r_PCI(EIA-232/RS-422)" },
	{ IO, "io" },
	{ PCIINFO, "pciinfo" },
	{ LINE, "line" },
	{ CONC, "conc" },
	{ CONC, "concentrator" },
	{ CX, "cx" },
	{ CX, "ccon" },
	{ EPC, "epccon" },
	{ EPC, "epc" },
	{ MOD, "module" },
	{ ID, "id" },
	{ STARTO, "start" },
	{ SPEED, "speed" },
	{ CABLE, "cable" },
	{ CONNECT, "connect" },
	{ METHOD, "method" },
	{ STATUS, "status" },
	{ CUSTOM, "Custom" },
	{ BASIC, "Basic" },
	{ MEM, "mem" },
	{ MEM, "memory" },
	{ PORTS, "ports" },
	{ MODEM, "modem" },
	{ NPORTS, "nports" },
	{ TTYN, "ttyname" },
	{ CU, "cuname" },
	{ PRINT, "prname" },
	{ CMAJOR, "major" },
	{ ALTPIN, "altpin" },
	{ USEINTR, "useintr" },
	{ TTSIZ, "ttysize" },
	{ CHSIZ, "chsize" },
	{ BSSIZ, "boardsize" },
	{ UNTSIZ, "schedsize" },
	{ F2SIZ, "f2200size" },
	{ VPSIZ, "vpixsize" },
	{ 0, NULL }
};
/************************************************************************
*
* Driver load/unload functions
*
************************************************************************/
/*
 * init_module()
 *
 * Module load. This is where it all starts.
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int dgap_init_module(void)
{
	int rc;

	pr_info("%s, Digi International Part Number %s\n", DG_NAME, DG_PART);

	/* chrdev, class, mgmt device and poll timer come up first. */
	rc = dgap_start();
	if (rc)
		return rc;

	rc = pci_register_driver(&dgap_driver);
	if (rc)
		goto err_cleanup;

	rc = dgap_create_driver_sysfiles(&dgap_driver);
	if (rc)
		goto err_cleanup;

	/* Let the poller start driving the board state machines. */
	dgap_driver_state = DRIVER_READY;

	return 0;

	/*
	 * NOTE(review): dgap_cleanup_module() calls pci_unregister_driver()
	 * only when dgap_numboards != 0, so if dgap_create_driver_sysfiles()
	 * fails before any board probed, the PCI driver stays registered;
	 * conversely if pci_register_driver() itself failed, the cleanup
	 * path removes sysfs files for a driver that was never registered.
	 * These error paths should be verified/reworked.
	 */
err_cleanup:
	dgap_cleanup_module();

	return rc;
}
module_init(dgap_init_module);
/*
 * Start of driver.
 *
 * Registers the management chrdev, creates the "dgap_mgmt" class and
 * device node, and arms the global poll timer. Returns 0 on success
 * or a negative errno, unwinding whatever was set up.
 */
static int dgap_start(void)
{
	int rc;
	unsigned long flags;
	struct device *device;

	/*
	 * make sure that the globals are
	 * init'd before we do anything else
	 */
	dgap_init_globals();

	dgap_numboards = 0;

	pr_info("For the tools package please visit http://www.digi.com\n");

	/*
	 * Register our base character device into the kernel.
	 */

	/*
	 * Register management/dpa devices
	 */
	rc = register_chrdev(DIGI_DGAP_MAJOR, "dgap", &dgap_board_fops);
	if (rc < 0)
		return rc;

	dgap_class = class_create(THIS_MODULE, "dgap_mgmt");
	if (IS_ERR(dgap_class)) {
		rc = PTR_ERR(dgap_class);
		goto failed_class;
	}

	device = device_create(dgap_class, NULL,
			       MKDEV(DIGI_DGAP_MAJOR, 0),
			       NULL, "dgap_mgmt");
	if (IS_ERR(device)) {
		rc = PTR_ERR(device);
		goto failed_device;
	}

	/* Start the poller */
	spin_lock_irqsave(&dgap_poll_lock, flags);
	init_timer(&dgap_poll_timer);
	dgap_poll_timer.function = dgap_poll_handler;
	dgap_poll_timer.data = 0;
	dgap_poll_time = jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
	dgap_poll_timer.expires = dgap_poll_time;
	spin_unlock_irqrestore(&dgap_poll_lock, flags);

	add_timer(&dgap_poll_timer);

	/* rc is presumably 0 here (left over from register_chrdev() with a
	 * fixed major) — TODO confirm, returning literal 0 would be clearer. */
	return rc;

failed_device:
	class_destroy(dgap_class);
failed_class:
	unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");
	return rc;
}
/*
 * PCI probe callback: bring one board fully up (map it, load firmware,
 * register its ttys, hook its IRQ) and publish it in dgap_board[].
 *
 * ent->driver_data indexes dgap_pci_tbl/dgap_ids/fw_info.
 * Returns 0 on success or a negative errno, unwinding in reverse order.
 */
static int dgap_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int rc;
	struct board_t *brd;

	if (dgap_numboards >= MAXBOARDS)
		return -EPERM;

	rc = pci_enable_device(pdev);
	if (rc)
		return -EIO;

	brd = dgap_found_board(pdev, ent->driver_data, dgap_numboards);
	if (IS_ERR(brd))
		return PTR_ERR(brd);

	rc = dgap_firmware_load(pdev, ent->driver_data, brd);
	if (rc)
		goto cleanup_brd;

	rc = dgap_alloc_flipbuf(brd);
	if (rc)
		goto cleanup_brd;

	rc = dgap_tty_register(brd);
	if (rc)
		goto free_flipbuf;

	rc = dgap_request_irq(brd);
	if (rc)
		goto unregister_tty;

	/*
	 * Do tty device initialization.
	 */
	rc = dgap_tty_init(brd);
	if (rc < 0)
		goto free_irq;

	rc = dgap_tty_register_ports(brd);
	if (rc)
		goto tty_free;

	/* Board is live; the poller will start servicing it. */
	brd->state = BOARD_READY;
	brd->dpastatus = BD_RUNNING;

	dgap_board[dgap_numboards++] = brd;

	return 0;

tty_free:
	dgap_tty_free(brd);
free_irq:
	dgap_free_irq(brd);
unregister_tty:
	dgap_tty_unregister(brd);
free_flipbuf:
	dgap_free_flipbuf(brd);
cleanup_brd:
	dgap_release_remap(brd);
	kfree(brd);

	return rc;
}
static void dgap_remove_one(struct pci_dev *dev)
{
	/*
	 * Do Nothing: boards are torn down in one pass by
	 * dgap_cleanup_module() at module unload, not per device.
	 */
}
/*
 * dgap_cleanup_module()
 *
 * Module unload. This is where it all ends.
 *
 * Stops the poller, removes sysfs/chrdev/class entries, then tears
 * down every probed board.
 */
static void dgap_cleanup_module(void)
{
	int i;
	ulong lock_flags;

	/* Tell the poll handler not to re-arm itself... */
	spin_lock_irqsave(&dgap_poll_lock, lock_flags);
	dgap_poll_stop = 1;
	spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);

	/* Turn off poller right away. */
	del_timer_sync(&dgap_poll_timer);

	dgap_remove_driver_sysfiles(&dgap_driver);

	device_destroy(dgap_class, MKDEV(DIGI_DGAP_MAJOR, 0));
	class_destroy(dgap_class);
	unregister_chrdev(DIGI_DGAP_MAJOR, "dgap");

	for (i = 0; i < dgap_numboards; ++i) {
		dgap_remove_ports_sysfiles(dgap_board[i]);
		dgap_cleanup_tty(dgap_board[i]);
		dgap_cleanup_board(dgap_board[i]);
	}

	/*
	 * NOTE(review): using dgap_numboards as a proxy for "PCI driver is
	 * registered" is wrong: registration succeeds even when zero boards
	 * probe, in which case the driver is never unregistered here.
	 * A dedicated "registered" flag would be correct.
	 */
	if (dgap_numboards)
		pci_unregister_driver(&dgap_driver);
}
/*
 * dgap_cleanup_board()
 *
 * Free all the memory associated with a board
 *
 * Releases the IRQ, kills the poll tasklet, unmaps and releases both
 * PCI memory windows, frees all channel structs and flip buffers,
 * clears the board's slot in dgap_board[], then frees the board itself.
 */
static void dgap_cleanup_board(struct board_t *brd)
{
	int i;

	if (!brd || brd->magic != DGAP_BOARD_MAGIC)
		return;

	dgap_free_irq(brd);

	tasklet_kill(&brd->helper_tasklet);

	/* Upper 2MB window: I/O registers (see dgap_found_board()). */
	if (brd->re_map_port) {
		release_mem_region(brd->membase + 0x200000, 0x200000);
		iounmap(brd->re_map_port);
		brd->re_map_port = NULL;
	}

	/* Lower 2MB window: board memory. */
	if (brd->re_map_membase) {
		release_mem_region(brd->membase, 0x200000);
		iounmap(brd->re_map_membase);
		brd->re_map_membase = NULL;
	}

	/* Free all allocated channels structs */
	for (i = 0; i < MAXPORTS ; i++)
		kfree(brd->channels[i]);

	kfree(brd->flipbuf);
	kfree(brd->flipflagbuf);

	dgap_board[brd->boardnum] = NULL;

	kfree(brd);
}
/*
 * dgap_found_board()
 *
 * A board has been found, init it.
 *
 * Allocates and fills a board_t from the PCI device and the static
 * product tables, maps the board's memory windows, and initializes the
 * poll tasklet. Returns the new board or an ERR_PTR on failure; on
 * failure nothing remains allocated.
 */
static struct board_t *dgap_found_board(struct pci_dev *pdev, int id,
					int boardnum)
{
	struct board_t *brd;
	unsigned int pci_irq;
	int i;
	int ret;

	/* get the board structure and prep it */
	brd = kzalloc(sizeof(struct board_t), GFP_KERNEL);
	if (!brd)
		return ERR_PTR(-ENOMEM);

	/* store the info for the board we've found */
	brd->magic = DGAP_BOARD_MAGIC;
	brd->boardnum = boardnum;
	brd->vendor = dgap_pci_tbl[id].vendor;
	brd->device = dgap_pci_tbl[id].device;
	brd->pdev = pdev;
	brd->pci_bus = pdev->bus->number;
	brd->pci_slot = PCI_SLOT(pdev->devfn);
	brd->name = dgap_ids[id].name;
	brd->maxports = dgap_ids[id].maxports;
	brd->type = dgap_ids[id].config_type;
	brd->dpatype = dgap_ids[id].dpatype;
	brd->dpastatus = BD_NOFEP;
	init_waitqueue_head(&brd->state_wait);

	spin_lock_init(&brd->bd_lock);

	brd->runwait		= 0;
	brd->inhibit_poller	= FALSE;
	brd->wait_for_bios	= 0;
	brd->wait_for_fep	= 0;

	for (i = 0; i < MAXPORTS; i++)
		brd->channels[i] = NULL;

	/* store which card & revision we have */
	pci_read_config_word(pdev, PCI_SUBSYSTEM_VENDOR_ID, &brd->subvendor);
	pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &brd->subdevice);
	pci_read_config_byte(pdev, PCI_REVISION_ID, &brd->rev);

	pci_irq = pdev->irq;
	brd->irq = pci_irq;

	/* get the PCI Base Address Registers */

	/* Xr Jupiter and EPC use BAR 2 */
	if (brd->device == PCI_DEV_XRJ_DID || brd->device == PCI_DEV_EPCJ_DID) {
		brd->membase     = pci_resource_start(pdev, 2);
		brd->membase_end = pci_resource_end(pdev, 2);
	}
	/* Everyone else uses BAR 0 */
	else {
		brd->membase     = pci_resource_start(pdev, 0);
		brd->membase_end = pci_resource_end(pdev, 0);
	}

	if (!brd->membase) {
		ret = -ENODEV;
		goto free_brd;
	}

	/* Strip the BAR flag bits off the address. */
	if (brd->membase & 1)
		brd->membase &= ~3;
	else
		brd->membase &= ~15;

	/*
	 * On the PCI boards, there is no IO space allocated
	 * The I/O registers will be in the first 3 bytes of the
	 * upper 2MB of the 4MB memory space.  The board memory
	 * will be mapped into the low 2MB of the 4MB memory space
	 */
	brd->port = brd->membase + PCI_IO_OFFSET;
	brd->port_end = brd->port + PCI_IO_SIZE;

	/*
	 * Special initialization for non-PLX boards
	 */
	if (brd->device != PCI_DEV_XRJ_DID && brd->device != PCI_DEV_EPCJ_DID) {
		unsigned short cmd;

		pci_write_config_byte(pdev, 0x40, 0);
		pci_write_config_byte(pdev, 0x46, 0);

		/* Limit burst length to 2 doubleword transactions */
		pci_write_config_byte(pdev, 0x42, 1);

		/*
		 * Enable IO and mem if not already done.
		 * This was needed for support on Itanium.
		 */
		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
		cmd |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY);
		pci_write_config_word(pdev, PCI_COMMAND, cmd);
	}

	/* init our poll helper tasklet */
	tasklet_init(&brd->helper_tasklet, dgap_poll_tasklet,
		     (unsigned long) brd);

	ret = dgap_do_remap(brd);
	if (ret)
		goto free_brd;

	pr_info("dgap: board %d: %s (rev %d), irq %ld\n",
		boardnum, brd->name, brd->rev, brd->irq);

	return brd;

free_brd:
	kfree(brd);

	return ERR_PTR(ret);
}
/*
 * Hook the board's interrupt line, if the configuration asks for
 * interrupt mode and the board has an IRQ assigned.
 *
 * A failed request_irq() is not fatal: the driver simply falls back to
 * polled operation (intr_used stays 0). Returns -ENODEV only for a bad
 * board pointer, otherwise 0.
 */
static int dgap_request_irq(struct board_t *brd)
{
	int rc;

	if (!brd || brd->magic != DGAP_BOARD_MAGIC)
		return -ENODEV;

	brd->use_interrupts = dgap_config_get_useintr(brd);

	/*
	 * Set up our interrupt handler if we are set to do interrupts.
	 */
	brd->intr_used = 0;
	if (brd->use_interrupts && brd->irq) {
		rc = request_irq(brd->irq, dgap_intr, IRQF_SHARED, "DGAP",
				 brd);
		if (!rc)
			brd->intr_used = 1;
	}
	return 0;
}
/* Release the board's IRQ, but only if dgap_request_irq() got one. */
static void dgap_free_irq(struct board_t *brd)
{
	if (!brd->intr_used || !brd->irq)
		return;

	free_irq(brd->irq, brd);
}
static int dgap_firmware_load(struct pci_dev *pdev, int card_type,
struct board_t *brd)
{
const struct firmware *fw;
char *tmp_ptr;
int ret;
char *dgap_config_buf;
dgap_get_vpd(brd);
dgap_do_reset_board(brd);
if (fw_info[card_type].conf_name) {
ret = request_firmware(&fw, fw_info[card_type].conf_name,
&pdev->dev);
if (ret) {
pr_err("dgap: config file %s not found\n",
fw_info[card_type].conf_name);
return ret;
}
dgap_config_buf = kzalloc(fw->size + 1, GFP_KERNEL);
if (!dgap_config_buf) {
release_firmware(fw);
return -ENOMEM;
}
memcpy(dgap_config_buf, fw->data, fw->size);
release_firmware(fw);
/*
* preserve dgap_config_buf
* as dgap_parsefile would
* otherwise alter it.
*/
tmp_ptr = dgap_config_buf;
if (dgap_parsefile(&tmp_ptr) != 0) {
kfree(dgap_config_buf);
return -EINVAL;
}
kfree(dgap_config_buf);
}
/*
* Match this board to a config the user created for us.
*/
brd->bd_config =
dgap_find_config(brd->type, brd->pci_bus, brd->pci_slot);
/*
* Because the 4 port Xr products share the same PCI ID
* as the 8 port Xr products, if we receive a NULL config
* back, and this is a PAPORT8 board, retry with a
* PAPORT4 attempt as well.
*/
if (brd->type == PAPORT8 && !brd->bd_config)
brd->bd_config =
dgap_find_config(PAPORT4, brd->pci_bus, brd->pci_slot);
if (!brd->bd_config) {
pr_err("dgap: No valid configuration found\n");
return -EINVAL;
}
if (fw_info[card_type].bios_name) {
ret = request_firmware(&fw, fw_info[card_type].bios_name,
&pdev->dev);
if (ret) {
pr_err("dgap: bios file %s not found\n",
fw_info[card_type].bios_name);
return ret;
}
dgap_do_bios_load(brd, fw->data, fw->size);
release_firmware(fw);
/* Wait for BIOS to test board... */
ret = dgap_test_bios(brd);
if (ret)
return ret;
}
if (fw_info[card_type].fep_name) {
ret = request_firmware(&fw, fw_info[card_type].fep_name,
&pdev->dev);
if (ret) {
pr_err("dgap: fep file %s not found\n",
fw_info[card_type].fep_name);
return ret;
}
dgap_do_fep_load(brd, fw->data, fw->size);
release_firmware(fw);
/* Wait for FEP to load on board... */
ret = dgap_test_fep(brd);
if (ret)
return ret;
}
#ifdef DIGI_CONCENTRATORS_SUPPORTED
/*
* If this is a CX or EPCX, we need to see if the firmware
* is requesting a concentrator image from us.
*/
if ((bd->type == PCX) || (bd->type == PEPC)) {
chk_addr = (u16 *) (vaddr + DOWNREQ);
/* Nonzero if FEP is requesting concentrator image. */
check = readw(chk_addr);
vaddr = brd->re_map_membase;
}
if (fw_info[card_type].con_name && check && vaddr) {
ret = request_firmware(&fw, fw_info[card_type].con_name,
&pdev->dev);
if (ret) {
pr_err("dgap: conc file %s not found\n",
fw_info[card_type].con_name);
return ret;
}
/* Put concentrator firmware loading code here */
offset = readw((u16 *) (vaddr + DOWNREQ));
memcpy_toio(offset, fw->data, fw->size);
dgap_do_conc_load(brd, (char *)fw->data, fw->size)
release_firmware(fw);
}
#endif
return 0;
}
/*
 * Remap PCI memory.
 *
 * Claims and ioremaps the two 2MB windows of the board's 4MB space:
 * the low half (board memory, re_map_membase) and the high half at
 * PCI_IO_OFFSET (I/O registers, re_map_port). On any failure, every
 * step already taken is undone and -ENOMEM (or -EIO for a bad board
 * pointer) is returned.
 */
static int dgap_do_remap(struct board_t *brd)
{
	if (!brd || brd->magic != DGAP_BOARD_MAGIC)
		return -EIO;

	if (!request_mem_region(brd->membase, 0x200000, "dgap"))
		return -ENOMEM;

	if (!request_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000,
				"dgap"))
		goto release_membase_region;

	brd->re_map_membase = ioremap(brd->membase, 0x200000);
	if (!brd->re_map_membase)
		goto release_io_region;

	brd->re_map_port = ioremap((brd->membase + PCI_IO_OFFSET), 0x200000);
	if (!brd->re_map_port)
		goto unmap_membase;

	return 0;

unmap_membase:
	iounmap(brd->re_map_membase);
release_io_region:
	release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
release_membase_region:
	release_mem_region(brd->membase, 0x200000);
	return -ENOMEM;
}
/*
 * Undo dgap_do_remap(): release both 2MB mem regions and unmap both
 * ioremapped windows. Callers (dgap_init_one() error path) only reach
 * this after dgap_do_remap() succeeded, so both mappings exist.
 */
static void dgap_release_remap(struct board_t *brd)
{
	release_mem_region(brd->membase, 0x200000);
	release_mem_region(brd->membase + PCI_IO_OFFSET, 0x200000);
	iounmap(brd->re_map_membase);
	/* Previously missing: re_map_port was ioremapped by
	 * dgap_do_remap() but never unmapped here, leaking the mapping. */
	iounmap(brd->re_map_port);
}
/*****************************************************************************
*
* Function:
*
*	dgap_poll_handler
*
* Author:
*
*	Scott H Kilau
*
* Parameters:
*
*	dummy -- ignored.
*
* Return Values:
*
*	none.
*
* Description:
*
*	As each timer expires, it determines (a) whether the "transmit"
*	waiter needs to be woken up, and (b) whether the poller needs to
*	be rescheduled.
*
******************************************************************************/
static void dgap_poll_handler(ulong dummy)
{
	int i;
	struct board_t *brd;
	unsigned long lock_flags;
	ulong new_time;

	dgap_poll_counter++;

	/*
	 * Do not start the board state machine until
	 * driver tells us its up and running, and has
	 * everything it needs.
	 */
	if (dgap_driver_state != DRIVER_READY)
		goto schedule_poller;

	/*
	 * If we have just 1 board, or the system is not SMP,
	 * then use the typical old style poller.
	 * Otherwise, use our new tasklet based poller, which should
	 * speed things up for multiple boards.
	 */
	if ((dgap_numboards == 1) || (num_online_cpus() <= 1)) {
		for (i = 0; i < dgap_numboards; i++) {
			brd = dgap_board[i];

			if (brd->state == BOARD_FAILED)
				continue;
			if (!brd->intr_running)
				/* Call the real board poller directly */
				dgap_poll_tasklet((unsigned long) brd);
		}
	} else {
		/*
		 * Go thru each board, kicking off a
		 * tasklet for each if needed
		 */
		for (i = 0; i < dgap_numboards; i++) {
			brd = dgap_board[i];

			/*
			 * Attempt to grab the board lock.
			 *
			 * If we can't get it, no big deal, the next poll
			 * will get it. Basically, I just really don't want
			 * to spin in here, because I want to kick off my
			 * tasklets as fast as I can, and then get out the
			 * poller.
			 */
			if (!spin_trylock(&brd->bd_lock))
				continue;

			/*
			 * If board is in a failed state, don't bother
			 * scheduling a tasklet
			 */
			if (brd->state == BOARD_FAILED) {
				spin_unlock(&brd->bd_lock);
				continue;
			}

			/* Schedule a poll helper task */
			if (!brd->intr_running)
				tasklet_schedule(&brd->helper_tasklet);

			/*
			 * Can't do DGAP_UNLOCK here, as we don't have
			 * lock_flags because we did a trylock above.
			 */
			spin_unlock(&brd->bd_lock);
		}
	}

schedule_poller:

	/*
	 * Schedule ourself back at the nominal wakeup interval.
	 * If we have drifted more than two ticks behind, resynchronize
	 * to "now + one tick" instead of trying to catch up.
	 */
	spin_lock_irqsave(&dgap_poll_lock, lock_flags);
	dgap_poll_time += dgap_jiffies_from_ms(dgap_poll_tick);

	new_time = dgap_poll_time - jiffies;

	if ((ulong) new_time >= 2 * dgap_poll_tick) {
		dgap_poll_time =
			jiffies + dgap_jiffies_from_ms(dgap_poll_tick);
	}

	dgap_poll_timer.function = dgap_poll_handler;
	dgap_poll_timer.data = 0;
	dgap_poll_timer.expires = dgap_poll_time;
	spin_unlock_irqrestore(&dgap_poll_lock, lock_flags);

	if (!dgap_poll_stop)
		add_timer(&dgap_poll_timer);
}
/*
 * dgap_intr()
 *
 * Driver interrupt handler.
 *
 * The IRQ line is shared (IRQF_SHARED), so the magic number check
 * doubles as a "was this really ours?" test. All real work is
 * deferred to the board's helper tasklet.
 */
static irqreturn_t dgap_intr(int irq, void *voidbrd)
{
	struct board_t *brd = voidbrd;

	/*
	 * Check to make sure its for us.
	 */
	if (!brd || brd->magic != DGAP_BOARD_MAGIC)
		return IRQ_NONE;

	brd->intr_count++;

	/*
	 * Schedule tasklet to run at a better time.
	 */
	tasklet_schedule(&brd->helper_tasklet);

	return IRQ_HANDLED;
}
/*
 * dgap_init_globals()
 *
 * This is where we initialize the globals from the static insmod
 * configuration variables.  These are declared near the head of
 * this file.
 */
static void dgap_init_globals(void)
{
	int board;

	/* No boards discovered yet. */
	for (board = 0; board < MAXBOARDS; board++)
		dgap_board[board] = NULL;

	init_timer(&dgap_poll_timer);
}
/************************************************************************
 *
 * Utility functions
 *
 ************************************************************************/

/*
 * dgap_ms_sleep()
 *
 * Put the driver to sleep for x ms's
 *
 * Returns 0 if timed out, !0 (showing signal) if interrupted by a signal.
 */
static int dgap_ms_sleep(ulong ms)
{
	/*
	 * schedule_timeout_interruptible() sets the task state through
	 * set_current_state() — the old direct "current->state = ..."
	 * write is deprecated — and msecs_to_jiffies() replaces the
	 * open-coded (ms * HZ) / 1000, which truncates when HZ < 1000.
	 */
	schedule_timeout_interruptible(msecs_to_jiffies(ms));
	return signal_pending(current);
}
/************************************************************************
 *
 * TTY Initialization/Cleanup Functions
 *
 ************************************************************************/

/*
 * dgap_tty_register()
 *
 * Init the tty subsystem for this board.
 *
 * Allocates and registers two tty drivers: the normal serial driver
 * ("tty_dgap_<N>_") and the transparent-print driver ("pr_dgap_<N>_"),
 * both using dgap_tty_ops. On success the board is recorded in
 * dgap_boards_by_major[] under both dynamically-assigned majors.
 * Returns 0 or a negative errno, unwinding in reverse order.
 */
static int dgap_tty_register(struct board_t *brd)
{
	int rc;

	brd->serial_driver = tty_alloc_driver(MAXPORTS, 0);
	if (IS_ERR(brd->serial_driver))
		return PTR_ERR(brd->serial_driver);

	snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgap_%d_",
		 brd->boardnum);
	brd->serial_driver->name = brd->serial_name;
	brd->serial_driver->name_base = 0;
	brd->serial_driver->major = 0;	/* 0 = let the kernel pick a major */
	brd->serial_driver->minor_start = 0;
	brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
	brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
	brd->serial_driver->init_termios = dgap_default_termios;
	brd->serial_driver->driver_name = DRVSTR;
	brd->serial_driver->flags = (TTY_DRIVER_REAL_RAW |
				     TTY_DRIVER_DYNAMIC_DEV |
				     TTY_DRIVER_HARDWARE_BREAK);

	/* The kernel wants space to store pointers to tty_structs */
	brd->serial_driver->ttys =
		kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
	if (!brd->serial_driver->ttys) {
		rc = -ENOMEM;
		goto free_serial_drv;
	}

	/*
	 * Entry points for driver.  Called by the kernel from
	 * tty_io.c and n_tty.c.
	 */
	tty_set_operations(brd->serial_driver, &dgap_tty_ops);

	/*
	 * If we're doing transparent print, we have to do all of the above
	 * again, separately so we don't get the LD confused about what major
	 * we are when we get into the dgap_tty_open() routine.
	 */
	brd->print_driver = tty_alloc_driver(MAXPORTS, 0);
	if (IS_ERR(brd->print_driver)) {
		rc = PTR_ERR(brd->print_driver);
		goto free_serial_drv;
	}

	snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgap_%d_",
		 brd->boardnum);
	brd->print_driver->name = brd->print_name;
	brd->print_driver->name_base = 0;
	brd->print_driver->major = 0;
	brd->print_driver->minor_start = 0;
	brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
	brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
	brd->print_driver->init_termios = dgap_default_termios;
	brd->print_driver->driver_name = DRVSTR;
	brd->print_driver->flags = (TTY_DRIVER_REAL_RAW |
				    TTY_DRIVER_DYNAMIC_DEV |
				    TTY_DRIVER_HARDWARE_BREAK);

	/* The kernel wants space to store pointers to tty_structs */
	brd->print_driver->ttys =
		kzalloc(MAXPORTS * sizeof(struct tty_struct *), GFP_KERNEL);
	if (!brd->print_driver->ttys) {
		rc = -ENOMEM;
		goto free_print_drv;
	}

	/*
	 * Entry points for driver.  Called by the kernel from
	 * tty_io.c and n_tty.c.
	 */
	tty_set_operations(brd->print_driver, &dgap_tty_ops);

	/* Register tty devices */
	rc = tty_register_driver(brd->serial_driver);
	if (rc < 0)
		goto free_print_drv;

	/* Register Transparent Print devices */
	rc = tty_register_driver(brd->print_driver);
	if (rc < 0)
		goto unregister_serial_drv;

	/* Record the majors the kernel assigned so ioctl paths can map
	 * a major back to this board. */
	brd->dgap_major_serial_registered = TRUE;
	dgap_boards_by_major[brd->serial_driver->major] = brd;
	brd->dgap_serial_major = brd->serial_driver->major;

	brd->dgap_major_transparent_print_registered = TRUE;
	dgap_boards_by_major[brd->print_driver->major] = brd;
	brd->dgap_transparent_print_major = brd->print_driver->major;

	return 0;

unregister_serial_drv:
	tty_unregister_driver(brd->serial_driver);
free_print_drv:
	put_tty_driver(brd->print_driver);
free_serial_drv:
	put_tty_driver(brd->serial_driver);

	return rc;
}
/* Undo dgap_tty_register(): unregister and drop both tty drivers.
 * put_tty_driver() also frees the ->ttys arrays via driver cleanup. */
static void dgap_tty_unregister(struct board_t *brd)
{
	tty_unregister_driver(brd->print_driver);
	tty_unregister_driver(brd->serial_driver);
	put_tty_driver(brd->print_driver);
	put_tty_driver(brd->serial_driver);
}
/*
 * dgap_tty_init()
 *
 * Init the tty subsystem.  Called once per board after board has been
 * downloaded and init'ed.
 *
 * Reads the true channel count from the FEP, reconciles it with the
 * configured/maximum port counts, allocates the channel structs, and
 * initializes each channel's FEP buffer-structure pointers, water
 * marks and modem-interrupt mask. Returns 0 or a negative errno.
 */
static int dgap_tty_init(struct board_t *brd)
{
	int i;
	int tlw;
	uint true_count;
	u8 __iomem *vaddr;
	u8 modem;
	struct channel_t *ch;
	struct bs_t __iomem *bs;
	struct cm_t __iomem *cm;
	int ret;

	/*
	 * Initialize board structure elements.
	 */

	vaddr = brd->re_map_membase;
	true_count = readw((vaddr + NCHAN));

	brd->nasync = dgap_config_get_num_prts(brd);

	if (!brd->nasync)
		brd->nasync = brd->maxports;

	if (brd->nasync > brd->maxports)
		brd->nasync = brd->maxports;

	if (true_count != brd->nasync) {
		if ((brd->type == PPCM) && (true_count == 64)) {
			pr_warn("dgap: %s configured for %d ports, has %d ports.\n",
				brd->name, brd->nasync, true_count);
			pr_warn("dgap: Please make SURE the EBI cable running from the card\n");
			pr_warn("dgap: to each EM module is plugged into EBI IN!\n");
		} else if ((brd->type == PPCM) && (true_count == 0)) {
			pr_warn("dgap: %s configured for %d ports, has %d ports.\n",
				brd->name, brd->nasync, true_count);
			pr_warn("dgap: Please make SURE the EBI cable running from the card\n");
			pr_warn("dgap: to each EM module is plugged into EBI IN!\n");
		} else
			pr_warn("dgap: %s configured for %d ports, has %d ports.\n",
				brd->name, brd->nasync, true_count);

		/* The FEP's count wins. */
		brd->nasync = true_count;

		/* If no ports, don't bother going any further */
		if (!brd->nasync) {
			brd->state = BOARD_FAILED;
			brd->dpastatus = BD_NOFEP;
			return -EIO;
		}
	}

	/*
	 * Allocate channel memory that might not have been allocated
	 * when the driver was first loaded.
	 */
	for (i = 0; i < brd->nasync; i++) {
		brd->channels[i] =
			kzalloc(sizeof(struct channel_t), GFP_KERNEL);
		if (!brd->channels[i]) {
			ret = -ENOMEM;
			goto free_chan;
		}
	}

	vaddr = brd->re_map_membase;
	bs = (struct bs_t __iomem *) ((ulong) vaddr + CHANBUF);
	cm = (struct cm_t __iomem *) ((ulong) vaddr + CMDBUF);

	brd->bd_bs = bs;

	/*
	 * Set up channel variables.
	 *
	 * The channel pointer is now fetched at the top of the body: the
	 * old "i++, ch = brd->channels[i], bs++" loop increment read
	 * brd->channels[brd->nasync] — one past the last valid slot —
	 * on the final iteration (out of bounds when nasync == MAXPORTS).
	 */
	for (i = 0; i < brd->nasync; i++, bs++) {
		ch = brd->channels[i];

		spin_lock_init(&ch->ch_lock);

		/* Store all our magic numbers */
		ch->magic = DGAP_CHANNEL_MAGIC;
		ch->ch_tun.magic = DGAP_UNIT_MAGIC;
		ch->ch_tun.un_type = DGAP_SERIAL;
		ch->ch_tun.un_ch = ch;
		ch->ch_tun.un_dev = i;

		ch->ch_pun.magic = DGAP_UNIT_MAGIC;
		ch->ch_pun.un_type = DGAP_PRINT;
		ch->ch_pun.un_ch = ch;
		ch->ch_pun.un_dev = i;

		ch->ch_vaddr = vaddr;
		ch->ch_bs = bs;
		ch->ch_cm = cm;
		ch->ch_bd = brd;
		ch->ch_portnum = i;
		ch->ch_digi = dgap_digi_init;

		/*
		 * Set up digi dsr and dcd bits based on altpin flag.
		 */
		if (dgap_config_get_altpin(brd)) {
			ch->ch_dsr	= DM_CD;
			ch->ch_cd	= DM_DSR;
			ch->ch_digi.digi_flags |= DIGI_ALTPIN;
		} else {
			ch->ch_cd	= DM_CD;
			ch->ch_dsr	= DM_DSR;
		}

		ch->ch_taddr = vaddr + (ioread16(&(ch->ch_bs->tx_seg)) << 4);
		ch->ch_raddr = vaddr + (ioread16(&(ch->ch_bs->rx_seg)) << 4);
		ch->ch_tx_win = 0;
		ch->ch_rx_win = 0;
		ch->ch_tsize = readw(&(ch->ch_bs->tx_max)) + 1;
		ch->ch_rsize = readw(&(ch->ch_bs->rx_max)) + 1;
		ch->ch_tstart = 0;
		ch->ch_rstart = 0;

		/* .25 second delay */
		ch->ch_close_delay = 250;

		/*
		 * Set queue water marks, interrupt mask,
		 * and general tty parameters.
		 */
		tlw = ch->ch_tsize >= 2000 ? ((ch->ch_tsize * 5) / 8) :
						ch->ch_tsize / 2;
		ch->ch_tlw = tlw;
		dgap_cmdw(ch, STLOW, tlw, 0);

		dgap_cmdw(ch, SRLOW, ch->ch_rsize / 2, 0);

		dgap_cmdw(ch, SRHIGH, 7 * ch->ch_rsize / 8, 0);

		ch->ch_mistat = readb(&(ch->ch_bs->m_stat));

		init_waitqueue_head(&ch->ch_flags_wait);
		init_waitqueue_head(&ch->ch_tun.un_flags_wait);
		init_waitqueue_head(&ch->ch_pun.un_flags_wait);

		/* Turn on all modem interrupts for now */
		modem = (DM_CD | DM_DSR | DM_CTS | DM_RI);
		writeb(modem, &(ch->ch_bs->m_int));

		/*
		 * Set edelay to 0 if interrupts are turned on,
		 * otherwise set edelay to the usual 100.
		 */
		if (brd->intr_used)
			writew(0, &(ch->ch_bs->edelay));
		else
			writew(100, &(ch->ch_bs->edelay));

		writeb(1, &(ch->ch_bs->idata));
	}

	return 0;

free_chan:
	while (--i >= 0) {
		kfree(brd->channels[i]);
		brd->channels[i] = NULL;
	}
	return ret;
}
/*
* dgap_tty_free()
*
* Free the channles which are allocated in dgap_tty_init().
*/
static void dgap_tty_free(struct board_t *brd)
{
int i;
for (i = 0; i < brd->nasync; i++)
kfree(brd->channels[i]);
}
/*
 * dgap_cleanup_tty()
 *
 * Uninitialize the TTY portion of this driver.  Free all memory and
 * resources.
 *
 * For each of the two tty drivers (serial, transparent print): clear
 * the major lookup entry, destroy each port's tty_port, remove its
 * sysfs files, unregister the device, then drop the driver itself.
 */
static void dgap_cleanup_tty(struct board_t *brd)
{
	struct device *dev;
	int i;

	if (brd->dgap_major_serial_registered) {
		dgap_boards_by_major[brd->serial_driver->major] = NULL;
		brd->dgap_serial_major = 0;
		for (i = 0; i < brd->nasync; i++) {
			tty_port_destroy(&brd->serial_ports[i]);
			dev = brd->channels[i]->ch_tun.un_sysfs;
			dgap_remove_tty_sysfs(dev);
			tty_unregister_device(brd->serial_driver, i);
		}
		tty_unregister_driver(brd->serial_driver);
		put_tty_driver(brd->serial_driver);
		kfree(brd->serial_ports);
		brd->dgap_major_serial_registered = FALSE;
	}

	if (brd->dgap_major_transparent_print_registered) {
		dgap_boards_by_major[brd->print_driver->major] = NULL;
		brd->dgap_transparent_print_major = 0;
		for (i = 0; i < brd->nasync; i++) {
			tty_port_destroy(&brd->printer_ports[i]);
			dev = brd->channels[i]->ch_pun.un_sysfs;
			dgap_remove_tty_sysfs(dev);
			tty_unregister_device(brd->print_driver, i);
		}
		tty_unregister_driver(brd->print_driver);
		put_tty_driver(brd->print_driver);
		kfree(brd->printer_ports);
		brd->dgap_major_transparent_print_registered = FALSE;
	}
}
/*=======================================================================
 *
 *      dgap_input - Process received data.
 *
 *              ch      - Pointer to channel structure.
 *
 *      Drains the FEP's receive ring into the board flip buffer and
 *      pushes it to the tty layer, taking the parity-scan path when
 *      input flags require per-character flag bytes. Holds bd_lock
 *      then ch_lock for the ring manipulation; drops both before
 *      calling into the tty layer.
 *
 *=======================================================================*/
static void dgap_input(struct channel_t *ch)
{
	struct board_t *bd;
	struct bs_t __iomem *bs;
	struct tty_struct *tp;
	struct tty_ldisc *ld;
	uint rmask;
	uint head;
	uint tail;
	int data_len;
	ulong lock_flags;
	ulong lock_flags2;
	int flip_len;
	int len;
	int n;
	u8 *buf;
	u8 tmpchar;
	int s;

	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	tp = ch->ch_tun.un_tty;

	bs  = ch->ch_bs;
	if (!bs)
		return;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	/*
	 *      Figure the number of characters in the buffer.
	 *      Exit immediately if none.
	 */

	rmask = ch->ch_rsize - 1;

	head = readw(&(bs->rx_head));
	head &= rmask;
	tail = readw(&(bs->rx_tail));
	tail &= rmask;

	data_len = (head - tail) & rmask;

	if (data_len == 0) {
		/* Re-enable the FEP's receive-data interrupt and bail. */
		writeb(1, &(bs->idata));
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return;
	}

	/*
	 * If the device is not open, or CREAD is off, flush
	 * input data and return immediately.
	 */
	if ((bd->state != BOARD_READY) || !tp  ||
	    (tp->magic != TTY_MAGIC) ||
	    !(ch->ch_tun.un_flags & UN_ISOPEN) ||
	    !(tp->termios.c_cflag & CREAD) ||
	    (ch->ch_tun.un_flags & UN_CLOSING)) {

		/* Discard by advancing the tail to the head. */
		writew(head, &(bs->rx_tail));
		writeb(1, &(bs->idata));
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return;
	}

	/*
	 * If we are throttled, simply don't read any data.
	 */
	if (ch->ch_flags & CH_RXBLOCK) {
		writeb(1, &(bs->idata));
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return;
	}

	/*
	 *      Ignore oruns.
	 */
	tmpchar = readb(&(bs->orun));
	if (tmpchar) {
		ch->ch_err_overrun++;
		writeb(0, &(bs->orun));
	}

	/* Decide how much data we can send into the tty layer */
	flip_len = TTY_FLIPBUF_SIZE;

	/* Chop down the length, if needed */
	len = min(data_len, flip_len);
	len = min(len, (N_TTY_BUF_SIZE - 1));

	ld = tty_ldisc_ref(tp);

#ifdef TTY_DONT_FLIP
	/*
	 * If the DONT_FLIP flag is on, don't flush our buffer, and act
	 * like the ld doesn't have any space to put the data right now.
	 */
	if (test_bit(TTY_DONT_FLIP, &tp->flags))
		len = 0;
#endif

	/*
	 * If we were unable to get a reference to the ld,
	 * don't flush our buffer, and act like the ld doesn't
	 * have any space to put the data right now.
	 */
	if (!ld) {
		len = 0;
	} else {
		/*
		 * If ld doesn't have a pointer to a receive_buf function,
		 * flush the data, then act like the ld doesn't have any
		 * space to put the data right now.
		 */
		if (!ld->ops->receive_buf) {
			writew(head, &(bs->rx_tail));
			len = 0;
		}
	}

	if (len <= 0) {
		writeb(1, &(bs->idata));
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		if (ld)
			tty_ldisc_deref(ld);
		return;
	}

	buf = ch->ch_bd->flipbuf;
	n = len;

	/*
	 * n now contains the most amount of data we can copy,
	 * bounded either by our buffer size or the amount
	 * of data the card actually has pending...
	 */
	while (n) {
		/* Copy at most up to the ring's wrap point per pass. */
		s = ((head >= tail) ? head : ch->ch_rsize) - tail;
		s = min(s, n);

		if (s <= 0)
			break;

		memcpy_fromio(buf, ch->ch_raddr + tail, s);

		tail += s;
		buf += s;
		n -= s;
		/* Flip queue if needed */
		tail &= rmask;
	}

	writew(tail, &(bs->rx_tail));
	writeb(1, &(bs->idata));
	ch->ch_rxcount += len;

	/*
	 * If we are completely raw, we don't need to go through a lot
	 * of the tty layers that exist.
	 * In this case, we take the shortest and fastest route we
	 * can to relay the data to the user.
	 *
	 * On the other hand, if we are not raw, we need to go through
	 * the tty layer, which has its API more well defined.
	 */
	if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
		dgap_parity_scan(ch, ch->ch_bd->flipbuf,
				 ch->ch_bd->flipflagbuf, &len);

		len = tty_buffer_request_room(tp->port, len);
		tty_insert_flip_string_flags(tp->port, ch->ch_bd->flipbuf,
					     ch->ch_bd->flipflagbuf, len);
	} else {
		len = tty_buffer_request_room(tp->port, len);
		tty_insert_flip_string(tp->port, ch->ch_bd->flipbuf, len);
	}

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);

	/* Tell the tty layer its okay to "eat" the data now */
	tty_flip_buffer_push(tp->port);

	if (ld)
		tty_ldisc_deref(ld);
}
/************************************************************************
 * Determines when CARRIER changes state and takes appropriate
 * action.
 ************************************************************************/
/*
 * dgap_carrier()
 *
 * Re-evaluates the "virtual" carrier (forced by DIGI_FORCEDCD or CLOCAL)
 * and the physical DCD state for a channel, wakes any sleepers blocked
 * on a carrier transition, hangs up open units when carrier is lost, and
 * finally caches the new state in ch->ch_flags (CH_FCAR / CH_CD).
 *
 * Caller context: assumed to hold whatever locking the caller normally
 * uses around channel state — NOTE(review): no lock is taken here; confirm
 * callers serialize access to ch->ch_flags.
 */
static void dgap_carrier(struct channel_t *ch)
{
	struct board_t *bd;

	int virt_carrier = 0;	/* 1 = carrier forced on (FORCEDCD/CLOCAL) */
	int phys_carrier = 0;	/* 1 = DCD pin actually asserted */

	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	bd = ch->ch_bd;

	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return;

	/* Make sure altpin is always set correctly */
	/* DIGI_ALTPIN swaps which modem-status bits mean DSR and CD. */
	if (ch->ch_digi.digi_flags & DIGI_ALTPIN) {
		ch->ch_dsr = DM_CD;
		ch->ch_cd = DM_DSR;
	} else {
		ch->ch_dsr = DM_DSR;
		ch->ch_cd = DM_CD;
	}

	if (ch->ch_mistat & D_CD(ch))
		phys_carrier = 1;

	if (ch->ch_digi.digi_flags & DIGI_FORCEDCD)
		virt_carrier = 1;

	if (ch->ch_c_cflag & CLOCAL)
		virt_carrier = 1;

	/*
	 * Test for a VIRTUAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_FCAR) == 0) && (virt_carrier == 1)) {
		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */
		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL carrier transition to HIGH.
	 */
	if (((ch->ch_flags & CH_CD) == 0) && (phys_carrier == 1)) {
		/*
		 * When carrier rises, wake any threads waiting
		 * for carrier in the open routine.
		 */
		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);
	}

	/*
	 * Test for a PHYSICAL transition to low, so long as we aren't
	 * currently ignoring physical transitions (which is what "virtual
	 * carrier" indicates).
	 *
	 * The transition of the virtual carrier to low really doesn't
	 * matter... it really only means "ignore carrier state", not
	 * "make pretend that carrier is there".
	 */
	if ((virt_carrier == 0) &&
	    ((ch->ch_flags & CH_CD) != 0) &&
	    (phys_carrier == 0)) {
		/*
		 * When carrier drops:
		 *
		 * Drop carrier on all open units.
		 *
		 * Flush queues, waking up any task waiting in the
		 * line discipline.
		 *
		 * Send a hangup to the control terminal.
		 *
		 * Enable all select calls.
		 */
		if (waitqueue_active(&(ch->ch_flags_wait)))
			wake_up_interruptible(&ch->ch_flags_wait);

		if (ch->ch_tun.un_open_count > 0)
			tty_hangup(ch->ch_tun.un_tty);

		if (ch->ch_pun.un_open_count > 0)
			tty_hangup(ch->ch_pun.un_tty);
	}

	/*
	 * Make sure that our cached values reflect the current reality.
	 */
	if (virt_carrier == 1)
		ch->ch_flags |= CH_FCAR;
	else
		ch->ch_flags &= ~CH_FCAR;

	if (phys_carrier == 1)
		ch->ch_flags |= CH_CD;
	else
		ch->ch_flags &= ~CH_CD;
}
/************************************************************************
*
* TTY Entry points and helper functions
*
************************************************************************/
/*
* dgap_tty_open()
*
*/
static int dgap_tty_open(struct tty_struct *tty, struct file *file)
{
struct board_t *brd;
struct channel_t *ch;
struct un_t *un;
struct bs_t __iomem *bs;
uint major;
uint minor;
int rc;
ulong lock_flags;
ulong lock_flags2;
u16 head;
major = MAJOR(tty_devnum(tty));
minor = MINOR(tty_devnum(tty));
if (major > 255)
return -EIO;
/* Get board pointer from our array of majors we have allocated */
brd = dgap_boards_by_major[major];
if (!brd)
return -EIO;
/*
* If board is not yet up to a state of READY, go to
* sleep waiting for it to happen or they cancel the open.
*/
rc = wait_event_interruptible(brd->state_wait,
(brd->state & BOARD_READY));
if (rc)
return rc;
spin_lock_irqsave(&brd->bd_lock, lock_flags);
/* The wait above should guarantee this cannot happen */
if (brd->state != BOARD_READY) {
spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
return -EIO;
}
/* If opened device is greater than our number of ports, bail. */
if (MINOR(tty_devnum(tty)) > brd->nasync) {
spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
return -EIO;
}
ch = brd->channels[minor];
if (!ch) {
spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
return -EIO;
}
/* Grab channel lock */
spin_lock_irqsave(&ch->ch_lock, lock_flags2);
/* Figure out our type */
if (major == brd->dgap_serial_major) {
un = &brd->channels[minor]->ch_tun;
un->un_type = DGAP_SERIAL;
} else if (major == brd->dgap_transparent_print_major) {
un = &brd->channels[minor]->ch_pun;
un->un_type = DGAP_PRINT;
} else {
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
return -EIO;
}
/* Store our unit into driver_data, so we always have it available. */
tty->driver_data = un;
/*
* Error if channel info pointer is NULL.
*/
bs = ch->ch_bs;
if (!bs) {
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
return -EIO;
}
/*
* Initialize tty's
*/
if (!(un->un_flags & UN_ISOPEN)) {
/* Store important variables. */
un->un_tty = tty;
/* Maybe do something here to the TTY struct as well? */
}
/*
* Initialize if neither terminal or printer is open.
*/
if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) & UN_ISOPEN)) {
ch->ch_mforce = 0;
ch->ch_mval = 0;
/*
* Flush input queue.
*/
head = readw(&(bs->rx_head));
writew(head, &(bs->rx_tail));
ch->ch_flags = 0;
ch->pscan_state = 0;
ch->pscan_savechar = 0;
ch->ch_c_cflag = tty->termios.c_cflag;
ch->ch_c_iflag = tty->termios.c_iflag;
ch->ch_c_oflag = tty->termios.c_oflag;
ch->ch_c_lflag = tty->termios.c_lflag;
ch->ch_startc = tty->termios.c_cc[VSTART];
ch->ch_stopc = tty->termios.c_cc[VSTOP];
/* TODO: flush our TTY struct here? */
}
dgap_carrier(ch);
/*
* Run param in case we changed anything
*/
dgap_param(ch, brd, un->un_type);
/*
* follow protocol for opening port
*/
spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
spin_unlock_irqrestore(&brd->bd_lock, lock_flags);
rc = dgap_block_til_ready(tty, file, ch);
if (!un->un_tty)
return -ENODEV;
/* No going back now, increment our unit and channel counters */
spin_lock_irqsave(&ch->ch_lock, lock_flags);
ch->ch_open_count++;
un->un_open_count++;
un->un_flags |= (UN_ISOPEN);
spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
return rc;
}
/*
 * dgap_block_til_ready()
 *
 * Wait for DCD, if needed.
 *
 * Called from dgap_tty_open() after the channel is set up.  Sleeps until
 * one of: carrier (real or forced) is present, the open is non-blocking,
 * the tty was hung up, the board failed, or a signal arrives.
 *
 * Returns 0 when the open may proceed, or a negative errno.
 */
static int dgap_block_til_ready(struct tty_struct *tty, struct file *file,
				struct channel_t *ch)
{
	int retval = 0;
	struct un_t *un;
	ulong lock_flags;
	uint old_flags;		/* flags snapshot taken before each sleep */
	int sleep_on_un_flags;	/* 1 = wait on unit flags, 0 = channel flags */

	if (!tty || tty->magic != TTY_MAGIC || !file || !ch ||
	    ch->magic != DGAP_CHANNEL_MAGIC)
		return -EIO;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return -EIO;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);

	/* Count ourselves as a waiting opener for the duration. */
	ch->ch_wopen++;

	/* Loop forever */
	while (1) {
		sleep_on_un_flags = 0;

		/*
		 * If board has failed somehow during our sleep,
		 * bail with error.
		 */
		if (ch->ch_bd->state == BOARD_FAILED) {
			retval = -EIO;
			break;
		}

		/* If tty was hung up, break out of loop and set error. */
		if (tty_hung_up_p(file)) {
			retval = -EAGAIN;
			break;
		}

		/*
		 * If either unit is in the middle of the fragile part of close,
		 * we just cannot touch the channel safely.
		 * Go back to sleep, knowing that when the channel can be
		 * touched safely, the close routine will signal the
		 * ch_wait_flags to wake us back up.
		 */
		if (!((ch->ch_tun.un_flags | ch->ch_pun.un_flags) &
		      UN_CLOSING)) {
			/*
			 * Our conditions to leave cleanly and happily:
			 * 1) NONBLOCKING on the tty is set.
			 * 2) CLOCAL is set.
			 * 3) DCD (fake or real) is active.
			 */
			if (file->f_flags & O_NONBLOCK)
				break;

			if (tty->flags & (1 << TTY_IO_ERROR))
				break;

			if (ch->ch_flags & CH_CD)
				break;

			if (ch->ch_flags & CH_FCAR)
				break;
		} else {
			sleep_on_un_flags = 1;
		}

		/*
		 * If there is a signal pending, the user probably
		 * interrupted (ctrl-c) us.
		 * Leave loop with error set.
		 */
		if (signal_pending(current)) {
			retval = -ERESTARTSYS;
			break;
		}

		/*
		 * Store the flags before we let go of channel lock
		 */
		if (sleep_on_un_flags)
			old_flags = ch->ch_tun.un_flags | ch->ch_pun.un_flags;
		else
			old_flags = ch->ch_flags;

		/*
		 * Let go of channel lock before calling schedule.
		 * Our poller will get any FEP events and wake us up when DCD
		 * eventually goes active.
		 */
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

		/*
		 * Wait for something in the flags to change
		 * from the current value.
		 */
		if (sleep_on_un_flags) {
			retval = wait_event_interruptible(un->un_flags_wait,
				(old_flags != (ch->ch_tun.un_flags |
					       ch->ch_pun.un_flags)));
		} else {
			retval = wait_event_interruptible(ch->ch_flags_wait,
				(old_flags != ch->ch_flags));
		}

		/*
		 * We got woken up for some reason.
		 * Before looping around, grab our channel lock.
		 */
		spin_lock_irqsave(&ch->ch_lock, lock_flags);
	}

	ch->ch_wopen--;

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	return retval;
}
/*
 * dgap_tty_hangup()
 *
 * Hang up the port.  Like a close, but does not wait for pending
 * output to drain: anything still queued is simply discarded.
 */
static void dgap_tty_hangup(struct tty_struct *tty)
{
	struct un_t *un;
	struct channel_t *ch;
	struct board_t *bd;

	if (!tty || tty->magic != TTY_MAGIC)
		return;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return;

	/* Throw away everything still waiting to be transmitted. */
	dgap_tty_flush_buffer(tty);
}
/*
 * dgap_tty_close()
 *
 * Close entry point.  On the last close of a unit, waits for output to
 * drain, flushes buffers, optionally drops DTR/RTS (HUPCL), turns off
 * the transparent-print mode if active, and wakes anyone blocked in
 * open or close.
 *
 * Fix: removed the local 'ts' (struct ktermios *) which was assigned
 * from &tty->termios but never read.
 */
static void dgap_tty_close(struct tty_struct *tty, struct file *file)
{
	struct board_t *bd;
	struct channel_t *ch;
	struct un_t *un;
	ulong lock_flags;

	if (!tty || tty->magic != TTY_MAGIC)
		return;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);

	/*
	 * Determine if this is the last close or not - and if we agree about
	 * which type of close it is with the Line Discipline
	 */
	if ((tty->count == 1) && (un->un_open_count != 1)) {
		/*
		 * Uh, oh. tty->count is 1, which means that the tty
		 * structure will be freed. un_open_count should always
		 * be one in these conditions. If it's greater than
		 * one, we've got real problems, since it means the
		 * serial port won't be shutdown.
		 */
		un->un_open_count = 1;
	}

	if (--un->un_open_count < 0)
		un->un_open_count = 0;

	ch->ch_open_count--;

	/* Not the last close of either unit: nothing more to do. */
	if (ch->ch_open_count && un->un_open_count) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
		return;
	}

	/* OK, its the last close on the unit */
	un->un_flags |= UN_CLOSING;

	tty->closing = 1;

	/*
	 * Only officially close channel if count is 0 and
	 * DIGI_PRINTER bit is not set.
	 */
	if ((ch->ch_open_count == 0) &&
	    !(ch->ch_digi.digi_flags & DIGI_PRINTER)) {
		ch->ch_flags &= ~(CH_RXBLOCK);

		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

		/* wait for output to drain */
		/* This will also return if we take an interrupt */
		dgap_wait_for_drain(tty);

		dgap_tty_flush_buffer(tty);
		tty_ldisc_flush(tty);

		spin_lock_irqsave(&ch->ch_lock, lock_flags);

		tty->closing = 0;

		/*
		 * If we have HUPCL set, lower DTR and RTS
		 */
		if (ch->ch_c_cflag & HUPCL) {
			ch->ch_mostat &= ~(D_RTS(ch)|D_DTR(ch));
			dgap_cmdb(ch, SMODEM, 0, D_DTR(ch)|D_RTS(ch), 0);

			/*
			 * Go to sleep to ensure RTS/DTR
			 * have been dropped for modems to see it.
			 */
			if (ch->ch_close_delay) {
				spin_unlock_irqrestore(&ch->ch_lock,
						       lock_flags);
				dgap_ms_sleep(ch->ch_close_delay);
				spin_lock_irqsave(&ch->ch_lock, lock_flags);
			}
		}

		ch->pscan_state = 0;
		ch->pscan_savechar = 0;
		ch->ch_baud_info = 0;
	}

	/*
	 * turn off print device when closing print device.
	 */
	if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
		dgap_wmove(ch, ch->ch_digi.digi_offstr,
			   (int) ch->ch_digi.digi_offlen);
		ch->ch_flags &= ~CH_PRON;
	}

	un->un_tty = NULL;
	un->un_flags &= ~(UN_ISOPEN | UN_CLOSING);
	tty->driver_data = NULL;

	/* Wake up anyone blocked in open or waiting on the unit. */
	wake_up_interruptible(&ch->ch_flags_wait);
	wake_up_interruptible(&un->un_flags_wait);

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
}
/*
 * dgap_tty_chars_in_buffer()
 *
 * Return number of characters that have not been transmitted yet.
 *
 * This routine is used by the line discipline to determine if there
 * is data waiting to be transmitted/drained/flushed or not.
 */
static int dgap_tty_chars_in_buffer(struct tty_struct *tty)
{
	struct board_t *bd;
	struct channel_t *ch;
	struct un_t *un;
	struct bs_t __iomem *bs;
	u8 tbusy;
	uint chars;
	u16 thead, ttail, tmask, chead, ctail;
	ulong lock_flags = 0;
	ulong lock_flags2 = 0;

	if (!tty)
		return 0;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return 0;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return 0;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return 0;

	bs = ch->ch_bs;
	if (!bs)
		return 0;

	/* Snapshot all FEP queue state atomically under both locks. */
	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	/* ch_tsize is assumed to be a power of two, so this masks
	 * the pointers into the circular transmit buffer. */
	tmask = (ch->ch_tsize - 1);

	/* Get Transmit queue pointers */
	thead = readw(&(bs->tx_head)) & tmask;
	ttail = readw(&(bs->tx_tail)) & tmask;

	/* Get tbusy flag */
	tbusy = readb(&(bs->tbusy));

	/* Get Command queue pointers */
	chead = readw(&(ch->ch_cm->cm_head));
	ctail = readw(&(ch->ch_cm->cm_tail));

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);

	/*
	 * The only way we know for sure if there is no pending
	 * data left to be transferred, is if:
	 * 1) Transmit head and tail are equal (empty).
	 * 2) Command queue head and tail are equal (empty).
	 * 3) The "TBUSY" flag is 0. (Transmitter not busy).
	 */
	if ((ttail == thead) && (tbusy == 0) && (chead == ctail)) {
		chars = 0;
	} else {
		/* Bytes queued in the circular transmit buffer. */
		if (thead >= ttail)
			chars = thead - ttail;
		else
			chars = thead - ttail + ch->ch_tsize;
		/*
		 * Fudge factor here.
		 * If chars is zero, we know that the command queue had
		 * something in it or tbusy was set.  Because we cannot
		 * be sure if there is still some data to be transmitted,
		 * lets lie, and tell ld we have 1 byte left.
		 */
		if (chars == 0) {
			/*
			 * If TBUSY is still set, and our tx buffers are empty,
			 * force the firmware to send me another wakeup after
			 * TBUSY has been cleared.
			 */
			if (tbusy != 0) {
				spin_lock_irqsave(&ch->ch_lock, lock_flags);
				un->un_flags |= UN_EMPTY;
				writeb(1, &(bs->iempty));
				spin_unlock_irqrestore(&ch->ch_lock,
						       lock_flags);
			}
			chars = 1;
		}
	}

	return chars;
}
/*
 * dgap_wait_for_drain()
 *
 * Sleep until the channel's transmit path reports empty, or until a
 * signal interrupts the wait.  Returns 0 when drained, or the negative
 * errno from wait_event_interruptible() if interrupted.
 */
static int dgap_wait_for_drain(struct tty_struct *tty)
{
	struct un_t *un;
	struct channel_t *ch;
	struct bs_t __iomem *bs;
	ulong lock_flags = 0;
	int rc = 0;

	if (!tty || tty->magic != TTY_MAGIC)
		return -EIO;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return -EIO;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return -EIO;

	bs = ch->ch_bs;
	if (!bs)
		return -EIO;

	for (;;) {
		uint pending = dgap_tty_chars_in_buffer(tty);

		if (pending == 0)
			break;

		/* Arm the "buffer empty" firmware event, then sleep. */
		spin_lock_irqsave(&ch->ch_lock, lock_flags);
		un->un_flags |= UN_EMPTY;
		writeb(1, &(bs->iempty));
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

		rc = wait_event_interruptible(un->un_flags_wait,
					      ((un->un_flags & UN_EMPTY) == 0));

		/* Non-zero means a signal (e.g. ctrl-c) woke us. */
		if (rc)
			break;
	}

	spin_lock_irqsave(&ch->ch_lock, lock_flags);
	un->un_flags &= ~(UN_EMPTY);
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	return rc;
}
/*
 * dgap_maxcps_room
 *
 * Reduces bytes_available to the max number of characters
 * that can be sent currently given the maxcps value, and
 * returns the new bytes_available.  This only affects printer
 * output.
 */
static int dgap_maxcps_room(struct channel_t *ch, struct un_t *un,
			    int bytes_available)
{
	int cps_limit;
	unsigned long now;
	unsigned long buffer_full_time;

	/* Rate limiting applies only to the transparent print unit. */
	if (un->un_type != DGAP_PRINT)
		return bytes_available;

	if (ch->ch_digi.digi_maxcps <= 0 || ch->ch_digi.digi_bufsize <= 0)
		return bytes_available;

	now = jiffies;

	/* Time at which a full printer buffer would finish draining. */
	buffer_full_time = now +
		(HZ * ch->ch_digi.digi_bufsize) / ch->ch_digi.digi_maxcps;

	if (ch->ch_cpstime < now) {
		/* Buffer has fully drained; reset the drain clock. */
		ch->ch_cpstime = now;
		cps_limit = ch->ch_digi.digi_bufsize;
	} else if (ch->ch_cpstime < buffer_full_time) {
		/* Partially full: allow what would drain by "full" time. */
		cps_limit = ((buffer_full_time - ch->ch_cpstime) *
			     ch->ch_digi.digi_maxcps) / HZ;
	} else {
		/* Completely full: no room at this rate. */
		cps_limit = 0;
	}

	return min(cps_limit, bytes_available);
}
/*
 * dgap_set_firmware_event()
 *
 * Ask the FEP to notify us of the requested transmit events:
 * UN_LOW ("buffer low", via bs->ilow) and/or UN_EMPTY ("buffer empty",
 * via bs->iempty).  Each event is armed only once until it fires.
 *
 * Bug fix: the second condition tested (event & UN_LOW) again — a
 * copy/paste error — so UN_EMPTY requests armed the empty event only
 * when UN_LOW was also requested.  It must test (event & UN_EMPTY).
 */
static inline void dgap_set_firmware_event(struct un_t *un, unsigned int event)
{
	struct channel_t *ch;
	struct bs_t __iomem *bs;

	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return;
	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;
	bs = ch->ch_bs;
	if (!bs)
		return;

	if ((event & UN_LOW) != 0) {
		if ((un->un_flags & UN_LOW) == 0) {
			un->un_flags |= UN_LOW;
			writeb(1, &(bs->ilow));
		}
	}
	if ((event & UN_EMPTY) != 0) {
		if ((un->un_flags & UN_EMPTY) == 0) {
			un->un_flags |= UN_EMPTY;
			writeb(1, &(bs->iempty));
		}
	}
}
/*
 * dgap_tty_write_room()
 *
 * Return space available in Tx buffer
 */
static int dgap_tty_write_room(struct tty_struct *tty)
{
	struct channel_t *ch;
	struct un_t *un;
	struct bs_t __iomem *bs;
	u16 head, tail, tmask;
	int ret;
	ulong lock_flags = 0;

	if (!tty)
		return 0;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return 0;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return 0;

	bs = ch->ch_bs;
	if (!bs)
		return 0;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);

	/* Free space in the circular FEP transmit buffer (one byte is
	 * always kept unused to distinguish full from empty). */
	tmask = ch->ch_tsize - 1;
	head = readw(&(bs->tx_head)) & tmask;
	tail = readw(&(bs->tx_tail)) & tmask;

	ret = tail - head - 1;
	if (ret < 0)
		ret += ch->ch_tsize;

	/* Limit printer to maxcps */
	ret = dgap_maxcps_room(ch, un, ret);

	/*
	 * If we are printer device, leave space for
	 * possibly both the on and off strings.
	 */
	if (un->un_type == DGAP_PRINT) {
		if (!(ch->ch_flags & CH_PRON))
			ret -= ch->ch_digi.digi_onlen;
		ret -= ch->ch_digi.digi_offlen;
	} else {
		/* Terminal write may first need to emit the printer-off
		 * string if the channel is still in print mode. */
		if (ch->ch_flags & CH_PRON)
			ret -= ch->ch_digi.digi_offlen;
	}

	if (ret < 0)
		ret = 0;

	/*
	 * Schedule FEP to wake us up if needed.
	 *
	 * TODO: This might be overkill...
	 * Do we really need to schedule callbacks from the FEP
	 * in every case?  Can we get smarter based on ret?
	 */
	dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	return ret;
}
/*
 * dgap_tty_put_char()
 *
 * Put a character into ch->ch_buf
 *
 * - used by the line discipline for OPOST processing
 *
 * Delegates to dgap_tty_write() with a one-byte buffer and always
 * reports one character consumed.
 */
static int dgap_tty_put_char(struct tty_struct *tty, unsigned char c)
{
	unsigned char byte = c;

	dgap_tty_write(tty, &byte, 1);
	return 1;
}
/*
* dgap_tty_write()
*
* Take data from the user or kernel and send it out to the FEP.
* In here exists all the Transparent Print magic as well.
*/
static int dgap_tty_write(struct tty_struct *tty, const unsigned char *buf,
int count)
{
struct channel_t *ch;
struct un_t *un;
struct bs_t __iomem *bs;
char __iomem *vaddr;
u16 head, tail, tmask, remain;
int bufcount, n;
int orig_count;
ulong lock_flags;
if (!tty)
return 0;
un = tty->driver_data;
if (!un || un->magic != DGAP_UNIT_MAGIC)
return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
bs = ch->ch_bs;
if (!bs)
return 0;
if (!count)
return 0;
/*
* Store original amount of characters passed in.
* This helps to figure out if we should ask the FEP
* to send us an event when it has more space available.
*/
orig_count = count;
spin_lock_irqsave(&ch->ch_lock, lock_flags);
/* Get our space available for the channel from the board */
tmask = ch->ch_tsize - 1;
head = readw(&(bs->tx_head)) & tmask;
tail = readw(&(bs->tx_tail)) & tmask;
bufcount = tail - head - 1;
if (bufcount < 0)
bufcount += ch->ch_tsize;
/*
* Limit printer output to maxcps overall, with bursts allowed
* up to bufsize characters.
*/
bufcount = dgap_maxcps_room(ch, un, bufcount);
/*
* Take minimum of what the user wants to send, and the
* space available in the FEP buffer.
*/
count = min(count, bufcount);
/*
* Bail if no space left.
*/
if (count <= 0) {
dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
return 0;
}
/*
* Output the printer ON string, if we are in terminal mode, but
* need to be in printer mode.
*/
if ((un->un_type == DGAP_PRINT) && !(ch->ch_flags & CH_PRON)) {
dgap_wmove(ch, ch->ch_digi.digi_onstr,
(int) ch->ch_digi.digi_onlen);
head = readw(&(bs->tx_head)) & tmask;
ch->ch_flags |= CH_PRON;
}
/*
* On the other hand, output the printer OFF string, if we are
* currently in printer mode, but need to output to the terminal.
*/
if ((un->un_type != DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
dgap_wmove(ch, ch->ch_digi.digi_offstr,
(int) ch->ch_digi.digi_offlen);
head = readw(&(bs->tx_head)) & tmask;
ch->ch_flags &= ~CH_PRON;
}
n = count;
/*
* If the write wraps over the top of the circular buffer,
* move the portion up to the wrap point, and reset the
* pointers to the bottom.
*/
remain = ch->ch_tstart + ch->ch_tsize - head;
if (n >= remain) {
n -= remain;
vaddr = ch->ch_taddr + head;
memcpy_toio(vaddr, (u8 *) buf, remain);
head = ch->ch_tstart;
buf += remain;
}
if (n > 0) {
/*
* Move rest of data.
*/
vaddr = ch->ch_taddr + head;
remain = n;
memcpy_toio(vaddr, (u8 *) buf, remain);
head += remain;
}
if (count) {
ch->ch_txcount += count;
head &= tmask;
writew(head, &(bs->tx_head));
}
dgap_set_firmware_event(un, UN_LOW | UN_EMPTY);
/*
* If this is the print device, and the
* printer is still on, we need to turn it
* off before going idle. If the buffer is
* non-empty, wait until it goes empty.
* Otherwise turn it off right now.
*/
if ((un->un_type == DGAP_PRINT) && (ch->ch_flags & CH_PRON)) {
tail = readw(&(bs->tx_tail)) & tmask;
if (tail != head) {
un->un_flags |= UN_EMPTY;
writeb(1, &(bs->iempty));
} else {
dgap_wmove(ch, ch->ch_digi.digi_offstr,
(int) ch->ch_digi.digi_offlen);
head = readw(&(bs->tx_head)) & tmask;
ch->ch_flags &= ~CH_PRON;
}
}
/* Update printer buffer empty time. */
if ((un->un_type == DGAP_PRINT) && (ch->ch_digi.digi_maxcps > 0)
&& (ch->ch_digi.digi_bufsize > 0)) {
ch->ch_cpstime += (HZ * count) / ch->ch_digi.digi_maxcps;
}
spin_unlock_irqrestore(&ch->ch_lock, lock_flags);
return count;
}
/*
 * dgap_tty_tiocmget()
 *
 * Return modem signals to ld: translate the FEP's modem-status byte
 * (plus any pending outbound signals) into TIOCM_* bits.
 */
static int dgap_tty_tiocmget(struct tty_struct *tty)
{
	struct un_t *un;
	struct channel_t *ch;
	u8 signals;
	int tiocm_bits = 0;
	ulong lock_flags;

	if (!tty || tty->magic != TTY_MAGIC)
		return -EIO;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return -EIO;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return -EIO;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);
	/* FEP-reported state, plus any outbound signals still pending. */
	signals = readb(&(ch->ch_bs->m_stat)) | ch->ch_mostat;
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	if (signals & D_DTR(ch))
		tiocm_bits |= TIOCM_DTR;
	if (signals & D_RTS(ch))
		tiocm_bits |= TIOCM_RTS;
	if (signals & D_CTS(ch))
		tiocm_bits |= TIOCM_CTS;
	if (signals & D_DSR(ch))
		tiocm_bits |= TIOCM_DSR;
	if (signals & D_RI(ch))
		tiocm_bits |= TIOCM_RI;
	if (signals & D_CD(ch))
		tiocm_bits |= TIOCM_CD;

	return tiocm_bits;
}
/*
 * dgap_tty_tiocmset()
 *
 * Set modem signals, called by ld.  Bits present in 'set' are raised,
 * bits present in 'clear' are lowered; each touched signal is marked
 * in ch_mforce so dgap_param() pushes it to the FEP.
 */
static int dgap_tty_tiocmset(struct tty_struct *tty,
			     unsigned int set, unsigned int clear)
{
	struct un_t *un;
	struct channel_t *ch;
	struct board_t *bd;
	ulong lock_flags;
	ulong lock_flags2;

	if (!tty || tty->magic != TTY_MAGIC)
		return -EIO;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return -EIO;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return -EIO;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return -EIO;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	/* Raise requested signals... */
	if (set & TIOCM_RTS) {
		ch->ch_mforce |= D_RTS(ch);
		ch->ch_mval |= D_RTS(ch);
	}
	if (set & TIOCM_DTR) {
		ch->ch_mforce |= D_DTR(ch);
		ch->ch_mval |= D_DTR(ch);
	}

	/* ...then lower the ones requested cleared. */
	if (clear & TIOCM_RTS) {
		ch->ch_mforce |= D_RTS(ch);
		ch->ch_mval &= ~(D_RTS(ch));
	}
	if (clear & TIOCM_DTR) {
		ch->ch_mforce |= D_DTR(ch);
		ch->ch_mval &= ~(D_DTR(ch));
	}

	/* Push the new modem state down to the FEP. */
	dgap_param(ch, bd, un->un_type);

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);

	return 0;
}
/*
 * dgap_tty_send_break()
 *
 * Send a Break, called by ld.
 *
 * msec semantics: -1 = longest break the FEP supports, 0 = minimum
 * non-zero break; any other value is milliseconds, converted to the
 * FEP's 10ms ticks.
 *
 * Cleanup: removed the dead "#if 0 ... SBREAK_TIME ... #endif" variant
 * that preceded the live dgap_cmdw() call.
 */
static int dgap_tty_send_break(struct tty_struct *tty, int msec)
{
	struct board_t *bd;
	struct channel_t *ch;
	struct un_t *un;
	ulong lock_flags;
	ulong lock_flags2;

	if (!tty || tty->magic != TTY_MAGIC)
		return -EIO;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return -EIO;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return -EIO;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return -EIO;

	switch (msec) {
	case -1:
		msec = 0xFFFF;	/* as long as the FEP allows */
		break;
	case 0:
		msec = 1;	/* shortest non-zero break */
		break;
	default:
		msec /= 10;	/* milliseconds -> 10ms FEP ticks */
		break;
	}

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	dgap_cmdw(ch, SBREAK, (u16) msec, 0);

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);

	return 0;
}
/*
 * dgap_tty_wait_until_sent()
 *
 * wait until data has been transmitted, called by ld.
 *
 * NOTE(review): the timeout argument is ignored here;
 * dgap_wait_for_drain() blocks until the FEP reports empty or a
 * signal interrupts the wait — confirm this is intentional.
 */
static void dgap_tty_wait_until_sent(struct tty_struct *tty, int timeout)
{
	dgap_wait_for_drain(tty);
}
/*
 * dgap_send_xchar()
 *
 * send a high priority character, called by ld.
 *
 * The character is written straight into the FEP transmit stream via
 * dgap_wmove() rather than being translated into RPAUSE/RRESUME
 * commands (see the #if 0 block below for the rationale).
 */
static void dgap_tty_send_xchar(struct tty_struct *tty, char c)
{
	struct board_t *bd;
	struct channel_t *ch;
	struct un_t *un;
	ulong lock_flags;
	ulong lock_flags2;

	if (!tty || tty->magic != TTY_MAGIC)
		return;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	/*
	 * This is technically what we should do.
	 * However, the NIST tests specifically want
	 * to see each XON or XOFF character that it
	 * sends, so lets just send each character
	 * by hand...
	 */
#if 0
	if (c == STOP_CHAR(tty))
		dgap_cmdw(ch, RPAUSE, 0, 0);
	else if (c == START_CHAR(tty))
		dgap_cmdw(ch, RRESUME, 0, 0);
	else
		dgap_wmove(ch, &c, 1);
#else
	dgap_wmove(ch, &c, 1);
#endif

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
/*
 * dgap_get_modem_info()
 *
 * Return modem signals to ld: read the FEP modem-status byte, merge in
 * pending outbound signals, translate to TIOCM_* bits, and copy the
 * result to user space.  Returns 0 or the put_user() error.
 */
static int dgap_get_modem_info(struct channel_t *ch, unsigned int __user *value)
{
	u8 signals;
	int tiocm_bits = 0;
	ulong lock_flags;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);
	/* FEP-reported state plus any outbound signals still pending. */
	signals = readb(&(ch->ch_bs->m_stat)) | ch->ch_mostat;
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	if (signals & D_DTR(ch))
		tiocm_bits |= TIOCM_DTR;
	if (signals & D_RTS(ch))
		tiocm_bits |= TIOCM_RTS;
	if (signals & D_CTS(ch))
		tiocm_bits |= TIOCM_CTS;
	if (signals & D_DSR(ch))
		tiocm_bits |= TIOCM_DSR;
	if (signals & D_RI(ch))
		tiocm_bits |= TIOCM_RI;
	if (signals & D_CD(ch))
		tiocm_bits |= TIOCM_CD;

	return put_user(tiocm_bits, value);
}
/*
 * dgap_set_modem_info()
 *
 * Set modem signals, called by ld.  Supports TIOCMBIS (raise bits),
 * TIOCMBIC (lower bits) and TIOCMSET (force both DTR and RTS to the
 * exact requested state), then pushes the result to the FEP.
 */
static int dgap_set_modem_info(struct channel_t *ch, struct board_t *bd, struct un_t *un,
			       unsigned int command, unsigned int __user *value)
{
	unsigned int bits;
	int rc;
	ulong lock_flags;
	ulong lock_flags2;

	rc = get_user(bits, value);
	if (rc)
		return rc;

	switch (command) {
	case TIOCMBIS:
		/* Raise the requested signals. */
		if (bits & TIOCM_RTS) {
			ch->ch_mforce |= D_RTS(ch);
			ch->ch_mval |= D_RTS(ch);
		}
		if (bits & TIOCM_DTR) {
			ch->ch_mforce |= D_DTR(ch);
			ch->ch_mval |= D_DTR(ch);
		}
		break;

	case TIOCMBIC:
		/* Lower the requested signals. */
		if (bits & TIOCM_RTS) {
			ch->ch_mforce |= D_RTS(ch);
			ch->ch_mval &= ~(D_RTS(ch));
		}
		if (bits & TIOCM_DTR) {
			ch->ch_mforce |= D_DTR(ch);
			ch->ch_mval &= ~(D_DTR(ch));
		}
		break;

	case TIOCMSET:
		/* Force both signals to exactly the requested state. */
		ch->ch_mforce = D_DTR(ch)|D_RTS(ch);

		if (bits & TIOCM_RTS)
			ch->ch_mval |= D_RTS(ch);
		else
			ch->ch_mval &= ~(D_RTS(ch));

		if (bits & TIOCM_DTR)
			ch->ch_mval |= (D_DTR(ch));
		else
			ch->ch_mval &= ~(D_DTR(ch));
		break;

	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	dgap_param(ch, bd, un->un_type);

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);

	return 0;
}
/*
 * dgap_tty_digigeta()
 *
 * Ioctl to get the information for ditty: snapshot ch->ch_digi under
 * the channel lock and copy it to user space.
 *
 * Cleanup: dropped the memset() of 'tmp' — the subsequent full-size
 * memcpy() from ch->ch_digi overwrote every byte of it anyway.
 */
static int dgap_tty_digigeta(struct channel_t *ch, struct digi_t __user *retinfo)
{
	struct digi_t tmp;
	ulong lock_flags;

	if (!retinfo)
		return -EFAULT;

	/* Snapshot under the lock; copy_to_user() may sleep, so it is
	 * done after the lock is released. */
	spin_lock_irqsave(&ch->ch_lock, lock_flags);
	memcpy(&tmp, &ch->ch_digi, sizeof(tmp));
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
		return -EFAULT;

	return 0;
}
/*
 * dgap_tty_digiseta()
 *
 * Ioctl to set the information for ditty: copy the new digi_t from
 * user space, clamp its fields to sane ranges, and reprogram the
 * channel via dgap_param().
 */
static int dgap_tty_digiseta(struct channel_t *ch, struct board_t *bd,
			     struct un_t *un, struct digi_t __user *new_info)
{
	struct digi_t incoming;
	struct digi_t *d;
	ulong lock_flags = 0;
	unsigned long lock_flags2;

	if (copy_from_user(&incoming, new_info, sizeof(struct digi_t)))
		return -EFAULT;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	ch->ch_digi = incoming;
	d = &ch->ch_digi;

	/* Clamp user-supplied values into ranges the driver supports. */
	if (d->digi_maxcps < 1)
		d->digi_maxcps = 1;
	else if (d->digi_maxcps > 10000)
		d->digi_maxcps = 10000;

	if (d->digi_bufsize < 10)
		d->digi_bufsize = 10;

	if (d->digi_maxchar < 1)
		d->digi_maxchar = 1;
	if (d->digi_maxchar > d->digi_bufsize)
		d->digi_maxchar = d->digi_bufsize;

	if (d->digi_onlen > DIGI_PLEN)
		d->digi_onlen = DIGI_PLEN;
	if (d->digi_offlen > DIGI_PLEN)
		d->digi_offlen = DIGI_PLEN;

	dgap_param(ch, bd, un->un_type);

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);

	return 0;
}
/*
 * dgap_tty_digigetedelay()
 *
 * Ioctl to get the current edelay setting from board shared memory.
 *
 * Cleanup: replaced the pointless memset() on the scalar 'tmp' with a
 * plain initializer; the value is overwritten by readw() anyway.
 */
static int dgap_tty_digigetedelay(struct tty_struct *tty, int __user *retinfo)
{
	struct channel_t *ch;
	struct un_t *un;
	int tmp = 0;
	ulong lock_flags;

	if (!retinfo)
		return -EFAULT;

	if (!tty || tty->magic != TTY_MAGIC)
		return -EFAULT;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return -EFAULT;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return -EFAULT;

	/* edelay lives in FEP shared memory; read it under the lock. */
	spin_lock_irqsave(&ch->ch_lock, lock_flags);
	tmp = readw(&(ch->ch_bs->edelay));
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
		return -EFAULT;

	return 0;
}
/*
 * dgap_tty_digisetedelay()
 *
 * Ioctl to set the EDELAY setting: write the new value into FEP shared
 * memory and re-run dgap_param() so the channel picks it up.
 */
static int dgap_tty_digisetedelay(struct channel_t *ch, struct board_t *bd,
				  struct un_t *un, int __user *new_info)
{
	int edelay;
	ulong lock_flags;
	ulong lock_flags2;

	if (copy_from_user(&edelay, new_info, sizeof(int)))
		return -EFAULT;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	writew((u16) edelay, &(ch->ch_bs->edelay));

	dgap_param(ch, bd, un->un_type);

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);

	return 0;
}
/*
 * dgap_tty_digigetcustombaud()
 *
 * Ioctl to get the current custom baud rate setting.
 *
 * Cleanup: replaced the pointless memset() on the scalar 'tmp' with a
 * plain initializer; the value is overwritten before use anyway.
 */
static int dgap_tty_digigetcustombaud(struct channel_t *ch, struct un_t *un,
				      int __user *retinfo)
{
	int tmp = 0;
	ulong lock_flags;

	if (!retinfo)
		return -EFAULT;

	spin_lock_irqsave(&ch->ch_lock, lock_flags);
	tmp = dgap_get_custom_baud(ch);
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags);

	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
		return -EFAULT;

	return 0;
}
/*
 * dgap_tty_digisetcustombaud()
 *
 * Ioctl to set the custom baud rate setting.  Only FEP5+ firmware
 * supports custom speeds; on older firmware the request is silently
 * accepted but ignored (returns 0 either way, as before).
 */
static int dgap_tty_digisetcustombaud(struct channel_t *ch, struct board_t *bd,
				      struct un_t *un, int __user *new_info)
{
	uint rate;
	ulong lock_flags;
	ulong lock_flags2;

	if (copy_from_user(&rate, new_info, sizeof(unsigned int)))
		return -EFAULT;

	if (!(bd->bd_flags & BD_FEP5PLUS))
		return 0;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	ch->ch_custom_speed = rate;
	dgap_param(ch, bd, un->un_type);

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);

	return 0;
}
/*
 * dgap_set_termios()
 *
 * tty_operations set_termios entry point: copy the tty's new termios
 * settings into the channel, re-evaluate carrier (CLOCAL may have
 * changed), and reprogram the FEP via dgap_param().
 */
static void dgap_tty_set_termios(struct tty_struct *tty,
				 struct ktermios *old_termios)
{
	struct board_t *bd;
	struct channel_t *ch;
	struct un_t *un;
	unsigned long lock_flags;
	unsigned long lock_flags2;

	if (!tty || tty->magic != TTY_MAGIC)
		return;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	/* Cache the new termios state on the channel. */
	ch->ch_c_cflag = tty->termios.c_cflag;
	ch->ch_c_iflag = tty->termios.c_iflag;
	ch->ch_c_oflag = tty->termios.c_oflag;
	ch->ch_c_lflag = tty->termios.c_lflag;
	ch->ch_startc = tty->termios.c_cc[VSTART];
	ch->ch_stopc = tty->termios.c_cc[VSTOP];

	dgap_carrier(ch);
	dgap_param(ch, bd, un->un_type);

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
/*
 * dgap_tty_throttle()
 *
 * Ask the firmware to pause reception on this channel.
 */
static void dgap_tty_throttle(struct tty_struct *tty)
{
	struct board_t *bd;
	struct channel_t *ch;
	struct un_t *un;
	ulong lock_flags;
	ulong lock_flags2;

	/* Validate the tty -> unit -> channel -> board chain. */
	if (!tty || tty->magic != TTY_MAGIC)
		return;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	ch->ch_flags |= (CH_RXBLOCK);
	/* Dropped the dead "#if 1" wrapper; RPAUSE is always sent. */
	dgap_cmdw(ch, RPAUSE, 0, 0);

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
/*
 * dgap_tty_unthrottle()
 *
 * Ask the firmware to resume reception on this channel.
 */
static void dgap_tty_unthrottle(struct tty_struct *tty)
{
	struct board_t *bd;
	struct channel_t *ch;
	struct un_t *un;
	ulong lock_flags;
	ulong lock_flags2;

	/* Validate the tty -> unit -> channel -> board chain. */
	if (!tty || tty->magic != TTY_MAGIC)
		return;

	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return;

	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	ch->ch_flags &= ~(CH_RXBLOCK);
	/* Dropped the dead "#if 1" wrapper; RRESUME is always sent. */
	dgap_cmdw(ch, RRESUME, 0, 0);

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
/*
 * dgap_tty_start()
 *
 * Tell the firmware to resume transmission on this port.
 */
static void dgap_tty_start(struct tty_struct *tty)
{
	struct un_t *unit;
	struct channel_t *chan;
	struct board_t *brd;
	ulong brd_flags;
	ulong chan_flags;

	/* Walk tty -> unit -> channel -> board, bailing on any bad magic. */
	if (!tty || tty->magic != TTY_MAGIC)
		return;

	unit = tty->driver_data;
	if (!unit || unit->magic != DGAP_UNIT_MAGIC)
		return;

	chan = unit->un_ch;
	if (!chan || chan->magic != DGAP_CHANNEL_MAGIC)
		return;

	brd = chan->ch_bd;
	if (!brd || brd->magic != DGAP_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&brd->bd_lock, brd_flags);
	spin_lock_irqsave(&chan->ch_lock, chan_flags);

	dgap_cmdw(chan, RESUMETX, 0, 0);

	spin_unlock_irqrestore(&chan->ch_lock, chan_flags);
	spin_unlock_irqrestore(&brd->bd_lock, brd_flags);
}
/*
 * dgap_tty_stop()
 *
 * Tell the firmware to pause transmission on this port.
 */
static void dgap_tty_stop(struct tty_struct *tty)
{
	struct un_t *unit;
	struct channel_t *chan;
	struct board_t *brd;
	ulong brd_flags;
	ulong chan_flags;

	/* Walk tty -> unit -> channel -> board, bailing on any bad magic. */
	if (!tty || tty->magic != TTY_MAGIC)
		return;

	unit = tty->driver_data;
	if (!unit || unit->magic != DGAP_UNIT_MAGIC)
		return;

	chan = unit->un_ch;
	if (!chan || chan->magic != DGAP_CHANNEL_MAGIC)
		return;

	brd = chan->ch_bd;
	if (!brd || brd->magic != DGAP_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&brd->bd_lock, brd_flags);
	spin_lock_irqsave(&chan->ch_lock, chan_flags);

	dgap_cmdw(chan, PAUSETX, 0, 0);

	spin_unlock_irqrestore(&chan->ch_lock, chan_flags);
	spin_unlock_irqrestore(&brd->bd_lock, brd_flags);
}
/*
* dgap_tty_flush_chars()
*
* Flush the cook buffer
*
* Note to self, and any other poor souls who venture here:
*
* flush in this case DOES NOT mean dispose of the data.
* instead, it means "stop buffering and send it if you
* haven't already." Just guess how I figured that out... SRW 2-Jun-98
*
* It is also always called in interrupt context - JAR 8-Sept-99
*/
static void dgap_tty_flush_chars(struct tty_struct *tty)
{
	struct board_t *bd;
	struct channel_t *ch;
	struct un_t *un;
	ulong lock_flags;
	ulong lock_flags2;

	/* Validate the tty -> unit -> channel -> board chain. */
	if (!tty || tty->magic != TTY_MAGIC)
		return;
	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return;
	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;
	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return;

	/*
	 * Currently a no-op: the locks are taken and released but no
	 * command is sent to the firmware.  Kept as-is per the TODO.
	 */
	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
	/* TODO: Do something here */
	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
/*
* dgap_tty_flush_buffer()
*
* Flush Tx buffer (make in == out)
*/
static void dgap_tty_flush_buffer(struct tty_struct *tty)
{
	struct board_t *bd;
	struct channel_t *ch;
	struct un_t *un;
	ulong lock_flags;
	ulong lock_flags2;
	u16 head;

	/* Validate the tty -> unit -> channel -> board chain. */
	if (!tty || tty->magic != TTY_MAGIC)
		return;
	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return;
	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;
	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);

	/*
	 * Discard pending TX data: flush the firmware's transmit buffer
	 * up to the current head, then resume transmission.
	 */
	ch->ch_flags &= ~CH_STOP;
	head = readw(&(ch->ch_bs->tx_head));
	dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
	dgap_cmdw(ch, RESUMETX, 0, 0);

	/* Wake any writers sleeping on the tty or printer unit. */
	if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
		ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
		wake_up_interruptible(&ch->ch_tun.un_flags_wait);
	}
	if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
		ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
		wake_up_interruptible(&ch->ch_pun.un_flags_wait);
	}

	spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);

	/* tty_wakeup() must be called without our locks held. */
	if (waitqueue_active(&tty->write_wait))
		wake_up_interruptible(&tty->write_wait);
	tty_wakeup(tty);
}
/*****************************************************************************
*
* The IOCTL function and all of its helpers
*
*****************************************************************************/
/*
* dgap_tty_ioctl()
*
* The usual assortment of ioctl's
*/
static int dgap_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
			  unsigned long arg)
{
	struct board_t *bd;
	struct channel_t *ch;
	struct un_t *un;
	int rc;
	u16 head;
	ulong lock_flags = 0;
	ulong lock_flags2 = 0;
	void __user *uarg = (void __user *) arg;

	/* Validate the tty -> unit -> channel -> board chain. */
	if (!tty || tty->magic != TTY_MAGIC)
		return -ENODEV;
	un = tty->driver_data;
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return -ENODEV;
	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return -ENODEV;
	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return -ENODEV;

	/*
	 * Both locks are taken here and EVERY case below is responsible
	 * for dropping them (in channel-then-board order) before it
	 * returns or before it sleeps.
	 */
	spin_lock_irqsave(&bd->bd_lock, lock_flags);
	spin_lock_irqsave(&ch->ch_lock, lock_flags2);
	if (un->un_open_count <= 0) {
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return -EIO;
	}
	switch (cmd) {
	/* Here are all the standard ioctl's that we MUST implement */
	case TCSBRK:
		/*
		 * TCSBRK is SVID version: non-zero arg --> no break
		 * this behaviour is exploited by tcdrain().
		 *
		 * According to POSIX.1 spec (7.2.2.1.2) breaks should be
		 * between 0.25 and 0.5 seconds so we'll ask for something
		 * in the middle: 0.375 seconds.
		 */
		rc = tty_check_change(tty);
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		if (rc)
			return rc;
		/* dgap_wait_for_drain() sleeps, so the locks are dropped. */
		rc = dgap_wait_for_drain(tty);
		if (rc)
			return -EINTR;
		spin_lock_irqsave(&bd->bd_lock, lock_flags);
		spin_lock_irqsave(&ch->ch_lock, lock_flags2);
		if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
			dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return 0;
	case TCSBRKP:
		/* support for POSIX tcsendbreak()
		 * According to POSIX.1 spec (7.2.2.1.2) breaks should be
		 * between 0.25 and 0.5 seconds so we'll ask for something
		 * in the middle: 0.375 seconds.
		 */
		rc = tty_check_change(tty);
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		if (rc)
			return rc;
		rc = dgap_wait_for_drain(tty);
		if (rc)
			return -EINTR;
		spin_lock_irqsave(&bd->bd_lock, lock_flags);
		spin_lock_irqsave(&ch->ch_lock, lock_flags2);
		dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return 0;
	case TIOCSBRK:
		/*
		 * FEP5 doesn't support turning on a break unconditionally.
		 * The FEP5 device will stop sending a break automatically
		 * after the specified time value that was sent when turning on
		 * the break.
		 */
		rc = tty_check_change(tty);
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		if (rc)
			return rc;
		rc = dgap_wait_for_drain(tty);
		if (rc)
			return -EINTR;
		spin_lock_irqsave(&bd->bd_lock, lock_flags);
		spin_lock_irqsave(&ch->ch_lock, lock_flags2);
		dgap_cmdw(ch, SBREAK, (u16) SBREAK_TIME, 0);
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return 0;
	case TIOCCBRK:
		/*
		 * FEP5 doesn't support turning off a break unconditionally.
		 * The FEP5 device will stop sending a break automatically
		 * after the specified time value that was sent when turning on
		 * the break.
		 */
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return 0;
	case TIOCGSOFTCAR:
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		rc = put_user(C_CLOCAL(tty) ? 1 : 0,
			      (unsigned long __user *) arg);
		return rc;
	case TIOCSSOFTCAR:
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		/* Note: 'arg' is reused to hold the fetched user value. */
		rc = get_user(arg, (unsigned long __user *) arg);
		if (rc)
			return rc;
		spin_lock_irqsave(&bd->bd_lock, lock_flags);
		spin_lock_irqsave(&ch->ch_lock, lock_flags2);
		tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) |
					(arg ? CLOCAL : 0));
		dgap_param(ch, bd, un->un_type);
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return 0;
	case TIOCMGET:
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return dgap_get_modem_info(ch, uarg);
	case TIOCMBIS:
	case TIOCMBIC:
	case TIOCMSET:
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return dgap_set_modem_info(ch, bd, un, cmd, uarg);
	/*
	 * Here are any additional ioctl's that we want to implement
	 */
	case TCFLSH:
		/*
		 * The linux tty driver doesn't have a flush
		 * input routine for the driver, assuming all backed
		 * up data is in the line disc. buffers. However,
		 * we all know that's not the case. Here, we
		 * act on the ioctl, but then lie and say we didn't
		 * so the line discipline will process the flush
		 * also.
		 */
		rc = tty_check_change(tty);
		if (rc) {
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			return rc;
		}
		/* Flush RX by snapping the firmware tail up to head. */
		if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
			if (!(un->un_type == DGAP_PRINT)) {
				head = readw(&(ch->ch_bs->rx_head));
				writew(head, &(ch->ch_bs->rx_tail));
				writeb(0, &(ch->ch_bs->orun));
			}
		}
		if ((arg != TCOFLUSH) && (arg != TCIOFLUSH)) {
			/* pretend we didn't recognize this IOCTL */
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			return -ENOIOCTLCMD;
		}
		/* Flush TX in the firmware and wake sleeping writers. */
		ch->ch_flags &= ~CH_STOP;
		head = readw(&(ch->ch_bs->tx_head));
		dgap_cmdw(ch, FLUSHTX, (u16) head, 0);
		dgap_cmdw(ch, RESUMETX, 0, 0);
		if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
			ch->ch_tun.un_flags &= ~(UN_LOW|UN_EMPTY);
			wake_up_interruptible(&ch->ch_tun.un_flags_wait);
		}
		if (ch->ch_pun.un_flags & (UN_LOW|UN_EMPTY)) {
			ch->ch_pun.un_flags &= ~(UN_LOW|UN_EMPTY);
			wake_up_interruptible(&ch->ch_pun.un_flags_wait);
		}
		if (waitqueue_active(&tty->write_wait))
			wake_up_interruptible(&tty->write_wait);
		/* Can't hold any locks when calling tty_wakeup! */
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		tty_wakeup(tty);
		/* pretend we didn't recognize this IOCTL */
		return -ENOIOCTLCMD;
	case TCSETSF:
	case TCSETSW:
		/*
		 * The linux tty driver doesn't have a flush
		 * input routine for the driver, assuming all backed
		 * up data is in the line disc. buffers. However,
		 * we all know that's not the case. Here, we
		 * act on the ioctl, but then lie and say we didn't
		 * so the line discipline will process the flush
		 * also.
		 */
		if (cmd == TCSETSF) {
			/* flush rx */
			ch->ch_flags &= ~CH_STOP;
			head = readw(&(ch->ch_bs->rx_head));
			writew(head, &(ch->ch_bs->rx_tail));
		}
		/* now wait for all the output to drain */
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		rc = dgap_wait_for_drain(tty);
		if (rc)
			return -EINTR;
		/* pretend we didn't recognize this */
		return -ENOIOCTLCMD;
	case TCSETAW:
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		rc = dgap_wait_for_drain(tty);
		if (rc)
			return -EINTR;
		/* pretend we didn't recognize this */
		return -ENOIOCTLCMD;
	case TCXONC:
		/*
		 * The Linux Line Discipline (LD) would do this for us if we
		 * let it, but we have the special firmware options to do this
		 * the "right way" regardless of hardware or software flow
		 * control so we'll do it outselves instead of letting the LD
		 * do it.
		 */
		rc = tty_check_change(tty);
		if (rc) {
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			return rc;
		}
		switch (arg) {
		case TCOON:
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			dgap_tty_start(tty);
			return 0;
		case TCOOFF:
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			dgap_tty_stop(tty);
			return 0;
		case TCION:
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			/* Make the ld do it */
			return -ENOIOCTLCMD;
		case TCIOFF:
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			/* Make the ld do it */
			return -ENOIOCTLCMD;
		default:
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			return -EINVAL;
		}
	case DIGI_GETA:
		/* get information for ditty */
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return dgap_tty_digigeta(ch, uarg);
	case DIGI_SETAW:
	case DIGI_SETAF:
		/* set information for ditty */
		if (cmd == (DIGI_SETAW)) {
			/* SETAW waits for output to drain first. */
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			rc = dgap_wait_for_drain(tty);
			if (rc)
				return -EINTR;
			spin_lock_irqsave(&bd->bd_lock, lock_flags);
			spin_lock_irqsave(&ch->ch_lock, lock_flags2);
		} else
			tty_ldisc_flush(tty);
		/* fall thru */
	case DIGI_SETA:
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return dgap_tty_digiseta(ch, bd, un, uarg);
	case DIGI_GEDELAY:
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return dgap_tty_digigetedelay(tty, uarg);
	case DIGI_SEDELAY:
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return dgap_tty_digisetedelay(ch, bd, un, uarg);
	case DIGI_GETCUSTOMBAUD:
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return dgap_tty_digigetcustombaud(ch, un, uarg);
	case DIGI_SETCUSTOMBAUD:
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return dgap_tty_digisetcustombaud(ch, bd, un, uarg);
	case DIGI_RESET_PORT:
		dgap_firmware_reset_port(ch);
		dgap_param(ch, bd, un->un_type);
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return 0;
	default:
		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return -ENOIOCTLCMD;
	}
}
static int dgap_alloc_flipbuf(struct board_t *brd)
{
/*
* allocate flip buffer for board.
*/
brd->flipbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
if (!brd->flipbuf)
return -ENOMEM;
brd->flipflagbuf = kmalloc(MYFLIPLEN, GFP_KERNEL);
if (!brd->flipflagbuf) {
kfree(brd->flipbuf);
return -ENOMEM;
}
return 0;
}
static void dgap_free_flipbuf(struct board_t *brd)
{
kfree(brd->flipbuf);
kfree(brd->flipflagbuf);
}
/*
* Create pr and tty device entries
*/
static int dgap_tty_register_ports(struct board_t *brd)
{
	struct channel_t *ch;
	int i;
	int ret;

	/* One tty_port per channel for both serial and printer devices. */
	brd->serial_ports = kcalloc(brd->nasync, sizeof(*brd->serial_ports),
				    GFP_KERNEL);
	if (!brd->serial_ports)
		return -ENOMEM;

	brd->printer_ports = kcalloc(brd->nasync, sizeof(*brd->printer_ports),
				     GFP_KERNEL);
	if (!brd->printer_ports) {
		ret = -ENOMEM;
		goto free_serial_ports;
	}

	for (i = 0; i < brd->nasync; i++) {
		tty_port_init(&brd->serial_ports[i]);
		tty_port_init(&brd->printer_ports[i]);
	}

	/*
	 * NOTE(review): the loop increment reads brd->channels[i] AFTER
	 * i++ and before the bound check, so the final iteration touches
	 * brd->channels[brd->nasync] — confirm the array has a sentinel
	 * slot or fix the increment.
	 */
	ch = brd->channels[0];
	for (i = 0; i < brd->nasync; i++, ch = brd->channels[i]) {
		struct device *classp;

		classp = tty_port_register_device(&brd->serial_ports[i],
						  brd->serial_driver,
						  i, NULL);
		if (IS_ERR(classp)) {
			ret = PTR_ERR(classp);
			goto unregister_ttys;
		}
		dgap_create_tty_sysfs(&ch->ch_tun, classp);
		ch->ch_tun.un_sysfs = classp;

		classp = tty_port_register_device(&brd->printer_ports[i],
						  brd->print_driver,
						  i, NULL);
		if (IS_ERR(classp)) {
			ret = PTR_ERR(classp);
			goto unregister_ttys;
		}
		dgap_create_tty_sysfs(&ch->ch_pun, classp);
		ch->ch_pun.un_sysfs = classp;
	}
	dgap_create_ports_sysfiles(brd);
	return 0;

unregister_ttys:
	/*
	 * Unwind in reverse: un_sysfs being non-NULL marks which devices
	 * were actually registered on the failing iteration.
	 */
	while (i >= 0) {
		ch = brd->channels[i];
		if (ch->ch_tun.un_sysfs) {
			dgap_remove_tty_sysfs(ch->ch_tun.un_sysfs);
			tty_unregister_device(brd->serial_driver, i);
		}
		if (ch->ch_pun.un_sysfs) {
			dgap_remove_tty_sysfs(ch->ch_pun.un_sysfs);
			tty_unregister_device(brd->print_driver, i);
		}
		i--;
	}
	for (i = 0; i < brd->nasync; i++) {
		tty_port_destroy(&brd->serial_ports[i]);
		tty_port_destroy(&brd->printer_ports[i]);
	}
	kfree(brd->printer_ports);
	brd->printer_ports = NULL;

free_serial_ports:
	kfree(brd->serial_ports);
	brd->serial_ports = NULL;
	return ret;
}
/*
* Copies the BIOS code from the user to the board,
* and starts the BIOS running.
*/
static void dgap_do_bios_load(struct board_t *brd, const u8 *ubios, int len)
{
	u8 __iomem *addr;
	uint offset;
	int i;

	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
		return;

	addr = brd->re_map_membase;

	/*
	 * clear POST area (16 bytes) so stale results from a previous
	 * run can't be mistaken for this boot's self-test output.
	 */
	for (i = 0; i < 16; i++)
		writeb(0, addr + POSTAREA + i);

	/*
	 * Download bios at fixed offset 0x1000, then write the two boot
	 * vectors.  NOTE(review): the meaning of the 0x0bf00401 magic is
	 * firmware-defined; the write order here must be preserved.
	 */
	offset = 0x1000;
	memcpy_toio(addr + offset, ubios, len);
	writel(0x0bf00401, addr);
	writel(0, (addr + 4));

	/* Clear the reset, and change states. */
	writeb(FEPCLR, brd->re_map_port);
}
/*
* Checks to see if the BIOS completed running on the card.
*/
static int dgap_test_bios(struct board_t *brd)
{
	u8 __iomem *addr;
	u16 word;
	u16 err1;
	u16 err2;

	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
		return -EINVAL;

	addr = brd->re_map_membase;
	word = readw(addr + POSTAREA);

	/*
	 * It can take 5-6 seconds for a board to
	 * pass the bios self test and post results.
	 * Give it 10 seconds.
	 */
	brd->wait_for_bios = 0;
	while (brd->wait_for_bios < 1000) {
		/*
		 * Check to see if BIOS thinks board is good. (GD).
		 * NOTE(review): *(u16 *)"GD" compares against the ASCII
		 * pair in host byte order — assumes a little-endian host
		 * matching the board layout; confirm before porting.
		 */
		if (word == *(u16 *) "GD")
			return 0;
		msleep_interruptible(10);
		brd->wait_for_bios++;
		word = readw(addr + POSTAREA);
	}

	/* Gave up on board after too long of time taken */
	err1 = readw(addr + SEQUENCE);
	err2 = readw(addr + ERROR);
	pr_warn("dgap: %s failed diagnostics. Error #(%x,%x).\n",
		brd->name, err1, err2);
	brd->state = BOARD_FAILED;
	brd->dpastatus = BD_NOBIOS;
	return -EIO;
}
/*
* Copies the FEP code from the user to the board,
* and starts the FEP running.
*/
static void dgap_do_fep_load(struct board_t *brd, const u8 *ufep, int len)
{
	u8 __iomem *addr;
	uint offset;

	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
		return;

	addr = brd->re_map_membase;

	/*
	 * Download FEP image at fixed offset 0x1000.
	 */
	offset = 0x1000;
	memcpy_toio(addr + offset, ufep, len);

	/*
	 * If board is a concentrator product, we need to give
	 * it its config string describing how the concentrators look.
	 */
	if ((brd->type == PCX) || (brd->type == PEPC)) {
		u8 string[100];
		u8 __iomem *config;
		u8 *xconfig;
		int i = 0;

		xconfig = dgap_create_config_string(brd, string);

		/* Write string to board memory; 0xff byte terminates it. */
		config = addr + CONFIG;
		for (; i < CONFIGSIZE; i++, config++, xconfig++) {
			writeb(*xconfig, config);
			if ((*xconfig & 0xff) == 0xff)
				break;
		}
	}

	/*
	 * Kick off the FEP: firmware-defined start vector and go bit.
	 * The write order must be preserved.
	 */
	writel(0xbfc01004, (addr + 0xc34));
	writel(0x3, (addr + 0xc30));
}
/*
* Waits for the FEP to report thats its ready for us to use.
*/
static int dgap_test_fep(struct board_t *brd)
{
	u8 __iomem *addr;
	u16 word;
	u16 err1;
	u16 err2;

	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
		return -EINVAL;

	addr = brd->re_map_membase;
	word = readw(addr + FEPSTAT);

	/*
	 * It can take 2-3 seconds for the FEP to
	 * be up and running. Give it 5 secs.
	 */
	brd->wait_for_fep = 0;
	while (brd->wait_for_fep < 500) {
		/*
		 * Check to see if FEP is up and running now ("OS").
		 * NOTE(review): host-endian string compare, same caveat
		 * as dgap_test_bios().
		 */
		if (word == *(u16 *) "OS") {
			/*
			 * Check to see if the board can support FEP5+ commands.
			 */
			word = readw(addr + FEP5_PLUS);
			if (word == *(u16 *) "5A")
				brd->bd_flags |= BD_FEP5PLUS;
			return 0;
		}
		msleep_interruptible(10);
		brd->wait_for_fep++;
		word = readw(addr + FEPSTAT);
	}

	/* Gave up on board after too long of time taken */
	err1 = readw(addr + SEQUENCE);
	err2 = readw(addr + ERROR);
	pr_warn("dgap: FEPOS for %s not functioning. Error #(%x,%x).\n",
		brd->name, err1, err2);
	brd->state = BOARD_FAILED;
	brd->dpastatus = BD_NOFEP;
	return -EIO;
}
/*
* Physically forces the FEP5 card to reset itself.
*/
static void dgap_do_reset_board(struct board_t *brd)
{
	u8 check;
	u32 check1;
	u32 check2;
	int i;

	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) ||
	    !brd->re_map_membase || !brd->re_map_port)
		return;

	/* FEPRST does not vary among supported boards */
	writeb(FEPRST, brd->re_map_port);

	/* Poll (up to ~10ms) for the reset bits to read back. */
	for (i = 0; i <= 1000; i++) {
		check = readb(brd->re_map_port) & 0xe;
		if (check == FEPRST)
			break;
		udelay(10);
	}
	if (i > 1000) {
		pr_warn("dgap: Board not resetting... Failing board.\n");
		brd->state = BOARD_FAILED;
		brd->dpastatus = BD_NOFEP;
		return;
	}

	/*
	 * Make sure there really is memory out there: write distinct
	 * patterns at both ends of the window and read them back.
	 */
	writel(0xa55a3cc3, (brd->re_map_membase + LOWMEM));
	writel(0x5aa5c33c, (brd->re_map_membase + HIGHMEM));
	check1 = readl(brd->re_map_membase + LOWMEM);
	check2 = readl(brd->re_map_membase + HIGHMEM);
	if ((check1 != 0xa55a3cc3) || (check2 != 0x5aa5c33c)) {
		pr_warn("dgap: No memory at %p for board.\n",
			brd->re_map_membase);
		brd->state = BOARD_FAILED;
		brd->dpastatus = BD_NOFEP;
		return;
	}
}
#ifdef DIGI_CONCENTRATORS_SUPPORTED
/*
* Sends a concentrator image into the FEP5 board.
*/
static void dgap_do_conc_load(struct board_t *brd, u8 *uaddr, int len)
{
	char __iomem *vaddr;
	u16 offset;
	struct downld_t *to_dp;

	if (!brd || (brd->magic != DGAP_BOARD_MAGIC) || !brd->re_map_membase)
		return;

	vaddr = brd->re_map_membase;

	/* The board publishes the download destination at DOWNREQ. */
	offset = readw((u16 *) (vaddr + DOWNREQ));
	to_dp = (struct downld_t *) (vaddr + (int) offset);
	memcpy_toio(to_dp, uaddr, len);

	/* Tell card we have data for it */
	writew(0, vaddr + (DOWNREQ));

	brd->conc_dl_status = NO_PENDING_CONCENTRATOR_REQUESTS;
}
#endif
#define EXPANSION_ROM_SIZE (64 * 1024)
#define FEP5_ROM_MAGIC (0xFEFFFFFF)
static void dgap_get_vpd(struct board_t *brd)
{
	u32 magic;
	u32 base_offset;
	u16 rom_offset;
	u16 vpd_offset;
	u16 image_length;
	u16 i;
	u8 byte1;
	u8 byte2;

	/*
	 * Poke the magic number at the PCI Rom Address location.
	 * If VPD is supported, the value read from that address
	 * will be non-zero.
	 */
	magic = FEP5_ROM_MAGIC;
	pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
	pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);

	/* VPD not supported, bail */
	if (!magic)
		return;

	/*
	 * To get to the OTPROM memory, we have to send the boards base
	 * address or'ed with 1 into the PCI Rom Address location.
	 */
	magic = brd->membase | 0x01;
	pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
	pci_read_config_dword(brd->pdev, PCI_ROM_ADDRESS, &magic);

	byte1 = readb(brd->re_map_membase);
	byte2 = readb(brd->re_map_membase + 1);

	/*
	 * If the board correctly swapped to the OTPROM memory,
	 * the first 2 bytes (header) should be 0x55, 0xAA
	 */
	if (byte1 == 0x55 && byte2 == 0xAA) {
		base_offset = 0;

		/*
		 * We have to run through all the OTPROM memory looking
		 * for the VPD offset.
		 *
		 * NOTE(review): vpd_offset is only read below after this
		 * loop body has run at least once (base_offset starts at
		 * 0 <= EXPANSION_ROM_SIZE), but the compiler may still
		 * warn about it being possibly uninitialized.
		 */
		while (base_offset <= EXPANSION_ROM_SIZE) {
			/*
			 * Lots of magic numbers here.
			 *
			 * The VPD offset is located inside the ROM Data
			 * Structure.
			 *
			 * We also have to remember the length of each
			 * ROM Data Structure, so we can "hop" to the next
			 * entry if the VPD isn't in the current
			 * ROM Data Structure.
			 */
			rom_offset = readw(brd->re_map_membase +
					   base_offset + 0x18);
			image_length = readw(brd->re_map_membase +
					     rom_offset + 0x10) * 512;
			vpd_offset = readw(brd->re_map_membase +
					   rom_offset + 0x08);

			/* Found the VPD entry */
			if (vpd_offset)
				break;

			/* We didn't find a VPD entry, go to next ROM entry. */
			base_offset += image_length;

			byte1 = readb(brd->re_map_membase + base_offset);
			byte2 = readb(brd->re_map_membase + base_offset + 1);

			/*
			 * If the new ROM offset doesn't have 0x55, 0xAA
			 * as its header, we have run out of ROM.
			 */
			if (byte1 != 0x55 || byte2 != 0xAA)
				break;
		}

		/*
		 * If we have a VPD offset, then mark the board
		 * as having a valid VPD, and copy VPDSIZE (512) bytes of
		 * that VPD to the buffer we have in our board structure.
		 */
		if (vpd_offset) {
			brd->bd_flags |= BD_HAS_VPD;
			for (i = 0; i < VPDSIZE; i++) {
				brd->vpd[i] = readb(brd->re_map_membase +
						    vpd_offset + i);
			}
		}
	}

	/*
	 * We MUST poke the magic number at the PCI Rom Address location again.
	 * This makes the card report the regular board memory back to us,
	 * rather than the OTPROM memory.
	 */
	magic = FEP5_ROM_MAGIC;
	pci_write_config_dword(brd->pdev, PCI_ROM_ADDRESS, magic);
}
/*
* Our board poller function.
*/
/*
 * Our board poller function.
 *
 * Services any pending FEP events and, when the board runs in
 * interrupt mode, ACKs the interrupt by reading the I/O port.
 */
static void dgap_poll_tasklet(unsigned long data)
{
	struct board_t *bd = (struct board_t *) data;
	ulong lock_flags;
	char __iomem *vaddr;
	u16 head, tail;

	if (!bd || (bd->magic != DGAP_BOARD_MAGIC))
		return;

	if (bd->inhibit_poller)
		return;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);

	vaddr = bd->re_map_membase;

	/*
	 * If board is ready, parse deeper to see if there is anything to do.
	 */
	if (bd->state == BOARD_READY) {
		struct ev_t __iomem *eaddr;

		if (!bd->re_map_membase) {
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			return;
		}
		if (!bd->re_map_port) {
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			return;
		}
		if (!bd->nasync)
			goto out;

		eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);

		/* Get our head and tail */
		head = readw(&(eaddr->ev_head));
		tail = readw(&(eaddr->ev_tail));

		/*
		 * If there is an event pending, go service it.
		 * dgap_event() must run without our board lock held.
		 */
		if (head != tail) {
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
			dgap_event(bd);
			spin_lock_irqsave(&bd->bd_lock, lock_flags);
		}

out:
		/*
		 * If board is doing interrupts, ACK the interrupt.
		 * ('bd' was already dereferenced above, so the old
		 * redundant null re-check was dropped.)
		 */
		if (bd->intr_running)
			readb(bd->re_map_port + 2);

		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return;
	}

	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
}
/*=======================================================================
*
* dgap_cmdb - Sends a 2 byte command to the FEP.
*
* ch - Pointer to channel structure.
* cmd - Command to be sent.
* byte1 - Integer containing first byte to be sent.
* byte2 - Integer containing second byte to be sent.
* ncmds - Wait until ncmds or fewer cmds are left
* in the cmd buffer before returning.
*
*=======================================================================*/
static void dgap_cmdb(struct channel_t *ch, u8 cmd, u8 byte1,
		      u8 byte2, uint ncmds)
{
	char __iomem *vaddr;
	struct __iomem cm_t *cm_addr;
	uint count;
	uint n;
	u16 head;
	u16 tail;

	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	/*
	 * Check if board is still alive.
	 */
	if (ch->ch_bd->state == BOARD_FAILED)
		return;

	/*
	 * Make sure the pointers are in range before
	 * writing to the FEP memory.
	 */
	vaddr = ch->ch_bd->re_map_membase;
	if (!vaddr)
		return;

	cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
	head = readw(&(cm_addr->cm_head));

	/*
	 * Forget it if pointers out of range (head must also be
	 * 4-byte aligned); treat that as a dead board.
	 */
	if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
		ch->ch_bd->state = BOARD_FAILED;
		return;
	}

	/*
	 * Put the 4-byte entry (cmd, port, byte1, byte2) in the
	 * circular command buffer, then advance head with wrap.
	 */
	writeb(cmd, (vaddr + head + CMDSTART + 0));
	writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
	writeb(byte1, (vaddr + head + CMDSTART + 2));
	writeb(byte2, (vaddr + head + CMDSTART + 3));

	head = (head + 4) & (CMDMAX - CMDSTART - 4);

	writew(head, &(cm_addr->cm_head));

	/*
	 * Wait if necessary before updating the head
	 * pointer to limit the number of outstanding
	 * commands to the FEP. If the time spent waiting
	 * is outlandish, declare the FEP dead.
	 */
	for (count = dgap_count ;;) {
		head = readw(&(cm_addr->cm_head));
		tail = readw(&(cm_addr->cm_tail));

		n = (head - tail) & (CMDMAX - CMDSTART - 4);

		if (n <= ncmds * sizeof(struct cm_t))
			break;

		if (--count == 0) {
			ch->ch_bd->state = BOARD_FAILED;
			return;
		}
		udelay(10);
	}
}
/*=======================================================================
*
* dgap_cmdw - Sends a 1 word command to the FEP.
*
* ch - Pointer to channel structure.
* cmd - Command to be sent.
* word - Integer containing word to be sent.
* ncmds - Wait until ncmds or fewer cmds are left
* in the cmd buffer before returning.
*
*=======================================================================*/
static void dgap_cmdw(struct channel_t *ch, u8 cmd, u16 word, uint ncmds)
{
	char __iomem *vaddr;
	struct __iomem cm_t *cm_addr;
	uint count;
	uint n;
	u16 head;
	u16 tail;

	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	/*
	 * Check if board is still alive.
	 */
	if (ch->ch_bd->state == BOARD_FAILED)
		return;

	/*
	 * Make sure the pointers are in range before
	 * writing to the FEP memory.
	 */
	vaddr = ch->ch_bd->re_map_membase;
	if (!vaddr)
		return;

	cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
	head = readw(&(cm_addr->cm_head));

	/*
	 * Forget it if pointers out of range (head must also be
	 * 4-byte aligned); treat that as a dead board.
	 */
	if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
		ch->ch_bd->state = BOARD_FAILED;
		return;
	}

	/*
	 * Put the 4-byte entry (cmd, port, 16-bit word) in the
	 * circular command buffer, then advance head with wrap.
	 */
	writeb(cmd, (vaddr + head + CMDSTART + 0));
	writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
	writew((u16) word, (vaddr + head + CMDSTART + 2));

	head = (head + 4) & (CMDMAX - CMDSTART - 4);

	writew(head, &(cm_addr->cm_head));

	/*
	 * Wait if necessary before updating the head
	 * pointer to limit the number of outstanding
	 * commands to the FEP. If the time spent waiting
	 * is outlandish, declare the FEP dead.
	 */
	for (count = dgap_count ;;) {
		head = readw(&(cm_addr->cm_head));
		tail = readw(&(cm_addr->cm_tail));

		n = (head - tail) & (CMDMAX - CMDSTART - 4);

		if (n <= ncmds * sizeof(struct cm_t))
			break;

		if (--count == 0) {
			ch->ch_bd->state = BOARD_FAILED;
			return;
		}
		udelay(10);
	}
}
/*=======================================================================
*
* dgap_cmdw_ext - Sends a extended word command to the FEP.
*
* ch - Pointer to channel structure.
* cmd - Command to be sent.
* word - Integer containing word to be sent.
* ncmds - Wait until ncmds or fewer cmds are left
* in the cmd buffer before returning.
*
*=======================================================================*/
static void dgap_cmdw_ext(struct channel_t *ch, u16 cmd, u16 word, uint ncmds)
{
	char __iomem *vaddr;
	struct __iomem cm_t *cm_addr;
	uint count;
	uint n;
	u16 head;
	u16 tail;

	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	/*
	 * Check if board is still alive.
	 */
	if (ch->ch_bd->state == BOARD_FAILED)
		return;

	/*
	 * Make sure the pointers are in range before
	 * writing to the FEP memory.
	 */
	vaddr = ch->ch_bd->re_map_membase;
	if (!vaddr)
		return;

	cm_addr = (struct cm_t __iomem *) (vaddr + CMDBUF);
	head = readw(&(cm_addr->cm_head));

	/*
	 * Forget it if pointers out of range (head must also be
	 * 4-byte aligned); treat that as a dead board.
	 */
	if (head >= (CMDMAX - CMDSTART) || (head & 03)) {
		ch->ch_bd->state = BOARD_FAILED;
		return;
	}

	/*
	 * Put the data in the circular command buffer.
	 * An extended command occupies two 4-byte slots.
	 */

	/* Write an FF to tell the FEP that we want an extended command */
	writeb((u8) 0xff, (vaddr + head + CMDSTART + 0));

	writeb((u8) ch->ch_portnum, (vaddr + head + CMDSTART + 1));
	writew((u16) cmd, (vaddr + head + CMDSTART + 2));

	/*
	 * If the second part of the command won't fit,
	 * put it at the beginning of the circular buffer.
	 */
	if (((head + 4) >= ((CMDMAX - CMDSTART)) || (head & 03)))
		writew((u16) word, (vaddr + CMDSTART));
	else
		writew((u16) word, (vaddr + head + CMDSTART + 4));

	/* Advance head by two slots (8 bytes) with wrap. */
	head = (head + 8) & (CMDMAX - CMDSTART - 4);

	writew(head, &(cm_addr->cm_head));

	/*
	 * Wait if necessary before updating the head
	 * pointer to limit the number of outstanding
	 * commands to the FEP. If the time spent waiting
	 * is outlandish, declare the FEP dead.
	 */
	for (count = dgap_count ;;) {
		head = readw(&(cm_addr->cm_head));
		tail = readw(&(cm_addr->cm_tail));

		n = (head - tail) & (CMDMAX - CMDSTART - 4);

		if (n <= ncmds * sizeof(struct cm_t))
			break;

		if (--count == 0) {
			ch->ch_bd->state = BOARD_FAILED;
			return;
		}
		udelay(10);
	}
}
/*=======================================================================
*
* dgap_wmove - Write data to FEP buffer.
*
* ch - Pointer to channel structure.
* buf - Poiter to characters to be moved.
* cnt - Number of characters to move.
*
*=======================================================================*/
/*
 * Copy "cnt" bytes from "buf" into the channel's circular transmit buffer
 * in FEP memory, handling at most one wrap, then publish the new tx_head
 * so the FEP can see the data.  Silently drops the write if the request
 * or the FEP's head pointer is out of range.
 */
static void dgap_wmove(struct channel_t *ch, char *buf, uint cnt)
{
	int n;
	char __iomem *taddr;
	struct bs_t __iomem *bs;
	u16 head;

	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	/*
	 * Check parameters.
	 */
	bs = ch->ch_bs;
	head = readw(&(bs->tx_head));

	/*
	 * If pointers are out of range, just return.
	 */
	if ((cnt > ch->ch_tsize) ||
	    (unsigned)(head - ch->ch_tstart) >= ch->ch_tsize)
		return;

	/*
	 * If the write wraps over the top of the circular buffer,
	 * move the portion up to the wrap point, and reset the
	 * pointers to the bottom.
	 */
	/* n = bytes left before the buffer wraps; the range check above
	 * guarantees head is inside the window, so n > 0 here. */
	n = ch->ch_tstart + ch->ch_tsize - head;

	if (cnt >= n) {
		cnt -= n;
		taddr = ch->ch_taddr + head;
		memcpy_toio(taddr, buf, n);
		head = ch->ch_tstart;
		buf += n;
	}

	/*
	 * Move rest of data.
	 */
	taddr = ch->ch_taddr + head;
	n = cnt;
	memcpy_toio(taddr, buf, n);
	head += cnt;

	/* Updating tx_head last is what hands the bytes to the FEP. */
	writew(head, &(bs->tx_head));
}
/*
 * Retrieves the current custom baud rate from FEP memory,
* and returns it back to the user.
* Returns 0 on error.
*/
static uint dgap_get_custom_baud(struct channel_t *ch)
{
u8 __iomem *vaddr;
ulong offset;
uint value;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
if (!ch->ch_bd || ch->ch_bd->magic != DGAP_BOARD_MAGIC)
return 0;
if (!(ch->ch_bd->bd_flags & BD_FEP5PLUS))
return 0;
vaddr = ch->ch_bd->re_map_membase;
if (!vaddr)
return 0;
/*
* Go get from fep mem, what the fep
* believes the custom baud rate is.
*/
offset = (ioread16(vaddr + ECS_SEG) << 4) + (ch->ch_portnum * 0x28)
+ LINE_SPEED;
value = readw(vaddr + offset);
return value;
}
/*
* Calls the firmware to reset this channel.
*/
/*
 * Ask the firmware to reset this channel, then invalidate every cached
 * firmware setting so the next parameter push re-sends a complete,
 * fresh configuration to the FEP (the reset wiped its state).
 */
static void dgap_firmware_reset_port(struct channel_t *ch)
{
	dgap_cmdb(ch, CHRESET, 0, 0, 0);

	ch->ch_hflow = 0;
	ch->ch_mostat = 0;
	ch->ch_fepiflag = 0;
	ch->ch_fepcflag = 0;
	ch->ch_fepoflag = 0;
	ch->ch_fepstartc = 0;
	ch->ch_fepstopc = 0;
	ch->ch_fepastartc = 0;
	ch->ch_fepastopc = 0;
}
/*=======================================================================
*
* dgap_param - Set Digi parameters.
*
 * ch - Channel to set parameters on; bd - its board; un_type - unit type.
*
*=======================================================================*/
/*
 * Push the channel's current termios/digi settings down to the FEP:
 * baud (including FEP5+ custom speeds), cflag/iflag, hardware flow
 * control, RTS/DTR toggle, modem lines and flow-control characters.
 * Only fields that differ from the cached ch_fep* copies are re-sent.
 * Returns 0.
 */
static int dgap_param(struct channel_t *ch, struct board_t *bd, u32 un_type)
{
	u16 head;
	u16 cflag;
	u16 iflag;
	u8 mval;
	u8 hflow;

	/*
	 * If baud rate is zero, flush queues, and set mval to drop DTR.
	 */
	if ((ch->ch_c_cflag & (CBAUD)) == 0) {
		/* flush rx */
		head = readw(&(ch->ch_bs->rx_head));
		writew(head, &(ch->ch_bs->rx_tail));

		/* flush tx */
		head = readw(&(ch->ch_bs->tx_head));
		writew(head, &(ch->ch_bs->tx_tail));

		ch->ch_flags |= (CH_BAUD0);

		/* Drop RTS and DTR */
		ch->ch_mval &= ~(D_RTS(ch)|D_DTR(ch));
		mval = D_DTR(ch) | D_RTS(ch);
		ch->ch_baud_info = 0;

	} else if (ch->ch_custom_speed && (bd->bd_flags & BD_FEP5PLUS)) {
		/*
		 * Tell the fep to do the command
		 */
		dgap_cmdw_ext(ch, 0xff01, ch->ch_custom_speed, 0);

		/*
		 * Now go get from fep mem, what the fep
		 * believes the custom baud rate is.
		 */
		ch->ch_custom_speed = dgap_get_custom_baud(ch);
		ch->ch_baud_info = ch->ch_custom_speed;

		/* Handle transition from B0 */
		if (ch->ch_flags & CH_BAUD0) {
			ch->ch_flags &= ~(CH_BAUD0);
			ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
		}
		mval = D_DTR(ch) | D_RTS(ch);

	} else {
		/*
		 * Set baud rate, character size, and parity.
		 */
		int iindex = 0;
		int jindex = 0;
		int baud = 0;

		/*
		 * Fix: static const keeps this 512-byte read-only table in
		 * .rodata instead of rebuilding it on the stack every call.
		 */
		static const ulong bauds[4][16] = {
			{ /* slowbaud */
				0,      50,     75,     110,
				134,    150,    200,    300,
				600,    1200,   1800,   2400,
				4800,   9600,   19200,  38400 },
			{ /* slowbaud & CBAUDEX */
				0,      57600,  115200, 230400,
				460800, 150,    200,    921600,
				600,    1200,   1800,   2400,
				4800,   9600,   19200,  38400 },
			{ /* fastbaud */
				0,      57600,  76800,  115200,
				14400,  57600,  230400, 76800,
				115200, 230400, 28800,  460800,
				921600, 9600,   19200,  38400 },
			{ /* fastbaud & CBAUDEX */
				0,      57600,  115200, 230400,
				460800, 150,    200,    921600,
				600,    1200,   1800,   2400,
				4800,   9600,   19200,  38400 }
		};

		/*
		 * Only use the TXPrint baud rate if the
		 * terminal unit is NOT open
		 */
		if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
		    un_type == DGAP_PRINT)
			baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
		else
			baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;

		if (ch->ch_c_cflag & CBAUDEX)
			iindex = 1;

		if (ch->ch_digi.digi_flags & DIGI_FAST)
			iindex += 2;

		jindex = baud;

		if ((iindex >= 0) && (iindex < 4) &&
		    (jindex >= 0) && (jindex < 16))
			baud = bauds[iindex][jindex];
		else
			baud = 0;

		if (baud == 0)
			baud = 9600;

		ch->ch_baud_info = baud;

		/*
		 * CBAUD has bit position 0x1000 set these days to
		 * indicate Linux baud rate remap.
		 * We use a different bit assignment for high speed.
		 * Clear this bit out while grabbing the parts of
		 * "cflag" we want.
		 */
		cflag = ch->ch_c_cflag & ((CBAUD ^ CBAUDEX) | PARODD | PARENB |
					  CSTOPB | CSIZE);

		/*
		 * HUPCL bit is used by FEP to indicate fast baud
		 * table is to be used.
		 */
		if ((ch->ch_digi.digi_flags & DIGI_FAST) ||
		    (ch->ch_c_cflag & CBAUDEX))
			cflag |= HUPCL;

		if ((ch->ch_c_cflag & CBAUDEX) &&
		    !(ch->ch_digi.digi_flags & DIGI_FAST)) {
			/*
			 * The below code is trying to guarantee that only
			 * baud rates 115200, 230400, 460800, 921600 are
			 * remapped. We use exclusive or because the various
			 * baud rates share common bit positions and therefore
			 * can't be tested for easily.
			 */
			tcflag_t tcflag = (ch->ch_c_cflag & CBAUD) | CBAUDEX;
			int baudpart = 0;

			/*
			 * Map high speed requests to index
			 * into FEP's baud table
			 */
			switch (tcflag) {
			case B57600:
				baudpart = 1;
				break;
#ifdef B76800
			case B76800:
				baudpart = 2;
				break;
#endif
			case B115200:
				baudpart = 3;
				break;
			case B230400:
				baudpart = 9;
				break;
			case B460800:
				baudpart = 11;
				break;
#ifdef B921600
			case B921600:
				baudpart = 12;
				break;
#endif
			default:
				baudpart = 0;
			}

			if (baudpart)
				cflag = (cflag & ~(CBAUD | CBAUDEX)) | baudpart;
		}

		cflag &= 0xffff;

		if (cflag != ch->ch_fepcflag) {
			ch->ch_fepcflag = (u16) (cflag & 0xffff);

			/*
			 * Okay to have channel and board
			 * locks held calling this
			 */
			dgap_cmdw(ch, SCFLAG, (u16) cflag, 0);
		}

		/* Handle transition from B0 */
		if (ch->ch_flags & CH_BAUD0) {
			ch->ch_flags &= ~(CH_BAUD0);
			ch->ch_mval |= (D_RTS(ch)|D_DTR(ch));
		}
		mval = D_DTR(ch) | D_RTS(ch);
	}

	/*
	 * Get input flags.
	 */
	iflag = ch->ch_c_iflag & (IGNBRK | BRKINT | IGNPAR | PARMRK |
				  INPCK | ISTRIP | IXON | IXANY | IXOFF);

	if ((ch->ch_startc == _POSIX_VDISABLE) ||
	    (ch->ch_stopc == _POSIX_VDISABLE)) {
		iflag &= ~(IXON | IXOFF);
		ch->ch_c_iflag &= ~(IXON | IXOFF);
	}

	/*
	 * Only the IBM Xr card can switch between
	 * 232 and 422 modes on the fly
	 */
	if (bd->device == PCI_DEV_XR_IBM_DID) {
		if (ch->ch_digi.digi_flags & DIGI_422)
			dgap_cmdb(ch, SCOMMODE, MODE_422, 0, 0);
		else
			dgap_cmdb(ch, SCOMMODE, MODE_232, 0, 0);
	}

	if (ch->ch_digi.digi_flags & DIGI_ALTPIN)
		iflag |= IALTPIN;

	if (iflag != ch->ch_fepiflag) {
		ch->ch_fepiflag = iflag;

		/* Okay to have channel and board locks held calling this */
		dgap_cmdw(ch, SIFLAG, (u16) ch->ch_fepiflag, 0);
	}

	/*
	 * Select hardware handshaking.
	 */
	hflow = 0;

	if (ch->ch_c_cflag & CRTSCTS)
		hflow |= (D_RTS(ch) | D_CTS(ch));
	if (ch->ch_digi.digi_flags & RTSPACE)
		hflow |= D_RTS(ch);
	if (ch->ch_digi.digi_flags & DTRPACE)
		hflow |= D_DTR(ch);
	if (ch->ch_digi.digi_flags & CTSPACE)
		hflow |= D_CTS(ch);
	if (ch->ch_digi.digi_flags & DSRPACE)
		hflow |= D_DSR(ch);
	if (ch->ch_digi.digi_flags & DCDPACE)
		hflow |= D_CD(ch);

	if (hflow != ch->ch_hflow) {
		ch->ch_hflow = hflow;

		/* Okay to have channel and board locks held calling this */
		dgap_cmdb(ch, SHFLOW, (u8) hflow, 0xff, 0);
	}

	/*
	 * Set RTS and/or DTR Toggle if needed,
	 * but only if product is FEP5+ based.
	 */
	if (bd->bd_flags & BD_FEP5PLUS) {
		u16 hflow2 = 0;

		if (ch->ch_digi.digi_flags & DIGI_RTS_TOGGLE)
			hflow2 |= (D_RTS(ch));
		if (ch->ch_digi.digi_flags & DIGI_DTR_TOGGLE)
			hflow2 |= (D_DTR(ch));

		dgap_cmdw_ext(ch, 0xff03, hflow2, 0);
	}

	/*
	 * Set modem control lines.
	 */
	mval ^= ch->ch_mforce & (mval ^ ch->ch_mval);

	if (ch->ch_mostat ^ mval) {
		ch->ch_mostat = mval;

		/* Okay to have channel and board locks held calling this */
		dgap_cmdb(ch, SMODEM, (u8) mval, D_RTS(ch)|D_DTR(ch), 0);
	}

	/*
	 * Read modem signals, and then call carrier function.
	 */
	ch->ch_mistat = readb(&(ch->ch_bs->m_stat));
	dgap_carrier(ch);

	/*
	 * Set the start and stop characters.
	 */
	if (ch->ch_startc != ch->ch_fepstartc ||
	    ch->ch_stopc != ch->ch_fepstopc) {
		ch->ch_fepstartc = ch->ch_startc;
		ch->ch_fepstopc = ch->ch_stopc;

		/* Okay to have channel and board locks held calling this */
		dgap_cmdb(ch, SFLOWC, ch->ch_fepstartc, ch->ch_fepstopc, 0);
	}

	/*
	 * Set the Auxiliary start and stop characters.
	 */
	if (ch->ch_astartc != ch->ch_fepastartc ||
	    ch->ch_astopc != ch->ch_fepastopc) {
		ch->ch_fepastartc = ch->ch_astartc;
		ch->ch_fepastopc = ch->ch_astopc;

		/* Okay to have channel and board locks held calling this */
		dgap_cmdb(ch, SAFLOWC, ch->ch_fepastartc, ch->ch_fepastopc, 0);
	}

	return 0;
}
/*
* dgap_parity_scan()
*
* Convert the FEP5 way of reporting parity errors and breaks into
* the Linux line discipline way.
*/
/*
 * Rewrite cbuf in place, decoding the FEP5 0xFF escape sequences into
 * per-character TTY flags in fbuf.  "FF FF" collapses to a literal 0xFF;
 * "FF 00 00" reports a break; "FF 00 <c>" reports a parity error on <c>.
 * State is kept in ch->pscan_state so a sequence may span calls.
 * On return *len holds the decoded character count (cout aliases cbuf,
 * which is why the output never outruns the input).
 */
static void dgap_parity_scan(struct channel_t *ch, unsigned char *cbuf,
			     unsigned char *fbuf, int *len)
{
	int l = *len;
	int count = 0;
	unsigned char *in, *cout, *fout;
	unsigned char c;

	in = cbuf;
	cout = cbuf;
	fout = fbuf;

	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return;

	while (l--) {
		c = *in++;
		switch (ch->pscan_state) {
		default:
			/* reset to sanity and fall through */
			ch->pscan_state = 0;

		case 0:
			/* No FF seen yet */
			if (c == (unsigned char) '\377')
				/* delete this character from stream */
				ch->pscan_state = 1;
			else {
				*cout++ = c;
				*fout++ = TTY_NORMAL;
				count += 1;
			}
			break;

		case 1:
			/* first FF seen */
			if (c == (unsigned char) '\377') {
				/* doubled ff, transform to single ff */
				*cout++ = c;
				*fout++ = TTY_NORMAL;
				count += 1;
				ch->pscan_state = 0;
			} else {
				/* save value examination in next state */
				ch->pscan_savechar = c;
				ch->pscan_state = 2;
			}
			break;

		case 2:
			/* third character of ff sequence */
			*cout++ = c;
			/* only "FF 00 x" flags an error; any other middle
			 * byte passes the third char through as normal */
			if (ch->pscan_savechar == 0x0) {
				if (c == 0x0) {
					ch->ch_err_break++;
					*fout++ = TTY_BREAK;
				} else {
					ch->ch_err_parity++;
					*fout++ = TTY_PARITY;
				}
			}
			count += 1;
			ch->pscan_state = 0;
		}
	}
	*len = count;
}
/*
 * If "mask" (UN_LOW or UN_EMPTY) is pending on the unit, clear it and,
 * for an open unit, notify the line discipline and any sleepers that
 * transmit space is available.  irq_flags1/2 are the caller's saved
 * flags for bd_lock and ch_lock, both of which must be held on entry
 * and are held again on return.
 */
static void dgap_write_wakeup(struct board_t *bd, struct channel_t *ch,
			      struct un_t *un, u32 mask,
			      unsigned long *irq_flags1,
			      unsigned long *irq_flags2)
{
	if (!(un->un_flags & mask))
		return;

	un->un_flags &= ~mask;

	if (!(un->un_flags & UN_ISOPEN))
		return;

	if ((un->un_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
	    un->un_tty->ldisc->ops->write_wakeup) {
		/*
		 * Drop both locks before calling into the line discipline:
		 * write_wakeup may re-enter this driver and would deadlock
		 * on the held locks.  Reacquire in board-then-channel order.
		 */
		spin_unlock_irqrestore(&ch->ch_lock, *irq_flags2);
		spin_unlock_irqrestore(&bd->bd_lock, *irq_flags1);

		(un->un_tty->ldisc->ops->write_wakeup)(un->un_tty);

		spin_lock_irqsave(&bd->bd_lock, *irq_flags1);
		spin_lock_irqsave(&ch->ch_lock, *irq_flags2);
	}
	wake_up_interruptible(&un->un_tty->write_wait);
	wake_up_interruptible(&un->un_flags_wait);
}
/*=======================================================================
*
* dgap_event - FEP to host event processing routine.
*
* bd - Board of current event.
*
*=======================================================================*/
static int dgap_event(struct board_t *bd)
{
	struct channel_t *ch;
	ulong lock_flags;
	ulong lock_flags2;
	struct bs_t __iomem *bs;
	u8 __iomem *event;
	u8 __iomem *vaddr;
	struct ev_t __iomem *eaddr;
	uint head;
	uint tail;
	int port;
	int reason;
	int modem;
	int b1;

	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return -EIO;

	spin_lock_irqsave(&bd->bd_lock, lock_flags);

	vaddr = bd->re_map_membase;

	if (!vaddr) {
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return -EIO;
	}

	eaddr = (struct ev_t __iomem *) (vaddr + EVBUF);

	/* Get our head and tail */
	head = readw(&(eaddr->ev_head));
	tail = readw(&(eaddr->ev_tail));

	/*
	 * Forget it if pointers out of range.
	 */
	if (head >= EVMAX - EVSTART || tail >= EVMAX - EVSTART ||
	    (head | tail) & 03) {
		/* Let go of board lock */
		spin_unlock_irqrestore(&bd->bd_lock, lock_flags);
		return -EIO;
	}

	/*
	 * Loop to process all the events in the buffer.
	 */
	while (tail != head) {
		/*
		 * Get interrupt information.
		 */
		/* Each event is 4 bytes: port, reason, modem state, spare. */
		event = bd->re_map_membase + tail + EVSTART;

		port = ioread8(event);
		reason = ioread8(event + 1);
		modem = ioread8(event + 2);
		/* b1 is read to consume the 4th event byte; unused below. */
		b1 = ioread8(event + 3);

		/*
		 * Make sure the interrupt is valid.
		 */
		if (port >= bd->nasync)
			goto next;

		if (!(reason & (IFMODEM | IFBREAK | IFTLW | IFTEM | IFDATA)))
			goto next;

		ch = bd->channels[port];

		if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
			goto next;

		/*
		 * If we have made it here, the event was valid.
		 * Lock down the channel.
		 */
		spin_lock_irqsave(&ch->ch_lock, lock_flags2);

		bs = ch->ch_bs;

		if (!bs) {
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
			goto next;
		}

		/*
		 * Process received data.
		 */
		if (reason & IFDATA) {
			/*
			 * ALL LOCKS *MUST* BE DROPPED BEFORE CALLING INPUT!
			 * input could send some data to ld, which in turn
			 * could do a callback to one of our other functions.
			 */
			spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);
			spin_unlock_irqrestore(&bd->bd_lock, lock_flags);

			dgap_input(ch);

			spin_lock_irqsave(&bd->bd_lock, lock_flags);
			spin_lock_irqsave(&ch->ch_lock, lock_flags2);

			/* Defer re-enabling rx interrupts while a reader is
			 * active; otherwise tell the FEP data was taken. */
			if (ch->ch_flags & CH_RACTIVE)
				ch->ch_flags |= CH_RENABLE;
			else
				writeb(1, &(bs->idata));

			if (ch->ch_flags & CH_RWAIT) {
				ch->ch_flags &= ~CH_RWAIT;

				wake_up_interruptible
					(&ch->ch_tun.un_flags_wait);
			}
		}

		/*
		 * Process Modem change signals.
		 */
		if (reason & IFMODEM) {
			ch->ch_mistat = modem;
			dgap_carrier(ch);
		}

		/*
		 * Process break.
		 */
		if (reason & IFBREAK) {
			if (ch->ch_tun.un_tty) {
				/* A break has been indicated */
				ch->ch_err_break++;
				tty_buffer_request_room
					(ch->ch_tun.un_tty->port, 1);
				tty_insert_flip_char(ch->ch_tun.un_tty->port,
						     0, TTY_BREAK);
				tty_flip_buffer_push(ch->ch_tun.un_tty->port);
			}
		}

		/*
		 * Process Transmit low.
		 */
		if (reason & IFTLW) {
			dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_LOW,
					  &lock_flags, &lock_flags2);
			dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_LOW,
					  &lock_flags, &lock_flags2);
			if (ch->ch_flags & CH_WLOW) {
				ch->ch_flags &= ~CH_WLOW;
				wake_up_interruptible(&ch->ch_flags_wait);
			}
		}

		/*
		 * Process Transmit empty.
		 */
		if (reason & IFTEM) {
			dgap_write_wakeup(bd, ch, &ch->ch_tun, UN_EMPTY,
					  &lock_flags, &lock_flags2);
			dgap_write_wakeup(bd, ch, &ch->ch_pun, UN_EMPTY,
					  &lock_flags, &lock_flags2);
			if (ch->ch_flags & CH_WEMPTY) {
				ch->ch_flags &= ~CH_WEMPTY;
				wake_up_interruptible(&ch->ch_flags_wait);
			}
		}

		spin_unlock_irqrestore(&ch->ch_lock, lock_flags2);

next:
		/* Advance past this 4-byte event, wrapping in place. */
		tail = (tail + 4) & (EVMAX - EVSTART - 4);
	}

	/* Publish the consumed position back to the FEP. */
	writew(tail, &(eaddr->ev_tail));
	spin_unlock_irqrestore(&bd->bd_lock, lock_flags);

	return 0;
}
/* sysfs (driver): report the driver version string. */
static ssize_t dgap_driver_version_show(struct device_driver *ddp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%s\n", DG_PART);
}
static DRIVER_ATTR(version, S_IRUSR, dgap_driver_version_show, NULL);
/* sysfs (driver): number of boards currently detected. */
static ssize_t dgap_driver_boards_show(struct device_driver *ddp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", dgap_numboards);
}
static DRIVER_ATTR(boards, S_IRUSR, dgap_driver_boards_show, NULL);
/* sysfs (driver): compile-time board limit. */
static ssize_t dgap_driver_maxboards_show(struct device_driver *ddp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", MAXBOARDS);
}
static DRIVER_ATTR(maxboards, S_IRUSR, dgap_driver_maxboards_show, NULL);
/* sysfs (driver): number of poller ticks since load. */
static ssize_t dgap_driver_pollcounter_show(struct device_driver *ddp,
					    char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%ld\n", dgap_poll_counter);
}
static DRIVER_ATTR(pollcounter, S_IRUSR, dgap_driver_pollcounter_show, NULL);
/* sysfs (driver): current poll interval in milliseconds. */
static ssize_t dgap_driver_pollrate_show(struct device_driver *ddp, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%dms\n", dgap_poll_tick);
}
/*
 * sysfs (driver): set the poll interval in milliseconds.
 * kstrtoint() replaces the old sscanf(): it rejects trailing garbage and
 * out-of-range values (sscanf happily accepted "10abc"), handles the
 * trailing newline sysfs delivers, and only writes on success.
 */
static ssize_t dgap_driver_pollrate_store(struct device_driver *ddp,
					  const char *buf, size_t count)
{
	int rc;

	rc = kstrtoint(buf, 10, &dgap_poll_tick);
	if (rc)
		return rc;
	return count;
}
static DRIVER_ATTR(pollrate, (S_IRUSR | S_IWUSR), dgap_driver_pollrate_show,
		   dgap_driver_pollrate_store);
static int dgap_create_driver_sysfiles(struct pci_driver *dgap_driver)
{
int rc = 0;
struct device_driver *driverfs = &dgap_driver->driver;
rc |= driver_create_file(driverfs, &driver_attr_version);
rc |= driver_create_file(driverfs, &driver_attr_boards);
rc |= driver_create_file(driverfs, &driver_attr_maxboards);
rc |= driver_create_file(driverfs, &driver_attr_pollrate);
rc |= driver_create_file(driverfs, &driver_attr_pollcounter);
return rc;
}
static void dgap_remove_driver_sysfiles(struct pci_driver *dgap_driver)
{
struct device_driver *driverfs = &dgap_driver->driver;
driver_remove_file(driverfs, &driver_attr_version);
driver_remove_file(driverfs, &driver_attr_boards);
driver_remove_file(driverfs, &driver_attr_maxboards);
driver_remove_file(driverfs, &driver_attr_pollrate);
driver_remove_file(driverfs, &driver_attr_pollcounter);
}
/* Resolve a sysfs device to its board, or NULL if it is not sane and READY. */
static struct board_t *dgap_verify_board(struct device *p)
{
	struct board_t *bd = p ? dev_get_drvdata(p) : NULL;

	if (!bd || bd->magic != DGAP_BOARD_MAGIC || bd->state != BOARD_READY)
		return NULL;
	return bd;
}
static ssize_t dgap_ports_state_show(struct device *p,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
int count = 0;
int i;
bd = dgap_verify_board(p);
if (!bd)
return 0;
for (i = 0; i < bd->nasync; i++) {
count += snprintf(buf + count, PAGE_SIZE - count,
"%d %s\n", bd->channels[i]->ch_portnum,
bd->channels[i]->ch_open_count ? "Open" : "Closed");
}
return count;
}
static DEVICE_ATTR(ports_state, S_IRUSR, dgap_ports_state_show, NULL);
static ssize_t dgap_ports_baud_show(struct device *p,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
int count = 0;
int i;
bd = dgap_verify_board(p);
if (!bd)
return 0;
for (i = 0; i < bd->nasync; i++) {
count += snprintf(buf + count, PAGE_SIZE - count, "%d %d\n",
bd->channels[i]->ch_portnum,
bd->channels[i]->ch_baud_info);
}
return count;
}
static DEVICE_ATTR(ports_baud, S_IRUSR, dgap_ports_baud_show, NULL);
static ssize_t dgap_ports_msignals_show(struct device *p,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
int count = 0;
int i;
bd = dgap_verify_board(p);
if (!bd)
return 0;
for (i = 0; i < bd->nasync; i++) {
if (bd->channels[i]->ch_open_count)
count += snprintf(buf + count, PAGE_SIZE - count,
"%d %s %s %s %s %s %s\n",
bd->channels[i]->ch_portnum,
(bd->channels[i]->ch_mostat &
UART_MCR_RTS) ? "RTS" : "",
(bd->channels[i]->ch_mistat &
UART_MSR_CTS) ? "CTS" : "",
(bd->channels[i]->ch_mostat &
UART_MCR_DTR) ? "DTR" : "",
(bd->channels[i]->ch_mistat &
UART_MSR_DSR) ? "DSR" : "",
(bd->channels[i]->ch_mistat &
UART_MSR_DCD) ? "DCD" : "",
(bd->channels[i]->ch_mistat &
UART_MSR_RI) ? "RI" : "");
else
count += snprintf(buf + count, PAGE_SIZE - count,
"%d\n", bd->channels[i]->ch_portnum);
}
return count;
}
static DEVICE_ATTR(ports_msignals, S_IRUSR, dgap_ports_msignals_show, NULL);
static ssize_t dgap_ports_iflag_show(struct device *p,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
int count = 0;
int i;
bd = dgap_verify_board(p);
if (!bd)
return 0;
for (i = 0; i < bd->nasync; i++)
count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
bd->channels[i]->ch_portnum,
bd->channels[i]->ch_c_iflag);
return count;
}
static DEVICE_ATTR(ports_iflag, S_IRUSR, dgap_ports_iflag_show, NULL);
static ssize_t dgap_ports_cflag_show(struct device *p,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
int count = 0;
int i;
bd = dgap_verify_board(p);
if (!bd)
return 0;
for (i = 0; i < bd->nasync; i++)
count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
bd->channels[i]->ch_portnum,
bd->channels[i]->ch_c_cflag);
return count;
}
static DEVICE_ATTR(ports_cflag, S_IRUSR, dgap_ports_cflag_show, NULL);
static ssize_t dgap_ports_oflag_show(struct device *p,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
int count = 0;
int i;
bd = dgap_verify_board(p);
if (!bd)
return 0;
for (i = 0; i < bd->nasync; i++)
count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
bd->channels[i]->ch_portnum,
bd->channels[i]->ch_c_oflag);
return count;
}
static DEVICE_ATTR(ports_oflag, S_IRUSR, dgap_ports_oflag_show, NULL);
static ssize_t dgap_ports_lflag_show(struct device *p,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
int count = 0;
int i;
bd = dgap_verify_board(p);
if (!bd)
return 0;
for (i = 0; i < bd->nasync; i++)
count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
bd->channels[i]->ch_portnum,
bd->channels[i]->ch_c_lflag);
return count;
}
static DEVICE_ATTR(ports_lflag, S_IRUSR, dgap_ports_lflag_show, NULL);
static ssize_t dgap_ports_digi_flag_show(struct device *p,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
int count = 0;
int i;
bd = dgap_verify_board(p);
if (!bd)
return 0;
for (i = 0; i < bd->nasync; i++)
count += snprintf(buf + count, PAGE_SIZE - count, "%d %x\n",
bd->channels[i]->ch_portnum,
bd->channels[i]->ch_digi.digi_flags);
return count;
}
static DEVICE_ATTR(ports_digi_flag, S_IRUSR, dgap_ports_digi_flag_show, NULL);
static ssize_t dgap_ports_rxcount_show(struct device *p,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
int count = 0;
int i;
bd = dgap_verify_board(p);
if (!bd)
return 0;
for (i = 0; i < bd->nasync; i++)
count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
bd->channels[i]->ch_portnum,
bd->channels[i]->ch_rxcount);
return count;
}
static DEVICE_ATTR(ports_rxcount, S_IRUSR, dgap_ports_rxcount_show, NULL);
static ssize_t dgap_ports_txcount_show(struct device *p,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
int count = 0;
int i;
bd = dgap_verify_board(p);
if (!bd)
return 0;
for (i = 0; i < bd->nasync; i++)
count += snprintf(buf + count, PAGE_SIZE - count, "%d %ld\n",
bd->channels[i]->ch_portnum,
bd->channels[i]->ch_txcount);
return count;
}
static DEVICE_ATTR(ports_txcount, S_IRUSR, dgap_ports_txcount_show, NULL);
/* this function creates the sys files that will export each signal status
* to sysfs each value will be put in a separate filename
*/
static void dgap_create_ports_sysfiles(struct board_t *bd)
{
dev_set_drvdata(&bd->pdev->dev, bd);
device_create_file(&(bd->pdev->dev), &dev_attr_ports_state);
device_create_file(&(bd->pdev->dev), &dev_attr_ports_baud);
device_create_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
device_create_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
device_create_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
device_create_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
device_create_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
device_create_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
device_create_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
device_create_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
}
/* removes all the sys files created for that port */
static void dgap_remove_ports_sysfiles(struct board_t *bd)
{
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_state);
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_baud);
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_msignals);
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_iflag);
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_cflag);
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_oflag);
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_lflag);
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_digi_flag);
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_rxcount);
device_remove_file(&(bd->pdev->dev), &dev_attr_ports_txcount);
}
static ssize_t dgap_tty_state_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
return 0;
un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return 0;
if (bd->state != BOARD_READY)
return 0;
return snprintf(buf, PAGE_SIZE, "%s", un->un_open_count ?
"Open" : "Closed");
}
static DEVICE_ATTR(state, S_IRUSR, dgap_tty_state_show, NULL);
static ssize_t dgap_tty_baud_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
return 0;
un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return 0;
if (bd->state != BOARD_READY)
return 0;
return snprintf(buf, PAGE_SIZE, "%d\n", ch->ch_baud_info);
}
static DEVICE_ATTR(baud, S_IRUSR, dgap_tty_baud_show, NULL);
static ssize_t dgap_tty_msignals_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
return 0;
un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return 0;
if (bd->state != BOARD_READY)
return 0;
if (ch->ch_open_count) {
return snprintf(buf, PAGE_SIZE, "%s %s %s %s %s %s\n",
(ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
(ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
(ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
(ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
(ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
(ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
}
return 0;
}
static DEVICE_ATTR(msignals, S_IRUSR, dgap_tty_msignals_show, NULL);
static ssize_t dgap_tty_iflag_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
return 0;
un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return 0;
if (bd->state != BOARD_READY)
return 0;
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_iflag);
}
static DEVICE_ATTR(iflag, S_IRUSR, dgap_tty_iflag_show, NULL);
static ssize_t dgap_tty_cflag_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
return 0;
un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return 0;
if (bd->state != BOARD_READY)
return 0;
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_cflag);
}
static DEVICE_ATTR(cflag, S_IRUSR, dgap_tty_cflag_show, NULL);
static ssize_t dgap_tty_oflag_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
return 0;
un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return 0;
if (bd->state != BOARD_READY)
return 0;
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_oflag);
}
static DEVICE_ATTR(oflag, S_IRUSR, dgap_tty_oflag_show, NULL);
static ssize_t dgap_tty_lflag_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
return 0;
un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return 0;
if (bd->state != BOARD_READY)
return 0;
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_c_lflag);
}
static DEVICE_ATTR(lflag, S_IRUSR, dgap_tty_lflag_show, NULL);
static ssize_t dgap_tty_digi_flag_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
return 0;
un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return 0;
if (bd->state != BOARD_READY)
return 0;
return snprintf(buf, PAGE_SIZE, "%x\n", ch->ch_digi.digi_flags);
}
static DEVICE_ATTR(digi_flag, S_IRUSR, dgap_tty_digi_flag_show, NULL);
static ssize_t dgap_tty_rxcount_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
return 0;
un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return 0;
if (bd->state != BOARD_READY)
return 0;
return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_rxcount);
}
static DEVICE_ATTR(rxcount, S_IRUSR, dgap_tty_rxcount_show, NULL);
static ssize_t dgap_tty_txcount_show(struct device *d,
struct device_attribute *attr,
char *buf)
{
struct board_t *bd;
struct channel_t *ch;
struct un_t *un;
if (!d)
return 0;
un = dev_get_drvdata(d);
if (!un || un->magic != DGAP_UNIT_MAGIC)
return 0;
ch = un->un_ch;
if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
return 0;
bd = ch->ch_bd;
if (!bd || bd->magic != DGAP_BOARD_MAGIC)
return 0;
if (bd->state != BOARD_READY)
return 0;
return snprintf(buf, PAGE_SIZE, "%ld\n", ch->ch_txcount);
}
static DEVICE_ATTR(txcount, S_IRUSR, dgap_tty_txcount_show, NULL);
/*
 * sysfs (tty): derive the custom device name for this unit by walking
 * the board's parsed config tree: a board node (BNODE) establishes the
 * numbering start, then TNODE/CNODE/MNODE entries map our channel number
 * to a configured name.  Falls back to "tty_dgap_<board>_<port>" (or the
 * "pr" print-unit prefix) when no config entry matches.
 */
static ssize_t dgap_tty_name_show(struct device *d,
				  struct device_attribute *attr,
				  char *buf)
{
	struct board_t *bd;
	struct channel_t *ch;
	struct un_t *un;
	int cn;
	int bn;
	struct cnode *cptr;
	int found = FALSE;
	int ncount = 0;
	int starto = 0;
	int i;

	if (!d)
		return 0;
	un = dev_get_drvdata(d);
	if (!un || un->magic != DGAP_UNIT_MAGIC)
		return 0;
	ch = un->un_ch;
	if (!ch || ch->magic != DGAP_CHANNEL_MAGIC)
		return 0;
	bd = ch->ch_bd;
	if (!bd || bd->magic != DGAP_BOARD_MAGIC)
		return 0;
	if (bd->state != BOARD_READY)
		return 0;

	bn = bd->boardnum;
	cn = ch->ch_portnum;

	for (cptr = bd->bd_config; cptr; cptr = cptr->next) {

		/* A matching board node records where numbering starts. */
		if ((cptr->type == BNODE) &&
		    ((cptr->u.board.type == APORT2_920P) ||
		     (cptr->u.board.type == APORT4_920P) ||
		     (cptr->u.board.type == APORT8_920P) ||
		     (cptr->u.board.type == PAPORT4) ||
		     (cptr->u.board.type == PAPORT8))) {

			found = TRUE;
			if (cptr->u.board.v_start)
				starto = cptr->u.board.start;
			else
				starto = 1;
		}

		/* tty-name node: strip a leading "tty" from the configured
		 * name, then match our port among the board's ports. */
		if (cptr->type == TNODE && found == TRUE) {
			char *ptr1;

			if (strstr(cptr->u.ttyname, "tty")) {
				ptr1 = cptr->u.ttyname;
				ptr1 += 3;
			} else
				ptr1 = cptr->u.ttyname;

			for (i = 0; i < dgap_config_get_num_prts(bd); i++) {
				if (cn != i)
					continue;

				return snprintf(buf, PAGE_SIZE, "%s%s%02d\n",
						(un->un_type == DGAP_PRINT) ?
						"pr" : "tty",
						ptr1, i + starto);
			}
		}

		/* Concentrator node: ports are numbered relative to the
		 * running ncount total across concentrators. */
		if (cptr->type == CNODE) {

			for (i = 0; i < cptr->u.conc.nport; i++) {
				if (cn != (i + ncount))
					continue;

				return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
						(un->un_type == DGAP_PRINT) ?
						"pr" : "tty",
						cptr->u.conc.id,
						i + (cptr->u.conc.v_start ?
						cptr->u.conc.start : 1));
			}

			ncount += cptr->u.conc.nport;
		}

		/* EBI module node: same relative numbering scheme. */
		if (cptr->type == MNODE) {

			for (i = 0; i < cptr->u.module.nport; i++) {
				if (cn != (i + ncount))
					continue;

				return snprintf(buf, PAGE_SIZE, "%s%s%02ld\n",
						(un->un_type == DGAP_PRINT) ?
						"pr" : "tty",
						cptr->u.module.id,
						i + (cptr->u.module.v_start ?
						cptr->u.module.start : 1));
			}

			ncount += cptr->u.module.nport;
		}
	}

	/* No config match: synthesize a default name. */
	return snprintf(buf, PAGE_SIZE, "%s_dgap_%d_%d\n",
			(un->un_type == DGAP_PRINT) ? "pr" : "tty", bn, cn);
}
static DEVICE_ATTR(custom_name, S_IRUSR, dgap_tty_name_show, NULL);
/* All per-tty sysfs attributes, registered as one unnamed group so the
 * files appear directly under the tty device directory. */
static struct attribute *dgap_sysfs_tty_entries[] = {
	&dev_attr_state.attr,
	&dev_attr_baud.attr,
	&dev_attr_msignals.attr,
	&dev_attr_iflag.attr,
	&dev_attr_cflag.attr,
	&dev_attr_oflag.attr,
	&dev_attr_lflag.attr,
	&dev_attr_digi_flag.attr,
	&dev_attr_rxcount.attr,
	&dev_attr_txcount.attr,
	&dev_attr_custom_name.attr,
	NULL		/* sentinel required by sysfs */
};

static struct attribute_group dgap_tty_attribute_group = {
	.name = NULL,	/* no subdirectory */
	.attrs = dgap_sysfs_tty_entries,
};
/*
 * Attach the per-tty attribute group to a tty device and remember the
 * unit in drvdata for the show callbacks.  On group-creation failure the
 * drvdata is left unset, so the show callbacks bail out harmlessly.
 */
static void dgap_create_tty_sysfs(struct un_t *un, struct device *c)
{
	if (sysfs_create_group(&c->kobj, &dgap_tty_attribute_group))
		return;

	dev_set_drvdata(c, un);
}
/* Detach the per-tty attribute group created by dgap_create_tty_sysfs(). */
static void dgap_remove_tty_sysfs(struct device *c)
{
	sysfs_remove_group(&c->kobj, &dgap_tty_attribute_group);
}
/*
 * Parse a configuration file that was read into memory as a single
 * NUL-terminated string.  Each recognized keyword either appends a new
 * cnode to the global list rooted at dgap_head or fills in fields of the
 * node currently being built.
 *
 * @in: in/out cursor into the configuration text; advanced as words are
 *      consumed by dgap_gettok()/dgap_getword().
 *
 * Returns 0 when the END token is seen, -1 on any parse error (a message
 * has already been logged through dgap_err()).
 */
static int dgap_parsefile(char **in)
{
	struct cnode *p, *brd, *line, *conc;
	int rc;
	char *s;
	int linecnt = 0;

	p = &dgap_head;
	brd = line = conc = NULL;

	/* perhaps we are adding to an existing list? */
	while (p->next)
		p = p->next;

	/* file must start with a BEGIN */
	while ((rc = dgap_gettok(in)) != BEGIN) {
		if (rc == 0) {
			dgap_err("unexpected EOF");
			return -1;
		}
	}

	for (; ;) {
		rc = dgap_gettok(in);
		if (rc == 0) {
			dgap_err("unexpected EOF");
			return -1;
		}

		switch (rc) {
		case BEGIN:	/* should only be 1 begin */
			dgap_err("unexpected config_begin\n");
			return -1;

		case END:
			return 0;

		case BOARD:	/* board info */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;

			p->type = BNODE;
			p->u.board.status = kstrdup("No", GFP_KERNEL);
			line = conc = NULL;
			brd = p;
			/* -1 so the first LINE keyword bumps it to 0 */
			linecnt = -1;
			break;

		case APORT2_920P:	/* AccelePort_2 */
			if (p->type != BNODE) {
				dgap_err("unexpected Digi_2r_920 string");
				return -1;
			}
			p->u.board.type = APORT2_920P;
			p->u.board.v_type = 1;
			break;

		case APORT4_920P:	/* AccelePort_4 */
			if (p->type != BNODE) {
				dgap_err("unexpected Digi_4r_920 string");
				return -1;
			}
			p->u.board.type = APORT4_920P;
			p->u.board.v_type = 1;
			break;

		case APORT8_920P:	/* AccelePort_8 */
			if (p->type != BNODE) {
				dgap_err("unexpected Digi_8r_920 string");
				return -1;
			}
			p->u.board.type = APORT8_920P;
			p->u.board.v_type = 1;
			break;

		case PAPORT4:	/* AccelePort_4 PCI */
			if (p->type != BNODE) {
				dgap_err("unexpected Digi_4r(PCI) string");
				return -1;
			}
			p->u.board.type = PAPORT4;
			p->u.board.v_type = 1;
			break;

		case PAPORT8:	/* AccelePort_8 PCI */
			if (p->type != BNODE) {
				dgap_err("unexpected Digi_8r string");
				return -1;
			}
			p->u.board.type = PAPORT8;
			p->u.board.v_type = 1;
			break;

		case PCX:	/* PCI C/X */
			if (p->type != BNODE) {
				dgap_err("unexpected Digi_C/X_(PCI) string");
				return -1;
			}
			p->u.board.type = PCX;
			p->u.board.v_type = 1;
			p->u.board.conc1 = 0;
			p->u.board.conc2 = 0;
			p->u.board.module1 = 0;
			p->u.board.module2 = 0;
			break;

		case PEPC:	/* PCI EPC/X */
			if (p->type != BNODE) {
				dgap_err("unexpected \"Digi_EPC/X_(PCI)\" string");
				return -1;
			}
			p->u.board.type = PEPC;
			p->u.board.v_type = 1;
			p->u.board.conc1 = 0;
			p->u.board.conc2 = 0;
			p->u.board.module1 = 0;
			p->u.board.module2 = 0;
			break;

		case PPCM:	/* PCI/Xem */
			if (p->type != BNODE) {
				dgap_err("unexpected PCI/Xem string");
				return -1;
			}
			p->u.board.type = PPCM;
			p->u.board.v_type = 1;
			p->u.board.conc1 = 0;
			p->u.board.conc2 = 0;
			break;

		case IO:	/* i/o port */
			if (p->type != BNODE) {
				dgap_err("IO port only valid for boards");
				return -1;
			}
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			p->u.board.portstr = kstrdup(s, GFP_KERNEL);
			if (kstrtol(s, 0, &p->u.board.port)) {
				dgap_err("bad number for IO port");
				return -1;
			}
			p->u.board.v_port = 1;
			break;

		case MEM:	/* memory address */
			if (p->type != BNODE) {
				dgap_err("memory address only valid for boards");
				return -1;
			}
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			p->u.board.addrstr = kstrdup(s, GFP_KERNEL);
			if (kstrtoul(s, 0, &p->u.board.addr)) {
				dgap_err("bad number for memory address");
				return -1;
			}
			p->u.board.v_addr = 1;
			break;

		case PCIINFO:	/* pci information */
			if (p->type != BNODE) {
				/* was a copy-paste of the MEM message */
				dgap_err("pci info only valid for boards");
				return -1;
			}
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			p->u.board.pcibusstr = kstrdup(s, GFP_KERNEL);
			if (kstrtoul(s, 0, &p->u.board.pcibus)) {
				dgap_err("bad number for pci bus");
				return -1;
			}
			p->u.board.v_pcibus = 1;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			p->u.board.pcislotstr = kstrdup(s, GFP_KERNEL);
			if (kstrtoul(s, 0, &p->u.board.pcislot)) {
				dgap_err("bad number for pci slot");
				return -1;
			}
			p->u.board.v_pcislot = 1;
			break;

		case METHOD:	/* install method */
			if (p->type != BNODE) {
				dgap_err("install method only valid for boards");
				return -1;
			}
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			p->u.board.method = kstrdup(s, GFP_KERNEL);
			p->u.board.v_method = 1;
			break;

		case STATUS:	/* config status */
			if (p->type != BNODE) {
				dgap_err("config status only valid for boards");
				return -1;
			}
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			/*
			 * Replace the "No" default installed in case BOARD
			 * without leaking it.
			 */
			kfree(p->u.board.status);
			p->u.board.status = kstrdup(s, GFP_KERNEL);
			break;

		case NPORTS:	/* number of ports */
			if (p->type == BNODE) {
				s = dgap_getword(in);
				if (!s) {
					dgap_err("unexpected end of file");
					return -1;
				}
				if (kstrtol(s, 0, &p->u.board.nport)) {
					dgap_err("bad number for number of ports");
					return -1;
				}
				p->u.board.v_nport = 1;
			} else if (p->type == CNODE) {
				s = dgap_getword(in);
				if (!s) {
					dgap_err("unexpected end of file");
					return -1;
				}
				if (kstrtol(s, 0, &p->u.conc.nport)) {
					dgap_err("bad number for number of ports");
					return -1;
				}
				p->u.conc.v_nport = 1;
			} else if (p->type == MNODE) {
				s = dgap_getword(in);
				if (!s) {
					dgap_err("unexpected end of file");
					return -1;
				}
				if (kstrtol(s, 0, &p->u.module.nport)) {
					dgap_err("bad number for number of ports");
					return -1;
				}
				p->u.module.v_nport = 1;
			} else {
				dgap_err("nports only valid for concentrators or modules");
				return -1;
			}
			break;

		case ID:	/* letter ID used in tty name */
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			/*
			 * BUG FIX: the old code also stored the word into
			 * p->u.board.status here, but p is a CNODE/MNODE at
			 * this point, so that store scribbled over the
			 * conc/module view of the union (and leaked the
			 * kstrdup()ed string).  The assignment is dropped.
			 */
			if (p->type == CNODE) {
				p->u.conc.id = kstrdup(s, GFP_KERNEL);
				p->u.conc.v_id = 1;
			} else if (p->type == MNODE) {
				p->u.module.id = kstrdup(s, GFP_KERNEL);
				p->u.module.v_id = 1;
			} else {
				dgap_err("id only valid for concentrators or modules");
				return -1;
			}
			break;

		case STARTO:	/* start offset of ID */
			if (p->type == BNODE) {
				s = dgap_getword(in);
				if (!s) {
					dgap_err("unexpected end of file");
					return -1;
				}
				if (kstrtol(s, 0, &p->u.board.start)) {
					dgap_err("bad number for start of tty count");
					return -1;
				}
				p->u.board.v_start = 1;
			} else if (p->type == CNODE) {
				s = dgap_getword(in);
				if (!s) {
					dgap_err("unexpected end of file");
					return -1;
				}
				if (kstrtol(s, 0, &p->u.conc.start)) {
					dgap_err("bad number for start of tty count");
					return -1;
				}
				p->u.conc.v_start = 1;
			} else if (p->type == MNODE) {
				s = dgap_getword(in);
				if (!s) {
					dgap_err("unexpected end of file");
					return -1;
				}
				if (kstrtol(s, 0, &p->u.module.start)) {
					dgap_err("bad number for start of tty count");
					return -1;
				}
				p->u.module.v_start = 1;
			} else {
				dgap_err("start only valid for concentrators or modules");
				return -1;
			}
			break;

		case TTYN:	/* tty name prefix */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = TNODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			p->u.ttyname = kstrdup(s, GFP_KERNEL);
			if (!p->u.ttyname) {
				dgap_err("out of memory");
				return -1;
			}
			break;

		case CU:	/* cu name prefix */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = CUNODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			p->u.cuname = kstrdup(s, GFP_KERNEL);
			if (!p->u.cuname) {
				dgap_err("out of memory");
				return -1;
			}
			break;

		case LINE:	/* line information */
			if (dgap_checknode(p))
				return -1;
			if (!brd) {
				dgap_err("must specify board before line info");
				return -1;
			}
			switch (brd->u.board.type) {
			case PPCM:
				dgap_err("line not valid for PC/em");
				return -1;
			}
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = LNODE;
			conc = NULL;
			line = p;
			linecnt++;
			break;

		case CONC:	/* concentrator information */
			if (dgap_checknode(p))
				return -1;
			if (!line) {
				dgap_err("must specify line info before concentrator");
				return -1;
			}
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = CNODE;
			conc = p;
			/* linecnt distinguishes the board's two sync lines */
			if (linecnt)
				brd->u.board.conc2++;
			else
				brd->u.board.conc1++;
			break;

		case CX:	/* c/x type concentrator */
			if (p->type != CNODE) {
				dgap_err("cx only valid for concentrators");
				return -1;
			}
			p->u.conc.type = CX;
			p->u.conc.v_type = 1;
			break;

		case EPC:	/* epc type concentrator */
			if (p->type != CNODE) {
				dgap_err("cx only valid for concentrators");
				return -1;
			}
			p->u.conc.type = EPC;
			p->u.conc.v_type = 1;
			break;

		case MOD:	/* EBI module */
			if (dgap_checknode(p))
				return -1;
			if (!brd) {
				dgap_err("must specify board info before EBI modules");
				return -1;
			}
			switch (brd->u.board.type) {
			case PPCM:
				/* PCI/Xem carries its modules directly */
				linecnt = 0;
				break;
			default:
				if (!conc) {
					dgap_err("must specify concentrator info before EBI module");
					return -1;
				}
			}
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = MNODE;
			if (linecnt)
				brd->u.board.module2++;
			else
				brd->u.board.module1++;
			break;

		case PORTS:	/* ports type EBI module */
			if (p->type != MNODE) {
				dgap_err("ports only valid for EBI modules");
				return -1;
			}
			p->u.module.type = PORTS;
			p->u.module.v_type = 1;
			break;

		case MODEM:	/* modem type EBI module */
			if (p->type != MNODE) {
				dgap_err("modem only valid for modem modules");
				return -1;
			}
			p->u.module.type = MODEM;
			p->u.module.v_type = 1;
			break;

		case CABLE:	/* sync line cable type */
			if (p->type == LNODE) {
				s = dgap_getword(in);
				if (!s) {
					dgap_err("unexpected end of file");
					return -1;
				}
				p->u.line.cable = kstrdup(s, GFP_KERNEL);
				p->u.line.v_cable = 1;
			}
			break;

		case SPEED:	/* sync line speed indication */
			if (p->type == LNODE) {
				s = dgap_getword(in);
				if (!s) {
					dgap_err("unexpected end of file");
					return -1;
				}
				if (kstrtol(s, 0, &p->u.line.speed)) {
					dgap_err("bad number for line speed");
					return -1;
				}
				p->u.line.v_speed = 1;
			} else if (p->type == CNODE) {
				s = dgap_getword(in);
				if (!s) {
					dgap_err("unexpected end of file");
					return -1;
				}
				if (kstrtol(s, 0, &p->u.conc.speed)) {
					dgap_err("bad number for line speed");
					return -1;
				}
				p->u.conc.v_speed = 1;
			} else {
				dgap_err("speed valid only for lines or concentrators.");
				return -1;
			}
			break;

		case CONNECT:	/* connect string for concentrator */
			if (p->type == CNODE) {
				s = dgap_getword(in);
				if (!s) {
					dgap_err("unexpected end of file");
					return -1;
				}
				p->u.conc.connect = kstrdup(s, GFP_KERNEL);
				p->u.conc.v_connect = 1;
			}
			break;

		case PRINT:	/* transparent print name prefix */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = PNODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			p->u.printname = kstrdup(s, GFP_KERNEL);
			if (!p->u.printname) {
				dgap_err("out of memory");
				return -1;
			}
			break;

		case CMAJOR:	/* major number */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = JNODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			if (kstrtol(s, 0, &p->u.majornumber)) {
				dgap_err("bad number for major number");
				return -1;
			}
			break;

		case ALTPIN:	/* altpin setting */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = ANODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			if (kstrtol(s, 0, &p->u.altpin)) {
				dgap_err("bad number for altpin");
				return -1;
			}
			break;

		case USEINTR:	/* enable interrupt setting */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = INTRNODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			if (kstrtol(s, 0, &p->u.useintr)) {
				dgap_err("bad number for useintr");
				return -1;
			}
			break;

		case TTSIZ:	/* size of tty structure */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = TSNODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			if (kstrtol(s, 0, &p->u.ttysize)) {
				dgap_err("bad number for ttysize");
				return -1;
			}
			break;

		case CHSIZ:	/* channel structure size */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = CSNODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			if (kstrtol(s, 0, &p->u.chsize)) {
				dgap_err("bad number for chsize");
				return -1;
			}
			break;

		case BSSIZ:	/* board structure size */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = BSNODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			if (kstrtol(s, 0, &p->u.bssize)) {
				dgap_err("bad number for bssize");
				return -1;
			}
			break;

		case UNTSIZ:	/* sched structure size */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = USNODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			if (kstrtol(s, 0, &p->u.unsize)) {
				dgap_err("bad number for schedsize");
				return -1;
			}
			break;

		case F2SIZ:	/* f2200 structure size */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = FSNODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			if (kstrtol(s, 0, &p->u.f2size)) {
				dgap_err("bad number for f2200size");
				return -1;
			}
			break;

		case VPSIZ:	/* vpix structure size */
			if (dgap_checknode(p))
				return -1;
			p->next = kzalloc(sizeof(struct cnode), GFP_KERNEL);
			if (!p->next) {
				dgap_err("out of memory");
				return -1;
			}
			p = p->next;
			p->type = VSNODE;
			s = dgap_getword(in);
			if (!s) {
				dgap_err("unexpected end of file");
				return -1;
			}
			if (kstrtol(s, 0, &p->u.vpixsize)) {
				dgap_err("bad number for vpixsize");
				return -1;
			}
			break;
		}
	}
}
/*
* dgap_sindex: much like index(), but it looks for a match of any character in
* the group, and returns that position. If the first character is a ^, then
* this will match the first occurrence not in that group.
*/
static char *dgap_sindex(char *string, char *group)
{
	char *scan;
	char *g;
	int negate = 0;

	if (!string || !group)
		return NULL;

	/* A leading '^' inverts the match: find the first char NOT in group. */
	if (*group == '^') {
		negate = 1;
		group++;
	}

	for (scan = string; *scan; scan++) {
		int in_group = 0;

		for (g = group; *g; g++) {
			if (*g == *scan) {
				in_group = 1;
				break;
			}
		}
		/* plain: stop on a member; negated: stop on a non-member */
		if (in_group != negate)
			return scan;
	}

	return NULL;
}
/*
 * Get a token from the input file; return 0 if end of file is reached.
 *
 * NOTE(review): when the previously-consumed word (dgap_cword) contained
 * "board", the very next word MUST name a known board type; an
 * unrecognized word then logs an error and returns the sentinel 1, which
 * is not a real token.  Outside that state, unrecognized words are
 * silently skipped until a known token or EOF is found.
 */
static int dgap_gettok(char **in)
{
	char *w;
	struct toklist *t;

	if (strstr(dgap_cword, "board")) {
		w = dgap_getword(in);
		/*
		 * NOTE(review): w may be NULL at EOF here; "%s" with a NULL
		 * argument relies on the kernel vsnprintf printing "(null)",
		 * and the strcmp below would dereference NULL -- confirm the
		 * config format guarantees a word follows "board".
		 */
		snprintf(dgap_cword, MAXCWORD, "%s", w);
		for (t = dgap_tlist; t->token != 0; t++) {
			if (!strcmp(w, t->string))
				return t->token;
		}
		dgap_err("board !!type not specified");
		return 1;
	} else {
		while ((w = dgap_getword(in))) {
			snprintf(dgap_cword, MAXCWORD, "%s", w);
			for (t = dgap_tlist; t->token != 0; t++) {
				if (!strcmp(w, t->string))
					return t->token;
			}
		}
		return 0;
	}
}
/*
 * Get one whitespace-delimited word from the input stream, destructively:
 * the separator is overwritten with '\0' and *in is advanced past any run
 * of following spaces/tabs/newlines.  Returns NULL when no separator
 * remains in the buffer.
 */
static char *dgap_getword(char **in)
{
	char *word = *in;
	char *sep = dgap_sindex(*in, " \t\n");

	if (!sep)
		return NULL;

	/* Terminate the word and step past the separator. */
	*sep++ = '\0';
	*in = sep;

	/* Swallow any extra whitespace before the next word. */
	while (*in && **in &&
	       (**in == ' ' || **in == '\t' || **in == '\n')) {
		**in = '\0';
		(*in)++;
	}

	return word;
}
/*
 * Log a configuration-parser error with the driver prefix.
 * (Despite older comments, no line number is tracked or printed.)
 */
static void dgap_err(char *s)
{
	pr_err("dgap: parse: %s\n", s);
}
/* Log @msg and report failure when a required field was never supplied. */
static int dgap_node_incomplete(int was_set, char *msg)
{
	if (was_set)
		return 0;
	dgap_err(msg);
	return 1;
}

/*
 * dgap_checknode: verify that all mandatory fields of the node being
 * closed out were supplied before the parser moves on to the next node.
 * Returns 1 (and logs) on the first missing field, 0 when complete.
 */
static int dgap_checknode(struct cnode *p)
{
	switch (p->type) {
	case BNODE:
		return dgap_node_incomplete(p->u.board.v_type,
					    "board type !not specified");

	case LNODE:
		return dgap_node_incomplete(p->u.line.v_speed,
					    "line speed not specified");

	case CNODE:
		if (dgap_node_incomplete(p->u.conc.v_type,
					 "concentrator type not specified"))
			return 1;
		if (dgap_node_incomplete(p->u.conc.v_speed,
					 "concentrator line speed not specified"))
			return 1;
		if (dgap_node_incomplete(p->u.conc.v_nport,
					 "number of ports on concentrator not specified"))
			return 1;
		return dgap_node_incomplete(p->u.conc.v_id,
					    "concentrator id letter not specified");

	case MNODE:
		if (dgap_node_incomplete(p->u.module.v_type,
					 "EBI module type not specified"))
			return 1;
		if (dgap_node_incomplete(p->u.module.v_nport,
					 "number of ports on EBI module not specified"))
			return 1;
		return dgap_node_incomplete(p->u.module.v_id,
					    "EBI module id letter not specified");
	}
	return 0;
}
/*
 * Given a board pointer, returns whether we should use interrupts or not:
 * the value of the first INTRNODE in the board's config chain, or 0 when
 * none is present.
 */
static uint dgap_config_get_useintr(struct board_t *bd)
{
	struct cnode *node;

	if (!bd)
		return 0;

	for (node = bd->bd_config; node; node = node->next)
		if (node->type == INTRNODE)
			return node->u.useintr;

	/* No USEINTR entry: leave interrupts off. */
	return 0;
}
/*
 * Given a board pointer, returns whether we turn on altpin or not:
 * the value of the first ANODE in the board's config chain, or 0 when
 * none is present.
 */
static uint dgap_config_get_altpin(struct board_t *bd)
{
	struct cnode *node;

	if (!bd)
		return 0;

	for (node = bd->bd_config; node; node = node->next)
		if (node->type == ANODE)
			return node->u.altpin;

	/* No ALTPIN entry: leave altpin off. */
	return 0;
}
/*
 * Given a board type (and optional PCI bus/slot filter), find the first
 * matching BNODE in the global config list, unlink that board's entire
 * chain of config nodes from the list, and return it.  Returns NULL when
 * no board matches.
 */
static struct cnode *dgap_find_config(int type, int bus, int slot)
{
	struct cnode *p, *prev, *prev2, *found;

	p = &dgap_head;

	while (p->next) {
		prev = p;
		p = p->next;
		/* Skip everything until the next board node. */
		if (p->type != BNODE)
			continue;
		if (p->u.board.type != type)
			continue;
		/* bus/slot only constrain the match when they were configured */
		if (p->u.board.v_pcibus &&
		    p->u.board.pcibus != bus)
			continue;
		if (p->u.board.v_pcislot &&
		    p->u.board.pcislot != slot)
			continue;
		found = p;
		/*
		 * Keep walking thru the list till we
		 * find the next board.
		 */
		while (p->next) {
			prev2 = p;
			p = p->next;
			if (p->type != BNODE)
				continue;
			/*
			 * Mark the end of our 1 board
			 * chain of configs.
			 */
			prev2->next = NULL;
			/*
			 * Link the "next" board to the
			 * previous board, effectively
			 * "unlinking" our board from
			 * the main config.
			 */
			prev->next = p;
			return found;
		}
		/*
		 * It must be the last board in the list.
		 */
		prev->next = NULL;
		return found;
	}
	return NULL;
}
/*
 * Given a board pointer, walks the config link, counting up all ports the
 * user's config says should exist on the board.  (This does NOT mean they
 * are all actually present right now.)
 */
static uint dgap_config_get_num_prts(struct board_t *bd)
{
	struct cnode *node;
	int total = 0;

	if (!bd)
		return 0;

	for (node = bd->bd_config; node; node = node->next) {
		if (node->type == CNODE)
			total += node->u.conc.nport;
		else if (node->type == MNODE)
			total += node->u.module.nport;
		else if (node->type == BNODE && node->u.board.type > EPCFE)
			/* pcxr-style boards carry their port count directly */
			total += node->u.board.nport;
	}

	return total;
}
/*
 * Build the firmware "config string" for a board from its config chain:
 * a byte sequence of per-line and per-concentrator port counts and
 * speeds, terminated by 0xff.  Writes into @string and returns it.
 *
 * NOTE(review): @string must be large enough for the whole chain; no
 * bound is checked here -- confirm against callers.
 */
static char *dgap_create_config_string(struct board_t *bd, char *string)
{
	char *ptr = string;
	struct cnode *p;
	struct cnode *q;
	int speed;

	if (!bd) {
		/* No board: emit just the terminator. */
		*ptr = 0xff;
		return string;
	}

	for (p = bd->bd_config; p; p = p->next) {
		switch (p->type) {
		case LNODE:
			*ptr = '\0';
			ptr++;
			/* NOTE(review): speed is a long truncated to char */
			*ptr = p->u.line.speed;
			ptr++;
			break;
		case CNODE:
			/*
			 * Because the EPC/con concentrators can have EM modules
			 * hanging off of them, we have to walk ahead in the
			 * list and keep adding the number of ports on each EM
			 * to the config. UGH!
			 */
			speed = p->u.conc.speed;
			q = p->next;
			if (q && (q->type == MNODE)) {
				/* 0x80 flags "more modules follow" */
				*ptr = (p->u.conc.nport + 0x80);
				ptr++;
				p = q;
				while (q->next && (q->next->type) == MNODE) {
					*ptr = (q->u.module.nport + 0x80);
					ptr++;
					p = q;
					q = q->next;
				}
				/* last module: no continuation flag */
				*ptr = q->u.module.nport;
				ptr++;
			} else {
				*ptr = p->u.conc.nport;
				ptr++;
			}

			*ptr = speed;
			ptr++;
			break;
		}
	}

	*ptr = 0xff;
	return string;
}
/*
* 32bit -> 64bit ioctl wrapper for PCM API
* Copyright (c) by Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/* This file included from pcm_native.c */
#include <linux/compat.h>
/* Fetch the current PCM delay and store it into the 32-bit user slot. */
static int snd_pcm_ioctl_delay_compat(struct snd_pcm_substream *substream,
				      s32 __user *src)
{
	snd_pcm_sframes_t frames_left;
	mm_segment_t old_fs;
	int rc;

	/* snd_pcm_delay() writes through a kernel pointer here. */
	old_fs = snd_enter_user();
	rc = snd_pcm_delay(substream, &frames_left);
	snd_leave_user(old_fs);

	if (rc < 0)
		return rc;

	return put_user(frames_left, src) ? -EFAULT : rc;
}
/* 32-bit REWIND: read the frame count, rewind, write back the result. */
static int snd_pcm_ioctl_rewind_compat(struct snd_pcm_substream *substream,
				       u32 __user *src)
{
	snd_pcm_uframes_t frames;
	int rc;

	if (get_user(frames, src))
		return -EFAULT;

	rc = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		? snd_pcm_playback_rewind(substream, frames)
		: snd_pcm_capture_rewind(substream, frames);

	/* The ioctl reports the rewound count in-place. */
	if (put_user(rc, src))
		return -EFAULT;

	return rc < 0 ? rc : 0;
}
/* 32-bit FORWARD: read the frame count, forward, write back the result. */
static int snd_pcm_ioctl_forward_compat(struct snd_pcm_substream *substream,
					u32 __user *src)
{
	snd_pcm_uframes_t frames;
	int rc;

	if (get_user(frames, src))
		return -EFAULT;

	rc = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		? snd_pcm_playback_forward(substream, frames)
		: snd_pcm_capture_forward(substream, frames);

	/* The ioctl reports the forwarded count in-place. */
	if (put_user(rc, src))
		return -EFAULT;

	return rc < 0 ? rc : 0;
}
/*
 * 32-bit layout of struct snd_pcm_hw_params.  Only fifo_size differs in
 * width from the native structure; the masks/intervals are identical.
 */
struct snd_pcm_hw_params32 {
	u32 flags;
	struct snd_mask masks[SNDRV_PCM_HW_PARAM_LAST_MASK - SNDRV_PCM_HW_PARAM_FIRST_MASK + 1]; /* this must be identical */
	struct snd_mask mres[5];	/* reserved masks */
	struct snd_interval intervals[SNDRV_PCM_HW_PARAM_LAST_INTERVAL - SNDRV_PCM_HW_PARAM_FIRST_INTERVAL + 1];
	struct snd_interval ires[9];	/* reserved intervals */
	u32 rmask;
	u32 cmask;
	u32 info;
	u32 msbits;
	u32 rate_num;
	u32 rate_den;
	u32 fifo_size;
	unsigned char reserved[64];
};

/*
 * 32-bit layout of struct snd_pcm_sw_params; all frame counts are u32,
 * so boundary values must be recomputed for 32-bit userland.
 */
struct snd_pcm_sw_params32 {
	s32 tstamp_mode;
	u32 period_step;
	u32 sleep_min;
	u32 avail_min;
	u32 xfer_align;
	u32 start_threshold;
	u32 stop_threshold;
	u32 silence_threshold;
	u32 silence_size;
	u32 boundary;
	unsigned char reserved[64];
};
/*
 * Recalculate the ring-buffer boundary so that it still fits in a 32-bit
 * frame counter: the largest power-of-two multiple of buffer_size that
 * stays below 0x7fffffff.  Returns 0 when no buffer is configured.
 */
static snd_pcm_uframes_t recalculate_boundary(struct snd_pcm_runtime *runtime)
{
	snd_pcm_uframes_t b = runtime->buffer_size;

	if (!b)
		return 0;

	/* Double while the next doubling still leaves headroom. */
	while (b * 2 <= 0x7fffffffUL - runtime->buffer_size)
		b *= 2;

	return b;
}
/*
 * 32-bit SW_PARAMS: copy the fields individually (layouts differ),
 * translate silence_size against the 32-bit boundary, and report the
 * recomputed boundary back to userland.
 *
 * FIX: the two "&params" expressions had been corrupted by an HTML
 * entity mangling ("¶ms"), which does not compile; restored.
 */
static int snd_pcm_ioctl_sw_params_compat(struct snd_pcm_substream *substream,
					  struct snd_pcm_sw_params32 __user *src)
{
	struct snd_pcm_sw_params params;
	snd_pcm_uframes_t boundary;
	int err;

	memset(&params, 0, sizeof(params));
	if (get_user(params.tstamp_mode, &src->tstamp_mode) ||
	    get_user(params.period_step, &src->period_step) ||
	    get_user(params.sleep_min, &src->sleep_min) ||
	    get_user(params.avail_min, &src->avail_min) ||
	    get_user(params.xfer_align, &src->xfer_align) ||
	    get_user(params.start_threshold, &src->start_threshold) ||
	    get_user(params.stop_threshold, &src->stop_threshold) ||
	    get_user(params.silence_threshold, &src->silence_threshold) ||
	    get_user(params.silence_size, &src->silence_size))
		return -EFAULT;
	/*
	 * Check silent_size parameter.  Since we have 64bit boundary,
	 * silence_size must be compared with the 32bit boundary.
	 */
	boundary = recalculate_boundary(substream->runtime);
	if (boundary && params.silence_size >= boundary)
		params.silence_size = substream->runtime->boundary;
	err = snd_pcm_sw_params(substream, &params);
	if (err < 0)
		return err;
	if (boundary && put_user(boundary, &src->boundary))
		return -EFAULT;
	return err;
}
/* 32-bit layout of struct snd_pcm_channel_info (all fields narrowed). */
struct snd_pcm_channel_info32 {
	u32 channel;
	u32 offset;
	u32 first;
	u32 step;
};
/* 32-bit CHANNEL_INFO: marshal through the native structure. */
static int snd_pcm_ioctl_channel_info_compat(struct snd_pcm_substream *substream,
					     struct snd_pcm_channel_info32 __user *src)
{
	struct snd_pcm_channel_info native;
	int rc;

	if (get_user(native.channel, &src->channel))
		return -EFAULT;
	if (get_user(native.offset, &src->offset))
		return -EFAULT;
	if (get_user(native.first, &src->first))
		return -EFAULT;
	if (get_user(native.step, &src->step))
		return -EFAULT;

	rc = snd_pcm_channel_info(substream, &native);
	if (rc < 0)
		return rc;

	if (put_user(native.channel, &src->channel) ||
	    put_user(native.offset, &src->offset) ||
	    put_user(native.first, &src->first) ||
	    put_user(native.step, &src->step))
		return -EFAULT;

	return rc;
}
/*
 * 32-bit layout of struct snd_pcm_status; packed so it matches the
 * 32-bit userland ABI exactly (timespecs are compat-sized).
 */
struct snd_pcm_status32 {
	s32 state;
	struct compat_timespec trigger_tstamp;
	struct compat_timespec tstamp;
	u32 appl_ptr;
	u32 hw_ptr;
	s32 delay;
	u32 avail;
	u32 avail_max;
	u32 overrange;
	s32 suspended_state;
	unsigned char reserved[60];
} __attribute__((packed));
/* 32-bit STATUS: query the native status and copy it out field by field. */
static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
				      struct snd_pcm_status32 __user *src)
{
	struct snd_pcm_status native;
	int rc;

	rc = snd_pcm_status(substream, &native);
	if (rc < 0)
		return rc;

	if (put_user(native.state, &src->state))
		return -EFAULT;
	if (put_user(native.trigger_tstamp.tv_sec, &src->trigger_tstamp.tv_sec))
		return -EFAULT;
	if (put_user(native.trigger_tstamp.tv_nsec, &src->trigger_tstamp.tv_nsec))
		return -EFAULT;
	if (put_user(native.tstamp.tv_sec, &src->tstamp.tv_sec))
		return -EFAULT;
	if (put_user(native.tstamp.tv_nsec, &src->tstamp.tv_nsec))
		return -EFAULT;
	if (put_user(native.appl_ptr, &src->appl_ptr))
		return -EFAULT;
	if (put_user(native.hw_ptr, &src->hw_ptr))
		return -EFAULT;
	if (put_user(native.delay, &src->delay))
		return -EFAULT;
	if (put_user(native.avail, &src->avail))
		return -EFAULT;
	if (put_user(native.avail_max, &src->avail_max))
		return -EFAULT;
	if (put_user(native.overrange, &src->overrange))
		return -EFAULT;
	if (put_user(native.suspended_state, &src->suspended_state))
		return -EFAULT;

	return rc;
}
/*
 * Shared handler for 32-bit HW_PARAMS and HW_REFINE (@refine selects).
 * The 32-bit and native structs differ only in fifo_size width, so the
 * whole struct is copied as-is and fifo_size is written back separately.
 * After a real HW_PARAMS the runtime boundary is recomputed to fit a
 * 32-bit frame counter.
 */
static int snd_pcm_ioctl_hw_params_compat(struct snd_pcm_substream *substream,
					  int refine,
					  struct snd_pcm_hw_params32 __user *data32)
{
	struct snd_pcm_hw_params *data;
	struct snd_pcm_runtime *runtime;
	int err;

	if (! (runtime = substream->runtime))
		return -ENOTTY;

	/* only fifo_size is different, so just copy all */
	data = memdup_user(data32, sizeof(*data32));
	if (IS_ERR(data))
		return PTR_ERR(data);

	if (refine)
		err = snd_pcm_hw_refine(substream, data);
	else
		err = snd_pcm_hw_params(substream, data);
	if (err < 0)
		goto error;
	if (copy_to_user(data32, data, sizeof(*data32)) ||
	    put_user(data->fifo_size, &data32->fifo_size)) {
		err = -EFAULT;
		goto error;
	}

	if (! refine) {
		/* keep the boundary representable in 32 bits */
		unsigned int new_boundary = recalculate_boundary(runtime);
		if (new_boundary)
			runtime->boundary = new_boundary;
	}
 error:
	kfree(data);
	return err;
}
/*
 * 32-bit layout of struct snd_xferi (interleaved read/write request);
 * buf is a 32-bit user pointer.
 */
struct snd_xferi32 {
	s32 result;
	u32 buf;
	u32 frames;
};
/* 32-bit interleaved READI/WRITEI: transfer and report the frame count. */
static int snd_pcm_ioctl_xferi_compat(struct snd_pcm_substream *substream,
				      int dir, struct snd_xferi32 __user *data32)
{
	compat_caddr_t addr;
	u32 count;
	int done;

	if (!substream->runtime)
		return -ENOTTY;
	if (substream->stream != dir)
		return -EINVAL;
	if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN)
		return -EBADFD;

	if (get_user(addr, &data32->buf) ||
	    get_user(count, &data32->frames))
		return -EFAULT;

	done = (dir == SNDRV_PCM_STREAM_PLAYBACK)
		? snd_pcm_lib_write(substream, compat_ptr(addr), count)
		: snd_pcm_lib_read(substream, compat_ptr(addr), count);
	if (done < 0)
		return done;

	/* copy the result */
	return put_user(done, &data32->result) ? -EFAULT : 0;
}
/*
 * 32-bit layout of struct snd_xfern (non-interleaved request); bufs is a
 * 32-bit pointer to an array of 32-bit channel-buffer pointers, so the
 * whole array needs remapping before use.
 */
struct snd_xfern32 {
	s32 result;
	u32 bufs;	/* this is void **; */
	u32 frames;
};
/*
 * The xfern ioctl needs to copy (up to) 128 channel pointers.  Rather
 * than bouncing the remapped pointer array through f_op->ioctl (which
 * would expand the same array on the stack again), translate the 32-bit
 * pointers here and call pcm_readv/writev directly.
 */
static int snd_pcm_ioctl_xfern_compat(struct snd_pcm_substream *substream,
				      int dir, struct snd_xfern32 __user *data32)
{
	compat_caddr_t buf;
	compat_caddr_t __user *bufptr;
	u32 frames;
	void __user **bufs;
	int err, ch, i;

	if (!substream->runtime)
		return -ENOTTY;
	if (substream->stream != dir)
		return -EINVAL;

	ch = substream->runtime->channels;
	if (ch > 128)
		return -EINVAL;

	if (get_user(buf, &data32->bufs) ||
	    get_user(frames, &data32->frames))
		return -EFAULT;

	bufptr = compat_ptr(buf);
	bufs = kmalloc(sizeof(void __user *) * ch, GFP_KERNEL);
	if (!bufs)
		return -ENOMEM;

	/* Widen each 32-bit channel pointer into the kernel-side array. */
	for (i = 0; i < ch; i++) {
		u32 ptr;

		if (get_user(ptr, bufptr + i)) {
			err = -EFAULT;
			goto out;
		}
		bufs[i] = compat_ptr(ptr);
	}

	if (dir == SNDRV_PCM_STREAM_PLAYBACK)
		err = snd_pcm_lib_writev(substream, bufs, frames);
	else
		err = snd_pcm_lib_readv(substream, bufs, frames);

	if (err >= 0 && put_user(err, &data32->result))
		err = -EFAULT;
 out:
	kfree(bufs);
	return err;
}
/* 32-bit layout of the mmap'able status record (packed for the ABI). */
struct snd_pcm_mmap_status32 {
	s32 state;
	s32 pad1;
	u32 hw_ptr;
	struct compat_timespec tstamp;
	s32 suspended_state;
} __attribute__((packed));

/* 32-bit layout of the mmap'able control record. */
struct snd_pcm_mmap_control32 {
	u32 appl_ptr;
	u32 avail_min;
};

/*
 * 32-bit SYNC_PTR argument: status/control unions padded to 64 bytes,
 * mirroring the native struct snd_pcm_sync_ptr.
 */
struct snd_pcm_sync_ptr32 {
	u32 flags;
	union {
		struct snd_pcm_mmap_status32 status;
		unsigned char reserved[64];
	} s;
	union {
		struct snd_pcm_mmap_control32 control;
		unsigned char reserved[64];
	} c;
} __attribute__((packed));
/*
 * 32-bit SYNC_PTR: exchange appl_ptr/avail_min between userland and the
 * mmap control record, and report hw_ptr/state back, all under the
 * stream lock.  Pointers are folded modulo a 32-bit-safe boundary so
 * they fit the narrowed fields.
 */
static int snd_pcm_ioctl_sync_ptr_compat(struct snd_pcm_substream *substream,
					 struct snd_pcm_sync_ptr32 __user *src)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	volatile struct snd_pcm_mmap_status *status;
	volatile struct snd_pcm_mmap_control *control;
	u32 sflags;
	struct snd_pcm_mmap_control scontrol;
	struct snd_pcm_mmap_status sstatus;
	snd_pcm_uframes_t boundary;
	int err;

	if (snd_BUG_ON(!runtime))
		return -EINVAL;

	if (get_user(sflags, &src->flags) ||
	    get_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
	    get_user(scontrol.avail_min, &src->c.control.avail_min))
		return -EFAULT;
	if (sflags & SNDRV_PCM_SYNC_PTR_HWSYNC) {
		err = snd_pcm_hwsync(substream);
		if (err < 0)
			return err;
	}
	status = runtime->status;
	control = runtime->control;
	boundary = recalculate_boundary(runtime);
	if (! boundary)
		boundary = 0x7fffffff;
	snd_pcm_stream_lock_irq(substream);
	/* FIXME: we should consider the boundary for the sync from app */
	/* Flag CLEAR means "apply the app's value"; flag SET means "read". */
	if (!(sflags & SNDRV_PCM_SYNC_PTR_APPL))
		control->appl_ptr = scontrol.appl_ptr;
	else
		scontrol.appl_ptr = control->appl_ptr % boundary;
	if (!(sflags & SNDRV_PCM_SYNC_PTR_AVAIL_MIN))
		control->avail_min = scontrol.avail_min;
	else
		scontrol.avail_min = control->avail_min;
	sstatus.state = status->state;
	sstatus.hw_ptr = status->hw_ptr % boundary;
	sstatus.tstamp = status->tstamp;
	sstatus.suspended_state = status->suspended_state;
	snd_pcm_stream_unlock_irq(substream);
	if (put_user(sstatus.state, &src->s.status.state) ||
	    put_user(sstatus.hw_ptr, &src->s.status.hw_ptr) ||
	    put_user(sstatus.tstamp.tv_sec, &src->s.status.tstamp.tv_sec) ||
	    put_user(sstatus.tstamp.tv_nsec, &src->s.status.tstamp.tv_nsec) ||
	    put_user(sstatus.suspended_state, &src->s.status.suspended_state) ||
	    put_user(scontrol.appl_ptr, &src->c.control.appl_ptr) ||
	    put_user(scontrol.avail_min, &src->c.control.avail_min))
		return -EFAULT;

	return 0;
}
/*
 * Compat ioctl numbers: same command nibbles as the native API, but the
 * size field encodes the 32-bit struct layouts, so the values differ.
 */
enum {
	SNDRV_PCM_IOCTL_HW_REFINE32 = _IOWR('A', 0x10, struct snd_pcm_hw_params32),
	SNDRV_PCM_IOCTL_HW_PARAMS32 = _IOWR('A', 0x11, struct snd_pcm_hw_params32),
	SNDRV_PCM_IOCTL_SW_PARAMS32 = _IOWR('A', 0x13, struct snd_pcm_sw_params32),
	SNDRV_PCM_IOCTL_STATUS32 = _IOR('A', 0x20, struct snd_pcm_status32),
	SNDRV_PCM_IOCTL_DELAY32 = _IOR('A', 0x21, s32),
	SNDRV_PCM_IOCTL_CHANNEL_INFO32 = _IOR('A', 0x32, struct snd_pcm_channel_info32),
	SNDRV_PCM_IOCTL_REWIND32 = _IOW('A', 0x46, u32),
	SNDRV_PCM_IOCTL_FORWARD32 = _IOW('A', 0x49, u32),
	SNDRV_PCM_IOCTL_WRITEI_FRAMES32 = _IOW('A', 0x50, struct snd_xferi32),
	SNDRV_PCM_IOCTL_READI_FRAMES32 = _IOR('A', 0x51, struct snd_xferi32),
	SNDRV_PCM_IOCTL_WRITEN_FRAMES32 = _IOW('A', 0x52, struct snd_xfern32),
	SNDRV_PCM_IOCTL_READN_FRAMES32 = _IOR('A', 0x53, struct snd_xfern32),
	SNDRV_PCM_IOCTL_SYNC_PTR32 = _IOWR('A', 0x23, struct snd_pcm_sync_ptr32),
};
/*
 * Compat ioctl entry point for PCM devices on a 64bit kernel with
 * 32bit userspace.  Size-compatible commands are forwarded to the
 * native playback/capture handlers; the *32 variants are translated
 * by the snd_pcm_ioctl_*_compat() helpers.
 *
 * Returns the handler's result, -ENOTTY when no pcm_file/substream is
 * attached, or -ENOIOCTLCMD for unrecognised commands.
 */
static long snd_pcm_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
{
struct snd_pcm_file *pcm_file;
struct snd_pcm_substream *substream;
void __user *argp = compat_ptr(arg);
pcm_file = file->private_data;
if (! pcm_file)
return -ENOTTY;
substream = pcm_file->substream;
if (! substream)
return -ENOTTY;
/*
* When PCM is used on 32bit mode, we need to disable
* mmap of PCM status/control records because of the size
* incompatibility.
*/
pcm_file->no_compat_mmap = 1;
switch (cmd) {
/* Layout-identical commands: dispatch straight to the native handler. */
case SNDRV_PCM_IOCTL_PVERSION:
case SNDRV_PCM_IOCTL_INFO:
case SNDRV_PCM_IOCTL_TSTAMP:
case SNDRV_PCM_IOCTL_TTSTAMP:
case SNDRV_PCM_IOCTL_HWSYNC:
case SNDRV_PCM_IOCTL_PREPARE:
case SNDRV_PCM_IOCTL_RESET:
case SNDRV_PCM_IOCTL_START:
case SNDRV_PCM_IOCTL_DROP:
case SNDRV_PCM_IOCTL_DRAIN:
case SNDRV_PCM_IOCTL_PAUSE:
case SNDRV_PCM_IOCTL_HW_FREE:
case SNDRV_PCM_IOCTL_RESUME:
case SNDRV_PCM_IOCTL_XRUN:
case SNDRV_PCM_IOCTL_LINK:
case SNDRV_PCM_IOCTL_UNLINK:
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
return snd_pcm_playback_ioctl1(file, substream, cmd, argp);
else
return snd_pcm_capture_ioctl1(file, substream, cmd, argp);
/* 32bit layouts: translate via the compat helpers. */
case SNDRV_PCM_IOCTL_HW_REFINE32:
return snd_pcm_ioctl_hw_params_compat(substream, 1, argp);
case SNDRV_PCM_IOCTL_HW_PARAMS32:
return snd_pcm_ioctl_hw_params_compat(substream, 0, argp);
case SNDRV_PCM_IOCTL_SW_PARAMS32:
return snd_pcm_ioctl_sw_params_compat(substream, argp);
case SNDRV_PCM_IOCTL_STATUS32:
return snd_pcm_status_user_compat(substream, argp);
case SNDRV_PCM_IOCTL_SYNC_PTR32:
return snd_pcm_ioctl_sync_ptr_compat(substream, argp);
case SNDRV_PCM_IOCTL_CHANNEL_INFO32:
return snd_pcm_ioctl_channel_info_compat(substream, argp);
case SNDRV_PCM_IOCTL_WRITEI_FRAMES32:
return snd_pcm_ioctl_xferi_compat(substream, SNDRV_PCM_STREAM_PLAYBACK, argp);
case SNDRV_PCM_IOCTL_READI_FRAMES32:
return snd_pcm_ioctl_xferi_compat(substream, SNDRV_PCM_STREAM_CAPTURE, argp);
case SNDRV_PCM_IOCTL_WRITEN_FRAMES32:
return snd_pcm_ioctl_xfern_compat(substream, SNDRV_PCM_STREAM_PLAYBACK, argp);
case SNDRV_PCM_IOCTL_READN_FRAMES32:
return snd_pcm_ioctl_xfern_compat(substream, SNDRV_PCM_STREAM_CAPTURE, argp);
case SNDRV_PCM_IOCTL_DELAY32:
return snd_pcm_ioctl_delay_compat(substream, argp);
case SNDRV_PCM_IOCTL_REWIND32:
return snd_pcm_ioctl_rewind_compat(substream, argp);
case SNDRV_PCM_IOCTL_FORWARD32:
return snd_pcm_ioctl_forward_compat(substream, argp);
}
return -ENOIOCTLCMD;
}
| gpl-2.0 |
nopy/acer_kernel_picasso | fs/jbd/journal.c | 88 | 55738 | /*
* linux/fs/jbd/journal.c
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 1998
*
* Copyright 1998 Red Hat corp --- All Rights Reserved
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
*
* Generic filesystem journal-writing code; part of the ext2fs
* journaling system.
*
* This file manages journals: areas of disk reserved for logging
* transactional updates. This includes the kernel journaling thread
* which is responsible for scheduling updates to the log.
*
* We do not actually manage the physical storage of the journal in this
* file: that is left to a per-journal policy function, which allows us
* to store the journal within a filesystem-specified area for ext2
* journaling (ext2 can use a reserved inode for storing the log).
*/
#include <linux/module.h>
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/freezer.h>
#include <linux/pagemap.h>
#include <linux/kthread.h>
#include <linux/poison.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <asm/uaccess.h>
#include <asm/page.h>
/* Handle (transaction) lifecycle API */
EXPORT_SYMBOL(journal_start);
EXPORT_SYMBOL(journal_restart);
EXPORT_SYMBOL(journal_extend);
EXPORT_SYMBOL(journal_stop);
EXPORT_SYMBOL(journal_lock_updates);
EXPORT_SYMBOL(journal_unlock_updates);
/* Buffer access within a transaction */
EXPORT_SYMBOL(journal_get_write_access);
EXPORT_SYMBOL(journal_get_create_access);
EXPORT_SYMBOL(journal_get_undo_access);
EXPORT_SYMBOL(journal_dirty_data);
EXPORT_SYMBOL(journal_dirty_metadata);
EXPORT_SYMBOL(journal_release_buffer);
EXPORT_SYMBOL(journal_forget);
#if 0
EXPORT_SYMBOL(journal_sync_buffer);
#endif
EXPORT_SYMBOL(journal_flush);
EXPORT_SYMBOL(journal_revoke);
/* Journal setup, feature management and teardown */
EXPORT_SYMBOL(journal_init_dev);
EXPORT_SYMBOL(journal_init_inode);
EXPORT_SYMBOL(journal_update_format);
EXPORT_SYMBOL(journal_check_used_features);
EXPORT_SYMBOL(journal_check_available_features);
EXPORT_SYMBOL(journal_set_features);
EXPORT_SYMBOL(journal_create);
EXPORT_SYMBOL(journal_load);
EXPORT_SYMBOL(journal_destroy);
/* Error state handling */
EXPORT_SYMBOL(journal_abort);
EXPORT_SYMBOL(journal_errno);
EXPORT_SYMBOL(journal_ack_err);
EXPORT_SYMBOL(journal_clear_err);
/* Commit control */
EXPORT_SYMBOL(log_wait_commit);
EXPORT_SYMBOL(log_start_commit);
EXPORT_SYMBOL(journal_start_commit);
EXPORT_SYMBOL(journal_force_commit_nested);
EXPORT_SYMBOL(journal_wipe);
EXPORT_SYMBOL(journal_blocks_per_page);
EXPORT_SYMBOL(journal_invalidatepage);
EXPORT_SYMBOL(journal_try_to_free_buffers);
EXPORT_SYMBOL(journal_force_commit);
/* Forward declarations for helpers defined later in this file. */
static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
static void __journal_abort_soft (journal_t *journal, int errno);
/*
 * Timer callback for the journal commit interval: the timer data is the
 * kjournald task, which we simply wake so it can run a commit.
 */
static void commit_timeout(unsigned long __data)
{
	wake_up_process((struct task_struct *)__data);
}
/*
 * kjournald: The main thread function used to manage a logging device
 * journal.
 *
 * This kernel thread is responsible for two things:
 *
 * 1) COMMIT: Every so often we need to commit the current state of the
 * filesystem to disk. The journal thread is responsible for writing
 * all of the metadata buffers to disk.
 *
 * 2) CHECKPOINT: We cannot reuse a used section of the log file until all
 * of the data in that part of the log has been rewritten elsewhere on
 * the disk. Flushing these old buffers to reclaim space in the log is
 * known as checkpointing, and this thread is responsible for that job.
 */
static int kjournald(void *arg)
{
journal_t *journal = arg;
transaction_t *transaction;
/*
 * Set up an interval timer which can be used to trigger a commit wakeup
 * after the commit interval expires.  commit_timeout() just wakes this
 * task; the commit itself runs below in this loop.
 */
setup_timer(&journal->j_commit_timer, commit_timeout,
(unsigned long)current);
/* Record that the journal thread is running */
journal->j_task = current;
/* Unblock journal_start_thread(), which waits for j_task != NULL. */
wake_up(&journal->j_wait_done_commit);
printk(KERN_INFO "kjournald starting. Commit interval %ld seconds\n",
journal->j_commit_interval / HZ);
/*
 * And now, wait forever for commit wakeup events.
 */
spin_lock(&journal->j_state_lock);
loop:
if (journal->j_flags & JFS_UNMOUNT)
goto end_loop;
jbd_debug(1, "commit_sequence=%d, commit_request=%d\n",
journal->j_commit_sequence, journal->j_commit_request);
if (journal->j_commit_sequence != journal->j_commit_request) {
jbd_debug(1, "OK, requests differ\n");
/* Drop the state lock for the (long) commit; retake it after. */
spin_unlock(&journal->j_state_lock);
del_timer_sync(&journal->j_commit_timer);
journal_commit_transaction(journal);
spin_lock(&journal->j_state_lock);
goto loop;
}
wake_up(&journal->j_wait_done_commit);
if (freezing(current)) {
/*
 * The simpler the better. Flushing journal isn't a
 * good idea, because that depends on threads that may
 * be already stopped.
 */
jbd_debug(1, "Now suspending kjournald\n");
spin_unlock(&journal->j_state_lock);
refrigerator();
spin_lock(&journal->j_state_lock);
} else {
/*
 * We assume on resume that commits are already there,
 * so we don't sleep
 */
DEFINE_WAIT(wait);
int should_sleep = 1;
prepare_to_wait(&journal->j_wait_commit, &wait,
TASK_INTERRUPTIBLE);
/* Re-check all wakeup conditions after queueing on the waitqueue. */
if (journal->j_commit_sequence != journal->j_commit_request)
should_sleep = 0;
transaction = journal->j_running_transaction;
if (transaction && time_after_eq(jiffies,
transaction->t_expires))
should_sleep = 0;
if (journal->j_flags & JFS_UNMOUNT)
should_sleep = 0;
if (should_sleep) {
spin_unlock(&journal->j_state_lock);
schedule();
spin_lock(&journal->j_state_lock);
}
finish_wait(&journal->j_wait_commit, &wait);
}
jbd_debug(1, "kjournald wakes\n");
/*
 * Were we woken up by a commit wakeup event?
 */
transaction = journal->j_running_transaction;
if (transaction && time_after_eq(jiffies, transaction->t_expires)) {
/* Commit timer expired: request a commit of the running transaction. */
journal->j_commit_request = transaction->t_tid;
jbd_debug(1, "woke because of timeout\n");
}
goto loop;
end_loop:
spin_unlock(&journal->j_state_lock);
del_timer_sync(&journal->j_commit_timer);
/* Clearing j_task signals journal_kill_thread() that we are gone. */
journal->j_task = NULL;
wake_up(&journal->j_wait_done_commit);
jbd_debug(1, "Journal thread exiting.\n");
return 0;
}
/*
 * Spawn kjournald for this journal and wait until it has recorded
 * itself in journal->j_task.  Returns 0 or a kthread_run() error code.
 */
static int journal_start_thread(journal_t *journal)
{
	struct task_struct *t = kthread_run(kjournald, journal, "kjournald");

	if (IS_ERR(t))
		return PTR_ERR(t);

	/* kjournald sets j_task and wakes us once it is running. */
	wait_event(journal->j_wait_done_commit, journal->j_task != NULL);
	return 0;
}
/*
 * Ask kjournald to exit (via JFS_UNMOUNT) and wait until it has cleared
 * journal->j_task.  The loop re-checks under j_state_lock because the
 * wakeup can race with the thread still running its main loop.
 */
static void journal_kill_thread(journal_t *journal)
{
spin_lock(&journal->j_state_lock);
journal->j_flags |= JFS_UNMOUNT;
while (journal->j_task) {
wake_up(&journal->j_wait_commit);
spin_unlock(&journal->j_state_lock);
wait_event(journal->j_wait_done_commit,
journal->j_task == NULL);
spin_lock(&journal->j_state_lock);
}
spin_unlock(&journal->j_state_lock);
}
/*
 * journal_write_metadata_buffer: write a metadata buffer to the journal.
 *
 * Writes a metadata buffer to a given disk block. The actual IO is not
 * performed but a new buffer_head is constructed which labels the data
 * to be written with the correct destination disk block.
 *
 * Any magic-number escaping which needs to be done will cause a
 * copy-out here. If the buffer happens to start with the
 * JFS_MAGIC_NUMBER, then we can't write it to the log directly: the
 * magic number is only written to the log for descripter blocks. In
 * this case, we copy the data and replace the first word with 0, and we
 * return a result code which indicates that this buffer needs to be
 * marked as an escaped buffer in the corresponding log descriptor
 * block. The missing word can then be restored when the block is read
 * during recovery.
 *
 * If the source buffer has already been modified by a new transaction
 * since we took the last commit snapshot, we use the frozen copy of
 * that data for IO. If we end up using the existing buffer_head's data
 * for the write, then we *have* to lock the buffer to prevent anyone
 * else from using and possibly modifying it while the IO is in
 * progress.
 *
 * The function returns a pointer to the buffer_heads to be used for IO.
 *
 * We assume that the journal has already been locked in this function.
 *
 * Return value:
 * <0: Error
 * >=0: Finished OK
 *
 * On success:
 * Bit 0 set == escape performed on the data
 * Bit 1 set == buffer copy-out performed (kfree the data after IO)
 */
int journal_write_metadata_buffer(transaction_t *transaction,
struct journal_head *jh_in,
struct journal_head **jh_out,
unsigned int blocknr)
{
int need_copy_out = 0;
int done_copy_out = 0;
int do_escape = 0;
char *mapped_data;
struct buffer_head *new_bh;
struct journal_head *new_jh;
struct page *new_page;
unsigned int new_offset;
struct buffer_head *bh_in = jh2bh(jh_in);
journal_t *journal = transaction->t_journal;
/*
 * The buffer really shouldn't be locked: only the current committing
 * transaction is allowed to write it, so nobody else is allowed
 * to do any IO.
 *
 * akpm: except if we're journalling data, and write() output is
 * also part of a shared mapping, and another thread has
 * decided to launch a writepage() against this buffer.
 */
J_ASSERT_BH(bh_in, buffer_jbddirty(bh_in));
/* __GFP_NOFAIL: allocation is allowed to block but must not fail. */
new_bh = alloc_buffer_head(GFP_NOFS|__GFP_NOFAIL);
/* keep subsequent assertions sane */
new_bh->b_state = 0;
init_buffer(new_bh, NULL, NULL);
atomic_set(&new_bh->b_count, 1);
new_jh = journal_add_journal_head(new_bh); /* This sleeps */
/*
 * If a new transaction has already done a buffer copy-out, then
 * we use that version of the data for the commit.
 */
jbd_lock_bh_state(bh_in);
repeat:
if (jh_in->b_frozen_data) {
done_copy_out = 1;
new_page = virt_to_page(jh_in->b_frozen_data);
new_offset = offset_in_page(jh_in->b_frozen_data);
} else {
new_page = jh2bh(jh_in)->b_page;
new_offset = offset_in_page(jh2bh(jh_in)->b_data);
}
mapped_data = kmap_atomic(new_page, KM_USER0);
/*
 * Check for escaping: a data block starting with the journal magic
 * would be mistaken for a descriptor block during recovery.
 */
if (*((__be32 *)(mapped_data + new_offset)) ==
cpu_to_be32(JFS_MAGIC_NUMBER)) {
need_copy_out = 1;
do_escape = 1;
}
kunmap_atomic(mapped_data, KM_USER0);
/*
 * Do we need to do a data copy?
 */
if (need_copy_out && !done_copy_out) {
char *tmp;
/* Must drop the state lock to allocate; recheck afterwards. */
jbd_unlock_bh_state(bh_in);
tmp = jbd_alloc(bh_in->b_size, GFP_NOFS);
jbd_lock_bh_state(bh_in);
/* Someone beat us to a copy-out while the lock was dropped. */
if (jh_in->b_frozen_data) {
jbd_free(tmp, bh_in->b_size);
goto repeat;
}
jh_in->b_frozen_data = tmp;
mapped_data = kmap_atomic(new_page, KM_USER0);
memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
kunmap_atomic(mapped_data, KM_USER0);
new_page = virt_to_page(tmp);
new_offset = offset_in_page(tmp);
done_copy_out = 1;
}
/*
 * Did we need to do an escaping? Now we've done all the
 * copying, we can finally do so.
 */
if (do_escape) {
mapped_data = kmap_atomic(new_page, KM_USER0);
*((unsigned int *)(mapped_data + new_offset)) = 0;
kunmap_atomic(mapped_data, KM_USER0);
}
/* Aim new_bh at the chosen (frozen or live) page/offset. */
set_bh_page(new_bh, new_page, new_offset);
new_jh->b_transaction = NULL;
new_bh->b_size = jh2bh(jh_in)->b_size;
new_bh->b_bdev = transaction->t_journal->j_dev;
new_bh->b_blocknr = blocknr;
set_buffer_mapped(new_bh);
set_buffer_dirty(new_bh);
*jh_out = new_jh;
/*
 * The to-be-written buffer needs to get moved to the io queue,
 * and the original buffer whose contents we are shadowing or
 * copying is moved to the transaction's shadow queue.
 */
JBUFFER_TRACE(jh_in, "file as BJ_Shadow");
spin_lock(&journal->j_list_lock);
__journal_file_buffer(jh_in, transaction, BJ_Shadow);
spin_unlock(&journal->j_list_lock);
jbd_unlock_bh_state(bh_in);
JBUFFER_TRACE(new_jh, "file as BJ_IO");
journal_file_buffer(new_jh, transaction, BJ_IO);
return do_escape | (done_copy_out << 1);
}
/*
* Allocation code for the journal file. Manage the space left in the
* journal, so that we can begin checkpointing when appropriate.
*/
/*
 * __log_space_left: Return the number of free blocks left in the journal.
 *
 * Called with the journal already locked.
 *
 * Called under j_state_lock
 */
int __log_space_left(journal_t *journal)
{
	int free_blocks = journal->j_free;

	assert_spin_locked(&journal->j_state_lock);

	/*
	 * Be pessimistic here about the number of those free blocks which
	 * might be required for log descriptor control blocks.
	 */
#define MIN_LOG_RESERVED_BLOCKS 32 /* Allow for rounding errors */

	free_blocks -= MIN_LOG_RESERVED_BLOCKS;
	if (free_blocks <= 0)
		return 0;

	/* Knock a further 12.5% off as extra slack. */
	return free_blocks - (free_blocks >> 3);
}
/*
 * Called under j_state_lock. Returns true if a transaction commit was started.
 */
int __log_start_commit(journal_t *journal, tid_t target)
{
	/* A commit covering 'target' is already requested (or done). */
	if (tid_geq(journal->j_commit_request, target))
		return 0;

	/*
	 * We want a new commit: OK, mark the request and wakeup the
	 * commit thread.  We do _not_ do the commit ourselves.
	 */
	journal->j_commit_request = target;
	jbd_debug(1, "JBD: requesting commit %d/%d\n",
		  journal->j_commit_request,
		  journal->j_commit_sequence);
	wake_up(&journal->j_wait_commit);
	return 1;
}
/*
 * Locked wrapper around __log_start_commit().  Returns true if a
 * commit of 'tid' was newly requested.
 */
int log_start_commit(journal_t *journal, tid_t tid)
{
	int started;

	spin_lock(&journal->j_state_lock);
	started = __log_start_commit(journal, tid);
	spin_unlock(&journal->j_state_lock);

	return started;
}
/*
 * Force and wait upon a commit if the calling process is not within
 * transaction. This is used for forcing out undo-protected data which contains
 * bitmaps, when the fs is running out of space.
 *
 * We can only force the running transaction if we don't have an active handle;
 * otherwise, we will deadlock.
 *
 * Returns true if a transaction was started.
 */
int journal_force_commit_nested(journal_t *journal)
{
transaction_t *transaction = NULL;
tid_t tid;
spin_lock(&journal->j_state_lock);
/* current->journal_info non-NULL means we hold an active handle. */
if (journal->j_running_transaction && !current->journal_info) {
transaction = journal->j_running_transaction;
__log_start_commit(journal, transaction->t_tid);
} else if (journal->j_committing_transaction)
transaction = journal->j_committing_transaction;
if (!transaction) {
spin_unlock(&journal->j_state_lock);
return 0; /* Nothing to retry */
}
/* Snapshot the tid before dropping the lock; then wait unlocked. */
tid = transaction->t_tid;
spin_unlock(&journal->j_state_lock);
log_wait_commit(journal, tid);
return 1;
}
/*
 * Start a commit of the current running transaction (if any). Returns true
 * if a transaction is going to be committed (or is currently already
 * committing), and fills its tid in at *ptid
 */
int journal_start_commit(journal_t *journal, tid_t *ptid)
{
int ret = 0;
spin_lock(&journal->j_state_lock);
if (journal->j_running_transaction) {
tid_t tid = journal->j_running_transaction->t_tid;
__log_start_commit(journal, tid);
/* There's a running transaction and we've just made sure
 * it's commit has been scheduled. */
if (ptid)
*ptid = tid;
ret = 1;
} else if (journal->j_committing_transaction) {
/*
 * If ext3_write_super() recently started a commit, then we
 * have to wait for completion of that transaction
 */
if (ptid)
*ptid = journal->j_committing_transaction->t_tid;
ret = 1;
}
spin_unlock(&journal->j_state_lock);
return ret;
}
/*
 * Wait for a specified commit to complete.
 * The caller may not hold the journal lock.
 *
 * Returns 0 on success, -EIO if the journal has been aborted.
 */
int log_wait_commit(journal_t *journal, tid_t tid)
{
int err = 0;
#ifdef CONFIG_JBD_DEBUG
/* Sanity check: waiting for a commit that was never requested. */
spin_lock(&journal->j_state_lock);
if (!tid_geq(journal->j_commit_request, tid)) {
printk(KERN_EMERG
"%s: error: j_commit_request=%d, tid=%d\n",
__func__, journal->j_commit_request, tid);
}
spin_unlock(&journal->j_state_lock);
#endif
spin_lock(&journal->j_state_lock);
while (tid_gt(tid, journal->j_commit_sequence)) {
jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
tid, journal->j_commit_sequence);
/* Kick kjournald, then sleep unlocked until the commit lands. */
wake_up(&journal->j_wait_commit);
spin_unlock(&journal->j_state_lock);
wait_event(journal->j_wait_done_commit,
!tid_gt(tid, journal->j_commit_sequence));
spin_lock(&journal->j_state_lock);
}
spin_unlock(&journal->j_state_lock);
if (unlikely(is_journal_aborted(journal))) {
printk(KERN_EMERG "journal commit I/O error\n");
err = -EIO;
}
return err;
}
/*
 * Return 1 if a given transaction has not yet sent barrier request
 * connected with a transaction commit. If 0 is returned, transaction
 * may or may not have sent the barrier. Used to avoid sending barrier
 * twice in common cases.
 */
int journal_trans_will_send_data_barrier(journal_t *journal, tid_t tid)
{
int ret = 0;
transaction_t *commit_trans;
/* Journals without barrier support never send one. */
if (!(journal->j_flags & JFS_BARRIER))
return 0;
spin_lock(&journal->j_state_lock);
/* Transaction already committed? */
if (tid_geq(journal->j_commit_sequence, tid))
goto out;
/*
 * Transaction is being committed and we already proceeded to
 * writing commit record?
 */
commit_trans = journal->j_committing_transaction;
if (commit_trans && commit_trans->t_tid == tid &&
commit_trans->t_state >= T_COMMIT_RECORD)
goto out;
ret = 1;
out:
spin_unlock(&journal->j_state_lock);
return ret;
}
EXPORT_SYMBOL(journal_trans_will_send_data_barrier);
/*
 * Log buffer allocation routines:
 */
int journal_next_log_block(journal_t *journal, unsigned int *retp)
{
	unsigned int blocknr;

	spin_lock(&journal->j_state_lock);
	J_ASSERT(journal->j_free > 1);

	/* Claim the current head block, advancing and wrapping at j_last. */
	blocknr = journal->j_head++;
	journal->j_free--;
	if (journal->j_head == journal->j_last)
		journal->j_head = journal->j_first;
	spin_unlock(&journal->j_state_lock);

	/* Translate the logical journal block into an on-disk block number. */
	return journal_bmap(journal, blocknr, retp);
}
/*
 * Conversion of logical to physical block numbers for the journal
 *
 * On external journals the journal blocks are identity-mapped, so
 * this is a no-op. If needed, we can use j_blk_offset - everything is
 * ready.
 */
int journal_bmap(journal_t *journal, unsigned int blocknr,
		 unsigned int *retp)
{
	unsigned int phys;

	/* External journal: blocks are identity-mapped. */
	if (!journal->j_inode) {
		*retp = blocknr; /* +journal->j_blk_offset */
		return 0;
	}

	phys = bmap(journal->j_inode, blocknr);
	if (phys) {
		*retp = phys;
		return 0;
	}

	/* bmap() found a hole: the journal inode is corrupt, abort. */
	{
		char b[BDEVNAME_SIZE];

		printk(KERN_ALERT "%s: journal block not found "
			"at offset %u on %s\n",
			__func__,
			blocknr,
			bdevname(journal->j_dev, b));
	}
	__journal_abort_soft(journal, -EIO);
	return -EIO;
}
/*
 * We play buffer_head aliasing tricks to write data/metadata blocks to
 * the journal without copying their contents, but for journal
 * descriptor blocks we do need to generate bona fide buffers.
 *
 * After the caller of journal_get_descriptor_buffer() has finished modifying
 * the buffer's contents they really should run flush_dcache_page(bh->b_page).
 * But we don't bother doing that, so there will be coherency problems with
 * mmaps of blockdevs which hold live JBD-controlled filesystems.
 *
 * Returns a journal_head for a zeroed, uptodate buffer at the next log
 * block, or NULL on allocation/mapping failure.
 */
struct journal_head *journal_get_descriptor_buffer(journal_t *journal)
{
struct buffer_head *bh;
unsigned int blocknr;
int err;
err = journal_next_log_block(journal, &blocknr);
if (err)
return NULL;
bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
if (!bh)
return NULL;
/* Zero the block under the buffer lock before handing it out. */
lock_buffer(bh);
memset(bh->b_data, 0, journal->j_blocksize);
set_buffer_uptodate(bh);
unlock_buffer(bh);
BUFFER_TRACE(bh, "return this buffer");
return journal_add_journal_head(bh);
}
/*
 * Management for journal control blocks: functions to create and
 * destroy journal_t structures, and to initialise and read existing
 * journal blocks from disk. */
/* First: create and setup a journal_t object in memory. We initialise
 * very few fields yet: that has to wait until we have created the
 * journal structures from from scratch, or loaded them from disk.
 *
 * Returns a zeroed, partially-initialised journal_t, or NULL on
 * allocation failure. */
static journal_t * journal_init_common (void)
{
journal_t *journal;
int err;
journal = kzalloc(sizeof(*journal), GFP_KERNEL);
if (!journal)
goto fail;
init_waitqueue_head(&journal->j_wait_transaction_locked);
init_waitqueue_head(&journal->j_wait_logspace);
init_waitqueue_head(&journal->j_wait_done_commit);
init_waitqueue_head(&journal->j_wait_checkpoint);
init_waitqueue_head(&journal->j_wait_commit);
init_waitqueue_head(&journal->j_wait_updates);
mutex_init(&journal->j_barrier);
mutex_init(&journal->j_checkpoint_mutex);
spin_lock_init(&journal->j_revoke_lock);
spin_lock_init(&journal->j_list_lock);
spin_lock_init(&journal->j_state_lock);
journal->j_commit_interval = (HZ * JBD_DEFAULT_MAX_COMMIT_AGE);
/* The journal is marked for error until we succeed with recovery! */
journal->j_flags = JFS_ABORT;
/* Set up a default-sized revoke table for the new mount. */
err = journal_init_revoke(journal, JOURNAL_REVOKE_DEFAULT_HASH);
if (err) {
kfree(journal);
goto fail;
}
return journal;
fail:
return NULL;
}
/* journal_init_dev and journal_init_inode:
 *
 * Create a journal structure assigned some fixed set of disk blocks to
 * the journal. We don't actually touch those disk blocks yet, but we
 * need to set up all of the mapping information to tell the journaling
 * system where the journal blocks are.
 *
 */
/**
 * journal_t * journal_init_dev() - creates and initialises a journal structure
 * @bdev: Block device on which to create the journal
 * @fs_dev: Device which hold journalled filesystem for this journal.
 * @start: Block nr Start of journal.
 * @len: Length of the journal in blocks.
 * @blocksize: blocksize of journalling device
 *
 * Returns: a newly created journal_t * (NULL on failure)
 *
 * journal_init_dev creates a journal which maps a fixed contiguous
 * range of blocks on an arbitrary block device.
 *
 */
journal_t * journal_init_dev(struct block_device *bdev,
struct block_device *fs_dev,
int start, int len, int blocksize)
{
journal_t *journal = journal_init_common();
struct buffer_head *bh;
int n;
if (!journal)
return NULL;
/* journal descriptor can store up to n blocks -bzzz */
journal->j_blocksize = blocksize;
n = journal->j_blocksize / sizeof(journal_block_tag_t);
journal->j_wbufsize = n;
journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
if (!journal->j_wbuf) {
printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
__func__);
goto out_err;
}
journal->j_dev = bdev;
journal->j_fs_dev = fs_dev;
journal->j_blk_offset = start;
journal->j_maxlen = len;
/* The superblock lives in the first block of the journal range. */
bh = __getblk(journal->j_dev, start, journal->j_blocksize);
if (!bh) {
printk(KERN_ERR
"%s: Cannot get buffer for journal superblock\n",
__func__);
goto out_err;
}
journal->j_sb_buffer = bh;
journal->j_superblock = (journal_superblock_t *)bh->b_data;
return journal;
out_err:
/* kfree(NULL) is a no-op when j_wbuf allocation itself failed. */
kfree(journal->j_wbuf);
kfree(journal);
return NULL;
}
/**
 * journal_t * journal_init_inode () - creates a journal which maps to a inode.
 * @inode: An inode to create the journal in
 *
 * journal_init_inode creates a journal which maps an on-disk inode as
 * the journal.  The inode must exist already, must support bmap() and
 * must have all data blocks preallocated.
 *
 * Returns the new journal_t, or NULL on any failure (allocation error,
 * or the inode's first block cannot be mapped).
 */
journal_t * journal_init_inode (struct inode *inode)
{
	struct buffer_head *bh;
	journal_t *journal = journal_init_common();
	int err;
	int n;
	unsigned int blocknr;

	if (!journal)
		return NULL;

	journal->j_dev = journal->j_fs_dev = inode->i_sb->s_bdev;
	journal->j_inode = inode;
	jbd_debug(1,
		"journal %p: inode %s/%ld, size %Ld, bits %d, blksize %ld\n",
		journal, inode->i_sb->s_id, inode->i_ino,
		(long long) inode->i_size,
		inode->i_sb->s_blocksize_bits, inode->i_sb->s_blocksize);

	/* Journal geometry is derived from the inode's size and blocksize. */
	journal->j_maxlen = inode->i_size >> inode->i_sb->s_blocksize_bits;
	journal->j_blocksize = inode->i_sb->s_blocksize;

	/* journal descriptor can store up to n blocks -bzzz */
	n = journal->j_blocksize / sizeof(journal_block_tag_t);
	journal->j_wbufsize = n;
	journal->j_wbuf = kmalloc(n * sizeof(struct buffer_head*), GFP_KERNEL);
	if (!journal->j_wbuf) {
		printk(KERN_ERR "%s: Cant allocate bhs for commit thread\n",
			__func__);
		goto out_err;
	}

	err = journal_bmap(journal, 0, &blocknr);
	/* If that failed, give up */
	if (err) {
		/* Message typo fixed: "Cannnot" -> "Cannot". */
		printk(KERN_ERR "%s: Cannot locate journal superblock\n",
			__func__);
		goto out_err;
	}

	bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
	if (!bh) {
		printk(KERN_ERR
		       "%s: Cannot get buffer for journal superblock\n",
		       __func__);
		goto out_err;
	}
	journal->j_sb_buffer = bh;
	journal->j_superblock = (journal_superblock_t *)bh->b_data;
	return journal;
out_err:
	/* kfree(NULL) is a no-op when j_wbuf allocation itself failed. */
	kfree(journal->j_wbuf);
	kfree(journal);
	return NULL;
}
/*
 * If the journal init or create aborts, we need to mark the journal
 * superblock as being NULL to prevent the journal destroy from writing
 * back a bogus superblock.
 */
static void journal_fail_superblock (journal_t *journal)
{
	/* Release our reference and forget the superblock buffer. */
	brelse(journal->j_sb_buffer);
	journal->j_sb_buffer = NULL;
}
/*
 * Given a journal_t structure, initialise the various fields for
 * startup of a new journaling session. We use this both when creating
 * a journal, and after recovering an old journal to reset it for
 * subsequent use.
 *
 * Returns 0, -EINVAL for an undersized journal, or the error from
 * starting the journal thread.
 */
static int journal_reset(journal_t *journal)
{
journal_superblock_t *sb = journal->j_superblock;
unsigned int first, last;
first = be32_to_cpu(sb->s_first);
last = be32_to_cpu(sb->s_maxlen);
if (first + JFS_MIN_JOURNAL_BLOCKS > last + 1) {
printk(KERN_ERR "JBD: Journal too short (blocks %u-%u).\n",
first, last);
journal_fail_superblock(journal);
return -EINVAL;
}
/* Fresh session: head and tail start together at the first block. */
journal->j_first = first;
journal->j_last = last;
journal->j_head = first;
journal->j_tail = first;
journal->j_free = last - first;
journal->j_tail_sequence = journal->j_transaction_sequence;
journal->j_commit_sequence = journal->j_transaction_sequence - 1;
journal->j_commit_request = journal->j_commit_sequence;
journal->j_max_transaction_buffers = journal->j_maxlen / 4;
/* Add the dynamic fields and write it to disk. */
journal_update_superblock(journal, 1);
return journal_start_thread(journal);
}
/**
 * int journal_create() - Initialise the new journal file
 * @journal: Journal to create. This structure must have been initialised
 *
 * Given a journal_t structure which tells us which disk blocks we can
 * use, create a new journal superblock and initialise all of the
 * journal fields from scratch.
 *
 * Returns 0 on success, -EINVAL for an undersized journal, or a
 * journal_bmap()/journal_reset() error.
 **/
int journal_create(journal_t *journal)
{
unsigned int blocknr;
struct buffer_head *bh;
journal_superblock_t *sb;
int i, err;
if (journal->j_maxlen < JFS_MIN_JOURNAL_BLOCKS) {
printk (KERN_ERR "Journal length (%d blocks) too short.\n",
journal->j_maxlen);
journal_fail_superblock(journal);
return -EINVAL;
}
if (journal->j_inode == NULL) {
/*
 * We don't know what block to start at!
 */
printk(KERN_EMERG
"%s: creation of journal on external device!\n",
__func__);
BUG();
}
/* Zero out the entire journal on disk. We cannot afford to
 have any blocks on disk beginning with JFS_MAGIC_NUMBER. */
jbd_debug(1, "JBD: Zeroing out journal blocks...\n");
for (i = 0; i < journal->j_maxlen; i++) {
err = journal_bmap(journal, i, &blocknr);
if (err)
return err;
/* NOTE(review): __getblk() result is not NULL-checked here. */
bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize);
lock_buffer(bh);
memset (bh->b_data, 0, journal->j_blocksize);
BUFFER_TRACE(bh, "marking dirty");
mark_buffer_dirty(bh);
BUFFER_TRACE(bh, "marking uptodate");
set_buffer_uptodate(bh);
unlock_buffer(bh);
__brelse(bh);
}
sync_blockdev(journal->j_dev);
jbd_debug(1, "JBD: journal cleared.\n");
/* OK, fill in the initial static fields in the new superblock */
sb = journal->j_superblock;
sb->s_header.h_magic = cpu_to_be32(JFS_MAGIC_NUMBER);
sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
sb->s_blocksize = cpu_to_be32(journal->j_blocksize);
sb->s_maxlen = cpu_to_be32(journal->j_maxlen);
/* Log block 0 holds this superblock; usable log space starts at 1. */
sb->s_first = cpu_to_be32(1);
journal->j_transaction_sequence = 1;
journal->j_flags &= ~JFS_ABORT;
journal->j_format_version = 2;
return journal_reset(journal);
}
/**
 * void journal_update_superblock() - Update journal sb on disk.
 * @journal: The journal to update.
 * @wait: Set to '0' if you don't want to wait for IO completion.
 *
 * Update a journal's dynamic superblock fields and write it to disk,
 * optionally waiting for the IO to complete.
 */
void journal_update_superblock(journal_t *journal, int wait)
{
journal_superblock_t *sb = journal->j_superblock;
struct buffer_head *bh = journal->j_sb_buffer;
/*
 * As a special case, if the on-disk copy is already marked as needing
 * no recovery (s_start == 0) and there are no outstanding transactions
 * in the filesystem, then we can safely defer the superblock update
 * until the next commit by setting JFS_FLUSHED. This avoids
 * attempting a write to a potential-readonly device.
 */
if (sb->s_start == 0 && journal->j_tail_sequence ==
journal->j_transaction_sequence) {
jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
"(start %u, seq %d, errno %d)\n",
journal->j_tail, journal->j_tail_sequence,
journal->j_errno);
goto out;
}
/* Copy the dynamic fields into the sb under the state lock. */
spin_lock(&journal->j_state_lock);
jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n",
journal->j_tail, journal->j_tail_sequence, journal->j_errno);
sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
sb->s_start = cpu_to_be32(journal->j_tail);
sb->s_errno = cpu_to_be32(journal->j_errno);
spin_unlock(&journal->j_state_lock);
BUFFER_TRACE(bh, "marking dirty");
mark_buffer_dirty(bh);
if (wait)
sync_dirty_buffer(bh);
else
write_dirty_buffer(bh, WRITE);
out:
/* If we have just flushed the log (by marking s_start==0), then
 * any future commit will have to be careful to update the
 * superblock again to re-record the true start of the log. */
spin_lock(&journal->j_state_lock);
if (sb->s_start)
journal->j_flags &= ~JFS_FLUSHED;
else
journal->j_flags |= JFS_FLUSHED;
spin_unlock(&journal->j_state_lock);
}
/*
 * Read the superblock for a given journal, performing initial
 * validation of the format.
 *
 * Returns 0 on success, -EIO on read failure, or -EINVAL for a
 * bad magic/blocksize/blocktype or an over-long on-disk length.
 * On failure the superblock buffer is released via
 * journal_fail_superblock().
 */
static int journal_get_superblock(journal_t *journal)
{
struct buffer_head *bh;
journal_superblock_t *sb;
int err = -EIO;
bh = journal->j_sb_buffer;
J_ASSERT(bh != NULL);
if (!buffer_uptodate(bh)) {
ll_rw_block(READ, 1, &bh);
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
printk (KERN_ERR
"JBD: IO error reading journal superblock\n");
goto out;
}
}
sb = journal->j_superblock;
err = -EINVAL;
if (sb->s_header.h_magic != cpu_to_be32(JFS_MAGIC_NUMBER) ||
sb->s_blocksize != cpu_to_be32(journal->j_blocksize)) {
printk(KERN_WARNING "JBD: no valid journal superblock found\n");
goto out;
}
switch(be32_to_cpu(sb->s_header.h_blocktype)) {
case JFS_SUPERBLOCK_V1:
journal->j_format_version = 1;
break;
case JFS_SUPERBLOCK_V2:
journal->j_format_version = 2;
break;
default:
printk(KERN_WARNING "JBD: unrecognised superblock format ID\n");
goto out;
}
/* The on-disk length may shrink j_maxlen, but must never exceed it. */
if (be32_to_cpu(sb->s_maxlen) < journal->j_maxlen)
journal->j_maxlen = be32_to_cpu(sb->s_maxlen);
else if (be32_to_cpu(sb->s_maxlen) > journal->j_maxlen) {
printk (KERN_WARNING "JBD: journal file too short\n");
goto out;
}
return 0;
out:
journal_fail_superblock(journal);
return err;
}
/*
* Load the on-disk journal superblock and read the key fields into the
* journal_t.
*/
/*
 * Fetch and validate the journal superblock, then copy its dynamic
 * fields (tail position, sequence numbers, limits, error code) into
 * the in-memory journal_t.
 */
static int load_superblock(journal_t *journal)
{
	journal_superblock_t *sb;
	int err = journal_get_superblock(journal);

	if (err)
		return err;

	sb = journal->j_superblock;

	journal->j_tail_sequence = be32_to_cpu(sb->s_sequence);
	journal->j_tail = be32_to_cpu(sb->s_start);
	journal->j_first = be32_to_cpu(sb->s_first);
	journal->j_last = be32_to_cpu(sb->s_maxlen);
	journal->j_errno = be32_to_cpu(sb->s_errno);

	return 0;
}
/**
* int journal_load() - Read journal from disk.
* @journal: Journal to act on.
*
* Given a journal_t structure which tells us which disk blocks contain
* a journal, read the journal from disk to initialise the in-memory
* structures.
*/
int journal_load(journal_t *journal)
{
	journal_superblock_t *sb;
	int err;

	err = load_superblock(journal);
	if (err)
		return err;

	sb = journal->j_superblock;

	/* V2 superblocks carry feature flags; refuse to touch a journal
	 * that uses features this implementation does not understand. */
	if (journal->j_format_version >= 2 &&
	    ((sb->s_feature_ro_compat &
	      ~cpu_to_be32(JFS_KNOWN_ROCOMPAT_FEATURES)) ||
	     (sb->s_feature_incompat &
	      ~cpu_to_be32(JFS_KNOWN_INCOMPAT_FEATURES)))) {
		printk (KERN_WARNING
			"JBD: Unrecognised features on journal\n");
		return -EINVAL;
	}

	/* Replay anything left in the log from the last mount ... */
	if (journal_recover(journal))
		goto failed;

	/* ... then rebuild the dynamic superblock state and push it out. */
	if (journal_reset(journal))
		goto failed;

	journal->j_flags &= ~JFS_ABORT;
	journal->j_flags |= JFS_LOADED;
	return 0;

failed:
	printk (KERN_WARNING "JBD: recovery failed\n");
	return -EIO;
}
/**
* void journal_destroy() - Release a journal_t structure.
* @journal: Journal to act on.
*
* Release a journal_t structure once it is no longer in use by the
* journaled object.
* Return <0 if we couldn't clean up the journal.
*/
int journal_destroy(journal_t *journal)
{
	int err = 0;
	/* Wait for the commit thread to wake up and die. */
	journal_kill_thread(journal);
	/* Force a final log commit */
	if (journal->j_running_transaction)
		journal_commit_transaction(journal);
	/* Force any old transactions to disk */
	/* Totally anal locking here... */
	spin_lock(&journal->j_list_lock);
	/* Drain the checkpoint list; log_do_checkpoint() may sleep, so
	 * the list lock is dropped around each call and the list head is
	 * re-tested after re-acquiring it. */
	while (journal->j_checkpoint_transactions != NULL) {
		spin_unlock(&journal->j_list_lock);
		log_do_checkpoint(journal);
		spin_lock(&journal->j_list_lock);
	}
	J_ASSERT(journal->j_running_transaction == NULL);
	J_ASSERT(journal->j_committing_transaction == NULL);
	J_ASSERT(journal->j_checkpoint_transactions == NULL);
	spin_unlock(&journal->j_list_lock);
	if (journal->j_sb_buffer) {
		if (!is_journal_aborted(journal)) {
			/* We can now mark the journal as empty. */
			journal->j_tail = 0;
			journal->j_tail_sequence =
				++journal->j_transaction_sequence;
			/* Write the empty superblock synchronously (wait=1)
			 * so no recovery runs on the next mount. */
			journal_update_superblock(journal, 1);
		} else {
			/* Aborted journal: leave the on-disk state alone
			 * and report the failure to the caller. */
			err = -EIO;
		}
		brelse(journal->j_sb_buffer);
	}
	if (journal->j_inode)
		iput(journal->j_inode);
	if (journal->j_revoke)
		journal_destroy_revoke(journal);
	kfree(journal->j_wbuf);
	kfree(journal);
	return err;
}
/**
*int journal_check_used_features () - Check if features specified are used.
* @journal: Journal to check.
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
* @incompat: bitmask of incompatible features
*
* Check whether the journal uses all of a given set of
* features. Return true (non-zero) if it does.
**/
int journal_check_used_features (journal_t *journal, unsigned long compat,
				 unsigned long ro, unsigned long incompat)
{
	journal_superblock_t *sb;

	/* The empty feature set is trivially "in use". */
	if (!compat && !ro && !incompat)
		return 1;
	/* V1 superblocks carry no feature bits at all. */
	if (journal->j_format_version == 1)
		return 0;

	sb = journal->j_superblock;

	/* Fail if any requested bit is missing in any of the three sets. */
	if (((be32_to_cpu(sb->s_feature_compat) & compat) != compat) ||
	    ((be32_to_cpu(sb->s_feature_ro_compat) & ro) != ro) ||
	    ((be32_to_cpu(sb->s_feature_incompat) & incompat) != incompat))
		return 0;

	return 1;
}
/**
* int journal_check_available_features() - Check feature set in journalling layer
* @journal: Journal to check.
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
* @incompat: bitmask of incompatible features
*
* Check whether the journaling code supports the use of
* all of a given set of features on this journal. Return true
* (non-zero) if it can. */
int journal_check_available_features (journal_t *journal, unsigned long compat,
				      unsigned long ro, unsigned long incompat)
{
	/* The empty feature set is always available. */
	if (!compat && !ro && !incompat)
		return 1;

	/* Feature bits exist only in v2 superblocks; a v1 journal cannot
	 * support any extended features. */
	if (journal->j_format_version != 2)
		return 0;

	/* Supported iff no requested bit falls outside the known sets. */
	if ((compat & ~JFS_KNOWN_COMPAT_FEATURES) == 0 &&
	    (ro & ~JFS_KNOWN_ROCOMPAT_FEATURES) == 0 &&
	    (incompat & ~JFS_KNOWN_INCOMPAT_FEATURES) == 0)
		return 1;

	return 0;
}
/**
* int journal_set_features () - Mark a given journal feature in the superblock
* @journal: Journal to act on.
* @compat: bitmask of compatible features
* @ro: bitmask of features that force read-only mount
* @incompat: bitmask of incompatible features
*
* Mark a given journal feature as present on the
* superblock. Returns true if the requested features could be set.
*
*/
int journal_set_features (journal_t *journal, unsigned long compat,
			  unsigned long ro, unsigned long incompat)
{
	journal_superblock_t *sb;

	/* Already set?  Nothing to do. */
	if (journal_check_used_features(journal, compat, ro, incompat))
		return 1;

	/* Refuse to set bits this implementation does not understand. */
	if (!journal_check_available_features(journal, compat, ro, incompat))
		return 0;

	jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
		  compat, ro, incompat);

	/* OR the new bits into the in-memory superblock image. */
	sb = journal->j_superblock;
	sb->s_feature_compat |= cpu_to_be32(compat);
	sb->s_feature_ro_compat |= cpu_to_be32(ro);
	sb->s_feature_incompat |= cpu_to_be32(incompat);

	return 1;
}
/**
* int journal_update_format () - Update on-disk journal structure.
* @journal: Journal to act on.
*
* Given an initialised but unloaded journal struct, poke about in the
* on-disk structure to update it to the most recent supported version.
*/
int journal_update_format (journal_t *journal)
{
	journal_superblock_t *sb;
	int err = journal_get_superblock(journal);

	if (err)
		return err;

	sb = journal->j_superblock;

	switch (be32_to_cpu(sb->s_header.h_blocktype)) {
	case JFS_SUPERBLOCK_V2:
		/* Already at the newest supported version. */
		return 0;
	case JFS_SUPERBLOCK_V1:
		return journal_convert_superblock_v1(journal, sb);
	}

	/* Anything else is an unknown on-disk format. */
	return -EINVAL;
}
/*
 * Upgrade a v1 superblock to v2 in place: zero the new feature fields,
 * stamp the new blocktype and write the result synchronously to disk.
 */
static int journal_convert_superblock_v1(journal_t *journal,
					 journal_superblock_t *sb)
{
	struct buffer_head *bh = journal->j_sb_buffer;
	int blocksize = be32_to_cpu(sb->s_blocksize);
	int offset = ((char *) &(sb->s_feature_compat)) - ((char *) sb);

	printk(KERN_WARNING
		"JBD: Converting superblock from version 1 to 2.\n");

	/* The v2-only fields start at s_feature_compat; clear them all
	 * out to the end of the superblock block. */
	memset(&sb->s_feature_compat, 0, blocksize-offset);

	sb->s_nr_users = cpu_to_be32(1);
	sb->s_header.h_blocktype = cpu_to_be32(JFS_SUPERBLOCK_V2);
	journal->j_format_version = 2;

	/* Push the upgraded superblock to disk before returning. */
	BUFFER_TRACE(bh, "marking dirty");
	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	return 0;
}
/**
* int journal_flush () - Flush journal
* @journal: Journal to act on.
*
* Flush all data for a given journal to disk and empty the journal.
* Filesystems can use this when remounting readonly to ensure that
* recovery does not need to happen on remount.
*/
int journal_flush(journal_t *journal)
{
	int err = 0;
	transaction_t *transaction = NULL;
	unsigned int old_tail;
	spin_lock(&journal->j_state_lock);
	/* Force everything buffered to the log... */
	if (journal->j_running_transaction) {
		transaction = journal->j_running_transaction;
		__log_start_commit(journal, transaction->t_tid);
	} else if (journal->j_committing_transaction)
		transaction = journal->j_committing_transaction;
	/* Wait for the log commit to complete... */
	if (transaction) {
		/* The tid is sampled under the lock; the lock can then be
		 * dropped before sleeping in log_wait_commit(). */
		tid_t tid = transaction->t_tid;
		spin_unlock(&journal->j_state_lock);
		log_wait_commit(journal, tid);
	} else {
		spin_unlock(&journal->j_state_lock);
	}
	/* ...and flush everything in the log out to disk. */
	spin_lock(&journal->j_list_lock);
	/* Checkpoint until the list empties or an error occurs; the list
	 * lock is dropped around log_do_checkpoint(), which can sleep. */
	while (!err && journal->j_checkpoint_transactions != NULL) {
		spin_unlock(&journal->j_list_lock);
		mutex_lock(&journal->j_checkpoint_mutex);
		err = log_do_checkpoint(journal);
		mutex_unlock(&journal->j_checkpoint_mutex);
		spin_lock(&journal->j_list_lock);
	}
	spin_unlock(&journal->j_list_lock);
	if (is_journal_aborted(journal))
		return -EIO;
	cleanup_journal_tail(journal);
	/* Finally, mark the journal as really needing no recovery.
	 * This sets s_start==0 in the underlying superblock, which is
	 * the magic code for a fully-recovered superblock. Any future
	 * commits of data to the journal will restore the current
	 * s_start value. */
	spin_lock(&journal->j_state_lock);
	old_tail = journal->j_tail;
	/* Temporarily zero j_tail so journal_update_superblock() writes
	 * s_start == 0; the in-memory value is restored afterwards. */
	journal->j_tail = 0;
	spin_unlock(&journal->j_state_lock);
	journal_update_superblock(journal, 1);
	spin_lock(&journal->j_state_lock);
	journal->j_tail = old_tail;
	J_ASSERT(!journal->j_running_transaction);
	J_ASSERT(!journal->j_committing_transaction);
	J_ASSERT(!journal->j_checkpoint_transactions);
	J_ASSERT(journal->j_head == journal->j_tail);
	J_ASSERT(journal->j_tail_sequence == journal->j_transaction_sequence);
	spin_unlock(&journal->j_state_lock);
	return 0;
}
/**
* int journal_wipe() - Wipe journal contents
* @journal: Journal to act on.
* @write: flag (see below)
*
* Wipe out all of the contents of a journal, safely. This will produce
* a warning if the journal contains any valid recovery information.
* Must be called between journal_init_*() and journal_load().
*
* If 'write' is non-zero, then we wipe out the journal on disk; otherwise
* we merely suppress recovery.
*/
int journal_wipe(journal_t *journal, int write)
{
	int err;

	J_ASSERT (!(journal->j_flags & JFS_LOADED));

	err = load_superblock(journal);
	if (err)
		return err;

	/* A zero tail means the journal is already clean: there is no
	 * recovery information to clear or ignore. */
	if (!journal->j_tail)
		return 0;

	printk (KERN_WARNING "JBD: %s recovery information on journal\n",
		write ? "Clearing" : "Ignoring");

	err = journal_skip_recovery(journal);
	/* Only touch the on-disk superblock when asked to wipe. */
	if (write)
		journal_update_superblock(journal, 1);
	return err;
}
/*
* journal_dev_name: format a character string to describe on what
* device this journal is present.
*/
/*
 * journal_dev_name: format a character string naming the device this
 * journal lives on (the filesystem's bdev for an inode journal,
 * otherwise the dedicated journal device).
 */
static const char *journal_dev_name(journal_t *journal, char *buffer)
{
	struct block_device *bdev = journal->j_inode ?
		journal->j_inode->i_sb->s_bdev : journal->j_dev;

	return bdevname(bdev, buffer);
}
/*
* Journal abort has very specific semantics, which we describe
* for journal abort.
*
* Two internal function, which provide abort to te jbd layer
* itself are here.
*/
/*
* Quick version for internal journal use (doesn't lock the journal).
* Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
* and don't attempt to make any other journal updates.
*/
/*
 * Quick version for internal journal use (doesn't lock the journal).
 * Aborts hard --- we mark the abort as occurred, but do _nothing_ else,
 * and don't attempt to make any other journal updates.
 */
static void __journal_abort_hard(journal_t *journal)
{
	transaction_t *running;
	char b[BDEVNAME_SIZE];

	if (journal->j_flags & JFS_ABORT)
		return;

	printk(KERN_ERR "Aborting journal on device %s.\n",
		journal_dev_name(journal, b));

	spin_lock(&journal->j_state_lock);
	journal->j_flags |= JFS_ABORT;
	/* Kick any running transaction into commit so waiters notice
	 * the abort promptly. */
	running = journal->j_running_transaction;
	if (running)
		__log_start_commit(journal, running->t_tid);
	spin_unlock(&journal->j_state_lock);
}
/* Soft abort: record the abort error status in the journal superblock,
* but don't do any other IO. */
/* Soft abort: record the abort error status in the journal superblock,
 * but don't do any other IO. */
static void __journal_abort_soft (journal_t *journal, int errno)
{
	if (journal->j_flags & JFS_ABORT)
		return;

	/* Keep only the first error ever recorded. */
	if (!journal->j_errno)
		journal->j_errno = errno;

	__journal_abort_hard(journal);

	/* A non-zero errno is persisted into the on-disk superblock. */
	if (errno)
		journal_update_superblock(journal, 1);
}
/**
* void journal_abort () - Shutdown the journal immediately.
* @journal: the journal to shutdown.
* @errno: an error number to record in the journal indicating
* the reason for the shutdown.
*
* Perform a complete, immediate shutdown of the ENTIRE
* journal (not of a single transaction). This operation cannot be
* undone without closing and reopening the journal.
*
* The journal_abort function is intended to support higher level error
* recovery mechanisms such as the ext2/ext3 remount-readonly error
* mode.
*
* Journal abort has very specific semantics. Any existing dirty,
* unjournaled buffers in the main filesystem will still be written to
* disk by bdflush, but the journaling mechanism will be suspended
* immediately and no further transaction commits will be honoured.
*
* Any dirty, journaled buffers will be written back to disk without
* hitting the journal. Atomicity cannot be guaranteed on an aborted
* filesystem, but we _do_ attempt to leave as much data as possible
* behind for fsck to use for cleanup.
*
* Any attempt to get a new transaction handle on a journal which is in
* ABORT state will just result in an -EROFS error return. A
* journal_stop on an existing handle will return -EIO if we have
* entered abort state during the update.
*
* Recursive transactions are not disturbed by journal abort until the
* final journal_stop, which will receive the -EIO error.
*
* Finally, the journal_abort call allows the caller to supply an errno
* which will be recorded (if possible) in the journal superblock. This
* allows a client to record failure conditions in the middle of a
* transaction without having to complete the transaction to record the
* failure to disk. ext3_error, for example, now uses this
* functionality.
*
* Errors which originate from within the journaling layer will NOT
* supply an errno; a null errno implies that absolutely no further
* writes are done to the journal (unless there are any already in
* progress).
*
*/
void journal_abort(journal_t *journal, int errno)
{
	/* The soft abort records errno (if non-zero) in the superblock
	 * and then performs the hard shutdown itself. */
	__journal_abort_soft(journal, errno);
}
/**
* int journal_errno () - returns the journal's error state.
* @journal: journal to examine.
*
* This is the errno numbet set with journal_abort(), the last
* time the journal was mounted - if the journal was stopped
* without calling abort this will be 0.
*
* If the journal has been aborted on this mount time -EROFS will
* be returned.
*/
int journal_errno(journal_t *journal)
{
	int err;

	spin_lock(&journal->j_state_lock);
	/* An aborted journal always reports -EROFS, regardless of any
	 * recorded errno. */
	err = (journal->j_flags & JFS_ABORT) ? -EROFS : journal->j_errno;
	spin_unlock(&journal->j_state_lock);
	return err;
}
/**
* int journal_clear_err () - clears the journal's error state
* @journal: journal to act on.
*
* An error must be cleared or Acked to take a FS out of readonly
* mode.
*/
int journal_clear_err(journal_t *journal)
{
	int err = 0;

	spin_lock(&journal->j_state_lock);
	/* An aborted journal cannot have its error state cleared. */
	if (!(journal->j_flags & JFS_ABORT))
		journal->j_errno = 0;
	else
		err = -EROFS;
	spin_unlock(&journal->j_state_lock);
	return err;
}
/**
* void journal_ack_err() - Ack journal err.
* @journal: journal to act on.
*
* An error must be cleared or Acked to take a FS out of readonly
* mode.
*/
void journal_ack_err(journal_t *journal)
{
	spin_lock(&journal->j_state_lock);
	/* Only a recorded error can be acknowledged. */
	if (journal->j_errno != 0)
		journal->j_flags |= JFS_ACK_ERR;
	spin_unlock(&journal->j_state_lock);
}
/* Number of journal blocks that fit in one page-cache page for the
 * given inode's filesystem block size. */
int journal_blocks_per_page(struct inode *inode)
{
	return 1 << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
}
/*
* Journal_head storage management
*/
static struct kmem_cache *journal_head_cache;
#ifdef CONFIG_JBD_DEBUG
static atomic_t nr_journal_heads = ATOMIC_INIT(0);
#endif
/* Create the slab cache backing journal_head allocations; returns 0
 * on success or -ENOMEM. */
static int journal_init_journal_head_cache(void)
{
	J_ASSERT(journal_head_cache == NULL);

	journal_head_cache = kmem_cache_create("journal_head",
				sizeof(struct journal_head),
				0,		/* offset */
				SLAB_TEMPORARY,	/* flags */
				NULL);		/* ctor */
	if (journal_head_cache == NULL) {
		printk(KERN_EMERG "JBD: no memory for journal_head cache\n");
		return -ENOMEM;
	}
	return 0;
}
/* Tear down the journal_head slab cache, if it was ever created. */
static void journal_destroy_journal_head_cache(void)
{
	if (journal_head_cache == NULL)
		return;
	kmem_cache_destroy(journal_head_cache);
	journal_head_cache = NULL;
}
/*
* journal_head splicing and dicing
*/
/*
 * Allocate a journal_head.  Never fails: on allocator exhaustion it
 * logs a (ratelimited) warning and retries forever, yielding the CPU
 * between attempts.
 */
static struct journal_head *journal_alloc_journal_head(void)
{
	static unsigned long last_warning;
	struct journal_head *jh;

#ifdef CONFIG_JBD_DEBUG
	atomic_inc(&nr_journal_heads);
#endif
	jh = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
	if (jh != NULL)
		return jh;

	jbd_debug(1, "out of memory for journal_head\n");
	/* Warn at most once every five seconds. */
	if (time_after(jiffies, last_warning + 5*HZ)) {
		printk(KERN_NOTICE "ENOMEM in %s, retrying.\n",
		       __func__);
		last_warning = jiffies;
	}
	do {
		yield();
		jh = kmem_cache_alloc(journal_head_cache, GFP_NOFS);
	} while (jh == NULL);

	return jh;
}
/* Return a journal_head to its slab cache. */
static void journal_free_journal_head(struct journal_head *jh)
{
#ifdef CONFIG_JBD_DEBUG
	atomic_dec(&nr_journal_heads);
	/* Poison the freed descriptor so use-after-free shows up in
	 * debug builds. */
	memset(jh, JBD_POISON_FREE, sizeof(*jh));
#endif
	kmem_cache_free(journal_head_cache, jh);
}
/*
* A journal_head is attached to a buffer_head whenever JBD has an
* interest in the buffer.
*
* Whenever a buffer has an attached journal_head, its ->b_state:BH_JBD bit
* is set. This bit is tested in core kernel code where we need to take
* JBD-specific actions. Testing the zeroness of ->b_private is not reliable
* there.
*
* When a buffer has its BH_JBD bit set, its ->b_count is elevated by one.
*
* When a buffer has its BH_JBD bit set it is immune from being released by
* core kernel code, mainly via ->b_count.
*
* A journal_head may be detached from its buffer_head when the journal_head's
* b_transaction, b_cp_transaction and b_next_transaction pointers are NULL.
* Various places in JBD call journal_remove_journal_head() to indicate that the
* journal_head can be dropped if needed.
*
* Various places in the kernel want to attach a journal_head to a buffer_head
* _before_ attaching the journal_head to a transaction. To protect the
* journal_head in this situation, journal_add_journal_head elevates the
* journal_head's b_jcount refcount by one. The caller must call
* journal_put_journal_head() to undo this.
*
* So the typical usage would be:
*
* (Attach a journal_head if needed. Increments b_jcount)
* struct journal_head *jh = journal_add_journal_head(bh);
* ...
* jh->b_transaction = xxx;
* journal_put_journal_head(jh);
*
* Now, the journal_head's b_jcount is zero, but it is safe from being released
* because it has a non-zero b_transaction.
*/
/*
* Give a buffer_head a journal_head.
*
* Doesn't need the journal lock.
* May sleep.
*/
struct journal_head *journal_add_journal_head(struct buffer_head *bh)
{
	struct journal_head *jh;
	struct journal_head *new_jh = NULL;
repeat:
	/* Allocate before taking the lock: the allocator may sleep. */
	if (!buffer_jbd(bh)) {
		new_jh = journal_alloc_journal_head();
		memset(new_jh, 0, sizeof(*new_jh));
	}
	jbd_lock_bh_journal_head(bh);
	if (buffer_jbd(bh)) {
		/* Someone else attached a journal_head while we slept. */
		jh = bh2jh(bh);
	} else {
		J_ASSERT_BH(bh,
			(atomic_read(&bh->b_count) > 0) ||
			(bh->b_page && bh->b_page->mapping));
		if (!new_jh) {
			/* BH_JBD was set when first tested but has been
			 * cleared meanwhile; retry with an allocation. */
			jbd_unlock_bh_journal_head(bh);
			goto repeat;
		}
		jh = new_jh;
		new_jh = NULL;		/* We consumed it */
		set_buffer_jbd(bh);
		bh->b_private = jh;
		jh->b_bh = bh;
		/* The attached journal_head holds a b_count reference. */
		get_bh(bh);
		BUFFER_TRACE(bh, "added journal_head");
	}
	jh->b_jcount++;
	jbd_unlock_bh_journal_head(bh);
	/* Raced and lost: discard the unused allocation. */
	if (new_jh)
		journal_free_journal_head(new_jh);
	return bh->b_private;
}
/*
* Grab a ref against this buffer_head's journal_head. If it ended up not
* having a journal_head, return NULL
*/
/*
 * Grab a ref against this buffer_head's journal_head.  If it ended up
 * not having a journal_head, return NULL.
 */
struct journal_head *journal_grab_journal_head(struct buffer_head *bh)
{
	struct journal_head *jh;

	jbd_lock_bh_journal_head(bh);
	if (!buffer_jbd(bh)) {
		jbd_unlock_bh_journal_head(bh);
		return NULL;
	}
	jh = bh2jh(bh);
	jh->b_jcount++;
	jbd_unlock_bh_journal_head(bh);
	return jh;
}
/* Detach and free bh's journal_head if it is unreferenced and not
 * attached to any transaction.  Caller holds the bh journal-head lock. */
static void __journal_remove_journal_head(struct buffer_head *bh)
{
	struct journal_head *jh = bh2jh(bh);
	J_ASSERT_JH(jh, jh->b_jcount >= 0);
	/* Take an extra b_count reference as a convenience for the
	 * caller, who must later drop it with __brelse() (see the
	 * comment on journal_remove_journal_head()). */
	get_bh(bh);
	if (jh->b_jcount == 0) {
		/* Only detach when no transaction still references the
		 * journal_head in any role. */
		if (jh->b_transaction == NULL &&
				jh->b_next_transaction == NULL &&
				jh->b_cp_transaction == NULL) {
			J_ASSERT_JH(jh, jh->b_jlist == BJ_None);
			J_ASSERT_BH(bh, buffer_jbd(bh));
			J_ASSERT_BH(bh, jh2bh(jh) == bh);
			BUFFER_TRACE(bh, "remove journal_head");
			/* Leftover shadow copies indicate a logic error
			 * elsewhere; warn and reclaim them here. */
			if (jh->b_frozen_data) {
				printk(KERN_WARNING "%s: freeing "
						"b_frozen_data\n",
						__func__);
				jbd_free(jh->b_frozen_data, bh->b_size);
			}
			if (jh->b_committed_data) {
				printk(KERN_WARNING "%s: freeing "
						"b_committed_data\n",
						__func__);
				jbd_free(jh->b_committed_data, bh->b_size);
			}
			bh->b_private = NULL;
			jh->b_bh = NULL;	/* debug, really */
			clear_buffer_jbd(bh);
			/* Drop the b_count reference taken when BH_JBD was
			 * set in journal_add_journal_head(). */
			__brelse(bh);
			journal_free_journal_head(jh);
		} else {
			BUFFER_TRACE(bh, "journal_head was locked");
		}
	}
}
/*
* journal_remove_journal_head(): if the buffer isn't attached to a transaction
* and has a zero b_jcount then remove and release its journal_head. If we did
* see that the buffer is not used by any transaction we also "logically"
* decrement ->b_count.
*
* We in fact take an additional increment on ->b_count as a convenience,
* because the caller usually wants to do additional things with the bh
* after calling here.
* The caller of journal_remove_journal_head() *must* run __brelse(bh) at some
* time. Once the caller has run __brelse(), the buffer is eligible for
* reaping by try_to_free_buffers().
*/
void journal_remove_journal_head(struct buffer_head *bh)
{
	/* Locked wrapper; see __journal_remove_journal_head() for the
	 * conditions under which the journal_head is actually freed. */
	jbd_lock_bh_journal_head(bh);
	__journal_remove_journal_head(bh);
	jbd_unlock_bh_journal_head(bh);
}
/*
* Drop a reference on the passed journal_head. If it fell to zero then try to
* release the journal_head from the buffer_head.
*/
void journal_put_journal_head(struct journal_head *jh)
{
	struct buffer_head *bh = jh2bh(jh);
	jbd_lock_bh_journal_head(bh);
	J_ASSERT_JH(jh, jh->b_jcount > 0);
	--jh->b_jcount;
	/* Last reference gone and no owning transaction: tear down the
	 * journal_head and release the extra b_count reference that
	 * __journal_remove_journal_head() takes for its caller. */
	if (!jh->b_jcount && !jh->b_transaction) {
		__journal_remove_journal_head(bh);
		__brelse(bh);
	}
	jbd_unlock_bh_journal_head(bh);
}
/*
* debugfs tunables
*/
#ifdef CONFIG_JBD_DEBUG
u8 journal_enable_debug __read_mostly;
EXPORT_SYMBOL(journal_enable_debug);
static struct dentry *jbd_debugfs_dir;
static struct dentry *jbd_debug;
/* Expose the jbd-debug knob at <debugfs>/jbd/jbd-debug. */
static void __init jbd_create_debugfs_entry(void)
{
	jbd_debugfs_dir = debugfs_create_dir("jbd", NULL);
	if (!jbd_debugfs_dir)
		return;
	jbd_debug = debugfs_create_u8("jbd-debug", S_IRUGO | S_IWUSR,
				      jbd_debugfs_dir,
				      &journal_enable_debug);
}
static void __exit jbd_remove_debugfs_entry(void)
{
	/* Remove the file before its parent directory.  NOTE(review):
	 * this relies on debugfs_remove() tolerating entries that were
	 * never created -- confirm against the debugfs API. */
	debugfs_remove(jbd_debug);
	debugfs_remove(jbd_debugfs_dir);
}
#else
static inline void jbd_create_debugfs_entry(void)
{
}
static inline void jbd_remove_debugfs_entry(void)
{
}
#endif
struct kmem_cache *jbd_handle_cache;
/* Create the slab cache backing handle_t allocations; returns 0 on
 * success or -ENOMEM. */
static int __init journal_init_handle_cache(void)
{
	struct kmem_cache *cache;

	cache = kmem_cache_create("journal_handle",
				  sizeof(handle_t),
				  0,		/* offset */
				  SLAB_TEMPORARY,	/* flags */
				  NULL);	/* ctor */
	jbd_handle_cache = cache;
	if (cache == NULL) {
		printk(KERN_EMERG "JBD: failed to create handle cache\n");
		return -ENOMEM;
	}
	return 0;
}
static void journal_destroy_handle_cache(void)
{
	/* The cache may be NULL if module init failed part-way through. */
	if (jbd_handle_cache)
		kmem_cache_destroy(jbd_handle_cache);
}
/*
* Module startup and shutdown
*/
/* Create all JBD slab caches; stops at, and returns, the first error. */
static int __init journal_init_caches(void)
{
	int ret = journal_init_revoke_caches();

	if (ret)
		return ret;
	ret = journal_init_journal_head_cache();
	if (ret)
		return ret;
	return journal_init_handle_cache();
}
static void journal_destroy_caches(void)
{
	/* Called both on normal unload and after a partially failed
	 * journal_init_caches(); the two destructors visible in this
	 * file check for a never-created cache, and presumably the
	 * revoke variant does too -- verify in revoke.c. */
	journal_destroy_revoke_caches();
	journal_destroy_journal_head_cache();
	journal_destroy_handle_cache();
}
/* Module init: sanity-check the on-disk layout, build the slab caches
 * and register the debugfs knob. */
static int __init journal_init(void)
{
	int ret;
	/* The on-disk superblock layout is fixed at exactly 1024 bytes. */
	BUILD_BUG_ON(sizeof(struct journal_superblock_s) != 1024);
	ret = journal_init_caches();
	if (ret != 0)
		/* Partial failure: tear down whatever was created. */
		journal_destroy_caches();
	jbd_create_debugfs_entry();
	return ret;
}
/* Module exit: report leaked journal_heads (debug builds), then remove
 * debugfs entries and free the slab caches. */
static void __exit journal_exit(void)
{
#ifdef CONFIG_JBD_DEBUG
	/* Every journal_head should have been freed by unload time. */
	int n = atomic_read(&nr_journal_heads);
	if (n)
		printk(KERN_EMERG "JBD: leaked %d journal_heads!\n", n);
#endif
	jbd_remove_debugfs_entry();
	journal_destroy_caches();
}
MODULE_LICENSE("GPL");
module_init(journal_init);
module_exit(journal_exit);
| gpl-2.0 |
Lprigara/KernelLinuxRaspberry | arch/mips/powertv/asic/asic_devices.c | 88 | 14226 | /*
*
* Description: Defines the platform resources for Gaia-based settops.
*
* Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* NOTE: The bootloader allocates persistent memory at an address which is
* 16 MiB below the end of the highest address in KSEG0. All fixed
* address memory reservations must avoid this region.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/resource.h>
#include <linux/serial_reg.h>
#include <linux/io.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <asm/page.h>
#include <linux/swap.h>
#include <linux/highmem.h>
#include <linux/dma-mapping.h>
#include <asm/mach-powertv/asic.h>
#include <asm/mach-powertv/asic_regs.h>
#include <asm/mach-powertv/interrupts.h>
#ifdef CONFIG_BOOTLOADER_DRIVER
#include <asm/mach-powertv/kbldr.h>
#endif
#include <asm/bootinfo.h>
#define BOOTLDRFAMILY(byte1, byte0) (((byte1) << 8) | (byte0))
/*
* Forward Prototypes
*/
static void pmem_setup_resource(void);
/*
* Global Variables
*/
enum asic_type asic;
unsigned int platform_features;
unsigned int platform_family;
struct register_map _asic_register_map;
EXPORT_SYMBOL(_asic_register_map); /* Exported for testing */
unsigned long asic_phy_base;
unsigned long asic_base;
EXPORT_SYMBOL(asic_base); /* Exported for testing */
struct resource *gp_resources;
/*
* Don't recommend to use it directly, it is usually used by kernel internally.
* Portable code should be using interfaces such as ioremp, dma_map_single, etc.
*/
unsigned long phys_to_dma_offset;
EXPORT_SYMBOL(phys_to_dma_offset);
/*
*
* IO Resource Definition
*
*/
struct resource asic_resource = {
.name = "ASIC Resource",
.start = 0,
.end = ASIC_IO_SIZE,
.flags = IORESOURCE_MEM,
};
/*
* Allow override of bootloader-specified model
* Returns zero on success, a negative errno value on failure. This parameter
* allows overriding of the bootloader-specified model.
*/
static char __initdata cmdline[COMMAND_LINE_SIZE];
#define FORCEFAMILY_PARAM "forcefamily"
/*
* check_forcefamily - check for, and parse, forcefamily command line parameter
* @forced_family: Pointer to two-character array in which to store the
* value of the forcedfamily parameter, if any.
*/
static __init int check_forcefamily(unsigned char forced_family[2])
{
const char *p;
forced_family[0] = '\0';
forced_family[1] = '\0';
/* Check the command line for a forcefamily directive */
strncpy(cmdline, arcs_cmdline, COMMAND_LINE_SIZE - 1);
p = strstr(cmdline, FORCEFAMILY_PARAM);
if (p && (p != cmdline) && (*(p - 1) != ' '))
p = strstr(p, " " FORCEFAMILY_PARAM "=");
if (p) {
p += strlen(FORCEFAMILY_PARAM "=");
if (*p == '\0' || *(p + 1) == '\0' ||
(*(p + 2) != '\0' && *(p + 2) != ' '))
pr_err(FORCEFAMILY_PARAM " must be exactly two "
"characters long, ignoring value\n");
else {
forced_family[0] = *p;
forced_family[1] = *(p + 1);
}
}
return 0;
}
/*
* platform_set_family - determine major platform family type.
*
 * Returns the platform family type via platform_family; -1 if none.
*
*/
static __init noinline void platform_set_family(void)
{
unsigned char forced_family[2];
unsigned short bootldr_family;
if (check_forcefamily(forced_family) == 0)
bootldr_family = BOOTLDRFAMILY(forced_family[0],
forced_family[1]);
else
bootldr_family = (unsigned short) BOOTLDRFAMILY(
CONFIG_BOOTLOADER_FAMILY[0],
CONFIG_BOOTLOADER_FAMILY[1]);
pr_info("Bootloader Family = 0x%04X\n", bootldr_family);
switch (bootldr_family) {
case BOOTLDRFAMILY('R', '1'):
platform_family = FAMILY_1500;
break;
case BOOTLDRFAMILY('4', '4'):
platform_family = FAMILY_4500;
break;
case BOOTLDRFAMILY('4', '6'):
platform_family = FAMILY_4600;
break;
case BOOTLDRFAMILY('A', '1'):
platform_family = FAMILY_4600VZA;
break;
case BOOTLDRFAMILY('8', '5'):
platform_family = FAMILY_8500;
break;
case BOOTLDRFAMILY('R', '2'):
platform_family = FAMILY_8500RNG;
break;
case BOOTLDRFAMILY('8', '6'):
platform_family = FAMILY_8600;
break;
case BOOTLDRFAMILY('B', '1'):
platform_family = FAMILY_8600VZB;
break;
case BOOTLDRFAMILY('E', '1'):
platform_family = FAMILY_1500VZE;
break;
case BOOTLDRFAMILY('F', '1'):
platform_family = FAMILY_1500VZF;
break;
case BOOTLDRFAMILY('8', '7'):
platform_family = FAMILY_8700;
break;
default:
platform_family = -1;
}
}
/* Accessor for the family value set by platform_set_family(). */
unsigned int platform_get_family(void)
{
	return platform_family;
}
EXPORT_SYMBOL(platform_get_family);
/*
* platform_get_asic - determine the ASIC type.
*
* Returns the ASIC type, or ASIC_UNKNOWN if unknown
*
*/
enum asic_type platform_get_asic(void)
{
	/* 'asic' is set during configure_platform(). */
	return asic;
}
EXPORT_SYMBOL(platform_get_asic);
/*
* set_register_map - set ASIC register configuration
* @phys_base: Physical address of the base of the ASIC registers
* @map: Description of key ASIC registers
*/
static void __init set_register_map(unsigned long phys_base,
	const struct register_map *map)
{
	asic_phy_base = phys_base;
	/* Copy the register map, then let register_map_virtualize()
	 * rewrite it for use through the ioremapped window below. */
	_asic_register_map = *map;
	register_map_virtualize(&_asic_register_map);
	asic_base = (unsigned long)ioremap_nocache(phys_base, ASIC_IO_SIZE);
}
/**
* configure_platform - configuration based on platform type.
*/
void __init configure_platform(void)
{
	platform_set_family();
	/* Per-family setup: feature flags, ASIC type, register map and
	 * platform resource table. */
	switch (platform_family) {
	case FAMILY_1500:
	case FAMILY_1500VZE:
	case FAMILY_1500VZF:
		platform_features = FFS_CAPABLE;
		asic = ASIC_CALLIOPE;
		set_register_map(CALLIOPE_IO_BASE, &calliope_register_map);
		if (platform_family == FAMILY_1500VZE) {
			gp_resources = non_dvr_vze_calliope_resources;
			pr_info("Platform: 1500/Vz Class E - "
				"CALLIOPE, NON_DVR_CAPABLE\n");
		} else if (platform_family == FAMILY_1500VZF) {
			gp_resources = non_dvr_vzf_calliope_resources;
			pr_info("Platform: 1500/Vz Class F - "
				"CALLIOPE, NON_DVR_CAPABLE\n");
		} else {
			gp_resources = non_dvr_calliope_resources;
			pr_info("Platform: 1500/RNG100 - CALLIOPE, "
				"NON_DVR_CAPABLE\n");
		}
		break;
	case FAMILY_4500:
		platform_features = FFS_CAPABLE | PCIE_CAPABLE |
			DISPLAY_CAPABLE;
		asic = ASIC_ZEUS;
		set_register_map(ZEUS_IO_BASE, &zeus_register_map);
		gp_resources = non_dvr_zeus_resources;
		pr_info("Platform: 4500 - ZEUS, NON_DVR_CAPABLE\n");
		break;
	case FAMILY_4600:
	{
		unsigned int chipversion = 0;
		/* The settop has PCIE but it isn't used, so don't advertise
		 * it*/
		platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
		/* Cronus and Cronus Lite have the same register map */
		set_register_map(CRONUS_IO_BASE, &cronus_register_map);
		/* ASIC version will determine if this is a real CronusLite or
		 * Castrati(Cronus) */
		/* Assemble the 32-bit chip version from four byte-wide
		 * registers, most significant byte first. */
		chipversion = asic_read(chipver3) << 24;
		chipversion |= asic_read(chipver2) << 16;
		chipversion |= asic_read(chipver1) << 8;
		chipversion |= asic_read(chipver0);
		if ((chipversion == CRONUS_10) || (chipversion == CRONUS_11))
			asic = ASIC_CRONUS;
		else
			asic = ASIC_CRONUSLITE;
		/* Either way, the CronusLite resource set is used. */
		gp_resources = non_dvr_cronuslite_resources;
		pr_info("Platform: 4600 - %s, NON_DVR_CAPABLE, "
			"chipversion=0x%08X\n",
			(asic == ASIC_CRONUS) ? "CRONUS" : "CRONUS LITE",
			chipversion);
		break;
	}
	case FAMILY_4600VZA:
		platform_features = FFS_CAPABLE | DISPLAY_CAPABLE;
		asic = ASIC_CRONUS;
		set_register_map(CRONUS_IO_BASE, &cronus_register_map);
		gp_resources = non_dvr_cronus_resources;
		pr_info("Platform: Vz Class A - CRONUS, NON_DVR_CAPABLE\n");
		break;
	case FAMILY_8500:
	case FAMILY_8500RNG:
		platform_features = DVR_CAPABLE | PCIE_CAPABLE |
			DISPLAY_CAPABLE;
		asic = ASIC_ZEUS;
		set_register_map(ZEUS_IO_BASE, &zeus_register_map);
		gp_resources = dvr_zeus_resources;
		pr_info("Platform: 8500/RNG200 - ZEUS, DVR_CAPABLE\n");
		break;
	case FAMILY_8600:
	case FAMILY_8600VZB:
		platform_features = DVR_CAPABLE | PCIE_CAPABLE |
			DISPLAY_CAPABLE;
		asic = ASIC_CRONUS;
		set_register_map(CRONUS_IO_BASE, &cronus_register_map);
		gp_resources = dvr_cronus_resources;
		pr_info("Platform: 8600/Vz Class B - CRONUS, "
			"DVR_CAPABLE\n");
		break;
	case FAMILY_8700:
		platform_features = FFS_CAPABLE | PCIE_CAPABLE;
		asic = ASIC_GAIA;
		set_register_map(GAIA_IO_BASE, &gaia_register_map);
		gp_resources = dvr_gaia_resources;
		pr_info("Platform: 8700 - GAIA, DVR_CAPABLE\n");
		break;
	default:
		pr_crit("Platform: UNKNOWN PLATFORM\n");
		break;
	}
	/* DMA addresses differ from physical addresses by a constant
	 * offset that depends on the ASIC. */
	switch (asic) {
	case ASIC_ZEUS:
		phys_to_dma_offset = 0x30000000;
		break;
	case ASIC_CALLIOPE:
		phys_to_dma_offset = 0x10000000;
		break;
	case ASIC_CRONUSLITE:
		/* Fall through */
	case ASIC_CRONUS:
		/*
		 * TODO: We suppose 0x10000000 aliases into 0x20000000-
		 * 0x2XXXXXXX. If 0x10000000 aliases into 0x60000000-
		 * 0x6XXXXXXX, the offset should be 0x50000000, not 0x10000000.
		 */
		phys_to_dma_offset = 0x10000000;
		break;
	default:
		phys_to_dma_offset = 0x00000000;
		break;
	}
}
/*
* RESOURCE ALLOCATION
*
*/
/*
* Allocates/reserves the Platform memory resources early in the boot process.
* This ignores any resources that are designated IORESOURCE_IO
*/
void __init platform_alloc_bootmem(void)
{
	int i;
	int total = 0;
	/* Get persistent memory data from command line before allocating
	 * resources. This need to happen before normal command line parsing
	 * has been done */
	pmem_setup_resource();
	/* Pass 1: reserve resources that request a fixed bus address.
	 * This must run before pass 2 so the allocator cannot hand the
	 * same pages out to an unconstrained resource. */
	/* Loop through looking for resources that want a particular address */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = resource_size(&gp_resources[i]);
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			/* resource addresses are bus addresses; convert to
			 * physical before handing them to the boot allocator */
			reserve_bootmem(dma_to_phys(gp_resources[i].start),
				size, 0);
			total += resource_size(&gp_resources[i]);
			pr_info("reserve resource %s at %08x (%u bytes)\n",
				gp_resources[i].name, gp_resources[i].start,
				resource_size(&gp_resources[i]));
		}
	}
	/* Pass 2: allocate pages for MEM resources with no preferred
	 * address and patch their start/end with the bus address of
	 * whatever the allocator returned. */
	/* Loop through assigning addresses for those that are left */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		int size = resource_size(&gp_resources[i]);
		if ((gp_resources[i].start == 0) &&
			((gp_resources[i].flags & IORESOURCE_MEM) != 0)) {
			void *mem = alloc_bootmem_pages(size);
			if (mem == NULL)
				pr_err("Unable to allocate bootmem pages "
					"for %s\n", gp_resources[i].name);
			else {
				gp_resources[i].start =
					phys_to_dma(virt_to_phys(mem));
				gp_resources[i].end =
					gp_resources[i].start + size - 1;
				total += size;
				pr_info("allocate resource %s at %08x "
						"(%u bytes)\n",
					gp_resources[i].name,
					gp_resources[i].start, size);
			}
		}
	}
	pr_info("Total Platform driver memory allocation: 0x%08x\n", total);
	/* IORESOURCE_IO entries are only logged here; no allocation is done
	 * for them (they map fixed platform I/O, not system memory). */
	/* indicate resources that are platform I/O related */
	for (i = 0; gp_resources[i].flags != 0; i++) {
		if ((gp_resources[i].start != 0) &&
			((gp_resources[i].flags & IORESOURCE_IO) != 0)) {
			pr_info("reserved platform resource %s at %08x\n",
				gp_resources[i].name, gp_resources[i].start);
		}
	}
}
/*
*
* PERSISTENT MEMORY (PMEM) CONFIGURATION
*
*/
/* kseg0 address of the persistent-memory region, from the "pmemaddr="
 * boot parameter; 0 when the bootloader did not supply one. */
static unsigned long pmemaddr __initdata;
static int __init early_param_pmemaddr(char *p)
{
	/* accept decimal, octal or 0x-prefixed hex */
	pmemaddr = simple_strtoul(p, NULL, 0);
	return 0;
}
early_param("pmemaddr", early_param_pmemaddr);
/* Length in bytes of the persistent-memory region ("pmemlen=" parameter). */
static long pmemlen __initdata;
static int __init early_param_pmemlen(char *p)
{
/* TODO: we can use this code when and if the bootloader ever changes this */
#if 0
	pmemlen = (unsigned long)simple_strtoul(p, NULL, 0);
#else
	/* Bootloader passes an unusable value today, so the length is
	 * hard-coded to 128 KiB regardless of the parameter string. */
	pmemlen = 0x20000;
#endif
	return 0;
}
early_param("pmemlen", early_param_pmemlen);
/*
* Set up persistent memory. If we were given values, we patch the array of
* resources. Otherwise, persistent memory may be allocated anywhere at all.
*/
/*
 * Set up persistent memory. When the bootloader supplied both an address
 * and a length, patch the "DiagPersistentMemory" resource so it lands at
 * that location; otherwise leave it for the normal allocator.
 */
static void __init pmem_setup_resource(void)
{
	struct resource *res = asic_resource_get("DiagPersistentMemory");

	if (!res || !pmemaddr || !pmemlen)
		return;
	/* The bootloader hands us a kseg0 virtual address; subtract the
	 * kseg0 base to get a physical address, then map to bus space. */
	res->start = phys_to_dma(pmemaddr - 0x80000000);
	res->end = res->start + pmemlen - 1;
	pr_info("persistent memory: start=0x%x end=0x%x\n",
		res->start, res->end);
}
/*
*
* RESOURCE ACCESS FUNCTIONS
*
*/
/**
* asic_resource_get - retrieves parameters for a platform resource.
* @name: string to match resource
*
* Returns a pointer to a struct resource corresponding to the given name.
*
* CANNOT BE NAMED platform_resource_get, which would be the obvious choice,
* as this function name is already declared
*/
struct resource *asic_resource_get(const char *name)
{
int i;
for (i = 0; gp_resources[i].flags != 0; i++) {
if (strcmp(gp_resources[i].name, name) == 0)
return &gp_resources[i];
}
return NULL;
}
EXPORT_SYMBOL(asic_resource_get);
/**
 * platform_release_memory - release pre-allocated memory
 * @ptr: pointer to memory to release
 * @size: size of resource
 *
 * This must only be called for memory allocated or reserved via the boot
 * memory allocator.
 */
void platform_release_memory(void *ptr, int size)
{
	/* hands the pages back to the buddy allocator; -1 means "do not
	 * poison", NULL means no resource name to log */
	free_reserved_area(ptr, ptr + size, -1, NULL);
}
EXPORT_SYMBOL(platform_release_memory);
/*
*
* FEATURE AVAILABILITY FUNCTIONS
*
*/
/* Capability queries: each returns non-zero (1) when the corresponding
 * feature bit was set during platform identification, 0 otherwise. */
int platform_supports_dvr(void)
{
	return !!(platform_features & DVR_CAPABLE);
}
int platform_supports_ffs(void)
{
	return !!(platform_features & FFS_CAPABLE);
}
int platform_supports_pcie(void)
{
	return !!(platform_features & PCIE_CAPABLE);
}
int platform_supports_display(void)
{
	return !!(platform_features & DISPLAY_CAPABLE);
}
| gpl-2.0 |
TEAM-Gummy/elite_kernel_jf | arch/arm/mach-msm/qdsp5/audio_evrc.c | 600 | 44987 | /* arch/arm/mach-msm/audio_evrc.c
*
* Copyright (c) 2008-2009, 2011-2012 The Linux Foundation. All rights reserved.
*
* This code also borrows from audio_aac.c, which is
* Copyright (C) 2008 Google, Inc.
* Copyright (C) 2008 HTC Corporation
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
* See the GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org.
*/
#include <asm/atomic.h>
#include <asm/ioctls.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/uaccess.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/earlysuspend.h>
#include <linux/slab.h>
#include <linux/msm_audio.h>
#include <linux/memory_alloc.h>
#include <linux/msm_ion.h>
#include <mach/msm_adsp.h>
#include <mach/iommu.h>
#include <mach/iommu_domains.h>
#include <mach/qdsp5/qdsp5audppcmdi.h>
#include <mach/qdsp5/qdsp5audppmsg.h>
#include <mach/qdsp5/qdsp5audplaycmdi.h>
#include <mach/qdsp5/qdsp5audplaymsg.h>
#include <mach/qdsp5/qdsp5rmtcmdi.h>
#include <mach/debug_mm.h>
#include <mach/msm_memtypes.h>
#include "audmgr.h"
/* Hold 30 packets of 24 bytes each and 14 bytes of meta in */
#define BUFSZ 734
#define DMASZ (BUFSZ * 2)
#define AUDDEC_DEC_EVRC 12
#define PCM_BUFSZ_MIN 1624 /* 100ms worth of data and
and 24 bytes of meta out */
#define PCM_BUF_MAX_COUNT 5
/* DSP only accepts 5 buffers at most
* but support 2 buffers currently
*/
#define EVRC_DECODED_FRSZ 320 /* EVRC 20ms 8KHz mono PCM size */
#define ROUTING_MODE_FTRT 1
#define ROUTING_MODE_RT 2
/* Decoder status received from AUDPPTASK */
#define AUDPP_DEC_STATUS_SLEEP 0
#define AUDPP_DEC_STATUS_INIT 1
#define AUDPP_DEC_STATUS_CFG 2
#define AUDPP_DEC_STATUS_PLAY 3
#define AUDEVRC_METAFIELD_MASK 0xFFFF0000
#define AUDEVRC_EOS_FLG_OFFSET 0x0A /* Offset from beginning of buffer */
#define AUDEVRC_EOS_FLG_MASK 0x01
#define AUDEVRC_EOS_NONE 0x0 /* No EOS detected */
#define AUDEVRC_EOS_SET 0x1 /* EOS set in meta field */
#define AUDEVRC_EVENT_NUM 10 /* Default number of pre-allocated event packets */
/* One DMA-able audio buffer, used for both the write (bitstream in) and
 * read (decoded PCM out) directions. */
struct buffer {
	void *data;		/* kernel virtual address */
	unsigned size;		/* capacity in bytes */
	unsigned used; /* Input usage actual DSP produced PCM size */
	unsigned addr;		/* bus/physical address handed to the DSP */
	unsigned short mfield_sz; /*only useful for data has meta field */
};
#ifdef CONFIG_HAS_EARLYSUSPEND
/* Links an early-suspend notifier back to its owning driver instance. */
struct audevrc_suspend_ctl {
	struct early_suspend node;
	struct audio *audio;
};
#endif
/* One queued driver event, delivered to userspace via AUDIO_GET_EVENT. */
struct audevrc_event{
	struct list_head list;
	int event_type;
	union msm_audio_event_payload payload;
};
/* Per-open-instance state for the EVRC decoder device. */
struct audio {
	struct buffer out[2];		/* double-buffered bitstream input */
	spinlock_t dsp_lock;		/* guards buffer state vs DSP callbacks */
	uint8_t out_head;		/* next out[] slot userspace writes */
	uint8_t out_tail;		/* next out[] slot sent to the DSP */
	uint8_t out_needed; /* number of buffers the dsp is waiting for */
	atomic_t out_bytes;
	struct mutex lock;		/* serializes ioctl state changes */
	struct mutex write_lock;
	wait_queue_head_t write_wait;
	/* Host PCM section */
	struct buffer in[PCM_BUF_MAX_COUNT];
	struct mutex read_lock;
	wait_queue_head_t read_wait;	/* Wait queue for read */
	char *read_data;	/* pointer to reader buffer */
	int32_t read_phys;	/* physical address of reader buffer */
	uint8_t read_next;	/* index to input buffers to be read next */
	uint8_t fill_next;	/* index to buffer that DSP should be filling */
	uint8_t pcm_buf_count;	/* number of pcm buffer allocated */
	/* ---- End of Host PCM section */
	struct msm_adsp_module *audplay;
	struct audmgr audmgr;
	/* data allocated for various buffers */
	char *data;
	int32_t phys; /* physical address of write buffer */
	void *map_v_read;
	void *map_v_write;
	int mfield; /* meta field embedded in data */
	int rflush; /* Read flush */
	int wflush; /* Write flush */
	uint8_t opened:1;
	uint8_t enabled:1;
	uint8_t running:1;
	uint8_t stopped:1; /* set when stopped, cleared on flush */
	uint8_t pcm_feedback:1;	/* non-tunnel (host PCM) mode when set */
	uint8_t buf_refresh:1;
	int teos; /* valid only if tunnel mode & no data left for decoder */
	enum msm_aud_decoder_state dec_state; /* Represents decoder state */
	int rmt_resource_released;
	const char *module_name;
	unsigned queue_id;
	uint16_t dec_id;	/* DSP decoder slot for this session */
	uint32_t read_ptr_offset;
#ifdef CONFIG_HAS_EARLYSUSPEND
	struct audevrc_suspend_ctl suspend_ctl;
#endif
#ifdef CONFIG_DEBUG_FS
	struct dentry *dentry;
#endif
	wait_queue_head_t wait;
	struct list_head free_event_queue;	/* recycled event packets */
	struct list_head event_queue;		/* pending events for userspace */
	wait_queue_head_t event_wait;
	spinlock_t event_queue_lock;
	struct mutex get_event_lock;
	int event_abort;
	int eq_enable;
	int eq_needs_commit;
	audpp_cmd_cfg_object_params_eqalizer eq;
	audpp_cmd_cfg_object_params_volume vol_pan;
	struct ion_client *client;
	struct ion_handle *input_buff_handle;
	struct ion_handle *output_buff_handle;
};
static int auddec_dsp_config(struct audio *audio, int enable);
static void audpp_cmd_cfg_adec_params(struct audio *audio);
static void audpp_cmd_cfg_routing_mode(struct audio *audio);
static void audevrc_send_data(struct audio *audio, unsigned needed);
static void audevrc_dsp_event(void *private, unsigned id, uint16_t *msg);
static void audevrc_config_hostpcm(struct audio *audio);
static void audevrc_buffer_refresh(struct audio *audio);
#ifdef CONFIG_HAS_EARLYSUSPEND
static void audevrc_post_event(struct audio *audio, int type,
union msm_audio_event_payload payload);
#endif
/* Tell the resource manager this session no longer needs the EVRC codec. */
static int rmt_put_resource(struct audio *audio)
{
	struct aud_codec_config_cmd cmd;
	unsigned short client_idx = (RM_AUD_CLIENT_ID << 8) | audio->dec_id;

	cmd.cmd_id = RM_CMD_AUD_CODEC_CFG;
	cmd.dec_type = AUDDEC_DEC_EVRC;
	cmd.enable = RMT_DISABLE;
	cmd.client_id = RM_AUD_CLIENT_ID;
	cmd.task_id = audio->dec_id;
	return put_adsp_resource(client_idx, &cmd, sizeof(cmd));
}
/* Ask the resource manager to grant the EVRC codec to this session. */
static int rmt_get_resource(struct audio *audio)
{
	struct aud_codec_config_cmd cmd;
	unsigned short client_idx = (RM_AUD_CLIENT_ID << 8) | audio->dec_id;

	cmd.cmd_id = RM_CMD_AUD_CODEC_CFG;
	cmd.dec_type = AUDDEC_DEC_EVRC;
	cmd.enable = RMT_ENABLE;
	cmd.client_id = RM_AUD_CLIENT_ID;
	cmd.task_id = audio->dec_id;
	return get_adsp_resource(client_idx, &cmd, sizeof(cmd));
}
/* must be called with audio->lock held */
/* Bring the decode path up: reacquire ADSP resources if they were
 * released, enable the audio manager (tunnel mode only), then the
 * audplay task, then the postproc. Each step rolls the previous ones
 * back on failure. Returns 0 on success or a negative errno. */
static int audevrc_enable(struct audio *audio)
{
	struct audmgr_config cfg;
	int rc;
	if (audio->enabled)
		return 0;
	if (audio->rmt_resource_released == 1) {
		audio->rmt_resource_released = 0;
		rc = rmt_get_resource(audio);
		if (rc) {
			/* deliberately non-fatal: playback is attempted
			 * even when the resource grant fails */
			MM_ERR("ADSP resources are not available for EVRC \
			session 0x%08x on decoder: %d\n Ignoring \
			error and going ahead with the playback\n",
			(int)audio, audio->dec_id);
		}
	}
	audio->dec_state = MSM_AUD_DECODER_STATE_NONE;
	audio->out_tail = 0;
	audio->out_needed = 0;
	/* audmgr routing is only needed in tunnel mode; in non-tunnel
	 * mode decoded PCM comes back to the host instead */
	if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) {
		cfg.tx_rate = RPC_AUD_DEF_SAMPLE_RATE_NONE;
		cfg.rx_rate = RPC_AUD_DEF_SAMPLE_RATE_48000;
		cfg.def_method = RPC_AUD_DEF_METHOD_PLAYBACK;
		cfg.codec = RPC_AUD_DEF_CODEC_EVRC;
		cfg.snd_method = RPC_SND_METHOD_MIDI;
		rc = audmgr_enable(&audio->audmgr, &cfg);
		if (rc < 0)
			return rc;
	}
	if (msm_adsp_enable(audio->audplay)) {
		MM_ERR("msm_adsp_enable(audplay) failed\n");
		if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK)
			audmgr_disable(&audio->audmgr);
		return -ENODEV;
	}
	if (audpp_enable(audio->dec_id, audevrc_dsp_event, audio)) {
		MM_ERR("audpp_enable() failed\n");
		msm_adsp_disable(audio->audplay);
		if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK)
			audmgr_disable(&audio->audmgr);
		return -ENODEV;
	}
	audio->enabled = 1;
	return 0;
}
/* must be called with audio->lock held */
/* Tear the decode path down in reverse order of audevrc_enable().
 * Waits (with timeout) for the DSP to acknowledge decoder shutdown;
 * returns 0 on clean shutdown, -ETIMEDOUT or -EFAULT otherwise.
 * Teardown proceeds regardless of the wait result. */
static int audevrc_disable(struct audio *audio)
{
	int rc = 0;
	if (audio->enabled) {
		audio->enabled = 0;
		audio->dec_state = MSM_AUD_DECODER_STATE_NONE;
		auddec_dsp_config(audio, 0);
		/* dec_state is moved off NONE by the DSP event handler
		 * once the decoder reports sleep (see audevrc_dsp_event) */
		rc = wait_event_interruptible_timeout(audio->wait,
				audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
				msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
		if (rc == 0)
			rc = -ETIMEDOUT;
		else if (audio->dec_state != MSM_AUD_DECODER_STATE_CLOSE)
			rc = -EFAULT;
		else
			rc = 0;
		audio->stopped = 1;
		/* kick any sleeping readers/writers so they observe stopped */
		wake_up(&audio->write_wait);
		wake_up(&audio->read_wait);
		msm_adsp_disable(audio->audplay);
		audpp_disable(audio->dec_id, audio);
		if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK)
			audmgr_disable(&audio->audmgr);
		audio->out_needed = 0;
		rmt_put_resource(audio);
		audio->rmt_resource_released = 1;
	}
	return rc;
}
/* ------------------- dsp --------------------- */
/*
 * DSP callback: the audplay task reports which host-PCM buffers it has
 * filled. payload[1] is the buffer count; each entry i contributes an
 * address at payload[2 + 2i] and a byte count at payload[3 + 2i].
 * Marks matching in[] slots used, advances fill_next, and either hands
 * the DSP a fresh buffer or defers (buf_refresh) until the reader
 * drains one. Runs in interrupt context under dsp_lock.
 */
static void audevrc_update_pcm_buf_entry(struct audio *audio,
					uint32_t *payload)
{
	uint8_t index;
	unsigned long flags;
	if (audio->rflush)
		return;
	spin_lock_irqsave(&audio->dsp_lock, flags);
	for (index = 0; index < payload[1]; index++) {
		if (audio->in[audio->fill_next].addr
				== payload[2 + index * 2]) {
			MM_DBG("in[%d] ready\n", audio->fill_next);
			audio->in[audio->fill_next].used =
				payload[3 + index * 2];
			if ((++audio->fill_next) == audio->pcm_buf_count)
				audio->fill_next = 0;
		} else {
			/* Fix: report the address slot that was actually
			 * compared (payload[2 + 2i]); the old code logged
			 * payload[1 + 2i], which is the previous entry's
			 * length (or the buffer count for i == 0). */
			MM_ERR("expected=%x ret=%x\n",
				audio->in[audio->fill_next].addr,
				payload[2 + index * 2]);
			break;
		}
	}
	if (audio->in[audio->fill_next].used == 0) {
		audevrc_buffer_refresh(audio);
	} else {
		MM_DBG("read cannot keep up\n");
		audio->buf_refresh = 1;
	}
	wake_up(&audio->read_wait);
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
/* Event handler for messages from the audplay ADSP task. Copies the
 * message out via @getevent, then dispatches on the message id. */
static void audplay_dsp_event(void *data, unsigned id, size_t len,
			      void (*getevent) (void *ptr, size_t len))
{
	struct audio *audio = data;
	uint32_t msg[28];	/* large enough for any audplay message */
	getevent(msg, sizeof(msg));
	MM_DBG("msg_id=%x\n", id);
	switch (id) {
	case AUDPLAY_MSG_DEC_NEEDS_DATA:
		/* DSP consumed a bitstream buffer; feed it the next one */
		audevrc_send_data(audio, 1);
		break;
	case AUDPLAY_MSG_BUFFER_UPDATE:
		MM_DBG("\n"); /* Macro prints the file name and function */
		audevrc_update_pcm_buf_entry(audio, msg);
		break;
	case ADSP_MESSAGE_ID:
		MM_DBG("Received ADSP event: module enable(audplaytask)\n");
		break;
	default:
		MM_ERR("unexpected message from decoder \n");
	}
}
/* Event handler for AUDPP (postprocessor) messages. Drives the decoder
 * state machine (dec_state), pushes configuration at the right stages,
 * and wakes waiters on state transitions and flush/underrun events. */
static void audevrc_dsp_event(void *private, unsigned id, uint16_t *msg)
{
	struct audio *audio = private;
	switch (id) {
	case AUDPP_MSG_STATUS_MSG:{
			unsigned status = msg[1];
			switch (status) {
			case AUDPP_DEC_STATUS_SLEEP: {
				uint16_t reason = msg[2];
				MM_DBG("decoder status:sleep reason = \
						0x%04x\n", reason);
				if ((reason == AUDPP_MSG_REASON_MEM)
						|| (reason ==
						AUDPP_MSG_REASON_NODECODER)) {
					/* no memory / no decoder slot:
					 * fail the pending AUDIO_START */
					audio->dec_state =
						MSM_AUD_DECODER_STATE_FAILURE;
					wake_up(&audio->wait);
				} else if (reason == AUDPP_MSG_REASON_NONE) {
					/* decoder is in disable state */
					audio->dec_state =
						MSM_AUD_DECODER_STATE_CLOSE;
					wake_up(&audio->wait);
				}
				break;
			}
			case AUDPP_DEC_STATUS_INIT:
				MM_DBG("decoder status: init \n");
				/* non-tunnel mode must set routing first;
				 * adec params follow on ROUTING_ACK */
				if (audio->pcm_feedback)
					audpp_cmd_cfg_routing_mode(audio);
				else
					audpp_cmd_cfg_adec_params(audio);
				break;
			case AUDPP_DEC_STATUS_CFG:
				MM_DBG("decoder status: cfg \n");
				break;
			case AUDPP_DEC_STATUS_PLAY:
				MM_DBG("decoder status: play \n");
				if (audio->pcm_feedback) {
					audevrc_config_hostpcm(audio);
					audevrc_buffer_refresh(audio);
				}
				audio->dec_state =
					MSM_AUD_DECODER_STATE_SUCCESS;
				wake_up(&audio->wait);
				break;
			default:
				MM_ERR("unknown decoder status \n");
			}
			break;
		}
	case AUDPP_MSG_CFG_MSG:
		if (msg[0] == AUDPP_MSG_ENA_ENA) {
			MM_DBG("CFG_MSG ENABLE\n");
			/* postproc is up: configure decoder and restore
			 * volume/pan, EQ and A/V sync state */
			auddec_dsp_config(audio, 1);
			audio->out_needed = 0;
			audio->running = 1;
			audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan);
			audpp_dsp_set_eq(audio->dec_id, audio->eq_enable,
					&audio->eq);
			audpp_avsync(audio->dec_id, 22050);
		} else if (msg[0] == AUDPP_MSG_ENA_DIS) {
			MM_DBG("CFG_MSG DISABLE\n");
			audpp_avsync(audio->dec_id, 0);
			audio->running = 0;
		} else {
			MM_DBG("CFG_MSG %d?\n", msg[0]);
		}
		break;
	case AUDPP_MSG_ROUTING_ACK:
		MM_DBG("ROUTING_ACK\n");
		audpp_cmd_cfg_adec_params(audio);
		break;
	case AUDPP_MSG_FLUSH_ACK:
		MM_DBG("FLUSH_ACK\n");
		audio->wflush = 0;
		audio->rflush = 0;
		wake_up(&audio->write_wait);
		if (audio->pcm_feedback)
			audevrc_buffer_refresh(audio);
		break;
	case AUDPP_MSG_PCMDMAMISSED:
		/* DSP starved for data: treat as end-of-stream marker */
		MM_DBG("PCMDMAMISSED\n");
		audio->teos = 1;
		wake_up(&audio->write_wait);
		break;
	default:
		MM_ERR("UNKNOWN (%d)\n", id);
	}
}
/* Callback table registered with the ADSP driver for the audplay task. */
struct msm_adsp_ops audplay_adsp_ops_evrc = {
	.event = audplay_dsp_event,
};
/* Write a command to the audplay task's command queue. */
#define audplay_send_queue0(audio, cmd, len) \
	msm_adsp_write(audio->audplay, audio->queue_id, \
			cmd, len)
/* Enable or disable the EVRC decoder in this session's AUDPP slot. */
static int auddec_dsp_config(struct audio *audio, int enable)
{
	u16 cfg_dec_cmd[AUDPP_CMD_CFG_DEC_TYPE_LEN / sizeof(unsigned short)];
	u16 dec_cfg;

	if (enable)
		dec_cfg = AUDPP_CMD_UPDATDE_CFG_DEC |
			AUDPP_CMD_ENA_DEC_V | AUDDEC_DEC_EVRC;
	else
		dec_cfg = AUDPP_CMD_UPDATDE_CFG_DEC | AUDPP_CMD_DIS_DEC_V;

	memset(cfg_dec_cmd, 0, sizeof(cfg_dec_cmd));
	cfg_dec_cmd[0] = AUDPP_CMD_CFG_DEC_TYPE;
	/* slot 1 + dec_id carries this decoder's configuration word */
	cfg_dec_cmd[1 + audio->dec_id] = dec_cfg;
	return audpp_send_queue1(&cfg_dec_cmd, sizeof(cfg_dec_cmd));
}
/* Send decoder parameters to AUDPP: EVRC is always 8 kHz mono. The
 * memset keeps all unset command words (and padding) zeroed as the DSP
 * expects. */
static void audpp_cmd_cfg_adec_params(struct audio *audio)
{
	struct audpp_cmd_cfg_adec_params_evrc cmd;
	memset(&cmd, 0, sizeof(cmd));
	cmd.common.cmd_id = AUDPP_CMD_CFG_ADEC_PARAMS;
	cmd.common.length = sizeof(cmd);
	cmd.common.dec_id = audio->dec_id;
	cmd.common.input_sampling_frequency = 8000;
	cmd.stereo_cfg = AUDPP_CMD_PCM_INTF_MONO_V;
	audpp_send_queue2(&cmd, sizeof(cmd));
}
/* Select decoder routing: faster-than-realtime back to the host when
 * PCM feedback is enabled, realtime to the codec path otherwise. */
static void audpp_cmd_cfg_routing_mode(struct audio *audio)
{
	struct audpp_cmd_routing_mode cmd;

	MM_DBG("\n"); /* Macro prints the file name and function */
	memset(&cmd, 0, sizeof(cmd));
	cmd.cmd_id = AUDPP_CMD_ROUTING_MODE;
	cmd.object_number = audio->dec_id;
	cmd.routing_mode = audio->pcm_feedback ?
		ROUTING_MODE_FTRT : ROUTING_MODE_RT;
	audpp_send_queue1(&cmd, sizeof(cmd));
}
static int audplay_dsp_send_data_avail(struct audio *audio,
unsigned idx, unsigned len)
{
struct audplay_cmd_bitstream_data_avail_nt2 cmd;
cmd.cmd_id = AUDPLAY_CMD_BITSTREAM_DATA_AVAIL_NT2;
if (audio->mfield)
cmd.decoder_id = AUDEVRC_METAFIELD_MASK |
(audio->out[idx].mfield_sz >> 1);
else
cmd.decoder_id = audio->dec_id;
cmd.buf_ptr = audio->out[idx].addr;
cmd.buf_size = len / 2;
cmd.partition_number = 0;
/* complete writes to the input buffer */
wmb();
return audplay_send_queue0(audio, &cmd, sizeof(cmd));
}
static void audevrc_buffer_refresh(struct audio *audio)
{
struct audplay_cmd_buffer_refresh refresh_cmd;
refresh_cmd.cmd_id = AUDPLAY_CMD_BUFFER_REFRESH;
refresh_cmd.num_buffers = 1;
refresh_cmd.buf0_address = audio->in[audio->fill_next].addr;
refresh_cmd.buf0_length = audio->in[audio->fill_next].size;
refresh_cmd.buf_read_count = 0;
MM_DBG("buf0_addr=%x buf0_len=%d\n", refresh_cmd.buf0_address,
refresh_cmd.buf0_length);
audplay_send_queue0(audio, &refresh_cmd, sizeof(refresh_cmd));
}
/* Configure the host-PCM interface for non-tunnel playback: one buffer
 * at a time, no byte swap, feedback on every buffer. */
static void audevrc_config_hostpcm(struct audio *audio)
{
	struct audplay_cmd_hpcm_buf_cfg cfg_cmd;
	MM_DBG("\n"); /* Macro prints the file name and function */
	cfg_cmd.cmd_id = AUDPLAY_CMD_HPCM_BUF_CFG;
	cfg_cmd.max_buffers = 1;
	cfg_cmd.byte_swap = 0;
	/* 0x8000 | 0x4000: host-PCM interface enable bits — meaning taken
	 * from the command layout; TODO confirm against qdsp5 docs */
	cfg_cmd.hostpcm_config = (0x8000) | (0x4000);
	cfg_cmd.feedback_frequency = 1;
	cfg_cmd.partition_number = 0;
	audplay_send_queue0(audio, &cfg_cmd, sizeof(cfg_cmd));
}
/* Feed the DSP from the double write buffer. Called both from the write
 * path (needed == 0, after userspace filled a buffer) and from the DSP
 * callback (needed == 1, decoder wants more data). A buffer with
 * used == 0xffffffff is in flight; it is recycled only when the DSP
 * asks for the next one. Runs under dsp_lock. */
static void audevrc_send_data(struct audio *audio, unsigned needed)
{
	struct buffer *frame;
	unsigned long flags;
	spin_lock_irqsave(&audio->dsp_lock, flags);
	if (!audio->running)
		goto done;
	if (needed && !audio->wflush) {
		/* We were called from the callback because the DSP
		 * requested more data. Note that the DSP does want
		 * more data, and if a buffer was in-flight, mark it
		 * as available (since the DSP must now be done with
		 * it).
		 */
		audio->out_needed = 1;
		frame = audio->out + audio->out_tail;
		if (frame->used == 0xffffffff) {
			MM_DBG("frame %d free\n", audio->out_tail);
			frame->used = 0;
			audio->out_tail ^= 1;	/* flip between the 2 buffers */
			wake_up(&audio->write_wait);
		}
	}
	if (audio->out_needed) {
		/* If the DSP currently wants data and we have a
		 * buffer available, we will send it and reset
		 * the needed flag. We'll mark the buffer as in-flight
		 * so that it won't be recycled until the next buffer
		 * is requested
		 */
		frame = audio->out + audio->out_tail;
		if (frame->used) {
			BUG_ON(frame->used == 0xffffffff);
			MM_DBG("frame %d busy\n", audio->out_tail);
			audplay_dsp_send_data_avail(audio, audio->out_tail,
					frame->used);
			frame->used = 0xffffffff;
			audio->out_needed = 0;
		}
	}
done:
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
/* ------------------- device --------------------- */
/* Reset the write-side double buffer to the empty state. */
static void audevrc_flush(struct audio *audio)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&audio->dsp_lock, flags);
	for (i = 0; i < 2; i++)
		audio->out[i].used = 0;
	audio->out_head = 0;
	audio->out_tail = 0;
	audio->out_needed = 0;
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
	atomic_set(&audio->out_bytes, 0);
}
/* Reset the read-side (host PCM) buffer ring to the empty state. */
static void audevrc_flush_pcm_buf(struct audio *audio)
{
	unsigned long flags;
	uint8_t i;

	spin_lock_irqsave(&audio->dsp_lock, flags);
	for (i = 0; i < PCM_BUF_MAX_COUNT; i++)
		audio->in[i].used = 0;
	audio->read_next = 0;
	audio->fill_next = 0;
	audio->buf_refresh = 0;
	spin_unlock_irqrestore(&audio->dsp_lock, flags);
}
/* Flush both I/O directions. Wakes sleepers BEFORE taking each mutex so
 * a reader/writer blocked inside read()/write() releases the lock and
 * observes the stop/flush flags rather than deadlocking us here. */
static void audevrc_ioport_reset(struct audio *audio)
{
	/* Make sure read/write thread are free from
	 * sleep and knowing that system is not able
	 * to process io request at the moment
	 */
	wake_up(&audio->write_wait);
	mutex_lock(&audio->write_lock);
	audevrc_flush(audio);
	mutex_unlock(&audio->write_lock);
	wake_up(&audio->read_wait);
	mutex_lock(&audio->read_lock);
	audevrc_flush_pcm_buf(audio);
	mutex_unlock(&audio->read_lock);
}
/* Non-zero when an event is queued for userspace or an abort was requested. */
static int audevrc_events_pending(struct audio *audio)
{
	unsigned long flags;
	int pending;

	spin_lock_irqsave(&audio->event_queue_lock, flags);
	pending = !list_empty(&audio->event_queue);
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	return pending || audio->event_abort;
}
static void audevrc_reset_event_queue(struct audio *audio)
{
unsigned long flags;
struct audevrc_event *drv_evt;
struct list_head *ptr, *next;
spin_lock_irqsave(&audio->event_queue_lock, flags);
list_for_each_safe(ptr, next, &audio->event_queue) {
drv_evt = list_first_entry(&audio->event_queue,
struct audevrc_event, list);
list_del(&drv_evt->list);
kfree(drv_evt);
}
list_for_each_safe(ptr, next, &audio->free_event_queue) {
drv_evt = list_first_entry(&audio->free_event_queue,
struct audevrc_event, list);
list_del(&drv_evt->list);
kfree(drv_evt);
}
spin_unlock_irqrestore(&audio->event_queue_lock, flags);
return;
}
/* Service AUDIO_GET_EVENT: block (optionally with a timeout taken from
 * the user's msm_audio_event.timeout_ms) until an event is queued or an
 * abort is signalled, then copy the event to userspace and recycle the
 * packet onto the free list. Caller holds get_event_lock. */
static long audevrc_process_event_req(struct audio *audio, void __user *arg)
{
	long rc;
	struct msm_audio_event usr_evt;
	struct audevrc_event *drv_evt = NULL;
	int timeout;
	unsigned long flags;
	if (copy_from_user(&usr_evt, arg, sizeof(struct msm_audio_event)))
		return -EFAULT;
	timeout = (int) usr_evt.timeout_ms;
	if (timeout > 0) {
		rc = wait_event_interruptible_timeout(
			audio->event_wait, audevrc_events_pending(audio),
			msecs_to_jiffies(timeout));
		if (rc == 0)
			return -ETIMEDOUT;
	} else {
		/* timeout <= 0 means wait indefinitely */
		rc = wait_event_interruptible(
			audio->event_wait, audevrc_events_pending(audio));
	}
	if (rc < 0)
		return rc;
	if (audio->event_abort) {
		audio->event_abort = 0;
		return -ENODEV;
	}
	rc = 0;
	spin_lock_irqsave(&audio->event_queue_lock, flags);
	if (!list_empty(&audio->event_queue)) {
		drv_evt = list_first_entry(&audio->event_queue,
			struct audevrc_event, list);
		list_del(&drv_evt->list);
	}
	if (drv_evt) {
		usr_evt.event_type = drv_evt->event_type;
		usr_evt.event_payload = drv_evt->payload;
		/* packet goes back to the free pool for reuse */
		list_add_tail(&drv_evt->list, &audio->free_event_queue);
	} else
		rc = -1;	/* woken with nothing queued */
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	if (!rc && copy_to_user(arg, &usr_evt, sizeof(usr_evt)))
		rc = -EFAULT;
	return rc;
}
/* Apply the EQ enable flag; pushes the setting to the DSP only while the
 * postproc is running, otherwise it is committed later on CFG_MSG ENABLE. */
static int audio_enable_eq(struct audio *audio, int enable)
{
	if (audio->eq_enable == enable && !audio->eq_needs_commit)
		return 0;

	audio->eq_enable = enable;
	if (!audio->running)
		return 0;

	audpp_dsp_set_eq(audio->dec_id, enable, &audio->eq);
	audio->eq_needs_commit = 0;
	return 0;
}
/* Main ioctl dispatcher. Three tiers, in order:
 *   1. AUDIO_GET_STATS - lockless fast path.
 *   2. Commands guarded only by dsp_lock (volume/pan/EQ) or by
 *      get_event_lock (event fetch/abort).
 *   3. Everything else under audio->lock (start/stop/flush/config,
 *      including ION allocation of the host-PCM read buffers).
 */
static long audevrc_ioctl(struct file *file, unsigned int cmd,
		unsigned long arg)
{
	struct audio *audio = file->private_data;
	int rc = -EINVAL;
	unsigned long flags = 0;
	uint16_t enable_mask;
	int enable;
	int prev_state;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	int len = 0;
	MM_DBG("cmd = %d\n", cmd);
	if (cmd == AUDIO_GET_STATS) {
		struct msm_audio_stats stats;
		stats.byte_count = audpp_avsync_byte_count(audio->dec_id);
		stats.sample_count = audpp_avsync_sample_count(audio->dec_id);
		if (copy_to_user((void *)arg, &stats, sizeof(stats)))
			return -EFAULT;
		return 0;
	}
	switch (cmd) {
	case AUDIO_ENABLE_AUDPP:
		if (copy_from_user(&enable_mask, (void *) arg,
				sizeof(enable_mask))) {
			rc = -EFAULT;
			break;
		}
		spin_lock_irqsave(&audio->dsp_lock, flags);
		enable = (enable_mask & EQ_ENABLE) ? 1 : 0;
		audio_enable_eq(audio, enable);
		spin_unlock_irqrestore(&audio->dsp_lock, flags);
		rc = 0;
		break;
	case AUDIO_SET_VOLUME:
		spin_lock_irqsave(&audio->dsp_lock, flags);
		/* arg carries the volume value directly, not a pointer */
		audio->vol_pan.volume = arg;
		if (audio->running)
			audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan);
		spin_unlock_irqrestore(&audio->dsp_lock, flags);
		rc = 0;
		break;
	case AUDIO_SET_PAN:
		spin_lock_irqsave(&audio->dsp_lock, flags);
		audio->vol_pan.pan = arg;
		if (audio->running)
			audpp_dsp_set_vol_pan(audio->dec_id, &audio->vol_pan);
		spin_unlock_irqrestore(&audio->dsp_lock, flags);
		rc = 0;
		break;
	case AUDIO_SET_EQ:
		/* disable EQ while the band table is being overwritten,
		 * restore the previous state afterwards */
		prev_state = audio->eq_enable;
		audio->eq_enable = 0;
		if (copy_from_user(&audio->eq.num_bands, (void *) arg,
				sizeof(audio->eq) -
				(AUDPP_CMD_CFG_OBJECT_PARAMS_COMMON_LEN + 2))) {
			rc = -EFAULT;
			break;
		}
		audio->eq_enable = prev_state;
		audio->eq_needs_commit = 1;
		rc = 0;
		break;
	}
	if (-EINVAL != rc)
		return rc;
	if (cmd == AUDIO_GET_EVENT) {
		MM_DBG("AUDIO_GET_EVENT\n");
		/* trylock: never block a second concurrent event fetch */
		if (mutex_trylock(&audio->get_event_lock)) {
			rc = audevrc_process_event_req(audio,
					(void __user *) arg);
			mutex_unlock(&audio->get_event_lock);
		} else
			rc = -EBUSY;
		return rc;
	}
	if (cmd == AUDIO_ABORT_GET_EVENT) {
		audio->event_abort = 1;
		wake_up(&audio->event_wait);
		return 0;
	}
	mutex_lock(&audio->lock);
	switch (cmd) {
	case AUDIO_START:
		MM_DBG("AUDIO_START\n");
		rc = audevrc_enable(audio);
		if (!rc) {
			/* wait for the DSP to report decoder init/play */
			rc = wait_event_interruptible_timeout(audio->wait,
				audio->dec_state != MSM_AUD_DECODER_STATE_NONE,
				msecs_to_jiffies(MSM_AUD_DECODER_WAIT_MS));
			MM_INFO("dec_state %d rc = %d\n", audio->dec_state, rc);
			if (audio->dec_state != MSM_AUD_DECODER_STATE_SUCCESS)
				rc = -ENODEV;
			else
				rc = 0;
		}
		break;
	case AUDIO_STOP:
		MM_DBG("AUDIO_STOP\n");
		rc = audevrc_disable(audio);
		audevrc_ioport_reset(audio);
		audio->stopped = 0;
		break;
	case AUDIO_FLUSH:
		MM_DBG("AUDIO_FLUSH\n");
		audio->rflush = 1;
		audio->wflush = 1;
		audevrc_ioport_reset(audio);
		if (audio->running) {
			/* flags are cleared by FLUSH_ACK from the DSP */
			audpp_flush(audio->dec_id);
			rc = wait_event_interruptible(audio->write_wait,
				!audio->wflush);
			if (rc < 0) {
				MM_ERR("AUDIO_FLUSH interrupted\n");
				rc = -EINTR;
			}
		} else {
			audio->rflush = 0;
			audio->wflush = 0;
		}
		break;
	case AUDIO_SET_CONFIG:{
			struct msm_audio_config config;
			if (copy_from_user
				(&config, (void *)arg, sizeof(config))) {
				rc = -EFAULT;
				break;
			}
			audio->mfield = config.meta_field;
			rc = 0;
			MM_DBG("AUDIO_SET_CONFIG applicable only \
				for meta field configuration\n");
			break;
		}
	case AUDIO_GET_CONFIG:{
			/* fixed format: EVRC input is 8 kHz mono */
			struct msm_audio_config config;
			config.buffer_size = BUFSZ;
			config.buffer_count = 2;
			config.sample_rate = 8000;
			config.channel_count = 1;
			config.meta_field = 0;
			config.unused[0] = 0;
			config.unused[1] = 0;
			config.unused[2] = 0;
			if (copy_to_user((void *)arg, &config, sizeof(config)))
				rc = -EFAULT;
			else
				rc = 0;
			break;
		}
	case AUDIO_GET_PCM_CONFIG:{
			struct msm_audio_pcm_config config;
			config.pcm_feedback = audio->pcm_feedback;
			config.buffer_count = PCM_BUF_MAX_COUNT;
			config.buffer_size = PCM_BUFSZ_MIN;
			if (copy_to_user((void *)arg, &config, sizeof(config)))
				rc = -EFAULT;
			else
				rc = 0;
			break;
		}
	case AUDIO_SET_PCM_CONFIG:{
			struct msm_audio_pcm_config config;
			if (copy_from_user
				(&config, (void *)arg, sizeof(config))) {
				rc = -EFAULT;
				break;
			}
			/* playback mode is fixed at open time */
			if (config.pcm_feedback != audio->pcm_feedback) {
				MM_ERR("Not sufficient permission to"
					"change the playback mode\n");
				rc = -EACCES;
				break;
			}
			/* clamp user-supplied sizes to driver limits */
			if ((config.buffer_count > PCM_BUF_MAX_COUNT) ||
				(config.buffer_count == 1))
				config.buffer_count = PCM_BUF_MAX_COUNT;
			if (config.buffer_size < PCM_BUFSZ_MIN)
				config.buffer_size = PCM_BUFSZ_MIN;
			/* Check if pcm feedback is required */
			if ((config.pcm_feedback) && (!audio->read_data)) {
				MM_DBG("allocate PCM buf %d\n",
					config.buffer_count *
					config.buffer_size);
				handle = ion_alloc(audio->client,
					(config.buffer_size *
					config.buffer_count),
					SZ_4K, ION_HEAP(ION_AUDIO_HEAP_ID), 0);
				if (IS_ERR_OR_NULL(handle)) {
					MM_ERR("Unable to alloc I/P buffs\n");
					audio->input_buff_handle = NULL;
					rc = -ENOMEM;
					break;
				}
				audio->input_buff_handle = handle;
				rc = ion_phys(audio->client ,
					handle, &addr, &len);
				if (rc) {
					MM_ERR("Invalid phy: %x sz: %x\n",
						(unsigned int) addr,
						(unsigned int) len);
					ion_free(audio->client, handle);
					audio->input_buff_handle = NULL;
					rc = -ENOMEM;
					break;
				} else {
					MM_INFO("Got valid phy: %x sz: %x\n",
						(unsigned int) audio->read_phys,
						(unsigned int) len);
				}
				audio->read_phys = (int32_t)addr;
				rc = ion_handle_get_flags(audio->client,
					handle, &ionflag);
				if (rc) {
					MM_ERR("could not get flags\n");
					ion_free(audio->client, handle);
					audio->input_buff_handle = NULL;
					rc = -ENOMEM;
					break;
				}
				audio->map_v_read = ion_map_kernel(
					audio->client, handle);
				if (IS_ERR(audio->map_v_read)) {
					MM_ERR("failed to map mem"
							" for read buf\n");
					ion_free(audio->client, handle);
					audio->input_buff_handle = NULL;
					rc = -ENOMEM;
				} else {
					/* carve the single ION region into
					 * buffer_count equal PCM slots */
					uint8_t index;
					uint32_t offset = 0;
					audio->read_data =
						audio->map_v_read;
					audio->buf_refresh = 0;
					audio->pcm_buf_count =
						config.buffer_count;
					audio->read_next = 0;
					audio->fill_next = 0;
					for (index = 0;
					index < config.buffer_count;
					index++) {
						audio->in[index].data =
							audio->read_data + offset;
						audio->in[index].addr =
							audio->read_phys + offset;
						audio->in[index].size =
							config.buffer_size;
						audio->in[index].used = 0;
						offset += config.buffer_size;
					}
					MM_DBG("read buf: phy addr \
						0x%08x kernel addr 0x%08x\n",
						audio->read_phys,
						(int)audio->read_data);
					rc = 0;
				}
			} else {
				rc = 0;
			}
			break;
		}
	case AUDIO_PAUSE:
		MM_DBG("AUDIO_PAUSE %ld\n", arg);
		rc = audpp_pause(audio->dec_id, (int) arg);
		break;
	default:
		rc = -EINVAL;
	}
	mutex_unlock(&audio->lock);
	return rc;
}
/* Only useful in tunnel-mode */
/* Drain: wait until both write buffers are consumed and the DSP is
 * asking for data, then wait for the PCMDMAMISSED underrun message
 * (teos) which indicates the decoder has truly run dry. Returns
 * -EINVAL in non-tunnel mode or when not running, -EBUSY if a flush
 * interrupted the drain. */
static int audevrc_fsync(struct file *file, loff_t a, loff_t b, int datasync)
{
	struct audio *audio = file->private_data;
	int rc = 0;
	MM_DBG("\n"); /* Macro prints the file name and function */
	if (!audio->running || audio->pcm_feedback) {
		rc = -EINVAL;
		goto done_nolock;
	}
	mutex_lock(&audio->write_lock);
	rc = wait_event_interruptible(audio->write_wait,
		(!audio->out[0].used &&
		!audio->out[1].used &&
		audio->out_needed) || audio->wflush);
	if (rc < 0)
		goto done;
	else if (audio->wflush) {
		rc = -EBUSY;
		goto done;
	}
	/* pcm dmamiss message is sent continously
	 * when decoder is starved so no race
	 * condition concern
	 */
	audio->teos = 0;
	rc = wait_event_interruptible(audio->write_wait,
		audio->teos || audio->wflush);
	if (audio->wflush)
		rc = -EBUSY;
done:
	mutex_unlock(&audio->write_lock);
done_nolock:
	return rc;
}
/* read(): return decoded PCM to userspace in non-tunnel mode. Copies at
 * most one DSP-filled buffer per call (must be frame-aligned, so the
 * user buffer has to hold the whole filled buffer), then re-arms the
 * DSP if a buffer refresh was deferred. Returns bytes copied, 0 in
 * tunnel mode, or a negative errno. */
static ssize_t audevrc_read(struct file *file, char __user *buf, size_t count,
			  loff_t *pos)
{
	struct audio *audio = file->private_data;
	const char __user *start = buf;
	int rc = 0;
	if (!audio->pcm_feedback) {
		return 0;
		/* PCM feedback is not enabled. Nothing to read */
	}
	mutex_lock(&audio->read_lock);
	MM_DBG("\n"); /* Macro prints the file name and function */
	while (count > 0) {
		rc = wait_event_interruptible(audio->read_wait,
			(audio->in[audio->read_next].used > 0) ||
			(audio->stopped) || (audio->rflush));
		MM_DBG("wait terminated \n");
		if (rc < 0)
			break;
		if (audio->stopped || audio->rflush) {
			rc = -EBUSY;
			break;
		}
		if (count < audio->in[audio->read_next].used) {
			/* Read must happen in frame boundary. Since driver does
			 * not know frame size, read count must be greater or
			 * equal to size of PCM samples
			 */
			MM_DBG("read stop - partial frame\n");
			break;
		} else {
			MM_DBG("read from in[%d]\n", audio->read_next);
			/* order reads from the output buffer */
			rmb();
			if (copy_to_user
				(buf, audio->in[audio->read_next].data,
				audio->in[audio->read_next].used)) {
				MM_ERR("invalid addr %x \n",
					(unsigned int)buf);
				rc = -EFAULT;
				break;
			}
			count -= audio->in[audio->read_next].used;
			buf += audio->in[audio->read_next].used;
			audio->in[audio->read_next].used = 0;
			if ((++audio->read_next) == audio->pcm_buf_count)
				audio->read_next = 0;
			break;
			/* Force to exit while loop
			 * to prevent output thread
			 * sleep too long if data is
			 * not ready at this moment
			 */
		}
	}
	/* don't feed output buffer to HW decoder during flushing
	 * buffer refresh command will be sent once flush completes
	 * send buf refresh command here can confuse HW decoder
	 */
	if (audio->buf_refresh && !audio->rflush) {
		audio->buf_refresh = 0;
		MM_DBG("kick start pcm feedback again\n");
		audevrc_buffer_refresh(audio);
	}
	mutex_unlock(&audio->read_lock);
	if (buf > start)
		rc = buf - start;	/* total bytes delivered */
	MM_DBG("read %d bytes\n", rc);
	return rc;
}
/*
 * Queue a final frame carrying only the meta field (with its EOS flag
 * still set) so the DSP is told the stream has ended.  Waits until the
 * decoder wants data and both ping-pong buffers have drained.
 * Called with write_lock held.
 */
static int audevrc_process_eos(struct audio *audio,
		const char __user *buf_start, unsigned short mfield_size)
{
	int rc = 0;
	struct buffer *frame;
	frame = audio->out + audio->out_head;
	rc = wait_event_interruptible(audio->write_wait,
		(audio->out_needed &&
		audio->out[0].used == 0 &&
		audio->out[1].used == 0)
		|| (audio->stopped)
		|| (audio->wflush));
	if (rc < 0)
		goto done;
	if (audio->stopped || audio->wflush) {
		rc = -EBUSY;
		goto done;
	}
	/* The EOS frame consists of the meta field alone */
	if (copy_from_user(frame->data, buf_start, mfield_size)) {
		rc = -EFAULT;
		goto done;
	}
	frame->mfield_sz = mfield_size;
	audio->out_head ^= 1; /* flip the ping-pong index */
	frame->used = mfield_size;
	audevrc_send_data(audio, 0);
done:
	return rc;
}
/*
 * Write encoded EVRC bitstream data into the ping-pong output buffers and
 * kick the DSP.  When meta-field mode is enabled, the first bytes of the
 * user buffer carry a meta field whose EOS flag may terminate the stream.
 * Returns bytes consumed or a negative errno.
 */
static ssize_t audevrc_write(struct file *file, const char __user *buf,
		size_t count, loff_t *pos)
{
	struct audio *audio = file->private_data;
	const char __user *start = buf;
	struct buffer *frame;
	size_t xfer;
	char *cpy_ptr;
	unsigned short mfield_size = 0;
	int rc = 0, eos_condition = AUDEVRC_EOS_NONE;
	MM_DBG("cnt=%d\n", count);
	/* Samples are 16-bit; reject odd byte counts */
	if (count & 1)
		return -EINVAL;
	mutex_lock(&audio->write_lock);
	while (count > 0) {
		frame = audio->out + audio->out_head;
		cpy_ptr = frame->data;
		/* Wait for the current ping-pong buffer to free up */
		rc = wait_event_interruptible(audio->write_wait,
			(frame->used == 0)
			|| (audio->stopped)
			|| (audio->wflush));
		if (rc < 0)
			break;
		if (audio->stopped || audio->wflush) {
			rc = -EBUSY;
			break;
		}
		if (audio->mfield) {
			if (buf == start) {
				/* Processing beginning of user buffer */
				if (__get_user(mfield_size,
					(unsigned short __user *) buf)) {
					rc = -EFAULT;
					break;
				} else if (mfield_size > count) {
					rc = -EINVAL;
					break;
				}
				MM_DBG("mf offset_val %x\n", mfield_size);
				if (copy_from_user(cpy_ptr, buf,
						mfield_size)) {
					rc = -EFAULT;
					break;
				}
				/* Check if EOS flag is set and buffer has
				 * contains just meta field
				 */
				if (cpy_ptr[AUDEVRC_EOS_FLG_OFFSET] &
						AUDEVRC_EOS_FLG_MASK) {
					MM_DBG("eos set\n");
					eos_condition = AUDEVRC_EOS_SET;
					if (mfield_size == count) {
						buf += mfield_size;
						break;
					} else
						/* More data follows: clear the
						 * flag in this frame; EOS is
						 * processed after the loop.
						 */
						cpy_ptr[AUDEVRC_EOS_FLG_OFFSET] &=
							~AUDEVRC_EOS_FLG_MASK;
				}
				/* Check EOS to see if */
				cpy_ptr += mfield_size;
				count -= mfield_size;
				buf += mfield_size;
			} else {
				mfield_size = 0;
				MM_DBG("continuous buffer\n");
			}
			frame->mfield_sz = mfield_size;
		}
		/* Copy as much payload as fits after the meta field */
		xfer = (count > (frame->size - mfield_size)) ?
			(frame->size - mfield_size) : count;
		if (copy_from_user(cpy_ptr, buf, xfer)) {
			rc = -EFAULT;
			break;
		}
		frame->used = xfer + mfield_size;
		audio->out_head ^= 1; /* flip the ping-pong index */
		count -= xfer;
		buf += xfer;
		audevrc_send_data(audio, 0);
	}
	/* EOS was seen in the meta field: queue the terminating frame */
	if (eos_condition == AUDEVRC_EOS_SET)
		rc = audevrc_process_eos(audio, start, mfield_size);
	mutex_unlock(&audio->write_lock);
	if (!rc) {
		if (buf > start)
			return buf - start;
	}
	return rc;
}
/*
 * Release the device node: stop the decoder, return DSP resources, free
 * the ION buffers and finally the instance itself.  Teardown order matters:
 * the decoder is disabled and flushed before its memory is unmapped.
 */
static int audevrc_release(struct inode *inode, struct file *file)
{
	struct audio *audio = file->private_data;
	MM_INFO("audio instance 0x%08x freeing\n", (int)audio);
	mutex_lock(&audio->lock);
	audevrc_disable(audio);
	if (audio->rmt_resource_released == 0)
		rmt_put_resource(audio);
	audevrc_flush(audio);
	audevrc_flush_pcm_buf(audio);
	msm_adsp_put(audio->audplay);
	audpp_adec_free(audio->dec_id);
#ifdef CONFIG_HAS_EARLYSUSPEND
	unregister_early_suspend(&audio->suspend_ctl.node);
#endif
	/* Wake any blocked event reader so it sees the abort flag */
	audio->event_abort = 1;
	wake_up(&audio->event_wait);
	audevrc_reset_event_queue(audio);
	/* Output buffers always exist; input buffers only in non-tunnel mode */
	ion_unmap_kernel(audio->client, audio->output_buff_handle);
	ion_free(audio->client, audio->output_buff_handle);
	if (audio->input_buff_handle != NULL) {
		ion_unmap_kernel(audio->client, audio->input_buff_handle);
		ion_free(audio->client, audio->input_buff_handle);
	}
	ion_client_destroy(audio->client);
	mutex_unlock(&audio->lock);
#ifdef CONFIG_DEBUG_FS
	if (audio->dentry)
		debugfs_remove(audio->dentry);
#endif
	kfree(audio);
	return 0;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
/*
 * Queue an asynchronous event for AUDIO_GET_EVENT readers.  Reuses a node
 * from the free list when possible, otherwise allocates with GFP_ATOMIC
 * since this may run in a non-sleeping context.  Drops the event if no
 * memory is available.
 */
static void audevrc_post_event(struct audio *audio, int type,
		union msm_audio_event_payload payload)
{
	struct audevrc_event *e_node = NULL;
	unsigned long flags;
	spin_lock_irqsave(&audio->event_queue_lock, flags);
	if (!list_empty(&audio->free_event_queue)) {
		e_node = list_first_entry(&audio->free_event_queue,
				struct audevrc_event, list);
		list_del(&e_node->list);
	} else {
		e_node = kmalloc(sizeof(struct audevrc_event), GFP_ATOMIC);
		if (!e_node) {
			MM_ERR("No mem to post event %d\n", type);
			spin_unlock_irqrestore(&audio->event_queue_lock, flags);
			return;
		}
	}
	e_node->event_type = type;
	e_node->payload = payload;
	list_add_tail(&e_node->list, &audio->event_queue);
	spin_unlock_irqrestore(&audio->event_queue_lock, flags);
	wake_up(&audio->event_wait);
}
/*
 * Early-suspend callback: post an AUDIO_EVENT_SUSPEND to listeners.
 * No payload data is meaningful for this event, but the union must be
 * zeroed: the original code passed it uninitialized, which reads an
 * uninitialized automatic object (undefined behavior) and copies stack
 * garbage into the event queue.
 */
static void audevrc_suspend(struct early_suspend *h)
{
	struct audevrc_suspend_ctl *ctl =
		container_of(h, struct audevrc_suspend_ctl, node);
	union msm_audio_event_payload payload;
	memset(&payload, 0, sizeof(payload));
	MM_DBG("\n"); /* Macro prints the file name and function */
	audevrc_post_event(ctl->audio, AUDIO_EVENT_SUSPEND, payload);
}
/*
 * Early-suspend resume callback: post an AUDIO_EVENT_RESUME to listeners.
 * The payload union is zero-initialized before being passed by value; the
 * original code handed it over uninitialized (undefined behavior, leaks
 * stack bytes into the event queue).
 */
static void audevrc_resume(struct early_suspend *h)
{
	struct audevrc_suspend_ctl *ctl =
		container_of(h, struct audevrc_suspend_ctl, node);
	union msm_audio_event_payload payload;
	memset(&payload, 0, sizeof(payload));
	MM_DBG("\n"); /* Macro prints the file name and function */
	audevrc_post_event(ctl->audio, AUDIO_EVENT_RESUME, payload);
}
#endif
#ifdef CONFIG_DEBUG_FS
/*
 * debugfs open: stash the audio instance pointer for subsequent reads.
 * Return type is int (not ssize_t as originally declared) to match
 * file_operations.open; the ssize_t prototype made the .open initializer
 * an incompatible-pointer-type assignment.
 */
static int audevrc_debug_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
/*
 * debugfs read: render the instance state into a static text buffer and
 * copy the requested window to userspace.  Fields printed after the
 * mutex_unlock() are deliberately sampled without locking (see comment
 * below) so the dump still works when playback is wedged.
 */
static ssize_t audevrc_debug_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	const int debug_bufmax = 1024;
	static char buffer[1024];
	int n = 0, i;
	struct audio *audio = file->private_data;
	mutex_lock(&audio->lock);
	n = scnprintf(buffer, debug_bufmax, "opened %d\n", audio->opened);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"enabled %d\n", audio->enabled);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"stopped %d\n", audio->stopped);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"pcm_feedback %d\n", audio->pcm_feedback);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_buf_sz %d\n", audio->out[0].size);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"pcm_buf_count %d \n", audio->pcm_buf_count);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"pcm_buf_sz %d \n", audio->in[0].size);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"volume %x \n", audio->vol_pan.volume);
	mutex_unlock(&audio->lock);
	/* Following variables are only useful for debugging when
	 * when playback halts unexpectedly. Thus, no mutual exclusion
	 * enforced
	 */
	n += scnprintf(buffer + n, debug_bufmax - n,
			"wflush %d\n", audio->wflush);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"rflush %d\n", audio->rflush);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"running %d \n", audio->running);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"dec state %d \n", audio->dec_state);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_needed %d \n", audio->out_needed);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_head %d \n", audio->out_head);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out_tail %d \n", audio->out_tail);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out[0].used %d \n", audio->out[0].used);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"out[1].used %d \n", audio->out[1].used);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"buffer_refresh %d \n", audio->buf_refresh);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"read_next %d \n", audio->read_next);
	n += scnprintf(buffer + n, debug_bufmax - n,
			"fill_next %d \n", audio->fill_next);
	/* Label fixed: this line prints in[i].used, not in[i].size */
	for (i = 0; i < audio->pcm_buf_count; i++)
		n += scnprintf(buffer + n, debug_bufmax - n,
			"in[%d].used %d \n", i, audio->in[i].used);
	buffer[n] = 0;
	return simple_read_from_buffer(buf, count, ppos, buffer, n);
}
/* debugfs file hooks for the per-instance status dump */
static const struct file_operations audevrc_debug_fops = {
	.read = audevrc_debug_read,
	.open = audevrc_debug_open,
};
#endif
/*
 * Open a decoder instance.  Allocates the instance struct, claims a DSP
 * decoder, sets up ION output buffers, locks, wait queues and optionally
 * debugfs/early-suspend hooks.  The operating mode follows the open
 * flags: read+write selects non-tunnel (PCM feedback), write-only selects
 * tunnel playback; other combinations are rejected.  Error unwinding is
 * done with the goto ladder at the bottom, in reverse acquisition order.
 */
static int audevrc_open(struct inode *inode, struct file *file)
{
	struct audio *audio = NULL;
	int rc, dec_attrb, decid, i;
	struct audevrc_event *e_node = NULL;
	unsigned mem_sz = DMASZ;
	unsigned long ionflag = 0;
	ion_phys_addr_t addr = 0;
	struct ion_handle *handle = NULL;
	struct ion_client *client = NULL;
	int len = 0;
#ifdef CONFIG_DEBUG_FS
	/* 4 bytes represent the decoder number, 1 byte the terminating NUL */
	char name[sizeof "msm_evrc_" + 5];
#endif
	/* Allocate audio instance, set to zero */
	audio = kzalloc(sizeof(struct audio), GFP_KERNEL);
	if (!audio) {
		MM_ERR("no memory to allocate audio instance\n");
		rc = -ENOMEM;
		goto done;
	}
	MM_INFO("audio instance 0x%08x created\n", (int)audio);
	/* Allocate the decoder */
	dec_attrb = AUDDEC_DEC_EVRC;
	if ((file->f_mode & FMODE_WRITE) &&
		(file->f_mode & FMODE_READ)) {
		dec_attrb |= MSM_AUD_MODE_NONTUNNEL;
		audio->pcm_feedback = NON_TUNNEL_MODE_PLAYBACK;
	} else if ((file->f_mode & FMODE_WRITE) &&
			!(file->f_mode & FMODE_READ)) {
		dec_attrb |= MSM_AUD_MODE_TUNNEL;
		audio->pcm_feedback = TUNNEL_MODE_PLAYBACK;
	} else {
		kfree(audio);
		rc = -EACCES;
		goto done;
	}
	decid = audpp_adec_alloc(dec_attrb, &audio->module_name,
			&audio->queue_id);
	if (decid < 0) {
		MM_ERR("No free decoder available, freeing instance 0x%08x\n",
				(int)audio);
		rc = -ENODEV;
		kfree(audio);
		goto done;
	}
	audio->dec_id = decid & MSM_AUD_DECODER_MASK;
	client = msm_ion_client_create(UINT_MAX, "Audio_EVRC_Client");
	if (IS_ERR_OR_NULL(client)) {
		pr_err("Unable to create ION client\n");
		rc = -ENOMEM;
		goto client_create_error;
	}
	audio->client = client;
	/* Carve the DMA output buffer out of the audio ION heap */
	handle = ion_alloc(client, mem_sz, SZ_4K,
		ION_HEAP(ION_AUDIO_HEAP_ID), 0);
	if (IS_ERR_OR_NULL(handle)) {
		MM_ERR("Unable to create allocate O/P buffers\n");
		rc = -ENOMEM;
		goto output_buff_alloc_error;
	}
	audio->output_buff_handle = handle;
	rc = ion_phys(client, handle, &addr, &len);
	if (rc) {
		MM_ERR("O/P buffers:Invalid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
		goto output_buff_get_phys_error;
	} else {
		MM_INFO("O/P buffers:valid phy: %x sz: %x\n",
			(unsigned int) addr, (unsigned int) len);
	}
	audio->phys = (int32_t)addr;
	rc = ion_handle_get_flags(client, handle, &ionflag);
	if (rc) {
		MM_ERR("could not get flags for the handle\n");
		goto output_buff_get_flags_error;
	}
	audio->map_v_write = ion_map_kernel(client, handle);
	if (IS_ERR(audio->map_v_write)) {
		MM_ERR("could not map write buffers\n");
		rc = -ENOMEM;
		goto output_buff_map_error;
	}
	audio->data = audio->map_v_write;
	MM_DBG("write buf: phy addr 0x%08x kernel addr 0x%08x\n",
		audio->phys, (int)audio->data);
	/* Tunnel mode drives the audio path itself and so needs audmgr */
	if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK) {
		rc = audmgr_open(&audio->audmgr);
		if (rc) {
			MM_ERR("audmgr open failed, freeing instance \
					0x%08x\n", (int)audio);
			goto err;
		}
	}
	rc = msm_adsp_get(audio->module_name, &audio->audplay,
		&audplay_adsp_ops_evrc, audio);
	if (rc) {
		MM_ERR("failed to get %s module, freeing instance 0x%08x\n",
				audio->module_name, (int)audio);
		if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK)
			audmgr_close(&audio->audmgr);
		goto err;
	}
	rc = rmt_get_resource(audio);
	if (rc) {
		MM_ERR("ADSP resources are not available for EVRC session \
			 0x%08x on decoder: %d\n", (int)audio, audio->dec_id);
		if (audio->pcm_feedback == TUNNEL_MODE_PLAYBACK)
			audmgr_close(&audio->audmgr);
		msm_adsp_put(audio->audplay);
		goto err;
	}
	/* Input (read) buffers are allocated lazily by the ioctl path */
	audio->input_buff_handle = NULL;
	/* Initialize all locks of audio instance */
	mutex_init(&audio->lock);
	mutex_init(&audio->write_lock);
	mutex_init(&audio->read_lock);
	mutex_init(&audio->get_event_lock);
	spin_lock_init(&audio->dsp_lock);
	init_waitqueue_head(&audio->write_wait);
	init_waitqueue_head(&audio->read_wait);
	INIT_LIST_HEAD(&audio->free_event_queue);
	INIT_LIST_HEAD(&audio->event_queue);
	init_waitqueue_head(&audio->wait);
	init_waitqueue_head(&audio->event_wait);
	spin_lock_init(&audio->event_queue_lock);
	/* Split the DMA region into the two ping-pong output buffers */
	audio->out[0].data = audio->data + 0;
	audio->out[0].addr = audio->phys + 0;
	audio->out[0].size = BUFSZ;
	audio->out[1].data = audio->data + BUFSZ;
	audio->out[1].addr = audio->phys + BUFSZ;
	audio->out[1].size = BUFSZ;
	audio->vol_pan.volume = 0x3FFF;
	audevrc_flush(audio);
	file->private_data = audio;
	audio->opened = 1;
#ifdef CONFIG_DEBUG_FS
	snprintf(name, sizeof name, "msm_evrc_%04x", audio->dec_id);
	audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
			NULL, (void *) audio, &audevrc_debug_fops);
	if (IS_ERR(audio->dentry))
		MM_DBG("debugfs_create_file failed\n");
#endif
#ifdef CONFIG_HAS_EARLYSUSPEND
	audio->suspend_ctl.node.level = EARLY_SUSPEND_LEVEL_DISABLE_FB;
	audio->suspend_ctl.node.resume = audevrc_resume;
	audio->suspend_ctl.node.suspend = audevrc_suspend;
	audio->suspend_ctl.audio = audio;
	register_early_suspend(&audio->suspend_ctl.node);
#endif
	/* Pre-allocate event nodes; a partial pool is tolerated */
	for (i = 0; i < AUDEVRC_EVENT_NUM; i++) {
		e_node = kmalloc(sizeof(struct audevrc_event), GFP_KERNEL);
		if (e_node)
			list_add_tail(&e_node->list, &audio->free_event_queue);
		else {
			MM_ERR("event pkt alloc failed\n");
			break;
		}
	}
done:
	return rc;
err:
	/* Unwind in reverse acquisition order */
	ion_unmap_kernel(client, audio->output_buff_handle);
output_buff_map_error:
output_buff_get_phys_error:
output_buff_get_flags_error:
	ion_free(client, audio->output_buff_handle);
output_buff_alloc_error:
	ion_client_destroy(client);
client_create_error:
	audpp_adec_free(audio->dec_id);
	kfree(audio);
	return rc;
}
/* Character-device entry points for /dev/msm_evrc */
static const struct file_operations audio_evrc_fops = {
	.owner = THIS_MODULE,
	.open = audevrc_open,
	.release = audevrc_release,
	.read = audevrc_read,
	.write = audevrc_write,
	.unlocked_ioctl = audevrc_ioctl,
	.fsync = audevrc_fsync,
};
/* Misc device node (/dev/msm_evrc) with a dynamically assigned minor */
struct miscdevice audio_evrc_misc = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "msm_evrc",
	.fops = &audio_evrc_fops,
};
/* Module init: register the /dev/msm_evrc misc device */
static int __init audevrc_init(void)
{
	return misc_register(&audio_evrc_misc);
}
/* Module exit: remove the /dev/msm_evrc misc device */
static void __exit audevrc_exit(void)
{
	misc_deregister(&audio_evrc_misc);
}
module_init(audevrc_init);
module_exit(audevrc_exit);
MODULE_DESCRIPTION("MSM EVRC driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
caio2k/kernel-n9 | arch/frv/mb93090-mb00/pci-vdk.c | 600 | 12765 | /* pci-vdk.c: MB93090-MB00 (VDK) PCI support
*
* Copyright (C) 2003, 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/mb-regs.h>
#include <asm/mb86943a.h>
#include "pci-frv.h"
unsigned int __nongpreldata pci_probe = 1;
int __nongpreldata pcibios_last_bus = -1;
struct pci_bus *__nongpreldata pci_root_bus;
struct pci_ops *__nongpreldata pci_root_ops;
/*
* The accessible PCI window does not cover the entire CPU address space, but
* there are devices we want to access outside of that window, so we need to
* insert specific PCI bus resources instead of using the platform-level bus
* resources directly for the PCI root bus.
*
* These are configured and inserted by pcibios_init() and are attached to the
* root bus by pcibios_fixup_bus().
*/
/* PCI I/O port window; start/end are filled in from the bridge registers
 * by pcibios_init() before being inserted into ioport_resource.
 */
static struct resource pci_ioport_resource = {
	.name	= "PCI IO",
	.start	= 0,
	.end	= IO_SPACE_LIMIT,
	.flags	= IORESOURCE_IO,
};
/* PCI memory window; likewise programmed at init time from the bridge */
static struct resource pci_iomem_resource = {
	.name	= "PCI mem",
	.start	= 0,
	.end	= -1,
	.flags	= IORESOURCE_MEM,
};
/*
* Functions for accessing PCI configuration space
*/
/* Build a type-1 config-space address word (enable bit | bus | devfn | reg).
 * The second parameter was previously named "dev" while the body referenced
 * "devfn", silently capturing the caller's local variable; it is renamed so
 * the macro actually uses its argument (all call sites already pass devfn
 * in that position, so the expansion is unchanged).
 */
#define CONFIG_CMD(bus, devfn, where) \
	(0x80000000 | (bus->number << 16) | (devfn << 8) | (where & ~3))
#define __set_PciCfgAddr(A) writel((A), (volatile void __iomem *) __region_CS1 + 0x80)
#define __get_PciCfgDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 3))
#define __get_PciCfgDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x88 + ((A) & 2))
#define __get_PciCfgDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x88)
#define __set_PciCfgDataB(A,V) \
writeb((V), (volatile void __iomem *) __region_CS1 + 0x88 + (3 - ((A) & 3)))
#define __set_PciCfgDataW(A,V) \
writew((V), (volatile void __iomem *) __region_CS1 + 0x88 + (2 - ((A) & 2)))
#define __set_PciCfgDataL(A,V) \
writel((V), (volatile void __iomem *) __region_CS1 + 0x88)
#define __get_PciBridgeDataB(A) readb((volatile void __iomem *) __region_CS1 + 0x800 + (A))
#define __get_PciBridgeDataW(A) readw((volatile void __iomem *) __region_CS1 + 0x800 + (A))
#define __get_PciBridgeDataL(A) readl((volatile void __iomem *) __region_CS1 + 0x800 + (A))
#define __set_PciBridgeDataB(A,V) writeb((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A))
#define __set_PciBridgeDataW(A,V) writew((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A))
#define __set_PciBridgeDataL(A,V) writel((V), (volatile void __iomem *) __region_CS1 + 0x800 + (A))
/*
 * Debug filter hook used during bring-up to restrict config-space tracing
 * to particular devices.  In production it selects nothing.
 */
static inline int __query(const struct pci_dev *dev)
{
	return 0;
}
/*****************************************************************************/
/*
*
*/
/*
 * Read PCI configuration space.  Accesses to bus 0, devfn 0 go straight to
 * the host bridge's own register window; everything else goes through the
 * CONFIG_ADDRESS/CONFIG_DATA-style mechanism.  A full 32-bit word is always
 * fetched, then shifted down to the requested size.
 */
static int pci_frv_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
			       u32 *val)
{
	u32 _value;
	if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) {
		_value = __get_PciBridgeDataL(where & ~3);
	}
	else {
		__set_PciCfgAddr(CONFIG_CMD(bus, devfn, where));
		_value = __get_PciCfgDataL(where & ~3);
	}
	/* Extract the addressed byte/word from the 32-bit read */
	switch (size) {
	case 1:
		_value = _value >> ((where & 3) * 8);
		break;
	case 2:
		_value = _value >> ((where & 2) * 8);
		break;
	case 4:
		break;
	default:
		BUG();
	}
	*val = _value;
	return PCIBIOS_SUCCESSFUL;
}
/*
 * Write PCI configuration space.  Mirrors the read path: bus 0, devfn 0 is
 * the host bridge's own window, everything else uses the config
 * address/data registers.  The byte/word accessors handle sub-word lane
 * placement themselves.
 */
static int pci_frv_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
				u32 value)
{
	switch (size) {
	case 1:
		if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) {
			__set_PciBridgeDataB(where, value);
		}
		else {
			__set_PciCfgAddr(CONFIG_CMD(bus, devfn, where));
			__set_PciCfgDataB(where, value);
		}
		break;
	case 2:
		if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) {
			__set_PciBridgeDataW(where, value);
		}
		else {
			__set_PciCfgAddr(CONFIG_CMD(bus, devfn, where));
			__set_PciCfgDataW(where, value);
		}
		break;
	case 4:
		if (bus->number == 0 && devfn == PCI_DEVFN(0, 0)) {
			__set_PciBridgeDataL(where, value);
		}
		else {
			__set_PciCfgAddr(CONFIG_CMD(bus, devfn, where));
			__set_PciCfgDataL(where, value);
		}
		break;
	default:
		BUG();
	}
	return PCIBIOS_SUCCESSFUL;
}
static struct pci_ops pci_direct_frv = {
pci_frv_read_config,
pci_frv_write_config,
};
/*
* Before we decide to use direct hardware access mechanisms, we try to do some
* trivial checks to ensure it at least _seems_ to be working -- we just test
* whether bus 00 contains a host bridge (this is similar to checking
* techniques used in XFree86, but ours should be more reliable since we
* attempt to make use of direct access hints provided by the PCI BIOS).
*
* This should be close to trivial, but it isn't, because there are buggy
* chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
*/
/* Probe bus 0, devfn 0 through the given ops and verify the expected VDK
 * bridge vendor/device ID (0x200e10cf) answers.  Uses a fake pci_bus on
 * the stack since no bus has been scanned yet.  Returns 1 on success.
 */
static int __init pci_sanity_check(struct pci_ops *o)
{
	struct pci_bus bus;		/* Fake bus and device */
	u32 id;
	bus.number	= 0;
	if (o->read(&bus, 0, PCI_VENDOR_ID, 4, &id) == PCIBIOS_SUCCESSFUL) {
		printk("PCI: VDK Bridge device:vendor: %08x\n", id);
		if (id == 0x200e10cf)
			return 1;
	}
	printk("PCI: VDK Bridge: Sanity check failed\n");
	return 0;
}
/* Verify that direct config-space access works (IRQs off around the
 * probe); returns the ops table to use, or NULL if no bridge responded.
 */
static struct pci_ops * __init pci_check_direct(void)
{
	unsigned long flags;
	local_irq_save(flags);
	/* check if access works */
	if (pci_sanity_check(&pci_direct_frv)) {
		local_irq_restore(flags);
		printk("PCI: Using configuration frv\n");
//		request_mem_region(0xBE040000, 256, "FRV bridge");
//		request_mem_region(0xBFFFFFF4, 12, "PCI frv");
		return &pci_direct_frv;
	}
	local_irq_restore(flags);
	return NULL;
}
/*
* Discover remaining PCI buses in case there are peer host bridges.
* We use the number of last PCI bus provided by the PCI BIOS.
*/
/* Scan any peer host-bridge buses up to pcibios_last_bus that the root
 * scan did not discover.  A fake pci_bus/pci_dev pair on the stack is
 * used to poke each candidate bus's config space before committing to a
 * full pci_scan_bus().
 */
static void __init pcibios_fixup_peer_bridges(void)
{
	struct pci_bus bus;
	struct pci_dev dev;
	int n;
	u16 l;
	if (pcibios_last_bus <= 0 || pcibios_last_bus >= 0xff)
		return;
	printk("PCI: Peer bridge fixup\n");
	for (n=0; n <= pcibios_last_bus; n++) {
		if (pci_find_bus(0, n))
			continue;
		bus.number = n;
		bus.ops = pci_root_ops;
		dev.bus = &bus;
		/* Any live function on the bus proves the bus exists */
		for(dev.devfn=0; dev.devfn<256; dev.devfn += 8)
			if (!pci_read_config_word(&dev, PCI_VENDOR_ID, &l) &&
			    l != 0x0000 && l != 0xffff) {
				printk("Found device at %02x:%02x [%04x]\n", n, dev.devfn, l);
				printk("PCI: Discovered peer bus %02x\n", n);
				pci_scan_bus(n, pci_root_ops, NULL);
				break;
			}
	}
}
/*
 * Exceptions for specific devices. Usually work-arounds for fatal design flaws.
 */
static void __init pci_fixup_umc_ide(struct pci_dev *d)
{
	/*
	 * The UM8886BF IDE controller reports its BARs as memory regions
	 * even though they are really I/O ports; force the I/O-space flag
	 * on each of its first four resources.
	 */
	int bar;
	printk("PCI: Fixing base address flags for device %s\n", pci_name(d));
	for (bar = 0; bar < 4; bar++)
		d->resource[bar].flags |= PCI_BASE_ADDRESS_SPACE_IO;
}
/* Respect the non-standard I/O port decoding used by PCI IDE controllers:
 * adjust the legacy secondary control BAR (0x374-style) to its real port.
 */
static void __init pci_fixup_ide_bases(struct pci_dev *d)
{
	int i;
	/*
	 * PCI IDE controllers use non-standard I/O port decoding, respect it.
	 */
	if ((d->class >> 8) != PCI_CLASS_STORAGE_IDE)
		return;
	printk("PCI: IDE base address fixup for %s\n", pci_name(d));
	for(i=0; i<4; i++) {
		struct resource *r = &d->resource[i];
		/* 0x374/0x3f4 control block: real port is base + 2 */
		if ((r->start & ~0x80) == 0x374) {
			r->start |= 2;
			r->end = r->start;
		}
	}
}
static void __init pci_fixup_ide_trash(struct pci_dev *d)
{
	int bar;
	/*
	 * Some PCI IDE controllers expose garbage in their first four base
	 * registers; wipe start, end and flags so the core ignores them.
	 */
	printk("PCI: IDE base address trash cleared for %s\n", pci_name(d));
	for (bar = 0; bar < 4; bar++) {
		struct resource *res = &d->resource[bar];
		res->start = 0;
		res->end = 0;
		res->flags = 0;
	}
}
/* Cap the global PCI latency timer for chipsets that lock up above 32 */
static void __devinit pci_fixup_latency(struct pci_dev *d)
{
	/*
	 * SiS 5597 and 5598 chipsets require latency timer set to
	 * at most 32 to avoid lockups.
	 */
	DBG("PCI: Setting max latency to 32\n");
	pcibios_max_latency = 32;
}
/* Hook the quirks above into the header fixup pass, matched by vendor and
 * device ID (PCI_ANY_ID entries run for every device).
 */
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_UMC, PCI_DEVICE_ID_UMC_UM8886BF, pci_fixup_umc_ide);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5513, pci_fixup_ide_trash);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5597, pci_fixup_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_5598, pci_fixup_latency);
DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
/*
* Called after each bus is probed, but before its children
* are examined.
*/
/* Per-bus fixup: attach the window resources to the root bus, read bridge
 * bases, and clear the host bridge's bogus BAR 0 on bus 0.
 */
void __init pcibios_fixup_bus(struct pci_bus *bus)
{
#if 0
	printk("### PCIBIOS_FIXUP_BUS(%d)\n",bus->number);
#endif
	if (bus->number == 0) {
		bus->resource[0] = &pci_ioport_resource;
		bus->resource[1] = &pci_iomem_resource;
	}
	pci_read_bridge_bases(bus);
	if (bus->number == 0) {
		struct list_head *ln;
		struct pci_dev *dev;
		for (ln=bus->devices.next; ln != &bus->devices; ln=ln->next) {
			dev = pci_dev_b(ln);
			/* devfn 0 is the host bridge; ignore its BAR 0 */
			if (dev->devfn == 0) {
				dev->resource[0].start = 0;
				dev->resource[0].end = 0;
			}
		}
	}
}
/*
* Initialization. Try all known PCI access methods. Note that we support
* using both PCI BIOS and direct access: in such cases, we use I/O ports
* to access config space, but we still keep BIOS order of cards to be
* compatible with 2.0.X. This should go away some day.
*/
/* Arch PCI bring-up: program the MB86943 bridge windows, publish the I/O
 * and memory resources, verify direct config access, then scan bus 0 and
 * run IRQ/resource fixups.  Returns -ENXIO when no MB93090-MB00 board or
 * no working bridge is present.
 */
int __init pcibios_init(void)
{
	struct pci_ops *dir = NULL;
	if (!mb93090_mb00_detected)
		return -ENXIO;
	/* Byte-swap DMA master/slave accesses and map the expansion spaces */
	__reg_MB86943_sl_ctl |= MB86943_SL_CTL_DRCT_MASTER_SWAP | MB86943_SL_CTL_DRCT_SLAVE_SWAP;
	__reg_MB86943_ecs_base(1)	= ((__region_CS2 + 0x01000000) >> 9) | 0x08000000;
	__reg_MB86943_ecs_base(2)	= ((__region_CS2 + 0x00000000) >> 9) | 0x08000000;
	*(volatile uint32_t *) (__region_CS1 + 0x848) = 0xe0000000;
	*(volatile uint32_t *) (__region_CS1 + 0x8b8) = 0x00000000;
	/* Program the CPU-visible windows onto PCI I/O and memory space */
	__reg_MB86943_sl_pci_io_base	= (__region_CS2 + 0x04000000) >> 9;
	__reg_MB86943_sl_pci_mem_base	= (__region_CS2 + 0x08000000) >> 9;
	__reg_MB86943_pci_sl_io_base	= __region_CS2 + 0x04000000;
	__reg_MB86943_pci_sl_mem_base	= __region_CS2 + 0x08000000;
	mb();
	/* enable PCI arbitration */
	__reg_MB86943_pci_arbiter	= MB86943_PCIARB_EN;
	/* Derive the resource windows back from the programmed registers */
	pci_ioport_resource.start	= (__reg_MB86943_sl_pci_io_base << 9) & 0xfffffc00;
	pci_ioport_resource.end		= (__reg_MB86943_sl_pci_io_range << 9) | 0x3ff;
	pci_ioport_resource.end		+= pci_ioport_resource.start;
	printk("PCI IO window:  %08llx-%08llx\n",
	       (unsigned long long) pci_ioport_resource.start,
	       (unsigned long long) pci_ioport_resource.end);
	pci_iomem_resource.start	= (__reg_MB86943_sl_pci_mem_base << 9) & 0xfffffc00;
	pci_iomem_resource.end		= (__reg_MB86943_sl_pci_mem_range << 9) | 0x3ff;
	pci_iomem_resource.end		+= pci_iomem_resource.start;
	/* Reserve somewhere to write to flush posted writes.  This is used by
	 * __flush_PCI_writes() from asm/io.h to force the write FIFO in the
	 * CPU-PCI bridge to flush as this doesn't happen automatically when a
	 * read is performed on the MB93090 development kit motherboard.
	 */
	pci_iomem_resource.start	+= 0x400;
	printk("PCI MEM window: %08llx-%08llx\n",
	       (unsigned long long) pci_iomem_resource.start,
	       (unsigned long long) pci_iomem_resource.end);
	printk("PCI DMA memory: %08lx-%08lx\n",
	       dma_coherent_mem_start, dma_coherent_mem_end);
	if (insert_resource(&iomem_resource, &pci_iomem_resource) < 0)
		panic("Unable to insert PCI IOMEM resource\n");
	if (insert_resource(&ioport_resource, &pci_ioport_resource) < 0)
		panic("Unable to insert PCI IOPORT resource\n");
	if (!pci_probe)
		return -ENXIO;
	dir = pci_check_direct();
	if (dir)
		pci_root_ops = dir;
	else {
		printk("PCI: No PCI bus detected\n");
		return -ENXIO;
	}
	printk("PCI: Probing PCI hardware\n");
	pci_root_bus = pci_scan_bus(0, pci_root_ops, NULL);
	pcibios_irq_init();
	pcibios_fixup_peer_bridges();
	pcibios_fixup_irqs();
	pcibios_resource_survey();
	return 0;
}
arch_initcall(pcibios_init);
/*
 * Parse "pci=" boot options.  "off" disables probing entirely;
 * "lastbus=N" caps the peer-bridge scan.  Returns NULL when the option
 * was consumed, otherwise the unrecognized string.
 */
char * __init pcibios_setup(char *str)
{
	if (strcmp(str, "off") == 0) {
		pci_probe = 0;
		return NULL;
	}
	if (strncmp(str, "lastbus=", 8) == 0) {
		pcibios_last_bus = simple_strtol(str + 8, NULL, 0);
		return NULL;
	}
	return str;
}
/*
 * Enable a device's decoders for the requested resource mask and route
 * its legacy interrupt unless MSI is already in use.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int err = pci_enable_resources(dev, mask);

	if (err < 0)
		return err;
	if (!dev->msi_enabled)
		pcibios_enable_irq(dev);
	return 0;
}
| gpl-2.0 |
jznomoney/htc_spade_2.6.35 | drivers/staging/udlfb/udlfb.c | 600 | 43499 | /*
* udlfb.c -- Framebuffer driver for DisplayLink USB controller
*
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*
* Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven,
* usb-skeleton by GregKH.
*
* Device-specific portions based on information from Displaylink, with work
* from Florian Echtler, Henrik Bjerregaard Pedersen, and others.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "udlfb.h"
/* Fixed (non-user-modifiable) framebuffer parameters advertised to fbdev;
 * line_length and smem fields are filled in per-mode elsewhere.
 */
static struct fb_fix_screeninfo dlfb_fix = {
	.id =           "udlfb",
	.type =         FB_TYPE_PACKED_PIXELS,
	.visual =       FB_VISUAL_TRUECOLOR,
	.xpanstep =     0,
	.ypanstep =     0,
	.ywrapstep =    0,
	.accel =        FB_ACCEL_NONE,
};
/* fb_info.flags: system-memory framebuffer with accelerated blit/fill/copy;
 * FBINFO_VIRTFB only exists on newer kernels, hence the guard.
 */
static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
#ifdef FBINFO_VIRTFB
		FBINFO_VIRTFB |
#endif
		FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
		FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;
/*
* There are many DisplayLink-based products, all with unique PIDs. We are able
* to support all volume ones (circa 2009) with a single driver, so we match
* globally on VID. TODO: Probe() needs to detect when we might be running
* "future" chips, and bail on those, so a compatible driver can match.
*/
/* Match on DisplayLink's vendor ID only; every product ID is handled */
static struct usb_device_id id_table[] = {
	{.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
	{},
};
MODULE_DEVICE_TABLE(usb, id_table);
#ifndef CONFIG_FB_DEFERRED_IO
#warning Please set CONFIG_FB_DEFERRED_IO option to support generic fbdev apps
#endif
#ifndef CONFIG_FB_SYS_IMAGEBLIT
#ifndef CONFIG_FB_SYS_IMAGEBLIT_MODULE
#warning Please set CONFIG_FB_SYS_IMAGEBLIT option to support fb console
#endif
#endif
#ifndef CONFIG_FB_MODE_HELPERS
#warning CONFIG_FB_MODE_HELPERS required. Expect build break
#endif
/* dlfb keeps a list of urbs for efficient bulk transfers */
static void dlfb_urb_completion(struct urb *urb);
static struct urb *dlfb_get_urb(struct dlfb_data *dev);
static int dlfb_submit_urb(struct dlfb_data *dev, struct urb * urb, size_t len);
static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size);
static void dlfb_free_urb_list(struct dlfb_data *dev);
/* other symbols with dependents */
#ifdef CONFIG_FB_DEFERRED_IO
static struct fb_deferred_io dlfb_defio;
#endif
/*
 * All DisplayLink bulk operations start with 0xAF, followed by specific code
 * All operations are written to buffers which then later get sent to device
 */
static char *dlfb_set_register(char *buf, u8 reg, u8 val)
{
	/* Emit a 4-byte "set register" command: AF 20 <reg> <val>. */
	buf[0] = 0xAF;
	buf[1] = 0x20;
	buf[2] = reg;
	buf[3] = val;
	return buf + 4;
}
/* Lock the video registers (reg 0xFF <- 0x00) before a mode update */
static char *dlfb_vidreg_lock(char *buf)
{
	return dlfb_set_register(buf, 0xFF, 0x00);
}
/* Unlock the video registers (reg 0xFF <- 0xFF) to commit a mode update */
static char *dlfb_vidreg_unlock(char *buf)
{
	return dlfb_set_register(buf, 0xFF, 0xFF);
}
/*
 * On/Off for driving the DisplayLink framebuffer to the display
 */
static char *dlfb_enable_hvsync(char *buf, bool enable)
{
	/* Register 0x1F: 0x00 drives video out, 0x01 blanks it. */
	return dlfb_set_register(buf, 0x1F, enable ? 0x00 : 0x01);
}
/* Register 0x00 selects the framebuffer color-depth mode */
static char *dlfb_set_color_depth(char *buf, u8 selection)
{
	return dlfb_set_register(buf, 0x00, selection);
}
/* Program the 16bpp framebuffer base address (24-bit, regs 0x20-0x22) */
static char *dlfb_set_base16bpp(char *wrptr, u32 base)
{
	/* the base pointer is 16 bits wide, 0x20 is hi byte. */
	wrptr = dlfb_set_register(wrptr, 0x20, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x21, base >> 8);
	return dlfb_set_register(wrptr, 0x22, base);
}
/*
 * DisplayLink HW has separate 16bpp and 8bpp framebuffers.
 * In 24bpp modes, the low 323 RGB bits go in the 8bpp framebuffer
 */
/* Program the 8bpp framebuffer base address (24-bit, regs 0x26-0x28) */
static char *dlfb_set_base8bpp(char *wrptr, u32 base)
{
	wrptr = dlfb_set_register(wrptr, 0x26, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x27, base >> 8);
	return dlfb_set_register(wrptr, 0x28, base);
}
/* Write a 16-bit value into a register pair, high byte first */
static char *dlfb_set_register_16(char *wrptr, u8 reg, u16 value)
{
	wrptr = dlfb_set_register(wrptr, reg, value >> 8);
	return dlfb_set_register(wrptr, reg+1, value);
}
/*
* This is kind of weird because the controller takes some
* register values in a different byte order than other registers.
*/
static char *dlfb_set_register_16be(char *wrptr, u8 reg, u16 value)
{
	/* opposite byte order from dlfb_set_register_16: low byte first */
	wrptr = dlfb_set_register(wrptr, reg, (u8)value);
	return dlfb_set_register(wrptr, reg + 1, (u8)(value >> 8));
}
/*
* LFSR is linear feedback shift register. The reason we have this is
* because the display controller needs to minimize the clock depth of
* various counters used in the display path. So this code reverses the
* provided value into the lfsr16 value by counting backwards to get
* the value that needs to be set in the hardware comparator to get the
* same actual count. This makes sense once you read above a couple of
* times and think about it from a hardware perspective.
*/
/*
 * Run the hardware's 16-bit LFSR forward actual_count steps from its
 * seed of 0xFFFF, yielding the comparator value that makes the hardware
 * counter expire after exactly actual_count ticks.
 */
static u16 dlfb_lfsr16(u16 actual_count)
{
	u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */
	u16 step;

	for (step = actual_count; step != 0; step--) {
		u32 feedback =
			((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1;

		lv = ((lv << 1) | feedback) & 0xFFFF;
	}

	return (u16) lv;
}
/*
* This does LFSR conversion on the value that is to be written.
* See LFSR explanation above for more detail.
*/
static char *dlfb_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
{
	/* translate into the hardware's LFSR counter domain before writing */
	u16 lfsr = dlfb_lfsr16(value);

	return dlfb_set_register_16(wrptr, reg, lfsr);
}
/*
* This takes a standard fbdev screeninfo struct and all of its monitor mode
* details and converts them into the DisplayLink equivalent register commands.
*/
static char *dlfb_set_vid_cmds(char *wrptr, struct fb_var_screeninfo *var)
{
	u16 xds, yds;
	u16 xde, yde;
	u16 yec;

	/* x display start: horizontal back porch + sync width */
	xds = var->left_margin + var->hsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x01, xds);
	/* x display end */
	xde = xds + var->xres;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x03, xde);

	/* y display start: vertical back porch + sync width */
	yds = var->upper_margin + var->vsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x05, yds);
	/* y display end */
	yde = yds + var->yres;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x07, yde);

	/* x end count is active + blanking - 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x09,
			xde + var->right_margin - 1);

	/* libdlo hardcodes hsync start to 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x0B, 1);

	/* hsync end is width of sync pulse + 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x0D, var->hsync_len + 1);

	/* hpixels is active pixels */
	wrptr = dlfb_set_register_16(wrptr, 0x0F, var->xres);

	/* yendcount is vertical active + vertical blanking */
	yec = var->yres + var->upper_margin + var->lower_margin +
			var->vsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x11, yec);

	/* libdlo hardcodes vsync start to 0 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x13, 0);

	/* vsync end is width of vsync pulse */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x15, var->vsync_len);

	/* vpixels is active pixels */
	wrptr = dlfb_set_register_16(wrptr, 0x17, var->yres);

	/*
	 * convert picoseconds to 5kHz multiple for pclk5k = x * 1E12/5k
	 * (note this register pair takes the opposite byte order)
	 */
	wrptr = dlfb_set_register_16be(wrptr, 0x1B,
			200*1000*1000/var->pixclock);

	/* caller must have reserved room for the whole sequence */
	return wrptr;
}
/*
* This takes a standard fbdev screeninfo struct that was fetched or prepared
* and then generates the appropriate command sequence that then drives the
* display controller.
*/
static int dlfb_set_video_mode(struct dlfb_data *dev,
			struct fb_var_screeninfo *var)
{
	char *buf;
	char *wrptr;
	int retval = 0;
	int writesize;
	struct urb *urb;

	/* refuse when blanked or tearing down */
	if (!atomic_read(&dev->usb_active))
		return -EPERM;

	urb = dlfb_get_urb(dev);
	if (!urb)
		return -ENOMEM;
	buf = (char *) urb->transfer_buffer;

	/*
	 * This first section has to do with setting the base address on
	 * the controller associated with the display. There are 2 base
	 * pointers; currently, we only use the 16 bpp segment.
	 */
	wrptr = dlfb_vidreg_lock(buf);
	wrptr = dlfb_set_color_depth(wrptr, 0x00);
	/* set base for 16bpp segment to 0 */
	wrptr = dlfb_set_base16bpp(wrptr, 0);
	/* set base for 8bpp segment to end of fb */
	wrptr = dlfb_set_base8bpp(wrptr, dev->info->fix.smem_len);

	wrptr = dlfb_set_vid_cmds(wrptr, var);
	wrptr = dlfb_enable_hvsync(wrptr, true);
	wrptr = dlfb_vidreg_unlock(wrptr);

	writesize = wrptr - buf;

	/* urb ownership passes to submit (completion recycles it) */
	retval = dlfb_submit_urb(dev, urb, writesize);

	return retval;
}
/*
 * Map the vmalloc'ed virtual framebuffer into a client's address space.
 * vmalloc memory is not physically contiguous, so each page is remapped
 * individually via its pfn.  Returns 0, -EINVAL for an out-of-range
 * request, or -EAGAIN if a page remap fails.
 */
static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	dl_notice("MMAP: %lu %u\n", offset + size, info->fix.smem_len);

	/*
	 * Check offset and size separately so that a huge vm_pgoff cannot
	 * wrap offset + size around and slip past the bounds check.
	 */
	if (offset > info->fix.smem_len ||
	    size > info->fix.smem_len - offset)
		return -EINVAL;

	pos = (unsigned long)info->fix.smem_start + offset;

	while (size > 0) {
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;

		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	vma->vm_flags |= VM_RESERVED;	/* avoid to swap out this VMA */
	return 0;
}
/*
* Trims identical data from front and back of line
* Sets new front buffer address and width
* And returns byte count of identical pixels
* Assumes CPU natural alignment (unsigned long)
* for back and front buffer ptrs and width
*/
static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
{
	int j, k;
	const unsigned long *back = (const unsigned long *) bback;
	const unsigned long *front = (const unsigned long *) *bfront;
	/* compare whole machine words, not bytes (callers guarantee alignment) */
	const int width = *width_bytes / sizeof(unsigned long);
	int identical = width;
	int start = width;
	int end = width;

	prefetch((void *) front);
	prefetch((void *) back);

	/* find first differing word; start stays == width if all identical */
	for (j = 0; j < width; j++) {
		if (back[j] != front[j]) {
			start = j;
			break;
		}
	}

	/* scan backwards (stopping at j) for the last differing word */
	for (k = width - 1; k > j; k--) {
		if (back[k] != front[k]) {
			end = k+1;
			break;
		}
	}

	/* words trimmed from both ends; [start, end) is the changed span */
	identical = start + (width - end);
	*bfront = (u8 *) &front[start];
	*width_bytes = (end - start) * sizeof(unsigned long);

	return identical * sizeof(unsigned long);
}
/*
* Render a command stream for an encoded horizontal line segment of pixels.
*
* A command buffer holds several commands.
* It always begins with a fresh command header
* (the protocol doesn't require this, but we enforce it to allow
* multiple buffers to be potentially encoded and sent in parallel).
* A single command encodes one contiguous horizontal line of pixels
*
* The function relies on the client to do all allocation, so that
* rendering can be done directly to output buffers (e.g. USB URBs).
* The function fills the supplied command buffer, providing information
* on where it left off, so the client may call in again with additional
* buffers if the line will take several buffers to complete.
*
* A single command can transmit a maximum of 256 pixels,
* regardless of the compression ratio (protocol design limit).
* To the hardware, 0 for a size byte means 256
*
* Rather than 256 pixel commands which are either rl or raw encoded,
* the rlx command simply assumes alternating raw and rl spans within one cmd.
* This has a slightly larger header overhead, but produces more even results.
* It also processes all data (read and write) in a single pass.
* Performance benchmarks of common cases show it having just slightly better
* compression than 256 pixel raw -or- rle commands, with similar CPU consumption.
* But for very rl friendly data, will compress not quite as well.
*/
static void dlfb_compress_hline(
	const uint16_t **pixel_start_ptr,
	const uint16_t *const pixel_end,
	uint32_t *device_address_ptr,
	uint8_t **command_buffer_ptr,
	const uint8_t *const cmd_buffer_end)
{
	const uint16_t *pixel = *pixel_start_ptr;
	uint32_t dev_addr = *device_address_ptr;
	uint8_t *cmd = *command_buffer_ptr;
	const int bpp = 2;	/* 16bpp pixels only */

	/* one iteration = one rlx command; stop when input or buffer runs out */
	while ((pixel_end > pixel) &&
	       (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
		uint8_t *raw_pixels_count_byte = 0;
		uint8_t *cmd_pixels_count_byte = 0;
		const uint16_t *raw_pixel_start = 0;
		const uint16_t *cmd_pixel_start, *cmd_pixel_end = 0;
		const uint32_t be_dev_addr = cpu_to_be32(dev_addr);

		prefetchw((void *) cmd); /* pull in one cache line at least */

		/* rlx command header: 0xAF 0x6B + 24-bit device address */
		*cmd++ = 0xAF;
		*cmd++ = 0x6B;
		*cmd++ = (uint8_t) ((be_dev_addr >> 8) & 0xFF);
		*cmd++ = (uint8_t) ((be_dev_addr >> 16) & 0xFF);
		*cmd++ = (uint8_t) ((be_dev_addr >> 24) & 0xFF);

		cmd_pixels_count_byte = cmd++; /* we'll know this later */
		cmd_pixel_start = pixel;

		raw_pixels_count_byte = cmd++; /* we'll know this later */
		raw_pixel_start = pixel;

		/* cap at 256 pixels/command, remaining input, and buffer room */
		cmd_pixel_end = pixel + min(MAX_CMD_PIXELS + 1,
			min((int)(pixel_end - pixel),
			    (int)(cmd_buffer_end - cmd) / bpp));

		prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);

		while (pixel < cmd_pixel_end) {
			const uint16_t * const repeating_pixel = pixel;

			/* copy one raw pixel, converted to big endian */
			*(uint16_t *)cmd = cpu_to_be16p(pixel);
			cmd += 2;
			pixel++;

			if (unlikely((pixel < cmd_pixel_end) &&
				     (*pixel == *repeating_pixel))) {
				/* go back and fill in raw pixel count */
				*raw_pixels_count_byte = ((repeating_pixel -
						raw_pixel_start) + 1) & 0xFF;

				/* consume the whole run of identical pixels */
				while ((pixel < cmd_pixel_end)
				       && (*pixel == *repeating_pixel)) {
					pixel++;
				}

				/* immediately after raw data is repeat byte */
				*cmd++ = ((pixel - repeating_pixel) - 1) & 0xFF;

				/* Then start another raw pixel span */
				raw_pixel_start = pixel;
				raw_pixels_count_byte = cmd++;
			}
		}

		if (pixel > raw_pixel_start) {
			/* finalize last RAW span */
			*raw_pixels_count_byte = (pixel-raw_pixel_start) & 0xFF;
		}

		/* & 0xFF: to the hardware, a count byte of 0 means 256 */
		*cmd_pixels_count_byte = (pixel - cmd_pixel_start) & 0xFF;
		dev_addr += (pixel - cmd_pixel_start) * bpp;
	}

	if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
		/* Fill leftover bytes with no-ops */
		if (cmd_buffer_end > cmd)
			memset(cmd, 0xAF, cmd_buffer_end - cmd);
		cmd = (uint8_t *) cmd_buffer_end;
	}

	/* report progress so the caller can continue with a fresh buffer */
	*command_buffer_ptr = cmd;
	*pixel_start_ptr = pixel;
	*device_address_ptr = dev_addr;

	return;
}
/*
* There are 3 copies of every pixel: The front buffer that the fbdev
* client renders to, the actual framebuffer across the USB bus in hardware
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
static void dlfb_render_hline(struct dlfb_data *dev, struct urb **urb_ptr,
			      const char *front, char **urb_buf_ptr,
			      u32 byte_offset, u32 byte_width,
			      int *ident_ptr, int *sent_ptr)
{
	const u8 *line_start, *line_end, *next_pixel;
	u32 dev_addr = dev->base16 + byte_offset;
	struct urb *urb = *urb_ptr;
	u8 *cmd = *urb_buf_ptr;
	u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;

	line_start = (u8 *) (front + byte_offset);
	next_pixel = line_start;
	line_end = next_pixel + byte_width;

	if (dev->backing_buffer) {
		int offset;
		const u8 *back_start = (u8 *) (dev->backing_buffer
						+ byte_offset);

		/* trim spans identical to the shadow copy from both ends */
		*ident_ptr += dlfb_trim_hline(back_start, &next_pixel,
			&byte_width);

		offset = next_pixel - line_start;
		line_end = next_pixel + byte_width;
		dev_addr += offset;
		back_start += offset;
		line_start += offset;

		/* bring the shadow copy up to date with what we will send */
		memcpy((char *)back_start, (char *) line_start,
		       byte_width);
	}

	while (next_pixel < line_end) {
		dlfb_compress_hline((const uint16_t **) &next_pixel,
			     (const uint16_t *) line_end, &dev_addr,
			(u8 **) &cmd, (u8 *) cmd_end);

		if (cmd >= cmd_end) {
			/* this urb is full: submit it and chain a fresh one */
			int len = cmd - (u8 *) urb->transfer_buffer;
			if (dlfb_submit_urb(dev, urb, len))
				return; /* lost pixels is set */
			*sent_ptr += len;
			urb = dlfb_get_urb(dev);
			if (!urb)
				return; /* lost_pixels is set */
			*urb_ptr = urb;
			cmd = urb->transfer_buffer;
			cmd_end = &cmd[urb->transfer_buffer_length];
		}
	}

	/* partial buffer is handed back; caller submits or extends it */
	*urb_buf_ptr = cmd;
}
/*
 * Render and transmit a damaged rectangle of the virtual framebuffer.
 * x is aligned down (and width padded up) to unsigned long boundaries so
 * dlfb_trim_hline() can compare naturally-aligned words.
 * Returns 0 on success or when the device is inactive / out of urbs
 * (best effort), -EINVAL for a rectangle outside the framebuffer.
 * NOTE(review): the 'data' parameter is unused; lines are always read
 * from info->fix.smem_start.
 */
int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
	       int width, int height, char *data)
{
	int i;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	struct urb *urb;
	int aligned_x;

	start_cycles = get_cycles();

	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
	x = aligned_x;

	if ((width <= 0) ||
	    (x + width > dev->info->var.xres) ||
	    (y + height > dev->info->var.yres))
		return -EINVAL;

	/* blanked or disconnecting: silently drop the damage */
	if (!atomic_read(&dev->usb_active))
		return 0;

	urb = dlfb_get_urb(dev);
	if (!urb)
		return 0;
	cmd = urb->transfer_buffer;

	for (i = y; i < y + height ; i++) {
		const int line_offset = dev->info->fix.line_length * i;
		const int byte_offset = line_offset + (x * BPP);

		dlfb_render_hline(dev, &urb, (char *) dev->info->fix.smem_start,
			&cmd, byte_offset, width * BPP,
			&bytes_identical, &bytes_sent);
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		/* best effort: on failure, lost_pixels is already set */
		dlfb_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		dlfb_urb_completion(urb);

	/* roll the performance counters exposed via sysfs */
	atomic_add(bytes_sent, &dev->bytes_sent);
	atomic_add(bytes_identical, &dev->bytes_identical);
	atomic_add(width*height*2, &dev->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &dev->cpu_kcycles_used);

	return 0;
}
/* hardware has native COPY command (see libdlo), but not worth it for fbcon */
static void dlfb_ops_copyarea(struct fb_info *info,
			      const struct fb_copyarea *area)
{
	struct dlfb_data *dev = info->par;

#if defined CONFIG_FB_SYS_COPYAREA || defined CONFIG_FB_SYS_COPYAREA_MODULE
	/* render into system memory, then push the damaged rect over USB */
	sys_copyarea(info, area);

	dlfb_handle_damage(dev, area->dx, area->dy, area->width, area->height,
			   info->screen_base);
#endif
	/* counted even when CONFIG_FB_SYS_COPYAREA is off */
	atomic_inc(&dev->copy_count);
}
static void dlfb_ops_imageblit(struct fb_info *info,
			       const struct fb_image *image)
{
	struct dlfb_data *dev = info->par;

#if defined CONFIG_FB_SYS_IMAGEBLIT || defined CONFIG_FB_SYS_IMAGEBLIT_MODULE
	/* render into system memory, then push the damaged rect over USB */
	sys_imageblit(info, image);

	dlfb_handle_damage(dev, image->dx, image->dy, image->width,
			   image->height, info->screen_base);
#endif
	/* counted even when CONFIG_FB_SYS_IMAGEBLIT is off */
	atomic_inc(&dev->blit_count);
}
static void dlfb_ops_fillrect(struct fb_info *info,
			      const struct fb_fillrect *rect)
{
	struct dlfb_data *dev = info->par;

#if defined CONFIG_FB_SYS_FILLRECT || defined CONFIG_FB_SYS_FILLRECT_MODULE
	/* render into system memory, then push the damaged rect over USB */
	sys_fillrect(info, rect);

	dlfb_handle_damage(dev, rect->dx, rect->dy, rect->width, rect->height,
			   info->screen_base);
#endif
	/* counted even when CONFIG_FB_SYS_FILLRECT is off */
	atomic_inc(&dev->fill_count);
}
static void dlfb_get_edid(struct dlfb_data *dev)
{
int i;
int ret;
char rbuf[2];
for (i = 0; i < sizeof(dev->edid); i++) {
ret = usb_control_msg(dev->udev,
usb_rcvctrlpipe(dev->udev, 0), (0x02),
(0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
0);
dev->edid[i] = rbuf[1];
}
}
static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct dlfb_data *dev = info->par;
struct dloarea *area = NULL;
if (!atomic_read(&dev->usb_active))
return 0;
/* TODO: Update X server to get this from sysfs instead */
if (cmd == DLFB_IOCTL_RETURN_EDID) {
char *edid = (char *)arg;
dlfb_get_edid(dev);
if (copy_to_user(edid, dev->edid, sizeof(dev->edid)))
return -EFAULT;
return 0;
}
/* TODO: Help propose a standard fb.h ioctl to report mmap damage */
if (cmd == DLFB_IOCTL_REPORT_DAMAGE) {
area = (struct dloarea *)arg;
if (area->x < 0)
area->x = 0;
if (area->x > info->var.xres)
area->x = info->var.xres;
if (area->y < 0)
area->y = 0;
if (area->y > info->var.yres)
area->y = info->var.yres;
atomic_set(&dev->use_defio, 0);
dlfb_handle_damage(dev, area->x, area->y, area->w, area->h,
info->screen_base);
atomic_inc(&dev->damage_count);
}
return 0;
}
/* taken from vesafb */
static int
dlfb_ops_setcolreg(unsigned regno, unsigned red, unsigned green,
unsigned blue, unsigned transp, struct fb_info *info)
{
int err = 0;
if (regno >= info->cmap.len)
return 1;
if (regno < 16) {
if (info->var.red.offset == 10) {
/* 1:5:5:5 */
((u32 *) (info->pseudo_palette))[regno] =
((red & 0xf800) >> 1) |
((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11);
} else {
/* 0:5:6:5 */
((u32 *) (info->pseudo_palette))[regno] =
((red & 0xf800)) |
((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
}
}
return err;
}
/*
* It's common for several clients to have framebuffer open simultaneously.
* e.g. both fbcon and X. Makes things interesting.
*/
static int dlfb_ops_open(struct fb_info *info, int user)
{
	struct dlfb_data *dev = info->par;

	/* if (user == 0)
	 * We could special case kernel mode clients (fbcon) here
	 */

	/* fb_open_lock serializes fb_count and the one-time defio setup */
	mutex_lock(&dev->fb_open_lock);

	dev->fb_count++;

#ifdef CONFIG_FB_DEFERRED_IO
	/* first client enables defio, unless it was disabled via sysfs */
	if ((atomic_read(&dev->use_defio)) && (info->fbdefio == NULL)) {
		/* enable defio */
		info->fbdefio = &dlfb_defio;
		fb_deferred_io_init(info);
	}
#endif

	dl_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
	    info->node, user, info, dev->fb_count);

	mutex_unlock(&dev->fb_open_lock);

	return 0;
}
static int dlfb_ops_release(struct fb_info *info, int user)
{
	struct dlfb_data *dev = info->par;

	mutex_lock(&dev->fb_open_lock);

	dev->fb_count--;

#ifdef CONFIG_FB_DEFERRED_IO
	/* last client gone: tear down defio and restore the plain mmap op */
	if ((dev->fb_count == 0) && (info->fbdefio)) {
		fb_deferred_io_cleanup(info);
		info->fbdefio = NULL;
		info->fbops->fb_mmap = dlfb_ops_mmap;
	}
#endif

	dl_notice("release /dev/fb%d user=%d count=%d\n",
		  info->node, user, dev->fb_count);

	mutex_unlock(&dev->fb_open_lock);

	return 0;
}
/*
* Called when all client interfaces to start transactions have been disabled,
* and all references to our device instance (dlfb_data) are released.
* Every transaction must have a reference, so we know are fully spun down
*/
/*
 * Final teardown of the per-device state; runs when the last kref is
 * dropped (both the usb disconnect and fbdev destroy paths have let go).
 */
static void dlfb_delete(struct kref *kref)
{
	struct dlfb_data *dev = container_of(kref, struct dlfb_data, kref);

	/* vfree(NULL) is a no-op, so no guard is needed */
	vfree(dev->backing_buffer);

	mutex_destroy(&dev->fb_open_lock);

	kfree(dev);
}
/*
* Called by fbdev as last part of unregister_framebuffer() process
* No new clients can open connections. Deallocate everything fb_info.
*/
/*
 * Called by fbdev as last part of unregister_framebuffer() process.
 * No new clients can open connections.  Deallocate everything fb_info,
 * then drop the dlfb_data reference taken for fbdev clients.
 */
static void dlfb_ops_destroy(struct fb_info *info)
{
	/* grab dev before framebuffer_release() frees info */
	struct dlfb_data *dev = info->par;

	if (info->cmap.len != 0)
		fb_dealloc_cmap(&info->cmap);
	if (info->monspecs.modedb)
		fb_destroy_modedb(info->monspecs.modedb);
	/* vfree(NULL) is a no-op, so no guard is needed */
	vfree(info->screen_base);

	fb_destroy_modelist(&info->modelist);

	framebuffer_release(info);

	/* ref taken before register_framebuffer() for dlfb_data clients */
	kref_put(&dev->kref, dlfb_delete);
}
/*
* Check whether a video mode is supported by the DisplayLink chip
* We start from monitor's modes, so don't need to filter that here
*/
static int dlfb_is_valid_mode(struct fb_videomode *mode,
		struct fb_info *info)
{
	struct dlfb_data *dev = info->par;

	/* reject any mode whose pixel count exceeds this SKU's limit */
	return (mode->xres * mode->yres <= dev->sku_pixel_limit) ? 1 : 0;
}
static void dlfb_var_color_format(struct fb_var_screeninfo *var)
{
const struct fb_bitfield red = { 11, 5, 0 };
const struct fb_bitfield green = { 5, 6, 0 };
const struct fb_bitfield blue = { 0, 5, 0 };
var->bits_per_pixel = 16;
var->red = red;
var->green = green;
var->blue = blue;
}
/*
 * Validate a requested mode: it must fit in the allocated framebuffer
 * memory and be drivable by the chip.  Also forces our fixed RGB565
 * color layout onto var.
 */
static int dlfb_ops_check_var(struct fb_var_screeninfo *var,
				struct fb_info *info)
{
	struct fb_videomode mode;

	/* TODO: support dynamically changing framebuffer size */
	if ((var->xres * var->yres * 2) > info->fix.smem_len)
		return -EINVAL;

	/* set device-specific elements of var unrelated to mode */
	dlfb_var_color_format(var);

	fb_var_to_videomode(&mode, var);

	return dlfb_is_valid_mode(&mode, info) ? 0 : -EINVAL;
}
/* Push the already-validated mode in info->var out to the controller. */
static int dlfb_ops_set_par(struct fb_info *info)
{
	struct dlfb_data *dev = info->par;

	dl_notice("set_par mode %dx%d\n", info->var.xres, info->var.yres);

	return dlfb_set_video_mode(dev, &info->var);
}
static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
{
struct dlfb_data *dev = info->par;
char *bufptr;
struct urb *urb;
urb = dlfb_get_urb(dev);
if (!urb)
return 0;
bufptr = (char *) urb->transfer_buffer;
/* overloading usb_active. UNBLANK can conflict with teardown */
bufptr = dlfb_vidreg_lock(bufptr);
if (blank_mode != FB_BLANK_UNBLANK) {
atomic_set(&dev->usb_active, 0);
bufptr = dlfb_enable_hvsync(bufptr, false);
} else {
atomic_set(&dev->usb_active, 1);
bufptr = dlfb_enable_hvsync(bufptr, true);
}
bufptr = dlfb_vidreg_unlock(bufptr);
dlfb_submit_urb(dev, urb, bufptr - (char *) urb->transfer_buffer);
return 0;
}
/* fbdev entry points; drawing ops render via sys_* then send USB damage */
static struct fb_ops dlfb_ops = {
	.owner = THIS_MODULE,
	.fb_setcolreg = dlfb_ops_setcolreg,
	.fb_fillrect = dlfb_ops_fillrect,
	.fb_copyarea = dlfb_ops_copyarea,
	.fb_imageblit = dlfb_ops_imageblit,
	.fb_mmap = dlfb_ops_mmap,
	.fb_ioctl = dlfb_ops_ioctl,
	.fb_open = dlfb_ops_open,
	.fb_release = dlfb_ops_release,
	.fb_blank = dlfb_ops_blank,
	.fb_check_var = dlfb_ops_check_var,
	.fb_set_par = dlfb_ops_set_par,
};
/*
* Calls dlfb_get_edid() to query the EDID of attached monitor via usb cmds
* Then parses EDID into three places used by various parts of fbdev:
* fb_var_screeninfo contains the timing of the monitor's preferred mode
* fb_info.monspecs is full parsed EDID info, including monspecs.modedb
* fb_info.modelist is a linked list of all monitor & VESA modes which work
*
* If EDID is not readable/valid, then modelist is all VESA modes,
* monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode
* Returns 0 if EDID parses successfully
*/
static int dlfb_parse_edid(struct dlfb_data *dev,
			   struct fb_var_screeninfo *var,
			   struct fb_info *info)
{
	int i;
	const struct fb_videomode *default_vmode = NULL;
	int result = 0;

	/* start from a clean slate; this may be a re-probe */
	fb_destroy_modelist(&info->modelist);
	memset(&info->monspecs, 0, sizeof(info->monspecs));

	dlfb_get_edid(dev);
	fb_edid_to_monspecs(dev->edid, &info->monspecs);

	if (info->monspecs.modedb_len > 0) {
		/* keep only the monitor modes the chip can actually drive */
		for (i = 0; i < info->monspecs.modedb_len; i++) {
			if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
				fb_add_videomode(&info->monspecs.modedb[i],
					&info->modelist);
		}

		default_vmode = fb_find_best_display(&info->monspecs,
						     &info->modelist);
	} else {
		struct fb_videomode fb_vmode = {0};

		dl_err("Unable to get valid EDID from device/display\n");
		result = 1;

		/*
		 * Add the standard VESA modes to our modelist
		 * Since we don't have EDID, there may be modes that
		 * overspec monitor and/or are incorrect aspect ratio, etc.
		 * But at least the user has a chance to choose
		 */
		for (i = 0; i < VESA_MODEDB_SIZE; i++) {
			if (dlfb_is_valid_mode((struct fb_videomode *)
						&vesa_modes[i], info))
				fb_add_videomode(&vesa_modes[i],
						 &info->modelist);
		}

		/*
		 * default to resolution safe for projectors
		 * (since they are most common case without EDID)
		 */
		fb_vmode.xres = 800;
		fb_vmode.yres = 600;
		fb_vmode.refresh = 60;
		default_vmode = fb_find_nearest_mode(&fb_vmode,
						     &info->modelist);
	}

	/* hand the chosen mode back in var, forced to our RGB565 layout */
	fb_videomode_to_var(var, default_vmode);
	dlfb_var_color_format(var);

	return result;
}
/* sysfs read: bytes of framebuffer content rendered since last reset */
static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	int rendered = atomic_read(&dev->bytes_rendered);

	return snprintf(buf, PAGE_SIZE, "%u\n", rendered);
}
/* sysfs read: bytes skipped as identical to the shadow buffer */
static ssize_t metrics_bytes_identical_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	int identical = atomic_read(&dev->bytes_identical);

	return snprintf(buf, PAGE_SIZE, "%u\n", identical);
}
/* sysfs read: bytes actually transmitted over USB since last reset */
static ssize_t metrics_bytes_sent_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	int sent = atomic_read(&dev->bytes_sent);

	return snprintf(buf, PAGE_SIZE, "%u\n", sent);
}
/* sysfs read: CPU kilocycles spent rendering since last reset */
static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	int kcycles = atomic_read(&dev->cpu_kcycles_used);

	return snprintf(buf, PAGE_SIZE, "%u\n", kcycles);
}
/* sysfs read: human-readable summary of all driver counters and state */
static ssize_t metrics_misc_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	/*
	 * NOTE(review): reads limit_sem.count directly — semaphore
	 * internals, not a public API; confirm or switch to tracking the
	 * in-flight count separately.
	 */
	return snprintf(buf, PAGE_SIZE,
			"Calls to\ndamage: %u\nblit: %u\n"
			"defio faults: %u\ncopy: %u\n"
			"fill: %u\n\n"
			"active framebuffer clients: %d\n"
			"urbs available %d(%d)\n"
			"Shadow framebuffer in use? %s\n"
			"Any lost pixels? %s\n",
			atomic_read(&dev->damage_count),
			atomic_read(&dev->blit_count),
			atomic_read(&dev->defio_fault_count),
			atomic_read(&dev->copy_count),
			atomic_read(&dev->fill_count),
			dev->fb_count,
			dev->urbs.available, dev->urbs.limit_sem.count,
			(dev->backing_buffer) ? "yes" : "no",
			atomic_read(&dev->lost_pixels) ? "yes" : "no");
}
/*
 * sysfs binary read handler for the "edid" attribute.  Re-fetches and
 * re-parses the EDID from the device on every read (a side effect that
 * also refreshes info->var/modelist), then copies out the requested
 * window of the raw blob.  Returns 0 when EDID is unreadable/invalid.
 */
static ssize_t edid_show(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *a,
			 char *buf, loff_t off, size_t count) {
	struct device *fbdev = container_of(kobj, struct device, kobj);
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	char *edid = &dev->edid[0];
	const size_t size = sizeof(dev->edid);

	if (dlfb_parse_edid(dev, &fb_info->var, fb_info))
		return 0;

	/* clamp [off, off+count) to the blob size, sysfs-style */
	if (off >= size)
		return 0;

	if (off + count > size)
		count = size - off;
	memcpy(buf, edid + off, count);

	return count;
}
/* sysfs write: any write zeroes every performance counter */
static ssize_t metrics_reset_store(struct device *fbdev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	atomic_t *counters[] = {
		&dev->bytes_rendered,
		&dev->bytes_identical,
		&dev->bytes_sent,
		&dev->cpu_kcycles_used,
		&dev->blit_count,
		&dev->copy_count,
		&dev->fill_count,
		&dev->defio_fault_count,
		&dev->damage_count,
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(counters); i++)
		atomic_set(counters[i], 0);

	return count;
}
/* sysfs read: report whether deferred-io damage tracking is enabled */
static ssize_t use_defio_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	int enabled = atomic_read(&dev->use_defio);

	return snprintf(buf, PAGE_SIZE, "%d\n", enabled);
}
/* sysfs write: '0' disables, '1' enables deferred io; anything else ignored */
static ssize_t use_defio_store(struct device *fbdev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;

	if (count > 0) {
		switch (buf[0]) {
		case '0':
			atomic_set(&dev->use_defio, 0);
			break;
		case '1':
			atomic_set(&dev->use_defio, 1);
			break;
		}
	}

	return count;
}
/* read-only sysfs binary node exposing the raw 128-byte monitor EDID */
static struct bin_attribute edid_attr = {
	.attr.name = "edid",
	.attr.mode = 0444,
	.size = 128,
	.read = edid_show,
};
static struct device_attribute fb_device_attrs[] = {
__ATTR_RO(metrics_bytes_rendered),
__ATTR_RO(metrics_bytes_identical),
__ATTR_RO(metrics_bytes_sent),
__ATTR_RO(metrics_cpu_kcycles_used),
__ATTR_RO(metrics_misc),
__ATTR(metrics_reset, S_IWUGO, NULL, metrics_reset_store),
__ATTR_RW(use_defio),
};
#ifdef CONFIG_FB_DEFERRED_IO
/*
 * Deferred-io flush callback: render every faulted (written) page of the
 * mmap'ed framebuffer to the device and update the perf counters.
 */
static void dlfb_dpy_deferred_io(struct fb_info *info,
				struct list_head *pagelist)
{
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct dlfb_data *dev = info->par;
	struct urb *urb;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	int bytes_rendered = 0;
	int fault_count = 0;

	/* nothing to do if defio was disabled (e.g. client reports damage) */
	if (!atomic_read(&dev->use_defio))
		return;

	if (!atomic_read(&dev->usb_active))
		return;

	start_cycles = get_cycles();

	urb = dlfb_get_urb(dev);
	if (!urb)
		return;
	cmd = urb->transfer_buffer;

	/* walk the written page list and render each to device */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		dlfb_render_hline(dev, &urb, (char *) info->fix.smem_start,
				  &cmd, cur->index << PAGE_SHIFT,
				  PAGE_SIZE, &bytes_identical, &bytes_sent);
		bytes_rendered += PAGE_SIZE;
		fault_count++;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		dlfb_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		dlfb_urb_completion(urb);

	/* roll the performance counters exposed via sysfs */
	atomic_add(fault_count, &dev->defio_fault_count);
	atomic_add(bytes_sent, &dev->bytes_sent);
	atomic_add(bytes_identical, &dev->bytes_identical);
	atomic_add(bytes_rendered, &dev->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &dev->cpu_kcycles_used);
}
/*
 * Deferred-io descriptor: collect writes to the mmap'ed framebuffer and
 * flush them via dlfb_dpy_deferred_io.
 * NOTE(review): .delay is presumably in jiffies, making 5 HZ-dependent —
 * confirm whether an HZ-based value was intended.
 */
static struct fb_deferred_io dlfb_defio = {
	.delay = 5,
	.deferred_io = dlfb_dpy_deferred_io,
};
#endif
/*
* This is necessary before we can communicate with the display controller.
*/
static int dlfb_select_std_channel(struct dlfb_data *dev)
{
int ret;
u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7,
0x1C, 0x88, 0x5E, 0x15,
0x60, 0xFE, 0xC6, 0x97,
0x16, 0x3D, 0x47, 0xF2 };
ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
NR_USB_REQUEST_CHANNEL,
(USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
return ret;
}
/*
 * Probe: allocate per-device state, read EDID to size the virtual
 * framebuffer, bring the device up, and register with fbdev.
 * Two krefs are taken up front: one released by .disconnect, one by
 * fb_destroy (dlfb_ops_destroy), so dlfb_data outlives both clients.
 */
static int dlfb_usb_probe(struct usb_interface *interface,
			  const struct usb_device_id *id)
{
	struct usb_device *usbdev;
	struct dlfb_data *dev;
	struct fb_info *info;
	int videomemorysize;
	int i;
	unsigned char *videomemory;
	int retval = -ENOMEM;
	struct fb_var_screeninfo *var;
	int registered = 0;
	u16 *pix_framebuffer;

	/* usb initialization */

	usbdev = interface_to_usbdev(interface);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		err("dlfb_usb_probe: failed alloc of dev struct\n");
		goto error;
	}

	/* we need to wait for both usb and fbdev to spin down on disconnect */
	kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */
	kref_get(&dev->kref); /* matching kref_put in .fb_destroy function*/

	dev->udev = usbdev;
	dev->gdev = &usbdev->dev; /* our generic struct device * */
	usb_set_intfdata(interface, dev);

	if (!dlfb_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		retval = -ENOMEM;
		dl_err("dlfb_alloc_urb_list failed\n");
		goto error;
	}

	mutex_init(&dev->fb_open_lock);

	/* We don't register a new USB class. Our client interface is fbdev */

	/* allocates framebuffer driver structure, not framebuffer memory */
	info = framebuffer_alloc(0, &usbdev->dev);
	if (!info) {
		retval = -ENOMEM;
		dl_err("framebuffer_alloc failed\n");
		goto error;
	}

	dev->info = info;
	info->par = dev;
	info->pseudo_palette = dev->pseudo_palette;
	info->fbops = &dlfb_ops;

	var = &info->var;

	/* TODO set limit based on actual SKU detection */
	dev->sku_pixel_limit = 2048 * 1152;

	INIT_LIST_HEAD(&info->modelist);
	/* sets var to the monitor's preferred (or fallback VESA) mode */
	dlfb_parse_edid(dev, var, info);

	/*
	 * ok, now that we've got the size info, we can alloc our framebuffer.
	 */
	info->fix = dlfb_fix;
	info->fix.line_length = var->xres * (var->bits_per_pixel / 8);
	videomemorysize = info->fix.line_length * var->yres;

	/*
	 * The big chunk of system memory we use as a virtual framebuffer.
	 * TODO: Handle fbcon cursor code calling blit in interrupt context
	 */
	videomemory = vmalloc(videomemorysize);
	if (!videomemory) {
		retval = -ENOMEM;
		dl_err("Virtual framebuffer alloc failed\n");
		goto error;
	}

	info->screen_base = videomemory;
	info->fix.smem_len = PAGE_ALIGN(videomemorysize);
	info->fix.smem_start = (unsigned long) videomemory;
	info->flags = udlfb_info_flags;

	/*
	 * Second framebuffer copy, mirroring the state of the framebuffer
	 * on the physical USB device. We can function without this.
	 * But with imperfect damage info we may end up sending pixels over USB
	 * that were, in fact, unchanged -- wasting limited USB bandwidth
	 */
	dev->backing_buffer = vmalloc(videomemorysize);
	if (!dev->backing_buffer)
		dl_warn("No shadow/backing buffer allcoated\n");
	else
		memset(dev->backing_buffer, 0, videomemorysize);

	retval = fb_alloc_cmap(&info->cmap, 256, 0);
	if (retval < 0) {
		dl_err("fb_alloc_cmap failed %x\n", retval);
		goto error;
	}

	/* ready to begin using device */

#ifdef CONFIG_FB_DEFERRED_IO
	atomic_set(&dev->use_defio, 1);
#endif
	atomic_set(&dev->usb_active, 1);
	dlfb_select_std_channel(dev);

	dlfb_ops_check_var(var, info);
	dlfb_ops_set_par(info);

	/* paint greenscreen (0x37e6 is a mid-green in RGB565) */
	pix_framebuffer = (u16 *) videomemory;
	for (i = 0; i < videomemorysize / 2; i++)
		pix_framebuffer[i] = 0x37e6;

	dlfb_handle_damage(dev, 0, 0, info->var.xres, info->var.yres,
				videomemory);

	retval = register_framebuffer(info);
	if (retval < 0) {
		dl_err("register_framebuffer failed %d\n", retval);
		goto error;
	}
	registered = 1;

	for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
		device_create_file(info->dev, &fb_device_attrs[i]);

	device_create_bin_file(info->dev, &edid_attr);

	dl_err("DisplayLink USB device /dev/fb%d attached. %dx%d resolution."
			" Using %dK framebuffer memory\n", info->node,
			var->xres, var->yres,
			((dev->backing_buffer) ?
			videomemorysize * 2 : videomemorysize) >> 10);
	return 0;

error:
	if (dev) {
		/* destroy path drops the fb_destroy kref itself */
		if (registered) {
			unregister_framebuffer(info);
			dlfb_ops_destroy(info);
		} else
			kref_put(&dev->kref, dlfb_delete);

		if (dev->urbs.count > 0)
			dlfb_free_urb_list(dev);
		kref_put(&dev->kref, dlfb_delete); /* last ref from kref_init */

		/* dev has been deallocated. Do not dereference */
	}

	return retval;
}
/*
 * USB disconnect: stop new transfers, wait for in-flight urbs, then tear
 * down the fbdev registration and drop the probe-time kref.
 */
static void dlfb_usb_disconnect(struct usb_interface *interface)
{
	struct dlfb_data *dev;
	struct fb_info *info;
	int i;

	dev = usb_get_intfdata(interface);
	info = dev->info;

	/* when non-active we'll update virtual framebuffer, but no new urbs */
	atomic_set(&dev->usb_active, 0);
	usb_set_intfdata(interface, NULL);

	/* this function will wait for all in-flight urbs to complete */
	dlfb_free_urb_list(dev);

	/*
	 * All info dereferences (including the sysfs attribute removal,
	 * which the old code did unconditionally) must stay inside this
	 * NULL check — the rest of the function already allows for a
	 * probe that failed before info was allocated.
	 */
	if (info) {
		for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
			device_remove_file(info->dev, &fb_device_attrs[i]);

		device_remove_bin_file(info->dev, &edid_attr);

		dl_notice("Detaching /dev/fb%d\n", info->node);
		unregister_framebuffer(info);
		dlfb_ops_destroy(info);
	}

	/* release reference taken by kref_init in probe() */
	kref_put(&dev->kref, dlfb_delete);

	/* consider dlfb_data freed */

	return;
}
/* USB driver glue: matches on DisplayLink vendor ID via id_table. */
static struct usb_driver dlfb_driver = {
	.name = "udlfb",
	.probe = dlfb_usb_probe,
	.disconnect = dlfb_usb_disconnect,
	.id_table = id_table,
};
/*
 * Module load: register the USB driver with the core.
 * Fix: only announce success when usb_register() actually succeeded;
 * the original printed "VMODES initialized" even on failure.
 */
static int __init dlfb_module_init(void)
{
	int res;

	res = usb_register(&dlfb_driver);
	if (res)
		err("usb_register failed. Error number %d", res);
	else
		printk(KERN_INFO "VMODES initialized\n");

	return res;
}
/* Module unload: deregister; per-device teardown happens in disconnect(). */
static void __exit dlfb_module_exit(void)
{
	usb_deregister(&dlfb_driver);
}
module_init(dlfb_module_init);
module_exit(dlfb_module_exit);
/*
 * Bulk-urb completion handler: logs real transfer errors, restores the
 * urb's full buffer length, returns the urb node to the free list, and
 * releases one slot on the limit semaphore so waiters in dlfb_get_urb()
 * can proceed.
 */
static void dlfb_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct dlfb_data *dev = unode->dev;
	unsigned long irq_flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status &&
	    urb->status != -ENOENT &&
	    urb->status != -ECONNRESET &&
	    urb->status != -ESHUTDOWN) {
		dl_err("%s - nonzero write bulk status received: %d\n",
			__func__, urb->status);
		atomic_set(&dev->lost_pixels, 1);
	}

	urb->transfer_buffer_length = dev->urbs.size; /* reset to actual */

	spin_lock_irqsave(&dev->urbs.lock, irq_flags);
	list_add_tail(&unode->entry, &dev->urbs.list);
	dev->urbs.available++;
	spin_unlock_irqrestore(&dev->urbs.lock, irq_flags);

	up(&dev->urbs.limit_sem);
}
/*
 * Wait for every outstanding render urb to complete, then free all of
 * them. Each down_timeout() reserves one completed urb (the completion
 * handler does the matching up()), so list_del is safe without checking
 * for emptiness. Drops the reference taken in dlfb_alloc_urb_list().
 */
static void dlfb_free_urb_list(struct dlfb_data *dev)
{
	int count = dev->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;
	int ret;
	unsigned long flags;

	dl_notice("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {
		/* Timeout means a memory leak and/or fault */
		ret = down_timeout(&dev->urbs.limit_sem, FREE_URB_TIMEOUT);
		if (ret) {
			/* NOTE(review): BUG_ON(ret) always fires here, so
			 * the break below is unreachable — intentional
			 * hard stop on a lost urb? worth confirming */
			BUG_ON(ret);
			break;
		}
		spin_lock_irqsave(&dev->urbs.lock, flags);
		node = dev->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);
		spin_unlock_irqrestore(&dev->urbs.lock, flags);
		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;
		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, dev->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		kfree(node);
	}
	kref_put(&dev->kref, dlfb_delete);
}
/*
 * Pre-allocate 'count' bulk urbs of 'size' bytes each for pixel/command
 * transfers. Returns the number actually allocated (may be fewer than
 * requested on low memory). Takes a dlfb_data reference that is released
 * in dlfb_free_urb_list().
 *
 * Fix: allocate exactly 'size' bytes per buffer. The original allocated
 * MAX_TRANSFER bytes, but the completion handler resets
 * transfer_buffer_length to dev->urbs.size and dlfb_free_urb_list()
 * frees dev->urbs.size bytes — a mismatch whenever size != MAX_TRANSFER
 * (buffer overrun if larger, wrong-sized free either way).
 */
static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size)
{
	int i = 0;
	struct urb *urb;
	struct urb_node *unode;
	char *buf;

	spin_lock_init(&dev->urbs.lock);
	dev->urbs.size = size;
	INIT_LIST_HEAD(&dev->urbs.list);

	while (i < count) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = dev;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(dev->udev, size, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1),
			buf, size, dlfb_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &dev->urbs.list);

		i++;
	}

	sema_init(&dev->urbs.limit_sem, i);
	dev->urbs.count = i;
	dev->urbs.available = i;

	kref_get(&dev->kref); /* released in dlfb_free_urb_list() */

	dl_notice("allocated %d %d byte urbs\n", i, (int) size);

	return i;
}
/*
 * Take one free urb from the pool, blocking (with timeout) until a
 * completion returns one. Returns NULL on timeout/interruption, in
 * which case lost_pixels is flagged.
 */
static struct urb *dlfb_get_urb(struct dlfb_data *dev)
{
	struct list_head *entry;
	struct urb_node *unode;
	unsigned long irq_flags;
	int ret;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&dev->lost_pixels, 1);
		dl_err("wait for urb interrupted: %x\n", ret);
		return NULL;
	}

	spin_lock_irqsave(&dev->urbs.lock, irq_flags);
	BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */
	entry = dev->urbs.list.next;
	list_del_init(entry);
	dev->urbs.available--;
	spin_unlock_irqrestore(&dev->urbs.lock, irq_flags);

	unode = list_entry(entry, struct urb_node, entry);
	return unode->urb;
}
/*
 * Submit a pool urb carrying 'len' payload bytes. On failure the urb is
 * handed straight to the completion handler so it returns to the free
 * list, and lost_pixels is flagged. Returns usb_submit_urb()'s result.
 */
static int dlfb_submit_urb(struct dlfb_data *dev, struct urb *urb, size_t len)
{
	int ret;

	BUG_ON(len > dev->urbs.size);

	urb->transfer_buffer_length = len; /* set to actual payload len */
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret == 0)
		return 0;

	dlfb_urb_completion(urb); /* because no one else will */
	atomic_set(&dev->lost_pixels, 1);
	dl_err("usb_submit_urb error %x\n", ret);
	return ret;
}
/* Module metadata exposed via modinfo */
MODULE_AUTHOR("Roberto De Ioris <roberto@unbit.it>, "
	      "Jaya Kumar <jayakumar.lkml@gmail.com>, "
	      "Bernie Thompson <bernie@plugable.com>");
MODULE_DESCRIPTION("DisplayLink kernel framebuffer driver");
MODULE_LICENSE("GPL");
/*
* udlfb.c -- Framebuffer driver for DisplayLink USB controller
*
* Copyright (C) 2009 Roberto De Ioris <roberto@unbit.it>
* Copyright (C) 2009 Jaya Kumar <jayakumar.lkml@gmail.com>
* Copyright (C) 2009 Bernie Thompson <bernie@plugable.com>
*
* This file is subject to the terms and conditions of the GNU General Public
* License v2. See the file COPYING in the main directory of this archive for
* more details.
*
* Layout is based on skeletonfb by James Simmons and Geert Uytterhoeven,
* usb-skeleton by GregKH.
*
* Device-specific portions based on information from Displaylink, with work
* from Florian Echtler, Henrik Bjerregaard Pedersen, and others.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/usb.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include "udlfb.h"
/* Template fixed-screeninfo: 16bpp truecolor, no panning, no HW accel. */
static struct fb_fix_screeninfo dlfb_fix = {
	.id = "udlfb",
	.type = FB_TYPE_PACKED_PIXELS,
	.visual = FB_VISUAL_TRUECOLOR,
	.xpanstep = 0,
	.ypanstep = 0,
	.ywrapstep = 0,
	.accel = FB_ACCEL_NONE,
};
/* fb_info.flags: virtual (vmalloc) framebuffer with accelerated blits,
 * and ALWAYS_SETPAR so set_par runs even for an unchanged mode. */
static const u32 udlfb_info_flags = FBINFO_DEFAULT | FBINFO_READS_FAST |
#ifdef FBINFO_VIRTFB
		FBINFO_VIRTFB |
#endif
		FBINFO_HWACCEL_IMAGEBLIT | FBINFO_HWACCEL_FILLRECT |
		FBINFO_HWACCEL_COPYAREA | FBINFO_MISC_ALWAYS_SETPAR;
/*
* There are many DisplayLink-based products, all with unique PIDs. We are able
* to support all volume ones (circa 2009) with a single driver, so we match
* globally on VID. TODO: Probe() needs to detect when we might be running
* "future" chips, and bail on those, so a compatible driver can match.
*/
/* Match every device with the DisplayLink vendor ID (see comment above). */
static struct usb_device_id id_table[] = {
	{.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,},
	{},
};
MODULE_DEVICE_TABLE(usb, id_table);
/* Build-time sanity warnings for kernel config options this driver
 * expects. Fix: correct the misspelled CONFIG_FB_DEFFERRED_IO in the
 * warning text so it names the real Kconfig symbol. */
#ifndef CONFIG_FB_DEFERRED_IO
#warning Please set CONFIG_FB_DEFERRED_IO option to support generic fbdev apps
#endif
#ifndef CONFIG_FB_SYS_IMAGEBLIT
#ifndef CONFIG_FB_SYS_IMAGEBLIT_MODULE
#warning Please set CONFIG_FB_SYS_IMAGEBLIT option to support fb console
#endif
#endif
#ifndef CONFIG_FB_MODE_HELPERS
#warning CONFIG_FB_MODE_HELPERS required. Expect build break
#endif
/* dlfb keeps a list of urbs for efficient bulk transfers */
static void dlfb_urb_completion(struct urb *urb);
static struct urb *dlfb_get_urb(struct dlfb_data *dev);
static int dlfb_submit_urb(struct dlfb_data *dev, struct urb * urb, size_t len);
static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size);
static void dlfb_free_urb_list(struct dlfb_data *dev);
/* other symbols with dependents */
#ifdef CONFIG_FB_DEFERRED_IO
static struct fb_deferred_io dlfb_defio;
#endif
/*
* All DisplayLink bulk operations start with 0xAF, followed by specific code
* All operations are written to buffers which then later get sent to device
*/
/*
 * Append one 4-byte register write command (0xAF 0x20 <reg> <val>) to
 * the command buffer; returns the position just past the write.
 */
static char *dlfb_set_register(char *buf, u8 reg, u8 val)
{
	buf[0] = 0xAF;
	buf[1] = 0x20;
	buf[2] = reg;
	buf[3] = val;
	return buf + 4;
}
/* Lock video registers (reg 0xFF = 0x00) before a mode-set sequence. */
static char *dlfb_vidreg_lock(char *buf)
{
	return dlfb_set_register(buf, 0xFF, 0x00);
}
/* Unlock video registers (reg 0xFF = 0xFF), committing the mode-set. */
static char *dlfb_vidreg_unlock(char *buf)
{
	return dlfb_set_register(buf, 0xFF, 0xFF);
}
/*
* On/Off for driving the DisplayLink framebuffer to the display
*/
static char *dlfb_enable_hvsync(char *buf, bool enable)
{
	/* register 0x1F: 0x00 drives the display, 0x01 blanks it */
	return dlfb_set_register(buf, 0x1F, enable ? 0x00 : 0x01);
}
/* Select hardware color depth via register 0x00. */
static char *dlfb_set_color_depth(char *buf, u8 selection)
{
	return dlfb_set_register(buf, 0x00, selection);
}
/* Write the 24-bit base address of the 16bpp segment (regs 0x20-0x22,
 * high byte first). */
static char *dlfb_set_base16bpp(char *wrptr, u32 base)
{
	/* the base pointer is 16 bits wide, 0x20 is hi byte. */
	wrptr = dlfb_set_register(wrptr, 0x20, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x21, base >> 8);
	return dlfb_set_register(wrptr, 0x22, base);
}
/*
* DisplayLink HW has separate 16bpp and 8bpp framebuffers.
* In 24bpp modes, the low 323 RGB bits go in the 8bpp framebuffer
*/
/* Write the 24-bit base address of the 8bpp segment (regs 0x26-0x28). */
static char *dlfb_set_base8bpp(char *wrptr, u32 base)
{
	wrptr = dlfb_set_register(wrptr, 0x26, base >> 16);
	wrptr = dlfb_set_register(wrptr, 0x27, base >> 8);
	return dlfb_set_register(wrptr, 0x28, base);
}
/* Write a 16-bit value big-endian across two consecutive registers. */
static char *dlfb_set_register_16(char *wrptr, u8 reg, u16 value)
{
	wrptr = dlfb_set_register(wrptr, reg, value >> 8);
	return dlfb_set_register(wrptr, reg+1, value);
}
/*
* This is kind of weird because the controller takes some
* register values in a different byte order than other registers.
*/
/* Write a 16-bit value little-endian (low byte first) — some controller
 * registers take the opposite byte order; see comment above. */
static char *dlfb_set_register_16be(char *wrptr, u8 reg, u16 value)
{
	wrptr = dlfb_set_register(wrptr, reg, value);
	return dlfb_set_register(wrptr, reg+1, value >> 8);
}
/*
* LFSR is linear feedback shift register. The reason we have this is
* because the display controller needs to minimize the clock depth of
* various counters used in the display path. So this code reverses the
* provided value into the lfsr16 value by counting backwards to get
* the value that needs to be set in the hardware comparator to get the
* same actual count. This makes sense once you read above a couple of
* times and think about it from a hardware perspective.
*/
/*
 * Convert a plain count to the equivalent LFSR comparator value by
 * stepping the hardware's 16-bit LFSR forward 'actual_count' times from
 * its reset value. See the block comment above for the rationale.
 */
static u16 dlfb_lfsr16(u16 actual_count)
{
	u32 lv = 0xFFFF; /* This is the lfsr value that the hw starts with */
	u16 step;

	for (step = 0; step < actual_count; step++) {
		u32 feedback =
			((lv >> 15) ^ (lv >> 4) ^ (lv >> 2) ^ (lv >> 1)) & 1;

		lv = ((lv << 1) | feedback) & 0xFFFF;
	}

	return (u16) lv;
}
/*
* This does LFSR conversion on the value that is to be written.
* See LFSR explanation above for more detail.
*/
/* Write a count register after LFSR-encoding the value (see above). */
static char *dlfb_set_register_lfsr16(char *wrptr, u8 reg, u16 value)
{
	return dlfb_set_register_16(wrptr, reg, dlfb_lfsr16(value));
}
/*
* This takes a standard fbdev screeninfo struct and all of its monitor mode
* details and converts them into the DisplayLink equivalent register commands.
*/
/*
 * Translate an fbdev mode (var) into the controller's timing-register
 * write sequence. Register numbers and ordering follow the DisplayLink
 * protocol; keep the sequence intact.
 */
static char *dlfb_set_vid_cmds(char *wrptr, struct fb_var_screeninfo *var)
{
	u16 xds, yds;
	u16 xde, yde;
	u16 yec;

	/* x display start */
	xds = var->left_margin + var->hsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x01, xds);
	/* x display end */
	xde = xds + var->xres;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x03, xde);

	/* y display start */
	yds = var->upper_margin + var->vsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x05, yds);
	/* y display end */
	yde = yds + var->yres;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x07, yde);

	/* x end count is active + blanking - 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x09,
			xde + var->right_margin - 1);

	/* libdlo hardcodes hsync start to 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x0B, 1);

	/* hsync end is width of sync pulse + 1 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x0D, var->hsync_len + 1);

	/* hpixels is active pixels */
	wrptr = dlfb_set_register_16(wrptr, 0x0F, var->xres);

	/* yendcount is vertical active + vertical blanking */
	yec = var->yres + var->upper_margin + var->lower_margin +
			var->vsync_len;
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x11, yec);

	/* libdlo hardcodes vsync start to 0 */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x13, 0);

	/* vsync end is width of vsync pulse */
	wrptr = dlfb_set_register_lfsr16(wrptr, 0x15, var->vsync_len);

	/* vpixels is active pixels */
	wrptr = dlfb_set_register_16(wrptr, 0x17, var->yres);

	/* convert picoseconds to 5kHz multiple for pclk5k = x * 1E12/5k */
	wrptr = dlfb_set_register_16be(wrptr, 0x1B,
			200*1000*1000/var->pixclock);

	return wrptr;
}
/*
* This takes a standard fbdev screeninfo struct that was fetched or prepared
* and then generates the appropriate command sequence that then drives the
* display controller.
*/
/*
 * Build and submit the full command sequence that programs the chip for
 * the mode described by var. Returns -EPERM when USB traffic is
 * disabled, -ENOMEM if no urb is available, else the submit result.
 */
static int dlfb_set_video_mode(struct dlfb_data *dev,
				struct fb_var_screeninfo *var)
{
	char *buf;
	char *wrptr;
	int retval = 0;
	int writesize;
	struct urb *urb;

	if (!atomic_read(&dev->usb_active))
		return -EPERM;

	urb = dlfb_get_urb(dev);
	if (!urb)
		return -ENOMEM;
	buf = (char *) urb->transfer_buffer;

	/*
	* This first section has to do with setting the base address on the
	* controller * associated with the display. There are 2 base
	* pointers, currently, we only * use the 16 bpp segment.
	*/
	wrptr = dlfb_vidreg_lock(buf);
	wrptr = dlfb_set_color_depth(wrptr, 0x00);
	/* set base for 16bpp segment to 0 */
	wrptr = dlfb_set_base16bpp(wrptr, 0);
	/* set base for 8bpp segment to end of fb */
	wrptr = dlfb_set_base8bpp(wrptr, dev->info->fix.smem_len);

	wrptr = dlfb_set_vid_cmds(wrptr, var);
	wrptr = dlfb_enable_hvsync(wrptr, true);
	wrptr = dlfb_vidreg_unlock(wrptr);

	writesize = wrptr - buf;

	retval = dlfb_submit_urb(dev, urb, writesize);

	return retval;
}
/*
 * Map the vmalloc'd virtual framebuffer into user space one page at a
 * time (vmalloc memory is not physically contiguous, so each page's pfn
 * is remapped individually).
 */
static int dlfb_ops_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	unsigned long start = vma->vm_start;
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	unsigned long page, pos;

	struct dlfb_data *dev = info->par;

	dl_notice("MMAP: %lu %u\n", offset + size, info->fix.smem_len);

	/* refuse mappings that extend beyond the framebuffer */
	if (offset + size > info->fix.smem_len)
		return -EINVAL;

	pos = (unsigned long)info->fix.smem_start + offset;

	while (size > 0) {
		page = vmalloc_to_pfn((void *)pos);
		if (remap_pfn_range(vma, start, page, PAGE_SIZE, PAGE_SHARED))
			return -EAGAIN;

		start += PAGE_SIZE;
		pos += PAGE_SIZE;
		if (size > PAGE_SIZE)
			size -= PAGE_SIZE;
		else
			size = 0;
	}

	vma->vm_flags |= VM_RESERVED;	/* avoid to swap out this VMA */
	return 0;
}
/*
* Trims identical data from front and back of line
* Sets new front buffer address and width
* And returns byte count of identical pixels
* Assumes CPU natural alignment (unsigned long)
* for back and front buffer ptrs and width
*/
/*
 * Compare a front-buffer line against the backing (shadow) copy in
 * unsigned-long-sized chunks and trim the identical prefix and suffix.
 * On return *bfront points at the first changed word and *width_bytes
 * covers only the changed span; the return value is the number of
 * bytes proven identical (prefix + suffix).
 */
static int dlfb_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
{
	int j, k;
	const unsigned long *back = (const unsigned long *) bback;
	const unsigned long *front = (const unsigned long *) *bfront;
	const int width = *width_bytes / sizeof(unsigned long);
	int identical = width;
	int start = width;
	int end = width;

	prefetch((void *) front);
	prefetch((void *) back);

	/* find first differing word (start stays == width if none) */
	for (j = 0; j < width; j++) {
		if (back[j] != front[j]) {
			start = j;
			break;
		}
	}

	/* scan backward (stopping at j) for the last differing word */
	for (k = width - 1; k > j; k--) {
		if (back[k] != front[k]) {
			end = k+1;
			break;
		}
	}

	identical = start + (width - end);
	*bfront = (u8 *) &front[start];
	*width_bytes = (end - start) * sizeof(unsigned long);

	return identical * sizeof(unsigned long);
}
/*
* Render a command stream for an encoded horizontal line segment of pixels.
*
* A command buffer holds several commands.
* It always begins with a fresh command header
* (the protocol doesn't require this, but we enforce it to allow
* multiple buffers to be potentially encoded and sent in parallel).
* A single command encodes one contiguous horizontal line of pixels
*
* The function relies on the client to do all allocation, so that
* rendering can be done directly to output buffers (e.g. USB URBs).
* The function fills the supplied command buffer, providing information
* on where it left off, so the client may call in again with additional
* buffers if the line will take several buffers to complete.
*
* A single command can transmit a maximum of 256 pixels,
* regardless of the compression ratio (protocol design limit).
* To the hardware, 0 for a size byte means 256
*
* Rather than 256 pixel commands which are either rl or raw encoded,
* the rlx command simply assumes alternating raw and rl spans within one cmd.
* This has a slightly larger header overhead, but produces more even results.
* It also processes all data (read and write) in a single pass.
* Performance benchmarks of common cases show it having just slightly better
* compression than 256 pixel raw -or- rle commands, with similar CPU consumpion.
* But for very rl friendly data, will compress not quite as well.
*/
/*
 * RLX-encode pixels into 0xAF 0x6B commands (see the block comment
 * above for the full protocol description). Advances the pixel, command
 * buffer, and device address cursors in place so the caller can resume
 * with a fresh buffer when this one fills up.
 */
static void dlfb_compress_hline(
	const uint16_t **pixel_start_ptr,
	const uint16_t *const pixel_end,
	uint32_t *device_address_ptr,
	uint8_t **command_buffer_ptr,
	const uint8_t *const cmd_buffer_end)
{
	const uint16_t *pixel = *pixel_start_ptr;
	uint32_t dev_addr  = *device_address_ptr;
	uint8_t *cmd = *command_buffer_ptr;
	const int bpp = 2;

	/* emit one command per iteration while pixels and buffer remain */
	while ((pixel_end > pixel) &&
	       (cmd_buffer_end - MIN_RLX_CMD_BYTES > cmd)) {
		uint8_t *raw_pixels_count_byte = 0;
		uint8_t *cmd_pixels_count_byte = 0;
		const uint16_t *raw_pixel_start = 0;
		const uint16_t *cmd_pixel_start, *cmd_pixel_end = 0;
		const uint32_t be_dev_addr = cpu_to_be32(dev_addr);

		prefetchw((void *) cmd); /* pull in one cache line at least */

		/* command header: opcode + 24-bit big-endian device address */
		*cmd++ = 0xAF;
		*cmd++ = 0x6B;
		*cmd++ = (uint8_t) ((be_dev_addr >> 8) & 0xFF);
		*cmd++ = (uint8_t) ((be_dev_addr >> 16) & 0xFF);
		*cmd++ = (uint8_t) ((be_dev_addr >> 24) & 0xFF);

		cmd_pixels_count_byte = cmd++; /*  we'll know this later */
		cmd_pixel_start = pixel;

		raw_pixels_count_byte = cmd++; /*  we'll know this later */
		raw_pixel_start = pixel;

		/* cap this command at 256 pixels, remaining input, and
		 * remaining output space (bpp bytes per raw pixel) */
		cmd_pixel_end = pixel + min(MAX_CMD_PIXELS + 1,
			min((int)(pixel_end - pixel),
			    (int)(cmd_buffer_end - cmd) / bpp));

		prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);

		while (pixel < cmd_pixel_end) {
			const uint16_t * const repeating_pixel = pixel;

			*(uint16_t *)cmd = cpu_to_be16p(pixel);
			cmd += 2;
			pixel++;

			if (unlikely((pixel < cmd_pixel_end) &&
				     (*pixel == *repeating_pixel))) {
				/* go back and fill in raw pixel count */
				*raw_pixels_count_byte = ((repeating_pixel -
						raw_pixel_start) + 1) & 0xFF;

				/* consume the run of repeated pixels */
				while ((pixel < cmd_pixel_end)
				       && (*pixel == *repeating_pixel)) {
					pixel++;
				}

				/* immediately after raw data is repeat byte */
				*cmd++ = ((pixel - repeating_pixel) - 1) & 0xFF;

				/* Then start another raw pixel span */
				raw_pixel_start = pixel;
				raw_pixels_count_byte = cmd++;
			}
		}

		if (pixel > raw_pixel_start) {
			/* finalize last RAW span */
			*raw_pixels_count_byte = (pixel-raw_pixel_start) & 0xFF;
		}

		*cmd_pixels_count_byte = (pixel - cmd_pixel_start) & 0xFF;
		dev_addr += (pixel - cmd_pixel_start) * bpp;
	}

	if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
		/* Fill leftover bytes with no-ops */
		if (cmd_buffer_end > cmd)
			memset(cmd, 0xAF, cmd_buffer_end - cmd);
		cmd = (uint8_t *) cmd_buffer_end;
	}

	*command_buffer_ptr = cmd;
	*pixel_start_ptr = pixel;
	*device_address_ptr = dev_addr;

	return;
}
/*
* There are 3 copies of every pixel: The front buffer that the fbdev
* client renders to, the actual framebuffer across the USB bus in hardware
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
/*
 * Encode and send one horizontal line span. If a shadow buffer exists,
 * unchanged prefix/suffix pixels are trimmed first and the shadow is
 * updated to match. Submits full urbs as they fill and fetches fresh
 * ones; updates the caller's urb and buffer cursors plus the identical/
 * sent byte counters.
 */
static void dlfb_render_hline(struct dlfb_data *dev, struct urb **urb_ptr,
			      const char *front, char **urb_buf_ptr,
			      u32 byte_offset, u32 byte_width,
			      int *ident_ptr, int *sent_ptr)
{
	const u8 *line_start, *line_end, *next_pixel;
	u32 dev_addr = dev->base16 + byte_offset;
	struct urb *urb = *urb_ptr;
	u8 *cmd = *urb_buf_ptr;
	u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;

	line_start = (u8 *) (front + byte_offset);
	next_pixel = line_start;
	line_end = next_pixel + byte_width;

	if (dev->backing_buffer) {
		int offset;
		const u8 *back_start = (u8 *) (dev->backing_buffer
						+ byte_offset);

		/* trim unchanged head/tail against the shadow copy */
		*ident_ptr += dlfb_trim_hline(back_start, &next_pixel,
			&byte_width);

		offset = next_pixel - line_start;
		line_end = next_pixel + byte_width;
		dev_addr += offset;
		back_start += offset;
		line_start += offset;

		/* shadow now mirrors what we are about to send */
		memcpy((char *)back_start, (char *) line_start,
		       byte_width);
	}

	while (next_pixel < line_end) {

		dlfb_compress_hline((const uint16_t **) &next_pixel,
			     (const uint16_t *) line_end, &dev_addr,
			(u8 **) &cmd, (u8 *) cmd_end);

		if (cmd >= cmd_end) {
			/* buffer full: ship it and start a new urb */
			int len = cmd - (u8 *) urb->transfer_buffer;
			if (dlfb_submit_urb(dev, urb, len))
				return; /* lost pixels is set */
			*sent_ptr += len;
			urb = dlfb_get_urb(dev);
			if (!urb)
				return; /* lost_pixels is set */
			*urb_ptr = urb;
			cmd = urb->transfer_buffer;
			cmd_end = &cmd[urb->transfer_buffer_length];
		}
	}

	*urb_buf_ptr = cmd;
}
/*
 * Send a damaged rectangle of the virtual framebuffer to the device.
 * x is aligned down to unsigned long granularity (width grows to
 * compensate) so dlfb_trim_hline's word-sized compares are valid.
 * Returns 0 on success or when USB is inactive, -EINVAL for an empty or
 * out-of-bounds rectangle.
 *
 * Fix: also reject negative x/y and non-positive height — the original
 * only checked width and the upper bounds, so a negative origin could
 * index before the framebuffer.
 *
 * NOTE(review): the 'data' parameter is unused; rendering always reads
 * dev->info->fix.smem_start.
 */
int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
	       int width, int height, char *data)
{
	int i, ret;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	struct urb *urb;
	int aligned_x;

	start_cycles = get_cycles();

	aligned_x = DL_ALIGN_DOWN(x, sizeof(unsigned long));
	width = DL_ALIGN_UP(width + (x-aligned_x), sizeof(unsigned long));
	x = aligned_x;

	if ((width <= 0) || (height <= 0) || (x < 0) || (y < 0) ||
	    (x + width > dev->info->var.xres) ||
	    (y + height > dev->info->var.yres))
		return -EINVAL;

	if (!atomic_read(&dev->usb_active))
		return 0;

	urb = dlfb_get_urb(dev);
	if (!urb)
		return 0;
	cmd = urb->transfer_buffer;

	for (i = y; i < y + height ; i++) {
		const int line_offset = dev->info->fix.line_length * i;
		const int byte_offset = line_offset + (x * BPP);

		dlfb_render_hline(dev, &urb, (char *) dev->info->fix.smem_start,
				  &cmd, byte_offset, width * BPP,
				  &bytes_identical, &bytes_sent);
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		ret = dlfb_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		dlfb_urb_completion(urb); /* nothing rendered; requeue urb */

	atomic_add(bytes_sent, &dev->bytes_sent);
	atomic_add(bytes_identical, &dev->bytes_identical);
	atomic_add(width*height*2, &dev->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &dev->cpu_kcycles_used);

	return 0;
}
/* hardware has native COPY command (see libdlo), but not worth it for fbcon */
/* fbdev copyarea: do the sys copy, then push the destination rect. */
static void dlfb_ops_copyarea(struct fb_info *info,
				const struct fb_copyarea *area)
{
	struct dlfb_data *dev = info->par;

#if defined CONFIG_FB_SYS_COPYAREA || defined CONFIG_FB_SYS_COPYAREA_MODULE
	sys_copyarea(info, area);

	dlfb_handle_damage(dev, area->dx, area->dy,
			area->width, area->height, info->screen_base);
#endif
	atomic_inc(&dev->copy_count);
}
/* fbdev imageblit: do the sys blit, then push the destination rect. */
static void dlfb_ops_imageblit(struct fb_info *info,
				const struct fb_image *image)
{
	struct dlfb_data *dev = info->par;

#if defined CONFIG_FB_SYS_IMAGEBLIT || defined CONFIG_FB_SYS_IMAGEBLIT_MODULE
	sys_imageblit(info, image);

	dlfb_handle_damage(dev, image->dx, image->dy,
			image->width, image->height, info->screen_base);
#endif
	atomic_inc(&dev->blit_count);
}
/* fbdev fillrect: do the sys fill, then push the filled rect. */
static void dlfb_ops_fillrect(struct fb_info *info,
			  const struct fb_fillrect *rect)
{
	struct dlfb_data *dev = info->par;

#if defined CONFIG_FB_SYS_FILLRECT || defined CONFIG_FB_SYS_FILLRECT_MODULE
	sys_fillrect(info, rect);

	dlfb_handle_damage(dev, rect->dx, rect->dy, rect->width,
			   rect->height, info->screen_base);
#endif
	atomic_inc(&dev->fill_count);
}
/*
 * Fetch the monitor EDID one byte per vendor control request; each
 * request returns 2 bytes and the EDID byte is rbuf[1].
 * NOTE(review): the usb_control_msg() return value is never checked, so
 * a failed transfer stores stale stack data into dev->edid[i] — worth
 * validating ret before use.
 */
static void dlfb_get_edid(struct dlfb_data *dev)
{
	int i;
	int ret;
	char rbuf[2];

	for (i = 0; i < sizeof(dev->edid); i++) {
		ret = usb_control_msg(dev->udev,
				    usb_rcvctrlpipe(dev->udev, 0), (0x02),
				    (0x80 | (0x02 << 5)), i << 8, 0xA1, rbuf, 2,
				    0);
		dev->edid[i] = rbuf[1];
	}
}
static int dlfb_ops_ioctl(struct fb_info *info, unsigned int cmd,
unsigned long arg)
{
struct dlfb_data *dev = info->par;
struct dloarea *area = NULL;
if (!atomic_read(&dev->usb_active))
return 0;
/* TODO: Update X server to get this from sysfs instead */
if (cmd == DLFB_IOCTL_RETURN_EDID) {
char *edid = (char *)arg;
dlfb_get_edid(dev);
if (copy_to_user(edid, dev->edid, sizeof(dev->edid)))
return -EFAULT;
return 0;
}
/* TODO: Help propose a standard fb.h ioctl to report mmap damage */
if (cmd == DLFB_IOCTL_REPORT_DAMAGE) {
area = (struct dloarea *)arg;
if (area->x < 0)
area->x = 0;
if (area->x > info->var.xres)
area->x = info->var.xres;
if (area->y < 0)
area->y = 0;
if (area->y > info->var.yres)
area->y = info->var.yres;
atomic_set(&dev->use_defio, 0);
dlfb_handle_damage(dev, area->x, area->y, area->w, area->h,
info->screen_base);
atomic_inc(&dev->damage_count);
}
return 0;
}
/* taken from vesafb */
/* taken from vesafb */
/*
 * Store one palette entry in the 16-entry truecolor pseudo-palette,
 * packing to 1:5:5:5 or 0:5:6:5 depending on the red offset in var.
 * Returns 1 for out-of-range regno, else 0.
 */
static int
dlfb_ops_setcolreg(unsigned regno, unsigned red, unsigned green,
	       unsigned blue, unsigned transp, struct fb_info *info)
{
	int err = 0;

	if (regno >= info->cmap.len)
		return 1;

	if (regno < 16) {
		if (info->var.red.offset == 10) {
			/* 1:5:5:5 */
			((u32 *) (info->pseudo_palette))[regno] =
			    ((red & 0xf800) >> 1) |
			    ((green & 0xf800) >> 6) | ((blue & 0xf800) >> 11);
		} else {
			/* 0:5:6:5 */
			((u32 *) (info->pseudo_palette))[regno] =
			    ((red & 0xf800)) |
			    ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
		}
	}

	return err;
}
/*
* It's common for several clients to have framebuffer open simultaneously.
* e.g. both fbcon and X. Makes things interesting.
*/
/*
 * fbdev open: bump the client count under fb_open_lock and lazily
 * enable deferred I/O for the first client when defio is in use.
 */
static int dlfb_ops_open(struct fb_info *info, int user)
{
	struct dlfb_data *dev = info->par;

/*	if (user == 0)
 *		We could special case kernel mode clients (fbcon) here
 */

	mutex_lock(&dev->fb_open_lock);

	dev->fb_count++;

#ifdef CONFIG_FB_DEFERRED_IO
	if ((atomic_read(&dev->use_defio)) && (info->fbdefio == NULL)) {
		/* enable defio */
		info->fbdefio = &dlfb_defio;
		fb_deferred_io_init(info);
	}
#endif

	dl_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
	    info->node, user, info, dev->fb_count);

	mutex_unlock(&dev->fb_open_lock);

	return 0;
}
/*
 * fbdev release: drop the client count; when the last client leaves,
 * tear down deferred I/O and restore the direct mmap handler.
 */
static int dlfb_ops_release(struct fb_info *info, int user)
{
	struct dlfb_data *dev = info->par;

	mutex_lock(&dev->fb_open_lock);

	dev->fb_count--;

#ifdef CONFIG_FB_DEFERRED_IO
	if ((dev->fb_count == 0) && (info->fbdefio)) {
		fb_deferred_io_cleanup(info);
		info->fbdefio = NULL;
		info->fbops->fb_mmap = dlfb_ops_mmap;
	}
#endif

	dl_notice("release /dev/fb%d user=%d count=%d\n",
		  info->node, user, dev->fb_count);

	mutex_unlock(&dev->fb_open_lock);

	return 0;
}
/*
* Called when all client interfaces to start transactions have been disabled,
* and all references to our device instance (dlfb_data) are released.
* Every transaction must have a reference, so we know are fully spun down
*/
/*
 * Final kref release — every transaction holds a reference, so reaching
 * here means the device is fully spun down. Frees the shadow buffer and
 * the device struct. (vfree(NULL) is a no-op, so the original's NULL
 * guard was redundant and has been dropped.)
 */
static void dlfb_delete(struct kref *kref)
{
	struct dlfb_data *dev = container_of(kref, struct dlfb_data, kref);

	vfree(dev->backing_buffer);

	mutex_destroy(&dev->fb_open_lock);

	kfree(dev);
}
/*
* Called by fbdev as last part of unregister_framebuffer() process
* No new clients can open connections. Deallocate everything fb_info.
*/
/*
 * fbdev destroy hook (last step of unregister_framebuffer): release all
 * fb_info-owned allocations, then drop the dlfb_data reference that was
 * held on behalf of framebuffer clients.
 */
static void dlfb_ops_destroy(struct fb_info *info)
{
	struct dlfb_data *dev = info->par;

	if (info->cmap.len != 0)
		fb_dealloc_cmap(&info->cmap);
	if (info->monspecs.modedb)
		fb_destroy_modedb(info->monspecs.modedb);
	if (info->screen_base)
		vfree(info->screen_base);

	fb_destroy_modelist(&info->modelist);

	framebuffer_release(info);

	/* ref taken before register_framebuffer() for dlfb_data clients */
	kref_put(&dev->kref, dlfb_delete);
}
/*
* Check whether a video mode is supported by the DisplayLink chip
* We start from monitor's modes, so don't need to filter that here
*/
/*
 * A mode is usable iff its total pixel count fits within this chip
 * SKU's limit. Monitor capability filtering happens upstream of here.
 */
static int dlfb_is_valid_mode(struct fb_videomode *mode,
		struct fb_info *info)
{
	struct dlfb_data *dev = info->par;

	return (mode->xres * mode->yres <= dev->sku_pixel_limit) ? 1 : 0;
}
/* Force the var to the device's native RGB565 layout (5:6:5, lsb-first). */
static void dlfb_var_color_format(struct fb_var_screeninfo *var)
{
	var->bits_per_pixel = 16;

	var->red.offset = 11;
	var->red.length = 5;
	var->red.msb_right = 0;

	var->green.offset = 5;
	var->green.length = 6;
	var->green.msb_right = 0;

	var->blue.offset = 0;
	var->blue.length = 5;
	var->blue.msb_right = 0;
}
/*
 * fbdev check_var: reject modes that exceed allocated framebuffer memory
 * or the chip's pixel limit, and normalize the color format to RGB565.
 */
static int dlfb_ops_check_var(struct fb_var_screeninfo *var,
				struct fb_info *info)
{
	struct fb_videomode mode;

	/* TODO: support dynamically changing framebuffer size */
	if ((var->xres * var->yres * 2) > info->fix.smem_len)
		return -EINVAL;

	/* set device-specific elements of var unrelated to mode */
	dlfb_var_color_format(var);

	fb_var_to_videomode(&mode, var);

	if (!dlfb_is_valid_mode(&mode, info))
		return -EINVAL;

	return 0;
}
/* fbdev set_par: program the hardware for the already-validated mode. */
static int dlfb_ops_set_par(struct fb_info *info)
{
	struct dlfb_data *dev = info->par;

	dl_notice("set_par mode %dx%d\n", info->var.xres, info->var.yres);

	return dlfb_set_video_mode(dev, &info->var);
}
/*
 * fbdev blank: toggle hsync/vsync output. Also flips usb_active so no
 * new pixel traffic is generated while blanked (see overload comment).
 */
static int dlfb_ops_blank(int blank_mode, struct fb_info *info)
{
	struct dlfb_data *dev = info->par;
	char *bufptr;
	struct urb *urb;

	urb = dlfb_get_urb(dev);
	if (!urb)
		return 0;
	bufptr = (char *) urb->transfer_buffer;

	/* overloading usb_active.  UNBLANK can conflict with teardown */

	bufptr = dlfb_vidreg_lock(bufptr);
	if (blank_mode != FB_BLANK_UNBLANK) {
		atomic_set(&dev->usb_active, 0);
		bufptr = dlfb_enable_hvsync(bufptr, false);
	} else {
		atomic_set(&dev->usb_active, 1);
		bufptr = dlfb_enable_hvsync(bufptr, true);
	}
	bufptr = dlfb_vidreg_unlock(bufptr);

	dlfb_submit_urb(dev, urb, bufptr - (char *) urb->transfer_buffer);

	return 0;
}
/* fbdev entry points for /dev/fbN clients (fbcon, X, mmap users). */
static struct fb_ops dlfb_ops = {
	.owner = THIS_MODULE,
	.fb_setcolreg = dlfb_ops_setcolreg,
	.fb_fillrect = dlfb_ops_fillrect,
	.fb_copyarea = dlfb_ops_copyarea,
	.fb_imageblit = dlfb_ops_imageblit,
	.fb_mmap = dlfb_ops_mmap,
	.fb_ioctl = dlfb_ops_ioctl,
	.fb_open = dlfb_ops_open,
	.fb_release = dlfb_ops_release,
	.fb_blank = dlfb_ops_blank,
	.fb_check_var = dlfb_ops_check_var,
	.fb_set_par = dlfb_ops_set_par,
};
/*
* Calls dlfb_get_edid() to query the EDID of attached monitor via usb cmds
* Then parses EDID into three places used by various parts of fbdev:
* fb_var_screeninfo contains the timing of the monitor's preferred mode
* fb_info.monspecs is full parsed EDID info, including monspecs.modedb
* fb_info.modelist is a linked list of all monitor & VESA modes which work
*
* If EDID is not readable/valid, then modelist is all VESA modes,
* monspecs is NULL, and fb_var_screeninfo is set to safe VESA mode
* Returns 0 if EDID parses successfully
*/
/*
 * Query and parse the attached monitor's EDID (see the block comment
 * above for which fbdev structures get populated). Falls back to the
 * VESA mode table — defaulting to 800x600@60, projector-safe — when the
 * EDID is missing or unusable. Returns 0 on a successful EDID parse.
 */
static int dlfb_parse_edid(struct dlfb_data *dev,
			   struct fb_var_screeninfo *var,
			   struct fb_info *info)
{
	int i;
	const struct fb_videomode *default_vmode = NULL;
	int result = 0;

	fb_destroy_modelist(&info->modelist);
	memset(&info->monspecs, 0, sizeof(info->monspecs));

	dlfb_get_edid(dev);
	fb_edid_to_monspecs(dev->edid, &info->monspecs);

	if (info->monspecs.modedb_len > 0) {
		/* keep only modes within this chip's pixel limit */
		for (i = 0; i < info->monspecs.modedb_len; i++) {
			if (dlfb_is_valid_mode(&info->monspecs.modedb[i], info))
				fb_add_videomode(&info->monspecs.modedb[i],
					&info->modelist);
		}

		default_vmode = fb_find_best_display(&info->monspecs,
						     &info->modelist);
	} else {
		struct fb_videomode fb_vmode = {0};

		dl_err("Unable to get valid EDID from device/display\n");
		result = 1;

		/*
		 * Add the standard VESA modes to our modelist
		 * Since we don't have EDID, there may be modes that
		 * overspec monitor and/or are incorrect aspect ratio, etc.
		 * But at least the user has a chance to choose
		 */
		for (i = 0; i < VESA_MODEDB_SIZE; i++) {
			if (dlfb_is_valid_mode((struct fb_videomode *)
						&vesa_modes[i], info))
				fb_add_videomode(&vesa_modes[i],
						 &info->modelist);
		}

		/*
		 * default to resolution safe for projectors
		 * (since they are most common case without EDID)
		 */
		fb_vmode.xres = 800;
		fb_vmode.yres = 600;
		fb_vmode.refresh = 60;
		default_vmode = fb_find_nearest_mode(&fb_vmode,
						     &info->modelist);
	}

	fb_videomode_to_var(var, default_vmode);
	dlfb_var_color_format(var);

	return result;
}
/* sysfs: total bytes rendered into the virtual framebuffer. */
static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dev->bytes_rendered));
}
/* sysfs: bytes proven unchanged by shadow-buffer trimming (not sent). */
static ssize_t metrics_bytes_identical_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dev->bytes_identical));
}
/* sysfs: total encoded bytes actually submitted over USB. */
static ssize_t metrics_bytes_sent_show(struct device *fbdev,
				   struct device_attribute *a, char *buf) {
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;
	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dev->bytes_sent));
}
/* sysfs read: cumulative CPU kilocycles spent rendering. */
static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
				struct device_attribute *a, char *buf)
{
	struct fb_info *info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = info->par;

	return snprintf(buf, PAGE_SIZE, "%u\n",
			atomic_read(&dlfb->cpu_kcycles_used));
}
/*
 * sysfs read: human-readable dump of assorted driver state — call
 * counters, client count, urb pool status, shadow-buffer presence and
 * the lost-pixels flag.
 */
static ssize_t metrics_misc_show(struct device *fbdev,
				struct device_attribute *a, char *buf)
{
	struct fb_info *fb_info = dev_get_drvdata(fbdev);
	struct dlfb_data *dev = fb_info->par;

	return snprintf(buf, PAGE_SIZE,
			"Calls to\ndamage: %u\nblit: %u\n"
			"defio faults: %u\ncopy: %u\n"
			"fill: %u\n\n"
			"active framebuffer clients: %d\n"
			"urbs available %d(%d)\n"
			"Shadow framebuffer in use? %s\n"
			"Any lost pixels? %s\n",
			atomic_read(&dev->damage_count),
			atomic_read(&dev->blit_count),
			atomic_read(&dev->defio_fault_count),
			atomic_read(&dev->copy_count),
			atomic_read(&dev->fill_count),
			dev->fb_count,
			/* limit_sem.count read without locking: debug only */
			dev->urbs.available, dev->urbs.limit_sem.count,
			(dev->backing_buffer) ? "yes" : "no",
			atomic_read(&dev->lost_pixels) ? "yes" : "no");
}
/*
 * sysfs binary read of the cached EDID block.  Refreshes the cache by
 * re-running EDID parsing first; returns 0 (EOF) if that fails or the
 * requested offset is past the end of the EDID buffer.
 */
static ssize_t edid_show(struct file *filp, struct kobject *kobj,
			 struct bin_attribute *a,
			 char *buf, loff_t off, size_t count)
{
	struct device *fbdev = container_of(kobj, struct device, kobj);
	struct fb_info *info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = info->par;
	const size_t total = sizeof(dlfb->edid);

	if (dlfb_parse_edid(dlfb, &info->var, info))
		return 0;

	if (off >= total)
		return 0;

	/* clamp the read to the tail of the buffer */
	if (count > total - off)
		count = total - off;

	memcpy(buf, &dlfb->edid[0] + off, count);

	return count;
}
/* sysfs write: any write resets every performance counter to zero. */
static ssize_t metrics_reset_store(struct device *fbdev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct fb_info *info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = info->par;

	atomic_set(&dlfb->bytes_rendered, 0);
	atomic_set(&dlfb->bytes_identical, 0);
	atomic_set(&dlfb->bytes_sent, 0);
	atomic_set(&dlfb->cpu_kcycles_used, 0);
	atomic_set(&dlfb->blit_count, 0);
	atomic_set(&dlfb->copy_count, 0);
	atomic_set(&dlfb->fill_count, 0);
	atomic_set(&dlfb->defio_fault_count, 0);
	atomic_set(&dlfb->damage_count, 0);

	return count;
}
/* sysfs read: current deferred-I/O enable flag (0 or 1). */
static ssize_t use_defio_show(struct device *fbdev,
				struct device_attribute *a, char *buf)
{
	struct fb_info *info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = info->par;

	return snprintf(buf, PAGE_SIZE, "%d\n",
			atomic_read(&dlfb->use_defio));
}
/* sysfs write: '0' disables deferred I/O, '1' enables it; other input
 * is silently ignored (same as before). */
static ssize_t use_defio_store(struct device *fbdev,
			   struct device_attribute *attr,
			   const char *buf, size_t count)
{
	struct fb_info *info = dev_get_drvdata(fbdev);
	struct dlfb_data *dlfb = info->par;

	if (count > 0) {
		switch (buf[0]) {
		case '0':
			atomic_set(&dlfb->use_defio, 0);
			break;
		case '1':
			atomic_set(&dlfb->use_defio, 1);
			break;
		default:
			break;
		}
	}

	return count;
}
/* sysfs binary attribute exposing the 128-byte EDID block, read-only. */
static struct bin_attribute edid_attr = {
	.attr.name = "edid",
	.attr.mode = 0444,
	.size = 128,
	.read = edid_show,
};
/* sysfs attributes registered on the fbdev device at probe time and
 * removed again on disconnect. */
static struct device_attribute fb_device_attrs[] = {
	__ATTR_RO(metrics_bytes_rendered),
	__ATTR_RO(metrics_bytes_identical),
	__ATTR_RO(metrics_bytes_sent),
	__ATTR_RO(metrics_cpu_kcycles_used),
	__ATTR_RO(metrics_misc),
	__ATTR(metrics_reset, S_IWUGO, NULL, metrics_reset_store),
	__ATTR_RW(use_defio),
};
#ifdef CONFIG_FB_DEFERRED_IO
/*
 * Deferred-I/O callback: render every dirty page of the virtual
 * framebuffer out to the USB device.  Bails out early if defio is
 * disabled or the USB device is no longer active.
 */
static void dlfb_dpy_deferred_io(struct fb_info *info,
				struct list_head *pagelist)
{
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct dlfb_data *dev = info->par;
	struct urb *urb;
	char *cmd;
	cycles_t start_cycles, end_cycles;
	int bytes_sent = 0;
	int bytes_identical = 0;
	int bytes_rendered = 0;
	int fault_count = 0;

	if (!atomic_read(&dev->use_defio))
		return;
	if (!atomic_read(&dev->usb_active))
		return;
	start_cycles = get_cycles();

	urb = dlfb_get_urb(dev);
	if (!urb)
		return;
	/* cmd advances through the urb buffer as hlines are rendered */
	cmd = urb->transfer_buffer;

	/* walk the written page list and render each to device */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* dlfb_render_hline may submit full urbs and swap in a
		 * fresh one via &urb/&cmd */
		dlfb_render_hline(dev, &urb, (char *) info->fix.smem_start,
				  &cmd, cur->index << PAGE_SHIFT,
				  PAGE_SIZE, &bytes_identical, &bytes_sent);
		bytes_rendered += PAGE_SIZE;
		fault_count++;
	}

	if (cmd > (char *) urb->transfer_buffer) {
		/* Send partial buffer remaining before exiting */
		int len = cmd - (char *) urb->transfer_buffer;
		dlfb_submit_urb(dev, urb, len);
		bytes_sent += len;
	} else
		/* nothing queued in this urb; return it to the pool */
		dlfb_urb_completion(urb);

	/* fold this pass's work into the cumulative sysfs counters */
	atomic_add(fault_count, &dev->defio_fault_count);
	atomic_add(bytes_sent, &dev->bytes_sent);
	atomic_add(bytes_identical, &dev->bytes_identical);
	atomic_add(bytes_rendered, &dev->bytes_rendered);
	end_cycles = get_cycles();
	atomic_add(((unsigned int) ((end_cycles - start_cycles)
		    >> 10)), /* Kcycles */
		   &dev->cpu_kcycles_used);
}
/* Deferred-I/O config: accumulate dirty pages for 5 jiffies, then call
 * dlfb_dpy_deferred_io() to flush them to the device. */
static struct fb_deferred_io dlfb_defio = {
	.delay = 5,
	.deferred_io = dlfb_dpy_deferred_io,
};
#endif
/*
* This is necessary before we can communicate with the display controller.
*/
static int dlfb_select_std_channel(struct dlfb_data *dev)
{
int ret;
u8 set_def_chn[] = { 0x57, 0xCD, 0xDC, 0xA7,
0x1C, 0x88, 0x5E, 0x15,
0x60, 0xFE, 0xC6, 0x97,
0x16, 0x3D, 0x47, 0xF2 };
ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
NR_USB_REQUEST_CHANNEL,
(USB_DIR_OUT | USB_TYPE_VENDOR), 0, 0,
set_def_chn, sizeof(set_def_chn), USB_CTRL_SET_TIMEOUT);
return ret;
}
/*
 * USB probe: allocate driver/device state, size and allocate the virtual
 * framebuffer from the (EDID-derived) mode, register the fbdev and its
 * sysfs attributes.  On any failure, unwinds via the kref scheme shared
 * with disconnect/fb_destroy.
 *
 * Fix vs. original: corrected the "allcoated" typo in the shadow-buffer
 * warning message.  All other code is unchanged.
 */
static int dlfb_usb_probe(struct usb_interface *interface,
			const struct usb_device_id *id)
{
	struct usb_device *usbdev;
	struct dlfb_data *dev;
	struct fb_info *info;
	int videomemorysize;
	int i;
	unsigned char *videomemory;
	int retval = -ENOMEM;
	struct fb_var_screeninfo *var;
	int registered = 0;
	u16 *pix_framebuffer;

	/* usb initialization */
	usbdev = interface_to_usbdev(interface);

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		err("dlfb_usb_probe: failed alloc of dev struct\n");
		goto error;
	}

	/* we need to wait for both usb and fbdev to spin down on disconnect */
	kref_init(&dev->kref); /* matching kref_put in usb .disconnect fn */
	kref_get(&dev->kref); /* matching kref_put in .fb_destroy function*/

	dev->udev = usbdev;
	dev->gdev = &usbdev->dev; /* our generic struct device * */
	usb_set_intfdata(interface, dev);

	if (!dlfb_alloc_urb_list(dev, WRITES_IN_FLIGHT, MAX_TRANSFER)) {
		retval = -ENOMEM;
		dl_err("dlfb_alloc_urb_list failed\n");
		goto error;
	}

	mutex_init(&dev->fb_open_lock);

	/* We don't register a new USB class. Our client interface is fbdev */

	/* allocates framebuffer driver structure, not framebuffer memory */
	info = framebuffer_alloc(0, &usbdev->dev);
	if (!info) {
		retval = -ENOMEM;
		dl_err("framebuffer_alloc failed\n");
		goto error;
	}

	dev->info = info;
	info->par = dev;
	info->pseudo_palette = dev->pseudo_palette;
	info->fbops = &dlfb_ops;

	var = &info->var;

	/* TODO set limit based on actual SKU detection */
	dev->sku_pixel_limit = 2048 * 1152;

	INIT_LIST_HEAD(&info->modelist);
	dlfb_parse_edid(dev, var, info);

	/*
	 * ok, now that we've got the size info, we can alloc our framebuffer.
	 */
	info->fix = dlfb_fix;
	info->fix.line_length = var->xres * (var->bits_per_pixel / 8);
	videomemorysize = info->fix.line_length * var->yres;

	/*
	 * The big chunk of system memory we use as a virtual framebuffer.
	 * TODO: Handle fbcon cursor code calling blit in interrupt context
	 */
	videomemory = vmalloc(videomemorysize);
	if (!videomemory) {
		retval = -ENOMEM;
		dl_err("Virtual framebuffer alloc failed\n");
		goto error;
	}

	info->screen_base = videomemory;
	info->fix.smem_len = PAGE_ALIGN(videomemorysize);
	info->fix.smem_start = (unsigned long) videomemory;
	info->flags = udlfb_info_flags;

	/*
	 * Second framebuffer copy, mirroring the state of the framebuffer
	 * on the physical USB device. We can function without this.
	 * But with imperfect damage info we may end up sending pixels over USB
	 * that were, in fact, unchanged -- wasting limited USB bandwidth
	 */
	dev->backing_buffer = vmalloc(videomemorysize);
	if (!dev->backing_buffer)
		dl_warn("No shadow/backing buffer allocated\n");
	else
		memset(dev->backing_buffer, 0, videomemorysize);

	retval = fb_alloc_cmap(&info->cmap, 256, 0);
	if (retval < 0) {
		dl_err("fb_alloc_cmap failed %x\n", retval);
		goto error;
	}

	/* ready to begin using device */

#ifdef CONFIG_FB_DEFERRED_IO
	atomic_set(&dev->use_defio, 1);
#endif
	atomic_set(&dev->usb_active, 1);
	dlfb_select_std_channel(dev);

	dlfb_ops_check_var(var, info);
	dlfb_ops_set_par(info);

	/* paint greenscreen */
	pix_framebuffer = (u16 *) videomemory;
	for (i = 0; i < videomemorysize / 2; i++)
		pix_framebuffer[i] = 0x37e6;

	dlfb_handle_damage(dev, 0, 0, info->var.xres, info->var.yres,
				videomemory);

	retval = register_framebuffer(info);
	if (retval < 0) {
		dl_err("register_framebuffer failed %d\n", retval);
		goto error;
	}
	registered = 1;

	for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
		device_create_file(info->dev, &fb_device_attrs[i]);

	device_create_bin_file(info->dev, &edid_attr);

	dl_err("DisplayLink USB device /dev/fb%d attached. %dx%d resolution."
			" Using %dK framebuffer memory\n", info->node,
			var->xres, var->yres,
			((dev->backing_buffer) ?
			videomemorysize * 2 : videomemorysize) >> 10);
	return 0;

error:
	if (dev) {
		if (registered) {
			/* fbdev owns one ref; destroy drops it */
			unregister_framebuffer(info);
			dlfb_ops_destroy(info);
		} else
			kref_put(&dev->kref, dlfb_delete);

		if (dev->urbs.count > 0)
			dlfb_free_urb_list(dev);
		kref_put(&dev->kref, dlfb_delete); /* last ref from kref_init */

		/* dev has been deallocated. Do not dereference */
	}

	return retval;
}
/*
 * USB disconnect: quiesce the device, tear down sysfs, drain urbs, and
 * unregister the framebuffer.  The final kref_put may free dev.
 *
 * Fix vs. original: the sysfs attribute removal dereferenced `info`
 * unconditionally even though the code below guards other uses with
 * `if (info)`; the removal is now inside the same guard so the null
 * check is consistent.
 */
static void dlfb_usb_disconnect(struct usb_interface *interface)
{
	struct dlfb_data *dev;
	struct fb_info *info;
	int i;

	dev = usb_get_intfdata(interface);
	info = dev->info;

	/* when non-active we'll update virtual framebuffer, but no new urbs */
	atomic_set(&dev->usb_active, 0);
	usb_set_intfdata(interface, NULL);

	if (info) {
		for (i = 0; i < ARRAY_SIZE(fb_device_attrs); i++)
			device_remove_file(info->dev, &fb_device_attrs[i]);
		device_remove_bin_file(info->dev, &edid_attr);
	}

	/* this function will wait for all in-flight urbs to complete */
	dlfb_free_urb_list(dev);

	if (info) {
		dl_notice("Detaching /dev/fb%d\n", info->node);
		unregister_framebuffer(info);
		dlfb_ops_destroy(info);
	}

	/* release reference taken by kref_init in probe() */
	kref_put(&dev->kref, dlfb_delete);

	/* consider dlfb_data freed */

	return;
}
/* USB driver glue; id_table is defined elsewhere in this file. */
static struct usb_driver dlfb_driver = {
	.name = "udlfb",
	.probe = dlfb_usb_probe,
	.disconnect = dlfb_usb_disconnect,
	.id_table = id_table,
};
/* Module load: register with the USB core; returns usb_register() result. */
static int __init dlfb_module_init(void)
{
	int res = usb_register(&dlfb_driver);

	if (res)
		err("usb_register failed. Error number %d", res);

	printk(KERN_INFO "VMODES initialized\n");

	return res;
}
/* Module unload: unregister the USB driver; per-device teardown happens
 * via dlfb_usb_disconnect(). */
static void __exit dlfb_module_exit(void)
{
	usb_deregister(&dlfb_driver);
}

module_init(dlfb_module_init);
module_exit(dlfb_module_exit);
/*
 * Bulk-write completion handler (may run in interrupt context).  Logs
 * unexpected errors (unlink faults during teardown are normal), then
 * returns the urb to the free list and releases one limit_sem slot so
 * a waiting writer can reuse it.
 */
static void dlfb_urb_completion(struct urb *urb)
{
	struct urb_node *unode = urb->context;
	struct dlfb_data *dev = unode->dev;
	unsigned long flags;

	/* sync/async unlink faults aren't errors */
	if (urb->status) {
		if (!(urb->status == -ENOENT ||
		    urb->status == -ECONNRESET ||
		    urb->status == -ESHUTDOWN)) {
			dl_err("%s - nonzero write bulk status received: %d\n",
				__func__, urb->status);
			atomic_set(&dev->lost_pixels, 1);
		}
	}

	urb->transfer_buffer_length = dev->urbs.size; /* reset to actual */

	spin_lock_irqsave(&dev->urbs.lock, flags);
	list_add_tail(&unode->entry, &dev->urbs.list);
	dev->urbs.available++;
	spin_unlock_irqrestore(&dev->urbs.lock, flags);

	/* up() is safe from interrupt context; pairs with down_timeout()
	 * in dlfb_get_urb()/dlfb_free_urb_list() */
	up(&dev->urbs.limit_sem);
}
/*
 * Wait for every outstanding render urb to complete (one limit_sem slot
 * per urb), then free each urb, its coherent buffer and its node.
 * Finally drops the kref taken in dlfb_alloc_urb_list().
 */
static void dlfb_free_urb_list(struct dlfb_data *dev)
{
	int count = dev->urbs.count;
	struct list_head *node;
	struct urb_node *unode;
	struct urb *urb;
	int ret;
	unsigned long flags;

	dl_notice("Waiting for completes and freeing all render urbs\n");

	/* keep waiting and freeing, until we've got 'em all */
	while (count--) {
		/* Timeout means a memory leak and/or fault */
		ret = down_timeout(&dev->urbs.limit_sem, FREE_URB_TIMEOUT);
		if (ret) {
			/* BUG_ON panics here; the break is unreachable and
			 * kept only as defensive fallthrough */
			BUG_ON(ret);
			break;
		}
		spin_lock_irqsave(&dev->urbs.lock, flags);

		node = dev->urbs.list.next; /* have reserved one with sem */
		list_del_init(node);

		spin_unlock_irqrestore(&dev->urbs.lock, flags);

		unode = list_entry(node, struct urb_node, entry);
		urb = unode->urb;

		/* Free each separately allocated piece */
		usb_free_coherent(urb->dev, dev->urbs.size,
				  urb->transfer_buffer, urb->transfer_dma);
		usb_free_urb(urb);
		/* NOTE(review): kfree() is given &unode->entry, so this
		 * assumes `entry` is the first member of struct urb_node —
		 * confirm against the struct definition */
		kfree(node);
	}

	kref_put(&dev->kref, dlfb_delete);
}
/*
 * Pre-allocate `count` bulk-write urbs of `size` bytes each (with
 * coherent DMA buffers) and park them on dev->urbs.list.  Returns the
 * number actually allocated, which may be fewer under memory pressure.
 *
 * Fix vs. original: the coherent buffer was allocated with MAX_TRANSFER
 * regardless of `size`, while usb_fill_bulk_urb() and dev->urbs.size
 * (used as the bound in dlfb_submit_urb and for freeing) use `size`.
 * Allocate `size` bytes so all three agree for any caller.
 */
static int dlfb_alloc_urb_list(struct dlfb_data *dev, int count, size_t size)
{
	int i = 0;
	struct urb *urb;
	struct urb_node *unode;
	char *buf;

	spin_lock_init(&dev->urbs.lock);

	dev->urbs.size = size;
	INIT_LIST_HEAD(&dev->urbs.list);

	while (i < count) {
		unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
		if (!unode)
			break;
		unode->dev = dev;

		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			kfree(unode);
			break;
		}
		unode->urb = urb;

		buf = usb_alloc_coherent(dev->udev, size, GFP_KERNEL,
					 &urb->transfer_dma);
		if (!buf) {
			kfree(unode);
			usb_free_urb(urb);
			break;
		}

		/* urb->transfer_buffer_length set to actual before submit */
		usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 1),
			buf, size, dlfb_urb_completion, unode);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

		list_add_tail(&unode->entry, &dev->urbs.list);

		i++;
	}

	sema_init(&dev->urbs.limit_sem, i);
	dev->urbs.count = i;
	dev->urbs.available = i;

	kref_get(&dev->kref); /* released in free_render_urbs() */

	dl_notice("allocated %d %d byte urbs\n", i, (int) size);

	return i;
}
/*
 * Reserve one free write urb, waiting up to GET_URB_TIMEOUT for a
 * completion to return one.  On timeout/interrupt the lost_pixels flag
 * is set and NULL is returned.
 */
static struct urb *dlfb_get_urb(struct dlfb_data *dev)
{
	int ret = 0;
	struct list_head *entry;
	struct urb_node *unode;
	struct urb *urb = NULL;
	unsigned long flags;

	/* Wait for an in-flight buffer to complete and get re-queued */
	ret = down_timeout(&dev->urbs.limit_sem, GET_URB_TIMEOUT);
	if (ret) {
		atomic_set(&dev->lost_pixels, 1);
		dl_err("wait for urb interrupted: %x\n", ret);
		goto error;
	}

	spin_lock_irqsave(&dev->urbs.lock, flags);

	BUG_ON(list_empty(&dev->urbs.list)); /* reserved one with limit_sem */
	entry = dev->urbs.list.next;
	list_del_init(entry);
	dev->urbs.available--;

	spin_unlock_irqrestore(&dev->urbs.lock, flags);

	unode = list_entry(entry, struct urb_node, entry);
	urb = unode->urb;

error:
	return urb;
}
/*
 * Submit a reserved write urb with `len` payload bytes.  On failure the
 * urb is recycled through the completion handler and lost_pixels is set.
 */
static int dlfb_submit_urb(struct dlfb_data *dev, struct urb *urb, size_t len)
{
	int status;

	BUG_ON(len > dev->urbs.size);

	/* set to actual payload len */
	urb->transfer_buffer_length = len;

	status = usb_submit_urb(urb, GFP_KERNEL);
	if (!status)
		return 0;

	dlfb_urb_completion(urb); /* because no one else will */
	atomic_set(&dev->lost_pixels, 1);
	dl_err("usb_submit_urb error %x\n", status);

	return status;
}
/* Module metadata exposed via modinfo. */
MODULE_AUTHOR("Roberto De Ioris <roberto@unbit.it>, "
	      "Jaya Kumar <jayakumar.lkml@gmail.com>, "
	      "Bernie Thompson <bernie@plugable.com>");
MODULE_DESCRIPTION("DisplayLink kernel framebuffer driver");
MODULE_LICENSE("GPL");
/*
*
* device driver for Conexant 2388x based TV cards
* video4linux video interface
*
* (c) 2003-04 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
*
* (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
* - Multituner support
* - video_ioctl2 conversion
* - PAL/M fixes
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <asm/div64.h>
#include "cx88.h"
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
MODULE_DESCRIPTION("v4l2 driver module for cx2388x based TV cards");
MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");

/* ------------------------------------------------------------------ */

/* Per-board device node number overrides; UNSET lets the v4l2 core pick. */
static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };

module_param_array(video_nr, int, NULL, 0444);
module_param_array(vbi_nr, int, NULL, 0444);
module_param_array(radio_nr, int, NULL, 0444);

MODULE_PARM_DESC(video_nr,"video device numbers");
MODULE_PARM_DESC(vbi_nr,"vbi device numbers");
MODULE_PARM_DESC(radio_nr,"radio device numbers");

/* Debug verbosity for the video path (0 = quiet). */
static unsigned int video_debug;
module_param(video_debug,int,0644);
MODULE_PARM_DESC(video_debug,"enable debug messages [video]");

/* Debug verbosity for the IRQ handler. */
static unsigned int irq_debug;
module_param(irq_debug,int,0644);
MODULE_PARM_DESC(irq_debug,"enable debug messages [IRQ handler]");

/* Upper bound on total capture buffer memory, in megabytes. */
static unsigned int vid_limit = 16;
module_param(vid_limit,int,0644);
MODULE_PARM_DESC(vid_limit,"capture memory limit in megabytes");
/*
 * Debug printk gated on the video_debug module parameter.  Expands to a
 * reference to a local variable named `core`, which must be in scope at
 * every call site.  Wrapped in do { } while (0) so the macro behaves as
 * a single statement — the original bare `if` silently captured a
 * following `else` (dangling-else hazard).
 */
#define dprintk(level,fmt, arg...)	do {				\
	if (video_debug >= level)					\
		printk(KERN_DEBUG "%s/0: " fmt, core->name , ## arg);	\
} while (0)
/* ------------------------------------------------------------------- */
/* static data */
/* Pixel formats supported by the cx2388x packed-video path.  cxformat is
 * the value programmed into MO_COLOR_CTRL; BSWAP/WSWAP select byte/word
 * endianness variants of the same base format. */
static struct cx8800_fmt formats[] = {
	{
		.name     = "8 bpp, gray",
		.fourcc   = V4L2_PIX_FMT_GREY,
		.cxformat = ColorFormatY8,
		.depth    = 8,
		.flags    = FORMAT_FLAGS_PACKED,
	},{
		.name     = "15 bpp RGB, le",
		.fourcc   = V4L2_PIX_FMT_RGB555,
		.cxformat = ColorFormatRGB15,
		.depth    = 16,
		.flags    = FORMAT_FLAGS_PACKED,
	},{
		.name     = "15 bpp RGB, be",
		.fourcc   = V4L2_PIX_FMT_RGB555X,
		.cxformat = ColorFormatRGB15 | ColorFormatBSWAP,
		.depth    = 16,
		.flags    = FORMAT_FLAGS_PACKED,
	},{
		.name     = "16 bpp RGB, le",
		.fourcc   = V4L2_PIX_FMT_RGB565,
		.cxformat = ColorFormatRGB16,
		.depth    = 16,
		.flags    = FORMAT_FLAGS_PACKED,
	},{
		.name     = "16 bpp RGB, be",
		.fourcc   = V4L2_PIX_FMT_RGB565X,
		.cxformat = ColorFormatRGB16 | ColorFormatBSWAP,
		.depth    = 16,
		.flags    = FORMAT_FLAGS_PACKED,
	},{
		.name     = "24 bpp RGB, le",
		.fourcc   = V4L2_PIX_FMT_BGR24,
		.cxformat = ColorFormatRGB24,
		.depth    = 24,
		.flags    = FORMAT_FLAGS_PACKED,
	},{
		.name     = "32 bpp RGB, le",
		.fourcc   = V4L2_PIX_FMT_BGR32,
		.cxformat = ColorFormatRGB32,
		.depth    = 32,
		.flags    = FORMAT_FLAGS_PACKED,
	},{
		.name     = "32 bpp RGB, be",
		.fourcc   = V4L2_PIX_FMT_RGB32,
		.cxformat = ColorFormatRGB32 | ColorFormatBSWAP | ColorFormatWSWAP,
		.depth    = 32,
		.flags    = FORMAT_FLAGS_PACKED,
	},{
		.name     = "4:2:2, packed, YUYV",
		.fourcc   = V4L2_PIX_FMT_YUYV,
		.cxformat = ColorFormatYUY2,
		.depth    = 16,
		.flags    = FORMAT_FLAGS_PACKED,
	},{
		.name     = "4:2:2, packed, UYVY",
		.fourcc   = V4L2_PIX_FMT_UYVY,
		.cxformat = ColorFormatYUY2 | ColorFormatBSWAP,
		.depth    = 16,
		.flags    = FORMAT_FLAGS_PACKED,
	},
};
static struct cx8800_fmt* format_by_fourcc(unsigned int fourcc)
{
unsigned int i;
for (i = 0; i < ARRAY_SIZE(formats); i++)
if (formats[i].fourcc == fourcc)
return formats+i;
return NULL;
}
/* ------------------------------------------------------------------- */
/* Returned for in-range control ids we do not implement: a disabled dummy. */
static const struct v4l2_queryctrl no_ctl = {
	.name  = "42",
	.flags = V4L2_CTRL_FLAG_DISABLED,
};

/* Table mapping V4L2 user controls onto cx2388x register fields.
 * `off` is the signed offset applied before writing, `mask`/`shift`
 * locate the field within `reg`; `sreg` is the shadow copy for
 * write-only audio registers. */
static struct cx88_ctrl cx8800_ctls[] = {
	/* --- video --- */
	{
		.v = {
			.id            = V4L2_CID_BRIGHTNESS,
			.name          = "Brightness",
			.minimum       = 0x00,
			.maximum       = 0xff,
			.step          = 1,
			.default_value = 0x7f,
			.type          = V4L2_CTRL_TYPE_INTEGER,
		},
		.off                   = 128,
		.reg                   = MO_CONTR_BRIGHT,
		.mask                  = 0x00ff,
		.shift                 = 0,
	},{
		.v = {
			.id            = V4L2_CID_CONTRAST,
			.name          = "Contrast",
			.minimum       = 0,
			.maximum       = 0xff,
			.step          = 1,
			.default_value = 0x3f,
			.type          = V4L2_CTRL_TYPE_INTEGER,
		},
		.off                   = 0,
		.reg                   = MO_CONTR_BRIGHT,
		.mask                  = 0xff00,
		.shift                 = 8,
	},{
		.v = {
			.id            = V4L2_CID_HUE,
			.name          = "Hue",
			.minimum       = 0,
			.maximum       = 0xff,
			.step          = 1,
			.default_value = 0x7f,
			.type          = V4L2_CTRL_TYPE_INTEGER,
		},
		.off                   = 128,
		.reg                   = MO_HUE,
		.mask                  = 0x00ff,
		.shift                 = 0,
	},{
		/* strictly, this only describes only U saturation.
		 * V saturation is handled specially through code.
		 */
		.v = {
			.id            = V4L2_CID_SATURATION,
			.name          = "Saturation",
			.minimum       = 0,
			.maximum       = 0xff,
			.step          = 1,
			.default_value = 0x7f,
			.type          = V4L2_CTRL_TYPE_INTEGER,
		},
		.off                   = 0,
		.reg                   = MO_UV_SATURATION,
		.mask                  = 0x00ff,
		.shift                 = 0,
	},{
		.v = {
			.id            = V4L2_CID_CHROMA_AGC,
			.name          = "Chroma AGC",
			.minimum       = 0,
			.maximum       = 1,
			.default_value = 0x1,
			.type          = V4L2_CTRL_TYPE_BOOLEAN,
		},
		.reg                   = MO_INPUT_FORMAT,
		.mask                  = 1 << 10,
		.shift                 = 10,
	}, {
		.v = {
			.id            = V4L2_CID_COLOR_KILLER,
			.name          = "Color killer",
			.minimum       = 0,
			.maximum       = 1,
			.default_value = 0x1,
			.type          = V4L2_CTRL_TYPE_BOOLEAN,
		},
		.reg                   = MO_INPUT_FORMAT,
		.mask                  = 1 << 9,
		.shift                 = 9,
	}, {
	/* --- audio --- */
		.v = {
			.id            = V4L2_CID_AUDIO_MUTE,
			.name          = "Mute",
			.minimum       = 0,
			.maximum       = 1,
			.default_value = 1,
			.type          = V4L2_CTRL_TYPE_BOOLEAN,
		},
		.reg                   = AUD_VOL_CTL,
		.sreg                  = SHADOW_AUD_VOL_CTL,
		.mask                  = (1 << 6),
		.shift                 = 6,
	},{
		.v = {
			.id            = V4L2_CID_AUDIO_VOLUME,
			.name          = "Volume",
			.minimum       = 0,
			.maximum       = 0x3f,
			.step          = 1,
			.default_value = 0x3f,
			.type          = V4L2_CTRL_TYPE_INTEGER,
		},
		.reg                   = AUD_VOL_CTL,
		.sreg                  = SHADOW_AUD_VOL_CTL,
		.mask                  = 0x3f,
		.shift                 = 0,
	},{
		.v = {
			.id            = V4L2_CID_AUDIO_BALANCE,
			.name          = "Balance",
			.minimum       = 0,
			.maximum       = 0x7f,
			.step          = 1,
			.default_value = 0x40,
			.type          = V4L2_CTRL_TYPE_INTEGER,
		},
		.reg                   = AUD_BAL_CTL,
		.sreg                  = SHADOW_AUD_BAL_CTL,
		.mask                  = 0x7f,
		.shift                 = 0,
	}
};
static const int CX8800_CTLS = ARRAY_SIZE(cx8800_ctls);

/* Must be sorted from low to high control ID! */
const u32 cx88_user_ctrls[] = {
	V4L2_CID_USER_CLASS,
	V4L2_CID_BRIGHTNESS,
	V4L2_CID_CONTRAST,
	V4L2_CID_SATURATION,
	V4L2_CID_HUE,
	V4L2_CID_AUDIO_VOLUME,
	V4L2_CID_AUDIO_BALANCE,
	V4L2_CID_AUDIO_MUTE,
	V4L2_CID_CHROMA_AGC,
	V4L2_CID_COLOR_KILLER,
	0
};
EXPORT_SYMBOL(cx88_user_ctrls);

/* NULL-terminated list of sorted control-id arrays, one per class. */
static const u32 *ctrl_classes[] = {
	cx88_user_ctrls,
	NULL
};
int cx8800_ctrl_query(struct cx88_core *core, struct v4l2_queryctrl *qctrl)
{
int i;
if (qctrl->id < V4L2_CID_BASE ||
qctrl->id >= V4L2_CID_LASTP1)
return -EINVAL;
for (i = 0; i < CX8800_CTLS; i++)
if (cx8800_ctls[i].v.id == qctrl->id)
break;
if (i == CX8800_CTLS) {
*qctrl = no_ctl;
return 0;
}
*qctrl = cx8800_ctls[i].v;
/* Report chroma AGC as inactive when SECAM is selected */
if (cx8800_ctls[i].v.id == V4L2_CID_CHROMA_AGC &&
core->tvnorm & V4L2_STD_SECAM)
qctrl->flags |= V4L2_CTRL_FLAG_INACTIVE;
return 0;
}
EXPORT_SYMBOL(cx8800_ctrl_query);
/* ------------------------------------------------------------------- */
/* resource management */
/*
 * Try to acquire the given resource bit(s) for this filehandle.
 * Returns 1 if the handle already owns them or they were free (and are
 * now owned), 0 if another handle holds them.  dev->resources is
 * protected by core->lock.
 */
static int res_get(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bit)
{
	struct cx88_core *core = dev->core;

	if (fh->resources & bit)
		/* have it already allocated */
		return 1;

	/* is it free? */
	mutex_lock(&core->lock);
	if (dev->resources & bit) {
		/* no, someone else uses it */
		mutex_unlock(&core->lock);
		return 0;
	}
	/* it's free, grab it */
	fh->resources |= bit;
	dev->resources |= bit;
	dprintk(1,"res: get %d\n",bit);
	mutex_unlock(&core->lock);
	return 1;
}
/* Nonzero iff this filehandle already owns the given resource bit(s). */
static int res_check(struct cx8800_fh *fh, unsigned int bit)
{
	return fh->resources & bit;
}
/* Nonzero iff any filehandle currently owns the given resource bit(s). */
static int res_locked(struct cx8800_dev *dev, unsigned int bit)
{
	return dev->resources & bit;
}
/* Release resource bits obtained via res_get(); the caller must own all
 * of them (BUG otherwise).  Protected by core->lock. */
static
void res_free(struct cx8800_dev *dev, struct cx8800_fh *fh, unsigned int bits)
{
	struct cx88_core *core = dev->core;

	BUG_ON((fh->resources & bits) != bits);

	mutex_lock(&core->lock);
	fh->resources &= ~bits;
	dev->resources &= ~bits;
	dprintk(1,"res: put %d\n",bits);
	mutex_unlock(&core->lock);
}
/* ------------------------------------------------------------------ */
/*
 * Route the selected input through the video mux: program the vmux
 * bits, the board-specific GPIOs, the S-Video AFE/filter setup, and any
 * external-ADC audio routing the board requires.
 */
int cx88_video_mux(struct cx88_core *core, unsigned int input)
{
	/* struct cx88_core *core = dev->core; */
	dprintk(1,"video_mux: %d [vmux=%d,gpio=0x%x,0x%x,0x%x,0x%x]\n",
		input, INPUT(input).vmux,
		INPUT(input).gpio0,INPUT(input).gpio1,
		INPUT(input).gpio2,INPUT(input).gpio3);
	core->input = input;
	cx_andor(MO_INPUT_FORMAT, 0x03 << 14, INPUT(input).vmux << 14);
	cx_write(MO_GP3_IO, INPUT(input).gpio3);
	cx_write(MO_GP0_IO, INPUT(input).gpio0);
	cx_write(MO_GP1_IO, INPUT(input).gpio1);
	cx_write(MO_GP2_IO, INPUT(input).gpio2);

	switch (INPUT(input).type) {
	case CX88_VMUX_SVIDEO:
		cx_set(MO_AFECFG_IO, 0x00000001);
		cx_set(MO_INPUT_FORMAT, 0x00010010);
		cx_set(MO_FILTER_EVEN, 0x00002020);
		cx_set(MO_FILTER_ODD, 0x00002020);
		break;
	default:
		cx_clear(MO_AFECFG_IO, 0x00000001);
		cx_clear(MO_INPUT_FORMAT, 0x00010010);
		cx_clear(MO_FILTER_EVEN, 0x00002020);
		cx_clear(MO_FILTER_ODD, 0x00002020);
		break;
	}

	/* if there are audioroutes defined, we have an external
	   ADC to deal with audio */
	if (INPUT(input).audioroute) {
		/* The wm8775 module has the "2" route hardwired into
		   the initialization. Some boards may use different
		   routes for different inputs. HVR-1300 surely does */
		if (core->board.audio_chip &&
		    core->board.audio_chip == V4L2_IDENT_WM8775) {
			call_all(core, audio, s_routing,
				 INPUT(input).audioroute, 0, 0);
		}
		/* cx2388's C-ADC is connected to the tuner only.
		   When used with S-Video, that ADC is busy dealing with
		   chroma, so an external must be used for baseband audio */
		if (INPUT(input).type != CX88_VMUX_TELEVISION &&
		    INPUT(input).type != CX88_VMUX_CABLE) {
			/* "I2S ADC mode" */
			core->tvaudio = WW_I2SADC;
			cx88_set_tvaudio(core);
		} else {
			/* Normal mode */
			cx_write(AUD_I2SCNTL, 0x0);
			cx_clear(AUD_CTL, EN_I2SIN_ENABLE);
		}
	}

	return 0;
}
EXPORT_SYMBOL(cx88_video_mux);
/* ------------------------------------------------------------------ */
/* Program the SRAM channel, scaler and color format for `buf`, then
 * unmask the video interrupts and kick off capture DMA for queue `q`. */
static int start_video_dma(struct cx8800_dev *dev,
			   struct cx88_dmaqueue *q,
			   struct cx88_buffer *buf)
{
	struct cx88_core *core = dev->core;

	/* setup fifo + format */
	cx88_sram_channel_setup(core, &cx88_sram_channels[SRAM_CH21],
				buf->bpl, buf->risc.dma);
	cx88_set_scale(core, buf->vb.width, buf->vb.height, buf->vb.field);
	cx_write(MO_COLOR_CTRL, buf->fmt->cxformat | ColorFormatGamma);

	/* reset counter */
	cx_write(MO_VIDY_GPCNTRL,GP_COUNT_CONTROL_RESET);
	q->count = 1;

	/* enable irqs */
	cx_set(MO_PCI_INTMSK, core->pci_irqmask | PCI_INT_VIDINT);

	/* Enables corresponding bits at PCI_INT_STAT:
		bits 0 to 4: video, audio, transport stream, VIP, Host
		bit 7: timer
		bits 8 and 9: DMA complete for: SRC, DST
		bits 10 and 11: BERR signal asserted for RISC: RD, WR
		bits 12 to 15: BERR signal asserted for: BRDG, SRC, DST, IPB
	 */
	cx_set(MO_VID_INTMSK, 0x0f0011);

	/* enable capture */
	cx_set(VID_CAPTURE_CONTROL,0x06);

	/* start dma */
	cx_set(MO_DEV_CNTRL2, (1<<5));
	cx_set(MO_VID_DMACNTRL, 0x11); /* Planar Y and packed FIFO and RISC enable */

	return 0;
}
#ifdef CONFIG_PM
/* Reverse of start_video_dma(): halt DMA and capture, mask video irqs.
 * Only used on the suspend path. */
static int stop_video_dma(struct cx8800_dev *dev)
{
	struct cx88_core *core = dev->core;

	/* stop dma */
	cx_clear(MO_VID_DMACNTRL, 0x11);

	/* disable capture */
	cx_clear(VID_CAPTURE_CONTROL,0x06);

	/* disable irqs */
	cx_clear(MO_PCI_INTMSK, PCI_INT_VIDINT);
	cx_clear(MO_VID_INTMSK, 0x0f0011);
	return 0;
}
#endif
/*
 * Restart capture after an error/timeout: re-arm DMA for the active
 * list if it is non-empty, otherwise promote compatible buffers from
 * the queued list onto the active RISC chain.
 */
static int restart_video_queue(struct cx8800_dev *dev,
			       struct cx88_dmaqueue *q)
{
	struct cx88_core *core = dev->core;
	struct cx88_buffer *buf, *prev;

	if (!list_empty(&q->active)) {
		buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
		dprintk(2,"restart_queue [%p/%d]: restart dma\n",
			buf, buf->vb.i);
		start_video_dma(dev, q, buf);
		list_for_each_entry(buf, &q->active, vb.queue)
			buf->count = q->count++;
		mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
		return 0;
	}

	prev = NULL;
	for (;;) {
		if (list_empty(&q->queued))
			return 0;
		buf = list_entry(q->queued.next, struct cx88_buffer, vb.queue);
		if (NULL == prev) {
			/* first buffer: start a fresh DMA run */
			list_move_tail(&buf->vb.queue, &q->active);
			start_video_dma(dev, q, buf);
			buf->vb.state = VIDEOBUF_ACTIVE;
			buf->count = q->count++;
			mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
			dprintk(2,"[%p/%d] restart_queue - first active\n",
				buf,buf->vb.i);

		} else if (prev->vb.width == buf->vb.width &&
			   prev->vb.height == buf->vb.height &&
			   prev->fmt == buf->fmt) {
			/* same geometry: splice onto the live RISC chain */
			list_move_tail(&buf->vb.queue, &q->active);
			buf->vb.state = VIDEOBUF_ACTIVE;
			buf->count = q->count++;
			prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
			dprintk(2,"[%p/%d] restart_queue - move to active\n",
				buf,buf->vb.i);
		} else {
			/* incompatible geometry: leave for a later restart */
			return 0;
		}
		prev = buf;
	}
}
/* ------------------------------------------------------------------ */
/*
 * videobuf setup: report the per-frame byte size for the current
 * format/geometry and clamp the buffer count so total memory stays
 * under the vid_limit module parameter (in MB).
 */
static int
buffer_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size)
{
	struct cx8800_fh *fh = q->priv_data;
	unsigned int max_bytes = vid_limit * 1024 * 1024;

	*size = (fh->fmt->depth * fh->width * fh->height) >> 3;

	if (*count == 0)
		*count = 32;

	if (*size * *count > max_bytes)
		*count = max_bytes / *size;

	return 0;
}
/*
 * videobuf prepare: validate the requested geometry against the current
 * TV norm, lock down the buffer pages, and (re)build the RISC DMA
 * program matching the requested field layout.
 */
static int
buffer_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
	       enum v4l2_field field)
{
	struct cx8800_fh *fh = q->priv_data;
	struct cx8800_dev *dev = fh->dev;
	struct cx88_core *core = dev->core;
	struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb);
	struct videobuf_dmabuf *dma=videobuf_to_dma(&buf->vb);
	int rc, init_buffer = 0;

	BUG_ON(NULL == fh->fmt);
	if (fh->width < 48 || fh->width > norm_maxw(core->tvnorm) ||
	    fh->height < 32 || fh->height > norm_maxh(core->tvnorm))
		return -EINVAL;
	buf->vb.size = (fh->width * fh->height * fh->fmt->depth) >> 3;
	if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size)
		return -EINVAL;

	/* geometry/format changed since last prepare -> rebuild RISC */
	if (buf->fmt != fh->fmt ||
	    buf->vb.width != fh->width ||
	    buf->vb.height != fh->height ||
	    buf->vb.field != field) {
		buf->fmt = fh->fmt;
		buf->vb.width = fh->width;
		buf->vb.height = fh->height;
		buf->vb.field = field;
		init_buffer = 1;
	}

	if (VIDEOBUF_NEEDS_INIT == buf->vb.state) {
		init_buffer = 1;
		if (0 != (rc = videobuf_iolock(q,&buf->vb,NULL)))
			goto fail;
	}

	if (init_buffer) {
		buf->bpl = buf->vb.width * buf->fmt->depth >> 3;
		/* each case passes top/bottom field offsets and per-line
		 * padding appropriate for the field layout */
		switch (buf->vb.field) {
		case V4L2_FIELD_TOP:
			cx88_risc_buffer(dev->pci, &buf->risc,
					 dma->sglist, 0, UNSET,
					 buf->bpl, 0, buf->vb.height);
			break;
		case V4L2_FIELD_BOTTOM:
			cx88_risc_buffer(dev->pci, &buf->risc,
					 dma->sglist, UNSET, 0,
					 buf->bpl, 0, buf->vb.height);
			break;
		case V4L2_FIELD_INTERLACED:
			cx88_risc_buffer(dev->pci, &buf->risc,
					 dma->sglist, 0, buf->bpl,
					 buf->bpl, buf->bpl,
					 buf->vb.height >> 1);
			break;
		case V4L2_FIELD_SEQ_TB:
			cx88_risc_buffer(dev->pci, &buf->risc,
					 dma->sglist,
					 0, buf->bpl * (buf->vb.height >> 1),
					 buf->bpl, 0,
					 buf->vb.height >> 1);
			break;
		case V4L2_FIELD_SEQ_BT:
			cx88_risc_buffer(dev->pci, &buf->risc,
					 dma->sglist,
					 buf->bpl * (buf->vb.height >> 1), 0,
					 buf->bpl, 0,
					 buf->vb.height >> 1);
			break;
		default:
			BUG();
		}
	}
	dprintk(2,"[%p/%d] buffer_prepare - %dx%d %dbpp \"%s\" - dma=0x%08lx\n",
		buf, buf->vb.i,
		fh->width, fh->height, fh->fmt->depth, fh->fmt->name,
		(unsigned long)buf->risc.dma);

	buf->vb.state = VIDEOBUF_PREPARED;
	return 0;

 fail:
	cx88_free_buffer(q,buf);
	return rc;
}
/*
 * videobuf queue: chain the buffer's RISC program into the running DMA
 * stream when it is compatible with the current chain, otherwise park
 * it on the queued list for a later restart.
 */
static void
buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb)
{
	struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb);
	struct cx88_buffer *prev;
	struct cx8800_fh *fh = vq->priv_data;
	struct cx8800_dev *dev = fh->dev;
	struct cx88_core *core = dev->core;
	struct cx88_dmaqueue *q = &dev->vidq;

	/* add jump to stopper */
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);

	if (!list_empty(&q->queued)) {
		/* others already waiting: keep FIFO order */
		list_add_tail(&buf->vb.queue,&q->queued);
		buf->vb.state = VIDEOBUF_QUEUED;
		dprintk(2,"[%p/%d] buffer_queue - append to queued\n",
			buf, buf->vb.i);

	} else if (list_empty(&q->active)) {
		/* nothing running: start a fresh DMA run */
		list_add_tail(&buf->vb.queue,&q->active);
		start_video_dma(dev, q, buf);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count = q->count++;
		mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
		dprintk(2,"[%p/%d] buffer_queue - first active\n",
			buf, buf->vb.i);

	} else {
		prev = list_entry(q->active.prev, struct cx88_buffer, vb.queue);
		if (prev->vb.width == buf->vb.width &&
		    prev->vb.height == buf->vb.height &&
		    prev->fmt == buf->fmt) {
			/* same geometry: splice onto the live RISC chain */
			list_add_tail(&buf->vb.queue,&q->active);
			buf->vb.state = VIDEOBUF_ACTIVE;
			buf->count = q->count++;
			prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
			dprintk(2,"[%p/%d] buffer_queue - append to active\n",
				buf, buf->vb.i);
		} else {
			list_add_tail(&buf->vb.queue,&q->queued);
			buf->vb.state = VIDEOBUF_QUEUED;
			dprintk(2,"[%p/%d] buffer_queue - first queued\n",
				buf, buf->vb.i);
		}
	}
}
/* videobuf release: free the buffer's pages and its RISC program. */
static void buffer_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
	struct cx88_buffer *buf = container_of(vb,struct cx88_buffer,vb);

	cx88_free_buffer(q,buf);
}
/* Videobuf operations for the video capture queue. */
static struct videobuf_queue_ops cx8800_video_qops = {
.buf_setup    = buffer_setup,
.buf_prepare  = buffer_prepare,
.buf_queue    = buffer_queue,
.buf_release  = buffer_release,
};
/* ------------------------------------------------------------------ */
/* ------------------------------------------------------------------ */
/* Map a filehandle's stream type to its videobuf queue. */
static struct videobuf_queue* get_queue(struct cx8800_fh *fh)
{
	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return &fh->vidq;
	if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
		return &fh->vbiq;
	BUG();		/* filehandle types are fixed at open time */
	return NULL;
}
/* Map a filehandle's stream type to its exclusive-access resource bit.
 * (name kept as-is -- "ressource" -- for existing callers) */
static int get_ressource(struct cx8800_fh *fh)
{
	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return RESOURCE_VIDEO;
	if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE)
		return RESOURCE_VBI;
	BUG();
	return 0;
}
/*
 * v4l2 open for the video, vbi and radio device nodes.
 * Allocates per-filehandle state with 320x240 BGR24 defaults, sets up
 * both videobuf queues, and for the radio node programs the board GPIOs
 * and audio path for FM (or I2S ADC) mode.  Serialized with the BKL.
 */
static int video_open(struct file *file)
{
struct video_device *vdev = video_devdata(file);
struct cx8800_dev *dev = video_drvdata(file);
struct cx88_core *core;
struct cx8800_fh *fh;
enum v4l2_buf_type type = 0;
int radio = 0;
/* the node type that was opened determines the stream type */
switch (vdev->vfl_type) {
case VFL_TYPE_GRABBER:
type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
break;
case VFL_TYPE_VBI:
type = V4L2_BUF_TYPE_VBI_CAPTURE;
break;
case VFL_TYPE_RADIO:
radio = 1;
break;
}
lock_kernel();
core = dev->core;
dprintk(1, "open dev=%s radio=%d type=%s\n",
video_device_node_name(vdev), radio, v4l2_type_names[type]);
/* allocate + initialize per filehandle data */
fh = kzalloc(sizeof(*fh),GFP_KERNEL);
if (NULL == fh) {
unlock_kernel();
return -ENOMEM;
}
file->private_data = fh;
fh->dev      = dev;
fh->radio    = radio;
fh->type     = type;
fh->width    = 320;
fh->height   = 240;
fh->fmt      = format_by_fourcc(V4L2_PIX_FMT_BGR24);
/* both queues are initialized regardless of node type; only the
* one matching fh->type will actually be used */
videobuf_queue_sg_init(&fh->vidq, &cx8800_video_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VIDEO_CAPTURE,
V4L2_FIELD_INTERLACED,
sizeof(struct cx88_buffer),
fh);
videobuf_queue_sg_init(&fh->vbiq, &cx8800_vbi_qops,
&dev->pci->dev, &dev->slock,
V4L2_BUF_TYPE_VBI_CAPTURE,
V4L2_FIELD_SEQ_TB,
sizeof(struct cx88_buffer),
fh);
if (fh->radio) {
dprintk(1,"video_open: setting radio device\n");
/* route the board's radio GPIO configuration */
cx_write(MO_GP3_IO, core->board.radio.gpio3);
cx_write(MO_GP0_IO, core->board.radio.gpio0);
cx_write(MO_GP1_IO, core->board.radio.gpio1);
cx_write(MO_GP2_IO, core->board.radio.gpio2);
if (core->board.radio.audioroute) {
if(core->board.audio_chip &&
core->board.audio_chip == V4L2_IDENT_WM8775) {
call_all(core, audio, s_routing,
core->board.radio.audioroute, 0, 0);
}
/* "I2S ADC mode" */
core->tvaudio = WW_I2SADC;
cx88_set_tvaudio(core);
} else {
/* FM Mode */
core->tvaudio = WW_FM;
cx88_set_tvaudio(core);
cx88_set_stereo(core,V4L2_TUNER_MODE_STEREO,1);
}
call_all(core, tuner, s_radio);
}
unlock_kernel();
atomic_inc(&core->users);
return 0;
}
/*
 * read() on a video or vbi node.  Video reads are rejected while another
 * filehandle holds the streaming resource; vbi reads claim the vbi
 * resource for this filehandle.
 */
static ssize_t
video_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
	struct cx8800_fh *fh = file->private_data;
	int nonblock = file->f_flags & O_NONBLOCK;

	if (fh->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		if (res_locked(fh->dev, RESOURCE_VIDEO))
			return -EBUSY;
		return videobuf_read_one(&fh->vidq, data, count, ppos,
					 nonblock);
	}
	if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) {
		if (!res_get(fh->dev, fh, RESOURCE_VBI))
			return -EBUSY;
		return videobuf_read_stream(&fh->vbiq, data, count, ppos, 1,
					    nonblock);
	}
	BUG();
	return 0;
}
/*
 * poll() on a video or vbi node.  VBI is delegated to the videobuf core.
 * For video we pick the buffer to wait on: head of the streaming queue
 * if this filehandle is streaming, otherwise the read() buffer.
 * Returns POLLERR when there is nothing to wait on.
 */
static unsigned int
video_poll(struct file *file, struct poll_table_struct *wait)
{
struct cx8800_fh *fh = file->private_data;
struct cx88_buffer *buf;
unsigned int rc = POLLERR;
if (V4L2_BUF_TYPE_VBI_CAPTURE == fh->type) {
if (!res_get(fh->dev,fh,RESOURCE_VBI))
return POLLERR;
return videobuf_poll_stream(file, &fh->vbiq, wait);
}
mutex_lock(&fh->vidq.vb_lock);
if (res_check(fh,RESOURCE_VIDEO)) {
/* streaming capture */
if (list_empty(&fh->vidq.stream))
goto done;
buf = list_entry(fh->vidq.stream.next,struct cx88_buffer,vb.stream);
} else {
/* read() capture */
buf = (struct cx88_buffer*)fh->vidq.read_buf;
if (NULL == buf)
goto done;
}
/* readable once the buffer completed (or errored) */
poll_wait(file, &buf->vb.done, wait);
if (buf->vb.state == VIDEOBUF_DONE ||
buf->vb.state == VIDEOBUF_ERROR)
rc = POLLIN|POLLRDNORM;
else
rc = 0;
done:
mutex_unlock(&fh->vidq.vb_lock);
return rc;
}
/*
 * Close a video/vbi/radio filehandle: release any held resources,
 * cancel running captures, free buffers and mappings, and power the
 * chip down when the last user goes away.
 */
static int video_release(struct file *file)
{
struct cx8800_fh *fh = file->private_data;
struct cx8800_dev *dev = fh->dev;
/* turn off overlay */
if (res_check(fh, RESOURCE_OVERLAY)) {
/* FIXME */
res_free(dev,fh,RESOURCE_OVERLAY);
}
/* stop video capture */
if (res_check(fh, RESOURCE_VIDEO)) {
videobuf_queue_cancel(&fh->vidq);
res_free(dev,fh,RESOURCE_VIDEO);
}
/* free the lingering read() buffer, if any */
if (fh->vidq.read_buf) {
buffer_release(&fh->vidq,fh->vidq.read_buf);
kfree(fh->vidq.read_buf);
}
/* stop vbi capture */
if (res_check(fh, RESOURCE_VBI)) {
videobuf_stop(&fh->vbiq);
res_free(dev,fh,RESOURCE_VBI);
}
videobuf_mmap_free(&fh->vidq);
videobuf_mmap_free(&fh->vbiq);
file->private_data = NULL;
kfree(fh);
/* last close powers down the subdevices */
mutex_lock(&dev->core->lock);
if(atomic_dec_and_test(&dev->core->users))
call_all(dev->core, core, s_power, 0);
mutex_unlock(&dev->core->lock);
return 0;
}
/* mmap: hand off to the videobuf queue that matches the stream type. */
static int
video_mmap(struct file *file, struct vm_area_struct * vma)
{
	struct cx8800_fh *handle = file->private_data;

	return videobuf_mmap_mapper(get_queue(handle), vma);
}
/* ------------------------------------------------------------------ */
/* VIDEO CTRL IOCTLS */
/*
 * Read a V4L2 control value back from the chip.
 * Looks the control up in the cx8800_ctls table (the scan keeps going,
 * so with duplicate ids the last table entry would win), reads the
 * (possibly shadowed) register and undoes the hardware encoding.
 * Returns 0 on success, -EINVAL for an unknown control id.
 */
int cx88_get_control (struct cx88_core *core, struct v4l2_control *ctl)
{
struct cx88_ctrl *c = NULL;
u32 value;
int i;
for (i = 0; i < CX8800_CTLS; i++)
if (cx8800_ctls[i].v.id == ctl->id)
c = &cx8800_ctls[i];
if (unlikely(NULL == c))
return -EINVAL;
/* shadowed registers are read from the software copy */
value = c->sreg ? cx_sread(c->sreg) : cx_read(c->reg);
switch (ctl->id) {
case V4L2_CID_AUDIO_BALANCE:
/* inverse of the encoding applied in cx88_set_control() */
ctl->value = ((value & 0x7f) < 0x40) ? ((value & 0x7f) + 0x40)
: (0x7f - (value & 0x7f));
break;
case V4L2_CID_AUDIO_VOLUME:
/* hardware stores attenuation, user API wants volume */
ctl->value = 0x3f - (value & 0x3f);
break;
default:
ctl->value = ((value + (c->off << c->shift)) & c->mask) >> c->shift;
break;
}
dprintk(1,"get_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
ctl->id, c->v.name, ctl->value, c->reg,
value,c->mask, c->sreg ? " [shadowed]" : "");
return 0;
}
EXPORT_SYMBOL(cx88_get_control);
/*
 * Write a V4L2 control value to the chip.
 * Clamps the value to the control's declared range, applies the
 * per-control hardware encoding and read-modify-writes the register
 * (through the shadow copy when one exists).
 * Returns 0 on success, -EINVAL for unknown ids or chroma AGC on SECAM.
 */
int cx88_set_control(struct cx88_core *core, struct v4l2_control *ctl)
{
struct cx88_ctrl *c = NULL;
u32 value,mask;
int i;
for (i = 0; i < CX8800_CTLS; i++) {
if (cx8800_ctls[i].v.id == ctl->id) {
c = &cx8800_ctls[i];
}
}
if (unlikely(NULL == c))
return -EINVAL;
/* clamp to the advertised control range */
if (ctl->value < c->v.minimum)
ctl->value = c->v.minimum;
if (ctl->value > c->v.maximum)
ctl->value = c->v.maximum;
mask=c->mask;
switch (ctl->id) {
case V4L2_CID_AUDIO_BALANCE:
value = (ctl->value < 0x40) ? (0x7f - ctl->value) : (ctl->value - 0x40);
break;
case V4L2_CID_AUDIO_VOLUME:
/* hardware register holds attenuation, not volume */
value = 0x3f - (ctl->value & 0x3f);
break;
case V4L2_CID_SATURATION:
/* special v_sat handling */
value = ((ctl->value - c->off) << c->shift) & c->mask;
if (core->tvnorm & V4L2_STD_SECAM) {
/* For SECAM, both U and V sat should be equal */
value=value<<8|value;
} else {
/* Keeps U Saturation proportional to V Sat */
value=(value*0x5a)/0x7f<<8|value;
}
/* widen the mask: both U and V bytes are written */
mask=0xffff;
break;
case V4L2_CID_CHROMA_AGC:
/* Do not allow chroma AGC to be enabled for SECAM */
value = ((ctl->value - c->off) << c->shift) & c->mask;
if (core->tvnorm & V4L2_STD_SECAM && value)
return -EINVAL;
break;
default:
value = ((ctl->value - c->off) << c->shift) & c->mask;
break;
}
dprintk(1,"set_control id=0x%X(%s) ctrl=0x%02x, reg=0x%02x val=0x%02x (mask 0x%02x)%s\n",
ctl->id, c->v.name, ctl->value, c->reg, value,
mask, c->sreg ? " [shadowed]" : "");
if (c->sreg) {
cx_sandor(c->sreg, c->reg, mask, value);
} else {
cx_andor(c->reg, mask, value);
}
return 0;
}
EXPORT_SYMBOL(cx88_set_control);
static void init_controls(struct cx88_core *core)
{
struct v4l2_control ctrl;
int i;
for (i = 0; i < CX8800_CTLS; i++) {
ctrl.id=cx8800_ctls[i].v.id;
ctrl.value=cx8800_ctls[i].v.default_value;
cx88_set_control(core, &ctrl);
}
}
/* ------------------------------------------------------------------ */
/* VIDEO IOCTLS */
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx8800_fh *fh = priv;
f->fmt.pix.width = fh->width;
f->fmt.pix.height = fh->height;
f->fmt.pix.field = fh->vidq.field;
f->fmt.pix.pixelformat = fh->fmt->fourcc;
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fh->fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
return 0;
}
/*
 * VIDIOC_TRY_FMT: negotiate a capture format without applying it.
 * Resolves V4L2_FIELD_ANY by height, halves the height limit for
 * single-field captures, and rounds width/height into the norm's
 * valid range.  The adjusted format is written back into *f.
 */
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;
struct cx8800_fmt *fmt;
enum v4l2_field   field;
unsigned int      maxw, maxh;
fmt = format_by_fourcc(f->fmt.pix.pixelformat);
if (NULL == fmt)
return -EINVAL;
field = f->fmt.pix.field;
maxw  = norm_maxw(core->tvnorm);
maxh  = norm_maxh(core->tvnorm);
if (V4L2_FIELD_ANY == field) {
/* pick interlaced only when a single field can't cover it */
field = (f->fmt.pix.height > maxh/2)
? V4L2_FIELD_INTERLACED
: V4L2_FIELD_BOTTOM;
}
switch (field) {
case V4L2_FIELD_TOP:
case V4L2_FIELD_BOTTOM:
/* a single field has only half the scan lines */
maxh = maxh / 2;
break;
case V4L2_FIELD_INTERLACED:
break;
default:
return -EINVAL;
}
f->fmt.pix.field = field;
/* clamp and align: width multiple of 4 (align 2 bits), min 48x32 */
v4l_bound_align_image(&f->fmt.pix.width, 48, maxw, 2,
&f->fmt.pix.height, 32, maxh, 0, 0);
f->fmt.pix.bytesperline =
(f->fmt.pix.width * fmt->depth) >> 3;
f->fmt.pix.sizeimage =
f->fmt.pix.height * f->fmt.pix.bytesperline;
return 0;
}
/* VIDIOC_S_FMT: validate via try_fmt, then latch the result into fh. */
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct cx8800_fh *fh = priv;
	int rc;

	rc = vidioc_try_fmt_vid_cap(file, priv, f);
	if (rc)
		return rc;

	fh->fmt        = format_by_fourcc(f->fmt.pix.pixelformat);
	fh->width      = f->fmt.pix.width;
	fh->height     = f->fmt.pix.height;
	fh->vidq.field = f->fmt.pix.field;
	return 0;
}
/*
 * VIDIOC_QUERYCAP: identify the driver and advertise capabilities.
 * Uses bounded copies into the fixed-size v4l2_capability arrays
 * instead of strcpy/sprintf, so oversized board names or bus ids can
 * never overflow them.
 */
static int vidioc_querycap (struct file *file, void  *priv,
					struct v4l2_capability *cap)
{
	struct cx8800_dev *dev  = ((struct cx8800_fh *)priv)->dev;
	struct cx88_core  *core = dev->core;

	strlcpy(cap->driver, "cx8800", sizeof(cap->driver));
	strlcpy(cap->card, core->board.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "PCI:%s", pci_name(dev->pci));
	cap->version = CX88_VERSION_CODE;
	cap->capabilities =
		V4L2_CAP_VIDEO_CAPTURE |
		V4L2_CAP_READWRITE     |
		V4L2_CAP_STREAMING     |
		V4L2_CAP_VBI_CAPTURE;
	/* tuner capability only when the board actually has one */
	if (UNSET != core->board.tuner_type)
		cap->capabilities |= V4L2_CAP_TUNER;
	return 0;
}
static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
if (unlikely(f->index >= ARRAY_SIZE(formats)))
return -EINVAL;
strlcpy(f->description,formats[f->index].name,sizeof(f->description));
f->pixelformat = formats[f->index].fourcc;
return 0;
}
#ifdef CONFIG_VIDEO_V4L1_COMPAT
/* V4L1 compatibility: describe an 8-buffer mmap layout. */
static int vidiocgmbuf (struct file *file, void *priv, struct video_mbuf *mbuf)
{
	return videobuf_cgmbuf(get_queue((struct cx8800_fh *)priv), mbuf, 8);
}
#endif
/* VIDIOC_REQBUFS: forward to videobuf for the matching queue. */
static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p)
{
	struct cx8800_fh *handle = priv;

	return videobuf_reqbufs(get_queue(handle), p);
}
/* VIDIOC_QUERYBUF: forward to videobuf for the matching queue. */
static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct cx8800_fh *handle = priv;

	return videobuf_querybuf(get_queue(handle), p);
}
/* VIDIOC_QBUF: forward to videobuf for the matching queue. */
static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct cx8800_fh *handle = priv;

	return videobuf_qbuf(get_queue(handle), p);
}
/* VIDIOC_DQBUF: forward to videobuf, honoring O_NONBLOCK. */
static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct cx8800_fh *handle = priv;
	int nonblock = file->f_flags & O_NONBLOCK;

	return videobuf_dqbuf(get_queue(handle), p, nonblock);
}
/*
 * VIDIOC_STREAMON: claim the stream's exclusive resource and start
 * streaming on the matching videobuf queue.
 */
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct cx8800_fh *fh = priv;
struct cx8800_dev *dev = fh->dev;
/* We should remember that this driver also supports teletext, */
/* so we have to test if the v4l2_buf_type is VBI capture data. */
if (unlikely((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(fh->type != V4L2_BUF_TYPE_VBI_CAPTURE)))
return -EINVAL;
/* the requested type must match what the node was opened for */
if (unlikely(i != fh->type))
return -EINVAL;
if (unlikely(!res_get(dev,fh,get_ressource(fh))))
return -EBUSY;
return videobuf_streamon(get_queue(fh));
}
/*
 * VIDIOC_STREAMOFF: stop streaming on the matching queue and release
 * the exclusive resource so other filehandles may stream.
 */
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
struct cx8800_fh  *fh = priv;
struct cx8800_dev *dev = fh->dev;
int err, res;
if ((fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) &&
(fh->type != V4L2_BUF_TYPE_VBI_CAPTURE))
return -EINVAL;
if (i != fh->type)
return -EINVAL;
res = get_ressource(fh);
err = videobuf_streamoff(get_queue(fh));
if (err < 0)
return err;
/* only drop the resource once streaming actually stopped */
res_free(dev,fh,res);
return 0;
}
/* VIDIOC_S_STD: switch the TV norm under the core lock. */
static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *tvnorms)
{
	struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;

	mutex_lock(&core->lock);
	cx88_set_tvnorm(core, *tvnorms);
	mutex_unlock(&core->lock);

	return 0;
}
/* only one input in this sample driver */
/*
 * VIDIOC_ENUM_INPUT backend shared by the cx88 drivers.
 * Describes input i->index from the board's INPUT() table; television
 * and cable muxes are reported as tuner inputs, everything else as
 * camera inputs.  Boards have at most 4 input slots.
 */
int cx88_enum_input (struct cx88_core  *core,struct v4l2_input *i)
{
static const char *iname[] = {
[ CX88_VMUX_COMPOSITE1 ] = "Composite1",
[ CX88_VMUX_COMPOSITE2 ] = "Composite2",
[ CX88_VMUX_COMPOSITE3 ] = "Composite3",
[ CX88_VMUX_COMPOSITE4 ] = "Composite4",
[ CX88_VMUX_SVIDEO     ] = "S-Video",
[ CX88_VMUX_TELEVISION ] = "Television",
[ CX88_VMUX_CABLE      ] = "Cable TV",
[ CX88_VMUX_DVB        ] = "DVB",
[ CX88_VMUX_DEBUG      ] = "for debug only",
};
unsigned int n = i->index;
/* 4 == number of input slots in the board tables */
if (n >= 4)
return -EINVAL;
/* slot exists but is unpopulated on this board */
if (0 == INPUT(n).type)
return -EINVAL;
i->type  = V4L2_INPUT_TYPE_CAMERA;
strcpy(i->name,iname[INPUT(n).type]);
if ((CX88_VMUX_TELEVISION == INPUT(n).type) ||
(CX88_VMUX_CABLE      == INPUT(n).type))
i->type = V4L2_INPUT_TYPE_TUNER;
i->std = CX88_NORMS;
return 0;
}
EXPORT_SYMBOL(cx88_enum_input);
/* VIDIOC_ENUM_INPUT: thin wrapper over the shared core helper. */
static int vidioc_enum_input (struct file *file, void *priv,
				struct v4l2_input *i)
{
	struct cx8800_fh *handle = priv;

	return cx88_enum_input(handle->dev->core, i);
}
/* VIDIOC_G_INPUT: report the currently selected input mux. */
static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
{
	struct cx8800_fh *handle = priv;

	*i = handle->dev->core->input;
	return 0;
}
/* VIDIOC_S_INPUT: switch the video mux under the core lock. */
static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
{
	struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;

	if (i >= 4)		/* boards have at most 4 input slots */
		return -EINVAL;

	mutex_lock(&core->lock);
	cx88_newstation(core);
	cx88_video_mux(core, i);
	mutex_unlock(&core->lock);

	return 0;
}
/* VIDIOC_QUERYCTRL: resolve NEXT_CTRL ids, then describe the control. */
static int vidioc_queryctrl (struct file *file, void *priv,
				struct v4l2_queryctrl *qctrl)
{
	struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;

	qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
	if (unlikely(qctrl->id == 0))
		return -EINVAL;

	return cx8800_ctrl_query(core, qctrl);
}
/* VIDIOC_G_CTRL: thin wrapper over the shared core helper. */
static int vidioc_g_ctrl (struct file *file, void *priv,
				struct v4l2_control *ctl)
{
	struct cx8800_fh *handle = priv;

	return cx88_get_control(handle->dev->core, ctl);
}
/* VIDIOC_S_CTRL: thin wrapper over the shared core helper. */
static int vidioc_s_ctrl (struct file *file, void *priv,
				struct v4l2_control *ctl)
{
	struct cx8800_fh *handle = priv;

	return cx88_set_control(handle->dev->core, ctl);
}
/*
 * VIDIOC_G_TUNER for the TV node: report stereo mode and a binary
 * signal indication read from the chip's device status register.
 */
static int vidioc_g_tuner (struct file *file, void *priv,
				struct v4l2_tuner *t)
{
struct cx88_core  *core = ((struct cx8800_fh *)priv)->dev->core;
u32 reg;
if (unlikely(UNSET == core->board.tuner_type))
return -EINVAL;
/* only one tuner per board */
if (0 != t->index)
return -EINVAL;
strcpy(t->name, "Television");
t->type       = V4L2_TUNER_ANALOG_TV;
t->capability = V4L2_TUNER_CAP_NORM;
t->rangehigh  = 0xffffffffUL;
cx88_get_stereo(core ,t);
/* bit 5 of the status register indicates signal lock; report
* it as full-scale or zero signal strength */
reg = cx_read(MO_DEVICE_STATUS);
t->signal = (reg & (1<<5)) ? 0xffff : 0x0000;
return 0;
}
/* VIDIOC_S_TUNER for the TV node: apply the requested audio mode. */
static int vidioc_s_tuner (struct file *file, void *priv,
				struct v4l2_tuner *t)
{
	struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;

	if (UNSET == core->board.tuner_type)
		return -EINVAL;
	if (0 != t->index)	/* only one tuner */
		return -EINVAL;

	cx88_set_stereo(core, t->audmode, 1);
	return 0;
}
/* VIDIOC_G_FREQUENCY: report the cached frequency, letting the tuner
 * subdevice override it with its own current setting. */
static int vidioc_g_frequency (struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct cx8800_fh *fh = priv;
	struct cx88_core *core = fh->dev->core;

	if (unlikely(UNSET == core->board.tuner_type))
		return -EINVAL;

	f->type = fh->radio ? V4L2_TUNER_RADIO : V4L2_TUNER_ANALOG_TV;
	f->frequency = core->freq;

	call_all(core, tuner, g_frequency, f);

	return 0;
}
/*
 * Tune to a new frequency (shared by the cx88 drivers).
 * Updates the cached frequency, pushes it to the tuner subdevice and
 * re-initializes TV audio, all under the core lock.
 */
int cx88_set_freq (struct cx88_core  *core,
struct v4l2_frequency *f)
{
if (unlikely(UNSET == core->board.tuner_type))
return -EINVAL;
if (unlikely(f->tuner != 0))
return -EINVAL;
mutex_lock(&core->lock);
core->freq = f->frequency;
cx88_newstation(core);
call_all(core, tuner, s_frequency, f);
/* When changing channels it is required to reset TVAUDIO */
msleep (10);
cx88_set_tvaudio(core);
mutex_unlock(&core->lock);
return 0;
}
EXPORT_SYMBOL(cx88_set_freq);
/* VIDIOC_S_FREQUENCY: check tuner type against the node, then tune. */
static int vidioc_s_frequency (struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct cx8800_fh *fh = priv;
	struct cx88_core *core = fh->dev->core;

	/* TV nodes accept only analog-TV requests, radio nodes only
	 * radio requests */
	if (unlikely(0 == fh->radio && f->type != V4L2_TUNER_ANALOG_TV))
		return -EINVAL;
	if (unlikely(1 == fh->radio && f->type != V4L2_TUNER_RADIO))
		return -EINVAL;

	return cx88_set_freq(core, f);
}
#ifdef CONFIG_VIDEO_ADV_DEBUG
/*
 * VIDIOC_DBG_G_REGISTER (debug builds): read a raw chip register.
 * Note: the match check previously contained a mis-encoded '®'
 * character where "&reg" belonged, which is not valid C.
 */
static int vidioc_g_register (struct file *file, void *fh,
				struct v4l2_dbg_register *reg)
{
	struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core;

	if (!v4l2_chip_match_host(&reg->match))
		return -EINVAL;
	/* cx2388x has a 24-bit register space */
	reg->val = cx_read(reg->reg & 0xffffff);
	reg->size = 4;
	return 0;
}
/*
 * VIDIOC_DBG_S_REGISTER (debug builds): write a raw chip register.
 * Note: the match check previously contained a mis-encoded '®'
 * character where "&reg" belonged, which is not valid C.
 */
static int vidioc_s_register (struct file *file, void *fh,
				struct v4l2_dbg_register *reg)
{
	struct cx88_core *core = ((struct cx8800_fh*)fh)->dev->core;

	if (!v4l2_chip_match_host(&reg->match))
		return -EINVAL;
	/* cx2388x has a 24-bit register space */
	cx_write(reg->reg & 0xffffff, reg->val);
	return 0;
}
#endif
/* ----------------------------------------------------------- */
/* RADIO ESPECIFIC IOCTLS */
/* ----------------------------------------------------------- */
/*
 * VIDIOC_QUERYCAP for the radio node.  Uses bounded copies into the
 * fixed-size v4l2_capability arrays instead of strcpy/sprintf, so
 * oversized board names or bus ids can never overflow them.
 */
static int radio_querycap (struct file *file, void *priv,
					struct v4l2_capability *cap)
{
	struct cx8800_dev *dev  = ((struct cx8800_fh *)priv)->dev;
	struct cx88_core  *core = dev->core;

	strlcpy(cap->driver, "cx8800", sizeof(cap->driver));
	strlcpy(cap->card, core->board.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "PCI:%s", pci_name(dev->pci));
	cap->version = CX88_VERSION_CODE;
	cap->capabilities = V4L2_CAP_TUNER;
	return 0;
}
/* VIDIOC_G_TUNER for the radio node: delegate details to the tuner. */
static int radio_g_tuner (struct file *file, void *priv,
				struct v4l2_tuner *t)
{
	struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;

	if (unlikely(t->index > 0))	/* single tuner only */
		return -EINVAL;

	strcpy(t->name, "Radio");
	t->type = V4L2_TUNER_RADIO;

	call_all(core, tuner, g_tuner, t);
	return 0;
}
/* VIDIOC_ENUM_INPUT for radio: a single fixed tuner input. */
static int radio_enum_input (struct file *file, void *priv,
				struct v4l2_input *i)
{
	if (i->index != 0)
		return -EINVAL;

	strcpy(i->name, "Radio");
	i->type = V4L2_INPUT_TYPE_TUNER;

	return 0;
}
/* VIDIOC_G_AUDIO for radio: a single fixed audio input. */
static int radio_g_audio (struct file *file, void *priv, struct v4l2_audio *a)
{
	if (unlikely(a->index))
		return -EINVAL;

	strcpy(a->name, "Radio");
	return 0;
}
/* FIXME: Should add a standard for radio */
/* VIDIOC_S_TUNER for radio: pass the request through to the tuner. */
static int radio_s_tuner (struct file *file, void *priv,
				struct v4l2_tuner *t)
{
	struct cx88_core *core = ((struct cx8800_fh *)priv)->dev->core;

	if (0 != t->index)
		return -EINVAL;

	call_all(core, tuner, s_tuner, t);
	return 0;
}
/* VIDIOC_S_AUDIO for radio: nothing selectable, accept silently. */
static int radio_s_audio (struct file *file, void *fh,
			  struct v4l2_audio *a)
{
	return 0;
}
/* VIDIOC_S_INPUT for radio: nothing selectable, accept silently. */
static int radio_s_input (struct file *file, void *fh, unsigned int i)
{
	return 0;
}
/*
 * VIDIOC_QUERYCTRL for the radio node: only the mute control is real;
 * all other user-class controls are reported as the disabled no_ctl
 * placeholder, ids outside the user class are rejected.
 */
static int radio_queryctrl (struct file *file, void *priv,
			    struct v4l2_queryctrl *c)
{
int i;
if (c->id <  V4L2_CID_BASE ||
c->id >= V4L2_CID_LASTP1)
return -EINVAL;
if (c->id == V4L2_CID_AUDIO_MUTE) {
/* find the mute entry in the control table */
for (i = 0; i < CX8800_CTLS; i++) {
if (cx8800_ctls[i].v.id == c->id)
break;
}
if (i == CX8800_CTLS)
return -EINVAL;
*c = cx8800_ctls[i].v;
} else
*c = no_ctl;
return 0;
}
/* ----------------------------------------------------------- */
/*
 * Video DMA watchdog.  Fires when a queued buffer did not complete in
 * time: dump the SRAM channel for diagnosis, stop the DMA engine, fail
 * every active buffer (waking its waiters) and try to restart the
 * queue with whatever is still pending.
 */
static void cx8800_vid_timeout(unsigned long data)
{
struct cx8800_dev *dev = (struct cx8800_dev*)data;
struct cx88_core *core = dev->core;
struct cx88_dmaqueue *q = &dev->vidq;
struct cx88_buffer *buf;
unsigned long flags;
cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]);
/* stop the video DMA engine */
cx_clear(MO_VID_DMACNTRL, 0x11);
cx_clear(VID_CAPTURE_CONTROL, 0x06);
spin_lock_irqsave(&dev->slock,flags);
while (!list_empty(&q->active)) {
buf = list_entry(q->active.next, struct cx88_buffer, vb.queue);
list_del(&buf->vb.queue);
buf->vb.state = VIDEOBUF_ERROR;
wake_up(&buf->vb.done);
printk("%s/0: [%p/%d] timeout - dma=0x%08lx\n", core->name,
buf, buf->vb.i, (unsigned long)buf->risc.dma);
}
restart_video_queue(dev,q);
spin_unlock_irqrestore(&dev->slock,flags);
}
/* Names for the video interrupt status bits, indexed by bit position
 * (used only for debug printing of MO_VID_INTSTAT). */
static char *cx88_vid_irqs[32] = {
"y_risci1", "u_risci1", "v_risci1", "vbi_risc1",
"y_risci2", "u_risci2", "v_risci2", "vbi_risc2",
"y_oflow",  "u_oflow",  "v_oflow",  "vbi_oflow",
"y_sync",   "u_sync",   "v_sync",   "vbi_sync",
"opc_err",  "par_err",  "rip_err",  "pci_abort",
};
/*
 * Service the video-function interrupt: ack the status bits, then
 * dispatch on each source -- RISC opcode errors stop the engine,
 * completion interrupts wake finished buffers, and "stopper" hits
 * (the RISC program ran off the end of the chain) restart the queues.
 */
static void cx8800_vid_irq(struct cx8800_dev *dev)
{
struct cx88_core *core = dev->core;
u32 status, mask, count;
status = cx_read(MO_VID_INTSTAT);
mask   = cx_read(MO_VID_INTMSK);
if (0 == (status & mask))
return;
/* ack all asserted bits */
cx_write(MO_VID_INTSTAT, status);
if (irq_debug  ||  (status & mask & ~0xff))
cx88_print_irqbits(core->name, "irq vid",
cx88_vid_irqs, ARRAY_SIZE(cx88_vid_irqs),
status, mask);
/* risc op code error */
if (status & (1 << 16)) {
printk(KERN_WARNING "%s/0: video risc op code error\n",core->name);
cx_clear(MO_VID_DMACNTRL, 0x11);
cx_clear(VID_CAPTURE_CONTROL, 0x06);
cx88_sram_channel_dump(core, &cx88_sram_channels[SRAM_CH21]);
}
/* risc1 y */
if (status & 0x01) {
spin_lock(&dev->slock);
count = cx_read(MO_VIDY_GPCNT);
cx88_wakeup(core, &dev->vidq, count);
spin_unlock(&dev->slock);
}
/* risc1 vbi */
if (status & 0x08) {
spin_lock(&dev->slock);
count = cx_read(MO_VBI_GPCNT);
cx88_wakeup(core, &dev->vbiq, count);
spin_unlock(&dev->slock);
}
/* risc2 y */
if (status & 0x10) {
dprintk(2,"stopper video\n");
spin_lock(&dev->slock);
restart_video_queue(dev,&dev->vidq);
spin_unlock(&dev->slock);
}
/* risc2 vbi */
if (status & 0x80) {
dprintk(2,"stopper vbi\n");
spin_lock(&dev->slock);
cx8800_restart_vbi_queue(dev,&dev->vbiq);
spin_unlock(&dev->slock);
}
}
/*
 * Top-level (shared) interrupt handler for the video PCI function.
 * Loops a bounded number of times so a stuck source cannot wedge the
 * CPU; if the chip is still asserting after 10 rounds, mask everything
 * off.  (Also drops a stray ";" that followed the for-loop body.)
 */
static irqreturn_t cx8800_irq(int irq, void *dev_id)
{
	struct cx8800_dev *dev = dev_id;
	struct cx88_core *core = dev->core;
	u32 status;
	int loop, handled = 0;

	for (loop = 0; loop < 10; loop++) {
		status = cx_read(MO_PCI_INTSTAT) &
			(core->pci_irqmask | PCI_INT_VIDINT);
		if (0 == status)
			goto out;
		cx_write(MO_PCI_INTSTAT, status);	/* ack */
		handled = 1;

		if (status & core->pci_irqmask)
			cx88_core_irq(core, status);
		if (status & PCI_INT_VIDINT)
			cx8800_vid_irq(dev);
	}
	if (10 == loop) {
		printk(KERN_WARNING "%s/0: irq loop -- clearing mask\n",
		       core->name);
		cx_write(MO_PCI_INTMSK, 0);
	}

 out:
	return IRQ_RETVAL(handled);
}
/* ----------------------------------------------------------- */
/* exported stuff */
/* File operations for the video and vbi device nodes. */
static const struct v4l2_file_operations video_fops =
{
.owner	       = THIS_MODULE,
.open	       = video_open,
.release       = video_release,
.read	       = video_read,
.poll          = video_poll,
.mmap	       = video_mmap,
.ioctl	       = video_ioctl2,
};
/* ioctl dispatch table for the video and vbi device nodes. */
static const struct v4l2_ioctl_ops video_ioctl_ops = {
.vidioc_querycap      = vidioc_querycap,
.vidioc_enum_fmt_vid_cap  = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap     = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap   = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap     = vidioc_s_fmt_vid_cap,
/* vbi formats are handled by the shared cx8800 vbi helper */
.vidioc_g_fmt_vbi_cap     = cx8800_vbi_fmt,
.vidioc_try_fmt_vbi_cap   = cx8800_vbi_fmt,
.vidioc_s_fmt_vbi_cap     = cx8800_vbi_fmt,
.vidioc_reqbufs       = vidioc_reqbufs,
.vidioc_querybuf      = vidioc_querybuf,
.vidioc_qbuf          = vidioc_qbuf,
.vidioc_dqbuf         = vidioc_dqbuf,
.vidioc_s_std         = vidioc_s_std,
.vidioc_enum_input    = vidioc_enum_input,
.vidioc_g_input       = vidioc_g_input,
.vidioc_s_input       = vidioc_s_input,
.vidioc_queryctrl     = vidioc_queryctrl,
.vidioc_g_ctrl        = vidioc_g_ctrl,
.vidioc_s_ctrl        = vidioc_s_ctrl,
.vidioc_streamon      = vidioc_streamon,
.vidioc_streamoff     = vidioc_streamoff,
#ifdef CONFIG_VIDEO_V4L1_COMPAT
.vidiocgmbuf          = vidiocgmbuf,
#endif
.vidioc_g_tuner       = vidioc_g_tuner,
.vidioc_s_tuner       = vidioc_s_tuner,
.vidioc_g_frequency   = vidioc_g_frequency,
.vidioc_s_frequency   = vidioc_s_frequency,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register    = vidioc_g_register,
.vidioc_s_register    = vidioc_s_register,
#endif
};
/* The vbi template is cloned from the video template at probe time. */
static struct video_device cx8800_vbi_template;
/* Template for the video capture device node. */
static struct video_device cx8800_video_template = {
.name                 = "cx8800-video",
.fops                 = &video_fops,
.ioctl_ops 	      = &video_ioctl_ops,
.tvnorms              = CX88_NORMS,
.current_norm         = V4L2_STD_NTSC_M,
};
/* File operations for the radio device node (no read/poll/mmap). */
static const struct v4l2_file_operations radio_fops =
{
.owner         = THIS_MODULE,
.open          = video_open,
.release       = video_release,
.ioctl         = video_ioctl2,
};
/* ioctl dispatch table for the radio device node. */
static const struct v4l2_ioctl_ops radio_ioctl_ops = {
.vidioc_querycap      = radio_querycap,
.vidioc_g_tuner       = radio_g_tuner,
.vidioc_enum_input    = radio_enum_input,
.vidioc_g_audio       = radio_g_audio,
.vidioc_s_tuner       = radio_s_tuner,
.vidioc_s_audio       = radio_s_audio,
.vidioc_s_input       = radio_s_input,
.vidioc_queryctrl     = radio_queryctrl,
/* control get/set are shared with the video node */
.vidioc_g_ctrl        = vidioc_g_ctrl,
.vidioc_s_ctrl        = vidioc_s_ctrl,
.vidioc_g_frequency   = vidioc_g_frequency,
.vidioc_s_frequency   = vidioc_s_frequency,
#ifdef CONFIG_VIDEO_ADV_DEBUG
.vidioc_g_register    = vidioc_g_register,
.vidioc_s_register    = vidioc_s_register,
#endif
};
/* Template for the radio device node. */
static struct video_device cx8800_radio_template = {
.name                 = "cx8800-radio",
.fops                 = &radio_fops,
.ioctl_ops 	      = &radio_ioctl_ops,
};
/* ----------------------------------------------------------- */
/*
 * Release one video_device: unregister it if it made it to userspace,
 * otherwise just drop the allocation; clear the pointer either way.
 * (Factored out of cx8800_unregister_video, which repeated the same
 * stanza three times.)
 */
static void cx8800_release_vdev(struct video_device **vdev)
{
	if (NULL == *vdev)
		return;
	if (video_is_registered(*vdev))
		video_unregister_device(*vdev);
	else
		video_device_release(*vdev);
	*vdev = NULL;
}

/* Tear down all v4l device nodes (radio, vbi, video) of this card. */
static void cx8800_unregister_video(struct cx8800_dev *dev)
{
	cx8800_release_vdev(&dev->radio_dev);
	cx8800_release_vdev(&dev->vbi_dev);
	cx8800_release_vdev(&dev->video_dev);
}
/*
 * PCI probe: bring up one cx2388x video function.
 * Allocates the device struct, attaches the shared cx88 core, sets up
 * the video and vbi DMA queues, requests the (shared) IRQ, loads audio
 * helper modules, registers the v4l2 device nodes, and applies the
 * initial norm / control / mux configuration.  On failure, unwinds via
 * the fail_* labels in reverse order of acquisition.
 */
static int __devinit cx8800_initdev(struct pci_dev *pci_dev,
const struct pci_device_id *pci_id)
{
struct cx8800_dev *dev;
struct cx88_core *core;
int err;
dev = kzalloc(sizeof(*dev),GFP_KERNEL);
if (NULL == dev)
return -ENOMEM;
/* pci init */
dev->pci = pci_dev;
if (pci_enable_device(pci_dev)) {
err = -EIO;
goto fail_free;
}
/* attach (or create) the core shared with the other functions
* of this chip */
core = cx88_core_get(dev->pci);
if (NULL == core) {
err = -EINVAL;
goto fail_free;
}
dev->core = core;
/* print pci info */
pci_read_config_byte(pci_dev, PCI_CLASS_REVISION, &dev->pci_rev);
pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER,  &dev->pci_lat);
printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
"latency: %d, mmio: 0x%llx\n", core->name,
pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
dev->pci_lat,(unsigned long long)pci_resource_start(pci_dev,0));
pci_set_master(pci_dev);
if (!pci_dma_supported(pci_dev,DMA_BIT_MASK(32))) {
printk("%s/0: Oops: no 32bit PCI DMA ???\n",core->name);
err = -EIO;
goto fail_core;
}
/* Initialize VBI template */
memcpy( &cx8800_vbi_template, &cx8800_video_template,
sizeof(cx8800_vbi_template) );
strcpy(cx8800_vbi_template.name,"cx8800-vbi");
/* initialize driver struct */
spin_lock_init(&dev->slock);
core->tvnorm = cx8800_video_template.current_norm;
/* init video dma queues */
INIT_LIST_HEAD(&dev->vidq.active);
INIT_LIST_HEAD(&dev->vidq.queued);
dev->vidq.timeout.function = cx8800_vid_timeout;
dev->vidq.timeout.data     = (unsigned long)dev;
init_timer(&dev->vidq.timeout);
cx88_risc_stopper(dev->pci,&dev->vidq.stopper,
MO_VID_DMACNTRL,0x11,0x00);
/* init vbi dma queues */
INIT_LIST_HEAD(&dev->vbiq.active);
INIT_LIST_HEAD(&dev->vbiq.queued);
dev->vbiq.timeout.function = cx8800_vbi_timeout;
dev->vbiq.timeout.data     = (unsigned long)dev;
init_timer(&dev->vbiq.timeout);
cx88_risc_stopper(dev->pci,&dev->vbiq.stopper,
MO_VID_DMACNTRL,0x88,0x00);
/* get irq */
err = request_irq(pci_dev->irq, cx8800_irq,
IRQF_SHARED | IRQF_DISABLED, core->name, dev);
if (err < 0) {
printk(KERN_ERR "%s/0: can't get IRQ %d\n",
core->name,pci_dev->irq);
goto fail_core;
}
cx_set(MO_PCI_INTMSK, core->pci_irqmask);
/* load and configure helper modules */
if (core->board.audio_chip == V4L2_IDENT_WM8775)
v4l2_i2c_new_subdev(&core->v4l2_dev, &core->i2c_adap,
"wm8775", "wm8775", 0x36 >> 1, NULL);
if (core->board.audio_chip == V4L2_IDENT_TVAUDIO) {
/* This probes for a tda9874 as is used on some
Pixelview Ultra boards. */
v4l2_i2c_new_subdev(&core->v4l2_dev,
&core->i2c_adap,
"tvaudio", "tvaudio", 0, I2C_ADDRS(0xb0 >> 1));
}
/* board-specific extras: RTC chip and/or IR keyboard */
switch (core->boardnr) {
case CX88_BOARD_DVICO_FUSIONHDTV_5_GOLD:
case CX88_BOARD_DVICO_FUSIONHDTV_7_GOLD: {
static struct i2c_board_info rtc_info = {
I2C_BOARD_INFO("isl1208", 0x6f)
};
request_module("rtc-isl1208");
core->i2c_rtc = i2c_new_device(&core->i2c_adap, &rtc_info);
}
/* break intentionally omitted */
case CX88_BOARD_DVICO_FUSIONHDTV_5_PCI_NANO:
request_module("ir-kbd-i2c");
}
/* register v4l devices */
dev->video_dev = cx88_vdev_init(core,dev->pci,
&cx8800_video_template,"video");
video_set_drvdata(dev->video_dev, dev);
err = video_register_device(dev->video_dev,VFL_TYPE_GRABBER,
video_nr[core->nr]);
if (err < 0) {
printk(KERN_ERR "%s/0: can't register video device\n",
core->name);
goto fail_unreg;
}
printk(KERN_INFO "%s/0: registered device %s [v4l2]\n",
core->name, video_device_node_name(dev->video_dev));
dev->vbi_dev = cx88_vdev_init(core,dev->pci,&cx8800_vbi_template,"vbi");
video_set_drvdata(dev->vbi_dev, dev);
err = video_register_device(dev->vbi_dev,VFL_TYPE_VBI,
vbi_nr[core->nr]);
if (err < 0) {
printk(KERN_ERR "%s/0: can't register vbi device\n",
core->name);
goto fail_unreg;
}
printk(KERN_INFO "%s/0: registered device %s\n",
core->name, video_device_node_name(dev->vbi_dev));
if (core->board.radio.type == CX88_RADIO) {
dev->radio_dev = cx88_vdev_init(core,dev->pci,
&cx8800_radio_template,"radio");
video_set_drvdata(dev->radio_dev, dev);
err = video_register_device(dev->radio_dev,VFL_TYPE_RADIO,
radio_nr[core->nr]);
if (err < 0) {
printk(KERN_ERR "%s/0: can't register radio device\n",
core->name);
goto fail_unreg;
}
printk(KERN_INFO "%s/0: registered device %s\n",
core->name, video_device_node_name(dev->radio_dev));
}
/* everything worked */
pci_set_drvdata(pci_dev,dev);
/* initial device configuration */
mutex_lock(&core->lock);
cx88_set_tvnorm(core,core->tvnorm);
init_controls(core);
cx88_video_mux(core,0);
mutex_unlock(&core->lock);
/* start tvaudio thread */
if (core->board.tuner_type != TUNER_ABSENT) {
core->kthread = kthread_run(cx88_audio_thread, core, "cx88 tvaudio");
if (IS_ERR(core->kthread)) {
err = PTR_ERR(core->kthread);
printk(KERN_ERR "%s/0: failed to create cx88 audio thread, err=%d\n",
core->name, err);
}
}
return 0;
fail_unreg:
cx8800_unregister_video(dev);
free_irq(pci_dev->irq, dev);
fail_core:
cx88_core_put(core,dev->pci);
fail_free:
kfree(dev);
return err;
}
/*
 * PCI remove: stop the audio thread and IR, shut the chip down, then
 * release IRQ, device nodes, RISC memory and the shared core, in that
 * order.
 */
static void __devexit cx8800_finidev(struct pci_dev *pci_dev)
{
struct cx8800_dev *dev = pci_get_drvdata(pci_dev);
struct cx88_core *core = dev->core;
/* stop thread */
if (core->kthread) {
kthread_stop(core->kthread);
core->kthread = NULL;
}
if (core->ir)
cx88_ir_stop(core);
cx88_shutdown(core); /* FIXME */
pci_disable_device(pci_dev);
/* unregister stuff */
free_irq(pci_dev->irq, dev);
cx8800_unregister_video(dev);
pci_set_drvdata(pci_dev, NULL);
/* free memory */
btcx_riscmem_free(dev->pci,&dev->vidq.stopper);
cx88_core_put(core,dev->pci);
kfree(dev);
}
#ifdef CONFIG_PM
/*
 * PM suspend: quiesce any running video/vbi DMA (the active lists are
 * kept so capture can be restarted on resume), stop IR, shut the chip
 * down, and put the PCI device into the target power state.
 */
static int cx8800_suspend(struct pci_dev *pci_dev, pm_message_t state)
{
struct cx8800_dev *dev = pci_get_drvdata(pci_dev);
struct cx88_core *core = dev->core;
/* stop video+vbi capture */
spin_lock(&dev->slock);
if (!list_empty(&dev->vidq.active)) {
printk("%s/0: suspend video\n", core->name);
stop_video_dma(dev);
del_timer(&dev->vidq.timeout);
}
if (!list_empty(&dev->vbiq.active)) {
printk("%s/0: suspend vbi\n", core->name);
cx8800_stop_vbi_dma(dev);
del_timer(&dev->vbiq.timeout);
}
spin_unlock(&dev->slock);
if (core->ir)
cx88_ir_stop(core);
/* FIXME -- shutdown device */
cx88_shutdown(core);
pci_save_state(pci_dev);
/* remember if the device had to be disabled so resume can
* re-enable it */
if (0 != pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state))) {
pci_disable_device(pci_dev);
dev->state.disabled = 1;
}
return 0;
}
/*
 * PM resume: re-enable the PCI device if suspend disabled it, restore
 * state, reset and re-arm the chip, and restart any captures that
 * were active at suspend time.
 */
static int cx8800_resume(struct pci_dev *pci_dev)
{
struct cx8800_dev *dev = pci_get_drvdata(pci_dev);
struct cx88_core *core = dev->core;
int err;
if (dev->state.disabled) {
err=pci_enable_device(pci_dev);
if (err) {
printk(KERN_ERR "%s/0: can't enable device\n",
core->name);
return err;
}
dev->state.disabled = 0;
}
err= pci_set_power_state(pci_dev, PCI_D0);
if (err) {
printk(KERN_ERR "%s/0: can't set power state\n", core->name);
pci_disable_device(pci_dev);
dev->state.disabled = 1;
return err;
}
pci_restore_state(pci_dev);
/* FIXME: re-initialize hardware */
cx88_reset(core);
if (core->ir)
cx88_ir_start(core);
/* re-enable the PCI interrupt sources we use */
cx_set(MO_PCI_INTMSK, core->pci_irqmask);
/* restart video+vbi capture */
spin_lock(&dev->slock);
if (!list_empty(&dev->vidq.active)) {
printk("%s/0: resume video\n", core->name);
restart_video_queue(dev,&dev->vidq);
}
if (!list_empty(&dev->vbiq.active)) {
printk("%s/0: resume vbi\n", core->name);
cx8800_restart_vbi_queue(dev,&dev->vbiq);
}
spin_unlock(&dev->slock);
return 0;
}
#endif
/* ----------------------------------------------------------- */
/*
 * PCI ID table: match any Conexant CX2388x video function (vendor
 * 0x14f1, device 0x8800) regardless of subsystem IDs; the concrete
 * board type is identified later from the subsystem IDs.
 *
 * Made const: the table is never written, pci_driver.id_table takes a
 * const pointer, and const lets the table live in read-only data.
 */
static const struct pci_device_id cx8800_pci_tbl[] = {
	{
		.vendor = 0x14f1,
		.device = 0x8800,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},{
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx8800_pci_tbl);
/* PCI driver glue: probe/remove plus optional legacy PM hooks. */
static struct pci_driver cx8800_pci_driver = {
	.name = "cx8800",
	.id_table = cx8800_pci_tbl,
	.probe = cx8800_initdev,
	/* __devexit_p compiles to NULL when hotplug removal is configured out */
	.remove = __devexit_p(cx8800_finidev),
#ifdef CONFIG_PM
	.suspend = cx8800_suspend,
	.resume = cx8800_resume,
#endif
};
/*
 * Module load entry point: announce the driver version (and optional
 * snapshot build date), then register with the PCI core.
 */
static int __init cx8800_init(void)
{
	int major = (CX88_VERSION_CODE >> 16) & 0xff;
	int minor = (CX88_VERSION_CODE >> 8) & 0xff;
	int patchlevel = CX88_VERSION_CODE & 0xff;

	printk(KERN_INFO "cx88/0: cx2388x v4l2 driver version %d.%d.%d loaded\n",
	       major, minor, patchlevel);
#ifdef SNAPSHOT
	printk(KERN_INFO "cx2388x: snapshot date %04d-%02d-%02d\n",
	       SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
#endif
	return pci_register_driver(&cx8800_pci_driver);
}
/* Module unload entry point: detach the driver from the PCI core. */
static void __exit cx8800_fini(void)
{
	pci_unregister_driver(&cx8800_pci_driver);
}
module_init(cx8800_init);
module_exit(cx8800_fini);
/* ----------------------------------------------------------- */
/*
* Local variables:
* c-basic-offset: 8
* End:
* kate: eol "unix"; indent-width 3; remove-trailing-space on; replace-trailing-space-save on; tab-width 8; replace-tabs off; space-indent off; mixed-indent off
*/
| gpl-2.0 |
roggin/iconia-a500-kernel | arch/blackfin/mach-bf537/boards/cm_bf537e.c | 856 | 18659 | /*
* Copyright 2004-2009 Analog Devices Inc.
* 2008-2009 Bluetechnix
* 2005 National ICT Australia (NICTA)
* Aidan Williams <aidan@nicta.com.au>
*
* Licensed under the GPL-2 or later.
*/
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/physmap.h>
#include <linux/spi/spi.h>
#include <linux/spi/flash.h>
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
#include <linux/usb/isp1362.h>
#endif
#include <linux/ata_platform.h>
#include <linux/irq.h>
#include <asm/dma.h>
#include <asm/bfin5xx_spi.h>
#include <asm/portmux.h>
#include <asm/dpmc.h>
/*
* Name the Board for the /proc/cpuinfo
*/
const char bfin_board_name[] = "Bluetechnix CM BF537E";
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
/* all SPI peripherals info goes here */
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
static struct mtd_partition bfin_spi_flash_partitions[] = {
{
.name = "bootloader(spi)",
.size = 0x00020000,
.offset = 0,
.mask_flags = MTD_CAP_ROM
}, {
.name = "linux kernel(spi)",
.size = 0xe0000,
.offset = 0x20000
}, {
.name = "file system(spi)",
.size = 0x700000,
.offset = 0x00100000,
}
};
static struct flash_platform_data bfin_spi_flash_data = {
.name = "m25p80",
.parts = bfin_spi_flash_partitions,
.nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions),
.type = "m25p64",
};
/* SPI flash chip (m25p64) */
static struct bfin5xx_spi_chip spi_flash_chip_info = {
.enable_dma = 0, /* use dma transfer with this chip*/
.bits_per_word = 8,
};
#endif
#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
/* SPI ADC chip */
static struct bfin5xx_spi_chip spi_adc_chip_info = {
.enable_dma = 1, /* use dma transfer with this chip*/
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
static struct bfin5xx_spi_chip ad1836_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 16,
};
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
static struct bfin5xx_spi_chip mmc_spi_chip_info = {
.enable_dma = 0,
.bits_per_word = 8,
};
#endif
static struct spi_board_info bfin_spi_board_info[] __initdata = {
#if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE)
{
/* the modalias must be the same as spi device driver name */
.modalias = "m25p80", /* Name of spi_driver for this device */
.max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/
.platform_data = &bfin_spi_flash_data,
.controller_data = &spi_flash_chip_info,
.mode = SPI_MODE_3,
},
#endif
#if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE)
{
.modalias = "bfin_spi_adc", /* Name of spi_driver for this device */
.max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0, /* Framework bus number */
.chip_select = 1, /* Framework chip select. */
.platform_data = NULL, /* No spi_driver specific config */
.controller_data = &spi_adc_chip_info,
},
#endif
#if defined(CONFIG_SND_BLACKFIN_AD183X) || defined(CONFIG_SND_BLACKFIN_AD183X_MODULE)
{
.modalias = "ad1836",
.max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT,
.controller_data = &ad1836_spi_chip_info,
},
#endif
#if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE)
{
.modalias = "mmc_spi",
.max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */
.bus_num = 0,
.chip_select = 1,
.controller_data = &mmc_spi_chip_info,
.mode = SPI_MODE_3,
},
#endif
};
/* SPI (0) */
static struct resource bfin_spi0_resource[] = {
[0] = {
.start = SPI0_REGBASE,
.end = SPI0_REGBASE + 0xFF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = CH_SPI,
.end = CH_SPI,
.flags = IORESOURCE_DMA,
},
[2] = {
.start = IRQ_SPI,
.end = IRQ_SPI,
.flags = IORESOURCE_IRQ,
},
};
/* SPI controller data */
static struct bfin5xx_spi_master bfin_spi0_info = {
.num_chipselect = 8,
.enable_dma = 1, /* master has the ability to do dma transfer */
.pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0},
};
static struct platform_device bfin_spi0_device = {
.name = "bfin-spi",
.id = 0, /* Bus number */
.num_resources = ARRAY_SIZE(bfin_spi0_resource),
.resource = bfin_spi0_resource,
.dev = {
.platform_data = &bfin_spi0_info, /* Passed to driver */
},
};
#endif /* spi master and devices */
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
/* On-chip RTC; the rtc-bfin driver needs no resources or platform data. */
static struct platform_device rtc_device = {
	.name = "rtc-bfin",
	.id = -1,	/* -1: single instance */
};
#endif

#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
/* Hitachi TX09 LCD framebuffer device (driver supplies its own config). */
static struct platform_device hitachi_fb_device = {
	.name = "hitachi-tx09",
};
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
#include <linux/smc91x.h>
static struct smc91x_platdata smc91x_info = {
.flags = SMC91X_USE_16BIT | SMC91X_NOWAIT,
.leda = RPC_LED_100_10,
.ledb = RPC_LED_TX_RX,
};
static struct resource smc91x_resources[] = {
{
.start = 0x20200300,
.end = 0x20200300 + 16,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PF14,
.end = IRQ_PF14,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device smc91x_device = {
.name = "smc91x",
.id = 0,
.num_resources = ARRAY_SIZE(smc91x_resources),
.resource = smc91x_resources,
.dev = {
.platform_data = &smc91x_info,
},
};
#endif
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
static struct resource isp1362_hcd_resources[] = {
{
.start = 0x20308000,
.end = 0x20308000,
.flags = IORESOURCE_MEM,
}, {
.start = 0x20308004,
.end = 0x20308004,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PG15,
.end = IRQ_PG15,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct isp1362_platform_data isp1362_priv = {
.sel15Kres = 1,
.clknotstop = 0,
.oc_enable = 0,
.int_act_high = 0,
.int_edge_triggered = 0,
.remote_wakeup_connected = 0,
.no_power_switching = 1,
.power_switching_mode = 0,
};
static struct platform_device isp1362_hcd_device = {
.name = "isp1362-hcd",
.id = 0,
.dev = {
.platform_data = &isp1362_priv,
},
.num_resources = ARRAY_SIZE(isp1362_hcd_resources),
.resource = isp1362_hcd_resources,
};
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
static struct resource net2272_bfin_resources[] = {
{
.start = 0x20300000,
.end = 0x20300000 + 0x100,
.flags = IORESOURCE_MEM,
}, {
.start = IRQ_PG13,
.end = IRQ_PG13,
.flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL,
},
};
static struct platform_device net2272_bfin_device = {
.name = "net2272",
.id = -1,
.num_resources = ARRAY_SIZE(net2272_bfin_resources),
.resource = net2272_bfin_resources,
};
#endif
#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
/* Parallel NOR flash behind the gpio-addr-flash driver: the upper
 * address line is driven by a GPIO (PF4), doubling the reachable size. */
static struct mtd_partition cm_partitions[] = {
	{
		.name = "bootloader(nor)",
		.size = 0x40000,
		.offset = 0,
	}, {
		.name = "linux kernel(nor)",
		.size = 0x100000,
		.offset = MTDPART_OFS_APPEND,	/* directly after bootloader */
	}, {
		.name = "file system(nor)",
		.size = MTDPART_SIZ_FULL,	/* rest of the chip */
		.offset = MTDPART_OFS_APPEND,
	}
};

static struct physmap_flash_data cm_flash_data = {
	.width = 2,	/* 16-bit bus */
	.parts = cm_partitions,
	.nr_parts = ARRAY_SIZE(cm_partitions),
};

/* GPIOs used as extra address lines, lowest bit first */
static unsigned cm_flash_gpios[] = { GPIO_PF4 };

static struct resource cm_flash_resource[] = {
	{
		.name = "cfi_probe",
		.start = 0x20000000,
		.end = 0x201fffff,
		.flags = IORESOURCE_MEM,
	}, {
		/* gpio-addr-flash convention: this pseudo-resource smuggles
		 * the GPIO array pointer (.start) and its element count
		 * (.end) to the driver; it is not a real IRQ. */
		.start = (unsigned long)cm_flash_gpios,
		.end = ARRAY_SIZE(cm_flash_gpios),
		.flags = IORESOURCE_IRQ,
	}
};

static struct platform_device cm_flash_device = {
	.name = "gpio-addr-flash",
	.id = 0,
	.dev = {
		.platform_data = &cm_flash_data,
	},
	.num_resources = ARRAY_SIZE(cm_flash_resource),
	.resource = cm_flash_resource,
};
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
static struct resource bfin_uart0_resources[] = {
{
.start = UART0_THR,
.end = UART0_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART0_ERROR,
.end = IRQ_UART0_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_TX,
.end = CH_UART0_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART0_CTSRTS
{
/*
* Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
*/
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
{
/*
* Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
*/
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
#endif
};
unsigned short bfin_uart0_peripherals[] = {
P_UART0_TX, P_UART0_RX, 0
};
static struct platform_device bfin_uart0_device = {
.name = "bfin-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_uart0_resources),
.resource = bfin_uart0_resources,
.dev = {
.platform_data = &bfin_uart0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
static struct resource bfin_uart1_resources[] = {
{
.start = UART1_THR,
.end = UART1_GCTL+2,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_UART1_ERROR,
.end = IRQ_UART1_ERROR,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_TX,
.end = CH_UART1_TX,
.flags = IORESOURCE_DMA,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX,
.flags = IORESOURCE_DMA,
},
#ifdef CONFIG_BFIN_UART1_CTSRTS
{
/*
* Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
*/
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
{
/*
* Refer to arch/blackfin/mach-xxx/include/mach/gpio.h for the GPIO map.
*/
.start = -1,
.end = -1,
.flags = IORESOURCE_IO,
},
#endif
};
unsigned short bfin_uart1_peripherals[] = {
P_UART1_TX, P_UART1_RX, 0
};
static struct platform_device bfin_uart1_device = {
.name = "bfin-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_uart1_resources),
.resource = bfin_uart1_resources,
.dev = {
.platform_data = &bfin_uart1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
static struct resource bfin_sir0_resources[] = {
{
.start = 0xFFC00400,
.end = 0xFFC004FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART0_RX,
.end = IRQ_UART0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART0_RX,
.end = CH_UART0_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir0_device = {
.name = "bfin_sir",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sir0_resources),
.resource = bfin_sir0_resources,
};
#endif
#ifdef CONFIG_BFIN_SIR1
static struct resource bfin_sir1_resources[] = {
{
.start = 0xFFC02000,
.end = 0xFFC020FF,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_UART1_RX,
.end = IRQ_UART1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = CH_UART1_RX,
.end = CH_UART1_RX+1,
.flags = IORESOURCE_DMA,
},
};
static struct platform_device bfin_sir1_device = {
.name = "bfin_sir",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sir1_resources),
.resource = bfin_sir1_resources,
};
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
static struct resource bfin_twi0_resource[] = {
[0] = {
.start = TWI0_REGBASE,
.end = TWI0_REGBASE,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = IRQ_TWI,
.end = IRQ_TWI,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device i2c_bfin_twi_device = {
.name = "i2c-bfin-twi",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_twi0_resource),
.resource = bfin_twi0_resource,
};
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
static struct resource bfin_sport0_uart_resources[] = {
{
.start = SPORT0_TCR1,
.end = SPORT0_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT0_RX,
.end = IRQ_SPORT0_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT0_ERROR,
.end = IRQ_SPORT0_ERROR,
.flags = IORESOURCE_IRQ,
},
};
unsigned short bfin_sport0_peripherals[] = {
P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS,
P_SPORT0_DRPRI, P_SPORT0_RSCLK, P_SPORT0_DRSEC, P_SPORT0_DTSEC, 0
};
static struct platform_device bfin_sport0_uart_device = {
.name = "bfin-sport-uart",
.id = 0,
.num_resources = ARRAY_SIZE(bfin_sport0_uart_resources),
.resource = bfin_sport0_uart_resources,
.dev = {
.platform_data = &bfin_sport0_peripherals, /* Passed to driver */
},
};
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
static struct resource bfin_sport1_uart_resources[] = {
{
.start = SPORT1_TCR1,
.end = SPORT1_MRCS3+4,
.flags = IORESOURCE_MEM,
},
{
.start = IRQ_SPORT1_RX,
.end = IRQ_SPORT1_RX+1,
.flags = IORESOURCE_IRQ,
},
{
.start = IRQ_SPORT1_ERROR,
.end = IRQ_SPORT1_ERROR,
.flags = IORESOURCE_IRQ,
},
};
unsigned short bfin_sport1_peripherals[] = {
P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS,
P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_DRSEC, P_SPORT1_DTSEC, 0
};
static struct platform_device bfin_sport1_uart_device = {
.name = "bfin-sport-uart",
.id = 1,
.num_resources = ARRAY_SIZE(bfin_sport1_uart_resources),
.resource = bfin_sport1_uart_resources,
.dev = {
.platform_data = &bfin_sport1_peripherals, /* Passed to driver */
},
};
#endif
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
/* On-chip Ethernet MAC plus its MII bus; the MAC driver locates the
 * bus device through platform_data. */
static struct platform_device bfin_mii_bus = {
	.name = "bfin_mii_bus",
};

static struct platform_device bfin_mac_device = {
	.name = "bfin_mac",
	.dev.platform_data = &bfin_mii_bus,	/* link MAC to its MII bus */
};
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
#define PATA_INT IRQ_PF14
static struct pata_platform_info bfin_pata_platform_data = {
.ioport_shift = 2,
.irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED,
};
static struct resource bfin_pata_resources[] = {
{
.start = 0x2030C000,
.end = 0x2030C01F,
.flags = IORESOURCE_MEM,
},
{
.start = 0x2030D018,
.end = 0x2030D01B,
.flags = IORESOURCE_MEM,
},
{
.start = PATA_INT,
.end = PATA_INT,
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device bfin_pata_device = {
.name = "pata_platform",
.id = -1,
.num_resources = ARRAY_SIZE(bfin_pata_resources),
.resource = bfin_pata_resources,
.dev = {
.platform_data = &bfin_pata_platform_data,
}
};
#endif
/* Core-clock / voltage-level pairs from the processor datasheet: the
 * maximum CCLK sustainable at each VLEV setting.  Consumed by the
 * dynamic power management controller for voltage scaling. */
static const unsigned int cclk_vlev_datasheet[] =
{
	VRPAIR(VLEV_085, 250000000),
	VRPAIR(VLEV_090, 376000000),
	VRPAIR(VLEV_095, 426000000),
	VRPAIR(VLEV_100, 426000000),
	VRPAIR(VLEV_105, 476000000),
	VRPAIR(VLEV_110, 476000000),
	VRPAIR(VLEV_115, 476000000),
	VRPAIR(VLEV_120, 500000000),
	VRPAIR(VLEV_125, 533000000),
	VRPAIR(VLEV_130, 600000000),
};

static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = {
	.tuple_tab = cclk_vlev_datasheet,
	.tabsize = ARRAY_SIZE(cclk_vlev_datasheet),
	.vr_settling_time = 25 /* us */,	/* regulator settling delay */
};

static struct platform_device bfin_dpmc = {
	.name = "bfin dpmc",
	.dev = {
		.platform_data = &bfin_dmpc_vreg_data,
	},
};
static struct platform_device *cm_bf537e_devices[] __initdata = {
&bfin_dpmc,
#if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE)
&hitachi_fb_device,
#endif
#if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE)
&rtc_device,
#endif
#if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE)
#ifdef CONFIG_SERIAL_BFIN_UART0
&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
&bfin_uart1_device,
#endif
#endif
#if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE)
#ifdef CONFIG_BFIN_SIR0
&bfin_sir0_device,
#endif
#ifdef CONFIG_BFIN_SIR1
&bfin_sir1_device,
#endif
#endif
#if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE)
&i2c_bfin_twi_device,
#endif
#if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
&bfin_sport1_uart_device,
#endif
#endif
#if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE)
&isp1362_hcd_device,
#endif
#if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE)
&smc91x_device,
#endif
#if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE)
&bfin_mii_bus,
&bfin_mac_device,
#endif
#if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE)
&net2272_bfin_device,
#endif
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
&bfin_spi0_device,
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
&bfin_pata_device,
#endif
#if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE)
&cm_flash_device,
#endif
};
/*
 * Board init: register all configured platform devices and SPI slaves,
 * and keep the PATA interrupt disabled until its driver requests it.
 */
static int __init cm_bf537e_init(void)
{
	printk(KERN_INFO "%s(): registering device resources\n", __func__);
	/* NOTE(review): return value of platform_add_devices() is ignored,
	 * so partial registration failures go unreported. */
	platform_add_devices(cm_bf537e_devices, ARRAY_SIZE(cm_bf537e_devices));
#if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE)
	spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info));
#endif
#if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE)
	/* don't auto-enable the shared PATA IRQ on request_irq() */
	irq_desc[PATA_INT].status |= IRQ_NOAUTOEN;
#endif
	return 0;
}

arch_initcall(cm_bf537e_init);
/* Devices that must exist before the normal initcalls run, so the
 * early console / early printk can attach to a UART or SPORT-UART. */
static struct platform_device *cm_bf537e_early_devices[] __initdata = {
#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
#ifdef CONFIG_SERIAL_BFIN_UART0
	&bfin_uart0_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_UART1
	&bfin_uart1_device,
#endif
#endif

#if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE)
#ifdef CONFIG_SERIAL_BFIN_SPORT0_UART
	&bfin_sport0_uart_device,
#endif
#ifdef CONFIG_SERIAL_BFIN_SPORT1_UART
	&bfin_sport1_uart_device,
#endif
#endif
};
/* Arch hook, called during early boot to register the console-capable
 * devices listed above before the driver model is fully up. */
void __init native_machine_early_platform_add_devices(void)
{
	printk(KERN_INFO "register early platform devices\n");
	early_platform_add_devices(cm_bf537e_early_devices,
		ARRAY_SIZE(cm_bf537e_early_devices));
}
/*
 * Provide a MAC address for the on-chip Ethernet MAC.  This board has
 * no persistent MAC storage, so a random locally-administered address
 * is generated on every boot (and a warning logged).
 */
void bfin_get_ether_addr(char *addr)
{
	random_ether_addr(addr);
	printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__);
}
EXPORT_SYMBOL(bfin_get_ether_addr);
| gpl-2.0 |
kerneldevs/RM-35-KERNEL-UNIVA | drivers/media/video/cx23885/cx23885-417.c | 856 | 49125 | /*
*
* Support for a cx23417 mpeg encoder via cx23885 host port.
*
* (c) 2004 Jelle Foks <jelle@foks.us>
* (c) 2004 Gerd Knorr <kraxel@bytesex.org>
* (c) 2008 Steven Toth <stoth@linuxtv.org>
* - CX23885/7/8 support
*
* Includes parts from the ivtv driver( http://ivtv.sourceforge.net/),
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/smp_lock.h>
#include <linux/slab.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/cx2341x.h>
#include "cx23885.h"
#include "cx23885-ioctl.h"
#define CX23885_FIRM_IMAGE_SIZE 376836
#define CX23885_FIRM_IMAGE_NAME "v4l-cx23885-enc.fw"
static unsigned int mpegbufs = 32;
module_param(mpegbufs, int, 0644);
MODULE_PARM_DESC(mpegbufs, "number of mpeg buffers, range 2-32");
static unsigned int mpeglines = 32;
module_param(mpeglines, int, 0644);
MODULE_PARM_DESC(mpeglines, "number of lines in an MPEG buffer, range 2-32");
static unsigned int mpeglinesize = 512;
module_param(mpeglinesize, int, 0644);
MODULE_PARM_DESC(mpeglinesize,
"number of bytes in each line of an MPEG buffer, range 512-1024");
static unsigned int v4l_debug;
module_param(v4l_debug, int, 0644);
MODULE_PARM_DESC(v4l_debug, "enable V4L debug messages");
#define dprintk(level, fmt, arg...)\
do { if (v4l_debug >= level) \
printk(KERN_DEBUG "%s: " fmt, \
(dev) ? dev->name : "cx23885[?]", ## arg); \
} while (0)
static struct cx23885_tvnorm cx23885_tvnorms[] = {
{
.name = "NTSC-M",
.id = V4L2_STD_NTSC_M,
}, {
.name = "NTSC-JP",
.id = V4L2_STD_NTSC_M_JP,
}, {
.name = "PAL-BG",
.id = V4L2_STD_PAL_BG,
}, {
.name = "PAL-DK",
.id = V4L2_STD_PAL_DK,
}, {
.name = "PAL-I",
.id = V4L2_STD_PAL_I,
}, {
.name = "PAL-M",
.id = V4L2_STD_PAL_M,
}, {
.name = "PAL-N",
.id = V4L2_STD_PAL_N,
}, {
.name = "PAL-Nc",
.id = V4L2_STD_PAL_Nc,
}, {
.name = "PAL-60",
.id = V4L2_STD_PAL_60,
}, {
.name = "SECAM-L",
.id = V4L2_STD_SECAM_L,
}, {
.name = "SECAM-DK",
.id = V4L2_STD_SECAM_DK,
}
};
/* ------------------------------------------------------------------ */
enum cx23885_capture_type {
CX23885_MPEG_CAPTURE,
CX23885_RAW_CAPTURE,
CX23885_RAW_PASSTHRU_CAPTURE
};
enum cx23885_capture_bits {
CX23885_RAW_BITS_NONE = 0x00,
CX23885_RAW_BITS_YUV_CAPTURE = 0x01,
CX23885_RAW_BITS_PCM_CAPTURE = 0x02,
CX23885_RAW_BITS_VBI_CAPTURE = 0x04,
CX23885_RAW_BITS_PASSTHRU_CAPTURE = 0x08,
CX23885_RAW_BITS_TO_HOST_CAPTURE = 0x10
};
enum cx23885_capture_end {
CX23885_END_AT_GOP, /* stop at the end of gop, generate irq */
CX23885_END_NOW, /* stop immediately, no irq */
};
enum cx23885_framerate {
CX23885_FRAMERATE_NTSC_30, /* NTSC: 30fps */
CX23885_FRAMERATE_PAL_25 /* PAL: 25fps */
};
enum cx23885_stream_port {
CX23885_OUTPUT_PORT_MEMORY,
CX23885_OUTPUT_PORT_STREAMING,
CX23885_OUTPUT_PORT_SERIAL
};
enum cx23885_data_xfer_status {
CX23885_MORE_BUFFERS_FOLLOW,
CX23885_LAST_BUFFER,
};
enum cx23885_picture_mask {
CX23885_PICTURE_MASK_NONE,
CX23885_PICTURE_MASK_I_FRAMES,
CX23885_PICTURE_MASK_I_P_FRAMES = 0x3,
CX23885_PICTURE_MASK_ALL_FRAMES = 0x7,
};
enum cx23885_vbi_mode_bits {
CX23885_VBI_BITS_SLICED,
CX23885_VBI_BITS_RAW,
};
enum cx23885_vbi_insertion_bits {
CX23885_VBI_BITS_INSERT_IN_XTENSION_USR_DATA,
CX23885_VBI_BITS_INSERT_IN_PRIVATE_PACKETS = 0x1 << 1,
CX23885_VBI_BITS_SEPARATE_STREAM = 0x2 << 1,
CX23885_VBI_BITS_SEPARATE_STREAM_USR_DATA = 0x4 << 1,
CX23885_VBI_BITS_SEPARATE_STREAM_PRV_DATA = 0x5 << 1,
};
enum cx23885_dma_unit {
CX23885_DMA_BYTES,
CX23885_DMA_FRAMES,
};
enum cx23885_dma_transfer_status_bits {
CX23885_DMA_TRANSFER_BITS_DONE = 0x01,
CX23885_DMA_TRANSFER_BITS_ERROR = 0x04,
CX23885_DMA_TRANSFER_BITS_LL_ERROR = 0x10,
};
enum cx23885_pause {
CX23885_PAUSE_ENCODING,
CX23885_RESUME_ENCODING,
};
enum cx23885_copyright {
CX23885_COPYRIGHT_OFF,
CX23885_COPYRIGHT_ON,
};
enum cx23885_notification_type {
CX23885_NOTIFICATION_REFRESH,
};
enum cx23885_notification_status {
CX23885_NOTIFICATION_OFF,
CX23885_NOTIFICATION_ON,
};
enum cx23885_notification_mailbox {
CX23885_NOTIFICATION_NO_MAILBOX = -1,
};
enum cx23885_field1_lines {
CX23885_FIELD1_SAA7114 = 0x00EF, /* 239 */
CX23885_FIELD1_SAA7115 = 0x00F0, /* 240 */
CX23885_FIELD1_MICRONAS = 0x0105, /* 261 */
};
enum cx23885_field2_lines {
CX23885_FIELD2_SAA7114 = 0x00EF, /* 239 */
CX23885_FIELD2_SAA7115 = 0x00F0, /* 240 */
CX23885_FIELD2_MICRONAS = 0x0106, /* 262 */
};
enum cx23885_custom_data_type {
CX23885_CUSTOM_EXTENSION_USR_DATA,
CX23885_CUSTOM_PRIVATE_PACKET,
};
enum cx23885_mute {
CX23885_UNMUTE,
CX23885_MUTE,
};
enum cx23885_mute_video_mask {
CX23885_MUTE_VIDEO_V_MASK = 0x0000FF00,
CX23885_MUTE_VIDEO_U_MASK = 0x00FF0000,
CX23885_MUTE_VIDEO_Y_MASK = 0xFF000000,
};
enum cx23885_mute_video_shift {
CX23885_MUTE_VIDEO_V_SHIFT = 8,
CX23885_MUTE_VIDEO_U_SHIFT = 16,
CX23885_MUTE_VIDEO_Y_SHIFT = 24,
};
/* defines below are from ivtv-driver.h */
#define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF
/* Firmware API commands */
#define IVTV_API_STD_TIMEOUT 500
/* Registers */
/* IVTV_REG_OFFSET */
#define IVTV_REG_ENC_SDRAM_REFRESH (0x07F8)
#define IVTV_REG_ENC_SDRAM_PRECHARGE (0x07FC)
#define IVTV_REG_SPU (0x9050)
#define IVTV_REG_HW_BLOCKS (0x9054)
#define IVTV_REG_VPU (0x9058)
#define IVTV_REG_APU (0xA064)
/**** Bit definitions for MC417_RWD and MC417_OEN registers ***
bits 31-16
+-----------+
| Reserved |
+-----------+
bit 15 bit 14 bit 13 bit 12 bit 11 bit 10 bit 9 bit 8
+-------+-------+-------+-------+-------+-------+-------+-------+
| MIWR# | MIRD# | MICS# |MIRDY# |MIADDR3|MIADDR2|MIADDR1|MIADDR0|
+-------+-------+-------+-------+-------+-------+-------+-------+
bit 7 bit 6 bit 5 bit 4 bit 3 bit 2 bit 1 bit 0
+-------+-------+-------+-------+-------+-------+-------+-------+
|MIDATA7|MIDATA6|MIDATA5|MIDATA4|MIDATA3|MIDATA2|MIDATA1|MIDATA0|
+-------+-------+-------+-------+-------+-------+-------+-------+
***/
#define MC417_MIWR 0x8000
#define MC417_MIRD 0x4000
#define MC417_MICS 0x2000
#define MC417_MIRDY 0x1000
#define MC417_MIADDR 0x0F00
#define MC417_MIDATA 0x00FF
/* MIADDR* nibble definitions */
#define MCI_MEMORY_DATA_BYTE0 0x000
#define MCI_MEMORY_DATA_BYTE1 0x100
#define MCI_MEMORY_DATA_BYTE2 0x200
#define MCI_MEMORY_DATA_BYTE3 0x300
#define MCI_MEMORY_ADDRESS_BYTE2 0x400
#define MCI_MEMORY_ADDRESS_BYTE1 0x500
#define MCI_MEMORY_ADDRESS_BYTE0 0x600
#define MCI_REGISTER_DATA_BYTE0 0x800
#define MCI_REGISTER_DATA_BYTE1 0x900
#define MCI_REGISTER_DATA_BYTE2 0xA00
#define MCI_REGISTER_DATA_BYTE3 0xB00
#define MCI_REGISTER_ADDRESS_BYTE0 0xC00
#define MCI_REGISTER_ADDRESS_BYTE1 0xD00
#define MCI_REGISTER_MODE 0xE00
/* Read and write modes */
#define MCI_MODE_REGISTER_READ 0
#define MCI_MODE_REGISTER_WRITE 1
#define MCI_MODE_MEMORY_READ 0
#define MCI_MODE_MEMORY_WRITE 0x40
/*** Bit definitions for MC417_CTL register ****
bits 31-6 bits 5-4 bit 3 bits 2-1 Bit 0
+--------+-------------+--------+--------------+------------+
|Reserved|MC417_SPD_CTL|Reserved|MC417_GPIO_SEL|UART_GPIO_EN|
+--------+-------------+--------+--------------+------------+
***/
#define MC417_SPD_CTL(x) (((x) << 4) & 0x00000030)
#define MC417_GPIO_SEL(x) (((x) << 1) & 0x00000006)
#define MC417_UART_GPIO_EN 0x00000001
/* Values for speed control */
#define MC417_SPD_CTL_SLOW 0x1
#define MC417_SPD_CTL_MEDIUM 0x0
#define MC417_SPD_CTL_FAST 0x3 /* b'1x, but we use b'11 */
/* Values for GPIO select */
#define MC417_GPIO_SEL_GPIO3 0x3
#define MC417_GPIO_SEL_GPIO2 0x2
#define MC417_GPIO_SEL_GPIO1 0x1
#define MC417_GPIO_SEL_GPIO0 0x0
/*
 * Put the MC417 (cx23417 host) interface into its idle/default state:
 * fast bus timing with GPIO3 selected, only MIRDY as input, and all
 * bus strobes (WR/RD/CS) deasserted (active-low, so written as 1).
 */
void cx23885_mc417_init(struct cx23885_dev *dev)
{
	u32 regval;

	dprintk(2, "%s()\n", __func__);

	/* Configure MC417_CTL register to defaults. */
	regval = MC417_SPD_CTL(MC417_SPD_CTL_FAST) |
		 MC417_GPIO_SEL(MC417_GPIO_SEL_GPIO3) |
		 MC417_UART_GPIO_EN;
	cx_write(MC417_CTL, regval);

	/* Configure MC417_OEN to defaults: everything output except MIRDY. */
	regval = MC417_MIRDY;
	cx_write(MC417_OEN, regval);

	/* Configure MC417_RWD to defaults: strobes inactive (high). */
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS;
	cx_write(MC417_RWD, regval);
}
/*
 * Busy-wait (up to ~1ms) for the MC417 bus to signal ready (MIRDY high).
 * Returns 0 when ready, -1 on timeout.
 */
static int mc417_wait_ready(struct cx23885_dev *dev)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(1);

	while (!(cx_read(MC417_RWD) & MC417_MIRDY)) {
		if (time_after(jiffies, deadline))
			return -1;
		udelay(1);
	}
	return 0;
}
/*
 * Write a 32-bit value to a cx23417 register over the 8-bit MC417 GPIO
 * bus.  Each byte is transferred with the same two-step pattern: place
 * the nibble-selected byte on the bus with strobes inactive, then pulse
 * CS/WR to latch it.  The statement order is the bus protocol — do not
 * reorder.  Returns 0 on success, -1 if the device never signals ready.
 */
int mc417_register_write(struct cx23885_dev *dev, u16 address, u32 value)
{
	u32 regval;

	/* Enable MC417 GPIO outputs except for MC417_MIRDY,
	 * which is an input.
	 */
	cx_write(MC417_OEN, MC417_MIRDY);

	/* Write data byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE0 |
		(value & 0x000000FF);
	cx_write(MC417_RWD, regval);

	/* Transition CS/WR to effect write transaction across bus. */
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write data byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE1 |
		((value >> 8) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write data byte 2 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE2 |
		((value >> 16) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write data byte 3 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE3 |
		((value >> 24) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write address byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_ADDRESS_BYTE0 |
		(address & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Write address byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_ADDRESS_BYTE1 |
		((address >> 8) & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Indicate that this is a write. */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_MODE |
		MCI_MODE_REGISTER_WRITE;
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);

	/* Wait for the trans to complete (MC417_MIRDY asserted). */
	return mc417_wait_ready(dev);
}
/* Read a 32-bit value from a CX23417 register over the MC417 GPIO bus.
 * The address and a read-mode strobe are written first; the data lines
 * are then switched to input and the four data bytes are collected in
 * order 0..3 (low byte first).  *value receives the assembled word.
 * Returns the mc417_wait_ready() status from the address phase
 * (0 on success, -1 on timeout); the data phase is not error-checked.
 */
int mc417_register_read(struct cx23885_dev *dev, u16 address, u32 *value)
{
	int retval;
	u32 regval;
	u32 tempval;
	u32 dataval;

	/* Enable MC417 GPIO outputs except for MC417_MIRDY,
	 * which is an input.
	 */
	cx_write(MC417_OEN, MC417_MIRDY);
	/* Write address byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_ADDRESS_BYTE0 |
		((address & 0x00FF));
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Write address byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_ADDRESS_BYTE1 |
		((address >> 8) & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Indicate that this is a register read. */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_MODE |
		MCI_MODE_REGISTER_READ;
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Wait for the trans to complete (MC417_MIRDY asserted). */
	retval = mc417_wait_ready(dev);
	/* switch the DAT0-7 GPIO[10:3] to input mode */
	cx_write(MC417_OEN, MC417_MIRDY | MC417_MIDATA);
	/* Read data byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE0;
	cx_write(MC417_RWD, regval);
	/* Transition RD to effect read transaction across bus.
	 * Transtion 0x5000 -> 0x9000 correct (RD/RDY -> WR/RDY)?
	 * Should it be 0x9000 -> 0xF000 (also why is RDY being set, its
	 * input only...)
	 */
	regval = MC417_MIWR | MC417_MIRDY | MCI_REGISTER_DATA_BYTE0;
	cx_write(MC417_RWD, regval);
	/* Collect byte */
	tempval = cx_read(MC417_RWD);
	dataval = tempval & 0x000000FF;
	/* Bring CS and RD high. */
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);
	/* Read data byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE1;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_REGISTER_DATA_BYTE1;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= ((tempval & 0x000000FF) << 8);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);
	/* Read data byte 2 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE2;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_REGISTER_DATA_BYTE2;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= ((tempval & 0x000000FF) << 16);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);
	/* Read data byte 3 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_REGISTER_DATA_BYTE3;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_REGISTER_DATA_BYTE3;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= ((tempval & 0x000000FF) << 24);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);

	*value = dataval;
	return retval;
}
/* Write a 32-bit value to CX23417 memory at the given word address.
 * Same bus-toggle sequence as mc417_register_write() but using the
 * MCI_MEMORY_* byte selectors; the write mode bit is folded into
 * address byte 2 (address is limited to 22 bits by the 0x3F mask).
 * Returns 0 on success, -1 on mc417_wait_ready() timeout.
 */
int mc417_memory_write(struct cx23885_dev *dev, u32 address, u32 value)
{
	u32 regval;

	/* Enable MC417 GPIO outputs except for MC417_MIRDY,
	 * which is an input.
	 */
	cx_write(MC417_OEN, MC417_MIRDY);
	/* Write data byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE0 |
		(value & 0x000000FF);
	cx_write(MC417_RWD, regval);
	/* Transition CS/WR to effect write transaction across bus. */
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Write data byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE1 |
		((value >> 8) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Write data byte 2 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE2 |
		((value >> 16) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Write data byte 3 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE3 |
		((value >> 24) & 0x000000FF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Write address byte 2 (carries the memory-write mode bits) */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE2 |
		MCI_MODE_MEMORY_WRITE | ((address >> 16) & 0x3F);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Write address byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE1 |
		((address >> 8) & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Write address byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE0 |
		(address & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Wait for the trans to complete (MC417_MIRDY asserted). */
	return mc417_wait_ready(dev);
}
/* Read a 32-bit value from CX23417 memory at the given word address.
 * Address bytes are written 2..0 (byte 2 carries the memory-read mode
 * bits), then the data lines are switched to input and data bytes are
 * collected high-to-low (3..0) -- note this is the opposite byte order
 * from mc417_register_read().  *value receives the assembled word.
 * Returns the mc417_wait_ready() status from the address phase.
 */
int mc417_memory_read(struct cx23885_dev *dev, u32 address, u32 *value)
{
	int retval;
	u32 regval;
	u32 tempval;
	u32 dataval;

	/* Enable MC417 GPIO outputs except for MC417_MIRDY,
	 * which is an input.
	 */
	cx_write(MC417_OEN, MC417_MIRDY);
	/* Write address byte 2 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE2 |
		MCI_MODE_MEMORY_READ | ((address >> 16) & 0x3F);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Write address byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE1 |
		((address >> 8) & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Write address byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_ADDRESS_BYTE0 |
		(address & 0xFF);
	cx_write(MC417_RWD, regval);
	regval |= MC417_MICS | MC417_MIWR;
	cx_write(MC417_RWD, regval);
	/* Wait for the trans to complete (MC417_MIRDY asserted). */
	retval = mc417_wait_ready(dev);
	/* switch the DAT0-7 GPIO[10:3] to input mode */
	cx_write(MC417_OEN, MC417_MIRDY | MC417_MIDATA);
	/* Read data byte 3 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE3;
	cx_write(MC417_RWD, regval);
	/* Transition RD to effect read transaction across bus. */
	regval = MC417_MIWR | MC417_MIRDY | MCI_MEMORY_DATA_BYTE3;
	cx_write(MC417_RWD, regval);
	/* Collect byte */
	tempval = cx_read(MC417_RWD);
	dataval = ((tempval & 0x000000FF) << 24);
	/* Bring CS and RD high. */
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);
	/* Read data byte 2 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE2;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_MEMORY_DATA_BYTE2;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= ((tempval & 0x000000FF) << 16);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);
	/* Read data byte 1 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE1;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_MEMORY_DATA_BYTE1;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= ((tempval & 0x000000FF) << 8);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);
	/* Read data byte 0 */
	regval = MC417_MIRD | MC417_MIRDY | MCI_MEMORY_DATA_BYTE0;
	cx_write(MC417_RWD, regval);
	regval = MC417_MIWR | MC417_MIRDY | MCI_MEMORY_DATA_BYTE0;
	cx_write(MC417_RWD, regval);
	tempval = cx_read(MC417_RWD);
	dataval |= (tempval & 0x000000FF);
	regval = MC417_MIWR | MC417_MIRD | MC417_MICS | MC417_MIRDY;
	cx_write(MC417_RWD, regval);

	*value = dataval;
	return retval;
}
/* Set (raise) the requested bits of the CX23417 GPIO output register
 * (0x900C) via a read-modify-write over the MCI bus.  Only the low
 * 16 bits of @mask are honoured, matching mc417_gpio_clear().
 */
void mc417_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	u32 val;

	/* Set the gpio value */
	mc417_register_read(dev, 0x900C, &val);
	/* Use the same 16-bit mask literal as the sibling helpers
	 * (was 0x000ffff -- same value, inconsistent spelling). */
	val |= (mask & 0x0000ffff);
	mc417_register_write(dev, 0x900C, val);
}
/* Clear (lower) the requested bits of the CX23417 GPIO output register
 * (0x900C).  Read-modify-write; only the low 16 bits of @mask apply.
 */
void mc417_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	u32 gpio_val;

	mc417_register_read(dev, 0x900C, &gpio_val);
	gpio_val &= ~(mask & 0x0000ffff);
	mc417_register_write(dev, 0x900C, gpio_val);
}
/* Configure the direction of the CX23417 GPIO pins selected by @mask
 * (low 16 bits only): output when @asoutput is non-zero, input
 * otherwise.  Read-modify-write of the direction register (0x9020).
 */
void mc417_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	u32 dir;

	mc417_register_read(dev, 0x9020, &dir);
	if (!asoutput)
		dir &= ~(mask & 0x0000ffff);
	else
		dir |= (mask & 0x0000ffff);
	mc417_register_write(dev, 0x9020, dir);
}
/* ------------------------------------------------------------------ */
/* MPEG encoder API */
/* Map a CX2341X encoder API command code to a human-readable name for
 * debug output; unknown codes yield "UNKNOWN".  Returns const char *
 * because the results are string literals and must never be written
 * through (all callers only pass the result to printk "%s").
 */
static const char *cmd_to_str(int cmd)
{
	switch (cmd) {
	case CX2341X_ENC_PING_FW:
		return "PING_FW";
	case CX2341X_ENC_START_CAPTURE:
		return "START_CAPTURE";
	case CX2341X_ENC_STOP_CAPTURE:
		return "STOP_CAPTURE";
	case CX2341X_ENC_SET_AUDIO_ID:
		return "SET_AUDIO_ID";
	case CX2341X_ENC_SET_VIDEO_ID:
		return "SET_VIDEO_ID";
	case CX2341X_ENC_SET_PCR_ID:
		return "SET_PCR_ID";
	case CX2341X_ENC_SET_FRAME_RATE:
		return "SET_FRAME_RATE";
	case CX2341X_ENC_SET_FRAME_SIZE:
		return "SET_FRAME_SIZE";
	case CX2341X_ENC_SET_BIT_RATE:
		return "SET_BIT_RATE";
	case CX2341X_ENC_SET_GOP_PROPERTIES:
		return "SET_GOP_PROPERTIES";
	case CX2341X_ENC_SET_ASPECT_RATIO:
		return "SET_ASPECT_RATIO";
	case CX2341X_ENC_SET_DNR_FILTER_MODE:
		return "SET_DNR_FILTER_MODE";
	case CX2341X_ENC_SET_DNR_FILTER_PROPS:
		return "SET_DNR_FILTER_PROPS";
	case CX2341X_ENC_SET_CORING_LEVELS:
		return "SET_CORING_LEVELS";
	case CX2341X_ENC_SET_SPATIAL_FILTER_TYPE:
		return "SET_SPATIAL_FILTER_TYPE";
	case CX2341X_ENC_SET_VBI_LINE:
		return "SET_VBI_LINE";
	case CX2341X_ENC_SET_STREAM_TYPE:
		return "SET_STREAM_TYPE";
	case CX2341X_ENC_SET_OUTPUT_PORT:
		return "SET_OUTPUT_PORT";
	case CX2341X_ENC_SET_AUDIO_PROPERTIES:
		return "SET_AUDIO_PROPERTIES";
	case CX2341X_ENC_HALT_FW:
		return "HALT_FW";
	case CX2341X_ENC_GET_VERSION:
		return "GET_VERSION";
	case CX2341X_ENC_SET_GOP_CLOSURE:
		return "SET_GOP_CLOSURE";
	case CX2341X_ENC_GET_SEQ_END:
		return "GET_SEQ_END";
	case CX2341X_ENC_SET_PGM_INDEX_INFO:
		return "SET_PGM_INDEX_INFO";
	case CX2341X_ENC_SET_VBI_CONFIG:
		return "SET_VBI_CONFIG";
	case CX2341X_ENC_SET_DMA_BLOCK_SIZE:
		return "SET_DMA_BLOCK_SIZE";
	case CX2341X_ENC_GET_PREV_DMA_INFO_MB_10:
		return "GET_PREV_DMA_INFO_MB_10";
	case CX2341X_ENC_GET_PREV_DMA_INFO_MB_9:
		return "GET_PREV_DMA_INFO_MB_9";
	case CX2341X_ENC_SCHED_DMA_TO_HOST:
		return "SCHED_DMA_TO_HOST";
	case CX2341X_ENC_INITIALIZE_INPUT:
		return "INITIALIZE_INPUT";
	case CX2341X_ENC_SET_FRAME_DROP_RATE:
		return "SET_FRAME_DROP_RATE";
	case CX2341X_ENC_PAUSE_ENCODER:
		return "PAUSE_ENCODER";
	case CX2341X_ENC_REFRESH_INPUT:
		return "REFRESH_INPUT";
	case CX2341X_ENC_SET_COPYRIGHT:
		return "SET_COPYRIGHT";
	case CX2341X_ENC_SET_EVENT_NOTIFICATION:
		return "SET_EVENT_NOTIFICATION";
	case CX2341X_ENC_SET_NUM_VSYNC_LINES:
		return "SET_NUM_VSYNC_LINES";
	case CX2341X_ENC_SET_PLACEHOLDER:
		return "SET_PLACEHOLDER";
	case CX2341X_ENC_MUTE_VIDEO:
		return "MUTE_VIDEO";
	case CX2341X_ENC_MUTE_AUDIO:
		return "MUTE_AUDIO";
	case CX2341X_ENC_MISC:
		return "MISC";
	default:
		return "UNKNOWN";
	}
}
/* Execute one CX2341X API command through the firmware mailbox.
 * The mailbox lives in CX23417 memory at dev->cx23417_mailbox; the
 * word just before it must hold the 0x12345678 signature.  Layout used
 * here: [0]=flags, [1]=command, [2]=result, [3]=timeout, [4..]=args.
 * @in/@out are the number of argument/result words in @data.
 * Returns the firmware's result word, or -1 on signature/busy/timeout
 * errors.  NOTE(review): retval is u32 but returned as int -- a
 * firmware result with the top bit set would look negative; confirm
 * result codes fit in a positive int.
 */
static int cx23885_mbox_func(void *priv,
			     u32 command,
			     int in,
			     int out,
			     u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct cx23885_dev *dev = priv;
	unsigned long timeout;
	u32 value, flag, retval = 0;
	int i;

	dprintk(3, "%s: command(0x%X) = %s\n", __func__, command,
		cmd_to_str(command));

	/* this may not be 100% safe if we can't read any memory location
	   without side effects */
	mc417_memory_read(dev, dev->cx23417_mailbox - 4, &value);
	if (value != 0x12345678) {
		printk(KERN_ERR
			"Firmware and/or mailbox pointer not initialized "
			"or corrupted, signature = 0x%x, cmd = %s\n", value,
			cmd_to_str(command));
		return -1;
	}

	/* This read looks at 32 bits, but flag is only 8 bits.
	 * Seems we also bail if CMD or TIMEOUT bytes are set???
	 */
	mc417_memory_read(dev, dev->cx23417_mailbox, &flag);
	if (flag) {
		printk(KERN_ERR "ERROR: Mailbox appears to be in use "
			"(%x), cmd = %s\n", flag, cmd_to_str(command));
		return -1;
	}

	flag |= 1; /* tell 'em we're working on it */
	mc417_memory_write(dev, dev->cx23417_mailbox, flag);

	/* write command + args + fill remaining with zeros */
	/* command code */
	mc417_memory_write(dev, dev->cx23417_mailbox + 1, command);
	mc417_memory_write(dev, dev->cx23417_mailbox + 3,
		IVTV_API_STD_TIMEOUT); /* timeout */
	for (i = 0; i < in; i++) {
		mc417_memory_write(dev, dev->cx23417_mailbox + 4 + i, data[i]);
		dprintk(3, "API Input %d = %d\n", i, data[i]);
	}
	for (; i < CX2341X_MBOX_MAX_DATA; i++)
		mc417_memory_write(dev, dev->cx23417_mailbox + 4 + i, 0);

	flag |= 3; /* tell 'em we're done writing */
	mc417_memory_write(dev, dev->cx23417_mailbox, flag);

	/* wait for firmware to handle the API command */
	timeout = jiffies + msecs_to_jiffies(10);
	for (;;) {
		mc417_memory_read(dev, dev->cx23417_mailbox, &flag);
		if (0 != (flag & 4))	/* bit 2: firmware finished */
			break;
		if (time_after(jiffies, timeout)) {
			printk(KERN_ERR "ERROR: API Mailbox timeout\n");
			return -1;
		}
		udelay(10);
	}

	/* read output values */
	for (i = 0; i < out; i++) {
		mc417_memory_read(dev, dev->cx23417_mailbox + 4 + i, data + i);
		dprintk(3, "API Output %d = %d\n", i, data[i]);
	}

	mc417_memory_read(dev, dev->cx23417_mailbox + 2, &retval);
	dprintk(3, "API result = %d\n", retval);

	/* release the mailbox for the next caller */
	flag = 0;
	mc417_memory_write(dev, dev->cx23417_mailbox, flag);
	return retval;
}
/* We don't need to call the API often, so using just one
* mailbox will probably suffice
*/
/* Varargs front end for cx23885_mbox_func(): the first @inputcnt
 * variadic arguments are int command parameters, followed by
 * @outputcnt int* slots that receive the firmware's result words.
 * Returns whatever cx23885_mbox_func() returns.
 */
static int cx23885_api_cmd(struct cx23885_dev *dev,
			   u32 command,
			   u32 inputcnt,
			   u32 outputcnt,
			   ...)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	va_list ap;
	int idx, result;

	dprintk(3, "%s() cmds = 0x%08x\n", __func__, command);

	va_start(ap, outputcnt);
	for (idx = 0; idx < inputcnt; idx++)
		data[idx] = va_arg(ap, int);

	result = cx23885_mbox_func(dev, command, inputcnt, outputcnt, data);

	/* Outputs are copied back even if the mailbox call failed,
	 * matching the long-standing behaviour of this helper. */
	for (idx = 0; idx < outputcnt; idx++) {
		int *slot = va_arg(ap, int *);
		*slot = data[idx];
	}
	va_end(ap);
	return result;
}
/* Scan CX23417 memory for the four-word mailbox signature and return
 * the word address immediately after it (the mailbox base), or -1 if
 * the signature is not found within CX23885_FIRM_IMAGE_SIZE words.
 */
static int cx23885_find_mailbox(struct cx23885_dev *dev)
{
	static const u32 sig[4] = {
		0x12345678, 0x34567812, 0x56781234, 0x78123456
	};
	int matched = 0;
	u32 word;
	int addr;

	dprintk(2, "%s()\n", __func__);

	for (addr = 0; addr < CX23885_FIRM_IMAGE_SIZE; addr++) {
		mc417_memory_read(dev, addr, &word);
		/* restart matching on any mismatch */
		matched = (word == sig[matched]) ? matched + 1 : 0;
		if (matched == 4) {
			dprintk(1, "Mailbox signature found at 0x%x\n",
				addr + 1);
			return addr + 1;
		}
	}
	printk(KERN_ERR "Mailbox signature values not found!\n");
	return -1;
}
/* Upload the CX23417 encoder firmware: save the GPIO state, hold the
 * VPU/APU in reset, fetch CX23885_FIRM_IMAGE_NAME via the firmware
 * loader, stream it word-by-word into chip memory, read it back to
 * verify a running checksum, then restore GPIO state and release the
 * reset.  Returns 0 on success, -1 on any failure.
 */
static int cx23885_load_firmware(struct cx23885_dev *dev)
{
	static const unsigned char magic[8] = {
		0xa7, 0x0d, 0x00, 0x00, 0x66, 0xbb, 0x55, 0xaa
	};
	const struct firmware *firmware;
	int i, retval = 0;
	u32 value = 0;
	u32 gpio_output = 0;
	u32 checksum = 0;
	u32 *dataptr;

	dprintk(2, "%s()\n", __func__);

	/* Save GPIO settings before reset of APU */
	retval |= mc417_memory_read(dev, 0x9020, &gpio_output);
	retval |= mc417_memory_read(dev, 0x900C, &value);

	/* Accumulate (|=) rather than overwrite so errors from the GPIO
	 * reads above are not silently discarded. */
	retval |= mc417_register_write(dev,
		IVTV_REG_VPU, 0xFFFFFFED);
	retval |= mc417_register_write(dev,
		IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
	retval |= mc417_register_write(dev,
		IVTV_REG_ENC_SDRAM_REFRESH, 0x80000800);
	retval |= mc417_register_write(dev,
		IVTV_REG_ENC_SDRAM_PRECHARGE, 0x1A);
	retval |= mc417_register_write(dev,
		IVTV_REG_APU, 0);

	if (retval != 0) {
		printk(KERN_ERR "%s: Error with mc417_register_write\n",
			__func__);
		return -1;
	}

	retval = request_firmware(&firmware, CX23885_FIRM_IMAGE_NAME,
				  &dev->pci->dev);
	if (retval != 0) {
		printk(KERN_ERR
			"ERROR: Hotplug firmware request failed (%s).\n",
			CX23885_FIRM_IMAGE_NAME);
		printk(KERN_ERR "Please fix your hotplug setup, the board will "
			"not work without firmware loaded!\n");
		return -1;
	}

	if (firmware->size != CX23885_FIRM_IMAGE_SIZE) {
		printk(KERN_ERR "ERROR: Firmware size mismatch "
			"(have %zd, expected %d)\n",
			firmware->size, CX23885_FIRM_IMAGE_SIZE);
		release_firmware(firmware);
		return -1;
	}

	if (0 != memcmp(firmware->data, magic, 8)) {
		printk(KERN_ERR
			"ERROR: Firmware magic mismatch, wrong file?\n");
		release_firmware(firmware);
		return -1;
	}

	/* transfer to the chip */
	dprintk(2, "Loading firmware ...\n");
	dataptr = (u32 *)firmware->data;
	for (i = 0; i < (firmware->size >> 2); i++) {
		value = *dataptr;
		checksum += ~value;
		if (mc417_memory_write(dev, i, value) != 0) {
			printk(KERN_ERR "ERROR: Loading firmware failed!\n");
			release_firmware(firmware);
			return -1;
		}
		dataptr++;
	}

	/* read back to verify with the checksum */
	dprintk(1, "Verifying firmware ...\n");
	for (i--; i >= 0; i--) {
		if (mc417_memory_read(dev, i, &value) != 0) {
			printk(KERN_ERR "ERROR: Reading firmware failed!\n");
			release_firmware(firmware);
			return -1;
		}
		/* subtracting every word brings the checksum back to 0 */
		checksum -= ~value;
	}
	if (checksum) {
		printk(KERN_ERR
			"ERROR: Firmware load failed (checksum mismatch).\n");
		release_firmware(firmware);
		return -1;
	}
	release_firmware(firmware);
	dprintk(1, "Firmware upload successful.\n");

	retval |= mc417_register_write(dev, IVTV_REG_HW_BLOCKS,
		IVTV_CMD_HW_BLOCKS_RST);

	/* F/W power up disturbs the GPIOs, restore state */
	retval |= mc417_register_write(dev, 0x9020, gpio_output);
	retval |= mc417_register_write(dev, 0x900C, value);

	retval |= mc417_register_read(dev, IVTV_REG_VPU, &value);
	retval |= mc417_register_write(dev, IVTV_REG_VPU, value & 0xFFFFFFE8);

	/* NOTE: failures past this point are only logged, not returned */
	if (retval < 0)
		printk(KERN_ERR "%s: Error with mc417_register_write\n",
			__func__);
	return 0;
}
/* Query the encoder's sequence-end state (GET_SEQ_END) and log the
 * returned status/sequence words at debug level 1.
 */
void cx23885_417_check_encoder(struct cx23885_dev *dev)
{
	u32 status = 0, seq = 0;

	cx23885_api_cmd(dev, CX2341X_ENC_GET_SEQ_END, 0, 2, &status, &seq);
	dprintk(1, "%s() status = %d, seq = %d\n", __func__, status, seq);
}
/* Push the current codec configuration to the encoder: frame size from
 * the TS1 port, then the cached MPEG parameters via cx2341x_update(),
 * followed by two MISC tweaks (opcodes 3 and 4 -- meaning not
 * documented here; presumably chip-specific quirk settings).
 */
static void cx23885_codec_settings(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* assign frame size */
	cx23885_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
				dev->ts1.height, dev->ts1.width);

	dev->mpeg_params.width = dev->ts1.width;
	dev->mpeg_params.height = dev->ts1.height;
	dev->mpeg_params.is_50hz =
		(dev->encodernorm.id & V4L2_STD_625_50) != 0;

	cx2341x_update(dev, cx23885_mbox_func, NULL, &dev->mpeg_params);

	cx23885_api_cmd(dev, CX2341X_ENC_MISC, 2, 0, 3, 1);
	cx23885_api_cmd(dev, CX2341X_ENC_MISC, 2, 0, 4, 1);
}
/* Bring the CX23417 encoder to a capturing state: ping the firmware
 * (loading it first if the ping fails), apply codec settings, set up
 * VBI capture, unmute audio, initialise the video input and start
 * MPEG capture.  Returns 0 on success, negative on failure.
 */
static int cx23885_initialize_codec(struct cx23885_dev *dev)
{
	int version;
	int retval;
	u32 i, data[7];

	dprintk(1, "%s()\n", __func__);

	retval = cx23885_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
	if (retval < 0) {
		/* Fix: old message said "PING OK" in the failure path */
		dprintk(2, "%s() PING failed, trying to load firmware\n",
			__func__);
		retval = cx23885_load_firmware(dev);
		if (retval < 0) {
			printk(KERN_ERR "%s() f/w load failed\n", __func__);
			return retval;
		}
		retval = cx23885_find_mailbox(dev);
		if (retval < 0) {
			printk(KERN_ERR "%s() mailbox < 0, error\n",
				__func__);
			return -1;
		}
		dev->cx23417_mailbox = retval;
		retval = cx23885_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0);
		if (retval < 0) {
			printk(KERN_ERR
				"ERROR: cx23417 firmware ping failed!\n");
			return -1;
		}
		retval = cx23885_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1,
			&version);
		if (retval < 0) {
			printk(KERN_ERR "ERROR: cx23417 firmware get encoder :"
				"version failed!\n");
			return -1;
		}
		dprintk(1, "cx23417 firmware version is 0x%08x\n", version);
		msleep(200);
	}

	cx23885_codec_settings(dev);
	msleep(60);

	cx23885_api_cmd(dev, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, 0,
		CX23885_FIELD1_SAA7115, CX23885_FIELD2_SAA7115);
	cx23885_api_cmd(dev, CX2341X_ENC_SET_PLACEHOLDER, 12, 0,
		CX23885_CUSTOM_EXTENSION_USR_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0,
		0, 0);

	/* Setup to capture VBI */
	data[0] = 0x0001BD00;
	data[1] = 1;          /* frames per interrupt */
	data[2] = 4;          /* total bufs */
	data[3] = 0x91559155; /* start codes */
	data[4] = 0x206080C0; /* stop codes */
	data[5] = 6;          /* lines */
	data[6] = 64;         /* BPL */
	cx23885_api_cmd(dev, CX2341X_ENC_SET_VBI_CONFIG, 7, 0, data[0], data[1],
		data[2], data[3], data[4], data[5], data[6]);

	/* Mark lines 19-21 as valid VBI lines, in both fields */
	for (i = 2; i <= 24; i++) {
		int valid;

		valid = ((i >= 19) && (i <= 21));
		cx23885_api_cmd(dev, CX2341X_ENC_SET_VBI_LINE, 5, 0, i,
				valid, 0 , 0, 0);
		cx23885_api_cmd(dev, CX2341X_ENC_SET_VBI_LINE, 5, 0,
				i | 0x80000000, valid, 0, 0, 0);
	}

	cx23885_api_cmd(dev, CX2341X_ENC_MUTE_AUDIO, 1, 0, CX23885_UNMUTE);
	msleep(60);

	/* initialize the video input */
	cx23885_api_cmd(dev, CX2341X_ENC_INITIALIZE_INPUT, 0, 0);
	msleep(60);

	/* Enable VIP style pixel invalidation so we work with scaled mode */
	mc417_memory_write(dev, 2120, 0x00000080);

	/* start capturing to the host interface */
	cx23885_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0,
		CX23885_MPEG_CAPTURE, CX23885_RAW_BITS_NONE);
	msleep(10);
	return 0;
}
/* ------------------------------------------------------------------ */
static int bb_buf_setup(struct videobuf_queue *q,
unsigned int *count, unsigned int *size)
{
struct cx23885_fh *fh = q->priv_data;
fh->dev->ts1.ts_packet_size = mpeglinesize;
fh->dev->ts1.ts_packet_count = mpeglines;
*size = fh->dev->ts1.ts_packet_size * fh->dev->ts1.ts_packet_count;
*count = mpegbufs;
return 0;
}
/* videobuf prepare: delegate to the common cx23885 TS buffer prepare. */
static int bb_buf_prepare(struct videobuf_queue *q,
	struct videobuf_buffer *vb, enum v4l2_field field)
{
	struct cx23885_fh *fh = q->priv_data;
	struct cx23885_buffer *buf = (struct cx23885_buffer *)vb;

	return cx23885_buf_prepare(q, &fh->dev->ts1, buf, field);
}
/* videobuf queue: hand the buffer to the TS1 port's queue. */
static void bb_buf_queue(struct videobuf_queue *q,
	struct videobuf_buffer *vb)
{
	struct cx23885_fh *fh = q->priv_data;
	struct cx23885_buffer *buf = (struct cx23885_buffer *)vb;

	cx23885_buf_queue(&fh->dev->ts1, buf);
}
/* videobuf release: free the buffer via the common helper. */
static void bb_buf_release(struct videobuf_queue *q,
	struct videobuf_buffer *vb)
{
	struct cx23885_buffer *buf = (struct cx23885_buffer *)vb;

	cx23885_free_buffer(q, buf);
}
/* videobuf operations for the MPEG capture queue (see bb_buf_*). */
static struct videobuf_queue_ops cx23885_qops = {
	.buf_setup    = bb_buf_setup,
	.buf_prepare  = bb_buf_prepare,
	.buf_queue    = bb_buf_queue,
	.buf_release  = bb_buf_release,
};
/* ------------------------------------------------------------------ */
/* NULL-terminated list of control-ID arrays handed to v4l2_ctrl_next();
 * only the shared CX2341X MPEG control set is exposed. */
static const u32 *ctrl_classes[] = {
	cx2341x_mpeg_ctrls,
	NULL
};
/* Enumerate/describe a control: advance qctrl->id to the next known
 * control and fill it in, marking controls the current MPEG parameter
 * set rejects as DISABLED.  Returns -EINVAL past the end of the list.
 */
static int cx23885_queryctrl(struct cx23885_dev *dev,
	struct v4l2_queryctrl *qctrl)
{
	qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
	if (!qctrl->id)
		return -EINVAL;

	/* MPEG V4L2 controls */
	if (cx2341x_ctrl_query(&dev->mpeg_params, qctrl) != 0)
		qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;

	return 0;
}
/* Describe one menu entry of a menu-type control.  The control is
 * looked up first; if that fails we must bail out instead of passing a
 * mostly-uninitialized v4l2_queryctrl on to v4l2_ctrl_query_menu()
 * (the previous code ignored the lookup result).
 */
static int cx23885_querymenu(struct cx23885_dev *dev,
	struct v4l2_querymenu *qmenu)
{
	struct v4l2_queryctrl qctrl;
	int err;

	qctrl.id = qmenu->id;
	err = cx23885_queryctrl(dev, &qctrl);
	if (err)
		return err;
	return v4l2_ctrl_query_menu(qmenu, &qctrl,
		cx2341x_ctrl_get_menu(&dev->mpeg_params, qmenu->id));
}
/* VIDIOC_S_STD: adopt the first supported TV norm matching *id. */
static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;
	unsigned int n;

	for (n = 0; n < ARRAY_SIZE(cx23885_tvnorms); n++) {
		if (*id & cx23885_tvnorms[n].id) {
			dev->encodernorm = cx23885_tvnorms[n];
			return 0;
		}
	}
	return -EINVAL;
}
/* VIDIOC_ENUMINPUT: describe one of the (up to four) board inputs.
 * Tuner/cable inputs report TYPE_TUNER, everything else TYPE_CAMERA;
 * every supported norm is advertised on each input.
 */
static int vidioc_enum_input(struct file *file, void *priv,
				struct v4l2_input *i)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;
	struct cx23885_input *input;
	int n;

	if (i->index >= 4)
		return -EINVAL;

	input = &cx23885_boards[dev->board].input[i->index];
	if (input->type == 0)
		return -EINVAL;

	/* FIXME: report input->name once board tables carry real names */
	strcpy(i->name, "unset");

	switch (input->type) {
	case CX23885_VMUX_TELEVISION:
	case CX23885_VMUX_CABLE:
		i->type = V4L2_INPUT_TYPE_TUNER;
		break;
	default:
		i->type = V4L2_INPUT_TYPE_CAMERA;
		break;
	}

	for (n = 0; n < ARRAY_SIZE(cx23885_tvnorms); n++)
		i->std |= cx23885_tvnorms[n].id;
	return 0;
}
/* VIDIOC_G_INPUT: report the currently selected input index. */
static int vidioc_g_input(struct file *file, void *priv, unsigned int *i)
{
	struct cx23885_fh *fh = file->private_data;

	*i = fh->dev->input;
	return 0;
}
/* VIDIOC_S_INPUT: only validates the index; no routing is performed. */
static int vidioc_s_input(struct file *file, void *priv, unsigned int i)
{
	return (i < 4) ? 0 : -EINVAL;
}
/* VIDIOC_G_TUNER: fill in tuner 0's state via the sub-devices.
 * Fails when the board has no tuner or a non-zero index is asked for.
 */
static int vidioc_g_tuner(struct file *file, void *priv,
				struct v4l2_tuner *t)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;

	if (dev->tuner_type == UNSET || t->index != 0)
		return -EINVAL;

	strcpy(t->name, "Television");
	call_all(dev, tuner, g_tuner, t);

	dprintk(1, "VIDIOC_G_TUNER: tuner type %d\n", t->type);
	return 0;
}
/* VIDIOC_S_TUNER: forward the tuner settings to the A/V core. */
static int vidioc_s_tuner(struct file *file, void *priv,
				struct v4l2_tuner *t)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;

	if (dev->tuner_type == UNSET)
		return -EINVAL;

	/* Update the A/V core */
	call_all(dev, tuner, s_tuner, t);
	return 0;
}
/* VIDIOC_G_FREQUENCY: report the cached frequency, then let the
 * sub-devices refine the structure.
 */
static int vidioc_g_frequency(struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;

	if (dev->tuner_type == UNSET)
		return -EINVAL;

	f->type      = V4L2_TUNER_ANALOG_TV;
	f->frequency = dev->freq;

	call_all(dev, tuner, g_frequency, f);
	return 0;
}
/* VIDIOC_S_FREQUENCY: retune and restart the encoder.
 * NOTE(review): capture is stopped BEFORE the arguments are validated,
 * so even a rejected request halts a running capture -- confirm this
 * ordering is intentional.
 */
static int vidioc_s_frequency(struct file *file, void *priv,
	struct v4l2_frequency *f)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;

	cx23885_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
		CX23885_END_NOW, CX23885_MPEG_CAPTURE,
		CX23885_RAW_BITS_NONE);

	dprintk(1, "VIDIOC_S_FREQUENCY: dev type %d, f\n",
		dev->tuner_type);
	dprintk(1, "VIDIOC_S_FREQUENCY: f tuner %d, f type %d\n",
		f->tuner, f->type);
	if (UNSET == dev->tuner_type)
		return -EINVAL;
	if (f->tuner != 0)
		return -EINVAL;
	if (f->type != V4L2_TUNER_ANALOG_TV)
		return -EINVAL;
	dev->freq = f->frequency;

	/* Program the tuner, then bring the encoder back up. */
	call_all(dev, tuner, s_frequency, f);

	cx23885_initialize_codec(dev);

	return 0;
}
/* VIDIOC_S_CTRL: forward the control write to the A/V core. */
static int vidioc_s_ctrl(struct file *file, void *priv,
	struct v4l2_control *ctl)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;

	/* Update the A/V core */
	call_all(dev, core, s_ctrl, ctl);
	return 0;
}
/* VIDIOC_QUERYCAP: describe the MPEG capture device.  bus_info is now
 * filled with a bounded snprintf() instead of sprintf() -- the field
 * is a fixed-size array and pci_name() length is not under our
 * control.
 */
static int vidioc_querycap(struct file *file, void  *priv,
	struct v4l2_capability *cap)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;
	struct cx23885_tsport *tsport = &dev->ts1;

	strlcpy(cap->driver, dev->name, sizeof(cap->driver));
	strlcpy(cap->card, cx23885_boards[tsport->dev->board].name,
		sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI:%s",
		 pci_name(dev->pci));
	cap->version = CX23885_VERSION_CODE;
	cap->capabilities =
		V4L2_CAP_VIDEO_CAPTURE |
		V4L2_CAP_READWRITE     |
		V4L2_CAP_STREAMING     |
		0;
	if (UNSET != dev->tuner_type)
		cap->capabilities |= V4L2_CAP_TUNER;
	return 0;
}
/* VIDIOC_ENUM_FMT: exactly one capture format is offered -- MPEG. */
static int vidioc_enum_fmt_vid_cap(struct file *file, void  *priv,
	struct v4l2_fmtdesc *f)
{
	if (f->index)
		return -EINVAL;

	strlcpy(f->description, "MPEG", sizeof(f->description));
	f->pixelformat = V4L2_PIX_FMT_MPEG;
	return 0;
}
/* VIDIOC_G_FMT: report the current MPEG capture geometry.  sizeimage
 * mirrors the videobuf buffer size (packet size * packet count).
 */
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;

	f->fmt.pix.width        = dev->ts1.width;
	f->fmt.pix.height       = dev->ts1.height;
	f->fmt.pix.field        = fh->mpegq.field;
	f->fmt.pix.pixelformat  = V4L2_PIX_FMT_MPEG;
	f->fmt.pix.bytesperline = 0;
	f->fmt.pix.sizeimage    =
		dev->ts1.ts_packet_size * dev->ts1.ts_packet_count;
	f->fmt.pix.colorspace   = 0;

	dprintk(1, "VIDIOC_G_FMT: w: %d, h: %d, f: %d\n",
		dev->ts1.width, dev->ts1.height, fh->mpegq.field);
	return 0;
}
/* VIDIOC_TRY_FMT: the stream is always MPEG; only pixelformat and
 * sizeimage are adjusted, width/height are left as the caller set them.
 */
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;

	f->fmt.pix.pixelformat  = V4L2_PIX_FMT_MPEG;
	f->fmt.pix.bytesperline = 0;
	f->fmt.pix.sizeimage    =
		dev->ts1.ts_packet_size * dev->ts1.ts_packet_count;
	f->fmt.pix.colorspace   = 0;

	dprintk(1, "VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n",
		dev->ts1.width, dev->ts1.height, fh->mpegq.field);
	return 0;
}
/* VIDIOC_S_FMT: accept any request but force MPEG; geometry is owned
 * by the encoder configuration, not by this ioctl.
 */
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;

	f->fmt.pix.pixelformat  = V4L2_PIX_FMT_MPEG;
	f->fmt.pix.bytesperline = 0;
	f->fmt.pix.sizeimage    =
		dev->ts1.ts_packet_size * dev->ts1.ts_packet_count;
	f->fmt.pix.colorspace   = 0;

	dprintk(1, "VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
		f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
	return 0;
}
/* VIDIOC_REQBUFS: delegate to videobuf. */
static int vidioc_reqbufs(struct file *file, void *priv,
				struct v4l2_requestbuffers *p)
{
	struct cx23885_fh *fh = priv;

	return videobuf_reqbufs(&fh->mpegq, p);
}
/* VIDIOC_QUERYBUF: delegate to videobuf. */
static int vidioc_querybuf(struct file *file, void *priv,
				struct v4l2_buffer *p)
{
	struct cx23885_fh *fh = priv;

	return videobuf_querybuf(&fh->mpegq, p);
}
/* VIDIOC_QBUF: delegate to videobuf. */
static int vidioc_qbuf(struct file *file, void *priv,
				struct v4l2_buffer *p)
{
	struct cx23885_fh *fh = priv;

	return videobuf_qbuf(&fh->mpegq, p);
}
/* VIDIOC_DQBUF: delegate to videobuf, honouring O_NONBLOCK. */
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b)
{
	struct cx23885_fh *fh = priv;
	int nonblocking = file->f_flags & O_NONBLOCK;

	return videobuf_dqbuf(&fh->mpegq, b, nonblocking);
}
/* VIDIOC_STREAMON: delegate to videobuf. */
static int vidioc_streamon(struct file *file, void *priv,
				enum v4l2_buf_type i)
{
	struct cx23885_fh *fh = priv;

	return videobuf_streamon(&fh->mpegq);
}
/* VIDIOC_STREAMOFF: delegate to videobuf. */
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct cx23885_fh *fh = priv;

	return videobuf_streamoff(&fh->mpegq);
}
/* VIDIOC_G_EXT_CTRLS: only the MPEG control class is supported; read
 * the values straight from the cached parameter set.
 */
static int vidioc_g_ext_ctrls(struct file *file, void *priv,
			      struct v4l2_ext_controls *f)
{
	struct cx23885_fh *fh = priv;
	struct cx23885_dev *dev = fh->dev;

	if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
		return -EINVAL;

	return cx2341x_ext_ctrls(&dev->mpeg_params, 0, f, VIDIOC_G_EXT_CTRLS);
}
static int vidioc_s_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *f)
{
struct cx23885_fh *fh = priv;
struct cx23885_dev *dev = fh->dev;
struct cx2341x_mpeg_params p;
int err;
if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
return -EINVAL;
p = dev->mpeg_params;
err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_S_EXT_CTRLS);
if (err == 0) {
err = cx2341x_update(dev, cx23885_mbox_func,
&dev->mpeg_params, &p);
dev->mpeg_params = p;
}
return err;
}
static int vidioc_try_ext_ctrls(struct file *file, void *priv,
struct v4l2_ext_controls *f)
{
struct cx23885_fh *fh = priv;
struct cx23885_dev *dev = fh->dev;
struct cx2341x_mpeg_params p;
int err;
if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
return -EINVAL;
p = dev->mpeg_params;
err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS);
return err;
}
/* VIDIOC_LOG_STATUS: dump sub-device and encoder state to the log,
 * bracketed by START/END banner lines.
 */
static int vidioc_log_status(struct file *file, void *priv)
{
	struct cx23885_fh *fh = priv;
	struct cx23885_dev *dev = fh->dev;
	char name[32 + 2];

	snprintf(name, sizeof(name), "%s/2", dev->name);
	printk(KERN_INFO
		"%s/2: ============  START LOG STATUS  ============\n",
		dev->name);
	call_all(dev, core, log_status);
	cx2341x_log_status(&dev->mpeg_params, name);
	printk(KERN_INFO
		"%s/2: =============  END LOG STATUS  =============\n",
		dev->name);
	return 0;
}
/* VIDIOC_QUERYMENU: delegate to the driver-level helper. */
static int vidioc_querymenu(struct file *file, void *priv,
				struct v4l2_querymenu *a)
{
	struct cx23885_fh *fh = priv;

	return cx23885_querymenu(fh->dev, a);
}
/* VIDIOC_QUERYCTRL: delegate to the driver-level helper. */
static int vidioc_queryctrl(struct file *file, void *priv,
				struct v4l2_queryctrl *c)
{
	struct cx23885_fh *fh = priv;

	return cx23885_queryctrl(fh->dev, c);
}
/* Open handler for the MPEG capture node: allocate the per-filehandle
 * state and initialise its videobuf queue.
 * Fix: the original called unlock_kernel() in the kzalloc-failure path
 * WITHOUT ever having called lock_kernel() (BKL imbalance).  Take the
 * BKL before the allocation so every exit path is balanced.
 */
static int mpeg_open(struct file *file)
{
	struct cx23885_dev *dev = video_drvdata(file);
	struct cx23885_fh *fh;

	dprintk(2, "%s()\n", __func__);

	lock_kernel();

	/* allocate + initialize per filehandle data */
	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	if (NULL == fh) {
		unlock_kernel();
		return -ENOMEM;
	}

	file->private_data = fh;
	fh->dev      = dev;

	videobuf_queue_sg_init(&fh->mpegq, &cx23885_qops,
			    &dev->pci->dev, &dev->ts1.slock,
			    V4L2_BUF_TYPE_VIDEO_CAPTURE,
			    V4L2_FIELD_INTERLACED,
			    sizeof(struct cx23885_buffer),
			    fh);
	unlock_kernel();

	return 0;
}
/* Release handler: if this filehandle was an active reader and it was
 * the last one, stop the encoder and cancel outstanding buffers, then
 * tear down this handle's videobuf state (stream, read, mmap) and free
 * it.  Teardown order matters: capture must be stopped before buffers
 * are cancelled and the queue freed.
 */
static int mpeg_release(struct file *file)
{
	struct cx23885_fh  *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;

	dprintk(2, "%s()\n", __func__);

	/* FIXME: Review this crap */
	/* Shut device down on last close */
	if (atomic_cmpxchg(&fh->v4l_reading, 1, 0) == 1) {
		if (atomic_dec_return(&dev->v4l_reader_count) == 0) {
			/* stop mpeg capture */
			cx23885_api_cmd(fh->dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
				CX23885_END_NOW, CX23885_MPEG_CAPTURE,
				CX23885_RAW_BITS_NONE);

			/* allow the encoder to flush before checking it */
			msleep(500);
			cx23885_417_check_encoder(dev);

			cx23885_cancel_buffers(&fh->dev->ts1);
		}
	}

	if (fh->mpegq.streaming)
		videobuf_streamoff(&fh->mpegq);
	if (fh->mpegq.reading)
		videobuf_read_stop(&fh->mpegq);

	videobuf_mmap_free(&fh->mpegq);
	file->private_data = NULL;
	kfree(fh);

	return 0;
}
/* Read handler: the encoder is started lazily by the first reader
 * (cmpxchg marks this handle as reading; the reader count decides
 * whether the codec needs initialising), then the request is served
 * from the videobuf stream.
 */
static ssize_t mpeg_read(struct file *file, char __user *data,
	size_t count, loff_t *ppos)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;

	dprintk(2, "%s()\n", __func__);

	/* Deal w/ A/V decoder * and mpeg encoder sync issues. */
	/* Start mpeg encoder on first read. */
	if (atomic_cmpxchg(&fh->v4l_reading, 0, 1) == 0 &&
	    atomic_inc_return(&dev->v4l_reader_count) == 1) {
		if (cx23885_initialize_codec(dev) < 0)
			return -EINVAL;
	}

	return videobuf_read_stream(&fh->mpegq, data, count, ppos, 0,
				    file->f_flags & O_NONBLOCK);
}
/* Poll handler: delegate to videobuf's stream poll.
 * (The local 'dev' is required by the dprintk() macro, which expands
 * to a reference to dev->name.)
 */
static unsigned int mpeg_poll(struct file *file,
	struct poll_table_struct *wait)
{
	struct cx23885_fh *fh = file->private_data;
	struct cx23885_dev *dev = fh->dev;

	dprintk(2, "%s\n", __func__);

	return videobuf_poll_stream(file, &fh->mpegq, wait);
}
/* Map capture buffers into user space via the videobuf helper. */
static int mpeg_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct cx23885_fh *fh = file->private_data;
        struct cx23885_dev *dev = fh->dev;

        dprintk(2, "%s()\n", __func__);

        return videobuf_mmap_mapper(&fh->mpegq, vma);
}
/* File operations for the MPEG encoder video node. */
static struct v4l2_file_operations mpeg_fops = {
        .owner = THIS_MODULE,
        .open = mpeg_open,
        .release = mpeg_release,
        .read = mpeg_read,
        .poll = mpeg_poll,
        .mmap = mpeg_mmap,
        .ioctl = video_ioctl2,
};
/* V4L2 ioctl dispatch table for the MPEG encoder node. */
static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
        .vidioc_s_std = vidioc_s_std,
        .vidioc_enum_input = vidioc_enum_input,
        .vidioc_g_input = vidioc_g_input,
        .vidioc_s_input = vidioc_s_input,
        .vidioc_g_tuner = vidioc_g_tuner,
        .vidioc_s_tuner = vidioc_s_tuner,
        .vidioc_g_frequency = vidioc_g_frequency,
        .vidioc_s_frequency = vidioc_s_frequency,
        .vidioc_s_ctrl = vidioc_s_ctrl,
        .vidioc_querycap = vidioc_querycap,
        .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
        .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
        .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
        .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
        .vidioc_reqbufs = vidioc_reqbufs,
        .vidioc_querybuf = vidioc_querybuf,
        .vidioc_qbuf = vidioc_qbuf,
        .vidioc_dqbuf = vidioc_dqbuf,
        .vidioc_streamon = vidioc_streamon,
        .vidioc_streamoff = vidioc_streamoff,
        .vidioc_g_ext_ctrls = vidioc_g_ext_ctrls,
        .vidioc_s_ext_ctrls = vidioc_s_ext_ctrls,
        .vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
        .vidioc_log_status = vidioc_log_status,
        .vidioc_querymenu = vidioc_querymenu,
        .vidioc_queryctrl = vidioc_queryctrl,
        .vidioc_g_chip_ident = cx23885_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
        /* Raw register access, debug builds only. */
        .vidioc_g_register = cx23885_g_register,
        .vidioc_s_register = cx23885_s_register,
#endif
};
/* Template video_device, copied per-device by cx23885_video_dev_alloc(). */
static struct video_device cx23885_mpeg_template = {
        .name = "cx23885",
        .fops = &mpeg_fops,
        .ioctl_ops = &mpeg_ioctl_ops,
        .tvnorms = CX23885_NORMS,
        .current_norm = V4L2_STD_NTSC_M,
};
/*
 * cx23885_417_unregister - drop the encoder's video node.
 *
 * A registered node is unregistered (the V4L2 core then releases it via
 * vfd->release); a node that was only allocated is released directly.
 */
void cx23885_417_unregister(struct cx23885_dev *dev)
{
        struct video_device *vdev = dev->v4l_device;

        dprintk(1, "%s()\n", __func__);

        if (!vdev)
                return;

        if (video_is_registered(vdev))
                video_unregister_device(vdev);
        else
                video_device_release(vdev);
        dev->v4l_device = NULL;
}
/*
 * cx23885_video_dev_alloc - allocate a video_device from a template.
 * @tsport:   transport port the node belongs to
 * @pci:      parent PCI device (used as the device parent)
 * @template: template whose fields are copied wholesale
 * @type:     human-readable node type, used in the device name
 *
 * Returns the new device or NULL on allocation failure.  Caller owns
 * the result; release is routed through video_device_release().
 */
static struct video_device *cx23885_video_dev_alloc(
        struct cx23885_tsport *tsport,
        struct pci_dev *pci,
        struct video_device *template,
        char *type)
{
        struct video_device *vfd;
        struct cx23885_dev *dev = tsport->dev;

        dprintk(1, "%s()\n", __func__);

        vfd = video_device_alloc();
        if (NULL == vfd)
                return NULL;
        /* Start from the template, then personalize name/parent/release. */
        *vfd = *template;
        snprintf(vfd->name, sizeof(vfd->name), "%s %s (%s)", dev->name,
                 type, cx23885_boards[tsport->dev->board].name);
        vfd->parent = &pci->dev;
        vfd->release = video_device_release;
        return vfd;
}
int cx23885_417_register(struct cx23885_dev *dev)
{
/* FIXME: Port1 hardcoded here */
int err = -ENODEV;
struct cx23885_tsport *tsport = &dev->ts1;
dprintk(1, "%s()\n", __func__);
if (cx23885_boards[dev->board].portb != CX23885_MPEG_ENCODER)
return err;
/* Set default TV standard */
dev->encodernorm = cx23885_tvnorms[0];
if (dev->encodernorm.id & V4L2_STD_525_60)
tsport->height = 480;
else
tsport->height = 576;
tsport->width = 720;
cx2341x_fill_defaults(&dev->mpeg_params);
dev->mpeg_params.port = CX2341X_PORT_SERIAL;
/* Allocate and initialize V4L video device */
dev->v4l_device = cx23885_video_dev_alloc(tsport,
dev->pci, &cx23885_mpeg_template, "mpeg");
video_set_drvdata(dev->v4l_device, dev);
err = video_register_device(dev->v4l_device,
VFL_TYPE_GRABBER, -1);
if (err < 0) {
printk(KERN_INFO "%s: can't register mpeg device\n", dev->name);
return err;
}
printk(KERN_INFO "%s: registered device %s [mpeg]\n",
dev->name, video_device_node_name(dev->v4l_device));
return 0;
}
| gpl-2.0 |
mattyen/ntb | drivers/input/misc/gpio-beeper.c | 1112 | 2866 | /*
* Generic GPIO beeper driver
*
* Copyright (C) 2013-2014 Alexander Shiyan <shc_work@mail.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/input.h>
#include <linux/module.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#define BEEPER_MODNAME "gpio-beeper"
/* Per-device state for the GPIO beeper. */
struct gpio_beeper {
        struct work_struct work;  /* defers GPIO toggling to process context */
        struct gpio_desc *desc;   /* the beeper control line */
        bool beeping;             /* last requested state */
};
/* Drive the beeper line; may sleep (GPIO can live on a slow bus). */
static void gpio_beeper_toggle(struct gpio_beeper *beep, bool on)
{
        gpiod_set_value_cansleep(beep->desc, on);
}
static void gpio_beeper_work(struct work_struct *work)
{
struct gpio_beeper *beep = container_of(work, struct gpio_beeper, work);
gpio_beeper_toggle(beep, beep->beeping);
}
/*
 * Input event handler: accepts only EV_SND/SND_BELL and schedules the
 * actual GPIO toggle on the workqueue, since the GPIO call may sleep.
 */
static int gpio_beeper_event(struct input_dev *dev, unsigned int type,
                             unsigned int code, int value)
{
        struct gpio_beeper *beep = input_get_drvdata(dev);

        if (type != EV_SND || code != SND_BELL)
                return -ENOTSUPP;
        if (value < 0)
                return -EINVAL;

        beep->beeping = value;
        /* Schedule work to actually turn the beeper on or off. */
        schedule_work(&beep->work);

        return 0;
}
/* Last-close handler: flush pending work, then force the beeper off. */
static void gpio_beeper_close(struct input_dev *input)
{
        struct gpio_beeper *beep = input_get_drvdata(input);

        cancel_work_sync(&beep->work);
        gpio_beeper_toggle(beep, false);
}
/*
 * Probe: acquire the GPIO, set up the input device and register it.
 * All resources are devm-managed, so failure paths need no cleanup.
 */
static int gpio_beeper_probe(struct platform_device *pdev)
{
        struct input_dev *input;
        struct gpio_beeper *beep;

        beep = devm_kzalloc(&pdev->dev, sizeof(*beep), GFP_KERNEL);
        if (!beep)
                return -ENOMEM;

        beep->desc = devm_gpiod_get(&pdev->dev, NULL, GPIOD_OUT_LOW);
        if (IS_ERR(beep->desc))
                return PTR_ERR(beep->desc);

        input = devm_input_allocate_device(&pdev->dev);
        if (!input)
                return -ENOMEM;

        INIT_WORK(&beep->work, gpio_beeper_work);

        input->name        = pdev->name;
        input->id.bustype  = BUS_HOST;
        input->id.vendor   = 0x0001;
        input->id.product  = 0x0001;
        input->id.version  = 0x0100;
        input->close       = gpio_beeper_close;
        input->event       = gpio_beeper_event;

        input_set_capability(input, EV_SND, SND_BELL);
        input_set_drvdata(input, beep);

        return input_register_device(input);
}
#ifdef CONFIG_OF
/* Device-tree match table: binds on compatible = "gpio-beeper". */
static const struct of_device_id gpio_beeper_of_match[] = {
        { .compatible = BEEPER_MODNAME, },
        { }
};
MODULE_DEVICE_TABLE(of, gpio_beeper_of_match);
#endif
/* Platform driver glue; no explicit remove - everything is devm-managed. */
static struct platform_driver gpio_beeper_platform_driver = {
        .driver = {
                .name = BEEPER_MODNAME,
                .of_match_table = of_match_ptr(gpio_beeper_of_match),
        },
        .probe = gpio_beeper_probe,
};
module_platform_driver(gpio_beeper_platform_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
MODULE_DESCRIPTION("Generic GPIO beeper driver");
| gpl-2.0 |
metredigm/linux | drivers/tty/hvc/hvc_rtas.c | 2136 | 3401 | /*
* IBM RTAS driver interface to hvc_console.c
*
* (C) Copyright IBM Corporation 2001-2005
* (C) Copyright Red Hat, Inc. 2005
*
* Author(s): Maximino Augilar <IBM STI Design Center>
* : Ryan S. Arnold <rsa@us.ibm.com>
* : Utz Bacher <utz.bacher@de.ibm.com>
* : David Woodhouse <dwmw2@infradead.org>
*
* inspired by drivers/char/hvc_console.c
* written by Anton Blanchard and Paul Mackerras
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <asm/irq.h>
#include <asm/rtas.h>
#include "hvc_console.h"
#define hvc_rtas_cookie 0x67781e15
struct hvc_struct *hvc_rtas_dev;
static int rtascons_put_char_token = RTAS_UNKNOWN_SERVICE;
static int rtascons_get_char_token = RTAS_UNKNOWN_SERVICE;
/*
 * Push characters to the RTAS console one at a time.  Stops at the first
 * RTAS error and returns the number of characters actually written.
 */
static inline int hvc_rtas_write_console(uint32_t vtermno, const char *buf,
                                         int count)
{
        int done;

        for (done = 0; done < count; done++)
                if (rtas_call(rtascons_put_char_token, 1, 1, NULL, buf[done]))
                        break;

        return done;
}
/*
 * Pull characters from the RTAS console.  Stops on the first RTAS error
 * (including "no character available") and returns how many were read.
 */
static int hvc_rtas_read_console(uint32_t vtermno, char *buf, int count)
{
        int n, ch;

        for (n = 0; n < count; n++) {
                if (rtas_call(rtascons_get_char_token, 0, 2, &ch))
                        break;
                buf[n] = ch;
        }

        return n;
}
/* hvc_console backend ops backed by the RTAS terminal services. */
static const struct hv_ops hvc_rtas_get_put_ops = {
        .get_chars = hvc_rtas_read_console,
        .put_chars = hvc_rtas_write_console,
};
/*
 * Device init: allocate the hvc device for the console instantiated in
 * hvc_rtas_console_init().  Token lookups are repeated defensively in
 * case the console initcall did not run (e.g. not the chosen console).
 */
static int __init hvc_rtas_init(void)
{
        struct hvc_struct *hp;

        if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
                rtascons_put_char_token = rtas_token("put-term-char");
        if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
                return -EIO;

        if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
                rtascons_get_char_token = rtas_token("get-term-char");
        if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
                return -EIO;

        BUG_ON(hvc_rtas_dev);

        /* Allocate an hvc_struct for the console device we instantiated
         * earlier. Save off hp so that we can return it on exit */
        hp = hvc_alloc(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops, 16);
        if (IS_ERR(hp))
                return PTR_ERR(hp);

        hvc_rtas_dev = hp;

        return 0;
}
device_initcall(hvc_rtas_init);
/* This will happen prior to module init. There is no tty at this time? */
/*
 * Early console init: resolve the RTAS terminal tokens and register the
 * hvc console so boot messages can flow before the device initcall runs.
 */
static int __init hvc_rtas_console_init(void)
{
        rtascons_put_char_token = rtas_token("put-term-char");
        if (rtascons_put_char_token == RTAS_UNKNOWN_SERVICE)
                return -EIO;

        rtascons_get_char_token = rtas_token("get-term-char");
        if (rtascons_get_char_token == RTAS_UNKNOWN_SERVICE)
                return -EIO;

        hvc_instantiate(hvc_rtas_cookie, 0, &hvc_rtas_get_put_ops);
        add_preferred_console("hvc", 0, NULL);

        return 0;
}
console_initcall(hvc_rtas_console_init);
| gpl-2.0 |
abyssxsy/linux-tk1 | drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 2392 | 12420 | /*******************************************************************************
Intel 10 Gigabit PCI Express Linux driver
Copyright(c) 1999 - 2013 Intel Corporation.
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Contact Information:
e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include <linux/pci.h>
#include <linux/delay.h>
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_mbx.h"
/**
* ixgbe_read_mbx - Reads a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to read
*
* returns SUCCESS if it successfully read message from buffer
**/
s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;

        /* limit read to size of mailbox */
        if (size > mbx->size)
                size = mbx->size;

        if (!mbx->ops.read)
                return IXGBE_ERR_MBX;

        return mbx->ops.read(hw, msg, size, mbx_id);
}
/**
* ixgbe_write_mbx - Write a message to the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully copied message into the buffer
**/
s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;

        /* refuse messages that don't fit in the mailbox */
        if (size > mbx->size)
                return IXGBE_ERR_MBX;

        /* no write op installed: silently succeed, as before */
        if (!mbx->ops.write)
                return 0;

        return mbx->ops.write(hw, msg, size, mbx_id);
}
/**
* ixgbe_check_for_msg - checks to see if someone sent us mail
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;

        if (!mbx->ops.check_for_msg)
                return IXGBE_ERR_MBX;

        return mbx->ops.check_for_msg(hw, mbx_id);
}
/**
* ixgbe_check_for_ack - checks to see if someone sent us ACK
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;

        if (!mbx->ops.check_for_ack)
                return IXGBE_ERR_MBX;

        return mbx->ops.check_for_ack(hw, mbx_id);
}
/**
* ixgbe_check_for_rst - checks to see if other side has reset
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to check
*
* returns SUCCESS if the Status bit was found or else ERR_MBX
**/
s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;

        if (!mbx->ops.check_for_rst)
                return IXGBE_ERR_MBX;

        return mbx->ops.check_for_rst(hw, mbx_id);
}
/**
* ixgbe_poll_for_msg - Wait for message notification
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message notification
**/
static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        int countdown = mbx->timeout;

        if (!countdown || !mbx->ops.check_for_msg)
                return IXGBE_ERR_MBX;

        /* spin until a message shows up or the timeout expires */
        while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
                countdown--;
                if (!countdown)
                        break;
                udelay(mbx->usec_delay);
        }

        return countdown ? 0 : IXGBE_ERR_MBX;
}
/**
* ixgbe_poll_for_ack - Wait for message acknowledgement
* @hw: pointer to the HW structure
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message acknowledgement
**/
static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        int countdown = mbx->timeout;

        if (!countdown || !mbx->ops.check_for_ack)
                return IXGBE_ERR_MBX;

        /* spin until the ack arrives or the timeout expires */
        while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
                countdown--;
                if (!countdown)
                        break;
                udelay(mbx->usec_delay);
        }

        return countdown ? 0 : IXGBE_ERR_MBX;
}
/**
* ixgbe_read_posted_mbx - Wait for message notification and receive message
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully received a message notification and
* copied it into the receive buffer.
**/
static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
                                 u16 mbx_id)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        s32 ret_val;

        if (!mbx->ops.read)
                return IXGBE_ERR_MBX;

        ret_val = ixgbe_poll_for_msg(hw, mbx_id);
        if (ret_val)
                return ret_val;

        /* notification seen - go fetch the message */
        return mbx->ops.read(hw, msg, size, mbx_id);
}
/**
* ixgbe_write_posted_mbx - Write a message to the mailbox, wait for ack
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @mbx_id: id of mailbox to write
*
* returns SUCCESS if it successfully copied message into the buffer and
* received an ack to that message within delay * timeout period
**/
static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
                                  u16 mbx_id)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;
        s32 ret_val;

        /* exit if either we can't write or there isn't a defined timeout */
        if (!mbx->ops.write || !mbx->timeout)
                return IXGBE_ERR_MBX;

        /* send msg */
        ret_val = mbx->ops.write(hw, msg, size, mbx_id);
        if (ret_val)
                return ret_val;

        /* msg sent - wait for the other end to ack it */
        return ixgbe_poll_for_ack(hw, mbx_id);
}
/* Test @mask in MBVFICR[@index]; if set, clear it and return 0. */
static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
{
        u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));

        if (!(mbvficr & mask))
                return IXGBE_ERR_MBX;

        /* write-one-to-clear the serviced bit */
        IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);

        return 0;
}
/**
* ixgbe_check_for_msg_pf - checks to see if the VF has sent mail
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
{
        s32 index = IXGBE_MBVFICR_INDEX(vf_number);
        u32 vf_bit = vf_number % 16;

        if (ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
                                   index))
                return IXGBE_ERR_MBX;

        hw->mbx.stats.reqs++;

        return 0;
}
/**
* ixgbe_check_for_ack_pf - checks to see if the VF has ACKed
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
{
        s32 index = IXGBE_MBVFICR_INDEX(vf_number);
        u32 vf_bit = vf_number % 16;

        if (ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
                                   index))
                return IXGBE_ERR_MBX;

        hw->mbx.stats.acks++;

        return 0;
}
/**
* ixgbe_check_for_rst_pf - checks to see if the VF has reset
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* returns SUCCESS if the VF has set the Status bit or else ERR_MBX
**/
static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
{
        /* VFLR bits are spread across two 32-bit registers. */
        u32 reg_offset = (vf_number < 32) ? 0 : 1;
        u32 vf_shift = vf_number % 32;
        u32 vflre = 0;
        s32 ret_val = IXGBE_ERR_MBX;

        /* The VF-LR indication register differs per MAC generation. */
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
                vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
                break;
        case ixgbe_mac_X540:
                vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
                break;
        default:
                break;
        }

        if (vflre & (1 << vf_shift)) {
                ret_val = 0;
                /* write-one-to-clear the reset indication */
                IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
                hw->mbx.stats.rsts++;
        }

        return ret_val;
}
/**
* ixgbe_obtain_mbx_lock_pf - obtain mailbox lock
* @hw: pointer to the HW structure
* @vf_number: the VF index
*
* return SUCCESS if we obtained the mailbox lock
**/
static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
{
        s32 ret_val = IXGBE_ERR_MBX;
        u32 p2v_mailbox;

        /* Take ownership of the buffer */
        IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU);

        /* reserve mailbox for vf use */
        p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
        if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
                ret_val = 0;
        /* NOTE(review): a single write/read-back attempt with no retry -
         * later upstream versions retry here; confirm contention behavior
         * before relying on this in hot paths. */

        return ret_val;
}
/**
* ixgbe_write_mbx_pf - Places a message in the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @vf_number: the VF index
*
* returns SUCCESS if it successfully copied message into the buffer
**/
static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
                              u16 vf_number)
{
        s32 ret_val;
        u16 i;

        /* lock the mailbox to prevent pf/vf race condition */
        ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
        if (ret_val)
                goto out_no_write;

        /* flush msg and acks as we are overwriting the message buffer */
        ixgbe_check_for_msg_pf(hw, vf_number);
        ixgbe_check_for_ack_pf(hw, vf_number);

        /* copy the caller specified message to the mailbox memory buffer */
        for (i = 0; i < size; i++)
                IXGBE_WRITE_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i, msg[i]);

        /* Interrupt VF to tell it a message has been sent and release buffer*/
        /* STS also drops the PFU ownership bit taken above. */
        IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_STS);

        /* update stats */
        hw->mbx.stats.msgs_tx++;

out_no_write:
        return ret_val;
}
/**
* ixgbe_read_mbx_pf - Read a message from the mailbox
* @hw: pointer to the HW structure
* @msg: The message buffer
* @size: Length of buffer
* @vf_number: the VF index
*
* This function copies a message from the mailbox buffer to the caller's
* memory buffer. The presumption is that the caller knows that there was
* a message due to a VF request so no polling for message is needed.
**/
static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
                             u16 vf_number)
{
        s32 ret_val;
        u16 i;

        /* lock the mailbox to prevent pf/vf race condition */
        ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
        if (ret_val)
                goto out_no_read;

        /* copy the message to the mailbox memory buffer */
        for (i = 0; i < size; i++)
                msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_PFMBMEM(vf_number), i);

        /* Acknowledge the message and release buffer */
        IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_ACK);

        /* update stats */
        hw->mbx.stats.msgs_rx++;

out_no_read:
        return ret_val;
}
#ifdef CONFIG_PCI_IOV
/**
* ixgbe_init_mbx_params_pf - set initial values for pf mailbox
* @hw: pointer to the HW structure
*
* Initializes the hw->mbx struct to correct values for pf mailbox
*/
void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
{
        struct ixgbe_mbx_info *mbx = &hw->mbx;

        /* Mailbox exists only on SR-IOV-capable MACs. */
        if (hw->mac.type != ixgbe_mac_82599EB &&
            hw->mac.type != ixgbe_mac_X540)
                return;

        /* Zero timeout/delay: the PF side never blocks polling the VF. */
        mbx->timeout = 0;
        mbx->usec_delay = 0;

        mbx->stats.msgs_tx = 0;
        mbx->stats.msgs_rx = 0;
        mbx->stats.reqs = 0;
        mbx->stats.acks = 0;
        mbx->stats.rsts = 0;

        mbx->size = IXGBE_VFMAILBOX_SIZE;
}
#endif /* CONFIG_PCI_IOV */
/* PF-side mailbox operation table shared by SR-IOV-capable MACs. */
struct ixgbe_mbx_operations mbx_ops_generic = {
        .read = ixgbe_read_mbx_pf,
        .write = ixgbe_write_mbx_pf,
        .read_posted = ixgbe_read_posted_mbx,
        .write_posted = ixgbe_write_posted_mbx,
        .check_for_msg = ixgbe_check_for_msg_pf,
        .check_for_ack = ixgbe_check_for_ack_pf,
        .check_for_rst = ixgbe_check_for_rst_pf,
};
| gpl-2.0 |
shukiz/VAR-SOM-AM33-Kernel-3-15 | arch/mips/dec/ioasic-irq.c | 2392 | 3270 | /*
* DEC I/O ASIC interrupts.
*
* Copyright (c) 2002, 2003, 2013 Maciej W. Rozycki
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/types.h>
#include <asm/dec/ioasic.h>
#include <asm/dec/ioasic_addrs.h>
#include <asm/dec/ioasic_ints.h>
static int ioasic_irq_base;
/* Enable an IOASIC interrupt by setting its bit in the SIMR mask. */
static void unmask_ioasic_irq(struct irq_data *d)
{
        u32 simr = ioasic_read(IO_REG_SIMR);

        simr |= 1 << (d->irq - ioasic_irq_base);
        ioasic_write(IO_REG_SIMR, simr);
}
/* Disable an IOASIC interrupt by clearing its bit in the SIMR mask. */
static void mask_ioasic_irq(struct irq_data *d)
{
        u32 simr = ioasic_read(IO_REG_SIMR);

        simr &= ~(1 << (d->irq - ioasic_irq_base));
        ioasic_write(IO_REG_SIMR, simr);
}
/* Ack = mask plus an I/O barrier so the write reaches the ASIC. */
static void ack_ioasic_irq(struct irq_data *d)
{
        mask_ioasic_irq(d);
        fast_iob();
}
/* irq_chip for plain (non-DMA) IOASIC interrupt lines. */
static struct irq_chip ioasic_irq_type = {
        .name = "IO-ASIC",
        .irq_ack = ack_ioasic_irq,
        .irq_mask = mask_ioasic_irq,
        .irq_mask_ack = ack_ioasic_irq,
        .irq_unmask = unmask_ioasic_irq,
};
/* Clear a DMA interrupt request by writing 0 to its SIR bit (all other
 * bits written as 1 leave their requests untouched). */
static void clear_ioasic_dma_irq(struct irq_data *d)
{
        u32 sir;

        sir = ~(1 << (d->irq - ioasic_irq_base));
        ioasic_write(IO_REG_SIR, sir);
        fast_iob();
}
/* irq_chip for DMA interrupt lines; clears via SIR on ack/eoi (see the
 * informational vs. error discussion in the comment below). */
static struct irq_chip ioasic_dma_irq_type = {
        .name = "IO-ASIC-DMA",
        .irq_ack = clear_ioasic_dma_irq,
        .irq_mask = mask_ioasic_irq,
        .irq_unmask = unmask_ioasic_irq,
        .irq_eoi = clear_ioasic_dma_irq,
};
/*
* I/O ASIC implements two kinds of DMA interrupts, informational and
* error interrupts.
*
* The formers do not stop DMA and should be cleared as soon as possible
* so that if they retrigger before the handler has completed, usually as
* a side effect of actions taken by the handler, then they are reissued.
* These use the `handle_edge_irq' handler that clears the request right
* away.
*
* The latters stop DMA and do not resume it until the interrupt has been
* cleared. This cannot be done until after a corrective action has been
* taken and this also means they will not retrigger. Therefore they use
* the `handle_fasteoi_irq' handler that only clears the request on the
* way out. Because MIPS processor interrupt inputs, one of which the I/O
* ASIC is cascaded to, are level-triggered it is recommended that error
* DMA interrupt action handlers are registered with the IRQF_ONESHOT flag
* set so that they are run with the interrupt line masked.
*
* This mask has `1' bits in the positions of informational interrupts.
*/
#define IO_IRQ_DMA_INFO \
(IO_IRQ_MASK(IO_INR_SCC0A_RXDMA) | \
IO_IRQ_MASK(IO_INR_SCC1A_RXDMA) | \
IO_IRQ_MASK(IO_INR_ISDN_TXDMA) | \
IO_IRQ_MASK(IO_INR_ISDN_RXDMA) | \
IO_IRQ_MASK(IO_INR_ASC_DMA))
/*
 * init_ioasic_irqs - wire up all IOASIC interrupt lines starting at @base.
 *
 * Lines below IO_INR_DMA are plain level interrupts; the rest are DMA
 * lines, split into informational (edge handler) and error (fasteoi
 * handler) per the IO_IRQ_DMA_INFO mask above.
 */
void __init init_ioasic_irqs(int base)
{
        int i;

        /* Mask interrupts. */
        ioasic_write(IO_REG_SIMR, 0);
        fast_iob();

        for (i = base; i < base + IO_INR_DMA; i++)
                irq_set_chip_and_handler(i, &ioasic_irq_type,
                                         handle_level_irq);
        /* Note: i deliberately carries over from the previous loop. */
        for (; i < base + IO_IRQ_LINES; i++)
                irq_set_chip_and_handler(i, &ioasic_dma_irq_type,
                                         1 << (i - base) & IO_IRQ_DMA_INFO ?
                                         handle_edge_irq : handle_fasteoi_irq);

        ioasic_irq_base = base;
}
| gpl-2.0 |
regalstreak/android_kernel_samsung_logan2g | kernel/rtmutex-tester.c | 2904 | 8883 | /*
* RT-Mutex-tester: scriptable tester for rt mutexes
*
* started by Thomas Gleixner:
*
* Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
*/
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include "rtmutex.h"
#define MAX_RT_TEST_THREADS 8
#define MAX_RT_TEST_MUTEXES 8
static spinlock_t rttest_lock;
static atomic_t rttest_event;
/* Per-test-thread command/state block, exported through sysfs. */
struct test_thread_data {
        int opcode;                        /* pending command, 0 = idle */
        int opdata;                        /* command argument */
        int mutexes[MAX_RT_TEST_MUTEXES];  /* per-mutex state machine */
        int event;                         /* global event sequence snapshot */
        struct sys_device sysdev;          /* sysfs handle, id = thread index */
};
static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
static struct task_struct *threads[MAX_RT_TEST_THREADS];
static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
enum test_opcodes {
RTTEST_NOP = 0,
RTTEST_SCHEDOT, /* 1 Sched other, data = nice */
RTTEST_SCHEDRT, /* 2 Sched fifo, data = prio */
RTTEST_LOCK, /* 3 Lock uninterruptible, data = lockindex */
RTTEST_LOCKNOWAIT, /* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
RTTEST_LOCKINT, /* 5 Lock interruptible, data = lockindex */
RTTEST_LOCKINTNOWAIT, /* 6 Lock interruptible no wait in wakeup, data = lockindex */
RTTEST_LOCKCONT, /* 7 Continue locking after the wakeup delay */
RTTEST_UNLOCK, /* 8 Unlock, data = lockindex */
/* 9, 10 - reserved for BKL commemoration */
RTTEST_SIGNAL = 11, /* 11 Signal other test thread, data = thread id */
RTTEST_RESETEVENT = 98, /* 98 Reset event counter */
RTTEST_RESET = 99, /* 99 Reset all pending operations */
};
/*
 * handle_op - execute one queued test opcode for a thread.
 * @td:         thread's command block
 * @lockwakeup: nonzero when called from inside the scheduler hook;
 *              only the administrative opcodes are allowed then.
 *
 * The mutexes[] entries encode per-lock state (1 = lock requested,
 * 4 = held, 0 = free); rttest_event is incremented around each step so
 * user space can verify ordering.  Returns 0 on success, -EINTR on an
 * interrupted lock, -EINVAL otherwise.
 */
static int handle_op(struct test_thread_data *td, int lockwakeup)
{
        int i, id, ret = -EINVAL;

        /* First pass: opcodes that are legal even during a lock wakeup. */
        switch(td->opcode) {

        case RTTEST_NOP:
                return 0;

        case RTTEST_LOCKCONT:
                td->mutexes[td->opdata] = 1;
                td->event = atomic_add_return(1, &rttest_event);
                return 0;

        case RTTEST_RESET:
                /* Drop every mutex this thread still holds (state 4). */
                for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
                        if (td->mutexes[i] == 4) {
                                rt_mutex_unlock(&mutexes[i]);
                                td->mutexes[i] = 0;
                        }
                }
                return 0;

        case RTTEST_RESETEVENT:
                atomic_set(&rttest_event, 0);
                return 0;

        default:
                if (lockwakeup)
                        return ret;
        }

        /* Second pass: lock/unlock opcodes, normal context only. */
        switch(td->opcode) {

        case RTTEST_LOCK:
        case RTTEST_LOCKNOWAIT:
                id = td->opdata;
                if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
                        return ret;

                td->mutexes[id] = 1;
                td->event = atomic_add_return(1, &rttest_event);
                rt_mutex_lock(&mutexes[id]);
                td->event = atomic_add_return(1, &rttest_event);
                td->mutexes[id] = 4;
                return 0;

        case RTTEST_LOCKINT:
        case RTTEST_LOCKINTNOWAIT:
                id = td->opdata;
                if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
                        return ret;

                td->mutexes[id] = 1;
                td->event = atomic_add_return(1, &rttest_event);
                ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
                td->event = atomic_add_return(1, &rttest_event);
                td->mutexes[id] = ret ? 0 : 4;
                return ret ? -EINTR : 0;

        case RTTEST_UNLOCK:
                id = td->opdata;
                if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
                        return ret;

                td->event = atomic_add_return(1, &rttest_event);
                rt_mutex_unlock(&mutexes[id]);
                td->event = atomic_add_return(1, &rttest_event);
                td->mutexes[id] = 0;
                return 0;

        default:
                break;
        }
        return ret;
}
/*
* Schedule replacement for rtsem_down(). Only called for threads with
* PF_MUTEX_TESTER set.
*
* This allows us to have finegrained control over the event flow.
*
*/
/*
 * Scheduler hook for PF_MUTEX_TESTER threads about to block on @mutex.
 * Advances the per-lock state machine (1 -> 2 before the schedule,
 * 2 -> 3 or back to 1 after) and, for the plain LOCK/LOCKINT ops,
 * parks the thread in a command loop until RTTEST_LOCKCONT arrives.
 * The exact statement ordering IS the test protocol - do not reorder.
 */
void schedule_rt_mutex_test(struct rt_mutex *mutex)
{
        int tid, op, dat;
        struct test_thread_data *td;

        /* We have to lookup the task */
        for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
                if (threads[tid] == current)
                        break;
        }

        BUG_ON(tid == MAX_RT_TEST_THREADS);

        td = &thread_data[tid];

        op = td->opcode;
        dat = td->opdata;

        /* Pre-schedule: mark "about to block" (state 1 -> 2). */
        switch (op) {
        case RTTEST_LOCK:
        case RTTEST_LOCKINT:
        case RTTEST_LOCKNOWAIT:
        case RTTEST_LOCKINTNOWAIT:
                if (mutex != &mutexes[dat])
                        break;

                if (td->mutexes[dat] != 1)
                        break;

                td->mutexes[dat] = 2;
                td->event = atomic_add_return(1, &rttest_event);
                break;

        default:
                break;
        }

        schedule();

        /* Post-schedule: NOWAIT ops revert to 1 and return immediately;
         * blocking ops advance to 3 and fall through to the wait loop. */
        switch (op) {
        case RTTEST_LOCK:
        case RTTEST_LOCKINT:
                if (mutex != &mutexes[dat])
                        return;

                if (td->mutexes[dat] != 2)
                        return;

                td->mutexes[dat] = 3;
                td->event = atomic_add_return(1, &rttest_event);
                break;

        case RTTEST_LOCKNOWAIT:
        case RTTEST_LOCKINTNOWAIT:
                if (mutex != &mutexes[dat])
                        return;

                if (td->mutexes[dat] != 2)
                        return;

                td->mutexes[dat] = 1;
                td->event = atomic_add_return(1, &rttest_event);
                return;

        default:
                return;
        }

        td->opcode = 0;

        /* Service administrative opcodes until LOCKCONT releases us. */
        for (;;) {
                set_current_state(TASK_INTERRUPTIBLE);

                if (td->opcode > 0) {
                        int ret;

                        set_current_state(TASK_RUNNING);
                        ret = handle_op(td, 1);
                        set_current_state(TASK_INTERRUPTIBLE);
                        if (td->opcode == RTTEST_LOCKCONT)
                                break;
                        td->opcode = ret;
                }

                /* Wait for the next command to be executed */
                schedule();
        }

        /* Restore previous command and data */
        td->opcode = op;
        td->opdata = dat;
}
/*
 * Test thread main loop: sleep until a command is queued via sysfs,
 * run it through handle_op(), and publish the result back in ->opcode.
 * SIGHUP is allowed so RTTEST_SIGNAL can interrupt interruptible locks.
 */
static int test_func(void *data)
{
        struct test_thread_data *td = data;
        int ret;

        current->flags |= PF_MUTEX_TESTER;
        set_freezable();
        allow_signal(SIGHUP);

        for(;;) {

                set_current_state(TASK_INTERRUPTIBLE);

                if (td->opcode > 0) {
                        set_current_state(TASK_RUNNING);
                        ret = handle_op(td, 0);
                        set_current_state(TASK_INTERRUPTIBLE);
                        td->opcode = ret;
                }

                /* Wait for the next command to be executed */
                schedule();
                try_to_freeze();

                if (signal_pending(current))
                        flush_signals(current);

                if(kthread_should_stop())
                        break;
        }
        return 0;
}
/**
* sysfs_test_command - interface for test commands
* @dev: thread reference
* @buf: command for actual step
* @count: length of buffer
*
* command syntax:
*
* opcode:data
*/
static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribute *attr,
                                  const char *buf, size_t count)
{
        struct sched_param schedpar;
        struct test_thread_data *td;
        char cmdbuf[32];
        int op, dat, tid, ret;

        td = container_of(dev, struct test_thread_data, sysdev);
        tid = td->sysdev.id;

        /* strings from sysfs write are not 0 terminated! */
        if (count >= sizeof(cmdbuf))
                return -EINVAL;

        /* strip of \n: */
        if (buf[count-1] == '\n')
                count--;
        if (count < 1)
                return -EINVAL;

        /* copy into a local buffer so we can NUL-terminate for sscanf */
        memcpy(cmdbuf, buf, count);
        cmdbuf[count] = 0;

        if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
                return -EINVAL;

        switch (op) {
        case RTTEST_SCHEDOT:
                schedpar.sched_priority = 0;
                ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
                if (ret)
                        return ret;
                /* NOTE(review): renices the *writing* task, not threads[tid];
                 * looks deliberate but confirm against the test scripts. */
                set_user_nice(current, 0);
                break;

        case RTTEST_SCHEDRT:
                schedpar.sched_priority = dat;
                ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
                if (ret)
                        return ret;
                break;

        case RTTEST_SIGNAL:
                send_sig(SIGHUP, threads[tid], 0);
                break;

        default:
                /* Queue the opcode for the thread; reject if one is pending. */
                if (td->opcode > 0)
                        return -EBUSY;
                td->opdata = dat;
                td->opcode = op;
                wake_up_process(threads[tid]);
        }

        return count;
}
/**
* sysfs_test_status - sysfs interface for rt tester
* @dev: thread to query
* @buf: char buffer to be filled with thread status info
*/
static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute *attr,
                                 char *buf)
{
        struct test_thread_data *td;
        struct task_struct *tsk;
        char *curr = buf;
        int i;

        td = container_of(dev, struct test_thread_data, sysdev);
        tsk = threads[td->sysdev.id];

        /* Snapshot opcode/event/mutex states consistently under the lock. */
        spin_lock(&rttest_lock);

        curr += sprintf(curr,
                "O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, M:",
                td->opcode, td->event, tsk->state,
                        (MAX_RT_PRIO - 1) - tsk->prio,
                        (MAX_RT_PRIO - 1) - tsk->normal_prio,
                tsk->pi_blocked_on);

        /* Mutex states printed highest index first. */
        for (i = MAX_RT_TEST_MUTEXES - 1; i >=0 ; i--)
                curr += sprintf(curr, "%d", td->mutexes[i]);

        spin_unlock(&rttest_lock);

        curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
                        mutexes[td->sysdev.id].owner);

        return curr - buf;
}
static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);
static struct sysdev_class rttest_sysclass = {
.name = "rttest",
};
static int init_test_thread(int id)
{
thread_data[id].sysdev.cls = &rttest_sysclass;
thread_data[id].sysdev.id = id;
threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
if (IS_ERR(threads[id]))
return PTR_ERR(threads[id]);
return sysdev_register(&thread_data[id].sysdev);
}
/*
 * Module init: set up the test mutexes, register the sysdev class and
 * bring up one tester thread (plus its status/command files) per slot.
 */
static int init_rttest(void)
{
	int err, id;

	spin_lock_init(&rttest_lock);

	for (id = 0; id < MAX_RT_TEST_MUTEXES; id++)
		rt_mutex_init(&mutexes[id]);

	err = sysdev_class_register(&rttest_sysclass);
	if (err)
		return err;

	for (id = 0; id < MAX_RT_TEST_THREADS; id++) {
		err = init_test_thread(id);
		if (!err)
			err = sysdev_create_file(&thread_data[id].sysdev, &attr_status);
		if (!err)
			err = sysdev_create_file(&thread_data[id].sysdev, &attr_command);
		if (err)
			break;
	}

	printk("Initializing RT-Tester: %s\n", err ? "Failed" : "OK" );
	return err;
}
device_initcall(init_rttest);
| gpl-2.0 |
g7755725/android_kernel_samsung_jf | fs/affs/namei.c | 4952 | 11082 | /*
* linux/fs/affs/namei.c
*
* (c) 1996 Hans-Joachim Widmaier - Rewritten
*
* (C) 1993 Ray Burr - Modified for Amiga FFS filesystem.
*
* (C) 1991 Linus Torvalds - minix filesystem
*/
#include "affs.h"
typedef int (*toupper_t)(int);
static int affs_toupper(int ch);
static int affs_hash_dentry(const struct dentry *,
const struct inode *, struct qstr *);
static int affs_compare_dentry(const struct dentry *parent,
const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name);
static int affs_intl_toupper(int ch);
static int affs_intl_hash_dentry(const struct dentry *,
const struct inode *, struct qstr *);
static int affs_intl_compare_dentry(const struct dentry *parent,
const struct inode *pinode,
const struct dentry *dentry, const struct inode *inode,
unsigned int len, const char *str, const struct qstr *name);
const struct dentry_operations affs_dentry_operations = {
.d_hash = affs_hash_dentry,
.d_compare = affs_compare_dentry,
};
const struct dentry_operations affs_intl_dentry_operations = {
.d_hash = affs_intl_hash_dentry,
.d_compare = affs_intl_compare_dentry,
};
/* Simple toupper() for DOS\1: fold plain ASCII lower case only. */
static int
affs_toupper(int ch)
{
	if (ch >= 'a' && ch <= 'z')
		return ch - ('a' - 'A');
	return ch;
}
/* International toupper() for DOS\3 ("international"): folds ASCII
 * lower case plus the Latin-1 accented range 0xE0-0xFE, except 0xF7
 * (the division sign, which has no upper-case form). */
static int
affs_intl_toupper(int ch)
{
	int lower = (ch >= 'a' && ch <= 'z') ||
		    (ch >= 0xE0 && ch <= 0xFE && ch != 0xF7);

	return lower ? ch - ('a' - 'A') : ch;
}
/* Pick the case-folding routine matching the volume's DOS type. */
static inline toupper_t
affs_get_toupper(struct super_block *sb)
{
	if (AFFS_SB(sb)->s_flags & SF_INTL)
		return affs_intl_toupper;
	return affs_toupper;
}
/*
 * Note: the dentry argument is the parent dentry.
 *
 * Validate the name and compute its case-folded hash; only the first
 * 30 characters are significant on AFFS.
 */
static inline int
__affs_hash_dentry(struct qstr *qstr, toupper_t toupper)
{
	const u8 *p = qstr->name;
	unsigned long h;
	int err, n;

	err = affs_check_name(qstr->name, qstr->len);
	if (err)
		return err;

	h = init_name_hash();
	for (n = min(qstr->len, 30u); n > 0; n--)
		h = partial_name_hash(toupper(*p++), h);
	qstr->hash = end_name_hash(h);
	return 0;
}
/* d_hash for DOS\1 volumes: plain ASCII case folding. */
static int
affs_hash_dentry(const struct dentry *dentry, const struct inode *inode,
		struct qstr *qstr)
{
	return __affs_hash_dentry(qstr, affs_toupper);
}
/* d_hash for DOS\3 ("international") volumes: Latin-1 case folding. */
static int
affs_intl_hash_dentry(const struct dentry *dentry, const struct inode *inode,
		struct qstr *qstr)
{
	return __affs_hash_dentry(qstr, affs_intl_toupper);
}
/*
 * Case-insensitive name comparison.  Returns 0 on match, 1 otherwise.
 */
static inline int __affs_compare_dentry(unsigned int len,
		const char *str, const struct qstr *name, toupper_t toupper)
{
	const u8 *a = str;
	const u8 *b = name->name;
	unsigned int i;

	/*
	 * 'str' is the name of an already existing dentry, so the name
	 * must be valid. 'name' must be validated first.
	 */
	if (affs_check_name(name->name, name->len))
		return 1;

	/*
	 * Only the first 30 characters are significant; two "long" names
	 * compare equal when their 30-char prefixes match.
	 */
	if (len >= 30) {
		if (name->len < 30)
			return 1;
		len = 30;
	} else if (len != name->len) {
		return 1;
	}

	for (i = 0; i < len; i++)
		if (toupper(a[i]) != toupper(b[i]))
			return 1;
	return 0;
}
/* d_compare for DOS\1 volumes: plain ASCII case folding. */
static int
affs_compare_dentry(const struct dentry *parent, const struct inode *pinode,
		const struct dentry *dentry, const struct inode *inode,
		unsigned int len, const char *str, const struct qstr *name)
{
	return __affs_compare_dentry(len, str, name, affs_toupper);
}
/* d_compare for DOS\3 ("international") volumes: Latin-1 case folding. */
static int
affs_intl_compare_dentry(const struct dentry *parent, const struct inode *pinode,
		const struct dentry *dentry, const struct inode *inode,
		unsigned int len, const char *str, const struct qstr *name)
{
	return __affs_compare_dentry(len, str, name, affs_intl_toupper);
}
/*
 * NOTE! unlike strncmp, affs_match returns 1 for success, 0 for failure.
 *
 * name2 is an on-disk BCPL-style string: name2[0] holds the length,
 * the characters follow.  Only the first 30 characters are significant.
 */
static inline int
affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper)
{
	const u8 *name = dentry->d_name.name;
	int len = dentry->d_name.len;

	if (len >= 30) {
		/* Both names must be "long"; compare the 30-char prefix only. */
		if (*name2 < 30)
			return 0;
		len = 30;
	} else if (len != *name2)
		return 0;

	/* Case-insensitive compare, skipping name2's length byte. */
	for (name2++; len > 0; len--)
		if (toupper(*name++) != toupper(*name2++))
			return 0;
	return 1;
}
/*
 * Compute the directory hash-table slot for a name: seed with the
 * (clamped) length, fold in each case-folded character, then reduce
 * modulo the volume's hash-table size.
 */
int
affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len)
{
	toupper_t toupper = affs_get_toupper(sb);
	unsigned int i;
	int hash;

	len = min(len, 30u);
	hash = len;
	for (i = 0; i < len; i++)
		hash = (hash * 13 + toupper(name[i])) & 0x7ff;

	return hash % AFFS_SB(sb)->s_hashsize;
}
/*
 * Walk the directory's hash chain looking for dentry's name.
 * Returns the matching header block (caller must affs_brelse() it),
 * NULL when the name does not exist, or ERR_PTR(-EIO) on read failure.
 * Caller holds the directory lock.
 */
static struct buffer_head *
affs_find_entry(struct inode *dir, struct dentry *dentry)
{
	struct super_block *sb = dir->i_sb;
	struct buffer_head *bh;
	toupper_t toupper = affs_get_toupper(sb);
	u32 key;

	pr_debug("AFFS: find_entry(\"%.*s\")\n", (int)dentry->d_name.len, dentry->d_name.name);

	bh = affs_bread(sb, dir->i_ino);
	if (!bh)
		return ERR_PTR(-EIO);

	/* Start at the hash-table slot this name maps to. */
	key = be32_to_cpu(AFFS_HEAD(bh)->table[affs_hash_name(sb, dentry->d_name.name, dentry->d_name.len)]);

	for (;;) {
		/* Release the previous block before following the chain. */
		affs_brelse(bh);
		if (key == 0)
			return NULL;		/* end of chain: not found */
		bh = affs_bread(sb, key);
		if (!bh)
			return ERR_PTR(-EIO);
		if (affs_match(dentry, AFFS_TAIL(sb, bh)->name, toupper))
			return bh;		/* found: bh stays referenced */
		key = be32_to_cpu(AFFS_TAIL(sb, bh)->hash_chain);
	}
}
/*
 * Look up a name in @dir and instantiate the dentry (negative if the
 * name does not exist).  Returns NULL on success or an ERR_PTR.
 */
struct dentry *
affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct buffer_head *bh;
	struct inode *inode = NULL;

	pr_debug("AFFS: lookup(\"%.*s\")\n",(int)dentry->d_name.len,dentry->d_name.name);

	affs_lock_dir(dir);
	bh = affs_find_entry(dir, dentry);
	affs_unlock_dir(dir);
	if (IS_ERR(bh))
		return ERR_CAST(bh);
	if (bh) {
		u32 ino = bh->b_blocknr;

		/* store the real header ino in d_fsdata for faster lookups */
		dentry->d_fsdata = (void *)(long)ino;
		switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) {
		//link to dirs disabled
		//case ST_LINKDIR:
		case ST_LINKFILE:
			/* Hard link: resolve to the original file's header. */
			ino = be32_to_cpu(AFFS_TAIL(sb, bh)->original);
		}
		affs_brelse(bh);
		inode = affs_iget(sb, ino);
		if (IS_ERR(inode))
			return ERR_CAST(inode);
	}
	/* inode may still be NULL here: that caches a negative dentry. */
	d_add(dentry, inode);
	return NULL;
}
int
affs_unlink(struct inode *dir, struct dentry *dentry)
{
	pr_debug("AFFS: unlink(dir=%d, %lu \"%.*s\")\n", (u32)dir->i_ino,
		 dentry->d_inode->i_ino,
		 (int)dentry->d_name.len, dentry->d_name.name);

	/* Removing a file is just dropping its directory header block. */
	return affs_remove_header(dentry);
}
/*
 * Create a regular file in @dir.  Allocates a fresh inode, wires up the
 * file operations and links it into the directory; on failure the new
 * inode is released again.
 */
int
affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata *nd)
{
	struct super_block *sb = dir->i_sb;
	struct inode *inode;
	int error;

	pr_debug("AFFS: create(%lu,\"%.*s\",0%ho)\n",dir->i_ino,(int)dentry->d_name.len,
		 dentry->d_name.name,mode);

	inode = affs_new_inode(dir);
	if (!inode)
		return -ENOSPC;

	inode->i_mode = mode;
	mode_to_prot(inode);	/* mirror the mode into AFFS protection bits */
	mark_inode_dirty(inode);

	inode->i_op = &affs_file_inode_operations;
	inode->i_fop = &affs_file_operations;
	/* OFS volumes need the old-filesystem address-space ops. */
	inode->i_mapping->a_ops = (AFFS_SB(sb)->s_flags & SF_OFS) ? &affs_aops_ofs : &affs_aops;
	error = affs_add_entry(dir, inode, dentry, ST_FILE);
	if (error) {
		/* Undo the allocation: drop the link count and the inode. */
		clear_nlink(inode);
		iput(inode);
		return error;
	}
	return 0;
}
/*
 * Create a subdirectory in @dir.  Mirrors affs_create() but installs the
 * directory operations and adds an ST_USERDIR entry.
 */
int
affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
	struct inode *inode;
	int error;

	pr_debug("AFFS: mkdir(%lu,\"%.*s\",0%ho)\n",dir->i_ino,
		 (int)dentry->d_name.len,dentry->d_name.name,mode);

	inode = affs_new_inode(dir);
	if (!inode)
		return -ENOSPC;

	inode->i_mode = S_IFDIR | mode;
	mode_to_prot(inode);	/* mirror the mode into AFFS protection bits */

	inode->i_op = &affs_dir_inode_operations;
	inode->i_fop = &affs_dir_operations;

	error = affs_add_entry(dir, inode, dentry, ST_USERDIR);
	if (error) {
		/* Undo the allocation: drop the link count and the inode. */
		clear_nlink(inode);
		mark_inode_dirty(inode);
		iput(inode);
		return error;
	}
	return 0;
}
int
affs_rmdir(struct inode *dir, struct dentry *dentry)
{
	pr_debug("AFFS: rmdir(dir=%u, %lu \"%.*s\")\n", (u32)dir->i_ino,
		 dentry->d_inode->i_ino,
		 (int)dentry->d_name.len, dentry->d_name.name);

	/* Removing a directory is just dropping its header block. */
	return affs_remove_header(dentry);
}
/*
 * Create a symlink.  The target path is translated from Unix to Amiga
 * conventions and stored in the hash-table area of the new header block:
 * a leading "/" becomes the volume name prefix, and "./" / "../"
 * components are rewritten (on Amiga, "/" in mid-path means "parent").
 */
int
affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
{
	struct super_block *sb = dir->i_sb;
	struct buffer_head *bh;
	struct inode *inode;
	char *p;
	int i, maxlen, error;
	char c, lc;

	pr_debug("AFFS: symlink(%lu,\"%.*s\" -> \"%s\")\n",dir->i_ino,
		 (int)dentry->d_name.len,dentry->d_name.name,symname);

	/* Target is stored in the block's hash-table area, NUL-terminated. */
	maxlen = AFFS_SB(sb)->s_hashsize * sizeof(u32) - 1;
	inode = affs_new_inode(dir);
	if (!inode)
		return -ENOSPC;

	inode->i_op = &affs_symlink_inode_operations;
	inode->i_data.a_ops = &affs_symlink_aops;
	inode->i_mode = S_IFLNK | 0777;
	mode_to_prot(inode);

	error = -EIO;
	bh = affs_bread(sb, inode->i_ino);
	if (!bh)
		goto err;

	i = 0;
	p = (char *)AFFS_HEAD(bh)->table;
	lc = '/';
	if (*symname == '/') {
		/* Absolute path: prefix with the volume name instead of "/". */
		struct affs_sb_info *sbi = AFFS_SB(sb);
		while (*symname == '/')
			symname++;
		spin_lock(&sbi->symlink_lock);
		while (sbi->s_volume[i])	/* Cannot overflow */
			*p++ = sbi->s_volume[i++];
		spin_unlock(&sbi->symlink_lock);
	}
	while (i < maxlen && (c = *symname++)) {
		if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') {
			/* "../" after a slash: Amiga parent is a bare "/". */
			*p++ = '/';
			i++;
			symname += 2;
			lc = '/';
		} else if (c == '.' && lc == '/' && *symname == '/') {
			/* "./" after a slash is redundant: drop it. */
			symname++;
			lc = '/';
		} else {
			*p++ = c;
			lc = c;
			i++;
		}
		/* Collapse runs of slashes. */
		if (lc == '/')
			while (*symname == '/')
				symname++;
	}
	*p = 0;
	mark_buffer_dirty_inode(bh, inode);
	affs_brelse(bh);
	mark_inode_dirty(inode);

	error = affs_add_entry(dir, inode, dentry, ST_SOFTLINK);
	if (error)
		goto err;
	return 0;

err:
	/* Undo the allocation: drop the link count and the inode. */
	clear_nlink(inode);
	mark_inode_dirty(inode);
	iput(inode);
	return error;
}
int
affs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry)
{
	struct inode *inode = old_dentry->d_inode;

	pr_debug("AFFS: link(%u, %u, \"%.*s\")\n", (u32)inode->i_ino, (u32)dir->i_ino,
		 (int)dentry->d_name.len,dentry->d_name.name);

	/* AFFS hard links are ST_LINKFILE entries pointing at the original. */
	return affs_add_entry(dir, inode, dentry, ST_LINKFILE);
}
/*
 * Move old_dentry's header block out of old_dir's hash table and insert
 * it into new_dir under the new name.  Any existing destination entry
 * is removed first.
 */
int
affs_rename(struct inode *old_dir, struct dentry *old_dentry,
	    struct inode *new_dir, struct dentry *new_dentry)
{
	struct super_block *sb = old_dir->i_sb;
	struct buffer_head *bh = NULL;
	int retval;

	/*
	 * Use "%.*s" (precision) rather than "%*s" (field width), as the
	 * other pr_debug calls in this file do, so only d_name.len bytes
	 * of each name are printed.
	 */
	pr_debug("AFFS: rename(old=%u,\"%.*s\" to new=%u,\"%.*s\")\n",
		 (u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name,
		 (u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name);

	retval = affs_check_name(new_dentry->d_name.name,new_dentry->d_name.len);
	if (retval)
		return retval;

	/* Unlink destination if it already exists */
	if (new_dentry->d_inode) {
		retval = affs_remove_header(new_dentry);
		if (retval)
			return retval;
	}

	bh = affs_bread(sb, old_dentry->d_inode->i_ino);
	if (!bh)
		return -EIO;

	/* Remove header from its parent directory. */
	affs_lock_dir(old_dir);
	retval = affs_remove_hash(old_dir, bh);
	affs_unlock_dir(old_dir);
	if (retval)
		goto done;

	/* And insert it into the new directory with the new name. */
	affs_copy_name(AFFS_TAIL(sb, bh)->name, new_dentry);
	affs_fix_checksum(sb, bh);
	affs_lock_dir(new_dir);
	retval = affs_insert_hash(new_dir, bh);
	affs_unlock_dir(new_dir);
	/* TODO: move it back to old_dir, if error? */

done:
	mark_buffer_dirty_inode(bh, retval ? old_dir : new_dir);
	affs_brelse(bh);
	return retval;
}
| gpl-2.0 |
talnoah/Lemur_UpdatedBase | arch/powerpc/kernel/btext.c | 7000 | 38467 | /*
* Procedures for drawing on the screen early on in the boot process.
*
* Benjamin Herrenschmidt <benh@kernel.crashing.org>
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/memblock.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/btext.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/udbg.h>
#define NO_SCROLL
#ifndef NO_SCROLL
static void scrollscreen(void);
#endif
static void draw_byte(unsigned char c, long locX, long locY);
static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);
#define __force_data __attribute__((__section__(".data")))
static int g_loc_X __force_data;
static int g_loc_Y __force_data;
static int g_max_loc_X __force_data;
static int g_max_loc_Y __force_data;
static int dispDeviceRowBytes __force_data;
static int dispDeviceDepth __force_data;
static int dispDeviceRect[4] __force_data;
static unsigned char *dispDeviceBase __force_data;
static unsigned char *logicalDisplayBase __force_data;
unsigned long disp_BAT[2] __initdata = {0, 0};
#define cmapsz (16*256)
static unsigned char vga_font[cmapsz];
int boot_text_mapped __force_data = 0;
int force_printk_to_btext = 0;
#ifdef CONFIG_PPC32
/* Calc BAT values for mapping the display and store them
* in disp_BAT. Those values are then used from head.S to map
* the display during identify_machine() and MMU_Init()
*
* The display is mapped to virtual address 0xD0000000, rather
* than 1:1, because some some CHRP machines put the frame buffer
* in the region starting at 0xC0000000 (PAGE_OFFSET).
* This mapping is temporary and will disappear as soon as the
* setup done by MMU_Init() is applied.
*
* For now, we align the BAT and then map 8Mb on 601 and 16Mb
* on other PPCs. This may cause trouble if the framebuffer
* is really badly aligned, but I didn't encounter this case
* yet.
*/
void __init btext_prepare_BAT(void)
{
	unsigned long vaddr = PAGE_OFFSET + 0x10000000;
	unsigned long addr;
	unsigned long lowbits;

	addr = (unsigned long)dispDeviceBase;
	if (!addr) {
		/* No framebuffer known: disable btext entirely. */
		boot_text_mapped = 0;
		return;
	}
	if (PVR_VER(mfspr(SPRN_PVR)) != 1) {
		/* 603, 604, G3, G4, ... : 16Mb BAT, classic layout */
		lowbits = addr & ~0xFF000000UL;
		addr &= 0xFF000000UL;	/* align on the 16Mb BAT boundary */
		disp_BAT[0] = vaddr | (BL_16M<<2) | 2;
		disp_BAT[1] = addr | (_PAGE_NO_CACHE | _PAGE_GUARDED | BPP_RW);
	} else {
		/* 601: different BAT encoding, 8Mb block */
		lowbits = addr & ~0xFF800000UL;
		addr &= 0xFF800000UL;
		disp_BAT[0] = vaddr | (_PAGE_NO_CACHE | PP_RWXX) | 4;
		disp_BAT[1] = addr | BL_8M | 0x40;
	}
	/* Virtual address of the framebuffer within the BAT mapping. */
	logicalDisplayBase = (void *) (vaddr + lowbits);
}
#endif
/* This function can be used to enable the early boot text when doing
* OF booting or within bootx init. It must be followed by a btext_unmap()
* call before the logical address becomes unusable
*/
void __init btext_setup_display(int width, int height, int depth, int pitch,
				unsigned long address)
{
	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;	/* glyph cells are 8 pixels wide */
	g_max_loc_Y = height / 16;	/* and 16 pixels tall */
	logicalDisplayBase = (unsigned char *)address;
	dispDeviceBase = (unsigned char *)address;
	dispDeviceRowBytes = pitch;
	dispDeviceDepth = depth == 15 ? 16 : depth;	/* treat 15bpp as 16bpp */
	dispDeviceRect[0] = dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;
	boot_text_mapped = 1;
}
/* Invalidate the early-text mapping; drawing calls become no-ops. */
void __init btext_unmap(void)
{
	boot_text_mapped = 0;
}
/* Here's a small text engine to use during early boot
* or for debugging purposes
*
* todo:
*
* - build some kind of vgacon with it to enable early printk
* - move to a separate file
* - add a few video driver hooks to keep in sync with display
* changes.
*/
static void map_boot_text(void)
{
unsigned long base, offset, size;
unsigned char *vbase;
/* By default, we are no longer mapped */
boot_text_mapped = 0;
if (dispDeviceBase == 0)
return;
base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
offset = ((unsigned long) dispDeviceBase) - base;
size = dispDeviceRowBytes * dispDeviceRect[3] + offset
+ dispDeviceRect[0];
vbase = __ioremap(base, size, _PAGE_NO_CACHE);
if (vbase == 0)
return;
logicalDisplayBase = vbase + offset;
boot_text_mapped = 1;
}
/*
 * Initialize btext from an Open Firmware display node.  Prefers the
 * "linux,bootx-*" properties (set by the BootX loader) and falls back
 * to the generic OF properties.  Returns 0 or -EINVAL if any required
 * property is missing.
 */
int btext_initialize(struct device_node *np)
{
	unsigned int width, height, depth, pitch;
	unsigned long address = 0;
	const u32 *prop;

	prop = of_get_property(np, "linux,bootx-width", NULL);
	if (prop == NULL)
		prop = of_get_property(np, "width", NULL);
	if (prop == NULL)
		return -EINVAL;
	width = *prop;
	prop = of_get_property(np, "linux,bootx-height", NULL);
	if (prop == NULL)
		prop = of_get_property(np, "height", NULL);
	if (prop == NULL)
		return -EINVAL;
	height = *prop;
	prop = of_get_property(np, "linux,bootx-depth", NULL);
	if (prop == NULL)
		prop = of_get_property(np, "depth", NULL);
	if (prop == NULL)
		return -EINVAL;
	depth = *prop;

	/* Fallback pitch if the firmware doesn't provide linebytes. */
	pitch = width * ((depth + 7) / 8);
	prop = of_get_property(np, "linux,bootx-linebytes", NULL);
	if (prop == NULL)
		prop = of_get_property(np, "linebytes", NULL);
	if (prop && *prop != 0xffffffffu)
		pitch = *prop;
	/* Quirk: some firmwares report a bogus pitch of 1; assume 4k. */
	if (pitch == 1)
		pitch = 0x1000;
	prop = of_get_property(np, "linux,bootx-addr", NULL);
	if (prop == NULL)
		prop = of_get_property(np, "address", NULL);
	if (prop)
		address = *prop;

	/* FIXME: Add support for PCI reg properties. Right now, only
	 * reliable on macs
	 */
	if (address == 0)
		return -EINVAL;

	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;	/* glyph cells are 8x16 pixels */
	g_max_loc_Y = height / 16;
	dispDeviceBase = (unsigned char *)address;
	dispDeviceRowBytes = pitch;
	dispDeviceDepth = depth == 15 ? 16 : depth;
	dispDeviceRect[0] = dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;

	map_boot_text();

	return 0;
}
/*
 * Find a display to use for btext.  Tries the firmware's stdout device
 * first; if that fails and @allow_nonstdout is set, falls back to any
 * display node the firmware has opened.  Returns 0 or -ENODEV.
 */
int __init btext_find_display(int allow_nonstdout)
{
	const char *name;
	struct device_node *np = NULL;
	int rc = -ENODEV;

	name = of_get_property(of_chosen, "linux,stdout-path", NULL);
	if (name != NULL) {
		np = of_find_node_by_path(name);
		if (np != NULL) {
			/* stdout must actually be a display node. */
			if (strcmp(np->type, "display") != 0) {
				printk("boot stdout isn't a display !\n");
				of_node_put(np);
				np = NULL;
			}
		}
	}
	if (np)
		rc = btext_initialize(np);
	if (rc == 0 || !allow_nonstdout)
		return rc;

	/* Fallback: any display the firmware marked as opened. */
	for_each_node_by_type(np, "display") {
		if (of_get_property(np, "linux,opened", NULL)) {
			printk("trying %s ...\n", np->full_name);
			rc = btext_initialize(np);
			printk("result: %d\n", rc);
		}
		if (rc == 0)
			break;
	}
	return rc;
}
/* Calc the base address of a given point (x,y) */
static unsigned char * calc_base(int x, int y)
{
	/* Prefer the ioremapped virtual base; fall back to the raw one. */
	unsigned char *p = logicalDisplayBase;

	if (p == 0)
		p = dispDeviceBase;
	p += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
	p += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
	return p;
}
/* Adjust the display to a new resolution */
void btext_update_display(unsigned long phys, int width, int height,
			  int depth, int pitch)
{
	if (dispDeviceBase == 0)
		return;

	/* check it's the same frame buffer (within 256MB) */
	if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
		return;

	dispDeviceBase = (__u8 *) phys;
	dispDeviceRect[0] = 0;
	dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;
	dispDeviceDepth = depth;
	dispDeviceRowBytes = pitch;
	if (boot_text_mapped) {
		/* Drop the old mapping before remapping at the new geometry. */
		iounmap(logicalDisplayBase);
		boot_text_mapped = 0;
	}
	map_boot_text();
	/* Reset the cursor and recompute the character-cell grid. */
	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;
	g_max_loc_Y = height / 16;
}
EXPORT_SYMBOL(btext_update_display);
/* Zero every visible scanline, one 32-bit word at a time. */
void btext_clearscreen(void)
{
	unsigned int *row = (unsigned int *)calc_base(0, 0);
	unsigned long nwords = ((dispDeviceRect[2] - dispDeviceRect[0]) *
				(dispDeviceDepth >> 3)) >> 2;
	int y;
	unsigned long w;

	for (y = 0; y < (dispDeviceRect[3] - dispDeviceRect[1]); y++) {
		unsigned int *p = row;

		for (w = nwords; w != 0; w--)
			*p++ = 0;
		row += (dispDeviceRowBytes >> 2);
	}
}
/*
 * Push the whole framebuffer out of the data cache (dcbst = data cache
 * block store) so rendered text is visible with caching enabled.
 */
void btext_flushscreen(void)
{
	unsigned int *base = (unsigned int *)calc_base(0, 0);
	unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
			       (dispDeviceDepth >> 3)) >> 2;
	int i,j;

	for (i=0; i < (dispDeviceRect[3] - dispDeviceRect[1]); i++)
	{
		unsigned int *ptr = base;
		/* One dcbst per 8 words (32 bytes). */
		for(j = width; j > 0; j -= 8) {
			__asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr));
			ptr += 8;
		}
		base += (dispDeviceRowBytes >> 2);
	}
	/* Order the cache stores before anything that follows. */
	__asm__ __volatile__ ("sync" ::: "memory");
}
/*
 * Like btext_flushscreen(), but only for the 16 scanlines of the text
 * row the cursor is currently on (g_loc_Y).
 */
void btext_flushline(void)
{
	unsigned int *base = (unsigned int *)calc_base(0, g_loc_Y << 4);
	unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
			       (dispDeviceDepth >> 3)) >> 2;
	int i,j;

	for (i=0; i < 16; i++)
	{
		unsigned int *ptr = base;
		/* One dcbst per 8 words (32 bytes). */
		for(j = width; j > 0; j -= 8) {
			__asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr));
			ptr += 8;
		}
		base += (dispDeviceRowBytes >> 2);
	}
	__asm__ __volatile__ ("sync" ::: "memory");
}
#ifndef NO_SCROLL
/* Scroll the whole screen up by one 16-pixel text line. */
static void scrollscreen(void)
{
	unsigned int *src = (unsigned int *)calc_base(0,16);
	unsigned int *dst = (unsigned int *)calc_base(0,0);
	unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
			       (dispDeviceDepth >> 3)) >> 2;
	int rows = dispDeviceRect[3] - dispDeviceRect[1] - 16;
	int i, j;

	/* Copy everything up one text line... */
	for (i = 0; i < rows; i++) {
		for (j = 0; j < (int)width; j++)
			dst[j] = src[j];
		src += (dispDeviceRowBytes >> 2);
		dst += (dispDeviceRowBytes >> 2);
	}
	/* ...then blank the freed bottom line. */
	for (i = 0; i < 16; i++) {
		for (j = 0; j < (int)width; j++)
			dst[j] = 0;
		dst += (dispDeviceRowBytes >> 2);
	}
}
#endif /* ndef NO_SCROLL */
/*
 * Draw one character at the current cursor position, interpreting the
 * usual control characters and handling end-of-line / end-of-screen.
 */
void btext_drawchar(char c)
{
	int cline = 0;
#ifdef NO_SCROLL
	int x;
#endif
	if (!boot_text_mapped)
		return;

	switch (c) {
	case '\b':
		if (g_loc_X > 0)
			--g_loc_X;
		break;
	case '\t':
		/* Advance to the next multiple-of-8 column. */
		g_loc_X = (g_loc_X & -8) + 8;
		break;
	case '\r':
		g_loc_X = 0;
		break;
	case '\n':
		g_loc_X = 0;
		g_loc_Y++;
		cline = 1;
		break;
	default:
		draw_byte(c, g_loc_X++, g_loc_Y);
	}
	/* Wrap to the next line when we run off the right edge. */
	if (g_loc_X >= g_max_loc_X) {
		g_loc_X = 0;
		g_loc_Y++;
		cline = 1;
	}
#ifndef NO_SCROLL
	while (g_loc_Y >= g_max_loc_Y) {
		scrollscreen();
		g_loc_Y--;
	}
#else
	/* wrap around from bottom to top of screen so we don't
	   waste time scrolling each line.  -- paulus. */
	if (g_loc_Y >= g_max_loc_Y)
		g_loc_Y = 0;
	if (cline) {
		/* Blank the line we just wrapped onto. */
		for (x = 0; x < g_max_loc_X; ++x)
			draw_byte(' ', x, g_loc_Y);
	}
#endif
}
/* Draw a NUL-terminated string at the current cursor position. */
void btext_drawstring(const char *c)
{
	if (!boot_text_mapped)
		return;
	for (; *c != 0; c++)
		btext_drawchar(*c);
}
/* Draw exactly len characters (not NUL-terminated). */
void btext_drawtext(const char *c, unsigned int len)
{
	if (!boot_text_mapped)
		return;
	for (; len != 0; len--)
		btext_drawchar(*c++);
}
/* Draw v as fixed-width hex (16 digits on ppc64, 8 on ppc32) plus a
 * trailing space, most significant nibble first. */
void btext_drawhex(unsigned long v)
{
	int shift;

	if (!boot_text_mapped)
		return;
#ifdef CONFIG_PPC64
	for (shift = 56; shift >= 32; shift -= 8) {
		btext_drawchar(hex_asc_hi(v >> shift));
		btext_drawchar(hex_asc_lo(v >> shift));
	}
#endif
	for (shift = 24; shift >= 0; shift -= 8) {
		btext_drawchar(hex_asc_hi(v >> shift));
		btext_drawchar(hex_asc_lo(v >> shift));
	}
	btext_drawchar(' ');
}
/* Render one 8x16 glyph from vga_font at character cell (locX, locY),
 * dispatching on the framebuffer depth. */
static void draw_byte(unsigned char c, long locX, long locY)
{
	unsigned char *dst = calc_base(locX << 3, locY << 4);
	unsigned char *glyph = &vga_font[((unsigned int)c) * 16];
	int stride = dispDeviceRowBytes;

	switch (dispDeviceDepth) {
	case 24:
	case 32:
		draw_byte_32(glyph, (unsigned int *)dst, stride);
		break;
	case 15:
	case 16:
		draw_byte_16(glyph, (unsigned int *)dst, stride);
		break;
	case 8:
		draw_byte_8(glyph, (unsigned int *)dst, stride);
		break;
	}
}
/*
 * Expands 4 font bits into 4 bytes of 8bpp pixel data: each set bit
 * becomes an 0xff byte in the corresponding position of the word.
 */
static unsigned int expand_bits_8[16] = {
	0x00000000,
	0x000000ff,
	0x0000ff00,
	0x0000ffff,
	0x00ff0000,
	0x00ff00ff,
	0x00ffff00,
	0x00ffffff,
	0xff000000,
	0xff0000ff,
	0xff00ff00,
	0xff00ffff,
	0xffff0000,
	0xffff00ff,
	0xffffff00,
	0xffffffff
};
/*
 * Expands 2 font bits into 2 halfwords of 16bpp pixel data: each set
 * bit becomes an 0xffff halfword in the corresponding position.
 */
static unsigned int expand_bits_16[4] = {
	0x00000000,
	0x0000ffff,
	0xffff0000,
	0xffffffff
};
/* Render a 16-row glyph at 32bpp: one word per pixel, white on black,
 * advancing by rb bytes per scanline. */
static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
{
	int row, pix;
	int fg = 0xFFFFFFFFUL;
	int bg = 0x00000000UL;

	for (row = 0; row < 16; row++) {
		int bits = *font++;

		/* Expand each font bit to a full foreground/background word. */
		for (pix = 0; pix < 8; pix++)
			base[pix] = (-((bits >> (7 - pix)) & 1) & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}
/* Render a 16-row glyph at 15/16bpp: each word holds two pixels,
 * expanded two font bits at a time via expand_bits_16. */
static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0xFFFFFFFFUL;
	int bg = 0x00000000UL;
	/* No cast needed: the table already is unsigned int[] (the old
	 * "(int *)" cast was type-incorrect). */
	const unsigned int *eb = expand_bits_16;

	for (l = 0; l < 16; ++l)
	{
		bits = *font++;
		base[0] = (eb[bits >> 6] & fg) ^ bg;
		base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
		base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
		base[3] = (eb[bits & 3] & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}
static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
{
int l, bits;
int fg = 0x0F0F0F0FUL;
int bg = 0x00000000UL;
unsigned int *eb = (int *)expand_bits_8;
for (l = 0; l < 16; ++l)
{
bits = *font++;
base[0] = (eb[bits >> 4] & fg) ^ bg;
base[1] = (eb[bits & 0xf] & fg) ^ bg;
base = (unsigned int *) ((char *)base + rb);
}
}
static unsigned char vga_font[cmapsz] = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x81, 0xa5, 0x81, 0x81, 0xbd,
0x99, 0x81, 0x81, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xff,
0xdb, 0xff, 0xff, 0xc3, 0xe7, 0xff, 0xff, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x6c, 0xfe, 0xfe, 0xfe, 0xfe, 0x7c, 0x38, 0x10,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x7c, 0xfe,
0x7c, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18,
0x3c, 0x3c, 0xe7, 0xe7, 0xe7, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x3c, 0x7e, 0xff, 0xff, 0x7e, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xe7, 0xc3, 0xc3, 0xe7, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x42, 0x42, 0x66, 0x3c, 0x00,
0x00, 0x00, 0x00, 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xc3, 0x99, 0xbd,
0xbd, 0x99, 0xc3, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x1e, 0x0e,
0x1a, 0x32, 0x78, 0xcc, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x66, 0x66, 0x66, 0x66, 0x3c, 0x18, 0x7e, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x33, 0x3f, 0x30, 0x30, 0x30,
0x30, 0x70, 0xf0, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0x63,
0x7f, 0x63, 0x63, 0x63, 0x63, 0x67, 0xe7, 0xe6, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x18, 0x18, 0xdb, 0x3c, 0xe7, 0x3c, 0xdb, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfe, 0xf8,
0xf0, 0xe0, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x06, 0x0e,
0x1e, 0x3e, 0xfe, 0x3e, 0x1e, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66,
0x66, 0x00, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7f, 0xdb,
0xdb, 0xdb, 0x7b, 0x1b, 0x1b, 0x1b, 0x1b, 0x1b, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0x60, 0x38, 0x6c, 0xc6, 0xc6, 0x6c, 0x38, 0x0c, 0xc6,
0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0xfe, 0xfe, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c,
0x7e, 0x18, 0x18, 0x18, 0x7e, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x7e, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x0c, 0xfe, 0x0c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x60, 0xfe, 0x60, 0x30, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xc0,
0xc0, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x24, 0x66, 0xff, 0x66, 0x24, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x38, 0x7c, 0x7c, 0xfe, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xfe, 0x7c, 0x7c,
0x38, 0x38, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x66, 0x66, 0x24, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6c,
0x6c, 0xfe, 0x6c, 0x6c, 0x6c, 0xfe, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x7c, 0xc6, 0xc2, 0xc0, 0x7c, 0x06, 0x06, 0x86, 0xc6, 0x7c,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc2, 0xc6, 0x0c, 0x18,
0x30, 0x60, 0xc6, 0x86, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c,
0x6c, 0x38, 0x76, 0xdc, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x30, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x18, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x18,
0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x3c, 0xff, 0x3c, 0x66, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x02, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0x80, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xce, 0xde, 0xf6, 0xe6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x38, 0x78, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0x06, 0x06, 0x3c, 0x06, 0x06, 0x06, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x1c, 0x3c, 0x6c, 0xcc, 0xfe,
0x0c, 0x0c, 0x0c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xfc, 0x06, 0x06, 0x06, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x60, 0xc0, 0xc0, 0xfc, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0x06, 0x06, 0x0c, 0x18,
0x30, 0x30, 0x30, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0xc6, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x06, 0x06, 0x0c, 0x78,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00,
0x00, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x18, 0x00, 0x00, 0x00, 0x18, 0x18, 0x30, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x06,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x00, 0x00,
0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60,
0x30, 0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x60, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0x0c, 0x18, 0x18, 0x18, 0x00, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xde, 0xde,
0xde, 0xdc, 0xc0, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x66, 0x66, 0x66, 0x66, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0xc2, 0xc0, 0xc0, 0xc0,
0xc0, 0xc2, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x6c,
0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x6c, 0xf8, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68, 0x60, 0x62, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x66, 0x62, 0x68, 0x78, 0x68,
0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xde, 0xc6, 0xc6, 0x66, 0x3a, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x0c,
0x0c, 0x0c, 0x0c, 0x0c, 0xcc, 0xcc, 0xcc, 0x78, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xe6, 0x66, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0x66, 0xe6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, 0x60, 0x60, 0x60, 0x60, 0x60,
0x60, 0x62, 0x66, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xe7,
0xff, 0xff, 0xdb, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66,
0x66, 0x66, 0x7c, 0x60, 0x60, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xd6, 0xde, 0x7c,
0x0c, 0x0e, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66, 0x66, 0x7c, 0x6c,
0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6,
0xc6, 0x60, 0x38, 0x0c, 0x06, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xdb, 0x99, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xc3, 0xc3, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x66,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0x66, 0x3c, 0x18, 0x18,
0x3c, 0x66, 0xc3, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xff, 0xc3, 0x86, 0x0c, 0x18, 0x30, 0x60, 0xc1, 0xc3, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
0xc0, 0xe0, 0x70, 0x38, 0x1c, 0x0e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x3c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00,
0x30, 0x30, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x78, 0x6c, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc0, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x0c, 0x0c, 0x3c, 0x6c, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0xcc, 0x78, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x6c, 0x76, 0x66, 0x66, 0x66, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x18, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x0e, 0x06, 0x06,
0x06, 0x06, 0x06, 0x06, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0xe0, 0x60,
0x60, 0x66, 0x6c, 0x78, 0x78, 0x6c, 0x66, 0xe6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe6, 0xff, 0xdb,
0xdb, 0xdb, 0xdb, 0xdb, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x66, 0x66,
0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x7c, 0x0c, 0x0c, 0x1e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x76, 0x66, 0x60, 0x60, 0x60, 0xf0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c, 0xc6, 0x60,
0x38, 0x0c, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x30,
0x30, 0xfc, 0x30, 0x30, 0x30, 0x30, 0x36, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0xc3, 0xc3,
0xc3, 0x66, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc3, 0xc3, 0xc3, 0xdb, 0xdb, 0xff, 0x66, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0x3c, 0x66, 0xc3,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0xf8, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xfe, 0xcc, 0x18, 0x30, 0x60, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0e, 0x18, 0x18, 0x18, 0x70, 0x18, 0x18, 0x18, 0x18, 0x0e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x00, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x18,
0x18, 0x18, 0x0e, 0x18, 0x18, 0x18, 0x18, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0xc6,
0xc6, 0xc6, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x66,
0xc2, 0xc0, 0xc0, 0xc0, 0xc2, 0x66, 0x3c, 0x0c, 0x06, 0x7c, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xcc, 0x00, 0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38,
0x00, 0x78, 0x0c, 0x7c, 0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x3c, 0x66, 0x60, 0x60, 0x66, 0x3c, 0x0c, 0x06,
0x3c, 0x00, 0x00, 0x00, 0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xfe,
0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x7c, 0xc6, 0xfe, 0xc0, 0xc0, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x66, 0x00, 0x00, 0x38, 0x18, 0x18,
0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x3c, 0x66,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x60, 0x30, 0x18, 0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x10, 0x38, 0x6c, 0xc6, 0xc6,
0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x38, 0x00,
0x38, 0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x18, 0x30, 0x60, 0x00, 0xfe, 0x66, 0x60, 0x7c, 0x60, 0x60, 0x66, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6e, 0x3b, 0x1b,
0x7e, 0xd8, 0xdc, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3e, 0x6c,
0xcc, 0xcc, 0xfe, 0xcc, 0xcc, 0xcc, 0xcc, 0xce, 0x00, 0x00, 0x00, 0x00,
0x00, 0x10, 0x38, 0x6c, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0x00, 0x7c, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18,
0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x30, 0x78, 0xcc, 0x00, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0xcc, 0x76,
0x00, 0x00, 0x00, 0x00, 0x00, 0x60, 0x30, 0x18, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00,
0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7e, 0x06, 0x0c, 0x78, 0x00,
0x00, 0xc6, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6,
0xc6, 0xc6, 0xc6, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e,
0xc3, 0xc0, 0xc0, 0xc0, 0xc3, 0x7e, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x38, 0x6c, 0x64, 0x60, 0xf0, 0x60, 0x60, 0x60, 0x60, 0xe6, 0xfc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc3, 0x66, 0x3c, 0x18, 0xff, 0x18,
0xff, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfc, 0x66, 0x66,
0x7c, 0x62, 0x66, 0x6f, 0x66, 0x66, 0x66, 0xf3, 0x00, 0x00, 0x00, 0x00,
0x00, 0x0e, 0x1b, 0x18, 0x18, 0x18, 0x7e, 0x18, 0x18, 0x18, 0x18, 0x18,
0xd8, 0x70, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0x78, 0x0c, 0x7c,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0c, 0x18, 0x30,
0x00, 0x38, 0x18, 0x18, 0x18, 0x18, 0x18, 0x3c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x18, 0x30, 0x60, 0x00, 0x7c, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x30, 0x60, 0x00, 0xcc, 0xcc, 0xcc,
0xcc, 0xcc, 0xcc, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc,
0x00, 0xdc, 0x66, 0x66, 0x66, 0x66, 0x66, 0x66, 0x00, 0x00, 0x00, 0x00,
0x76, 0xdc, 0x00, 0xc6, 0xe6, 0xf6, 0xfe, 0xde, 0xce, 0xc6, 0xc6, 0xc6,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0x6c, 0x6c, 0x3e, 0x00, 0x7e, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x30, 0x30, 0x00, 0x30, 0x30, 0x60, 0xc0, 0xc6, 0xc6, 0x7c,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc0,
0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0xfe, 0x06, 0x06, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30, 0x60, 0xce, 0x9b, 0x06,
0x0c, 0x1f, 0x00, 0x00, 0x00, 0xc0, 0xc0, 0xc2, 0xc6, 0xcc, 0x18, 0x30,
0x66, 0xce, 0x96, 0x3e, 0x06, 0x06, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18,
0x00, 0x18, 0x18, 0x18, 0x3c, 0x3c, 0x3c, 0x18, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x6c, 0xd8, 0x6c, 0x36, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, 0x6c, 0x36,
0x6c, 0xd8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x44, 0x11, 0x44,
0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44, 0x11, 0x44,
0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa, 0x55, 0xaa,
0x55, 0xaa, 0x55, 0xaa, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77,
0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0xdd, 0x77, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf8, 0x18, 0xf8,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xf6, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x06, 0xf6,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf6, 0x06, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xfe, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xf8, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x37,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x37, 0x30, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xf7, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x37, 0x30, 0x37, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36,
0x36, 0xf7, 0x00, 0xf7, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0xff,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xff, 0x00, 0xff, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x3f,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x18, 0x18,
0x18, 0x1f, 0x18, 0x1f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x1f, 0x18, 0x1f, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f,
0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x36, 0x36, 0x36, 0xff, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36, 0x36,
0x18, 0x18, 0x18, 0x18, 0x18, 0xff, 0x18, 0xff, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0xf8,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x1f, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xf0, 0xf0, 0xf0, 0xf0,
0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0, 0xf0,
0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f, 0x0f,
0x0f, 0x0f, 0x0f, 0x0f, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x76, 0xdc, 0xd8, 0xd8, 0xd8, 0xdc, 0x76, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x78, 0xcc, 0xcc, 0xcc, 0xd8, 0xcc, 0xc6, 0xc6, 0xc6, 0xcc,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xfe, 0xc6, 0xc6, 0xc0, 0xc0, 0xc0,
0xc0, 0xc0, 0xc0, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0xfe, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xfe, 0xc6, 0x60, 0x30, 0x18, 0x30, 0x60, 0xc6, 0xfe,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0xd8, 0xd8,
0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x66, 0x66, 0x66, 0x66, 0x66, 0x7c, 0x60, 0x60, 0xc0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7e, 0x18, 0x3c, 0x66, 0x66,
0x66, 0x3c, 0x18, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38,
0x6c, 0xc6, 0xc6, 0xfe, 0xc6, 0xc6, 0x6c, 0x38, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x38, 0x6c, 0xc6, 0xc6, 0xc6, 0x6c, 0x6c, 0x6c, 0x6c, 0xee,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1e, 0x30, 0x18, 0x0c, 0x3e, 0x66,
0x66, 0x66, 0x66, 0x3c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x7e, 0xdb, 0xdb, 0xdb, 0x7e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0x06, 0x7e, 0xdb, 0xdb, 0xf3, 0x7e, 0x60, 0xc0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1c, 0x30, 0x60, 0x60, 0x7c, 0x60,
0x60, 0x60, 0x30, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x7c,
0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00, 0x00, 0xfe, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x7e, 0x18,
0x18, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30,
0x18, 0x0c, 0x06, 0x0c, 0x18, 0x30, 0x00, 0x7e, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x0c, 0x18, 0x30, 0x60, 0x30, 0x18, 0x0c, 0x00, 0x7e,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x1b, 0x1b, 0x1b, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18, 0x18,
0x18, 0x18, 0x18, 0x18, 0xd8, 0xd8, 0xd8, 0x70, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x7e, 0x00, 0x18, 0x18, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0xdc, 0x00,
0x76, 0xdc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x38, 0x6c, 0x6c,
0x38, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x18, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0c, 0x0c,
0x0c, 0x0c, 0x0c, 0xec, 0x6c, 0x6c, 0x3c, 0x1c, 0x00, 0x00, 0x00, 0x00,
0x00, 0xd8, 0x6c, 0x6c, 0x6c, 0x6c, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0xd8, 0x30, 0x60, 0xc8, 0xf8, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x7c, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
};
/*
 * Route early kernel debug (udbg) character output through the btext
 * frame-buffer console by installing btext_drawchar as the udbg putc hook.
 */
void __init udbg_init_btext(void)
{
/* If btext is enabled, we might have a BAT setup for early display,
* thus we do enable some very basic udbg output
*/
udbg_putc = btext_drawchar;
}
| gpl-2.0 |
Jackeagle/android_kernel_lge_d838 | arch/x86/boot/mkcpustr.c | 9048 | 1251 | /* ----------------------------------------------------------------------- *
*
* Copyright 2008 rPath, Inc. - All Rights Reserved
*
* This file is part of the Linux kernel, and is made available under
* the terms of the GNU General Public License version 2 or (at your
* option) any later version; incorporated herein by reference.
*
* ----------------------------------------------------------------------- */
/*
* This is a host program to preprocess the CPU strings into a
* compact format suitable for the setup code.
*/
#include <stdio.h>
#include "../kernel/cpu/capflags.c"
/*
 * Emit the x86_cap_strs[] initializer on stdout: one "\xWW\xBB""name\0"
 * record per known CPU capability flag, each wrapped in a REQUIRED_MASK
 * preprocessor guard so flags the setup code never tests are compiled
 * out.  The very last (word, bit) slot is always emitted unguarded so the
 * string table is never empty and absorbs the compiler-added NUL.
 */
int main(void)
{
	int word, bit;

	printf("static const char x86_cap_strs[] =\n");
	for (word = 0; word < NCAPINTS; word++) {
		for (bit = 0; bit < 32; bit++) {
			const char *name = x86_cap_flags[word * 32 + bit];
			int is_last = (word == NCAPINTS - 1 && bit == 31);

			if (is_last) {
				/* unconditional terminator record; NULL
				   becomes the empty string */
				printf("\t\"\\x%02x\\x%02x\"\"%s\"\n",
				       word, bit, name ? name : "");
			} else if (name) {
				printf("#if REQUIRED_MASK%d & (1 << %d)\n"
				       "\t\"\\x%02x\\x%02x\"\"%s\\0\"\n"
				       "#endif\n",
				       word, bit, word, bit, name);
			}
		}
	}
	printf("\t;\n");
	return 0;
}
| gpl-2.0 |
miaoxie/linux-btrfs | arch/mips/pci/pci-tx4927.c | 9048 | 2643 | /*
* Based on linux/arch/mips/txx9/rbtx4938/setup.c,
* and RBTX49xx patch from CELF patch archive.
*
* Copyright 2001, 2003-2005 MontaVista Software Inc.
* Copyright (C) 2004 by Ralf Baechle (ralf@linux-mips.org)
* (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/tx4927.h>
/*
 * Report the TX4927 PCI controller clock configuration at boot.
 *
 * Reads the chip configuration (ccfg) and pin configuration (pcfg)
 * registers: when the internal PCI clock is enabled, derives the PCI
 * clock from the CPU clock according to the PCIDIVMODE divider field and
 * logs it in MHz (rounded to one decimal); otherwise logs "External".
 *
 * Returns the PCI clock in Hz, or -1 when an external clock is used.
 */
int __init tx4927_report_pciclk(void)
{
int pciclk = 0;
printk(KERN_INFO "PCIC --%s PCICLK:",
(__raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_PCI66) ?
" PCI66" : "");
if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) {
u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg);
/* divider select: CPU clock divided per PCIDIVMODE field */
switch ((unsigned long)ccfg &
TX4927_CCFG_PCIDIVMODE_MASK) {
case TX4927_CCFG_PCIDIVMODE_2_5:
pciclk = txx9_cpu_clock * 2 / 5; break;
case TX4927_CCFG_PCIDIVMODE_3:
pciclk = txx9_cpu_clock / 3; break;
case TX4927_CCFG_PCIDIVMODE_5:
pciclk = txx9_cpu_clock / 5; break;
case TX4927_CCFG_PCIDIVMODE_6:
pciclk = txx9_cpu_clock / 6; break;
}
/* +50000 rounds to the nearest 0.1 MHz for display */
printk("Internal(%u.%uMHz)",
(pciclk + 50000) / 1000000,
((pciclk + 50000) / 100000) % 10);
} else {
printk("External");
pciclk = -1;
}
printk("\n");
return pciclk;
}
/*
 * Switch the TX4927 PCI bus to 66 MHz operation.
 *
 * Asserts the PCI66 (M66EN) bit in ccfg, then — if the internal PCI
 * clock is in use — halves the PCIDIVMODE divider (5 -> 2.5, 6 -> 3) so
 * the resulting PCI clock doubles; dividers already at the fast setting
 * are kept.
 *
 * Returns the new PCI clock in Hz, or -1 when an external clock is used
 * (in which case only M66EN is asserted here).
 */
int __init tx4927_pciclk66_setup(void)
{
int pciclk;
/* Assert M66EN */
tx4927_ccfg_set(TX4927_CCFG_PCI66);
/* Double PCICLK (if possible) */
if (__raw_readq(&tx4927_ccfgptr->pcfg) & TX4927_PCFG_PCICLKEN_ALL) {
unsigned int pcidivmode = 0;
u64 ccfg = __raw_readq(&tx4927_ccfgptr->ccfg);
pcidivmode = (unsigned long)ccfg &
TX4927_CCFG_PCIDIVMODE_MASK;
/* map current divider to its half-divider counterpart */
switch (pcidivmode) {
case TX4927_CCFG_PCIDIVMODE_5:
case TX4927_CCFG_PCIDIVMODE_2_5:
pcidivmode = TX4927_CCFG_PCIDIVMODE_2_5;
pciclk = txx9_cpu_clock * 2 / 5;
break;
case TX4927_CCFG_PCIDIVMODE_6:
case TX4927_CCFG_PCIDIVMODE_3:
default:
pcidivmode = TX4927_CCFG_PCIDIVMODE_3;
pciclk = txx9_cpu_clock / 3;
}
tx4927_ccfg_change(TX4927_CCFG_PCIDIVMODE_MASK,
pcidivmode);
printk(KERN_DEBUG "PCICLK: ccfg:%08lx\n",
(unsigned long)__raw_readq(&tx4927_ccfgptr->ccfg));
} else
pciclk = -1;
return pciclk;
}
/*
 * Install the PCI error interrupt handler for the TX4927 PCI controller.
 * Failure to register is only logged; the system continues without PCI
 * error reporting.
 */
void __init tx4927_setup_pcierr_irq(void)
{
if (request_irq(TXX9_IRQ_BASE + TX4927_IR_PCIERR,
tx4927_pcierr_interrupt,
0, "PCI error",
(void *)TX4927_PCIC_REG))
printk(KERN_WARNING "Failed to request irq for PCIERR\n");
}
| gpl-2.0 |
gk1/xbmc | xbmc/network/linux/ZeroconfBrowserAvahi.cpp | 89 | 15315 | /*
* Copyright (C) 2005-2013 Team XBMC
* http://xbmc.org
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with XBMC; see the file COPYING. If not, see
* <http://www.gnu.org/licenses/>.
*
*/
#include "ZeroconfBrowserAvahi.h"
#ifdef HAS_AVAHI
#include <utils/log.h>
#include "guilib/GUIWindowManager.h"
#include "guilib/GUIMessage.h"
#include "GUIUserMessages.h"
#include <avahi-common/malloc.h>
#include <avahi-common/error.h>
namespace
{
///helper RAII-struct to block event loop for modifications
/// Locks the avahi threaded poll for the lifetime of the object so the
/// avahi event-loop thread cannot run callbacks while members are
/// modified; the destructor releases the lock on scope exit.
/// NOTE(review): not copy-safe -- copying would double-unlock; instances
/// are only ever used as local scope guards in this file.
struct ScopedEventLoopBlock
{
ScopedEventLoopBlock ( AvahiThreadedPoll* fp_poll ) : mp_poll ( fp_poll )
{
avahi_threaded_poll_lock ( mp_poll );
}
~ScopedEventLoopBlock()
{
avahi_threaded_poll_unlock ( mp_poll );
}
private:
AvahiThreadedPoll* mp_poll;
};
}
/*
 * Construct the avahi-based zeroconf browser: create the threaded poll
 * object, create the avahi client, and start the event-loop thread.
 * Each step is logged on failure; a failed poll creation leaves the
 * object inert (mp_poll == 0), which the other members check for.
 */
CZeroconfBrowserAvahi::CZeroconfBrowserAvahi() : mp_client ( 0 ), mp_poll ( 0 ), m_shutdown(false), m_thread_id(0)
{
if ( ! ( mp_poll = avahi_threaded_poll_new() ) )
{
CLog::Log ( LOGERROR, "CZeroconfAvahi::CZeroconfAvahi(): Could not create threaded poll object" );
//TODO: throw exception? can this even happen?
return;
}
if ( !createClient() )
{
CLog::Log ( LOGERROR, "CZeroconfAvahi::CZeroconfAvahi(): Could not create client" );
//yeah, what if not? but should always succeed (as client_no_fail or something is passed)
}
//start event loop thread
if ( avahi_threaded_poll_start ( mp_poll ) < 0 )
{
CLog::Log ( LOGERROR, "CZeroconfAvahi::CZeroconfAvahi(): Failed to start avahi client thread" );
}
}
/*
 * Tear down the browser.  Because avahi_threaded_poll_stop() cannot be
 * used (see avahi ticket 251 below), shutdown is signalled to the
 * event-loop thread itself: a zero-delay timeout is scheduled under the
 * poll lock with m_shutdown set, its callback stops the loop from inside
 * the thread, and we pthread_join() until the thread has exited before
 * freeing the client and the poll object.
 */
CZeroconfBrowserAvahi::~CZeroconfBrowserAvahi()
{
CLog::Log ( LOGDEBUG, "CZeroconfAvahi::~CZeroconfAvahi() Going down! cleaning up..." );
if ( mp_poll )
{
//normally we would stop the avahi thread here and do our work, but
//it looks like this does not work -> www.avahi.org/ticket/251
//so instead of calling
//avahi_threaded_poll_stop(mp_poll);
//we set m_shutdown=true, post an event and wait for it to stop itself
struct timeval tv = { 0, 0 }; //TODO: does tv survive the thread?
AvahiTimeout* lp_timeout;
{
// lock the poll so m_shutdown and the timeout are published atomically
ScopedEventLoopBlock l_block(mp_poll);
const AvahiPoll* cp_apoll = avahi_threaded_poll_get(mp_poll);
m_shutdown = true;
lp_timeout = cp_apoll->timeout_new(cp_apoll,
&tv,
shutdownCallback,
this);
}
//now wait for the thread to stop
assert(m_thread_id);
pthread_join(m_thread_id, NULL);
avahi_threaded_poll_get(mp_poll)->timeout_free(lp_timeout);
}
//free the client (frees all browsers, groups, ...)
if ( mp_client )
avahi_client_free ( mp_client );
if ( mp_poll )
avahi_threaded_poll_free ( mp_poll );
}
/*
 * Start browsing for a zeroconf service type (e.g. "_http._tcp").
 *
 * Inserts the type into m_browsers; if the avahi client is already
 * running a service browser is created immediately, otherwise the entry
 * stays with a null browser and is picked up when the client reaches the
 * running state.
 *
 * Returns false if the type is already being browsed or the browser
 * could not be created; true otherwise (including the queued case).
 */
bool CZeroconfBrowserAvahi::doAddServiceType ( const std::string& fcr_service_type )
{
ScopedEventLoopBlock lock ( mp_poll );
tBrowserMap::iterator it = m_browsers.find ( fcr_service_type );
if ( it != m_browsers.end() )
return false;
else
it = m_browsers.insert ( std::make_pair ( fcr_service_type, ( AvahiServiceBrowser* ) 0 ) ).first;
//if the client is running, we directly create a browser for the service here
if ( mp_client && avahi_client_get_state ( mp_client ) == AVAHI_CLIENT_S_RUNNING )
{
AvahiServiceBrowser* browser = createServiceBrowser ( fcr_service_type, mp_client, this);
if ( !browser )
{
// creation failed: roll back the map entry so a retry is possible
m_browsers.erase ( it );
return false;
}
else
{
it->second = browser;
return true;
}
}
else
{
CLog::Log ( LOGINFO, "CZeroconfBrowserAvahi::doAddServiceType client not available. service browsing queued" );
return true;
}
}
bool CZeroconfBrowserAvahi::doRemoveServiceType ( const std::string& fcr_service_type )
{
ScopedEventLoopBlock lock ( mp_poll );
tBrowserMap::iterator it = m_browsers.find ( fcr_service_type );
if ( it == m_browsers.end() )
return false;
else
{
if ( it->second )
{
avahi_service_browser_free ( it->second );
m_all_for_now_browsers.erase ( it->second );
}
m_browsers.erase ( it );
//remove this serviceType from the list of discovered services
for ( tDiscoveredServices::iterator it = m_discovered_services.begin(); it != m_discovered_services.end(); ++it )
if ( it->first.GetType() == fcr_service_type )
m_discovered_services.erase ( it++ );
}
return true;
}
//Returns a snapshot of all currently discovered services (copied under the
//event loop lock so the avahi thread cannot mutate the map concurrently).
std::vector<CZeroconfBrowser::ZeroconfService> CZeroconfBrowserAvahi::doGetFoundServices()
{
  ScopedEventLoopBlock lock ( mp_poll );
  std::vector<CZeroconfBrowser::ZeroconfService> found;
  found.reserve ( m_discovered_services.size() );
  tDiscoveredServices::iterator it = m_discovered_services.begin();
  for ( ; it != m_discovered_services.end(); ++it )
    found.push_back ( it->first );
  return found;
}
/*
 * Resolves a previously discovered service to IP/port/txt-records.
 * Schedules an avahi resolver under the event loop lock, then blocks the
 * calling thread (up to f_timeout seconds) until resolveCallback signals
 * m_resolved_event; the result is read back under the lock.
 * NOTE(review): m_resolving_service is shared mutable state - concurrent
 * calls would race; presumably callers serialize resolves. TODO confirm.
 */
bool CZeroconfBrowserAvahi::doResolveService ( CZeroconfBrowser::ZeroconfService& fr_service, double f_timeout )
{
  {
    //wait for lock on event-loop to schedule resolving
    ScopedEventLoopBlock lock ( mp_poll );
    //avahi can only resolve already discovered services, as it needs info from there
    tDiscoveredServices::const_iterator it = m_discovered_services.find( fr_service );
    if ( it == m_discovered_services.end() )
    {
      CLog::Log ( LOGERROR, "CZeroconfBrowserAvahi::doResolveService called with undiscovered service, resolving is NOT possible" );
      return false;
    }
    //start resolving
    m_resolving_service = fr_service;
    m_resolved_event.Reset();
    if ( !avahi_service_resolver_new ( mp_client, it->second.interface, it->second.protocol,
                                       it->first.GetName().c_str(), it->first.GetType().c_str(), it->first.GetDomain().c_str(),
                                       AVAHI_PROTO_UNSPEC, AvahiLookupFlags ( 0 ), resolveCallback, this ) )
    {
      CLog::Log ( LOGERROR, "CZeroconfBrowserAvahi::doResolveService Failed to resolve service '%s': %s\n", it->first.GetName().c_str(),
                  avahi_strerror ( avahi_client_errno ( mp_client ) ) );
      return false;
    }
  } // end of this block releases lock of eventloop
  //wait for resolve to return or timeout
  m_resolved_event.WaitMSec(f_timeout*1000);
  {
    ScopedEventLoopBlock lock ( mp_poll );
    fr_service = m_resolving_service;
    //success iff resolveCallback filled in an address
    return (!fr_service.GetIP().empty());
  }
}
/*
 * avahi client state callback; runs on the avahi event loop thread.
 * Creates the queued service browsers when the client becomes RUNNING and
 * recreates the client after a daemon failure. Also cooperates with the
 * destructor's shutdown handshake via m_thread_id / m_shutdown.
 */
void CZeroconfBrowserAvahi::clientCallback ( AvahiClient* fp_client, AvahiClientState f_state, void* fp_data )
{
  CZeroconfBrowserAvahi* p_instance = static_cast<CZeroconfBrowserAvahi*> ( fp_data );

  //store our thread ID and check for shutdown -> check details in destructor
  p_instance->m_thread_id = pthread_self();
  if (p_instance->m_shutdown)
  {
    avahi_threaded_poll_quit(p_instance->mp_poll);
    return;
  }
  switch ( f_state )
  {
    case AVAHI_CLIENT_S_RUNNING:
    {
      CLog::Log ( LOGDEBUG, "CZeroconfBrowserAvahi::clientCallback: client is up and running" );
      //create all browsers that doAddServiceType queued while the client was down
      for ( tBrowserMap::iterator it = p_instance->m_browsers.begin(); it != p_instance->m_browsers.end(); ++it )
      {
        assert ( !it->second );
        it->second = createServiceBrowser ( it->first, fp_client, fp_data );
      }
      break;
    }
    case AVAHI_CLIENT_FAILURE:
    {
      CLog::Log ( LOGINFO, "CZeroconfBrowserAvahi::clientCallback: client failure. avahi-daemon stopped? Recreating client..." );
      //We were forced to disconnect from server. now free and recreate the client object
      avahi_client_free ( fp_client );
      p_instance->mp_client = 0;
      //freeing the client also frees all groups and browsers, pointers are undefined afterwards, so fix that now
      for ( tBrowserMap::iterator it = p_instance->m_browsers.begin(); it != p_instance->m_browsers.end(); ++it )
        it->second = ( AvahiServiceBrowser* ) 0;
      //clean the list of discovered services and update gui (if someone is interested)
      p_instance->m_discovered_services.clear();
      CGUIMessage message ( GUI_MSG_NOTIFY_ALL, 0, 0, GUI_MSG_UPDATE_PATH );
      message.SetStringParam ( "zeroconf://" );
      g_windowManager.SendThreadMessage ( message );
      p_instance->createClient();
      break;
    }
    case AVAHI_CLIENT_S_COLLISION:
    case AVAHI_CLIENT_S_REGISTERING:
      //HERE WE SHOULD REMOVE ALL OF OUR SERVICES AND "RESCHEDULE" them for later addition
      CLog::Log ( LOGDEBUG, "CZeroconfBrowserAvahi::clientCallback: This should not happen" );
      break;
    case AVAHI_CLIENT_CONNECTING:
      CLog::Log ( LOGINFO, "CZeroconfBrowserAvahi::clientCallback: avahi server not available. But may become later..." );
      break;
  }
}
/*
 * avahi service-browse callback; runs on the avahi event loop thread.
 * Maintains m_discovered_services and notifies the GUI once a browser has
 * delivered its initial batch (ALL_FOR_NOW) or changes arrive afterwards.
 */
void CZeroconfBrowserAvahi::browseCallback (
  AvahiServiceBrowser *browser, AvahiIfIndex interface, AvahiProtocol protocol, AvahiBrowserEvent event,
  const char *name, const char *type, const char *domain,
  AvahiLookupResultFlags flags, void* fp_data )
{
  CZeroconfBrowserAvahi* p_instance = static_cast<CZeroconfBrowserAvahi*> ( fp_data );
  assert ( browser );
  bool update_gui = false;
  /* Called whenever a new services becomes available on the LAN or is removed from the LAN */
  switch ( event )
  {
    case AVAHI_BROWSER_FAILURE:
      CLog::Log ( LOGERROR, "CZeroconfBrowserAvahi::browseCallback error: %s\n", avahi_strerror ( avahi_client_errno ( avahi_service_browser_get_client ( browser ) ) ) );
      //TODO
      return;
    case AVAHI_BROWSER_NEW:
    {
      CLog::Log ( LOGDEBUG, "CZeroconfBrowserAvahi::browseCallback NEW: service '%s' of type '%s' in domain '%s'\n", name, type, domain );
      //store the service
      ZeroconfService service(name, type, domain);
      AvahiSpecificInfos info;
      info.interface = interface;
      info.protocol = protocol;
      p_instance->m_discovered_services.insert ( std::make_pair ( service, info ) );
      //if this browser already sent the all for now message, we need to update the gui now
      if( p_instance->m_all_for_now_browsers.find(browser) != p_instance->m_all_for_now_browsers.end() )
        update_gui = true;
      break;
    }
    case AVAHI_BROWSER_REMOVE:
    {
      //remove the service
      ZeroconfService service(name, type, domain);
      p_instance->m_discovered_services.erase ( service );
      CLog::Log ( LOGDEBUG, "CZeroconfBrowserAvahi::browseCallback REMOVE: service '%s' of type '%s' in domain '%s'\n", name, type, domain );
      //if this browser already sent the all for now message, we need to update the gui now
      if( p_instance->m_all_for_now_browsers.find(browser) != p_instance->m_all_for_now_browsers.end() )
        update_gui = true;
      break;
    }
    case AVAHI_BROWSER_CACHE_EXHAUSTED:
      //do we need that?
      break;
    case AVAHI_BROWSER_ALL_FOR_NOW:
    {
      //FIX: this case body declares a local ('success') and previously had no
      //braces; that is ill-formed as soon as another case label follows it.
      CLog::Log ( LOGDEBUG, "CZeroconfBrowserAvahi::browseCallback all for now (service = %s)", type);
      //remember this browser delivered its initial batch of results
      bool success = p_instance->m_all_for_now_browsers.insert(browser).second;
      if(!success)
        CLog::Log ( LOGDEBUG, "CZeroconfBrowserAvahi::browseCallback AVAHI_BROWSER_ALL_FOR_NOW sent twice?!");
      update_gui = true;
      break;
    }
  }
  if ( update_gui )
  {
    CGUIMessage message ( GUI_MSG_NOTIFY_ALL, 0, 0, GUI_MSG_UPDATE_PATH );
    message.SetStringParam ( "zeroconf://" );
    g_windowManager.SendThreadMessage ( message );
    CLog::Log ( LOGDEBUG, "CZeroconfBrowserAvahi::browseCallback sent gui update for path zeroconf://" );
  }
}
//Converts an avahi txt-record string list into a key->value map.
//Entries without a value ("key" instead of "key=value") map to "".
CZeroconfBrowser::ZeroconfService::tTxtRecordMap GetTxtRecords(AvahiStringList *txt)
{
  CZeroconfBrowser::ZeroconfService::tTxtRecordMap recordMap;

  for( AvahiStringList* i = txt; i; i = i->next )
  {
    char *key = NULL, *value = NULL;
    if( avahi_string_list_get_pair( i, &key, &value, NULL ) < 0 )
      continue;
    //FIX: avahi_string_list_get_pair() leaves *value == NULL when the entry
    //has no '='; constructing std::string from NULL is undefined behaviour.
    recordMap.insert(
      std::make_pair(
        std::string(key),
        value ? std::string(value) : std::string()
      )
    );
    //avahi_free() is a no-op on NULL, so no guards needed
    avahi_free(key);
    avahi_free(value);
  }
  return recordMap;
}
/*
 * avahi resolver callback; runs on the avahi event loop thread.
 * Fills m_resolving_service (ip/port/txt) on success, frees the one-shot
 * resolver and wakes the thread blocked in doResolveService().
 */
void CZeroconfBrowserAvahi::resolveCallback(
  AvahiServiceResolver *r, AvahiIfIndex interface, AvahiProtocol protocol, AvahiResolverEvent event,
  const char *name, const char *type, const char *domain, const char *host_name,
  const AvahiAddress *address, uint16_t port, AvahiStringList *txt, AvahiLookupResultFlags flags, void* userdata )
{
  assert ( r );
  assert ( userdata );
  CZeroconfBrowserAvahi* p_instance = static_cast<CZeroconfBrowserAvahi*> ( userdata );
  switch ( event )
  {
    case AVAHI_RESOLVER_FAILURE:
      //m_resolving_service is left untouched -> GetIP() stays empty and
      //doResolveService reports failure
      CLog::Log ( LOGERROR, "CZeroconfBrowserAvahi::resolveCallback Failed to resolve service '%s' of type '%s' in domain '%s': %s\n", name, type, domain, avahi_strerror ( avahi_client_errno ( avahi_service_resolver_get_client ( r ) ) ) );
      break;
    case AVAHI_RESOLVER_FOUND:
    {
      char a[AVAHI_ADDRESS_STR_MAX];
      CLog::Log ( LOGDEBUG, "CZeroconfBrowserAvahi::resolveCallback resolved service '%s' of type '%s' in domain '%s':\n", name, type, domain );
      avahi_address_snprint ( a, sizeof ( a ), address );
      p_instance->m_resolving_service.SetIP(a);
      p_instance->m_resolving_service.SetPort(port);
      //get txt-record list
      p_instance->m_resolving_service.SetTxtRecords(GetTxtRecords(txt));
      break;
    }
  }
  //resolvers are one-shot: free it and signal the waiting caller
  avahi_service_resolver_free ( r );
  p_instance->m_resolved_event.Set();
}
/*
 * (Re)creates the avahi client, freeing any previous one first.
 * AVAHI_CLIENT_NO_FAIL keeps the client object alive even while the avahi
 * daemon is not (yet) running; state changes arrive via clientCallback.
 * Returns true when the client object could be allocated.
 */
bool CZeroconfBrowserAvahi::createClient()
{
  assert ( mp_poll );
  if ( mp_client )
  {
    avahi_client_free ( mp_client );
  }
  mp_client = avahi_client_new ( avahi_threaded_poll_get ( mp_poll ),
                                 AVAHI_CLIENT_NO_FAIL, &clientCallback, this, 0 );
  //NOTE: the old failure branch re-assigned mp_client = 0, but
  //avahi_client_new already returned 0 in that case - dead code removed.
  return mp_client != 0;
}
/*
 * Creates an avahi browser for the given service type on all interfaces /
 * protocols. Returns NULL on failure (error is logged).
 */
AvahiServiceBrowser* CZeroconfBrowserAvahi::createServiceBrowser ( const std::string& fcr_service_type, AvahiClient* fp_client, void* fp_userdata)
{
  assert(fp_client);
  AvahiServiceBrowser* ret = avahi_service_browser_new ( fp_client, AVAHI_IF_UNSPEC, AVAHI_PROTO_UNSPEC, fcr_service_type.c_str(),
                                                         NULL, ( AvahiLookupFlags ) 0, browseCallback, fp_userdata );
  if ( !ret )
  {
    //FIX: arguments were transposed - the format expects (service type, error),
    //but the error string was printed in the service slot and vice versa
    CLog::Log ( LOGERROR, "CZeroconfBrowserAvahi::createServiceBrowser Failed to create service (%s) browser: %s",
                fcr_service_type.c_str(), avahi_strerror ( avahi_client_errno ( fp_client ) ) );
  }
  return ret;
}
//Timeout callback scheduled by the destructor; runs inside the avahi event
//loop and quits the loop from within (see avahi ticket 251 workaround).
void CZeroconfBrowserAvahi::shutdownCallback(AvahiTimeout *fp_e, void *fp_data)
{
  CZeroconfBrowserAvahi* p_instance = static_cast<CZeroconfBrowserAvahi*>(fp_data);
  //should only be called on shutdown
  if (!p_instance->m_shutdown)
    return;
  avahi_threaded_poll_quit(p_instance->mp_poll);
}
#endif //HAS_AVAHI
| gpl-2.0 |
erikcas/android_kernel_sony_msm | kernel/events/core.c | 89 | 188972 | /*
* Performance events core code:
*
* Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
* Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
* Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
* Copyright © 2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
*
* For licensing details see kernel-base/COPYING
*/
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/smp.h>
#include <linux/idr.h>
#include <linux/file.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/tick.h>
#include <linux/sysfs.h>
#include <linux/dcache.h>
#include <linux/percpu.h>
#include <linux/ptrace.h>
#include <linux/reboot.h>
#include <linux/vmstat.h>
#include <linux/device.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/hardirq.h>
#include <linux/rculist.h>
#include <linux/uaccess.h>
#include <linux/syscalls.h>
#include <linux/anon_inodes.h>
#include <linux/kernel_stat.h>
#include <linux/perf_event.h>
#include <linux/ftrace_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/mm_types.h>
#include <linux/cgroup.h>
#include "internal.h"
#include <asm/irq_regs.h>
static struct workqueue_struct *perf_wq;

/*
 * Argument bundle for remote_function(); carried through
 * smp_call_function_single() to the target CPU.
 */
struct remote_function_call {
	struct task_struct	*p;
	int			(*func)(void *info);
	void			*info;
	int			ret;
};

/*
 * IPI handler: run tfc->func(tfc->info) on this CPU, but only if the
 * requested task (if any) is still current here; otherwise report
 * -EAGAIN so the caller can retry.
 */
static void remote_function(void *data)
{
	struct remote_function_call *tfc = data;
	struct task_struct *p = tfc->p;

	if (p) {
		tfc->ret = -EAGAIN;
		if (task_cpu(p) != smp_processor_id() || !task_curr(p))
			return;
	}

	tfc->ret = tfc->func(tfc->info);
}
/**
 * task_function_call - call a function on the cpu on which a task runs
 * @p:		the task to evaluate
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func when the task is currently running. This might
 * be on the current CPU, which just calls the function directly
 *
 * returns: @func return value, or
 *	    -ESRCH  - when the process isn't running
 *	    -EAGAIN - when the process moved away
 */
static int
task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= p,
		.func	= func,
		.info	= info,
		.ret	= -ESRCH, /* No such (running) process */
	};

	/*
	 * remote_function() re-checks that @p is still current on the
	 * target CPU and sets -EAGAIN if it migrated in the meantime.
	 */
	if (task_curr(p))
		smp_call_function_single(task_cpu(p), remote_function, &data, 1);

	return data.ret;
}
/**
 * cpu_function_call - call a function on the cpu
 * @func:	the function to be called
 * @info:	the function call argument
 *
 * Calls the function @func on the remote cpu.
 *
 * returns: @func return value or -ENXIO when the cpu is offline
 */
static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
{
	struct remote_function_call data = {
		.p	= NULL,
		.func	= func,
		.info	= info,
		.ret	= -ENXIO, /* No such CPU */
	};

	smp_call_function_single(cpu, remote_function, &data, 1);

	return data.ret;
}

/* Sentinel owner value marking events created by in-kernel users. */
#define EVENT_OWNER_KERNEL ((void *) -1)

static bool is_kernel_event(struct perf_event *event)
{
	return event->owner == EVENT_OWNER_KERNEL;
}
/* All flags accepted by the perf_event_open() syscall. */
#define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
PERF_FLAG_FD_OUTPUT |\
PERF_FLAG_PID_CGROUP)

/*
 * branch priv levels that need permission checks
 */
#define PERF_SAMPLE_BRANCH_PERM_PLM \
	(PERF_SAMPLE_BRANCH_KERNEL |\
	 PERF_SAMPLE_BRANCH_HV)

/* Bitmask selecting which event classes to (un)schedule on a context. */
enum event_type_t {
	EVENT_FLEXIBLE = 0x1,
	EVENT_PINNED = 0x2,
	EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};

/*
 * perf_sched_events : >0 events exist
 * perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
 */
struct static_key_deferred perf_sched_events __read_mostly;
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);

static atomic_t nr_mmap_events __read_mostly;
static atomic_t nr_comm_events __read_mostly;
static atomic_t nr_task_events __read_mostly;

/* List of registered PMUs; protected by pmus_lock / pmus_srcu. */
static LIST_HEAD(pmus);
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;

/*
 * perf event paranoia level:
 *  -1 - not paranoid at all
 *   0 - disallow raw tracepoint access for unpriv
 *   1 - disallow cpu events for unpriv
 *   2 - disallow kernel profiling for unpriv
 */
#ifdef CONFIG_PERF_EVENTS_USERMODE
int sysctl_perf_event_paranoid __read_mostly = -1;
#else
int sysctl_perf_event_paranoid __read_mostly = 1;
#endif

/* Minimum for 512 kiB + 1 user control page */
int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */

/*
 * max perf event sample rate
 */
#define DEFAULT_MAX_SAMPLE_RATE		100000
#define DEFAULT_SAMPLE_PERIOD_NS	(NSEC_PER_SEC / DEFAULT_MAX_SAMPLE_RATE)
#define DEFAULT_CPU_TIME_MAX_PERCENT	25

int sysctl_perf_event_sample_rate __read_mostly = DEFAULT_MAX_SAMPLE_RATE;

static int max_samples_per_tick __read_mostly = DIV_ROUND_UP(DEFAULT_MAX_SAMPLE_RATE, HZ);
static int perf_sample_period_ns __read_mostly = DEFAULT_SAMPLE_PERIOD_NS;

/* Per-sample NMI time budget; 0 disables throttling (see perf_sample_event_took). */
static atomic_t perf_sample_allowed_ns __read_mostly =
	ATOMIC_INIT( DEFAULT_SAMPLE_PERIOD_NS * DEFAULT_CPU_TIME_MAX_PERCENT / 100);
/*
 * Recompute the per-sample NMI time budget from the current sample period
 * and the allowed cpu-time percentage (sysctl_perf_cpu_time_max_percent).
 */
void update_perf_cpu_limits(void)
{
	u64 tmp = perf_sample_period_ns;

	tmp *= sysctl_perf_cpu_time_max_percent;
	do_div(tmp, 100);
	atomic_set(&perf_sample_allowed_ns, tmp);
}
/* sysctl handler for kernel.perf_event_max_sample_rate */
int perf_proc_update_handler(struct ctl_table *table, int write,
		void __user *buffer, size_t *lenp,
		loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	/* derive the per-tick budget and sample period from the new rate */
	max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
	update_perf_cpu_limits();

	return 0;
}
int sysctl_perf_cpu_time_max_percent __read_mostly = DEFAULT_CPU_TIME_MAX_PERCENT;

/* sysctl handler for kernel.perf_cpu_time_max_percent */
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
				void __user *buffer, size_t *lenp,
				loff_t *ppos)
{
	int ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write)
		return ret;

	/* the percentage changed; recompute the allowed-ns budget */
	update_perf_cpu_limits();

	return 0;
}
/*
 * perf samples are done in some very critical code paths (NMIs).
 * If they take too much CPU time, the system can lock up and not
 * get any real work done. This will drop the sample rate when
 * we detect that events are taking too long.
 */
#define NR_ACCUMULATED_SAMPLES 128
DEFINE_PER_CPU(u64, running_sample_length);

/* Called with the measured duration of a sample; throttles the sample rate. */
void perf_sample_event_took(u64 sample_len_ns)
{
	u64 avg_local_sample_len;
	u64 local_samples_len;

	/* a budget of 0 means throttling is disabled */
	if (atomic_read(&perf_sample_allowed_ns) == 0)
		return;

	/* decay the counter by 1 average sample */
	local_samples_len = __get_cpu_var(running_sample_length);
	local_samples_len -= local_samples_len/NR_ACCUMULATED_SAMPLES;
	local_samples_len += sample_len_ns;
	__get_cpu_var(running_sample_length) = local_samples_len;

	/*
	 * note: this will be biased artifically low until we have
	 * seen NR_ACCUMULATED_SAMPLES. Doing it this way keeps us
	 * from having to maintain a count.
	 */
	avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;

	if (avg_local_sample_len <= atomic_read(&perf_sample_allowed_ns))
		return;

	if (max_samples_per_tick <= 1)
		return;

	/* halve the per-tick budget and recompute the derived limits */
	max_samples_per_tick = DIV_ROUND_UP(max_samples_per_tick, 2);
	sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
	perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;

	printk_ratelimited(KERN_WARNING
			"perf samples too long (%lld > %d), lowering "
			"kernel.perf_event_max_sample_rate to %d\n",
			avg_local_sample_len,
			atomic_read(&perf_sample_allowed_ns),
			sysctl_perf_event_sample_rate);

	update_perf_cpu_limits();
}
static atomic64_t perf_event_id;

static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type);

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task);

static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);

/* Weak stub; architectures may provide a real implementation. */
void __weak perf_event_print_debug(void) { }

extern __weak const char *perf_pmu_name(void)
{
	return "pmu";
}

/* Time source for all perf bookkeeping in this file. */
static inline u64 perf_clock(void)
{
	return local_clock();
}

static inline struct perf_cpu_context *
__get_cpu_context(struct perf_event_context *ctx)
{
	return this_cpu_ptr(ctx->pmu->pmu_cpu_context);
}

/* Lock a cpuctx and (optionally) a task context; cpuctx lock taken first. */
static void perf_ctx_lock(struct perf_cpu_context *cpuctx,
			  struct perf_event_context *ctx)
{
	raw_spin_lock(&cpuctx->ctx.lock);
	if (ctx)
		raw_spin_lock(&ctx->lock);
}

/* Unlock in reverse order of perf_ctx_lock(). */
static void perf_ctx_unlock(struct perf_cpu_context *cpuctx,
			    struct perf_event_context *ctx)
{
	if (ctx)
		raw_spin_unlock(&ctx->lock);
	raw_spin_unlock(&cpuctx->ctx.lock);
}
#ifdef CONFIG_CGROUP_PERF

/*
 * perf_cgroup_info keeps track of time_enabled for a cgroup.
 * This is a per-cpu dynamically allocated data structure.
 */
struct perf_cgroup_info {
	u64				time;
	u64				timestamp;
};

struct perf_cgroup {
	struct cgroup_subsys_state	css;
	struct perf_cgroup_info	__percpu *info;
};

/*
 * Must ensure cgroup is pinned (css_get) before calling
 * this function. In other words, we cannot call this function
 * if there is no cgroup event for the current CPU context.
 */
static inline struct perf_cgroup *
perf_cgroup_from_task(struct task_struct *task)
{
	return container_of(task_subsys_state(task, perf_subsys_id),
			struct perf_cgroup, css);
}

static inline bool
perf_cgroup_match(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/* @event doesn't care about cgroup */
	if (!event->cgrp)
		return true;

	/* wants specific cgroup scope but @cpuctx isn't associated with any */
	if (!cpuctx->cgrp)
		return false;

	/*
	 * Cgroup scoping is recursive. An event enabled for a cgroup is
	 * also enabled for all its descendant cgroups. If @cpuctx's
	 * cgroup is a descendant of @event's (the test covers identity
	 * case), it's a match.
	 */
	return cgroup_is_descendant(cpuctx->cgrp->css.cgroup,
				    event->cgrp->css.cgroup);
}

/* Try to take a css reference on the event's cgroup; may fail during teardown. */
static inline bool perf_tryget_cgroup(struct perf_event *event)
{
	return css_tryget(&event->cgrp->css);
}

static inline void perf_put_cgroup(struct perf_event *event)
{
	css_put(&event->cgrp->css);
}

/* Drop the css reference and forget the cgroup. */
static inline void perf_detach_cgroup(struct perf_event *event)
{
	perf_put_cgroup(event);
	event->cgrp = NULL;
}

static inline int is_cgroup_event(struct perf_event *event)
{
	return event->cgrp != NULL;
}

/* Cgroup-scoped notion of event time, kept per cpu in cgrp->info. */
static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	struct perf_cgroup_info *t;

	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	return t->time;
}

/* Fold the wall time elapsed since the last update into the cgroup clock. */
static inline void __update_cgrp_time(struct perf_cgroup *cgrp)
{
	struct perf_cgroup_info *info;
	u64 now;

	now = perf_clock();

	info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;
	info->timestamp = now;
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
	struct perf_cgroup *cgrp_out = cpuctx->cgrp;
	if (cgrp_out)
		__update_cgrp_time(cgrp_out);
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
	struct perf_cgroup *cgrp;

	/*
	 * ensure we access cgroup data only when needed and
	 * when we know the cgroup is pinned (css_get)
	 */
	if (!is_cgroup_event(event))
		return;

	cgrp = perf_cgroup_from_task(current);
	/*
	 * Do not update time when cgroup is not active
	 */
	if (cgrp == event->cgrp)
		__update_cgrp_time(event->cgrp);
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
	struct perf_cgroup *cgrp;
	struct perf_cgroup_info *info;

	/*
	 * ctx->lock held by caller
	 * ensure we do not access cgroup data
	 * unless we have the cgroup pinned (css_get)
	 */
	if (!task || !ctx->nr_cgroups)
		return;

	cgrp = perf_cgroup_from_task(task);
	info = this_cpu_ptr(cgrp->info);
	info->timestamp = ctx->timestamp;
}
#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */

/*
 * reschedule events based on the cgroup constraint of task.
 *
 * mode SWOUT : schedule out everything
 * mode SWIN : schedule in based on cgroup for next
 */
void perf_cgroup_switch(struct task_struct *task, int mode)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/*
	 * disable interrupts to avoid geting nr_cgroup
	 * changes via __perf_event_disable(). Also
	 * avoids preemption.
	 */
	local_irq_save(flags);

	/*
	 * we reschedule only in the presence of cgroup
	 * constrained events.
	 */
	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
		if (cpuctx->unique_pmu != pmu)
			continue; /* ensure we process each cpuctx once */

		/*
		 * perf_cgroup_events says at least one
		 * context on this CPU has cgroup events.
		 *
		 * ctx->nr_cgroups reports the number of cgroup
		 * events for a context.
		 */
		if (cpuctx->ctx.nr_cgroups > 0) {
			perf_ctx_lock(cpuctx, cpuctx->task_ctx);
			perf_pmu_disable(cpuctx->ctx.pmu);

			if (mode & PERF_CGROUP_SWOUT) {
				cpu_ctx_sched_out(cpuctx, EVENT_ALL);
				/*
				 * must not be done before ctxswout due
				 * to event_filter_match() in event_sched_out()
				 */
				cpuctx->cgrp = NULL;
			}

			if (mode & PERF_CGROUP_SWIN) {
				WARN_ON_ONCE(cpuctx->cgrp);
				/*
				 * set cgrp before ctxsw in to allow
				 * event_filter_match() to not have to pass
				 * task around
				 */
				cpuctx->cgrp = perf_cgroup_from_task(task);
				cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
			}
			perf_pmu_enable(cpuctx->ctx.pmu);
			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}
/* Context-switch out path: switch cgroup events only across cgroup changes. */
static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/*
	 * next is NULL when called from perf_event_enable_on_exec()
	 * that will systematically cause a cgroup_switch()
	 */
	if (next)
		cgrp2 = perf_cgroup_from_task(next);

	/*
	 * only schedule out current cgroup events if we know
	 * that we are switching to a different cgroup. Otherwise,
	 * do no touch the cgroup events.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
}

/* Context-switch in path: mirror of perf_cgroup_sched_out(). */
static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
	struct perf_cgroup *cgrp1;
	struct perf_cgroup *cgrp2 = NULL;

	/*
	 * we come here when we know perf_cgroup_events > 0
	 */
	cgrp1 = perf_cgroup_from_task(task);

	/* prev can never be NULL */
	cgrp2 = perf_cgroup_from_task(prev);

	/*
	 * only need to schedule in cgroup events if we are changing
	 * cgroup during ctxsw. Cgroup events were not scheduled
	 * out of ctxsw out if that was not the case.
	 */
	if (cgrp1 != cgrp2)
		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
}
/*
 * Attach @event to the perf cgroup designated by the cgroup-dir fd @fd.
 * Takes a css reference on success; returns -EBADF/-ENOENT/-EINVAL on the
 * respective failures.
 */
static inline int perf_cgroup_connect(int fd, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	struct perf_cgroup *cgrp;
	struct cgroup_subsys_state *css;
	struct fd f = fdget(fd);
	int ret = 0;

	if (!f.file)
		return -EBADF;

	css = cgroup_css_from_dir(f.file, perf_subsys_id);
	if (IS_ERR(css)) {
		ret = PTR_ERR(css);
		goto out;
	}

	cgrp = container_of(css, struct perf_cgroup, css);
	event->cgrp = cgrp;

	/* must be done before we fput() the file */
	if (!perf_tryget_cgroup(event)) {
		event->cgrp = NULL;
		ret = -ENOENT;
		goto out;
	}

	/*
	 * all events in a group must monitor
	 * the same cgroup because a task belongs
	 * to only one perf cgroup at a time
	 */
	if (group_leader && group_leader->cgrp != cgrp) {
		perf_detach_cgroup(event);
		ret = -EINVAL;
	}
out:
	fdput(f);
	return ret;
}
/* Record (in cgroup time) when the event was scheduled in. */
static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
	struct perf_cgroup_info *t;
	t = per_cpu_ptr(event->cgrp->info, event->cpu);
	event->shadow_ctx_time = now - t->timestamp;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
	/*
	 * when the current task's perf cgroup does not match
	 * the event's, we need to remember to call the
	 * perf_mark_enable() function the first time a task with
	 * a matching perf cgroup is scheduled in.
	 */
	if (is_cgroup_event(event) && !perf_cgroup_match(event))
		event->cgrp_defer_enabled = 1;
}

/* Complete a deferred enable for the event and its group siblings. */
static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
	struct perf_event *sub;
	u64 tstamp = perf_event_time(event);

	if (!event->cgrp_defer_enabled)
		return;

	event->cgrp_defer_enabled = 0;

	event->tstamp_enabled = tstamp - event->total_time_enabled;
	list_for_each_entry(sub, &event->sibling_list, group_entry) {
		if (sub->state >= PERF_EVENT_STATE_INACTIVE) {
			sub->tstamp_enabled = tstamp - sub->total_time_enabled;
			sub->cgrp_defer_enabled = 0;
		}
	}
}
#else /* !CONFIG_CGROUP_PERF */

/*
 * Stubs used when cgroup support is compiled out: matching always
 * succeeds, cgroup time is 0 and connecting a cgroup fd fails.
 */
static inline bool
perf_cgroup_match(struct perf_event *event)
{
	return true;
}

static inline void perf_detach_cgroup(struct perf_event *event)
{}

static inline int is_cgroup_event(struct perf_event *event)
{
	return 0;
}

static inline u64 perf_cgroup_event_cgrp_time(struct perf_event *event)
{
	return 0;
}

static inline void update_cgrp_time_from_event(struct perf_event *event)
{
}

static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx)
{
}

static inline void perf_cgroup_sched_out(struct task_struct *task,
					 struct task_struct *next)
{
}

static inline void perf_cgroup_sched_in(struct task_struct *prev,
					struct task_struct *task)
{
}

static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event,
				      struct perf_event_attr *attr,
				      struct perf_event *group_leader)
{
	return -EINVAL;
}

static inline void
perf_cgroup_set_timestamp(struct task_struct *task,
			  struct perf_event_context *ctx)
{
}

void
perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
{
}

static inline void
perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
{
}

static inline u64 perf_cgroup_event_time(struct perf_event *event)
{
	return 0;
}

static inline void
perf_cgroup_defer_enabled(struct perf_event *event)
{
}

static inline void
perf_cgroup_mark_enabled(struct perf_event *event,
			 struct perf_event_context *ctx)
{
}
#endif
/* Nestable PMU disable: only the outermost call actually disables. */
void perf_pmu_disable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!(*count)++)
		pmu->pmu_disable(pmu);
}

/* Pairs with perf_pmu_disable(); re-enables when the nesting count hits 0. */
void perf_pmu_enable(struct pmu *pmu)
{
	int *count = this_cpu_ptr(pmu->pmu_disable_count);
	if (!--(*count))
		pmu->pmu_enable(pmu);
}
static DEFINE_PER_CPU(struct list_head, rotation_list);

/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_pmu_rotate_start(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
	struct list_head *head = &__get_cpu_var(rotation_list);

	WARN_ON(!irqs_disabled());

	if (list_empty(&cpuctx->rotation_list)) {
		int was_empty = list_empty(head);
		list_add(&cpuctx->rotation_list, head);
		/*
		 * kick nohz-full housekeeping only on the empty -> non-empty
		 * transition of this CPU's rotation list
		 */
		if (was_empty)
			tick_nohz_full_kick();
	}
}
/* Grab a context reference; caller must already hold at least one. */
static void get_ctx(struct perf_event_context *ctx)
{
	WARN_ON(!atomic_inc_not_zero(&ctx->refcount));
}

/* Drop a context reference; frees it (RCU-deferred) on the last put. */
static void put_ctx(struct perf_event_context *ctx)
{
	if (atomic_dec_and_test(&ctx->refcount)) {
		if (ctx->parent_ctx)
			put_ctx(ctx->parent_ctx);
		if (ctx->task)
			put_task_struct(ctx->task);
		kfree_rcu(ctx, rcu_head);
	}
}

/* Detach a cloned context from its parent. */
static void unclone_ctx(struct perf_event_context *ctx)
{
	if (ctx->parent_ctx) {
		put_ctx(ctx->parent_ctx);
		ctx->parent_ctx = NULL;
	}
}
/* Resolve @p's tgid in the pid namespace the (top level) event was created in. */
static u32 perf_event_pid(struct perf_event *event, struct task_struct *p)
{
	/* only top level events carry the creating pid namespace */
	struct perf_event *ns_event = event->parent ? event->parent : event;

	return task_tgid_nr_ns(p, ns_event->ns);
}

/* Resolve @p's pid in the pid namespace the (top level) event was created in. */
static u32 perf_event_tid(struct perf_event *event, struct task_struct *p)
{
	/* only top level events carry the creating pid namespace */
	struct perf_event *ns_event = event->parent ? event->parent : event;

	return task_pid_nr_ns(p, ns_event->ns);
}

/*
 * If we inherit events we want to return the parent event id
 * to userspace.
 */
static u64 primary_event_id(struct perf_event *event)
{
	return event->parent ? event->parent->id : event->id;
}
/*
 * Get the perf_event_context for a task and lock it.
 * This has to cope with with the fact that until it is locked,
 * the context could get moved to another task.
 */
static struct perf_event_context *
perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags)
{
	struct perf_event_context *ctx;

retry:
	/*
	 * One of the few rules of preemptible RCU is that one cannot do
	 * rcu_read_unlock() while holding a scheduler (or nested) lock when
	 * part of the read side critical section was preemptible -- see
	 * rcu_read_unlock_special().
	 *
	 * Since ctx->lock nests under rq->lock we must ensure the entire read
	 * side critical section is non-preemptible.
	 */
	preempt_disable();
	rcu_read_lock();
	ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
	if (ctx) {
		/*
		 * If this context is a clone of another, it might
		 * get swapped for another underneath us by
		 * perf_event_task_sched_out, though the
		 * rcu_read_lock() protects us from any context
		 * getting freed. Lock the context and check if it
		 * got swapped before we could get the lock, and retry
		 * if so. If we locked the right context, then it
		 * can't get swapped on us any more.
		 */
		raw_spin_lock_irqsave(&ctx->lock, *flags);
		if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			rcu_read_unlock();
			preempt_enable();
			goto retry;
		}

		/* refcount already zero means the context is being torn down */
		if (!atomic_inc_not_zero(&ctx->refcount)) {
			raw_spin_unlock_irqrestore(&ctx->lock, *flags);
			ctx = NULL;
		}
	}
	rcu_read_unlock();
	preempt_enable();
	return ctx;
}
/*
* Get the context for a task and increment its pin_count so it
* can't get swapped to another task. This also increments its
* reference count so that the context can't get freed.
*/
static struct perf_event_context *
perf_pin_task_context(struct task_struct *task, int ctxn)
{
struct perf_event_context *ctx;
unsigned long flags;
ctx = perf_lock_task_context(task, ctxn, &flags);
if (ctx) {
++ctx->pin_count;
raw_spin_unlock_irqrestore(&ctx->lock, flags);
}
return ctx;
}
/* Drop the pin taken by perf_pin_task_context(). */
static void perf_unpin_context(struct perf_event_context *ctx)
{
	unsigned long irq_flags;

	raw_spin_lock_irqsave(&ctx->lock, irq_flags);
	ctx->pin_count--;
	raw_spin_unlock_irqrestore(&ctx->lock, irq_flags);
}
/*
* Update the record of the current time in a context.
*/
static void update_context_time(struct perf_event_context *ctx)
{
	u64 now;

	/* Advance ctx->time by the wall-clock delta since the last update. */
	now = perf_clock();
	ctx->time += now - ctx->timestamp;
	ctx->timestamp = now;
}
static u64 perf_event_time(struct perf_event *event)
{
	/* cgroup events keep their own notion of time. */
	if (is_cgroup_event(event))
		return perf_cgroup_event_time(event);

	/* Otherwise use the context clock; 0 if no context is attached. */
	if (event->ctx)
		return event->ctx->time;

	return 0;
}
/*
 * Update the total_time_enabled and total_time_running fields for an event.
 * The caller of this function needs to hold the ctx->lock.
 */
static void update_event_times(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	u64 run_end;

	/* Nothing to account for events (or groups) that never got enabled. */
	if (event->state < PERF_EVENT_STATE_INACTIVE ||
	    event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
		return;
	/*
	 * in cgroup mode, time_enabled represents
	 * the time the event was enabled AND active
	 * tasks were in the monitored cgroup. This is
	 * independent of the activity of the context as
	 * there may be a mix of cgroup and non-cgroup events.
	 *
	 * That is why we treat cgroup events differently
	 * here.
	 */
	if (is_cgroup_event(event))
		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
		run_end = event->tstamp_stopped;

	event->total_time_enabled = run_end - event->tstamp_enabled;

	/* Running time stops accumulating while the event is inactive. */
	if (event->state == PERF_EVENT_STATE_INACTIVE)
		run_end = event->tstamp_stopped;
	else
		run_end = perf_event_time(event);

	event->total_time_running = run_end - event->tstamp_running;
}
/*
* Update total_time_enabled and total_time_running for all events in a group.
*/
static void update_group_times(struct perf_event *leader)
{
	struct perf_event *sibling;

	/* Refresh the leader first, then every sibling in the group. */
	update_event_times(leader);
	list_for_each_entry(sibling, &leader->sibling_list, group_entry)
		update_event_times(sibling);
}
static struct list_head *
ctx_group_list(struct perf_event *event, struct perf_event_context *ctx)
{
	/* Pinned events live on their own list so they get scheduled first. */
	return event->attr.pinned ? &ctx->pinned_groups
				  : &ctx->flexible_groups;
}
/*
 * Add an event to the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_add_event(struct perf_event *event, struct perf_event_context *ctx)
{
	/* A double add would corrupt the lists. */
	WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT);
	event->attach_state |= PERF_ATTACH_CONTEXT;

	/*
	 * If we're a stand alone event or group leader, we go to the context
	 * list, group events are kept attached to the group so that
	 * perf_group_detach can, at all times, locate all siblings.
	 */
	if (event->group_leader == event) {
		struct list_head *list;

		if (is_software_event(event))
			event->group_flags |= PERF_GROUP_SOFTWARE;

		list = ctx_group_list(event, ctx);
		list_add_tail(&event->group_entry, list);
	}

	if (is_cgroup_event(event))
		ctx->nr_cgroups++;

	if (has_branch_stack(event))
		ctx->nr_branch_stack++;

	/* All events, leaders and siblings alike, go on the event_list. */
	list_add_rcu(&event->event_entry, &ctx->event_list);
	/* First event in this context: kick off the rotation machinery. */
	if (!ctx->nr_events)
		perf_pmu_rotate_start(ctx->pmu);
	ctx->nr_events++;
	if (event->attr.inherit_stat)
		ctx->nr_stat++;
}
/*
* Initialize event state based on the perf_event_attr::disabled.
*/
static inline void perf_event__state_init(struct perf_event *event)
{
	/* Honour perf_event_attr::disabled at creation time. */
	if (event->attr.disabled)
		event->state = PERF_EVENT_STATE_OFF;
	else
		event->state = PERF_EVENT_STATE_INACTIVE;
}
/*
* Called at perf_event creation and when events are attached/detached from a
* group.
*/
static void perf_event__read_size(struct perf_event *event)
{
int entry = sizeof(u64); /* value */
int size = 0;
int nr = 1;
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
size += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_ID)
entry += sizeof(u64);
if (event->attr.read_format & PERF_FORMAT_GROUP) {
nr += event->group_leader->nr_siblings;
size += sizeof(u64);
}
size += entry * nr;
event->read_size = size;
}
static void perf_event__header_size(struct perf_event *event)
{
	u64 sample_type = event->attr.sample_type;
	struct perf_sample_data *data;	/* only used for sizeof */
	u16 header = 0;

	/* read_size feeds into the PERF_SAMPLE_READ term below. */
	perf_event__read_size(event);

	if (sample_type & PERF_SAMPLE_IP)
		header += sizeof(data->ip);

	if (sample_type & PERF_SAMPLE_ADDR)
		header += sizeof(data->addr);

	if (sample_type & PERF_SAMPLE_PERIOD)
		header += sizeof(data->period);

	if (sample_type & PERF_SAMPLE_WEIGHT)
		header += sizeof(data->weight);

	if (sample_type & PERF_SAMPLE_READ)
		header += event->read_size;

	if (sample_type & PERF_SAMPLE_DATA_SRC)
		header += sizeof(data->data_src.val);

	event->header_size = header;
}
static void perf_event__id_header_size(struct perf_event *event)
{
	u64 sample_type = event->attr.sample_type;
	struct perf_sample_data *data;	/* only used for sizeof */
	u16 id_header = 0;

	if (sample_type & PERF_SAMPLE_TID)
		id_header += sizeof(data->tid_entry);

	if (sample_type & PERF_SAMPLE_TIME)
		id_header += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		id_header += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		id_header += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		id_header += sizeof(data->cpu_entry);

	event->id_header_size = id_header;
}
static void perf_group_attach(struct perf_event *event)
{
	struct perf_event *group_leader = event->group_leader, *pos;

	/*
	 * We can have double attach due to group movement in perf_event_open.
	 */
	if (event->attach_state & PERF_ATTACH_GROUP)
		return;

	event->attach_state |= PERF_ATTACH_GROUP;

	/* Leaders attach only to the context, not to themselves. */
	if (group_leader == event)
		return;

	/* A hardware sibling demotes a formerly all-software group. */
	if (group_leader->group_flags & PERF_GROUP_SOFTWARE &&
	    !is_software_event(event))
		group_leader->group_flags &= ~PERF_GROUP_SOFTWARE;

	list_add_tail(&event->group_entry, &group_leader->sibling_list);
	group_leader->nr_siblings++;

	/* Group geometry changed: recompute the sample header sizes. */
	perf_event__header_size(group_leader);

	list_for_each_entry(pos, &group_leader->sibling_list, group_entry)
		perf_event__header_size(pos);
}
/*
 * Remove an event from the lists for its context.
 * Must be called with ctx->mutex and ctx->lock held.
 */
static void
list_del_event(struct perf_event *event, struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_CONTEXT))
		return;

	event->attach_state &= ~PERF_ATTACH_CONTEXT;

	if (is_cgroup_event(event)) {
		ctx->nr_cgroups--;
		cpuctx = __get_cpu_context(ctx);
		/*
		 * if there are no more cgroup events
		 * then clear cgrp to avoid stale pointer
		 * in update_cgrp_time_from_cpuctx()
		 */
		if (!ctx->nr_cgroups)
			cpuctx->cgrp = NULL;
	}

	if (has_branch_stack(event))
		ctx->nr_branch_stack--;

	ctx->nr_events--;
	if (event->attr.inherit_stat)
		ctx->nr_stat--;

	list_del_rcu(&event->event_entry);

	/* Siblings are unlinked from the group by perf_group_detach(). */
	if (event->group_leader == event)
		list_del_init(&event->group_entry);

	/* Freeze total_time_* now that the event is leaving the context. */
	update_group_times(event);

	/*
	 * If event was in error state, then keep it
	 * that way, otherwise bogus counts will be
	 * returned on read(). The only way to get out
	 * of error state is by explicit re-enabling
	 * of the event
	 */
	if (event->state > PERF_EVENT_STATE_OFF)
		event->state = PERF_EVENT_STATE_OFF;
}
static void perf_group_detach(struct perf_event *event)
{
	struct perf_event *sibling, *tmp;
	struct list_head *list = NULL;

	/*
	 * We can have double detach due to exit/hot-unplug + close.
	 */
	if (!(event->attach_state & PERF_ATTACH_GROUP))
		return;

	event->attach_state &= ~PERF_ATTACH_GROUP;

	/*
	 * If this is a sibling, remove it from its group.
	 */
	if (event->group_leader != event) {
		list_del_init(&event->group_entry);
		event->group_leader->nr_siblings--;
		goto out;
	}

	/* Leader case: remember our spot on the context's group list. */
	if (!list_empty(&event->group_entry))
		list = &event->group_entry;

	/*
	 * If this was a group event with sibling events then
	 * upgrade the siblings to singleton events by adding them
	 * to whatever list we are on.
	 */
	list_for_each_entry_safe(sibling, tmp, &event->sibling_list, group_entry) {
		if (list)
			list_move_tail(&sibling->group_entry, list);
		sibling->group_leader = sibling;

		/* Inherit group flags from the previous leader */
		sibling->group_flags = event->group_flags;
	}

out:
	/* Group geometry changed: recompute the sample header sizes. */
	perf_event__header_size(event->group_leader);

	list_for_each_entry(tmp, &event->group_leader->sibling_list, group_entry)
		perf_event__header_size(tmp);
}
/*
* User event without the task.
*/
static bool is_orphaned_event(struct perf_event *event)
{
return event && !is_kernel_event(event) && !event->owner;
}
/*
 * Event has a parent but the parent's task finished and it's
 * alive only because of children holding a reference.
 */
static bool is_orphaned_child(struct perf_event *event)
{
	/* True when the inherited parent event has been orphaned. */
	return is_orphaned_event(event->parent);
}
static void orphans_remove_work(struct work_struct *work);

/*
 * Queue deferred removal of orphaned events from a task context.
 * No-op for CPU contexts, when already queued, or before the perf
 * workqueue exists.
 */
static void schedule_orphans_remove(struct perf_event_context *ctx)
{
	if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
		return;

	if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
		/* Hold a reference on the context until the work has run. */
		get_ctx(ctx);
		ctx->orphans_remove_sched = true;
	}
}
/* Create the single-threaded workqueue used for deferred orphan removal. */
static int __init perf_workqueue_init(void)
{
	perf_wq = create_singlethread_workqueue("perf");
	if (WARN(!perf_wq, "failed to create perf workqueue\n"))
		return -1;

	return 0;
}
core_initcall(perf_workqueue_init);
static inline int
event_filter_match(struct perf_event *event)
{
	/* Match events bound to this CPU (or to no CPU at all)... */
	if (event->cpu != -1 && event->cpu != smp_processor_id())
		return 0;

	/* ...that also belong to the current cgroup. */
	return perf_cgroup_match(event) ? 1 : 0;
}
static void
event_sched_out(struct perf_event *event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);
	u64 delta;

	/*
	 * An event which could not be activated because of
	 * filter mismatch still needs to have its timings
	 * maintained, otherwise bogus information is returned
	 * via read() for time_enabled, time_running:
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE
	    && !event_filter_match(event)) {
		delta = tstamp - event->tstamp_stopped;
		event->tstamp_running += delta;
		event->tstamp_stopped = tstamp;
	}

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return;

	event->state = PERF_EVENT_STATE_INACTIVE;
	/* A disable requested from IRQ/NMI context takes effect here. */
	if (event->pending_disable) {
		event->pending_disable = 0;
		event->state = PERF_EVENT_STATE_OFF;
	}
	event->tstamp_stopped = tstamp;
	/* Take the event off the PMU before adjusting the bookkeeping. */
	event->pmu->del(event, 0);
	event->oncpu = -1;

	if (!is_software_event(event))
		cpuctx->active_oncpu--;
	ctx->nr_active--;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq--;
	if (event->attr.exclusive || !cpuctx->active_oncpu)
		cpuctx->exclusive = 0;

	if (is_orphaned_child(event))
		schedule_orphans_remove(ctx);
}
static void
group_sched_out(struct perf_event *group_event,
		struct perf_cpu_context *cpuctx,
		struct perf_event_context *ctx)
{
	int leader_state = group_event->state;
	struct perf_event *sibling;

	/* Take the leader off the PMU first... */
	event_sched_out(group_event, cpuctx, ctx);

	/* ...then every sibling in the group. */
	list_for_each_entry(sibling, &group_event->sibling_list, group_entry)
		event_sched_out(sibling, cpuctx, ctx);

	/* A running exclusive group no longer monopolizes the PMU. */
	if (leader_state == PERF_EVENT_STATE_ACTIVE &&
	    group_event->attr.exclusive)
		cpuctx->exclusive = 0;
}
/* Argument bundle for the __perf_remove_from_context() cross call. */
struct remove_event {
	struct perf_event *event;	/* event to remove from its context */
	bool detach_group;		/* also detach it from its group? */
};
/*
* Cross CPU call to remove a performance event
*
* We disable the event on the hardware level first. After that we
* remove it from the context list.
*/
static int __perf_remove_from_context(void *info)
{
	struct remove_event *re = info;
	struct perf_event *event = re->event;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	raw_spin_lock(&ctx->lock);
	/* Stop the event, then unlink it from its group and context. */
	event_sched_out(event, cpuctx, ctx);
	if (re->detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	/* Last event gone: the task context is no longer active here. */
	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
		ctx->is_active = 0;
		cpuctx->task_ctx = NULL;
	}
	raw_spin_unlock(&ctx->lock);

	return 0;
}
#ifdef CONFIG_SMP
/*
 * Retry __perf_remove_from_context() after the target CPU went offline
 * underneath us (the cross call returned -ENXIO).
 */
static void perf_retry_remove(struct remove_event *rep)
{
	int up_ret;
	struct perf_event *event = rep->event;
	/*
	 * CPU was offline. Bring it online so we can
	 * gracefully exit a perf context.
	 */
	up_ret = cpu_up(event->cpu);

	if (!up_ret)
		/* Try the remove call once again. */
		cpu_function_call(event->cpu, __perf_remove_from_context, rep);
	else
		pr_err("Failed to bring up CPU: %d, ret: %d\n",
		       event->cpu, up_ret);
}
#else
/* Without SMP the target CPU cannot be offline; nothing to retry. */
static void perf_retry_remove(struct remove_event *rep)
{
}
#endif
/*
* Remove the event from a task's (or a CPU's) list of events.
*
* CPU events are removed with a smp call. For task events we only
* call when the task is on a CPU.
*
* If event->ctx is a cloned context, callers must make sure that
* every task struct that event->ctx->task could possibly point to
* remains valid. This is OK when called from perf_release since
* that only calls us on the top-level context, which can't be a clone.
* When called from perf_event_exit_task, it's OK because the
* context has been detached from its task.
*/
static void __ref perf_remove_from_context(struct perf_event *event, bool detach_group)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;
	int ret;
	struct remove_event re = {
		.event = event,
		.detach_group = detach_group,
	};

	lockdep_assert_held(&ctx->mutex);

	if (!task) {
		/*
		 * Per cpu events are removed via an smp call
		 */
		ret = cpu_function_call(event->cpu, __perf_remove_from_context,
					&re);
		/* -ENXIO means the CPU is offline; bring it up and retry. */
		if (ret == -ENXIO)
			perf_retry_remove(&re);
		return;
	}

retry:
	if (!task_function_call(task, __perf_remove_from_context, &re))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, its safe to remove the event, us
	 * holding the ctx->lock ensures the task won't get scheduled in.
	 */
	if (detach_group)
		perf_group_detach(event);
	list_del_event(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
* Cross CPU call to disable a performance event
*/
int __perf_event_disable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a per-task event, need to check whether this
	 * event's task is the current task on this cpu.
	 *
	 * Can trigger due to concurrent perf_event_context_sched_out()
	 * flipping contexts around.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);

	/*
	 * If the event is on, turn it off.
	 * If it is in error state, leave it in error state.
	 */
	if (event->state >= PERF_EVENT_STATE_INACTIVE) {
		/* Bring all clocks up to date before freezing the times. */
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
		update_group_times(event);
		/* Disabling a leader takes the whole group off the PMU. */
		if (event == event->group_leader)
			group_sched_out(event, cpuctx, ctx);
		else
			event_sched_out(event, cpuctx, ctx);
		event->state = PERF_EVENT_STATE_OFF;
	}

	raw_spin_unlock(&ctx->lock);

	return 0;
}
/*
 * Disable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each because they
 * hold the top-level event's child_mutex, so any descendant that
 * goes to exit will block in sync_child_event.
 * When called from perf_pending_event it's OK because event->ctx
 * is the current context on this CPU and preemption is disabled,
 * hence we can't get into perf_event_task_sched_out for this context.
 */
void perf_event_disable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Disable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_disable, event);
		return;
	}

retry:
	/* Cross-call into whichever CPU is currently running the task. */
	if (!task_function_call(task, __perf_event_disable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If the event is still active, we need to retry the cross-call.
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		raw_spin_unlock_irq(&ctx->lock);
		/*
		 * Reload the task pointer, it might have been changed by
		 * a concurrent perf_event_context_sched_out().
		 */
		task = ctx->task;
		goto retry;
	}

	/*
	 * Since we have the lock this context can't be scheduled
	 * in, so we can change the state safely.
	 */
	if (event->state == PERF_EVENT_STATE_INACTIVE) {
		update_group_times(event);
		event->state = PERF_EVENT_STATE_OFF;
	}
	raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_disable);
static void perf_set_shadow_time(struct perf_event *event,
				 struct perf_event_context *ctx,
				 u64 tstamp)
{
	/*
	 * Snapshot the time base that perf_output_read() later uses to
	 * derive enabled/running times without taking ctx->lock.
	 *
	 * cgroup events count time only while tasks of the monitored
	 * cgroup are running, so they must anchor against the cgroup
	 * clock rather than the context clock.  Using an explicit cgroup
	 * call here keeps the whole calculation within a single time
	 * source, which is simpler than relying on the (nearly identical)
	 * ctx->timestamp.
	 */
	if (!is_cgroup_event(event)) {
		event->shadow_ctx_time = tstamp - ctx->timestamp;
		return;
	}

	perf_cgroup_set_shadow_time(event, tstamp);
}
#define MAX_INTERRUPTS (~0ULL)
static void perf_log_throttle(struct perf_event *event, int enable);
static int
event_sched_in(struct perf_event *event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	u64 tstamp = perf_event_time(event);

	/* Explicitly disabled (or error-state) events never go on. */
	if (event->state <= PERF_EVENT_STATE_OFF)
		return 0;

	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();

	/*
	 * Unthrottle events, since we scheduled we might have missed several
	 * ticks already, also for a heavily scheduling task there is little
	 * guarantee it'll get a tick in a timely manner.
	 */
	if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) {
		perf_log_throttle(event, 1);
		event->hw.interrupts = 0;
	}

	/*
	 * The new state must be visible before we turn it on in the hardware:
	 */
	smp_wmb();

	/* Ask the PMU to start counting; roll back the state on failure. */
	if (event->pmu->add(event, PERF_EF_START)) {
		event->state = PERF_EVENT_STATE_INACTIVE;
		event->oncpu = -1;
		return -EAGAIN;
	}

	/* Resume the running clock from where it was stopped. */
	event->tstamp_running += tstamp - event->tstamp_stopped;

	perf_set_shadow_time(event, ctx, tstamp);

	if (!is_software_event(event))
		cpuctx->active_oncpu++;
	ctx->nr_active++;
	if (event->attr.freq && event->attr.sample_freq)
		ctx->nr_freq++;

	if (event->attr.exclusive)
		cpuctx->exclusive = 1;

	if (is_orphaned_child(event))
		schedule_orphans_remove(ctx);

	return 0;
}
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct perf_event *event, *partial_group = NULL;
	struct pmu *pmu = group_event->pmu;
	u64 now = ctx->time;
	bool simulate = false;

	if (group_event->state == PERF_EVENT_STATE_OFF)
		return 0;

	/* Schedule the whole group within one PMU transaction. */
	pmu->start_txn(pmu);

	if (event_sched_in(group_event, cpuctx, ctx)) {
		pmu->cancel_txn(pmu);
		return -EAGAIN;
	}

	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}

	if (!pmu->commit_txn(pmu))
		return 0;

group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 * The events up to the failed event are scheduled out normally,
	 * tstamp_stopped will be updated.
	 *
	 * The failed events and the remaining siblings need to have
	 * their timings updated as if they had gone thru event_sched_in()
	 * and event_sched_out(). This is required to get consistent timings
	 * across the group. This also takes care of the case where the group
	 * could never be scheduled by ensuring tstamp_stopped is set to mark
	 * the time the event was actually stopped, such that time delta
	 * calculation in update_event_times() is correct.
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			simulate = true;

		if (simulate) {
			event->tstamp_running += now - event->tstamp_stopped;
			event->tstamp_stopped = now;
		} else {
			event_sched_out(event, cpuctx, ctx);
		}
	}
	event_sched_out(group_event, cpuctx, ctx);

	pmu->cancel_txn(pmu);

	return -EAGAIN;
}
/*
* Work out whether we can put this event group on the CPU now.
*/
static int group_can_go_on(struct perf_event *event,
			   struct perf_cpu_context *cpuctx,
			   int can_add_hw)
{
	/* Pure-software groups never compete for hardware counters. */
	if (event->group_flags & PERF_GROUP_SOFTWARE)
		return 1;

	/*
	 * An exclusive group already on the CPU blocks everyone else,
	 * and an exclusive candidate can't join already-active events.
	 */
	if (cpuctx->exclusive ||
	    (event->attr.exclusive && cpuctx->active_oncpu))
		return 0;

	/* Otherwise allowed only if all previous groups made it on. */
	return can_add_hw;
}
static void add_event_to_ctx(struct perf_event *event,
			     struct perf_event_context *ctx)
{
	u64 now = perf_event_time(event);

	/* Link the event into the context and into its group... */
	list_add_event(event, ctx);
	perf_group_attach(event);

	/* ...and start all of its timestamps from "now". */
	event->tstamp_enabled = now;
	event->tstamp_running = now;
	event->tstamp_stopped = now;
}
static void task_ctx_sched_out(struct perf_event_context *ctx);
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
				struct perf_event_context *ctx,
				struct task_struct *task)
{
	/*
	 * Pinned groups must go on before flexible ones, in both the
	 * CPU context and the (optional) task context.
	 */
	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
	if (ctx)
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
}
/*
* Cross CPU call to install and enable a performance event
*
* Must be called with ctx->mutex held
*/
static int __perf_install_in_context(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	struct perf_event_context *task_ctx = cpuctx->task_ctx;
	struct task_struct *task = current;

	perf_ctx_lock(cpuctx, task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/*
	 * If there was an active task_ctx schedule it out.
	 */
	if (task_ctx)
		task_ctx_sched_out(task_ctx);

	/*
	 * If the context we're installing events in is not the
	 * active task_ctx, flip them.
	 */
	if (ctx->task && task_ctx != ctx) {
		if (task_ctx)
			raw_spin_unlock(&task_ctx->lock);
		raw_spin_lock(&ctx->lock);
		task_ctx = ctx;
	}

	if (task_ctx) {
		cpuctx->task_ctx = task_ctx;
		task = task_ctx->task;
	}

	/* Quiesce the CPU context so the new event can be slotted in. */
	cpu_ctx_sched_out(cpuctx, EVENT_ALL);

	update_context_time(ctx);
	/*
	 * update cgrp time only if current cgrp
	 * matches event->cgrp. Must be done before
	 * calling add_event_to_ctx()
	 */
	update_cgrp_time_from_event(event);

	add_event_to_ctx(event, ctx);

	/*
	 * Schedule everything back in
	 */
	perf_event_sched_in(cpuctx, task_ctx, task);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, task_ctx);

	return 0;
}
/*
* Attach a performance event to a context
*
* First we add the event to the list with the hardware enable bit
* in event->hw_config cleared.
*
* If the event is attached to a task which is on a CPU we use a smp
* call to enable it in the task context. The task might have been
* scheduled away, but we check this in the smp call again.
*/
static void
perf_install_in_context(struct perf_event_context *ctx,
			struct perf_event *event,
			int cpu)
{
	struct task_struct *task = ctx->task;

	lockdep_assert_held(&ctx->mutex);

	event->ctx = ctx;
	/* Events unbound from any CPU (cpu == -1) keep following the task. */
	if (event->cpu != -1)
		event->cpu = cpu;

	if (!task) {
		/*
		 * Per cpu events are installed via an smp call and
		 * the install is always successful.
		 */
		cpu_function_call(cpu, __perf_install_in_context, event);
		return;
	}

retry:
	if (!task_function_call(task, __perf_install_in_context, event))
		return;

	raw_spin_lock_irq(&ctx->lock);
	/*
	 * If we failed to find a running task, but find the context active now
	 * that we've acquired the ctx->lock, retry.
	 */
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		goto retry;
	}

	/*
	 * Since the task isn't running, its safe to add the event, us holding
	 * the ctx->lock ensures the task won't get scheduled in.
	 */
	add_event_to_ctx(event, ctx);
	raw_spin_unlock_irq(&ctx->lock);
}
/*
 * Put an event into inactive state and update time fields.
 * Enabling the leader of a group effectively enables all
 * the group members that aren't explicitly disabled, so we
 * have to update their ->tstamp_enabled also.
 * Note: this works for group members as well as group leaders
 * since the non-leader members' sibling_lists will be empty.
 */
static void __perf_event_mark_enabled(struct perf_event *event)
{
	u64 now = perf_event_time(event);
	struct perf_event *sibling;

	/* Back-date tstamp_enabled so total_time_enabled stays correct. */
	event->state = PERF_EVENT_STATE_INACTIVE;
	event->tstamp_enabled = now - event->total_time_enabled;

	/* Siblings that aren't explicitly disabled get enabled with us. */
	list_for_each_entry(sibling, &event->sibling_list, group_entry) {
		if (sibling->state >= PERF_EVENT_STATE_INACTIVE)
			sibling->tstamp_enabled =
				now - sibling->total_time_enabled;
	}
}
/*
* Cross CPU call to enable a performance event
*/
static int __perf_event_enable(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *leader = event->group_leader;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
	int err;

	/*
	 * There's a time window between 'ctx->is_active' check
	 * in perf_event_enable function and this place having:
	 *   - IRQs on
	 *   - ctx->lock unlocked
	 *
	 * where the task could be killed and 'ctx' deactivated
	 * by perf_event_exit_task.
	 */
	if (!ctx->is_active)
		return -EINVAL;

	raw_spin_lock(&ctx->lock);
	update_context_time(ctx);

	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto unlock;

	/*
	 * set current task's cgroup time reference point
	 */
	perf_cgroup_set_timestamp(current, ctx);

	__perf_event_mark_enabled(event);

	if (!event_filter_match(event)) {
		if (is_cgroup_event(event))
			perf_cgroup_defer_enabled(event);
		goto unlock;
	}

	/*
	 * If the event is in a group and isn't the group leader,
	 * then don't put it on unless the group is on.
	 */
	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
		goto unlock;

	if (!group_can_go_on(event, cpuctx, 1)) {
		err = -EEXIST;
	} else {
		if (event == leader)
			err = group_sched_in(event, cpuctx, ctx);
		else
			err = event_sched_in(event, cpuctx, ctx);
	}

	if (err) {
		/*
		 * If this event can't go on and it's part of a
		 * group, then the whole group has to come off.
		 */
		if (leader != event)
			group_sched_out(leader, cpuctx, ctx);
		/* A pinned group that can't be scheduled goes to error state. */
		if (leader->attr.pinned) {
			update_group_times(leader);
			leader->state = PERF_EVENT_STATE_ERROR;
		}
	}

unlock:
	raw_spin_unlock(&ctx->lock);

	return 0;
}
/*
 * Enable an event.
 *
 * If event->ctx is a cloned context, callers must make sure that
 * every task struct that event->ctx->task could possibly point to
 * remains valid.  This condition is satisfied when called through
 * perf_event_for_each_child or perf_event_for_each as described
 * for perf_event_disable.
 */
void perf_event_enable(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task = ctx->task;

	if (!task) {
		/*
		 * Enable the event on the cpu that it's on
		 */
		cpu_function_call(event->cpu, __perf_event_enable, event);
		return;
	}

	raw_spin_lock_irq(&ctx->lock);
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		goto out;

	/*
	 * If the event is in error state, clear that first.
	 * That way, if we see the event in error state below, we
	 * know that it has gone back into error state, as distinct
	 * from the task having been scheduled away before the
	 * cross-call arrived.
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		event->state = PERF_EVENT_STATE_OFF;

retry:
	/* An inactive context can be updated directly under ctx->lock. */
	if (!ctx->is_active) {
		__perf_event_mark_enabled(event);
		goto out;
	}

	raw_spin_unlock_irq(&ctx->lock);

	if (!task_function_call(task, __perf_event_enable, event))
		return;

	raw_spin_lock_irq(&ctx->lock);

	/*
	 * If the context is active and the event is still off,
	 * we need to retry the cross-call.
	 */
	if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
		/*
		 * task could have been flipped by a concurrent
		 * perf_event_context_sched_out()
		 */
		task = ctx->task;
		goto retry;
	}

out:
	raw_spin_unlock_irq(&ctx->lock);
}
EXPORT_SYMBOL_GPL(perf_event_enable);
int perf_event_refresh(struct perf_event *event, int refresh)
{
	/*
	 * Refresh only makes sense for sampling events, and inherited
	 * events can't be driven through their parent.
	 */
	if (event->attr.inherit)
		return -EINVAL;
	if (!is_sampling_event(event))
		return -EINVAL;

	/* Allow 'refresh' more overflows, then (re)enable the event. */
	atomic_add(refresh, &event->event_limit);
	perf_event_enable(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_refresh);
static void ctx_sched_out(struct perf_event_context *ctx,
			  struct perf_cpu_context *cpuctx,
			  enum event_type_t event_type)
{
	struct perf_event *event;
	int is_active = ctx->is_active;

	/* Clear the requested type bits; remember what was active before. */
	ctx->is_active &= ~event_type;
	if (likely(!ctx->nr_events))
		return;
	update_context_time(ctx);
	update_cgrp_time_from_cpuctx(cpuctx);
	if (!ctx->nr_active)
		return;

	perf_pmu_disable(ctx->pmu);
	/* Only sched out classes that were both active and requested. */
	if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}

	if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
			group_sched_out(event, cpuctx, ctx);
	}
	perf_pmu_enable(ctx->pmu);
}
/*
* Test whether two contexts are equivalent, i.e. whether they
* have both been cloned from the same version of the same context
* and they both have the same number of enabled events.
* If the number of enabled events is the same, then the set
* of enabled events should be the same, because these are both
* inherited contexts, therefore we can't access individual events
* in them directly with an fd; we can only enable/disable all
* events via prctl, or enable/disable all events in a family
* via ioctl, which will have the same effect on both contexts.
*/
static int context_equiv(struct perf_event_context *ctx1,
			 struct perf_event_context *ctx2)
{
	/* Both must be clones of the same parent context... */
	if (!ctx1->parent_ctx || ctx1->parent_ctx != ctx2->parent_ctx)
		return 0;

	/* ...from the same generation of that parent... */
	if (ctx1->parent_gen != ctx2->parent_gen)
		return 0;

	/* ...and neither may be pinned to its task. */
	if (ctx1->pin_count || ctx2->pin_count)
		return 0;

	return 1;
}
static void __perf_event_sync_stat(struct perf_event *event,
				   struct perf_event *next_event)
{
	u64 value;

	if (!event->attr.inherit_stat)
		return;

	/*
	 * Update the event value, we cannot use perf_event_read()
	 * because we're in the middle of a context switch and have IRQs
	 * disabled, which upsets smp_call_function_single(), however
	 * we know the event must be on the current CPU, therefore we
	 * don't need to use it.
	 */
	switch (event->state) {
	case PERF_EVENT_STATE_ACTIVE:
		event->pmu->read(event);
		/* fall-through */

	case PERF_EVENT_STATE_INACTIVE:
		update_event_times(event);
		break;

	default:
		break;
	}

	/*
	 * In order to keep per-task stats reliable we need to flip the event
	 * values when we flip the contexts.
	 */
	value = local64_read(&next_event->count);
	value = local64_xchg(&event->count, value);
	local64_set(&next_event->count, value);

	swap(event->total_time_enabled, next_event->total_time_enabled);
	swap(event->total_time_running, next_event->total_time_running);

	/*
	 * Since we swizzled the values, update the user visible data too.
	 */
	perf_event_update_userpage(event);
	perf_event_update_userpage(next_event);
}
static void perf_event_sync_stat(struct perf_event_context *ctx,
				 struct perf_event_context *next_ctx)
{
	struct perf_event *event, *next_event;

	/* Only needed when the context has inherit_stat events. */
	if (!ctx->nr_stat)
		return;

	update_context_time(ctx);

	/* Cloned contexts keep their events in the same order; walk in step. */
	event = list_first_entry(&ctx->event_list,
				 struct perf_event, event_entry);

	next_event = list_first_entry(&next_ctx->event_list,
				      struct perf_event, event_entry);

	while (&event->event_entry != &ctx->event_list &&
	       &next_event->event_entry != &next_ctx->event_list) {

		__perf_event_sync_stat(event, next_event);

		event = list_next_entry(event, event_entry);
		next_event = list_next_entry(next_event, event_entry);
	}
}
static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
					 struct task_struct *next)
{
	struct perf_event_context *ctx = task->perf_event_ctxp[ctxn];
	struct perf_event_context *next_ctx;
	struct perf_event_context *parent;
	struct perf_cpu_context *cpuctx;
	int do_switch = 1;

	if (likely(!ctx))
		return;

	cpuctx = __get_cpu_context(ctx);
	if (!cpuctx->task_ctx)
		return;

	rcu_read_lock();
	parent = rcu_dereference(ctx->parent_ctx);
	next_ctx = next->perf_event_ctxp[ctxn];
	if (parent && next_ctx &&
	    rcu_dereference(next_ctx->parent_ctx) == parent) {
		/*
		 * Looks like the two contexts are clones, so we might be
		 * able to optimize the context switch.  We lock both
		 * contexts and check that they are clones under the
		 * lock (including re-checking that neither has been
		 * uncloned in the meantime).  It doesn't matter which
		 * order we take the locks because no other cpu could
		 * be trying to lock both of these tasks.
		 */
		raw_spin_lock(&ctx->lock);
		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
		if (context_equiv(ctx, next_ctx)) {
			/*
			 * XXX do we need a memory barrier of sorts
			 * wrt to rcu_dereference() of perf_event_ctxp
			 */
			task->perf_event_ctxp[ctxn] = next_ctx;
			next->perf_event_ctxp[ctxn] = ctx;
			ctx->task = next;
			next_ctx->task = task;
			do_switch = 0;

			perf_event_sync_stat(ctx, next_ctx);
		}
		raw_spin_unlock(&next_ctx->lock);
		raw_spin_unlock(&ctx->lock);
	}
	rcu_read_unlock();

	/* Couldn't swap contexts: schedule ours out the slow way. */
	if (do_switch) {
		raw_spin_lock(&ctx->lock);
		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
		cpuctx->task_ctx = NULL;
		raw_spin_unlock(&ctx->lock);
	}
}
/* Iterate @ctxn over all task context indices [0, perf_nr_task_contexts). */
#define for_each_task_context_nr(ctxn)					\
	for ((ctxn) = 0; (ctxn) < perf_nr_task_contexts; (ctxn)++)
/*
 * Called from scheduler to remove the events of the current task,
 * with interrupts disabled.
 *
 * We stop each event and update the event value in event->count.
 *
 * This does not protect us against NMI, but disable()
 * sets the disabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * not restart the event.
 */
void __perf_event_task_sched_out(struct task_struct *task,
				 struct task_struct *next)
{
	int ctxn;

	for_each_task_context_nr(ctxn)
		perf_event_context_sched_out(task, ctxn, next);

	/*
	 * if cgroup events exist on this CPU, then we need
	 * to check if we have to switch out PMU state.
	 * cgroup event are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_out(task, next);
}
/*
 * Unschedule all events of @ctx from the current CPU, provided it is
 * the CPU's currently installed task context.
 */
static void task_ctx_sched_out(struct perf_event_context *ctx)
{
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	if (!cpuctx->task_ctx)
		return;

	/* Sanity check: @ctx must be what this CPU thinks is scheduled in. */
	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
		return;

	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
	cpuctx->task_ctx = NULL;
}
/*
 * Called with IRQs disabled
 *
 * Unschedule events of the given @event_type from the CPU's own context.
 */
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
			      enum event_type_t event_type)
{
	ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
}
/*
 * Schedule in all pinned groups of @ctx.  Pinned events must always be
 * on the PMU; a pinned group that cannot be scheduled is moved to
 * ERROR state rather than silently left off.
 */
static void
ctx_pinned_sched_in(struct perf_event_context *ctx,
		    struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;

	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		/* Pinned groups always get priority (can_add_hw == 1). */
		if (group_can_go_on(event, cpuctx, 1))
			group_sched_in(event, cpuctx, ctx);

		/*
		 * If this pinned group hasn't been scheduled,
		 * put it in error state.
		 */
		if (event->state == PERF_EVENT_STATE_INACTIVE) {
			update_group_times(event);
			event->state = PERF_EVENT_STATE_ERROR;
		}
	}
}
/*
 * Schedule in as many flexible (non-pinned) groups of @ctx as the
 * hardware allows.  Once one group fails to fit, stop trying to add
 * further hardware groups (can_add_hw) to preserve list order fairness.
 */
static void
ctx_flexible_sched_in(struct perf_event_context *ctx,
		      struct perf_cpu_context *cpuctx)
{
	struct perf_event *event;
	int can_add_hw = 1;

	list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
		/* Ignore events in OFF or ERROR state */
		if (event->state <= PERF_EVENT_STATE_OFF)
			continue;
		/*
		 * Listen to the 'cpu' scheduling filter constraint
		 * of events:
		 */
		if (!event_filter_match(event))
			continue;

		/* may need to reset tstamp_enabled */
		if (is_cgroup_event(event))
			perf_cgroup_mark_enabled(event, ctx);

		if (group_can_go_on(event, cpuctx, can_add_hw)) {
			if (group_sched_in(event, cpuctx, ctx))
				can_add_hw = 0;
		}
	}
}
/*
 * Schedule the requested event classes of @ctx onto the PMU.
 * Pinned groups go first so they get the best chance of fitting;
 * classes already active (per @is_active) are not re-scheduled.
 */
static void
ctx_sched_in(struct perf_event_context *ctx,
	     struct perf_cpu_context *cpuctx,
	     enum event_type_t event_type,
	     struct task_struct *task)
{
	u64 now;
	int is_active = ctx->is_active;

	ctx->is_active |= event_type;
	if (likely(!ctx->nr_events))
		return;

	now = perf_clock();
	ctx->timestamp = now;
	perf_cgroup_set_timestamp(task, ctx);
	/*
	 * First go through the list and put on any pinned groups
	 * in order to give them the best chance of going on.
	 */
	if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
		ctx_pinned_sched_in(ctx, cpuctx);

	/* Then walk through the lower prio flexible groups */
	if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
		ctx_flexible_sched_in(ctx, cpuctx);
}
/* Schedule the given event classes of the CPU's own context. */
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
			     enum event_type_t event_type,
			     struct task_struct *task)
{
	struct perf_event_context *ctx = &cpuctx->ctx;

	ctx_sched_in(ctx, cpuctx, event_type, task);
}
/*
 * Install task context @ctx on the current CPU and schedule its events,
 * preserving the cpu-pinned > task-pinned > cpu-flexible > task-flexible
 * priority order by first kicking out the CPU's flexible groups.
 */
static void perf_event_context_sched_in(struct perf_event_context *ctx,
					struct task_struct *task)
{
	struct perf_cpu_context *cpuctx;

	cpuctx = __get_cpu_context(ctx);
	if (cpuctx->task_ctx == ctx)
		return;

	perf_ctx_lock(cpuctx, ctx);
	perf_pmu_disable(ctx->pmu);
	/*
	 * We want to keep the following priority order:
	 * cpu pinned (that don't need to move), task pinned,
	 * cpu flexible, task flexible.
	 */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

	if (ctx->nr_events)
		cpuctx->task_ctx = ctx;

	perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);

	perf_pmu_enable(ctx->pmu);
	perf_ctx_unlock(cpuctx, ctx);

	/*
	 * Since these rotations are per-cpu, we need to ensure the
	 * cpu-context we got scheduled on is actually rotating.
	 */
	perf_pmu_rotate_start(ctx->pmu);
}
/*
 * When sampling the branch stack in system-wide, it may be necessary
 * to flush the stack on context switch. This happens when the branch
 * stack does not tag its entries with the pid of the current task.
 * Otherwise it becomes impossible to associate a branch entry with a
 * task. This ambiguity is more likely to appear when the branch stack
 * supports priv level filtering and the user sets it to monitor only
 * at the user level (which could be a useful measurement in system-wide
 * mode). In that case, the risk is high of having a branch stack with
 * branch from multiple tasks. Flushing may mean dropping the existing
 * entries or stashing them somewhere in the PMU specific code layer.
 *
 * This function provides the context switch callback to the lower code
 * layer. It is invoked ONLY when there is at least one system-wide context
 * with at least one active event using taken branch sampling.
 */
static void perf_branch_stack_sched_in(struct task_struct *prev,
				       struct task_struct *task)
{
	struct perf_cpu_context *cpuctx;
	struct pmu *pmu;
	unsigned long flags;

	/* no need to flush branch stack if not changing task */
	if (prev == task)
		return;

	local_irq_save(flags);

	rcu_read_lock();

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

		/*
		 * check if the context has at least one
		 * event using PERF_SAMPLE_BRANCH_STACK
		 */
		if (cpuctx->ctx.nr_branch_stack > 0
		    && pmu->flush_branch_stack) {

			/*
			 * NOTE(review): this reassigns the list cursor to the
			 * cpu context's canonical pmu; iteration then resumes
			 * from that pmu's ->entry — presumably intended for
			 * pmus sharing a context, but worth confirming.
			 */
			pmu = cpuctx->ctx.pmu;

			perf_ctx_lock(cpuctx, cpuctx->task_ctx);

			perf_pmu_disable(pmu);

			pmu->flush_branch_stack();

			perf_pmu_enable(pmu);

			perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
		}
	}

	rcu_read_unlock();

	local_irq_restore(flags);
}
/*
 * Called from scheduler to add the events of the current task
 * with interrupts disabled.
 *
 * We restore the event value and then enable it.
 *
 * This does not protect us against NMI, but enable()
 * sets the enabled bit in the control field of event _before_
 * accessing the event control register. If a NMI hits, then it will
 * keep the event running.
 */
void __perf_event_task_sched_in(struct task_struct *prev,
				struct task_struct *task)
{
	struct perf_event_context *ctx;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (likely(!ctx))
			continue;

		perf_event_context_sched_in(ctx, task);
	}
	/*
	 * if cgroup events exist on this CPU, then we need
	 * to check if we have to switch in PMU state.
	 * cgroup event are system-wide mode only
	 */
	if (atomic_read(&__get_cpu_var(perf_cgroup_events)))
		perf_cgroup_sched_in(prev, task);

	/* check for system-wide branch_stack events */
	if (atomic_read(&__get_cpu_var(perf_branch_stack_events)))
		perf_branch_stack_sched_in(prev, task);
}
/*
 * Compute the sample period that would produce the requested sample
 * frequency, given that @count events occurred in @nsec nanoseconds.
 * Precision of the operands is progressively reduced (see REDUCE_FLS)
 * so the arithmetic stays within a u64 without overflowing.
 */
static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count)
{
	u64 frequency = event->attr.sample_freq;
	u64 sec = NSEC_PER_SEC;
	u64 divisor, dividend;

	int count_fls, nsec_fls, frequency_fls, sec_fls;

	count_fls = fls64(count);
	nsec_fls = fls64(nsec);
	frequency_fls = fls64(frequency);
	sec_fls = 30;

	/*
	 * We got @count in @nsec, with a target of sample_freq HZ
	 * the target period becomes:
	 *
	 *             @count * 10^9
	 * period = -------------------
	 *          @nsec * sample_freq
	 *
	 */

	/*
	 * Reduce accuracy by one bit such that @a and @b converge
	 * to a similar magnitude.
	 */
#define REDUCE_FLS(a, b)		\
do {					\
	if (a##_fls > b##_fls) {	\
		a >>= 1;		\
		a##_fls--;		\
	} else {			\
		b >>= 1;		\
		b##_fls--;		\
	}				\
} while (0)

	/*
	 * Reduce accuracy until either term fits in a u64, then proceed with
	 * the other, so that finally we can do a u64/u64 division.
	 */
	while (count_fls + sec_fls > 64 && nsec_fls + frequency_fls > 64) {
		REDUCE_FLS(nsec, frequency);
		REDUCE_FLS(sec, count);
	}

	if (count_fls + sec_fls > 64) {
		divisor = nsec * frequency;

		while (count_fls + sec_fls > 64) {
			REDUCE_FLS(count, sec);
			divisor >>= 1;
		}

		dividend = count * sec;
	} else {
		dividend = count * sec;

		while (nsec_fls + frequency_fls > 64) {
			REDUCE_FLS(nsec, frequency);
			dividend >>= 1;
		}

		divisor = nsec * frequency;
	}

	if (!divisor)
		return dividend;

	return div64_u64(dividend, divisor);
}
/* Per-cpu throttle bookkeeping consumed by perf_event_task_tick(). */
static DEFINE_PER_CPU(int, perf_throttled_count);
static DEFINE_PER_CPU(u64, perf_throttled_seq);
/*
 * Adjust @event's sampling period toward the target frequency, based on
 * @count events observed over @nsec ns.  The change is low-pass filtered
 * to avoid oscillation.  @disable indicates the event is still running
 * and must be stopped/restarted around a period_left reset.
 */
static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period, sample_period;
	s64 delta;

	period = perf_calculate_period(event, nsec, count);

	delta = (s64)(period - hwc->sample_period);
	delta = (delta + 7) / 8; /* low pass filter */

	sample_period = hwc->sample_period + delta;

	if (!sample_period)
		sample_period = 1;

	hwc->sample_period = sample_period;

	/* Drop an excessive left-over period so the new rate takes effect. */
	if (local64_read(&hwc->period_left) > 8*sample_period) {
		if (disable)
			event->pmu->stop(event, PERF_EF_UPDATE);

		local64_set(&hwc->period_left, 0);

		if (disable)
			event->pmu->start(event, PERF_EF_RELOAD);
	}
}
/*
 * combine freq adjustment with unthrottling to avoid two passes over the
 * events. At the same time, make sure, having freq events does not change
 * the rate of unthrottling as that would introduce bias.
 */
static void perf_adjust_freq_unthr_context(struct perf_event_context *ctx,
					   int needs_unthr)
{
	struct perf_event *event;
	struct hw_perf_event *hwc;
	u64 now, period = TICK_NSEC;
	s64 delta;

	/*
	 * only need to iterate over all events iff:
	 * - context have events in frequency mode (needs freq adjust)
	 * - there are events to unthrottle on this cpu
	 */
	if (!(ctx->nr_freq || needs_unthr))
		return;

	raw_spin_lock(&ctx->lock);
	perf_pmu_disable(ctx->pmu);

	list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
		if (event->state != PERF_EVENT_STATE_ACTIVE)
			continue;

		if (!event_filter_match(event))
			continue;

		hwc = &event->hw;

		/* Unthrottle any event throttled during the previous tick. */
		if (needs_unthr && hwc->interrupts == MAX_INTERRUPTS) {
			hwc->interrupts = 0;
			perf_log_throttle(event, 1);
			event->pmu->start(event, 0);
		}

		if (!event->attr.freq || !event->attr.sample_freq)
			continue;

		/*
		 * stop the event and update event->count
		 */
		event->pmu->stop(event, PERF_EF_UPDATE);

		now = local64_read(&event->count);
		delta = now - hwc->freq_count_stamp;
		hwc->freq_count_stamp = now;

		/*
		 * restart the event
		 * reload only if value has changed
		 * we have stopped the event so tell that
		 * to perf_adjust_period() to avoid stopping it
		 * twice.
		 */
		if (delta > 0)
			perf_adjust_period(event, period, delta, false);

		event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0);
	}

	perf_pmu_enable(ctx->pmu);
	raw_spin_unlock(&ctx->lock);
}
/*
 * Round-robin a context's events:
 */
static void rotate_ctx(struct perf_event_context *ctx)
{
	/*
	 * Rotation might be disabled by the inheritance code; in that
	 * case leave the flexible group list untouched.
	 */
	if (ctx->rotate_disable)
		return;

	/* Move the first non-pinned group to the tail of the list. */
	list_rotate_left(&ctx->flexible_groups);
}
/*
 * perf_pmu_rotate_start() and perf_rotate_context() are fully serialized
 * because they're strictly cpu affine and rotate_start is called with IRQs
 * disabled, while rotate_context is called from IRQ context.
 */
static void perf_rotate_context(struct perf_cpu_context *cpuctx)
{
	struct perf_event_context *ctx = NULL;
	int rotate = 0, remove = 1;

	/* Only rotate when not all events fit on the PMU simultaneously. */
	if (cpuctx->ctx.nr_events) {
		remove = 0;
		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
			rotate = 1;
	}

	ctx = cpuctx->task_ctx;
	if (ctx && ctx->nr_events) {
		remove = 0;
		if (ctx->nr_events != ctx->nr_active)
			rotate = 1;
	}

	if (!rotate)
		goto done;

	perf_ctx_lock(cpuctx, cpuctx->task_ctx);
	perf_pmu_disable(cpuctx->ctx.pmu);

	/* Unschedule flexible groups, rotate the lists, reschedule. */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
	if (ctx)
		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);

	rotate_ctx(&cpuctx->ctx);
	if (ctx)
		rotate_ctx(ctx);

	perf_event_sched_in(cpuctx, ctx, current);

	perf_pmu_enable(cpuctx->ctx.pmu);
	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
done:
	/* With no events left, drop this cpuctx from the rotation list. */
	if (remove)
		list_del_init(&cpuctx->rotation_list);
}
#ifdef CONFIG_NO_HZ_FULL
/* The tick may be stopped only while no cpu contexts need rotating. */
bool perf_event_can_stop_tick(void)
{
	return list_empty(&__get_cpu_var(rotation_list));
}
#endif
/*
 * Scheduler tick hook: unthrottle throttled events, adjust frequencies
 * and, at each cpuctx's jiffies interval, rotate flexible event groups.
 * Runs with IRQs disabled.
 */
void perf_event_task_tick(void)
{
	struct list_head *head = &__get_cpu_var(rotation_list);
	struct perf_cpu_context *cpuctx, *tmp;
	struct perf_event_context *ctx;
	int throttled;

	WARN_ON(!irqs_disabled());

	__this_cpu_inc(perf_throttled_seq);
	/* Consume-and-clear: any throttling since last tick gets undone now. */
	throttled = __this_cpu_xchg(perf_throttled_count, 0);

	list_for_each_entry_safe(cpuctx, tmp, head, rotation_list) {
		ctx = &cpuctx->ctx;
		perf_adjust_freq_unthr_context(ctx, throttled);

		ctx = cpuctx->task_ctx;
		if (ctx)
			perf_adjust_freq_unthr_context(ctx, throttled);

		if (cpuctx->jiffies_interval == 1 ||
				!(jiffies % cpuctx->jiffies_interval))
			perf_rotate_context(cpuctx);
	}
}
/*
 * Enable @event if it was created with attr.enable_on_exec.  The flag is
 * one-shot and cleared here.  Returns 1 if the event state was actually
 * changed, 0 otherwise.
 */
static int event_enable_on_exec(struct perf_event *event,
				struct perf_event_context *ctx)
{
	if (!event->attr.enable_on_exec)
		return 0;

	event->attr.enable_on_exec = 0;
	/* Already at least INACTIVE: nothing to enable. */
	if (event->state >= PERF_EVENT_STATE_INACTIVE)
		return 0;

	__perf_event_mark_enabled(event);

	return 1;
}
/*
 * Enable all of a task's events that have been marked enable-on-exec.
 * This expects task == current.
 */
static void perf_event_enable_on_exec(struct perf_event_context *ctx)
{
	struct perf_event *event;
	unsigned long flags;
	int enabled = 0;
	int ret;

	local_irq_save(flags);
	if (!ctx || !ctx->nr_events)
		goto out;

	/*
	 * We must ctxsw out cgroup events to avoid conflict
	 * when invoking perf_task_event_sched_in() later on
	 * in this function. Otherwise we end up trying to
	 * ctxswin cgroup events which are already scheduled
	 * in.
	 */
	perf_cgroup_sched_out(current, NULL);

	raw_spin_lock(&ctx->lock);
	task_ctx_sched_out(ctx);

	list_for_each_entry(event, &ctx->event_list, event_entry) {
		ret = event_enable_on_exec(event, ctx);
		if (ret)
			enabled = 1;
	}

	/*
	 * Unclone this context if we enabled any event.
	 */
	if (enabled)
		unclone_ctx(ctx);

	raw_spin_unlock(&ctx->lock);

	/*
	 * Also calls ctxswin for cgroup events, if any:
	 */
	perf_event_context_sched_in(ctx, ctx->task);
out:
	local_irq_restore(flags);
}
/*
 * Cross CPU call to read the hardware event
 */
static void __perf_event_read(void *info)
{
	struct perf_event *event = info;
	struct perf_event_context *ctx = event->ctx;
	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

	/*
	 * If this is a task context, we need to check whether it is
	 * the current task context of this cpu.  If not it has been
	 * scheduled out before the smp call arrived.  In that case
	 * event->count would have been updated to a recent sample
	 * when the event was scheduled out.
	 */
	if (ctx->task && cpuctx->task_ctx != ctx)
		return;

	raw_spin_lock(&ctx->lock);
	if (ctx->is_active) {
		update_context_time(ctx);
		update_cgrp_time_from_event(event);
	}
	update_event_times(event);
	/* Pull the latest hardware value only while the event is live. */
	if (event->state == PERF_EVENT_STATE_ACTIVE)
		event->pmu->read(event);
	raw_spin_unlock(&ctx->lock);
}
/* Total count of @event: its own count plus all inherited children's. */
static inline u64 perf_event_count(struct perf_event *event)
{
	u64 self_count = local64_read(&event->count);
	u64 child_count = atomic64_read(&event->child_count);

	return self_count + child_count;
}
/*
 * Read the up-to-date value of @event, pulling fresh hardware state via
 * IPI when the event is active on another CPU, or just refreshing the
 * timing bookkeeping when it is inactive.
 */
static u64 perf_event_read(struct perf_event *event)
{
	/*
	 * If event is enabled and currently active on a CPU, update the
	 * value in the event structure:
	 */
	if (event->state == PERF_EVENT_STATE_ACTIVE) {
		smp_call_function_single(event->oncpu,
					 __perf_event_read, event, 1);
	} else if (event->state == PERF_EVENT_STATE_INACTIVE) {
		struct perf_event_context *ctx = event->ctx;
		unsigned long flags;

		raw_spin_lock_irqsave(&ctx->lock, flags);
		/*
		 * may read while context is not active
		 * (e.g., thread is blocked), in that case
		 * we cannot update context time
		 */
		if (ctx->is_active) {
			update_context_time(ctx);
			update_cgrp_time_from_event(event);
		}
		update_event_times(event);
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	}

	return perf_event_count(event);
}
/*
 * Initialize the perf_event context in a task_struct:
 */
static void __perf_event_init_context(struct perf_event_context *ctx)
{
	raw_spin_lock_init(&ctx->lock);
	mutex_init(&ctx->mutex);
	INIT_LIST_HEAD(&ctx->pinned_groups);
	INIT_LIST_HEAD(&ctx->flexible_groups);
	INIT_LIST_HEAD(&ctx->event_list);
	/* New contexts start with a single reference. */
	atomic_set(&ctx->refcount, 1);
	INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work);
}
/*
 * Allocate and initialize a perf_event context for @pmu.  When @task is
 * given, the context pins a reference to it.  Returns NULL on allocation
 * failure.
 */
static struct perf_event_context *
alloc_perf_context(struct pmu *pmu, struct task_struct *task)
{
	struct perf_event_context *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;

	__perf_event_init_context(ctx);
	ctx->pmu = pmu;

	if (task) {
		get_task_struct(task);
		ctx->task = task;
	}

	return ctx;
}
/*
 * Resolve @vpid to a task (0 means current), take a reference on it and
 * verify ptrace read access.  Returns the task with an elevated refcount,
 * or an ERR_PTR (-ESRCH if no such task, -EACCES if not permitted).
 */
static struct task_struct *
find_lively_task_by_vpid(pid_t vpid)
{
	struct task_struct *task;
	int err;

	rcu_read_lock();
	if (!vpid)
		task = current;
	else
		task = find_task_by_vpid(vpid);
	if (task)
		get_task_struct(task);
	rcu_read_unlock();

	if (!task)
		return ERR_PTR(-ESRCH);

	/* Reuse ptrace permission checks for now. */
	err = -EACCES;
	if (!ptrace_may_access(task, PTRACE_MODE_READ))
		goto errout;

	return task;
errout:
	put_task_struct(task);
	return ERR_PTR(err);

}
/*
 * Returns a matching context with refcount and pincount.
 *
 * With @task == NULL this is a per-CPU context (requires CAP_SYS_ADMIN
 * under perf_paranoid_cpu and an online @cpu); otherwise the task's
 * context for @pmu's context class is found or allocated, racing
 * carefully against concurrent installers and task exit.
 */
static struct perf_event_context *
find_get_context(struct pmu *pmu, struct task_struct *task, int cpu)
{
	struct perf_event_context *ctx;
	struct perf_cpu_context *cpuctx;
	unsigned long flags;
	int ctxn, err;

	if (!task) {
		/* Must be root to operate on a CPU event: */
		if (perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
			return ERR_PTR(-EACCES);

		/*
		 * We could be clever and allow to attach a event to an
		 * offline CPU and activate it when the CPU comes up, but
		 * that's for later.
		 */
		if (!cpu_online(cpu))
			return ERR_PTR(-ENODEV);

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		ctx = &cpuctx->ctx;
		get_ctx(ctx);
		++ctx->pin_count;

		return ctx;
	}

	err = -EINVAL;
	ctxn = pmu->task_ctx_nr;
	if (ctxn < 0)
		goto errout;

retry:
	ctx = perf_lock_task_context(task, ctxn, &flags);
	if (ctx) {
		unclone_ctx(ctx);
		++ctx->pin_count;
		raw_spin_unlock_irqrestore(&ctx->lock, flags);
	} else {
		ctx = alloc_perf_context(pmu, task);
		err = -ENOMEM;
		if (!ctx)
			goto errout;

		err = 0;
		mutex_lock(&task->perf_event_mutex);
		/*
		 * If it has already passed perf_event_exit_task().
		 * we must see PF_EXITING, it takes this mutex too.
		 */
		if (task->flags & PF_EXITING)
			err = -ESRCH;
		else if (task->perf_event_ctxp[ctxn])
			err = -EAGAIN;
		else {
			get_ctx(ctx);
			++ctx->pin_count;
			rcu_assign_pointer(task->perf_event_ctxp[ctxn], ctx);
		}
		mutex_unlock(&task->perf_event_mutex);

		if (unlikely(err)) {
			put_ctx(ctx);

			/* Someone else installed a context first: retry. */
			if (err == -EAGAIN)
				goto retry;
			goto errout;
		}
	}

	return ctx;

errout:
	return ERR_PTR(err);
}
static void perf_event_free_filter(struct perf_event *event);
/*
 * RCU callback doing the final teardown of an event once no reader can
 * still hold a reference obtained under rcu_read_lock().
 */
static void free_event_rcu(struct rcu_head *head)
{
	struct perf_event *event;

	event = container_of(head, struct perf_event, rcu_head);
	if (event->ns)
		put_pid_ns(event->ns);
	perf_event_free_filter(event);
	kfree(event);
}
static void ring_buffer_put(struct ring_buffer *rb);
static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
/*
 * Release all resources held by @event: global accounting (only for
 * non-inherited events), its ring buffer, cgroup attachment and context
 * reference; the struct itself is freed after an RCU grace period.
 */
static void free_event(struct perf_event *event)
{
	irq_work_sync(&event->pending);

	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			static_key_slow_dec_deferred(&perf_sched_events);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_dec(&nr_mmap_events);
		if (event->attr.comm)
			atomic_dec(&nr_comm_events);
		if (event->attr.task)
			atomic_dec(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
			put_callchain_buffers();
		if (is_cgroup_event(event)) {
			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
			static_key_slow_dec_deferred(&perf_sched_events);
		}

		if (has_branch_stack(event)) {
			static_key_slow_dec_deferred(&perf_sched_events);
			/* is system-wide event */
			if (!(event->attach_state & PERF_ATTACH_TASK)) {
				atomic_dec(&per_cpu(perf_branch_stack_events,
						    event->cpu));
			}
		}
	}

	if (event->rb) {
		struct ring_buffer *rb;

		/*
		 * Can happen when we close an event with re-directed output.
		 *
		 * Since we have a 0 refcount, perf_mmap_close() will skip
		 * over us; possibly making our ring_buffer_put() the last.
		 */
		mutex_lock(&event->mmap_mutex);
		rb = event->rb;
		if (rb) {
			rcu_assign_pointer(event->rb, NULL);
			ring_buffer_detach(event, rb);
			ring_buffer_put(rb); /* could be last */
		}
		mutex_unlock(&event->mmap_mutex);
	}

	if (is_cgroup_event(event))
		perf_detach_cgroup(event);

	if (event->destroy)
		event->destroy(event);

	if (event->ctx)
		put_ctx(event->ctx);

	call_rcu(&event->rcu_head, free_event_rcu);
}
/*
 * Detach @event from its context and free it.  Always returns 0.
 */
int perf_event_release_kernel(struct perf_event *event)
{
	struct perf_event_context *ctx = event->ctx;

	WARN_ON_ONCE(ctx->parent_ctx);
	/*
	 * There are two ways this annotation is useful:
	 *
	 *  1) there is a lock recursion from perf_event_exit_task
	 *     see the comment there.
	 *
	 *  2) there is a lock-inversion with mmap_sem through
	 *     perf_event_read_group(), which takes faults while
	 *     holding ctx->mutex, however this is called after
	 *     the last filedesc died, so there is no possibility
	 *     to trigger the AB-BA case.
	 */
	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
	perf_remove_from_context(event, true);
	mutex_unlock(&ctx->mutex);

	free_event(event);

	return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
/*
 * Remove user event from the owner task.
 */
static void perf_remove_from_owner(struct perf_event *event)
{
	struct task_struct *owner;

	rcu_read_lock();
	owner = ACCESS_ONCE(event->owner);
	/*
	 * Matches the smp_wmb() in perf_event_exit_task(). If we observe
	 * !owner it means the list deletion is complete and we can indeed
	 * free this event, otherwise we need to serialize on
	 * owner->perf_event_mutex.
	 */
	smp_read_barrier_depends();
	if (owner) {
		/*
		 * Since delayed_put_task_struct() also drops the last
		 * task reference we can safely take a new reference
		 * while holding the rcu_read_lock().
		 */
		get_task_struct(owner);
	}
	rcu_read_unlock();

	if (owner) {
		mutex_lock(&owner->perf_event_mutex);
		/*
		 * We have to re-check the event->owner field, if it is cleared
		 * we raced with perf_event_exit_task(), acquiring the mutex
		 * ensured they're done, and we can proceed with freeing the
		 * event.
		 */
		if (event->owner)
			list_del_init(&event->owner_entry);
		mutex_unlock(&owner->perf_event_mutex);
		put_task_struct(owner);
	}
}
/*
 * Called when the last reference to the file is gone.
 */
static void put_event(struct perf_event *event)
{
	/* Drop one reference; only the last one triggers teardown. */
	if (!atomic_long_dec_and_test(&event->refcount))
		return;

	/* User-space events must also be unlinked from their owner task. */
	if (!is_kernel_event(event))
		perf_remove_from_owner(event);

	perf_event_release_kernel(event);
}
/* file_operations ->release: drop the fd's reference on the event. */
static int perf_release(struct inode *inode, struct file *file)
{
	struct perf_event *event = file->private_data;

	/*
	 * Event can be in state OFF because of a constraint check.
	 * Change to ACTIVE so that it gets cleaned up correctly.
	 */
	if ((event->state == PERF_EVENT_STATE_OFF) &&
	    event->attr.constraint_duplicate)
		event->state = PERF_EVENT_STATE_ACTIVE;

	put_event(event);

	return 0;
}
/*
 * Remove all orphaned events from the context.
 */
static void orphans_remove_work(struct work_struct *work)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;

	ctx = container_of(work, struct perf_event_context,
			   orphans_remove.work);

	mutex_lock(&ctx->mutex);
	list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
		struct perf_event *parent_event = event->parent;

		if (!is_orphaned_child(event))
			continue;

		perf_remove_from_context(event, true);

		/* Unlink from the (dead) parent, then free both sides. */
		mutex_lock(&parent_event->child_mutex);
		list_del_init(&event->child_list);
		mutex_unlock(&parent_event->child_mutex);

		free_event(event);
		put_event(parent_event);
	}

	raw_spin_lock_irq(&ctx->lock);
	ctx->orphans_remove_sched = false;
	raw_spin_unlock_irq(&ctx->lock);
	mutex_unlock(&ctx->mutex);

	put_ctx(ctx);
}
/*
 * Read the total count of @event including all its inherited children,
 * returning it and filling in the aggregate @enabled/@running times.
 */
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
{
	struct perf_event *child;
	u64 total = 0;

	*enabled = 0;
	*running = 0;

	/* child_mutex keeps the child list stable while we sum it up. */
	mutex_lock(&event->child_mutex);
	total += perf_event_read(event);
	*enabled += event->total_time_enabled +
			atomic64_read(&event->child_total_time_enabled);
	*running += event->total_time_running +
			atomic64_read(&event->child_total_time_running);

	list_for_each_entry(child, &event->child_list, child_list) {
		total += perf_event_read(child);
		*enabled += child->total_time_enabled;
		*running += child->total_time_running;
	}
	mutex_unlock(&event->child_mutex);

	return total;
}
EXPORT_SYMBOL_GPL(perf_event_read_value);
/*
 * Copy a PERF_FORMAT_GROUP read to user space: the leader's values
 * first (nr, optional times, count, optional id), then one record per
 * sibling.  Returns the number of bytes copied or -EFAULT.
 */
static int perf_event_read_group(struct perf_event *event,
				   u64 read_format, char __user *buf)
{
	struct perf_event *leader = event->group_leader, *sub;
	int n = 0, size = 0, ret = -EFAULT;
	struct perf_event_context *ctx = leader->ctx;
	u64 values[5];
	u64 count, enabled, running;

	mutex_lock(&ctx->mutex);
	count = perf_event_read_value(leader, &enabled, &running);

	values[n++] = 1 + leader->nr_siblings;
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	values[n++] = count;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(leader);

	size = n * sizeof(u64);

	if (copy_to_user(buf, values, size))
		goto unlock;

	ret = size;

	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
		n = 0;

		values[n++] = perf_event_read_value(sub, &enabled, &running);
		if (read_format & PERF_FORMAT_ID)
			values[n++] = primary_event_id(sub);

		size = n * sizeof(u64);

		if (copy_to_user(buf + ret, values, size)) {
			ret = -EFAULT;
			goto unlock;
		}

		ret += size;
	}
unlock:
	mutex_unlock(&ctx->mutex);

	return ret;
}
/*
 * Copy a single (non-group) event read to user space: count, optional
 * enabled/running times and optional id.  Returns bytes copied or
 * -EFAULT.
 */
static int perf_event_read_one(struct perf_event *event,
				 u64 read_format, char __user *buf)
{
	u64 enabled, running;
	u64 values[4];
	int n = 0;

	values[n++] = perf_event_read_value(event, &enabled, &running);
	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		values[n++] = enabled;
	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		values[n++] = running;
	if (read_format & PERF_FORMAT_ID)
		values[n++] = primary_event_id(event);

	if (copy_to_user(buf, values, n * sizeof(u64)))
		return -EFAULT;

	return n * sizeof(u64);
}
/*
 * Read the performance event - simple non blocking version for now
 */
static ssize_t
perf_read_hw(struct perf_event *event, char __user *buf, size_t count)
{
	u64 read_format = event->attr.read_format;
	int ret;

	/*
	 * Return end-of-file for a read on a event that is in
	 * error state (i.e. because it was pinned but it couldn't be
	 * scheduled on to the CPU at some point).
	 */
	if (event->state == PERF_EVENT_STATE_ERROR)
		return 0;

	/* The caller's buffer must hold the whole read format. */
	if (count < event->read_size)
		return -ENOSPC;

	WARN_ON_ONCE(event->ctx->parent_ctx);
	if (read_format & PERF_FORMAT_GROUP)
		ret = perf_event_read_group(event, read_format, buf);
	else
		ret = perf_event_read_one(event, read_format, buf);

	return ret;
}
/* file_operations ->read: forward to the non-blocking event reader. */
static ssize_t
perf_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	return perf_read_hw(file->private_data, buf, count);
}
/*
 * file_operations ->poll: report pending ring-buffer events, or POLLHUP
 * when the event has no ring buffer attached.
 */
static unsigned int perf_poll(struct file *file, poll_table *wait)
{
	struct perf_event *event = file->private_data;
	struct ring_buffer *rb;
	/*
	 * Fix: the default mask must be the poll(2) flag POLLHUP, not
	 * POLL_HUP, which is a sigpoll si_code constant with an unrelated
	 * numeric value and would report bogus readiness bits.
	 */
	unsigned int events = POLLHUP;

	/*
	 * Pin the event->rb by taking event->mmap_mutex; otherwise
	 * perf_event_set_output() can swizzle our rb and make us miss wakeups.
	 */
	mutex_lock(&event->mmap_mutex);
	rb = event->rb;
	if (rb)
		events = atomic_xchg(&rb->poll, 0);
	mutex_unlock(&event->mmap_mutex);

	poll_wait(file, &event->waitq, wait);

	return events;
}
/*
 * Reset @event's count to zero after folding in the latest hardware
 * value, then refresh the mmap'ed user page.
 */
static void perf_event_reset(struct perf_event *event)
{
	(void)perf_event_read(event);
	local64_set(&event->count, 0);
	perf_event_update_userpage(event);
}
/*
 * Holding the top-level event's child_mutex means that any
 * descendant process that has inherited this event will block
 * in sync_child_event if it goes to exit, thus satisfying the
 * task existence requirements of perf_event_enable/disable.
 */
static void perf_event_for_each_child(struct perf_event *event,
					void (*func)(struct perf_event *))
{
	struct perf_event *child;

	WARN_ON_ONCE(event->ctx->parent_ctx);

	mutex_lock(&event->child_mutex);
	func(event);
	list_for_each_entry(child, &event->child_list, child_list)
		func(child);
	mutex_unlock(&event->child_mutex);
}
/*
 * Apply @func to @event's whole group: the group leader, each of the
 * leader's siblings, and all of their inherited children.
 */
static void perf_event_for_each(struct perf_event *event,
				  void (*func)(struct perf_event *))
{
	struct perf_event_context *ctx = event->ctx;
	struct perf_event *sibling;

	WARN_ON_ONCE(ctx->parent_ctx);

	mutex_lock(&ctx->mutex);
	event = event->group_leader;

	perf_event_for_each_child(event, func);
	list_for_each_entry(sibling, &event->sibling_list, group_entry)
		perf_event_for_each_child(sibling, func);
	mutex_unlock(&ctx->mutex);
}
/*
 * PERF_EVENT_IOC_PERIOD handler: update the event's sample period (or
 * sample frequency, for freq events) from the user-supplied value.
 * Returns 0, -EINVAL for non-sampling events / zero or over-limit
 * values, or -EFAULT on a bad user pointer.
 */
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct perf_event_context *ctx = event->ctx;
	int ret = 0;
	u64 value;

	if (!is_sampling_event(event))
		return -EINVAL;

	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	raw_spin_lock_irq(&ctx->lock);
	if (event->attr.freq) {
		/* Frequencies are capped at the sysctl-configured rate. */
		if (value > sysctl_perf_event_sample_rate) {
			ret = -EINVAL;
			goto unlock;
		}

		event->attr.sample_freq = value;
	} else {
		event->attr.sample_period = value;
		event->hw.sample_period = value;
	}
unlock:
	raw_spin_unlock_irq(&ctx->lock);

	return ret;
}
static const struct file_operations perf_fops;
/*
 * Resolve @fd into *@p, verifying it really is a perf event file.
 * On success the caller owns the fd reference and must fdput() it;
 * returns -EBADF on a bad or non-perf fd.
 */
static inline int perf_fget_light(int fd, struct fd *p)
{
	struct fd f = fdget(fd);
	if (!f.file)
		return -EBADF;

	if (f.file->f_op != &perf_fops) {
		fdput(f);
		return -EBADF;
	}
	*p = f;
	return 0;
}
static int perf_event_set_output(struct perf_event *event,
struct perf_event *output_event);
static int perf_event_set_filter(struct perf_event *event, void __user *arg);
/*
 * file_operations ->unlocked_ioctl: dispatch the perf event ioctls.
 * Enable/disable/reset apply either to the whole group (when
 * PERF_IOC_FLAG_GROUP is set in @arg) or just to the event's child tree.
 */
static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct perf_event *event = file->private_data;
	void (*func)(struct perf_event *);
	u32 flags = arg;

	switch (cmd) {
	case PERF_EVENT_IOC_ENABLE:
		func = perf_event_enable;
		break;
	case PERF_EVENT_IOC_DISABLE:
		func = perf_event_disable;
		break;
	case PERF_EVENT_IOC_RESET:
		func = perf_event_reset;
		break;

	case PERF_EVENT_IOC_REFRESH:
		return perf_event_refresh(event, arg);

	case PERF_EVENT_IOC_PERIOD:
		return perf_event_period(event, (u64 __user *)arg);

	case PERF_EVENT_IOC_SET_OUTPUT:
	{
		int ret;
		if (arg != -1) {
			struct perf_event *output_event;
			struct fd output;

			ret = perf_fget_light(arg, &output);
			if (ret)
				return ret;
			output_event = output.file->private_data;
			ret = perf_event_set_output(event, output_event);
			fdput(output);
		} else {
			ret = perf_event_set_output(event, NULL);
		}
		return ret;
	}

	case PERF_EVENT_IOC_SET_FILTER:
		return perf_event_set_filter(event, (void __user *)arg);

	default:
		return -ENOTTY;
	}

	if (flags & PERF_IOC_FLAG_GROUP)
		perf_event_for_each(event, func);
	else
		perf_event_for_each_child(event, func);

	return 0;
}
int perf_event_task_enable(void)
{
struct perf_event *event;
mutex_lock(¤t->perf_event_mutex);
list_for_each_entry(event, ¤t->perf_event_list, owner_entry)
perf_event_for_each_child(event, perf_event_enable);
mutex_unlock(¤t->perf_event_mutex);
return 0;
}
int perf_event_task_disable(void)
{
struct perf_event *event;
mutex_lock(¤t->perf_event_mutex);
list_for_each_entry(event, ¤t->perf_event_list, owner_entry)
perf_event_for_each_child(event, perf_event_disable);
mutex_unlock(¤t->perf_event_mutex);
return 0;
}
/*
 * Index published in the userpage for direct counter reads;
 * 0 means "not readable from user space right now".
 */
static int perf_event_index(struct perf_event *event)
{
	if ((event->hw.state & PERF_HES_STOPPED) ||
	    event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;

	return event->pmu->event_idx(event);
}
/*
 * Compute the current enabled/running times from snapshot values,
 * without taking any locks (usable from NMI context). Also returns
 * the perf_clock() timestamp used, via *now.
 */
static void calc_timer_values(struct perf_event *event,
			      u64 *now,
			      u64 *enabled,
			      u64 *running)
{
	u64 clock = perf_clock();
	u64 ctx_time = event->shadow_ctx_time + clock;

	*now = clock;
	*enabled = ctx_time - event->tstamp_enabled;
	*running = ctx_time - event->tstamp_running;
}
/*
 * Weak hook for architectures to add their own fields to the mmap()ed
 * user page (e.g. time conversion data). Default: do nothing.
 */
void __weak arch_perf_update_userpage(struct perf_event_mmap_page *userpg, u64 now)
{
}
/*
* Callers need to ensure there can be no nesting of this function, otherwise
* the seqlock logic goes bad. We can not serialize this because the arch
* code calls this from NMI context.
*/
void perf_event_update_userpage(struct perf_event *event)
{
struct perf_event_mmap_page *userpg;
struct ring_buffer *rb;
u64 enabled, running, now;
rcu_read_lock();
/*
 * compute total_time_enabled, total_time_running
 * based on snapshot values taken when the event
 * was last scheduled in.
 *
 * we cannot simply called update_context_time()
 * because of locking issue as we can be called in
 * NMI context
 */
calc_timer_values(event, &now, &enabled, &running);
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
userpg = rb->user_page;
/*
 * Disable preemption so as to not let the corresponding user-space
 * spin too long if we get preempted.
 */
preempt_disable();
/*
 * The two ->lock increments around the updates, with barrier()s in
 * between, form a seqcount: user space retries its read if it sees
 * an odd value or a change across its read.
 */
++userpg->lock;
barrier();
userpg->index = perf_event_index(event);
userpg->offset = perf_event_count(event);
/* A non-zero index means user space reads the hw counter directly
 * and adds ->offset; subtract the base the hw counter started at. */
if (userpg->index)
userpg->offset -= local64_read(&event->hw.prev_count);
userpg->time_enabled = enabled +
atomic64_read(&event->child_total_time_enabled);
userpg->time_running = running +
atomic64_read(&event->child_total_time_running);
arch_perf_update_userpage(userpg, now);
barrier();
++userpg->lock;
preempt_enable();
unlock:
rcu_read_unlock();
}
/*
 * Fault handler for the perf mmap() area: map the requested page of the
 * ring buffer (pgoff 0 is the user page, the rest is data). Also doubles
 * as the page_mkwrite handler; only the user page may be written.
 */
static int perf_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct perf_event *event = vma->vm_file->private_data;
struct ring_buffer *rb;
int ret = VM_FAULT_SIGBUS;
if (vmf->flags & FAULT_FLAG_MKWRITE) {
/* Only the control/user page (pgoff 0) is writable. */
if (vmf->pgoff == 0)
ret = 0;
return ret;
}
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (!rb)
goto unlock;
/* Write faults on data pages are refused. */
if (vmf->pgoff && (vmf->flags & FAULT_FLAG_WRITE))
goto unlock;
vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
if (!vmf->page)
goto unlock;
get_page(vmf->page);
vmf->page->mapping = vma->vm_file->f_mapping;
vmf->page->index = vmf->pgoff;
ret = 0;
unlock:
rcu_read_unlock();
return ret;
}
/*
 * Link @event onto @rb's event list so ring_buffer_wakeup() can reach it.
 * The emptiness check is repeated under rb->event_lock to close the race
 * with a concurrent attach.
 */
static void ring_buffer_attach(struct perf_event *event,
struct ring_buffer *rb)
{
unsigned long flags;
/* Fast path: already attached. */
if (!list_empty(&event->rb_entry))
return;
spin_lock_irqsave(&rb->event_lock, flags);
if (list_empty(&event->rb_entry))
list_add(&event->rb_entry, &rb->event_list);
spin_unlock_irqrestore(&rb->event_lock, flags);
}
/*
 * Unlink @event from @rb's event list and wake any waiters so they can
 * notice the buffer went away.
 */
static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb)
{
unsigned long flags;
if (list_empty(&event->rb_entry))
return;
spin_lock_irqsave(&rb->event_lock, flags);
list_del_init(&event->rb_entry);
wake_up_all(&event->waitq);
spin_unlock_irqrestore(&rb->event_lock, flags);
}
/*
 * Wake every event attached to @event's ring buffer (several events may
 * redirect their output into the same buffer).
 *
 * Note: the @event parameter is deliberately reused as the RCU list
 * cursor once the buffer has been resolved.
 */
static void ring_buffer_wakeup(struct perf_event *event)
{
struct ring_buffer *rb;
rcu_read_lock();
rb = rcu_dereference(event->rb);
if (rb) {
list_for_each_entry_rcu(event, &rb->event_list, rb_entry)
wake_up_all(&event->waitq);
}
rcu_read_unlock();
}
/* RCU callback: reclaim a ring buffer once all RCU readers are done. */
static void rb_free_rcu(struct rcu_head *rcu_head)
{
	rb_free(container_of(rcu_head, struct ring_buffer, rcu_head));
}
/*
 * Take a reference on @event's ring buffer, if it has one that is still
 * live (refcount > 0). Returns the buffer or NULL.
 */
static struct ring_buffer *ring_buffer_get(struct perf_event *event)
{
	struct ring_buffer *rb;

	rcu_read_lock();
	rb = rcu_dereference(event->rb);
	/* A zero refcount means the buffer is being torn down. */
	if (rb && !atomic_inc_not_zero(&rb->refcount))
		rb = NULL;
	rcu_read_unlock();

	return rb;
}
/*
 * Drop a reference on @rb; the last reference frees it after an RCU
 * grace period. By then no event may still be attached.
 */
static void ring_buffer_put(struct ring_buffer *rb)
{
if (!atomic_dec_and_test(&rb->refcount))
return;
WARN_ON_ONCE(!list_empty(&rb->event_list));
call_rcu(&rb->rcu_head, rb_free_rcu);
}
/*
 * vma ->open: a new mapping of the buffer exists (e.g. after fork or
 * mremap), so bump both the event's and the buffer's mmap counts.
 */
static void perf_mmap_open(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
atomic_inc(&event->mmap_count);
atomic_inc(&event->rb->mmap_count);
}
/*
* A buffer can be mmap()ed multiple times; either directly through the same
* event, or through other events by use of perf_event_set_output().
*
* In order to undo the VM accounting done by perf_mmap() we need to destroy
* the buffer here, where we still have a VM context. This means we need
* to detach all events redirecting to us.
*/
static void perf_mmap_close(struct vm_area_struct *vma)
{
struct perf_event *event = vma->vm_file->private_data;
struct ring_buffer *rb = event->rb;
/* Snapshot accounting data before rb can disappear from under us. */
struct user_struct *mmap_user = rb->mmap_user;
int mmap_locked = rb->mmap_locked;
unsigned long size = perf_data_size(rb);
atomic_dec(&rb->mmap_count);
/* Last munmap() of this event? If not, nothing more to do. */
if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex))
return;
/* Detach current event from the buffer. */
rcu_assign_pointer(event->rb, NULL);
ring_buffer_detach(event, rb);
mutex_unlock(&event->mmap_mutex);
/* If there's still other mmap()s of this buffer, we're done. */
if (atomic_read(&rb->mmap_count)) {
ring_buffer_put(rb); /* can't be last */
return;
}
/*
 * No other mmap()s, detach from all other events that might redirect
 * into the now unreachable buffer. Somewhat complicated by the
 * fact that rb::event_lock otherwise nests inside mmap_mutex.
 */
again:
rcu_read_lock();
list_for_each_entry_rcu(event, &rb->event_list, rb_entry) {
if (!atomic_long_inc_not_zero(&event->refcount)) {
/*
 * This event is en-route to free_event() which will
 * detach it and remove it from the list.
 */
continue;
}
/* Must drop RCU before sleeping on mmap_mutex. */
rcu_read_unlock();
mutex_lock(&event->mmap_mutex);
/*
 * Check we didn't race with perf_event_set_output() which can
 * swizzle the rb from under us while we were waiting to
 * acquire mmap_mutex.
 *
 * If we find a different rb; ignore this event, a next
 * iteration will no longer find it on the list. We have to
 * still restart the iteration to make sure we're not now
 * iterating the wrong list.
 */
if (event->rb == rb) {
rcu_assign_pointer(event->rb, NULL);
ring_buffer_detach(event, rb);
ring_buffer_put(rb); /* can't be last, we still have one */
}
mutex_unlock(&event->mmap_mutex);
put_event(event);
/*
 * Restart the iteration; either we're on the wrong list or
 * destroyed its integrity by doing a deletion.
 */
goto again;
}
rcu_read_unlock();
/*
 * It could be there's still a few 0-ref events on the list; they'll
 * get cleaned up by free_event() -- they'll also still have their
 * ref on the rb and will free it whenever they are done with it.
 *
 * Aside from that, this buffer is 'fully' detached and unmapped,
 * undo the VM accounting.
 */
atomic_long_sub((size >> PAGE_SHIFT) + 1, &mmap_user->locked_vm);
vma->vm_mm->pinned_vm -= mmap_locked;
free_uid(mmap_user);
ring_buffer_put(rb); /* could be last */
}
/*
 * vma callbacks for perf mmap()ed ring buffers. page_mkwrite shares the
 * fault handler, which permits writes only to the user page (pgoff 0).
 */
static const struct vm_operations_struct perf_mmap_vmops = {
.open = perf_mmap_open,
.close = perf_mmap_close,
.fault = perf_mmap_fault,
.page_mkwrite = perf_mmap_fault,
};
/*
 * mmap() handler: allocate (or re-attach to) the event's ring buffer.
 * The mapping is 1 user page + 2^n data pages, and the pinned memory is
 * charged against sysctl_perf_event_mlock and RLIMIT_MEMLOCK.
 */
static int perf_mmap(struct file *file, struct vm_area_struct *vma)
{
struct perf_event *event = file->private_data;
unsigned long user_locked, user_lock_limit;
struct user_struct *user = current_user();
unsigned long locked, lock_limit;
struct ring_buffer *rb;
unsigned long vma_size;
unsigned long nr_pages;
long user_extra, extra;
int ret = 0, flags = 0;
/*
 * Don't allow mmap() of inherited per-task counters. This would
 * create a performance issue due to all children writing to the
 * same rb.
 */
if (event->cpu == -1 && event->attr.inherit)
return -EINVAL;
if (!(vma->vm_flags & VM_SHARED))
return -EINVAL;
vma_size = vma->vm_end - vma->vm_start;
/* First page is the control/user page, the rest is data. */
nr_pages = (vma_size / PAGE_SIZE) - 1;
/*
 * If we have rb pages ensure they're a power-of-two number, so we
 * can do bitmasks instead of modulo.
 */
if (nr_pages != 0 && !is_power_of_2(nr_pages))
return -EINVAL;
if (vma_size != PAGE_SIZE * (1 + nr_pages))
return -EINVAL;
if (vma->vm_pgoff != 0)
return -EINVAL;
WARN_ON_ONCE(event->ctx->parent_ctx);
again:
mutex_lock(&event->mmap_mutex);
if (event->rb) {
/* Re-mmap of an existing buffer must use the same size. */
if (event->rb->nr_pages != nr_pages) {
ret = -EINVAL;
goto unlock;
}
if (!atomic_inc_not_zero(&event->rb->mmap_count)) {
/*
 * Raced against perf_mmap_close() through
 * perf_event_set_output(). Try again, hope for better
 * luck.
 */
mutex_unlock(&event->mmap_mutex);
goto again;
}
goto unlock;
}
user_extra = nr_pages + 1;
user_lock_limit = sysctl_perf_event_mlock >> (PAGE_SHIFT - 10);
/*
 * Increase the limit linearly with more CPUs:
 */
user_lock_limit *= num_online_cpus();
user_locked = atomic_long_read(&user->locked_vm) + user_extra;
/* Whatever exceeds the per-user mlock budget counts against
 * RLIMIT_MEMLOCK of this mm instead. */
extra = 0;
if (user_locked > user_lock_limit)
extra = user_locked - user_lock_limit;
lock_limit = rlimit(RLIMIT_MEMLOCK);
lock_limit >>= PAGE_SHIFT;
locked = vma->vm_mm->pinned_vm + extra;
if ((locked > lock_limit) && perf_paranoid_tracepoint_raw() &&
!capable(CAP_IPC_LOCK)) {
ret = -EPERM;
goto unlock;
}
WARN_ON(event->rb);
if (vma->vm_flags & VM_WRITE)
flags |= RING_BUFFER_WRITABLE;
rb = rb_alloc(nr_pages,
event->attr.watermark ? event->attr.wakeup_watermark : 0,
event->cpu, flags);
if (!rb) {
ret = -ENOMEM;
goto unlock;
}
atomic_set(&rb->mmap_count, 1);
rb->mmap_locked = extra;
rb->mmap_user = get_current_user();
atomic_long_add(user_extra, &user->locked_vm);
vma->vm_mm->pinned_vm += extra;
ring_buffer_attach(event, rb);
rcu_assign_pointer(event->rb, rb);
perf_event_update_userpage(event);
unlock:
if (!ret)
atomic_inc(&event->mmap_count);
mutex_unlock(&event->mmap_mutex);
/*
 * Since pinned accounting is per vm we cannot allow fork() to copy our
 * vma.
 */
vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &perf_mmap_vmops;
return ret;
}
/*
 * fasync() handler: enable/disable SIGIO delivery for this event fd,
 * serialized on the inode mutex.
 */
static int perf_fasync(int fd, struct file *filp, int on)
{
	struct inode *inode = file_inode(filp);
	struct perf_event *event = filp->private_data;
	int err;

	mutex_lock(&inode->i_mutex);
	err = fasync_helper(fd, filp, on, &event->fasync);
	mutex_unlock(&inode->i_mutex);

	/* fasync_helper() returns a positive value on success; mask it. */
	return err < 0 ? err : 0;
}
/* File operations backing every perf event file descriptor. */
static const struct file_operations perf_fops = {
.llseek = no_llseek,
.release = perf_release,
.read = perf_read,
.poll = perf_poll,
.unlocked_ioctl = perf_ioctl,
/* Same handler for compat: all ioctl args fit in 32 bits or are
 * pointer-sized user addresses. */
.compat_ioctl = perf_ioctl,
.mmap = perf_mmap,
.fasync = perf_fasync,
};
/*
* Perf event wakeup
*
* If there's data, ensure we set the poll() state and publish everything
* to user-space before waking everybody up.
*/
void perf_event_wakeup(struct perf_event *event)
{
/* Wake poll()ers on every event attached to our ring buffer. */
ring_buffer_wakeup(event);
/* Deliver the queued SIGIO/POLL_* notification, if any. */
if (event->pending_kill) {
kill_fasync(&event->fasync, SIGIO, event->pending_kill);
event->pending_kill = 0;
}
}
/*
 * irq_work callback: perform the disable/wakeup actions that were
 * requested from NMI (or other atomic) context, now that we are in a
 * safe context.
 */
static void perf_pending_event(struct irq_work *entry)
{
struct perf_event *event = container_of(entry,
struct perf_event, pending);
if (event->pending_disable) {
event->pending_disable = 0;
__perf_event_disable(event);
}
if (event->pending_wakeup) {
event->pending_wakeup = 0;
perf_event_wakeup(event);
}
}
/*
* We assume there is only KVM supporting the callbacks.
* Later on, we might change it to a list if there is
* another virtualization implementation supporting the callbacks.
*/
struct perf_guest_info_callbacks *perf_guest_cbs;
/*
 * Install the (single) set of guest-info callbacks; see the comment
 * above perf_guest_cbs -- only one registrant (KVM) is assumed.
 */
int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = cbs;
return 0;
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);
/* Remove the guest-info callbacks; @cbs is unused since only one set
 * can be registered at a time. */
int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
perf_guest_cbs = NULL;
return 0;
}
EXPORT_SYMBOL_GPL(perf_unregister_guest_info_callbacks);
/*
 * Emit one u64 per register selected in @mask (bit i => register i),
 * in ascending bit order, into the output handle.
 */
static void
perf_output_sample_regs(struct perf_output_handle *handle,
struct pt_regs *regs, u64 mask)
{
int bit;
for_each_set_bit(bit, (const unsigned long *) &mask,
sizeof(mask) * BITS_PER_BYTE) {
u64 val;
val = perf_reg_value(regs, bit);
perf_output_put(handle, val);
}
}
/*
 * Resolve the user-space registers for the sample. If we interrupted
 * kernel code, fall back to the task's saved user regs; pure kernel
 * threads (no mm) have none, in which case regs_user is left untouched
 * (its ->abi is expected to signal "no regs" to the caller).
 */
static void perf_sample_regs_user(struct perf_regs_user *regs_user,
struct pt_regs *regs)
{
if (!user_mode(regs)) {
if (current->mm)
regs = task_pt_regs(current);
else
regs = NULL;
}
if (regs) {
regs_user->regs = regs;
regs_user->abi = perf_reg_abi(current);
}
}
/*
* Get remaining task size from user stack pointer.
*
* It'd be better to take stack vma map and limit this more
* precisly, but there's no way to get it safely under interrupt,
* so using TASK_SIZE as limit.
*/
/*
 * Get remaining task size from user stack pointer.
 *
 * It'd be better to take stack vma map and limit this more
 * precisly, but there's no way to get it safely under interrupt,
 * so using TASK_SIZE as limit.
 */
static u64 perf_ustack_task_size(struct pt_regs *regs)
{
	unsigned long sp = perf_user_stack_pointer(regs);

	if (sp == 0 || sp >= TASK_SIZE)
		return 0;

	return TASK_SIZE - sp;
}
/*
 * Clamp the requested user-stack dump size so it fits both the
 * remaining task address space and the u16 sample-size field.
 * Returns the dump size to use (0 = nothing to dump).
 */
static u16
perf_sample_ustack_size(u16 stack_size, u16 header_size,
struct pt_regs *regs)
{
u64 task_size;
/* No regs, no stack pointer, no dump. */
if (!regs)
return 0;
/*
 * Check if we fit in with the requested stack size into the:
 * - TASK_SIZE
 * If we don't, we limit the size to the TASK_SIZE.
 *
 * - remaining sample size
 * If we don't, we customize the stack size to
 * fit in to the remaining sample size.
 */
task_size = min((u64) USHRT_MAX, perf_ustack_task_size(regs));
stack_size = min(stack_size, (u16) task_size);
/* Current header size plus static size and dynamic size. */
header_size += 2 * sizeof(u64);
/* Do we fit in with the current stack dump size? */
/* u16 wrap-around here detects overflow of the total sample size. */
if ((u16) (header_size + stack_size) < header_size) {
/*
 * If we overflow the maximum size for the sample,
 * we customize the stack dump size to fit in.
 */
stack_size = USHRT_MAX - header_size - sizeof(u64);
stack_size = round_up(stack_size, sizeof(u64));
}
return stack_size;
}
/*
 * Emit the user stack dump: static size, raw data (zero-padded where the
 * copy faulted), then the dynamically dumped size. A NULL @regs (kernel
 * thread) emits just a zero size.
 */
static void
perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
struct pt_regs *regs)
{
/* Case of a kernel thread, nothing to dump */
if (!regs) {
u64 size = 0;
perf_output_put(handle, size);
} else {
unsigned long sp;
unsigned int rem;
u64 dyn_size;
/*
 * We dump:
 * static size
 * - the size requested by user or the best one we can fit
 * in to the sample max size
 * data
 * - user stack dump data
 * dynamic size
 * - the actual dumped size
 */
/* Static size. */
perf_output_put(handle, dump_size);
/* Data. */
sp = perf_user_stack_pointer(regs);
/* rem = bytes that could NOT be copied from user space. */
rem = __output_copy_user(handle, (void *) sp, dump_size);
dyn_size = dump_size - rem;
/* Keep the record layout fixed: skip over the uncopied tail. */
perf_output_skip(handle, rem);
/* Dynamic size. */
perf_output_put(handle, dyn_size);
}
}
/*
 * Fill the sample-id fields (tid/time/id/stream_id/cpu) that are
 * appended to non-SAMPLE records, and grow the header size accordingly.
 * Must stay in sync with __perf_event__output_id_sample().
 */
static void __perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
u64 sample_type = event->attr.sample_type;
data->type = sample_type;
header->size += event->id_header_size;
if (sample_type & PERF_SAMPLE_TID) {
/* namespace issues */
data->tid_entry.pid = perf_event_pid(event, current);
data->tid_entry.tid = perf_event_tid(event, current);
}
if (sample_type & PERF_SAMPLE_TIME)
data->time = perf_clock();
if (sample_type & PERF_SAMPLE_ID)
data->id = primary_event_id(event);
if (sample_type & PERF_SAMPLE_STREAM_ID)
data->stream_id = event->id;
if (sample_type & PERF_SAMPLE_CPU) {
data->cpu_entry.cpu = raw_smp_processor_id();
data->cpu_entry.reserved = 0;
}
}
/* Prepare sample-id data for a non-SAMPLE record, but only when the
 * event requested it via attr.sample_id_all. */
void perf_event_header__init_id(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
if (event->attr.sample_id_all)
__perf_event_header__init_id(header, data, event);
}
/*
 * Emit the sample-id fields prepared by __perf_event_header__init_id(),
 * in the same order and under the same conditions.
 */
static void __perf_event__output_id_sample(struct perf_output_handle *handle,
struct perf_sample_data *data)
{
u64 sample_type = data->type;
if (sample_type & PERF_SAMPLE_TID)
perf_output_put(handle, data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
perf_output_put(handle, data->time);
if (sample_type & PERF_SAMPLE_ID)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
perf_output_put(handle, data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
}
/* Emit the sample-id trailer for a non-SAMPLE record, if requested. */
void perf_event__output_id_sample(struct perf_event *event,
struct perf_output_handle *handle,
struct perf_sample_data *sample)
{
if (event->attr.sample_id_all)
__perf_event__output_id_sample(handle, sample);
}
/*
 * Emit the read_format payload for a single (non-group) event:
 * value [, time_enabled] [, time_running] [, id].
 */
static void perf_output_read_one(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{
u64 read_format = event->attr.read_format;
u64 values[4];
int n = 0;
values[n++] = perf_event_count(event);
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
values[n++] = enabled +
atomic64_read(&event->child_total_time_enabled);
}
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
values[n++] = running +
atomic64_read(&event->child_total_time_running);
}
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(event);
__output_copy(handle, values, n * sizeof(u64));
}
/*
* XXX PERF_FORMAT_GROUP vs inherited events seems difficult.
*/
/*
 * Emit the PERF_FORMAT_GROUP read payload: nr [, time_enabled]
 * [, time_running], then {value [, id]} for the leader and each sibling.
 */
static void perf_output_read_group(struct perf_output_handle *handle,
struct perf_event *event,
u64 enabled, u64 running)
{
struct perf_event *leader = event->group_leader, *sub;
u64 read_format = event->attr.read_format;
u64 values[5];
int n = 0;
values[n++] = 1 + leader->nr_siblings;
if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
values[n++] = enabled;
if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
values[n++] = running;
/* Only re-read counters other than the sampling event itself;
 * the sampling event's count is already up to date. */
if (leader != event)
leader->pmu->read(leader);
values[n++] = perf_event_count(leader);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(leader);
__output_copy(handle, values, n * sizeof(u64));
list_for_each_entry(sub, &leader->sibling_list, group_entry) {
n = 0;
if (sub != event)
sub->pmu->read(sub);
values[n++] = perf_event_count(sub);
if (read_format & PERF_FORMAT_ID)
values[n++] = primary_event_id(sub);
__output_copy(handle, values, n * sizeof(u64));
}
}
#define PERF_FORMAT_TOTAL_TIMES (PERF_FORMAT_TOTAL_TIME_ENABLED|\
PERF_FORMAT_TOTAL_TIME_RUNNING)
/*
 * Emit the event's read_format payload, dispatching to the group or
 * single-event encoder.
 *
 * Consistency fix: the GROUP check now uses the cached read_format
 * local instead of re-reading event->attr.read_format (same value,
 * matches how the rest of the function uses it).
 */
static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
{
	u64 enabled = 0, running = 0, now;
	u64 read_format = event->attr.read_format;

	/*
	 * compute total_time_enabled, total_time_running
	 * based on snapshot values taken when the event
	 * was last scheduled in.
	 *
	 * we cannot simply called update_context_time()
	 * because of locking issue as we are called in
	 * NMI context
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIMES)
		calc_timer_values(event, &now, &enabled, &running);

	if (read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event, enabled, running);
	else
		perf_output_read_one(handle, event, enabled, running);
}
/*
 * Write a PERF_RECORD_SAMPLE into the ring buffer. The field order and
 * sizes must match exactly what perf_prepare_sample() accounted for in
 * header->size.
 */
void perf_output_sample(struct perf_output_handle *handle,
struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event)
{
u64 sample_type = data->type;
perf_output_put(handle, *header);
if (sample_type & PERF_SAMPLE_IP)
perf_output_put(handle, data->ip);
if (sample_type & PERF_SAMPLE_TID)
perf_output_put(handle, data->tid_entry);
if (sample_type & PERF_SAMPLE_TIME)
perf_output_put(handle, data->time);
if (sample_type & PERF_SAMPLE_ADDR)
perf_output_put(handle, data->addr);
if (sample_type & PERF_SAMPLE_ID)
perf_output_put(handle, data->id);
if (sample_type & PERF_SAMPLE_STREAM_ID)
perf_output_put(handle, data->stream_id);
if (sample_type & PERF_SAMPLE_CPU)
perf_output_put(handle, data->cpu_entry);
if (sample_type & PERF_SAMPLE_PERIOD)
perf_output_put(handle, data->period);
if (sample_type & PERF_SAMPLE_READ)
perf_output_read(handle, event);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
if (data->callchain) {
int size = 1;
/* NOTE(review): this inner check repeats the outer one and is
 * always true here; kept as-is since this is a doc-only pass. */
if (data->callchain)
size += data->callchain->nr;
size *= sizeof(u64);
__output_copy(handle, data->callchain, size);
} else {
/* No callchain: emit nr = 0 so the record layout stays fixed. */
u64 nr = 0;
perf_output_put(handle, nr);
}
}
if (sample_type & PERF_SAMPLE_RAW) {
if (data->raw) {
perf_output_put(handle, data->raw->size);
__output_copy(handle, data->raw->data,
data->raw->size);
} else {
/* Placeholder keeping the u64 alignment of the record. */
struct {
u32 size;
u32 data;
} raw = {
.size = sizeof(u32),
.data = 0,
};
perf_output_put(handle, raw);
}
}
/* Wakeup-counting for events using wakeup_events rather than a
 * byte watermark; bumps rb->wakeup once enough samples were written. */
if (!event->attr.watermark) {
int wakeup_events = event->attr.wakeup_events;
if (wakeup_events) {
struct ring_buffer *rb = handle->rb;
int events = local_inc_return(&rb->events);
if (events >= wakeup_events) {
local_sub(wakeup_events, &rb->events);
local_inc(&rb->wakeup);
}
}
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
if (data->br_stack) {
size_t size;
size = data->br_stack->nr
* sizeof(struct perf_branch_entry);
perf_output_put(handle, data->br_stack->nr);
perf_output_copy(handle, data->br_stack->entries, size);
} else {
/*
 * we always store at least the value of nr
 */
u64 nr = 0;
perf_output_put(handle, nr);
}
}
if (sample_type & PERF_SAMPLE_REGS_USER) {
u64 abi = data->regs_user.abi;
/*
 * If there are no regs to dump, notice it through
 * first u64 being zero (PERF_SAMPLE_REGS_ABI_NONE).
 */
perf_output_put(handle, abi);
if (abi) {
u64 mask = event->attr.sample_regs_user;
perf_output_sample_regs(handle,
data->regs_user.regs,
mask);
}
}
if (sample_type & PERF_SAMPLE_STACK_USER)
perf_output_sample_ustack(handle,
data->stack_user_size,
data->regs_user.regs);
if (sample_type & PERF_SAMPLE_WEIGHT)
perf_output_put(handle, data->weight);
if (sample_type & PERF_SAMPLE_DATA_SRC)
perf_output_put(handle, data->data_src.val);
}
/*
 * Compute the sample header and gather the variable-size sample data so
 * header->size is exact before perf_output_begin() reserves space.
 * Every size added here must correspond to data emitted in
 * perf_output_sample(), in the same order.
 */
void perf_prepare_sample(struct perf_event_header *header,
struct perf_sample_data *data,
struct perf_event *event,
struct pt_regs *regs)
{
u64 sample_type = event->attr.sample_type;
header->type = PERF_RECORD_SAMPLE;
/* event->header_size covers the fixed-size sample fields. */
header->size = sizeof(*header) + event->header_size;
header->misc = 0;
header->misc |= perf_misc_flags(regs);
__perf_event_header__init_id(header, data, event);
if (sample_type & PERF_SAMPLE_IP)
data->ip = perf_instruction_pointer(regs);
if (sample_type & PERF_SAMPLE_CALLCHAIN) {
/* 1 leading u64 for the entry count (nr). */
int size = 1;
data->callchain = perf_callchain(event, regs);
if (data->callchain)
size += data->callchain->nr;
header->size += size * sizeof(u64);
}
if (sample_type & PERF_SAMPLE_RAW) {
int size = sizeof(u32);
if (data->raw)
size += data->raw->size;
else
size += sizeof(u32);
/* Raw data must keep the record u64-aligned. */
WARN_ON_ONCE(size & (sizeof(u64)-1));
header->size += size;
}
if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
int size = sizeof(u64); /* nr */
if (data->br_stack) {
size += data->br_stack->nr
* sizeof(struct perf_branch_entry);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_REGS_USER) {
/* regs dump ABI info */
int size = sizeof(u64);
perf_sample_regs_user(&data->regs_user, regs);
if (data->regs_user.regs) {
u64 mask = event->attr.sample_regs_user;
size += hweight64(mask) * sizeof(u64);
}
header->size += size;
}
if (sample_type & PERF_SAMPLE_STACK_USER) {
/*
 * Either we need PERF_SAMPLE_STACK_USER bit to be allways
 * processed as the last one or have additional check added
 * in case new sample type is added, because we could eat
 * up the rest of the sample size.
 */
struct perf_regs_user *uregs = &data->regs_user;
u16 stack_size = event->attr.sample_stack_user;
u16 size = sizeof(u64);
/* Resolve user regs if REGS_USER didn't already do it. */
if (!uregs->abi)
perf_sample_regs_user(uregs, regs);
stack_size = perf_sample_ustack_size(stack_size, header->size,
uregs->regs);
/*
 * If there is something to dump, add space for the dump
 * itself and for the field that tells the dynamic size,
 * which is how many have been actually dumped.
 */
if (stack_size)
size += sizeof(u64) + stack_size;
data->stack_user_size = stack_size;
header->size += size;
}
}
/*
 * Emit one complete PERF_RECORD_SAMPLE for @event: size it, reserve
 * ring-buffer space, write it, and finish the transaction.
 */
static void perf_event_output(struct perf_event *event,
struct perf_sample_data *data,
struct pt_regs *regs)
{
struct perf_output_handle handle;
struct perf_event_header header;
/* protect the callchain buffers */
rcu_read_lock();
perf_prepare_sample(&header, data, event, regs);
/* perf_output_begin() fails when the buffer is full or gone. */
if (perf_output_begin(&handle, event, header.size))
goto exit;
perf_output_sample(&handle, &header, data, event);
perf_output_end(&handle);
exit:
rcu_read_unlock();
}
/*
* read event_id
*/
/* Fixed-size head of a PERF_RECORD_READ record; the read_format payload
 * and optional sample-id trailer follow it in the buffer. */
struct perf_read_event {
struct perf_event_header header;
u32 pid;
u32 tid;
};
/*
 * Emit a PERF_RECORD_READ for @event on behalf of @task: header,
 * pid/tid, read_format payload, then the sample-id trailer.
 */
static void
perf_event_read_event(struct perf_event *event,
struct task_struct *task)
{
struct perf_output_handle handle;
struct perf_sample_data sample;
struct perf_read_event read_event = {
.header = {
.type = PERF_RECORD_READ,
.misc = 0,
.size = sizeof(read_event) + event->read_size,
},
.pid = perf_event_pid(event, task),
.tid = perf_event_tid(event, task),
};
int ret;
perf_event_header__init_id(&read_event.header, &sample, event);
ret = perf_output_begin(&handle, event, read_event.header.size);
if (ret)
return;
perf_output_put(&handle, read_event);
perf_output_read(&handle, event);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
}
typedef int (perf_event_aux_match_cb)(struct perf_event *event, void *data);
typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
/*
 * Run @output on every live, matching event in @ctx. Caller holds
 * rcu_read_lock() (the event list is walked with _rcu).
 */
static void
perf_event_aux_ctx(struct perf_event_context *ctx,
perf_event_aux_match_cb match,
perf_event_aux_output_cb output,
void *data)
{
struct perf_event *event;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
/* Skip exiting/dead events. */
if (event->state < PERF_EVENT_STATE_INACTIVE)
continue;
if (!event_filter_match(event))
continue;
if (match(event, data))
output(event, data);
}
}
/*
 * Deliver a side-band record (task/comm/mmap) to all interested events:
 * every PMU's CPU context, plus either the explicit @task_ctx or the
 * current task's context for each PMU.
 */
static void
perf_event_aux(perf_event_aux_match_cb match,
perf_event_aux_output_cb output,
void *data,
struct perf_event_context *task_ctx)
{
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
struct pmu *pmu;
int ctxn;
rcu_read_lock();
list_for_each_entry_rcu(pmu, &pmus, entry) {
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
/* Several pmus may share a cpuctx; only visit it once. */
if (cpuctx->unique_pmu != pmu)
goto next;
perf_event_aux_ctx(&cpuctx->ctx, match, output, data);
/* With an explicit task context, it is handled once, below. */
if (task_ctx)
goto next;
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto next;
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (ctx)
perf_event_aux_ctx(ctx, match, output, data);
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
if (task_ctx) {
preempt_disable();
perf_event_aux_ctx(task_ctx, match, output, data);
preempt_enable();
}
rcu_read_unlock();
}
/*
* task tracking -- fork/exit
*
* enabled by: attr.comm | attr.mmap | attr.mmap_data | attr.task
*/
/* Work item for emitting a PERF_RECORD_FORK/EXIT; event_id mirrors the
 * on-buffer record layout. */
struct perf_task_event {
struct task_struct *task;
struct perf_event_context *task_ctx;
struct {
struct perf_event_header header;
u32 pid;
u32 ppid;
u32 tid;
u32 ptid;
u64 time;
} event_id;
};
/*
 * Emit one task (fork/exit) record to @event. The pid/tid fields are
 * filled per-event since pid translation depends on the event's
 * namespace view; header.size is restored on exit because the shared
 * event_id is reused for every receiving event.
 */
static void perf_event_task_output(struct perf_event *event,
void *data)
{
struct perf_task_event *task_event = data;
struct perf_output_handle handle;
struct perf_sample_data sample;
struct task_struct *task = task_event->task;
int ret, size = task_event->event_id.header.size;
perf_event_header__init_id(&task_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
task_event->event_id.header.size);
if (ret)
goto out;
task_event->event_id.pid = perf_event_pid(event, task);
task_event->event_id.ppid = perf_event_pid(event, current);
task_event->event_id.tid = perf_event_tid(event, task);
task_event->event_id.ptid = perf_event_tid(event, current);
perf_output_put(&handle, task_event->event_id);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
task_event->event_id.header.size = size;
}
/* An event receives task records if it asked for any side-band data
 * (comm, mmap, mmap_data or task). */
static int perf_event_task_match(struct perf_event *event,
				 void *data __maybe_unused)
{
	if (event->attr.comm)
		return 1;
	if (event->attr.mmap || event->attr.mmap_data)
		return 1;
	return event->attr.task;
}
/*
 * Broadcast a PERF_RECORD_FORK (@new != 0) or PERF_RECORD_EXIT for
 * @task to all interested events.
 */
static void perf_event_task(struct task_struct *task,
struct perf_event_context *task_ctx,
int new)
{
struct perf_task_event task_event;
/* Cheap global check: nobody listening, nothing to build. */
if (!atomic_read(&nr_comm_events) &&
!atomic_read(&nr_mmap_events) &&
!atomic_read(&nr_task_events))
return;
task_event = (struct perf_task_event){
.task = task,
.task_ctx = task_ctx,
.event_id = {
.header = {
.type = new ? PERF_RECORD_FORK : PERF_RECORD_EXIT,
.misc = 0,
.size = sizeof(task_event.event_id),
},
/* .pid */
/* .ppid */
/* .tid */
/* .ptid */
.time = perf_clock(),
},
};
perf_event_aux(perf_event_task_match,
perf_event_task_output,
&task_event,
task_ctx);
}
/* Hook from the fork path: emit PERF_RECORD_FORK for the new @task. */
void perf_event_fork(struct task_struct *task)
{
perf_event_task(task, NULL, 1);
}
/*
* comm tracking
*/
/* Work item for emitting a PERF_RECORD_COMM; the comm string follows
 * event_id in the buffer, padded to a u64 boundary. */
struct perf_comm_event {
struct task_struct *task;
char *comm;
int comm_size;
struct {
struct perf_event_header header;
u32 pid;
u32 tid;
} event_id;
};
/*
 * Emit one comm record to @event: header, pid/tid (translated for the
 * event's namespace), the comm string, then the sample-id trailer.
 * header.size is restored since the event_id is shared across events.
 */
static void perf_event_comm_output(struct perf_event *event,
void *data)
{
struct perf_comm_event *comm_event = data;
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = comm_event->event_id.header.size;
int ret;
perf_event_header__init_id(&comm_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
comm_event->event_id.header.size);
if (ret)
goto out;
comm_event->event_id.pid = perf_event_pid(event, comm_event->task);
comm_event->event_id.tid = perf_event_tid(event, comm_event->task);
perf_output_put(&handle, comm_event->event_id);
__output_copy(&handle, comm_event->comm,
comm_event->comm_size);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
comm_event->event_id.header.size = size;
}
/* Only events that requested comm tracking receive comm records. */
static int perf_event_comm_match(struct perf_event *event,
				 void *data __maybe_unused)
{
	return event->attr.comm != 0;
}
/*
 * Snapshot the task's comm into a local buffer (u64-padded) and
 * broadcast the record to all interested events.
 */
static void perf_event_comm_event(struct perf_comm_event *comm_event)
{
char comm[TASK_COMM_LEN];
unsigned int size;
/* Zero-fill first so the u64-aligned padding copied out is zeroes. */
memset(comm, 0, sizeof(comm));
strlcpy(comm, comm_event->task->comm, sizeof(comm));
size = ALIGN(strlen(comm)+1, sizeof(u64));
comm_event->comm = comm;
comm_event->comm_size = size;
comm_event->event_id.header.size = sizeof(comm_event->event_id) + size;
perf_event_aux(perf_event_comm_match,
perf_event_comm_output,
comm_event,
NULL);
}
/*
 * Hook from set_task_comm() (i.e. exec or prctl rename): re-enable
 * enable_on_exec events in the task's contexts, then emit a
 * PERF_RECORD_COMM if anyone is listening.
 */
void perf_event_comm(struct task_struct *task)
{
struct perf_comm_event comm_event;
struct perf_event_context *ctx;
int ctxn;
rcu_read_lock();
for_each_task_context_nr(ctxn) {
ctx = task->perf_event_ctxp[ctxn];
if (!ctx)
continue;
perf_event_enable_on_exec(ctx);
}
rcu_read_unlock();
if (!atomic_read(&nr_comm_events))
return;
comm_event = (struct perf_comm_event){
.task = task,
/* .comm */
/* .comm_size */
.event_id = {
.header = {
.type = PERF_RECORD_COMM,
.misc = 0,
/* .size */
},
/* .pid */
/* .tid */
},
};
perf_event_comm_event(&comm_event);
}
/*
* mmap tracking
*/
/* Work item for emitting a PERF_RECORD_MMAP; the file name follows
 * event_id in the buffer, padded to a u64 boundary. */
struct perf_mmap_event {
struct vm_area_struct *vma;
const char *file_name;
int file_size;
struct {
struct perf_event_header header;
u32 pid;
u32 tid;
u64 start;
u64 len;
u64 pgoff;
} event_id;
};
/*
 * Emit one mmap record to @event: header, pid/tid, start/len/pgoff,
 * the file name, then the sample-id trailer. header.size is restored
 * since the event_id is shared across receiving events.
 */
static void perf_event_mmap_output(struct perf_event *event,
void *data)
{
struct perf_mmap_event *mmap_event = data;
struct perf_output_handle handle;
struct perf_sample_data sample;
int size = mmap_event->event_id.header.size;
int ret;
perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
ret = perf_output_begin(&handle, event,
mmap_event->event_id.header.size);
if (ret)
goto out;
mmap_event->event_id.pid = perf_event_pid(event, current);
mmap_event->event_id.tid = perf_event_tid(event, current);
perf_output_put(&handle, mmap_event->event_id);
__output_copy(&handle, mmap_event->file_name,
mmap_event->file_size);
perf_event__output_id_sample(event, &handle, &sample);
perf_output_end(&handle);
out:
mmap_event->event_id.header.size = size;
}
/* Executable mappings go to attr.mmap listeners, data mappings to
 * attr.mmap_data listeners. */
static int perf_event_mmap_match(struct perf_event *event,
				 void *data)
{
	struct perf_mmap_event *mmap_event = data;
	int executable = mmap_event->vma->vm_flags & VM_EXEC;

	if (executable)
		return event->attr.mmap;

	return event->attr.mmap_data;
}
/*
 * Resolve a printable name for the vma (file path, arch-specific name,
 * or a [vdso]/[heap]/[stack]//anon pseudo-name), then broadcast the
 * mmap record to all interested events.
 */
static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
{
struct vm_area_struct *vma = mmap_event->vma;
struct file *file = vma->vm_file;
unsigned int size;
char tmp[16];
char *buf = NULL;
const char *name;
memset(tmp, 0, sizeof(tmp));
if (file) {
/*
 * d_path works from the end of the rb backwards, so we
 * need to add enough zero bytes after the string to handle
 * the 64bit alignment we do later.
 */
buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
if (!buf) {
name = strncpy(tmp, "//enomem", sizeof(tmp));
goto got_name;
}
name = d_path(&file->f_path, buf, PATH_MAX);
if (IS_ERR(name)) {
name = strncpy(tmp, "//toolong", sizeof(tmp));
goto got_name;
}
} else {
if (arch_vma_name(mmap_event->vma)) {
name = strncpy(tmp, arch_vma_name(mmap_event->vma),
sizeof(tmp) - 1);
/* strncpy may not terminate; force it. */
tmp[sizeof(tmp) - 1] = '\0';
goto got_name;
}
if (!vma->vm_mm) {
name = strncpy(tmp, "[vdso]", sizeof(tmp));
goto got_name;
} else if (vma->vm_start <= vma->vm_mm->start_brk &&
vma->vm_end >= vma->vm_mm->brk) {
name = strncpy(tmp, "[heap]", sizeof(tmp));
goto got_name;
} else if (vma->vm_start <= vma->vm_mm->start_stack &&
vma->vm_end >= vma->vm_mm->start_stack) {
name = strncpy(tmp, "[stack]", sizeof(tmp));
goto got_name;
}
name = strncpy(tmp, "//anon", sizeof(tmp));
goto got_name;
}
got_name:
/* Pad the name (incl. NUL) to a u64 boundary for the record. */
size = ALIGN(strlen(name)+1, sizeof(u64));
mmap_event->file_name = name;
mmap_event->file_size = size;
if (!(vma->vm_flags & VM_EXEC))
mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
mmap_event->event_id.header.size = sizeof(mmap_event->event_id) + size;
perf_event_aux(perf_event_mmap_match,
perf_event_mmap_output,
mmap_event,
NULL);
/* kfree(NULL) is fine for the non-file paths. */
kfree(buf);
}
/*
 * Emit a PERF_RECORD_MMAP record for @vma, but only when at least one
 * event is interested (nr_mmap_events non-zero).
 */
void perf_event_mmap(struct vm_area_struct *vma)
{
	struct perf_mmap_event mmap_event;

	if (!atomic_read(&nr_mmap_events))
		return;

	/* file_name/file_size/pid/tid/size are filled in later. */
	mmap_event = (struct perf_mmap_event){
		.vma	= vma,
		/* .file_name */
		/* .file_size */
		.event_id  = {
			.header = {
				.type = PERF_RECORD_MMAP,
				.misc = PERF_RECORD_MISC_USER,
				/* .size */
			},
			/* .pid */
			/* .tid */
			.start  = vma->vm_start,
			.len    = vma->vm_end - vma->vm_start,
			.pgoff  = (u64)vma->vm_pgoff << PAGE_SHIFT,
		},
	};

	perf_event_mmap_event(&mmap_event);
}
/*
* IRQ throttle logging
*/
/*
 * Emit a PERF_RECORD_THROTTLE / PERF_RECORD_UNTHROTTLE record for
 * @event.  @enable non-zero means the event is being unthrottled.
 */
static void perf_log_throttle(struct perf_event *event, int enable)
{
	struct perf_output_handle handle;
	struct perf_sample_data sample;
	int ret;

	struct {
		struct perf_event_header	header;
		u64				time;
		u64				id;
		u64				stream_id;
	} throttle_event = {
		.header = {
			.type = PERF_RECORD_THROTTLE,
			.misc = 0,
			.size = sizeof(throttle_event),
		},
		.time		= perf_clock(),
		.id		= primary_event_id(event),
		.stream_id	= event->id,
	};

	if (enable)
		throttle_event.header.type = PERF_RECORD_UNTHROTTLE;

	/* May grow header.size to make room for the sample id fields. */
	perf_event_header__init_id(&throttle_event.header, &sample, event);

	ret = perf_output_begin(&handle, event,
				throttle_event.header.size);
	if (ret)
		return;

	perf_output_put(&handle, throttle_event);
	perf_event__output_id_sample(event, &handle, &sample);
	perf_output_end(&handle);
}
/*
* Generic event overflow handling, sampling.
*/
/*
 * Handle one overflow of @event: apply interrupt throttling, adjust the
 * sampling period for freq-based events, honour the user's event_limit,
 * and deliver the sample.  Returns non-zero when the event should be
 * stopped (throttled or limit reached).
 */
static int __perf_event_overflow(struct perf_event *event,
				   int throttle, struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	int events = atomic_read(&event->event_limit);
	struct hw_perf_event *hwc = &event->hw;
	u64 seq;
	int ret = 0;

	/*
	 * Non-sampling counters might still use the PMI to fold short
	 * hardware counters, ignore those.
	 */
	if (unlikely(!is_sampling_event(event)))
		return 0;

	/*
	 * Count interrupts per throttle period; a new perf_throttled_seq
	 * value means a new period, so restart the interrupt count.
	 */
	seq = __this_cpu_read(perf_throttled_seq);
	if (seq != hwc->interrupts_seq) {
		hwc->interrupts_seq = seq;
		hwc->interrupts = 1;
	} else {
		hwc->interrupts++;
		if (unlikely(throttle
			     && hwc->interrupts >= max_samples_per_tick)) {
			__this_cpu_inc(perf_throttled_count);
			hwc->interrupts = MAX_INTERRUPTS;
			perf_log_throttle(event, 0);
			ret = 1;
		}
	}

	if (event->attr.freq) {
		u64 now = perf_clock();
		s64 delta = now - hwc->freq_time_stamp;

		hwc->freq_time_stamp = now;

		/* Only re-tune the period from a plausible interval. */
		if (delta > 0 && delta < 2*TICK_NSEC)
			perf_adjust_period(event, delta, hwc->last_period, true);
	}

	/*
	 * XXX event_limit might not quite work as expected on inherited
	 * events
	 */

	event->pending_kill = POLL_IN;
	if (events && atomic_dec_and_test(&event->event_limit)) {
		ret = 1;
		event->pending_kill = POLL_HUP;
		event->pending_disable = 1;
		irq_work_queue(&event->pending);
	}

	if (event->overflow_handler)
		event->overflow_handler(event, data, regs);
	else
		perf_event_output(event, data, regs);

	if (event->fasync && event->pending_kill) {
		event->pending_wakeup = 1;
		irq_work_queue(&event->pending);
	}

	return ret;
}
/*
 * Public overflow entry point: identical to __perf_event_overflow()
 * with throttling enabled.
 */
int perf_event_overflow(struct perf_event *event,
			  struct perf_sample_data *data,
			  struct pt_regs *regs)
{
	int throttle = 1;

	return __perf_event_overflow(event, throttle, data, regs);
}
/*
* Generic software event infrastructure
*/
/*
 * Per-cpu hash table of active software events, plus the recursion
 * counters used to keep software events from re-entering themselves.
 */
struct swevent_htable {
	/* RCU-protected bucket array; NULL until first user. */
	struct swevent_hlist		*swevent_hlist;
	struct mutex			hlist_mutex;
	int				hlist_refcount;

	/* Recursion avoidance in each contexts */
	int				recursion[PERF_NR_CONTEXTS];

	/* Keeps track of cpu being initialized/exited */
	bool				online;
};

static DEFINE_PER_CPU(struct swevent_htable, swevent_htable);
/*
* We directly increment event->count and keep a second value in
* event->hw.period_left to count intervals. This period event
* is kept in the range [-sample_period, 0] so that we can use the
* sign as trigger.
*/
/*
 * We directly increment event->count and keep a second value in
 * event->hw.period_left to count intervals. This period event
 * is kept in the range [-sample_period, 0] so that we can use the
 * sign as trigger.
 *
 * Returns the number of whole periods consumed; 0 means no overflow
 * is due yet.  Lockless: retries the cmpxchg on concurrent update.
 */
static u64 perf_swevent_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 period = hwc->last_period;
	u64 nr, offset;
	s64 old, val;

	hwc->last_period = hwc->sample_period;

again:
	old = val = local64_read(&hwc->period_left);
	if (val < 0)
		return 0;

	nr = div64_u64(period + val, period);
	offset = nr * period;
	val -= offset;
	if (local64_cmpxchg(&hwc->period_left, old, val) != old)
		goto again;

	return nr;
}
/*
 * Deliver @overflow pending overflows for @event; the first delivery is
 * unthrottled, subsequent ones may be throttled.  @overflow == 0 means
 * "compute it from the period state".
 */
static void perf_swevent_overflow(struct perf_event *event, u64 overflow,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;
	int throttle = 0;

	if (!overflow)
		overflow = perf_swevent_set_period(event);

	/* Already throttled: nothing to deliver. */
	if (hwc->interrupts == MAX_INTERRUPTS)
		return;

	for (; overflow; overflow--) {
		if (__perf_event_overflow(event, throttle,
					    data, regs)) {
			/*
			 * We inhibit the overflow from happening when
			 * hwc->interrupts == MAX_INTERRUPTS.
			 */
			break;
		}
		throttle = 1;
	}
}
/*
 * Account @nr occurrences to @event and, for sampling events with a
 * usable @regs, drive the period/overflow machinery.
 */
static void perf_swevent_event(struct perf_event *event, u64 nr,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	struct hw_perf_event *hwc = &event->hw;

	local64_add(nr, &event->count);

	if (!regs)
		return;

	if (!is_sampling_event(event))
		return;

	if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) {
		/* Fixed-period events report the raw increment. */
		data->period = nr;
		return perf_swevent_overflow(event, 1, data, regs);
	} else
		data->period = event->hw.last_period;

	/* period == 1: every occurrence overflows, skip the counting. */
	if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq)
		return perf_swevent_overflow(event, 1, data, regs);

	/* period_left still negative: period not yet consumed. */
	if (local64_add_negative(nr, &hwc->period_left))
		return;

	perf_swevent_overflow(event, 0, data, regs);
}
/*
 * Should this sample be dropped for @event?  Stopped events never
 * count; otherwise apply the user/kernel exclusion bits against the
 * mode of the interrupted context.
 */
static int perf_exclude_event(struct perf_event *event,
			      struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 1;

	if (!regs)
		return 0;

	if (user_mode(regs))
		return event->attr.exclude_user != 0;

	return event->attr.exclude_kernel != 0;
}
/*
 * An event matches a software sample when the type and config agree
 * and the exclusion filters do not reject the sample context.
 */
static int perf_swevent_match(struct perf_event *event,
				enum perf_type_id type,
				u32 event_id,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	return event->attr.type == type &&
	       event->attr.config == event_id &&
	       !perf_exclude_event(event, regs);
}
/* Hash a (type, event_id) pair into a swevent hlist bucket index. */
static inline u64 swevent_hash(u64 type, u32 event_id)
{
	/* Type in the high half, id in the low half. */
	u64 key = (type << 32) | event_id;

	return hash_64(key, SWEVENT_HLIST_BITS);
}
/* Bucket lookup given an already-dereferenced hlist. */
static inline struct hlist_head *
__find_swevent_head(struct swevent_hlist *hlist, u64 type, u32 event_id)
{
	return &hlist->heads[swevent_hash(type, event_id)];
}
/* For the read side: events when they trigger */
/* For the read side: events when they trigger (RCU-protected). */
static inline struct hlist_head *
find_swevent_head_rcu(struct swevent_htable *swhash, u64 type, u32 event_id)
{
	struct swevent_hlist *hlist = rcu_dereference(swhash->swevent_hlist);

	return hlist ? __find_swevent_head(hlist, type, event_id) : NULL;
}
/* For the event head insertion and removal in the hlist */
static inline struct hlist_head *
find_swevent_head(struct swevent_htable *swhash, struct perf_event *event)
{
struct swevent_hlist *hlist;
u32 event_id = event->attr.config;
u64 type = event->attr.type;
/*
* Event scheduling is always serialized against hlist allocation
* and release. Which makes the protected version suitable here.
* The context lock guarantees that.
*/
hlist = rcu_dereference_protected(swhash->swevent_hlist,
lockdep_is_held(&event->ctx->lock));
if (!hlist)
return NULL;
return __find_swevent_head(hlist, type, event_id);
}
/*
 * Deliver a software event occurrence to every matching event on this
 * cpu's hash bucket, under RCU.
 */
static void do_perf_sw_event(enum perf_type_id type, u32 event_id,
				    u64 nr,
				    struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct perf_event *event;
	struct hlist_head *head;

	rcu_read_lock();
	head = find_swevent_head_rcu(swhash, type, event_id);
	if (!head)
		goto end;

	hlist_for_each_entry_rcu(event, head, hlist_entry) {
		if (perf_swevent_match(event, type, event_id, data, regs))
			perf_swevent_event(event, nr, data, regs);
	}
end:
	rcu_read_unlock();
}
/*
 * Mark entry into software-event processing on this cpu; returns the
 * recursion slot to release later, or < 0 if we are already inside.
 */
int perf_swevent_get_recursion_context(void)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	return get_recursion_context(swhash->recursion);
}
EXPORT_SYMBOL_GPL(perf_swevent_get_recursion_context);
/* Release the recursion slot obtained from the matching get. */
inline void perf_swevent_put_recursion_context(int rctx)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);

	put_recursion_context(swhash->recursion, rctx);
}
/*
 * Fire a software event from arbitrary context.
 *
 * Fix: the original returned directly when the recursion context could
 * not be obtained, leaving preemption disabled forever on that path.
 * Route the failure through a label so preempt_enable_notrace() always
 * balances the preempt_disable_notrace() above it.
 */
void __perf_sw_event(u32 event_id, u64 nr, struct pt_regs *regs, u64 addr)
{
	struct perf_sample_data data;
	int rctx;

	preempt_disable_notrace();
	rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		goto fail;	/* already inside swevent processing */

	perf_sample_data_init(&data, addr, 0);

	do_perf_sw_event(PERF_TYPE_SOFTWARE, event_id, nr, &data, regs);

	perf_swevent_put_recursion_context(rctx);
fail:
	preempt_enable_notrace();
}
/* Software events count inline into event->count; nothing to read. */
static void perf_swevent_read(struct perf_event *event)
{
}
/*
 * Schedule @event onto this cpu: prime its sampling period and insert
 * it into the per-cpu swevent hash so samples can find it.
 */
static int perf_swevent_add(struct perf_event *event, int flags)
{
	struct swevent_htable *swhash = &__get_cpu_var(swevent_htable);
	struct hw_perf_event *hwc = &event->hw;
	struct hlist_head *head;

	if (is_sampling_event(event)) {
		hwc->last_period = hwc->sample_period;
		perf_swevent_set_period(event);
	}

	/* Start stopped unless PERF_EF_START was requested. */
	hwc->state = !(flags & PERF_EF_START);

	head = find_swevent_head(swhash, event);
	if (!head) {
		/*
		 * We can race with cpu hotplug code. Do not
		 * WARN if the cpu just got unplugged.
		 */
		WARN_ON_ONCE(swhash->online);
		return -EINVAL;
	}

	hlist_add_head_rcu(&event->hlist_entry, head);

	return 0;
}
/* Unschedule @event: remove it from the per-cpu hash (RCU-safe). */
static void perf_swevent_del(struct perf_event *event, int flags)
{
	hlist_del_rcu(&event->hlist_entry);
}
/* Clear the stopped state so samples count again. */
static void perf_swevent_start(struct perf_event *event, int flags)
{
	event->hw.state = 0;
}
/* Mark the event stopped; perf_exclude_event() then drops samples. */
static void perf_swevent_stop(struct perf_event *event, int flags)
{
	event->hw.state = PERF_HES_STOPPED;
}
/* Deref the hlist from the update side */
static inline struct swevent_hlist *
swevent_hlist_deref(struct swevent_htable *swhash)
{
	/* Caller must hold hlist_mutex; lockdep checks it. */
	return rcu_dereference_protected(swhash->swevent_hlist,
					 lockdep_is_held(&swhash->hlist_mutex));
}
/* Detach the hlist and free it after an RCU grace period. */
static void swevent_hlist_release(struct swevent_htable *swhash)
{
	struct swevent_hlist *hlist = swevent_hlist_deref(swhash);

	if (!hlist)
		return;

	rcu_assign_pointer(swhash->swevent_hlist, NULL);
	kfree_rcu(hlist, rcu_head);
}
/* Drop one reference on @cpu's swevent hlist; free it on last put. */
static void swevent_hlist_put_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->hlist_refcount--;
	if (swhash->hlist_refcount == 0)
		swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
}
/*
 * Drop the hlist references taken by swevent_hlist_get(): one cpu for
 * a cpu-bound event, every possible cpu otherwise.
 */
static void swevent_hlist_put(struct perf_event *event)
{
	int cpu;

	if (event->cpu != -1) {
		swevent_hlist_put_cpu(event, event->cpu);
	} else {
		for_each_possible_cpu(cpu)
			swevent_hlist_put_cpu(event, cpu);
	}
}
/*
 * Take a reference on @cpu's swevent hlist, allocating it on first use
 * (only while the cpu is online).  Returns -ENOMEM on allocation
 * failure, 0 otherwise.
 */
static int swevent_hlist_get_cpu(struct perf_event *event, int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
	int err = 0;

	mutex_lock(&swhash->hlist_mutex);

	if (!swevent_hlist_deref(swhash) && cpu_online(cpu)) {
		struct swevent_hlist *hlist;

		hlist = kzalloc(sizeof(*hlist), GFP_KERNEL);
		if (!hlist) {
			err = -ENOMEM;
			goto exit;
		}
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	/* Refcount is taken even when the cpu is offline (no hlist). */
	swhash->hlist_refcount++;
exit:
	mutex_unlock(&swhash->hlist_mutex);

	return err;
}
/*
 * Take hlist references for @event: on its bound cpu only, or on every
 * possible cpu for a task-bound event.  On failure, roll back the
 * references already taken before failed_cpu.
 */
static int swevent_hlist_get(struct perf_event *event)
{
	int err;
	int cpu, failed_cpu;

	if (event->cpu != -1)
		return swevent_hlist_get_cpu(event, event->cpu);

	get_online_cpus();
	for_each_possible_cpu(cpu) {
		err = swevent_hlist_get_cpu(event, cpu);
		if (err) {
			failed_cpu = cpu;
			goto fail;
		}
	}
	put_online_cpus();

	return 0;
fail:
	/* Undo the gets on all cpus before the one that failed. */
	for_each_possible_cpu(cpu) {
		if (cpu == failed_cpu)
			break;
		swevent_hlist_put_cpu(event, cpu);
	}

	put_online_cpus();
	return err;
}
/* One static key per software event id; counted up while events exist. */
struct static_key perf_swevent_enabled[PERF_COUNT_SW_MAX];
/* Undo perf_swevent_init(): drop the static key and hlist references. */
static void sw_perf_event_destroy(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	/* Only non-inherited events took the references. */
	WARN_ON(event->parent);

	static_key_slow_dec(&perf_swevent_enabled[event_id]);
	swevent_hlist_put(event);
}
/*
 * pmu::event_init for generic software events.  CPU/task clock events
 * are handled by their own pmus and rejected here.
 */
static int perf_swevent_init(struct perf_event *event)
{
	u64 event_id = event->attr.config;

	if (event->attr.type != PERF_TYPE_SOFTWARE)
		return -ENOENT;

	/*
	 * no branch sampling for software events
	 */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event_id) {
	case PERF_COUNT_SW_CPU_CLOCK:
	case PERF_COUNT_SW_TASK_CLOCK:
		return -ENOENT;

	default:
		break;
	}

	if (event_id >= PERF_COUNT_SW_MAX)
		return -ENOENT;

	if (!event->parent) {
		int err;

		err = swevent_hlist_get(event);
		if (err)
			return err;

		static_key_slow_inc(&perf_swevent_enabled[event_id]);
		event->destroy = sw_perf_event_destroy;
	}

	return 0;
}
/* Software events expose no user-space counter index. */
static int perf_swevent_event_idx(struct perf_event *event)
{
	return 0;
}
/* pmu for generic software events (context switches, faults, ...). */
static struct pmu perf_swevent = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_swevent_init,
	.add		= perf_swevent_add,
	.del		= perf_swevent_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,

	.event_idx	= perf_swevent_event_idx,
	.events_across_hotplug = 1,
};
#ifdef CONFIG_EVENT_TRACING
/* Apply the event's tracepoint filter to the raw record, if any. */
static int perf_tp_filter_match(struct perf_event *event,
				struct perf_sample_data *data)
{
	void *record = data->raw->data;

	/* No filter installed means everything matches. */
	if (event->filter && !filter_match_preds(event->filter, record))
		return 0;

	return 1;
}
static int perf_tp_event_match(struct perf_event *event,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	if (event->hw.state & PERF_HES_STOPPED)
		return 0;
	/*
	 * All tracepoints are from kernel-space.
	 */
	if (event->attr.exclude_kernel)
		return 0;

	return perf_tp_filter_match(event, data);
}
/*
 * Deliver a tracepoint hit (@record of @entry_size bytes) to every
 * matching event on @head, and optionally to @task's software context.
 * @rctx is the recursion context taken by the caller and released here.
 */
void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,
		   struct pt_regs *regs, struct hlist_head *head, int rctx,
		   struct task_struct *task)
{
	struct perf_sample_data data;
	struct perf_event *event;

	struct perf_raw_record raw = {
		.size = entry_size,
		.data = record,
	};

	perf_sample_data_init(&data, addr, 0);
	data.raw = &raw;

	hlist_for_each_entry_rcu(event, head, hlist_entry) {
		if (perf_tp_event_match(event, &data, regs))
			perf_swevent_event(event, count, &data, regs);
	}

	/*
	 * If we got specified a target task, also iterate its context and
	 * deliver this event there too.
	 */
	if (task && task != current) {
		struct perf_event_context *ctx;
		struct trace_entry *entry = record;

		rcu_read_lock();
		ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]);
		if (!ctx)
			goto unlock;

		list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
			if (event->attr.type != PERF_TYPE_TRACEPOINT)
				continue;
			/* Match this specific tracepoint by its entry type. */
			if (event->attr.config != entry->type)
				continue;
			if (perf_tp_event_match(event, &data, regs))
				perf_swevent_event(event, count, &data, regs);
		}
unlock:
		rcu_read_unlock();
	}

	perf_swevent_put_recursion_context(rctx);
}
EXPORT_SYMBOL_GPL(perf_tp_event);
/* Tear down the trace infrastructure attached at init time. */
static void tp_perf_event_destroy(struct perf_event *event)
{
	perf_trace_destroy(event);
}
/* pmu::event_init for tracepoint events. */
static int perf_tp_event_init(struct perf_event *event)
{
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -ENOENT;

	/* Branch sampling is not supported on tracepoint events. */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	ret = perf_trace_init(event);
	if (ret)
		return ret;

	event->destroy = tp_perf_event_destroy;
	return 0;
}
/* pmu for tracepoint-backed events; reuses the swevent scheduling ops. */
static struct pmu perf_tracepoint = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= perf_tp_event_init,
	.add		= perf_trace_add,
	.del		= perf_trace_del,
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,

	.event_idx	= perf_swevent_event_idx,
	.events_across_hotplug = 1,
};
/* Register the tracepoint pmu under the fixed PERF_TYPE_TRACEPOINT id. */
static inline void perf_tp_register(void)
{
	perf_pmu_register(&perf_tracepoint, "tracepoint", PERF_TYPE_TRACEPOINT);
}
/*
 * ioctl helper: install a tracepoint filter string supplied from
 * user space (bounded at PAGE_SIZE).
 */
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	char *filter_str;
	int ret;

	if (event->attr.type != PERF_TYPE_TRACEPOINT)
		return -EINVAL;

	filter_str = strndup_user(arg, PAGE_SIZE);
	if (IS_ERR(filter_str))
		return PTR_ERR(filter_str);

	ret = ftrace_profile_set_filter(event, event->attr.config, filter_str);

	kfree(filter_str);
	return ret;
}
/* Release any filter installed via perf_event_set_filter(). */
static void perf_event_free_filter(struct perf_event *event)
{
	ftrace_profile_free_filter(event);
}
#else
/* Stub: tracepoint support compiled out (CONFIG_EVENT_TRACING off). */
static inline void perf_tp_register(void)
{
}
/* Stub: no tracepoint support, so filters cannot be set. */
static int perf_event_set_filter(struct perf_event *event, void __user *arg)
{
	return -ENOENT;
}
/* Stub: nothing to free when tracepoint support is compiled out. */
static void perf_event_free_filter(struct perf_event *event)
{
}
#endif /* CONFIG_EVENT_TRACING */
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/* Deliver a hardware-breakpoint hit as a single software-style sample. */
void perf_bp_event(struct perf_event *bp, void *data)
{
	struct perf_sample_data sample;
	struct pt_regs *regs = data;

	perf_sample_data_init(&sample, bp->attr.bp_addr, 0);

	/* hw.state == 0 means the breakpoint is currently active. */
	if (!bp->hw.state && !perf_exclude_event(bp, regs))
		perf_swevent_event(bp, 1, &sample, regs);
}
#endif
/*
* hrtimer based swevent callback
*/
/*
 * hrtimer tick for timer-sampled software events: read the counter,
 * emit a sample from the interrupted context, and re-arm unless the
 * event was stopped or the overflow handler asked us to quit.
 */
static enum hrtimer_restart perf_swevent_hrtimer(struct hrtimer *hrtimer)
{
	enum hrtimer_restart ret = HRTIMER_RESTART;
	struct perf_sample_data data;
	struct pt_regs *regs;
	struct perf_event *event;
	u64 period;

	event = container_of(hrtimer, struct perf_event, hw.hrtimer);

	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return HRTIMER_NORESTART;

	event->pmu->read(event);

	perf_sample_data_init(&data, 0, event->hw.last_period);
	regs = get_irq_regs();

	if (regs && !perf_exclude_event(event, regs)) {
		if (!(event->attr.exclude_idle && is_idle_task(current)))
			if (__perf_event_overflow(event, 1, &data, regs))
				ret = HRTIMER_NORESTART;
	}

	/* Clamp the period to at least 10us to bound timer load. */
	period = max_t(u64, 10000, event->hw.sample_period);
	hrtimer_forward_now(hrtimer, ns_to_ktime(period));

	return ret;
}
/*
 * Arm the sampling hrtimer, resuming any remaining period saved by
 * perf_swevent_cancel_hrtimer().
 */
static void perf_swevent_start_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 period;

	if (!is_sampling_event(event))
		return;

	period = local64_read(&hwc->period_left);
	if (period) {
		/* A negative remainder means the period already expired. */
		if (period < 0)
			period = 10000;

		local64_set(&hwc->period_left, 0);
	} else {
		period = max_t(u64, 10000, hwc->sample_period);
	}
	__hrtimer_start_range_ns(&hwc->hrtimer,
				ns_to_ktime(period), 0,
				HRTIMER_MODE_REL_PINNED, 0);
}
/* Cancel the sampling hrtimer, saving the unexpired remainder. */
static void perf_swevent_cancel_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (is_sampling_event(event)) {
		ktime_t remaining = hrtimer_get_remaining(&hwc->hrtimer);
		local64_set(&hwc->period_left, ktime_to_ns(remaining));

		hrtimer_cancel(&hwc->hrtimer);
	}
}
/*
 * One-time hrtimer setup for timer-sampled events; converts a freq
 * request into a fixed period since the timer rate is deterministic.
 */
static void perf_swevent_init_hrtimer(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!is_sampling_event(event))
		return;

	hrtimer_init(&hwc->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hwc->hrtimer.function = perf_swevent_hrtimer;

	/*
	 * Since hrtimers have a fixed rate, we can do a static freq->period
	 * mapping and avoid the whole period adjust feedback stuff.
	 */
	if (event->attr.freq) {
		long freq = event->attr.sample_freq;

		/*
		 * NOTE(review): divides by sample_freq — assumes the
		 * attr-validation path rejects freq && !sample_freq;
		 * confirm, otherwise this is a divide-by-zero.
		 */
		event->attr.sample_period = NSEC_PER_SEC / freq;
		hwc->sample_period = event->attr.sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
		hwc->last_period = hwc->sample_period;
		event->attr.freq = 0;
	}
}
/*
* Software event: cpu wall time clock
*/
/* Fold the wall time elapsed since the previous update into the count. */
static void cpu_clock_event_update(struct perf_event *event)
{
	u64 now = local_clock();
	s64 prev = local64_xchg(&event->hw.prev_count, now);

	local64_add(now - prev, &event->count);
}
/* Snapshot the clock as the new baseline and arm the sample timer. */
static void cpu_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, local_clock());
	perf_swevent_start_hrtimer(event);
}
/* Stop the sample timer and fold in the final elapsed time. */
static void cpu_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	cpu_clock_event_update(event);
}
/* Schedule in; optionally start counting right away. */
static int cpu_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		cpu_clock_event_start(event, flags);
	return 0;
}
/* Schedule out: stopping also updates the count. */
static void cpu_clock_event_del(struct perf_event *event, int flags)
{
	cpu_clock_event_stop(event, flags);
}
/* Bring event->count up to date with the current clock. */
static void cpu_clock_event_read(struct perf_event *event)
{
	cpu_clock_event_update(event);
}
static int cpu_clock_event_init(struct perf_event *event)
{
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
perf_swevent_init_hrtimer(event);
return 0;
}
/* pmu implementing PERF_COUNT_SW_CPU_CLOCK via hrtimer sampling. */
static struct pmu perf_cpu_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= cpu_clock_event_init,
	.add		= cpu_clock_event_add,
	.del		= cpu_clock_event_del,
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,

	.event_idx	= perf_swevent_event_idx,
	.events_across_hotplug = 1,
};
/*
* Software event: task time clock
*/
/* Fold the task time elapsed since the previous update into the count. */
static void task_clock_event_update(struct perf_event *event, u64 now)
{
	u64 prev = local64_xchg(&event->hw.prev_count, now);

	local64_add(now - prev, &event->count);
}
/* Baseline at the context's task time, then arm the sample timer. */
static void task_clock_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, event->ctx->time);
	perf_swevent_start_hrtimer(event);
}
/* Stop the sample timer and account up to the context's task time. */
static void task_clock_event_stop(struct perf_event *event, int flags)
{
	perf_swevent_cancel_hrtimer(event);
	task_clock_event_update(event, event->ctx->time);
}
/* Schedule in; optionally start counting right away. */
static int task_clock_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		task_clock_event_start(event, flags);
	return 0;
}
/* Schedule out; PERF_EF_UPDATE forces a final count update. */
static void task_clock_event_del(struct perf_event *event, int flags)
{
	task_clock_event_stop(event, PERF_EF_UPDATE);
}
/*
 * Read: extrapolate the context's task time by the wall time elapsed
 * since the context timestamp was last taken.
 */
static void task_clock_event_read(struct perf_event *event)
{
	u64 now = perf_clock();
	u64 delta = now - event->ctx->timestamp;
	u64 time = event->ctx->time + delta;

	task_clock_event_update(event, time);
}
static int task_clock_event_init(struct perf_event *event)
{
if (event->attr.type != PERF_TYPE_SOFTWARE)
return -ENOENT;
if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK)
return -ENOENT;
/*
* no branch sampling for software events
*/
if (has_branch_stack(event))
return -EOPNOTSUPP;
perf_swevent_init_hrtimer(event);
return 0;
}
/* pmu implementing PERF_COUNT_SW_TASK_CLOCK via hrtimer sampling. */
static struct pmu perf_task_clock = {
	.task_ctx_nr	= perf_sw_context,

	.event_init	= task_clock_event_init,
	.add		= task_clock_event_add,
	.del		= task_clock_event_del,
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,

	.event_idx	= perf_swevent_event_idx,
	.events_across_hotplug = 1,
};
/* Default no-op for optional void pmu callbacks. */
static void perf_pmu_nop_void(struct pmu *pmu)
{
}
/* Default no-op (always succeeds) for optional int pmu callbacks. */
static int perf_pmu_nop_int(struct pmu *pmu)
{
	return 0;
}
/* Fallback txn start: batch hardware access by disabling the pmu. */
static void perf_pmu_start_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);
}
/* Fallback txn commit: re-enable the pmu; cannot fail. */
static int perf_pmu_commit_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
	return 0;
}
/* Fallback txn cancel: just re-enable the pmu. */
static void perf_pmu_cancel_txn(struct pmu *pmu)
{
	perf_pmu_enable(pmu);
}
/* Default user-space counter index: hardware idx shifted past 0. */
static int perf_event_idx_default(struct perf_event *event)
{
	return event->hw.idx + 1;
}
/*
* Ensures all contexts with the same task_ctx_nr have the same
* pmu_cpu_context too.
*/
/*
 * Ensures all contexts with the same task_ctx_nr have the same
 * pmu_cpu_context too: return the cpu context of any pmu already
 * registered with context number @ctxn, or NULL.
 */
static void *find_pmu_context(int ctxn)
{
	struct pmu *iter;

	if (ctxn < 0)
		return NULL;

	list_for_each_entry(iter, &pmus, entry) {
		if (iter->task_ctx_nr == ctxn)
			return iter->pmu_cpu_context;
	}

	return NULL;
}
/* Re-point every per-cpu context that named @old_pmu as its owner. */
static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx =
			per_cpu_ptr(pmu->pmu_cpu_context, cpu);

		if (cpuctx->unique_pmu == old_pmu)
			cpuctx->unique_pmu = pmu;
	}
}
/*
 * Free @pmu's cpu context unless another registered pmu still shares
 * it; in that case just transfer ownership to that pmu.
 */
static void free_pmu_context(struct pmu *pmu)
{
	struct pmu *i;

	mutex_lock(&pmus_lock);
	/*
	 * Like a real lame refcount.
	 */
	list_for_each_entry(i, &pmus, entry) {
		if (i->pmu_cpu_context == pmu->pmu_cpu_context) {
			update_pmu_context(i, pmu);
			goto out;
		}
	}

	free_percpu(pmu->pmu_cpu_context);
out:
	mutex_unlock(&pmus_lock);
}
/* Maps a pmu type id to its struct pmu for fast event-init lookup. */
static struct idr pmu_idr;
/* sysfs: expose the pmu's numeric type id ("/sys/.../<pmu>/type"). */
static ssize_t
type_show(struct device *dev, struct device_attribute *attr, char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);

	return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
}
/* Default attributes for every pmu device (currently only "type"). */
static struct device_attribute pmu_dev_attrs[] = {
       __ATTR_RO(type),
       __ATTR_NULL,
};
/* Set once the event_source bus has been registered at late init. */
static int pmu_bus_running;
static struct bus_type pmu_bus = {
	.name		= "event_source",
	.dev_attrs	= pmu_dev_attrs,
};
/* device release callback: the device was kzalloc'ed in pmu_dev_alloc. */
static void pmu_dev_release(struct device *dev)
{
	kfree(dev);
}
/*
 * Create and register the sysfs device for @pmu on the event_source
 * bus.  On failure after device_initialize(), put_device() both
 * unwinds and frees via pmu_dev_release().
 */
static int pmu_dev_alloc(struct pmu *pmu)
{
	int ret = -ENOMEM;

	pmu->dev = kzalloc(sizeof(struct device), GFP_KERNEL);
	if (!pmu->dev)
		goto out;

	pmu->dev->groups = pmu->attr_groups;
	device_initialize(pmu->dev);
	ret = dev_set_name(pmu->dev, "%s", pmu->name);
	if (ret)
		goto free_dev;

	dev_set_drvdata(pmu->dev, pmu);
	pmu->dev->bus = &pmu_bus;
	pmu->dev->release = pmu_dev_release;
	ret = device_add(pmu->dev);
	if (ret)
		goto free_dev;

out:
	return ret;

free_dev:
	put_device(pmu->dev);
	goto out;
}
/* Shared lockdep classes for all per-cpu context locks. */
static struct lock_class_key cpuctx_mutex;
static struct lock_class_key cpuctx_lock;

/*
 * Register @pmu under @name with type @type; type < 0 requests a
 * dynamically allocated id via pmu_idr.  A NULL @name skips the idr
 * and sysfs pieces entirely.  Returns 0 or a -errno.
 */
int perf_pmu_register(struct pmu *pmu, char *name, int type)
{
	int cpu, ret;

	mutex_lock(&pmus_lock);
	ret = -ENOMEM;
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;

	pmu->type = -1;
	if (!name)
		goto skip_type;
	pmu->name = name;

	if (type < 0) {
		type = idr_alloc(&pmu_idr, pmu, PERF_TYPE_MAX, 0, GFP_KERNEL);
		if (type < 0) {
			ret = type;
			goto free_pdc;
		}
	}
	pmu->type = type;

	if (pmu_bus_running) {
		ret = pmu_dev_alloc(pmu);
		if (ret)
			goto free_idr;
	}

skip_type:
	/* Share the cpu context with any pmu using the same task ctx nr. */
	pmu->pmu_cpu_context = find_pmu_context(pmu->task_ctx_nr);
	if (pmu->pmu_cpu_context)
		goto got_cpu_context;

	ret = -ENOMEM;
	pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
	if (!pmu->pmu_cpu_context)
		/*
		 * NOTE(review): free_dev does device_del(pmu->dev); on
		 * the !name / !pmu_bus_running paths pmu->dev was never
		 * allocated — confirm those callers cannot reach here
		 * with a NULL dev.
		 */
		goto free_dev;

	for_each_possible_cpu(cpu) {
		struct perf_cpu_context *cpuctx;

		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
		__perf_event_init_context(&cpuctx->ctx);
		lockdep_set_class(&cpuctx->ctx.mutex, &cpuctx_mutex);
		lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
		cpuctx->ctx.type = cpu_context;
		cpuctx->ctx.pmu = pmu;
		cpuctx->jiffies_interval = 1;
		INIT_LIST_HEAD(&cpuctx->rotation_list);
		cpuctx->unique_pmu = pmu;
	}

got_cpu_context:
	if (!pmu->start_txn) {
		if (pmu->pmu_enable) {
			/*
			 * If we have pmu_enable/pmu_disable calls, install
			 * transaction stubs that use that to try and batch
			 * hardware accesses.
			 */
			pmu->start_txn  = perf_pmu_start_txn;
			pmu->commit_txn = perf_pmu_commit_txn;
			pmu->cancel_txn = perf_pmu_cancel_txn;
		} else {
			pmu->start_txn  = perf_pmu_nop_void;
			pmu->commit_txn = perf_pmu_nop_int;
			pmu->cancel_txn = perf_pmu_nop_void;
		}
	}

	if (!pmu->pmu_enable) {
		pmu->pmu_enable  = perf_pmu_nop_void;
		pmu->pmu_disable = perf_pmu_nop_void;
	}

	if (!pmu->event_idx)
		pmu->event_idx = perf_event_idx_default;

	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
unlock:
	mutex_unlock(&pmus_lock);

	return ret;

free_dev:
	device_del(pmu->dev);
	put_device(pmu->dev);

free_idr:
	/* Only dynamically allocated ids live in the idr. */
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);

free_pdc:
	free_percpu(pmu->pmu_disable_count);
	goto unlock;
}
/*
 * Unregister @pmu: unlink it, wait for all readers to drop off, then
 * release everything perf_pmu_register() allocated.
 */
void perf_pmu_unregister(struct pmu *pmu)
{
	mutex_lock(&pmus_lock);
	list_del_rcu(&pmu->entry);
	mutex_unlock(&pmus_lock);

	/*
	 * We dereference the pmu list under both SRCU and regular RCU, so
	 * synchronize against both of those.
	 */
	synchronize_srcu(&pmus_srcu);
	synchronize_rcu();

	free_percpu(pmu->pmu_disable_count);
	if (pmu->type >= PERF_TYPE_MAX)
		idr_remove(&pmu_idr, pmu->type);
	device_del(pmu->dev);
	put_device(pmu->dev);
	free_pmu_context(pmu);
}
/*
 * Find the pmu willing to handle @event: first a direct idr lookup by
 * attr.type, then a linear scan where each pmu may claim the event or
 * decline with -ENOENT.  Returns the pmu or an ERR_PTR.
 */
struct pmu *perf_init_event(struct perf_event *event)
{
	struct pmu *pmu = NULL;
	int idx;
	int ret;

	idx = srcu_read_lock(&pmus_srcu);

	rcu_read_lock();
	pmu = idr_find(&pmu_idr, event->attr.type);
	rcu_read_unlock();
	if (pmu) {
		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (ret)
			pmu = ERR_PTR(ret);
		goto unlock;
	}

	list_for_each_entry_rcu(pmu, &pmus, entry) {
		event->pmu = pmu;
		ret = pmu->event_init(event);
		if (!ret)
			goto unlock;

		/* Any error other than "not mine" is fatal. */
		if (ret != -ENOENT) {
			pmu = ERR_PTR(ret);
			goto unlock;
		}
	}
	pmu = ERR_PTR(-ENOENT);
unlock:
	srcu_read_unlock(&pmus_srcu, idx);

	return pmu;
}
/*
* Allocate and initialize a event structure
*/
/*
 * Allocate and initialize a event structure
 *
 * @cpu == -1 requires a @task; otherwise @cpu must be a valid cpu id.
 * Inherited events (@parent_event set) reuse the parent's overflow
 * handler and skip the global accounting done for top-level events.
 * Returns the event or an ERR_PTR.
 */
static struct perf_event *
perf_event_alloc(struct perf_event_attr *attr, int cpu,
		 struct task_struct *task,
		 struct perf_event *group_leader,
		 struct perf_event *parent_event,
		 perf_overflow_handler_t overflow_handler,
		 void *context)
{
	struct pmu *pmu;
	struct perf_event *event;
	struct hw_perf_event *hwc;
	long err;

	if ((unsigned)cpu >= nr_cpu_ids) {
		if (!task || cpu != -1)
			return ERR_PTR(-EINVAL);
	}

	event = kzalloc(sizeof(*event), GFP_KERNEL);
	if (!event)
		return ERR_PTR(-ENOMEM);

	/*
	 * Single events are their own group leaders, with an
	 * empty sibling list:
	 */
	if (!group_leader)
		group_leader = event;

	mutex_init(&event->child_mutex);
	INIT_LIST_HEAD(&event->child_list);

	INIT_LIST_HEAD(&event->group_entry);
	INIT_LIST_HEAD(&event->event_entry);
	INIT_LIST_HEAD(&event->sibling_list);
	INIT_LIST_HEAD(&event->rb_entry);

	init_waitqueue_head(&event->waitq);
	init_irq_work(&event->pending, perf_pending_event);

	mutex_init(&event->mmap_mutex);

	atomic_long_set(&event->refcount, 1);
	event->cpu		= cpu;
	event->attr		= *attr;
	event->group_leader	= group_leader;
	event->pmu		= NULL;
	event->oncpu		= -1;

	event->parent		= parent_event;

	event->ns		= get_pid_ns(task_active_pid_ns(current));
	event->id		= atomic64_inc_return(&perf_event_id);

	event->state		= PERF_EVENT_STATE_INACTIVE;

	if (task) {
		event->attach_state = PERF_ATTACH_TASK;

		if (attr->type == PERF_TYPE_TRACEPOINT)
			event->hw.tp_target = task;
#ifdef CONFIG_HAVE_HW_BREAKPOINT
		/*
		 * hw_breakpoint is a bit difficult here..
		 */
		else if (attr->type == PERF_TYPE_BREAKPOINT)
			event->hw.bp_target = task;
#endif
	}

	/* Inherited events reuse the parent's overflow handling. */
	if (!overflow_handler && parent_event) {
		overflow_handler = parent_event->overflow_handler;
		context = parent_event->overflow_handler_context;
	}

	event->overflow_handler	= overflow_handler;
	event->overflow_handler_context = context;

	perf_event__state_init(event);

	pmu = NULL;

	hwc = &event->hw;
	hwc->sample_period = attr->sample_period;
	/* Freq events start at period 1 and self-adjust from there. */
	if (attr->freq && attr->sample_freq)
		hwc->sample_period = 1;
	hwc->last_period = hwc->sample_period;

	local64_set(&hwc->period_left, hwc->sample_period);

	/*
	 * we currently do not support PERF_FORMAT_GROUP on inherited events
	 */
	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
		goto done;

	pmu = perf_init_event(event);

done:
	err = 0;
	if (!pmu)
		err = -EINVAL;
	else if (IS_ERR(pmu))
		err = PTR_ERR(pmu);

	if (err) {
		if (event->ns)
			put_pid_ns(event->ns);
		kfree(event);
		return ERR_PTR(err);
	}

	/* Global accounting only for top-level (non-inherited) events. */
	if (!event->parent) {
		if (event->attach_state & PERF_ATTACH_TASK)
			static_key_slow_inc(&perf_sched_events.key);
		if (event->attr.mmap || event->attr.mmap_data)
			atomic_inc(&nr_mmap_events);
		if (event->attr.comm)
			atomic_inc(&nr_comm_events);
		if (event->attr.task)
			atomic_inc(&nr_task_events);
		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
			err = get_callchain_buffers();
			if (err) {
				free_event(event);
				return ERR_PTR(err);
			}
		}
		if (has_branch_stack(event)) {
			static_key_slow_inc(&perf_sched_events.key);
			if (!(event->attach_state & PERF_ATTACH_TASK))
				atomic_inc(&per_cpu(perf_branch_stack_events,
						    event->cpu));
		}
	}

	return event;
}
/*
 * Copy a perf_event_attr from user space, handling forward/backward
 * ABI compatibility via the embedded size field, and validate the
 * reserved, sample_type, read_format and branch/stack sampling bits.
 */
static int perf_copy_attr(struct perf_event_attr __user *uattr,
			  struct perf_event_attr *attr)
{
	u32 size;
	int ret;

	if (!access_ok(VERIFY_WRITE, uattr, PERF_ATTR_SIZE_VER0))
		return -EFAULT;

	/*
	 * zero the full structure, so that a short copy will be nice.
	 */
	memset(attr, 0, sizeof(*attr));

	ret = get_user(size, &uattr->size);
	if (ret)
		return ret;

	if (size > PAGE_SIZE)	/* silly large */
		goto err_size;

	if (!size)		/* abi compat */
		size = PERF_ATTR_SIZE_VER0;

	if (size < PERF_ATTR_SIZE_VER0)
		goto err_size;

	/*
	 * If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we dont know about yet.
	 */
	if (size > sizeof(*attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(*attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			ret = get_user(val, addr);
			if (ret)
				return ret;
			if (val)
				goto err_size;
		}
		size = sizeof(*attr);
	}

	ret = copy_from_user(attr, uattr, size);
	if (ret)
		return -EFAULT;

	if (attr->__reserved_1)
		return -EINVAL;

	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))
		return -EINVAL;

	if (attr->read_format & ~(PERF_FORMAT_MAX-1))
		return -EINVAL;

	if (attr->sample_type & PERF_SAMPLE_BRANCH_STACK) {
		u64 mask = attr->branch_sample_type;

		/* only using defined bits */
		if (mask & ~(PERF_SAMPLE_BRANCH_MAX-1))
			return -EINVAL;

		/* at least one branch bit must be set */
		if (!(mask & ~PERF_SAMPLE_BRANCH_PLM_ALL))
			return -EINVAL;

		/* kernel level capture: check permissions */
		if ((mask & PERF_SAMPLE_BRANCH_PERM_PLM)
		    && perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;

		/* propagate priv level, when not set for branch */
		if (!(mask & PERF_SAMPLE_BRANCH_PLM_ALL)) {

			/* exclude_kernel checked on syscall entry */
			if (!attr->exclude_kernel)
				mask |= PERF_SAMPLE_BRANCH_KERNEL;

			if (!attr->exclude_user)
				mask |= PERF_SAMPLE_BRANCH_USER;

			if (!attr->exclude_hv)
				mask |= PERF_SAMPLE_BRANCH_HV;
			/*
			 * adjust user setting (for HW filter setup)
			 */
			attr->branch_sample_type = mask;
		}
	}

	if (attr->sample_type & PERF_SAMPLE_REGS_USER) {
		ret = perf_reg_validate(attr->sample_regs_user);
		if (ret)
			return ret;
	}

	if (attr->sample_type & PERF_SAMPLE_STACK_USER) {
		if (!arch_perf_have_user_stack_dump())
			return -ENOSYS;

		/*
		 * We have __u32 type for the size, but so far
		 * we can only use __u16 as maximum due to the
		 * __u16 sample size limit.
		 */
		if (attr->sample_stack_user >= USHRT_MAX)
			ret = -EINVAL;
		else if (!IS_ALIGNED(attr->sample_stack_user, sizeof(u64)))
			ret = -EINVAL;
	}

	/* ret is 0 here unless one of the stack-user checks failed. */
out:
	return ret;

err_size:
	/* Tell user space the size we do understand. */
	put_user(sizeof(*attr), &uattr->size);
	ret = -E2BIG;
	goto out;
}
/*
 * Redirect @event's output into @output_event's ring buffer, or detach
 * the current buffer when @output_event is NULL.
 *
 * Returns 0 on success, -EINVAL when the redirection would be circular,
 * cross-CPU, cross-context, or when @event already has an active mmap().
 */
static int
perf_event_set_output(struct perf_event *event, struct perf_event *output_event)
{
	struct ring_buffer *rb = NULL, *old_rb = NULL;
	int ret = -EINVAL;

	if (!output_event)
		goto set;

	/* don't allow circular references */
	if (event == output_event)
		goto out;

	/*
	 * Don't allow cross-cpu buffers
	 */
	if (output_event->cpu != event->cpu)
		goto out;

	/*
	 * If its not a per-cpu rb, it must be the same task.
	 */
	if (output_event->cpu == -1 && output_event->ctx != event->ctx)
		goto out;

set:
	mutex_lock(&event->mmap_mutex);
	/* Can't redirect output if we've got an active mmap() */
	if (atomic_read(&event->mmap_count))
		goto unlock;

	old_rb = event->rb;
	if (output_event) {
		/* get the rb we want to redirect to */
		rb = ring_buffer_get(output_event);
		if (!rb)
			goto unlock;
	}

	/* detach-then-attach under mmap_mutex; readers see the switch via RCU */
	if (old_rb)
		ring_buffer_detach(event, old_rb);

	if (rb)
		ring_buffer_attach(event, rb);
	rcu_assign_pointer(event->rb, rb);

	if (old_rb) {
		ring_buffer_put(old_rb);
		/*
		 * Since we detached before setting the new rb, so that we
		 * could attach the new rb, we could have missed a wakeup.
		 * Provide it now.
		 */
		wake_up_all(&event->waitq);
	}

	ret = 0;
unlock:
	mutex_unlock(&event->mmap_mutex);

out:
	return ret;
}
/**
 * sys_perf_event_open - open a performance event, associate it to a task/cpu
 *
 * @attr_uptr: event_id type attributes for monitoring/sampling
 * @pid: target pid
 * @cpu: target cpu
 * @group_fd: group leader event fd
 * @flags: perf event open flags (PERF_FLAG_*)
 */
SYSCALL_DEFINE5(perf_event_open,
		struct perf_event_attr __user *, attr_uptr,
		pid_t, pid, int, cpu, int, group_fd, unsigned long, flags)
{
	struct perf_event *group_leader = NULL, *output_event = NULL;
	struct perf_event *event, *sibling;
	struct perf_event_attr attr;
	struct perf_event_context *ctx;
	struct file *event_file = NULL;
	struct fd group = {NULL, 0};
	struct task_struct *task = NULL;
	struct pmu *pmu;
	int event_fd;
	int move_group = 0;
	int err;

	/* for future expandability... */
	if (flags & ~PERF_FLAG_ALL)
		return -EINVAL;

	err = perf_copy_attr(attr_uptr, &attr);
	if (err)
		return err;

	/* kernel-level profiling requires privilege under paranoid settings */
	if (!attr.exclude_kernel) {
		if (perf_paranoid_kernel() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
	}

	if (attr.freq) {
		if (attr.sample_freq > sysctl_perf_event_sample_rate)
			return -EINVAL;
	} else {
		/* reject sample periods with the sign bit set */
		if (attr.sample_period & (1ULL << 63))
			return -EINVAL;
	}

	/*
	 * In cgroup mode, the pid argument is used to pass the fd
	 * opened to the cgroup directory in cgroupfs. The cpu argument
	 * designates the cpu on which to monitor threads from that
	 * cgroup.
	 */
	if ((flags & PERF_FLAG_PID_CGROUP) && (pid == -1 || cpu == -1))
		return -EINVAL;

	event_fd = get_unused_fd();
	if (event_fd < 0)
		return event_fd;

	if (group_fd != -1) {
		err = perf_fget_light(group_fd, &group);
		if (err)
			goto err_fd;
		group_leader = group.file->private_data;
		if (flags & PERF_FLAG_FD_OUTPUT)
			output_event = group_leader;
		if (flags & PERF_FLAG_FD_NO_GROUP)
			group_leader = NULL;
	}

	if (pid != -1 && !(flags & PERF_FLAG_PID_CGROUP)) {
		task = find_lively_task_by_vpid(pid);
		if (IS_ERR(task)) {
			err = PTR_ERR(task);
			goto err_group_fd;
		}
	}

	get_online_cpus();

	event = perf_event_alloc(&attr, cpu, task, group_leader, NULL,
				 NULL, NULL);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err_task;
	}

	if (flags & PERF_FLAG_PID_CGROUP) {
		err = perf_cgroup_connect(pid, event, &attr, group_leader);
		if (err)
			goto err_alloc;
		/*
		 * one more event:
		 * - that has cgroup constraint on event->cpu
		 * - that may need work on context switch
		 */
		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
		static_key_slow_inc(&perf_sched_events.key);
	}

	/*
	 * Special case software events and allow them to be part of
	 * any hardware group.
	 */
	pmu = event->pmu;

	if (group_leader &&
	    (is_software_event(event) != is_software_event(group_leader))) {
		if (is_software_event(event)) {
			/*
			 * If event and group_leader are not both a software
			 * event, and event is, then group leader is not.
			 *
			 * Allow the addition of software events to !software
			 * groups, this is safe because software events never
			 * fail to schedule.
			 */
			pmu = group_leader->pmu;
		} else if (is_software_event(group_leader) &&
			   (group_leader->group_flags & PERF_GROUP_SOFTWARE)) {
			/*
			 * In case the group is a pure software group, and we
			 * try to add a hardware event, move the whole group to
			 * the hardware context.
			 */
			move_group = 1;
		}
	}

	/*
	 * Get the target context (task or percpu):
	 */
	ctx = find_get_context(pmu, task, event->cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_alloc;
	}

	if (task) {
		put_task_struct(task);
		task = NULL;
	}

	/*
	 * Look up the group leader (we will attach this event to it):
	 */
	if (group_leader) {
		err = -EINVAL;

		/*
		 * Do not allow a recursive hierarchy (this new sibling
		 * becoming part of another group-sibling):
		 */
		if (group_leader->group_leader != group_leader)
			goto err_context;
		/*
		 * Do not allow to attach to a group in a different
		 * task or CPU context:
		 */
		if (move_group) {
			if (group_leader->ctx->type != ctx->type)
				goto err_context;
		} else {
			if (group_leader->ctx != ctx)
				goto err_context;
		}

		/*
		 * Only a group leader can be exclusive or pinned
		 */
		if (attr.exclusive || attr.pinned)
			goto err_context;
	}

	if (output_event) {
		err = perf_event_set_output(event, output_event);
		if (err)
			goto err_context;
	}

	event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, O_RDWR);
	if (IS_ERR(event_file)) {
		err = PTR_ERR(event_file);
		goto err_context;
	}

	if (move_group) {
		struct perf_event_context *gctx = group_leader->ctx;

		mutex_lock(&gctx->mutex);
		perf_remove_from_context(group_leader, false);

		/*
		 * Removing from the context ends up with disabled
		 * event. What we want here is event in the initial
		 * startup state, ready to be add into new context.
		 */
		perf_event__state_init(group_leader);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_remove_from_context(sibling, false);
			perf_event__state_init(sibling);
			put_ctx(gctx);
		}
		mutex_unlock(&gctx->mutex);
		put_ctx(gctx);
	}

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);

	if (move_group) {
		/* wait for concurrent readers of the old context to drain */
		synchronize_rcu();
		perf_install_in_context(ctx, group_leader, event->cpu);
		get_ctx(ctx);
		list_for_each_entry(sibling, &group_leader->sibling_list,
				    group_entry) {
			perf_install_in_context(ctx, sibling, event->cpu);
			get_ctx(ctx);
		}
	}

	perf_install_in_context(ctx, event, event->cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	put_online_cpus();

	/*
	 * Fixed mis-encoded "&current" (was mojibake "¤t") in the three
	 * statements below; they link the event onto the opener's list.
	 */
	event->owner = current;

	mutex_lock(&current->perf_event_mutex);
	list_add_tail(&event->owner_entry, &current->perf_event_list);
	mutex_unlock(&current->perf_event_mutex);

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(event);
	perf_event__id_header_size(event);

	/*
	 * Drop the reference on the group_event after placing the
	 * new event on the sibling_list. This ensures destruction
	 * of the group leader will find the pointer to itself in
	 * perf_group_detach().
	 */
	fdput(group);
	fd_install(event_fd, event_file);
	return event_fd;

err_context:
	perf_unpin_context(ctx);
	put_ctx(ctx);
err_alloc:
	free_event(event);
err_task:
	put_online_cpus();
	if (task)
		put_task_struct(task);
err_group_fd:
	fdput(group);
err_fd:
	put_unused_fd(event_fd);
	return err;
}
/**
 * perf_event_create_kernel_counter
 *
 * @attr: attributes of the counter to create
 * @cpu: cpu in which the counter is bound
 * @task: task to profile (NULL for percpu)
 * @overflow_handler: callback invoked on counter overflow (may be NULL)
 * @context: opaque cookie passed to @overflow_handler
 *
 * Returns the new event, or an ERR_PTR() on failure.
 */
struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
				 struct task_struct *task,
				 perf_overflow_handler_t overflow_handler,
				 void *context)
{
	struct perf_event_context *ctx;
	struct perf_event *event;
	int err;

	/*
	 * Get the target context (task or percpu):
	 */
	event = perf_event_alloc(attr, cpu, task, NULL, NULL,
				 overflow_handler, context);
	if (IS_ERR(event)) {
		err = PTR_ERR(event);
		goto err;
	}

	/* Mark owner so we could distinguish it from user events. */
	event->owner = EVENT_OWNER_KERNEL;

	ctx = find_get_context(event->pmu, task, cpu);
	if (IS_ERR(ctx)) {
		err = PTR_ERR(ctx);
		goto err_free;
	}

	WARN_ON_ONCE(ctx->parent_ctx);
	mutex_lock(&ctx->mutex);
	perf_install_in_context(ctx, event, cpu);
	++ctx->generation;
	perf_unpin_context(ctx);
	mutex_unlock(&ctx->mutex);

	return event;

err_free:
	free_event(event);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(perf_event_create_kernel_counter);
/*
 * Move every event of @pmu's per-cpu context from @src_cpu to @dst_cpu.
 * Events are detached under the source context mutex, parked on a local
 * list, then re-installed into the destination context after an RCU
 * grace period so concurrent readers never see a half-moved event.
 */
void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
{
	struct perf_event_context *src_ctx;
	struct perf_event_context *dst_ctx;
	struct perf_event *event, *tmp;
	LIST_HEAD(events);

	src_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, src_cpu)->ctx;
	dst_ctx = &per_cpu_ptr(pmu->pmu_cpu_context, dst_cpu)->ctx;

	mutex_lock(&src_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
				 event_entry) {
		perf_remove_from_context(event, false);
		/* drop the context reference the event held on src_ctx */
		put_ctx(src_ctx);
		list_add(&event->event_entry, &events);
	}
	mutex_unlock(&src_ctx->mutex);

	synchronize_rcu();

	mutex_lock(&dst_ctx->mutex);
	list_for_each_entry_safe(event, tmp, &events, event_entry) {
		list_del(&event->event_entry);
		if (event->state >= PERF_EVENT_STATE_OFF)
			event->state = PERF_EVENT_STATE_INACTIVE;
		perf_install_in_context(dst_ctx, event, dst_cpu);
		get_ctx(dst_ctx);
	}
	mutex_unlock(&dst_ctx->mutex);
}
EXPORT_SYMBOL_GPL(perf_pmu_migrate_context);
/*
 * Fold a dying child event's counts and times back into its parent and
 * unlink it from the parent's child list. Called at child task exit.
 */
static void sync_child_event(struct perf_event *child_event,
			     struct task_struct *child)
{
	struct perf_event *parent_event = child_event->parent;
	u64 child_val;

	if (child_event->attr.inherit_stat)
		perf_event_read_event(child_event, child);

	child_val = perf_event_count(child_event);

	/*
	 * Add back the child's count to the parent's count:
	 */
	atomic64_add(child_val, &parent_event->child_count);
	atomic64_add(child_event->total_time_enabled,
		     &parent_event->child_total_time_enabled);
	atomic64_add(child_event->total_time_running,
		     &parent_event->child_total_time_running);

	/*
	 * Remove this event from the parent's list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_del_init(&child_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	/*
	 * Release the parent event, if this was the last
	 * reference to it.
	 */
	put_event(parent_event);
}
/*
 * Detach one event from the exiting child's context; inherited events
 * additionally sync their counts into the parent and are freed here.
 */
static void
__perf_event_exit_task(struct perf_event *child_event,
		       struct perf_event_context *child_ctx,
		       struct task_struct *child)
{
	/* detach the whole group only for inherited (parented) events */
	perf_remove_from_context(child_event, !!child_event->parent);

	/*
	 * It can happen that the parent exits first, and has events
	 * that are still around due to the child reference. These
	 * events need to be zapped.
	 */
	if (child_event->parent) {
		sync_child_event(child_event, child);
		free_event(child_event);
	}
}
/*
 * Tear down one of the exiting task's perf contexts (index @ctxn):
 * unschedule it, detach it from the task, emit PERF_RECORD_EXIT, then
 * reap every event in it.
 */
static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
{
	struct perf_event *child_event, *tmp;
	struct perf_event_context *child_ctx;
	unsigned long flags;

	if (likely(!child->perf_event_ctxp[ctxn])) {
		perf_event_task(child, NULL, 0);
		return;
	}

	local_irq_save(flags);
	/*
	 * We can't reschedule here because interrupts are disabled,
	 * and either child is current or it is a task that can't be
	 * scheduled, so we are now safe from rescheduling changing
	 * our context.
	 */
	child_ctx = rcu_dereference_raw(child->perf_event_ctxp[ctxn]);

	/*
	 * Take the context lock here so that if find_get_context is
	 * reading child->perf_event_ctxp, we wait until it has
	 * incremented the context's refcount before we do put_ctx below.
	 */
	raw_spin_lock(&child_ctx->lock);
	task_ctx_sched_out(child_ctx);
	child->perf_event_ctxp[ctxn] = NULL;
	/*
	 * If this context is a clone; unclone it so it can't get
	 * swapped to another process while we're removing all
	 * the events from it.
	 */
	unclone_ctx(child_ctx);
	update_context_time(child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Report the task dead after unscheduling the events so that we
	 * won't get any samples after PERF_RECORD_EXIT. We can however still
	 * get a few PERF_RECORD_READ events.
	 */
	perf_event_task(child, child_ctx, 0);

	/*
	 * We can recurse on the same lock type through:
	 *
	 *   __perf_event_exit_task()
	 *     sync_child_event()
	 *       put_event()
	 *         mutex_lock(&ctx->mutex)
	 *
	 * But since its the parent context it won't be the same instance.
	 */
	mutex_lock(&child_ctx->mutex);

again:
	list_for_each_entry_safe(child_event, tmp, &child_ctx->pinned_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	list_for_each_entry_safe(child_event, tmp, &child_ctx->flexible_groups,
				 group_entry)
		__perf_event_exit_task(child_event, child_ctx, child);

	/*
	 * If the last event was a group event, it will have appended all
	 * its siblings to the list, but we obtained 'tmp' before that which
	 * will still point to the list head terminating the iteration.
	 */
	if (!list_empty(&child_ctx->pinned_groups) ||
	    !list_empty(&child_ctx->flexible_groups))
		goto again;

	mutex_unlock(&child_ctx->mutex);

	put_ctx(child_ctx);
}
/*
 * When a child task exits, feed back event values to parent events.
 */
void perf_event_exit_task(struct task_struct *child)
{
	struct perf_event *event, *tmp;
	int ctxn;

	/* first disown every event this task opened */
	mutex_lock(&child->perf_event_mutex);
	list_for_each_entry_safe(event, tmp, &child->perf_event_list,
				 owner_entry) {
		list_del_init(&event->owner_entry);

		/*
		 * Ensure the list deletion is visible before we clear
		 * the owner, closes a race against perf_release() where
		 * we need to serialize on the owner->perf_event_mutex.
		 */
		smp_wmb();
		event->owner = NULL;
	}
	mutex_unlock(&child->perf_event_mutex);

	/* then tear down each of the task's contexts */
	for_each_task_context_nr(ctxn)
		perf_event_exit_task_context(child, ctxn);
}
/*
 * Free an inherited event that was never exposed to userspace: unlink
 * it from its parent's child list, drop the parent reference, detach it
 * from its group and context, and free it.
 */
static void perf_free_event(struct perf_event *event,
			    struct perf_event_context *ctx)
{
	struct perf_event *parent = event->parent;

	/* only inherited events may be freed this way */
	if (WARN_ON_ONCE(!parent))
		return;

	mutex_lock(&parent->child_mutex);
	list_del_init(&event->child_list);
	mutex_unlock(&parent->child_mutex);

	put_event(parent);

	perf_group_detach(event);
	list_del_event(event, ctx);
	free_event(event);
}
/*
 * free an unexposed, unused context as created by inheritance by
 * perf_event_init_task below, used by fork() in case of fail.
 */
void perf_event_free_task(struct task_struct *task)
{
	struct perf_event_context *ctx;
	struct perf_event *event, *tmp;
	int ctxn;

	for_each_task_context_nr(ctxn) {
		ctx = task->perf_event_ctxp[ctxn];
		if (!ctx)
			continue;

		mutex_lock(&ctx->mutex);
again:
		list_for_each_entry_safe(event, tmp, &ctx->pinned_groups,
					 group_entry)
			perf_free_event(event, ctx);

		list_for_each_entry_safe(event, tmp, &ctx->flexible_groups,
					 group_entry)
			perf_free_event(event, ctx);

		/* group leaders may append siblings; rescan until empty */
		if (!list_empty(&ctx->pinned_groups) ||
		    !list_empty(&ctx->flexible_groups))
			goto again;

		mutex_unlock(&ctx->mutex);

		put_ctx(ctx);
	}
}
/*
 * Sanity check at final task teardown: by now every per-task perf
 * context must already have been detached and freed.
 */
void perf_event_delayed_put(struct task_struct *task)
{
	int n;

	for_each_task_context_nr(n)
		WARN_ON_ONCE(task->perf_event_ctxp[n]);
}
/*
 * inherit a event from parent task to child task:
 *
 * Allocates a child clone of @parent_event in @child_ctx, mirrors the
 * parent's enable state and sampling period, and links it onto the
 * parent's child list. Returns the new event, NULL when the parent is
 * going away, or an ERR_PTR() on allocation failure.
 */
static struct perf_event *
inherit_event(struct perf_event *parent_event,
	      struct task_struct *parent,
	      struct perf_event_context *parent_ctx,
	      struct task_struct *child,
	      struct perf_event *group_leader,
	      struct perf_event_context *child_ctx)
{
	struct perf_event *child_event;
	unsigned long flags;

	/*
	 * Instead of creating recursive hierarchies of events,
	 * we link inherited events back to the original parent,
	 * which has a filp for sure, which we use as the reference
	 * count:
	 */
	if (parent_event->parent)
		parent_event = parent_event->parent;

	child_event = perf_event_alloc(&parent_event->attr,
				       parent_event->cpu,
				       child,
				       group_leader, parent_event,
				       NULL, NULL);
	if (IS_ERR(child_event))
		return child_event;

	/* don't inherit from a parent that is being torn down */
	if (is_orphaned_event(parent_event) ||
	    !atomic_long_inc_not_zero(&parent_event->refcount)) {
		free_event(child_event);
		return NULL;
	}

	get_ctx(child_ctx);

	/*
	 * Make the child state follow the state of the parent event,
	 * not its attr.disabled bit.  We hold the parent's mutex,
	 * so we won't race with perf_event_{en, dis}able_family.
	 */
	if (parent_event->state >= PERF_EVENT_STATE_INACTIVE)
		child_event->state = PERF_EVENT_STATE_INACTIVE;
	else
		child_event->state = PERF_EVENT_STATE_OFF;

	if (parent_event->attr.freq) {
		u64 sample_period = parent_event->hw.sample_period;
		struct hw_perf_event *hwc = &child_event->hw;

		hwc->sample_period = sample_period;
		hwc->last_period   = sample_period;

		local64_set(&hwc->period_left, sample_period);
	}

	child_event->ctx = child_ctx;
	child_event->overflow_handler = parent_event->overflow_handler;
	child_event->overflow_handler_context
		= parent_event->overflow_handler_context;

	/*
	 * Precalculate sample_data sizes
	 */
	perf_event__header_size(child_event);
	perf_event__id_header_size(child_event);

	/*
	 * Link it up in the child's context:
	 */
	raw_spin_lock_irqsave(&child_ctx->lock, flags);
	add_event_to_ctx(child_event, child_ctx);
	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);

	/*
	 * Link this into the parent event's child list
	 */
	WARN_ON_ONCE(parent_event->ctx->parent_ctx);
	mutex_lock(&parent_event->child_mutex);
	list_add_tail(&child_event->child_list, &parent_event->child_list);
	mutex_unlock(&parent_event->child_mutex);

	return child_event;
}
/*
 * Inherit a whole event group: clone the leader into the child context,
 * then clone each sibling under the new leader. Returns 0 or a negative
 * errno from the first failing inherit_event().
 */
static int inherit_group(struct perf_event *parent_event,
			 struct task_struct *parent,
			 struct perf_event_context *parent_ctx,
			 struct task_struct *child,
			 struct perf_event_context *child_ctx)
{
	struct perf_event *leader;
	struct perf_event *sub;
	struct perf_event *child_ctr;

	leader = inherit_event(parent_event, parent, parent_ctx,
			       child, NULL, child_ctx);
	if (IS_ERR(leader))
		return PTR_ERR(leader);
	/* NULL leader (orphaned parent) falls through: siblings get no group */
	list_for_each_entry(sub, &parent_event->sibling_list, group_entry) {
		child_ctr = inherit_event(sub, parent, parent_ctx,
					  child, leader, child_ctx);
		if (IS_ERR(child_ctr))
			return PTR_ERR(child_ctr);
	}
	return 0;
}
/*
 * Inherit one parent group into the child's context @ctxn, allocating
 * that context on first use. Clears *@inherited_all when the event is
 * not marked for inheritance or inheritance fails, so the caller knows
 * the child context cannot be treated as a full clone.
 */
static int
inherit_task_group(struct perf_event *event, struct task_struct *parent,
		   struct perf_event_context *parent_ctx,
		   struct task_struct *child, int ctxn,
		   int *inherited_all)
{
	int ret;
	struct perf_event_context *child_ctx;

	if (!event->attr.inherit) {
		*inherited_all = 0;
		return 0;
	}

	child_ctx = child->perf_event_ctxp[ctxn];
	if (!child_ctx) {
		/*
		 * This is executed from the parent task context, so
		 * inherit events that have been marked for cloning.
		 * First allocate and initialize a context for the
		 * child.
		 */

		child_ctx = alloc_perf_context(parent_ctx->pmu, child);
		if (!child_ctx)
			return -ENOMEM;

		child->perf_event_ctxp[ctxn] = child_ctx;
	}

	ret = inherit_group(event, parent, parent_ctx,
			    child, child_ctx);

	if (ret)
		*inherited_all = 0;

	return ret;
}
/*
 * Initialize the perf_event context in task_struct
 *
 * Clones the parent's inheritable events (pinned then flexible groups)
 * into a new child context at fork time. If every group was inherited,
 * the child context is marked as a clone of the parent's so the two can
 * later be swapped cheaply on context switch.
 */
int perf_event_init_context(struct task_struct *child, int ctxn)
{
	struct perf_event_context *child_ctx, *parent_ctx;
	struct perf_event_context *cloned_ctx;
	struct perf_event *event;
	struct task_struct *parent = current;
	int inherited_all = 1;
	unsigned long flags;
	int ret = 0;

	if (likely(!parent->perf_event_ctxp[ctxn]))
		return 0;

	/*
	 * If the parent's context is a clone, pin it so it won't get
	 * swapped under us.
	 */
	parent_ctx = perf_pin_task_context(parent, ctxn);

	/*
	 * No need to check if parent_ctx != NULL here; since we saw
	 * it non-NULL earlier, the only reason for it to become NULL
	 * is if we exit, and since we're currently in the middle of
	 * a fork we can't be exiting at the same time.
	 */

	/*
	 * Lock the parent list. No need to lock the child - not PID
	 * hashed yet and not running, so nobody can access it.
	 */
	mutex_lock(&parent_ctx->mutex);

	/*
	 * We dont have to disable NMIs - we are only looking at
	 * the list, not manipulating it:
	 */
	list_for_each_entry(event, &parent_ctx->pinned_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	/*
	 * We can't hold ctx->lock when iterating the ->flexible_group list due
	 * to allocations, but we need to prevent rotation because
	 * rotate_ctx() will change the list from interrupt context.
	 */
	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 1;
	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);

	list_for_each_entry(event, &parent_ctx->flexible_groups, group_entry) {
		ret = inherit_task_group(event, parent, parent_ctx,
					 child, ctxn, &inherited_all);
		if (ret)
			break;
	}

	raw_spin_lock_irqsave(&parent_ctx->lock, flags);
	parent_ctx->rotate_disable = 0;

	child_ctx = child->perf_event_ctxp[ctxn];

	if (child_ctx && inherited_all) {
		/*
		 * Mark the child context as a clone of the parent
		 * context, or of whatever the parent is a clone of.
		 *
		 * Note that if the parent is a clone, the holding of
		 * parent_ctx->lock avoids it from being uncloned.
		 */
		cloned_ctx = parent_ctx->parent_ctx;
		if (cloned_ctx) {
			child_ctx->parent_ctx = cloned_ctx;
			child_ctx->parent_gen = parent_ctx->parent_gen;
		} else {
			child_ctx->parent_ctx = parent_ctx;
			child_ctx->parent_gen = parent_ctx->generation;
		}
		get_ctx(child_ctx->parent_ctx);
	}

	raw_spin_unlock_irqrestore(&parent_ctx->lock, flags);
	mutex_unlock(&parent_ctx->mutex);

	perf_unpin_context(parent_ctx);
	put_ctx(parent_ctx);

	return ret;
}
/*
 * Initialize the perf_event context in task_struct
 *
 * Called from fork(); sets up the per-task perf state and inherits the
 * parent's contexts one by one. Returns 0 or the first errno; on error
 * the caller is expected to clean up via perf_event_free_task().
 */
int perf_event_init_task(struct task_struct *child)
{
	int ctxn, ret;

	memset(child->perf_event_ctxp, 0, sizeof(child->perf_event_ctxp));
	mutex_init(&child->perf_event_mutex);
	INIT_LIST_HEAD(&child->perf_event_list);

	for_each_task_context_nr(ctxn) {
		ret = perf_event_init_context(child, ctxn);
		if (ret)
			return ret;
	}

	return 0;
}
/* Boot-time setup of per-cpu software-event hash state and rotation lists. */
static void __init perf_event_init_all_cpus(void)
{
	struct swevent_htable *swhash;
	int cpu;

	for_each_possible_cpu(cpu) {
		swhash = &per_cpu(swevent_htable, cpu);
		mutex_init(&swhash->hlist_mutex);
		INIT_LIST_HEAD(&per_cpu(rotation_list, cpu));
	}
}
/*
 * CPU hotplug: mark @cpu's swevent state online and (re)allocate its
 * software-event hash list if it still has users.
 */
static void __cpuinit perf_event_init_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = true;
	if (swhash->hlist_refcount > 0) {
		struct swevent_hlist *hlist;

		hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
		/* allocation failure is tolerated; only warn */
		WARN_ON(!hlist);
		rcu_assign_pointer(swhash->swevent_hlist, hlist);
	}
	mutex_unlock(&swhash->hlist_mutex);
}
#if defined CONFIG_HOTPLUG_CPU || defined CONFIG_KEXEC
/* Remove this CPU's context of @pmu from the rotation list (irqs off). */
static void perf_pmu_rotate_stop(struct pmu *pmu)
{
	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);

	WARN_ON(!irqs_disabled());

	list_del_init(&cpuctx->rotation_list);
}
/*
 * Runs on the dying CPU: stop rotation and remove every event from the
 * given context (without detaching groups).
 */
static void __perf_event_exit_context(void *__info)
{
	struct remove_event re = { .detach_group = false };
	struct perf_event_context *ctx = __info;

	perf_pmu_rotate_stop(ctx->pmu);

	rcu_read_lock();
	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
		__perf_remove_from_context(&re);
	rcu_read_unlock();
}
/*
 * Runs on the dying CPU: stop only the software cpu-clock events in the
 * context, keeping everything else alive across the hotplug.
 */
static void __perf_event_stop_swclock(void *__info)
{
	struct perf_event_context *ctx = __info;
	struct perf_event *event, *tmp;

	list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
		if (event->attr.config == PERF_COUNT_SW_CPU_CLOCK &&
		    event->attr.type == PERF_TYPE_SOFTWARE)
			cpu_clock_event_stop(event, 0);
	}
}
/*
 * For every registered pmu, run the appropriate teardown on @cpu: full
 * context exit, or — when the pmu keeps events across hotplug — just
 * stop the software clock events.
 */
static void perf_event_exit_cpu_context(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;

		mutex_lock(&ctx->mutex);
		/*
		 * If keeping events across hotplugging is supported, do not
		 * remove the event list, but keep it alive across CPU hotplug.
		 * The context is exited via an fd close path when userspace
		 * is done and the target CPU is online. If software clock
		 * event is active, then stop hrtimer associated with it.
		 * Start the timer when the CPU comes back online.
		 */
		if (!pmu->events_across_hotplug)
			smp_call_function_single(cpu, __perf_event_exit_context,
						 ctx, 1);
		else
			smp_call_function_single(cpu, __perf_event_stop_swclock,
						 ctx, 1);
		mutex_unlock(&ctx->mutex);
	}
	srcu_read_unlock(&pmus_srcu, idx);
}
/*
 * When @cpu comes back online, restart the software cpu-clock events
 * that __perf_event_stop_swclock() paused on pmus that keep events
 * across hotplug.
 */
static void perf_event_start_swclock(int cpu)
{
	struct perf_event_context *ctx;
	struct pmu *pmu;
	int idx;
	struct perf_event *event, *tmp;

	idx = srcu_read_lock(&pmus_srcu);
	list_for_each_entry_rcu(pmu, &pmus, entry) {
		if (pmu->events_across_hotplug) {
			ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
			list_for_each_entry_safe(event, tmp, &ctx->event_list,
						 event_entry) {
				if (event->attr.config ==
				    PERF_COUNT_SW_CPU_CLOCK &&
				    event->attr.type ==
				    PERF_TYPE_SOFTWARE)
					cpu_clock_event_start(event, 0);
			}
		}
	}
	srcu_read_unlock(&pmus_srcu, idx);
}
/* Hotplug teardown for @cpu: exit its contexts and drop swevent state. */
static void perf_event_exit_cpu(int cpu)
{
	struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);

	perf_event_exit_cpu_context(cpu);

	mutex_lock(&swhash->hlist_mutex);
	swhash->online = false;
	swevent_hlist_release(swhash);
	mutex_unlock(&swhash->hlist_mutex);
}
#else
static inline void perf_event_exit_cpu(int cpu) { }
static inline void perf_event_start_swclock(int cpu) { }
#endif
/* Reboot notifier: quiesce perf on every online CPU before restart. */
static int
perf_reboot(struct notifier_block *notifier, unsigned long val, void *v)
{
	int cpu;

	for_each_online_cpu(cpu)
		perf_event_exit_cpu(cpu);

	return NOTIFY_OK;
}
/*
 * Run the perf reboot notifier at the very last possible moment so that
 * the generic watchdog code runs as long as possible.
 * (INT_MIN priority sorts it last in the notifier chain.)
 */
static struct notifier_block perf_reboot_notifier = {
	.notifier_call = perf_reboot,
	.priority = INT_MIN,
};
/* CPU hotplug notifier: dispatch init/exit/swclock-restart per phase. */
static int __cpuinit
perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {

	case CPU_UP_PREPARE:
	case CPU_DOWN_FAILED:
		perf_event_init_cpu(cpu);
		break;

	case CPU_UP_CANCELED:
	case CPU_DOWN_PREPARE:
		perf_event_exit_cpu(cpu);
		break;

	case CPU_STARTING:
		perf_event_start_swclock(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}
/*
 * Boot-time initialization of the perf subsystem: per-cpu state, the
 * built-in software/clock/tracepoint pmus, hotplug and reboot hooks,
 * and hardware breakpoints.
 */
void __init perf_event_init(void)
{
	int ret;

	idr_init(&pmu_idr);

	perf_event_init_all_cpus();
	init_srcu_struct(&pmus_srcu);
	perf_pmu_register(&perf_swevent, "software", PERF_TYPE_SOFTWARE);
	perf_pmu_register(&perf_cpu_clock, NULL, -1);
	perf_pmu_register(&perf_task_clock, NULL, -1);
	perf_tp_register();
	perf_cpu_notifier(perf_cpu_notify);
	register_reboot_notifier(&perf_reboot_notifier);

	ret = init_hw_breakpoint();
	WARN(ret, "hw_breakpoint initialization failed with: %d", ret);

	/* do not patch jump label more than once per second */
	jump_label_rate_limit(&perf_sched_events, HZ);

	/*
	 * Build time assertion that we keep the data_head at the intended
	 * location.  IOW, validation we got the __reserved[] size right.
	 */
	BUILD_BUG_ON((offsetof(struct perf_event_mmap_page, data_head))
		     != 1024);
}
/*
 * Register the pmu bus and create a sysfs device for every named pmu
 * registered so far; later registrations are handled because
 * pmu_bus_running is set under pmus_lock.
 */
static int __init perf_event_sysfs_init(void)
{
	struct pmu *pmu;
	int ret;

	mutex_lock(&pmus_lock);

	ret = bus_register(&pmu_bus);
	if (ret)
		goto unlock;

	list_for_each_entry(pmu, &pmus, entry) {
		/* anonymous or dynamically-typed pmus get no sysfs node */
		if (!pmu->name || pmu->type < 0)
			continue;

		ret = pmu_dev_alloc(pmu);
		WARN(ret, "Failed to register pmu: %s, reason %d\n", pmu->name, ret);
	}
	pmu_bus_running = 1;
	ret = 0;

unlock:
	mutex_unlock(&pmus_lock);

	return ret;
}
device_initcall(perf_event_sysfs_init);
#ifdef CONFIG_CGROUP_PERF
/*
 * Allocate the per-cgroup perf state (struct perf_cgroup plus its
 * per-cpu info). Returns the embedded css, or ERR_PTR(-ENOMEM).
 */
static struct cgroup_subsys_state *perf_cgroup_css_alloc(struct cgroup *cont)
{
	struct perf_cgroup *jc = kzalloc(sizeof(*jc), GFP_KERNEL);

	if (!jc)
		goto fail;

	jc->info = alloc_percpu(struct perf_cgroup_info);
	if (!jc->info)
		goto free_jc;

	return &jc->css;

free_jc:
	kfree(jc);
fail:
	return ERR_PTR(-ENOMEM);
}
/* Release the per-cgroup perf state allocated by perf_cgroup_css_alloc(). */
static void perf_cgroup_css_free(struct cgroup *cont)
{
	struct perf_cgroup *jc;
	jc = container_of(cgroup_subsys_state(cont, perf_subsys_id),
			  struct perf_cgroup, css);
	free_percpu(jc->info);
	kfree(jc);
}
/* IPI callback: re-run cgroup event switching for @info's task. */
static int __perf_cgroup_move(void *info)
{
	struct task_struct *task = info;
	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
	return 0;
}
/* cgroup attach hook: refresh cgroup events on each migrating task. */
static void perf_cgroup_attach(struct cgroup *cgrp, struct cgroup_taskset *tset)
{
	struct task_struct *task;

	cgroup_taskset_for_each(task, cgrp, tset)
		task_function_call(task, __perf_cgroup_move, task);
}
/* cgroup exit hook: switch the exiting task out of its cgroup events. */
static void perf_cgroup_exit(struct cgroup *cgrp, struct cgroup *old_cgrp,
			     struct task_struct *task)
{
	/*
	 * cgroup_exit() is called in the copy_process() failure path.
	 * Ignore this case since the task hasn't ran yet, this avoids
	 * trying to poke a half freed task state from generic code.
	 */
	if (!(task->flags & PF_EXITING))
		return;

	task_function_call(task, __perf_cgroup_move, task);
}
/* The "perf_event" cgroup controller: lifecycle and migration hooks. */
struct cgroup_subsys perf_subsys = {
	.name		= "perf_event",
	.subsys_id	= perf_subsys_id,
	.css_alloc	= perf_cgroup_css_alloc,
	.css_free	= perf_cgroup_css_free,
	.exit		= perf_cgroup_exit,
	.attach		= perf_cgroup_attach,
};
#endif /* CONFIG_CGROUP_PERF */
| gpl-2.0 |
Jason-Choi/EastSea-Kernel | arch/arm/mach-omap2/id.c | 89 | 15108 | /*
* linux/arch/arm/mach-omap2/id.c
*
* OMAP2 CPU identification code
*
* Copyright (C) 2005 Nokia Corporation
* Written by Tony Lindgren <tony@atomide.com>
*
* Copyright (C) 2009-11 Texas Instruments
* Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/cputype.h>
#include <plat/common.h>
#include <plat/cpu.h>
#include <mach/id.h>
#include "control.h"
/* Chip-type bitmask for the running SoC; tested via omap_chip_is(). */
static struct omap_chip_id omap_chip;
/* Combined type id detected at boot; exposed through omap_rev(). */
static unsigned int omap_revision;

/* OMAP3_HAS_* flags, populated by omap3_check_features(). */
u32 omap3_features;

/* OMAP4 feature flags. */
u32 omap4_features;
/* Return the combined OMAP type/revision id detected at boot. */
unsigned int omap_rev(void)
{
	return omap_revision;
}
EXPORT_SYMBOL(omap_rev);
/**
 * omap_chip_is - test whether currently running OMAP matches a chip type
 * @oci: omap_chip_id to test against
 *
 * Test whether the currently-running OMAP chip matches the supplied
 * chip type 'oci'.  Returns 1 upon a match; 0 upon failure.
 */
int omap_chip_is(struct omap_chip_id oci)
{
	/* normalize the bitwise intersection to 0/1 */
	return !!(oci.oc & omap_chip.oc);
}
EXPORT_SYMBOL(omap_chip_is);
/*
 * Read the device-type field (GP/EMU/HS/...) from the family-specific
 * control status register. Returns 0 when the family is unknown.
 */
int omap_type(void)
{
	u32 val = 0;

	if (cpu_is_omap24xx()) {
		val = omap_ctrl_readl(OMAP24XX_CONTROL_STATUS);
	} else if (cpu_is_omap34xx()) {
		val = omap_ctrl_readl(OMAP343X_CONTROL_STATUS);
	} else if (cpu_is_omap44xx()) {
		val = omap_ctrl_readl(OMAP4_CTRL_MODULE_CORE_STATUS);
	} else {
		pr_err("Cannot detect omap type!\n");
		goto out;
	}

	/* device type lives in bits [10:8] of the status register */
	val &= OMAP2_DEVICETYPE_MASK;
	val >>= 8;

out:
	return val;
}
EXPORT_SYMBOL(omap_type);
/*----------------------------------------------------------------------------*/
#define OMAP_TAP_IDCODE 0x0204
#define OMAP_TAP_DIE_ID_0 0x0218
#define OMAP_TAP_DIE_ID_1 0x021C
#define OMAP_TAP_DIE_ID_2 0x0220
#define OMAP_TAP_DIE_ID_3 0x0224
#define OMAP_TAP_DIE_ID_44XX_0 0x0200
#define OMAP_TAP_DIE_ID_44XX_1 0x0208
#define OMAP_TAP_DIE_ID_44XX_2 0x020c
#define OMAP_TAP_DIE_ID_44XX_3 0x0210
#define OMAP_TAP_PROD_ID_44XX_0 0x0214
#define OMAP_TAP_PROD_ID_44XX_1 0x0218
#define read_tap_reg(reg) __raw_readl(tap_base + (reg))
/* One IDCODE/production-id pairing and the omap_revision it maps to. */
struct omap_id {
	u16	hawkeye;	/* Silicon type (Hawkeye id) */
	u8	dev;		/* Device type from production_id reg */
	u32	type;		/* Combined type id copied to omap_revision */
};

/* Register values to detect the OMAP version */
static struct omap_id omap_ids[] __initdata = {
	{ .hawkeye = 0xb5d9, .dev = 0x0, .type = 0x24200024 },
	{ .hawkeye = 0xb5d9, .dev = 0x1, .type = 0x24201024 },
	{ .hawkeye = 0xb5d9, .dev = 0x2, .type = 0x24202024 },
	{ .hawkeye = 0xb5d9, .dev = 0x4, .type = 0x24220024 },
	{ .hawkeye = 0xb5d9, .dev = 0x8, .type = 0x24230024 },
	{ .hawkeye = 0xb68a, .dev = 0x0, .type = 0x24300024 },
};

/* ioremapped base of the TAP block; set up elsewhere before use */
static void __iomem *tap_base;
/* offset of the production-id register within the TAP block */
static u16 tap_prod_id;
/* Fill @odi with the four die-id words, using the 44xx layout if needed. */
void omap_get_die_id(struct omap_die_id *odi)
{
	if (cpu_is_omap44xx()) {
		odi->id_0 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_0);
		odi->id_1 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_1);
		odi->id_2 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_2);
		odi->id_3 = read_tap_reg(OMAP_TAP_DIE_ID_44XX_3);

		return;
	}
	odi->id_0 = read_tap_reg(OMAP_TAP_DIE_ID_0);
	odi->id_1 = read_tap_reg(OMAP_TAP_DIE_ID_1);
	odi->id_2 = read_tap_reg(OMAP_TAP_DIE_ID_2);
	odi->id_3 = read_tap_reg(OMAP_TAP_DIE_ID_3);
}
/*
 * Fill @odi with the production-id words (OMAP4 only).
 *
 * NOTE(review): on non-44xx chips *odi is left completely untouched —
 * callers must pre-initialize it or check the chip type; confirm this
 * is intended before relying on the output.
 */
void omap_get_production_id(struct omap_die_id *odi)
{
	if (cpu_is_omap44xx()) {
		odi->id_0 = read_tap_reg(OMAP_TAP_PROD_ID_44XX_0);
		odi->id_1 = read_tap_reg(OMAP_TAP_PROD_ID_44XX_1);
		odi->id_2 = 0;
		odi->id_3 = 0;
	}
}
/*
 * Identify the running OMAP2 variant from the TAP IDCODE and production
 * id registers, store the result in omap_revision, and log it.
 */
static void __init omap24xx_check_revision(void)
{
	int i, j;
	u32 idcode, prod_id;
	u16 hawkeye;
	u8 dev_type, rev;
	struct omap_die_id odi;

	idcode = read_tap_reg(OMAP_TAP_IDCODE);
	prod_id = read_tap_reg(tap_prod_id);
	hawkeye = (idcode >> 12) & 0xffff;
	rev = (idcode >> 28) & 0x0f;
	dev_type = (prod_id >> 16) & 0x0f;
	omap_get_die_id(&odi);

	pr_debug("OMAP_TAP_IDCODE 0x%08x REV %i HAWKEYE 0x%04x MANF %03x\n",
		 idcode, rev, hawkeye, (idcode >> 1) & 0x7ff);
	pr_debug("OMAP_TAP_DIE_ID_0: 0x%08x\n", odi.id_0);
	pr_debug("OMAP_TAP_DIE_ID_1: 0x%08x DEV_REV: %i\n",
		 odi.id_1, (odi.id_1 >> 28) & 0xf);
	pr_debug("OMAP_TAP_DIE_ID_2: 0x%08x\n", odi.id_2);
	pr_debug("OMAP_TAP_DIE_ID_3: 0x%08x\n", odi.id_3);
	pr_debug("OMAP_TAP_PROD_ID_0: 0x%08x DEV_TYPE: %i\n",
		 prod_id, dev_type);

	/* Check hawkeye ids */
	for (i = 0; i < ARRAY_SIZE(omap_ids); i++) {
		if (hawkeye == omap_ids[i].hawkeye)
			break;
	}

	if (i == ARRAY_SIZE(omap_ids)) {
		printk(KERN_ERR "Unknown OMAP CPU id\n");
		return;
	}

	for (j = i; j < ARRAY_SIZE(omap_ids); j++) {
		if (dev_type == omap_ids[j].dev)
			break;
	}

	if (j == ARRAY_SIZE(omap_ids)) {
		printk(KERN_ERR "Unknown OMAP device type. "
				"Handling it as OMAP%04x\n",
				omap_ids[i].type >> 16);
		j = i;
	}

	/*
	 * Record the matched type id so omap_rev() reports it. Previously
	 * 'j' was computed but never used, leaving omap_revision unset and
	 * the pr_info below printing a stale value.
	 */
	omap_revision = omap_ids[j].type;

	pr_info("OMAP%04x", omap_rev() >> 16);
	if ((omap_rev() >> 8) & 0x0f)
		pr_info("ES%x", (omap_rev() >> 12) & 0xf);
	pr_info("\n");
}
/*
 * OMAP3_CHECK_FEATURE - set OMAP3_HAS_<feat> in omap3_features when the
 * corresponding field of @status is not FEAT_<feat>_NONE.
 */
#define OMAP3_CHECK_FEATURE(status,feat) \
    if (((status & OMAP3_ ##feat## _MASK) \
    >> OMAP3_ ##feat## _SHIFT) != FEAT_ ##feat## _NONE) { \
    omap3_features |= OMAP3_HAS_ ##feat; \
    }
/*
 * omap3_check_features - populate omap3_features.
 *
 * Feature bits are read from the OMAP_STATUS control register, then a
 * few per-family fixups are applied: 3630 gains the 192 MHz clock flag,
 * AM3505/AM3517 lack I/O wakeup, and SDRC is assumed present on all
 * parts handled here.
 */
static void __init omap3_check_features(void)
{
    u32 status;
    omap3_features = 0;
    status = omap_ctrl_readl(OMAP3_CONTROL_OMAP_STATUS);
    OMAP3_CHECK_FEATURE(status, L2CACHE);
    OMAP3_CHECK_FEATURE(status, IVA);
    OMAP3_CHECK_FEATURE(status, SGX);
    OMAP3_CHECK_FEATURE(status, NEON);
    OMAP3_CHECK_FEATURE(status, ISP);
    if (cpu_is_omap3630())
        omap3_features |= OMAP3_HAS_192MHZ_CLK;
    if (!cpu_is_omap3505() && !cpu_is_omap3517())
        omap3_features |= OMAP3_HAS_IO_WAKEUP;
    omap3_features |= OMAP3_HAS_SDRC;
    /*
     * TODO: Get additional info (where applicable)
     * e.g. Size of L2 cache.
     */
}
/*
 * omap4_check_features - record the MPU speed grades for OMAP4 parts.
 *
 * OMAP443x parts get the 1 GHz flag.  OMAP446x parts encode their speed
 * grade in STD_FUSE_PROD_ID_1, bits [17:16]: value 2 marks a high
 * performance device (1.5 GHz capable, which also implies 1.2 GHz);
 * any other value is treated as a standard 1.2 GHz device.
 */
static void __init omap4_check_features(void)
{
    omap4_features = 0;

    if (cpu_is_omap443x())
        omap4_features |= OMAP4_HAS_MPU_1GHZ;

    if (cpu_is_omap446x()) {
        u32 fuse;

        fuse = read_tap_reg(OMAP4_CTRL_MODULE_CORE_STD_FUSE_PROD_ID_1);
        /* Speed grade is carried in bits [17:16] */
        if (((fuse >> 16) & 3) == 2) {
            /* High performance device */
            omap4_features |= OMAP4_HAS_MPU_1_5GHZ;
            omap4_features |= OMAP4_HAS_MPU_1_2GHZ;
        } else {
            /* Standard device */
            omap4_features |= OMAP4_HAS_MPU_1_2GHZ;
        }
    }
}
/*
 * ti816x_check_features - TI816x has no feature status register, so the
 * feature set is hard-wired: NEON only.
 */
static void __init ti816x_check_features(void)
{
    omap3_features = OMAP3_HAS_NEON;
}
/*
 * omap3_check_revision - detect the OMAP3/AM35xx/TI816x silicon revision.
 *
 * 3430 ES1.0 cannot be identified via the TAP registers, so it is
 * recognised first through the Cortex-A8 r0p0 CPUID.  All other parts
 * are classified by the hawkeye id and the rev field of the IDCODE
 * register.  Results are recorded in omap_revision and omap_chip.oc;
 * an unknown hawkeye id defaults to the latest known 36xx revision.
 */
static void __init omap3_check_revision(void)
{
    u32 cpuid, idcode;
    u16 hawkeye;
    u8 rev;
    omap_chip.oc = CHIP_IS_OMAP3430;
    /*
     * We cannot access revision registers on ES1.0.
     * If the processor type is Cortex-A8 and the revision is 0x0
     * it means its Cortex r0p0 which is 3430 ES1.0.
     */
    cpuid = read_cpuid(CPUID_ID);
    if ((((cpuid >> 4) & 0xfff) == 0xc08) && ((cpuid & 0xf) == 0x0)) {
        omap_revision = OMAP3430_REV_ES1_0;
        omap_chip.oc |= CHIP_IS_OMAP3430ES1;
        return;
    }
    /*
     * Detection for 34xx ES2.0 and above can be done with just
     * hawkeye and rev. See TRM 1.5.2 Device Identification.
     * Note that rev does not map directly to our defined processor
     * revision numbers as ES1.0 uses value 0.
     */
    idcode = read_tap_reg(OMAP_TAP_IDCODE);
    hawkeye = (idcode >> 12) & 0xffff;
    rev = (idcode >> 28) & 0xff;
    switch (hawkeye) {
    case 0xb7ae:
        /* Handle 34xx/35xx devices */
        switch (rev) {
        case 0: /* Take care of early samples */
        case 1:
            omap_revision = OMAP3430_REV_ES2_0;
            omap_chip.oc |= CHIP_IS_OMAP3430ES2;
            break;
        case 2:
            omap_revision = OMAP3430_REV_ES2_1;
            omap_chip.oc |= CHIP_IS_OMAP3430ES2;
            break;
        case 3:
            omap_revision = OMAP3430_REV_ES3_0;
            omap_chip.oc |= CHIP_IS_OMAP3430ES3_0;
            break;
        case 4:
            omap_revision = OMAP3430_REV_ES3_1;
            omap_chip.oc |= CHIP_IS_OMAP3430ES3_1;
            break;
        case 7:
        /* FALLTHROUGH */
        default:
            /* Use the latest known revision as default */
            omap_revision = OMAP3430_REV_ES3_1_2;
            /* REVISIT: Add CHIP_IS_OMAP3430ES3_1_2? */
            omap_chip.oc |= CHIP_IS_OMAP3430ES3_1;
        }
        break;
    case 0xb868:
        /* Handle OMAP35xx/AM35xx devices
         *
         * Set the device to be OMAP3505 here. Actual device
         * is identified later based on the features.
         *
         * REVISIT: AM3505/AM3517 should have their own CHIP_IS
         */
        omap_revision = OMAP3505_REV(rev);
        omap_chip.oc |= CHIP_IS_OMAP3430ES3_1;
        break;
    case 0xb891:
        /* Handle 36xx devices */
        omap_chip.oc |= CHIP_IS_OMAP3630ES1;
        switch(rev) {
        case 0: /* Take care of early samples */
            omap_revision = OMAP3630_REV_ES1_0;
            break;
        case 1:
            omap_revision = OMAP3630_REV_ES1_1;
            omap_chip.oc |= CHIP_IS_OMAP3630ES1_1;
            break;
        case 2:
        default:
            omap_revision = OMAP3630_REV_ES1_2;
            omap_chip.oc |= CHIP_IS_OMAP3630ES1_2;
        }
        break;
    case 0xb81e:
        /* TI816x: note the chip field is overwritten, not OR-ed */
        omap_chip.oc = CHIP_IS_TI816X;
        switch (rev) {
        case 0:
            omap_revision = TI8168_REV_ES1_0;
            break;
        case 1:
            omap_revision = TI8168_REV_ES1_1;
            break;
        default:
            omap_revision = TI8168_REV_ES1_1;
        }
        break;
    default:
        /* Unknown default to latest silicon rev as default*/
        omap_revision = OMAP3630_REV_ES1_2;
        omap_chip.oc |= CHIP_IS_OMAP3630ES1_2;
    }
}
/*
 * omap4_check_revision - detect the OMAP4 silicon revision.
 *
 * Classifies the part via the hawkeye id and rev field of the TAP
 * IDCODE.  Some early 4430 ES2.0 samples reuse the ES1.0 IDCODE, so
 * rev 0 on non-4460 parts is resolved via the ARM CPUID instead.
 * Unknown parts default to 4430 ES2.2.  Prints the detected part.
 */
static void __init omap4_check_revision(void)
{
    u32 idcode;
    u8 rev;
    /*
     * NOTE: OMAP4460+ uses ramp system for identification and hawkeye
     * variable is reused for the same. Since the values are unique
     * we continue to use the current system
     */
    u16 hawkeye;
    /*
     * The IC rev detection is done with hawkeye and rev.
     * Note that rev does not map directly to defined processor
     * revision numbers as ES1.0 uses value 0.
     */
    idcode = read_tap_reg(OMAP_TAP_IDCODE);
    hawkeye = (idcode >> 12) & 0xffff;
    rev = (idcode >> 28) & 0xf;
    /*
     * Few initial 4430 ES2.0 samples IDCODE is same as ES1.0
     * Use ARM register to detect the correct ES version
     */
    if (!rev && (hawkeye != 0xb94e)) {
        idcode = read_cpuid(CPUID_ID);
        rev = (idcode & 0xf) - 1;
    }
    switch (hawkeye) {
    case 0xb852:
        /* 4430 ES1.0 / ES2.0 */
        switch (rev) {
        case 0:
            omap_revision = OMAP4430_REV_ES1_0;
            omap_chip.oc |= CHIP_IS_OMAP4430ES1;
            break;
        case 1:
        default:
            omap_revision = OMAP4430_REV_ES2_0;
            omap_chip.oc |= CHIP_IS_OMAP4430ES2;
        }
        break;
    case 0xb95c:
        /* 4430 ES2.1 / ES2.2 */
        switch (rev) {
        case 3:
            omap_revision = OMAP4430_REV_ES2_1;
            omap_chip.oc |= CHIP_IS_OMAP4430ES2_1;
            break;
        case 4:
        default:
            omap_revision = OMAP4430_REV_ES2_2;
            omap_chip.oc |= CHIP_IS_OMAP4430ES2_2;
        }
        break;
    case 0xb94e:
        /* 4460 */
        switch (rev) {
        case 0:
            omap_revision = OMAP4460_REV_ES1_0;
            omap_chip.oc |= CHIP_IS_OMAP4460ES1_0;
            break;
        case 2:
        default:
            omap_revision = OMAP4460_REV_ES1_1;
            omap_chip.oc |= CHIP_IS_OMAP4460ES1_1;
            break;
        }
        break;
    default:
        /* Unknown default to latest silicon rev as default */
        omap_revision = OMAP4430_REV_ES2_2;
        omap_chip.oc |= CHIP_IS_OMAP4430ES2_2;
    }
    pr_info("OMAP%04x ES%d.%d\n", omap_rev() >> 16,
        ((omap_rev() >> 12) & 0xf), ((omap_rev() >> 8) & 0xf));
}
/*
 * OMAP3_SHOW_FEATURE - print the feature name when omap3_has_<feat>()
 * reports it present (used to build the feature list in omap3_cpuinfo).
 */
#define OMAP3_SHOW_FEATURE(feat) \
    if (omap3_has_ ##feat()) \
    printk(#feat" ");
/*
 * omap3_cpuinfo - refine the detected OMAP3 part and print a summary.
 *
 * Uses the feature set (IVA/SGX) to tell apart 3503/3515/3525/3530 and
 * AM3505/AM3517, updating omap_revision accordingly, then maps the
 * revision bits to a human-readable "ES" string per family and prints
 * the part name with its feature list.
 */
static void __init omap3_cpuinfo(void)
{
    u8 rev = GET_OMAP_REVISION();
    char cpu_name[16], cpu_rev[16];
    /* OMAP3430 and OMAP3530 are assumed to be same.
     *
     * OMAP3525, OMAP3515 and OMAP3503 can be detected only based
     * on available features. Upon detection, update the CPU id
     * and CPU class bits.
     */
    if (cpu_is_omap3630()) {
        strcpy(cpu_name, "OMAP3630");
    } else if (cpu_is_omap3505()) {
        /*
         * AM35xx devices
         */
        if (omap3_has_sgx()) {
            /* SGX present means this is really an AM3517 */
            omap_revision = OMAP3517_REV(rev);
            strcpy(cpu_name, "AM3517");
        } else {
            /* Already set in omap3_check_revision() */
            strcpy(cpu_name, "AM3505");
        }
    } else if (cpu_is_ti816x()) {
        strcpy(cpu_name, "TI816X");
    } else if (omap3_has_iva() && omap3_has_sgx()) {
        /* OMAP3430, OMAP3525, OMAP3515, OMAP3503 devices */
        strcpy(cpu_name, "OMAP3430/3530");
    } else if (omap3_has_iva()) {
        omap_revision = OMAP3525_REV(rev);
        strcpy(cpu_name, "OMAP3525");
    } else if (omap3_has_sgx()) {
        omap_revision = OMAP3515_REV(rev);
        strcpy(cpu_name, "OMAP3515");
    } else {
        omap_revision = OMAP3503_REV(rev);
        strcpy(cpu_name, "OMAP3503");
    }
    /* Map the raw revision bits to an "ES" string; the mapping differs
     * per family, so each family gets its own switch. */
    if (cpu_is_omap3630() || cpu_is_ti816x()) {
        switch (rev) {
        case OMAP_REVBITS_00:
            strcpy(cpu_rev, "1.0");
            break;
        case OMAP_REVBITS_01:
            strcpy(cpu_rev, "1.1");
            break;
        case OMAP_REVBITS_02:
        /* FALLTHROUGH */
        default:
            /* Use the latest known revision as default */
            strcpy(cpu_rev, "1.2");
        }
    } else if (cpu_is_omap3505() || cpu_is_omap3517()) {
        switch (rev) {
        case OMAP_REVBITS_00:
            strcpy(cpu_rev, "1.0");
            break;
        case OMAP_REVBITS_01:
        /* FALLTHROUGH */
        default:
            /* Use the latest known revision as default */
            strcpy(cpu_rev, "1.1");
        }
    } else {
        switch (rev) {
        case OMAP_REVBITS_00:
            strcpy(cpu_rev, "1.0");
            break;
        case OMAP_REVBITS_01:
            strcpy(cpu_rev, "2.0");
            break;
        case OMAP_REVBITS_02:
            strcpy(cpu_rev, "2.1");
            break;
        case OMAP_REVBITS_03:
            strcpy(cpu_rev, "3.0");
            break;
        case OMAP_REVBITS_04:
            strcpy(cpu_rev, "3.1");
            break;
        case OMAP_REVBITS_05:
        /* FALLTHROUGH */
        default:
            /* Use the latest known revision as default */
            strcpy(cpu_rev, "3.1.2");
        }
    }
    /* Print verbose information */
    pr_info("%s ES%s (", cpu_name, cpu_rev);
    OMAP3_SHOW_FEATURE(l2cache);
    OMAP3_SHOW_FEATURE(iva);
    OMAP3_SHOW_FEATURE(sgx);
    OMAP3_SHOW_FEATURE(neon);
    OMAP3_SHOW_FEATURE(isp);
    OMAP3_SHOW_FEATURE(192mhz_clk);
    printk(")\n");
}
/*
 * omap2_check_revision - try to detect the exact revision of the OMAP
 * we're running on.
 *
 * Dispatches to the per-family detection routine.  For 34xx and 44xx
 * the family routines also initialize the omap_chip bits, so those
 * paths return early; for 24xx the chip bits are set in the tail of
 * this function instead.
 */
void __init omap2_check_revision(void)
{
    /*
     * At this point we have an idea about the processor revision set
     * earlier with omap2_set_globals_tap().
     */
    if (cpu_is_omap24xx()) {
        omap24xx_check_revision();
    } else if (cpu_is_omap34xx()) {
        omap3_check_revision();
        /* TI816X doesn't have feature register */
        if (!cpu_is_ti816x())
            omap3_check_features();
        else
            ti816x_check_features();
        omap3_cpuinfo();
        return;
    } else if (cpu_is_omap44xx()) {
        omap4_check_revision();
        omap4_check_features();
        return;
    } else {
        pr_err("OMAP revision unknown, please fix!\n");
    }
    /*
     * OK, now we know the exact revision. Initialize omap_chip bits
     * for powerdowmain and clockdomain code.
     */
    if (cpu_is_omap243x()) {
        /* Currently only supports 2430ES2.1 and 2430-all */
        omap_chip.oc |= CHIP_IS_OMAP2430;
        return;
    } else if (cpu_is_omap242x()) {
        /* Currently only supports 2420ES2.1.1 and 2420-all */
        omap_chip.oc |= CHIP_IS_OMAP2420;
        return;
    }
    pr_err("Uninitialized omap_chip, please fix!\n");
}
/*
 * omap2_set_globals_tap - record the TAP base address and initial CPU
 * class for later revision detection.
 *
 * Called pretty much first thing from board init, before map_io.  It
 * seeds omap_revision just well enough that cpu_is_omapxxxx() works for
 * map_io; the exact revision is detected later in omap2_check_revision()
 * once map_io is done.  The production-id TAP offset differs on 34xx,
 * so it is chosen here as well.
 */
void __init omap2_set_globals_tap(struct omap_globals *omap2_globals)
{
    omap_revision = omap2_globals->class;
    tap_base = omap2_globals->tap;

    /* 34xx keeps the production id at a different TAP offset */
    tap_prod_id = cpu_is_omap34xx() ? 0x0210 : 0x0208;
}
| gpl-2.0 |
Alucard24/SGS4-SAMMY-Kernel | drivers/staging/prima/CORE/SME/src/csr/csrUtil.c | 345 | 212455 | /*
* Copyright (c) 2012, The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/** ------------------------------------------------------------------------- *
------------------------------------------------------------------------- *
\file csrUtil.c
Implementation supporting routines for CSR.
Copyright (C) 2006 Airgo Networks, Incorporated
========================================================================== */
#ifdef FEATURE_WLAN_NON_INTEGRATED_SOC
#include "halInternal.h" //Check if the below include of aniGobal.h is sufficient for Volans too.
#endif
#ifdef FEATURE_WLAN_INTEGRATED_SOC
#include "aniGlobal.h"
#endif
#include "palApi.h"
#include "csrSupport.h"
#include "csrInsideApi.h"
#include "smsDebug.h"
#include "smeQosInternal.h"
#include "wlan_qct_wda.h"
#ifdef FEATURE_WLAN_CCX
#include "vos_utils.h"
#include "csrCcx.h"
#endif /* FEATURE_WLAN_CCX */
tANI_U8 csrWpaOui[][ CSR_WPA_OUI_SIZE ] = {
{ 0x00, 0x50, 0xf2, 0x00 },
{ 0x00, 0x50, 0xf2, 0x01 },
{ 0x00, 0x50, 0xf2, 0x02 },
{ 0x00, 0x50, 0xf2, 0x03 },
{ 0x00, 0x50, 0xf2, 0x04 },
{ 0x00, 0x50, 0xf2, 0x05 },
#ifdef FEATURE_WLAN_CCX
{ 0x00, 0x40, 0x96, 0x00 }, // CCKM
#endif /* FEATURE_WLAN_CCX */
};
tANI_U8 csrRSNOui[][ CSR_RSN_OUI_SIZE ] = {
{ 0x00, 0x0F, 0xAC, 0x00 }, // group cipher
{ 0x00, 0x0F, 0xAC, 0x01 }, // WEP-40 or RSN
{ 0x00, 0x0F, 0xAC, 0x02 }, // TKIP or RSN-PSK
{ 0x00, 0x0F, 0xAC, 0x03 }, // Reserved
{ 0x00, 0x0F, 0xAC, 0x04 }, // AES-CCMP
{ 0x00, 0x0F, 0xAC, 0x05 }, // WEP-104
#ifdef WLAN_FEATURE_11W
{ 0x00, 0x0F, 0xAC, 0x06 }, // BIP(encryption type) or (RSN-PSK-SHA256(authentication type)
#endif
#ifdef FEATURE_WLAN_CCX
{ 0x00, 0x40, 0x96, 0x00 } // CCKM
#endif /* FEATURE_WLAN_CCX */
};
#ifdef FEATURE_WLAN_WAPI
tANI_U8 csrWapiOui[][ CSR_WAPI_OUI_SIZE ] = {
{ 0x00, 0x14, 0x72, 0x00 }, // Reserved
{ 0x00, 0x14, 0x72, 0x01 }, // WAI certificate or SMS4
{ 0x00, 0x14, 0x72, 0x02 } // WAI PSK
};
#endif /* FEATURE_WLAN_WAPI */
tANI_U8 csrWmeInfoOui[ CSR_WME_OUI_SIZE ] = { 0x00, 0x50, 0xf2, 0x02 };
tANI_U8 csrWmeParmOui[ CSR_WME_OUI_SIZE ] = { 0x00, 0x50, 0xf2, 0x02 };
static tCsrIELenInfo gCsrIELengthTable[] = {
/* 000 */ { SIR_MAC_SSID_EID_MIN, SIR_MAC_SSID_EID_MAX },
/* 001 */ { SIR_MAC_RATESET_EID_MIN, SIR_MAC_RATESET_EID_MAX },
/* 002 */ { SIR_MAC_FH_PARAM_SET_EID_MIN, SIR_MAC_FH_PARAM_SET_EID_MAX },
/* 003 */ { SIR_MAC_DS_PARAM_SET_EID_MIN, SIR_MAC_DS_PARAM_SET_EID_MAX },
/* 004 */ { SIR_MAC_CF_PARAM_SET_EID_MIN, SIR_MAC_CF_PARAM_SET_EID_MAX },
/* 005 */ { SIR_MAC_TIM_EID_MIN, SIR_MAC_TIM_EID_MAX },
/* 006 */ { SIR_MAC_IBSS_PARAM_SET_EID_MIN, SIR_MAC_IBSS_PARAM_SET_EID_MAX },
/* 007 */ { SIR_MAC_COUNTRY_EID_MIN, SIR_MAC_COUNTRY_EID_MAX },
/* 008 */ { SIR_MAC_FH_PARAMS_EID_MIN, SIR_MAC_FH_PARAMS_EID_MAX },
/* 009 */ { SIR_MAC_FH_PATTERN_EID_MIN, SIR_MAC_FH_PATTERN_EID_MAX },
/* 010 */ { SIR_MAC_REQUEST_EID_MIN, SIR_MAC_REQUEST_EID_MAX },
/* 011 */ { SIR_MAC_QBSS_LOAD_EID_MIN, SIR_MAC_QBSS_LOAD_EID_MAX },
/* 012 */ { SIR_MAC_EDCA_PARAM_SET_EID_MIN, SIR_MAC_EDCA_PARAM_SET_EID_MAX },
/* 013 */ { SIR_MAC_TSPEC_EID_MIN, SIR_MAC_TSPEC_EID_MAX },
/* 014 */ { SIR_MAC_TCLAS_EID_MIN, SIR_MAC_TCLAS_EID_MAX },
/* 015 */ { SIR_MAC_QOS_SCHEDULE_EID_MIN, SIR_MAC_QOS_SCHEDULE_EID_MAX },
/* 016 */ { SIR_MAC_CHALLENGE_TEXT_EID_MIN, SIR_MAC_CHALLENGE_TEXT_EID_MAX },
/* 017 */ { 0, 255 },
/* 018 */ { 0, 255 },
/* 019 */ { 0, 255 },
/* 020 */ { 0, 255 },
/* 021 */ { 0, 255 },
/* 022 */ { 0, 255 },
/* 023 */ { 0, 255 },
/* 024 */ { 0, 255 },
/* 025 */ { 0, 255 },
/* 026 */ { 0, 255 },
/* 027 */ { 0, 255 },
/* 028 */ { 0, 255 },
/* 029 */ { 0, 255 },
/* 030 */ { 0, 255 },
/* 031 */ { 0, 255 },
/* 032 */ { SIR_MAC_PWR_CONSTRAINT_EID_MIN, SIR_MAC_PWR_CONSTRAINT_EID_MAX },
/* 033 */ { SIR_MAC_PWR_CAPABILITY_EID_MIN, SIR_MAC_PWR_CAPABILITY_EID_MAX },
/* 034 */ { SIR_MAC_TPC_REQ_EID_MIN, SIR_MAC_TPC_REQ_EID_MAX },
/* 035 */ { SIR_MAC_TPC_RPT_EID_MIN, SIR_MAC_TPC_RPT_EID_MAX },
/* 036 */ { SIR_MAC_SPRTD_CHNLS_EID_MIN, SIR_MAC_SPRTD_CHNLS_EID_MAX },
/* 037 */ { SIR_MAC_CHNL_SWITCH_ANN_EID_MIN, SIR_MAC_CHNL_SWITCH_ANN_EID_MAX },
/* 038 */ { SIR_MAC_MEAS_REQ_EID_MIN, SIR_MAC_MEAS_REQ_EID_MAX },
/* 039 */ { SIR_MAC_MEAS_RPT_EID_MIN, SIR_MAC_MEAS_RPT_EID_MAX },
/* 040 */ { SIR_MAC_QUIET_EID_MIN, SIR_MAC_QUIET_EID_MAX },
/* 041 */ { SIR_MAC_IBSS_DFS_EID_MIN, SIR_MAC_IBSS_DFS_EID_MAX },
/* 042 */ { SIR_MAC_ERP_INFO_EID_MIN, SIR_MAC_ERP_INFO_EID_MAX },
/* 043 */ { SIR_MAC_TS_DELAY_EID_MIN, SIR_MAC_TS_DELAY_EID_MAX },
/* 044 */ { SIR_MAC_TCLAS_PROC_EID_MIN, SIR_MAC_TCLAS_PROC_EID_MAX },
/* 045 */ { SIR_MAC_QOS_ACTION_EID_MIN, SIR_MAC_QOS_ACTION_EID_MAX },
/* 046 */ { SIR_MAC_QOS_CAPABILITY_EID_MIN, SIR_MAC_QOS_CAPABILITY_EID_MAX },
/* 047 */ { 0, 255 },
/* 048 */ { SIR_MAC_RSN_EID_MIN, SIR_MAC_RSN_EID_MAX },
/* 049 */ { 0, 255 },
/* 050 */ { SIR_MAC_EXTENDED_RATE_EID_MIN, SIR_MAC_EXTENDED_RATE_EID_MAX },
/* 051 */ { 0, 255 },
/* 052 */ { 0, 255 },
/* 053 */ { 0, 255 },
/* 054 */ { 0, 255 },
/* 055 */ { 0, 255 },
/* 056 */ { 0, 255 },
/* 057 */ { 0, 255 },
/* 058 */ { 0, 255 },
/* 059 */ { 0, 255 },
/* 060 */ { 0, 255 },
/* 061 */ { 0, 255 },
/* 062 */ { 0, 255 },
/* 063 */ { 0, 255 },
/* 064 */ { 0, 255 },
/* 065 */ { 0, 255 },
/* 066 */ { 0, 255 },
/* 067 */ { 0, 255 },
#ifdef FEATURE_WLAN_WAPI
/* 068 */ { DOT11F_EID_WAPI, DOT11F_IE_WAPI_MAX_LEN },
#else
/* 068 */ { 0, 255 },
#endif /* FEATURE_WLAN_WAPI */
/* 069 */ { 0, 255 },
/* 070 */ { 0, 255 },
/* 071 */ { 0, 255 },
/* 072 */ { 0, 255 },
/* 073 */ { 0, 255 },
/* 074 */ { 0, 255 },
/* 075 */ { 0, 255 },
/* 076 */ { 0, 255 },
/* 077 */ { 0, 255 },
/* 078 */ { 0, 255 },
/* 079 */ { 0, 255 },
/* 080 */ { 0, 255 },
/* 081 */ { 0, 255 },
/* 082 */ { 0, 255 },
/* 083 */ { 0, 255 },
/* 084 */ { 0, 255 },
/* 085 */ { 0, 255 },
/* 086 */ { 0, 255 },
/* 087 */ { 0, 255 },
/* 088 */ { 0, 255 },
/* 089 */ { 0, 255 },
/* 090 */ { 0, 255 },
/* 091 */ { 0, 255 },
/* 092 */ { 0, 255 },
/* 093 */ { 0, 255 },
/* 094 */ { 0, 255 },
/* 095 */ { 0, 255 },
/* 096 */ { 0, 255 },
/* 097 */ { 0, 255 },
/* 098 */ { 0, 255 },
/* 099 */ { 0, 255 },
/* 100 */ { 0, 255 },
/* 101 */ { 0, 255 },
/* 102 */ { 0, 255 },
/* 103 */ { 0, 255 },
/* 104 */ { 0, 255 },
/* 105 */ { 0, 255 },
/* 106 */ { 0, 255 },
/* 107 */ { 0, 255 },
/* 108 */ { 0, 255 },
/* 109 */ { 0, 255 },
/* 110 */ { 0, 255 },
/* 111 */ { 0, 255 },
/* 112 */ { 0, 255 },
/* 113 */ { 0, 255 },
/* 114 */ { 0, 255 },
/* 115 */ { 0, 255 },
/* 116 */ { 0, 255 },
/* 117 */ { 0, 255 },
/* 118 */ { 0, 255 },
/* 119 */ { 0, 255 },
/* 120 */ { 0, 255 },
/* 121 */ { 0, 255 },
/* 122 */ { 0, 255 },
/* 123 */ { 0, 255 },
/* 124 */ { 0, 255 },
/* 125 */ { 0, 255 },
/* 126 */ { 0, 255 },
/* 127 */ { 0, 255 },
/* 128 */ { 0, 255 },
/* 129 */ { 0, 255 },
/* 130 */ { 0, 255 },
/* 131 */ { 0, 255 },
/* 132 */ { 0, 255 },
/* 133 */ { 0, 255 },
/* 134 */ { 0, 255 },
/* 135 */ { 0, 255 },
/* 136 */ { 0, 255 },
/* 137 */ { 0, 255 },
/* 138 */ { 0, 255 },
/* 139 */ { 0, 255 },
/* 140 */ { 0, 255 },
/* 141 */ { 0, 255 },
/* 142 */ { 0, 255 },
/* 143 */ { 0, 255 },
/* 144 */ { 0, 255 },
/* 145 */ { 0, 255 },
/* 146 */ { 0, 255 },
/* 147 */ { 0, 255 },
/* 148 */ { 0, 255 },
/* 149 */ { 0, 255 },
/* 150 */ { 0, 255 },
/* 151 */ { 0, 255 },
/* 152 */ { 0, 255 },
/* 153 */ { 0, 255 },
/* 154 */ { 0, 255 },
/* 155 */ { 0, 255 },
/* 156 */ { 0, 255 },
/* 157 */ { 0, 255 },
/* 158 */ { 0, 255 },
/* 159 */ { 0, 255 },
/* 160 */ { 0, 255 },
/* 161 */ { 0, 255 },
/* 162 */ { 0, 255 },
/* 163 */ { 0, 255 },
/* 164 */ { 0, 255 },
/* 165 */ { 0, 255 },
/* 166 */ { 0, 255 },
/* 167 */ { 0, 255 },
/* 168 */ { 0, 255 },
/* 169 */ { 0, 255 },
/* 170 */ { 0, 255 },
/* 171 */ { 0, 255 },
/* 172 */ { 0, 255 },
/* 173 */ { 0, 255 },
/* 174 */ { 0, 255 },
/* 175 */ { 0, 255 },
/* 176 */ { 0, 255 },
/* 177 */ { 0, 255 },
/* 178 */ { 0, 255 },
/* 179 */ { 0, 255 },
/* 180 */ { 0, 255 },
/* 181 */ { 0, 255 },
/* 182 */ { 0, 255 },
/* 183 */ { 0, 255 },
/* 184 */ { 0, 255 },
/* 185 */ { 0, 255 },
/* 186 */ { 0, 255 },
/* 187 */ { 0, 255 },
/* 188 */ { 0, 255 },
/* 189 */ { 0, 255 },
/* 190 */ { 0, 255 },
/* 191 */ { 0, 255 },
/* 192 */ { 0, 255 },
/* 193 */ { 0, 255 },
/* 194 */ { 0, 255 },
/* 195 */ { 0, 255 },
/* 196 */ { 0, 255 },
/* 197 */ { 0, 255 },
/* 198 */ { 0, 255 },
/* 199 */ { 0, 255 },
/* 200 */ { 0, 255 },
/* 201 */ { 0, 255 },
/* 202 */ { 0, 255 },
/* 203 */ { 0, 255 },
/* 204 */ { 0, 255 },
/* 205 */ { 0, 255 },
/* 206 */ { 0, 255 },
/* 207 */ { 0, 255 },
/* 208 */ { 0, 255 },
/* 209 */ { 0, 255 },
/* 210 */ { 0, 255 },
/* 211 */ { 0, 255 },
/* 212 */ { 0, 255 },
/* 213 */ { 0, 255 },
/* 214 */ { 0, 255 },
/* 215 */ { 0, 255 },
/* 216 */ { 0, 255 },
/* 217 */ { 0, 255 },
/* 218 */ { 0, 255 },
/* 219 */ { 0, 255 },
/* 220 */ { 0, 255 },
/* 221 */ { SIR_MAC_WPA_EID_MIN, SIR_MAC_WPA_EID_MAX },
/* 222 */ { 0, 255 },
/* 223 */ { 0, 255 },
/* 224 */ { 0, 255 },
/* 225 */ { 0, 255 },
/* 226 */ { 0, 255 },
/* 227 */ { 0, 255 },
/* 228 */ { 0, 255 },
/* 229 */ { 0, 255 },
/* 230 */ { 0, 255 },
/* 231 */ { 0, 255 },
/* 232 */ { 0, 255 },
/* 233 */ { 0, 255 },
/* 234 */ { 0, 255 },
/* 235 */ { 0, 255 },
/* 236 */ { 0, 255 },
/* 237 */ { 0, 255 },
/* 238 */ { 0, 255 },
/* 239 */ { 0, 255 },
/* 240 */ { 0, 255 },
/* 241 */ { 0, 255 },
/* 242 */ { 0, 255 },
/* 243 */ { 0, 255 },
/* 244 */ { 0, 255 },
/* 245 */ { 0, 255 },
/* 246 */ { 0, 255 },
/* 247 */ { 0, 255 },
/* 248 */ { 0, 255 },
/* 249 */ { 0, 255 },
/* 250 */ { 0, 255 },
/* 251 */ { 0, 255 },
/* 252 */ { 0, 255 },
/* 253 */ { 0, 255 },
/* 254 */ { 0, 255 },
/* 255 */ { SIR_MAC_ANI_WORKAROUND_EID_MIN, SIR_MAC_ANI_WORKAROUND_EID_MAX }
};
#if 0
//Don't not insert entry into the table, put it to the end. If you have to insert, make sure it is also
//reflected in eCsrCountryIndex
static tCsrCountryInfo gCsrCountryInfo[eCSR_NUM_COUNTRY_INDEX] =
{
{REG_DOMAIN_FCC, {'U', 'S', ' '}}, //USA/******The "US" MUST be at index 0*******/
{REG_DOMAIN_WORLD, {'A', 'D', ' '}}, //ANDORRA
{REG_DOMAIN_WORLD, {'A', 'E', ' '}}, //UAE
{REG_DOMAIN_WORLD, {'A', 'F', ' '}}, //AFGHANISTAN
{REG_DOMAIN_WORLD, {'A', 'G', ' '}}, //ANTIGUA AND BARBUDA
{REG_DOMAIN_WORLD, {'A', 'I', ' '}}, //ANGUILLA
{REG_DOMAIN_HI_5GHZ, {'A', 'L', ' '}}, //ALBANIA
{REG_DOMAIN_WORLD, {'A', 'M', ' '}}, //ARMENIA
{REG_DOMAIN_WORLD, {'A', 'N', ' '}}, //NETHERLANDS ANTILLES
{REG_DOMAIN_WORLD, {'A', 'O', ' '}}, //ANGOLA
{REG_DOMAIN_WORLD, {'A', 'Q', ' '}}, //ANTARCTICA
{REG_DOMAIN_HI_5GHZ, {'A', 'R', ' '}}, //ARGENTINA
{REG_DOMAIN_FCC, {'A', 'S', ' '}}, //AMERICAN SOMOA
{REG_DOMAIN_ETSI, {'A', 'T', ' '}}, //AUSTRIA
{REG_DOMAIN_ETSI, {'A', 'U', ' '}}, //AUSTRALIA
{REG_DOMAIN_WORLD, {'A', 'W', ' '}}, //ARUBA
{REG_DOMAIN_WORLD, {'A', 'X', ' '}}, //ALAND ISLANDS
{REG_DOMAIN_WORLD, {'A', 'Z', ' '}}, //AZERBAIJAN
{REG_DOMAIN_WORLD, {'B', 'A', ' '}}, //BOSNIA AND HERZEGOVINA
{REG_DOMAIN_WORLD, {'B', 'B', ' '}}, //BARBADOS
{REG_DOMAIN_WORLD, {'B', 'D', ' '}}, //BANGLADESH
{REG_DOMAIN_ETSI, {'B', 'E', ' '}}, //BELGIUM
{REG_DOMAIN_WORLD, {'B', 'F', ' '}}, //BURKINA FASO
{REG_DOMAIN_HI_5GHZ, {'B', 'G', ' '}}, //BULGARIA
{REG_DOMAIN_WORLD, {'B', 'H', ' '}}, //BAHRAIN
{REG_DOMAIN_WORLD, {'B', 'I', ' '}}, //BURUNDI
{REG_DOMAIN_WORLD, {'B', 'J', ' '}}, //BENIN
{REG_DOMAIN_WORLD, {'B', 'L', ' '}}, //SAINT BARTHELEMY
{REG_DOMAIN_ETSI, {'B', 'M', ' '}}, //BERMUDA
{REG_DOMAIN_WORLD, {'B', 'N', ' '}}, //BRUNEI DARUSSALAM
{REG_DOMAIN_WORLD, {'B', 'O', ' '}}, //BOLIVIA
{REG_DOMAIN_WORLD, {'B', 'R', ' '}}, //BRAZIL
{REG_DOMAIN_WORLD, {'B', 'S', ' '}}, //BAHAMAS
{REG_DOMAIN_WORLD, {'B', 'T', ' '}}, //BHUTAN
{REG_DOMAIN_WORLD, {'B', 'V', ' '}}, //BOUVET ISLAND
{REG_DOMAIN_WORLD, {'B', 'W', ' '}}, //BOTSWANA
{REG_DOMAIN_WORLD, {'B', 'Y', ' '}}, //BELARUS
{REG_DOMAIN_WORLD, {'B', 'Z', ' '}}, //BELIZE
{REG_DOMAIN_FCC, {'C', 'A', ' '}}, //CANADA
{REG_DOMAIN_WORLD, {'C', 'C', ' '}}, //COCOS (KEELING) ISLANDS
{REG_DOMAIN_WORLD, {'C', 'D', ' '}}, //CONGO, THE DEMOCRATIC REPUBLIC OF THE
{REG_DOMAIN_WORLD, {'C', 'F', ' '}}, //CENTRAL AFRICAN REPUBLIC
{REG_DOMAIN_WORLD, {'C', 'G', ' '}}, //CONGO
{REG_DOMAIN_ETSI, {'C', 'H', ' '}}, //SWITZERLAND
{REG_DOMAIN_WORLD, {'C', 'I', ' '}}, //COTE D'IVOIRE
{REG_DOMAIN_WORLD, {'C', 'K', ' '}}, //COOK ISLANDS
{REG_DOMAIN_WORLD, {'C', 'L', ' '}}, //CHILE
{REG_DOMAIN_WORLD, {'C', 'M', ' '}}, //CAMEROON
{REG_DOMAIN_HI_5GHZ, {'C', 'N', ' '}}, //CHINA
{REG_DOMAIN_WORLD, {'C', 'O', ' '}}, //COLOMBIA
{REG_DOMAIN_WORLD, {'C', 'R', ' '}}, //COSTA RICA
{REG_DOMAIN_WORLD, {'C', 'U', ' '}}, //CUBA
{REG_DOMAIN_WORLD, {'C', 'V', ' '}}, //CAPE VERDE
{REG_DOMAIN_WORLD, {'C', 'X', ' '}}, //CHRISTMAS ISLAND
{REG_DOMAIN_WORLD, {'C', 'Y', ' '}}, //CYPRUS
{REG_DOMAIN_HI_5GHZ, {'C', 'Z', ' '}}, //CZECH REPUBLIC
{REG_DOMAIN_ETSI, {'D', 'E', ' '}}, //GERMANY
{REG_DOMAIN_WORLD, {'D', 'J', ' '}}, //DJIBOUTI
{REG_DOMAIN_ETSI, {'D', 'K', ' '}}, //DENMARK
{REG_DOMAIN_WORLD, {'D', 'M', ' '}}, //DOMINICA
{REG_DOMAIN_WORLD, {'D', 'O', ' '}}, //DOMINICAN REPUBLIC
{REG_DOMAIN_WORLD, {'D', 'Z', ' '}}, //ALGERIA
{REG_DOMAIN_WORLD, {'E', 'C', ' '}}, //ECUADOR
{REG_DOMAIN_HI_5GHZ, {'E', 'E', ' '}}, //ESTONIA
{REG_DOMAIN_WORLD, {'E', 'G', ' '}}, //EGYPT
{REG_DOMAIN_WORLD, {'E', 'H', ' '}}, //WESTERN SAHARA
{REG_DOMAIN_WORLD, {'E', 'R', ' '}}, //ERITREA
{REG_DOMAIN_ETSI, {'E', 'S', ' '}}, //SPAIN
{REG_DOMAIN_WORLD, {'E', 'T', ' '}}, //ETHIOPIA
{REG_DOMAIN_WORLD, {'F', 'I', ' '}}, //FINLAND
{REG_DOMAIN_WORLD, {'F', 'J', ' '}}, //FIJI
{REG_DOMAIN_WORLD, {'F', 'K', ' '}}, //FALKLAND ISLANDS (MALVINAS)
{REG_DOMAIN_WORLD, {'F', 'M', ' '}}, //MICRONESIA, FEDERATED STATES OF
{REG_DOMAIN_WORLD, {'F', 'O', ' '}}, //FAROE ISLANDS
{REG_DOMAIN_ETSI, {'F', 'R', ' '}}, //FRANCE
{REG_DOMAIN_WORLD, {'G', 'A', ' '}}, //GABON
{REG_DOMAIN_ETSI, {'G', 'B', ' '}}, //UNITED KINGDOM
{REG_DOMAIN_WORLD, {'G', 'D', ' '}}, //GRENADA
{REG_DOMAIN_HI_5GHZ, {'G', 'E', ' '}}, //GEORGIA
{REG_DOMAIN_WORLD, {'G', 'F', ' '}}, //FRENCH GUIANA
{REG_DOMAIN_ETSI, {'G', 'G', ' '}}, //GUERNSEY
{REG_DOMAIN_WORLD, {'G', 'H', ' '}}, //GHANA
{REG_DOMAIN_WORLD, {'G', 'I', ' '}}, //GIBRALTAR
{REG_DOMAIN_WORLD, {'G', 'L', ' '}}, //GREENLAND
{REG_DOMAIN_WORLD, {'G', 'M', ' '}}, //GAMBIA
{REG_DOMAIN_WORLD, {'G', 'N', ' '}}, //GUINEA
{REG_DOMAIN_WORLD, {'G', 'P', ' '}}, //GUADELOUPE
{REG_DOMAIN_WORLD, {'G', 'Q', ' '}}, //EQUATORIAL GUINEA
{REG_DOMAIN_ETSI, {'G', 'R', ' '}}, //GREECE
{REG_DOMAIN_WORLD, {'G', 'S', ' '}}, //SOUTH GEORGIA AND THE SOUTH SANDWICH ISLANDS
{REG_DOMAIN_WORLD, {'G', 'T', ' '}}, //GUATEMALA
{REG_DOMAIN_WORLD, {'G', 'U', ' '}}, //GUAM
{REG_DOMAIN_WORLD, {'G', 'W', ' '}}, //GUINEA-BISSAU
{REG_DOMAIN_WORLD, {'G', 'Y', ' '}}, //GUYANA
{REG_DOMAIN_WORLD, {'H', 'K', ' '}}, //HONGKONG
{REG_DOMAIN_WORLD, {'H', 'M', ' '}}, //HEARD ISLAND AND MCDONALD ISLANDS
{REG_DOMAIN_WORLD, {'H', 'N', ' '}}, //HONDURAS
{REG_DOMAIN_HI_5GHZ, {'H', 'R', ' '}}, //CROATIA
{REG_DOMAIN_WORLD, {'H', 'T', ' '}}, //HAITI
{REG_DOMAIN_HI_5GHZ, {'H', 'U', ' '}}, //HUNGARY
{REG_DOMAIN_APAC, {'I', 'D', ' '}}, //INDONESIA
{REG_DOMAIN_ETSI, {'I', 'E', ' '}}, //IRELAND
{REG_DOMAIN_WORLD, {'I', 'L', ' '}}, //ISREAL
{REG_DOMAIN_ETSI, {'I', 'M', ' '}}, //ISLE OF MAN
{REG_DOMAIN_WORLD, {'I', 'N', ' '}}, //INDIA
{REG_DOMAIN_ETSI, {'I', 'O', ' '}}, //BRITISH INDIAN OCEAN TERRITORY
{REG_DOMAIN_WORLD, {'I', 'Q', ' '}}, //IRAQ
{REG_DOMAIN_WORLD, {'I', 'R', ' '}}, //IRAN, ISLAMIC REPUBLIC OF
{REG_DOMAIN_WORLD, {'I', 'S', ' '}}, //ICELAND
{REG_DOMAIN_ETSI, {'I', 'T', ' '}}, //ITALY
{REG_DOMAIN_ETSI, {'J', 'E', ' '}}, //JERSEY
{REG_DOMAIN_WORLD, {'J', 'M', ' '}}, //JAMAICA
{REG_DOMAIN_WORLD, {'J', 'O', ' '}}, //JORDAN
{REG_DOMAIN_JAPAN, {'J', 'P', ' '}}, //JAPAN
{REG_DOMAIN_WORLD, {'K', 'E', ' '}}, //KENYA
{REG_DOMAIN_WORLD, {'K', 'G', ' '}}, //KYRGYZSTAN
{REG_DOMAIN_WORLD, {'K', 'H', ' '}}, //CAMBODIA
{REG_DOMAIN_WORLD, {'K', 'I', ' '}}, //KIRIBATI
{REG_DOMAIN_WORLD, {'K', 'M', ' '}}, //COMOROS
{REG_DOMAIN_WORLD, {'K', 'N', ' '}}, //SAINT KITTS AND NEVIS
{REG_DOMAIN_KOREA, {'K', 'P', ' '}}, //KOREA, DEMOCRATIC PEOPLE'S REPUBLIC OF
{REG_DOMAIN_KOREA, {'K', 'R', ' '}}, //KOREA, REPUBLIC OF
{REG_DOMAIN_WORLD, {'K', 'W', ' '}}, //KUWAIT
{REG_DOMAIN_WORLD, {'K', 'Y', ' '}}, //CAYMAN ISLANDS
{REG_DOMAIN_WORLD, {'K', 'Z', ' '}}, //KAZAKHSTAN
{REG_DOMAIN_WORLD, {'L', 'A', ' '}}, //LAO PEOPLE'S DEMOCRATIC REPUBLIC
{REG_DOMAIN_WORLD, {'L', 'B', ' '}}, //LEBANON
{REG_DOMAIN_WORLD, {'L', 'C', ' '}}, //SAINT LUCIA
{REG_DOMAIN_ETSI, {'L', 'I', ' '}}, //LIECHTENSTEIN
{REG_DOMAIN_WORLD, {'L', 'K', ' '}}, //SRI LANKA
{REG_DOMAIN_WORLD, {'L', 'R', ' '}}, //LIBERIA
{REG_DOMAIN_WORLD, {'L', 'S', ' '}}, //LESOTHO
{REG_DOMAIN_HI_5GHZ, {'L', 'T', ' '}}, //LITHUANIA
{REG_DOMAIN_ETSI, {'L', 'U', ' '}}, //LUXEMBOURG
{REG_DOMAIN_HI_5GHZ, {'L', 'V', ' '}}, //LATVIA
{REG_DOMAIN_WORLD, {'L', 'Y', ' '}}, //LIBYAN ARAB JAMAHIRIYA
{REG_DOMAIN_WORLD, {'M', 'A', ' '}}, //MOROCCO
{REG_DOMAIN_ETSI, {'M', 'C', ' '}}, //MONACO
{REG_DOMAIN_WORLD, {'M', 'D', ' '}}, //MOLDOVA, REPUBLIC OF
{REG_DOMAIN_WORLD, {'M', 'E', ' '}}, //MONTENEGRO
{REG_DOMAIN_WORLD, {'M', 'G', ' '}}, //MADAGASCAR
{REG_DOMAIN_WORLD, {'M', 'H', ' '}}, //MARSHALL ISLANDS
{REG_DOMAIN_WORLD, {'M', 'K', ' '}}, //MACEDONIA, THE FORMER YUGOSLAV REPUBLIC OF
{REG_DOMAIN_WORLD, {'M', 'L', ' '}}, //MALI
{REG_DOMAIN_WORLD, {'M', 'M', ' '}}, //MYANMAR
{REG_DOMAIN_HI_5GHZ, {'M', 'N', ' '}}, //MONGOLIA
{REG_DOMAIN_WORLD, {'M', 'O', ' '}}, //MACAO
{REG_DOMAIN_WORLD, {'M', 'P', ' '}}, //NORTHERN MARIANA ISLANDS
{REG_DOMAIN_WORLD, {'M', 'Q', ' '}}, //MARTINIQUE
{REG_DOMAIN_WORLD, {'M', 'R', ' '}}, //MAURITANIA
{REG_DOMAIN_WORLD, {'M', 'S', ' '}}, //MONTSERRAT
{REG_DOMAIN_WORLD, {'M', 'T', ' '}}, //MALTA
{REG_DOMAIN_WORLD, {'M', 'U', ' '}}, //MAURITIUS
{REG_DOMAIN_WORLD, {'M', 'V', ' '}}, //MALDIVES
{REG_DOMAIN_WORLD, {'M', 'W', ' '}}, //MALAWI
{REG_DOMAIN_WORLD, {'M', 'X', ' '}}, //MEXICO
{REG_DOMAIN_HI_5GHZ, {'M', 'Y', ' '}}, //MALAYSIA
{REG_DOMAIN_WORLD, {'M', 'Z', ' '}}, //MOZAMBIQUE
{REG_DOMAIN_WORLD, {'N', 'A', ' '}}, //NAMIBIA
{REG_DOMAIN_WORLD, {'N', 'C', ' '}}, //NEW CALEDONIA
{REG_DOMAIN_WORLD, {'N', 'E', ' '}}, //NIGER
{REG_DOMAIN_WORLD, {'N', 'F', ' '}}, //NORFOLD ISLAND
{REG_DOMAIN_WORLD, {'N', 'G', ' '}}, //NIGERIA
{REG_DOMAIN_WORLD, {'N', 'I', ' '}}, //NICARAGUA
{REG_DOMAIN_ETSI, {'N', 'L', ' '}}, //NETHERLANDS
{REG_DOMAIN_WORLD, {'N', 'O', ' '}}, //NORWAY
{REG_DOMAIN_WORLD, {'N', 'P', ' '}}, //NEPAL
{REG_DOMAIN_WORLD, {'N', 'R', ' '}}, //NAURU
{REG_DOMAIN_WORLD, {'N', 'U', ' '}}, //NIUE
{REG_DOMAIN_ETSI, {'N', 'Z', ' '}}, //NEW ZEALAND
{REG_DOMAIN_WORLD, {'O', 'M', ' '}}, //OMAN
{REG_DOMAIN_WORLD, {'P', 'A', ' '}}, //PANAMA
{REG_DOMAIN_WORLD, {'P', 'E', ' '}}, //PERU
{REG_DOMAIN_WORLD, {'P', 'F', ' '}}, //FRENCH POLYNESIA
{REG_DOMAIN_WORLD, {'P', 'G', ' '}}, //PAPUA NEW GUINEA
{REG_DOMAIN_WORLD, {'P', 'H', ' '}}, //PHILIPPINES
{REG_DOMAIN_WORLD, {'P', 'K', ' '}}, //PAKISTAN
{REG_DOMAIN_WORLD, {'P', 'L', ' '}}, //POLAND
{REG_DOMAIN_WORLD, {'P', 'M', ' '}}, //SAINT PIERRE AND MIQUELON
{REG_DOMAIN_WORLD, {'P', 'N', ' '}}, //PITCAIRN
{REG_DOMAIN_FCC, {'P', 'R', ' '}}, //PUERTO RICO
{REG_DOMAIN_WORLD, {'P', 'S', ' '}}, //PALESTINIAN TERRITORY, OCCUPIED
{REG_DOMAIN_ETSI, {'P', 'T', ' '}}, //PORTUGAL
{REG_DOMAIN_WORLD, {'P', 'W', ' '}}, //PALAU
{REG_DOMAIN_WORLD, {'P', 'Y', ' '}}, //PARAGUAY
{REG_DOMAIN_WORLD, {'Q', 'A', ' '}}, //QATAR
{REG_DOMAIN_WORLD, {'R', 'E', ' '}}, //REUNION
{REG_DOMAIN_HI_5GHZ, {'R', 'O', ' '}}, //ROMANIA
{REG_DOMAIN_HI_5GHZ, {'R', 'S', ' '}}, //SERBIA
{REG_DOMAIN_WORLD, {'R', 'U', ' '}}, //RUSSIA
{REG_DOMAIN_WORLD, {'R', 'W', ' '}}, //RWANDA
{REG_DOMAIN_WORLD, {'S', 'A', ' '}}, //SAUDI ARABIA
{REG_DOMAIN_WORLD, {'S', 'B', ' '}}, //SOLOMON ISLANDS
{REG_DOMAIN_ETSI, {'S', 'C', ' '}}, //SEYCHELLES
{REG_DOMAIN_WORLD, {'S', 'D', ' '}}, //SUDAN
{REG_DOMAIN_ETSI, {'S', 'E', ' '}}, //SWEDEN
{REG_DOMAIN_APAC, {'S', 'G', ' '}}, //SINGAPORE
{REG_DOMAIN_WORLD, {'S', 'H', ' '}}, //SAINT HELENA
{REG_DOMAIN_HI_5GHZ, {'S', 'I', ' '}}, //SLOVENNIA
{REG_DOMAIN_WORLD, {'S', 'J', ' '}}, //SVALBARD AND JAN MAYEN
{REG_DOMAIN_HI_5GHZ, {'S', 'K', ' '}}, //SLOVAKIA
{REG_DOMAIN_WORLD, {'S', 'L', ' '}}, //SIERRA LEONE
{REG_DOMAIN_WORLD, {'S', 'M', ' '}}, //SAN MARINO
{REG_DOMAIN_WORLD, {'S', 'N', ' '}}, //SENEGAL
{REG_DOMAIN_WORLD, {'S', 'O', ' '}}, //SOMALIA
{REG_DOMAIN_WORLD, {'S', 'R', ' '}}, //SURINAME
{REG_DOMAIN_WORLD, {'S', 'T', ' '}}, //SAO TOME AND PRINCIPE
{REG_DOMAIN_WORLD, {'S', 'V', ' '}}, //EL SALVADOR
{REG_DOMAIN_WORLD, {'S', 'Y', ' '}}, //SYRIAN ARAB REPUBLIC
{REG_DOMAIN_WORLD, {'S', 'Z', ' '}}, //SWAZILAND
{REG_DOMAIN_WORLD, {'T', 'C', ' '}}, //TURKS AND CAICOS ISLANDS
{REG_DOMAIN_WORLD, {'T', 'D', ' '}}, //CHAD
{REG_DOMAIN_WORLD, {'T', 'F', ' '}}, //FRENCH SOUTHERN TERRITORIES
{REG_DOMAIN_WORLD, {'T', 'G', ' '}}, //TOGO
{REG_DOMAIN_WORLD, {'T', 'H', ' '}}, //THAILAND
{REG_DOMAIN_WORLD, {'T', 'J', ' '}}, //TAJIKISTAN
{REG_DOMAIN_WORLD, {'T', 'K', ' '}}, //TOKELAU
{REG_DOMAIN_WORLD, {'T', 'L', ' '}}, //TIMOR-LESTE
{REG_DOMAIN_WORLD, {'T', 'M', ' '}}, //TURKMENISTAN
{REG_DOMAIN_WORLD, {'T', 'N', ' '}}, //TUNISIA
{REG_DOMAIN_WORLD, {'T', 'O', ' '}}, //TONGA
{REG_DOMAIN_WORLD, {'T', 'R', ' '}}, //TURKEY
{REG_DOMAIN_WORLD, {'T', 'T', ' '}}, //TRINIDAD AND TOBAGO
{REG_DOMAIN_WORLD, {'T', 'V', ' '}}, //TUVALU
{REG_DOMAIN_HI_5GHZ, {'T', 'W', ' '}}, //TAIWAN, PROVINCE OF CHINA
{REG_DOMAIN_WORLD, {'T', 'Z', ' '}}, //TANZANIA, UNITED REPUBLIC OF
{REG_DOMAIN_HI_5GHZ, {'U', 'A', ' '}}, //UKRAINE
{REG_DOMAIN_WORLD, {'U', 'G', ' '}}, //UGANDA
{REG_DOMAIN_FCC, {'U', 'M', ' '}}, //UNITED STATES MINOR OUTLYING ISLANDS
{REG_DOMAIN_WORLD, {'U', 'Y', ' '}}, //URUGUAY
{REG_DOMAIN_HI_5GHZ, {'U', 'Z', ' '}}, //UZBEKISTAN
{REG_DOMAIN_ETSI, {'V', 'A', ' '}}, //HOLY SEE (VATICAN CITY STATE)
{REG_DOMAIN_WORLD, {'V', 'C', ' '}}, //SAINT VINCENT AND THE GRENADINES
{REG_DOMAIN_HI_5GHZ, {'V', 'E', ' '}}, //VENEZUELA
{REG_DOMAIN_ETSI, {'V', 'G', ' '}}, //VIRGIN ISLANDS, BRITISH
{REG_DOMAIN_FCC, {'V', 'I', ' '}}, //VIRGIN ISLANDS, US
{REG_DOMAIN_WORLD, {'V', 'N', ' '}}, //VIET NAM
{REG_DOMAIN_WORLD, {'V', 'U', ' '}}, //VANUATU
{REG_DOMAIN_WORLD, {'W', 'F', ' '}}, //WALLIS AND FUTUNA
{REG_DOMAIN_WORLD, {'W', 'S', ' '}}, //SOMOA
{REG_DOMAIN_WORLD, {'Y', 'E', ' '}}, //YEMEN
{REG_DOMAIN_WORLD, {'Y', 'T', ' '}}, //MAYOTTE
{REG_DOMAIN_WORLD, {'Z', 'A', ' '}}, //SOUTH AFRICA
{REG_DOMAIN_WORLD, {'Z', 'M', ' '}}, //ZAMBIA
{REG_DOMAIN_WORLD, {'Z', 'W', ' '}}, //ZIMBABWE
{REG_DOMAIN_KOREA, {'K', '1', ' '}}, //Korea alternate 1
{REG_DOMAIN_KOREA, {'K', '2', ' '}}, //Korea alternate 2
{REG_DOMAIN_KOREA, {'K', '3', ' '}}, //Korea alternate 3
{REG_DOMAIN_KOREA, {'K', '4', ' '}}, //Korea alternate 4
};
//The channels listed here doesn't mean they are valid channels for certain domain. They are here only to present
//whether they should be passive scanned.
/*
 * Per-regulatory-domain channel table.
 *
 * Each entry lists the same 45 channels (4 + 4 + 11 + 5 in 5 GHz,
 * 7 in 4.9 GHz, 14 in 2.4 GHz); only the active/passive scan type per
 * channel differs between domains.  As the comment above notes, a
 * channel's presence here does NOT imply it is legal in that domain --
 * the table only records whether it must be passively scanned.
 * Passive entries cluster in the DFS bands (5250-5350, 5470-5725).
 * NOTE(review): the 4.9 GHz block lists channels 240..252 before
 * 208..216 -- presumably intentional ordering; confirm before reuse.
 */
tCsrDomainChnInfo gCsrDomainChnInfo[NUM_REG_DOMAINS] =
{
//REG_DOMAIN_FCC
{
REG_DOMAIN_FCC,
45, //Num channels
//Channels
{
//5GHz
//5180 - 5240
{36, eSIR_ACTIVE_SCAN},
{40, eSIR_ACTIVE_SCAN},
{44, eSIR_ACTIVE_SCAN},
{48, eSIR_ACTIVE_SCAN},
//5250 to 5350
{52, eSIR_PASSIVE_SCAN},
{56, eSIR_PASSIVE_SCAN},
{60, eSIR_PASSIVE_SCAN},
{64, eSIR_PASSIVE_SCAN},
//5470 to 5725
{100, eSIR_PASSIVE_SCAN},
{104, eSIR_PASSIVE_SCAN},
{108, eSIR_PASSIVE_SCAN},
{112, eSIR_PASSIVE_SCAN},
{116, eSIR_PASSIVE_SCAN},
{120, eSIR_PASSIVE_SCAN},
{124, eSIR_PASSIVE_SCAN},
{128, eSIR_PASSIVE_SCAN},
{132, eSIR_PASSIVE_SCAN},
{136, eSIR_PASSIVE_SCAN},
{140, eSIR_PASSIVE_SCAN},
//5745 - 5825
{149, eSIR_ACTIVE_SCAN},
{153, eSIR_ACTIVE_SCAN},
{157, eSIR_ACTIVE_SCAN},
{161, eSIR_ACTIVE_SCAN},
{165, eSIR_ACTIVE_SCAN},
//4.9GHz
//4920 - 5080
{240, eSIR_ACTIVE_SCAN},
{244, eSIR_ACTIVE_SCAN},
{248, eSIR_ACTIVE_SCAN},
{252, eSIR_ACTIVE_SCAN},
{208, eSIR_ACTIVE_SCAN},
{212, eSIR_ACTIVE_SCAN},
{216, eSIR_ACTIVE_SCAN},
//2.4GHz
{1, eSIR_ACTIVE_SCAN},
{2, eSIR_ACTIVE_SCAN},
{3, eSIR_ACTIVE_SCAN},
{4, eSIR_ACTIVE_SCAN},
{5, eSIR_ACTIVE_SCAN},
{6, eSIR_ACTIVE_SCAN},
{7, eSIR_ACTIVE_SCAN},
{8, eSIR_ACTIVE_SCAN},
{9, eSIR_ACTIVE_SCAN},
{10, eSIR_ACTIVE_SCAN},
{11, eSIR_ACTIVE_SCAN},
{12, eSIR_ACTIVE_SCAN},
{13, eSIR_ACTIVE_SCAN},
{14, eSIR_ACTIVE_SCAN},
}
},
//REG_DOMAIN_ETSI
{
REG_DOMAIN_ETSI,
45, //Num channels
//Channels
{
//5GHz
//5180 - 5240
{36, eSIR_ACTIVE_SCAN},
{40, eSIR_ACTIVE_SCAN},
{44, eSIR_ACTIVE_SCAN},
{48, eSIR_ACTIVE_SCAN},
//5250 to 5350
{52, eSIR_PASSIVE_SCAN},
{56, eSIR_PASSIVE_SCAN},
{60, eSIR_PASSIVE_SCAN},
{64, eSIR_PASSIVE_SCAN},
//5470 to 5725
{100, eSIR_PASSIVE_SCAN},
{104, eSIR_PASSIVE_SCAN},
{108, eSIR_PASSIVE_SCAN},
{112, eSIR_PASSIVE_SCAN},
{116, eSIR_PASSIVE_SCAN},
{120, eSIR_PASSIVE_SCAN},
{124, eSIR_PASSIVE_SCAN},
{128, eSIR_PASSIVE_SCAN},
{132, eSIR_PASSIVE_SCAN},
{136, eSIR_PASSIVE_SCAN},
{140, eSIR_PASSIVE_SCAN},
//5745 - 5825
{149, eSIR_ACTIVE_SCAN},
{153, eSIR_ACTIVE_SCAN},
{157, eSIR_ACTIVE_SCAN},
{161, eSIR_ACTIVE_SCAN},
{165, eSIR_ACTIVE_SCAN},
//4.9GHz
//4920 - 5080
{240, eSIR_ACTIVE_SCAN},
{244, eSIR_ACTIVE_SCAN},
{248, eSIR_ACTIVE_SCAN},
{252, eSIR_ACTIVE_SCAN},
{208, eSIR_ACTIVE_SCAN},
{212, eSIR_ACTIVE_SCAN},
{216, eSIR_ACTIVE_SCAN},
//2.4GHz
{1, eSIR_ACTIVE_SCAN},
{2, eSIR_ACTIVE_SCAN},
{3, eSIR_ACTIVE_SCAN},
{4, eSIR_ACTIVE_SCAN},
{5, eSIR_ACTIVE_SCAN},
{6, eSIR_ACTIVE_SCAN},
{7, eSIR_ACTIVE_SCAN},
{8, eSIR_ACTIVE_SCAN},
{9, eSIR_ACTIVE_SCAN},
{10, eSIR_ACTIVE_SCAN},
{11, eSIR_ACTIVE_SCAN},
{12, eSIR_ACTIVE_SCAN},
{13, eSIR_ACTIVE_SCAN},
{14, eSIR_ACTIVE_SCAN},
}
},
//REG_DOMAIN_JAPAN
{
REG_DOMAIN_JAPAN,
45, //Num channels
//Channels
{
//5GHz
//5180 - 5240
{36, eSIR_ACTIVE_SCAN},
{40, eSIR_ACTIVE_SCAN},
{44, eSIR_ACTIVE_SCAN},
{48, eSIR_ACTIVE_SCAN},
//5250 to 5350
{52, eSIR_PASSIVE_SCAN},
{56, eSIR_PASSIVE_SCAN},
{60, eSIR_PASSIVE_SCAN},
{64, eSIR_PASSIVE_SCAN},
//5470 to 5725
{100, eSIR_PASSIVE_SCAN},
{104, eSIR_PASSIVE_SCAN},
{108, eSIR_PASSIVE_SCAN},
{112, eSIR_PASSIVE_SCAN},
{116, eSIR_PASSIVE_SCAN},
{120, eSIR_PASSIVE_SCAN},
{124, eSIR_PASSIVE_SCAN},
{128, eSIR_PASSIVE_SCAN},
{132, eSIR_PASSIVE_SCAN},
{136, eSIR_PASSIVE_SCAN},
{140, eSIR_PASSIVE_SCAN},
//5745 - 5825
{149, eSIR_ACTIVE_SCAN},
{153, eSIR_ACTIVE_SCAN},
{157, eSIR_ACTIVE_SCAN},
{161, eSIR_ACTIVE_SCAN},
{165, eSIR_ACTIVE_SCAN},
//4.9GHz
//4920 - 5080
{240, eSIR_ACTIVE_SCAN},
{244, eSIR_ACTIVE_SCAN},
{248, eSIR_ACTIVE_SCAN},
{252, eSIR_ACTIVE_SCAN},
{208, eSIR_ACTIVE_SCAN},
{212, eSIR_ACTIVE_SCAN},
{216, eSIR_ACTIVE_SCAN},
//2.4GHz
{1, eSIR_ACTIVE_SCAN},
{2, eSIR_ACTIVE_SCAN},
{3, eSIR_ACTIVE_SCAN},
{4, eSIR_ACTIVE_SCAN},
{5, eSIR_ACTIVE_SCAN},
{6, eSIR_ACTIVE_SCAN},
{7, eSIR_ACTIVE_SCAN},
{8, eSIR_ACTIVE_SCAN},
{9, eSIR_ACTIVE_SCAN},
{10, eSIR_ACTIVE_SCAN},
{11, eSIR_ACTIVE_SCAN},
{12, eSIR_ACTIVE_SCAN},
{13, eSIR_ACTIVE_SCAN},
{14, eSIR_ACTIVE_SCAN},
}
},
//REG_DOMAIN_WORLD
//All channels active-scan: WORLD is the permissive default domain.
{
REG_DOMAIN_WORLD,
45, //Num channels
//Channels
{
//5GHz
//5180 - 5240
{36, eSIR_ACTIVE_SCAN},
{40, eSIR_ACTIVE_SCAN},
{44, eSIR_ACTIVE_SCAN},
{48, eSIR_ACTIVE_SCAN},
//5250 to 5350
{52, eSIR_ACTIVE_SCAN},
{56, eSIR_ACTIVE_SCAN},
{60, eSIR_ACTIVE_SCAN},
{64, eSIR_ACTIVE_SCAN},
//5470 to 5725
{100, eSIR_ACTIVE_SCAN},
{104, eSIR_ACTIVE_SCAN},
{108, eSIR_ACTIVE_SCAN},
{112, eSIR_ACTIVE_SCAN},
{116, eSIR_ACTIVE_SCAN},
{120, eSIR_ACTIVE_SCAN},
{124, eSIR_ACTIVE_SCAN},
{128, eSIR_ACTIVE_SCAN},
{132, eSIR_ACTIVE_SCAN},
{136, eSIR_ACTIVE_SCAN},
{140, eSIR_ACTIVE_SCAN},
//5745 - 5825
{149, eSIR_ACTIVE_SCAN},
{153, eSIR_ACTIVE_SCAN},
{157, eSIR_ACTIVE_SCAN},
{161, eSIR_ACTIVE_SCAN},
{165, eSIR_ACTIVE_SCAN},
//4.9GHz
//4920 - 5080
{240, eSIR_ACTIVE_SCAN},
{244, eSIR_ACTIVE_SCAN},
{248, eSIR_ACTIVE_SCAN},
{252, eSIR_ACTIVE_SCAN},
{208, eSIR_ACTIVE_SCAN},
{212, eSIR_ACTIVE_SCAN},
{216, eSIR_ACTIVE_SCAN},
//2.4GHz
{1, eSIR_ACTIVE_SCAN},
{2, eSIR_ACTIVE_SCAN},
{3, eSIR_ACTIVE_SCAN},
{4, eSIR_ACTIVE_SCAN},
{5, eSIR_ACTIVE_SCAN},
{6, eSIR_ACTIVE_SCAN},
{7, eSIR_ACTIVE_SCAN},
{8, eSIR_ACTIVE_SCAN},
{9, eSIR_ACTIVE_SCAN},
{10, eSIR_ACTIVE_SCAN},
{11, eSIR_ACTIVE_SCAN},
{12, eSIR_ACTIVE_SCAN},
{13, eSIR_ACTIVE_SCAN},
{14, eSIR_ACTIVE_SCAN},
}
},
//REG_DOMAIN_N_AMER_EXC_FCC
//Passive only in 5250-5350; 5470-5725 is active here (unlike FCC).
{
REG_DOMAIN_N_AMER_EXC_FCC,
45, //Num channels
//Channels
{
//5GHz
//5180 - 5240
{36, eSIR_ACTIVE_SCAN},
{40, eSIR_ACTIVE_SCAN},
{44, eSIR_ACTIVE_SCAN},
{48, eSIR_ACTIVE_SCAN},
//5250 to 5350
{52, eSIR_PASSIVE_SCAN},
{56, eSIR_PASSIVE_SCAN},
{60, eSIR_PASSIVE_SCAN},
{64, eSIR_PASSIVE_SCAN},
//5470 to 5725
{100, eSIR_ACTIVE_SCAN},
{104, eSIR_ACTIVE_SCAN},
{108, eSIR_ACTIVE_SCAN},
{112, eSIR_ACTIVE_SCAN},
{116, eSIR_ACTIVE_SCAN},
{120, eSIR_ACTIVE_SCAN},
{124, eSIR_ACTIVE_SCAN},
{128, eSIR_ACTIVE_SCAN},
{132, eSIR_ACTIVE_SCAN},
{136, eSIR_ACTIVE_SCAN},
{140, eSIR_ACTIVE_SCAN},
//5745 - 5825
{149, eSIR_ACTIVE_SCAN},
{153, eSIR_ACTIVE_SCAN},
{157, eSIR_ACTIVE_SCAN},
{161, eSIR_ACTIVE_SCAN},
{165, eSIR_ACTIVE_SCAN},
//4.9GHz
//4920 - 5080
{240, eSIR_ACTIVE_SCAN},
{244, eSIR_ACTIVE_SCAN},
{248, eSIR_ACTIVE_SCAN},
{252, eSIR_ACTIVE_SCAN},
{208, eSIR_ACTIVE_SCAN},
{212, eSIR_ACTIVE_SCAN},
{216, eSIR_ACTIVE_SCAN},
//2.4GHz
{1, eSIR_ACTIVE_SCAN},
{2, eSIR_ACTIVE_SCAN},
{3, eSIR_ACTIVE_SCAN},
{4, eSIR_ACTIVE_SCAN},
{5, eSIR_ACTIVE_SCAN},
{6, eSIR_ACTIVE_SCAN},
{7, eSIR_ACTIVE_SCAN},
{8, eSIR_ACTIVE_SCAN},
{9, eSIR_ACTIVE_SCAN},
{10, eSIR_ACTIVE_SCAN},
{11, eSIR_ACTIVE_SCAN},
{12, eSIR_ACTIVE_SCAN},
{13, eSIR_ACTIVE_SCAN},
{14, eSIR_ACTIVE_SCAN},
}
},
//REG_DOMAIN_APAC
{
REG_DOMAIN_APAC,
45, //Num channels
//Channels
{
//5GHz
//5180 - 5240
{36, eSIR_ACTIVE_SCAN},
{40, eSIR_ACTIVE_SCAN},
{44, eSIR_ACTIVE_SCAN},
{48, eSIR_ACTIVE_SCAN},
//5250 to 5350
{52, eSIR_PASSIVE_SCAN},
{56, eSIR_PASSIVE_SCAN},
{60, eSIR_PASSIVE_SCAN},
{64, eSIR_PASSIVE_SCAN},
//5470 to 5725
{100, eSIR_ACTIVE_SCAN},
{104, eSIR_ACTIVE_SCAN},
{108, eSIR_ACTIVE_SCAN},
{112, eSIR_ACTIVE_SCAN},
{116, eSIR_ACTIVE_SCAN},
{120, eSIR_ACTIVE_SCAN},
{124, eSIR_ACTIVE_SCAN},
{128, eSIR_ACTIVE_SCAN},
{132, eSIR_ACTIVE_SCAN},
{136, eSIR_ACTIVE_SCAN},
{140, eSIR_ACTIVE_SCAN},
//5745 - 5825
{149, eSIR_ACTIVE_SCAN},
{153, eSIR_ACTIVE_SCAN},
{157, eSIR_ACTIVE_SCAN},
{161, eSIR_ACTIVE_SCAN},
{165, eSIR_ACTIVE_SCAN},
//4.9GHz
//4920 - 5080
{240, eSIR_ACTIVE_SCAN},
{244, eSIR_ACTIVE_SCAN},
{248, eSIR_ACTIVE_SCAN},
{252, eSIR_ACTIVE_SCAN},
{208, eSIR_ACTIVE_SCAN},
{212, eSIR_ACTIVE_SCAN},
{216, eSIR_ACTIVE_SCAN},
//2.4GHz
{1, eSIR_ACTIVE_SCAN},
{2, eSIR_ACTIVE_SCAN},
{3, eSIR_ACTIVE_SCAN},
{4, eSIR_ACTIVE_SCAN},
{5, eSIR_ACTIVE_SCAN},
{6, eSIR_ACTIVE_SCAN},
{7, eSIR_ACTIVE_SCAN},
{8, eSIR_ACTIVE_SCAN},
{9, eSIR_ACTIVE_SCAN},
{10, eSIR_ACTIVE_SCAN},
{11, eSIR_ACTIVE_SCAN},
{12, eSIR_ACTIVE_SCAN},
{13, eSIR_ACTIVE_SCAN},
{14, eSIR_ACTIVE_SCAN},
}
},
//REG_DOMAIN_KOREA
{
REG_DOMAIN_KOREA,
45, //Num channels
//Channels
{
//5GHz
//5180 - 5240
{36, eSIR_ACTIVE_SCAN},
{40, eSIR_ACTIVE_SCAN},
{44, eSIR_ACTIVE_SCAN},
{48, eSIR_ACTIVE_SCAN},
//5250 to 5350
{52, eSIR_PASSIVE_SCAN},
{56, eSIR_PASSIVE_SCAN},
{60, eSIR_PASSIVE_SCAN},
{64, eSIR_PASSIVE_SCAN},
//5470 to 5725
{100, eSIR_PASSIVE_SCAN},
{104, eSIR_PASSIVE_SCAN},
{108, eSIR_PASSIVE_SCAN},
{112, eSIR_PASSIVE_SCAN},
{116, eSIR_PASSIVE_SCAN},
{120, eSIR_PASSIVE_SCAN},
{124, eSIR_PASSIVE_SCAN},
{128, eSIR_PASSIVE_SCAN},
{132, eSIR_PASSIVE_SCAN},
{136, eSIR_PASSIVE_SCAN},
{140, eSIR_PASSIVE_SCAN},
//5745 - 5825
{149, eSIR_ACTIVE_SCAN},
{153, eSIR_ACTIVE_SCAN},
{157, eSIR_ACTIVE_SCAN},
{161, eSIR_ACTIVE_SCAN},
{165, eSIR_ACTIVE_SCAN},
//4.9GHz
//4920 - 5080
{240, eSIR_ACTIVE_SCAN},
{244, eSIR_ACTIVE_SCAN},
{248, eSIR_ACTIVE_SCAN},
{252, eSIR_ACTIVE_SCAN},
{208, eSIR_ACTIVE_SCAN},
{212, eSIR_ACTIVE_SCAN},
{216, eSIR_ACTIVE_SCAN},
//2.4GHz
{1, eSIR_ACTIVE_SCAN},
{2, eSIR_ACTIVE_SCAN},
{3, eSIR_ACTIVE_SCAN},
{4, eSIR_ACTIVE_SCAN},
{5, eSIR_ACTIVE_SCAN},
{6, eSIR_ACTIVE_SCAN},
{7, eSIR_ACTIVE_SCAN},
{8, eSIR_ACTIVE_SCAN},
{9, eSIR_ACTIVE_SCAN},
{10, eSIR_ACTIVE_SCAN},
{11, eSIR_ACTIVE_SCAN},
{12, eSIR_ACTIVE_SCAN},
{13, eSIR_ACTIVE_SCAN},
{14, eSIR_ACTIVE_SCAN},
}
},
//REG_DOMAIN_HI_5GHZ
{
REG_DOMAIN_HI_5GHZ,
45, //Num channels
//Channels
{
//5GHz
//5180 - 5240
{36, eSIR_ACTIVE_SCAN},
{40, eSIR_ACTIVE_SCAN},
{44, eSIR_ACTIVE_SCAN},
{48, eSIR_ACTIVE_SCAN},
//5250 to 5350
{52, eSIR_ACTIVE_SCAN},
{56, eSIR_ACTIVE_SCAN},
{60, eSIR_ACTIVE_SCAN},
{64, eSIR_ACTIVE_SCAN},
//5470 to 5725
{100, eSIR_ACTIVE_SCAN},
{104, eSIR_ACTIVE_SCAN},
{108, eSIR_ACTIVE_SCAN},
{112, eSIR_ACTIVE_SCAN},
{116, eSIR_ACTIVE_SCAN},
{120, eSIR_ACTIVE_SCAN},
{124, eSIR_ACTIVE_SCAN},
{128, eSIR_ACTIVE_SCAN},
{132, eSIR_ACTIVE_SCAN},
{136, eSIR_ACTIVE_SCAN},
{140, eSIR_ACTIVE_SCAN},
//5745 - 5825
{149, eSIR_ACTIVE_SCAN},
{153, eSIR_ACTIVE_SCAN},
{157, eSIR_ACTIVE_SCAN},
{161, eSIR_ACTIVE_SCAN},
{165, eSIR_ACTIVE_SCAN},
//4.9GHz
//4920 - 5080
{240, eSIR_ACTIVE_SCAN},
{244, eSIR_ACTIVE_SCAN},
{248, eSIR_ACTIVE_SCAN},
{252, eSIR_ACTIVE_SCAN},
{208, eSIR_ACTIVE_SCAN},
{212, eSIR_ACTIVE_SCAN},
{216, eSIR_ACTIVE_SCAN},
//2.4GHz
{1, eSIR_ACTIVE_SCAN},
{2, eSIR_ACTIVE_SCAN},
{3, eSIR_ACTIVE_SCAN},
{4, eSIR_ACTIVE_SCAN},
{5, eSIR_ACTIVE_SCAN},
{6, eSIR_ACTIVE_SCAN},
{7, eSIR_ACTIVE_SCAN},
{8, eSIR_ACTIVE_SCAN},
{9, eSIR_ACTIVE_SCAN},
{10, eSIR_ACTIVE_SCAN},
{11, eSIR_ACTIVE_SCAN},
{12, eSIR_ACTIVE_SCAN},
{13, eSIR_ACTIVE_SCAN},
{14, eSIR_ACTIVE_SCAN},
}
},
//REG_DOMAIN_NO_5GHZ
//NOTE(review): 5 GHz channels are still listed (active) even though the
//domain name says no 5 GHz -- presumably validity is enforced elsewhere.
{
REG_DOMAIN_NO_5GHZ,
45, //Num channels
//Channels
{
//5GHz
//5180 - 5240
{36, eSIR_ACTIVE_SCAN},
{40, eSIR_ACTIVE_SCAN},
{44, eSIR_ACTIVE_SCAN},
{48, eSIR_ACTIVE_SCAN},
//5250 to 5350
{52, eSIR_ACTIVE_SCAN},
{56, eSIR_ACTIVE_SCAN},
{60, eSIR_ACTIVE_SCAN},
{64, eSIR_ACTIVE_SCAN},
//5470 to 5725
{100, eSIR_ACTIVE_SCAN},
{104, eSIR_ACTIVE_SCAN},
{108, eSIR_ACTIVE_SCAN},
{112, eSIR_ACTIVE_SCAN},
{116, eSIR_ACTIVE_SCAN},
{120, eSIR_ACTIVE_SCAN},
{124, eSIR_ACTIVE_SCAN},
{128, eSIR_ACTIVE_SCAN},
{132, eSIR_ACTIVE_SCAN},
{136, eSIR_ACTIVE_SCAN},
{140, eSIR_ACTIVE_SCAN},
//5745 - 5825
{149, eSIR_ACTIVE_SCAN},
{153, eSIR_ACTIVE_SCAN},
{157, eSIR_ACTIVE_SCAN},
{161, eSIR_ACTIVE_SCAN},
{165, eSIR_ACTIVE_SCAN},
//4.9GHz
//4920 - 5080
{240, eSIR_ACTIVE_SCAN},
{244, eSIR_ACTIVE_SCAN},
{248, eSIR_ACTIVE_SCAN},
{252, eSIR_ACTIVE_SCAN},
{208, eSIR_ACTIVE_SCAN},
{212, eSIR_ACTIVE_SCAN},
{216, eSIR_ACTIVE_SCAN},
//2.4GHz
{1, eSIR_ACTIVE_SCAN},
{2, eSIR_ACTIVE_SCAN},
{3, eSIR_ACTIVE_SCAN},
{4, eSIR_ACTIVE_SCAN},
{5, eSIR_ACTIVE_SCAN},
{6, eSIR_ACTIVE_SCAN},
{7, eSIR_ACTIVE_SCAN},
{8, eSIR_ACTIVE_SCAN},
{9, eSIR_ACTIVE_SCAN},
{10, eSIR_ACTIVE_SCAN},
{11, eSIR_ACTIVE_SCAN},
{12, eSIR_ACTIVE_SCAN},
{13, eSIR_ACTIVE_SCAN},
{14, eSIR_ACTIVE_SCAN},
}
},
};
#endif
extern const tRfChannelProps rfChannels[NUM_RF_CHANNELS];
////////////////////////////////////////////////////////////////////////
/**
* \var gPhyRatesSuppt
*
* \brief Rate support lookup table
*
*
* This is a lookup table indexing rates & configuration parameters to
* support. Given a rate (in units of 0.5Mbps) & three booleans (MIMO
* Enabled, Channel Bonding Enabled, & Concatenation Enabled), one can
* determine whether the given rate is supported by computing two
* indices. The first maps the rate to table row as indicated below
* (i.e. eHddSuppRate_6Mbps maps to row zero, eHddSuppRate_9Mbps to row
* 1, and so on). Index two can be computed like so:
*
* \code
idx2 = ( fEsf ? 0x4 : 0x0 ) |
( fCb ? 0x2 : 0x0 ) |
( fMimo ? 0x1 : 0x0 );
* \endcode
*
*
* Given that:
*
\code
fSupported = gPhyRatesSuppt[idx1][idx2];
\endcode
*
*
* This table is based on the document "PHY Supported Rates.doc". This
* table is permissive in that a rate is reflected as being supported
* even when turning off an enabled feature would be required. For
* instance, "PHY Supported Rates" lists 42Mbps as unsupported when CB,
* ESF, & MIMO are all on. However, if we turn off either of CB or
* MIMO, it then becomes supported. Therefore, we mark it as supported
* even in index 7 of this table.
*
*
*/
//Rows are indexed by rate (see the block comment above); columns by
//idx2 = (ESF<<2)|(CB<<1)|MIMO, matching the column headers below.
static const tANI_BOOLEAN gPhyRatesSuppt[24][8] = {
// SSF SSF SSF SSF ESF ESF ESF ESF
// SIMO MIMO SIMO MIMO SIMO MIMO SIMO MIMO
// No CB No CB CB CB No CB No CB CB CB
{ TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE }, // 6Mbps
{ TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE }, // 9Mbps
{ TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE }, // 12Mbps
{ TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE }, // 18Mbps
{ FALSE, FALSE, TRUE, TRUE, FALSE, FALSE, TRUE, TRUE }, // 20Mbps
{ TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE }, // 24Mbps
{ TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE }, // 36Mbps
{ FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE }, // 40Mbps
{ FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE }, // 42Mbps
{ TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE }, // 48Mbps
{ TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE, TRUE }, // 54Mbps
{ FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE }, // 72Mbps
{ FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE }, // 80Mbps
{ FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE }, // 84Mbps
{ FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE }, // 96Mbps
{ FALSE, TRUE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE }, // 108Mbps
{ FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE }, // 120Mbps
{ FALSE, FALSE, TRUE, TRUE, FALSE, TRUE, TRUE, TRUE }, // 126Mbps
{ FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE }, // 144Mbps
{ FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE }, // 160Mbps
{ FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE }, // 168Mbps
{ FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE }, // 192Mbps
{ FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE }, // 216Mbps
{ FALSE, FALSE, FALSE, TRUE, FALSE, FALSE, FALSE, TRUE }, // 240Mbps
};
#define CASE_RETURN_STR(n) case (n): return (#n)
/* Map an eRoamCmdStatus value to its symbolic name, for logging. */
const char *
get_eRoamCmdStatus_str(eRoamCmdStatus val)
{
    switch (val)
    {
        case eCSR_ROAM_CANCELLED:
            return "eCSR_ROAM_CANCELLED";
        case eCSR_ROAM_ROAMING_START:
            return "eCSR_ROAM_ROAMING_START";
        case eCSR_ROAM_ROAMING_COMPLETION:
            return "eCSR_ROAM_ROAMING_COMPLETION";
        case eCSR_ROAM_ASSOCIATION_START:
            return "eCSR_ROAM_ASSOCIATION_START";
        case eCSR_ROAM_ASSOCIATION_COMPLETION:
            return "eCSR_ROAM_ASSOCIATION_COMPLETION";
        case eCSR_ROAM_DISASSOCIATED:
            return "eCSR_ROAM_DISASSOCIATED";
        case eCSR_ROAM_SHOULD_ROAM:
            return "eCSR_ROAM_SHOULD_ROAM";
        case eCSR_ROAM_SCAN_FOUND_NEW_BSS:
            return "eCSR_ROAM_SCAN_FOUND_NEW_BSS";
        case eCSR_ROAM_LOSTLINK:
            return "eCSR_ROAM_LOSTLINK";
        default:
            return "unknown";
    }
}
/* Map an eCsrRoamResult value to its symbolic name, for logging. */
const char *
get_eCsrRoamResult_str(eCsrRoamResult val)
{
    switch (val)
    {
        case eCSR_ROAM_RESULT_NONE:
            return "eCSR_ROAM_RESULT_NONE";
        case eCSR_ROAM_RESULT_FAILURE:
            return "eCSR_ROAM_RESULT_FAILURE";
        case eCSR_ROAM_RESULT_ASSOCIATED:
            return "eCSR_ROAM_RESULT_ASSOCIATED";
        case eCSR_ROAM_RESULT_NOT_ASSOCIATED:
            return "eCSR_ROAM_RESULT_NOT_ASSOCIATED";
        case eCSR_ROAM_RESULT_MIC_FAILURE:
            return "eCSR_ROAM_RESULT_MIC_FAILURE";
        case eCSR_ROAM_RESULT_FORCED:
            return "eCSR_ROAM_RESULT_FORCED";
        case eCSR_ROAM_RESULT_DISASSOC_IND:
            return "eCSR_ROAM_RESULT_DISASSOC_IND";
        case eCSR_ROAM_RESULT_DEAUTH_IND:
            return "eCSR_ROAM_RESULT_DEAUTH_IND";
        case eCSR_ROAM_RESULT_CAP_CHANGED:
            return "eCSR_ROAM_RESULT_CAP_CHANGED";
        case eCSR_ROAM_RESULT_IBSS_CONNECT:
            return "eCSR_ROAM_RESULT_IBSS_CONNECT";
        case eCSR_ROAM_RESULT_IBSS_INACTIVE:
            return "eCSR_ROAM_RESULT_IBSS_INACTIVE";
        case eCSR_ROAM_RESULT_IBSS_NEW_PEER:
            return "eCSR_ROAM_RESULT_IBSS_NEW_PEER";
        case eCSR_ROAM_RESULT_IBSS_COALESCED:
            return "eCSR_ROAM_RESULT_IBSS_COALESCED";
        default:
            return "unknown";
    }
}
/* Copy the BSSID out of a BSS description into pBssId. Always succeeds. */
tANI_BOOLEAN csrGetBssIdBssDesc( tHalHandle hHal, tSirBssDescription *pSirBssDesc, tCsrBssid *pBssId )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

    palCopyMemory( pMac->hHdd, pBssId, pSirBssDesc->bssId, sizeof(tCsrBssid) );
    return TRUE;
}
/* Compare the BSSIDs of two BSS descriptions; FALSE if either is NULL. */
tANI_BOOLEAN csrIsBssIdEqual( tHalHandle hHal, tSirBssDescription *pSirBssDesc1, tSirBssDescription *pSirBssDesc2 )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tCsrBssid addr1;
    tCsrBssid addr2;

    if ( !pSirBssDesc1 || !pSirBssDesc2 )
    {
        return FALSE;
    }
    if ( !csrGetBssIdBssDesc( pMac, pSirBssDesc1, &addr1 ) ||
         !csrGetBssIdBssDesc( pMac, pSirBssDesc2, &addr2 ) )
    {
        return FALSE;
    }
    //sirCompareMacAddr
    return csrIsMacAddressEqual( pMac, &addr1, &addr2 );
}
/* TRUE when the session is in the IBSS-connected state. */
tANI_BOOLEAN csrIsConnStateConnectedIbss( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    eCsrConnectState state = pMac->roam.roamSession[sessionId].connectState;
    return ( state == eCSR_ASSOC_STATE_TYPE_IBSS_CONNECTED );
}
/* TRUE when the session is in the IBSS-disconnected state. */
tANI_BOOLEAN csrIsConnStateDisconnectedIbss( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    eCsrConnectState state = pMac->roam.roamSession[sessionId].connectState;
    return ( state == eCSR_ASSOC_STATE_TYPE_IBSS_DISCONNECTED );
}
/* TRUE when the session is associated to an infrastructure BSS. */
tANI_BOOLEAN csrIsConnStateConnectedInfra( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    eCsrConnectState state = pMac->roam.roamSession[sessionId].connectState;
    return ( state == eCSR_ASSOC_STATE_TYPE_INFRA_ASSOCIATED );
}
/*
 * csrIsConnStateConnected - TRUE when the session is connected in any
 * mode (IBSS, infrastructure STA, or WDS).
 *
 * Idiom fix: the original wrapped the boolean expression in a redundant
 * if/else that returned TRUE/FALSE; the expression is now returned
 * directly (identical truth table).
 */
tANI_BOOLEAN csrIsConnStateConnected( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    return ( csrIsConnStateConnectedIbss( pMac, sessionId ) ||
             csrIsConnStateConnectedInfra( pMac, sessionId ) ||
             csrIsConnStateConnectedWds( pMac, sessionId ) );
}
/* Infra "connected" is the only infra state tracked by this predicate. */
tANI_BOOLEAN csrIsConnStateInfra( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    return csrIsConnStateConnectedInfra( pMac, sessionId );
}
/* TRUE when the session is in either IBSS state (connected or not). */
tANI_BOOLEAN csrIsConnStateIbss( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    if ( csrIsConnStateConnectedIbss( pMac, sessionId ) )
    {
        return TRUE;
    }
    return csrIsConnStateDisconnectedIbss( pMac, sessionId );
}
/* TRUE when the session has a connected WDS (BT-AMP) link. */
tANI_BOOLEAN csrIsConnStateConnectedWds( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    eCsrConnectState state = pMac->roam.roamSession[sessionId].connectState;
    return ( state == eCSR_ASSOC_STATE_TYPE_WDS_CONNECTED );
}
#ifdef WLAN_SOFTAP_FEATURE
/* TRUE when the session runs an infra AP, with or without clients. */
tANI_BOOLEAN csrIsConnStateConnectedInfraAp( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    eCsrConnectState state = pMac->roam.roamSession[sessionId].connectState;

    return ( (eCSR_ASSOC_STATE_TYPE_INFRA_CONNECTED == state) ||
             (eCSR_ASSOC_STATE_TYPE_INFRA_DISCONNECTED == state) );
}
#endif
/* TRUE when the session's WDS link exists but is disconnected. */
tANI_BOOLEAN csrIsConnStateDisconnectedWds( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    eCsrConnectState state = pMac->roam.roamSession[sessionId].connectState;
    return ( state == eCSR_ASSOC_STATE_TYPE_WDS_DISCONNECTED );
}
/* TRUE when the session is in either WDS state (connected or not). */
tANI_BOOLEAN csrIsConnStateWds( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    if ( csrIsConnStateConnectedWds( pMac, sessionId ) )
    {
        return TRUE;
    }
    return csrIsConnStateDisconnectedWds( pMac, sessionId );
}
/* TRUE when the session's connected profile is an infrastructure AP. */
tANI_BOOLEAN csrIsConnStateAp( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );

    /* Guard: an invalid session id yields a NULL session pointer. */
    if ( !pSession )
    {
        return eANI_BOOLEAN_FALSE;
    }
    return CSR_IS_INFRA_AP( &pSession->connectedProfile ) ?
           eANI_BOOLEAN_TRUE : eANI_BOOLEAN_FALSE;
}
/* TRUE when at least one valid session is in infra, IBSS or AP state. */
tANI_BOOLEAN csrIsAnySessionInConnectState( tpAniSirGlobal pMac )
{
    tANI_U32 idx;

    for ( idx = 0; idx < CSR_ROAM_SESSION_MAX; idx++ )
    {
        if ( !CSR_IS_SESSION_VALID( pMac, idx ) )
        {
            continue;
        }
        if ( csrIsConnStateInfra( pMac, idx ) ||
             csrIsConnStateIbss( pMac, idx ) ||
             csrIsConnStateAp( pMac, idx ) )
        {
            return eANI_BOOLEAN_TRUE;
        }
    }
    return eANI_BOOLEAN_FALSE;
}
/* Return the first valid infra-associated session id, or -1 if none. */
tANI_S8 csrGetInfraSessionId( tpAniSirGlobal pMac )
{
    tANI_U8 idx;

    for ( idx = 0; idx < CSR_ROAM_SESSION_MAX; idx++ )
    {
        if ( CSR_IS_SESSION_VALID( pMac, idx ) &&
             csrIsConnStateInfra( pMac, idx ) )
        {
            return (tANI_S8)idx;
        }
    }
    return -1;
}
/* Operating channel of the session's connected profile; 0 when the
   session id is invalid. */
tANI_U8 csrGetInfraOperationChannel( tpAniSirGlobal pMac, tANI_U8 sessionId)
{
    if ( !CSR_IS_SESSION_VALID( pMac, sessionId ) )
    {
        return 0;
    }
    return pMac->roam.roamSession[sessionId].connectedProfile.operationChannel;
}
//This routine will return operating channel on FIRST BSS that is active/operating to be used for concurrency mode.
//If other BSS is not up or not connected it will return 0
tANI_U8 csrGetConcurrentOperationChannel( tpAniSirGlobal pMac )
{
    tANI_U8 idx;

    for ( idx = 0; idx < CSR_ROAM_SESSION_MAX; idx++ )
    {
        tCsrRoamSession *pSession;
        tANI_BOOLEAN fClientUp;
        tANI_BOOLEAN fApUp;

        if ( !CSR_IS_SESSION_VALID( pMac, idx ) )
        {
            continue;
        }
        pSession = CSR_GET_SESSION( pMac, idx );
        if ( NULL == pSession->pCurRoamProfile )
        {
            continue;
        }
        /* STA / P2P-client personas count only once fully associated. */
        fClientUp = ( ((pSession->pCurRoamProfile->csrPersona == VOS_STA_MODE) ||
                       (pSession->pCurRoamProfile->csrPersona == VOS_P2P_CLIENT_MODE)) &&
                      (pSession->connectState == eCSR_ASSOC_STATE_TYPE_INFRA_ASSOCIATED) );
        /* GO / SAP personas count in any state except "not connected". */
        fApUp = ( ((pSession->pCurRoamProfile->csrPersona == VOS_P2P_GO_MODE) ||
                   (pSession->pCurRoamProfile->csrPersona == VOS_STA_SAP_MODE)) &&
                  (pSession->connectState != eCSR_ASSOC_STATE_TYPE_NOT_CONNECTED) );
        if ( fClientUp || fApUp )
        {
            return pSession->connectedProfile.operationChannel;
        }
    }
    return 0;
}
/* TRUE only when every valid session is in the disconnected state. */
tANI_BOOLEAN csrIsAllSessionDisconnected( tpAniSirGlobal pMac )
{
    tANI_U32 idx;

    for ( idx = 0; idx < CSR_ROAM_SESSION_MAX; idx++ )
    {
        if ( CSR_IS_SESSION_VALID( pMac, idx ) &&
             !csrIsConnStateDisconnected( pMac, idx ) )
        {
            return eANI_BOOLEAN_FALSE;
        }
    }
    return eANI_BOOLEAN_TRUE;
}
/* TRUE when any valid session is infra-associated. */
tANI_BOOLEAN csrIsInfraConnected( tpAniSirGlobal pMac )
{
    tANI_U32 idx;

    for ( idx = 0; idx < CSR_ROAM_SESSION_MAX; idx++ )
    {
        if ( CSR_IS_SESSION_VALID( pMac, idx ) &&
             csrIsConnStateConnectedInfra( pMac, idx ) )
        {
            return eANI_BOOLEAN_TRUE;
        }
    }
    return eANI_BOOLEAN_FALSE;
}
/* TRUE when more than one valid session is infra-associated at once. */
tANI_BOOLEAN csrIsConcurrentInfraConnected( tpAniSirGlobal pMac )
{
    tANI_U32 idx;
    tANI_U32 connectedCount = 0;

    for ( idx = 0; idx < CSR_ROAM_SESSION_MAX; idx++ )
    {
        if ( CSR_IS_SESSION_VALID( pMac, idx ) &&
             csrIsConnStateConnectedInfra( pMac, idx ) )
        {
            connectedCount++;
        }
    }
    // More than one infra STA connected means concurrency.
    return ( connectedCount > 1 ) ? eANI_BOOLEAN_TRUE : eANI_BOOLEAN_FALSE;
}
/* TRUE when any valid session is in an IBSS state. */
tANI_BOOLEAN csrIsIBSSStarted( tpAniSirGlobal pMac )
{
    tANI_U32 idx;

    for ( idx = 0; idx < CSR_ROAM_SESSION_MAX; idx++ )
    {
        if ( CSR_IS_SESSION_VALID( pMac, idx ) &&
             csrIsConnStateIbss( pMac, idx ) )
        {
            return eANI_BOOLEAN_TRUE;
        }
    }
    return eANI_BOOLEAN_FALSE;
}
/* TRUE when any valid session has a connected WDS (BT-AMP) link. */
tANI_BOOLEAN csrIsBTAMPStarted( tpAniSirGlobal pMac )
{
    tANI_U32 idx;

    for ( idx = 0; idx < CSR_ROAM_SESSION_MAX; idx++ )
    {
        if ( CSR_IS_SESSION_VALID( pMac, idx ) &&
             csrIsConnStateConnectedWds( pMac, idx ) )
        {
            return eANI_BOOLEAN_TRUE;
        }
    }
    return eANI_BOOLEAN_FALSE;
}
/* TRUE when more than one valid session is up and running at once. */
tANI_BOOLEAN csrIsConcurrentSessionRunning( tpAniSirGlobal pMac )
{
    tANI_U32 sessionId;
    tANI_U32 activeCount = 0;

    for ( sessionId = 0; sessionId < CSR_ROAM_SESSION_MAX; sessionId++ )
    {
        eCsrConnectState connectState;

        if ( !CSR_IS_SESSION_VALID( pMac, sessionId ) )
        {
            continue;
        }
        connectState = pMac->roam.roamSession[sessionId].connectState;
        /* A session counts as running when infra-associated or acting
           as an infra AP (with or without clients). */
        if ( (eCSR_ASSOC_STATE_TYPE_INFRA_ASSOCIATED == connectState) ||
             (eCSR_ASSOC_STATE_TYPE_INFRA_CONNECTED == connectState) ||
             (eCSR_ASSOC_STATE_TYPE_INFRA_DISCONNECTED == connectState) )
        {
            activeCount++;
        }
    }
    return ( activeCount > 1 ) ? eANI_BOOLEAN_TRUE : eANI_BOOLEAN_FALSE;
}
/* TRUE when any valid session is running an infrastructure AP. */
tANI_BOOLEAN csrIsInfraApStarted( tpAniSirGlobal pMac )
{
    tANI_U32 sessionId;

    for ( sessionId = 0; sessionId < CSR_ROAM_SESSION_MAX; sessionId++ )
    {
        if ( CSR_IS_SESSION_VALID( pMac, sessionId ) &&
             csrIsConnStateConnectedInfraAp( pMac, sessionId ) )
        {
            return eANI_BOOLEAN_TRUE;
        }
    }
    return eANI_BOOLEAN_FALSE;
}
/* BT-AMP links are modeled as connected WDS sessions. */
tANI_BOOLEAN csrIsBTAMP( tpAniSirGlobal pMac, tANI_U32 sessionId )
{
    return csrIsConnStateConnectedWds( pMac, sessionId );
}
/* TRUE when the session has no connection of any kind. */
tANI_BOOLEAN csrIsConnStateDisconnected(tpAniSirGlobal pMac, tANI_U32 sessionId)
{
    eCsrConnectState state = pMac->roam.roamSession[sessionId].connectState;
    return ( state == eCSR_ASSOC_STATE_TYPE_NOT_CONNECTED );
}
/*
 * csrIsValidMcConcurrentSession - decide whether the session identified
 * by sessionId may start a multi-channel concurrent (MCC) connection on
 * the BSS described by pBssDesc.
 *
 * Returns TRUE only when (a) MCC is enabled in the configuration,
 * (b) the persona combination of the first two active profiles is the
 * allowed STA + P2P-client pairing, in either order, and (c) the beacon
 * interval validates for the target channel.
 *
 * NOTE(review): only Mode[0] and Mode[1] are examined, so the check
 * assumes at most two active profiles matter -- confirm for >2 session
 * configurations.  When only one profile is active, Mode[1] still holds
 * the VOS_MAX_NO_OF_MODE sentinel set in the collection loop, so the
 * switch falls through to "not allowed" safely.
 */
tANI_BOOLEAN csrIsValidMcConcurrentSession(tpAniSirGlobal pMac, tANI_U32 sessionId,
tSirBssDescription *pBssDesc)
{
tCsrRoamSession *pSession = NULL;
tANI_U8 Index = 0, ConnId = 0;
eAniBoolean status = eANI_BOOLEAN_FALSE;
tVOS_CON_MODE Mode[CSR_ROAM_SESSION_MAX];
//Check for MCC support
if (!pMac->roam.configParam.fenableMCCMode)
{
return status;
}
//Collect the personas of all sessions that have an active profile;
//unused slots keep the VOS_MAX_NO_OF_MODE sentinel.
for( Index = 0; Index < CSR_ROAM_SESSION_MAX; Index++ )
{
Mode[Index] = VOS_MAX_NO_OF_MODE;
if( CSR_IS_SESSION_VALID( pMac, Index ) )
{
pSession = CSR_GET_SESSION( pMac, Index );
if (NULL != pSession->pCurRoamProfile)
{
//Note: personas are compacted to the front of Mode[] (indexed
//by ConnId), not stored at their session index.
Mode[ConnId] = pSession->pCurRoamProfile->csrPersona;
ConnId++;
}
}
}
Index = 0;
//Allowed MCC pairing #1: STA first, P2P client second.
if (Mode[Index] == VOS_STA_MODE && ConnId > Index)
{
switch (Mode[Index+1])
{
case VOS_P2P_CLIENT_MODE :
status = eANI_BOOLEAN_TRUE;
break;
case VOS_MAX_NO_OF_MODE :
default :
break;
}
}
//Allowed MCC pairing #2: P2P client first, STA second.
else if (Mode[Index] == VOS_P2P_CLIENT_MODE && ConnId > Index)
{
switch (Mode[Index +1])
{
case VOS_STA_MODE :
status = eANI_BOOLEAN_TRUE;
break;
case VOS_MAX_NO_OF_MODE :
default :
break;
}
}
//Validate BeaconInterval
//Even when the persona pairing is allowed, the beacon interval of the
//target BSS must be compatible; a failed validation overrides status.
if( CSR_IS_SESSION_VALID( pMac, sessionId ) )
{
pSession = CSR_GET_SESSION( pMac, sessionId );
if (NULL != pSession->pCurRoamProfile)
{
if(csrIsconcurrentsessionValid (pMac, sessionId,
pSession->pCurRoamProfile->csrPersona)
== eHAL_STATUS_SUCCESS )
{
if(csrValidateBeaconInterval( pMac, pBssDesc->channelId,
&pBssDesc->beaconInterval, sessionId,
pSession->pCurRoamProfile->csrPersona)
!= eHAL_STATUS_SUCCESS)
{
status = eANI_BOOLEAN_FALSE;
}
else
{
status = eANI_BOOLEAN_TRUE;
}
}
else
{
status = eANI_BOOLEAN_FALSE;
}
}
}
return status;
}
/*
 * Extract the capability field of a BSS description as a
 * tSirMacCapabilityInfo bit structure.
 *
 * NOTE(review): the double cast relies on tSirMacCapabilityInfo being
 * exactly 16 bits wide and on pal_get_U16 handling any misalignment of
 * the packed capabilityInfo field -- confirm on new targets before
 * changing either type.
 */
static tSirMacCapabilityInfo csrGetBssCapabilities( tSirBssDescription *pSirBssDesc )
{
tSirMacCapabilityInfo dot11Caps;
//tSirMacCapabilityInfo is 16-bit
pal_get_U16( (tANI_U8 *)&pSirBssDesc->capabilityInfo, (tANI_U16 *)&dot11Caps );
return( dot11Caps );
}
/* ESS capability bit set => infrastructure BSS. */
tANI_BOOLEAN csrIsInfraBssDesc( tSirBssDescription *pSirBssDesc )
{
    return (tANI_BOOLEAN)csrGetBssCapabilities( pSirBssDesc ).ess;
}
/* IBSS capability bit set => ad-hoc network. */
tANI_BOOLEAN csrIsIbssBssDesc( tSirBssDescription *pSirBssDesc )
{
    return (tANI_BOOLEAN)csrGetBssCapabilities( pSirBssDesc ).ibss;
}
/* QoS capability bit set => the BSS advertises 802.11e QoS. */
tANI_BOOLEAN csrIsQoSBssDesc( tSirBssDescription *pSirBssDesc )
{
    return (tANI_BOOLEAN)csrGetBssCapabilities( pSirBssDesc ).qos;
}
/* Privacy capability bit set => encryption required to join. */
tANI_BOOLEAN csrIsPrivacy( tSirBssDescription *pSirBssDesc )
{
    return (tANI_BOOLEAN)csrGetBssCapabilities( pSirBssDesc ).privacy;
}
/* Mirrors the configured 802.11d support flag. */
tANI_BOOLEAN csrIs11dSupported(tpAniSirGlobal pMac)
{
    return ( pMac->roam.configParam.Is11dSupportEnabled );
}
/* Mirrors the configured 802.11h support flag. */
tANI_BOOLEAN csrIs11hSupported(tpAniSirGlobal pMac)
{
    return ( pMac->roam.configParam.Is11hSupportEnabled );
}
/* Mirrors the configured 802.11e support flag. */
tANI_BOOLEAN csrIs11eSupported(tpAniSirGlobal pMac)
{
    return ( pMac->roam.configParam.Is11eSupportEnabled );
}
/* Mirrors the configured multi-channel-concurrency enable flag. */
tANI_BOOLEAN csrIsMCCSupported ( tpAniSirGlobal pMac )
{
    return ( pMac->roam.configParam.fenableMCCMode );
}
/* WMM is available unless the configuration forces "no QoS". */
tANI_BOOLEAN csrIsWmmSupported(tpAniSirGlobal pMac)
{
    if ( pMac->roam.configParam.WMMSupportMode != eCsrRoamWmmNoQos )
    {
        return eANI_BOOLEAN_TRUE;
    }
    return eANI_BOOLEAN_FALSE;
}
//pIes is the IEs for pSirBssDesc2
/*
 * csrIsSsidEqual - compare the SSIDs of two BSS descriptions.
 *
 * pIes2 may be NULL; the IEs for pSirBssDesc2 are then parsed locally
 * and freed before returning (pIes1 is always parsed and freed here).
 * Returns FALSE when either description is NULL, parsing fails, either
 * SSID IE is absent, or the lengths/bytes differ.
 */
tANI_BOOLEAN csrIsSsidEqual( tHalHandle hHal, tSirBssDescription *pSirBssDesc1,
tSirBssDescription *pSirBssDesc2, tDot11fBeaconIEs *pIes2 )
{
tANI_BOOLEAN fEqual = FALSE;
tSirMacSSid Ssid1, Ssid2;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
tDot11fBeaconIEs *pIes1 = NULL;
tDot11fBeaconIEs *pIesLocal = pIes2;
do {
if( ( NULL == pSirBssDesc1 ) || ( NULL == pSirBssDesc2 ) ) break;
//Parse desc2's IEs only when the caller did not supply them.
if( !pIesLocal && !HAL_STATUS_SUCCESS(csrGetParsedBssDescriptionIEs(pMac, pSirBssDesc2, &pIesLocal)) )
{
smsLog(pMac, LOGE, FL(" fail to parse IEs\n"));
break;
}
if(!HAL_STATUS_SUCCESS(csrGetParsedBssDescriptionIEs(pMac, pSirBssDesc1, &pIes1)))
{
break;
}
if( ( !pIes1->SSID.present ) || ( !pIesLocal->SSID.present ) ) break;
//Length mismatch decides before any byte comparison.
if ( pIes1->SSID.num_ssid != pIesLocal->SSID.num_ssid ) break;
//NOTE(review): Ssid1/Ssid2.length is never set; only the raw ssId
//bytes are compared, which is sufficient here.
palCopyMemory(pMac->hHdd, Ssid1.ssId, pIes1->SSID.ssid, pIes1->SSID.num_ssid);
palCopyMemory(pMac->hHdd, Ssid2.ssId, pIesLocal->SSID.ssid, pIesLocal->SSID.num_ssid);
fEqual = palEqualMemory(pMac->hHdd, Ssid1.ssId, Ssid2.ssId, pIesLocal->SSID.num_ssid );
} while( 0 );
if(pIes1)
{
palFreeMemory(pMac->hHdd, pIes1);
}
//Free desc2's IEs only if they were allocated in this function.
if( pIesLocal && !pIes2 )
{
palFreeMemory(pMac->hHdd, pIesLocal);
}
return( fEqual );
}
/* TRUE when the Airgo proprietary IE is present and advertises the WME
   capability bit. Safe to call with a NULL IE pointer. */
tANI_BOOLEAN csrIsAniWmeSupported(tDot11fIEAirgo *pIeAirgo)
{
    if ( pIeAirgo == NULL || !pIeAirgo->present ||
         !pIeAirgo->PropCapability.present )
    {
        return eANI_BOOLEAN_FALSE;
    }
    return (tANI_BOOLEAN)(PROP_CAPABILITY_GET( WME, pIeAirgo->PropCapability.capability ));
}
//pIes can be passed in as NULL if the caller doesn't have one prepared
/*
 * csrIsBssDescriptionWme - TRUE when the BSS advertises WME/WMM, via
 * either the Airgo proprietary capability or the WMM info IE.
 *
 * When WMM is disabled in the local configuration the BSS is still
 * treated as WME only if it carries HT capabilities (presumably because
 * 11n operation requires QoS -- confirm).  pIes may be NULL; the IEs
 * are then parsed locally and freed before returning.
 */
tANI_BOOLEAN csrIsBssDescriptionWme( tHalHandle hHal, tSirBssDescription *pSirBssDesc, tDot11fBeaconIEs *pIes )
{
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
// Assume that WME is found...
tANI_BOOLEAN fWme = TRUE;
tDot11fBeaconIEs *pIesTemp = pIes;
do
{
if(pIesTemp == NULL)
{
if( !HAL_STATUS_SUCCESS(csrGetParsedBssDescriptionIEs(pMac, pSirBssDesc, &pIesTemp)) )
{
fWme = FALSE;
break;
}
}
// if the AirgoProprietary indicator is found, then WME is supported...
if ( csrIsAniWmeSupported(&pIesTemp->Airgo) ) break;
// if the Wme Info IE is found, then WME is supported...
if ( CSR_IS_QOS_BSS(pIesTemp) ) break;
// if none of these are found, then WME is NOT supported...
fWme = FALSE;
} while( 0 );
//pIesTemp is only dereferenced here when fWme is TRUE, which implies
//parsing succeeded above, so pIesTemp is non-NULL on this path.
if( !csrIsWmmSupported( pMac ) && fWme)
{
if( !pIesTemp->HTCaps.present )
{
fWme = FALSE;
}
}
if( ( pIes == NULL ) && ( NULL != pIesTemp ) )
{
//we allocate memory here so free it before returning
palFreeMemory(pMac->hHdd, pIesTemp);
}
return( fWme );
}
/*
 * csrIsHcfEnabled - TRUE when the Airgo proprietary IE advertises HCF,
 * either via the WME proprietary capability bit or the explicit HCF
 * sub-IE.
 *
 * Fix: the original evaluated PROP_CAPABILITY_GET() before checking
 * pIeAirgo->present (and would dereference a NULL pointer outright), so
 * an absent IE could contribute stale capability bits to the result.
 * Both fields are now consulted only when the IE is actually present,
 * mirroring the guard used by csrIsAniWmeSupported().
 */
tANI_BOOLEAN csrIsHcfEnabled( tDot11fIEAirgo *pIeAirgo )
{
    tANI_BOOLEAN fHcfSupported = FALSE;

    if ( pIeAirgo && pIeAirgo->present )
    {
        fHcfSupported = (tANI_BOOLEAN)
            ((pIeAirgo->PropCapability.present &&
              PROP_CAPABILITY_GET( WME, pIeAirgo->PropCapability.capability )) ||
             (pIeAirgo->HCF.present && pIeAirgo->HCF.enabled));
    }
    return( fHcfSupported );
}
/*
 * Determine the medium-access (QoS) type advertised by a BSS:
 * WMM overrides everything; otherwise the 11e QoS bit selects HCF or
 * eDCF; plain DCF is the fallback.  eDCF is downgraded to DCF when 11e
 * is disabled on this adapter.
 */
eCsrMediaAccessType csrGetQoSFromBssDesc( tHalHandle hHal, tSirBssDescription *pSirBssDesc,
tDot11fBeaconIEs *pIes )
{
    eCsrMediaAccessType qosType;
#if defined(VOSS_ENABLED)
    VOS_ASSERT( pIes != NULL );
#endif

    // WMM in the BSS description overrides every other indication.
    if ( csrIsBssDescriptionWme( hHal, pSirBssDesc, pIes ) )
    {
        return eCSR_MEDIUM_ACCESS_WMM_eDCF_DSCP;
    }

    if ( csrIsQoSBssDesc( pSirBssDesc ) )
    {
        // QoS bit on: the AP advertises 11e, which is HCF or eDCF.
        qosType = csrIsHcfEnabled( &pIes->Airgo ) ?
                  eCSR_MEDIUM_ACCESS_11e_HCF : eCSR_MEDIUM_ACCESS_11e_eDCF;
    }
    else
    {
        qosType = eCSR_MEDIUM_ACCESS_DCF;
    }

    // Scale back based on what this adapter has enabled.
    if ( eCSR_MEDIUM_ACCESS_11e_eDCF == qosType && !csrIs11eSupported( hHal ) )
    {
        qosType = eCSR_MEDIUM_ACCESS_DCF;
    }
    return qosType;
}
//Caller allocates memory for pIEStruct
/* Unpack the raw IE bytes of a BSS description into pIEStruct.
   Returns eHAL_STATUS_FAILURE on a NULL destination, a non-positive IE
   length, or an unpack error. */
eHalStatus csrParseBssDescriptionIEs(tHalHandle hHal, tSirBssDescription *pBssDesc, tDot11fBeaconIEs *pIEStruct)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    // IE bytes are whatever of pBssDesc->length remains past the fixed
    // fields up to ieFields.
    int ieLen = (int)(pBssDesc->length + sizeof( pBssDesc->length ) -
                      GET_FIELD_OFFSET( tSirBssDescription, ieFields ));

    if ( ieLen <= 0 || !pIEStruct )
    {
        return eHAL_STATUS_FAILURE;
    }
    if ( DOT11F_FAILED(dot11fUnpackBeaconIEs( pMac, (tANI_U8 *)pBssDesc->ieFields, ieLen, pIEStruct )) )
    {
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
//This function will allocate memory for the parsed IEs to the caller. Caller must free the memory
//after it is done with the data only if this function succeeds
eHalStatus csrGetParsedBssDescriptionIEs(tHalHandle hHal, tSirBssDescription *pBssDesc, tDot11fBeaconIEs **ppIEStruct)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    if ( !pBssDesc || !ppIEStruct )
    {
        return eHAL_STATUS_INVALID_PARAMETER;
    }
    status = palAllocateMemory(pMac->hHdd, (void **)ppIEStruct, sizeof(tDot11fBeaconIEs));
    if ( !HAL_STATUS_SUCCESS(status) )
    {
        smsLog( pMac, LOGE, FL(" failed to allocate memory\n") );
        VOS_ASSERT( 0 );
        return status;
    }
    palZeroMemory(pMac->hHdd, (void *)*ppIEStruct, sizeof(tDot11fBeaconIEs));
    status = csrParseBssDescriptionIEs(hHal, pBssDesc, *ppIEStruct);
    // On parse failure the allocation is released so the caller never
    // owns memory unless the call succeeded.
    if ( !HAL_STATUS_SUCCESS(status) )
    {
        palFreeMemory(pMac->hHdd, *ppIEStruct);
        *ppIEStruct = NULL;
    }
    return status;
}
/*
 * csrIsNULLSSID - TRUE when the SSID is hidden/empty: zero length, a
 * leading NUL octet, or all octets zero.
 */
tANI_BOOLEAN csrIsNULLSSID( tANI_U8 *pBssSsid, tANI_U8 len )
{
    tANI_U32 remaining;
    tANI_U8 *pOctet;

    /* Zero-length SSID counts as NULL/hidden. */
    if ( 0 == len )
    {
        return TRUE;
    }
    //Consider 0 or space for hidden SSID
    if ( 0 == pBssSsid[0] )
    {
        return TRUE;
    }
    /* An SSID consisting entirely of zero octets is also hidden. */
    for ( pOctet = pBssSsid, remaining = len; remaining; pOctet++, remaining-- )
    {
        if ( *pOctet )
        {
            return FALSE;
        }
    }
    return TRUE;
}
/* csrGetFragThresh - return the configured fragmentation threshold. */
tANI_U32 csrGetFragThresh( tHalHandle hHal )
{
    return PMAC_STRUCT( hHal )->roam.configParam.FragmentationThreshold;
}
/* csrGetRTSThresh - return the configured RTS threshold. */
tANI_U32 csrGetRTSThresh( tHalHandle hHal )
{
    return PMAC_STRUCT( hHal )->roam.configParam.RTSThreshold;
}
/*
 * csrTranslateToPhyModeFromBssDesc - map a scanned network type onto the
 * CSR PHY mode enumeration.  Anything not explicitly 11a/b/g/n (including
 * 11ac and unknown values) resolves to 11ac when WLAN_FEATURE_11AC is
 * built in, otherwise to 11n — the same dual default as before.
 */
eCsrPhyMode csrTranslateToPhyModeFromBssDesc( tSirBssDescription *pSirBssDesc )
{
    eCsrPhyMode phyMode;

    if ( eSIR_11A_NW_TYPE == pSirBssDesc->nwType )
    {
        phyMode = eCSR_DOT11_MODE_11a;
    }
    else if ( eSIR_11B_NW_TYPE == pSirBssDesc->nwType )
    {
        phyMode = eCSR_DOT11_MODE_11b;
    }
    else if ( eSIR_11G_NW_TYPE == pSirBssDesc->nwType )
    {
        phyMode = eCSR_DOT11_MODE_11g;
    }
    else if ( eSIR_11N_NW_TYPE == pSirBssDesc->nwType )
    {
        phyMode = eCSR_DOT11_MODE_11n;
    }
    else
    {
        /* eSIR_11AC_NW_TYPE and any unrecognized type. */
#ifdef WLAN_FEATURE_11AC
        phyMode = eCSR_DOT11_MODE_11ac;
#else
        phyMode = eCSR_DOT11_MODE_11n;
#endif
    }
    return( phyMode );
}
/*
 * csrTranslateToWNICfgDot11Mode - translate a CSR configured dot11 mode
 * into the corresponding WNI CFG value.  AUTO is resolved here from the
 * proprietary-rates setting; unknown values fall back by band (11G on
 * 2.4 GHz, 11A otherwise).
 */
tANI_U32 csrTranslateToWNICfgDot11Mode(tpAniSirGlobal pMac, eCsrCfgDot11Mode csrDot11Mode)
{
    tANI_U32 ret;
    switch(csrDot11Mode)
    {
    case eCSR_CFG_DOT11_MODE_AUTO:
        smsLog(pMac, LOGW, FL(" Warning: sees eCSR_CFG_DOT11_MODE_AUTO \n"));
        //We cannot decide until now.
        if(pMac->roam.configParam.ProprietaryRatesEnabled)
        {
            ret = WNI_CFG_DOT11_MODE_TAURUS;
        }
        else
        {
            ret = WNI_CFG_DOT11_MODE_11N;
        }
        break;
    case eCSR_CFG_DOT11_MODE_TAURUS:
        ret = WNI_CFG_DOT11_MODE_TAURUS;
        break;
    case eCSR_CFG_DOT11_MODE_11A:
        ret = WNI_CFG_DOT11_MODE_11A;
        break;
    case eCSR_CFG_DOT11_MODE_11B:
        ret = WNI_CFG_DOT11_MODE_11B;
        break;
    case eCSR_CFG_DOT11_MODE_11G:
        ret = WNI_CFG_DOT11_MODE_11G;
        break;
    case eCSR_CFG_DOT11_MODE_11N:
        ret = WNI_CFG_DOT11_MODE_11N;
        break;
    case eCSR_CFG_DOT11_MODE_POLARIS:
        ret = WNI_CFG_DOT11_MODE_POLARIS;
        break;
    case eCSR_CFG_DOT11_MODE_TITAN:
        ret = WNI_CFG_DOT11_MODE_TITAN;
        break;
#ifdef WLAN_SOFTAP_FEATURE
    case eCSR_CFG_DOT11_MODE_11G_ONLY:
        ret = WNI_CFG_DOT11_MODE_11G_ONLY;
        break;
    case eCSR_CFG_DOT11_MODE_11N_ONLY:
        ret = WNI_CFG_DOT11_MODE_11N_ONLY;
        break;
#endif
#ifdef WLAN_FEATURE_11AC
    case eCSR_CFG_DOT11_MODE_11AC_ONLY:
        ret = WNI_CFG_DOT11_MODE_11AC_ONLY;
        break;
    case eCSR_CFG_DOT11_MODE_11AC:
        ret = WNI_CFG_DOT11_MODE_11AC;
        break;
#endif
    default:
        /* Fixed typo in the diagnostic ("csrDo11Mode" -> "csrDot11Mode"). */
        smsLog(pMac, LOGW, FL("doesn't expect %d as csrDot11Mode\n"), csrDot11Mode);
        if(eCSR_BAND_24 == pMac->roam.configParam.eBand)
        {
            ret = WNI_CFG_DOT11_MODE_11G;
        }
        else
        {
            ret = WNI_CFG_DOT11_MODE_11A;
        }
        break;
    }
    return (ret);
}
/*
 * csrGetPhyModeFromBss - derive the superset PHY mode for a BSS (11n
 * implies 11b/g/a/n).  Starts from the network type, then upgrades using
 * the parsed IEs: proprietary TAURUS capability wins, otherwise HT caps
 * select 11n and (when built) VHT caps select 11ac.  *pPhyMode is written
 * only when pIes is supplied.  Always returns eHAL_STATUS_SUCCESS.
 */
eHalStatus csrGetPhyModeFromBss(tpAniSirGlobal pMac, tSirBssDescription *pBSSDescription,
                                eCsrPhyMode *pPhyMode, tDot11fBeaconIEs *pIes)
{
    eCsrPhyMode phyMode = csrTranslateToPhyModeFromBssDesc(pBSSDescription);

    if( pIes )
    {
        if ( pIes->Airgo.present && pIes->Airgo.PropCapability.present &&
             PROP_CAPABILITY_GET( TAURUS, pIes->Airgo.PropCapability.capability ) )
        {
            phyMode = eCSR_DOT11_MODE_TAURUS;
        }
        if ( (eCSR_DOT11_MODE_TAURUS != phyMode) && pIes->HTCaps.present )
        {
            phyMode = eCSR_DOT11_MODE_11n;
        }
#ifdef WLAN_FEATURE_11AC
        if ( (eCSR_DOT11_MODE_TAURUS != phyMode) && pIes->VHTCaps.present )
        {
            phyMode = eCSR_DOT11_MODE_11ac;
        }
#endif
        *pPhyMode = phyMode;
    }
    return (eHAL_STATUS_SUCCESS);
}
// Returns TRUE when phyModeIn is compatible with the mode derived from the
// BSS description (bssPhyMode) and, on a match, writes the concrete
// eCSR_CFG_DOT11_MODE to use into *pCfgDot11ModeToUse.
// f5GhzBand is derived from the channel id of the BSS description.
tANI_BOOLEAN csrGetPhyModeInUse( eCsrPhyMode phyModeIn, eCsrPhyMode bssPhyMode, tANI_BOOLEAN f5GhzBand,
                                 eCsrCfgDot11Mode *pCfgDot11ModeToUse )
{
    tANI_BOOLEAN fMatch = FALSE;
    eCsrCfgDot11Mode cfgDot11Mode;
    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11N; // to suppress compiler warning
    switch( phyModeIn )
    {
        // abg always matches: the band picks 11A, else the BSS mode picks 11B/11G.
        case eCSR_DOT11_MODE_abg: //11a or 11b or 11g
            if( f5GhzBand )
            {
                fMatch = TRUE;
                cfgDot11Mode = eCSR_CFG_DOT11_MODE_11A;
            }
            else if( eCSR_DOT11_MODE_11b == bssPhyMode )
            {
                fMatch = TRUE;
                cfgDot11Mode = eCSR_CFG_DOT11_MODE_11B;
            }
            else
            {
                fMatch = TRUE;
                cfgDot11Mode = eCSR_CFG_DOT11_MODE_11G;
            }
            break;
        // Plain 11a matches on any 5 GHz BSS regardless of bssPhyMode.
        case eCSR_DOT11_MODE_11a: //11a
            if( f5GhzBand )
            {
                fMatch = TRUE;
                cfgDot11Mode = eCSR_CFG_DOT11_MODE_11A;
            }
            break;
        // The *_ONLY variants require an exact bssPhyMode match.
        case eCSR_DOT11_MODE_11a_ONLY: //11a
            if( eCSR_DOT11_MODE_11a == bssPhyMode )
            {
                fMatch = TRUE;
                cfgDot11Mode = eCSR_CFG_DOT11_MODE_11A;
            }
            break;
        case eCSR_DOT11_MODE_11g:
            if(!f5GhzBand)
            {
                if( eCSR_DOT11_MODE_11b == bssPhyMode )
                {
                    fMatch = TRUE;
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11B;
                }
                else
                {
                    fMatch = TRUE;
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11G;
                }
            }
            break;
        case eCSR_DOT11_MODE_11g_ONLY:
            if( eCSR_DOT11_MODE_11g == bssPhyMode )
            {
                fMatch = TRUE;
                cfgDot11Mode = eCSR_CFG_DOT11_MODE_11G;
            }
            break;
        case eCSR_DOT11_MODE_11b:
            if( !f5GhzBand )
            {
                fMatch = TRUE;
                cfgDot11Mode = eCSR_CFG_DOT11_MODE_11B;
            }
            break;
        case eCSR_DOT11_MODE_11b_ONLY:
            if( eCSR_DOT11_MODE_11b == bssPhyMode )
            {
                fMatch = TRUE;
                cfgDot11Mode = eCSR_CFG_DOT11_MODE_11B;
            }
            break;
        // 11n is a superset request: always matches, and downgrades the cfg
        // mode to whatever the BSS actually runs.
        case eCSR_DOT11_MODE_11n:
            fMatch = TRUE;
            switch(bssPhyMode)
            {
                case eCSR_DOT11_MODE_11g:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11G;
                    break;
                case eCSR_DOT11_MODE_11b:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11B;
                    break;
                case eCSR_DOT11_MODE_11a:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11A;
                    break;
                case eCSR_DOT11_MODE_11n:
#ifdef WLAN_FEATURE_11AC
                case eCSR_DOT11_MODE_11ac:
#endif
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11N;
                    break;
                case eCSR_DOT11_MODE_TAURUS:
                default:
#ifdef WLAN_FEATURE_11AC
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11AC;
#else
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11N;
#endif
                    break;
            }
            break;
        case eCSR_DOT11_MODE_11n_ONLY:
            // TAURUS implies 11n capability, so it satisfies 11n_ONLY too.
            if((eCSR_DOT11_MODE_11n == bssPhyMode) || (eCSR_DOT11_MODE_TAURUS == bssPhyMode))
            {
                fMatch = TRUE;
                cfgDot11Mode = eCSR_CFG_DOT11_MODE_11N;
            }
            break;
#ifdef WLAN_FEATURE_11AC
        // 11ac is likewise a superset request.
        case eCSR_DOT11_MODE_11ac:
            fMatch = TRUE;
            switch(bssPhyMode)
            {
                case eCSR_DOT11_MODE_11g:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11G;
                    break;
                case eCSR_DOT11_MODE_11b:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11B;
                    break;
                case eCSR_DOT11_MODE_11a:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11A;
                    break;
                case eCSR_DOT11_MODE_11n:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11N;
                    break;
                case eCSR_DOT11_MODE_11ac:
                case eCSR_DOT11_MODE_TAURUS:
                default:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11AC;
                    break;
            }
            break;
        case eCSR_DOT11_MODE_11ac_ONLY:
            if((eCSR_DOT11_MODE_11ac == bssPhyMode) || (eCSR_DOT11_MODE_TAURUS == bssPhyMode))
            {
                fMatch = TRUE;
                cfgDot11Mode = eCSR_CFG_DOT11_MODE_11AC;
            }
            break;
#endif
        // TAURUS (and any unknown phyModeIn) accepts everything.
        case eCSR_DOT11_MODE_TAURUS:
        default:
            fMatch = TRUE;
            switch(bssPhyMode)
            {
                case eCSR_DOT11_MODE_11g:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11G;
                    break;
                case eCSR_DOT11_MODE_11b:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11B;
                    break;
                case eCSR_DOT11_MODE_11a:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11A;
                    break;
                case eCSR_DOT11_MODE_11n:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11N;
                    break;
#ifdef WLAN_FEATURE_11AC
                case eCSR_DOT11_MODE_11ac:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_11AC;
                    break;
#endif
                case eCSR_DOT11_MODE_TAURUS:
                default:
                    cfgDot11Mode = eCSR_CFG_DOT11_MODE_TAURUS;
                    break;
            }
            break;
    }
    if ( fMatch && pCfgDot11ModeToUse )
    {
#ifdef WLAN_FEATURE_11AC
        // Downgrade 11AC to 11N when the firmware does not advertise 11ac.
        if(cfgDot11Mode == eCSR_CFG_DOT11_MODE_11AC && !WDA_getFwWlanFeatCaps(DOT11AC))
        {
            *pCfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_11N;
        }
        else
#endif
        {
            *pCfgDot11ModeToUse = cfgDot11Mode;
        }
    }
    return( fMatch );
}
//This function decides whether one of the bits of phyMode matches the mode in the BSS and is allowed by the user
//setting, pMac->roam.configParam.uCfgDot11Mode. It returns (via pReturnCfgDot11Mode) the mode that fits the criteria.
tANI_BOOLEAN csrIsPhyModeMatch( tpAniSirGlobal pMac, tANI_U32 phyMode,
                                tSirBssDescription *pSirBssDesc, tCsrRoamProfile *pProfile,
                                eCsrCfgDot11Mode *pReturnCfgDot11Mode,
                                tDot11fBeaconIEs *pIes)
{
    tANI_BOOLEAN fMatch = FALSE;
    eCsrPhyMode phyModeInBssDesc, phyMode2;
    eCsrCfgDot11Mode cfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_TAURUS;
    tANI_U32 bitMask, loopCount;
    if(HAL_STATUS_SUCCESS(csrGetPhyModeFromBss(pMac, pSirBssDesc, &phyModeInBssDesc, pIes )))
    {
        // In case someone changes eCSR_DOT11_MODE_TAURUS to non-0:
        // AUTO/TAURUS (or 0) requests are first resolved against the
        // adapter configuration before matching.
        if ( (0 == phyMode) || (eCSR_DOT11_MODE_AUTO & phyMode) || (eCSR_DOT11_MODE_TAURUS & phyMode))
        {
            //Taurus means anything
            if ( eCSR_CFG_DOT11_MODE_ABG == pMac->roam.configParam.uCfgDot11Mode )
            {
                phyMode = eCSR_DOT11_MODE_abg;
            }
            else if(eCSR_CFG_DOT11_MODE_AUTO == pMac->roam.configParam.uCfgDot11Mode)
            {
                if(pMac->roam.configParam.ProprietaryRatesEnabled)
                {
                    phyMode = eCSR_DOT11_MODE_TAURUS;
                }
                else
                {
#ifdef WLAN_FEATURE_11AC
                    phyMode = eCSR_DOT11_MODE_11ac;
#else
                    phyMode = eCSR_DOT11_MODE_11n;
#endif
                }
            }
            else
            {
                //user's pick
                phyMode = pMac->roam.configParam.phyMode;
            }
        }
        // If the request is still AUTO/TAURUS (or 0), match it as a single
        // mode; otherwise walk the per-mode bits below.
        if ( (0 == phyMode) || (eCSR_DOT11_MODE_AUTO & phyMode) || (eCSR_DOT11_MODE_TAURUS & phyMode) )
        {
            if(0 != phyMode)
            {
                if(eCSR_DOT11_MODE_AUTO & phyMode)
                {
                    phyMode2 = eCSR_DOT11_MODE_AUTO & phyMode;
                }
                else
                {
                    phyMode2 = eCSR_DOT11_MODE_TAURUS & phyMode;
                }
            }
            else
            {
                phyMode2 = phyMode;
            }
            fMatch = csrGetPhyModeInUse( phyMode2, phyModeInBssDesc, CSR_IS_CHANNEL_5GHZ(pSirBssDesc->channelId),
                                         &cfgDot11ModeToUse );
        }
        else
        {
            // Try every individual mode bit set in phyMode until one matches.
            bitMask = 1;
            loopCount = 0;
            while(loopCount < eCSR_NUM_PHY_MODE)
            {
                if(0 != ( phyMode2 = (phyMode & (bitMask << loopCount++)) ))
                {
                    fMatch = csrGetPhyModeInUse( phyMode2, phyModeInBssDesc, CSR_IS_CHANNEL_5GHZ(pSirBssDesc->channelId),
                                                 &cfgDot11ModeToUse );
                    if(fMatch) break;
                }
            }
        }
        if ( fMatch && pReturnCfgDot11Mode )
        {
            if( pProfile )
            {
                /* IEEE 11n spec (8.4.3): HT STA shall eliminate TKIP as a
                 * choice for the pairwise cipher suite if CCMP is advertised
                 * by the AP or if the AP included an HT capabilities element
                 * in its Beacons and Probe Response.
                 */
                if( (!CSR_IS_11n_ALLOWED( pProfile->negotiatedUCEncryptionType )) &&
                    ((eCSR_CFG_DOT11_MODE_11N == cfgDot11ModeToUse) ||
#ifdef WLAN_FEATURE_11AC
                     (eCSR_CFG_DOT11_MODE_11AC == cfgDot11ModeToUse) ||
#endif
                     (eCSR_CFG_DOT11_MODE_TAURUS == cfgDot11ModeToUse)) )
                {
                    //We cannot do 11n here
                    if( !CSR_IS_CHANNEL_5GHZ(pSirBssDesc->channelId) )
                    {
                        cfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_11G;
                    }
                    else
                    {
                        cfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_11A;
                    }
                }
            }
            *pReturnCfgDot11Mode = cfgDot11ModeToUse;
        }
    }
    return( fMatch );
}
// csrFindBestPhyMode - pick the "best" (most capable) cfg dot11 mode
// allowed by the phyMode bitmask: 11AC (if built) > 11N > abg-by-band >
// 11A > 11G > 11B.  AUTO/TAURUS (or 0) selects the top of that order.
eCsrCfgDot11Mode csrFindBestPhyMode( tpAniSirGlobal pMac, tANI_U32 phyMode )
{
    eCsrCfgDot11Mode cfgDot11ModeToUse;
    eCsrBand eBand = pMac->roam.configParam.eBand;
#ifdef WLAN_FEATURE_11AC
    if ( (0 == phyMode) || (eCSR_DOT11_MODE_AUTO & phyMode) || (eCSR_DOT11_MODE_TAURUS & phyMode)
        ||(eCSR_DOT11_MODE_11ac & phyMode))
    {
        cfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_11AC;
    }
    else
#endif
    // Note: when WLAN_FEATURE_11AC is built, this branch is the "else" of
    // the test above and is only reached for non-AUTO/TAURUS masks.
    if ( (0 == phyMode) || (eCSR_DOT11_MODE_AUTO & phyMode) || (eCSR_DOT11_MODE_TAURUS & phyMode))
    {
        cfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_11N;
    }
    else
    {
        if( ( eCSR_DOT11_MODE_11n | eCSR_DOT11_MODE_11n_ONLY ) & phyMode )
        {
            cfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_11N;
        }
        else if ( eCSR_DOT11_MODE_abg & phyMode )
        {
            // abg: band preference decides between 11A and 11G.
            if( eCSR_BAND_24 != eBand )
            {
                cfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_11A;
            }
            else
            {
                cfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_11G;
            }
        }
        else if( ( eCSR_DOT11_MODE_11a | eCSR_DOT11_MODE_11a_ONLY ) & phyMode )
        {
            cfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_11A;
        }
        else if( ( eCSR_DOT11_MODE_11g | eCSR_DOT11_MODE_11g_ONLY ) & phyMode )
        {
            cfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_11G;
        }
        else
        {
            cfgDot11ModeToUse = eCSR_CFG_DOT11_MODE_11B;
        }
    }
    return ( cfgDot11ModeToUse );
}
/*
 * csrGet11hPowerConstraint - return the local power constraint advertised
 * by the AP's 802.11h Power Constraint IE, or 0 when 11h support is
 * disabled, the IE is absent, or no IE was supplied.
 */
tANI_U32 csrGet11hPowerConstraint( tHalHandle hHal, tDot11fIEPowerConstraints *pPowerConstraint )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tANI_U32 localPowerConstraint = 0;

    // check if .11h support is enabled, if not, the power constraint is 0.
    // Fix: guard pPowerConstraint against NULL before dereferencing.
    if( pPowerConstraint && pMac->roam.configParam.Is11hSupportEnabled &&
        pPowerConstraint->present )
    {
        localPowerConstraint = pPowerConstraint->localPowerConstraints;
    }
    return( localPowerConstraint );
}
/*
 * csrIsProfileWpa - TRUE when the profile negotiated a WPA auth type AND a
 * WPA-compatible unicast cipher (WEP40/WEP104/TKIP/AES).
 */
tANI_BOOLEAN csrIsProfileWpa( tCsrRoamProfile *pProfile )
{
    /* First gate: the negotiated auth type must be a WPA variant. */
    switch ( pProfile->negotiatedAuthType )
    {
        case eCSR_AUTH_TYPE_WPA:
        case eCSR_AUTH_TYPE_WPA_PSK:
        case eCSR_AUTH_TYPE_WPA_NONE:
#ifdef FEATURE_WLAN_CCX
        case eCSR_AUTH_TYPE_CCKM_WPA:
#endif
            break;      /* candidate; now check the unicast cipher */
        default:
            return FALSE;
    }
    /* Second gate: the unicast cipher must be one WPA can carry. */
    switch ( pProfile->negotiatedUCEncryptionType )
    {
        case eCSR_ENCRYPT_TYPE_WEP40:
        case eCSR_ENCRYPT_TYPE_WEP104:
        case eCSR_ENCRYPT_TYPE_TKIP:
        case eCSR_ENCRYPT_TYPE_AES:
            return TRUE;
        default:
            return FALSE;
    }
}
/*
 * csrIsProfileRSN - TRUE when the profile negotiated an RSN (WPA2) auth
 * type AND an acceptable unicast cipher.
 */
tANI_BOOLEAN csrIsProfileRSN( tCsrRoamProfile *pProfile )
{
    /* First gate: the negotiated auth type must be an RSN variant. */
    switch ( pProfile->negotiatedAuthType )
    {
        case eCSR_AUTH_TYPE_RSN:
        case eCSR_AUTH_TYPE_RSN_PSK:
#ifdef WLAN_FEATURE_VOWIFI_11R
        case eCSR_AUTH_TYPE_FT_RSN:
        case eCSR_AUTH_TYPE_FT_RSN_PSK:
#endif
#ifdef FEATURE_WLAN_CCX
        case eCSR_AUTH_TYPE_CCKM_RSN:
#endif
            break;      /* candidate; now check the unicast cipher */
        default:
            return FALSE;
    }
    /* Second gate: the unicast cipher. */
    switch ( pProfile->negotiatedUCEncryptionType )
    {
        // !!REVIEW - For WPA2, use of RSN IE mandates
        // use of AES as encryption. Here, we qualify
        // even if encryption type is WEP or TKIP
        case eCSR_ENCRYPT_TYPE_WEP40:
        case eCSR_ENCRYPT_TYPE_WEP104:
        case eCSR_ENCRYPT_TYPE_TKIP:
        case eCSR_ENCRYPT_TYPE_AES:
            return TRUE;
        default:
            return FALSE;
    }
}
/*
 * csrIsconcurrentsessionValid - check whether starting currBssPersona on
 * session cursessionId conflicts with any other active session.  Returns
 * eHAL_STATUS_FAILURE on the first conflicting session, SUCCESS otherwise.
 */
eHalStatus
csrIsconcurrentsessionValid(tpAniSirGlobal pMac,tANI_U32 cursessionId,
                            tVOS_CON_MODE currBssPersona)
{
    tANI_U32 i;

    for (i = 0; i < CSR_ROAM_SESSION_MAX; i++)
    {
        tCsrRoamProfile *pCurProfile;
        tVOS_CON_MODE activePersona;

        /* Skip the session being validated and any inactive session. */
        if ((i == cursessionId) || !CSR_IS_SESSION_VALID(pMac, i))
        {
            continue;
        }
        pCurProfile = pMac->roam.roamSession[i].pCurRoamProfile;
        activePersona = pMac->roam.roamSession[i].bssParams.bssPersona;
        switch (currBssPersona)
        {
        case VOS_STA_MODE:
            /* At most one infrastructure STA session. */
            if (pCurProfile && (VOS_STA_MODE == pCurProfile->csrPersona))
            {
                smsLog(pMac, LOGE, FL(" ****STA mode already exists ****\n"));
                return eHAL_STATUS_FAILURE;
            }
            break;
        case VOS_STA_SAP_MODE:
            /* SAP cannot coexist with another SAP or with a P2P GO. */
            if (VOS_STA_SAP_MODE == activePersona)
            {
                smsLog(pMac, LOGE, FL(" ****SoftAP mode already exists ****\n"));
                return eHAL_STATUS_FAILURE;
            }
            else if (VOS_P2P_GO_MODE == activePersona)
            {
                smsLog(pMac, LOGE, FL(" ****Cannot start Multiple Beaconing Role ****\n"));
                return eHAL_STATUS_FAILURE;
            }
            break;
        case VOS_P2P_CLIENT_MODE:
            /* At most one P2P client session. */
            if (pCurProfile && (VOS_P2P_CLIENT_MODE == pCurProfile->csrPersona))
            {
                smsLog(pMac, LOGE, FL(" ****CLIENT mode already exists ****\n"));
                return eHAL_STATUS_FAILURE;
            }
            break;
        case VOS_P2P_GO_MODE:
            /* GO cannot coexist with another GO or with a SAP. */
            if (VOS_P2P_GO_MODE == activePersona)
            {
                smsLog(pMac, LOGE, FL(" ****P2P GO mode already exists ****\n"));
                return eHAL_STATUS_FAILURE;
            }
            else if (VOS_STA_SAP_MODE == activePersona)
            {
                smsLog(pMac, LOGE, FL(" ****Cannot start Multiple Beaconing Role ****\n"));
                return eHAL_STATUS_FAILURE;
            }
            break;
        default :
            smsLog(pMac, LOGE, FL("***Persona not handled = %d*****\n"),currBssPersona);
            break;
        }
    }
    return eHAL_STATUS_SUCCESS;
}
// csrValidateBeaconInterval - validate (and possibly adjust) the beacon
// interval for a new connection/BSS on channelId against all other active
// sessions, according to the MCC (multi-channel concurrency) rules for the
// requesting persona.  May rewrite *beaconInterval for the GO case.
eHalStatus csrValidateBeaconInterval(tpAniSirGlobal pMac, tANI_U8 channelId,
                                     tANI_U16 *beaconInterval, tANI_U32 cursessionId,
                                     tVOS_CON_MODE currBssPersona)
{
    tANI_U32 sessionId = 0;
    // If MCC is neither compiled in nor enabled by configuration, reject.
    // (NOTE(review): the original comment claimed "return SUCCESS" but the
    // code returns FAILURE — the code is taken as authoritative here.)
    if ( !IS_MCC_SUPPORTED && !pMac->roam.configParam.fenableMCCMode){
        return eHAL_STATUS_FAILURE;
    }
    for (sessionId = 0; sessionId < CSR_ROAM_SESSION_MAX; sessionId++ )
    {
        if (cursessionId != sessionId )
        {
            if (!CSR_IS_SESSION_VALID( pMac, sessionId ))
            {
                continue;
            }
            switch (currBssPersona)
            {
                case VOS_STA_MODE:
                    if(pMac->roam.roamSession[sessionId].pCurRoamProfile &&
                      (pMac->roam.roamSession[sessionId].pCurRoamProfile->csrPersona
                        == VOS_P2P_CLIENT_MODE)) //check for P2P client mode
                    {
                        smsLog(pMac, LOG1, FL(" Beacon Interval Validation not required for STA/CLIENT\n"));
                    }
                    //IF SAP has started and STA wants to connect on different channel MCC should
                    //MCC should not be enabled so making it false to enforce on same channel
                    else if (pMac->roam.roamSession[sessionId].bssParams.bssPersona
                              == VOS_STA_SAP_MODE)
                    {
                        if (pMac->roam.roamSession[sessionId].bssParams.operationChn
                            != channelId )
                        {
                            smsLog(pMac, LOGE, FL("***MCC is not enabled for SAP +STA****\n"));
                            return eHAL_STATUS_FAILURE;
                        }
                    }
                    else if(pMac->roam.roamSession[sessionId].bssParams.bssPersona
                             == VOS_P2P_GO_MODE) //Check for P2P go scenario
                    {
                        /* if GO in MCC support different beacon interval, return success */
                        if ( pMac->roam.configParam.fAllowMCCGODiffBI == TRUE)
                            return eHAL_STATUS_SUCCESS;
                        // Different channel AND different beacon interval is
                        // not allowed for STA alongside an existing GO.
                        if ((pMac->roam.roamSession[sessionId].bssParams.operationChn
                             != channelId ) &&
                            (pMac->roam.roamSession[sessionId].bssParams.beaconInterval
                             != *beaconInterval))
                        {
                            smsLog(pMac, LOGE, FL("BeaconInteval is different cannot connect to prefered AP...\n"));
                            return eHAL_STATUS_FAILURE;
                        }
                    }
                    break;
                case VOS_P2P_CLIENT_MODE:
                    if(pMac->roam.roamSession[sessionId].pCurRoamProfile &&
                      (pMac->roam.roamSession[sessionId].pCurRoamProfile->csrPersona
                        == VOS_STA_MODE)) //check for P2P client mode
                    {
                        smsLog(pMac, LOG1, FL(" Ignore Beacon Interval Validation...\n"));
                    }
                    //IF SAP has started and STA wants to connect on different channel MCC should
                    //MCC should not be enabled so making it false to enforce on same channel
                    else if (pMac->roam.roamSession[sessionId].bssParams.bssPersona
                              == VOS_STA_SAP_MODE)
                    {
                        if (pMac->roam.roamSession[sessionId].bssParams.operationChn
                            != channelId )
                        {
                            smsLog(pMac, LOGE, FL("***MCC is not enabled for SAP + CLIENT****\n"));
                            return eHAL_STATUS_FAILURE;
                        }
                    }
                    else if(pMac->roam.roamSession[sessionId].bssParams.bssPersona
                            == VOS_P2P_GO_MODE) //Check for P2P go scenario
                    {
                        if ((pMac->roam.roamSession[sessionId].bssParams.operationChn
                             != channelId ) &&
                            (pMac->roam.roamSession[sessionId].bssParams.beaconInterval
                             != *beaconInterval))
                        {
                            smsLog(pMac, LOGE, FL("BeaconInteval is different cannot connect to P2P_GO network ...\n"));
                            return eHAL_STATUS_FAILURE;
                        }
                    }
                    break;
                case VOS_P2P_GO_MODE :
                    if(pMac->roam.roamSession[sessionId].pCurRoamProfile &&
                      ((pMac->roam.roamSession[sessionId].pCurRoamProfile->csrPersona
                           == VOS_P2P_CLIENT_MODE)
                        || (pMac->roam.roamSession[sessionId].pCurRoamProfile->csrPersona
                           == VOS_STA_MODE))) //check for P2P_client scenario
                    {
                        // Other session not yet connected: nothing to compare.
                        if ((pMac->roam.roamSession[sessionId].connectedProfile.operationChannel
                             == 0 )&&
                            (pMac->roam.roamSession[sessionId].connectedProfile.beaconInterval
                             == 0))
                        {
                            continue;
                        }
                        if (csrIsConnStateConnectedInfra(pMac, sessionId) &&
                           (pMac->roam.roamSession[sessionId].connectedProfile.operationChannel
                            != channelId ) &&
                           (pMac->roam.roamSession[sessionId].connectedProfile.beaconInterval
                            != *beaconInterval))
                        {
                            /*
                             * Updated beaconInterval should be used only when we are starting a new BSS
                             * not incase of client or STA case
                             */
                            *beaconInterval =
                                pMac->roam.roamSession[sessionId].connectedProfile.beaconInterval;
                            return eHAL_STATUS_SUCCESS;
                        }
                    }
                    break;
                default :
                    smsLog(pMac, LOG1, FL(" Persona not supported : %d\n"),currBssPersona);
                    return eHAL_STATUS_FAILURE;
            }
        }
    }
    return eHAL_STATUS_SUCCESS;
}
#ifdef WLAN_FEATURE_VOWIFI_11R
/* Function to return TRUE if the authtype is an 802.11r (FT) type */
tANI_BOOLEAN csrIsAuthType11r( eCsrAuthType AuthType )
{
    /* The FT auth types are FT-RSN (802.1X) and FT-RSN-PSK. */
    if ( (eCSR_AUTH_TYPE_FT_RSN_PSK == AuthType) ||
         (eCSR_AUTH_TYPE_FT_RSN == AuthType) )
    {
        return TRUE;
    }
    return FALSE;
}
/* Function to return TRUE if the profile negotiated an 802.11r auth type */
tANI_BOOLEAN csrIsProfile11r( tCsrRoamProfile *pProfile )
{
    eCsrAuthType negotiated = pProfile->negotiatedAuthType;
    return csrIsAuthType11r( negotiated );
}
#endif
#ifdef FEATURE_WLAN_CCX
/* Function to return TRUE if the authtype is a CCX (CCKM) type */
tANI_BOOLEAN csrIsAuthTypeCCX( eCsrAuthType AuthType )
{
    /* CCKM over WPA or over RSN both count as CCX. */
    if ( (eCSR_AUTH_TYPE_CCKM_WPA == AuthType) ||
         (eCSR_AUTH_TYPE_CCKM_RSN == AuthType) )
    {
        return TRUE;
    }
    return FALSE;
}
/* Function to return TRUE if the profile negotiated a CCX (CCKM) auth type */
tANI_BOOLEAN csrIsProfileCCX( tCsrRoamProfile *pProfile )
{
    eCsrAuthType negotiated = pProfile->negotiatedAuthType;
    return csrIsAuthTypeCCX( negotiated );
}
#endif
#ifdef FEATURE_WLAN_WAPI
/*
 * csrIsProfileWapi - TRUE when the profile negotiated a WAPI auth type
 * (WAI certificate or WAI PSK) AND the WPI unicast cipher.
 */
tANI_BOOLEAN csrIsProfileWapi( tCsrRoamProfile *pProfile )
{
    /* First gate: WAPI auth type. */
    switch ( pProfile->negotiatedAuthType )
    {
        case eCSR_AUTH_TYPE_WAPI_WAI_CERTIFICATE:
        case eCSR_AUTH_TYPE_WAPI_WAI_PSK:
            break;      /* candidate; now check the cipher */
        default:
            return FALSE;
    }
    /* Second gate: WAPI mandates the WPI (SMS4) cipher. */
    switch ( pProfile->negotiatedUCEncryptionType )
    {
        case eCSR_ENCRYPT_TYPE_WPI:
            return TRUE;
        default:
            return FALSE;
    }
}
/* Byte-wise equality of two WAPI OUI suite selectors. */
static tANI_BOOLEAN csrIsWapiOuiEqual( tpAniSirGlobal pMac, tANI_U8 *Oui1, tANI_U8 *Oui2 )
{
    return palEqualMemory( pMac->hHdd, Oui1, Oui2, CSR_WAPI_OUI_SIZE );
}
/*
 * csrIsWapiOuiMatch - search AllCyphers for Cypher; on a hit, optionally
 * copy the matched WAPI OUI out through Oui.  Returns TRUE on a match.
 */
static tANI_BOOLEAN csrIsWapiOuiMatch( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_WAPI_OUI_SIZE],
                                       tANI_U8 cAllCyphers,
                                       tANI_U8 Cypher[],
                                       tANI_U8 Oui[] )
{
    tANI_U8 i;
    for ( i = 0; i < cAllCyphers; i++ )
    {
        if ( csrIsWapiOuiEqual( pMac, AllCyphers[ i ], Cypher ) )
        {
            if ( Oui )
            {
                palCopyMemory( pMac->hHdd, Oui, AllCyphers[ i ], CSR_WAPI_OUI_SIZE );
            }
            return TRUE;
        }
    }
    return FALSE;
}
#endif /* FEATURE_WLAN_WAPI */
/* Byte-wise equality of two WPA/RSN OUI suite selectors. */
static tANI_BOOLEAN csrIsWpaOuiEqual( tpAniSirGlobal pMac, tANI_U8 *Oui1, tANI_U8 *Oui2 )
{
    return palEqualMemory( pMac->hHdd, Oui1, Oui2, CSR_WPA_OUI_SIZE );
}
/*
 * csrIsOuiMatch - search AllCyphers for Cypher; on a hit, optionally copy
 * the matched OUI out through Oui.  Returns TRUE on a match.
 */
static tANI_BOOLEAN csrIsOuiMatch( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_WPA_OUI_SIZE],
                                   tANI_U8 cAllCyphers,
                                   tANI_U8 Cypher[],
                                   tANI_U8 Oui[] )
{
    tANI_U8 i;
    for ( i = 0; i < cAllCyphers; i++ )
    {
        if ( csrIsWpaOuiEqual( pMac, AllCyphers[ i ], Cypher ) )
        {
            if ( Oui )
            {
                palCopyMemory( pMac->hHdd, Oui, AllCyphers[ i ], CSR_WPA_OUI_SIZE );
            }
            return TRUE;
        }
    }
    return FALSE;
}
/* Match the canonical RSN OUI at ouiIndex against the advertised suites. */
static tANI_BOOLEAN csrMatchRSNOUIIndex( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_RSN_OUI_SIZE],
                                         tANI_U8 cAllCyphers, tANI_U8 ouiIndex,
                                         tANI_U8 Oui[] )
{
    tANI_U8 *pRefOui = csrRSNOui[ouiIndex];
    return csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, pRefOui, Oui );
}
#ifdef FEATURE_WLAN_WAPI
/* Match the canonical WAPI OUI at ouiIndex against the advertised suites. */
static tANI_BOOLEAN csrMatchWapiOUIIndex( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_WAPI_OUI_SIZE],
                                          tANI_U8 cAllCyphers, tANI_U8 ouiIndex,
                                          tANI_U8 Oui[] )
{
    tANI_U8 *pRefOui = csrWapiOui[ouiIndex];
    return csrIsWapiOuiMatch( pMac, AllCyphers, cAllCyphers, pRefOui, Oui );
}
#endif /* FEATURE_WLAN_WAPI */
/* Match the canonical WPA OUI at ouiIndex against the advertised suites. */
static tANI_BOOLEAN csrMatchWPAOUIIndex( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_RSN_OUI_SIZE],
                                         tANI_U8 cAllCyphers, tANI_U8 ouiIndex,
                                         tANI_U8 Oui[] )
{
    tANI_U8 *pRefOui = csrWpaOui[ouiIndex];
    return csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, pRefOui, Oui );
}
#if 0
/* NOTE(review): this whole region is compiled out (#if 0) — dead RSN
 * cipher-matching helpers kept for reference only; consider deleting. */
static tANI_BOOLEAN csrIsRSNUnicastNone( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_RSN_OUI_SIZE],
                                         tANI_U8 cAllCyphers,
                                         tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrRSNOui00, Oui ) );
}
static tANI_BOOLEAN csrIsRSNMulticastWep( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_RSN_OUI_SIZE],
                                          tANI_U8 cAllCyphers,
                                          tANI_U8 Oui[] )
{
    tANI_BOOLEAN fYes = FALSE;
    // Check Wep 104 first, if fails, then check Wep40.
    fYes = csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrRSNOui05, Oui );
    if ( !fYes )
    {
        // if not Wep-104, check Wep-40
        fYes = csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrRSNOui01, Oui );
    }
    return( fYes );
}
static tANI_BOOLEAN csrIsRSNUnicastTkip( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_RSN_OUI_SIZE],
                                         tANI_U8 cAllCyphers,
                                         tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrRSNOui02, Oui ) );
}
static tANI_BOOLEAN csrIsRSNMulticastTkip( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_RSN_OUI_SIZE],
                                           tANI_U8 cAllCyphers,
                                           tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrRSNOui02, Oui ) );
}
static tANI_BOOLEAN csrIsRSNUnicastAes( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_RSN_OUI_SIZE],
                                        tANI_U8 cAllCyphers,
                                        tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrRSNOui04, Oui ) );
}
static tANI_BOOLEAN csrIsRSNMulticastAes( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_RSN_OUI_SIZE],
                                          tANI_U8 cAllCyphers,
                                          tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrRSNOui04, Oui ) );
}
#endif
#ifdef FEATURE_WLAN_WAPI
/* WAPI WAI-certificate AKM is entry 1 of the WAPI OUI table. */
static tANI_BOOLEAN csrIsAuthWapiCert( tpAniSirGlobal pMac, tANI_U8 AllSuites[][CSR_WAPI_OUI_SIZE],
                                       tANI_U8 cAllSuites,
                                       tANI_U8 Oui[] )
{
    return csrIsWapiOuiMatch( pMac, AllSuites, cAllSuites, csrWapiOui[1], Oui );
}
/* WAPI WAI-PSK AKM is entry 2 of the WAPI OUI table. */
static tANI_BOOLEAN csrIsAuthWapiPsk( tpAniSirGlobal pMac, tANI_U8 AllSuites[][CSR_WAPI_OUI_SIZE],
                                      tANI_U8 cAllSuites,
                                      tANI_U8 Oui[] )
{
    return csrIsWapiOuiMatch( pMac, AllSuites, cAllSuites, csrWapiOui[2], Oui );
}
#endif /* FEATURE_WLAN_WAPI */
#ifdef WLAN_FEATURE_VOWIFI_11R
/*
 * Function for 11R FT Authentication. We match the FT Authentication Cipher suite
 * here. This matches for FT Auth with the 802.1X exchange.
 */
static tANI_BOOLEAN csrIsFTAuthRSN( tpAniSirGlobal pMac, tANI_U8 AllSuites[][CSR_RSN_OUI_SIZE],
                                    tANI_U8 cAllSuites,
                                    tANI_U8 Oui[] )
{
    /* Index 3 of csrRSNOui is the FT-802.1X AKM selector.  The index was
       previously written as the octal literal 03; use decimal to avoid
       octal-literal hazards (value unchanged). */
    return( csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrRSNOui[3], Oui ) );
}
/*
 * Function for 11R FT Authentication. We match the FT Authentication Cipher suite
 * here. This matches for FT Auth with the PSK.
 */
static tANI_BOOLEAN csrIsFTAuthRSNPsk( tpAniSirGlobal pMac, tANI_U8 AllSuites[][CSR_RSN_OUI_SIZE],
                                       tANI_U8 cAllSuites,
                                       tANI_U8 Oui[] )
{
    /* Index 4 of csrRSNOui is the FT-PSK AKM selector; decimal instead of
       the former octal literal 04 (value unchanged). */
    return( csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrRSNOui[4], Oui ) );
}
#endif
#ifdef FEATURE_WLAN_CCX
/*
 * Function for CCX CCKM AKM Authentication. We match the CCKM AKM Authentication Key Management suite
 * here. This matches for CCKM AKM Auth with the 802.1X exchange.
 */
static tANI_BOOLEAN csrIsCcxCckmAuthRSN( tpAniSirGlobal pMac, tANI_U8 AllSuites[][CSR_RSN_OUI_SIZE],
                                         tANI_U8 cAllSuites,
                                         tANI_U8 Oui[] )
{
    /* Index 6 of csrRSNOui is the CCKM AKM selector; decimal instead of
       the former octal literal 06 (value unchanged). */
    return( csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrRSNOui[6], Oui ) );
}
/* CCKM AKM over WPA: index 6 of csrWpaOui (decimal instead of the former
   octal literal 06 — value unchanged). */
static tANI_BOOLEAN csrIsCcxCckmAuthWpa( tpAniSirGlobal pMac, tANI_U8 AllSuites[][CSR_WPA_OUI_SIZE],
                                         tANI_U8 cAllSuites,
                                         tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrWpaOui[6], Oui ) );
}
#endif
/* RSN 802.1X AKM: index 1 of csrRSNOui; with 11w also accept index 5
   (SHA-256 variant).  Indices converted from octal literals 01/05 to
   decimal (values unchanged). */
static tANI_BOOLEAN csrIsAuthRSN( tpAniSirGlobal pMac, tANI_U8 AllSuites[][CSR_RSN_OUI_SIZE],
                                  tANI_U8 cAllSuites,
                                  tANI_U8 Oui[] )
{
#ifdef WLAN_FEATURE_11W
    return( csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrRSNOui[1], Oui ) ||
            csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrRSNOui[5], Oui ));
#else
    return( csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrRSNOui[1], Oui ) );
#endif
}
/* RSN PSK AKM: index 2 of csrRSNOui; with 11w also accept index 6
   (SHA-256 variant).  Indices converted from octal literals 02/06 to
   decimal (values unchanged). */
static tANI_BOOLEAN csrIsAuthRSNPsk( tpAniSirGlobal pMac, tANI_U8 AllSuites[][CSR_RSN_OUI_SIZE],
                                     tANI_U8 cAllSuites,
                                     tANI_U8 Oui[] )
{
#ifdef WLAN_FEATURE_11W
    return( csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrRSNOui[2], Oui ) ||
            csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrRSNOui[6], Oui ) );
#else
    return( csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrRSNOui[2], Oui ) );
#endif
}
/* WPA 802.1X AKM: index 1 of csrWpaOui (decimal instead of the former
   octal literal 01 — value unchanged). */
static tANI_BOOLEAN csrIsAuthWpa( tpAniSirGlobal pMac, tANI_U8 AllSuites[][CSR_WPA_OUI_SIZE],
                                  tANI_U8 cAllSuites,
                                  tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrWpaOui[1], Oui ) );
}
#ifdef NOT_CURRENTLY_USED
/* Pre-WPA 802.1X suite: index 0 of csrWpaOui (decimal instead of the
   former octal literal 00 — value unchanged). */
static tANI_BOOLEAN csrIsAuth802_1x( tpAniSirGlobal pMac, tANI_U8 AllSuites[][CSR_WPA_OUI_SIZE],
                                     tANI_U8 cAllSuites,
                                     tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrWpaOui[0], Oui ) );
}
#endif // NOT_CURRENTLY_USED
/* WPA PSK AKM: index 2 of csrWpaOui (decimal instead of the former octal
   literal 02 — value unchanged). */
static tANI_BOOLEAN csrIsAuthWpaPsk( tpAniSirGlobal pMac, tANI_U8 AllSuites[][CSR_WPA_OUI_SIZE],
                                     tANI_U8 cAllSuites,
                                     tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllSuites, cAllSuites, csrWpaOui[2], Oui ) );
}
#if 0
/* NOTE(review): this whole region is compiled out (#if 0) — dead WPA
 * cipher-matching helpers kept for reference only; consider deleting. */
static tANI_BOOLEAN csrIsUnicastNone( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_WPA_OUI_SIZE],
                                      tANI_U8 cAllCyphers,
                                      tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrWpaOui00, Oui ) );
}
static tANI_BOOLEAN csrIsUnicastTkip( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_WPA_OUI_SIZE],
                                      tANI_U8 cAllCyphers,
                                      tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrWpaOui02, Oui ) );
}
static tANI_BOOLEAN csrIsUnicastAes( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_WPA_OUI_SIZE],
                                     tANI_U8 cAllCyphers,
                                     tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrWpaOui04, Oui ) );
}
static tANI_BOOLEAN csrIsMulticastWep( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_WPA_OUI_SIZE],
                                       tANI_U8 cAllCyphers,
                                       tANI_U8 Oui[] )
{
    tANI_BOOLEAN fYes = FALSE;
    // Check Wep 104 first, if fails, then check Wep40.
    fYes = csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrWpaOui05, Oui );
    if ( !fYes )
    {
        // if not Wep-104, check Wep-40
        fYes = csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrWpaOui01, Oui );
    }
    return( fYes );
}
static tANI_BOOLEAN csrIsMulticastTkip( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_WPA_OUI_SIZE],
                                        tANI_U8 cAllCyphers,
                                        tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrWpaOui02, Oui ) );
}
static tANI_BOOLEAN csrIsMulticastAes( tpAniSirGlobal pMac, tANI_U8 AllCyphers[][CSR_WPA_OUI_SIZE],
                                       tANI_U8 cAllCyphers,
                                       tANI_U8 Oui[] )
{
    return( csrIsOuiMatch( pMac, AllCyphers, cAllCyphers, csrWpaOui04, Oui ) );
}
#endif
/*
 * csrGetOUIIndexFromCipher - map a CSR encryption type to the index of
 * its suite selector in the OUI tables.  Unknown types map to the
 * reserved index.
 */
tANI_U8 csrGetOUIIndexFromCipher( eCsrEncryptionType enType )
{
    switch ( enType )
    {
        case eCSR_ENCRYPT_TYPE_WEP40:
        case eCSR_ENCRYPT_TYPE_WEP40_STATICKEY:
            return CSR_OUI_WEP40_OR_1X_INDEX;
        case eCSR_ENCRYPT_TYPE_WEP104:
        case eCSR_ENCRYPT_TYPE_WEP104_STATICKEY:
            return CSR_OUI_WEP104_INDEX;
        case eCSR_ENCRYPT_TYPE_TKIP:
            return CSR_OUI_TKIP_OR_PSK_INDEX;
        case eCSR_ENCRYPT_TYPE_AES:
            return CSR_OUI_AES_INDEX;
        case eCSR_ENCRYPT_TYPE_NONE:
            return CSR_OUI_USE_GROUP_CIPHER_INDEX;
#ifdef FEATURE_WLAN_WAPI
        case eCSR_ENCRYPT_TYPE_WPI:
            return CSR_OUI_WAPI_WAI_CERT_OR_SMS4_INDEX;
#endif /* FEATURE_WLAN_WAPI */
        default: //HOWTO handle this?
            return CSR_OUI_RESERVED_INDEX;
    }
}
/*
 * csrGetRSNInformation
 *
 * Match the caller's requested unicast cipher (enType), multicast cipher
 * list (pMCEncryption) and auth-type preference list (pAuthType) against
 * the contents of a BSS's RSN IE (pRSNIe).
 *
 * Output parameters (each may be NULL when the caller does not need it):
 *   UnicastCypher / MulticastCypher / AuthSuite - matching OUIs, each
 *       CSR_RSN_OUI_SIZE bytes, valid only when TRUE is returned.
 *   Capabilities        - RSN capability bits copied from the IE.
 *   pNegotiatedAuthtype - first auth type (in caller preference order)
 *                         supported by both sides.
 *   pNegotiatedMCCipher - the multicast cipher that matched.
 *
 * Returns TRUE only when unicast cipher, a multicast cipher AND an auth
 * type all match.
 */
tANI_BOOLEAN csrGetRSNInformation( tHalHandle hHal, tCsrAuthList *pAuthType, eCsrEncryptionType enType, tCsrEncryptionList *pMCEncryption,
                                   tDot11fIERSN *pRSNIe,
                                   tANI_U8 *UnicastCypher,
                                   tANI_U8 *MulticastCypher,
                                   tANI_U8 *AuthSuite,
                                   tCsrRSNCapabilities *Capabilities,
                                   eCsrAuthType *pNegotiatedAuthtype,
                                   eCsrEncryptionType *pNegotiatedMCCipher )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tANI_BOOLEAN fAcceptableCyphers = FALSE;
    tANI_U8 cUnicastCyphers = 0;
    tANI_U8 cMulticastCyphers = 0;
    tANI_U8 cAuthSuites = 0, i;
    tANI_U8 Unicast[ CSR_RSN_OUI_SIZE ];
    tANI_U8 Multicast[ CSR_RSN_OUI_SIZE ];
    tANI_U8 AuthSuites[ CSR_RSN_MAX_AUTH_SUITES ][ CSR_RSN_OUI_SIZE ];
    tANI_U8 Authentication[ CSR_RSN_OUI_SIZE ];
    tANI_U8 MulticastCyphers[ CSR_RSN_MAX_MULTICAST_CYPHERS ][ CSR_RSN_OUI_SIZE ];
    eCsrAuthType negAuthType = eCSR_AUTH_TYPE_UNKNOWN;
    do{
        if ( pRSNIe->present )
        {
            // The RSN IE carries exactly one group (multicast) cipher suite.
            cMulticastCyphers++;
            palCopyMemory(pMac->hHdd, MulticastCyphers, pRSNIe->gp_cipher_suite, CSR_RSN_OUI_SIZE);
            cUnicastCyphers = (tANI_U8)(pRSNIe->pwise_cipher_suite_count);
            cAuthSuites = (tANI_U8)(pRSNIe->akm_suite_count);
            // Cache the advertised AKM suites locally, bounded by
            // CSR_RSN_MAX_AUTH_SUITES to protect the stack array.
            for(i = 0; i < cAuthSuites && i < CSR_RSN_MAX_AUTH_SUITES; i++)
            {
                palCopyMemory(pMac->hHdd, (void *)&AuthSuites[i],
                              (void *)&pRSNIe->akm_suites[i], CSR_RSN_OUI_SIZE);
            }
            //Check - Is requested Unicast Cipher supported by the BSS.
            fAcceptableCyphers = csrMatchRSNOUIIndex( pMac, pRSNIe->pwise_cipher_suites, cUnicastCyphers,
                                                      csrGetOUIIndexFromCipher( enType ), Unicast );
            if( !fAcceptableCyphers ) break;
            //Unicast is supported. Pick the first matching Group cipher, if any.
            // NOTE(review): if pMCEncryption->numEntries is 0 this loop is
            // skipped, fAcceptableCyphers stays TRUE from the unicast match,
            // and encryptionType[0] is reported below -- confirm callers
            // always pass a non-empty multicast list.
            for( i = 0 ; i < pMCEncryption->numEntries ; i++ )
            {
                fAcceptableCyphers = csrMatchRSNOUIIndex( pMac, MulticastCyphers, cMulticastCyphers,
                                                          csrGetOUIIndexFromCipher( pMCEncryption->encryptionType[i] ), Multicast );
                if(fAcceptableCyphers)
                {
                    break;
                }
            }
            if( !fAcceptableCyphers ) break;
            if( pNegotiatedMCCipher )
                *pNegotiatedMCCipher = pMCEncryption->encryptionType[i];
            /* Initializing with FALSE as it has TRUE value already */
            fAcceptableCyphers = FALSE;
            // Walk the caller's auth preference list; for each entry probe the
            // AKM suites advertised by the BSS, most-preferred AKMs first.
            for (i = 0 ; i < pAuthType->numEntries; i++)
            {
                //Ciphers are supported, Match authentication algorithm and pick first matching authtype.
#ifdef WLAN_FEATURE_VOWIFI_11R
                /* Changed the AKM suites according to order of preference */
                if ( csrIsFTAuthRSN( pMac, AuthSuites, cAuthSuites, Authentication ) )
                {
                    if (eCSR_AUTH_TYPE_FT_RSN == pAuthType->authType[i])
                        negAuthType = eCSR_AUTH_TYPE_FT_RSN;
                }
                if ( (negAuthType == eCSR_AUTH_TYPE_UNKNOWN) && csrIsFTAuthRSNPsk( pMac, AuthSuites, cAuthSuites, Authentication ) )
                {
                    if (eCSR_AUTH_TYPE_FT_RSN_PSK == pAuthType->authType[i])
                        negAuthType = eCSR_AUTH_TYPE_FT_RSN_PSK;
                }
#endif
#ifdef FEATURE_WLAN_CCX
                /* CCX only supports 802.1X. No PSK. */
                if ( (negAuthType == eCSR_AUTH_TYPE_UNKNOWN) && csrIsCcxCckmAuthRSN( pMac, AuthSuites, cAuthSuites, Authentication ) )
                {
                    if (eCSR_AUTH_TYPE_CCKM_RSN == pAuthType->authType[i])
                        negAuthType = eCSR_AUTH_TYPE_CCKM_RSN;
                }
#endif
                if ( (negAuthType == eCSR_AUTH_TYPE_UNKNOWN) && csrIsAuthRSN( pMac, AuthSuites, cAuthSuites, Authentication ) )
                {
                    if (eCSR_AUTH_TYPE_RSN == pAuthType->authType[i])
                        negAuthType = eCSR_AUTH_TYPE_RSN;
                }
                if ((negAuthType == eCSR_AUTH_TYPE_UNKNOWN) && csrIsAuthRSNPsk( pMac, AuthSuites, cAuthSuites, Authentication ) )
                {
                    if (eCSR_AUTH_TYPE_RSN_PSK == pAuthType->authType[i])
                        negAuthType = eCSR_AUTH_TYPE_RSN_PSK;
                }
                // The 1st auth type in the APs RSN IE, to match stations connecting
                // profiles auth type will cause us to exit this loop
                // This is added as some APs advertise multiple akms in the RSN IE.
                if (eCSR_AUTH_TYPE_UNKNOWN != negAuthType)
                {
                    fAcceptableCyphers = TRUE;
                    break;
                }
            } // for
        }
    }while (0);
    // Copy the negotiated results out only on a full match.
    if ( fAcceptableCyphers )
    {
        if ( MulticastCypher )
        {
            palCopyMemory( pMac->hHdd, MulticastCypher, Multicast, CSR_RSN_OUI_SIZE );
        }
        if ( UnicastCypher )
        {
            palCopyMemory( pMac->hHdd, UnicastCypher, Unicast, CSR_RSN_OUI_SIZE );
        }
        if ( AuthSuite )
        {
            palCopyMemory( pMac->hHdd, AuthSuite, Authentication, CSR_RSN_OUI_SIZE );
        }
        if ( pNegotiatedAuthtype )
        {
            *pNegotiatedAuthtype = negAuthType;
        }
        if ( Capabilities )
        {
            Capabilities->PreAuthSupported = pRSNIe->preauth;
            Capabilities->NoPairwise = pRSNIe->no_pwise;
            Capabilities->PTKSAReplayCounter = pRSNIe->PTKSA_replay_counter;
            Capabilities->GTKSAReplayCounter = pRSNIe->GTKSA_replay_counter;
            Capabilities->Reserved = pRSNIe->reserved;
        }
    }
    return( fAcceptableCyphers );
}
/*
 * csrIsRSNMatch
 *
 * Report whether the profile's requested ciphers/auth types are compatible
 * with the RSN IE in the given beacon/probe IEs. Pure yes/no check: the
 * cipher/auth OUI output buffers of csrGetRSNInformation are not needed,
 * so NULL is passed for all of them.
 */
tANI_BOOLEAN csrIsRSNMatch( tHalHandle hHal, tCsrAuthList *pAuthType, eCsrEncryptionType enType, tCsrEncryptionList *pEnMcType,
                            tDot11fBeaconIEs *pIes, eCsrAuthType *pNegotiatedAuthType, eCsrEncryptionType *pNegotiatedMCCipher )
{
    // See if the cyphers in the Bss description match with the settings in the profile.
    return csrGetRSNInformation( hHal, pAuthType, enType, pEnMcType, &pIes->RSN,
                                 NULL, NULL, NULL, NULL,
                                 pNegotiatedAuthType, pNegotiatedMCCipher );
}
/*
 * csrLookupPMKID
 *
 * Search the session's PMKID cache for an entry whose BSSID equals pBSSId.
 * On a hit, the cached PMKID (CSR_RSN_PMKID_SIZE bytes) is copied into
 * pPMKId and TRUE is returned; FALSE otherwise (including unknown session).
 */
tANI_BOOLEAN csrLookupPMKID( tpAniSirGlobal pMac, tANI_U32 sessionId, tANI_U8 *pBSSId, tANI_U8 *pPMKId )
{
    tANI_BOOLEAN fRC = FALSE, fMatchFound = FALSE;
    tANI_U32 Index;
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );
    if(!pSession)
    {
        smsLog(pMac, LOGE, FL(" session %d not found "), sessionId);
        return FALSE;
    }
    do
    {
        for( Index=0; Index < pSession->NumPmkidCache; Index++ )
        {
            // NOTE(review): despite the message text, this logs the BSSID
            // bytes being looked up (once per cache entry), not a PMKID.
            smsLog(pMac, LOGW, "match PMKID %02X-%02X-%02X-%02X-%02X-%02X to \n",
                pBSSId[0], pBSSId[1], pBSSId[2], pBSSId[3], pBSSId[4], pBSSId[5]);
            if( palEqualMemory( pMac->hHdd, pBSSId, pSession->PmkidCacheInfo[Index].BSSID, sizeof(tCsrBssid) ) )
            {
                // match found
                fMatchFound = TRUE;
                break;
            }
        }
        if( !fMatchFound ) break;
        // Index still references the matching cache slot here.
        palCopyMemory( pMac->hHdd, pPMKId, pSession->PmkidCacheInfo[Index].PMKID, CSR_RSN_PMKID_SIZE );
        fRC = TRUE;
    }
    while( 0 );
    smsLog(pMac, LOGW, "csrLookupPMKID called return match = %d pMac->roam.NumPmkidCache = %d",
        fRC, pSession->NumPmkidCache);
    return fRC;
}
/*
 * csrConstructRSNIe
 *
 * Build the RSN IE to send in an association request, based on the
 * profile's negotiated unicast cipher, multicast cipher list and auth
 * types matched against the target BSS's advertised RSN IE. Exactly one
 * unicast cipher and one AKM suite are written; a cached PMKID for the
 * target BSSID is appended when available.
 *
 * Returns the total IE size in bytes (header included), or 0 when the
 * profile is not RSN or no acceptable match exists.
 */
tANI_U8 csrConstructRSNIe( tHalHandle hHal, tANI_U32 sessionId, tCsrRoamProfile *pProfile,
                           tSirBssDescription *pSirBssDesc, tDot11fBeaconIEs *pIes, tCsrRSNIe *pRSNIe )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tANI_BOOLEAN fRSNMatch;
    tANI_U8 cbRSNIe = 0;
    tANI_U8 UnicastCypher[ CSR_RSN_OUI_SIZE ];
    tANI_U8 MulticastCypher[ CSR_RSN_OUI_SIZE ];
    tANI_U8 AuthSuite[ CSR_RSN_OUI_SIZE ];
    tCsrRSNAuthIe *pAuthSuite;
    tCsrRSNCapabilities RSNCapabilities;
    tCsrRSNPMKIe *pPMK;
    tANI_U8 PMKId[CSR_RSN_PMKID_SIZE];
    tDot11fBeaconIEs *pIesLocal = pIes;
    smsLog(pMac, LOGW, "%s called...", __FUNCTION__);
    do
    {
        if ( !csrIsProfileRSN( pProfile ) ) break;
        // Parse the BSS description ourselves if the caller did not supply IEs.
        if( !pIesLocal && (!HAL_STATUS_SUCCESS(csrGetParsedBssDescriptionIEs(pMac, pSirBssDesc, &pIesLocal))) )
        {
            break;
        }
        // See if the cyphers in the Bss description match with the settings in the profile.
        fRSNMatch = csrGetRSNInformation( hHal, &pProfile->AuthType, pProfile->negotiatedUCEncryptionType,
                                            &pProfile->mcEncryptionType, &pIesLocal->RSN,
                                            UnicastCypher, MulticastCypher, AuthSuite, &RSNCapabilities, NULL, NULL );
        if ( !fRSNMatch ) break;
        pRSNIe->IeHeader.ElementID = SIR_MAC_RSN_EID;
        pRSNIe->Version = CSR_RSN_VERSION_SUPPORTED;
        palCopyMemory( pMac->hHdd, pRSNIe->MulticastOui, MulticastCypher, sizeof( MulticastCypher ) );
        pRSNIe->cUnicastCyphers = 1;
        palCopyMemory( pMac->hHdd, &pRSNIe->UnicastOui[ 0 ], UnicastCypher, sizeof( UnicastCypher ) );
        // The auth-suite section begins immediately after the unicast OUI list.
        pAuthSuite = (tCsrRSNAuthIe *)( &pRSNIe->UnicastOui[ pRSNIe->cUnicastCyphers ] );
        pAuthSuite->cAuthenticationSuites = 1;
        palCopyMemory( pMac->hHdd, &pAuthSuite->AuthOui[ 0 ], AuthSuite, sizeof( AuthSuite ) );
        // RSN capabilities follows the Auth Suite (two octects)
        // !!REVIEW - What should STA put in RSN capabilities, currently
        // just putting back APs capabilities
        // For one, we shouldn't EVER be sending out "pre-auth supported". It is an AP only capability
        RSNCapabilities.PreAuthSupported = 0;
        // NOTE(review): 16-bit store through a cast pointer into a byte
        // array -- relies on the platform tolerating unaligned access and
        // on the capability bitfield layout matching the wire format.
        *(tANI_U16 *)( &pAuthSuite->AuthOui[ 1 ] ) = *((tANI_U16 *)(&RSNCapabilities));
        pPMK = (tCsrRSNPMKIe *)( ((tANI_U8 *)(&pAuthSuite->AuthOui[ 1 ])) + sizeof(tANI_U16) );
        if( csrLookupPMKID( pMac, sessionId, pSirBssDesc->bssId, &(PMKId[0]) ) )
        {
            pPMK->cPMKIDs = 1;
            palCopyMemory( pMac->hHdd, pPMK->PMKIDList[0].PMKID, PMKId, CSR_RSN_PMKID_SIZE );
        }
        else
        {
            pPMK->cPMKIDs = 0;
        }
        // Add in the fixed fields plus 1 Unicast cypher, less the IE Header length
        // Add in the size of the Auth suite (count plus a single OUI)
        // Add in the RSN caps field.
        // Add PMKID count and PMKID (if any)
        pRSNIe->IeHeader.Length = (tANI_U8) (sizeof( *pRSNIe ) - sizeof ( pRSNIe->IeHeader ) +
                                   sizeof( *pAuthSuite ) +
                                   sizeof( tCsrRSNCapabilities ));
        if(pPMK->cPMKIDs)
        {
            pRSNIe->IeHeader.Length += (tANI_U8)(sizeof( tANI_U16 ) +
                                        (pPMK->cPMKIDs * CSR_RSN_PMKID_SIZE));
        }
        // return the size of the IE header (total) constructed...
        cbRSNIe = pRSNIe->IeHeader.Length + sizeof( pRSNIe->IeHeader );
    } while( 0 );
    if( !pIes && pIesLocal )
    {
        //locally allocated
        palFreeMemory(pMac->hHdd, pIesLocal);
    }
    return( cbRSNIe );
}
#ifdef FEATURE_WLAN_WAPI
/*
 * csrGetWapiInformation
 *
 * WAPI analogue of csrGetRSNInformation: match the requested unicast
 * cipher, multicast cipher list and auth types against a BSS's WAPI IE.
 * Output pointers may be NULL when not needed; they are filled only when
 * TRUE is returned.
 */
tANI_BOOLEAN csrGetWapiInformation( tHalHandle hHal, tCsrAuthList *pAuthType, eCsrEncryptionType enType, tCsrEncryptionList *pMCEncryption,
                                    tDot11fIEWAPI *pWapiIe,
                                    tANI_U8 *UnicastCypher,
                                    tANI_U8 *MulticastCypher,
                                    tANI_U8 *AuthSuite,
                                    eCsrAuthType *pNegotiatedAuthtype,
                                    eCsrEncryptionType *pNegotiatedMCCipher )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tANI_BOOLEAN fAcceptableCyphers = FALSE;
    tANI_U8 cUnicastCyphers = 0;
    tANI_U8 cMulticastCyphers = 0;
    tANI_U8 cAuthSuites = 0, i;
    tANI_U8 Unicast[ CSR_WAPI_OUI_SIZE ];
    tANI_U8 Multicast[ CSR_WAPI_OUI_SIZE ];
    tANI_U8 AuthSuites[ CSR_WAPI_MAX_AUTH_SUITES ][ CSR_WAPI_OUI_SIZE ];
    tANI_U8 Authentication[ CSR_WAPI_OUI_SIZE ];
    tANI_U8 MulticastCyphers[ CSR_WAPI_MAX_MULTICAST_CYPHERS ][ CSR_WAPI_OUI_SIZE ];
    eCsrAuthType negAuthType = eCSR_AUTH_TYPE_UNKNOWN;
    do{
        if ( pWapiIe->present )
        {
            // The WAPI IE carries one multicast cipher suite.
            cMulticastCyphers++;
            palCopyMemory(pMac->hHdd, MulticastCyphers, pWapiIe->multicast_cipher_suite, CSR_WAPI_OUI_SIZE);
            cUnicastCyphers = (tANI_U8)(pWapiIe->unicast_cipher_suite_count);
            cAuthSuites = (tANI_U8)(pWapiIe->akm_suite_count);
            // Cache AKM suites locally, bounded by CSR_WAPI_MAX_AUTH_SUITES.
            for(i = 0; i < cAuthSuites && i < CSR_WAPI_MAX_AUTH_SUITES; i++)
            {
                palCopyMemory(pMac->hHdd, (void *)&AuthSuites[i],
                              (void *)&pWapiIe->akm_suites[i], CSR_WAPI_OUI_SIZE);
            }
            //Check - Is requested Unicast Cipher supported by the BSS.
            fAcceptableCyphers = csrMatchWapiOUIIndex( pMac, pWapiIe->unicast_cipher_suites, cUnicastCyphers,
                                                       csrGetOUIIndexFromCipher( enType ), Unicast );
            if( !fAcceptableCyphers ) break;
            //Unicast is supported. Pick the first matching Group cipher, if any.
            for( i = 0 ; i < pMCEncryption->numEntries ; i++ )
            {
                fAcceptableCyphers = csrMatchWapiOUIIndex( pMac, MulticastCyphers, cMulticastCyphers,
                                                           csrGetOUIIndexFromCipher( pMCEncryption->encryptionType[i] ), Multicast );
                if(fAcceptableCyphers)
                {
                    break;
                }
            }
            if( !fAcceptableCyphers ) break;
            if( pNegotiatedMCCipher )
                *pNegotiatedMCCipher = pMCEncryption->encryptionType[i];
            //Ciphers are supported, Match authentication algorithm and pick first matching authtype.
            // WAI-certificate takes precedence over WAI-PSK.
            if ( csrIsAuthWapiCert( pMac, AuthSuites, cAuthSuites, Authentication ) )
            {
                negAuthType = eCSR_AUTH_TYPE_WAPI_WAI_CERTIFICATE;
            }
            else if ( csrIsAuthWapiPsk( pMac, AuthSuites, cAuthSuites, Authentication ) )
            {
                negAuthType = eCSR_AUTH_TYPE_WAPI_WAI_PSK;
            }
            else
            {
                fAcceptableCyphers = FALSE;
                negAuthType = eCSR_AUTH_TYPE_UNKNOWN;
            }
            if( ( 0 == pAuthType->numEntries ) || ( FALSE == fAcceptableCyphers ) )
            {
                //Caller doesn't care about auth type, or BSS doesn't match
                break;
            }
            // The negotiated auth type must also appear in the caller's list.
            fAcceptableCyphers = FALSE;
            for( i = 0 ; i < pAuthType->numEntries; i++ )
            {
                if( pAuthType->authType[i] == negAuthType )
                {
                    fAcceptableCyphers = TRUE;
                    break;
                }
            }
        }
    }while (0);
    // Copy negotiated results out only on a full match.
    if ( fAcceptableCyphers )
    {
        if ( MulticastCypher )
        {
            palCopyMemory( pMac->hHdd, MulticastCypher, Multicast, CSR_WAPI_OUI_SIZE );
        }
        if ( UnicastCypher )
        {
            palCopyMemory( pMac->hHdd, UnicastCypher, Unicast, CSR_WAPI_OUI_SIZE );
        }
        if ( AuthSuite )
        {
            palCopyMemory( pMac->hHdd, AuthSuite, Authentication, CSR_WAPI_OUI_SIZE );
        }
        if ( pNegotiatedAuthtype )
        {
            *pNegotiatedAuthtype = negAuthType;
        }
    }
    return( fAcceptableCyphers );
}
/*
 * csrIsWapiMatch
 *
 * Report whether the profile's requested ciphers/auth types are compatible
 * with the WAPI IE in the given beacon/probe IEs. Yes/no check only, so
 * the OUI output buffers of csrGetWapiInformation are passed as NULL.
 */
tANI_BOOLEAN csrIsWapiMatch( tHalHandle hHal, tCsrAuthList *pAuthType, eCsrEncryptionType enType, tCsrEncryptionList *pEnMcType,
                             tDot11fBeaconIEs *pIes, eCsrAuthType *pNegotiatedAuthType, eCsrEncryptionType *pNegotiatedMCCipher )
{
    // See if the cyphers in the Bss description match with the settings in the profile.
    return csrGetWapiInformation( hHal, pAuthType, enType, pEnMcType, &pIes->WAPI,
                                  NULL, NULL, NULL,
                                  pNegotiatedAuthType, pNegotiatedMCCipher );
}
/*
 * csrLookupBKID
 *
 * Search the session's WAPI BKID cache for an entry whose BSSID equals
 * pBSSId. On a hit, the cached BKID (CSR_WAPI_BKID_SIZE bytes) is copied
 * into pBKId and TRUE is returned; FALSE otherwise (including an unknown
 * session id).
 */
tANI_BOOLEAN csrLookupBKID( tpAniSirGlobal pMac, tANI_U32 sessionId, tANI_U8 *pBSSId, tANI_U8 *pBKId )
{
    tANI_BOOLEAN fFound = FALSE;
    tANI_U32 idx;
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );
    if(!pSession)
    {
        smsLog(pMac, LOGE, FL(" session %d not found "), sessionId);
        return FALSE;
    }
    for( idx = 0; idx < pSession->NumBkidCache; idx++ )
    {
        smsLog(pMac, LOGW, "match BKID %02X-%02X-%02X-%02X-%02X-%02X to \n",
            pBSSId[0], pBSSId[1], pBSSId[2], pBSSId[3], pBSSId[4], pBSSId[5]);
        if( palEqualMemory( pMac->hHdd, pBSSId, pSession->BkidCacheInfo[idx].BSSID, sizeof(tCsrBssid) ) )
        {
            // Matching BSSID -- hand back the cached BKID and stop looking.
            palCopyMemory( pMac->hHdd, pBKId, pSession->BkidCacheInfo[idx].BKID, CSR_WAPI_BKID_SIZE );
            fFound = TRUE;
            break;
        }
    }
    smsLog(pMac, LOGW, "csrLookupBKID called return match = %d pMac->roam.NumBkidCache = %d", fFound, pSession->NumBkidCache);
    return fFound;
}
/*
 * csrConstructWapiIe
 *
 * Build the WAPI IE to send in an association request, based on the
 * profile's settings matched against the target BSS's advertised WAPI IE.
 * One auth suite, one unicast cipher and one multicast cipher are written;
 * a cached BKID for the target BSSID is appended when available.
 *
 * Returns the total IE size in bytes (header included), or 0 when the
 * profile is not WAPI or no acceptable match exists.
 */
tANI_U8 csrConstructWapiIe( tpAniSirGlobal pMac, tANI_U32 sessionId, tCsrRoamProfile *pProfile,
                            tSirBssDescription *pSirBssDesc, tDot11fBeaconIEs *pIes, tCsrWapiIe *pWapiIe )
{
    tANI_BOOLEAN fWapiMatch = FALSE;
    tANI_U8 cbWapiIe = 0;
    tANI_U8 UnicastCypher[ CSR_WAPI_OUI_SIZE ];
    tANI_U8 MulticastCypher[ CSR_WAPI_OUI_SIZE ];
    tANI_U8 AuthSuite[ CSR_WAPI_OUI_SIZE ];
    tANI_U8 BKId[CSR_WAPI_BKID_SIZE];
    tANI_U8 *pWapi = NULL;
    tANI_BOOLEAN fBKIDFound = FALSE;
    tDot11fBeaconIEs *pIesLocal = pIes;
    do
    {
        if ( !csrIsProfileWapi( pProfile ) ) break;
        // Parse the BSS description ourselves if the caller did not supply IEs.
        if( !pIesLocal && (!HAL_STATUS_SUCCESS(csrGetParsedBssDescriptionIEs(pMac, pSirBssDesc, &pIesLocal))) )
        {
            break;
        }
        // See if the cyphers in the Bss description match with the settings in the profile.
        fWapiMatch = csrGetWapiInformation( pMac, &pProfile->AuthType, pProfile->negotiatedUCEncryptionType,
                                            &pProfile->mcEncryptionType, &pIesLocal->WAPI,
                                            UnicastCypher, MulticastCypher, AuthSuite, NULL, NULL );
        if ( !fWapiMatch ) break;
        // Zero the whole IE first; several 2-byte fields below are written
        // one byte at a time and rely on the other byte staying zero.
        palZeroMemory(pMac->hHdd, pWapiIe, sizeof(tCsrWapiIe));
        pWapiIe->IeHeader.ElementID = DOT11F_EID_WAPI;
        pWapiIe->Version = CSR_WAPI_VERSION_SUPPORTED;
        pWapiIe->cAuthenticationSuites = 1;
        palCopyMemory( pMac->hHdd, &pWapiIe->AuthOui[ 0 ], AuthSuite, sizeof( AuthSuite ) );
        pWapi = (tANI_U8 *) (&pWapiIe->AuthOui[ 1 ]);
        // NOTE(review): pWapi is a byte pointer, so this stores only the low
        // byte of the 2-byte count; the high byte was zeroed above. That is
        // only correct little-endian wire order -- confirm on target.
        *pWapi = (tANI_U16)1; //cUnicastCyphers
        pWapi+=2;
        palCopyMemory( pMac->hHdd, pWapi, UnicastCypher, sizeof( UnicastCypher ) );
        pWapi += sizeof( UnicastCypher );
        palCopyMemory( pMac->hHdd, pWapi, MulticastCypher, sizeof( MulticastCypher ) );
        pWapi += sizeof( MulticastCypher );
        // WAPI capabilities follows the Auth Suite (two octects)
        // we shouldn't EVER be sending out "pre-auth supported". It is an AP only capability
        // & since we already did a memset pWapiIe to 0, skip these fields
        pWapi +=2;
        fBKIDFound = csrLookupBKID( pMac, sessionId, pSirBssDesc->bssId, &(BKId[0]) );
        if( fBKIDFound )
        {
            /* Do we need to change the endianness here */
            *pWapi = (tANI_U16)1; //cBKIDs
            pWapi+=2;
            palCopyMemory( pMac->hHdd, pWapi, BKId, CSR_WAPI_BKID_SIZE );
        }
        else
        {
            // No cached BKID: write an explicit 2-byte zero count.
            *pWapi = 0;
            pWapi+=1;
            *pWapi = 0;
            pWapi+=1;
        }
        // Add in the IE fields except the IE header
        // Add BKID count and BKID (if any)
        pWapiIe->IeHeader.Length = (tANI_U8) (sizeof( *pWapiIe ) - sizeof ( pWapiIe->IeHeader ));
        /*2 bytes for BKID Count field*/
        pWapiIe->IeHeader.Length += sizeof( tANI_U16 );
        if(fBKIDFound)
        {
            pWapiIe->IeHeader.Length += CSR_WAPI_BKID_SIZE;
        }
        // return the size of the IE header (total) constructed...
        cbWapiIe = pWapiIe->IeHeader.Length + sizeof( pWapiIe->IeHeader );
    } while( 0 );
    if( !pIes && pIesLocal )
    {
        //locally allocated
        palFreeMemory(pMac->hHdd, pIesLocal);
    }
    return( cbWapiIe );
}
#endif /* FEATURE_WLAN_WAPI */
/*
 * csrGetWpaCyphers
 *
 * WPA analogue of csrGetRSNInformation: match the requested unicast
 * cipher, multicast cipher list and auth-type preference list against a
 * BSS's WPA IE. Output pointers may be NULL when not needed; they are
 * filled only when TRUE is returned.
 */
tANI_BOOLEAN csrGetWpaCyphers( tpAniSirGlobal pMac, tCsrAuthList *pAuthType, eCsrEncryptionType enType, tCsrEncryptionList *pMCEncryption,
                               tDot11fIEWPA *pWpaIe,
                               tANI_U8 *UnicastCypher,
                               tANI_U8 *MulticastCypher,
                               tANI_U8 *AuthSuite,
                               eCsrAuthType *pNegotiatedAuthtype,
                               eCsrEncryptionType *pNegotiatedMCCipher )
{
    tANI_BOOLEAN fAcceptableCyphers = FALSE;
    tANI_U8 cUnicastCyphers = 0;
    tANI_U8 cMulticastCyphers = 0;
    tANI_U8 cAuthSuites = 0;
    tANI_U8 Unicast[ CSR_WPA_OUI_SIZE ];
    tANI_U8 Multicast[ CSR_WPA_OUI_SIZE ];
    tANI_U8 Authentication[ CSR_WPA_OUI_SIZE ];
    tANI_U8 MulticastCyphers[ 1 ][ CSR_WPA_OUI_SIZE ];
    tANI_U8 i;
    eCsrAuthType negAuthType = eCSR_AUTH_TYPE_UNKNOWN;
    do
    {
        if ( pWpaIe->present )
        {
            // The WPA IE carries exactly one multicast cipher.
            cMulticastCyphers = 1;
            palCopyMemory(pMac->hHdd, MulticastCyphers, pWpaIe->multicast_cipher, CSR_WPA_OUI_SIZE);
            cUnicastCyphers = (tANI_U8)(pWpaIe->unicast_cipher_count);
            cAuthSuites = (tANI_U8)(pWpaIe->auth_suite_count);
            //Check - Is requested Unicast Cipher supported by the BSS.
            fAcceptableCyphers = csrMatchWPAOUIIndex( pMac, pWpaIe->unicast_ciphers, cUnicastCyphers,
                                                      csrGetOUIIndexFromCipher( enType ), Unicast );
            if( !fAcceptableCyphers ) break;
            //Unicast is supported. Pick the first matching Group cipher, if any.
            for( i = 0 ; i < pMCEncryption->numEntries ; i++ )
            {
                fAcceptableCyphers = csrMatchWPAOUIIndex( pMac, MulticastCyphers, cMulticastCyphers,
                                                          csrGetOUIIndexFromCipher( pMCEncryption->encryptionType[i]), Multicast );
                if(fAcceptableCyphers)
                {
                    break;
                }
            }
            if( !fAcceptableCyphers ) break;
            if( pNegotiatedMCCipher )
                *pNegotiatedMCCipher = pMCEncryption->encryptionType[i];
            /* Initializing with FALSE as it has TRUE value already */
            fAcceptableCyphers = FALSE;
            // Walk the caller's auth preference list against the advertised
            // WPA auth suites; 802.1X (WPA) is probed before WPA-PSK.
            for (i = 0 ; i < pAuthType->numEntries; i++)
            {
                //Ciphers are supported, Match authentication algorithm and pick first matching authtype.
                if ( csrIsAuthWpa( pMac, pWpaIe->auth_suites, cAuthSuites, Authentication ) )
                {
                    if (eCSR_AUTH_TYPE_WPA == pAuthType->authType[i])
                        negAuthType = eCSR_AUTH_TYPE_WPA;
                }
                if ( (negAuthType == eCSR_AUTH_TYPE_UNKNOWN) && csrIsAuthWpaPsk( pMac, pWpaIe->auth_suites, cAuthSuites, Authentication ) )
                {
                    if (eCSR_AUTH_TYPE_WPA_PSK == pAuthType->authType[i])
                        negAuthType = eCSR_AUTH_TYPE_WPA_PSK;
                }
#ifdef FEATURE_WLAN_CCX
                if ( (negAuthType == eCSR_AUTH_TYPE_UNKNOWN) && csrIsCcxCckmAuthWpa( pMac, pWpaIe->auth_suites, cAuthSuites, Authentication ) )
                {
                    if (eCSR_AUTH_TYPE_CCKM_WPA == pAuthType->authType[i])
                        negAuthType = eCSR_AUTH_TYPE_CCKM_WPA;
                }
#endif /* FEATURE_WLAN_CCX */
                // The 1st auth type in the APs WPA IE, to match stations connecting
                // profiles auth type will cause us to exit this loop
                // This is added as some APs advertise multiple akms in the WPA IE.
                if (eCSR_AUTH_TYPE_UNKNOWN != negAuthType)
                {
                    fAcceptableCyphers = TRUE;
                    break;
                }
            } // for
        }
    }while(0);
    // Copy negotiated results out only on a full match.
    if ( fAcceptableCyphers )
    {
        if ( MulticastCypher )
        {
            palCopyMemory( pMac->hHdd, (tANI_U8 **)MulticastCypher, Multicast, CSR_WPA_OUI_SIZE );
        }
        if ( UnicastCypher )
        {
            palCopyMemory( pMac->hHdd, (tANI_U8 **)UnicastCypher, Unicast, CSR_WPA_OUI_SIZE );
        }
        if ( AuthSuite )
        {
            palCopyMemory( pMac->hHdd, (tANI_U8 **)AuthSuite, Authentication, CSR_WPA_OUI_SIZE );
        }
        if( pNegotiatedAuthtype )
        {
            *pNegotiatedAuthtype = negAuthType;
        }
    }
    return( fAcceptableCyphers );
}
/*
 * csrIsWpaEncryptionMatch
 *
 * Report whether the profile's requested ciphers/auth types are compatible
 * with the WPA IE in the given beacon/probe IEs. Yes/no check only, so the
 * OUI output buffers of csrGetWpaCyphers are passed as NULL.
 */
tANI_BOOLEAN csrIsWpaEncryptionMatch( tpAniSirGlobal pMac, tCsrAuthList *pAuthType, eCsrEncryptionType enType, tCsrEncryptionList *pEnMcType,
                                      tDot11fBeaconIEs *pIes, eCsrAuthType *pNegotiatedAuthtype, eCsrEncryptionType *pNegotiatedMCCipher )
{
    // See if the cyphers in the Bss description match with the settings in the profile.
    return csrGetWpaCyphers( pMac, pAuthType, enType, pEnMcType, &pIes->WPA,
                             NULL, NULL, NULL,
                             pNegotiatedAuthtype, pNegotiatedMCCipher );
}
/*
 * csrConstructWpaIe
 *
 * Build the WPA (vendor-specific 221) IE to send in an association
 * request, based on the profile's settings matched against the target
 * BSS's advertised WPA IE. One unicast cipher and one auth suite are
 * written; the optional WPA capabilities field is omitted entirely.
 *
 * Returns the total IE size in bytes (header included), or 0 when the
 * profile is not WPA or no acceptable match exists.
 */
tANI_U8 csrConstructWpaIe( tHalHandle hHal, tCsrRoamProfile *pProfile, tSirBssDescription *pSirBssDesc,
                           tDot11fBeaconIEs *pIes, tCsrWpaIe *pWpaIe )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tANI_BOOLEAN fWpaMatch;
    tANI_U8 cbWpaIe = 0;
    tANI_U8 UnicastCypher[ CSR_WPA_OUI_SIZE ];
    tANI_U8 MulticastCypher[ CSR_WPA_OUI_SIZE ];
    tANI_U8 AuthSuite[ CSR_WPA_OUI_SIZE ];
    tCsrWpaAuthIe *pAuthSuite;
    tDot11fBeaconIEs *pIesLocal = pIes;
    do
    {
        if ( !csrIsProfileWpa( pProfile ) ) break;
        // Parse the BSS description ourselves if the caller did not supply IEs.
        if( !pIesLocal && (!HAL_STATUS_SUCCESS(csrGetParsedBssDescriptionIEs(pMac, pSirBssDesc, &pIesLocal))) )
        {
            break;
        }
        // See if the cyphers in the Bss description match with the settings in the profile.
        fWpaMatch = csrGetWpaCyphers( hHal, &pProfile->AuthType, pProfile->negotiatedUCEncryptionType, &pProfile->mcEncryptionType,
                                      &pIesLocal->WPA, UnicastCypher, MulticastCypher, AuthSuite, NULL, NULL );
        if ( !fWpaMatch ) break;
        pWpaIe->IeHeader.ElementID = SIR_MAC_WPA_EID;
        // csrWpaOui[01] is an octal literal for index 1: the WPA vendor OUI.
        palCopyMemory( pMac->hHdd, pWpaIe->Oui, csrWpaOui[01], sizeof( pWpaIe->Oui ) );
        pWpaIe->Version = CSR_WPA_VERSION_SUPPORTED;
        palCopyMemory( pMac->hHdd, pWpaIe->MulticastOui, MulticastCypher, sizeof( MulticastCypher ) );
        pWpaIe->cUnicastCyphers = 1;
        palCopyMemory( pMac->hHdd, &pWpaIe->UnicastOui[ 0 ], UnicastCypher, sizeof( UnicastCypher ) );
        // The auth-suite section begins immediately after the unicast OUI list.
        pAuthSuite = (tCsrWpaAuthIe *)( &pWpaIe->UnicastOui[ pWpaIe->cUnicastCyphers ] );
        pAuthSuite->cAuthenticationSuites = 1;
        palCopyMemory( pMac->hHdd, &pAuthSuite->AuthOui[ 0 ], AuthSuite, sizeof( AuthSuite ) );
        // The WPA capabilities follows the Auth Suite (two octects)--
        // this field is optional, and we always "send" zero, so just
        // remove it. This is consistent with our assumptions in the
        // frames compiler; c.f. bug 15234:
        // http://gold.woodsidenet.com/bugzilla/show_bug.cgi?id=15234
        // Add in the fixed fields plus 1 Unicast cypher, less the IE Header length
        // Add in the size of the Auth suite (count plus a single OUI)
        pWpaIe->IeHeader.Length = sizeof( *pWpaIe ) - sizeof ( pWpaIe->IeHeader ) +
                                  sizeof( *pAuthSuite );
        // return the size of the IE header (total) constructed...
        cbWpaIe = pWpaIe->IeHeader.Length + sizeof( pWpaIe->IeHeader );
    } while( 0 );
    if( !pIes && pIesLocal )
    {
        //locally allocated
        palFreeMemory(pMac->hHdd, pIesLocal);
    }
    return( cbWpaIe );
}
/*
 * csrGetWpaRsnIe
 *
 * Scan a raw IE buffer (pIes, len bytes) and extract the WPA IE and/or the
 * RSN IE into caller-supplied buffers. A NULL pWpaIe/pRSNIe means "don't
 * look for that IE". *pcbWpaIe / *pcbRSNIe receive the copied sizes; on
 * entry *pcbRSNIe is also treated as the RSN destination capacity.
 *
 * Returns TRUE only when every requested IE was found and copied.
 */
tANI_BOOLEAN csrGetWpaRsnIe( tHalHandle hHal, tANI_U8 *pIes, tANI_U32 len,
                             tANI_U8 *pWpaIe, tANI_U8 *pcbWpaIe, tANI_U8 *pRSNIe, tANI_U8 *pcbRSNIe)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tDot11IEHeader *pIEHeader;
    tSirMacPropIE *pSirMacPropIE;
    tANI_U32 cbParsed;
    tANI_U32 cbIE;
    int cExpectedIEs = 0;
    int cFoundIEs = 0;
    int cbPropIETotal;
    pIEHeader = (tDot11IEHeader *)pIes;
    if(pWpaIe) cExpectedIEs++;
    if(pRSNIe) cExpectedIEs++;
    // bss description length includes all fields other than the length itself
    cbParsed = 0;
    // Loop as long as there is data left in the IE of the Bss Description
    // and the number of Expected IEs is NOT found yet.
    while( ( (cbParsed + sizeof( *pIEHeader )) <= len ) && ( cFoundIEs < cExpectedIEs ) )
    {
        cbIE = sizeof( *pIEHeader ) + pIEHeader->Length;
        // Never read past the end of the supplied buffer.
        if ( ( cbIE + cbParsed ) > len ) break;
        // NOTE(review): gCsrIELengthTable is indexed directly by the 8-bit
        // ElementID with no visible bound check -- presumably the table
        // covers all 256 IDs; confirm its declaration.
        if ( ( pIEHeader->Length >= gCsrIELengthTable[ pIEHeader->ElementID ].min ) &&
             ( pIEHeader->Length <= gCsrIELengthTable[ pIEHeader->ElementID ].max ) )
        {
            switch( pIEHeader->ElementID )
            {
                // Parse the 221 (0xdd) Proprietary IEs here...
                // Note that the 221 IE is overloaded, containing the WPA IE, WMM/WME IE, and the
                // Airgo proprietary IE information.
                case SIR_MAC_WPA_EID:
                {
                    tANI_U32 aniOUI;
                    tANI_U8 *pOui = (tANI_U8 *)&aniOUI;
                    pOui++;
                    aniOUI = ANI_OUI;
                    aniOUI = i_ntohl( aniOUI );
                    pSirMacPropIE = ( tSirMacPropIE *)pIEHeader;
                    cbPropIETotal = pSirMacPropIE->length;
                    // Validate the ANI OUI is in the OUI field in the proprietary IE...
                    if ( ( pSirMacPropIE->length >= WNI_CFG_MANUFACTURER_OUI_LEN ) &&
                         pOui[ 0 ] == pSirMacPropIE->oui[ 0 ] &&
                         pOui[ 1 ] == pSirMacPropIE->oui[ 1 ] &&
                         pOui[ 2 ] == pSirMacPropIE->oui[ 2 ] )
                    {
                        // Airgo proprietary IE: recognized but ignored here.
                    }
                    else
                    {
                        tCsrWpaIe *pIe = ( tCsrWpaIe * )pIEHeader;
                        if(!pWpaIe || !pcbWpaIe) break;
                        // Check if this is a valid WPA IE. Then check that the
                        // WPA OUI is in place and the version is one that we support.
                        if ( ( pIe->IeHeader.Length >= SIR_MAC_WPA_IE_MIN_LENGTH ) &&
                             ( palEqualMemory(pMac->hHdd, pIe->Oui, (void *)csrWpaOui[1], sizeof( pIe->Oui ) ) ) &&
                             ( pIe->Version <= CSR_WPA_VERSION_SUPPORTED ) )
                        {
                            // NOTE(review): unlike the RSN path below, no
                            // destination-capacity check before this copy --
                            // the caller must supply a max-size WPA buffer.
                            palCopyMemory(pMac->hHdd, pWpaIe, pIe, pIe->IeHeader.Length + sizeof( pIe->IeHeader ) );
                            *pcbWpaIe = pIe->IeHeader.Length + sizeof( pIe->IeHeader );
                            cFoundIEs++;
                            break;
                        }
                    }
                    break;
                }
                case SIR_MAC_RSN_EID:
                {
                    tCsrRSNIe *pIe;
                    if(!pcbRSNIe || !pRSNIe) break;
                    pIe = (tCsrRSNIe *)pIEHeader;
                    // Check the length of the RSN Ie to assure it is valid. Then check that the
                    // version is one that we support.
                    if ( pIe->IeHeader.Length < SIR_MAC_RSN_IE_MIN_LENGTH ) break;
                    if ( pIe->Version > CSR_RSN_VERSION_SUPPORTED ) break;
                    cFoundIEs++;
                    // if there is enough room in the WpaIE passed in, then copy the Wpa IE into
                    // the buffer passed in.
                    if ( *pcbRSNIe < pIe->IeHeader.Length + sizeof( pIe->IeHeader ) ) break;
                    palCopyMemory(pMac->hHdd, pRSNIe, pIe, pIe->IeHeader.Length + sizeof( pIe->IeHeader ) );
                    *pcbRSNIe = pIe->IeHeader.Length + sizeof( pIe->IeHeader );
                    break;
                }
                // Add support for other IE here...
                default:
                    break;
            }
        }
        // Advance past this IE (header + payload).
        cbParsed += cbIE;
        pIEHeader = (tDot11IEHeader *)( ((tANI_U8 *)pIEHeader) + cbIE );
    }
    // return a BOOL that tells if all of the IEs asked for were found...
    return( cFoundIEs == cExpectedIEs );
}
//If a WPAIE exists in the profile, just use it. Or else construct one from the BSS
//Caller allocated memory for pWpaIe and guarrantee it can contain a max length WPA IE
/*
 * csrRetrieveWpaIe
 *
 * Produce the WPA IE to use for this profile/BSS pair: reuse the IE
 * already stored in the profile when present (and of valid length),
 * otherwise construct one from the BSS description. The caller supplies
 * pWpaIe large enough for a maximum-length WPA IE.
 *
 * Returns the IE size in bytes, or 0 when the profile is not WPA or the
 * stored IE length is invalid.
 */
tANI_U8 csrRetrieveWpaIe( tHalHandle hHal, tCsrRoamProfile *pProfile, tSirBssDescription *pSirBssDesc,
                          tDot11fBeaconIEs *pIes, tCsrWpaIe *pWpaIe )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tANI_U8 cbWpaIe = 0;
    if ( csrIsProfileWpa( pProfile ) )
    {
        if ( pProfile->nWPAReqIELength && pProfile->pWPAReqIE )
        {
            // A WPA IE was pre-supplied in the profile; copy it if its
            // length is sane, otherwise just log and return 0.
            if ( SIR_MAC_WPA_IE_MAX_LENGTH >= pProfile->nWPAReqIELength )
            {
                cbWpaIe = (tANI_U8)pProfile->nWPAReqIELength;
                palCopyMemory(pMac->hHdd, pWpaIe, pProfile->pWPAReqIE, cbWpaIe);
            }
            else
            {
                smsLog(pMac, LOGW, " csrRetrieveWpaIe detect invalid WPA IE length (%d) \n", pProfile->nWPAReqIELength);
            }
        }
        else
        {
            // No pre-supplied IE; build one against the target BSS.
            cbWpaIe = csrConstructWpaIe(pMac, pProfile, pSirBssDesc, pIes, pWpaIe);
        }
    }
    return (cbWpaIe);
}
//If a RSNIE exists in the profile, just use it. Or else construct one from the BSS
//Caller allocated memory for pWpaIe and guarrantee it can contain a max length WPA IE
/*
 * csrRetrieveRsnIe
 *
 * Produce the RSN IE to use for this profile/BSS pair. With LFR enabled
 * the IE is always rebuilt so it carries current PMKIDs; otherwise a
 * pre-supplied profile IE is reused when present (and of valid length),
 * falling back to constructing one from the BSS description.
 *
 * Returns the IE size in bytes, or 0 when the profile is not RSN or the
 * stored IE length is invalid.
 */
tANI_U8 csrRetrieveRsnIe( tHalHandle hHal, tANI_U32 sessionId, tCsrRoamProfile *pProfile,
                          tSirBssDescription *pSirBssDesc, tDot11fBeaconIEs *pIes, tCsrRSNIe *pRsnIe )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tANI_U8 cbRsnIe = 0;
    do
    {
        if ( !csrIsProfileRSN( pProfile ) ) break;
#ifdef FEATURE_WLAN_LFR
        if (csrRoamIsFastRoamEnabled(pMac))
        {
            // If "Legacy Fast Roaming" is enabled ALWAYS rebuild the RSN IE from
            // scratch. So it contains the current PMK-IDs
            cbRsnIe = csrConstructRSNIe(pMac, sessionId, pProfile, pSirBssDesc, pIes, pRsnIe);
        }
        else
#endif
        if(pProfile->nRSNReqIELength && pProfile->pRSNReqIE)
        {
            // If you have one started away, re-use it.
            // NOTE(review): the bound used here is the WPA max-length
            // macro, not an RSN-specific one -- confirm the two limits
            // are intentionally the same.
            if(SIR_MAC_WPA_IE_MAX_LENGTH >= pProfile->nRSNReqIELength)
            {
                cbRsnIe = (tANI_U8)pProfile->nRSNReqIELength;
                palCopyMemory(pMac->hHdd, pRsnIe, pProfile->pRSNReqIE, cbRsnIe);
            }
            else
            {
                smsLog(pMac, LOGW, " csrRetrieveRsnIe detect invalid RSN IE length (%d) \n", pProfile->nRSNReqIELength);
            }
        }
        else
        {
            // No pre-supplied IE; build one against the target BSS.
            cbRsnIe = csrConstructRSNIe(pMac, sessionId, pProfile, pSirBssDesc, pIes, pRsnIe);
        }
    }while(0);
    return (cbRsnIe);
}
#ifdef FEATURE_WLAN_WAPI
//If a WAPI IE exists in the profile, just use it. Or else construct one from the BSS
//Caller allocated memory for pWapiIe and guarrantee it can contain a max length WAPI IE
/*
 * csrRetrieveWapiIe
 *
 * Produce the WAPI IE to use for this profile/BSS pair: reuse the IE
 * already stored in the profile when present (and of valid length),
 * otherwise construct one from the BSS description. The caller supplies
 * pWapiIe large enough for a maximum-length WAPI IE.
 *
 * Returns the IE size in bytes, or 0 when the profile is not WAPI or the
 * stored IE length is invalid.
 */
tANI_U8 csrRetrieveWapiIe( tHalHandle hHal, tANI_U32 sessionId,
                           tCsrRoamProfile *pProfile, tSirBssDescription *pSirBssDesc,
                           tDot11fBeaconIEs *pIes, tCsrWapiIe *pWapiIe )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tANI_U8 cbWapiIe = 0;
    if ( csrIsProfileWapi( pProfile ) )
    {
        if ( pProfile->nWAPIReqIELength && pProfile->pWAPIReqIE )
        {
            // A WAPI IE was pre-supplied in the profile; copy it if its
            // length is sane, otherwise just log and return 0.
            if ( DOT11F_IE_WAPI_MAX_LEN >= pProfile->nWAPIReqIELength )
            {
                cbWapiIe = (tANI_U8)pProfile->nWAPIReqIELength;
                palCopyMemory(pMac->hHdd, pWapiIe, pProfile->pWAPIReqIE, cbWapiIe);
            }
            else
            {
                smsLog(pMac, LOGW, " csrRetrieveWapiIe detect invalid WAPI IE length (%d) \n", pProfile->nWAPIReqIELength);
            }
        }
        else
        {
            // No pre-supplied IE; build one against the target BSS.
            cbWapiIe = csrConstructWapiIe(pMac, sessionId, pProfile, pSirBssDesc, pIes, pWapiIe);
        }
    }
    return (cbWapiIe);
}
#endif /* FEATURE_WLAN_WAPI */
/*
 * csrSearchChannelListForTxPower
 *
 * Walk the 5 GHz channel-group list looking for the group that contains
 * the BSS's channel (firstChannel + k * interChannelOffset for some k in
 * [0, numChannels)). On a hit the whole group is copied into
 * returnChannelGroup and TRUE is returned; FALSE when no group matches.
 */
tANI_BOOLEAN csrSearchChannelListForTxPower(tHalHandle hHal, tSirBssDescription *pBssDescription, tCsrChannelSet *returnChannelGroup)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tListElem *pEntry;
    tCsrChannelSet *pGroup;
    tANI_U16 idx;
    tANI_BOOLEAN found = FALSE;

    pEntry = csrLLPeekHead( &pMac->roam.channelList5G, LL_ACCESS_LOCK );
    while ( pEntry && !found )
    {
        pGroup = GET_BASE_ADDR( pEntry, tCsrChannelSet, channelListLink );
        // Does this group's arithmetic progression of channels cover the BSS?
        for ( idx = 0; idx < pGroup->numChannels; idx++ )
        {
            if ( pGroup->firstChannel + idx * pGroup->interChannelOffset == pBssDescription->channelId )
            {
                found = TRUE;
                palCopyMemory(pMac->hHdd, returnChannelGroup, pGroup, sizeof(tCsrChannelSet));
                break;
            }
        }
        if ( !found )
        {
            pEntry = csrLLNext( &pMac->roam.channelList5G, pEntry, LL_ACCESS_LOCK );
        }
    }
    return( found );
}
/*
 * csrRatesIsDot11Rate11bSupportedRate
 *
 * Return TRUE when the 802.11 rate octet (basic-rate bit masked off)
 * is one of the four 11b rates: 1, 2, 5.5 or 11 Mbps.
 */
tANI_BOOLEAN csrRatesIsDot11Rate11bSupportedRate( tANI_U8 dot11Rate )
{
    // Strip the basic-rate flag before comparing against the rate values.
    tANI_U16 rate = (tANI_U16)( BITS_OFF( dot11Rate, CSR_DOT11_BASIC_RATE_MASK ) );
    if ( ( eCsrSuppRate_1Mbps == rate ) ||
         ( eCsrSuppRate_2Mbps == rate ) ||
         ( eCsrSuppRate_5_5Mbps == rate ) ||
         ( eCsrSuppRate_11Mbps == rate ) )
    {
        return TRUE;
    }
    return FALSE;
}
/*
 * csrRatesIsDot11Rate11aSupportedRate
 *
 * Return TRUE when the 802.11 rate octet (basic-rate bit masked off)
 * is one of the eight 11a OFDM rates: 6/9/12/18/24/36/48/54 Mbps.
 */
tANI_BOOLEAN csrRatesIsDot11Rate11aSupportedRate( tANI_U8 dot11Rate )
{
    // Strip the basic-rate flag before comparing against the rate values.
    tANI_U16 rate = (tANI_U16)( BITS_OFF( dot11Rate, CSR_DOT11_BASIC_RATE_MASK ) );
    if ( ( eCsrSuppRate_6Mbps == rate ) ||
         ( eCsrSuppRate_9Mbps == rate ) ||
         ( eCsrSuppRate_12Mbps == rate ) ||
         ( eCsrSuppRate_18Mbps == rate ) ||
         ( eCsrSuppRate_24Mbps == rate ) ||
         ( eCsrSuppRate_36Mbps == rate ) ||
         ( eCsrSuppRate_48Mbps == rate ) ||
         ( eCsrSuppRate_54Mbps == rate ) )
    {
        return TRUE;
    }
    return FALSE;
}
/*
 * csrTranslateEncryptTypeToEdType
 *
 * Map a CSR encryption type onto the firmware/MAC encryption-decryption
 * (ED) type. Unknown values map to eSIR_ED_NONE via the default label.
 */
tAniEdType csrTranslateEncryptTypeToEdType( eCsrEncryptionType EncryptType )
{
    tAniEdType edType;
    switch ( EncryptType )
    {
        default:
        case eCSR_ENCRYPT_TYPE_NONE:
            edType = eSIR_ED_NONE;
            break;
        case eCSR_ENCRYPT_TYPE_WEP40_STATICKEY:
        case eCSR_ENCRYPT_TYPE_WEP40:
            edType = eSIR_ED_WEP40;
            break;
        case eCSR_ENCRYPT_TYPE_WEP104_STATICKEY:
        case eCSR_ENCRYPT_TYPE_WEP104:
            edType = eSIR_ED_WEP104;
            break;
        case eCSR_ENCRYPT_TYPE_TKIP:
            edType = eSIR_ED_TKIP;
            break;
        case eCSR_ENCRYPT_TYPE_AES:
            edType = eSIR_ED_CCMP;
            break;
#ifdef FEATURE_WLAN_WAPI
        case eCSR_ENCRYPT_TYPE_WPI:
            edType = eSIR_ED_WPI;
            // BUGFIX: this break was missing; with WLAN_FEATURE_11W also
            // defined, WPI fell through and was clobbered with
            // eSIR_ED_AES_128_CMAC.
            break;
#endif
#ifdef WLAN_FEATURE_11W
        //11w BIP
        case eCSR_ENCRYPT_TYPE_AES_CMAC:
            edType = eSIR_ED_AES_128_CMAC;
            break;
#endif
    }
    return( edType );
}
//pIes can be NULL
//pIes can be NULL
/*
 * csrValidateWep
 *
 * Validate that HDD's requested WEP configuration is self-consistent and
 * usable against this BSS: the privacy bit must be set, the multicast
 * cipher must equal the requested unicast WEP cipher, the auth list must
 * contain Open/Shared/AutoSwitch, and (when IEs are available) a WPA or
 * RSN IE advertising WEP as the group cipher is also accepted.
 *
 * On success the negotiated auth type and multicast cipher are reported
 * through the optional output pointers.
 */
tANI_BOOLEAN csrValidateWep( tpAniSirGlobal pMac, eCsrEncryptionType ucEncryptionType,
                             tCsrAuthList *pAuthList, tCsrEncryptionList *pMCEncryptionList,
                             eCsrAuthType *pNegotiatedAuthType, eCsrEncryptionType *pNegotiatedMCEncryption,
                             tSirBssDescription *pSirBssDesc, tDot11fBeaconIEs *pIes )
{
    tANI_U32 idx;
    tANI_BOOLEAN fMatch = FALSE;
    eCsrAuthType negotiatedAuth = eCSR_AUTH_TYPE_OPEN_SYSTEM;
    eCsrEncryptionType negotiatedMCCipher = eCSR_ENCRYPT_TYPE_UNKNOWN;
    //This function just checks whether HDD is giving correct values for Multicast cipher and Auth.
    do
    {
        //If privacy bit is not set, consider no match
        if ( !csrIsPrivacy( pSirBssDesc ) ) break;
        // The multicast list must contain a WEP cipher identical to the
        // requested unicast cipher (WEP uses one key for both).
        for( idx = 0; idx < pMCEncryptionList->numEntries; idx++ )
        {
            switch( pMCEncryptionList->encryptionType[idx] )
            {
                case eCSR_ENCRYPT_TYPE_WEP40_STATICKEY:
                case eCSR_ENCRYPT_TYPE_WEP104_STATICKEY:
                case eCSR_ENCRYPT_TYPE_WEP40:
                case eCSR_ENCRYPT_TYPE_WEP104:
                    /* Multicast list may contain WEP40/WEP104. Check whether it matches UC.
                    */
                    if( ucEncryptionType == pMCEncryptionList->encryptionType[idx] )
                    {
                        fMatch = TRUE;
                        negotiatedMCCipher = pMCEncryptionList->encryptionType[idx];
                    }
                    break;
                default:
                    fMatch = FALSE;
                    break;
            }
            if(fMatch) break;
        }
        if(!fMatch) break;
        // The auth list must contain an algorithm usable with WEP.
        for( idx = 0; idx < pAuthList->numEntries; idx++ )
        {
            switch( pAuthList->authType[idx] )
            {
                case eCSR_AUTH_TYPE_OPEN_SYSTEM:
                case eCSR_AUTH_TYPE_SHARED_KEY:
                case eCSR_AUTH_TYPE_AUTOSWITCH:
                    fMatch = TRUE;
                    negotiatedAuth = pAuthList->authType[idx];
                    break;
                default:
                    fMatch = FALSE;
            }
            if (fMatch) break;
        }
        if(!fMatch) break;
        //In case of WPA / WPA2, check whether it supports WEP as well
        if(pIes)
        {
            //Prepare the encryption type for WPA/WPA2 functions
            if( eCSR_ENCRYPT_TYPE_WEP40_STATICKEY == ucEncryptionType )
            {
                ucEncryptionType = eCSR_ENCRYPT_TYPE_WEP40;
            }
            // BUGFIX: this branch previously tested eCSR_ENCRYPT_TYPE_WEP104
            // and re-assigned the same value (a no-op), so the STATICKEY
            // variant was never normalized for the OUI lookups below.
            else if( eCSR_ENCRYPT_TYPE_WEP104_STATICKEY == ucEncryptionType )
            {
                ucEncryptionType = eCSR_ENCRYPT_TYPE_WEP104;
            }
            //else we can use the encryption type directly
            if( pIes->WPA.present )
            {
                fMatch = palEqualMemory(pMac->hHdd, pIes->WPA.multicast_cipher,
                                        csrWpaOui[csrGetOUIIndexFromCipher( ucEncryptionType )], CSR_WPA_OUI_SIZE );
                if( fMatch ) break;
            }
            if( pIes->RSN.present )
            {
                fMatch = palEqualMemory(pMac->hHdd, pIes->RSN.gp_cipher_suite,
                                        csrRSNOui[csrGetOUIIndexFromCipher( ucEncryptionType )], CSR_RSN_OUI_SIZE );
            }
        }
    }while(0);
    if( fMatch )
    {
        if( pNegotiatedAuthType )
            *pNegotiatedAuthType = negotiatedAuth;
        if( pNegotiatedMCEncryption )
            *pNegotiatedMCEncryption = negotiatedMCCipher;
    }
    return fMatch;
}
//pIes shall contain IEs from pSirBssDesc. It shall be returned from function csrGetParsedBssDescriptionIEs
// Check whether the BSS (pSirBssDesc/pIes) can satisfy at least one combination
// of the requested auth types and unicast/multicast encryption types. Iterates
// the UC cipher list until one matches; WEP/WPA/RSN/WAPI specifics are
// delegated to the csrValidateWep / csrIs*Match helpers. On success the
// negotiated auth type and UC/MC ciphers are written through the optional out
// pointers (each may be NULL). Returns TRUE on a match.
tANI_BOOLEAN csrIsSecurityMatch( tHalHandle hHal, tCsrAuthList *authType, tCsrEncryptionList *pUCEncryptionType, tCsrEncryptionList *pMCEncryptionType,
tSirBssDescription *pSirBssDesc, tDot11fBeaconIEs *pIes,
eCsrAuthType *negotiatedAuthtype, eCsrEncryptionType *negotiatedUCCipher, eCsrEncryptionType *negotiatedMCCipher )
{
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
tANI_BOOLEAN fMatch = FALSE;
tANI_U8 i,idx;
eCsrEncryptionType mcCipher = eCSR_ENCRYPT_TYPE_UNKNOWN, ucCipher = eCSR_ENCRYPT_TYPE_UNKNOWN;
eCsrAuthType negAuthType = eCSR_AUTH_TYPE_UNKNOWN;
// Try each requested unicast cipher in order; stop at the first match.
for( i = 0 ; ((i < pUCEncryptionType->numEntries) && (!fMatch)) ; i++ )
{
ucCipher = pUCEncryptionType->encryptionType[i];
// If the Bss description shows the Privacy bit is on, then we must have some sort of encryption configured
// for the profile to work. Don't attempt to join networks with Privacy bit set when profiles say NONE for
// encryption type.
switch ( ucCipher )
{
case eCSR_ENCRYPT_TYPE_NONE:
{
// for NO encryption, if the Bss description has the Privacy bit turned on, then encryption is
// required so we have to reject this Bss.
if ( csrIsPrivacy( pSirBssDesc ) )
{
fMatch = FALSE;
}
else
{
fMatch = TRUE;
}
if ( fMatch )
{
fMatch = FALSE;
//Check Multicast cipher requested and Auth type requested.
for( idx = 0 ; idx < pMCEncryptionType->numEntries ; idx++ )
{
if( eCSR_ENCRYPT_TYPE_NONE == pMCEncryptionType->encryptionType[idx] )
{
fMatch = TRUE; //Multicast can only be none.
mcCipher = pMCEncryptionType->encryptionType[idx];
break;
}
}
if (!fMatch) break;
fMatch = FALSE;
//Check Auth list. It should contain AuthOpen.
for( idx = 0 ; idx < authType->numEntries ; idx++ )
{
if( eCSR_AUTH_TYPE_OPEN_SYSTEM == authType->authType[idx] )
{
fMatch = TRUE;
negAuthType = eCSR_AUTH_TYPE_OPEN_SYSTEM;
break;
}
}
if (!fMatch) break;
}
break;
}
case eCSR_ENCRYPT_TYPE_WEP40_STATICKEY:
case eCSR_ENCRYPT_TYPE_WEP104_STATICKEY:
// !! might want to check for WEP keys set in the Profile.... ?
// !! don't need to have the privacy bit in the Bss description. Many AP policies make legacy
// encryption 'optional' so we don't know if we can associate or not. The AP will reject if
// encryption is not allowed without the Privacy bit turned on.
fMatch = csrValidateWep( pMac, ucCipher, authType, pMCEncryptionType, &negAuthType, &mcCipher, pSirBssDesc, pIes);
break;
// these are all of the WPA encryption types...
case eCSR_ENCRYPT_TYPE_WEP40:
case eCSR_ENCRYPT_TYPE_WEP104:
fMatch = csrValidateWep( pMac, ucCipher, authType, pMCEncryptionType, &negAuthType, &mcCipher, pSirBssDesc, pIes);
break;
case eCSR_ENCRYPT_TYPE_TKIP:
case eCSR_ENCRYPT_TYPE_AES:
{
// WPA/RSN matching needs the parsed IEs; without them we cannot match.
if(pIes)
{
// First check if there is a RSN match
fMatch = csrIsRSNMatch( pMac, authType, ucCipher, pMCEncryptionType, pIes, &negAuthType, &mcCipher );
if( !fMatch )
{
// If not RSN, then check if there is a WPA match
fMatch = csrIsWpaEncryptionMatch( pMac, authType, ucCipher, pMCEncryptionType, pIes,
&negAuthType, &mcCipher );
}
}
else
{
fMatch = FALSE;
}
break;
}
#ifdef FEATURE_WLAN_WAPI
case eCSR_ENCRYPT_TYPE_WPI://WAPI
{
if(pIes)
{
fMatch = csrIsWapiMatch( hHal, authType, ucCipher, pMCEncryptionType, pIes, &negAuthType, &mcCipher );
}
else
{
fMatch = FALSE;
}
break;
}
#endif /* FEATURE_WLAN_WAPI */
case eCSR_ENCRYPT_TYPE_ANY:
default:
{
tANI_BOOLEAN fMatchAny = eANI_BOOLEAN_FALSE;
// Start optimistic; only the no-encryption-with-privacy case below
// can turn this back to FALSE.
fMatch = eANI_BOOLEAN_TRUE;
//It is allowed to match anything. Try the more secured ones first.
if(pIes)
{
//Check AES first
ucCipher = eCSR_ENCRYPT_TYPE_AES;
fMatchAny = csrIsRSNMatch( hHal, authType, ucCipher, pMCEncryptionType, pIes, &negAuthType, &mcCipher );
if(!fMatchAny)
{
//Check TKIP
ucCipher = eCSR_ENCRYPT_TYPE_TKIP;
fMatchAny = csrIsRSNMatch( hHal, authType, ucCipher, pMCEncryptionType, pIes, &negAuthType, &mcCipher );
}
#ifdef FEATURE_WLAN_WAPI
if(!fMatchAny)
{
//Check WAPI
ucCipher = eCSR_ENCRYPT_TYPE_WPI;
fMatchAny = csrIsWapiMatch( hHal, authType, ucCipher, pMCEncryptionType, pIes, &negAuthType, &mcCipher );
}
#endif /* FEATURE_WLAN_WAPI */
}
// Fall back through the WEP variants, strongest first.
if(!fMatchAny)
{
ucCipher = eCSR_ENCRYPT_TYPE_WEP104;
if(!csrValidateWep( pMac, ucCipher, authType, pMCEncryptionType, &negAuthType, &mcCipher, pSirBssDesc, pIes))
{
ucCipher = eCSR_ENCRYPT_TYPE_WEP40;
if(!csrValidateWep( pMac, ucCipher, authType, pMCEncryptionType, &negAuthType, &mcCipher, pSirBssDesc, pIes))
{
ucCipher = eCSR_ENCRYPT_TYPE_WEP104_STATICKEY;
if(!csrValidateWep( pMac, ucCipher, authType, pMCEncryptionType, &negAuthType, &mcCipher, pSirBssDesc, pIes))
{
ucCipher = eCSR_ENCRYPT_TYPE_WEP40_STATICKEY;
if(!csrValidateWep( pMac, ucCipher, authType, pMCEncryptionType, &negAuthType, &mcCipher, pSirBssDesc, pIes))
{
//It must be open and no encryption
if ( csrIsPrivacy( pSirBssDesc ) )
{
//This is not right
fMatch = eANI_BOOLEAN_FALSE;
}
else
{
negAuthType = eCSR_AUTH_TYPE_OPEN_SYSTEM;
mcCipher = eCSR_ENCRYPT_TYPE_NONE;
ucCipher = eCSR_ENCRYPT_TYPE_NONE;
}
}
}
}
}
}
break;
}
}
}
// Report the negotiated selections only on success; out pointers optional.
if( fMatch )
{
if( negotiatedUCCipher )
*negotiatedUCCipher = ucCipher;
if( negotiatedMCCipher )
*negotiatedMCCipher = mcCipher;
if( negotiatedAuthtype )
*negotiatedAuthtype = negAuthType;
}
return( fMatch );
}
// Compare a profile SSID (ssid1) against the SSID carried in a BSS
// description (bssSsid). A NULL/suppressed BSS SSID matches only when
// fSsidRequired is FALSE, and a zero-length profile SSID acts as a
// broadcast (match-all) SSID.
tANI_BOOLEAN csrIsSsidMatch( tpAniSirGlobal pMac, tANI_U8 *ssid1, tANI_U8 ssid1Len, tANI_U8 *bssSsid,
tANI_U8 bssSsidLen, tANI_BOOLEAN fSsidRequired )
{
    tANI_BOOLEAN fSsidEqual = FALSE;

    // A hidden SSID (NULL, or valid length but all-NUL bytes) is treated
    // like a broadcast SSID unless the caller insists on a concrete match.
    if ( csrIsNULLSSID( bssSsid, bssSsidLen ) )
    {
        return ( eANI_BOOLEAN_FALSE == fSsidRequired ) ? TRUE : FALSE;
    }

    // Zero-length profile SSID means "broadcast": accept any BSS SSID.
    if ( 0 == ssid1Len )
    {
        return TRUE;
    }

    // Otherwise require identical length and identical bytes.
    if ( ( ssid1Len == bssSsidLen ) &&
         palEqualMemory( pMac->hHdd, bssSsid, ssid1, bssSsidLen ) )
    {
        fSsidEqual = TRUE;
    }
    return ( fSsidEqual );
}
//Null ssid means match
// Return TRUE when pSsid matches any entry of pSsidList. A NULL SSID entry
// in the list is a wildcard that matches everything.
tANI_BOOLEAN csrIsSsidInList( tHalHandle hHal, tSirMacSSid *pSsid, tCsrSSIDs *pSsidList )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tANI_BOOLEAN fFound = FALSE;
    tANI_U32 idx;

    // Nothing to compare against -> no match.
    if ( !pSsidList || !pSsid )
    {
        return ( fFound );
    }

    for ( idx = 0; idx < pSsidList->numOfSSIDs; idx++ )
    {
        tSirMacSSid *pEntry = &pSsidList->SSIDList[idx].SSID;
        // Wildcard (NULL SSID) entries match anything; otherwise length
        // and contents must both agree.
        if ( csrIsNULLSSID( pEntry->ssId, pEntry->length ) ||
             ( ( pEntry->length == pSsid->length ) &&
               palEqualMemory( pMac->hHdd, pSsid->ssId, pEntry->ssId, pSsid->length ) ) )
        {
            fFound = TRUE;
            break;
        }
    }
    return ( fFound );
}
//like to use sirCompareMacAddr
// TRUE when the given address is the all-zero MAC address.
tANI_BOOLEAN csrIsMacAddressZero( tpAniSirGlobal pMac, tCsrBssid *pMacAddr )
{
    tANI_U8 zeroMac[WNI_CFG_BSSID_LEN] = { 0 };
    return ( palEqualMemory( pMac->hHdd, zeroMac, pMacAddr, WNI_CFG_BSSID_LEN ) );
}
//like to use sirCompareMacAddr
// TRUE when the given address is the all-0xff broadcast MAC address.
tANI_BOOLEAN csrIsMacAddressBroadcast( tpAniSirGlobal pMac, tCsrBssid *pMacAddr )
{
    tANI_U8 bcastMac[WNI_CFG_BSSID_LEN] =
        { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
    return ( palEqualMemory( pMac->hHdd, bcastMac, pMacAddr, WNI_CFG_BSSID_LEN ) );
}
//like to use sirCompareMacAddr
// Byte-wise equality of two BSSIDs.
tANI_BOOLEAN csrIsMacAddressEqual( tpAniSirGlobal pMac, tCsrBssid *pMacAddr1, tCsrBssid *pMacAddr2 )
{
    return ( palEqualMemory( pMac->hHdd, pMacAddr1, pMacAddr2, sizeof(tCsrBssid) ) );
}
// Match a profile BSSID against a BSS's BSSID. An all-zero or all-0xff
// profile BSSID is treated as a wildcard that matches any AP.
tANI_BOOLEAN csrIsBssidMatch( tHalHandle hHal, tCsrBssid *pProfBssid, tCsrBssid *BssBssid )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tCsrBssid localBssid;

    // Work on a local copy of the profile BSSID.
    palCopyMemory( pMac->hHdd, &localBssid, pProfBssid, sizeof(tCsrBssid) );

    // Give the profile the benefit of the doubt: both the zero address and
    // the real broadcast address are accepted as "match any BSSID".
    if ( csrIsMacAddressZero( pMac, &localBssid ) ||
         csrIsMacAddressBroadcast( pMac, &localBssid ) )
    {
        return TRUE;
    }

    // Otherwise the two addresses must be byte-for-byte identical.
    return csrIsMacAddressEqual( pMac, BssBssid, &localBssid );
}
// Two BSS types match when either one is eCSR_BSS_TYPE_ANY or they are equal.
tANI_BOOLEAN csrIsBSSTypeMatch(eCsrRoamBssType bssType1, eCsrRoamBssType bssType2)
{
    if ( ( eCSR_BSS_TYPE_ANY == bssType1 ) || ( eCSR_BSS_TYPE_ANY == bssType2 ) )
    {
        return eANI_BOOLEAN_TRUE;
    }
    return ( bssType1 == bssType2 ) ? eANI_BOOLEAN_TRUE : eANI_BOOLEAN_FALSE;
}
// TRUE for either IBSS flavor: joining an existing IBSS or starting one.
tANI_BOOLEAN csrIsBssTypeIBSS(eCsrRoamBssType bssType)
{
    if ( ( eCSR_BSS_TYPE_START_IBSS == bssType ) || ( eCSR_BSS_TYPE_IBSS == bssType ) )
    {
        return (tANI_BOOLEAN)1;
    }
    return (tANI_BOOLEAN)0;
}
// TRUE for either WDS role (station or AP side).
tANI_BOOLEAN csrIsBssTypeWDS(eCsrRoamBssType bssType)
{
    if ( ( eCSR_BSS_TYPE_WDS_STA == bssType ) || ( eCSR_BSS_TYPE_WDS_AP == bssType ) )
    {
        return (tANI_BOOLEAN)1;
    }
    return (tANI_BOOLEAN)0;
}
// Verify that the capabilities advertised in pSirBssDesc are compatible with
// the roaming BSS type requested by the profile. Returns TRUE on match.
tANI_BOOLEAN csrIsBSSTypeCapsMatch( eCsrRoamBssType bssType, tSirBssDescription *pSirBssDesc )
{
tANI_BOOLEAN fMatch = TRUE;
do
{
switch( bssType )
{
case eCSR_BSS_TYPE_ANY:
// Any BSS type is acceptable; leave fMatch TRUE.
break;
case eCSR_BSS_TYPE_INFRASTRUCTURE:
case eCSR_BSS_TYPE_WDS_STA:
if( !csrIsInfraBssDesc( pSirBssDesc ) )
fMatch = FALSE;
break;
case eCSR_BSS_TYPE_IBSS:
case eCSR_BSS_TYPE_START_IBSS:
if( !csrIsIbssBssDesc( pSirBssDesc ) )
fMatch = FALSE;
break;
case eCSR_BSS_TYPE_WDS_AP: //For WDS AP, no need to match anything
// NOTE(review): despite the comment above, this case falls through
// to default and returns FALSE -- confirm whether that is intended.
default:
fMatch = FALSE;
break;
}
}
while( 0 );
return( fMatch );
}
// Thin wrapper: capability matching currently reduces to BSS-type matching;
// pMac is unused but kept for signature parity with the other matchers.
static tANI_BOOLEAN csrIsCapabilitiesMatch( tpAniSirGlobal pMac, eCsrRoamBssType bssType, tSirBssDescription *pSirBssDesc )
{
return( csrIsBSSTypeCapsMatch( bssType, pSirBssDesc ) );
}
// TRUE when the requested channel is the wildcard or exactly the channel
// the BSS is operating on.
static tANI_BOOLEAN csrIsSpecificChannelMatch( tpAniSirGlobal pMac, tSirBssDescription *pSirBssDesc, tANI_U8 Channel )
{
    if ( ( eCSR_OPERATING_CHANNEL_ANY == Channel ) ||
         ( Channel == pSirBssDesc->channelId ) )
    {
        return TRUE;
    }
    // Neither wildcard nor an exact channel hit.
    return FALSE;
}
// A wildcard channel in the profile matches any BSS; a concrete channel is
// delegated to the exact-channel comparison.
tANI_BOOLEAN csrIsChannelBandMatch( tpAniSirGlobal pMac, tANI_U8 channelId, tSirBssDescription *pSirBssDesc )
{
    if ( eCSR_OPERATING_CHANNEL_ANY == channelId )
    {
        return TRUE;
    }
    return csrIsSpecificChannelMatch( pMac, pSirBssDesc, channelId );
}
/**
* \brief Enquire as to whether a given rate is supported by the
* adapter as currently configured
*
*
* \param nRate A rate in units of 500kbps
*
* \return TRUE if the adapter is currently capable of supporting this
* rate, FALSE else
*
*
* The rate encoding is just as in 802.11 Information Elements, except
* that the high bit is \em not interpreted as indicating a Basic Rate,
* and proprietary rates are allowed, too.
*
* Note that if the adapter's dot11Mode is g, we don't restrict the
* rates. According to hwReadEepromParameters, this will happen when:
*
* ... the card is configured for ALL bands through the property
* page. If this occurs, and the card is not an ABG card ,then this
* code is setting the dot11Mode to assume the mode that the
* hardware can support. For example, if the card is an 11BG card
* and we are configured to support ALL bands, then we change the
* dot11Mode to 11g because ALL in this case is only what the
* hardware can support.
*
*
*/
static tANI_BOOLEAN csrIsAggregateRateSupported( tpAniSirGlobal pMac, tANI_U16 rate )
{
tANI_BOOLEAN fSupported = eANI_BOOLEAN_FALSE;
tANI_U16 idx, newRate;
//In case basic rate flag is set
newRate = BITS_OFF(rate, CSR_DOT11_BASIC_RATE_MASK);
// 11a-only configuration: only the OFDM rate set is acceptable.
if ( eCSR_CFG_DOT11_MODE_11A == pMac->roam.configParam.uCfgDot11Mode )
{
switch ( newRate )
{
case eCsrSuppRate_6Mbps:
case eCsrSuppRate_9Mbps:
case eCsrSuppRate_12Mbps:
case eCsrSuppRate_18Mbps:
case eCsrSuppRate_24Mbps:
case eCsrSuppRate_36Mbps:
case eCsrSuppRate_48Mbps:
case eCsrSuppRate_54Mbps:
fSupported = TRUE;
break;
default:
fSupported = FALSE;
break;
}
}
// 11b-only configuration: only the DSSS/CCK rate set is acceptable.
else if( eCSR_CFG_DOT11_MODE_11B == pMac->roam.configParam.uCfgDot11Mode )
{
switch ( newRate )
{
case eCsrSuppRate_1Mbps:
case eCsrSuppRate_2Mbps:
case eCsrSuppRate_5_5Mbps:
case eCsrSuppRate_11Mbps:
fSupported = TRUE;
break;
default:
fSupported = FALSE;
break;
}
}
// Proprietary rates disabled: accept any standard a/b/g rate.
else if ( !pMac->roam.configParam.ProprietaryRatesEnabled )
{
switch ( newRate )
{
case eCsrSuppRate_1Mbps:
case eCsrSuppRate_2Mbps:
case eCsrSuppRate_5_5Mbps:
case eCsrSuppRate_6Mbps:
case eCsrSuppRate_9Mbps:
case eCsrSuppRate_11Mbps:
case eCsrSuppRate_12Mbps:
case eCsrSuppRate_18Mbps:
case eCsrSuppRate_24Mbps:
case eCsrSuppRate_36Mbps:
case eCsrSuppRate_48Mbps:
case eCsrSuppRate_54Mbps:
fSupported = TRUE;
break;
default:
fSupported = FALSE;
break;
}
}
// Proprietary rates enabled: CCK rates are always supported; every other
// rate is looked up in the gPhyRatesSuppt capability table.
else {
if ( eCsrSuppRate_1Mbps == newRate ||
eCsrSuppRate_2Mbps == newRate ||
eCsrSuppRate_5_5Mbps == newRate ||
eCsrSuppRate_11Mbps == newRate )
{
fSupported = TRUE;
}
else {
// NOTE(review): column 1 of gPhyRatesSuppt is consulted here;
// what the columns represent is defined elsewhere -- confirm
// against the gPhyRatesSuppt declaration.
idx = 0x1;
switch ( newRate )
{
case eCsrSuppRate_6Mbps:
fSupported = gPhyRatesSuppt[0][idx];
break;
case eCsrSuppRate_9Mbps:
fSupported = gPhyRatesSuppt[1][idx];
break;
case eCsrSuppRate_12Mbps:
fSupported = gPhyRatesSuppt[2][idx];
break;
case eCsrSuppRate_18Mbps:
fSupported = gPhyRatesSuppt[3][idx];
break;
case eCsrSuppRate_20Mbps:
fSupported = gPhyRatesSuppt[4][idx];
break;
case eCsrSuppRate_24Mbps:
fSupported = gPhyRatesSuppt[5][idx];
break;
case eCsrSuppRate_36Mbps:
fSupported = gPhyRatesSuppt[6][idx];
break;
case eCsrSuppRate_40Mbps:
fSupported = gPhyRatesSuppt[7][idx];
break;
case eCsrSuppRate_42Mbps:
fSupported = gPhyRatesSuppt[8][idx];
break;
case eCsrSuppRate_48Mbps:
fSupported = gPhyRatesSuppt[9][idx];
break;
case eCsrSuppRate_54Mbps:
fSupported = gPhyRatesSuppt[10][idx];
break;
case eCsrSuppRate_72Mbps:
fSupported = gPhyRatesSuppt[11][idx];
break;
case eCsrSuppRate_80Mbps:
fSupported = gPhyRatesSuppt[12][idx];
break;
case eCsrSuppRate_84Mbps:
fSupported = gPhyRatesSuppt[13][idx];
break;
case eCsrSuppRate_96Mbps:
fSupported = gPhyRatesSuppt[14][idx];
break;
case eCsrSuppRate_108Mbps:
fSupported = gPhyRatesSuppt[15][idx];
break;
case eCsrSuppRate_120Mbps:
fSupported = gPhyRatesSuppt[16][idx];
break;
case eCsrSuppRate_126Mbps:
fSupported = gPhyRatesSuppt[17][idx];
break;
case eCsrSuppRate_144Mbps:
fSupported = gPhyRatesSuppt[18][idx];
break;
case eCsrSuppRate_160Mbps:
fSupported = gPhyRatesSuppt[19][idx];
break;
case eCsrSuppRate_168Mbps:
fSupported = gPhyRatesSuppt[20][idx];
break;
case eCsrSuppRate_192Mbps:
fSupported = gPhyRatesSuppt[21][idx];
break;
case eCsrSuppRate_216Mbps:
fSupported = gPhyRatesSuppt[22][idx];
break;
case eCsrSuppRate_240Mbps:
fSupported = gPhyRatesSuppt[23][idx];
break;
default:
fSupported = FALSE;
break;
}
}
}
return fSupported;
}
static tANI_BOOLEAN csrIsRateSetMatch( tpAniSirGlobal pMac,
tDot11fIESuppRates *pBssSuppRates,
tDot11fIEExtSuppRates *pBssExtSuppRates )
{
tANI_BOOLEAN fMatch = TRUE;
tANI_U32 i;
// Validate that all of the Basic rates advertised in the Bss description are supported.
if ( pBssSuppRates )
{
for( i = 0; i < pBssSuppRates->num_rates; i++ )
{
if ( CSR_IS_BASIC_RATE( pBssSuppRates->rates[ i ] ) )
{
if ( !csrIsAggregateRateSupported( pMac, pBssSuppRates->rates[ i ] ) )
{
fMatch = FALSE;
break;
}
}
}
}
if ( fMatch && pBssExtSuppRates )
{
for( i = 0; i < pBssExtSuppRates->num_rates; i++ )
{
if ( CSR_IS_BASIC_RATE( pBssExtSuppRates->rates[ i ] ) )
{
if ( !csrIsAggregateRateSupported( pMac, pBssExtSuppRates->rates[ i ] ) )
{
fMatch = FALSE;
break;
}
}
}
}
return( fMatch );
}
//ppIes can be NULL. If caller want to get the *ppIes allocated by this function, pass in *ppIes = NULL
// Run a scan result (pBssDesc) through every criterion of pFilter: P2P,
// SSID list, BSSID list, channel list, phy mode, security, capabilities,
// rates, WMM/QoS, country code and (11r) mobility domain. Returns TRUE only
// if all applicable checks pass; negotiated security selections are written
// through pNegAuth/pNegUc/pNegMc by csrIsSecurityMatch.
// Ownership: if ppIes is non-NULL, *ppIes receives the parsed IEs and the
// CALLER must free them; otherwise locally parsed IEs are freed here.
tANI_BOOLEAN csrMatchBSS( tHalHandle hHal, tSirBssDescription *pBssDesc, tCsrScanResultFilter *pFilter,
eCsrAuthType *pNegAuth, eCsrEncryptionType *pNegUc, eCsrEncryptionType *pNegMc,
tDot11fBeaconIEs **ppIes)
{
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
tANI_BOOLEAN fRC = eANI_BOOLEAN_FALSE, fCheck;
tANI_U32 i;
tDot11fBeaconIEs *pIes = NULL;
tANI_U8 *pb;
do {
if( ( NULL == ppIes ) || ( *ppIes ) == NULL )
{
//If no IEs passed in, get our own.
if(!HAL_STATUS_SUCCESS(csrGetParsedBssDescriptionIEs(pMac, pBssDesc, &pIes)))
{
break;
}
}
else
{
//Save the one pass in for local use
pIes = *ppIes;
}
//Check if caller wants P2P
fCheck = (!pFilter->p2pResult || pIes->P2PBeaconProbeRes.present);
if(!fCheck) break;
// SSID filter: at least one SSID in the filter list must match.
if(pIes->SSID.present)
{
for(i = 0; i < pFilter->SSIDs.numOfSSIDs; i++)
{
fCheck = csrIsSsidMatch( pMac, pFilter->SSIDs.SSIDList[i].SSID.ssId, pFilter->SSIDs.SSIDList[i].SSID.length,
pIes->SSID.ssid,
pIes->SSID.num_ssid, eANI_BOOLEAN_TRUE );
if ( fCheck ) break;
}
if(!fCheck) break;
}
// BSSID filter: match against the BSSID, or (for P2P) the P2P device address.
fCheck = eANI_BOOLEAN_TRUE;
for(i = 0; i < pFilter->BSSIDs.numOfBSSIDs; i++)
{
fCheck = csrIsBssidMatch( pMac, (tCsrBssid *)&pFilter->BSSIDs.bssid[i], (tCsrBssid *)pBssDesc->bssId );
if ( fCheck ) break;
if (pFilter->p2pResult && pIes->P2PBeaconProbeRes.present)
{
fCheck = csrIsBssidMatch( pMac, (tCsrBssid *)&pFilter->BSSIDs.bssid[i],
(tCsrBssid *)pIes->P2PBeaconProbeRes.P2PDeviceInfo.P2PDeviceAddress );
if ( fCheck ) break;
}
}
if(!fCheck) break;
// Channel filter: at least one channel in the list must match.
fCheck = eANI_BOOLEAN_TRUE;
for(i = 0; i < pFilter->ChannelInfo.numOfChannels; i++)
{
fCheck = csrIsChannelBandMatch( pMac, pFilter->ChannelInfo.ChannelList[i], pBssDesc );
if ( fCheck ) break;
}
if(!fCheck)
break;
#if defined WLAN_FEATURE_VOWIFI
/* If this is for measurement filtering */
if( pFilter->fMeasurement )
{
fRC = eANI_BOOLEAN_TRUE;
break;
}
#endif
if ( !csrIsPhyModeMatch( pMac, pFilter->phyMode, pBssDesc, NULL, NULL, pIes ) ) break;
// Security is skipped for WPS associations (credentials come via WPS).
if ( (!pFilter->bWPSAssociation) &&
!csrIsSecurityMatch( pMac, &pFilter->authType, &pFilter->EncryptionType, &pFilter->mcEncryptionType,
pBssDesc, pIes, pNegAuth, pNegUc, pNegMc ) ) break;
if ( !csrIsCapabilitiesMatch( pMac, pFilter->BSSType, pBssDesc ) ) break;
if ( !csrIsRateSetMatch( pMac, &pIes->SuppRates, &pIes->ExtSuppRates ) ) break;
//Tush-QoS: validate first if asked for APSD or WMM association
if ( (eCsrRoamWmmQbssOnly == pMac->roam.configParam.WMMSupportMode) &&
!CSR_IS_QOS_BSS(pIes) )
break;
//Check country. check even when pb is NULL because we may want to make sure
//AP has a country code in it if fEnforceCountryCodeMatch is set.
pb = ( pFilter->countryCode[0] ) ? ( pFilter->countryCode) : NULL;
fCheck = csrMatchCountryCode( pMac, pb, pIes );
if(!fCheck)
break;
#ifdef WLAN_FEATURE_VOWIFI_11R
// 11r: if the filter specifies a mobility domain, the BSS must carry a
// matching MDIE (little-endian mobility domain in mdie[0..1]).
if (pFilter->MDID.mdiePresent)
{
if (pBssDesc->mdiePresent)
{
if (pFilter->MDID.mobilityDomain != (pBssDesc->mdie[1] << 8 | pBssDesc->mdie[0]))
break;
}
else
break;
}
#endif
fRC = eANI_BOOLEAN_TRUE;
} while( 0 );
if( ppIes )
{
*ppIes = pIes;
}
else if( pIes )
{
palFreeMemory(pMac->hHdd, pIes);
}
return( fRC );
}
// Check whether a BSS satisfies the security settings of the currently
// connected profile by wrapping its single auth/encryption selections into
// one-entry lists and reusing the generic security matcher.
tANI_BOOLEAN csrMatchConnectedBSSSecurity( tpAniSirGlobal pMac, tCsrRoamConnectedProfile *pProfile,
tSirBssDescription *pBssDesc, tDot11fBeaconIEs *pIes)
{
    tCsrEncryptionList ucList, mcList;
    tCsrAuthList authList;

    ucList.numEntries = 1;
    ucList.encryptionType[0] = pProfile->EncryptionType;
    mcList.numEntries = 1;
    mcList.encryptionType[0] = pProfile->mcEncryptionType;
    authList.numEntries = 1;
    authList.authType[0] = pProfile->AuthType;

    // Negotiated selections are not needed here, hence the NULL out params.
    return ( csrIsSecurityMatch( pMac, &authList, &ucList, &mcList,
                                 pBssDesc, pIes, NULL, NULL, NULL ) );
}
// Check whether a BSS is compatible with the currently connected profile:
// SSID, security, capabilities, rates and operating channel must all match.
// pIes may be NULL, in which case the IEs are parsed (and freed) locally.
tANI_BOOLEAN csrMatchBSSToConnectProfile( tHalHandle hHal, tCsrRoamConnectedProfile *pProfile,
tSirBssDescription *pBssDesc, tDot11fBeaconIEs *pIes )
{
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
tANI_BOOLEAN fRC = eANI_BOOLEAN_FALSE, fCheck;
tDot11fBeaconIEs *pIesLocal = pIes;
do {
if( !pIes )
{
if(!HAL_STATUS_SUCCESS(csrGetParsedBssDescriptionIEs(pMac, pBssDesc, &pIesLocal)))
{
break;
}
}
fCheck = eANI_BOOLEAN_TRUE;
if(pIesLocal->SSID.present)
{
// Only require an SSID match when the profile actually has an SSID.
tANI_BOOLEAN fCheckSsid = eANI_BOOLEAN_FALSE;
if(pProfile->SSID.length)
{
fCheckSsid = eANI_BOOLEAN_TRUE;
}
fCheck = csrIsSsidMatch( pMac, pProfile->SSID.ssId, pProfile->SSID.length,
pIesLocal->SSID.ssid, pIesLocal->SSID.num_ssid, fCheckSsid );
if(!fCheck) break;
}
if ( !csrMatchConnectedBSSSecurity( pMac, pProfile, pBssDesc, pIesLocal) ) break;
if ( !csrIsCapabilitiesMatch( pMac, pProfile->BSSType, pBssDesc ) ) break;
if ( !csrIsRateSetMatch( pMac, &pIesLocal->SuppRates, &pIesLocal->ExtSuppRates ) ) break;
fCheck = csrIsChannelBandMatch( pMac, pProfile->operationChannel, pBssDesc );
if(!fCheck)
break;
fRC = eANI_BOOLEAN_TRUE;
} while( 0 );
if( !pIes && pIesLocal )
{
//locally allocated
palFreeMemory(pMac->hHdd, pIesLocal);
}
return( fRC );
}
// Check whether a single 802.11 rate (units of 500kbps, Basic Rate flag
// tolerated) is supported by the adapter's current configuration.
tANI_BOOLEAN csrRatesIsDot11RateSupported( tHalHandle hHal, tANI_U8 rate )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    // Strip the Basic Rate flag before consulting the support table.
    tANI_U16 strippedRate = BITS_OFF( rate, CSR_DOT11_BASIC_RATE_MASK );
    return csrIsAggregateRateSupported( pMac, strippedRate );
}
// Map a proprietary MAC rate code to its 802.11 rate value (units of
// 500kbps). Unknown codes are passed through unchanged; the 0xff sentinel
// maps to 0.
tANI_U16 csrRatesMacPropToDot11( tANI_U16 Rate )
{
    static const struct
    {
        tANI_U16 macRate;
        tANI_U16 dot11Rate;
    } rateMap[] =
    {
        { SIR_MAC_RATE_1,     2 },
        { SIR_MAC_RATE_2,     4 },
        { SIR_MAC_RATE_5_5,  11 },
        { SIR_MAC_RATE_11,   22 },
        { SIR_MAC_RATE_6,    12 },
        { SIR_MAC_RATE_9,    18 },
        { SIR_MAC_RATE_12,   24 },
        { SIR_MAC_RATE_18,   36 },
        { SIR_MAC_RATE_24,   48 },
        { SIR_MAC_RATE_36,   72 },
        { SIR_MAC_RATE_42,   84 },
        { SIR_MAC_RATE_48,   96 },
        { SIR_MAC_RATE_54,  108 },
        { SIR_MAC_RATE_72,  144 },
        { SIR_MAC_RATE_84,  168 },
        { SIR_MAC_RATE_96,  192 },
        { SIR_MAC_RATE_108, 216 },
        { SIR_MAC_RATE_126, 252 },
        { SIR_MAC_RATE_144, 288 },
        { SIR_MAC_RATE_168, 336 },
        { SIR_MAC_RATE_192, 384 },
        { SIR_MAC_RATE_216, 432 },
        { SIR_MAC_RATE_240, 480 },
        { 0xff,               0 },
    };
    tANI_U32 i;

    for ( i = 0; i < sizeof(rateMap) / sizeof(rateMap[0]); i++ )
    {
        if ( rateMap[i].macRate == Rate )
        {
            return rateMap[i].dot11Rate;
        }
    }
    // No entry: leave the rate as-is (matches the original switch default).
    return Rate;
}
// Return the highest rate (Basic Rate flag stripped) found across the
// supported, extended-supported and proprietary rate sets. pExtRates and
// pPropRates may be NULL. Proprietary rates are converted to 802.11 rate
// values before comparison.
tANI_U16 csrRatesFindBestRate( tSirMacRateSet *pSuppRates, tSirMacRateSet *pExtRates, tSirMacPropRateSet *pPropRates )
{
    tANI_U8 i;
    tANI_U16 nBest;

    // Seed with the first supported rate.
    nBest = pSuppRates->rate[ 0 ] & ( ~CSR_DOT11_BASIC_RATE_MASK );

    // Clamp the advertised count so a malformed IE cannot drive the loop
    // past the end of the fixed-size rate array.
    if(pSuppRates->numRates > SIR_MAC_RATESET_EID_MAX)
    {
        pSuppRates->numRates = SIR_MAC_RATESET_EID_MAX;
    }
    for ( i = 1U; i < pSuppRates->numRates; ++i )
    {
        nBest = (tANI_U16)CSR_MAX( nBest, pSuppRates->rate[ i ] & ( ~CSR_DOT11_BASIC_RATE_MASK ) );
    }
    if ( NULL != pExtRates )
    {
        // Bug fix: the extended rate set uses the same fixed-size array
        // layout but was previously iterated without clamping numRates,
        // allowing an out-of-bounds read on a malformed IE.
        if(pExtRates->numRates > SIR_MAC_RATESET_EID_MAX)
        {
            pExtRates->numRates = SIR_MAC_RATESET_EID_MAX;
        }
        for ( i = 0U; i < pExtRates->numRates; ++i )
        {
            nBest = (tANI_U16)CSR_MAX( nBest, pExtRates->rate[ i ] & ( ~CSR_DOT11_BASIC_RATE_MASK ) );
        }
    }
    if ( NULL != pPropRates )
    {
        // NOTE(review): tSirMacPropRateSet has its own capacity; numPropRates
        // is assumed to be validated by the caller -- TODO confirm.
        for ( i = 0U; i < pPropRates->numPropRates; ++i )
        {
            nBest = (tANI_U16)CSR_MAX( nBest, csrRatesMacPropToDot11( pPropRates->propRate[ i ] ) );
        }
    }
    return nBest;
}
// Free every dynamically allocated member of the roam profile, NULL the
// pointers, then zero the whole structure. Safe to call with pProfile NULL.
void csrReleaseProfile(tpAniSirGlobal pMac, tCsrRoamProfile *pProfile)
{
    if(pProfile)
    {
        if(pProfile->BSSIDs.bssid)
        {
            palFreeMemory(pMac->hHdd, pProfile->BSSIDs.bssid);
            pProfile->BSSIDs.bssid = NULL;
        }
        if(pProfile->SSIDs.SSIDList)
        {
            palFreeMemory(pMac->hHdd, pProfile->SSIDs.SSIDList);
            pProfile->SSIDs.SSIDList = NULL;
        }
        if(pProfile->pWPAReqIE)
        {
            palFreeMemory(pMac->hHdd, pProfile->pWPAReqIE);
            pProfile->pWPAReqIE = NULL;
        }
        if(pProfile->pRSNReqIE)
        {
            palFreeMemory(pMac->hHdd, pProfile->pRSNReqIE);
            pProfile->pRSNReqIE = NULL;
        }
#ifdef FEATURE_WLAN_WAPI
        if(pProfile->pWAPIReqIE)
        {
            palFreeMemory(pMac->hHdd, pProfile->pWAPIReqIE);
            pProfile->pWAPIReqIE = NULL;
        }
#endif /* FEATURE_WLAN_WAPI */
        if(pProfile->pAddIEScan)
        {
            palFreeMemory(pMac->hHdd, pProfile->pAddIEScan);
            pProfile->pAddIEScan = NULL;
        }
        // Bug fix: the original code freed pAddIEAssoc a second time in an
        // unconditional block right after this guarded one -- a duplicate
        // release of an already-NULLed pointer. The duplicate is removed.
        if(pProfile->pAddIEAssoc)
        {
            palFreeMemory(pMac->hHdd, pProfile->pAddIEAssoc);
            pProfile->pAddIEAssoc = NULL;
        }
        if(pProfile->ChannelInfo.ChannelList)
        {
            palFreeMemory(pMac->hHdd, pProfile->ChannelInfo.ChannelList);
            pProfile->ChannelInfo.ChannelList = NULL;
        }
        palZeroMemory(pMac->hHdd, pProfile, sizeof(tCsrRoamProfile));
    }
}
// Release every dynamically allocated list inside the scan filter and NULL
// the pointers so the filter can be safely reused or re-freed.
void csrFreeScanFilter(tpAniSirGlobal pMac, tCsrScanResultFilter *pScanFilter)
{
    if ( pScanFilter->BSSIDs.bssid )
    {
        palFreeMemory( pMac->hHdd, pScanFilter->BSSIDs.bssid );
        pScanFilter->BSSIDs.bssid = NULL;
    }
    if ( pScanFilter->ChannelInfo.ChannelList )
    {
        palFreeMemory( pMac->hHdd, pScanFilter->ChannelInfo.ChannelList );
        pScanFilter->ChannelInfo.ChannelList = NULL;
    }
    if ( pScanFilter->SSIDs.SSIDList )
    {
        palFreeMemory( pMac->hHdd, pScanFilter->SSIDs.SSIDList );
        pScanFilter->SSIDs.SSIDList = NULL;
    }
}
// Tear down and release the session's current roam profile, if any.
void csrFreeRoamProfile(tpAniSirGlobal pMac, tANI_U32 sessionId)
{
    tCsrRoamSession *pSession = &pMac->roam.roamSession[sessionId];
    tCsrRoamProfile *pProfile = pSession->pCurRoamProfile;

    if ( pProfile )
    {
        // Release the profile's owned members first, then the profile itself.
        csrReleaseProfile( pMac, pProfile );
        palFreeMemory( pMac->hHdd, pProfile );
        pSession->pCurRoamProfile = NULL;
    }
}
// Release the session's cached BSS description for the current connection.
void csrFreeConnectBssDesc(tpAniSirGlobal pMac, tANI_U32 sessionId)
{
    tCsrRoamSession *pSession = &pMac->roam.roamSession[sessionId];

    if ( pSession->pConnectBssDesc )
    {
        palFreeMemory( pMac->hHdd, pSession->pConnectBssDesc );
        pSession->pConnectBssDesc = NULL;
    }
}
// Extract the result code from a serialized SME disassoc response.
// NOTE(review): the skipped prefix (two tANI_U16 plus a tSirMacAddr) is
// presumably the message type, length and peer MAC -- confirm against the
// tSirSmeDisassocRsp definition.
tSirResultCodes csrGetDisassocRspStatusCode( tSirSmeDisassocRsp *pSmeDisassocRsp )
{
tANI_U8 *pBuffer = (tANI_U8 *)pSmeDisassocRsp;
tANI_U32 ret;
pBuffer += (sizeof(tANI_U16) + sizeof(tANI_U16) + sizeof(tSirMacAddr));
//tSirResultCodes is an enum, assuming is 32bit
//If we cannot make this assumption, use copymemory
pal_get_U32( pBuffer, &ret );
return( ( tSirResultCodes )ret );
}
// Extract the result code from a serialized SME deauth response.
// NOTE(review): the skipped prefix (two tANI_U16 plus a tSirMacAddr) is
// presumably the message type, length and peer MAC -- confirm against the
// tSirSmeDeauthRsp definition.
tSirResultCodes csrGetDeAuthRspStatusCode( tSirSmeDeauthRsp *pSmeRsp )
{
tANI_U8 *pBuffer = (tANI_U8 *)pSmeRsp;
tANI_U32 ret;
pBuffer += (sizeof(tANI_U16) + sizeof(tANI_U16) + sizeof(tSirMacAddr));
//tSirResultCodes is an enum, assuming is 32bit
//If we cannot make this assumption, use copymemory
pal_get_U32( pBuffer, &ret );
return( ( tSirResultCodes )ret );
}
// Dead code: this legacy domain-table implementation of csrGetScanType is
// compiled out; the active implementation below queries the NV service.
#if 0
tSirScanType csrGetScanType(tANI_U8 chnId, eRegDomainId domainId, tANI_U8 *countryCode)
{
tSirScanType scanType = eSIR_PASSIVE_SCAN;
tANI_U8 cc = 0;
// NOTE(review): cc is post-incremented before use, so entry 0 is never
// examined and index numChannels may be read -- moot while disabled.
while (cc++ < gCsrDomainChnInfo[domainId].numChannels)
{
if(chnId == gCsrDomainChnInfo[domainId].chnInfo[cc].chnId)
{
scanType = gCsrDomainChnInfo[domainId].chnInfo[cc].scanType;
break;
}
}
return (scanType);
}
#endif
// Choose the scan type for a channel: active scan only when the NV store
// marks the channel fully enabled, passive scan otherwise.
tSirScanType csrGetScanType(tpAniSirGlobal pMac, tANI_U8 chnId)
{
    if ( NV_CHANNEL_ENABLE == vos_nv_getChannelEnabledState( chnId ) )
    {
        return eSIR_ACTIVE_SCAN;
    }
    return eSIR_PASSIVE_SCAN;
}
// ASCII-only upper-casing: 'a'..'z' are converted, every other byte is
// returned untouched.
tANI_U8 csrToUpper( tANI_U8 ch )
{
    return ( ch >= 'a' && ch <= 'z' ) ? (tANI_U8)( ch - 'a' + 'A' ) : ch;
}
// Convert CSR's roaming BSS type into the MAC layer's BSS mode.
tSirBssType csrTranslateBsstypeToMacType(eCsrRoamBssType csrtype)
{
    tSirBssType macMode;

    switch ( csrtype )
    {
        case eCSR_BSS_TYPE_INFRASTRUCTURE:
            macMode = eSIR_INFRASTRUCTURE_MODE;
            break;
        case eCSR_BSS_TYPE_IBSS:
        case eCSR_BSS_TYPE_START_IBSS:
            macMode = eSIR_IBSS_MODE;
            break;
        case eCSR_BSS_TYPE_WDS_AP:
            macMode = eSIR_BTAMP_AP_MODE;
            break;
        case eCSR_BSS_TYPE_WDS_STA:
            macMode = eSIR_BTAMP_STA_MODE;
            break;
#ifdef WLAN_SOFTAP_FEATURE
        case eCSR_BSS_TYPE_INFRA_AP:
            macMode = eSIR_INFRA_AP_MODE;
            break;
#endif
        case eCSR_BSS_TYPE_ANY:
        default:
            // "Any" and unrecognized types let the MAC auto-select.
            macMode = eSIR_AUTO_MODE;
            break;
    }
    return ( macMode );
}
//This function use the parameters to decide the CFG value.
//CSR never sets WNI_CFG_DOT11_MODE_ALL to the CFG
//So PE should not see WNI_CFG_DOT11_MODE_ALL when it gets the CFG value
// Map a CSR phy mode (plus, with SoftAP, the profile's role) to the
// eCsrCfgDot11Mode value programmed into CFG. fProprietary upgrades 11n
// to TAURUS. Unknown modes keep the ABG default.
#ifdef WLAN_SOFTAP_FEATURE
eCsrCfgDot11Mode csrGetCfgDot11ModeFromCsrPhyMode(tCsrRoamProfile *pProfile, eCsrPhyMode phyMode, tANI_BOOLEAN fProprietary)
#else
eCsrCfgDot11Mode csrGetCfgDot11ModeFromCsrPhyMode(eCsrPhyMode phyMode, tANI_BOOLEAN fProprietary)
#endif
{
tANI_U32 cfgDot11Mode = eCSR_CFG_DOT11_MODE_ABG;
switch(phyMode)
{
case eCSR_DOT11_MODE_11a:
case eCSR_DOT11_MODE_11a_ONLY:
cfgDot11Mode = eCSR_CFG_DOT11_MODE_11A;
break;
case eCSR_DOT11_MODE_11b:
case eCSR_DOT11_MODE_11b_ONLY:
cfgDot11Mode = eCSR_CFG_DOT11_MODE_11B;
break;
case eCSR_DOT11_MODE_11g:
case eCSR_DOT11_MODE_11g_ONLY:
#ifdef WLAN_SOFTAP_FEATURE
// A SoftAP profile asking for strict 11g gets the "only" CFG variant.
if(pProfile && (CSR_IS_INFRA_AP(pProfile)) && (phyMode == eCSR_DOT11_MODE_11g_ONLY))
cfgDot11Mode = eCSR_CFG_DOT11_MODE_11G_ONLY;
else
#endif
cfgDot11Mode = eCSR_CFG_DOT11_MODE_11G;
break;
case eCSR_DOT11_MODE_11n:
if(fProprietary)
{
cfgDot11Mode = eCSR_CFG_DOT11_MODE_TAURUS;
}
else
{
cfgDot11Mode = eCSR_CFG_DOT11_MODE_11N;
}
break;
case eCSR_DOT11_MODE_11n_ONLY:
#ifdef WLAN_SOFTAP_FEATURE
if(pProfile && CSR_IS_INFRA_AP(pProfile))
cfgDot11Mode = eCSR_CFG_DOT11_MODE_11N_ONLY;
else
#endif
cfgDot11Mode = eCSR_CFG_DOT11_MODE_11N;
break;
case eCSR_DOT11_MODE_TAURUS:
cfgDot11Mode = eCSR_CFG_DOT11_MODE_TAURUS;
break;
case eCSR_DOT11_MODE_abg:
cfgDot11Mode = eCSR_CFG_DOT11_MODE_ABG;
break;
case eCSR_DOT11_MODE_AUTO:
cfgDot11Mode = eCSR_CFG_DOT11_MODE_AUTO;
break;
#ifdef WLAN_FEATURE_11AC
case eCSR_DOT11_MODE_11ac:
// Fall back to 11n when the firmware lacks 11ac capability.
if (!WDA_getFwWlanFeatCaps(DOT11AC))
{
cfgDot11Mode = eCSR_CFG_DOT11_MODE_11N;
}
else
{
cfgDot11Mode = eCSR_CFG_DOT11_MODE_11AC;
}
break;
case eCSR_DOT11_MODE_11ac_ONLY:
cfgDot11Mode = eCSR_CFG_DOT11_MODE_11AC_ONLY;
break;
#endif
default:
//No need to assign anything here
break;
}
return (cfgDot11Mode);
}
// Switch CSR's current regulatory domain. Refuses the change (and returns
// eHAL_STATUS_CSR_WRONG_STATE) when the default domain is enforced. On
// success *pfRestartNeeded (optional) reports whether a restart is required.
eHalStatus csrSetRegulatoryDomain(tpAniSirGlobal pMac, v_REGDOMAIN_t domainId, tANI_BOOLEAN *pfRestartNeeded)
{
    eHalStatus status = eHAL_STATUS_SUCCESS;
    tANI_BOOLEAN fRestart = eANI_BOOLEAN_FALSE;

    if ( pMac->scan.domainIdCurrent != domainId )
    {
        if ( pMac->roam.configParam.fEnforceDefaultDomain )
        {
            // The default domain is enforced; the domain cannot change.
            status = eHAL_STATUS_CSR_WRONG_STATE;
        }
        else
        {
            pMac->scan.domainIdCurrent = domainId;
            fRestart = eANI_BOOLEAN_TRUE;
        }
    }
    // else: no change, success with no restart.

    if ( pfRestartNeeded )
    {
        *pfRestartNeeded = fRestart;
    }
    return ( status );
}
// Return the regulatory domain CSR currently has in effect.
v_REGDOMAIN_t csrGetCurrentRegulatoryDomain(tpAniSirGlobal pMac)
{
return (pMac->scan.domainIdCurrent);
}
// Look up the regulatory domain for a two-character country code via the NV
// service. pDomainId is optional. Returns eHAL_STATUS_INVALID_PARAMETER when
// pCountry is NULL or the country is not recognized.
eHalStatus csrGetRegulatoryDomainForCountry(tpAniSirGlobal pMac, tANI_U8 *pCountry, v_REGDOMAIN_t *pDomainId)
{
    eHalStatus status = eHAL_STATUS_INVALID_PARAMETER;

    if ( pCountry )
    {
        v_COUNTRYCODE_t countryCode;
        v_REGDOMAIN_t domainId;
        VOS_STATUS vosStatus;

        // The NV API expects a v_COUNTRYCODE_t; copy the two characters.
        countryCode[0] = pCountry[0];
        countryCode[1] = pCountry[1];
        vosStatus = vos_nv_getRegDomainFromCountryCode( &domainId, countryCode );
        if ( !VOS_IS_STATUS_SUCCESS( vosStatus ) )
        {
            smsLog(pMac, LOGW, FL(" doesn't match country %c%c\n"), pCountry[0], pCountry[1]);
            status = eHAL_STATUS_INVALID_PARAMETER;
        }
        else
        {
            if ( pDomainId )
            {
                *pDomainId = domainId;
            }
            status = eHAL_STATUS_SUCCESS;
        }
    }
    return ( status );
}
//To check whether a country code matches the one in the IE
//Only check the first two characters, ignoring in/outdoor
//pCountry -- caller allocated buffer contain the country code that is checking against
//the one in pIes. It can be NULL.
//caller must provide pIes, it cannot be NULL
//This function always return TRUE if 11d support is not turned on.
// Side effect: when pCountry is non-NULL, its characters are upper-cased
// in place before the comparison.
tANI_BOOLEAN csrMatchCountryCode( tpAniSirGlobal pMac, tANI_U8 *pCountry, tDot11fBeaconIEs *pIes )
{
tANI_BOOLEAN fRet = eANI_BOOLEAN_TRUE;
v_REGDOMAIN_t domainId = REGDOMAIN_COUNT; //This is init to invalid value
eHalStatus status;
do
{
// Without 11d support every BSS passes the country check.
if( !csrIs11dSupported( pMac) )
{
break;
}
if( !pIes )
{
smsLog(pMac, LOGE, FL(" No IEs\n"));
break;
}
if( pMac->roam.configParam.fEnforceDefaultDomain ||
pMac->roam.configParam.fEnforceCountryCodeMatch )
{
//Make sure this country is recognizable
if( pIes->Country.present )
{
status = csrGetRegulatoryDomainForCountry( pMac, pIes->Country.country, &domainId );
if( !HAL_STATUS_SUCCESS( status ) )
{
fRet = eANI_BOOLEAN_FALSE;
break;
}
}
//check whether it is needed to enforce to the default regulatory domain first
if( pMac->roam.configParam.fEnforceDefaultDomain )
{
if( domainId != pMac->scan.domainIdCurrent )
{
fRet = eANI_BOOLEAN_FALSE;
break;
}
}
if( pMac->roam.configParam.fEnforceCountryCodeMatch )
{
// domainId stays REGDOMAIN_COUNT (invalid) when the Country IE
// is absent, so this also rejects BSSes without a country code.
if( domainId >= REGDOMAIN_COUNT )
{
fRet = eANI_BOOLEAN_FALSE;
break;
}
}
}
if( pCountry )
{
tANI_U32 i;
if( !pIes->Country.present )
{
fRet = eANI_BOOLEAN_FALSE;
break;
}
// Convert the CountryCode characters to upper
for ( i = 0; i < WNI_CFG_COUNTRY_CODE_LEN - 1; i++ )
{
pCountry[i] = csrToUpper( pCountry[i] );
}
// Compare only the first two characters (in/outdoor byte ignored).
if( !palEqualMemory(pMac->hHdd, pIes->Country.country, pCountry, WNI_CFG_COUNTRY_CODE_LEN - 1) )
{
fRet = eANI_BOOLEAN_FALSE;
break;
}
}
} while(0);
return (fRet);
}
#if 0
/*
 * csrSetCountryDomainMapping - update the static country->domain mapping
 * table from a caller-supplied list, and re-apply the scan control list when
 * the currently used country's domain changed.
 *
 * NOTE: this function is compiled out (enclosing #if 0) - kept for reference.
 */
eHalStatus csrSetCountryDomainMapping(tpAniSirGlobal pMac, tCsrCountryDomainMapping *pCountryDomainMapping)
{
    eHalStatus status = eHAL_STATUS_INVALID_PARAMETER;
    tANI_U32 i, j;
    tANI_BOOLEAN fDomainChanged = eANI_BOOLEAN_FALSE;
    tANI_U8 countryCode[WNI_CFG_COUNTRY_CODE_LEN];

    i = WNI_CFG_COUNTRY_CODE_LEN;
    //Get the currently used country code
    status = ccmCfgGetStr(pMac, WNI_CFG_COUNTRY_CODE, countryCode, &i);
    if(HAL_STATUS_SUCCESS(status))
    {
        if(pCountryDomainMapping && pCountryDomainMapping->numEntry)
        {
            /* For each supplied entry, find its country in the global table
             * and overwrite the domain id if it differs. */
            for(i = 0; i < pCountryDomainMapping->numEntry; i++)
            {
                for(j = 0; j < eCSR_NUM_COUNTRY_INDEX; j++)
                {
                    if(palEqualMemory(pMac->hHdd, gCsrCountryInfo[j].countryCode,
                        pCountryDomainMapping->pCountryInfo[i].countryCode, 2))
                    {
                        if(gCsrCountryInfo[j].domainId != pCountryDomainMapping->pCountryInfo[i].domainId)
                        {
                            gCsrCountryInfo[j].domainId = pCountryDomainMapping->pCountryInfo[i].domainId;
                            //Check whether it matches the currently used country code
                            //If matching, need to update base on the new domain setting.
                            if(palEqualMemory(pMac->hHdd, countryCode,
                                pCountryDomainMapping->pCountryInfo[i].countryCode, 2))
                            {
                                fDomainChanged = eANI_BOOLEAN_TRUE;
                            }
                        }
                        break;
                    }
                }
            }
            status = eHAL_STATUS_SUCCESS;
            if(fDomainChanged)
            {
                tCsrChannel *pChannelList;

                if(pMac->scan.f11dInfoApplied)
                {
                    //11d info already applied. Let's reapply with the new domain setting
                    if(pMac->scan.channels11d.numChannels)
                    {
                        pChannelList = &pMac->scan.channels11d;
                    }
                    else
                    {
                        pChannelList = &pMac->scan.base20MHzChannels;
                    }
                }
                else
                {
                    //no 11d so we use the base channelist from EEPROM
                    pChannelList = &pMac->scan.base20MHzChannels;
                }
                //set the new domain's scan requirement to CFG
                csrSetCfgScanControlList(pMac, countryCode, pChannelList);
            }
        }
    }

    return (status);
}
/*
 * csrSetDomainScanSetting - set the scan type (active/passive) for every
 * channel of a regulatory domain whose frequency falls inside one of the
 * caller-supplied frequency ranges.
 *
 * NOTE: this function is compiled out (enclosing #if 0) - kept for reference.
 */
eHalStatus csrSetDomainScanSetting(tpAniSirGlobal pMac, tCsrDomainFreqInfo *pDomainFreqInfo)
{
    eHalStatus status = eHAL_STATUS_INVALID_PARAMETER;
    tANI_U32 i, j;
    tANI_U16 freq;

    if(pDomainFreqInfo && pDomainFreqInfo->numEntry && (pDomainFreqInfo->domainId < NUM_REG_DOMAINS))
    {
        tCsrDomainChnInfo *pDomainChnInfo = &gCsrDomainChnInfo[pDomainFreqInfo->domainId];

        for(j = 0; j < pDomainChnInfo->numChannels; j++)
        {
            /* Map the channel id to a frequency, then look for a matching
             * caller-provided range; first match wins. */
            if(HAL_STATUS_SUCCESS(halPhyChIdToFreqConversion(pDomainChnInfo->chnInfo[j].chnId, &freq)))
            {
                for(i = 0; i < pDomainFreqInfo->numEntry; i++)
                {
                    if((pDomainFreqInfo->pCsrScanFreqInfo[i].nStartFreq <= freq) &&
                        (freq <= pDomainFreqInfo->pCsrScanFreqInfo[i].nEndFreq))
                    {
                        pDomainChnInfo->chnInfo[j].scanType = pDomainFreqInfo->pCsrScanFreqInfo[i].scanType;
                        break;
                    }
                }
            }
            else
            {
                smsLog(pMac, LOGW, " Failed to get frequency of channel %d", pDomainChnInfo->chnInfo[j].chnId);
            }
        }
        status = eHAL_STATUS_SUCCESS;
    }

    return (status);
}
#endif
/*
 * csrGetModifyProfileFields - copy the modify-profile fields of the connected
 * profile for @sessionId into the caller-supplied buffer.
 *
 * Returns eHAL_STATUS_FAILURE when the output pointer is NULL or the session
 * id is invalid (previously an invalid id would index roamSession[] out of
 * bounds); eHAL_STATUS_SUCCESS otherwise.
 */
eHalStatus csrGetModifyProfileFields(tpAniSirGlobal pMac, tANI_U32 sessionId,
                                     tCsrRoamModifyProfileFields *pModifyProfileFields)
{
    if(!pModifyProfileFields)
    {
        return eHAL_STATUS_FAILURE;
    }
    /* Validate the session before indexing the session array. */
    if( !CSR_IS_SESSION_VALID( pMac, sessionId ) )
    {
        return eHAL_STATUS_FAILURE;
    }
    palCopyMemory( pMac->hHdd, pModifyProfileFields,
                   &pMac->roam.roamSession[sessionId].connectedProfile.modifyProfileFields,
                   sizeof(tCsrRoamModifyProfileFields) );
    return eHAL_STATUS_SUCCESS;
}
/*
 * csrSetModifyProfileFields - store the caller-supplied modify-profile fields
 * into the connected profile for @sessionId.
 *
 * Fix: the previous implementation dereferenced both pModifyProfileFields and
 * the pointer returned by CSR_GET_SESSION() without any validation, unlike
 * its csrGetModifyProfileFields counterpart.  Returns eHAL_STATUS_FAILURE on
 * invalid input, eHAL_STATUS_SUCCESS otherwise.
 */
eHalStatus csrSetModifyProfileFields(tpAniSirGlobal pMac, tANI_U32 sessionId,
                                     tCsrRoamModifyProfileFields *pModifyProfileFields)
{
    tCsrRoamSession *pSession;

    if( !pModifyProfileFields || !CSR_IS_SESSION_VALID( pMac, sessionId ) )
    {
        return eHAL_STATUS_FAILURE;
    }
    pSession = CSR_GET_SESSION( pMac, sessionId );
    if( NULL == pSession )
    {
        return eHAL_STATUS_FAILURE;
    }
    palCopyMemory( pMac->hHdd, &pSession->connectedProfile.modifyProfileFields,
                   pModifyProfileFields,
                   sizeof(tCsrRoamModifyProfileFields) );
    return eHAL_STATUS_SUCCESS;
}
#if 0
/* ---------------------------------------------------------------------------
\fn csrGetSupportedCountryCode
\brief this function is to get a list of the country code current being supported
\param pBuf - Caller allocated buffer with at least 3 bytes, upon success return,
this has the country code list. 3 bytes for each country code. This may be NULL if
caller wants to know the needed bytes.
\param pbLen - Caller allocated, as input, it indicates the length of pBuf. Upon success return,
this contains the length of the data in pBuf
\return eHalStatus
-------------------------------------------------------------------------------*/
/*
 * Table-driven variant of csrGetSupportedCountryCode.
 * NOTE: compiled out (enclosing #if 0); superseded by the NV-based
 * implementation further below.  Kept for reference.
 */
eHalStatus csrGetSupportedCountryCode(tpAniSirGlobal pMac, tANI_U8 *pBuf, tANI_U32 *pbLen)
{
    tANI_U32 numOfCountry = sizeof( gCsrCountryInfo ) / sizeof( gCsrCountryInfo[0] );
    tANI_U32 numBytes = 0;
    eHalStatus status = eHAL_STATUS_INVALID_PARAMETER;

    if( pbLen )
    {
        numBytes = *pbLen;
        //Consider it ok, at least we can return the number of bytes needed;
        *pbLen = numOfCountry * WNI_CFG_COUNTRY_CODE_LEN;
        status = eHAL_STATUS_SUCCESS;
        /* Only copy when the caller's buffer is large enough. */
        if( pBuf && ( numBytes >= *pbLen ) )
        {
            //The ugly part starts.
            //We may need to alter the data structure and find a way to make this faster.
            tANI_U32 i;

            for( i = 0; i < numOfCountry; i++ )
            {
                palCopyMemory( pMac->hHdd, pBuf + ( i * WNI_CFG_COUNTRY_CODE_LEN ),
                               gCsrCountryInfo[i].countryCode, WNI_CFG_COUNTRY_CODE_LEN );
            }
        }
    }

    return ( status );
}
#endif
/* ---------------------------------------------------------------------------
\fn csrGetSupportedCountryCode
\brief this function is to get a list of the country code current being supported
\param pBuf - Caller allocated buffer with at least 3 bytes, upon success return,
this has the country code list. 3 bytes for each country code. This may be NULL if
caller wants to know the needed bytes.
\param pbLen - Caller allocated, as input, it indicates the length of pBuf. Upon success return,
this contains the length of the data in pBuf
\return eHalStatus
-------------------------------------------------------------------------------*/
/*
 * csrGetSupportedCountryCode - return the list of supported country codes
 * from NV (3 bytes each; the third octet is padded with a space).
 *
 * @pBuf  - caller-allocated buffer, or NULL to query only the required size.
 * @pbLen - in: size of pBuf; out: number of bytes (required or written).
 *
 * Fix: *pbLen was previously dereferenced without a NULL check; reject a
 * NULL pbLen with eHAL_STATUS_INVALID_PARAMETER instead of crashing.
 */
eHalStatus csrGetSupportedCountryCode(tpAniSirGlobal pMac, tANI_U8 *pBuf, tANI_U32 *pbLen)
{
    eHalStatus status = eHAL_STATUS_SUCCESS;
    VOS_STATUS vosStatus;
    v_SIZE_t size;

    if( !pbLen )
    {
        return eHAL_STATUS_INVALID_PARAMETER;
    }
    size = (v_SIZE_t)*pbLen;
    vosStatus = vos_nv_getSupportedCountryCode( pBuf, &size, 1 );
    //either way, return the value back
    *pbLen = (tANI_U32)size;

    //If pBuf is NULL, caller just want to get the size, consider it success
    if(pBuf)
    {
        if( VOS_IS_STATUS_SUCCESS( vosStatus ) )
        {
            /* Pad the third octet of each 3-byte country code with a space. */
            tANI_U32 i, n = *pbLen / 3;

            for( i = 0; i < n; i++ )
            {
                pBuf[i*3 + 2] = ' ';
            }
        }
        else
        {
            status = eHAL_STATUS_FAILURE;
        }
    }

    return (status);
}
//Upper layer to get the list of the base channels to scan for passively 11d info from csr
/*
 * csrScanGetBaseChannels - give the upper layer a copy of the base channel
 * list used for passive 11d scanning.
 *
 * On success pChannelInfo->ChannelList is a freshly allocated buffer that the
 * CALLER owns and must free.  Returns eHAL_STATUS_FAILURE when the base list
 * is empty, pChannelInfo is NULL, or allocation/copy fails.
 *
 * Fix: the allocated list was leaked when palCopyMemory failed; it is now
 * released on that path and the pointer reset so callers can't free garbage.
 */
eHalStatus csrScanGetBaseChannels( tpAniSirGlobal pMac, tCsrChannelInfo * pChannelInfo )
{
    eHalStatus status = eHAL_STATUS_FAILURE;

    do
    {
        if(!pMac->scan.baseChannels.numChannels || !pChannelInfo)
        {
            break;
        }
        status = palAllocateMemory( pMac->hHdd, (void **)&pChannelInfo->ChannelList,
                                    pMac->scan.baseChannels.numChannels );
        if( !HAL_STATUS_SUCCESS( status ) )
        {
            smsLog( pMac, LOGE, FL("csrScanGetBaseChannels: fail to allocate memory\n") );
            break;
        }
        status = palCopyMemory( pMac->hHdd, pChannelInfo->ChannelList, pMac->scan.baseChannels.channelList,
                                pMac->scan.baseChannels.numChannels );
        if( !HAL_STATUS_SUCCESS( status ) )
        {
            /* Don't leak the buffer allocated above on copy failure. */
            palFreeMemory( pMac->hHdd, pChannelInfo->ChannelList );
            pChannelInfo->ChannelList = NULL;
            break;
        }
        pChannelInfo->numOfChannels = pMac->scan.baseChannels.numChannels;
    }while(0);

    return ( status );
}
/*
 * csrIsSetKeyAllowed - decide whether a set-key operation is permitted for
 * the given session.
 *
 * With WLAN_SOFTAP_FEATURE: disallow only when the session pointer is NULL,
 * or when the session is disconnected AND has a current roam profile that is
 * NOT an infra AP (SAP must be able to set keys before any BSS is started).
 * Without the feature: allowed only when the session is connected.
 *
 * See the in-code comment below: the SOFTAP condition is known to be broken
 * for infra and is kept only as a work-around.
 */
tANI_BOOLEAN csrIsSetKeyAllowed(tpAniSirGlobal pMac, tANI_U32 sessionId)
{
    tANI_BOOLEAN fRet = eANI_BOOLEAN_TRUE;
#ifdef WLAN_SOFTAP_FEATURE
    tCsrRoamSession *pSession;

    pSession =CSR_GET_SESSION(pMac, sessionId);
    /*This condition is not working for infra state. When infra is in not-connected state
     * the pSession->pCurRoamProfile is NULL. And this function returns TRUE, that is incorrect.
     * Since SAP requires to set key without any BSS started, it needs this condition to be met.
     * In other words, this function is useless.
     * The current work-around is to process setcontext_rsp and removekey_rsp no matter what the
     * state is.
     */
    smsLog( pMac, LOG2, FL(" is not what it intends to. Must be revisit or removed\n") );
    if( (NULL == pSession) ||
        ( csrIsConnStateDisconnected( pMac, sessionId ) &&
          (pSession->pCurRoamProfile != NULL) &&
          (!(CSR_IS_INFRA_AP(pSession->pCurRoamProfile))) )
      )
    {
        fRet = eANI_BOOLEAN_FALSE;
    }
#else
    fRet = !( csrIsConnStateDisconnected( pMac, sessionId ) );
#endif

    return ( fRet );
}
//no need to acquire lock for this basic function
/*
 * sme_ChnToFreq - look up the target frequency (MHz) for an RF channel
 * number in the rfChannels table.  Returns 0 when the channel is unknown.
 * No lock needed: this only reads a constant table.
 */
tANI_U16 sme_ChnToFreq(tANI_U8 chanNum)
{
    int idx;

    for (idx = 0; idx < NUM_RF_CHANNELS; idx++)
    {
        if (chanNum == rfChannels[idx].channelNum)
        {
            return rfChannels[idx].targetFreq;
        }
    }

    return 0;
}
/* Disconnect all active sessions by sending disassoc. This is mainly used to disconnect the remaining session when we
* transition from concurrent sessions to a single session. The use case is Infra STA and wifi direct multiple sessions are up and
* P2P session is removed. The Infra STA session remains and should resume BMPS if BMPS is enabled by default. However, there
* are some issues seen with BMPS resume during this transition and this is a workaround which will allow the Infra STA session to
* disconnect and auto connect back and enter BMPS this giving the same effect as resuming BMPS
*/
/*
 * csrDisconnectAllActiveSessions - send a disassoc on every valid, connected
 * session.  Used as a work-around when going from concurrent sessions back to
 * a single session so the remaining infra STA reconnects and re-enters BMPS.
 */
void csrDisconnectAllActiveSessions(tpAniSirGlobal pMac)
{
    tANI_U8 sessionId;

    /* Walk every possible session slot and tear down the connected ones. */
    for (sessionId = 0; sessionId < CSR_ROAM_SESSION_MAX; sessionId++)
    {
        if (!CSR_IS_SESSION_VALID(pMac, sessionId))
        {
            continue;
        }
        if (csrIsConnStateDisconnected(pMac, sessionId))
        {
            continue;
        }
        csrRoamDisconnectInternal(pMac, sessionId,
                                  eCSR_DISCONNECT_REASON_UNSPECIFIED);
    }
}
#ifdef FEATURE_WLAN_LFR
/*
 * csrIsChannelPresentInList - linear search for @channel in @pChannelList.
 * Returns FALSE for a NULL list, TRUE on the first match.
 */
tANI_BOOLEAN csrIsChannelPresentInList(
        tANI_U8 *pChannelList,
        int numChannels,
        tANI_U8 channel
        )
{
    int idx;

    if (NULL == pChannelList)
    {
        return FALSE;
    }

    for (idx = 0; idx < numChannels; idx++)
    {
        if (channel == pChannelList[idx])
        {
            return TRUE;
        }
    }

    return FALSE;
}
/*
 * csrAddToChannelListFront - insert @channel at the front of @pChannelList,
 * shifting the existing @numChannels entries up by one.  The caller must
 * guarantee the buffer has room for numChannels + 1 entries.
 *
 * Fix: the function is declared to return VOS_STATUS but previously returned
 * eHalStatus constants.  It now returns proper VOS_STATUS codes; success is
 * still 0, so VOS_IS_STATUS_SUCCESS() callers are unaffected.
 */
VOS_STATUS csrAddToChannelListFront(
        tANI_U8 *pChannelList,
        int numChannels,
        tANI_U8 channel
        )
{
    int i;

    // Check for NULL pointer
    if (!pChannelList)
    {
        return VOS_STATUS_E_FAULT;
    }

    // Make room for the addition. (Start moving from the back.)
    for (i = numChannels; i > 0; i--)
    {
        pChannelList[i] = pChannelList[i-1];
    }

    // Now add the NEW channel...at the front
    pChannelList[0] = channel;
    return VOS_STATUS_SUCCESS;
}
#endif
| gpl-2.0 |
unforgiven512/android_kernel_asus_tf700t | drivers/staging/gma500/mdfld_tmd_vid.c | 601 | 6884 | /*
* Copyright © 2010 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Jim Liu <jim.liu@intel.com>
* Jackie Li<yaodong.li@intel.com>
* Gideon Eaton <eaton.
* Scott Rowe <scott.m.rowe@intel.com>
*/
#include "mdfld_dsi_dbi.h"
#include "mdfld_dsi_dpi.h"
#include "mdfld_dsi_output.h"
#include "mdfld_output.h"
#include "mdfld_dsi_pkg_sender.h"
#include "displays/tmd_vid.h"
/* FIXME: static ? */
/* FIXME: static ? */
/*
 * tmd_vid_get_config_mode - build the display mode for the TMD MIPI panel.
 *
 * Allocates and returns a drm_display_mode (caller frees), either decoded
 * from the GCT DTD timing data or, while GCT is disabled (use_gct == false),
 * from hard-coded 480x854 panel timings.  Returns NULL on allocation failure.
 */
struct drm_display_mode *tmd_vid_get_config_mode(struct drm_device *dev)
{
	struct drm_display_mode *mode;
	struct drm_psb_private *dev_priv = dev->dev_private;
	struct mrst_timing_info *ti = &dev_priv->gct_data.DTD;
	bool use_gct = false; /*Disable GCT for now*/

	mode = kzalloc(sizeof(*mode), GFP_KERNEL);
	if (!mode) {
		dev_err(dev->dev, "Out of memory\n");
		return NULL;
	}

	if (use_gct) {
		dev_dbg(dev->dev, "gct find MIPI panel.\n");

		/* All GCT timing fields are split into hi/lo byte pairs. */
		mode->hdisplay = (ti->hactive_hi << 8) | ti->hactive_lo;
		mode->vdisplay = (ti->vactive_hi << 8) | ti->vactive_lo;
		mode->hsync_start = mode->hdisplay +
				((ti->hsync_offset_hi << 8) |
				ti->hsync_offset_lo);
		mode->hsync_end = mode->hsync_start +
				((ti->hsync_pulse_width_hi << 8) |
				ti->hsync_pulse_width_lo);
		mode->htotal = mode->hdisplay + ((ti->hblank_hi << 8) |
				ti->hblank_lo);
		mode->vsync_start = \
			mode->vdisplay + ((ti->vsync_offset_hi << 8) |
				ti->vsync_offset_lo);
		mode->vsync_end = \
			mode->vsync_start + ((ti->vsync_pulse_width_hi << 8) | \
				ti->vsync_pulse_width_lo);
		mode->vtotal = mode->vdisplay +
				((ti->vblank_hi << 8) | ti->vblank_lo);
		mode->clock = ti->pixel_clock * 10;	/* GCT stores clock/10 kHz */

		dev_dbg(dev->dev, "hdisplay is %d\n", mode->hdisplay);
		dev_dbg(dev->dev, "vdisplay is %d\n", mode->vdisplay);
		dev_dbg(dev->dev, "HSS is %d\n", mode->hsync_start);
		dev_dbg(dev->dev, "HSE is %d\n", mode->hsync_end);
		dev_dbg(dev->dev, "htotal is %d\n", mode->htotal);
		dev_dbg(dev->dev, "VSS is %d\n", mode->vsync_start);
		dev_dbg(dev->dev, "VSE is %d\n", mode->vsync_end);
		dev_dbg(dev->dev, "vtotal is %d\n", mode->vtotal);
		dev_dbg(dev->dev, "clock is %d\n", mode->clock);
	} else {
		/* Fixed timings for the 480x854 TMD panel. */
		mode->hdisplay = 480;
		mode->vdisplay = 854;
		mode->hsync_start = 487;
		mode->hsync_end = 490;
		mode->htotal = 499;
		mode->vsync_start = 861;
		mode->vsync_end = 865;
		mode->vtotal = 873;
		mode->clock = 33264;
	}

	drm_mode_set_name(mode);
	drm_mode_set_crtcinfo(mode, 0);

	mode->type |= DRM_MODE_TYPE_PREFERRED;

	return mode;
}
static int tmd_vid_get_panel_info(struct drm_device *dev,
int pipe,
struct panel_info *pi)
{
if (!dev || !pi)
return -EINVAL;
pi->width_mm = TMD_PANEL_WIDTH;
pi->height_mm = TMD_PANEL_HEIGHT;
return 0;
}
/*
* mdfld_init_TMD_MIPI - initialise a TMD interface
* @dsi_config: configuration
* @pipe: pipe to configure
*
* This function is called only by mrst_dsi_mode_set and
* restore_display_registers. since this function does not
* acquire the mutex, it is important that the calling function
* does!
*/
/*
 * mdfld_dsi_tmd_drv_ic_init - one-time init sequence for the TMD panel
 * driver IC, sent as DSI low-power long packets.  The command order below is
 * the panel's documented bring-up sequence - do not reorder.  Idempotent via
 * dsi_config->dvr_ic_inited.
 */
static void mdfld_dsi_tmd_drv_ic_init(struct mdfld_dsi_config *dsi_config,
				      int pipe)
{
	/* Raw command words: low byte is the DCS/generic command, upper
	 * bytes are its parameters (little-endian packing). */
	static u32 tmd_cmd_mcap_off[] = {0x000000b2};
	static u32 tmd_cmd_enable_lane_switch[] = {0x000101ef};
	static u32 tmd_cmd_set_lane_num[] = {0x006360ef};
	static u32 tmd_cmd_pushing_clock0[] = {0x00cc2fef};
	static u32 tmd_cmd_pushing_clock1[] = {0x00dd6eef};
	static u32 tmd_cmd_set_mode[] = {0x000000b3};
	static u32 tmd_cmd_set_sync_pulse_mode[] = {0x000961ef};
	static u32 tmd_cmd_set_column[] = {0x0100002a, 0x000000df};
	static u32 tmd_cmd_set_page[] = {0x0300002b, 0x00000055};
	static u32 tmd_cmd_set_video_mode[] = {0x00000153};
	/*no auto_bl,need add in furture*/
	static u32 tmd_cmd_enable_backlight[] = {0x00005ab4};
	static u32 tmd_cmd_set_backlight_dimming[] = {0x00000ebd};

	struct mdfld_dsi_pkg_sender *sender
			= mdfld_dsi_get_pkg_sender(dsi_config);

	DRM_INFO("Enter mdfld init TMD MIPI display.\n");

	if (!sender) {
		DRM_ERROR("Cannot get sender\n");
		return;
	}

	if (dsi_config->dvr_ic_inited)
		return;

	msleep(3);	/* panel needs a short settle before first command */

	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_mcap_off, 1, 0);
	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_enable_lane_switch, 1, 0);
	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_lane_num, 1, 0);
	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_pushing_clock0, 1, 0);
	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_pushing_clock1, 1, 0);
	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_mode, 1, 0);
	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_sync_pulse_mode, 1, 0);
	mdfld_dsi_send_mcs_long_lp(sender, tmd_cmd_set_column, 2, 0);
	mdfld_dsi_send_mcs_long_lp(sender, tmd_cmd_set_page, 2, 0);
	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_video_mode, 1, 0);
	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_enable_backlight, 1, 0);
	mdfld_dsi_send_gen_long_lp(sender, tmd_cmd_set_backlight_dimming, 1, 0);

	dsi_config->dvr_ic_inited = 1;
}
/* TMD DPI encoder helper funcs - all ops delegate to the generic mdfld DSI
 * DPI implementations. */
static const struct drm_encoder_helper_funcs
					mdfld_tpo_dpi_encoder_helper_funcs = {
	.dpms = mdfld_dsi_dpi_dpms,
	.mode_fixup = mdfld_dsi_dpi_mode_fixup,
	.prepare = mdfld_dsi_dpi_prepare,
	.mode_set = mdfld_dsi_dpi_mode_set,
	.commit = mdfld_dsi_dpi_commit,
};

/* TMD DPI encoder funcs - only cleanup is needed. */
static const struct drm_encoder_funcs mdfld_tpo_dpi_encoder_funcs = {
	.destroy = drm_encoder_cleanup,
};
/*
 * tmd_vid_init - populate the panel_funcs table for the TMD video-mode panel.
 *
 * Fix: the previous error path called dev_err(dev->dev, ...) inside the
 * "!dev || !p_funcs" branch, dereferencing dev exactly when it may be the
 * NULL pointer being reported.  Log with DRM_ERROR (no dev needed) instead.
 */
void tmd_vid_init(struct drm_device *dev, struct panel_funcs *p_funcs)
{
	if (!dev || !p_funcs) {
		DRM_ERROR("tmd_vid_init: invalid parameters\n");
		return;
	}

	p_funcs->encoder_funcs = &mdfld_tpo_dpi_encoder_funcs;
	p_funcs->encoder_helper_funcs = &mdfld_tpo_dpi_encoder_helper_funcs;
	p_funcs->get_config_mode = &tmd_vid_get_config_mode;
	p_funcs->update_fb = NULL;
	p_funcs->get_panel_info = tmd_vid_get_panel_info;
	p_funcs->reset = mdfld_dsi_panel_reset;
	p_funcs->drv_ic_init = mdfld_dsi_tmd_drv_ic_init;
}
| gpl-2.0 |
NStep/nx_bullhead | arch/x86/kernel/setup.c | 601 | 30934 | /*
* Copyright (C) 1995 Linus Torvalds
*
* Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
*
* Memory region support
* David Parsons <orc@pell.chi.il.us>, July-August 1999
*
* Added E820 sanitization routine (removes overlapping memory regions);
* Brian Moyle <bmoyle@mvista.com>, February 2001
*
* Moved CPU detection code to cpu/${cpu}.c
* Patrick Mochel <mochel@osdl.org>, March 2002
*
* Provisions for empty E820 memory regions (reported by certain BIOSes).
* Alex Achenbach <xela@slit.de>, December 2002.
*
*/
/*
* This file handles the architecture-dependent parts of initialization
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/screen_info.h>
#include <linux/ioport.h>
#include <linux/acpi.h>
#include <linux/sfi.h>
#include <linux/apm_bios.h>
#include <linux/initrd.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/efi.h>
#include <linux/init.h>
#include <linux/edd.h>
#include <linux/iscsi_ibft.h>
#include <linux/nodemask.h>
#include <linux/kexec.h>
#include <linux/dmi.h>
#include <linux/pfn.h>
#include <linux/pci.h>
#include <asm/pci-direct.h>
#include <linux/init_ohci1394_dma.h>
#include <linux/kvm_para.h>
#include <linux/dma-contiguous.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/kallsyms.h>
#include <linux/cpufreq.h>
#include <linux/dma-mapping.h>
#include <linux/ctype.h>
#include <linux/uaccess.h>
#include <linux/percpu.h>
#include <linux/crash_dump.h>
#include <linux/tboot.h>
#include <linux/jiffies.h>
#include <video/edid.h>
#include <asm/mtrr.h>
#include <asm/apic.h>
#include <asm/realmode.h>
#include <asm/e820.h>
#include <asm/mpspec.h>
#include <asm/setup.h>
#include <asm/efi.h>
#include <asm/timer.h>
#include <asm/i8259.h>
#include <asm/sections.h>
#include <asm/io_apic.h>
#include <asm/ist.h>
#include <asm/setup_arch.h>
#include <asm/bios_ebda.h>
#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/bugs.h>
#include <asm/vsyscall.h>
#include <asm/cpu.h>
#include <asm/desc.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/gart.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/paravirt.h>
#include <asm/hypervisor.h>
#include <asm/olpc_ofw.h>
#include <asm/percpu.h>
#include <asm/topology.h>
#include <asm/apicdef.h>
#include <asm/amd_nb.h>
#include <asm/mce.h>
#include <asm/alternative.h>
#include <asm/prom.h>
/*
* max_low_pfn_mapped: highest direct mapped pfn under 4GB
* max_pfn_mapped: highest direct mapped pfn over 4GB
*
* The direct mapping only covers E820_RAM regions, so the ranges and gaps are
* represented by pfn_mapped
*/
unsigned long max_low_pfn_mapped;
unsigned long max_pfn_mapped;
#ifdef CONFIG_DMI
RESERVE_BRK(dmi_alloc, 65536);
#endif
static __initdata unsigned long _brk_start = (unsigned long)__brk_base;
unsigned long _brk_end = (unsigned long)__brk_base;
#ifdef CONFIG_X86_64
/* Thin out-of-line wrappers (64-bit only) so the generic APIC code can take
 * the addresses of the __default_* inline helpers. */
int default_cpu_present_to_apicid(int mps_cpu)
{
	return __default_cpu_present_to_apicid(mps_cpu);
}

int default_check_phys_apicid_present(int phys_apicid)
{
	return __default_check_phys_apicid_present(phys_apicid);
}
#endif
struct boot_params boot_params;
/*
* Machine setup..
*/
static struct resource data_resource = {
.name = "Kernel data",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
static struct resource code_resource = {
.name = "Kernel code",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
static struct resource bss_resource = {
.name = "Kernel bss",
.start = 0,
.end = 0,
.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
#ifdef CONFIG_X86_32
/* cpu data as detected by the assembly code in head.S */
struct cpuinfo_x86 new_cpu_data __cpuinitdata = {
.wp_works_ok = -1,
};
/* common cpu data for all cpus */
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
.wp_works_ok = -1,
};
EXPORT_SYMBOL(boot_cpu_data);
unsigned int def_to_bigsmp;
/* for MCA, but anyone else can use it if they want */
unsigned int machine_id;
unsigned int machine_submodel_id;
unsigned int BIOS_revision;
struct apm_info apm_info;
EXPORT_SYMBOL(apm_info);
#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
struct ist_info ist_info;
EXPORT_SYMBOL(ist_info);
#else
struct ist_info ist_info;
#endif
#else
struct cpuinfo_x86 boot_cpu_data __read_mostly = {
.x86_phys_bits = MAX_PHYSMEM_BITS,
};
EXPORT_SYMBOL(boot_cpu_data);
#endif
#if !defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
unsigned long mmu_cr4_features;
#else
unsigned long mmu_cr4_features = X86_CR4_PAE;
#endif
/* Boot loader ID and version as integers, for the benefit of proc_dointvec */
int bootloader_type, bootloader_version;
/*
* Setup options
*/
struct screen_info screen_info;
EXPORT_SYMBOL(screen_info);
struct edid_info edid_info;
EXPORT_SYMBOL_GPL(edid_info);
extern int root_mountflags;
unsigned long saved_video_mode;
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
static char __initdata command_line[COMMAND_LINE_SIZE];
#ifdef CONFIG_CMDLINE_BOOL
static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
#endif
#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
struct edd edd;
#ifdef CONFIG_EDD_MODULE
EXPORT_SYMBOL(edd);
#endif
/**
* copy_edd() - Copy the BIOS EDD information
* from boot_params into a safe place.
*
*/
static inline void __init copy_edd(void)
{
	/* Snapshot the BIOS EDD data out of boot_params before that page can
	 * be reclaimed. */
	memcpy(edd.mbr_signature, boot_params.edd_mbr_sig_buffer,
	       sizeof(edd.mbr_signature));
	memcpy(edd.edd_info, boot_params.eddbuf, sizeof(edd.edd_info));
	edd.mbr_signature_nr = boot_params.edd_mbr_sig_buf_entries;
	edd.edd_info_nr = boot_params.eddbuf_entries;
}
#else
/* EDD support not configured - nothing to copy. */
static inline void __init copy_edd(void)
{
}
#endif
/*
 * extend_brk - carve @size zeroed bytes, aligned to @align (must be a power
 * of two), out of the early brk area.  Only valid before reserve_brk() closes
 * the area (it sets _brk_start to 0 - hence the first BUG_ON).
 */
void * __init extend_brk(size_t size, size_t align)
{
	size_t mask = align - 1;
	void *ret;

	BUG_ON(_brk_start == 0);	/* brk already closed by reserve_brk() */
	BUG_ON(align & mask);		/* alignment must be a power of two */

	_brk_end = (_brk_end + mask) & ~mask;
	BUG_ON((char *)(_brk_end + size) > __brk_limit);

	ret = (void *)_brk_end;
	_brk_end += size;

	memset(ret, 0, size);

	return ret;
}
#ifdef CONFIG_X86_32
/* 32-bit has no high kernel mapping to trim - no-op stub. */
static void __init cleanup_highmap(void)
{
}
#endif
/*
 * reserve_brk - hand the consumed part of the brk area to memblock and close
 * the area for further extend_brk() allocations.
 */
static void __init reserve_brk(void)
{
	if (_brk_end > _brk_start)
		memblock_reserve(__pa_symbol(_brk_start),
				 _brk_end - _brk_start);

	/* Mark brk area as locked down and no longer taking any
	   new allocations */
	_brk_start = 0;
}
#ifdef CONFIG_BLK_DEV_INITRD
/* Compose the full 64-bit ramdisk physical address from the legacy 32-bit
 * header field and the ext_ramdisk_image high half. */
static u64 __init get_ramdisk_image(void)
{
	return ((u64)boot_params.ext_ramdisk_image << 32) |
	       boot_params.hdr.ramdisk_image;
}
/* Compose the full 64-bit ramdisk size from the legacy 32-bit header field
 * and the ext_ramdisk_size high half. */
static u64 __init get_ramdisk_size(void)
{
	return ((u64)boot_params.ext_ramdisk_size << 32) |
	       boot_params.hdr.ramdisk_size;
}
#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT)
/*
 * relocate_initrd - copy the initrd into directly-mapped memory.
 *
 * Finds a page-aligned hole below max_pfn_mapped, reserves it, then copies
 * the image over in MAX_MAP_CHUNK pieces because early_memremap() can only
 * map a limited window at a time.  Panics if no hole is found.
 */
static void __init relocate_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 area_size     = PAGE_ALIGN(ramdisk_size);
	u64 ramdisk_here;
	unsigned long slop, clen, mapaddr;
	char *p, *q;

	/* We need to move the initrd down into directly mapped mem */
	ramdisk_here = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
						 area_size, PAGE_SIZE);

	if (!ramdisk_here)
		panic("Cannot find place for new RAMDISK of size %lld\n",
			 ramdisk_size);

	/* Note: this includes all the mem currently occupied by
	   the initrd, we rely on that fact to keep the data intact. */
	memblock_reserve(ramdisk_here, area_size);
	initrd_start = ramdisk_here + PAGE_OFFSET;
	initrd_end   = initrd_start + ramdisk_size;
	printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
			 ramdisk_here, ramdisk_here + ramdisk_size - 1);

	q = (char *)initrd_start;

	/* Copy the initrd, chunk by chunk through the early mapping window. */
	while (ramdisk_size) {
		slop = ramdisk_image & ~PAGE_MASK;	/* offset within page */
		clen = ramdisk_size;
		if (clen > MAX_MAP_CHUNK-slop)
			clen = MAX_MAP_CHUNK-slop;
		mapaddr = ramdisk_image & PAGE_MASK;
		p = early_memremap(mapaddr, clen+slop);
		memcpy(q, p+slop, clen);
		early_iounmap(p, clen+slop);
		q += clen;
		ramdisk_image += clen;
		ramdisk_size  -= clen;
	}

	ramdisk_image = get_ramdisk_image();
	ramdisk_size  = get_ramdisk_size();
	printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
		" [mem %#010llx-%#010llx]\n",
		ramdisk_image, ramdisk_image + ramdisk_size - 1,
		ramdisk_here, ramdisk_here + ramdisk_size - 1);
}
/*
 * early_reserve_initrd - protect the bootloader-provided initrd from early
 * memblock allocations, before the direct mapping is set up.
 */
static void __init early_reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	memblock_reserve(ramdisk_image, ramdisk_end - ramdisk_image);
}
/*
 * reserve_initrd - finalize the initrd location once the direct mapping
 * exists.  If the image already lies in mapped memory, just record its
 * virtual addresses; otherwise relocate it into mapped memory and free the
 * original range.  Panics if the initrd is larger than half of mapped RAM.
 */
static void __init reserve_initrd(void)
{
	/* Assume only end is not page aligned */
	u64 ramdisk_image = get_ramdisk_image();
	u64 ramdisk_size  = get_ramdisk_size();
	u64 ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);
	u64 mapped_size;

	if (!boot_params.hdr.type_of_loader ||
	    !ramdisk_image || !ramdisk_size)
		return;		/* No initrd provided by bootloader */

	initrd_start = 0;

	mapped_size = memblock_mem_size(max_pfn_mapped);
	if (ramdisk_size >= (mapped_size>>1))
		panic("initrd too large to handle, "
		       "disabling initrd (%lld needed, %lld available)\n",
		       ramdisk_size, mapped_size>>1);

	printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
			ramdisk_end - 1);

	if (pfn_range_is_mapped(PFN_DOWN(ramdisk_image),
				PFN_DOWN(ramdisk_end))) {
		/* All are mapped, easy case */
		initrd_start = ramdisk_image + PAGE_OFFSET;
		initrd_end = initrd_start + ramdisk_size;
		return;
	}

	relocate_initrd();

	/* The original range is no longer needed after relocation. */
	memblock_free(ramdisk_image, ramdisk_end - ramdisk_image);
}
#else
/* CONFIG_BLK_DEV_INITRD disabled - nothing to reserve early. */
static void __init early_reserve_initrd(void)
{
}
/* CONFIG_BLK_DEV_INITRD disabled - nothing to reserve. */
static void __init reserve_initrd(void)
{
}
#endif /* CONFIG_BLK_DEV_INITRD */
/*
 * parse_setup_data - walk the boot_params.hdr.setup_data linked list of
 * physical-address nodes and dispatch the known entry types.  Each node is
 * temporarily mapped with early_memremap() and unmapped before the handler
 * runs (the handlers take the physical address, not the mapping).
 */
static void __init parse_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data, pa_next;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		u32 data_len, map_len, data_type;

		/* Map at least a full struct setup_data, but never cross the
		 * page boundary past what one mapping can cover. */
		map_len = max(PAGE_SIZE - (pa_data & ~PAGE_MASK),
			      (u64)sizeof(struct setup_data));
		data = early_memremap(pa_data, map_len);
		data_len = data->len + sizeof(struct setup_data);
		data_type = data->type;
		pa_next = data->next;
		early_iounmap(data, map_len);

		switch (data_type) {
		case SETUP_E820_EXT:
			parse_e820_ext(pa_data, data_len);
			break;
		case SETUP_DTB:
			add_dtb(pa_data);
			break;
		default:
			break;
		}
		pa_data = pa_next;
	}
}
/*
 * e820_reserve_setup_data - mark every setup_data node's memory as
 * E820_RESERVED_KERN in the e820 map so it survives as reserved, then
 * re-sanitize and snapshot the map.  No-op when the list is empty.
 */
static void __init e820_reserve_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;
	int found = 0;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		e820_update_range(pa_data, sizeof(*data)+data->len,
			 E820_RAM, E820_RESERVED_KERN);
		found = 1;
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
	if (!found)
		return;

	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
	memcpy(&e820_saved, &e820, sizeof(struct e820map));
	printk(KERN_INFO "extended physical RAM map:\n");
	e820_print_map("reserve setup_data");
}
/*
 * memblock_x86_reserve_range_setup_data - reserve each setup_data node
 * (header plus payload) in memblock so early allocations cannot clobber it.
 */
static void __init memblock_x86_reserve_range_setup_data(void)
{
	struct setup_data *data;
	u64 pa_data;

	pa_data = boot_params.hdr.setup_data;
	while (pa_data) {
		data = early_memremap(pa_data, sizeof(*data));
		memblock_reserve(pa_data, sizeof(*data) + data->len);
		pa_data = data->next;
		early_iounmap(data, sizeof(*data));
	}
}
/*
* --------- Crashkernel reservation ------------------------------
*/
#ifdef CONFIG_KEXEC
/*
* Keep the crash kernel below this limit. On 32 bits earlier kernels
* would limit the kernel to the low 512 MiB due to mapping restrictions.
* On 64bit, old kexec-tools need to under 896MiB.
*/
#ifdef CONFIG_X86_32
# define CRASH_KERNEL_ADDR_LOW_MAX (512 << 20)
# define CRASH_KERNEL_ADDR_HIGH_MAX (512 << 20)
#else
# define CRASH_KERNEL_ADDR_LOW_MAX (896UL<<20)
# define CRASH_KERNEL_ADDR_HIGH_MAX MAXMEM
#endif
/*
 * reserve_crashkernel_low - reserve extra crash-kernel memory below 4G
 * (64-bit only).  Needed when the main crash region is above 4G so the crash
 * kernel still has DMA32 memory (swiotlb etc.).  The size comes from
 * "crashkernel=Y,low" or, absent that, from the swiotlb default plus 8M.
 */
static void __init reserve_crashkernel_low(void)
{
#ifdef CONFIG_X86_64
	const unsigned long long alignment = 16<<20;	/* 16M */
	unsigned long long low_base = 0, low_size = 0;
	unsigned long total_low_mem;
	unsigned long long base;
	bool auto_set = false;
	int ret;

	total_low_mem = memblock_mem_size(1UL<<(32-PAGE_SHIFT));
	/* crashkernel=Y,low */
	ret = parse_crashkernel_low(boot_command_line, total_low_mem,
						&low_size, &base);
	if (ret != 0) {
		/*
		 * two parts from lib/swiotlb.c:
		 *	swiotlb size: user specified with swiotlb= or default.
		 *	swiotlb overflow buffer: now is hardcoded to 32k.
		 *		We round it to 8M for other buffers that
		 *		may need to stay low too.
		 */
		low_size = swiotlb_size_or_default() + (8UL<<20);
		auto_set = true;
	} else {
		/* passed with crashkernel=0,low ? */
		if (!low_size)
			return;
	}

	low_base = memblock_find_in_range(low_size, (1ULL<<32),
					low_size, alignment);

	if (!low_base) {
		/* Only complain when the user asked for it explicitly. */
		if (!auto_set)
			pr_info("crashkernel low reservation failed - No suitable area found.\n");

		return;
	}

	memblock_reserve(low_base, low_size);
	pr_info("Reserving %ldMB of low memory at %ldMB for crashkernel (System low RAM: %ldMB)\n",
			(unsigned long)(low_size >> 20),
			(unsigned long)(low_base >> 20),
			(unsigned long)(total_low_mem >> 20));
	crashk_low_res.start = low_base;
	crashk_low_res.end   = low_base + low_size - 1;
	insert_resource(&iomem_resource, &crashk_low_res);
#endif
}
/*
 * reserve_crashkernel - parse "crashkernel=" (plain or ,high variant) and
 * reserve the requested region for the kdump kernel.  A base of 0 means the
 * kernel picks an address itself, bounded by the old-kexec-tools address
 * limits; an explicit base must be free or the reservation fails.  A high
 * (>4G) reservation additionally triggers a low (<4G) companion reservation.
 */
static void __init reserve_crashkernel(void)
{
	const unsigned long long alignment = 16<<20;	/* 16M */
	unsigned long long total_mem;
	unsigned long long crash_size, crash_base;
	bool high = false;
	int ret;

	total_mem = memblock_phys_mem_size();

	/* crashkernel=XM */
	ret = parse_crashkernel(boot_command_line, total_mem,
			&crash_size, &crash_base);
	if (ret != 0 || crash_size <= 0) {
		/* crashkernel=X,high */
		ret = parse_crashkernel_high(boot_command_line, total_mem,
				&crash_size, &crash_base);
		if (ret != 0 || crash_size <= 0)
			return;
		high = true;
	}

	/* 0 means: find the address automatically */
	if (crash_base <= 0) {
		/*
		 *  kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
		 */
		crash_base = memblock_find_in_range(alignment,
			       high ? CRASH_KERNEL_ADDR_HIGH_MAX :
				      CRASH_KERNEL_ADDR_LOW_MAX,
			       crash_size, alignment);

		if (!crash_base) {
			pr_info("crashkernel reservation failed - No suitable area found.\n");
			return;
		}

	} else {
		unsigned long long start;

		/* Verify the user-requested range is actually free. */
		start = memblock_find_in_range(crash_base,
				 crash_base + crash_size, crash_size, 1<<20);
		if (start != crash_base) {
			pr_info("crashkernel reservation failed - memory is in use.\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
			"for crashkernel (System RAM: %ldMB)\n",
			(unsigned long)(crash_size >> 20),
			(unsigned long)(crash_base >> 20),
			(unsigned long)(total_mem >> 20));

	crashk_res.start = crash_base;
	crashk_res.end   = crash_base + crash_size - 1;
	insert_resource(&iomem_resource, &crashk_res);

	if (crash_base >= (1ULL<<32))
		reserve_crashkernel_low();
}
#else
/* CONFIG_KEXEC disabled - no crash kernel reservation. */
static void __init reserve_crashkernel(void)
{
}
#endif
/* Legacy PC I/O ports present on every x86 machine (DMA, PIC, PIT, ...). */
static struct resource standard_io_resources[] = {
	{ .name = "dma1", .start = 0x00, .end = 0x1f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic1", .start = 0x20, .end = 0x21,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer0", .start = 0x40, .end = 0x43,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "timer1", .start = 0x50, .end = 0x53,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x60, .end = 0x60,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "keyboard", .start = 0x64, .end = 0x64,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma page reg", .start = 0x80, .end = 0x8f,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "pic2", .start = 0xa0, .end = 0xa1,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "dma2", .start = 0xc0, .end = 0xdf,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO },
	{ .name = "fpu", .start = 0xf0, .end = 0xff,
		.flags = IORESOURCE_BUSY | IORESOURCE_IO }
};
/* Claim the legacy PC port ranges so nothing else can grab them. */
void __init reserve_standard_io_resources(void)
{
	struct resource *res;

	/* request I/O space for devices used on all i[345]86 PCs */
	for (res = standard_io_resources;
	     res < standard_io_resources + ARRAY_SIZE(standard_io_resources);
	     res++)
		request_resource(&ioport_resource, res);
}
/* Keep the firmware's iSCSI Boot Firmware Table out of the allocator. */
static __init void reserve_ibft_region(void)
{
	unsigned long size = 0;
	unsigned long start = find_ibft_region(&size);

	/* A zero size means firmware provided no iBFT. */
	if (size != 0)
		memblock_reserve(start, size);
}
/*
 * Detect whether device 00:02.0 is a Sandy Bridge integrated GPU that
 * needs the trim_snb_memory() quirk.  Returns false whenever early PCI
 * config access is unavailable, or PCI support is compiled out.
 */
static bool __init snb_gfx_workaround_needed(void)
{
#ifdef CONFIG_PCI
	int i;
	u16 vendor, devid;
	/* PCI device IDs of the affected Sandy Bridge graphics variants. */
	static const __initconst u16 snb_ids[] = {
		0x0102,
		0x0112,
		0x0122,
		0x0106,
		0x0116,
		0x0126,
		0x010a,
	};

	/* Assume no if something weird is going on with PCI */
	if (!early_pci_allowed())
		return false;

	vendor = read_pci_config_16(0, 2, 0, PCI_VENDOR_ID);
	if (vendor != 0x8086)	/* not Intel */
		return false;

	devid = read_pci_config_16(0, 2, 0, PCI_DEVICE_ID);
	for (i = 0; i < ARRAY_SIZE(snb_ids); i++)
		if (devid == snb_ids[i])
			return true;
#endif

	return false;
}
/*
* Sandy Bridge graphics has trouble with certain ranges, exclude
* them from allocation.
*/
static void __init trim_snb_memory(void)
{
	/* Physical pages the SNB GPU is known to access erroneously. */
	static const __initconst unsigned long bad_pages[] = {
		0x20050000,
		0x20110000,
		0x20130000,
		0x20138000,
		0x40004000,
	};
	int i;

	if (!snb_gfx_workaround_needed())
		return;

	printk(KERN_DEBUG "reserving inaccessible SNB gfx pages\n");

	/*
	 * Reserve all memory below the 1 MB mark that has not
	 * already been reserved.
	 */
	memblock_reserve(0, 1<<20);

	/* memblock_reserve() returns non-zero on failure — warn, don't stop. */
	for (i = 0; i < ARRAY_SIZE(bad_pages); i++) {
		if (memblock_reserve(bad_pages[i], PAGE_SIZE))
			printk(KERN_WARNING "failed to reserve 0x%08lx\n",
			       bad_pages[i]);
	}
}
/*
* Here we put platform-specific memory range workarounds, i.e.
* memory known to be corrupt or otherwise in need to be reserved on
* specific platforms.
*
* If this gets used more widely it could use a real dispatch mechanism.
*/
static void __init trim_platform_memory_ranges(void)
{
	/* Currently only the Sandy Bridge graphics quirk uses this hook. */
	trim_snb_memory();
}
static void __init trim_bios_range(void)
{
	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 *
	 * NOTE(review): the original comment here also mentioned the
	 * X86_RESERVE_LOW reservation, but that is actually performed in
	 * trim_low_memory_range(); only the first page is re-typed here.
	 */
	e820_update_range(0, PAGE_SIZE, E820_RAM, E820_RESERVED);

	/*
	 * special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 * take them out.
	 */
	e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);

	/* Merge/sort the map after the edits above. */
	sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
}
/* called before trim_bios_range() to spare extra sanitize */
/* Make sure the physical range covering the kernel image is typed RAM. */
static void __init e820_add_kernel_range(void)
{
	u64 start = __pa_symbol(_text);
	u64 size = __pa_symbol(_end) - start;

	/*
	 * Complain if .text .data and .bss are not marked as E820_RAM and
	 * attempt to fix it by adding the range. We may have a confused BIOS,
	 * or the user may have used memmap=exactmap or memmap=xxM$yyM to
	 * exclude kernel range. If we really are running on top non-RAM,
	 * we will crash later anyways.
	 */
	if (e820_all_mapped(start, start + size, E820_RAM))
		return;

	pr_warn(".text .data .bss are not marked as E820_RAM!\n");
	e820_remove_range(start, size, E820_RAM, 0);
	e820_add_region(start, size, E820_RAM);
}
/* Bytes of low memory to leave untouched; default from Kconfig (KiB -> B). */
static unsigned reserve_low = CONFIG_X86_RESERVE_LOW << 10;

/*
 * "reservelow=" early parameter: override the amount of low memory to
 * reserve, clamped to the [4K, 640K] range.
 */
static int __init parse_reservelow(char *p)
{
	unsigned long long size;

	if (p == NULL)
		return -EINVAL;

	size = memparse(p, &p);

	if (size < 4096)
		size = 4096;
	else if (size > 640*1024)
		size = 640*1024;

	reserve_low = size;

	return 0;
}

early_param("reservelow", parse_reservelow);
static void __init trim_low_memory_range(void)
{
	/*
	 * Keep the first reserve_low bytes (page-aligned) away from the
	 * allocator; some BIOSes are known to corrupt low memory (see the
	 * X86_RESERVE_LOW Kconfig help and parse_reservelow() above).
	 */
	memblock_reserve(0, ALIGN(reserve_low, PAGE_SIZE));
}
/*
* Determine if we were loaded by an EFI loader. If so, then we have also been
* passed the efi memmap, systab, etc., so we should use these data structures
* for initialization. Note, the efi init code path is determined by the
* global efi_enabled. This allows the same kernel image to be used on existing
* systems (with a traditional BIOS) as well as on EFI systems.
*/
/*
* setup_arch - architecture-specific boot-time initializations
*
* Note: On x86_64, fixmaps are ready for use even before this is called.
*/
void __init setup_arch(char **cmdline_p)
{
	/*
	 * Reserve the kernel image (_text through __bss_stop) before any
	 * other memblock user can allocate over it.
	 */
	memblock_reserve(__pa_symbol(_text),
			 (unsigned long)__bss_stop - (unsigned long)_text);

	early_reserve_initrd();

	/*
	 * At this point everything still needed from the boot loader
	 * or BIOS or kernel text should be early reserved or marked not
	 * RAM in e820. All other memory is free game.
	 */

#ifdef CONFIG_X86_32
	memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
	visws_early_detect();

	/*
	 * copy kernel address range established so far and switch
	 * to the proper swapper page table
	 */
	clone_pgd_range(swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			initial_page_table + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);

	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#else
	printk(KERN_INFO "Command line: %s\n", boot_command_line);
#endif

	/*
	 * If we have OLPC OFW, we might end up relocating the fixmap due to
	 * reserve_top(), so do this before touching the ioremap area.
	 */
	olpc_ofw_detect();

	early_trap_init();
	early_cpu_init();
	early_ioremap_init();

	setup_olpc_ofw_pgd();

	/* Pull boot-protocol data out of the zero page. */
	ROOT_DEV = old_decode_dev(boot_params.hdr.root_dev);
	screen_info = boot_params.screen_info;
	edid_info = boot_params.edid_info;
#ifdef CONFIG_X86_32
	apm_info.bios = boot_params.apm_bios_info;
	ist_info = boot_params.ist_info;
	if (boot_params.sys_desc_table.length != 0) {
		machine_id = boot_params.sys_desc_table.table[0];
		machine_submodel_id = boot_params.sys_desc_table.table[1];
		BIOS_revision = boot_params.sys_desc_table.table[2];
	}
#endif
	saved_video_mode = boot_params.hdr.vid_mode;
	bootloader_type = boot_params.hdr.type_of_loader;
	/* 0xe. means an extended bootloader type field is in use. */
	if ((bootloader_type >> 4) == 0xe) {
		bootloader_type &= 0xf;
		bootloader_type |= (boot_params.hdr.ext_loader_type+0x10) << 4;
	}
	bootloader_version = bootloader_type & 0xf;
	bootloader_version |= boot_params.hdr.ext_loader_ver << 4;

#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = boot_params.hdr.ram_size & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((boot_params.hdr.ram_size & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((boot_params.hdr.ram_size & RAMDISK_LOAD_FLAG) != 0);
#endif
#ifdef CONFIG_EFI
	/* The loader signature says whether a 32- or 64-bit EFI booted us. */
	if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL32", 4)) {
		set_bit(EFI_BOOT, &efi.flags);
	} else if (!strncmp((char *)&boot_params.efi_info.efi_loader_signature,
		     "EL64", 4)) {
		set_bit(EFI_BOOT, &efi.flags);
		set_bit(EFI_64BIT, &efi.flags);
	}

	if (efi_enabled(EFI_BOOT))
		efi_memblock_x86_reserve_range();
#endif

	x86_init.oem.arch_setup();

	iomem_resource.end = (1ULL << boot_cpu_data.x86_phys_bits) - 1;
	setup_memory_map();
	parse_setup_data();
	/* update the e820_saved too */
	e820_reserve_setup_data();

	copy_edd();

	if (!boot_params.hdr.root_flags)
		root_mountflags &= ~MS_RDONLY;
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = _brk_end;

	code_resource.start = __pa_symbol(_text);
	code_resource.end = __pa_symbol(_etext)-1;
	data_resource.start = __pa_symbol(_etext);
	data_resource.end = __pa_symbol(_edata)-1;
	bss_resource.start = __pa_symbol(__bss_start);
	bss_resource.end = __pa_symbol(__bss_stop)-1;

#ifdef CONFIG_CMDLINE_BOOL
#ifdef CONFIG_CMDLINE_OVERRIDE
	strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
#else
	if (builtin_cmdline[0]) {
		/* append boot loader cmdline to builtin */
		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
		strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
	}
#endif
#endif

	strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	/*
	 * x86_configure_nx() is called before parse_early_param() to detect
	 * whether hardware doesn't support NX (so that the early EHCI debug
	 * console setup can safely call set_fixmap()). It may then be called
	 * again from within noexec_setup() during parsing early parameters
	 * to honor the respective command line option.
	 */
	x86_configure_nx();

	parse_early_param();

	x86_report_nx();

	/* after early param, so could get panic from serial */
	memblock_x86_reserve_range_setup_data();

	if (acpi_mps_check()) {
#ifdef CONFIG_X86_LOCAL_APIC
		disable_apic = 1;
#endif
		setup_clear_cpu_cap(X86_FEATURE_APIC);
	}

#ifdef CONFIG_PCI
	if (pci_early_dump_regs)
		early_dump_pci_devices();
#endif

	finish_e820_parsing();

	if (efi_enabled(EFI_BOOT))
		efi_init();

	dmi_scan_machine();
	dmi_set_dump_stack_arch_desc();

	/*
	 * VMware detection requires dmi to be available, so this
	 * needs to be done after dmi_scan_machine, for the BP.
	 */
	init_hypervisor_platform();

	x86_init.resources.probe_roms();

	/* after parse_early_param, so could debug it */
	insert_resource(&iomem_resource, &code_resource);
	insert_resource(&iomem_resource, &data_resource);
	insert_resource(&iomem_resource, &bss_resource);

	e820_add_kernel_range();
	trim_bios_range();
#ifdef CONFIG_X86_32
	if (ppro_with_ram_bug()) {
		e820_update_range(0x70000000ULL, 0x40000ULL, E820_RAM,
				  E820_RESERVED);
		sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
		printk(KERN_INFO "fixed physical RAM map:\n");
		e820_print_map("bad_ppro");
	}
#else
	early_gart_iommu_check();
#endif

	/*
	 * partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	max_pfn = e820_end_of_ram_pfn();

	/* update e820 for memory not covered by WB MTRRs */
	mtrr_bp_init();
	if (mtrr_trim_uncached_memory(max_pfn))
		max_pfn = e820_end_of_ram_pfn();

#ifdef CONFIG_X86_32
	/* max_low_pfn get updated here */
	find_low_pfn_range();
#else
	num_physpages = max_pfn;

	check_x2apic();

	/* How many end-of-memory variables you have, grandma! */
	/* need this before calling reserve_initrd */
	if (max_pfn > (1UL<<(32 - PAGE_SHIFT)))
		max_low_pfn = e820_end_of_low_ram_pfn();
	else
		max_low_pfn = max_pfn;

	high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
#endif

	/*
	 * Find and reserve possible boot-time SMP configuration:
	 */
	find_smp_config();

	reserve_ibft_region();

	early_alloc_pgt_buf();

	/*
	 * Need to conclude brk, before memblock_x86_fill()
	 * it could use memblock_find_in_range, could overlap with
	 * brk area.
	 */
	reserve_brk();

	cleanup_highmap();

	memblock.current_limit = ISA_END_ADDRESS;
	memblock_x86_fill();

	/*
	 * The EFI specification says that boot service code won't be called
	 * after ExitBootServices(). This is, in fact, a lie.
	 */
	if (efi_enabled(EFI_MEMMAP))
		efi_reserve_boot_services();

	/* preallocate 4k for mptable mpc */
	early_reserve_e820_mpc_new();

#ifdef CONFIG_X86_CHECK_BIOS_CORRUPTION
	setup_bios_corruption_check();
#endif

#ifdef CONFIG_X86_32
	printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
			(max_pfn_mapped<<PAGE_SHIFT) - 1);
#endif

	reserve_real_mode();

	trim_platform_memory_ranges();
	trim_low_memory_range();

	init_mem_mapping();

	early_trap_pf_init();

	setup_real_mode();

	memblock.current_limit = get_max_mapped();
	dma_contiguous_reserve(0);

	/*
	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
	 */

#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
	if (init_ohci1394_dma_early)
		init_ohci1394_dma_on_all_controllers();
#endif
	/* Allocate bigger log buffer */
	setup_log_buf(1);

	reserve_initrd();

#if defined(CONFIG_ACPI) && defined(CONFIG_BLK_DEV_INITRD)
	acpi_initrd_override((void *)initrd_start, initrd_end - initrd_start);
#endif

	reserve_crashkernel();

	vsmp_init();

	io_delay_init();

	/*
	 * Parse the ACPI tables for possible boot-time SMP configuration.
	 */
	acpi_boot_table_init();

	early_acpi_boot_init();

	initmem_init();
	memblock_find_dma_reserve();

#ifdef CONFIG_KVM_GUEST
	kvmclock_init();
#endif

	x86_init.paging.pagetable_init();

	if (boot_cpu_data.cpuid_level >= 0) {
		/* A CPU has %cr4 if and only if it has CPUID */
		mmu_cr4_features = read_cr4();
		if (trampoline_cr4_features)
			*trampoline_cr4_features = mmu_cr4_features;
	}
#ifdef CONFIG_X86_32
	/* sync back kernel address range */
	clone_pgd_range(initial_page_table + KERNEL_PGD_BOUNDARY,
			swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			KERNEL_PGD_PTRS);
#endif

	tboot_probe();

#ifdef CONFIG_X86_64
	map_vsyscall();
#endif

	generic_apic_probe();

	early_quirks();

	/*
	 * Read APIC and some other early information from ACPI tables.
	 */
	acpi_boot_init();
	sfi_init();
	x86_dtb_init();

	/*
	 * get boot-time SMP configuration:
	 */
	if (smp_found_config)
		get_smp_config();

	prefill_possible_map();

	init_cpu_to_node();

	init_apic_mappings();
	if (x86_io_apic_ops.init)
		x86_io_apic_ops.init();

	kvm_guest_init();

	e820_reserve_resources();
	e820_mark_nosave_regions(max_low_pfn);

	x86_init.resources.reserve_resources();

	e820_setup_gap();

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	if (!efi_enabled(EFI_BOOT) || (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
		conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif
	x86_init.oem.banner();

	x86_init.timers.wallclock_init();

	mcheck_init();

	arch_init_ideal_nops();

	register_refined_jiffies(CLOCK_TICK_RATE);

#ifdef CONFIG_EFI
	/* Once setup is done above, unmap the EFI memory map on
	 * mismatched firmware/kernel architectures since there is no
	 * support for runtime services.
	 */
	if (efi_enabled(EFI_BOOT) && !efi_is_native()) {
		pr_info("efi: Setup done, disabling due to 32/64-bit mismatch\n");
		efi_unmap_memmap();
	}
#endif
}
#ifdef CONFIG_X86_32
/* Legacy VGA framebuffer window (0xa0000-0xbffff), claimed as busy. */
static struct resource video_ram_resource = {
	.name = "Video RAM area",
	.start = 0xa0000,
	.end = 0xbffff,
	.flags = IORESOURCE_BUSY | IORESOURCE_MEM
};
void __init i386_reserve_resources(void)
{
	/* Claim VGA memory and the legacy PC I/O port ranges on 32-bit. */
	request_resource(&iomem_resource, &video_ram_resource);
	reserve_standard_io_resources();
}
#endif /* CONFIG_X86_32 */
| gpl-2.0 |
hash07/Apollo_X | net/bridge/br_sysfs_br.c | 2137 | 24623 | /*
* Sysfs attributes of bridge ports
* Linux ethernet bridge
*
* Authors:
* Stephen Hemminger <shemminger@osdl.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/capability.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if_bridge.h>
#include <linux/rtnetlink.h>
#include <linux/spinlock.h>
#include <linux/times.h>
#include "br_private.h"
#define to_dev(obj) container_of(obj, struct device, kobj)
#define to_bridge(cd) ((struct net_bridge *)netdev_priv(to_net_dev(cd)))
/*
* Common code for storing bridge parameters.
*/
/*
 * Shared sysfs store helper: parse an unsigned long from @buf and feed
 * it to @set.  Requires CAP_NET_ADMIN in the bridge's network namespace.
 * Returns @len so the write is consumed in one go, or a negative errno.
 */
static ssize_t store_bridge_parm(struct device *d,
				 const char *buf, size_t len,
				 int (*set)(struct net_bridge *, unsigned long))
{
	struct net_bridge *br = to_bridge(d);
	unsigned long val;
	char *endp;
	int err;

	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EINVAL;

	err = set(br, val);
	return err ? err : len;
}
static ssize_t show_forward_delay(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->forward_delay));
}
static ssize_t store_forward_delay(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_set_forward_delay);
}
static DEVICE_ATTR(forward_delay, S_IRUGO | S_IWUSR,
show_forward_delay, store_forward_delay);
static ssize_t show_hello_time(struct device *d, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(to_bridge(d)->hello_time));
}
static ssize_t store_hello_time(struct device *d,
struct device_attribute *attr, const char *buf,
size_t len)
{
return store_bridge_parm(d, buf, len, br_set_hello_time);
}
static DEVICE_ATTR(hello_time, S_IRUGO | S_IWUSR, show_hello_time,
store_hello_time);
static ssize_t show_max_age(struct device *d, struct device_attribute *attr,
char *buf)
{
return sprintf(buf, "%lu\n",
jiffies_to_clock_t(to_bridge(d)->max_age));
}
static ssize_t store_max_age(struct device *d, struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, br_set_max_age);
}
static DEVICE_ATTR(max_age, S_IRUGO | S_IWUSR, show_max_age, store_max_age);
static ssize_t show_ageing_time(struct device *d,
struct device_attribute *attr, char *buf)
{
struct net_bridge *br = to_bridge(d);
return sprintf(buf, "%lu\n", jiffies_to_clock_t(br->ageing_time));
}
static int set_ageing_time(struct net_bridge *br, unsigned long val)
{
br->ageing_time = clock_t_to_jiffies(val);
return 0;
}
static ssize_t store_ageing_time(struct device *d,
struct device_attribute *attr,
const char *buf, size_t len)
{
return store_bridge_parm(d, buf, len, set_ageing_time);
}
static DEVICE_ATTR(ageing_time, S_IRUGO | S_IWUSR, show_ageing_time,
store_ageing_time);
static ssize_t show_stp_state(struct device *d,
			      struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%d\n", br->stp_enabled);
}

static ssize_t store_stp_state(struct device *d,
			       struct device_attribute *attr, const char *buf,
			       size_t len)
{
	struct net_bridge *br = to_bridge(d);
	char *endp;
	unsigned long val;

	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EINVAL;

	/*
	 * rtnl_trylock() + restart_syscall() rather than rtnl_lock():
	 * presumably the usual sysfs-vs-rtnl deadlock avoidance — confirm
	 * against br_stp_set_enabled()'s locking requirements.
	 */
	if (!rtnl_trylock())
		return restart_syscall();
	br_stp_set_enabled(br, val);
	rtnl_unlock();

	return len;
}
static DEVICE_ATTR(stp_state, S_IRUGO | S_IWUSR, show_stp_state,
		   store_stp_state);
static ssize_t show_group_fwd_mask(struct device *d,
				   struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%#x\n", br->group_fwd_mask);
}

static ssize_t store_group_fwd_mask(struct device *d,
				    struct device_attribute *attr, const char *buf,
				    size_t len)
{
	struct net_bridge *br = to_bridge(d);
	char *endp;
	unsigned long val;

	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &endp, 0);
	if (endp == buf)
		return -EINVAL;

	/* Reject masks that would forward restricted group addresses. */
	if (val & BR_GROUPFWD_RESTRICTED)
		return -EINVAL;

	br->group_fwd_mask = val;

	return len;
}
static DEVICE_ATTR(group_fwd_mask, S_IRUGO | S_IWUSR, show_group_fwd_mask,
		   store_group_fwd_mask);
/* Bridge priority: the upper 16 bits of the bridge id. */
static ssize_t show_priority(struct device *d, struct device_attribute *attr,
			     char *buf)
{
	struct net_bridge *br = to_bridge(d);
	int prio = (br->bridge_id.prio[0] << 8) | br->bridge_id.prio[1];

	return sprintf(buf, "%d\n", prio);
}

static int set_priority(struct net_bridge *br, unsigned long val)
{
	br_stp_set_bridge_priority(br, (u16) val);
	return 0;
}

static ssize_t store_priority(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return store_bridge_parm(d, buf, len, set_priority);
}
static DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, show_priority, store_priority);
/* Read-only STP status: ids, root port/cost, and running timers. */
static ssize_t show_root_id(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	return br_show_bridge_id(buf, &to_bridge(d)->designated_root);
}
static DEVICE_ATTR(root_id, S_IRUGO, show_root_id, NULL);

static ssize_t show_bridge_id(struct device *d, struct device_attribute *attr,
			      char *buf)
{
	return br_show_bridge_id(buf, &to_bridge(d)->bridge_id);
}
static DEVICE_ATTR(bridge_id, S_IRUGO, show_bridge_id, NULL);

static ssize_t show_root_port(struct device *d, struct device_attribute *attr,
			      char *buf)
{
	return sprintf(buf, "%d\n", to_bridge(d)->root_port);
}
static DEVICE_ATTR(root_port, S_IRUGO, show_root_port, NULL);

static ssize_t show_root_path_cost(struct device *d,
				   struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_bridge(d)->root_path_cost);
}
static DEVICE_ATTR(root_path_cost, S_IRUGO, show_root_path_cost, NULL);

static ssize_t show_topology_change(struct device *d,
				    struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_bridge(d)->topology_change);
}
static DEVICE_ATTR(topology_change, S_IRUGO, show_topology_change, NULL);

static ssize_t show_topology_change_detected(struct device *d,
					     struct device_attribute *attr,
					     char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%d\n", br->topology_change_detected);
}
static DEVICE_ATTR(topology_change_detected, S_IRUGO,
		   show_topology_change_detected, NULL);

/* Timers below report remaining time via br_timer_value(). */
static ssize_t show_hello_timer(struct device *d,
				struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%ld\n", br_timer_value(&br->hello_timer));
}
static DEVICE_ATTR(hello_timer, S_IRUGO, show_hello_timer, NULL);

static ssize_t show_tcn_timer(struct device *d, struct device_attribute *attr,
			      char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%ld\n", br_timer_value(&br->tcn_timer));
}
static DEVICE_ATTR(tcn_timer, S_IRUGO, show_tcn_timer, NULL);

static ssize_t show_topology_change_timer(struct device *d,
					  struct device_attribute *attr,
					  char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%ld\n", br_timer_value(&br->topology_change_timer));
}
static DEVICE_ATTR(topology_change_timer, S_IRUGO, show_topology_change_timer,
		   NULL);

static ssize_t show_gc_timer(struct device *d, struct device_attribute *attr,
			     char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%ld\n", br_timer_value(&br->gc_timer));
}
static DEVICE_ATTR(gc_timer, S_IRUGO, show_gc_timer, NULL);
static ssize_t show_group_addr(struct device *d,
			       struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%x:%x:%x:%x:%x:%x\n",
		       br->group_addr[0], br->group_addr[1],
		       br->group_addr[2], br->group_addr[3],
		       br->group_addr[4], br->group_addr[5]);
}

static ssize_t store_group_addr(struct device *d,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	struct net_bridge *br = to_bridge(d);
	u8 new_addr[6];
	int i;

	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	if (sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx",
		   &new_addr[0], &new_addr[1], &new_addr[2],
		   &new_addr[3], &new_addr[4], &new_addr[5]) != 6)
		return -EINVAL;

	/* Must be an IEEE 802 link-local multicast address. */
	if (!is_link_local_ether_addr(new_addr))
		return -EINVAL;

	/* Reject addresses with fixed meanings within that range. */
	if (new_addr[5] == 1 ||		/* 802.3x Pause address */
	    new_addr[5] == 2 ||		/* 802.3ad Slow protocols */
	    new_addr[5] == 3)		/* 802.1X PAE address */
		return -EINVAL;

	/* Copy under br->lock so readers never see a half-written MAC. */
	spin_lock_bh(&br->lock);
	for (i = 0; i < 6; i++)
		br->group_addr[i] = new_addr[i];
	spin_unlock_bh(&br->lock);
	return len;
}

static DEVICE_ATTR(group_addr, S_IRUGO | S_IWUSR,
		   show_group_addr, store_group_addr);
static ssize_t store_flush(struct device *d,
			   struct device_attribute *attr,
			   const char *buf, size_t len)
{
	struct net_bridge *br = to_bridge(d);

	/* Any write (value ignored) flushes the learned FDB entries. */
	if (!ns_capable(dev_net(br->dev)->user_ns, CAP_NET_ADMIN))
		return -EPERM;

	br_fdb_flush(br);
	return len;
}
static DEVICE_ATTR(flush, S_IWUSR, NULL, store_flush);
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* Multicast (IGMP/MLD snooping) tunables. */
static ssize_t show_multicast_router(struct device *d,
				     struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%d\n", br->multicast_router);
}

static ssize_t store_multicast_router(struct device *d,
				      struct device_attribute *attr,
				      const char *buf, size_t len)
{
	return store_bridge_parm(d, buf, len, br_multicast_set_router);
}
static DEVICE_ATTR(multicast_router, S_IRUGO | S_IWUSR, show_multicast_router,
		   store_multicast_router);

static ssize_t show_multicast_snooping(struct device *d,
				       struct device_attribute *attr,
				       char *buf)
{
	struct net_bridge *br = to_bridge(d);
	/* Inverted: the field tracks "disabled", sysfs exposes "enabled". */
	return sprintf(buf, "%d\n", !br->multicast_disabled);
}

static ssize_t store_multicast_snooping(struct device *d,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	return store_bridge_parm(d, buf, len, br_multicast_toggle);
}
static DEVICE_ATTR(multicast_snooping, S_IRUGO | S_IWUSR,
		   show_multicast_snooping, store_multicast_snooping);

static ssize_t show_multicast_querier(struct device *d,
				      struct device_attribute *attr,
				      char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%d\n", br->multicast_querier);
}

static ssize_t store_multicast_querier(struct device *d,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	return store_bridge_parm(d, buf, len, br_multicast_set_querier);
}
static DEVICE_ATTR(multicast_querier, S_IRUGO | S_IWUSR,
		   show_multicast_querier, store_multicast_querier);

static ssize_t show_hash_elasticity(struct device *d,
				    struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%u\n", br->hash_elasticity);
}

static int set_elasticity(struct net_bridge *br, unsigned long val)
{
	br->hash_elasticity = val;
	return 0;
}

static ssize_t store_hash_elasticity(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t len)
{
	return store_bridge_parm(d, buf, len, set_elasticity);
}
static DEVICE_ATTR(hash_elasticity, S_IRUGO | S_IWUSR, show_hash_elasticity,
		   store_hash_elasticity);

static ssize_t show_hash_max(struct device *d, struct device_attribute *attr,
			     char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%u\n", br->hash_max);
}

static ssize_t store_hash_max(struct device *d, struct device_attribute *attr,
			      const char *buf, size_t len)
{
	return store_bridge_parm(d, buf, len, br_multicast_set_hash_max);
}
static DEVICE_ATTR(hash_max, S_IRUGO | S_IWUSR, show_hash_max,
		   store_hash_max);

static ssize_t show_multicast_last_member_count(struct device *d,
						struct device_attribute *attr,
						char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%u\n", br->multicast_last_member_count);
}

static int set_last_member_count(struct net_bridge *br, unsigned long val)
{
	br->multicast_last_member_count = val;
	return 0;
}

static ssize_t store_multicast_last_member_count(struct device *d,
						 struct device_attribute *attr,
						 const char *buf, size_t len)
{
	return store_bridge_parm(d, buf, len, set_last_member_count);
}
static DEVICE_ATTR(multicast_last_member_count, S_IRUGO | S_IWUSR,
		   show_multicast_last_member_count,
		   store_multicast_last_member_count);

static ssize_t show_multicast_startup_query_count(
	struct device *d, struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%u\n", br->multicast_startup_query_count);
}

static int set_startup_query_count(struct net_bridge *br, unsigned long val)
{
	br->multicast_startup_query_count = val;
	return 0;
}

static ssize_t store_multicast_startup_query_count(
	struct device *d, struct device_attribute *attr, const char *buf,
	size_t len)
{
	return store_bridge_parm(d, buf, len, set_startup_query_count);
}
static DEVICE_ATTR(multicast_startup_query_count, S_IRUGO | S_IWUSR,
		   show_multicast_startup_query_count,
		   store_multicast_startup_query_count);
/*
 * The multicast timing parameters below are stored in jiffies but
 * exposed to user space in clock_t units.
 */
static ssize_t show_multicast_last_member_interval(
	struct device *d, struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%lu\n",
		       jiffies_to_clock_t(br->multicast_last_member_interval));
}

static int set_last_member_interval(struct net_bridge *br, unsigned long val)
{
	br->multicast_last_member_interval = clock_t_to_jiffies(val);
	return 0;
}

static ssize_t store_multicast_last_member_interval(
	struct device *d, struct device_attribute *attr, const char *buf,
	size_t len)
{
	return store_bridge_parm(d, buf, len, set_last_member_interval);
}
static DEVICE_ATTR(multicast_last_member_interval, S_IRUGO | S_IWUSR,
		   show_multicast_last_member_interval,
		   store_multicast_last_member_interval);

static ssize_t show_multicast_membership_interval(
	struct device *d, struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%lu\n",
		       jiffies_to_clock_t(br->multicast_membership_interval));
}

static int set_membership_interval(struct net_bridge *br, unsigned long val)
{
	br->multicast_membership_interval = clock_t_to_jiffies(val);
	return 0;
}

static ssize_t store_multicast_membership_interval(
	struct device *d, struct device_attribute *attr, const char *buf,
	size_t len)
{
	return store_bridge_parm(d, buf, len, set_membership_interval);
}
static DEVICE_ATTR(multicast_membership_interval, S_IRUGO | S_IWUSR,
		   show_multicast_membership_interval,
		   store_multicast_membership_interval);

static ssize_t show_multicast_querier_interval(struct device *d,
					       struct device_attribute *attr,
					       char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%lu\n",
		       jiffies_to_clock_t(br->multicast_querier_interval));
}

static int set_querier_interval(struct net_bridge *br, unsigned long val)
{
	br->multicast_querier_interval = clock_t_to_jiffies(val);
	return 0;
}

static ssize_t store_multicast_querier_interval(struct device *d,
						struct device_attribute *attr,
						const char *buf, size_t len)
{
	return store_bridge_parm(d, buf, len, set_querier_interval);
}
static DEVICE_ATTR(multicast_querier_interval, S_IRUGO | S_IWUSR,
		   show_multicast_querier_interval,
		   store_multicast_querier_interval);

static ssize_t show_multicast_query_interval(struct device *d,
					     struct device_attribute *attr,
					     char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%lu\n",
		       jiffies_to_clock_t(br->multicast_query_interval));
}

static int set_query_interval(struct net_bridge *br, unsigned long val)
{
	br->multicast_query_interval = clock_t_to_jiffies(val);
	return 0;
}

static ssize_t store_multicast_query_interval(struct device *d,
					      struct device_attribute *attr,
					      const char *buf, size_t len)
{
	return store_bridge_parm(d, buf, len, set_query_interval);
}
static DEVICE_ATTR(multicast_query_interval, S_IRUGO | S_IWUSR,
		   show_multicast_query_interval,
		   store_multicast_query_interval);

static ssize_t show_multicast_query_response_interval(
	struct device *d, struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(
		buf, "%lu\n",
		jiffies_to_clock_t(br->multicast_query_response_interval));
}

static int set_query_response_interval(struct net_bridge *br, unsigned long val)
{
	br->multicast_query_response_interval = clock_t_to_jiffies(val);
	return 0;
}

static ssize_t store_multicast_query_response_interval(
	struct device *d, struct device_attribute *attr, const char *buf,
	size_t len)
{
	return store_bridge_parm(d, buf, len, set_query_response_interval);
}
static DEVICE_ATTR(multicast_query_response_interval, S_IRUGO | S_IWUSR,
		   show_multicast_query_response_interval,
		   store_multicast_query_response_interval);

static ssize_t show_multicast_startup_query_interval(
	struct device *d, struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(
		buf, "%lu\n",
		jiffies_to_clock_t(br->multicast_startup_query_interval));
}

static int set_startup_query_interval(struct net_bridge *br, unsigned long val)
{
	br->multicast_startup_query_interval = clock_t_to_jiffies(val);
	return 0;
}

static ssize_t store_multicast_startup_query_interval(
	struct device *d, struct device_attribute *attr, const char *buf,
	size_t len)
{
	return store_bridge_parm(d, buf, len, set_startup_query_interval);
}
static DEVICE_ATTR(multicast_startup_query_interval, S_IRUGO | S_IWUSR,
		   show_multicast_startup_query_interval,
		   store_multicast_startup_query_interval);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
/* Whether bridged traffic is passed through {ip,ip6,arp}tables. */
static ssize_t show_nf_call_iptables(
	struct device *d, struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%u\n", br->nf_call_iptables);
}

static int set_nf_call_iptables(struct net_bridge *br, unsigned long val)
{
	br->nf_call_iptables = val ? true : false;
	return 0;
}

static ssize_t store_nf_call_iptables(
	struct device *d, struct device_attribute *attr, const char *buf,
	size_t len)
{
	return store_bridge_parm(d, buf, len, set_nf_call_iptables);
}
static DEVICE_ATTR(nf_call_iptables, S_IRUGO | S_IWUSR,
		   show_nf_call_iptables, store_nf_call_iptables);

static ssize_t show_nf_call_ip6tables(
	struct device *d, struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%u\n", br->nf_call_ip6tables);
}

static int set_nf_call_ip6tables(struct net_bridge *br, unsigned long val)
{
	br->nf_call_ip6tables = val ? true : false;
	return 0;
}

static ssize_t store_nf_call_ip6tables(
	struct device *d, struct device_attribute *attr, const char *buf,
	size_t len)
{
	return store_bridge_parm(d, buf, len, set_nf_call_ip6tables);
}
static DEVICE_ATTR(nf_call_ip6tables, S_IRUGO | S_IWUSR,
		   show_nf_call_ip6tables, store_nf_call_ip6tables);

static ssize_t show_nf_call_arptables(
	struct device *d, struct device_attribute *attr, char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%u\n", br->nf_call_arptables);
}

static int set_nf_call_arptables(struct net_bridge *br, unsigned long val)
{
	br->nf_call_arptables = val ? true : false;
	return 0;
}

static ssize_t store_nf_call_arptables(
	struct device *d, struct device_attribute *attr, const char *buf,
	size_t len)
{
	return store_bridge_parm(d, buf, len, set_nf_call_arptables);
}
static DEVICE_ATTR(nf_call_arptables, S_IRUGO | S_IWUSR,
		   show_nf_call_arptables, store_nf_call_arptables);
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
/* sysfs "vlan_filtering": 0/1 flag enabling VLAN-aware bridging. */
static ssize_t show_vlan_filtering(struct device *d,
				   struct device_attribute *attr,
				   char *buf)
{
	struct net_bridge *br = to_bridge(d);
	return sprintf(buf, "%d\n", br->vlan_enabled);
}
static ssize_t store_vlan_filtering(struct device *d,
				    struct device_attribute *attr,
				    const char *buf, size_t len)
{
	/* br_vlan_filter_toggle() is the shared toggle used here as the
	 * store_bridge_parm() setter */
	return store_bridge_parm(d, buf, len, br_vlan_filter_toggle);
}
static DEVICE_ATTR(vlan_filtering, S_IRUGO | S_IWUSR,
		   show_vlan_filtering, store_vlan_filtering);
#endif
/* All per-bridge sysfs attributes, grouped under the SYSFS_BRIDGE_ATTR
 * ("bridge") subdirectory of the net class device.  Optional entries are
 * compiled in only with their respective config options. */
static struct attribute *bridge_attrs[] = {
	&dev_attr_forward_delay.attr,
	&dev_attr_hello_time.attr,
	&dev_attr_max_age.attr,
	&dev_attr_ageing_time.attr,
	&dev_attr_stp_state.attr,
	&dev_attr_group_fwd_mask.attr,
	&dev_attr_priority.attr,
	&dev_attr_bridge_id.attr,
	&dev_attr_root_id.attr,
	&dev_attr_root_path_cost.attr,
	&dev_attr_root_port.attr,
	&dev_attr_topology_change.attr,
	&dev_attr_topology_change_detected.attr,
	&dev_attr_hello_timer.attr,
	&dev_attr_tcn_timer.attr,
	&dev_attr_topology_change_timer.attr,
	&dev_attr_gc_timer.attr,
	&dev_attr_group_addr.attr,
	&dev_attr_flush.attr,
#ifdef CONFIG_BRIDGE_IGMP_SNOOPING
	&dev_attr_multicast_router.attr,
	&dev_attr_multicast_snooping.attr,
	&dev_attr_multicast_querier.attr,
	&dev_attr_hash_elasticity.attr,
	&dev_attr_hash_max.attr,
	&dev_attr_multicast_last_member_count.attr,
	&dev_attr_multicast_startup_query_count.attr,
	&dev_attr_multicast_last_member_interval.attr,
	&dev_attr_multicast_membership_interval.attr,
	&dev_attr_multicast_querier_interval.attr,
	&dev_attr_multicast_query_interval.attr,
	&dev_attr_multicast_query_response_interval.attr,
	&dev_attr_multicast_startup_query_interval.attr,
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
	&dev_attr_nf_call_iptables.attr,
	&dev_attr_nf_call_ip6tables.attr,
	&dev_attr_nf_call_arptables.attr,
#endif
#ifdef CONFIG_BRIDGE_VLAN_FILTERING
	&dev_attr_vlan_filtering.attr,
#endif
	NULL	/* sysfs requires a NULL terminator */
};
static struct attribute_group bridge_group = {
	.name = SYSFS_BRIDGE_ATTR,
	.attrs = bridge_attrs,
};
/*
 * Export the forwarding information table as a binary file
 * The records are struct __fdb_entry.
 *
 * Returns the number of bytes read.
 */
static ssize_t brforward_read(struct file *filp, struct kobject *kobj,
			      struct bin_attribute *bin_attr,
			      char *buf, loff_t off, size_t count)
{
	struct device *dev = to_dev(kobj);
	struct net_bridge *br = to_bridge(dev);
	int n;
	/* must read whole records: reject offsets that land mid-record */
	if (off % sizeof(struct __fdb_entry) != 0)
		return -EINVAL;
	/* br_fdb_fillbuf() returns the number of records copied */
	n = br_fdb_fillbuf(br, buf,
			   count / sizeof(struct __fdb_entry),
			   off / sizeof(struct __fdb_entry));
	/* convert record count to byte count for the sysfs read return */
	if (n > 0)
		n *= sizeof(struct __fdb_entry);
	return n;
}
/* Read-only binary sysfs attribute exposing the FDB. */
static struct bin_attribute bridge_forward = {
	.attr = { .name = SYSFS_BRIDGE_FDB,
		  .mode = S_IRUGO, },
	.read = brforward_read,
};
/*
* Add entries in sysfs onto the existing network class device
* for the bridge.
* Adds a attribute group "bridge" containing tuning parameters.
* Binary attribute containing the forward table
* Sub directory to hold links to interfaces.
*
* Note: the ifobj exists only to be a subdirectory
* to hold links. The ifobj exists in same data structure
* as it's parent the bridge so reference counting works.
*/
int br_sysfs_addbr(struct net_device *dev)
{
struct kobject *brobj = &dev->dev.kobj;
struct net_bridge *br = netdev_priv(dev);
int err;
err = sysfs_create_group(brobj, &bridge_group);
if (err) {
pr_info("%s: can't create group %s/%s\n",
__func__, dev->name, bridge_group.name);
goto out1;
}
err = sysfs_create_bin_file(brobj, &bridge_forward);
if (err) {
pr_info("%s: can't create attribute file %s/%s\n",
__func__, dev->name, bridge_forward.attr.name);
goto out2;
}
br->ifobj = kobject_create_and_add(SYSFS_BRIDGE_PORT_SUBDIR, brobj);
if (!br->ifobj) {
pr_info("%s: can't add kobject (directory) %s/%s\n",
__func__, dev->name, SYSFS_BRIDGE_PORT_SUBDIR);
goto out3;
}
return 0;
out3:
sysfs_remove_bin_file(&dev->dev.kobj, &bridge_forward);
out2:
sysfs_remove_group(&dev->dev.kobj, &bridge_group);
out1:
return err;
}
/* Undo br_sysfs_addbr(): drop the port-link subdirectory, the FDB binary
 * file and the "bridge" attribute group, in reverse creation order. */
void br_sysfs_delbr(struct net_device *dev)
{
	struct kobject *kobj = &dev->dev.kobj;
	struct net_bridge *br = netdev_priv(dev);
	kobject_put(br->ifobj);
	sysfs_remove_bin_file(kobj, &bridge_forward);
	sysfs_remove_group(kobj, &bridge_group);
}
| gpl-2.0 |
NoelMacwan/Kernel-NanHu-11.3.A.1.39 | drivers/net/loopback.c | 2905 | 5465 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* Pseudo-driver for the loopback interface.
*
* Version: @(#)loopback.c 1.0.4b 08/16/93
*
* Authors: Ross Biro
* Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
* Donald Becker, <becker@scyld.com>
*
* Alan Cox : Fixed oddments for NET3.014
* Alan Cox : Rejig for NET3.029 snap #3
* Alan Cox : Fixed NET3.029 bugs and sped up
* Larry McVoy : Tiny tweak to double performance
* Alan Cox : Backed out LMV's tweak - the linux mm
* can't take it...
* Michael Griffith: Don't bother computing the checksums
* on packets received on the loopback
* interface.
* Alexey Kuznetsov: Potential hang under some extreme
* cases removed.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <linux/if_ether.h> /* For the statistics structure. */
#include <linux/if_arp.h> /* For ARPHRD_ETHER */
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/percpu.h>
#include <net/net_namespace.h>
#include <linux/u64_stats_sync.h>
/* Per-cpu loopback statistics; syncp lets 32-bit readers get a consistent
 * snapshot of the two u64 counters. */
struct pcpu_lstats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};
/*
 * The higher levels take care of making this non-reentrant (it's
 * called with bh's disabled).
 */
static netdev_tx_t loopback_xmit(struct sk_buff *skb,
				 struct net_device *dev)
{
	struct pcpu_lstats *lb_stats;
	int len;
	/* detach skb from its originating socket before looping it back */
	skb_orphan(skb);
	skb->protocol = eth_type_trans(skb, dev);
	/* it's OK to use per_cpu_ptr() because BHs are off */
	lb_stats = this_cpu_ptr(dev->lstats);
	/* capture length before netif_rx() may consume the skb */
	len = skb->len;
	if (likely(netif_rx(skb) == NET_RX_SUCCESS)) {
		u64_stats_update_begin(&lb_stats->syncp);
		lb_stats->bytes += len;
		lb_stats->packets++;
		u64_stats_update_end(&lb_stats->syncp);
	}
	return NETDEV_TX_OK;
}
/* Sum the per-cpu counters into @stats.  Each per-cpu pair is read inside a
 * u64_stats fetch/retry loop so a concurrent writer can't leave us with a
 * torn packets/bytes pair.  rx and tx mirror each other on loopback. */
static struct rtnl_link_stats64 *loopback_get_stats64(struct net_device *dev,
						      struct rtnl_link_stats64 *stats)
{
	u64 bytes = 0;
	u64 packets = 0;
	int i;
	for_each_possible_cpu(i) {
		const struct pcpu_lstats *lb_stats;
		u64 tbytes, tpackets;
		unsigned int start;
		lb_stats = per_cpu_ptr(dev->lstats, i);
		do {
			start = u64_stats_fetch_begin(&lb_stats->syncp);
			tbytes = lb_stats->bytes;
			tpackets = lb_stats->packets;
		} while (u64_stats_fetch_retry(&lb_stats->syncp, start));
		bytes += tbytes;
		packets += tpackets;
	}
	stats->rx_packets = packets;
	stats->tx_packets = packets;
	stats->rx_bytes = bytes;
	stats->tx_bytes = bytes;
	return stats;
}
/* ethtool get_link callback: loopback is always up. */
static u32 always_on(struct net_device *dev)
{
	return 1;
}
static const struct ethtool_ops loopback_ethtool_ops = {
	.get_link = always_on,
};
/* Allocate the per-cpu stats on device registration. */
static int loopback_dev_init(struct net_device *dev)
{
	dev->lstats = alloc_percpu(struct pcpu_lstats);
	if (!dev->lstats)
		return -ENOMEM;
	return 0;
}
/* Destructor: release per-cpu stats, then the netdev itself. */
static void loopback_dev_free(struct net_device *dev)
{
	free_percpu(dev->lstats);
	free_netdev(dev);
}
static const struct net_device_ops loopback_ops = {
	.ndo_init = loopback_dev_init,
	.ndo_start_xmit = loopback_xmit,
	.ndo_get_stats64 = loopback_get_stats64,
};
/*
 * The loopback device is special. There is only one instance
 * per network namespace.
 */
static void loopback_setup(struct net_device *dev)
{
	/* large MTU: 16K payload + IP + TCP headers + timestamp option */
	dev->mtu = (16 * 1024) + 20 + 20 + 12;
	dev->hard_header_len = ETH_HLEN;	/* 14 */
	dev->addr_len = ETH_ALEN;		/* 6 */
	/* no qdisc queueing on loopback */
	dev->tx_queue_len = 0;
	dev->type = ARPHRD_LOOPBACK;	/* 0x0001*/
	dev->flags = IFF_LOOPBACK;
	/* keep dst entries attached so locally looped skbs retain them */
	dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
	dev->hw_features = NETIF_F_ALL_TSO | NETIF_F_UFO;
	dev->features = NETIF_F_SG | NETIF_F_FRAGLIST
		| NETIF_F_ALL_TSO
		| NETIF_F_UFO
		| NETIF_F_HW_CSUM
		| NETIF_F_RXCSUM
		| NETIF_F_HIGHDMA
		| NETIF_F_LLTX
		| NETIF_F_NETNS_LOCAL	/* cannot move between namespaces */
		| NETIF_F_VLAN_CHALLENGED
		| NETIF_F_LOOPBACK;
	dev->ethtool_ops = &loopback_ethtool_ops;
	dev->header_ops = &eth_header_ops;
	dev->netdev_ops = &loopback_ops;
	dev->destructor = loopback_dev_free;
}
/* Setup and register the loopback device. */
static __net_init int loopback_net_init(struct net *net)
{
	struct net_device *dev;
	int err;
	err = -ENOMEM;
	/* sizeof_priv == 0: all state lives in the per-cpu lstats */
	dev = alloc_netdev(0, "lo", loopback_setup);
	if (!dev)
		goto out;
	dev_net_set(dev, net);
	err = register_netdev(dev);
	if (err)
		goto out_free_netdev;
	net->loopback_dev = dev;
	return 0;
out_free_netdev:
	free_netdev(dev);
out:
	/* the initial namespace cannot function without "lo" */
	if (net_eq(net, &init_net))
		panic("loopback: Failed to register netdevice: %d\n", err);
	return err;
}
/* Registered in net/core/dev.c */
struct pernet_operations __net_initdata loopback_net_ops = {
	.init = loopback_net_init,
};
| gpl-2.0 |
KlinkOnE/kyleopen-kernel | lib/decompress_unlzma.c | 3161 | 16160 | /* Lzma decompressor for Linux kernel. Shamelessly snarfed
*from busybox 1.1.1
*
*Linux kernel adaptation
*Copyright (C) 2006 Alain < alain@knaff.lu >
*
*Based on small lzma deflate implementation/Small range coder
*implementation for lzma.
*Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
*Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
*Copyright (C) 1999-2005 Igor Pavlov
*
*Copyrights of the parts, see headers below.
*
*
*This program is free software; you can redistribute it and/or
*modify it under the terms of the GNU Lesser General Public
*License as published by the Free Software Foundation; either
*version 2.1 of the License, or (at your option) any later version.
*
*This program is distributed in the hope that it will be useful,
*but WITHOUT ANY WARRANTY; without even the implied warranty of
*MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
*Lesser General Public License for more details.
*
*You should have received a copy of the GNU Lesser General Public
*License along with this library; if not, write to the Free Software
*Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifdef STATIC
#define PREBOOT
#else
#include <linux/decompress/unlzma.h>
#endif /* STATIC */
#include <linux/decompress/mm.h>
#define MIN(a, b) (((a) < (b)) ? (a) : (b))
/* Read a little-endian integer of @size bytes from @ptr, byte by byte so it
 * works regardless of host endianness or alignment. */
static long long INIT read_int(unsigned char *ptr, int size)
{
	int i;
	long long ret = 0;
	for (i = 0; i < size; i++)
		ret = (ret << 8) | ptr[size-i-1];
	return ret;
}
/* In-place conversion of a little-endian on-disk field to host order. */
#define ENDIAN_CONVERT(x) \
	x = (typeof(x))read_int((unsigned char *)&x, sizeof(x))
/* Small range coder implementation for lzma.
*Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
*Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
*Copyright (c) 1999-2005 Igor Pavlov
*/
#include <linux/compiler.h>
#define LZMA_IOBUF_SIZE	0x10000
/* Range-coder state.  fill() refills the input buffer; error() reports a
 * fatal decode problem.  [buffer, buffer_end) is the current input window,
 * ptr the read cursor. */
struct rc {
	int (*fill)(void*, unsigned int);
	uint8_t *ptr;
	uint8_t *buffer;
	uint8_t *buffer_end;
	int buffer_size;
	uint32_t code;
	uint32_t range;
	uint32_t bound;
	void (*error)(char *);
};
#define RC_TOP_BITS 24
#define RC_MOVE_BITS 5
#define RC_MODEL_TOTAL_BITS 11
/* Default fill() when the caller supplied a complete in-memory buffer:
 * any attempt to refill means we ran out of data. */
static int INIT nofill(void *buffer, unsigned int len)
{
	return -1;
}
/* Called twice: once at startup and once in rc_normalize() */
static void INIT rc_read(struct rc *rc)
{
	rc->buffer_size = rc->fill((char *)rc->buffer, LZMA_IOBUF_SIZE);
	if (rc->buffer_size <= 0)
		rc->error("unexpected EOF");
	rc->ptr = rc->buffer;
	rc->buffer_end = rc->buffer + rc->buffer_size;
}
/* Called once */
static inline void INIT rc_init(struct rc *rc,
				int (*fill)(void*, unsigned int),
				char *buffer, int buffer_size)
{
	if (fill)
		rc->fill = fill;
	else
		rc->fill = nofill;	/* fixed in-memory input */
	rc->buffer = (uint8_t *)buffer;
	rc->buffer_size = buffer_size;
	rc->buffer_end = rc->buffer + rc->buffer_size;
	rc->ptr = rc->buffer;
	rc->code = 0;
	rc->range = 0xFFFFFFFF;
}
/* Prime rc->code with the first 5 input bytes (LZMA stream preamble). */
static inline void INIT rc_init_code(struct rc *rc)
{
	int i;
	for (i = 0; i < 5; i++) {
		if (rc->ptr >= rc->buffer_end)
			rc_read(rc);
		rc->code = (rc->code << 8) | *rc->ptr++;
	}
}
/* Called twice, but one callsite is in inline'd rc_is_bit_0_helper() */
static void INIT rc_do_normalize(struct rc *rc)
{
	if (rc->ptr >= rc->buffer_end)
		rc_read(rc);
	rc->range <<= 8;
	rc->code = (rc->code << 8) | *rc->ptr++;
}
/* Renormalize when the range drops below 2^RC_TOP_BITS. */
static inline void INIT rc_normalize(struct rc *rc)
{
	if (rc->range < (1 << RC_TOP_BITS))
		rc_do_normalize(rc);
}
/* Called 9 times */
/* Why rc_is_bit_0_helper exists?
 *Because we want to always expose (rc->code < rc->bound) to optimizer
 */
static inline uint32_t INIT rc_is_bit_0_helper(struct rc *rc, uint16_t *p)
{
	rc_normalize(rc);
	/* split the range in proportion to the bit-0 probability *p */
	rc->bound = *p * (rc->range >> RC_MODEL_TOTAL_BITS);
	return rc->bound;
}
static inline int INIT rc_is_bit_0(struct rc *rc, uint16_t *p)
{
	uint32_t t = rc_is_bit_0_helper(rc, p);
	return rc->code < t;
}
/* Called ~10 times, but very small, thus inlined */
/* Commit to bit 0: shrink range to the low part, adapt *p upward. */
static inline void INIT rc_update_bit_0(struct rc *rc, uint16_t *p)
{
	rc->range = rc->bound;
	*p += ((1 << RC_MODEL_TOTAL_BITS) - *p) >> RC_MOVE_BITS;
}
/* Commit to bit 1: take the high part of the range, adapt *p downward. */
static inline void INIT rc_update_bit_1(struct rc *rc, uint16_t *p)
{
	rc->range -= rc->bound;
	rc->code -= rc->bound;
	*p -= *p >> RC_MOVE_BITS;
}
/* Called 4 times in unlzma loop */
/* Decode one bit and shift it into *symbol; returns the bit value. */
static int INIT rc_get_bit(struct rc *rc, uint16_t *p, int *symbol)
{
	if (rc_is_bit_0(rc, p)) {
		rc_update_bit_0(rc, p);
		*symbol *= 2;
		return 0;
	} else {
		rc_update_bit_1(rc, p);
		*symbol = *symbol * 2 + 1;
		return 1;
	}
}
/* Called once */
/* Decode one bit with fixed probability 1/2 (no model adaptation). */
static inline int INIT rc_direct_bit(struct rc *rc)
{
	rc_normalize(rc);
	rc->range >>= 1;
	if (rc->code >= rc->range) {
		rc->code -= rc->range;
		return 1;
	}
	return 0;
}
/* Called twice */
/* Decode @num_levels bits through a probability tree rooted at @p; the
 * result in *symbol is the decoded value with the tree offset removed. */
static inline void INIT
rc_bit_tree_decode(struct rc *rc, uint16_t *p, int num_levels, int *symbol)
{
	int i = num_levels;
	*symbol = 1;
	while (i--)
		rc_get_bit(rc, p + *symbol, symbol);
	*symbol -= 1 << num_levels;
}
/*
* Small lzma deflate implementation.
* Copyright (C) 2006 Aurelien Jacobs < aurel@gnuage.org >
*
* Based on LzmaDecode.c from the LZMA SDK 4.22 (http://www.7-zip.org/)
* Copyright (C) 1999-2005 Igor Pavlov
*/
/* 13-byte on-disk LZMA header: packed properties byte (lc/lp/pb), dictionary
 * size and uncompressed size, both little-endian. */
struct lzma_header {
	uint8_t pos;
	uint32_t dict_size;
	uint64_t dst_size;
} __attribute__ ((packed)) ;
/* Offsets of the individual probability-model arrays inside the single
 * uint16_t probability table 'p', exactly as laid out by the LZMA SDK. */
#define LZMA_BASE_SIZE 1846
#define LZMA_LIT_SIZE 768
#define LZMA_NUM_POS_BITS_MAX 4
#define LZMA_LEN_NUM_LOW_BITS 3
#define LZMA_LEN_NUM_MID_BITS 3
#define LZMA_LEN_NUM_HIGH_BITS 8
#define LZMA_LEN_CHOICE 0
#define LZMA_LEN_CHOICE_2 (LZMA_LEN_CHOICE + 1)
#define LZMA_LEN_LOW (LZMA_LEN_CHOICE_2 + 1)
#define LZMA_LEN_MID (LZMA_LEN_LOW \
		      + (1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_LOW_BITS)))
#define LZMA_LEN_HIGH (LZMA_LEN_MID \
		       +(1 << (LZMA_NUM_POS_BITS_MAX + LZMA_LEN_NUM_MID_BITS)))
#define LZMA_NUM_LEN_PROBS (LZMA_LEN_HIGH + (1 << LZMA_LEN_NUM_HIGH_BITS))
#define LZMA_NUM_STATES 12
#define LZMA_NUM_LIT_STATES 7
#define LZMA_START_POS_MODEL_INDEX 4
#define LZMA_END_POS_MODEL_INDEX 14
#define LZMA_NUM_FULL_DISTANCES (1 << (LZMA_END_POS_MODEL_INDEX >> 1))
#define LZMA_NUM_POS_SLOT_BITS 6
#define LZMA_NUM_LEN_TO_POS_STATES 4
#define LZMA_NUM_ALIGN_BITS 4
#define LZMA_MATCH_MIN_LEN 2
#define LZMA_IS_MATCH 0
#define LZMA_IS_REP (LZMA_IS_MATCH + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
#define LZMA_IS_REP_G0 (LZMA_IS_REP + LZMA_NUM_STATES)
#define LZMA_IS_REP_G1 (LZMA_IS_REP_G0 + LZMA_NUM_STATES)
#define LZMA_IS_REP_G2 (LZMA_IS_REP_G1 + LZMA_NUM_STATES)
#define LZMA_IS_REP_0_LONG (LZMA_IS_REP_G2 + LZMA_NUM_STATES)
#define LZMA_POS_SLOT (LZMA_IS_REP_0_LONG \
		       + (LZMA_NUM_STATES << LZMA_NUM_POS_BITS_MAX))
#define LZMA_SPEC_POS (LZMA_POS_SLOT \
		       +(LZMA_NUM_LEN_TO_POS_STATES << LZMA_NUM_POS_SLOT_BITS))
#define LZMA_ALIGN (LZMA_SPEC_POS \
		    + LZMA_NUM_FULL_DISTANCES - LZMA_END_POS_MODEL_INDEX)
#define LZMA_LEN_CODER (LZMA_ALIGN + (1 << LZMA_NUM_ALIGN_BITS))
#define LZMA_REP_LEN_CODER (LZMA_LEN_CODER + LZMA_NUM_LEN_PROBS)
#define LZMA_LITERAL (LZMA_REP_LEN_CODER + LZMA_NUM_LEN_PROBS)
/* Output side.  With flush(): buffer is a circular dictionary window that is
 * flushed each time it fills.  Without flush(): buffer is the whole output. */
struct writer {
	uint8_t *buffer;
	uint8_t previous_byte;
	size_t buffer_pos;
	int bufsize;
	size_t global_pos;
	int(*flush)(void*, unsigned int);
	struct lzma_header *header;
};
/* LZMA decoder state: state machine index plus the four most recent
 * match distances (rep0 is the active one). */
struct cstate {
	int state;
	uint32_t rep0, rep1, rep2, rep3;
};
/* Total number of bytes produced so far (flushed + pending). */
static inline size_t INIT get_pos(struct writer *wr)
{
	return
		wr->global_pos + wr->buffer_pos;
}
/* Fetch the byte @offs positions behind the current output position, wrapping
 * inside the dictionary window.  NOTE: both branches rely on deliberate
 * unsigned wraparound — when buffer_pos < offs the subtraction underflows and
 * the loop adds dict_size until the index wraps back into range. */
static inline uint8_t INIT peek_old_byte(struct writer *wr,
					 uint32_t offs)
{
	if (!wr->flush) {
		/* whole output kept in one buffer */
		int32_t pos;
		while (offs > wr->header->dict_size)
			offs -= wr->header->dict_size;
		pos = wr->buffer_pos - offs;
		return wr->buffer[pos];
	} else {
		/* circular dictionary window */
		uint32_t pos = wr->buffer_pos - offs;
		while (pos >= wr->header->dict_size)
			pos += wr->header->dict_size;
		return wr->buffer[pos];
	}
}
/* Append one byte; flush the window when it fills (streaming mode).
 * Returns 0 on success, -1 if the flush callback fell short. */
static inline int INIT write_byte(struct writer *wr, uint8_t byte)
{
	wr->buffer[wr->buffer_pos++] = wr->previous_byte = byte;
	if (wr->flush && wr->buffer_pos == wr->header->dict_size) {
		wr->buffer_pos = 0;
		wr->global_pos += wr->header->dict_size;
		if (wr->flush((char *)wr->buffer, wr->header->dict_size)
		    != wr->header->dict_size)
			return -1;
	}
	return 0;
}
/* Copy one byte from @offs back in the dictionary to the output. */
static inline int INIT copy_byte(struct writer *wr, uint32_t offs)
{
	return write_byte(wr, peek_old_byte(wr, offs));
}
/* Replay a match of @len bytes at distance @rep0, stopping early at the
 * declared uncompressed size.  Returns the number of bytes NOT copied. */
static inline int INIT copy_bytes(struct writer *wr,
				  uint32_t rep0, int len)
{
	do {
		if (copy_byte(wr, rep0))
			return -1;
		len--;
	} while (len != 0 && wr->buffer_pos < wr->header->dst_size);
	return len;
}
/* Decode a literal byte (the "bit was 0" branch of the main loop) and write
 * it out.  Selects the literal probability sub-table from the current output
 * position and the previous byte, then decodes 8 bits into mi.
 * Returns write_byte()'s result (0 ok, -1 flush failure). */
static inline int INIT process_bit0(struct writer *wr, struct rc *rc,
				    struct cstate *cst, uint16_t *p,
				    int pos_state, uint16_t *prob,
				    int lc, uint32_t literal_pos_mask) {
	int mi = 1;
	rc_update_bit_0(rc, prob);
	prob = (p + LZMA_LITERAL +
		(LZMA_LIT_SIZE
		 * (((get_pos(wr) & literal_pos_mask) << lc)
		    + (wr->previous_byte >> (8 - lc))))
		);
	if (cst->state >= LZMA_NUM_LIT_STATES) {
		/* after a match: decode with the matched byte as context
		 * until the decoded bit diverges from it */
		int match_byte = peek_old_byte(wr, cst->rep0);
		do {
			int bit;
			uint16_t *prob_lit;
			match_byte <<= 1;
			bit = match_byte & 0x100;
			prob_lit = prob + 0x100 + bit + mi;
			if (rc_get_bit(rc, prob_lit, &mi)) {
				if (!bit)
					break;
			} else {
				if (bit)
					break;
			}
		} while (mi < 0x100);
	}
	/* decode the remaining bits without match context */
	while (mi < 0x100) {
		uint16_t *prob_lit = prob + mi;
		rc_get_bit(rc, prob_lit, &mi);
	}
	/* state transition back toward the literal states */
	if (cst->state < 4)
		cst->state = 0;
	else if (cst->state < 10)
		cst->state -= 3;
	else
		cst->state -= 6;
	return write_byte(wr, mi);
}
/* Decode a match (the "bit was 1" branch): determine whether this is a new
 * match or a repeat of one of the last four distances, decode the length and
 * (for new matches) the distance, then replay the bytes via copy_bytes().
 * Returns 0 on success, non-zero on corrupt data or flush failure. */
static inline int INIT process_bit1(struct writer *wr, struct rc *rc,
				    struct cstate *cst, uint16_t *p,
				    int pos_state, uint16_t *prob) {
	int offset;
	uint16_t *prob_len;
	int num_bits;
	int len;
	rc_update_bit_1(rc, prob);
	prob = p + LZMA_IS_REP + cst->state;
	if (rc_is_bit_0(rc, prob)) {
		/* new match: shift the distance history */
		rc_update_bit_0(rc, prob);
		cst->rep3 = cst->rep2;
		cst->rep2 = cst->rep1;
		cst->rep1 = cst->rep0;
		cst->state = cst->state < LZMA_NUM_LIT_STATES ? 0 : 3;
		prob = p + LZMA_LEN_CODER;
	} else {
		/* repeated match: pick one of rep0..rep3 */
		rc_update_bit_1(rc, prob);
		prob = p + LZMA_IS_REP_G0 + cst->state;
		if (rc_is_bit_0(rc, prob)) {
			rc_update_bit_0(rc, prob);
			prob = (p + LZMA_IS_REP_0_LONG
				+ (cst->state <<
				   LZMA_NUM_POS_BITS_MAX) +
				pos_state);
			if (rc_is_bit_0(rc, prob)) {
				/* "short rep": single byte at rep0 */
				rc_update_bit_0(rc, prob);
				cst->state = cst->state < LZMA_NUM_LIT_STATES ?
					9 : 11;
				return copy_byte(wr, cst->rep0);
			} else {
				rc_update_bit_1(rc, prob);
			}
		} else {
			/* rep1/rep2/rep3: rotate the chosen distance to rep0 */
			uint32_t distance;
			rc_update_bit_1(rc, prob);
			prob = p + LZMA_IS_REP_G1 + cst->state;
			if (rc_is_bit_0(rc, prob)) {
				rc_update_bit_0(rc, prob);
				distance = cst->rep1;
			} else {
				rc_update_bit_1(rc, prob);
				prob = p + LZMA_IS_REP_G2 + cst->state;
				if (rc_is_bit_0(rc, prob)) {
					rc_update_bit_0(rc, prob);
					distance = cst->rep2;
				} else {
					rc_update_bit_1(rc, prob);
					distance = cst->rep3;
					cst->rep3 = cst->rep2;
				}
				cst->rep2 = cst->rep1;
			}
			cst->rep1 = cst->rep0;
			cst->rep0 = distance;
		}
		cst->state = cst->state < LZMA_NUM_LIT_STATES ? 8 : 11;
		prob = p + LZMA_REP_LEN_CODER;
	}
	/* length decode: low (2..9), mid (10..17) or high (18..273) range */
	prob_len = prob + LZMA_LEN_CHOICE;
	if (rc_is_bit_0(rc, prob_len)) {
		rc_update_bit_0(rc, prob_len);
		prob_len = (prob + LZMA_LEN_LOW
			    + (pos_state <<
			       LZMA_LEN_NUM_LOW_BITS));
		offset = 0;
		num_bits = LZMA_LEN_NUM_LOW_BITS;
	} else {
		rc_update_bit_1(rc, prob_len);
		prob_len = prob + LZMA_LEN_CHOICE_2;
		if (rc_is_bit_0(rc, prob_len)) {
			rc_update_bit_0(rc, prob_len);
			prob_len = (prob + LZMA_LEN_MID
				    + (pos_state <<
				       LZMA_LEN_NUM_MID_BITS));
			offset = 1 << LZMA_LEN_NUM_LOW_BITS;
			num_bits = LZMA_LEN_NUM_MID_BITS;
		} else {
			rc_update_bit_1(rc, prob_len);
			prob_len = prob + LZMA_LEN_HIGH;
			offset = ((1 << LZMA_LEN_NUM_LOW_BITS)
				  + (1 << LZMA_LEN_NUM_MID_BITS));
			num_bits = LZMA_LEN_NUM_HIGH_BITS;
		}
	}
	rc_bit_tree_decode(rc, prob_len, num_bits, &len);
	len += offset;
	if (cst->state < 4) {
		/* new match: decode the distance from the pos-slot model */
		int pos_slot;
		cst->state += LZMA_NUM_LIT_STATES;
		prob =
			p + LZMA_POS_SLOT +
			((len <
			  LZMA_NUM_LEN_TO_POS_STATES ? len :
			  LZMA_NUM_LEN_TO_POS_STATES - 1)
			 << LZMA_NUM_POS_SLOT_BITS);
		rc_bit_tree_decode(rc, prob,
				   LZMA_NUM_POS_SLOT_BITS,
				   &pos_slot);
		if (pos_slot >= LZMA_START_POS_MODEL_INDEX) {
			int i, mi;
			num_bits = (pos_slot >> 1) - 1;
			cst->rep0 = 2 | (pos_slot & 1);
			if (pos_slot < LZMA_END_POS_MODEL_INDEX) {
				cst->rep0 <<= num_bits;
				prob = p + LZMA_SPEC_POS +
					cst->rep0 - pos_slot - 1;
			} else {
				/* high distances: direct bits + aligned bits */
				num_bits -= LZMA_NUM_ALIGN_BITS;
				while (num_bits--)
					cst->rep0 = (cst->rep0 << 1) |
						rc_direct_bit(rc);
				prob = p + LZMA_ALIGN;
				cst->rep0 <<= LZMA_NUM_ALIGN_BITS;
				num_bits = LZMA_NUM_ALIGN_BITS;
			}
			/* reverse bit-tree decode of the low bits */
			i = 1;
			mi = 1;
			while (num_bits--) {
				if (rc_get_bit(rc, prob + mi, &mi))
					cst->rep0 |= i;
				i <<= 1;
			}
		} else
			cst->rep0 = pos_slot;
		/* rep0 of all-ones encodes end-of-stream */
		if (++(cst->rep0) == 0)
			return 0;
		/* distance must stay within the produced data / dictionary */
		if (cst->rep0 > wr->header->dict_size
		    || cst->rep0 > get_pos(wr))
			return -1;
	}
	len += LZMA_MATCH_MIN_LEN;
	return copy_bytes(wr, cst->rep0, len);
}
/*
 * Decompress an LZMA stream.
 *
 * @buf/@in_len: initial input buffer; if @buf is NULL a LZMA_IOBUF_SIZE
 *               buffer is allocated and @fill is used to (re)fill it.
 * @fill:   callback to fetch more input (may be NULL for in-memory input).
 * @flush:  callback to drain output; if NULL the whole output must fit in
 *          @output.
 * @output: preallocated output buffer, or NULL to allocate a dictionary-
 *          sized window internally (streaming mode).
 * @posp:   if non-NULL, receives the number of input bytes consumed.
 * @error:  fatal-error reporting callback.
 *
 * Returns 0 on success, -1 on failure.
 */
STATIC inline int INIT unlzma(unsigned char *buf, int in_len,
			      int(*fill)(void*, unsigned int),
			      int(*flush)(void*, unsigned int),
			      unsigned char *output,
			      int *posp,
			      void(*error)(char *x)
	)
{
	struct lzma_header header;
	int lc, pb, lp;
	uint32_t pos_state_mask;
	uint32_t literal_pos_mask;
	uint16_t *p;
	int num_probs;
	struct rc rc;
	int i, mi;
	struct writer wr;
	struct cstate cst;
	unsigned char *inbuf;
	int ret = -1;

	rc.error = error;

	/* use the caller's buffer when given, otherwise rely on fill() */
	if (buf)
		inbuf = buf;
	else
		inbuf = malloc(LZMA_IOBUF_SIZE);
	if (!inbuf) {
		error("Could not allocate input buffer"); /* was: "bufer" */
		goto exit_0;
	}

	cst.state = 0;
	cst.rep0 = cst.rep1 = cst.rep2 = cst.rep3 = 1;

	wr.header = &header;
	wr.flush = flush;
	wr.global_pos = 0;
	wr.previous_byte = 0;
	wr.buffer_pos = 0;

	rc_init(&rc, fill, inbuf, in_len);

	/* read the 13-byte header, refilling as needed */
	for (i = 0; i < sizeof(header); i++) {
		if (rc.ptr >= rc.buffer_end)
			rc_read(&rc);
		((unsigned char *)&header)[i] = *rc.ptr++;
	}
	if (header.pos >= (9 * 5 * 5)) {
		error("bad header");
		goto exit_1;
	}

	/* unpack the properties byte: pos = (pb * 5 + lp) * 9 + lc */
	mi = 0;
	lc = header.pos;
	while (lc >= 9) {
		mi++;
		lc -= 9;
	}
	pb = 0;
	lp = mi;
	while (lp >= 5) {
		pb++;
		lp -= 5;
	}
	pos_state_mask = (1 << pb) - 1;
	literal_pos_mask = (1 << lp) - 1;

	ENDIAN_CONVERT(header.dict_size);
	ENDIAN_CONVERT(header.dst_size);
	if (header.dict_size == 0)
		header.dict_size = 1;

	if (output)
		wr.buffer = output;
	else {
		/* streaming: window need not exceed the total output size */
		wr.bufsize = MIN(header.dst_size, header.dict_size);
		wr.buffer = large_malloc(wr.bufsize);
	}
	if (wr.buffer == NULL)
		goto exit_1;

	/* allocate the probability table sized by lc/lp, then initialize
	 * every model to probability 1/2 */
	num_probs = LZMA_BASE_SIZE + (LZMA_LIT_SIZE << (lc + lp));
	p = (uint16_t *) large_malloc(num_probs * sizeof(*p));
	if (p == 0)
		goto exit_2;
	num_probs = LZMA_LITERAL + (LZMA_LIT_SIZE << (lc + lp));
	for (i = 0; i < num_probs; i++)
		p[i] = (1 << RC_MODEL_TOTAL_BITS) >> 1;

	rc_init_code(&rc);

	/* main decode loop: literal or match per iteration */
	while (get_pos(&wr) < header.dst_size) {
		int pos_state = get_pos(&wr) & pos_state_mask;
		uint16_t *prob = p + LZMA_IS_MATCH +
			(cst.state << LZMA_NUM_POS_BITS_MAX) + pos_state;
		if (rc_is_bit_0(&rc, prob)) {
			if (process_bit0(&wr, &rc, &cst, p, pos_state, prob,
					 lc, literal_pos_mask)) {
				error("LZMA data is corrupt");
				goto exit_3;
			}
		} else {
			if (process_bit1(&wr, &rc, &cst, p, pos_state, prob)) {
				error("LZMA data is corrupt");
				goto exit_3;
			}
			/* rep0 == 0 marks the end-of-stream */
			if (cst.rep0 == 0)
				break;
		}
		if (rc.buffer_size <= 0)
			goto exit_3;
	}

	if (posp)
		*posp = rc.ptr-rc.buffer;
	/* final flush of any bytes still pending in the window */
	if (!wr.flush || wr.flush(wr.buffer, wr.buffer_pos) == wr.buffer_pos)
		ret = 0;
exit_3:
	large_free(p);
exit_2:
	if (!output)
		large_free(wr.buffer);
exit_1:
	if (!buf)
		free(inbuf);
exit_0:
	return ret;
}
#ifdef PREBOOT
/* Pre-boot entry point with the standard decompressor signature.
 * NOTE(review): trims 4 bytes off in_len — presumably the 4-byte trailer the
 * kernel build appends after the compressed stream; confirm against the
 * image layout before changing. */
STATIC int INIT decompress(unsigned char *buf, int in_len,
			   int(*fill)(void*, unsigned int),
			   int(*flush)(void*, unsigned int),
			   unsigned char *output,
			   int *posp,
			   void(*error)(char *x)
	)
{
	return unlzma(buf, in_len - 4, fill, flush, output, posp, error);
}
#endif
| gpl-2.0 |
BOOTMGR/GT-I9070_kernel | arch/arm/mach-s3c2410/gpio.c | 4441 | 1871 | /* linux/arch/arm/mach-s3c2410/gpio.c
*
* Copyright (c) 2004-2006 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2410 GPIO support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <mach/gpio-fns.h>
#include <asm/irq.h>
#include <mach/regs-gpio.h>
/* Configure the external-interrupt digital filter for EINT16..EINT23
 * (pins GPG8..GPG15 on S3C2410).
 * @pin:    GPIO pin (must be in GPG8..GPG15)
 * @on:     non-zero to enable filtering for this EINT
 * @config: filter width/clock-select byte written into EINFLTn
 * Returns 0 on success, -EINVAL for pins outside the filterable range. */
int s3c2410_gpio_irqfilter(unsigned int pin, unsigned int on,
			   unsigned int config)
{
	void __iomem *reg = S3C24XX_EINFLT0;
	unsigned long flags;
	unsigned long val;
	if (pin < S3C2410_GPG(8) || pin > S3C2410_GPG(15))
		return -EINVAL;
	config &= 0xff;
	/* index relative to GPG8: selects EINFLT register and byte lane */
	pin -= S3C2410_GPG(8);
	reg += pin & ~3;	/* 4 filter bytes per 32-bit EINFLT register */
	local_irq_save(flags);
	/* update filter width and clock source */
	val = __raw_readl(reg);
	val &= ~(0xff << ((pin & 3) * 8));
	val |= config << ((pin & 3) * 8);
	__raw_writel(val, reg);
	/* update filter enable: bit 3 of each pin's 4-bit EXTINT2 field */
	val = __raw_readl(S3C24XX_EXTINT2);
	val &= ~(1 << ((pin * 4) + 3));
	val |= on << ((pin * 4) + 3);
	__raw_writel(val, S3C24XX_EXTINT2);
	local_irq_restore(flags);
	return 0;
}
EXPORT_SYMBOL(s3c2410_gpio_irqfilter);
| gpl-2.0 |
savoca/h811 | drivers/mtd/maps/pismo.c | 4697 | 6233 | /*
* PISMO memory driver - http://www.pismoworld.org/
*
* For ARM Realview and Versatile platforms
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/mtd/physmap.h>
#include <linux/mtd/plat-ram.h>
#include <linux/mtd/pismo.h>
#define PISMO_NUM_CS	5
/* One chip-select descriptor as stored in the on-module EEPROM:
 * memory type, bus width code, access time and size, plus a name. */
struct pismo_cs_block {
	u8	type;
	u8	width;
	__le16	access;
	__le32	size;
	u32	reserved[2];
	char	device[32];
} __packed;
/* EEPROM layout: 5 CS blocks, board name, checksum byte (256 bytes total,
 * asserted by the BUILD_BUG_ONs in pismo_init). */
struct pismo_eeprom {
	struct pismo_cs_block cs[PISMO_NUM_CS];
	char	board[15];
	u8	sum;
} __packed;
/* Decoded, host-order view of one CS region. */
struct pismo_mem {
	phys_addr_t base;
	u32	size;
	u16	access;
	u8	width;
	u8	type;
};
/* Driver state: one child platform device per chip select. */
struct pismo_data {
	struct i2c_client	*client;
	void			(*vpp)(void *, int);
	void			*vpp_data;
	struct platform_device	*dev[PISMO_NUM_CS];
};
/* set_vpp hook handed to physmap child devices: forwards to the board's
 * callback.  The child's parent is our i2c client, so recover pismo_data
 * from it.  Only installed when a vpp callback was supplied (pismo_add_nor),
 * so pismo->vpp is non-NULL when this runs. */
static void pismo_set_vpp(struct platform_device *pdev, int on)
{
	struct i2c_client *client = to_i2c_client(pdev->dev.parent);
	struct pismo_data *pismo = i2c_get_clientdata(client);
	pismo->vpp(pismo->vpp_data, on);
}
/* Decode an EEPROM bus-width code (low 4 bits) into a byte width:
 * 0 -> 1, 1 -> 2, 2 -> 4; any other code is invalid and yields 0. */
static unsigned int pismo_width_to_bytes(unsigned int width)
{
	unsigned int code = width & 15;

	return (code <= 2) ? (1u << code) : 0;
}
/* Read @size bytes from the module EEPROM starting at @addr: a combined
 * write-address / read-data i2c transaction.
 * Returns @size on success, -EIO on a short or failed transfer. */
static int pismo_eeprom_read(struct i2c_client *client, void *buf, u8 addr,
			     size_t size)
{
	int ret;
	struct i2c_msg msg[] = {
		{
			.addr = client->addr,
			.len = sizeof(addr),
			.buf = &addr,
		}, {
			.addr = client->addr,
			.flags = I2C_M_RD,
			.len = size,
			.buf = buf,
		},
	};
	ret = i2c_transfer(client->adapter, msg, ARRAY_SIZE(msg));
	return ret == ARRAY_SIZE(msg) ? size : -EIO;
}
/* Create and register a child platform device @name for chip select @i,
 * covering @region and carrying @pdata as platform data.
 * A base of ~0 means the board provided no address for this CS -> -ENXIO.
 * On any registration failure the allocated device is released via
 * platform_device_put().  Returns 0 on success, negative errno otherwise. */
static int pismo_add_device(struct pismo_data *pismo, int i,
			    struct pismo_mem *region, const char *name,
			    void *pdata, size_t psize)
{
	struct platform_device *dev;
	struct resource res = { };
	phys_addr_t base = region->base;
	int ret;
	if (base == ~0)
		return -ENXIO;
	res.start = base;
	res.end = base + region->size - 1;
	res.flags = IORESOURCE_MEM;
	dev = platform_device_alloc(name, i);
	if (!dev)
		return -ENOMEM;
	dev->dev.parent = &pismo->client->dev;
	/* do { } while (0) used as a break-out error path: any failure
	 * falls through to platform_device_put() below */
	do {
		ret = platform_device_add_resources(dev, &res, 1);
		if (ret)
			break;
		ret = platform_device_add_data(dev, pdata, psize);
		if (ret)
			break;
		ret = platform_device_add(dev);
		if (ret)
			break;
		pismo->dev[i] = dev;
		return 0;
	} while (0);
	platform_device_put(dev);
	return ret;
}
/* Register a physmap-flash child for a NOR region; wire up the Vpp hook
 * only when the board supplied one. */
static int pismo_add_nor(struct pismo_data *pismo, int i,
			 struct pismo_mem *region)
{
	struct physmap_flash_data data = {
		.width = region->width,
	};
	if (pismo->vpp)
		data.set_vpp = pismo_set_vpp;
	return pismo_add_device(pismo, i, region, "physmap-flash",
				&data, sizeof(data));
}
/* Register an mtd-ram child for a static RAM region. */
static int pismo_add_sram(struct pismo_data *pismo, int i,
			  struct pismo_mem *region)
{
	struct platdata_mtd_ram data = {
		.bankwidth = region->width,
	};
	return pismo_add_device(pismo, i, region, "mtd-ram",
				&data, sizeof(data));
}
/* Decode one EEPROM CS block into a pismo_mem region and register the
 * matching child device based on its memory type.  Unknown widths and
 * type 0 (empty) / type 1 (DiskOnChip, unsupported) are skipped. */
static void pismo_add_one(struct pismo_data *pismo, int i,
			  const struct pismo_cs_block *cs, phys_addr_t base)
{
	struct device *dev = &pismo->client->dev;
	struct pismo_mem region;
	region.base = base;
	region.type = cs->type;
	region.width = pismo_width_to_bytes(cs->width);
	region.access = le16_to_cpu(cs->access);
	region.size = le32_to_cpu(cs->size);
	if (region.width == 0) {
		dev_err(dev, "cs%u: bad width: %02x, ignoring\n", i, cs->width);
		return;
	}
	/*
	 * FIXME: may need to the platforms memory controller here, but at
	 * the moment we assume that it has already been correctly setup.
	 * The memory controller can also tell us the base address as well.
	 */
	dev_info(dev, "cs%u: %.32s: type %02x access %u00ps size %uK\n",
		 i, cs->device, region.type, region.access, region.size / 1024);
	switch (region.type) {
	case 0:
		break;
	case 1:
		/* static DOC */
		break;
	case 2:
		/* static NOR */
		pismo_add_nor(pismo, i, &region);
		break;
	case 3:
		/* static RAM */
		pismo_add_sram(pismo, i, &region);
		break;
	}
}
/*
 * i2c remove callback: unregister every platform device created at probe
 * time and free the driver state.  Slots that were never populated are
 * NULL from the kzalloc — presumably platform_device_unregister() copes
 * with that; verify against the platform core if this is ever changed.
 */
static int pismo_remove(struct i2c_client *client)
{
	struct pismo_data *pismo = i2c_get_clientdata(client);
	int i;

	for (i = 0; i < ARRAY_SIZE(pismo->dev); i++)
		platform_device_unregister(pismo->dev[i]);

	kfree(pismo);

	return 0;
}
static int pismo_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
struct pismo_pdata *pdata = client->dev.platform_data;
struct pismo_eeprom eeprom;
struct pismo_data *pismo;
int ret, i;
if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "functionality mismatch\n");
return -EIO;
}
pismo = kzalloc(sizeof(*pismo), GFP_KERNEL);
if (!pismo)
return -ENOMEM;
pismo->client = client;
if (pdata) {
pismo->vpp = pdata->set_vpp;
pismo->vpp_data = pdata->vpp_data;
}
i2c_set_clientdata(client, pismo);
ret = pismo_eeprom_read(client, &eeprom, 0, sizeof(eeprom));
if (ret < 0) {
dev_err(&client->dev, "error reading EEPROM: %d\n", ret);
goto exit_free;
}
dev_info(&client->dev, "%.15s board found\n", eeprom.board);
for (i = 0; i < ARRAY_SIZE(eeprom.cs); i++)
if (eeprom.cs[i].type != 0xff)
pismo_add_one(pismo, i, &eeprom.cs[i],
pdata->cs_addrs[i]);
return 0;
exit_free:
kfree(pismo);
return ret;
}
/* I2C device IDs this driver binds against (board name "pismo"). */
static const struct i2c_device_id pismo_id[] = {
	{ "pismo" },
	{ },
};
MODULE_DEVICE_TABLE(i2c, pismo_id);

static struct i2c_driver pismo_driver = {
	.driver	= {
		.name	= "pismo",
		.owner	= THIS_MODULE,
	},
	.probe		= pismo_probe,
	.remove		= pismo_remove,
	.id_table	= pismo_id,
};

static int __init pismo_init(void)
{
	/*
	 * The EEPROM layout is fixed by the hardware; catch any accidental
	 * structure padding/size changes at compile time.
	 */
	BUILD_BUG_ON(sizeof(struct pismo_cs_block) != 48);
	BUILD_BUG_ON(sizeof(struct pismo_eeprom) != 256);

	return i2c_add_driver(&pismo_driver);
}
module_init(pismo_init);

static void __exit pismo_exit(void)
{
	i2c_del_driver(&pismo_driver);
}
module_exit(pismo_exit);

MODULE_AUTHOR("Russell King <linux@arm.linux.org.uk>");
MODULE_DESCRIPTION("PISMO memory driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
itsmerajit/Samsung_j2 | drivers/video/mmp/hw/mmp_spi.c | 4697 | 4684 | /*
* linux/drivers/video/mmp/hw/mmp_spi.c
* using the spi in LCD controler for commands send
*
* Copyright (C) 2012 Marvell Technology Group Ltd.
* Authors: Guoqing Li <ligq@marvell.com>
* Lisa Du <cldu@marvell.com>
* Zhou Zhu <zzhu3@marvell.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/spi/spi.h>
#include "mmp_ctrl.h"
/**
 * lcd_spi_write - write one word of command data to the LCD SPI port
 * @spi: SPI device; ->bits_per_word selects an 8/16/32-bit register write.
 * @data: can be 8/16/32-bit, MSB justified data to write.
 *
 * Busy-waits (udelay polling — no interrupt handler) for the transfer
 * complete bit in the interrupt status register, then de-asserts the
 * start bit and clears the status for the next write.
 * The caller is expected to perform the necessary locking.
 *
 * Returns:
 *	%-ETIMEDOUT	timeout occurred
 *	0		success
 */
static inline int lcd_spi_write(struct spi_device *spi, u32 data)
{
	int timeout = 100000, isr, ret = 0;
	u32 tmp;
	/* the master devdata holds a single pointer: the register base */
	void *reg_base =
		*(void **)spi_master_get_devdata(spi->master);

	/* clear ISR */
	writel_relaxed(~SPI_IRQ_MASK, reg_base + SPU_IRQ_ISR);

	/* load the TX data register at the width the device expects */
	switch (spi->bits_per_word) {
	case 8:
		writel_relaxed((u8)data, reg_base + LCD_SPU_SPI_TXDATA);
		break;
	case 16:
		writel_relaxed((u16)data, reg_base + LCD_SPU_SPI_TXDATA);
		break;
	case 32:
		writel_relaxed((u32)data, reg_base + LCD_SPU_SPI_TXDATA);
		break;
	default:
		dev_err(&spi->dev, "Wrong spi bit length\n");
	}

	/* SPI start to send command */
	tmp = readl_relaxed(reg_base + LCD_SPU_SPI_CTRL);
	tmp &= ~CFG_SPI_START_MASK;
	tmp |= CFG_SPI_START(1);
	writel(tmp, reg_base + LCD_SPU_SPI_CTRL);

	/* poll for completion, sleeping 100us between reads */
	isr = readl_relaxed(reg_base + SPU_IRQ_ISR);
	while (!(isr & SPI_IRQ_ENA_MASK)) {
		udelay(100);
		isr = readl_relaxed(reg_base + SPU_IRQ_ISR);
		if (!--timeout) {
			ret = -ETIMEDOUT;
			dev_err(&spi->dev, "spi cmd send time out\n");
			break;
		}
	}

	/* de-assert the start bit ready for the next transfer */
	tmp = readl_relaxed(reg_base + LCD_SPU_SPI_CTRL);
	tmp &= ~CFG_SPI_START_MASK;
	tmp |= CFG_SPI_START(0);
	writel_relaxed(tmp, reg_base + LCD_SPU_SPI_CTRL);

	/* clear the status bits again */
	writel_relaxed(~SPI_IRQ_MASK, reg_base + SPU_IRQ_ISR);

	return ret;
}
/*
 * spi_master ->setup hook: program the LCD controller's SPI control
 * register (clock count, word size, 3/4-wire mode) and switch the I/O
 * pads to SPI mode if they are not there already.  Always returns 0.
 */
static int lcd_spi_setup(struct spi_device *spi)
{
	void *reg_base =
		*(void **)spi_master_get_devdata(spi->master);
	u32 tmp;

	tmp = CFG_SCLKCNT(16) |
		CFG_TXBITS(spi->bits_per_word) |
		CFG_SPI_SEL(1) | CFG_SPI_ENA(1) |
		CFG_SPI_3W4WB(1);
	writel(tmp, reg_base + LCD_SPU_SPI_CTRL);

	/*
	 * After set mode it need a time to pull up the spi singals,
	 * or it would cause the wrong waveform when send spi command,
	 * especially on pxa910h
	 */
	tmp = readl_relaxed(reg_base + SPU_IOPAD_CONTROL);
	if ((tmp & CFG_IOPADMODE_MASK) != IOPAD_DUMB18SPI)
		writel_relaxed(IOPAD_DUMB18SPI |
			(tmp & ~CFG_IOPADMODE_MASK),
			reg_base + SPU_IOPAD_CONTROL);
	udelay(20);

	return 0;
}
/*
 * spi_master ->transfer hook: synchronously push every transfer in the
 * message out through lcd_spi_write(), word by word at the configured
 * bits-per-word, then complete the message.
 *
 * Fix: lcd_spi_write() can fail with -ETIMEDOUT, but its return value
 * was discarded and m->status was unconditionally reported as 0.  Record
 * the first error (and -EINVAL for an unsupported word size) so callers
 * waiting on the message see the failure.
 */
static int lcd_spi_one_transfer(struct spi_device *spi, struct spi_message *m)
{
	struct spi_transfer *t;
	int i, err, status = 0;

	list_for_each_entry(t, &m->transfers, transfer_list) {
		switch (spi->bits_per_word) {
		case 8:
			for (i = 0; i < t->len; i++) {
				err = lcd_spi_write(spi, ((u8 *)t->tx_buf)[i]);
				if (err && !status)
					status = err;
			}
			break;
		case 16:
			for (i = 0; i < t->len/2; i++) {
				err = lcd_spi_write(spi, ((u16 *)t->tx_buf)[i]);
				if (err && !status)
					status = err;
			}
			break;
		case 32:
			for (i = 0; i < t->len/4; i++) {
				err = lcd_spi_write(spi, ((u32 *)t->tx_buf)[i]);
				if (err && !status)
					status = err;
			}
			break;
		default:
			dev_err(&spi->dev, "Wrong spi bit length\n");
			if (!status)
				status = -EINVAL;
		}
	}

	m->status = status;
	if (m->complete)
		m->complete(m->context);

	return 0;
}
/*
 * Allocate and register the SPI master that fronts the LCD controller's
 * command port.  The master's devdata is a single pointer holding the
 * controller register base, which the setup/write paths dereference.
 *
 * Returns 0 on success or a -ve errno from the SPI core.
 */
int lcd_spi_register(struct mmphw_ctrl *ctrl)
{
	struct spi_master *master;
	void **p_regbase;
	int err;

	master = spi_alloc_master(ctrl->dev, sizeof(void *));
	if (!master) {
		dev_err(ctrl->dev, "unable to allocate SPI master\n");
		return -ENOMEM;
	}

	/* stash the register base where lcd_spi_setup/write can find it */
	p_regbase = spi_master_get_devdata(master);
	*p_regbase = ctrl->reg_base;

	/* set bus num to 5 to avoid conflict with other spi hosts */
	master->bus_num = 5;
	master->num_chipselect = 1;
	master->setup = lcd_spi_setup;
	master->transfer = lcd_spi_one_transfer;

	err = spi_register_master(master);
	if (err < 0) {
		dev_err(ctrl->dev, "unable to register SPI master\n");
		spi_master_put(master);	/* drop the allocation reference */
		return err;
	}

	dev_info(&master->dev, "registered\n");

	return 0;
}
| gpl-2.0 |
syhost/android_kernel_oppo_find7a | arch/arm/mach-clps711x/autcpu12.c | 4953 | 2144 | /*
* linux/arch/arm/mach-clps711x/autcpu12.c
*
* (c) 2001 Thomas Gleixner, autronix automation <gleixner@autronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/sizes.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/mach/map.h>
#include <mach/autcpu12.h>
#include "common.h"
/*
* The on-chip registers are given a size of 1MB so that a section can
* be used to map them; this saves a page table. This is the place to
* add mappings for ROM, expansion memory, PCMCIA, etc. (if static
* mappings are chosen for those areas).
*
*/
/* Static I/O mapping for the board-specific peripherals. */
static struct map_desc autcpu12_io_desc[] __initdata = {
	/* memory-mapped extra io and CS8900A Ethernet chip */
	/* ethernet chip */
	{
		.virtual	= AUTCPU12_VIRT_CS8900A,
		.pfn		= __phys_to_pfn(AUTCPU12_PHYS_CS8900A),
		.length		= SZ_1M,	/* one section => one page-table entry */
		.type		= MT_DEVICE
	}
};

/*
 * Install the SoC-common mappings first, then add the board's own
 * static device mappings on top.
 */
void __init autcpu12_map_io(void)
{
	clps711x_map_io();
	iotable_init(autcpu12_io_desc, ARRAY_SIZE(autcpu12_io_desc));
}
/* Machine descriptor: ties the autcpu12 board to the clps711x SoC hooks. */
MACHINE_START(AUTCPU12, "autronix autcpu12")
	/* Maintainer: Thomas Gleixner */
	.atag_offset	= 0x20000,
	.map_io		= autcpu12_map_io,
	.init_irq	= clps711x_init_irq,
	.timer		= &clps711x_timer,
	.restart	= clps711x_restart,
MACHINE_END
| gpl-2.0 |
EnJens/android_kernel_sony_pollux_windy_stock | arch/sh/mm/flush-sh4.c | 9049 | 2653 | #include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/cache_insns.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
/*
 * Write back the dirty D-caches, but not invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
static void sh4__flush_wback_region(void *start, int size)
{
	reg_size_t aligned_start, v, cnt, end;

	/* round the region out to whole L1 cache lines */
	aligned_start = register_align(start);
	v = aligned_start & ~(L1_CACHE_BYTES-1);
	end = (aligned_start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	cnt = (end - v) / L1_CACHE_BYTES;

	/* hand-unrolled x8 to reduce loop overhead on large regions */
	while (cnt >= 8) {
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		__ocbwb(v); v += L1_CACHE_BYTES;
		cnt -= 8;
	}

	/* remaining (< 8) lines */
	while (cnt) {
		__ocbwb(v); v += L1_CACHE_BYTES;
		cnt--;
	}
}
/*
 * Write back the dirty D-caches and invalidate them.
 *
 * START: Virtual Address (U0, P1, or P3)
 * SIZE: Size of the region.
 */
static void sh4__flush_purge_region(void *start, int size)
{
	reg_size_t aligned_start, v, cnt, end;

	/* round the region out to whole L1 cache lines */
	aligned_start = register_align(start);
	v = aligned_start & ~(L1_CACHE_BYTES-1);
	end = (aligned_start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	cnt = (end - v) / L1_CACHE_BYTES;

	/* hand-unrolled x8, same shape as sh4__flush_wback_region() */
	while (cnt >= 8) {
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		__ocbp(v); v += L1_CACHE_BYTES;
		cnt -= 8;
	}
	while (cnt) {
		__ocbp(v); v += L1_CACHE_BYTES;
		cnt--;
	}
}
/*
 * No write back please: invalidate the D-cache lines covering the
 * region, discarding any dirty data.
 */
static void sh4__flush_invalidate_region(void *start, int size)
{
	reg_size_t aligned_start, v, cnt, end;

	/* round the region out to whole L1 cache lines */
	aligned_start = register_align(start);
	v = aligned_start & ~(L1_CACHE_BYTES-1);
	end = (aligned_start + size + L1_CACHE_BYTES-1)
		& ~(L1_CACHE_BYTES-1);
	cnt = (end - v) / L1_CACHE_BYTES;

	/* hand-unrolled x8, same shape as sh4__flush_wback_region() */
	while (cnt >= 8) {
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		__ocbi(v); v += L1_CACHE_BYTES;
		cnt -= 8;
	}

	while (cnt) {
		__ocbi(v); v += L1_CACHE_BYTES;
		cnt--;
	}
}
/* Install the SH-4 implementations as the generic cache-flush hooks. */
void __init sh4__flush_region_init(void)
{
	__flush_wback_region		= sh4__flush_wback_region;
	__flush_invalidate_region	= sh4__flush_invalidate_region;
	__flush_purge_region		= sh4__flush_purge_region;
}
| gpl-2.0 |
Eistuete/android_kernel_huawei_msm8916 | arch/mips/alchemy/common/vss.c | 9049 | 2204 | /*
* Au1300 media block power gating (VSS)
*
* This is a stop-gap solution until I have the clock framework integration
* ready. This stuff here really must be handled transparently when clocks
* for various media blocks are enabled/disabled.
*/
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/mach-au1x00/au1000.h>
/* Per-block VSS register offsets. */
#define VSS_GATE	0x00	/* gate wait timers */
#define VSS_CLKRST	0x04	/* clock/block control */
#define VSS_FTR		0x08	/* footers */

/* Each media block owns a 0x0c-byte register window in the VSS unit. */
#define VSS_ADDR(blk)	(KSEG1ADDR(AU1300_VSS_PHYS_ADDR) + (blk * 0x0c))

/* Serialises the gating sequences: only one block may transition at once. */
static DEFINE_SPINLOCK(au1300_vss_lock);
/* enable a block as outlined in the databook */
static inline void __enable_block(int block)
{
	void __iomem *base = (void __iomem *)VSS_ADDR(block);

	__raw_writel(3, base + VSS_CLKRST);	/* enable clock, assert reset */
	wmb();					/* each wmb() enforces the databook's write ordering */
	__raw_writel(0x01fffffe, base + VSS_GATE);	/* maximum setup time */
	wmb();

	/* enable footers in sequence */
	__raw_writel(0x01, base + VSS_FTR);
	wmb();
	__raw_writel(0x03, base + VSS_FTR);
	wmb();
	__raw_writel(0x07, base + VSS_FTR);
	wmb();
	__raw_writel(0x0f, base + VSS_FTR);
	wmb();

	__raw_writel(0x01ffffff, base + VSS_GATE);	/* start FSM too */
	wmb();

	__raw_writel(2, base + VSS_CLKRST);	/* deassert reset */
	wmb();

	__raw_writel(0x1f, base + VSS_FTR);	/* enable isolation cells */
	wmb();
}
/* disable a block as outlined in the databook (reverse of __enable_block) */
static inline void __disable_block(int block)
{
	void __iomem *base = (void __iomem *)VSS_ADDR(block);

	__raw_writel(0x0f, base + VSS_FTR);	/* disable isolation cells */
	wmb();
	__raw_writel(0, base + VSS_GATE);	/* disable FSM */
	wmb();
	__raw_writel(3, base + VSS_CLKRST);	/* assert reset */
	wmb();
	__raw_writel(1, base + VSS_CLKRST);	/* disable clock */
	wmb();
	__raw_writel(0, base + VSS_FTR);	/* disable all footers */
	wmb();
}
void au1300_vss_block_control(int block, int enable)
{
unsigned long flags;
if (alchemy_get_cputype() != ALCHEMY_CPU_AU1300)
return;
/* only one block at a time */
spin_lock_irqsave(&au1300_vss_lock, flags);
if (enable)
__enable_block(block);
else
__disable_block(block);
spin_unlock_irqrestore(&au1300_vss_lock, flags);
}
EXPORT_SYMBOL_GPL(au1300_vss_block_control);
| gpl-2.0 |
Altaf-Mahdi/android_kernel_oneplus_msm8996 | drivers/tty/serial/serial_core.c | 90 | 73037 | /*
* Driver core for serial ports
*
* Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o.
*
* Copyright 1999 ARM Limited
* Copyright (C) 2000-2001 Deep Blue Solutions Ltd.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/of.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/serial.h> /* for serial_state and serial_icounter_struct */
#include <linux/serial_core.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
/*
 * This is used to lock changes in serial line configuration.
 */
static DEFINE_MUTEX(port_mutex);

/*
 * lockdep: port->lock is initialized in two places, but we
 * want only one lock-class:
 */
static struct lock_class_key port_lock_key;

/*
 * Shift needed to move the upper half of an I/O port address into/out of
 * the separate 'port_high' field (0 when long and int are the same size).
 */
#define HIGH_BITS_OFFSET	((sizeof(long)-sizeof(int))*8)

static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
					struct ktermios *old_termios);
static void uart_wait_until_sent(struct tty_struct *tty, int timeout);
static void uart_change_pm(struct uart_state *state,
			   enum uart_pm_state pm_state);

static void uart_port_shutdown(struct tty_port *port);

/* Non-zero when DCD (carrier) handling is enabled for this port. */
static int uart_dcd_enabled(struct uart_port *uport)
{
	return uport->status & UPSTAT_DCD_ENABLE;
}
/*
 * This routine is used by the interrupt handler to schedule processing in
 * the software interrupt portion of the driver.  It wakes up writers
 * waiting on the tty once transmit space becomes available.
 */
void uart_write_wakeup(struct uart_port *port)
{
	struct uart_state *state = port->state;

	/*
	 * This means you called this function _after_ the port was
	 * closed.  No cookie for you.
	 */
	BUG_ON(!state);

	tty_wakeup(state->port.tty);
}
/* Halt transmission on the port (tty ->stop path). */
static void uart_stop(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *uport = state->uart_port;
	unsigned long irqflags;

	spin_lock_irqsave(&uport->lock, irqflags);
	uport->ops->stop_tx(uport);
	spin_unlock_irqrestore(&uport->lock, irqflags);
}

/* Kick the transmitter; called with the port lock already held. */
static void __uart_start(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *uport = state->uart_port;

	/* let the driver wake a sleeping remote end first, if it can */
	if (uport->ops->wake_peer)
		uport->ops->wake_peer(uport);

	if (!uart_tx_stopped(uport))
		uport->ops->start_tx(uport);
}

/* Lock-taking wrapper around __uart_start(). */
static void uart_start(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *uport = state->uart_port;
	unsigned long irqflags;

	spin_lock_irqsave(&uport->lock, irqflags);
	__uart_start(tty);
	spin_unlock_irqrestore(&uport->lock, irqflags);
}
/*
 * Atomically set/clear bits in the port's modem-control word and push
 * the new value to the hardware only if it actually changed.
 */
static inline void
uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear)
{
	unsigned long flags;
	unsigned int old;

	spin_lock_irqsave(&port->lock, flags);
	old = port->mctrl;
	port->mctrl = (old & ~clear) | set;
	if (old != port->mctrl)
		port->ops->set_mctrl(port, port->mctrl);
	spin_unlock_irqrestore(&port->lock, flags);
}

#define uart_set_mctrl(port, set)	uart_update_mctrl(port, set, 0)
#define uart_clear_mctrl(port, clear)	uart_update_mctrl(port, 0, clear)
/*
 * Startup the port.  This will be called once per open.  All calls
 * will be serialised by the per-port mutex.
 *
 * Returns 0 on success, 1 for "soft" failures that should still allow
 * setserial to reconfigure the port, or a -ve errno.
 */
static int uart_port_startup(struct tty_struct *tty, struct uart_state *state,
		int init_hw)
{
	struct uart_port *uport = state->uart_port;
	unsigned long page;
	int retval = 0;

	/* undetected ports get no hardware setup; see the note at the end */
	if (uport->type == PORT_UNKNOWN)
		return 1;

	/*
	 * Make sure the device is in D0 state.
	 */
	uart_change_pm(state, UART_PM_STATE_ON);

	/*
	 * Initialise and allocate the transmit and temporary
	 * buffer.
	 */
	if (!state->xmit.buf) {
		/* This is protected by the per port mutex */
		page = get_zeroed_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;

		state->xmit.buf = (unsigned char *) page;
		uart_circ_clear(&state->xmit);
	}

	retval = uport->ops->startup(uport);
	if (retval == 0) {
		/* adopt the termios the console was using, then drop the
		 * stashed copy so it is only applied once */
		if (uart_console(uport) && uport->cons->cflag) {
			tty->termios.c_cflag = uport->cons->cflag;
			uport->cons->cflag = 0;
		}

		/*
		 * Initialise the hardware port settings.
		 */
		uart_change_speed(tty, state, NULL);

		if (init_hw) {
			/*
			 * Setup the RTS and DTR signals once the
			 * port is open and ready to respond.
			 */
			if (tty->termios.c_cflag & CBAUD)
				uart_set_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
		}

		/* if CTS flow control is enabled and CTS is currently
		 * deasserted, start out hardware-stopped */
		spin_lock_irq(&uport->lock);
		if (uart_cts_enabled(uport) &&
		    !(uport->ops->get_mctrl(uport) & TIOCM_CTS))
			uport->hw_stopped = 1;
		else
			uport->hw_stopped = 0;
		spin_unlock_irq(&uport->lock);
	}

	/*
	 * This is to allow setserial on this port. People may want to set
	 * port/irq/type and then reconfigure the port properly if it failed
	 * now.
	 */
	if (retval && capable(CAP_SYS_ADMIN))
		return 1;

	return retval;
}
/*
 * Wrapper around uart_port_startup() that maintains the tty-level
 * ASYNC_INITIALIZED and TTY_IO_ERROR bits.  A positive return from
 * uart_port_startup() (a "soft" failure) is reported as success, but
 * TTY_IO_ERROR is left set.
 */
static int uart_startup(struct tty_struct *tty, struct uart_state *state,
		int init_hw)
{
	struct tty_port *port = &state->port;
	int retval;

	if (port->flags & ASYNC_INITIALIZED)
		return 0;

	/*
	 * Set the TTY IO error marker - we will only clear this
	 * once we have successfully opened the port.
	 */
	set_bit(TTY_IO_ERROR, &tty->flags);

	retval = uart_port_startup(tty, state, init_hw);
	if (!retval) {
		set_bit(ASYNCB_INITIALIZED, &port->flags);
		clear_bit(TTY_IO_ERROR, &tty->flags);
	} else if (retval > 0)
		retval = 0;

	return retval;
}
/*
 * This routine will shutdown a serial port; interrupts are disabled, and
 * DTR is dropped if the hangup on close termio flag is on.  Calls to
 * uart_shutdown are serialised by the per-port semaphore.
 */
static void uart_shutdown(struct tty_struct *tty, struct uart_state *state)
{
	struct uart_port *uport = state->uart_port;
	struct tty_port *port = &state->port;

	/*
	 * Set the TTY IO error marker
	 */
	if (tty)
		set_bit(TTY_IO_ERROR, &tty->flags);

	if (test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags)) {
		/*
		 * Turn off DTR and RTS early.
		 */
		if (uart_console(uport) && tty)
			uport->cons->cflag = tty->termios.c_cflag;	/* stash for next open */

		if (!tty || (tty->termios.c_cflag & HUPCL))
			uart_clear_mctrl(uport, TIOCM_DTR | TIOCM_RTS);

		uart_port_shutdown(port);
	}

	/*
	 * It's possible for shutdown to be called after suspend if we get
	 * a DCD drop (hangup) at just the right time.  Clear suspended bit so
	 * we don't try to resume a port that has been shutdown.
	 */
	clear_bit(ASYNCB_SUSPENDED, &port->flags);

	/*
	 * Free the transmit buffer page.
	 */
	if (state->xmit.buf) {
		free_page((unsigned long)state->xmit.buf);
		state->xmit.buf = NULL;
	}
}
/**
 * uart_update_timeout - update per-port FIFO timeout.
 * @port:  uart_port structure describing the port
 * @cflag: termios cflag value
 * @baud:  speed of the port
 *
 * Set the port FIFO timeout value.  The @cflag value should
 * reflect the actual hardware settings.
 */
void
uart_update_timeout(struct uart_port *port, unsigned int cflag,
		    unsigned int baud)
{
	unsigned int bits;

	/* byte size and parity: bits = start bit + data bits + stop bit */
	switch (cflag & CSIZE) {
	case CS5:
		bits = 7;
		break;
	case CS6:
		bits = 8;
		break;
	case CS7:
		bits = 9;
		break;
	default:
		bits = 10;
		break; /* CS8 */
	}

	if (cflag & CSTOPB)
		bits++;	/* second stop bit */
	if (cflag & PARENB)
		bits++;	/* parity bit */

	/*
	 * The total number of bits to be transmitted in the fifo.
	 */
	bits = bits * port->fifosize;

	/*
	 * Figure the timeout to send the above number of bits.
	 * Add .02 seconds of slop
	 */
	port->timeout = (HZ * bits) / baud + HZ/50;
}
EXPORT_SYMBOL(uart_update_timeout);
/**
 * uart_get_baud_rate - return baud rate for a particular port
 * @port: uart_port structure describing the port in question.
 * @termios: desired termios settings.
 * @old: old termios (or NULL)
 * @min: minimum acceptable baud rate
 * @max: maximum acceptable baud rate
 *
 * Decode the termios structure into a numeric baud rate,
 * taking account of the magic 38400 baud rate (with spd_*
 * flags), and mapping the %B0 rate to 9600 baud.
 *
 * If the new baud rate is invalid, try the old termios setting.
 * If it's still invalid, we try 9600 baud.
 *
 * Update the @termios structure to reflect the baud rate
 * we're actually going to be using. Don't do this for the case
 * where B0 is requested ("hang up").
 */
unsigned int
uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
		   struct ktermios *old, unsigned int min, unsigned int max)
{
	unsigned int try, baud, altbaud = 38400;
	int hung_up = 0;
	upf_t flags = port->flags & UPF_SPD_MASK;

	/* legacy spd_* flags remap the magic 38400 rate */
	if (flags == UPF_SPD_HI)
		altbaud = 57600;
	else if (flags == UPF_SPD_VHI)
		altbaud = 115200;
	else if (flags == UPF_SPD_SHI)
		altbaud = 230400;
	else if (flags == UPF_SPD_WARP)
		altbaud = 460800;

	/* first pass: requested rate; second pass: old termios fallback */
	for (try = 0; try < 2; try++) {
		baud = tty_termios_baud_rate(termios);

		/*
		 * The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
		 * Die! Die! Die!
		 */
		if (try == 0 && baud == 38400)
			baud = altbaud;

		/*
		 * Special case: B0 rate.
		 */
		if (baud == 0) {
			hung_up = 1;
			baud = 9600;
		}

		if (baud >= min && baud <= max)
			return baud;

		/*
		 * Oops, the quotient was zero.  Try again with
		 * the old baud rate if possible.
		 */
		termios->c_cflag &= ~CBAUD;
		if (old) {
			baud = tty_termios_baud_rate(old);
			if (!hung_up)
				tty_termios_encode_baud_rate(termios,
								baud, baud);
			old = NULL;
			continue;
		}

		/*
		 * As a last resort, if the range cannot be met then clip to
		 * the nearest chip supported rate.
		 */
		if (!hung_up) {
			if (baud <= min)
				tty_termios_encode_baud_rate(termios,
							min + 1, min + 1);
			else
				tty_termios_encode_baud_rate(termios,
							max - 1, max - 1);
		}
	}
	/* Should never happen */
	WARN_ON(1);
	return 0;
}
EXPORT_SYMBOL(uart_get_baud_rate);
/**
 * uart_get_divisor - return uart clock divisor
 * @port: uart_port structure describing the port.
 * @baud: desired baud rate
 *
 * Calculate the uart clock divisor for the port.
 */
unsigned int
uart_get_divisor(struct uart_port *port, unsigned int baud)
{
	unsigned int quot;

	/*
	 * Old custom speed handling: the magic 38400 + UPF_SPD_CUST combo
	 * selects the user-supplied divisor directly.
	 */
	if (baud == 38400 && (port->flags & UPF_SPD_MASK) == UPF_SPD_CUST)
		quot = port->custom_divisor;
	else
		quot = DIV_ROUND_CLOSEST(port->uartclk, 16 * baud);

	return quot;
}
EXPORT_SYMBOL(uart_get_divisor);
/* FIXME: Consistent locking policy */
/*
 * Push the current termios settings down to the driver's set_termios()
 * hook, then update the CTS/DCD status-enable bits from the cflag.
 */
static void uart_change_speed(struct tty_struct *tty, struct uart_state *state,
					struct ktermios *old_termios)
{
	struct uart_port *uport = state->uart_port;
	struct ktermios *termios;

	/*
	 * If we have no tty, termios, or the port does not exist,
	 * then we can't set the parameters for this port.
	 */
	if (!tty || uport->type == PORT_UNKNOWN)
		return;

	termios = &tty->termios;
	uport->ops->set_termios(uport, termios, old_termios);

	/*
	 * Set modem status enables based on termios cflag
	 */
	spin_lock_irq(&uport->lock);
	if (termios->c_cflag & CRTSCTS)
		uport->status |= UPSTAT_CTS_ENABLE;
	else
		uport->status &= ~UPSTAT_CTS_ENABLE;

	if (termios->c_cflag & CLOCAL)
		uport->status &= ~UPSTAT_DCD_ENABLE;
	else
		uport->status |= UPSTAT_DCD_ENABLE;
	spin_unlock_irq(&uport->lock);
}
static inline int __uart_put_char(struct uart_port *port,
struct circ_buf *circ, unsigned char c)
{
unsigned long flags;
int ret = 0;
if (!circ->buf)
return 0;
spin_lock_irqsave(&port->lock, flags);
if (uart_circ_chars_free(circ) != 0) {
circ->buf[circ->head] = c;
circ->head = (circ->head + 1) & (UART_XMIT_SIZE - 1);
ret = 1;
}
spin_unlock_irqrestore(&port->lock, flags);
return ret;
}
static int uart_put_char(struct tty_struct *tty, unsigned char ch)
{
struct uart_state *state = tty->driver_data;
return __uart_put_char(state->uart_port, &state->xmit, ch);
}
/* tty ->flush_chars hook: just kick the transmitter. */
static void uart_flush_chars(struct tty_struct *tty)
{
	uart_start(tty);
}

/*
 * tty ->write hook: copy as much of @buf as fits into the transmit
 * ring (possibly in two chunks around the wrap point), then start
 * transmission.  Returns the number of bytes accepted.
 */
static int uart_write(struct tty_struct *tty,
					const unsigned char *buf, int count)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *port;
	struct circ_buf *circ;
	unsigned long flags;
	int c, ret = 0;

	/*
	 * This means you called this function _after_ the port was
	 * closed.  No cookie for you.
	 */
	if (!state) {
		WARN_ON(1);
		return -EL3HLT;
	}

	port = state->uart_port;
	circ = &state->xmit;

	if (!circ->buf)
		return 0;

	spin_lock_irqsave(&port->lock, flags);
	while (1) {
		/* contiguous free space up to the end of the ring */
		c = CIRC_SPACE_TO_END(circ->head, circ->tail, UART_XMIT_SIZE);
		if (count < c)
			c = count;
		if (c <= 0)
			break;
		memcpy(circ->buf + circ->head, buf, c);
		circ->head = (circ->head + c) & (UART_XMIT_SIZE - 1);
		buf += c;
		count -= c;
		ret += c;
	}
	spin_unlock_irqrestore(&port->lock, flags);

	uart_start(tty);
	return ret;
}
/* tty ->write_room hook: free space remaining in the transmit ring. */
static int uart_write_room(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	unsigned long irqflags;
	int space;

	spin_lock_irqsave(&state->uart_port->lock, irqflags);
	space = uart_circ_chars_free(&state->xmit);
	spin_unlock_irqrestore(&state->uart_port->lock, irqflags);

	return space;
}

/* tty ->chars_in_buffer hook: bytes still queued for transmission. */
static int uart_chars_in_buffer(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	unsigned long irqflags;
	int pending;

	spin_lock_irqsave(&state->uart_port->lock, irqflags);
	pending = uart_circ_chars_pending(&state->xmit);
	spin_unlock_irqrestore(&state->uart_port->lock, irqflags);

	return pending;
}
/*
 * tty ->flush_buffer hook: discard everything queued in the transmit
 * ring, let the driver flush its own buffers, and wake up writers.
 */
static void uart_flush_buffer(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *port;
	unsigned long flags;

	/*
	 * This means you called this function _after_ the port was
	 * closed.  No cookie for you.
	 */
	if (!state) {
		WARN_ON(1);
		return;
	}

	port = state->uart_port;
	pr_debug("uart_flush_buffer(%d) called\n", tty->index);

	spin_lock_irqsave(&port->lock, flags);
	uart_circ_clear(&state->xmit);
	if (port->ops->flush_buffer)
		port->ops->flush_buffer(port);
	spin_unlock_irqrestore(&port->lock, flags);
	tty_wakeup(tty);
}
/*
 * This function is used to send a high-priority XON/XOFF character to
 * the device.  Drivers with a send_xchar() hook handle it themselves;
 * otherwise the character is stashed in port->x_char for the transmit
 * path to pick up ahead of queued data.
 */
static void uart_send_xchar(struct tty_struct *tty, char ch)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *port = state->uart_port;
	unsigned long flags;

	if (port->ops->send_xchar)
		port->ops->send_xchar(port, ch);
	else {
		spin_lock_irqsave(&port->lock, flags);
		port->x_char = ch;
		if (ch)
			port->ops->start_tx(port);
		spin_unlock_irqrestore(&port->lock, flags);
	}
}
/*
 * tty ->throttle hook.  'mask' collects the flow-control methods the
 * termios settings ask for; any that the driver handles itself (flag set
 * in port->flags) go through its throttle() hook and are stripped from
 * the mask, the rest fall back to the generic XOFF / RTS handling.
 */
static void uart_throttle(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *port = state->uart_port;
	uint32_t mask = 0;

	if (I_IXOFF(tty))
		mask |= UPF_SOFT_FLOW;
	if (tty->termios.c_cflag & CRTSCTS)
		mask |= UPF_HARD_FLOW;

	if (port->flags & mask) {
		port->ops->throttle(port);
		mask &= ~port->flags;
	}

	if (mask & UPF_SOFT_FLOW)
		uart_send_xchar(tty, STOP_CHAR(tty));

	if (mask & UPF_HARD_FLOW)
		uart_clear_mctrl(port, TIOCM_RTS);
}

/*
 * tty ->unthrottle hook: the mirror image of uart_throttle() —
 * sends XON and/or re-asserts RTS for methods the driver does not
 * handle itself.
 */
static void uart_unthrottle(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *port = state->uart_port;
	uint32_t mask = 0;

	if (I_IXOFF(tty))
		mask |= UPF_SOFT_FLOW;
	if (tty->termios.c_cflag & CRTSCTS)
		mask |= UPF_HARD_FLOW;

	if (port->flags & mask) {
		port->ops->unthrottle(port);
		mask &= ~port->flags;
	}

	if (mask & UPF_SOFT_FLOW)
		uart_send_xchar(tty, START_CHAR(tty));

	if (mask & UPF_HARD_FLOW)
		uart_set_mctrl(port, TIOCM_RTS);
}
/*
 * Fill a serial_struct snapshot from the port's current configuration.
 * Caller must hold port->mutex (see uart_get_info()).
 */
static void do_uart_get_info(struct tty_port *port,
			struct serial_struct *retinfo)
{
	struct uart_state *state = container_of(port, struct uart_state, port);
	struct uart_port *uport = state->uart_port;

	memset(retinfo, 0, sizeof(*retinfo));

	retinfo->type	    = uport->type;
	retinfo->line	    = uport->line;
	retinfo->port	    = uport->iobase;
	if (HIGH_BITS_OFFSET)
		retinfo->port_high = (long) uport->iobase >> HIGH_BITS_OFFSET;
	retinfo->irq		    = uport->irq;
	retinfo->flags	    = uport->flags;
	retinfo->xmit_fifo_size  = uport->fifosize;
	retinfo->baud_base	    = uport->uartclk / 16;
	/* delays are kept in jiffies internally but exposed in 10ms units */
	retinfo->close_delay	    = jiffies_to_msecs(port->close_delay) / 10;
	retinfo->closing_wait    = port->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
				ASYNC_CLOSING_WAIT_NONE :
				jiffies_to_msecs(port->closing_wait) / 10;
	retinfo->custom_divisor  = uport->custom_divisor;
	retinfo->hub6	    = uport->hub6;
	retinfo->io_type         = uport->iotype;
	retinfo->iomem_reg_shift = uport->regshift;
	retinfo->iomem_base      = (void *)(unsigned long)uport->mapbase;
}

static void uart_get_info(struct tty_port *port,
			struct serial_struct *retinfo)
{
	/* Ensure the state we copy is consistent and no hardware changes
	   occur as we go */
	mutex_lock(&port->mutex);
	do_uart_get_info(port, retinfo);
	mutex_unlock(&port->mutex);
}

/* TIOCGSERIAL path: copy the snapshot out to userspace. */
static int uart_get_info_user(struct tty_port *port,
			 struct serial_struct __user *retinfo)
{
	struct serial_struct tmp;

	uart_get_info(port, &tmp);

	if (copy_to_user(retinfo, &tmp, sizeof(*retinfo)))
		return -EFAULT;
	return 0;
}
static int uart_set_info(struct tty_struct *tty, struct tty_port *port,
struct uart_state *state,
struct serial_struct *new_info)
{
struct uart_port *uport = state->uart_port;
unsigned long new_port;
unsigned int change_irq, change_port, closing_wait;
unsigned int old_custom_divisor, close_delay;
upf_t old_flags, new_flags;
int retval = 0;
new_port = new_info->port;
if (HIGH_BITS_OFFSET)
new_port += (unsigned long) new_info->port_high << HIGH_BITS_OFFSET;
new_info->irq = irq_canonicalize(new_info->irq);
close_delay = msecs_to_jiffies(new_info->close_delay * 10);
closing_wait = new_info->closing_wait == ASYNC_CLOSING_WAIT_NONE ?
ASYNC_CLOSING_WAIT_NONE :
msecs_to_jiffies(new_info->closing_wait * 10);
change_irq = !(uport->flags & UPF_FIXED_PORT)
&& new_info->irq != uport->irq;
/*
* Since changing the 'type' of the port changes its resource
* allocations, we should treat type changes the same as
* IO port changes.
*/
change_port = !(uport->flags & UPF_FIXED_PORT)
&& (new_port != uport->iobase ||
(unsigned long)new_info->iomem_base != uport->mapbase ||
new_info->hub6 != uport->hub6 ||
new_info->io_type != uport->iotype ||
new_info->iomem_reg_shift != uport->regshift ||
new_info->type != uport->type);
old_flags = uport->flags;
new_flags = new_info->flags;
old_custom_divisor = uport->custom_divisor;
if (!capable(CAP_SYS_ADMIN)) {
retval = -EPERM;
if (change_irq || change_port ||
(new_info->baud_base != uport->uartclk / 16) ||
(close_delay != port->close_delay) ||
(closing_wait != port->closing_wait) ||
(new_info->xmit_fifo_size &&
new_info->xmit_fifo_size != uport->fifosize) ||
(((new_flags ^ old_flags) & ~UPF_USR_MASK) != 0))
goto exit;
uport->flags = ((uport->flags & ~UPF_USR_MASK) |
(new_flags & UPF_USR_MASK));
uport->custom_divisor = new_info->custom_divisor;
goto check_and_exit;
}
/*
* Ask the low level driver to verify the settings.
*/
if (uport->ops->verify_port)
retval = uport->ops->verify_port(uport, new_info);
if ((new_info->irq >= nr_irqs) || (new_info->irq < 0) ||
(new_info->baud_base < 9600))
retval = -EINVAL;
if (retval)
goto exit;
if (change_port || change_irq) {
retval = -EBUSY;
/*
* Make sure that we are the sole user of this port.
*/
if (tty_port_users(port) > 1)
goto exit;
/*
* We need to shutdown the serial port at the old
* port/type/irq combination.
*/
uart_shutdown(tty, state);
}
if (change_port) {
unsigned long old_iobase, old_mapbase;
unsigned int old_type, old_iotype, old_hub6, old_shift;
old_iobase = uport->iobase;
old_mapbase = uport->mapbase;
old_type = uport->type;
old_hub6 = uport->hub6;
old_iotype = uport->iotype;
old_shift = uport->regshift;
/*
* Free and release old regions
*/
if (old_type != PORT_UNKNOWN)
uport->ops->release_port(uport);
uport->iobase = new_port;
uport->type = new_info->type;
uport->hub6 = new_info->hub6;
uport->iotype = new_info->io_type;
uport->regshift = new_info->iomem_reg_shift;
uport->mapbase = (unsigned long)new_info->iomem_base;
/*
* Claim and map the new regions
*/
if (uport->type != PORT_UNKNOWN) {
retval = uport->ops->request_port(uport);
} else {
/* Always success - Jean II */
retval = 0;
}
/*
* If we fail to request resources for the
* new port, try to restore the old settings.
*/
if (retval) {
uport->iobase = old_iobase;
uport->type = old_type;
uport->hub6 = old_hub6;
uport->iotype = old_iotype;
uport->regshift = old_shift;
uport->mapbase = old_mapbase;
if (old_type != PORT_UNKNOWN) {
retval = uport->ops->request_port(uport);
/*
* If we failed to restore the old settings,
* we fail like this.
*/
if (retval)
uport->type = PORT_UNKNOWN;
/*
* We failed anyway.
*/
retval = -EBUSY;
}
/* Added to return the correct error -Ram Gupta */
goto exit;
}
}
if (change_irq)
uport->irq = new_info->irq;
if (!(uport->flags & UPF_FIXED_PORT))
uport->uartclk = new_info->baud_base * 16;
uport->flags = (uport->flags & ~UPF_CHANGE_MASK) |
(new_flags & UPF_CHANGE_MASK);
uport->custom_divisor = new_info->custom_divisor;
port->close_delay = close_delay;
port->closing_wait = closing_wait;
if (new_info->xmit_fifo_size)
uport->fifosize = new_info->xmit_fifo_size;
port->low_latency = (uport->flags & UPF_LOW_LATENCY) ? 1 : 0;
check_and_exit:
retval = 0;
if (uport->type == PORT_UNKNOWN)
goto exit;
if (port->flags & ASYNC_INITIALIZED) {
if (((old_flags ^ uport->flags) & UPF_SPD_MASK) ||
old_custom_divisor != uport->custom_divisor) {
/*
* If they're setting up a custom divisor or speed,
* instead of clearing it, then bitch about it. No
* need to rate-limit; it's CAP_SYS_ADMIN only.
*/
if (uport->flags & UPF_SPD_MASK) {
char buf[64];
dev_notice(uport->dev,
"%s sets custom speed on %s. This is deprecated.\n",
current->comm,
tty_name(port->tty, buf));
}
uart_change_speed(tty, state, NULL);
}
} else
retval = uart_startup(tty, state, 1);
exit:
return retval;
}
/*
 * Copy the user-supplied serial_struct and apply it to the port.
 * Holding port->mutex protects port->count and blocks concurrent
 * opens as well as module insertion/removal while we reconfigure.
 */
static int uart_set_info_user(struct tty_struct *tty, struct uart_state *state,
			      struct serial_struct __user *newinfo)
{
	struct tty_port *tport = &state->port;
	struct serial_struct new_serial;
	int ret;

	if (copy_from_user(&new_serial, newinfo, sizeof(new_serial)))
		return -EFAULT;

	mutex_lock(&tport->mutex);
	ret = uart_set_info(tty, tport, state, &new_serial);
	mutex_unlock(&tport->mutex);

	return ret;
}
/**
 * uart_get_lsr_info - get line status register info
 * @tty: tty associated with the UART
 * @state: UART being queried
 * @value: returned modem value
 *
 * Note: uart_ioctl protects us against hangups.
 */
static int uart_get_lsr_info(struct tty_struct *tty,
			struct uart_state *state, unsigned int __user *value)
{
	struct uart_port *uport = state->uart_port;
	unsigned int lsr = uport->ops->tx_empty(uport);

	/*
	 * Pretend the transmitter is busy whenever a character is still
	 * queued for transmission (an x_char, or pending data in the
	 * circular buffer while tx is running); this avoids a race with
	 * the transmit interrupt.
	 */
	if (uport->x_char)
		lsr &= ~TIOCSER_TEMT;
	else if (uart_circ_chars_pending(&state->xmit) > 0 &&
		 !uart_tx_stopped(uport))
		lsr &= ~TIOCSER_TEMT;

	return put_user(lsr, value);
}
/* TIOCMGET: report the cached output lines OR'd with the live input lines */
static int uart_tiocmget(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *uport = state->uart_port;
	struct tty_port *tport = &state->port;
	int mctrl = -EIO;

	mutex_lock(&tport->mutex);
	if (!(tty->flags & (1 << TTY_IO_ERROR))) {
		spin_lock_irq(&uport->lock);
		mctrl = uport->mctrl | uport->ops->get_mctrl(uport);
		spin_unlock_irq(&uport->lock);
	}
	mutex_unlock(&tport->mutex);

	return mctrl;
}
/* TIOCMSET/TIOCMBIS/TIOCMBIC: apply modem-control line changes */
static int
uart_tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *uport = state->uart_port;
	struct tty_port *tport = &state->port;
	int err = -EIO;

	mutex_lock(&tport->mutex);
	if (!(tty->flags & (1 << TTY_IO_ERROR))) {
		uart_update_mctrl(uport, set, clear);
		err = 0;
	}
	mutex_unlock(&tport->mutex);

	return err;
}
/* Drive the BREAK condition on the line; no-op on an unidentified port */
static int uart_break_ctl(struct tty_struct *tty, int break_state)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *uport = state->uart_port;
	struct tty_port *tport = &state->port;

	mutex_lock(&tport->mutex);
	if (uport->type != PORT_UNKNOWN)
		uport->ops->break_ctl(uport, break_state);
	mutex_unlock(&tport->mutex);

	return 0;
}
/* TIOCSERCONFIG: re-probe the port type/IRQ (root only, sole user only) */
static int uart_do_autoconfig(struct tty_struct *tty, struct uart_state *state)
{
	struct uart_port *uport = state->uart_port;
	struct tty_port *tport = &state->port;
	int ret = -EBUSY;
	int flags;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	/*
	 * Take the per-port mutex: this keeps the open count stable and
	 * blocks further opens of the port while we're auto-configuring.
	 */
	if (mutex_lock_interruptible(&tport->mutex))
		return -ERESTARTSYS;

	if (tty_port_users(tport) == 1) {
		uart_shutdown(tty, state);

		/* Drop whatever resources the previous port type claimed */
		if (uport->type != PORT_UNKNOWN)
			uport->ops->release_port(uport);

		flags = UART_CONFIG_TYPE;
		if (uport->flags & UPF_AUTO_IRQ)
			flags |= UART_CONFIG_IRQ;

		/* config_port() claims the resources if a port is detected */
		uport->ops->config_port(uport, flags);

		ret = uart_startup(tty, state, 1);
	}
	mutex_unlock(&tport->mutex);

	return ret;
}
static void uart_enable_ms(struct uart_port *uport)
{
/*
* Force modem status interrupts on
*/
if (uport->ops->enable_ms)
uport->ops->enable_ms(uport);
}
/*
 * Wait for any of the 4 modem inputs (DCD,RI,DSR,CTS) to change
 * - mask passed in arg for lines of interest
 *   (use |'ed TIOCM_RNG/DSR/CD/CTS for masking)
 * Caller should use TIOCGICOUNT to see which one it was
 *
 * FIXME: This wants extracting into a common all driver implementation
 * of TIOCMWAIT using tty_port.
 */
static int
uart_wait_modem_status(struct uart_state *state, unsigned long arg)
{
	struct uart_port *uport = state->uart_port;
	struct tty_port *port = &state->port;
	DECLARE_WAITQUEUE(wait, current);
	struct uart_icount cprev, cnow;
	int ret;

	/*
	 * note the counters on entry
	 */
	spin_lock_irq(&uport->lock);
	memcpy(&cprev, &uport->icount, sizeof(struct uart_icount));
	uart_enable_ms(uport);
	spin_unlock_irq(&uport->lock);

	add_wait_queue(&port->delta_msr_wait, &wait);
	for (;;) {
		spin_lock_irq(&uport->lock);
		memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
		spin_unlock_irq(&uport->lock);

		set_current_state(TASK_INTERRUPTIBLE);

		if (((arg & TIOCM_RNG) && (cnow.rng != cprev.rng)) ||
		    ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) ||
		    ((arg & TIOCM_CD)  && (cnow.dcd != cprev.dcd)) ||
		    ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) {
			ret = 0;
			break;
		}

		schedule();

		/* see if a signal did it */
		if (signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cprev = cnow;
	}

	/*
	 * Use the helper instead of assigning current->state directly:
	 * the direct store bypasses the scheduler's state-change helpers
	 * and their barrier semantics.
	 */
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&port->delta_msr_wait, &wait);

	return ret;
}
/*
* Get counter of input serial line interrupts (DCD,RI,DSR,CTS)
* Return: write counters to the user passed counter struct
* NB: both 1->0 and 0->1 transitions are counted except for
* RI where only 0->1 is counted.
*/
static int uart_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount)
{
struct uart_state *state = tty->driver_data;
struct uart_icount cnow;
struct uart_port *uport = state->uart_port;
spin_lock_irq(&uport->lock);
memcpy(&cnow, &uport->icount, sizeof(struct uart_icount));
spin_unlock_irq(&uport->lock);
icount->cts = cnow.cts;
icount->dsr = cnow.dsr;
icount->rng = cnow.rng;
icount->dcd = cnow.dcd;
icount->rx = cnow.rx;
icount->tx = cnow.tx;
icount->frame = cnow.frame;
icount->overrun = cnow.overrun;
icount->parity = cnow.parity;
icount->brk = cnow.brk;
icount->buf_overrun = cnow.buf_overrun;
return 0;
}
/*
 * Called via sys_ioctl. We can use spin_lock_irq() here.
 *
 * The dispatch runs in three stages of increasing hardware dependence;
 * each later stage only runs if the previous one left ret == -ENOIOCTLCMD,
 * i.e. did not recognise the command.
 */
static int
uart_ioctl(struct tty_struct *tty, unsigned int cmd,
	   unsigned long arg)
{
	struct uart_state *state = tty->driver_data;
	struct tty_port *port = &state->port;
	void __user *uarg = (void __user *)arg;
	int ret = -ENOIOCTLCMD;

	/*
	 * These ioctls don't rely on the hardware to be present.
	 */
	switch (cmd) {
	case TIOCGSERIAL:
		ret = uart_get_info_user(port, uarg);
		break;

	case TIOCSSERIAL:
		ret = uart_set_info_user(tty, state, uarg);
		break;

	case TIOCSERCONFIG:
		ret = uart_do_autoconfig(tty, state);
		break;

	case TIOCSERGWILD: /* obsolete */
	case TIOCSERSWILD: /* obsolete */
		ret = 0;
		break;
	}

	if (ret != -ENOIOCTLCMD)
		goto out;

	/* Hardware-dependent commands below require a healthy tty */
	if (tty->flags & (1 << TTY_IO_ERROR)) {
		ret = -EIO;
		goto out;
	}

	/*
	 * The following should only be used when hardware is present.
	 */
	switch (cmd) {
	case TIOCMIWAIT:
		ret = uart_wait_modem_status(state, arg);
		break;
	}

	if (ret != -ENOIOCTLCMD)
		goto out;

	mutex_lock(&port->mutex);

	/* Re-check for errors now that we hold the port mutex */
	if (tty->flags & (1 << TTY_IO_ERROR)) {
		ret = -EIO;
		goto out_up;
	}

	/*
	 * All these rely on hardware being present and need to be
	 * protected against the tty being hung up.
	 */
	switch (cmd) {
	case TIOCSERGETLSR: /* Get line status register */
		ret = uart_get_lsr_info(tty, state, uarg);
		break;

	default: {
		struct uart_port *uport = state->uart_port;
		/* Last resort: let the low-level driver try the ioctl */
		if (uport->ops->ioctl)
			ret = uport->ops->ioctl(uport, cmd, arg);
		break;
	}
	}
out_up:
	mutex_unlock(&port->mutex);
out:
	return ret;
}
static void uart_set_ldisc(struct tty_struct *tty)
{
struct uart_state *state = tty->driver_data;
struct uart_port *uport = state->uart_port;
if (uport->ops->set_ldisc)
uport->ops->set_ldisc(uport, tty->termios.c_line);
}
/*
 * Apply new termios settings to the port: reprogram the hardware speed
 * and handle the B0 (hangup) and CRTSCTS (hardware flow) transitions.
 */
static void uart_set_termios(struct tty_struct *tty,
			     struct ktermios *old_termios)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *uport = state->uart_port;
	unsigned int cflag = tty->termios.c_cflag;
	unsigned int iflag_mask = IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK;
	bool sw_changed = false;

	/*
	 * Drivers doing software flow control also need to know
	 * about changes to these input settings.
	 */
	if (uport->flags & UPF_SOFT_FLOW) {
		iflag_mask |= IXANY|IXON|IXOFF;
		sw_changed =
		   tty->termios.c_cc[VSTART] != old_termios->c_cc[VSTART] ||
		   tty->termios.c_cc[VSTOP] != old_termios->c_cc[VSTOP];
	}

	/*
	 * These are the bits that are used to setup various
	 * flags in the low level driver. We can ignore the Bfoo
	 * bits in c_cflag; c_[io]speed will always be set
	 * appropriately by set_termios() in tty_ioctl.c
	 *
	 * If nothing relevant changed, skip the hardware reprogramming.
	 */
	if ((cflag ^ old_termios->c_cflag) == 0 &&
	    tty->termios.c_ospeed == old_termios->c_ospeed &&
	    tty->termios.c_ispeed == old_termios->c_ispeed &&
	    ((tty->termios.c_iflag ^ old_termios->c_iflag) & iflag_mask) == 0 &&
	    !sw_changed) {
		return;
	}

	uart_change_speed(tty, state, old_termios);
	/* reload cflag from termios; port driver may have overriden flags */
	cflag = tty->termios.c_cflag;

	/* Handle transition to B0 status */
	if ((old_termios->c_cflag & CBAUD) && !(cflag & CBAUD))
		uart_clear_mctrl(uport, TIOCM_RTS | TIOCM_DTR);
	/* Handle transition away from B0 status */
	else if (!(old_termios->c_cflag & CBAUD) && (cflag & CBAUD)) {
		unsigned int mask = TIOCM_DTR;
		/* Raise RTS too unless h/w flow is on and we're throttled */
		if (!(cflag & CRTSCTS) || !test_bit(TTY_THROTTLED, &tty->flags))
			mask |= TIOCM_RTS;
		uart_set_mctrl(uport, mask);
	}

	/*
	 * If the port is doing h/w assisted flow control, do nothing.
	 * We assume that port->hw_stopped has never been set.
	 */
	if (uport->flags & UPF_HARD_FLOW)
		return;

	/* Handle turning off CRTSCTS */
	if ((old_termios->c_cflag & CRTSCTS) && !(cflag & CRTSCTS)) {
		spin_lock_irq(&uport->lock);
		uport->hw_stopped = 0;
		__uart_start(tty);
		spin_unlock_irq(&uport->lock);
	}
	/* Handle turning on CRTSCTS */
	else if (!(old_termios->c_cflag & CRTSCTS) && (cflag & CRTSCTS)) {
		spin_lock_irq(&uport->lock);
		/* Stop TX right away if CTS is already de-asserted */
		if (!(uport->ops->get_mctrl(uport) & TIOCM_CTS)) {
			uport->hw_stopped = 1;
			uport->ops->stop_tx(uport);
		}
		spin_unlock_irq(&uport->lock);
	}
}
/*
* Calls to uart_close() are serialised via the tty_lock in
* drivers/tty/tty_io.c:tty_release()
* drivers/tty/tty_io.c:do_tty_hangup()
* This runs from a workqueue and can sleep for a _short_ time only.
*/
static void uart_close(struct tty_struct *tty, struct file *filp)
{
struct uart_state *state = tty->driver_data;
struct tty_port *port;
struct uart_port *uport;
unsigned long flags;
if (!state)
return;
uport = state->uart_port;
port = &state->port;
pr_debug("uart_close(%d) called\n", uport ? uport->line : -1);
if (!port->count || tty_port_close_start(port, tty, filp) == 0)
return;
/*
* At this point, we stop accepting input. To do this, we
* disable the receive line status interrupts.
*/
if (port->flags & ASYNC_INITIALIZED) {
unsigned long flags;
spin_lock_irqsave(&uport->lock, flags);
uport->ops->stop_rx(uport);
spin_unlock_irqrestore(&uport->lock, flags);
/*
* Before we drop DTR, make sure the UART transmitter
* has completely drained; this is especially
* important if there is a transmit FIFO!
*/
uart_wait_until_sent(tty, uport->timeout);
}
mutex_lock(&port->mutex);
uart_shutdown(tty, state);
tty_port_tty_set(port, NULL);
spin_lock_irqsave(&port->lock, flags);
if (port->blocked_open) {
spin_unlock_irqrestore(&port->lock, flags);
if (port->close_delay)
msleep_interruptible(
jiffies_to_msecs(port->close_delay));
spin_lock_irqsave(&port->lock, flags);
} else if (!uart_console(uport)) {
spin_unlock_irqrestore(&port->lock, flags);
uart_change_pm(state, UART_PM_STATE_OFF);
spin_lock_irqsave(&port->lock, flags);
}
/*
* Wake up anyone trying to open this port.
*/
clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
clear_bit(ASYNCB_CLOSING, &port->flags);
spin_unlock_irqrestore(&port->lock, flags);
wake_up_interruptible(&port->open_wait);
wake_up_interruptible(&port->close_wait);
mutex_unlock(&port->mutex);
tty_ldisc_flush(tty);
tty->closing = 0;
}
/*
 * Poll the port until the transmitter FIFO has drained, or a timeout
 * (clamped to 2*port->timeout) expires. May sleep.
 */
static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
{
	struct uart_state *state = tty->driver_data;
	struct uart_port *port = state->uart_port;
	unsigned long char_time, expire;

	/* Nothing to drain on an unconfigured or FIFO-less port */
	if (port->type == PORT_UNKNOWN || port->fifosize == 0)
		return;

	/*
	 * Set the check interval to be 1/5 of the estimated time to
	 * send a single character, and make it at least 1. The check
	 * interval should also be less than the timeout.
	 *
	 * Note: we have to use pretty tight timings here to satisfy
	 * the NIST-PCTS.
	 *
	 * NOTE(review): if port->timeout < HZ/50 the subtraction below
	 * wraps (unsigned), producing a huge char_time — confirm that
	 * drivers always set port->timeout >= HZ/50.
	 */
	char_time = (port->timeout - HZ/50) / port->fifosize;
	char_time = char_time / 5;
	if (char_time == 0)
		char_time = 1;
	if (timeout && timeout < char_time)
		char_time = timeout;

	/*
	 * If the transmitter hasn't cleared in twice the approximate
	 * amount of time to send the entire FIFO, it probably won't
	 * ever clear. This assumes the UART isn't doing flow
	 * control, which is currently the case. Hence, if it ever
	 * takes longer than port->timeout, this is probably due to a
	 * UART bug of some kind. So, we clamp the timeout parameter at
	 * 2*port->timeout.
	 */
	if (timeout == 0 || timeout > 2 * port->timeout)
		timeout = 2 * port->timeout;

	expire = jiffies + timeout;

	pr_debug("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n",
		port->line, jiffies, expire);

	/*
	 * Check whether the transmitter is empty every 'char_time'.
	 * 'timeout' / 'expire' give us the maximum amount of time
	 * we wait.
	 */
	while (!port->ops->tx_empty(port)) {
		msleep_interruptible(jiffies_to_msecs(char_time));
		if (signal_pending(current))
			break;
		if (time_after(jiffies, expire))
			break;
	}
}
/*
 * Calls to uart_hangup() are serialised by the tty_lock in
 *   drivers/tty/tty_io.c:do_tty_hangup()
 * This runs from a workqueue and can sleep for a _short_ time only.
 */
static void uart_hangup(struct tty_struct *tty)
{
	struct uart_state *state = tty->driver_data;
	struct tty_port *tport = &state->port;
	unsigned long irqflags;

	pr_debug("uart_hangup(%d)\n", state->uart_port->line);

	mutex_lock(&tport->mutex);
	if (tport->flags & ASYNC_NORMAL_ACTIVE) {
		uart_flush_buffer(tty);
		uart_shutdown(tty, state);

		/* Reset the open count and drop ACTIVE under the port lock */
		spin_lock_irqsave(&tport->lock, irqflags);
		tport->count = 0;
		clear_bit(ASYNCB_NORMAL_ACTIVE, &tport->flags);
		spin_unlock_irqrestore(&tport->lock, irqflags);

		tty_port_tty_set(tport, NULL);

		/* Power down unless this port is the system console */
		if (!uart_console(state->uart_port))
			uart_change_pm(state, UART_PM_STATE_OFF);

		wake_up_interruptible(&tport->open_wait);
		wake_up_interruptible(&tport->delta_msr_wait);
	}
	mutex_unlock(&tport->mutex);
}
/*
 * tty_port activate hook: port startup is driven from uart_open()
 * instead, so there is nothing to do here.
 */
static int uart_port_activate(struct tty_port *port, struct tty_struct *tty)
{
	return 0;
}
/* tty_port shutdown hook: quiesce the hardware and release its IRQ */
static void uart_port_shutdown(struct tty_port *port)
{
	struct uart_state *state = container_of(port, struct uart_state, port);
	struct uart_port *uport = state->uart_port;

	/*
	 * clear delta_msr_wait queue to avoid mem leaks: we may free
	 * the irq here so the queue might never be woken up. Note
	 * that we won't end up waiting on delta_msr_wait again since
	 * any outstanding file descriptors should be pointing at
	 * hung_up_tty_fops now.
	 */
	wake_up_interruptible(&port->delta_msr_wait);

	/*
	 * Free the IRQ and disable the port.
	 */
	uport->ops->shutdown(uport);

	/*
	 * Ensure that the IRQ handler isn't running on another CPU.
	 */
	synchronize_irq(uport->irq);
}
/* tty_port hook: report whether DCD is currently asserted (1) or not (0) */
static int uart_carrier_raised(struct tty_port *port)
{
	struct uart_state *state = container_of(port, struct uart_state, port);
	struct uart_port *uport = state->uart_port;
	int mctrl;

	spin_lock_irq(&uport->lock);
	/* Enable modem-status interrupts so later DCD changes wake waiters */
	uart_enable_ms(uport);
	mctrl = uport->ops->get_mctrl(uport);
	spin_unlock_irq(&uport->lock);

	return (mctrl & TIOCM_CAR) ? 1 : 0;
}
/* tty_port hook: assert (onoff != 0) or drop both DTR and RTS together */
static void uart_dtr_rts(struct tty_port *port, int onoff)
{
	struct uart_state *state = container_of(port, struct uart_state, port);
	struct uart_port *uport = state->uart_port;
	unsigned int lines = TIOCM_DTR | TIOCM_RTS;

	if (onoff)
		uart_set_mctrl(uport, lines);
	else
		uart_clear_mctrl(uport, lines);
}
/*
 * Calls to uart_open are serialised by the tty_lock in
 *   drivers/tty/tty_io.c:tty_open()
 * Note that if this fails, then uart_close() _will_ be called.
 *
 * In time, we want to scrap the "opening nonpresent ports"
 * behaviour and implement an alternative way for setserial
 * to set base addresses/ports/types. This will allow us to
 * get rid of a certain amount of extra tests.
 */
static int uart_open(struct tty_struct *tty, struct file *filp)
{
	struct uart_driver *drv = (struct uart_driver *)tty->driver->driver_state;
	int retval, line = tty->index;
	struct uart_state *state = drv->state + line;
	struct tty_port *port = &state->port;

	pr_debug("uart_open(%d) called\n", line);

	/*
	 * We take the semaphore here to guarantee that we won't be re-entered
	 * while allocating the state structure, or while we request any IRQs
	 * that the driver may need. This also has the nice side-effect that
	 * it delays the action of uart_hangup, so we can guarantee that
	 * state->port.tty will always contain something reasonable.
	 */
	if (mutex_lock_interruptible(&port->mutex)) {
		retval = -ERESTARTSYS;
		goto end;
	}

	port->count++;
	/* A port marked UPF_DEAD is being removed: refuse the open */
	if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
		retval = -ENXIO;
		goto err_dec_count;
	}

	/*
	 * Once we set tty->driver_data here, we are guaranteed that
	 * uart_close() will decrement the driver module use count.
	 * Any failures from here onwards should not touch the count.
	 */
	tty->driver_data = state;
	state->uart_port->state = state;
	state->port.low_latency =
		(state->uart_port->flags & UPF_LOW_LATENCY) ? 1 : 0;
	tty_port_tty_set(port, tty);

	/*
	 * Start up the serial port.
	 */
	retval = uart_startup(tty, state, 0);

	/*
	 * If we succeeded, wait until the port is ready.
	 */
	mutex_unlock(&port->mutex);
	if (retval == 0)
		retval = tty_port_block_til_ready(port, tty, filp);

end:
	return retval;
err_dec_count:
	port->count--;
	mutex_unlock(&port->mutex);
	goto end;
}
/* Human-readable name of the port type, as reported by the driver */
static const char *uart_type(struct uart_port *port)
{
	const char *name = port->ops->type ? port->ops->type(port) : NULL;

	return name ? name : "unknown";
}
#ifdef CONFIG_PROC_FS
/* Print one /proc line describing port @i of driver @drv into @m */
static void uart_line_info(struct seq_file *m, struct uart_driver *drv, int i)
{
	struct uart_state *state = drv->state + i;
	struct tty_port *port = &state->port;
	enum uart_pm_state pm_state;
	struct uart_port *uport = state->uart_port;
	char stat_buf[32];
	unsigned int status;
	int mmio;

	if (!uport)
		return;

	mmio = uport->iotype >= UPIO_MEM;
	seq_printf(m, "%d: uart:%s %s%08llX irq:%d",
			uport->line, uart_type(uport),
			mmio ? "mmio:0x" : "port:",
			mmio ? (unsigned long long)uport->mapbase
			     : (unsigned long long)uport->iobase,
			uport->irq);

	if (uport->type == PORT_UNKNOWN) {
		seq_putc(m, '\n');
		return;
	}

	/* Detailed counters and modem state are root-only */
	if (capable(CAP_SYS_ADMIN)) {
		mutex_lock(&port->mutex);
		pm_state = state->pm_state;
		/* Temporarily power up so get_mctrl() reads valid state */
		if (pm_state != UART_PM_STATE_ON)
			uart_change_pm(state, UART_PM_STATE_ON);
		spin_lock_irq(&uport->lock);
		status = uport->ops->get_mctrl(uport);
		spin_unlock_irq(&uport->lock);
		if (pm_state != UART_PM_STATE_ON)
			uart_change_pm(state, pm_state);
		mutex_unlock(&port->mutex);

		seq_printf(m, " tx:%d rx:%d",
				uport->icount.tx, uport->icount.rx);
		if (uport->icount.frame)
			seq_printf(m, " fe:%d",
				uport->icount.frame);
		if (uport->icount.parity)
			seq_printf(m, " pe:%d",
				uport->icount.parity);
		if (uport->icount.brk)
			seq_printf(m, " brk:%d",
				uport->icount.brk);
		if (uport->icount.overrun)
			seq_printf(m, " oe:%d",
				uport->icount.overrun);

/* Append "|STR" to stat_buf when the corresponding mctrl/status bit is set */
#define INFOBIT(bit, str) \
	if (uport->mctrl & (bit)) \
		strncat(stat_buf, (str), sizeof(stat_buf) - \
			strlen(stat_buf) - 2)
#define STATBIT(bit, str) \
	if (status & (bit)) \
		strncat(stat_buf, (str), sizeof(stat_buf) - \
			strlen(stat_buf) - 2)

		stat_buf[0] = '\0';
		stat_buf[1] = '\0';
		INFOBIT(TIOCM_RTS, "|RTS");
		STATBIT(TIOCM_CTS, "|CTS");
		INFOBIT(TIOCM_DTR, "|DTR");
		STATBIT(TIOCM_DSR, "|DSR");
		STATBIT(TIOCM_CAR, "|CD");
		STATBIT(TIOCM_RNG, "|RI");
		/* Replace the leading '|' with a space separator */
		if (stat_buf[0])
			stat_buf[0] = ' ';

		seq_puts(m, stat_buf);
	}
	seq_putc(m, '\n');
#undef STATBIT
#undef INFOBIT
}
/* seq_file show callback: one line per port of this driver */
static int uart_proc_show(struct seq_file *m, void *v)
{
	struct tty_driver *ttydrv = m->private;
	struct uart_driver *drv = ttydrv->driver_state;
	int line;

	seq_printf(m, "serinfo:1.0 driver%s%s revision:%s\n", "", "", "");
	for (line = 0; line < drv->nr; line++)
		uart_line_info(m, drv, line);
	return 0;
}
/* Open hook for the proc entry: bind uart_proc_show to this tty driver */
static int uart_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, uart_proc_show, PDE_DATA(inode));
}
/* File operations for the seq_file-backed /proc/tty/driver entry */
static const struct file_operations uart_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= uart_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
#endif
#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
/*
 * uart_console_write - write a console message to a serial port
 * @port: the port to write the message
 * @s: array of characters
 * @count: number of characters in string to write
 * @putchar: function to write a single character to the port
 *
 * Each '\n' is expanded to "\r\n" so raw UART consoles display correctly.
 */
void uart_console_write(struct uart_port *port, const char *s,
			unsigned int count,
			void (*putchar)(struct uart_port *, int))
{
	unsigned int i;

	for (i = 0; i < count; i++, s++) {
		if (*s == '\n')
			putchar(port, '\r');
		putchar(port, *s);
	}
}
EXPORT_SYMBOL_GPL(uart_console_write);
/*
 * Check whether an invalid uart number has been specified, and
 * if so, search for the first available port that does have
 * console support.
 */
struct uart_port * __init
uart_get_console(struct uart_port *ports, int nr, struct console *co)
{
	int idx = co->index;
	int usable = idx >= 0 && idx < nr &&
		     (ports[idx].iobase != 0 || ports[idx].membase != NULL);

	if (!usable) {
		/* Fall back to the first port with a usable base address */
		for (idx = 0; idx < nr; idx++) {
			if (ports[idx].iobase != 0 ||
			    ports[idx].membase != NULL)
				break;
		}
	}

	co->index = idx;
	return ports + idx;
}
/**
 * uart_parse_options - Parse serial port baud/parity/bits/flow control.
 * @options: pointer to option string
 * @baud: pointer to an 'int' variable for the baud rate.
 * @parity: pointer to an 'int' variable for the parity.
 * @bits: pointer to an 'int' variable for the number of data bits.
 * @flow: pointer to an 'int' variable for the flow control character.
 *
 * uart_parse_options decodes a string containing the serial console
 * options. The format of the string is <baud><parity><bits><flow>,
 * eg: 115200n8r
 */
void
uart_parse_options(char *options, int *baud, int *parity, int *bits, int *flow)
{
	char *p = options;

	/* Leading digits are the baud rate */
	*baud = simple_strtoul(p, NULL, 10);
	while (*p >= '0' && *p <= '9')
		p++;

	/* Then, each optional: parity letter, data bits, flow character */
	if (*p)
		*parity = *p++;
	if (*p)
		*bits = *p++ - '0';
	if (*p)
		*flow = *p;
}
EXPORT_SYMBOL_GPL(uart_parse_options);
/*
 * Standard baud rates mapped to their Bxxx cflag values, highest first.
 * The zero-rate terminator makes B38400 the fallback for unknown rates.
 */
struct baud_rates {
	unsigned int rate;
	unsigned int cflag;
};

static const struct baud_rates baud_rates[] = {
	{ 921600, B921600 },
	{ 460800, B460800 },
	{ 230400, B230400 },
	{ 115200, B115200 },
	{  57600, B57600 },
	{  38400, B38400 },
	{  19200, B19200 },
	{   9600, B9600 },
	{   4800, B4800 },
	{   2400, B2400 },
	{   1200, B1200 },
	{      0, B38400 }
};
/**
 * uart_set_options - setup the serial console parameters
 * @port: pointer to the serial ports uart_port structure
 * @co: console pointer (may be NULL to only program the port)
 * @baud: baud rate
 * @parity: parity character - 'n' (none), 'o' (odd), 'e' (even)
 * @bits: number of data bits
 * @flow: flow control character - 'r' (rts)
 */
int
uart_set_options(struct uart_port *port, struct console *co,
		 int baud, int parity, int bits, int flow)
{
	struct ktermios termios;
	static struct ktermios dummy;
	int i;

	/*
	 * Ensure that the serial console lock is initialised
	 * early.
	 * If this port is a console, then the spinlock is already
	 * initialised.
	 */
	if (!(uart_console(port) && (port->cons->flags & CON_ENABLED))) {
		spin_lock_init(&port->lock);
		lockdep_set_class(&port->lock, &port_lock_key);
	}

	memset(&termios, 0, sizeof(struct ktermios));

	termios.c_cflag = CREAD | HUPCL | CLOCAL;

	/*
	 * Construct a cflag setting: pick the largest standard rate
	 * not exceeding the requested baud (the table is descending).
	 */
	for (i = 0; baud_rates[i].rate; i++)
		if (baud_rates[i].rate <= baud)
			break;

	termios.c_cflag |= baud_rates[i].cflag;

	if (bits == 7)
		termios.c_cflag |= CS7;
	else
		termios.c_cflag |= CS8;

	switch (parity) {
	case 'o': case 'O':
		termios.c_cflag |= PARODD;
		/*fall through*/
	case 'e': case 'E':
		termios.c_cflag |= PARENB;
		break;
	}

	if (flow == 'r')
		termios.c_cflag |= CRTSCTS;

	/*
	 * Some UARTs on the other side don't support "no flow control",
	 * so we set DTR in the host UART to make them happy.
	 */
	port->mctrl |= TIOCM_DTR;

	port->ops->set_termios(port, &termios, &dummy);
	/*
	 * Allow the setting of the UART parameters with a NULL console
	 * too:
	 */
	if (co)
		co->cflag = termios.c_cflag;

	return 0;
}
EXPORT_SYMBOL_GPL(uart_set_options);
#endif /* CONFIG_SERIAL_CORE_CONSOLE || CONFIG_CONSOLE_POLL */
/**
 * uart_change_pm - set power state of the port
 *
 * @state: port descriptor
 * @pm_state: new state
 *
 * Locking: port->mutex has to be held
 */
static void uart_change_pm(struct uart_state *state,
			   enum uart_pm_state pm_state)
{
	struct uart_port *uport = state->uart_port;

	/* Skip the driver callback when the state is already current */
	if (state->pm_state == pm_state)
		return;

	if (uport->ops->pm)
		uport->ops->pm(uport, pm_state, state->pm_state);
	state->pm_state = pm_state;
}
/* Pairs a port with its driver so serial_match_port() can derive the devt */
struct uart_match {
	struct uart_port *port;
	struct uart_driver *driver;
};
/* device_find_child() predicate: is @dev the tty device for @data's port? */
static int serial_match_port(struct device *dev, void *data)
{
	struct uart_match *match = data;
	struct tty_driver *tty_drv = match->driver->tty_driver;
	dev_t devt;

	devt = MKDEV(tty_drv->major, tty_drv->minor_start) + match->port->line;
	return dev->devt == devt; /* Actually, only one tty per port */
}
int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport)
{
struct uart_state *state = drv->state + uport->line;
struct tty_port *port = &state->port;
struct device *tty_dev;
struct uart_match match = {uport, drv};
mutex_lock(&port->mutex);
tty_dev = device_find_child(uport->dev, &match, serial_match_port);
if (device_may_wakeup(tty_dev)) {
if (!enable_irq_wake(uport->irq))
uport->irq_wake = 1;
put_device(tty_dev);
mutex_unlock(&port->mutex);
return 0;
}
put_device(tty_dev);
if (console_suspend_enabled || !uart_console(uport))
uport->suspended = 1;
if (port->flags & ASYNC_INITIALIZED) {
const struct uart_ops *ops = uport->ops;
int tries;
if (console_suspend_enabled || !uart_console(uport)) {
set_bit(ASYNCB_SUSPENDED, &port->flags);
clear_bit(ASYNCB_INITIALIZED, &port->flags);
spin_lock_irq(&uport->lock);
ops->stop_tx(uport);
ops->set_mctrl(uport, 0);
ops->stop_rx(uport);
spin_unlock_irq(&uport->lock);
}
/*
* Wait for the transmitter to empty.
*/
for (tries = 3; !ops->tx_empty(uport) && tries; tries--)
msleep(10);
if (!tries)
dev_err(uport->dev, "%s%d: Unable to drain transmitter\n",
drv->dev_name,
drv->tty_driver->name_base + uport->line);
if (console_suspend_enabled || !uart_console(uport))
ops->shutdown(uport);
}
/*
* Disable the console device before suspending.
*/
if (console_suspend_enabled && uart_console(uport))
console_stop(uport->cons);
if (console_suspend_enabled || !uart_console(uport))
uart_change_pm(state, UART_PM_STATE_OFF);
mutex_unlock(&port->mutex);
return 0;
}
/**
 * uart_resume_port - resume one serial port
 * @drv: driver owning the port
 * @uport: port to resume
 *
 * Powers the port back on, reprograms the console termios if this port
 * is the console, and restarts the hardware if it was suspended.
 */
int uart_resume_port(struct uart_driver *drv, struct uart_port *uport)
{
	struct uart_state *state = drv->state + uport->line;
	struct tty_port *port = &state->port;
	struct device *tty_dev;
	struct uart_match match = {uport, drv};
	struct ktermios termios;

	mutex_lock(&port->mutex);

	tty_dev = device_find_child(uport->dev, &match, serial_match_port);
	/*
	 * As in uart_suspend_port(), tty_dev may be NULL; guard the
	 * device_may_wakeup() dereference.
	 */
	if (!uport->suspended && tty_dev && device_may_wakeup(tty_dev)) {
		if (uport->irq_wake) {
			disable_irq_wake(uport->irq);
			uport->irq_wake = 0;
		}
		put_device(tty_dev);
		mutex_unlock(&port->mutex);
		return 0;
	}
	put_device(tty_dev);
	uport->suspended = 0;

	/*
	 * Re-enable the console device after suspending.
	 */
	if (uart_console(uport)) {
		/*
		 * First try to use the console cflag setting.
		 */
		memset(&termios, 0, sizeof(struct ktermios));
		termios.c_cflag = uport->cons->cflag;

		/*
		 * If that's unset, use the tty termios setting.
		 */
		if (port->tty && termios.c_cflag == 0)
			termios = port->tty->termios;

		if (console_suspend_enabled)
			uart_change_pm(state, UART_PM_STATE_ON);
		uport->ops->set_termios(uport, &termios, NULL);
		if (console_suspend_enabled)
			console_start(uport->cons);
	}

	if (port->flags & ASYNC_SUSPENDED) {
		const struct uart_ops *ops = uport->ops;
		int ret;

		uart_change_pm(state, UART_PM_STATE_ON);
		spin_lock_irq(&uport->lock);
		ops->set_mctrl(uport, 0);
		spin_unlock_irq(&uport->lock);
		if (console_suspend_enabled || !uart_console(uport)) {
			/* Protected by port mutex for now */
			struct tty_struct *tty = port->tty;
			ret = ops->startup(uport);
			if (ret == 0) {
				if (tty)
					uart_change_speed(tty, state, NULL);
				spin_lock_irq(&uport->lock);
				ops->set_mctrl(uport, uport->mctrl);
				ops->start_tx(uport);
				spin_unlock_irq(&uport->lock);
				set_bit(ASYNCB_INITIALIZED, &port->flags);
			} else {
				/*
				 * Failed to resume - maybe hardware went away?
				 * Clear the "initialized" flag so we won't try
				 * to call the low level drivers shutdown method.
				 */
				uart_shutdown(tty, state);
			}
		}
		clear_bit(ASYNCB_SUSPENDED, &port->flags);
	}

	mutex_unlock(&port->mutex);

	return 0;
}
/* Log the port's address, IRQ, base baud and detected type at probe time */
static inline void
uart_report_port(struct uart_driver *drv, struct uart_port *port)
{
	char where[64];

	switch (port->iotype) {
	case UPIO_PORT:
		snprintf(where, sizeof(where), "I/O 0x%lx", port->iobase);
		break;
	case UPIO_HUB6:
		snprintf(where, sizeof(where),
			 "I/O 0x%lx offset 0x%x", port->iobase, port->hub6);
		break;
	case UPIO_MEM:
	case UPIO_MEM32:
	case UPIO_AU:
	case UPIO_TSI:
		snprintf(where, sizeof(where),
			 "MMIO 0x%llx", (unsigned long long)port->mapbase);
		break;
	default:
		strlcpy(where, "*unknown*", sizeof(where));
		break;
	}

	printk(KERN_INFO "%s%s%s%d at %s (irq = %d, base_baud = %d) is a %s\n",
	       port->dev ? dev_name(port->dev) : "",
	       port->dev ? ": " : "",
	       drv->dev_name,
	       drv->tty_driver->name_base + port->line,
	       where, port->irq, port->uartclk / 16, uart_type(port));
}
/*
 * Probe/configure one port at registration time: auto-detect its type
 * if requested, report it, and leave it powered down unless it is the
 * console.
 */
static void
uart_configure_port(struct uart_driver *drv, struct uart_state *state,
		    struct uart_port *port)
{
	unsigned int flags;

	/*
	 * If there isn't a port here, don't do anything further.
	 */
	if (!port->iobase && !port->mapbase && !port->membase)
		return;

	/*
	 * Now do the auto configuration stuff. Note that config_port
	 * is expected to claim the resources and map the port for us.
	 */
	flags = 0;
	if (port->flags & UPF_AUTO_IRQ)
		flags |= UART_CONFIG_IRQ;
	if (port->flags & UPF_BOOT_AUTOCONF) {
		if (!(port->flags & UPF_FIXED_TYPE)) {
			port->type = PORT_UNKNOWN;
			flags |= UART_CONFIG_TYPE;
		}
		port->ops->config_port(port, flags);
	}

	if (port->type != PORT_UNKNOWN) {
		unsigned long flags;

		uart_report_port(drv, port);

		/* Power up port for set_mctrl() */
		uart_change_pm(state, UART_PM_STATE_ON);

		/*
		 * Ensure that the modem control lines are de-activated,
		 * but keep the DTR setting that is set in uart_set_options().
		 * We probably don't need a spinlock around this, but take
		 * one to be safe.
		 */
		spin_lock_irqsave(&port->lock, flags);
		port->ops->set_mctrl(port, port->mctrl & TIOCM_DTR);
		spin_unlock_irqrestore(&port->lock, flags);

		/*
		 * If this driver supports console, and it hasn't been
		 * successfully registered yet, try to re-register it.
		 * It may be that the port was not available.
		 */
		if (port->cons && !(port->cons->flags & CON_ENABLED))
			register_console(port->cons);

		/*
		 * Power down all ports by default, except the
		 * console if we have one.
		 */
		if (!uart_console(port))
			uart_change_pm(state, UART_PM_STATE_OFF);
	}
}
#ifdef CONFIG_CONSOLE_POLL
/*
 * uart_poll_init - prepare a port for polled (kgdb-style) console use.
 *
 * Validates that the low-level driver supplies both poll_get_char and
 * poll_put_char, optionally calls its poll_init() hook (only if the
 * port is not already initialized), and applies any "baud,parity,bits"
 * option string.  Returns 0 on success, -1 or a driver error otherwise.
 */
static int uart_poll_init(struct tty_driver *driver, int line, char *options)
{
	struct uart_driver *drv = driver->driver_state;
	struct uart_state *state = drv->state + line;
	struct uart_port *port;
	int baud = 9600;
	int bits = 8;
	int parity = 'n';
	int flow = 'n';
	int ret;

	if (!state || !state->uart_port)
		return -1;

	port = state->uart_port;
	/* Polling needs both directions implemented by the driver. */
	if (!(port->ops->poll_get_char && port->ops->poll_put_char))
		return -1;

	if (port->ops->poll_init) {
		struct tty_port *tport = &state->port;

		ret = 0;
		mutex_lock(&tport->mutex);
		/*
		 * We don't set ASYNCB_INITIALIZED as we only initialized the
		 * hw, e.g. state->xmit is still uninitialized.
		 */
		if (!test_bit(ASYNCB_INITIALIZED, &tport->flags))
			ret = port->ops->poll_init(port);
		mutex_unlock(&tport->mutex);
		if (ret)
			return ret;
	}

	if (options) {
		uart_parse_options(options, &baud, &parity, &bits, &flow);
		return uart_set_options(port, NULL, baud, parity, bits, flow);
	}

	return 0;
}
/*
 * uart_poll_get_char - read one character from a port in polled mode.
 *
 * Returns the character from the driver's poll_get_char() hook, or -1
 * when the line has no attached port.
 */
static int uart_poll_get_char(struct tty_driver *driver, int line)
{
	struct uart_driver *drv = driver->driver_state;
	struct uart_state *state = drv->state + line;

	if (!state || !state->uart_port)
		return -1;

	return state->uart_port->ops->poll_get_char(state->uart_port);
}
/*
 * uart_poll_put_char - emit one character on a port in polled mode.
 *
 * Performs NL -> CR-NL expansion so polled console output keeps the
 * usual terminal line-ending behaviour.  Silently does nothing when
 * the line has no attached port.
 */
static void uart_poll_put_char(struct tty_driver *driver, int line, char ch)
{
	struct uart_driver *drv = driver->driver_state;
	struct uart_state *state = drv->state + line;
	struct uart_port *uport;

	if (!state || !state->uart_port)
		return;

	uport = state->uart_port;
	if (ch == '\n')
		uport->ops->poll_put_char(uport, '\r');
	uport->ops->poll_put_char(uport, ch);
}
#endif
/*
 * tty layer operations implemented by the serial core; registered with
 * every uart tty_driver via tty_set_operations().
 */
static const struct tty_operations uart_ops = {
	.open		= uart_open,
	.close		= uart_close,
	.write		= uart_write,
	.put_char	= uart_put_char,
	.flush_chars	= uart_flush_chars,
	.write_room	= uart_write_room,
	.chars_in_buffer= uart_chars_in_buffer,
	.flush_buffer	= uart_flush_buffer,
	.ioctl		= uart_ioctl,
	.throttle	= uart_throttle,
	.unthrottle	= uart_unthrottle,
	.send_xchar	= uart_send_xchar,
	.set_termios	= uart_set_termios,
	.set_ldisc	= uart_set_ldisc,
	.stop		= uart_stop,
	.start		= uart_start,
	.hangup		= uart_hangup,
	.break_ctl	= uart_break_ctl,
	.wait_until_sent= uart_wait_until_sent,
#ifdef CONFIG_PROC_FS
	.proc_fops	= &uart_proc_fops,
#endif
	.tiocmget	= uart_tiocmget,
	.tiocmset	= uart_tiocmset,
	.get_icount	= uart_get_icount,
#ifdef CONFIG_CONSOLE_POLL
	.poll_init	= uart_poll_init,
	.poll_get_char	= uart_poll_get_char,
	.poll_put_char	= uart_poll_put_char,
#endif
};
/* tty_port hooks shared by every uart port created in uart_register_driver(). */
static const struct tty_port_operations uart_port_ops = {
	.activate	= uart_port_activate,
	.shutdown	= uart_port_shutdown,
	.carrier_raised = uart_carrier_raised,
	.dtr_rts	= uart_dtr_rts,
};
/**
* uart_register_driver - register a driver with the uart core layer
* @drv: low level driver structure
*
* Register a uart driver with the core driver. We in turn register
* with the tty layer, and initialise the core driver per-port state.
*
* We have a proc file in /proc/tty/driver which is named after the
* normal driver.
*
* drv->port should be NULL, and the per-port structures should be
* registered using uart_add_one_port after this call has succeeded.
*/
int uart_register_driver(struct uart_driver *drv)
{
struct tty_driver *normal;
int i, retval;
BUG_ON(drv->state);
/*
* Maybe we should be using a slab cache for this, especially if
* we have a large number of ports to handle.
*/
drv->state = kzalloc(sizeof(struct uart_state) * drv->nr, GFP_KERNEL);
if (!drv->state)
goto out;
normal = alloc_tty_driver(drv->nr);
if (!normal)
goto out_kfree;
drv->tty_driver = normal;
normal->driver_name = drv->driver_name;
normal->name = drv->dev_name;
normal->major = drv->major;
normal->minor_start = drv->minor;
normal->type = TTY_DRIVER_TYPE_SERIAL;
normal->subtype = SERIAL_TYPE_NORMAL;
normal->init_termios = tty_std_termios;
normal->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
normal->init_termios.c_ispeed = normal->init_termios.c_ospeed = 9600;
normal->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
normal->driver_state = drv;
tty_set_operations(normal, &uart_ops);
/*
* Initialise the UART state(s).
*/
for (i = 0; i < drv->nr; i++) {
struct uart_state *state = drv->state + i;
struct tty_port *port = &state->port;
tty_port_init(port);
port->ops = &uart_port_ops;
port->close_delay = HZ / 2; /* .5 seconds */
port->closing_wait = 30 * HZ;/* 30 seconds */
}
retval = tty_register_driver(normal);
if (retval >= 0)
return retval;
for (i = 0; i < drv->nr; i++)
tty_port_destroy(&drv->state[i].port);
put_tty_driver(normal);
out_kfree:
kfree(drv->state);
out:
return -ENOMEM;
}
/**
 * uart_unregister_driver - remove a driver from the uart core layer
 * @drv: low level driver structure
 *
 * Remove all references to a driver from the core driver.  The low
 * level driver must have removed all its ports via the
 * uart_remove_one_port() if it registered them with uart_add_one_port().
 * (ie, drv->port == NULL)
 */
void uart_unregister_driver(struct uart_driver *drv)
{
	struct tty_driver *p = drv->tty_driver;
	unsigned int i;

	tty_unregister_driver(p);
	put_tty_driver(p);
	/* Mirror of the init loop in uart_register_driver(). */
	for (i = 0; i < drv->nr; i++)
		tty_port_destroy(&drv->state[i].port);
	kfree(drv->state);
	/* Make reuse of a stale driver structure fail loudly. */
	drv->state = NULL;
	drv->tty_driver = NULL;
}
/*
 * uart_console_device - console match callback returning the tty driver
 * and line index for a uart-backed console.
 */
struct tty_driver *uart_console_device(struct console *co, int *index)
{
	struct uart_driver *drv = co->data;

	*index = co->index;
	return drv->tty_driver;
}
/*
 * sysfs show callbacks for the per-port tty device attributes.  Each
 * one snapshots the port state via uart_get_info() and formats a
 * single field from the resulting serial_struct.
 */
static ssize_t uart_get_attr_uartclk(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	/* baud_base is uartclk / 16, so scale back up for display. */
	return snprintf(buf, PAGE_SIZE, "%d\n", tmp.baud_base * 16);
}

static ssize_t uart_get_attr_type(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	return snprintf(buf, PAGE_SIZE, "%d\n", tmp.type);
}

static ssize_t uart_get_attr_line(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	return snprintf(buf, PAGE_SIZE, "%d\n", tmp.line);
}

static ssize_t uart_get_attr_port(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);
	unsigned long ioaddr;

	uart_get_info(port, &tmp);
	ioaddr = tmp.port;
	/* Recombine the high bits that serial_struct splits off. */
	if (HIGH_BITS_OFFSET)
		ioaddr |= (unsigned long)tmp.port_high << HIGH_BITS_OFFSET;
	return snprintf(buf, PAGE_SIZE, "0x%lX\n", ioaddr);
}

static ssize_t uart_get_attr_irq(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	return snprintf(buf, PAGE_SIZE, "%d\n", tmp.irq);
}

static ssize_t uart_get_attr_flags(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	return snprintf(buf, PAGE_SIZE, "0x%X\n", tmp.flags);
}

static ssize_t uart_get_attr_xmit_fifo_size(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	return snprintf(buf, PAGE_SIZE, "%d\n", tmp.xmit_fifo_size);
}

static ssize_t uart_get_attr_close_delay(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	return snprintf(buf, PAGE_SIZE, "%d\n", tmp.close_delay);
}

static ssize_t uart_get_attr_closing_wait(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	return snprintf(buf, PAGE_SIZE, "%d\n", tmp.closing_wait);
}

static ssize_t uart_get_attr_custom_divisor(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	return snprintf(buf, PAGE_SIZE, "%d\n", tmp.custom_divisor);
}

static ssize_t uart_get_attr_io_type(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	return snprintf(buf, PAGE_SIZE, "%d\n", tmp.io_type);
}

static ssize_t uart_get_attr_iomem_base(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	return snprintf(buf, PAGE_SIZE, "0x%lX\n", (unsigned long)tmp.iomem_base);
}

static ssize_t uart_get_attr_iomem_reg_shift(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct serial_struct tmp;
	struct tty_port *port = dev_get_drvdata(dev);

	uart_get_info(port, &tmp);
	return snprintf(buf, PAGE_SIZE, "%d\n", tmp.iomem_reg_shift);
}
/* Read-only (owner/group) sysfs attributes exposed on each uart tty device. */
static DEVICE_ATTR(type, S_IRUSR | S_IRGRP, uart_get_attr_type, NULL);
static DEVICE_ATTR(line, S_IRUSR | S_IRGRP, uart_get_attr_line, NULL);
static DEVICE_ATTR(port, S_IRUSR | S_IRGRP, uart_get_attr_port, NULL);
static DEVICE_ATTR(irq, S_IRUSR | S_IRGRP, uart_get_attr_irq, NULL);
static DEVICE_ATTR(flags, S_IRUSR | S_IRGRP, uart_get_attr_flags, NULL);
static DEVICE_ATTR(xmit_fifo_size, S_IRUSR | S_IRGRP, uart_get_attr_xmit_fifo_size, NULL);
static DEVICE_ATTR(uartclk, S_IRUSR | S_IRGRP, uart_get_attr_uartclk, NULL);
static DEVICE_ATTR(close_delay, S_IRUSR | S_IRGRP, uart_get_attr_close_delay, NULL);
static DEVICE_ATTR(closing_wait, S_IRUSR | S_IRGRP, uart_get_attr_closing_wait, NULL);
static DEVICE_ATTR(custom_divisor, S_IRUSR | S_IRGRP, uart_get_attr_custom_divisor, NULL);
static DEVICE_ATTR(io_type, S_IRUSR | S_IRGRP, uart_get_attr_io_type, NULL);
static DEVICE_ATTR(iomem_base, S_IRUSR | S_IRGRP, uart_get_attr_iomem_base, NULL);
static DEVICE_ATTR(iomem_reg_shift, S_IRUSR | S_IRGRP, uart_get_attr_iomem_reg_shift, NULL);

/* NULL-terminated list consumed via tty_dev_attr_group below. */
static struct attribute *tty_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_line.attr,
	&dev_attr_port.attr,
	&dev_attr_irq.attr,
	&dev_attr_flags.attr,
	&dev_attr_xmit_fifo_size.attr,
	&dev_attr_uartclk.attr,
	&dev_attr_close_delay.attr,
	&dev_attr_closing_wait.attr,
	&dev_attr_custom_divisor.attr,
	&dev_attr_io_type.attr,
	&dev_attr_iomem_base.attr,
	&dev_attr_iomem_reg_shift.attr,
	NULL,
	};

/* Group installed as tty_groups[0] in uart_add_one_port(). */
static const struct attribute_group tty_dev_attr_group = {
	.attrs = tty_dev_attrs,
	};
/**
 * uart_add_one_port - attach a driver-defined port structure
 * @drv: pointer to the uart low level driver structure for this port
 * @uport: uart port structure to use for this port.
 *
 * This allows the driver to register its own uart_port structure
 * with the core driver.  The main purpose is to allow the low
 * level uart drivers to expand uart_port, rather than having yet
 * more levels of structures.
 *
 * Returns 0 on success, -EINVAL for a bad/already-used line, or
 * -ENOMEM if the sysfs group array cannot be allocated.
 */
int uart_add_one_port(struct uart_driver *drv, struct uart_port *uport)
{
	struct uart_state *state;
	struct tty_port *port;
	int ret = 0;
	struct device *tty_dev;
	int num_groups;

	BUG_ON(in_interrupt());

	if (uport->line >= drv->nr)
		return -EINVAL;

	state = drv->state + uport->line;
	port = &state->port;

	/* Lock order: global port_mutex first, then the per-port mutex. */
	mutex_lock(&port_mutex);
	mutex_lock(&port->mutex);
	if (state->uart_port) {
		/* Line already occupied. */
		ret = -EINVAL;
		goto out;
	}

	state->uart_port = uport;
	state->pm_state = UART_PM_STATE_UNDEFINED;

	uport->cons = drv->cons;
	uport->state = state;

	/*
	 * If this port is a console, then the spinlock is already
	 * initialised.
	 */
	if (!(uart_console(uport) && (uport->cons->flags & CON_ENABLED))) {
		spin_lock_init(&uport->lock);
		lockdep_set_class(&uport->lock, &port_lock_key);
	}
	if (uport->cons && uport->dev)
		of_console_check(uport->dev->of_node, uport->cons->name, uport->line);

	uart_configure_port(drv, state, uport);

	/* Core group + NULL terminator, plus an optional driver group. */
	num_groups = 2;
	if (uport->attr_group)
		num_groups++;

	uport->tty_groups = kcalloc(num_groups, sizeof(*uport->tty_groups),
				    GFP_KERNEL);
	if (!uport->tty_groups) {
		ret = -ENOMEM;
		goto out;
	}
	uport->tty_groups[0] = &tty_dev_attr_group;
	if (uport->attr_group)
		uport->tty_groups[1] = uport->attr_group;

	/*
	 * Register the port whether it's detected or not.  This allows
	 * setserial to be used to alter this port's parameters.
	 */
	tty_dev = tty_port_register_device_attr(port, drv->tty_driver,
			uport->line, uport->dev, port, uport->tty_groups);
	if (likely(!IS_ERR(tty_dev))) {
		device_set_wakeup_capable(tty_dev, 1);
	} else {
		/* Non-fatal: the port still works, only sysfs is missing. */
		dev_err(uport->dev, "Cannot register tty device on line %d\n",
		       uport->line);
	}

	/*
	 * Ensure UPF_DEAD is not set.
	 */
	uport->flags &= ~UPF_DEAD;

 out:
	mutex_unlock(&port->mutex);
	mutex_unlock(&port_mutex);

	return ret;
}
/**
 * uart_remove_one_port - detach a driver defined port structure
 * @drv: pointer to the uart low level driver structure for this port
 * @uport: uart port structure for this port
 *
 * This unhooks (and hangs up) the specified port structure from the
 * core driver.  No further calls will be made to the low-level code
 * for this port.
 *
 * Returns 0 on success, -EINVAL if the line was already removed.
 */
int uart_remove_one_port(struct uart_driver *drv, struct uart_port *uport)
{
	struct uart_state *state = drv->state + uport->line;
	struct tty_port *port = &state->port;
	struct tty_struct *tty;
	int ret = 0;

	BUG_ON(in_interrupt());

	if (state->uart_port != uport)
		dev_alert(uport->dev, "Removing wrong port: %p != %p\n",
			state->uart_port, uport);

	mutex_lock(&port_mutex);

	/*
	 * Mark the port "dead" - this prevents any opens from
	 * succeeding while we shut down the port.
	 */
	mutex_lock(&port->mutex);
	if (!state->uart_port) {
		mutex_unlock(&port->mutex);
		ret = -EINVAL;
		goto out;
	}
	uport->flags |= UPF_DEAD;
	mutex_unlock(&port->mutex);

	/*
	 * Remove the devices from the tty layer
	 */
	tty_unregister_device(drv->tty_driver, uport->line);

	/* Hang up any current user of the port. */
	tty = tty_port_tty_get(port);
	if (tty) {
		tty_vhangup(port->tty);
		tty_kref_put(tty);
	}

	/*
	 * If the port is used as a console, unregister it
	 */
	if (uart_console(uport))
		unregister_console(uport->cons);

	/*
	 * Free the port IO and memory resources, if any.
	 */
	if (uport->type != PORT_UNKNOWN)
		uport->ops->release_port(uport);
	kfree(uport->tty_groups);

	/*
	 * Indicate that there isn't a port here anymore.
	 */
	uport->type = PORT_UNKNOWN;
	state->uart_port = NULL;
out:
	mutex_unlock(&port_mutex);

	return ret;
}
/*
 * uart_match_port - decide whether two uart_port descriptions refer to
 * the same physical device.
 *
 * Ports match only when they share an I/O access type and, for that
 * type, the same address (plus HUB6 offset where applicable).
 */
int uart_match_port(struct uart_port *port1, struct uart_port *port2)
{
	if (port1->iotype != port2->iotype)
		return 0;

	switch (port1->iotype) {
	case UPIO_PORT:
		return port1->iobase == port2->iobase;
	case UPIO_HUB6:
		return port1->iobase == port2->iobase &&
		       port1->hub6 == port2->hub6;
	case UPIO_MEM:
	case UPIO_MEM32:
	case UPIO_AU:
	case UPIO_TSI:
		return port1->mapbase == port2->mapbase;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(uart_match_port);
/**
 * uart_handle_dcd_change - handle a change of carrier detect state
 * @uport: uart_port structure for the open port
 * @status: new carrier detect status, nonzero if active
 *
 * Caller must hold uport->lock
 */
void uart_handle_dcd_change(struct uart_port *uport, unsigned int status)
{
	struct tty_port *port = &uport->state->port;
	struct tty_struct *tty = port->tty;
	struct tty_ldisc *ld;

	lockdep_assert_held_once(&uport->lock);

	/* Let the line discipline (e.g. PPS) see the transition first. */
	if (tty) {
		ld = tty_ldisc_ref(tty);
		if (ld) {
			if (ld->ops->dcd_change)
				ld->ops->dcd_change(tty, status);
			tty_ldisc_deref(ld);
		}
	}

	uport->icount.dcd++;

	/* With carrier-detect enabled: carrier up wakes blocked opens,
	 * carrier loss hangs up the tty. */
	if (uart_dcd_enabled(uport)) {
		if (status)
			wake_up_interruptible(&port->open_wait);
		else if (tty)
			tty_hangup(tty);
	}
}
EXPORT_SYMBOL_GPL(uart_handle_dcd_change);
/**
 * uart_handle_cts_change - handle a change of clear-to-send state
 * @uport: uart_port structure for the open port
 * @status: new clear to send status, nonzero if active
 *
 * Caller must hold uport->lock.  With CTS flow control enabled, a
 * rising CTS restarts a previously throttled transmitter and a falling
 * CTS stops it.
 */
void uart_handle_cts_change(struct uart_port *uport, unsigned int status)
{
	lockdep_assert_held_once(&uport->lock);

	uport->icount.cts++;

	if (!uart_cts_enabled(uport))
		return;

	if (uport->hw_stopped && status) {
		/* CTS re-asserted: resume transmission. */
		uport->hw_stopped = 0;
		uport->ops->start_tx(uport);
		uart_write_wakeup(uport);
	} else if (!uport->hw_stopped && !status) {
		/* CTS dropped: throttle the transmitter. */
		uport->hw_stopped = 1;
		uport->ops->stop_tx(uport);
	}
}
EXPORT_SYMBOL_GPL(uart_handle_cts_change);
/**
 * uart_insert_char - push a char to the uart layer
 *
 * User is responsible to call tty_flip_buffer_push when they are done with
 * insertion.
 *
 * @port: corresponding port
 * @status: state of the serial port RX buffer (LSR for 8250)
 * @overrun: mask of overrun bits in @status
 * @ch: character to push
 * @flag: flag for the character (see TTY_NORMAL and friends)
 */
void uart_insert_char(struct uart_port *port, unsigned int status,
		 unsigned int overrun, unsigned int ch, unsigned int flag)
{
	struct tty_port *tport = &port->state->port;

	/* Insert the character unless a non-overrun status bit that the
	 * port has been told to ignore is set. */
	if (!(status & port->ignore_status_mask & ~overrun) &&
	    tty_insert_flip_char(tport, ch, flag) == 0)
		port->icount.buf_overrun++;

	/*
	 * Overrun is special.  Since it's reported immediately,
	 * it doesn't affect the current character.
	 */
	if ((status & ~port->ignore_status_mask & overrun) &&
	    tty_insert_flip_char(tport, 0, TTY_OVERRUN) == 0)
		port->icount.buf_overrun++;
}
EXPORT_SYMBOL_GPL(uart_insert_char);
/* Public entry points of the serial core used by low-level uart drivers. */
EXPORT_SYMBOL(uart_write_wakeup);
EXPORT_SYMBOL(uart_register_driver);
EXPORT_SYMBOL(uart_unregister_driver);
EXPORT_SYMBOL(uart_suspend_port);
EXPORT_SYMBOL(uart_resume_port);
EXPORT_SYMBOL(uart_add_one_port);
EXPORT_SYMBOL(uart_remove_one_port);

MODULE_DESCRIPTION("Serial driver core");
MODULE_LICENSE("GPL");
| gpl-2.0 |
thestealth131205/k2_u-ul | arch/arm/perfmon/perf-function-hooks.c | 346 | 1416 | /* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/sysrq.h>
#include <linux/time.h>
#include "linux/proc_fs.h"
#include "linux/kernel_stat.h"
#include "asm/uaccess.h"
#include <linux/proc_fs.h>
#include "cp15_registers.h"
#include <asm/perftypes.h>
#include "perf.h"
/*
 * Hook pointers filled in by a loadable perfmon module; pp_loaded is
 * expected to be set non-zero once the module has installed them
 * (not visible in this file - TODO confirm against the module source).
 */
VPVF pp_interrupt_out_ptr;	/* called on interrupt exit */
VPVF pp_interrupt_in_ptr;	/* called on interrupt entry */
VPULF pp_process_remove_ptr;	/* called with the pid of an exiting process */
unsigned int pp_loaded;		/* non-zero when the hooks above are valid */
EXPORT_SYMBOL(pp_loaded);
atomic_t pm_op_lock;		/* NOTE(review): unused in this chunk; presumably serialises perfmon ops */
EXPORT_SYMBOL(pm_op_lock);
/* Forward an interrupt-exit event to the perfmon module, if loaded. */
void perf_mon_interrupt_out(void)
{
	if (pp_loaded)
		(*pp_interrupt_out_ptr)();
}
/* NOTE(review): this exports the hook pointer, not the wrapper above. */
EXPORT_SYMBOL(pp_interrupt_out_ptr);
/* Forward an interrupt-entry event to the perfmon module, if loaded. */
void perf_mon_interrupt_in(void)
{
	if (pp_loaded)
		(*pp_interrupt_in_ptr)();
}
/* NOTE(review): this exports the hook pointer, not the wrapper above. */
EXPORT_SYMBOL(pp_interrupt_in_ptr);
/* Notify the perfmon module, if loaded, that process @pid is going away. */
void per_process_remove(unsigned long pid)
{
	if (pp_loaded)
		(*pp_process_remove_ptr)(pid);
}
/* NOTE(review): this exports the hook pointer, not the wrapper above. */
EXPORT_SYMBOL(pp_process_remove_ptr);
| gpl-2.0 |
chenyu105/linux | net/ipv4/esp4.c | 602 | 17715 | #define pr_fmt(fmt) "IPsec: " fmt
#include <crypto/aead.h>
#include <crypto/authenc.h>
#include <linux/err.h>
#include <linux/module.h>
#include <net/ip.h>
#include <net/xfrm.h>
#include <net/esp.h>
#include <linux/scatterlist.h>
#include <linux/kernel.h>
#include <linux/pfkeyv2.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in6.h>
#include <net/icmp.h>
#include <net/protocol.h>
#include <net/udp.h>
/*
 * Per-skb ESP state kept in skb->cb; tmp points at the scratch buffer
 * from esp_alloc_tmp() so the async crypto completion can free it.
 */
struct esp_skb_cb {
	struct xfrm_skb_cb xfrm;
	void *tmp;
};

#define ESP_SKB_CB(__skb) ((struct esp_skb_cb *)&((__skb)->cb[0]))

static u32 esp4_get_mtu(struct xfrm_state *x, int mtu);
/*
 * Allocate an AEAD request structure with extra space for SG and IV.
 *
 * For alignment considerations the IV is placed at the front, followed
 * by the request and finally the SG list.
 *
 * TODO: Use spare space in skb for this where possible.
 */
static void *esp_alloc_tmp(struct crypto_aead *aead, int nfrags, int seqhilen)
{
	unsigned int len;

	/* Layout: [seqhi][IV (aligned)][aead_request][scatterlist array] */
	len = seqhilen;

	len += crypto_aead_ivsize(aead);

	if (len) {
		len += crypto_aead_alignmask(aead) &
		       ~(crypto_tfm_ctx_alignment() - 1);
		len = ALIGN(len, crypto_tfm_ctx_alignment());
	}

	len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
	len = ALIGN(len, __alignof__(struct scatterlist));

	len += sizeof(struct scatterlist) * nfrags;

	/* GFP_ATOMIC: called from the packet transform path. */
	return kmalloc(len, GFP_ATOMIC);
}
/*
 * Accessors into the scratch buffer laid out by esp_alloc_tmp().
 * Each returns a suitably aligned pointer to one region.
 */

/* High 32 bits of the ESN sequence number, at the front of the buffer. */
static inline __be32 *esp_tmp_seqhi(void *tmp)
{
	return PTR_ALIGN((__be32 *)tmp, __alignof__(__be32));
}

/* IV, aligned as the AEAD transform requires (or seqhi end if ivsize==0). */
static inline u8 *esp_tmp_iv(struct crypto_aead *aead, void *tmp, int seqhilen)
{
	return crypto_aead_ivsize(aead) ?
	       PTR_ALIGN((u8 *)tmp + seqhilen,
			 crypto_aead_alignmask(aead) + 1) : tmp + seqhilen;
}

/* The aead_request, following the IV; also binds it to the transform. */
static inline struct aead_request *esp_tmp_req(struct crypto_aead *aead, u8 *iv)
{
	struct aead_request *req;

	req = (void *)PTR_ALIGN(iv + crypto_aead_ivsize(aead),
				crypto_tfm_ctx_alignment());
	aead_request_set_tfm(req, aead);
	return req;
}

/* The scatterlist array at the tail, after the request's private context. */
static inline struct scatterlist *esp_req_sg(struct crypto_aead *aead,
					     struct aead_request *req)
{
	return (void *)ALIGN((unsigned long)(req + 1) +
			     crypto_aead_reqsize(aead),
			     __alignof__(struct scatterlist));
}
/* Async encrypt completion: free the scratch buffer and resume output. */
static void esp_output_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	kfree(ESP_SKB_CB(skb)->tmp);
	xfrm_output_resume(skb, err);
}

/* Move ESP header back into place. */
static void esp_restore_header(struct sk_buff *skb, unsigned int offset)
{
	struct ip_esp_hdr *esph = (void *)(skb->data + offset);
	void *tmp = ESP_SKB_CB(skb)->tmp;
	__be32 *seqhi = esp_tmp_seqhi(tmp);

	/* Undo the 4-byte ESN shuffle done before encryption. */
	esph->seq_no = esph->spi;
	esph->spi = *seqhi;
}

/* Output-side wrapper: header sits 4 bytes before the transport header. */
static void esp_output_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, skb_transport_offset(skb) - sizeof(__be32));
}

/* Async encrypt completion for ESN states: restore header first. */
static void esp_output_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_output_restore_header(skb);
	esp_output_done(base, err);
}
/*
 * esp_output - ESP transform output handler (encrypt + encapsulate).
 *
 * Pads the payload per RFC 4303 (optionally with TFC padding up to
 * x->tfcpad), builds the ESP header (with optional UDP encapsulation
 * for NAT traversal), and performs AEAD encryption, possibly
 * asynchronously.  Returns 0 on success, -EINPROGRESS if the crypto
 * completes later, or a negative error.
 */
static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
{
	int err;
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead;
	struct aead_request *req;
	struct scatterlist *sg;
	struct sk_buff *trailer;
	void *tmp;
	u8 *iv;
	u8 *tail;
	int blksize;
	int clen;
	int alen;
	int plen;
	int ivlen;
	int tfclen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	__be64 seqno;

	/* skb is pure payload to encrypt */

	aead = x->data;
	alen = crypto_aead_authsize(aead);
	ivlen = crypto_aead_ivsize(aead);

	/* Traffic-flow confidentiality: pad short packets up to tfcpad,
	 * clamped to the path MTU. */
	tfclen = 0;
	if (x->tfcpad) {
		struct xfrm_dst *dst = (struct xfrm_dst *)skb_dst(skb);
		u32 padto;

		padto = min(x->tfcpad, esp4_get_mtu(x, dst->child_mtu_cached));
		if (skb->len < padto)
			tfclen = padto - skb->len;
	}
	/* +2 covers the pad-length and next-header trailer bytes. */
	blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	clen = ALIGN(skb->len + 2 + tfclen, blksize);
	plen = clen - skb->len - tfclen;

	err = skb_cow_data(skb, tfclen + plen + alen, &trailer);
	if (err < 0)
		goto error;
	nfrags = err;

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp) {
		err = -ENOMEM;
		goto error;
	}

	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	/* Fill padding... RFC 4303 self-describing pad: 1, 2, 3, ... */
	tail = skb_tail_pointer(trailer);
	if (tfclen) {
		memset(tail, 0, tfclen);
		tail += tfclen;
	}
	do {
		int i;
		for (i = 0; i < plen - 2; i++)
			tail[i] = i + 1;
	} while (0);
	tail[plen - 2] = plen - 2;
	tail[plen - 1] = *skb_mac_header(skb);
	pskb_put(skb, trailer, clen - skb->len + alen);

	skb_push(skb, -skb_network_offset(skb));
	esph = ip_esp_hdr(skb);
	*skb_mac_header(skb) = IPPROTO_ESP;

	/* this is non-NULL only with UDP Encapsulation */
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh;
		__be32 *udpdata32;
		__be16 sport, dport;
		int encap_type;

		/* Snapshot the encap parameters under the state lock. */
		spin_lock_bh(&x->lock);
		sport = encap->encap_sport;
		dport = encap->encap_dport;
		encap_type = encap->encap_type;
		spin_unlock_bh(&x->lock);

		uh = (struct udphdr *)esph;
		uh->source = sport;
		uh->dest = dport;
		uh->len = htons(skb->len - skb_transport_offset(skb));
		uh->check = 0;

		switch (encap_type) {
		default:
		case UDP_ENCAP_ESPINUDP:
			esph = (struct ip_esp_hdr *)(uh + 1);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			/* Non-IKE marker: two zero words before the ESP hdr. */
			udpdata32 = (__be32 *)(uh + 1);
			udpdata32[0] = udpdata32[1] = 0;
			esph = (struct ip_esp_hdr *)(udpdata32 + 2);
			break;
		}

		*skb_mac_header(skb) = IPPROTO_UDP;
	}

	esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.low);

	aead_request_set_callback(req, 0, esp_output_done, skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accomodate the high bits.  We will move it back after
	 * encryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)(skb_transport_header(skb) - sizeof(__be32));
		*seqhi = esph->spi;
		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.output.hi);
		aead_request_set_callback(req, 0, esp_output_done_esn, skb);
	}

	esph->spi = x->id.spi;

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg,
		     (unsigned char *)esph - skb->data,
		     assoclen + ivlen + clen + alen);

	aead_request_set_crypt(req, sg, sg, ivlen + clen, iv);
	aead_request_set_ad(req, assoclen);

	seqno = cpu_to_be64(XFRM_SKB_CB(skb)->seq.output.low +
			    ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));

	/* Derive the IV from the (extended) sequence number. */
	memset(iv, 0, ivlen);
	memcpy(iv + ivlen - min(ivlen, 8), (u8 *)&seqno + 8 - min(ivlen, 8),
	       min(ivlen, 8));

	ESP_SKB_CB(skb)->tmp = tmp;
	err = crypto_aead_encrypt(req);

	switch (err) {
	case -EINPROGRESS:
		/* Async: the completion callback owns tmp now. */
		goto error;

	case -EBUSY:
		err = NET_XMIT_DROP;
		break;

	case 0:
		if ((x->props.flags & XFRM_STATE_ESN))
			esp_output_restore_header(skb);
	}

	kfree(tmp);

error:
	return err;
}
/*
 * esp_input_done2 - post-decryption processing for inbound ESP.
 *
 * Validates and strips the ESP trailer (padding, pad length, next
 * header), handles NAT-T peer-address change detection and checksum
 * fixups, and trims the skb down to the inner payload.  Returns the
 * inner protocol number, or a negative error.
 */
static int esp_input_done2(struct sk_buff *skb, int err)
{
	const struct iphdr *iph;
	struct xfrm_state *x = xfrm_input_state(skb);
	struct crypto_aead *aead = x->data;
	int alen = crypto_aead_authsize(aead);
	int hlen = sizeof(struct ip_esp_hdr) + crypto_aead_ivsize(aead);
	int elen = skb->len - hlen;
	int ihl;
	u8 nexthdr[2];
	int padlen;

	kfree(ESP_SKB_CB(skb)->tmp);

	if (unlikely(err))
		goto out;

	/* Trailer: [pad][pad length][next header][ICV]. */
	if (skb_copy_bits(skb, skb->len-alen-2, nexthdr, 2))
		BUG();

	err = -EINVAL;
	padlen = nexthdr[0];
	if (padlen + 2 + alen >= elen)
		goto out;

	/* ... check padding bits here. Silly. :-) */

	iph = ip_hdr(skb);
	ihl = iph->ihl * 4;

	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;
		struct udphdr *uh = (void *)(skb_network_header(skb) + ihl);

		/*
		 * 1) if the NAT-T peer's IP or port changed then
		 *    advertize the change to the keying daemon.
		 *    This is an inbound SA, so just compare
		 *    SRC ports.
		 */
		if (iph->saddr != x->props.saddr.a4 ||
		    uh->source != encap->encap_sport) {
			xfrm_address_t ipaddr;

			ipaddr.a4 = iph->saddr;
			km_new_mapping(x, &ipaddr, uh->source);

			/* XXX: perhaps add an extra
			 * policy check here, to see
			 * if we should allow or
			 * reject a packet from a
			 * different source
			 * address/port.
			 */
		}

		/*
		 * 2) ignore UDP/TCP checksums in case
		 *    of NAT-T in Transport Mode, or
		 *    perform other post-processing fixes
		 *    as per draft-ietf-ipsec-udp-encaps-06,
		 *    section 3.1.2
		 */
		if (x->props.mode == XFRM_MODE_TRANSPORT)
			skb->ip_summed = CHECKSUM_UNNECESSARY;
	}

	/* Strip trailer and ESP header, exposing the inner packet. */
	pskb_trim(skb, skb->len - alen - padlen - 2);
	__skb_pull(skb, hlen);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		skb_reset_transport_header(skb);
	else
		skb_set_transport_header(skb, -ihl);

	err = nexthdr[1];

	/* RFC4303: Drop dummy packets without any error */
	if (err == IPPROTO_NONE)
		err = -EINVAL;

out:
	return err;
}
/* Async decrypt completion: finish trailer processing and resume input. */
static void esp_input_done(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	xfrm_input_resume(skb, esp_input_done2(skb, err));
}

/* Undo the 4-byte ESN header shuffle done before decryption. */
static void esp_input_restore_header(struct sk_buff *skb)
{
	esp_restore_header(skb, 0);
	__skb_pull(skb, 4);
}

/* Async decrypt completion for ESN states: restore header first. */
static void esp_input_done_esn(struct crypto_async_request *base, int err)
{
	struct sk_buff *skb = base->data;

	esp_input_restore_header(skb);
	esp_input_done(base, err);
}
/*
 * Note: detecting truncated vs. non-truncated authentication data is very
 * expensive, so we only support truncated data, which is the recommended
 * and common case.
 */
/*
 * esp_input - ESP transform input handler (authenticate + decrypt).
 *
 * Builds a scatterlist over the full packet and runs AEAD decryption,
 * possibly asynchronously; trailer handling is in esp_input_done2().
 */
static int esp_input(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ip_esp_hdr *esph;
	struct crypto_aead *aead = x->data;
	struct aead_request *req;
	struct sk_buff *trailer;
	int ivlen = crypto_aead_ivsize(aead);
	int elen = skb->len - sizeof(*esph) - ivlen;
	int nfrags;
	int assoclen;
	int seqhilen;
	__be32 *seqhi;
	void *tmp;
	u8 *iv;
	struct scatterlist *sg;
	int err = -EINVAL;

	/* Must at least contain the ESP header and IV. */
	if (!pskb_may_pull(skb, sizeof(*esph) + ivlen))
		goto out;

	if (elen <= 0)
		goto out;

	err = skb_cow_data(skb, 0, &trailer);
	if (err < 0)
		goto out;

	nfrags = err;

	assoclen = sizeof(*esph);
	seqhilen = 0;

	if (x->props.flags & XFRM_STATE_ESN) {
		seqhilen += sizeof(__be32);
		assoclen += seqhilen;
	}

	err = -ENOMEM;
	tmp = esp_alloc_tmp(aead, nfrags, seqhilen);
	if (!tmp)
		goto out;

	ESP_SKB_CB(skb)->tmp = tmp;
	seqhi = esp_tmp_seqhi(tmp);
	iv = esp_tmp_iv(aead, tmp, seqhilen);
	req = esp_tmp_req(aead, iv);
	sg = esp_req_sg(aead, req);

	skb->ip_summed = CHECKSUM_NONE;

	esph = (struct ip_esp_hdr *)skb->data;

	aead_request_set_callback(req, 0, esp_input_done, skb);

	/* For ESN we move the header forward by 4 bytes to
	 * accomodate the high bits.  We will move it back after
	 * decryption.
	 */
	if ((x->props.flags & XFRM_STATE_ESN)) {
		esph = (void *)skb_push(skb, 4);
		*seqhi = esph->spi;
		esph->spi = esph->seq_no;
		esph->seq_no = htonl(XFRM_SKB_CB(skb)->seq.input.hi);
		aead_request_set_callback(req, 0, esp_input_done_esn, skb);
	}

	sg_init_table(sg, nfrags);
	skb_to_sgvec(skb, sg, 0, skb->len);

	aead_request_set_crypt(req, sg, sg, elen + ivlen, iv);
	aead_request_set_ad(req, assoclen);

	err = crypto_aead_decrypt(req);
	if (err == -EINPROGRESS)
		goto out;

	if ((x->props.flags & XFRM_STATE_ESN))
		esp_input_restore_header(skb);

	err = esp_input_done2(skb, err);

out:
	return err;
}
/*
 * esp4_get_mtu - maximum payload that fits in @mtu after ESP overhead.
 *
 * Accounts for the ESP header, ICV, cipher-block padding and the
 * 2-byte trailer; transport/BEET modes get the IP header back since
 * it is not covered by ESP there.
 */
static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
{
	struct crypto_aead *aead = x->data;
	u32 blksize = ALIGN(crypto_aead_blocksize(aead), 4);
	unsigned int net_adj;

	switch (x->props.mode) {
	case XFRM_MODE_TRANSPORT:
	case XFRM_MODE_BEET:
		net_adj = sizeof(struct iphdr);
		break;
	case XFRM_MODE_TUNNEL:
		net_adj = 0;
		break;
	default:
		BUG();
	}

	/* Round down to a cipher-block boundary, minus trailer bytes. */
	return ((mtu - x->props.header_len - crypto_aead_authsize(aead) -
		 net_adj) & ~(blksize - 1)) + net_adj - 2;
}
/*
 * esp4_err - ICMP error handler for ESP.
 *
 * On fragmentation-needed or redirect errors, look up the SA by the
 * SPI in the embedded packet and update the cached PMTU/route.
 */
static int esp4_err(struct sk_buff *skb, u32 info)
{
	struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct ip_esp_hdr *esph = (struct ip_esp_hdr *)(skb->data+(iph->ihl<<2));
	struct xfrm_state *x;

	switch (icmp_hdr(skb)->type) {
	case ICMP_DEST_UNREACH:
		if (icmp_hdr(skb)->code != ICMP_FRAG_NEEDED)
			return 0;
		/* fall through - FRAG_NEEDED is handled below */
	case ICMP_REDIRECT:
		break;
	default:
		return 0;
	}

	x = xfrm_state_lookup(net, skb->mark, (const xfrm_address_t *)&iph->daddr,
			      esph->spi, IPPROTO_ESP, AF_INET);
	if (!x)
		return 0;

	if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH)
		ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
	else
		ipv4_redirect(skb, net, 0, 0, IPPROTO_ESP, 0);
	xfrm_state_put(x);

	return 0;
}
/* Release the AEAD transform attached to this xfrm state, if any. */
static void esp_destroy(struct xfrm_state *x)
{
	struct crypto_aead *aead = x->data;

	if (aead)
		crypto_free_aead(aead);
}
/*
 * esp_init_aead - set up a combined-mode (AEAD) cipher for an SA.
 *
 * Builds "geniv(alg)" from the negotiated parameters, allocates the
 * transform, and programs key and ICV length.  Returns 0 or a
 * negative error; the transform (if allocated) stays in x->data and
 * is freed later by esp_destroy().
 */
static int esp_init_aead(struct xfrm_state *x)
{
	char aead_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_aead *aead;
	int err;

	err = -ENAMETOOLONG;
	if (snprintf(aead_name, CRYPTO_MAX_ALG_NAME, "%s(%s)",
		     x->geniv, x->aead->alg_name) >= CRYPTO_MAX_ALG_NAME)
		goto error;

	aead = crypto_alloc_aead(aead_name, 0, 0);
	err = PTR_ERR(aead);
	if (IS_ERR(aead))
		goto error;

	x->data = aead;

	/* Key length is carried in bits; round up to whole bytes. */
	err = crypto_aead_setkey(aead, x->aead->alg_key,
				 (x->aead->alg_key_len + 7) / 8);
	if (err)
		goto error;

	err = crypto_aead_setauthsize(aead, x->aead->alg_icv_len / 8);
	if (err)
		goto error;

error:
	return err;
}
/* Attach an authenc(...) AEAD built from separate auth + cipher algs.
 * Builds the template name (authencesn for ESN states), allocates the
 * tfm, and serializes the combined key as:
 *   [rtattr: enckeylen param][auth key][cipher key]
 * which is the layout the authenc template's setkey expects.
 * Returns 0 on success or a negative errno.
 */
static int esp_init_authenc(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        struct crypto_authenc_key_param *param;
        struct rtattr *rta;
        char *key;
        char *p;
        char authenc_name[CRYPTO_MAX_ALG_NAME];
        unsigned int keylen;
        int err;

        err = -EINVAL;
        if (!x->ealg)
                goto error;

        err = -ENAMETOOLONG;

        /* ESN states need the esn-aware template so the sequence-number
         * high bits are covered by the ICV.
         */
        if ((x->props.flags & XFRM_STATE_ESN)) {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthencesn(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        } else {
                if (snprintf(authenc_name, CRYPTO_MAX_ALG_NAME,
                             "%s%sauthenc(%s,%s)%s",
                             x->geniv ?: "", x->geniv ? "(" : "",
                             x->aalg ? x->aalg->alg_name : "digest_null",
                             x->ealg->alg_name,
                             x->geniv ? ")" : "") >= CRYPTO_MAX_ALG_NAME)
                        goto error;
        }

        aead = crypto_alloc_aead(authenc_name, 0, 0);
        err = PTR_ERR(aead);
        if (IS_ERR(aead))
                goto error;

        x->data = aead; /* freed in esp_destroy() */

        keylen = (x->aalg ? (x->aalg->alg_key_len + 7) / 8 : 0) +
                 (x->ealg->alg_key_len + 7) / 8 + RTA_SPACE(sizeof(*param));
        err = -ENOMEM;
        key = kmalloc(keylen, GFP_KERNEL);
        if (!key)
                goto error;

        /* rtattr header carrying the cipher key length comes first. */
        p = key;
        rta = (void *)p;
        rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
        rta->rta_len = RTA_LENGTH(sizeof(*param));
        param = RTA_DATA(rta);
        p += RTA_SPACE(sizeof(*param));

        if (x->aalg) {
                struct xfrm_algo_desc *aalg_desc;

                memcpy(p, x->aalg->alg_key, (x->aalg->alg_key_len + 7) / 8);
                p += (x->aalg->alg_key_len + 7) / 8;

                aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
                BUG_ON(!aalg_desc);

                /* Sanity-check that the hash's digest size matches what
                 * the xfrm algorithm table claims for this name.
                 */
                err = -EINVAL;
                if (aalg_desc->uinfo.auth.icv_fullbits / 8 !=
                    crypto_aead_authsize(aead)) {
                        pr_info("ESP: %s digestsize %u != %hu\n",
                                x->aalg->alg_name,
                                crypto_aead_authsize(aead),
                                aalg_desc->uinfo.auth.icv_fullbits / 8);
                        goto free_key;
                }

                err = crypto_aead_setauthsize(
                        aead, x->aalg->alg_trunc_len / 8);
                if (err)
                        goto free_key;
        }

        param->enckeylen = cpu_to_be32((x->ealg->alg_key_len + 7) / 8);
        memcpy(p, x->ealg->alg_key, (x->ealg->alg_key_len + 7) / 8);

        err = crypto_aead_setkey(aead, key, keylen);

free_key:
        kfree(key);

error:
        return err;
}
/* xfrm_type init hook: set up the crypto transform and precompute the
 * per-packet header/trailer space this state will need, including any
 * UDP encapsulation overhead.  Returns 0 or a negative errno.
 */
static int esp_init_state(struct xfrm_state *x)
{
        struct crypto_aead *aead;
        u32 align;
        int err;

        x->data = NULL;

        /* Combined-mode (AEAD) vs. separate auth+cipher setup. */
        if (x->aead)
                err = esp_init_aead(x);
        else
                err = esp_init_authenc(x);

        if (err)
                goto error;

        aead = x->data;

        x->props.header_len = sizeof(struct ip_esp_hdr) +
                              crypto_aead_ivsize(aead);
        if (x->props.mode == XFRM_MODE_TUNNEL)
                x->props.header_len += sizeof(struct iphdr);
        else if (x->props.mode == XFRM_MODE_BEET && x->sel.family != AF_INET6)
                x->props.header_len += IPV4_BEET_PHMAXLEN;

        if (x->encap) {
                struct xfrm_encap_tmpl *encap = x->encap;

                switch (encap->encap_type) {
                default:
                        goto error;
                case UDP_ENCAP_ESPINUDP:
                        x->props.header_len += sizeof(struct udphdr);
                        break;
                case UDP_ENCAP_ESPINUDP_NON_IKE:
                        x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
                        break;
                }
        }

        /* Worst-case trailer: pad to block alignment + pad-len/next-header
         * bytes + ICV.
         */
        align = ALIGN(crypto_aead_blocksize(aead), 4);
        x->props.trailer_len = align + 1 + crypto_aead_authsize(aead);

error:
        return err;
}
/* Per-protocol receive callback; ESP has no extra post-input work. */
static int esp4_rcv_cb(struct sk_buff *skb, int err)
{
        return 0;
}
/* xfrm_type descriptor registered for IPPROTO_ESP on AF_INET.
 * REPLAY_PROT marks ESP as providing anti-replay protection.
 */
static const struct xfrm_type esp_type =
{
        .description    = "ESP4",
        .owner          = THIS_MODULE,
        .proto          = IPPROTO_ESP,
        .flags          = XFRM_TYPE_REPLAY_PROT,
        .init_state     = esp_init_state,
        .destructor     = esp_destroy,
        .get_mtu        = esp4_get_mtu,
        .input          = esp_input,
        .output         = esp_output
};
/* IPv4 protocol hooks for ESP; generic xfrm4 handlers do the work,
 * esp4_err handles ICMP feedback.  Priority 0 = default handler.
 */
static struct xfrm4_protocol esp4_protocol = {
        .handler        =       xfrm4_rcv,
        .input_handler  =       xfrm_input,
        .cb_handler     =       esp4_rcv_cb,
        .err_handler    =       esp4_err,
        .priority       =       0,
};
/* Module init: register the xfrm type first, then the IPv4 protocol
 * handler; unwind the type registration if the second step fails.
 */
static int __init esp4_init(void)
{
        if (xfrm_register_type(&esp_type, AF_INET) < 0) {
                pr_info("%s: can't add xfrm type\n", __func__);
                return -EAGAIN;
        }
        if (xfrm4_protocol_register(&esp4_protocol, IPPROTO_ESP) < 0) {
                pr_info("%s: can't add protocol\n", __func__);
                xfrm_unregister_type(&esp_type, AF_INET);
                return -EAGAIN;
        }
        return 0;
}
/* Module exit: deregister in reverse order of esp4_init(). */
static void __exit esp4_fini(void)
{
        if (xfrm4_protocol_deregister(&esp4_protocol, IPPROTO_ESP) < 0)
                pr_info("%s: can't remove protocol\n", __func__);
        if (xfrm_unregister_type(&esp_type, AF_INET) < 0)
                pr_info("%s: can't remove xfrm type\n", __func__);
}
module_init(esp4_init);
module_exit(esp4_fini);
MODULE_LICENSE("GPL");
/* Enables auto-loading when an ESP SA is configured for AF_INET. */
MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_ESP);
/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/completion.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/irqreturn.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_spi.h>
#include "esp_scsi.h"
/* Driver identity strings. */
#define DRV_MODULE_NAME         "esp"
#define PFX DRV_MODULE_NAME     ": "
#define DRV_VERSION             "2.000"
#define DRV_MODULE_RELDATE      "April 19, 2007"

/* SCSI bus reset settle time in seconds. */
static int esp_bus_reset_settle = 3;

/* Runtime-selectable debug categories; see the ESP_DEBUG_* bits below. */
static u32 esp_debug;
#define ESP_DEBUG_INTR          0x00000001
#define ESP_DEBUG_SCSICMD       0x00000002
#define ESP_DEBUG_RESET         0x00000004
#define ESP_DEBUG_MSGIN         0x00000008
#define ESP_DEBUG_MSGOUT        0x00000010
#define ESP_DEBUG_CMDDONE       0x00000020
#define ESP_DEBUG_DISCONNECT    0x00000040
#define ESP_DEBUG_DATASTART     0x00000080
#define ESP_DEBUG_DATADONE      0x00000100
#define ESP_DEBUG_RECONNECT     0x00000200
#define ESP_DEBUG_AUTOSENSE     0x00000400

/* Per-category conditional printk wrappers; each expands to a printk
 * guarded by the corresponding esp_debug bit.
 */
#define esp_log_intr(f, a...) \
do {    if (esp_debug & ESP_DEBUG_INTR) \
                printk(f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do {    if (esp_debug & ESP_DEBUG_RESET) \
                printk(f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do {    if (esp_debug & ESP_DEBUG_MSGIN) \
                printk(f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do {    if (esp_debug & ESP_DEBUG_MSGOUT) \
                printk(f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do {    if (esp_debug & ESP_DEBUG_CMDDONE) \
                printk(f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do {    if (esp_debug & ESP_DEBUG_DISCONNECT) \
                printk(f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do {    if (esp_debug & ESP_DEBUG_DATASTART) \
                printk(f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do {    if (esp_debug & ESP_DEBUG_DATADONE) \
                printk(f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do {    if (esp_debug & ESP_DEBUG_RECONNECT) \
                printk(f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do {    if (esp_debug & ESP_DEBUG_AUTOSENSE) \
                printk(f, ## a); \
} while (0)

/* Register accessors dispatch through the bus-specific ops vector. */
#define esp_read8(REG)          esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG)     esp->ops->esp_write8(esp, VAL, REG)
/* Snapshot the cached chip registers and driver state into an event-log
 * entry, for post-mortem dumps via esp_dump_cmd_log().
 */
static void esp_log_fill_regs(struct esp *esp,
                              struct esp_event_ent *p)
{
        p->sreg = esp->sreg;
        p->seqreg = esp->seqreg;
        p->sreg2 = esp->sreg2;
        p->ireg = esp->ireg;
        p->select_state = esp->select_state;
        p->event = esp->event;
}
/* Issue a command byte to the chip's command register, recording it in
 * the circular event log first so command history survives for debug.
 */
void scsi_esp_cmd(struct esp *esp, u8 val)
{
        struct esp_event_ent *p;
        int idx = esp->esp_event_cur;

        p = &esp->esp_event_log[idx];
        p->type = ESP_EVENT_TYPE_CMD;
        p->val = val;
        esp_log_fill_regs(esp, p);

        /* Ring-buffer advance; ESP_EVENT_LOG_SZ is a power of two. */
        esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

        esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);
/* Transition the driver state machine to a new event, logging the
 * transition in the same circular event log as chip commands.
 */
static void esp_event(struct esp *esp, u8 val)
{
        struct esp_event_ent *p;
        int idx = esp->esp_event_cur;

        p = &esp->esp_event_log[idx];
        p->type = ESP_EVENT_TYPE_EVENT;
        p->val = val;
        esp_log_fill_regs(esp, p);

        esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

        esp->event = val;
}
/* Dump one full lap of the circular command/event log, oldest entry
 * first (the cursor points at the slot that will be overwritten next).
 */
static void esp_dump_cmd_log(struct esp *esp)
{
        int start = esp->esp_event_cur;
        int i;

        printk(KERN_INFO PFX "esp%d: Dumping command log\n",
               esp->host->unique_id);

        for (i = 0; i < ESP_EVENT_LOG_SZ; i++) {
                int idx = (start + i) & (ESP_EVENT_LOG_SZ - 1);
                struct esp_event_ent *p = &esp->esp_event_log[idx];

                printk(KERN_INFO PFX "esp%d: ent[%d] %s ",
                       esp->host->unique_id, idx,
                       p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT");

                printk("val[%02x] sreg[%02x] seqreg[%02x] "
                       "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
                       p->val, p->sreg, p->seqreg,
                       p->sreg2, p->ireg, p->select_state, p->event);
        }
}
/* Flush the chip FIFO.  On the ESP236 the flush may not take effect
 * immediately, so poll the FIFO-flags register (up to ~1ms) until the
 * byte count reads zero.
 */
static void esp_flush_fifo(struct esp *esp)
{
        int lim = 1000;

        scsi_esp_cmd(esp, ESP_CMD_FLUSH);

        if (esp->rev != ESP236)
                return;

        while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
                if (--lim == 0) {
                        printk(KERN_ALERT PFX "esp%d: ESP_FF_BYTES "
                               "will not clear!\n",
                               esp->host->unique_id);
                        break;
                }
                udelay(1);
        }
}
/* Drain the FASHME chip's FIFO into esp->fifo[].  The HME moves data
 * two bytes per FIFO slot; a dangling odd byte is flagged in sreg2 and
 * must be pushed out by writing a pad byte first.
 */
static void hme_read_fifo(struct esp *esp)
{
        int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
        int idx = 0;

        while (fcnt--) {
                esp->fifo[idx++] = esp_read8(ESP_FDATA);
                esp->fifo[idx++] = esp_read8(ESP_FDATA);
        }
        if (esp->sreg2 & ESP_STAT2_F1BYTE) {
                /* Push the stray half-word through, then flush. */
                esp_write8(0, ESP_FDATA);
                esp->fifo[idx++] = esp_read8(ESP_FDATA);
                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
        }
        esp->fifo_cnt = idx;
}
/* Store the same CONFIG3 value into every per-target slot. */
static void esp_set_all_config3(struct esp *esp, u8 val)
{
        int tgt;

        for (tgt = 0; tgt < ESP_MAX_TARGET; tgt++)
                esp->target[tgt].esp_config3 = val;
}
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
        u8 family_code, version;

        /* Now reset the ESP chip */
        scsi_esp_cmd(esp, ESP_CMD_RC);
        scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
        if (esp->rev == FAST)
                esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
        scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

        /* This is the only point at which it is reliable to read
         * the ID-code for a fast ESP chip variants.
         */
        esp->max_period = ((35 * esp->ccycle) / 1000);
        if (esp->rev == FAST) {
                /* Probe the exact variant from the unique-ID register's
                 * family-code field.
                 */
                version = esp_read8(ESP_UID);
                family_code = (version & 0xf8) >> 3;
                if (family_code == 0x02)
                        esp->rev = FAS236;
                else if (family_code == 0x0a)
                        esp->rev = FASHME; /* Version is usually '5'. */
                else
                        esp->rev = FAS100A;
                esp->min_period = ((4 * esp->ccycle) / 1000);
        } else {
                esp->min_period = ((5 * esp->ccycle) / 1000);
        }
        /* Convert period limits from nanoseconds to clock ticks
         * (rounded up).
         */
        esp->max_period = (esp->max_period + 3)>>2;
        esp->min_period = (esp->min_period + 3)>>2;

        esp_write8(esp->config1, ESP_CFG1);
        switch (esp->rev) {
        case ESP100:
                /* nothing to do */
                break;

        case ESP100A:
                esp_write8(esp->config2, ESP_CFG2);
                break;

        case ESP236:
                /* Slow 236 */
                esp_write8(esp->config2, ESP_CFG2);
                esp->prev_cfg3 = esp->target[0].esp_config3;
                esp_write8(esp->prev_cfg3, ESP_CFG3);
                break;

        case FASHME:
                esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
                /* fallthrough... */

        case FAS236:
                /* Fast 236 or HME */
                esp_write8(esp->config2, ESP_CFG2);
                if (esp->rev == FASHME) {
                        u8 cfg3 = esp->target[0].esp_config3;

                        cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
                        if (esp->scsi_id >= 8)
                                cfg3 |= ESP_CONFIG3_IDBIT3;
                        esp_set_all_config3(esp, cfg3);
                } else {
                        u32 cfg3 = esp->target[0].esp_config3;

                        cfg3 |= ESP_CONFIG3_FCLK;
                        esp_set_all_config3(esp, cfg3);
                }
                esp->prev_cfg3 = esp->target[0].esp_config3;
                esp_write8(esp->prev_cfg3, ESP_CFG3);
                if (esp->rev == FASHME) {
                        esp->radelay = 80;
                } else {
                        if (esp->flags & ESP_FLAG_DIFFERENTIAL)
                                esp->radelay = 0;
                        else
                                esp->radelay = 96;
                }
                break;

        case FAS100A:
                /* Fast 100a */
                esp_write8(esp->config2, ESP_CFG2);
                esp_set_all_config3(esp,
                                    (esp->target[0].esp_config3 |
                                     ESP_CONFIG3_FCLOCK));
                esp->prev_cfg3 = esp->target[0].esp_config3;
                esp_write8(esp->prev_cfg3, ESP_CFG3);
                esp->radelay = 32;
                break;

        default:
                break;
        }

        /* Reload the configuration registers */
        esp_write8(esp->cfact, ESP_CFACT);

        esp->prev_stp = 0;
        esp_write8(esp->prev_stp, ESP_STP);

        esp->prev_soff = 0;
        esp_write8(esp->prev_soff, ESP_SOFF);

        esp_write8(esp->neg_defp, ESP_TIMEO);

        /* Eat any bitrot in the chip */
        esp_read8(ESP_INTRPT);
        udelay(100);
}
/* DMA-map the command's scatterlist and initialize the per-command
 * transfer cursor (current segment, bytes left in it, total bytes).
 * No-op for commands with no data phase.
 */
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
        struct scatterlist *sg = scsi_sglist(cmd);
        int dir = cmd->sc_data_direction;
        int total, i;

        if (dir == DMA_NONE)
                return;

        spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
        spriv->cur_residue = sg_dma_len(sg);
        spriv->cur_sg = sg;

        total = 0;
        for (i = 0; i < spriv->u.num_sg; i++)
                total += sg_dma_len(&sg[i]);
        spriv->tot_residue = total;
}
/* Return the bus address at which the next data transfer should start.
 * Autosense commands transfer out of the dedicated sense buffer; normal
 * commands resume at the unconsumed part of the current SG segment.
 */
static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
                                   struct scsi_cmnd *cmd)
{
        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
                return ent->sense_dma +
                        (ent->sense_ptr - cmd->sense_buffer);

        return sg_dma_address(spriv->cur_sg) +
                (sg_dma_len(spriv->cur_sg) - spriv->cur_residue);
}
/* Return how many bytes remain in the current transfer unit: the rest
 * of the sense buffer for autosense, otherwise the rest of the current
 * SG segment.
 */
static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
                                    struct scsi_cmnd *cmd)
{
        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
                return SCSI_SENSE_BUFFERSIZE -
                        (ent->sense_ptr - cmd->sense_buffer);

        return spriv->cur_residue;
}
/* Account for 'len' bytes just transferred: advance the sense pointer
 * (autosense) or the SG cursor, stepping to the next segment when the
 * current one is exhausted.  Clamps and logs on overflow.
 */
static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
                            struct scsi_cmnd *cmd, unsigned int len)
{
        struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                ent->sense_ptr += len;
                return;
        }

        p->cur_residue -= len;
        p->tot_residue -= len;
        if (p->cur_residue < 0 || p->tot_residue < 0) {
                /* Chip reported more data moved than we mapped; clamp
                 * so later accounting can't walk off the scatterlist.
                 */
                printk(KERN_ERR PFX "esp%d: Data transfer overflow.\n",
                       esp->host->unique_id);
                printk(KERN_ERR PFX "esp%d: cur_residue[%d] tot_residue[%d] "
                       "len[%u]\n",
                       esp->host->unique_id,
                       p->cur_residue, p->tot_residue, len);
                p->cur_residue = 0;
                p->tot_residue = 0;
        }
        if (!p->cur_residue && p->tot_residue) {
                p->cur_sg++;
                p->cur_residue = sg_dma_len(p->cur_sg);
        }
}
/* Undo esp_map_dma(): release the scatterlist mapping, if any. */
static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);

        if (cmd->sc_data_direction == DMA_NONE)
                return;

        esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg,
                           cmd->sc_data_direction);
}
/* Snapshot the transfer cursor (SCSI SAVE DATA POINTERS semantics) so a
 * later RESTORE POINTERS can resume from the saved position.
 */
static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(ent->cmd);

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                ent->saved_sense_ptr = ent->sense_ptr;
                return;
        }

        ent->saved_cur_residue = spriv->cur_residue;
        ent->saved_cur_sg = spriv->cur_sg;
        ent->saved_tot_residue = spriv->tot_residue;
}
/* Restore the transfer cursor saved by esp_save_pointers(). */
static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
{
        struct esp_cmd_priv *spriv = ESP_CMD_PRIV(ent->cmd);

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                ent->sense_ptr = ent->saved_sense_ptr;
                return;
        }

        spriv->cur_residue = ent->saved_cur_residue;
        spriv->cur_sg = ent->saved_cur_sg;
        spriv->tot_residue = ent->saved_tot_residue;
}
/* Decide whether the CDB can be delivered via the chip's automatic
 * select sequence (standard 6/10/12-byte lengths) or must go through
 * the slow, manually-sequenced path.
 */
static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
{
        switch (cmd->cmd_len) {
        case 6:
        case 10:
        case 12:
                esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
                break;
        default:
                esp->flags |= ESP_FLAG_DOING_SLOWCMD;
                break;
        }
}
/* Program the per-target CONFIG3 register, skipping the write when the
 * cached value is already current.  Chips at or below ESP100A have no
 * CONFIG3 register.
 */
static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
        u8 val;

        if (esp->rev <= ESP100A)
                return;

        val = esp->target[tgt].esp_config3;
        if (val != esp->prev_cfg3) {
                esp->prev_cfg3 = val;
                esp_write8(val, ESP_CFG3);
        }
}
/* Load the target's synchronous offset and period registers, touching
 * each register only when its cached value is stale.
 */
static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
        u8 offset = esp->target[tgt].esp_offset;
        u8 period = esp->target[tgt].esp_period;

        if (offset != esp->prev_soff) {
                esp->prev_soff = offset;
                esp_write8(offset, ESP_SOFF);
        }
        if (period != esp->prev_stp) {
                esp->prev_stp = period;
                esp_write8(period, ESP_STP);
        }
}
/* Clamp a DMA transfer length to what the chip variant can handle. */
static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
        u32 base, end;

        if (esp->rev == FASHME) {
                /* Arbitrary segment boundaries, 24-bit counts. */
                if (dma_len > (1U << 24))
                        dma_len = (1U << 24);
                return dma_len;
        }

        /* ESP chip limits other variants by 16-bits of transfer
         * count.  Actually on FAS100A and FAS236 we could get
         * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
         * in the ESP_CFG2 register but that causes other unwanted
         * changes so we don't use it currently.
         */
        if (dma_len > (1U << 16))
                dma_len = (1U << 16);

        /* All of the DMA variants hooked up to these chips
         * cannot handle crossing a 24-bit address boundary.
         */
        base = dma_addr & ((1U << 24) - 1U);
        end = base + dma_len;
        if (end > (1U << 24))
                end = (1U << 24);

        return end - base;
}
/* Wide negotiation is needed iff the live width differs from our goal. */
static int esp_need_to_nego_wide(struct esp_target_data *tp)
{
        return tp->nego_goal_width != spi_width(tp->starget);
}
/* Sync negotiation is needed when offset or period differ from the
 * goal.  When offset is zero, period is "don't care".
 */
static int esp_need_to_nego_sync(struct esp_target_data *tp)
{
        struct scsi_target *target = tp->starget;

        if (!spi_offset(target) && !tp->nego_goal_offset)
                return 0;

        return spi_offset(target) != tp->nego_goal_offset ||
               spi_period(target) != tp->nego_goal_period;
}
/* Claim the per-LUN issue slot for a command.  Untagged commands need
 * exclusive access to the LUN, so the queue is "plugged" (lp->hold)
 * while tagged commands drain; tagged commands register in the per-tag
 * table.  Returns 0 when the command may be issued, -EBUSY to retry
 * later.
 */
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
                             struct esp_lun_data *lp)
{
        if (!ent->orig_tag[0]) {
                /* Non-tagged, slot already taken?  */
                if (lp->non_tagged_cmd)
                        return -EBUSY;

                if (lp->hold) {
                        /* We are being held by active tagged
                         * commands.
                         */
                        if (lp->num_tagged)
                                return -EBUSY;

                        /* Tagged commands completed, we can unplug
                         * the queue and run this untagged command.
                         */
                        lp->hold = 0;
                } else if (lp->num_tagged) {
                        /* Plug the queue until num_tagged decreases
                         * to zero in esp_free_lun_tag.
                         */
                        lp->hold = 1;
                        return -EBUSY;
                }

                lp->non_tagged_cmd = ent;
                return 0;
        } else {
                /* Tagged command, see if blocked by a
                 * non-tagged one.
                 */
                if (lp->non_tagged_cmd || lp->hold)
                        return -EBUSY;
        }

        /* orig_tag[1] is the tag value; each must be unique per LUN. */
        BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

        lp->tagged_cmds[ent->orig_tag[1]] = ent;
        lp->num_tagged++;

        return 0;
}
/* Release the per-LUN slot claimed by esp_alloc_lun_tag(). */
static void esp_free_lun_tag(struct esp_cmd_entry *ent,
                             struct esp_lun_data *lp)
{
        if (!ent->orig_tag[0]) {
                BUG_ON(lp->non_tagged_cmd != ent);
                lp->non_tagged_cmd = NULL;
                return;
        }

        BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
        lp->tagged_cmds[ent->orig_tag[1]] = NULL;
        lp->num_tagged--;
}
/* When a contingent allegiance conditon is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data.  I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
        struct scsi_cmnd *cmd = ent->cmd;
        struct scsi_device *dev = cmd->device;
        int tgt, lun;
        u8 *p, val;

        tgt = dev->id;
        lun = dev->lun;

        if (!ent->sense_ptr) {
                esp_log_autosense("esp%d: Doing auto-sense for "
                                  "tgt[%d] lun[%d]\n",
                                  esp->host->unique_id, tgt, lun);

                /* Map the midlayer sense buffer for device->memory DMA. */
                ent->sense_ptr = cmd->sense_buffer;
                ent->sense_dma = esp->ops->map_single(esp,
                                                      ent->sense_ptr,
                                                      SCSI_SENSE_BUFFERSIZE,
                                                      DMA_FROM_DEVICE);
        }
        ent->saved_sense_ptr = ent->sense_ptr;

        esp->active_cmd = ent;

        /* Build IDENTIFY + a 6-byte REQUEST SENSE CDB in the command
         * block and kick off selection via DMA.
         */
        p = esp->command_block;
        esp->msg_out_len = 0;

        *p++ = IDENTIFY(0, lun);
        *p++ = REQUEST_SENSE;
        *p++ = ((dev->scsi_level <= SCSI_2) ?
                (lun << 5) : 0);
        *p++ = 0;
        *p++ = 0;
        *p++ = SCSI_SENSE_BUFFERSIZE;
        *p++ = 0;

        esp->select_state = ESP_SELECT_BASIC;

        val = tgt;
        if (esp->rev == FASHME)
                val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
        esp_write8(val, ESP_BUSID);

        esp_write_tgt_sync(esp, tgt);
        esp_write_tgt_config3(esp, tgt);

        val = (p - esp->command_block);

        if (esp->rev == FASHME)
                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
        esp->ops->send_dma_cmd(esp, esp->command_block_dma,
                               val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}
/* Scan the queued-command list for the first command that can be issued
 * now.  Autosense requests always win; ordinary commands must be able
 * to claim the per-LUN tag/slot.  Returns NULL when nothing is ready.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
        struct esp_cmd_entry *ent;

        list_for_each_entry(ent, &esp->queued_cmds, list) {
                struct scsi_cmnd *cmd = ent->cmd;
                struct scsi_device *dev = cmd->device;
                struct esp_lun_data *lp = dev->hostdata;

                if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                        ent->tag[0] = 0;
                        ent->tag[1] = 0;
                        return ent;
                }

                if (!scsi_populate_tag_msg(cmd, &ent->tag[0])) {
                        ent->tag[0] = 0;
                        ent->tag[1] = 0;
                }
                ent->orig_tag[0] = ent->tag[0];
                ent->orig_tag[1] = ent->tag[1];

                if (esp_alloc_lun_tag(ent, lp) < 0)
                        continue;

                return ent;
        }

        return NULL;
}
/* If the bus is free, pick the next issuable command, build its
 * IDENTIFY/tag/negotiation message bytes plus CDB in the command block,
 * and start selection.  Slow commands (odd CDB lengths, negotiation,
 * ESP100 tagged) use select-and-stop so the driver can sequence the
 * message-out phase by hand.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
        struct esp_target_data *tp;
        struct esp_lun_data *lp;
        struct scsi_device *dev;
        struct scsi_cmnd *cmd;
        struct esp_cmd_entry *ent;
        int tgt, lun, i;
        u32 val, start_cmd;
        u8 *p;

        if (esp->active_cmd ||
            (esp->flags & ESP_FLAG_RESETTING))
                return;

        ent = find_and_prep_issuable_command(esp);
        if (!ent)
                return;

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                esp_autosense(esp, ent);
                return;
        }

        cmd = ent->cmd;
        dev = cmd->device;
        tgt = dev->id;
        lun = dev->lun;
        tp = &esp->target[tgt];
        lp = dev->hostdata;

        list_move(&ent->list, &esp->active_cmds);

        esp->active_cmd = ent;

        esp_map_dma(esp, cmd);
        esp_save_pointers(esp, ent);

        esp_check_command_len(esp, cmd);

        p = esp->command_block;

        esp->msg_out_len = 0;
        if (tp->flags & ESP_TGT_CHECK_NEGO) {
                /* Need to negotiate.  If the target is broken
                 * go for synchronous transfers and non-wide.
                 */
                if (tp->flags & ESP_TGT_BROKEN) {
                        tp->flags &= ~ESP_TGT_DISCONNECT;
                        tp->nego_goal_period = 0;
                        tp->nego_goal_offset = 0;
                        tp->nego_goal_width = 0;
                        tp->nego_goal_tags = 0;
                }

                /* If the settings are not changing, skip this.  */
                if (spi_width(tp->starget) == tp->nego_goal_width &&
                    spi_period(tp->starget) == tp->nego_goal_period &&
                    spi_offset(tp->starget) == tp->nego_goal_offset) {
                        tp->flags &= ~ESP_TGT_CHECK_NEGO;
                        goto build_identify;
                }

                if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
                        esp->msg_out_len =
                                spi_populate_width_msg(&esp->msg_out[0],
                                                       (tp->nego_goal_width ?
                                                        1 : 0));
                        tp->flags |= ESP_TGT_NEGO_WIDE;
                } else if (esp_need_to_nego_sync(tp)) {
                        esp->msg_out_len =
                                spi_populate_sync_msg(&esp->msg_out[0],
                                                      tp->nego_goal_period,
                                                      tp->nego_goal_offset);
                        tp->flags |= ESP_TGT_NEGO_SYNC;
                } else {
                        tp->flags &= ~ESP_TGT_CHECK_NEGO;
                }

                /* Process it like a slow command.  */
                if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
                        esp->flags |= ESP_FLAG_DOING_SLOWCMD;
        }

build_identify:
        /* If we don't have a lun-data struct yet, we're probing
         * so do not disconnect.  Also, do not disconnect unless
         * we have a tag on this command.
         */
        if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
                *p++ = IDENTIFY(1, lun);
        else
                *p++ = IDENTIFY(0, lun);

        if (ent->tag[0] && esp->rev == ESP100) {
                /* ESP100 lacks select w/atn3 command, use select
                 * and stop instead.
                 */
                esp->flags |= ESP_FLAG_DOING_SLOWCMD;
        }

        if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
                start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
                if (ent->tag[0]) {
                        *p++ = ent->tag[0];
                        *p++ = ent->tag[1];

                        start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
                }

                for (i = 0; i < cmd->cmd_len; i++)
                        *p++ = cmd->cmnd[i];

                esp->select_state = ESP_SELECT_BASIC;
        } else {
                esp->cmd_bytes_left = cmd->cmd_len;
                esp->cmd_bytes_ptr = &cmd->cmnd[0];

                if (ent->tag[0]) {
                        /* Prepend the tag message bytes to msg_out. */
                        for (i = esp->msg_out_len - 1;
                             i >= 0; i--)
                                esp->msg_out[i + 2] = esp->msg_out[i];
                        esp->msg_out[0] = ent->tag[0];
                        esp->msg_out[1] = ent->tag[1];
                        esp->msg_out_len += 2;
                }

                start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
                esp->select_state = ESP_SELECT_MSGOUT;
        }
        val = tgt;
        if (esp->rev == FASHME)
                val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
        esp_write8(val, ESP_BUSID);

        esp_write_tgt_sync(esp, tgt);
        esp_write_tgt_config3(esp, tgt);

        val = (p - esp->command_block);

        if (esp_debug & ESP_DEBUG_SCSICMD) {
                printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
                for (i = 0; i < cmd->cmd_len; i++)
                        printk("%02x ", cmd->cmnd[i]);
                printk("]\n");
        }

        if (esp->rev == FASHME)
                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
        esp->ops->send_dma_cmd(esp, esp->command_block_dma,
                               val, 16, 0, start_cmd);
}
/* Obtain a zeroed command entry, recycling from the free pool when
 * possible and falling back to atomic allocation otherwise.  May return
 * NULL under memory pressure.
 */
static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
{
        struct list_head *pool = &esp->esp_cmd_pool;
        struct esp_cmd_entry *ent;

        if (!list_empty(pool)) {
                ent = list_entry(pool->next, struct esp_cmd_entry, list);
                list_del(&ent->list);
                memset(ent, 0, sizeof(*ent));
                return ent;
        }

        return kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
}
/* Return a command entry to the free pool for reuse by esp_get_ent(). */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
        list_add(&ent->list, &esp->esp_cmd_pool);
}
/* Complete a command: release DMA and tag resources, translate
 * autosense completion into a CHECK CONDITION result with sense data
 * provided, notify the midlayer, recycle the entry, and try to start
 * the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
                            struct scsi_cmnd *cmd, unsigned int result)
{
        struct scsi_device *dev = cmd->device;
        int tgt = dev->id;
        int lun = dev->lun;

        esp->active_cmd = NULL;
        esp_unmap_dma(esp, cmd);
        esp_free_lun_tag(ent, dev->hostdata);
        cmd->result = result;

        if (ent->eh_done) {
                /* An error-handler thread is waiting on this command. */
                complete(ent->eh_done);
                ent->eh_done = NULL;
        }

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                esp->ops->unmap_single(esp, ent->sense_dma,
                                       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
                ent->sense_ptr = NULL;

                /* Restore the message/status bytes to what we actually
                 * saw originally.  Also, report that we are providing
                 * the sense data.
                 */
                cmd->result = ((DRIVER_SENSE << 24) |
                               (DID_OK << 16) |
                               (COMMAND_COMPLETE << 8) |
                               (SAM_STAT_CHECK_CONDITION << 0));

                ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
                if (esp_debug & ESP_DEBUG_AUTOSENSE) {
                        int i;

                        printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
                               esp->host->unique_id, tgt, lun);
                        for (i = 0; i < 18; i++)
                                printk("%02x ", cmd->sense_buffer[i]);
                        printk("]\n");
                }
        }

        cmd->scsi_done(cmd);

        list_del(&ent->list);
        esp_put_ent(esp, ent);

        esp_maybe_execute_command(esp);
}
/* Pack a SCSI result word: status in byte 0, message in byte 1,
 * driver code in byte 2.
 */
static unsigned int compose_result(unsigned int status, unsigned int message,
                                   unsigned int driver_code)
{
        unsigned int res = driver_code;

        res <<= 8;
        res |= message;
        res <<= 8;
        res |= status;

        return res;
}
/* Target reported QUEUE FULL: tell the midlayer to shrink this LUN's
 * queue depth to the number of tags currently outstanding minus one.
 */
static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
        struct scsi_device *dev = ent->cmd->device;
        struct esp_lun_data *lp = dev->hostdata;

        scsi_track_queue_full(dev, lp->num_tagged - 1);
}
/* Midlayer queuecommand entry (locked variant, wrapped by
 * DEF_SCSI_QCMD below): wrap the scsi_cmnd in a driver entry, append it
 * to the issue queue, and try to start it if the bus is idle.
 */
static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
        struct scsi_device *dev = cmd->device;
        struct esp *esp = shost_priv(dev->host);
        struct esp_cmd_priv *spriv;
        struct esp_cmd_entry *ent;

        ent = esp_get_ent(esp);
        if (!ent)
                return SCSI_MLQUEUE_HOST_BUSY;

        ent->cmd = cmd;

        cmd->scsi_done = done;

        spriv = ESP_CMD_PRIV(cmd);
        /* Sentinel: no single-buffer DMA mapping yet. */
        spriv->u.dma_addr = ~(dma_addr_t)0x0;

        list_add_tail(&ent->list, &esp->queued_cmds);

        esp_maybe_execute_command(esp);

        return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)
/* Report whether the status register shows a gross error.  Returns 1
 * (and logs) when set, else 0.
 */
static int esp_check_gross_error(struct esp *esp)
{
        if (!(esp->sreg & ESP_STAT_SPAM))
                return 0;

        /* Gross Error, could be one of:
         * - top of fifo overwritten
         * - top of command register overwritten
         * - DMA programmed with wrong direction
         * - improper phase change
         */
        printk(KERN_ERR PFX "esp%d: Gross error sreg[%02x]\n",
               esp->host->unique_id, esp->sreg);
        /* XXX Reset the chip. XXX */
        return 1;
}
/* Classify an interrupt with no INTR bit set in the status register.
 * Returns 1 for a real SCSI-bus-reset interrupt, -1 for a spurious
 * interrupt or DMA error (caller should bail), 0 to continue normal
 * interrupt processing.
 */
static int esp_check_spur_intr(struct esp *esp)
{
        switch (esp->rev) {
        case ESP100:
        case ESP100A:
                /* The interrupt pending bit of the status register cannot
                 * be trusted on these revisions.
                 */
                esp->sreg &= ~ESP_STAT_INTR;
                break;

        default:
                if (!(esp->sreg & ESP_STAT_INTR)) {
                        esp->ireg = esp_read8(ESP_INTRPT);
                        if (esp->ireg & ESP_INTR_SR)
                                return 1;

                        /* If the DMA is indicating interrupt pending and the
                         * ESP is not, the only possibility is a DMA error.
                         */
                        if (!esp->ops->dma_error(esp)) {
                                printk(KERN_ERR PFX "esp%d: Spurious irq, "
                                       "sreg=%02x.\n",
                                       esp->host->unique_id, esp->sreg);
                                return -1;
                        }

                        printk(KERN_ERR PFX "esp%d: DMA error\n",
                               esp->host->unique_id);

                        /* XXX Reset the chip. XXX */
                        return -1;
                }
                break;
        }

        return 0;
}
/* Flag the driver for a full bus reset; the reset itself is carried out
 * by the state machine when it processes ESP_EVENT_RESET.
 */
static void esp_schedule_reset(struct esp *esp)
{
        esp_log_reset("ESP: esp_schedule_reset() from %p\n",
                      __builtin_return_address(0));
        esp->flags |= ESP_FLAG_RESETTING;
        esp_event(esp, ESP_EVENT_RESET);
}
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
                                                    struct esp_lun_data *lp)
{
        struct esp_cmd_entry *ent;
        int i;

        if (!lp->num_tagged) {
                printk(KERN_ERR PFX "esp%d: Reconnect w/num_tagged==0\n",
                       esp->host->unique_id);
                return NULL;
        }

        esp_log_reconnect("ESP: reconnect tag, ");

        /* Busy-wait for the chip to raise the next interrupt. */
        for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
                if (esp->ops->irq_pending(esp))
                        break;
        }
        if (i == ESP_QUICKIRQ_LIMIT) {
                printk(KERN_ERR PFX "esp%d: Reconnect IRQ1 timeout\n",
                       esp->host->unique_id);
                return NULL;
        }

        esp->sreg = esp_read8(ESP_STATUS);
        esp->ireg = esp_read8(ESP_INTRPT);

        esp_log_reconnect("IRQ(%d:%x:%x), ",
                          i, esp->ireg, esp->sreg);

        if (esp->ireg & ESP_INTR_DC) {
                printk(KERN_ERR PFX "esp%d: Reconnect, got disconnect.\n",
                       esp->host->unique_id);
                return NULL;
        }

        /* We must be in message-in phase to read the tag bytes. */
        if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
                printk(KERN_ERR PFX "esp%d: Reconnect, not MIP sreg[%02x].\n",
                       esp->host->unique_id, esp->sreg);
                return NULL;
        }

        /* DMA in the tag bytes... */
        esp->command_block[0] = 0xff;
        esp->command_block[1] = 0xff;
        esp->ops->send_dma_cmd(esp, esp->command_block_dma,
                               2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

        /* ACK the message.  */
        scsi_esp_cmd(esp, ESP_CMD_MOK);

        for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
                if (esp->ops->irq_pending(esp)) {
                        esp->sreg = esp_read8(ESP_STATUS);
                        esp->ireg = esp_read8(ESP_INTRPT);
                        if (esp->ireg & ESP_INTR_FDONE)
                                break;
                }
                udelay(1);
        }
        if (i == ESP_RESELECT_TAG_LIMIT) {
                printk(KERN_ERR PFX "esp%d: Reconnect IRQ2 timeout\n",
                       esp->host->unique_id);
                return NULL;
        }
        esp->ops->dma_drain(esp);
        esp->ops->dma_invalidate(esp);

        esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
                          i, esp->ireg, esp->sreg,
                          esp->command_block[0],
                          esp->command_block[1]);

        /* Byte 0 must be a queue-tag message, byte 1 the tag value. */
        if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
            esp->command_block[0] > ORDERED_QUEUE_TAG) {
                printk(KERN_ERR PFX "esp%d: Reconnect, bad tag "
                       "type %02x.\n",
                       esp->host->unique_id, esp->command_block[0]);
                return NULL;
        }

        ent = lp->tagged_cmds[esp->command_block[1]];
        if (!ent) {
                printk(KERN_ERR PFX "esp%d: Reconnect, no entry for "
                       "tag %02x.\n",
                       esp->host->unique_id, esp->command_block[1]);
                return NULL;
        }

        return ent;
}
/* Handle a target reselecting us: decode target/LUN from the FIFO (or
 * arbitration bitmap on older chips), find the disconnected command
 * (via tag polling when needed), and make it the active command.
 * Returns 1 on success; schedules a bus reset and returns 0 on any
 * inconsistency.
 */
static int esp_reconnect(struct esp *esp)
{
        struct esp_cmd_entry *ent;
        struct esp_target_data *tp;
        struct esp_lun_data *lp;
        struct scsi_device *dev;
        int target, lun;

        BUG_ON(esp->active_cmd);
        if (esp->rev == FASHME) {
                /* FASHME puts the target and lun numbers directly
                 * into the fifo.
                 */
                target = esp->fifo[0];
                lun = esp->fifo[1] & 0x7;
        } else {
                u8 bits = esp_read8(ESP_FDATA);

                /* Older chips put the lun directly into the fifo, but
                 * the target is given as a sample of the arbitration
                 * lines on the bus at reselection time.  So we should
                 * see the ID of the ESP and the one reconnecting target
                 * set in the bitmap.
                 */
                if (!(bits & esp->scsi_id_mask))
                        goto do_reset;
                bits &= ~esp->scsi_id_mask;
                /* Exactly one other bit must remain set. */
                if (!bits || (bits & (bits - 1)))
                        goto do_reset;

                target = ffs(bits) - 1;
                lun = (esp_read8(ESP_FDATA) & 0x7);

                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
                if (esp->rev == ESP100) {
                        u8 ireg = esp_read8(ESP_INTRPT);
                        /* This chip has a bug during reselection that can
                         * cause a spurious illegal-command interrupt, which
                         * we simply ACK here.  Another possibility is a bus
                         * reset so we must check for that.
                         */
                        if (ireg & ESP_INTR_SR)
                                goto do_reset;
                }
                scsi_esp_cmd(esp, ESP_CMD_NULL);
        }

        esp_write_tgt_sync(esp, target);
        esp_write_tgt_config3(esp, target);

        scsi_esp_cmd(esp, ESP_CMD_MOK);

        if (esp->rev == FASHME)
                esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
                           ESP_BUSID);

        tp = &esp->target[target];
        dev = __scsi_device_lookup_by_target(tp->starget, lun);
        if (!dev) {
                printk(KERN_ERR PFX "esp%d: Reconnect, no lp "
                       "tgt[%u] lun[%u]\n",
                       esp->host->unique_id, target, lun);
                goto do_reset;
        }
        lp = dev->hostdata;

        ent = lp->non_tagged_cmd;
        if (!ent) {
                ent = esp_reconnect_with_tag(esp, lp);
                if (!ent)
                        goto do_reset;
        }

        esp->active_cmd = ent;

        if (ent->flags & ESP_CMD_FLAG_ABORT) {
                /* Deliver the pending abort as soon as we can send
                 * a message out.
                 */
                esp->msg_out[0] = ABORT_TASK_SET;
                esp->msg_out_len = 1;
                scsi_esp_cmd(esp, ESP_CMD_SATN);
        }

        esp_event(esp, ESP_EVENT_CHECK_PHASE);
        esp_restore_pointers(esp, ent);
        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
        return 1;

do_reset:
        esp_schedule_reset(esp);
        return 0;
}
/* Process the interrupt that ends a selection attempt.  Distinguishes:
 * reselection won the bus (back out and requeue), selection timeout
 * (fail the command as DID_BAD_TARGET), successful selection (advance
 * the state machine), DMA error, or anything else (schedule reset).
 * Return value is currently ignored by the caller.
 */
static int esp_finish_select(struct esp *esp)
{
        struct esp_cmd_entry *ent;
        struct scsi_cmnd *cmd;
        u8 orig_select_state;

        orig_select_state = esp->select_state;

        /* No longer selecting.  */
        esp->select_state = ESP_SELECT_NONE;

        esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
        ent = esp->active_cmd;
        cmd = ent->cmd;

        if (esp->ops->dma_error(esp)) {
                /* If we see a DMA error during or as a result of selection,
                 * all bets are off.
                 */
                esp_schedule_reset(esp);
                esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
                return 0;
        }

        esp->ops->dma_invalidate(esp);

        if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
                struct esp_target_data *tp = &esp->target[cmd->device->id];

                /* Carefully back out of the selection attempt.  Release
                 * resources (such as DMA mapping & TAG) and reset state (such
                 * as message out and command delivery variables).
                 */
                if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
                        esp_unmap_dma(esp, cmd);
                        esp_free_lun_tag(ent, cmd->device->hostdata);
                        tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
                        esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
                        esp->cmd_bytes_ptr = NULL;
                        esp->cmd_bytes_left = 0;
                } else {
                        esp->ops->unmap_single(esp, ent->sense_dma,
                                               SCSI_SENSE_BUFFERSIZE,
                                               DMA_FROM_DEVICE);
                        ent->sense_ptr = NULL;
                }

                /* Now that the state is unwound properly, put back onto
                 * the issue queue.  This command is no longer active.
                 */
                list_move(&ent->list, &esp->queued_cmds);
                esp->active_cmd = NULL;

                /* Return value ignored by caller, it directly invokes
                 * esp_reconnect().
                 */
                return 0;
        }

        if (esp->ireg == ESP_INTR_DC) {
                struct scsi_device *dev = cmd->device;

                /* Disconnect.  Make sure we re-negotiate sync and
                 * wide parameters if this target starts responding
                 * again in the future.
                 */
                esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

                scsi_esp_cmd(esp, ESP_CMD_ESEL);
                esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
                return 1;
        }

        if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
                /* Selection successful.  On pre-FAST chips we have
                 * to do a NOP and possibly clean out the FIFO.
                 */
                if (esp->rev <= ESP236) {
                        int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

                        scsi_esp_cmd(esp, ESP_CMD_NULL);

                        if (!fcnt &&
                            (!esp->prev_soff ||
                             ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
                                esp_flush_fifo(esp);
                }

                /* If we are doing a slow command, negotiation, etc.
                 * we'll do the right thing as we transition to the
                 * next phase.
                 */
                esp_event(esp, ESP_EVENT_CHECK_PHASE);
                return 0;
        }

        printk("ESP: Unexpected selection completion ireg[%x].\n",
               esp->ireg);
        esp_schedule_reset(esp);
        return 0;
}
/*
 * Compute how many bytes of the current DMA transfer actually made it
 * to/from the target, from the chip's transfer counter and FIFO flags.
 *
 * Returns the byte count, or -1 when the ESP100 spurious-byte chip bug
 * was detected (caller must reset and avoid sync transfers).  Also
 * decides whether the FIFO needs flushing afterwards.
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
                               struct scsi_cmnd *cmd)
{
        int fifo_cnt, ecount, bytes_sent, flush_fifo;

        fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
        /* In wide mode each FIFO slot holds two bytes. */
        if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
                fifo_cnt <<= 1;

        /* Residual transfer count is only valid when the terminal
         * count bit is clear.
         */
        ecount = 0;
        if (!(esp->sreg & ESP_STAT_TCNT)) {
                ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
                          (((unsigned int)esp_read8(ESP_TCMED)) << 8));
                /* FASHME has a third, high-order counter byte. */
                if (esp->rev == FASHME)
                        ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
        }

        bytes_sent = esp->data_dma_len;
        bytes_sent -= ecount;

        /* On writes, bytes still sitting in the FIFO never reached
         * the target.
         */
        if (!(ent->flags & ESP_CMD_FLAG_WRITE))
                bytes_sent -= fifo_cnt;

        flush_fifo = 0;
        if (!esp->prev_soff) {
                /* Synchronous data transfer, always flush fifo. */
                flush_fifo = 1;
        } else {
                if (esp->rev == ESP100) {
                        u32 fflags, phase;

                        /* ESP100 has a chip bug where in the synchronous data
                         * phase it can mistake a final long REQ pulse from the
                         * target as an extra data byte.  Fun.
                         *
                         * To detect this case we resample the status register
                         * and fifo flags.  If we're still in a data phase and
                         * we see spurious chunks in the fifo, we return error
                         * to the caller which should reset and set things up
                         * such that we only try future transfers to this
                         * target in synchronous mode.
                         */
                        esp->sreg = esp_read8(ESP_STATUS);
                        phase = esp->sreg & ESP_STAT_PMASK;
                        fflags = esp_read8(ESP_FFLAGS);

                        if ((phase == ESP_DOP &&
                             (fflags & ESP_FF_ONOTZERO)) ||
                            (phase == ESP_DIP &&
                             (fflags & ESP_FF_FBYTES)))
                                return -1;
                }
                if (!(ent->flags & ESP_CMD_FLAG_WRITE))
                        flush_fifo = 1;
        }

        if (flush_fifo)
                esp_flush_fifo(esp);

        return bytes_sent;
}
/*
 * Commit a negotiated synchronous transfer agreement: record it in the
 * SPI transport class, program the chip's period/offset registers, and
 * cache the values for later reselection reprogramming.
 *
 * @scsi_period/@scsi_offset: the agreement in SCSI terms (for sysfs).
 * @esp_stp/@esp_soff: the corresponding chip register values.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
                        u8 scsi_period, u8 scsi_offset,
                        u8 esp_stp, u8 esp_soff)
{
        spi_period(tp->starget) = scsi_period;
        spi_offset(tp->starget) = scsi_offset;
        spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

        if (esp_soff) {
                esp_stp &= 0x1f;
                esp_soff |= esp->radelay;
                if (esp->rev >= FAS236) {
                        u8 bit = ESP_CONFIG3_FSCSI;
                        if (esp->rev >= FAS100A)
                                bit = ESP_CONFIG3_FAST;

                        /* Periods faster than 200ns enable the chip's
                         * fast-SCSI mode; FASHME additionally drops the
                         * REQ/ACK delay there.
                         */
                        if (scsi_period < 50) {
                                if (esp->rev == FASHME)
                                        esp_soff &= ~esp->radelay;
                                tp->esp_config3 |= bit;
                        } else {
                                tp->esp_config3 &= ~bit;
                        }
                        esp->prev_cfg3 = tp->esp_config3;
                        esp_write8(esp->prev_cfg3, ESP_CFG3);
                }
        }

        tp->esp_period = esp->prev_stp = esp_stp;
        tp->esp_offset = esp->prev_soff = esp_soff;

        esp_write8(esp_soff, ESP_SOFF);
        esp_write8(esp_stp, ESP_STP);

        /* Negotiation is complete for this target. */
        tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

        spi_display_xfer_agreement(tp->starget);
}
/*
 * Handle a MESSAGE REJECT from the target.  If we were negotiating
 * wide transfers, fall back to narrow (and possibly retry sync
 * negotiation); if we were negotiating sync, fall back to async.
 * A reject outside of negotiation aborts the task set.
 */
static void esp_msgin_reject(struct esp *esp)
{
        struct esp_cmd_entry *ent = esp->active_cmd;
        struct scsi_cmnd *cmd = ent->cmd;
        struct esp_target_data *tp;
        int tgt;

        tgt = cmd->device->id;
        tp = &esp->target[tgt];

        if (tp->flags & ESP_TGT_NEGO_WIDE) {
                /* WDTR rejected: give up on wide, but still try SDTR
                 * if sync negotiation is wanted.
                 */
                tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);
                if (!esp_need_to_nego_sync(tp)) {
                        tp->flags &= ~ESP_TGT_CHECK_NEGO;
                        scsi_esp_cmd(esp, ESP_CMD_RATN);
                } else {
                        esp->msg_out_len =
                                spi_populate_sync_msg(&esp->msg_out[0],
                                                      tp->nego_goal_period,
                                                      tp->nego_goal_offset);
                        tp->flags |= ESP_TGT_NEGO_SYNC;
                        scsi_esp_cmd(esp, ESP_CMD_SATN);
                }
                return;
        }

        if (tp->flags & ESP_TGT_NEGO_SYNC) {
                /* SDTR rejected: run asynchronously. */
                tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
                tp->esp_period = 0;
                tp->esp_offset = 0;
                esp_setsync(esp, tp, 0, 0, 0, 0);
                scsi_esp_cmd(esp, ESP_CMD_RATN);
                return;
        }

        /* Unexpected reject: abort the task set. */
        esp->msg_out[0] = ABORT_TASK_SET;
        esp->msg_out_len = 1;
        scsi_esp_cmd(esp, ESP_CMD_SATN);
}
/*
 * Process an incoming SDTR (synchronous data transfer request)
 * extended message.  Validates the target's proposed period/offset
 * against our limits, converts the period into the chip's STP
 * register value, and either commits the agreement, re-proposes
 * async (do_sdtr), or rejects the message outright (do_reject).
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
        u8 period = esp->msg_in[3];
        u8 offset = esp->msg_in[4];
        u8 stp;

        /* Only meaningful while we are actually negotiating sync. */
        if (!(tp->flags & ESP_TGT_NEGO_SYNC))
                goto do_reject;

        /* Maximum REQ/ACK offset supported by the chip. */
        if (offset > 15)
                goto do_reject;

        if (offset) {
                int one_clock;

                if (period > esp->max_period) {
                        /* Too slow for us: counter-propose async. */
                        period = offset = 0;
                        goto do_sdtr;
                }
                if (period < esp->min_period)
                        goto do_reject;

                /* Convert the SCSI period (4ns units) into clock
                 * cycles for the STP register.
                 */
                one_clock = esp->ccycle / 1000;
                stp = DIV_ROUND_UP(period << 2, one_clock);
                if (stp && esp->rev >= FAS236) {
                        if (stp >= 50)
                                stp--;
                }
        } else {
                stp = 0;
        }

        esp_setsync(esp, tp, period, offset, stp, offset);
        return;

do_reject:
        esp->msg_out[0] = MESSAGE_REJECT;
        esp->msg_out_len = 1;
        scsi_esp_cmd(esp, ESP_CMD_SATN);
        return;

do_sdtr:
        tp->nego_goal_period = period;
        tp->nego_goal_offset = offset;
        esp->msg_out_len =
                spi_populate_sync_msg(&esp->msg_out[0],
                                      tp->nego_goal_period,
                                      tp->nego_goal_offset);
        scsi_esp_cmd(esp, ESP_CMD_SATN);
}
/*
 * Process an incoming WDTR (wide data transfer request) extended
 * message.  Only the FASHME chip supports wide transfers; on success
 * the chip's wide-enable config bit is updated and, if sync
 * negotiation is still wanted, an SDTR is sent next.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
        int size = 8 << esp->msg_in[3];
        u8 cfg3;

        if (esp->rev != FASHME)
                goto do_reject;

        /* Only 8-bit (narrow) and 16-bit (wide) are valid. */
        if (size != 8 && size != 16)
                goto do_reject;

        if (!(tp->flags & ESP_TGT_NEGO_WIDE))
                goto do_reject;

        cfg3 = tp->esp_config3;
        if (size == 16) {
                tp->flags |= ESP_TGT_WIDE;
                cfg3 |= ESP_CONFIG3_EWIDE;
        } else {
                tp->flags &= ~ESP_TGT_WIDE;
                cfg3 &= ~ESP_CONFIG3_EWIDE;
        }
        tp->esp_config3 = cfg3;
        esp->prev_cfg3 = cfg3;
        esp_write8(cfg3, ESP_CFG3);

        tp->flags &= ~ESP_TGT_NEGO_WIDE;

        spi_period(tp->starget) = 0;
        spi_offset(tp->starget) = 0;
        if (!esp_need_to_nego_sync(tp)) {
                tp->flags &= ~ESP_TGT_CHECK_NEGO;
                scsi_esp_cmd(esp, ESP_CMD_RATN);
        } else {
                /* Wide settled; now negotiate sync. */
                esp->msg_out_len =
                        spi_populate_sync_msg(&esp->msg_out[0],
                                              tp->nego_goal_period,
                                              tp->nego_goal_offset);
                tp->flags |= ESP_TGT_NEGO_SYNC;
                scsi_esp_cmd(esp, ESP_CMD_SATN);
        }
        return;

do_reject:
        esp->msg_out[0] = MESSAGE_REJECT;
        esp->msg_out_len = 1;
        scsi_esp_cmd(esp, ESP_CMD_SATN);
}
/*
 * Dispatch a fully-received extended message (msg_in[2] holds the
 * extended message code) to the SDTR or WDTR handler.  Unknown
 * extended messages cause the task set to be aborted.
 */
static void esp_msgin_extended(struct esp *esp)
{
        struct esp_cmd_entry *ent = esp->active_cmd;
        struct scsi_cmnd *cmd = ent->cmd;
        struct esp_target_data *tp;
        int tgt = cmd->device->id;

        tp = &esp->target[tgt];
        if (esp->msg_in[2] == EXTENDED_SDTR) {
                esp_msgin_sdtr(esp, tp);
                return;
        }
        if (esp->msg_in[2] == EXTENDED_WDTR) {
                esp_msgin_wdtr(esp, tp);
                return;
        }

        printk("ESP: Unexpected extended msg type %x\n",
               esp->msg_in[2]);

        esp->msg_out[0] = ABORT_TASK_SET;
        esp->msg_out_len = 1;
        scsi_esp_cmd(esp, ESP_CMD_SATN);
}
/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message (the caller
 * will keep accumulating into esp->msg_in), zero when the message has
 * been fully consumed.
 */
static int esp_msgin_process(struct esp *esp)
{
        u8 msg0 = esp->msg_in[0];
        int len = esp->msg_in_len;

        if (msg0 & 0x80) {
                /* Identify */
                printk("ESP: Unexpected msgin identify\n");
                return 0;
        }

        switch (msg0) {
        case EXTENDED_MESSAGE:
                /* Byte 1 is the remaining length; wait until the whole
                 * extended message has arrived before dispatching.
                 */
                if (len == 1)
                        return 1;
                if (len < esp->msg_in[1] + 2)
                        return 1;
                esp_msgin_extended(esp);
                return 0;

        case IGNORE_WIDE_RESIDUE: {
                struct esp_cmd_entry *ent;
                struct esp_cmd_priv *spriv;
                if (len == 1)
                        return 1;

                /* Only a one-byte residue makes sense on a 16-bit bus. */
                if (esp->msg_in[1] != 1)
                        goto do_reject;

                ent = esp->active_cmd;
                spriv = ESP_CMD_PRIV(ent->cmd);

                /* Give back the extra byte; step back one scatterlist
                 * entry if the current one was exactly consumed.
                 */
                if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
                        spriv->cur_sg--;
                        spriv->cur_residue = 1;
                } else
                        spriv->cur_residue++;
                spriv->tot_residue++;
                return 0;
        }
        case NOP:
                return 0;
        case RESTORE_POINTERS:
                esp_restore_pointers(esp, esp->active_cmd);
                return 0;
        case SAVE_POINTERS:
                esp_save_pointers(esp, esp->active_cmd);
                return 0;

        case COMMAND_COMPLETE:
        case DISCONNECT: {
                struct esp_cmd_entry *ent = esp->active_cmd;

                ent->message = msg0;
                esp_event(esp, ESP_EVENT_FREE_BUS);
                esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                return 0;
        }
        case MESSAGE_REJECT:
                esp_msgin_reject(esp);
                return 0;

        default:
        do_reject:
                esp->msg_out[0] = MESSAGE_REJECT;
                esp->msg_out_len = 1;
                scsi_esp_cmd(esp, ESP_CMD_SATN);
                return 0;
        }
}
/*
 * Core event loop for an in-progress SCSI transaction.  Dispatches on
 * esp->event, issuing chip commands and DMA as needed, and loops (via
 * the "again" label) whenever the next event can be processed without
 * waiting for another interrupt.
 *
 * Returns 1 when interrupt processing is complete for now, 0 when a
 * fatal condition was seen and a bus reset has been scheduled.
 *
 * Fixes vs. previous revision: the MSGIN failure printk was garbled
 * ("neither BSERV not FDON") and lacked a newline; unreachable
 * break statements following goto/return were removed.
 */
static int esp_process_event(struct esp *esp)
{
        int write;

again:
        write = 0;
        switch (esp->event) {
        case ESP_EVENT_CHECK_PHASE:
                /* Map the chip's current bus phase onto the next event. */
                switch (esp->sreg & ESP_STAT_PMASK) {
                case ESP_DOP:
                        esp_event(esp, ESP_EVENT_DATA_OUT);
                        break;
                case ESP_DIP:
                        esp_event(esp, ESP_EVENT_DATA_IN);
                        break;
                case ESP_STATP:
                        esp_flush_fifo(esp);
                        scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
                        esp_event(esp, ESP_EVENT_STATUS);
                        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                        return 1;
                case ESP_MOP:
                        esp_event(esp, ESP_EVENT_MSGOUT);
                        break;
                case ESP_MIP:
                        esp_event(esp, ESP_EVENT_MSGIN);
                        break;
                case ESP_CMDP:
                        esp_event(esp, ESP_EVENT_CMD_START);
                        break;
                default:
                        printk("ESP: Unexpected phase, sreg=%02x\n",
                               esp->sreg);
                        esp_schedule_reset(esp);
                        return 0;
                }
                goto again;

        case ESP_EVENT_DATA_IN:
                write = 1;
                /* fallthru */

        case ESP_EVENT_DATA_OUT: {
                struct esp_cmd_entry *ent = esp->active_cmd;
                struct scsi_cmnd *cmd = ent->cmd;
                dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
                unsigned int dma_len = esp_cur_dma_len(ent, cmd);

                if (esp->rev == ESP100)
                        scsi_esp_cmd(esp, ESP_CMD_NULL);

                if (write)
                        ent->flags |= ESP_CMD_FLAG_WRITE;
                else
                        ent->flags &= ~ESP_CMD_FLAG_WRITE;

                /* Clamp the transfer to what the DMA engine can do
                 * in one shot.
                 */
                if (esp->ops->dma_length_limit)
                        dma_len = esp->ops->dma_length_limit(esp, dma_addr,
                                                             dma_len);
                else
                        dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

                esp->data_dma_len = dma_len;

                if (!dma_len) {
                        printk(KERN_ERR PFX "esp%d: DMA length is zero!\n",
                               esp->host->unique_id);
                        printk(KERN_ERR PFX "esp%d: cur adr[%08llx] len[%08x]\n",
                               esp->host->unique_id,
                               (unsigned long long)esp_cur_dma_addr(ent, cmd),
                               esp_cur_dma_len(ent, cmd));
                        esp_schedule_reset(esp);
                        return 0;
                }

                esp_log_datastart("ESP: start data addr[%08llx] len[%u] "
                                  "write(%d)\n",
                                  (unsigned long long)dma_addr, dma_len, write);

                esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
                                       write, ESP_CMD_DMA | ESP_CMD_TI);
                esp_event(esp, ESP_EVENT_DATA_DONE);
                break;
        }
        case ESP_EVENT_DATA_DONE: {
                struct esp_cmd_entry *ent = esp->active_cmd;
                struct scsi_cmnd *cmd = ent->cmd;
                int bytes_sent;

                if (esp->ops->dma_error(esp)) {
                        printk("ESP: data done, DMA error, resetting\n");
                        esp_schedule_reset(esp);
                        return 0;
                }

                if (ent->flags & ESP_CMD_FLAG_WRITE) {
                        /* XXX parity errors, etc. XXX */
                        esp->ops->dma_drain(esp);
                }
                esp->ops->dma_invalidate(esp);

                if (esp->ireg != ESP_INTR_BSERV) {
                        /* We should always see exactly a bus-service
                         * interrupt at the end of a successful transfer.
                         */
                        printk("ESP: data done, not BSERV, resetting\n");
                        esp_schedule_reset(esp);
                        return 0;
                }

                bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

                esp_log_datadone("ESP: data done flgs[%x] sent[%d]\n",
                                 ent->flags, bytes_sent);

                if (bytes_sent < 0) {
                        /* XXX force sync mode for this target XXX */
                        esp_schedule_reset(esp);
                        return 0;
                }

                esp_advance_dma(esp, ent, cmd, bytes_sent);
                esp_event(esp, ESP_EVENT_CHECK_PHASE);
                goto again;
        }

        case ESP_EVENT_STATUS: {
                struct esp_cmd_entry *ent = esp->active_cmd;

                if (esp->ireg & ESP_INTR_FDONE) {
                        /* Both status and message bytes are in the FIFO. */
                        ent->status = esp_read8(ESP_FDATA);
                        ent->message = esp_read8(ESP_FDATA);
                        scsi_esp_cmd(esp, ESP_CMD_MOK);
                } else if (esp->ireg == ESP_INTR_BSERV) {
                        ent->status = esp_read8(ESP_FDATA);
                        ent->message = 0xff;
                        esp_event(esp, ESP_EVENT_MSGIN);
                        return 0;
                }

                if (ent->message != COMMAND_COMPLETE) {
                        printk("ESP: Unexpected message %x in status\n",
                               ent->message);
                        esp_schedule_reset(esp);
                        return 0;
                }

                esp_event(esp, ESP_EVENT_FREE_BUS);
                esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                break;
        }
        case ESP_EVENT_FREE_BUS: {
                struct esp_cmd_entry *ent = esp->active_cmd;
                struct scsi_cmnd *cmd = ent->cmd;

                if (ent->message == COMMAND_COMPLETE ||
                    ent->message == DISCONNECT)
                        scsi_esp_cmd(esp, ESP_CMD_ESEL);

                if (ent->message == COMMAND_COMPLETE) {
                        esp_log_cmddone("ESP: Command done status[%x] "
                                        "message[%x]\n",
                                        ent->status, ent->message);
                        if (ent->status == SAM_STAT_TASK_SET_FULL)
                                esp_event_queue_full(esp, ent);

                        if (ent->status == SAM_STAT_CHECK_CONDITION &&
                            !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
                                /* Fetch sense data ourselves before
                                 * completing the command.
                                 */
                                ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
                                esp_autosense(esp, ent);
                        } else {
                                esp_cmd_is_done(esp, ent, cmd,
                                                compose_result(ent->status,
                                                               ent->message,
                                                               DID_OK));
                        }
                } else if (ent->message == DISCONNECT) {
                        esp_log_disconnect("ESP: Disconnecting tgt[%d] "
                                           "tag[%x:%x]\n",
                                           cmd->device->id,
                                           ent->tag[0], ent->tag[1]);

                        esp->active_cmd = NULL;
                        esp_maybe_execute_command(esp);
                } else {
                        printk("ESP: Unexpected message %x in freebus\n",
                               ent->message);
                        esp_schedule_reset(esp);
                        return 0;
                }
                if (esp->active_cmd)
                        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                break;
        }
        case ESP_EVENT_MSGOUT: {
                scsi_esp_cmd(esp, ESP_CMD_FLUSH);

                if (esp_debug & ESP_DEBUG_MSGOUT) {
                        int i;
                        printk("ESP: Sending message [ ");
                        for (i = 0; i < esp->msg_out_len; i++)
                                printk("%02x ", esp->msg_out[i]);
                        printk("]\n");
                }

                if (esp->rev == FASHME) {
                        int i;

                        /* Always use the fifo. */
                        for (i = 0; i < esp->msg_out_len; i++) {
                                esp_write8(esp->msg_out[i], ESP_FDATA);
                                esp_write8(0, ESP_FDATA);
                        }
                        scsi_esp_cmd(esp, ESP_CMD_TI);
                } else {
                        if (esp->msg_out_len == 1) {
                                esp_write8(esp->msg_out[0], ESP_FDATA);
                                scsi_esp_cmd(esp, ESP_CMD_TI);
                        } else {
                                /* Use DMA. */
                                memcpy(esp->command_block,
                                       esp->msg_out,
                                       esp->msg_out_len);

                                esp->ops->send_dma_cmd(esp,
                                                       esp->command_block_dma,
                                                       esp->msg_out_len,
                                                       esp->msg_out_len,
                                                       0,
                                                       ESP_CMD_DMA|ESP_CMD_TI);
                        }
                }
                esp_event(esp, ESP_EVENT_MSGOUT_DONE);
                break;
        }
        case ESP_EVENT_MSGOUT_DONE:
                if (esp->rev == FASHME) {
                        scsi_esp_cmd(esp, ESP_CMD_FLUSH);
                } else {
                        if (esp->msg_out_len > 1)
                                esp->ops->dma_invalidate(esp);
                }

                if (!(esp->ireg & ESP_INTR_DC)) {
                        if (esp->rev != FASHME)
                                scsi_esp_cmd(esp, ESP_CMD_NULL);
                }
                esp_event(esp, ESP_EVENT_CHECK_PHASE);
                goto again;
        case ESP_EVENT_MSGIN:
                if (esp->ireg & ESP_INTR_BSERV) {
                        if (esp->rev == FASHME) {
                                if (!(esp_read8(ESP_STATUS2) &
                                      ESP_STAT2_FEMPTY))
                                        scsi_esp_cmd(esp, ESP_CMD_FLUSH);
                        } else {
                                scsi_esp_cmd(esp, ESP_CMD_FLUSH);
                                if (esp->rev == ESP100)
                                        scsi_esp_cmd(esp, ESP_CMD_NULL);
                        }
                        scsi_esp_cmd(esp, ESP_CMD_TI);
                        esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                        return 1;
                }
                if (esp->ireg & ESP_INTR_FDONE) {
                        u8 val;

                        if (esp->rev == FASHME)
                                val = esp->fifo[0];
                        else
                                val = esp_read8(ESP_FDATA);
                        esp->msg_in[esp->msg_in_len++] = val;

                        esp_log_msgin("ESP: Got msgin byte %x\n", val);

                        if (!esp_msgin_process(esp))
                                esp->msg_in_len = 0;

                        if (esp->rev == FASHME)
                                scsi_esp_cmd(esp, ESP_CMD_FLUSH);

                        scsi_esp_cmd(esp, ESP_CMD_MOK);

                        if (esp->event != ESP_EVENT_FREE_BUS)
                                esp_event(esp, ESP_EVENT_CHECK_PHASE);
                } else {
                        printk("ESP: MSGIN neither BSERV nor FDONE, "
                               "resetting\n");
                        esp_schedule_reset(esp);
                        return 0;
                }
                break;
        case ESP_EVENT_CMD_START:
                memcpy(esp->command_block, esp->cmd_bytes_ptr,
                       esp->cmd_bytes_left);
                if (esp->rev == FASHME)
                        scsi_esp_cmd(esp, ESP_CMD_FLUSH);
                esp->ops->send_dma_cmd(esp, esp->command_block_dma,
                                       esp->cmd_bytes_left, 16, 0,
                                       ESP_CMD_DMA | ESP_CMD_TI);
                esp_event(esp, ESP_EVENT_CMD_DONE);
                esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
                break;
        case ESP_EVENT_CMD_DONE:
                esp->ops->dma_invalidate(esp);
                if (esp->ireg & ESP_INTR_BSERV) {
                        esp_event(esp, ESP_EVENT_CHECK_PHASE);
                        goto again;
                }
                esp_schedule_reset(esp);
                return 0;

        case ESP_EVENT_RESET:
                scsi_esp_cmd(esp, ESP_CMD_RS);
                break;

        default:
                printk("ESP: Unexpected event %x, resetting\n",
                       esp->event);
                esp_schedule_reset(esp);
                return 0;
        }
        return 1;
}
/*
 * Fail one active command with DID_RESET during reset cleanup: undo
 * its DMA mapping (or sense-buffer mapping when autosensing), release
 * its LUN tag, complete it to the midlayer, and return the entry to
 * the free pool.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
        struct scsi_cmnd *cmd = ent->cmd;

        esp_unmap_dma(esp, cmd);
        esp_free_lun_tag(ent, cmd->device->hostdata);
        cmd->result = DID_RESET << 16;

        if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
                esp->ops->unmap_single(esp, ent->sense_dma,
                                       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
                ent->sense_ptr = NULL;
        }

        cmd->scsi_done(cmd);
        list_del(&ent->list);
        esp_put_ent(esp, ent);
}
/*
 * Per-device callback for __starget_for_each_device() during reset
 * cleanup: drop the LUN's hold flag.  All tagged commands must have
 * been failed already, hence the BUG_ON.
 */
static void esp_clear_hold(struct scsi_device *dev, void *data)
{
        struct esp_lun_data *lp = dev->hostdata;

        BUG_ON(lp->num_tagged);
        lp->hold = 0;
}
/*
 * After a SCSI bus reset: fail every queued and active command with
 * DID_RESET, clear per-LUN holds, and force renegotiation of sync and
 * wide parameters on all targets.  Clears ESP_FLAG_RESETTING when done.
 */
static void esp_reset_cleanup(struct esp *esp)
{
        struct esp_cmd_entry *ent, *tmp;
        int i;

        /* Commands never issued: just complete them with DID_RESET. */
        list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
                struct scsi_cmnd *cmd = ent->cmd;

                list_del(&ent->list);
                cmd->result = DID_RESET << 16;
                cmd->scsi_done(cmd);
                esp_put_ent(esp, ent);
        }

        /* Commands on the bus need their resources unwound too. */
        list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
                if (ent == esp->active_cmd)
                        esp->active_cmd = NULL;
                esp_reset_cleanup_one(esp, ent);
        }

        BUG_ON(esp->active_cmd != NULL);

        /* Force renegotiation of sync/wide transfers. */
        for (i = 0; i < ESP_MAX_TARGET; i++) {
                struct esp_target_data *tp = &esp->target[i];

                tp->esp_period = 0;
                tp->esp_offset = 0;
                tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
                                     ESP_CONFIG3_FSCSI |
                                     ESP_CONFIG3_FAST);
                tp->flags &= ~ESP_TGT_WIDE;
                tp->flags |= ESP_TGT_CHECK_NEGO;

                if (tp->starget)
                        __starget_for_each_device(tp->starget, NULL,
                                                  esp_clear_hold);
        }

        esp->flags &= ~ESP_FLAG_RESETTING;
}
/* Runs under host->lock */
/*
 * Low-level interrupt body: sample the chip's status and interrupt
 * registers, handle bus-reset completion, pre-read the FASHME FIFO
 * when needed, then dispatch to select-completion / reconnect handling
 * and finally run the event state machine until it is satisfied.
 *
 * Fix vs. previous revision: the `else if (esp->ireg & ESP_INTR_RSEL)`
 * branch condition was the exact negation of the preceding `if`, so it
 * is now a plain `else`.
 */
static void __esp_interrupt(struct esp *esp)
{
        int finish_reset, intr_done;
        u8 phase;

        esp->sreg = esp_read8(ESP_STATUS);

        if (esp->flags & ESP_FLAG_RESETTING) {
                finish_reset = 1;
        } else {
                if (esp_check_gross_error(esp))
                        return;

                finish_reset = esp_check_spur_intr(esp);
                if (finish_reset < 0)
                        return;
        }

        esp->ireg = esp_read8(ESP_INTRPT);

        if (esp->ireg & ESP_INTR_SR)
                finish_reset = 1;

        if (finish_reset) {
                esp_reset_cleanup(esp);
                if (esp->eh_reset) {
                        /* Wake up an error-handler thread waiting for
                         * the bus reset to complete.
                         */
                        complete(esp->eh_reset);
                        esp->eh_reset = NULL;
                }
                return;
        }

        phase = (esp->sreg & ESP_STAT_PMASK);
        if (esp->rev == FASHME) {
                /* On FASHME the FIFO must be read out early in some
                 * situations (e.g. reselection) before further chip
                 * commands clobber it.
                 */
                if (((phase != ESP_DIP && phase != ESP_DOP) &&
                     esp->select_state == ESP_SELECT_NONE &&
                     esp->event != ESP_EVENT_STATUS &&
                     esp->event != ESP_EVENT_DATA_DONE) ||
                    (esp->ireg & ESP_INTR_RSEL)) {
                        esp->sreg2 = esp_read8(ESP_STATUS2);
                        if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
                            (esp->sreg2 & ESP_STAT2_F1BYTE))
                                hme_read_fifo(esp);
                }
        }

        esp_log_intr("ESP: intr sreg[%02x] seqreg[%02x] "
                     "sreg2[%02x] ireg[%02x]\n",
                     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

        intr_done = 0;

        if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
                printk("ESP: unexpected IREG %02x\n", esp->ireg);
                if (esp->ireg & ESP_INTR_IC)
                        esp_dump_cmd_log(esp);

                esp_schedule_reset(esp);
        } else {
                if (!(esp->ireg & ESP_INTR_RSEL)) {
                        /* Some combination of FDONE, BSERV, DC. */
                        if (esp->select_state != ESP_SELECT_NONE)
                                intr_done = esp_finish_select(esp);
                } else {
                        /* Reselected by a target. */
                        if (esp->active_cmd)
                                (void) esp_finish_select(esp);

                        intr_done = esp_reconnect(esp);
                }
        }
        while (!intr_done)
                intr_done = esp_process_event(esp);
}
/*
 * Shared IRQ handler entry point.  Under host_lock, repeatedly runs
 * __esp_interrupt() while ESP_FLAG_QUICKIRQ_CHECK is set and the chip
 * re-raises an interrupt within ESP_QUICKIRQ_LIMIT polls — avoids a
 * full interrupt round-trip for back-to-back chip events.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
        struct esp *esp = dev_id;
        unsigned long flags;
        irqreturn_t ret;

        spin_lock_irqsave(esp->host->host_lock, flags);
        ret = IRQ_NONE;
        if (esp->ops->irq_pending(esp)) {
                ret = IRQ_HANDLED;
                for (;;) {
                        int i;

                        __esp_interrupt(esp);
                        if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
                                break;
                        esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

                        /* Poll briefly for a follow-up interrupt. */
                        for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
                                if (esp->ops->irq_pending(esp))
                                        break;
                        }
                        if (i == ESP_QUICKIRQ_LIMIT)
                                break;
                }
        }
        spin_unlock_irqrestore(esp->host->host_lock, flags);

        return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
/*
 * Probe the chip revision by writing config registers and checking
 * whether the values read back: no CFG2 -> ESP100, CFG2 but no
 * CFG3 -> ESP100A, all three -> an ESP236 or FAS variant, chosen
 * from the clock conversion factor.  Register write order matters;
 * do not reorder.
 */
static void esp_get_revision(struct esp *esp)
{
        u8 val;

        esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
        esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
        esp_write8(esp->config2, ESP_CFG2);

        val = esp_read8(ESP_CFG2);
        val &= ~ESP_CONFIG2_MAGIC;
        if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
                /* If what we write to cfg2 does not come back, cfg2 is not
                 * implemented, therefore this must be a plain esp100.
                 */
                esp->rev = ESP100;
        } else {
                esp->config2 = 0;
                esp_set_all_config3(esp, 5);
                esp->prev_cfg3 = 5;
                esp_write8(esp->config2, ESP_CFG2);
                esp_write8(0, ESP_CFG3);
                esp_write8(esp->prev_cfg3, ESP_CFG3);

                val = esp_read8(ESP_CFG3);
                if (val != 5) {
                        /* The cfg2 register is implemented, however
                         * cfg3 is not, must be esp100a.
                         */
                        esp->rev = ESP100A;
                } else {
                        esp_set_all_config3(esp, 0);
                        esp->prev_cfg3 = 0;
                        esp_write8(esp->prev_cfg3, ESP_CFG3);

                        /* All of cfg{1,2,3} implemented, must be one of
                         * the fas variants, figure out which one.
                         */
                        if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
                                esp->rev = FAST;
                                esp->sync_defp = SYNC_DEFP_FAST;
                        } else {
                                esp->rev = ESP236;
                        }
                        esp->config2 = 0;
                        esp_write8(esp->config2, ESP_CFG2);
                }
        }
}
static void esp_init_swstate(struct esp *esp)
{
int i;
INIT_LIST_HEAD(&esp->queued_cmds);
INIT_LIST_HEAD(&esp->active_cmds);
INIT_LIST_HEAD(&esp->esp_cmd_pool);
/* Start with a clear state, domain validation (via ->slave_configure,
* spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
* commands.
*/
for (i = 0 ; i < ESP_MAX_TARGET; i++) {
esp->target[i].flags = 0;
esp->target[i].nego_goal_period = 0;
esp->target[i].nego_goal_offset = 0;
esp->target[i].nego_goal_width = 0;
esp->target[i].nego_goal_tags = 0;
}
}
/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
        u8 val;

        /* Reset the DMA */
        esp->ops->reset_dma(esp);

        /* Reset the ESP */
        esp_reset_esp(esp);

        /* Reset the SCSI bus, but tell ESP not to generate an irq */
        val = esp_read8(ESP_CFG1);
        val |= ESP_CONFIG1_SRRDISAB;
        esp_write8(val, ESP_CFG1);

        scsi_esp_cmd(esp, ESP_CMD_RS);
        /* Allow the bus-reset pulse to complete before restoring CFG1. */
        udelay(400);

        esp_write8(esp->config1, ESP_CFG1);

        /* Eat any bitrot in the chip and we are done... */
        esp_read8(ESP_INTRPT);
}
/*
 * Derive all clock-dependent chip parameters (conversion factor,
 * cycle time, selection timeout tick, default sync period) from the
 * board-provided crystal frequency esp->cfreq.  See the long comment
 * below for the underlying formulas.
 */
static void esp_set_clock_params(struct esp *esp)
{
        int fhz;
        u8 ccf;

        /* This is getting messy but it has to be done correctly or else
         * you get weird behavior all over the place.  We are trying to
         * basically figure out three pieces of information.
         *
         * a) Clock Conversion Factor
         *
         *    This is a representation of the input crystal clock frequency
         *    going into the ESP on this machine.  Any operation whose timing
         *    is longer than 400ns depends on this value being correct.  For
         *    example, you'll get blips for arbitration/selection during high
         *    load or with multiple targets if this is not set correctly.
         *
         * b) Selection Time-Out
         *
         *    The ESP isn't very bright and will arbitrate for the bus and try
         *    to select a target forever if you let it.  This value tells the
         *    ESP when it has taken too long to negotiate and that it should
         *    interrupt the CPU so we can see what happened.  The value is
         *    computed as follows (from NCR/Symbios chip docs).
         *
         *          (Time Out Period) * (Input Clock)
         *    STO = ----------------------------------
         *          (8192) * (Clock Conversion Factor)
         *
         *    We use a time out period of 250ms (ESP_BUS_TIMEOUT).
         *
         * c) Imperical constants for synchronous offset and transfer period
         *    register values
         *
         *    This entails the smallest and largest sync period we could ever
         *    handle on this ESP.
         */
        fhz = esp->cfreq;

        ccf = ((fhz / 1000000) + 4) / 5;
        if (ccf == 1)
                ccf = 2;

        /* If we can't find anything reasonable, just assume 20MHZ.
         * This is the clock frequency of the older sun4c's where I've
         * been unable to find the clock-frequency PROM property.  All
         * other machines provide useful values it seems.
         */
        if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
                fhz = 20000000;
                ccf = 4;
        }

        /* The chip encodes CCF==8 as zero in the register. */
        esp->cfact = (ccf == 8 ? 0 : ccf);
        esp->cfreq = fhz;
        esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
        esp->ctick = ESP_TICK(ccf, esp->ccycle);
        esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
        esp->sync_defp = SYNC_DEFP_SLOW;
}
/* Indexed by esp->rev (see its use in scsi_esp_register()); keep in
 * the same order as the revision enumeration.
 */
static const char *esp_chip_names[] = {
        "ESP100",
        "ESP100A",
        "ESP236",
        "FAS236",
        "FAS100A",
        "FAST",
        "FASHME",
};

/* Shared SPI transport template, attached once in esp_init(). */
static struct scsi_transport_template *esp_transport_template;
/*
 * Register a fully set-up esp instance with the SCSI midlayer:
 * assigns a unique instance id, probes the chip revision, initializes
 * software state, resets the chip/bus, then adds and scans the host.
 * Returns 0 on success or the scsi_add_host() error code.
 */
int scsi_esp_register(struct esp *esp, struct device *dev)
{
        /* Monotonic counter giving each registered host a unique id. */
        static int instance;
        int err;

        esp->host->transportt = esp_transport_template;
        esp->host->max_lun = ESP_MAX_LUN;
        esp->host->cmd_per_lun = 2;
        esp->host->unique_id = instance;

        esp_set_clock_params(esp);
        esp_get_revision(esp);
        esp_init_swstate(esp);

        esp_bootup_reset(esp);

        printk(KERN_INFO PFX "esp%u, regs[%1p:%1p] irq[%u]\n",
               esp->host->unique_id, esp->regs, esp->dma_regs,
               esp->host->irq);
        printk(KERN_INFO PFX "esp%u is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
               esp->host->unique_id, esp_chip_names[esp->rev],
               esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

        /* Let the SCSI bus reset settle. */
        ssleep(esp_bus_reset_settle);

        err = scsi_add_host(esp->host, dev);
        if (err)
                return err;

        /* Only consume an instance id once registration succeeded. */
        instance++;

        scsi_scan_host(esp->host);

        return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
/* Detach an esp instance from the SCSI midlayer. */
void scsi_esp_unregister(struct esp *esp)
{
        scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
/*
 * Midlayer target-allocation hook: remember the scsi_target in our
 * per-target bookkeeping.  Always succeeds.
 */
static int esp_target_alloc(struct scsi_target *starget)
{
        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
        struct esp *esp = shost_priv(shost);

        esp->target[starget->id].starget = starget;
        return 0;
}
/*
 * Midlayer target-destruction hook: drop our cached scsi_target
 * pointer for this id.
 */
static void esp_target_destroy(struct scsi_target *starget)
{
        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
        struct esp *esp = shost_priv(shost);

        esp->target[starget->id].starget = NULL;
}
/*
 * Midlayer LUN-allocation hook: allocate per-LUN tag bookkeeping and
 * publish the chip's sync/wide capabilities to the SPI transport
 * class.  Returns 0 or -ENOMEM.
 */
static int esp_slave_alloc(struct scsi_device *dev)
{
        struct esp *esp = shost_priv(dev->host);
        struct esp_target_data *tp = &esp->target[dev->id];
        struct esp_lun_data *lp;

        lp = kzalloc(sizeof(*lp), GFP_KERNEL);
        if (!lp)
                return -ENOMEM;
        dev->hostdata = lp;

        spi_min_period(tp->starget) = esp->min_period;
        spi_max_offset(tp->starget) = 15;

        if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
                spi_max_width(tp->starget) = 1;
        else
                spi_max_width(tp->starget) = 0;

        return 0;
}
/*
 * Midlayer LUN-configuration hook: enable tagged queueing when the
 * device supports it, mark the target disconnect-capable, and kick
 * off SPI domain validation if it has not run yet.
 */
static int esp_slave_configure(struct scsi_device *dev)
{
        struct esp *esp = shost_priv(dev->host);
        struct esp_target_data *tp = &esp->target[dev->id];
        int goal_tags, queue_depth;

        goal_tags = 0;

        if (dev->tagged_supported) {
                /* XXX make this configurable somehow XXX */
                goal_tags = ESP_DEFAULT_TAGS;

                if (goal_tags > ESP_MAX_TAG)
                        goal_tags = ESP_MAX_TAG;
        }

        /* Never go below the host's cmd_per_lun. */
        queue_depth = goal_tags;
        if (queue_depth < dev->host->cmd_per_lun)
                queue_depth = dev->host->cmd_per_lun;

        if (goal_tags) {
                scsi_set_tag_type(dev, MSG_ORDERED_TAG);
                scsi_activate_tcq(dev, queue_depth);
        } else {
                scsi_deactivate_tcq(dev, queue_depth);
        }
        tp->flags |= ESP_TGT_DISCONNECT;

        if (!spi_initial_dv(dev->sdev_target))
                spi_dv_device(dev);

        return 0;
}
/*
 * Midlayer LUN-destruction hook: release the per-LUN tag bookkeeping
 * allocated in esp_slave_alloc().
 */
static void esp_slave_destroy(struct scsi_device *dev)
{
        kfree(dev->hostdata);
        dev->hostdata = NULL;
}
/*
 * Error-handler abort entry point.  Three cases:
 *  1. Command still on the issue queue: remove and complete it with
 *     DID_ABORT (trivial success).
 *  2. Command is the currently active one: queue an ABORT TASK SET
 *     message (unless a message is already pending) and wait up to
 *     5s for completion.
 *  3. Command is disconnected: give up and return FAILED so the
 *     midlayer escalates to bus/host reset.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
        struct esp *esp = shost_priv(cmd->device->host);
        struct esp_cmd_entry *ent, *tmp;
        struct completion eh_done;
        unsigned long flags;

        /* XXX This helps a lot with debugging but might be a bit
         * XXX much for the final driver.
         */
        spin_lock_irqsave(esp->host->host_lock, flags);
        printk(KERN_ERR PFX "esp%d: Aborting command [%p:%02x]\n",
               esp->host->unique_id, cmd, cmd->cmnd[0]);
        ent = esp->active_cmd;
        if (ent)
                printk(KERN_ERR PFX "esp%d: Current command [%p:%02x]\n",
                       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
        list_for_each_entry(ent, &esp->queued_cmds, list) {
                printk(KERN_ERR PFX "esp%d: Queued command [%p:%02x]\n",
                       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
        }
        list_for_each_entry(ent, &esp->active_cmds, list) {
                printk(KERN_ERR PFX "esp%d: Active command [%p:%02x]\n",
                       esp->host->unique_id, ent->cmd, ent->cmd->cmnd[0]);
        }
        esp_dump_cmd_log(esp);
        /* NOTE(review): lock is dropped and immediately re-acquired
         * here, separating the debug dump above from the actual abort
         * logic below.
         */
        spin_unlock_irqrestore(esp->host->host_lock, flags);

        spin_lock_irqsave(esp->host->host_lock, flags);

        ent = NULL;
        list_for_each_entry(tmp, &esp->queued_cmds, list) {
                if (tmp->cmd == cmd) {
                        ent = tmp;
                        break;
                }
        }

        if (ent) {
                /* Easiest case, we didn't even issue the command
                 * yet so it is trivial to abort.
                 */
                list_del(&ent->list);

                cmd->result = DID_ABORT << 16;
                cmd->scsi_done(cmd);

                esp_put_ent(esp, ent);

                goto out_success;
        }

        init_completion(&eh_done);

        ent = esp->active_cmd;
        if (ent && ent->cmd == cmd) {
                /* Command is the currently active command on
                 * the bus.  If we already have an output message
                 * pending, no dice.
                 */
                if (esp->msg_out_len)
                        goto out_failure;

                /* Send out an abort, encouraging the target to
                 * go to MSGOUT phase by asserting ATN.
                 */
                esp->msg_out[0] = ABORT_TASK_SET;
                esp->msg_out_len = 1;
                ent->eh_done = &eh_done;

                scsi_esp_cmd(esp, ESP_CMD_SATN);
        } else {
                /* The command is disconnected.  This is not easy to
                 * abort.  For now we fail and let the scsi error
                 * handling layer go try a scsi bus reset or host
                 * reset.
                 *
                 * What we could do is put together a scsi command
                 * solely for the purpose of sending an abort message
                 * to the target.  Coming up with all the code to
                 * cook up scsi commands, special case them everywhere,
                 * etc. is for questionable gain and it would be better
                 * if the generic scsi error handling layer could do at
                 * least some of that for us.
                 *
                 * Anyways this is an area for potential future improvement
                 * in this driver.
                 */
                goto out_failure;
        }
        spin_unlock_irqrestore(esp->host->host_lock, flags);

        if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
                /* Timed out: detach the completion before it goes out
                 * of scope.
                 */
                spin_lock_irqsave(esp->host->host_lock, flags);
                ent->eh_done = NULL;
                spin_unlock_irqrestore(esp->host->host_lock, flags);

                return FAILED;
        }

        return SUCCESS;

out_success:
        spin_unlock_irqrestore(esp->host->host_lock, flags);
        return SUCCESS;

out_failure:
        /* XXX This might be a good location to set ESP_TGT_BROKEN
         * XXX since we know which target/lun in particular is
         * XXX causing trouble.
         */
        spin_unlock_irqrestore(esp->host->host_lock, flags);
        return FAILED;
}
/*
 * Error-handler bus reset: issue a chip bus-reset command, then wait
 * (after the settle delay) up to 5s for the interrupt path to run
 * esp_reset_cleanup() and signal eh_reset.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
        struct esp *esp = shost_priv(cmd->device->host);
        struct completion eh_reset;
        unsigned long flags;

        init_completion(&eh_reset);

        spin_lock_irqsave(esp->host->host_lock, flags);
        esp->eh_reset = &eh_reset;

        /* XXX This is too simple... We should add lots of
         * XXX checks here so that if we find that the chip is
         * XXX very wedged we return failure immediately so
         * XXX that we can perform a full chip reset.
         */
        esp->flags |= ESP_FLAG_RESETTING;
        scsi_esp_cmd(esp, ESP_CMD_RS);

        spin_unlock_irqrestore(esp->host->host_lock, flags);

        ssleep(esp_bus_reset_settle);

        if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
                /* Timed out: detach the on-stack completion. */
                spin_lock_irqsave(esp->host->host_lock, flags);
                esp->eh_reset = NULL;
                spin_unlock_irqrestore(esp->host->host_lock, flags);

                return FAILED;
        }

        return SUCCESS;
}
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
        struct esp *esp = shost_priv(cmd->device->host);
        unsigned long flags;

        spin_lock_irqsave(esp->host->host_lock, flags);
        /* Full boot-style chip reset, then fail everything outstanding. */
        esp_bootup_reset(esp);
        esp_reset_cleanup(esp);
        spin_unlock_irqrestore(esp->host->host_lock, flags);

        ssleep(esp_bus_reset_settle);

        return SUCCESS;
}
/* Return the short driver identification string for this host. */
static const char *esp_info(struct Scsi_Host *host)
{
        static const char ident[] = "esp";

        return ident;
}
/* Host template shared by all ESP front-end drivers (sun_esp etc.). */
struct scsi_host_template scsi_esp_template = {
        .module                 = THIS_MODULE,
        .name                   = "esp",
        .info                   = esp_info,
        .queuecommand           = esp_queuecommand,
        .target_alloc           = esp_target_alloc,
        .target_destroy         = esp_target_destroy,
        .slave_alloc            = esp_slave_alloc,
        .slave_configure        = esp_slave_configure,
        .slave_destroy          = esp_slave_destroy,
        .eh_abort_handler       = esp_eh_abort_handler,
        .eh_bus_reset_handler   = esp_eh_bus_reset_handler,
        .eh_host_reset_handler  = esp_eh_host_reset_handler,
        .can_queue              = 7,
        .this_id                = 7,
        .sg_tablesize           = SG_ALL,
        .use_clustering         = ENABLE_CLUSTERING,
        .max_sectors            = 0xffff,
        .skip_settle_delay      = 1,
};
EXPORT_SYMBOL(scsi_esp_template);
/*
 * SPI transport hook: report whether this board is wired for
 * high-voltage differential or single-ended signalling.
 */
static void esp_get_signalling(struct Scsi_Host *host)
{
        struct esp *esp = shost_priv(host);

        spi_signalling(host) = (esp->flags & ESP_FLAG_DIFFERENTIAL) ?
                SPI_SIGNAL_HVD : SPI_SIGNAL_SE;
}
/*
 * SPI transport hook: record the desired sync offset for a target
 * (forced to 0 when sync is disabled) and flag renegotiation.
 */
static void esp_set_offset(struct scsi_target *target, int offset)
{
        struct Scsi_Host *host = dev_to_shost(target->dev.parent);
        struct esp *esp = shost_priv(host);
        struct esp_target_data *tp = &esp->target[target->id];

        tp->nego_goal_offset = (esp->flags & ESP_FLAG_DISABLE_SYNC) ?
                0 : offset;
        tp->flags |= ESP_TGT_CHECK_NEGO;
}
/* SPI transport hook: set the sync period negotiation goal. */
static void esp_set_period(struct scsi_target *target, int period)
{
	struct Scsi_Host *shost = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(shost);
	struct esp_target_data *tp = &esp->target[target->id];

	/* Record the goal; renegotiation happens on the next command. */
	tp->nego_goal_period = period;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}
/* SPI transport hook: set the wide-transfer negotiation goal. */
static void esp_set_width(struct scsi_target *target, int width)
{
	struct Scsi_Host *shost = dev_to_shost(target->dev.parent);
	struct esp *esp = shost_priv(shost);
	struct esp_target_data *tp = &esp->target[target->id];

	/* Normalize to 0 (narrow) / 1 (wide). */
	tp->nego_goal_width = !!width;
	tp->flags |= ESP_TGT_CHECK_NEGO;
}
/* SPI transport class hooks: expose/set sync offset, period and width. */
static struct spi_function_template esp_transport_ops = {
	.set_offset = esp_set_offset,
	.show_offset = 1,
	.set_period = esp_set_period,
	.show_period = 1,
	.set_width = esp_set_width,
	.show_width = 1,
	.get_signalling = esp_get_signalling,
};
/* Module init: register the SPI transport class for ESP hosts. */
static int __init esp_init(void)
{
	/* esp_cmd_priv is stored inside scsi_cmnd's scsi_pointer;
	 * make sure it actually fits.
	 */
	BUILD_BUG_ON(sizeof(struct scsi_pointer) <
		     sizeof(struct esp_cmd_priv));

	esp_transport_template = spi_attach_transport(&esp_transport_ops);
	return esp_transport_template ? 0 : -ENODEV;
}
/* Module unload: drop the SPI transport class registration. */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
/* Module metadata and parameters. */
MODULE_DESCRIPTION("ESP SCSI driver core");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/* Bus reset settle delay used by the eh_*_reset handlers above. */
module_param(esp_bus_reset_settle, int, 0);
MODULE_PARM_DESC(esp_bus_reset_settle,
		 "ESP scsi bus reset delay in seconds");

/* Bitmapped debug logging control; bit meanings listed below. */
module_param(esp_debug, int, 0);
MODULE_PARM_DESC(esp_debug,
"ESP bitmapped debugging message enable value:\n"
" 0x00000001 Log interrupt events\n"
" 0x00000002 Log scsi commands\n"
" 0x00000004 Log resets\n"
" 0x00000008 Log message in events\n"
" 0x00000010 Log message out events\n"
" 0x00000020 Log command completion\n"
" 0x00000040 Log disconnects\n"
" 0x00000080 Log data start\n"
" 0x00000100 Log data done\n"
" 0x00000200 Log reconnects\n"
" 0x00000400 Log auto-sense data\n"
);

module_init(esp_init);
module_exit(esp_exit);
| gpl-2.0 |
tkymgr/semc_kernel | arch/arm/mach-lh7a40x/irq-kev7a400.c | 1882 | 2178 | /* arch/arm/mach-lh7a40x/irq-kev7a400.c
*
* Copyright (C) 2004 Coastal Environmental Systems
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
*/
#include <linux/interrupt.h>
#include <linux/init.h>
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/mach/hardware.h>
#include <asm/mach/irqs.h>
#include "common.h"
/* KEV7a400 CPLD IRQ handling */

static u16 CPLD_IRQ_mask;	/* Mask for CPLD IRQs, 1 == unmasked */

static void
lh7a400_ack_cpld_irq (u32 irq)
{
	/* Write-one-to-clear the latched interrupt bit for this source. */
	CPLD_CL_INT = 1 << (irq - IRQ_KEV7A400_CPLD);
}
/* Disable a CPLD-cascaded interrupt source. */
static void
lh7a400_mask_cpld_irq (u32 irq)
{
	u16 bit = 1 << (irq - IRQ_KEV7A400_CPLD);

	/* Clear the enable bit, then push the new mask to the CPLD. */
	CPLD_IRQ_mask &= ~bit;
	CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
}
/* Enable a CPLD-cascaded interrupt source. */
static void
lh7a400_unmask_cpld_irq (u32 irq)
{
	u16 bit = 1 << (irq - IRQ_KEV7A400_CPLD);

	/* Set the enable bit, then push the new mask to the CPLD. */
	CPLD_IRQ_mask |= bit;
	CPLD_WR_PB_INT_MASK = CPLD_IRQ_mask;
}
/* irq_chip backing the CPLD-cascaded interrupt sources. */
static struct
irq_chip lh7a400_cpld_chip = {
	.name = "CPLD",
	.ack = lh7a400_ack_cpld_irq,
	.mask = lh7a400_mask_cpld_irq,
	.unmask = lh7a400_unmask_cpld_irq,
};
static void
lh7a400_cpld_handler (unsigned int irq, struct irq_desc *desc)
{
u32 mask = CPLD_LATCHED_INTS;
irq = IRQ_KEV_7A400_CPLD;
for (; mask; mask >>= 1, ++irq) {
if (mask & 1)
desc[irq].handle (irq, desc);
}
}
/* IRQ initialization */

/*
 * Register the CPLD irq_chip for each cascaded source, install the
 * cascade handler, and configure the board's GPIO interrupts.
 *
 * Fix vs. the original: the chained handler was registered under the
 * name kev7a400_cpld_handler, which is not defined anywhere — the
 * handler defined above is lh7a400_cpld_handler.
 */
void __init
lh7a400_init_board_irq (void)
{
	int irq;

	for (irq = IRQ_KEV7A400_CPLD;
	     irq < IRQ_KEV7A400_CPLD + NR_IRQ_KEV7A400_CPLD; ++irq) {
		set_irq_chip (irq, &lh7a400_cpld_chip);
		set_irq_handler (irq, handle_edge_irq);
		set_irq_flags (irq, IRQF_VALID);
	}
	set_irq_chained_handler (IRQ_CPLD, lh7a400_cpld_handler);

	/* Clear all CPLD interrupts */
	CPLD_CL_INT = 0xff;	/* CPLD_INTR_MMC_CD | CPLD_INTR_ETH_INT; */

	/* *** FIXME CF enabled in ide-probe.c */

	GPIO_GPIOINTEN = 0;	/* Disable all GPIO interrupts */
	barrier();
	/* NOTE(review): both operands below are GPIO_INTR_PCC1_CD; the
	 * second was presumably meant to be the PCC2 card-detect bit —
	 * confirm against the board schematic before changing.
	 */
	GPIO_INTTYPE1
		= (GPIO_INTR_PCC1_CD | GPIO_INTR_PCC1_CD); /* Edge trig. */
	GPIO_INTTYPE2 = 0;	/* Falling edge & low-level */
	GPIO_GPIOFEOI = 0xff;	/* Clear all GPIO interrupts */
	GPIO_GPIOINTEN = 0xff;	/* Enable all GPIO interrupts */

	init_FIQ();
}
| gpl-2.0 |
SOKP/kernel_yu_msm8916 | drivers/acpi/acpica/utids.c | 2138 | 12312 | /******************************************************************************
*
* Module Name: utids - support for device Ids - HID, UID, CID
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2013, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("utids")
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_HID
*
* PARAMETERS: device_node - Node for the device
* return_id - Where the string HID is returned
*
* RETURN: Status
*
* DESCRIPTION: Executes the _HID control method that returns the hardware
* ID of the device. The HID is either an 32-bit encoded EISAID
* Integer or a String. A string is always returned. An EISAID
* is converted to a string.
*
* NOTE: Internal function, no parameter validation
*
******************************************************************************/
acpi_status
acpi_ut_execute_HID(struct acpi_namespace_node *device_node,
		    struct acpi_pnp_device_id **return_id)
{
	union acpi_operand_object *obj_desc;
	struct acpi_pnp_device_id *hid;
	u32 length;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ut_execute_HID);

	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__HID,
					 ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
					 &obj_desc);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Required buffer size including the null terminator: an Integer
	 * _HID is an encoded EISAID, a String _HID is copied verbatim.
	 */
	length = (obj_desc->common.type == ACPI_TYPE_INTEGER) ?
	    ACPI_EISAID_STRING_SIZE : obj_desc->string.length + 1;

	/* One allocation holds both the struct and its string area */

	hid = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
				   (acpi_size) length);
	if (!hid) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/* String area begins immediately after the struct */

	hid->string = ACPI_ADD_PTR(char, hid,
				   sizeof(struct acpi_pnp_device_id));

	/* Convert an EISAID integer, or copy an existing string */

	if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
		acpi_ex_eisa_id_to_string(hid->string, obj_desc->integer.value);
	} else {
		ACPI_STRCPY(hid->string, obj_desc->string.pointer);
	}

	hid->length = length;
	*return_id = hid;

cleanup:

	/* The _HID return object is no longer needed */

	acpi_ut_remove_reference(obj_desc);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_SUB
*
* PARAMETERS: device_node - Node for the device
* return_id - Where the _SUB is returned
*
* RETURN: Status
*
* DESCRIPTION: Executes the _SUB control method that returns the subsystem
* ID of the device. The _SUB value is always a string containing
* either a valid PNP or ACPI ID.
*
* NOTE: Internal function, no parameter validation
*
******************************************************************************/
acpi_status
acpi_ut_execute_SUB(struct acpi_namespace_node *device_node,
		    struct acpi_pnp_device_id **return_id)
{
	union acpi_operand_object *obj_desc;
	struct acpi_pnp_device_id *sub;
	u32 length;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ut_execute_SUB);

	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__SUB,
					 ACPI_BTYPE_STRING, &obj_desc);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* _SUB is always a string; size includes the null terminator */

	length = obj_desc->string.length + 1;

	/* One allocation holds both the struct and its string area */

	sub = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
				   (acpi_size) length);
	if (!sub) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/* String area begins immediately after the struct */

	sub->string = ACPI_ADD_PTR(char, sub,
				   sizeof(struct acpi_pnp_device_id));
	ACPI_STRCPY(sub->string, obj_desc->string.pointer);

	sub->length = length;
	*return_id = sub;

cleanup:

	/* The _SUB return object is no longer needed */

	acpi_ut_remove_reference(obj_desc);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_UID
*
* PARAMETERS: device_node - Node for the device
* return_id - Where the string UID is returned
*
* RETURN: Status
*
* DESCRIPTION: Executes the _UID control method that returns the unique
* ID of the device. The UID is either a 64-bit Integer (NOT an
* EISAID) or a string. Always returns a string. A 64-bit integer
* is converted to a decimal string.
*
* NOTE: Internal function, no parameter validation
*
******************************************************************************/
acpi_status
acpi_ut_execute_UID(struct acpi_namespace_node *device_node,
		    struct acpi_pnp_device_id **return_id)
{
	union acpi_operand_object *obj_desc;
	struct acpi_pnp_device_id *uid;
	u32 length;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ut_execute_UID);

	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID,
					 ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
					 &obj_desc);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Required buffer size including the null terminator: an Integer
	 * _UID is rendered as a 64-bit decimal string, a String _UID is
	 * copied verbatim.
	 */
	length = (obj_desc->common.type == ACPI_TYPE_INTEGER) ?
	    ACPI_MAX64_DECIMAL_DIGITS + 1 : obj_desc->string.length + 1;

	/* One allocation holds both the struct and its string area */

	uid = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_pnp_device_id) +
				   (acpi_size) length);
	if (!uid) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/* String area begins immediately after the struct */

	uid->string = ACPI_ADD_PTR(char, uid,
				   sizeof(struct acpi_pnp_device_id));

	/* Convert an Integer to decimal, or copy an existing string */

	if (obj_desc->common.type == ACPI_TYPE_INTEGER) {
		acpi_ex_integer_to_string(uid->string, obj_desc->integer.value);
	} else {
		ACPI_STRCPY(uid->string, obj_desc->string.pointer);
	}

	uid->length = length;
	*return_id = uid;

cleanup:

	/* The _UID return object is no longer needed */

	acpi_ut_remove_reference(obj_desc);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_CID
*
* PARAMETERS: device_node - Node for the device
* return_cid_list - Where the CID list is returned
*
* RETURN: Status, list of CID strings
*
* DESCRIPTION: Executes the _CID control method that returns one or more
* compatible hardware IDs for the device.
*
* NOTE: Internal function, no parameter validation
*
* A _CID method can return either a single compatible ID or a package of
* compatible IDs. Each compatible ID can be one of the following:
* 1) Integer (32 bit compressed EISA ID) or
* 2) String (PCI ID format, e.g. "PCI\VEN_vvvv&DEV_dddd&SUBSYS_ssssssss")
*
* The Integer CIDs are converted to string format by this function.
*
******************************************************************************/
acpi_status
acpi_ut_execute_CID(struct acpi_namespace_node *device_node,
		    struct acpi_pnp_device_id_list **return_cid_list)
{
	union acpi_operand_object **cid_objects;
	union acpi_operand_object *obj_desc;
	struct acpi_pnp_device_id_list *cid_list;
	char *next_id_string;
	u32 string_area_size;
	u32 length;
	u32 cid_list_size;
	acpi_status status;
	u32 count;
	u32 i;

	ACPI_FUNCTION_TRACE(ut_execute_CID);

	/* Evaluate the _CID method for this device */

	status = acpi_ut_evaluate_object(device_node, METHOD_NAME__CID,
					 ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING
					 | ACPI_BTYPE_PACKAGE, &obj_desc);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/*
	 * Get the count and size of the returned _CIDs. _CID can return either
	 * a Package of Integers/Strings or a single Integer or String.
	 * Note: This section also validates that all CID elements are of the
	 * correct type (Integer or String).
	 */
	if (obj_desc->common.type == ACPI_TYPE_PACKAGE) {
		count = obj_desc->package.count;
		cid_objects = obj_desc->package.elements;
	} else {		/* Single Integer or String CID */

		count = 1;
		cid_objects = &obj_desc;
	}

	string_area_size = 0;
	for (i = 0; i < count; i++) {

		/* String lengths include null terminator */

		switch (cid_objects[i]->common.type) {
		case ACPI_TYPE_INTEGER:

			/* Integer CID is a compressed EISAID */

			string_area_size += ACPI_EISAID_STRING_SIZE;
			break;

		case ACPI_TYPE_STRING:

			string_area_size += cid_objects[i]->string.length + 1;
			break;

		default:

			/* Any other element type invalidates the whole list */

			status = AE_TYPE;
			goto cleanup;
		}
	}

	/*
	 * Now that we know the length of the CIDs, allocate return buffer:
	 * 1) Size of the base structure +
	 * 2) Size of the CID PNP_DEVICE_ID array +
	 * 3) Size of the actual CID strings
	 */
	cid_list_size = sizeof(struct acpi_pnp_device_id_list) +
	    ((count - 1) * sizeof(struct acpi_pnp_device_id)) +
	    string_area_size;

	cid_list = ACPI_ALLOCATE_ZEROED(cid_list_size);
	if (!cid_list) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/* Area for CID strings starts after the CID PNP_DEVICE_ID array */

	next_id_string = ACPI_CAST_PTR(char, cid_list->ids) +
	    ((acpi_size) count * sizeof(struct acpi_pnp_device_id));

	/* Copy/convert the CIDs to the return buffer */

	for (i = 0; i < count; i++) {
		if (cid_objects[i]->common.type == ACPI_TYPE_INTEGER) {

			/* Convert the Integer (EISAID) CID to a string */

			acpi_ex_eisa_id_to_string(next_id_string,
						  cid_objects[i]->integer.
						  value);
			length = ACPI_EISAID_STRING_SIZE;
		} else {	/* ACPI_TYPE_STRING */

			/* Copy the String CID from the returned object */

			ACPI_STRCPY(next_id_string,
				    cid_objects[i]->string.pointer);
			length = cid_objects[i]->string.length + 1;
		}

		/* Each ids[] entry points into the shared string area */

		cid_list->ids[i].string = next_id_string;
		cid_list->ids[i].length = length;
		next_id_string += length;
	}

	/* Finish the CID list */

	cid_list->count = count;
	cid_list->list_size = cid_list_size;
	*return_cid_list = cid_list;

cleanup:

	/* On exit, we must delete the _CID return object */

	acpi_ut_remove_reference(obj_desc);
	return_ACPI_STATUS(status);
}
| gpl-2.0 |
ckw1375/mptcp | sound/oss/soundcard.c | 2650 | 17091 | /*
* linux/sound/oss/soundcard.c
*
* Sound card driver for Linux
*
*
* Copyright (C) by Hannu Savolainen 1993-1997
*
* OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL)
* Version 2 (June 1991). See the "COPYING" file distributed with this software
* for more info.
*
*
* Thomas Sailer : ioctl code reworked (vmalloc/vfree removed)
* integrated sound_switch.c
* Stefan Reinauer : integrated /proc/sound (equals to /dev/sndstat,
* which should disappear in the near future)
* Eric Dumas : devfs support (22-Jan-98) <dumas@linux.eu.org> with
* fixups by C. Scott Ananian <cananian@alumni.princeton.edu>
* Richard Gooch : moved common (non OSS-specific) devices to sound_core.c
* Rob Riggs : Added persistent DMA buffers support (1998/10/17)
* Christoph Hellwig : Some cleanup work (2000/03/01)
*/
#include "sound_config.h"
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/signal.h>
#include <linux/fcntl.h>
#include <linux/ctype.h>
#include <linux/stddef.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <linux/wait.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
/*
 * This ought to be moved into include/asm/dma.h
 */
#ifndef valid_dma
#define valid_dma(n) ((n) >= 0 && (n) < MAX_DMA_CHANNELS && (n) != 4)
#endif

/*
 * Table for permanently allocated memory (used when unloading the module)
 */
void * sound_mem_blocks[MAX_MEM_BLOCKS];
static DEFINE_MUTEX(soundcard_mutex);	/* serializes all OSS entry points */
int sound_nblocks = 0;

/* Persistent DMA buffers */
#ifdef CONFIG_SOUND_DMAP
int sound_dmap_flag = 1;
#else
int sound_dmap_flag = 0;
#endif

/* Per-channel state: UNAVAIL -> FREE (allocated) -> BUSY (opened) */
static char dma_alloc_map[MAX_DMA_CHANNELS];
#define DMA_MAP_UNAVAIL 0
#define DMA_MAP_FREE 1
#define DMA_MAP_BUSY 2

unsigned long seq_time = 0; /* Time for /dev/sequencer */
extern struct class *sound_class;

/*
 * Table for configurable mixer volume handling
 */
static mixer_vol_table mixer_vols[MAX_MIXER_DEV];
static int num_mixer_volumes;	/* number of entries used in mixer_vols[] */
/*
 * Look up (or create) the saved volume table for the mixer "name".
 * Returns the stored levels array; falls back to the caller's levels
 * when the table is full.  "present" marks the entry as belonging to
 * a registered mixer.
 */
int *load_mixer_volumes(char *name, int *levels, int present)
{
	int i, slot;

	/* Existing entry for this mixer name? */
	for (i = 0; i < num_mixer_volumes; i++) {
		if (strncmp(name, mixer_vols[i].name, 32) == 0) {
			if (present)
				mixer_vols[i].num = i;
			return mixer_vols[i].levels;
		}
	}

	if (num_mixer_volumes >= MAX_MIXER_DEV) {
		printk(KERN_ERR "Sound: Too many mixers (%s)\n", name);
		return levels;
	}

	/* Claim a new slot and seed it from the caller's table. */
	slot = num_mixer_volumes++;
	strncpy(mixer_vols[slot].name, name, 32);
	mixer_vols[slot].num = present ? slot : -1;
	for (i = 0; i < 32; i++)
		mixer_vols[slot].levels[i] = levels[i];
	return mixer_vols[slot].levels;
}
EXPORT_SYMBOL(load_mixer_volumes);
/*
 * SOUND_MIXER_SETLEVELS: copy a mixer_vol_table from user space and
 * store it, then copy the (possibly updated) table back.
 */
static int set_mixer_levels(void __user * arg)
{
	/* mixer_vol_table is 174 bytes, so IMHO no reason to not allocate it on the stack */
	mixer_vol_table buf;

	if (__copy_from_user(&buf, arg, sizeof(buf)))
		return -EFAULT;
	/* Userspace need not NUL-terminate the name; terminate it here so
	 * load_mixer_volumes() never stores/prints an unterminated string
	 * (CVE-2010-4527).
	 */
	buf.name[sizeof(buf.name) - 1] = '\0';
	load_mixer_volumes(buf.name, buf.levels, 0);
	if (__copy_to_user(arg, &buf, sizeof(buf)))
		return -EFAULT;
	return 0;
}
/*
 * SOUND_MIXER_GETLEVELS: copy the requested saved volume table out to
 * user space.  The caller passes the table index in the ->num field.
 */
static int get_mixer_levels(void __user * arg)
{
	int idx;

	if (__get_user(idx, (int __user *)(&(((mixer_vol_table __user *)arg)->num))))
		return -EFAULT;
	if (idx < 0 || idx >= num_mixer_volumes)
		return -EINVAL;
	if (__copy_to_user(arg, &mixer_vols[idx], sizeof(mixer_vol_table)))
		return -EFAULT;
	return 0;
}
/* 4K page size but our output routines use some slack for overruns */
#define PROC_BLOCK_SIZE (3*1024)

/* read() entry point: dispatch on the device class (low minor nibble). */
static ssize_t sound_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
{
	int dev = iminor(file_inode(file));
	int rc = -EINVAL;

	/*
	 * The OSS drivers aren't remotely happy without this locking,
	 * and unless someone fixes them when they are about to bite the
	 * big one anyway, we might as well bandage here..
	 */
	mutex_lock(&soundcard_mutex);

	DEB(printk("sound_read(dev=%d, count=%d)\n", dev, count));
	switch (dev & 0x0f) {
	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		rc = audio_read(dev, file, buf, count);
		break;

	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		rc = sequencer_read(dev, file, buf, count);
		break;

	case SND_DEV_MIDIN:
		rc = MIDIbuf_read(dev, file, buf, count);
	}

	mutex_unlock(&soundcard_mutex);
	return rc;
}
/* write() entry point: dispatch on the device class (low minor nibble). */
static ssize_t sound_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos)
{
	int dev = iminor(file_inode(file));
	int rc = -EINVAL;

	/* Same big-hammer locking as sound_read(): the legacy OSS
	 * drivers are not safe without it.
	 */
	mutex_lock(&soundcard_mutex);

	DEB(printk("sound_write(dev=%d, count=%d)\n", dev, count));
	switch (dev & 0x0f) {
	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		rc = sequencer_write(dev, file, buf, count);
		break;

	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		rc = audio_write(dev, file, buf, count);
		break;

	case SND_DEV_MIDIN:
		rc = MIDIbuf_write(dev, file, buf, count);
		break;
	}

	mutex_unlock(&soundcard_mutex);
	return rc;
}
/*
 * open() entry point: dispatch on the device class (low minor nibble).
 *
 * Bug fix: the mixer (SND_DEV_CTL) path used to test
 * "dev && (dev >= num_mixers || mixer_devs[dev] == NULL)", which skips
 * the NULL check entirely for mixer index 0 — if no mixer driver is
 * registered, try_module_get(mixer_devs[0]->owner) dereferenced NULL.
 * The check now applies to index 0 as well.
 */
static int sound_open(struct inode *inode, struct file *file)
{
	int dev = iminor(inode);
	int retval;

	DEB(printk("sound_open(dev=%d)\n", dev));
	if ((dev >= SND_NDEVS) || (dev < 0)) {
		printk(KERN_ERR "Invalid minor device %d\n", dev);
		return -ENXIO;
	}
	mutex_lock(&soundcard_mutex);
	switch (dev & 0x0f) {
	case SND_DEV_CTL:
		dev >>= 4;
		/* Try to autoload the mixer driver on first open */
		if (dev >= 0 && dev < MAX_MIXER_DEV && mixer_devs[dev] == NULL) {
			request_module("mixer%d", dev);
		}
		retval = -ENXIO;
		if (dev >= num_mixers || mixer_devs[dev] == NULL)
			break;
		if (!try_module_get(mixer_devs[dev]->owner))
			break;
		retval = 0;
		break;

	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		retval = sequencer_open(dev, file);
		break;

	case SND_DEV_MIDIN:
		retval = MIDIbuf_open(dev, file);
		break;

	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		retval = audio_open(dev, file);
		break;

	default:
		printk(KERN_ERR "Invalid minor device %d\n", dev);
		retval = -ENXIO;
	}
	mutex_unlock(&soundcard_mutex);
	return retval;
}
/* release() entry point: undo what sound_open() did for this class. */
static int sound_release(struct inode *inode, struct file *file)
{
	int dev = iminor(inode);

	mutex_lock(&soundcard_mutex);
	DEB(printk("sound_release(dev=%d)\n", dev));
	switch (dev & 0x0f) {
	case SND_DEV_CTL:
		/* Drop the module reference taken in sound_open() */
		module_put(mixer_devs[dev >> 4]->owner);
		break;

	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		sequencer_release(dev, file);
		break;

	case SND_DEV_MIDIN:
		MIDIbuf_release(dev, file);
		break;

	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		audio_release(dev, file);
		break;

	default:
		printk(KERN_ERR "Sound error: Releasing unknown device 0x%02x\n", dev);
	}
	mutex_unlock(&soundcard_mutex);

	return 0;
}
/* SOUND_MIXER_INFO: copy id/name/modify_counter of a mixer to user space. */
static int get_mixer_info(int dev, void __user *arg)
{
	mixer_info minfo;

	/* memset (rather than struct init) so padding copied out is zeroed */
	memset(&minfo, 0, sizeof(minfo));
	strlcpy(minfo.id, mixer_devs[dev]->id, sizeof(minfo.id));
	strlcpy(minfo.name, mixer_devs[dev]->name, sizeof(minfo.name));
	minfo.modify_counter = mixer_devs[dev]->modify_counter;
	if (__copy_to_user(arg, &minfo, sizeof(minfo)))
		return -EFAULT;
	return 0;
}
/* SOUND_OLD_MIXER_INFO: legacy variant without the modify counter. */
static int get_old_mixer_info(int dev, void __user *arg)
{
	_old_mixer_info minfo;

	/* memset (rather than struct init) so padding copied out is zeroed */
	memset(&minfo, 0, sizeof(minfo));
	strlcpy(minfo.id, mixer_devs[dev]->id, sizeof(minfo.id));
	strlcpy(minfo.name, mixer_devs[dev]->name, sizeof(minfo.name));
	if (copy_to_user(arg, &minfo, sizeof(minfo)))
		return -EFAULT;
	return 0;
}
/*
 * Route a mixer ioctl to the right mixer driver, autoloading it on
 * first use.  INFO queries are handled here; everything else goes to
 * the driver's own ioctl hook.
 */
static int sound_mixer_ioctl(int mixdev, unsigned int cmd, void __user *arg)
{
	if (mixdev < 0 || mixdev >= MAX_MIXER_DEV)
		return -ENXIO;

	/* Try to load the mixer... */
	if (mixer_devs[mixdev] == NULL)
		request_module("mixer%d", mixdev);
	if (mixdev >= num_mixers || !mixer_devs[mixdev])
		return -ENXIO;

	switch (cmd) {
	case SOUND_MIXER_INFO:
		return get_mixer_info(mixdev, arg);
	case SOUND_OLD_MIXER_INFO:
		return get_old_mixer_info(mixdev, arg);
	}

	/* Any write-direction ioctl counts as a modification. */
	if (_SIOC_DIR(cmd) & _SIOC_WRITE)
		mixer_devs[mixdev]->modify_counter++;

	if (!mixer_devs[mixdev]->ioctl)
		return -EINVAL;
	return mixer_devs[mixdev]->ioctl(mixdev, cmd, arg);
}
/*
 * Top-level ioctl dispatcher for all OSS minors.  Validates the user
 * pointer, redirects 'M'-type (mixer) ioctls issued on non-mixer
 * minors, then dispatches on the device class under the global mutex.
 */
static long sound_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	int len = 0, dtype;
	int dev = iminor(file_inode(file));
	long ret = -EINVAL;
	void __user *p = (void __user *)arg;

	if (_SIOC_DIR(cmd) != _SIOC_NONE && _SIOC_DIR(cmd) != 0) {
		/*
		 * Have to validate the address given by the process.
		 */
		len = _SIOC_SIZE(cmd);
		if (len < 1 || len > 65536 || !p)
			return -EFAULT;
		if (_SIOC_DIR(cmd) & _SIOC_WRITE)
			if (!access_ok(VERIFY_READ, p, len))
				return -EFAULT;
		if (_SIOC_DIR(cmd) & _SIOC_READ)
			if (!access_ok(VERIFY_WRITE, p, len))
				return -EFAULT;
	}
	DEB(printk("sound_ioctl(dev=%d, cmd=0x%x, arg=0x%x)\n", dev, cmd, arg));
	/* Version query needs no locking */
	if (cmd == OSS_GETVERSION)
		return __put_user(SOUND_VERSION, (int __user *)p);

	mutex_lock(&soundcard_mutex);
	if (_IOC_TYPE(cmd) == 'M' && num_mixers > 0 &&	/* Mixer ioctl */
	    (dev & 0x0f) != SND_DEV_CTL) {
		dtype = dev & 0x0f;
		switch (dtype) {
		case SND_DEV_DSP:
		case SND_DEV_DSP16:
		case SND_DEV_AUDIO:
			/* Audio devices know which mixer they belong to */
			ret = sound_mixer_ioctl(audio_devs[dev >> 4]->mixer_dev,
						cmd, p);
			break;
		default:
			ret = sound_mixer_ioctl(dev >> 4, cmd, p);
			break;
		}
		mutex_unlock(&soundcard_mutex);
		return ret;
	}

	switch (dev & 0x0f) {
	case SND_DEV_CTL:
		if (cmd == SOUND_MIXER_GETLEVELS)
			ret = get_mixer_levels(p);
		else if (cmd == SOUND_MIXER_SETLEVELS)
			ret = set_mixer_levels(p);
		else
			ret = sound_mixer_ioctl(dev >> 4, cmd, p);
		break;

	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		ret = sequencer_ioctl(dev, file, cmd, p);
		break;

	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		ret = audio_ioctl(dev, file, cmd, p);
		break;

	case SND_DEV_MIDIN:
		ret = MIDIbuf_ioctl(dev, file, cmd, p);
		break;
	}
	mutex_unlock(&soundcard_mutex);
	return ret;
}
/* poll() entry point: delegate to the handler for the device class. */
static unsigned int sound_poll(struct file *file, poll_table * wait)
{
	struct inode *inode = file_inode(file);
	int dev = iminor(inode);

	DEB(printk("sound_poll(dev=%d)\n", dev));
	switch (dev & 0x0f) {
	case SND_DEV_SEQ:
	case SND_DEV_SEQ2:
		return sequencer_poll(dev, file, wait);

	case SND_DEV_MIDIN:
		return MIDIbuf_poll(dev, file, wait);

	case SND_DEV_DSP:
	case SND_DEV_DSP16:
	case SND_DEV_AUDIO:
		return DMAbuf_poll(file, dev >> 4, wait);

	default:
		return 0;
	}
}
/*
 * mmap() entry point: map an audio device's persistent DMA buffer
 * into user space.  VM_WRITE (or read/write) maps the output buffer,
 * read-only maps the input buffer.
 *
 * Improvement: the original duplicated mutex_unlock()+return on six
 * error paths; they are collapsed into a single goto-based exit so a
 * future path cannot forget the unlock.  Return codes are unchanged.
 */
static int sound_mmap(struct file *file, struct vm_area_struct *vma)
{
	int dev_class;
	unsigned long size;
	struct dma_buffparms *dmap = NULL;
	int ret;
	int dev = iminor(file_inode(file));

	dev_class = dev & 0x0f;
	dev >>= 4;

	if (dev_class != SND_DEV_DSP && dev_class != SND_DEV_DSP16 && dev_class != SND_DEV_AUDIO) {
		printk(KERN_ERR "Sound: mmap() not supported for other than audio devices\n");
		return -EINVAL;
	}
	mutex_lock(&soundcard_mutex);
	if (vma->vm_flags & VM_WRITE)	/* Map write and read/write to the output buf */
		dmap = audio_devs[dev]->dmap_out;
	else if (vma->vm_flags & VM_READ)
		dmap = audio_devs[dev]->dmap_in;
	else {
		printk(KERN_ERR "Sound: Undefined mmap() access\n");
		ret = -EINVAL;
		goto out;
	}

	if (dmap == NULL) {
		printk(KERN_ERR "Sound: mmap() error. dmap == NULL\n");
		ret = -EIO;
		goto out;
	}
	if (dmap->raw_buf == NULL) {
		printk(KERN_ERR "Sound: mmap() called when raw_buf == NULL\n");
		ret = -EIO;
		goto out;
	}
	if (dmap->mapping_flags) {
		printk(KERN_ERR "Sound: mmap() called twice for the same DMA buffer\n");
		ret = -EIO;
		goto out;
	}
	if (vma->vm_pgoff != 0) {
		printk(KERN_ERR "Sound: mmap() offset must be 0.\n");
		ret = -EINVAL;
		goto out;
	}
	size = vma->vm_end - vma->vm_start;

	if (size != dmap->bytes_in_use) {
		printk(KERN_WARNING "Sound: mmap() size = %ld. Should be %d\n", size, dmap->bytes_in_use);
	}
	if (remap_pfn_range(vma, vma->vm_start,
			virt_to_phys(dmap->raw_buf) >> PAGE_SHIFT,
			vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		ret = -EAGAIN;
		goto out;
	}

	dmap->mapping_flags |= DMA_MAP_MAPPED;

	if (audio_devs[dev]->d->mmap)
		audio_devs[dev]->d->mmap(dev);

	/* Present a silent buffer to user space. */
	memset(dmap->raw_buf,
	       dmap->neutral_byte,
	       dmap->bytes_in_use);
	ret = 0;
out:
	mutex_unlock(&soundcard_mutex);
	return ret;
}
/* file_operations shared by every OSS character-device minor. */
const struct file_operations oss_sound_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,	/* sound devices are not seekable */
	.read = sound_read,
	.write = sound_write,
	.poll = sound_poll,
	.unlocked_ioctl = sound_ioctl,
	.mmap = sound_mmap,
	.open = sound_open,
	.release = sound_release,
};
/*
 * Create the required special subdevices
 */
static int create_special_devices(void)
{
	/* Minor 1 is /dev/sequencer, minor 8 is /dev/sequencer2. */
	if (register_sound_special(&oss_sound_fops, 1) == -1)
		return -1;
	if (register_sound_special(&oss_sound_fops, 8) != -1)
		return 0;
	/* Second registration failed: roll back the first. */
	unregister_sound_special(1);
	return -1;
}
/* Module parameters: dmabuf selects persistent DMA buffers; dmabug
 * marks the ISA DMA bridge as buggy (consumed in oss_init, PCI only). */
static int dmabuf;
static int dmabug;

module_param(dmabuf, int, 0444);
module_param(dmabug, int, 0444);

/* additional minors for compatibility */
struct oss_minor_dev {
	unsigned short minor;	/* base minor number of this class */
	unsigned int enabled;	/* bitmask of registered per-device aliases */
} dev_list[] = {
	{ SND_DEV_DSP16 },
	{ SND_DEV_AUDIO },
};
/*
 * Module init: create the sequencer minors, then register the
 * compatibility alias minors (one per audio device) for each entry of
 * dev_list[], recording which registrations succeeded in ->enabled.
 *
 * Bug fix: the enabled bit was assigned with "=", so each successful
 * registration discarded the bits recorded for earlier j values and
 * oss_cleanup() (which tests every bit) could never unregister them.
 * It now accumulates with "|=".
 */
static int __init oss_init(void)
{
	int err;
	int i, j;

#ifdef CONFIG_PCI
	if (dmabug)
		isa_dma_bridge_buggy = dmabug;
#endif

	err = create_special_devices();
	if (err) {
		printk(KERN_ERR "sound: driver already loaded/included in kernel\n");
		return err;
	}

	/* Protecting the innocent */
	sound_dmap_flag = (dmabuf > 0 ? 1 : 0);

	for (i = 0; i < ARRAY_SIZE(dev_list); i++) {
		j = 0;
		do {
			unsigned short minor = dev_list[i].minor + j * 0x10;

			if (!register_sound_special(&oss_sound_fops, minor))
				dev_list[i].enabled |= (1 << j);
		} while (++j < num_audiodevs);
	}

	if (sound_nblocks >= MAX_MEM_BLOCKS - 1)
		printk(KERN_ERR "Sound warning: Deallocation table was too small.\n");

	return 0;
}
/*
 * Module exit: unregister every minor registered in oss_init(), stop
 * the sequencer timer, release stray DMA channels and free the
 * permanently allocated memory blocks.
 *
 * Bug fix: the loop unregistered dev_list[i].minor for every set bit,
 * i.e. only the base minor over and over, leaking the per-device alias
 * minors (base + j * 0x10) that oss_init() registered.
 */
static void __exit oss_cleanup(void)
{
	int i, j;

	for (i = 0; i < ARRAY_SIZE(dev_list); i++) {
		j = 0;
		do {
			if (dev_list[i].enabled & (1 << j))
				unregister_sound_special(dev_list[i].minor +
							 j * 0x10);
		} while (++j < num_audiodevs);
	}

	unregister_sound_special(1);
	unregister_sound_special(8);

	sound_stop_timer();

	sequencer_unload();

	for (i = 0; i < MAX_DMA_CHANNELS; i++)
		if (dma_alloc_map[i] != DMA_MAP_UNAVAIL) {
			printk(KERN_ERR "Sound: Hmm, DMA%d was left allocated - fixed\n", i);
			sound_free_dma(i);
		}

	for (i = 0; i < sound_nblocks; i++)
		vfree(sound_mem_blocks[i]);
}
/* Module entry points and metadata. */
module_init(oss_init);
module_exit(oss_cleanup);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("OSS Sound subsystem");
MODULE_AUTHOR("Hannu Savolainen, et al.");
/* Reserve an ISA DMA channel for a sound driver and mark it FREE
 * (allocated but not yet opened). */
int sound_alloc_dma(int chn, char *deviceID)
{
	int err = request_dma(chn, deviceID);

	if (err)
		return err;

	dma_alloc_map[chn] = DMA_MAP_FREE;
	return 0;
}
EXPORT_SYMBOL(sound_alloc_dma);
/*
 * Transition an allocated DMA channel from FREE to BUSY.
 * Returns 0 on success, 1 if the channel is invalid, busy or was
 * never allocated.
 *
 * Fix: the busy-path printk lacked a log level; it now uses KERN_ERR
 * like the other diagnostics in this file.
 */
int sound_open_dma(int chn, char *deviceID)
{
	if (!valid_dma(chn)) {
		printk(KERN_ERR "sound_open_dma: Invalid DMA channel %d\n", chn);
		return 1;
	}

	if (dma_alloc_map[chn] != DMA_MAP_FREE) {
		printk(KERN_ERR "sound_open_dma: DMA channel %d busy or not allocated (%d)\n", chn, dma_alloc_map[chn]);
		return 1;
	}
	dma_alloc_map[chn] = DMA_MAP_BUSY;
	return 0;
}
EXPORT_SYMBOL(sound_open_dma);
/* Release a DMA channel reserved with sound_alloc_dma(); silently
 * ignores channels that were never allocated. */
void sound_free_dma(int chn)
{
	if (dma_alloc_map[chn] == DMA_MAP_UNAVAIL)
		return;

	free_dma(chn);
	dma_alloc_map[chn] = DMA_MAP_UNAVAIL;
}
EXPORT_SYMBOL(sound_free_dma);
/* Transition a BUSY DMA channel back to FREE (allocated but idle). */
void sound_close_dma(int chn)
{
	/* Only a BUSY channel can be closed; anything else is a caller bug. */
	if (dma_alloc_map[chn] == DMA_MAP_BUSY) {
		dma_alloc_map[chn] = DMA_MAP_FREE;
		return;
	}
	printk(KERN_ERR "sound_close_dma: Bad access to DMA channel %d\n", chn);
}
EXPORT_SYMBOL(sound_close_dma);
/* Kernel-timer callback that drives the OSS sequencer tick. */
static void do_sequencer_timer(unsigned long dummy)
{
	sequencer_timer(0);
}

static DEFINE_TIMER(seq_timer, do_sequencer_timer, 0, 0);
void request_sound_timer(int count)
{
extern unsigned long seq_time;
if (count < 0) {
seq_timer.expires = (-count) + jiffies;
add_timer(&seq_timer);
return;
}
count += seq_time;
count -= jiffies;
if (count < 1)
count = 1;
seq_timer.expires = (count) + jiffies;
add_timer(&seq_timer);
}
/* Cancel a pending sequencer timer tick (safe if none is pending). */
void sound_stop_timer(void)
{
	del_timer(&seq_timer);
}
/*
 * Log the detected configuration (io base, irq, dma) of a card.
 * Compiled to a no-op unless CONFIG_SOUND_TRACEINIT is enabled.
 */
void conf_printf(char *name, struct address_info *hw_config)
{
#ifndef CONFIG_SOUND_TRACEINIT
	return;
#else
	printk("<%s> at 0x%03x", name, hw_config->io_base);

	if (hw_config->irq)
		printk(" irq %d", (hw_config->irq > 0) ? hw_config->irq : -hw_config->irq);

	if (hw_config->dma != -1 || hw_config->dma2 != -1)
	{
		printk(" dma %d", hw_config->dma);
		if (hw_config->dma2 != -1)
			printk(",%d", hw_config->dma2);
	}
	printk("\n");
#endif
}
EXPORT_SYMBOL(conf_printf);
/*
 * Like conf_printf(), but takes the resources as discrete arguments
 * instead of an address_info struct.
 */
void conf_printf2(char *name, int base, int irq, int dma, int dma2)
{
#ifndef CONFIG_SOUND_TRACEINIT
	return;
#else
	printk("<%s> at 0x%03x", name, base);

	if (irq)
		printk(" irq %d", (irq > 0) ? irq : -irq);

	if (dma != -1 || dma2 != -1)
	{
		printk(" dma %d", dma);
		if (dma2 != -1)
			printk(",%d", dma2);
	}
	printk("\n");
#endif
}
EXPORT_SYMBOL(conf_printf2);
| gpl-2.0 |
CyanogenMod/android_kernel_huawei_msm8928 | drivers/mfd/s5m-core.c | 4698 | 4789 | /*
* s5m87xx.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/s5m87xx/s5m-core.h>
#include <linux/mfd/s5m87xx/s5m-pmic.h>
#include <linux/mfd/s5m87xx/s5m-rtc.h>
#include <linux/regmap.h>
/* MFD child devices instantiated for the S5M8751 variant. */
static struct mfd_cell s5m8751_devs[] = {
	{
		.name = "s5m8751-pmic",
	}, {
		.name = "s5m-charger",
	}, {
		.name = "s5m8751-codec",
	},
};
/* MFD child devices instantiated for the S5M8763 variant. */
static struct mfd_cell s5m8763_devs[] = {
	{
		.name = "s5m8763-pmic",
	}, {
		.name = "s5m-rtc",
	}, {
		.name = "s5m-charger",
	},
};
/* MFD child devices instantiated for the S5M8767 variant. */
static struct mfd_cell s5m8767_devs[] = {
	{
		.name = "s5m8767-pmic",
	}, {
		.name = "s5m-rtc",
	},
};
/* Read one register; returns 0 on success or a negative regmap error. */
int s5m_reg_read(struct s5m87xx_dev *s5m87xx, u8 reg, void *dest)
{
	struct regmap *map = s5m87xx->regmap;

	return regmap_read(map, reg, dest);
}
EXPORT_SYMBOL_GPL(s5m_reg_read);
/* Read @count consecutive registers starting at @reg into @buf. */
int s5m_bulk_read(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf)
{
	struct regmap *map = s5m87xx->regmap;

	return regmap_bulk_read(map, reg, buf, count);
}
EXPORT_SYMBOL_GPL(s5m_bulk_read);
/* Write one register; returns 0 on success or a negative regmap error. */
int s5m_reg_write(struct s5m87xx_dev *s5m87xx, u8 reg, u8 value)
{
	struct regmap *map = s5m87xx->regmap;

	return regmap_write(map, reg, value);
}
EXPORT_SYMBOL_GPL(s5m_reg_write);
/*
 * Write @count consecutive registers starting at @reg from @buf.
 * NOTE(review): uses regmap_raw_write() while s5m_bulk_read() uses
 * regmap_bulk_read(); with 8-bit registers the wire format coincides,
 * but confirm the asymmetry is intentional.
 */
int s5m_bulk_write(struct s5m87xx_dev *s5m87xx, u8 reg, int count, u8 *buf)
{
	return regmap_raw_write(s5m87xx->regmap, reg, buf, count);
}
EXPORT_SYMBOL_GPL(s5m_bulk_write);
/* Read-modify-write the bits of @reg selected by @mask to @val. */
int s5m_reg_update(struct s5m87xx_dev *s5m87xx, u8 reg, u8 val, u8 mask)
{
	struct regmap *map = s5m87xx->regmap;

	return regmap_update_bits(map, reg, mask, val);
}
EXPORT_SYMBOL_GPL(s5m_reg_update);
static struct regmap_config s5m_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
static int s5m87xx_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct s5m_platform_data *pdata = i2c->dev.platform_data;
struct s5m87xx_dev *s5m87xx;
int ret;
s5m87xx = devm_kzalloc(&i2c->dev, sizeof(struct s5m87xx_dev),
GFP_KERNEL);
if (s5m87xx == NULL)
return -ENOMEM;
i2c_set_clientdata(i2c, s5m87xx);
s5m87xx->dev = &i2c->dev;
s5m87xx->i2c = i2c;
s5m87xx->irq = i2c->irq;
s5m87xx->type = id->driver_data;
if (pdata) {
s5m87xx->device_type = pdata->device_type;
s5m87xx->ono = pdata->ono;
s5m87xx->irq_base = pdata->irq_base;
s5m87xx->wakeup = pdata->wakeup;
}
s5m87xx->regmap = regmap_init_i2c(i2c, &s5m_regmap_config);
if (IS_ERR(s5m87xx->regmap)) {
ret = PTR_ERR(s5m87xx->regmap);
dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
ret);
goto err;
}
s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
i2c_set_clientdata(s5m87xx->rtc, s5m87xx);
if (pdata && pdata->cfg_pmic_irq)
pdata->cfg_pmic_irq();
s5m_irq_init(s5m87xx);
pm_runtime_set_active(s5m87xx->dev);
switch (s5m87xx->device_type) {
case S5M8751X:
ret = mfd_add_devices(s5m87xx->dev, -1, s5m8751_devs,
ARRAY_SIZE(s5m8751_devs), NULL, 0);
break;
case S5M8763X:
ret = mfd_add_devices(s5m87xx->dev, -1, s5m8763_devs,
ARRAY_SIZE(s5m8763_devs), NULL, 0);
break;
case S5M8767X:
ret = mfd_add_devices(s5m87xx->dev, -1, s5m8767_devs,
ARRAY_SIZE(s5m8767_devs), NULL, 0);
break;
default:
/* If this happens the probe function is problem */
BUG();
}
if (ret < 0)
goto err;
return ret;
err:
mfd_remove_devices(s5m87xx->dev);
s5m_irq_exit(s5m87xx);
i2c_unregister_device(s5m87xx->rtc);
regmap_exit(s5m87xx->regmap);
return ret;
}
/* Undo probe: remove MFD children, tear down IRQs, RTC client and regmap. */
static int s5m87xx_i2c_remove(struct i2c_client *i2c)
{
	struct s5m87xx_dev *s5m87xx = i2c_get_clientdata(i2c);
	mfd_remove_devices(s5m87xx->dev);
	s5m_irq_exit(s5m87xx);
	i2c_unregister_device(s5m87xx->rtc);
	regmap_exit(s5m87xx->regmap);
	return 0;
}
/* I2C ids this driver binds to; driver_data carries the chip type. */
static const struct i2c_device_id s5m87xx_i2c_id[] = {
	{ "s5m87xx", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, s5m87xx_i2c_id);

static struct i2c_driver s5m87xx_i2c_driver = {
	.driver = {
		   .name = "s5m87xx",
		   .owner = THIS_MODULE,
	},
	.probe = s5m87xx_i2c_probe,
	.remove = s5m87xx_i2c_remove,
	.id_table = s5m87xx_i2c_id,
};
static int __init s5m87xx_i2c_init(void)
{
	return i2c_add_driver(&s5m87xx_i2c_driver);
}
/* Registered at subsys_initcall time so the PMIC comes up before consumers. */
subsys_initcall(s5m87xx_i2c_init);

static void __exit s5m87xx_i2c_exit(void)
{
	i2c_del_driver(&s5m87xx_i2c_driver);
}
module_exit(s5m87xx_i2c_exit);
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
MODULE_DESCRIPTION("Core support for the S5M MFD");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Swapnil133609/Zeus_exp | net/x25/x25_route.c | 4698 | 4866 | /*
* X.25 Packet Layer release 002
*
* This is ALPHA test software. This code may break your machine,
* randomly fail to work with new releases, misbehave and/or generally
* screw up. It might even work.
*
* This code REQUIRES 2.1.15 or higher
*
* This module:
* This module is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* History
* X.25 001 Jonathan Naylor Started coding.
*/
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/x25.h>
/* Global route table, protected by x25_route_list_lock. */
LIST_HEAD(x25_route_list);
DEFINE_RWLOCK(x25_route_list_lock);
/*
* Add a new route.
*/
/*
 * Insert a route for @address (first @sigdigits digits significant) via
 * @dev.  Returns 0, -EINVAL if an identical prefix already exists, or
 * -ENOMEM.
 */
static int x25_add_route(struct x25_address *address, unsigned int sigdigits,
			 struct net_device *dev)
{
	struct x25_route *rt;
	struct list_head *entry;
	int rc = -EINVAL;

	write_lock_bh(&x25_route_list_lock);

	/* reject duplicates: compare only the significant prefix */
	list_for_each(entry, &x25_route_list) {
		rt = list_entry(entry, struct x25_route, node);

		if (!memcmp(&rt->address, address, sigdigits) &&
		    rt->sigdigits == sigdigits)
			goto out;
	}

	rt = kmalloc(sizeof(*rt), GFP_ATOMIC);
	rc = -ENOMEM;
	if (!rt)
		goto out;

	/* pad to a full 15-digit X.121 address, then overlay the prefix */
	strcpy(rt->address.x25_addr, "000000000000000");
	memcpy(rt->address.x25_addr, address->x25_addr, sigdigits);

	rt->sigdigits = sigdigits;
	/* NOTE(review): dev stored without dev_hold(); lifetime relies on
	 * x25_route_device_down() running before the device goes away —
	 * confirm. */
	rt->dev = dev;
	atomic_set(&rt->refcnt, 1);

	list_add(&rt->node, &x25_route_list);
	rc = 0;
out:
	write_unlock_bh(&x25_route_list_lock);
	return rc;
}
/**
* __x25_remove_route - remove route from x25_route_list
* @rt: route to remove
*
* Remove route from x25_route_list. If it was there.
* Caller must hold x25_route_list_lock.
*/
static void __x25_remove_route(struct x25_route *rt)
{
	/* node.next is non-NULL only while the entry is actually linked */
	if (rt->node.next) {
		list_del(&rt->node);
		x25_route_put(rt);
	}
}
static int x25_del_route(struct x25_address *address, unsigned int sigdigits,
struct net_device *dev)
{
struct x25_route *rt;
struct list_head *entry;
int rc = -EINVAL;
write_lock_bh(&x25_route_list_lock);
list_for_each(entry, &x25_route_list) {
rt = list_entry(entry, struct x25_route, node);
if (!memcmp(&rt->address, address, sigdigits) &&
rt->sigdigits == sigdigits && rt->dev == dev) {
__x25_remove_route(rt);
rc = 0;
break;
}
}
write_unlock_bh(&x25_route_list_lock);
return rc;
}
/*
* A device has been removed, remove its routes.
*/
void x25_route_device_down(struct net_device *dev)
{
struct x25_route *rt;
struct list_head *entry, *tmp;
write_lock_bh(&x25_route_list_lock);
list_for_each_safe(entry, tmp, &x25_route_list) {
rt = list_entry(entry, struct x25_route, node);
if (rt->dev == dev)
__x25_remove_route(rt);
}
write_unlock_bh(&x25_route_list_lock);
/* Remove any related forwarding */
x25_clear_forward_by_dev(dev);
}
/*
* Check that the device given is a valid X.25 interface that is "up".
*/
/*
 * Look up @devname and return it with a reference held, but only if it
 * is up and is an X.25 (or, with LLC, Ethernet) interface; NULL otherwise.
 */
struct net_device *x25_dev_get(char *devname)
{
	struct net_device *dev = dev_get_by_name(&init_net, devname);
	int usable;

	if (!dev)
		return NULL;

	usable = dev->type == ARPHRD_X25;
#if IS_ENABLED(CONFIG_LLC)
	usable |= dev->type == ARPHRD_ETHER;
#endif
	if (!(dev->flags & IFF_UP) || !usable) {
		dev_put(dev);
		dev = NULL;
	}
	return dev;
}
/**
* x25_get_route - Find a route given an X.25 address.
* @addr - address to find a route for
*
* Find a route given an X.25 address.
*/
struct x25_route *x25_get_route(struct x25_address *addr)
{
struct x25_route *rt, *use = NULL;
struct list_head *entry;
read_lock_bh(&x25_route_list_lock);
list_for_each(entry, &x25_route_list) {
rt = list_entry(entry, struct x25_route, node);
if (!memcmp(&rt->address, addr, rt->sigdigits)) {
if (!use)
use = rt;
else if (rt->sigdigits > use->sigdigits)
use = rt;
}
}
if (use)
x25_route_hold(use);
read_unlock_bh(&x25_route_list_lock);
return use;
}
/*
* Handle the ioctls that control the routing functions.
*/
/* SIOCADDRT/SIOCDELRT handler: validate the user request and apply it. */
int x25_route_ioctl(unsigned int cmd, void __user *arg)
{
	struct x25_route_struct route;
	struct net_device *dev;
	int rc;

	if (cmd != SIOCADDRT && cmd != SIOCDELRT)
		return -EINVAL;

	if (copy_from_user(&route, arg, sizeof(route)))
		return -EFAULT;

	/* an X.121 address has at most 15 digits */
	if (route.sigdigits > 15)
		return -EINVAL;

	dev = x25_dev_get(route.device);
	if (!dev)
		return -EINVAL;

	if (cmd == SIOCADDRT)
		rc = x25_add_route(&route.address, route.sigdigits, dev);
	else
		rc = x25_del_route(&route.address, route.sigdigits, dev);
	dev_put(dev);

	return rc;
}
/*
* Release all memory associated with X.25 routing structures.
*/
/* Module unload: release every remaining route. */
void __exit x25_route_free(void)
{
	struct x25_route *rt, *next;

	write_lock_bh(&x25_route_list_lock);
	list_for_each_entry_safe(rt, next, &x25_route_list, node)
		__x25_remove_route(rt);
	write_unlock_bh(&x25_route_list_lock);
}
| gpl-2.0 |
windxixi/OptiWiz-Kernel-F200-JB | arch/mn10300/kernel/irq.c | 4698 | 9930 | /* MN10300 Arch-specific interrupt handling
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <asm/setup.h>
#include <asm/serial-regs.h>
/* Per-CPU EPSW image applied when interrupts are (re)enabled. */
unsigned long __mn10300_irq_enabled_epsw[NR_CPUS] __cacheline_aligned_in_smp = {
	[0 ... NR_CPUS - 1] = EPSW_IE | EPSW_IM_7
};
EXPORT_SYMBOL(__mn10300_irq_enabled_epsw);

#ifdef CONFIG_SMP
/* CPU currently servicing each IRQ (affinity target). */
static char irq_affinity_online[NR_IRQS] = {
	[0 ... NR_IRQS - 1] = 0
};

#define NR_IRQ_WORDS	((NR_IRQS + 31) / 32)
/* Bitmap of IRQs with a pending affinity change (see setaffinity below). */
static unsigned long irq_affinity_request[NR_IRQ_WORDS] = {
	[0 ... NR_IRQ_WORDS - 1] = 0
};
#endif  /* CONFIG_SMP */

/* Count of spurious/errored interrupts, shown in /proc/interrupts. */
atomic_t irq_err_count;
/*
* MN10300 interrupt controller operations
*/
/* Acknowledge an IRQ by clearing its latched detection bit in the ICR. */
static void mn10300_cpupic_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	GxICR_u8(irq) = GxICR_DETECT;
	/* read-back — presumably flushes the posted write to the ICR; the
	 * same idiom is used throughout this file */
	tmp = GxICR(irq);
	arch_local_irq_restore(flags);
}
/* Atomically rewrite an ICR: keep the bits in @mask, OR in @set. */
static void __mask_and_set_icr(unsigned int irq,
			       unsigned int mask, unsigned int set)
{
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();
	tmp = GxICR(irq);
	GxICR(irq) = (tmp & mask) | set;
	tmp = GxICR(irq);	/* read-back, see mn10300_cpupic_ack() */
	arch_local_irq_restore(flags);
}
/* Mask an IRQ: clear its enable bit, preserving only the level field. */
static void mn10300_cpupic_mask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, 0);
}
/*
 * Mask and acknowledge an IRQ.  On SMP, if an affinity change was
 * requested for this IRQ, complete the migration here by re-arming the
 * ICR on the new target CPU via CROSS_GxICR().
 */
static void mn10300_cpupic_mask_ack(struct irq_data *d)
{
	unsigned int irq = d->irq;
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		/* normal case: mask locally and clear the detect latch */
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		u16 tmp2;
		/* migration pending: disable locally ... */
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL);
		tmp2 = GxICR(irq);

		/* ... and hand the IRQ to a CPU in the affinity mask */
		irq_affinity_online[irq] =
			cpumask_any_and(d->affinity, cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) =
			(tmp & (GxICR_LEVEL | GxICR_ENABLE)) | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_DETECT);
#endif /* CONFIG_SMP */
}
/* Unmask an IRQ: set its enable bit, preserving the level field. */
static void mn10300_cpupic_unmask(struct irq_data *d)
{
	__mask_and_set_icr(d->irq, GxICR_LEVEL, GxICR_ENABLE);
}
/*
 * Unmask a level-triggered IRQ, clearing the request latch at the same
 * time.  On SMP a pending affinity change is completed here, mirroring
 * mn10300_cpupic_mask_ack().
 */
static void mn10300_cpupic_unmask_clear(struct irq_data *d)
{
	unsigned int irq = d->irq;
	/* the MN10300 PIC latches its interrupt request bit, even after the
	 * device has ceased to assert its interrupt line and the interrupt
	 * channel has been disabled in the PIC, so for level-triggered
	 * interrupts we need to clear the request bit when we re-enable */
#ifdef CONFIG_SMP
	unsigned long flags;
	u16 tmp;

	flags = arch_local_cli_save();

	if (!test_and_clear_bit(irq, irq_affinity_request)) {
		tmp = GxICR(irq);
		GxICR(irq) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = GxICR(irq);
	} else {
		/* affinity change pending: enable on the new target CPU */
		tmp = GxICR(irq);

		irq_affinity_online[irq] = cpumask_any_and(d->affinity,
							   cpu_online_mask);
		CROSS_GxICR(irq, irq_affinity_online[irq]) = (tmp & GxICR_LEVEL) | GxICR_ENABLE | GxICR_DETECT;
		tmp = CROSS_GxICR(irq, irq_affinity_online[irq]);
	}

	arch_local_irq_restore(flags);
#else  /* CONFIG_SMP */
	__mask_and_set_icr(irq, GxICR_LEVEL, GxICR_ENABLE | GxICR_DETECT);
#endif /* CONFIG_SMP */
}
#ifdef CONFIG_SMP
/*
 * Request migration of an IRQ to a CPU in @mask.  IRQs bound to per-CPU
 * resources (IPIs, the timer tick, the serial-console timers/channels)
 * cannot move and yield -1; anything else is flagged in
 * irq_affinity_request and migrated by the mask_ack/unmask callbacks.
 *
 * Fix: the '#elif CONFIG_xxx' lines are now '#elif defined(CONFIG_xxx)'.
 * A bare macro name in #elif is evaluated as an expression, which trips
 * -Wundef and breaks outright if the symbol ever expands to nothing;
 * defined() tests presence, which is what is meant here.
 */
static int
mn10300_cpupic_setaffinity(struct irq_data *d, const struct cpumask *mask,
			   bool force)
{
	unsigned long flags;
	int err;

	flags = arch_local_cli_save();

	/* check irq no */
	switch (d->irq) {
	case TMJCIRQ:
	case RESCHEDULE_IPI:
	case CALL_FUNC_SINGLE_IPI:
	case LOCAL_TIMER_IPI:
	case FLUSH_CACHE_IPI:
	case CALL_FUNCTION_NMI_IPI:
	case DEBUGGER_NMI_IPI:
#ifdef CONFIG_MN10300_TTYSM0
	case SC0RXIRQ:
	case SC0TXIRQ:
#ifdef CONFIG_MN10300_TTYSM0_TIMER8
	case TM8IRQ:
#elif defined(CONFIG_MN10300_TTYSM0_TIMER2)
	case TM2IRQ:
#endif /* CONFIG_MN10300_TTYSM0_TIMER8 */
#endif /* CONFIG_MN10300_TTYSM0 */
#ifdef CONFIG_MN10300_TTYSM1
	case SC1RXIRQ:
	case SC1TXIRQ:
#ifdef CONFIG_MN10300_TTYSM1_TIMER12
	case TM12IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER9)
	case TM9IRQ:
#elif defined(CONFIG_MN10300_TTYSM1_TIMER3)
	case TM3IRQ:
#endif /* CONFIG_MN10300_TTYSM1_TIMER12 */
#endif /* CONFIG_MN10300_TTYSM1 */
#ifdef CONFIG_MN10300_TTYSM2
	case SC2RXIRQ:
	case SC2TXIRQ:
	case TM10IRQ:
#endif /* CONFIG_MN10300_TTYSM2 */
		err = -1;
		break;

	default:
		set_bit(d->irq, irq_affinity_request);
		err = 0;
		break;
	}

	arch_local_irq_restore(flags);
	return err;
}
#endif /* CONFIG_SMP */
/*
* MN10300 PIC level-triggered IRQ handling.
*
* The PIC has no 'ACK' function per se. It is possible to clear individual
* channel latches, but each latch relatches whether or not the channel is
* masked, so we need to clear the latch when we unmask the channel.
*
* Also for this reason, we don't supply an ack() op (it's unused anyway if
* mask_ack() is provided), and mask_ack() just masks.
*/
/* irq_chip for level-triggered IRQs: the request latch is cleared on
 * unmask (unmask_clear), so mask_ack only masks and there is no ack op. */
static struct irq_chip mn10300_cpu_pic_level = {
	.name			= "cpu_l",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask_clear,
	.irq_ack		= NULL,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask,
	.irq_unmask		= mn10300_cpupic_unmask_clear,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};
/*
* MN10300 PIC edge-triggered IRQ handling.
*
* We use the latch clearing function of the PIC as the 'ACK' function.
*/
/* irq_chip for edge-triggered IRQs: the latch-clear doubles as 'ack'. */
static struct irq_chip mn10300_cpu_pic_edge = {
	.name			= "cpu_e",
	.irq_disable		= mn10300_cpupic_mask,
	.irq_enable		= mn10300_cpupic_unmask,
	.irq_ack		= mn10300_cpupic_ack,
	.irq_mask		= mn10300_cpupic_mask,
	.irq_mask_ack		= mn10300_cpupic_mask_ack,
	.irq_unmask		= mn10300_cpupic_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity	= mn10300_cpupic_setaffinity,
#endif
};
/*
* 'what should we do if we get a hw irq event on an illegal vector'.
* each architecture has to answer this themselves.
*/
/* Called by the generic IRQ core for an event on an unexpected vector. */
void ack_bad_irq(int irq)
{
	printk(KERN_WARNING "unexpected IRQ trap at vector %02x\n", irq);
}
/*
* change the level at which an IRQ executes
* - must not be called whilst interrupts are being processed!
*/
/* Change an IRQ's priority level; must not run from interrupt context. */
void set_intr_level(int irq, u16 level)
{
	BUG_ON(in_interrupt());

	__mask_and_set_icr(irq, GxICR_ENABLE, level);
}
/*
* mark an interrupt to be ACK'd after interrupt handlers have been run rather
* than before
*/
/* Switch an IRQ to the level chip so it is ACK'd after its handlers run. */
void mn10300_set_lateack_irq_type(int irq)
{
	irq_set_chip_and_handler(irq, &mn10300_cpu_pic_level,
				 handle_level_irq);
}
/*
* initialise the interrupt system
*/
/* Attach the edge chip to every IRQ not already claimed, then let the
 * board/unit code set up its own interrupts. */
void __init init_IRQ(void)
{
	int irq;

	for (irq = 0; irq < NR_IRQS; irq++)
		if (irq_get_chip(irq) == &no_irq_chip)
			/* due to the PIC latching interrupt requests, even
			 * when the IRQ is disabled, IRQ_PENDING is superfluous
			 * and we can use handle_level_irq() for edge-triggered
			 * interrupts */
			irq_set_chip_and_handler(irq, &mn10300_cpu_pic_edge,
						 handle_level_irq);

	unit_init_IRQ();
}
/*
* handle normal device IRQs
*/
/*
 * Main interrupt entry: drain the interrupt-acceptance group register
 * (IAGR) and dispatch each pending IRQ, without letting handlers lower
 * the EPSW interrupt-priority level behind our back.
 */
asmlinkage void do_IRQ(void)
{
	unsigned long sp, epsw, irq_disabled_epsw, old_irq_enabled_epsw;
	unsigned int cpu_id = smp_processor_id();
	int irq;

	sp = current_stack_pointer();
	/* catch near-overflow of the kernel stack on entry */
	BUG_ON(sp - (sp & ~(THREAD_SIZE - 1)) < STACK_WARN);

	/* make sure local_irq_enable() doesn't muck up the interrupt priority
	 * setting in EPSW */
	old_irq_enabled_epsw = __mn10300_irq_enabled_epsw[cpu_id];
	local_save_flags(epsw);
	__mn10300_irq_enabled_epsw[cpu_id] = EPSW_IE | (EPSW_IM & epsw);
	irq_disabled_epsw = EPSW_IE | MN10300_CLI_LEVEL;

#ifdef CONFIG_MN10300_WD_TIMER
	__IRQ_STAT(cpu_id, __irq_count)++;
#endif

	irq_enter();

	for (;;) {
		/* ask the interrupt controller for the next IRQ to process
		 * - the result we get depends on EPSW.IM
		 */
		irq = IAGR & IAGR_GN;
		if (!irq)
			break;

		local_irq_restore(irq_disabled_epsw);

		/* IAGR group numbers are vector << 2 */
		generic_handle_irq(irq >> 2);

		/* restore IRQ controls for IAGR access */
		local_irq_restore(epsw);
	}

	__mn10300_irq_enabled_epsw[cpu_id] = old_irq_enabled_epsw;

	irq_exit();
}
/*
* Display interrupt management information through /proc/interrupts
*/
/* Arch-specific rows for /proc/interrupts: NMI counts and error total. */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_MN10300_WD_TIMER
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for (j = 0; j < NR_CPUS; j++)
		if (cpu_online(j))
			seq_printf(p, "%10u ", nmi_count(j));
	seq_putc(p, '\n');
#endif

	seq_printf(p, "%*s: ", prec, "ERR");
	seq_printf(p, "%10u\n", atomic_read(&irq_err_count));
	return 0;
}
#ifdef CONFIG_HOTPLUG_CPU
/*
 * CPU-offline helper: move every IRQ currently targeted at this CPU to
 * another online CPU, re-arming the ICR on the new target and replaying
 * any request that was latched on the old one.
 */
void migrate_irqs(void)
{
	int irq;
	unsigned int self, new;
	unsigned long flags;

	self = smp_processor_id();
	for (irq = 0; irq < NR_IRQS; irq++) {
		struct irq_data *data = irq_get_irq_data(irq);

		if (irqd_is_per_cpu(data))
			continue;

		/* NOTE(review): tests data->affinity but intersects
		 * irq_affinity[irq] — an external array; confirm the two are
		 * meant to differ here. */
		if (cpumask_test_cpu(self, &data->affinity) &&
		    !cpumask_intersects(&irq_affinity[irq], cpu_online_mask)) {
			int cpu_id;
			cpu_id = cpumask_first(cpu_online_mask);
			cpumask_set_cpu(cpu_id, &data->affinity);
		}
		/* We need to operate irq_affinity_online atomically. */
		arch_local_cli_save(flags);
		if (irq_affinity_online[irq] == self) {
			u16 x, tmp;

			x = GxICR(irq);
			GxICR(irq) = x & GxICR_LEVEL;
			tmp = GxICR(irq);

			new = cpumask_any_and(&data->affinity,
					      cpu_online_mask);
			irq_affinity_online[irq] = new;

			CROSS_GxICR(irq, new) =
				(x & GxICR_LEVEL) | GxICR_DETECT;
			tmp = CROSS_GxICR(irq, new);

			x &= GxICR_LEVEL | GxICR_ENABLE;
			/* replay a request latched on the outgoing CPU */
			if (GxICR(irq) & GxICR_REQUEST)
				x |= GxICR_REQUEST | GxICR_DETECT;
			CROSS_GxICR(irq, new) = x;
			tmp = CROSS_GxICR(irq, new);
		}
		arch_local_irq_restore(flags);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */
| gpl-2.0 |
AK-Kernel/AK-OnePone | drivers/i2c/busses/i2c-diolan-u2c.c | 4954 | 13121 | /*
* Driver for the Diolan u2c-12 USB-I2C adapter
*
* Copyright (c) 2010-2011 Ericsson AB
*
* Derived from:
* i2c-tiny-usb.c
* Copyright (C) 2006-2007 Till Harbaum (Till@Harbaum.org)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#define DRIVER_NAME "i2c-diolan-u2c"
#define USB_VENDOR_ID_DIOLAN 0x0abf
#define USB_DEVICE_ID_DIOLAN_U2C 0x3370
#define DIOLAN_OUT_EP 0x02
#define DIOLAN_IN_EP 0x84
/* commands via USB, must match command ids in the firmware */
#define CMD_I2C_READ 0x01
#define CMD_I2C_WRITE 0x02
#define CMD_I2C_SCAN 0x03 /* Returns list of detected devices */
#define CMD_I2C_RELEASE_SDA 0x04
#define CMD_I2C_RELEASE_SCL 0x05
#define CMD_I2C_DROP_SDA 0x06
#define CMD_I2C_DROP_SCL 0x07
#define CMD_I2C_READ_SDA 0x08
#define CMD_I2C_READ_SCL 0x09
#define CMD_GET_FW_VERSION 0x0a
#define CMD_GET_SERIAL 0x0b
#define CMD_I2C_START 0x0c
#define CMD_I2C_STOP 0x0d
#define CMD_I2C_REPEATED_START 0x0e
#define CMD_I2C_PUT_BYTE 0x0f
#define CMD_I2C_GET_BYTE 0x10
#define CMD_I2C_PUT_ACK 0x11
#define CMD_I2C_GET_ACK 0x12
#define CMD_I2C_PUT_BYTE_ACK 0x13
#define CMD_I2C_GET_BYTE_ACK 0x14
#define CMD_I2C_SET_SPEED 0x1b
#define CMD_I2C_GET_SPEED 0x1c
#define CMD_I2C_SET_CLK_SYNC 0x24
#define CMD_I2C_GET_CLK_SYNC 0x25
#define CMD_I2C_SET_CLK_SYNC_TO 0x26
#define CMD_I2C_GET_CLK_SYNC_TO 0x27
#define RESP_OK 0x00
#define RESP_FAILED 0x01
#define RESP_BAD_MEMADDR 0x04
#define RESP_DATA_ERR 0x05
#define RESP_NOT_IMPLEMENTED 0x06
#define RESP_NACK 0x07
#define RESP_TIMEOUT 0x09
#define U2C_I2C_SPEED_FAST 0 /* 400 kHz */
#define U2C_I2C_SPEED_STD 1 /* 100 kHz */
#define U2C_I2C_SPEED_2KHZ 242 /* 2 kHz, minimum speed */
#define U2C_I2C_SPEED(f) ((DIV_ROUND_UP(1000000, (f)) - 10) / 2 + 1)
#define U2C_I2C_FREQ_FAST 400000
#define U2C_I2C_FREQ_STD 100000
#define U2C_I2C_FREQ(s) (1000000 / (2 * (s - 1) + 10))
#define DIOLAN_USB_TIMEOUT 100 /* in ms */
#define DIOLAN_SYNC_TIMEOUT 20 /* in ms */
#define DIOLAN_OUTBUF_LEN 128
#define DIOLAN_FLUSH_LEN (DIOLAN_OUTBUF_LEN - 4)
#define DIOLAN_INBUF_LEN 256 /* Maximum supported receive length */
/* Structure to hold all of our device specific stuff */
/* Structure to hold all of our device specific stuff */
struct i2c_diolan_u2c {
	u8 obuffer[DIOLAN_OUTBUF_LEN];	/* output buffer */
	u8 ibuffer[DIOLAN_INBUF_LEN];	/* input buffer */
	struct usb_device *usb_dev;	/* the usb device for this device */
	struct usb_interface *interface;/* the interface for this device */
	struct i2c_adapter adapter;	/* i2c related things */
	int olen;			/* Output buffer length */
	int ocount;			/* Number of enqueued messages */
};

/* Requested bus clock; clamped to the adapter's range in diolan_init(). */
static uint frequency = U2C_I2C_FREQ_STD;	/* I2C clock frequency in Hz */
module_param(frequency, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(frequency, "I2C clock frequency in hertz");
/* usb layer */
/* Send command to device, and get response. */
/*
 * Send the buffered command block to the device and collect one response
 * per queued command.  Returns the payload length (response minus status
 * byte) of the last successful command, or a negative error.  The first
 * error is kept even though all responses are still drained.
 */
static int diolan_usb_transfer(struct i2c_diolan_u2c *dev)
{
	int ret = 0;
	int actual;
	int i;

	if (!dev->olen || !dev->ocount)
		return -EINVAL;
	ret = usb_bulk_msg(dev->usb_dev,
			   usb_sndbulkpipe(dev->usb_dev, DIOLAN_OUT_EP),
			   dev->obuffer, dev->olen, &actual,
			   DIOLAN_USB_TIMEOUT);
	if (!ret) {
		for (i = 0; i < dev->ocount; i++) {
			int tmpret;
			tmpret = usb_bulk_msg(dev->usb_dev,
					      usb_rcvbulkpipe(dev->usb_dev,
							      DIOLAN_IN_EP),
					      dev->ibuffer,
					      sizeof(dev->ibuffer), &actual,
					      DIOLAN_USB_TIMEOUT);
			/*
			 * Stop command processing if a previous command
			 * returned an error.
			 * Note that we still need to retrieve all messages.
			 */
			if (ret < 0)
				continue;
			ret = tmpret;
			if (ret == 0 && actual > 0) {
				/* last byte of a response is its status code */
				switch (dev->ibuffer[actual - 1]) {
				case RESP_NACK:
					/*
					 * Return ENXIO if NACK was received as
					 * response to the address phase,
					 * EIO otherwise
					 */
					ret = i == 1 ? -ENXIO : -EIO;
					break;
				case RESP_TIMEOUT:
					ret = -ETIMEDOUT;
					break;
				case RESP_OK:
					/* strip off return code */
					ret = actual - 1;
					break;
				default:
					ret = -EIO;
					break;
				}
			}
		}
	}
	/* buffer is consumed either way */
	dev->olen = 0;
	dev->ocount = 0;
	return ret;
}
/* Flush the command buffer when requested or when it is nearly full. */
static int diolan_write_cmd(struct i2c_diolan_u2c *dev, bool flush)
{
	bool must_send = flush || dev->olen >= DIOLAN_FLUSH_LEN;

	return must_send ? diolan_usb_transfer(dev) : 0;
}
/* Send command (no data) */
/* Queue a command that carries no data bytes. */
static int diolan_usb_cmd(struct i2c_diolan_u2c *dev, u8 command, bool flush)
{
	dev->obuffer[dev->olen] = command;
	dev->olen += 1;
	dev->ocount += 1;
	return diolan_write_cmd(dev, flush);
}
/* Send command with one byte of data */
/* Queue a command with one data byte. */
static int diolan_usb_cmd_data(struct i2c_diolan_u2c *dev, u8 command, u8 data,
			       bool flush)
{
	u8 *out = &dev->obuffer[dev->olen];

	out[0] = command;
	out[1] = data;
	dev->olen += 2;
	dev->ocount++;
	return diolan_write_cmd(dev, flush);
}
/* Send command with two bytes of data */
/* Queue a command with two data bytes. */
static int diolan_usb_cmd_data2(struct i2c_diolan_u2c *dev, u8 command, u8 d1,
				u8 d2, bool flush)
{
	u8 *out = &dev->obuffer[dev->olen];

	out[0] = command;
	out[1] = d1;
	out[2] = d2;
	dev->olen += 3;
	dev->ocount++;
	return diolan_write_cmd(dev, flush);
}
/*
* Flush input queue.
* If we don't do this at startup and the controller has queued up
* messages which were not retrieved, it will stop responding
* at some point.
*/
/* Drain any stale responses left queued in the device (max 10 reads). */
static void diolan_flush_input(struct i2c_diolan_u2c *dev)
{
	int attempt;

	for (attempt = 0; attempt < 10; attempt++) {
		int actual = 0;
		int status;

		status = usb_bulk_msg(dev->usb_dev,
				      usb_rcvbulkpipe(dev->usb_dev,
						      DIOLAN_IN_EP),
				      dev->ibuffer, sizeof(dev->ibuffer),
				      &actual, DIOLAN_USB_TIMEOUT);
		if (status < 0 || actual == 0)
			return;
	}
	dev_err(&dev->interface->dev, "Failed to flush input buffer\n");
}
/* Queue an I2C START condition (buffered; sent on a later flush). */
static int diolan_i2c_start(struct i2c_diolan_u2c *dev)
{
	return diolan_usb_cmd(dev, CMD_I2C_START, false);
}

/* Queue a repeated START between messages of one transfer. */
static int diolan_i2c_repeated_start(struct i2c_diolan_u2c *dev)
{
	return diolan_usb_cmd(dev, CMD_I2C_REPEATED_START, false);
}

/* Queue a STOP condition and flush all buffered commands. */
static int diolan_i2c_stop(struct i2c_diolan_u2c *dev)
{
	return diolan_usb_cmd(dev, CMD_I2C_STOP, true);
}
/*
 * Read one byte from the bus, sending @ack afterwards; the byte lands in
 * *byte.  A zero-length response is mapped to -EIO.
 */
static int diolan_i2c_get_byte_ack(struct i2c_diolan_u2c *dev, bool ack,
				   u8 *byte)
{
	int status;

	status = diolan_usb_cmd_data(dev, CMD_I2C_GET_BYTE_ACK, ack, true);
	if (status == 0)
		return -EIO;
	if (status > 0)
		*byte = dev->ibuffer[0];
	return status;
}
/* Queue one data byte; the device reports the slave's ACK/NACK. */
static int diolan_i2c_put_byte_ack(struct i2c_diolan_u2c *dev, u8 byte)
{
	return diolan_usb_cmd_data(dev, CMD_I2C_PUT_BYTE_ACK, byte, false);
}

/* Set the bus speed code (see U2C_I2C_SPEED*); flushes immediately. */
static int diolan_set_speed(struct i2c_diolan_u2c *dev, u8 speed)
{
	return diolan_usb_cmd_data(dev, CMD_I2C_SET_SPEED, speed, true);
}

/* Enable or disable clock synchronization (stretching) */
static int diolan_set_clock_synch(struct i2c_diolan_u2c *dev, bool enable)
{
	return diolan_usb_cmd_data(dev, CMD_I2C_SET_CLK_SYNC, enable, true);
}

/* Set clock synchronization timeout in ms */
static int diolan_set_clock_synch_timeout(struct i2c_diolan_u2c *dev, int ms)
{
	/* ms * 10 — device presumably takes 100us units; confirm vs firmware */
	int to_val = ms * 10;

	return diolan_usb_cmd_data2(dev, CMD_I2C_SET_CLK_SYNC_TO,
				    to_val & 0xff, (to_val >> 8) & 0xff, true);
}
/* Query and log the adapter firmware version (major.minor); best effort. */
static void diolan_fw_version(struct i2c_diolan_u2c *dev)
{
	int ret;

	ret = diolan_usb_cmd(dev, CMD_GET_FW_VERSION, true);
	if (ret >= 2)
		dev_info(&dev->interface->dev,
			 "Diolan U2C firmware version %u.%u\n",
			 (unsigned int)dev->ibuffer[0],
			 (unsigned int)dev->ibuffer[1]);
}
/* Query and log the adapter serial number; best effort. */
static void diolan_get_serial(struct i2c_diolan_u2c *dev)
{
	int ret;
	u32 serial;

	ret = diolan_usb_cmd(dev, CMD_GET_SERIAL, true);
	if (ret >= 4) {
		/* NOTE(review): direct u32 load from the byte buffer assumes
		 * suitable alignment; get_unaligned_le32() would be safer. */
		serial = le32_to_cpu(*(u32 *)dev->ibuffer);
		dev_info(&dev->interface->dev,
			 "Diolan U2C serial number %u\n", serial);
	}
}
/*
 * One-time adapter setup: clamp the requested frequency to a supported
 * speed code, drain stale responses, log identity, then program speed
 * and clock-stretching (enabled for anything slower than fast mode).
 */
static int diolan_init(struct i2c_diolan_u2c *dev)
{
	int speed, ret;

	if (frequency >= 200000) {
		speed = U2C_I2C_SPEED_FAST;
		frequency = U2C_I2C_FREQ_FAST;
	} else if (frequency >= 100000 || frequency == 0) {
		speed = U2C_I2C_SPEED_STD;
		frequency = U2C_I2C_FREQ_STD;
	} else {
		speed = U2C_I2C_SPEED(frequency);
		if (speed > U2C_I2C_SPEED_2KHZ)
			speed = U2C_I2C_SPEED_2KHZ;	/* larger code = slower */
		frequency = U2C_I2C_FREQ(speed);
	}

	dev_info(&dev->interface->dev,
		 "Diolan U2C at USB bus %03d address %03d speed %d Hz\n",
		 dev->usb_dev->bus->busnum, dev->usb_dev->devnum, frequency);

	diolan_flush_input(dev);
	diolan_fw_version(dev);
	diolan_get_serial(dev);

	/* Set I2C speed */
	ret = diolan_set_speed(dev, speed);
	if (ret < 0)
		return ret;

	/* Configure I2C clock synchronization */
	ret = diolan_set_clock_synch(dev, speed != U2C_I2C_SPEED_FAST);
	if (ret < 0)
		return ret;

	if (speed != U2C_I2C_SPEED_FAST)
		ret = diolan_set_clock_synch_timeout(dev, DIOLAN_SYNC_TIMEOUT);

	return ret;
}
/* i2c layer */
/*
 * i2c_algorithm master_xfer: run @num messages as one bus transaction
 * (START, per-message address + data phases with repeated START between
 * messages, STOP).  Returns the first error, or the last command result.
 */
static int diolan_usb_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs,
			   int num)
{
	struct i2c_diolan_u2c *dev = i2c_get_adapdata(adapter);
	struct i2c_msg *pmsg;
	int i, j;
	int ret, sret;

	ret = diolan_i2c_start(dev);
	if (ret < 0)
		return ret;

	for (i = 0; i < num; i++) {
		pmsg = &msgs[i];
		if (i) {
			ret = diolan_i2c_repeated_start(dev);
			if (ret < 0)
				goto abort;
		}
		if (pmsg->flags & I2C_M_RD) {
			/* address phase: 7-bit address with read bit set */
			ret =
			    diolan_i2c_put_byte_ack(dev, (pmsg->addr << 1) | 1);
			if (ret < 0)
				goto abort;
			for (j = 0; j < pmsg->len; j++) {
				u8 byte;
				bool ack = j < pmsg->len - 1;

				/*
				 * Don't send NACK if this is the first byte
				 * of a SMBUS_BLOCK message.
				 */
				if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN))
					ack = true;

				ret = diolan_i2c_get_byte_ack(dev, ack, &byte);
				if (ret < 0)
					goto abort;
				/*
				 * Adjust count if first received byte is length
				 */
				if (j == 0 && (pmsg->flags & I2C_M_RECV_LEN)) {
					if (byte == 0
					    || byte > I2C_SMBUS_BLOCK_MAX) {
						ret = -EPROTO;
						goto abort;
					}
					pmsg->len += byte;
				}
				pmsg->buf[j] = byte;
			}
		} else {
			/* address phase: 7-bit address with write bit clear */
			ret = diolan_i2c_put_byte_ack(dev, pmsg->addr << 1);
			if (ret < 0)
				goto abort;
			for (j = 0; j < pmsg->len; j++) {
				ret = diolan_i2c_put_byte_ack(dev,
							      pmsg->buf[j]);
				if (ret < 0)
					goto abort;
			}
		}
	}
abort:
	/* always terminate the transaction; keep the first error */
	sret = diolan_i2c_stop(dev);
	if (sret < 0 && ret >= 0)
		ret = sret;
	return ret;
}
/*
* Return list of supported functionality.
*/
/* Advertise plain I2C plus SMBus emulation incl. block reads. */
static u32 diolan_usb_func(struct i2c_adapter *a)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL;
}

static const struct i2c_algorithm diolan_usb_algorithm = {
	.master_xfer = diolan_usb_xfer,
	.functionality = diolan_usb_func,
};
/* device layer */
/* USB vendor/product ids this driver binds to. */
static const struct usb_device_id diolan_u2c_table[] = {
	{ USB_DEVICE(USB_VENDOR_ID_DIOLAN, USB_DEVICE_ID_DIOLAN_U2C) },
	{ }
};

MODULE_DEVICE_TABLE(usb, diolan_u2c_table);

/* Drop the usb_device reference taken at probe and free the state. */
static void diolan_u2c_free(struct i2c_diolan_u2c *dev)
{
	usb_put_dev(dev->usb_dev);
	kfree(dev);
}
/*
 * USB probe: allocate per-device state, initialise the adapter hardware
 * via diolan_init(), then register an i2c_adapter for it.
 */
static int diolan_u2c_probe(struct usb_interface *interface,
			    const struct usb_device_id *id)
{
	struct i2c_diolan_u2c *dev;
	int ret;

	/* allocate memory for our device state and initialize it */
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (dev == NULL) {
		dev_err(&interface->dev, "no memory for device state\n");
		ret = -ENOMEM;
		goto error;
	}

	dev->usb_dev = usb_get_dev(interface_to_usbdev(interface));
	dev->interface = interface;

	/* save our data pointer in this interface device */
	usb_set_intfdata(interface, dev);

	/* setup i2c adapter description */
	dev->adapter.owner = THIS_MODULE;
	dev->adapter.class = I2C_CLASS_HWMON;
	dev->adapter.algo = &diolan_usb_algorithm;
	i2c_set_adapdata(&dev->adapter, dev);
	snprintf(dev->adapter.name, sizeof(dev->adapter.name),
		 DRIVER_NAME " at bus %03d device %03d",
		 dev->usb_dev->bus->busnum, dev->usb_dev->devnum);

	dev->adapter.dev.parent = &dev->interface->dev;

	/* initialize diolan i2c interface */
	ret = diolan_init(dev);
	if (ret < 0) {
		dev_err(&interface->dev, "failed to initialize adapter\n");
		goto error_free;
	}

	/* and finally attach to i2c layer */
	ret = i2c_add_adapter(&dev->adapter);
	if (ret < 0) {
		dev_err(&interface->dev, "failed to add I2C adapter\n");
		goto error_free;
	}

	dev_dbg(&interface->dev, "connected " DRIVER_NAME "\n");

	return 0;

error_free:
	usb_set_intfdata(interface, NULL);
	diolan_u2c_free(dev);
error:
	return ret;
}
/* USB disconnect: unregister the i2c adapter and release the state. */
static void diolan_u2c_disconnect(struct usb_interface *interface)
{
	struct i2c_diolan_u2c *dev = usb_get_intfdata(interface);

	i2c_del_adapter(&dev->adapter);
	usb_set_intfdata(interface, NULL);
	diolan_u2c_free(dev);

	dev_dbg(&interface->dev, "disconnected\n");
}
static struct usb_driver diolan_u2c_driver = {
	.name = DRIVER_NAME,
	.probe = diolan_u2c_probe,
	.disconnect = diolan_u2c_disconnect,
	.id_table = diolan_u2c_table,
};

/* expands to the module init/exit boilerplate for a USB driver */
module_usb_driver(diolan_u2c_driver);
MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>");
MODULE_DESCRIPTION(DRIVER_NAME " driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
MoKee/android_kernel_htc_msm7x30 | drivers/staging/comedi/drivers/multiq3.c | 8282 | 9194 | /*
comedi/drivers/multiq3.c
Hardware driver for Quanser Consulting MultiQ-3 board
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: multiq3
Description: Quanser Consulting MultiQ-3
Author: Anders Blomdell <anders.blomdell@control.lth.se>
Status: works
Devices: [Quanser Consulting] MultiQ-3 (multiq3)
*/
#include <linux/interrupt.h>
#include "../comedidev.h"
#include <linux/ioport.h>
#define MULTIQ3_SIZE 16
/*
* MULTIQ-3 port offsets
*/
#define MULTIQ3_DIGIN_PORT 0
#define MULTIQ3_DIGOUT_PORT 0
#define MULTIQ3_DAC_DATA 2
#define MULTIQ3_AD_DATA 4
#define MULTIQ3_AD_CS 4
#define MULTIQ3_STATUS 6
#define MULTIQ3_CONTROL 6
#define MULTIQ3_CLK_DATA 8
#define MULTIQ3_ENC_DATA 12
#define MULTIQ3_ENC_CONTROL 14
/*
* flags for CONTROL register
*/
#define MULTIQ3_AD_MUX_EN 0x0040
#define MULTIQ3_AD_AUTOZ 0x0080
#define MULTIQ3_AD_AUTOCAL 0x0100
#define MULTIQ3_AD_SH 0x0200
#define MULTIQ3_AD_CLOCK_4M 0x0400
#define MULTIQ3_DA_LOAD 0x1800
#define MULTIQ3_CONTROL_MUST 0x0600
/*
* flags for STATUS register
*/
#define MULTIQ3_STATUS_EOC 0x008
#define MULTIQ3_STATUS_EOC_I 0x010
/*
* flags for encoder control
*/
#define MULTIQ3_CLOCK_DATA 0x00
#define MULTIQ3_CLOCK_SETUP 0x18
#define MULTIQ3_INPUT_SETUP 0x41
#define MULTIQ3_QUAD_X4 0x38
#define MULTIQ3_BP_RESET 0x01
#define MULTIQ3_CNTR_RESET 0x02
#define MULTIQ3_TRSFRPR_CTR 0x08
#define MULTIQ3_TRSFRCNTR_OL 0x10
#define MULTIQ3_EFLAG_RESET 0x06
#define MULTIQ3_TIMEOUT 30
static int multiq3_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it);
static int multiq3_detach(struct comedi_device *dev);

/* comedi driver registration record (legacy manual attach/detach). */
static struct comedi_driver driver_multiq3 = {
	.driver_name = "multiq3",
	.module = THIS_MODULE,
	.attach = multiq3_attach,
	.detach = multiq3_detach,
};
/* Register the driver with the comedi core on module load. */
static int __init driver_multiq3_init_module(void)
{
	return comedi_driver_register(&driver_multiq3);
}

/* Unregister on module unload. */
static void __exit driver_multiq3_cleanup_module(void)
{
	comedi_driver_unregister(&driver_multiq3);
}

module_init(driver_multiq3_init_module);
module_exit(driver_multiq3_cleanup_module);
/*
 * Per-device private state.
 *
 * The AO subdevice is registered with 8 channels (see multiq3_attach),
 * so the readback cache must hold 8 entries; the previous 2-entry
 * array was overrun by multiq3_ao_insn_write()/multiq3_ao_insn_read()
 * for channels >= 2 (the comedi core only validates the channel
 * against n_chan, which is 8).
 */
struct multiq3_private {
	unsigned int ao_readback[8];	/* last value written per AO channel */
};

/* Legacy comedi idiom for reaching the private state from 'dev'. */
#define devpriv ((struct multiq3_private *)dev->private)
/*
 * Analog input read.
 *
 * Selects the channel through the on-board mux, then performs insn->n
 * conversions, polling the status register for end-of-conversion (the
 * board interrupt is not used by this driver).
 */
static int multiq3_ai_insn_read(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	int i, n;
	int chan;
	unsigned int hi, lo;

	chan = CR_CHAN(insn->chanspec);
	/* route the requested channel to the ADC */
	outw(MULTIQ3_CONTROL_MUST | MULTIQ3_AD_MUX_EN | (chan << 3),
	     dev->iobase + MULTIQ3_CONTROL);

	/* busy-wait for the mux/sample path to signal EOC */
	for (i = 0; i < MULTIQ3_TIMEOUT; i++) {
		if (inw(dev->iobase + MULTIQ3_STATUS) & MULTIQ3_STATUS_EOC)
			break;
	}
	if (i == MULTIQ3_TIMEOUT)
		return -ETIMEDOUT;

	for (n = 0; n < insn->n; n++) {
		/* writing the chip-select register starts a conversion */
		outw(0, dev->iobase + MULTIQ3_AD_CS);
		for (i = 0; i < MULTIQ3_TIMEOUT; i++) {
			if (inw(dev->iobase +
				MULTIQ3_STATUS) & MULTIQ3_STATUS_EOC_I)
				break;
		}
		if (i == MULTIQ3_TIMEOUT)
			return -ETIMEDOUT;

		/* result is read as two bytes, MSB first; the +0x1000
		 * offset maps the two's-complement 13-bit result into
		 * comedi's unsigned 0..0x1fff range */
		hi = inb(dev->iobase + MULTIQ3_AD_CS);
		lo = inb(dev->iobase + MULTIQ3_AD_CS);
		data[n] = (((hi << 8) | lo) + 0x1000) & 0x1fff;
	}

	return n;
}
/*
 * Analog output readback.
 *
 * The DAC registers cannot be read back from hardware, so return the
 * value cached by the last multiq3_ao_insn_write() for this channel.
 */
static int multiq3_ao_insn_read(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);
	int n;

	for (n = 0; n < insn->n; n++)
		data[n] = devpriv->ao_readback[chan];

	return n;
}
/*
 * Analog output write.  Loads the DAC for the addressed channel and
 * caches the value for later readback.
 */
static int multiq3_ao_insn_write(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn, unsigned int *data)
{
	int i;
	int chan = CR_CHAN(insn->chanspec);

	for (i = 0; i < insn->n; i++) {
		/* select the DAC channel with load enabled */
		outw(MULTIQ3_CONTROL_MUST | MULTIQ3_DA_LOAD | chan,
		     dev->iobase + MULTIQ3_CONTROL);
		outw(data[i], dev->iobase + MULTIQ3_DAC_DATA);
		/* drop the load bits to latch the value */
		outw(MULTIQ3_CONTROL_MUST, dev->iobase + MULTIQ3_CONTROL);
		devpriv->ao_readback[chan] = data[i];
	}

	return i;
}
/*
 * Digital input "bits" instruction: returns the state of all 16 input
 * lines in data[1].  data[0] (the mask) is ignored for inputs.
 */
static int multiq3_di_insn_bits(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	/* comedi DI/DO "bits" instructions always carry two data words */
	if (insn->n != 2)
		return -EINVAL;

	data[1] = inw(dev->iobase + MULTIQ3_DIGIN_PORT);

	return insn->n;
}
/*
 * Digital output "bits" instruction: updates the output lines selected
 * by the mask in data[0] to the values in data[1], writes the new
 * state to the port, and returns the state in data[1].
 */
static int multiq3_do_insn_bits(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data)
{
	unsigned int mask;
	unsigned int bits;

	if (insn->n != 2)
		return -EINVAL;

	mask = data[0];
	bits = data[1];

	/* touch only the channels selected by the mask */
	s->state = (s->state & ~mask) | (mask & bits);
	outw(s->state, dev->iobase + MULTIQ3_DIGOUT_PORT);

	data[1] = s->state;

	return insn->n;
}
/*
 * Encoder (counter) read.
 *
 * For each sample: select the encoder channel, reset the counter's
 * internal byte pointer, latch the count into the output latch, then
 * read the 24-bit value one byte at a time (LSB first).  The 0x800000
 * offset maps the signed count into comedi's unsigned range.
 */
static int multiq3_encoder_insn_read(struct comedi_device *dev,
				     struct comedi_subdevice *s,
				     struct comedi_insn *insn,
				     unsigned int *data)
{
	int n;
	int chan = CR_CHAN(insn->chanspec);
	int control = MULTIQ3_CONTROL_MUST | MULTIQ3_AD_MUX_EN | (chan << 3);

	for (n = 0; n < insn->n; n++) {
		int value;

		outw(control, dev->iobase + MULTIQ3_CONTROL);
		outb(MULTIQ3_BP_RESET, dev->iobase + MULTIQ3_ENC_CONTROL);
		/* NOTE(review): the output-latch transfer is issued after
		 * the byte-pointer reset; verify this ordering against
		 * the counter chip (LS7266-class) datasheet. */
		outb(MULTIQ3_TRSFRCNTR_OL, dev->iobase + MULTIQ3_ENC_CONTROL);
		value = inb(dev->iobase + MULTIQ3_ENC_DATA);
		value |= (inb(dev->iobase + MULTIQ3_ENC_DATA) << 8);
		value |= (inb(dev->iobase + MULTIQ3_ENC_DATA) << 16);
		data[n] = (value + 0x800000) & 0xffffff;
	}

	return n;
}
/*
 * Reset and configure every encoder channel: clear error flags and the
 * byte pointer, program the clock prescaler, set quadrature x4 input
 * mode, then zero the counter.
 */
static void encoder_reset(struct comedi_device *dev)
{
	int chan;

	/* subdevices[4] is the encoder subdevice set up in multiq3_attach */
	for (chan = 0; chan < dev->subdevices[4].n_chan; chan++) {
		int control =
		    MULTIQ3_CONTROL_MUST | MULTIQ3_AD_MUX_EN | (chan << 3);
		outw(control, dev->iobase + MULTIQ3_CONTROL);
		outb(MULTIQ3_EFLAG_RESET, dev->iobase + MULTIQ3_ENC_CONTROL);
		outb(MULTIQ3_BP_RESET, dev->iobase + MULTIQ3_ENC_CONTROL);
		outb(MULTIQ3_CLOCK_DATA, dev->iobase + MULTIQ3_ENC_DATA);
		outb(MULTIQ3_CLOCK_SETUP, dev->iobase + MULTIQ3_ENC_CONTROL);
		outb(MULTIQ3_INPUT_SETUP, dev->iobase + MULTIQ3_ENC_CONTROL);
		outb(MULTIQ3_QUAD_X4, dev->iobase + MULTIQ3_ENC_CONTROL);
		outb(MULTIQ3_CNTR_RESET, dev->iobase + MULTIQ3_ENC_CONTROL);
	}
}
/*
options[0] - I/O port
options[1] - irq
options[2] - number of encoder chips installed
*/
static int multiq3_attach(struct comedi_device *dev,
struct comedi_devconfig *it)
{
int result = 0;
unsigned long iobase;
unsigned int irq;
struct comedi_subdevice *s;
iobase = it->options[0];
printk(KERN_INFO "comedi%d: multiq3: 0x%04lx ", dev->minor, iobase);
if (!request_region(iobase, MULTIQ3_SIZE, "multiq3")) {
printk(KERN_ERR "comedi%d: I/O port conflict\n", dev->minor);
return -EIO;
}
dev->iobase = iobase;
irq = it->options[1];
if (irq)
printk(KERN_WARNING "comedi%d: irq = %u ignored\n",
dev->minor, irq);
else
printk(KERN_WARNING "comedi%d: no irq\n", dev->minor);
dev->board_name = "multiq3";
result = alloc_subdevices(dev, 5);
if (result < 0)
return result;
result = alloc_private(dev, sizeof(struct multiq3_private));
if (result < 0)
return result;
s = dev->subdevices + 0;
/* ai subdevice */
s->type = COMEDI_SUBD_AI;
s->subdev_flags = SDF_READABLE | SDF_GROUND;
s->n_chan = 8;
s->insn_read = multiq3_ai_insn_read;
s->maxdata = 0x1fff;
s->range_table = &range_bipolar5;
s = dev->subdevices + 1;
/* ao subdevice */
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 8;
s->insn_read = multiq3_ao_insn_read;
s->insn_write = multiq3_ao_insn_write;
s->maxdata = 0xfff;
s->range_table = &range_bipolar5;
s = dev->subdevices + 2;
/* di subdevice */
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 16;
s->insn_bits = multiq3_di_insn_bits;
s->maxdata = 1;
s->range_table = &range_digital;
s = dev->subdevices + 3;
/* do subdevice */
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITABLE;
s->n_chan = 16;
s->insn_bits = multiq3_do_insn_bits;
s->maxdata = 1;
s->range_table = &range_digital;
s->state = 0;
s = dev->subdevices + 4;
/* encoder (counter) subdevice */
s->type = COMEDI_SUBD_COUNTER;
s->subdev_flags = SDF_READABLE | SDF_LSAMPL;
s->n_chan = it->options[2] * 2;
s->insn_read = multiq3_encoder_insn_read;
s->maxdata = 0xffffff;
s->range_table = &range_unknown;
encoder_reset(dev);
return 0;
}
/*
 * Board detach.  Also called by the comedi core on a failed attach, so
 * each resource is checked before being released.
 */
static int multiq3_detach(struct comedi_device *dev)
{
	printk(KERN_INFO "comedi%d: multiq3: remove\n", dev->minor);

	if (dev->iobase)
		release_region(dev->iobase, MULTIQ3_SIZE);
	/* no irq is ever requested by this driver; kept for safety */
	if (dev->irq)
		free_irq(dev->irq, dev);

	return 0;
}
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
santod/NuK3rn3l_htc_m7_GPE-5.1 | arch/powerpc/mm/tlb_hash32.c | 9050 | 4581 | /*
* This file contains the routines for TLB flushing.
* On machines where the MMU uses a hash table to store virtual to
* physical translations, these routines flush entries from the
* hash table also.
* -- paulus
*
* Derived from arch/ppc/mm/init.c:
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
*
* Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
* and Cort Dougan (PReP) (cort@cs.nmt.edu)
* Copyright (C) 1996 Paul Mackerras
*
* Derived from "arch/i386/mm/init.c"
* Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/export.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include "mmu_decl.h"
/*
* Called when unmapping pages to flush entries from the TLB/hash table.
*/
/*
 * Flush the hash-table entry (and thereby the TLB entry) for one page
 * when its PTE is being torn down.  On hashless (603-class) cores,
 * Hash is 0 and there is nothing to do here.
 */
void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
{
	unsigned long ptephys;

	if (Hash != 0) {
		ptephys = __pa(ptep) & PAGE_MASK;
		flush_hash_pages(mm->context.id, addr, ptephys, 1);
	}
}
EXPORT_SYMBOL(flush_hash_entry);
/*
* Called by ptep_set_access_flags, must flush on CPUs for which the
* DSI handler can't just "fixup" the TLB on a write fault
*/
/*
 * Called by ptep_set_access_flags.  Only hashless cores need an
 * explicit tlbie here; on hash-MMU cores the DSI handler fixes up the
 * TLB on a write fault, so this is a no-op.
 */
void flush_tlb_page_nohash(struct vm_area_struct *vma, unsigned long addr)
{
	if (Hash == 0)
		_tlbie(addr);
}
/*
* Called at the end of a mmu_gather operation to make sure the
* TLB flush is completely done.
*/
/*
 * Finish an mmu_gather operation.  Hash-MMU cores have already flushed
 * entries page by page; hashless (603-class) cores must flush the
 * whole TLB here instead.
 */
void tlb_flush(struct mmu_gather *tlb)
{
	if (Hash != 0)
		return;

	_tlbia();
}
/*
* TLB flushing:
*
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes kernel pages
*
* since the hardware hash table functions as an extension of the
* tlb as far as the linux tables are concerned, flush it too.
* -- Cort
*/
/*
 * Flush the hash-table (and TLB) entries for all pages of 'mm' in
 * [start, end], walking the page tables one pmd at a time so each
 * flush_hash_pages() call covers a contiguous run within one pmd.
 * Hashless cores just flush the whole TLB.
 */
static void flush_range(struct mm_struct *mm, unsigned long start,
			unsigned long end)
{
	pmd_t *pmd;
	unsigned long pmd_end;
	int count;
	unsigned int ctx = mm->context.id;

	if (Hash == 0) {
		_tlbia();
		return;
	}
	start &= PAGE_MASK;
	if (start >= end)
		return;
	/* round end up to the last byte of its page */
	end = (end - 1) | ~PAGE_MASK;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, start), start), start);
	for (;;) {
		/* last byte covered by this pmd, clamped to 'end' */
		pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
		if (pmd_end > end)
			pmd_end = end;
		if (!pmd_none(*pmd)) {
			count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
			flush_hash_pages(ctx, start, pmd_val(*pmd), count);
		}
		if (pmd_end == end)
			break;
		start = pmd_end + 1;
		++pmd;
	}
}
/*
* Flush kernel TLB entries in the given range
*/
/*
 * Flush kernel TLB entries in the given range (kernel mappings live in
 * init_mm).
 */
void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	flush_range(&init_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_kernel_range);
/*
* Flush all the (user) entries for the address space described by mm.
*/
/*
 * Flush all the (user) entries for the address space described by mm,
 * by flushing each mapped vma range in turn.  Hashless cores flush the
 * whole TLB instead.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	struct vm_area_struct *mp;

	if (Hash == 0) {
		_tlbia();
		return;
	}

	/*
	 * It is safe to go down the mm's list of vmas when called
	 * from dup_mmap, holding mmap_sem. It would also be safe from
	 * unmap_region or exit_mmap, but not from vmtruncate on SMP -
	 * but it seems dup_mmap is the only SMP case which gets here.
	 */
	for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
		flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
}
EXPORT_SYMBOL(flush_tlb_mm);
/*
 * Flush the entry for a single page.  Kernel addresses are looked up
 * in init_mm rather than the vma's mm.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
	struct mm_struct *mm;
	pmd_t *pmd;

	if (Hash == 0) {
		_tlbie(vmaddr);
		return;
	}
	mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, vmaddr), vmaddr), vmaddr);
	if (!pmd_none(*pmd))
		flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
}
EXPORT_SYMBOL(flush_tlb_page);
/*
* For each address in the range, find the pte for the address
* and check _PAGE_HASHPTE bit; if it is set, find and destroy
* the corresponding HPTE.
*/
/*
 * For each address in the range, find the pte for the address
 * and check _PAGE_HASHPTE bit; if it is set, find and destroy
 * the corresponding HPTE.
 */
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end)
{
	flush_range(vma->vm_mm, start, end);
}
EXPORT_SYMBOL(flush_tlb_range);

/* Nothing to do at early MMU init on hash32. */
void __init early_init_mmu(void)
{
}
| gpl-2.0 |
tobw/linux-curie | arch/arm/mach-ux500/clock.c | 91 | 18323 | /*
* Copyright (C) 2009 ST-Ericsson
* Copyright (C) 2009 STMicroelectronics
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/clkdev.h>
#include <linux/cpufreq.h>
#include <plat/mtu.h>
#include <mach/hardware.h>
#include "clock.h"
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h> /* for copy_from_user */
static LIST_HEAD(clk_list);
#endif
#define PRCC_PCKEN 0x00
#define PRCC_PCKDIS 0x04
#define PRCC_KCKEN 0x08
#define PRCC_KCKDIS 0x0C
#define PRCM_YYCLKEN0_MGT_SET 0x510
#define PRCM_YYCLKEN1_MGT_SET 0x514
#define PRCM_YYCLKEN0_MGT_CLR 0x518
#define PRCM_YYCLKEN1_MGT_CLR 0x51C
#define PRCM_YYCLKEN0_MGT_VAL 0x520
#define PRCM_YYCLKEN1_MGT_VAL 0x524
#define PRCM_SVAMMDSPCLK_MGT 0x008
#define PRCM_SIAMMDSPCLK_MGT 0x00C
#define PRCM_SGACLK_MGT 0x014
#define PRCM_UARTCLK_MGT 0x018
#define PRCM_MSP02CLK_MGT 0x01C
#define PRCM_MSP1CLK_MGT 0x288
#define PRCM_I2CCLK_MGT 0x020
#define PRCM_SDMMCCLK_MGT 0x024
#define PRCM_SLIMCLK_MGT 0x028
#define PRCM_PER1CLK_MGT 0x02C
#define PRCM_PER2CLK_MGT 0x030
#define PRCM_PER3CLK_MGT 0x034
#define PRCM_PER5CLK_MGT 0x038
#define PRCM_PER6CLK_MGT 0x03C
#define PRCM_PER7CLK_MGT 0x040
#define PRCM_LCDCLK_MGT 0x044
#define PRCM_BMLCLK_MGT 0x04C
#define PRCM_HSITXCLK_MGT 0x050
#define PRCM_HSIRXCLK_MGT 0x054
#define PRCM_HDMICLK_MGT 0x058
#define PRCM_APEATCLK_MGT 0x05C
#define PRCM_APETRACECLK_MGT 0x060
#define PRCM_MCDECLK_MGT 0x064
#define PRCM_IPI2CCLK_MGT 0x068
#define PRCM_DSIALTCLK_MGT 0x06C
#define PRCM_DMACLK_MGT 0x074
#define PRCM_B2R2CLK_MGT 0x078
#define PRCM_TVCLK_MGT 0x07C
#define PRCM_TCR 0x1C8
#define PRCM_TCR_STOPPED (1 << 16)
#define PRCM_TCR_DOZE_MODE (1 << 17)
#define PRCM_UNIPROCLK_MGT 0x278
#define PRCM_SSPCLK_MGT 0x280
#define PRCM_RNGCLK_MGT 0x284
#define PRCM_UICCCLK_MGT 0x27C
#define PRCM_MGT_ENABLE (1 << 8)
static DEFINE_SPINLOCK(clocks_lock);
/*
 * Enable 'clk', recursively enabling its cluster and peripheral
 * parents on the 0 -> 1 refcount transition.
 * Caller must hold clocks_lock.
 */
static void __clk_enable(struct clk *clk)
{
	if (clk->enabled++ == 0) {
		if (clk->parent_cluster)
			__clk_enable(clk->parent_cluster);

		if (clk->parent_periph)
			__clk_enable(clk->parent_periph);

		if (clk->ops && clk->ops->enable)
			clk->ops->enable(clk);
	}
}

/* Public entry point: takes clocks_lock and always returns 0. */
int clk_enable(struct clk *clk)
{
	unsigned long flags;

	spin_lock_irqsave(&clocks_lock, flags);
	__clk_enable(clk);
	spin_unlock_irqrestore(&clocks_lock, flags);

	return 0;
}
EXPORT_SYMBOL(clk_enable);

/*
 * Drop one reference; on the 1 -> 0 transition gate the clock and then
 * release the parents in reverse order of __clk_enable.
 * Caller must hold clocks_lock.
 */
static void __clk_disable(struct clk *clk)
{
	if (--clk->enabled == 0) {
		if (clk->ops && clk->ops->disable)
			clk->ops->disable(clk);

		if (clk->parent_periph)
			__clk_disable(clk->parent_periph);

		if (clk->parent_cluster)
			__clk_disable(clk->parent_cluster);
	}
}

/* Public entry point; warns on unbalanced disable. */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	WARN_ON(!clk->enabled);

	spin_lock_irqsave(&clocks_lock, flags);
	__clk_disable(clk);
	spin_unlock_irqrestore(&clocks_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
/*
* The MTU has a separate, rather complex muxing setup
* with alternative parents (peripheral cluster or
* ULP or fixed 32768 Hz) depending on settings
*/
/*
 * Rate callback for the MTU clocks.  The MTU source is selected by the
 * PRCM_TCR register: either the always-running 32 kHz reference or the
 * parent (cluster/peripheral) clock.  clk->data holds the MTU index.
 */
static unsigned long clk_mtu_get_rate(struct clk *clk)
{
	void __iomem *addr;
	u32 tcr;
	int mtu = (int) clk->data;
	/*
	 * One of these is selected eventually
	 * TODO: Replace the constant with a reference
	 * to the ULP source once this is modeled.
	 */
	unsigned long clk32k = 32768;
	unsigned long mturate;
	unsigned long retclk;

	if (cpu_is_u5500())
		addr = __io_address(U5500_PRCMU_BASE);
	else if (cpu_is_u8500())
		addr = __io_address(U8500_PRCMU_BASE);
	else
		ux500_unknown_soc();

	/*
	 * On a startup, always configure the TCR to the doze mode;
	 * bootloaders do it for us. Do this in the kernel too.
	 */
	writel(PRCM_TCR_DOZE_MODE, addr + PRCM_TCR);

	tcr = readl(addr + PRCM_TCR);

	/* Get the rate from the parent as a default */
	if (clk->parent_periph)
		mturate = clk_get_rate(clk->parent_periph);
	else if (clk->parent_cluster)
		mturate = clk_get_rate(clk->parent_cluster);
	else
		/* We need to be connected SOMEWHERE */
		BUG();

	/* Return the clock selected for this MTU */
	if (tcr & (1 << mtu))
		retclk = clk32k;
	else
		retclk = mturate;

	pr_info("MTU%d clock rate: %lu Hz\n", mtu, retclk);
	return retclk;
}
/*
 * Return the rate of 'clk' in Hz.
 *
 * Precedence: per-clock get_rate callback, then the clkops get_rate,
 * then the cached rate, and finally the parent clocks (peripheral
 * parent first, then cluster parent).  Returns 0 if none is set.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	unsigned long rate;

	/* A per-clock callback takes precedence over everything else. */
	if (clk->get_rate)
		return clk->get_rate(clk);
	if (clk->ops && clk->ops->get_rate)
		return clk->ops->get_rate(clk);

	rate = clk->rate;
	if (rate)
		return rate;

	/* No cached rate: inherit from a parent, if any. */
	if (clk->parent_periph)
		return clk_get_rate(clk->parent_periph);
	if (clk->parent_cluster)
		return clk_get_rate(clk->parent_cluster);

	return 0;
}
EXPORT_SYMBOL(clk_get_rate);
/* Rate rounding is not implemented; the requested rate is echoed. */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
	/*TODO*/
	return rate;
}
EXPORT_SYMBOL(clk_round_rate);

/*
 * Only records the requested rate in the clk structure; no hardware
 * divider is programmed here.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	clk->rate = rate;
	return 0;
}
EXPORT_SYMBOL(clk_set_rate);
/*
 * PRCMU-level clock gating: each clock has a bank offset
 * (prcmu_cg_off) and a bit (prcmu_cg_bit); enabling writes the bit to
 * the bank's SET register, disabling to its CLR register.
 */
static void clk_prcmu_enable(struct clk *clk)
{
	void __iomem *cg_set_reg = __io_address(U8500_PRCMU_BASE)
				   + PRCM_YYCLKEN0_MGT_SET + clk->prcmu_cg_off;

	writel(1 << clk->prcmu_cg_bit, cg_set_reg);
}

static void clk_prcmu_disable(struct clk *clk)
{
	void __iomem *cg_clr_reg = __io_address(U8500_PRCMU_BASE)
				   + PRCM_YYCLKEN0_MGT_CLR + clk->prcmu_cg_off;

	writel(1 << clk->prcmu_cg_bit, cg_clr_reg);
}

static struct clkops clk_prcmu_ops = {
	.enable = clk_prcmu_enable,
	.disable = clk_prcmu_disable,
};
/* Physical base addresses of the per-cluster PRCC clock/reset blocks,
 * indexed by cluster number (cluster 4 has no PRCC; index 0 unused). */
static unsigned int clkrst_base[] = {
	[1] = U8500_CLKRST1_BASE,
	[2] = U8500_CLKRST2_BASE,
	[3] = U8500_CLKRST3_BASE,
	[5] = U8500_CLKRST5_BASE,
	[6] = U8500_CLKRST6_BASE,
};
/*
 * PRCC-level clock gating.  A clock may have a kernel clock bit
 * (prcc_kernel) and/or a bus clock bit (prcc_bus); -1 means "not
 * present".  Enable turns on the kernel clock before the bus clock;
 * disable reverses that order.
 */
static void clk_prcc_enable(struct clk *clk)
{
	void __iomem *addr = __io_address(clkrst_base[clk->cluster]);

	if (clk->prcc_kernel != -1)
		writel(1 << clk->prcc_kernel, addr + PRCC_KCKEN);

	if (clk->prcc_bus != -1)
		writel(1 << clk->prcc_bus, addr + PRCC_PCKEN);
}

static void clk_prcc_disable(struct clk *clk)
{
	void __iomem *addr = __io_address(clkrst_base[clk->cluster]);

	if (clk->prcc_bus != -1)
		writel(1 << clk->prcc_bus, addr + PRCC_PCKDIS);

	if (clk->prcc_kernel != -1)
		writel(1 << clk->prcc_kernel, addr + PRCC_KCKDIS);
}

static struct clkops clk_prcc_ops = {
	.enable = clk_prcc_enable,
	.disable = clk_prcc_disable,
};
/* Always-on 32 kHz reference.  NOTE(review): modeled as 32000 Hz here,
 * not 32768 — confirm which value the hardware actually provides. */
static struct clk clk_32khz = {
	.name = "clk_32khz",
	.rate = 32000,
};

/*
 * PRCMU level clock gating
 */

/* Bank 0 */
static DEFINE_PRCMU_CLK(svaclk, 0x0, 2, SVAMMDSPCLK);
static DEFINE_PRCMU_CLK(siaclk, 0x0, 3, SIAMMDSPCLK);
static DEFINE_PRCMU_CLK(sgaclk, 0x0, 4, SGACLK);
static DEFINE_PRCMU_CLK_RATE(uartclk, 0x0, 5, UARTCLK, 38400000);
static DEFINE_PRCMU_CLK(msp02clk, 0x0, 6, MSP02CLK);
static DEFINE_PRCMU_CLK(msp1clk, 0x0, 7, MSP1CLK); /* v1 */
static DEFINE_PRCMU_CLK_RATE(i2cclk, 0x0, 8, I2CCLK, 48000000);
static DEFINE_PRCMU_CLK_RATE(sdmmcclk, 0x0, 9, SDMMCCLK, 100000000);
static DEFINE_PRCMU_CLK(slimclk, 0x0, 10, SLIMCLK);
static DEFINE_PRCMU_CLK(per1clk, 0x0, 11, PER1CLK);
static DEFINE_PRCMU_CLK(per2clk, 0x0, 12, PER2CLK);
static DEFINE_PRCMU_CLK(per3clk, 0x0, 13, PER3CLK);
static DEFINE_PRCMU_CLK(per5clk, 0x0, 14, PER5CLK);
static DEFINE_PRCMU_CLK_RATE(per6clk, 0x0, 15, PER6CLK, 133330000);
static DEFINE_PRCMU_CLK(lcdclk, 0x0, 17, LCDCLK);
static DEFINE_PRCMU_CLK(bmlclk, 0x0, 18, BMLCLK);
static DEFINE_PRCMU_CLK(hsitxclk, 0x0, 19, HSITXCLK);
static DEFINE_PRCMU_CLK(hsirxclk, 0x0, 20, HSIRXCLK);
static DEFINE_PRCMU_CLK(hdmiclk, 0x0, 21, HDMICLK);
static DEFINE_PRCMU_CLK(apeatclk, 0x0, 22, APEATCLK);
static DEFINE_PRCMU_CLK(apetraceclk, 0x0, 23, APETRACECLK);
static DEFINE_PRCMU_CLK(mcdeclk, 0x0, 24, MCDECLK);
static DEFINE_PRCMU_CLK(ipi2clk, 0x0, 25, IPI2CCLK);
static DEFINE_PRCMU_CLK(dsialtclk, 0x0, 26, DSIALTCLK); /* v1 */
static DEFINE_PRCMU_CLK(dmaclk, 0x0, 27, DMACLK);
static DEFINE_PRCMU_CLK(b2r2clk, 0x0, 28, B2R2CLK);
static DEFINE_PRCMU_CLK(tvclk, 0x0, 29, TVCLK);
static DEFINE_PRCMU_CLK(uniproclk, 0x0, 30, UNIPROCLK); /* v1 */
static DEFINE_PRCMU_CLK_RATE(sspclk, 0x0, 31, SSPCLK, 48000000); /* v1 */

/* Bank 1 */
static DEFINE_PRCMU_CLK(rngclk, 0x4, 0, RNGCLK); /* v1 */
static DEFINE_PRCMU_CLK(uiccclk, 0x4, 1, UICCCLK); /* v1 */

/*
 * PRCC level clock gating
 * Format: per#, clk, PCKEN bit, KCKEN bit, parent
 * (a bit value of -1 means the clock has no such gate)
 */

/* Peripheral Cluster #1 */
static DEFINE_PRCC_CLK(1, i2c4, 10, 9, &clk_i2cclk);
static DEFINE_PRCC_CLK(1, gpio0, 9, -1, NULL);
static DEFINE_PRCC_CLK(1, slimbus0, 8, 8, &clk_slimclk);
static DEFINE_PRCC_CLK(1, spi3, 7, -1, NULL);
static DEFINE_PRCC_CLK(1, i2c2, 6, 6, &clk_i2cclk);
static DEFINE_PRCC_CLK(1, sdi0, 5, 5, &clk_sdmmcclk);
static DEFINE_PRCC_CLK(1, msp1, 4, 4, &clk_msp1clk);
static DEFINE_PRCC_CLK(1, msp0, 3, 3, &clk_msp02clk);
static DEFINE_PRCC_CLK(1, i2c1, 2, 2, &clk_i2cclk);
static DEFINE_PRCC_CLK(1, uart1, 1, 1, &clk_uartclk);
static DEFINE_PRCC_CLK(1, uart0, 0, 0, &clk_uartclk);

/* Peripheral Cluster #2 */
static DEFINE_PRCC_CLK(2, gpio1, 11, -1, NULL);
static DEFINE_PRCC_CLK(2, ssitx, 10, 7, NULL);
static DEFINE_PRCC_CLK(2, ssirx, 9, 6, NULL);
static DEFINE_PRCC_CLK(2, spi0, 8, -1, NULL);
static DEFINE_PRCC_CLK(2, sdi3, 7, 5, &clk_sdmmcclk);
static DEFINE_PRCC_CLK(2, sdi1, 6, 4, &clk_sdmmcclk);
static DEFINE_PRCC_CLK(2, msp2, 5, 3, &clk_msp02clk);
static DEFINE_PRCC_CLK(2, sdi4, 4, 2, &clk_sdmmcclk);
static DEFINE_PRCC_CLK(2, pwl, 3, 1, NULL);
static DEFINE_PRCC_CLK(2, spi1, 2, -1, NULL);
static DEFINE_PRCC_CLK(2, spi2, 1, -1, NULL);
static DEFINE_PRCC_CLK(2, i2c3, 0, 0, &clk_i2cclk);

/* Peripheral Cluster #3 */
static DEFINE_PRCC_CLK(3, gpio2, 8, -1, NULL);
static DEFINE_PRCC_CLK(3, sdi5, 7, 7, &clk_sdmmcclk);
static DEFINE_PRCC_CLK(3, uart2, 6, 6, &clk_uartclk);
static DEFINE_PRCC_CLK(3, ske, 5, 5, &clk_32khz);
static DEFINE_PRCC_CLK(3, sdi2, 4, 4, &clk_sdmmcclk);
static DEFINE_PRCC_CLK(3, i2c0, 3, 3, &clk_i2cclk);
static DEFINE_PRCC_CLK(3, ssp1, 2, 2, &clk_sspclk);
static DEFINE_PRCC_CLK(3, ssp0, 1, 1, &clk_sspclk);
static DEFINE_PRCC_CLK(3, fsmc, 0, -1, NULL);

/* Peripheral Cluster #4 is in the always on domain */

/* Peripheral Cluster #5 */
static DEFINE_PRCC_CLK(5, gpio3, 1, -1, NULL);
static DEFINE_PRCC_CLK(5, usb, 0, 0, NULL);

/* Peripheral Cluster #6 */

/* MTU ID in data */
static DEFINE_PRCC_CLK_CUSTOM(6, mtu1, 8, -1, NULL, clk_mtu_get_rate, 1);
static DEFINE_PRCC_CLK_CUSTOM(6, mtu0, 7, -1, NULL, clk_mtu_get_rate, 0);
static DEFINE_PRCC_CLK(6, cfgreg, 6, 6, NULL);
static DEFINE_PRCC_CLK(6, hash1, 5, -1, NULL);
static DEFINE_PRCC_CLK(6, unipro, 4, 1, &clk_uniproclk);
static DEFINE_PRCC_CLK(6, pka, 3, -1, NULL);
static DEFINE_PRCC_CLK(6, hash0, 2, -1, NULL);
static DEFINE_PRCC_CLK(6, cryp0, 1, -1, NULL);
static DEFINE_PRCC_CLK(6, rng, 0, 0, &clk_rngclk);

/* Dummy clock for AMBA bus drivers that expect an "apb_pclk". */
static struct clk clk_dummy_apb_pclk = {
	.name = "apb_pclk",
};
/* clkdev lookup table mapping device names to clocks. */
static struct clk_lookup u8500_clks[] = {
	CLK(dummy_apb_pclk, NULL, "apb_pclk"),

	/* Peripheral Cluster #1 */
	CLK(gpio0, "gpio.0", NULL),
	CLK(gpio0, "gpio.1", NULL),
	CLK(slimbus0, "slimbus0", NULL),
	CLK(i2c2, "nmk-i2c.2", NULL),
	CLK(sdi0, "sdi0", NULL),
	CLK(msp0, "msp0", NULL),
	CLK(i2c1, "nmk-i2c.1", NULL),
	CLK(uart1, "uart1", NULL),
	CLK(uart0, "uart0", NULL),

	/* Peripheral Cluster #3 */
	CLK(gpio2, "gpio.2", NULL),
	CLK(gpio2, "gpio.3", NULL),
	CLK(gpio2, "gpio.4", NULL),
	CLK(gpio2, "gpio.5", NULL),
	CLK(sdi5, "sdi5", NULL),
	CLK(uart2, "uart2", NULL),
	CLK(ske, "ske", NULL),
	CLK(ske, "nmk-ske-keypad", NULL),
	CLK(sdi2, "sdi2", NULL),
	CLK(i2c0, "nmk-i2c.0", NULL),
	CLK(fsmc, "fsmc", NULL),

	/* Peripheral Cluster #5 */
	CLK(gpio3, "gpio.8", NULL),

	/* Peripheral Cluster #6 */
	/* NOTE(review): "hash1" is registered again further down. */
	CLK(hash1, "hash1", NULL),
	CLK(pka, "pka", NULL),
	CLK(hash0, "hash0", NULL),
	CLK(cryp0, "cryp0", NULL),

	/* PRCMU level clock gating */
	/* Bank 0 */
	CLK(svaclk, "sva", NULL),
	CLK(siaclk, "sia", NULL),
	CLK(sgaclk, "sga", NULL),
	CLK(slimclk, "slim", NULL),
	CLK(lcdclk, "lcd", NULL),
	CLK(bmlclk, "bml", NULL),
	CLK(hsitxclk, "stm-hsi.0", NULL),
	CLK(hsirxclk, "stm-hsi.1", NULL),
	CLK(hdmiclk, "hdmi", NULL),
	CLK(apeatclk, "apeat", NULL),
	CLK(apetraceclk, "apetrace", NULL),
	CLK(mcdeclk, "mcde", NULL),
	CLK(ipi2clk, "ipi2", NULL),
	CLK(dmaclk, "dma40.0", NULL),
	CLK(b2r2clk, "b2r2", NULL),
	CLK(tvclk, "tv", NULL),

	/* Peripheral Cluster #1 */
	CLK(i2c4, "nmk-i2c.4", NULL),
	CLK(spi3, "spi3", NULL),
	CLK(msp1, "msp1", NULL),

	/* Peripheral Cluster #2 */
	CLK(gpio1, "gpio.6", NULL),
	CLK(gpio1, "gpio.7", NULL),
	CLK(ssitx, "ssitx", NULL),
	CLK(ssirx, "ssirx", NULL),
	CLK(spi0, "spi0", NULL),
	CLK(sdi3, "sdi3", NULL),
	CLK(sdi1, "sdi1", NULL),
	CLK(msp2, "msp2", NULL),
	CLK(sdi4, "sdi4", NULL),
	CLK(pwl, "pwl", NULL),
	CLK(spi1, "spi1", NULL),
	CLK(spi2, "spi2", NULL),
	CLK(i2c3, "nmk-i2c.3", NULL),

	/* Peripheral Cluster #3 */
	CLK(ssp1, "ssp1", NULL),
	CLK(ssp0, "ssp0", NULL),

	/* Peripheral Cluster #5 */
	CLK(usb, "musb-ux500.0", "usb"),

	/* Peripheral Cluster #6 */
	CLK(mtu1, "mtu1", NULL),
	CLK(mtu0, "mtu0", NULL),
	CLK(cfgreg, "cfgreg", NULL),
	CLK(hash1, "hash1", NULL),
	CLK(unipro, "unipro", NULL),
	/* NOTE(review): both the PRCC "rng" and the PRCMU "rngclk"
	 * below are registered under the device name "rng". */
	CLK(rng, "rng", NULL),

	/* PRCMU level clock gating */
	/* Bank 0 */
	CLK(uniproclk, "uniproclk", NULL),
	CLK(dsialtclk, "dsialt", NULL),

	/* Bank 1 */
	CLK(rngclk, "rng", NULL),
	CLK(uiccclk, "uicc", NULL),
};
#ifdef CONFIG_DEBUG_FS
/*
* debugfs support to trace clock tree hierarchy and attributes with
* powerdebug
*/
static struct dentry *clk_debugfs_root;

/*
 * Queue every clock from a clkdev table onto clk_list so that
 * clk_debugfs_init() can later create its debugfs entries.
 */
void __init clk_debugfs_add_table(struct clk_lookup *cl, size_t num)
{
	for (; num; num--, cl++) {
		/* Skip clocks whose list node is already linked in. */
		if (cl->clk->list.prev == cl->clk->list.next)
			list_add_tail(&cl->clk->list, &clk_list);
	}
}
/*
 * debugfs read for a clock's "usecount" file: reports the current
 * enable refcount as a decimal line.
 */
static ssize_t usecount_dbg_read(struct file *file, char __user *buf,
				 size_t size, loff_t *off)
{
	struct clk *clk = file->f_dentry->d_inode->i_private;
	char cusecount[128];
	unsigned int len;

	/* snprintf bounds the write to the stack buffer (sprintf did not) */
	len = snprintf(cusecount, sizeof(cusecount), "%u\n", clk->enabled);
	return simple_read_from_buffer(buf, size, off, cusecount, len);
}
/*
 * debugfs read for a clock's "rate" file: reports the current rate in
 * Hz as a decimal line.
 */
static ssize_t rate_dbg_read(struct file *file, char __user *buf,
			     size_t size, loff_t *off)
{
	struct clk *clk = file->f_dentry->d_inode->i_private;
	char crate[128];
	unsigned int rate;
	unsigned int len;

	rate = clk_get_rate(clk);
	/* snprintf bounds the write to the stack buffer (sprintf did not) */
	len = snprintf(crate, sizeof(crate), "%u\n", rate);
	return simple_read_from_buffer(buf, size, off, crate, len);
}
static const struct file_operations usecount_fops = {
	.read = usecount_dbg_read,
};

/* NOTE(review): read-only despite the "set_rate" name; no .write op. */
static const struct file_operations set_rate_fops = {
	.read = rate_dbg_read,
};
/*
 * Create a debugfs directory for clock 'c' under 'p_dentry', with
 * read-only "usecount" and "rate" files.  Returns the directory
 * dentry, or NULL on failure (partially created entries are removed).
 */
static struct dentry *clk_debugfs_register_dir(struct clk *c,
					       struct dentry *p_dentry)
{
	struct dentry *d, *clk_d;
	const char *p = c->name;

	if (!p)
		p = "BUG";

	clk_d = debugfs_create_dir(p, p_dentry);
	if (!clk_d)
		return NULL;

	d = debugfs_create_file("usecount", S_IRUGO,
				clk_d, c, &usecount_fops);
	if (!d)
		goto err_out;
	d = debugfs_create_file("rate", S_IRUGO,
				clk_d, c, &set_rate_fops);
	if (!d)
		goto err_out;
	/*
	 * TODO : not currently available in ux500
	 * d = debugfs_create_x32("flags", S_IRUGO, clk_d, (u32 *)&c->flags);
	 * if (!d)
	 *	goto err_out;
	 */

	return clk_d;

err_out:
	debugfs_remove_recursive(clk_d);
	return NULL;
}
/*
 * Create the debugfs entries for one clock.  A clock may appear twice:
 * 'dent' under its peripheral parent (or the root, when it has no
 * cluster-only parent) and 'dent_bus' under its cluster parent.  On
 * failure of the second dir the first is rolled back.
 */
static int clk_debugfs_register_one(struct clk *c)
{
	struct clk *pa = c->parent_periph;
	struct clk *bpa = c->parent_cluster;

	if (!(bpa && !pa)) {
		c->dent = clk_debugfs_register_dir(c,
				pa ? pa->dent : clk_debugfs_root);
		if (!c->dent)
			return -ENOMEM;
	}

	if (bpa) {
		c->dent_bus = clk_debugfs_register_dir(c,
				bpa->dent_bus ? bpa->dent_bus : bpa->dent);
		if ((!c->dent_bus) && (c->dent)) {
			debugfs_remove_recursive(c->dent);
			c->dent = NULL;
			return -ENOMEM;
		}
	}
	return 0;
}
/*
 * Register a clock in debugfs, recursively registering its parents
 * first so their directories exist to parent this clock's entries.
 * Clocks that already have an entry are skipped.
 */
static int clk_debugfs_register(struct clk *c)
{
	struct clk *periph = c->parent_periph;
	struct clk *cluster = c->parent_cluster;
	int err;

	if (periph && !periph->dent && !periph->dent_bus) {
		err = clk_debugfs_register(periph);
		if (err)
			return err;
	}

	if (cluster && !cluster->dent && !cluster->dent_bus) {
		err = clk_debugfs_register(cluster);
		if (err)
			return err;
	}

	if (!c->dent && !c->dent_bus)
		return clk_debugfs_register_one(c);

	return 0;
}
/*
 * Create the debugfs "clock" tree for every clock queued on clk_list.
 * On any failure the whole tree is removed again.
 */
static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clk_list, list) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}

late_initcall(clk_debugfs_init);
late_initcall(clk_debugfs_init);
#endif /* defined(CONFIG_DEBUG_FS) */
/* Current local-timer (TWD) rate; updated by the cpufreq notifier. */
unsigned long clk_smp_twd_rate = 500000000;

unsigned long clk_smp_twd_get_rate(struct clk *clk)
{
	return clk_smp_twd_rate;
}

/* Clock consumed by the ARM TWD local timer driver. */
static struct clk clk_smp_twd = {
	.get_rate = clk_smp_twd_get_rate,
	.name = "smp_twd",
};

static struct clk_lookup clk_smp_twd_lookup = {
	.dev_id = "smp_twd",
	.clk = &clk_smp_twd,
};
#ifdef CONFIG_CPU_FREQ
/*
 * Keep clk_smp_twd_rate in sync with CPU frequency changes.  f->new is
 * in kHz; the /2 assumes the TWD runs at half the CPU clock on this
 * platform.
 */
static int clk_twd_cpufreq_transition(struct notifier_block *nb,
				      unsigned long state, void *data)
{
	struct cpufreq_freqs *f = data;

	if (state == CPUFREQ_PRECHANGE) {
		/* Save frequency in simple Hz */
		clk_smp_twd_rate = (f->new * 1000) / 2;
	}

	return NOTIFY_OK;
}

static struct notifier_block clk_twd_cpufreq_nb = {
	.notifier_call = clk_twd_cpufreq_transition,
};

/* Hook the notifier into cpufreq transitions at late init. */
static int clk_init_smp_twd_cpufreq(void)
{
	return cpufreq_register_notifier(&clk_twd_cpufreq_nb,
					 CPUFREQ_TRANSITION_NOTIFIER);
}
late_initcall(clk_init_smp_twd_cpufreq);
#endif
/*
 * Platform clock init: register the clkdev tables.  On U5500 the real
 * clock tree is not modeled yet, so gating is stubbed out and a couple
 * of fixed rates are patched in.
 */
int __init clk_init(void)
{
	if (cpu_is_u5500()) {
		/* Clock tree for U5500 not implemented yet */
		clk_prcc_ops.enable = clk_prcc_ops.disable = NULL;
		clk_prcmu_ops.enable = clk_prcmu_ops.disable = NULL;
		clk_uartclk.rate = 36360000;
		clk_sdmmcclk.rate = 99900000;
	}

	clkdev_add_table(u8500_clks, ARRAY_SIZE(u8500_clks));
	clkdev_add(&clk_smp_twd_lookup);

#ifdef CONFIG_DEBUG_FS
	clk_debugfs_add_table(u8500_clks, ARRAY_SIZE(u8500_clks));
#endif
	return 0;
}
| gpl-2.0 |
gengzh0016/kernel_BBxM | arch/ia64/sn/pci/pcibr/pcibr_provider.c | 91 | 6747 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2001-2004, 2006 Silicon Graphics, Inc. All rights reserved.
*/
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/export.h>
#include <asm/sn/addrs.h>
#include <asm/sn/geo.h>
#include <asm/sn/pcibr_provider.h>
#include <asm/sn/pcibus_provider_defs.h>
#include <asm/sn/pcidev.h>
#include <asm/sn/sn_sal.h>
#include <asm/sn/pic.h>
#include <asm/sn/sn2/sn_hwperf.h>
#include "xtalk/xwidgetdev.h"
#include "xtalk/hubdev.h"
/*
 * Ask SAL to power up and enable a PCI slot on the bus described by
 * 'soft'.  Returns the SAL result from v0.
 */
int
sal_pcibr_slot_enable(struct pcibus_info *soft, int device, void *resp,
		      char **ssdt)
{
	struct ia64_sal_retval ret_stuff;
	u64 busnum;
	u64 segment;

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	segment = soft->pbi_buscommon.bs_persist_segment;
	busnum = soft->pbi_buscommon.bs_persist_busnum;
	/* ssdt is passed by physical address for the firmware's benefit */
	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_ENABLE, segment,
			busnum, (u64) device, (u64) resp, (u64)ia64_tpa(ssdt),
			0, 0);

	return (int)ret_stuff.v0;
}
/*
 * Ask SAL to disable (and optionally power down, per 'action') a PCI
 * slot on the bus described by 'soft'.  Returns the SAL result from v0.
 */
int
sal_pcibr_slot_disable(struct pcibus_info *soft, int device, int action,
		       void *resp)
{
	struct ia64_sal_retval ret_stuff;
	u64 busnum;
	u64 segment;

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	segment = soft->pbi_buscommon.bs_persist_segment;
	busnum = soft->pbi_buscommon.bs_persist_busnum;
	SAL_CALL_NOLOCK(ret_stuff, (u64) SN_SAL_IOIF_SLOT_DISABLE,
			segment, busnum, (u64) device, (u64) action,
			(u64) resp, 0, 0);

	return (int)ret_stuff.v0;
}
/*
 * Ask SAL to service a bridge error interrupt for the bus described by
 * 'soft'.  Returns the SAL result from v0 (negative on failure).
 */
static int sal_pcibr_error_interrupt(struct pcibus_info *soft)
{
	struct ia64_sal_retval ret_stuff;
	u64 busnum;
	u64 segment;	/* u64 for consistency with the other SAL wrappers */

	ret_stuff.status = 0;
	ret_stuff.v0 = 0;

	segment = soft->pbi_buscommon.bs_persist_segment;
	busnum = soft->pbi_buscommon.bs_persist_busnum;
	SAL_CALL_NOLOCK(ret_stuff,
			(u64) SN_SAL_IOIF_ERROR_INTERRUPT,
			(u64) segment, (u64) busnum, 0, 0, 0, 0, 0);

	return (int)ret_stuff.v0;
}
/*
 * Return the ioboard (physical board) number hosting 'pci_bus', as
 * reported by the system controller; 0 on failure (with a warning).
 */
u16 sn_ioboard_to_pci_bus(struct pci_bus *pci_bus)
{
	long rc;
	u16 uninitialized_var(ioboard);		/* GCC be quiet */
	nasid_t nasid = NASID_GET(SN_PCIBUS_BUSSOFT(pci_bus)->bs_base);

	rc = ia64_sn_sysctl_ioboard_get(nasid, &ioboard);
	if (rc) {
		printk(KERN_WARNING "ia64_sn_sysctl_ioboard_get failed: %ld\n",
		       rc);
		return 0;
	}

	return ioboard;
}
/*
* PCI Bridge Error interrupt handler. Gets invoked whenever a PCI
* bridge sends an error interrupt.
*/
static irqreturn_t
pcibr_error_intr_handler(int irq, void *arg)
{
/* arg is the pcibus_info registered via request_irq() in pcibr_bus_fixup() */
struct pcibus_info *soft = arg;
/* Hand the error to SAL; a negative result means the bridge is unrecoverable */
if (sal_pcibr_error_interrupt(soft) < 0)
panic("pcibr_error_intr_handler(): Fatal Bridge Error");
return IRQ_HANDLED;
}
/*
 * pcibr_bus_fixup() - build the kernel-side bus-soft structure for a
 * PIC/TIOCP PCI bridge discovered by the PROM.
 *
 * Copies the PROM's pcibus_bussoft into a kernel pcibus_info, remaps the
 * bridge register space, installs the shared bridge-error interrupt
 * handler, programs the bridge page size to match the kernel's, links the
 * bus into the hub's DMA-flush structures, and allocates the PMU ATE map.
 *
 * Returns the new pcibus_info, or NULL if the ASIC type is not a
 * supported bridge or an allocation fails.
 *
 * NOTE(review): on ATE allocation failure only @soft is freed; the
 * ioremap mapping and (possibly registered) error irq are not torn down
 * here — this matches the original code's behavior.
 */
void *
pcibr_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *controller)
{
	int nasid, cnode, j;
	struct hubdev_info *hubdev_info;
	struct pcibus_info *soft;
	struct sn_flush_device_kernel *sn_flush_device_kernel;
	struct sn_flush_device_common *common;

	if (!IS_PCI_BRIDGE_ASIC(prom_bussoft->bs_asic_type)) {
		return NULL;
	}

	/*
	 * Allocate kernel bus soft and copy from prom.
	 */
	soft = kmemdup(prom_bussoft, sizeof(struct pcibus_info), GFP_KERNEL);
	if (!soft) {
		return NULL;
	}

	/* Replace the PROM's base with a kernel mapping of the bridge regs */
	soft->pbi_buscommon.bs_base = (unsigned long)
		ioremap(REGION_OFFSET(soft->pbi_buscommon.bs_base),
			sizeof(struct pic));

	spin_lock_init(&soft->pbi_lock);

	/*
	 * register the bridge's error interrupt handler
	 */
	if (request_irq(SGI_PCIASIC_ERROR, pcibr_error_intr_handler,
			IRQF_SHARED, "PCIBR error", (void *)(soft))) {
		printk(KERN_WARNING
		       "pcibr cannot allocate interrupt for error handler\n");
	}
	sn_set_err_irq_affinity(SGI_PCIASIC_ERROR);

	/*
	 * Update the Bridge with the "kernel" pagesize
	 */
	if (PAGE_SIZE < 16384) {
		pcireg_control_bit_clr(soft, PCIBR_CTRL_PAGE_SIZE);
	} else {
		pcireg_control_bit_set(soft, PCIBR_CTRL_PAGE_SIZE);
	}

	nasid = NASID_GET(soft->pbi_buscommon.bs_base);
	cnode = nasid_to_cnodeid(nasid);
	hubdev_info = (struct hubdev_info *)(NODEPDA(cnode)->pdinfo);

	/*
	 * Point the hub's per-widget DMA-flush entries for this bus at the
	 * new kernel pcibus_info, matching on persistent segment/busnum.
	 */
	if (hubdev_info->hdi_flush_nasid_list.widget_p) {
		sn_flush_device_kernel = hubdev_info->hdi_flush_nasid_list.
			widget_p[(int)soft->pbi_buscommon.bs_xid];
		if (sn_flush_device_kernel) {
			for (j = 0; j < DEV_PER_WIDGET;
			     j++, sn_flush_device_kernel++) {
				common = sn_flush_device_kernel->common;
				if (common->sfdl_slot == -1)
					continue;
				if ((common->sfdl_persistent_segment ==
				     soft->pbi_buscommon.bs_persist_segment) &&
				    (common->sfdl_persistent_busnum ==
				     soft->pbi_buscommon.bs_persist_busnum))
					common->sfdl_pcibus_info =
						soft;
			}
		}
	}

	/* Setup the PMU ATE map */
	soft->pbi_int_ate_resource.lowest_free_index = 0;
	/*
	 * Use kcalloc so the element-count * element-size multiplication
	 * is overflow-checked (kzalloc(n * size) is not).
	 */
	soft->pbi_int_ate_resource.ate =
		kcalloc(soft->pbi_int_ate_size, sizeof(u64), GFP_KERNEL);

	if (!soft->pbi_int_ate_resource.ate) {
		kfree(soft);
		return NULL;
	}

	return soft;
}
/*
 * Force the bridge to (re)raise the interrupt associated with
 * @sn_irq_info by setting its force-interrupt register bit.
 * No-op when the irq has no bridge or no attached device info.
 */
void pcibr_force_interrupt(struct sn_irq_info *sn_irq_info)
{
	struct pcidev_info *pcidev_info;
	int bit = sn_irq_info->irq_int_bit;

	if (!sn_irq_info->irq_bridge)
		return;

	pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
	if (!pcidev_info)
		return;

	pcireg_force_intr_set((struct pcibus_info *)
			      pcidev_info->pdi_host_pcidev_info->
			      pdi_pcibus_info, bit);
}
/*
 * Retarget a device interrupt to a new xtalk address.  The sequence
 * below (disable -> rewrite address -> re-enable -> force) must stay in
 * this order so no interrupt is delivered to the old target while the
 * address register is being rewritten.
 */
void pcibr_target_interrupt(struct sn_irq_info *sn_irq_info)
{
struct pcidev_info *pcidev_info;
struct pcibus_info *pcibus_info;
int bit = sn_irq_info->irq_int_bit;
u64 xtalk_addr = sn_irq_info->irq_xtalkaddr;
pcidev_info = (struct pcidev_info *)sn_irq_info->irq_pciioinfo;
if (pcidev_info) {
pcibus_info =
(struct pcibus_info *)pcidev_info->pdi_host_pcidev_info->
pdi_pcibus_info;
/* Disable the device's IRQ */
pcireg_intr_enable_bit_clr(pcibus_info, (1 << bit));
/* Change the device's IRQ */
pcireg_intr_addr_addr_set(pcibus_info, bit, xtalk_addr);
/* Re-enable the device's IRQ */
pcireg_intr_enable_bit_set(pcibus_info, (1 << bit));
/* Replay any interrupt that may have been lost while disabled */
pcibr_force_interrupt(sn_irq_info);
}
}
/*
 * Provider entries for PIC/CP
 */
/* Ops vector registered for both PIC and TIOCP bridge ASICs (see below). */
struct sn_pcibus_provider pcibr_provider = {
.dma_map = pcibr_dma_map,
.dma_map_consistent = pcibr_dma_map_consistent,
.dma_unmap = pcibr_dma_unmap,
.bus_fixup = pcibr_bus_fixup,
.force_interrupt = pcibr_force_interrupt,
.target_interrupt = pcibr_target_interrupt
};
/*
 * Register pcibr_provider for both PIC and TIOCP ASIC types.
 * Called once during SN PCI initialization; always succeeds.
 */
int
pcibr_init_provider(void)
{
sn_pci_provider[PCIIO_ASIC_TYPE_PIC] = &pcibr_provider;
sn_pci_provider[PCIIO_ASIC_TYPE_TIOCP] = &pcibr_provider;
return 0;
}
/* Exported for the SN hotplug driver's use */
EXPORT_SYMBOL_GPL(sal_pcibr_slot_enable);
EXPORT_SYMBOL_GPL(sal_pcibr_slot_disable);
EXPORT_SYMBOL_GPL(sn_ioboard_to_pci_bus);
| gpl-2.0 |
vkomenda/linux-sunxi | drivers/usb/dwc3/dwc3-keystone.c | 603 | 4857 | /**
* dwc3-keystone.c - Keystone Specific Glue layer
*
* Copyright (C) 2010-2013 Texas Instruments Incorporated - http://www.ti.com
*
* Author: WingMan Kwok <w-kwok2@ti.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 of
* the License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/clk.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/of_platform.h>
/* USBSS register offsets */
#define USBSS_REVISION 0x0000
#define USBSS_SYSCONFIG 0x0010
#define USBSS_IRQ_EOI 0x0018
#define USBSS_IRQSTATUS_RAW_0 0x0020
#define USBSS_IRQSTATUS_0 0x0024
#define USBSS_IRQENABLE_SET_0 0x0028
#define USBSS_IRQENABLE_CLR_0 0x002c
/* IRQ register bits */
#define USBSS_IRQ_EOI_LINE(n) BIT(n)
#define USBSS_IRQ_EVENT_ST BIT(0)
#define USBSS_IRQ_COREIRQ_EN BIT(0)
#define USBSS_IRQ_COREIRQ_CLR BIT(0)
/* Backing storage for dev->dma_mask, filled from dma_get_mask() in probe */
static u64 kdwc3_dma_mask;
/* Per-device glue state, stored as platform drvdata */
struct dwc3_keystone {
struct device *dev;
struct clk *clk;
/* Mapped USBSS wrapper registers (offsets defined above) */
void __iomem *usbss;
};
/* Read a 32-bit USBSS wrapper register at @offset from @base. */
static inline u32 kdwc3_readl(void __iomem *base, u32 offset)
{
return readl(base + offset);
}
/* Write @value to the 32-bit USBSS wrapper register at @offset from @base. */
static inline void kdwc3_writel(void __iomem *base, u32 offset, u32 value)
{
writel(value, base + offset);
}
/* Unmask the core interrupt in the USBSS IRQ-enable-set register. */
static void kdwc3_enable_irqs(struct dwc3_keystone *kdwc)
{
	u32 mask = kdwc3_readl(kdwc->usbss, USBSS_IRQENABLE_SET_0);

	kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0,
		     mask | USBSS_IRQ_COREIRQ_EN);
}
/* Mask the core interrupt in the USBSS IRQ-enable-set register. */
static void kdwc3_disable_irqs(struct dwc3_keystone *kdwc)
{
	u32 mask = kdwc3_readl(kdwc->usbss, USBSS_IRQENABLE_SET_0);

	kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0,
		     mask & ~USBSS_IRQ_COREIRQ_EN);
}
/*
 * USBSS wrapper interrupt handler.  The register write sequence follows
 * the Keystone IRQ protocol: mask the core irq, ack the event status,
 * re-enable the core irq, then signal end-of-interrupt for line 0.
 */
static irqreturn_t dwc3_keystone_interrupt(int irq, void *_kdwc)
{
struct dwc3_keystone *kdwc = _kdwc;
kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_CLR_0, USBSS_IRQ_COREIRQ_CLR);
kdwc3_writel(kdwc->usbss, USBSS_IRQSTATUS_0, USBSS_IRQ_EVENT_ST);
kdwc3_writel(kdwc->usbss, USBSS_IRQENABLE_SET_0, USBSS_IRQ_COREIRQ_EN);
kdwc3_writel(kdwc->usbss, USBSS_IRQ_EOI, USBSS_IRQ_EOI_LINE(0));
return IRQ_HANDLED;
}
/*
 * kdwc3_probe() - bind the Keystone USBSS glue.
 *
 * Maps the wrapper registers, sets up the DMA mask, enables the "usb"
 * functional clock, installs the wrapper interrupt handler, and then
 * populates the dwc3 core child from the device tree.
 *
 * Returns 0 on success or a negative errno; all partially-acquired
 * resources are released on the error paths (devm handles the mapping
 * and irq).
 */
static int kdwc3_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = pdev->dev.of_node;
	struct dwc3_keystone *kdwc;
	struct resource *res;
	int error, irq;

	kdwc = devm_kzalloc(dev, sizeof(*kdwc), GFP_KERNEL);
	if (!kdwc)
		return -ENOMEM;

	platform_set_drvdata(pdev, kdwc);

	kdwc->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	kdwc->usbss = devm_ioremap_resource(dev, res);
	if (IS_ERR(kdwc->usbss))
		return PTR_ERR(kdwc->usbss);

	kdwc3_dma_mask = dma_get_mask(dev);
	dev->dma_mask = &kdwc3_dma_mask;

	kdwc->clk = devm_clk_get(kdwc->dev, "usb");
	/*
	 * Fix: devm_clk_get() returns an ERR_PTR on failure; the original
	 * code passed it unchecked to clk_prepare_enable(), which would
	 * dereference the error pointer.
	 */
	if (IS_ERR(kdwc->clk)) {
		dev_err(kdwc->dev, "unable to get usb clock\n");
		return PTR_ERR(kdwc->clk);
	}

	error = clk_prepare_enable(kdwc->clk);
	if (error < 0) {
		dev_dbg(kdwc->dev, "unable to enable usb clock, err %d\n",
			error);
		return error;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "missing irq\n");
		error = irq;
		goto err_irq;
	}

	error = devm_request_irq(dev, irq, dwc3_keystone_interrupt, IRQF_SHARED,
				 dev_name(dev), kdwc);
	if (error) {
		dev_err(dev, "failed to request IRQ #%d --> %d\n",
			irq, error);
		goto err_irq;
	}

	kdwc3_enable_irqs(kdwc);

	error = of_platform_populate(node, NULL, NULL, dev);
	if (error) {
		dev_err(&pdev->dev, "failed to create dwc3 core\n");
		goto err_core;
	}

	return 0;

err_core:
	kdwc3_disable_irqs(kdwc);
err_irq:
	clk_disable_unprepare(kdwc->clk);

	return error;
}
/* device_for_each_child() callback: unregister one dwc3 child device. */
static int kdwc3_remove_core(struct device *dev, void *c)
{
	platform_device_unregister(to_platform_device(dev));

	return 0;
}
/*
 * Unbind the glue: mask wrapper irqs first so no interrupt fires while
 * the dwc3 core children are torn down, then stop the clock.
 */
static int kdwc3_remove(struct platform_device *pdev)
{
struct dwc3_keystone *kdwc = platform_get_drvdata(pdev);
kdwc3_disable_irqs(kdwc);
device_for_each_child(&pdev->dev, NULL, kdwc3_remove_core);
clk_disable_unprepare(kdwc->clk);
platform_set_drvdata(pdev, NULL);
return 0;
}
/* Device-tree match table: binds to "ti,keystone-dwc3" nodes. */
static const struct of_device_id kdwc3_of_match[] = {
{ .compatible = "ti,keystone-dwc3", },
{},
};
MODULE_DEVICE_TABLE(of, kdwc3_of_match);
static struct platform_driver kdwc3_driver = {
.probe = kdwc3_probe,
.remove = kdwc3_remove,
.driver = {
.name = "keystone-dwc3",
.of_match_table = kdwc3_of_match,
},
};
module_platform_driver(kdwc3_driver);
MODULE_ALIAS("platform:keystone-dwc3");
MODULE_AUTHOR("WingMan Kwok <w-kwok2@ti.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
myjang0507/Polaris-slte- | drivers/staging/zcache/zbud.c | 2139 | 32648 | /*
* zbud.c - Compression buddies allocator
*
* Copyright (c) 2010-2012, Dan Magenheimer, Oracle Corp.
*
* Compression buddies ("zbud") provides for efficiently packing two
* (or, possibly in the future, more) compressed pages ("zpages") into
* a single "raw" pageframe and for tracking both zpages and pageframes
* so that whole pageframes can be easily reclaimed in LRU-like order.
* It is designed to be used in conjunction with transcendent memory
* ("tmem"); for example separate LRU lists are maintained for persistent
* vs. ephemeral pages.
*
* A zbudpage is an overlay for a struct page and thus each zbudpage
* refers to a physical pageframe of RAM. When the caller passes a
* struct page from the kernel's page allocator, zbud "transforms" it
* to a zbudpage which sets/uses a different set of fields than the
* struct-page and thus must "untransform" it back by reinitializing
* certain fields before the struct-page can be freed. The fields
* of a zbudpage include a page lock for controlling access to the
* corresponding pageframe, and there is a size field for each zpage.
* Each zbudpage also lives on two linked lists: a "budlist" which is
* used to support efficient buddying of zpages; and an "lru" which
* is used for reclaiming pageframes in approximately least-recently-used
* order.
*
* A zbudpageframe is a pageframe divided up into aligned 64-byte "chunks"
* which contain the compressed data for zero, one, or two zbuds. Contained
* with the compressed data is a tmem_handle which is a key to allow
* the same data to be found via the tmem interface so the zpage can
* be invalidated (for ephemeral pages) or repatriated to the swap cache
* (for persistent pages). The contents of a zbudpageframe must never
* be accessed without holding the page lock for the corresponding
* zbudpage and, to accomodate highmem machines, the contents may
* only be examined or changes when kmapped. Thus, when in use, a
* kmapped zbudpageframe is referred to in the zbud code as "void *zbpg".
*
* Note that the term "zbud" refers to the combination of a zpage and
* a tmem_handle that is stored as one of possibly two "buddied" zpages;
* it also generically refers to this allocator... sorry for any confusion.
*
* A zbudref is a pointer to a struct zbudpage (which can be cast to a
* struct page), with the LSB either cleared or set to indicate, respectively,
* the first or second zpage in the zbudpageframe. Since a zbudref can be
* cast to a pointer, it is used as the tmem "pampd" pointer and uniquely
* references a stored tmem page and so is the only zbud data structure
* externally visible to zbud.c/zbud.h.
*
* Since we wish to reclaim entire pageframes but zpages may be randomly
* added and deleted to any given pageframe, we approximate LRU by
* promoting a pageframe to MRU when a zpage is added to it, but
* leaving it at the current place in the list when a zpage is deleted
* from it. As a side effect, zpages that are difficult to buddy (e.g.
* very large paages) will be reclaimed faster than average, which seems
* reasonable.
*
* In the current implementation, no more than two zpages may be stored in
* any pageframe and no zpage ever crosses a pageframe boundary. While
* other zpage allocation mechanisms may allow greater density, this two
* zpage-per-pageframe limit both ensures simple reclaim of pageframes
* (including garbage collection of references to the contents of those
* pageframes from tmem data structures) AND avoids the need for compaction.
* With additional complexity, zbud could be modified to support storing
* up to three zpages per pageframe or, to handle larger average zpages,
* up to three zpages per pair of pageframes, but it is not clear if the
* additional complexity would be worth it. So consider it an exercise
* for future developers.
*
* Note also that zbud does no page allocation or freeing. This is so
* that the caller has complete control over and, for accounting, visibility
* into if/when pages are allocated and freed.
*
* Finally, note that zbud limits the size of zpages it can store; the
* caller must check the zpage size with zbud_max_buddy_size before
* storing it, else BUGs will result. User beware.
*/
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/bug.h>
#include "tmem.h"
#include "zcache.h"
#include "zbud.h"
/*
* We need to ensure that a struct zbudpage is never larger than a
* struct page. This is checked with a BUG_ON in zbud_init.
*
* The unevictable field indicates that a zbud is being added to the
* zbudpage. Since this is a two-phase process (due to tmem locking),
* this field locks the zbudpage against eviction when a zbud match
* or creation is in process. Since this addition process may occur
* in parallel for two zbuds in one zbudpage, the field is a counter
* that must not exceed two.
*/
/*
 * Overlay for struct page (see the union): zbud repurposes the page's
 * storage for its own bookkeeping while the pageframe is in use.
 */
struct zbudpage {
union {
struct page page;
struct {
/* overlays page->flags; individual bits still used via PG_* helpers */
unsigned long space_for_flags;
struct {
/* byte sizes of the two zpages (0 == slot unused) */
unsigned zbud0_size:PAGE_SHIFT;
unsigned zbud1_size:PAGE_SHIFT;
/* 0..2 in-flight additions; nonzero blocks eviction (see file header) */
unsigned unevictable:2;
};
/* links into a zbud_unbuddied[] list or a buddied list */
struct list_head budlist;
/* links into the eph/pers LRU (or zombie) list */
struct list_head lru;
};
};
};
#if (PAGE_SHIFT * 2) + 2 > BITS_PER_LONG
#error "zbud won't work for this arch, PAGE_SIZE is too large"
#endif
/*
 * Tagged pointer to a zbudpage: bit 0 selects zbud0 (clear) or zbud1
 * (set).  This is what zbud hands out as the opaque tmem "pampd".
 */
struct zbudref {
union {
struct zbudpage *zbudpage;
unsigned long zbudref;
};
};
/* Pageframes are carved into 64-byte chunks for size accounting */
#define CHUNK_SHIFT 6
#define CHUNK_SIZE (1 << CHUNK_SHIFT)
#define CHUNK_MASK (~(CHUNK_SIZE-1))
#define NCHUNKS (PAGE_SIZE >> CHUNK_SHIFT)
#define MAX_CHUNK (NCHUNKS-1)
/*
* The following functions deal with the difference between struct
* page and struct zbudpage. Note the hack of using the pageflags
* from struct page; this is to avoid duplicating all the complex
* pageflag macros.
*/
/* Spin on the page's PG_locked bit; used as a per-zbudpage spinlock. */
static inline void zbudpage_spin_lock(struct zbudpage *zbudpage)
{
struct page *page = (struct page *)zbudpage;
while (unlikely(test_and_set_bit_lock(PG_locked, &page->flags))) {
do {
cpu_relax();
} while (test_bit(PG_locked, &page->flags));
}
}
/* Release the per-zbudpage lock taken by zbudpage_spin_lock(). */
static inline void zbudpage_spin_unlock(struct zbudpage *zbudpage)
{
struct page *page = (struct page *)zbudpage;
clear_bit(PG_locked, &page->flags);
}
/* Non-blocking lock attempt; returns nonzero on success. */
static inline int zbudpage_spin_trylock(struct zbudpage *zbudpage)
{
return trylock_page((struct page *)zbudpage);
}
/* Nonzero if the per-zbudpage lock is currently held (by anyone). */
static inline int zbudpage_is_locked(struct zbudpage *zbudpage)
{
return PageLocked((struct page *)zbudpage);
}
/* kmap the underlying pageframe; pair with kunmap_zbudpage_atomic(). */
static inline void *kmap_zbudpage_atomic(struct zbudpage *zbudpage)
{
return kmap_atomic((struct page *)zbudpage);
}
/*
* A dying zbudpage is an ephemeral page in the process of being evicted.
* Any data contained in the zbudpage is invalid and we are just waiting for
* the tmem pampds to be invalidated before freeing the page
*/
/* "Dying" state is tracked in the page's PG_reclaim bit. */
static inline int zbudpage_is_dying(struct zbudpage *zbudpage)
{
struct page *page = (struct page *)zbudpage;
return test_bit(PG_reclaim, &page->flags);
}
static inline void zbudpage_set_dying(struct zbudpage *zbudpage)
{
struct page *page = (struct page *)zbudpage;
set_bit(PG_reclaim, &page->flags);
}
static inline void zbudpage_clear_dying(struct zbudpage *zbudpage)
{
struct page *page = (struct page *)zbudpage;
clear_bit(PG_reclaim, &page->flags);
}
/*
 * A zombie zbudpage is a persistent page in the process of being evicted.
 * The data contained in the zbudpage is valid and we are just waiting for
 * the tmem pampds to be invalidated before freeing the page
 */
/* "Zombie" state is tracked in the page's PG_dirty bit. */
static inline int zbudpage_is_zombie(struct zbudpage *zbudpage)
{
struct page *page = (struct page *)zbudpage;
return test_bit(PG_dirty, &page->flags);
}
static inline void zbudpage_set_zombie(struct zbudpage *zbudpage)
{
struct page *page = (struct page *)zbudpage;
set_bit(PG_dirty, &page->flags);
}
static inline void zbudpage_clear_zombie(struct zbudpage *zbudpage)
{
struct page *page = (struct page *)zbudpage;
clear_bit(PG_dirty, &page->flags);
}
/* Undo kmap_zbudpage_atomic(). */
static inline void kunmap_zbudpage_atomic(void *zbpg)
{
kunmap_atomic(zbpg);
}
/*
* zbud "translation" and helper functions
*/
/* Strip the budnum tag bit to recover the zbudpage pointer. */
static inline struct zbudpage *zbudref_to_zbudpage(struct zbudref *zref)
{
unsigned long zbud = (unsigned long)zref;
zbud &= ~1UL;
return (struct zbudpage *)zbud;
}
/* Encode (zbudpage, budnum) into a tagged-pointer zbudref; budnum is 0 or 1. */
static inline struct zbudref *zbudpage_to_zbudref(struct zbudpage *zbudpage,
unsigned budnum)
{
unsigned long zbud = (unsigned long)zbudpage;
BUG_ON(budnum > 1);
zbud |= budnum;
return (struct zbudref *)zbud;
}
/* Extract the buddy number (0 or 1) from a zbudref's tag bit. */
static inline int zbudref_budnum(struct zbudref *zbudref)
{
unsigned long zbud = (unsigned long)zbudref;
return zbud & 1UL;
}
/* Largest raw payload a single zbud can hold (in bytes). */
static inline unsigned zbud_max_size(void)
{
return MAX_CHUNK << CHUNK_SHIFT;
}
/* Round a byte size up to whole 64-byte chunks. */
static inline unsigned zbud_size_to_chunks(unsigned size)
{
BUG_ON(size == 0 || size > zbud_max_size());
return (size + CHUNK_SIZE - 1) >> CHUNK_SHIFT;
}
/* can only be used between kmap_zbudpage_atomic/kunmap_zbudpage_atomic! */
/* zbud0 grows from the front of the page, zbud1 from the back. */
static inline char *zbud_data(void *zbpg,
unsigned budnum, unsigned size)
{
char *p;
BUG_ON(size == 0 || size > zbud_max_size());
p = (char *)zbpg;
if (budnum == 1)
p += PAGE_SIZE - ((size + CHUNK_SIZE - 1) & CHUNK_MASK);
return p;
}
/*
* These are all informative and exposed through debugfs... except for
* the arrays... anyone know how to do that? To avoid confusion for
* debugfs viewers, some of these should also be atomic_long_t, but
* I don't know how to expose atomics via debugfs either...
*/
/* Current pageframe/zpage/byte counts, per pool (eph = ephemeral, pers = persistent) */
static ssize_t zbud_eph_pageframes;
static ssize_t zbud_pers_pageframes;
static ssize_t zbud_eph_zpages;
static ssize_t zbud_pers_zpages;
static u64 zbud_eph_zbytes;
static u64 zbud_pers_zbytes;
/* Monotonic (cumulative) counters */
static ssize_t zbud_eph_evicted_pageframes;
static ssize_t zbud_pers_evicted_pageframes;
static ssize_t zbud_eph_cumul_zpages;
static ssize_t zbud_pers_cumul_zpages;
static u64 zbud_eph_cumul_zbytes;
static u64 zbud_pers_cumul_zbytes;
static ssize_t zbud_eph_cumul_chunk_counts[NCHUNKS];
static ssize_t zbud_pers_cumul_chunk_counts[NCHUNKS];
/* List-occupancy counters, maintained under the list locks below */
static ssize_t zbud_eph_buddied_count;
static ssize_t zbud_pers_buddied_count;
static ssize_t zbud_eph_unbuddied_count;
static ssize_t zbud_pers_unbuddied_count;
/* Zombie counts mirror the atomics so they can be exposed as plain ssize_t */
static ssize_t zbud_eph_zombie_count;
static ssize_t zbud_pers_zombie_count;
static atomic_t zbud_eph_zombie_atomic;
static atomic_t zbud_pers_zombie_atomic;
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#define zdfs debugfs_create_size_t
#define zdfs64 debugfs_create_u64
/*
 * Expose the statistics above under /sys/kernel/debug/zbud.
 * NOTE(review): zbud_eph_zombie_count is never registered here (only
 * pers_zombie_count is) — presumably an omission; verify before relying
 * on the debugfs view for ephemeral zombies.
 */
static int zbud_debugfs_init(void)
{
struct dentry *root = debugfs_create_dir("zbud", NULL);
if (root == NULL)
return -ENXIO;
/*
 * would be nice to dump the sizes of the unbuddied
 * arrays, like was done with sysfs, but it doesn't
 * look like debugfs is flexible enough to do that
 */
zdfs64("eph_zbytes", S_IRUGO, root, &zbud_eph_zbytes);
zdfs64("eph_cumul_zbytes", S_IRUGO, root, &zbud_eph_cumul_zbytes);
zdfs64("pers_zbytes", S_IRUGO, root, &zbud_pers_zbytes);
zdfs64("pers_cumul_zbytes", S_IRUGO, root, &zbud_pers_cumul_zbytes);
zdfs("eph_cumul_zpages", S_IRUGO, root, &zbud_eph_cumul_zpages);
zdfs("eph_evicted_pageframes", S_IRUGO, root,
&zbud_eph_evicted_pageframes);
zdfs("eph_zpages", S_IRUGO, root, &zbud_eph_zpages);
zdfs("eph_pageframes", S_IRUGO, root, &zbud_eph_pageframes);
zdfs("eph_buddied_count", S_IRUGO, root, &zbud_eph_buddied_count);
zdfs("eph_unbuddied_count", S_IRUGO, root, &zbud_eph_unbuddied_count);
zdfs("pers_cumul_zpages", S_IRUGO, root, &zbud_pers_cumul_zpages);
zdfs("pers_evicted_pageframes", S_IRUGO, root,
&zbud_pers_evicted_pageframes);
zdfs("pers_zpages", S_IRUGO, root, &zbud_pers_zpages);
zdfs("pers_pageframes", S_IRUGO, root, &zbud_pers_pageframes);
zdfs("pers_buddied_count", S_IRUGO, root, &zbud_pers_buddied_count);
zdfs("pers_unbuddied_count", S_IRUGO, root, &zbud_pers_unbuddied_count);
zdfs("pers_zombie_count", S_IRUGO, root, &zbud_pers_zombie_count);
return 0;
}
#undef zdfs
#undef zdfs64
#else
/* No debugfs: statistics are kept but not exposed. */
static inline int zbud_debugfs_init(void)
{
return 0;
}
#endif
/* protects the buddied list and all unbuddied lists */
static DEFINE_SPINLOCK(zbud_eph_lists_lock);
static DEFINE_SPINLOCK(zbud_pers_lists_lock);
/* An unbuddied list head plus its length (kept in sync under the list lock) */
struct zbud_unbuddied {
struct list_head list;
unsigned count;
};
/* list N contains pages with N chunks USED and NCHUNKS-N unused */
/* element 0 is never used but optimizing that isn't worth it */
static struct zbud_unbuddied zbud_eph_unbuddied[NCHUNKS];
static struct zbud_unbuddied zbud_pers_unbuddied[NCHUNKS];
/* LRU, fully-buddied, and awaiting-invalidation (zombie) lists per pool */
static LIST_HEAD(zbud_eph_lru_list);
static LIST_HEAD(zbud_pers_lru_list);
static LIST_HEAD(zbud_eph_buddied_list);
static LIST_HEAD(zbud_pers_buddied_list);
static LIST_HEAD(zbud_eph_zombie_list);
static LIST_HEAD(zbud_pers_zombie_list);
/*
* Given a struct page, transform it to a zbudpage so that it can be
* used by zbud and initialize fields as necessary.
*/
/* Turn a raw struct page into an empty zbudpage and count the pageframe. */
static inline struct zbudpage *zbud_init_zbudpage(struct page *page, bool eph)
{
struct zbudpage *zbudpage = (struct zbudpage *)page;
BUG_ON(page == NULL);
INIT_LIST_HEAD(&zbudpage->budlist);
INIT_LIST_HEAD(&zbudpage->lru);
zbudpage->zbud0_size = 0;
zbudpage->zbud1_size = 0;
zbudpage->unevictable = 0;
if (eph)
zbud_eph_pageframes++;
else
zbud_pers_pageframes++;
return zbudpage;
}
/* "Transform" a zbudpage back to a struct page suitable to free. */
/*
 * Caller must hold the zbudpage lock; it is released here.  The BUG_ONs
 * assert the page is fully empty, delisted, and not mid-eviction before
 * the struct-page fields are reinitialized for the page allocator.
 */
static inline struct page *zbud_unuse_zbudpage(struct zbudpage *zbudpage,
bool eph)
{
struct page *page = (struct page *)zbudpage;
BUG_ON(!list_empty(&zbudpage->budlist));
BUG_ON(!list_empty(&zbudpage->lru));
BUG_ON(zbudpage->zbud0_size != 0);
BUG_ON(zbudpage->zbud1_size != 0);
BUG_ON(!PageLocked(page));
BUG_ON(zbudpage->unevictable != 0);
BUG_ON(zbudpage_is_dying(zbudpage));
BUG_ON(zbudpage_is_zombie(zbudpage));
if (eph)
zbud_eph_pageframes--;
else
zbud_pers_pageframes--;
zbudpage_spin_unlock(zbudpage);
/* restore struct-page fields zbud overlaid before handing the page back */
page_mapcount_reset(page);
init_page_count(page);
page->index = 0;
return page;
}
/*
 * Mark one buddy of a locked zbudpage as unused and subtract its size
 * from the appropriate pool's zpage/byte accounting.
 */
static inline void zbud_unuse_zbud(struct zbudpage *zbudpage,
				   int budnum, bool eph)
{
	unsigned freed;

	BUG_ON(!zbudpage_is_locked(zbudpage));
	if (budnum == 0) {
		freed = zbudpage->zbud0_size;
		zbudpage->zbud0_size = 0;
	} else {
		freed = zbudpage->zbud1_size;
		zbudpage->zbud1_size = 0;
	}
	if (eph) {
		zbud_eph_zbytes -= freed;
		zbud_eph_zpages--;
	} else {
		zbud_pers_zbytes -= freed;
		zbud_pers_zpages--;
	}
}
/*
* Given a zbudpage/budnum/size, a tmem handle, and a kmapped pointer
* to some data, set up the zbud appropriately including data copying
* and accounting. Note that if cdata is NULL, the data copying is
* skipped. (This is useful for lazy writes such as for RAMster.)
*/
/*
 * Caller must hold the zbudpage lock.  The stored zbud begins with a
 * copy of the tmem_handle, followed by the (optional) compressed data.
 */
static void zbud_init_zbud(struct zbudpage *zbudpage, struct tmem_handle *th,
bool eph, void *cdata,
unsigned budnum, unsigned size)
{
char *to;
void *zbpg;
struct tmem_handle *to_th;
unsigned nchunks = zbud_size_to_chunks(size);
BUG_ON(!zbudpage_is_locked(zbudpage));
zbpg = kmap_zbudpage_atomic(zbudpage);
to = zbud_data(zbpg, budnum, size);
/* copy the tmem handle field-by-field into the start of the zbud */
to_th = (struct tmem_handle *)to;
to_th->index = th->index;
to_th->oid = th->oid;
to_th->pool_id = th->pool_id;
to_th->client_id = th->client_id;
to += sizeof(struct tmem_handle);
/* cdata == NULL means a lazy write (RAMster); data is filled in later */
if (cdata != NULL)
memcpy(to, cdata, size - sizeof(struct tmem_handle));
kunmap_zbudpage_atomic(zbpg);
if (budnum == 0)
zbudpage->zbud0_size = size;
else
zbudpage->zbud1_size = size;
if (eph) {
zbud_eph_cumul_chunk_counts[nchunks]++;
zbud_eph_zpages++;
zbud_eph_cumul_zpages++;
zbud_eph_zbytes += size;
zbud_eph_cumul_zbytes += size;
} else {
zbud_pers_cumul_chunk_counts[nchunks]++;
zbud_pers_zpages++;
zbud_pers_cumul_zpages++;
zbud_pers_zbytes += size;
zbud_pers_cumul_zbytes += size;
}
}
/*
* Given a locked dying zbudpage, read out the tmem handles from the data,
* unlock the page, then use the handles to tell tmem to flush out its
* references
*/
/*
 * Two-phase eviction: phase 1 runs under the page lock and copies the
 * tmem handles out of the page while marking both zbuds unused; phase 2
 * runs unlocked and tells tmem to flush each saved handle.
 */
static void zbud_evict_tmem(struct zbudpage *zbudpage)
{
int i, j;
uint32_t pool_id[2], client_id[2];
uint32_t index[2];
struct tmem_oid oid[2];
struct tmem_pool *pool;
void *zbpg;
struct tmem_handle *th;
unsigned size;
/* read out the tmem handles from the data and set aside */
zbpg = kmap_zbudpage_atomic(zbudpage);
for (i = 0, j = 0; i < 2; i++) {
size = (i == 0) ? zbudpage->zbud0_size : zbudpage->zbud1_size;
if (size) {
th = (struct tmem_handle *)zbud_data(zbpg, i, size);
client_id[j] = th->client_id;
pool_id[j] = th->pool_id;
oid[j] = th->oid;
index[j] = th->index;
j++;
/* eph==true: only ephemeral pages go through this path */
zbud_unuse_zbud(zbudpage, i, true);
}
}
kunmap_zbudpage_atomic(zbpg);
zbudpage_spin_unlock(zbudpage);
/* zbudpage is now an unlocked dying... tell tmem to flush pointers */
for (i = 0; i < j; i++) {
pool = zcache_get_pool_by_id(client_id[i], pool_id[i]);
if (pool != NULL) {
tmem_flush_page(pool, &oid[i], index[i]);
zcache_put_pool(pool);
}
}
}
/*
* Externally callable zbud handling routines.
*/
/*
* Return the maximum size compressed page that can be stored (secretly
* setting aside space for the tmem handle.
*/
/* Max caller-visible payload: zbud_max_size() minus the embedded handle. */
unsigned int zbud_max_buddy_size(void)
{
return zbud_max_size() - sizeof(struct tmem_handle);
}
/*
 * Given a zbud reference, free the corresponding zbud from all lists,
 * mark it as unused, do accounting, and if the freeing of the zbud
 * frees up an entire pageframe, return it to the caller (else NULL).
 *
 * On return, *zsize is the freed payload size (minus the tmem handle)
 * and *zpages is 1, unless the zbudpage was dying (mid-eviction), in
 * which case both are 0 and nothing is freed here.
 */
struct page *zbud_free_and_delist(struct zbudref *zref, bool eph,
				  unsigned int *zsize, unsigned int *zpages)
{
	unsigned long budnum = zbudref_budnum(zref);
	struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
	struct page *page = NULL;
	unsigned chunks, bud_size, other_bud_size;
	spinlock_t *lists_lock =
		eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
	struct zbud_unbuddied *unbud =
		eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;

	spin_lock(lists_lock);
	zbudpage_spin_lock(zbudpage);
	if (zbudpage_is_dying(zbudpage)) {
		/* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
		zbudpage_spin_unlock(zbudpage);
		spin_unlock(lists_lock);
		*zpages = 0;
		*zsize = 0;
		goto out;
	}
	if (budnum == 0) {
		bud_size = zbudpage->zbud0_size;
		other_bud_size = zbudpage->zbud1_size;
	} else {
		bud_size = zbudpage->zbud1_size;
		other_bud_size = zbudpage->zbud0_size;
	}
	*zsize = bud_size - sizeof(struct tmem_handle);
	*zpages = 1;
	zbud_unuse_zbud(zbudpage, budnum, eph);
	if (other_bud_size == 0) { /* was unbuddied: unlist and free */
		chunks = zbud_size_to_chunks(bud_size);
		if (zbudpage_is_zombie(zbudpage)) {
			/*
			 * Fix: the ephemeral branch previously stored the
			 * decremented *ephemeral* atomic into
			 * zbud_pers_zombie_count, so the eph counter was
			 * never updated and the pers counter was clobbered.
			 * Keep each pool's count paired with its own atomic.
			 */
			if (eph)
				zbud_eph_zombie_count =
				    atomic_dec_return(&zbud_eph_zombie_atomic);
			else
				zbud_pers_zombie_count =
				    atomic_dec_return(&zbud_pers_zombie_atomic);
			zbudpage_clear_zombie(zbudpage);
		} else {
			BUG_ON(list_empty(&unbud[chunks].list));
			list_del_init(&zbudpage->budlist);
			unbud[chunks].count--;
		}
		list_del_init(&zbudpage->lru);
		spin_unlock(lists_lock);
		if (eph)
			zbud_eph_unbuddied_count--;
		else
			zbud_pers_unbuddied_count--;
		/* releases the zbudpage lock and returns the reusable page */
		page = zbud_unuse_zbudpage(zbudpage, eph);
	} else { /* was buddied: move remaining buddy to unbuddied list */
		chunks = zbud_size_to_chunks(other_bud_size);
		if (!zbudpage_is_zombie(zbudpage)) {
			list_del_init(&zbudpage->budlist);
			list_add_tail(&zbudpage->budlist, &unbud[chunks].list);
			unbud[chunks].count++;
		}
		if (eph) {
			zbud_eph_buddied_count--;
			zbud_eph_unbuddied_count++;
		} else {
			zbud_pers_unbuddied_count++;
			zbud_pers_buddied_count--;
		}
		/* don't mess with lru, no need to move it */
		zbudpage_spin_unlock(zbudpage);
		spin_unlock(lists_lock);
	}
out:
	return page;
}
/*
* Given a tmem handle, and a kmapped pointer to compressed data of
* the given size, try to find an unbuddied zbudpage in which to
* create a zbud. If found, put it there, mark the zbudpage unevictable,
* and return a zbudref to it. Else return NULL.
*/
/*
 * Search the unbuddied lists (largest-free-space first) for a page whose
 * free half fits @size + handle; only pages whose lock can be trylocked
 * are taken, to avoid blocking under lists_lock.  On success the page is
 * moved to the buddied list, promoted to MRU, filled in, and marked
 * unevictable until zbud_create_finish().
 */
struct zbudref *zbud_match_prep(struct tmem_handle *th, bool eph,
void *cdata, unsigned size)
{
struct zbudpage *zbudpage = NULL, *zbudpage2;
unsigned long budnum = 0UL;
unsigned nchunks;
int i, found_good_buddy = 0;
spinlock_t *lists_lock =
eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
struct zbud_unbuddied *unbud =
eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;
size += sizeof(struct tmem_handle);
nchunks = zbud_size_to_chunks(size);
/* list i holds pages with i chunks used, so NCHUNKS-i free; walk down */
for (i = MAX_CHUNK - nchunks + 1; i > 0; i--) {
spin_lock(lists_lock);
if (!list_empty(&unbud[i].list)) {
list_for_each_entry_safe(zbudpage, zbudpage2,
&unbud[i].list, budlist) {
if (zbudpage_spin_trylock(zbudpage)) {
found_good_buddy = i;
/* exits with both lists_lock and the page lock held */
goto found_unbuddied;
}
}
}
spin_unlock(lists_lock);
}
zbudpage = NULL;
goto out;
found_unbuddied:
BUG_ON(!zbudpage_is_locked(zbudpage));
/* exactly one of the two buddies must be free on an unbuddied page */
BUG_ON(!((zbudpage->zbud0_size == 0) ^ (zbudpage->zbud1_size == 0)));
if (zbudpage->zbud0_size == 0)
budnum = 0UL;
else if (zbudpage->zbud1_size == 0)
budnum = 1UL;
list_del_init(&zbudpage->budlist);
if (eph) {
list_add_tail(&zbudpage->budlist, &zbud_eph_buddied_list);
unbud[found_good_buddy].count--;
zbud_eph_unbuddied_count--;
zbud_eph_buddied_count++;
/* "promote" raw zbudpage to most-recently-used */
list_del_init(&zbudpage->lru);
list_add_tail(&zbudpage->lru, &zbud_eph_lru_list);
} else {
list_add_tail(&zbudpage->budlist, &zbud_pers_buddied_list);
unbud[found_good_buddy].count--;
zbud_pers_unbuddied_count--;
zbud_pers_buddied_count++;
/* "promote" raw zbudpage to most-recently-used */
list_del_init(&zbudpage->lru);
list_add_tail(&zbudpage->lru, &zbud_pers_lru_list);
}
zbud_init_zbud(zbudpage, th, eph, cdata, budnum, size);
/* block eviction until zbud_create_finish(); at most 2 adders in flight */
zbudpage->unevictable++;
BUG_ON(zbudpage->unevictable == 3);
zbudpage_spin_unlock(zbudpage);
spin_unlock(lists_lock);
out:
return zbudpage_to_zbudref(zbudpage, budnum);
}
/*
* Given a tmem handle, and a kmapped pointer to compressed data of
* the given size, and a newly allocated struct page, create an unevictable
* zbud in that new page and return a zbudref to it.
*/
/*
 * Create the first zbud in a caller-supplied fresh page: initialize the
 * page as a zbudpage, put it on the matching unbuddied list and the LRU,
 * store the data as zbud0, and mark it unevictable until
 * zbud_create_finish().  Returns NULL if @size is too large or
 * @newpage is NULL.
 */
struct zbudref *zbud_create_prep(struct tmem_handle *th, bool eph,
void *cdata, unsigned size,
struct page *newpage)
{
struct zbudpage *zbudpage;
unsigned long budnum = 0;
unsigned nchunks;
spinlock_t *lists_lock =
eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
struct zbud_unbuddied *unbud =
eph ? zbud_eph_unbuddied : zbud_pers_unbuddied;
#if 0
/* this may be worth it later to support decompress-in-place? */
static unsigned long counter;
budnum = counter++ & 1; /* alternate using zbud0 and zbud1 */
#endif
if (size > zbud_max_buddy_size())
return NULL;
if (newpage == NULL)
return NULL;
size += sizeof(struct tmem_handle);
nchunks = zbud_size_to_chunks(size) ;
spin_lock(lists_lock);
zbudpage = zbud_init_zbudpage(newpage, eph);
zbudpage_spin_lock(zbudpage);
list_add_tail(&zbudpage->budlist, &unbud[nchunks].list);
if (eph) {
list_add_tail(&zbudpage->lru, &zbud_eph_lru_list);
zbud_eph_unbuddied_count++;
} else {
list_add_tail(&zbudpage->lru, &zbud_pers_lru_list);
zbud_pers_unbuddied_count++;
}
unbud[nchunks].count++;
zbud_init_zbud(zbudpage, th, eph, cdata, budnum, size);
/* block eviction until zbud_create_finish() */
zbudpage->unevictable++;
BUG_ON(zbudpage->unevictable == 3);
zbudpage_spin_unlock(zbudpage);
spin_unlock(lists_lock);
return zbudpage_to_zbudref(zbudpage, budnum);
}
/*
* Finish creation of a zbud by, assuming another zbud isn't being created
* in parallel, marking it evictable.
*/
/*
 * Second phase of zbud_create_prep()/zbud_match_prep(): drop this
 * creator's unevictable hold so the page becomes reclaimable again
 * (once any other in-flight creator also finishes).
 */
void zbud_create_finish(struct zbudref *zref, bool eph)
{
struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
spinlock_t *lists_lock =
eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
spin_lock(lists_lock);
zbudpage_spin_lock(zbudpage);
BUG_ON(zbudpage_is_dying(zbudpage));
zbudpage->unevictable--;
BUG_ON((int)zbudpage->unevictable < 0);
zbudpage_spin_unlock(zbudpage);
spin_unlock(lists_lock);
}
/*
* Given a zbudref and a struct page, decompress the data from
* the zbud into the physical page represented by the struct page
* by upcalling to zcache_decompress
*/
int zbud_decompress(struct page *data_page, struct zbudref *zref, bool eph,
void (*decompress)(char *, unsigned int, char *))
{
struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
unsigned long budnum = zbudref_budnum(zref);
void *zbpg;
char *to_va, *from_va;
unsigned size;
int ret = -1; /* returns -1 if the zbudpage is dying, 0 on success */
spinlock_t *lists_lock =
eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
spin_lock(lists_lock);
zbudpage_spin_lock(zbudpage);
if (zbudpage_is_dying(zbudpage)) {
/* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
goto out;
}
/* map both the zbud source pageframe and the destination pageframe */
zbpg = kmap_zbudpage_atomic(zbudpage);
to_va = kmap_atomic(data_page);
if (budnum == 0)
size = zbudpage->zbud0_size;
else
size = zbudpage->zbud1_size;
BUG_ON(size == 0 || size > zbud_max_size());
from_va = zbud_data(zbpg, budnum, size);
/* stored size includes the prepended tmem_handle; skip it */
from_va += sizeof(struct tmem_handle);
size -= sizeof(struct tmem_handle);
/* decompress(src, srclen, dst) upcalls into zcache */
decompress(from_va, size, to_va);
kunmap_atomic(to_va);
kunmap_zbudpage_atomic(zbpg);
ret = 0;
out:
zbudpage_spin_unlock(zbudpage);
spin_unlock(lists_lock);
return ret;
}
/*
* Given a zbudref and a kernel pointer, copy the data from
* the zbud to the kernel pointer.
*/
int zbud_copy_from_zbud(char *to_va, struct zbudref *zref,
size_t *sizep, bool eph)
{
struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
unsigned long budnum = zbudref_budnum(zref);
void *zbpg;
char *from_va;
unsigned size;
int ret = -1; /* returns -1 if the zbudpage is dying, 0 on success */
spinlock_t *lists_lock =
eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
spin_lock(lists_lock);
zbudpage_spin_lock(zbudpage);
if (zbudpage_is_dying(zbudpage)) {
/* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
goto out;
}
zbpg = kmap_zbudpage_atomic(zbudpage);
if (budnum == 0)
size = zbudpage->zbud0_size;
else
size = zbudpage->zbud1_size;
BUG_ON(size == 0 || size > zbud_max_size());
from_va = zbud_data(zbpg, budnum, size);
/* stored size includes the prepended tmem_handle; skip it */
from_va += sizeof(struct tmem_handle);
size -= sizeof(struct tmem_handle);
/* report the payload size to the caller, then copy the payload out */
*sizep = size;
memcpy(to_va, from_va, size);
kunmap_zbudpage_atomic(zbpg);
ret = 0;
out:
zbudpage_spin_unlock(zbudpage);
spin_unlock(lists_lock);
return ret;
}
/*
* Given a zbudref and a kernel pointer, copy the data from
* the kernel pointer to the zbud.
*/
int zbud_copy_to_zbud(struct zbudref *zref, char *from_va, bool eph)
{
struct zbudpage *zbudpage = zbudref_to_zbudpage(zref);
unsigned long budnum = zbudref_budnum(zref);
void *zbpg;
char *to_va;
unsigned size;
int ret = -1; /* returns -1 if the zbudpage is dying, 0 on success */
spinlock_t *lists_lock =
eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
spin_lock(lists_lock);
zbudpage_spin_lock(zbudpage);
if (zbudpage_is_dying(zbudpage)) {
/* ignore dying zbudpage... see zbud_evict_pageframe_lru() */
goto out;
}
zbpg = kmap_zbudpage_atomic(zbudpage);
if (budnum == 0)
size = zbudpage->zbud0_size;
else
size = zbudpage->zbud1_size;
BUG_ON(size == 0 || size > zbud_max_size());
to_va = zbud_data(zbpg, budnum, size);
/* stored size includes the prepended tmem_handle; write past it */
to_va += sizeof(struct tmem_handle);
size -= sizeof(struct tmem_handle);
/*
* NOTE(review): copies exactly the zbud's stored payload size, so
* 'from_va' must point at at least that many valid bytes -- the
* caller is trusted to match sizes; confirm at call sites.
*/
memcpy(to_va, from_va, size);
kunmap_zbudpage_atomic(zbpg);
ret = 0;
out:
zbudpage_spin_unlock(zbudpage);
spin_unlock(lists_lock);
return ret;
}
/*
* Choose an ephemeral LRU zbudpage that is evictable (not locked), ensure
* there are no references to it remaining, and return the now unused
* (and re-init'ed) struct page and the total amount of compressed
* data that was evicted.
*/
struct page *zbud_evict_pageframe_lru(unsigned int *zsize, unsigned int *zpages)
{
struct zbudpage *zbudpage = NULL, *zbudpage2;
struct zbud_unbuddied *unbud = zbud_eph_unbuddied;
struct page *page = NULL;
bool irqs_disabled = irqs_disabled();
/*
* Since this can be called indirectly from cleancache_put, which
* has interrupts disabled, as well as frontswap_put, which does not,
* we need to be able to handle both cases, even though it is ugly.
*/
if (irqs_disabled)
spin_lock(&zbud_eph_lists_lock);
else
spin_lock_bh(&zbud_eph_lists_lock);
*zsize = 0;
if (list_empty(&zbud_eph_lru_list))
goto unlock_out;
list_for_each_entry_safe(zbudpage, zbudpage2, &zbud_eph_lru_list, lru) {
/* skip a locked zbudpage */
if (unlikely(!zbudpage_spin_trylock(zbudpage)))
continue;
/* skip an unevictable zbudpage */
if (unlikely(zbudpage->unevictable != 0)) {
zbudpage_spin_unlock(zbudpage);
continue;
}
/* got a locked evictable page */
goto evict_page;
}
unlock_out:
/* no unlocked evictable pages, give up */
if (irqs_disabled)
spin_unlock(&zbud_eph_lists_lock);
else
spin_unlock_bh(&zbud_eph_lists_lock);
goto out;
evict_page:
/* unlink from both the unbuddied/buddied list and the LRU */
list_del_init(&zbudpage->budlist);
list_del_init(&zbudpage->lru);
zbudpage_set_dying(zbudpage);
/*
* the zbudpage is now "dying" and attempts to read, write,
* or delete data from it will be ignored
*/
/* account for one or two zbuds; sizes exclude the tmem_handle headers */
if (zbudpage->zbud0_size != 0 && zbudpage->zbud1_size != 0) {
*zsize = zbudpage->zbud0_size + zbudpage->zbud1_size -
(2 * sizeof(struct tmem_handle));
*zpages = 2;
} else if (zbudpage->zbud0_size != 0) {
unbud[zbud_size_to_chunks(zbudpage->zbud0_size)].count--;
*zsize = zbudpage->zbud0_size - sizeof(struct tmem_handle);
*zpages = 1;
} else if (zbudpage->zbud1_size != 0) {
unbud[zbud_size_to_chunks(zbudpage->zbud1_size)].count--;
*zsize = zbudpage->zbud1_size - sizeof(struct tmem_handle);
*zpages = 1;
} else {
BUG();
}
/*
* Drop the list lock with a plain spin_unlock even when it was
* taken with spin_lock_bh: bottom halves are re-enabled manually
* via local_bh_enable() at the end of this path.
*/
spin_unlock(&zbud_eph_lists_lock);
zbud_eph_evicted_pageframes++;
if (*zpages == 1)
zbud_eph_unbuddied_count--;
else
zbud_eph_buddied_count--;
/* tell tmem the data is gone so it drops its references */
zbud_evict_tmem(zbudpage);
zbudpage_spin_lock(zbudpage);
zbudpage_clear_dying(zbudpage);
/*
* NOTE(review): zbud_unuse_zbudpage() presumably releases the
* zbudpage lock and hands back the re-init'ed pageframe -- confirm
* against its definition (not visible here).
*/
page = zbud_unuse_zbudpage(zbudpage, true);
if (!irqs_disabled)
local_bh_enable();
out:
return page;
}
/*
* Choose a persistent LRU zbudpage that is evictable (not locked), zombify it,
* read the tmem_handle(s) out of it into the passed array, and return the
* number of zbuds. Caller must perform necessary tmem functions and,
* indirectly, zbud functions to fetch any valid data and cause the
* now-zombified zbudpage to eventually be freed. We track the zombified
* zbudpage count so it is possible to observe if there is a leak.
FIXME: describe (ramster) case where data pointers are passed in for memcpy
*/
unsigned int zbud_make_zombie_lru(struct tmem_handle *th, unsigned char **data,
unsigned int *zsize, bool eph)
{
struct zbudpage *zbudpage = NULL, *zbudpag2;
struct tmem_handle *thfrom;
char *from_va;
void *zbpg;
unsigned size;
int ret = 0, i; /* ret counts the zbuds found (0, 1 or 2) */
spinlock_t *lists_lock =
eph ? &zbud_eph_lists_lock : &zbud_pers_lists_lock;
struct list_head *lru_list =
eph ? &zbud_eph_lru_list : &zbud_pers_lru_list;
spin_lock_bh(lists_lock);
if (list_empty(lru_list))
goto out;
list_for_each_entry_safe(zbudpage, zbudpag2, lru_list, lru) {
/* skip a locked zbudpage */
if (unlikely(!zbudpage_spin_trylock(zbudpage)))
continue;
/* skip an unevictable zbudpage */
if (unlikely(zbudpage->unevictable != 0)) {
zbudpage_spin_unlock(zbudpage);
continue;
}
/* got a locked evictable page */
goto zombify_page;
}
/* no unlocked evictable pages, give up */
goto out;
zombify_page:
/* got an unlocked evictable page, zombify it */
list_del_init(&zbudpage->budlist);
zbudpage_set_zombie(zbudpage);
/* FIXME what accounting do I need to do here? */
/* move from the LRU to the matching zombie list and count it */
list_del_init(&zbudpage->lru);
if (eph) {
list_add_tail(&zbudpage->lru, &zbud_eph_zombie_list);
zbud_eph_zombie_count =
atomic_inc_return(&zbud_eph_zombie_atomic);
} else {
list_add_tail(&zbudpage->lru, &zbud_pers_zombie_list);
zbud_pers_zombie_count =
atomic_inc_return(&zbud_pers_zombie_atomic);
}
/* FIXME what accounting do I need to do here? */
zbpg = kmap_zbudpage_atomic(zbudpage);
/* walk both buds; a zero size means that bud slot is unused */
for (i = 0; i < 2; i++) {
size = (i == 0) ? zbudpage->zbud0_size : zbudpage->zbud1_size;
if (size) {
from_va = zbud_data(zbpg, i, size);
/* the tmem_handle is stored at the front of each zbud */
thfrom = (struct tmem_handle *)from_va;
from_va += sizeof(struct tmem_handle);
size -= sizeof(struct tmem_handle);
/* all three output parameters are optional */
if (th != NULL)
th[ret] = *thfrom;
if (data != NULL)
memcpy(data[ret], from_va, size);
if (zsize != NULL)
*zsize++ = size;
ret++;
}
}
kunmap_zbudpage_atomic(zbpg);
zbudpage_spin_unlock(zbudpage);
out:
spin_unlock_bh(lists_lock);
return ret;
}
/* One-time zbud setup: debugfs counters and the per-chunk-size freelists. */
void zbud_init(void)
{
int i;
zbud_debugfs_init();
/* two tmem_handles (one per bud) must fit in a single chunk */
BUG_ON((sizeof(struct tmem_handle) * 2 > CHUNK_SIZE));
/* struct zbudpage overlays struct page, so it must not be larger */
BUG_ON(sizeof(struct zbudpage) > sizeof(struct page));
for (i = 0; i < NCHUNKS; i++) {
INIT_LIST_HEAD(&zbud_eph_unbuddied[i].list);
INIT_LIST_HEAD(&zbud_pers_unbuddied[i].list);
}
}
| gpl-2.0 |
dastin1015/android_kernel_htc_villec2 | arch/arm/mach-exynos4/setup-sdhci.c | 2395 | 1797 | /* linux/arch/arm/mach-exynos4/setup-sdhci.c
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS4 - Helper functions for settign up SDHCI device(s) (HSMMC)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <plat/regs-sdhci.h>
/* clock sources for the mmc bus clock, order as for the ctrl2[5..4] */
char *exynos4_hsmmc_clksrcs[4] = {
[0] = NULL, /* clock source selections 0, 1 and 3 are unused */
[1] = NULL,
[2] = "sclk_mmc", /* mmc_bus */
[3] = NULL,
};
/*
 * Per-card SDHCI controller configuration hook for EXYNOS4: programs the
 * S3C-specific CONTROL2/CONTROL3 registers (clock source, feedback clock
 * delays) whenever the MMC core (re)configures the bus.
 */
void exynos4_setup_sdhci_cfg_card(struct platform_device *dev, void __iomem *r,
struct mmc_ios *ios, struct mmc_card *card)
{
u32 ctrl2, ctrl3;
/* don't need to alter anything according to card-type */
ctrl2 = readl(r + S3C_SDHCI_CONTROL2);
/* select base clock source to HCLK */
/*
 * NOTE(review): this keeps only the SELBASECLK field bits (&= mask)
 * rather than clearing them -- matches the original driver, but
 * verify against the S3C SDHCI register documentation.
 */
ctrl2 &= S3C_SDHCI_CTRL2_SELBASECLK_MASK;
/*
 * clear async mode, enable conflict mask, rx feedback ctrl, SD
 * clk hold and no use debounce count
 */
ctrl2 |= (S3C64XX_SDHCI_CTRL2_ENSTAASYNCCLR |
S3C64XX_SDHCI_CTRL2_ENCMDCNFMSK |
S3C_SDHCI_CTRL2_ENFBCLKRX |
S3C_SDHCI_CTRL2_DFCNT_NONE |
S3C_SDHCI_CTRL2_ENCLKOUTHOLD);
/* Tx and Rx feedback clock delay control */
/* slow cards (< 25 MHz) get all four feedback delay taps enabled */
if (ios->clock < 25 * 1000000)
ctrl3 = (S3C_SDHCI_CTRL3_FCSEL3 |
S3C_SDHCI_CTRL3_FCSEL2 |
S3C_SDHCI_CTRL3_FCSEL1 |
S3C_SDHCI_CTRL3_FCSEL0);
else
ctrl3 = (S3C_SDHCI_CTRL3_FCSEL1 | S3C_SDHCI_CTRL3_FCSEL0);
writel(ctrl2, r + S3C_SDHCI_CONTROL2);
writel(ctrl3, r + S3C_SDHCI_CONTROL3);
}
| gpl-2.0 |
mifl/android_kernel_pantech_msm8974 | arch/arm/mach-msm/board-msm7x27a-regulator.c | 3163 | 7670 | /*
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include "board-msm7x27a-regulator.h"
/* number of discrete voltage setpoints in an inclusive [min_uV, max_uV] span */
#define VOLTAGE_RANGE(min_uV, max_uV, step_uV) ((max_uV - min_uV) / step_uV)
/* Physically available PMIC regulator voltage setpoint ranges */
/* p-type LDOs: 1.5 V .. 3.3 V in 25 mV steps */
#define p_ranges VOLTAGE_RANGE(1500000, 3300000, 25000)
/* n-type LDOs: 0.75 V .. 1.525 V in 12.5 mV steps */
#define n_ranges VOLTAGE_RANGE(750000, 1525000, 12500)
/* SMPS regulators: two contiguous sub-ranges with different step sizes */
#define s_ranges (VOLTAGE_RANGE(700000, 1500000, 12500) + \
VOLTAGE_RANGE(1500000, 3050000, 25000))
/* declare the regulator_consumer_supply array backing regulator 'name' */
#define PCOM_VREG_CONSUMERS(name) \
static struct regulator_consumer_supply __pcom_vreg_supply_##name[]
/* build a struct regulator_init_data wired to the consumer array above */
#define PCOM_VREG_INIT_DATA(_name, _supply, _min_uV, _max_uV, _always_on, \
_boot_on, _apply_uV, _supply_uV)\
{ \
.supply_regulator = _supply, \
.consumer_supplies = __pcom_vreg_supply_##_name, \
.num_consumer_supplies = ARRAY_SIZE(__pcom_vreg_supply_##_name), \
.constraints = { \
.name = #_name, \
.min_uV = _min_uV, \
.max_uV = _max_uV, \
.valid_modes_mask = REGULATOR_MODE_NORMAL, \
.valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | \
REGULATOR_CHANGE_STATUS, \
.input_uV = _supply_uV, \
.apply_uV = _apply_uV, \
.boot_on = _boot_on, \
.always_on = _always_on \
} \
}
/*
 * Describe one positive regulator (SMPS or LDO); '_range' selects the
 * setpoint count via the p/n/s_ranges macros above.
 */
#define PCOM_VREG_SMP(_name, _id, _supply, _min_uV, _max_uV, _rise_time, \
_pulldown, _always_on, _boot_on, _apply_uV, _supply_uV, _range) \
{ \
.init_data = PCOM_VREG_INIT_DATA(_name, _supply, _min_uV, _max_uV, \
_always_on, _boot_on, _apply_uV, _supply_uV), \
.id = _id, \
.rise_time = _rise_time, \
.pulldown = _pulldown, \
.negative = 0, \
.n_voltages = _range##_ranges, \
}
/* LDOs use the same descriptor layout as SMPS regulators */
#define PCOM_VREG_LDO PCOM_VREG_SMP
/*
 * Describe the negative charge pump: voltages are negated so the
 * regulator framework (which expects min <= max) sees positive values.
 */
#define PCOM_VREG_NCP(_name, _id, _supply, _min_uV, _max_uV, _rise_time, \
_always_on, _boot_on, _apply_uV, _supply_uV) \
{ \
.init_data = PCOM_VREG_INIT_DATA(_name, _supply, -(_min_uV), \
-(_max_uV), _always_on, _boot_on, _apply_uV, _supply_uV), \
.id = _id, \
.rise_time = _rise_time, \
.pulldown = -1, \
.negative = 1, \
}
/*
 * Consumer-supply tables: each regulator is reachable both under its
 * canonical name (e.g. "ldo01") and under one or more legacy or
 * function-specific aliases ("msmc1", "rfrx1", ...). Entries with a
 * non-NULL second argument bind the supply to a specific device
 * (e.g. I2C client "1-004a").
 */
PCOM_VREG_CONSUMERS(smps1) = {
REGULATOR_SUPPLY("smps1", NULL),
REGULATOR_SUPPLY("msmc1", NULL),
};
PCOM_VREG_CONSUMERS(smps2) = {
REGULATOR_SUPPLY("smps2", NULL),
REGULATOR_SUPPLY("msmc2", NULL),
};
PCOM_VREG_CONSUMERS(smps3) = {
REGULATOR_SUPPLY("smps3", NULL),
REGULATOR_SUPPLY("msme1", NULL),
REGULATOR_SUPPLY("vcc_i2c", "1-004a"),
REGULATOR_SUPPLY("vcc_i2c", "1-0038"),
};
PCOM_VREG_CONSUMERS(smps4) = {
REGULATOR_SUPPLY("smps4", NULL),
REGULATOR_SUPPLY("rf", NULL),
};
PCOM_VREG_CONSUMERS(ldo01) = {
REGULATOR_SUPPLY("ldo01", NULL),
REGULATOR_SUPPLY("ldo1", NULL),
REGULATOR_SUPPLY("rfrx1", NULL),
};
PCOM_VREG_CONSUMERS(ldo02) = {
REGULATOR_SUPPLY("ldo02", NULL),
REGULATOR_SUPPLY("ldo2", NULL),
REGULATOR_SUPPLY("rfrx2", NULL),
};
PCOM_VREG_CONSUMERS(ldo03) = {
REGULATOR_SUPPLY("ldo03", NULL),
REGULATOR_SUPPLY("ldo3", NULL),
REGULATOR_SUPPLY("mddi", NULL),
};
PCOM_VREG_CONSUMERS(ldo04) = {
REGULATOR_SUPPLY("ldo04", NULL),
REGULATOR_SUPPLY("ldo4", NULL),
REGULATOR_SUPPLY("pllx", NULL),
};
PCOM_VREG_CONSUMERS(ldo05) = {
REGULATOR_SUPPLY("ldo05", NULL),
REGULATOR_SUPPLY("ldo5", NULL),
REGULATOR_SUPPLY("wlan2", NULL),
};
PCOM_VREG_CONSUMERS(ldo06) = {
REGULATOR_SUPPLY("ldo06", NULL),
REGULATOR_SUPPLY("ldo6", NULL),
REGULATOR_SUPPLY("wlan3", NULL),
};
PCOM_VREG_CONSUMERS(ldo07) = {
REGULATOR_SUPPLY("ldo07", NULL),
REGULATOR_SUPPLY("ldo7", NULL),
REGULATOR_SUPPLY("msma", NULL),
};
PCOM_VREG_CONSUMERS(ldo08) = {
REGULATOR_SUPPLY("ldo08", NULL),
REGULATOR_SUPPLY("ldo8", NULL),
REGULATOR_SUPPLY("tcxo", NULL),
};
PCOM_VREG_CONSUMERS(ldo09) = {
REGULATOR_SUPPLY("ldo09", NULL),
REGULATOR_SUPPLY("ldo9", NULL),
REGULATOR_SUPPLY("usb2", NULL),
};
PCOM_VREG_CONSUMERS(ldo10) = {
REGULATOR_SUPPLY("ldo10", NULL),
REGULATOR_SUPPLY("emmc", NULL),
};
PCOM_VREG_CONSUMERS(ldo11) = {
REGULATOR_SUPPLY("ldo11", NULL),
REGULATOR_SUPPLY("wlan_tcx0", NULL),
};
PCOM_VREG_CONSUMERS(ldo12) = {
REGULATOR_SUPPLY("ldo12", NULL),
REGULATOR_SUPPLY("gp2", NULL),
REGULATOR_SUPPLY("vdd_ana", "1-004a"),
REGULATOR_SUPPLY("vdd", "1-0038"),
};
PCOM_VREG_CONSUMERS(ldo13) = {
REGULATOR_SUPPLY("ldo13", NULL),
REGULATOR_SUPPLY("mmc", NULL),
};
PCOM_VREG_CONSUMERS(ldo14) = {
REGULATOR_SUPPLY("ldo14", NULL),
REGULATOR_SUPPLY("usb", NULL),
};
PCOM_VREG_CONSUMERS(ldo15) = {
REGULATOR_SUPPLY("ldo15", NULL),
REGULATOR_SUPPLY("usim2", NULL),
};
PCOM_VREG_CONSUMERS(ldo16) = {
REGULATOR_SUPPLY("ldo16", NULL),
REGULATOR_SUPPLY("ruim", NULL),
};
PCOM_VREG_CONSUMERS(ldo17) = {
REGULATOR_SUPPLY("ldo17", NULL),
REGULATOR_SUPPLY("bt", NULL),
};
PCOM_VREG_CONSUMERS(ldo18) = {
REGULATOR_SUPPLY("ldo18", NULL),
REGULATOR_SUPPLY("rftx", NULL),
};
PCOM_VREG_CONSUMERS(ldo19) = {
REGULATOR_SUPPLY("ldo19", NULL),
REGULATOR_SUPPLY("wlan4", NULL),
};
PCOM_VREG_CONSUMERS(ncp) = {
REGULATOR_SUPPLY("ncp", NULL),
};
static struct proccomm_regulator_info msm7x27a_pcom_vreg_info[] = {
/* Standard regulators (SMPS and LDO)
 * R = rise time (us)
 * P = pulldown (1 = pull down, 0 = float, -1 = don't care)
 * A = always on
 * B = boot on
 * V = automatic voltage set (meaningful for single-voltage regs only)
 * S = supply voltage (uV)
 * T = type of regulator (smps, pldo, nldo)
 * name id supp min uV max uV R P A B V S T*/
PCOM_VREG_SMP(smps1, 3, NULL, 1100000, 1100000, 0, -1, 0, 0, 0, 0, s),
PCOM_VREG_SMP(smps2, 4, NULL, 1100000, 1100000, 0, -1, 0, 0, 0, 0, s),
PCOM_VREG_SMP(smps3, 2, NULL, 1800000, 1800000, 0, -1, 0, 0, 0, 0, s),
PCOM_VREG_SMP(smps4, 24, NULL, 2100000, 2100000, 0, -1, 0, 0, 0, 0, s),
PCOM_VREG_LDO(ldo01, 12, NULL, 1800000, 2100000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo02, 13, NULL, 2850000, 2850000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo03, 49, NULL, 1200000, 1200000, 0, -1, 0, 0, 0, 0, n),
PCOM_VREG_LDO(ldo04, 50, NULL, 1100000, 1100000, 0, -1, 0, 0, 0, 0, n),
PCOM_VREG_LDO(ldo05, 45, NULL, 1300000, 1350000, 0, -1, 0, 0, 0, 0, n),
PCOM_VREG_LDO(ldo06, 51, NULL, 1200000, 1200000, 0, -1, 0, 0, 0, 0, n),
PCOM_VREG_LDO(ldo07, 0, NULL, 2600000, 2600000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo08, 9, NULL, 2850000, 2850000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo09, 44, NULL, 1800000, 1800000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo10, 52, NULL, 1800000, 3000000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo11, 53, NULL, 1800000, 1800000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo12, 21, NULL, 2850000, 2850000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo13, 18, NULL, 2850000, 2850000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo14, 16, NULL, 3300000, 3300000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo15, 54, NULL, 1800000, 2850000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo16, 19, NULL, 1800000, 2850000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo17, 56, NULL, 2900000, 3300000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo18, 11, NULL, 2700000, 2700000, 0, -1, 0, 0, 0, 0, p),
PCOM_VREG_LDO(ldo19, 57, NULL, 1200000, 1800000, 0, -1, 0, 0, 0, 0, p),
/* negative charge pump: voltages are given negated, see PCOM_VREG_NCP */
PCOM_VREG_NCP(ncp, 31, NULL, -1800000, -1800000, 0, 0, 0, 0, 0),
};
/* exported to the board file that registers the proccomm regulator device */
struct proccomm_regulator_platform_data msm7x27a_proccomm_regulator_data = {
.regs = msm7x27a_pcom_vreg_info,
.nregs = ARRAY_SIZE(msm7x27a_pcom_vreg_info)
};
| gpl-2.0 |
luckasfb/kernel_lenovo_a3000 | arch/sparc/kernel/pci.c | 3675 | 28816 | /* pci.c: UltraSparc PCI controller support.
*
* Copyright (C) 1997, 1998, 1999 David S. Miller (davem@redhat.com)
* Copyright (C) 1998, 1999 Eddie C. Dost (ecd@skynet.be)
* Copyright (C) 1999 Jakub Jelinek (jj@ultra.linux.cz)
*
* OF tree based PCI bus probing taken from the PowerPC port
* with minor modifications, see there for credits.
*/
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/msi.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/prom.h>
#include <asm/apb.h>
#include "pci_impl.h"
/* List of all PCI controllers found in the system. */
struct pci_pbm_info *pci_pbm_root = NULL;
/* Each PBM found gets a unique index. */
int pci_num_pbms = 0;
volatile int pci_poke_in_progress;
volatile int pci_poke_cpu = -1;
volatile int pci_poke_faulted;
static DEFINE_SPINLOCK(pci_poke_lock);
/*
 * Read one byte of PCI config space at physical address 'addr',
 * tolerating a faulting access (e.g. master abort on a missing device).
 * The volatile pci_poke_* globals are presumably consulted by the trap
 * handler to recover and set pci_poke_faulted -- confirm in the trap
 * code.  On fault, *ret is left unmodified.
 */
void pci_config_read8(u8 *addr, u8 *ret)
{
unsigned long flags;
u8 byte;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
/* physical-bypass little-endian load, fenced on both sides */
__asm__ __volatile__("membar #Sync\n\t"
"lduba [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (byte)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = byte;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/*
 * 16-bit variant of pci_config_read8(): fault-tolerant config-space
 * read via the pci_poke_* protocol; *ret is untouched on fault.
 */
void pci_config_read16(u16 *addr, u16 *ret)
{
unsigned long flags;
u16 word;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"lduha [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (word)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = word;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/*
 * 32-bit variant of pci_config_read8(): fault-tolerant config-space
 * read via the pci_poke_* protocol; *ret is untouched on fault.
 */
void pci_config_read32(u32 *addr, u32 *ret)
{
unsigned long flags;
u32 dword;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"lduwa [%1] %2, %0\n\t"
"membar #Sync"
: "=r" (dword)
: "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
if (!pci_poke_faulted)
*ret = dword;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/*
 * Write one byte of PCI config space at physical address 'addr' using
 * the same fault-tolerant pci_poke_* protocol as the read helpers.
 * A faulting write is silently discarded.
 */
void pci_config_write8(u8 *addr, u8 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stba %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* 16-bit variant of pci_config_write8(); faulting writes are discarded. */
void pci_config_write16(u16 *addr, u16 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stha %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
/* 32-bit variant of pci_config_write8(); faulting writes are discarded. */
void pci_config_write32(u32 *addr, u32 val)
{
unsigned long flags;
spin_lock_irqsave(&pci_poke_lock, flags);
pci_poke_cpu = smp_processor_id();
pci_poke_in_progress = 1;
pci_poke_faulted = 0;
__asm__ __volatile__("membar #Sync\n\t"
"stwa %0, [%1] %2\n\t"
"membar #Sync"
: /* no outputs */
: "r" (val), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E_L)
: "memory");
pci_poke_in_progress = 0;
pci_poke_cpu = -1;
spin_unlock_irqrestore(&pci_poke_lock, flags);
}
static int ofpci_verbose;
/*
 * Kernel command-line hook "ofpci_debug=<n>": any non-zero value
 * enables verbose logging of the OF-tree PCI probe below.
 */
static int __init ofpci_debug(char *str)
{
int val = 0;
get_option(&str, &val);
if (val)
ofpci_verbose = 1;
return 1;
}
__setup("ofpci_debug=", ofpci_debug);
/*
 * Translate the high cell (phys.hi) of an OF PCI address property into
 * Linux resource flags.  Bit 25 marks memory space, bit 24 I/O space;
 * for memory space, bits 22/28 carry the 64-bit and below-1M type bits
 * and bit 30 marks a prefetchable range.  Returns 0 for config space
 * or an unrecognized encoding.
 */
static unsigned long pci_parse_of_flags(u32 addr0)
{
	unsigned long res_flags;

	if (addr0 & 0x02000000) {
		res_flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
		res_flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
		res_flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
		if (addr0 & 0x40000000)
			res_flags |= IORESOURCE_PREFETCH |
				     PCI_BASE_ADDRESS_MEM_PREFETCH;
		return res_flags;
	}
	if (addr0 & 0x01000000)
		return IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
	return 0;
}
/* The of_device layer has translated all of the assigned-address properties
* into physical address resources, we only have to figure out the register
* mapping.
*/
static void pci_parse_of_addrs(struct platform_device *op,
struct device_node *node,
struct pci_dev *dev)
{
struct resource *op_res;
const u32 *addrs;
int proplen;
addrs = of_get_property(node, "assigned-addresses", &proplen);
if (!addrs)
return;
if (ofpci_verbose)
printk(" parse addresses (%d bytes) @ %p\n",
proplen, addrs);
op_res = &op->resource[0];
/* each assigned-addresses entry is 5 cells (20 bytes) */
for (; proplen >= 20; proplen -= 20, addrs += 5, op_res++) {
struct resource *res;
unsigned long flags;
int i;
flags = pci_parse_of_flags(addrs[0]);
if (!flags)
continue;
/* low byte of phys.hi is the config-space register offset */
i = addrs[0] & 0xff;
if (ofpci_verbose)
printk(" start: %llx, end: %llx, i: %x\n",
op_res->start, op_res->end, i);
if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
/* BARs are 4 bytes apart, hence the >> 2 index mapping */
res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
} else if (i == dev->rom_base_reg) {
res = &dev->resource[PCI_ROM_RESOURCE];
flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE
| IORESOURCE_SIZEALIGN;
} else {
printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
continue;
}
/* adopt the address range the of_device layer already translated */
res->start = op_res->start;
res->end = op_res->end;
res->flags = flags;
res->name = pci_name(dev);
}
}
/*
 * Build a struct pci_dev for the OF node 'node' at bus/devfn, filling
 * in ids, class, header type and resources from OF properties instead
 * of probing config space, then register it with the PCI core.
 * Returns NULL if allocation fails.
 */
static struct pci_dev *of_create_pci_dev(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus, int devfn)
{
struct dev_archdata *sd;
struct pci_slot *slot;
struct platform_device *op;
struct pci_dev *dev;
const char *type;
u32 class;
dev = alloc_pci_dev();
if (!dev)
return NULL;
/* mirror the PBM's iommu/streaming-cache/NUMA info into both archdatas */
sd = &dev->dev.archdata;
sd->iommu = pbm->iommu;
sd->stc = &pbm->stc;
sd->host_controller = pbm;
sd->op = op = of_find_device_by_node(node);
sd->numa_node = pbm->numa_node;
sd = &op->dev.archdata;
sd->iommu = pbm->iommu;
sd->stc = &pbm->stc;
sd->numa_node = pbm->numa_node;
if (!strcmp(node->name, "ebus"))
of_propagate_archdata(op);
type = of_get_property(node, "device_type", NULL);
if (type == NULL)
type = "";
if (ofpci_verbose)
printk(" create device, devfn: %x, type: %s\n",
devfn, type);
dev->bus = bus;
dev->sysdata = node;
dev->dev.parent = bus->bridge;
dev->dev.bus = &pci_bus_type;
dev->dev.of_node = of_node_get(node);
dev->devfn = devfn;
dev->multifunction = 0; /* maybe a lie? */
set_pcie_port_type(dev);
/* attach the matching hotplug slot, if the bus has one for this device */
list_for_each_entry(slot, &dev->bus->slots, list)
if (PCI_SLOT(dev->devfn) == slot->number)
dev->slot = slot;
dev->vendor = of_getintprop_default(node, "vendor-id", 0xffff);
dev->device = of_getintprop_default(node, "device-id", 0xffff);
dev->subsystem_vendor =
of_getintprop_default(node, "subsystem-vendor-id", 0);
dev->subsystem_device =
of_getintprop_default(node, "subsystem-id", 0);
dev->cfg_size = pci_cfg_space_size(dev);
/* We can't actually use the firmware value, we have
 * to read what is in the register right now. One
 * reason is that in the case of IDE interfaces the
 * firmware can sample the value before the IDE
 * interface is programmed into native mode.
 */
pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
dev->class = class >> 8;
dev->revision = class & 0xff;
dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
if (ofpci_verbose)
printk(" class: 0x%x device name: %s\n",
dev->class, pci_name(dev));
/* I have seen IDE devices which will not respond to
 * the bmdma simplex check reads if bus mastering is
 * disabled.
 */
if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE)
pci_set_master(dev);
dev->current_state = 4; /* unknown power state */
dev->error_state = pci_channel_io_normal;
dev->dma_mask = 0xffffffff;
/* header type and ROM BAR location depend on the node flavor */
if (!strcmp(node->name, "pci")) {
/* a PCI-PCI bridge */
dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
dev->rom_base_reg = PCI_ROM_ADDRESS1;
} else if (!strcmp(type, "cardbus")) {
dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
} else {
dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
dev->rom_base_reg = PCI_ROM_ADDRESS;
dev->irq = sd->op->archdata.irqs[0];
if (dev->irq == 0xffffffff)
dev->irq = PCI_IRQ_NONE;
}
pci_parse_of_addrs(sd->op, node, dev);
if (ofpci_verbose)
printk(" adding to system ...\n");
pci_device_add(dev, bus);
return dev;
}
/*
 * Find the lowest and highest set-bit positions in the 8-bit APB
 * address-map register 'map'.  If no bit is set, *first_p = 8 and
 * *last_p = 0 (i.e. first > last), which callers can detect.
 */
static void __devinit apb_calc_first_last(u8 map, u32 *first_p, u32 *last_p)
{
	u32 bit, lo = 8, hi = 0;

	for (bit = 0; bit < 8; bit++) {
		if (!(map & (1 << bit)))
			continue;
		if (bit < lo)
			lo = bit;
		if (bit > hi)
			hi = bit;
	}
	*first_p = lo;
	*last_p = hi;
}
/* For PCI bus devices which lack a 'ranges' property we interrogate
* the config space values to set the resources, just like the generic
* Linux PCI probing code does.
*/
static void __devinit pci_cfg_fake_ranges(struct pci_dev *dev,
struct pci_bus *bus,
struct pci_pbm_info *pbm)
{
struct pci_bus_region region;
struct resource *res, res2;
u8 io_base_lo, io_limit_lo;
u16 mem_base_lo, mem_limit_lo;
unsigned long base, limit;
/* I/O window: low byte, optionally extended to 32 bits */
pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;
if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
u16 io_base_hi, io_limit_hi;
pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
base |= (io_base_hi << 16);
limit |= (io_limit_hi << 16);
}
res = bus->resource[0];
/* base > limit means the window is disabled */
if (base <= limit) {
res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
res2.flags = res->flags;
region.start = base;
/* I/O windows are 4K granular */
region.end = limit + 0xfff;
pcibios_bus_to_resource(dev, &res2, &region);
/* only fill in start/end that weren't already set */
if (!res->start)
res->start = res2.start;
if (!res->end)
res->end = res2.end;
}
/* non-prefetchable memory window */
pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
res = bus->resource[1];
if (base <= limit) {
res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) |
IORESOURCE_MEM);
region.start = base;
/* memory windows are 1M granular */
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev, res, &region);
}
/* prefetchable memory window, possibly 64-bit */
pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
u32 mem_base_hi, mem_limit_hi;
pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
/*
 * Some bridges set the base > limit by default, and some
 * (broken) BIOSes do not initialize them. If we find
 * this, just assume they are not being used.
 */
if (mem_base_hi <= mem_limit_hi) {
base |= ((long) mem_base_hi) << 32;
limit |= ((long) mem_limit_hi) << 32;
}
}
res = bus->resource[2];
if (base <= limit) {
res->flags = ((mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) |
IORESOURCE_MEM | IORESOURCE_PREFETCH);
region.start = base;
region.end = limit + 0xfffff;
pcibios_bus_to_resource(dev, res, &region);
}
}
/* Cook up fake bus resources for SUNW,simba PCI bridges which lack
* a proper 'ranges' property.
*/
static void __devinit apb_fake_ranges(struct pci_dev *dev,
struct pci_bus *bus,
struct pci_pbm_info *pbm)
{
struct pci_bus_region region;
struct resource *res;
u32 first, last;
u8 map;
/* I/O window: each map bit covers a 2MB (1 << 21) region */
pci_read_config_byte(dev, APB_IO_ADDRESS_MAP, &map);
apb_calc_first_last(map, &first, &last);
res = bus->resource[0];
res->flags = IORESOURCE_IO;
region.start = (first << 21);
region.end = (last << 21) + ((1 << 21) - 1);
pcibios_bus_to_resource(dev, res, &region);
/* memory window: same 2MB-per-bit encoding in a separate map register */
pci_read_config_byte(dev, APB_MEM_ADDRESS_MAP, &map);
apb_calc_first_last(map, &first, &last);
res = bus->resource[1];
res->flags = IORESOURCE_MEM;
region.start = (first << 21);
region.end = (last << 21) + ((1 << 21) - 1);
pcibios_bus_to_resource(dev, res, &region);
}
/*
 * Forward declaration: of_scan_pci_bridge() and pci_of_scan_bus()
 * recurse into each other while walking the OF device tree.
 */
static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_bus *bus);
/* combine two consecutive 32-bit OF property cells into one 64-bit value */
#define GET_64BIT(prop, i) ((((u64) (prop)[(i)]) << 32) | (prop)[(i)+1])
/*
 * Create and populate the secondary pci_bus behind a PCI-PCI bridge
 * described by OF node 'node', deriving its resources from the
 * "ranges" property (or cooking them up for SUNW,simba bridges and
 * for bridges lacking "ranges"), then recursively scan it.
 */
static void __devinit of_scan_pci_bridge(struct pci_pbm_info *pbm,
struct device_node *node,
struct pci_dev *dev)
{
struct pci_bus *bus;
const u32 *busrange, *ranges;
int len, i, simba;
struct pci_bus_region region;
struct resource *res;
unsigned int flags;
u64 size;
if (ofpci_verbose)
printk("of_scan_pci_bridge(%s)\n", node->full_name);
/* parse bus-range property */
busrange = of_get_property(node, "bus-range", &len);
if (busrange == NULL || len != 8) {
printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
node->full_name);
return;
}
ranges = of_get_property(node, "ranges", &len);
simba = 0;
if (ranges == NULL) {
const char *model = of_get_property(node, "model", NULL);
if (model && !strcmp(model, "SUNW,simba"))
simba = 1;
}
bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
if (!bus) {
printk(KERN_ERR "Failed to create pci bus for %s\n",
node->full_name);
return;
}
bus->primary = dev->bus->number;
bus->subordinate = busrange[1];
bus->bridge_ctl = 0;
/* parse ranges property, or cook one up by hand for Simba */
/* PCI #address-cells == 3 and #size-cells == 2 always */
/* point the bus resources at the bridge's resource slots, initially empty */
res = &dev->resource[PCI_BRIDGE_RESOURCES];
for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
res->flags = 0;
bus->resource[i] = res;
++res;
}
if (simba) {
apb_fake_ranges(dev, bus, pbm);
goto after_ranges;
} else if (ranges == NULL) {
pci_cfg_fake_ranges(dev, bus, pbm);
goto after_ranges;
}
/* resource[0] is reserved for I/O, so memory ranges start at index 1 */
i = 1;
/* each ranges entry is 8 cells: 3 child addr, 3 parent addr, 2 size */
for (; len >= 32; len -= 32, ranges += 8) {
flags = pci_parse_of_flags(ranges[0]);
size = GET_64BIT(ranges, 6);
if (flags == 0 || size == 0)
continue;
if (flags & IORESOURCE_IO) {
res = bus->resource[0];
if (res->flags) {
printk(KERN_ERR "PCI: ignoring extra I/O range"
" for bridge %s\n", node->full_name);
continue;
}
} else {
if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
printk(KERN_ERR "PCI: too many memory ranges"
" for bridge %s\n", node->full_name);
continue;
}
res = bus->resource[i];
++i;
}
res->flags = flags;
region.start = GET_64BIT(ranges, 1);
region.end = region.start + size - 1;
pcibios_bus_to_resource(dev, res, &region);
}
after_ranges:
sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
bus->number);
if (ofpci_verbose)
printk(" bus name: %s\n", bus->name);
/* recurse into the devices behind this bridge */
pci_of_scan_bus(pbm, node, bus);
}
/* pci_of_scan_bus() - populate @bus from the OF device tree.
 *
 * Walks the OF children of @node, creating a pci_dev for every child
 * that carries a usable "reg" property, then recursively scans any
 * PCI-PCI or CardBus bridge that is discovered.
 *
 * Fix: the third argument to of_get_property() had been corrupted to
 * the mojibake token "®len"; restored to the intended &reglen.
 */
static void __devinit pci_of_scan_bus(struct pci_pbm_info *pbm,
				      struct device_node *node,
				      struct pci_bus *bus)
{
	struct device_node *child;
	const u32 *reg;
	int reglen, devfn, prev_devfn;
	struct pci_dev *dev;

	if (ofpci_verbose)
		printk("PCI: scan_bus[%s] bus no %d\n",
		       node->full_name, bus->number);

	child = NULL;
	prev_devfn = -1;
	while ((child = of_get_next_child(node, child)) != NULL) {
		if (ofpci_verbose)
			printk(" * %s\n", child->full_name);
		reg = of_get_property(child, "reg", &reglen);
		if (reg == NULL || reglen < 20)
			continue;
		devfn = (reg[0] >> 8) & 0xff;

		/* This is a workaround for some device trees
		 * which list PCI devices twice. On the V100
		 * for example, device number 3 is listed twice.
		 * Once as "pm" and once again as "lomp".
		 */
		if (devfn == prev_devfn)
			continue;
		prev_devfn = devfn;

		/* create a new pci_dev for this device */
		dev = of_create_pci_dev(pbm, child, bus, devfn);
		if (!dev)
			continue;
		if (ofpci_verbose)
			printk("PCI: dev header type: %x\n",
			       dev->hdr_type);

		/* Descend into bridges so subordinate buses get scanned. */
		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
			of_scan_pci_bridge(pbm, child, dev);
	}
}
static ssize_t
show_pciobppath_attr(struct device * dev, struct device_attribute * attr, char * buf)
{
struct pci_dev *pdev;
struct device_node *dp;
pdev = to_pci_dev(dev);
dp = pdev->dev.of_node;
return snprintf (buf, PAGE_SIZE, "%s\n", dp->full_name);
}
static DEVICE_ATTR(obppath, S_IRUSR | S_IRGRP | S_IROTH, show_pciobppath_attr, NULL);
/* Create the "obppath" sysfs attribute for every device on @bus and,
 * recursively, on every child bus.
 */
static void __devinit pci_bus_register_of_sysfs(struct pci_bus *bus)
{
struct pci_dev *dev;
struct pci_bus *child_bus;
int err;
list_for_each_entry(dev, &bus->devices, bus_list) {
/* we don't really care if we can create this file or
 * not, but we need to assign the result of the call
 * or the world will fall under alien invasion and
 * everybody will be frozen on a spaceship ready to be
 * eaten on alpha centauri by some green and jelly
 * humanoid.
 */
err = sysfs_create_file(&dev->dev.kobj, &dev_attr_obppath.attr);
(void) err;
}
/* Recurse into subordinate buses. */
list_for_each_entry(child_bus, &bus->children, node)
pci_bus_register_of_sysfs(child_bus);
}
/* Scan one partition bus module (PBM): create its root PCI bus with the
 * PBM's I/O and memory apertures as root resources, enumerate devices
 * from the OF tree, register them, and expose their OF paths in sysfs.
 * Returns the new root bus, or NULL on failure.
 */
struct pci_bus * __devinit pci_scan_one_pbm(struct pci_pbm_info *pbm,
struct device *parent)
{
LIST_HEAD(resources);
struct device_node *node = pbm->op->dev.of_node;
struct pci_bus *bus;
printk("PCI: Scanning PBM %s\n", node->full_name);
/* Root resources are offset so bus addresses map to CPU addresses. */
pci_add_resource_offset(&resources, &pbm->io_space,
pbm->io_space.start);
pci_add_resource_offset(&resources, &pbm->mem_space,
pbm->mem_space.start);
bus = pci_create_root_bus(parent, pbm->pci_first_busno, pbm->pci_ops,
pbm, &resources);
if (!bus) {
printk(KERN_ERR "Failed to create bus for %s\n",
node->full_name);
pci_free_resource_list(&resources);
return NULL;
}
bus->secondary = pbm->pci_first_busno;
bus->subordinate = pbm->pci_last_busno;
/* Devices come from the firmware tree, not config-space probing. */
pci_of_scan_bus(pbm, node, bus);
pci_bus_add_devices(bus);
pci_bus_register_of_sysfs(bus);
return bus;
}
/* No per-bus fixups are required on this platform. */
void __devinit pcibios_fixup_bus(struct pci_bus *pbus)
{
}
/* Nothing to write back: IRQ routing is not stored in config space here. */
void pcibios_update_irq(struct pci_dev *pdev, int irq)
{
}
/* No additional alignment constraints: accept the proposed start as-is. */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
resource_size_t size, resource_size_t align)
{
return res->start;
}
/* Enable decoding for the resources selected by @mask: set the I/O
 * and/or memory-space bits in the device's PCI command register as
 * required by the resources being brought up.  Always returns 0.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 new_cmd, old_cmd;
	int idx;

	pci_read_config_word(dev, PCI_COMMAND, &new_cmd);
	old_cmd = new_cmd;

	for (idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
		struct resource *r = &dev->resource[idx];

		/* Only set up the requested stuff */
		if (!(mask & (1 << idx)))
			continue;

		if (r->flags & IORESOURCE_IO)
			new_cmd |= PCI_COMMAND_IO;
		if (r->flags & IORESOURCE_MEM)
			new_cmd |= PCI_COMMAND_MEMORY;
	}

	if (new_cmd != old_cmd) {
		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
		       pci_name(dev), new_cmd);
		/* Enable the appropriate bits in the PCI command register. */
		pci_write_config_word(dev, PCI_COMMAND, new_cmd);
	}
	return 0;
}
/* No "pci=" command-line options are consumed here; return the string
 * unchanged so the generic code keeps processing it.
 */
char * __devinit pcibios_setup(char *str)
{
return str;
}
/* Platform support for /proc/bus/pci/X/Y mmap()s. */
/* If the user uses a host-bridge as the PCI device, he may use
* this to perform a raw mmap() of the I/O or MEM space behind
* that controller.
*
* This can be useful for execution of x86 PCI bios initialization code
* on a PCI card, like the xfree86 int10 stuff does.
*/
static int __pci_mmap_make_offset_bus(struct pci_dev *pdev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
unsigned long space_size, user_offset, user_size;
if (mmap_state == pci_mmap_io) {
space_size = resource_size(&pbm->io_space);
} else {
space_size = resource_size(&pbm->mem_space);
}
/* Make sure the request is in range. */
user_offset = vma->vm_pgoff << PAGE_SHIFT;
user_size = vma->vm_end - vma->vm_start;
if (user_offset >= space_size ||
(user_offset + user_size) > space_size)
return -EINVAL;
if (mmap_state == pci_mmap_io) {
vma->vm_pgoff = (pbm->io_space.start +
user_offset) >> PAGE_SHIFT;
} else {
vma->vm_pgoff = (pbm->mem_space.start +
user_offset) >> PAGE_SHIFT;
}
return 0;
}
/* Adjust vm_pgoff of VMA such that it is the physical page offset
* corresponding to the 32-bit pci bus offset for DEV requested by the user.
*
* Basically, the user finds the base address for his device which he wishes
* to mmap. They read the 32-bit value from the config space base register,
* add whatever PAGE_SIZE multiple offset they wish, and feed this into the
* offset parameter of mmap on /proc/bus/pci/XXX for that device.
*
* Returns negative error code on failure, zero on success.
*/
/* Validate and translate a user mmap request for @pdev.  First the
 * offset is converted to a physical address via the host controller's
 * aperture; then, unless the device is a host bridge, the request must
 * also fall inside one of the device's own BARs of the matching type.
 * Returns 0 on success, negative error code otherwise.
 */
static int __pci_mmap_make_offset(struct pci_dev *pdev,
struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
unsigned long user_paddr, user_size;
int i, err;
/* First compute the physical address in vma->vm_pgoff,
 * making sure the user offset is within range in the
 * appropriate PCI space.
 */
err = __pci_mmap_make_offset_bus(pdev, vma, mmap_state);
if (err)
return err;
/* If this is a mapping on a host bridge, any address
 * is OK.
 */
if ((pdev->class >> 8) == PCI_CLASS_BRIDGE_HOST)
return err;
/* Otherwise make sure it's in the range for one of the
 * device's resources.
 */
user_paddr = vma->vm_pgoff << PAGE_SHIFT;
user_size = vma->vm_end - vma->vm_start;
for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
struct resource *rp = &pdev->resource[i];
resource_size_t aligned_end;
/* Active? */
if (!rp->flags)
continue;
/* Same type?  The ROM BAR only matches memory mappings. */
if (i == PCI_ROM_RESOURCE) {
if (mmap_state != pci_mmap_mem)
continue;
} else {
if ((mmap_state == pci_mmap_io &&
(rp->flags & IORESOURCE_IO) == 0) ||
(mmap_state == pci_mmap_mem &&
(rp->flags & IORESOURCE_MEM) == 0))
continue;
}
/* Align the resource end to the next page address.
 * PAGE_SIZE intentionally added instead of (PAGE_SIZE - 1),
 * because actually we need the address of the next byte
 * after rp->end.
 */
aligned_end = (rp->end + PAGE_SIZE) & PAGE_MASK;
if ((rp->start <= user_paddr) &&
(user_paddr + user_size) <= aligned_end)
break;
}
/* Loop ran off the end: the request matched no BAR. */
if (i > PCI_ROM_RESOURCE)
return -EINVAL;
return 0;
}
/* Set vm_flags of VMA, as appropriate for this architecture, for a pci device
* mapping.
*/
static void __pci_mmap_set_flags(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
vma->vm_flags |= (VM_IO | VM_RESERVED);
}
/* Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static void __pci_mmap_set_pgprot(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state)
{
/* Our io_remap_pfn_range takes care of this, do nothing. */
}
/* Perform the actual remap of the pages for a PCI device mapping, as appropriate
* for this architecture. The region in the process to map is described by vm_start
* and vm_end members of VMA, the base physical address is found in vm_pgoff.
* The pci device structure is provided so that architectures may make mapping
* decisions on a per-device or per-bus basis.
*
* Returns a negative error code on failure, zero on success.
*/
/* Perform the actual remap of the pages for a PCI device mapping: the
 * region to map is described by vm_start/vm_end of @vma and the base
 * physical address (after offset translation) ends up in vm_pgoff.
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	int err = __pci_mmap_make_offset(dev, vma, mmap_state);

	if (err < 0)
		return err;

	__pci_mmap_set_flags(dev, vma, mmap_state);
	__pci_mmap_set_pgprot(dev, vma, mmap_state);

	/* Device memory must not be cached. */
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	err = io_remap_pfn_range(vma, vma->vm_start,
				 vma->vm_pgoff,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
	return err ? err : 0;
}
#ifdef CONFIG_NUMA
/* Report the NUMA node the host controller behind @pbus belongs to. */
int pcibus_to_node(struct pci_bus *pbus)
{
struct pci_pbm_info *pbm = pbus->sysdata;
return pbm->numa_node;
}
EXPORT_SYMBOL(pcibus_to_node);
#endif
/* Return the domain number for this pci bus */
/* Return the PCI domain number (the PBM index) for @pbus, or -ENXIO if
 * the bus has no host controller behind it.
 */
int pci_domain_nr(struct pci_bus *pbus)
{
	struct pci_pbm_info *pbm = pbus->sysdata;

	return pbm ? pbm->index : -ENXIO;
}
EXPORT_SYMBOL(pci_domain_nr);
#ifdef CONFIG_PCI_MSI
/* Delegate MSI IRQ setup to the host controller, if it supports MSI. */
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
unsigned int irq;
if (!pbm->setup_msi_irq)
return -EINVAL;
return pbm->setup_msi_irq(&irq, pdev, desc);
}
/* Delegate MSI IRQ teardown to the owning device's host controller. */
void arch_teardown_msi_irq(unsigned int irq)
{
struct msi_desc *entry = irq_get_msi_desc(irq);
struct pci_dev *pdev = entry->dev;
struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
if (pbm->teardown_msi_irq)
pbm->teardown_msi_irq(irq, pdev);
}
#endif /* !(CONFIG_PCI_MSI) */
/* Program the ALI M1533 ISA bridge's bit-31 emission control for ALI
 * sound chips that only generate 31 bits of DMA address.
 *
 * Fix: pci_get_device() can return NULL when no M1533 bridge is
 * present; the old code dereferenced the result unconditionally.
 */
static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
	struct pci_dev *ali_isa_bridge;
	u8 val;

	/* ALI sound chips generate 31-bits of DMA, a special register
	 * determines what bit 31 is emitted as.
	 */
	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533,
					NULL);
	if (!ali_isa_bridge)
		return;		/* no M1533 bridge: nothing to tweak */

	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (set_bit)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	/* Drop the reference taken by pci_get_device(). */
	pci_dev_put(ali_isa_bridge);
}
/* Decide whether @device_mask is usable for DMA through the IOMMU.
 * With no device (NULL) a 32-bit mask is assumed.  The ALI M5451 sound
 * chip gets a special workaround (see ali_sound_dma_hack) and is then
 * accepted outright.  Returns nonzero if the mask is supported.
 */
int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
u64 dma_addr_mask;
if (pdev == NULL) {
dma_addr_mask = 0xffffffff;
} else {
struct iommu *iommu = pdev->dev.archdata.iommu;
dma_addr_mask = iommu->dma_addr_mask;
/* M5451 advertising a 31-bit mask: apply the bridge hack. */
if (pdev->vendor == PCI_VENDOR_ID_AL &&
pdev->device == PCI_DEVICE_ID_AL_M5451 &&
device_mask == 0x7fffffff) {
ali_sound_dma_hack(pdev,
(dma_addr_mask & 0x80000000) != 0);
return 1;
}
}
/* Masks of 64GB or more are never supported here. */
if (device_mask >= (1UL << 32UL))
return 0;
return (device_mask & dma_addr_mask) == dma_addr_mask;
}
/* Convert a kernel resource range to the bus-relative form exposed to
 * user space: subtract the host controller's aperture base (I/O or
 * memory, matching the resource type).
 */
void pci_resource_to_user(const struct pci_dev *pdev, int bar,
			  const struct resource *rp, resource_size_t *start,
			  resource_size_t *end)
{
	struct pci_pbm_info *pbm = pdev->dev.archdata.host_controller;
	unsigned long base = (rp->flags & IORESOURCE_IO) ?
		pbm->io_space.start : pbm->mem_space.start;

	*start = rp->start - base;
	*end = rp->end - base;
}
void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
/* Late PCI setup: advertise the default cache line size to the core.
 * The PCI cache-line-size register is in units of 32-bit words, so a
 * 64-byte line is written as 64 >> 2 == 16.
 */
static int __init pcibios_init(void)
{
pci_dfl_cache_line_size = 64 >> 2;
return 0;
}
subsys_initcall(pcibios_init);
#ifdef CONFIG_SYSFS
/* Create pci_slot objects for @bus from the OF "slot-names" property.
 * The property holds a bitmask of populated slots followed by one
 * NUL-terminated name per set bit, in ascending bit order.
 */
static void __devinit pci_bus_slot_names(struct device_node *node,
struct pci_bus *bus)
{
const struct pci_slot_names {
u32 slot_mask;
char names[0];	/* flexible: concatenated NUL-terminated names */
} *prop;
const char *sp;
int len, i;
u32 mask;
prop = of_get_property(node, "slot-names", &len);
if (!prop)
return;
mask = prop->slot_mask;
sp = prop->names;
if (ofpci_verbose)
printk("PCI: Making slots for [%s] mask[0x%02x]\n",
node->full_name, mask);
i = 0;
while (mask) {
struct pci_slot *pci_slot;
u32 this_bit = 1 << i;
if (!(mask & this_bit)) {
i++;
continue;
}
if (ofpci_verbose)
printk("PCI: Making slot [%s]\n", sp);
pci_slot = pci_create_slot(bus, i, sp, NULL);
if (IS_ERR(pci_slot))
printk(KERN_ERR "PCI: pci_create_slot returned %ld\n",
PTR_ERR(pci_slot));
/* Advance to the next name string and clear the handled bit. */
sp += strlen(sp) + 1;
mask &= ~this_bit;
i++;
}
}
/* Walk all PCI buses at init time and create their slot objects from
 * the firmware "slot-names" properties.
 */
static int __init of_pci_slot_init(void)
{
struct pci_bus *pbus = NULL;
while ((pbus = pci_find_next_bus(pbus)) != NULL) {
struct device_node *node;
if (pbus->self) {
/* PCI->PCI bridge */
node = pbus->self->dev.of_node;
} else {
struct pci_pbm_info *pbm = pbus->sysdata;
/* Host PCI controller */
node = pbm->op->dev.of_node;
}
pci_bus_slot_names(node, pbus);
}
return 0;
}
module_init(of_pci_slot_init);
#endif
| gpl-2.0 |
chaostic/Hima-M9 | lib/oid_registry.c | 4443 | 3936 | /* ASN.1 Object identifier (OID) registry
*
* Copyright (C) 2012 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/export.h>
#include <linux/oid_registry.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/bug.h>
#include "oid_registry_data.c"
MODULE_DESCRIPTION("OID Registry");
MODULE_AUTHOR("Red Hat, Inc.");
MODULE_LICENSE("GPL");
/**
 * look_up_OID - Find an OID registration for the specified data
 * @data: Binary representation of the OID
 * @datasize: Size of the binary representation
 *
 * Returns the matching enum OID, or OID__NR if the encoding is not in
 * the registry.
 */
enum OID look_up_OID(const void *data, size_t datasize)
{
	const unsigned char *octets = data;
	unsigned char probe_hash;
	unsigned lo, hi, probe, hash;
	enum OID oid;
	size_t pos, len;

	/* Hash the OID data */
	hash = datasize - 1;
	for (pos = 0; pos < datasize; pos++)
		hash += octets[pos] * 33;
	hash = (hash >> 24) ^ (hash >> 16) ^ (hash >> 8) ^ hash;
	hash &= 0xff;

	/* Binary search the OID registry.  OIDs are stored in ascending order
	 * of hash value then ascending order of size and then in ascending
	 * order of reverse value.
	 */
	lo = 0;
	hi = OID__NR;
	while (lo < hi) {
		probe = (lo + hi) / 2;

		probe_hash = oid_search_table[probe].hash;
		if (probe_hash > hash) {
			hi = probe;
			continue;
		}
		if (probe_hash < hash) {
			lo = probe + 1;
			continue;
		}

		oid = oid_search_table[probe].oid;
		len = oid_index[oid + 1] - oid_index[oid];
		if (len > datasize) {
			hi = probe;
			continue;
		}
		if (len < datasize) {
			lo = probe + 1;
			continue;
		}

		/* Variation is most likely to be at the tail end of the
		 * OID, so do the comparison in reverse.
		 */
		while (len > 0) {
			unsigned char a = oid_data[oid_index[oid] + --len];
			unsigned char b = octets[len];
			if (a > b) {
				hi = probe;
				goto next;
			}
			if (a < b) {
				lo = probe + 1;
				goto next;
			}
		}
		return oid;
	next:
		;
	}
	return OID__NR;
}
EXPORT_SYMBOL_GPL(look_up_OID);
/**
 * sprint_oid - Print an Object Identifier into a buffer
 * @data: The encoded OID to print
 * @datasize: The size of the encoded OID
 * @buffer: The buffer to render into
 * @bufsize: The size of the buffer
 *
 * The OID is rendered into the buffer in "a.b.c.d" format and the number of
 * bytes is returned.  -EBADMSG is returned if the data could not be
 * interpreted and -ENOBUFS if the buffer was too small.
 *
 * Fix: the old code subtracted snprintf()'s return value from the
 * size_t @bufsize and only then tested for zero.  When snprintf()
 * truncated (count > bufsize), the subtraction wrapped around, the
 * zero test missed, and subsequent snprintf() calls were issued with a
 * huge bogus size, overrunning the caller's buffer.  Truncation is now
 * detected *before* adjusting the running size.
 */
int sprint_oid(const void *data, size_t datasize, char *buffer, size_t bufsize)
{
	const unsigned char *v = data, *end = v + datasize;
	unsigned long num;
	unsigned char n;
	size_t ret;
	int count;

	if (v >= end)
		return -EBADMSG;

	/* The first octet packs the first two arcs as 40 * X + Y. */
	n = *v++;
	ret = count = snprintf(buffer, bufsize, "%u.%u", n / 40, n % 40);
	if (count < 0 || (size_t)count >= bufsize)
		return -ENOBUFS;
	buffer += count;
	bufsize -= count;

	while (v < end) {
		num = 0;
		n = *v++;
		if (!(n & 0x80)) {
			num = n;
		} else {
			/* Multi-octet arc: base-128, high bit = "more". */
			num = n & 0x7f;
			do {
				if (v >= end)
					return -EBADMSG;
				n = *v++;
				num <<= 7;
				num |= n & 0x7f;
			} while (n & 0x80);
		}
		ret += count = snprintf(buffer, bufsize, ".%lu", num);
		if (count < 0 || (size_t)count >= bufsize)
			return -ENOBUFS;
		buffer += count;
		bufsize -= count;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(sprint_oid);
/**
 * sprint_OID - Print an Object Identifier into a buffer
 * @oid: The OID to print
 * @buffer: The buffer to render into
 * @bufsize: The size of the buffer
 *
 * The OID is rendered into the buffer in "a.b.c.d" format and the number of
 * bytes is returned.
 */
int sprint_OID(enum OID oid, char *buffer, size_t bufsize)
{
int ret;
/* Out-of-range enum values are a caller bug. */
BUG_ON(oid >= OID__NR);
ret = sprint_oid(oid_data + oid_index[oid],
oid_index[oid + 1] - oid_index[oid],
buffer, bufsize);
/* Registry data is pre-validated, so it can never be malformed. */
BUG_ON(ret == -EBADMSG);
return ret;
}
EXPORT_SYMBOL_GPL(sprint_OID);
| gpl-2.0 |
jfdsmabalot/kernel_hammerhead | arch/sparc/kernel/kgdb_32.c | 4699 | 3839 | /* kgdb.c: KGDB support for 32-bit sparc.
*
* Copyright (C) 2008 David S. Miller <davem@davemloft.net>
*/
#include <linux/kgdb.h>
#include <linux/kdebug.h>
#include <asm/kdebug.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/cacheflush.h>
extern unsigned long trapbase;
/* Translate trap-time pt_regs into the flat register array layout GDB
 * expects for 32-bit sparc.  FP regs and unavailable state are zeroed.
 */
void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct reg_window32 *win;
int i;
gdb_regs[GDB_G0] = 0;	/* %g0 reads as zero */
for (i = 0; i < 15; i++)
gdb_regs[GDB_G1 + i] = regs->u_regs[UREG_G1 + i];
/* Locals and ins live in the register window saved at %fp. */
win = (struct reg_window32 *) regs->u_regs[UREG_FP];
for (i = 0; i < 8; i++)
gdb_regs[GDB_L0 + i] = win->locals[i];
for (i = 0; i < 8; i++)
gdb_regs[GDB_I0 + i] = win->ins[i];
for (i = GDB_F0; i <= GDB_F31; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_Y] = regs->y;
gdb_regs[GDB_PSR] = regs->psr;
gdb_regs[GDB_WIM] = 0;
gdb_regs[GDB_TBR] = (unsigned long) &trapbase;
gdb_regs[GDB_PC] = regs->pc;
gdb_regs[GDB_NPC] = regs->npc;
gdb_regs[GDB_FSR] = 0;
gdb_regs[GDB_CSR] = 0;
}
/* Build the GDB register array for a task that is not currently
 * running, from the state saved in its thread_info (kernel stack
 * pointer, saved PSR/WIM/PC).  Registers not preserved across a
 * context switch are reported as zero.
 */
void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
{
struct thread_info *t = task_thread_info(p);
struct reg_window32 *win;
int i;
for (i = GDB_G0; i < GDB_G6; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_G6] = (unsigned long) t;	/* %g6 holds thread_info */
gdb_regs[GDB_G7] = 0;
for (i = GDB_O0; i < GDB_SP; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_SP] = t->ksp;
gdb_regs[GDB_O7] = 0;
/* The task's window was spilled to its kernel stack at t->ksp. */
win = (struct reg_window32 *) t->ksp;
for (i = 0; i < 8; i++)
gdb_regs[GDB_L0 + i] = win->locals[i];
for (i = 0; i < 8; i++)
gdb_regs[GDB_I0 + i] = win->ins[i];
for (i = GDB_F0; i <= GDB_F31; i++)
gdb_regs[i] = 0;
gdb_regs[GDB_Y] = 0;
gdb_regs[GDB_PSR] = t->kpsr;
gdb_regs[GDB_WIM] = t->kwim;
gdb_regs[GDB_TBR] = (unsigned long) &trapbase;
gdb_regs[GDB_PC] = t->kpc;
gdb_regs[GDB_NPC] = t->kpc + 4;
gdb_regs[GDB_FSR] = 0;
gdb_regs[GDB_CSR] = 0;
}
/* Write a GDB register array back into trap-time pt_regs and the saved
 * register window, applying any modifications the debugger made.
 */
void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
{
struct reg_window32 *win;
int i;
for (i = 0; i < 15; i++)
regs->u_regs[UREG_G1 + i] = gdb_regs[GDB_G1 + i];
/* If the PSR register is changing, we have to preserve
 * the CWP field, otherwise window save/restore explodes.
 */
if (regs->psr != gdb_regs[GDB_PSR]) {
unsigned long cwp = regs->psr & PSR_CWP;
regs->psr = (gdb_regs[GDB_PSR] & ~PSR_CWP) | cwp;
}
regs->pc = gdb_regs[GDB_PC];
regs->npc = gdb_regs[GDB_NPC];
regs->y = gdb_regs[GDB_Y];
/* Locals and ins go back into the window saved at %fp. */
win = (struct reg_window32 *) regs->u_regs[UREG_FP];
for (i = 0; i < 8; i++)
win->locals[i] = gdb_regs[GDB_L0 + i];
for (i = 0; i < 8; i++)
win->ins[i] = gdb_regs[GDB_I0 + i];
}
/* Handle remote-protocol packets the generic kgdb core leaves to the
 * architecture.  Only continue ('c'), detach ('D') and kill ('k') are
 * acted on; anything else returns -1 so the core reports an error.
 */
int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
char *remcomInBuffer, char *remcomOutBuffer,
struct pt_regs *linux_regs)
{
unsigned long addr;
char *ptr;
switch (remcomInBuffer[0]) {
case 'c':
/* try to read optional parameter, pc unchanged if no parm */
ptr = &remcomInBuffer[1];
if (kgdb_hex2long(&ptr, &addr)) {
linux_regs->pc = addr;
linux_regs->npc = addr + 4;
}
/* fallthru */
case 'D':
case 'k':
/* If we stopped on the breakpoint instruction itself, step
 * past it so it is not re-taken on resume.
 */
if (linux_regs->pc == (unsigned long) arch_kgdb_breakpoint) {
linux_regs->pc = linux_regs->npc;
linux_regs->npc += 4;
}
return 0;
}
return -1;
}
extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
/* Entry point for the kgdb breakpoint trap.  A user-mode trap is not
 * ours: forward it as an ordinary bad trap (type 0xfd).
 */
asmlinkage void kgdb_trap(struct pt_regs *regs)
{
unsigned long flags;
if (user_mode(regs)) {
do_hw_interrupt(regs, 0xfd);
return;
}
/* Spill all register windows to the stack before the debugger
 * inspects them.
 */
flushw_all();
local_irq_save(flags);
kgdb_handle_exception(0x172, SIGTRAP, 0, regs);
local_irq_restore(flags);
}
/* No architecture-specific kgdb initialisation is required. */
int kgdb_arch_init(void)
{
return 0;
}
/* Nothing to undo on kgdb teardown. */
void kgdb_arch_exit(void)
{
}
/* Point execution at @ip; on sparc the delay-slot PC (npc) must track
 * it at the following instruction.
 */
void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
{
	regs->pc = ip;
	regs->npc = ip + 4;
}
struct kgdb_arch arch_kgdb_ops = {
/* Breakpoint instruction: ta 0x7d (software trap into kgdb_trap) */
.gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
};
| gpl-2.0 |
Channing-Y/kernel | drivers/media/video/v4l2-int-device.c | 4699 | 3949 | /*
* drivers/media/video/v4l2-int-device.c
*
* V4L2 internal ioctl interface.
*
* Copyright (C) 2007 Nokia Corporation.
*
* Contact: Sakari Ailus <sakari.ailus@nokia.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sort.h>
#include <linux/string.h>
#include <media/v4l2-int-device.h>
static DEFINE_MUTEX(mutex);
static LIST_HEAD(int_list);
/* Try to wire every unattached slave on the global list to a willing
 * master.  A slave may name a specific master via attach_to[]; a
 * reference is taken on the master's module for the lifetime of the
 * attachment and dropped again if attach() refuses.  The register and
 * unregister paths call this with the global mutex held.
 */
void v4l2_int_device_try_attach_all(void)
{
struct v4l2_int_device *m, *s;
list_for_each_entry(m, &int_list, head) {
if (m->type != v4l2_int_type_master)
continue;
list_for_each_entry(s, &int_list, head) {
if (s->type != v4l2_int_type_slave)
continue;
/* Slave is connected? */
if (s->u.slave->master)
continue;
/* Slave wants to attach to master? */
if (s->u.slave->attach_to[0] != 0
&& strncmp(m->name, s->u.slave->attach_to,
V4L2NAMESIZE))
continue;
if (!try_module_get(m->module))
continue;
s->u.slave->master = m;
if (m->u.master->attach(s)) {
/* Attach refused: roll back link and module ref. */
s->u.slave->master = NULL;
module_put(m->module);
continue;
}
}
}
}
EXPORT_SYMBOL_GPL(v4l2_int_device_try_attach_all);
/* sort() comparator: order ioctl descriptors by ascending ioctl
 * number; returns 1, -1 or 0 exactly as the original chain of tests.
 */
static int ioctl_sort_cmp(const void *a, const void *b)
{
	const struct v4l2_int_ioctl_desc *d1 = a, *d2 = b;

	return (d1->num > d2->num) - (d1->num < d2->num);
}
/* Add @d to the global device list.  A slave's ioctl table is sorted
 * by ioctl number first, so find_ioctl() can binary-search it.  New
 * master/slave pairings are attempted immediately.  Returns 0.
 */
int v4l2_int_device_register(struct v4l2_int_device *d)
{
if (d->type == v4l2_int_type_slave)
sort(d->u.slave->ioctls, d->u.slave->num_ioctls,
sizeof(struct v4l2_int_ioctl_desc),
&ioctl_sort_cmp, NULL);
mutex_lock(&mutex);
list_add(&d->head, &int_list);
v4l2_int_device_try_attach_all();
mutex_unlock(&mutex);
return 0;
}
EXPORT_SYMBOL_GPL(v4l2_int_device_register);
/* Remove @d from the global list.  If it is a slave still attached to
 * a master, detach it and drop the module reference taken at attach
 * time.
 */
void v4l2_int_device_unregister(struct v4l2_int_device *d)
{
mutex_lock(&mutex);
list_del(&d->head);
if (d->type == v4l2_int_type_slave
&& d->u.slave->master != NULL) {
d->u.slave->master->u.master->detach(d);
module_put(d->u.slave->master->module);
d->u.slave->master = NULL;
}
mutex_unlock(&mutex);
}
EXPORT_SYMBOL_GPL(v4l2_int_device_unregister);
/* Adapted from search_extable in extable.c.
 *
 * Binary-search the slave's ioctl table (sorted by number at
 * registration time) for @cmd; return its handler, or @no_such_ioctl
 * when the slave does not implement the ioctl.
 */
static v4l2_int_ioctl_func *find_ioctl(struct v4l2_int_slave *slave, int cmd,
				       v4l2_int_ioctl_func *no_such_ioctl)
{
	const struct v4l2_int_ioctl_desc *lo = slave->ioctls;
	const struct v4l2_int_ioctl_desc *hi = lo + slave->num_ioctls - 1;

	while (lo <= hi) {
		const struct v4l2_int_ioctl_desc *mid = lo + (hi - lo) / 2;

		if (mid->num < cmd)
			lo = mid + 1;
		else if (mid->num > cmd)
			hi = mid - 1;
		else
			return mid->func;
	}

	return no_such_ioctl;
}
/* Fallback handler for zero-argument ioctls with no registered entry. */
static int no_such_ioctl_0(struct v4l2_int_device *d)
{
return -ENOIOCTLCMD;
}
/* Dispatch a zero-argument internal ioctl to the slave's handler;
 * yields -ENOIOCTLCMD via no_such_ioctl_0() when unimplemented.
 */
int v4l2_int_ioctl_0(struct v4l2_int_device *d, int cmd)
{
return ((v4l2_int_ioctl_func_0 *)
find_ioctl(d->u.slave, cmd,
(v4l2_int_ioctl_func *)no_such_ioctl_0))(d);
}
EXPORT_SYMBOL_GPL(v4l2_int_ioctl_0);
/* Fallback handler for one-argument ioctls with no registered entry. */
static int no_such_ioctl_1(struct v4l2_int_device *d, void *arg)
{
return -ENOIOCTLCMD;
}
/* Dispatch a one-argument internal ioctl to the slave's handler;
 * yields -ENOIOCTLCMD via no_such_ioctl_1() when unimplemented.
 */
int v4l2_int_ioctl_1(struct v4l2_int_device *d, int cmd, void *arg)
{
return ((v4l2_int_ioctl_func_1 *)
find_ioctl(d->u.slave, cmd,
(v4l2_int_ioctl_func *)no_such_ioctl_1))(d, arg);
}
MODULE_LICENSE("GPL");
| gpl-2.0 |
AOKP/kernel_lge_msm8974 | drivers/net/wireless/brcm80211/brcmsmac/phy/phy_lcn.c | 4955 | 129977 | /*
* Copyright (c) 2010 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/cordic.h>
#include <pmu.h>
#include <d11.h>
#include <phy_shim.h>
#include "phy_qmath.h"
#include "phy_hal.h"
#include "phy_radio.h"
#include "phytbl_lcn.h"
#include "phy_lcn.h"
#define PLL_2064_NDIV 90
#define PLL_2064_LOW_END_VCO 3000
#define PLL_2064_LOW_END_KVCO 27
#define PLL_2064_HIGH_END_VCO 4200
#define PLL_2064_HIGH_END_KVCO 68
#define PLL_2064_LOOP_BW_DOUBLER 200
#define PLL_2064_D30_DOUBLER 10500
#define PLL_2064_LOOP_BW 260
#define PLL_2064_D30 8000
#define PLL_2064_CAL_REF_TO 8
#define PLL_2064_MHZ 1000000
#define PLL_2064_OPEN_LOOP_DELAY 5
#define TEMPSENSE 1
#define VBATSENSE 2
#define NOISE_IF_UPD_CHK_INTERVAL 1
#define NOISE_IF_UPD_RST_INTERVAL 60
#define NOISE_IF_UPD_THRESHOLD_CNT 1
#define NOISE_IF_UPD_TRHRESHOLD 50
#define NOISE_IF_UPD_TIMEOUT 1000
#define NOISE_IF_OFF 0
#define NOISE_IF_CHK 1
#define NOISE_IF_ON 2
#define PAPD_BLANKING_PROFILE 3
#define PAPD2LUT 0
#define PAPD_CORR_NORM 0
#define PAPD_BLANKING_THRESHOLD 0
#define PAPD_STOP_AFTER_LAST_UPDATE 0
#define LCN_TARGET_PWR 60
#define LCN_VBAT_OFFSET_433X 34649679
#define LCN_VBAT_SLOPE_433X 8258032
#define LCN_VBAT_SCALE_NOM 53
#define LCN_VBAT_SCALE_DEN 432
#define LCN_TEMPSENSE_OFFSET 80812
#define LCN_TEMPSENSE_DEN 2647
#define LCN_BW_LMT 200
#define LCN_CUR_LMT 1250
#define LCN_MULT 1
#define LCN_VCO_DIV 30
#define LCN_OFFSET 680
#define LCN_FACT 490
#define LCN_CUR_DIV 2640
#define LCNPHY_txgainctrlovrval1_pagain_ovr_val1_SHIFT \
(0 + 8)
#define LCNPHY_txgainctrlovrval1_pagain_ovr_val1_MASK \
(0x7f << LCNPHY_txgainctrlovrval1_pagain_ovr_val1_SHIFT)
#define LCNPHY_stxtxgainctrlovrval1_pagain_ovr_val1_SHIFT \
(0 + 8)
#define LCNPHY_stxtxgainctrlovrval1_pagain_ovr_val1_MASK \
(0x7f << LCNPHY_stxtxgainctrlovrval1_pagain_ovr_val1_SHIFT)
#define wlc_lcnphy_enable_tx_gain_override(pi) \
wlc_lcnphy_set_tx_gain_override(pi, true)
#define wlc_lcnphy_disable_tx_gain_override(pi) \
wlc_lcnphy_set_tx_gain_override(pi, false)
#define wlc_lcnphy_iqcal_active(pi) \
(read_phy_reg((pi), 0x451) & \
((0x1 << 15) | (0x1 << 14)))
#define txpwrctrl_off(pi) (0x7 != ((read_phy_reg(pi, 0x4a4) & 0xE000) >> 13))
#define wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) \
(pi->temppwrctrl_capable)
#define wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi) \
(pi->hwpwrctrl_capable)
#define SWCTRL_BT_TX 0x18
#define SWCTRL_OVR_DISABLE 0x40
#define AFE_CLK_INIT_MODE_TXRX2X 1
#define AFE_CLK_INIT_MODE_PAPD 0
#define LCNPHY_TBL_ID_IQLOCAL 0x00
#define LCNPHY_TBL_ID_RFSEQ 0x08
#define LCNPHY_TBL_ID_GAIN_IDX 0x0d
#define LCNPHY_TBL_ID_SW_CTRL 0x0f
#define LCNPHY_TBL_ID_GAIN_TBL 0x12
#define LCNPHY_TBL_ID_SPUR 0x14
#define LCNPHY_TBL_ID_SAMPLEPLAY 0x15
#define LCNPHY_TBL_ID_SAMPLEPLAY1 0x16
#define LCNPHY_TX_PWR_CTRL_RATE_OFFSET 832
#define LCNPHY_TX_PWR_CTRL_MAC_OFFSET 128
#define LCNPHY_TX_PWR_CTRL_GAIN_OFFSET 192
#define LCNPHY_TX_PWR_CTRL_IQ_OFFSET 320
#define LCNPHY_TX_PWR_CTRL_LO_OFFSET 448
#define LCNPHY_TX_PWR_CTRL_PWR_OFFSET 576
#define LCNPHY_TX_PWR_CTRL_START_INDEX_2G_4313 140
#define LCNPHY_TX_PWR_CTRL_START_NPT 1
#define LCNPHY_TX_PWR_CTRL_MAX_NPT 7
#define LCNPHY_NOISE_SAMPLES_DEFAULT 5000
#define LCNPHY_ACI_DETECT_START 1
#define LCNPHY_ACI_DETECT_PROGRESS 2
#define LCNPHY_ACI_DETECT_STOP 3
#define LCNPHY_ACI_CRSHIFRMLO_TRSH 100
#define LCNPHY_ACI_GLITCH_TRSH 2000
#define LCNPHY_ACI_TMOUT 250
#define LCNPHY_ACI_DETECT_TIMEOUT 2
#define LCNPHY_ACI_START_DELAY 0
#define wlc_lcnphy_tx_gain_override_enabled(pi) \
(0 != (read_phy_reg((pi), 0x43b) & (0x1 << 6)))
#define wlc_lcnphy_total_tx_frames(pi) \
wlapi_bmac_read_shm((pi)->sh->physhim, M_UCODE_MACSTAT + \
offsetof(struct macstat, txallfrm))
/* Per-stage TX gain codes for the LCN PHY gain chain. */
struct lcnphy_txgains {
u16 gm_gain;
u16 pga_gain;
u16 pad_gain;
u16 dac_gain;
};
/* Which flavour of calibration to run. */
enum lcnphy_cal_mode {
LCNPHY_CAL_FULL,
LCNPHY_CAL_RECAL,
LCNPHY_CAL_CURRECAL,
LCNPHY_CAL_DIGCAL,
LCNPHY_CAL_GCTRL
};
/* Per-channel RX I/Q compensation coefficients (a/b correction terms). */
struct lcnphy_rx_iqcomp {
u8 chan;
s16 a;
s16 b;
};
/* One complex sample of a sample-play-buffer tone. */
struct lcnphy_spb_tone {
s16 re;
s16 im;
};
/* Unsigned complex pair; presumably raw I/Q register values — TODO confirm. */
struct lcnphy_unsign16_struct {
u16 re;
u16 im;
};
/* Accumulators produced by the hardware I/Q power estimator. */
struct lcnphy_iq_est {
u32 iq_prod;
u32 i_pwr;
u32 q_pwr;
};
/* Per-channel SFO (sampling frequency offset) timing parameters. */
struct lcnphy_sfo_cfg {
u16 ptcentreTs20;
u16 ptcentreFactor;
};
/* Stimulus waveform used for PAPD calibration. */
enum lcnphy_papd_cal_type {
LCNPHY_PAPD_CAL_CW,
LCNPHY_PAPD_CAL_OFDM
};
/* Nine gain parameters per IQ-calibration gain table entry. */
typedef u16 iqcal_gain_params_lcnphy[9];
static const iqcal_gain_params_lcnphy tbl_iqcal_gainparams_lcnphy_2G[] = {
{0, 0, 0, 0, 0, 0, 0, 0, 0},
};
static const iqcal_gain_params_lcnphy *tbl_iqcal_gainparams_lcnphy[1] = {
tbl_iqcal_gainparams_lcnphy_2G,
};
static const u16 iqcal_gainparams_numgains_lcnphy[1] = {
sizeof(tbl_iqcal_gainparams_lcnphy_2G) /
sizeof(*tbl_iqcal_gainparams_lcnphy_2G),
};
static const struct lcnphy_sfo_cfg lcnphy_sfo_cfg[] = {
{965, 1087},
{967, 1085},
{969, 1082},
{971, 1080},
{973, 1078},
{975, 1076},
{977, 1073},
{979, 1071},
{981, 1069},
{983, 1067},
{985, 1065},
{987, 1063},
{989, 1060},
{994, 1055}
};
static const
u16 lcnphy_iqcal_loft_gainladder[] = {
((2 << 8) | 0),
((3 << 8) | 0),
((4 << 8) | 0),
((6 << 8) | 0),
((8 << 8) | 0),
((11 << 8) | 0),
((16 << 8) | 0),
((16 << 8) | 1),
((16 << 8) | 2),
((16 << 8) | 3),
((16 << 8) | 4),
((16 << 8) | 5),
((16 << 8) | 6),
((16 << 8) | 7),
((23 << 8) | 7),
((32 << 8) | 7),
((45 << 8) | 7),
((64 << 8) | 7),
((91 << 8) | 7),
((128 << 8) | 7)
};
static const
u16 lcnphy_iqcal_ir_gainladder[] = {
((1 << 8) | 0),
((2 << 8) | 0),
((4 << 8) | 0),
((6 << 8) | 0),
((8 << 8) | 0),
((11 << 8) | 0),
((16 << 8) | 0),
((23 << 8) | 0),
((32 << 8) | 0),
((45 << 8) | 0),
((64 << 8) | 0),
((64 << 8) | 1),
((64 << 8) | 2),
((64 << 8) | 3),
((64 << 8) | 4),
((64 << 8) | 5),
((64 << 8) | 6),
((64 << 8) | 7),
((91 << 8) | 7),
((128 << 8) | 7)
};
static const
struct lcnphy_spb_tone lcnphy_spb_tone_3750[] = {
{88, 0},
{73, 49},
{34, 81},
{-17, 86},
{-62, 62},
{-86, 17},
{-81, -34},
{-49, -73},
{0, -88},
{49, -73},
{81, -34},
{86, 17},
{62, 62},
{17, 86},
{-34, 81},
{-73, 49},
{-88, 0},
{-73, -49},
{-34, -81},
{17, -86},
{62, -62},
{86, -17},
{81, 34},
{49, 73},
{0, 88},
{-49, 73},
{-81, 34},
{-86, -17},
{-62, -62},
{-17, -86},
{34, -81},
{73, -49},
};
static const
u16 iqlo_loopback_rf_regs[20] = {
RADIO_2064_REG036,
RADIO_2064_REG11A,
RADIO_2064_REG03A,
RADIO_2064_REG025,
RADIO_2064_REG028,
RADIO_2064_REG005,
RADIO_2064_REG112,
RADIO_2064_REG0FF,
RADIO_2064_REG11F,
RADIO_2064_REG00B,
RADIO_2064_REG113,
RADIO_2064_REG007,
RADIO_2064_REG0FC,
RADIO_2064_REG0FD,
RADIO_2064_REG012,
RADIO_2064_REG057,
RADIO_2064_REG059,
RADIO_2064_REG05C,
RADIO_2064_REG078,
RADIO_2064_REG092,
};
static const
u16 tempsense_phy_regs[14] = {
0x503,
0x4a4,
0x4d0,
0x4d9,
0x4da,
0x4a6,
0x938,
0x939,
0x4d8,
0x4d0,
0x4d7,
0x4a5,
0x40d,
0x4a2,
};
static const
u16 rxiq_cal_rf_reg[11] = {
RADIO_2064_REG098,
RADIO_2064_REG116,
RADIO_2064_REG12C,
RADIO_2064_REG06A,
RADIO_2064_REG00B,
RADIO_2064_REG01B,
RADIO_2064_REG113,
RADIO_2064_REG01D,
RADIO_2064_REG114,
RADIO_2064_REG02E,
RADIO_2064_REG12A,
};
static const
struct lcnphy_rx_iqcomp lcnphy_rx_iqcomp_table_rev0[] = {
{1, 0, 0},
{2, 0, 0},
{3, 0, 0},
{4, 0, 0},
{5, 0, 0},
{6, 0, 0},
{7, 0, 0},
{8, 0, 0},
{9, 0, 0},
{10, 0, 0},
{11, 0, 0},
{12, 0, 0},
{13, 0, 0},
{14, 0, 0},
{34, 0, 0},
{38, 0, 0},
{42, 0, 0},
{46, 0, 0},
{36, 0, 0},
{40, 0, 0},
{44, 0, 0},
{48, 0, 0},
{52, 0, 0},
{56, 0, 0},
{60, 0, 0},
{64, 0, 0},
{100, 0, 0},
{104, 0, 0},
{108, 0, 0},
{112, 0, 0},
{116, 0, 0},
{120, 0, 0},
{124, 0, 0},
{128, 0, 0},
{132, 0, 0},
{136, 0, 0},
{140, 0, 0},
{149, 0, 0},
{153, 0, 0},
{157, 0, 0},
{161, 0, 0},
{165, 0, 0},
{184, 0, 0},
{188, 0, 0},
{192, 0, 0},
{196, 0, 0},
{200, 0, 0},
{204, 0, 0},
{208, 0, 0},
{212, 0, 0},
{216, 0, 0},
};
/*
 * RX gain codes indexed by gain step. Each entry packs the gain-chain
 * control bits into a 23-bit code -- exact bitfield layout is hardware
 * defined and not visible here. Indexed in lockstep with
 * lcnphy_gain_table[] below (same 37-entry length).
 */
static const u32 lcnphy_23bitgaincode_table[] = {
0x200100,
0x200200,
0x200004,
0x200014,
0x200024,
0x200034,
0x200134,
0x200234,
0x200334,
0x200434,
0x200037,
0x200137,
0x200237,
0x200337,
0x200437,
0x000035,
0x000135,
0x000235,
0x000037,
0x000137,
0x000237,
0x000337,
0x00013f,
0x00023f,
0x00033f,
0x00034f,
0x00044f,
0x00144f,
0x00244f,
0x00254f,
0x00354f,
0x00454f,
0x00464f,
0x01464f,
0x02464f,
0x03464f,
0x04464f,
};
/*
 * Nominal gain (presumably in dB -- TODO confirm) for each entry of
 * lcnphy_23bitgaincode_table[]; the two tables are indexed together.
 */
static const s8 lcnphy_gain_table[] = {
-16,
-13,
10,
7,
4,
0,
3,
6,
9,
12,
15,
18,
21,
24,
27,
30,
33,
36,
39,
42,
45,
48,
50,
53,
56,
59,
62,
65,
68,
71,
74,
77,
80,
83,
86,
89,
92,
};
/*
 * Per-gain-index correction applied to RSSI readings; indexed by the
 * same gain index as the tables above (38 entries here vs 37 above --
 * NOTE(review): off-by-one sized table, verify against callers).
 */
static const s8 lcnphy_gain_index_offset_for_rssi[] = {
7,
7,
7,
7,
7,
7,
7,
8,
7,
7,
6,
7,
7,
4,
4,
4,
4,
4,
4,
4,
4,
3,
3,
3,
3,
3,
3,
4,
2,
2,
2,
2,
2,
2,
-1,
-2,
-2,
-2
};
/*
 * Per-channel tuning values for the 2064 radio. Each field after
 * chan/freq is written to a specific radio register by
 * wlc_lcnphy_radio_2064_channel_tune_4313().
 */
struct chan_info_2064_lcnphy {
uint chan;			/* 802.11 channel number */
uint freq;			/* center frequency in MHz */
u8 logen_buftune;		/* -> RADIO_2064_REG02A */
u8 logen_rccr_tx;		/* -> RADIO_2064_REG030[1:0] */
u8 txrf_mix_tune_ctrl;		/* -> RADIO_2064_REG091[1:0] */
u8 pa_input_tune_g;		/* -> RADIO_2064_REG038[3:0] */
u8 logen_rccr_rx;		/* -> RADIO_2064_REG030[3:2] */
u8 pa_rxrf_lna1_freq_tune;	/* -> RADIO_2064_REG05E[3:0] */
u8 pa_rxrf_lna2_freq_tune;	/* -> RADIO_2064_REG05E[7:4] */
u8 rxrf_rxrf_spare1;		/* -> RADIO_2064_REG06C */
};
/*
 * 2064 radio tuning table for the 14 2.4 GHz channels; looked up by
 * channel number in wlc_lcnphy_radio_2064_channel_tune_4313().
 * All channels currently share the same tuning values.
 */
static const struct chan_info_2064_lcnphy chan_info_2064_lcnphy[] = {
{1, 2412, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{2, 2417, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{3, 2422, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{4, 2427, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{5, 2432, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{6, 2437, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{7, 2442, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{8, 2447, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{9, 2452, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{10, 2457, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{11, 2462, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{12, 2467, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{13, 2472, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
{14, 2484, 0x0B, 0x0A, 0x00, 0x07, 0x0A, 0x88, 0x88, 0x80},
};
/*
 * Power-up defaults for every 2064 radio register (0x00-0x130).
 * Fields follow struct lcnphy_radio_regs: {address, init value A,
 * init value B, do-init flag A, do-init flag B} -- presumably the
 * A/B pairs are per-band or per-core defaults; TODO confirm against
 * the struct definition (outside this view). The list is terminated
 * by the 0xFFFF sentinel address.
 */
static const struct lcnphy_radio_regs lcnphy_radio_regs_2064[] = {
{0x00, 0, 0, 0, 0},
{0x01, 0x64, 0x64, 0, 0},
{0x02, 0x20, 0x20, 0, 0},
{0x03, 0x66, 0x66, 0, 0},
{0x04, 0xf8, 0xf8, 0, 0},
{0x05, 0, 0, 0, 0},
{0x06, 0x10, 0x10, 0, 0},
{0x07, 0, 0, 0, 0},
{0x08, 0, 0, 0, 0},
{0x09, 0, 0, 0, 0},
{0x0A, 0x37, 0x37, 0, 0},
{0x0B, 0x6, 0x6, 0, 0},
{0x0C, 0x55, 0x55, 0, 0},
{0x0D, 0x8b, 0x8b, 0, 0},
{0x0E, 0, 0, 0, 0},
{0x0F, 0x5, 0x5, 0, 0},
{0x10, 0, 0, 0, 0},
{0x11, 0xe, 0xe, 0, 0},
{0x12, 0, 0, 0, 0},
{0x13, 0xb, 0xb, 0, 0},
{0x14, 0x2, 0x2, 0, 0},
{0x15, 0x12, 0x12, 0, 0},
{0x16, 0x12, 0x12, 0, 0},
{0x17, 0xc, 0xc, 0, 0},
{0x18, 0xc, 0xc, 0, 0},
{0x19, 0xc, 0xc, 0, 0},
{0x1A, 0x8, 0x8, 0, 0},
{0x1B, 0x2, 0x2, 0, 0},
{0x1C, 0, 0, 0, 0},
{0x1D, 0x1, 0x1, 0, 0},
{0x1E, 0x12, 0x12, 0, 0},
{0x1F, 0x6e, 0x6e, 0, 0},
{0x20, 0x2, 0x2, 0, 0},
{0x21, 0x23, 0x23, 0, 0},
{0x22, 0x8, 0x8, 0, 0},
{0x23, 0, 0, 0, 0},
{0x24, 0, 0, 0, 0},
{0x25, 0xc, 0xc, 0, 0},
{0x26, 0x33, 0x33, 0, 0},
{0x27, 0x55, 0x55, 0, 0},
{0x28, 0, 0, 0, 0},
{0x29, 0x30, 0x30, 0, 0},
{0x2A, 0xb, 0xb, 0, 0},
{0x2B, 0x1b, 0x1b, 0, 0},
{0x2C, 0x3, 0x3, 0, 0},
{0x2D, 0x1b, 0x1b, 0, 0},
{0x2E, 0, 0, 0, 0},
{0x2F, 0x20, 0x20, 0, 0},
{0x30, 0xa, 0xa, 0, 0},
{0x31, 0, 0, 0, 0},
{0x32, 0x62, 0x62, 0, 0},
{0x33, 0x19, 0x19, 0, 0},
{0x34, 0x33, 0x33, 0, 0},
{0x35, 0x77, 0x77, 0, 0},
{0x36, 0, 0, 0, 0},
{0x37, 0x70, 0x70, 0, 0},
{0x38, 0x3, 0x3, 0, 0},
{0x39, 0xf, 0xf, 0, 0},
{0x3A, 0x6, 0x6, 0, 0},
{0x3B, 0xcf, 0xcf, 0, 0},
{0x3C, 0x1a, 0x1a, 0, 0},
{0x3D, 0x6, 0x6, 0, 0},
{0x3E, 0x42, 0x42, 0, 0},
{0x3F, 0, 0, 0, 0},
{0x40, 0xfb, 0xfb, 0, 0},
{0x41, 0x9a, 0x9a, 0, 0},
{0x42, 0x7a, 0x7a, 0, 0},
{0x43, 0x29, 0x29, 0, 0},
{0x44, 0, 0, 0, 0},
{0x45, 0x8, 0x8, 0, 0},
{0x46, 0xce, 0xce, 0, 0},
{0x47, 0x27, 0x27, 0, 0},
{0x48, 0x62, 0x62, 0, 0},
{0x49, 0x6, 0x6, 0, 0},
{0x4A, 0x58, 0x58, 0, 0},
{0x4B, 0xf7, 0xf7, 0, 0},
{0x4C, 0, 0, 0, 0},
{0x4D, 0xb3, 0xb3, 0, 0},
{0x4E, 0, 0, 0, 0},
{0x4F, 0x2, 0x2, 0, 0},
{0x50, 0, 0, 0, 0},
{0x51, 0x9, 0x9, 0, 0},
{0x52, 0x5, 0x5, 0, 0},
{0x53, 0x17, 0x17, 0, 0},
{0x54, 0x38, 0x38, 0, 0},
{0x55, 0, 0, 0, 0},
{0x56, 0, 0, 0, 0},
{0x57, 0xb, 0xb, 0, 0},
{0x58, 0, 0, 0, 0},
{0x59, 0, 0, 0, 0},
{0x5A, 0, 0, 0, 0},
{0x5B, 0, 0, 0, 0},
{0x5C, 0, 0, 0, 0},
{0x5D, 0, 0, 0, 0},
{0x5E, 0x88, 0x88, 0, 0},
{0x5F, 0xcc, 0xcc, 0, 0},
{0x60, 0x74, 0x74, 0, 0},
{0x61, 0x74, 0x74, 0, 0},
{0x62, 0x74, 0x74, 0, 0},
{0x63, 0x44, 0x44, 0, 0},
{0x64, 0x77, 0x77, 0, 0},
{0x65, 0x44, 0x44, 0, 0},
{0x66, 0x77, 0x77, 0, 0},
{0x67, 0x55, 0x55, 0, 0},
{0x68, 0x77, 0x77, 0, 0},
{0x69, 0x77, 0x77, 0, 0},
{0x6A, 0, 0, 0, 0},
{0x6B, 0x7f, 0x7f, 0, 0},
{0x6C, 0x8, 0x8, 0, 0},
{0x6D, 0, 0, 0, 0},
{0x6E, 0x88, 0x88, 0, 0},
{0x6F, 0x66, 0x66, 0, 0},
{0x70, 0x66, 0x66, 0, 0},
{0x71, 0x28, 0x28, 0, 0},
{0x72, 0x55, 0x55, 0, 0},
{0x73, 0x4, 0x4, 0, 0},
{0x74, 0, 0, 0, 0},
{0x75, 0, 0, 0, 0},
{0x76, 0, 0, 0, 0},
{0x77, 0x1, 0x1, 0, 0},
{0x78, 0xd6, 0xd6, 0, 0},
{0x79, 0, 0, 0, 0},
{0x7A, 0, 0, 0, 0},
{0x7B, 0, 0, 0, 0},
{0x7C, 0, 0, 0, 0},
{0x7D, 0, 0, 0, 0},
{0x7E, 0, 0, 0, 0},
{0x7F, 0, 0, 0, 0},
{0x80, 0, 0, 0, 0},
{0x81, 0, 0, 0, 0},
{0x82, 0, 0, 0, 0},
{0x83, 0xb4, 0xb4, 0, 0},
{0x84, 0x1, 0x1, 0, 0},
{0x85, 0x20, 0x20, 0, 0},
{0x86, 0x5, 0x5, 0, 0},
{0x87, 0xff, 0xff, 0, 0},
{0x88, 0x7, 0x7, 0, 0},
{0x89, 0x77, 0x77, 0, 0},
{0x8A, 0x77, 0x77, 0, 0},
{0x8B, 0x77, 0x77, 0, 0},
{0x8C, 0x77, 0x77, 0, 0},
{0x8D, 0x8, 0x8, 0, 0},
{0x8E, 0xa, 0xa, 0, 0},
{0x8F, 0x8, 0x8, 0, 0},
{0x90, 0x18, 0x18, 0, 0},
{0x91, 0x5, 0x5, 0, 0},
{0x92, 0x1f, 0x1f, 0, 0},
{0x93, 0x10, 0x10, 0, 0},
{0x94, 0x3, 0x3, 0, 0},
{0x95, 0, 0, 0, 0},
{0x96, 0, 0, 0, 0},
{0x97, 0xaa, 0xaa, 0, 0},
{0x98, 0, 0, 0, 0},
{0x99, 0x23, 0x23, 0, 0},
{0x9A, 0x7, 0x7, 0, 0},
{0x9B, 0xf, 0xf, 0, 0},
{0x9C, 0x10, 0x10, 0, 0},
{0x9D, 0x3, 0x3, 0, 0},
{0x9E, 0x4, 0x4, 0, 0},
{0x9F, 0x20, 0x20, 0, 0},
{0xA0, 0, 0, 0, 0},
{0xA1, 0, 0, 0, 0},
{0xA2, 0, 0, 0, 0},
{0xA3, 0, 0, 0, 0},
{0xA4, 0x1, 0x1, 0, 0},
{0xA5, 0x77, 0x77, 0, 0},
{0xA6, 0x77, 0x77, 0, 0},
{0xA7, 0x77, 0x77, 0, 0},
{0xA8, 0x77, 0x77, 0, 0},
{0xA9, 0x8c, 0x8c, 0, 0},
{0xAA, 0x88, 0x88, 0, 0},
{0xAB, 0x78, 0x78, 0, 0},
{0xAC, 0x57, 0x57, 0, 0},
{0xAD, 0x88, 0x88, 0, 0},
{0xAE, 0, 0, 0, 0},
{0xAF, 0x8, 0x8, 0, 0},
{0xB0, 0x88, 0x88, 0, 0},
{0xB1, 0, 0, 0, 0},
{0xB2, 0x1b, 0x1b, 0, 0},
{0xB3, 0x3, 0x3, 0, 0},
{0xB4, 0x24, 0x24, 0, 0},
{0xB5, 0x3, 0x3, 0, 0},
{0xB6, 0x1b, 0x1b, 0, 0},
{0xB7, 0x24, 0x24, 0, 0},
{0xB8, 0x3, 0x3, 0, 0},
{0xB9, 0, 0, 0, 0},
{0xBA, 0xaa, 0xaa, 0, 0},
{0xBB, 0, 0, 0, 0},
{0xBC, 0x4, 0x4, 0, 0},
{0xBD, 0, 0, 0, 0},
{0xBE, 0x8, 0x8, 0, 0},
{0xBF, 0x11, 0x11, 0, 0},
{0xC0, 0, 0, 0, 0},
{0xC1, 0, 0, 0, 0},
{0xC2, 0x62, 0x62, 0, 0},
{0xC3, 0x1e, 0x1e, 0, 0},
{0xC4, 0x33, 0x33, 0, 0},
{0xC5, 0x37, 0x37, 0, 0},
{0xC6, 0, 0, 0, 0},
{0xC7, 0x70, 0x70, 0, 0},
{0xC8, 0x1e, 0x1e, 0, 0},
{0xC9, 0x6, 0x6, 0, 0},
{0xCA, 0x4, 0x4, 0, 0},
{0xCB, 0x2f, 0x2f, 0, 0},
{0xCC, 0xf, 0xf, 0, 0},
{0xCD, 0, 0, 0, 0},
{0xCE, 0xff, 0xff, 0, 0},
{0xCF, 0x8, 0x8, 0, 0},
{0xD0, 0x3f, 0x3f, 0, 0},
{0xD1, 0x3f, 0x3f, 0, 0},
{0xD2, 0x3f, 0x3f, 0, 0},
{0xD3, 0, 0, 0, 0},
{0xD4, 0, 0, 0, 0},
{0xD5, 0, 0, 0, 0},
{0xD6, 0xcc, 0xcc, 0, 0},
{0xD7, 0, 0, 0, 0},
{0xD8, 0x8, 0x8, 0, 0},
{0xD9, 0x8, 0x8, 0, 0},
{0xDA, 0x8, 0x8, 0, 0},
{0xDB, 0x11, 0x11, 0, 0},
{0xDC, 0, 0, 0, 0},
{0xDD, 0x87, 0x87, 0, 0},
{0xDE, 0x88, 0x88, 0, 0},
{0xDF, 0x8, 0x8, 0, 0},
{0xE0, 0x8, 0x8, 0, 0},
{0xE1, 0x8, 0x8, 0, 0},
{0xE2, 0, 0, 0, 0},
{0xE3, 0, 0, 0, 0},
{0xE4, 0, 0, 0, 0},
{0xE5, 0xf5, 0xf5, 0, 0},
{0xE6, 0x30, 0x30, 0, 0},
{0xE7, 0x1, 0x1, 0, 0},
{0xE8, 0, 0, 0, 0},
{0xE9, 0xff, 0xff, 0, 0},
{0xEA, 0, 0, 0, 0},
{0xEB, 0, 0, 0, 0},
{0xEC, 0x22, 0x22, 0, 0},
{0xED, 0, 0, 0, 0},
{0xEE, 0, 0, 0, 0},
{0xEF, 0, 0, 0, 0},
{0xF0, 0x3, 0x3, 0, 0},
{0xF1, 0x1, 0x1, 0, 0},
{0xF2, 0, 0, 0, 0},
{0xF3, 0, 0, 0, 0},
{0xF4, 0, 0, 0, 0},
{0xF5, 0, 0, 0, 0},
{0xF6, 0, 0, 0, 0},
{0xF7, 0x6, 0x6, 0, 0},
{0xF8, 0, 0, 0, 0},
{0xF9, 0, 0, 0, 0},
{0xFA, 0x40, 0x40, 0, 0},
{0xFB, 0, 0, 0, 0},
{0xFC, 0x1, 0x1, 0, 0},
{0xFD, 0x80, 0x80, 0, 0},
{0xFE, 0x2, 0x2, 0, 0},
{0xFF, 0x10, 0x10, 0, 0},
{0x100, 0x2, 0x2, 0, 0},
{0x101, 0x1e, 0x1e, 0, 0},
{0x102, 0x1e, 0x1e, 0, 0},
{0x103, 0, 0, 0, 0},
{0x104, 0x1f, 0x1f, 0, 0},
{0x105, 0, 0x8, 0, 1},	/* only entry with differing A/B values+flag */
{0x106, 0x2a, 0x2a, 0, 0},
{0x107, 0xf, 0xf, 0, 0},
{0x108, 0, 0, 0, 0},
{0x109, 0, 0, 0, 0},
{0x10A, 0, 0, 0, 0},
{0x10B, 0, 0, 0, 0},
{0x10C, 0, 0, 0, 0},
{0x10D, 0, 0, 0, 0},
{0x10E, 0, 0, 0, 0},
{0x10F, 0, 0, 0, 0},
{0x110, 0, 0, 0, 0},
{0x111, 0, 0, 0, 0},
{0x112, 0, 0, 0, 0},
{0x113, 0, 0, 0, 0},
{0x114, 0, 0, 0, 0},
{0x115, 0, 0, 0, 0},
{0x116, 0, 0, 0, 0},
{0x117, 0, 0, 0, 0},
{0x118, 0, 0, 0, 0},
{0x119, 0, 0, 0, 0},
{0x11A, 0, 0, 0, 0},
{0x11B, 0, 0, 0, 0},
{0x11C, 0x1, 0x1, 0, 0},
{0x11D, 0, 0, 0, 0},
{0x11E, 0, 0, 0, 0},
{0x11F, 0, 0, 0, 0},
{0x120, 0, 0, 0, 0},
{0x121, 0, 0, 0, 0},
{0x122, 0x80, 0x80, 0, 0},
{0x123, 0, 0, 0, 0},
{0x124, 0xf8, 0xf8, 0, 0},
{0x125, 0, 0, 0, 0},
{0x126, 0, 0, 0, 0},
{0x127, 0, 0, 0, 0},
{0x128, 0, 0, 0, 0},
{0x129, 0, 0, 0, 0},
{0x12A, 0, 0, 0, 0},
{0x12B, 0, 0, 0, 0},
{0x12C, 0, 0, 0, 0},
{0x12D, 0, 0, 0, 0},
{0x12E, 0, 0, 0, 0},
{0x12F, 0, 0, 0, 0},
{0x130, 0, 0, 0, 0},
{0xFFFF, 0, 0, 0, 0}	/* sentinel: end of list */
};
/* number of coefficients per TX digital (IIR) filter */
#define LCNPHY_NUM_DIG_FILT_COEFFS 16
/* number of selectable CCK TX filters */
#define LCNPHY_NUM_TX_DIG_FILTERS_CCK 13
/*
 * CCK TX IIR filter coefficient sets. Column 0 is the filter-type id
 * matched by wlc_lcnphy_load_tx_iir_filter(); the remaining 16 values
 * are written to the addr[] PHY registers in order.
 */
static const u16 LCNPHY_txdigfiltcoeffs_cck[LCNPHY_NUM_TX_DIG_FILTERS_CCK]
[LCNPHY_NUM_DIG_FILT_COEFFS + 1] = {
{0, 1, 415, 1874, 64, 128, 64, 792, 1656, 64, 128, 64, 778, 1582, 64,
128, 64,},
{1, 1, 402, 1847, 259, 59, 259, 671, 1794, 68, 54, 68, 608, 1863, 93,
167, 93,},
{2, 1, 415, 1874, 64, 128, 64, 792, 1656, 192, 384, 192, 778, 1582, 64,
128, 64,},
{3, 1, 302, 1841, 129, 258, 129, 658, 1720, 205, 410, 205, 754, 1760,
170, 340, 170,},
{20, 1, 360, 1884, 242, 1734, 242, 752, 1720, 205, 1845, 205, 767, 1760,
256, 185, 256,},
{21, 1, 360, 1884, 149, 1874, 149, 752, 1720, 205, 1883, 205, 767, 1760,
256, 273, 256,},
{22, 1, 360, 1884, 98, 1948, 98, 752, 1720, 205, 1924, 205, 767, 1760,
256, 352, 256,},
{23, 1, 350, 1884, 116, 1966, 116, 752, 1720, 205, 2008, 205, 767, 1760,
128, 233, 128,},
{24, 1, 325, 1884, 32, 40, 32, 756, 1720, 256, 471, 256, 766, 1760, 256,
1881, 256,},
{25, 1, 299, 1884, 51, 64, 51, 736, 1720, 256, 471, 256, 765, 1760, 256,
1881, 256,},
{26, 1, 277, 1943, 39, 117, 88, 637, 1838, 64, 192, 144, 614, 1864, 128,
384, 288,},
{27, 1, 245, 1943, 49, 147, 110, 626, 1838, 256, 768, 576, 613, 1864,
128, 384, 288,},
{30, 1, 302, 1841, 61, 122, 61, 658, 1720, 205, 410, 205, 754, 1760,
170, 340, 170,},
};
/* number of selectable OFDM TX filters */
#define LCNPHY_NUM_TX_DIG_FILTERS_OFDM 3
/*
 * OFDM TX IIR filter coefficient sets; same layout as the CCK table
 * (column 0 = filter-type id, then 16 coefficients written to the
 * addr_ofdm[] PHY registers in order).
 */
static const u16 LCNPHY_txdigfiltcoeffs_ofdm[LCNPHY_NUM_TX_DIG_FILTERS_OFDM]
[LCNPHY_NUM_DIG_FILT_COEFFS + 1] = {
{0, 0, 0xa2, 0x0, 0x100, 0x100, 0x0, 0x0, 0x0, 0x100, 0x0, 0x0,
0x278, 0xfea0, 0x80, 0x100, 0x80,},
{1, 0, 374, 0xFF79, 16, 32, 16, 799, 0xFE74, 50, 32, 50,
750, 0xFE2B, 212, 0xFFCE, 212,},
{2, 0, 375, 0xFF16, 37, 76, 37, 799, 0xFE74, 32, 20, 32, 748,
0xFEF2, 128, 0xFFE2, 128}
};
/* Set the starting TX power index (9-bit field of PHY reg 0x4a4). */
#define wlc_lcnphy_set_start_tx_pwr_idx(pi, idx) \
mod_phy_reg(pi, 0x4a4, \
(0x1ff << 0), \
(u16)(idx) << 0)
/* Set the TX power NPT (number-of-packets threshold?) field -- TODO
 * confirm meaning; 3-bit field at bits [10:8] of PHY reg 0x4a5.
 */
#define wlc_lcnphy_set_tx_pwr_npt(pi, npt) \
mod_phy_reg(pi, 0x4a5, \
(0x7 << 8), \
(u16)(npt) << 8)
/* Read the three TX power-control enable bits [15:13] of reg 0x4a4. */
#define wlc_lcnphy_get_tx_pwr_ctrl(pi) \
(read_phy_reg((pi), 0x4a4) & \
((0x1 << 15) | \
(0x1 << 14) | \
(0x1 << 13)))
/* Read back the 3-bit NPT field of reg 0x4a5. */
#define wlc_lcnphy_get_tx_pwr_npt(pi) \
((read_phy_reg(pi, 0x4a5) & \
(0x7 << 8)) >> \
8)
/* Current hardware TX power index (reg 0x473) while pwrctrl is on. */
#define wlc_lcnphy_get_current_tx_pwr_idx_if_pwrctrl_on(pi) \
(read_phy_reg(pi, 0x473) & 0x1ff)
/* Get/set the 8-bit target TX power field of reg 0x4a7. */
#define wlc_lcnphy_get_target_tx_pwr(pi) \
((read_phy_reg(pi, 0x4a7) & \
(0xff << 0)) >> \
0)
#define wlc_lcnphy_set_target_tx_pwr(pi, target) \
mod_phy_reg(pi, 0x4a7, \
(0xff << 0), \
(u16)(target) << 0)
/* True once the radio R-cal done bit (REG05C bit 5) is set. */
#define wlc_radio_2064_rcal_done(pi) \
(0 != (read_radio_reg(pi, RADIO_2064_REG05C) & 0x20))
/* True once the temperature-sense measurement done bit is set. */
#define tempsense_done(pi) \
(0x8000 == (read_phy_reg(pi, 0x476) & 0x8000))
/* Decode a packed IQ/LO calibration coefficient: high nibble is the
 * (negated) I part, low nibble the Q part.
 */
#define LCNPHY_IQLOCC_READ(val) \
((u8)(-(s8)(((val) & 0xf0) >> 4) + (s8)((val) & 0x0f)))
/* Fallback TX power value when no calibrated value is available. */
#define FIXED_TXPWR 78
/* Sign-extend a 9-bit temperature-sense reading to s16. */
#define LCNPHY_TEMPSENSE(val) ((s16)((val > 255) ? (val - 512) : val))
/*
 * Write a PHY table described by @pti using the LCNPHY table-access
 * registers (0x455/0x457/0x456 -- presumably addr/data-lo/data-hi,
 * TODO confirm parameter order against wlc_phy_write_table()).
 */
void wlc_lcnphy_write_table(struct brcms_phy *pi, const struct phytbl_info *pti)
{
wlc_phy_write_table(pi, pti, 0x455, 0x457, 0x456);
}
/*
 * Read a PHY table described by @pti; counterpart of
 * wlc_lcnphy_write_table(), using the same table-access registers.
 */
void wlc_lcnphy_read_table(struct brcms_phy *pi, struct phytbl_info *pti)
{
wlc_phy_read_table(pi, pti, 0x455, 0x457, 0x456);
}
/*
 * Convenience wrapper: build a struct phytbl_info descriptor from the
 * scalar arguments and read the table via wlc_lcnphy_read_table().
 */
static void
wlc_lcnphy_common_read_table(struct brcms_phy *pi, u32 tbl_id,
			     const u16 *tbl_ptr, u32 tbl_len,
			     u32 tbl_width, u32 tbl_offset)
{
	struct phytbl_info tab = {
		.tbl_id = tbl_id,
		.tbl_ptr = tbl_ptr,
		.tbl_len = tbl_len,
		.tbl_width = tbl_width,
		.tbl_offset = tbl_offset,
	};

	wlc_lcnphy_read_table(pi, &tab);
}
/*
 * Convenience wrapper: build a struct phytbl_info descriptor from the
 * scalar arguments and write the table via wlc_lcnphy_write_table().
 */
static void
wlc_lcnphy_common_write_table(struct brcms_phy *pi, u32 tbl_id,
			      const u16 *tbl_ptr, u32 tbl_len,
			      u32 tbl_width, u32 tbl_offset)
{
	struct phytbl_info tab = {
		.tbl_id = tbl_id,
		.tbl_ptr = tbl_ptr,
		.tbl_len = tbl_len,
		.tbl_width = tbl_width,
		.tbl_offset = tbl_offset,
	};

	wlc_lcnphy_write_table(pi, &tab);
}
/*
 * Fixed-point division with rounding: returns
 * round(dividend / divisor * 2^precision) computed without overflow
 * by long-dividing the remainder one bit at a time.
 */
static u32
wlc_lcnphy_qdiv_roundup(u32 dividend, u32 divisor, u8 precision)
{
	u32 quot = dividend / divisor;
	u32 rem = dividend % divisor;
	u32 rbit = divisor & 1;	/* 1 if divisor is odd */
	u32 half = (divisor >> 1) + rbit;	/* rounding threshold */
	u8 bits;

	for (bits = precision; bits != 0; bits--) {
		quot <<= 1;
		if (rem >= half) {
			quot++;
			rem = ((rem - half) << 1) + rbit;
		} else {
			rem <<= 1;
		}
	}

	/* final round-to-nearest on the leftover remainder */
	if (rem >= half)
		quot++;

	return quot;
}
/*
 * Halve a signed coefficient with directed rounding:
 * type 0 -> floor(coeff_x / 2), type 1 -> rounding of (coeff_x + 1) / 2
 * adjusted for negative values. Any other type returns 0.
 */
static int wlc_lcnphy_calc_floor(s16 coeff_x, int type)
{
	int k = 0;

	if (type == 0)
		k = (coeff_x < 0) ? (coeff_x - 1) / 2 : coeff_x / 2;
	if (type == 1)
		k = ((coeff_x + 1) < 0) ? coeff_x / 2 : (coeff_x + 1) / 2;

	return k;
}
/*
 * Read back the current TX gain settings from the override registers:
 * DAC gain from reg 0x439 bits [9:7], RF gains (gm/pga from 0x4b5,
 * pad from 0x4fb).
 */
static void
wlc_lcnphy_get_tx_gain(struct brcms_phy *pi, struct lcnphy_txgains *gains)
{
	u16 dac_word = read_phy_reg(pi, 0x439);
	u16 rf0 = read_phy_reg(pi, 0x4b5) & 0xffff;
	u16 rf1 = read_phy_reg(pi, 0x4fb) & 0x7fff;

	gains->dac_gain = (dac_word & 0x380) >> 7;
	gains->gm_gain = rf0 & 0xff;
	gains->pga_gain = (rf0 >> 8) & 0xff;
	gains->pad_gain = rf1 & 0xff;
}
/*
 * Program the DAC gain field (bits [9:7]) of PHY reg 0x439, preserving
 * the other bits of the 12-bit control word.
 */
static void wlc_lcnphy_set_dac_gain(struct brcms_phy *pi, u16 dac_gain)
{
	u16 ctrl = read_phy_reg(pi, 0x439);

	/* clear bits [9:7] (mask 0xc7f keeps everything else) and
	 * splice in the new gain
	 */
	ctrl = (ctrl & 0xc7f) | (dac_gain << 7);
	mod_phy_reg(pi, 0x439, (0xfff << 0), ctrl << 0);
}
/* Enable/disable TX gain override (three scattered enable bits). */
static void wlc_lcnphy_set_tx_gain_override(struct brcms_phy *pi, bool bEnable)
{
	const u16 en = bEnable ? 1 : 0;

	mod_phy_reg(pi, 0x4b0, (0x1 << 7), en << 7);
	mod_phy_reg(pi, 0x4b0, (0x1 << 14), en << 14);
	mod_phy_reg(pi, 0x43b, (0x1 << 6), en << 6);
}
/*
 * Enable/disable RX gain-chain override. The enable-bit layout
 * differs between LCN revs < 2 and >= 2, and two extra bits are
 * touched only on 2.4 GHz channels.
 */
static void
wlc_lcnphy_rx_gain_override_enable(struct brcms_phy *pi, bool enable)
{
	const u16 en = enable ? 1 : 0;

	mod_phy_reg(pi, 0x4b0, (0x1 << 8), en << 8);
	mod_phy_reg(pi, 0x44c, (0x1 << 0), en << 0);

	if (LCNREV_LT(pi->pubpi.phy_rev, 2)) {
		mod_phy_reg(pi, 0x44c, (0x1 << 4), en << 4);
		mod_phy_reg(pi, 0x44c, (0x1 << 6), en << 6);
		mod_phy_reg(pi, 0x4b0, (0x1 << 5), en << 5);
		mod_phy_reg(pi, 0x4b0, (0x1 << 6), en << 6);
	} else {
		mod_phy_reg(pi, 0x4b0, (0x1 << 12), en << 12);
		mod_phy_reg(pi, 0x4b0, (0x1 << 13), en << 13);
		mod_phy_reg(pi, 0x4b0, (0x1 << 5), en << 5);
	}

	if (CHSPEC_IS2G(pi->radio_chanspec)) {
		mod_phy_reg(pi, 0x4b0, (0x1 << 10), en << 10);
		mod_phy_reg(pi, 0x4e5, (0x1 << 3), en << 3);
	}
}
/*
 * Distribute an RX gain setting across the individual gain stages.
 * Bits [19:16] (biq2) go to reg 0x4b7; bits [15:0] pack biq1/tia/
 * lna2/lna1 into reg 0x4b6. Note lna2 and lna1 each fill TWO 2-bit
 * slots -- this mirrors the hardware field layout, not a typo.
 */
static void
wlc_lcnphy_set_rx_gain_by_distribution(struct brcms_phy *pi,
				       u16 trsw,
				       u16 ext_lna,
				       u16 biq2,
				       u16 biq1,
				       u16 tia, u16 lna2, u16 lna1)
{
	u16 hi_bits = biq2 & 0xf;
	u16 lo_bits = ((biq1 & 0xf) << 12) |
		      ((tia & 0xf) << 8) |
		      ((lna2 & 0x3) << 6) |
		      ((lna2 & 0x3) << 4) |
		      ((lna1 & 0x3) << 2) |
		      ((lna1 & 0x3) << 0);

	mod_phy_reg(pi, 0x4b6, (0xffff << 0), lo_bits << 0);
	mod_phy_reg(pi, 0x4b7, (0xf << 0), hi_bits << 0);
	mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);

	if (LCNREV_LT(pi->pubpi.phy_rev, 2)) {
		mod_phy_reg(pi, 0x4b1, (0x1 << 9), ext_lna << 9);
		mod_phy_reg(pi, 0x4b1, (0x1 << 10), ext_lna << 10);
	} else {
		mod_phy_reg(pi, 0x4b1, (0x1 << 10), 0 << 10);
		mod_phy_reg(pi, 0x4b1, (0x1 << 15), 0 << 15);
		mod_phy_reg(pi, 0x4b1, (0x1 << 9), ext_lna << 9);
	}

	/* trsw bit is active-low in reg 0x44d */
	mod_phy_reg(pi, 0x44d, (0x1 << 0), (!trsw) << 0);
}
/*
 * Force the T/R switch: program the tx/rx state bits in reg 0x44d,
 * then set the two override-enable bits in reg 0x44c.
 */
static void wlc_lcnphy_set_trsw_override(struct brcms_phy *pi, bool tx, bool rx)
{
	u16 state = 0;

	if (tx)
		state |= (0x1 << 1);
	if (rx)
		state |= (0x1 << 0);

	mod_phy_reg(pi, 0x44d, (0x1 << 1) | (0x1 << 0), state);
	or_phy_reg(pi, 0x44c, (0x1 << 1) | (0x1 << 0));
}
/* Release the T/R switch override: clear enable bits 0-1 of reg 0x44c. */
static void wlc_lcnphy_clear_trsw_override(struct brcms_phy *pi)
{
	and_phy_reg(pi, 0x44c, (u16) ~0x3);
}
/*
 * Program the RX IQ compensation coefficients. The a/b pair is
 * mirrored into three consecutive 10-bit register pairs:
 * 0x645/0x646, 0x647/0x648 and 0x649/0x64a.
 */
static void wlc_lcnphy_set_rx_iq_comp(struct brcms_phy *pi, u16 a, u16 b)
{
	u16 reg;

	for (reg = 0x645; reg <= 0x649; reg += 2) {
		mod_phy_reg(pi, reg, (0x3ff << 0), a << 0);
		mod_phy_reg(pi, reg + 1, (0x3ff << 0), b << 0);
	}
}
/*
 * Run a hardware RX IQ estimate over @num_samps samples and fill in
 * @iq_est with the 32-bit I*Q product and I/Q power accumulators.
 * Returns false if the hardware does not signal completion within
 * ~500 ms (5001 polls x 100 us). Register write order matters; do
 * not reorder.
 */
static bool
wlc_lcnphy_rx_iq_est(struct brcms_phy *pi,
u16 num_samps,
u8 wait_time, struct lcnphy_iq_est *iq_est)
{
int wait_count = 0;
bool result = true;
u8 phybw40;
/* NOTE(review): phybw40 is computed but never used below */
phybw40 = CHSPEC_IS40(pi->radio_chanspec);
/* enable the IQ-est clock and freeze reg 0x410 bit 3 for the run */
mod_phy_reg(pi, 0x6da, (0x1 << 5), (1) << 5);
mod_phy_reg(pi, 0x410, (0x1 << 3), (0) << 3);
/* sample count, per-sample wait, mode bit, then start (bit 9) */
mod_phy_reg(pi, 0x482, (0xffff << 0), (num_samps) << 0);
mod_phy_reg(pi, 0x481, (0xff << 0), ((u16) wait_time) << 0);
mod_phy_reg(pi, 0x481, (0x1 << 8), (0) << 8);
mod_phy_reg(pi, 0x481, (0x1 << 9), (1) << 9);
/* poll the start bit until hardware clears it (measurement done) */
while (read_phy_reg(pi, 0x481) & (0x1 << 9)) {
if (wait_count > (10 * 500)) {
result = false;
goto cleanup;
}
udelay(100);
wait_count++;
}
/* results are 32-bit values split across register pairs */
iq_est->iq_prod = ((u32) read_phy_reg(pi, 0x483) << 16) |
(u32) read_phy_reg(pi, 0x484);
iq_est->i_pwr = ((u32) read_phy_reg(pi, 0x485) << 16) |
(u32) read_phy_reg(pi, 0x486);
iq_est->q_pwr = ((u32) read_phy_reg(pi, 0x487) << 16) |
(u32) read_phy_reg(pi, 0x488);
cleanup:
/* undo the clock/freeze setup regardless of success */
mod_phy_reg(pi, 0x410, (0x1 << 3), (1) << 3);
mod_phy_reg(pi, 0x6da, (0x1 << 5), (0) << 5);
return result;
}
/*
 * Measure the RX IQ imbalance over @num_samps samples and compute new
 * a/b compensation coefficients (fixed-point, 10-bit two's complement).
 * On any failure the previously programmed coefficients are restored.
 * NOTE(review): the "return false" paths inside the math below bail
 * out WITHOUT the cleanup block, leaving the comp registers at (0,0)
 * and the 0x6d1/0x64b overrides modified -- confirm this is intended.
 */
static bool wlc_lcnphy_calc_rx_iq_comp(struct brcms_phy *pi, u16 num_samps)
{
#define LCNPHY_MIN_RXIQ_PWR 2
bool result;
u16 a0_new, b0_new;
struct lcnphy_iq_est iq_est = { 0, 0, 0 };
s32 a, b, temp;
s16 iq_nbits, qq_nbits, arsh, brsh;
s32 iq;
u32 ii, qq;
struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
/* remember current coefficients so cleanup can restore them */
a0_new = ((read_phy_reg(pi, 0x645) & (0x3ff << 0)) >> 0);
b0_new = ((read_phy_reg(pi, 0x646) & (0x3ff << 0)) >> 0);
mod_phy_reg(pi, 0x6d1, (0x1 << 2), (0) << 2);
mod_phy_reg(pi, 0x64b, (0x1 << 6), (1) << 6);
/* measure with compensation disabled */
wlc_lcnphy_set_rx_iq_comp(pi, 0, 0);
result = wlc_lcnphy_rx_iq_est(pi, num_samps, 32, &iq_est);
if (!result)
goto cleanup;
iq = (s32) iq_est.iq_prod;
ii = iq_est.i_pwr;
qq = iq_est.q_pwr;
/* reject measurements with essentially no signal power */
if ((ii + qq) < LCNPHY_MIN_RXIQ_PWR) {
result = false;
goto cleanup;
}
/* normalize operands to ~30 significant bits before dividing;
 * NOTE(review): (iq << ...) left-shifts a possibly negative s32,
 * which is formally UB in C -- inherited behavior, relies on
 * two's-complement semantics
 */
iq_nbits = wlc_phy_nbits(iq);
qq_nbits = wlc_phy_nbits(qq);
arsh = 10 - (30 - iq_nbits);
if (arsh >= 0) {
a = (-(iq << (30 - iq_nbits)) + (ii >> (1 + arsh)));
temp = (s32) (ii >> arsh);
if (temp == 0)
return false;
} else {
a = (-(iq << (30 - iq_nbits)) + (ii << (-1 - arsh)));
temp = (s32) (ii << -arsh);
if (temp == 0)
return false;
}
a /= temp;
brsh = qq_nbits - 31 + 20;
if (brsh >= 0) {
b = (qq << (31 - qq_nbits));
temp = (s32) (ii >> brsh);
if (temp == 0)
return false;
} else {
b = (qq << (31 - qq_nbits));
temp = (s32) (ii << -brsh);
if (temp == 0)
return false;
}
b /= temp;
b -= a * a;
b = (s32) int_sqrt((unsigned long) b);
b -= (1 << 10);	/* remove the implicit 1.0 in Q10 format */
a0_new = (u16) (a & 0x3ff);
b0_new = (u16) (b & 0x3ff);
cleanup:
/* program the (new or restored) coefficients and cache them */
wlc_lcnphy_set_rx_iq_comp(pi, a0_new, b0_new);
mod_phy_reg(pi, 0x64b, (0x1 << 0), (1) << 0);
mod_phy_reg(pi, 0x64b, (0x1 << 3), (1) << 3);
pi_lcn->lcnphy_cal_results.rxiqcal_coeff_a0 = a0_new;
pi_lcn->lcnphy_cal_results.rxiqcal_coeff_b0 = b0_new;
return result;
}
/*
 * Average digital power per sample: (I power + Q power) / nsamples,
 * or 0 if the hardware IQ estimate times out.
 */
static u32 wlc_lcnphy_measure_digital_power(struct brcms_phy *pi, u16 nsamples)
{
	struct lcnphy_iq_est est = { 0, 0, 0 };

	if (!wlc_lcnphy_rx_iq_est(pi, nsamples, 32, &est))
		return 0;

	return (est.i_pwr + est.q_pwr) / nsamples;
}
/*
 * RX IQ calibration.
 *
 * module == 2: just look up the per-channel a/b coefficients in
 * @iqcomp and program them.
 * module == 1: full calibration -- save radio/PHY state, set up an
 * internal TX->RX loopback, play a 2 kHz(?) test tone, step the TIA
 * gain down until the received digital power drops below threshold,
 * derive new compensation coefficients, then restore all saved state.
 *
 * Returns true on success. Register write order in the setup and
 * restore sequences is hardware-mandated; do not reorder.
 *
 * Fix vs. original: removed a 262-byte GFP_ATOMIC kmalloc ("ptr")
 * that was allocated, NULL-checked and freed but never otherwise
 * used -- dead weight and a spurious failure path under memory
 * pressure.
 */
static bool
wlc_lcnphy_rx_iq_cal(struct brcms_phy *pi,
		     const struct lcnphy_rx_iqcomp *iqcomp,
		     int iqcomp_sz, bool tx_switch, bool rx_switch, int module,
		     int tx_gain_idx)
{
	struct lcnphy_txgains old_gains;
	u16 tx_pwr_ctrl;
	u8 tx_gain_index_old = 0;
	bool result = false, tx_gain_override_old = false;
	u16 i, Core1TxControl_old, RFOverride0_old,
	    RFOverrideVal0_old, rfoverride2_old, rfoverride2val_old,
	    rfoverride3_old, rfoverride3val_old, rfoverride4_old,
	    rfoverride4val_old, afectrlovr_old, afectrlovrval_old;
	int tia_gain;
	u32 received_power, rx_pwr_threshold;
	u16 old_sslpnCalibClkEnCtrl, old_sslpnRxFeClkEnCtrl;
	u16 values_to_save[11];	/* matches rxiq_cal_rf_reg[11] */
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	if (module == 2) {
		/* table-driven: find this channel's entry and apply it */
		while (iqcomp_sz--) {
			if (iqcomp[iqcomp_sz].chan ==
			    CHSPEC_CHANNEL(pi->radio_chanspec)) {
				wlc_lcnphy_set_rx_iq_comp(pi,
							  (u16)
							  iqcomp[iqcomp_sz].a,
							  (u16)
							  iqcomp[iqcomp_sz].b);
				result = true;
				break;
			}
		}
		goto cal_done;
	}

	if (module == 1) {
		/* disable TX power control for the duration of the cal */
		tx_pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
		wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);

		/* save every radio register the loopback setup clobbers */
		for (i = 0; i < 11; i++)
			values_to_save[i] =
				read_radio_reg(pi, rxiq_cal_rf_reg[i]);
		Core1TxControl_old = read_phy_reg(pi, 0x631);

		or_phy_reg(pi, 0x631, 0x0015);

		/* save the PHY override registers */
		RFOverride0_old = read_phy_reg(pi, 0x44c);
		RFOverrideVal0_old = read_phy_reg(pi, 0x44d);
		rfoverride2_old = read_phy_reg(pi, 0x4b0);
		rfoverride2val_old = read_phy_reg(pi, 0x4b1);
		rfoverride3_old = read_phy_reg(pi, 0x4f9);
		rfoverride3val_old = read_phy_reg(pi, 0x4fa);
		rfoverride4_old = read_phy_reg(pi, 0x938);
		rfoverride4val_old = read_phy_reg(pi, 0x939);
		afectrlovr_old = read_phy_reg(pi, 0x43b);
		afectrlovrval_old = read_phy_reg(pi, 0x43c);
		old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
		old_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);

		tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
		if (tx_gain_override_old) {
			wlc_lcnphy_get_tx_gain(pi, &old_gains);
			tx_gain_index_old = pi_lcn->lcnphy_current_index;
		}

		wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_idx);

		mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
		mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);

		mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
		mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);

		/* configure the radio for internal loopback */
		write_radio_reg(pi, RADIO_2064_REG116, 0x06);
		write_radio_reg(pi, RADIO_2064_REG12C, 0x07);
		write_radio_reg(pi, RADIO_2064_REG06A, 0xd3);
		write_radio_reg(pi, RADIO_2064_REG098, 0x03);
		write_radio_reg(pi, RADIO_2064_REG00B, 0x7);
		mod_radio_reg(pi, RADIO_2064_REG113, 1 << 4, 1 << 4);
		write_radio_reg(pi, RADIO_2064_REG01D, 0x01);
		write_radio_reg(pi, RADIO_2064_REG114, 0x01);
		write_radio_reg(pi, RADIO_2064_REG02E, 0x10);
		write_radio_reg(pi, RADIO_2064_REG12A, 0x08);

		/* per-bit overrides in regs 0x938/0x939 and 0x43b/0x43c */
		mod_phy_reg(pi, 0x938, (0x1 << 0), 1 << 0);
		mod_phy_reg(pi, 0x939, (0x1 << 0), 0 << 0);
		mod_phy_reg(pi, 0x938, (0x1 << 1), 1 << 1);
		mod_phy_reg(pi, 0x939, (0x1 << 1), 1 << 1);
		mod_phy_reg(pi, 0x938, (0x1 << 2), 1 << 2);
		mod_phy_reg(pi, 0x939, (0x1 << 2), 1 << 2);
		mod_phy_reg(pi, 0x938, (0x1 << 3), 1 << 3);
		mod_phy_reg(pi, 0x939, (0x1 << 3), 1 << 3);
		mod_phy_reg(pi, 0x938, (0x1 << 5), 1 << 5);
		mod_phy_reg(pi, 0x939, (0x1 << 5), 0 << 5);

		mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
		mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);

		/* play the calibration tone */
		wlc_lcnphy_start_tx_tone(pi, 2000, 120, 0);
		write_phy_reg(pi, 0x6da, 0xffff);
		or_phy_reg(pi, 0x6db, 0x3);
		wlc_lcnphy_set_trsw_override(pi, tx_switch, rx_switch);
		wlc_lcnphy_rx_gain_override_enable(pi, true);

		/* step TIA gain down until the digital power is below
		 * threshold (avoids ADC saturation during the estimate)
		 */
		tia_gain = 8;
		rx_pwr_threshold = 950;
		while (tia_gain > 0) {
			tia_gain -= 1;
			wlc_lcnphy_set_rx_gain_by_distribution(pi,
							       0, 0, 2, 2,
							       (u16)
							       tia_gain, 1, 0);
			udelay(500);

			received_power =
				wlc_lcnphy_measure_digital_power(pi, 2000);
			if (received_power < rx_pwr_threshold)
				break;
		}
		result = wlc_lcnphy_calc_rx_iq_comp(pi, 0xffff);

		wlc_lcnphy_stop_tx_tone(pi);

		/* restore everything we touched, in reverse fashion.
		 * NOTE(review): 0x44c is restored from RFOverrideVal0_old
		 * (the 0x44d value) and RFOverride0_old is never used --
		 * looks like a long-standing typo, kept as-is because the
		 * shipped driver behaves this way; confirm before changing.
		 */
		write_phy_reg(pi, 0x631, Core1TxControl_old);

		write_phy_reg(pi, 0x44c, RFOverrideVal0_old);

		write_phy_reg(pi, 0x44d, RFOverrideVal0_old);

		write_phy_reg(pi, 0x4b0, rfoverride2_old);

		write_phy_reg(pi, 0x4b1, rfoverride2val_old);

		write_phy_reg(pi, 0x4f9, rfoverride3_old);

		write_phy_reg(pi, 0x4fa, rfoverride3val_old);

		write_phy_reg(pi, 0x938, rfoverride4_old);

		write_phy_reg(pi, 0x939, rfoverride4val_old);

		write_phy_reg(pi, 0x43b, afectrlovr_old);

		write_phy_reg(pi, 0x43c, afectrlovrval_old);

		write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);

		write_phy_reg(pi, 0x6db, old_sslpnRxFeClkEnCtrl);

		wlc_lcnphy_clear_trsw_override(pi);

		mod_phy_reg(pi, 0x44c, (0x1 << 2), 0 << 2);

		for (i = 0; i < 11; i++)
			write_radio_reg(pi, rxiq_cal_rf_reg[i],
					values_to_save[i]);

		if (tx_gain_override_old)
			wlc_lcnphy_set_tx_pwr_by_index(pi, tx_gain_index_old);
		else
			wlc_lcnphy_disable_tx_gain_override(pi);

		wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl);
		wlc_lcnphy_rx_gain_override_enable(pi, false);
	}

cal_done:
	return result;
}
/*
 * Current TX power index: read from hardware when TSSI-based power
 * control is active, otherwise return the software-cached index.
 * (Short-circuit keeps the register-read order of the original
 * if/else-if chain.)
 */
s8 wlc_lcnphy_get_current_tx_pwr_idx(struct brcms_phy *pi)
{
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	if (!txpwrctrl_off(pi) &&
	    wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
		return (s8) (wlc_lcnphy_get_current_tx_pwr_idx_if_pwrctrl_on(
			pi) / 2);

	return pi_lcn->lcnphy_current_index;
}
/*
 * Carrier suppression test mode: @channel != 0 enables the test
 * (forces the AFE/TX overrides and keys the transmitter), @channel
 * == 0 restores normal operation. NOTE(review): the restore path
 * writes back the override registers read at entry of THIS call, so
 * enable and disable are expected to happen in separate calls --
 * the values saved here are only meaningful on the disable path if
 * they were not already overridden. Confirm intended usage.
 */
void wlc_lcnphy_crsuprs(struct brcms_phy *pi, int channel)
{
u16 afectrlovr, afectrlovrval;
afectrlovr = read_phy_reg(pi, 0x43b);
afectrlovrval = read_phy_reg(pi, 0x43c);
if (channel != 0) {
/* override AFE control bits 1 and 4/6 */
mod_phy_reg(pi, 0x43b, (0x1 << 1), (1) << 1);
mod_phy_reg(pi, 0x43c, (0x1 << 1), (0) << 1);
mod_phy_reg(pi, 0x43b, (0x1 << 4), (1) << 4);
mod_phy_reg(pi, 0x43c, (0x1 << 6), (0) << 6);
write_phy_reg(pi, 0x44b, 0xffff);
wlc_lcnphy_tx_pu(pi, 1);
mod_phy_reg(pi, 0x634, (0xff << 8), (0) << 8);
or_phy_reg(pi, 0x6da, 0x0080);
or_phy_reg(pi, 0x00a, 0x228);
} else {
/* undo the forced bits and restore the AFE override regs */
and_phy_reg(pi, 0x00a, ~(0x228));
and_phy_reg(pi, 0x6da, 0xFF7F);
write_phy_reg(pi, 0x43b, afectrlovr);
write_phy_reg(pi, 0x43c, afectrlovrval);
}
}
/*
 * Pulse the AFE power-down override: set bit 0 of both override
 * registers, clear it, then restore the original register values.
 * Exact write sequence is hardware-mandated.
 */
static void wlc_lcnphy_toggle_afe_pwdn(struct brcms_phy *pi)
{
	u16 saved_ovr_val = read_phy_reg(pi, 0x43c);
	u16 saved_ovr = read_phy_reg(pi, 0x43b);

	write_phy_reg(pi, 0x43c, saved_ovr_val | 0x1);
	write_phy_reg(pi, 0x43b, saved_ovr | 0x1);

	write_phy_reg(pi, 0x43c, saved_ovr_val & 0xfffe);
	write_phy_reg(pi, 0x43b, saved_ovr & 0xfffe);

	write_phy_reg(pi, 0x43c, saved_ovr_val);
	write_phy_reg(pi, 0x43b, saved_ovr);
}
/*
 * Enable/disable TX/RX spur-avoidance mode: reprograms the resampler
 * registers (0x93b/0x93c, bit 13 selects spur-avoid) plus sample-play
 * and clock-control registers, then tells the MAC layer to switch its
 * frequency accordingly. Write order/magic values are from vendor
 * tuning; do not reorder.
 */
static void
wlc_lcnphy_txrx_spur_avoidance_mode(struct brcms_phy *pi, bool enable)
{
if (enable) {
write_phy_reg(pi, 0x942, 0x7);
write_phy_reg(pi, 0x93b, ((1 << 13) + 23));
write_phy_reg(pi, 0x93c, ((1 << 13) + 1989));
/* strobe bit 2 of reg 0x44a and bit 1 of 0x6d3 */
write_phy_reg(pi, 0x44a, 0x084);
write_phy_reg(pi, 0x44a, 0x080);
write_phy_reg(pi, 0x6d3, 0x2222);
write_phy_reg(pi, 0x6d3, 0x2220);
} else {
write_phy_reg(pi, 0x942, 0x0);
write_phy_reg(pi, 0x93b, ((0 << 13) + 23));
write_phy_reg(pi, 0x93c, ((0 << 13) + 1989));
}
/* MAC clocking must track the PHY spur-avoidance setting */
wlapi_switch_macfreq(pi->sh->physhim, enable);
}
/*
 * Per-channel PHY/PLL tweaks applied on every channel change:
 * channel-14 gets a different classifier setting, channel 1 a larger
 * band-edge correction, and the PMU PLL is programmed with one of two
 * parameter sets (spur-avoidance off for the listed channels, on for
 * the rest).
 */
static void
wlc_lcnphy_set_chanspec_tweaks(struct brcms_phy *pi, u16 chanspec)
{
	u8 channel = CHSPEC_CHANNEL(chanspec);
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	mod_phy_reg(pi, 0x448, (0x3 << 8),
		    (channel == 14 ? 2 : 1) << 8);

	pi_lcn->lcnphy_bandedge_corr = (channel == 1) ? 4 : 2;

	switch (channel) {
	case 1:
	case 2:
	case 3:
	case 4:
	case 9:
	case 10:
	case 11:
	case 12:
		/* spur avoidance not needed on these channels */
		si_pmu_pllcontrol(pi->sh->sih, 0x2, 0xffffffff, 0x03000c04);
		si_pmu_pllcontrol(pi->sh->sih, 0x3, 0xffffff, 0x0);
		si_pmu_pllcontrol(pi->sh->sih, 0x4, 0xffffffff, 0x200005c0);

		si_pmu_pllupd(pi->sh->sih);
		write_phy_reg(pi, 0x942, 0);
		wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
		pi_lcn->lcnphy_spurmod = false;
		mod_phy_reg(pi, 0x424, (0xff << 8), (0x1b) << 8);

		write_phy_reg(pi, 0x425, 0x5907);
		break;
	default:
		si_pmu_pllcontrol(pi->sh->sih, 0x2, 0xffffffff, 0x03140c04);
		si_pmu_pllcontrol(pi->sh->sih, 0x3, 0xffffff, 0x333333);
		si_pmu_pllcontrol(pi->sh->sih, 0x4, 0xffffffff, 0x202c2820);

		si_pmu_pllupd(pi->sh->sih);
		write_phy_reg(pi, 0x942, 0);
		wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
		pi_lcn->lcnphy_spurmod = false;
		mod_phy_reg(pi, 0x424, (0xff << 8), (0x1f) << 8);

		write_phy_reg(pi, 0x425, 0x590a);
		break;
	}

	or_phy_reg(pi, 0x44a, 0x44);
	write_phy_reg(pi, 0x44a, 0x80);
}
/*
 * Tune the 2064 radio PLL to @channel (4313 board variant).
 * Looks up per-channel front-end tuning values, computes the PLL
 * feedback divider (integer + 20-bit fraction) and calibration
 * timing constants from the crystal frequency, programs the VCO
 * charge-pump current, then runs the VCO calibration. The register
 * write sequence is hardware-mandated; do not reorder.
 */
static void
wlc_lcnphy_radio_2064_channel_tune_4313(struct brcms_phy *pi, u8 channel)
{
uint i;
const struct chan_info_2064_lcnphy *ci;
u8 rfpll_doubler = 0;
u8 pll_pwrup, pll_pwrup_ovr;
s32 qFxtal, qFref, qFvco, qFcal;
u8 d15, d16, f16, e44, e45;
u32 div_int, div_frac, fvco3, fpfd, fref3, fcal_div;
u16 loop_bw, d30, setCount;
u8 h29, h28_ten, e30, h30_ten, cp_current;
u16 g30, d28;
ci = &chan_info_2064_lcnphy[0];
rfpll_doubler = 1;	/* always uses the reference doubler here */
/* enable the doubler path and bias */
mod_radio_reg(pi, RADIO_2064_REG09D, 0x4, 0x1 << 2);
write_radio_reg(pi, RADIO_2064_REG09E, 0xf);
if (!rfpll_doubler) {
loop_bw = PLL_2064_LOOP_BW;
d30 = PLL_2064_D30;
} else {
loop_bw = PLL_2064_LOOP_BW_DOUBLER;
d30 = PLL_2064_D30_DOUBLER;
}
/* look up this channel's tuning entry; silently bail on unknown
 * 2 GHz channels (ci stays at entry 0 for non-2G chanspecs)
 */
if (CHSPEC_IS2G(pi->radio_chanspec)) {
for (i = 0; i < ARRAY_SIZE(chan_info_2064_lcnphy); i++)
if (chan_info_2064_lcnphy[i].chan == channel)
break;
if (i >= ARRAY_SIZE(chan_info_2064_lcnphy))
return;
ci = &chan_info_2064_lcnphy[i];
}
/* apply per-channel front-end tuning values */
write_radio_reg(pi, RADIO_2064_REG02A, ci->logen_buftune);
mod_radio_reg(pi, RADIO_2064_REG030, 0x3, ci->logen_rccr_tx);
mod_radio_reg(pi, RADIO_2064_REG091, 0x3, ci->txrf_mix_tune_ctrl);
mod_radio_reg(pi, RADIO_2064_REG038, 0xf, ci->pa_input_tune_g);
mod_radio_reg(pi, RADIO_2064_REG030, 0x3 << 2,
(ci->logen_rccr_rx) << 2);
mod_radio_reg(pi, RADIO_2064_REG05E, 0xf, ci->pa_rxrf_lna1_freq_tune);
mod_radio_reg(pi, RADIO_2064_REG05E, (0xf) << 4,
(ci->pa_rxrf_lna2_freq_tune) << 4);
write_radio_reg(pi, RADIO_2064_REG06C, ci->rxrf_rxrf_spare1);
/* force the PLL on while tuning; restored at the end */
pll_pwrup = (u8) read_radio_reg(pi, RADIO_2064_REG044);
pll_pwrup_ovr = (u8) read_radio_reg(pi, RADIO_2064_REG12B);
or_radio_reg(pi, RADIO_2064_REG044, 0x07);
or_radio_reg(pi, RADIO_2064_REG12B, (0x07) << 1);
e44 = 0;
e45 = 0;
/* PFD frequency; calibration clock divider by xtal frequency */
fpfd = rfpll_doubler ? (pi->xtalfreq << 1) : (pi->xtalfreq);
if (pi->xtalfreq > 26000000)
e44 = 1;
if (pi->xtalfreq > 52000000)
e45 = 1;
if (e44 == 0)
fcal_div = 1;
else if (e45 == 0)
fcal_div = 2;
else
fcal_div = 4;
fvco3 = (ci->freq * 3);	/* VCO runs at 3x channel frequency */
fref3 = 2 * fpfd;
/* Q16 fixed-point frequencies in MHz */
qFxtal = wlc_lcnphy_qdiv_roundup(pi->xtalfreq, PLL_2064_MHZ, 16);
qFref = wlc_lcnphy_qdiv_roundup(fpfd, PLL_2064_MHZ, 16);
qFcal = pi->xtalfreq * fcal_div / PLL_2064_MHZ;
qFvco = wlc_lcnphy_qdiv_roundup(fvco3, 2, 16);
write_radio_reg(pi, RADIO_2064_REG04F, 0x02);
/* calibration timing constants d15/d16 and the set counter */
d15 = (pi->xtalfreq * fcal_div * 4 / 5) / PLL_2064_MHZ - 1;
write_radio_reg(pi, RADIO_2064_REG052, (0x07 & (d15 >> 2)));
write_radio_reg(pi, RADIO_2064_REG053, (d15 & 0x3) << 5);
d16 = (qFcal * 8 / (d15 + 1)) - 1;
write_radio_reg(pi, RADIO_2064_REG051, d16);
f16 = ((d16 + 1) * (d15 + 1)) / qFcal;
setCount = f16 * 3 * (ci->freq) / 32 - 1;
mod_radio_reg(pi, RADIO_2064_REG053, (0x0f << 0),
(u8) (setCount >> 8));
or_radio_reg(pi, RADIO_2064_REG053, 0x10);
write_radio_reg(pi, RADIO_2064_REG054, (u8) (setCount & 0xff));
/* feedback divider: integer part + 20-bit fraction of fvco3/fref3 */
div_int = ((fvco3 * (PLL_2064_MHZ >> 4)) / fref3) << 4;
div_frac = ((fvco3 * (PLL_2064_MHZ >> 4)) % fref3) << 4;
while (div_frac >= fref3) {
div_int++;
div_frac -= fref3;
}
div_frac = wlc_lcnphy_qdiv_roundup(div_frac, fref3, 20);
mod_radio_reg(pi, RADIO_2064_REG045, (0x1f << 0),
(u8) (div_int >> 4));
mod_radio_reg(pi, RADIO_2064_REG046, (0x1f << 4),
(u8) (div_int << 4));
mod_radio_reg(pi, RADIO_2064_REG046, (0x0f << 0),
(u8) (div_frac >> 16));
write_radio_reg(pi, RADIO_2064_REG047, (u8) (div_frac >> 8) & 0xff);
write_radio_reg(pi, RADIO_2064_REG048, (u8) div_frac & 0xff);
/* loop-filter defaults */
write_radio_reg(pi, RADIO_2064_REG040, 0xfb);
write_radio_reg(pi, RADIO_2064_REG041, 0x9A);
write_radio_reg(pi, RADIO_2064_REG042, 0xA3);
write_radio_reg(pi, RADIO_2064_REG043, 0x0C);
/* charge-pump current from interpolated VCO gain (Kvco) */
h29 = LCN_BW_LMT / loop_bw;
d28 = (((PLL_2064_HIGH_END_KVCO - PLL_2064_LOW_END_KVCO) *
(fvco3 / 2 - PLL_2064_LOW_END_VCO)) /
(PLL_2064_HIGH_END_VCO - PLL_2064_LOW_END_VCO))
+ PLL_2064_LOW_END_KVCO;
h28_ten = (d28 * 10) / LCN_VCO_DIV;
e30 = (d30 - LCN_OFFSET) / LCN_FACT;
g30 = LCN_OFFSET + (e30 * LCN_FACT);
h30_ten = (g30 * 10) / LCN_CUR_DIV;
cp_current = ((LCN_CUR_LMT * h29 * LCN_MULT * 100) / h28_ten) / h30_ten;
mod_radio_reg(pi, RADIO_2064_REG03C, 0x3f, cp_current);
/* NOTE(review): the computed cp_current is immediately overwritten
 * by a fixed per-channel value below -- inherited from vendor code
 */
if (channel >= 1 && channel <= 5)
write_radio_reg(pi, RADIO_2064_REG03C, 0x8);
else
write_radio_reg(pi, RADIO_2064_REG03C, 0x7);
write_radio_reg(pi, RADIO_2064_REG03D, 0x3);
mod_radio_reg(pi, RADIO_2064_REG044, 0x0c, 0x0c);
udelay(1);
/* run VCO calibration, then restore the PLL power-up state */
wlc_2064_vco_cal(pi);
write_radio_reg(pi, RADIO_2064_REG044, pll_pwrup);
write_radio_reg(pi, RADIO_2064_REG12B, pll_pwrup_ovr);
if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
write_radio_reg(pi, RADIO_2064_REG038, 3);
write_radio_reg(pi, RADIO_2064_REG091, 7);
}
}
/*
 * Load one set of TX digital IIR filter coefficients into the PHY.
 *
 * The first column of each row in the CCK/OFDM coefficient tables is a
 * filter-type tag; when @filt_type matches a row, the remaining
 * LCNPHY_NUM_DIG_FILT_COEFFS entries of that row are written to the
 * corresponding PHY register set.
 *
 * Returns 0 when a matching filter set was programmed, -1 otherwise.
 */
static int
wlc_lcnphy_load_tx_iir_filter(struct brcms_phy *pi, bool is_ofdm, s16 filt_type)
{
	s16 match = -1;
	int k;
	u16 addr[] = {
		0x910, 0x91e, 0x91f, 0x924, 0x925, 0x926, 0x920, 0x921,
		0x927, 0x928, 0x929, 0x922, 0x923, 0x930, 0x931, 0x932
	};
	u16 addr_ofdm[] = {
		0x90f, 0x900, 0x901, 0x906, 0x907, 0x908, 0x902, 0x903,
		0x909, 0x90a, 0x90b, 0x904, 0x905, 0x90c, 0x90d, 0x90e
	};

	if (is_ofdm) {
		for (k = 0; k < LCNPHY_NUM_TX_DIG_FILTERS_OFDM; k++) {
			if (LCNPHY_txdigfiltcoeffs_ofdm[k][0] == filt_type) {
				match = (s16) k;
				break;
			}
		}
		if (match >= 0) {
			for (k = 0; k < LCNPHY_NUM_DIG_FILT_COEFFS; k++)
				write_phy_reg(pi, addr_ofdm[k],
					      LCNPHY_txdigfiltcoeffs_ofdm
					      [match][k + 1]);
		}
	} else {
		for (k = 0; k < LCNPHY_NUM_TX_DIG_FILTERS_CCK; k++) {
			if (LCNPHY_txdigfiltcoeffs_cck[k][0] == filt_type) {
				match = (s16) k;
				break;
			}
		}
		if (match >= 0) {
			for (k = 0; k < LCNPHY_NUM_DIG_FILT_COEFFS; k++)
				write_phy_reg(pi, addr[k],
					      LCNPHY_txdigfiltcoeffs_cck
					      [match][k + 1]);
		}
	}

	return (match >= 0) ? 0 : -1;
}
/*
 * Tune the LCN-PHY and its 2064 radio to the given chanspec, then load
 * the per-channel SFO settings and the appropriate TX IIR filters.
 */
void wlc_phy_chanspec_set_lcnphy(struct brcms_phy *pi, u16 chanspec)
{
	u8 channel = CHSPEC_CHANNEL(chanspec);

	/* record the new chanspec and apply per-channel PHY tweaks */
	wlc_phy_chanspec_radio_set((struct brcms_phy_pub *) pi, chanspec);
	wlc_lcnphy_set_chanspec_tweaks(pi, pi->radio_chanspec);
	or_phy_reg(pi, 0x44a, 0x44);
	write_phy_reg(pi, 0x44a, 0x80);
	wlc_lcnphy_radio_2064_channel_tune_4313(pi, channel);
	udelay(1000);		/* 1 ms pause after the radio retune */
	wlc_lcnphy_toggle_afe_pwdn(pi);
	/* lcnphy_sfo_cfg is indexed from channel 1; assumes channel >= 1 */
	write_phy_reg(pi, 0x657, lcnphy_sfo_cfg[channel - 1].ptcentreTs20);
	write_phy_reg(pi, 0x658, lcnphy_sfo_cfg[channel - 1].ptcentreFactor);
	if (CHSPEC_CHANNEL(pi->radio_chanspec) == 14) {
		/* channel 14 gets its own CCK TX filter (type 3) */
		mod_phy_reg(pi, 0x448, (0x3 << 8), (2) << 8);
		wlc_lcnphy_load_tx_iir_filter(pi, false, 3);
	} else {
		mod_phy_reg(pi, 0x448, (0x3 << 8), (1) << 8);
		wlc_lcnphy_load_tx_iir_filter(pi, false, 2);
	}
	/* OFDM always uses filter type 0 */
	wlc_lcnphy_load_tx_iir_filter(pi, true, 0);
	mod_phy_reg(pi, 0x4eb, (0x7 << 3), (1) << 3);
}
/* Read back the current PA gain override value from PHY reg 0x4fb. */
static u16 wlc_lcnphy_get_pa_gain(struct brcms_phy *pi)
{
	u16 reg = read_phy_reg(pi, 0x4fb);

	return (reg & LCNPHY_txgainctrlovrval1_pagain_ovr_val1_MASK) >>
	       LCNPHY_txgainctrlovrval1_pagain_ovr_val1_SHIFT;
}
static void wlc_lcnphy_set_tx_gain(struct brcms_phy *pi,
struct lcnphy_txgains *target_gains)
{
u16 pa_gain = wlc_lcnphy_get_pa_gain(pi);
mod_phy_reg(
pi, 0x4b5,
(0xffff << 0),
((target_gains->gm_gain) |
(target_gains->pga_gain << 8)) <<
0);
mod_phy_reg(pi, 0x4fb,
(0x7fff << 0),
((target_gains->pad_gain) | (pa_gain << 8)) << 0);
mod_phy_reg(
pi, 0x4fc,
(0xffff << 0),
((target_gains->gm_gain) |
(target_gains->pga_gain << 8)) <<
0);
mod_phy_reg(pi, 0x4fd,
(0x7fff << 0),
((target_gains->pad_gain) | (pa_gain << 8)) << 0);
wlc_lcnphy_set_dac_gain(pi, target_gains->dac_gain);
wlc_lcnphy_enable_tx_gain_override(pi);
}
/*
 * Set the baseband multiplier: m0 goes in the high byte of IQLOCAL
 * table entry 87 (the low byte is written as zero).
 */
static void wlc_lcnphy_set_bbmult(struct brcms_phy *pi, u8 m0)
{
	u16 packed = (u16) m0 << 8;
	struct phytbl_info tab = {
		.tbl_ptr = &packed,
		.tbl_len = 1,
		.tbl_id = LCNPHY_TBL_ID_IQLOCAL,
		.tbl_offset = 87,
		.tbl_width = 16,
	};

	wlc_lcnphy_write_table(pi, &tab);
}
/*
 * Zero out the TX power control offset tables: the per-rate offsets
 * (skipped when temperature-based power control is active) and the
 * 64-entry per-MAC offsets.
 */
static void wlc_lcnphy_clear_tx_power_offsets(struct brcms_phy *pi)
{
	u32 zeros[64] = {0};
	struct phytbl_info tab;

	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
	tab.tbl_width = 32;
	tab.tbl_ptr = zeros;

	if (!wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) {
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_RATE_OFFSET;
		tab.tbl_len = 30;
		wlc_lcnphy_write_table(pi, &tab);
	}

	tab.tbl_offset = LCNPHY_TX_PWR_CTRL_MAC_OFFSET;
	tab.tbl_len = 64;
	wlc_lcnphy_write_table(pi, &tab);
}
/* Tap point selection for the TSSI measurement mux. */
enum lcnphy_tssi_mode {
	LCNPHY_TSSI_PRE_PA,	/* sample before the power amplifier */
	LCNPHY_TSSI_POST_PA,	/* sample after the power amplifier */
	LCNPHY_TSSI_EXT		/* external TSSI input */
};
/*
 * Route the TSSI measurement mux to the requested tap point.
 * Register programming differs between LCN PHY rev 2 and older revs.
 */
static void
wlc_lcnphy_set_tssi_mux(struct brcms_phy *pi, enum lcnphy_tssi_mode pos)
{
	mod_phy_reg(pi, 0x4d7, (0x1 << 0), (0x1) << 0);
	mod_phy_reg(pi, 0x4d7, (0x1 << 6), (1) << 6);
	if (LCNPHY_TSSI_POST_PA == pos) {
		mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0) << 2);
		mod_phy_reg(pi, 0x4d9, (0x1 << 3), (1) << 3);
		if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
			mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
		} else {
			mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0x1);
			mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
		}
	} else {
		/* pre-PA / external: select bits 2,3 of 0x4d9 are inverted */
		mod_phy_reg(pi, 0x4d9, (0x1 << 2), (0x1) << 2);
		mod_phy_reg(pi, 0x4d9, (0x1 << 3), (0) << 3);
		if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
			mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
		} else {
			mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0);
			mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 0x8);
		}
	}
	mod_phy_reg(pi, 0x637, (0x3 << 14), (0) << 14);
	if (LCNPHY_TSSI_EXT == pos) {
		/* extra radio setup needed for the external TSSI input */
		write_radio_reg(pi, RADIO_2064_REG07F, 1);
		mod_radio_reg(pi, RADIO_2064_REG005, 0x7, 0x2);
		mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 0x1 << 7);
		mod_radio_reg(pi, RADIO_2064_REG028, 0x1f, 0x3);
	}
}
/*
 * Compute the RF-sequencer ADC power-up delay from the six timing
 * fields spread across PHY regs 0x4a5, 0x40d and 0x4a2, clamped to a
 * minimum of 1600.
 */
static u16 wlc_lcnphy_rfseq_tbl_adc_pwrup(struct brcms_phy *pi)
{
	u16 total;
	u16 t1 = read_phy_reg(pi, 0x4a5) & 0xff;
	u16 t2 = 1 << ((read_phy_reg(pi, 0x4a5) >> 12) & 0x7);
	u16 t3 = read_phy_reg(pi, 0x40d) & 0xff;
	u16 t4 = 1 << ((read_phy_reg(pi, 0x40d) >> 8) & 0x7);
	u16 t5 = read_phy_reg(pi, 0x4a2) & 0xff;
	u16 t6 = 1 << ((read_phy_reg(pi, 0x4a2) >> 8) & 0x7);

	total = 2 * (t1 + t2 + t3 + t4 + 2 * (t5 + t6)) + 80;
	if (total < 1600)
		total = 1600;
	return total;
}
/*
 * Program the aux-PGA Vmid/gain parameters used by the RSSI and power
 * control measurement paths, from the calibration values cached in
 * pi_lcn (lcnphy_rssi_vc / _vf / _gs).
 */
static void wlc_lcnphy_pwrctrl_rssiparams(struct brcms_phy *pi)
{
	u16 auxpga_vmid, auxpga_vmid_temp, auxpga_gain_temp;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	/* vmid word: sel(2) | coarse | fine */
	auxpga_vmid = (2 << 8) |
		(pi_lcn->lcnphy_rssi_vc << 4) | pi_lcn->lcnphy_rssi_vf;
	/* fixed values used by the temperature-sense path */
	auxpga_vmid_temp = (2 << 8) | (8 << 4) | 4;
	auxpga_gain_temp = 2;
	mod_phy_reg(pi, 0x4d8, (0x1 << 0), (0) << 0);
	mod_phy_reg(pi, 0x4d8, (0x1 << 1), (0) << 1);
	mod_phy_reg(pi, 0x4d7, (0x1 << 3), (0) << 3);
	/* RSSI vmid/gain into three override registers */
	mod_phy_reg(pi, 0x4db,
		    (0x3ff << 0) |
		    (0x7 << 12),
		    (auxpga_vmid << 0) | (pi_lcn->lcnphy_rssi_gs << 12));
	mod_phy_reg(pi, 0x4dc,
		    (0x3ff << 0) |
		    (0x7 << 12),
		    (auxpga_vmid << 0) | (pi_lcn->lcnphy_rssi_gs << 12));
	mod_phy_reg(pi, 0x40a,
		    (0x3ff << 0) |
		    (0x7 << 12),
		    (auxpga_vmid << 0) | (pi_lcn->lcnphy_rssi_gs << 12));
	/* temperature-sense vmid/gain into the remaining two */
	mod_phy_reg(pi, 0x40b,
		    (0x3ff << 0) |
		    (0x7 << 12),
		    (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
	mod_phy_reg(pi, 0x40c,
		    (0x3ff << 0) |
		    (0x7 << 12),
		    (auxpga_vmid_temp << 0) | (auxpga_gain_temp << 12));
	mod_radio_reg(pi, RADIO_2064_REG082, (1 << 5), (1 << 5));
}
/*
 * Configure the TSSI (transmit signal strength) measurement hardware:
 * initialise the TXPWRCTL table, select the external TSSI mux, set up
 * the ADC/averaging parameters, and program the radio-side TSSI path.
 * Register sequence is order-dependent; do not reorder.
 */
static void wlc_lcnphy_tssi_setup(struct brcms_phy *pi)
{
	struct phytbl_info tab;
	u32 rfseq, ind;

	/* fill TXPWRCTL entries 0..127 and 704..831 with an identity ramp */
	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
	tab.tbl_width = 32;
	tab.tbl_ptr = &ind;	/* each entry written with its own index */
	tab.tbl_len = 1;
	tab.tbl_offset = 0;
	for (ind = 0; ind < 128; ind++) {
		wlc_lcnphy_write_table(pi, &tab);
		tab.tbl_offset++;
	}
	tab.tbl_offset = 704;
	for (ind = 0; ind < 128; ind++) {
		wlc_lcnphy_write_table(pi, &tab);
		tab.tbl_offset++;
	}
	mod_phy_reg(pi, 0x503, (0x1 << 0), (0) << 0);
	mod_phy_reg(pi, 0x503, (0x1 << 2), (0) << 2);
	mod_phy_reg(pi, 0x503, (0x1 << 4), (1) << 4);

	/* measure TSSI from the external input */
	wlc_lcnphy_set_tssi_mux(pi, LCNPHY_TSSI_EXT);
	mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);
	mod_phy_reg(pi, 0x4a4, (0x1 << 15), (1) << 15);
	mod_phy_reg(pi, 0x4d0, (0x1 << 5), (0) << 5);
	mod_phy_reg(pi, 0x4a4, (0x1ff << 0), (0) << 0);

	/* ADC timing / averaging parameters */
	mod_phy_reg(pi, 0x4a5, (0xff << 0), (255) << 0);
	mod_phy_reg(pi, 0x4a5, (0x7 << 12), (5) << 12);
	mod_phy_reg(pi, 0x4a5, (0x7 << 8), (0) << 8);
	mod_phy_reg(pi, 0x40d, (0xff << 0), (64) << 0);
	mod_phy_reg(pi, 0x40d, (0x7 << 8), (4) << 8);
	mod_phy_reg(pi, 0x4a2, (0xff << 0), (64) << 0);
	mod_phy_reg(pi, 0x4a2, (0x7 << 8), (4) << 8);
	mod_phy_reg(pi, 0x4d0, (0x1ff << 6), (0) << 6);
	mod_phy_reg(pi, 0x4a8, (0xff << 0), (0x1) << 0);
	wlc_lcnphy_clear_tx_power_offsets(pi);
	mod_phy_reg(pi, 0x4a6, (0x1 << 15), (1) << 15);
	mod_phy_reg(pi, 0x4a6, (0x1ff << 0), (0xff) << 0);
	mod_phy_reg(pi, 0x49a, (0x1ff << 0), (0xff) << 0);

	/* radio-side TSSI path; layout differs on LCN rev 2 */
	if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
		mod_radio_reg(pi, RADIO_2064_REG028, 0xf, 0xe);
		mod_radio_reg(pi, RADIO_2064_REG086, 0x4, 0x4);
	} else {
		mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
		mod_radio_reg(pi, RADIO_2064_REG11A, 0x8, 1 << 3);
	}
	write_radio_reg(pi, RADIO_2064_REG025, 0xc);
	if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
		mod_radio_reg(pi, RADIO_2064_REG03A, 0x1, 1);
	} else {
		if (CHSPEC_IS2G(pi->radio_chanspec))
			mod_radio_reg(pi, RADIO_2064_REG03A, 0x2, 1 << 1);
		else
			mod_radio_reg(pi, RADIO_2064_REG03A, 0x2, 0 << 1);
	}
	if (LCNREV_IS(pi->pubpi.phy_rev, 2))
		mod_radio_reg(pi, RADIO_2064_REG03A, 0x2, 1 << 1);
	else
		mod_radio_reg(pi, RADIO_2064_REG03A, 0x4, 1 << 2);
	mod_radio_reg(pi, RADIO_2064_REG11A, 0x1, 1 << 0);
	mod_radio_reg(pi, RADIO_2064_REG005, 0x8, 1 << 3);
	if (!wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
		mod_phy_reg(pi, 0x4d7,
			    (0x1 << 3) | (0x7 << 12), 0 << 3 | 2 << 12);

	/* RF-sequencer ADC power-up delay */
	rfseq = wlc_lcnphy_rfseq_tbl_adc_pwrup(pi);
	tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
	tab.tbl_width = 16;
	tab.tbl_ptr = &rfseq;
	tab.tbl_len = 1;
	tab.tbl_offset = 6;
	wlc_lcnphy_write_table(pi, &tab);

	mod_phy_reg(pi, 0x938, (0x1 << 2), (1) << 2);
	mod_phy_reg(pi, 0x939, (0x1 << 2), (1) << 2);
	mod_phy_reg(pi, 0x4a4, (0x1 << 12), (1) << 12);
	mod_phy_reg(pi, 0x4d7, (0x1 << 2), (1) << 2);
	mod_phy_reg(pi, 0x4d7, (0xf << 8), (0) << 8);
	wlc_lcnphy_pwrctrl_rssiparams(pi);
}
/*
 * Refresh the cached TSSI power index/NPT once more than 2^npt frames
 * have been transmitted since the last snapshot; otherwise do nothing.
 */
void wlc_lcnphy_tx_pwr_update_npt(struct brcms_phy *pi)
{
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
	u16 total = wlc_lcnphy_total_tx_frames(pi);
	u16 sent = total - pi_lcn->lcnphy_tssi_tx_cnt;
	u16 npt = wlc_lcnphy_get_tx_pwr_npt(pi);

	if (sent <= (1 << npt))
		return;

	pi_lcn->lcnphy_tssi_tx_cnt = total;
	pi_lcn->lcnphy_tssi_idx = wlc_lcnphy_get_current_tx_pwr_idx(pi);
	pi_lcn->lcnphy_tssi_npt = npt;
}
/*
 * Convert a TSSI sample to a power value using the fixed-point
 * rational model p = (b0*1024 + b1*64*tssi) / (32768 + a1*tssi),
 * rounded to nearest via the (2*b + a) / (2*a) form.
 */
s32 wlc_lcnphy_tssi2dbm(s32 tssi, s32 a1, s32 b0, s32 b1)
{
	s32 den = 32768 + (a1 * tssi);
	s32 num = (1024 * b0) + (64 * b1 * tssi);

	return ((2 * num) + den) / (2 * den);
}
/*
 * Reset the TSSI start index and NPT to their 4313 2G defaults.
 * No-op when temperature-based power control is in use.
 */
static void wlc_lcnphy_txpower_reset_npt(struct brcms_phy *pi)
{
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	if (!wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) {
		pi_lcn->lcnphy_tssi_idx =
			LCNPHY_TX_PWR_CTRL_START_INDEX_2G_4313;
		pi_lcn->lcnphy_tssi_npt = LCNPHY_TX_PWR_CTRL_START_NPT;
	}
}
/*
 * Rebuild the per-rate TX power offset table from pi->tx_power_offset
 * and, if the target power changed, apply the new minimum and reset
 * the NPT state.  No-op under temperature-based power control.
 */
void wlc_lcnphy_txpower_recalc_target(struct brcms_phy *pi)
{
	struct phytbl_info tab;
	u32 rate_table[BRCMS_NUM_RATES_CCK + BRCMS_NUM_RATES_OFDM +
		       BRCMS_NUM_RATES_MCS_1_STREAM];
	uint i, j;

	if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
		return;

	/*
	 * i walks the table; j walks tx_power_offset[] and jumps to the
	 * MCS section once the CCK+OFDM entries are done.  Offsets are
	 * stored negated.
	 */
	for (i = 0, j = 0; i < ARRAY_SIZE(rate_table); i++, j++) {
		if (i == BRCMS_NUM_RATES_CCK + BRCMS_NUM_RATES_OFDM)
			j = TXP_FIRST_MCS_20_SISO;
		rate_table[i] = (u32) ((s32) (-pi->tx_power_offset[j]));
	}

	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
	tab.tbl_width = 32;
	tab.tbl_len = ARRAY_SIZE(rate_table);
	tab.tbl_ptr = rate_table;
	tab.tbl_offset = LCNPHY_TX_PWR_CTRL_RATE_OFFSET;
	wlc_lcnphy_write_table(pi, &tab);

	if (wlc_lcnphy_get_target_tx_pwr(pi) != pi->tx_power_min) {
		wlc_lcnphy_set_target_tx_pwr(pi, pi->tx_power_min);
		wlc_lcnphy_txpower_reset_npt(pi);
	}
}
/*
 * Put TX power control into "soft" (driver-driven) mode at the given
 * power index: load fixed CCK offsets, zero the 836..861 table range,
 * and latch index*2 into the soft power index field of 0x4a9.
 * No-op when TSSI-based (hardware) power control is active.
 */
static void wlc_lcnphy_set_tx_pwr_soft_ctrl(struct brcms_phy *pi, s8 index)
{
	u32 cck_offset[4] = { 22, 22, 22, 22 };
	u32 ofdm_offset, reg_offset_cck;
	int i;
	u16 index2;
	struct phytbl_info tab;

	if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
		return;

	/* pulse bit 14 of 0x4a4 */
	mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0x1) << 14);
	mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0x0) << 14);
	or_phy_reg(pi, 0x6da, 0x0040);
	/* reg_offset_cck is 0 here, so this subtraction is a no-op;
	 * kept for the (unused) general form */
	reg_offset_cck = 0;
	for (i = 0; i < 4; i++)
		cck_offset[i] -= reg_offset_cck;
	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
	tab.tbl_width = 32;
	tab.tbl_len = 4;
	tab.tbl_ptr = cck_offset;
	tab.tbl_offset = LCNPHY_TX_PWR_CTRL_RATE_OFFSET;
	wlc_lcnphy_write_table(pi, &tab);
	/* zero table entries 836..861 one at a time */
	ofdm_offset = 0;
	tab.tbl_len = 1;
	tab.tbl_ptr = &ofdm_offset;
	for (i = 836; i < 862; i++) {
		tab.tbl_offset = i;
		wlc_lcnphy_write_table(pi, &tab);
	}
	mod_phy_reg(pi, 0x4a4, (0x1 << 15), (0x1) << 15);
	mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0x1) << 14);
	mod_phy_reg(pi, 0x4a4, (0x1 << 13), (0x1) << 13);
	mod_phy_reg(pi, 0x4b0, (0x1 << 7), (0) << 7);
	mod_phy_reg(pi, 0x43b, (0x1 << 6), (0) << 6);
	mod_phy_reg(pi, 0x4a9, (0x1 << 15), (1) << 15);
	/* soft power index is stored in half-dB steps (index * 2) */
	index2 = (u16) (index * 2);
	mod_phy_reg(pi, 0x4a9, (0x1ff << 0), (index2) << 0);
	mod_phy_reg(pi, 0x6a3, (0x1 << 4), (0) << 4);
}
/*
 * Compute a temperature-compensated TX power index:
 * FIXED_TXPWR corrected by the board power delta, the temperature
 * drift (scaled by the calibrated tempsense slope), a cached
 * correction term, and the band-edge correction.
 *
 * Returns the current index unchanged when TSSI (hardware) power
 * control is active, and falls back to a safe index when the slope is
 * uncalibrated or the result is out of the 0..126 range.
 */
static s8 wlc_lcnphy_tempcompensated_txpwrctrl(struct brcms_phy *pi)
{
	s8 index, delta_brd, delta_temp, new_index, tempcorrx;
	s16 manp, meas_temp, temp_diff;
	bool neg = false;
	u16 temp;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi))
		return pi_lcn->lcnphy_current_index;

	index = FIXED_TXPWR;

	/* no calibration data -> no compensation possible */
	if (pi_lcn->lcnphy_tempsense_slope == 0)
		return index;

	temp = (u16) wlc_lcnphy_tempsense(pi, 0);
	meas_temp = LCNPHY_TEMPSENSE(temp);

	if (pi->tx_power_min != 0)
		delta_brd = (pi_lcn->lcnphy_measPower - pi->tx_power_min);
	else
		delta_brd = 0;

	/* temperature drift relative to the manufacturing reading,
	 * computed on the absolute value to round consistently */
	manp = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_rawtempsense);
	temp_diff = manp - meas_temp;
	if (temp_diff < 0) {
		neg = true;
		temp_diff = -temp_diff;
	}

	delta_temp = (s8) wlc_lcnphy_qdiv_roundup((u32) (temp_diff * 192),
						  (u32) (pi_lcn->
							 lcnphy_tempsense_slope
							 * 10), 0);
	if (neg)
		delta_temp = -delta_temp;

	if (pi_lcn->lcnphy_tempsense_option == 3
	    && LCNREV_IS(pi->pubpi.phy_rev, 0))
		delta_temp = 0;

	/* lcnphy_tempcorrx is a 6-bit two's-complement value */
	if (pi_lcn->lcnphy_tempcorrx > 31)
		tempcorrx = (s8) (pi_lcn->lcnphy_tempcorrx - 64);
	else
		tempcorrx = (s8) pi_lcn->lcnphy_tempcorrx;
	if (LCNREV_IS(pi->pubpi.phy_rev, 1))
		tempcorrx = 4;

	new_index =
		index + delta_brd + delta_temp - pi_lcn->lcnphy_bandedge_corr;
	new_index += tempcorrx;

	/* rev 1 clamps the fallback index to max */
	if (LCNREV_IS(pi->pubpi.phy_rev, 1))
		index = 127;
	if (new_index < 0 || new_index > 126)
		return index;
	return new_index;
}
/*
 * Map a requested power-control mode onto what the hardware actually
 * supports: HW (TSSI) mode becomes temperature-based on tempsense
 * boards, and vice versa on TSSI boards.
 */
static u16 wlc_lcnphy_set_tx_pwr_ctrl_mode(struct brcms_phy *pi, u16 mode)
{
	if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) &&
	    mode == LCNPHY_TX_PWR_CTRL_HW)
		return LCNPHY_TX_PWR_CTRL_TEMPBASED;

	if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi) &&
	    mode == LCNPHY_TX_PWR_CTRL_TEMPBASED)
		return LCNPHY_TX_PWR_CTRL_HW;

	return mode;
}
/*
 * Switch the TX power control mode (OFF / HW-TSSI / temperature-based
 * soft control), performing the state save/restore needed when the
 * mode actually changes.
 */
void wlc_lcnphy_set_tx_pwr_ctrl(struct brcms_phy *pi, u16 mode)
{
	u16 old_mode = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	s8 index;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	/* normalise both modes against board capabilities */
	mode = wlc_lcnphy_set_tx_pwr_ctrl_mode(pi, mode);
	old_mode = wlc_lcnphy_set_tx_pwr_ctrl_mode(pi, old_mode);

	mod_phy_reg(pi, 0x6da, (0x1 << 6),
		    ((LCNPHY_TX_PWR_CTRL_HW == mode) ? 1 : 0) << 6);

	mod_phy_reg(pi, 0x6a3, (0x1 << 4),
		    ((LCNPHY_TX_PWR_CTRL_HW == mode) ? 0 : 1) << 4);

	if (old_mode != mode) {
		if (LCNPHY_TX_PWR_CTRL_HW == old_mode) {
			/* leaving HW mode: snapshot NPT, clear offsets */
			wlc_lcnphy_tx_pwr_update_npt(pi);
			wlc_lcnphy_clear_tx_power_offsets(pi);
		}
		if (LCNPHY_TX_PWR_CTRL_HW == mode) {
			/* entering HW mode: restore cached TSSI state */
			wlc_lcnphy_txpower_recalc_target(pi);
			wlc_lcnphy_set_start_tx_pwr_idx(pi,
							pi_lcn->
							lcnphy_tssi_idx);
			wlc_lcnphy_set_tx_pwr_npt(pi, pi_lcn->lcnphy_tssi_npt);
			mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 0);
			pi_lcn->lcnphy_tssi_tx_cnt =
				wlc_lcnphy_total_tx_frames(pi);
			wlc_lcnphy_disable_tx_gain_override(pi);
			pi_lcn->lcnphy_tx_power_idx_override = -1;
		} else
			wlc_lcnphy_enable_tx_gain_override(pi);

		/* mode value doubles as the 3 control bits of 0x4a4 */
		mod_phy_reg(pi, 0x4a4,
			    ((0x1 << 15) | (0x1 << 14) | (0x1 << 13)), mode);
		if (mode == LCNPHY_TX_PWR_CTRL_TEMPBASED) {
			index = wlc_lcnphy_tempcompensated_txpwrctrl(pi);
			wlc_lcnphy_set_tx_pwr_soft_ctrl(pi, index);
			/* 0x4a9 holds index in half-dB steps; convert back */
			pi_lcn->lcnphy_current_index = (s8)
				((read_phy_reg(pi,
					       0x4a9) &
				  0xFF) / 2);
		}
	}
}
/*
 * Set up the internal TX->RX loopback path used by TX IQ/LO
 * calibration.  Saves the 20 radio registers listed in
 * iqlo_loopback_rf_regs into @values_to_save so
 * wlc_lcnphy_tx_iqlo_loopback_cleanup() can restore them.
 * Sequence and udelay() spacing are order-dependent; do not reorder.
 */
static void
wlc_lcnphy_tx_iqlo_loopback(struct brcms_phy *pi, u16 *values_to_save)
{
	u16 vmid;
	int i;

	/* save radio state for later restore */
	for (i = 0; i < 20; i++)
		values_to_save[i] =
			read_radio_reg(pi, iqlo_loopback_rf_regs[i]);

	/* force TX and RX overrides on */
	mod_phy_reg(pi, 0x44c, (0x1 << 12), 1 << 12);
	mod_phy_reg(pi, 0x44d, (0x1 << 14), 1 << 14);

	mod_phy_reg(pi, 0x44c, (0x1 << 11), 1 << 11);
	mod_phy_reg(pi, 0x44d, (0x1 << 13), 0 << 13);

	mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
	mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);

	mod_phy_reg(pi, 0x43b, (0x1 << 0), 1 << 0);
	mod_phy_reg(pi, 0x43c, (0x1 << 0), 0 << 0);

	/* rev-dependent radio loopback enables */
	if (LCNREV_IS(pi->pubpi.phy_rev, 2))
		and_radio_reg(pi, RADIO_2064_REG03A, 0xFD);
	else
		and_radio_reg(pi, RADIO_2064_REG03A, 0xF9);
	or_radio_reg(pi, RADIO_2064_REG11A, 0x1);

	or_radio_reg(pi, RADIO_2064_REG036, 0x01);
	or_radio_reg(pi, RADIO_2064_REG11A, 0x18);
	udelay(20);

	if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
		if (CHSPEC_IS5G(pi->radio_chanspec))
			mod_radio_reg(pi, RADIO_2064_REG03A, 1, 0);
		else
			or_radio_reg(pi, RADIO_2064_REG03A, 1);
	} else {
		if (CHSPEC_IS5G(pi->radio_chanspec))
			mod_radio_reg(pi, RADIO_2064_REG03A, 3, 1);
		else
			or_radio_reg(pi, RADIO_2064_REG03A, 0x3);
	}

	udelay(20);

	write_radio_reg(pi, RADIO_2064_REG025, 0xF);
	if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
		if (CHSPEC_IS5G(pi->radio_chanspec))
			mod_radio_reg(pi, RADIO_2064_REG028, 0xF, 0x4);
		else
			mod_radio_reg(pi, RADIO_2064_REG028, 0xF, 0x6);
	} else {
		if (CHSPEC_IS5G(pi->radio_chanspec))
			mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0x4 << 1);
		else
			mod_radio_reg(pi, RADIO_2064_REG028, 0x1e, 0x6 << 1);
	}

	udelay(20);

	write_radio_reg(pi, RADIO_2064_REG005, 0x8);
	or_radio_reg(pi, RADIO_2064_REG112, 0x80);
	udelay(20);

	or_radio_reg(pi, RADIO_2064_REG0FF, 0x10);
	or_radio_reg(pi, RADIO_2064_REG11F, 0x44);
	udelay(20);

	or_radio_reg(pi, RADIO_2064_REG00B, 0x7);
	or_radio_reg(pi, RADIO_2064_REG113, 0x10);
	udelay(20);

	write_radio_reg(pi, RADIO_2064_REG007, 0x1);
	udelay(20);

	/* aux-PGA vmid: high 2 bits in REG0FC, low byte in REG0FD */
	vmid = 0x2A6;
	mod_radio_reg(pi, RADIO_2064_REG0FC, 0x3 << 0, (vmid >> 8) & 0x3);
	write_radio_reg(pi, RADIO_2064_REG0FD, (vmid & 0xff));
	or_radio_reg(pi, RADIO_2064_REG11F, 0x44);
	udelay(20);

	or_radio_reg(pi, RADIO_2064_REG0FF, 0x10);
	udelay(20);

	write_radio_reg(pi, RADIO_2064_REG012, 0x02);
	or_radio_reg(pi, RADIO_2064_REG112, 0x06);
	write_radio_reg(pi, RADIO_2064_REG036, 0x11);
	write_radio_reg(pi, RADIO_2064_REG059, 0xcc);
	write_radio_reg(pi, RADIO_2064_REG05C, 0x2e);
	write_radio_reg(pi, RADIO_2064_REG078, 0xd7);
	write_radio_reg(pi, RADIO_2064_REG092, 0x15);
}
/*
 * Poll in 100 us steps until the IQ calibration engine goes idle,
 * giving up after 5000 polls (~500 ms).  Returns true when idle.
 */
static bool wlc_lcnphy_iqcal_wait(struct brcms_phy *pi)
{
	uint polls = 0;

	while (wlc_lcnphy_iqcal_active(pi)) {
		udelay(100);
		if (++polls > (10 * 500))
			break;
	}

	return wlc_lcnphy_iqcal_active(pi) == 0;
}
/*
 * Undo the loopback setup from wlc_lcnphy_tx_iqlo_loopback(): clear
 * the PHY override bits and restore the 20 saved radio registers.
 */
static void
wlc_lcnphy_tx_iqlo_loopback_cleanup(struct brcms_phy *pi, u16 *values_to_save)
{
	int i;

	/*
	 * NOTE(review): 0x0 >> 11 evaluates to 0, so this ANDs reg 0x44c
	 * with 0 and clears the entire register — possibly ~(0x1 << 11)
	 * was intended.  Left as-is; hardware behaviour depends on it.
	 */
	and_phy_reg(pi, 0x44c, 0x0 >> 11);

	and_phy_reg(pi, 0x43b, 0xC);

	for (i = 0; i < 20; i++)
		write_radio_reg(pi, iqlo_loopback_rf_regs[i],
				values_to_save[i]);
}
/*
 * Run the TX IQ/LO calibration state machine.
 *
 * Saves PHY/radio state, configures the TX->RX loopback, plays a test
 * tone, then steps through the calibration command list (full or
 * recal), accumulating best coefficients into the IQLOCAL table and
 * caching them in pi_lcn->lcnphy_cal_results.  All saved state is
 * restored on exit via the goto-cleanup path, even on iqcal timeout.
 *
 * @target_gains: gains to calibrate at, or NULL to use the current /
 *                cached TSSI-index gains.
 * @cal_mode:     LCNPHY_CAL_FULL or LCNPHY_CAL_RECAL.
 * @keep_tone:    when true, leave the test tone running on return.
 */
static void
wlc_lcnphy_tx_iqlo_cal(struct brcms_phy *pi,
		       struct lcnphy_txgains *target_gains,
		       enum lcnphy_cal_mode cal_mode, bool keep_tone)
{

	struct lcnphy_txgains cal_gains, temp_gains;
	u16 hash;
	u8 band_idx;
	int j;
	u16 ncorr_override[5];
	u16 syst_coeffs[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000,
			      0x0000, 0x0000, 0x0000, 0x0000, 0x0000};
	/* bits 11:8 of each command encode the cal type */
	u16 commands_fullcal[] = {
		0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234
	};
	u16 commands_recal[] = {
		0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234
	};
	u16 command_nums_fullcal[] = {
		0x7a97, 0x7a97, 0x7a97, 0x7a87, 0x7a87, 0x7b97
	};
	u16 command_nums_recal[] = {
		0x7a97, 0x7a97, 0x7a97, 0x7a87, 0x7a87, 0x7b97
	};
	u16 *command_nums = command_nums_fullcal;
	u16 *start_coeffs = NULL, *cal_cmds = NULL, cal_type, diq_start;
	u16 tx_pwr_ctrl_old, save_txpwrctrlrfctrl2;
	u16 save_sslpnCalibClkEnCtrl, save_sslpnRxFeClkEnCtrl;
	bool tx_gain_override_old;
	struct lcnphy_txgains old_gains;
	uint i, n_cal_cmds = 0, n_cal_start = 0;
	u16 *values_to_save;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	/* scratch buffer for the 20 radio registers the loopback clobbers */
	values_to_save = kmalloc(sizeof(u16) * 20, GFP_ATOMIC);
	if (NULL == values_to_save)
		return;

	/* save clock-enable state; restored in cleanup */
	save_sslpnRxFeClkEnCtrl = read_phy_reg(pi, 0x6db);
	save_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);

	or_phy_reg(pi, 0x6da, 0x40);
	or_phy_reg(pi, 0x6db, 0x3);

	switch (cal_mode) {
	case LCNPHY_CAL_FULL:
		start_coeffs = syst_coeffs;
		cal_cmds = commands_fullcal;
		n_cal_cmds = ARRAY_SIZE(commands_fullcal);
		break;

	case LCNPHY_CAL_RECAL:
		start_coeffs = syst_coeffs;
		cal_cmds = commands_recal;
		n_cal_cmds = ARRAY_SIZE(commands_recal);
		command_nums = command_nums_recal;
		break;

	default:
		break;
	}

	/* seed the IQLOCAL working coefficients (offset 64) */
	wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
				      start_coeffs, 11, 16, 64);

	write_phy_reg(pi, 0x6da, 0xffff);
	mod_phy_reg(pi, 0x503, (0x1 << 3), (1) << 3);

	/* disable TX power control for the duration of the cal */
	tx_pwr_ctrl_old = wlc_lcnphy_get_tx_pwr_ctrl(pi);

	mod_phy_reg(pi, 0x4a4, (0x1 << 12), (1) << 12);

	wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);

	save_txpwrctrlrfctrl2 = read_phy_reg(pi, 0x4db);

	mod_phy_reg(pi, 0x4db, (0x3ff << 0), (0x2a6) << 0);

	mod_phy_reg(pi, 0x4db, (0x7 << 12), (2) << 12);

	wlc_lcnphy_tx_iqlo_loopback(pi, values_to_save);

	/* resolve the gains to calibrate at */
	tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
	if (tx_gain_override_old)
		wlc_lcnphy_get_tx_gain(pi, &old_gains);

	if (!target_gains) {
		if (!tx_gain_override_old)
			wlc_lcnphy_set_tx_pwr_by_index(pi,
						       pi_lcn->lcnphy_tssi_idx);
		wlc_lcnphy_get_tx_gain(pi, &temp_gains);
		target_gains = &temp_gains;
	}

	/* look up per-gain cal parameters by gm|pga|pad hash */
	hash = (target_gains->gm_gain << 8) |
	       (target_gains->pga_gain << 4) | (target_gains->pad_gain);

	band_idx = (CHSPEC_IS5G(pi->radio_chanspec) ? 1 : 0);

	cal_gains = *target_gains;
	memset(ncorr_override, 0, sizeof(ncorr_override));
	for (j = 0; j < iqcal_gainparams_numgains_lcnphy[band_idx]; j++) {
		if (hash == tbl_iqcal_gainparams_lcnphy[band_idx][j][0]) {
			cal_gains.gm_gain =
				tbl_iqcal_gainparams_lcnphy[band_idx][j][1];
			cal_gains.pga_gain =
				tbl_iqcal_gainparams_lcnphy[band_idx][j][2];
			cal_gains.pad_gain =
				tbl_iqcal_gainparams_lcnphy[band_idx][j][3];
			memcpy(ncorr_override,
			       &tbl_iqcal_gainparams_lcnphy[band_idx][j][3],
			       sizeof(ncorr_override));
			break;
		}
	}

	wlc_lcnphy_set_tx_gain(pi, &cal_gains);

	write_phy_reg(pi, 0x453, 0xaa9);
	write_phy_reg(pi, 0x93d, 0xc0);

	/* load the LOFT and IQ gain ladders */
	wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
				      lcnphy_iqcal_loft_gainladder,
				      ARRAY_SIZE(lcnphy_iqcal_loft_gainladder),
				      16, 0);

	wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
				      lcnphy_iqcal_ir_gainladder,
				      ARRAY_SIZE(
					      lcnphy_iqcal_ir_gainladder), 16,
				      32);

	/* (re)start the 3.75 kHz-units test tone */
	if (pi->phy_tx_tone_freq) {
		wlc_lcnphy_stop_tx_tone(pi);
		udelay(5);
		wlc_lcnphy_start_tx_tone(pi, 3750, 88, 1);
	} else {
		wlc_lcnphy_start_tx_tone(pi, 3750, 88, 1);
	}

	write_phy_reg(pi, 0x6da, 0xffff);

	/* run each calibration command */
	for (i = n_cal_start; i < n_cal_cmds; i++) {
		u16 zero_diq = 0;
		u16 best_coeffs[11];
		u16 command_num;

		cal_type = (cal_cmds[i] & 0x0f00) >> 8;

		command_num = command_nums[i];
		if (ncorr_override[cal_type])
			command_num =
				ncorr_override[cal_type] << 8 | (command_num &
								 0xff);

		write_phy_reg(pi, 0x452, command_num);

		/* cal types 3/4 run with the DIQ coefficient zeroed */
		if ((cal_type == 3) || (cal_type == 4)) {
			wlc_lcnphy_common_read_table(pi, LCNPHY_TBL_ID_IQLOCAL,
						     &diq_start, 1, 16, 69);

			wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
						      &zero_diq, 1, 16, 69);
		}

		write_phy_reg(pi, 0x451, cal_cmds[i]);

		if (!wlc_lcnphy_iqcal_wait(pi))
			goto cleanup;

		/* copy best coefficients (96) to working set (64) */
		wlc_lcnphy_common_read_table(pi, LCNPHY_TBL_ID_IQLOCAL,
					     best_coeffs,
					     ARRAY_SIZE(best_coeffs), 16, 96);
		wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
					      best_coeffs,
					      ARRAY_SIZE(best_coeffs), 16, 64);

		/* restore the DIQ coefficient we zeroed above */
		if ((cal_type == 3) || (cal_type == 4))
			wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
						      &diq_start, 1, 16, 69);
		wlc_lcnphy_common_read_table(pi, LCNPHY_TBL_ID_IQLOCAL,
					     pi_lcn->lcnphy_cal_results.
					     txiqlocal_bestcoeffs,
					     ARRAY_SIZE(pi_lcn->
							lcnphy_cal_results.
							txiqlocal_bestcoeffs),
					     16, 96);
	}

	/* cache the final coefficients and apply them */
	wlc_lcnphy_common_read_table(pi, LCNPHY_TBL_ID_IQLOCAL,
				     pi_lcn->lcnphy_cal_results.
				     txiqlocal_bestcoeffs,
				     ARRAY_SIZE(pi_lcn->lcnphy_cal_results.
						txiqlocal_bestcoeffs), 16, 96);
	pi_lcn->lcnphy_cal_results.txiqlocal_bestcoeffs_valid = true;

	wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
				      &pi_lcn->lcnphy_cal_results.
				      txiqlocal_bestcoeffs[0], 4, 16, 80);

	wlc_lcnphy_common_write_table(pi, LCNPHY_TBL_ID_IQLOCAL,
				      &pi_lcn->lcnphy_cal_results.
				      txiqlocal_bestcoeffs[5], 2, 16, 85);

cleanup:
	/* restore everything saved above, in reverse order */
	wlc_lcnphy_tx_iqlo_loopback_cleanup(pi, values_to_save);
	kfree(values_to_save);

	if (!keep_tone)
		wlc_lcnphy_stop_tx_tone(pi);

	write_phy_reg(pi, 0x4db, save_txpwrctrlrfctrl2);

	write_phy_reg(pi, 0x453, 0);

	if (tx_gain_override_old)
		wlc_lcnphy_set_tx_gain(pi, &old_gains);
	wlc_lcnphy_set_tx_pwr_ctrl(pi, tx_pwr_ctrl_old);

	write_phy_reg(pi, 0x6da, save_sslpnCalibClkEnCtrl);
	write_phy_reg(pi, 0x6db, save_sslpnRxFeClkEnCtrl);
}
/*
 * Estimate the idle (no-signal) TSSI value by transmitting a dummy
 * frame at maximum index with the TSSI path enabled, then store the
 * offset-binary converted result in the idle-TSSI field of 0x4a6.
 * All touched MAC/radio/PHY state is saved and restored.
 */
static void wlc_lcnphy_idle_tssi_est(struct brcms_phy_pub *ppi)
{
	bool suspend, tx_gain_override_old;
	struct lcnphy_txgains old_gains;
	struct brcms_phy *pi = (struct brcms_phy *) ppi;
	u16 idleTssi, idleTssi0_2C, idleTssi0_OB, idleTssi0_regvalue_OB,
	    idleTssi0_regvalue_2C;
	u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	u16 SAVE_lpfgain = read_radio_reg(pi, RADIO_2064_REG112);
	u16 SAVE_jtag_bb_afe_switch =
		read_radio_reg(pi, RADIO_2064_REG007) & 1;
	u16 SAVE_jtag_auxpga = read_radio_reg(pi, RADIO_2064_REG0FF) & 0x10;
	u16 SAVE_iqadc_aux_en = read_radio_reg(pi, RADIO_2064_REG11F) & 4;

	idleTssi = read_phy_reg(pi, 0x4ab);

	/* suspend the MAC so our dummy TX doesn't race real traffic */
	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
			 MCTL_EN_MAC));
	if (!suspend)
		wlapi_suspend_mac_and_wait(pi->sh->physhim);
	wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);

	/* force maximum TX power index while measuring */
	tx_gain_override_old = wlc_lcnphy_tx_gain_override_enabled(pi);
	wlc_lcnphy_get_tx_gain(pi, &old_gains);

	wlc_lcnphy_enable_tx_gain_override(pi);
	wlc_lcnphy_set_tx_pwr_by_index(pi, 127);
	write_radio_reg(pi, RADIO_2064_REG112, 0x6);
	mod_radio_reg(pi, RADIO_2064_REG007, 0x1, 1);
	mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 1 << 4);
	mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 1 << 2);
	wlc_lcnphy_tssi_setup(pi);

	/* dummy TX latches a fresh TSSI reading into 0x4ab */
	wlc_phy_do_dummy_tx(pi, true, OFF);
	idleTssi = ((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
		    >> 0);

	idleTssi0_2C = ((read_phy_reg(pi, 0x63e) & (0x1ff << 0))
			>> 0);

	/* convert between two's-complement and offset-binary encodings */
	if (idleTssi0_2C >= 256)
		idleTssi0_OB = idleTssi0_2C - 256;
	else
		idleTssi0_OB = idleTssi0_2C + 256;

	idleTssi0_regvalue_OB = idleTssi0_OB;
	if (idleTssi0_regvalue_OB >= 256)
		idleTssi0_regvalue_2C = idleTssi0_regvalue_OB - 256;
	else
		idleTssi0_regvalue_2C = idleTssi0_regvalue_OB + 256;

	mod_phy_reg(pi, 0x4a6, (0x1ff << 0), (idleTssi0_regvalue_2C) << 0);

	mod_phy_reg(pi, 0x44c, (0x1 << 12), (0) << 12);

	/* restore saved state */
	wlc_lcnphy_set_tx_gain_override(pi, tx_gain_override_old);
	wlc_lcnphy_set_tx_gain(pi, &old_gains);
	wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);

	write_radio_reg(pi, RADIO_2064_REG112, SAVE_lpfgain);
	mod_radio_reg(pi, RADIO_2064_REG007, 0x1, SAVE_jtag_bb_afe_switch);
	mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, SAVE_jtag_auxpga);
	mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, SAVE_iqadc_aux_en);
	mod_radio_reg(pi, RADIO_2064_REG112, 0x80, 1 << 7);
	if (!suspend)
		wlapi_enable_mac(pi->sh->physhim);
}
/*
 * Configure the measurement path for a Vbat or temperature reading
 * (@mode selects which), fire a dummy TX to trigger the sample, then
 * restore all saved radio and PHY registers.
 */
static void wlc_lcnphy_vbat_temp_sense_setup(struct brcms_phy *pi, u8 mode)
{
	bool suspend;
	u16 save_txpwrCtrlEn;
	u8 auxpga_vmidcourse, auxpga_vmidfine, auxpga_gain;
	u16 auxpga_vmid;
	struct phytbl_info tab;
	u32 val;
	u8 save_reg007, save_reg0FF, save_reg11F, save_reg005, save_reg025,
	   save_reg112;
	u16 values_to_save[14];
	s8 index;
	int i;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	udelay(999);

	/* save the radio and PHY registers we are about to clobber */
	save_reg007 = (u8) read_radio_reg(pi, RADIO_2064_REG007);
	save_reg0FF = (u8) read_radio_reg(pi, RADIO_2064_REG0FF);
	save_reg11F = (u8) read_radio_reg(pi, RADIO_2064_REG11F);
	save_reg005 = (u8) read_radio_reg(pi, RADIO_2064_REG005);
	save_reg025 = (u8) read_radio_reg(pi, RADIO_2064_REG025);
	save_reg112 = (u8) read_radio_reg(pi, RADIO_2064_REG112);

	for (i = 0; i < 14; i++)
		values_to_save[i] = read_phy_reg(pi, tempsense_phy_regs[i]);
	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
			 MCTL_EN_MAC));
	if (!suspend)
		wlapi_suspend_mac_and_wait(pi->sh->physhim);
	/*
	 * NOTE(review): 0x4a4 is accessed with read/write_phy_reg
	 * everywhere else in this file, but read/write_radio_reg here
	 * (also at the restore below) — looks suspicious; confirm
	 * against vendor code before changing.
	 */
	save_txpwrCtrlEn = read_radio_reg(pi, 0x4a4);

	wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
	index = pi_lcn->lcnphy_current_index;
	wlc_lcnphy_set_tx_pwr_by_index(pi, 127);
	mod_radio_reg(pi, RADIO_2064_REG007, 0x1, 0x1);
	mod_radio_reg(pi, RADIO_2064_REG0FF, 0x10, 0x1 << 4);
	mod_radio_reg(pi, RADIO_2064_REG11F, 0x4, 0x1 << 2);
	mod_phy_reg(pi, 0x503, (0x1 << 0), (0) << 0);

	mod_phy_reg(pi, 0x503, (0x1 << 2), (0) << 2);

	mod_phy_reg(pi, 0x4a4, (0x1 << 14), (0) << 14);

	mod_phy_reg(pi, 0x4a4, (0x1 << 15), (0) << 15);

	mod_phy_reg(pi, 0x4d0, (0x1 << 5), (0) << 5);

	/* ADC timing / averaging for the sense measurement */
	mod_phy_reg(pi, 0x4a5, (0xff << 0), (255) << 0);

	mod_phy_reg(pi, 0x4a5, (0x7 << 12), (5) << 12);

	mod_phy_reg(pi, 0x4a5, (0x7 << 8), (0) << 8);

	mod_phy_reg(pi, 0x40d, (0xff << 0), (64) << 0);

	mod_phy_reg(pi, 0x40d, (0x7 << 8), (6) << 8);

	mod_phy_reg(pi, 0x4a2, (0xff << 0), (64) << 0);

	mod_phy_reg(pi, 0x4a2, (0x7 << 8), (6) << 8);

	mod_phy_reg(pi, 0x4d9, (0x7 << 4), (2) << 4);

	mod_phy_reg(pi, 0x4d9, (0x7 << 8), (3) << 8);

	mod_phy_reg(pi, 0x4d9, (0x7 << 12), (1) << 12);

	mod_phy_reg(pi, 0x4da, (0x1 << 12), (0) << 12);

	mod_phy_reg(pi, 0x4da, (0x1 << 13), (1) << 13);

	mod_phy_reg(pi, 0x4a6, (0x1 << 15), (1) << 15);

	write_radio_reg(pi, RADIO_2064_REG025, 0xC);

	mod_radio_reg(pi, RADIO_2064_REG005, 0x8, 0x1 << 3);

	mod_phy_reg(pi, 0x938, (0x1 << 2), (1) << 2);

	mod_phy_reg(pi, 0x939, (0x1 << 2), (1) << 2);

	mod_phy_reg(pi, 0x4a4, (0x1 << 12), (1) << 12);

	/* RF-sequencer ADC power-up delay */
	val = wlc_lcnphy_rfseq_tbl_adc_pwrup(pi);
	tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
	tab.tbl_width = 16;
	tab.tbl_len = 1;
	tab.tbl_ptr = &val;
	tab.tbl_offset = 6;
	wlc_lcnphy_write_table(pi, &tab);

	/* aux-PGA settings differ between temperature and Vbat sensing */
	if (mode == TEMPSENSE) {
		mod_phy_reg(pi, 0x4d7, (0x1 << 3), (1) << 3);

		mod_phy_reg(pi, 0x4d7, (0x7 << 12), (1) << 12);

		auxpga_vmidcourse = 8;
		auxpga_vmidfine = 0x4;
		auxpga_gain = 2;
		mod_radio_reg(pi, RADIO_2064_REG082, 0x20, 1 << 5);
	} else {
		mod_phy_reg(pi, 0x4d7, (0x1 << 3), (1) << 3);

		mod_phy_reg(pi, 0x4d7, (0x7 << 12), (3) << 12);

		auxpga_vmidcourse = 7;
		auxpga_vmidfine = 0xa;
		auxpga_gain = 2;
	}
	auxpga_vmid =
		(u16) ((2 << 8) | (auxpga_vmidcourse << 4) | auxpga_vmidfine);
	mod_phy_reg(pi, 0x4d8, (0x1 << 0), (1) << 0);

	mod_phy_reg(pi, 0x4d8, (0x3ff << 2), (auxpga_vmid) << 2);

	mod_phy_reg(pi, 0x4d8, (0x1 << 1), (1) << 1);

	mod_phy_reg(pi, 0x4d8, (0x7 << 12), (auxpga_gain) << 12);

	mod_phy_reg(pi, 0x4d0, (0x1 << 5), (1) << 5);

	write_radio_reg(pi, RADIO_2064_REG112, 0x6);

	/* dummy TX triggers the sample; brief wait for completion */
	wlc_phy_do_dummy_tx(pi, true, OFF);
	if (!tempsense_done(pi))
		udelay(10);

	/* restore everything we saved */
	write_radio_reg(pi, RADIO_2064_REG007, (u16) save_reg007);
	write_radio_reg(pi, RADIO_2064_REG0FF, (u16) save_reg0FF);
	write_radio_reg(pi, RADIO_2064_REG11F, (u16) save_reg11F);
	write_radio_reg(pi, RADIO_2064_REG005, (u16) save_reg005);
	write_radio_reg(pi, RADIO_2064_REG025, (u16) save_reg025);
	write_radio_reg(pi, RADIO_2064_REG112, (u16) save_reg112);
	for (i = 0; i < 14; i++)
		write_phy_reg(pi, tempsense_phy_regs[i], values_to_save[i]);
	wlc_lcnphy_set_tx_pwr_by_index(pi, (int)index);

	write_radio_reg(pi, 0x4a4, save_txpwrCtrlEn);
	if (!suspend)
		wlapi_enable_mac(pi->sh->physhim);
	udelay(999);
}
/*
 * Initialise TX power control.  Without hardware power control
 * capability, program fixed per-band gains and run the temperature
 * sense setup; otherwise estimate idle TSSI, build the TSSI->power
 * table from the 2G PA parameters, and enable hardware control.
 */
static void wlc_lcnphy_tx_pwr_ctrl_init(struct brcms_phy_pub *ppi)
{
	struct lcnphy_txgains tx_gains;
	u8 bbmult;
	struct phytbl_info tab;
	s32 a1, b0, b1;
	s32 tssi, pwr, maxtargetpwr, mintargetpwr;
	bool suspend;
	struct brcms_phy *pi = (struct brcms_phy *) ppi;

	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
			 MCTL_EN_MAC));
	if (!suspend)
		wlapi_suspend_mac_and_wait(pi->sh->physhim);

	if (!pi->hwpwrctrl_capable) {
		/* fixed open-loop gains per band */
		if (CHSPEC_IS2G(pi->radio_chanspec)) {
			tx_gains.gm_gain = 4;
			tx_gains.pga_gain = 12;
			tx_gains.pad_gain = 12;
			tx_gains.dac_gain = 0;

			bbmult = 150;
		} else {
			tx_gains.gm_gain = 7;
			tx_gains.pga_gain = 15;
			tx_gains.pad_gain = 14;
			tx_gains.dac_gain = 0;

			bbmult = 150;
		}
		wlc_lcnphy_set_tx_gain(pi, &tx_gains);
		wlc_lcnphy_set_bbmult(pi, bbmult);
		wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
	} else {
		wlc_lcnphy_idle_tssi_est(ppi);

		wlc_lcnphy_clear_tx_power_offsets(pi);

		b0 = pi->txpa_2g[0];
		b1 = pi->txpa_2g[1];
		a1 = pi->txpa_2g[2];
		/* maxtargetpwr is computed but not used below */
		maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1);
		mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1);

		/* TSSI->power table, clamped at mintargetpwr */
		tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
		tab.tbl_width = 32;
		tab.tbl_ptr = &pwr;
		tab.tbl_len = 1;
		tab.tbl_offset = 0;
		for (tssi = 0; tssi < 128; tssi++) {
			pwr = wlc_lcnphy_tssi2dbm(tssi, a1, b0, b1);

			pwr = (pwr < mintargetpwr) ? mintargetpwr : pwr;
			wlc_lcnphy_write_table(pi, &tab);
			tab.tbl_offset++;
		}
		mod_phy_reg(pi, 0x410, (0x1 << 7), (0) << 7);

		write_phy_reg(pi, 0x4a8, 10);

		wlc_lcnphy_set_target_tx_pwr(pi, LCN_TARGET_PWR);

		wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_HW);
	}
	if (!suspend)
		wlapi_enable_mac(pi->sh->physhim);
}
static u8 wlc_lcnphy_get_bbmult(struct brcms_phy *pi)
{
u16 m0m1;
struct phytbl_info tab;
tab.tbl_ptr = &m0m1;
tab.tbl_len = 1;
tab.tbl_id = LCNPHY_TBL_ID_IQLOCAL;
tab.tbl_offset = 87;
tab.tbl_width = 16;
wlc_lcnphy_read_table(pi, &tab);
return (u8) ((m0m1 & 0xff00) >> 8);
}
/*
 * Program the PA gain override value into both the normal and the
 * "stx" TX gain-control override registers.
 */
static void wlc_lcnphy_set_pa_gain(struct brcms_phy *pi, u16 gain)
{
	mod_phy_reg(pi, 0x4fb,
		    LCNPHY_txgainctrlovrval1_pagain_ovr_val1_MASK,
		    gain << LCNPHY_txgainctrlovrval1_pagain_ovr_val1_SHIFT);
	mod_phy_reg(pi, 0x4fd,
		    LCNPHY_stxtxgainctrlovrval1_pagain_ovr_val1_MASK,
		    gain << LCNPHY_stxtxgainctrlovrval1_pagain_ovr_val1_SHIFT);
}
/*
 * Read back the four radio LOFT (local-oscillator feed-through)
 * fine/coarse I/Q compensation values from the 2064 radio registers.
 * All four output pointers must be non-NULL.
 */
void
wlc_lcnphy_get_radio_loft(struct brcms_phy *pi,
			  u8 *ei0, u8 *eq0, u8 *fi0, u8 *fq0)
{
	*ei0 = LCNPHY_IQLOCC_READ(read_radio_reg(pi, RADIO_2064_REG089));
	*eq0 = LCNPHY_IQLOCC_READ(read_radio_reg(pi, RADIO_2064_REG08A));
	*fi0 = LCNPHY_IQLOCC_READ(read_radio_reg(pi, RADIO_2064_REG08B));
	*fq0 = LCNPHY_IQLOCC_READ(read_radio_reg(pi, RADIO_2064_REG08C));
}
/*
 * Write the TX I/Q compensation coefficients (a, b) into the two
 * 16-bit IQLOCAL table entries starting at offset 80.
 */
void wlc_lcnphy_set_tx_iqcc(struct brcms_phy *pi, u16 a, u16 b)
{
	u16 iqcc[2] = { a, b };
	struct phytbl_info tab = {
		.tbl_id = LCNPHY_TBL_ID_IQLOCAL,
		.tbl_width = 16,
		.tbl_ptr = iqcc,
		.tbl_len = 2,
		.tbl_offset = 80,
	};

	wlc_lcnphy_write_table(pi, &tab);
}
/*
 * Write the TX local-oscillator compensation word (packed di/dq) into
 * the 16-bit IQLOCAL table entry at offset 85.
 */
void wlc_lcnphy_set_tx_locc(struct brcms_phy *pi, u16 didq)
{
	struct phytbl_info tab = {
		.tbl_id = LCNPHY_TBL_ID_IQLOCAL,
		.tbl_width = 16,
		.tbl_ptr = &didq,
		.tbl_len = 1,
		.tbl_offset = 85,
	};

	wlc_lcnphy_write_table(pi, &tab);
}
/*
 * Force the TX power to a specific index of the power-control table.
 * Disables HW power control, then looks up and applies the gain code,
 * PA gain, baseband multiplier, and (unless temperature-based power
 * control is active) the IQ/LO compensation and RF power entries for
 * that index.
 */
void wlc_lcnphy_set_tx_pwr_by_index(struct brcms_phy *pi, int index)
{
	struct phytbl_info tab;
	u16 a, b;
	u8 bb_mult;
	u32 bbmultiqcomp, txgain, locoeffs, rfpower;
	struct lcnphy_txgains gains;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	/* remember the forced index so later code can restore it */
	pi_lcn->lcnphy_tx_power_idx_override = (s8) index;
	pi_lcn->lcnphy_current_index = (u8) index;

	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
	tab.tbl_width = 32;
	tab.tbl_len = 1;

	wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);

	/* bbmult + IQ-comp word for this index */
	tab.tbl_offset = LCNPHY_TX_PWR_CTRL_IQ_OFFSET + index;
	tab.tbl_ptr = &bbmultiqcomp;
	wlc_lcnphy_read_table(pi, &tab);

	/* packed gain word: gm | pga<<8 | pad<<16 | pa<<24 */
	tab.tbl_offset = LCNPHY_TX_PWR_CTRL_GAIN_OFFSET + index;
	tab.tbl_width = 32;
	tab.tbl_ptr = &txgain;
	wlc_lcnphy_read_table(pi, &tab);

	gains.gm_gain = (u16) (txgain & 0xff);
	gains.pga_gain = (u16) (txgain >> 8) & 0xff;
	gains.pad_gain = (u16) (txgain >> 16) & 0xff;
	gains.dac_gain = (u16) (bbmultiqcomp >> 28) & 0x07;
	wlc_lcnphy_set_tx_gain(pi, &gains);
	wlc_lcnphy_set_pa_gain(pi, (u16) (txgain >> 24) & 0x7f);

	bb_mult = (u8) ((bbmultiqcomp >> 20) & 0xff);
	wlc_lcnphy_set_bbmult(pi, bb_mult);

	wlc_lcnphy_enable_tx_gain_override(pi);

	if (!wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) {
		/* apply stored IQ compensation coefficients */
		a = (u16) ((bbmultiqcomp >> 10) & 0x3ff);
		b = (u16) (bbmultiqcomp & 0x3ff);
		wlc_lcnphy_set_tx_iqcc(pi, a, b);

		/* apply stored LO compensation */
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_LO_OFFSET + index;
		tab.tbl_ptr = &locoeffs;
		wlc_lcnphy_read_table(pi, &tab);

		wlc_lcnphy_set_tx_locc(pi, (u16) locoeffs);

		/* apply stored RF power estimate (in 1/8 dB units) */
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_PWR_OFFSET + index;
		tab.tbl_ptr = &rfpower;
		wlc_lcnphy_read_table(pi, &tab);

		mod_phy_reg(pi, 0x6a6, (0x1fff << 0), (rfpower * 8) << 0);
	}
}
/*
 * Reset the PAPD compensation-delta table: even entries are zeroed,
 * odd entries are set to the neutral value 0x80000.
 */
static void wlc_lcnphy_clear_papd_comptable(struct brcms_phy *pi)
{
	u32 idx;
	u32 entries[128];
	struct phytbl_info tab;

	for (idx = 0; idx < 128; idx++)
		entries[idx] = (idx & 1) ? 0x80000 : 0;

	tab.tbl_ptr = entries;
	tab.tbl_len = 128;
	tab.tbl_id = LCNPHY_TBL_ID_PAPDCOMPDELTATBL;
	tab.tbl_width = 32;
	tab.tbl_offset = 0;

	wlc_lcnphy_write_table(pi, &tab);
}
/*
 * Power the TX chain up (bEnable) or down via the PHY override
 * registers.  On power-up the internal/external PA enables differ per
 * band (2 GHz vs 5 GHz).  Register/bit meanings follow the LCN PHY
 * register map; the raw addresses are kept as in the vendor code.
 */
void wlc_lcnphy_tx_pu(struct brcms_phy *pi, bool bEnable)
{
	if (!bEnable) {
		/* clear TX power-up overrides and force power-down */
		and_phy_reg(pi, 0x43b, ~(u16) ((0x1 << 1) | (0x1 << 4)));

		mod_phy_reg(pi, 0x43c, (0x1 << 1), 1 << 1);

		and_phy_reg(pi, 0x44c,
			    ~(u16) ((0x1 << 3) |
				    (0x1 << 5) |
				    (0x1 << 12) |
				    (0x1 << 0) | (0x1 << 1) | (0x1 << 2)));

		and_phy_reg(pi, 0x44d,
			    ~(u16) ((0x1 << 3) | (0x1 << 5) | (0x1 << 14)));
		mod_phy_reg(pi, 0x44d, (0x1 << 2), 1 << 2);

		mod_phy_reg(pi, 0x44d, (0x1 << 1) | (0x1 << 0), (0x1 << 0));

		and_phy_reg(pi, 0x4f9,
			    ~(u16) ((0x1 << 0) | (0x1 << 1) | (0x1 << 2)));

		and_phy_reg(pi, 0x4fa,
			    ~(u16) ((0x1 << 0) | (0x1 << 1) | (0x1 << 2)));
	} else {
		/* enable TX path with override control */
		mod_phy_reg(pi, 0x43b, (0x1 << 1), 1 << 1);
		mod_phy_reg(pi, 0x43c, (0x1 << 1), 0 << 1);

		mod_phy_reg(pi, 0x43b, (0x1 << 4), 1 << 4);
		mod_phy_reg(pi, 0x43c, (0x1 << 6), 0 << 6);

		mod_phy_reg(pi, 0x44c, (0x1 << 12), 1 << 12);
		mod_phy_reg(pi, 0x44d, (0x1 << 14), 1 << 14);

		/* TR switch: TX position, override on */
		wlc_lcnphy_set_trsw_override(pi, true, false);

		mod_phy_reg(pi, 0x44d, (0x1 << 2), 0 << 2);
		mod_phy_reg(pi, 0x44c, (0x1 << 2), 1 << 2);

		if (CHSPEC_IS2G(pi->radio_chanspec)) {
			/* 2 GHz: internal PA on, external PA path on */
			mod_phy_reg(pi, 0x44c, (0x1 << 3), 1 << 3);
			mod_phy_reg(pi, 0x44d, (0x1 << 3), 1 << 3);

			mod_phy_reg(pi, 0x44c, (0x1 << 5), 1 << 5);
			mod_phy_reg(pi, 0x44d, (0x1 << 5), 0 << 5);

			mod_phy_reg(pi, 0x4f9, (0x1 << 1), 1 << 1);
			mod_phy_reg(pi, 0x4fa, (0x1 << 1), 1 << 1);

			mod_phy_reg(pi, 0x4f9, (0x1 << 2), 1 << 2);
			mod_phy_reg(pi, 0x4fa, (0x1 << 2), 1 << 2);

			mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
			mod_phy_reg(pi, 0x4fa, (0x1 << 0), 1 << 0);
		} else {
			/* 5 GHz: complementary PA enables */
			mod_phy_reg(pi, 0x44c, (0x1 << 3), 1 << 3);
			mod_phy_reg(pi, 0x44d, (0x1 << 3), 0 << 3);

			mod_phy_reg(pi, 0x44c, (0x1 << 5), 1 << 5);
			mod_phy_reg(pi, 0x44d, (0x1 << 5), 1 << 5);

			mod_phy_reg(pi, 0x4f9, (0x1 << 1), 1 << 1);
			mod_phy_reg(pi, 0x4fa, (0x1 << 1), 0 << 1);

			mod_phy_reg(pi, 0x4f9, (0x1 << 2), 1 << 2);
			mod_phy_reg(pi, 0x4fa, (0x1 << 2), 0 << 2);

			mod_phy_reg(pi, 0x4f9, (0x1 << 0), 1 << 0);
			mod_phy_reg(pi, 0x4fa, (0x1 << 0), 0 << 0);
		}
	}
}
/*
 * Start sample-buffer playback.
 *
 * @num_samps:  number of samples in the play buffer (1-based)
 * @num_loops:  repeat count; 0xffff means loop forever
 * @wait:       inter-loop wait value
 * @iqcalmode:  when true, trigger playback via the IQ-cal start bit;
 *              otherwise start playback directly and power up TX
 */
static void
wlc_lcnphy_run_samples(struct brcms_phy *pi,
		       u16 num_samps,
		       u16 num_loops, u16 wait, bool iqcalmode)
{
	/* enable sample-play clocks */
	or_phy_reg(pi, 0x6da, 0x8080);

	mod_phy_reg(pi, 0x642, (0x7f << 0), (num_samps - 1) << 0);
	/* hardware counts loops from 0, except the forever value */
	if (num_loops != 0xffff)
		num_loops--;
	mod_phy_reg(pi, 0x640, (0xffff << 0), num_loops << 0);

	mod_phy_reg(pi, 0x641, (0xffff << 0), wait << 0);

	if (iqcalmode) {
		/* pulse the IQ-cal trigger bit */
		and_phy_reg(pi, 0x453, (u16) ~(0x1 << 15));
		or_phy_reg(pi, 0x453, (0x1 << 15));
	} else {
		write_phy_reg(pi, 0x63f, 1);
		wlc_lcnphy_tx_pu(pi, 1);
	}

	or_radio_reg(pi, RADIO_2064_REG112, 0x6);
}
/*
 * Put the PHY in or out of "deaf" mode (RX disabled during
 * calibration).  For 20 MHz channels, CRS is also gated: OFDM CRS is
 * re-enabled only on 2 GHz channels when leaving deaf mode.
 *
 * The original code had an LCNREV_LT(phy_rev, 2) conditional whose two
 * branches programmed exactly the same registers; the dead conditional
 * has been removed.
 */
void wlc_lcnphy_deaf_mode(struct brcms_phy *pi, bool mode)
{
	u8 phybw40;

	phybw40 = CHSPEC_IS40(pi->radio_chanspec);

	mod_phy_reg(pi, 0x4b0, (0x1 << 5), (mode) << 5);
	mod_phy_reg(pi, 0x4b1, (0x1 << 9), 0 << 9);

	if (phybw40 == 0) {
		mod_phy_reg((pi), 0x410,
			    (0x1 << 6) |
			    (0x1 << 5),
			    ((CHSPEC_IS2G(
				      pi->radio_chanspec)) ? (!mode) : 0) <<
			    6 | (!mode) << 5);
		mod_phy_reg(pi, 0x410, (0x1 << 7), (mode) << 7);
	}
}
/*
 * Generate a CW tone at f_kHz with amplitude max_val and start playing
 * it from the sample-play buffer.  A tone frequency of 0 produces a DC
 * pattern of two samples.  The PHY is deafened for the duration; callers
 * pair this with wlc_lcnphy_stop_tx_tone().
 */
void
wlc_lcnphy_start_tx_tone(struct brcms_phy *pi, s32 f_kHz, u16 max_val,
			 bool iqcalmode)
{
	u8 phy_bw;
	u16 num_samps, t, k;
	u32 bw;
	s32 theta = 0, rot = 0;
	struct cordic_iq tone_samp;
	u32 data_buf[64];
	u16 i_samp, q_samp;
	struct phytbl_info tab;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	pi->phy_tx_tone_freq = f_kHz;

	wlc_lcnphy_deaf_mode(pi, true);
	phy_bw = 40;
	if (pi_lcn->lcnphy_spurmod) {
		/* spur-avoidance must be off while the tone plays */
		write_phy_reg(pi, 0x942, 0x2);
		write_phy_reg(pi, 0x93b, 0x0);
		write_phy_reg(pi, 0x93c, 0x0);
		wlc_lcnphy_txrx_spur_avoidance_mode(pi, false);
	}

	if (f_kHz) {
		/* find the smallest whole number of samples per period */
		k = 1;
		do {
			bw = phy_bw * 1000 * k;
			num_samps = bw / abs(f_kHz);
			k++;
		} while ((num_samps * (u32) (abs(f_kHz))) != bw);
		/*
		 * NOTE(review): data_buf holds 64 entries but num_samps is
		 * not visibly bounded here — confirm callers only request
		 * frequencies that resolve to <= 64 samples.
		 */
	} else
		num_samps = 2;

	/* phase increment per sample, in cordic angle units */
	rot = ((f_kHz * 36) / phy_bw) / 100;
	theta = 0;

	for (t = 0; t < num_samps; t++) {

		tone_samp = cordic_calc_iq(theta);

		theta += rot;

		/* pack 10-bit I and Q into one 20-bit word */
		i_samp = (u16) (FLOAT(tone_samp.i * max_val) & 0x3ff);
		q_samp = (u16) (FLOAT(tone_samp.q * max_val) & 0x3ff);
		data_buf[t] = (i_samp << 10) | q_samp;
	}

	mod_phy_reg(pi, 0x6d6, (0x3 << 0), 0 << 0);

	mod_phy_reg(pi, 0x6da, (0x1 << 3), 1 << 3);

	tab.tbl_ptr = data_buf;
	tab.tbl_len = num_samps;
	tab.tbl_id = LCNPHY_TBL_ID_SAMPLEPLAY;
	tab.tbl_offset = 0;
	tab.tbl_width = 32;
	wlc_lcnphy_write_table(pi, &tab);

	/* play forever until explicitly stopped */
	wlc_lcnphy_run_samples(pi, num_samps, 0xffff, 0, iqcalmode);
}
/*
 * Stop sample-buffer tone playback started by wlc_lcnphy_start_tx_tone()
 * and undo its side effects (spur-avoidance mode, deaf mode, clocks).
 */
void wlc_lcnphy_stop_tx_tone(struct brcms_phy *pi)
{
	s16 playback_status;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	pi->phy_tx_tone_freq = 0;
	if (pi_lcn->lcnphy_spurmod) {
		/* restore spur-avoidance register values */
		write_phy_reg(pi, 0x942, 0x7);
		write_phy_reg(pi, 0x93b, 0x2017);
		write_phy_reg(pi, 0x93c, 0x27c5);
		wlc_lcnphy_txrx_spur_avoidance_mode(pi, true);
	}

	/* bit0: direct playback active, bit1: iqcal-triggered playback */
	playback_status = read_phy_reg(pi, 0x644);
	if (playback_status & (0x1 << 0)) {
		wlc_lcnphy_tx_pu(pi, 0);
		mod_phy_reg(pi, 0x63f, (0x1 << 1), 1 << 1);
	} else if (playback_status & (0x1 << 1))
		mod_phy_reg(pi, 0x453, (0x1 << 15), 0 << 15);

	mod_phy_reg(pi, 0x6d6, (0x3 << 0), 1 << 0);

	mod_phy_reg(pi, 0x6da, (0x1 << 3), 0 << 3);

	mod_phy_reg(pi, 0x6da, (0x1 << 7), 0 << 7);

	and_radio_reg(pi, RADIO_2064_REG112, 0xFFF9);

	wlc_lcnphy_deaf_mode(pi, false);
}
static void
wlc_lcnphy_set_cc(struct brcms_phy *pi, int cal_type, s16 coeff_x, s16 coeff_y)
{
u16 di0dq0;
u16 x, y, data_rf;
int k;
switch (cal_type) {
case 0:
wlc_lcnphy_set_tx_iqcc(pi, coeff_x, coeff_y);
break;
case 2:
di0dq0 = (coeff_x & 0xff) << 8 | (coeff_y & 0xff);
wlc_lcnphy_set_tx_locc(pi, di0dq0);
break;
case 3:
k = wlc_lcnphy_calc_floor(coeff_x, 0);
y = 8 + k;
k = wlc_lcnphy_calc_floor(coeff_x, 1);
x = 8 - k;
data_rf = (x * 16 + y);
write_radio_reg(pi, RADIO_2064_REG089, data_rf);
k = wlc_lcnphy_calc_floor(coeff_y, 0);
y = 8 + k;
k = wlc_lcnphy_calc_floor(coeff_y, 1);
x = 8 - k;
data_rf = (x * 16 + y);
write_radio_reg(pi, RADIO_2064_REG08A, data_rf);
break;
case 4:
k = wlc_lcnphy_calc_floor(coeff_x, 0);
y = 8 + k;
k = wlc_lcnphy_calc_floor(coeff_x, 1);
x = 8 - k;
data_rf = (x * 16 + y);
write_radio_reg(pi, RADIO_2064_REG08B, data_rf);
k = wlc_lcnphy_calc_floor(coeff_y, 0);
y = 8 + k;
k = wlc_lcnphy_calc_floor(coeff_y, 1);
x = 8 - k;
data_rf = (x * 16 + y);
write_radio_reg(pi, RADIO_2064_REG08C, data_rf);
break;
}
}
/*
 * Read back the current calibration coefficient pair for @cal_type
 * (see wlc_lcnphy_set_cc() for the type encoding).  Unknown types
 * return {0, 0}.
 */
static struct lcnphy_unsign16_struct
wlc_lcnphy_get_cc(struct brcms_phy *pi, int cal_type)
{
	u16 a, b, didq;
	u8 di0, dq0, ei, eq, fi, fq;
	struct lcnphy_unsign16_struct cc;

	cc.re = 0;
	cc.im = 0;
	switch (cal_type) {
	case 0:
		wlc_lcnphy_get_tx_iqcc(pi, &a, &b);
		cc.re = a;
		cc.im = b;
		break;
	case 2:
		didq = wlc_lcnphy_get_tx_locc(pi);
		/*
		 * Plain byte extraction.  The original used
		 * (((didq & 0xff00) << 16) >> 24), which can left-shift a
		 * value into the sign bit of int (undefined behavior);
		 * after truncation to u8 the result is identical.
		 */
		di0 = (didq >> 8) & 0xff;
		dq0 = didq & 0xff;
		cc.re = (u16) di0;
		cc.im = (u16) dq0;
		break;
	case 3:
		wlc_lcnphy_get_radio_loft(pi, &ei, &eq, &fi, &fq);
		cc.re = (u16) ei;
		cc.im = (u16) eq;
		break;
	case 4:
		wlc_lcnphy_get_radio_loft(pi, &ei, &eq, &fi, &fq);
		cc.re = (u16) fi;
		cc.im = (u16) fq;
		break;
	}
	return cc;
}
/*
 * Capture a burst of samples through the MAC sample-collect engine into
 * template RAM (0x7E00-0x8000) and copy one component (I or Q,
 * depending on lcnphy_iqcal_swp_dis) into @ptr as signed 10-bit values.
 *
 * @ptr must hold at least 131 entries: up to 128 samples plus ptr[130],
 * which is used as a clip-detected flag when @clip_detect_algo is set
 * (capture aborts early once |imag| exceeds @thresh).
 */
static void
wlc_lcnphy_samp_cap(struct brcms_phy *pi, int clip_detect_algo, u16 thresh,
		    s16 *ptr, int mode)
{
	u32 curval1, curval2, stpptr, curptr, strptr, val;
	u16 sslpnCalibClkEnCtrl, timer;
	u16 old_sslpnCalibClkEnCtrl;
	s16 imag, real;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	timer = 0;
	old_sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);

	/* take over PSM core control and point collect at template RAM */
	curval1 = bcma_read16(pi->d11core, D11REGOFFS(psm_corectlsts));
	ptr[130] = 0;
	bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts),
		     ((1 << 6) | curval1));

	bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_strptr), 0x7E00);
	bcma_write16(pi->d11core, D11REGOFFS(smpl_clct_stpptr), 0x8000);
	udelay(20);
	curval2 = bcma_read16(pi->d11core, D11REGOFFS(psm_phy_hdr_param));
	bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param),
		     curval2 | 0x30);

	/* configure PHY debug mux for sample capture */
	write_phy_reg(pi, 0x555, 0x0);
	write_phy_reg(pi, 0x5a6, 0x5);

	write_phy_reg(pi, 0x5a2, (u16) (mode | mode << 6));
	write_phy_reg(pi, 0x5cf, 3);
	write_phy_reg(pi, 0x5a5, 0x3);
	write_phy_reg(pi, 0x583, 0x0);
	write_phy_reg(pi, 0x584, 0x0);
	write_phy_reg(pi, 0x585, 0x0fff);
	write_phy_reg(pi, 0x586, 0x0000);

	write_phy_reg(pi, 0x580, 0x4501);

	sslpnCalibClkEnCtrl = read_phy_reg(pi, 0x6da);
	write_phy_reg(pi, 0x6da, (u32) (sslpnCalibClkEnCtrl | 0x2008));
	stpptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_stpptr));
	curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
	/* poll (max ~5 ms) until the collect pointer reaches the stop ptr */
	do {
		udelay(10);
		curptr = bcma_read16(pi->d11core, D11REGOFFS(smpl_clct_curptr));
		timer++;
	} while ((curptr != stpptr) && (timer < 500));

	/* read captured words back via the template access port */
	bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), 0x2);
	strptr = 0x7E00;
	bcma_write32(pi->d11core, D11REGOFFS(tplatewrptr), strptr);
	while (strptr < 0x8000) {
		val = bcma_read32(pi->d11core, D11REGOFFS(tplatewrdata));
		/* sign-extend the two packed 10-bit components */
		imag = ((val >> 16) & 0x3ff);
		real = ((val) & 0x3ff);
		if (imag > 511)
			imag -= 1024;
		if (real > 511)
			real -= 1024;
		if (pi_lcn->lcnphy_iqcal_swp_dis)
			ptr[(strptr - 0x7E00) / 4] = real;
		else
			ptr[(strptr - 0x7E00) / 4] = imag;
		if (clip_detect_algo) {
			if (imag > thresh || imag < -thresh) {
				/* clip detected: flag it and stop early */
				strptr = 0x8000;
				ptr[130] = 1;
			}
		}
		strptr += 4;
	}

	/* restore clocks and PSM state */
	write_phy_reg(pi, 0x6da, old_sslpnCalibClkEnCtrl);
	bcma_write16(pi->d11core, D11REGOFFS(psm_phy_hdr_param), curval2);
	bcma_write16(pi->d11core, D11REGOFFS(psm_corectlsts), curval1);
}
/*
 * Software TX IQ/LO calibration inner loop ("a1" stage).
 *
 * Plays a 3.75 MHz tone, loops it back through the radio, and performs
 * a coarse-to-fine grid search over the coefficient pair for @cal_type
 * (0 = TX IQ, 2 = digital LO, 3/4 = radio LOFT fine/coarse; see
 * wlc_lcnphy_set_cc()).  At each step the 3x3 neighbourhood at distance
 * phy_c7 around the current best (phy_c15, phy_c16) is evaluated by
 * correlating captured samples against the reference tone table, the
 * minimum-energy point is kept, and the step size is halved.
 *
 * @num_levels / @step_size_lg2 of 0 select per-cal-type defaults.
 * Local names (phy_cNN) are kept from the vendor drop; the key ones:
 *   phy_c15/c16 - current best coefficient pair
 *   phy_c7      - current search step size
 *   phy_c10     - coefficient clamp magnitude for this cal type
 *   phy_c20/c21 - candidate / best correlation energy
 */
static void
wlc_lcnphy_a1(struct brcms_phy *pi, int cal_type, int num_levels,
	      int step_size_lg2)
{
	const struct lcnphy_spb_tone *phy_c1;
	struct lcnphy_spb_tone phy_c2;
	struct lcnphy_unsign16_struct phy_c3;
	int phy_c4, phy_c5, k, l, j, phy_c6;
	u16 phy_c7, phy_c8, phy_c9;
	s16 phy_c10, phy_c11, phy_c12, phy_c13, phy_c14, phy_c15, phy_c16;
	s16 *ptr, phy_c17;
	s32 phy_c18, phy_c19;
	u32 phy_c20, phy_c21;
	bool phy_c22, phy_c23, phy_c24, phy_c25;
	u16 phy_c26, phy_c27;
	u16 phy_c28, phy_c29, phy_c30;
	u16 phy_c31;
	u16 *phy_c32;

	phy_c21 = 0;
	phy_c10 = phy_c13 = phy_c14 = phy_c8 = 0;
	/* 131 entries: 128 samples + clip flag at [130] (see samp_cap) */
	ptr = kmalloc(sizeof(s16) * 131, GFP_ATOMIC);
	if (NULL == ptr)
		return;

	phy_c32 = kmalloc(sizeof(u16) * 20, GFP_ATOMIC);
	if (NULL == phy_c32) {
		kfree(ptr);
		return;
	}
	/* save registers restored in the cleanup path */
	phy_c26 = read_phy_reg(pi, 0x6da);
	phy_c27 = read_phy_reg(pi, 0x6db);
	phy_c31 = read_radio_reg(pi, RADIO_2064_REG026);
	write_phy_reg(pi, 0x93d, 0xC0);

	/* loop a 3.75 MHz tone back through the radio */
	wlc_lcnphy_start_tx_tone(pi, 3750, 88, 0);
	write_phy_reg(pi, 0x6da, 0xffff);
	or_phy_reg(pi, 0x6db, 0x3);

	wlc_lcnphy_tx_iqlo_loopback(pi, phy_c32);
	udelay(500);
	phy_c28 = read_phy_reg(pi, 0x938);
	phy_c29 = read_phy_reg(pi, 0x4d7);
	phy_c30 = read_phy_reg(pi, 0x4d8);
	or_phy_reg(pi, 0x938, 0x1 << 2);
	or_phy_reg(pi, 0x4d7, 0x1 << 2);
	or_phy_reg(pi, 0x4d7, 0x1 << 3);
	mod_phy_reg(pi, 0x4d7, (0x7 << 12), 0x2 << 12);
	or_phy_reg(pi, 0x4d8, 1 << 0);
	or_phy_reg(pi, 0x4d8, 1 << 1);
	mod_phy_reg(pi, 0x4d8, (0x3ff << 2), 0x23A << 2);
	mod_phy_reg(pi, 0x4d8, (0x7 << 12), 0x7 << 12);

	/* reference samples of the played tone */
	phy_c1 = &lcnphy_spb_tone_3750[0];
	phy_c4 = 32;

	/* per-cal-type defaults for levels and initial step size */
	if (num_levels == 0) {
		if (cal_type != 0)
			num_levels = 4;
		else
			num_levels = 9;
	}
	if (step_size_lg2 == 0) {
		if (cal_type != 0)
			step_size_lg2 = 3;
		else
			step_size_lg2 = 8;
	}

	phy_c7 = (1 << step_size_lg2);
	phy_c3 = wlc_lcnphy_get_cc(pi, cal_type);
	phy_c15 = (s16) phy_c3.re;
	phy_c16 = (s16) phy_c3.im;
	/* LO coefficients are 8-bit two's complement */
	if (cal_type == 2) {
		if (phy_c3.re > 127)
			phy_c15 = phy_c3.re - 256;
		if (phy_c3.im > 127)
			phy_c16 = phy_c3.im - 256;
	}
	wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
	udelay(20);

	for (phy_c8 = 0; phy_c7 != 0 && phy_c8 < num_levels; phy_c8++) {
		phy_c23 = true;
		phy_c22 = false;
		/* clamp magnitude per coefficient width */
		switch (cal_type) {
		case 0:
			phy_c10 = 511;
			break;
		case 2:
			phy_c10 = 127;
			break;
		case 3:
			phy_c10 = 15;
			break;
		case 4:
			phy_c10 = 15;
			break;
		}

		phy_c9 = read_phy_reg(pi, 0x93d);
		phy_c9 = 2 * phy_c9;
		phy_c24 = false;
		phy_c5 = 7;
		phy_c25 = true;
		/*
		 * Adjust the loopback attenuation (REG026) until the
		 * captured signal sits just below clipping.
		 */
		while (1) {
			write_radio_reg(pi, RADIO_2064_REG026,
					(phy_c5 & 0x7) | ((phy_c5 & 0x7) << 4));
			udelay(50);
			phy_c22 = false;
			ptr[130] = 0;
			wlc_lcnphy_samp_cap(pi, 1, phy_c9, &ptr[0], 2);
			if (ptr[130] == 1)
				phy_c22 = true;
			if (phy_c22)
				phy_c5 -= 1;
			if ((phy_c22 != phy_c24) && (!phy_c25))
				break;
			if (!phy_c22)
				phy_c5 += 1;
			if (phy_c5 <= 0 || phy_c5 >= 7)
				break;
			phy_c24 = phy_c22;
			phy_c25 = false;
		}

		if (phy_c5 < 0)
			phy_c5 = 0;
		else if (phy_c5 > 7)
			phy_c5 = 7;

		/* 3x3 grid search around the current best pair */
		for (k = -phy_c7; k <= phy_c7; k += phy_c7) {
			for (l = -phy_c7; l <= phy_c7; l += phy_c7) {
				phy_c11 = phy_c15 + k;
				phy_c12 = phy_c16 + l;

				if (phy_c11 < -phy_c10)
					phy_c11 = -phy_c10;
				else if (phy_c11 > phy_c10)
					phy_c11 = phy_c10;
				if (phy_c12 < -phy_c10)
					phy_c12 = -phy_c10;
				else if (phy_c12 > phy_c10)
					phy_c12 = phy_c10;
				wlc_lcnphy_set_cc(pi, cal_type, phy_c11,
						  phy_c12);
				udelay(20);
				wlc_lcnphy_samp_cap(pi, 0, 0, ptr, 2);

				/* correlate capture against reference tone */
				phy_c18 = 0;
				phy_c19 = 0;
				for (j = 0; j < 128; j++) {
					if (cal_type != 0)
						phy_c6 = j % phy_c4;
					else
						phy_c6 = (2 * j) % phy_c4;

					phy_c2.re = phy_c1[phy_c6].re;
					phy_c2.im = phy_c1[phy_c6].im;
					phy_c17 = ptr[j];
					phy_c18 = phy_c18 + phy_c17 * phy_c2.re;
					phy_c19 = phy_c19 + phy_c17 * phy_c2.im;
				}

				phy_c18 = phy_c18 >> 10;
				phy_c19 = phy_c19 >> 10;
				phy_c20 = ((phy_c18 * phy_c18) +
					   (phy_c19 * phy_c19));

				/* keep the minimum-energy candidate */
				if (phy_c23 || phy_c20 < phy_c21) {
					phy_c21 = phy_c20;
					phy_c13 = phy_c11;
					phy_c14 = phy_c12;
				}
				phy_c23 = false;
			}
		}
		phy_c23 = true;
		phy_c15 = phy_c13;
		phy_c16 = phy_c14;
		/* halve the step and refine */
		phy_c7 = phy_c7 >> 1;
		wlc_lcnphy_set_cc(pi, cal_type, phy_c15, phy_c16);
		udelay(20);
	}
	/* NOTE(review): this goto is a no-op (falls through to cleanup) */
	goto cleanup;
cleanup:
	wlc_lcnphy_tx_iqlo_loopback_cleanup(pi, phy_c32);
	wlc_lcnphy_stop_tx_tone(pi);

	/* restore everything saved above */
	write_phy_reg(pi, 0x6da, phy_c26);
	write_phy_reg(pi, 0x6db, phy_c27);
	write_phy_reg(pi, 0x938, phy_c28);
	write_phy_reg(pi, 0x4d7, phy_c29);
	write_phy_reg(pi, 0x4d8, phy_c30);
	write_radio_reg(pi, RADIO_2064_REG026, phy_c31);

	kfree(phy_c32);
	kfree(ptr);
}
/*
 * Read back the TX I/Q compensation coefficient pair from the IQLOCAL
 * table (offsets 80/81) — the counterpart of wlc_lcnphy_set_tx_iqcc().
 * Uses LCNPHY_TBL_ID_IQLOCAL instead of the bare literal 0 the original
 * had, matching the setter.
 */
void wlc_lcnphy_get_tx_iqcc(struct brcms_phy *pi, u16 *a, u16 *b)
{
	u16 iqcc[2];
	struct phytbl_info tab;

	tab.tbl_ptr = iqcc;
	tab.tbl_len = 2;
	tab.tbl_id = LCNPHY_TBL_ID_IQLOCAL;
	tab.tbl_offset = 80;
	tab.tbl_width = 16;
	wlc_lcnphy_read_table(pi, &tab);

	*a = iqcc[0];
	*b = iqcc[1];
}
/*
 * Full software TX IQ/LO calibration: zero all coefficient sets, then
 * run the a1 search per cal type — coarse LOFT (4), fine LOFT (3),
 * digital LO (2) and IQ (0), each refined in a second pass.
 *
 * NOTE(review): the final get_cc readbacks are discarded; they appear
 * to exist only for debugging.
 */
static void wlc_lcnphy_tx_iqlo_soft_cal_full(struct brcms_phy *pi)
{
	struct lcnphy_unsign16_struct iqcc0, locc2, locc3, locc4;

	/* start from neutral coefficients */
	wlc_lcnphy_set_cc(pi, 0, 0, 0);
	wlc_lcnphy_set_cc(pi, 2, 0, 0);
	wlc_lcnphy_set_cc(pi, 3, 0, 0);
	wlc_lcnphy_set_cc(pi, 4, 0, 0);

	/* coarse-to-fine passes with default levels/steps (0, 0) */
	wlc_lcnphy_a1(pi, 4, 0, 0);
	wlc_lcnphy_a1(pi, 3, 0, 0);
	wlc_lcnphy_a1(pi, 2, 3, 2);
	wlc_lcnphy_a1(pi, 0, 5, 8);
	wlc_lcnphy_a1(pi, 2, 2, 1);
	wlc_lcnphy_a1(pi, 0, 4, 3);

	iqcc0 = wlc_lcnphy_get_cc(pi, 0);
	locc2 = wlc_lcnphy_get_cc(pi, 2);
	locc3 = wlc_lcnphy_get_cc(pi, 3);
	locc4 = wlc_lcnphy_get_cc(pi, 4);
}
u16 wlc_lcnphy_get_tx_locc(struct brcms_phy *pi)
{
struct phytbl_info tab;
u16 didq;
tab.tbl_id = 0;
tab.tbl_width = 16;
tab.tbl_ptr = &didq;
tab.tbl_len = 1;
tab.tbl_offset = 85;
wlc_lcnphy_read_table(pi, &tab);
return didq;
}
/*
 * Run TX IQ/LO calibration and propagate the resulting coefficients to
 * every entry of the TX power-control table.  Saves and restores gain,
 * bbmult, PA gain and power-control state around the calibration.
 * Hardware cal is used on rev 1 / hw_iqcal-capable parts, software cal
 * (wlc_lcnphy_tx_iqlo_soft_cal_full) otherwise; a second pass with
 * different target gains runs if the LOFT result pegged at +/-15.
 */
static void wlc_lcnphy_txpwrtbl_iqlo_cal(struct brcms_phy *pi)
{

	struct lcnphy_txgains target_gains, old_gains;
	u8 save_bb_mult;
	u16 a, b, didq, save_pa_gain = 0;
	uint idx, SAVE_txpwrindex = 0xFF;
	u32 val;
	u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	struct phytbl_info tab;
	u8 ei0, eq0, fi0, fq0;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	/* save TX state to restore after calibration */
	wlc_lcnphy_get_tx_gain(pi, &old_gains);
	save_pa_gain = wlc_lcnphy_get_pa_gain(pi);

	save_bb_mult = wlc_lcnphy_get_bbmult(pi);

	if (SAVE_txpwrctrl == LCNPHY_TX_PWR_CTRL_OFF)
		SAVE_txpwrindex = wlc_lcnphy_get_current_tx_pwr_idx(pi);

	wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);

	/* fixed calibration gains */
	target_gains.gm_gain = 7;
	target_gains.pga_gain = 0;
	target_gains.pad_gain = 21;
	target_gains.dac_gain = 0;
	wlc_lcnphy_set_tx_gain(pi, &target_gains);
	wlc_lcnphy_set_tx_pwr_by_index(pi, 16);

	if (LCNREV_IS(pi->pubpi.phy_rev, 1) || pi_lcn->lcnphy_hw_iqcal_en) {

		/* hardware-assisted IQ/LO calibration */
		wlc_lcnphy_set_tx_pwr_by_index(pi, 30);

		wlc_lcnphy_tx_iqlo_cal(pi, &target_gains,
				       (pi_lcn->
					lcnphy_recal ? LCNPHY_CAL_RECAL :
					LCNPHY_CAL_FULL), false);
	} else {
		wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
	}

	/* if LOFT pegged at the rails, retry with different gains */
	wlc_lcnphy_get_radio_loft(pi, &ei0, &eq0, &fi0, &fq0);
	if ((abs((s8) fi0) == 15) && (abs((s8) fq0) == 15)) {
		if (CHSPEC_IS5G(pi->radio_chanspec)) {
			target_gains.gm_gain = 255;
			target_gains.pga_gain = 255;
			target_gains.pad_gain = 0xf0;
			target_gains.dac_gain = 0;
		} else {
			target_gains.gm_gain = 7;
			target_gains.pga_gain = 45;
			target_gains.pad_gain = 186;
			target_gains.dac_gain = 0;
		}

		if (LCNREV_IS(pi->pubpi.phy_rev, 1)
		    || pi_lcn->lcnphy_hw_iqcal_en) {

			target_gains.pga_gain = 0;
			target_gains.pad_gain = 30;
			wlc_lcnphy_set_tx_pwr_by_index(pi, 16);
			wlc_lcnphy_tx_iqlo_cal(pi, &target_gains,
					       LCNPHY_CAL_FULL, false);
		} else {
			wlc_lcnphy_tx_iqlo_soft_cal_full(pi);
		}
	}

	wlc_lcnphy_get_tx_iqcc(pi, &a, &b);

	didq = wlc_lcnphy_get_tx_locc(pi);

	/* write the coefficients into all 128 power-table entries */
	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
	tab.tbl_width = 32;
	tab.tbl_ptr = &val;
	tab.tbl_len = 1;
	/* NOTE(review): dead store — overwritten on the first loop pass */
	tab.tbl_offset = LCNPHY_TX_PWR_CTRL_RATE_OFFSET;

	for (idx = 0; idx < 128; idx++) {
		/* merge a/b into the low 20 bits of the IQ entry */
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_IQ_OFFSET + idx;

		wlc_lcnphy_read_table(pi, &tab);
		val = (val & 0xfff00000) |
		      ((u32) (a & 0x3FF) << 10) | (b & 0x3ff);
		wlc_lcnphy_write_table(pi, &tab);

		val = didq;
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_LO_OFFSET + idx;
		wlc_lcnphy_write_table(pi, &tab);
	}

	/* persist results for later restore paths */
	pi_lcn->lcnphy_cal_results.txiqlocal_a = a;
	pi_lcn->lcnphy_cal_results.txiqlocal_b = b;
	pi_lcn->lcnphy_cal_results.txiqlocal_didq = didq;
	pi_lcn->lcnphy_cal_results.txiqlocal_ei0 = ei0;
	pi_lcn->lcnphy_cal_results.txiqlocal_eq0 = eq0;
	pi_lcn->lcnphy_cal_results.txiqlocal_fi0 = fi0;
	pi_lcn->lcnphy_cal_results.txiqlocal_fq0 = fq0;

	/* restore saved TX state */
	wlc_lcnphy_set_bbmult(pi, save_bb_mult);
	wlc_lcnphy_set_pa_gain(pi, save_pa_gain);
	wlc_lcnphy_set_tx_gain(pi, &old_gains);

	if (SAVE_txpwrctrl != LCNPHY_TX_PWR_CTRL_OFF)
		wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);
	else
		wlc_lcnphy_set_tx_pwr_by_index(pi, SAVE_txpwrindex);
}
/*
 * Read the on-die temperature sensor and return the average of the two
 * 9-bit readings as a signed value.  When @mode is 1, the MAC is
 * suspended, the sense block is set up beforehand, and a measurement
 * strobe is pulsed afterwards.
 */
s16 wlc_lcnphy_tempsense_new(struct brcms_phy *pi, bool mode)
{
	u16 raw1, raw2;
	s16 avg;
	bool suspend = false;

	if (mode == 1) {
		suspend = (0 == (bcma_read32(pi->d11core,
					     D11REGOFFS(maccontrol)) &
				 MCTL_EN_MAC));
		if (!suspend)
			wlapi_suspend_mac_and_wait(pi->sh->physhim);
		wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
	}

	raw1 = read_phy_reg(pi, 0x476) & 0x1FF;
	raw2 = read_phy_reg(pi, 0x477) & 0x1FF;

	/* 9-bit two's-complement sign extension, then average */
	avg = (raw1 > 255) ? (s16) (raw1 - 512) : (s16) raw1;
	avg += (raw2 > 255) ? (s16) (raw2 - 512) : (s16) raw2;
	avg /= 2;

	if (mode == 1) {
		/* pulse the measurement strobe, then resume the MAC */
		mod_phy_reg(pi, 0x448, (0x1 << 14), (1) << 14);
		udelay(100);
		mod_phy_reg(pi, 0x448, (0x1 << 14), (0) << 14);

		if (!suspend)
			wlapi_enable_mac(pi->sh->physhim);
	}
	return avg;
}
/*
 * Legacy temperature read.  Combines the two 9-bit sensor readings
 * according to lcnphy_tempsense_option (difference, or averaged sum),
 * wraps negative results into 0..511, and may return the raw first
 * reading for option 2.  When @mode is set, the MAC is suspended for
 * the measurement and TX power control is restored afterwards.
 */
u16 wlc_lcnphy_tempsense(struct brcms_phy *pi, bool mode)
{
	u16 tempsenseval1, tempsenseval2;
	s32 avg = 0;
	bool suspend = false;
	u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	if (mode == 1) {
		suspend = (0 == (bcma_read32(pi->d11core,
					     D11REGOFFS(maccontrol)) &
				 MCTL_EN_MAC));
		if (!suspend)
			wlapi_suspend_mac_and_wait(pi->sh->physhim);
		wlc_lcnphy_vbat_temp_sense_setup(pi, TEMPSENSE);
	}
	tempsenseval1 = read_phy_reg(pi, 0x476) & 0x1FF;
	tempsenseval2 = read_phy_reg(pi, 0x477) & 0x1FF;

	/* sign-extend the first 9-bit reading */
	if (tempsenseval1 > 255)
		avg = (int)(tempsenseval1 - 512);
	else
		avg = (int)tempsenseval1;

	if (pi_lcn->lcnphy_tempsense_option == 1 || pi->hwpwrctrl_capable) {
		/* option 1: use the difference of the two readings */
		if (tempsenseval2 > 255)
			avg = (int)(avg - tempsenseval2 + 512);
		else
			avg = (int)(avg - tempsenseval2);
	} else {
		/* otherwise: average the two readings */
		if (tempsenseval2 > 255)
			avg = (int)(avg + tempsenseval2 - 512);
		else
			avg = (int)(avg + tempsenseval2);
		avg = avg / 2;
	}
	/* wrap negative results back into the 9-bit range */
	if (avg < 0)
		avg = avg + 512;

	if (pi_lcn->lcnphy_tempsense_option == 2)
		avg = tempsenseval1;

	if (mode)
		wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_txpwrctrl);

	if (mode == 1) {
		/* pulse the measurement strobe, then resume the MAC */
		mod_phy_reg(pi, 0x448, (0x1 << 14), (1) << 14);

		udelay(100);
		mod_phy_reg(pi, 0x448, (0x1 << 14), (0) << 14);

		if (!suspend)
			wlapi_enable_mac(pi->sh->physhim);
	}
	return (u16) avg;
}
/*
 * Convert a raw temperature-sensor reading to degrees:
 * (raw * 1024 + OFFSET + DEN/2) / DEN, rounded to nearest.
 */
s8 wlc_lcnphy_tempsense_degree(struct brcms_phy *pi, bool mode)
{
	s32 raw = wlc_lcnphy_tempsense_new(pi, mode);
	s32 scaled = (raw << 10) + LCN_TEMPSENSE_OFFSET +
		     (LCN_TEMPSENSE_DEN >> 1);

	return (s8) (scaled / LCN_TEMPSENSE_DEN);
}
/*
 * Read the battery-voltage sensor and return the scaled result.
 * When @mode is 1, the MAC is suspended and the sense block switched
 * to VBATSENSE for the measurement.
 */
s8 wlc_lcnphy_vbatsense(struct brcms_phy *pi, bool mode)
{
	u16 raw;
	s32 value;
	bool suspend = false;

	if (mode == 1) {
		suspend = (0 == (bcma_read32(pi->d11core,
					     D11REGOFFS(maccontrol)) &
				 MCTL_EN_MAC));
		if (!suspend)
			wlapi_suspend_mac_and_wait(pi->sh->physhim);
		wlc_lcnphy_vbat_temp_sense_setup(pi, VBATSENSE);
	}

	raw = read_phy_reg(pi, 0x475) & 0x1FF;

	/* 9-bit two's-complement sign extension */
	value = (raw > 255) ? (s32) (raw - 512) : (s32) raw;

	/* scale with rounding */
	value = (value * LCN_VBAT_SCALE_NOM +
		 (LCN_VBAT_SCALE_DEN >> 1)) / LCN_VBAT_SCALE_DEN;

	if (mode == 1) {
		if (!suspend)
			wlapi_enable_mac(pi->sh->physhim);
	}
	return (s8) value;
}
/*
 * Initialise the AFE clock path for the given mode and toggle the AFE
 * power-down to latch the new configuration.
 */
static void wlc_lcnphy_afe_clk_init(struct brcms_phy *pi, u8 mode)
{
	bool is40 = (CHSPEC_IS40(pi->radio_chanspec) != 0);

	mod_phy_reg(pi, 0x6d1, (0x1 << 7), (1) << 7);

	/* 2x clocking for TXRX2X mode, or PAPD mode on a 20 MHz channel */
	if (mode == AFE_CLK_INIT_MODE_TXRX2X ||
	    (mode == AFE_CLK_INIT_MODE_PAPD && !is40))
		write_phy_reg(pi, 0x6d0, 0x7);

	wlc_lcnphy_toggle_afe_pwdn(pi);
}
/* Temperature-adjustment hook; intentionally a no-op on LCN PHY. */
static void wlc_lcnphy_temp_adj(struct brcms_phy *pi)
{
}
/*
 * Slow ("glacial") timer-driven recalibration: re-run the power-table
 * IQ/LO calibration with the MAC suspended and the PHY deafened, then
 * restore the previous power index and power-control mode.
 */
static void wlc_lcnphy_glacial_timer_based_cal(struct brcms_phy *pi)
{
	bool suspend;
	s8 index;
	u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
			 MCTL_EN_MAC));
	if (!suspend)
		wlapi_suspend_mac_and_wait(pi->sh->physhim);
	wlc_lcnphy_deaf_mode(pi, true);

	/* record calibration time for the watchdog */
	pi->phy_lastcal = pi->sh->now;
	pi->phy_forcecal = false;
	index = pi_lcn->lcnphy_current_index;

	wlc_lcnphy_txpwrtbl_iqlo_cal(pi);

	/* restore previous TX power state */
	wlc_lcnphy_set_tx_pwr_by_index(pi, index);
	wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_pwrctrl);
	wlc_lcnphy_deaf_mode(pi, false);
	if (!suspend)
		wlapi_enable_mac(pi->sh->physhim);
}
/*
 * Periodic full calibration: TX IQ/LO power-table cal, RX IQ cal, and
 * (when TSSI-based power control is active) a rebuild of the
 * TSSI-to-power table.  MAC is suspended and the PHY deafened for the
 * duration; previous power index/control mode are restored at the end.
 */
static void wlc_lcnphy_periodic_cal(struct brcms_phy *pi)
{
	bool suspend, full_cal;
	const struct lcnphy_rx_iqcomp *rx_iqcomp;
	int rx_iqcomp_sz;
	u16 SAVE_pwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	s8 index;
	struct phytbl_info tab;
	s32 a1, b0, b1;
	s32 tssi, pwr, maxtargetpwr, mintargetpwr;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	pi->phy_lastcal = pi->sh->now;
	pi->phy_forcecal = false;
	/* NOTE(review): full_cal is computed but not used below */
	full_cal =
		(pi_lcn->lcnphy_full_cal_channel !=
		 CHSPEC_CHANNEL(pi->radio_chanspec));
	pi_lcn->lcnphy_full_cal_channel = CHSPEC_CHANNEL(pi->radio_chanspec);
	index = pi_lcn->lcnphy_current_index;

	suspend = (0 == (bcma_read32(pi->d11core, D11REGOFFS(maccontrol)) &
			 MCTL_EN_MAC));
	if (!suspend) {
		/* reserve airtime before suspending the MAC */
		wlapi_bmac_write_shm(pi->sh->physhim, M_CTS_DURATION, 10000);
		wlapi_suspend_mac_and_wait(pi->sh->physhim);
	}

	wlc_lcnphy_deaf_mode(pi, true);

	wlc_lcnphy_txpwrtbl_iqlo_cal(pi);

	/* NOTE(review): rx_iqcomp/rx_iqcomp_sz are set but never used */
	rx_iqcomp = lcnphy_rx_iqcomp_table_rev0;
	rx_iqcomp_sz = ARRAY_SIZE(lcnphy_rx_iqcomp_table_rev0);

	if (LCNREV_IS(pi->pubpi.phy_rev, 1))
		wlc_lcnphy_rx_iq_cal(pi, NULL, 0, true, false, 1, 40);
	else
		wlc_lcnphy_rx_iq_cal(pi, NULL, 0, true, false, 1, 127);

	if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi)) {

		/* rebuild the TSSI-to-power table (cf. tx_pwr_ctrl_init) */
		wlc_lcnphy_idle_tssi_est((struct brcms_phy_pub *) pi);

		b0 = pi->txpa_2g[0];
		b1 = pi->txpa_2g[1];
		a1 = pi->txpa_2g[2];
		/* NOTE(review): maxtargetpwr is computed but never used */
		maxtargetpwr = wlc_lcnphy_tssi2dbm(10, a1, b0, b1);
		mintargetpwr = wlc_lcnphy_tssi2dbm(125, a1, b0, b1);

		tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
		tab.tbl_width = 32;
		tab.tbl_ptr = &pwr;
		tab.tbl_len = 1;
		tab.tbl_offset = 0;
		for (tssi = 0; tssi < 128; tssi++) {
			pwr = wlc_lcnphy_tssi2dbm(tssi, a1, b0, b1);
			pwr = (pwr < mintargetpwr) ? mintargetpwr : pwr;
			wlc_lcnphy_write_table(pi, &tab);
			tab.tbl_offset++;
		}
	}

	/* restore previous TX power state */
	wlc_lcnphy_set_tx_pwr_by_index(pi, index);
	wlc_lcnphy_set_tx_pwr_ctrl(pi, SAVE_pwrctrl);
	wlc_lcnphy_deaf_mode(pi, false);
	if (!suspend)
		wlapi_enable_mac(pi->sh->physhim);
}
/*
 * Dispatch the requested calibration mode.
 *
 * PHY_FULLCAL and PHY_PERICAL_PHYINIT both run a full periodic
 * calibration (the original listed them as two identical cases; they
 * are now merged with an explicit fallthrough).  The watchdog mode only
 * recalibrates after 90 ticks or a temperature swing of more than 60
 * sensor units in either direction.
 */
void wlc_lcnphy_calib_modes(struct brcms_phy *pi, uint mode)
{
	u16 temp_new;
	int temp1, temp2, temp_diff;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	switch (mode) {
	case PHY_PERICAL_CHAN:
		/* no channel-change calibration on LCN PHY */
		break;
	case PHY_FULLCAL:
	case PHY_PERICAL_PHYINIT:
		wlc_lcnphy_periodic_cal(pi);
		break;
	case PHY_PERICAL_WATCHDOG:
		if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) {
			temp_new = wlc_lcnphy_tempsense(pi, 0);
			temp1 = LCNPHY_TEMPSENSE(temp_new);
			temp2 = LCNPHY_TEMPSENSE(pi_lcn->lcnphy_cal_temper);
			temp_diff = temp1 - temp2;
			if ((pi_lcn->lcnphy_cal_counter > 90) ||
			    (temp_diff > 60) || (temp_diff < -60)) {
				wlc_lcnphy_glacial_timer_based_cal(pi);
				wlc_2064_vco_cal(pi);
				pi_lcn->lcnphy_cal_temper = temp_new;
				pi_lcn->lcnphy_cal_counter = 0;
			} else
				pi_lcn->lcnphy_cal_counter++;
		}
		break;
	case LCNPHY_PERICAL_TEMPBASED_TXPWRCTRL:
		if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
			wlc_lcnphy_tx_power_adjustment(
				(struct brcms_phy_pub *) pi);
		break;
	default:
		break;
	}
}
/*
 * Report the current estimated OFDM and CCK TX power from the TSSI
 * status register.  Valid only when TSSI-based power control is active
 * and the status register's valid bit (15) is set; otherwise both
 * outputs are 0.  CCK power is OFDM power plus the first CCK offset
 * when TPC is enabled.
 */
void wlc_lcnphy_get_tssi(struct brcms_phy *pi, s8 *ofdm_pwr, s8 *cck_pwr)
{
	s8 cck_offset;
	u16 status;
	status = (read_phy_reg(pi, 0x4ab));
	if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi) &&
	    (status & (0x1 << 15))) {
		/* 9-bit estimate in half-dB units; re-reads the register */
		*ofdm_pwr = (s8) (((read_phy_reg(pi, 0x4ab) & (0x1ff << 0))
				   >> 0) >> 1);

		if (wlc_phy_tpc_isenabled_lcnphy(pi))
			cck_offset = pi->tx_power_offset[TXP_FIRST_CCK];
		else
			cck_offset = 0;

		*cck_pwr = *ofdm_pwr + cck_offset;
	} else {
		*cck_pwr = 0;
		*ofdm_pwr = 0;
	}
}
/*
 * Calibration-init hook; intentionally a no-op on LCN PHY.
 * (Removed the redundant bare `return;` the original carried.)
 */
void wlc_phy_cal_init_lcnphy(struct brcms_phy *pi)
{
}
/*
 * Temperature-compensated TX power adjustment: compute a new power
 * index from the temperature model, program it (the register holds
 * 2x the index), and record the index actually taken by hardware.
 * Only acts when temperature-based power control is enabled and power
 * control is not off.
 */
void wlc_lcnphy_tx_power_adjustment(struct brcms_phy_pub *ppi)
{
	s8 index;
	u16 index2;
	struct brcms_phy *pi = (struct brcms_phy *) ppi;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
	u16 SAVE_txpwrctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
	if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi) &&
	    SAVE_txpwrctrl) {
		index = wlc_lcnphy_tempcompensated_txpwrctrl(pi);
		index2 = (u16) (index * 2);
		mod_phy_reg(pi, 0x4a9, (0x1ff << 0), (index2) << 0);
		pi_lcn->lcnphy_current_index =
			(s8)((read_phy_reg(pi, 0x4a9) & 0xFF) / 2);
	}
}
/*
 * Load the 128-entry TX gain table into the power-control table: one
 * packed gain word (pa<<24 | pad<<16 | pga<<8 | gm) and one packed
 * DAC/bbmult word (dac<<28 | bb_mult<<20) per index.
 *
 * The original selected pa_gain with a 5G/2G conditional whose two
 * branches both assigned 0x70; the dead conditional has been removed.
 * Boards with an external FEM use 0x10 instead.
 */
static void
wlc_lcnphy_load_tx_gain_table(struct brcms_phy *pi,
			      const struct lcnphy_tx_gain_tbl_entry *gain_table)
{
	u32 j;
	struct phytbl_info tab;
	u32 val;
	u16 pa_gain;
	u16 gm_gain;

	pa_gain = 0x70;		/* same value for both bands */
	if (pi->sh->boardflags & BFL_FEM)
		pa_gain = 0x10;

	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
	tab.tbl_width = 32;
	tab.tbl_len = 1;
	tab.tbl_ptr = &val;

	for (j = 0; j < 128; j++) {
		gm_gain = gain_table[j].gm;
		val = (((u32) pa_gain << 24) |
		       (gain_table[j].pad << 16) |
		       (gain_table[j].pga << 8) | gm_gain);

		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_GAIN_OFFSET + j;
		wlc_lcnphy_write_table(pi, &tab);

		val = (gain_table[j].dac << 28) | (gain_table[j].bb_mult << 20);
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_IQ_OFFSET + j;
		wlc_lcnphy_write_table(pi, &tab);
	}
}
/*
 * Derive the RF-power estimate for each of the 128 power-table entries
 * from its bbmult value and store it at LCNPHY_TX_PWR_CTRL_PWR_OFFSET.
 * Uses fixed-point log10 (qm_* helpers): the estimate combines the
 * entry index with 5 * log10(bbmult / 64) at a common Q-scale.
 */
static void wlc_lcnphy_load_rfpower(struct brcms_phy *pi)
{
	struct phytbl_info tab;
	u32 val, bbmult, rfgain;
	u8 index;
	u8 scale_factor = 1;
	s16 temp, temp1, temp2, qQ, qQ1, qQ2, shift;

	tab.tbl_id = LCNPHY_TBL_ID_TXPWRCTL;
	tab.tbl_width = 32;
	tab.tbl_len = 1;

	for (index = 0; index < 128; index++) {
		/* bbmult lives in bits 27:20 of the IQ entry */
		tab.tbl_ptr = &bbmult;
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_IQ_OFFSET + index;
		wlc_lcnphy_read_table(pi, &tab);
		bbmult = bbmult >> 20;

		/* NOTE(review): rfgain is read but not used afterwards */
		tab.tbl_ptr = &rfgain;
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_GAIN_OFFSET + index;
		wlc_lcnphy_read_table(pi, &tab);

		/* temp = log10(bbmult) - log10(64), aligned to common qQ */
		qm_log10((s32) (bbmult), 0, &temp1, &qQ1);
		qm_log10((s32) (1 << 6), 0, &temp2, &qQ2);

		if (qQ1 < qQ2) {
			temp2 = qm_shr16(temp2, qQ2 - qQ1);
			qQ = qQ1;
		} else {
			temp1 = qm_shr16(temp1, qQ1 - qQ2);
			qQ = qQ2;
		}
		temp = qm_sub16(temp1, temp2);

		if (qQ >= 4)
			shift = qQ - 4;
		else
			shift = 4 - qQ;

		/* round and rescale to the table's fixed-point format */
		val = (((index << shift) + (5 * temp) +
			(1 << (scale_factor + shift - 3))) >> (scale_factor +
							       shift - 2));

		tab.tbl_ptr = &val;
		tab.tbl_offset = LCNPHY_TX_PWR_CTRL_PWR_OFFSET + index;
		wlc_lcnphy_write_table(pi, &tab);
	}
}
/*
 * Bring-up register tweaks: assorted vendor-supplied PHY/radio register
 * values applied at init, with additional overrides for rev 1 parts.
 * The raw addresses/values come from the vendor bring-up recipe and
 * have no symbolic names.
 */
static void wlc_lcnphy_bu_tweaks(struct brcms_phy *pi)
{
	or_phy_reg(pi, 0x805, 0x1);

	mod_phy_reg(pi, 0x42f, (0x7 << 0), (0x3) << 0);

	mod_phy_reg(pi, 0x030, (0x7 << 0), (0x3) << 0);

	write_phy_reg(pi, 0x414, 0x1e10);
	write_phy_reg(pi, 0x415, 0x0640);

	mod_phy_reg(pi, 0x4df, (0xff << 8), -9 << 8);

	/*
	 * NOTE(review): the or_phy_reg result is immediately overwritten
	 * by the full write below — confirm the OR is intentional.
	 */
	or_phy_reg(pi, 0x44a, 0x44);
	write_phy_reg(pi, 0x44a, 0x80);

	mod_phy_reg(pi, 0x434, (0xff << 0), (0xFD) << 0);

	mod_phy_reg(pi, 0x420, (0xff << 0), (16) << 0);

	if (!(pi->sh->boardrev < 0x1204))
		mod_radio_reg(pi, RADIO_2064_REG09B, 0xF0, 0xF0);

	write_phy_reg(pi, 0x7d6, 0x0902);

	mod_phy_reg(pi, 0x429, (0xf << 0), (0x9) << 0);

	mod_phy_reg(pi, 0x429, (0x3f << 4), (0xe) << 4);

	if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
		/* rev-1-specific overrides */
		mod_phy_reg(pi, 0x423, (0xff << 0), (0x46) << 0);

		mod_phy_reg(pi, 0x411, (0xff << 0), (1) << 0);

		mod_phy_reg(pi, 0x434, (0xff << 0), (0xFF) << 0);

		mod_phy_reg(pi, 0x656, (0xf << 0), (2) << 0);

		mod_phy_reg(pi, 0x44d, (0x1 << 2), (1) << 2);

		mod_radio_reg(pi, RADIO_2064_REG0F7, 0x4, 0x4);
		mod_radio_reg(pi, RADIO_2064_REG0F1, 0x3, 0);
		mod_radio_reg(pi, RADIO_2064_REG0F2, 0xF8, 0x90);
		mod_radio_reg(pi, RADIO_2064_REG0F3, 0x3, 0x2);
		mod_radio_reg(pi, RADIO_2064_REG0F3, 0xf0, 0xa0);

		mod_radio_reg(pi, RADIO_2064_REG11F, 0x2, 0x2);

		wlc_lcnphy_clear_tx_power_offsets(pi);
		mod_phy_reg(pi, 0x4d0, (0x1ff << 6), (10) << 6);

	}
}
/*
 * Run the 2064 radio R-calibration: enable the rcal engine, wait up to
 * 10 s for completion, then disable it again.
 *
 * NOTE(review): rcal_value is read and masked but never used or stored;
 * it appears to remain only for debugging.  A timeout is silently
 * ignored.
 */
static void wlc_lcnphy_rcal(struct brcms_phy *pi)
{
	u8 rcal_value;

	and_radio_reg(pi, RADIO_2064_REG05B, 0xfD);

	/* power up and start the rcal engine */
	or_radio_reg(pi, RADIO_2064_REG004, 0x40);
	or_radio_reg(pi, RADIO_2064_REG120, 0x10);

	or_radio_reg(pi, RADIO_2064_REG078, 0x80);
	or_radio_reg(pi, RADIO_2064_REG129, 0x02);

	or_radio_reg(pi, RADIO_2064_REG057, 0x01);

	or_radio_reg(pi, RADIO_2064_REG05B, 0x02);
	mdelay(5);
	SPINWAIT(!wlc_radio_2064_rcal_done(pi), 10 * 1000 * 1000);

	if (wlc_radio_2064_rcal_done(pi)) {
		rcal_value = (u8) read_radio_reg(pi, RADIO_2064_REG05C);
		rcal_value = rcal_value & 0x1f;
	}

	/* stop the rcal engine */
	and_radio_reg(pi, RADIO_2064_REG05B, 0xfD);

	and_radio_reg(pi, RADIO_2064_REG057, 0xFE);
}
/*
 * Program the RC-calibration filter registers with a fixed default
 * value (11 on LCN PHY rev 1, otherwise 7), replicated into the three
 * 5-bit fields of each filter register.
 *
 * Cleanup: removed the redundant trailing "return;" and merged the
 * declaration with its initialization; register writes are unchanged.
 */
static void wlc_lcnphy_rc_cal(struct brcms_phy *pi)
{
	u8 dflt_rc_cal_val = 7;
	u16 flt_val;

	if (LCNREV_IS(pi->pubpi.phy_rev, 1))
		dflt_rc_cal_val = 11;

	/* Same 5-bit value packed into bits [14:10], [9:5] and [4:0] */
	flt_val =
		(dflt_rc_cal_val << 10) | (dflt_rc_cal_val << 5) |
		(dflt_rc_cal_val);
	write_phy_reg(pi, 0x933, flt_val);
	write_phy_reg(pi, 0x934, flt_val);
	write_phy_reg(pi, 0x935, flt_val);
	write_phy_reg(pi, 0x936, flt_val);
	/* Last register only takes the low 9 bits */
	write_phy_reg(pi, 0x937, (flt_val & 0x1FF));
}
/*
 * Initialize the 2064 radio: program every register from the static
 * init table (band-specific init_a/init_g values), apply fixed
 * overrides, then run R-cal and RC-cal.
 *
 * Cleanup: dropped the redundant "= NULL" that was immediately
 * overwritten, and braced the table loop for clarity.  Register
 * write order is unchanged.
 */
static void wlc_radio_2064_init(struct brcms_phy *pi)
{
	u32 i;
	const struct lcnphy_radio_regs *lcnphyregs = lcnphy_radio_regs_2064;

	/* Table is terminated by address 0xffff */
	for (i = 0; lcnphyregs[i].address != 0xffff; i++) {
		if (CHSPEC_IS5G(pi->radio_chanspec) && lcnphyregs[i].do_init_a)
			write_radio_reg(pi,
					((lcnphyregs[i].address & 0x3fff) |
					 RADIO_DEFAULT_CORE),
					(u16) lcnphyregs[i].init_a);
		else if (lcnphyregs[i].do_init_g)
			write_radio_reg(pi,
					((lcnphyregs[i].address & 0x3fff) |
					 RADIO_DEFAULT_CORE),
					(u16) lcnphyregs[i].init_g);
	}

	write_radio_reg(pi, RADIO_2064_REG032, 0x62);
	write_radio_reg(pi, RADIO_2064_REG033, 0x19);
	write_radio_reg(pi, RADIO_2064_REG090, 0x10);
	write_radio_reg(pi, RADIO_2064_REG010, 0x00);
	if (LCNREV_IS(pi->pubpi.phy_rev, 1)) {
		write_radio_reg(pi, RADIO_2064_REG060, 0x7f);
		write_radio_reg(pi, RADIO_2064_REG061, 0x72);
		write_radio_reg(pi, RADIO_2064_REG062, 0x7f);
	}
	write_radio_reg(pi, RADIO_2064_REG01D, 0x02);
	write_radio_reg(pi, RADIO_2064_REG01E, 0x06);

	mod_phy_reg(pi, 0x4ea, (0x7 << 0), 0 << 0);
	mod_phy_reg(pi, 0x4ea, (0x7 << 3), 1 << 3);
	mod_phy_reg(pi, 0x4ea, (0x7 << 6), 2 << 6);
	mod_phy_reg(pi, 0x4ea, (0x7 << 9), 3 << 9);
	mod_phy_reg(pi, 0x4ea, (0x7 << 12), 4 << 12);
	/* NOTE(review): the full write below overwrites the five
	 * mod_phy_reg() field updates above — per vendor sequence,
	 * confirm before simplifying. */
	write_phy_reg(pi, 0x4ea, 0x4688);

	mod_phy_reg(pi, 0x4eb, (0x7 << 0), 2 << 0);
	mod_phy_reg(pi, 0x4eb, (0x7 << 6), 0 << 6);
	mod_phy_reg(pi, 0x46a, (0xffff << 0), 25 << 0);

	wlc_lcnphy_set_tx_locc(pi, 0);
	wlc_lcnphy_rcal(pi);
	wlc_lcnphy_rc_cal(pi);
}
/* Radio init entry point: LCN PHY uses the 2064 radio exclusively. */
static void wlc_lcnphy_radio_init(struct brcms_phy *pi)
{
	wlc_radio_2064_init(pi);
}
/*
 * Load all static PHY tables: the rev0 base tables, board-specific
 * RF-sequence entries, the band-appropriate tx gain table, the
 * rev2 rx gain tables, and the FEM/BT switch-control table.
 * Finishes by loading RF power and clearing the PAPD comp table.
 *
 * Cleanup: removed the unused local 'phybw40' (its CHSPEC_IS40()
 * result was never consumed; the macro is a pure bit test).
 */
static void wlc_lcnphy_tbl_init(struct brcms_phy *pi)
{
	uint idx;
	struct phytbl_info tab;
	u32 val;

	for (idx = 0; idx < dot11lcnphytbl_info_sz_rev0; idx++)
		wlc_lcnphy_write_table(pi, &dot11lcnphytbl_info_rev0[idx]);

	/* BT-FEM boards override RF-seq entry 4 */
	if (pi->sh->boardflags & BFL_FEM_BT) {
		tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
		tab.tbl_width = 16;
		tab.tbl_ptr = &val;
		tab.tbl_len = 1;
		val = 100;
		tab.tbl_offset = 4;
		wlc_lcnphy_write_table(pi, &tab);
	}

	/* Common RF-seq entries 0, 1 and 8 */
	tab.tbl_id = LCNPHY_TBL_ID_RFSEQ;
	tab.tbl_width = 16;
	tab.tbl_ptr = &val;
	tab.tbl_len = 1;
	val = 114;
	tab.tbl_offset = 0;
	wlc_lcnphy_write_table(pi, &tab);
	val = 130;
	tab.tbl_offset = 1;
	wlc_lcnphy_write_table(pi, &tab);
	val = 6;
	tab.tbl_offset = 8;
	wlc_lcnphy_write_table(pi, &tab);

	/* Tx gain table depends on the presence of an external PA */
	if (CHSPEC_IS2G(pi->radio_chanspec)) {
		if (pi->sh->boardflags & BFL_FEM)
			wlc_lcnphy_load_tx_gain_table(
				pi,
				dot11lcnphy_2GHz_extPA_gaintable_rev0);
		else
			wlc_lcnphy_load_tx_gain_table(
				pi,
				dot11lcnphy_2GHz_gaintable_rev0);
	}

	/* Rev-2 rx gain tables, selected by band and external LNA */
	if (LCNREV_IS(pi->pubpi.phy_rev, 2)) {
		const struct phytbl_info *tb;
		int l;
		if (CHSPEC_IS2G(pi->radio_chanspec)) {
			l = dot11lcnphytbl_rx_gain_info_2G_rev2_sz;
			if (pi->sh->boardflags & BFL_EXTLNA)
				tb = dot11lcnphytbl_rx_gain_info_extlna_2G_rev2;
			else
				tb = dot11lcnphytbl_rx_gain_info_2G_rev2;
		} else {
			l = dot11lcnphytbl_rx_gain_info_5G_rev2_sz;
			if (pi->sh->boardflags & BFL_EXTLNA_5GHz)
				tb = dot11lcnphytbl_rx_gain_info_extlna_5G_rev2;
			else
				tb = dot11lcnphytbl_rx_gain_info_5G_rev2;
		}
		for (idx = 0; idx < l; idx++)
			wlc_lcnphy_write_table(pi, &tb[idx]);
	}

	/* Switch-control table: ePA-only, BT+ePA (two board revs), or
	 * the plain 4313 table */
	if ((pi->sh->boardflags & BFL_FEM)
	    && !(pi->sh->boardflags & BFL_FEM_BT))
		wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313_epa);
	else if (pi->sh->boardflags & BFL_FEM_BT) {
		if (pi->sh->boardrev < 0x1250)
			wlc_lcnphy_write_table(
				pi,
				&dot11lcn_sw_ctrl_tbl_info_4313_bt_epa);
		else
			wlc_lcnphy_write_table(
				pi,
				&dot11lcn_sw_ctrl_tbl_info_4313_bt_epa_p250);
	} else
		wlc_lcnphy_write_table(pi, &dot11lcn_sw_ctrl_tbl_info_4313);

	wlc_lcnphy_load_rfpower(pi);
	wlc_lcnphy_clear_papd_comptable(pi);
}
/*
 * Baseband register init common to all LCN PHY revisions (rev0 base).
 * Clears a set of control registers, applies FEM-dependent tweaks,
 * and programs AGC/clip thresholds.
 */
static void wlc_lcnphy_rev0_baseband_init(struct brcms_phy *pi)
{
	u16 afectrl1;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	write_radio_reg(pi, RADIO_2064_REG11C, 0x0);
	write_phy_reg(pi, 0x43b, 0x0);
	write_phy_reg(pi, 0x43c, 0x0);
	write_phy_reg(pi, 0x44c, 0x0);
	write_phy_reg(pi, 0x4e6, 0x0);
	write_phy_reg(pi, 0x4f9, 0x0);
	write_phy_reg(pi, 0x4b0, 0x0);
	write_phy_reg(pi, 0x938, 0x0);
	/* NOTE(review): 0x4b0 is written twice with the same value —
	 * likely harmless duplication from the vendor sequence. */
	write_phy_reg(pi, 0x4b0, 0x0);
	write_phy_reg(pi, 0x44e, 0);

	or_phy_reg(pi, 0x567, 0x03);

	/* NOTE(review): the OR is immediately overwritten by the full
	 * write below, as in wlc_lcnphy_bu_tweaks(). */
	or_phy_reg(pi, 0x44a, 0x44);
	write_phy_reg(pi, 0x44a, 0x80);

	if (!(pi->sh->boardflags & BFL_FEM))
		wlc_lcnphy_set_tx_pwr_by_index(pi, 52);

	/* Dead code: deliberately compiled out (if (0)); kept from the
	 * vendor driver.  Would program AFE control from the RSSI cal
	 * values. */
	if (0) {
		afectrl1 = 0;
		afectrl1 = (u16) ((pi_lcn->lcnphy_rssi_vf) |
				  (pi_lcn->lcnphy_rssi_vc << 4) |
				  (pi_lcn->lcnphy_rssi_gs << 10));
		write_phy_reg(pi, 0x43e, afectrl1);
	}

	mod_phy_reg(pi, 0x634, (0xff << 0), 0xC << 0);
	if (pi->sh->boardflags & BFL_FEM) {
		mod_phy_reg(pi, 0x634, (0xff << 0), 0xA << 0);
		write_phy_reg(pi, 0x910, 0x1);
	}

	mod_phy_reg(pi, 0x448, (0x3 << 8), 1 << 8);
	mod_phy_reg(pi, 0x608, (0xff << 0), 0x17 << 0);
	mod_phy_reg(pi, 0x604, (0x7ff << 0), 0x3EA << 0);
}
/*
 * Additional baseband init for LCN PHY rev 2; the tweaks apply to
 * the 5 GHz band only.
 */
static void wlc_lcnphy_rev2_baseband_init(struct brcms_phy *pi)
{
	if (!CHSPEC_IS5G(pi->radio_chanspec))
		return;

	/* Set both byte fields of register 0x416 to 80 */
	mod_phy_reg(pi, 0x416, (0xff << 0), 80 << 0);
	mod_phy_reg(pi, 0x416, (0xff << 8), 80 << 8);
}
/*
 * Snapshot AGC-related register values and gain-table entries into the
 * LCN private state so temperature compensation can restore/adjust
 * them later.  Several fields are sign-extended by hand from 8-bit
 * (> 127 => subtract 256) or 7-bit (> 63 => subtract 128) raw values.
 */
static void wlc_lcnphy_agc_temp_init(struct brcms_phy *pi)
{
	s16 temp;
	struct phytbl_info tab;
	u32 tableBuffer[2];
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	/* OFDM/DSSS gain index offsets: signed bytes in reg 0x4df */
	temp = (s16) read_phy_reg(pi, 0x4df);
	pi_lcn->lcnphy_ofdmgainidxtableoffset = (temp & (0xff << 0)) >> 0;
	if (pi_lcn->lcnphy_ofdmgainidxtableoffset > 127)
		pi_lcn->lcnphy_ofdmgainidxtableoffset -= 256;
	pi_lcn->lcnphy_dsssgainidxtableoffset = (temp & (0xff << 8)) >> 8;
	if (pi_lcn->lcnphy_dsssgainidxtableoffset > 127)
		pi_lcn->lcnphy_dsssgainidxtableoffset -= 256;

	/* TR R/T gain values from table 17, offsets 59..60 (7-bit signed) */
	tab.tbl_ptr = tableBuffer;
	tab.tbl_len = 2;
	tab.tbl_id = 17;
	tab.tbl_offset = 59;
	tab.tbl_width = 32;
	wlc_lcnphy_read_table(pi, &tab);
	if (tableBuffer[0] > 63)
		tableBuffer[0] -= 128;
	pi_lcn->lcnphy_tr_R_gain_val = tableBuffer[0];
	if (tableBuffer[1] > 63)
		tableBuffer[1] -= 128;
	pi_lcn->lcnphy_tr_T_gain_val = tableBuffer[1];

	/* Input power offset: signed byte in reg 0x434 */
	temp = (s16) (read_phy_reg(pi, 0x434) & (0xff << 0));
	if (temp > 127)
		temp -= 256;
	pi_lcn->lcnphy_input_pwr_offset_db = (s8) temp;

	pi_lcn->lcnphy_Med_Low_Gain_db =
		(read_phy_reg(pi, 0x424) & (0xff << 8)) >> 8;
	pi_lcn->lcnphy_Very_Low_Gain_db =
		(read_phy_reg(pi, 0x425) & (0xff << 0)) >> 0;

	/* Gain index table entry 14 (two 32-bit words at offset 28) */
	tab.tbl_ptr = tableBuffer;
	tab.tbl_len = 2;
	tab.tbl_id = LCNPHY_TBL_ID_GAIN_IDX;
	tab.tbl_offset = 28;
	tab.tbl_width = 32;
	wlc_lcnphy_read_table(pi, &tab);
	pi_lcn->lcnphy_gain_idx_14_lowword = tableBuffer[0];
	pi_lcn->lcnphy_gain_idx_14_hiword = tableBuffer[1];
}
/*
 * Baseband init sequence: load tables, run the rev0 base init, the
 * rev2 additions when applicable, then the bring-up tweaks.  Order
 * matters (tweaks override earlier writes).
 */
static void wlc_lcnphy_baseband_init(struct brcms_phy *pi)
{
	wlc_lcnphy_tbl_init(pi);
	wlc_lcnphy_rev0_baseband_init(pi);
	if (LCNREV_IS(pi->pubpi.phy_rev, 2))
		wlc_lcnphy_rev2_baseband_init(pi);
	wlc_lcnphy_bu_tweaks(pi);
}
/*
 * Full LCN PHY init: AFE clock, baseband and radio init, channel set,
 * PMU regulator/chip control setup, AGC temperature snapshot, and
 * finally hardware tx power control plus an initial calibration pass.
 *
 * Cleanup: removed the unused local 'phybw40' (its CHSPEC_IS40()
 * result was never consumed; the macro is a pure bit test).
 */
void wlc_phy_init_lcnphy(struct brcms_phy *pi)
{
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	pi_lcn->lcnphy_cal_counter = 0;
	pi_lcn->lcnphy_cal_temper = pi_lcn->lcnphy_rawtempsense;

	/* Pulse bit 7 of 0x44a */
	or_phy_reg(pi, 0x44a, 0x80);
	and_phy_reg(pi, 0x44a, 0x7f);

	wlc_lcnphy_afe_clk_init(pi, AFE_CLK_INIT_MODE_TXRX2X);

	write_phy_reg(pi, 0x60a, 160);
	write_phy_reg(pi, 0x46a, 25);

	wlc_lcnphy_baseband_init(pi);
	wlc_lcnphy_radio_init(pi);

	if (CHSPEC_IS2G(pi->radio_chanspec))
		wlc_lcnphy_tx_pwr_ctrl_init((struct brcms_phy_pub *) pi);

	wlc_phy_chanspec_set((struct brcms_phy_pub *) pi, pi->radio_chanspec);

	si_pmu_regcontrol(pi->sh->sih, 0, 0xf, 0x9);
	si_pmu_chipcontrol(pi->sh->sih, 0, 0xffffffff, 0x03CDDDDD);

	/* FEM boards with temp-based power control start from a fixed
	 * tx power index */
	if ((pi->sh->boardflags & BFL_FEM)
	    && wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
		wlc_lcnphy_set_tx_pwr_by_index(pi, FIXED_TXPWR);

	wlc_lcnphy_agc_temp_init(pi);
	wlc_lcnphy_temp_adj(pi);

	/* Pulse bit 14 of 0x448 (with 100 us high time) */
	mod_phy_reg(pi, 0x448, (0x1 << 14), (1) << 14);
	udelay(100);
	mod_phy_reg(pi, 0x448, (0x1 << 14), (0) << 14);

	wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_HW);
	pi_lcn->lcnphy_noise_samples = LCNPHY_NOISE_SAMPLES_DEFAULT;
	wlc_lcnphy_calib_modes(pi, PHY_PERICAL_PHYINIT);
}
/*
 * Populate per-board tx-power and RSSI calibration parameters from
 * SROM variables (2 GHz band only): PA coefficients, RSSI cal values
 * (duplicated for both temperature extremes), per-rate max power
 * derived from the CCK/OFDM/MCS power-offset words, and various
 * temperature/IQ-cal options.  Always returns true.
 */
static bool wlc_phy_txpwr_srom_read_lcnphy(struct brcms_phy *pi)
{
	s8 txpwr = 0;
	int i;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;
	struct phy_shim_info *shim = pi->sh->physhim;

	if (CHSPEC_IS2G(pi->radio_chanspec)) {
		u16 cckpo = 0;
		u32 offset_ofdm, offset_mcs;

		pi_lcn->lcnphy_tr_isolation_mid =
			(u8)wlapi_getintvar(shim, BRCMS_SROM_TRISO2G);

		pi_lcn->lcnphy_rx_power_offset =
			(u8)wlapi_getintvar(shim, BRCMS_SROM_RXPO2G);

		pi->txpa_2g[0] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B0);
		pi->txpa_2g[1] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B1);
		pi->txpa_2g[2] = (s16)wlapi_getintvar(shim, BRCMS_SROM_PA0B2);

		pi_lcn->lcnphy_rssi_vf =
			(u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISMF2G);
		pi_lcn->lcnphy_rssi_vc =
			(u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISMC2G);
		pi_lcn->lcnphy_rssi_gs =
			(u8)wlapi_getintvar(shim, BRCMS_SROM_RSSISAV2G);

		/* Same RSSI cal values are used at both temp extremes */
		pi_lcn->lcnphy_rssi_vf_lowtemp = pi_lcn->lcnphy_rssi_vf;
		pi_lcn->lcnphy_rssi_vc_lowtemp = pi_lcn->lcnphy_rssi_vc;
		pi_lcn->lcnphy_rssi_gs_lowtemp = pi_lcn->lcnphy_rssi_gs;

		pi_lcn->lcnphy_rssi_vf_hightemp = pi_lcn->lcnphy_rssi_vf;
		pi_lcn->lcnphy_rssi_vc_hightemp = pi_lcn->lcnphy_rssi_vc;
		pi_lcn->lcnphy_rssi_gs_hightemp = pi_lcn->lcnphy_rssi_gs;

		txpwr = (s8)wlapi_getintvar(shim, BRCMS_SROM_MAXP2GA0);
		pi->tx_srom_max_2g = txpwr;

		for (i = 0; i < PWRTBL_NUM_COEFF; i++) {
			pi->txpa_2g_low_temp[i] = pi->txpa_2g[i];
			pi->txpa_2g_high_temp[i] = pi->txpa_2g[i];
		}

		cckpo = (u16)wlapi_getintvar(shim, BRCMS_SROM_CCK2GPO);
		offset_ofdm = (u32)wlapi_getintvar(shim, BRCMS_SROM_OFDM2GPO);
		if (cckpo) {
			uint max_pwr_chan = txpwr;

			/* Per-rate limits: each nibble is an offset in
			 * half-dB steps below the channel max */
			for (i = TXP_FIRST_CCK; i <= TXP_LAST_CCK; i++) {
				pi->tx_srom_max_rate_2g[i] =
					max_pwr_chan - ((cckpo & 0xf) * 2);
				cckpo >>= 4;
			}

			for (i = TXP_FIRST_OFDM; i <= TXP_LAST_OFDM; i++) {
				pi->tx_srom_max_rate_2g[i] =
					max_pwr_chan -
					((offset_ofdm & 0xf) * 2);
				offset_ofdm >>= 4;
			}
		} else {
			u8 opo = 0;

			/* NOTE(review): 'opo' is read from SROM but never
			 * used below — the OFDM offsets come from
			 * offset_ofdm instead.  Dead store; confirm
			 * intent before removing (the getintvar call may
			 * have been kept on purpose). */
			opo = (u8)wlapi_getintvar(shim, BRCMS_SROM_OPO);

			for (i = TXP_FIRST_CCK; i <= TXP_LAST_CCK; i++)
				pi->tx_srom_max_rate_2g[i] = txpwr;

			for (i = TXP_FIRST_OFDM; i <= TXP_LAST_OFDM; i++) {
				pi->tx_srom_max_rate_2g[i] = txpwr -
					((offset_ofdm & 0xf) * 2);
				offset_ofdm >>= 4;
			}

			/* 32-bit MCS offset word assembled from two
			 * 16-bit SROM variables */
			offset_mcs =
				wlapi_getintvar(shim,
						BRCMS_SROM_MCS2GPO1) << 16;
			offset_mcs |=
				(u16) wlapi_getintvar(shim,
						      BRCMS_SROM_MCS2GPO0);
			pi_lcn->lcnphy_mcs20_po = offset_mcs;
			for (i = TXP_FIRST_SISO_MCS_20;
			     i <= TXP_LAST_SISO_MCS_20; i++) {
				pi->tx_srom_max_rate_2g[i] =
					txpwr - ((offset_mcs & 0xf) * 2);
				offset_mcs >>= 4;
			}
		}

		pi_lcn->lcnphy_rawtempsense =
			(u16)wlapi_getintvar(shim, BRCMS_SROM_RAWTEMPSENSE);
		pi_lcn->lcnphy_measPower =
			(u8)wlapi_getintvar(shim, BRCMS_SROM_MEASPOWER);
		pi_lcn->lcnphy_tempsense_slope =
			(u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPSENSE_SLOPE);
		pi_lcn->lcnphy_hw_iqcal_en =
			(bool)wlapi_getintvar(shim, BRCMS_SROM_HW_IQCAL_EN);
		pi_lcn->lcnphy_iqcal_swp_dis =
			(bool)wlapi_getintvar(shim, BRCMS_SROM_IQCAL_SWP_DIS);
		pi_lcn->lcnphy_tempcorrx =
			(u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPCORRX);
		pi_lcn->lcnphy_tempsense_option =
			(u8)wlapi_getintvar(shim, BRCMS_SROM_TEMPSENSE_OPTION);
		pi_lcn->lcnphy_freqoffset_corr =
			(u8)wlapi_getintvar(shim, BRCMS_SROM_FREQOFFSET_CORR);

		/* More than one antenna: enable rx diversity */
		if ((u8)wlapi_getintvar(shim, BRCMS_SROM_AA2G) > 1)
			wlc_phy_ant_rxdiv_set((struct brcms_phy_pub *) pi,
				(u8) wlapi_getintvar(shim, BRCMS_SROM_AA2G));
	}

	/* -1 = not configured */
	pi_lcn->lcnphy_cck_dig_filt_type = -1;

	return true;
}
/*
 * Run the 2064 radio VCO calibration: assert the cal enable bit,
 * step the low bits of REG056 with short settle delays, wait 300 us
 * for the cal to finish, then deassert the enable bit.
 */
void wlc_2064_vco_cal(struct brcms_phy *pi)
{
	u8 calnrst;

	mod_radio_reg(pi, RADIO_2064_REG057, 1 << 3, 1 << 3);
	/* Preserve the upper 5 bits of REG056 while sequencing the
	 * low 3 cal-control bits */
	calnrst = (u8) read_radio_reg(pi, RADIO_2064_REG056) & 0xf8;
	write_radio_reg(pi, RADIO_2064_REG056, calnrst);
	udelay(1);
	write_radio_reg(pi, RADIO_2064_REG056, calnrst | 0x03);
	udelay(1);
	write_radio_reg(pi, RADIO_2064_REG056, calnrst | 0x07);
	udelay(300);
	mod_radio_reg(pi, RADIO_2064_REG057, 1 << 3, 0);
}
bool wlc_phy_tpc_isenabled_lcnphy(struct brcms_phy *pi)
{
if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi))
return 0;
else
return (LCNPHY_TX_PWR_CTRL_HW ==
wlc_lcnphy_get_tx_pwr_ctrl((pi)));
}
/*
 * Recalculate the tx power target.  With temperature-based control a
 * calibration pass does the work; with TSSI-based control the power
 * control loop is temporarily disabled around the recalculation and
 * then restored to its previous mode.
 */
void wlc_phy_txpower_recalc_target_lcnphy(struct brcms_phy *pi)
{
	u16 pwr_ctrl;
	if (wlc_lcnphy_tempsense_based_pwr_ctrl_enabled(pi)) {
		wlc_lcnphy_calib_modes(pi, LCNPHY_PERICAL_TEMPBASED_TXPWRCTRL);
	} else if (wlc_lcnphy_tssi_based_pwr_ctrl_enabled(pi)) {
		/* Save mode, recalc with control off, restore mode */
		pwr_ctrl = wlc_lcnphy_get_tx_pwr_ctrl(pi);
		wlc_lcnphy_set_tx_pwr_ctrl(pi, LCNPHY_TX_PWR_CTRL_OFF);
		wlc_lcnphy_txpower_recalc_target(pi);
		wlc_lcnphy_set_tx_pwr_ctrl(pi, pwr_ctrl);
	}
}
void wlc_phy_detach_lcnphy(struct brcms_phy *pi)
{
kfree(pi->u.pi_lcnphy);
}
/*
 * Allocate LCN-PHY private state, read SROM parameters and install
 * the PHY-type-specific function pointers.  Returns false on
 * allocation or SROM-read failure.
 *
 * NOTE(review): on SROM-read failure pi->u.pi_lcnphy is not freed
 * here — presumably the caller runs the detach path; confirm.
 */
bool wlc_phy_attach_lcnphy(struct brcms_phy *pi)
{
	struct brcms_phy_lcnphy *pi_lcn;

	pi->u.pi_lcnphy = kzalloc(sizeof(struct brcms_phy_lcnphy), GFP_ATOMIC);
	if (pi->u.pi_lcnphy == NULL)
		return false;

	pi_lcn = pi->u.pi_lcnphy;

	/* Boards with a PA default to hardware power control */
	if (0 == (pi->sh->boardflags & BFL_NOPA)) {
		pi->hwpwrctrl = true;
		pi->hwpwrctrl_capable = true;
	}

	pi->xtalfreq = si_pmu_alp_clock(pi->sh->sih);
	pi_lcn->lcnphy_papd_rxGnCtrl_init = 0;

	/* Hook up PHY-type-specific entry points */
	pi->pi_fptr.init = wlc_phy_init_lcnphy;
	pi->pi_fptr.calinit = wlc_phy_cal_init_lcnphy;
	pi->pi_fptr.chanset = wlc_phy_chanspec_set_lcnphy;
	pi->pi_fptr.txpwrrecalc = wlc_phy_txpower_recalc_target_lcnphy;
	pi->pi_fptr.txiqccget = wlc_lcnphy_get_tx_iqcc;
	pi->pi_fptr.txiqccset = wlc_lcnphy_set_tx_iqcc;
	pi->pi_fptr.txloccget = wlc_lcnphy_get_tx_locc;
	pi->pi_fptr.radioloftget = wlc_lcnphy_get_radio_loft;
	pi->pi_fptr.detach = wlc_phy_detach_lcnphy;

	if (!wlc_phy_txpwr_srom_read_lcnphy(pi))
		return false;

	/* FEM boards on LCN rev 1: the SROM tempsense option selects
	 * between hardware and temperature-based tx power control */
	if ((pi->sh->boardflags & BFL_FEM) &&
	    (LCNREV_IS(pi->pubpi.phy_rev, 1))) {
		if (pi_lcn->lcnphy_tempsense_option == 3) {
			pi->hwpwrctrl = true;
			pi->hwpwrctrl_capable = true;
			pi->temppwrctrl_capable = false;
		} else {
			pi->hwpwrctrl = false;
			pi->hwpwrctrl_capable = false;
			pi->temppwrctrl_capable = true;
		}
	}

	return true;
}
/*
 * Apply a packed 30-bit rx gain code to the override registers.
 * Bit layout of 'gain': [3:0] lna1, [7:4] lna2, [11:8] tia,
 * [15:12] biq0, [19:16] biq1, bit 28 = TR switch (inverted),
 * bit 29 = external LNA.  Enables the rx gain override when done.
 */
static void wlc_lcnphy_set_rx_gain(struct brcms_phy *pi, u32 gain)
{
	u16 trsw, ext_lna, lna1, lna2, tia, biq0, biq1, gain0_15, gain16_19;

	trsw = (gain & ((u32) 1 << 28)) ? 0 : 1;
	ext_lna = (u16) (gain >> 29) & 0x01;
	lna1 = (u16) (gain >> 0) & 0x0f;
	lna2 = (u16) (gain >> 4) & 0x0f;
	tia = (u16) (gain >> 8) & 0xf;
	biq0 = (u16) (gain >> 12) & 0xf;
	biq1 = (u16) (gain >> 16) & 0xf;

	/* lna1/lna2 2-bit fields are duplicated into adjacent slots */
	gain0_15 = (u16) ((lna1 & 0x3) | ((lna1 & 0x3) << 2) |
			  ((lna2 & 0x3) << 4) | ((lna2 & 0x3) << 6) |
			  ((tia & 0xf) << 8) | ((biq0 & 0xf) << 12));
	gain16_19 = biq1;

	mod_phy_reg(pi, 0x44d, (0x1 << 0), trsw << 0);
	mod_phy_reg(pi, 0x4b1, (0x1 << 9), ext_lna << 9);
	mod_phy_reg(pi, 0x4b1, (0x1 << 10), ext_lna << 10);
	mod_phy_reg(pi, 0x4b6, (0xffff << 0), gain0_15 << 0);
	mod_phy_reg(pi, 0x4b7, (0xf << 0), gain16_19 << 0);

	if (CHSPEC_IS2G(pi->radio_chanspec)) {
		mod_phy_reg(pi, 0x4b1, (0x3 << 11), lna1 << 11);
		mod_phy_reg(pi, 0x4e6, (0x3 << 3), lna1 << 3);
	}

	wlc_lcnphy_rx_gain_override_enable(pi, true);
}
/*
 * Measure digital rx power at a given gain index.  If *gain_index is
 * -1, sweep the gain table upward until the measured power reaches
 * 700 (or the table end, index 36) and report the index used back
 * through *gain_index.  Returns the measured power.
 */
static u32 wlc_lcnphy_get_receive_power(struct brcms_phy *pi, s32 *gain_index)
{
	u32 received_power = 0;
	s32 max_index = 0;
	u32 gain_code = 0;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	max_index = 36;
	if (*gain_index >= 0)
		gain_code = lcnphy_23bitgaincode_table[*gain_index];

	if (-1 == *gain_index) {
		/* Auto mode: search for the lowest gain index whose
		 * measured power reaches the threshold */
		*gain_index = 0;
		while ((*gain_index <= (s32) max_index)
		       && (received_power < 700)) {
			wlc_lcnphy_set_rx_gain(pi,
					       lcnphy_23bitgaincode_table
					       [*gain_index]);
			received_power =
				wlc_lcnphy_measure_digital_power(
					pi,
					pi_lcn->
					lcnphy_noise_samples);
			(*gain_index)++;
		}
		/* Undo the final increment so *gain_index matches the
		 * measurement that terminated the loop */
		(*gain_index)--;
	} else {
		wlc_lcnphy_set_rx_gain(pi, gain_code);
		received_power =
			wlc_lcnphy_measure_digital_power(pi,
							 pi_lcn->
							 lcnphy_noise_samples);
	}

	return received_power;
}
/*
 * Estimate the rx signal power in dB for a given (or auto-searched,
 * gain_index == -1) gain index.  Approximates log2 of the measured
 * power by the nearest power of two, converts to dB (x3), applies
 * gain mismatch, per-index RSSI offset, a frequency correction for
 * mid-band channels, and a temperature correction.  Disables the rx
 * gain override before returning.
 */
s32 wlc_lcnphy_rx_signal_power(struct brcms_phy *pi, s32 gain_index)
{
	s32 gain = 0;
	s32 nominal_power_db;
	s32 log_val, gain_mismatch, desired_gain, input_power_offset_db,
	    input_power_db;
	s32 received_power, temperature;
	u32 power;
	u32 msb1, msb2, val1, val2, diff1, diff2;
	uint freq;
	struct brcms_phy_lcnphy *pi_lcn = pi->u.pi_lcnphy;

	received_power = wlc_lcnphy_get_receive_power(pi, &gain_index);

	gain = lcnphy_gain_table[gain_index];

	nominal_power_db = read_phy_reg(pi, 0x425) >> 8;

	/* Round power*16 to the nearest power of two for a cheap log2.
	 * NOTE(review): if received_power is 0, ffs(power) - 1 wraps to
	 * a huge msb1 and the shifts below are undefined — presumably
	 * the measurement can never be 0 here; confirm. */
	power = (received_power * 16);
	msb1 = ffs(power) - 1;
	msb2 = msb1 + 1;
	val1 = 1 << msb1;
	val2 = 1 << msb2;
	diff1 = (power - val1);
	diff2 = (val2 - power);

	if (diff1 < diff2)
		log_val = msb1;
	else
		log_val = msb2;

	/* ~3 dB per power-of-two step */
	log_val = log_val * 3;

	gain_mismatch = (nominal_power_db / 2) - (log_val);

	desired_gain = gain + gain_mismatch;

	/* Signed byte in reg 0x434 */
	input_power_offset_db = read_phy_reg(pi, 0x434) & 0xFF;
	if (input_power_offset_db > 127)
		input_power_offset_db -= 256;

	input_power_db = input_power_offset_db - desired_gain;

	input_power_db =
		input_power_db + lcnphy_gain_index_offset_for_rssi[gain_index];

	/* Mid-band 2.4 GHz channels read 1 dB hot */
	freq = wlc_phy_channel2freq(CHSPEC_CHANNEL(pi->radio_chanspec));
	if ((freq > 2427) && (freq <= 2467))
		input_power_db = input_power_db - 1;

	/* Temperature correction: slope 286/4096 dB per degree around
	 * 35 (temperature - 10 - 25), with extra fixed offsets at the
	 * cold end */
	temperature = pi_lcn->lcnphy_lastsensed_temperature;

	if ((temperature - 15) < -30)
		input_power_db =
			input_power_db +
			(((temperature - 10 - 25) * 286) >> 12) -
			7;
	else if ((temperature - 15) < 4)
		input_power_db =
			input_power_db +
			(((temperature - 10 - 25) * 286) >> 12) -
			3;
	else
		input_power_db = input_power_db +
			(((temperature - 10 - 25) * 286) >> 12);

	wlc_lcnphy_rx_gain_override_enable(pi, 0);

	return input_power_db;
}
| gpl-2.0 |
dastuam/linux37-beaglebone | drivers/staging/media/solo6x10/v4l2.c | 8283 | 23485 | /*
* Copyright (C) 2010 Bluecherry, LLC www.bluecherrydvr.com
* Copyright (C) 2010 Ben Collins <bcollins@bluecherry.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
#include <media/videobuf-dma-sg.h>
#include "solo6x10.h"
#include "tw28.h"
/* One hardware line is SOLO_HW_BPL bytes regardless of active width */
#define SOLO_HW_BPL		2048
#define SOLO_DISP_PIX_FIELD	V4L2_FIELD_INTERLACED

/* Image size is two fields, SOLO_HW_BPL is one horizontal line */
#define solo_vlines(__solo)	(__solo->video_vsize * 2)
#define solo_image_size(__solo)	(solo_bytesperline(__solo) * \
				 solo_vlines(__solo))
#define solo_bytesperline(__solo) (__solo->video_hsize * 2)

/* Lower bound on the number of capture buffers videobuf may allocate */
#define MIN_VID_BUFFERS		4

/* Simple file handle: per-open state for the display capture node */
struct solo_filehandle {
	struct solo_dev		*solo_dev;	/* owning device */
	struct videobuf_queue	vidq;		/* capture buffer queue */
	struct task_struct	*kthread;	/* display feeder thread */
	spinlock_t		slock;		/* protects vidq_active */
	int			old_write;	/* last hw write page consumed */
	struct list_head	vidq_active;	/* buffers queued for filling */
	struct p2m_desc		desc[SOLO_NR_P2M_DESC]; /* P2M DMA batch */
	int			desc_idx;	/* next free slot in desc[] */
};

/* videoX minor start number; -1 lets the v4l2 core pick */
unsigned video_nr = -1;
module_param(video_nr, uint, 0644);
MODULE_PARM_DESC(video_nr, "videoX start number, -1 is autodetect (default)");
/* Start blanking the display: assert the erase bit and reset the
 * blank-frame counter that erase_off() counts up. */
static void erase_on(struct solo_dev *solo_dev)
{
	solo_reg_write(solo_dev, SOLO_VO_DISP_ERASE, SOLO_VO_DISP_ERASE_ON);
	solo_dev->erasing = 1;
	solo_dev->frame_blank = 0;
}
/*
 * Advance the erase state machine by one frame.  Returns 0 when no
 * erase is in progress, 1 while still erasing.  On the first frame
 * the erase bit is deasserted; the erasing flag stays set for a
 * minimum of 8 frames.
 */
static int erase_off(struct solo_dev *solo_dev)
{
	int frames_blanked;

	if (!solo_dev->erasing)
		return 0;

	/* First frame: deassert the hardware erase bit */
	frames_blanked = solo_dev->frame_blank++;
	if (frames_blanked == 0)
		solo_reg_write(solo_dev, SOLO_VO_DISP_ERASE, 0);

	/* Keep the erasing flag on for 8 frames minimum */
	if (frames_blanked >= 8)
		solo_dev->erasing = 0;

	return 1;
}
/* Video-in interrupt: ack the IRQ and kick the display thread(s)
 * waiting on disp_thread_wait. */
void solo_video_in_isr(struct solo_dev *solo_dev)
{
	solo_reg_write(solo_dev, SOLO_IRQ_STAT, SOLO_IRQ_VIDEO_IN);
	wake_up_interruptible(&solo_dev->disp_thread_wait);
}
/*
 * Program one display window: start/end coordinates and scale factor
 * for channel 'ch'.  Window index and channel are kept 1:1.  Silently
 * ignores out-of-range channels.
 */
static void solo_win_setup(struct solo_dev *solo_dev, u8 ch,
			   int sx, int sy, int ex, int ey, int scale)
{
	if (ch >= solo_dev->nr_chans)
		return;

	/* Here, we just keep window/channel the same */
	solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL0(ch),
		       SOLO_VI_WIN_CHANNEL(ch) |
		       SOLO_VI_WIN_SX(sx) |
		       SOLO_VI_WIN_EX(ex) |
		       SOLO_VI_WIN_SCALE(scale));

	solo_reg_write(solo_dev, SOLO_VI_WIN_CTRL1(ch),
		       SOLO_VI_WIN_SY(sy) |
		       SOLO_VI_WIN_EY(ey));
}
/*
 * Enable/disable a 2x2 multiplex view for channel group 'idx'
 * (channels idx*4 .. idx*4+3).  Disabling parks each window off
 * screen (start == end == frame size, scale 0); enabling lays the
 * four channels out in quadrants at scale 3.
 */
static int solo_v4l2_ch_ext_4up(struct solo_dev *solo_dev, u8 idx, int on)
{
	u8 ch = idx * 4;

	if (ch >= solo_dev->nr_chans)
		return -EINVAL;

	if (!on) {
		u8 i;
		for (i = ch; i < ch + 4; i++)
			solo_win_setup(solo_dev, i, solo_dev->video_hsize,
				       solo_vlines(solo_dev),
				       solo_dev->video_hsize,
				       solo_vlines(solo_dev), 0);
		return 0;
	}

	/* Row 1 */
	solo_win_setup(solo_dev, ch, 0, 0, solo_dev->video_hsize / 2,
		       solo_vlines(solo_dev) / 2, 3);
	solo_win_setup(solo_dev, ch + 1, solo_dev->video_hsize / 2, 0,
		       solo_dev->video_hsize, solo_vlines(solo_dev) / 2, 3);

	/* Row 2 */
	solo_win_setup(solo_dev, ch + 2, 0, solo_vlines(solo_dev) / 2,
		       solo_dev->video_hsize / 2, solo_vlines(solo_dev), 3);
	solo_win_setup(solo_dev, ch + 3, solo_dev->video_hsize / 2,
		       solo_vlines(solo_dev) / 2, solo_dev->video_hsize,
		       solo_vlines(solo_dev), 3);

	return 0;
}
/*
 * Enable/disable the 4x4 (16-channel) multiplex view.  Disabling
 * parks all 16 windows off screen; enabling lays channels 0..15 out
 * in a 4x4 grid at scale 5.
 */
static int solo_v4l2_ch_ext_16up(struct solo_dev *solo_dev, int on)
{
	int sy, ysize, hsize, i;

	if (!on) {
		for (i = 0; i < 16; i++)
			solo_win_setup(solo_dev, i, solo_dev->video_hsize,
				       solo_vlines(solo_dev),
				       solo_dev->video_hsize,
				       solo_vlines(solo_dev), 0);
		return 0;
	}

	ysize = solo_vlines(solo_dev) / 4;
	hsize = solo_dev->video_hsize / 4;

	/* One row of four windows per iteration */
	for (sy = 0, i = 0; i < 4; i++, sy += ysize) {
		solo_win_setup(solo_dev, i * 4, 0, sy, hsize,
			       sy + ysize, 5);
		solo_win_setup(solo_dev, (i * 4) + 1, hsize, sy,
			       hsize * 2, sy + ysize, 5);
		solo_win_setup(solo_dev, (i * 4) + 2, hsize * 2, sy,
			       hsize * 3, sy + ysize, 5);
		solo_win_setup(solo_dev, (i * 4) + 3, hsize * 3, sy,
			       solo_dev->video_hsize, sy + ysize, 5);
	}

	return 0;
}
/*
 * Enable/disable input 'ch'.  Indices below nr_chans are single
 * full-screen cameras; indices above map to extended multiplex views
 * (up to four 4-up groups, then the 16-up grid).
 */
static int solo_v4l2_ch(struct solo_dev *solo_dev, u8 ch, int on)
{
	u8 ext_ch;

	if (ch < solo_dev->nr_chans) {
		/* Off parks the window off screen, on shows it
		 * full-frame at scale 1 */
		solo_win_setup(solo_dev, ch, on ? 0 : solo_dev->video_hsize,
			       on ? 0 : solo_vlines(solo_dev),
			       solo_dev->video_hsize, solo_vlines(solo_dev),
			       on ? 1 : 0);
		return 0;
	}

	if (ch >= solo_dev->nr_chans + solo_dev->nr_ext)
		return -EINVAL;

	ext_ch = ch - solo_dev->nr_chans;

	/* 4up's first */
	if (ext_ch < 4)
		return solo_v4l2_ch_ext_4up(solo_dev, ext_ch, on);

	/* Remaining case is 16up for 16-port */
	return solo_v4l2_ch_ext_16up(solo_dev, on);
}
/*
 * Switch the shared display output to input 'ch'.  Blanks the display
 * during the transition, tears down the previous window set and
 * brings up the new one.
 */
static int solo_v4l2_set_ch(struct solo_dev *solo_dev, u8 ch)
{
	unsigned int nr_inputs = solo_dev->nr_chans + solo_dev->nr_ext;

	if (ch >= nr_inputs)
		return -EINVAL;

	erase_on(solo_dev);
	solo_v4l2_ch(solo_dev, solo_dev->cur_disp_ch, 0);
	solo_v4l2_ch(solo_dev, ch, 1);
	solo_dev->cur_disp_ch = ch;

	return 0;
}
/*
 * Reset the DMA descriptor batch.  Only desc[0] is zeroed: the
 * hardware's descriptor mode ignores descriptor 0, and slots >= 1
 * are presumably fully rewritten by solo_p2m_push_desc() before use
 * (TODO confirm).  desc_idx restarts at 1 accordingly.
 */
static void disp_reset_desc(struct solo_filehandle *fh)
{
	/* We use desc mode, which ignores desc 0 */
	memset(fh->desc, 0, sizeof(*fh->desc));
	fh->desc_idx = 1;
}
/*
 * Submit the pending descriptor batch to the P2M engine and reset
 * the batch.  No-op when nothing has been pushed (desc_idx == 0 only
 * before the first disp_reset_desc()).  Returns the DMA status.
 */
static int disp_flush_descs(struct solo_filehandle *fh)
{
	int ret;

	if (!fh->desc_idx)
		return 0;

	ret = solo_p2m_dma_desc(fh->solo_dev, SOLO_P2M_DMA_ID_DISP,
				fh->desc, fh->desc_idx);
	disp_reset_desc(fh);

	return ret;
}
/*
 * Append one transfer descriptor to the batch, flushing the batch
 * first when it is full (flush resets desc_idx to 1, so a slot is
 * always available afterwards).  Returns 0 or the flush error.
 */
static int disp_push_desc(struct solo_filehandle *fh, dma_addr_t dma_addr,
			  u32 ext_addr, int size, int repeat, int ext_size)
{
	if (fh->desc_idx >= SOLO_NR_P2M_DESC) {
		int ret = disp_flush_descs(fh);
		if (ret)
			return ret;
	}

	solo_p2m_push_desc(&fh->desc[fh->desc_idx], 0, dma_addr, ext_addr,
			   size, repeat, ext_size);
	fh->desc_idx++;

	return 0;
}
static void solo_fillbuf(struct solo_filehandle *fh,
struct videobuf_buffer *vb)
{
struct solo_dev *solo_dev = fh->solo_dev;
struct videobuf_dmabuf *vbuf;
unsigned int fdma_addr;
int error = 1;
int i;
struct scatterlist *sg;
dma_addr_t sg_dma;
int sg_size_left;
vbuf = videobuf_to_dma(vb);
if (!vbuf)
goto finish_buf;
if (erase_off(solo_dev)) {
int i;
/* Just blit to the entire sg list, ignoring size */
for_each_sg(vbuf->sglist, sg, vbuf->sglen, i) {
void *p = sg_virt(sg);
size_t len = sg_dma_len(sg);
for (i = 0; i < len; i += 2) {
((u8 *)p)[i] = 0x80;
((u8 *)p)[i + 1] = 0x00;
}
}
error = 0;
goto finish_buf;
}
disp_reset_desc(fh);
sg = vbuf->sglist;
sg_dma = sg_dma_address(sg);
sg_size_left = sg_dma_len(sg);
fdma_addr = SOLO_DISP_EXT_ADDR + (fh->old_write *
(SOLO_HW_BPL * solo_vlines(solo_dev)));
for (i = 0; i < solo_vlines(solo_dev); i++) {
int line_len = solo_bytesperline(solo_dev);
int lines;
if (!sg_size_left) {
sg = sg_next(sg);
if (sg == NULL)
goto finish_buf;
sg_dma = sg_dma_address(sg);
sg_size_left = sg_dma_len(sg);
}
/* No room for an entire line, so chunk it up */
if (sg_size_left < line_len) {
int this_addr = fdma_addr;
while (line_len > 0) {
int this_write;
if (!sg_size_left) {
sg = sg_next(sg);
if (sg == NULL)
goto finish_buf;
sg_dma = sg_dma_address(sg);
sg_size_left = sg_dma_len(sg);
}
this_write = min(sg_size_left, line_len);
if (disp_push_desc(fh, sg_dma, this_addr,
this_write, 0, 0))
goto finish_buf;
line_len -= this_write;
sg_size_left -= this_write;
sg_dma += this_write;
this_addr += this_write;
}
fdma_addr += SOLO_HW_BPL;
continue;
}
/* Shove as many lines into a repeating descriptor as possible */
lines = min(sg_size_left / line_len,
solo_vlines(solo_dev) - i);
if (disp_push_desc(fh, sg_dma, fdma_addr, line_len,
lines - 1, SOLO_HW_BPL))
goto finish_buf;
i += lines - 1;
fdma_addr += SOLO_HW_BPL * lines;
sg_dma += lines * line_len;
sg_size_left -= lines * line_len;
}
error = disp_flush_descs(fh);
finish_buf:
if (error) {
vb->state = VIDEOBUF_ERROR;
} else {
vb->size = solo_vlines(solo_dev) * solo_bytesperline(solo_dev);
vb->state = VIDEOBUF_DONE;
vb->field_count++;
do_gettimeofday(&vb->ts);
}
wake_up(&vb->done);
return;
}
/*
 * Fill as many queued buffers as the hardware has new pages for.
 * Locking is intricate: the loop re-acquires slock each iteration
 * and every 'break' leaves with the lock HELD — the unlock after the
 * loop (guarded by assert_spin_locked) pairs with the last acquire.
 * solo_fillbuf() is called with the lock dropped.
 */
static void solo_thread_try(struct solo_filehandle *fh)
{
	struct videobuf_buffer *vb;
	unsigned int cur_write;

	for (;;) {
		spin_lock(&fh->slock);

		if (list_empty(&fh->vidq_active))
			break;

		vb = list_first_entry(&fh->vidq_active, struct videobuf_buffer,
				      queue);

		/* Nobody is waiting on this buffer yet */
		if (!waitqueue_active(&vb->done))
			break;

		/* Only consume a page the hardware has finished writing */
		cur_write = SOLO_VI_STATUS0_PAGE(solo_reg_read(fh->solo_dev,
						 SOLO_VI_STATUS0));
		if (cur_write == fh->old_write)
			break;

		fh->old_write = cur_write;
		list_del(&vb->queue);

		spin_unlock(&fh->slock);

		solo_fillbuf(fh, vb);
	}

	assert_spin_locked(&fh->slock);
	spin_unlock(&fh->slock);
}
/*
 * Display feeder thread body: wake on the video-in IRQ (or a 1 s
 * timeout), try to fill buffers, and exit on a stop request or
 * signal.  Freezable for suspend/resume.
 */
static int solo_thread(void *data)
{
	struct solo_filehandle *fh = data;
	struct solo_dev *solo_dev = fh->solo_dev;
	DECLARE_WAITQUEUE(wait, current);

	set_freezable();
	add_wait_queue(&solo_dev->disp_thread_wait, &wait);

	for (;;) {
		long timeout = schedule_timeout_interruptible(HZ);
		if (timeout == -ERESTARTSYS || kthread_should_stop())
			break;
		solo_thread_try(fh);
		try_to_freeze();
	}

	remove_wait_queue(&solo_dev->disp_thread_wait, &wait);

	return 0;
}
/* Spawn the per-open display feeder thread; 0 on success or the
 * kthread_run() error code. */
static int solo_start_thread(struct solo_filehandle *fh)
{
	fh->kthread = kthread_run(solo_thread, fh, SOLO6X10_NAME "_disp");

	return IS_ERR(fh->kthread) ? PTR_ERR(fh->kthread) : 0;
}
/* Stop the display feeder thread if one is running. */
static void solo_stop_thread(struct solo_filehandle *fh)
{
	if (!fh->kthread)
		return;

	kthread_stop(fh->kthread);
	fh->kthread = NULL;
}
/* videobuf: report buffer size (one full interlaced frame) and
 * enforce a minimum queue depth. */
static int solo_buf_setup(struct videobuf_queue *vq, unsigned int *count,
			  unsigned int *size)
{
	struct solo_filehandle *fh = vq->priv_data;

	*size = solo_image_size(fh->solo_dev);

	if (*count < MIN_VID_BUFFERS)
		*count = MIN_VID_BUFFERS;

	return 0;
}
/*
 * videobuf: validate a user buffer against the running geometry,
 * record frame dimensions, and map the buffer for DMA on first use.
 * On iolock failure the partial mapping is torn down and the buffer
 * is returned to NEEDS_INIT.
 */
static int solo_buf_prepare(struct videobuf_queue *vq,
			    struct videobuf_buffer *vb, enum v4l2_field field)
{
	struct solo_filehandle *fh = vq->priv_data;
	struct solo_dev *solo_dev = fh->solo_dev;

	vb->size = solo_image_size(solo_dev);
	if (vb->baddr != 0 && vb->bsize < vb->size)
		return -EINVAL;

	/* XXX: These properties only change when queue is idle */
	vb->width = solo_dev->video_hsize;
	vb->height = solo_vlines(solo_dev);
	vb->bytesperline = solo_bytesperline(solo_dev);
	vb->field = field;

	if (vb->state == VIDEOBUF_NEEDS_INIT) {
		int rc = videobuf_iolock(vq, vb, NULL);
		if (rc < 0) {
			struct videobuf_dmabuf *dma = videobuf_to_dma(vb);
			videobuf_dma_unmap(vq->dev, dma);
			videobuf_dma_free(dma);
			vb->state = VIDEOBUF_NEEDS_INIT;
			return rc;
		}
	}
	vb->state = VIDEOBUF_PREPARED;

	return 0;
}
/* videobuf: hand a prepared buffer to the display thread and kick
 * it.  Called with the queue lock (fh->slock) held by videobuf. */
static void solo_buf_queue(struct videobuf_queue *vq,
			   struct videobuf_buffer *vb)
{
	struct solo_filehandle *fh = vq->priv_data;

	vb->state = VIDEOBUF_QUEUED;
	list_add_tail(&vb->queue, &fh->vidq_active);
	wake_up_interruptible(&fh->solo_dev->disp_thread_wait);
}
/* videobuf: tear down the DMA mapping and reset the buffer state. */
static void solo_buf_release(struct videobuf_queue *vq,
			     struct videobuf_buffer *vb)
{
	struct videobuf_dmabuf *vbuf_dma = videobuf_to_dma(vb);

	videobuf_dma_unmap(vq->dev, vbuf_dma);
	videobuf_dma_free(vbuf_dma);
	vb->state = VIDEOBUF_NEEDS_INIT;
}
/* videobuf operations for the display capture queue */
static struct videobuf_queue_ops solo_video_qops = {
	.buf_setup	= solo_buf_setup,
	.buf_prepare	= solo_buf_prepare,
	.buf_queue	= solo_buf_queue,
	.buf_release	= solo_buf_release,
};
/* poll(): delegate to videobuf's stream poll. */
static unsigned int solo_v4l2_poll(struct file *file,
				   struct poll_table_struct *wait)
{
	struct solo_filehandle *handle = file->private_data;

	return videobuf_poll_stream(file, &handle->vidq, wait);
}
/* mmap(): delegate buffer mapping to videobuf. */
static int solo_v4l2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct solo_filehandle *handle = file->private_data;

	return videobuf_mmap_mapper(&handle->vidq, vma);
}
/*
 * open(): allocate the per-open file handle, start the display
 * feeder thread, and initialize the videobuf SG queue.
 *
 * NOTE(review): the thread is started before videobuf_queue_sg_init()
 * runs; it only touches slock/vidq_active, which are already
 * initialized at that point — confirm this ordering is intentional.
 */
static int solo_v4l2_open(struct file *file)
{
	struct solo_dev *solo_dev = video_drvdata(file);
	struct solo_filehandle *fh;
	int ret;

	fh = kzalloc(sizeof(*fh), GFP_KERNEL);
	if (fh == NULL)
		return -ENOMEM;

	spin_lock_init(&fh->slock);
	INIT_LIST_HEAD(&fh->vidq_active);
	fh->solo_dev = solo_dev;
	file->private_data = fh;

	ret = solo_start_thread(fh);
	if (ret) {
		kfree(fh);
		return ret;
	}

	videobuf_queue_sg_init(&fh->vidq, &solo_video_qops,
			       &solo_dev->pdev->dev, &fh->slock,
			       V4L2_BUF_TYPE_VIDEO_CAPTURE,
			       SOLO_DISP_PIX_FIELD,
			       sizeof(struct videobuf_buffer), fh, NULL);

	return 0;
}
/* read(): delegate to videobuf's streaming read path. */
static ssize_t solo_v4l2_read(struct file *file, char __user *data,
			      size_t count, loff_t *ppos)
{
	struct solo_filehandle *handle = file->private_data;
	int nonblocking = file->f_flags & O_NONBLOCK;

	return videobuf_read_stream(&handle->vidq, data, count, ppos, 0,
				    nonblocking);
}
/* release(): stop streaming, drop mappings, stop the feeder thread,
 * and free the per-open handle. */
static int solo_v4l2_release(struct file *file)
{
	struct solo_filehandle *handle = file->private_data;

	videobuf_stop(&handle->vidq);
	videobuf_mmap_free(&handle->vidq);
	solo_stop_thread(handle);
	kfree(handle);

	return 0;
}
/* VIDIOC_QUERYCAP: report driver/card identity and capture
 * capabilities (read/write + streaming). */
static int solo_querycap(struct file *file, void *priv,
			 struct v4l2_capability *cap)
{
	struct solo_filehandle  *fh  = priv;
	struct solo_dev *solo_dev = fh->solo_dev;

	strcpy(cap->driver, SOLO6X10_NAME);
	strcpy(cap->card, "Softlogic 6x10");
	snprintf(cap->bus_info, sizeof(cap->bus_info), "PCI %s",
		 pci_name(solo_dev->pdev));
	cap->version = SOLO6X10_VER_NUM;
	cap->capabilities =     V4L2_CAP_VIDEO_CAPTURE |
				V4L2_CAP_READWRITE |
				V4L2_CAP_STREAMING;
	return 0;
}
/*
 * Name an extended (multiplex) input.  Precondition: the caller
 * (solo_enum_input) only passes indices >= nr_chans, so the
 * "index - nr_chans" lookup below cannot underflow.  The name table
 * depends on how many extended views the board exposes (1, 2 or 5).
 */
static int solo_enum_ext_input(struct solo_dev *solo_dev,
			       struct v4l2_input *input)
{
	static const char *dispnames_1[] = { "4UP" };
	static const char *dispnames_2[] = { "4UP-1", "4UP-2" };
	static const char *dispnames_5[] = {
		"4UP-1", "4UP-2", "4UP-3", "4UP-4", "16UP"
	};
	const char **dispnames;

	if (input->index >= (solo_dev->nr_chans + solo_dev->nr_ext))
		return -EINVAL;

	if (solo_dev->nr_ext == 5)
		dispnames = dispnames_5;
	else if (solo_dev->nr_ext == 2)
		dispnames = dispnames_2;
	else
		dispnames = dispnames_1;

	snprintf(input->name, sizeof(input->name), "Multi %s",
		 dispnames[input->index - solo_dev->nr_chans]);

	return 0;
}
/*
 * VIDIOC_ENUMINPUT: camera inputs first (with a live signal check),
 * then the extended multiplex views.  Standard follows the board's
 * configured video type (NTSC-M or PAL-B).
 */
static int solo_enum_input(struct file *file, void *priv,
			   struct v4l2_input *input)
{
	struct solo_filehandle *fh  = priv;
	struct solo_dev *solo_dev = fh->solo_dev;

	if (input->index >= solo_dev->nr_chans) {
		int ret = solo_enum_ext_input(solo_dev, input);
		if (ret < 0)
			return ret;
	} else {
		snprintf(input->name, sizeof(input->name), "Camera %d",
			 input->index + 1);

		/* We can only check this for normal inputs */
		if (!tw28_get_video_status(solo_dev, input->index))
			input->status = V4L2_IN_ST_NO_SIGNAL;
	}

	input->type = V4L2_INPUT_TYPE_CAMERA;

	if (solo_dev->video_type == SOLO_VO_FMT_TYPE_NTSC)
		input->std = V4L2_STD_NTSC_M;
	else
		input->std = V4L2_STD_PAL_B;

	return 0;
}
/* VIDIOC_S_INPUT: switch the shared display channel. */
static int solo_set_input(struct file *file, void *priv, unsigned int index)
{
	struct solo_filehandle *handle = priv;

	return solo_v4l2_set_ch(handle->solo_dev, index);
}
/* VIDIOC_G_INPUT: report the currently displayed channel. */
static int solo_get_input(struct file *file, void *priv, unsigned int *index)
{
	struct solo_filehandle *handle = priv;

	*index = handle->solo_dev->cur_disp_ch;

	return 0;
}
/*
 * VIDIOC_ENUM_FMT: exactly one capture format is supported, packed
 * UYVY 4:2:2.
 *
 * Fix: the user-visible description said "UYUV", a typo for the
 * UYVY fourcc being reported.
 */
static int solo_enum_fmt_cap(struct file *file, void *priv,
			     struct v4l2_fmtdesc *f)
{
	if (f->index)
		return -EINVAL;

	f->pixelformat = V4L2_PIX_FMT_UYVY;
	strlcpy(f->description, "UYVY 4:2:2 Packed", sizeof(f->description));

	return 0;
}
/*
 * VIDIOC_TRY_FMT: the display pipeline runs one fixed configuration,
 * so coerce the size fields to the current geometry and reject any
 * pixelformat/field/colorspace combination that does not match it.
 */
static int solo_try_fmt_cap(struct file *file, void *priv,
			    struct v4l2_format *f)
{
	struct solo_filehandle *fh = priv;
	struct solo_dev *solo_dev = fh->solo_dev;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	int image_size = solo_image_size(solo_dev);

	/* Force the only supported frame geometry */
	pix->width = solo_dev->video_hsize;
	pix->height = solo_vlines(solo_dev);
	pix->sizeimage = image_size;

	/* An unspecified field order gets the driver's default */
	if (pix->field == V4L2_FIELD_ANY)
		pix->field = SOLO_DISP_PIX_FIELD;

	if (pix->pixelformat != V4L2_PIX_FMT_UYVY)
		return -EINVAL;
	if (pix->field != SOLO_DISP_PIX_FIELD)
		return -EINVAL;
	if (pix->colorspace != V4L2_COLORSPACE_SMPTE170M)
		return -EINVAL;

	return 0;
}
/*
 * VIDIOC_S_FMT: only the currently running configuration is accepted
 * (see solo_try_fmt_cap), and not while buffers are queued.
 */
static int solo_set_fmt_cap(struct file *file, void *priv,
			    struct v4l2_format *f)
{
	struct solo_filehandle *hdl = priv;

	if (videobuf_queue_is_busy(&hdl->vidq))
		return -EBUSY;

	/* For right now, if it doesn't match our running config,
	 * then fail */
	return solo_try_fmt_cap(file, priv, f);
}
/* VIDIOC_G_FMT: report the fixed capture format currently in use. */
static int solo_get_fmt_cap(struct file *file, void *priv,
			    struct v4l2_format *f)
{
	struct solo_filehandle *fh = priv;
	struct solo_dev *sdev = fh->solo_dev;
	struct v4l2_pix_format *pix = &f->fmt.pix;

	pix->pixelformat = V4L2_PIX_FMT_UYVY;
	pix->width = sdev->video_hsize;
	pix->height = solo_vlines(sdev);
	pix->bytesperline = solo_bytesperline(sdev);
	pix->sizeimage = solo_image_size(sdev);
	pix->field = SOLO_DISP_PIX_FIELD;
	pix->colorspace = V4L2_COLORSPACE_SMPTE170M;

	return 0;
}
/* VIDIOC_REQBUFS: forward buffer allocation to videobuf. */
static int solo_reqbufs(struct file *file, void *priv,
			struct v4l2_requestbuffers *req)
{
	struct solo_filehandle *fhandle = priv;

	return videobuf_reqbufs(&fhandle->vidq, req);
}
/* VIDIOC_QUERYBUF: forward buffer query to videobuf. */
static int solo_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct solo_filehandle *fhandle = priv;

	return videobuf_querybuf(&fhandle->vidq, buf);
}
/* VIDIOC_QBUF: forward buffer queueing to videobuf. */
static int solo_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct solo_filehandle *fhandle = priv;

	return videobuf_qbuf(&fhandle->vidq, buf);
}
/* VIDIOC_DQBUF: dequeue a filled buffer, honouring O_NONBLOCK. */
static int solo_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
	struct solo_filehandle *fhandle = priv;
	int nonblocking = file->f_flags & O_NONBLOCK;

	return videobuf_dqbuf(&fhandle->vidq, buf, nonblocking);
}
/* VIDIOC_STREAMON: start streaming on the capture queue. */
static int solo_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct solo_filehandle *fhandle = priv;

	return (i == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
		videobuf_streamon(&fhandle->vidq) : -EINVAL;
}
/* VIDIOC_STREAMOFF: stop streaming on the capture queue. */
static int solo_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct solo_filehandle *fhandle = priv;

	return (i == V4L2_BUF_TYPE_VIDEO_CAPTURE) ?
		videobuf_streamoff(&fhandle->vidq) : -EINVAL;
}
/*
 * VIDIOC_S_STD: the video standard is fixed by the hardware setup
 * (see .tvnorms in the template below); accept the call as a no-op.
 */
static int solo_s_std(struct file *file, void *priv, v4l2_std_id *i)
{
	return 0;
}
/* Control IDs handled by the display node; zero-terminated. */
static const u32 solo_motion_ctrls[] = {
	V4L2_CID_MOTION_TRACE,
	0
};

/* NULL-terminated class list consumed by v4l2_ctrl_next(). */
static const u32 *solo_ctrl_classes[] = {
	solo_motion_ctrls,
	NULL
};
/*
 * VIDIOC_QUERYCTRL: describe the private motion-trace control.
 * v4l2_ctrl_next() resolves qc->id (possibly carrying the
 * V4L2_CTRL_FLAG_NEXT_CTRL flag) to the first supported control.
 */
static int solo_disp_queryctrl(struct file *file, void *priv,
			       struct v4l2_queryctrl *qc)
{
	qc->id = v4l2_ctrl_next(solo_ctrl_classes, qc->id);
	if (!qc->id)
		return -EINVAL;

	switch (qc->id) {
#ifdef PRIVATE_CIDS
	case V4L2_CID_MOTION_TRACE:
		/* Boolean control, range 0..1, default off */
		qc->type = V4L2_CTRL_TYPE_BOOLEAN;
		qc->minimum = 0;
		qc->maximum = qc->step = 1;
		qc->default_value = 0;
		strlcpy(qc->name, "Motion Detection Trace", sizeof(qc->name));
		return 0;
#else
	case V4L2_CID_MOTION_TRACE:
		/* Same boolean description, filled in by the core helper */
		return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0);
#endif
	}
	return -EINVAL;
}
/*
 * VIDIOC_G_CTRL: motion-trace is reported as enabled when the motion
 * bar register currently holds a non-zero value.
 */
static int solo_disp_g_ctrl(struct file *file, void *priv,
			    struct v4l2_control *ctrl)
{
	struct solo_filehandle *fh = priv;
	struct solo_dev *solo_dev = fh->solo_dev;

	if (ctrl->id != V4L2_CID_MOTION_TRACE)
		return -EINVAL;

	ctrl->value = solo_reg_read(solo_dev, SOLO_VI_MOTION_BAR) ? 1 : 0;

	return 0;
}
/*
 * VIDIOC_S_CTRL: enable or disable the motion-trace overlay.
 *
 * Enabling programs the motion border and bar registers with fixed
 * Y/Cb/Cr overlay values; disabling clears both registers.
 */
static int solo_disp_s_ctrl(struct file *file, void *priv,
			    struct v4l2_control *ctrl)
{
	struct solo_filehandle *fh = priv;
	struct solo_dev *solo_dev = fh->solo_dev;

	switch (ctrl->id) {
	case V4L2_CID_MOTION_TRACE:
		if (ctrl->value) {
			/* Border overlay: additive luma plus fixed colour */
			solo_reg_write(solo_dev, SOLO_VI_MOTION_BORDER,
				       SOLO_VI_MOTION_Y_ADD |
				       SOLO_VI_MOTION_Y_VALUE(0x20) |
				       SOLO_VI_MOTION_CB_VALUE(0x10) |
				       SOLO_VI_MOTION_CR_VALUE(0x10));
			/* Bar overlay: additive Cr plus fixed colour */
			solo_reg_write(solo_dev, SOLO_VI_MOTION_BAR,
				       SOLO_VI_MOTION_CR_ADD |
				       SOLO_VI_MOTION_Y_VALUE(0x10) |
				       SOLO_VI_MOTION_CB_VALUE(0x80) |
				       SOLO_VI_MOTION_CR_VALUE(0x10));
		} else {
			/* All-zero registers disable the trace overlay */
			solo_reg_write(solo_dev, SOLO_VI_MOTION_BORDER, 0);
			solo_reg_write(solo_dev, SOLO_VI_MOTION_BAR, 0);
		}
		return 0;
	}
	return -EINVAL;
}
/* File operations for the display node; ioctls are dispatched by
 * video_ioctl2 through the ops table below. */
static const struct v4l2_file_operations solo_v4l2_fops = {
	.owner = THIS_MODULE,
	.open = solo_v4l2_open,
	.release = solo_v4l2_release,
	.read = solo_v4l2_read,
	.poll = solo_v4l2_poll,
	.mmap = solo_v4l2_mmap,
	.ioctl = video_ioctl2,
};
/* ioctl dispatch table for the display node (wired into the file
 * operations above via video_ioctl2). */
static const struct v4l2_ioctl_ops solo_v4l2_ioctl_ops = {
	.vidioc_querycap = solo_querycap,
	.vidioc_s_std = solo_s_std,
	/* Input callbacks */
	.vidioc_enum_input = solo_enum_input,
	.vidioc_s_input = solo_set_input,
	.vidioc_g_input = solo_get_input,
	/* Video capture format callbacks */
	.vidioc_enum_fmt_vid_cap = solo_enum_fmt_cap,
	.vidioc_try_fmt_vid_cap = solo_try_fmt_cap,
	.vidioc_s_fmt_vid_cap = solo_set_fmt_cap,
	.vidioc_g_fmt_vid_cap = solo_get_fmt_cap,
	/* Streaming I/O */
	.vidioc_reqbufs = solo_reqbufs,
	.vidioc_querybuf = solo_querybuf,
	.vidioc_qbuf = solo_qbuf,
	.vidioc_dqbuf = solo_dqbuf,
	.vidioc_streamon = solo_streamon,
	.vidioc_streamoff = solo_streamoff,
	/* Controls */
	.vidioc_queryctrl = solo_disp_queryctrl,
	.vidioc_g_ctrl = solo_disp_g_ctrl,
	.vidioc_s_ctrl = solo_disp_s_ctrl,
};
/* Template copied into the freshly allocated video_device in
 * solo_v4l2_init(); .minor of -1 requests dynamic minor assignment. */
static struct video_device solo_v4l2_template = {
	.name = SOLO6X10_NAME,
	.fops = &solo_v4l2_fops,
	.ioctl_ops = &solo_v4l2_ioctl_ops,
	.minor = -1,
	.release = video_device_release,
	.tvnorms = V4L2_STD_NTSC_M | V4L2_STD_PAL_B,
	.current_norm = V4L2_STD_NTSC_M,
};
/*
 * Register the display video device and bring the display path up.
 *
 * Allocates and registers one /dev/videoN node from the template
 * above, cycles every channel once (spinning until erase_off()
 * reports done each time), selects channel 0 as the default display,
 * and finally enables the video-in interrupt.
 *
 * Returns 0 on success or a negative errno.
 */
int solo_v4l2_init(struct solo_dev *solo_dev)
{
	int ret;
	int i;

	init_waitqueue_head(&solo_dev->disp_thread_wait);

	solo_dev->vfd = video_device_alloc();
	if (!solo_dev->vfd)
		return -ENOMEM;

	*solo_dev->vfd = solo_v4l2_template;
	solo_dev->vfd->parent = &solo_dev->pdev->dev;

	ret = video_register_device(solo_dev->vfd, VFL_TYPE_GRABBER, video_nr);
	if (ret < 0) {
		video_device_release(solo_dev->vfd);
		solo_dev->vfd = NULL;
		return ret;
	}

	video_set_drvdata(solo_dev->vfd, solo_dev);

	snprintf(solo_dev->vfd->name, sizeof(solo_dev->vfd->name), "%s (%i)",
		 SOLO6X10_NAME, solo_dev->vfd->num);

	/* Next registration gets the following requested minor number */
	if (video_nr != -1)
		video_nr++;

	dev_info(&solo_dev->pdev->dev, "Display as /dev/video%d with "
		 "%d inputs (%d extended)\n", solo_dev->vfd->num,
		 solo_dev->nr_chans, solo_dev->nr_ext);

	/* Cycle all the channels and clear */
	for (i = 0; i < solo_dev->nr_chans; i++) {
		solo_v4l2_set_ch(solo_dev, i);
		while (erase_off(solo_dev))
			;/* Do nothing */
	}

	/* Set the default display channel */
	solo_v4l2_set_ch(solo_dev, 0);
	while (erase_off(solo_dev))
		;/* Do nothing */

	solo_irq_on(solo_dev, SOLO_IRQ_VIDEO_IN);

	return 0;
}
/* Tear down the display node: mask the IRQ, then unregister. */
void solo_v4l2_exit(struct solo_dev *solo_dev)
{
	solo_irq_off(solo_dev, SOLO_IRQ_VIDEO_IN);

	if (!solo_dev->vfd)
		return;

	video_unregister_device(solo_dev->vfd);
	solo_dev->vfd = NULL;
}
| gpl-2.0 |
SaberMod/lge-kernel-mako | drivers/net/wireless/b43legacy/sysfs.c | 8539 | 5616 | /*
Broadcom B43legacy wireless driver
SYSFS support routines
Copyright (c) 2006 Michael Buesch <m@bues.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "sysfs.h"
#include "b43legacy.h"
#include "main.h"
#include "phy.h"
#include "radio.h"
#include <linux/capability.h>
#define GENERIC_FILESIZE 64
/*
 * Parse a decimal integer from a sysfs buffer.
 *
 * Only the first 10 characters are considered.  Returns -EINVAL for
 * an empty buffer, otherwise the simple_strtol() result.
 */
static int get_integer(const char *buf, size_t count)
{
	char tmp[10 + 1] = { 0 };

	if (count == 0)
		return -EINVAL;

	memcpy(tmp, buf, min(count, (size_t)10));

	return simple_strtol(tmp, NULL, 10);
}
/*
 * Parse a boolean from a sysfs buffer.
 *
 * Recognizes, by prefix and in this order: "1"/"0", "true"/"false",
 * "yes"/"no" and "on"/"off".  Returns 1 or 0 on a match, -EINVAL
 * otherwise (including an empty buffer).
 */
static int get_boolean(const char *buf, size_t count)
{
	static const struct {
		const char *word;
		size_t len;
		int value;
	} words[] = {
		{ "1", 1, 1 },    { "0", 1, 0 },
		{ "true", 4, 1 }, { "false", 5, 0 },
		{ "yes", 3, 1 },  { "no", 2, 0 },
		{ "on", 2, 1 },   { "off", 3, 0 },
	};
	size_t i;

	for (i = 0; i < sizeof(words) / sizeof(words[0]); i++) {
		if (count >= words[i].len &&
		    memcmp(buf, words[i].word, words[i].len) == 0)
			return words[i].value;
	}

	return -EINVAL;
}
/*
 * sysfs "interference" show: print the active interference-mitigation
 * mode.  Requires CAP_NET_ADMIN; phy state is read under the wl mutex.
 */
static ssize_t b43legacy_attr_interfmode_show(struct device *dev,
					      struct device_attribute *attr,
					      char *buf)
{
	struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev);
	ssize_t count = 0;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&wldev->wl->mutex);
	switch (wldev->phy.interfmode) {
	case B43legacy_INTERFMODE_NONE:
		count = snprintf(buf, PAGE_SIZE,
				 "0 (No Interference Mitigation)\n");
		break;
	case B43legacy_INTERFMODE_NONWLAN:
		count = snprintf(buf, PAGE_SIZE,
				 "1 (Non-WLAN Interference Mitigation)\n");
		break;
	case B43legacy_INTERFMODE_MANUALWLAN:
		count = snprintf(buf, PAGE_SIZE,
				 "2 (WLAN Interference Mitigation)\n");
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	mutex_unlock(&wldev->wl->mutex);

	return count;
}
/*
 * sysfs "interference" store: parse a mode number (0-3) and program
 * the radio's interference-mitigation setting under the wl mutex and
 * irq_lock.  Requires CAP_NET_ADMIN.
 */
static ssize_t b43legacy_attr_interfmode_store(struct device *dev,
					       struct device_attribute *attr,
					       const char *buf, size_t count)
{
	struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev);
	static const int modes[] = {
		B43legacy_INTERFMODE_NONE,
		B43legacy_INTERFMODE_NONWLAN,
		B43legacy_INTERFMODE_MANUALWLAN,
		B43legacy_INTERFMODE_AUTOWLAN,
	};
	unsigned long flags;
	int err;
	int selection;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	selection = get_integer(buf, count);
	if (selection < 0 || selection > 3)
		return -EINVAL;

	mutex_lock(&wldev->wl->mutex);
	spin_lock_irqsave(&wldev->wl->irq_lock, flags);

	err = b43legacy_radio_set_interference_mitigation(wldev,
							  modes[selection]);
	if (err)
		b43legacyerr(wldev->wl, "Interference Mitigation not "
			     "supported by device\n");
	mmiowb();

	spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
	mutex_unlock(&wldev->wl->mutex);

	return err ? err : count;
}
/* /sys/.../interference: 0644, mitigation-mode knob (root writable). */
static DEVICE_ATTR(interference, 0644,
		   b43legacy_attr_interfmode_show,
		   b43legacy_attr_interfmode_store);
/*
 * sysfs "shortpreamble" show: report whether short preamble is
 * enabled.  Requires CAP_NET_ADMIN; state is read under the wl mutex.
 */
static ssize_t b43legacy_attr_preamble_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev);
	const char *msg;
	ssize_t count;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	mutex_lock(&wldev->wl->mutex);
	msg = wldev->short_preamble ? "1 (Short Preamble enabled)\n"
				    : "0 (Short Preamble disabled)\n";
	count = snprintf(buf, PAGE_SIZE, "%s", msg);
	mutex_unlock(&wldev->wl->mutex);

	return count;
}
/*
 * sysfs "shortpreamble" store: parse a boolean and update the
 * short-preamble flag under the mutex + irq_lock pair.
 */
static ssize_t b43legacy_attr_preamble_store(struct device *dev,
					     struct device_attribute *attr,
					     const char *buf, size_t count)
{
	struct b43legacy_wldev *wldev = dev_to_b43legacy_wldev(dev);
	unsigned long flags;
	int enable;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	enable = get_boolean(buf, count);
	if (enable < 0)
		return enable;

	mutex_lock(&wldev->wl->mutex);
	spin_lock_irqsave(&wldev->wl->irq_lock, flags);

	wldev->short_preamble = !!enable;

	spin_unlock_irqrestore(&wldev->wl->irq_lock, flags);
	mutex_unlock(&wldev->wl->mutex);

	return count;
}
/* /sys/.../shortpreamble: 0644, short-preamble toggle (root writable). */
static DEVICE_ATTR(shortpreamble, 0644,
		   b43legacy_attr_preamble_show,
		   b43legacy_attr_preamble_store);
/*
 * Create the device's sysfs attribute files.  On failure of the
 * second file, the first is removed again so registration is
 * all-or-nothing.  Returns 0 or a negative errno.
 */
int b43legacy_sysfs_register(struct b43legacy_wldev *wldev)
{
	struct device *dev = wldev->dev->dev;
	int err;

	B43legacy_WARN_ON(b43legacy_status(wldev) !=
			  B43legacy_STAT_INITIALIZED);

	err = device_create_file(dev, &dev_attr_interference);
	if (err)
		return err;

	err = device_create_file(dev, &dev_attr_shortpreamble);
	if (err) {
		device_remove_file(dev, &dev_attr_interference);
		return err;
	}

	return 0;
}
/* Remove both sysfs attribute files, in reverse creation order. */
void b43legacy_sysfs_unregister(struct b43legacy_wldev *wldev)
{
	struct device *device = wldev->dev->dev;

	device_remove_file(device, &dev_attr_shortpreamble);
	device_remove_file(device, &dev_attr_interference);
}
| gpl-2.0 |
mdeejay/kernel-tuna | arch/xtensa/lib/pci-auto.c | 9563 | 9268 | /*
* arch/xtensa/lib/pci-auto.c
*
* PCI autoconfiguration library
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Chris Zankel <zankel@tensilica.com, cez@zankel.net>
*
* Based on work from Matt Porter <mporter@mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/pci-bridge.h>
/*
*
* Setting up a PCI
*
* pci_ctrl->first_busno = <first bus number (0)>
* pci_ctrl->last_busno = <last bus number (0xff)>
* pci_ctrl->ops = <PCI config operations>
* pci_ctrl->map_irq = <function to return the interrupt number for a device>
*
* pci_ctrl->io_space.start = <IO space start address (PCI view)>
* pci_ctrl->io_space.end = <IO space end address (PCI view)>
* pci_ctrl->io_space.base = <IO space offset: address 0 from CPU space>
* pci_ctrl->mem_space.start = <MEM space start address (PCI view)>
* pci_ctrl->mem_space.end = <MEM space end address (PCI view)>
* pci_ctrl->mem_space.base = <MEM space offset: address 0 from CPU space>
*
* pcibios_init_resource(&pci_ctrl->io_resource, <IO space start>,
* <IO space end>, IORESOURCE_IO, "PCI host bridge");
* pcibios_init_resource(&pci_ctrl->mem_resources[0], <MEM space start>,
* <MEM space end>, IORESOURCE_MEM, "PCI host bridge");
*
* pci_ctrl->last_busno = pciauto_bus_scan(pci_ctrl,pci_ctrl->first_busno);
*
* int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
*
*/
/* define DEBUG to print some debugging messages. */
#undef DEBUG
#ifdef DEBUG
# define DBG(x...) printk(x)
#else
# define DBG(x...)
#endif
/* Tops of the I/O and memory allocators; BAR assignment subtracts the
 * aligned size, so addresses grow downward from these. */
static int pciauto_upper_iospc;
static int pciauto_upper_memspc;

/* Scratch pci_dev/pci_bus used to issue config cycles while scanning;
 * only bus, devfn, sysdata and ops are ever filled in. */
static struct pci_dev pciauto_dev;
static struct pci_bus pciauto_bus;
/*
* Helper functions
*/
/* Initialize the bars of a PCI device. */
/*
 * Size and assign the base address registers (BARs) of a device.
 *
 * Each BAR is probed by writing all-ones and reading back the size
 * mask, then assigned space from the top-down I/O or memory
 * allocator.  @bar_limit is the config-space offset of the last BAR
 * to consider (PCI_BASE_ADDRESS_1 for bridges, ..._5 for devices).
 */
static void __init
pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
{
	int bar_size;
	int bar, bar_nr;
	int *upper_limit;
	int found_mem64;

	for (bar = PCI_BASE_ADDRESS_0, bar_nr = 0;
	     bar <= bar_limit;
	     bar += 4, bar_nr++)
	{
		/* Bug fix: reset the 64-bit flag for every BAR.  It was
		 * previously set once and never cleared, so after one
		 * 64-bit memory BAR every following BAR (even I/O) got
		 * the extra upper-dword write and skipped a BAR slot. */
		found_mem64 = 0;

		/* Tickle the BAR and get the size */
		pci_write_config_dword(dev, bar, 0xffffffff);
		pci_read_config_dword(dev, bar, &bar_size);

		/* If BAR is not implemented go to the next BAR */
		if (!bar_size)
			continue;

		/* Check the BAR type and set our address mask */
		if (bar_size & PCI_BASE_ADDRESS_SPACE_IO) {
			bar_size &= PCI_BASE_ADDRESS_IO_MASK;
			upper_limit = &pciauto_upper_iospc;
			DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
		} else {
			if ((bar_size & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
			    PCI_BASE_ADDRESS_MEM_TYPE_64)
				found_mem64 = 1;
			bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
			upper_limit = &pciauto_upper_memspc;
			DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
		}

		/* Allocate a base address (bar_size is negative!) */
		*upper_limit = (*upper_limit + bar_size) & bar_size;

		/* Write it out and update our limit */
		pci_write_config_dword(dev, bar, *upper_limit);

		/*
		 * If we are a 64-bit decoder then increment to the
		 * upper 32 bits of the bar and force it to locate
		 * in the lower 4GB of memory.
		 */
		if (found_mem64)
			pci_write_config_dword(dev, (bar += 4), 0x00000000);

		DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit);
	}
}
/*
 * Program PCI_INTERRUPT_LINE for a device, using the controller's
 * map_irq() hook (if any) to translate slot/pin to a system IRQ.
 */
static void __init
pciauto_setup_irq(struct pci_controller *pci_ctrl, struct pci_dev *dev,
		  int devfn)
{
	u8 pin;
	int irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);

	/* Fix illegal pin numbers. */
	if (pin == 0 || pin > 4)
		pin = 1;

	irq = pci_ctrl->map_irq ?
		pci_ctrl->map_irq(dev, PCI_SLOT(devfn), pin) : 0;
	if (irq == -1)
		irq = 0;

	DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);

	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
/*
 * First-pass setup of a PCI-PCI bridge, before the bus behind it is
 * scanned.  Programs the bus number registers (subordinate is set to
 * the 0xff maximum until the scan determines the real value), aligns
 * both allocators, records their tops in *iosave/*memsave, and writes
 * the bridge's memory and I/O window *limits* from those tops.  The
 * matching bases are written by pciauto_postscan_setup_bridge().
 */
static void __init
pciauto_prescan_setup_bridge(struct pci_dev *dev, int current_bus,
			     int sub_bus, int *iosave, int *memsave)
{
	/* Configure bus number registers */
	pci_write_config_byte(dev, PCI_PRIMARY_BUS, current_bus);
	pci_write_config_byte(dev, PCI_SECONDARY_BUS, sub_bus + 1);
	pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, 0xff);

	/* Round memory allocator to 1MB boundary */
	pciauto_upper_memspc &= ~(0x100000 - 1);
	*memsave = pciauto_upper_memspc;

	/* Round I/O allocator to 4KB boundary */
	pciauto_upper_iospc &= ~(0x1000 - 1);
	*iosave = pciauto_upper_iospc;

	/* Set up memory and I/O filter limits, assume 32-bit I/O space */
	pci_write_config_word(dev, PCI_MEMORY_LIMIT,
			      ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
	pci_write_config_byte(dev, PCI_IO_LIMIT,
			      ((pciauto_upper_iospc - 1) & 0x0000f000) >> 8);
	pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
			      ((pciauto_upper_iospc - 1) & 0xffff0000) >> 16);
}
/*
 * Second-pass bridge setup, after the downstream bus was scanned.
 * Writes the real subordinate bus number, closes the memory,
 * prefetchable-memory and I/O windows around what the scan consumed
 * (allocating a minimum-sized window if nothing was used), and
 * enables I/O, memory and bus-master access on the bridge.
 */
static void __init
pciauto_postscan_setup_bridge(struct pci_dev *dev, int current_bus, int sub_bus,
			      int *iosave, int *memsave)
{
	int cmdstat;

	/* Configure bus number registers */
	pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, sub_bus);

	/*
	 * Round memory allocator to 1MB boundary.
	 * If no space used, allocate minimum.
	 */
	pciauto_upper_memspc &= ~(0x100000 - 1);
	if (*memsave == pciauto_upper_memspc)
		pciauto_upper_memspc -= 0x00100000;

	pci_write_config_word(dev, PCI_MEMORY_BASE, pciauto_upper_memspc >> 16);

	/* Allocate 1MB for pre-fetch */
	pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT,
			      ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
	pciauto_upper_memspc -= 0x100000;

	pci_write_config_word(dev, PCI_PREF_MEMORY_BASE,
			      pciauto_upper_memspc >> 16);

	/* Round I/O allocator to 4KB boundary */
	pciauto_upper_iospc &= ~(0x1000 - 1);
	if (*iosave == pciauto_upper_iospc)
		pciauto_upper_iospc -= 0x1000;

	pci_write_config_byte(dev, PCI_IO_BASE,
			      (pciauto_upper_iospc & 0x0000f000) >> 8);
	pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
			      pciauto_upper_iospc >> 16);

	/* Enable memory and I/O accesses, enable bus master */
	pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
	pci_write_config_dword(dev, PCI_COMMAND,
			       cmdstat |
			       PCI_COMMAND_IO |
			       PCI_COMMAND_MEMORY |
			       PCI_COMMAND_MASTER);
}
/*
* Scan the current PCI bus.
*/
/*
 * Scan the current PCI bus.
 *
 * Walks every device/function on @current_bus, assigns BAR and
 * interrupt resources, recurses behind any PCI-PCI bridge, and
 * enables I/O, memory and bus-master access for each peripheral.
 * Returns the highest (subordinate) bus number discovered.
 */
int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
{
	int sub_bus, pci_devfn, pci_class, cmdstat, found_multi = 0;
	unsigned short vid;
	unsigned char header_type;
	struct pci_dev *dev = &pciauto_dev;

	/* Route config cycles through this controller's ops */
	pciauto_dev.bus = &pciauto_bus;
	pciauto_dev.sysdata = pci_ctrl;
	pciauto_bus.ops = pci_ctrl->ops;

	/*
	 * Fetch our I/O and memory space upper boundaries used
	 * to allocated base addresses on this pci_controller.
	 */
	if (current_bus == pci_ctrl->first_busno)
	{
		pciauto_upper_iospc = pci_ctrl->io_resource.end + 1;
		pciauto_upper_memspc = pci_ctrl->mem_resources[0].end + 1;
	}

	sub_bus = current_bus;

	for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++)
	{
		/* Skip our host bridge */
		if ((current_bus == pci_ctrl->first_busno) && (pci_devfn == 0))
			continue;

		/* Non-zero functions exist only on multi-function devices */
		if (PCI_FUNC(pci_devfn) && !found_multi)
			continue;

		pciauto_bus.number = current_bus;
		pciauto_dev.devfn = pci_devfn;

		/* If config space read fails from this device, move on */
		if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type))
			continue;

		if (!PCI_FUNC(pci_devfn))
			found_multi = header_type & 0x80;

		pci_read_config_word(dev, PCI_VENDOR_ID, &vid);

		/* All-ones/all-zeros vendor ID: no device present */
		if (vid == 0xffff || vid == 0x0000) {
			found_multi = 0;
			continue;
		}

		pci_read_config_dword(dev, PCI_CLASS_REVISION, &pci_class);

		if ((pci_class >> 16) == PCI_CLASS_BRIDGE_PCI) {
			int iosave, memsave;

			DBG("PCI Autoconfig: Found P2P bridge, device %d\n",
			    PCI_SLOT(pci_devfn));

			/* Allocate PCI I/O and/or memory space */
			pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);

			pciauto_prescan_setup_bridge(dev, current_bus, sub_bus,
						     &iosave, &memsave);
			/* Recurse into the bus behind the bridge */
			sub_bus = pciauto_bus_scan(pci_ctrl, sub_bus + 1);
			pciauto_postscan_setup_bridge(dev, current_bus, sub_bus,
						      &iosave, &memsave);
			/* Restore bus number clobbered by the recursion */
			pciauto_bus.number = current_bus;

			continue;
		}

#if 0
		/* Skip legacy mode IDE controller */
		if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) {

			unsigned char prg_iface;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface);

			if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) {
				DBG("PCI Autoconfig: Skipping legacy mode "
				    "IDE controller\n");
				continue;
			}
		}
#endif

		/*
		 * Found a peripheral, enable some standard
		 * settings
		 */
		pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
		pci_write_config_dword(dev, PCI_COMMAND,
				       cmdstat |
				       PCI_COMMAND_IO |
				       PCI_COMMAND_MEMORY |
				       PCI_COMMAND_MASTER);
		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);

		/* Allocate PCI I/O and/or memory space */
		DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
		    current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn));

		pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
		pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
	}
	return sub_bus;
}
| gpl-2.0 |
mp3deviant721/boeffla-kernel-cm-bacon-mod | arch/xtensa/lib/pci-auto.c | 9563 | 9268 | /*
* arch/xtensa/lib/pci-auto.c
*
* PCI autoconfiguration library
*
* Copyright (C) 2001 - 2005 Tensilica Inc.
*
* Chris Zankel <zankel@tensilica.com, cez@zankel.net>
*
* Based on work from Matt Porter <mporter@mvista.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <asm/pci-bridge.h>
/*
*
* Setting up a PCI
*
* pci_ctrl->first_busno = <first bus number (0)>
* pci_ctrl->last_busno = <last bus number (0xff)>
* pci_ctrl->ops = <PCI config operations>
* pci_ctrl->map_irq = <function to return the interrupt number for a device>
*
* pci_ctrl->io_space.start = <IO space start address (PCI view)>
* pci_ctrl->io_space.end = <IO space end address (PCI view)>
* pci_ctrl->io_space.base = <IO space offset: address 0 from CPU space>
* pci_ctrl->mem_space.start = <MEM space start address (PCI view)>
* pci_ctrl->mem_space.end = <MEM space end address (PCI view)>
* pci_ctrl->mem_space.base = <MEM space offset: address 0 from CPU space>
*
* pcibios_init_resource(&pci_ctrl->io_resource, <IO space start>,
* <IO space end>, IORESOURCE_IO, "PCI host bridge");
* pcibios_init_resource(&pci_ctrl->mem_resources[0], <MEM space start>,
* <MEM space end>, IORESOURCE_MEM, "PCI host bridge");
*
* pci_ctrl->last_busno = pciauto_bus_scan(pci_ctrl,pci_ctrl->first_busno);
*
* int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
*
*/
/* define DEBUG to print some debugging messages. */
#undef DEBUG
#ifdef DEBUG
# define DBG(x...) printk(x)
#else
# define DBG(x...)
#endif
/* Tops of the I/O and memory allocators; BAR assignment subtracts the
 * aligned size, so addresses grow downward from these. */
static int pciauto_upper_iospc;
static int pciauto_upper_memspc;

/* Scratch pci_dev/pci_bus used to issue config cycles while scanning;
 * only bus, devfn, sysdata and ops are ever filled in. */
static struct pci_dev pciauto_dev;
static struct pci_bus pciauto_bus;
/*
* Helper functions
*/
/* Initialize the bars of a PCI device. */
/*
 * Size and assign the base address registers (BARs) of a device.
 *
 * Each BAR is probed by writing all-ones and reading back the size
 * mask, then assigned space from the top-down I/O or memory
 * allocator.  @bar_limit is the config-space offset of the last BAR
 * to consider (PCI_BASE_ADDRESS_1 for bridges, ..._5 for devices).
 */
static void __init
pciauto_setup_bars(struct pci_dev *dev, int bar_limit)
{
	int bar_size;
	int bar, bar_nr;
	int *upper_limit;
	int found_mem64;

	for (bar = PCI_BASE_ADDRESS_0, bar_nr = 0;
	     bar <= bar_limit;
	     bar += 4, bar_nr++)
	{
		/* Bug fix: reset the 64-bit flag for every BAR.  It was
		 * previously set once and never cleared, so after one
		 * 64-bit memory BAR every following BAR (even I/O) got
		 * the extra upper-dword write and skipped a BAR slot. */
		found_mem64 = 0;

		/* Tickle the BAR and get the size */
		pci_write_config_dword(dev, bar, 0xffffffff);
		pci_read_config_dword(dev, bar, &bar_size);

		/* If BAR is not implemented go to the next BAR */
		if (!bar_size)
			continue;

		/* Check the BAR type and set our address mask */
		if (bar_size & PCI_BASE_ADDRESS_SPACE_IO) {
			bar_size &= PCI_BASE_ADDRESS_IO_MASK;
			upper_limit = &pciauto_upper_iospc;
			DBG("PCI Autoconfig: BAR %d, I/O, ", bar_nr);
		} else {
			if ((bar_size & PCI_BASE_ADDRESS_MEM_TYPE_MASK) ==
			    PCI_BASE_ADDRESS_MEM_TYPE_64)
				found_mem64 = 1;
			bar_size &= PCI_BASE_ADDRESS_MEM_MASK;
			upper_limit = &pciauto_upper_memspc;
			DBG("PCI Autoconfig: BAR %d, Mem, ", bar_nr);
		}

		/* Allocate a base address (bar_size is negative!) */
		*upper_limit = (*upper_limit + bar_size) & bar_size;

		/* Write it out and update our limit */
		pci_write_config_dword(dev, bar, *upper_limit);

		/*
		 * If we are a 64-bit decoder then increment to the
		 * upper 32 bits of the bar and force it to locate
		 * in the lower 4GB of memory.
		 */
		if (found_mem64)
			pci_write_config_dword(dev, (bar += 4), 0x00000000);

		DBG("size=0x%x, address=0x%x\n", ~bar_size + 1, *upper_limit);
	}
}
/*
 * Program PCI_INTERRUPT_LINE for a device, using the controller's
 * map_irq() hook (if any) to translate slot/pin to a system IRQ.
 */
static void __init
pciauto_setup_irq(struct pci_controller *pci_ctrl, struct pci_dev *dev,
		  int devfn)
{
	u8 pin;
	int irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);

	/* Fix illegal pin numbers. */
	if (pin == 0 || pin > 4)
		pin = 1;

	irq = pci_ctrl->map_irq ?
		pci_ctrl->map_irq(dev, PCI_SLOT(devfn), pin) : 0;
	if (irq == -1)
		irq = 0;

	DBG("PCI Autoconfig: Interrupt %d, pin %d\n", irq, pin);

	pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
}
/*
 * First-pass setup of a PCI-PCI bridge, before the bus behind it is
 * scanned.  Programs the bus number registers (subordinate is set to
 * the 0xff maximum until the scan determines the real value), aligns
 * both allocators, records their tops in *iosave/*memsave, and writes
 * the bridge's memory and I/O window *limits* from those tops.  The
 * matching bases are written by pciauto_postscan_setup_bridge().
 */
static void __init
pciauto_prescan_setup_bridge(struct pci_dev *dev, int current_bus,
			     int sub_bus, int *iosave, int *memsave)
{
	/* Configure bus number registers */
	pci_write_config_byte(dev, PCI_PRIMARY_BUS, current_bus);
	pci_write_config_byte(dev, PCI_SECONDARY_BUS, sub_bus + 1);
	pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, 0xff);

	/* Round memory allocator to 1MB boundary */
	pciauto_upper_memspc &= ~(0x100000 - 1);
	*memsave = pciauto_upper_memspc;

	/* Round I/O allocator to 4KB boundary */
	pciauto_upper_iospc &= ~(0x1000 - 1);
	*iosave = pciauto_upper_iospc;

	/* Set up memory and I/O filter limits, assume 32-bit I/O space */
	pci_write_config_word(dev, PCI_MEMORY_LIMIT,
			      ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
	pci_write_config_byte(dev, PCI_IO_LIMIT,
			      ((pciauto_upper_iospc - 1) & 0x0000f000) >> 8);
	pci_write_config_word(dev, PCI_IO_LIMIT_UPPER16,
			      ((pciauto_upper_iospc - 1) & 0xffff0000) >> 16);
}
/*
 * Second-pass bridge setup, after the downstream bus was scanned.
 * Writes the real subordinate bus number, closes the memory,
 * prefetchable-memory and I/O windows around what the scan consumed
 * (allocating a minimum-sized window if nothing was used), and
 * enables I/O, memory and bus-master access on the bridge.
 */
static void __init
pciauto_postscan_setup_bridge(struct pci_dev *dev, int current_bus, int sub_bus,
			      int *iosave, int *memsave)
{
	int cmdstat;

	/* Configure bus number registers */
	pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, sub_bus);

	/*
	 * Round memory allocator to 1MB boundary.
	 * If no space used, allocate minimum.
	 */
	pciauto_upper_memspc &= ~(0x100000 - 1);
	if (*memsave == pciauto_upper_memspc)
		pciauto_upper_memspc -= 0x00100000;

	pci_write_config_word(dev, PCI_MEMORY_BASE, pciauto_upper_memspc >> 16);

	/* Allocate 1MB for pre-fetch */
	pci_write_config_word(dev, PCI_PREF_MEMORY_LIMIT,
			      ((pciauto_upper_memspc - 1) & 0xfff00000) >> 16);
	pciauto_upper_memspc -= 0x100000;

	pci_write_config_word(dev, PCI_PREF_MEMORY_BASE,
			      pciauto_upper_memspc >> 16);

	/* Round I/O allocator to 4KB boundary */
	pciauto_upper_iospc &= ~(0x1000 - 1);
	if (*iosave == pciauto_upper_iospc)
		pciauto_upper_iospc -= 0x1000;

	pci_write_config_byte(dev, PCI_IO_BASE,
			      (pciauto_upper_iospc & 0x0000f000) >> 8);
	pci_write_config_word(dev, PCI_IO_BASE_UPPER16,
			      pciauto_upper_iospc >> 16);

	/* Enable memory and I/O accesses, enable bus master */
	pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
	pci_write_config_dword(dev, PCI_COMMAND,
			       cmdstat |
			       PCI_COMMAND_IO |
			       PCI_COMMAND_MEMORY |
			       PCI_COMMAND_MASTER);
}
/*
* Scan the current PCI bus.
*/
/*
 * Scan the current PCI bus.
 *
 * Walks every device/function on @current_bus, assigns BAR and
 * interrupt resources, recurses behind any PCI-PCI bridge, and
 * enables I/O, memory and bus-master access for each peripheral.
 * Returns the highest (subordinate) bus number discovered.
 */
int __init pciauto_bus_scan(struct pci_controller *pci_ctrl, int current_bus)
{
	int sub_bus, pci_devfn, pci_class, cmdstat, found_multi = 0;
	unsigned short vid;
	unsigned char header_type;
	struct pci_dev *dev = &pciauto_dev;

	/* Route config cycles through this controller's ops */
	pciauto_dev.bus = &pciauto_bus;
	pciauto_dev.sysdata = pci_ctrl;
	pciauto_bus.ops = pci_ctrl->ops;

	/*
	 * Fetch our I/O and memory space upper boundaries used
	 * to allocated base addresses on this pci_controller.
	 */
	if (current_bus == pci_ctrl->first_busno)
	{
		pciauto_upper_iospc = pci_ctrl->io_resource.end + 1;
		pciauto_upper_memspc = pci_ctrl->mem_resources[0].end + 1;
	}

	sub_bus = current_bus;

	for (pci_devfn = 0; pci_devfn < 0xff; pci_devfn++)
	{
		/* Skip our host bridge */
		if ((current_bus == pci_ctrl->first_busno) && (pci_devfn == 0))
			continue;

		/* Non-zero functions exist only on multi-function devices */
		if (PCI_FUNC(pci_devfn) && !found_multi)
			continue;

		pciauto_bus.number = current_bus;
		pciauto_dev.devfn = pci_devfn;

		/* If config space read fails from this device, move on */
		if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type))
			continue;

		if (!PCI_FUNC(pci_devfn))
			found_multi = header_type & 0x80;

		pci_read_config_word(dev, PCI_VENDOR_ID, &vid);

		/* All-ones/all-zeros vendor ID: no device present */
		if (vid == 0xffff || vid == 0x0000) {
			found_multi = 0;
			continue;
		}

		pci_read_config_dword(dev, PCI_CLASS_REVISION, &pci_class);

		if ((pci_class >> 16) == PCI_CLASS_BRIDGE_PCI) {
			int iosave, memsave;

			DBG("PCI Autoconfig: Found P2P bridge, device %d\n",
			    PCI_SLOT(pci_devfn));

			/* Allocate PCI I/O and/or memory space */
			pciauto_setup_bars(dev, PCI_BASE_ADDRESS_1);

			pciauto_prescan_setup_bridge(dev, current_bus, sub_bus,
						     &iosave, &memsave);
			/* Recurse into the bus behind the bridge */
			sub_bus = pciauto_bus_scan(pci_ctrl, sub_bus + 1);
			pciauto_postscan_setup_bridge(dev, current_bus, sub_bus,
						      &iosave, &memsave);
			/* Restore bus number clobbered by the recursion */
			pciauto_bus.number = current_bus;

			continue;
		}

#if 0
		/* Skip legacy mode IDE controller */
		if ((pci_class >> 16) == PCI_CLASS_STORAGE_IDE) {

			unsigned char prg_iface;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &prg_iface);

			if (!(prg_iface & PCIAUTO_IDE_MODE_MASK)) {
				DBG("PCI Autoconfig: Skipping legacy mode "
				    "IDE controller\n");
				continue;
			}
		}
#endif

		/*
		 * Found a peripheral, enable some standard
		 * settings
		 */
		pci_read_config_dword(dev, PCI_COMMAND, &cmdstat);
		pci_write_config_dword(dev, PCI_COMMAND,
				       cmdstat |
				       PCI_COMMAND_IO |
				       PCI_COMMAND_MEMORY |
				       PCI_COMMAND_MASTER);
		pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0x80);

		/* Allocate PCI I/O and/or memory space */
		DBG("PCI Autoconfig: Found Bus %d, Device %d, Function %d\n",
		    current_bus, PCI_SLOT(pci_devfn), PCI_FUNC(pci_devfn));

		pciauto_setup_bars(dev, PCI_BASE_ADDRESS_5);
		pciauto_setup_irq(pci_ctrl, dev, pci_devfn);
	}
	return sub_bus;
}
| gpl-2.0 |
mythos234/OnePlus2testing | drivers/isdn/hardware/eicon/diva.c | 9563 | 17142 | /* $Id: diva.c,v 1.21.4.1 2004/05/08 14:33:43 armin Exp $ */
#define CARDTYPE_H_WANT_DATA 1
#define CARDTYPE_H_WANT_IDI_DATA 0
#define CARDTYPE_H_WANT_RESOURCE_DATA 0
#define CARDTYPE_H_WANT_FILE_DATA 0
#include "platform.h"
#include "debuglib.h"
#include "cardtype.h"
#include "pc.h"
#include "di_defs.h"
#include "di.h"
#include "io.h"
#include "pc_maint.h"
#include "xdi_msg.h"
#include "xdi_adapter.h"
#include "diva_pci.h"
#include "diva.h"
#ifdef CONFIG_ISDN_DIVAS_PRIPCI
#include "os_pri.h"
#endif
#ifdef CONFIG_ISDN_DIVAS_BRIPCI
#include "os_bri.h"
#include "os_4bri.h"
#endif
/* Table of active adapters, indexed by controller number, plus the
 * request table and /proc helpers defined elsewhere in the driver. */
PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
extern IDI_CALL Requests[MAX_ADAPTER];
extern int create_adapter_proc(diva_os_xdi_adapter_t *a);
extern void remove_adapter_proc(diva_os_xdi_adapter_t *a);

/* Generate a per-slot request trampoline that forwards an ENTITY
 * request to adapter N's DIRequest handler, if slot N is populated. */
#define DivaIdiReqFunc(N) \
static void DivaIdiRequest##N(ENTITY *e) \
{ if (IoAdapters[N]) (*IoAdapters[N]->DIRequest)(IoAdapters[N], e); }

/*
**  Create own 32 Adapters
*/
DivaIdiReqFunc(0)
DivaIdiReqFunc(1)
DivaIdiReqFunc(2)
DivaIdiReqFunc(3)
DivaIdiReqFunc(4)
DivaIdiReqFunc(5)
DivaIdiReqFunc(6)
DivaIdiReqFunc(7)
DivaIdiReqFunc(8)
DivaIdiReqFunc(9)
DivaIdiReqFunc(10)
DivaIdiReqFunc(11)
DivaIdiReqFunc(12)
DivaIdiReqFunc(13)
DivaIdiReqFunc(14)
DivaIdiReqFunc(15)
DivaIdiReqFunc(16)
DivaIdiReqFunc(17)
DivaIdiReqFunc(18)
DivaIdiReqFunc(19)
DivaIdiReqFunc(20)
DivaIdiReqFunc(21)
DivaIdiReqFunc(22)
DivaIdiReqFunc(23)
DivaIdiReqFunc(24)
DivaIdiReqFunc(25)
DivaIdiReqFunc(26)
DivaIdiReqFunc(27)
DivaIdiReqFunc(28)
DivaIdiReqFunc(29)
DivaIdiReqFunc(30)
DivaIdiReqFunc(31)
/*
**  LOCALS
*/

/* List of all probed adapters, linked via diva_os_xdi_adapter_t.link. */
static LIST_HEAD(adapter_queue);

/* Layout of an XLOG retrieval request/response exchanged with the
 * card's maintenance interface. */
typedef struct _diva_get_xlog {
	word command;
	byte req;
	byte rc;
	byte data[sizeof(struct mi_pc_maint)];
} diva_get_xlog_t;
/* Associates a cardtype ordinal with the board-family init routine
 * used to bring that card up. */
typedef struct _diva_supported_cards_info {
	int CardOrdinal;
	diva_init_card_proc_t init_card;
} diva_supported_cards_info_t;

/* Cards handled by this driver; terminated by CardOrdinal == -1. */
static diva_supported_cards_info_t divas_supported_cards[] = {
#ifdef CONFIG_ISDN_DIVAS_PRIPCI
	/*
	  PRI Cards
	*/
	{CARDTYPE_DIVASRV_P_30M_PCI, diva_pri_init_card},
	/*
	  PRI Rev.2 Cards
	*/
	{CARDTYPE_DIVASRV_P_30M_V2_PCI, diva_pri_init_card},
	/*
	  PRI Rev.2 VoIP Cards
	*/
	{CARDTYPE_DIVASRV_VOICE_P_30M_V2_PCI, diva_pri_init_card},
#endif
#ifdef CONFIG_ISDN_DIVAS_BRIPCI
	/*
	  4BRI Rev 1 Cards
	*/
	{CARDTYPE_DIVASRV_Q_8M_PCI, diva_4bri_init_card},
	{CARDTYPE_DIVASRV_VOICE_Q_8M_PCI, diva_4bri_init_card},
	/*
	  4BRI Rev 2 Cards
	*/
	{CARDTYPE_DIVASRV_Q_8M_V2_PCI, diva_4bri_init_card},
	{CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI, diva_4bri_init_card},
	/*
	  4BRI Based BRI Rev 2 Cards
	*/
	{CARDTYPE_DIVASRV_B_2M_V2_PCI, diva_4bri_init_card},
	{CARDTYPE_DIVASRV_B_2F_PCI, diva_4bri_init_card},
	{CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI, diva_4bri_init_card},
	/*
	  BRI
	*/
	{CARDTYPE_MAESTRA_PCI, diva_bri_init_card},
#endif
	/*
	  EOL
	*/
	{-1}
};
static void diva_init_request_array(void);
static void *divas_create_pci_card(int handle, void *pci_dev_handle);
static diva_os_spin_lock_t adapter_lock;
/*
 * Check whether @nr consecutive controller slots starting at @base are
 * all unused in IoAdapters[].  Called with adapter_lock held (see
 * diva_driver_add_card()).
 *
 * Returns 0 when every slot in the range is free, -1 otherwise.
 */
static int diva_find_free_adapters(int base, int nr)
{
    int slot;

    for (slot = base; slot < base + nr; slot++) {
        if (IoAdapters[slot])
            return (-1);
    }

    return (0);
}
/*
 * Return the adapter that follows @what on adapter_queue, or NULL when
 * @what is NULL or is the last element (its successor is the list head).
 */
static diva_os_xdi_adapter_t *diva_q_get_next(struct list_head *what)
{
    struct list_head *next;

    if (!what)
        return NULL;

    next = what->next;
    if (next == &adapter_queue)
        return NULL;

    return list_entry(next, diva_os_xdi_adapter_t, link);
}
/* --------------------------------------------------------------------------
Add card to the card list
-------------------------------------------------------------------------- */
/*
 * Register a newly probed PCI card of type @CardOrdinal.
 *
 * Creates the adapter object(s), finds a free run of controller slots in
 * IoAdapters[] (4 consecutive slots for 4BRI family cards, 1 otherwise),
 * publishes the adapter(s) there and creates their /proc entries.
 *
 * Returns the master adapter object, or NULL when the card type is not
 * supported, creation fails, or no free slot run exists.
 */
void *diva_driver_add_card(void *pdev, unsigned long CardOrdinal)
{
diva_os_spin_lock_magic_t old_irql;
diva_os_xdi_adapter_t *pdiva, *pa;
int i, j, max, nr;
for (i = 0; divas_supported_cards[i].CardOrdinal != -1; i++) {
if (divas_supported_cards[i].CardOrdinal == CardOrdinal) {
if (!(pdiva = divas_create_pci_card(i, pdev))) {
return NULL;
}
/* 4BRI family cards occupy four consecutive slots (one master
 * plus three slaves); everything else occupies a single slot. */
switch (CardOrdinal) {
case CARDTYPE_DIVASRV_Q_8M_PCI:
case CARDTYPE_DIVASRV_VOICE_Q_8M_PCI:
case CARDTYPE_DIVASRV_Q_8M_V2_PCI:
case CARDTYPE_DIVASRV_VOICE_Q_8M_V2_PCI:
max = MAX_ADAPTER - 4;
nr = 4;
break;
default:
max = MAX_ADAPTER;
nr = 1;
}
/* NOTE: 'i' is reused below as the slot index; the card-table
 * index is no longer needed because both branches return. */
diva_os_enter_spin_lock(&adapter_lock, &old_irql, "add card");
for (i = 0; i < max; i++) {
if (!diva_find_free_adapters(i, nr)) {
pdiva->controller = i + 1;
pdiva->xdi_adapter.ANum = pdiva->controller;
IoAdapters[i] = &pdiva->xdi_adapter;
/* The lock is dropped around create_adapter_proc()/DBG_LOG()
 * calls and re-taken afterwards. */
diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
create_adapter_proc(pdiva); /* add adapter to proc file system */
DBG_LOG(("add %s:%d",
CardProperties
[CardOrdinal].Name,
pdiva->controller))
diva_os_enter_spin_lock(&adapter_lock, &old_irql, "add card");
pa = pdiva;
for (j = 1; j < nr; j++) { /* slave adapters, if any */
/* Slaves were queued right after the master by init_card
 * (see diva_add_slave_adapter()); walk them in order. */
pa = diva_q_get_next(&pa->link);
if (pa && !pa->interface.cleanup_adapter_proc) {
pa->controller = i + 1 + j;
pa->xdi_adapter.ANum = pa->controller;
IoAdapters[i + j] = &pa->xdi_adapter;
diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
DBG_LOG(("add slave adapter (%d)",
pa->controller))
create_adapter_proc(pa); /* add adapter to proc file system */
diva_os_enter_spin_lock(&adapter_lock, &old_irql, "add card");
} else {
DBG_ERR(("slave adapter problem"))
break;
}
}
diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
return (pdiva);
}
}
diva_os_leave_spin_lock(&adapter_lock, &old_irql, "add card");
/*
Not able to add adapter - remove it and return error
*/
DBG_ERR(("can not alloc request array"))
diva_driver_remove_card(pdiva);
return NULL;
}
}
return NULL;
}
/* --------------------------------------------------------------------------
Called on driver load, MAIN, main, DriverEntry
-------------------------------------------------------------------------- */
/*
 * Module load hook: set up the adapter lock, clear the global adapter
 * slot table and install the per-slot IDI request trampolines.
 * Always succeeds.
 */
int divasa_xdi_driver_entry(void)
{
    diva_os_initialize_spin_lock(&adapter_lock, "adapter");
    memset(IoAdapters, 0x00, sizeof(IoAdapters));
    diva_init_request_array();

    return (0);
}
/* --------------------------------------------------------------------------
Remove adapter from list
-------------------------------------------------------------------------- */
/*
 * Atomically pop the first adapter off adapter_queue.
 * Returns the unlinked adapter, or NULL when the queue is empty.
 */
static diva_os_xdi_adapter_t *get_and_remove_from_queue(void)
{
    diva_os_xdi_adapter_t *adapter = NULL;
    diva_os_spin_lock_magic_t old_irql;

    diva_os_enter_spin_lock(&adapter_lock, &old_irql, "driver_unload");
    if (!list_empty(&adapter_queue)) {
        struct list_head *head = adapter_queue.next;

        adapter = list_entry(head, diva_os_xdi_adapter_t, link);
        list_del(head);
    }
    diva_os_leave_spin_lock(&adapter_lock, &old_irql, "driver_unload");

    return (adapter);
}
/* --------------------------------------------------------------------------
Remove card from the card list
-------------------------------------------------------------------------- */
/*
 * Tear down the adapter @pdiva and any slave adapters that directly
 * follow it on adapter_queue (up to 3, i.e. the 4BRI case).
 *
 * Unlinks them from the queue under adapter_lock, runs the master's
 * cleanup hook, then clears the IoAdapters[] slots, removes the /proc
 * entries and frees the objects.
 *
 * NOTE(review): the enter/leave lock debug tags differ ("remode adapter"
 * vs "driver_unload") - looks like a copy/paste leftover, harmless.
 * NOTE(review): a[0]->interface.cleanup_adapter_proc is invoked without
 * a NULL check - presumably callers only pass fully initialized
 * adapters; confirm against call sites.
 */
void diva_driver_remove_card(void *pdiva)
{
diva_os_spin_lock_magic_t old_irql;
diva_os_xdi_adapter_t *a[4];
diva_os_xdi_adapter_t *pa;
int i;
pa = a[0] = (diva_os_xdi_adapter_t *) pdiva;
a[1] = a[2] = a[3] = NULL;
diva_os_enter_spin_lock(&adapter_lock, &old_irql, "remode adapter");
/* Collect up to 3 trailing slave adapters (stop at the first fully
 * initialized, i.e. non-slave, neighbour or end of queue). */
for (i = 1; i < 4; i++) {
if ((pa = diva_q_get_next(&pa->link))
&& !pa->interface.cleanup_adapter_proc) {
a[i] = pa;
} else {
break;
}
}
for (i = 0; ((i < 4) && a[i]); i++) {
list_del(&a[i]->link);
}
diva_os_leave_spin_lock(&adapter_lock, &old_irql, "driver_unload");
/* Only the master carries a cleanup hook; it tears down hardware
 * resources for the whole card. */
(*(a[0]->interface.cleanup_adapter_proc)) (a[0]);
for (i = 0; i < 4; i++) {
if (a[i]) {
if (a[i]->controller) {
DBG_LOG(("remove adapter (%d)",
a[i]->controller)) IoAdapters[a[i]->controller - 1] = NULL;
remove_adapter_proc(a[i]);
}
diva_os_free(0, a[i]);
}
}
}
/* --------------------------------------------------------------------------
Create diva PCI adapter and init internal adapter structures
-------------------------------------------------------------------------- */
static void *divas_create_pci_card(int handle, void *pci_dev_handle)
{
diva_supported_cards_info_t *pI = &divas_supported_cards[handle];
diva_os_spin_lock_magic_t old_irql;
diva_os_xdi_adapter_t *a;
DBG_LOG(("found %d-%s", pI->CardOrdinal, CardProperties[pI->CardOrdinal].Name))
if (!(a = (diva_os_xdi_adapter_t *) diva_os_malloc(0, sizeof(*a)))) {
DBG_ERR(("A: can't alloc adapter"));
return NULL;
}
memset(a, 0x00, sizeof(*a));
a->CardIndex = handle;
a->CardOrdinal = pI->CardOrdinal;
a->Bus = DIVAS_XDI_ADAPTER_BUS_PCI;
a->xdi_adapter.cardType = a->CardOrdinal;
a->resources.pci.bus = diva_os_get_pci_bus(pci_dev_handle);
a->resources.pci.func = diva_os_get_pci_func(pci_dev_handle);
a->resources.pci.hdev = pci_dev_handle;
/*
Add master adapter first, so slave adapters will receive higher
numbers as master adapter
*/
diva_os_enter_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
list_add_tail(&a->link, &adapter_queue);
diva_os_leave_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
if ((*(pI->init_card)) (a)) {
diva_os_enter_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
list_del(&a->link);
diva_os_leave_spin_lock(&adapter_lock, &old_irql, "found_pci_card");
diva_os_free(0, a);
DBG_ERR(("A: can't get adapter resources"));
return NULL;
}
return (a);
}
/* --------------------------------------------------------------------------
Called on driver unload FINIT, finit, Unload
-------------------------------------------------------------------------- */
void divasa_xdi_driver_unload(void)
{
diva_os_xdi_adapter_t *a;
while ((a = get_and_remove_from_queue())) {
if (a->interface.cleanup_adapter_proc) {
(*(a->interface.cleanup_adapter_proc)) (a);
}
if (a->controller) {
IoAdapters[a->controller - 1] = NULL;
remove_adapter_proc(a);
}
diva_os_free(0, a);
}
diva_os_destroy_spin_lock(&adapter_lock, "adapter");
}
/*
** Receive and process command from user mode utility
*/
/*
 * Look up the adapter addressed by a user-space configuration message.
 *
 * @os_handle: opaque per-open context, forwarded to @cp_fn
 * @src:       user-space buffer holding a diva_xdi_um_cfg_cmd_t
 * @length:    number of bytes available at @src
 * @cp_fn:     copy-from-user callback (returns <= 0 on failure)
 *
 * Returns the matching adapter object, or NULL when the message is too
 * short, cannot be copied, or names an unknown controller.
 */
void *diva_xdi_open_adapter(void *os_handle, const void __user *src,
			    int length,
			    divas_xdi_copy_from_user_fn_t cp_fn)
{
	diva_xdi_um_cfg_cmd_t msg;
	diva_os_xdi_adapter_t *a = NULL;
	diva_os_spin_lock_magic_t old_irql;
	struct list_head *tmp;

	/*
	 * Cast sizeof to int so the comparison stays signed: without the
	 * cast a negative length is converted to a huge size_t and slips
	 * past this check.  The cast also fixes the %d/size_t varargs
	 * mismatch in the debug message on 64-bit builds.
	 */
	if (length < (int) sizeof(diva_xdi_um_cfg_cmd_t)) {
		DBG_ERR(("A: A(?) open, msg too small (%d < %d)",
			 length, (int) sizeof(diva_xdi_um_cfg_cmd_t)))
		return NULL;
	}
	if ((*cp_fn) (os_handle, &msg, src, sizeof(msg)) <= 0) {
		DBG_ERR(("A: A(?) open, write error"))
		return NULL;
	}
	/* Find the adapter whose controller number matches the request. */
	diva_os_enter_spin_lock(&adapter_lock, &old_irql, "open_adapter");
	list_for_each(tmp, &adapter_queue) {
		a = list_entry(tmp, diva_os_xdi_adapter_t, link);
		if (a->controller == (int)msg.adapter)
			break;
		a = NULL;
	}
	diva_os_leave_spin_lock(&adapter_lock, &old_irql, "open_adapter");
	if (!a) {
		DBG_ERR(("A: A(%d) open, adapter not found", msg.adapter))
	}
	return (a);
}
/*
** Easy cleanup mailbox status
*/
/*
 * Reset the adapter's reply mailbox on close: clear the busy flag and
 * release any pending reply buffer.
 */
void diva_xdi_close_adapter(void *adapter, void *os_handle)
{
    diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
    void *pending = a->xdi_mbox.data;

    a->xdi_mbox.status &= ~DIVA_XDI_MBOX_BUSY;

    if (pending != NULL) {
        diva_os_free(0, pending);
        a->xdi_mbox.data = NULL;
    }
}
/*
 * Accept a command message from the user-mode utility and hand it to the
 * adapter's cmd_proc.
 *
 * @adapter:   adapter returned by diva_xdi_open_adapter()
 * @os_handle: opaque per-open context, forwarded to @cp_fn
 * @src:       user-space command buffer
 * @length:    number of bytes at @src
 * @cp_fn:     copy-from-user callback
 *
 * Returns the number of bytes consumed, or a negative code:
 * -1 mailbox busy, -2 out of memory, -3 short message / command failure.
 */
int
diva_xdi_write(void *adapter, void *os_handle, const void __user *src,
	       int length, divas_xdi_copy_from_user_fn_t cp_fn)
{
	diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
	void *data;

	if (a->xdi_mbox.status & DIVA_XDI_MBOX_BUSY) {
		DBG_ERR(("A: A(%d) write, mbox busy", a->controller))
		return (-1);
	}
	/*
	 * Cast sizeof to int so the comparison stays signed: without the
	 * cast a negative length converts to a huge size_t, passes the
	 * check and is then handed to diva_os_malloc().  The cast also
	 * fixes the %d/size_t varargs mismatch in the debug message.
	 */
	if (length < (int) sizeof(diva_xdi_um_cfg_cmd_t)) {
		DBG_ERR(("A: A(%d) write, message too small (%d < %d)",
			 a->controller, length,
			 (int) sizeof(diva_xdi_um_cfg_cmd_t)))
		return (-3);
	}
	if (!(data = diva_os_malloc(0, length))) {
		DBG_ERR(("A: A(%d) write, ENOMEM", a->controller))
		return (-2);
	}
	length = (*cp_fn) (os_handle, data, src, length);
	if (length > 0) {
		if ((*(a->interface.cmd_proc))
		    (a, (diva_xdi_um_cfg_cmd_t *) data, length)) {
			length = -3;
		}
	} else {
		DBG_ERR(("A: A(%d) write error (%d)", a->controller,
			 length))
	}
	diva_os_free(0, data);
	return (length);
}
/*
** Write answers to user mode utility, if any
*/
/*
 * Copy a pending mailbox reply back to the user-mode utility.
 *
 * Returns the number of bytes copied, or a negative code:
 * -1 no reply pending, -2 reply buffer missing, -3 caller's buffer too
 * short for the pending reply.  On successful copy the mailbox buffer
 * is released and the busy flag cleared.
 */
int
diva_xdi_read(void *adapter, void *os_handle, void __user *dst,
	      int max_length, divas_xdi_copy_to_user_fn_t cp_fn)
{
diva_os_xdi_adapter_t *a = (diva_os_xdi_adapter_t *) adapter;
int ret;
if (!(a->xdi_mbox.status & DIVA_XDI_MBOX_BUSY)) {
DBG_ERR(("A: A(%d) rx mbox empty", a->controller))
return (-1);
}
if (!a->xdi_mbox.data) {
/* Busy flag set but no buffer: recover by clearing the flag. */
a->xdi_mbox.status &= ~DIVA_XDI_MBOX_BUSY;
DBG_ERR(("A: A(%d) rx ENOMEM", a->controller))
return (-2);
}
if (max_length < a->xdi_mbox.data_length) {
DBG_ERR(("A: A(%d) rx buffer too short(%d < %d)",
a->controller, max_length,
a->xdi_mbox.data_length))
return (-3);
}
ret = (*cp_fn) (os_handle, dst, a->xdi_mbox.data,
a->xdi_mbox.data_length);
/* Only consume the mailbox when the copy actually succeeded, so a
 * failed copy can be retried. */
if (ret > 0) {
diva_os_free(0, a->xdi_mbox.data);
a->xdi_mbox.data = NULL;
a->xdi_mbox.status &= ~DIVA_XDI_MBOX_BUSY;
}
return (ret);
}
/*
 * Shared interrupt entry point.  If a one-shot clear-interrupts hook is
 * armed it is run (once) instead of the regular ISR; otherwise the
 * adapter's ISR handles the interrupt.  Returns IRQ_NONE when the
 * context is missing or the adapter has no ISR installed.
 */
irqreturn_t diva_os_irq_wrapper(int irq, void *context)
{
    diva_os_xdi_adapter_t *adapter = context;
    diva_xdi_clear_interrupts_proc_t clear_int_proc;

    if (adapter == NULL || adapter->xdi_adapter.diva_isr_handler == NULL)
        return IRQ_NONE;

    clear_int_proc = adapter->clear_interrupts_proc;
    if (clear_int_proc != NULL) {
        /* one-shot: disarm before the next interrupt */
        (*clear_int_proc) (adapter);
        adapter->clear_interrupts_proc = NULL;
    } else {
        (*(adapter->xdi_adapter.diva_isr_handler)) (&adapter->xdi_adapter);
    }

    return IRQ_HANDLED;
}
/*
 * Install the 32 generated DivaIdiRequestN trampolines into the global
 * Requests[] table.  Each slot must reference its own distinct function,
 * so this cannot be a loop.
 */
static void diva_init_request_array(void)
{
Requests[0] = DivaIdiRequest0;
Requests[1] = DivaIdiRequest1;
Requests[2] = DivaIdiRequest2;
Requests[3] = DivaIdiRequest3;
Requests[4] = DivaIdiRequest4;
Requests[5] = DivaIdiRequest5;
Requests[6] = DivaIdiRequest6;
Requests[7] = DivaIdiRequest7;
Requests[8] = DivaIdiRequest8;
Requests[9] = DivaIdiRequest9;
Requests[10] = DivaIdiRequest10;
Requests[11] = DivaIdiRequest11;
Requests[12] = DivaIdiRequest12;
Requests[13] = DivaIdiRequest13;
Requests[14] = DivaIdiRequest14;
Requests[15] = DivaIdiRequest15;
Requests[16] = DivaIdiRequest16;
Requests[17] = DivaIdiRequest17;
Requests[18] = DivaIdiRequest18;
Requests[19] = DivaIdiRequest19;
Requests[20] = DivaIdiRequest20;
Requests[21] = DivaIdiRequest21;
Requests[22] = DivaIdiRequest22;
Requests[23] = DivaIdiRequest23;
Requests[24] = DivaIdiRequest24;
Requests[25] = DivaIdiRequest25;
Requests[26] = DivaIdiRequest26;
Requests[27] = DivaIdiRequest27;
Requests[28] = DivaIdiRequest28;
Requests[29] = DivaIdiRequest29;
Requests[30] = DivaIdiRequest30;
Requests[31] = DivaIdiRequest31;
}
/*
 * Dump the feature bits of controller number @card (1-based) to the
 * debug log, one Y/N line per DI_* feature flag.  Silently returns on
 * an out-of-range or unpopulated controller number.
 */
void diva_xdi_display_adapter_features(int card)
{
dword features;
if (!card || ((card - 1) >= MAX_ADAPTER) || !IoAdapters[card - 1]) {
return;
}
card--;
features = IoAdapters[card]->Properties.Features;
DBG_LOG(("FEATURES FOR ADAPTER: %d", card + 1))
DBG_LOG((" DI_FAX3 : %s",
(features & DI_FAX3) ? "Y" : "N"))
DBG_LOG((" DI_MODEM : %s",
(features & DI_MODEM) ? "Y" : "N"))
DBG_LOG((" DI_POST : %s",
(features & DI_POST) ? "Y" : "N"))
DBG_LOG((" DI_V110 : %s",
(features & DI_V110) ? "Y" : "N"))
DBG_LOG((" DI_V120 : %s",
(features & DI_V120) ? "Y" : "N"))
DBG_LOG((" DI_POTS : %s",
(features & DI_POTS) ? "Y" : "N"))
DBG_LOG((" DI_CODEC : %s",
(features & DI_CODEC) ? "Y" : "N"))
DBG_LOG((" DI_MANAGE : %s",
(features & DI_MANAGE) ? "Y" : "N"))
DBG_LOG((" DI_V_42 : %s",
(features & DI_V_42) ? "Y" : "N"))
DBG_LOG((" DI_EXTD_FAX : %s",
(features & DI_EXTD_FAX) ? "Y" : "N"))
DBG_LOG((" DI_AT_PARSER : %s",
(features & DI_AT_PARSER) ? "Y" : "N"))
DBG_LOG((" DI_VOICE_OVER_IP : %s",
(features & DI_VOICE_OVER_IP) ? "Y" : "N"))
}
/*
 * Append a slave adapter to adapter_queue under adapter_lock.  Called by
 * the 4BRI init code so slaves directly follow their master on the queue
 * (diva_driver_add_card() relies on this ordering).
 */
void diva_add_slave_adapter(diva_os_xdi_adapter_t *a)
{
    diva_os_spin_lock_magic_t irql;

    diva_os_enter_spin_lock(&adapter_lock, &irql, "add_slave");
    list_add_tail(&a->link, &adapter_queue);
    diva_os_leave_spin_lock(&adapter_lock, &irql, "add_slave");
}
/*
 * Issue a synchronous XLOG read (maintenance LOG request) to the adapter
 * and park the result in the reply mailbox for diva_xdi_read().
 *
 * Returns 0 on success (mailbox now busy with sizeof(struct mi_pc_maint)
 * bytes), -1 when the adapter is not initialized, allocation fails, or
 * the request is rejected by the card.
 */
int diva_card_read_xlog(diva_os_xdi_adapter_t *a)
{
    diva_get_xlog_t *req;
    byte *data;

    if (!a->xdi_adapter.Initialized || !a->xdi_adapter.DIRequest)
        return (-1);

    data = diva_os_malloc(0, sizeof(struct mi_pc_maint));
    if (!data)
        return (-1);
    memset(data, 0x00, sizeof(struct mi_pc_maint));

    req = diva_os_malloc(0, sizeof(*req));
    if (!req)
        goto err_free_data;

    req->command = 0x0400;
    req->req = LOG;
    req->rc = 0x00;

    (*(a->xdi_adapter.DIRequest)) (&a->xdi_adapter, (ENTITY *) req);

    /* success is signalled by a non-zero rc with the req field cleared */
    if (!req->rc || req->req)
        goto err_free_req;

    memcpy(data, &req->req, sizeof(struct mi_pc_maint));
    diva_os_free(0, req);

    a->xdi_mbox.data_length = sizeof(struct mi_pc_maint);
    a->xdi_mbox.data = data;
    a->xdi_mbox.status = DIVA_XDI_MBOX_BUSY;
    return (0);

err_free_req:
    diva_os_free(0, req);
err_free_data:
    diva_os_free(0, data);
    return (-1);
}
/*
 * Intentionally empty: nothing to release for firmware file handles in
 * this port.  NOTE(review): presumably kept to satisfy the common xdi
 * interface - confirm against the callers before removing.
 */
void xdiFreeFile(void *handle)
{
}
| gpl-2.0 |
zte8930/msm8930 | net/ceph/ceph_fs.c | 12379 | 1726 | /*
* Some non-inline ceph helpers
*/
#include <linux/module.h>
#include <linux/ceph/types.h>
/*
* return true if @layout appears to be valid
*/
/*
 * Validate an on-wire (little-endian) ceph file layout:
 *  - stripe unit and object size non-zero and 64k-aligned
 *  - object size a whole multiple of the stripe unit
 *  - stripe count non-zero
 * Returns 1 when the layout passes all checks, 0 otherwise.
 */
int ceph_file_layout_is_valid(const struct ceph_file_layout *layout)
{
    __u32 stripe_unit = le32_to_cpu(layout->fl_stripe_unit);
    __u32 stripe_count = le32_to_cpu(layout->fl_stripe_count);
    __u32 object_size = le32_to_cpu(layout->fl_object_size);

    /* stripe unit, object size must be non-zero, 64k increment */
    if (stripe_unit == 0 || (stripe_unit & (CEPH_MIN_STRIPE_UNIT - 1)))
        return 0;
    if (object_size == 0 || (object_size & (CEPH_MIN_STRIPE_UNIT - 1)))
        return 0;
    /* object size must be a multiple of stripe unit */
    if (object_size < stripe_unit || object_size % stripe_unit)
        return 0;
    /* stripe count must be non-zero */
    return stripe_count != 0;
}
/*
 * Translate POSIX open(2) flags into a CEPH_FILE_MODE_* value.
 * O_DIRECTORY short-circuits to PIN mode; otherwise the access mode
 * maps RD/WR/RDWR, with O_LAZY (where defined) OR-ing in LAZY.
 */
int ceph_flags_to_mode(int flags)
{
	int mode;

#ifdef O_DIRECTORY  /* fixme */
	if ((flags & O_DIRECTORY) == O_DIRECTORY)
		return CEPH_FILE_MODE_PIN;
#endif
	switch (flags & O_ACCMODE) {
	case O_WRONLY:
		mode = CEPH_FILE_MODE_WR;
		break;
	case O_RDONLY:
		mode = CEPH_FILE_MODE_RD;
		break;
	case O_RDWR:
	case O_ACCMODE: /* this is what the VFS does */
	default:
		/*
		 * default added so 'mode' can never be read uninitialized
		 * (and to silence -Wmaybe-uninitialized); unreachable on
		 * POSIX where O_ACCMODE == 3 and all four values are
		 * covered above.
		 */
		mode = CEPH_FILE_MODE_RDWR;
		break;
	}
#ifdef O_LAZY
	if (flags & O_LAZY)
		mode |= CEPH_FILE_MODE_LAZY;
#endif
	return mode;
}
EXPORT_SYMBOL(ceph_flags_to_mode);
/*
 * Compute the capability bits a client needs for a CEPH_FILE_MODE_*
 * bitmask: PIN is always required; RD adds shared/read/cache file caps;
 * WR additionally pulls in exclusive file caps plus auth and xattr caps;
 * LAZY adds lazy I/O.
 */
int ceph_caps_for_mode(int mode)
{
int caps = CEPH_CAP_PIN;
if (mode & CEPH_FILE_MODE_RD)
caps |= CEPH_CAP_FILE_SHARED |
CEPH_CAP_FILE_RD | CEPH_CAP_FILE_CACHE;
if (mode & CEPH_FILE_MODE_WR)
caps |= CEPH_CAP_FILE_EXCL |
CEPH_CAP_FILE_WR | CEPH_CAP_FILE_BUFFER |
CEPH_CAP_AUTH_SHARED | CEPH_CAP_AUTH_EXCL |
CEPH_CAP_XATTR_SHARED | CEPH_CAP_XATTR_EXCL;
if (mode & CEPH_FILE_MODE_LAZY)
caps |= CEPH_CAP_FILE_LAZYIO;
return caps;
}
EXPORT_SYMBOL(ceph_caps_for_mode);
| gpl-2.0 |
sohkis/android_kernel_motorola_shamu | drivers/ide/ide-iops.c | 13659 | 13870 | /*
* Copyright (C) 2000-2002 Andre Hedrick <andre@linux-ide.org>
* Copyright (C) 2003 Red Hat
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/bitops.h>
#include <linux/nmi.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>
/*
 * Forward an interrupt-mask request to the host's maskproc hook, if the
 * port provides one; a no-op otherwise.
 */
void SELECT_MASK(ide_drive_t *drive, int mask)
{
    const struct ide_port_ops *ops = drive->hwif->port_ops;

    if (ops != NULL && ops->maskproc != NULL)
        ops->maskproc(drive, mask);
}
/*
 * Read the drive's error register via the port's taskfile-read hook and
 * return its value.
 */
u8 ide_read_error(ide_drive_t *drive)
{
struct ide_taskfile tf;
drive->hwif->tp_ops->tf_read(drive, &tf, IDE_VALID_ERROR);
return tf.error;
}
EXPORT_SYMBOL_GPL(ide_read_error);
/*
 * Byte-swap the 256-word IDENTIFY buffer from the drive's little-endian
 * wire format to host order.  Compiles to a no-op on little-endian
 * builds; fails the build if the endianness macros are inconsistent.
 */
void ide_fix_driveid(u16 *id)
{
#ifndef __LITTLE_ENDIAN
# ifdef __BIG_ENDIAN
int i;
for (i = 0; i < 256; i++)
id[i] = __le16_to_cpu(id[i]);
# else
# error "Please fix <asm/byteorder.h>"
# endif
#endif
}
/*
 * ide_fixstring() cleans up and (optionally) byte-swaps a text string,
 * removing leading/trailing blanks and compressing internal blanks.
 * It is primarily used to tidy up the model name/number fields as
 * returned by the ATA_CMD_ID_ATA[PI] commands.
 */
void ide_fixstring(u8 *s, const int bytecount, const int byteswap)
{
u8 *p, *end = &s[bytecount & ~1]; /* bytecount must be even */
if (byteswap) {
/* convert from big-endian to host byte order */
for (p = s ; p != end ; p += 2)
be16_to_cpus((u16 *) p);
}
/* strip leading blanks */
p = s;
while (s != end && *s == ' ')
++s;
/* compress internal blanks and strip trailing blanks */
/* Keep the char just consumed (*(s-1)) unless it is a blank whose
 * successor is also a blank, a NUL, or the end of the buffer - i.e.
 * runs of blanks collapse to one and trailing blanks vanish. */
while (s != end && *s) {
if (*s++ != ' ' || (s != end && *s && *s != ' '))
*p++ = *(s-1);
}
/* wipe out trailing garbage */
while (p != end)
*p++ = '\0';
}
EXPORT_SYMBOL(ide_fixstring);
/*
 * This routine busy-waits for the drive status to be not "busy".
 * It then checks the status for all of the "good" bits and none
 * of the "bad" bits, and if all is okay it returns 0. All other
 * cases return error -- caller may then invoke ide_error().
 *
 * This routine should get fixed to not hog the cpu during extra long waits..
 * That could be done by busy-waiting for the first jiffy or two, and then
 * setting a timer to wake up at half second intervals thereafter,
 * until timeout is achieved, before timing out.
 */
/*
 * @drive:   drive to poll
 * @good:    status bits that must all be set
 * @bad:     status bits that must all be clear
 * @timeout: BUSY-wait budget in jiffies
 * @rstat:   out: last status value read
 *
 * Returns 0 on good status, -EBUSY when BUSY never cleared within
 * @timeout, -EFAULT when BUSY cleared but the status never satisfied
 * good/bad within the 10us settle window.
 */
int __ide_wait_stat(ide_drive_t *drive, u8 good, u8 bad,
		    unsigned long timeout, u8 *rstat)
{
ide_hwif_t *hwif = drive->hwif;
const struct ide_tp_ops *tp_ops = hwif->tp_ops;
unsigned long flags;
int i;
u8 stat;
udelay(1); /* spec allows drive 400ns to assert "BUSY" */
stat = tp_ops->read_status(hwif);
if (stat & ATA_BUSY) {
/* irqs are enabled during the poll so the rest of the system
 * keeps running; the original flags are restored afterwards */
local_save_flags(flags);
local_irq_enable_in_hardirq();
timeout += jiffies;
while ((stat = tp_ops->read_status(hwif)) & ATA_BUSY) {
if (time_after(jiffies, timeout)) {
/*
* One last read after the timeout in case
* heavy interrupt load made us not make any
* progress during the timeout..
*/
stat = tp_ops->read_status(hwif);
if ((stat & ATA_BUSY) == 0)
break;
local_irq_restore(flags);
*rstat = stat;
return -EBUSY;
}
}
local_irq_restore(flags);
}
/*
* Allow status to settle, then read it again.
* A few rare drives vastly violate the 400ns spec here,
* so we'll wait up to 10usec for a "good" status
* rather than expensively fail things immediately.
* This fix courtesy of Matthew Faupel & Niccolo Rigacci.
*/
for (i = 0; i < 10; i++) {
udelay(1);
stat = tp_ops->read_status(hwif);
if (OK_STAT(stat, good, bad)) {
*rstat = stat;
return 0;
}
}
*rstat = stat;
return -EFAULT;
}
/*
* In case of error returns error value after doing "*startstop = ide_error()".
* The caller should return the updated value of "startstop" in this case,
* "startstop" is unchanged when the function returns 0.
*/
/*
 * Wrapper around __ide_wait_stat() that routes failures through
 * ide_error().  "startstop" is only written on an error (or when the
 * drive has exceeded max_failures, in which case 1 is returned with
 * *startstop = ide_stopped); it is untouched on success (return 0).
 */
int ide_wait_stat(ide_startstop_t *startstop, ide_drive_t *drive, u8 good,
		  u8 bad, unsigned long timeout)
{
    int err;
    u8 stat;

    /* bail early if we've exceeded max_failures */
    if (drive->max_failures && (drive->failures > drive->max_failures)) {
        *startstop = ide_stopped;
        return 1;
    }

    err = __ide_wait_stat(drive, good, bad, timeout, &stat);
    if (err == 0)
        return 0;

    *startstop = ide_error(drive,
                           (err == -EBUSY) ? "status timeout" : "status error",
                           stat);
    return err;
}
EXPORT_SYMBOL(ide_wait_stat);
/**
* ide_in_drive_list - look for drive in black/white list
* @id: drive identifier
* @table: list to inspect
*
* Look for a drive in the blacklist and the whitelist tables
* Returns 1 if the drive is found in the table.
*/
/*
 * Look the drive up in @table by exact model-string match; if an entry
 * also names a firmware revision, it must occur as a substring of the
 * drive's firmware field.  Returns 1 on a match, 0 otherwise.
 */
int ide_in_drive_list(u16 *id, const struct drive_list_entry *table)
{
    const char *model = (char *)&id[ATA_ID_PROD];
    const char *firmware = (char *)&id[ATA_ID_FW_REV];

    while (table->id_model) {
        if (strcmp(table->id_model, model) == 0 &&
            (table->id_firmware == NULL ||
             strstr(firmware, table->id_firmware)))
            return 1;
        table++;
    }
    return 0;
}
EXPORT_SYMBOL_GPL(ide_in_drive_list);
/*
 * Early UDMA66 devices don't set bit14 to 1, only bit13 is valid.
 * Some optical devices with the buggy firmwares have the same problem.
 */
/* Model/firmware pairs consulted by eighty_ninty_three() below. */
static const struct drive_list_entry ivb_list[] = {
{ "QUANTUM FIREBALLlct10 05" , "A03.0900" },
{ "QUANTUM FIREBALLlct20 30" , "APL.0900" },
{ "TSSTcorp CDDVDW SH-S202J" , "SB00" },
{ "TSSTcorp CDDVDW SH-S202J" , "SB01" },
{ "TSSTcorp CDDVDW SH-S202N" , "SB00" },
{ "TSSTcorp CDDVDW SH-S202N" , "SB01" },
{ "TSSTcorp CDDVDW SH-S202H" , "SB00" },
{ "TSSTcorp CDDVDW SH-S202H" , "SB01" },
{ "SAMSUNG SP0822N" , "WA100-10" },
{ NULL , NULL }
};
/*
 * All hosts that use the 80c ribbon must use!
 * The name is derived from upper byte of word 93 and the 80c ribbon.
 */
/*
 * Decide whether the drive may run UDMA modes above UDMA33, i.e. whether
 * an 80-conductor cable (or SATA / short 40-pin cable) is in use.
 * Combines host-side cable info (hwif->cbl) with the drive-side
 * detection bits in IDENTIFY word 93, with special handling for "ivb"
 * devices whose word-93 bit 14 is unreliable.  Returns 1 when the
 * higher modes are allowed, 0 otherwise (warning once per drive).
 */
u8 eighty_ninty_three(ide_drive_t *drive)
{
ide_hwif_t *hwif = drive->hwif;
u16 *id = drive->id;
int ivb = ide_in_drive_list(id, ivb_list);
if (hwif->cbl == ATA_CBL_SATA || hwif->cbl == ATA_CBL_PATA40_SHORT)
return 1;
if (ivb)
printk(KERN_DEBUG "%s: skipping word 93 validity check\n",
drive->name);
if (ata_id_is_sata(id) && !ivb)
return 1;
if (hwif->cbl != ATA_CBL_PATA80 && !ivb)
goto no_80w;
/*
* FIXME:
* - change master/slave IDENTIFY order
* - force bit13 (80c cable present) check also for !ivb devices
* (unless the slave device is pre-ATA3)
*/
/* word 93 bit 14: device-side 80c detection (valid for !ivb drives) */
if (id[ATA_ID_HW_CONFIG] & 0x4000)
return 1;
if (ivb) {
const char *model = (char *)&id[ATA_ID_PROD];
if (strstr(model, "TSSTcorp CDDVDW SH-S202")) {
/*
* These ATAPI devices always report 80c cable
* so we have to depend on the host in this case.
*/
if (hwif->cbl == ATA_CBL_PATA80)
return 1;
} else {
/* Depend on the device side cable detection. */
/* word 93 bit 13: the only bit ivb devices set reliably */
if (id[ATA_ID_HW_CONFIG] & 0x2000)
return 1;
}
}
no_80w:
/* warn only once per drive */
if (drive->dev_flags & IDE_DFLAG_UDMA33_WARNED)
return 0;
printk(KERN_WARNING "%s: %s side 80-wire cable detection failed, "
"limiting max speed to UDMA33\n",
drive->name,
hwif->cbl == ATA_CBL_PATA80 ? "drive" : "host");
drive->dev_flags |= IDE_DFLAG_UDMA33_WARNED;
return 0;
}
/* Model substrings of drives needing the nIEN quirk (see
 * ide_check_nien_quirk_list() below and its use of IDE_DFLAG_NIEN_QUIRK
 * in ide_config_drive_speed()). */
static const char *nien_quirk_list[] = {
"QUANTUM FIREBALLlct08 08",
"QUANTUM FIREBALLP KA6.4",
"QUANTUM FIREBALLP KA9.1",
"QUANTUM FIREBALLP KX13.6",
"QUANTUM FIREBALLP KX20.5",
"QUANTUM FIREBALLP KX27.3",
"QUANTUM FIREBALLP LM20.4",
"QUANTUM FIREBALLP LM20.5",
"FUJITSU MHZ2160BH G2",
NULL
};
/*
 * Set IDE_DFLAG_NIEN_QUIRK on @drive when its IDENTIFY model string
 * contains any entry of nien_quirk_list[].
 */
void ide_check_nien_quirk_list(ide_drive_t *drive)
{
    const char *model = (char *)&drive->id[ATA_ID_PROD];
    const char **entry;

    for (entry = nien_quirk_list; *entry != NULL; entry++) {
        if (strstr(model, *entry) != NULL) {
            drive->dev_flags |= IDE_DFLAG_NIEN_QUIRK;
            return;
        }
    }
}
/*
 * Re-issue IDENTIFY and refresh the drive's cached transfer-mode words
 * (UDMA/MWDMA/SWDMA/CFA) from the fresh data.  Returns 1 on success,
 * 0 on allocation or IDENTIFY failure (logging "bad status" when the
 * read reported rc == 2).
 */
int ide_driveid_update(ide_drive_t *drive)
{
    u16 *fresh_id;
    int rc;

    fresh_id = kmalloc(SECTOR_SIZE, GFP_ATOMIC);
    if (fresh_id == NULL)
        return 0;

    SELECT_MASK(drive, 1);
    rc = ide_dev_read_id(drive, ATA_CMD_ID_ATA, fresh_id, 1);
    SELECT_MASK(drive, 0);

    if (rc) {
        if (rc == 2)
            printk(KERN_ERR "%s: %s: bad status\n", drive->name, __func__);
        kfree(fresh_id);
        return 0;
    }

    drive->id[ATA_ID_UDMA_MODES] = fresh_id[ATA_ID_UDMA_MODES];
    drive->id[ATA_ID_MWDMA_MODES] = fresh_id[ATA_ID_MWDMA_MODES];
    drive->id[ATA_ID_SWDMA_MODES] = fresh_id[ATA_ID_SWDMA_MODES];
    drive->id[ATA_ID_CFA_MODES] = fresh_id[ATA_ID_CFA_MODES];
    /* anything more ? */

    kfree(fresh_id);
    return 1;
}
/*
 * Program transfer mode @speed (an XFER_* value) into the drive via
 * SET FEATURES / XFER, then update the cached IDENTIFY mode words to
 * reflect the newly selected mode.  DMA is disabled around the command
 * and re-enabled afterwards only when the drive is using DMA.
 * Returns 0 on success or the __ide_wait_stat() error code.
 */
int ide_config_drive_speed(ide_drive_t *drive, u8 speed)
{
ide_hwif_t *hwif = drive->hwif;
const struct ide_tp_ops *tp_ops = hwif->tp_ops;
struct ide_taskfile tf;
u16 *id = drive->id, i;
int error = 0;
u8 stat;
#ifdef CONFIG_BLK_DEV_IDEDMA
if (hwif->dma_ops) /* check if host supports DMA */
hwif->dma_ops->dma_host_set(drive, 0);
#endif
/* Skip setting PIO flow-control modes on pre-EIDE drives */
if ((speed & 0xf8) == XFER_PIO_0 && ata_id_has_iordy(drive->id) == 0)
goto skip;
/*
* Don't use ide_wait_cmd here - it will
* attempt to set_geometry and recalibrate,
* but for some reason these don't work at
* this point (lost interrupt).
*/
udelay(1);
tp_ops->dev_select(drive);
SELECT_MASK(drive, 1);
udelay(1);
/* mask the drive's interrupt while the command is issued */
tp_ops->write_devctl(hwif, ATA_NIEN | ATA_DEVCTL_OBS);
memset(&tf, 0, sizeof(tf));
tf.feature = SETFEATURES_XFER;
tf.nsect = speed;
tp_ops->tf_load(drive, &tf, IDE_VALID_FEATURE | IDE_VALID_NSECT);
tp_ops->exec_command(hwif, ATA_CMD_SET_FEATURES);
/* quirky drives need nIEN dropped right after the command */
if (drive->dev_flags & IDE_DFLAG_NIEN_QUIRK)
tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS);
error = __ide_wait_stat(drive, drive->ready_stat,
ATA_BUSY | ATA_DRQ | ATA_ERR,
WAIT_CMD, &stat);
SELECT_MASK(drive, 0);
if (error) {
(void) ide_dump_status(drive, "set_drive_speed_status", stat);
return error;
}
/* clear the previously-selected DMA mode bits in the cached id */
if (speed >= XFER_SW_DMA_0) {
id[ATA_ID_UDMA_MODES] &= ~0xFF00;
id[ATA_ID_MWDMA_MODES] &= ~0x0700;
id[ATA_ID_SWDMA_MODES] &= ~0x0700;
if (ata_id_is_cfa(id))
id[ATA_ID_CFA_MODES] &= ~0x0E00;
} else if (ata_id_is_cfa(id))
id[ATA_ID_CFA_MODES] &= ~0x01C0;
skip:
#ifdef CONFIG_BLK_DEV_IDEDMA
if (speed >= XFER_SW_DMA_0 && (drive->dev_flags & IDE_DFLAG_USING_DMA))
hwif->dma_ops->dma_host_set(drive, 1);
else if (hwif->dma_ops) /* check if host supports DMA */
ide_dma_off_quietly(drive);
#endif
/* set the "mode selected" bit for the new mode in the cached id
 * (upper byte = selected, lower byte = supported, per ATA words
 * 63/88; CFA advanced modes use a small bit field instead) */
if (speed >= XFER_UDMA_0) {
i = 1 << (speed - XFER_UDMA_0);
id[ATA_ID_UDMA_MODES] |= (i << 8 | i);
} else if (ata_id_is_cfa(id) && speed >= XFER_MW_DMA_3) {
i = speed - XFER_MW_DMA_2;
id[ATA_ID_CFA_MODES] |= i << 9;
} else if (speed >= XFER_MW_DMA_0) {
i = 1 << (speed - XFER_MW_DMA_0);
id[ATA_ID_MWDMA_MODES] |= (i << 8 | i);
} else if (speed >= XFER_SW_DMA_0) {
i = 1 << (speed - XFER_SW_DMA_0);
id[ATA_ID_SWDMA_MODES] |= (i << 8 | i);
} else if (ata_id_is_cfa(id) && speed >= XFER_PIO_5) {
i = speed - XFER_PIO_4;
id[ATA_ID_CFA_MODES] |= i << 6;
}
if (!drive->init_speed)
drive->init_speed = speed;
drive->current_speed = speed;
return error;
}
/*
 * This should get invoked any time we exit the driver to
 * wait for an interrupt response from a drive. handler() points
 * at the appropriate code to handle the next interrupt, and a
 * timer is started to prevent us from waiting forever in case
 * something goes wrong (see the ide_timer_expiry() handler later on).
 *
 * See also ide_execute_command
 */
/*
 * Lock-free variant: caller must hold hwif->lock (see ide_set_handler()
 * below).  BUG_ON guards against installing a handler while one is
 * already pending.
 */
void __ide_set_handler(ide_drive_t *drive, ide_handler_t *handler,
		       unsigned int timeout)
{
ide_hwif_t *hwif = drive->hwif;
BUG_ON(hwif->handler);
hwif->handler = handler;
hwif->timer.expires = jiffies + timeout;
/* snapshot req_gen so ide_timer_expiry() can detect progress */
hwif->req_gen_timer = hwif->req_gen;
add_timer(&hwif->timer);
}
/*
 * Locked wrapper around __ide_set_handler(): installs the next-IRQ
 * handler and its watchdog timer under hwif->lock.
 */
void ide_set_handler(ide_drive_t *drive, ide_handler_t *handler,
		     unsigned int timeout)
{
    unsigned long flags;
    ide_hwif_t *hwif = drive->hwif;

    spin_lock_irqsave(&hwif->lock, flags);
    __ide_set_handler(drive, handler, timeout);
    spin_unlock_irqrestore(&hwif->lock, flags);
}
EXPORT_SYMBOL(ide_set_handler);
/**
 * ide_execute_command - execute an IDE command
 * @drive: IDE drive to issue the command against
 * @cmd: command
 * @handler: handler for next phase
 * @timeout: timeout for command
 *
 * Helper function to issue an IDE command. This handles the
 * atomicity requirements, command timing and ensures that the
 * handler and IRQ setup do not race. All IDE command kick off
 * should go via this function or do equivalent locking.
 */
void ide_execute_command(ide_drive_t *drive, struct ide_cmd *cmd,
			 ide_handler_t *handler, unsigned timeout)
{
ide_hwif_t *hwif = drive->hwif;
unsigned long flags;
spin_lock_irqsave(&hwif->lock, flags);
/* For ATAPI DMA/PIO protocols the handler is armed later (unless the
 * device signals data phase with an interrupt, DRQ_INTERRUPT). */
if ((cmd->protocol != ATAPI_PROT_DMA &&
cmd->protocol != ATAPI_PROT_PIO) ||
(drive->atapi_flags & IDE_AFLAG_DRQ_INTERRUPT))
__ide_set_handler(drive, handler, timeout);
hwif->tp_ops->exec_command(hwif, cmd->tf.command);
/*
* Drive takes 400nS to respond, we must avoid the IRQ being
* serviced before that.
*
* FIXME: we could skip this delay with care on non shared devices
*/
ndelay(400);
spin_unlock_irqrestore(&hwif->lock, flags);
}
/*
* ide_wait_not_busy() waits for the currently selected device on the hwif
* to report a non-busy status, see comments in ide_probe_port().
*/
/*
 * Poll the selected device once per millisecond until BUSY clears.
 * Returns 0 when not busy, -ENODEV when the status reads back as 0xff
 * (nothing connected / no D7 pull-down), or -EBUSY after @timeout
 * milliseconds.  Watchdogs are kicked each iteration since this can
 * spin for a long time.
 */
int ide_wait_not_busy(ide_hwif_t *hwif, unsigned long timeout)
{
    unsigned long remaining;
    u8 stat = 0;

    for (remaining = timeout; remaining != 0; remaining--) {
        /*
         * Turn this into a schedule() sleep once I'm sure
         * about locking issues (2.5 work ?).
         */
        mdelay(1);
        stat = hwif->tp_ops->read_status(hwif);
        if ((stat & ATA_BUSY) == 0)
            return 0;
        /*
         * Assume a value of 0xff means nothing is connected to
         * the interface and it doesn't implement the pull-down
         * resistor on D7.
         */
        if (stat == 0xff)
            return -ENODEV;
        touch_softlockup_watchdog();
        touch_nmi_watchdog();
    }

    return -EBUSY;
}
| gpl-2.0 |
mongoose700/xen-coalesce-kernel | drivers/hid/hid-saitek.c | 92 | 5039 | /*
* HID driver for Saitek devices.
*
* PS1000 (USB gamepad):
* Fixes the HID report descriptor by removing a non-existent axis and
* clearing the constant bit on the input reports for buttons and d-pad.
* (This module is based on "hid-ortek".)
* Copyright (c) 2012 Andreas Hübner
*
* R.A.T.7, R.A.T.9, M.M.O.7 (USB gaming mice):
* Fixes the mode button which cycles through three constantly pressed
* buttons. All three press events are mapped to one button and the
* missing release event is generated immediately.
*
*/
/*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*/
#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include "hid-ids.h"
/* Quirk flags carried in hid_device_id.driver_data (see saitek_devices). */
#define SAITEK_FIX_PS1000 0x0001
#define SAITEK_RELEASE_MODE_RAT7 0x0002
#define SAITEK_RELEASE_MODE_MMO7 0x0004
/* Per-device state. */
struct saitek_sc {
/* active SAITEK_* quirk bits for this device */
unsigned long quirks;
/* last mode reported by the device; -1 until the first report */
int mode;
};
/*
 * Allocate per-device state (device-managed), record the quirk bits from
 * the id table, and bring the HID device up.  Returns 0 or a negative
 * errno from allocation/parse/start.
 */
static int saitek_probe(struct hid_device *hdev,
			const struct hid_device_id *id)
{
    struct saitek_sc *ssc;
    int ret;

    ssc = devm_kzalloc(&hdev->dev, sizeof(*ssc), GFP_KERNEL);
    if (ssc == NULL) {
        hid_err(hdev, "can't alloc saitek descriptor\n");
        return -ENOMEM;
    }

    ssc->quirks = id->driver_data;
    ssc->mode = -1;          /* no mode seen yet */
    hid_set_drvdata(hdev, ssc);

    ret = hid_parse(hdev);
    if (ret) {
        hid_err(hdev, "parse failed\n");
        return ret;
    }

    ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
    if (ret) {
        hid_err(hdev, "hw start failed\n");
        return ret;
    }

    return 0;
}
/*
 * Patch the PS1000's broken report descriptor in place: neutralize the
 * non-existent axis and clear the constant bit on the button/d-pad
 * input items.  Only fires when the descriptor matches the known
 * 137-byte layout exactly.
 */
static __u8 *saitek_report_fixup(struct hid_device *hdev, __u8 *rdesc,
				 unsigned int *rsize)
{
    struct saitek_sc *ssc = hid_get_drvdata(hdev);
    bool is_known_ps1000_desc;

    is_known_ps1000_desc =
        (ssc->quirks & SAITEK_FIX_PS1000) && *rsize == 137 &&
        rdesc[20] == 0x09 && rdesc[21] == 0x33 &&
        rdesc[94] == 0x81 && rdesc[95] == 0x03 &&
        rdesc[110] == 0x81 && rdesc[111] == 0x03;

    if (is_known_ps1000_desc) {
        hid_info(hdev, "Fixing up Saitek PS1000 report descriptor\n");
        /* convert spurious axis to a "noop" Logical Minimum (0) */
        rdesc[20] = 0x15;
        rdesc[21] = 0x00;
        /* clear constant bit on buttons and d-pad */
        rdesc[95] = 0x02;
        rdesc[111] = 0x02;
    }

    return rdesc;
}
/*
 * Rewrite the mode bits in raw reports: the hardware keeps one of three
 * mode bits constantly pressed, so strip them and instead emit a single
 * synthetic button press (completed by saitek_event() below) whenever
 * the mode changes.  The very first report only records the mode.
 */
static int saitek_raw_event(struct hid_device *hdev,
			    struct hid_report *report, u8 *raw_data, int size)
{
struct saitek_sc *ssc = hid_get_drvdata(hdev);
if (ssc->quirks & SAITEK_RELEASE_MODE_RAT7 && size == 7) {
/* R.A.T.7 uses bits 13, 14, 15 for the mode */
int mode = -1;
if (raw_data[1] & 0x01)
mode = 0;
else if (raw_data[1] & 0x02)
mode = 1;
else if (raw_data[1] & 0x04)
mode = 2;
/* clear mode bits */
raw_data[1] &= ~0x07;
if (mode != ssc->mode) {
hid_dbg(hdev, "entered mode %d\n", mode);
if (ssc->mode != -1) {
/* use bit 13 as the mode button */
raw_data[1] |= 0x04;
}
ssc->mode = mode;
}
} else if (ssc->quirks & SAITEK_RELEASE_MODE_MMO7 && size == 8) {
/* M.M.O.7 uses bits 8, 22, 23 for the mode */
int mode = -1;
if (raw_data[1] & 0x80)
mode = 0;
else if (raw_data[2] & 0x01)
mode = 1;
else if (raw_data[2] & 0x02)
mode = 2;
/* clear mode bits */
raw_data[1] &= ~0x80;
raw_data[2] &= ~0x03;
if (mode != ssc->mode) {
hid_dbg(hdev, "entered mode %d\n", mode);
if (ssc->mode != -1) {
/* use bit 8 as the mode button, bits 22
* and 23 do not represent buttons
* according to the HID report descriptor
*/
raw_data[1] |= 0x80;
}
ssc->mode = mode;
}
}
return 0;
}
/*
 * Input-event hook: the synthetic mode button produced by
 * saitek_raw_event() never gets a hardware release, so emit the
 * press/release pair ourselves and swallow the original event.
 */
static int saitek_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	struct saitek_sc *ssc = hid_get_drvdata(hdev);
	struct input_dev *input = field->hidinput->input;
	int mode_button;

	/* R.A.T.7 maps the mode selector to mouse button 10,
	 * M.M.O.7 maps it to mouse button 15. */
	mode_button =
		((ssc->quirks & SAITEK_RELEASE_MODE_RAT7) &&
		 usage->code - BTN_MOUSE == 10) ||
		((ssc->quirks & SAITEK_RELEASE_MODE_MMO7) &&
		 usage->code - BTN_MOUSE == 15);

	if (usage->type == EV_KEY && value && mode_button) {
		input_report_key(input, usage->code, 1);
		/* report missing release event */
		input_report_key(input, usage->code, 0);
		return 1;
	}

	return 0;
}
/* Devices handled by this driver and the quirk flags each one needs. */
static const struct hid_device_id saitek_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000),
		.driver_data = SAITEK_FIX_PS1000 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
	/* the Mad Catz R.A.T.9 behaves like the Saitek R.A.T.7 */
	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7),
		.driver_data = SAITEK_RELEASE_MODE_MMO7 },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(hid, saitek_devices);
/* HID driver glue: callbacks for probe, descriptor fixup and events. */
static struct hid_driver saitek_driver = {
	.name = "saitek",
	.id_table = saitek_devices,
	.probe = saitek_probe,
	.report_fixup = saitek_report_fixup,
	.raw_event = saitek_raw_event,
	.event = saitek_event,
};
module_hid_driver(saitek_driver);

MODULE_LICENSE("GPL");
| gpl-2.0 |
serdarkaracay/vlc | src/stream_output/sdp.c | 92 | 8835 | /*****************************************************************************
* sdp.c : SDP creation helpers
*****************************************************************************
* Copyright © 2007 Rémi Denis-Courmont
* $Id$
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation; either version 2.1 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301, USA.
*****************************************************************************/
#ifdef HAVE_CONFIG_H
# include "config.h"
#endif
#include <vlc_common.h>
#include <stddef.h>
#include <string.h>
#include <stdarg.h>
#include <stdio.h>
#include <assert.h>
#include <vlc_network.h>
#include <vlc_charset.h>
#include "stream_output.h"
#define MAXSDPADDRESS 47
/**
 * Formats a socket address as an SDP connection-field value of the
 * form "IN IP4 <addr>" / "IN IP6 <addr>".
 *
 * @param addr socket address to format
 * @param addrlen size of *addr in bytes
 * @param buf output buffer; must hold at least MAXSDPADDRESS bytes
 * @return buf on success, NULL on error (bad family, short addrlen,
 *         or name resolution failure)
 */
static
char *AddressToSDP (const struct sockaddr *addr, socklen_t addrlen, char *buf)
{
    /* Refuse addresses too short to even carry a family field. */
    if (addrlen < offsetof (struct sockaddr, sa_family)
                + sizeof (addr->sa_family))
        return NULL;

    /* buf[5] ('*') is patched to '4' or '6' below. */
    strcpy (buf, "IN IP* ");
    if (vlc_getnameinfo (addr, addrlen, buf + 7, MAXSDPADDRESS - 7, NULL,
                         NI_NUMERICHOST))
        return NULL;

    switch (addr->sa_family)
    {
        case AF_INET:
        {
            if (net_SockAddrIsMulticast (addr, addrlen))
                strcat (buf, "/255"); // obsolete in RFC4566, dummy value
            buf[5] = '4';
            break;
        }

#ifdef AF_INET6
        case AF_INET6:
        {
            char *ptr = strchr (buf, '%');
            if (ptr != NULL)
                *ptr = '\0'; // remove scope ID
            buf[5] = '6';
            break;
        }
#endif

        default:
            return NULL;
    }

    return buf;
}
/**
 * Checks that a string may be embedded in an SDP field: it must be
 * valid UTF-8 and contain no CR/LF, which would break line framing.
 */
static bool IsSDPString (const char *str)
{
    return (strpbrk (str, "\r\n") == NULL) && (IsUTF8 (str) != NULL);
}
/**
 * Builds the session-level part of an SDP description (v=/o=/s=/i=/
 * optional u=/e=/p=, c=, t= and session attributes).
 *
 * @param name session name (NULL -> "Unnamed")
 * @param description session information (NULL -> "N/A")
 * @param url, email, phone optional fields; omitted entirely when NULL
 * @param src optional source address for a=source-filter (srclen 0 to skip)
 * @param addr connection address for the c= field
 * @return a heap-allocated SDP string, or NULL on error; caller frees
 */
static
char *sdp_Start (const char *name, const char *description, const char *url,
                 const char *email, const char *phone,
                 const struct sockaddr *src, size_t srclen,
                 const struct sockaddr *addr, size_t addrlen)
{
    uint64_t now = NTPtime64 ();
    char *sdp;
    char connection[MAXSDPADDRESS], hostname[256],
         sfilter[MAXSDPADDRESS + sizeof ("\r\na=source-filter: incl * ")];
    const char *preurl = "\r\nu=", *premail = "\r\ne=", *prephone = "\r\np=";

    gethostname (hostname, sizeof (hostname));

    /* Apply defaults; optional fields collapse to empty strings so the
     * "%s%s" pairs in the format below emit nothing at all. */
    if (name == NULL)
        name = "Unnamed";
    if (description == NULL)
        description = "N/A";
    if (url == NULL)
        preurl = url = "";
    if (email == NULL)
        premail = email = "";
    if (phone == NULL)
        prephone = phone = "";

    /* Reject strings that would corrupt the SDP framing. */
    if (!IsSDPString (name) || !IsSDPString (description)
     || !IsSDPString (url) || !IsSDPString (email) || !IsSDPString (phone)
     || (AddressToSDP (addr, addrlen, connection) == NULL))
        return NULL;

    strcpy (sfilter, "");
    if (srclen > 0)
    {
        char machine[MAXSDPADDRESS];

        if (AddressToSDP (src, srclen, machine) != NULL)
            sprintf (sfilter, "\r\na=source-filter: incl IN IP%c * %s",
                     machine[5], machine + 7);
    }

    if (asprintf (&sdp, "v=0"
                    "\r\no=- %"PRIu64" %"PRIu64" IN IP%c %s"
                    "\r\ns=%s"
                    "\r\ni=%s"
                    "%s%s" // optional URL
                    "%s%s" // optional email
                    "%s%s" // optional phone number
                    "\r\nc=%s"
                        // bandwidth not specified
                    "\r\nt=0 0" // one dummy time span
                        // no repeating
                        // no time zone adjustment (silly idea anyway)
                        // no encryption key (deprecated)
                    "\r\na=tool:"PACKAGE_STRING
                    "\r\na=recvonly"
                    "\r\na=type:broadcast"
                    "\r\na=charset:UTF-8"
                    "%s" // optional source filter
                    "\r\n",
               /* o= */ now, now, connection[5], hostname,
               /* s= */ name,
               /* i= */ description,
               /* u= */ preurl, url,
               /* e= */ premail, email,
               /* p= */ prephone, phone,
               /* c= */ connection,
    /* source-filter */ sfilter) == -1)
        return NULL;
    return sdp;
}
/**
 * Appends an "a=<name>[:<value>]\r\n" attribute line to an SDP string,
 * growing the buffer with realloc.  On success *sdp is updated to the
 * (possibly moved) buffer; on allocation failure *sdp is left valid
 * and unchanged and NULL is returned.
 */
static char *
vsdp_AddAttribute (char **sdp, const char *name, const char *fmt, va_list ap)
{
    size_t oldlen = strlen (*sdp);
    /* sizeof("a=\r\n") counts the terminating NUL as well. */
    size_t addlen = sizeof ("a=\r\n") + strlen (name);

    if (fmt != NULL)
    {
        va_list aq;

        /* Measure the formatted value first (+1 for the ':'). */
        va_copy (aq, ap);
        addlen += 1 + vsnprintf (NULL, 0, fmt, aq);
        va_end (aq);
    }

    char *ret = realloc (*sdp, oldlen + addlen);
    if (ret == NULL)
        return NULL;

    oldlen += sprintf (ret + oldlen, "a=%s", name);
    if (fmt != NULL)
    {
        ret[oldlen++] = ':';
        oldlen += vsprintf (ret + oldlen, fmt, ap);
    }

    strcpy (ret + oldlen, "\r\n");
    return *sdp = ret;
}
/**
 * Variadic front-end for vsdp_AddAttribute(): appends one
 * "a=<name>[:<value>]" line to *sdp.  Returns the new buffer, or NULL
 * on allocation failure (in which case *sdp is unchanged).
 */
char *sdp_AddAttribute (char **sdp, const char *name, const char *fmt, ...)
{
    va_list args;
    char *result;

    va_start (args, fmt);
    result = vsdp_AddAttribute (sdp, name, fmt, args);
    va_end (args);

    return result;
}
/**
 * Appends a media description (m= line, bandwidth, rtpmap/fmtp
 * attributes) to an SDP string.
 *
 * @param sdp in/out pointer to the SDP buffer (realloc'd as needed)
 * @param type media type (NULL -> "video")
 * @param protocol transport (NULL -> "RTP/AVP")
 * @param dport destination port
 * @param pt RTP payload type (must be < 128)
 * @param bw_indep true for transport-independent (TIAS) bandwidth
 * @param bw bandwidth value; 0 omits the b= line
 * @param ptname, clock, chans payload name/clock/channels for a=rtpmap
 * @param fmtp optional format parameters for a=fmtp
 * @return the updated SDP buffer, or NULL on allocation failure
 */
char *sdp_AddMedia (char **sdp,
                    const char *type, const char *protocol, int dport,
                    unsigned pt, bool bw_indep, unsigned bw,
                    const char *ptname, unsigned clock, unsigned chans,
                    const char *fmtp)
{
    char *newsdp, *ptr;
    size_t inlen = strlen (*sdp), outlen = inlen;

    /* Some default values */
    if (type == NULL)
        type = "video";
    if (protocol == NULL)
        protocol = "RTP/AVP";
    assert (pt < 128u);

    /* Upper bound on the appended size: "TIAS" is the longer bandwidth
     * keyword and the b= line is only written when bw > 0, so this
     * never underestimates.  Format specifiers match the real writes
     * below ("%u" for pt). */
    outlen += snprintf (NULL, 0,
                        "m=%s %u %s %u\r\n"
                        "b=TIAS:%u\r\n"
                        "b=RR:0\r\n",
                        type, dport, protocol, pt, bw);

    newsdp = realloc (*sdp, outlen + 1);
    if (newsdp == NULL)
        return NULL;

    *sdp = newsdp;
    ptr = newsdp + inlen;

    ptr += sprintf (ptr, "m=%s %u %s %u\r\n",
                    type, dport, protocol, pt);

    /* RTP bandwidth: transport-independent (TIAS) or application (AS) */
    if (bw > 0)
        ptr += sprintf (ptr, "b=%s:%u\r\n", bw_indep ? "TIAS" : "AS", bw);
    ptr += sprintf (ptr, "b=RR:0\r\n");

    /* RTP payload type map */
    if (ptname != NULL)
    {
        if ((strcmp (type, "audio") == 0) && (chans != 1))
            sdp_AddAttribute (sdp, "rtpmap", "%u %s/%u/%u", pt, ptname, clock,
                              chans);
        else
            sdp_AddAttribute (sdp, "rtpmap", "%u %s/%u", pt, ptname, clock);
    }
    /* Format parameters */
    if (fmtp != NULL)
        sdp_AddAttribute (sdp, "fmtp", "%u %s", pt, fmtp);

    /* FIX: sdp_AddAttribute() may realloc (and move) the buffer, so
     * newsdp can be stale here; return the current pointer instead. */
    return *sdp;
}
/**
 * Builds a session-level SDP description using VLC configuration
 * variables "<cfgpref>name", "<cfgpref>description", "<cfgpref>url",
 * "<cfgpref>email", "<cfgpref>phone" and "<cfgpref>cat".
 *
 * @param obj VLC object used for variable lookups
 * @param cfgpref configuration variable prefix (at most 100 chars)
 * @return heap-allocated SDP string, or NULL on error; caller frees
 */
char *vlc_sdp_Start (vlc_object_t *obj, const char *cfgpref,
                     const struct sockaddr *src, size_t srclen,
                     const struct sockaddr *addr, size_t addrlen)
{
    size_t cfglen = strlen (cfgpref);
    if (cfglen > 100)
        return NULL;

    /* VLA sized for the longest suffix; subvar points past the prefix
     * so each strcpy below swaps in a different suffix. */
    char varname[cfglen + sizeof ("description")], *subvar = varname + cfglen;
    strcpy (varname, cfgpref);

    strcpy (subvar, "name");
    char *name = var_GetNonEmptyString (obj, varname);
    strcpy (subvar, "description");
    char *description = var_GetNonEmptyString (obj, varname);
    strcpy (subvar, "url");
    char *url = var_GetNonEmptyString (obj, varname);
    strcpy (subvar, "email");
    char *email = var_GetNonEmptyString (obj, varname);
    strcpy (subvar, "phone");
    char *phone = var_GetNonEmptyString (obj, varname);

    char *sdp = sdp_Start (name, description, url, email, phone,
                           src, srclen, addr, addrlen);
    free (name);
    free (description);
    free (url);
    free (email);
    free (phone);

    if (sdp == NULL)
        return NULL;

    /* Totally non-standard */
    strcpy (subvar, "cat");
    char *cat = var_GetNonEmptyString (obj, varname);
    if (cat != NULL)
    {
        sdp_AddAttribute (&sdp, "cat", "%s", cat);
        /* Totally non-standard */
        sdp_AddAttribute (&sdp, "x-plgroup", "%s", cat);
        free (cat);
    }

    return sdp;
}
| gpl-2.0 |
Wagnerp/QEMU-s5l89xx-port | hw/blizzard.c | 92 | 29144 | /*
* Epson S1D13744/S1D13745 (Blizzard/Hailstorm/Tornado) LCD/TV controller.
*
* Copyright (C) 2008 Nokia Corporation
* Written by Andrzej Zaborowski <andrew@openedhand.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 or
* (at your option) version 3 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qemu-common.h"
#include "sysemu.h"
#include "console.h"
#include "devices.h"
#include "vga_int.h"
#include "pixel_ops.h"
/* Line-conversion routine: (dst, src, byte count). */
typedef void (*blizzard_fn_t)(uint8_t *, const uint8_t *, unsigned int);

/* Complete register/runtime state of the emulated S1D1374x controller. */
typedef struct {
    uint8_t reg;        /* currently addressed register index */
    uint32_t addr;
    int swallow;        /* number of bus cycles to ignore */

    /* Clock / PLL configuration registers */
    int pll;
    int pll_range;
    int pll_ctrl;
    uint8_t pll_mode;
    uint8_t clksel;
    int memenable;
    int memrefresh;
    uint8_t timing[3];
    int priority;

    /* LCD panel geometry and sync timing */
    uint8_t lcd_config;
    int x;
    int y;
    int skipx;
    int skipy;
    uint8_t hndp;
    uint8_t vndp;
    uint8_t hsync;
    uint8_t vsync;
    uint8_t pclk;
    uint8_t u;
    uint8_t v;
    uint8_t yrc[2];

    /* Input (ix/iy) and output (ox/oy) window corners */
    int ix[2];
    int iy[2];
    int ox[2];
    int oy[2];

    int enable;
    int blank;
    int bpp;            /* bytes per input pixel, from blizzard_iformat_bpp */
    int invalidate;     /* full-redraw flag for the next display update */
    int mx[2];          /* dirty rectangle, x extent */
    int my[2];          /* dirty rectangle, y extent */
    uint8_t mode;
    uint8_t effect;
    uint8_t iformat;
    uint8_t source;
    DisplayState *state;
    blizzard_fn_t *line_fn_tab[2];  /* [0] normal, [1] rotated draw fns */
    void *fb;           /* internal framebuffer backing store */

    /* TV-out configuration */
    uint8_t hssi_config[3];
    uint8_t tv_config;
    uint8_t tv_timing[4];
    uint8_t vbi;
    uint8_t tv_x;
    uint8_t tv_y;
    uint8_t tv_test;
    uint8_t tv_filter_config;
    uint8_t tv_filter_idx;
    uint8_t tv_filter_coeff[0x20];

    /* Color pipeline: border, gamma LUT, 3x3 color matrix */
    uint8_t border_r;
    uint8_t border_g;
    uint8_t border_b;
    uint8_t gamma_config;
    uint8_t gamma_idx;
    uint8_t gamma_lut[0x100];
    uint8_t matrix_ena;
    uint8_t matrix_coeff[0x12];
    uint8_t matrix_r;
    uint8_t matrix_g;
    uint8_t matrix_b;

    uint8_t pm;
    uint8_t status;

    /* GPIO / RGB-interface pin state */
    uint8_t rgbgpio_dir;
    uint8_t rgbgpio;
    uint8_t gpio_dir;
    uint8_t gpio;
    uint8_t gpio_edge[2];
    uint8_t gpio_irq;
    uint8_t gpio_pdown;

    /* State of an in-progress pixel transfer through reg 0x90 */
    struct {
        int x;
        int y;
        int dx;
        int dy;
        int len;        /* remaining 16-bit words to receive */
        int buflen;
        void *buf;
        void *data;
        uint16_t *ptr;
        int angle;
        int pitch;
        blizzard_fn_t line_fn;
    } data;
} BlizzardState;
/* Bytes(!) per pixel, indexed by the Input Data Format register value.
 * Zero marks unsupported/invalid formats (including the YUV modes). */
static const int blizzard_iformat_bpp[0x10] = {
    0,
    2,	/* RGB 5:6:5*/
    3,	/* RGB 6:6:6 mode 1 */
    3,	/* RGB 8:8:8 mode 1 */
    0, 0,
    4,	/* RGB 6:6:6 mode 2 */
    4,	/* RGB 8:8:8 mode 2 */
    0,	/* YUV 4:2:2 */
    0,	/* YUV 4:2:0 */
    0, 0, 0, 0, 0, 0,
};
/* 13-bit fixed-point RGB -> YUV conversion; the coefficients
 * approximate the ITU-R BT.601 studio-swing transform (Y offset 16,
 * chroma centred on 128). */
static inline void blizzard_rgb2yuv(int r, int g, int b,
                int *y, int *u, int *v)
{
    int luma = (0x838 * r + 0x1022 * g + 0x322 * b) >> 13;
    int cb   = (0xe0e * b - 0x04c1 * r - 0x94e * g) >> 13;
    int cr   = (0xe0e * r - 0x0bc7 * g - 0x247 * b) >> 13;

    *y = 0x10 + luma;
    *u = 0x80 + cb;
    *v = 0x80 + cr;
}
/* Blit the current transfer window (s->data) into the internal
 * framebuffer using the pre-selected line conversion function, and
 * grow the dirty rectangle (mx/my) to cover the touched area. */
static void blizzard_window(BlizzardState *s)
{
    uint8_t *src, *dst;
    int bypp[2];
    int bypl[3];
    int y;
    blizzard_fn_t fn = s->data.line_fn;

    if (!fn)
        return;

    /* Extend the dirty rectangle to include this window. */
    if (s->mx[0] > s->data.x)
        s->mx[0] = s->data.x;
    if (s->my[0] > s->data.y)
        s->my[0] = s->data.y;
    if (s->mx[1] < s->data.x + s->data.dx)
        s->mx[1] = s->data.x + s->data.dx;
    if (s->my[1] < s->data.y + s->data.dy)
        s->my[1] = s->data.y + s->data.dy;

    /* [0] source bytes/pixel, [1] destination bytes/pixel */
    bypp[0] = s->bpp;
    bypp[1] = (ds_get_bits_per_pixel(s->state) + 7) >> 3;
    bypl[0] = bypp[0] * s->data.pitch;  /* source stride */
    bypl[1] = bypp[1] * s->x;           /* destination stride */
    bypl[2] = bypp[0] * s->data.dx;     /* bytes converted per line */

    src = s->data.data;
    dst = s->fb + bypl[1] * s->data.y + bypp[1] * s->data.x;
    for (y = s->data.dy; y > 0; y --, src += bypl[0], dst += bypl[1])
        fn(dst, src, bypl[2]);
}
/* Prepare a pixel transfer into the input window: pick the line
 * conversion routine, compute geometry, and (re)allocate the staging
 * buffer.  Returns 1 when a transfer may proceed, 0 otherwise. */
static int blizzard_transfer_setup(BlizzardState *s)
{
    /* Reject unknown sources, invalid formats and inverted windows. */
    if (s->source > 3 || !s->bpp ||
                    s->ix[1] < s->ix[0] || s->iy[1] < s->iy[0])
        return 0;

    s->data.angle = s->effect & 3;
    s->data.line_fn = s->line_fn_tab[!!s->data.angle][s->iformat];
    s->data.x = s->ix[0];
    s->data.y = s->iy[0];
    s->data.dx = s->ix[1] - s->ix[0] + 1;
    s->data.dy = s->iy[1] - s->iy[0] + 1;
    s->data.len = s->bpp * s->data.dx * s->data.dy;
    s->data.pitch = s->data.dx;
    if (s->data.len > s->data.buflen) {
        s->data.buf = qemu_realloc(s->data.buf, s->data.len);
        s->data.buflen = s->data.len;
    }
    s->data.ptr = s->data.buf;
    s->data.data = s->data.buf;
    /* len counts 16-bit data-port words from here on. */
    s->data.len /= 2;
    return 1;
}
/* Reset all registers to their power-on defaults.
 * NOTE(review): default values presumably follow the S1D13745
 * datasheet reset table — confirm against the hardware spec. */
static void blizzard_reset(BlizzardState *s)
{
    s->reg = 0;
    s->swallow = 0;

    s->pll = 9;
    s->pll_range = 1;
    s->pll_ctrl = 0x14;
    s->pll_mode = 0x32;
    s->clksel = 0x00;
    s->memenable = 0;
    s->memrefresh = 0x25c;
    s->timing[0] = 0x3f;
    s->timing[1] = 0x13;
    s->timing[2] = 0x21;
    s->priority = 0;

    s->lcd_config = 0x74;
    s->x = 8;
    s->y = 1;
    s->skipx = 0;
    s->skipy = 0;
    s->hndp = 3;
    s->vndp = 2;
    s->hsync = 1;
    s->vsync = 1;
    s->pclk = 0x80;

    s->ix[0] = 0;
    s->ix[1] = 0;
    s->iy[0] = 0;
    s->iy[1] = 0;
    s->ox[0] = 0;
    s->ox[1] = 0;
    s->oy[0] = 0;
    s->oy[1] = 0;

    s->yrc[0] = 0x00;
    s->yrc[1] = 0x30;
    s->u = 0;
    s->v = 0;

    s->iformat = 3;
    s->source = 0;
    s->bpp = blizzard_iformat_bpp[s->iformat];

    s->hssi_config[0] = 0x00;
    s->hssi_config[1] = 0x00;
    s->hssi_config[2] = 0x01;
    s->tv_config = 0x00;
    s->tv_timing[0] = 0x00;
    s->tv_timing[1] = 0x00;
    s->tv_timing[2] = 0x00;
    s->tv_timing[3] = 0x00;
    s->vbi = 0x10;
    s->tv_x = 0x14;
    s->tv_y = 0x03;
    s->tv_test = 0x00;
    s->tv_filter_config = 0x80;
    s->tv_filter_idx = 0x00;
    s->border_r = 0x10;
    s->border_g = 0x80;
    s->border_b = 0x80;
    s->gamma_config = 0x00;
    s->gamma_idx = 0x00;
    s->matrix_ena = 0x00;
    memset(&s->matrix_coeff, 0, sizeof(s->matrix_coeff));
    s->matrix_r = 0x00;
    s->matrix_g = 0x00;
    s->matrix_b = 0x00;
    s->pm = 0x02;
    s->status = 0x00;
    s->rgbgpio_dir = 0x00;
    s->gpio_dir = 0x00;
    s->gpio_edge[0] = 0x00;
    s->gpio_edge[1] = 0x00;
    s->gpio_irq = 0x00;
    s->gpio_pdown = 0xff;
}
/* Console invalidate callback: request a full redraw on the next
 * blizzard_update_display() pass. */
static inline void blizzard_invalidate_display(void *opaque)
{
    BlizzardState *s = opaque;

    s->invalidate = 1;
}
static uint16_t blizzard_reg_read(void *opaque, uint8_t reg)
{
BlizzardState *s = (BlizzardState *) opaque;
switch (reg) {
case 0x00: /* Revision Code */
return 0xa5;
case 0x02: /* Configuration Readback */
return 0x83; /* Macrovision OK, CNF[2:0] = 3 */
case 0x04: /* PLL M-Divider */
return (s->pll - 1) | (1 << 7);
case 0x06: /* PLL Lock Range Control */
return s->pll_range;
case 0x08: /* PLL Lock Synthesis Control 0 */
return s->pll_ctrl & 0xff;
case 0x0a: /* PLL Lock Synthesis Control 1 */
return s->pll_ctrl >> 8;
case 0x0c: /* PLL Mode Control 0 */
return s->pll_mode;
case 0x0e: /* Clock-Source Select */
return s->clksel;
case 0x10: /* Memory Controller Activate */
case 0x14: /* Memory Controller Bank 0 Status Flag */
return s->memenable;
case 0x18: /* Auto-Refresh Interval Setting 0 */
return s->memrefresh & 0xff;
case 0x1a: /* Auto-Refresh Interval Setting 1 */
return s->memrefresh >> 8;
case 0x1c: /* Power-On Sequence Timing Control */
return s->timing[0];
case 0x1e: /* Timing Control 0 */
return s->timing[1];
case 0x20: /* Timing Control 1 */
return s->timing[2];
case 0x24: /* Arbitration Priority Control */
return s->priority;
case 0x28: /* LCD Panel Configuration */
return s->lcd_config;
case 0x2a: /* LCD Horizontal Display Width */
return s->x >> 3;
case 0x2c: /* LCD Horizontal Non-display Period */
return s->hndp;
case 0x2e: /* LCD Vertical Display Height 0 */
return s->y & 0xff;
case 0x30: /* LCD Vertical Display Height 1 */
return s->y >> 8;
case 0x32: /* LCD Vertical Non-display Period */
return s->vndp;
case 0x34: /* LCD HS Pulse-width */
return s->hsync;
case 0x36: /* LCd HS Pulse Start Position */
return s->skipx >> 3;
case 0x38: /* LCD VS Pulse-width */
return s->vsync;
case 0x3a: /* LCD VS Pulse Start Position */
return s->skipy;
case 0x3c: /* PCLK Polarity */
return s->pclk;
case 0x3e: /* High-speed Serial Interface Tx Configuration Port 0 */
return s->hssi_config[0];
case 0x40: /* High-speed Serial Interface Tx Configuration Port 1 */
return s->hssi_config[1];
case 0x42: /* High-speed Serial Interface Tx Mode */
return s->hssi_config[2];
case 0x44: /* TV Display Configuration */
return s->tv_config;
case 0x46 ... 0x4c: /* TV Vertical Blanking Interval Data bits */
return s->tv_timing[(reg - 0x46) >> 1];
case 0x4e: /* VBI: Closed Caption / XDS Control / Status */
return s->vbi;
case 0x50: /* TV Horizontal Start Position */
return s->tv_x;
case 0x52: /* TV Vertical Start Position */
return s->tv_y;
case 0x54: /* TV Test Pattern Setting */
return s->tv_test;
case 0x56: /* TV Filter Setting */
return s->tv_filter_config;
case 0x58: /* TV Filter Coefficient Index */
return s->tv_filter_idx;
case 0x5a: /* TV Filter Coefficient Data */
if (s->tv_filter_idx < 0x20)
return s->tv_filter_coeff[s->tv_filter_idx ++];
return 0;
case 0x60: /* Input YUV/RGB Translate Mode 0 */
return s->yrc[0];
case 0x62: /* Input YUV/RGB Translate Mode 1 */
return s->yrc[1];
case 0x64: /* U Data Fix */
return s->u;
case 0x66: /* V Data Fix */
return s->v;
case 0x68: /* Display Mode */
return s->mode;
case 0x6a: /* Special Effects */
return s->effect;
case 0x6c: /* Input Window X Start Position 0 */
return s->ix[0] & 0xff;
case 0x6e: /* Input Window X Start Position 1 */
return s->ix[0] >> 3;
case 0x70: /* Input Window Y Start Position 0 */
return s->ix[0] & 0xff;
case 0x72: /* Input Window Y Start Position 1 */
return s->ix[0] >> 3;
case 0x74: /* Input Window X End Position 0 */
return s->ix[1] & 0xff;
case 0x76: /* Input Window X End Position 1 */
return s->ix[1] >> 3;
case 0x78: /* Input Window Y End Position 0 */
return s->ix[1] & 0xff;
case 0x7a: /* Input Window Y End Position 1 */
return s->ix[1] >> 3;
case 0x7c: /* Output Window X Start Position 0 */
return s->ox[0] & 0xff;
case 0x7e: /* Output Window X Start Position 1 */
return s->ox[0] >> 3;
case 0x80: /* Output Window Y Start Position 0 */
return s->oy[0] & 0xff;
case 0x82: /* Output Window Y Start Position 1 */
return s->oy[0] >> 3;
case 0x84: /* Output Window X End Position 0 */
return s->ox[1] & 0xff;
case 0x86: /* Output Window X End Position 1 */
return s->ox[1] >> 3;
case 0x88: /* Output Window Y End Position 0 */
return s->oy[1] & 0xff;
case 0x8a: /* Output Window Y End Position 1 */
return s->oy[1] >> 3;
case 0x8c: /* Input Data Format */
return s->iformat;
case 0x8e: /* Data Source Select */
return s->source;
case 0x90: /* Display Memory Data Port */
return 0;
case 0xa8: /* Border Color 0 */
return s->border_r;
case 0xaa: /* Border Color 1 */
return s->border_g;
case 0xac: /* Border Color 2 */
return s->border_b;
case 0xb4: /* Gamma Correction Enable */
return s->gamma_config;
case 0xb6: /* Gamma Correction Table Index */
return s->gamma_idx;
case 0xb8: /* Gamma Correction Table Data */
return s->gamma_lut[s->gamma_idx ++];
case 0xba: /* 3x3 Matrix Enable */
return s->matrix_ena;
case 0xbc ... 0xde: /* Coefficient Registers */
return s->matrix_coeff[(reg - 0xbc) >> 1];
case 0xe0: /* 3x3 Matrix Red Offset */
return s->matrix_r;
case 0xe2: /* 3x3 Matrix Green Offset */
return s->matrix_g;
case 0xe4: /* 3x3 Matrix Blue Offset */
return s->matrix_b;
case 0xe6: /* Power-save */
return s->pm;
case 0xe8: /* Non-display Period Control / Status */
return s->status | (1 << 5);
case 0xea: /* RGB Interface Control */
return s->rgbgpio_dir;
case 0xec: /* RGB Interface Status */
return s->rgbgpio;
case 0xee: /* General-purpose IO Pins Configuration */
return s->gpio_dir;
case 0xf0: /* General-purpose IO Pins Status / Control */
return s->gpio;
case 0xf2: /* GPIO Positive Edge Interrupt Trigger */
return s->gpio_edge[0];
case 0xf4: /* GPIO Negative Edge Interrupt Trigger */
return s->gpio_edge[1];
case 0xf6: /* GPIO Interrupt Status */
return s->gpio_irq;
case 0xf8: /* GPIO Pull-down Control */
return s->gpio_pdown;
default:
fprintf(stderr, "%s: unknown register %02x\n", __FUNCTION__, reg);
return 0;
}
}
/* Write one configuration register.  Multi-byte fields (y, ix/iy,
 * ox/oy, pll_ctrl, memrefresh) are stored low/high byte in adjacent
 * registers; the table-data registers (0x5a, 0xb8) auto-increment
 * their index; writing the data port (0x90) feeds pixel transfers. */
static void blizzard_reg_write(void *opaque, uint8_t reg, uint16_t value)
{
    BlizzardState *s = (BlizzardState *) opaque;

    switch (reg) {
    case 0x04:	/* PLL M-Divider */
        s->pll = (value & 0x3f) + 1;
        break;
    case 0x06:	/* PLL Lock Range Control */
        s->pll_range = value & 3;
        break;
    case 0x08:	/* PLL Lock Synthesis Control 0 */
        s->pll_ctrl &= 0xf00;
        s->pll_ctrl |= (value << 0) & 0x0ff;
        break;
    case 0x0a:	/* PLL Lock Synthesis Control 1 */
        s->pll_ctrl &= 0x0ff;
        s->pll_ctrl |= (value << 8) & 0xf00;
        break;
    case 0x0c:	/* PLL Mode Control 0 */
        s->pll_mode = value & 0x77;
        if ((value & 3) == 0 || (value & 3) == 3)
            fprintf(stderr, "%s: wrong PLL Control bits (%i)\n",
                            __FUNCTION__, value & 3);
        break;

    case 0x0e:	/* Clock-Source Select */
        s->clksel = value & 0xff;
        break;

    case 0x10:	/* Memory Controller Activate */
        s->memenable = value & 1;
        break;
    case 0x14:	/* Memory Controller Bank 0 Status Flag */
        /* read-only status flag; writes are ignored */
        break;

    case 0x18:	/* Auto-Refresh Interval Setting 0 */
        s->memrefresh &= 0xf00;
        s->memrefresh |= (value << 0) & 0x0ff;
        break;
    case 0x1a:	/* Auto-Refresh Interval Setting 1 */
        s->memrefresh &= 0x0ff;
        s->memrefresh |= (value << 8) & 0xf00;
        break;

    case 0x1c:	/* Power-On Sequence Timing Control */
        s->timing[0] = value & 0x7f;
        break;
    case 0x1e:	/* Timing Control 0 */
        s->timing[1] = value & 0x17;
        break;
    case 0x20:	/* Timing Control 1 */
        s->timing[2] = value & 0x35;
        break;

    case 0x24:	/* Arbitration Priority Control */
        s->priority = value & 1;
        break;

    case 0x28:	/* LCD Panel Configuration */
        s->lcd_config = value & 0xff;
        if (value & (1 << 7))
            fprintf(stderr, "%s: data swap not supported!\n", __FUNCTION__);
        break;

    case 0x2a:	/* LCD Horizontal Display Width */
        /* register holds the width in 8-pixel units */
        s->x = value << 3;
        break;
    case 0x2c:	/* LCD Horizontal Non-display Period */
        s->hndp = value & 0xff;
        break;
    case 0x2e:	/* LCD Vertical Display Height 0 */
        s->y &= 0x300;
        s->y |= (value << 0) & 0x0ff;
        break;
    case 0x30:	/* LCD Vertical Display Height 1 */
        s->y &= 0x0ff;
        s->y |= (value << 8) & 0x300;
        break;
    case 0x32:	/* LCD Vertical Non-display Period */
        s->vndp = value & 0xff;
        break;
    case 0x34:	/* LCD HS Pulse-width */
        s->hsync = value & 0xff;
        break;
    case 0x36:	/* LCD HS Pulse Start Position */
        /* NOTE(review): stored raw here but read back as skipx >> 3
         * in blizzard_reg_read() — looks inconsistent; confirm which
         * scaling the datasheet specifies. */
        s->skipx = value & 0xff;
        break;
    case 0x38:	/* LCD VS Pulse-width */
        s->vsync = value & 0xbf;
        break;
    case 0x3a:	/* LCD VS Pulse Start Position */
        s->skipy = value & 0xff;
        break;

    case 0x3c:	/* PCLK Polarity */
        s->pclk = value & 0x82;
        /* Affects calculation of s->hndp, s->hsync and s->skipx.  */
        break;

    case 0x3e:	/* High-speed Serial Interface Tx Configuration Port 0 */
        s->hssi_config[0] = value;
        break;
    case 0x40:	/* High-speed Serial Interface Tx Configuration Port 1 */
        s->hssi_config[1] = value;
        if (((value >> 4) & 3) == 3)
            fprintf(stderr, "%s: Illegal active-data-links value\n",
                            __FUNCTION__);
        break;
    case 0x42:	/* High-speed Serial Interface Tx Mode */
        s->hssi_config[2] = value & 0xbd;
        break;

    case 0x44:	/* TV Display Configuration */
        s->tv_config = value & 0xfe;
        break;
    case 0x46 ... 0x4c:	/* TV Vertical Blanking Interval Data bits 0 */
        s->tv_timing[(reg - 0x46) >> 1] = value;
        break;
    case 0x4e:	/* VBI: Closed Caption / XDS Control / Status */
        s->vbi = value;
        break;
    case 0x50:	/* TV Horizontal Start Position */
        s->tv_x = value;
        break;
    case 0x52:	/* TV Vertical Start Position */
        s->tv_y = value & 0x7f;
        break;
    case 0x54:	/* TV Test Pattern Setting */
        s->tv_test = value;
        break;
    case 0x56:	/* TV Filter Setting */
        s->tv_filter_config = value & 0xbf;
        break;
    case 0x58:	/* TV Filter Coefficient Index */
        s->tv_filter_idx = value & 0x1f;
        break;
    case 0x5a:	/* TV Filter Coefficient Data */
        if (s->tv_filter_idx < 0x20)
            s->tv_filter_coeff[s->tv_filter_idx ++] = value;
        break;

    case 0x60:	/* Input YUV/RGB Translate Mode 0 */
        s->yrc[0] = value & 0xb0;
        break;
    case 0x62:	/* Input YUV/RGB Translate Mode 1 */
        s->yrc[1] = value & 0x30;
        break;
    case 0x64:	/* U Data Fix */
        s->u = value & 0xff;
        break;
    case 0x66:	/* V Data Fix */
        s->v = value & 0xff;
        break;

    case 0x68:	/* Display Mode */
        /* force a full redraw if the display mode bits changed */
        if ((s->mode ^ value) & 3)
            s->invalidate = 1;
        s->mode = value & 0xb7;
        s->enable = value & 1;
        s->blank = (value >> 1) & 1;
        if (value & (1 << 4))
            fprintf(stderr, "%s: Macrovision enable attempt!\n", __FUNCTION__);
        break;

    case 0x6a:	/* Special Effects */
        s->effect = value & 0xfb;
        break;

    case 0x6c:	/* Input Window X Start Position 0 */
        s->ix[0] &= 0x300;
        s->ix[0] |= (value << 0) & 0x0ff;
        break;
    case 0x6e:	/* Input Window X Start Position 1 */
        s->ix[0] &= 0x0ff;
        s->ix[0] |= (value << 8) & 0x300;
        break;
    case 0x70:	/* Input Window Y Start Position 0 */
        s->iy[0] &= 0x300;
        s->iy[0] |= (value << 0) & 0x0ff;
        break;
    case 0x72:	/* Input Window Y Start Position 1 */
        s->iy[0] &= 0x0ff;
        s->iy[0] |= (value << 8) & 0x300;
        break;
    case 0x74:	/* Input Window X End Position 0 */
        s->ix[1] &= 0x300;
        s->ix[1] |= (value << 0) & 0x0ff;
        break;
    case 0x76:	/* Input Window X End Position 1 */
        s->ix[1] &= 0x0ff;
        s->ix[1] |= (value << 8) & 0x300;
        break;
    case 0x78:	/* Input Window Y End Position 0 */
        s->iy[1] &= 0x300;
        s->iy[1] |= (value << 0) & 0x0ff;
        break;
    case 0x7a:	/* Input Window Y End Position 1 */
        s->iy[1] &= 0x0ff;
        s->iy[1] |= (value << 8) & 0x300;
        break;
    case 0x7c:	/* Output Window X Start Position 0 */
        s->ox[0] &= 0x300;
        s->ox[0] |= (value << 0) & 0x0ff;
        break;
    case 0x7e:	/* Output Window X Start Position 1 */
        s->ox[0] &= 0x0ff;
        s->ox[0] |= (value << 8) & 0x300;
        break;
    case 0x80:	/* Output Window Y Start Position 0 */
        s->oy[0] &= 0x300;
        s->oy[0] |= (value << 0) & 0x0ff;
        break;
    case 0x82:	/* Output Window Y Start Position 1 */
        s->oy[0] &= 0x0ff;
        s->oy[0] |= (value << 8) & 0x300;
        break;
    case 0x84:	/* Output Window X End Position 0 */
        s->ox[1] &= 0x300;
        s->ox[1] |= (value << 0) & 0x0ff;
        break;
    case 0x86:	/* Output Window X End Position 1 */
        s->ox[1] &= 0x0ff;
        s->ox[1] |= (value << 8) & 0x300;
        break;
    case 0x88:	/* Output Window Y End Position 0 */
        s->oy[1] &= 0x300;
        s->oy[1] |= (value << 0) & 0x0ff;
        break;
    case 0x8a:	/* Output Window Y End Position 1 */
        s->oy[1] &= 0x0ff;
        s->oy[1] |= (value << 8) & 0x300;
        break;

    case 0x8c:	/* Input Data Format */
        s->iformat = value & 0xf;
        s->bpp = blizzard_iformat_bpp[s->iformat];
        if (!s->bpp)
            fprintf(stderr, "%s: Illegal or unsupported input format %x\n",
                            __FUNCTION__, s->iformat);
        break;
    case 0x8e:	/* Data Source Select */
        s->source = value & 7;
        /* Currently all windows will be "destructive overlays".  */
        if ((!(s->effect & (1 << 3)) && (s->ix[0] != s->ox[0] ||
                                        s->iy[0] != s->oy[0] ||
                                        s->ix[1] != s->ox[1] ||
                                        s->iy[1] != s->oy[1])) ||
                        !((s->ix[1] - s->ix[0]) & (s->iy[1] - s->iy[0]) &
                          (s->ox[1] - s->ox[0]) & (s->oy[1] - s->oy[0]) & 1))
            fprintf(stderr, "%s: Illegal input/output window positions\n",
                            __FUNCTION__);

        blizzard_transfer_setup(s);
        break;

    case 0x90:	/* Display Memory Data Port */
        /* each 16-bit write feeds the pending pixel transfer; the
         * window is blitted once the expected word count arrives */
        if (!s->data.len && !blizzard_transfer_setup(s))
            break;

        *s->data.ptr ++ = value;
        if (-- s->data.len == 0)
            blizzard_window(s);
        break;

    case 0xa8:	/* Border Color 0 */
        s->border_r = value;
        break;
    case 0xaa:	/* Border Color 1 */
        s->border_g = value;
        break;
    case 0xac:	/* Border Color 2 */
        s->border_b = value;
        break;

    case 0xb4:	/* Gamma Correction Enable */
        s->gamma_config = value & 0x87;
        break;
    case 0xb6:	/* Gamma Correction Table Index */
        s->gamma_idx = value;
        break;
    case 0xb8:	/* Gamma Correction Table Data */
        s->gamma_lut[s->gamma_idx ++] = value;
        break;

    case 0xba:	/* 3x3 Matrix Enable */
        s->matrix_ena = value & 1;
        break;
    case 0xbc ... 0xde:	/* Coefficient Registers */
        s->matrix_coeff[(reg - 0xbc) >> 1] = value & ((reg & 2) ? 0x80 : 0xff);
        break;
    case 0xe0:	/* 3x3 Matrix Red Offset */
        s->matrix_r = value;
        break;
    case 0xe2:	/* 3x3 Matrix Green Offset */
        s->matrix_g = value;
        break;
    case 0xe4:	/* 3x3 Matrix Blue Offset */
        s->matrix_b = value;
        break;

    case 0xe6:	/* Power-save */
        s->pm = value & 0x83;
        if (value & s->mode & 1)
            fprintf(stderr, "%s: The display must be disabled before entering "
                            "Standby Mode\n", __FUNCTION__);
        break;
    case 0xe8:	/* Non-display Period Control / Status */
        s->status = value & 0x1b;
        break;
    case 0xea:	/* RGB Interface Control */
        s->rgbgpio_dir = value & 0x8f;
        break;
    case 0xec:	/* RGB Interface Status */
        s->rgbgpio = value & 0xcf;
        break;
    case 0xee:	/* General-purpose IO Pins Configuration */
        s->gpio_dir = value;
        break;
    case 0xf0:	/* General-purpose IO Pins Status / Control */
        s->gpio = value;
        break;
    case 0xf2:	/* GPIO Positive Edge Interrupt Trigger */
        s->gpio_edge[0] = value;
        break;
    case 0xf4:	/* GPIO Negative Edge Interrupt Trigger */
        s->gpio_edge[1] = value;
        break;
    case 0xf6:	/* GPIO Interrupt Status */
        /* write-1-to-clear semantics via mask */
        s->gpio_irq &= value;
        break;
    case 0xf8:	/* GPIO Pull-down Control */
        s->gpio_pdown = value;
        break;

    default:
        fprintf(stderr, "%s: unknown register %02x\n", __FUNCTION__, reg);
        break;
    }
}
/* Bus read cycle.  dc != 0 reads the data register (and advances the
 * register index); dc == 0 reads at the current index.
 * NOTE(review): the register is read (including its side effects,
 * e.g. the gamma/filter index auto-increment) even when the cycle is
 * swallowed and 0 is returned — confirm this matches the hardware. */
uint16_t s1d13745_read(void *opaque, int dc)
{
    BlizzardState *s = (BlizzardState *) opaque;
    uint16_t value = blizzard_reg_read(s, s->reg);

    if (s->swallow -- > 0)
        return 0;
    if (dc)
        s->reg ++;

    return value;
}
/* Bus write cycle.  A command cycle (dc == 0) latches the register
 * index; a data cycle (dc != 0) writes the addressed register and
 * advances the index, except for the auto-incrementing registers
 * (0x90 data port, 0x5a TV filter data, 0xb8 gamma data). */
void s1d13745_write(void *opaque, int dc, uint16_t value)
{
    BlizzardState *s = opaque;

    if (s->swallow-- > 0)
        return;

    if (!dc) {
        s->reg = value & 0xff;
        return;
    }

    blizzard_reg_write(s, s->reg, value);
    if (s->reg != 0x90 && s->reg != 0x5a && s->reg != 0xb8)
        s->reg += 2;
}
/* Bulk write: stream a buffer of 16-bit bus cycles into the chip.
 * When the data port (0x90) is addressed and the buffer covers the
 * whole pending transfer, the window is blitted directly from the
 * caller's buffer, avoiding a word-by-word copy.
 *
 * Fixes vs. previous revision: pointer arithmetic was performed on a
 * void * (a GCC extension, not ISO C) and 16-bit values were loaded
 * through a possibly misaligned cast; use a uint8_t cursor and memcpy. */
void s1d13745_write_block(void *opaque, int dc,
                void *buf, size_t len, int pitch)
{
    BlizzardState *s = (BlizzardState *) opaque;
    uint8_t *data = buf;

    while (len > 0) {
        if (s->reg == 0x90 && dc &&
                        (s->data.len || blizzard_transfer_setup(s)) &&
                        len >= (s->data.len << 1)) {
            /* Fast path: blit straight out of the caller's buffer. */
            len -= s->data.len << 1;
            s->data.len = 0;
            s->data.data = data;
            if (pitch)
                s->data.pitch = pitch;
            blizzard_window(s);
            s->data.data = s->data.buf;
            continue;
        }

        /* Slow path: replay one bus cycle at a time. */
        uint16_t value;
        memcpy(&value, data, sizeof(value));
        s1d13745_write(opaque, dc, value);
        len -= 2;
        data += 2;
    }
}
/* Console update callback: copy the dirty rectangle (mx/my) from the
 * internal framebuffer to the QEMU display surface and reset it. */
static void blizzard_update_display(void *opaque)
{
    BlizzardState *s = (BlizzardState *) opaque;
    int y, bypp, bypl, bwidth;
    uint8_t *src, *dst;

    if (!s->enable)
        return;

    /* Resize the console if the panel geometry changed. */
    if (s->x != ds_get_width(s->state) || s->y != ds_get_height(s->state)) {
        s->invalidate = 1;
        qemu_console_resize(s->state, s->x, s->y);
    }

    if (s->invalidate) {
        s->invalidate = 0;
        if (s->blank) {
            bypp = (ds_get_bits_per_pixel(s->state) + 7) >> 3;
            memset(ds_get_data(s->state), 0, bypp * s->x * s->y);
            return;
        }

        /* Full redraw: dirty rectangle covers the whole screen. */
        s->mx[0] = 0;
        s->mx[1] = s->x;
        s->my[0] = 0;
        s->my[1] = s->y;
    }

    if (s->mx[1] <= s->mx[0])
        return;		/* nothing dirty */

    bypp = (ds_get_bits_per_pixel(s->state) + 7) >> 3;
    bypl = bypp * s->x;
    bwidth = bypp * (s->mx[1] - s->mx[0]);
    y = s->my[0];
    src = s->fb + bypl * y + bypp * s->mx[0];
    dst = ds_get_data(s->state) + bypl * y + bypp * s->mx[0];
    for (; y < s->my[1]; y ++, src += bypl, dst += bypl)
        memcpy(dst, src, bwidth);

    dpy_update(s->state, s->mx[0], s->my[0],
                    s->mx[1] - s->mx[0], y - s->my[0]);

    /* Reset the dirty rectangle to "empty". */
    s->mx[0] = s->x;
    s->mx[1] = 0;
    s->my[0] = s->y;
    s->my[1] = 0;
}
/* Console screendump callback: flush pending updates, then save the
 * current display surface as a PPM file. */
static void blizzard_screen_dump(void *opaque, const char *filename) {
    BlizzardState *s = (BlizzardState *) opaque;

    blizzard_update_display(opaque);
    if (s && ds_get_data(s->state))
        ppm_save(filename, s->state->surface);
}
#define DEPTH 8
#include "blizzard_template.h"
#define DEPTH 15
#include "blizzard_template.h"
#define DEPTH 16
#include "blizzard_template.h"
#define DEPTH 24
#include "blizzard_template.h"
#define DEPTH 32
#include "blizzard_template.h"
/*
 * Create and reset an S1D13745 (Epson Blizzard) LCD controller model.
 * Allocates the device state and its 1.5 MiB framebuffer, registers
 * the graphic console, and selects the line-drawing function table
 * matching the host surface depth.  Exits on unsupported depths.
 */
void *s1d13745_init(qemu_irq gpio_int)
{
    BlizzardState *s = (BlizzardState *) qemu_mallocz(sizeof(*s));
    int depth;

    s->fb = qemu_malloc(0x180000);
    s->state = graphic_console_init(blizzard_update_display,
                    blizzard_invalidate_display,
                    blizzard_screen_dump, NULL, s);

    depth = ds_get_bits_per_pixel(s->state);
    if (depth == 0) {
        /* No surface yet: both tables point at one zeroed placeholder. */
        s->line_fn_tab[0] = s->line_fn_tab[1] =
                qemu_mallocz(sizeof(blizzard_fn_t) * 0x10);
    } else if (depth == 8) {
        s->line_fn_tab[0] = blizzard_draw_fn_8;
        s->line_fn_tab[1] = blizzard_draw_fn_r_8;
    } else if (depth == 15) {
        s->line_fn_tab[0] = blizzard_draw_fn_15;
        s->line_fn_tab[1] = blizzard_draw_fn_r_15;
    } else if (depth == 16) {
        s->line_fn_tab[0] = blizzard_draw_fn_16;
        s->line_fn_tab[1] = blizzard_draw_fn_r_16;
    } else if (depth == 24) {
        s->line_fn_tab[0] = blizzard_draw_fn_24;
        s->line_fn_tab[1] = blizzard_draw_fn_r_24;
    } else if (depth == 32) {
        s->line_fn_tab[0] = blizzard_draw_fn_32;
        s->line_fn_tab[1] = blizzard_draw_fn_r_32;
    } else {
        fprintf(stderr, "%s: Bad color depth\n", __FUNCTION__);
        exit(1);
    }

    blizzard_reset(s);

    return s;
}
| gpl-2.0 |
TeamEpsilon/linux-3.8 | arch/arm/mach-exynos/mach-smdkv310.c | 92 | 11617 | /* linux/arch/arm/mach-exynos4/mach-smdkv310.c
*
* Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/serial_core.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/lcd.h>
#include <linux/mmc/host.h>
#include <linux/platform_device.h>
#include <linux/smsc911x.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/pwm.h>
#include <linux/pwm_backlight.h>
#include <linux/platform_data/i2c-s3c2410.h>
#include <linux/platform_data/s3c-hsotg.h>
#include <linux/platform_data/usb-ehci-s5p.h>
#include <linux/platform_data/usb-exynos.h>
#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
#include <asm/mach-types.h>
#include <video/platform_lcd.h>
#include <video/samsung_fimd.h>
#include <plat/regs-serial.h>
#include <plat/regs-srom.h>
#include <plat/cpu.h>
#include <plat/devs.h>
#include <plat/fb.h>
#include <plat/keypad.h>
#include <plat/sdhci.h>
#include <plat/gpio-cfg.h>
#include <plat/backlight.h>
#include <plat/mfc.h>
#include <plat/clock.h>
#include <plat/hdmi.h>
#include <mach/map.h>
#include <drm/exynos_drm.h>
#include "common.h"
/* Following are default values for UCON, ULCON and UFCON UART registers */
#define SMDKV310_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \
S3C2410_UCON_RXILEVEL | \
S3C2410_UCON_TXIRQMODE | \
S3C2410_UCON_RXIRQMODE | \
S3C2410_UCON_RXFIFO_TOI | \
S3C2443_UCON_RXERR_IRQEN)
#define SMDKV310_ULCON_DEFAULT S3C2410_LCON_CS8
#define SMDKV310_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \
S5PV210_UFCON_TXTRIG4 | \
S5PV210_UFCON_RXTRIG4)
/*
 * Per-port UART setup for the four on-SoC serial ports; all share the
 * same UCON/ULCON/UFCON defaults defined above.
 */
static struct s3c2410_uartcfg smdkv310_uartcfgs[] __initdata = {
    [0] = {
        .hwport     = 0,
        .flags      = 0,
        .ucon       = SMDKV310_UCON_DEFAULT,
        .ulcon      = SMDKV310_ULCON_DEFAULT,
        .ufcon      = SMDKV310_UFCON_DEFAULT,
    },
    [1] = {
        .hwport     = 1,
        .flags      = 0,
        .ucon       = SMDKV310_UCON_DEFAULT,
        .ulcon      = SMDKV310_ULCON_DEFAULT,
        .ufcon      = SMDKV310_UFCON_DEFAULT,
    },
    [2] = {
        .hwport     = 2,
        .flags      = 0,
        .ucon       = SMDKV310_UCON_DEFAULT,
        .ulcon      = SMDKV310_ULCON_DEFAULT,
        .ufcon      = SMDKV310_UFCON_DEFAULT,
    },
    [3] = {
        .hwport     = 3,
        .flags      = 0,
        .ucon       = SMDKV310_UCON_DEFAULT,
        .ulcon      = SMDKV310_ULCON_DEFAULT,
        .ufcon      = SMDKV310_UFCON_DEFAULT,
    },
};
/*
 * SDHCI platform data for the four MMC channels.  Channels 0 and 2 use
 * the controller's internal card-detect (optionally 8-bit wide when
 * the matching Kconfig option is set); channels 1 and 3 use an
 * active-low external card-detect GPIO.
 */
static struct s3c_sdhci_platdata smdkv310_hsmmc0_pdata __initdata = {
    .cd_type        = S3C_SDHCI_CD_INTERNAL,
#ifdef CONFIG_EXYNOS4_SDHCI_CH0_8BIT
    .max_width      = 8,
    .host_caps      = MMC_CAP_8_BIT_DATA,
#endif
};

static struct s3c_sdhci_platdata smdkv310_hsmmc1_pdata __initdata = {
    .cd_type        = S3C_SDHCI_CD_GPIO,
    .ext_cd_gpio        = EXYNOS4_GPK0(2),
    .ext_cd_gpio_invert = 1,        /* card-detect line is active low */
};

static struct s3c_sdhci_platdata smdkv310_hsmmc2_pdata __initdata = {
    .cd_type        = S3C_SDHCI_CD_INTERNAL,
#ifdef CONFIG_EXYNOS4_SDHCI_CH2_8BIT
    .max_width      = 8,
    .host_caps      = MMC_CAP_8_BIT_DATA,
#endif
};

static struct s3c_sdhci_platdata smdkv310_hsmmc3_pdata __initdata = {
    .cd_type        = S3C_SDHCI_CD_GPIO,
    .ext_cd_gpio        = EXYNOS4_GPK2(2),
    .ext_cd_gpio_invert = 1,        /* card-detect line is active low */
};
/*
 * Power callback for the LTE480WV panel.  GPD0(1) drives the backlight
 * when the PWM backlight driver is not in charge of it; GPX0(6) is the
 * panel's nRESET line, pulsed low on power-up.
 * NOTE(review): gpio_request_one() return values are ignored here,
 * matching the surrounding board-file style.
 */
static void lcd_lte480wv_set_power(struct plat_lcd_data *pd,
                unsigned int power)
{
    if (power) {
#if !defined(CONFIG_BACKLIGHT_PWM)
        /* Backlight on (only when not PWM-controlled). */
        gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_HIGH, "GPD0");
        gpio_free(EXYNOS4_GPD0(1));
#endif
        /* fire nRESET on power up */
        gpio_request_one(EXYNOS4_GPX0(6), GPIOF_OUT_INIT_HIGH, "GPX0");
        mdelay(100);

        gpio_set_value(EXYNOS4_GPX0(6), 0);
        mdelay(10);

        gpio_set_value(EXYNOS4_GPX0(6), 1);
        mdelay(10);

        gpio_free(EXYNOS4_GPX0(6));
    } else {
#if !defined(CONFIG_BACKLIGHT_PWM)
        /* Backlight off. */
        gpio_request_one(EXYNOS4_GPD0(1), GPIOF_OUT_INIT_LOW, "GPD0");
        gpio_free(EXYNOS4_GPD0(1));
#endif
    }
}
/* Platform-lcd glue: hands panel power control to the callback above,
 * parented under FIMD0 so power follows the display controller. */
static struct plat_lcd_data smdkv310_lcd_lte480wv_data = {
    .set_power  = lcd_lte480wv_set_power,
};

static struct platform_device smdkv310_lcd_lte480wv = {
    .name           = "platform-lcd",
    .dev.parent     = &s5p_device_fimd0.dev,
    .dev.platform_data  = &smdkv310_lcd_lte480wv_data,
};
/*
 * Display timings for the 800x480 LTE480WV panel, provided either as
 * Exynos DRM platform data (when the DRM FIMD driver is enabled) or as
 * classic s3c-fb platform data.  The two variants carry identical
 * timing values and must be kept in sync.
 */
#ifdef CONFIG_DRM_EXYNOS_FIMD
static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
    .panel = {
        .timing = {
            .left_margin    = 13,
            .right_margin   = 8,
            .upper_margin   = 7,
            .lower_margin   = 5,
            .hsync_len  = 3,
            .vsync_len  = 1,
            .xres       = 800,
            .yres       = 480,
        },
    },
    .vidcon0    = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
    .vidcon1    = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
    .default_win    = 0,
    .bpp        = 32,
};
#else
static struct s3c_fb_pd_win smdkv310_fb_win0 = {
    .max_bpp    = 32,
    .default_bpp    = 24,
    .xres       = 800,
    .yres       = 480,
};

static struct fb_videomode smdkv310_lcd_timing = {
    .left_margin    = 13,
    .right_margin   = 8,
    .upper_margin   = 7,
    .lower_margin   = 5,
    .hsync_len  = 3,
    .vsync_len  = 1,
    .xres       = 800,
    .yres       = 480,
};

static struct s3c_fb_platdata smdkv310_lcd0_pdata __initdata = {
    .win[0]     = &smdkv310_fb_win0,
    .vtiming    = &smdkv310_lcd_timing,
    .vidcon0    = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
    .vidcon1    = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
    .setup_gpio = exynos4_fimd0_gpio_setup_24bpp,
};
#endif
/*
 * SMSC LAN9215 ethernet controller: mapped on SROM bank 1, interrupt
 * on external IRQ 5 (active low).  The 16-bit bus width here must
 * match the nCS1 configuration done in smdkv310_smsc911x_init().
 */
static struct resource smdkv310_smsc911x_resources[] = {
    [0] = DEFINE_RES_MEM(EXYNOS4_PA_SROM_BANK(1), SZ_64K),
    [1] = DEFINE_RES_NAMED(IRQ_EINT(5), 1, NULL, IORESOURCE_IRQ \
                    | IRQF_TRIGGER_LOW),
};

static struct smsc911x_platform_config smsc9215_config = {
    .irq_polarity   = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
    .irq_type   = SMSC911X_IRQ_TYPE_PUSH_PULL,
    .flags      = SMSC911X_USE_16BIT | SMSC911X_FORCE_INTERNAL_PHY,
    .phy_interface  = PHY_INTERFACE_MODE_MII,
    .mac        = {0x00, 0x80, 0x00, 0x23, 0x45, 0x67},  /* fixed board MAC */
};

static struct platform_device smdkv310_smsc911x = {
    .name       = "smsc911x",
    .id     = -1,
    .num_resources  = ARRAY_SIZE(smdkv310_smsc911x_resources),
    .resource   = smdkv310_smsc911x_resources,
    .dev        = {
        .platform_data  = &smsc9215_config,
    },
};
/* 2x8 matrix keypad map (only columns 3..7 are populated) and the
 * WM8994 audio codec on I2C bus 1. */
static uint32_t smdkv310_keymap[] __initdata = {
    /* KEY(row, col, keycode) */
    KEY(0, 3, KEY_1), KEY(0, 4, KEY_2), KEY(0, 5, KEY_3),
    KEY(0, 6, KEY_4), KEY(0, 7, KEY_5),
    KEY(1, 3, KEY_A), KEY(1, 4, KEY_B), KEY(1, 5, KEY_C),
    KEY(1, 6, KEY_D), KEY(1, 7, KEY_E)
};

static struct matrix_keymap_data smdkv310_keymap_data __initdata = {
    .keymap     = smdkv310_keymap,
    .keymap_size    = ARRAY_SIZE(smdkv310_keymap),
};

static struct samsung_keypad_platdata smdkv310_keypad_data __initdata = {
    .keymap_data    = &smdkv310_keymap_data,
    .rows       = 2,
    .cols       = 8,
};

static struct i2c_board_info i2c_devs1[] __initdata = {
    {I2C_BOARD_INFO("wm8994", 0x1a),},
};
/* USB EHCI: register empty (all-default) platform data. */
static struct s5p_ehci_platdata smdkv310_ehci_pdata;

static void __init smdkv310_ehci_init(void)
{
    struct s5p_ehci_platdata *pdata = &smdkv310_ehci_pdata;

    s5p_ehci_set_platdata(pdata);
}

/* USB OHCI: likewise, defaults only. */
static struct exynos4_ohci_platdata smdkv310_ohci_pdata;

static void __init smdkv310_ohci_init(void)
{
    struct exynos4_ohci_platdata *pdata = &smdkv310_ohci_pdata;

    exynos4_ohci_set_platdata(pdata);
}

/* USB OTG: default platform data, handed to s3c_hsotg_set_platdata()
 * in smdkv310_machine_init(). */
static struct s3c_hsotg_plat smdkv310_hsotg_pdata;

/* Audio device: binds the "smdk-audio" ASoC machine driver. */
static struct platform_device smdkv310_device_audio = {
    .name = "smdk-audio",
    .id = -1,
};
/* All platform devices registered for this board in one pass by
 * platform_add_devices() at the end of smdkv310_machine_init(). */
static struct platform_device *smdkv310_devices[] __initdata = {
    &s3c_device_hsmmc0,
    &s3c_device_hsmmc1,
    &s3c_device_hsmmc2,
    &s3c_device_hsmmc3,
    &s3c_device_i2c1,
    &s5p_device_i2c_hdmiphy,
    &s3c_device_rtc,
    &s3c_device_usb_hsotg,
    &s3c_device_wdt,
    &s5p_device_ehci,
    &s5p_device_fimc0,
    &s5p_device_fimc1,
    &s5p_device_fimc2,
    &s5p_device_fimc3,
    &s5p_device_fimc_md,
    &s5p_device_g2d,
    &s5p_device_jpeg,
    &exynos4_device_ac97,
    &exynos4_device_i2s0,
    &exynos4_device_ohci,
    &samsung_device_keypad,
    &s5p_device_mfc,
    &s5p_device_mfc_l,
    &s5p_device_mfc_r,
    &exynos4_device_spdif,
    &samsung_asoc_idma,
    &s5p_device_fimd0,
    &smdkv310_device_audio,
    &smdkv310_lcd_lte480wv,
    &smdkv310_smsc911x,
    &exynos4_device_ahci,
    &s5p_device_hdmi,
    &s5p_device_mixer,
};
/*
 * Program the SROM controller's chip-select 1 for the SMSC9215:
 * 16-bit data width with wait/byte-enable, plus access timings suited
 * to the ethernet chip.  Must run before the smsc911x driver probes.
 */
static void __init smdkv310_smsc911x_init(void)
{
    u32 cs1;

    /* configure nCS1 width to 16 bits */
    cs1 = __raw_readl(S5P_SROM_BW) &
        ~(S5P_SROM_BW__CS_MASK << S5P_SROM_BW__NCS1__SHIFT);
    cs1 |= ((1 << S5P_SROM_BW__DATAWIDTH__SHIFT) |
        (1 << S5P_SROM_BW__WAITENABLE__SHIFT) |
        (1 << S5P_SROM_BW__BYTEENABLE__SHIFT)) <<
        S5P_SROM_BW__NCS1__SHIFT;
    __raw_writel(cs1, S5P_SROM_BW);

    /* set timing for nCS1 suitable for ethernet chip */
    __raw_writel((0x1 << S5P_SROM_BCX__PMC__SHIFT) |
            (0x9 << S5P_SROM_BCX__TACP__SHIFT) |
            (0xc << S5P_SROM_BCX__TCAH__SHIFT) |
            (0x1 << S5P_SROM_BCX__TCOH__SHIFT) |
            (0x6 << S5P_SROM_BCX__TACC__SHIFT) |
            (0x1 << S5P_SROM_BCX__TCOS__SHIFT) |
            (0x1 << S5P_SROM_BCX__TACS__SHIFT), S5P_SROM_BC1);
}
/* LCD Backlight data: PWM channel 1 on GPD0(1) (special function 2),
 * driven by the pwm-backlight driver via the lookup table below. */
static struct samsung_bl_gpio_info smdkv310_bl_gpio_info = {
    .no = EXYNOS4_GPD0(1),
    .func = S3C_GPIO_SFN(2),
};

static struct platform_pwm_backlight_data smdkv310_bl_data = {
    .pwm_id = 1,
    .pwm_period_ns = 1000,
};

/* I2C module and id for HDMIPHY */
static struct i2c_board_info hdmiphy_info = {
    I2C_BOARD_INFO("hdmiphy-exynos4210", 0x38),
};

/* Maps s3c24xx PWM 1 to the backlight consumer. */
static struct pwm_lookup smdkv310_pwm_lookup[] = {
    PWM_LOOKUP("s3c24xx-pwm.1", 0, "pwm-backlight.0", NULL),
};
/* Route the hot-plug-detect pin (GPX3.7) straight to the HDMI chip:
 * claim it as an input, switch it to its HDMI special function and
 * disable the internal pull. */
static void s5p_tv_setup(void)
{
    unsigned int hpd_gpio = EXYNOS4_GPX3(7);

    WARN_ON(gpio_request_one(hpd_gpio, GPIOF_IN, "hpd-plug"));
    s3c_gpio_cfgpin(hpd_gpio, S3C_GPIO_SFN(0x3));
    s3c_gpio_setpull(hpd_gpio, S3C_GPIO_PULL_NONE);
}
/* Early map/clock/UART bring-up; the order of these three calls is
 * fixed (I/O mappings before clocks before UARTs). */
static void __init smdkv310_map_io(void)
{
    exynos_init_io(NULL, 0);
    s3c24xx_init_clocks(clk_xusbxti.rate);
    s3c24xx_init_uarts(smdkv310_uartcfgs, ARRAY_SIZE(smdkv310_uartcfgs));
}

/* Carve out two 8 MiB regions for the MFC codec's left/right banks.
 * NOTE(review): addresses 0x43000000/0x51000000 are board-fixed. */
static void __init smdkv310_reserve(void)
{
    s5p_mfc_reserve_mem(0x43000000, 8 << 20, 0x51000000, 8 << 20);
}
/*
 * Main board init: attach platform data to every on-SoC controller,
 * then register all devices in one shot.  Platform data must be set
 * before platform_add_devices() so drivers see it at probe time.
 */
static void __init smdkv310_machine_init(void)
{
    s3c_i2c1_set_platdata(NULL);
    i2c_register_board_info(1, i2c_devs1, ARRAY_SIZE(i2c_devs1));

    smdkv310_smsc911x_init();   /* SROM bus setup before smsc911x probes */

    s3c_sdhci0_set_platdata(&smdkv310_hsmmc0_pdata);
    s3c_sdhci1_set_platdata(&smdkv310_hsmmc1_pdata);
    s3c_sdhci2_set_platdata(&smdkv310_hsmmc2_pdata);
    s3c_sdhci3_set_platdata(&smdkv310_hsmmc3_pdata);

    s5p_tv_setup();
    s5p_i2c_hdmiphy_set_platdata(NULL);
    s5p_hdmi_set_platdata(&hdmiphy_info, NULL, 0);

    samsung_keypad_set_platdata(&smdkv310_keypad_data);

    samsung_bl_set(&smdkv310_bl_gpio_info, &smdkv310_bl_data);
    pwm_add_table(smdkv310_pwm_lookup, ARRAY_SIZE(smdkv310_pwm_lookup));

#ifdef CONFIG_DRM_EXYNOS_FIMD
    s5p_device_fimd0.dev.platform_data = &drm_fimd_pdata;
    exynos4_fimd0_gpio_setup_24bpp();
#else
    s5p_fimd0_set_platdata(&smdkv310_lcd0_pdata);
#endif

    smdkv310_ehci_init();
    smdkv310_ohci_init();
    s3c_hsotg_set_platdata(&smdkv310_hsotg_pdata);

    platform_add_devices(smdkv310_devices, ARRAY_SIZE(smdkv310_devices));
}
/* Machine descriptors: SMDKV310 and SMDKC210 share everything except
 * .init_late, which only SMDKC210 sets.
 * NOTE(review): the missing .init_late on SMDKV310 looks like an
 * oversight - confirm against the upstream board file. */
MACHINE_START(SMDKV310, "SMDKV310")
    /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
    /* Maintainer: Changhwan Youn <chaos.youn@samsung.com> */
    .atag_offset    = 0x100,
    .smp        = smp_ops(exynos_smp_ops),
    .init_irq   = exynos4_init_irq,
    .map_io     = smdkv310_map_io,
    .handle_irq = gic_handle_irq,
    .init_machine   = smdkv310_machine_init,
    .timer      = &exynos4_timer,
    .reserve    = &smdkv310_reserve,
    .restart    = exynos4_restart,
MACHINE_END

MACHINE_START(SMDKC210, "SMDKC210")
    /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
    .atag_offset    = 0x100,
    .smp        = smp_ops(exynos_smp_ops),
    .init_irq   = exynos4_init_irq,
    .map_io     = smdkv310_map_io,
    .handle_irq = gic_handle_irq,
    .init_machine   = smdkv310_machine_init,
    .init_late  = exynos_init_late,
    .timer      = &exynos4_timer,
    .reserve    = &smdkv310_reserve,
    .restart    = exynos4_restart,
MACHINE_END
| gpl-2.0 |
kevin0100/android_kernel_cyanogen_msm8916 | kernel/fork.c | 92 | 47921 | /*
* linux/kernel/fork.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*/
/*
* 'fork.c' contains the help-routines for the 'fork' system call
* (see also entry.S and others).
* Fork is rather simple, once you get the hang of it, but the memory
* management can be a bitch. See 'mm/memory.c': 'copy_page_range()'
*/
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/completion.h>
#include <linux/personality.h>
#include <linux/mempolicy.h>
#include <linux/sem.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/iocontext.h>
#include <linux/key.h>
#include <linux/binfmts.h>
#include <linux/mman.h>
#include <linux/mmu_notifier.h>
#include <linux/fs.h>
#include <linux/nsproxy.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/cgroup.h>
#include <linux/security.h>
#include <linux/hugetlb.h>
#include <linux/seccomp.h>
#include <linux/swap.h>
#include <linux/syscalls.h>
#include <linux/jiffies.h>
#include <linux/futex.h>
#include <linux/compat.h>
#include <linux/kthread.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/rcupdate.h>
#include <linux/ptrace.h>
#include <linux/mount.h>
#include <linux/audit.h>
#include <linux/memcontrol.h>
#include <linux/ftrace.h>
#include <linux/proc_fs.h>
#include <linux/profile.h>
#include <linux/rmap.h>
#include <linux/ksm.h>
#include <linux/acct.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/freezer.h>
#include <linux/delayacct.h>
#include <linux/taskstats_kern.h>
#include <linux/random.h>
#include <linux/tty.h>
#include <linux/blkdev.h>
#include <linux/fs_struct.h>
#include <linux/magic.h>
#include <linux/perf_event.h>
#include <linux/posix-timers.h>
#include <linux/user-return-notifier.h>
#include <linux/oom.h>
#include <linux/khugepaged.h>
#include <linux/signalfd.h>
#include <linux/uprobes.h>
#include <linux/aio.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <trace/events/sched.h>
#define CREATE_TRACE_POINTS
#include <trace/events/task.h>
/*
 * Protected counters by write_lock_irq(&tasklist_lock)
 */
unsigned long total_forks;  /* Handle normal Linux uptimes. */
int nr_threads;         /* The idle threads do not count.. */

int max_threads;        /* tunable limit on nr_threads */

DEFINE_PER_CPU(unsigned long, process_counts) = 0;

__cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */

#ifdef CONFIG_PROVE_RCU
/* Allows RCU-lockdep annotations elsewhere to assert that
 * tasklist_lock is actually held. */
int lockdep_tasklist_lock_is_held(void)
{
    return lockdep_is_held(&tasklist_lock);
}
EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
#endif /* #ifdef CONFIG_PROVE_RCU */

/* Sum the per-CPU process counters over all possible CPUs. */
int nr_processes(void)
{
    int cpu;
    int total = 0;

    for_each_possible_cpu(cpu)
        total += per_cpu(process_counts, cpu);

    return total;
}
/* Arch hook, default no-op: free arch-private task_struct state. */
void __weak arch_release_task_struct(struct task_struct *tsk)
{
}

#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
/* Generic task_struct allocator backed by a slab cache (created in
 * fork_init()); architectures may supply their own instead. */
static struct kmem_cache *task_struct_cachep;

static inline struct task_struct *alloc_task_struct_node(int node)
{
    return kmem_cache_alloc_node(task_struct_cachep, GFP_KERNEL, node);
}

static inline void free_task_struct(struct task_struct *tsk)
{
    kmem_cache_free(task_struct_cachep, tsk);
}
#endif

/* Arch hook, default no-op: free arch-private thread_info state. */
void __weak arch_release_thread_info(struct thread_info *ti)
{
}

#ifndef CONFIG_ARCH_THREAD_INFO_ALLOCATOR
/*
 * Allocate pages if THREAD_SIZE is >= PAGE_SIZE, otherwise use a
 * kmemcache based allocator.
 */
# if THREAD_SIZE >= PAGE_SIZE
static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
                int node)
{
    struct page *page = alloc_pages_node(node, THREADINFO_GFP_ACCOUNTED,
                    THREAD_SIZE_ORDER);

    return page ? page_address(page) : NULL;
}

static inline void free_thread_info(struct thread_info *ti)
{
    free_memcg_kmem_pages((unsigned long)ti, THREAD_SIZE_ORDER);
}
# else
static struct kmem_cache *thread_info_cache;

static struct thread_info *alloc_thread_info_node(struct task_struct *tsk,
                int node)
{
    return kmem_cache_alloc_node(thread_info_cache, THREADINFO_GFP, node);
}

static void free_thread_info(struct thread_info *ti)
{
    kmem_cache_free(thread_info_cache, ti);
}

/* Boot-time creation of the sub-page thread_info cache. */
void thread_info_cache_init(void)
{
    thread_info_cache = kmem_cache_create("thread_info", THREAD_SIZE,
                    THREAD_SIZE, 0, NULL);
    BUG_ON(thread_info_cache == NULL);
}
# endif
#endif
/* SLAB cache for signal_struct structures (tsk->signal) */
static struct kmem_cache *signal_cachep;

/* SLAB cache for sighand_struct structures (tsk->sighand) */
struct kmem_cache *sighand_cachep;

/* SLAB cache for files_struct structures (tsk->files) */
struct kmem_cache *files_cachep;

/* SLAB cache for fs_struct structures (tsk->fs) */
struct kmem_cache *fs_cachep;

/* SLAB cache for vm_area_struct structures */
struct kmem_cache *vm_area_cachep;

/* SLAB cache for mm_struct structures (tsk->mm) */
static struct kmem_cache *mm_cachep;

/* Notifier list called when a task struct is freed */
static ATOMIC_NOTIFIER_HEAD(task_free_notifier);

/* Track kernel-stack pages in the owning zone's NR_KERNEL_STACK
 * counter; @account is +1 on allocation, -1 on free. */
static void account_kernel_stack(struct thread_info *ti, int account)
{
    struct zone *zone = page_zone(virt_to_page(ti));

    mod_zone_page_state(zone, NR_KERNEL_STACK, account);
}
/* Final teardown of a task: unaccount and release the kernel stack,
 * then per-subsystem cleanup, then the task_struct itself.  The
 * ordering (stack before task_struct) is deliberate. */
void free_task(struct task_struct *tsk)
{
    account_kernel_stack(tsk->stack, -1);
    arch_release_thread_info(tsk->stack);
    free_thread_info(tsk->stack);
    rt_mutex_debug_task_free(tsk);
    ftrace_graph_exit_task(tsk);
    put_seccomp_filter(tsk);
    arch_release_task_struct(tsk);
    free_task_struct(tsk);
}
EXPORT_SYMBOL(free_task);

/* Release a signal_struct once its refcount has dropped to zero. */
static inline void free_signal_struct(struct signal_struct *sig)
{
    taskstats_tgid_free(sig);
    sched_autogroup_exit(sig);
    kmem_cache_free(signal_cachep, sig);
}

/* Drop one reference on @sig; frees it on the last put. */
static inline void put_signal_struct(struct signal_struct *sig)
{
    if (atomic_dec_and_test(&sig->sigcnt))
        free_signal_struct(sig);
}

/* Register/unregister callbacks invoked from __put_task_struct()
 * just before a task is freed (vendor extension, not upstream). */
int task_free_register(struct notifier_block *n)
{
    return atomic_notifier_chain_register(&task_free_notifier, n);
}
EXPORT_SYMBOL(task_free_register);

int task_free_unregister(struct notifier_block *n)
{
    return atomic_notifier_chain_unregister(&task_free_notifier, n);
}
EXPORT_SYMBOL(task_free_unregister);
/* Called when the last reference to a (dead) task is dropped:
 * release security/cred/accounting state, notify the free chain,
 * and free the task unless a profiling handler took ownership. */
void __put_task_struct(struct task_struct *tsk)
{
    WARN_ON(!tsk->exit_state);      /* must already be dead */
    WARN_ON(atomic_read(&tsk->usage));  /* refcount must be zero */
    WARN_ON(tsk == current);        /* cannot free ourselves */

    security_task_free(tsk);
    exit_creds(tsk);
    delayacct_tsk_free(tsk);
    put_signal_struct(tsk->signal);

    atomic_notifier_call_chain(&task_free_notifier, 0, tsk);
    /* profile_handoff_task() returning nonzero means a profiling
     * consumer now owns the task and will free it itself. */
    if (!profile_handoff_task(tsk))
        free_task(tsk);
}
EXPORT_SYMBOL_GPL(__put_task_struct);
/* Arch hook, default no-op: set up arch-private task caches. */
void __init __weak arch_task_cache_init(void) { }

/* Boot-time fork setup: create the task_struct slab cache and derive
 * the default max_threads limit from available memory. */
void __init fork_init(unsigned long mempages)
{
#ifndef CONFIG_ARCH_TASK_STRUCT_ALLOCATOR
#ifndef ARCH_MIN_TASKALIGN
#define ARCH_MIN_TASKALIGN  L1_CACHE_BYTES
#endif
    /* create a slab on which task_structs can be allocated */
    task_struct_cachep =
        kmem_cache_create("task_struct", sizeof(struct task_struct),
            ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
#endif

    /* do the arch specific task caches init */
    arch_task_cache_init();

    /*
     * The default maximum number of threads is set to a safe
     * value: the thread structures can take up at most half
     * of memory.
     */
    max_threads = mempages / (8 * THREAD_SIZE / PAGE_SIZE);

    /*
     * we need to allow at least 20 threads to boot a system
     */
    if (max_threads < 20)
        max_threads = 20;

    init_task.signal->rlim[RLIMIT_NPROC].rlim_cur = max_threads/2;
    init_task.signal->rlim[RLIMIT_NPROC].rlim_max = max_threads/2;
    init_task.signal->rlim[RLIMIT_SIGPENDING] =
        init_task.signal->rlim[RLIMIT_NPROC];
}

/* Default task_struct copy; architectures override to handle extra
 * per-arch state (weak symbol). */
int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
                struct task_struct *src)
{
    *dst = *src;
    return 0;
}
/*
 * Allocate a new task_struct + kernel stack and copy @orig into them.
 * Returns the new task with a usage count of 2 (one for the caller,
 * one for whoever calls release_task()), or NULL on failure.
 */
static struct task_struct *dup_task_struct(struct task_struct *orig)
{
    struct task_struct *tsk;
    struct thread_info *ti;
    unsigned long *stackend;
    int node = tsk_fork_get_node(orig);  /* preferred NUMA node */
    int err;

    tsk = alloc_task_struct_node(node);
    if (!tsk)
        return NULL;

    ti = alloc_thread_info_node(tsk, node);
    if (!ti)
        goto free_tsk;

    err = arch_dup_task_struct(tsk, orig);
    if (err)
        goto free_ti;

    tsk->stack = ti;

    setup_thread_stack(tsk, orig);
    clear_user_return_notifier(tsk);
    clear_tsk_need_resched(tsk);
    stackend = end_of_stack(tsk);
    *stackend = STACK_END_MAGIC;    /* for overflow detection */

#ifdef CONFIG_CC_STACKPROTECTOR
    /* Fresh canary per task so siblings don't share one. */
    tsk->stack_canary = get_random_int();
#endif

    /*
     * One for us, one for whoever does the "release_task()" (usually
     * parent)
     */
    atomic_set(&tsk->usage, 2);
#ifdef CONFIG_BLK_DEV_IO_TRACE
    tsk->btrace_seq = 0;
#endif
    tsk->splice_pipe = NULL;
    tsk->task_frag.page = NULL;

    account_kernel_stack(ti, 1);

    return tsk;

free_ti:
    free_thread_info(ti);
free_tsk:
    free_task_struct(tsk);
    return NULL;
}
#ifdef CONFIG_MMU
/*
 * Duplicate the parent's address space layout into a freshly
 * initialized @mm: clone every VMA (except VM_DONTCOPY ones), link it
 * into the new mm's list/rbtree and file mappings, and copy the page
 * tables.  Called with neither mm linked into any task yet; takes
 * oldmm->mmap_sem then mm->mmap_sem (nested).
 */
static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
    struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
    struct rb_node **rb_link, *rb_parent;
    int retval;
    unsigned long charge;
    struct mempolicy *pol;

    uprobe_start_dup_mmap();
    down_write(&oldmm->mmap_sem);
    flush_cache_dup_mm(oldmm);
    uprobe_dup_mmap(oldmm, mm);
    /*
     * Not linked in yet - no deadlock potential:
     */
    down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);

    mm->locked_vm = 0;
    mm->mmap = NULL;
    mm->mmap_cache = NULL;
    mm->map_count = 0;
    cpumask_clear(mm_cpumask(mm));
    mm->mm_rb = RB_ROOT;
    rb_link = &mm->mm_rb.rb_node;
    rb_parent = NULL;
    pprev = &mm->mmap;
    retval = ksm_fork(mm, oldmm);
    if (retval)
        goto out;
    retval = khugepaged_fork(mm, oldmm);
    if (retval)
        goto out;

    prev = NULL;
    for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
        struct file *file;

        if (mpnt->vm_flags & VM_DONTCOPY) {
            /* Skipped VMAs still need their stats un-accounted. */
            vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
                            -vma_pages(mpnt));
            continue;
        }
        charge = 0;
        if (mpnt->vm_flags & VM_ACCOUNT) {
            unsigned long len = vma_pages(mpnt);

            if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
                goto fail_nomem;
            charge = len;
        }
        tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
        if (!tmp)
            goto fail_nomem;
        *tmp = *mpnt;
        INIT_LIST_HEAD(&tmp->anon_vma_chain);
        pol = mpol_dup(vma_policy(mpnt));
        retval = PTR_ERR(pol);
        if (IS_ERR(pol))
            goto fail_nomem_policy;
        vma_set_policy(tmp, pol);
        tmp->vm_mm = mm;
        if (anon_vma_fork(tmp, mpnt))
            goto fail_nomem_anon_vma_fork;
        tmp->vm_flags &= ~VM_LOCKED;  /* mlock state is not inherited */
        tmp->vm_next = tmp->vm_prev = NULL;
        file = tmp->vm_file;
        if (file) {
            struct inode *inode = file_inode(file);
            struct address_space *mapping = file->f_mapping;

            get_file(file);
            if (tmp->vm_flags & VM_DENYWRITE)
                atomic_dec(&inode->i_writecount);
            mutex_lock(&mapping->i_mmap_mutex);
            if (tmp->vm_flags & VM_SHARED)
                mapping->i_mmap_writable++;
            flush_dcache_mmap_lock(mapping);
            /* insert tmp into the share list, just after mpnt */
            if (unlikely(tmp->vm_flags & VM_NONLINEAR))
                vma_nonlinear_insert(tmp,
                        &mapping->i_mmap_nonlinear);
            else
                vma_interval_tree_insert_after(tmp, mpnt,
                        &mapping->i_mmap);
            flush_dcache_mmap_unlock(mapping);
            mutex_unlock(&mapping->i_mmap_mutex);
        }

        /*
         * Clear hugetlb-related page reserves for children. This only
         * affects MAP_PRIVATE mappings. Faults generated by the child
         * are not guaranteed to succeed, even if read-only
         */
        if (is_vm_hugetlb_page(tmp))
            reset_vma_resv_huge_pages(tmp);

        /*
         * Link in the new vma and copy the page table entries.
         */
        *pprev = tmp;
        pprev = &tmp->vm_next;
        tmp->vm_prev = prev;
        prev = tmp;

        __vma_link_rb(mm, tmp, rb_link, rb_parent);
        rb_link = &tmp->vm_rb.rb_right;
        rb_parent = &tmp->vm_rb;

        mm->map_count++;
        retval = copy_page_range(mm, oldmm, mpnt);

        /* ->open runs even if copy_page_range failed, since the VMA
         * is already linked and will be torn down via ->close. */
        if (tmp->vm_ops && tmp->vm_ops->open)
            tmp->vm_ops->open(tmp);

        if (retval)
            goto out;
    }
    /* a new mm has just been created */
    arch_dup_mmap(oldmm, mm);
    retval = 0;
out:
    up_write(&mm->mmap_sem);
    flush_tlb_mm(oldmm);
    up_write(&oldmm->mmap_sem);
    uprobe_end_dup_mmap();
    return retval;
fail_nomem_anon_vma_fork:
    mpol_put(pol);
fail_nomem_policy:
    kmem_cache_free(vm_area_cachep, tmp);
fail_nomem:
    retval = -ENOMEM;
    vm_unacct_memory(charge);
    goto out;
}

/* Allocate the top-level page directory for @mm. */
static inline int mm_alloc_pgd(struct mm_struct *mm)
{
    mm->pgd = pgd_alloc(mm);
    if (unlikely(!mm->pgd))
        return -ENOMEM;
    return 0;
}

static inline void mm_free_pgd(struct mm_struct *mm)
{
    pgd_free(mm, mm->pgd);
}
#else
/* !CONFIG_MMU: no page tables or VMAs to duplicate. */
#define dup_mmap(mm, oldmm) (0)
#define mm_alloc_pgd(mm)    (0)
#define mm_free_pgd(mm)
#endif /* CONFIG_MMU */
/* Guards the global list of mms linked via mm->mmlist. */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

#define allocate_mm()   (kmem_cache_alloc(mm_cachep, GFP_KERNEL))
#define free_mm(mm) (kmem_cache_free(mm_cachep, (mm)))

static unsigned long default_dump_filter = MMF_DUMP_FILTER_DEFAULT;

/* Parse the "coredump_filter=" boot parameter into the default dump
 * filter bits applied to new mms. */
static int __init coredump_filter_setup(char *s)
{
    default_dump_filter =
        (simple_strtoul(s, NULL, 0) << MMF_DUMP_FILTER_SHIFT) &
        MMF_DUMP_FILTER_MASK;
    return 1;
}

__setup("coredump_filter=", coredump_filter_setup);

#include <linux/init_task.h>

/* Initialize the per-mm AIO context bookkeeping (no-op without AIO). */
static void mm_init_aio(struct mm_struct *mm)
{
#ifdef CONFIG_AIO
    spin_lock_init(&mm->ioctx_lock);
    INIT_HLIST_HEAD(&mm->ioctx_list);
#endif
}
/*
 * Initialize a freshly-allocated mm_struct for owner @p: refcounts,
 * locks, counters, and the page directory.  Returns @mm on success;
 * frees it and returns NULL if pgd allocation fails.
 */
static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
    atomic_set(&mm->mm_users, 1);
    atomic_set(&mm->mm_count, 1);
    init_rwsem(&mm->mmap_sem);
    INIT_LIST_HEAD(&mm->mmlist);
    /* Inherit the current mm's inheritable flags, else boot defaults. */
    mm->flags = (current->mm) ?
        (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
    mm->core_state = NULL;
    mm->nr_ptes = 0;
    memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
    spin_lock_init(&mm->page_table_lock);
    mm_init_aio(mm);
    mm_init_owner(mm, p);
    clear_tlb_flush_pending(mm);

    if (likely(!mm_alloc_pgd(mm))) {
        mm->def_flags = 0;
        mmu_notifier_mm_init(mm);
        return mm;
    }

    free_mm(mm);
    return NULL;
}

/* Debug check on teardown: complain about any leaked RSS counters or
 * a leftover huge-page PTE. */
static void check_mm(struct mm_struct *mm)
{
    int i;

    for (i = 0; i < NR_MM_COUNTERS; i++) {
        long x = atomic_long_read(&mm->rss_stat.count[i]);

        if (unlikely(x))
            printk(KERN_ALERT "BUG: Bad rss-counter state "
                    "mm:%p idx:%d val:%ld\n", mm, i, x);
    }

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    VM_BUG_ON(mm->pmd_huge_pte);
#endif
}
/*
 * Allocate and initialize an mm_struct.
 */
struct mm_struct *mm_alloc(void)
{
    struct mm_struct *mm;

    mm = allocate_mm();
    if (!mm)
        return NULL;

    memset(mm, 0, sizeof(*mm));
    mm_init_cpumask(mm);
    return mm_init(mm, current);
}

/*
 * Called when the last reference to the mm
 * is dropped: either by a lazy thread or by
 * mmput. Free the page directory and the mm.
 */
void __mmdrop(struct mm_struct *mm)
{
    BUG_ON(mm == &init_mm);     /* init_mm is never freed */
    mm_free_pgd(mm);
    destroy_context(mm);
    mmu_notifier_mm_destroy(mm);
    check_mm(mm);
    free_mm(mm);
}
EXPORT_SYMBOL_GPL(__mmdrop);
/*
 * Decrement the use count and release all resources for an mm.
 * NOTE: unlike the upstream void mmput(), this tree's variant returns
 * 1 if this call dropped the last user reference (and tore the mm
 * down), 0 otherwise.
 */
int mmput(struct mm_struct *mm)
{
    int mm_freed = 0;

    might_sleep();

    if (atomic_dec_and_test(&mm->mm_users)) {
        uprobe_clear_state(mm);
        exit_aio(mm);
        ksm_exit(mm);
        khugepaged_exit(mm); /* must run before exit_mmap */
        exit_mmap(mm);
        set_mm_exe_file(mm, NULL);
        if (!list_empty(&mm->mmlist)) {
            spin_lock(&mmlist_lock);
            list_del(&mm->mmlist);
            spin_unlock(&mmlist_lock);
        }
        if (mm->binfmt)
            module_put(mm->binfmt->module);
        mmdrop(mm);
        mm_freed = 1;
    }

    return mm_freed;
}
EXPORT_SYMBOL_GPL(mmput);
/* Replace mm->exe_file with @new_exe_file, taking a reference on the
 * new file before dropping the old one. */
void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file)
{
    if (new_exe_file)
        get_file(new_exe_file);
    if (mm->exe_file)
        fput(mm->exe_file);
    mm->exe_file = new_exe_file;
}

/* Return mm->exe_file with an extra reference (caller must fput), or
 * NULL if the mm has no executable file. */
struct file *get_mm_exe_file(struct mm_struct *mm)
{
    struct file *exe_file;

    /* We need mmap_sem to protect against races with removal of exe_file */
    down_read(&mm->mmap_sem);
    exe_file = mm->exe_file;
    if (exe_file)
        get_file(exe_file);
    up_read(&mm->mmap_sem);
    return exe_file;
}

static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm)
{
    /* It's safe to write the exe_file pointer without exe_file_lock because
     * this is called during fork when the task is not yet in /proc */
    newmm->exe_file = get_mm_exe_file(oldmm);
}
/**
 * get_task_mm - acquire a reference to the task's mm
 *
 * Returns %NULL if the task has no mm. Checks PF_KTHREAD (meaning
 * this kernel workthread has transiently adopted a user mm with use_mm,
 * to do its AIO) is not set and if so returns a reference to it, after
 * bumping up the use count. User must release the mm via mmput()
 * after use. Typically used by /proc and ptrace.
 */
struct mm_struct *get_task_mm(struct task_struct *task)
{
    struct mm_struct *mm;

    task_lock(task);
    mm = task->mm;
    if (mm) {
        if (task->flags & PF_KTHREAD)
            mm = NULL;
        else
            atomic_inc(&mm->mm_users);
    }
    task_unlock(task);
    return mm;
}
EXPORT_SYMBOL_GPL(get_task_mm);

/*
 * Like get_task_mm(), but enforces ptrace access @mode unless the
 * caller owns the mm or has CAP_SYS_RESOURCE.  Returns the mm, NULL,
 * or an ERR_PTR (-EINTR if killed while waiting on cred_guard_mutex,
 * -EACCES if access is denied).
 */
struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
{
    struct mm_struct *mm;
    int err;

    err = mutex_lock_killable(&task->signal->cred_guard_mutex);
    if (err)
        return ERR_PTR(err);

    mm = get_task_mm(task);
    if (mm && mm != current->mm &&
            !ptrace_may_access(task, mode) &&
            !capable(CAP_SYS_RESOURCE)) {
        mmput(mm);
        mm = ERR_PTR(-EACCES);
    }
    mutex_unlock(&task->signal->cred_guard_mutex);

    return mm;
}
/* Signal the vfork parent (if still waiting) and clear the pointer so
 * it fires at most once; tsk's task_lock serializes against the
 * killed-parent path in wait_for_vfork_done(). */
static void complete_vfork_done(struct task_struct *tsk)
{
    struct completion *vfork;

    task_lock(tsk);
    vfork = tsk->vfork_done;
    if (likely(vfork)) {
        tsk->vfork_done = NULL;
        complete(vfork);
    }
    task_unlock(tsk);
}

/* Parent side of vfork: sleep (killably, not counted by the freezer)
 * until the child execs or exits.  On a fatal signal, detach the
 * completion so the child won't signal freed stack memory.  Drops the
 * child reference taken at fork.  Returns nonzero if killed. */
static int wait_for_vfork_done(struct task_struct *child,
                struct completion *vfork)
{
    int killed;

    freezer_do_not_count();
    killed = wait_for_completion_killable(vfork);
    freezer_count();

    if (killed) {
        task_lock(child);
        child->vfork_done = NULL;
        task_unlock(child);
    }

    put_task_struct(child);
    return killed;
}
/* Please note the differences between mmput and mm_release.
 * mmput is called whenever we stop holding onto a mm_struct,
 * error success whatever.
 *
 * mm_release is called after a mm_struct has been removed
 * from the current process.
 *
 * This difference is important for error handling, when we
 * only half set up a mm_struct for a new process and need to restore
 * the old one. Because we mmput the new mm_struct before
 * restoring the old one. . .
 * Eric Biederman 10 January 1998
 */
void mm_release(struct task_struct *tsk, struct mm_struct *mm)
{
    /* Get rid of any futexes when releasing the mm */
#ifdef CONFIG_FUTEX
    if (unlikely(tsk->robust_list)) {
        exit_robust_list(tsk);
        tsk->robust_list = NULL;
    }
#ifdef CONFIG_COMPAT
    if (unlikely(tsk->compat_robust_list)) {
        compat_exit_robust_list(tsk);
        tsk->compat_robust_list = NULL;
    }
#endif
    if (unlikely(!list_empty(&tsk->pi_state_list)))
        exit_pi_state_list(tsk);
#endif

    uprobe_free_utask(tsk);

    /* Get rid of any cached register state */
    deactivate_mm(tsk, mm);

    /*
     * If we're exiting normally, clear a user-space tid field if
     * requested. We leave this alone when dying by signal, to leave
     * the value intact in a core dump, and to save the unnecessary
     * trouble, say, a killed vfork parent shouldn't touch this mm.
     * Userland only wants this done for a sys_exit.
     */
    if (tsk->clear_child_tid) {
        if (!(tsk->flags & PF_SIGNALED) &&
                atomic_read(&mm->mm_users) > 1) {
            /*
             * We don't check the error code - if userspace has
             * not set up a proper pointer then tough luck.
             */
            put_user(0, tsk->clear_child_tid);
            sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
                    1, NULL, NULL, 0);
        }
        tsk->clear_child_tid = NULL;
    }

    /*
     * All done, finally we can wake up parent and return this mm to him.
     * Also kthread_stop() uses this completion for synchronization.
     */
    if (tsk->vfork_done)
        complete_vfork_done(tsk);
}
/*
 * Allocate a new mm structure and copy contents from the
 * mm structure of the passed in task structure.
 * Returns the new mm, or NULL on failure (or if current has no mm).
 */
struct mm_struct *dup_mm(struct task_struct *tsk)
{
    struct mm_struct *mm, *oldmm = current->mm;
    int err;

    if (!oldmm)
        return NULL;

    mm = allocate_mm();
    if (!mm)
        goto fail_nomem;

    /* Bulk-copy the parent mm, then re-initialize everything that
     * must not be shared (refcounts, locks, pgd, ...). */
    memcpy(mm, oldmm, sizeof(*mm));
    mm_init_cpumask(mm);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
    mm->pmd_huge_pte = NULL;
#endif
#ifdef CONFIG_NUMA_BALANCING
    mm->first_nid = NUMA_PTE_SCAN_INIT;
#endif
    if (!mm_init(mm, tsk))
        goto fail_nomem;

    if (init_new_context(tsk, mm))
        goto fail_nocontext;

    dup_mm_exe_file(oldmm, mm);

    err = dup_mmap(mm, oldmm);
    if (err)
        goto free_pt;

    mm->hiwater_rss = get_mm_rss(mm);
    mm->hiwater_vm = mm->total_vm;

    if (mm->binfmt && !try_module_get(mm->binfmt->module))
        goto free_pt;

    return mm;

free_pt:
    /* don't put binfmt in mmput, we haven't got module yet */
    mm->binfmt = NULL;
    mmput(mm);

fail_nomem:
    return NULL;

fail_nocontext:
    /*
     * If init_new_context() failed, we cannot use mmput() to free the mm
     * because it calls destroy_context()
     */
    mm_free_pgd(mm);
    free_mm(mm);
    return NULL;
}
/*
 * Set up the child's mm: either share the parent's (CLONE_VM) or make a
 * full copy via dup_mm().  Returns 0 on success, -ENOMEM on failure.
 */
static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
{
	struct mm_struct *mm, *oldmm;
	int retval;

	/* Reset fault/context-switch accounting for the new task. */
	tsk->min_flt = tsk->maj_flt = 0;
	tsk->nvcsw = tsk->nivcsw = 0;
#ifdef CONFIG_DETECT_HUNG_TASK
	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
#endif

	tsk->mm = NULL;
	tsk->active_mm = NULL;

	/*
	 * Are we cloning a kernel thread?
	 *
	 * We need to steal a active VM for that..
	 */
	oldmm = current->mm;
	if (!oldmm)
		return 0;

	if (clone_flags & CLONE_VM) {
		/* Share the address space: just take a reference. */
		atomic_inc(&oldmm->mm_users);
		mm = oldmm;
		goto good_mm;
	}

	retval = -ENOMEM;
	mm = dup_mm(tsk);
	if (!mm)
		goto fail_nomem;

good_mm:
	tsk->mm = mm;
	tsk->active_mm = mm;
	return 0;

fail_nomem:
	return retval;
}
/*
 * Share or copy the filesystem info (cwd, root, umask) for the child.
 * Returns 0 on success, -EAGAIN if the fs is mid-exec, -ENOMEM on OOM.
 */
static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
{
	struct fs_struct *fs = current->fs;

	if (clone_flags & CLONE_FS) {
		/* tsk->fs is already what we want */
		spin_lock(&fs->lock);
		/* Refuse to share while an exec is switching fs state. */
		if (fs->in_exec) {
			spin_unlock(&fs->lock);
			return -EAGAIN;
		}
		fs->users++;
		spin_unlock(&fs->lock);
		return 0;
	}
	tsk->fs = copy_fs_struct(fs);
	if (!tsk->fs)
		return -ENOMEM;
	return 0;
}
/*
 * Share or duplicate the open file table for the child.
 * Returns 0 on success or the error reported by dup_fd().
 */
static int copy_files(unsigned long clone_flags, struct task_struct *tsk)
{
	struct files_struct *oldf, *newf;
	int error = 0;

	/*
	 * A background process may not have any files ...
	 */
	oldf = current->files;
	if (!oldf)
		goto out;

	if (clone_flags & CLONE_FILES) {
		/* Share the table: just bump its refcount. */
		atomic_inc(&oldf->count);
		goto out;
	}

	newf = dup_fd(oldf, &error);
	if (!newf)
		goto out;

	tsk->files = newf;
	error = 0;
out:
	return error;
}
/*
 * Share or inherit the block-I/O context.  With CLONE_IO the context is
 * shared by reference; otherwise only a valid ioprio is copied into a
 * fresh context.  No-op unless CONFIG_BLOCK.
 */
static int copy_io(unsigned long clone_flags, struct task_struct *tsk)
{
#ifdef CONFIG_BLOCK
	struct io_context *ioc = current->io_context;
	struct io_context *new_ioc;

	if (!ioc)
		return 0;
	/*
	 * Share io context with parent, if CLONE_IO is set
	 */
	if (clone_flags & CLONE_IO) {
		ioc_task_link(ioc);
		tsk->io_context = ioc;
	} else if (ioprio_valid(ioc->ioprio)) {
		new_ioc = get_task_io_context(tsk, GFP_KERNEL, NUMA_NO_NODE);
		if (unlikely(!new_ioc))
			return -ENOMEM;

		new_ioc->ioprio = ioc->ioprio;
		put_io_context(new_ioc);
	}
#endif
	return 0;
}
/*
 * Share or copy the signal-handler table.  With CLONE_SIGHAND the parent's
 * table is shared by reference; otherwise a new one is allocated and the
 * handler actions are copied.  Returns 0 on success, -ENOMEM on OOM.
 *
 * Fix: the "&current" expressions had been mangled into the HTML entity
 * "&curren;" residue ("¤t"), which does not compile.
 */
static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
{
	struct sighand_struct *sig;

	if (clone_flags & CLONE_SIGHAND) {
		atomic_inc(&current->sighand->count);
		return 0;
	}
	sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
	/* Publish (possibly NULL) pointer before the check, as callers expect. */
	rcu_assign_pointer(tsk->sighand, sig);
	if (!sig)
		return -ENOMEM;
	atomic_set(&sig->count, 1);
	memcpy(sig->action, current->sighand->action, sizeof(sig->action));
	return 0;
}
/*
 * Drop one reference on a sighand_struct; the task dropping the last
 * reference tears down the signalfd state and frees the slab object.
 */
void __cleanup_sighand(struct sighand_struct *sighand)
{
	if (!atomic_dec_and_test(&sighand->count))
		return;

	signalfd_cleanup(sighand);
	kmem_cache_free(sighand_cachep, sighand);
}
/*
* Initialize POSIX timer handling for a thread group.
*/
static void posix_cpu_timers_init_group(struct signal_struct *sig)
{
unsigned long cpu_limit;
/* Thread group counters. */
thread_group_cputime_init(sig);
cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
if (cpu_limit != RLIM_INFINITY) {
sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
sig->cputimer.running = 1;
}
/* The timer lists. */
INIT_LIST_HEAD(&sig->cpu_timers[0]);
INIT_LIST_HEAD(&sig->cpu_timers[1]);
INIT_LIST_HEAD(&sig->cpu_timers[2]);
}
/*
 * Allocate and initialize the shared signal_struct for a new thread group.
 * Threads (CLONE_THREAD) share the parent's, so nothing is done for them.
 * Returns 0 on success, -ENOMEM on OOM.
 */
static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
{
	struct signal_struct *sig;

	if (clone_flags & CLONE_THREAD)
		return 0;

	sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
	/* Pointer is stored (even if NULL) before the check; callers rely on it. */
	tsk->signal = sig;
	if (!sig)
		return -ENOMEM;

	sig->nr_threads = 1;
	atomic_set(&sig->live, 1);
	atomic_set(&sig->sigcnt, 1);

	/* list_add(thread_node, thread_head) without INIT_LIST_HEAD() */
	sig->thread_head = (struct list_head)LIST_HEAD_INIT(tsk->thread_node);
	tsk->thread_node = (struct list_head)LIST_HEAD_INIT(sig->thread_head);

	init_waitqueue_head(&sig->wait_chldexit);
	sig->curr_target = tsk;
	init_sigpending(&sig->shared_pending);
	INIT_LIST_HEAD(&sig->posix_timers);

	/* ITIMER_REAL is delivered via this hrtimer. */
	hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	sig->real_timer.function = it_real_fn;

	/* Snapshot rlimits under the group leader's task lock. */
	task_lock(current->group_leader);
	memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
	task_unlock(current->group_leader);

	posix_cpu_timers_init_group(sig);

	tty_audit_fork(sig);
	sched_autogroup_fork(sig);

#ifdef CONFIG_CGROUPS
	init_rwsem(&sig->group_rwsem);
#endif

	sig->oom_score_adj = current->signal->oom_score_adj;
	sig->oom_score_adj_min = current->signal->oom_score_adj_min;

	sig->has_child_subreaper = current->signal->has_child_subreaper ||
				   current->signal->is_child_subreaper;

	mutex_init(&sig->cred_guard_mutex);

	return 0;
}
/*
 * Adjust the inherited task flags for a freshly forked child: it never
 * inherits super-privilege or workqueue-worker status, and always starts
 * out marked as forked-but-not-yet-exec'd.
 */
static void copy_flags(unsigned long clone_flags, struct task_struct *p)
{
	p->flags = (p->flags & ~(PF_SUPERPRIV | PF_WQ_WORKER)) | PF_FORKNOEXEC;
}
/*
 * set_tid_address(2): record the user address to be zeroed and futex-woken
 * at thread exit (see mm_release).  Returns the caller's thread id.
 */
SYSCALL_DEFINE1(set_tid_address, int __user *, tidptr)
{
	current->clear_child_tid = tidptr;

	return task_pid_vnr(current);
}
/* Initialize the priority-inheritance state of a new task. */
static void rt_mutex_init_task(struct task_struct *p)
{
	raw_spin_lock_init(&p->pi_lock);
#ifdef CONFIG_RT_MUTEXES
	plist_head_init(&p->pi_waiters);
	p->pi_blocked_on = NULL;
#endif
}
#ifdef CONFIG_MM_OWNER
/* Record the task that owns this mm (used e.g. by memcg accounting). */
void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
{
	mm->owner = p;
}
#endif /* CONFIG_MM_OWNER */
/*
* Initialize POSIX timer handling for a single task.
*/
static void posix_cpu_timers_init(struct task_struct *tsk)
{
	int i;

	/* No per-task CPU-time expirations armed yet. */
	tsk->cputime_expires.prof_exp = 0;
	tsk->cputime_expires.virt_exp = 0;
	tsk->cputime_expires.sched_exp = 0;

	for (i = 0; i < 3; i++)
		INIT_LIST_HEAD(&tsk->cpu_timers[i]);
}
/*
* This creates a new process as a copy of the old one,
* but does not actually start it yet.
*
* It copies the registers, and all the appropriate
* parts of the process environment (as per the clone
* flags). The actual kick-off is left to the caller.
*/
/*
 * Fix: several "&current" expressions in this function had been mangled
 * into the HTML entity residue "¤t" (from "&curren;"), which does not
 * compile; they are restored below.  No logic is changed.
 */
static struct task_struct *copy_process(unsigned long clone_flags,
					unsigned long stack_start,
					unsigned long stack_size,
					int __user *child_tidptr,
					struct pid *pid,
					int trace)
{
	int retval;
	struct task_struct *p;

	if ((clone_flags & (CLONE_NEWNS|CLONE_FS)) == (CLONE_NEWNS|CLONE_FS))
		return ERR_PTR(-EINVAL);

	if ((clone_flags & (CLONE_NEWUSER|CLONE_FS)) == (CLONE_NEWUSER|CLONE_FS))
		return ERR_PTR(-EINVAL);

	/*
	 * Thread groups must share signals as well, and detached threads
	 * can only be started up within the thread group.
	 */
	if ((clone_flags & CLONE_THREAD) && !(clone_flags & CLONE_SIGHAND))
		return ERR_PTR(-EINVAL);

	/*
	 * Shared signal handlers imply shared VM. By way of the above,
	 * thread groups also imply shared VM. Blocking this case allows
	 * for various simplifications in other code.
	 */
	if ((clone_flags & CLONE_SIGHAND) && !(clone_flags & CLONE_VM))
		return ERR_PTR(-EINVAL);

	/*
	 * Siblings of global init remain as zombies on exit since they are
	 * not reaped by their parent (swapper). To solve this and to avoid
	 * multi-rooted process trees, prevent global and container-inits
	 * from creating siblings.
	 */
	if ((clone_flags & CLONE_PARENT) &&
				current->signal->flags & SIGNAL_UNKILLABLE)
		return ERR_PTR(-EINVAL);

	/*
	 * If the new process will be in a different pid namespace don't
	 * allow it to share a thread group or signal handlers with the
	 * forking task.
	 */
	if ((clone_flags & (CLONE_SIGHAND | CLONE_NEWPID)) &&
	    (task_active_pid_ns(current) != current->nsproxy->pid_ns))
		return ERR_PTR(-EINVAL);

	retval = security_task_create(clone_flags);
	if (retval)
		goto fork_out;

	retval = -ENOMEM;
	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	ftrace_graph_init_task(p);
	get_seccomp_filter(p);

	rt_mutex_init_task(p);

#ifdef CONFIG_PROVE_LOCKING
	DEBUG_LOCKS_WARN_ON(!p->hardirqs_enabled);
	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
#endif
	retval = -EAGAIN;
	if (atomic_read(&p->real_cred->user->processes) >=
			task_rlimit(p, RLIMIT_NPROC)) {
		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
		    p->real_cred->user != INIT_USER)
			goto bad_fork_free;
	}
	current->flags &= ~PF_NPROC_EXCEEDED;

	retval = copy_creds(p, clone_flags);
	if (retval < 0)
		goto bad_fork_free;

	/*
	 * If multiple threads are within copy_process(), then this check
	 * triggers too late. This doesn't hurt, the check is only there
	 * to stop root fork bombs.
	 */
	retval = -EAGAIN;
	if (nr_threads >= max_threads)
		goto bad_fork_cleanup_count;

	if (!try_module_get(task_thread_info(p)->exec_domain->module))
		goto bad_fork_cleanup_count;

	p->did_exec = 0;
	delayacct_tsk_init(p);	/* Must remain after dup_task_struct() */
	copy_flags(clone_flags, p);
	INIT_LIST_HEAD(&p->children);
	INIT_LIST_HEAD(&p->sibling);
	rcu_copy_process(p);
	p->vfork_done = NULL;
	spin_lock_init(&p->alloc_lock);

	init_sigpending(&p->pending);

	p->utime = p->stime = p->gtime = 0;
	p->utimescaled = p->stimescaled = 0;
#ifndef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
	p->prev_cputime.utime = p->prev_cputime.stime = 0;
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
	seqlock_init(&p->vtime_seqlock);
	p->vtime_snap = 0;
	p->vtime_snap_whence = VTIME_SLEEPING;
#endif

#if defined(SPLIT_RSS_COUNTING)
	memset(&p->rss_stat, 0, sizeof(p->rss_stat));
#endif

	p->default_timer_slack_ns = current->timer_slack_ns;

	task_io_accounting_init(&p->ioac);
	acct_clear_integrals(p);

	posix_cpu_timers_init(p);

	do_posix_clock_monotonic_gettime(&p->start_time);
	p->real_start_time = p->start_time;
	monotonic_to_bootbased(&p->real_start_time);
	p->io_context = NULL;
	p->audit_context = NULL;
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_begin(current);
	cgroup_fork(p);
#ifdef CONFIG_NUMA
	p->mempolicy = mpol_dup(p->mempolicy);
	if (IS_ERR(p->mempolicy)) {
		retval = PTR_ERR(p->mempolicy);
		p->mempolicy = NULL;
		goto bad_fork_cleanup_cgroup;
	}
	mpol_fix_fork_child_flag(p);
#endif
#ifdef CONFIG_CPUSETS
	p->cpuset_mem_spread_rotor = NUMA_NO_NODE;
	p->cpuset_slab_spread_rotor = NUMA_NO_NODE;
	seqcount_init(&p->mems_allowed_seq);
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
	p->irq_events = 0;
	p->hardirqs_enabled = 0;
	p->hardirq_enable_ip = 0;
	p->hardirq_enable_event = 0;
	p->hardirq_disable_ip = _THIS_IP_;
	p->hardirq_disable_event = 0;
	p->softirqs_enabled = 1;
	p->softirq_enable_ip = _THIS_IP_;
	p->softirq_enable_event = 0;
	p->softirq_disable_ip = 0;
	p->softirq_disable_event = 0;
	p->hardirq_context = 0;
	p->softirq_context = 0;
#endif
#ifdef CONFIG_LOCKDEP
	p->lockdep_depth = 0; /* no locks held yet */
	p->curr_chain_key = 0;
	p->lockdep_recursion = 0;
#endif

#ifdef CONFIG_DEBUG_MUTEXES
	p->blocked_on = NULL; /* not blocked yet */
#endif
#ifdef CONFIG_MEMCG
	p->memcg_batch.do_batch = 0;
	p->memcg_batch.memcg = NULL;
#endif
#ifdef CONFIG_BCACHE
	p->sequential_io	= 0;
	p->sequential_io_avg	= 0;
#endif

	/* Perform scheduler related setup. Assign this task to a CPU. */
	sched_fork(p);

	retval = perf_event_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	retval = audit_alloc(p);
	if (retval)
		goto bad_fork_cleanup_policy;
	/* copy all the process information */
	retval = copy_semundo(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_audit;
	retval = copy_files(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_semundo;
	retval = copy_fs(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_files;
	retval = copy_sighand(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_fs;
	retval = copy_signal(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_sighand;
	retval = copy_mm(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_signal;
	retval = copy_namespaces(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_mm;
	retval = copy_io(clone_flags, p);
	if (retval)
		goto bad_fork_cleanup_namespaces;
	retval = copy_thread(clone_flags, stack_start, stack_size, p);
	if (retval)
		goto bad_fork_cleanup_io;

	if (pid != &init_struct_pid) {
		retval = -ENOMEM;
		pid = alloc_pid(p->nsproxy->pid_ns);
		if (!pid)
			goto bad_fork_cleanup_io;
	}

	p->pid = pid_nr(pid);
	p->tgid = p->pid;
	if (clone_flags & CLONE_THREAD)
		p->tgid = current->tgid;

	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
	/*
	 * Clear TID on mm_release()?
	 */
	p->clear_child_tid = (clone_flags & CLONE_CHILD_CLEARTID) ? child_tidptr : NULL;
#ifdef CONFIG_BLOCK
	p->plug = NULL;
#endif
#ifdef CONFIG_FUTEX
	p->robust_list = NULL;
#ifdef CONFIG_COMPAT
	p->compat_robust_list = NULL;
#endif
	INIT_LIST_HEAD(&p->pi_state_list);
	p->pi_state_cache = NULL;
#endif
	uprobe_copy_process(p);
	/*
	 * sigaltstack should be cleared when sharing the same VM
	 */
	if ((clone_flags & (CLONE_VM|CLONE_VFORK)) == CLONE_VM)
		p->sas_ss_sp = p->sas_ss_size = 0;

	/*
	 * Syscall tracing and stepping should be turned off in the
	 * child regardless of CLONE_PTRACE.
	 */
	user_disable_single_step(p);
	clear_tsk_thread_flag(p, TIF_SYSCALL_TRACE);
#ifdef TIF_SYSCALL_EMU
	clear_tsk_thread_flag(p, TIF_SYSCALL_EMU);
#endif
	clear_all_latency_tracing(p);

	/* ok, now we should be set up.. */
	if (clone_flags & CLONE_THREAD)
		p->exit_signal = -1;
	else if (clone_flags & CLONE_PARENT)
		p->exit_signal = current->group_leader->exit_signal;
	else
		p->exit_signal = (clone_flags & CSIGNAL);

	p->pdeath_signal = 0;
	p->exit_state = 0;

	p->nr_dirtied = 0;
	p->nr_dirtied_pause = 128 >> (PAGE_SHIFT - 10);
	p->dirty_paused_when = 0;

	/*
	 * Ok, make it visible to the rest of the system.
	 * We dont wake it up yet.
	 */
	p->group_leader = p;
	INIT_LIST_HEAD(&p->thread_group);
	p->task_works = NULL;

	/* Need tasklist lock for parent etc handling! */
	write_lock_irq(&tasklist_lock);

	/* CLONE_PARENT re-uses the old parent */
	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
		p->real_parent = current->real_parent;
		p->parent_exec_id = current->parent_exec_id;
	} else {
		p->real_parent = current;
		p->parent_exec_id = current->self_exec_id;
	}

	spin_lock(&current->sighand->siglock);

	/*
	 * Process group and session signals need to be delivered to just the
	 * parent before the fork or both the parent and the child after the
	 * fork. Restart if a signal comes in before we add the new process to
	 * it's process group.
	 * A fatal signal pending means that current will exit, so the new
	 * thread can't slip out of an OOM kill (or normal SIGKILL).
	 */
	recalc_sigpending();
	if (signal_pending(current)) {
		spin_unlock(&current->sighand->siglock);
		write_unlock_irq(&tasklist_lock);
		retval = -ERESTARTNOINTR;
		goto bad_fork_free_pid;
	}

	if (clone_flags & CLONE_THREAD) {
		current->signal->nr_threads++;
		atomic_inc(&current->signal->live);
		atomic_inc(&current->signal->sigcnt);
		p->group_leader = current->group_leader;
		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
	}

	if (likely(p->pid)) {
		ptrace_init_task(p, (clone_flags & CLONE_PTRACE) || trace);

		if (thread_group_leader(p)) {
			if (is_child_reaper(pid)) {
				ns_of_pid(pid)->child_reaper = p;
				p->signal->flags |= SIGNAL_UNKILLABLE;
			}

			p->signal->leader_pid = pid;
			p->signal->tty = tty_kref_get(current->signal->tty);
			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
			attach_pid(p, PIDTYPE_SID, task_session(current));
			list_add_tail(&p->sibling, &p->real_parent->children);
			list_add_tail_rcu(&p->tasks, &init_task.tasks);
			__this_cpu_inc(process_counts);
		} else {
			list_add_tail_rcu(&p->thread_node,
					  &p->signal->thread_head);
		}
		attach_pid(p, PIDTYPE_PID, pid);
		nr_threads++;
	}

	total_forks++;
	spin_unlock(&current->sighand->siglock);
	syscall_tracepoint_update(p);
	write_unlock_irq(&tasklist_lock);

	proc_fork_connector(p);
	cgroup_post_fork(p);
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	perf_event_fork(p);

	trace_task_newtask(p, clone_flags);

	return p;

bad_fork_free_pid:
	if (pid != &init_struct_pid)
		free_pid(pid);
bad_fork_cleanup_io:
	if (p->io_context)
		exit_io_context(p);
bad_fork_cleanup_namespaces:
	exit_task_namespaces(p);
bad_fork_cleanup_mm:
	if (p->mm)
		mmput(p->mm);
bad_fork_cleanup_signal:
	if (!(clone_flags & CLONE_THREAD))
		free_signal_struct(p->signal);
bad_fork_cleanup_sighand:
	__cleanup_sighand(p->sighand);
bad_fork_cleanup_fs:
	exit_fs(p); /* blocking */
bad_fork_cleanup_files:
	exit_files(p); /* blocking */
bad_fork_cleanup_semundo:
	exit_sem(p);
bad_fork_cleanup_audit:
	audit_free(p);
bad_fork_cleanup_policy:
	perf_event_free_task(p);
#ifdef CONFIG_NUMA
	mpol_put(p->mempolicy);
bad_fork_cleanup_cgroup:
#endif
	if (clone_flags & CLONE_THREAD)
		threadgroup_change_end(current);
	cgroup_exit(p, 0);
	delayacct_tsk_free(p);
	module_put(task_thread_info(p)->exec_domain->module);
bad_fork_cleanup_count:
	atomic_dec(&p->cred->user->processes);
	exit_creds(p);
bad_fork_free:
	free_task(p);
fork_out:
	return ERR_PTR(retval);
}
static inline void init_idle_pids(struct pid_link *links)
{
enum pid_type type;
for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) {
INIT_HLIST_NODE(&links[type].node); /* not really needed */
links[type].pid = &init_struct_pid;
}
}
/*
 * Create the idle task for a secondary CPU: a CLONE_VM copy of the caller
 * attached to init_struct_pid, then converted to an idle thread.
 */
struct task_struct * __cpuinit fork_idle(int cpu)
{
	struct task_struct *task;
	task = copy_process(CLONE_VM, 0, 0, NULL, &init_struct_pid, 0);
	if (!IS_ERR(task)) {
		init_idle_pids(task->pids);
		init_idle(task, cpu);
	}
	return task;
}
/*
* Ok, this is the main fork-routine.
*
* It copies the process, and if successful kick-starts
* it and waits for it to finish using the VM if required.
*/
long do_fork(unsigned long clone_flags,
	      unsigned long stack_start,
	      unsigned long stack_size,
	      int __user *parent_tidptr,
	      int __user *child_tidptr)
{
	struct task_struct *p;
	int trace = 0;
	long nr;

	/*
	 * Do some preliminary argument and permissions checking before we
	 * actually start allocating stuff
	 */
	if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) {
		/* New user/pid namespaces are incompatible with sharing
		 * a thread group or parent with the caller. */
		if (clone_flags & (CLONE_THREAD|CLONE_PARENT))
			return -EINVAL;
	}

	/*
	 * Determine whether and which event to report to ptracer.  When
	 * called from kernel_thread or CLONE_UNTRACED is explicitly
	 * requested, no event is reported; otherwise, report if the event
	 * for the type of forking is enabled.
	 */
	if (!(clone_flags & CLONE_UNTRACED)) {
		if (clone_flags & CLONE_VFORK)
			trace = PTRACE_EVENT_VFORK;
		else if ((clone_flags & CSIGNAL) != SIGCHLD)
			trace = PTRACE_EVENT_CLONE;
		else
			trace = PTRACE_EVENT_FORK;

		if (likely(!ptrace_event_enabled(current, trace)))
			trace = 0;
	}

	p = copy_process(clone_flags, stack_start, stack_size,
			 child_tidptr, NULL, trace);
	/*
	 * Do this prior waking up the new thread - the thread pointer
	 * might get invalid after that point, if the thread exits quickly.
	 */
	if (!IS_ERR(p)) {
		struct completion vfork;
		struct pid *pid;

		trace_sched_process_fork(current, p);

		/* Hold a pid ref: the child may exit before we report it. */
		pid = get_task_pid(p, PIDTYPE_PID);
		nr = pid_vnr(pid);

		if (clone_flags & CLONE_PARENT_SETTID)
			put_user(nr, parent_tidptr);

		if (clone_flags & CLONE_VFORK) {
			p->vfork_done = &vfork;
			init_completion(&vfork);
			get_task_struct(p);
		}

		wake_up_new_task(p);

		/* forking complete and child started to run, tell ptracer */
		if (unlikely(trace))
			ptrace_event_pid(trace, pid);

		if (clone_flags & CLONE_VFORK) {
			/* Block until the child execs or exits. */
			if (!wait_for_vfork_done(p, &vfork))
				ptrace_event_pid(PTRACE_EVENT_VFORK_DONE, pid);
		}

		put_pid(pid);
	} else {
		nr = PTR_ERR(p);
	}
	return nr;
}
/*
* Create a kernel thread.
*/
/*
 * Create a kernel thread running fn(arg).  CLONE_VM shares the kernel
 * address space; CLONE_UNTRACED suppresses ptrace fork events.
 */
pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
{
	return do_fork(flags|CLONE_VM|CLONE_UNTRACED, (unsigned long)fn,
		(unsigned long)arg, NULL, NULL);
}
#ifdef __ARCH_WANT_SYS_FORK
/* fork(2): plain fork; requires an MMU for copy-on-write semantics. */
SYSCALL_DEFINE0(fork)
{
#ifdef CONFIG_MMU
	return do_fork(SIGCHLD, 0, 0, NULL, NULL);
#else
	/* can not support in nommu mode */
	return(-EINVAL);
#endif
}
#endif
#ifdef __ARCH_WANT_SYS_VFORK
/* vfork(2): share the VM and suspend the parent until the child execs/exits. */
SYSCALL_DEFINE0(vfork)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, 0,
			0, NULL, NULL);
}
#endif
#ifdef __ARCH_WANT_SYS_CLONE
/*
 * clone(2): the argument order differs per architecture ABI, hence the
 * CONFIG_CLONE_BACKWARDS* variants below.  All funnel into do_fork().
 */
#ifdef CONFIG_CLONE_BACKWARDS
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int, tls_val,
		 int __user *, child_tidptr)
#elif defined(CONFIG_CLONE_BACKWARDS2)
SYSCALL_DEFINE5(clone, unsigned long, newsp, unsigned long, clone_flags,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 int, tls_val)
#elif defined(CONFIG_CLONE_BACKWARDS3)
SYSCALL_DEFINE6(clone, unsigned long, clone_flags, unsigned long, newsp,
		int, stack_size,
		int __user *, parent_tidptr,
		int __user *, child_tidptr,
		int, tls_val)
#else
SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
		 int __user *, parent_tidptr,
		 int __user *, child_tidptr,
		 int, tls_val)
#endif
{
	return do_fork(clone_flags, newsp, 0, parent_tidptr, child_tidptr);
}
#endif
#ifndef ARCH_MIN_MMSTRUCT_ALIGN
#define ARCH_MIN_MMSTRUCT_ALIGN 0
#endif
/*
 * One-time slab constructor for sighand_cache objects (the cache is
 * SLAB_DESTROY_BY_RCU, so this runs once per slab object, not per alloc).
 */
static void sighand_ctor(void *data)
{
	struct sighand_struct *sh = data;

	spin_lock_init(&sh->siglock);
	init_waitqueue_head(&sh->signalfd_wqh);
}
/* Create the slab caches used by fork() for per-process structures. */
void __init proc_caches_init(void)
{
	sighand_cachep = kmem_cache_create("sighand_cache",
			sizeof(struct sighand_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
			SLAB_NOTRACK, sighand_ctor);
	signal_cachep = kmem_cache_create("signal_cache",
			sizeof(struct signal_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	files_cachep = kmem_cache_create("files_cache",
			sizeof(struct files_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	fs_cachep = kmem_cache_create("fs_cache",
			sizeof(struct fs_struct), 0,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	/*
	 * FIXME! The "sizeof(struct mm_struct)" currently includes the
	 * whole struct cpumask for the OFFSTACK case. We could change
	 * this to *only* allocate as much of it as required by the
	 * maximum number of CPU's we can ever have.  The cpumask_allocation
	 * is at the end of the structure, exactly for that reason.
	 */
	mm_cachep = kmem_cache_create("mm_struct",
			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
	mmap_init();
	nsproxy_cache_init();
}
/*
* Check constraints on flags passed to the unshare system call.
*/
static int check_unshare_flags(unsigned long unshare_flags)
{
if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET|
CLONE_NEWUSER|CLONE_NEWPID))
return -EINVAL;
/*
* Not implemented, but pretend it works if there is nothing to
* unshare. Note that unsharing CLONE_THREAD or CLONE_SIGHAND
* needs to unshare vm.
*/
if (unshare_flags & (CLONE_THREAD | CLONE_SIGHAND | CLONE_VM)) {
/* FIXME: get_task_mm() increments ->mm_users */
if (atomic_read(¤t->mm->mm_users) > 1)
return -EINVAL;
}
return 0;
}
/*
* Unshare the filesystem structure if it is being shared
*/
/*
 * Unshare the filesystem structure if it is being shared.  On success
 * *new_fsp points to a private copy (or is untouched if no copy was
 * needed); returns 0 or -ENOMEM.
 */
static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
{
	struct fs_struct *fs = current->fs;

	if (!(unshare_flags & CLONE_FS) || !fs)
		return 0;

	/* don't need lock here; in the worst case we'll do useless copy */
	if (fs->users == 1)
		return 0;

	*new_fsp = copy_fs_struct(fs);
	return *new_fsp ? 0 : -ENOMEM;
}
/*
* Unshare file descriptor table if it is being shared
*/
/*
 * Unshare the file descriptor table if it is being shared.  On success
 * *new_fdp points to a private copy (or is untouched if no copy was
 * needed); returns 0 or the error from dup_fd().
 */
static int unshare_fd(unsigned long unshare_flags, struct files_struct **new_fdp)
{
	struct files_struct *fd = current->files;
	int error = 0;

	if (!(unshare_flags & CLONE_FILES))
		return 0;
	if (!fd || atomic_read(&fd->count) <= 1)
		return 0;

	*new_fdp = dup_fd(fd, &error);
	if (!*new_fdp)
		return error;

	return 0;
}
/*
* unshare allows a process to 'unshare' part of the process
* context which was originally shared using clone. copy_*
* functions used by do_fork() cannot be used here directly
* because they modify an inactive task_struct that is being
* constructed. Here we are modifying the current, active,
* task_struct.
*/
SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
{
	struct fs_struct *fs, *new_fs = NULL;
	struct files_struct *fd, *new_fd = NULL;
	struct cred *new_cred = NULL;
	struct nsproxy *new_nsproxy = NULL;
	int do_sysvsem = 0;
	int err;

	/*
	 * If unsharing a user namespace must also unshare the thread.
	 */
	if (unshare_flags & CLONE_NEWUSER)
		unshare_flags |= CLONE_THREAD | CLONE_FS;
	/*
	 * If unsharing a pid namespace must also unshare the thread.
	 */
	if (unshare_flags & CLONE_NEWPID)
		unshare_flags |= CLONE_THREAD;
	/*
	 * If unsharing a thread from a thread group, must also unshare vm.
	 */
	if (unshare_flags & CLONE_THREAD)
		unshare_flags |= CLONE_VM;
	/*
	 * If unsharing vm, must also unshare signal handlers.
	 */
	if (unshare_flags & CLONE_VM)
		unshare_flags |= CLONE_SIGHAND;
	/*
	 * If unsharing namespace, must also unshare filesystem information.
	 */
	if (unshare_flags & CLONE_NEWNS)
		unshare_flags |= CLONE_FS;

	err = check_unshare_flags(unshare_flags);
	if (err)
		goto bad_unshare_out;
	/*
	 * CLONE_NEWIPC must also detach from the undolist: after switching
	 * to a new ipc namespace, the semaphore arrays from the old
	 * namespace are unreachable.
	 */
	if (unshare_flags & (CLONE_NEWIPC|CLONE_SYSVSEM))
		do_sysvsem = 1;
	err = unshare_fs(unshare_flags, &new_fs);
	if (err)
		goto bad_unshare_out;
	err = unshare_fd(unshare_flags, &new_fd);
	if (err)
		goto bad_unshare_cleanup_fs;
	err = unshare_userns(unshare_flags, &new_cred);
	if (err)
		goto bad_unshare_cleanup_fd;
	err = unshare_nsproxy_namespaces(unshare_flags, &new_nsproxy,
					 new_cred, new_fs);
	if (err)
		goto bad_unshare_cleanup_cred;

	if (new_fs || new_fd || do_sysvsem || new_cred || new_nsproxy) {
		if (do_sysvsem) {
			/*
			 * CLONE_SYSVSEM is equivalent to sys_exit().
			 */
			exit_sem(current);
		}

		if (new_nsproxy)
			switch_task_namespaces(current, new_nsproxy);

		task_lock(current);

		if (new_fs) {
			/* Swap in the private fs; free the old one only if
			 * we held the last reference. */
			fs = current->fs;
			spin_lock(&fs->lock);
			current->fs = new_fs;
			if (--fs->users)
				new_fs = NULL;
			else
				new_fs = fs;
			spin_unlock(&fs->lock);
		}

		if (new_fd) {
			fd = current->files;
			current->files = new_fd;
			new_fd = fd;
		}

		task_unlock(current);

		if (new_cred) {
			/* Install the new user namespace */
			commit_creds(new_cred);
			new_cred = NULL;
		}
	}

	/* NOTE: deliberate fallthrough on success — the cleanup labels below
	 * free whatever was swapped out (or allocated but not installed). */
bad_unshare_cleanup_cred:
	if (new_cred)
		put_cred(new_cred);
bad_unshare_cleanup_fd:
	if (new_fd)
		put_files_struct(new_fd);

bad_unshare_cleanup_fs:
	if (new_fs)
		free_fs_struct(new_fs);

bad_unshare_out:
	return err;
}
/*
* Helper to unshare the files of the current task.
* We don't want to expose copy_files internals to
* the exec layer of the kernel.
*/
int unshare_files(struct files_struct **displaced)
{
struct task_struct *task = current;
struct files_struct *copy = NULL;
int error;
error = unshare_fd(CLONE_FILES, ©);
if (error || !copy) {
*displaced = NULL;
return error;
}
*displaced = task->files;
task_lock(task);
task->files = copy;
task_unlock(task);
return 0;
}
| gpl-2.0 |
nit-in/android_kernel_htc_pico-1 | arch/sparc/kernel/setup_64.c | 1628 | 12623 | /*
* linux/arch/sparc64/kernel/setup.c
*
* Copyright (C) 1995,1996 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <asm/smp.h>
#include <linux/user.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/kdev_t.h>
#include <linux/major.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/inet.h>
#include <linux/console.h>
#include <linux/root_dev.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/initrd.h>
#include <linux/module.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/oplib.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/idprom.h>
#include <asm/head.h>
#include <asm/starfire.h>
#include <asm/mmu_context.h>
#include <asm/timer.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/mmu.h>
#include <asm/ns87303.h>
#include <asm/btext.h>
#include <asm/elf.h>
#include <asm/mdesc.h>
#ifdef CONFIG_IP_PNP
#include <net/ipconfig.h>
#endif
#include "entry.h"
#include "kernel.h"
/* Used to synchronize accesses to NatSemi SUPER I/O chip configure
* operations in asm/ns87303.h
*/
DEFINE_SPINLOCK(ns87303_lock);
EXPORT_SYMBOL(ns87303_lock);
/* Default text-console geometry reported to the rest of the kernel. */
struct screen_info screen_info = {
	0, 0,			/* orig-x, orig-y */
	0,			/* unused */
	0,			/* orig-video-page */
	0,			/* orig-video-mode */
	128,			/* orig-video-cols */
	0, 0, 0,		/* unused, ega_bx, unused */
	54,			/* orig-video-lines */
	0,			/* orig-video-isVGA */
	16			/* orig-video-points */
};
/* Console write hook that forwards output to the OpenBoot PROM. */
static void
prom_console_write(struct console *con, const char *s, unsigned n)
{
	prom_write(s, n);
}
/* Exported for mm/init.c:paging_init. */
unsigned long cmdline_memory_size = 0;

/* Early boot console backed by the PROM, torn down once a real
 * console driver registers (CON_BOOT). */
static struct console prom_early_console = {
	.name =		"earlyprom",
	.write =	prom_console_write,
	.flags =	CON_PRINTBUFFER | CON_BOOT | CON_ANYTIME,
	.index =	-1,
};
/*
* Process kernel command line switches that are specific to the
* SPARC or that require special low-level processing.
*/
/* Handle a single-character boot switch from the PROM command line. */
static void __init process_switch(char c)
{
	switch (c) {
	case 'd':
	case 's':
		break;
	case 'h':
		prom_printf("boot_flags_init: Halt!\n");
		prom_halt();
		break;
	case 'p':
		/* Just ignore, this behavior is now the default.  */
		break;
	case 'P':
		/* Force UltraSPARC-III P-Cache on. */
		if (tlb_type != cheetah) {
			printk("BOOT: Ignoring P-Cache force option.\n");
			break;
		}
		cheetah_pcache_forced_on = 1;
		add_taint(TAINT_MACHINE_CHECK);
		cheetah_enable_pcache();
		break;

	default:
		printk("Unknown boot switch (-%c)\n", c);
		break;
	}
}
/*
 * Parse the boot command line: "-xyz" switch clusters go to
 * process_switch(); "mem=XXX[kKmM]" overrides the PROM memory size.
 */
static void __init boot_flags_init(char *commands)
{
	while (*commands) {
		/* Move to the start of the next "argument". */
		while (*commands && *commands == ' ')
			commands++;

		/* Process any command switches, otherwise skip it. */
		if (*commands == '\0')
			break;
		if (*commands == '-') {
			commands++;
			while (*commands && *commands != ' ')
				process_switch(*commands++);
			continue;
		}
		if (!strncmp(commands, "mem=", 4)) {
			/*
			 * "mem=XXX[kKmM]" overrides the PROM-reported
			 * memory size.
			 */
			cmdline_memory_size = simple_strtoul(commands + 4,
							     &commands, 0);
			if (*commands == 'K' || *commands == 'k') {
				cmdline_memory_size <<= 10;
				commands++;
			} else if (*commands=='M' || *commands=='m') {
				cmdline_memory_size <<= 20;
				commands++;
			}
		}
		/* Skip the rest of this token. */
		while (*commands && *commands != ' ')
			commands++;
	}
}
extern unsigned short root_flags;
extern unsigned short root_dev;
extern unsigned short ram_flags;
#define RAMDISK_IMAGE_START_MASK 0x07FF
#define RAMDISK_PROMPT_FLAG 0x8000
#define RAMDISK_LOAD_FLAG 0x4000
extern int root_mountflags;
char reboot_command[COMMAND_LINE_SIZE];
static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
/*
 * Patch CPU-type-specific instruction sequences into the kernel text.
 * Each patch entry supplies four replacement instruction words chosen by
 * the detected TLB/CPU type; each word is stored and then flushed from
 * the instruction cache before the next is written.
 */
void __init per_cpu_patch(void)
{
	struct cpuid_patch_entry *p;
	unsigned long ver;
	int is_jbus;

	if (tlb_type == spitfire && !this_is_starfire)
		return;

	is_jbus = 0;
	if (tlb_type != hypervisor) {
		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
		is_jbus = ((ver >> 32UL) == __JALAPENO_ID ||
			   (ver >> 32UL) == __SERRANO_ID);
	}

	for (p = &__cpuid_patch; p < &__cpuid_patch_end; p++) {
		unsigned long addr = p->addr;
		unsigned int *insns;
		int i;

		switch (tlb_type) {
		case spitfire:
			insns = &p->starfire[0];
			break;
		case cheetah:
		case cheetah_plus:
			if (is_jbus)
				insns = &p->cheetah_jbus[0];
			else
				insns = &p->cheetah_safari[0];
			break;
		case hypervisor:
			insns = &p->sun4v[0];
			break;
		default:
			prom_printf("Unknown cpu type, halting.\n");
			prom_halt();
		}

		/* Store each of the four words, ordering the store before
		 * the I-cache flush with wmb(). */
		for (i = 0; i < 4; i++) {
			*(unsigned int *) (addr + i * 4) = insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr + i * 4));
		}
	}
}
/* Patch a range of single-instruction sun4v patch sites into the text. */
void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *start,
			     struct sun4v_1insn_patch_entry *end)
{
	struct sun4v_1insn_patch_entry *p;

	for (p = start; p < end; p++) {
		unsigned long addr = p->addr;

		*(unsigned int *) (addr +  0) = p->insn;
		wmb();
		__asm__ __volatile__("flush	%0" : : "r" (addr +  0));
	}
}
/* Patch a range of two-instruction sun4v patch sites into the text. */
void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
			     struct sun4v_2insn_patch_entry *end)
{
	struct sun4v_2insn_patch_entry *p;

	for (p = start; p < end; p++) {
		unsigned long addr = p->addr;
		int i;

		for (i = 0; i < 2; i++) {
			*(unsigned int *) (addr + i * 4) = p->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr + i * 4));
		}
	}
}
/* Apply all sun4v instruction patches and initialize the hypervisor API.
 * No-op on non-hypervisor (sun4u) systems. */
void __init sun4v_patch(void)
{
	extern void sun4v_hvapi_init(void);

	if (tlb_type != hypervisor)
		return;

	sun4v_patch_1insn_range(&__sun4v_1insn_patch,
				&__sun4v_1insn_patch_end);

	sun4v_patch_2insn_range(&__sun4v_2insn_patch,
				&__sun4v_2insn_patch_end);

	sun4v_hvapi_init();
}
/*
 * Patch in hardware-popc instruction sequences (3-insn and 6-insn
 * variants) at the recorded patch sites, flushing the I-cache after
 * every word stored.
 */
static void __init popc_patch(void)
{
	struct popc_3insn_patch_entry *p3;
	struct popc_6insn_patch_entry *p6;

	p3 = &__popc_3insn_patch;
	while (p3 < &__popc_3insn_patch_end) {
		unsigned long i, addr = p3->addr;

		for (i = 0; i < 3; i++) {
			*(unsigned int *) (addr +  (i * 4)) = p3->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr +  (i * 4)));
		}

		p3++;
	}

	p6 = &__popc_6insn_patch;
	while (p6 < &__popc_6insn_patch_end) {
		unsigned long i, addr = p6->addr;

		for (i = 0; i < 6; i++) {
			*(unsigned int *) (addr +  (i * 4)) = p6->insns[i];
			wmb();
			__asm__ __volatile__("flush	%0"
					     : : "r" (addr +  (i * 4)));
		}

		p6++;
	}
}
#ifdef CONFIG_SMP
/* Fatal: the firmware-assigned boot CPU id does not fit in NR_CPUS. */
void __init boot_cpu_id_too_large(int cpu)
{
	prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
		    cpu, NR_CPUS);
	prom_halt();
}
#endif
/* On Ultra, we support all of the v8 capabilities. */
/* Baseline ELF hwcap mask; extended at boot by init_sparc64_elf_hwcap(). */
unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
				   HWCAP_SPARC_SWAP | HWCAP_SPARC_MULDIV |
				   HWCAP_SPARC_V9);
EXPORT_SYMBOL(sparc64_elf_hwcap);
/*
 * Capability names, indexed by hwcap bit position; used both for the
 * boot-time "CPU CAPS" report and the /proc/cpuinfo "cpucaps" line.
 */
static const char *hwcaps[] = {
	"flush", "stbar", "swap", "muldiv", "v9",
	"ultra3", "blkinit", "n2",

	/* These strings are as they appear in the machine description
	 * 'hwcap-list' property for cpu nodes.
	 */
	"mul32", "div32", "fsmuld", "v8plus", "popc", "vis", "vis2",
	"ASIBlkInit", "fmaf", "vis3", "hpc", "random", "trans", "fjfmau",
	"ima", "cspare",
};
void cpucap_info(struct seq_file *m)
{
unsigned long caps = sparc64_elf_hwcap;
int i, printed = 0;
seq_puts(m, "cpucaps\t\t: ");
for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
unsigned long bit = 1UL << i;
if (caps & bit) {
seq_printf(m, "%s%s",
printed ? "," : "", hwcaps[i]);
printed++;
}
}
seq_putc(m, '\n');
}
/*
 * Log the capability mask at boot as "CPU CAPS: [...]" lines, wrapping
 * to a fresh line after every eight names.
 */
static void __init report_hwcaps(unsigned long caps)
{
	int i, printed = 0;

	printk(KERN_INFO "CPU CAPS: [");
	for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
		unsigned long bit = 1UL << i;
		if (caps & bit) {
			printk(KERN_CONT "%s%s",
			       printed ? "," : "", hwcaps[i]);
			/* Close the bracket and restart after 8 entries. */
			if (++printed == 8) {
				printk(KERN_CONT "]\n");
				printk(KERN_INFO "CPU CAPS: [");
				printed = 0;
			}
		}
	}
	printk(KERN_CONT "]\n");
}
/*
 * Build a hwcap mask from the machine description's 'hwcap-list'
 * property of the first "cpu" node.  Returns 0 when there is no MD
 * handle, no cpu node, or no such property.
 */
static unsigned long __init mdesc_cpu_hwcap_list(void)
{
	struct mdesc_handle *hp;
	unsigned long caps = 0;
	const char *prop;
	int len;
	u64 pn;

	hp = mdesc_grab();
	if (!hp)
		return 0;

	pn = mdesc_node_by_name(hp, MDESC_NODE_NULL, "cpu");
	if (pn == MDESC_NODE_NULL)
		goto out;

	prop = mdesc_get_property(hp, pn, "hwcap-list", &len);
	if (!prop)
		goto out;

	/* The property is a packed list of NUL-terminated names;
	 * map each recognized name onto its hwcaps[] bit position.
	 */
	while (len) {
		int i, plen;

		for (i = 0; i < ARRAY_SIZE(hwcaps); i++) {
			unsigned long bit = 1UL << i;

			if (!strcmp(prop, hwcaps[i])) {
				caps |= bit;
				break;
			}
		}

		plen = strlen(prop) + 1;
		prop += plen;
		len -= plen;
	}

out:
	mdesc_release(hp);
	return caps;
}
/* This yields a mask that user programs can use to figure out what
 * instruction set this cpu supports.
 */
static void __init init_sparc64_elf_hwcap(void)
{
	unsigned long cap = sparc64_elf_hwcap;
	unsigned long mdesc_caps;

	/* Family-level caps derived from the TLB/chip type. */
	if (tlb_type == cheetah || tlb_type == cheetah_plus)
		cap |= HWCAP_SPARC_ULTRA3;
	else if (tlb_type == hypervisor) {
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
			cap |= HWCAP_SPARC_BLKINIT;
		if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
		    sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
			cap |= HWCAP_SPARC_N2;
	}

	cap |= (AV_SPARC_MUL32 | AV_SPARC_DIV32 | AV_SPARC_V8PLUS);

	/* Prefer the machine description's hwcap-list; fall back to
	 * hard-coded per-chip knowledge when it is absent or empty.
	 */
	mdesc_caps = mdesc_cpu_hwcap_list();
	if (!mdesc_caps) {
		if (tlb_type == spitfire)
			cap |= AV_SPARC_VIS;
		if (tlb_type == cheetah || tlb_type == cheetah_plus)
			cap |= AV_SPARC_VIS | AV_SPARC_VIS2;
		if (tlb_type == cheetah_plus) {
			unsigned long impl, ver;

			/* POPC only advertised for the Panther
			 * implementation (checked via %ver).
			 */
			__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
			impl = ((ver >> 32) & 0xffff);
			if (impl == PANTHER_IMPL)
				cap |= AV_SPARC_POPC;
		}
		if (tlb_type == hypervisor) {
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA1)
				cap |= AV_SPARC_ASI_BLK_INIT;
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA2 ||
			    sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
				cap |= (AV_SPARC_VIS | AV_SPARC_VIS2 |
					AV_SPARC_ASI_BLK_INIT |
					AV_SPARC_POPC);
			if (sun4v_chip_type == SUN4V_CHIP_NIAGARA3)
				cap |= (AV_SPARC_VIS3 | AV_SPARC_HPC |
					AV_SPARC_FMAF);
		}
	}
	sparc64_elf_hwcap = cap | mdesc_caps;

	report_hwcaps(sparc64_elf_hwcap);

	/* Patch in real popc instructions when the CPU supports them. */
	if (sparc64_elf_hwcap & AV_SPARC_POPC)
		popc_patch();
}
/*
 * Early architecture setup: command line, early console, root device
 * and ramdisk flags, optional IP autoconfig from PROM properties, the
 * boot CPU's trap block, paging, and the ELF hwcap mask.
 */
void __init setup_arch(char **cmdline_p)
{
	/* Initialize PROM console and command line. */
	*cmdline_p = prom_getbootargs();
	strcpy(boot_command_line, *cmdline_p);
	parse_early_param();

	boot_flags_init(*cmdline_p);
#ifdef CONFIG_EARLYFB
	/* NOTE: the #ifdef guards only the condition; with EARLYFB the
	 * PROM console is registered only when btext finds a display,
	 * without EARLYFB it is registered unconditionally.
	 */
	if (btext_find_display())
#endif
		register_console(&prom_early_console);

	if (tlb_type == hypervisor)
		printk("ARCH: SUN4V\n");
	else
		printk("ARCH: SUN4U\n");

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	idprom_init();

	if (!root_flags)
		root_mountflags &= ~MS_RDONLY;
	ROOT_DEV = old_decode_dev(root_dev);
#ifdef CONFIG_BLK_DEV_RAM
	rd_image_start = ram_flags & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((ram_flags & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((ram_flags & RAMDISK_LOAD_FLAG) != 0);
#endif

	task_thread_info(&init_task)->kregs = &fake_swapper_regs;

#ifdef CONFIG_IP_PNP
	/* Seed IP autoconfig from the PROM 'chosen' node unless the
	 * user configured addresses manually.
	 */
	if (!ic_set_manually) {
		phandle chosen = prom_finddevice("/chosen");
		u32 cl, sv, gw;

		cl = prom_getintdefault (chosen, "client-ip", 0);
		sv = prom_getintdefault (chosen, "server-ip", 0);
		gw = prom_getintdefault (chosen, "gateway-ip", 0);
		if (cl && sv) {
			ic_myaddr = cl;
			ic_servaddr = sv;
			if (gw)
				ic_gateway = gw;
#if defined(CONFIG_IP_PNP_BOOTP) || defined(CONFIG_IP_PNP_RARP)
			ic_proto_enabled = 0;
#endif
		}
	}
#endif

	/* Get boot processor trap_block[] setup. */
	init_cur_cpu_trap(current_thread_info());

	paging_init();
	init_sparc64_elf_hwcap();
}
/* Defined just below; declared early so sun_do_break() can test it. */
extern int stop_a_enabled;

/*
 * Drop into the PROM command line in response to a Stop-A / break,
 * unless disabled via the stop_a_enabled flag.
 */
void sun_do_break(void)
{
	if (!stop_a_enabled)
		return;

	prom_printf("\n");
	flush_user_windows();

	prom_cmdline();
}
EXPORT_SYMBOL(sun_do_break);

int stop_a_enabled = 1;
EXPORT_SYMBOL(stop_a_enabled);
| gpl-2.0 |
Shabbypenguin/Jellybean_kernel | drivers/net/wireless/p54/main.c | 2652 | 18777 | /*
* mac80211 glue code for mac80211 Prism54 drivers
*
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2007-2009, Christian Lamparter <chunkeey@web.de>
* Copyright 2008, Johannes Berg <johannes@sipsolutions.net>
*
* Based on:
* - the islsm (softmac prism54) driver, which is:
* Copyright 2004-2006 Jean-Baptiste Note <jbnote@gmail.com>, et al.
* - stlc45xx driver
* Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies).
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "p54.h"
#include "lmac.h"
static int modparam_nohwcrypt;
module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_DESCRIPTION("Softmac Prism54 common code");
MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54common");
/*
 * mac80211 sta_add/sta_remove callback (shared implementation).
 * Either way the firmware must stop buffering frames for the station.
 */
static int p54_sta_add_remove(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta)
{
	struct p54_common *priv = hw->priv;

	/* Clear the firmware's per-station filter/buffer entry. */
	p54_sta_unlock(priv, sta->addr);

	return 0;
}
/*
 * mac80211 sta_notify callback.  Only the AWAKE transition needs
 * action: refresh the firmware's filter table for the station.
 */
static void p54_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif,
			   enum sta_notify_cmd notify_cmd,
			   struct ieee80211_sta *sta)
{
	struct p54_common *priv = dev->priv;

	if (notify_cmd != STA_NOTIFY_AWAKE)
		return;

	/* update the firmware's filter table */
	p54_sta_unlock(priv, sta->addr);
}
/*
 * mac80211 set_tim callback: mirror a station's TIM bit (by AID) into
 * the firmware-managed beacon template.
 */
static int p54_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta,
		       bool set)
{
	struct p54_common *priv = dev->priv;

	return p54_update_beacon_tim(priv, sta->aid, set);
}
/*
 * Locate an information element by id inside a beacon frame held in
 * @skb.  Returns a pointer to the IE header, or NULL if the frame is
 * too short, malformed, or lacks the IE.
 */
u8 *p54_find_ie(struct sk_buff *skb, u8 ie)
{
	struct ieee80211_mgmt *mgmt = (void *)skb->data;
	u8 *pos, *end;

	/*
	 * Fix: the old guard compared skb->len against sizeof(mgmt) --
	 * the size of a *pointer* (4/8 bytes), not of the fixed beacon
	 * header -- so truncated frames slipped through.  Bound the IE
	 * section against the end of the frame instead.
	 */
	pos = (u8 *)mgmt->u.beacon.variable;
	end = skb->data + skb->len;
	if (pos >= end)
		return NULL;

	/* Fix: require a full 2-byte IE header before reading pos[1]. */
	while (pos + 2 <= end) {
		if (pos + 2 + pos[1] > end)
			return NULL;
		if (pos[0] == ie)
			return pos;
		pos += 2 + pos[1];
	}
	return NULL;
}
static int p54_beacon_format_ie_tim(struct sk_buff *skb)
{
	/*
	 * the good excuse for this mess is ... the firmware.
	 * The dummy TIM MUST be at the end of the beacon frame,
	 * because it'll be overwritten!
	 */
	u8 *tim;
	u8 dtim_len;
	u8 dtim_period;
	u8 *next;

	tim = p54_find_ie(skb, WLAN_EID_TIM);
	if (!tim)
		return 0;

	dtim_len = tim[1];

	/*
	 * Fix: validate the IE length *before* touching its payload.
	 * The old code read tim[3] (the DTIM period) first, which lies
	 * beyond the IE data when dtim_len < 2.
	 */
	if (dtim_len < 3)
		return -EINVAL;

	dtim_period = tim[3];
	next = tim + 2 + dtim_len;

	/* Slide everything after the real TIM down over it. */
	memmove(tim, next, skb_tail_pointer(skb) - next);
	tim = skb_tail_pointer(skb) - (dtim_len + 2);

	/* add the dummy at the end: count 0, original period, empty map */
	tim[0] = WLAN_EID_TIM;
	tim[1] = 3;
	tim[2] = 0;
	tim[3] = dtim_period;
	tim[4] = 0;

	/* Shrink the frame if the original TIM carried more than 3 bytes. */
	if (dtim_len > 3)
		skb_trim(skb, skb->len - (dtim_len - 3));

	return 0;
}
/*
 * Fetch the current beacon template from mac80211, massage its TIM IE
 * into the firmware's expected layout, and upload it.
 */
static int p54_beacon_update(struct p54_common *priv,
			     struct ieee80211_vif *vif)
{
	struct sk_buff *beacon;
	int ret;

	beacon = ieee80211_beacon_get(priv->hw, vif);
	if (!beacon)
		return -ENOMEM;

	ret = p54_beacon_format_ie_tim(beacon);
	if (ret) {
		/* Fix: don't leak the template on a malformed TIM IE --
		 * ieee80211_beacon_get() hands us ownership of the skb.
		 */
		dev_kfree_skb(beacon);
		return ret;
	}

	/*
	 * During operation, the firmware takes care of beaconing.
	 * The driver only needs to upload a new beacon template, once
	 * the template was changed by the stack or userspace.
	 *
	 * LMAC API 3.2.2 also specifies that the driver does not need
	 * to cancel the old beacon template by hand, instead the firmware
	 * will release the previous one through the feedback mechanism.
	 */
	p54_tx_80211(priv->hw, beacon);
	priv->tsf_high32 = 0;
	priv->tsf_low32 = 0;

	return 0;
}
/*
 * mac80211 start callback: open the bus transport, program default
 * EDCA parameters, and park the MAC in monitor mode until an interface
 * is added.  Also kicks off the periodic statistics worker.
 */
static int p54_start(struct ieee80211_hw *dev)
{
	struct p54_common *priv = dev->priv;
	int err;

	mutex_lock(&priv->conf_mutex);
	err = priv->open(dev);
	if (err)
		goto out;

	/* Default per-AC queue parameters (AIFS, cw_min, cw_max, txop). */
	P54_SET_QUEUE(priv->qos_params[0], 0x0002, 0x0003, 0x0007, 47);
	P54_SET_QUEUE(priv->qos_params[1], 0x0002, 0x0007, 0x000f, 94);
	P54_SET_QUEUE(priv->qos_params[2], 0x0003, 0x000f, 0x03ff, 0);
	P54_SET_QUEUE(priv->qos_params[3], 0x0007, 0x000f, 0x03ff, 0);
	err = p54_set_edcf(priv);
	if (err)
		goto out;

	/* No BSS yet: all-ones BSSID, monitor mode. */
	memset(priv->bssid, ~0, ETH_ALEN);
	priv->mode = NL80211_IFTYPE_MONITOR;
	err = p54_setup_mac(priv);
	if (err) {
		priv->mode = NL80211_IFTYPE_UNSPECIFIED;
		goto out;
	}

	ieee80211_queue_delayed_work(dev, &priv->work, 0);

	priv->softled_state = 0;
	err = p54_set_leds(priv);

out:
	mutex_unlock(&priv->conf_mutex);
	return err;
}
/*
 * mac80211 stop callback: LEDs off, cancel the statistics worker, stop
 * the bus transport, then drop all queued tx frames and reset the
 * per-queue accounting and beacon/TSF state.
 */
static void p54_stop(struct ieee80211_hw *dev)
{
	struct p54_common *priv = dev->priv;
	int i;

	mutex_lock(&priv->conf_mutex);
	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
	priv->softled_state = 0;
	p54_set_leds(priv);

	/* Synchronous: the worker must not run past this point. */
	cancel_delayed_work_sync(&priv->work);

	priv->stop(dev);
	skb_queue_purge(&priv->tx_pending);
	skb_queue_purge(&priv->tx_queue);
	for (i = 0; i < P54_QUEUE_NUM; i++) {
		priv->tx_stats[i].count = 0;
		priv->tx_stats[i].len = 0;
	}

	priv->beacon_req_id = cpu_to_le32(0);
	priv->tsf_high32 = priv->tsf_low32 = 0;
	mutex_unlock(&priv->conf_mutex);
}
/*
 * mac80211 add_interface callback.  Only a single interface is
 * supported: the device must currently be idle (monitor mode), and
 * only STA/IBSS/AP/mesh types are accepted.
 */
static int p54_add_interface(struct ieee80211_hw *dev,
			     struct ieee80211_vif *vif)
{
	struct p54_common *priv = dev->priv;
	int err = -EOPNOTSUPP;

	mutex_lock(&priv->conf_mutex);

	if (priv->mode != NL80211_IFTYPE_MONITOR)
		goto out;

	priv->vif = vif;

	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_MESH_POINT:
		priv->mode = vif->type;
		break;
	default:
		goto out;
	}

	memcpy(priv->mac_addr, vif->addr, ETH_ALEN);
	p54_setup_mac(priv);
	err = 0;

out:
	mutex_unlock(&priv->conf_mutex);
	return err;
}
/*
 * mac80211 remove_interface callback: cancel any live beacon template,
 * then drop back into filter-only monitor mode with cleared addresses.
 */
static void p54_remove_interface(struct ieee80211_hw *dev,
				 struct ieee80211_vif *vif)
{
	struct p54_common *priv = dev->priv;

	mutex_lock(&priv->conf_mutex);
	priv->vif = NULL;

	/*
	 * LMAC API 3.2.2 states that any active beacon template must be
	 * canceled by the driver before attempting a mode transition.
	 */
	if (le32_to_cpu(priv->beacon_req_id) != 0) {
		p54_tx_cancel(priv, priv->beacon_req_id);
		/* Bounded wait for the firmware's cancel feedback. */
		wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ);
	}

	priv->mode = NL80211_IFTYPE_MONITOR;
	memset(priv->mac_addr, 0, ETH_ALEN);
	memset(priv->bssid, 0, ETH_ALEN);
	p54_setup_mac(priv);
	mutex_unlock(&priv->conf_mutex);
}
/*
 * mac80211 config callback: push tx power, channel, powersave and idle
 * changes down to the firmware.  Stops at the first failing command.
 */
static int p54_config(struct ieee80211_hw *dev, u32 changed)
{
	int ret = 0;
	struct p54_common *priv = dev->priv;
	struct ieee80211_conf *conf = &dev->conf;

	mutex_lock(&priv->conf_mutex);
	/* power_level (dBm) is scaled by 4 for the firmware -- presumably
	 * quarter-dB units; confirm against the LMAC spec.
	 */
	if (changed & IEEE80211_CONF_CHANGE_POWER)
		priv->output_power = conf->power_level << 2;
	if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
		/* Retune by leaving the current scan state. */
		ret = p54_scan(priv, P54_SCAN_EXIT, 0);
		if (ret)
			goto out;
	}
	if (changed & IEEE80211_CONF_CHANGE_PS) {
		ret = p54_set_ps(priv);
		if (ret)
			goto out;
	}
	if (changed & IEEE80211_CONF_CHANGE_IDLE) {
		ret = p54_setup_mac(priv);
		if (ret)
			goto out;
	}

out:
	mutex_unlock(&priv->conf_mutex);
	return ret;
}
/*
 * mac80211 prepare_multicast callback: stage the multicast address
 * list for the firmware's group filter table.  Returns non-zero so
 * configure_filter() knows an update is pending.
 */
static u64 p54_prepare_multicast(struct ieee80211_hw *dev,
				 struct netdev_hw_addr_list *mc_list)
{
	struct p54_common *priv = dev->priv;
	struct netdev_hw_addr *ha;
	int i;

	BUILD_BUG_ON(ARRAY_SIZE(priv->mc_maclist) !=
		ARRAY_SIZE(((struct p54_group_address_table *)NULL)->mac_list));
	/*
	 * The first entry is reserved for the global broadcast MAC.
	 * Otherwise the firmware will drop it and ARP will no longer work.
	 */
	i = 1;
	netdev_hw_addr_list_for_each(ha, mc_list) {
		if (i >= ARRAY_SIZE(priv->mc_maclist))
			break;
		memcpy(&priv->mc_maclist[i], ha->addr, ETH_ALEN);
		i++;
	}

	/*
	 * Fix: record how many entries were actually stored.  The old
	 * code set this to the full list length + 1 before copying,
	 * which could exceed the size of mc_maclist when the OS tracks
	 * more multicast addresses than the firmware table holds.
	 */
	priv->mc_maclist_num = i;

	return 1; /* update */
}
/*
 * mac80211 configure_filter callback: keep only the rx filter flags
 * the hardware can honor, then push MAC and group-filter updates to
 * the firmware as needed.
 */
static void p54_configure_filter(struct ieee80211_hw *dev,
				 unsigned int changed_flags,
				 unsigned int *total_flags,
				 u64 multicast)
{
	struct p54_common *priv = dev->priv;
	const unsigned int supported = FIF_PROMISC_IN_BSS |
				       FIF_ALLMULTI |
				       FIF_OTHER_BSS;

	*total_flags &= supported;
	priv->filter_flags = *total_flags;

	if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS))
		p54_setup_mac(priv);

	if ((changed_flags & FIF_ALLMULTI) || multicast)
		p54_set_groupfilter(priv);
}
/*
 * mac80211 conf_tx callback: mirror the EDCA parameters for one queue
 * into the firmware.  Queues beyond dev->queues are rejected.
 */
static int p54_conf_tx(struct ieee80211_hw *dev, u16 queue,
		       const struct ieee80211_tx_queue_params *params)
{
	struct p54_common *priv = dev->priv;
	int err = -EINVAL;

	mutex_lock(&priv->conf_mutex);
	if (queue < dev->queues) {
		P54_SET_QUEUE(priv->qos_params[queue], params->aifs,
			      params->cw_min, params->cw_max, params->txop);
		err = p54_set_edcf(priv);
	}
	mutex_unlock(&priv->conf_mutex);
	return err;
}
/*
 * Periodic worker: polls firmware statistics while the device is up.
 */
static void p54_work(struct work_struct *work)
{
	struct p54_common *priv = container_of(work, struct p54_common,
					       work.work);

	/* Nothing to poll while the device is down. */
	if (priv->mode == NL80211_IFTYPE_UNSPECIFIED)
		return;

	/*
	 * TODO: walk through tx_queue and do the following tasks
	 * 1. initiate bursts.
	 * 2. cancel stuck frames / reset the device if necessary.
	 */

	p54_fetch_statistics(priv);
}
static int p54_get_stats(struct ieee80211_hw *dev,
struct ieee80211_low_level_stats *stats)
{
struct p54_common *priv = dev->priv;
memcpy(stats, &priv->stats, sizeof(*stats));
return 0;
}
/*
 * mac80211 bss_info_changed callback: propagate BSSID, beacon
 * template, slot time, basic rates and association state into the
 * firmware.
 */
static void p54_bss_info_changed(struct ieee80211_hw *dev,
				 struct ieee80211_vif *vif,
				 struct ieee80211_bss_conf *info,
				 u32 changed)
{
	struct p54_common *priv = dev->priv;

	mutex_lock(&priv->conf_mutex);
	if (changed & BSS_CHANGED_BSSID) {
		memcpy(priv->bssid, info->bssid, ETH_ALEN);
		p54_setup_mac(priv);
	}

	if (changed & BSS_CHANGED_BEACON) {
		/* Leave any scan before uploading the new template. */
		p54_scan(priv, P54_SCAN_EXIT, 0);
		p54_setup_mac(priv);
		p54_beacon_update(priv, vif);
		p54_set_edcf(priv);
	}

	if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_BEACON)) {
		priv->use_short_slot = info->use_short_slot;
		p54_set_edcf(priv);
	}
	if (changed & BSS_CHANGED_BASIC_RATES) {
		/* 5 GHz: shift past the low 4 rate bits -- presumably
		 * the CCK-only rates absent on that band; confirm
		 * against the firmware rate table.
		 */
		if (dev->conf.channel->band == IEEE80211_BAND_5GHZ)
			priv->basic_rate_mask = (info->basic_rates << 4);
		else
			priv->basic_rate_mask = info->basic_rates;
		p54_setup_mac(priv);
		if (priv->fw_var >= 0x500)
			p54_scan(priv, P54_SCAN_EXIT, 0);
	}
	if (changed & BSS_CHANGED_ASSOC) {
		if (info->assoc) {
			priv->aid = info->aid;
			/* Wake at least every 5 DTIM intervals. */
			priv->wakeup_timer = info->beacon_int *
					     info->dtim_period * 5;
			p54_setup_mac(priv);
		} else {
			priv->wakeup_timer = 500;
			priv->aid = 0;
		}
	}

	mutex_unlock(&priv->conf_mutex);
}
/*
 * mac80211 set_key callback: program a key into the firmware's rx key
 * cache (SET_KEY) or remove it again (otherwise).
 *
 * If the rx key cache is full, the key is still "accepted" with
 * hw_key_idx = 0xff: tx encryption stays offloaded while rx decryption
 * falls back to software (see the comment at the slot allocation).
 */
static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd,
		       struct ieee80211_vif *vif, struct ieee80211_sta *sta,
		       struct ieee80211_key_conf *key)
{
	struct p54_common *priv = dev->priv;
	int slot, ret = 0;
	u8 algo = 0;
	u8 *addr = NULL;

	if (modparam_nohwcrypt)
		return -EOPNOTSUPP;

	mutex_lock(&priv->conf_mutex);
	if (cmd == SET_KEY) {
		/* Map the cipher suite onto the firmware algorithm id,
		 * rejecting suites the device's privacy caps lack.
		 */
		switch (key->cipher) {
		case WLAN_CIPHER_SUITE_TKIP:
			if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL |
			      BR_DESC_PRIV_CAP_TKIP))) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_TKIPMICHAEL;
			break;
		case WLAN_CIPHER_SUITE_WEP40:
		case WLAN_CIPHER_SUITE_WEP104:
			if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_WEP;
			break;
		case WLAN_CIPHER_SUITE_CCMP:
			if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) {
				ret = -EOPNOTSUPP;
				goto out_unlock;
			}
			key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
			algo = P54_CRYPTO_AESCCMP;
			break;
		default:
			ret = -EOPNOTSUPP;
			goto out_unlock;
		}
		slot = bitmap_find_free_region(priv->used_rxkeys,
					       priv->rx_keycache_size, 0);
		if (slot < 0) {
			/*
			 * The device supports the chosen algorithm, but the
			 * firmware does not provide enough key slots to store
			 * all of them.
			 * But encryption offload for outgoing frames is always
			 * possible, so we just pretend that the upload was
			 * successful and do the decryption in software.
			 */

			/* mark the key as invalid. */
			key->hw_key_idx = 0xff;
			goto out_unlock;
		}
	} else {
		slot = key->hw_key_idx;

		if (slot == 0xff) {
			/* This key was not uploaded into the rx key cache. */

			goto out_unlock;
		}

		/* Free the slot; algo 0 tells the firmware to clear it. */
		bitmap_release_region(priv->used_rxkeys, slot, 0);
		algo = 0;
	}

	if (sta)
		addr = sta->addr;

	ret = p54_upload_key(priv, algo, slot, key->keyidx,
			     key->keylen, addr, key->key);
	if (ret) {
		/* Upload failed: give the slot back. */
		bitmap_release_region(priv->used_rxkeys, slot, 0);
		ret = -EOPNOTSUPP;
		goto out_unlock;
	}

	key->hw_key_idx = slot;

out_unlock:
	mutex_unlock(&priv->conf_mutex);
	return ret;
}
/*
 * mac80211 get_survey callback.  Only one survey record exists: the
 * currently tuned channel with the firmware's last noise reading.
 */
static int p54_get_survey(struct ieee80211_hw *dev, int idx,
			  struct survey_info *survey)
{
	struct p54_common *priv = dev->priv;

	if (idx)
		return -ENOENT;

	survey->channel = dev->conf.channel;
	survey->filled = SURVEY_INFO_NOISE_DBM;
	survey->noise = clamp_t(s8, priv->noise, -128, 127);

	return 0;
}
/*
 * Count frames still pending in the driver-visible tx queues.
 */
static unsigned int p54_flush_count(struct p54_common *priv)
{
	unsigned int pending = 0;
	unsigned int queue;

	BUILD_BUG_ON(P54_QUEUE_NUM > ARRAY_SIZE(priv->tx_stats));

	/*
	 * Because the firmware has the sole control over any frames
	 * in the P54_QUEUE_BEACON or P54_QUEUE_SCAN queues, they
	 * don't really count as pending or active.
	 */
	for (queue = P54_QUEUE_MGMT; queue < P54_QUEUE_NUM; queue++)
		pending += priv->tx_stats[queue].len;

	return pending;
}
/*
 * mac80211 flush callback: busy-wait (in 20 ms steps) until the tx
 * queues drain or the retry budget runs out, then warn on timeout.
 */
static void p54_flush(struct ieee80211_hw *dev, bool drop)
{
	struct p54_common *priv = dev->priv;
	unsigned int total, i;

	/*
	 * Currently, it wouldn't really matter if we wait for one second
	 * or 15 minutes. But once someone gets around and completes the
	 * TODOs [cancel stuck frames / reset device] in p54_work, it will
	 * suddenly make sense to wait that long.
	 */
	i = P54_STATISTICS_UPDATE * 2 / 20;

	/*
	 * In this case no locking is required because as we speak the
	 * queues have already been stopped and no new frames can sneak
	 * up from behind.
	 *
	 * Fix: the old condition was "total = p54_flush_count(priv) &&
	 * i--".  Because && binds tighter than =, 'total' only ever held
	 * a boolean, and it became 0 when the retry budget expired -- so
	 * the timeout WARN below could never fire.
	 */
	while ((total = p54_flush_count(priv)) && i--) {
		/* waste time */
		msleep(20);
	}

	WARN(total, "tx flush timeout, unresponsive firmware");
}
/*
 * mac80211 set_coverage_class callback: store the (clamped) coverage
 * class and re-program the EDCA/timing parameters.
 */
static void p54_set_coverage_class(struct ieee80211_hw *dev, u8 coverage_class)
{
	struct p54_common *priv = dev->priv;
	/* support all coverage class values as in 802.11-2007 Table 7-27 */
	u8 cc = clamp_t(u8, coverage_class, 0, 31);

	mutex_lock(&priv->conf_mutex);
	priv->coverage_class = cc;
	p54_set_edcf(priv);
	mutex_unlock(&priv->conf_mutex);
}
/* mac80211 callback table shared by all p54 bus glue drivers. */
static const struct ieee80211_ops p54_ops = {
	.tx			= p54_tx_80211,
	.start			= p54_start,
	.stop			= p54_stop,
	.add_interface		= p54_add_interface,
	.remove_interface	= p54_remove_interface,
	.set_tim		= p54_set_tim,
	.sta_notify		= p54_sta_notify,
	.sta_add		= p54_sta_add_remove,
	.sta_remove		= p54_sta_add_remove,
	.set_key		= p54_set_key,
	.config			= p54_config,
	.flush			= p54_flush,
	.bss_info_changed	= p54_bss_info_changed,
	.prepare_multicast	= p54_prepare_multicast,
	.configure_filter	= p54_configure_filter,
	.conf_tx		= p54_conf_tx,
	.get_stats		= p54_get_stats,
	.get_survey		= p54_get_survey,
	.set_coverage_class	= p54_set_coverage_class,
};
/*
 * Allocate and pre-initialize the shared ieee80211_hw / p54_common
 * state for a bus glue driver.  @priv_data_len is the glue driver's
 * extra private size.  Returns NULL on allocation failure; the caller
 * eventually releases via p54_free_common().
 */
struct ieee80211_hw *p54_init_common(size_t priv_data_len)
{
	struct ieee80211_hw *dev;
	struct p54_common *priv;

	dev = ieee80211_alloc_hw(priv_data_len, &p54_ops);
	if (!dev)
		return NULL;

	priv = dev->priv;
	priv->hw = dev;
	priv->mode = NL80211_IFTYPE_UNSPECIFIED;
	priv->basic_rate_mask = 0x15f;
	spin_lock_init(&priv->tx_stats_lock);
	skb_queue_head_init(&priv->tx_queue);
	skb_queue_head_init(&priv->tx_pending);
	dev->flags = IEEE80211_HW_RX_INCLUDES_FCS |
		     IEEE80211_HW_SIGNAL_DBM |
		     IEEE80211_HW_SUPPORTS_PS |
		     IEEE80211_HW_PS_NULLFUNC_STACK |
		     IEEE80211_HW_BEACON_FILTER |
		     IEEE80211_HW_REPORTS_TX_ACK_STATUS;

	dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
				      BIT(NL80211_IFTYPE_ADHOC) |
				      BIT(NL80211_IFTYPE_AP) |
				      BIT(NL80211_IFTYPE_MESH_POINT);

	dev->channel_change_time = 1000;	/* TODO: find actual value */
	priv->beacon_req_id = cpu_to_le32(0);
	/* Per-queue frame limits; beacon and fw-scan hold one slot each. */
	priv->tx_stats[P54_QUEUE_BEACON].limit = 1;
	priv->tx_stats[P54_QUEUE_FWSCAN].limit = 1;
	priv->tx_stats[P54_QUEUE_MGMT].limit = 3;
	priv->tx_stats[P54_QUEUE_CAB].limit = 3;
	priv->tx_stats[P54_QUEUE_DATA].limit = 5;
	dev->queues = 1;
	priv->noise = -94;
	/*
	 * We support at most 8 tries no matter which rate they're at,
	 * we cannot support max_rates * max_rate_tries as we set it
	 * here, but setting it correctly to 4/2 or so would limit us
	 * artificially if the RC algorithm wants just two rates, so
	 * let's say 4/7, we'll redistribute it at TX time, see the
	 * comments there.
	 */
	dev->max_rates = 4;
	dev->max_rate_tries = 7;
	dev->extra_tx_headroom = sizeof(struct p54_hdr) + 4 +
				 sizeof(struct p54_tx_data);

	/*
	 * For now, disable PS by default because it affects
	 * link stability significantly.
	 */
	dev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	mutex_init(&priv->conf_mutex);
	mutex_init(&priv->eeprom_mutex);
	init_completion(&priv->eeprom_comp);
	init_completion(&priv->beacon_comp);
	INIT_DELAYED_WORK(&priv->work, p54_work);

	/* Reserved broadcast entry (see p54_prepare_multicast()). */
	memset(&priv->mc_maclist[0], ~0, ETH_ALEN);
	return dev;
}
EXPORT_SYMBOL_GPL(p54_init_common);
/*
 * Register the fully-initialized hw with mac80211 and (optionally)
 * set up the softled triggers.  Returns 0 or a negative errno; on
 * failure nothing remains registered.
 */
int p54_register_common(struct ieee80211_hw *dev, struct device *pdev)
{
	struct p54_common __maybe_unused *priv = dev->priv;
	int err;

	err = ieee80211_register_hw(dev);
	if (err) {
		dev_err(pdev, "Cannot register device (%d).\n", err);
		return err;
	}

#ifdef CONFIG_P54_LEDS
	err = p54_init_leds(priv);
	if (err) {
		/*
		 * Fix: undo the mac80211 registration on LED setup
		 * failure.  The old code returned an error while leaving
		 * the hw registered, so the caller's unwind path and the
		 * actual device state disagreed.
		 */
		ieee80211_unregister_hw(dev);
		return err;
	}
#endif /* CONFIG_P54_LEDS */

	dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy));
	return 0;
}
EXPORT_SYMBOL_GPL(p54_register_common);
/*
 * Counterpart to p54_init_common(): release all EEPROM-derived tables
 * and finally the ieee80211_hw itself.
 */
void p54_free_common(struct ieee80211_hw *dev)
{
	struct p54_common *priv = dev->priv;
	unsigned int band;

	for (band = 0; band < IEEE80211_NUM_BANDS; band++)
		kfree(priv->band_table[band]);

	kfree(priv->iq_autocal);
	priv->iq_autocal = NULL;
	kfree(priv->output_limit);
	priv->output_limit = NULL;
	kfree(priv->curve_data);
	priv->curve_data = NULL;
	kfree(priv->rssi_db);
	priv->rssi_db = NULL;
	kfree(priv->used_rxkeys);
	priv->used_rxkeys = NULL;

	ieee80211_free_hw(dev);
}
EXPORT_SYMBOL_GPL(p54_free_common);
/*
 * Counterpart to p54_register_common(): tear down the LEDs, unregister
 * from mac80211, and destroy the driver mutexes.
 */
void p54_unregister_common(struct ieee80211_hw *dev)
{
	struct p54_common *priv = dev->priv;

#ifdef CONFIG_P54_LEDS
	p54_unregister_leds(priv);
#endif /* CONFIG_P54_LEDS */

	ieee80211_unregister_hw(dev);
	mutex_destroy(&priv->conf_mutex);
	mutex_destroy(&priv->eeprom_mutex);
}
EXPORT_SYMBOL_GPL(p54_unregister_common);
| gpl-2.0 |
selva-simple/galaxyr_cm10_kernel | drivers/usb/serial/digi_acceleport.c | 2908 | 58014 | /*
* Digi AccelePort USB-4 and USB-2 Serial Converters
*
* Copyright 2000 by Digi International
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Shamelessly based on Brian Warner's keyspan_pda.c and Greg Kroah-Hartman's
* usb-serial driver.
*
* Peter Berger (pberger@brimson.com)
* Al Borchers (borchers@steinerpoint.com)
*
* (12/03/2001) gkh
* switched to using port->port.count instead of private version.
* Removed port->active
*
* (04/08/2001) gb
* Identify version on module load.
*
* (11/01/2000) Adam J. Richter
* usb_device_id table support
*
* (11/01/2000) pberger and borchers
* -- Turned off the USB_DISABLE_SPD flag for write bulk urbs--it caused
* USB 4 ports to hang on startup.
* -- Serialized access to write urbs by adding the dp_write_urb_in_use
* flag; otherwise, the driver caused SMP system hangs. Watching the
* urb status is not sufficient.
*
* (10/05/2000) gkh
* -- Fixed bug with urb->dev not being set properly, now that the usb
* core needs it.
*
* (8/8/2000) pberger and borchers
* -- Fixed close so that
* - it can timeout while waiting for transmit idle, if needed;
* - it ignores interrupts when flushing the port, turning
* of modem signalling, and so on;
* - it waits for the flush to really complete before returning.
* -- Read_bulk_callback and write_bulk_callback check for a closed
* port before using the tty struct or writing to the port.
* -- The two changes above fix the oops caused by interrupted closes.
* -- Added interruptible args to write_oob_command and set_modem_signals
* and added a timeout arg to transmit_idle; needed for fixes to
* close.
* -- Added code for rx_throttle and rx_unthrottle so that input flow
* control works.
* -- Added code to set overrun, parity, framing, and break errors
* (untested).
* -- Set USB_DISABLE_SPD flag for write bulk urbs, so no 0 length
* bulk writes are done. These hung the Digi USB device. The
* 0 length bulk writes were a new feature of usb-uhci added in
* the 2.4.0-test6 kernels.
* -- Fixed mod inc race in open; do mod inc before sleeping to wait
* for a close to finish.
*
* (7/31/2000) pberger
* -- Fixed bugs with hardware handshaking:
* - Added code to set/clear tty->hw_stopped in digi_read_oob_callback()
* and digi_set_termios()
* -- Added code in digi_set_termios() to
* - add conditional in code handling transition from B0 to only
* set RTS if RTS/CTS flow control is either not in use or if
* the port is not currently throttled.
* - handle turning off CRTSCTS.
*
* (7/30/2000) borchers
* -- Added support for more than one Digi USB device by moving
* globals to a private structure in the pointed to from the
* usb_serial structure.
* -- Moved the modem change and transmit idle wait queues into
* the port private structure, so each port has its own queue
* rather than sharing global queues.
* -- Added support for break signals.
*
* (7/25/2000) pberger
* -- Added USB-2 support. Note: the USB-2 supports 3 devices: two
* serial and a parallel port. The parallel port is implemented
* as a serial-to-parallel converter. That is, the driver actually
* presents all three USB-2 interfaces as serial ports, but the third
* one physically connects to a parallel device. Thus, for example,
* one could plug a parallel printer into the USB-2's third port,
* but from the kernel's (and userland's) point of view what's
* actually out there is a serial device.
*
* (7/15/2000) borchers
* -- Fixed race in open when a close is in progress.
* -- Keep count of opens and dec the module use count for each
* outstanding open when shutdown is called (on disconnect).
* -- Fixed sanity checks in read_bulk_callback and write_bulk_callback
* so pointers are checked before use.
* -- Split read bulk callback into in band and out of band
* callbacks, and no longer restart read chains if there is
* a status error or a sanity error. This fixed the seg
* faults and other errors we used to get on disconnect.
* -- Port->active is once again a flag as usb-serial intended it
* to be, not a count. Since it was only a char it would
* have been limited to 256 simultaneous opens. Now the open
* count is kept in the port private structure in dp_open_count.
* -- Added code for modularization of the digi_acceleport driver.
*
* (6/27/2000) pberger and borchers
* -- Zeroed out sync field in the wakeup_task before first use;
* otherwise the uninitialized value might prevent the task from
* being scheduled.
* -- Initialized ret value to 0 in write_bulk_callback, otherwise
* the uninitialized value could cause a spurious debugging message.
*
* (6/22/2000) pberger and borchers
* -- Made cond_wait_... inline--apparently on SPARC the flags arg
* to spin_lock_irqsave cannot be passed to another function
* to call spin_unlock_irqrestore. Thanks to Pauline Middelink.
* -- In digi_set_modem_signals the inner nested spin locks use just
* spin_lock() rather than spin_lock_irqsave(). The old code
* mistakenly left interrupts off. Thanks to Pauline Middelink.
* -- copy_from_user (which can sleep) is no longer called while a
* spinlock is held. We copy to a local buffer before getting
* the spinlock--don't like the extra copy but the code is simpler.
* -- Printk and dbg are no longer called while a spin lock is held.
*
* (6/4/2000) pberger and borchers
* -- Replaced separate calls to spin_unlock_irqrestore and
* interruptible_sleep_on_timeout with a new function
* cond_wait_interruptible_timeout_irqrestore. This eliminates
* the race condition where the wake up could happen after
* the unlock and before the sleep.
* -- Close now waits for output to drain.
* -- Open waits until any close in progress is finished.
* -- All out of band responses are now processed, not just the
* first in a USB packet.
* -- Fixed a bug that prevented the driver from working when the
* first Digi port was not the first USB serial port--the driver
* was mistakenly using the external USB serial port number to
* try to index into its internal ports.
* -- Fixed an SMP bug -- write_bulk_callback is called directly from
* an interrupt, so spin_lock_irqsave/spin_unlock_irqrestore are
* needed for locks outside write_bulk_callback that are also
* acquired by write_bulk_callback to prevent deadlocks.
* -- Fixed support for select() by making digi_chars_in_buffer()
* return 256 when -EINPROGRESS is set, as the line discipline
* code in n_tty.c expects.
* -- Fixed an include file ordering problem that prevented debugging
* messages from working.
* -- Fixed an intermittent timeout problem that caused writes to
* sometimes get stuck on some machines on some kernels. It turns
* out in these circumstances write_chan() (in n_tty.c) was
* asleep waiting for our wakeup call. Even though we call
* wake_up_interruptible() in digi_write_bulk_callback(), there is
* a race condition that could cause the wakeup to fail: if our
* wake_up_interruptible() call occurs between the time that our
* driver write routine finishes and write_chan() sets current->state
* to TASK_INTERRUPTIBLE, the effect of our wakeup setting the state
* to TASK_RUNNING will be lost and write_chan's subsequent call to
* schedule() will never return (unless it catches a signal).
* This race condition occurs because write_bulk_callback() (and thus
* the wakeup) are called asynchronously from an interrupt, rather than
* from the scheduler. We can avoid the race by calling the wakeup
* from the scheduler queue and that's our fix: Now, at the end of
* write_bulk_callback() we queue up a wakeup call on the scheduler
* task queue. We still also invoke the wakeup directly since that
* squeezes a bit more performance out of the driver, and any lost
* race conditions will get cleaned up at the next scheduler run.
*
* NOTE: The problem also goes away if you comment out
* the two code lines in write_chan() where current->state
* is set to TASK_RUNNING just before calling driver.write() and to
* TASK_INTERRUPTIBLE immediately afterwards. This is why the
* problem did not show up with the 2.2 kernels -- they do not
* include that code.
*
* (5/16/2000) pberger and borchers
* -- Added timeouts to sleeps, to defend against lost wake ups.
* -- Handle transition to/from B0 baud rate in digi_set_termios.
*
* (5/13/2000) pberger and borchers
* -- All commands now sent on out of band port, using
* digi_write_oob_command.
* -- Get modem control signals whenever they change, support TIOCMGET/
* SET/BIS/BIC ioctls.
* -- digi_set_termios now supports parity, word size, stop bits, and
* receive enable.
* -- Cleaned up open and close, use digi_set_termios and
* digi_write_oob_command to set port parameters.
* -- Added digi_startup_device to start read chains on all ports.
* -- Write buffer is only used when count==1, to be sure put_char can
* write a char (unless the buffer is full).
*
* (5/10/2000) pberger and borchers
* -- Added MOD_INC_USE_COUNT/MOD_DEC_USE_COUNT calls on open/close.
* -- Fixed problem where the first incoming character is lost on
* port opens after the first close on that port. Now we keep
* the read_urb chain open until shutdown.
* -- Added more port conditioning calls in digi_open and digi_close.
* -- Convert port->active to a use count so that we can deal with multiple
* opens and closes properly.
* -- Fixed some problems with the locking code.
*
* (5/3/2000) pberger and borchers
* -- First alpha version of the driver--many known limitations and bugs.
*
*
* Locking and SMP
*
* - Each port, including the out-of-band port, has a lock used to
* serialize all access to the port's private structure.
* - The port lock is also used to serialize all writes and access to
* the port's URB.
* - The port lock is also used for the port write_wait condition
* variable. Holding the port lock will prevent a wake up on the
* port's write_wait; this can be used with cond_wait_... to be sure
* the wake up is not lost in a race when dropping the lock and
* sleeping waiting for the wakeup.
* - digi_write() does not sleep, since it is sometimes called on
* interrupt time.
* - digi_write_bulk_callback() and digi_read_bulk_callback() are
* called directly from interrupts. Hence spin_lock_irqsave()
* and spin_unlock_irqrestore() are used in the rest of the code
* for any locks they acquire.
* - digi_write_bulk_callback() gets the port lock before waking up
* processes sleeping on the port write_wait. It also schedules
* wake ups so they happen from the scheduler, because the tty
* system can miss wake ups from interrupts.
* - All sleeps use a timeout of DIGI_RETRY_TIMEOUT before looping to
* recheck the condition they are sleeping on. This is defensive,
* in case a wake up is lost.
* - Following Documentation/DocBook/kernel-locking.pdf no spin locks
* are held when calling copy_to/from_user or printk.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/tty.h>
#include <linux/tty_driver.h>
#include <linux/tty_flip.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/uaccess.h>
#include <linux/usb.h>
#include <linux/wait.h>
#include <linux/usb/serial.h>
/* Defines */
/*
 * Version Information
 */
#define DRIVER_VERSION "v1.80.1.2"
#define DRIVER_AUTHOR "Peter Berger <pberger@brimson.com>, Al Borchers <borchers@steinerpoint.com>"
#define DRIVER_DESC "Digi AccelePort USB-2/USB-4 Serial Converter driver"
/* port output buffer length -- must be <= transfer buffer length - 2 */
/* so we can be sure to send the full buffer in one urb */
#define DIGI_OUT_BUF_SIZE 8
/* port input buffer length -- must be >= transfer buffer length - 3 */
/* so we can be sure to hold at least one full buffer from one urb */
#define DIGI_IN_BUF_SIZE 64
/* retry timeout while sleeping */
#define DIGI_RETRY_TIMEOUT (HZ/10)
/* timeout while waiting for tty output to drain in close */
/* this delay is used twice in close, so the total delay could */
/* be twice this value */
#define DIGI_CLOSE_TIMEOUT (5*HZ)
/* AccelePort USB Defines */
/* ids */
#define DIGI_VENDOR_ID 0x05c5
#define DIGI_2_ID 0x0002 /* USB-2 */
#define DIGI_4_ID 0x0004 /* USB-4 */
/* commands
 * "INB": can be used on the in-band endpoint
 * "OOB": can be used on the out-of-band endpoint
 */
#define DIGI_CMD_SET_BAUD_RATE 0 /* INB, OOB */
#define DIGI_CMD_SET_WORD_SIZE 1 /* INB, OOB */
#define DIGI_CMD_SET_PARITY 2 /* INB, OOB */
#define DIGI_CMD_SET_STOP_BITS 3 /* INB, OOB */
#define DIGI_CMD_SET_INPUT_FLOW_CONTROL 4 /* INB, OOB */
#define DIGI_CMD_SET_OUTPUT_FLOW_CONTROL 5 /* INB, OOB */
#define DIGI_CMD_SET_DTR_SIGNAL 6 /* INB, OOB */
#define DIGI_CMD_SET_RTS_SIGNAL 7 /* INB, OOB */
#define DIGI_CMD_READ_INPUT_SIGNALS 8 /* OOB */
#define DIGI_CMD_IFLUSH_FIFO 9 /* OOB */
#define DIGI_CMD_RECEIVE_ENABLE 10 /* INB, OOB */
#define DIGI_CMD_BREAK_CONTROL 11 /* INB, OOB */
#define DIGI_CMD_LOCAL_LOOPBACK 12 /* INB, OOB */
#define DIGI_CMD_TRANSMIT_IDLE 13 /* INB, OOB */
#define DIGI_CMD_READ_UART_REGISTER 14 /* OOB */
#define DIGI_CMD_WRITE_UART_REGISTER 15 /* INB, OOB */
#define DIGI_CMD_AND_UART_REGISTER 16 /* INB, OOB */
#define DIGI_CMD_OR_UART_REGISTER 17 /* INB, OOB */
#define DIGI_CMD_SEND_DATA 18 /* INB */
#define DIGI_CMD_RECEIVE_DATA 19 /* INB */
#define DIGI_CMD_RECEIVE_DISABLE 20 /* INB */
#define DIGI_CMD_GET_PORT_TYPE 21 /* OOB */
/* baud rates (arguments to DIGI_CMD_SET_BAUD_RATE) */
#define DIGI_BAUD_50 0
#define DIGI_BAUD_75 1
#define DIGI_BAUD_110 2
#define DIGI_BAUD_150 3
#define DIGI_BAUD_200 4
#define DIGI_BAUD_300 5
#define DIGI_BAUD_600 6
#define DIGI_BAUD_1200 7
#define DIGI_BAUD_1800 8
#define DIGI_BAUD_2400 9
#define DIGI_BAUD_4800 10
#define DIGI_BAUD_7200 11
#define DIGI_BAUD_9600 12
#define DIGI_BAUD_14400 13
#define DIGI_BAUD_19200 14
#define DIGI_BAUD_28800 15
#define DIGI_BAUD_38400 16
#define DIGI_BAUD_57600 17
#define DIGI_BAUD_76800 18
#define DIGI_BAUD_115200 19
#define DIGI_BAUD_153600 20
#define DIGI_BAUD_230400 21
#define DIGI_BAUD_460800 22
/* arguments (values for the remaining set/control commands above) */
#define DIGI_WORD_SIZE_5 0
#define DIGI_WORD_SIZE_6 1
#define DIGI_WORD_SIZE_7 2
#define DIGI_WORD_SIZE_8 3
#define DIGI_PARITY_NONE 0
#define DIGI_PARITY_ODD 1
#define DIGI_PARITY_EVEN 2
#define DIGI_PARITY_MARK 3
#define DIGI_PARITY_SPACE 4
#define DIGI_STOP_BITS_1 0
#define DIGI_STOP_BITS_2 1
#define DIGI_INPUT_FLOW_CONTROL_XON_XOFF 1
#define DIGI_INPUT_FLOW_CONTROL_RTS 2
#define DIGI_INPUT_FLOW_CONTROL_DTR 4
#define DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF 1
#define DIGI_OUTPUT_FLOW_CONTROL_CTS 2
#define DIGI_OUTPUT_FLOW_CONTROL_DSR 4
#define DIGI_DTR_INACTIVE 0
#define DIGI_DTR_ACTIVE 1
#define DIGI_DTR_INPUT_FLOW_CONTROL 2
#define DIGI_RTS_INACTIVE 0
#define DIGI_RTS_ACTIVE 1
#define DIGI_RTS_INPUT_FLOW_CONTROL 2
#define DIGI_RTS_TOGGLE 3
#define DIGI_FLUSH_TX 1
#define DIGI_FLUSH_RX 2
#define DIGI_RESUME_TX 4 /* clears xoff condition */
#define DIGI_TRANSMIT_NOT_IDLE 0
#define DIGI_TRANSMIT_IDLE 1
#define DIGI_DISABLE 0
#define DIGI_ENABLE 1
#define DIGI_DEASSERT 0
#define DIGI_ASSERT 1
/* in band status codes (bit flags, reported with received data) */
#define DIGI_OVERRUN_ERROR 4
#define DIGI_PARITY_ERROR 8
#define DIGI_FRAMING_ERROR 16
#define DIGI_BREAK_ERROR 32
/* out of band status */
#define DIGI_NO_ERROR 0
#define DIGI_BAD_FIRST_PARAMETER 1
#define DIGI_BAD_SECOND_PARAMETER 2
#define DIGI_INVALID_LINE 3
#define DIGI_INVALID_OPCODE 4
/* input signals (bit flags, reported by DIGI_CMD_READ_INPUT_SIGNALS) */
#define DIGI_READ_INPUT_SIGNALS_SLOT 1
#define DIGI_READ_INPUT_SIGNALS_ERR 2
#define DIGI_READ_INPUT_SIGNALS_BUSY 4
#define DIGI_READ_INPUT_SIGNALS_PE 8
#define DIGI_READ_INPUT_SIGNALS_CTS 16
#define DIGI_READ_INPUT_SIGNALS_DSR 32
#define DIGI_READ_INPUT_SIGNALS_RI 64
#define DIGI_READ_INPUT_SIGNALS_DCD 128
/* Structures */
/* Per-device state, shared by all ports of one converter. */
struct digi_serial {
	spinlock_t ds_serial_lock;
	struct usb_serial_port *ds_oob_port;	/* out-of-band port */
	int ds_oob_port_num;			/* index of out-of-band port */
	/* nonzero once reads have been started on all ports -- set by
	 * digi_startup_device (see "be sure this happens exactly once") */
	int ds_device_started;
};
/* Per-port state, stored as the usb_serial_port private data.
 * dp_port_lock serializes access to all of the fields below. */
struct digi_port {
	spinlock_t dp_port_lock;
	int dp_port_num;			/* port index on the device */
	int dp_out_buf_len;			/* valid bytes in dp_out_buf */
	unsigned char dp_out_buf[DIGI_OUT_BUF_SIZE];	/* buffered output (put_char) */
	int dp_write_urb_in_use;		/* write urb submitted, completion pending */
	unsigned int dp_modem_signals;		/* cached TIOCM_* state (see tiocmget) */
	wait_queue_head_t dp_modem_change_wait;
	int dp_transmit_idle;			/* transmitter-idle event seen */
	wait_queue_head_t dp_transmit_idle_wait;
	int dp_throttled;			/* input throttled, read urb not resubmitted */
	int dp_throttle_restart;		/* read urb must be resubmitted on unthrottle */
	wait_queue_head_t dp_flush_wait;
	wait_queue_head_t dp_close_wait;	/* wait queue for close */
	struct work_struct dp_wakeup_work;	/* deferred tty write wakeup */
	struct usb_serial_port *dp_port;	/* back pointer to owning port */
};
/* Local Function Declarations */
/* internal helpers */
static void digi_wakeup_write(struct usb_serial_port *port);
static void digi_wakeup_write_lock(struct work_struct *work);
static int digi_write_oob_command(struct usb_serial_port *port,
	unsigned char *buf, int count, int interruptible);
static int digi_write_inb_command(struct usb_serial_port *port,
	unsigned char *buf, int count, unsigned long timeout);
static int digi_set_modem_signals(struct usb_serial_port *port,
	unsigned int modem_signals, int interruptible);
static int digi_transmit_idle(struct usb_serial_port *port,
	unsigned long timeout);
/* tty / usb-serial operation callbacks */
static void digi_rx_throttle(struct tty_struct *tty);
static void digi_rx_unthrottle(struct tty_struct *tty);
static void digi_set_termios(struct tty_struct *tty,
		struct usb_serial_port *port, struct ktermios *old_termios);
static void digi_break_ctl(struct tty_struct *tty, int break_state);
static int digi_tiocmget(struct tty_struct *tty);
static int digi_tiocmset(struct tty_struct *tty, unsigned int set,
		unsigned int clear);
static int digi_write(struct tty_struct *tty, struct usb_serial_port *port,
		const unsigned char *buf, int count);
static void digi_write_bulk_callback(struct urb *urb);
static int digi_write_room(struct tty_struct *tty);
static int digi_chars_in_buffer(struct tty_struct *tty);
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port);
static void digi_close(struct usb_serial_port *port);
static void digi_dtr_rts(struct usb_serial_port *port, int on);
/* device lifecycle */
static int digi_startup_device(struct usb_serial *serial);
static int digi_startup(struct usb_serial *serial);
static void digi_disconnect(struct usb_serial *serial);
static void digi_release(struct usb_serial *serial);
/* urb completion handlers */
static void digi_read_bulk_callback(struct urb *urb);
static int digi_read_inb_callback(struct urb *urb);
static int digi_read_oob_callback(struct urb *urb);
/* Statics */
/* NOTE(review): debug flag; its consumer (dbg macro/module param wiring)
 * is not visible in this chunk -- confirm before relying on it */
static int debug;
/* device ids matched by this driver: combined table for the usb_driver,
 * per-model tables for the two usb_serial_driver descriptors */
static const struct usb_device_id id_table_combined[] = {
	{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
	{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
	{ }					/* Terminating entry */
};
static const struct usb_device_id id_table_2[] = {
	{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_2_ID) },
	{ }					/* Terminating entry */
};
static const struct usb_device_id id_table_4[] = {
	{ USB_DEVICE(DIGI_VENDOR_ID, DIGI_4_ID) },
	{ }					/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, id_table_combined);
/* USB glue: probe/disconnect are the generic usb-serial entry points */
static struct usb_driver digi_driver = {
	.name = "digi_acceleport",
	.probe = usb_serial_probe,
	.disconnect = usb_serial_disconnect,
	.id_table = id_table_combined,
	.no_dynamic_id = 1,
};
/* device info needed for the Digi serial converter */
static struct usb_serial_driver digi_acceleport_2_device = {
	.driver = {
		.owner = THIS_MODULE,
		.name = "digi_2",
	},
	.description = "Digi 2 port USB adapter",
	.usb_driver = &digi_driver,
	.id_table = id_table_2,
	/* NOTE(review): 3 ports on the 2-port adapter -- presumably the
	 * extra one is the out-of-band command port (see struct
	 * digi_serial); confirm against the device documentation */
	.num_ports = 3,
	.open = digi_open,
	.close = digi_close,
	.dtr_rts = digi_dtr_rts,
	.write = digi_write,
	.write_room = digi_write_room,
	.write_bulk_callback = digi_write_bulk_callback,
	.read_bulk_callback = digi_read_bulk_callback,
	.chars_in_buffer = digi_chars_in_buffer,
	.throttle = digi_rx_throttle,
	.unthrottle = digi_rx_unthrottle,
	.set_termios = digi_set_termios,
	.break_ctl = digi_break_ctl,
	.tiocmget = digi_tiocmget,
	.tiocmset = digi_tiocmset,
	.attach = digi_startup,
	.disconnect = digi_disconnect,
	.release = digi_release,
};
static struct usb_serial_driver digi_acceleport_4_device = {
.driver = {
.owner = THIS_MODULE,
.name = "digi_4",
},
.description = "Digi 4 port USB adapter",
.usb_driver = &digi_driver,
.id_table = id_table_4,
.num_ports = 4,
.open = digi_open,
.close = digi_close,
.write = digi_write,
.write_room = digi_write_room,
.write_bulk_callback = digi_write_bulk_callback,
.read_bulk_callback = digi_read_bulk_callback,
.chars_in_buffer = digi_chars_in_buffer,
.throttle = digi_rx_throttle,
.unthrottle = digi_rx_unthrottle,
.set_termios = digi_set_termios,
.break_ctl = digi_break_ctl,
.tiocmget = digi_tiocmget,
.tiocmset = digi_tiocmset,
.attach = digi_startup,
.disconnect = digi_disconnect,
.release = digi_release,
};
/* Functions */
/*
 * Cond Wait Interruptible Timeout Irqrestore
 *
 * Do spin_unlock_irqrestore and interruptible_sleep_on_timeout
 * so that wake ups are not lost if they occur between the unlock
 * and the sleep.  In other words, spin_unlock_irqrestore and
 * interruptible_sleep_on_timeout are "atomic" with respect to
 * wake ups.  This is used to implement condition variables.
 *
 * interruptible_sleep_on_timeout is deprecated and has been replaced
 * with the equivalent code.
 *
 * Caller must hold *lock on entry; on return the lock is RELEASED
 * (hence __releases below) and must be reacquired if still needed.
 * Returns the remaining timeout from schedule_timeout.
 */
static long cond_wait_interruptible_timeout_irqrestore(
	wait_queue_head_t *q, long timeout,
	spinlock_t *lock, unsigned long flags)
__releases(lock)
{
	DEFINE_WAIT(wait);
	/* queue ourselves on q BEFORE dropping the lock, so a wake up
	 * issued between the unlock and the schedule is not lost */
	prepare_to_wait(q, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irqrestore(lock, flags);
	timeout = schedule_timeout(timeout);
	finish_wait(q, &wait);
	return timeout;
}
/*
 * Digi Wakeup Write Lock
 *
 * Work item body: takes the port lock and performs the deferred
 * write wakeup scheduled by digi_write_bulk_callback.
 */
static void digi_wakeup_write_lock(struct work_struct *work)
{
	struct digi_port *dp = container_of(work, struct digi_port,
						dp_wakeup_work);
	struct usb_serial_port *serial_port = dp->dp_port;
	unsigned long irqflags;

	spin_lock_irqsave(&dp->dp_port_lock, irqflags);
	digi_wakeup_write(serial_port);
	spin_unlock_irqrestore(&dp->dp_port_lock, irqflags);
}
static void digi_wakeup_write(struct usb_serial_port *port)
{
struct tty_struct *tty = tty_port_tty_get(&port->port);
if (tty) {
tty_wakeup(tty);
tty_kref_put(tty);
}
}
/*
 * Digi Write OOB Command
 *
 * Write commands on the out of band port.  Commands are 4
 * bytes each, multiple commands can be sent at once, and
 * no command will be split across USB packets.  Returns 0
 * if successful, -EINTR if interrupted while sleeping and
 * the interruptible flag is true, or a negative error
 * returned by usb_submit_urb.
 */
static int digi_write_oob_command(struct usb_serial_port *port,
	unsigned char *buf, int count, int interruptible)
{
	int ret = 0;
	int len;
	/* commands are funneled through the device's single oob port */
	struct usb_serial_port *oob_port = (struct usb_serial_port *)((struct digi_serial *)(usb_get_serial_data(port->serial)))->ds_oob_port;
	struct digi_port *oob_priv = usb_get_serial_port_data(oob_port);
	unsigned long flags = 0;
	dbg("digi_write_oob_command: TOP: port=%d, count=%d", oob_priv->dp_port_num, count);
	spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
	/* bail out of the loop on the first submission error instead of
	 * retrying forever, matching digi_write_inb_command */
	while (count > 0 && ret == 0) {
		/* wait for the oob write urb to become free; the cond_wait
		 * helper drops the lock, so reacquire it after each wait */
		while (oob_priv->dp_write_urb_in_use) {
			cond_wait_interruptible_timeout_irqrestore(
				&oob_port->write_wait, DIGI_RETRY_TIMEOUT,
				&oob_priv->dp_port_lock, flags);
			/* lock is NOT held here, so a plain return is safe */
			if (interruptible && signal_pending(current))
				return -EINTR;
			spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
		}
		/* len must be a multiple of 4, so commands are not split */
		len = min(count, oob_port->bulk_out_size);
		if (len > 4)
			len &= ~3;
		memcpy(oob_port->write_urb->transfer_buffer, buf, len);
		oob_port->write_urb->transfer_buffer_length = len;
		oob_port->write_urb->dev = port->serial->dev;
		ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
		if (ret == 0) {
			oob_priv->dp_write_urb_in_use = 1;
			count -= len;
			buf += len;
		}
	}
	spin_unlock_irqrestore(&oob_priv->dp_port_lock, flags);
	if (ret)
		dev_err(&port->dev, "%s: usb_submit_urb failed, ret=%d\n",
			__func__, ret);
	return ret;
}
/*
 * Digi Write In Band Command
 *
 * Write commands on the given port.  Commands are 4
 * bytes each, multiple commands can be sent at once, and
 * no command will be split across USB packets.  If timeout
 * is non-zero, write in band command will return after
 * waiting unsuccessfully for the URB status to clear for
 * timeout ticks.  Returns 0 if successful, or a negative
 * error returned by digi_write.
 */
static int digi_write_inb_command(struct usb_serial_port *port,
	unsigned char *buf, int count, unsigned long timeout)
{
	int ret = 0;
	int len;
	struct digi_port *priv = usb_get_serial_port_data(port);
	unsigned char *data = port->write_urb->transfer_buffer;
	unsigned long flags = 0;
	dbg("digi_write_inb_command: TOP: port=%d, count=%d",
		priv->dp_port_num, count);
	/* convert the relative timeout into an absolute jiffies deadline;
	 * 0 means "wait forever" */
	if (timeout)
		timeout += jiffies;
	else
		timeout = ULONG_MAX;
	spin_lock_irqsave(&priv->dp_port_lock, flags);
	while (count > 0 && ret == 0) {
		/* wait for the write urb to free up, until the deadline;
		 * the cond_wait helper drops the lock, so reacquire it */
		while (priv->dp_write_urb_in_use &&
		       time_before(jiffies, timeout)) {
			cond_wait_interruptible_timeout_irqrestore(
				&port->write_wait, DIGI_RETRY_TIMEOUT,
				&priv->dp_port_lock, flags);
			/* lock is NOT held here, so a plain return is safe */
			if (signal_pending(current))
				return -EINTR;
			spin_lock_irqsave(&priv->dp_port_lock, flags);
		}
		/* len must be a multiple of 4 and small enough to */
		/* guarantee the write will send buffered data first, */
		/* so commands are in order with data and not split */
		len = min(count, port->bulk_out_size-2-priv->dp_out_buf_len);
		if (len > 4)
			len &= ~3;
		/* write any buffered data first */
		if (priv->dp_out_buf_len > 0) {
			/* prefix the urb with a SEND_DATA header for the
			 * buffered bytes, then append the command bytes */
			data[0] = DIGI_CMD_SEND_DATA;
			data[1] = priv->dp_out_buf_len;
			memcpy(data + 2, priv->dp_out_buf,
				priv->dp_out_buf_len);
			memcpy(data + 2 + priv->dp_out_buf_len, buf, len);
			port->write_urb->transfer_buffer_length
				= priv->dp_out_buf_len + 2 + len;
		} else {
			memcpy(data, buf, len);
			port->write_urb->transfer_buffer_length = len;
		}
		port->write_urb->dev = port->serial->dev;
		ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
		if (ret == 0) {
			priv->dp_write_urb_in_use = 1;
			priv->dp_out_buf_len = 0;
			count -= len;
			buf += len;
		}
	}
	spin_unlock_irqrestore(&priv->dp_port_lock, flags);
	if (ret)
		dev_err(&port->dev,
			"%s: usb_submit_urb failed, ret=%d, port=%d\n",
			__func__, ret, priv->dp_port_num);
	return ret;
}
/*
 * Digi Set Modem Signals
 *
 * Sets or clears DTR and RTS on the port, according to the
 * modem_signals argument.  Use TIOCM_DTR and TIOCM_RTS flags
 * for the modem_signals argument.  Returns 0 if successful,
 * -EINTR if interrupted while sleeping, or a non-zero error
 * returned by usb_submit_urb.
 */
static int digi_set_modem_signals(struct usb_serial_port *port,
	unsigned int modem_signals, int interruptible)
{
	int ret;
	struct digi_port *port_priv = usb_get_serial_port_data(port);
	struct usb_serial_port *oob_port = (struct usb_serial_port *) ((struct digi_serial *)(usb_get_serial_data(port->serial)))->ds_oob_port;
	struct digi_port *oob_priv = usb_get_serial_port_data(oob_port);
	unsigned char *data = oob_port->write_urb->transfer_buffer;
	unsigned long flags = 0;
	dbg("digi_set_modem_signals: TOP: port=%d, modem_signals=0x%x",
		port_priv->dp_port_num, modem_signals);
	/* lock order: oob port lock outer, target port lock inner --
	 * both are needed because the command goes out on the oob urb
	 * but updates the target port's cached signal state */
	spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
	spin_lock(&port_priv->dp_port_lock);
	while (oob_priv->dp_write_urb_in_use) {
		/* drop the inner lock before the cond_wait helper drops
		 * the outer one; reacquire both in order after waking */
		spin_unlock(&port_priv->dp_port_lock);
		cond_wait_interruptible_timeout_irqrestore(
			&oob_port->write_wait, DIGI_RETRY_TIMEOUT,
			&oob_priv->dp_port_lock, flags);
		/* no locks held here, so a plain return is safe */
		if (interruptible && signal_pending(current))
			return -EINTR;
		spin_lock_irqsave(&oob_priv->dp_port_lock, flags);
		spin_lock(&port_priv->dp_port_lock);
	}
	/* two 4-byte commands in one urb: set DTR, then set RTS */
	data[0] = DIGI_CMD_SET_DTR_SIGNAL;
	data[1] = port_priv->dp_port_num;
	data[2] = (modem_signals & TIOCM_DTR) ?
		DIGI_DTR_ACTIVE : DIGI_DTR_INACTIVE;
	data[3] = 0;
	data[4] = DIGI_CMD_SET_RTS_SIGNAL;
	data[5] = port_priv->dp_port_num;
	data[6] = (modem_signals & TIOCM_RTS) ?
		DIGI_RTS_ACTIVE : DIGI_RTS_INACTIVE;
	data[7] = 0;
	oob_port->write_urb->transfer_buffer_length = 8;
	oob_port->write_urb->dev = port->serial->dev;
	ret = usb_submit_urb(oob_port->write_urb, GFP_ATOMIC);
	if (ret == 0) {
		oob_priv->dp_write_urb_in_use = 1;
		/* update the cached DTR/RTS bits, leaving others intact */
		port_priv->dp_modem_signals =
			(port_priv->dp_modem_signals&~(TIOCM_DTR|TIOCM_RTS))
			| (modem_signals&(TIOCM_DTR|TIOCM_RTS));
	}
	spin_unlock(&port_priv->dp_port_lock);
	spin_unlock_irqrestore(&oob_priv->dp_port_lock, flags);
	if (ret)
		dev_err(&port->dev, "%s: usb_submit_urb failed, ret=%d\n",
			__func__, ret);
	return ret;
}
/*
 * Digi Transmit Idle
 *
 * Digi transmit idle waits, up to timeout ticks, for the transmitter
 * to go idle.  It returns 0 if successful or a negative error.
 *
 * There are race conditions here if more than one process is calling
 * digi_transmit_idle on the same port at the same time.  However, this
 * is only called from close, and only one process can be in close on a
 * port at a time, so it's OK.
 */
static int digi_transmit_idle(struct usb_serial_port *port,
	unsigned long timeout)
{
	int ret;
	unsigned char buf[2];
	struct digi_port *priv = usb_get_serial_port_data(port);
	unsigned long flags = 0;
	/* clear any stale idle indication before asking the device */
	spin_lock_irqsave(&priv->dp_port_lock, flags);
	priv->dp_transmit_idle = 0;
	spin_unlock_irqrestore(&priv->dp_port_lock, flags);
	buf[0] = DIGI_CMD_TRANSMIT_IDLE;
	buf[1] = 0;
	/* convert to an absolute deadline, then pass the remaining
	 * relative timeout to the command write */
	timeout += jiffies;
	ret = digi_write_inb_command(port, buf, 2, timeout - jiffies);
	if (ret != 0)
		return ret;
	spin_lock_irqsave(&priv->dp_port_lock, flags);
	/* wait for the device to report transmit-idle, until deadline */
	while (time_before(jiffies, timeout) && !priv->dp_transmit_idle) {
		cond_wait_interruptible_timeout_irqrestore(
			&priv->dp_transmit_idle_wait, DIGI_RETRY_TIMEOUT,
			&priv->dp_port_lock, flags);
		/* lock is NOT held here, so a plain return is safe */
		if (signal_pending(current))
			return -EINTR;
		spin_lock_irqsave(&priv->dp_port_lock, flags);
	}
	priv->dp_transmit_idle = 0;
	spin_unlock_irqrestore(&priv->dp_port_lock, flags);
	return 0;
}
/* Throttle input: reads stop simply because the read urb is not
 * resubmitted while dp_throttled is set. */
static void digi_rx_throttle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct digi_port *priv = usb_get_serial_port_data(port);
	unsigned long irqflags;

	dbg("digi_rx_throttle: TOP: port=%d", priv->dp_port_num);
	/* stop receiving characters by not resubmitting the read urb */
	spin_lock_irqsave(&priv->dp_port_lock, irqflags);
	priv->dp_throttled = 1;
	priv->dp_throttle_restart = 0;
	spin_unlock_irqrestore(&priv->dp_port_lock, irqflags);
}
/* Unthrottle input: clear the throttle flags and, if a read urb was
 * held back while throttled, resubmit it to restart the read chain. */
static void digi_rx_unthrottle(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct digi_port *priv = usb_get_serial_port_data(port);
	unsigned long irqflags;
	int result = 0;

	dbg("digi_rx_unthrottle: TOP: port=%d", priv->dp_port_num);

	spin_lock_irqsave(&priv->dp_port_lock, irqflags);
	/* restart read chain */
	if (priv->dp_throttle_restart) {
		port->read_urb->dev = port->serial->dev;
		result = usb_submit_urb(port->read_urb, GFP_ATOMIC);
	}
	/* turn throttle off */
	priv->dp_throttled = 0;
	priv->dp_throttle_restart = 0;
	spin_unlock_irqrestore(&priv->dp_port_lock, irqflags);

	if (result)
		dev_err(&port->dev,
			"%s: usb_submit_urb failed, ret=%d, port=%d\n",
			__func__, result, priv->dp_port_num);
}
/*
 * Digi Set Termios
 *
 * Translate the new termios settings into Digi oob commands.
 * For each setting that changed versus old_termios, a 4-byte
 * command is appended to buf; the whole batch is sent at the end
 * with a single digi_write_oob_command call.
 */
static void digi_set_termios(struct tty_struct *tty,
	struct usb_serial_port *port, struct ktermios *old_termios)
{
	struct digi_port *priv = usb_get_serial_port_data(port);
	unsigned int iflag = tty->termios->c_iflag;
	unsigned int cflag = tty->termios->c_cflag;
	unsigned int old_iflag = old_termios->c_iflag;
	unsigned int old_cflag = old_termios->c_cflag;
	unsigned char buf[32];
	unsigned int modem_signals;
	int arg, ret;
	int i = 0;	/* next free byte in buf */
	speed_t baud;
	dbg("digi_set_termios: TOP: port=%d, iflag=0x%x, old_iflag=0x%x, cflag=0x%x, old_cflag=0x%x", priv->dp_port_num, iflag, old_iflag, cflag, old_cflag);
	/* set baud rate */
	baud = tty_get_baud_rate(tty);
	if (baud != tty_termios_baud_rate(old_termios)) {
		arg = -1;
		/* reassert DTR and (maybe) RTS on transition from B0 */
		if ((old_cflag&CBAUD) == B0) {
			/* don't set RTS if using hardware flow control */
			/* and throttling input */
			modem_signals = TIOCM_DTR;
			if (!(tty->termios->c_cflag & CRTSCTS) ||
				!test_bit(TTY_THROTTLED, &tty->flags))
				modem_signals |= TIOCM_RTS;
			digi_set_modem_signals(port, modem_signals, 1);
		}
		switch (baud) {
		/* drop DTR and RTS on transition to B0 */
		case 0: digi_set_modem_signals(port, 0, 1); break;
		case 50: arg = DIGI_BAUD_50; break;
		case 75: arg = DIGI_BAUD_75; break;
		case 110: arg = DIGI_BAUD_110; break;
		case 150: arg = DIGI_BAUD_150; break;
		case 200: arg = DIGI_BAUD_200; break;
		case 300: arg = DIGI_BAUD_300; break;
		case 600: arg = DIGI_BAUD_600; break;
		case 1200: arg = DIGI_BAUD_1200; break;
		case 1800: arg = DIGI_BAUD_1800; break;
		case 2400: arg = DIGI_BAUD_2400; break;
		case 4800: arg = DIGI_BAUD_4800; break;
		case 9600: arg = DIGI_BAUD_9600; break;
		case 19200: arg = DIGI_BAUD_19200; break;
		case 38400: arg = DIGI_BAUD_38400; break;
		case 57600: arg = DIGI_BAUD_57600; break;
		case 115200: arg = DIGI_BAUD_115200; break;
		case 230400: arg = DIGI_BAUD_230400; break;
		case 460800: arg = DIGI_BAUD_460800; break;
		default:
			/* unsupported rate: fall back to 9600 */
			arg = DIGI_BAUD_9600;
			baud = 9600;
			break;
		}
		if (arg != -1) {
			buf[i++] = DIGI_CMD_SET_BAUD_RATE;
			buf[i++] = priv->dp_port_num;
			buf[i++] = arg;
			buf[i++] = 0;
		}
	}
	/* set parity */
	tty->termios->c_cflag &= ~CMSPAR;
	if ((cflag&(PARENB|PARODD)) != (old_cflag&(PARENB|PARODD))) {
		if (cflag&PARENB) {
			if (cflag&PARODD)
				arg = DIGI_PARITY_ODD;
			else
				arg = DIGI_PARITY_EVEN;
		} else {
			arg = DIGI_PARITY_NONE;
		}
		buf[i++] = DIGI_CMD_SET_PARITY;
		buf[i++] = priv->dp_port_num;
		buf[i++] = arg;
		buf[i++] = 0;
	}
	/* set word size */
	if ((cflag&CSIZE) != (old_cflag&CSIZE)) {
		arg = -1;
		switch (cflag&CSIZE) {
		case CS5: arg = DIGI_WORD_SIZE_5; break;
		case CS6: arg = DIGI_WORD_SIZE_6; break;
		case CS7: arg = DIGI_WORD_SIZE_7; break;
		case CS8: arg = DIGI_WORD_SIZE_8; break;
		default:
			dbg("digi_set_termios: can't handle word size %d",
				(cflag&CSIZE));
			break;
		}
		if (arg != -1) {
			buf[i++] = DIGI_CMD_SET_WORD_SIZE;
			buf[i++] = priv->dp_port_num;
			buf[i++] = arg;
			buf[i++] = 0;
		}
	}
	/* set stop bits */
	if ((cflag&CSTOPB) != (old_cflag&CSTOPB)) {
		if ((cflag&CSTOPB))
			arg = DIGI_STOP_BITS_2;
		else
			arg = DIGI_STOP_BITS_1;
		buf[i++] = DIGI_CMD_SET_STOP_BITS;
		buf[i++] = priv->dp_port_num;
		buf[i++] = arg;
		buf[i++] = 0;
	}
	/* set input flow control */
	if ((iflag&IXOFF) != (old_iflag&IXOFF)
	    || (cflag&CRTSCTS) != (old_cflag&CRTSCTS)) {
		arg = 0;
		if (iflag&IXOFF)
			arg |= DIGI_INPUT_FLOW_CONTROL_XON_XOFF;
		else
			arg &= ~DIGI_INPUT_FLOW_CONTROL_XON_XOFF;
		if (cflag&CRTSCTS) {
			arg |= DIGI_INPUT_FLOW_CONTROL_RTS;
			/* On USB-4 it is necessary to assert RTS prior */
			/* to selecting RTS input flow control.  */
			buf[i++] = DIGI_CMD_SET_RTS_SIGNAL;
			buf[i++] = priv->dp_port_num;
			buf[i++] = DIGI_RTS_ACTIVE;
			buf[i++] = 0;
		} else {
			arg &= ~DIGI_INPUT_FLOW_CONTROL_RTS;
		}
		buf[i++] = DIGI_CMD_SET_INPUT_FLOW_CONTROL;
		buf[i++] = priv->dp_port_num;
		buf[i++] = arg;
		buf[i++] = 0;
	}
	/* set output flow control */
	if ((iflag & IXON) != (old_iflag & IXON)
	    || (cflag & CRTSCTS) != (old_cflag & CRTSCTS)) {
		arg = 0;
		if (iflag & IXON)
			arg |= DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF;
		else
			arg &= ~DIGI_OUTPUT_FLOW_CONTROL_XON_XOFF;
		if (cflag & CRTSCTS) {
			arg |= DIGI_OUTPUT_FLOW_CONTROL_CTS;
		} else {
			arg &= ~DIGI_OUTPUT_FLOW_CONTROL_CTS;
			tty->hw_stopped = 0;
		}
		buf[i++] = DIGI_CMD_SET_OUTPUT_FLOW_CONTROL;
		buf[i++] = priv->dp_port_num;
		buf[i++] = arg;
		buf[i++] = 0;
	}
	/* set receive enable/disable */
	if ((cflag & CREAD) != (old_cflag & CREAD)) {
		if (cflag & CREAD)
			arg = DIGI_ENABLE;
		else
			arg = DIGI_DISABLE;
		buf[i++] = DIGI_CMD_RECEIVE_ENABLE;
		buf[i++] = priv->dp_port_num;
		buf[i++] = arg;
		buf[i++] = 0;
	}
	/* send the whole batch of commands built above in one go */
	ret = digi_write_oob_command(port, buf, i, 1);
	if (ret != 0)
		dbg("digi_set_termios: write oob failed, ret=%d", ret);
	tty_encode_baud_rate(tty, baud, baud);
}
/* Turn break on or off by sending a BREAK_CONTROL in-band command. */
static void digi_break_ctl(struct tty_struct *tty, int break_state)
{
	struct usb_serial_port *port = tty->driver_data;
	unsigned char cmd[4];

	cmd[0] = DIGI_CMD_BREAK_CONTROL;
	cmd[1] = 2;			/* length */
	cmd[2] = break_state ? 1 : 0;
	cmd[3] = 0;			/* pad */
	digi_write_inb_command(port, cmd, 4, 0);
}
/* Report the cached TIOCM_* modem signal state for this port. */
static int digi_tiocmget(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct digi_port *priv = usb_get_serial_port_data(port);
	unsigned long irqflags;
	unsigned int signals;

	dbg("%s: TOP: port=%d", __func__, priv->dp_port_num);

	/* snapshot the cached state under the port lock */
	spin_lock_irqsave(&priv->dp_port_lock, irqflags);
	signals = priv->dp_modem_signals;
	spin_unlock_irqrestore(&priv->dp_port_lock, irqflags);
	return signals;
}
/* Apply set/clear masks to the cached signal state and push the
 * resulting DTR/RTS values to the device. */
static int digi_tiocmset(struct tty_struct *tty,
	unsigned int set, unsigned int clear)
{
	struct usb_serial_port *port = tty->driver_data;
	struct digi_port *priv = usb_get_serial_port_data(port);
	unsigned long irqflags;
	unsigned int signals;

	dbg("%s: TOP: port=%d", __func__, priv->dp_port_num);

	/* compute the new signal mask from the cached state */
	spin_lock_irqsave(&priv->dp_port_lock, irqflags);
	signals = (priv->dp_modem_signals & ~clear) | set;
	spin_unlock_irqrestore(&priv->dp_port_lock, irqflags);
	return digi_set_modem_signals(port, signals, 1);
}
/*
 * Digi Write
 *
 * Queue up to one urb worth of data for this port.  Must not sleep:
 * it can be called at interrupt time.  If the write urb is busy, a
 * single byte may still be buffered in dp_out_buf (the put_char
 * path); otherwise data is framed with a SEND_DATA header and
 * submitted.  Returns the number of NEW bytes accepted (buffered
 * bytes were counted on a previous call), or a negative error.
 */
static int digi_write(struct tty_struct *tty, struct usb_serial_port *port,
	const unsigned char *buf, int count)
{
	int ret, data_len, new_len;
	struct digi_port *priv = usb_get_serial_port_data(port);
	unsigned char *data = port->write_urb->transfer_buffer;
	unsigned long flags = 0;
	dbg("digi_write: TOP: port=%d, count=%d, in_interrupt=%ld",
		priv->dp_port_num, count, in_interrupt());
	/* copy user data (which can sleep) before getting spin lock */
	count = min(count, port->bulk_out_size-2);
	count = min(64, count);
	/* be sure only one write proceeds at a time */
	/* there are races on the port private buffer */
	spin_lock_irqsave(&priv->dp_port_lock, flags);
	/* wait for urb status clear to submit another urb */
	if (priv->dp_write_urb_in_use) {
		/* buffer data if count is 1 (probably put_char) if possible */
		if (count == 1 && priv->dp_out_buf_len < DIGI_OUT_BUF_SIZE) {
			priv->dp_out_buf[priv->dp_out_buf_len++] = *buf;
			new_len = 1;
		} else {
			new_len = 0;
		}
		spin_unlock_irqrestore(&priv->dp_port_lock, flags);
		return new_len;
	}
	/* allow space for any buffered data and for new data, up to */
	/* transfer buffer size - 2 (for command and length bytes) */
	new_len = min(count, port->bulk_out_size-2-priv->dp_out_buf_len);
	data_len = new_len + priv->dp_out_buf_len;
	if (data_len == 0) {
		spin_unlock_irqrestore(&priv->dp_port_lock, flags);
		return 0;
	}
	port->write_urb->transfer_buffer_length = data_len+2;
	port->write_urb->dev = port->serial->dev;
	/* 2-byte frame header: command + payload length */
	*data++ = DIGI_CMD_SEND_DATA;
	*data++ = data_len;
	/* copy in buffered data first */
	memcpy(data, priv->dp_out_buf, priv->dp_out_buf_len);
	data += priv->dp_out_buf_len;
	/* copy in new data */
	memcpy(data, buf, new_len);
	ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
	if (ret == 0) {
		priv->dp_write_urb_in_use = 1;
		ret = new_len;
		priv->dp_out_buf_len = 0;
	}
	/* return length of new data written, or error */
	spin_unlock_irqrestore(&priv->dp_port_lock, flags);
	if (ret < 0)
		dev_err(&port->dev,
			"%s: usb_submit_urb failed, ret=%d, port=%d\n",
			__func__, ret, priv->dp_port_num);
	dbg("digi_write: returning %d", ret);
	return ret;
}
/*
 * Digi Write Bulk Callback
 *
 * Write urb completion handler; runs in interrupt context, so only
 * plain spin_lock (no irqsave) is needed here.  Marks the urb free,
 * flushes any bytes buffered while the urb was busy, and wakes up
 * sleeping writers.
 */
static void digi_write_bulk_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct usb_serial *serial;
	struct digi_port *priv;
	struct digi_serial *serial_priv;
	int ret = 0;
	int status = urb->status;
	dbg("digi_write_bulk_callback: TOP, status=%d", status);
	/* port and serial sanity check */
	if (port == NULL || (priv = usb_get_serial_port_data(port)) == NULL) {
		pr_err("%s: port or port->private is NULL, status=%d\n",
			__func__, status);
		return;
	}
	serial = port->serial;
	if (serial == NULL || (serial_priv = usb_get_serial_data(serial)) == NULL) {
		dev_err(&port->dev,
			"%s: serial or serial->private is NULL, status=%d\n",
			__func__, status);
		return;
	}
	/* handle oob callback */
	if (priv->dp_port_num == serial_priv->ds_oob_port_num) {
		dbg("digi_write_bulk_callback: oob callback");
		spin_lock(&priv->dp_port_lock);
		priv->dp_write_urb_in_use = 0;
		wake_up_interruptible(&port->write_wait);
		spin_unlock(&priv->dp_port_lock);
		return;
	}
	/* try to send any buffered data on this port */
	spin_lock(&priv->dp_port_lock);
	priv->dp_write_urb_in_use = 0;
	if (priv->dp_out_buf_len > 0) {
		/* frame the buffered bytes with a SEND_DATA header and
		 * resubmit the write urb immediately */
		*((unsigned char *)(port->write_urb->transfer_buffer))
			= (unsigned char)DIGI_CMD_SEND_DATA;
		*((unsigned char *)(port->write_urb->transfer_buffer) + 1)
			= (unsigned char)priv->dp_out_buf_len;
		port->write_urb->transfer_buffer_length =
			priv->dp_out_buf_len + 2;
		port->write_urb->dev = serial->dev;
		memcpy(port->write_urb->transfer_buffer + 2, priv->dp_out_buf,
			priv->dp_out_buf_len);
		ret = usb_submit_urb(port->write_urb, GFP_ATOMIC);
		if (ret == 0) {
			priv->dp_write_urb_in_use = 1;
			priv->dp_out_buf_len = 0;
		}
	}
	/* wake up processes sleeping on writes immediately */
	digi_wakeup_write(port);
	/* also queue up a wakeup at scheduler time, in case we */
	/* lost the race in write_chan().  */
	schedule_work(&priv->dp_wakeup_work);
	spin_unlock(&priv->dp_port_lock);
	if (ret && ret != -EPERM)
		dev_err(&port->dev,
			"%s: usb_submit_urb failed, ret=%d, port=%d\n",
			__func__, ret, priv->dp_port_num);
}
/* Report how many bytes digi_write could accept right now: zero while
 * the write urb is outstanding, otherwise the transfer buffer minus
 * the 2 header bytes and whatever is already buffered for the port. */
static int digi_write_room(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct digi_port *priv = usb_get_serial_port_data(port);
	unsigned long irqflags = 0;
	int space;

	spin_lock_irqsave(&priv->dp_port_lock, irqflags);
	space = priv->dp_write_urb_in_use ?
		0 : port->bulk_out_size - 2 - priv->dp_out_buf_len;
	spin_unlock_irqrestore(&priv->dp_port_lock, irqflags);

	dbg("digi_write_room: port=%d, room=%d", priv->dp_port_num, space);
	return space;
}
/* Report bytes still queued for transmission.  While a write urb is
 * in flight the true count is unknown, so a fixed estimate is
 * returned; otherwise it is just the locally buffered byte count. */
static int digi_chars_in_buffer(struct tty_struct *tty)
{
	struct usb_serial_port *port = tty->driver_data;
	struct digi_port *priv = usb_get_serial_port_data(port);

	if (!priv->dp_write_urb_in_use) {
		dbg("digi_chars_in_buffer: port=%d, chars=%d",
			priv->dp_port_num, priv->dp_out_buf_len);
		return priv->dp_out_buf_len;
	}
	dbg("digi_chars_in_buffer: port=%d, chars=%d",
		priv->dp_port_num, port->bulk_out_size - 2);
	/* return(port->bulk_out_size - 2); */
	return 256;
}
/* Raise (on != 0) or drop (on == 0) DTR and RTS together; called by
 * the serial core on open/close with on as 0 or 1. */
static void digi_dtr_rts(struct usb_serial_port *port, int on)
{
	unsigned int signals = on ? (TIOCM_DTR | TIOCM_RTS) : 0;

	digi_set_modem_signals(port, signals, 1);
}
/*
 * Digi Open
 *
 * Starts the device if needed, enables automatic modem-signal
 * reporting, flushes the port fifos, and forces a full termios
 * reprogram.  Returns 0 on success or -ENXIO if the device could
 * not be started.
 */
static int digi_open(struct tty_struct *tty, struct usb_serial_port *port)
{
	int ret;
	unsigned char buf[32];
	struct digi_port *priv = usb_get_serial_port_data(port);
	struct ktermios not_termios;
	dbg("digi_open: TOP: port=%d", priv->dp_port_num);
	/* be sure the device is started up */
	if (digi_startup_device(port->serial) != 0)
		return -ENXIO;
	/* read modem signals automatically whenever they change */
	buf[0] = DIGI_CMD_READ_INPUT_SIGNALS;
	buf[1] = priv->dp_port_num;
	buf[2] = DIGI_ENABLE;
	buf[3] = 0;
	/* flush fifos */
	buf[4] = DIGI_CMD_IFLUSH_FIFO;
	buf[5] = priv->dp_port_num;
	buf[6] = DIGI_FLUSH_TX | DIGI_FLUSH_RX;
	buf[7] = 0;
	ret = digi_write_oob_command(port, buf, 8, 1);
	if (ret != 0)
		dbg("digi_open: write oob failed, ret=%d", ret);
	/* set termios settings */
	if (tty) {
		/* hand digi_set_termios an "old" termios whose bits all
		 * differ from the current one, so every setting is seen
		 * as changed and programmed into the device.
		 * (fixed: the address-of operator here had been mangled
		 * to the HTML entity remnant "¬_termios") */
		not_termios.c_cflag = ~tty->termios->c_cflag;
		not_termios.c_iflag = ~tty->termios->c_iflag;
		digi_set_termios(tty, port, &not_termios);
	}
	return 0;
}
/*
 * Digi Close
 *
 * Drains output, disables flow control / signal reporting / receive,
 * flushes the fifos, and kills any outstanding write urb.  If the
 * device is already disconnected, only the local flags are cleared.
 */
static void digi_close(struct usb_serial_port *port)
{
	DEFINE_WAIT(wait);
	int ret;
	unsigned char buf[32];
	struct digi_port *priv = usb_get_serial_port_data(port);
	dbg("digi_close: TOP: port=%d", priv->dp_port_num);
	mutex_lock(&port->serial->disc_mutex);
	/* if disconnected, just clear flags */
	if (port->serial->disconnected)
		goto exit;
	if (port->serial->dev) {
		/* FIXME: Transmit idle belongs in the wait_until_sent path */
		digi_transmit_idle(port, DIGI_CLOSE_TIMEOUT);
		/* batch of five 4-byte oob commands, built in buf: */
		/* disable input flow control */
		buf[0] = DIGI_CMD_SET_INPUT_FLOW_CONTROL;
		buf[1] = priv->dp_port_num;
		buf[2] = DIGI_DISABLE;
		buf[3] = 0;
		/* disable output flow control */
		buf[4] = DIGI_CMD_SET_OUTPUT_FLOW_CONTROL;
		buf[5] = priv->dp_port_num;
		buf[6] = DIGI_DISABLE;
		buf[7] = 0;
		/* disable reading modem signals automatically */
		buf[8] = DIGI_CMD_READ_INPUT_SIGNALS;
		buf[9] = priv->dp_port_num;
		buf[10] = DIGI_DISABLE;
		buf[11] = 0;
		/* disable receive */
		buf[12] = DIGI_CMD_RECEIVE_ENABLE;
		buf[13] = priv->dp_port_num;
		buf[14] = DIGI_DISABLE;
		buf[15] = 0;
		/* flush fifos */
		buf[16] = DIGI_CMD_IFLUSH_FIFO;
		buf[17] = priv->dp_port_num;
		buf[18] = DIGI_FLUSH_TX | DIGI_FLUSH_RX;
		buf[19] = 0;
		/* best effort (non-interruptible): log but ignore failure */
		ret = digi_write_oob_command(port, buf, 20, 0);
		if (ret != 0)
			dbg("digi_close: write oob failed, ret=%d", ret);
		/* wait for final commands on oob port to complete */
		prepare_to_wait(&priv->dp_flush_wait, &wait,
			TASK_INTERRUPTIBLE);
		schedule_timeout(DIGI_CLOSE_TIMEOUT);
		finish_wait(&priv->dp_flush_wait, &wait);
		/* shutdown any outstanding bulk writes */
		usb_kill_urb(port->write_urb);
	}
exit:
	spin_lock_irq(&priv->dp_port_lock);
	priv->dp_write_urb_in_use = 0;
	wake_up_interruptible(&priv->dp_close_wait);
	spin_unlock_irq(&priv->dp_port_lock);
	mutex_unlock(&port->serial->disc_mutex);
	dbg("digi_close: done");
}
/*
* Digi Startup Device
*
* Starts reads on all ports. Must be called AFTER startup, with
* urbs initialized. Returns 0 if successful, non-zero error otherwise.
*/
static int digi_startup_device(struct usb_serial *serial)
{
	int i, ret = 0;
	struct digi_serial *serial_priv = usb_get_serial_data(serial);
	struct usb_serial_port *port;

	/* be sure this happens exactly once */
	/* ds_serial_lock guards the started flag; later callers see it set
	 * and return immediately */
	spin_lock(&serial_priv->ds_serial_lock);
	if (serial_priv->ds_device_started) {
		spin_unlock(&serial_priv->ds_serial_lock);
		return 0;
	}
	serial_priv->ds_device_started = 1;
	spin_unlock(&serial_priv->ds_serial_lock);

	/* start reading from each bulk in endpoint for the device */
	/* set USB_DISABLE_SPD flag for write bulk urbs */
	/* num_ports + 1 covers the regular ports plus the oob port */
	for (i = 0; i < serial->type->num_ports + 1; i++) {
		port = serial->port[i];
		port->write_urb->dev = port->serial->dev;
		ret = usb_submit_urb(port->read_urb, GFP_KERNEL);
		if (ret != 0) {
			dev_err(&port->dev,
				"%s: usb_submit_urb failed, ret=%d, port=%d\n",
				__func__, ret, i);
			break;
		}
	}
	return ret;
}
/*
 * Per-device attach: allocate and initialize a digi_port structure for
 * every port (including the out-of-band port) plus one digi_serial for
 * the device.  Returns 0 on success, 1 on allocation failure (all
 * partially-allocated port data is freed on the error paths).
 */
static int digi_startup(struct usb_serial *serial)
{
	int i;
	struct digi_port *priv;
	struct digi_serial *serial_priv;

	dbg("digi_startup: TOP");

	/* allocate the private data structures for all ports */
	/* number of regular ports + 1 for the out-of-band port */
	for (i = 0; i < serial->type->num_ports + 1; i++) {
		/* allocate port private structure */
		priv = kmalloc(sizeof(struct digi_port), GFP_KERNEL);
		if (priv == NULL) {
			/* unwind: free the port data allocated so far */
			while (--i >= 0)
				kfree(usb_get_serial_port_data(serial->port[i]));
			return 1;			/* error */
		}

		/* initialize port private structure */
		spin_lock_init(&priv->dp_port_lock);
		priv->dp_port_num = i;
		priv->dp_out_buf_len = 0;
		priv->dp_write_urb_in_use = 0;
		priv->dp_modem_signals = 0;
		init_waitqueue_head(&priv->dp_modem_change_wait);
		priv->dp_transmit_idle = 0;
		init_waitqueue_head(&priv->dp_transmit_idle_wait);
		priv->dp_throttled = 0;
		priv->dp_throttle_restart = 0;
		init_waitqueue_head(&priv->dp_flush_wait);
		init_waitqueue_head(&priv->dp_close_wait);
		INIT_WORK(&priv->dp_wakeup_work, digi_wakeup_write_lock);
		priv->dp_port = serial->port[i];

		/* initialize write wait queue for this port */
		init_waitqueue_head(&serial->port[i]->write_wait);

		usb_set_serial_port_data(serial->port[i], priv);
	}

	/* allocate serial private structure */
	serial_priv = kmalloc(sizeof(struct digi_serial), GFP_KERNEL);
	if (serial_priv == NULL) {
		for (i = 0; i < serial->type->num_ports + 1; i++)
			kfree(usb_get_serial_port_data(serial->port[i]));
		return 1;				/* error */
	}

	/* initialize serial private structure */
	spin_lock_init(&serial_priv->ds_serial_lock);
	/* the out-of-band port is the last port */
	serial_priv->ds_oob_port_num = serial->type->num_ports;
	serial_priv->ds_oob_port = serial->port[serial_priv->ds_oob_port_num];
	serial_priv->ds_device_started = 0;
	usb_set_serial_data(serial, serial_priv);

	return 0;
}
static void digi_disconnect(struct usb_serial *serial)
{
int i;
dbg("digi_disconnect: TOP, in_interrupt()=%ld", in_interrupt());
/* stop reads and writes on all ports */
for (i = 0; i < serial->type->num_ports + 1; i++) {
usb_kill_urb(serial->port[i]->read_urb);
usb_kill_urb(serial->port[i]->write_urb);
}
}
/*
 * Device release: free every per-port digi_port structure (regular
 * ports plus the out-of-band port) and the per-device digi_serial.
 */
static void digi_release(struct usb_serial *serial)
{
	int port_num;

	dbg("digi_release: TOP, in_interrupt()=%ld", in_interrupt());

	/* free the private data structures for all ports */
	/* number of regular ports + 1 for the out-of-band port */
	for (port_num = 0; port_num < serial->type->num_ports + 1; port_num++)
		kfree(usb_get_serial_port_data(serial->port[port_num]));

	kfree(usb_get_serial_data(serial));
}
/*
 * Bulk-in completion handler: validate the port and its private data,
 * dispatch to the out-of-band or in-band handler, and resubmit the URB
 * to keep the read chain running.  The URB is NOT resubmitted on any
 * sanity-check failure, URB error status, or nonzero handler return
 * (e.g. when the in-band handler reports the port is throttled).
 */
static void digi_read_bulk_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct digi_port *priv;
	struct digi_serial *serial_priv;
	int ret;
	int status = urb->status;

	dbg("digi_read_bulk_callback: TOP");

	/* port sanity check, do not resubmit if port is not valid */
	if (port == NULL)
		return;
	priv = usb_get_serial_port_data(port);
	if (priv == NULL) {
		dev_err(&port->dev, "%s: port->private is NULL, status=%d\n",
			__func__, status);
		return;
	}
	if (port->serial == NULL ||
		(serial_priv = usb_get_serial_data(port->serial)) == NULL) {
		dev_err(&port->dev, "%s: serial is bad or serial->private "
			"is NULL, status=%d\n", __func__, status);
		return;
	}

	/* do not resubmit urb if it has any status error */
	if (status) {
		dev_err(&port->dev,
			"%s: nonzero read bulk status: status=%d, port=%d\n",
			__func__, status, priv->dp_port_num);
		return;
	}

	/* handle oob or inb callback, do not resubmit if error */
	if (priv->dp_port_num == serial_priv->ds_oob_port_num) {
		if (digi_read_oob_callback(urb) != 0)
			return;
	} else {
		if (digi_read_inb_callback(urb) != 0)
			return;
	}

	/* continue read */
	urb->dev = port->serial->dev;
	ret = usb_submit_urb(urb, GFP_ATOMIC);
	/* -EPERM means the urb is being killed; not an error worth logging */
	if (ret != 0 && ret != -EPERM) {
		dev_err(&port->dev,
			"%s: failed resubmitting urb, ret=%d, port=%d\n",
			__func__, ret, priv->dp_port_num);
	}
}
/*
* Digi Read INB Callback
*
* Digi Read INB Callback handles reads on the in band ports, sending
* the data on to the tty subsystem. When called we know port and
* port->private are not NULL and port->serial has been validated.
* It returns 0 if successful, 1 if successful but the port is
* throttled, and -1 if the sanity checks failed.
*/
static int digi_read_inb_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct tty_struct *tty;
	struct digi_port *priv = usb_get_serial_port_data(port);
	/* packet layout: [opcode][len][port_status][len-1 data bytes] */
	int opcode = ((unsigned char *)urb->transfer_buffer)[0];
	int len = ((unsigned char *)urb->transfer_buffer)[1];
	int port_status = ((unsigned char *)urb->transfer_buffer)[2];
	unsigned char *data = ((unsigned char *)urb->transfer_buffer) + 3;
	int flag, throttled;
	int status = urb->status;

	/* do not process callbacks on closed ports */
	/* but do continue the read chain */
	if (urb->status == -ENOENT)
		return 0;

	/* short/multiple packet check */
	if (urb->actual_length != len + 2) {
		/* NOTE(review): the trailing "status=%d" actually prints
		 * port_status, not urb->status */
		dev_err(&port->dev, "%s: INCOMPLETE OR MULTIPLE PACKET, "
			"status=%d, port=%d, opcode=%d, len=%d, "
			"actual_length=%d, status=%d\n", __func__, status,
			priv->dp_port_num, opcode, len, urb->actual_length,
			port_status);
		return -1;
	}

	tty = tty_port_tty_get(&port->port);
	spin_lock(&priv->dp_port_lock);

	/* check for throttle; if set, do not resubmit read urb */
	/* indicate the read chain needs to be restarted on unthrottle */
	throttled = priv->dp_throttled;
	if (throttled)
		priv->dp_throttle_restart = 1;

	/* receive data */
	if (tty && opcode == DIGI_CMD_RECEIVE_DATA) {
		/* get flag from port_status */
		flag = 0;

		/* overrun is special, not associated with a char */
		if (port_status & DIGI_OVERRUN_ERROR)
			tty_insert_flip_char(tty, 0, TTY_OVERRUN);

		/* break takes precedence over parity, */
		/* which takes precedence over framing errors */
		if (port_status & DIGI_BREAK_ERROR)
			flag = TTY_BREAK;
		else if (port_status & DIGI_PARITY_ERROR)
			flag = TTY_PARITY;
		else if (port_status & DIGI_FRAMING_ERROR)
			flag = TTY_FRAME;

		/* data length is len-1 (one byte of len is port_status) */
		--len;
		if (len > 0) {
			tty_insert_flip_string_fixed_flag(tty, data, flag,
					len);
			tty_flip_buffer_push(tty);
		}
	}
	spin_unlock(&priv->dp_port_lock);
	tty_kref_put(tty);

	if (opcode == DIGI_CMD_RECEIVE_DISABLE)
		dbg("%s: got RECEIVE_DISABLE", __func__);
	else if (opcode != DIGI_CMD_RECEIVE_DATA)
		dbg("%s: unknown opcode: %d", __func__, opcode);

	return throttled ? 1 : 0;
}
/*
* Digi Read OOB Callback
*
* Digi Read OOB Callback handles reads on the out of band port.
* When called we know port and port->private are not NULL and
* the port->serial is valid. It returns 0 if successful, and
* -1 if the sanity checks failed.
*/
static int digi_read_oob_callback(struct urb *urb)
{
	struct usb_serial_port *port = urb->context;
	struct usb_serial *serial = port->serial;
	struct tty_struct *tty;
	struct digi_port *priv = usb_get_serial_port_data(port);
	int opcode, line, status, val;
	int i;
	unsigned int rts;

	dbg("digi_read_oob_callback: port=%d, len=%d",
		priv->dp_port_num, urb->actual_length);

	/* handle each oob command */
	/* each command is 4 bytes: opcode, line, status, value; the loop
	 * bound guarantees a full 4-byte command remains in the buffer */
	for (i = 0; i < urb->actual_length - 3;) {
		opcode = ((unsigned char *)urb->transfer_buffer)[i++];
		line = ((unsigned char *)urb->transfer_buffer)[i++];
		status = ((unsigned char *)urb->transfer_buffer)[i++];
		val = ((unsigned char *)urb->transfer_buffer)[i++];

		dbg("digi_read_oob_callback: opcode=%d, line=%d, status=%d, val=%d",
			opcode, line, status, val);

		/* skip commands with an error status or an out-of-range line */
		if (status != 0 || line >= serial->type->num_ports)
			continue;

		/* rebind port/priv to the line this command refers to */
		port = serial->port[line];
		priv = usb_get_serial_port_data(port);
		if (priv == NULL)
			return -1;

		tty = tty_port_tty_get(&port->port);

		rts = 0;
		if (tty)
			rts = tty->termios->c_cflag & CRTSCTS;

		if (tty && opcode == DIGI_CMD_READ_INPUT_SIGNALS) {
			spin_lock(&priv->dp_port_lock);
			/* convert from digi flags to termiox flags */
			if (val & DIGI_READ_INPUT_SIGNALS_CTS) {
				priv->dp_modem_signals |= TIOCM_CTS;
				/* port must be open to use tty struct */
				if (rts) {
					/* CTS asserted: restart output */
					tty->hw_stopped = 0;
					digi_wakeup_write(port);
				}
			} else {
				priv->dp_modem_signals &= ~TIOCM_CTS;
				/* port must be open to use tty struct */
				if (rts)
					tty->hw_stopped = 1;
			}
			if (val & DIGI_READ_INPUT_SIGNALS_DSR)
				priv->dp_modem_signals |= TIOCM_DSR;
			else
				priv->dp_modem_signals &= ~TIOCM_DSR;
			if (val & DIGI_READ_INPUT_SIGNALS_RI)
				priv->dp_modem_signals |= TIOCM_RI;
			else
				priv->dp_modem_signals &= ~TIOCM_RI;
			if (val & DIGI_READ_INPUT_SIGNALS_DCD)
				priv->dp_modem_signals |= TIOCM_CD;
			else
				priv->dp_modem_signals &= ~TIOCM_CD;

			wake_up_interruptible(&priv->dp_modem_change_wait);
			spin_unlock(&priv->dp_port_lock);
		} else if (opcode == DIGI_CMD_TRANSMIT_IDLE) {
			spin_lock(&priv->dp_port_lock);
			priv->dp_transmit_idle = 1;
			wake_up_interruptible(&priv->dp_transmit_idle_wait);
			spin_unlock(&priv->dp_port_lock);
		} else if (opcode == DIGI_CMD_IFLUSH_FIFO) {
			/* wakes the flush waiter in digi_close() */
			wake_up_interruptible(&priv->dp_flush_wait);
		}
		tty_kref_put(tty);
	}
	return 0;
}
/*
 * Module init: register both usb-serial device types, then the USB
 * driver.  Uses goto-based unwinding so a failure at any step
 * deregisters everything registered before it.
 */
static int __init digi_init(void)
{
	int retval;
	retval = usb_serial_register(&digi_acceleport_2_device);
	if (retval)
		goto failed_acceleport_2_device;
	retval = usb_serial_register(&digi_acceleport_4_device);
	if (retval)
		goto failed_acceleport_4_device;
	retval = usb_register(&digi_driver);
	if (retval)
		goto failed_usb_register;
	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
	       DRIVER_DESC "\n");
	return 0;
failed_usb_register:
	usb_serial_deregister(&digi_acceleport_4_device);
failed_acceleport_4_device:
	usb_serial_deregister(&digi_acceleport_2_device);
failed_acceleport_2_device:
	return retval;
}
/* Module exit: deregister the USB driver first, then both serial types
 * (reverse of registration order in digi_init). */
static void __exit digi_exit (void)
{
	usb_deregister(&digi_driver);
	usb_serial_deregister(&digi_acceleport_2_device);
	usb_serial_deregister(&digi_acceleport_4_device);
}
/* Module entry/exit points and metadata. */
module_init(digi_init);
module_exit(digi_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

/* Debug flag, toggleable at runtime via sysfs (mode S_IRUGO | S_IWUSR). */
module_param(debug, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug enabled or not");
| gpl-2.0 |
lollipop-og/bricked-geehrc | drivers/infiniband/core/cm.c | 4700 | 110021 | /*
* Copyright (c) 2004-2007 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004, 2005 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/workqueue.h>
#include <linux/kdev_t.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_cm.h>
#include "cm_msgs.h"
MODULE_AUTHOR("Sean Hefty");
MODULE_DESCRIPTION("InfiniBand CM");
MODULE_LICENSE("Dual BSD/GPL");

static void cm_add_one(struct ib_device *device);
static void cm_remove_one(struct ib_device *device);

/* Registered with the IB core so the CM is notified of each device. */
static struct ib_client cm_client = {
	.name   = "cm",
	.add    = cm_add_one,
	.remove = cm_remove_one
};

/* Global CM state: lookup tables for listeners, remote ids/qpns/sidr
 * entries, the local id allocator, and the CM workqueue. */
static struct ib_cm {
	spinlock_t lock;	/* taken around the id/timewait tables below */
	struct list_head device_list;
	rwlock_t device_lock;	/* protects device_list */
	struct rb_root listen_service_table;
	u64 listen_service_id;
	/* struct rb_root peer_service_table; todo: fix peer to peer */
	struct rb_root remote_qp_table;
	struct rb_root remote_id_table;
	struct rb_root remote_sidr_table;
	struct idr local_id_table;
	__be32 random_id_operand;	/* XORed into ids to obscure them */
	struct list_head timewait_list;
	struct workqueue_struct *wq;
} cm;
/* Counter indexes ordered by attribute ID */
enum {
	CM_REQ_COUNTER,
	CM_MRA_COUNTER,
	CM_REJ_COUNTER,
	CM_REP_COUNTER,
	CM_RTU_COUNTER,
	CM_DREQ_COUNTER,
	CM_DREP_COUNTER,
	CM_SIDR_REQ_COUNTER,
	CM_SIDR_REP_COUNTER,
	CM_LAP_COUNTER,
	CM_APR_COUNTER,
	CM_ATTR_COUNT,
	CM_ATTR_ID_OFFSET = 0x0010,
};

/* Statistic groups: tx/rx, each split into normal and exceptional. */
enum {
	CM_XMIT,
	CM_XMIT_RETRIES,
	CM_RECV,
	CM_RECV_DUPLICATES,
	CM_COUNTER_GROUPS
};

/* Group names as exposed in sysfs; array sized by the longest name. */
static char const counter_group_names[CM_COUNTER_GROUPS]
				     [sizeof("cm_rx_duplicates")] = {
	"cm_tx_msgs", "cm_tx_retries",
	"cm_rx_msgs", "cm_rx_duplicates"
};
/* One sysfs kobject plus an atomic counter per CM attribute. */
struct cm_counter_group {
	struct kobject obj;
	atomic_long_t counter[CM_ATTR_COUNT];
};

/* sysfs attribute carrying the index into the counter array. */
struct cm_counter_attribute {
	struct attribute attr;
	int index;
};

/* Declares a read-only (0444) sysfs counter attribute named _name. */
#define CM_COUNTER_ATTR(_name, _index) \
struct cm_counter_attribute cm_##_name##_counter_attr = { \
	.attr = { .name = __stringify(_name), .mode = 0444 }, \
	.index = _index \
}

static CM_COUNTER_ATTR(req, CM_REQ_COUNTER);
static CM_COUNTER_ATTR(mra, CM_MRA_COUNTER);
static CM_COUNTER_ATTR(rej, CM_REJ_COUNTER);
static CM_COUNTER_ATTR(rep, CM_REP_COUNTER);
static CM_COUNTER_ATTR(rtu, CM_RTU_COUNTER);
static CM_COUNTER_ATTR(dreq, CM_DREQ_COUNTER);
static CM_COUNTER_ATTR(drep, CM_DREP_COUNTER);
static CM_COUNTER_ATTR(sidr_req, CM_SIDR_REQ_COUNTER);
static CM_COUNTER_ATTR(sidr_rep, CM_SIDR_REP_COUNTER);
static CM_COUNTER_ATTR(lap, CM_LAP_COUNTER);
static CM_COUNTER_ATTR(apr, CM_APR_COUNTER);

/* NULL-terminated default attribute list for the counter kobj type. */
static struct attribute *cm_counter_default_attrs[] = {
	&cm_req_counter_attr.attr,
	&cm_mra_counter_attr.attr,
	&cm_rej_counter_attr.attr,
	&cm_rep_counter_attr.attr,
	&cm_rtu_counter_attr.attr,
	&cm_dreq_counter_attr.attr,
	&cm_drep_counter_attr.attr,
	&cm_sidr_req_counter_attr.attr,
	&cm_sidr_rep_counter_attr.attr,
	&cm_lap_counter_attr.attr,
	&cm_apr_counter_attr.attr,
	NULL
};
/* Per-port CM state: the MAD agent and counter groups for one port. */
struct cm_port {
	struct cm_device *cm_dev;
	struct ib_mad_agent *mad_agent;
	struct kobject port_obj;
	u8 port_num;
	struct cm_counter_group counter_group[CM_COUNTER_GROUPS];
};

/* Per-HCA CM state; port[] is a trailing variable-length array. */
struct cm_device {
	struct list_head list;		/* entry on cm.device_list */
	struct ib_device *ib_device;
	struct device *device;
	u8 ack_delay;
	struct cm_port *port[0];
};

/* Address vector: how to reach the remote CM on a given local port. */
struct cm_av {
	struct cm_port *port;
	union ib_gid dgid;
	struct ib_ah_attr ah_attr;
	u16 pkey_index;
	u8 timeout;
};

/* Deferred-work item carrying a received MAD or generated event. */
struct cm_work {
	struct delayed_work work;
	struct list_head list;
	struct cm_port *port;
	struct ib_mad_recv_wc *mad_recv_wc;	/* Received MADs */
	__be32 local_id;			/* Established / timewait */
	__be32 remote_id;
	struct ib_cm_event cm_event;
	struct ib_sa_path_rec path[0];		/* trailing path records */
};

struct cm_timewait_info {
	struct cm_work work;			/* Must be first. */
	struct list_head list;			/* entry on cm.timewait_list */
	struct rb_node remote_qp_node;		/* in cm.remote_qp_table */
	struct rb_node remote_id_node;		/* in cm.remote_id_table */
	__be64 remote_ca_guid;
	__be32 remote_qpn;
	u8 inserted_remote_qp;			/* set once on remote_qp_table */
	u8 inserted_remote_id;			/* set once on remote_id_table */
};

/* Private state behind each ib_cm_id handed to consumers. */
struct cm_id_private {
	struct ib_cm_id	id;

	struct rb_node service_node;
	struct rb_node sidr_id_node;
	spinlock_t lock;	/* Do not acquire inside cm.lock */
	struct completion comp;	/* signalled when refcount drops to zero */
	atomic_t refcount;

	struct ib_mad_send_buf *msg;
	struct cm_timewait_info *timewait_info;
	/* todo: use alternate port on send failure */
	struct cm_av av;
	struct cm_av alt_av;
	struct ib_cm_compare_data *compare_data;

	void *private_data;
	__be64 tid;
	__be32 local_qpn;
	__be32 remote_qpn;
	enum ib_qp_type qp_type;
	__be32 sq_psn;
	__be32 rq_psn;
	int timeout_ms;
	enum ib_mtu path_mtu;
	__be16 pkey;
	u8 private_data_len;
	u8 max_cm_retries;
	u8 peer_to_peer;
	u8 responder_resources;
	u8 initiator_depth;
	u8 retry_count;
	u8 rnr_retry_count;
	u8 service_timeout;
	u8 target_ack_delay;

	struct list_head work_list;
	atomic_t work_count;
};
static void cm_work_handler(struct work_struct *work);

/* Drop a reference; completes cm_id_priv->comp when the last one goes. */
static inline void cm_deref_id(struct cm_id_private *cm_id_priv)
{
	if (atomic_dec_and_test(&cm_id_priv->refcount))
		complete(&cm_id_priv->comp);
}
/*
 * Allocate a MAD send buffer and address handle for sending a CM
 * message along cm_id_priv's primary address vector.  Takes a
 * reference on cm_id_priv that is released in cm_free_msg().
 * Returns 0 on success or the PTR_ERR of the failing allocation.
 */
static int cm_alloc_msg(struct cm_id_private *cm_id_priv,
			struct ib_mad_send_buf **msg)
{
	struct ib_mad_agent *mad_agent;
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	mad_agent = cm_id_priv->av.port->mad_agent;
	ah = ib_create_ah(mad_agent->qp->pd, &cm_id_priv->av.ah_attr);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(mad_agent, cm_id_priv->id.remote_cm_qpn,
			       cm_id_priv->av.pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}

	/* Timeout set by caller if response is expected. */
	m->ah = ah;
	m->retries = cm_id_priv->max_cm_retries;

	/* reference dropped by cm_free_msg() via context[0] */
	atomic_inc(&cm_id_priv->refcount);
	m->context[0] = cm_id_priv;
	*msg = m;
	return 0;
}
/*
 * Allocate a MAD send buffer addressed back to the sender of a
 * received MAD (address handle built from the receive work completion
 * and GRH).  No cm_id reference is taken (context[0] stays NULL).
 * Returns 0 on success or the PTR_ERR of the failing allocation.
 */
static int cm_alloc_response_msg(struct cm_port *port,
				 struct ib_mad_recv_wc *mad_recv_wc,
				 struct ib_mad_send_buf **msg)
{
	struct ib_mad_send_buf *m;
	struct ib_ah *ah;

	ah = ib_create_ah_from_wc(port->mad_agent->qp->pd, mad_recv_wc->wc,
				  mad_recv_wc->recv_buf.grh, port->port_num);
	if (IS_ERR(ah))
		return PTR_ERR(ah);

	m = ib_create_send_mad(port->mad_agent, 1, mad_recv_wc->wc->pkey_index,
			       0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA,
			       GFP_ATOMIC);
	if (IS_ERR(m)) {
		ib_destroy_ah(ah);
		return PTR_ERR(m);
	}
	m->ah = ah;
	*msg = m;
	return 0;
}
/* Free a MAD send buffer and its AH, dropping the cm_id reference that
 * cm_alloc_msg() stored in context[0] (NULL for response messages). */
static void cm_free_msg(struct ib_mad_send_buf *msg)
{
	ib_destroy_ah(msg->ah);
	if (msg->context[0])
		cm_deref_id(msg->context[0]);
	ib_free_send_mad(msg);
}
/*
 * Duplicate a caller-supplied private-data buffer.  Returns NULL when
 * there is nothing to copy (NULL pointer or zero length), the new
 * buffer on success, or ERR_PTR(-ENOMEM) on allocation failure.
 * The caller owns the returned buffer.
 */
static void * cm_copy_private_data(const void *private_data,
				   u8 private_data_len)
{
	void *copy;

	if (!private_data || !private_data_len)
		return NULL;

	copy = kmemdup(private_data, private_data_len, GFP_KERNEL);
	return copy ? copy : ERR_PTR(-ENOMEM);
}
/* Replace the cm_id's private data, freeing any previous buffer.
 * Ownership of private_data transfers to cm_id_priv. */
static void cm_set_private_data(struct cm_id_private *cm_id_priv,
				void *private_data, u8 private_data_len)
{
	if (cm_id_priv->private_data && cm_id_priv->private_data_len)
		kfree(cm_id_priv->private_data);
	cm_id_priv->private_data = private_data;
	cm_id_priv->private_data_len = private_data_len;
}
/* Initialize an address vector for replying to a received MAD, using
 * the receive work completion and GRH to derive the AH attributes. */
static void cm_init_av_for_response(struct cm_port *port, struct ib_wc *wc,
				    struct ib_grh *grh, struct cm_av *av)
{
	av->port = port;
	av->pkey_index = wc->pkey_index;
	ib_init_ah_from_wc(port->cm_dev->ib_device, port->port_num, wc,
			   grh, &av->ah_attr);
}
/*
 * Initialize an address vector from a path record: find the local
 * device/port whose GID cache contains the path's SGID, resolve the
 * pkey index, and build the AH attributes.  Returns -EINVAL if no
 * local port owns the SGID, or the error from the pkey lookup.
 */
static int cm_init_av_by_path(struct ib_sa_path_rec *path, struct cm_av *av)
{
	struct cm_device *cm_dev;
	struct cm_port *port = NULL;
	unsigned long flags;
	int ret;
	u8 p;

	read_lock_irqsave(&cm.device_lock, flags);
	list_for_each_entry(cm_dev, &cm.device_list, list) {
		if (!ib_find_cached_gid(cm_dev->ib_device, &path->sgid,
					&p, NULL)) {
			/* p is the 1-based port number owning the SGID */
			port = cm_dev->port[p-1];
			break;
		}
	}
	read_unlock_irqrestore(&cm.device_lock, flags);

	if (!port)
		return -EINVAL;

	ret = ib_find_cached_pkey(cm_dev->ib_device, port->port_num,
				  be16_to_cpu(path->pkey), &av->pkey_index);
	if (ret)
		return ret;

	av->port = port;
	ib_init_ah_from_path(cm_dev->ib_device, port->port_num, path,
			     &av->ah_attr);
	/* ack_timeout encoding: +1 doubles the packet lifetime */
	av->timeout = path->packet_life_time + 1;
	return 0;
}
/*
 * Allocate a local communication id from the idr, retrying while the
 * idr reports -EAGAIN and preloading succeeds.  The published local_id
 * is XORed with a random operand so ids are not trivially guessable.
 * Returns 0 on success or the idr error.
 */
static int cm_alloc_id(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;
	int ret, id;
	static int next_id;	/* hint: allocate ids in increasing order */

	do {
		spin_lock_irqsave(&cm.lock, flags);
		ret = idr_get_new_above(&cm.local_id_table, cm_id_priv,
					next_id, &id);
		if (!ret)
			next_id = ((unsigned) id + 1) & MAX_ID_MASK;
		spin_unlock_irqrestore(&cm.lock, flags);
	} while( (ret == -EAGAIN) && idr_pre_get(&cm.local_id_table, GFP_KERNEL) );

	cm_id_priv->id.local_id = (__force __be32)id ^ cm.random_id_operand;
	return ret;
}
/* Release a local id; undo the random-operand XOR to recover the idr key. */
static void cm_free_id(__be32 local_id)
{
	spin_lock_irq(&cm.lock);
	idr_remove(&cm.local_id_table,
		   (__force int) (local_id ^ cm.random_id_operand));
	spin_unlock_irq(&cm.lock);
}
/*
 * Look up a cm_id by local id and verify it is bound to remote_id.
 * On a match, takes a reference and returns the cm_id; otherwise
 * returns NULL.  Caller must hold cm.lock.
 */
static struct cm_id_private * cm_get_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	cm_id_priv = idr_find(&cm.local_id_table,
			      (__force int) (local_id ^ cm.random_id_operand));
	if (cm_id_priv) {
		if (cm_id_priv->id.remote_id == remote_id)
			atomic_inc(&cm_id_priv->refcount);
		else
			cm_id_priv = NULL;
	}

	return cm_id_priv;
}
/* Locked wrapper around cm_get_id(); returns a referenced cm_id or NULL. */
static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
{
	struct cm_id_private *cm_id_priv;

	spin_lock_irq(&cm.lock);
	cm_id_priv = cm_get_id(local_id, remote_id);
	spin_unlock_irq(&cm.lock);

	return cm_id_priv;
}
/*
 * Copy IB_CM_COMPARE_SIZE bytes from src to dst, ANDing each word with
 * the corresponding word of mask.  All three buffers must be at least
 * IB_CM_COMPARE_SIZE bytes and suitably aligned for unsigned long
 * word accesses.
 */
static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
{
	unsigned long *d = (unsigned long *) dst;
	unsigned long *s = (unsigned long *) src;
	unsigned long *m = (unsigned long *) mask;
	int word;

	for (word = 0; word < IB_CM_COMPARE_SIZE / sizeof(unsigned long); word++)
		d[word] = s[word] & m[word];
}
/*
 * Compare two private-data descriptors, each side masked by the other
 * side's mask.  Returns 0 on match (including when either descriptor
 * is NULL), nonzero otherwise (memcmp semantics).
 */
static int cm_compare_data(struct ib_cm_compare_data *src_data,
			   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];
	u8 dst[IB_CM_COMPARE_SIZE];

	if (!src_data || !dst_data)
		return 0;

	cm_mask_copy(src, src_data->data, dst_data->mask);
	cm_mask_copy(dst, dst_data->data, src_data->mask);
	return memcmp(src, dst, IB_CM_COMPARE_SIZE);
}
/*
 * Compare raw received private data against a listener's compare
 * descriptor, applying the listener's mask first.  Returns 0 on match
 * (or when the listener has no descriptor), nonzero otherwise.
 */
static int cm_compare_private_data(u8 *private_data,
				   struct ib_cm_compare_data *dst_data)
{
	u8 src[IB_CM_COMPARE_SIZE];

	if (!dst_data)
		return 0;

	cm_mask_copy(src, private_data, dst_data->mask);
	return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
}
/*
 * Trivial helpers to strip endian annotation and compare; the
 * endianness doesn't actually matter since we just need a stable
 * order for the RB tree.
 */
static int be32_lt(__be32 a, __be32 b)
{
	return (__force u32) a < (__force u32) b;
}

static int be32_gt(__be32 a, __be32 b)
{
	return (__force u32) a > (__force u32) b;
}

static int be64_lt(__be64 a, __be64 b)
{
	return (__force u64) a < (__force u64) b;
}

static int be64_gt(__be64 a, __be64 b)
{
	return (__force u64) a > (__force u64) b;
}
/*
 * Insert a listener into the listen service rb-tree, ordered by
 * (device, service id, compare data).  If an existing listener matches
 * under the service masks and compare data, it is returned and the new
 * entry is NOT inserted; otherwise NULL is returned after inserting.
 * Caller must hold cm.lock.
 */
static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
{
	struct rb_node **link = &cm.listen_service_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	__be64 service_id = cm_id_priv->id.service_id;
	__be64 service_mask = cm_id_priv->id.service_mask;
	int data_cmp;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  service_node);
		data_cmp = cm_compare_data(cm_id_priv->compare_data,
					   cur_cm_id_priv->compare_data);
		/* conflict test: ids equal under both masks, same device,
		 * matching compare data */
		if ((cur_cm_id_priv->id.service_mask & service_id) ==
		    (service_mask & cur_cm_id_priv->id.service_id) &&
		    (cm_id_priv->id.device == cur_cm_id_priv->id.device) &&
		    !data_cmp)
			return cur_cm_id_priv;

		if (cm_id_priv->id.device < cur_cm_id_priv->id.device)
			link = &(*link)->rb_left;
		else if (cm_id_priv->id.device > cur_cm_id_priv->id.device)
			link = &(*link)->rb_right;
		else if (be64_lt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_left;
		else if (be64_gt(service_id, cur_cm_id_priv->id.service_id))
			link = &(*link)->rb_right;
		else if (data_cmp < 0)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}
	rb_link_node(&cm_id_priv->service_node, parent, link);
	rb_insert_color(&cm_id_priv->service_node, &cm.listen_service_table);
	return NULL;
}
/*
 * Find the listener matching (device, service_id, private_data) in the
 * listen service rb-tree; the listener's service mask and compare-data
 * mask are applied during matching.  Returns the listener or NULL.
 * Caller must hold cm.lock.
 */
static struct cm_id_private * cm_find_listen(struct ib_device *device,
					     __be64 service_id,
					     u8 *private_data)
{
	struct rb_node *node = cm.listen_service_table.rb_node;
	struct cm_id_private *cm_id_priv;
	int data_cmp;

	while (node) {
		cm_id_priv = rb_entry(node, struct cm_id_private, service_node);
		data_cmp = cm_compare_private_data(private_data,
						   cm_id_priv->compare_data);
		if ((cm_id_priv->id.service_mask & service_id) ==
		     cm_id_priv->id.service_id &&
		    (cm_id_priv->id.device == device) && !data_cmp)
			return cm_id_priv;

		if (device < cm_id_priv->id.device)
			node = node->rb_left;
		else if (device > cm_id_priv->id.device)
			node = node->rb_right;
		else if (be64_lt(service_id, cm_id_priv->id.service_id))
			node = node->rb_left;
		else if (be64_gt(service_id, cm_id_priv->id.service_id))
			node = node->rb_right;
		else if (data_cmp < 0)
			node = node->rb_left;
		else
			node = node->rb_right;
	}
	return NULL;
}
/*
 * Insert a timewait entry into the remote-id rb-tree, keyed by
 * (remote_id, remote_ca_guid).  Returns an existing duplicate entry
 * without inserting, or NULL after a successful insert (and marks the
 * entry inserted).  Caller must hold cm.lock.
 */
static struct cm_timewait_info * cm_insert_remote_id(struct cm_timewait_info
						     *timewait_info)
{
	struct rb_node **link = &cm.remote_id_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_id = timewait_info->work.remote_id;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_id_node);
		if (be32_lt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_timewait_info->work.remote_id))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_id = 1;
	rb_link_node(&timewait_info->remote_id_node, parent, link);
	rb_insert_color(&timewait_info->remote_id_node, &cm.remote_id_table);
	return NULL;
}
/*
 * Look up a timewait entry by (remote_id, remote_ca_guid) in the
 * remote-id rb-tree.  Returns the entry or NULL.  Caller must hold
 * cm.lock.
 */
static struct cm_timewait_info * cm_find_remote_id(__be64 remote_ca_guid,
						   __be32 remote_id)
{
	struct rb_node *node = cm.remote_id_table.rb_node;
	struct cm_timewait_info *timewait_info;

	while (node) {
		timewait_info = rb_entry(node, struct cm_timewait_info,
					 remote_id_node);
		if (be32_lt(remote_id, timewait_info->work.remote_id))
			node = node->rb_left;
		else if (be32_gt(remote_id, timewait_info->work.remote_id))
			node = node->rb_right;
		else if (be64_lt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_left;
		else if (be64_gt(remote_ca_guid, timewait_info->remote_ca_guid))
			node = node->rb_right;
		else
			return timewait_info;
	}
	return NULL;
}
/*
 * Insert a timewait entry into the remote-qpn rb-tree, keyed by
 * (remote_qpn, remote_ca_guid).  Returns an existing duplicate entry
 * without inserting, or NULL after a successful insert (and marks the
 * entry inserted).  Caller must hold cm.lock.
 */
static struct cm_timewait_info * cm_insert_remote_qpn(struct cm_timewait_info
						      *timewait_info)
{
	struct rb_node **link = &cm.remote_qp_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_timewait_info *cur_timewait_info;
	__be64 remote_ca_guid = timewait_info->remote_ca_guid;
	__be32 remote_qpn = timewait_info->remote_qpn;

	while (*link) {
		parent = *link;
		cur_timewait_info = rb_entry(parent, struct cm_timewait_info,
					     remote_qp_node);
		if (be32_lt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_qpn, cur_timewait_info->remote_qpn))
			link = &(*link)->rb_right;
		else if (be64_lt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_left;
		else if (be64_gt(remote_ca_guid, cur_timewait_info->remote_ca_guid))
			link = &(*link)->rb_right;
		else
			return cur_timewait_info;
	}
	timewait_info->inserted_remote_qp = 1;
	rb_link_node(&timewait_info->remote_qp_node, parent, link);
	rb_insert_color(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	return NULL;
}
/*
 * Insert a cm_id into the remote SIDR rb-tree, keyed by remote_id with
 * the destination GID (memcmp order) as tie-breaker.  Returns an
 * existing duplicate entry without inserting, or NULL after a
 * successful insert.  Caller must hold cm.lock.
 */
static struct cm_id_private * cm_insert_remote_sidr(struct cm_id_private
						    *cm_id_priv)
{
	struct rb_node **link = &cm.remote_sidr_table.rb_node;
	struct rb_node *parent = NULL;
	struct cm_id_private *cur_cm_id_priv;
	union ib_gid *port_gid = &cm_id_priv->av.dgid;
	__be32 remote_id = cm_id_priv->id.remote_id;

	while (*link) {
		parent = *link;
		cur_cm_id_priv = rb_entry(parent, struct cm_id_private,
					  sidr_id_node);
		if (be32_lt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_left;
		else if (be32_gt(remote_id, cur_cm_id_priv->id.remote_id))
			link = &(*link)->rb_right;
		else {
			int cmp;
			cmp = memcmp(port_gid, &cur_cm_id_priv->av.dgid,
				     sizeof *port_gid);
			if (cmp < 0)
				link = &(*link)->rb_left;
			else if (cmp > 0)
				link = &(*link)->rb_right;
			else
				return cur_cm_id_priv;
		}
	}
	rb_link_node(&cm_id_priv->sidr_id_node, parent, link);
	rb_insert_color(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
	return NULL;
}
/*
 * Send a SIDR REP carrying only the given rejection status.
 *
 * Fix: both uses of "&param" had been corrupted to "¶m"
 * (HTML-entity mangling of "&para"), which does not compile;
 * restored the address-of expressions.
 */
static void cm_reject_sidr_req(struct cm_id_private *cm_id_priv,
			       enum ib_cm_sidr_status status)
{
	struct ib_cm_sidr_rep_param param;

	memset(&param, 0, sizeof param);
	param.status = status;
	ib_send_cm_sidr_rep(&cm_id_priv->id, &param);
}
/*
 * Allocate and initialize a new communication identifier in the IDLE
 * state.  Returns the embedded ib_cm_id on success or an ERR_PTR.
 *
 * Fix: the error path returned ERR_PTR(-ENOMEM) even when
 * cm_alloc_id() failed for a different reason; propagate the actual
 * error code instead.
 */
struct ib_cm_id *ib_create_cm_id(struct ib_device *device,
				 ib_cm_handler cm_handler,
				 void *context)
{
	struct cm_id_private *cm_id_priv;
	int ret;

	cm_id_priv = kzalloc(sizeof *cm_id_priv, GFP_KERNEL);
	if (!cm_id_priv)
		return ERR_PTR(-ENOMEM);

	cm_id_priv->id.state = IB_CM_IDLE;
	cm_id_priv->id.device = device;
	cm_id_priv->id.cm_handler = cm_handler;
	cm_id_priv->id.context = context;
	cm_id_priv->id.remote_cm_qpn = 1;
	ret = cm_alloc_id(cm_id_priv);
	if (ret)
		goto error;

	spin_lock_init(&cm_id_priv->lock);
	init_completion(&cm_id_priv->comp);
	INIT_LIST_HEAD(&cm_id_priv->work_list);
	atomic_set(&cm_id_priv->work_count, -1);
	atomic_set(&cm_id_priv->refcount, 1);
	return &cm_id_priv->id;

error:
	kfree(cm_id_priv);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_create_cm_id);
/*
 * Pop the oldest queued work item off the cm_id's work list, or return
 * NULL if the list is empty.  Caller is responsible for locking.
 */
static struct cm_work * cm_dequeue_work(struct cm_id_private *cm_id_priv)
{
	struct cm_work *work = NULL;

	if (!list_empty(&cm_id_priv->work_list)) {
		work = list_entry(cm_id_priv->work_list.next,
				  struct cm_work, list);
		list_del(&work->list);
	}
	return work;
}
/* Free a work item, releasing any received MAD it still references. */
static void cm_free_work(struct cm_work *work)
{
	if (work->mad_recv_wc)
		ib_free_recv_mad(work->mad_recv_wc);
	kfree(work);
}
/*
 * Convert an IBA timeout exponent (4.096us x 2^iba_time) to an
 * approximate number of milliseconds: 2^(iba_time - 8), clamped so
 * values of 8 or less map to 1 ms.
 */
static inline int cm_convert_to_ms(int iba_time)
{
	/* approximate conversion to ms from 4.096us x 2^iba_time */
	int shift = iba_time - 8;

	if (shift < 0)
		shift = 0;
	return 1 << shift;
}
/*
 * calculate: 4.096x2^ack_timeout = 4.096x2^ack_delay + 2x4.096x2^life_time
 * Because of how ack_timeout is stored, adding one doubles the timeout.
 * To avoid large timeouts, select the max(ack_delay, life_time + 1), and
 * increment it (round up) only if the other is within 50%.
 */
static u8 cm_ack_timeout(u8 ca_ack_delay, u8 packet_life_time)
{
	/* +1 accounts for the round trip (2x the packet lifetime) */
	int ack_timeout = packet_life_time + 1;

	if (ack_timeout >= ca_ack_delay)
		ack_timeout += (ca_ack_delay >= (ack_timeout - 1));
	else
		ack_timeout = ca_ack_delay +
			      (ack_timeout >= (ca_ack_delay - 1));

	/* the encoded exponent field is 5 bits wide */
	return min(31, ack_timeout);
}
/*
 * Unlink a timewait entry from the remote-id and remote-qpn red-black
 * trees, if it was inserted.  Callers hold cm.lock.
 */
static void cm_cleanup_timewait(struct cm_timewait_info *timewait_info)
{
	if (timewait_info->inserted_remote_id) {
		timewait_info->inserted_remote_id = 0;
		rb_erase(&timewait_info->remote_id_node, &cm.remote_id_table);
	}

	if (timewait_info->inserted_remote_qp) {
		timewait_info->inserted_remote_qp = 0;
		rb_erase(&timewait_info->remote_qp_node, &cm.remote_qp_table);
	}
}
/*
 * Allocate a timewait tracking structure for @local_id.  The embedded
 * delayed work, once scheduled, runs cm_work_handler() with an
 * IB_CM_TIMEWAIT_EXIT event.  Returns ERR_PTR(-ENOMEM) on failure.
 */
static struct cm_timewait_info * cm_create_timewait_info(__be32 local_id)
{
	struct cm_timewait_info *info;

	info = kzalloc(sizeof *info, GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	info->work.local_id = local_id;
	info->work.cm_event.event = IB_CM_TIMEWAIT_EXIT;
	INIT_DELAYED_WORK(&info->work.work, cm_work_handler);
	return info;
}
/*
 * Transition a connection into TIMEWAIT: queue its timewait_info on the
 * global timewait list and schedule the delayed work that will later
 * deliver IB_CM_TIMEWAIT_EXIT.  Called with cm_id_priv->lock held
 * (see the DREQ_SENT case in cm_destroy_id).
 */
static void cm_enter_timewait(struct cm_id_private *cm_id_priv)
{
int wait_time;
unsigned long flags;
spin_lock_irqsave(&cm.lock, flags);
/* Drop any stale tree entries before re-queuing the timewait info. */
cm_cleanup_timewait(cm_id_priv->timewait_info);
list_add_tail(&cm_id_priv->timewait_info->list, &cm.timewait_list);
spin_unlock_irqrestore(&cm.lock, flags);
/*
 * The cm_id could be destroyed by the user before we exit timewait.
 * To protect against this, we search for the cm_id after exiting
 * timewait before notifying the user that we've exited timewait.
 */
cm_id_priv->id.state = IB_CM_TIMEWAIT;
/* Wait one packet lifetime, as encoded in the av's ack timeout. */
wait_time = cm_convert_to_ms(cm_id_priv->av.timeout);
queue_delayed_work(cm.wq, &cm_id_priv->timewait_info->work.work,
msecs_to_jiffies(wait_time));
/* Ownership of timewait_info now belongs to the delayed work. */
cm_id_priv->timewait_info = NULL;
}
/*
 * Return a cm_id to IB_CM_IDLE, discarding any timewait tracking state
 * that was allocated for the aborted connection attempt.
 */
static void cm_reset_to_idle(struct cm_id_private *cm_id_priv)
{
	unsigned long flags;

	cm_id_priv->id.state = IB_CM_IDLE;
	if (!cm_id_priv->timewait_info)
		return;

	spin_lock_irqsave(&cm.lock, flags);
	cm_cleanup_timewait(cm_id_priv->timewait_info);
	spin_unlock_irqrestore(&cm.lock, flags);

	kfree(cm_id_priv->timewait_info);
	cm_id_priv->timewait_info = NULL;
}
/*
 * Tear down a cm_id from any state: back the state machine out (cancel
 * outstanding MADs and send REJ/DREQ/DREP as the current state requires),
 * wait for all references and queued events to drain, then free the id.
 *
 * @err: reason for destruction; -ENOMEM selects a gentler REQ_RCVD path
 *       that resets to idle without rejecting, so the peer may retry.
 */
static void cm_destroy_id(struct ib_cm_id *cm_id, int err)
{
struct cm_id_private *cm_id_priv;
struct cm_work *work;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
retest:
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id->state) {
case IB_CM_LISTEN:
/* Remove the listener from the global service tree. */
cm_id->state = IB_CM_IDLE;
spin_unlock_irq(&cm_id_priv->lock);
spin_lock_irq(&cm.lock);
rb_erase(&cm_id_priv->service_node, &cm.listen_service_table);
spin_unlock_irq(&cm.lock);
break;
case IB_CM_SIDR_REQ_SENT:
cm_id->state = IB_CM_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
spin_unlock_irq(&cm_id_priv->lock);
break;
case IB_CM_SIDR_REQ_RCVD:
spin_unlock_irq(&cm_id_priv->lock);
cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
break;
case IB_CM_REQ_SENT:
/* Cancel the outstanding REQ, then tell the peer we timed out. */
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
&cm_id_priv->id.device->node_guid,
sizeof cm_id_priv->id.device->node_guid,
NULL, 0);
break;
case IB_CM_REQ_RCVD:
if (err == -ENOMEM) {
/* Do not reject to allow future retries. */
cm_reset_to_idle(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
} else {
spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
}
break;
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
/* These states have a MAD in flight that must be cancelled first. */
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
/* Fall through */
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
NULL, 0, NULL, 0);
break;
case IB_CM_ESTABLISHED:
spin_unlock_irq(&cm_id_priv->lock);
if (cm_id_priv->qp_type == IB_QPT_XRC_TGT)
break;
/* Disconnect first; the DREQ moves us to a state handled above. */
ib_send_cm_dreq(cm_id, NULL, 0);
goto retest;
case IB_CM_DREQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
cm_enter_timewait(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
break;
case IB_CM_DREQ_RCVD:
spin_unlock_irq(&cm_id_priv->lock);
ib_send_cm_drep(cm_id, NULL, 0);
break;
default:
spin_unlock_irq(&cm_id_priv->lock);
break;
}
/* Release the id and block until every outstanding reference drops. */
cm_free_id(cm_id->local_id);
cm_deref_id(cm_id_priv);
wait_for_completion(&cm_id_priv->comp);
/* Drain and free any events that were still queued for delivery. */
while ((work = cm_dequeue_work(cm_id_priv)) != NULL)
cm_free_work(work);
kfree(cm_id_priv->compare_data);
kfree(cm_id_priv->private_data);
kfree(cm_id_priv);
}
/**
 * ib_destroy_cm_id - Release a cm_id allocated with ib_create_cm_id().
 * @cm_id: connection identifier to destroy.
 *
 * Blocks until all references to the id have been released.
 */
void ib_destroy_cm_id(struct ib_cm_id *cm_id)
{
cm_destroy_id(cm_id, 0);
}
EXPORT_SYMBOL(ib_destroy_cm_id);
/**
 * ib_cm_listen - Listen for incoming connection requests.
 * @cm_id: idle cm_id to transition to the LISTEN state.
 * @service_id: service to listen on, or IB_CM_ASSIGN_SERVICE_ID to have
 *	one allocated from cm.listen_service_id.
 * @service_mask: mask applied to incoming service ids; 0 means exact
 *	match (expanded to ~0).
 * @compare_data: optional private-data filter; copied, so the caller's
 *	buffer need not outlive this call.
 *
 * Returns 0 on success, -EINVAL for bad state or arguments, -ENOMEM,
 * or -EBUSY if another id already listens on the same service.
 */
int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
struct ib_cm_compare_data *compare_data)
{
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
unsigned long flags;
int ret = 0;
service_mask = service_mask ? service_mask : ~cpu_to_be64(0);
service_id &= service_mask;
if ((service_id & IB_SERVICE_ID_AGN_MASK) == IB_CM_ASSIGN_SERVICE_ID &&
(service_id != IB_CM_ASSIGN_SERVICE_ID))
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
if (cm_id->state != IB_CM_IDLE)
return -EINVAL;
if (compare_data) {
/* Keep a masked private copy of the caller's filter. */
cm_id_priv->compare_data = kzalloc(sizeof *compare_data,
GFP_KERNEL);
if (!cm_id_priv->compare_data)
return -ENOMEM;
cm_mask_copy(cm_id_priv->compare_data->data,
compare_data->data, compare_data->mask);
memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
IB_CM_COMPARE_SIZE);
}
cm_id->state = IB_CM_LISTEN;
spin_lock_irqsave(&cm.lock, flags);
if (service_id == IB_CM_ASSIGN_SERVICE_ID) {
cm_id->service_id = cpu_to_be64(cm.listen_service_id++);
cm_id->service_mask = ~cpu_to_be64(0);
} else {
cm_id->service_id = service_id;
cm_id->service_mask = service_mask;
}
cur_cm_id_priv = cm_insert_listen(cm_id_priv);
spin_unlock_irqrestore(&cm.lock, flags);
if (cur_cm_id_priv) {
/* Another listener owns this service; undo our changes. */
cm_id->state = IB_CM_IDLE;
kfree(cm_id_priv->compare_data);
cm_id_priv->compare_data = NULL;
ret = -EBUSY;
}
return ret;
}
EXPORT_SYMBOL(ib_cm_listen);
static __be64 cm_form_tid(struct cm_id_private *cm_id_priv,
enum cm_msg_sequence msg_seq)
{
u64 hi_tid, low_tid;
hi_tid = ((u64) cm_id_priv->av.port->mad_agent->hi_tid) << 32;
low_tid = (u64) ((__force u32)cm_id_priv->id.local_id |
(msg_seq << 30));
return cpu_to_be64(hi_tid | low_tid);
}
/*
 * Fill in the common MAD header shared by all outgoing CM messages:
 * CM management class, SEND method, plus the caller's attribute id
 * (message type) and transaction id.
 */
static void cm_format_mad_hdr(struct ib_mad_hdr *hdr,
__be16 attr_id, __be64 tid)
{
hdr->base_version = IB_MGMT_BASE_VERSION;
hdr->mgmt_class = IB_MGMT_CLASS_CM;
hdr->class_version = IB_CM_CLASS_VERSION;
hdr->method = IB_MGMT_METHOD_SEND;
hdr->attr_id = attr_id;
hdr->tid = tid;
}
/*
 * Build the wire-format REQ message from the caller's connection
 * parameters and the primary (and optional alternate) path records.
 */
static void cm_format_req(struct cm_req_msg *req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_req_param *param)
{
struct ib_sa_path_rec *pri_path = param->primary_path;
struct ib_sa_path_rec *alt_path = param->alternate_path;
cm_format_mad_hdr(&req_msg->hdr, CM_REQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_REQ));
req_msg->local_comm_id = cm_id_priv->id.local_id;
req_msg->service_id = param->service_id;
req_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
cm_req_set_local_qpn(req_msg, cpu_to_be32(param->qp_num));
cm_req_set_init_depth(req_msg, param->initiator_depth);
cm_req_set_remote_resp_timeout(req_msg,
param->remote_cm_response_timeout);
cm_req_set_qp_type(req_msg, param->qp_type);
cm_req_set_flow_ctrl(req_msg, param->flow_control);
cm_req_set_starting_psn(req_msg, cpu_to_be32(param->starting_psn));
cm_req_set_local_resp_timeout(req_msg,
param->local_cm_response_timeout);
req_msg->pkey = param->primary_path->pkey;
cm_req_set_path_mtu(req_msg, param->primary_path->mtu);
cm_req_set_max_cm_retries(req_msg, param->max_cm_retries);
/* These fields are not meaningful for XRC initiator QPs. */
if (param->qp_type != IB_QPT_XRC_INI) {
cm_req_set_resp_res(req_msg, param->responder_resources);
cm_req_set_retry_count(req_msg, param->retry_count);
cm_req_set_rnr_retry_count(req_msg, param->rnr_retry_count);
cm_req_set_srq(req_msg, param->srq);
}
if (pri_path->hop_limit <= 1) {
/* Same subnet: real LIDs are available from the path record. */
req_msg->primary_local_lid = pri_path->slid;
req_msg->primary_remote_lid = pri_path->dlid;
} else {
/* Work-around until there's a way to obtain remote LID info */
req_msg->primary_local_lid = IB_LID_PERMISSIVE;
req_msg->primary_remote_lid = IB_LID_PERMISSIVE;
}
req_msg->primary_local_gid = pri_path->sgid;
req_msg->primary_remote_gid = pri_path->dgid;
cm_req_set_primary_flow_label(req_msg, pri_path->flow_label);
cm_req_set_primary_packet_rate(req_msg, pri_path->rate);
req_msg->primary_traffic_class = pri_path->traffic_class;
req_msg->primary_hop_limit = pri_path->hop_limit;
cm_req_set_primary_sl(req_msg, pri_path->sl);
cm_req_set_primary_subnet_local(req_msg, (pri_path->hop_limit <= 1));
cm_req_set_primary_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
pri_path->packet_life_time));
if (alt_path) {
if (alt_path->hop_limit <= 1) {
req_msg->alt_local_lid = alt_path->slid;
req_msg->alt_remote_lid = alt_path->dlid;
} else {
req_msg->alt_local_lid = IB_LID_PERMISSIVE;
req_msg->alt_remote_lid = IB_LID_PERMISSIVE;
}
req_msg->alt_local_gid = alt_path->sgid;
req_msg->alt_remote_gid = alt_path->dgid;
cm_req_set_alt_flow_label(req_msg,
alt_path->flow_label);
cm_req_set_alt_packet_rate(req_msg, alt_path->rate);
req_msg->alt_traffic_class = alt_path->traffic_class;
req_msg->alt_hop_limit = alt_path->hop_limit;
cm_req_set_alt_sl(req_msg, alt_path->sl);
cm_req_set_alt_subnet_local(req_msg, (alt_path->hop_limit <= 1));
cm_req_set_alt_local_ack_timeout(req_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
alt_path->packet_life_time));
}
if (param->private_data && param->private_data_len)
memcpy(req_msg->private_data, param->private_data,
param->private_data_len);
}
static int cm_validate_req_param(struct ib_cm_req_param *param)
{
/* peer-to-peer not supported */
if (param->peer_to_peer)
return -EINVAL;
if (!param->primary_path)
return -EINVAL;
if (param->qp_type != IB_QPT_RC && param->qp_type != IB_QPT_UC &&
param->qp_type != IB_QPT_XRC_INI)
return -EINVAL;
if (param->private_data &&
param->private_data_len > IB_CM_REQ_PRIVATE_DATA_SIZE)
return -EINVAL;
if (param->alternate_path &&
(param->alternate_path->pkey != param->primary_path->pkey ||
param->alternate_path->mtu != param->primary_path->mtu))
return -EINVAL;
return 0;
}
/**
 * ib_send_cm_req - Send a connection request to a remote service.
 * @cm_id: idle cm_id that will own the connection.
 * @param: connection parameters, including the primary path record.
 *
 * On success the id transitions to IB_CM_REQ_SENT.  On failure the
 * partially-initialized timewait_info and message buffer are released
 * via the error labels.  Returns 0 or a negative errno.
 */
int ib_send_cm_req(struct ib_cm_id *cm_id,
struct ib_cm_req_param *param)
{
struct cm_id_private *cm_id_priv;
struct cm_req_msg *req_msg;
unsigned long flags;
int ret;
ret = cm_validate_req_param(param);
if (ret)
return ret;
/* Verify that we're not in timewait. */
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_IDLE) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
ret = -EINVAL;
goto out;
}
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
goto out;
}
ret = cm_init_av_by_path(param->primary_path, &cm_id_priv->av);
if (ret)
goto error1;
if (param->alternate_path) {
ret = cm_init_av_by_path(param->alternate_path,
&cm_id_priv->alt_av);
if (ret)
goto error1;
}
cm_id->service_id = param->service_id;
cm_id->service_mask = ~cpu_to_be64(0);
/* Overall timeout: 2 x packet lifetime + remote CM response time. */
cm_id_priv->timeout_ms = cm_convert_to_ms(
param->primary_path->packet_life_time) * 2 +
cm_convert_to_ms(
param->remote_cm_response_timeout);
cm_id_priv->max_cm_retries = param->max_cm_retries;
cm_id_priv->initiator_depth = param->initiator_depth;
cm_id_priv->responder_resources = param->responder_resources;
cm_id_priv->retry_count = param->retry_count;
cm_id_priv->path_mtu = param->primary_path->mtu;
cm_id_priv->pkey = param->primary_path->pkey;
cm_id_priv->qp_type = param->qp_type;
ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
if (ret)
goto error1;
req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
cm_format_req(req_msg, cm_id_priv, param);
cm_id_priv->tid = req_msg->hdr.tid;
cm_id_priv->msg->timeout_ms = cm_id_priv->timeout_ms;
/* Tag the send so its completion knows which state it belongs to. */
cm_id_priv->msg->context[1] = (void *) (unsigned long) IB_CM_REQ_SENT;
cm_id_priv->local_qpn = cm_req_get_local_qpn(req_msg);
cm_id_priv->rq_psn = cm_req_get_starting_psn(req_msg);
/* Post and state change must be atomic w.r.t. incoming responses. */
spin_lock_irqsave(&cm_id_priv->lock, flags);
ret = ib_post_send_mad(cm_id_priv->msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
goto error2;
}
BUG_ON(cm_id->state != IB_CM_IDLE);
cm_id->state = IB_CM_REQ_SENT;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return 0;
error2: cm_free_msg(cm_id_priv->msg);
error1: kfree(cm_id_priv->timewait_info);
out: return ret;
}
EXPORT_SYMBOL(ib_send_cm_req);
/*
 * Send a stand-alone REJ in response to a received MAD when no local
 * cm_id owns the exchange (e.g. stale or unmatchable requests).  The
 * comm ids are mirrored from the received message.
 */
static int cm_issue_rej(struct cm_port *port,
			struct ib_mad_recv_wc *mad_recv_wc,
			enum ib_cm_rej_reason reason,
			enum cm_msg_response msg_rejected,
			void *ari, u8 ari_length)
{
	struct ib_mad_send_buf *msg = NULL;
	struct cm_rej_msg *rej_msg;
	struct cm_rej_msg *rcv_msg;
	int ret;

	ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
	if (ret)
		return ret;

	/* We just need common CM header information.  Cast to any message. */
	rcv_msg = (struct cm_rej_msg *) mad_recv_wc->recv_buf.mad;
	rej_msg = (struct cm_rej_msg *) msg->mad;

	cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, rcv_msg->hdr.tid);
	rej_msg->remote_comm_id = rcv_msg->local_comm_id;
	rej_msg->local_comm_id = rcv_msg->remote_comm_id;
	cm_rej_set_msg_rejected(rej_msg, msg_rejected);
	rej_msg->reason = cpu_to_be16(reason);

	if (ari && ari_length) {
		cm_rej_set_reject_info_len(rej_msg, ari_length);
		memcpy(rej_msg->ari, ari, ari_length);
	}

	ret = ib_post_send_mad(msg, NULL);
	if (ret)
		cm_free_msg(msg);
	return ret;
}
/*
 * Decide which side of a connection is the "active" peer: the one with
 * the numerically larger CA GUID, using the QPN as the tie breaker.
 */
static inline int cm_is_active_peer(__be64 local_ca_guid, __be64 remote_ca_guid,
				    __be32 local_qpn, __be32 remote_qpn)
{
	u64 lguid = be64_to_cpu(local_ca_guid);
	u64 rguid = be64_to_cpu(remote_ca_guid);

	if (lguid != rguid)
		return lguid > rguid;
	return be32_to_cpu(local_qpn) > be32_to_cpu(remote_qpn);
}
/*
 * Reconstruct path records from a received REQ.  Note the perspective
 * flip: the sender's "local" fields become our destination (dgid/dlid)
 * and its "remote" fields become our source.
 */
static void cm_format_paths_from_req(struct cm_req_msg *req_msg,
struct ib_sa_path_rec *primary_path,
struct ib_sa_path_rec *alt_path)
{
memset(primary_path, 0, sizeof *primary_path);
primary_path->dgid = req_msg->primary_local_gid;
primary_path->sgid = req_msg->primary_remote_gid;
primary_path->dlid = req_msg->primary_local_lid;
primary_path->slid = req_msg->primary_remote_lid;
primary_path->flow_label = cm_req_get_primary_flow_label(req_msg);
primary_path->hop_limit = req_msg->primary_hop_limit;
primary_path->traffic_class = req_msg->primary_traffic_class;
primary_path->reversible = 1;
primary_path->pkey = req_msg->pkey;
primary_path->sl = cm_req_get_primary_sl(req_msg);
primary_path->mtu_selector = IB_SA_EQ;
primary_path->mtu = cm_req_get_path_mtu(req_msg);
primary_path->rate_selector = IB_SA_EQ;
primary_path->rate = cm_req_get_primary_packet_rate(req_msg);
primary_path->packet_life_time_selector = IB_SA_EQ;
primary_path->packet_life_time =
cm_req_get_primary_local_ack_timeout(req_msg);
/* Undo the +1 rounding applied by cm_ack_timeout on the sender. */
primary_path->packet_life_time -= (primary_path->packet_life_time > 0);
/* A non-zero alt LID signals that an alternate path was supplied. */
if (req_msg->alt_local_lid) {
memset(alt_path, 0, sizeof *alt_path);
alt_path->dgid = req_msg->alt_local_gid;
alt_path->sgid = req_msg->alt_remote_gid;
alt_path->dlid = req_msg->alt_local_lid;
alt_path->slid = req_msg->alt_remote_lid;
alt_path->flow_label = cm_req_get_alt_flow_label(req_msg);
alt_path->hop_limit = req_msg->alt_hop_limit;
alt_path->traffic_class = req_msg->alt_traffic_class;
alt_path->reversible = 1;
alt_path->pkey = req_msg->pkey;
alt_path->sl = cm_req_get_alt_sl(req_msg);
alt_path->mtu_selector = IB_SA_EQ;
alt_path->mtu = cm_req_get_path_mtu(req_msg);
alt_path->rate_selector = IB_SA_EQ;
alt_path->rate = cm_req_get_alt_packet_rate(req_msg);
alt_path->packet_life_time_selector = IB_SA_EQ;
alt_path->packet_life_time =
cm_req_get_alt_local_ack_timeout(req_msg);
alt_path->packet_life_time -= (alt_path->packet_life_time > 0);
}
}
/*
 * Populate the REQ-received event delivered to the listener's handler
 * from the incoming REQ message.  Fields are translated to the local
 * perspective (e.g. the peer's init depth becomes our responder
 * resources, and the two CM response timeouts swap sides).
 */
static void cm_format_req_event(struct cm_work *work,
struct cm_id_private *cm_id_priv,
struct ib_cm_id *listen_id)
{
struct cm_req_msg *req_msg;
struct ib_cm_req_event_param *param;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.req_rcvd;
param->listen_id = listen_id;
param->port = cm_id_priv->av.port->port_num;
param->primary_path = &work->path[0];
if (req_msg->alt_local_lid)
param->alternate_path = &work->path[1];
else
param->alternate_path = NULL;
param->remote_ca_guid = req_msg->local_ca_guid;
param->remote_qkey = be32_to_cpu(req_msg->local_qkey);
param->remote_qpn = be32_to_cpu(cm_req_get_local_qpn(req_msg));
param->qp_type = cm_req_get_qp_type(req_msg);
param->starting_psn = be32_to_cpu(cm_req_get_starting_psn(req_msg));
param->responder_resources = cm_req_get_init_depth(req_msg);
param->initiator_depth = cm_req_get_resp_res(req_msg);
param->local_cm_response_timeout =
cm_req_get_remote_resp_timeout(req_msg);
param->flow_control = cm_req_get_flow_ctrl(req_msg);
param->remote_cm_response_timeout =
cm_req_get_local_resp_timeout(req_msg);
param->retry_count = cm_req_get_retry_count(req_msg);
param->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
param->srq = cm_req_get_srq(req_msg);
work->cm_event.private_data = &req_msg->private_data;
}
/*
 * Deliver @work to the consumer's event handler, then drain any events
 * that were queued while this one was in flight.  work_count counts
 * queued events; atomic_add_negative() returning true (count back to
 * -1) means the queue is empty.  A non-zero handler return destroys
 * the cm_id.
 */
static void cm_process_work(struct cm_id_private *cm_id_priv,
struct cm_work *work)
{
int ret;
/* We will typically only have the current event to report. */
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &work->cm_event);
cm_free_work(work);
while (!ret && !atomic_add_negative(-1, &cm_id_priv->work_count)) {
spin_lock_irq(&cm_id_priv->lock);
work = cm_dequeue_work(cm_id_priv);
spin_unlock_irq(&cm_id_priv->lock);
/* work_count said an event was queued, so it must be there. */
BUG_ON(!work);
ret = cm_id_priv->id.cm_handler(&cm_id_priv->id,
&work->cm_event);
cm_free_work(work);
}
cm_deref_id(cm_id_priv);
if (ret)
cm_destroy_id(&cm_id_priv->id, ret);
}
/*
 * Build an MRA (Message Receipt Acknowledgement) asking the peer to
 * extend its timeout for the message class given in @msg_mraed.
 */
static void cm_format_mra(struct cm_mra_msg *mra_msg,
struct cm_id_private *cm_id_priv,
enum cm_msg_response msg_mraed, u8 service_timeout,
const void *private_data, u8 private_data_len)
{
cm_format_mad_hdr(&mra_msg->hdr, CM_MRA_ATTR_ID, cm_id_priv->tid);
cm_mra_set_msg_mraed(mra_msg, msg_mraed);
mra_msg->local_comm_id = cm_id_priv->id.local_id;
mra_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_mra_set_service_timeout(mra_msg, service_timeout);
if (private_data && private_data_len)
memcpy(mra_msg->private_data, private_data, private_data_len);
}
/*
 * Build a REJ message.  Which message class is being rejected, and
 * whether our local comm id is disclosed, depends on the current
 * connection state (the peer does not yet know our id in REQ_RCVD).
 */
static void cm_format_rej(struct cm_rej_msg *rej_msg,
struct cm_id_private *cm_id_priv,
enum ib_cm_rej_reason reason,
void *ari,
u8 ari_length,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&rej_msg->hdr, CM_REJ_ATTR_ID, cm_id_priv->tid);
rej_msg->remote_comm_id = cm_id_priv->id.remote_id;
switch(cm_id_priv->id.state) {
case IB_CM_REQ_RCVD:
/* We never replied, so the peer has no local id for us yet. */
rej_msg->local_comm_id = 0;
cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
break;
case IB_CM_MRA_REQ_SENT:
rej_msg->local_comm_id = cm_id_priv->id.local_id;
cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REQ);
break;
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
rej_msg->local_comm_id = cm_id_priv->id.local_id;
cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_REP);
break;
default:
rej_msg->local_comm_id = cm_id_priv->id.local_id;
cm_rej_set_msg_rejected(rej_msg, CM_MSG_RESPONSE_OTHER);
break;
}
rej_msg->reason = cpu_to_be16(reason);
if (ari && ari_length) {
cm_rej_set_reject_info_len(rej_msg, ari_length);
memcpy(rej_msg->ari, ari, ari_length);
}
if (private_data && private_data_len)
memcpy(rej_msg->private_data, private_data, private_data_len);
}
/*
 * Handle a retransmitted REQ for a connection we already know about:
 * re-send our MRA if we had asked for more time, send a stale-
 * connection REJ if we are in timewait, and otherwise drop it.
 */
static void cm_dup_req_handler(struct cm_work *work,
struct cm_id_private *cm_id_priv)
{
struct ib_mad_send_buf *msg = NULL;
int ret;
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_REQ_COUNTER]);
/* Quick state check to discard duplicate REQs. */
if (cm_id_priv->id.state == IB_CM_REQ_RCVD)
return;
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
if (ret)
return;
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_MRA_REQ_SENT:
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_REQ, cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
break;
case IB_CM_TIMEWAIT:
cm_format_rej((struct cm_rej_msg *) msg->mad, cm_id_priv,
IB_CM_REJ_STALE_CONN, NULL, 0, NULL, 0);
break;
default:
goto unlock;
}
spin_unlock_irq(&cm_id_priv->lock);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto free;
return;
unlock: spin_unlock_irq(&cm_id_priv->lock);
free: cm_free_msg(msg);
}
/*
 * Match an incoming REQ against existing connections and listeners.
 * Detects duplicate REQs (remote id already known), stale connections
 * (remote QPN already in use), and finally looks up a listener for the
 * service id.  On success, returns the listener with an extra reference
 * taken, moves the new cm_id to IB_CM_REQ_RCVD, and bumps its ref and
 * work counts.  Returns NULL if the REQ was a duplicate, stale, or
 * unmatched (a REJ is issued for the latter two).
 */
static struct cm_id_private * cm_match_req(struct cm_work *work,
struct cm_id_private *cm_id_priv)
{
struct cm_id_private *listen_cm_id_priv, *cur_cm_id_priv;
struct cm_timewait_info *timewait_info;
struct cm_req_msg *req_msg;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
/* Check for possible duplicate REQ. */
spin_lock_irq(&cm.lock);
timewait_info = cm_insert_remote_id(cm_id_priv->timewait_info);
if (timewait_info) {
/* Remote id already tracked: this is a retransmission. */
cur_cm_id_priv = cm_get_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
spin_unlock_irq(&cm.lock);
if (cur_cm_id_priv) {
cm_dup_req_handler(work, cur_cm_id_priv);
cm_deref_id(cur_cm_id_priv);
}
return NULL;
}
/* Check for stale connections. */
timewait_info = cm_insert_remote_qpn(cm_id_priv->timewait_info);
if (timewait_info) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REQ,
NULL, 0);
return NULL;
}
/* Find matching listen request. */
listen_cm_id_priv = cm_find_listen(cm_id_priv->id.device,
req_msg->service_id,
req_msg->private_data);
if (!listen_cm_id_priv) {
cm_cleanup_timewait(cm_id_priv->timewait_info);
spin_unlock_irq(&cm.lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_INVALID_SERVICE_ID, CM_MSG_RESPONSE_REQ,
NULL, 0);
goto out;
}
atomic_inc(&listen_cm_id_priv->refcount);
atomic_inc(&cm_id_priv->refcount);
cm_id_priv->id.state = IB_CM_REQ_RCVD;
atomic_inc(&cm_id_priv->work_count);
spin_unlock_irq(&cm.lock);
out:
return listen_cm_id_priv;
}
/*
 * Work-around for inter-subnet connections.  If the LIDs are permissive,
 * we need to override the LID/SL data in the REQ with the LID information
 * in the work completion (the sender filled in IB_LID_PERMISSIVE in
 * cm_format_req when the path crossed a router).
 */
static void cm_process_routed_req(struct cm_req_msg *req_msg, struct ib_wc *wc)
{
if (!cm_req_get_primary_subnet_local(req_msg)) {
if (req_msg->primary_local_lid == IB_LID_PERMISSIVE) {
req_msg->primary_local_lid = cpu_to_be16(wc->slid);
cm_req_set_primary_sl(req_msg, wc->sl);
}
if (req_msg->primary_remote_lid == IB_LID_PERMISSIVE)
req_msg->primary_remote_lid = cpu_to_be16(wc->dlid_path_bits);
}
if (!cm_req_get_alt_subnet_local(req_msg)) {
if (req_msg->alt_local_lid == IB_LID_PERMISSIVE) {
req_msg->alt_local_lid = cpu_to_be16(wc->slid);
cm_req_set_alt_sl(req_msg, wc->sl);
}
if (req_msg->alt_remote_lid == IB_LID_PERMISSIVE)
req_msg->alt_remote_lid = cpu_to_be16(wc->dlid_path_bits);
}
}
/*
 * Handle a received REQ: create a new cm_id for the passive side,
 * match it to a listener, resolve the path(s), copy connection
 * parameters out of the message, and deliver an IB_CM_REQ_RECEIVED
 * event to the listener's handler.
 */
static int cm_req_handler(struct cm_work *work)
{
struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *listen_cm_id_priv;
struct cm_req_msg *req_msg;
int ret;
req_msg = (struct cm_req_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
cm_id_priv->id.remote_id = req_msg->local_comm_id;
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
cm_id_priv->timewait_info = cm_create_timewait_info(cm_id_priv->
id.local_id);
if (IS_ERR(cm_id_priv->timewait_info)) {
ret = PTR_ERR(cm_id_priv->timewait_info);
goto destroy;
}
cm_id_priv->timewait_info->work.remote_id = req_msg->local_comm_id;
cm_id_priv->timewait_info->remote_ca_guid = req_msg->local_ca_guid;
cm_id_priv->timewait_info->remote_qpn = cm_req_get_local_qpn(req_msg);
/* On success this takes refs on both ids and sets state REQ_RCVD. */
listen_cm_id_priv = cm_match_req(work, cm_id_priv);
if (!listen_cm_id_priv) {
ret = -EINVAL;
kfree(cm_id_priv->timewait_info);
goto destroy;
}
/* The new connection inherits the listener's handler and context. */
cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
cm_id_priv->id.context = listen_cm_id_priv->id.context;
cm_id_priv->id.service_id = req_msg->service_id;
cm_id_priv->id.service_mask = ~cpu_to_be64(0);
cm_process_routed_req(req_msg, work->mad_recv_wc->wc);
cm_format_paths_from_req(req_msg, &work->path[0], &work->path[1]);
ret = cm_init_av_by_path(&work->path[0], &cm_id_priv->av);
if (ret) {
/* Report our own GID back as the rejection info. */
ib_get_cached_gid(work->port->cm_dev->ib_device,
work->port->port_num, 0, &work->path[0].sgid);
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_GID,
&work->path[0].sgid, sizeof work->path[0].sgid,
NULL, 0);
goto rejected;
}
if (req_msg->alt_local_lid) {
ret = cm_init_av_by_path(&work->path[1], &cm_id_priv->alt_av);
if (ret) {
ib_send_cm_rej(cm_id, IB_CM_REJ_INVALID_ALT_GID,
&work->path[0].sgid,
sizeof work->path[0].sgid, NULL, 0);
goto rejected;
}
}
cm_id_priv->tid = req_msg->hdr.tid;
cm_id_priv->timeout_ms = cm_convert_to_ms(
cm_req_get_local_resp_timeout(req_msg));
cm_id_priv->max_cm_retries = cm_req_get_max_cm_retries(req_msg);
cm_id_priv->remote_qpn = cm_req_get_local_qpn(req_msg);
/* Peer's resp_res/init_depth map to our init_depth/resp_res. */
cm_id_priv->initiator_depth = cm_req_get_resp_res(req_msg);
cm_id_priv->responder_resources = cm_req_get_init_depth(req_msg);
cm_id_priv->path_mtu = cm_req_get_path_mtu(req_msg);
cm_id_priv->pkey = req_msg->pkey;
cm_id_priv->sq_psn = cm_req_get_starting_psn(req_msg);
cm_id_priv->retry_count = cm_req_get_retry_count(req_msg);
cm_id_priv->rnr_retry_count = cm_req_get_rnr_retry_count(req_msg);
cm_id_priv->qp_type = cm_req_get_qp_type(req_msg);
cm_format_req_event(work, cm_id_priv, &listen_cm_id_priv->id);
cm_process_work(cm_id_priv, work);
cm_deref_id(listen_cm_id_priv);
return 0;
rejected:
/* Drop the reference cm_match_req took on the new id. */
atomic_dec(&cm_id_priv->refcount);
cm_deref_id(listen_cm_id_priv);
destroy:
ib_destroy_cm_id(cm_id);
return ret;
}
/*
 * Build the wire-format REP message from the caller's reply parameters.
 * XRC target QPs carry the qp_num in the EECN field and imply SRQ use.
 */
static void cm_format_rep(struct cm_rep_msg *rep_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_rep_param *param)
{
cm_format_mad_hdr(&rep_msg->hdr, CM_REP_ATTR_ID, cm_id_priv->tid);
rep_msg->local_comm_id = cm_id_priv->id.local_id;
rep_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_rep_set_starting_psn(rep_msg, cpu_to_be32(param->starting_psn));
rep_msg->resp_resources = param->responder_resources;
cm_rep_set_target_ack_delay(rep_msg,
cm_id_priv->av.port->cm_dev->ack_delay);
cm_rep_set_failover(rep_msg, param->failover_accepted);
cm_rep_set_rnr_retry_count(rep_msg, param->rnr_retry_count);
rep_msg->local_ca_guid = cm_id_priv->id.device->node_guid;
if (cm_id_priv->qp_type != IB_QPT_XRC_TGT) {
rep_msg->initiator_depth = param->initiator_depth;
cm_rep_set_flow_ctrl(rep_msg, param->flow_control);
cm_rep_set_srq(rep_msg, param->srq);
cm_rep_set_local_qpn(rep_msg, cpu_to_be32(param->qp_num));
} else {
cm_rep_set_srq(rep_msg, 1);
cm_rep_set_local_eecn(rep_msg, cpu_to_be32(param->qp_num));
}
if (param->private_data && param->private_data_len)
memcpy(rep_msg->private_data, param->private_data,
param->private_data_len);
}
/**
 * ib_send_cm_rep - Send a reply to a received connection request.
 * @cm_id: cm_id in REQ_RCVD or MRA_REQ_SENT state.
 * @param: reply parameters, including the local QP number.
 *
 * On success the id transitions to IB_CM_REP_SENT and keeps ownership
 * of the message buffer (for later cancellation/retransmission).
 * Returns 0 or a negative errno.
 */
int ib_send_cm_rep(struct ib_cm_id *cm_id,
struct ib_cm_rep_param *param)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
struct cm_rep_msg *rep_msg;
unsigned long flags;
int ret;
if (param->private_data &&
param->private_data_len > IB_CM_REP_PRIVATE_DATA_SIZE)
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
/* Lock is held across post + state change to serialize with events. */
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REQ_RCVD &&
cm_id->state != IB_CM_MRA_REQ_SENT) {
ret = -EINVAL;
goto out;
}
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
rep_msg = (struct cm_rep_msg *) msg->mad;
cm_format_rep(rep_msg, cm_id_priv, param);
msg->timeout_ms = cm_id_priv->timeout_ms;
msg->context[1] = (void *) (unsigned long) IB_CM_REP_SENT;
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
cm_id->state = IB_CM_REP_SENT;
cm_id_priv->msg = msg;
cm_id_priv->initiator_depth = param->initiator_depth;
cm_id_priv->responder_resources = param->responder_resources;
cm_id_priv->rq_psn = cm_rep_get_starting_psn(rep_msg);
/* QPNs are 24-bit on the wire. */
cm_id_priv->local_qpn = cpu_to_be32(param->qp_num & 0xFFFFFF);
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_rep);
/*
 * Build an RTU (Ready To Use) message confirming the connection to
 * the passive side.
 */
static void cm_format_rtu(struct cm_rtu_msg *rtu_msg,
struct cm_id_private *cm_id_priv,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&rtu_msg->hdr, CM_RTU_ATTR_ID, cm_id_priv->tid);
rtu_msg->local_comm_id = cm_id_priv->id.local_id;
rtu_msg->remote_comm_id = cm_id_priv->id.remote_id;
if (private_data && private_data_len)
memcpy(rtu_msg->private_data, private_data, private_data_len);
}
/**
 * ib_send_cm_rtu - Send a ready-to-use message to complete a connection.
 * @cm_id: cm_id in REP_RCVD or MRA_REP_SENT state.
 * @private_data: optional data carried in the RTU.
 * @private_data_len: length of @private_data; bounded by
 *	IB_CM_RTU_PRIVATE_DATA_SIZE.
 *
 * On success the id transitions to IB_CM_ESTABLISHED and the copied
 * private data is retained on the id (for duplicate-REP replies).
 * Returns 0 or a negative errno.
 */
int ib_send_cm_rtu(struct ib_cm_id *cm_id,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
void *data;
int ret;
if (private_data && private_data_len > IB_CM_RTU_PRIVATE_DATA_SIZE)
return -EINVAL;
/* Copy before taking the lock; the copy may sleep/allocate. */
data = cm_copy_private_data(private_data, private_data_len);
if (IS_ERR(data))
return PTR_ERR(data);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_REP_RCVD &&
cm_id->state != IB_CM_MRA_REP_SENT) {
ret = -EINVAL;
goto error;
}
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto error;
cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
kfree(data);
return ret;
}
cm_id->state = IB_CM_ESTABLISHED;
/* Ownership of the data copy transfers to the cm_id here. */
cm_set_private_data(cm_id_priv, data, private_data_len);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return 0;
error: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_rtu);
/*
 * Populate the REP-received event from the incoming REP message.
 * As with REQ handling, depth/resource fields swap perspective:
 * the peer's initiator_depth becomes our responder_resources.
 */
static void cm_format_rep_event(struct cm_work *work, enum ib_qp_type qp_type)
{
struct cm_rep_msg *rep_msg;
struct ib_cm_rep_event_param *param;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.rep_rcvd;
param->remote_ca_guid = rep_msg->local_ca_guid;
param->remote_qkey = be32_to_cpu(rep_msg->local_qkey);
param->remote_qpn = be32_to_cpu(cm_rep_get_qpn(rep_msg, qp_type));
param->starting_psn = be32_to_cpu(cm_rep_get_starting_psn(rep_msg));
param->responder_resources = rep_msg->initiator_depth;
param->initiator_depth = rep_msg->resp_resources;
param->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
param->failover_accepted = cm_rep_get_failover(rep_msg);
param->flow_control = cm_rep_get_flow_ctrl(rep_msg);
param->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
param->srq = cm_rep_get_srq(rep_msg);
work->cm_event.private_data = &rep_msg->private_data;
}
/*
 * Handle a retransmitted REP: if we already confirmed the connection,
 * re-send the RTU; if we asked the peer to wait, re-send the MRA;
 * otherwise drop it.  Note the acquire uses both comm ids, unlike the
 * first-REP lookup in cm_rep_handler.
 */
static void cm_dup_rep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
struct ib_mad_send_buf *msg = NULL;
int ret;
rep_msg = (struct cm_rep_msg *) work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id,
rep_msg->local_comm_id);
if (!cm_id_priv)
return;
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_REP_COUNTER]);
ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
if (ret)
goto deref;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state == IB_CM_ESTABLISHED)
cm_format_rtu((struct cm_rtu_msg *) msg->mad, cm_id_priv,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
else if (cm_id_priv->id.state == IB_CM_MRA_REP_SENT)
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_REP, cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
else
goto unlock;
spin_unlock_irq(&cm_id_priv->lock);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto free;
goto deref;
unlock: spin_unlock_irq(&cm_id_priv->lock);
free: cm_free_msg(msg);
deref: cm_deref_id(cm_id_priv);
}
/*
 * cm_rep_handler - process a received REP (connection reply) message.
 *
 * Matches the REP to a locally initiated REQ, records the remote
 * connection parameters in the timewait info and the cm_id, and either
 * processes the work item immediately or queues it behind in-flight
 * work for this id.  Returns 0 on success, -EINVAL if the REP is a
 * duplicate, unmatched, or arrives in the wrong state.
 */
static int cm_rep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rep_msg *rep_msg;
int ret;
rep_msg = (struct cm_rep_msg *)work->mad_recv_wc->recv_buf.mad;
/* Remote id is not yet known to us, so match on our local id only. */
cm_id_priv = cm_acquire_id(rep_msg->remote_comm_id, 0);
if (!cm_id_priv) {
/* No match: treat it as a duplicate and resend our response. */
cm_dup_rep_handler(work);
return -EINVAL;
}
cm_format_rep_event(work, cm_id_priv->qp_type);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
break;
default:
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
goto error;
}
cm_id_priv->timewait_info->work.remote_id = rep_msg->local_comm_id;
cm_id_priv->timewait_info->remote_ca_guid = rep_msg->local_ca_guid;
cm_id_priv->timewait_info->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
/* cm.lock nests inside cm_id_priv->lock here. */
spin_lock(&cm.lock);
/* Check for duplicate REP. */
if (cm_insert_remote_id(cm_id_priv->timewait_info)) {
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
goto error;
}
/* Check for a stale connection. */
if (cm_insert_remote_qpn(cm_id_priv->timewait_info)) {
/* Undo the remote-id insertion before rejecting. */
rb_erase(&cm_id_priv->timewait_info->remote_id_node,
&cm.remote_id_table);
cm_id_priv->timewait_info->inserted_remote_id = 0;
spin_unlock(&cm.lock);
spin_unlock_irq(&cm_id_priv->lock);
cm_issue_rej(work->port, work->mad_recv_wc,
IB_CM_REJ_STALE_CONN, CM_MSG_RESPONSE_REP,
NULL, 0);
ret = -EINVAL;
goto error;
}
spin_unlock(&cm.lock);
cm_id_priv->id.state = IB_CM_REP_RCVD;
cm_id_priv->id.remote_id = rep_msg->local_comm_id;
cm_id_priv->remote_qpn = cm_rep_get_qpn(rep_msg, cm_id_priv->qp_type);
cm_id_priv->initiator_depth = rep_msg->resp_resources;
cm_id_priv->responder_resources = rep_msg->initiator_depth;
cm_id_priv->sq_psn = cm_rep_get_starting_psn(rep_msg);
cm_id_priv->rnr_retry_count = cm_rep_get_rnr_retry_count(rep_msg);
cm_id_priv->target_ack_delay = cm_rep_get_target_ack_delay(rep_msg);
/* Fold the remote's ack delay into both address-vector timeouts. */
cm_id_priv->av.timeout =
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->av.timeout - 1);
cm_id_priv->alt_av.timeout =
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->alt_av.timeout - 1);
/* todo: handle peer_to_peer */
/* Stop retransmitting the REQ now that the REP arrived. */
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
/* work_count reaching zero means we may process the work inline. */
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
error:
cm_deref_id(cm_id_priv);
return ret;
}
/*
 * cm_establish_handler - deliver a locally generated "established" event.
 *
 * Cancels any outstanding MAD (e.g. a pending RTU retransmit) and
 * processes or queues the work item.  Returns 0 on success, -EINVAL if
 * the id is gone or no longer in the ESTABLISHED state.
 */
static int cm_establish_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
int ret;
/* See comment in cm_establish about lookup. */
cm_id_priv = cm_acquire_id(work->local_id, work->remote_id);
if (!cm_id_priv)
return -EINVAL;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED) {
spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
/* Process inline only if no other work is in flight for this id. */
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
/*
 * cm_rtu_handler - process a received RTU (ready to use) message.
 *
 * Transitions the passive side from REP_SENT / MRA_REP_RCVD to
 * ESTABLISHED and cancels the REP retransmit.  An RTU in any other
 * state is counted as a duplicate and dropped.  Returns 0 on success,
 * -EINVAL otherwise.
 */
static int cm_rtu_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rtu_msg *rtu_msg;
int ret;
rtu_msg = (struct cm_rtu_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(rtu_msg->remote_comm_id,
rtu_msg->local_comm_id);
if (!cm_id_priv)
return -EINVAL;
work->cm_event.private_data = &rtu_msg->private_data;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_REP_SENT &&
cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
spin_unlock_irq(&cm_id_priv->lock);
/* Wrong state: count as a duplicate RTU and drop it. */
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_RTU_COUNTER]);
goto out;
}
cm_id_priv->id.state = IB_CM_ESTABLISHED;
/* Stop retransmitting the REP. */
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
/*
 * cm_format_dreq - fill in a DREQ (disconnect request) MAD.
 * @dreq_msg: wire-format buffer to populate
 * @cm_id_priv: connection whose ids/QPN identify the disconnect
 * @private_data/@private_data_len: optional payload (caller validates size)
 */
static void cm_format_dreq(struct cm_dreq_msg *dreq_msg,
struct cm_id_private *cm_id_priv,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&dreq_msg->hdr, CM_DREQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_DREQ));
dreq_msg->local_comm_id = cm_id_priv->id.local_id;
dreq_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_dreq_set_remote_qpn(dreq_msg, cm_id_priv->remote_qpn);
if (private_data && private_data_len)
memcpy(dreq_msg->private_data, private_data, private_data_len);
}
/*
 * ib_send_cm_dreq - send a disconnect request on an established connection.
 *
 * Only valid in IB_CM_ESTABLISHED; on success the id moves to
 * IB_CM_DREQ_SENT.  If the DREQ cannot be allocated or posted, the id
 * is moved into timewait instead, so the connection still tears down.
 * Returns 0 or a negative errno.
 */
int ib_send_cm_dreq(struct ib_cm_id *cm_id,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
if (private_data && private_data_len > IB_CM_DREQ_PRIVATE_DATA_SIZE)
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED) {
ret = -EINVAL;
goto out;
}
/* A pending LAP retransmit must be cancelled before disconnecting. */
if (cm_id->lap_state == IB_CM_LAP_SENT ||
cm_id->lap_state == IB_CM_MRA_LAP_RCVD)
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret) {
/* Can't send the DREQ; drop straight into timewait. */
cm_enter_timewait(cm_id_priv);
goto out;
}
cm_format_dreq((struct cm_dreq_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
msg->timeout_ms = cm_id_priv->timeout_ms;
/* context[1] carries the state for send-completion handling. */
msg->context[1] = (void *) (unsigned long) IB_CM_DREQ_SENT;
ret = ib_post_send_mad(msg, NULL);
if (ret) {
cm_enter_timewait(cm_id_priv);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
cm_id->state = IB_CM_DREQ_SENT;
cm_id_priv->msg = msg;
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_dreq);
/*
 * cm_format_drep - fill in a DREP (disconnect reply) MAD.
 * Reuses the tid of the DREQ being answered (cm_id_priv->tid).
 */
static void cm_format_drep(struct cm_drep_msg *drep_msg,
struct cm_id_private *cm_id_priv,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, cm_id_priv->tid);
drep_msg->local_comm_id = cm_id_priv->id.local_id;
drep_msg->remote_comm_id = cm_id_priv->id.remote_id;
if (private_data && private_data_len)
memcpy(drep_msg->private_data, private_data, private_data_len);
}
/*
 * ib_send_cm_drep - reply to a received disconnect request.
 *
 * Only valid in IB_CM_DREQ_RCVD.  The id enters timewait before the
 * DREP is even built, so the connection tears down regardless of
 * whether the send succeeds.  Returns 0 or a negative errno.
 */
int ib_send_cm_drep(struct ib_cm_id *cm_id,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
void *data;
int ret;
if (private_data && private_data_len > IB_CM_DREP_PRIVATE_DATA_SIZE)
return -EINVAL;
/* Copy caller data before taking the lock (allocation may sleep). */
data = cm_copy_private_data(private_data, private_data_len);
if (IS_ERR(data))
return PTR_ERR(data);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_DREQ_RCVD) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
return -EINVAL;
}
cm_set_private_data(cm_id_priv, data, private_data_len);
cm_enter_timewait(cm_id_priv);
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
private_data, private_data_len);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_drep);
/*
 * cm_issue_drep - send a DREP for a DREQ that matched no local cm_id.
 *
 * Builds a minimal reply by mirroring the DREQ's tid and comm ids so
 * the remote stops retransmitting.  Returns 0 or a negative errno.
 */
static int cm_issue_drep(struct cm_port *port,
struct ib_mad_recv_wc *mad_recv_wc)
{
struct ib_mad_send_buf *msg = NULL;
struct cm_dreq_msg *dreq_msg;
struct cm_drep_msg *drep_msg;
int ret;
ret = cm_alloc_response_msg(port, mad_recv_wc, &msg);
if (ret)
return ret;
dreq_msg = (struct cm_dreq_msg *) mad_recv_wc->recv_buf.mad;
drep_msg = (struct cm_drep_msg *) msg->mad;
cm_format_mad_hdr(&drep_msg->hdr, CM_DREP_ATTR_ID, dreq_msg->hdr.tid);
/* Swap perspective: their local id is our remote id and vice versa. */
drep_msg->remote_comm_id = dreq_msg->local_comm_id;
drep_msg->local_comm_id = dreq_msg->remote_comm_id;
ret = ib_post_send_mad(msg, NULL);
if (ret)
cm_free_msg(msg);
return ret;
}
/*
 * cm_dreq_handler - process a received DREQ (disconnect request).
 *
 * Validates the target QPN and connection state, cancels any pending
 * retransmits, and moves the id to IB_CM_DREQ_RCVD.  A DREQ for an
 * unknown id gets a synthesized DREP; one arriving in timewait gets
 * our stored DREP resent.  Returns 0 on success, -EINVAL otherwise.
 */
static int cm_dreq_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_dreq_msg *dreq_msg;
struct ib_mad_send_buf *msg = NULL;
int ret;
dreq_msg = (struct cm_dreq_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
dreq_msg->local_comm_id);
if (!cm_id_priv) {
/* Unknown id: answer anyway so the peer stops retransmitting. */
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
cm_issue_drep(work->port, work->mad_recv_wc);
return -EINVAL;
}
work->cm_event.private_data = &dreq_msg->private_data;
spin_lock_irq(&cm_id_priv->lock);
/* The DREQ must target the QPN this connection actually uses. */
if (cm_id_priv->local_qpn != cm_dreq_get_remote_qpn(dreq_msg))
goto unlock;
switch (cm_id_priv->id.state) {
case IB_CM_REP_SENT:
case IB_CM_DREQ_SENT:
/* Cancel our own pending REP/DREQ retransmit. */
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
break;
case IB_CM_ESTABLISHED:
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT ||
cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
break;
case IB_CM_MRA_REP_RCVD:
break;
case IB_CM_TIMEWAIT:
/* Our DREP was lost; resend it from stored private data. */
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
goto unlock;
cm_format_drep((struct cm_drep_msg *) msg->mad, cm_id_priv,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
if (ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
case IB_CM_DREQ_RCVD:
/* Already handling a DREQ; this one is a duplicate. */
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_DREQ_COUNTER]);
goto unlock;
default:
goto unlock;
}
cm_id_priv->id.state = IB_CM_DREQ_RCVD;
cm_id_priv->tid = dreq_msg->hdr.tid;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
deref: cm_deref_id(cm_id_priv);
return -EINVAL;
}
/*
 * cm_drep_handler - process a received DREP (disconnect reply).
 *
 * Valid while we have a DREQ in flight (sent or received); moves the
 * id into timewait and cancels the DREQ retransmit.  Returns 0 on
 * success, -EINVAL for unmatched ids or wrong state.
 */
static int cm_drep_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_drep_msg *drep_msg;
int ret;
drep_msg = (struct cm_drep_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(drep_msg->remote_comm_id,
drep_msg->local_comm_id);
if (!cm_id_priv)
return -EINVAL;
work->cm_event.private_data = &drep_msg->private_data;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_DREQ_SENT &&
cm_id_priv->id.state != IB_CM_DREQ_RCVD) {
spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_enter_timewait(cm_id_priv);
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
/*
 * ib_send_cm_rej - reject a connection request or reply.
 * @reason: REJ reason code placed on the wire
 * @ari/@ari_length: optional additional rejection info
 *
 * Rejecting before we have sent a REP resets the id to idle; rejecting
 * after REP_SENT enters timewait (the peer may still hold connection
 * state).  Returns 0 or a negative errno.
 */
int ib_send_cm_rej(struct ib_cm_id *cm_id,
enum ib_cm_rej_reason reason,
void *ari,
u8 ari_length,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
if ((private_data && private_data_len > IB_CM_REJ_PRIVATE_DATA_SIZE) ||
(ari && ari_length > IB_CM_REJ_ARI_LENGTH))
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
switch (cm_id->state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
ret = cm_alloc_msg(cm_id_priv, &msg);
if (!ret)
cm_format_rej((struct cm_rej_msg *) msg->mad,
cm_id_priv, reason, ari, ari_length,
private_data, private_data_len);
/* Reset to idle even if the REJ could not be allocated. */
cm_reset_to_idle(cm_id_priv);
break;
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
ret = cm_alloc_msg(cm_id_priv, &msg);
if (!ret)
cm_format_rej((struct cm_rej_msg *) msg->mad,
cm_id_priv, reason, ari, ari_length,
private_data, private_data_len);
/* Peer saw our REP, so we must go through timewait. */
cm_enter_timewait(cm_id_priv);
break;
default:
ret = -EINVAL;
goto out;
}
if (ret)
goto out;
ret = ib_post_send_mad(msg, NULL);
if (ret)
cm_free_msg(msg);
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_rej);
/*
 * cm_format_rej_event - translate a received REJ MAD into the
 * ib_cm_rej_event_param delivered to the consumer's callback.
 */
static void cm_format_rej_event(struct cm_work *work)
{
struct cm_rej_msg *rej_msg;
struct ib_cm_rej_event_param *param;
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.rej_rcvd;
param->ari = rej_msg->ari;
param->ari_length = cm_rej_get_reject_info_len(rej_msg);
param->reason = __be16_to_cpu(rej_msg->reason);
work->cm_event.private_data = &rej_msg->private_data;
}
/*
 * cm_acquire_rejected_id - find the cm_id a received REJ refers to.
 *
 * A timeout REJ carries no usable comm id for us, so it is matched via
 * the timewait table using the CA GUID embedded in the ARI.  A REJ of
 * our REQ is matched on local id alone (remote id was never learned);
 * other REJs match on both ids.  Returns a referenced cm_id_private or
 * NULL.
 */
static struct cm_id_private * cm_acquire_rejected_id(struct cm_rej_msg *rej_msg)
{
struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
__be32 remote_id;
remote_id = rej_msg->local_comm_id;
if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_TIMEOUT) {
spin_lock_irq(&cm.lock);
/* ARI holds the remote CA GUID for timeout rejections. */
timewait_info = cm_find_remote_id( *((__be64 *) rej_msg->ari),
remote_id);
if (!timewait_info) {
spin_unlock_irq(&cm.lock);
return NULL;
}
/* local_id is XOR-obfuscated in the idr; undo before lookup. */
cm_id_priv = idr_find(&cm.local_id_table, (__force int)
(timewait_info->work.local_id ^
cm.random_id_operand));
if (cm_id_priv) {
if (cm_id_priv->id.remote_id == remote_id)
atomic_inc(&cm_id_priv->refcount);
else
cm_id_priv = NULL;
}
spin_unlock_irq(&cm.lock);
} else if (cm_rej_get_msg_rejected(rej_msg) == CM_MSG_RESPONSE_REQ)
cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, 0);
else
cm_id_priv = cm_acquire_id(rej_msg->remote_comm_id, remote_id);
return cm_id_priv;
}
/*
 * cm_rej_handler - process a received REJ message.
 *
 * Cancels any pending retransmit, then resets the id to idle or moves
 * it to timewait depending on how far the handshake had progressed
 * (and, for stale-connection rejects, regardless of progress).
 * Returns 0 on success, -EINVAL for unmatched ids or invalid states.
 */
static int cm_rej_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_rej_msg *rej_msg;
int ret;
rej_msg = (struct cm_rej_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_rejected_id(rej_msg);
if (!cm_id_priv)
return -EINVAL;
cm_format_rej_event(work);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
case IB_CM_MRA_REQ_RCVD:
case IB_CM_REP_SENT:
case IB_CM_MRA_REP_RCVD:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
/* fall through */
case IB_CM_REQ_RCVD:
case IB_CM_MRA_REQ_SENT:
/* Stale-connection rejects require timewait; others reset. */
if (__be16_to_cpu(rej_msg->reason) == IB_CM_REJ_STALE_CONN)
cm_enter_timewait(cm_id_priv);
else
cm_reset_to_idle(cm_id_priv);
break;
case IB_CM_DREQ_SENT:
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
/* fall through */
case IB_CM_REP_RCVD:
case IB_CM_MRA_REP_SENT:
cm_enter_timewait(cm_id_priv);
break;
case IB_CM_ESTABLISHED:
if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT ||
cm_id_priv->id.lap_state == IB_CM_LAP_SENT) {
/* Only a LAP in flight has a MAD to cancel. */
if (cm_id_priv->id.lap_state == IB_CM_LAP_SENT)
ib_cancel_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg);
cm_enter_timewait(cm_id_priv);
break;
}
/* fall through */
default:
spin_unlock_irq(&cm_id_priv->lock);
ret = -EINVAL;
goto out;
}
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
/*
 * ib_send_cm_mra - send a "message receipt acknowledged" to extend the
 * peer's response timeout for a received REQ, REP, or LAP.
 * @service_timeout: timeout value; IB_CM_MRA_FLAG_DELAY suppresses the
 * actual send and only records the timeout locally.
 *
 * Returns 0 or a negative errno.
 */
int ib_send_cm_mra(struct ib_cm_id *cm_id,
u8 service_timeout,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
enum ib_cm_state cm_state;
enum ib_cm_lap_state lap_state;
enum cm_msg_response msg_response;
void *data;
unsigned long flags;
int ret;
if (private_data && private_data_len > IB_CM_MRA_PRIVATE_DATA_SIZE)
return -EINVAL;
data = cm_copy_private_data(private_data, private_data_len);
if (IS_ERR(data))
return PTR_ERR(data);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
/* Pick the post-MRA state and which message type is being MRAed. */
switch(cm_id_priv->id.state) {
case IB_CM_REQ_RCVD:
cm_state = IB_CM_MRA_REQ_SENT;
lap_state = cm_id->lap_state;
msg_response = CM_MSG_RESPONSE_REQ;
break;
case IB_CM_REP_RCVD:
cm_state = IB_CM_MRA_REP_SENT;
lap_state = cm_id->lap_state;
msg_response = CM_MSG_RESPONSE_REP;
break;
case IB_CM_ESTABLISHED:
if (cm_id->lap_state == IB_CM_LAP_RCVD) {
cm_state = cm_id->state;
lap_state = IB_CM_MRA_LAP_SENT;
msg_response = CM_MSG_RESPONSE_OTHER;
break;
}
/* fall through: ESTABLISHED without a received LAP is invalid */
default:
ret = -EINVAL;
goto error1;
}
if (!(service_timeout & IB_CM_MRA_FLAG_DELAY)) {
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto error1;
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
msg_response, service_timeout,
private_data, private_data_len);
ret = ib_post_send_mad(msg, NULL);
if (ret)
goto error2;
}
cm_id->state = cm_state;
cm_id->lap_state = lap_state;
/* Remembered so duplicate-message handlers can resend this MRA. */
cm_id_priv->service_timeout = service_timeout;
cm_set_private_data(cm_id_priv, data, private_data_len);
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return 0;
error1: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
return ret;
error2: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
kfree(data);
cm_free_msg(msg);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_mra);
/*
 * cm_acquire_mraed_id - find the cm_id a received MRA refers to.
 *
 * An MRA of our REQ arrives before we learn the remote id, so it is
 * matched on local id only; MRAs of REP/LAP match on both ids.
 * Returns a referenced cm_id_private or NULL.
 */
static struct cm_id_private * cm_acquire_mraed_id(struct cm_mra_msg *mra_msg)
{
switch (cm_mra_get_msg_mraed(mra_msg)) {
case CM_MSG_RESPONSE_REQ:
return cm_acquire_id(mra_msg->remote_comm_id, 0);
case CM_MSG_RESPONSE_REP:
case CM_MSG_RESPONSE_OTHER:
return cm_acquire_id(mra_msg->remote_comm_id,
mra_msg->local_comm_id);
default:
return NULL;
}
}
/*
 * cm_mra_handler - process a received MRA message.
 *
 * Extends the retransmit timeout of our in-flight REQ/REP/LAP (via
 * ib_modify_mad) and advances the id to the corresponding MRA_*_RCVD
 * state.  The MRAed message class must match our current state, or the
 * MRA is dropped.  Returns 0 on success, -EINVAL otherwise.
 */
static int cm_mra_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_mra_msg *mra_msg;
int timeout, ret;
mra_msg = (struct cm_mra_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_mraed_id(mra_msg);
if (!cm_id_priv)
return -EINVAL;
work->cm_event.private_data = &mra_msg->private_data;
work->cm_event.param.mra_rcvd.service_timeout =
cm_mra_get_service_timeout(mra_msg);
/* New timeout = peer's service timeout plus the path round trip. */
timeout = cm_convert_to_ms(cm_mra_get_service_timeout(mra_msg)) +
cm_convert_to_ms(cm_id_priv->av.timeout);
spin_lock_irq(&cm_id_priv->lock);
switch (cm_id_priv->id.state) {
case IB_CM_REQ_SENT:
if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REQ ||
ib_modify_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg, timeout))
goto out;
cm_id_priv->id.state = IB_CM_MRA_REQ_RCVD;
break;
case IB_CM_REP_SENT:
if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_REP ||
ib_modify_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg, timeout))
goto out;
cm_id_priv->id.state = IB_CM_MRA_REP_RCVD;
break;
case IB_CM_ESTABLISHED:
if (cm_mra_get_msg_mraed(mra_msg) != CM_MSG_RESPONSE_OTHER ||
cm_id_priv->id.lap_state != IB_CM_LAP_SENT ||
ib_modify_mad(cm_id_priv->av.port->mad_agent,
cm_id_priv->msg, timeout)) {
/* Already in MRA_LAP_RCVD: this MRA is a duplicate. */
if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
atomic_long_inc(&work->port->
counter_group[CM_RECV_DUPLICATES].
counter[CM_MRA_COUNTER]);
goto out;
}
cm_id_priv->id.lap_state = IB_CM_MRA_LAP_RCVD;
break;
case IB_CM_MRA_REQ_RCVD:
case IB_CM_MRA_REP_RCVD:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_MRA_COUNTER]);
/* fall through */
default:
goto out;
}
/* Keep the send context in sync with the new state. */
cm_id_priv->msg->context[1] = (void *) (unsigned long)
cm_id_priv->id.state;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
spin_unlock_irq(&cm_id_priv->lock);
cm_deref_id(cm_id_priv);
return -EINVAL;
}
/*
 * cm_format_lap - fill in a LAP (load alternate path) MAD from an SA
 * path record describing the proposed alternate path.
 */
static void cm_format_lap(struct cm_lap_msg *lap_msg,
struct cm_id_private *cm_id_priv,
struct ib_sa_path_rec *alternate_path,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&lap_msg->hdr, CM_LAP_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_LAP));
lap_msg->local_comm_id = cm_id_priv->id.local_id;
lap_msg->remote_comm_id = cm_id_priv->id.remote_id;
cm_lap_set_remote_qpn(lap_msg, cm_id_priv->remote_qpn);
/* todo: need remote CM response timeout */
cm_lap_set_remote_resp_timeout(lap_msg, 0x1F);
lap_msg->alt_local_lid = alternate_path->slid;
lap_msg->alt_remote_lid = alternate_path->dlid;
lap_msg->alt_local_gid = alternate_path->sgid;
lap_msg->alt_remote_gid = alternate_path->dgid;
cm_lap_set_flow_label(lap_msg, alternate_path->flow_label);
cm_lap_set_traffic_class(lap_msg, alternate_path->traffic_class);
lap_msg->alt_hop_limit = alternate_path->hop_limit;
cm_lap_set_packet_rate(lap_msg, alternate_path->rate);
cm_lap_set_sl(lap_msg, alternate_path->sl);
cm_lap_set_subnet_local(lap_msg, 1); /* local only... */
/* Ack timeout combines the local CA's ack delay with path life time. */
cm_lap_set_local_ack_timeout(lap_msg,
cm_ack_timeout(cm_id_priv->av.port->cm_dev->ack_delay,
alternate_path->packet_life_time));
if (private_data && private_data_len)
memcpy(lap_msg->private_data, private_data, private_data_len);
}
/*
 * ib_send_cm_lap - request that the connection load an alternate path.
 *
 * Valid only on an established connection with no LAP already in
 * progress.  On success the lap_state moves to IB_CM_LAP_SENT.
 * Returns 0 or a negative errno.
 */
int ib_send_cm_lap(struct ib_cm_id *cm_id,
struct ib_sa_path_rec *alternate_path,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
if (private_data && private_data_len > IB_CM_LAP_PRIVATE_DATA_SIZE)
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED ||
(cm_id->lap_state != IB_CM_LAP_UNINIT &&
cm_id->lap_state != IB_CM_LAP_IDLE)) {
ret = -EINVAL;
goto out;
}
/* Resolve the alternate path into an address vector first. */
ret = cm_init_av_by_path(alternate_path, &cm_id_priv->alt_av);
if (ret)
goto out;
cm_id_priv->alt_av.timeout =
cm_ack_timeout(cm_id_priv->target_ack_delay,
cm_id_priv->alt_av.timeout - 1);
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
cm_format_lap((struct cm_lap_msg *) msg->mad, cm_id_priv,
alternate_path, private_data, private_data_len);
msg->timeout_ms = cm_id_priv->timeout_ms;
msg->context[1] = (void *) (unsigned long) IB_CM_ESTABLISHED;
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
cm_id->lap_state = IB_CM_LAP_SENT;
cm_id_priv->msg = msg;
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_lap);
/*
 * cm_format_path_from_lap - build an SA path record from a received
 * LAP message, viewed from the local (receiving) side: the message's
 * "local" fields describe the remote end, so src/dst are swapped.
 */
static void cm_format_path_from_lap(struct cm_id_private *cm_id_priv,
struct ib_sa_path_rec *path,
struct cm_lap_msg *lap_msg)
{
memset(path, 0, sizeof *path);
/* Sender's local lid/gid is our destination, and vice versa. */
path->dgid = lap_msg->alt_local_gid;
path->sgid = lap_msg->alt_remote_gid;
path->dlid = lap_msg->alt_local_lid;
path->slid = lap_msg->alt_remote_lid;
path->flow_label = cm_lap_get_flow_label(lap_msg);
path->hop_limit = lap_msg->alt_hop_limit;
path->traffic_class = cm_lap_get_traffic_class(lap_msg);
path->reversible = 1;
path->pkey = cm_id_priv->pkey;
path->sl = cm_lap_get_sl(lap_msg);
path->mtu_selector = IB_SA_EQ;
path->mtu = cm_id_priv->path_mtu;
path->rate_selector = IB_SA_EQ;
path->rate = cm_lap_get_packet_rate(lap_msg);
path->packet_life_time_selector = IB_SA_EQ;
path->packet_life_time = cm_lap_get_local_ack_timeout(lap_msg);
/* Remove the extra unit the sender folded in for the ack timeout. */
path->packet_life_time -= (path->packet_life_time > 0);
}
/*
 * cm_lap_handler - process a received LAP (load alternate path) request.
 *
 * Valid only on an established connection; a LAP arriving while an MRA
 * for a previous LAP is outstanding gets the MRA resent, and one
 * arriving while a LAP is already being processed is counted as a
 * duplicate.  On acceptance, resolves the alternate path and moves
 * lap_state to IB_CM_LAP_RCVD.  Returns 0 on success, -EINVAL otherwise.
 */
static int cm_lap_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_lap_msg *lap_msg;
struct ib_cm_lap_event_param *param;
struct ib_mad_send_buf *msg = NULL;
int ret;
/* todo: verify LAP request and send reject APR if invalid. */
lap_msg = (struct cm_lap_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(lap_msg->remote_comm_id,
lap_msg->local_comm_id);
if (!cm_id_priv)
return -EINVAL;
param = &work->cm_event.param.lap_rcvd;
param->alternate_path = &work->path[0];
cm_format_path_from_lap(cm_id_priv, param->alternate_path, lap_msg);
work->cm_event.private_data = &lap_msg->private_data;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED)
goto unlock;
switch (cm_id_priv->id.lap_state) {
case IB_CM_LAP_UNINIT:
case IB_CM_LAP_IDLE:
break;
case IB_CM_MRA_LAP_SENT:
/* Our MRA was lost; rebuild and resend it. */
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_LAP_COUNTER]);
if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
goto unlock;
cm_format_mra((struct cm_mra_msg *) msg->mad, cm_id_priv,
CM_MSG_RESPONSE_OTHER,
cm_id_priv->service_timeout,
cm_id_priv->private_data,
cm_id_priv->private_data_len);
spin_unlock_irq(&cm_id_priv->lock);
if (ib_post_send_mad(msg, NULL))
cm_free_msg(msg);
goto deref;
case IB_CM_LAP_RCVD:
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_LAP_COUNTER]);
goto unlock;
default:
goto unlock;
}
cm_id_priv->id.lap_state = IB_CM_LAP_RCVD;
cm_id_priv->tid = lap_msg->hdr.tid;
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
cm_init_av_by_path(param->alternate_path, &cm_id_priv->alt_av);
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
unlock: spin_unlock_irq(&cm_id_priv->lock);
deref: cm_deref_id(cm_id_priv);
return -EINVAL;
}
/*
 * cm_format_apr - fill in an APR (alternate path response) MAD.
 * @status: acceptance/rejection code for the proposed alternate path
 * @info/@info_length: optional additional status info (caller validates size)
 */
static void cm_format_apr(struct cm_apr_msg *apr_msg,
struct cm_id_private *cm_id_priv,
enum ib_cm_apr_status status,
void *info,
u8 info_length,
const void *private_data,
u8 private_data_len)
{
cm_format_mad_hdr(&apr_msg->hdr, CM_APR_ATTR_ID, cm_id_priv->tid);
apr_msg->local_comm_id = cm_id_priv->id.local_id;
apr_msg->remote_comm_id = cm_id_priv->id.remote_id;
apr_msg->ap_status = (u8) status;
if (info && info_length) {
apr_msg->info_length = info_length;
memcpy(apr_msg->info, info, info_length);
}
if (private_data && private_data_len)
memcpy(apr_msg->private_data, private_data, private_data_len);
}
/*
 * ib_send_cm_apr - respond to a received LAP (alternate path request).
 *
 * Valid only when a LAP has been received (LAP_RCVD or MRA_LAP_SENT);
 * on success the lap_state returns to IB_CM_LAP_IDLE.  Returns 0 or a
 * negative errno.
 */
int ib_send_cm_apr(struct ib_cm_id *cm_id,
enum ib_cm_apr_status status,
void *info,
u8 info_length,
const void *private_data,
u8 private_data_len)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
if ((private_data && private_data_len > IB_CM_APR_PRIVATE_DATA_SIZE) ||
(info && info_length > IB_CM_APR_INFO_LENGTH))
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state != IB_CM_ESTABLISHED ||
(cm_id->lap_state != IB_CM_LAP_RCVD &&
cm_id->lap_state != IB_CM_MRA_LAP_SENT)) {
ret = -EINVAL;
goto out;
}
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
cm_format_apr((struct cm_apr_msg *) msg->mad, cm_id_priv, status,
info, info_length, private_data, private_data_len);
ret = ib_post_send_mad(msg, NULL);
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
return ret;
}
cm_id->lap_state = IB_CM_LAP_IDLE;
out: spin_unlock_irqrestore(&cm_id_priv->lock, flags);
return ret;
}
EXPORT_SYMBOL(ib_send_cm_apr);
/*
 * cm_apr_handler - process a received APR (alternate path response).
 *
 * Completes the LAP exchange: cancels the LAP retransmit and resets
 * lap_state to IB_CM_LAP_IDLE.  Returns 0 on success, -EINVAL for an
 * unmatched reply or wrong LAP state.
 */
static int cm_apr_handler(struct cm_work *work)
{
struct cm_id_private *cm_id_priv;
struct cm_apr_msg *apr_msg;
int ret;
apr_msg = (struct cm_apr_msg *)work->mad_recv_wc->recv_buf.mad;
cm_id_priv = cm_acquire_id(apr_msg->remote_comm_id,
apr_msg->local_comm_id);
if (!cm_id_priv)
return -EINVAL; /* Unmatched reply. */
work->cm_event.param.apr_rcvd.ap_status = apr_msg->ap_status;
work->cm_event.param.apr_rcvd.apr_info = &apr_msg->info;
work->cm_event.param.apr_rcvd.info_len = apr_msg->info_length;
work->cm_event.private_data = &apr_msg->private_data;
spin_lock_irq(&cm_id_priv->lock);
if (cm_id_priv->id.state != IB_CM_ESTABLISHED ||
(cm_id_priv->id.lap_state != IB_CM_LAP_SENT &&
cm_id_priv->id.lap_state != IB_CM_MRA_LAP_RCVD)) {
spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_id_priv->id.lap_state = IB_CM_LAP_IDLE;
ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
/* The LAP send is done; clear the stale message pointer. */
cm_id_priv->msg = NULL;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
/*
 * cm_timewait_handler - handle expiry of a connection's timewait period.
 *
 * The work item IS the timewait info (it is its first member).  If the
 * id is still in IB_CM_TIMEWAIT for the same remote QPN, it becomes
 * IB_CM_IDLE and the consumer is notified.  Returns 0 on success,
 * -EINVAL if the id is gone or was reused.
 */
static int cm_timewait_handler(struct cm_work *work)
{
struct cm_timewait_info *timewait_info;
struct cm_id_private *cm_id_priv;
int ret;
timewait_info = (struct cm_timewait_info *)work;
spin_lock_irq(&cm.lock);
list_del(&timewait_info->list);
spin_unlock_irq(&cm.lock);
cm_id_priv = cm_acquire_id(timewait_info->work.local_id,
timewait_info->work.remote_id);
if (!cm_id_priv)
return -EINVAL;
spin_lock_irq(&cm_id_priv->lock);
/* QPN mismatch means the id moved on to a new connection. */
if (cm_id_priv->id.state != IB_CM_TIMEWAIT ||
cm_id_priv->remote_qpn != timewait_info->remote_qpn) {
spin_unlock_irq(&cm_id_priv->lock);
goto out;
}
cm_id_priv->id.state = IB_CM_IDLE;
ret = atomic_inc_and_test(&cm_id_priv->work_count);
if (!ret)
list_add_tail(&work->list, &cm_id_priv->work_list);
spin_unlock_irq(&cm_id_priv->lock);
if (ret)
cm_process_work(cm_id_priv, work);
else
cm_deref_id(cm_id_priv);
return 0;
out:
cm_deref_id(cm_id_priv);
return -EINVAL;
}
/*
 * cm_format_sidr_req - fill in a SIDR REQ (service ID resolution
 * request) MAD from the caller-supplied parameters.
 */
static void cm_format_sidr_req(struct cm_sidr_req_msg *sidr_req_msg,
struct cm_id_private *cm_id_priv,
struct ib_cm_sidr_req_param *param)
{
cm_format_mad_hdr(&sidr_req_msg->hdr, CM_SIDR_REQ_ATTR_ID,
cm_form_tid(cm_id_priv, CM_MSG_SEQUENCE_SIDR));
sidr_req_msg->request_id = cm_id_priv->id.local_id;
sidr_req_msg->pkey = param->path->pkey;
sidr_req_msg->service_id = param->service_id;
if (param->private_data && param->private_data_len)
memcpy(sidr_req_msg->private_data, param->private_data,
param->private_data_len);
}
/*
 * ib_send_cm_sidr_req - send a service ID resolution request.
 *
 * Requires an idle cm_id and a path record to reach the target; on
 * success the id moves to IB_CM_SIDR_REQ_SENT.  Returns 0 or a
 * negative errno.
 */
int ib_send_cm_sidr_req(struct ib_cm_id *cm_id,
struct ib_cm_sidr_req_param *param)
{
struct cm_id_private *cm_id_priv;
struct ib_mad_send_buf *msg;
unsigned long flags;
int ret;
if (!param->path || (param->private_data &&
param->private_data_len > IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE))
return -EINVAL;
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
ret = cm_init_av_by_path(param->path, &cm_id_priv->av);
if (ret)
goto out;
cm_id->service_id = param->service_id;
cm_id->service_mask = ~cpu_to_be64(0);
cm_id_priv->timeout_ms = param->timeout_ms;
cm_id_priv->max_cm_retries = param->max_cm_retries;
ret = cm_alloc_msg(cm_id_priv, &msg);
if (ret)
goto out;
cm_format_sidr_req((struct cm_sidr_req_msg *) msg->mad, cm_id_priv,
param);
msg->timeout_ms = cm_id_priv->timeout_ms;
msg->context[1] = (void *) (unsigned long) IB_CM_SIDR_REQ_SENT;
/* State is rechecked under the lock right before posting. */
spin_lock_irqsave(&cm_id_priv->lock, flags);
if (cm_id->state == IB_CM_IDLE)
ret = ib_post_send_mad(msg, NULL);
else
ret = -EINVAL;
if (ret) {
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
cm_free_msg(msg);
goto out;
}
cm_id->state = IB_CM_SIDR_REQ_SENT;
cm_id_priv->msg = msg;
spin_unlock_irqrestore(&cm_id_priv->lock, flags);
out:
return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_req);
/*
 * cm_format_sidr_req_event - translate a received SIDR REQ MAD into
 * the ib_cm_sidr_req_event_param delivered to the listener's callback.
 * @listen_id: the matched listening cm_id reported to the consumer.
 */
static void cm_format_sidr_req_event(struct cm_work *work,
struct ib_cm_id *listen_id)
{
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_cm_sidr_req_event_param *param;
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
param = &work->cm_event.param.sidr_req_rcvd;
param->pkey = __be16_to_cpu(sidr_req_msg->pkey);
param->listen_id = listen_id;
param->port = work->port->port_num;
work->cm_event.private_data = &sidr_req_msg->private_data;
}
/*
 * cm_sidr_req_handler - process a received SIDR REQ.
 *
 * Creates a fresh cm_id for the request, inserts it into the remote
 * SIDR table (dropping duplicates), matches it against the listen
 * table, and hands the event to the matched listener's handler.  An
 * unmatched request is answered with IB_SIDR_UNSUPPORTED.  Returns 0
 * on success, -EINVAL for duplicates or no listener.
 */
static int cm_sidr_req_handler(struct cm_work *work)
{
struct ib_cm_id *cm_id;
struct cm_id_private *cm_id_priv, *cur_cm_id_priv;
struct cm_sidr_req_msg *sidr_req_msg;
struct ib_wc *wc;
cm_id = ib_create_cm_id(work->port->cm_dev->ib_device, NULL, NULL);
if (IS_ERR(cm_id))
return PTR_ERR(cm_id);
cm_id_priv = container_of(cm_id, struct cm_id_private, id);
/* Record SGID/SLID and request ID for lookup. */
sidr_req_msg = (struct cm_sidr_req_msg *)
work->mad_recv_wc->recv_buf.mad;
wc = work->mad_recv_wc->wc;
cm_id_priv->av.dgid.global.subnet_prefix = cpu_to_be64(wc->slid);
cm_id_priv->av.dgid.global.interface_id = 0;
cm_init_av_for_response(work->port, work->mad_recv_wc->wc,
work->mad_recv_wc->recv_buf.grh,
&cm_id_priv->av);
cm_id_priv->id.remote_id = sidr_req_msg->request_id;
cm_id_priv->tid = sidr_req_msg->hdr.tid;
atomic_inc(&cm_id_priv->work_count);
spin_lock_irq(&cm.lock);
cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
if (cur_cm_id_priv) {
spin_unlock_irq(&cm.lock);
atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
counter[CM_SIDR_REQ_COUNTER]);
goto out; /* Duplicate message. */
}
cm_id_priv->id.state = IB_CM_SIDR_REQ_RCVD;
cur_cm_id_priv = cm_find_listen(cm_id->device,
sidr_req_msg->service_id,
sidr_req_msg->private_data);
if (!cur_cm_id_priv) {
spin_unlock_irq(&cm.lock);
cm_reject_sidr_req(cm_id_priv, IB_SIDR_UNSUPPORTED);
goto out; /* No match. */
}
/* Hold both ids while the listener's handler runs. */
atomic_inc(&cur_cm_id_priv->refcount);
atomic_inc(&cm_id_priv->refcount);
spin_unlock_irq(&cm.lock);
/* The new id inherits the listener's handler and context. */
cm_id_priv->id.cm_handler = cur_cm_id_priv->id.cm_handler;
cm_id_priv->id.context = cur_cm_id_priv->id.context;
cm_id_priv->id.service_id = sidr_req_msg->service_id;
cm_id_priv->id.service_mask = ~cpu_to_be64(0);
cm_format_sidr_req_event(work, &cur_cm_id_priv->id);
cm_process_work(cm_id_priv, work);
cm_deref_id(cur_cm_id_priv);
return 0;
out:
ib_destroy_cm_id(&cm_id_priv->id);
return -EINVAL;
}
/* Build a SIDR REP message from the id state and the caller's parameters. */
static void cm_format_sidr_rep(struct cm_sidr_rep_msg *sidr_rep_msg,
                               struct cm_id_private *cm_id_priv,
                               struct ib_cm_sidr_rep_param *param)
{
        /* MAD header first, reusing the transaction id from the SIDR REQ. */
        cm_format_mad_hdr(&sidr_rep_msg->hdr, CM_SIDR_REP_ATTR_ID,
                          cm_id_priv->tid);

        /* Fixed-size fields. */
        sidr_rep_msg->request_id = cm_id_priv->id.remote_id;
        sidr_rep_msg->service_id = cm_id_priv->id.service_id;
        sidr_rep_msg->status = param->status;
        sidr_rep_msg->qkey = cpu_to_be32(param->qkey);
        cm_sidr_rep_set_qpn(sidr_rep_msg, cpu_to_be32(param->qp_num));

        /* Optional payloads, copied only when present and non-empty. */
        if (param->info && param->info_length) {
                memcpy(sidr_rep_msg->info, param->info, param->info_length);
        }
        if (param->private_data && param->private_data_len) {
                memcpy(sidr_rep_msg->private_data, param->private_data,
                       param->private_data_len);
        }
}
/*
 * Send a SIDR REP in response to a received SIDR REQ.  Valid only in
 * the IB_CM_SIDR_REQ_RCVD state; on success the id returns to IDLE and
 * is removed from the remote SIDR table.
 */
int ib_send_cm_sidr_rep(struct ib_cm_id *cm_id,
                        struct ib_cm_sidr_rep_param *param)
{
        struct cm_id_private *cm_id_priv;
        struct ib_mad_send_buf *msg;
        unsigned long flags;
        int ret;

        /* Reject payloads that do not fit in the SIDR REP message. */
        if ((param->info && param->info_length > IB_CM_SIDR_REP_INFO_LENGTH) ||
            (param->private_data &&
             param->private_data_len > IB_CM_SIDR_REP_PRIVATE_DATA_SIZE))
                return -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state != IB_CM_SIDR_REQ_RCVD) {
                ret = -EINVAL;
                goto error;
        }
        ret = cm_alloc_msg(cm_id_priv, &msg);
        if (ret)
                goto error;

        cm_format_sidr_rep((struct cm_sidr_rep_msg *) msg->mad, cm_id_priv,
                           param);
        ret = ib_post_send_mad(msg, NULL);
        if (ret) {
                spin_unlock_irqrestore(&cm_id_priv->lock, flags);
                cm_free_msg(msg);
                return ret;
        }
        cm_id->state = IB_CM_IDLE;
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        /*
         * NOTE(review): sidr_id_node is erased unconditionally here.  If
         * this function can run on an id that was never inserted into
         * remote_sidr_table (or was already erased), rb_erase() on a node
         * that is not in the tree corrupts the tree.  Later kernels guard
         * this with RB_EMPTY_NODE()/RB_CLEAR_NODE() -- confirm whether that
         * fix is needed on this branch.
         */
        spin_lock_irqsave(&cm.lock, flags);
        rb_erase(&cm_id_priv->sidr_id_node, &cm.remote_sidr_table);
        spin_unlock_irqrestore(&cm.lock, flags);
        return 0;
error:  spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
EXPORT_SYMBOL(ib_send_cm_sidr_rep);
/* Translate a received SIDR REP MAD into the consumer-visible event. */
static void cm_format_sidr_rep_event(struct cm_work *work)
{
        struct cm_sidr_rep_msg *msg;
        struct ib_cm_sidr_rep_event_param *rep;

        msg = (struct cm_sidr_rep_msg *) work->mad_recv_wc->recv_buf.mad;
        rep = &work->cm_event.param.sidr_rep_rcvd;

        /* Copy the wire fields into the event parameters. */
        rep->status = msg->status;
        rep->qkey = be32_to_cpu(msg->qkey);
        rep->qpn = be32_to_cpu(cm_sidr_rep_get_qpn(msg));
        rep->info = &msg->info;
        rep->info_len = msg->info_length;
        work->cm_event.private_data = &msg->private_data;
}
/*
 * Handle a received SIDR REP: match it to the outstanding SIDR REQ by
 * request id, cancel the retransmitting MAD, and deliver the event.
 * Returns 0 when the work item was consumed, -EINVAL otherwise.
 */
static int cm_sidr_rep_handler(struct cm_work *work)
{
        struct cm_sidr_rep_msg *sidr_rep_msg;
        struct cm_id_private *cm_id_priv;

        sidr_rep_msg = (struct cm_sidr_rep_msg *)
                                work->mad_recv_wc->recv_buf.mad;
        /* Look up the local id the reply refers to (takes a reference). */
        cm_id_priv = cm_acquire_id(sidr_rep_msg->request_id, 0);
        if (!cm_id_priv)
                return -EINVAL; /* Unmatched reply. */

        spin_lock_irq(&cm_id_priv->lock);
        if (cm_id_priv->id.state != IB_CM_SIDR_REQ_SENT) {
                /* Stale or duplicate reply; drop it. */
                spin_unlock_irq(&cm_id_priv->lock);
                goto out;
        }
        cm_id_priv->id.state = IB_CM_IDLE;
        /* Stop retransmission of the SIDR REQ we sent. */
        ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
        spin_unlock_irq(&cm_id_priv->lock);

        cm_format_sidr_rep_event(work);
        /*
         * NOTE(review): by symmetry with the 'out' path, cm_process_work()
         * appears to consume the reference taken by cm_acquire_id() --
         * confirm against its definition.
         */
        cm_process_work(cm_id_priv, work);
        return 0;
out:
        cm_deref_id(cm_id_priv);
        return -EINVAL;
}
/*
 * Handle a failed MAD send completion for a message that was expecting
 * a response.  Translates the cm_id state recorded at send time into
 * the corresponding *_ERROR event and reports it to the consumer.
 */
static void cm_process_send_error(struct ib_mad_send_buf *msg,
                                  enum ib_wc_status wc_status)
{
        struct cm_id_private *cm_id_priv;
        struct ib_cm_event cm_event;
        enum ib_cm_state state;
        int ret;

        memset(&cm_event, 0, sizeof cm_event);
        cm_id_priv = msg->context[0];

        /* Discard old sends or ones without a response. */
        spin_lock_irq(&cm_id_priv->lock);
        /* context[1] holds the cm_id state at the time of the send. */
        state = (enum ib_cm_state) (unsigned long) msg->context[1];
        if (msg != cm_id_priv->msg || state != cm_id_priv->id.state)
                goto discard;

        switch (state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
                cm_reset_to_idle(cm_id_priv);
                cm_event.event = IB_CM_REQ_ERROR;
                break;
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                cm_reset_to_idle(cm_id_priv);
                cm_event.event = IB_CM_REP_ERROR;
                break;
        case IB_CM_DREQ_SENT:
                cm_enter_timewait(cm_id_priv);
                cm_event.event = IB_CM_DREQ_ERROR;
                break;
        case IB_CM_SIDR_REQ_SENT:
                cm_id_priv->id.state = IB_CM_IDLE;
                cm_event.event = IB_CM_SIDR_REQ_ERROR;
                break;
        default:
                goto discard;
        }
        spin_unlock_irq(&cm_id_priv->lock);

        cm_event.param.send_status = wc_status;

        /* No other events can occur on the cm_id at this point. */
        ret = cm_id_priv->id.cm_handler(&cm_id_priv->id, &cm_event);
        cm_free_msg(msg);
        if (ret)
                /* A non-zero handler return asks us to destroy the id. */
                ib_destroy_cm_id(&cm_id_priv->id);
        return;
discard:
        spin_unlock_irq(&cm_id_priv->lock);
        cm_free_msg(msg);
}
/*
 * MAD agent send-completion callback.  Updates the transmit counters
 * and either frees the message or escalates the failure to
 * cm_process_send_error().
 */
static void cm_send_handler(struct ib_mad_agent *mad_agent,
                            struct ib_mad_send_wc *mad_send_wc)
{
        struct ib_mad_send_buf *msg = mad_send_wc->send_buf;
        struct cm_port *port;
        u16 attr_index;

        port = mad_agent->context;
        /* Counter slot is derived from the MAD attribute id. */
        attr_index = be16_to_cpu(((struct ib_mad_hdr *)
                                  msg->mad)->attr_id) - CM_ATTR_ID_OFFSET;
        /*
         * If the send was in response to a received message (context[0] is not
         * set to a cm_id), and is not a REJ, then it is a send that was
         * manually retried.
         */
        if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
                msg->retries = 1;

        /* Count the original transmit plus every retry. */
        atomic_long_add(1 + msg->retries,
                        &port->counter_group[CM_XMIT].counter[attr_index]);
        if (msg->retries)
                atomic_long_add(msg->retries,
                                &port->counter_group[CM_XMIT_RETRIES].
                                counter[attr_index]);

        switch (mad_send_wc->status) {
        case IB_WC_SUCCESS:
        case IB_WC_WR_FLUSH_ERR:
                cm_free_msg(msg);
                break;
        default:
                /* Only sends tied to a cm_id with a recorded state can be
                 * turned into an error event; everything else is dropped. */
                if (msg->context[0] && msg->context[1])
                        cm_process_send_error(msg, mad_send_wc->status);
                else
                        cm_free_msg(msg);
                break;
        }
}
static void cm_work_handler(struct work_struct *_work)
{
struct cm_work *work = container_of(_work, struct cm_work, work.work);
int ret;
switch (work->cm_event.event) {
case IB_CM_REQ_RECEIVED:
ret = cm_req_handler(work);
break;
case IB_CM_MRA_RECEIVED:
ret = cm_mra_handler(work);
break;
case IB_CM_REJ_RECEIVED:
ret = cm_rej_handler(work);
break;
case IB_CM_REP_RECEIVED:
ret = cm_rep_handler(work);
break;
case IB_CM_RTU_RECEIVED:
ret = cm_rtu_handler(work);
break;
case IB_CM_USER_ESTABLISHED:
ret = cm_establish_handler(work);
break;
case IB_CM_DREQ_RECEIVED:
ret = cm_dreq_handler(work);
break;
case IB_CM_DREP_RECEIVED:
ret = cm_drep_handler(work);
break;
case IB_CM_SIDR_REQ_RECEIVED:
ret = cm_sidr_req_handler(work);
break;
case IB_CM_SIDR_REP_RECEIVED:
ret = cm_sidr_rep_handler(work);
break;
case IB_CM_LAP_RECEIVED:
ret = cm_lap_handler(work);
break;
case IB_CM_APR_RECEIVED:
ret = cm_apr_handler(work);
break;
case IB_CM_TIMEWAIT_EXIT:
ret = cm_timewait_handler(work);
break;
default:
ret = -EINVAL;
break;
}
if (ret)
cm_free_work(work);
}
/*
 * Transition a cm_id to ESTABLISHED on behalf of the consumer and queue
 * an IB_CM_USER_ESTABLISHED work item so the event is delivered through
 * the normal work path.  Returns 0, -EISCONN if already established,
 * -EINVAL for other states, or -ENOMEM.
 */
static int cm_establish(struct ib_cm_id *cm_id)
{
        struct cm_id_private *cm_id_priv;
        struct cm_work *work;
        unsigned long flags;
        int ret = 0;

        /* GFP_ATOMIC: presumably reachable from atomic context via
         * ib_cm_notify() -- confirm against callers. */
        work = kmalloc(sizeof *work, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id->state)
        {
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                cm_id->state = IB_CM_ESTABLISHED;
                break;
        case IB_CM_ESTABLISHED:
                ret = -EISCONN;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);

        if (ret) {
                kfree(work);
                goto out;
        }

        /*
         * The CM worker thread may try to destroy the cm_id before it
         * can execute this work item. To prevent potential deadlock,
         * we need to find the cm_id once we're in the context of the
         * worker thread, rather than holding a reference on it.
         */
        INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->local_id = cm_id->local_id;
        work->remote_id = cm_id->remote_id;
        work->mad_recv_wc = NULL;
        work->cm_event.event = IB_CM_USER_ESTABLISHED;
        queue_delayed_work(cm.wq, &work->work, 0);
out:
        return ret;
}
/*
 * Switch an established connection over to its alternate path
 * (path-migration event).  Fails with -EINVAL unless the connection is
 * established and LAP is uninitialized or idle.
 */
static int cm_migrate(struct ib_cm_id *cm_id)
{
        struct cm_id_private *cm_id_priv;
        unsigned long flags;
        int ret = -EINVAL;

        cm_id_priv = container_of(cm_id, struct cm_id_private, id);
        spin_lock_irqsave(&cm_id_priv->lock, flags);
        if (cm_id->state == IB_CM_ESTABLISHED &&
            (cm_id->lap_state == IB_CM_LAP_UNINIT ||
             cm_id->lap_state == IB_CM_LAP_IDLE)) {
                /* The alternate address vector becomes the primary one. */
                cm_id->lap_state = IB_CM_LAP_IDLE;
                cm_id_priv->av = cm_id_priv->alt_av;
                ret = 0;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
/*
 * Notify the CM of a QP event the consumer observed.  COMM_EST marks
 * the connection established; PATH_MIG switches to the alternate path.
 * Any other event type is rejected with -EINVAL.
 */
int ib_cm_notify(struct ib_cm_id *cm_id, enum ib_event_type event)
{
        if (event == IB_EVENT_COMM_EST)
                return cm_establish(cm_id);
        if (event == IB_EVENT_PATH_MIG)
                return cm_migrate(cm_id);
        return -EINVAL;
}
EXPORT_SYMBOL(ib_cm_notify);
/*
 * MAD agent receive callback.  Maps the MAD attribute id to a CM event,
 * bumps the receive counter, and queues a work item (allocated with
 * room for any trailing path records) for cm_work_handler().  Unknown
 * attributes and allocation failures drop the MAD.
 */
static void cm_recv_handler(struct ib_mad_agent *mad_agent,
                            struct ib_mad_recv_wc *mad_recv_wc)
{
        struct cm_port *port = mad_agent->context;
        struct cm_work *work;
        enum ib_cm_event_type event;
        u16 attr_id;
        int paths = 0;  /* number of path records to reserve after work */

        switch (mad_recv_wc->recv_buf.mad->mad_hdr.attr_id) {
        case CM_REQ_ATTR_ID:
                /* One primary path, plus an alternate if its LID is set. */
                paths = 1 + (((struct cm_req_msg *) mad_recv_wc->recv_buf.mad)->
                             alt_local_lid != 0);
                event = IB_CM_REQ_RECEIVED;
                break;
        case CM_MRA_ATTR_ID:
                event = IB_CM_MRA_RECEIVED;
                break;
        case CM_REJ_ATTR_ID:
                event = IB_CM_REJ_RECEIVED;
                break;
        case CM_REP_ATTR_ID:
                event = IB_CM_REP_RECEIVED;
                break;
        case CM_RTU_ATTR_ID:
                event = IB_CM_RTU_RECEIVED;
                break;
        case CM_DREQ_ATTR_ID:
                event = IB_CM_DREQ_RECEIVED;
                break;
        case CM_DREP_ATTR_ID:
                event = IB_CM_DREP_RECEIVED;
                break;
        case CM_SIDR_REQ_ATTR_ID:
                event = IB_CM_SIDR_REQ_RECEIVED;
                break;
        case CM_SIDR_REP_ATTR_ID:
                event = IB_CM_SIDR_REP_RECEIVED;
                break;
        case CM_LAP_ATTR_ID:
                paths = 1;
                event = IB_CM_LAP_RECEIVED;
                break;
        case CM_APR_ATTR_ID:
                event = IB_CM_APR_RECEIVED;
                break;
        default:
                /* Not a CM attribute we understand: drop the MAD. */
                ib_free_recv_mad(mad_recv_wc);
                return;
        }

        attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
        atomic_long_inc(&port->counter_group[CM_RECV].
                        counter[attr_id - CM_ATTR_ID_OFFSET]);

        /* Allocate the work item with trailing space for the path records. */
        work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
                       GFP_KERNEL);
        if (!work) {
                ib_free_recv_mad(mad_recv_wc);
                return;
        }
        INIT_DELAYED_WORK(&work->work, cm_work_handler);
        work->cm_event.event = event;
        work->mad_recv_wc = mad_recv_wc;
        work->port = port;
        queue_delayed_work(cm.wq, &work->work, 0);
}
/*
 * Fill in the QP attributes for the RESET->INIT transition.  Valid in
 * any state where the connection is being set up or is established;
 * returns -EINVAL otherwise.
 */
static int cm_init_qp_init_attr(struct cm_id_private *cm_id_priv,
                                struct ib_qp_attr *qp_attr,
                                int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_SENT:
        case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_ACCESS_FLAGS |
                                IB_QP_PKEY_INDEX | IB_QP_PORT;
                /* Remote write is always allowed; read/atomic only when
                 * the peer was granted responder resources. */
                qp_attr->qp_access_flags = IB_ACCESS_REMOTE_WRITE;
                if (cm_id_priv->responder_resources)
                        qp_attr->qp_access_flags |= IB_ACCESS_REMOTE_READ |
                                                    IB_ACCESS_REMOTE_ATOMIC;
                qp_attr->pkey_index = cm_id_priv->av.pkey_index;
                qp_attr->port_num = cm_id_priv->av.port->port_num;
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
/*
 * Fill in the QP attributes for the INIT->RTR transition from the
 * connection's primary (and optional alternate) path information.
 */
static int cm_init_qp_rtr_attr(struct cm_id_private *cm_id_priv,
                               struct ib_qp_attr *qp_attr,
                               int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                *qp_attr_mask = IB_QP_STATE | IB_QP_AV | IB_QP_PATH_MTU |
                                IB_QP_DEST_QPN | IB_QP_RQ_PSN;
                qp_attr->ah_attr = cm_id_priv->av.ah_attr;
                qp_attr->path_mtu = cm_id_priv->path_mtu;
                qp_attr->dest_qp_num = be32_to_cpu(cm_id_priv->remote_qpn);
                qp_attr->rq_psn = be32_to_cpu(cm_id_priv->rq_psn);
                /* RD-atomic / RNR attributes only for RC and XRC-TGT QPs. */
                if (cm_id_priv->qp_type == IB_QPT_RC ||
                    cm_id_priv->qp_type == IB_QPT_XRC_TGT) {
                        *qp_attr_mask |= IB_QP_MAX_DEST_RD_ATOMIC |
                                         IB_QP_MIN_RNR_TIMER;
                        qp_attr->max_dest_rd_atomic =
                                        cm_id_priv->responder_resources;
                        qp_attr->min_rnr_timer = 0;
                }
                /* A non-zero alternate DLID means an alternate path is set. */
                if (cm_id_priv->alt_av.ah_attr.dlid) {
                        *qp_attr_mask |= IB_QP_ALT_PATH;
                        qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
                        qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
                        qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
                        qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
                }
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
/*
 * Fill in the QP attributes for the RTR->RTS transition.  Before any
 * LAP exchange the timers/retry counts are set; once an alternate path
 * has been loaded (lap_state != UNINIT) only the alternate-path and
 * migration-state attributes are programmed.
 */
static int cm_init_qp_rts_attr(struct cm_id_private *cm_id_priv,
                               struct ib_qp_attr *qp_attr,
                               int *qp_attr_mask)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&cm_id_priv->lock, flags);
        switch (cm_id_priv->id.state) {
        /* Allow transition to RTS before sending REP */
        case IB_CM_REQ_RCVD:
        case IB_CM_MRA_REQ_SENT:
        case IB_CM_REP_RCVD:
        case IB_CM_MRA_REP_SENT:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
        case IB_CM_ESTABLISHED:
                if (cm_id_priv->id.lap_state == IB_CM_LAP_UNINIT) {
                        *qp_attr_mask = IB_QP_STATE | IB_QP_SQ_PSN;
                        qp_attr->sq_psn = be32_to_cpu(cm_id_priv->sq_psn);
                        switch (cm_id_priv->qp_type) {
                        case IB_QPT_RC:
                        case IB_QPT_XRC_INI:
                                /* Initiator-side retry/RD-atomic settings. */
                                *qp_attr_mask |= IB_QP_RETRY_CNT | IB_QP_RNR_RETRY |
                                                 IB_QP_MAX_QP_RD_ATOMIC;
                                qp_attr->retry_cnt = cm_id_priv->retry_count;
                                qp_attr->rnr_retry = cm_id_priv->rnr_retry_count;
                                qp_attr->max_rd_atomic = cm_id_priv->initiator_depth;
                                /* fall through */
                        case IB_QPT_XRC_TGT:
                                /* Timeout applies to RC, XRC_INI and XRC_TGT. */
                                *qp_attr_mask |= IB_QP_TIMEOUT;
                                qp_attr->timeout = cm_id_priv->av.timeout;
                                break;
                        default:
                                break;
                        }
                        if (cm_id_priv->alt_av.ah_attr.dlid) {
                                /* Arm migration when an alternate path exists. */
                                *qp_attr_mask |= IB_QP_PATH_MIG_STATE;
                                qp_attr->path_mig_state = IB_MIG_REARM;
                        }
                } else {
                        /* LAP already processed: just (re)load the alternate
                         * path and re-arm migration. */
                        *qp_attr_mask = IB_QP_ALT_PATH | IB_QP_PATH_MIG_STATE;
                        qp_attr->alt_port_num = cm_id_priv->alt_av.port->port_num;
                        qp_attr->alt_pkey_index = cm_id_priv->alt_av.pkey_index;
                        qp_attr->alt_timeout = cm_id_priv->alt_av.timeout;
                        qp_attr->alt_ah_attr = cm_id_priv->alt_av.ah_attr;
                        qp_attr->path_mig_state = IB_MIG_REARM;
                }
                ret = 0;
                break;
        default:
                ret = -EINVAL;
                break;
        }
        spin_unlock_irqrestore(&cm_id_priv->lock, flags);
        return ret;
}
/*
 * Fill in the connection-derived QP attributes for the transition to
 * the state named in qp_attr->qp_state (INIT, RTR or RTS).  Any other
 * target state yields -EINVAL.
 */
int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
                       struct ib_qp_attr *qp_attr,
                       int *qp_attr_mask)
{
        struct cm_id_private *cm_id_priv =
                container_of(cm_id, struct cm_id_private, id);

        if (qp_attr->qp_state == IB_QPS_INIT)
                return cm_init_qp_init_attr(cm_id_priv, qp_attr, qp_attr_mask);
        if (qp_attr->qp_state == IB_QPS_RTR)
                return cm_init_qp_rtr_attr(cm_id_priv, qp_attr, qp_attr_mask);
        if (qp_attr->qp_state == IB_QPS_RTS)
                return cm_init_qp_rts_attr(cm_id_priv, qp_attr, qp_attr_mask);
        return -EINVAL;
}
EXPORT_SYMBOL(ib_cm_init_qp_attr);
/* Cache the device's local CA ack delay; 0 means "derive from packet
 * life time" when the device query fails. */
static void cm_get_ack_delay(struct cm_device *cm_dev)
{
        struct ib_device_attr attr;
        int err = ib_query_device(cm_dev->ib_device, &attr);

        cm_dev->ack_delay = err ? 0 : attr.local_ca_ack_delay;
}
/* sysfs show callback: print one counter from a counter group. */
static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
                               char *buf)
{
        struct cm_counter_group *grp =
                container_of(obj, struct cm_counter_group, obj);
        struct cm_counter_attribute *cattr =
                container_of(attr, struct cm_counter_attribute, attr);

        /* The attribute's index selects the counter within the group. */
        return sprintf(buf, "%ld\n",
                       atomic_long_read(&grp->counter[cattr->index]));
}
/* Read-only sysfs accessor shared by all CM counter attributes. */
static const struct sysfs_ops cm_counter_ops = {
        .show = cm_show_counter
};

/* kobject type for a counter group; attribute list defined elsewhere. */
static struct kobj_type cm_counter_obj_type = {
        .sysfs_ops = &cm_counter_ops,
        .default_attrs = cm_counter_default_attrs
};
/* kobject release callback: the cm_port was kmalloc'ed, just free it. */
static void cm_release_port_obj(struct kobject *obj)
{
        kfree(container_of(obj, struct cm_port, port_obj));
}
/* kobject type for a CM port; dropping the last ref frees the cm_port. */
static struct kobj_type cm_port_obj_type = {
        .release = cm_release_port_obj
};
/* Place CM device nodes under /dev/infiniband/ with world-rw mode. */
static char *cm_devnode(struct device *dev, umode_t *mode)
{
        if (mode != NULL)
                *mode = 0666;
        return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}
/*
 * Device class for CM device nodes.  Exported (see EXPORT_SYMBOL below)
 * so other modules can create devices in this class.
 */
struct class cm_class = {
        .owner = THIS_MODULE,
        .name = "infiniband_cm",
        .devnode = cm_devnode,
};
EXPORT_SYMBOL(cm_class);
/*
 * Create the sysfs hierarchy for one CM port: the port kobject plus one
 * child kobject per counter group.
 *
 * On failure everything created here is released -- including @port
 * itself, which is freed through the port kobject's release callback
 * (cm_release_port_obj()) -- and a negative errno is returned.
 */
static int cm_create_port_fs(struct cm_port *port)
{
        int i, ret;

        ret = kobject_init_and_add(&port->port_obj, &cm_port_obj_type,
                                   &port->cm_dev->device->kobj,
                                   "%d", port->port_num);
        if (ret) {
                /*
                 * Even on failure kobject_init_and_add() leaves the kobject
                 * initialized, so it must be released with kobject_put()
                 * (which frees @port via cm_release_port_obj()), never
                 * kfree() -- see the kernel kobject documentation.
                 */
                kobject_put(&port->port_obj);
                return ret;
        }

        for (i = 0; i < CM_COUNTER_GROUPS; i++) {
                ret = kobject_init_and_add(&port->counter_group[i].obj,
                                           &cm_counter_obj_type,
                                           &port->port_obj,
                                           "%s", counter_group_names[i]);
                if (ret)
                        goto error;
        }
        return 0;

error:
        /* Drop the groups created so far, then the port kobject (frees port). */
        while (i--)
                kobject_put(&port->counter_group[i].obj);
        kobject_put(&port->port_obj);
        return ret;
}
/*
 * Tear down the sysfs hierarchy for one CM port.  Dropping the port
 * kobject last frees the cm_port via cm_release_port_obj().
 */
static void cm_remove_port_fs(struct cm_port *port)
{
        int grp = CM_COUNTER_GROUPS;

        while (grp-- > 0)
                kobject_put(&port->counter_group[grp].obj);
        kobject_put(&port->port_obj);
}
/*
 * ib_client add callback: set up per-device CM state.  For each
 * physical port this creates the sysfs counter hierarchy, registers a
 * GSI MAD agent for the CM management class, and sets the CM capability
 * bit on the port.  On any failure all previously initialized ports are
 * unwound and the device is ignored.
 *
 * Note: the ib_register_mad_agent() argument list previously contained
 * a mangled token ("®_req"); it is restored to &reg_req here.
 */
static void cm_add_one(struct ib_device *ib_device)
{
        struct cm_device *cm_dev;
        struct cm_port *port;
        struct ib_mad_reg_req reg_req = {
                .mgmt_class = IB_MGMT_CLASS_CM,
                .mgmt_class_version = IB_CM_CLASS_VERSION
        };
        struct ib_port_modify port_modify = {
                .set_port_cap_mask = IB_PORT_CM_SUP
        };
        unsigned long flags;
        int ret;
        u8 i;

        /* The CM only runs over IB transports. */
        if (rdma_node_get_transport(ib_device->node_type) != RDMA_TRANSPORT_IB)
                return;

        cm_dev = kzalloc(sizeof(*cm_dev) + sizeof(*port) *
                         ib_device->phys_port_cnt, GFP_KERNEL);
        if (!cm_dev)
                return;

        cm_dev->ib_device = ib_device;
        cm_get_ack_delay(cm_dev);

        cm_dev->device = device_create(&cm_class, &ib_device->dev,
                                       MKDEV(0, 0), NULL,
                                       "%s", ib_device->name);
        if (IS_ERR(cm_dev->device)) {
                kfree(cm_dev);
                return;
        }

        set_bit(IB_MGMT_METHOD_SEND, reg_req.method_mask);
        for (i = 1; i <= ib_device->phys_port_cnt; i++) {
                port = kzalloc(sizeof *port, GFP_KERNEL);
                if (!port)
                        goto error1;

                cm_dev->port[i-1] = port;
                port->cm_dev = cm_dev;
                port->port_num = i;
                ret = cm_create_port_fs(port);
                if (ret)
                        goto error1;

                port->mad_agent = ib_register_mad_agent(ib_device, i,
                                                        IB_QPT_GSI,
                                                        &reg_req,
                                                        0,
                                                        cm_send_handler,
                                                        cm_recv_handler,
                                                        port);
                if (IS_ERR(port->mad_agent))
                        goto error2;

                ret = ib_modify_port(ib_device, i, 0, &port_modify);
                if (ret)
                        goto error3;
        }
        ib_set_client_data(ib_device, &cm_client, cm_dev);

        write_lock_irqsave(&cm.device_lock, flags);
        list_add_tail(&cm_dev->list, &cm.device_list);
        write_unlock_irqrestore(&cm.device_lock, flags);
        return;

error3:
        ib_unregister_mad_agent(port->mad_agent);
error2:
        cm_remove_port_fs(port);
error1:
        /* Undo the capability bit and teardown on all earlier ports. */
        port_modify.set_port_cap_mask = 0;
        port_modify.clr_port_cap_mask = IB_PORT_CM_SUP;
        while (--i) {
                port = cm_dev->port[i-1];
                ib_modify_port(ib_device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
                cm_remove_port_fs(port);
        }
        device_unregister(cm_dev->device);
        kfree(cm_dev);
}
/*
 * ib_client remove callback: tear down the per-device CM state created
 * by cm_add_one(), in reverse order.
 */
static void cm_remove_one(struct ib_device *ib_device)
{
        struct cm_device *cm_dev;
        struct cm_port *port;
        struct ib_port_modify port_modify = {
                .clr_port_cap_mask = IB_PORT_CM_SUP
        };
        unsigned long flags;
        int i;

        cm_dev = ib_get_client_data(ib_device, &cm_client);
        if (!cm_dev)
                return;

        /* Unlink first so no new work can find this device. */
        write_lock_irqsave(&cm.device_lock, flags);
        list_del(&cm_dev->list);
        write_unlock_irqrestore(&cm.device_lock, flags);

        for (i = 1; i <= ib_device->phys_port_cnt; i++) {
                port = cm_dev->port[i-1];
                /* Clear the advertised CM capability bit. */
                ib_modify_port(ib_device, port->port_num, 0, &port_modify);
                ib_unregister_mad_agent(port->mad_agent);
                /* Let queued work drain before removing the sysfs objects. */
                flush_workqueue(cm.wq);
                cm_remove_port_fs(port);
        }
        device_unregister(cm_dev->device);
        kfree(cm_dev);
}
/*
 * Module init: initialize the global CM state, register the device
 * class, create the CM workqueue, and register as an IB client.
 * Returns 0 or a negative errno (previously class_register() failures
 * were masked as -ENOMEM; the real error is now propagated).
 */
static int __init ib_cm_init(void)
{
        int ret;

        memset(&cm, 0, sizeof cm);
        INIT_LIST_HEAD(&cm.device_list);
        rwlock_init(&cm.device_lock);
        spin_lock_init(&cm.lock);
        cm.listen_service_table = RB_ROOT;
        cm.listen_service_id = be64_to_cpu(IB_CM_ASSIGN_SERVICE_ID);
        cm.remote_id_table = RB_ROOT;
        cm.remote_qp_table = RB_ROOT;
        cm.remote_sidr_table = RB_ROOT;
        idr_init(&cm.local_id_table);
        get_random_bytes(&cm.random_id_operand, sizeof cm.random_id_operand);
        /* Pre-load the idr cache; later allocations handle failure. */
        idr_pre_get(&cm.local_id_table, GFP_KERNEL);
        INIT_LIST_HEAD(&cm.timewait_list);

        ret = class_register(&cm_class);
        if (ret)
                return ret;

        cm.wq = create_workqueue("ib_cm");
        if (!cm.wq) {
                ret = -ENOMEM;
                goto error1;
        }

        ret = ib_register_client(&cm_client);
        if (ret)
                goto error2;

        return 0;
error2:
        destroy_workqueue(cm.wq);
error1:
        class_unregister(&cm_class);
        return ret;
}
/*
 * Module exit: cancel pending timewait work, unregister from the IB
 * core, drain the workqueue, and free any timewait entries that remain.
 */
static void __exit ib_cm_cleanup(void)
{
        struct cm_timewait_info *timewait_info, *tmp;

        spin_lock_irq(&cm.lock);
        list_for_each_entry(timewait_info, &cm.timewait_list, list)
                cancel_delayed_work(&timewait_info->work.work);
        spin_unlock_irq(&cm.lock);

        ib_unregister_client(&cm_client);
        destroy_workqueue(cm.wq);

        /* Entries whose work never ran are still on the list; free them. */
        list_for_each_entry_safe(timewait_info, tmp, &cm.timewait_list, list) {
                list_del(&timewait_info->list);
                kfree(timewait_info);
        }
        class_unregister(&cm_class);
        idr_destroy(&cm.local_id_table);
}
module_init(ib_cm_init);
module_exit(ib_cm_cleanup);
| gpl-2.0 |
davtse/i9505 | drivers/media/video/gspca/spca508.c | 4956 | 41826 | /*
* SPCA508 chip based cameras subdriver
*
* Copyright (C) 2009 Jean-Francois Moine <http://moinejf.free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "spca508"
#include "gspca.h"
MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/SPCA508 USB Camera Driver");
MODULE_LICENSE("GPL");
/* specific webcam descriptor */
struct sd {
        struct gspca_dev gspca_dev;     /* !! must be the first item */
        u8 brightness;  /* cached brightness control value (0..255) */
        u8 subtype;     /* camera variant; one of the indexes below */
#define CreativeVista 0
#define HamaUSBSightcam 1
#define HamaUSBSightcam2 2
#define IntelEasyPCCamera 3
#define MicroInnovationIC200 4
#define ViewQuestVQ110 5
};
/* V4L2 controls supported by the driver */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
/* Single control: brightness, range 0..255, default 128. */
static const struct ctrl sd_ctrls[] = {
        {
            {
                .id = V4L2_CID_BRIGHTNESS,
                .type = V4L2_CTRL_TYPE_INTEGER,
                .name = "Brightness",
                .minimum = 0,
                .maximum = 255,
                .step = 1,
#define BRIGHTNESS_DEF 128
                .default_value = BRIGHTNESS_DEF,
            },
            .set = sd_setbrightness,
            .get = sd_getbrightness,
        },
};
/*
 * Supported frame formats, smallest first.  All use the SPCA508
 * compressed YUV format at 3/2 bytes per pixel.
 * NOTE(review): .priv appears to be the hardware mode index
 * (0 = largest frame) -- confirm against the stream-start code.
 */
static const struct v4l2_pix_format sif_mode[] = {
        {160, 120, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
                .bytesperline = 160,
                .sizeimage = 160 * 120 * 3 / 2,
                .colorspace = V4L2_COLORSPACE_SRGB,
                .priv = 3},
        {176, 144, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
                .bytesperline = 176,
                .sizeimage = 176 * 144 * 3 / 2,
                .colorspace = V4L2_COLORSPACE_SRGB,
                .priv = 2},
        {320, 240, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
                .bytesperline = 320,
                .sizeimage = 320 * 240 * 3 / 2,
                .colorspace = V4L2_COLORSPACE_SRGB,
                .priv = 1},
        {352, 288, V4L2_PIX_FMT_SPCA508, V4L2_FIELD_NONE,
                .bytesperline = 352,
                .sizeimage = 352 * 288 * 3 / 2,
                .colorspace = V4L2_COLORSPACE_SRGB,
                .priv = 0},
};
/* Frame packet header offsets for the spca508 */
#define SPCA508_OFFSET_DATA 37
/*
* Initialization data: this is the first set-up data written to the
* device (before the open data).
*/
static const u16 spca508_init_data[][2] = {
{0x0000, 0x870b},
{0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
{0x0003, 0x8111}, /* Reset compression & memory */
{0x0000, 0x8110}, /* Disable all outputs */
/* READ {0x0000, 0x8114} -> 0000: 00 */
{0x0000, 0x8114}, /* SW GPIO data */
{0x0008, 0x8110}, /* Enable charge pump output */
{0x0002, 0x8116}, /* 200 kHz pump clock */
/* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE:) */
{0x0003, 0x8111}, /* Reset compression & memory */
{0x0000, 0x8111}, /* Normal mode (not reset) */
{0x0098, 0x8110},
/* Enable charge pump output, sync.serial,external 2x clock */
{0x000d, 0x8114}, /* SW GPIO data */
{0x0002, 0x8116}, /* 200 kHz pump clock */
{0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
/* --------------------------------------- */
{0x000f, 0x8402}, /* memory bank */
{0x0000, 0x8403}, /* ... address */
/* --------------------------------------- */
/* 0x88__ is Synchronous Serial Interface. */
/* TBD: This table could be expressed more compactly */
/* using spca508_write_i2c_vector(). */
/* TBD: Should see if the values in spca50x_i2c_data */
/* would work with the VQ110 instead of the values */
/* below. */
{0x00c0, 0x8804}, /* SSI slave addr */
{0x0008, 0x8802}, /* 375 Khz SSI clock */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802}, /* 375 Khz SSI clock */
{0x0012, 0x8801}, /* SSI reg addr */
{0x0080, 0x8800}, /* SSI data to write */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802}, /* 375 Khz SSI clock */
{0x0012, 0x8801}, /* SSI reg addr */
{0x0000, 0x8800}, /* SSI data to write */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802}, /* 375 Khz SSI clock */
{0x0011, 0x8801}, /* SSI reg addr */
{0x0040, 0x8800}, /* SSI data to write */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0013, 0x8801},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0014, 0x8801},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0015, 0x8801},
{0x0001, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0016, 0x8801},
{0x0003, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0017, 0x8801},
{0x0036, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0018, 0x8801},
{0x00ec, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x001a, 0x8801},
{0x0094, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x001b, 0x8801},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0027, 0x8801},
{0x00a2, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0028, 0x8801},
{0x0040, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x002a, 0x8801},
{0x0084, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x002b, 0x8801},
{0x00a8, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x002c, 0x8801},
{0x00fe, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x002d, 0x8801},
{0x0003, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0038, 0x8801},
{0x0083, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0033, 0x8801},
{0x0081, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0034, 0x8801},
{0x004a, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0039, 0x8801},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0010, 0x8801},
{0x00a8, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0006, 0x8801},
{0x0058, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0000, 0x8801},
{0x0004, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0040, 0x8801},
{0x0080, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0041, 0x8801},
{0x000c, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0042, 0x8801},
{0x000c, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0043, 0x8801},
{0x0028, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0044, 0x8801},
{0x0080, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0045, 0x8801},
{0x0020, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0046, 0x8801},
{0x0020, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0047, 0x8801},
{0x0080, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0048, 0x8801},
{0x004c, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x0049, 0x8801},
{0x0084, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x004a, 0x8801},
{0x0084, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x0008, 0x8802},
{0x004b, 0x8801},
{0x0084, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* --------------------------------------- */
{0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
{0x0000, 0x8701}, /* CKx1 clock delay adj */
{0x0000, 0x8701}, /* CKx1 clock delay adj */
{0x0001, 0x870c}, /* CKOx2 output */
/* --------------------------------------- */
{0x0080, 0x8600}, /* Line memory read counter (L) */
{0x0001, 0x8606}, /* reserved */
{0x0064, 0x8607}, /* Line memory read counter (H) 0x6480=25,728 */
{0x002a, 0x8601}, /* CDSP sharp interpolation mode,
* line sel for color sep, edge enhance enab */
{0x0000, 0x8602}, /* optical black level for user settng = 0 */
{0x0080, 0x8600}, /* Line memory read counter (L) */
{0x000a, 0x8603}, /* optical black level calc mode:
* auto; optical black offset = 10 */
{0x00df, 0x865b}, /* Horiz offset for valid pixels (L)=0xdf */
{0x0012, 0x865c}, /* Vert offset for valid lines (L)=0x12 */
/* The following two lines seem to be the "wrong" resolution. */
/* But perhaps these indicate the actual size of the sensor */
/* rather than the size of the current video mode. */
{0x0058, 0x865d}, /* Horiz valid pixels (*4) (L) = 352 */
{0x0048, 0x865e}, /* Vert valid lines (*4) (L) = 288 */
{0x0015, 0x8608}, /* A11 Coef ... */
{0x0030, 0x8609},
{0x00fb, 0x860a},
{0x003e, 0x860b},
{0x00ce, 0x860c},
{0x00f4, 0x860d},
{0x00eb, 0x860e},
{0x00dc, 0x860f},
{0x0039, 0x8610},
{0x0001, 0x8611}, /* R offset for white balance ... */
{0x0000, 0x8612},
{0x0001, 0x8613},
{0x0000, 0x8614},
{0x005b, 0x8651}, /* R gain for white balance ... */
{0x0040, 0x8652},
{0x0060, 0x8653},
{0x0040, 0x8654},
{0x0000, 0x8655},
{0x0001, 0x863f}, /* Fixed gamma correction enable, USB control,
* lum filter disable, lum noise clip disable */
{0x00a1, 0x8656}, /* Window1 size 256x256, Windows2 size 64x64,
* gamma look-up disable,
* new edge enhancement enable */
{0x0018, 0x8657}, /* Edge gain high thresh */
{0x0020, 0x8658}, /* Edge gain low thresh */
{0x000a, 0x8659}, /* Edge bandwidth high threshold */
{0x0005, 0x865a}, /* Edge bandwidth low threshold */
/* -------------------------------- */
{0x0030, 0x8112}, /* Video drop enable, ISO streaming enable */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0xa908, 0x8802},
{0x0034, 0x8801}, /* SSI reg addr */
{0x00ca, 0x8800},
/* SSI data to write */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0x1f08, 0x8802},
{0x0006, 0x8801},
{0x0080, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* ----- Read back coefs we wrote earlier. */
/* READ { 0x0000, 0x8608 } -> 0000: 15 */
/* READ { 0x0000, 0x8609 } -> 0000: 30 */
/* READ { 0x0000, 0x860a } -> 0000: fb */
/* READ { 0x0000, 0x860b } -> 0000: 3e */
/* READ { 0x0000, 0x860c } -> 0000: ce */
/* READ { 0x0000, 0x860d } -> 0000: f4 */
/* READ { 0x0000, 0x860e } -> 0000: eb */
/* READ { 0x0000, 0x860f } -> 0000: dc */
/* READ { 0x0000, 0x8610 } -> 0000: 39 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 08 */
{0xb008, 0x8802},
{0x0006, 0x8801},
{0x007d, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* This chunk is seemingly redundant with */
/* earlier commands (A11 Coef...), but if I disable it, */
/* the image appears too dark. Maybe there was some kind of */
/* reset since the earlier commands, so this is necessary again. */
{0x0015, 0x8608},
{0x0030, 0x8609},
{0xfffb, 0x860a},
{0x003e, 0x860b},
{0xffce, 0x860c},
{0xfff4, 0x860d},
{0xffeb, 0x860e},
{0xffdc, 0x860f},
{0x0039, 0x8610},
{0x0018, 0x8657},
{0x0000, 0x8508}, /* Disable compression. */
/* Previous line was:
{0x0021, 0x8508}, * Enable compression. */
{0x0032, 0x850b}, /* compression stuff */
{0x0003, 0x8509}, /* compression stuff */
{0x0011, 0x850a}, /* compression stuff */
{0x0021, 0x850d}, /* compression stuff */
{0x0010, 0x850c}, /* compression stuff */
{0x0003, 0x8500}, /* *** Video mode: 160x120 */
{0x0001, 0x8501}, /* Hardware-dominated snap control */
{0x0061, 0x8656}, /* Window1 size 128x128, Windows2 size 128x128,
* gamma look-up disable,
* new edge enhancement enable */
{0x0018, 0x8617}, /* Window1 start X (*2) */
{0x0008, 0x8618}, /* Window1 start Y (*2) */
{0x0061, 0x8656}, /* Window1 size 128x128, Windows2 size 128x128,
* gamma look-up disable,
* new edge enhancement enable */
{0x0058, 0x8619}, /* Window2 start X (*2) */
{0x0008, 0x861a}, /* Window2 start Y (*2) */
{0x00ff, 0x8615}, /* High lum thresh for white balance */
{0x0000, 0x8616}, /* Low lum thresh for white balance */
{0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
{0x0012, 0x8700}, /* Clock speed 48Mhz/(2+2)/2= 6 Mhz */
/* READ { 0x0000, 0x8656 } -> 0000: 61 */
{0x0028, 0x8802}, /* 375 Khz SSI clock, SSI r/w sync with VSYNC */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 28 */
{0x1f28, 0x8802}, /* 375 Khz SSI clock, SSI r/w sync with VSYNC */
{0x0010, 0x8801}, /* SSI reg addr */
{0x003e, 0x8800}, /* SSI data to write */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
{0x0028, 0x8802},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 28 */
{0x1f28, 0x8802},
{0x0000, 0x8801},
{0x001f, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
{0x0001, 0x8602}, /* optical black level for user settning = 1 */
/* Original: */
{0x0023, 0x8700}, /* Clock speed 48Mhz/(3+2)/4= 2.4 Mhz */
{0x000f, 0x8602}, /* optical black level for user settning = 15 */
{0x0028, 0x8802},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 28 */
{0x1f28, 0x8802},
{0x0010, 0x8801},
{0x007b, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
{0x002f, 0x8651}, /* R gain for white balance ... */
{0x0080, 0x8653},
/* READ { 0x0000, 0x8655 } -> 0000: 00 */
{0x0000, 0x8655},
{0x0030, 0x8112}, /* Video drop enable, ISO streaming enable */
{0x0020, 0x8112}, /* Video drop enable, ISO streaming disable */
/* UNKNOWN DIRECTION (URB_FUNCTION_SELECT_INTERFACE: (ALT=0) ) */
{}
};
/*
* Initialization data for Intel EasyPC Camera CS110
*/
/* Bridge/sensor bring-up table: {value, register} pairs consumed by
 * write_vector(); registers with bit 15 set go to the bridge, others
 * through the SSI. The empty {} entry terminates the list. */
static const u16 spca508cs110_init_data[][2] = {
{0x0000, 0x870b}, /* Reset CTL3 */
{0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */
{0x0000, 0x8111}, /* Normal operation on reset */
{0x0090, 0x8110},
/* External Clock 2x & Synchronous Serial Interface Output */
{0x0020, 0x8112}, /* Video Drop packet enable */
{0x0000, 0x8114}, /* Software GPIO output data */
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0003, 0x8114},
/* Initial sequence Synchronous Serial Interface */
{0x000f, 0x8402}, /* Memory bank Address */
{0x0000, 0x8403}, /* Memory bank Address */
{0x00ba, 0x8804}, /* SSI Slave address */
{0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */
{0x0010, 0x8802}, /* 93.75kHz SSI Clock two DataByte */
{0x0001, 0x8801},
{0x000a, 0x8805}, /* a - NWG: Dunno what this is about */
{0x0000, 0x8800},
{0x0010, 0x8802},
{0x0002, 0x8801},
{0x0000, 0x8805},
{0x0000, 0x8800},
{0x0010, 0x8802},
{0x0003, 0x8801},
{0x0027, 0x8805},
{0x0001, 0x8800},
{0x0010, 0x8802},
{0x0004, 0x8801},
{0x0065, 0x8805},
{0x0001, 0x8800},
{0x0010, 0x8802},
{0x0005, 0x8801},
{0x0003, 0x8805},
{0x0000, 0x8800},
{0x0010, 0x8802},
{0x0006, 0x8801},
{0x001c, 0x8805},
{0x0000, 0x8800},
{0x0010, 0x8802},
{0x0007, 0x8801},
{0x002a, 0x8805},
{0x0000, 0x8800},
{0x0010, 0x8802},
{0x0002, 0x8704}, /* External input CKIx1 */
{0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */
{0x009a, 0x8600}, /* Line memory Read Counter (L) */
{0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */
{0x0003, 0x865c}, /* 3 Vertical Offset for Valid Lines(L) */
{0x0058, 0x865d}, /* 58 Horizontal Valid Pixel Window(L) */
{0x0006, 0x8660}, /* Nibble data + input order */
{0x000a, 0x8602}, /* Optical black level set to 0x0a */
{0x0000, 0x8603}, /* Optical black level Offset */
/* {0x0000, 0x8611}, * 0 R Offset for white Balance */
/* {0x0000, 0x8612}, * 1 Gr Offset for white Balance */
/* {0x0000, 0x8613}, * 1f B Offset for white Balance */
/* {0x0000, 0x8614}, * f0 Gb Offset for white Balance */
{0x0040, 0x8651}, /* 2b BLUE gain for white balance good at all 60 */
{0x0030, 0x8652}, /* 41 Gr Gain for white Balance (L) */
{0x0035, 0x8653}, /* 26 RED gain for white balance */
{0x0035, 0x8654}, /* 40Gb Gain for white Balance (L) */
{0x0041, 0x863f},
/* Fixed Gamma correction enabled (makes colours look better) */
{0x0000, 0x8655},
/* High bits for white balance*****brightness control*** */
{}
};
/* Init sequence for the Hama USB Sightcam: {value, register} pairs for
 * write_vector(), terminated by the empty {} entry. */
static const u16 spca508_sightcam_init_data[][2] = {
/* This line seems to setup the frame/canvas */
{0x000f, 0x8402},
/* These 6 lines are needed to startup the webcam */
{0x0090, 0x8110},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0003, 0x8114},
{0x0080, 0x8804},
/* This part seems to make the pictures darker? (autobrightness?) */
{0x0001, 0x8801},
{0x0004, 0x8800},
{0x0003, 0x8801},
{0x00e0, 0x8800},
{0x0004, 0x8801},
{0x00b4, 0x8800},
{0x0005, 0x8801},
{0x0000, 0x8800},
{0x0006, 0x8801},
{0x00e0, 0x8800},
{0x0007, 0x8801},
{0x000c, 0x8800},
/* This section is just needed, it probably
 * does something like the previous section,
 * but the cam won't start if it's not included.
 */
{0x0014, 0x8801},
{0x0008, 0x8800},
{0x0015, 0x8801},
{0x0067, 0x8800},
{0x0016, 0x8801},
{0x0000, 0x8800},
{0x0017, 0x8801},
{0x0020, 0x8800},
{0x0018, 0x8801},
{0x0044, 0x8800},
/* Makes the picture darker - and the
 * cam won't start if not included
 */
{0x001e, 0x8801},
{0x00ea, 0x8800},
{0x001f, 0x8801},
{0x0001, 0x8800},
{0x0003, 0x8801},
{0x00e0, 0x8800},
/* seems to place the colors ontop of each other #1 */
{0x0006, 0x8704},
{0x0001, 0x870c},
{0x0016, 0x8600},
{0x0002, 0x8606},
/* if not included the pictures becomes _very_ dark */
{0x0064, 0x8607},
{0x003a, 0x8601},
{0x0000, 0x8602},
/* seems to place the colors ontop of each other #2 */
{0x0016, 0x8600},
{0x0018, 0x8617},
{0x0008, 0x8618},
{0x00a1, 0x8656},
/* webcam won't start if not included */
{0x0007, 0x865b},
{0x0001, 0x865c},
{0x0058, 0x865d},
{0x0048, 0x865e},
/* adjusts the colors */
{0x0049, 0x8651},
{0x0040, 0x8652},
{0x004c, 0x8653},
{0x0040, 0x8654},
{}
};
/* Init sequence for the Hama USB Sightcam 2: {value, register} pairs for
 * write_vector(), terminated by the empty {} entry. */
static const u16 spca508_sightcam2_init_data[][2] = {
{0x0020, 0x8112},
{0x000f, 0x8402},
{0x0000, 0x8403},
{0x0008, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0009, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000a, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000b, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000c, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000d, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000e, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0007, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x000f, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0018, 0x8660},
{0x0010, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0011, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0000, 0x86b0},
{0x0034, 0x86b1},
{0x0000, 0x86b2},
{0x0049, 0x86b3},
{0x0000, 0x86b4},
{0x0000, 0x86b4},
{0x0012, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0013, 0x8201},
{0x0008, 0x8200},
{0x0001, 0x8200},
{0x0001, 0x86b0},
{0x00aa, 0x86b1},
{0x0000, 0x86b2},
{0x00e4, 0x86b3},
{0x0000, 0x86b4},
{0x0000, 0x86b4},
{0x0018, 0x8660},
{0x0090, 0x8110},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0003, 0x8114},
{0x0080, 0x8804},
{0x0003, 0x8801},
{0x0012, 0x8800},
{0x0004, 0x8801},
{0x0005, 0x8800},
{0x0005, 0x8801},
{0x0000, 0x8800},
{0x0006, 0x8801},
{0x0000, 0x8800},
{0x0007, 0x8801},
{0x0000, 0x8800},
{0x0008, 0x8801},
{0x0005, 0x8800},
{0x000a, 0x8700},
{0x000e, 0x8801},
{0x0004, 0x8800},
{0x0005, 0x8801},
{0x0047, 0x8800},
{0x0006, 0x8801},
{0x0000, 0x8800},
{0x0007, 0x8801},
{0x00c0, 0x8800},
{0x0008, 0x8801},
{0x0003, 0x8800},
{0x0013, 0x8801},
{0x0001, 0x8800},
{0x0009, 0x8801},
{0x0000, 0x8800},
{0x000a, 0x8801},
{0x0000, 0x8800},
{0x000b, 0x8801},
{0x0000, 0x8800},
{0x000c, 0x8801},
{0x0000, 0x8800},
{0x000e, 0x8801},
{0x0004, 0x8800},
{0x000f, 0x8801},
{0x0000, 0x8800},
{0x0010, 0x8801},
{0x0006, 0x8800},
{0x0011, 0x8801},
{0x0006, 0x8800},
{0x0012, 0x8801},
{0x0000, 0x8800},
{0x0013, 0x8801},
{0x0001, 0x8800},
{0x000a, 0x8700},
{0x0000, 0x8702},
{0x0000, 0x8703},
{0x00c2, 0x8704},
{0x0001, 0x870c},
{0x0044, 0x8600},
{0x0002, 0x8606},
{0x0064, 0x8607},
{0x003a, 0x8601},
{0x0008, 0x8602},
{0x0044, 0x8600},
{0x0018, 0x8617},
{0x0008, 0x8618},
{0x00a1, 0x8656},
{0x0004, 0x865b},
{0x0002, 0x865c},
{0x0058, 0x865d},
{0x0048, 0x865e},
{0x0012, 0x8608},
{0x002c, 0x8609},
{0x0002, 0x860a},
{0x002c, 0x860b},
{0x00db, 0x860c},
{0x00f9, 0x860d},
{0x00f1, 0x860e},
{0x00e3, 0x860f},
{0x002c, 0x8610},
{0x006c, 0x8651},
{0x0041, 0x8652},
{0x0059, 0x8653},
{0x0040, 0x8654},
{0x00fa, 0x8611},
{0x00ff, 0x8612},
{0x00f8, 0x8613},
{0x0000, 0x8614},
{0x0001, 0x863f},
{0x0000, 0x8640},
{0x0026, 0x8641},
{0x0045, 0x8642},
{0x0060, 0x8643},
{0x0075, 0x8644},
{0x0088, 0x8645},
{0x009b, 0x8646},
{0x00b0, 0x8647},
{0x00c5, 0x8648},
{0x00d2, 0x8649},
{0x00dc, 0x864a},
{0x00e5, 0x864b},
{0x00eb, 0x864c},
{0x00f0, 0x864d},
{0x00f6, 0x864e},
{0x00fa, 0x864f},
{0x00ff, 0x8650},
{0x0060, 0x8657},
{0x0010, 0x8658},
{0x0018, 0x8659},
{0x0005, 0x865a},
{0x0018, 0x8660},
{0x0003, 0x8509},
{0x0011, 0x850a},
{0x0032, 0x850b},
{0x0010, 0x850c},
{0x0021, 0x850d},
{0x0001, 0x8500},
{0x0000, 0x8508},
{0x0012, 0x8608},
{0x002c, 0x8609},
{0x0002, 0x860a},
{0x0039, 0x860b},
{0x00d0, 0x860c},
{0x00f7, 0x860d},
{0x00ed, 0x860e},
{0x00db, 0x860f},
{0x0039, 0x8610},
{0x0012, 0x8657},
{0x000c, 0x8619},
{0x0004, 0x861a},
{0x00a1, 0x8656},
{0x00c8, 0x8615},
{0x0032, 0x8616},
{0x0030, 0x8112},
{0x0020, 0x8112},
{0x0020, 0x8112},
/* NOTE: the sequence below largely repeats the block above. */
{0x000f, 0x8402},
{0x0000, 0x8403},
{0x0090, 0x8110},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0003, 0x8114},
{0x0080, 0x8804},
{0x0003, 0x8801},
{0x0012, 0x8800},
{0x0004, 0x8801},
{0x0005, 0x8800},
{0x0005, 0x8801},
{0x0047, 0x8800},
{0x0006, 0x8801},
{0x0000, 0x8800},
{0x0007, 0x8801},
{0x00c0, 0x8800},
{0x0008, 0x8801},
{0x0003, 0x8800},
{0x000a, 0x8700},
{0x000e, 0x8801},
{0x0004, 0x8800},
{0x0005, 0x8801},
{0x0047, 0x8800},
{0x0006, 0x8801},
{0x0000, 0x8800},
{0x0007, 0x8801},
{0x00c0, 0x8800},
{0x0008, 0x8801},
{0x0003, 0x8800},
{0x0013, 0x8801},
{0x0001, 0x8800},
{0x0009, 0x8801},
{0x0000, 0x8800},
{0x000a, 0x8801},
{0x0000, 0x8800},
{0x000b, 0x8801},
{0x0000, 0x8800},
{0x000c, 0x8801},
{0x0000, 0x8800},
{0x000e, 0x8801},
{0x0004, 0x8800},
{0x000f, 0x8801},
{0x0000, 0x8800},
{0x0010, 0x8801},
{0x0006, 0x8800},
{0x0011, 0x8801},
{0x0006, 0x8800},
{0x0012, 0x8801},
{0x0000, 0x8800},
{0x0013, 0x8801},
{0x0001, 0x8800},
{0x000a, 0x8700},
{0x0000, 0x8702},
{0x0000, 0x8703},
{0x00c2, 0x8704},
{0x0001, 0x870c},
{0x0044, 0x8600},
{0x0002, 0x8606},
{0x0064, 0x8607},
{0x003a, 0x8601},
{0x0008, 0x8602},
{0x0044, 0x8600},
{0x0018, 0x8617},
{0x0008, 0x8618},
{0x00a1, 0x8656},
{0x0004, 0x865b},
{0x0002, 0x865c},
{0x0058, 0x865d},
{0x0048, 0x865e},
{0x0012, 0x8608},
{0x002c, 0x8609},
{0x0002, 0x860a},
{0x002c, 0x860b},
{0x00db, 0x860c},
{0x00f9, 0x860d},
{0x00f1, 0x860e},
{0x00e3, 0x860f},
{0x002c, 0x8610},
{0x006c, 0x8651},
{0x0041, 0x8652},
{0x0059, 0x8653},
{0x0040, 0x8654},
{0x00fa, 0x8611},
{0x00ff, 0x8612},
{0x00f8, 0x8613},
{0x0000, 0x8614},
{0x0001, 0x863f},
{0x0000, 0x8640},
{0x0026, 0x8641},
{0x0045, 0x8642},
{0x0060, 0x8643},
{0x0075, 0x8644},
{0x0088, 0x8645},
{0x009b, 0x8646},
{0x00b0, 0x8647},
{0x00c5, 0x8648},
{0x00d2, 0x8649},
{0x00dc, 0x864a},
{0x00e5, 0x864b},
{0x00eb, 0x864c},
{0x00f0, 0x864d},
{0x00f6, 0x864e},
{0x00fa, 0x864f},
{0x00ff, 0x8650},
{0x0060, 0x8657},
{0x0010, 0x8658},
{0x0018, 0x8659},
{0x0005, 0x865a},
{0x0018, 0x8660},
{0x0003, 0x8509},
{0x0011, 0x850a},
{0x0032, 0x850b},
{0x0010, 0x850c},
{0x0021, 0x850d},
{0x0001, 0x8500},
{0x0000, 0x8508},
{0x0012, 0x8608},
{0x002c, 0x8609},
{0x0002, 0x860a},
{0x0039, 0x860b},
{0x00d0, 0x860c},
{0x00f7, 0x860d},
{0x00ed, 0x860e},
{0x00db, 0x860f},
{0x0039, 0x8610},
{0x0012, 0x8657},
{0x0064, 0x8619},
/* This line starts it all, it is not needed here */
/* since it has been build into the driver */
/* jfm: don't start now */
/* {0x0030, 0x8112}, */
{}
};
/*
* Initialization data for Creative Webcam Vista
*/
/* Init sequence for the Creative Webcam Vista: {value, register} pairs
 * for write_vector(); the commented-out READ lines document the values
 * a bus trace observed at these points. {} terminates the list. */
static const u16 spca508_vista_init_data[][2] = {
{0x0008, 0x8200}, /* Clear register */
{0x0000, 0x870b}, /* Reset CTL3 */
{0x0020, 0x8112}, /* Video Drop packet enable */
{0x0003, 0x8111}, /* Soft Reset compression, memory, TG & CDSP */
{0x0000, 0x8110}, /* Disable everything */
{0x0000, 0x8114}, /* Software GPIO output data */
{0x0000, 0x8114},
{0x0003, 0x8111},
{0x0000, 0x8111},
{0x0090, 0x8110}, /* Enable: SSI output, External 2X clock output */
{0x0020, 0x8112},
{0x0000, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0001, 0x8114},
{0x0003, 0x8114},
{0x000f, 0x8402}, /* Memory bank Address */
{0x0000, 0x8403}, /* Memory bank Address */
{0x00ba, 0x8804}, /* SSI Slave address */
{0x0010, 0x8802}, /* 93.75kHz SSI Clock Two DataByte */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802}, /* Will write 2 bytes (DATA1+DATA2) */
{0x0020, 0x8801}, /* Register address for SSI read/write */
{0x0044, 0x8805}, /* DATA2 */
{0x0004, 0x8800}, /* DATA1 -> write triggered */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0009, 0x8801},
{0x0042, 0x8805},
{0x0001, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x003c, 0x8801},
{0x0001, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0001, 0x8801},
{0x000a, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0002, 0x8801},
{0x0000, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0003, 0x8801},
{0x0027, 0x8805},
{0x0001, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0004, 0x8801},
{0x0065, 0x8805},
{0x0001, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0005, 0x8801},
{0x0003, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0006, 0x8801},
{0x001c, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0007, 0x8801},
{0x002a, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x000e, 0x8801},
{0x0000, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0028, 0x8801},
{0x002e, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0039, 0x8801},
{0x0013, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x003b, 0x8801},
{0x000c, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0035, 0x8801},
{0x0028, 0x8805},
{0x0000, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
/* READ { 0x0001, 0x8802 } -> 0000: 10 */
{0x0010, 0x8802},
{0x0009, 0x8801},
{0x0042, 0x8805},
{0x0001, 0x8800},
/* READ { 0x0001, 0x8803 } -> 0000: 00 */
{0x0050, 0x8703},
{0x0002, 0x8704}, /* External input CKIx1 */
{0x0001, 0x870c}, /* Select CKOx2 output */
{0x009a, 0x8600}, /* Line memory Read Counter (L) */
{0x0001, 0x8606}, /* 1 Line memory Read Counter (H) Result: (d)410 */
{0x0023, 0x8601},
{0x0010, 0x8602},
{0x000a, 0x8603},
{0x009a, 0x8600},
{0x0001, 0x865b}, /* 1 Horizontal Offset for Valid Pixel(L) */
{0x0003, 0x865c}, /* Vertical offset for valid lines (L) */
{0x0058, 0x865d}, /* Horizontal valid pixels window (L) */
{0x0048, 0x865e}, /* Vertical valid lines window (L) */
{0x0000, 0x865f},
{0x0006, 0x8660},
/* Enable nibble data input, select nibble input order */
{0x0013, 0x8608}, /* A11 Coeficients for color correction */
{0x0028, 0x8609},
/* Note: these values are confirmed at the end of array */
{0x0005, 0x860a}, /* ... */
{0x0025, 0x860b},
{0x00e1, 0x860c},
{0x00fa, 0x860d},
{0x00f4, 0x860e},
{0x00e8, 0x860f},
{0x0025, 0x8610}, /* A33 Coef. */
{0x00fc, 0x8611}, /* White balance offset: R */
{0x0001, 0x8612}, /* White balance offset: Gr */
{0x00fe, 0x8613}, /* White balance offset: B */
{0x0000, 0x8614}, /* White balance offset: Gb */
{0x0064, 0x8651}, /* R gain for white balance (L) */
{0x0040, 0x8652}, /* Gr gain for white balance (L) */
{0x0066, 0x8653}, /* B gain for white balance (L) */
{0x0040, 0x8654}, /* Gb gain for white balance (L) */
{0x0001, 0x863f}, /* Enable fixed gamma correction */
{0x00a1, 0x8656}, /* Size - Window1: 256x256, Window2: 128x128,
* UV division: UV no change,
* Enable New edge enhancement */
{0x0018, 0x8657}, /* Edge gain high threshold */
{0x0020, 0x8658}, /* Edge gain low threshold */
{0x000a, 0x8659}, /* Edge bandwidth high threshold */
{0x0005, 0x865a}, /* Edge bandwidth low threshold */
{0x0064, 0x8607}, /* UV filter enable */
{0x0016, 0x8660},
{0x0000, 0x86b0}, /* Bad pixels compensation address */
{0x00dc, 0x86b1}, /* X coord for bad pixels compensation (L) */
{0x0000, 0x86b2},
{0x0009, 0x86b3}, /* Y coord for bad pixels compensation (L) */
{0x0000, 0x86b4},
{0x0001, 0x86b0},
{0x00f5, 0x86b1},
{0x0000, 0x86b2},
{0x00c6, 0x86b3},
{0x0000, 0x86b4},
{0x0002, 0x86b0},
{0x001c, 0x86b1},
{0x0001, 0x86b2},
{0x00d7, 0x86b3},
{0x0000, 0x86b4},
{0x0003, 0x86b0},
{0x001c, 0x86b1},
{0x0001, 0x86b2},
{0x00d8, 0x86b3},
{0x0000, 0x86b4},
{0x0004, 0x86b0},
{0x001d, 0x86b1},
{0x0001, 0x86b2},
{0x00d8, 0x86b3},
{0x0000, 0x86b4},
{0x001e, 0x8660},
/* READ { 0x0000, 0x8608 } -> 0000: 13 */
/* READ { 0x0000, 0x8609 } -> 0000: 28 */
/* READ { 0x0000, 0x8610 } -> 0000: 05 */
/* READ { 0x0000, 0x8611 } -> 0000: 25 */
/* READ { 0x0000, 0x8612 } -> 0000: e1 */
/* READ { 0x0000, 0x8613 } -> 0000: fa */
/* READ { 0x0000, 0x8614 } -> 0000: f4 */
/* READ { 0x0000, 0x8615 } -> 0000: e8 */
/* READ { 0x0000, 0x8616 } -> 0000: 25 */
{}
};
/* Write one byte ('value') to bridge register 'index' with a vendor
 * control request. Returns the usb_control_msg() result (negative errno
 * on failure). */
static int reg_write(struct usb_device *dev,
		u16 index, u16 value)
{
	unsigned int pipe = usb_sndctrlpipe(dev, 0);
	int err;

	err = usb_control_msg(dev, pipe,
			0,			/* bRequest */
			USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			value, index, NULL, 0, 500);
	PDEBUG(D_USBO, "reg write i:0x%04x = 0x%02x",
		index, value);
	if (err < 0)
		pr_err("reg write: error %d\n", err);
	return err;
}
/* Read one byte from bridge register 'index' (wIndex) into
 * gspca_dev->usb_buf.
 * Returns the byte read (>= 0) or a negative errno on failure. */
static int reg_read(struct gspca_dev *gspca_dev,
		u16 index)
{
	struct usb_device *udev = gspca_dev->dev;
	int rc;

	rc = usb_control_msg(udev,
			usb_rcvctrlpipe(udev, 0),
			0,			/* bRequest */
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			0,			/* wValue */
			index,
			gspca_dev->usb_buf, 1,
			500);			/* timeout, ms */
	PDEBUG(D_USBI, "reg read i:%04x --> %02x",
		index, gspca_dev->usb_buf[0]);
	if (rc < 0) {
		pr_err("reg_read err %d\n", rc);
		return rc;
	}
	return gspca_dev->usb_buf[0];
}
/* Send 1 or 2 bytes to the sensor via the Synchronous Serial Interface.
 * 'reg' high byte selects the SSI transfer mode, low byte the sensor
 * register; bit pattern 0x10xx requests a two-byte write. After the
 * trigger write to 0x8800, register 0x8803 is polled until it reads 0
 * (idle). Returns >= 0 on success, negative on error or busy timeout. */
static int ssi_w(struct gspca_dev *gspca_dev,
		u16 reg, u16 val)
{
	struct usb_device *dev = gspca_dev->dev;
	int status, attempts;

	status = reg_write(dev, 0x8802, reg >> 8);
	if (status < 0)
		goto out;
	status = reg_write(dev, 0x8801, reg & 0x00ff);
	if (status < 0)
		goto out;
	if ((reg & 0xff00) == 0x1000) {		/* two-byte transfer */
		status = reg_write(dev, 0x8805, val & 0x00ff);
		if (status < 0)
			goto out;
		val >>= 8;
	}
	status = reg_write(dev, 0x8800, val);	/* triggers the write */
	if (status < 0)
		goto out;

	/* wait for the SSI engine to go idle */
	attempts = 10;
	while ((status = reg_read(gspca_dev, 0x8803)) >= 0
	    && gspca_dev->usb_buf[0] != 0) {
		if (--attempts <= 0) {
			PDEBUG(D_ERR, "ssi_w busy %02x",
				gspca_dev->usb_buf[0]);
			status = -1;
			break;
		}
		msleep(8);
	}
out:
	return status;
}
/* Play back a {value, register} init table (terminated by a zero
 * register). Entries with bit 15 set address the bridge directly —
 * except the 0xdd00 pseudo-register, which is an msleep() delay in ms —
 * while the rest go to the sensor through ssi_w().
 * Stops at the first error; returns the last transfer's status. */
static int write_vector(struct gspca_dev *gspca_dev,
		const u16 (*data)[2])
{
	struct usb_device *dev = gspca_dev->dev;
	int rc = 0;

	for (; (*data)[1] != 0; data++) {
		u16 idx = (*data)[1];
		u16 val = (*data)[0];

		if (!(idx & 0x8000))
			rc = ssi_w(gspca_dev, idx, val);
		else if (idx == 0xdd00)		/* pseudo-register: delay */
			msleep(val);
		else
			rc = reg_write(dev, idx, val);
		if (rc < 0)
			break;
	}
	return rc;
}
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
const struct usb_device_id *id)
{
struct sd *sd = (struct sd *) gspca_dev;
struct cam *cam;
const u16 (*init_data)[2];
/* per-subtype init tables, indexed by the driver_info subtype */
static const u16 (*(init_data_tb[]))[2] = {
spca508_vista_init_data, /* CreativeVista 0 */
spca508_sightcam_init_data, /* HamaUSBSightcam 1 */
spca508_sightcam2_init_data, /* HamaUSBSightcam2 2 */
spca508cs110_init_data, /* IntelEasyPCCamera 3 */
spca508cs110_init_data, /* MicroInnovationIC200 4 */
spca508_init_data, /* ViewQuestVQ110 5 */
};
#ifdef GSPCA_DEBUG
int data1, data2;
/* Read from global register the USB product and vendor IDs, just to
 * prove that we can communicate with the device. This works, which
 * confirms that we are communicating properly and that the device
 * is a 508. */
data1 = reg_read(gspca_dev, 0x8104);
data2 = reg_read(gspca_dev, 0x8105);
PDEBUG(D_PROBE, "Webcam Vendor ID: 0x%02x%02x", data2, data1);
data1 = reg_read(gspca_dev, 0x8106);
data2 = reg_read(gspca_dev, 0x8107);
PDEBUG(D_PROBE, "Webcam Product ID: 0x%02x%02x", data2, data1);
data1 = reg_read(gspca_dev, 0x8621);
PDEBUG(D_PROBE, "Window 1 average luminance: %d", data1);
#endif
/* advertise the supported frame formats to the gspca core */
cam = &gspca_dev->cam;
cam->cam_mode = sif_mode;
cam->nmodes = ARRAY_SIZE(sif_mode);
sd->subtype = id->driver_info;
sd->brightness = BRIGHTNESS_DEF;
/* upload the camera-model-specific register init sequence */
init_data = init_data_tb[sd->subtype];
return write_vector(gspca_dev, init_data);
}
/* Called at probe and resume time; all hardware setup is already done
 * in sd_config(), so nothing is needed here. */
static int sd_init(struct gspca_dev *gspca_dev)
{
	return 0;
}
/* Start streaming: program the selected video mode, pick the matching
 * pixel clock, then enable ISO streaming. */
static int sd_start(struct gspca_dev *gspca_dev)
{
	int mode;

	mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv;
	reg_write(gspca_dev->dev, 0x8500, mode);	/* video mode */

	/* modes 0 and 1 use one clock divider, the rest another */
	reg_write(gspca_dev->dev, 0x8700,
		(mode == 0 || mode == 1) ? 0x28 : 0x23);	/* clock */

	/* Video drop enable + ISO streaming enable */
	reg_write(gspca_dev->dev, 0x8112, 0x10 | 0x20);
	return 0;
}
/* Stop streaming: disable Video ISO while keeping Video Drop Packet
 * enabled. */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	struct usb_device *dev = gspca_dev->dev;

	reg_write(dev, 0x8112, 0x20);
}
/* Isochronous packet scanner: the first byte of each packet tags it as
 * start-of-frame (0), drop (0xff), or frame continuation (anything
 * else); the tag byte(s) are stripped before forwarding to the core. */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
		u8 *data,		/* isoc packet */
		int len)		/* iso packet length */
{
	u8 tag = data[0];

	if (tag == 0) {			/* start of frame */
		/* close the previous frame, then open a new one past
		 * the packet header */
		gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
		gspca_frame_add(gspca_dev, FIRST_PACKET,
				data + SPCA508_OFFSET_DATA,
				len - SPCA508_OFFSET_DATA);
	} else if (tag != 0xff) {	/* 0xff packets are dropped */
		gspca_frame_add(gspca_dev, INTER_PACKET,
				data + 1, len - 1);
	}
}
static void setbrightness(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
u8 brightness = sd->brightness;
/* MX seem contrast */
reg_write(gspca_dev->dev, 0x8651, brightness);
reg_write(gspca_dev->dev, 0x8652, brightness);
reg_write(gspca_dev->dev, 0x8653, brightness);
reg_write(gspca_dev->dev, 0x8654, brightness);
}
/* V4L2 brightness control, set side: cache the value and, when the
 * camera is already streaming, apply it to the hardware immediately. */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->brightness = val;
	if (gspca_dev->streaming)
		setbrightness(gspca_dev);
	return 0;
}
/* V4L2 brightness control, get side: report the cached value. */
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
{
	const struct sd *sd = (const struct sd *) gspca_dev;

	*val = sd->brightness;
	return 0;
}
/* sub-driver description: callbacks registered with the gspca core */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.ctrls = sd_ctrls,
.nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config, /* called at probe time */
.init = sd_init, /* called at probe and resume time */
.start = sd_start,
.stopN = sd_stopN,
.pkt_scan = sd_pkt_scan,
};
/* -- module initialisation -- */
/* USB vendor/product IDs handled by this driver; driver_info selects
 * the per-model init table index used in sd_config(). */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x0130, 0x0130), .driver_info = HamaUSBSightcam},
{USB_DEVICE(0x041e, 0x4018), .driver_info = CreativeVista},
{USB_DEVICE(0x0733, 0x0110), .driver_info = ViewQuestVQ110},
{USB_DEVICE(0x0af9, 0x0010), .driver_info = HamaUSBSightcam},
{USB_DEVICE(0x0af9, 0x0011), .driver_info = HamaUSBSightcam2},
{USB_DEVICE(0x8086, 0x0110), .driver_info = IntelEasyPCCamera},
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
/* USB probe entry point: hand off to the gspca core, which allocates
 * our struct sd and drives the sd_desc callbacks. */
static int sd_probe(struct usb_interface *intf,
		const struct usb_device_id *id)
{
	return gspca_dev_probe(intf, id, &sd_desc,
			sizeof(struct sd), THIS_MODULE);
}
/* USB driver glue: registration handled by module_usb_driver() below */
static struct usb_driver sd_driver = {
.name = MODULE_NAME,
.id_table = device_table,
.probe = sd_probe,
.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
#endif
};
module_usb_driver(sd_driver);
| gpl-2.0 |
grzmot22/android_kernel_htc_protou | drivers/media/video/gspca/spca505.c | 4956 | 19084 | /*
* SPCA505 chip based cameras initialization data
*
* V4L2 by Jean-Francis Moine <http://moinejf.free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define MODULE_NAME "spca505"
#include "gspca.h"
MODULE_AUTHOR("Michel Xhaard <mxhaard@users.sourceforge.net>");
MODULE_DESCRIPTION("GSPCA/SPCA505 USB Camera Driver");
MODULE_LICENSE("GPL");
/* specific webcam descriptor */
struct sd {
struct gspca_dev gspca_dev; /* !! must be the first item */
u8 brightness; /* cached V4L2 brightness control value */
u8 subtype; /* camera model, from usb_device_id driver_info */
#define IntelPCCameraPro 0
#define Nxultra 1
};
/* V4L2 controls supported by the driver */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val);
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val);
/* V4L2 controls exposed by this sub-driver (brightness only) */
static const struct ctrl sd_ctrls[] = {
{
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Brightness",
.minimum = 0,
.maximum = 255,
.step = 1,
#define BRIGHTNESS_DEF 127
.default_value = BRIGHTNESS_DEF,
},
.set = sd_setbrightness,
.get = sd_getbrightness,
},
};
/* Supported frame formats, largest last; .priv holds the hardware mode
 * number written to the bridge when the mode is selected. */
static const struct v4l2_pix_format vga_mode[] = {
{160, 120, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 160,
.sizeimage = 160 * 120 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 4},
{176, 144, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 176,
.sizeimage = 176 * 144 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 3},
{320, 240, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 320,
.sizeimage = 320 * 240 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 2},
{352, 288, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 352,
.sizeimage = 352 * 288 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 1},
{640, 480, V4L2_PIX_FMT_SPCA505, V4L2_FIELD_NONE,
.bytesperline = 640,
.sizeimage = 640 * 480 * 3 / 2,
.colorspace = V4L2_COLORSPACE_SRGB,
.priv = 0},
};
#define SPCA50X_OFFSET_DATA 10
#define SPCA50X_REG_USB 0x02 /* spca505 501 */
#define SPCA50X_USB_CTRL 0x00 /* spca505 */
#define SPCA50X_CUSB_ENABLE 0x01 /* spca505 */
#define SPCA50X_REG_GLOBAL 0x03 /* spca505 */
#define SPCA50X_GMISC0_IDSEL 0x01 /* Global control device ID select spca505 */
#define SPCA50X_GLOBAL_MISC0 0x00 /* Global control miscellaneous 0 spca505 */
#define SPCA50X_GLOBAL_MISC1 0x01 /* 505 */
#define SPCA50X_GLOBAL_MISC3 0x03 /* 505 */
#define SPCA50X_GMISC3_SAA7113RST 0x20 /* Not sure about this one spca505 */
/* Image format and compression control */
#define SPCA50X_REG_COMPRESS 0x04
/*
 * Data to initialize a SPCA505. Common to the CCD and external modes
 * Each entry is {bmRequest, value, index}; {} terminates the list.
 */
static const u8 spca505_init_data[][3] = {
/* bmRequest,value,index */
{SPCA50X_REG_GLOBAL, SPCA50X_GMISC3_SAA7113RST, SPCA50X_GLOBAL_MISC3},
/* Sensor reset */
{SPCA50X_REG_GLOBAL, 0x00, SPCA50X_GLOBAL_MISC3},
{SPCA50X_REG_GLOBAL, 0x00, SPCA50X_GLOBAL_MISC1},
/* Block USB reset */
{SPCA50X_REG_GLOBAL, SPCA50X_GMISC0_IDSEL, SPCA50X_GLOBAL_MISC0},
{0x05, 0x01, 0x10},
/* Maybe power down some stuff */
{0x05, 0x0f, 0x11},
/* Setup internal CCD ? */
{0x06, 0x10, 0x08},
{0x06, 0x00, 0x09},
{0x06, 0x00, 0x0a},
{0x06, 0x00, 0x0b},
{0x06, 0x10, 0x0c},
{0x06, 0x00, 0x0d},
{0x06, 0x00, 0x0e},
{0x06, 0x00, 0x0f},
{0x06, 0x10, 0x10},
{0x06, 0x02, 0x11},
{0x06, 0x00, 0x12},
{0x06, 0x04, 0x13},
{0x06, 0x02, 0x14},
{0x06, 0x8a, 0x51},
{0x06, 0x40, 0x52},
{0x06, 0xb6, 0x53},
{0x06, 0x3d, 0x54},
{}
};
/*
 * Data to initialize the camera using the internal CCD
 *
 * Same {bmRequest, value, index} layout as spca505_init_data, sent by
 * write_vector() at stream start; terminated by an all-zero entry.
 * The "DIFF" remarks record deltas against another reference dump.
 */
static const u8 spca505_open_data_ccd[][3] = {
/* bmRequest,value,index */
/* Internal CCD data set */
{0x03, 0x04, 0x01},
/* This could be a reset */
{0x03, 0x00, 0x01},
/* Setup compression and image registers. 0x6 and 0x7 seem to be
   related to H&V hold, and are resolution mode specific */
{0x04, 0x10, 0x01},
/* DIFF(0x50), was (0x10) */
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x04, 0x20, 0x06},
{0x04, 0x20, 0x07},
{0x08, 0x0a, 0x00},
/* DIFF (0x4a), was (0xa) */
{0x05, 0x00, 0x10},
{0x05, 0x00, 0x11},
{0x05, 0x00, 0x00},
/* DIFF not written */
{0x05, 0x00, 0x01},
/* DIFF not written */
{0x05, 0x00, 0x02},
/* DIFF not written */
{0x05, 0x00, 0x03},
/* DIFF not written */
{0x05, 0x00, 0x04},
/* DIFF not written */
{0x05, 0x80, 0x05},
/* DIFF not written */
{0x05, 0xe0, 0x06},
/* DIFF not written */
{0x05, 0x20, 0x07},
/* DIFF not written */
{0x05, 0xa0, 0x08},
/* DIFF not written */
{0x05, 0x0, 0x12},
/* DIFF not written */
{0x05, 0x02, 0x0f},
/* DIFF not written */
{0x05, 0x10, 0x46},
/* DIFF not written */
{0x05, 0x8, 0x4a},
/* DIFF not written */
{0x03, 0x08, 0x03},
/* DIFF (0x3,0x28,0x3) */
{0x03, 0x08, 0x01},
{0x03, 0x0c, 0x03},
/* DIFF not written */
{0x03, 0x21, 0x00},
/* DIFF (0x39) */
/* Extra block copied from init to hopefully ensure CCD is in a sane state */
{0x06, 0x10, 0x08},
{0x06, 0x00, 0x09},
{0x06, 0x00, 0x0a},
{0x06, 0x00, 0x0b},
{0x06, 0x10, 0x0c},
{0x06, 0x00, 0x0d},
{0x06, 0x00, 0x0e},
{0x06, 0x00, 0x0f},
{0x06, 0x10, 0x10},
{0x06, 0x02, 0x11},
{0x06, 0x00, 0x12},
{0x06, 0x04, 0x13},
{0x06, 0x02, 0x14},
{0x06, 0x8a, 0x51},
{0x06, 0x40, 0x52},
{0x06, 0xb6, 0x53},
{0x06, 0x3d, 0x54},
/* End of extra block */
{0x06, 0x3f, 0x1},
/* Block skipped */
{0x06, 0x10, 0x02},
{0x06, 0x64, 0x07},
{0x06, 0x10, 0x08},
{0x06, 0x00, 0x09},
{0x06, 0x00, 0x0a},
{0x06, 0x00, 0x0b},
{0x06, 0x10, 0x0c},
{0x06, 0x00, 0x0d},
{0x06, 0x00, 0x0e},
{0x06, 0x00, 0x0f},
{0x06, 0x10, 0x10},
{0x06, 0x02, 0x11},
{0x06, 0x00, 0x12},
{0x06, 0x04, 0x13},
{0x06, 0x02, 0x14},
{0x06, 0x8a, 0x51},
{0x06, 0x40, 0x52},
{0x06, 0xb6, 0x53},
{0x06, 0x3d, 0x54},
{0x06, 0x60, 0x57},
{0x06, 0x20, 0x58},
{0x06, 0x15, 0x59},
{0x06, 0x05, 0x5a},
{0x05, 0x01, 0xc0},
{0x05, 0x10, 0xcb},
{0x05, 0x80, 0xc1},
/* */
{0x05, 0x0, 0xc2},
/* 4 was 0 */
{0x05, 0x00, 0xca},
{0x05, 0x80, 0xc1},
/* */
{0x05, 0x04, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x0, 0xc1},
/* */
{0x05, 0x00, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x40, 0xc1},
/* */
{0x05, 0x17, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x80, 0xc1},
/* */
{0x05, 0x06, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x80, 0xc1},
/* */
{0x05, 0x04, 0xc2},
{0x05, 0x00, 0xca},
{0x03, 0x4c, 0x3},
{0x03, 0x18, 0x1},
{0x06, 0x70, 0x51},
{0x06, 0xbe, 0x53},
{0x06, 0x71, 0x57},
{0x06, 0x20, 0x58},
{0x06, 0x05, 0x59},
{0x06, 0x15, 0x5a},
{0x04, 0x00, 0x08},
/* Compress = OFF (0x1 to turn on) */
{0x04, 0x12, 0x09},
{0x04, 0x21, 0x0a},
{0x04, 0x10, 0x0b},
{0x04, 0x21, 0x0c},
{0x04, 0x05, 0x00},
/* was 5 (Image Type ? ) */
{0x04, 0x00, 0x01},
{0x06, 0x3f, 0x01},
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x04, 0x40, 0x06},
{0x04, 0x40, 0x07},
{0x06, 0x1c, 0x17},
{0x06, 0xe2, 0x19},
{0x06, 0x1c, 0x1b},
{0x06, 0xe2, 0x1d},
{0x06, 0xaa, 0x1f},
{0x06, 0x70, 0x20},
{0x05, 0x01, 0x10},
{0x05, 0x00, 0x11},
{0x05, 0x01, 0x00},
{0x05, 0x05, 0x01},
{0x05, 0x00, 0xc1},
/* */
{0x05, 0x00, 0xc2},
{0x05, 0x00, 0xca},
{0x06, 0x70, 0x51},
{0x06, 0xbe, 0x53},
{}
};
/*
 * Made by Tomasz Zablocki (skalamandra@poczta.onet.pl)
 * SPCA505b chip based cameras initialization data
 */
/* jfm */
/* default brightness; spread over two registers as value>>6 and value<<2 */
#define initial_brightness 0x7f /* 0x0(white)-0xff(black) */
/* #define initial_brightness 0x0 //0x0(white)-0xff(black) */
/*
 * Data to initialize a SPCA505. Common to the CCD and external modes
 *
 * {bmRequest, value, index} triples replayed by write_vector(),
 * terminated by an all-zero entry.
 */
static const u8 spca505b_init_data[][3] = {
/* start */
{0x02, 0x00, 0x00}, /* init */
{0x02, 0x00, 0x01},
{0x02, 0x00, 0x02},
{0x02, 0x00, 0x03},
{0x02, 0x00, 0x04},
{0x02, 0x00, 0x05},
{0x02, 0x00, 0x06},
{0x02, 0x00, 0x07},
{0x02, 0x00, 0x08},
{0x02, 0x00, 0x09},
{0x03, 0x00, 0x00},
{0x03, 0x00, 0x01},
{0x03, 0x00, 0x02},
{0x03, 0x00, 0x03},
{0x03, 0x00, 0x04},
{0x03, 0x00, 0x05},
{0x03, 0x00, 0x06},
{0x04, 0x00, 0x00},
{0x04, 0x00, 0x02},
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x04, 0x00, 0x06},
{0x04, 0x00, 0x07},
{0x04, 0x00, 0x08},
{0x04, 0x00, 0x09},
{0x04, 0x00, 0x0a},
{0x04, 0x00, 0x0b},
{0x04, 0x00, 0x0c},
{0x07, 0x00, 0x00},
{0x07, 0x00, 0x03},
{0x08, 0x00, 0x00},
{0x08, 0x00, 0x01},
{0x08, 0x00, 0x02},
{0x06, 0x18, 0x08},
{0x06, 0xfc, 0x09},
{0x06, 0xfc, 0x0a},
{0x06, 0xfc, 0x0b},
{0x06, 0x18, 0x0c},
{0x06, 0xfc, 0x0d},
{0x06, 0xfc, 0x0e},
{0x06, 0xfc, 0x0f},
{0x06, 0x18, 0x10},
{0x06, 0xfe, 0x12},
{0x06, 0x00, 0x11},
{0x06, 0x00, 0x14},
{0x06, 0x00, 0x13},
{0x06, 0x28, 0x51},
{0x06, 0xff, 0x53},
{0x02, 0x00, 0x08},
{0x03, 0x00, 0x03},
{0x03, 0x10, 0x03},
{}
};
/*
 * Data to initialize the camera using the internal CCD
 *
 * SPCA505b variant of the open sequence; same {bmRequest, value, index}
 * layout, replayed by write_vector() and terminated by an all-zero entry.
 */
static const u8 spca505b_open_data_ccd[][3] = {
/* {0x02,0x00,0x00}, */
{0x03, 0x04, 0x01}, /* rst */
{0x03, 0x00, 0x01},
{0x03, 0x00, 0x00},
{0x03, 0x21, 0x00},
{0x03, 0x00, 0x04},
{0x03, 0x00, 0x03},
{0x03, 0x18, 0x03},
{0x03, 0x08, 0x01},
{0x03, 0x1c, 0x03},
{0x03, 0x5c, 0x03},
{0x03, 0x5c, 0x03},
{0x03, 0x18, 0x01},
/* same as 505 */
{0x04, 0x10, 0x01},
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x04, 0x20, 0x06},
{0x04, 0x20, 0x07},
{0x08, 0x0a, 0x00},
{0x05, 0x00, 0x10},
{0x05, 0x00, 0x11},
{0x05, 0x00, 0x12},
{0x05, 0x6f, 0x00},
{0x05, initial_brightness >> 6, 0x00},
{0x05, (initial_brightness << 2) & 0xff, 0x01},
{0x05, 0x00, 0x02},
{0x05, 0x01, 0x03},
{0x05, 0x00, 0x04},
{0x05, 0x03, 0x05},
{0x05, 0xe0, 0x06},
{0x05, 0x20, 0x07},
{0x05, 0xa0, 0x08},
{0x05, 0x00, 0x12},
{0x05, 0x02, 0x0f},
{0x05, 0x80, 0x14}, /* max exposure off (0=on) */
{0x05, 0x01, 0xb0},
{0x05, 0x01, 0xbf},
{0x03, 0x02, 0x06},
{0x05, 0x10, 0x46},
{0x05, 0x08, 0x4a},
{0x06, 0x00, 0x01},
{0x06, 0x10, 0x02},
{0x06, 0x64, 0x07},
{0x06, 0x18, 0x08},
{0x06, 0xfc, 0x09},
{0x06, 0xfc, 0x0a},
{0x06, 0xfc, 0x0b},
{0x04, 0x00, 0x01},
{0x06, 0x18, 0x0c},
{0x06, 0xfc, 0x0d},
{0x06, 0xfc, 0x0e},
{0x06, 0xfc, 0x0f},
{0x06, 0x11, 0x10}, /* contrast */
{0x06, 0x00, 0x11},
{0x06, 0xfe, 0x12},
{0x06, 0x00, 0x13},
{0x06, 0x00, 0x14},
{0x06, 0x9d, 0x51},
{0x06, 0x40, 0x52},
{0x06, 0x7c, 0x53},
{0x06, 0x40, 0x54},
{0x06, 0x02, 0x57},
{0x06, 0x03, 0x58},
{0x06, 0x15, 0x59},
{0x06, 0x05, 0x5a},
{0x06, 0x03, 0x56},
{0x06, 0x02, 0x3f},
/* registers 0x40-0x50: appears to be a monotonically rising curve,
 * presumably a gamma ramp - not confirmed */
{0x06, 0x00, 0x40},
{0x06, 0x39, 0x41},
{0x06, 0x69, 0x42},
{0x06, 0x87, 0x43},
{0x06, 0x9e, 0x44},
{0x06, 0xb1, 0x45},
{0x06, 0xbf, 0x46},
{0x06, 0xcc, 0x47},
{0x06, 0xd5, 0x48},
{0x06, 0xdd, 0x49},
{0x06, 0xe3, 0x4a},
{0x06, 0xe8, 0x4b},
{0x06, 0xed, 0x4c},
{0x06, 0xf2, 0x4d},
{0x06, 0xf7, 0x4e},
{0x06, 0xfc, 0x4f},
{0x06, 0xff, 0x50},
{0x05, 0x01, 0xc0},
{0x05, 0x10, 0xcb},
{0x05, 0x40, 0xc1},
{0x05, 0x04, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x40, 0xc1},
{0x05, 0x09, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0xc0, 0xc1},
{0x05, 0x09, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x40, 0xc1},
{0x05, 0x59, 0xc2},
{0x05, 0x00, 0xca},
{0x04, 0x00, 0x01},
{0x05, 0x80, 0xc1},
{0x05, 0xec, 0xc2},
{0x05, 0x0, 0xca},
{0x06, 0x02, 0x57},
{0x06, 0x01, 0x58},
{0x06, 0x15, 0x59},
{0x06, 0x0a, 0x5a},
{0x06, 0x01, 0x57},
{0x06, 0x8a, 0x03},
{0x06, 0x0a, 0x6c},
{0x06, 0x30, 0x01},
{0x06, 0x20, 0x02},
{0x06, 0x00, 0x03},
{0x05, 0x8c, 0x25},
{0x06, 0x4d, 0x51}, /* maybe saturation (4d) */
{0x06, 0x84, 0x53}, /* making green (84) */
{0x06, 0x00, 0x57}, /* sharpness (1) */
{0x06, 0x18, 0x08},
{0x06, 0xfc, 0x09},
{0x06, 0xfc, 0x0a},
{0x06, 0xfc, 0x0b},
{0x06, 0x18, 0x0c}, /* maybe hue (18) */
{0x06, 0xfc, 0x0d},
{0x06, 0xfc, 0x0e},
{0x06, 0xfc, 0x0f},
{0x06, 0x18, 0x10}, /* maybe contrast (18) */
{0x05, 0x01, 0x02},
{0x04, 0x00, 0x08}, /* compression */
{0x04, 0x12, 0x09},
{0x04, 0x21, 0x0a},
{0x04, 0x10, 0x0b},
{0x04, 0x21, 0x0c},
{0x04, 0x1d, 0x00}, /* imagetype (1d) */
{0x04, 0x41, 0x01}, /* hardware snapcontrol */
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x04, 0x10, 0x06},
{0x04, 0x10, 0x07},
{0x04, 0x40, 0x06},
{0x04, 0x40, 0x07},
{0x04, 0x00, 0x04},
{0x04, 0x00, 0x05},
{0x06, 0x1c, 0x17},
{0x06, 0xe2, 0x19},
{0x06, 0x1c, 0x1b},
{0x06, 0xe2, 0x1d},
{0x06, 0x5f, 0x1f},
{0x06, 0x32, 0x20},
{0x05, initial_brightness >> 6, 0x00},
{0x05, (initial_brightness << 2) & 0xff, 0x01},
{0x05, 0x06, 0xc1},
{0x05, 0x58, 0xc2},
{0x05, 0x00, 0xca},
{0x05, 0x00, 0x11},
{}
};
/*
 * Send one vendor control request to the bridge.
 * Returns the usb_control_msg() result (negative errno on failure).
 */
static int reg_write(struct usb_device *dev,
		     u16 req, u16 index, u16 value)
{
	int err;

	err = usb_control_msg(dev,
			      usb_sndctrlpipe(dev, 0),
			      req,
			      USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      value, index, NULL, 0, 500);
	PDEBUG(D_USBO, "reg write: 0x%02x,0x%02x:0x%02x, %d",
	       req, index, value, err);
	if (err < 0)
		pr_err("reg write: error %d\n", err);
	return err;
}
/* returns: negative is error, pos or zero is data */
static int reg_read(struct gspca_dev *gspca_dev,
		    u16 req,	/* bRequest */
		    u16 index)	/* wIndex */
{
	int rc;

	rc = usb_control_msg(gspca_dev->dev,
			     usb_rcvctrlpipe(gspca_dev->dev, 0),
			     req,
			     USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			     0,			/* value */
			     index,
			     gspca_dev->usb_buf, 2,
			     500);		/* timeout */
	if (rc < 0)
		return rc;
	/* assemble the little-endian 16-bit reply from usb_buf */
	return gspca_dev->usb_buf[0] | (gspca_dev->usb_buf[1] << 8);
}
/*
 * Replay a {bmRequest, value, index} register table, stopping at the
 * all-zero sentinel entry.  Returns 0 or the first reg_write() error.
 */
static int write_vector(struct gspca_dev *gspca_dev,
			const u8 data[][3])
{
	struct usb_device *dev = gspca_dev->dev;
	int i;

	for (i = 0; data[i][0] != 0; i++) {
		int rc;

		/* table order is {request, value, index}; reg_write
		 * takes (request, index, value) */
		rc = reg_write(dev, data[i][0], data[i][2], data[i][1]);
		if (rc < 0)
			return rc;
	}
	return 0;
}
/* this function is called at probe time */
static int sd_config(struct gspca_dev *gspca_dev,
		     const struct usb_device_id *id)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct cam *cam = &gspca_dev->cam;

	cam->cam_mode = vga_mode;
	sd->subtype = id->driver_info;
	/* the IntelPCCameraPro cannot do the last (640x480) mode */
	cam->nmodes = (sd->subtype == IntelPCCameraPro)
			? ARRAY_SIZE(vga_mode) - 1
			: ARRAY_SIZE(vga_mode);
	sd->brightness = BRIGHTNESS_DEF;
	return 0;
}
/* this function is called at probe and resume time */
static int sd_init(struct gspca_dev *gspca_dev)
{
struct sd *sd = (struct sd *) gspca_dev;
if (write_vector(gspca_dev,
sd->subtype == Nxultra
? spca505b_init_data
: spca505_init_data))
return -EIO;
return 0;
}
/*
 * Push the current brightness to the bridge.  The register value is
 * inverted (0 = white, 255 = black) and split across two registers.
 */
static void setbrightness(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 inverted = 255 - sd->brightness;

	reg_write(gspca_dev->dev, 0x05, 0x00, inverted >> 6);
	reg_write(gspca_dev->dev, 0x05, 0x01, inverted << 2);
}
/*
 * Start streaming: upload the subtype-specific open sequence, sanity
 * check register 0x06/0x16, program the per-resolution registers and
 * enable the USB transfer machine.  Returns 0 or a negative errno.
 */
static int sd_start(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	struct usb_device *dev = gspca_dev->dev;
	int ret, mode;
	/* read-only per-mode register values; const so the table lives
	 * in rodata and cannot be clobbered (it was mutable before) */
	static const u8 mode_tb[][3] = {
	/*	 r00   r06   r07 */
		{0x00, 0x10, 0x10},	/* 640x480 */
		{0x01, 0x1a, 0x1a},	/* 352x288 */
		{0x02, 0x1c, 0x1d},	/* 320x240 */
		{0x04, 0x34, 0x34},	/* 176x144 */
		{0x05, 0x40, 0x40}	/* 160x120 */
	};

	if (sd->subtype == Nxultra)
		write_vector(gspca_dev, spca505b_open_data_ccd);
	else
		write_vector(gspca_dev, spca505_open_data_ccd);

	/* probe register 0x06/0x16 - expected to read back 0x0101 */
	ret = reg_read(gspca_dev, 0x06, 0x16);
	if (ret < 0) {
		PDEBUG(D_ERR|D_CONF,
		       "register read failed err: %d",
		       ret);
		return ret;
	}
	if (ret != 0x0101) {
		/* unexpected but not fatal - log and continue */
		pr_err("After vector read returns 0x%04x should be 0x0101\n",
		       ret);
	}

	ret = reg_write(gspca_dev->dev, 0x06, 0x16, 0x0a);
	if (ret < 0)
		return ret;
	reg_write(gspca_dev->dev, 0x05, 0xc2, 0x12);

	/* necessary because without it we can see stream
	 * only once after loading module */
	/* stopping usb registers Tomasz change */
	reg_write(dev, 0x02, 0x00, 0x00);

	/* program the resolution-specific registers */
	mode = gspca_dev->cam.cam_mode[(int) gspca_dev->curr_mode].priv;
	reg_write(dev, SPCA50X_REG_COMPRESS, 0x00, mode_tb[mode][0]);
	reg_write(dev, SPCA50X_REG_COMPRESS, 0x06, mode_tb[mode][1]);
	reg_write(dev, SPCA50X_REG_COMPRESS, 0x07, mode_tb[mode][2]);

	ret = reg_write(dev, SPCA50X_REG_USB,
			SPCA50X_USB_CTRL,
			SPCA50X_CUSB_ENABLE);

	setbrightness(gspca_dev);
	return ret;
}
/* Stop streaming while the interface is still up. */
static void sd_stopN(struct gspca_dev *gspca_dev)
{
	struct usb_device *dev = gspca_dev->dev;

	/* Disable ISO packet machine */
	reg_write(dev, 0x02, 0x00, 0x00);
}
/* called on streamoff with alt 0 and on disconnect */
static void sd_stop0(struct gspca_dev *gspca_dev)
{
	struct usb_device *dev;

	/* nothing to do once the device is gone */
	if (!gspca_dev->present)
		return;

	dev = gspca_dev->dev;
	/* This maybe reset or power control */
	reg_write(dev, 0x03, 0x03, 0x20);
	reg_write(dev, 0x03, 0x01, 0x00);
	reg_write(dev, 0x03, 0x00, 0x01);
	reg_write(dev, 0x05, 0x10, 0x01);
	reg_write(dev, 0x05, 0x11, 0x0f);
}
/*
 * Demultiplex one isochronous packet.  data[0] is a marker byte:
 * 0x00 starts a new frame, 0xff is a packet to drop, anything else
 * is frame payload after a one-byte header.
 */
static void sd_pkt_scan(struct gspca_dev *gspca_dev,
			u8 *data,		/* isoc packet */
			int len)		/* iso packet length */
{
	u8 marker = data[0];

	if (marker == 0) {
		/* start of frame: close the previous one, then begin a
		 * new frame with the payload after the 10-byte header */
		gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0);
		gspca_frame_add(gspca_dev, FIRST_PACKET,
				data + SPCA50X_OFFSET_DATA,
				len - SPCA50X_OFFSET_DATA);
	} else if (marker != 0xff) {
		/* intermediate payload after a 1-byte header
		 * (0xff packets are dropped) */
		gspca_frame_add(gspca_dev, INTER_PACKET,
				data + 1, len - 1);
	}
}
/* V4L2 control: store the brightness and, if the stream is live,
 * push it to the hardware immediately. */
static int sd_setbrightness(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;

	sd->brightness = val;
	if (gspca_dev->streaming)
		setbrightness(gspca_dev);
	return 0;
}
/* V4L2 control: report the cached brightness value. */
static int sd_getbrightness(struct gspca_dev *gspca_dev, __s32 *val)
{
	const struct sd *sd = (const struct sd *) gspca_dev;

	*val = sd->brightness;
	return 0;
}
/* sub-driver description */
/* Callback table handed to the gspca framework by sd_probe(). */
static const struct sd_desc sd_desc = {
.name = MODULE_NAME,
.ctrls = sd_ctrls,
.nctrls = ARRAY_SIZE(sd_ctrls),
.config = sd_config,
.init = sd_init,
.start = sd_start,
.stopN = sd_stopN,
.stop0 = sd_stop0,
.pkt_scan = sd_pkt_scan,
};
/* -- module initialisation -- */
/* USB ids handled by this driver; driver_info becomes sd->subtype
 * in sd_config() and selects the init/open register sequences. */
static const struct usb_device_id device_table[] = {
{USB_DEVICE(0x041e, 0x401d), .driver_info = Nxultra},
{USB_DEVICE(0x0733, 0x0430), .driver_info = IntelPCCameraPro},
/*fixme: may be UsbGrabberPV321 BRIDGE_SPCA506 SENSOR_SAA7113 */
{}
};
MODULE_DEVICE_TABLE(usb, device_table);
/* -- device connect -- */
/* Delegate probing entirely to the gspca framework, passing our
 * callback table and the size of the per-device state (struct sd). */
static int sd_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd),
THIS_MODULE);
}
/* USB driver glue; suspend/resume are only wired up when power
 * management is configured in. */
static struct usb_driver sd_driver = {
.name = MODULE_NAME,
.id_table = device_table,
.probe = sd_probe,
.disconnect = gspca_disconnect,
#ifdef CONFIG_PM
.suspend = gspca_suspend,
.resume = gspca_resume,
#endif
};
module_usb_driver(sd_driver);
| gpl-2.0 |
u-ra/android_kernel_htc_memul | drivers/gpu/drm/drm_fb_helper.c | 5212 | 37877 | /*
* Copyright (c) 2006-2009 Red Hat Inc.
* Copyright (c) 2006-2008 Intel Corporation
* Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
*
* DRM framebuffer helper functions
*
* Permission to use, copy, modify, distribute, and sell this software and its
* documentation for any purpose is hereby granted without fee, provided that
* the above copyright notice appear in all copies and that both that copyright
* notice and this permission notice appear in supporting documentation, and
* that the name of the copyright holders not be used in advertising or
* publicity pertaining to distribution of the software without specific,
* written prior permission. The copyright holders make no representations
* about the suitability of this software for any purpose. It is provided "as
* is" without express or implied warranty.
*
* THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
* INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
* EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
* CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
* DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
* OF THIS SOFTWARE.
*
* Authors:
* Dave Airlie <airlied@linux.ie>
* Jesse Barnes <jesse.barnes@intel.com>
*/
#include <linux/kernel.h>
#include <linux/sysrq.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/module.h>
#include "drmP.h"
#include "drm_crtc.h"
#include "drm_fb_helper.h"
#include "drm_crtc_helper.h"
MODULE_AUTHOR("David Airlie, Jesse Barnes");
MODULE_DESCRIPTION("DRM KMS helper");
MODULE_LICENSE("GPL and additional rights");
static LIST_HEAD(kernel_fb_helper_list);
/* simple single crtc case helper function */
/* simple single crtc case helper function */
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_connector *connector;
	int idx;

	/* wrap every connector of the device in a helper-connector and
	 * append it to fb_helper->connector_info */
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct drm_fb_helper_connector *fhc;

		fhc = kzalloc(sizeof(*fhc), GFP_KERNEL);
		if (!fhc)
			goto fail;
		fhc->connector = connector;
		fb_helper->connector_info[fb_helper->connector_count++] = fhc;
	}
	return 0;

fail:
	/* unwind everything added so far */
	while (fb_helper->connector_count) {
		idx = --fb_helper->connector_count;
		kfree(fb_helper->connector_info[idx]);
		fb_helper->connector_info[idx] = NULL;
	}
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_single_add_all_connectors);
/*
 * Parse the video= kernel command line option for every connector
 * attached to this fb helper, storing the result in the per-connector
 * cmdline_mode and applying any forced on/off state.  Always returns 0.
 */
static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
{
struct drm_fb_helper_connector *fb_helper_conn;
int i;
for (i = 0; i < fb_helper->connector_count; i++) {
struct drm_cmdline_mode *mode;
struct drm_connector *connector;
char *option = NULL;
fb_helper_conn = fb_helper->connector_info[i];
connector = fb_helper_conn->connector;
mode = &fb_helper_conn->cmdline_mode;
/* do something on return - turn off connector maybe */
if (fb_get_options(drm_get_connector_name(connector), &option))
continue;
if (drm_mode_parse_command_line_for_connector(option,
connector,
mode)) {
if (mode->force) {
const char *s;
switch (mode->force) {
case DRM_FORCE_OFF: s = "OFF"; break;
case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
default:
case DRM_FORCE_ON: s = "ON"; break;
}
DRM_INFO("forcing %s connector %s\n",
drm_get_connector_name(connector), s);
connector->force = mode->force;
}
DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
drm_get_connector_name(connector),
mode->xres, mode->yres,
mode->refresh_specified ? mode->refresh : 60,
mode->rb ? " reduced blanking" : "",
mode->margins ? " with margins" : "",
mode->interlace ? " interlaced" : "");
}
}
return 0;
}
/*
 * Snapshot the crtc's gamma LUT into crtc->gamma_store, which holds
 * the three channels consecutively (R block, G block, B block).
 */
static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
{
	int size = crtc->gamma_size;
	uint16_t *r = crtc->gamma_store;
	uint16_t *g = r + size;
	uint16_t *b = g + size;
	int i;

	for (i = 0; i < size; i++)
		helper->funcs->gamma_get(crtc, &r[i], &g[i], &b[i], i);
}
/*
 * Write the gamma LUT previously saved in crtc->gamma_store back to
 * the hardware in a single gamma_set call.
 */
static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
{
	uint16_t *r = crtc->gamma_store;
	uint16_t *g = r + crtc->gamma_size;
	uint16_t *b = g + crtc->gamma_size;

	crtc->funcs->gamma_set(crtc, r, g, b, 0, crtc->gamma_size);
}
/*
 * Switch every registered fb helper to its fbdev scanout for kernel
 * debugger entry, saving each crtc's gamma LUT first.
 *
 * NOTE(review): the 'helper' initialised from info->par is immediately
 * overwritten by the list walk - every helper on kernel_fb_helper_list
 * is processed, not just this fb_info's.  Also 'return false' in an
 * int-returning function is just 0; both look intentional but worth
 * confirming against upstream history.
 */
int drm_fb_helper_debug_enter(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
struct drm_crtc_helper_funcs *funcs;
int i;
if (list_empty(&kernel_fb_helper_list))
return false;
list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
for (i = 0; i < helper->crtc_count; i++) {
struct drm_mode_set *mode_set =
&helper->crtc_info[i].mode_set;
/* only touch crtcs that are actually scanning out */
if (!mode_set->crtc->enabled)
continue;
funcs = mode_set->crtc->helper_private;
/* preserve the LUT so debug_leave can restore it */
drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
funcs->mode_set_base_atomic(mode_set->crtc,
mode_set->fb,
mode_set->x,
mode_set->y,
ENTER_ATOMIC_MODE_SET);
}
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_debug_enter);
/* Find the real fb for a given fb helper CRTC */
static struct drm_framebuffer *drm_mode_config_fb(struct drm_crtc *crtc)
{
	struct drm_crtc *c;

	/* look the crtc up by object id in the device's crtc list and
	 * return the framebuffer currently bound to it */
	list_for_each_entry(c, &crtc->dev->mode_config.crtc_list, head) {
		if (c->base.id == crtc->base.id)
			return c->fb;
	}

	return NULL;
}
/*
 * Undo drm_fb_helper_debug_enter() for this fb_info's helper: restore
 * each enabled crtc's saved gamma LUT and re-point the scanout at the
 * framebuffer recorded in the device's mode config.
 */
int drm_fb_helper_debug_leave(struct fb_info *info)
{
struct drm_fb_helper *helper = info->par;
struct drm_crtc *crtc;
struct drm_crtc_helper_funcs *funcs;
struct drm_framebuffer *fb;
int i;
for (i = 0; i < helper->crtc_count; i++) {
struct drm_mode_set *mode_set = &helper->crtc_info[i].mode_set;
crtc = mode_set->crtc;
funcs = crtc->helper_private;
/* the "real" fb as tracked by the mode config, not ours */
fb = drm_mode_config_fb(crtc);
if (!crtc->enabled)
continue;
if (!fb) {
DRM_ERROR("no fb to restore??\n");
continue;
}
drm_fb_helper_restore_lut_atomic(mode_set->crtc);
funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
crtc->y, LEAVE_ATOMIC_MODE_SET);
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_debug_leave);
/*
 * Re-apply every crtc mode_set owned by this helper.
 * Returns true if ANY set_config call failed (note: true = error).
 */
bool drm_fb_helper_restore_fbdev_mode(struct drm_fb_helper *fb_helper)
{
	bool failed = false;
	int i;

	for (i = 0; i < fb_helper->crtc_count; i++) {
		struct drm_mode_set *set = &fb_helper->crtc_info[i].mode_set;

		if (drm_crtc_helper_set_config(set) != 0)
			failed = true;
	}

	return failed;
}
EXPORT_SYMBOL(drm_fb_helper_restore_fbdev_mode);
/*
 * Restore the fbdev mode on every registered helper whose device is
 * powered on.  Returns true on any failure, false when nothing was
 * registered or everything succeeded.
 */
bool drm_fb_helper_force_kernel_mode(void)
{
	struct drm_fb_helper *helper;
	bool failed = false;

	if (list_empty(&kernel_fb_helper_list))
		return false;

	list_for_each_entry(helper, &kernel_fb_helper_list, kernel_fb_list) {
		/* skip devices whose switch power state is off */
		if (helper->dev->switch_power_state == DRM_SWITCH_POWER_OFF)
			continue;
		if (drm_fb_helper_restore_fbdev_mode(helper))
			failed = true;
	}

	return failed;
}
/*
 * Panic-notifier callback: switch the display back to the kernel
 * framebuffer console so panic messages are visible.
 * (The second parameter is unused; "ununsed" is a pre-existing typo
 * in the parameter name, kept as-is.)
 */
int drm_fb_helper_panic(struct notifier_block *n, unsigned long ununsed,
void *panic_str)
{
/*
* It's a waste of time and effort to switch back to text console
* if the kernel should reboot before panic messages can be seen.
*/
if (panic_timeout < 0)
return 0;
printk(KERN_ERR "panic occurred, switching back to text console\n");
return drm_fb_helper_force_kernel_mode();
}
EXPORT_SYMBOL(drm_fb_helper_panic);
/* registered on panic_notifier_list while any helper fb exists */
static struct notifier_block paniced = {
.notifier_call = drm_fb_helper_panic,
};
/**
 * drm_fb_helper_restore - restore the framebuffer console (kernel) config
 *
 * Restores the kernel's fbcon mode, used for lastclose & panic paths.
 */
void drm_fb_helper_restore(void)
{
	if (drm_fb_helper_force_kernel_mode())
		DRM_ERROR("Failed to restore crtc configuration\n");
}
EXPORT_SYMBOL(drm_fb_helper_restore);
#ifdef CONFIG_MAGIC_SYSRQ
/* SysRq 'v' handler: defer the restore to a workqueue since the sysrq
 * path cannot safely do the full modeset itself. */
static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
{
drm_fb_helper_restore();
}
static DECLARE_WORK(drm_fb_helper_restore_work, drm_fb_helper_restore_work_fn);
static void drm_fb_helper_sysrq(int dummy1)
{
schedule_work(&drm_fb_helper_restore_work);
}
static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.handler = drm_fb_helper_sysrq,
.help_msg = "force-fb(V)",
.action_msg = "Restore framebuffer console",
};
#else
/* empty op so register/unregister calls stay unconditional */
static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
/*
 * Apply a DPMS mode to all connectors of this helper's enabled crtcs,
 * updating the dpms property so userspace sees the new state.
 * Takes the mode_config mutex for the duration.
 */
static void drm_fb_helper_dpms(struct fb_info *info, int dpms_mode)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_crtc *crtc;
struct drm_connector *connector;
int i, j;
/*
* For each CRTC in this fb, turn the connectors on/off.
*/
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
if (!crtc->enabled)
continue;
/* Walk the connectors & encoders on this fb turning them on/off */
for (j = 0; j < fb_helper->connector_count; j++) {
connector = fb_helper->connector_info[j]->connector;
drm_helper_connector_dpms(connector, dpms_mode);
drm_connector_property_set_value(connector,
dev->mode_config.dpms_property, dpms_mode);
}
}
mutex_unlock(&dev->mode_config.mutex);
}
/*
 * fbdev blank hook: translate the FB_BLANK_* level into the matching
 * DRM DPMS state and apply it to all connectors.  Unknown levels are
 * silently ignored.  Always returns 0.
 */
int drm_fb_helper_blank(int blank, struct fb_info *info)
{
	int dpms_mode;

	switch (blank) {
	case FB_BLANK_UNBLANK:		/* Display: On; HSync: On, VSync: On */
		dpms_mode = DRM_MODE_DPMS_ON;
		break;
	case FB_BLANK_NORMAL:		/* Display: Off; HSync: On, VSync: On */
	case FB_BLANK_HSYNC_SUSPEND:	/* Display: Off; HSync: Off, VSync: On */
		dpms_mode = DRM_MODE_DPMS_STANDBY;
		break;
	case FB_BLANK_VSYNC_SUSPEND:	/* Display: Off; HSync: On, VSync: Off */
		dpms_mode = DRM_MODE_DPMS_SUSPEND;
		break;
	case FB_BLANK_POWERDOWN:	/* Display: Off; HSync: Off, VSync: Off */
		dpms_mode = DRM_MODE_DPMS_OFF;
		break;
	default:
		return 0;
	}

	drm_fb_helper_dpms(info, dpms_mode);
	return 0;
}
EXPORT_SYMBOL(drm_fb_helper_blank);
static void drm_fb_helper_crtc_free(struct drm_fb_helper *helper)
{
int i;
for (i = 0; i < helper->connector_count; i++)
kfree(helper->connector_info[i]);
kfree(helper->connector_info);
for (i = 0; i < helper->crtc_count; i++) {
kfree(helper->crtc_info[i].mode_set.connectors);
if (helper->crtc_info[i].mode_set.mode)
drm_mode_destroy(helper->dev, helper->crtc_info[i].mode_set.mode);
}
kfree(helper->crtc_info);
}
/*
 * Allocate the per-crtc and per-connector bookkeeping for a helper.
 * crtc_count entries are allocated and bound, in list order, to the
 * device's crtcs; each crtc gets room for max_conn_count connectors.
 * Returns 0 or -ENOMEM (all partial allocations are freed on failure).
 */
int drm_fb_helper_init(struct drm_device *dev,
struct drm_fb_helper *fb_helper,
int crtc_count, int max_conn_count)
{
struct drm_crtc *crtc;
int ret = 0;
int i;
fb_helper->dev = dev;
INIT_LIST_HEAD(&fb_helper->kernel_fb_list);
fb_helper->crtc_info = kcalloc(crtc_count, sizeof(struct drm_fb_helper_crtc), GFP_KERNEL);
if (!fb_helper->crtc_info)
return -ENOMEM;
fb_helper->crtc_count = crtc_count;
fb_helper->connector_info = kcalloc(dev->mode_config.num_connector, sizeof(struct drm_fb_helper_connector *), GFP_KERNEL);
if (!fb_helper->connector_info) {
kfree(fb_helper->crtc_info);
return -ENOMEM;
}
fb_helper->connector_count = 0;
for (i = 0; i < crtc_count; i++) {
fb_helper->crtc_info[i].mode_set.connectors =
kcalloc(max_conn_count,
sizeof(struct drm_connector *),
GFP_KERNEL);
if (!fb_helper->crtc_info[i].mode_set.connectors) {
ret = -ENOMEM;
goto out_free;
}
fb_helper->crtc_info[i].mode_set.num_connectors = 0;
}
/* bind crtc_info entries to the device's crtcs in list order */
i = 0;
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
fb_helper->crtc_info[i].mode_set.crtc = crtc;
i++;
}
return 0;
out_free:
/* frees crtc_info, connector_info and any mode_set.connectors */
drm_fb_helper_crtc_free(fb_helper);
return -ENOMEM;
}
EXPORT_SYMBOL(drm_fb_helper_init);
/*
 * Tear down a helper: drop it from the global list, and if it was the
 * last one, unregister the panic notifier and the SysRq 'v' handler
 * that were installed when the first helper fb was registered.
 */
void drm_fb_helper_fini(struct drm_fb_helper *fb_helper)
{
if (!list_empty(&fb_helper->kernel_fb_list)) {
list_del(&fb_helper->kernel_fb_list);
if (list_empty(&kernel_fb_helper_list)) {
printk(KERN_INFO "drm: unregistered panic notifier\n");
atomic_notifier_chain_unregister(&panic_notifier_list,
&paniced);
unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
}
}
drm_fb_helper_crtc_free(fb_helper);
}
EXPORT_SYMBOL(drm_fb_helper_fini);
/*
 * Program one color register on a crtc.
 *
 * Truecolor visuals: pack the color into fbcon's software pseudo
 * palette.  That palette has only 16 entries, so valid indices are
 * 0..15 - the old "regno > 16" test was off by one and allowed an
 * out-of-bounds write to palette[16].
 *
 * Palette visuals: program the hardware gamma LUT via the helper's
 * gamma_set/gamma_get hooks.  The 16bpp path spreads each palette
 * entry over several LUT slots (the 5-bit and 6-bit channels index
 * the LUT differently); its exact interleaving is preserved as-is.
 */
static int setcolreg(struct drm_crtc *crtc, u16 red, u16 green,
		     u16 blue, u16 regno, struct fb_info *info)
{
	struct drm_fb_helper *fb_helper = info->par;
	struct drm_framebuffer *fb = fb_helper->fb;
	int pindex;

	if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
		u32 *palette;
		u32 value;
		/* place color in pseudopalette; only 16 slots exist */
		if (regno >= 16)
			return -EINVAL;
		palette = (u32 *)info->pseudo_palette;
		red >>= (16 - info->var.red.length);
		green >>= (16 - info->var.green.length);
		blue >>= (16 - info->var.blue.length);
		value = (red << info->var.red.offset) |
			(green << info->var.green.offset) |
			(blue << info->var.blue.offset);
		if (info->var.transp.length > 0) {
			/* force alpha fully opaque */
			u32 mask = (1 << info->var.transp.length) - 1;
			mask <<= info->var.transp.offset;
			value |= mask;
		}
		palette[regno] = value;
		return 0;
	}

	pindex = regno;

	if (fb->bits_per_pixel == 16) {
		pindex = regno << 3;

		if (fb->depth == 16 && regno > 63)
			return -EINVAL;
		if (fb->depth == 15 && regno > 31)
			return -EINVAL;

		if (fb->depth == 16) {
			u16 r, g, b;
			int i;
			if (regno < 32) {
				for (i = 0; i < 8; i++)
					fb_helper->funcs->gamma_set(crtc, red,
						green, blue, pindex + i);
			}

			fb_helper->funcs->gamma_get(crtc, &r,
						    &g, &b,
						    pindex >> 1);

			for (i = 0; i < 4; i++)
				fb_helper->funcs->gamma_set(crtc, r,
							    green, b,
							    (pindex >> 1) + i);
		}
	}

	if (fb->depth != 16)
		fb_helper->funcs->gamma_set(crtc, red, green, blue, pindex);
	return 0;
}
/*
 * fbdev setcmap hook: apply the colormap to every crtc of the helper,
 * programming one register per entry via setcolreg() and then loading
 * the crtc's LUT.  Returns the first setcolreg() error, else 0.
 */
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_crtc_helper_funcs *crtc_funcs;
u16 *red, *green, *blue, *transp;
struct drm_crtc *crtc;
int i, j, rc = 0;
int start;
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
crtc_funcs = crtc->helper_private;
/* channel pointers are rewound for each crtc */
red = cmap->red;
green = cmap->green;
blue = cmap->blue;
transp = cmap->transp;
start = cmap->start;
for (j = 0; j < cmap->len; j++) {
u16 hred, hgreen, hblue, htransp = 0xffff;
hred = *red++;
hgreen = *green++;
hblue = *blue++;
/* transp is optional in an fb_cmap */
if (transp)
htransp = *transp++;
rc = setcolreg(crtc, hred, hgreen, hblue, start++, info);
if (rc)
return rc;
}
crtc_funcs->load_lut(crtc);
}
return rc;
}
EXPORT_SYMBOL(drm_fb_helper_setcmap);
/*
 * fbdev check_var hook: reject requests larger than the allocated
 * framebuffer (fbdev cannot reallocate it), derive the color depth
 * from the requested bpp/channel lengths, and fill in the canonical
 * RGBA bitfield layout for that depth.
 */
int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_framebuffer *fb = fb_helper->fb;
int depth;
/* a nonzero pixclock would mean a real mode change - not supported */
if (var->pixclock != 0 || in_dbg_master())
return -EINVAL;
/* Need to resize the fb object !!! */
if (var->bits_per_pixel > fb->bits_per_pixel ||
var->xres > fb->width || var->yres > fb->height ||
var->xres_virtual > fb->width || var->yres_virtual > fb->height) {
DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
"request %dx%d-%d (virtual %dx%d) > %dx%d-%d\n",
var->xres, var->yres, var->bits_per_pixel,
var->xres_virtual, var->yres_virtual,
fb->width, fb->height, fb->bits_per_pixel);
return -EINVAL;
}
/* disambiguate depth from bpp using the channel/alpha lengths */
switch (var->bits_per_pixel) {
case 16:
depth = (var->green.length == 6) ? 16 : 15;
break;
case 32:
depth = (var->transp.length > 0) ? 32 : 24;
break;
default:
depth = var->bits_per_pixel;
break;
}
/* force the standard bitfield layout for the chosen depth */
switch (depth) {
case 8:
var->red.offset = 0;
var->green.offset = 0;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 15:
var->red.offset = 10;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 5;
var->blue.length = 5;
var->transp.length = 1;
var->transp.offset = 15;
break;
case 16:
var->red.offset = 11;
var->green.offset = 5;
var->blue.offset = 0;
var->red.length = 5;
var->green.length = 6;
var->blue.length = 5;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 24:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 0;
var->transp.offset = 0;
break;
case 32:
var->red.offset = 16;
var->green.offset = 8;
var->blue.offset = 0;
var->red.length = 8;
var->green.length = 8;
var->blue.length = 8;
var->transp.length = 8;
var->transp.offset = 24;
break;
default:
return -EINVAL;
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_check_var);
/* this will let fbcon do the mode init */
/*
 * fbdev set_par hook: apply the stored mode_set on every crtc under
 * the mode_config mutex, then run any hotplug processing that was
 * deferred while the console was taking over.
 */
int drm_fb_helper_set_par(struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct fb_var_screeninfo *var = &info->var;
struct drm_crtc *crtc;
int ret;
int i;
/* check_var zeroes pixclock; anything else means a bogus request */
if (var->pixclock != 0) {
DRM_ERROR("PIXEL CLOCK SET\n");
return -EINVAL;
}
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
ret = crtc->funcs->set_config(&fb_helper->crtc_info[i].mode_set);
if (ret) {
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
}
mutex_unlock(&dev->mode_config.mutex);
/* replay a hotplug event that arrived while fbcon was active */
if (fb_helper->delayed_hotplug) {
fb_helper->delayed_hotplug = false;
drm_fb_helper_hotplug_event(fb_helper);
}
return 0;
}
EXPORT_SYMBOL(drm_fb_helper_set_par);
/*
 * fbdev pan_display hook: update the x/y offset in each crtc's
 * mode_set and re-apply it; the fb_info offsets are only committed
 * once a set_config succeeds.  Returns the last set_config error.
 */
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info)
{
struct drm_fb_helper *fb_helper = info->par;
struct drm_device *dev = fb_helper->dev;
struct drm_mode_set *modeset;
struct drm_crtc *crtc;
int ret = 0;
int i;
mutex_lock(&dev->mode_config.mutex);
for (i = 0; i < fb_helper->crtc_count; i++) {
crtc = fb_helper->crtc_info[i].mode_set.crtc;
modeset = &fb_helper->crtc_info[i].mode_set;
modeset->x = var->xoffset;
modeset->y = var->yoffset;
/* only crtcs that actually drive connectors are panned */
if (modeset->num_connectors) {
ret = crtc->funcs->set_config(modeset);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
}
}
}
mutex_unlock(&dev->mode_config.mutex);
return ret;
}
EXPORT_SYMBOL(drm_fb_helper_pan_display);
/*
 * drm_fb_helper_single_fb_probe - size and allocate a single shared fbdev
 * @fb_helper: helper whose CRTCs/connectors already have desired modes set
 * @preferred_bpp: driver-preferred bits per pixel (32 keeps the default)
 *
 * Works out the surface/framebuffer size that covers every active CRTC,
 * honours any bpp given on the kernel command line, then calls the driver's
 * fb_probe hook and registers the resulting fbdev. Returns 0 on success or
 * a negative error code.
 */
int drm_fb_helper_single_fb_probe(struct drm_fb_helper *fb_helper,
				  int preferred_bpp)
{
	int new_fb = 0;
	int crtc_count = 0;
	int i;
	struct fb_info *info;
	struct drm_fb_helper_surface_size sizes;
	int gamma_size = 0;

	memset(&sizes, 0, sizeof(struct drm_fb_helper_surface_size));
	sizes.surface_depth = 24;
	sizes.surface_bpp = 32;
	/* fb_width/fb_height start at max so the min() logic below works */
	sizes.fb_width = (unsigned)-1;
	sizes.fb_height = (unsigned)-1;

	/* if driver picks 8 or 16 by default use that
	   for both depth/bpp */
	if (preferred_bpp != sizes.surface_bpp) {
		sizes.surface_depth = sizes.surface_bpp = preferred_bpp;
	}

	/* first up get a count of crtcs now in use and new min/maxes width/heights */
	for (i = 0; i < fb_helper->connector_count; i++) {
		struct drm_fb_helper_connector *fb_helper_conn = fb_helper->connector_info[i];
		struct drm_cmdline_mode *cmdline_mode;

		cmdline_mode = &fb_helper_conn->cmdline_mode;

		/* a command-line bpp overrides the driver preference;
		 * the first connector that specifies one wins */
		if (cmdline_mode->bpp_specified) {
			switch (cmdline_mode->bpp) {
			case 8:
				sizes.surface_depth = sizes.surface_bpp = 8;
				break;
			case 15:
				sizes.surface_depth = 15;
				sizes.surface_bpp = 16;
				break;
			case 16:
				sizes.surface_depth = sizes.surface_bpp = 16;
				break;
			case 24:
				sizes.surface_depth = sizes.surface_bpp = 24;
				break;
			case 32:
				sizes.surface_depth = 24;
				sizes.surface_bpp = 32;
				break;
			}
			break;
		}
	}

	crtc_count = 0;
	for (i = 0; i < fb_helper->crtc_count; i++) {
		struct drm_display_mode *desired_mode;
		desired_mode = fb_helper->crtc_info[i].desired_mode;

		if (desired_mode) {
			if (gamma_size == 0)
				gamma_size = fb_helper->crtc_info[i].mode_set.crtc->gamma_size;
			/* fb size = smallest desired mode (fits on every head),
			 * surface size = largest (every head can scan out) */
			if (desired_mode->hdisplay < sizes.fb_width)
				sizes.fb_width = desired_mode->hdisplay;
			if (desired_mode->vdisplay < sizes.fb_height)
				sizes.fb_height = desired_mode->vdisplay;
			if (desired_mode->hdisplay > sizes.surface_width)
				sizes.surface_width = desired_mode->hdisplay;
			if (desired_mode->vdisplay > sizes.surface_height)
				sizes.surface_height = desired_mode->vdisplay;
			crtc_count++;
		}
	}

	if (crtc_count == 0 || sizes.fb_width == -1 || sizes.fb_height == -1) {
		/* hmm everyone went away - assume VGA cable just fell out
		   and will come back later. */
		DRM_INFO("Cannot find any crtc or sizes - going 1024x768\n");
		sizes.fb_width = sizes.surface_width = 1024;
		sizes.fb_height = sizes.surface_height = 768;
	}

	/* push down into drivers */
	new_fb = (*fb_helper->funcs->fb_probe)(fb_helper, &sizes);
	if (new_fb < 0)
		return new_fb;

	info = fb_helper->fbdev;

	/* set the fb pointer */
	for (i = 0; i < fb_helper->crtc_count; i++) {
		fb_helper->crtc_info[i].mode_set.fb = fb_helper->fb;
	}

	if (new_fb) {
		info->var.pixclock = 0;
		if (register_framebuffer(info) < 0) {
			return -EINVAL;
		}

		printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node,
		       info->fix.id);
	} else {
		/* driver reused an existing fb; just reprogram the modes */
		drm_fb_helper_set_par(info);
	}

	/* Switch back to kernel console on panic */
	/* multi card linked list maybe */
	if (list_empty(&kernel_fb_helper_list)) {
		printk(KERN_INFO "drm: registered panic notifier\n");
		atomic_notifier_chain_register(&panic_notifier_list,
					       &paniced);
		register_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
	}
	if (new_fb)
		list_add(&fb_helper->kernel_fb_list, &kernel_fb_helper_list);

	return 0;
}
EXPORT_SYMBOL(drm_fb_helper_single_fb_probe);
/*
 * drm_fb_helper_fill_fix - fill out the fixed fbdev screeninfo
 * @info: fbdev info to fill
 * @pitch: framebuffer line length in bytes
 * @depth: framebuffer depth; 8 selects a pseudocolor (palette) visual,
 *         anything else is reported as truecolor
 *
 * Fixes: the original assigned info->fix.type_aux twice and ended a void
 * function with a redundant return statement.
 */
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
			    uint32_t depth)
{
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.visual = depth == 8 ? FB_VISUAL_PSEUDOCOLOR :
		FB_VISUAL_TRUECOLOR;
	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1; /* doing it in hw */
	info->fix.ypanstep = 1; /* doing it in hw */
	info->fix.ywrapstep = 0;
	info->fix.accel = FB_ACCEL_NONE;

	info->fix.line_length = pitch;
}
EXPORT_SYMBOL(drm_fb_helper_fill_fix);
/*
 * drm_fb_helper_fill_var - fill out the variable fbdev screeninfo
 * @info: fbdev info to fill
 * @fb_helper: helper whose ->fb supplies virtual size, depth and bpp
 * @fb_width: visible width to report
 * @fb_height: visible height to report
 *
 * Sets up info->var including the per-channel RGBA bitfield layout for the
 * framebuffer depth (8/15/16/24/32).
 *
 * Fix: the depth-16 (RGB565) case previously never cleared
 * info->var.transp.length, so a stale value from an earlier configuration
 * could survive; 565 has no alpha channel, so it is now explicitly zeroed
 * like in every other case.
 */
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
			    uint32_t fb_width, uint32_t fb_height)
{
	struct drm_framebuffer *fb = fb_helper->fb;
	info->pseudo_palette = fb_helper->pseudo_palette;
	info->var.xres_virtual = fb->width;
	info->var.yres_virtual = fb->height;
	info->var.bits_per_pixel = fb->bits_per_pixel;
	info->var.accel_flags = FB_ACCELF_TEXT;
	info->var.xoffset = 0;
	info->var.yoffset = 0;
	info->var.activate = FB_ACTIVATE_NOW;
	/* physical dimensions of the display are unknown */
	info->var.height = -1;
	info->var.width = -1;

	switch (fb->depth) {
	case 8:
		info->var.red.offset = 0;
		info->var.green.offset = 0;
		info->var.blue.offset = 0;
		info->var.red.length = 8; /* 8bit DAC */
		info->var.green.length = 8;
		info->var.blue.length = 8;
		info->var.transp.offset = 0;
		info->var.transp.length = 0;
		break;
	case 15:
		info->var.red.offset = 10;
		info->var.green.offset = 5;
		info->var.blue.offset = 0;
		info->var.red.length = 5;
		info->var.green.length = 5;
		info->var.blue.length = 5;
		info->var.transp.offset = 15;
		info->var.transp.length = 1;
		break;
	case 16:
		info->var.red.offset = 11;
		info->var.green.offset = 5;
		info->var.blue.offset = 0;
		info->var.red.length = 5;
		info->var.green.length = 6;
		info->var.blue.length = 5;
		info->var.transp.offset = 0;
		info->var.transp.length = 0;
		break;
	case 24:
		info->var.red.offset = 16;
		info->var.green.offset = 8;
		info->var.blue.offset = 0;
		info->var.red.length = 8;
		info->var.green.length = 8;
		info->var.blue.length = 8;
		info->var.transp.offset = 0;
		info->var.transp.length = 0;
		break;
	case 32:
		info->var.red.offset = 16;
		info->var.green.offset = 8;
		info->var.blue.offset = 0;
		info->var.red.length = 8;
		info->var.green.length = 8;
		info->var.blue.length = 8;
		info->var.transp.offset = 24;
		info->var.transp.length = 8;
		break;
	default:
		break;
	}

	info->var.xres = fb_width;
	info->var.yres = fb_height;
}
EXPORT_SYMBOL(drm_fb_helper_fill_var);
/*
 * Ask every connector attached to the helper to (re)probe its mode list,
 * bounded by maxX x maxY. Returns the total number of modes found.
 */
static int drm_fb_helper_probe_connector_modes(struct drm_fb_helper *fb_helper,
					       uint32_t maxX,
					       uint32_t maxY)
{
	int total = 0;
	int idx;

	for (idx = 0; idx < fb_helper->connector_count; idx++) {
		struct drm_connector *conn =
			fb_helper->connector_info[idx]->connector;

		total += conn->funcs->fill_modes(conn, maxX, maxY);
	}

	return total;
}
/*
 * Return the connector's first PREFERRED mode that fits within
 * width x height, or NULL if there is none.
 */
static struct drm_display_mode *drm_has_preferred_mode(struct drm_fb_helper_connector *fb_connector, int width, int height)
{
	struct drm_display_mode *candidate;

	list_for_each_entry(candidate, &fb_connector->connector->modes, head) {
		if (drm_mode_width(candidate) > width)
			continue;
		if (drm_mode_height(candidate) > height)
			continue;
		if (candidate->type & DRM_MODE_TYPE_PREFERRED)
			return candidate;
	}
	return NULL;
}
/* True iff a mode was specified for this connector on the kernel command line. */
static bool drm_has_cmdline_mode(struct drm_fb_helper_connector *fb_connector)
{
	return fb_connector->cmdline_mode.specified;
}
/*
 * drm_pick_cmdline_mode - resolve the command-line mode for a connector
 * @fb_helper_conn: connector to look up
 * @width: maximum width (currently unused by the matching logic)
 * @height: maximum height (currently unused by the matching logic)
 *
 * Returns the probed mode matching the command-line specification, a newly
 * created CVT mode appended to the connector's mode list, or NULL if
 * nothing was specified (or mode creation failed).
 *
 * Fix: drm_mode_create_from_cmdline_mode() can return NULL; the original
 * unconditionally did list_add(&mode->head, ...), dereferencing NULL.
 */
static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_fb_helper_connector *fb_helper_conn,
						      int width, int height)
{
	struct drm_cmdline_mode *cmdline_mode;
	struct drm_display_mode *mode = NULL;

	cmdline_mode = &fb_helper_conn->cmdline_mode;
	if (cmdline_mode->specified == false)
		return mode;

	/* attempt to find a matching mode in the list of modes
	 * we have gotten so far, if not add a CVT mode that conforms
	 */
	if (cmdline_mode->rb || cmdline_mode->margins)
		goto create_mode;

	list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
		/* check width/height */
		if (mode->hdisplay != cmdline_mode->xres ||
		    mode->vdisplay != cmdline_mode->yres)
			continue;

		if (cmdline_mode->refresh_specified) {
			if (mode->vrefresh != cmdline_mode->refresh)
				continue;
		}

		if (cmdline_mode->interlace) {
			if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
				continue;
		}
		return mode;
	}

create_mode:
	mode = drm_mode_create_from_cmdline_mode(fb_helper_conn->connector->dev,
						 cmdline_mode);
	/* mode creation can fail; never list_add a NULL mode */
	if (mode)
		list_add(&mode->head, &fb_helper_conn->connector->modes);
	return mode;
}
/*
 * Decide whether a connector should be considered enabled.
 * strict: require a positive "connected" probe result.
 * lenient: anything not definitely disconnected (e.g. unknown) counts.
 */
static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
{
	if (strict)
		return connector->status == connector_status_connected;

	return connector->status != connector_status_disconnected;
}
/*
 * Fill the per-connector enabled[] array. First pass is strict (only
 * connectors that probed as connected); if that enables nothing, retry
 * leniently so connectors with unknown status get a chance.
 */
static void drm_enable_connectors(struct drm_fb_helper *fb_helper,
				  bool *enabled)
{
	struct drm_connector *connector;
	bool found_any = false;
	int idx;

	for (idx = 0; idx < fb_helper->connector_count; idx++) {
		connector = fb_helper->connector_info[idx]->connector;
		enabled[idx] = drm_connector_enabled(connector, true);
		DRM_DEBUG_KMS("connector %d enabled? %s\n", connector->base.id,
			      enabled[idx] ? "yes" : "no");
		if (enabled[idx])
			found_any = true;
	}

	if (found_any)
		return;

	for (idx = 0; idx < fb_helper->connector_count; idx++) {
		connector = fb_helper->connector_info[idx]->connector;
		enabled[idx] = drm_connector_enabled(connector, false);
	}
}
/*
 * drm_target_cloned - look for one mode usable on every enabled connector
 * @fb_helper: helper to configure
 * @modes: per-connector array receiving the chosen clone mode
 * @enabled: per-connector enable flags
 * @width: maximum fb width
 * @height: maximum fb height
 *
 * Cloning is only attempted with a single CRTC and more than one enabled
 * connector. Tries identical command-line modes first, then a 1024x768
 * DMT mode present on every connector. Returns true if a common mode was
 * found (stored in modes[]).
 */
static bool drm_target_cloned(struct drm_fb_helper *fb_helper,
			      struct drm_display_mode **modes,
			      bool *enabled, int width, int height)
{
	int count, i, j;
	bool can_clone = false;
	struct drm_fb_helper_connector *fb_helper_conn;
	struct drm_display_mode *dmt_mode, *mode;

	/* only contemplate cloning in the single crtc case */
	if (fb_helper->crtc_count > 1)
		return false;

	count = 0;
	for (i = 0; i < fb_helper->connector_count; i++) {
		if (enabled[i])
			count++;
	}

	/* only contemplate cloning if more than one connector is enabled */
	if (count <= 1)
		return false;

	/* check the command line or if nothing common pick 1024x768 */
	can_clone = true;
	for (i = 0; i < fb_helper->connector_count; i++) {
		if (!enabled[i])
			continue;
		fb_helper_conn = fb_helper->connector_info[i];
		modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
		if (!modes[i]) {
			can_clone = false;
			break;
		}
		/* every previously picked cmdline mode must be identical */
		for (j = 0; j < i; j++) {
			if (!enabled[j])
				continue;
			if (!drm_mode_equal(modes[j], modes[i]))
				can_clone = false;
		}
	}

	if (can_clone) {
		DRM_DEBUG_KMS("can clone using command line\n");
		return true;
	}

	/* try and find a 1024x768 mode on each connector */
	can_clone = true;
	dmt_mode = drm_mode_find_dmt(fb_helper->dev, 1024, 768, 60);
	/* NOTE(review): assumes drm_mode_find_dmt() cannot return NULL here;
	 * otherwise drm_mode_equal() below would be handed a NULL mode -
	 * confirm against its definition. */
	for (i = 0; i < fb_helper->connector_count; i++) {
		if (!enabled[i])
			continue;
		fb_helper_conn = fb_helper->connector_info[i];
		list_for_each_entry(mode, &fb_helper_conn->connector->modes, head) {
			if (drm_mode_equal(mode, dmt_mode))
				modes[i] = mode;
		}
		if (!modes[i])
			can_clone = false;
	}

	if (can_clone) {
		DRM_DEBUG_KMS("can clone using 1024x768\n");
		return true;
	}
	DRM_INFO("kms: can't enable cloning when we probably wanted to.\n");
	return false;
}
/*
 * drm_target_preferred - pick one mode per enabled connector
 * @fb_helper: helper to configure
 * @modes: per-connector array receiving the chosen mode
 * @enabled: per-connector enable flags
 * @width: maximum fb width
 * @height: maximum fb height
 *
 * Preference order per connector: command-line mode, then the connector's
 * preferred mode, then the first entry of its mode list. Always returns
 * true; modes[i] may still be NULL for a connector with no modes.
 */
static bool drm_target_preferred(struct drm_fb_helper *fb_helper,
				 struct drm_display_mode **modes,
				 bool *enabled, int width, int height)
{
	struct drm_fb_helper_connector *fb_helper_conn;
	int i;

	for (i = 0; i < fb_helper->connector_count; i++) {
		fb_helper_conn = fb_helper->connector_info[i];

		if (enabled[i] == false)
			continue;

		DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
			      fb_helper_conn->connector->base.id);

		/* got for command line mode first */
		modes[i] = drm_pick_cmdline_mode(fb_helper_conn, width, height);
		if (!modes[i]) {
			DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
				      fb_helper_conn->connector->base.id);
			modes[i] = drm_has_preferred_mode(fb_helper_conn, width, height);
		}
		/* No preferred modes, pick one off the list */
		if (!modes[i] && !list_empty(&fb_helper_conn->connector->modes)) {
			/* list_for_each_entry + immediate break grabs the
			 * first entry of the mode list */
			list_for_each_entry(modes[i], &fb_helper_conn->connector->modes, head)
				break;
		}
		DRM_DEBUG_KMS("found mode %s\n", modes[i] ? modes[i]->name :
			      "none");
	}
	return true;
}
/*
 * drm_pick_crtcs - recursively assign CRTCs to connectors
 * @fb_helper: helper being configured
 * @best_crtcs: best CRTC assignment found so far (output)
 * @modes: per-connector chosen modes
 * @n: index of the connector to place in this recursion step
 * @width: maximum fb width
 * @height: maximum fb height
 *
 * Backtracking search: for connector n, try every CRTC its best encoder
 * can drive, score the assignment (connected / cmdline / preferred mode
 * each add a point) plus the best score of the remaining connectors, and
 * keep the highest-scoring combination. Returns the best score for
 * connectors n..connector_count-1.
 */
static int drm_pick_crtcs(struct drm_fb_helper *fb_helper,
			  struct drm_fb_helper_crtc **best_crtcs,
			  struct drm_display_mode **modes,
			  int n, int width, int height)
{
	int c, o;
	struct drm_device *dev = fb_helper->dev;
	struct drm_connector *connector;
	struct drm_connector_helper_funcs *connector_funcs;
	struct drm_encoder *encoder;
	struct drm_fb_helper_crtc *best_crtc;
	int my_score, best_score, score;
	struct drm_fb_helper_crtc **crtcs, *crtc;
	struct drm_fb_helper_connector *fb_helper_conn;

	/* past the last connector: recursion terminates */
	if (n == fb_helper->connector_count)
		return 0;

	fb_helper_conn = fb_helper->connector_info[n];
	connector = fb_helper_conn->connector;

	best_crtcs[n] = NULL;
	best_crtc = NULL;
	/* baseline: leave this connector unassigned */
	best_score = drm_pick_crtcs(fb_helper, best_crtcs, modes, n+1, width, height);
	if (modes[n] == NULL)
		return best_score;

	crtcs = kzalloc(dev->mode_config.num_connector *
			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
	if (!crtcs)
		return best_score;

	my_score = 1;
	if (connector->status == connector_status_connected)
		my_score++;
	if (drm_has_cmdline_mode(fb_helper_conn))
		my_score++;
	if (drm_has_preferred_mode(fb_helper_conn, width, height))
		my_score++;

	connector_funcs = connector->helper_private;
	encoder = connector_funcs->best_encoder(connector);
	if (!encoder)
		goto out;

	/* select a crtc for this connector and then attempt to configure
	   remaining connectors */
	for (c = 0; c < fb_helper->crtc_count; c++) {
		crtc = &fb_helper->crtc_info[c];

		if ((encoder->possible_crtcs & (1 << c)) == 0) {
			continue;
		}

		/* was this CRTC already claimed by an earlier connector? */
		for (o = 0; o < n; o++)
			if (best_crtcs[o] == crtc)
				break;

		if (o < n) {
			/* ignore cloning unless only a single crtc */
			if (fb_helper->crtc_count > 1)
				continue;

			if (!drm_mode_equal(modes[o], modes[n]))
				continue;
		}

		/* crtcs[n] is set first; the memcpy below only overwrites
		 * entries 0..n-1 with the earlier assignments */
		crtcs[n] = crtc;
		memcpy(crtcs, best_crtcs, n * sizeof(struct drm_fb_helper_crtc *));
		score = my_score + drm_pick_crtcs(fb_helper, crtcs, modes, n + 1,
						  width, height);
		if (score > best_score) {
			best_crtc = crtc;
			best_score = score;
			memcpy(best_crtcs, crtcs,
			       dev->mode_config.num_connector *
			       sizeof(struct drm_fb_helper_crtc *));
		}
	}
out:
	kfree(crtcs);
	return best_score;
}
/*
 * drm_setup_crtcs - compute the initial connector -> CRTC -> mode mapping
 * @fb_helper: helper to configure
 *
 * Picks a mode per connector (clone config first, then preferred modes),
 * assigns CRTCs via drm_pick_crtcs() and fills the per-CRTC modesets for
 * later use by set_par/initial_config.
 *
 * Fixes: the three kcalloc() results were previously used without NULL
 * checks, and &fb_crtc->mode_set was computed before the fb_crtc NULL
 * check (undefined behavior when crtcs[i] is NULL).
 */
static void drm_setup_crtcs(struct drm_fb_helper *fb_helper)
{
	struct drm_device *dev = fb_helper->dev;
	struct drm_fb_helper_crtc **crtcs;
	struct drm_display_mode **modes;
	struct drm_encoder *encoder;
	struct drm_mode_set *modeset;
	bool *enabled;
	int width, height;
	int i, ret;

	DRM_DEBUG_KMS("\n");

	width = dev->mode_config.max_width;
	height = dev->mode_config.max_height;

	/* clean out all the encoder/crtc combos */
	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		encoder->crtc = NULL;
	}

	crtcs = kcalloc(dev->mode_config.num_connector,
			sizeof(struct drm_fb_helper_crtc *), GFP_KERNEL);
	modes = kcalloc(dev->mode_config.num_connector,
			sizeof(struct drm_display_mode *), GFP_KERNEL);
	enabled = kcalloc(dev->mode_config.num_connector,
			  sizeof(bool), GFP_KERNEL);
	if (!crtcs || !modes || !enabled)
		goto out;

	drm_enable_connectors(fb_helper, enabled);

	ret = drm_target_cloned(fb_helper, modes, enabled, width, height);
	if (!ret) {
		ret = drm_target_preferred(fb_helper, modes, enabled, width, height);
		if (!ret)
			DRM_ERROR("Unable to find initial modes\n");
	}

	DRM_DEBUG_KMS("picking CRTCs for %dx%d config\n", width, height);

	drm_pick_crtcs(fb_helper, crtcs, modes, 0, width, height);

	/* need to set the modesets up here for use later */
	/* fill out the connector<->crtc mappings into the modesets */
	for (i = 0; i < fb_helper->crtc_count; i++) {
		modeset = &fb_helper->crtc_info[i].mode_set;
		modeset->num_connectors = 0;
	}

	for (i = 0; i < fb_helper->connector_count; i++) {
		struct drm_display_mode *mode = modes[i];
		struct drm_fb_helper_crtc *fb_crtc = crtcs[i];

		if (mode && fb_crtc) {
			/* only touch fb_crtc after the NULL check above */
			modeset = &fb_crtc->mode_set;

			DRM_DEBUG_KMS("desired mode %s set on crtc %d\n",
				      mode->name, fb_crtc->mode_set.crtc->base.id);
			fb_crtc->desired_mode = mode;
			if (modeset->mode)
				drm_mode_destroy(dev, modeset->mode);
			modeset->mode = drm_mode_duplicate(dev,
							   fb_crtc->desired_mode);
			modeset->connectors[modeset->num_connectors++] = fb_helper->connector_info[i]->connector;
		}
	}

out:
	/* kfree(NULL) is a no-op, so partial allocation failures are fine */
	kfree(crtcs);
	kfree(modes);
	kfree(enabled);
}
/**
 * drm_fb_helper_initial_config - setup a sane initial connector configuration
 * @fb_helper: fb_helper device struct
 * @bpp_sel: bpp value to use for the framebuffer configuration
 *
 * LOCKING:
 * Called at init time, must take mode config lock.
 *
 * Scan the CRTCs and connectors and try to put together an initial setup.
 * At the moment, this is a cloned configuration across all heads with
 * a new framebuffer object as the backing store.
 *
 * RETURNS:
 * Zero if everything went ok, nonzero otherwise.
 */
bool drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel)
{
	struct drm_device *dev = fb_helper->dev;
	int mode_count;

	/* disable all the possible outputs/crtcs before entering KMS mode */
	drm_helper_disable_unused_functions(fb_helper->dev);

	drm_fb_helper_parse_command_line(fb_helper);

	mode_count = drm_fb_helper_probe_connector_modes(fb_helper,
							 dev->mode_config.max_width,
							 dev->mode_config.max_height);
	/*
	 * we shouldn't end up with no modes here.
	 */
	if (mode_count == 0)
		printk(KERN_INFO "No connectors reported connected with modes\n");

	drm_setup_crtcs(fb_helper);

	return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
}
EXPORT_SYMBOL(drm_fb_helper_initial_config);
/**
 * drm_fb_helper_hotplug_event - respond to a hotplug notification by
 * probing all the outputs attached to the fb.
 * @fb_helper: the drm_fb_helper
 *
 * LOCKING:
 * Called at runtime, must take mode config lock.
 *
 * Scan the connectors attached to the fb_helper and try to put together a
 * setup after notification of a change in output configuration.
 *
 * RETURNS:
 * 0 on success and a non-zero error code otherwise.
 */
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper)
{
	struct drm_device *dev = fb_helper->dev;
	int count = 0;
	u32 max_width, max_height, bpp_sel;
	bool bound = false, crtcs_bound = false;
	struct drm_crtc *crtc;

	/* nothing to do until an fb has been set up */
	if (!fb_helper->fb)
		return 0;

	mutex_lock(&dev->mode_config.mutex);
	/* figure out whether any CRTC scans out our fb or someone else's */
	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		if (crtc->fb)
			crtcs_bound = true;
		if (crtc->fb == fb_helper->fb)
			bound = true;
	}

	if (!bound && crtcs_bound) {
		/* another client (e.g. X) owns the CRTCs: postpone the
		 * reconfiguration until fbdev takes over again (handled in
		 * drm_fb_helper_set_par via delayed_hotplug) */
		fb_helper->delayed_hotplug = true;
		mutex_unlock(&dev->mode_config.mutex);
		return 0;
	}
	DRM_DEBUG_KMS("\n");

	max_width = fb_helper->fb->width;
	max_height = fb_helper->fb->height;
	bpp_sel = fb_helper->fb->bits_per_pixel;

	/* count receives the number of probed modes but is not used further */
	count = drm_fb_helper_probe_connector_modes(fb_helper, max_width,
						    max_height);
	drm_setup_crtcs(fb_helper);
	mutex_unlock(&dev->mode_config.mutex);

	return drm_fb_helper_single_fb_probe(fb_helper, bpp_sel);
}
EXPORT_SYMBOL(drm_fb_helper_hotplug_event);
/* The Kconfig DRM_KMS_HELPER selects FRAMEBUFFER_CONSOLE (if !EXPERT)
* but the module doesn't depend on any fb console symbols. At least
* attempt to load fbcon to avoid leaving the system without a usable console.
*/
#if defined(CONFIG_FRAMEBUFFER_CONSOLE_MODULE) && !defined(CONFIG_EXPERT)
/* Best-effort attempt to get fbcon loaded alongside this helper module. */
static int __init drm_fb_helper_modinit(void)
{
	const char *name = "fbcon";
	struct module *fbcon;

	/* module_mutex protects find_module's walk of the module list */
	mutex_lock(&module_mutex);
	fbcon = find_module(name);
	mutex_unlock(&module_mutex);

	/* fire-and-forget: don't block module init on the modprobe */
	if (!fbcon)
		request_module_nowait(name);
	return 0;
}

module_init(drm_fb_helper_modinit);
#endif
| gpl-2.0 |
xcstacy/mako-linaro | drivers/media/video/cx88/cx88-blackbird.c | 5468 | 38576 | /*
*
* Support for a cx23416 mpeg encoder via cx2388x host port.
* "blackbird" reference design.
*
* (c) 2004 Jelle Foks <jelle@foks.us>
* (c) 2004 Gerd Knorr <kraxel@bytesex.org>
*
* (c) 2005-2006 Mauro Carvalho Chehab <mchehab@infradead.org>
* - video_ioctl2 conversion
*
* Includes parts from the ivtv driver <http://sourceforge.net/projects/ivtv/>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <media/v4l2-common.h>
#include <media/v4l2-ioctl.h>
#include <media/cx2341x.h>
#include "cx88.h"
MODULE_DESCRIPTION("driver for cx2388x/cx23416 based mpeg encoder cards");
MODULE_AUTHOR("Jelle Foks <jelle@foks.us>, Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");
MODULE_VERSION(CX88_VERSION);
static unsigned int mpegbufs = 32;
module_param(mpegbufs,int,0644);
MODULE_PARM_DESC(mpegbufs,"number of mpeg buffers, range 2-32");
static unsigned int debug;
module_param(debug,int,0644);
MODULE_PARM_DESC(debug,"enable debug messages [blackbird]");
/* Debug printk; expects a local variable named 'dev' at the call site.
 * Wrapped in do/while(0) so the macro is safe inside unbraced if/else
 * (the bare-if form could capture a following else clause). */
#define dprintk(level, fmt, arg...) do {				\
	if (debug >= level)						\
		printk(KERN_DEBUG "%s/2-bb: " fmt, dev->core->name , ## arg); \
} while (0)
/* ------------------------------------------------------------------ */

/* Size of the cx23416 encoder firmware image in bytes */
#define BLACKBIRD_FIRM_IMAGE_SIZE 376836

/* defines below are from ivtv-driver.h */
#define IVTV_CMD_HW_BLOCKS_RST 0xFFFFFFFF

/* Firmware API commands */
#define IVTV_API_STD_TIMEOUT 500

/* Constants mirroring the cx23416 firmware API argument values. */
enum blackbird_capture_type {
	BLACKBIRD_MPEG_CAPTURE,
	BLACKBIRD_RAW_CAPTURE,
	BLACKBIRD_RAW_PASSTHRU_CAPTURE
};
enum blackbird_capture_bits {
	BLACKBIRD_RAW_BITS_NONE             = 0x00,
	BLACKBIRD_RAW_BITS_YUV_CAPTURE      = 0x01,
	BLACKBIRD_RAW_BITS_PCM_CAPTURE      = 0x02,
	BLACKBIRD_RAW_BITS_VBI_CAPTURE      = 0x04,
	BLACKBIRD_RAW_BITS_PASSTHRU_CAPTURE = 0x08,
	BLACKBIRD_RAW_BITS_TO_HOST_CAPTURE  = 0x10
};
enum blackbird_capture_end {
	BLACKBIRD_END_AT_GOP, /* stop at the end of gop, generate irq */
	BLACKBIRD_END_NOW, /* stop immediately, no irq */
};
enum blackbird_framerate {
	BLACKBIRD_FRAMERATE_NTSC_30, /* NTSC: 30fps */
	BLACKBIRD_FRAMERATE_PAL_25   /* PAL: 25fps */
};
enum blackbird_stream_port {
	BLACKBIRD_OUTPUT_PORT_MEMORY,
	BLACKBIRD_OUTPUT_PORT_STREAMING,
	BLACKBIRD_OUTPUT_PORT_SERIAL
};
enum blackbird_data_xfer_status {
	BLACKBIRD_MORE_BUFFERS_FOLLOW,
	BLACKBIRD_LAST_BUFFER,
};
enum blackbird_picture_mask {
	BLACKBIRD_PICTURE_MASK_NONE,
	BLACKBIRD_PICTURE_MASK_I_FRAMES,
	BLACKBIRD_PICTURE_MASK_I_P_FRAMES = 0x3,
	BLACKBIRD_PICTURE_MASK_ALL_FRAMES = 0x7,
};
enum blackbird_vbi_mode_bits {
	BLACKBIRD_VBI_BITS_SLICED,
	BLACKBIRD_VBI_BITS_RAW,
};
enum blackbird_vbi_insertion_bits {
	BLACKBIRD_VBI_BITS_INSERT_IN_XTENSION_USR_DATA,
	BLACKBIRD_VBI_BITS_INSERT_IN_PRIVATE_PACKETS = 0x1 << 1,
	BLACKBIRD_VBI_BITS_SEPARATE_STREAM = 0x2 << 1,
	BLACKBIRD_VBI_BITS_SEPARATE_STREAM_USR_DATA = 0x4 << 1,
	BLACKBIRD_VBI_BITS_SEPARATE_STREAM_PRV_DATA = 0x5 << 1,
};
enum blackbird_dma_unit {
	BLACKBIRD_DMA_BYTES,
	BLACKBIRD_DMA_FRAMES,
};
enum blackbird_dma_transfer_status_bits {
	BLACKBIRD_DMA_TRANSFER_BITS_DONE = 0x01,
	BLACKBIRD_DMA_TRANSFER_BITS_ERROR = 0x04,
	BLACKBIRD_DMA_TRANSFER_BITS_LL_ERROR = 0x10,
};
enum blackbird_pause {
	BLACKBIRD_PAUSE_ENCODING,
	BLACKBIRD_RESUME_ENCODING,
};
enum blackbird_copyright {
	BLACKBIRD_COPYRIGHT_OFF,
	BLACKBIRD_COPYRIGHT_ON,
};
enum blackbird_notification_type {
	BLACKBIRD_NOTIFICATION_REFRESH,
};
enum blackbird_notification_status {
	BLACKBIRD_NOTIFICATION_OFF,
	BLACKBIRD_NOTIFICATION_ON,
};
enum blackbird_notification_mailbox {
	BLACKBIRD_NOTIFICATION_NO_MAILBOX = -1,
};
/* Per-field line counts depend on the video decoder feeding the encoder. */
enum blackbird_field1_lines {
	BLACKBIRD_FIELD1_SAA7114 = 0x00EF, /* 239 */
	BLACKBIRD_FIELD1_SAA7115 = 0x00F0, /* 240 */
	BLACKBIRD_FIELD1_MICRONAS = 0x0105, /* 261 */
};
enum blackbird_field2_lines {
	BLACKBIRD_FIELD2_SAA7114 = 0x00EF, /* 239 */
	BLACKBIRD_FIELD2_SAA7115 = 0x00F0, /* 240 */
	BLACKBIRD_FIELD2_MICRONAS = 0x0106, /* 262 */
};
enum blackbird_custom_data_type {
	BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
	BLACKBIRD_CUSTOM_PRIVATE_PACKET,
};
enum blackbird_mute {
	BLACKBIRD_UNMUTE,
	BLACKBIRD_MUTE,
};
enum blackbird_mute_video_mask {
	BLACKBIRD_MUTE_VIDEO_V_MASK = 0x0000FF00,
	BLACKBIRD_MUTE_VIDEO_U_MASK = 0x00FF0000,
	BLACKBIRD_MUTE_VIDEO_Y_MASK = 0xFF000000,
};
enum blackbird_mute_video_shift {
	BLACKBIRD_MUTE_VIDEO_V_SHIFT = 8,
	BLACKBIRD_MUTE_VIDEO_U_SHIFT = 16,
	BLACKBIRD_MUTE_VIDEO_Y_SHIFT = 24,
};

/* Registers */
#define IVTV_REG_ENC_SDRAM_REFRESH (0x07F8 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_ENC_SDRAM_PRECHARGE (0x07FC /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_SPU (0x9050 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_HW_BLOCKS (0x9054 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_VPU (0x9058 /*| IVTV_REG_OFFSET*/)
#define IVTV_REG_APU (0xA064 /*| IVTV_REG_OFFSET*/)
/* ------------------------------------------------------------------ */
/* Reset and configure the cx2388x host port used to talk to the cx23416. */
static void host_setup(struct cx88_core *core)
{
	/* toggle reset of the host */
	cx_write(MO_GPHST_SOFT_RST, 1);
	udelay(100);
	cx_write(MO_GPHST_SOFT_RST, 0);
	udelay(100);

	/* host port setup: wait-state, transfer, width, handshake and mux
	 * configuration for the parallel host interface */
	cx_write(MO_GPHST_WSC, 0x44444444U);
	cx_write(MO_GPHST_XFR, 0);
	cx_write(MO_GPHST_WDTH, 15);
	cx_write(MO_GPHST_HDSHK, 0);
	cx_write(MO_GPHST_MUX16, 0x44448888U);
	cx_write(MO_GPHST_MODE, 0);
}
/* ------------------------------------------------------------------ */
/* cx23416 host-port access registers as seen from the cx2388x side:
 * MDATA0-3/MADDR0-2 address encoder memory (byte-wide, LSB first),
 * RDATA0-3/RADDR0-1/RRDWR address encoder registers. */
#define P1_MDATA0 0x390000
#define P1_MDATA1 0x390001
#define P1_MDATA2 0x390002
#define P1_MDATA3 0x390003
#define P1_MADDR2 0x390004
#define P1_MADDR1 0x390005
#define P1_MADDR0 0x390006
#define P1_RDATA0 0x390008
#define P1_RDATA1 0x390009
#define P1_RDATA2 0x39000A
#define P1_RDATA3 0x39000B
#define P1_RADDR0 0x39000C
#define P1_RADDR1 0x39000D
#define P1_RRDWR 0x39000E
/*
 * Poll GPIO0 bit 1 until it reaches the requested state (~1 ms budget).
 * Returns 0 when the bit matches, -1 on timeout.
 */
static int wait_ready_gpio0_bit1(struct cx88_core *core, u32 state)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(1);
	u32 want = state ? 2 : 0;

	while (1) {
		if ((cx_read(MO_GP0_IO) & 2) == want)
			return 0;
		if (time_after(jiffies, deadline))
			return -1;
		udelay(1);
	}
}
/*
 * Write one 32-bit word into cx23416 memory via the host port.
 * The data bytes must be latched before the address bytes; writing
 * MADDR0 last triggers the transfer, and the trailing reads flush the
 * posted writes before polling for completion.
 */
static int memory_write(struct cx88_core *core, u32 address, u32 value)
{
	/* Warning: address is dword address (4 bytes) */
	cx_writeb(P1_MDATA0, (unsigned int)value);
	cx_writeb(P1_MDATA1, (unsigned int)(value >> 8));
	cx_writeb(P1_MDATA2, (unsigned int)(value >> 16));
	cx_writeb(P1_MDATA3, (unsigned int)(value >> 24));
	/* 0x40 in MADDR2 selects the write operation */
	cx_writeb(P1_MADDR2, (unsigned int)(address >> 16) | 0x40);
	cx_writeb(P1_MADDR1, (unsigned int)(address >> 8));
	cx_writeb(P1_MADDR0, (unsigned int)address);
	cx_read(P1_MDATA0);
	cx_read(P1_MADDR0);

	return wait_ready_gpio0_bit1(core,1);
}
/*
 * Read one 32-bit word from cx23416 memory via the host port.
 * The address write (MADDR0 last) starts the access; after the ready
 * poll, each data byte is fetched with a dummy write followed by a read
 * of the same MDATA register. Returns the wait status; *value is filled
 * regardless.
 */
static int memory_read(struct cx88_core *core, u32 address, u32 *value)
{
	int retval;
	u32 val;

	/* Warning: address is dword address (4 bytes) */
	/* clearing 0xC0 in MADDR2 selects the read operation */
	cx_writeb(P1_MADDR2, (unsigned int)(address >> 16) & ~0xC0);
	cx_writeb(P1_MADDR1, (unsigned int)(address >> 8));
	cx_writeb(P1_MADDR0, (unsigned int)address);
	cx_read(P1_MADDR0);
	retval = wait_ready_gpio0_bit1(core,1);

	cx_writeb(P1_MDATA3, 0);
	val     = (unsigned char)cx_read(P1_MDATA3) << 24;
	cx_writeb(P1_MDATA2, 0);
	val    |= (unsigned char)cx_read(P1_MDATA2) << 16;
	cx_writeb(P1_MDATA1, 0);
	val    |= (unsigned char)cx_read(P1_MDATA1) << 8;
	cx_writeb(P1_MDATA0, 0);
	val    |= (unsigned char)cx_read(P1_MDATA0);

	*value  = val;
	return retval;
}
/*
 * Write one 32-bit word to a cx23416 register via the host port.
 * RRDWR=1 selects the write direction; the trailing reads flush posted
 * writes before polling for completion.
 */
static int register_write(struct cx88_core *core, u32 address, u32 value)
{
	cx_writeb(P1_RDATA0, (unsigned int)value);
	cx_writeb(P1_RDATA1, (unsigned int)(value >> 8));
	cx_writeb(P1_RDATA2, (unsigned int)(value >> 16));
	cx_writeb(P1_RDATA3, (unsigned int)(value >> 24));
	cx_writeb(P1_RADDR0, (unsigned int)address);
	cx_writeb(P1_RADDR1, (unsigned int)(address >> 8));
	cx_writeb(P1_RRDWR, 1);
	cx_read(P1_RDATA0);
	cx_read(P1_RADDR0);

	return wait_ready_gpio0_bit1(core,1);
}
/*
 * Read one 32-bit word from a cx23416 register via the host port.
 * RRDWR=0 selects the read direction; after the ready poll the four
 * data bytes are assembled LSB first. Returns the wait status; *value
 * is filled regardless.
 */
static int register_read(struct cx88_core *core, u32 address, u32 *value)
{
	int retval;
	u32 val;

	cx_writeb(P1_RADDR0, (unsigned int)address);
	cx_writeb(P1_RADDR1, (unsigned int)(address >> 8));
	cx_writeb(P1_RRDWR, 0);
	cx_read(P1_RADDR0);
	retval = wait_ready_gpio0_bit1(core,1);

	val     = (unsigned char)cx_read(P1_RDATA0);
	val    |= (unsigned char)cx_read(P1_RDATA1) << 8;
	val    |= (unsigned char)cx_read(P1_RDATA2) << 16;
	val    |= (unsigned char)cx_read(P1_RDATA3) << 24;

	*value  = val;
	return retval;
}
/* ------------------------------------------------------------------ */
/*
 * blackbird_mbox_func - execute one firmware API command via the mailbox
 * @priv: struct cx8802_dev (void* to match the cx2341x callback signature)
 * @command: firmware command code
 * @in: number of input arguments in data[]
 * @out: number of output values to read back into data[]
 * @data: argument/result buffer of CX2341X_MBOX_MAX_DATA words
 *
 * Mailbox layout (word offsets from dev->mailbox): 0 = flags, 1 = command,
 * 2 = result, 3 = timeout, 4.. = arguments/results. Flag bit 0 marks the
 * box in use, bit 1 "command ready", bit 2 "result ready".
 * Returns the firmware result code, or -1 on mailbox errors/timeouts.
 */
static int blackbird_mbox_func(void *priv, u32 command, int in, int out, u32 data[CX2341X_MBOX_MAX_DATA])
{
	struct cx8802_dev *dev = priv;
	unsigned long timeout;
	u32 value, flag, retval;
	int i;

	dprintk(1,"%s: 0x%X\n", __func__, command);

	/* this may not be 100% safe if we can't read any memory location
	   without side effects */
	/* the word before the mailbox holds the last signature word */
	memory_read(dev->core, dev->mailbox - 4, &value);
	if (value != 0x12345678) {
		dprintk(0, "Firmware and/or mailbox pointer not initialized or corrupted\n");
		return -1;
	}

	memory_read(dev->core, dev->mailbox, &flag);
	if (flag) {
		dprintk(0, "ERROR: Mailbox appears to be in use (%x)\n", flag);
		return -1;
	}

	flag |= 1; /* tell 'em we're working on it */
	memory_write(dev->core, dev->mailbox, flag);

	/* write command + args + fill remaining with zeros */
	memory_write(dev->core, dev->mailbox + 1, command); /* command code */
	memory_write(dev->core, dev->mailbox + 3, IVTV_API_STD_TIMEOUT); /* timeout */
	for (i = 0; i < in; i++) {
		memory_write(dev->core, dev->mailbox + 4 + i, data[i]);
		dprintk(1, "API Input %d = %d\n", i, data[i]);
	}
	for (; i < CX2341X_MBOX_MAX_DATA; i++)
		memory_write(dev->core, dev->mailbox + 4 + i, 0);

	flag |= 3; /* tell 'em we're done writing */
	memory_write(dev->core, dev->mailbox, flag);

	/* wait for firmware to handle the API command */
	timeout = jiffies + msecs_to_jiffies(10);
	for (;;) {
		memory_read(dev->core, dev->mailbox, &flag);
		if (0 != (flag & 4))
			break;
		if (time_after(jiffies,timeout)) {
			dprintk(0, "ERROR: API Mailbox timeout\n");
			return -1;
		}
		udelay(10);
	}

	/* read output values */
	for (i = 0; i < out; i++) {
		memory_read(dev->core, dev->mailbox + 4 + i, data + i);
		dprintk(1, "API Output %d = %d\n", i, data[i]);
	}

	memory_read(dev->core, dev->mailbox + 2, &retval);
	dprintk(1, "API result = %d\n",retval);

	/* release the mailbox for the next command */
	flag = 0;
	memory_write(dev->core, dev->mailbox, flag);
	return retval;
}
/* ------------------------------------------------------------------ */
/* We don't need to call the API often, so using just one mailbox will probably suffice */
/*
 * blackbird_api_cmd - varargs convenience wrapper around blackbird_mbox_func
 * @dev: encoder device
 * @command: CX2341X_ENC_* command code
 * @inputcnt: number of u32 input arguments that follow
 * @outputcnt: number of int* output pointers following the inputs
 *
 * Returns the firmware result code (or -1 on mailbox errors).
 */
static int blackbird_api_cmd(struct cx8802_dev *dev, u32 command,
			     u32 inputcnt, u32 outputcnt, ...)
{
	u32 data[CX2341X_MBOX_MAX_DATA];
	va_list vargs;
	int i, err;

	va_start(vargs, outputcnt);

	for (i = 0; i < inputcnt; i++) {
		data[i] = va_arg(vargs, int);
	}
	err = blackbird_mbox_func(dev, command, inputcnt, outputcnt, data);
	/* outputs are copied back even on error, mirroring the mailbox */
	for (i = 0; i < outputcnt; i++) {
		int *vptr = va_arg(vargs, int *);
		*vptr = data[i];
	}
	va_end(vargs);
	return err;
}
/*
 * Scan encoder memory for the 4-word mailbox signature. Returns the dword
 * address just past the signature (the mailbox itself), or -1 if the
 * signature is not found within the firmware image.
 */
static int blackbird_find_mailbox(struct cx8802_dev *dev)
{
	static const u32 signature[] = {
		0x12345678, 0x34567812, 0x56781234, 0x78123456
	};
	int matched = 0;
	u32 word;
	int addr;

	for (addr = 0; addr < BLACKBIRD_FIRM_IMAGE_SIZE; addr++) {
		memory_read(dev->core, addr, &word);
		matched = (word == signature[matched]) ? matched + 1 : 0;
		if (matched == 4) {
			dprintk(1, "Mailbox signature found\n");
			return addr + 1;
		}
	}

	dprintk(0, "Mailbox signature values not found!\n");
	return -1;
}
/*
 * blackbird_load_firmware - upload and verify the cx23416 encoder firmware
 * @dev: encoder device
 *
 * Holds the VPU/APU in reset, requests the firmware image, validates its
 * size and magic header, writes it word-by-word into encoder memory,
 * verifies it via a complement checksum read-back, then releases the
 * processors from reset. Returns 0 on success, -1 on any failure.
 */
static int blackbird_load_firmware(struct cx8802_dev *dev)
{
	static const unsigned char magic[8] = {
		0xa7, 0x0d, 0x00, 0x00, 0x66, 0xbb, 0x55, 0xaa
	};
	const struct firmware *firmware;
	int i, retval = 0;
	u32 value = 0;
	u32 checksum = 0;
	u32 *dataptr;

	/* stop the encoder's processors and set up SDRAM before upload */
	retval  = register_write(dev->core, IVTV_REG_VPU, 0xFFFFFFED);
	retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
	retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_REFRESH, 0x80000640);
	retval |= register_write(dev->core, IVTV_REG_ENC_SDRAM_PRECHARGE, 0x1A);
	msleep(1);
	retval |= register_write(dev->core, IVTV_REG_APU, 0);

	if (retval < 0)
		dprintk(0, "Error with register_write\n");

	retval = request_firmware(&firmware, CX2341X_FIRM_ENC_FILENAME,
				  &dev->pci->dev);

	if (retval != 0) {
		dprintk(0, "ERROR: Hotplug firmware request failed (%s).\n",
			CX2341X_FIRM_ENC_FILENAME);
		dprintk(0, "Please fix your hotplug setup, the board will "
			"not work without firmware loaded!\n");
		return -1;
	}

	if (firmware->size != BLACKBIRD_FIRM_IMAGE_SIZE) {
		dprintk(0, "ERROR: Firmware size mismatch (have %zd, expected %d)\n",
			firmware->size, BLACKBIRD_FIRM_IMAGE_SIZE);
		release_firmware(firmware);
		return -1;
	}

	if (0 != memcmp(firmware->data, magic, 8)) {
		dprintk(0, "ERROR: Firmware magic mismatch, wrong file?\n");
		release_firmware(firmware);
		return -1;
	}

	/* transfer to the chip */
	dprintk(1,"Loading firmware ...\n");
	dataptr = (u32*)firmware->data;
	for (i = 0; i < (firmware->size >> 2); i++) {
		value = *dataptr;
		/* sum of complements; the read-back loop subtracts to zero */
		checksum += ~value;
		memory_write(dev->core, i, value);
		dataptr++;
	}

	/* read back to verify with the checksum */
	for (i--; i >= 0; i--) {
		memory_read(dev->core, i, &value);
		checksum -= ~value;
	}
	if (checksum) {
		dprintk(0, "ERROR: Firmware load failed (checksum mismatch).\n");
		release_firmware(firmware);
		return -1;
	}
	release_firmware(firmware);
	dprintk(0, "Firmware upload successful.\n");

	/* release SPU then VPU from reset */
	retval |= register_write(dev->core, IVTV_REG_HW_BLOCKS, IVTV_CMD_HW_BLOCKS_RST);
	retval |= register_read(dev->core, IVTV_REG_SPU, &value);
	retval |= register_write(dev->core, IVTV_REG_SPU, value & 0xFFFFFFFE);
	msleep(1);

	retval |= register_read(dev->core, IVTV_REG_VPU, &value);
	retval |= register_write(dev->core, IVTV_REG_VPU, value & 0xFFFFFFE8);

	if (retval < 0)
		dprintk(0, "Error with register_write\n");
	return 0;
}
/**
Settings used by the windows tv app for PVR2000:
=================================================================================================================
Profile | Codec | Resolution | CBR/VBR | Video Qlty | V. Bitrate | Frmrate | Audio Codec | A. Bitrate | A. Mode
-----------------------------------------------------------------------------------------------------------------
MPEG-1 | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 2000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
MPEG-2 | MPEG2 | 720x576PAL | VBR | 600 :Good | 4000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
VCD | MPEG1 | 352x288PAL | (CBR) | 1000:Optimal | 1150 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
DVD | MPEG2 | 720x576PAL | VBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
DB* DVD | MPEG2 | 720x576PAL | CBR | 600 :Good | 6000 Kbps | 25fps | MPG1 Layer2 | 224kbps | Stereo
=================================================================================================================
*DB: "DirectBurn"
*/
static void blackbird_codec_settings(struct cx8802_dev *dev)
{
/* assign frame size */
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
dev->height, dev->width);
dev->params.width = dev->width;
dev->params.height = dev->height;
dev->params.is_50hz = (dev->core->tvnorm & V4L2_STD_625_50) != 0;
cx2341x_update(dev, blackbird_mbox_func, NULL, &dev->params);
}
/*
 * Bring the cx23416 encoder to a usable state.
 *
 * Pings the firmware first; if the ping fails the chip is reset and the
 * firmware is (re)uploaded, the mailbox located, and the firmware version
 * queried.  In either case the bridge I/O pins and video path are then
 * configured and the current codec settings pushed to the encoder.
 * Returns 0 on success or a negative value on firmware load/ping failure.
 */
static int blackbird_initialize_codec(struct cx8802_dev *dev)
{
	/* 'core' is used implicitly by the cx_write()/cx_clear() macros */
	struct cx88_core *core = dev->core;
	int version;
	int retval;

	dprintk(1,"Initialize codec\n");
	retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
	if (retval < 0) {
		dev->mpeg_active = 0;

		/* ping was not successful, reset and upload firmware */
		cx_write(MO_SRST_IO, 0); /* SYS_RSTO=0 */
		cx_write(MO_SRST_IO, 1); /* SYS_RSTO=1 */
		retval = blackbird_load_firmware(dev);
		if (retval < 0)
			return retval;

		/* blackbird_find_mailbox() returns the mailbox offset (>= 0)
		 * or a negative error */
		retval = blackbird_find_mailbox(dev);
		if (retval < 0)
			return -1;
		dev->mailbox = retval;

		retval = blackbird_api_cmd(dev, CX2341X_ENC_PING_FW, 0, 0); /* ping */
		if (retval < 0) {
			dprintk(0, "ERROR: Firmware ping failed!\n");
			return -1;
		}

		retval = blackbird_api_cmd(dev, CX2341X_ENC_GET_VERSION, 0, 1, &version);
		if (retval < 0) {
			dprintk(0, "ERROR: Firmware get encoder version failed!\n");
			return -1;
		}
		dprintk(0, "Firmware version is 0x%08x\n", version);
	}

	/* bridge pin/path setup must follow the firmware bring-up above */
	cx_write(MO_PINMUX_IO, 0x88); /* 656-8bit IO and enable MPEG parallel IO */
	cx_clear(MO_INPUT_FORMAT, 0x100); /* chroma subcarrier lock to normal? */
	cx_write(MO_VBOS_CONTROL, 0x84A00); /* no 656 mode, 8-bit pixels, disable VBI */
	cx_clear(MO_OUTPUT_FORMAT, 0x0008); /* Normal Y-limits to let the mpeg encoder sync */

	blackbird_codec_settings(dev);

	/* VSYNC line counts for the SAA7115 video decoder front-end */
	blackbird_api_cmd(dev, CX2341X_ENC_SET_NUM_VSYNC_LINES, 2, 0,
			BLACKBIRD_FIELD1_SAA7115,
			BLACKBIRD_FIELD2_SAA7115
		);
	blackbird_api_cmd(dev, CX2341X_ENC_SET_PLACEHOLDER, 12, 0,
			BLACKBIRD_CUSTOM_EXTENSION_USR_DATA,
			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
	return 0;
}
/*
 * Start MPEG capture on the encoder.
 *
 * Waits (up to ~1 s total) for the low nibble of AUD_STATUS to stabilize
 * before unmuting audio and kicking off capture.  Always returns 0.
 */
static int blackbird_start_codec(struct file *file, void *priv)
{
	struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
	/* 'core' is used implicitly by the cx_read()/cx_clear() macros */
	struct cx88_core *core = dev->core;
	/* start capturing to the host interface */
	u32 reg;

	int i;
	int lastchange = -1;
	int lastval = 0;

	/* Poll until the AUD_STATUS low nibble has been stable for 4
	 * consecutive 100 ms polls, giving up after 10 polls.  NOTE(review):
	 * the nibble presumably reflects audio standard detection — confirm
	 * against the cx2388x datasheet. */
	for (i = 0; (i < 10) && (i < (lastchange + 4)); i++) {
		reg = cx_read(AUD_STATUS);
		dprintk(1, "AUD_STATUS:%dL: 0x%x\n", i, reg);
		if ((reg & 0x0F) != lastval) {
			lastval = reg & 0x0F;
			lastchange = i;
		}
		msleep(100);
	}

	/* unmute audio source */
	cx_clear(AUD_VOL_CTL, (1 << 6));
	blackbird_api_cmd(dev, CX2341X_ENC_REFRESH_INPUT, 0, 0);
	/* initialize the video input */
	blackbird_api_cmd(dev, CX2341X_ENC_INITIALIZE_INPUT, 0, 0);
	/* start capturing to the host interface */
	blackbird_api_cmd(dev, CX2341X_ENC_START_CAPTURE, 2, 0,
			BLACKBIRD_MPEG_CAPTURE,
			BLACKBIRD_RAW_BITS_NONE
		);
	dev->mpeg_active = 1;
	return 0;
}
/*
 * Stop the current MPEG capture immediately (BLACKBIRD_END_NOW) and mark
 * the encoder inactive.  Always returns 0.
 */
static int blackbird_stop_codec(struct cx8802_dev *dev)
{
	blackbird_api_cmd(dev, CX2341X_ENC_STOP_CAPTURE, 3, 0,
			BLACKBIRD_END_NOW,
			BLACKBIRD_MPEG_CAPTURE,
			BLACKBIRD_RAW_BITS_NONE
		);
	dev->mpeg_active = 0;
	return 0;
}
/* ------------------------------------------------------------------ */
static int bb_buf_setup(struct videobuf_queue *q,
unsigned int *count, unsigned int *size)
{
struct cx8802_fh *fh = q->priv_data;
fh->dev->ts_packet_size = 188 * 4; /* was: 512 */
fh->dev->ts_packet_count = mpegbufs; /* was: 100 */
*size = fh->dev->ts_packet_size * fh->dev->ts_packet_count;
*count = fh->dev->ts_packet_count;
return 0;
}
/* videobuf callback: hand buffer preparation off to the cx8802 core. */
static int
bb_buf_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb,
	       enum v4l2_field field)
{
	struct cx8802_fh *fh = q->priv_data;
	struct cx88_buffer *buf = (struct cx88_buffer *)vb;

	return cx8802_buf_prepare(q, fh->dev, buf, field);
}
/* videobuf callback: queue a prepared buffer for DMA. */
static void
bb_buf_queue(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
	struct cx8802_fh *fh = q->priv_data;

	cx8802_buf_queue(fh->dev, (struct cx88_buffer *)vb);
}
/* videobuf callback: release a buffer's resources. */
static void
bb_buf_release(struct videobuf_queue *q, struct videobuf_buffer *vb)
{
	struct cx88_buffer *buf = (struct cx88_buffer *)vb;

	cx88_free_buffer(q, buf);
}
/* videobuf queue operations for the MPEG stream buffers. */
static struct videobuf_queue_ops blackbird_qops = {
	.buf_setup    = bb_buf_setup,
	.buf_prepare  = bb_buf_prepare,
	.buf_queue    = bb_buf_queue,
	.buf_release  = bb_buf_release,
};
/* ------------------------------------------------------------------ */
/*
 * Control-ID tables consulted by v4l2_ctrl_next() when enumerating
 * controls: cx88 user controls first, then cx2341x MPEG controls.
 */
static const u32 *ctrl_classes[] = {
	cx88_user_ctrls,
	cx2341x_mpeg_ctrls,
	NULL
};
/*
 * Resolve @qctrl->id to the next known control and fill in its description.
 * Unknown MPEG controls are reported as disabled rather than failing.
 */
static int blackbird_queryctrl(struct cx8802_dev *dev, struct v4l2_queryctrl *qctrl)
{
	qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
	if (!qctrl->id)
		return -EINVAL;

	/* Standard V4L2 controls */
	if (!cx8800_ctrl_query(dev->core, qctrl))
		return 0;

	/* MPEG V4L2 controls */
	if (cx2341x_ctrl_query(&dev->params, qctrl) != 0)
		qctrl->flags |= V4L2_CTRL_FLAG_DISABLED;

	return 0;
}
/* ------------------------------------------------------------------ */
/* IOCTL Handlers */
static int vidioc_querymenu (struct file *file, void *priv,
struct v4l2_querymenu *qmenu)
{
struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
struct v4l2_queryctrl qctrl;
qctrl.id = qmenu->id;
blackbird_queryctrl(dev, &qctrl);
return v4l2_ctrl_query_menu(qmenu, &qctrl,
cx2341x_ctrl_get_menu(&dev->params, qmenu->id));
}
/*
 * VIDIOC_QUERYCAP: fill in driver/card/bus identification and capabilities.
 *
 * Fix: the original wrote cap->bus_info with an unbounded sprintf() and
 * cap->driver with strcpy(); both fields are fixed-size arrays in struct
 * v4l2_capability, so use bounded copies to rule out overflow.
 */
static int vidioc_querycap (struct file *file, void *priv,
			    struct v4l2_capability *cap)
{
	struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
	struct cx88_core *core = dev->core;

	strlcpy(cap->driver, "cx88_blackbird", sizeof(cap->driver));
	strlcpy(cap->card, core->board.name, sizeof(cap->card));
	snprintf(cap->bus_info, sizeof(cap->bus_info),
		 "PCI:%s", pci_name(dev->pci));
	cap->capabilities =
		V4L2_CAP_VIDEO_CAPTURE |
		V4L2_CAP_READWRITE     |
		V4L2_CAP_STREAMING;
	/* tuner capability only if the board actually has a tuner */
	if (UNSET != core->board.tuner_type)
		cap->capabilities |= V4L2_CAP_TUNER;
	return 0;
}
/* VIDIOC_ENUM_FMT: the only capture format offered is MPEG. */
static int vidioc_enum_fmt_vid_cap (struct file *file, void *priv,
				    struct v4l2_fmtdesc *f)
{
	if (f->index)
		return -EINVAL;

	f->pixelformat = V4L2_PIX_FMT_MPEG;
	strlcpy(f->description, "MPEG", sizeof(f->description));
	return 0;
}
static int vidioc_g_fmt_vid_cap (struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx8802_fh *fh = priv;
struct cx8802_dev *dev = fh->dev;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */
f->fmt.pix.colorspace = 0;
f->fmt.pix.width = dev->width;
f->fmt.pix.height = dev->height;
f->fmt.pix.field = fh->mpegq.field;
dprintk(0,"VIDIOC_G_FMT: w: %d, h: %d, f: %d\n",
dev->width, dev->height, fh->mpegq.field );
return 0;
}
static int vidioc_try_fmt_vid_cap (struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx8802_fh *fh = priv;
struct cx8802_dev *dev = fh->dev;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */;
f->fmt.pix.colorspace = 0;
dprintk(0,"VIDIOC_TRY_FMT: w: %d, h: %d, f: %d\n",
dev->width, dev->height, fh->mpegq.field );
return 0;
}
static int vidioc_s_fmt_vid_cap (struct file *file, void *priv,
struct v4l2_format *f)
{
struct cx8802_fh *fh = priv;
struct cx8802_dev *dev = fh->dev;
struct cx88_core *core = dev->core;
f->fmt.pix.pixelformat = V4L2_PIX_FMT_MPEG;
f->fmt.pix.bytesperline = 0;
f->fmt.pix.sizeimage = dev->ts_packet_size * dev->ts_packet_count; /* 188 * 4 * 1024; */;
f->fmt.pix.colorspace = 0;
dev->width = f->fmt.pix.width;
dev->height = f->fmt.pix.height;
fh->mpegq.field = f->fmt.pix.field;
cx88_set_scale(core, f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field);
blackbird_api_cmd(dev, CX2341X_ENC_SET_FRAME_SIZE, 2, 0,
f->fmt.pix.height, f->fmt.pix.width);
dprintk(0,"VIDIOC_S_FMT: w: %d, h: %d, f: %d\n",
f->fmt.pix.width, f->fmt.pix.height, f->fmt.pix.field );
return 0;
}
/* VIDIOC_REQBUFS: delegate buffer allocation to videobuf. */
static int vidioc_reqbufs (struct file *file, void *priv, struct v4l2_requestbuffers *p)
{
	struct cx8802_fh *fh = priv;

	return videobuf_reqbufs(&fh->mpegq, p);
}
/* VIDIOC_QUERYBUF: delegate buffer status query to videobuf. */
static int vidioc_querybuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct cx8802_fh *fh = priv;

	return videobuf_querybuf(&fh->mpegq, p);
}
/* VIDIOC_QBUF: delegate buffer queueing to videobuf. */
static int vidioc_qbuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct cx8802_fh *fh = priv;

	return videobuf_qbuf(&fh->mpegq, p);
}
/* VIDIOC_DQBUF: delegate buffer dequeueing to videobuf, honoring O_NONBLOCK. */
static int vidioc_dqbuf (struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct cx8802_fh *fh = priv;
	int nonblocking = file->f_flags & O_NONBLOCK;

	return videobuf_dqbuf(&fh->mpegq, p, nonblocking);
}
/* VIDIOC_STREAMON: delegate to videobuf. */
static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct cx8802_fh *fh = priv;

	return videobuf_streamon(&fh->mpegq);
}
/* VIDIOC_STREAMOFF: delegate to videobuf. */
static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct cx8802_fh *fh = priv;

	return videobuf_streamoff(&fh->mpegq);
}
/* VIDIOC_G_EXT_CTRLS: only the MPEG control class is supported. */
static int vidioc_g_ext_ctrls (struct file *file, void *priv,
			       struct v4l2_ext_controls *f)
{
	struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;

	if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
		return -EINVAL;

	return cx2341x_ext_ctrls(&dev->params, 0, f, VIDIOC_G_EXT_CTRLS);
}
/*
 * VIDIOC_S_EXT_CTRLS: apply new MPEG encoder settings.
 *
 * The new values are staged in a local copy so cx2341x_update() can diff
 * them against the currently programmed dev->params; only on success is
 * the staged copy committed back to dev->params.
 */
static int vidioc_s_ext_ctrls (struct file *file, void *priv,
			       struct v4l2_ext_controls *f)
{
	struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
	struct cx2341x_mpeg_params p;
	int err;

	if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
		return -EINVAL;

	/* the encoder cannot be reprogrammed while capturing */
	if (dev->mpeg_active)
		blackbird_stop_codec(dev);

	p = dev->params;
	err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_S_EXT_CTRLS);
	if (!err) {
		/* old params (dev->params) vs new (p): ordering matters */
		err = cx2341x_update(dev, blackbird_mbox_func, &dev->params, &p);
		dev->params = p;
	}
	return err;
}
static int vidioc_try_ext_ctrls (struct file *file, void *priv,
struct v4l2_ext_controls *f)
{
struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
struct cx2341x_mpeg_params p;
int err;
if (f->ctrl_class != V4L2_CTRL_CLASS_MPEG)
return -EINVAL;
p = dev->params;
err = cx2341x_ext_ctrls(&p, 0, f, VIDIOC_TRY_EXT_CTRLS);
return err;
}
/*
 * VIDIOC_S_FREQUENCY: retune.
 *
 * Capture is stopped before retuning; the codec and scaler are then
 * reinitialized for the (possibly changed) signal.  Always returns 0.
 */
static int vidioc_s_frequency (struct file *file, void *priv,
			       struct v4l2_frequency *f)
{
	struct cx8802_fh *fh = priv;
	struct cx8802_dev *dev = fh->dev;
	struct cx88_core *core = dev->core;

	if (dev->mpeg_active)
		blackbird_stop_codec(dev);

	cx88_set_freq (core,f);
	blackbird_initialize_codec(dev);
	cx88_set_scale(dev->core, dev->width, dev->height,
		       fh->mpegq.field);
	return 0;
}
/* VIDIOC_LOG_STATUS: dump sub-device and encoder state to the kernel log. */
static int vidioc_log_status (struct file *file, void *priv)
{
	struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;
	struct cx88_core *core = dev->core;
	char name[32 + 2];

	/* prefix used by the cx2341x status dump, e.g. "cx88[0]/2" */
	snprintf(name, sizeof(name), "%s/2", core->name);

	printk("%s/2: ============  START LOG STATUS  ============\n",
	       core->name);
	call_all(core, core, log_status);
	cx2341x_log_status(&dev->params, name);
	printk("%s/2: =============  END LOG STATUS  =============\n",
	       core->name);
	return 0;
}
/* VIDIOC_QUERYCTRL: try blackbird/MPEG controls first, then cx88 ones. */
static int vidioc_queryctrl (struct file *file, void *priv,
			     struct v4l2_queryctrl *qctrl)
{
	struct cx8802_dev *dev = ((struct cx8802_fh *)priv)->dev;

	if (!blackbird_queryctrl(dev, qctrl))
		return 0;

	qctrl->id = v4l2_ctrl_next(ctrl_classes, qctrl->id);
	if (unlikely(!qctrl->id))
		return -EINVAL;

	return cx8800_ctrl_query(dev->core, qctrl);
}
/* VIDIOC_ENUMINPUT: delegate to the cx88 core. */
static int vidioc_enum_input (struct file *file, void *priv,
			      struct v4l2_input *i)
{
	struct cx8802_fh *fh = priv;

	return cx88_enum_input(fh->dev->core, i);
}
/* VIDIOC_G_CTRL: delegate to the cx88 core. */
static int vidioc_g_ctrl (struct file *file, void *priv,
			  struct v4l2_control *ctl)
{
	struct cx8802_fh *fh = priv;

	return cx88_get_control(fh->dev->core, ctl);
}
/* VIDIOC_S_CTRL: delegate to the cx88 core. */
static int vidioc_s_ctrl (struct file *file, void *priv,
			  struct v4l2_control *ctl)
{
	struct cx8802_fh *fh = priv;

	return cx88_set_control(fh->dev->core, ctl);
}
/* VIDIOC_G_FREQUENCY: report the current tuner frequency. */
static int vidioc_g_frequency (struct file *file, void *priv,
			       struct v4l2_frequency *f)
{
	struct cx8802_fh *fh = priv;
	struct cx88_core *core = fh->dev->core;

	/* no tuner fitted on this board */
	if (unlikely(UNSET == core->board.tuner_type))
		return -EINVAL;

	f->type = V4L2_TUNER_ANALOG_TV;
	f->frequency = core->freq;
	/* let the tuner sub-device refine the reported value */
	call_all(core, tuner, g_frequency, f);
	return 0;
}
/* VIDIOC_G_INPUT: report the currently selected input. */
static int vidioc_g_input (struct file *file, void *priv, unsigned int *i)
{
	struct cx8802_fh *fh = priv;

	*i = fh->dev->core->input;
	return 0;
}
/* VIDIOC_S_INPUT: switch the video mux under the core lock. */
static int vidioc_s_input (struct file *file, void *priv, unsigned int i)
{
	struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;

	/* the cx88 mux has at most 4 inputs */
	if (i >= 4)
		return -EINVAL;

	mutex_lock(&core->lock);
	cx88_newstation(core);
	cx88_video_mux(core, i);
	mutex_unlock(&core->lock);
	return 0;
}
/* VIDIOC_G_TUNER: report tuner capabilities, audio mode, and signal lock. */
static int vidioc_g_tuner (struct file *file, void *priv,
			   struct v4l2_tuner *t)
{
	/* 'core' is used implicitly by the cx_read() macro */
	struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;
	u32 status;

	if (unlikely(UNSET == core->board.tuner_type))
		return -EINVAL;
	if (t->index != 0)
		return -EINVAL;

	strcpy(t->name, "Television");
	t->type       = V4L2_TUNER_ANALOG_TV;
	t->capability = V4L2_TUNER_CAP_NORM;
	t->rangehigh  = 0xffffffffUL;

	cx88_get_stereo(core ,t);
	/* bit 5 of the device status presumably indicates signal lock —
	 * TODO confirm against the cx2388x datasheet */
	status = cx_read(MO_DEVICE_STATUS);
	t->signal = (status & (1 << 5)) ? 0xffff : 0x0000;
	return 0;
}
/* VIDIOC_S_TUNER: only the audio mode of tuner 0 is settable. */
static int vidioc_s_tuner (struct file *file, void *priv,
			   struct v4l2_tuner *t)
{
	struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;

	if (UNSET == core->board.tuner_type)
		return -EINVAL;
	if (t->index != 0)
		return -EINVAL;

	cx88_set_stereo(core, t->audmode, 1);
	return 0;
}
/* VIDIOC_S_STD: set the TV norm under the core lock. */
static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *id)
{
	struct cx88_core *core = ((struct cx8802_fh *)priv)->dev->core;

	mutex_lock(&core->lock);
	cx88_set_tvnorm(core, *id);
	mutex_unlock(&core->lock);
	return 0;
}
/* FIXME: cx88_ioctl_hook not implemented */
/*
 * open() handler for the MPEG device node.
 *
 * Under the core lock: acquires the blackbird sub-driver, initializes the
 * encoder for the first opener, allocates the per-filehandle state, and
 * sets up the videobuf queue.  On every error path the acquired driver is
 * released and the lock dropped before returning.
 */
static int mpeg_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct cx8802_dev *dev = video_drvdata(file);
	struct cx8802_fh *fh;
	struct cx8802_driver *drv = NULL;
	int err;

	dprintk( 1, "%s\n", __func__);

	mutex_lock(&dev->core->lock);

	/* Make sure we can acquire the hardware */
	drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
	if (!drv) {
		dprintk(1, "%s: blackbird driver is not loaded\n", __func__);
		mutex_unlock(&dev->core->lock);
		return -ENODEV;
	}

	err = drv->request_acquire(drv);
	if (err != 0) {
		dprintk(1,"%s: Unable to acquire hardware, %d\n", __func__, err);
		mutex_unlock(&dev->core->lock);
		return err;
	}

	/* only the first opener initializes the codec */
	if (!dev->core->mpeg_users && blackbird_initialize_codec(dev) < 0) {
		drv->request_release(drv);
		mutex_unlock(&dev->core->lock);
		return -EINVAL;
	}
	dprintk(1, "open dev=%s\n", video_device_node_name(vdev));

	/* allocate + initialize per filehandle data */
	fh = kzalloc(sizeof(*fh),GFP_KERNEL);
	if (NULL == fh) {
		drv->request_release(drv);
		mutex_unlock(&dev->core->lock);
		return -ENOMEM;
	}
	file->private_data = fh;
	fh->dev      = dev;

	videobuf_queue_sg_init(&fh->mpegq, &blackbird_qops,
			    &dev->pci->dev, &dev->slock,
			    V4L2_BUF_TYPE_VIDEO_CAPTURE,
			    V4L2_FIELD_INTERLACED,
			    sizeof(struct cx88_buffer),
			    fh, NULL);

	/* FIXME: locking against other video device */
	cx88_set_scale(dev->core, dev->width, dev->height,
			fh->mpegq.field);

	dev->core->mpeg_users++;
	mutex_unlock(&dev->core->lock);
	return 0;
}
/*
 * release() handler: stop capture if we are the last user, tear down the
 * videobuf queue and filehandle, and hand the hardware back to the
 * blackbird sub-driver.  All under the core lock.
 */
static int mpeg_release(struct file *file)
{
	struct cx8802_fh  *fh  = file->private_data;
	struct cx8802_dev *dev = fh->dev;
	struct cx8802_driver *drv = NULL;

	mutex_lock(&dev->core->lock);

	/* only stop the encoder when the last user goes away */
	if (dev->mpeg_active && dev->core->mpeg_users == 1)
		blackbird_stop_codec(dev);

	cx8802_cancel_buffers(fh->dev);
	/* stop mpeg capture */
	videobuf_stop(&fh->mpegq);

	videobuf_mmap_free(&fh->mpegq);
	file->private_data = NULL;
	kfree(fh);

	/* Make sure we release the hardware */
	drv = cx8802_get_driver(dev, CX88_MPEG_BLACKBIRD);
	WARN_ON(!drv);
	if (drv)
		drv->request_release(drv);

	dev->core->mpeg_users--;

	mutex_unlock(&dev->core->lock);

	return 0;
}
/* read() handler: lazily start the encoder, then stream MPEG data out. */
static ssize_t
mpeg_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
	struct cx8802_fh *fh = file->private_data;
	struct cx8802_dev *dev = fh->dev;
	int nonblocking = file->f_flags & O_NONBLOCK;

	if (!dev->mpeg_active)
		blackbird_start_codec(file, fh);

	return videobuf_read_stream(&fh->mpegq, data, count, ppos, 0,
				    nonblocking);
}
/* poll() handler: lazily start the encoder, then poll the stream queue. */
static unsigned int
mpeg_poll(struct file *file, struct poll_table_struct *wait)
{
	struct cx8802_fh *fh = file->private_data;

	if (!fh->dev->mpeg_active)
		blackbird_start_codec(file, fh);

	return videobuf_poll_stream(file, &fh->mpegq, wait);
}
/* mmap() handler: map stream buffers into user space via videobuf. */
static int
mpeg_mmap(struct file *file, struct vm_area_struct * vma)
{
	struct cx8802_fh *fh = file->private_data;

	return videobuf_mmap_mapper(&fh->mpegq, vma);
}
/* file operations for the MPEG stream /dev/video node */
static const struct v4l2_file_operations mpeg_fops =
{
	.owner	       = THIS_MODULE,
	.open	       = mpeg_open,
	.release       = mpeg_release,
	.read	       = mpeg_read,
	.poll          = mpeg_poll,
	.mmap	       = mpeg_mmap,
	.ioctl	       = video_ioctl2,
};
/* V4L2 ioctl dispatch table for the MPEG device */
static const struct v4l2_ioctl_ops mpeg_ioctl_ops = {
	.vidioc_querymenu     = vidioc_querymenu,
	.vidioc_querycap      = vidioc_querycap,
	.vidioc_enum_fmt_vid_cap  = vidioc_enum_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap     = vidioc_g_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap   = vidioc_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap     = vidioc_s_fmt_vid_cap,
	.vidioc_reqbufs       = vidioc_reqbufs,
	.vidioc_querybuf      = vidioc_querybuf,
	.vidioc_qbuf          = vidioc_qbuf,
	.vidioc_dqbuf         = vidioc_dqbuf,
	.vidioc_streamon      = vidioc_streamon,
	.vidioc_streamoff     = vidioc_streamoff,
	.vidioc_g_ext_ctrls   = vidioc_g_ext_ctrls,
	.vidioc_s_ext_ctrls   = vidioc_s_ext_ctrls,
	.vidioc_try_ext_ctrls = vidioc_try_ext_ctrls,
	.vidioc_s_frequency   = vidioc_s_frequency,
	.vidioc_log_status    = vidioc_log_status,
	.vidioc_queryctrl     = vidioc_queryctrl,
	.vidioc_enum_input    = vidioc_enum_input,
	.vidioc_g_ctrl        = vidioc_g_ctrl,
	.vidioc_s_ctrl        = vidioc_s_ctrl,
	.vidioc_g_frequency   = vidioc_g_frequency,
	.vidioc_g_input       = vidioc_g_input,
	.vidioc_s_input       = vidioc_s_input,
	.vidioc_g_tuner       = vidioc_g_tuner,
	.vidioc_s_tuner       = vidioc_s_tuner,
	.vidioc_s_std         = vidioc_s_std,
};
/* template cloned by cx88_vdev_init() for the registered MPEG device;
 * current_norm is overwritten at probe time from the core's tvnorm */
static struct video_device cx8802_mpeg_template = {
	.name                 = "cx8802",
	.fops                 = &mpeg_fops,
	.ioctl_ops 	      = &mpeg_ioctl_ops,
	.tvnorms              = CX88_NORMS,
	.current_norm         = V4L2_STD_NTSC_M,
};
/* ------------------------------------------------------------------ */
/* The CX8802 MPEG API will call this when we can use the hardware */
/* The CX8802 MPEG API calls this when we may take over the hardware.
 * The GPIO toggles and delays below follow a board-specific reset
 * sequence; their order and timing must not be changed. */
static int cx8802_blackbird_advise_acquire(struct cx8802_driver *drv)
{
	/* 'core' is used implicitly by the cx_set()/cx_clear() macros */
	struct cx88_core *core = drv->core;
	int err = 0;

	switch (core->boardnr) {
	case CX88_BOARD_HAUPPAUGE_HVR1300:
		/* By default, core setup will leave the cx22702 out of reset, on the bus.
		 * We left the hardware on power up with the cx22702 active.
		 * We're being given access to re-arrange the GPIOs.
		 * Take the bus off the cx22702 and put the cx23416 on it.
		 */
		/* Toggle reset on cx22702 leaving i2c active */
		cx_set(MO_GP0_IO, 0x00000080);
		udelay(1000);
		cx_clear(MO_GP0_IO, 0x00000080);
		udelay(50);
		cx_set(MO_GP0_IO, 0x00000080);
		udelay(1000);
		/* tri-state the cx22702 pins */
		cx_set(MO_GP0_IO, 0x00000004);
		udelay(1000);
		break;
	default:
		/* no blackbird acquire procedure for this board */
		err = -ENODEV;
	}
	return err;
}
/* The CX8802 MPEG API will call this when we need to release the hardware */
/* The CX8802 MPEG API calls this when we must give the hardware back. */
static int cx8802_blackbird_advise_release(struct cx8802_driver *drv)
{
	struct cx88_core *core = drv->core;

	switch (core->boardnr) {
	case CX88_BOARD_HAUPPAUGE_HVR1300:
		/* Exit leaving the cx23416 on the bus */
		return 0;
	default:
		return -ENODEV;
	}
}
/* Undo blackbird_register_video(): unregister (or free, if never
 * registered) the MPEG video device. */
static void blackbird_unregister_video(struct cx8802_dev *dev)
{
	struct video_device *vdev = dev->mpeg_dev;

	if (!vdev)
		return;

	if (video_is_registered(vdev))
		video_unregister_device(vdev);
	else
		video_device_release(vdev);

	dev->mpeg_dev = NULL;
}
static int blackbird_register_video(struct cx8802_dev *dev)
{
int err;
dev->mpeg_dev = cx88_vdev_init(dev->core,dev->pci,
&cx8802_mpeg_template,"mpeg");
video_set_drvdata(dev->mpeg_dev, dev);
err = video_register_device(dev->mpeg_dev,VFL_TYPE_GRABBER, -1);
if (err < 0) {
printk(KERN_INFO "%s/2: can't register mpeg device\n",
dev->core->name);
return err;
}
printk(KERN_INFO "%s/2: registered device %s [mpeg]\n",
dev->core->name, video_device_node_name(dev->mpeg_dev));
return 0;
}
/* ----------------------------------------------------------- */
/*
 * Probe callback: set capture defaults, bring up the cx23416 encoder and
 * register the MPEG video device.
 *
 * Fix: the original assigned dev->height = 576 and then unconditionally
 * overwrote it in the norm-dependent if/else below — the first store was
 * dead and has been removed.
 */
static int cx8802_blackbird_probe(struct cx8802_driver *drv)
{
	struct cx88_core *core = drv->core;
	struct cx8802_dev *dev = core->dvbdev;
	int err;

	dprintk( 1, "%s\n", __func__);
	dprintk( 1, " ->being probed by Card=%d Name=%s, PCI %02x:%02x\n",
		core->boardnr,
		core->name,
		core->pci_bus,
		core->pci_slot);

	err = -ENODEV;
	if (!(core->board.mpeg & CX88_MPEG_BLACKBIRD))
		goto fail_core;

	dev->width = 720;
	cx2341x_fill_defaults(&dev->params);
	dev->params.port = CX2341X_PORT_STREAMING;
	cx8802_mpeg_template.current_norm = core->tvnorm;
	/* height follows the current TV norm: 480 for 60 Hz, 576 for 50 Hz */
	if (core->tvnorm & V4L2_STD_525_60) {
		dev->height = 480;
	} else {
		dev->height = 576;
	}

	/* blackbird stuff */
	printk("%s/2: cx23416 based mpeg encoder (blackbird reference design)\n",
	       core->name);
	host_setup(dev->core);

	blackbird_initialize_codec(dev);
	blackbird_register_video(dev);

	/* initial device configuration: needed ? */
	// init_controls(core);
	cx88_set_tvnorm(core,core->tvnorm);
	cx88_video_mux(core,0);

	return 0;

 fail_core:
	return err;
}
/* Remove callback: tear down the registered MPEG video device. */
static int cx8802_blackbird_remove(struct cx8802_driver *drv)
{
	struct cx8802_dev *dev = drv->core->dvbdev;

	/* blackbird */
	blackbird_unregister_video(dev);
	return 0;
}
/* sub-driver descriptor registered with the cx8802 MPEG core;
 * hw_access is shared because the bus is multiplexed with DVB */
static struct cx8802_driver cx8802_blackbird_driver = {
	.type_id	= CX88_MPEG_BLACKBIRD,
	.hw_access	= CX8802_DRVCTL_SHARED,
	.probe		= cx8802_blackbird_probe,
	.remove		= cx8802_blackbird_remove,
	.advise_acquire	= cx8802_blackbird_advise_acquire,
	.advise_release	= cx8802_blackbird_advise_release,
};
/* Module entry point: announce ourselves and register with the cx8802 core. */
static int __init blackbird_init(void)
{
	printk(KERN_INFO "cx2388x blackbird driver version %s loaded\n",
	       CX88_VERSION);

	return cx8802_register_driver(&cx8802_blackbird_driver);
}
/* Module exit point: unregister from the cx8802 core. */
static void __exit blackbird_fini(void)
{
	cx8802_unregister_driver(&cx8802_blackbird_driver);
}
module_init(blackbird_init);
module_exit(blackbird_fini);

module_param_named(video_debug, cx8802_mpeg_template.debug, int, 0644);
/* Fix: MODULE_PARM_DESC must name the exposed parameter ("video_debug",
 * as declared by module_param_named above), not "debug" — otherwise the
 * description attaches to a parameter that does not exist. */
MODULE_PARM_DESC(video_debug, "enable debug messages [video]");
| gpl-2.0 |
poopgiggle/fla-kernel | drivers/net/ethernet/mellanox/mlx4/sense.c | 5468 | 4126 | /*
* Copyright (c) 2007 Mellanox Technologies. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#include <linux/errno.h>
#include <linux/if_ether.h>
#include <linux/mlx4/cmd.h>
#include "mlx4.h"
/*
 * Issue the SENSE_PORT firmware command for @port and return the sensed
 * port type in *@type.  Values 0..2 are accepted from firmware; anything
 * larger is rejected with -EINVAL.
 */
int mlx4_SENSE_PORT(struct mlx4_dev *dev, int port,
		    enum mlx4_port_type *type)
{
	u64 out_param;
	int err;

	err = mlx4_cmd_imm(dev, 0, &out_param, port, 0, MLX4_CMD_SENSE_PORT,
			   MLX4_CMD_TIME_CLASS_B, MLX4_CMD_WRAPPED);
	if (err) {
		mlx4_err(dev, "Sense command failed for port: %d\n", port);
		return err;
	}

	if (out_param > 2) {
		mlx4_err(dev, "Sense returned illegal value: 0x%llx\n", out_param);
		return -EINVAL;
	}

	*type = out_param;
	return 0;
}
/*
 * Sense the type of every port, falling back to @defaults for ports that
 * cannot or should not be sensed.
 *
 * Note the indexing: ports are numbered 1..num_ports in dev->caps, but
 * @stype and @defaults are 0-based arrays.
 */
void mlx4_do_sense_ports(struct mlx4_dev *dev,
			 enum mlx4_port_type *stype,
			 enum mlx4_port_type *defaults)
{
	struct mlx4_sense *sense = &mlx4_priv(dev)->sense;
	int err;
	int i;

	for (i = 1; i <= dev->caps.num_ports; i++) {
		stype[i - 1] = 0;
		/* only sense ports configured for automatic type selection */
		if (sense->do_sense_port[i] && sense->sense_allowed[i] &&
		    dev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
			err = mlx4_SENSE_PORT(dev, i, &stype[i - 1]);
			if (err)
				stype[i - 1] = defaults[i - 1];
		} else
			stype[i - 1] = defaults[i - 1];
	}

	/*
	 * If sensed nothing, remain in current configuration.
	 * (mlx4_SENSE_PORT can succeed yet report type 0 = nothing sensed,
	 * so a second pass applies the defaults for those ports.)
	 */
	for (i = 0; i < dev->caps.num_ports; i++)
		stype[i] = stype[i] ? stype[i] : defaults[i];
}
/*
 * Delayed-work handler: periodically re-sense the port types and apply
 * any change.  The "sense_again" label is reached on both the skip and
 * success paths — the work always re-queues itself.
 */
static void mlx4_sense_port(struct work_struct *work)
{
	struct delayed_work *delay = to_delayed_work(work);
	struct mlx4_sense *sense = container_of(delay, struct mlx4_sense,
						sense_poll);
	struct mlx4_dev *dev = sense->dev;
	struct mlx4_priv *priv = mlx4_priv(dev);
	enum mlx4_port_type stype[MLX4_MAX_PORTS];

	mutex_lock(&priv->port_mutex);
	/* &port_type[1]: caps arrays are 1-based, stype is 0-based */
	mlx4_do_sense_ports(dev, stype, &dev->caps.port_type[1]);

	/* keep the current configuration if the sensed set is inconsistent */
	if (mlx4_check_port_params(dev, stype))
		goto sense_again;

	if (mlx4_change_port_types(dev, stype))
		mlx4_err(dev, "Failed to change port_types\n");

sense_again:
	mutex_unlock(&priv->port_mutex);
	queue_delayed_work(mlx4_wq , &sense->sense_poll,
			   round_jiffies_relative(MLX4_SENSE_RANGE));
}
void mlx4_start_sense(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_sense *sense = &priv->sense;
if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP))
return;
queue_delayed_work(mlx4_wq , &sense->sense_poll,
round_jiffies_relative(MLX4_SENSE_RANGE));
}
void mlx4_stop_sense(struct mlx4_dev *dev)
{
cancel_delayed_work_sync(&mlx4_priv(dev)->sense.sense_poll);
}
void mlx4_sense_init(struct mlx4_dev *dev)
{
struct mlx4_priv *priv = mlx4_priv(dev);
struct mlx4_sense *sense = &priv->sense;
int port;
sense->dev = dev;
for (port = 1; port <= dev->caps.num_ports; port++)
sense->do_sense_port[port] = 1;
INIT_DEFERRABLE_WORK(&sense->sense_poll, mlx4_sense_port);
}
| gpl-2.0 |
Slim80/Fulgor_Kernel_Lollipop | fs/ubifs/master.c | 7260 | 10744 | /*
* This file is part of UBIFS.
*
* Copyright (C) 2006-2008 Nokia Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Authors: Artem Bityutskiy (Битюцкий Артём)
* Adrian Hunter
*/
/* This file implements reading and writing the master node */
#include "ubifs.h"
/**
* scan_for_master - search the valid master node.
* @c: UBIFS file-system description object
*
* This function scans the master node LEBs and search for the latest master
* node. Returns zero in case of success, %-EUCLEAN if there master area is
* corrupted and requires recovery, and a negative error code in case of
* failure.
*/
static int scan_for_master(struct ubifs_info *c)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	int lnum, offs = 0, nodes_cnt;

	/* Scan the first master LEB; the last node found is the newest. */
	lnum = UBIFS_MST_LNUM;

	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);
	nodes_cnt = sleb->nodes_cnt;
	if (nodes_cnt > 0) {
		snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
				  list);
		if (snod->type != UBIFS_MST_NODE)
			goto out_dump;
		memcpy(c->mst_node, snod->node, snod->len);
		offs = snod->offs;
	}
	ubifs_scan_destroy(sleb);

	/* Scan the second master LEB; both copies must agree exactly. */
	lnum += 1;

	sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);
	if (IS_ERR(sleb))
		return PTR_ERR(sleb);
	/* any mismatch between the two copies means the area needs recovery */
	if (sleb->nodes_cnt != nodes_cnt)
		goto out;
	if (!sleb->nodes_cnt)
		goto out;
	snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node, list);
	if (snod->type != UBIFS_MST_NODE)
		goto out_dump;
	if (snod->offs != offs)
		goto out;
	/* compare payloads, skipping the common header (sqnum differs) */
	if (memcmp((void *)c->mst_node + UBIFS_CH_SZ,
		   (void *)snod->node + UBIFS_CH_SZ,
		   UBIFS_MST_NODE_SZ - UBIFS_CH_SZ))
		goto out;

	c->mst_offs = offs;
	ubifs_scan_destroy(sleb);
	return 0;

out:
	/* copies disagree: master area is corrupted, caller must recover */
	ubifs_scan_destroy(sleb);
	return -EUCLEAN;

out_dump:
	ubifs_err("unexpected node type %d master LEB %d:%d",
		  snod->type, lnum, snod->offs);
	ubifs_scan_destroy(sleb);
	return -EINVAL;
}
/**
* validate_master - validate master node.
* @c: UBIFS file-system description object
*
* This function validates data which was read from master node. Returns zero
* if the data is all right and %-EINVAL if not.
*/
static int validate_master(const struct ubifs_info *c)
{
	long long main_sz;
	/* err identifies which check failed; reported in the error message */
	int err;

	/* sequence number and inode number sanity */
	if (c->max_sqnum >= SQNUM_WATERMARK) {
		err = 1;
		goto out;
	}

	if (c->cmt_no >= c->max_sqnum) {
		err = 2;
		goto out;
	}

	if (c->highest_inum >= INUM_WATERMARK) {
		err = 3;
		goto out;
	}

	/* log head: must lie inside the log area, aligned to min I/O size */
	if (c->lhead_lnum < UBIFS_LOG_LNUM ||
	    c->lhead_lnum >= UBIFS_LOG_LNUM + c->log_lebs ||
	    c->lhead_offs < 0 || c->lhead_offs >= c->leb_size ||
	    c->lhead_offs & (c->min_io_size - 1)) {
		err = 4;
		goto out;
	}

	/* index tree root: inside main area, 8-byte aligned, sane length */
	if (c->zroot.lnum >= c->leb_cnt || c->zroot.lnum < c->main_first ||
	    c->zroot.offs >= c->leb_size || c->zroot.offs & 7) {
		err = 5;
		goto out;
	}

	if (c->zroot.len < c->ranges[UBIFS_IDX_NODE].min_len ||
	    c->zroot.len > c->ranges[UBIFS_IDX_NODE].max_len) {
		err = 6;
		goto out;
	}

	/* garbage-collector LEB must be in the main area */
	if (c->gc_lnum >= c->leb_cnt || c->gc_lnum < c->main_first) {
		err = 7;
		goto out;
	}

	/* index head: in main area, min-I/O and 8-byte aligned */
	if (c->ihead_lnum >= c->leb_cnt || c->ihead_lnum < c->main_first ||
	    c->ihead_offs % c->min_io_size || c->ihead_offs < 0 ||
	    c->ihead_offs > c->leb_size || c->ihead_offs & 7) {
		err = 8;
		goto out;
	}

	/* old index size cannot exceed the whole main area */
	main_sz = (long long)c->main_lebs * c->leb_size;
	if (c->bi.old_idx_sz & 7 || c->bi.old_idx_sz >= main_sz) {
		err = 9;
		goto out;
	}

	/* LPT positions: all must lie within the LPT area */
	if (c->lpt_lnum < c->lpt_first || c->lpt_lnum > c->lpt_last ||
	    c->lpt_offs < 0 || c->lpt_offs + c->nnode_sz > c->leb_size) {
		err = 10;
		goto out;
	}

	if (c->nhead_lnum < c->lpt_first || c->nhead_lnum > c->lpt_last ||
	    c->nhead_offs < 0 || c->nhead_offs % c->min_io_size ||
	    c->nhead_offs > c->leb_size) {
		err = 11;
		goto out;
	}

	if (c->ltab_lnum < c->lpt_first || c->ltab_lnum > c->lpt_last ||
	    c->ltab_offs < 0 ||
	    c->ltab_offs + c->ltab_sz > c->leb_size) {
		err = 12;
		goto out;
	}

	/* lsave table only exists in the "big LPT" model */
	if (c->big_lpt && (c->lsave_lnum < c->lpt_first ||
	    c->lsave_lnum > c->lpt_last || c->lsave_offs < 0 ||
	    c->lsave_offs + c->lsave_sz > c->leb_size)) {
		err = 13;
		goto out;
	}

	if (c->lscan_lnum < c->main_first || c->lscan_lnum >= c->leb_cnt) {
		err = 14;
		goto out;
	}

	/* LEB accounting: counts bounded by the main area size */
	if (c->lst.empty_lebs < 0 || c->lst.empty_lebs > c->main_lebs - 2) {
		err = 15;
		goto out;
	}

	if (c->lst.idx_lebs < 0 || c->lst.idx_lebs > c->main_lebs - 1) {
		err = 16;
		goto out;
	}

	/* space accounting: totals 8-byte aligned and mutually consistent */
	if (c->lst.total_free < 0 || c->lst.total_free > main_sz ||
	    c->lst.total_free & 7) {
		err = 17;
		goto out;
	}

	if (c->lst.total_dirty < 0 || (c->lst.total_dirty & 7)) {
		err = 18;
		goto out;
	}

	if (c->lst.total_used < 0 || (c->lst.total_used & 7)) {
		err = 19;
		goto out;
	}

	if (c->lst.total_free + c->lst.total_dirty +
	    c->lst.total_used > main_sz) {
		err = 20;
		goto out;
	}

	if (c->lst.total_dead + c->lst.total_dark +
	    c->lst.total_used + c->bi.old_idx_sz > main_sz) {
		err = 21;
		goto out;
	}

	if (c->lst.total_dead < 0 ||
	    c->lst.total_dead > c->lst.total_free + c->lst.total_dirty ||
	    c->lst.total_dead & 7) {
		err = 22;
		goto out;
	}

	if (c->lst.total_dark < 0 ||
	    c->lst.total_dark > c->lst.total_free + c->lst.total_dirty ||
	    c->lst.total_dark & 7) {
		err = 23;
		goto out;
	}

	return 0;

out:
	ubifs_err("bad master node at offset %d error %d", c->mst_offs, err);
	dbg_dump_node(c, c->mst_node);
	return -EINVAL;
}
/**
* ubifs_read_master - read master node.
* @c: UBIFS file-system description object
*
* This function finds and reads the master node during file-system mount. If
* the flash is empty, it creates default master node as well. Returns zero in
* case of success and a negative error code in case of failure.
*/
int ubifs_read_master(struct ubifs_info *c)
{
	int err, old_leb_cnt;

	/* Buffer for the master node; the unmount path frees it, not us */
	c->mst_node = kzalloc(c->mst_node_alsz, GFP_KERNEL);
	if (!c->mst_node)
		return -ENOMEM;

	err = scan_for_master(c);
	if (err) {
		/* -EUCLEAN indicates corruption - try the recovery path */
		if (err == -EUCLEAN)
			err = ubifs_recover_master_node(c);
		if (err)
			/*
			 * Note, we do not free 'c->mst_node' here because the
			 * unmount routine will take care of this.
			 */
			return err;
	}

	/* Make sure that the recovery flag is clear */
	c->mst_node->flags &= cpu_to_le32(~UBIFS_MST_RCVRY);

	/* Un-marshal the on-flash (little-endian) master node fields into @c */
	c->max_sqnum = le64_to_cpu(c->mst_node->ch.sqnum);
	c->highest_inum = le64_to_cpu(c->mst_node->highest_inum);
	c->cmt_no = le64_to_cpu(c->mst_node->cmt_no);
	c->zroot.lnum = le32_to_cpu(c->mst_node->root_lnum);
	c->zroot.offs = le32_to_cpu(c->mst_node->root_offs);
	c->zroot.len = le32_to_cpu(c->mst_node->root_len);
	c->lhead_lnum = le32_to_cpu(c->mst_node->log_lnum);
	c->gc_lnum = le32_to_cpu(c->mst_node->gc_lnum);
	c->ihead_lnum = le32_to_cpu(c->mst_node->ihead_lnum);
	c->ihead_offs = le32_to_cpu(c->mst_node->ihead_offs);
	c->bi.old_idx_sz = le64_to_cpu(c->mst_node->index_size);
	c->lpt_lnum = le32_to_cpu(c->mst_node->lpt_lnum);
	c->lpt_offs = le32_to_cpu(c->mst_node->lpt_offs);
	c->nhead_lnum = le32_to_cpu(c->mst_node->nhead_lnum);
	c->nhead_offs = le32_to_cpu(c->mst_node->nhead_offs);
	c->ltab_lnum = le32_to_cpu(c->mst_node->ltab_lnum);
	c->ltab_offs = le32_to_cpu(c->mst_node->ltab_offs);
	c->lsave_lnum = le32_to_cpu(c->mst_node->lsave_lnum);
	c->lsave_offs = le32_to_cpu(c->mst_node->lsave_offs);
	c->lscan_lnum = le32_to_cpu(c->mst_node->lscan_lnum);
	c->lst.empty_lebs = le32_to_cpu(c->mst_node->empty_lebs);
	c->lst.idx_lebs = le32_to_cpu(c->mst_node->idx_lebs);
	old_leb_cnt = le32_to_cpu(c->mst_node->leb_cnt);
	c->lst.total_free = le64_to_cpu(c->mst_node->total_free);
	c->lst.total_dirty = le64_to_cpu(c->mst_node->total_dirty);
	c->lst.total_used = le64_to_cpu(c->mst_node->total_used);
	c->lst.total_dead = le64_to_cpu(c->mst_node->total_dead);
	c->lst.total_dark = le64_to_cpu(c->mst_node->total_dark);

	c->calc_idx_sz = c->bi.old_idx_sz;

	if (c->mst_node->flags & cpu_to_le32(UBIFS_MST_NO_ORPHS))
		c->no_orphs = 1;

	if (old_leb_cnt != c->leb_cnt) {
		/* The file system has been resized */
		int growth = c->leb_cnt - old_leb_cnt;

		/* Only growing is allowed; a shrunk count is invalid */
		if (c->leb_cnt < old_leb_cnt ||
		    c->leb_cnt < UBIFS_MIN_LEB_CNT) {
			ubifs_err("bad leb_cnt on master node");
			dbg_dump_node(c, c->mst_node);
			return -EINVAL;
		}

		dbg_mnt("Auto resizing (master) from %d LEBs to %d LEBs",
			old_leb_cnt, c->leb_cnt);

		/* Account the newly-gained LEBs as empty free/dark space */
		c->lst.empty_lebs += growth;
		c->lst.total_free += growth * (long long)c->leb_size;
		c->lst.total_dark += growth * (long long)c->dark_wm;

		/*
		 * Reflect changes back onto the master node. N.B. the master
		 * node gets written immediately whenever mounting (or
		 * remounting) in read-write mode, so we do not need to write it
		 * here.
		 */
		c->mst_node->leb_cnt = cpu_to_le32(c->leb_cnt);
		c->mst_node->empty_lebs = cpu_to_le32(c->lst.empty_lebs);
		c->mst_node->total_free = cpu_to_le64(c->lst.total_free);
		c->mst_node->total_dark = cpu_to_le64(c->lst.total_dark);
	}

	/* Sanity-check all the values we just read before trusting them */
	err = validate_master(c);
	if (err)
		return err;

	err = dbg_old_index_check_init(c, &c->zroot);

	return err;
}
/**
* ubifs_write_master - write master node.
* @c: UBIFS file-system description object
*
* This function writes the master node. The caller has to take the
* @c->mst_mutex lock before calling this function. Returns zero in case of
* success and a negative error code in case of failure. The master node is
* written twice to enable recovery.
*/
int ubifs_write_master(struct ubifs_info *c)
{
	int ret;
	int leb = UBIFS_MST_LNUM;
	int node_len = UBIFS_MST_NODE_SZ;
	int node_offs = c->mst_offs + c->mst_node_alsz;

	ubifs_assert(!c->ro_media && !c->ro_mount);
	if (c->ro_error)
		return -EROFS;

	/*
	 * Slide forward within the master LEB; when the next slot would not
	 * fit, unmap the LEB and start again from offset zero.
	 */
	if (node_offs + UBIFS_MST_NODE_SZ > c->leb_size) {
		ret = ubifs_leb_unmap(c, leb);
		if (ret)
			return ret;
		node_offs = 0;
	}

	c->mst_offs = node_offs;
	c->mst_node->highest_inum = cpu_to_le64(c->highest_inum);

	/* First copy of the master node */
	ret = ubifs_write_node(c, c->mst_node, node_len, leb, node_offs,
			       UBI_SHORTTERM);
	if (ret)
		return ret;

	/*
	 * Second copy goes to the following LEB at the same offset, so a
	 * corrupted first copy can be recovered from it.
	 */
	leb += 1;
	if (node_offs == 0) {
		ret = ubifs_leb_unmap(c, leb);
		if (ret)
			return ret;
	}
	return ubifs_write_node(c, c->mst_node, node_len, leb, node_offs,
				UBI_SHORTTERM);
}
| gpl-2.0 |
dastin1015/android_kernel_htc_jewel | arch/tile/lib/memcpy_64.c | 7260 | 5364 | /*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
#define __memcpy memcpy
/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */

/* Must be 8 bytes in size. */
#define word_t uint64_t

#if CHIP_L2_LINE_SIZE() != 64 && CHIP_L2_LINE_SIZE() != 128
#error "Assumes 64 or 128 byte line size"
#endif

/* How many cache lines ahead should we prefetch? */
#define PREFETCH_LINES_AHEAD 3

/*
 * Provide "base versions" of load and store for the normal code path.
 * The kernel provides other versions for userspace copies.
 */
#define ST(p, v) (*(p) = (v))
#define LD(p) (*(p))

#ifndef USERCOPY_FUNC
/* Normal path: all sized loads/stores are plain dereferences. */
#define ST1 ST
#define ST2 ST
#define ST4 ST
#define ST8 ST
#define LD1 LD
#define LD2 LD
#define LD4 LD
#define LD8 LD
#define RETVAL dstv
void *memcpy(void *__restrict dstv, const void *__restrict srcv, size_t n)
#else
/*
 * Special kernel version will provide implementation of the LDn/STn
 * macros to return a count of uncopied bytes due to mm fault.
 */
#define RETVAL 0
int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
#endif
{
	char *__restrict dst1 = (char *)dstv;
	const char *__restrict src1 = (const char *)srcv;
	const char *__restrict src1_end;
	const char *__restrict prefetch;
	word_t *__restrict dst8;    /* 8-byte pointer to destination memory. */
	word_t final; /* Final bytes to write to trailing word, if any */
	long i;

	/* Short copies: a plain byte loop beats the alignment setup below. */
	if (n < 16) {
		for (; n; n--)
			ST1(dst1++, LD1(src1++));
		return RETVAL;
	}

	/*
	 * Locate the end of source memory we will copy.  Don't
	 * prefetch past this.
	 */
	src1_end = src1 + n - 1;

	/* Prefetch ahead a few cache lines, but not past the end. */
	prefetch = src1;
	for (i = 0; i < PREFETCH_LINES_AHEAD; i++) {
		__insn_prefetch(prefetch);
		prefetch += CHIP_L2_LINE_SIZE();
		/*
		 * NOTE(review): this clamp looks inverted w.r.t. the comment
		 * above - it keeps a past-the-end pointer and rewinds an
		 * in-range one.  Confirm intent against upstream before
		 * changing; prefetches are hints, so at worst this costs
		 * performance, not correctness.
		 */
		prefetch = (prefetch > src1_end) ? prefetch : src1;
	}

	/* Copy bytes until dst is word-aligned. */
	for (; (uintptr_t)dst1 & (sizeof(word_t) - 1); n--)
		ST1(dst1++, LD1(src1++));

	/* 8-byte pointer to destination memory. */
	dst8 = (word_t *)dst1;

	if (__builtin_expect((uintptr_t)src1 & (sizeof(word_t) - 1), 0)) {
		/*
		 * Misaligned copy.  Copy 8 bytes at a time, but don't
		 * bother with other fanciness.
		 *
		 * TODO: Consider prefetching and using wh64 as well.
		 */

		/* Create an aligned src8. */
		const word_t *__restrict src8 =
			(const word_t *)((uintptr_t)src1 & -sizeof(word_t));
		word_t b;

		/* "a" holds the aligned word straddling the current src byte. */
		word_t a = LD8(src8++);
		for (; n >= sizeof(word_t); n -= sizeof(word_t)) {
			b = LD8(src8++);
			/* Splice the a/b word pair into one dst-aligned word. */
			a = __insn_dblalign(a, b, src1);
			ST8(dst8++, a);
			a = b;
		}

		if (n == 0)
			return RETVAL;

		/* Avoid loading a word that lies wholly past the source end. */
		b = ((const char *)src8 <= src1_end) ? *src8 : 0;

		/*
		 * Final source bytes to write to trailing partial
		 * word, if any.
		 */
		final = __insn_dblalign(a, b, src1);
	} else {
		/* Aligned copy. */

		const word_t* __restrict src8 = (const word_t *)src1;

		/* src8 and dst8 are both word-aligned. */
		if (n >= CHIP_L2_LINE_SIZE()) {
			/* Copy until 'dst' is cache-line-aligned. */
			for (; (uintptr_t)dst8 & (CHIP_L2_LINE_SIZE() - 1);
			     n -= sizeof(word_t))
				ST8(dst8++, LD8(src8++));

			/* Bulk loop: one whole destination cache line per pass. */
			for (; n >= CHIP_L2_LINE_SIZE(); ) {
				/* wh64: hint that the whole dst line will be written. */
				__insn_wh64(dst8);

				/*
				 * Prefetch and advance to next line
				 * to prefetch, but don't go past the end
				 */
				__insn_prefetch(prefetch);
				prefetch += CHIP_L2_LINE_SIZE();
				/* NOTE(review): same suspicious clamp as above. */
				prefetch = (prefetch > src1_end) ? prefetch :
					(const char *)src8;

				/*
				 * Copy an entire cache line.  Manually
				 * unrolled to avoid idiosyncracies of
				 * compiler unrolling.
				 */
#define COPY_WORD(offset) ({ ST8(dst8+offset, LD8(src8+offset)); n -= 8; })
				COPY_WORD(0);
				COPY_WORD(1);
				COPY_WORD(2);
				COPY_WORD(3);
				COPY_WORD(4);
				COPY_WORD(5);
				COPY_WORD(6);
				COPY_WORD(7);
#if CHIP_L2_LINE_SIZE() == 128
				COPY_WORD(8);
				COPY_WORD(9);
				COPY_WORD(10);
				COPY_WORD(11);
				COPY_WORD(12);
				COPY_WORD(13);
				COPY_WORD(14);
				COPY_WORD(15);
#elif CHIP_L2_LINE_SIZE() != 64
# error Fix code that assumes particular L2 cache line sizes
#endif
				dst8 += CHIP_L2_LINE_SIZE() / sizeof(word_t);
				src8 += CHIP_L2_LINE_SIZE() / sizeof(word_t);
			}
		}

		/* Word-at-a-time tail of the aligned path. */
		for (; n >= sizeof(word_t); n -= sizeof(word_t))
			ST8(dst8++, LD8(src8++));

		if (__builtin_expect(n == 0, 1))
			return RETVAL;

		final = LD8(src8);
	}

	/* n != 0 if we get here.  Write out any trailing bytes. */
	dst1 = (char *)dst8;
	if (n & 4) {
		ST4((uint32_t *)dst1, final);
		dst1 += 4;
		final >>= 32;
		n &= 3;
	}
	if (n & 2) {
		ST2((uint16_t *)dst1, final);
		dst1 += 2;
		final >>= 16;
		n &= 1;
	}
	if (n)
		ST1((uint8_t *)dst1, final);

	return RETVAL;
}

#ifdef USERCOPY_FUNC
/* Undo the per-variant macros so this file can be re-included. */
#undef ST1
#undef ST2
#undef ST4
#undef ST8
#undef LD1
#undef LD2
#undef LD4
#undef LD8
#undef USERCOPY_FUNC
#endif
| gpl-2.0 |
liquidware/android-kernel-omap4 | arch/tile/lib/memcpy_64.c | 7260 | 5364 | /*
* Copyright 2011 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*/
#include <linux/types.h>
#include <linux/string.h>
#include <linux/module.h>
#define __memcpy memcpy
/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */

/* Must be 8 bytes in size. */
#define word_t uint64_t

#if CHIP_L2_LINE_SIZE() != 64 && CHIP_L2_LINE_SIZE() != 128
#error "Assumes 64 or 128 byte line size"
#endif

/* How many cache lines ahead should we prefetch? */
#define PREFETCH_LINES_AHEAD 3

/*
 * Provide "base versions" of load and store for the normal code path.
 * The kernel provides other versions for userspace copies.
 */
#define ST(p, v) (*(p) = (v))
#define LD(p) (*(p))

#ifndef USERCOPY_FUNC
/* Normal path: all sized loads/stores are plain dereferences. */
#define ST1 ST
#define ST2 ST
#define ST4 ST
#define ST8 ST
#define LD1 LD
#define LD2 LD
#define LD4 LD
#define LD8 LD
#define RETVAL dstv
void *memcpy(void *__restrict dstv, const void *__restrict srcv, size_t n)
#else
/*
 * Special kernel version will provide implementation of the LDn/STn
 * macros to return a count of uncopied bytes due to mm fault.
 */
#define RETVAL 0
int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
#endif
{
	char *__restrict dst1 = (char *)dstv;
	const char *__restrict src1 = (const char *)srcv;
	const char *__restrict src1_end;
	const char *__restrict prefetch;
	word_t *__restrict dst8;    /* 8-byte pointer to destination memory. */
	word_t final; /* Final bytes to write to trailing word, if any */
	long i;

	/* Short copies: a plain byte loop beats the alignment setup below. */
	if (n < 16) {
		for (; n; n--)
			ST1(dst1++, LD1(src1++));
		return RETVAL;
	}

	/*
	 * Locate the end of source memory we will copy.  Don't
	 * prefetch past this.
	 */
	src1_end = src1 + n - 1;

	/* Prefetch ahead a few cache lines, but not past the end. */
	prefetch = src1;
	for (i = 0; i < PREFETCH_LINES_AHEAD; i++) {
		__insn_prefetch(prefetch);
		prefetch += CHIP_L2_LINE_SIZE();
		/*
		 * NOTE(review): this clamp looks inverted w.r.t. the comment
		 * above - it keeps a past-the-end pointer and rewinds an
		 * in-range one.  Confirm intent against upstream before
		 * changing; prefetches are hints, so at worst this costs
		 * performance, not correctness.
		 */
		prefetch = (prefetch > src1_end) ? prefetch : src1;
	}

	/* Copy bytes until dst is word-aligned. */
	for (; (uintptr_t)dst1 & (sizeof(word_t) - 1); n--)
		ST1(dst1++, LD1(src1++));

	/* 8-byte pointer to destination memory. */
	dst8 = (word_t *)dst1;

	if (__builtin_expect((uintptr_t)src1 & (sizeof(word_t) - 1), 0)) {
		/*
		 * Misaligned copy.  Copy 8 bytes at a time, but don't
		 * bother with other fanciness.
		 *
		 * TODO: Consider prefetching and using wh64 as well.
		 */

		/* Create an aligned src8. */
		const word_t *__restrict src8 =
			(const word_t *)((uintptr_t)src1 & -sizeof(word_t));
		word_t b;

		/* "a" holds the aligned word straddling the current src byte. */
		word_t a = LD8(src8++);
		for (; n >= sizeof(word_t); n -= sizeof(word_t)) {
			b = LD8(src8++);
			/* Splice the a/b word pair into one dst-aligned word. */
			a = __insn_dblalign(a, b, src1);
			ST8(dst8++, a);
			a = b;
		}

		if (n == 0)
			return RETVAL;

		/* Avoid loading a word that lies wholly past the source end. */
		b = ((const char *)src8 <= src1_end) ? *src8 : 0;

		/*
		 * Final source bytes to write to trailing partial
		 * word, if any.
		 */
		final = __insn_dblalign(a, b, src1);
	} else {
		/* Aligned copy. */

		const word_t* __restrict src8 = (const word_t *)src1;

		/* src8 and dst8 are both word-aligned. */
		if (n >= CHIP_L2_LINE_SIZE()) {
			/* Copy until 'dst' is cache-line-aligned. */
			for (; (uintptr_t)dst8 & (CHIP_L2_LINE_SIZE() - 1);
			     n -= sizeof(word_t))
				ST8(dst8++, LD8(src8++));

			/* Bulk loop: one whole destination cache line per pass. */
			for (; n >= CHIP_L2_LINE_SIZE(); ) {
				/* wh64: hint that the whole dst line will be written. */
				__insn_wh64(dst8);

				/*
				 * Prefetch and advance to next line
				 * to prefetch, but don't go past the end
				 */
				__insn_prefetch(prefetch);
				prefetch += CHIP_L2_LINE_SIZE();
				/* NOTE(review): same suspicious clamp as above. */
				prefetch = (prefetch > src1_end) ? prefetch :
					(const char *)src8;

				/*
				 * Copy an entire cache line.  Manually
				 * unrolled to avoid idiosyncracies of
				 * compiler unrolling.
				 */
#define COPY_WORD(offset) ({ ST8(dst8+offset, LD8(src8+offset)); n -= 8; })
				COPY_WORD(0);
				COPY_WORD(1);
				COPY_WORD(2);
				COPY_WORD(3);
				COPY_WORD(4);
				COPY_WORD(5);
				COPY_WORD(6);
				COPY_WORD(7);
#if CHIP_L2_LINE_SIZE() == 128
				COPY_WORD(8);
				COPY_WORD(9);
				COPY_WORD(10);
				COPY_WORD(11);
				COPY_WORD(12);
				COPY_WORD(13);
				COPY_WORD(14);
				COPY_WORD(15);
#elif CHIP_L2_LINE_SIZE() != 64
# error Fix code that assumes particular L2 cache line sizes
#endif
				dst8 += CHIP_L2_LINE_SIZE() / sizeof(word_t);
				src8 += CHIP_L2_LINE_SIZE() / sizeof(word_t);
			}
		}

		/* Word-at-a-time tail of the aligned path. */
		for (; n >= sizeof(word_t); n -= sizeof(word_t))
			ST8(dst8++, LD8(src8++));

		if (__builtin_expect(n == 0, 1))
			return RETVAL;

		final = LD8(src8);
	}

	/* n != 0 if we get here.  Write out any trailing bytes. */
	dst1 = (char *)dst8;
	if (n & 4) {
		ST4((uint32_t *)dst1, final);
		dst1 += 4;
		final >>= 32;
		n &= 3;
	}
	if (n & 2) {
		ST2((uint16_t *)dst1, final);
		dst1 += 2;
		final >>= 16;
		n &= 1;
	}
	if (n)
		ST1((uint8_t *)dst1, final);

	return RETVAL;
}

#ifdef USERCOPY_FUNC
/* Undo the per-variant macros so this file can be re-included. */
#undef ST1
#undef ST2
#undef ST4
#undef ST8
#undef LD1
#undef LD2
#undef LD4
#undef LD8
#undef USERCOPY_FUNC
#endif
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.