repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
engine95/navelC-990 | drivers/mfd/mcp-sa11x0.c | 4936 | 7290 | /*
* linux/drivers/mfd/mcp-sa11x0.c
*
* Copyright (C) 2001-2005 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License.
*
* SA11x0 MCP (Multimedia Communications Port) driver.
*
* MCP read/write timeouts from Jordi Colomer, rehacked by rmk.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/mfd/mcp.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <mach/mcp.h>
#define DRIVER_NAME "sa11x0-mcp"
/*
 * Per-device state for one SA11x0 MCP block.
 */
struct mcp_sa11x0 {
	void __iomem *base0;	/* mapping of resource 0: MCCR0, MCDRx, MCSR */
	void __iomem *base1;	/* mapping of resource 1: MCCR1 */
	u32 mccr0;		/* cached MCCR0, written back on resume */
	u32 mccr1;		/* cached MCCR1, written back on resume */
};
/* Register offsets */
#define MCCR0(m) ((m)->base0 + 0x00)
#define MCDR0(m) ((m)->base0 + 0x08)
#define MCDR1(m) ((m)->base0 + 0x0c)
#define MCDR2(m) ((m)->base0 + 0x10)
#define MCSR(m) ((m)->base0 + 0x18)
#define MCCR1(m) ((m)->base1 + 0x00)
#define priv(mcp) ((struct mcp_sa11x0 *)mcp_priv(mcp))
static void
mcp_sa11x0_set_telecom_divisor(struct mcp *mcp, unsigned int divisor)
{
struct mcp_sa11x0 *m = priv(mcp);
divisor /= 32;
m->mccr0 &= ~0x00007f00;
m->mccr0 |= divisor << 8;
writel_relaxed(m->mccr0, MCCR0(m));
}
/*
 * Program the audio sample-rate divisor field (bits 0..6 of MCCR0).
 * The hardware field holds divisor/32.
 */
static void
mcp_sa11x0_set_audio_divisor(struct mcp *mcp, unsigned int divisor)
{
	struct mcp_sa11x0 *sa = priv(mcp);

	sa->mccr0 = (sa->mccr0 & ~0x0000007f) | (divisor / 32);
	writel_relaxed(sa->mccr0, MCCR0(sa));
}
/*
* Write data to the device. The bit should be set after 3 subframe
* times (each frame is 64 clocks). We wait a maximum of 6 subframes.
* We really should try doing something more productive while we
* wait.
*/
/*
 * Write data to the device. The bit should be set after 3 subframe
 * times (each frame is 64 clocks). We wait a maximum of 6 subframes.
 * We really should try doing something more productive while we
 * wait.
 */
static void
mcp_sa11x0_write(struct mcp *mcp, unsigned int reg, unsigned int val)
{
	struct mcp_sa11x0 *sa = priv(mcp);
	int attempt;
	int done = 0;

	writel_relaxed(reg << 17 | MCDR2_Wr | (val & 0xffff), MCDR2(sa));

	/* Poll MCSR_CWC twice, sleeping one rw_timeout between checks. */
	for (attempt = 0; attempt < 2 && !done; attempt++) {
		udelay(mcp->rw_timeout);
		done = (readl_relaxed(MCSR(sa)) & MCSR_CWC) != 0;
	}

	if (!done)
		printk(KERN_WARNING "mcp: write timed out\n");
}
/*
* Read data from the device. The bit should be set after 3 subframe
* times (each frame is 64 clocks). We wait a maximum of 6 subframes.
* We really should try doing something more productive while we
* wait.
*/
/*
 * Read data from the device. The bit should be set after 3 subframe
 * times (each frame is 64 clocks). We wait a maximum of 6 subframes.
 * We really should try doing something more productive while we
 * wait.
 *
 * Returns the 16-bit register value, or -ETIME (cast to unsigned)
 * on timeout, matching the mcp_ops reg_read contract.
 */
static unsigned int
mcp_sa11x0_read(struct mcp *mcp, unsigned int reg)
{
	struct mcp_sa11x0 *sa = priv(mcp);
	int attempt;

	writel_relaxed(reg << 17 | MCDR2_Rd, MCDR2(sa));

	for (attempt = 0; attempt < 2; attempt++) {
		udelay(mcp->rw_timeout);
		if (readl_relaxed(MCSR(sa)) & MCSR_CRC)
			return readl_relaxed(MCDR2(sa)) & 0xffff;
	}

	printk(KERN_WARNING "mcp: read timed out\n");
	return -ETIME;
}
/*
 * Enable the MCP: clear all pending status bits, then set MCCR0_MCE.
 * The non-relaxed writel() presumably orders the status clear against
 * the enable -- NOTE(review): confirm the barrier is intentional here,
 * since every other register write in this driver is _relaxed.
 */
static void mcp_sa11x0_enable(struct mcp *mcp)
{
	struct mcp_sa11x0 *m = priv(mcp);

	/* MCSR is write-1-to-clear; -1 clears every status bit. */
	writel(-1, MCSR(m));
	m->mccr0 |= MCCR0_MCE;
	writel_relaxed(m->mccr0, MCCR0(m));
}
/*
 * Disable the MCP by clearing MCCR0_MCE.  The cached mccr0 tracks the
 * change so a later resume does not re-enable the block.
 */
static void mcp_sa11x0_disable(struct mcp *mcp)
{
	struct mcp_sa11x0 *m = priv(mcp);

	m->mccr0 &= ~MCCR0_MCE;
	writel_relaxed(m->mccr0, MCCR0(m));
}
/*
* Our methods.
*/
/*
 * Operations handed to the MCP core via mcp->ops in probe().  Note this
 * variable deliberately shares the identifier "mcp_sa11x0" with the
 * state struct tag above; struct tags and objects live in different
 * namespaces in C.
 */
static struct mcp_ops mcp_sa11x0 = {
	.set_telecom_divisor	= mcp_sa11x0_set_telecom_divisor,
	.set_audio_divisor	= mcp_sa11x0_set_audio_divisor,
	.reg_write		= mcp_sa11x0_write,
	.reg_read		= mcp_sa11x0_read,
	.enable			= mcp_sa11x0_enable,
	.disable		= mcp_sa11x0_disable,
};
/*
 * Bind to the sa11x0-mcp platform device.
 *
 * Resource 0 carries MCCR0/MCDRx/MCSR, resource 1 carries MCCR1.  Both
 * regions are claimed and mapped, the mcp host is allocated, the
 * controller is programmed from the platform data, and the host is
 * registered with the MCP core.  The goto chain unwinds exactly what
 * was set up at each failure point.
 */
static int mcp_sa11x0_probe(struct platform_device *dev)
{
	struct mcp_plat_data *data = dev->dev.platform_data;
	struct resource *mem0, *mem1;
	struct mcp_sa11x0 *m;
	struct mcp *mcp;
	int ret;

	/* Platform data is mandatory: it supplies sclk_rate and MCCRx. */
	if (!data)
		return -ENODEV;

	mem0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
	mem1 = platform_get_resource(dev, IORESOURCE_MEM, 1);
	if (!mem0 || !mem1)
		return -ENXIO;

	if (!request_mem_region(mem0->start, resource_size(mem0),
				DRIVER_NAME)) {
		ret = -EBUSY;
		goto err_mem0;
	}

	if (!request_mem_region(mem1->start, resource_size(mem1),
				DRIVER_NAME)) {
		ret = -EBUSY;
		goto err_mem1;
	}

	mcp = mcp_host_alloc(&dev->dev, sizeof(struct mcp_sa11x0));
	if (!mcp) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	mcp->owner = THIS_MODULE;
	mcp->ops = &mcp_sa11x0;
	mcp->sclk_rate = data->sclk_rate;

	m = priv(mcp);
	/* 0x7f7f forces both divisor fields to maximum, i.e. the minimum
	 * sampling rate (see the init comment below). */
	m->mccr0 = data->mccr0 | 0x7f7f;
	m->mccr1 = data->mccr1;

	m->base0 = ioremap(mem0->start, resource_size(mem0));
	m->base1 = ioremap(mem1->start, resource_size(mem1));
	if (!m->base0 || !m->base1) {
		ret = -ENOMEM;
		goto err_ioremap;
	}

	platform_set_drvdata(dev, mcp);

	/*
	 * Initialise device. Note that we initially
	 * set the sampling rate to minimum.
	 */
	writel_relaxed(-1, MCSR(m));	/* write-1-to-clear all status bits */
	writel_relaxed(m->mccr1, MCCR1(m));
	writel_relaxed(m->mccr0, MCCR0(m));

	/*
	 * Calculate the read/write timeout (us) from the bit clock
	 * rate. This is the period for 3 64-bit frames. Always
	 * round this time up.
	 */
	mcp->rw_timeout = (64 * 3 * 1000000 + mcp->sclk_rate - 1) /
			  mcp->sclk_rate;

	ret = mcp_host_add(mcp, data->codec_pdata);
	if (ret == 0)
		return 0;

	platform_set_drvdata(dev, NULL);
 err_ioremap:
	/* One mapping may be NULL here if the other failed --
	 * NOTE(review): confirm iounmap(NULL) is tolerated on this arch. */
	iounmap(m->base1);
	iounmap(m->base0);
	mcp_host_free(mcp);
 err_alloc:
	release_mem_region(mem1->start, resource_size(mem1));
 err_mem1:
	release_mem_region(mem0->start, resource_size(mem0));
 err_mem0:
	return ret;
}
/*
 * Unbind: detach from the MCP core and release both MMIO regions,
 * mirroring probe() in reverse order.
 */
static int mcp_sa11x0_remove(struct platform_device *dev)
{
	struct mcp *mcp = platform_get_drvdata(dev);
	struct mcp_sa11x0 *m = priv(mcp);
	struct resource *mem0, *mem1;

	/* The client should have called the disable op before unbind. */
	if (m->mccr0 & MCCR0_MCE)
		dev_warn(&dev->dev,
			 "device left active (missing disable call?)\n");

	mem0 = platform_get_resource(dev, IORESOURCE_MEM, 0);
	mem1 = platform_get_resource(dev, IORESOURCE_MEM, 1);

	platform_set_drvdata(dev, NULL);
	mcp_host_del(mcp);
	iounmap(m->base1);
	iounmap(m->base0);
	mcp_host_free(mcp);
	release_mem_region(mem1->start, resource_size(mem1));
	release_mem_region(mem0->start, resource_size(mem0));
	return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
 * System sleep: force the MCP off in hardware without touching the
 * cached mccr0, so resume() can restore the pre-suspend contents
 * (including the enable bit) unchanged.
 */
static int mcp_sa11x0_suspend(struct device *dev)
{
	struct mcp_sa11x0 *m = priv(dev_get_drvdata(dev));

	if (m->mccr0 & MCCR0_MCE)
		dev_warn(dev, "device left active (missing disable call?)\n");

	writel(m->mccr0 & ~MCCR0_MCE, MCCR0(m));
	return 0;
}
/* Restore the cached MCCR1/MCCR0 register values after system sleep. */
static int mcp_sa11x0_resume(struct device *dev)
{
	struct mcp_sa11x0 *m = priv(dev_get_drvdata(dev));

	writel_relaxed(m->mccr1, MCCR1(m));
	writel_relaxed(m->mccr0, MCCR0(m));
	return 0;
}
#endif
/*
 * Sleep callbacks: all suspend-type transitions share one handler; the
 * resume-type handlers run in the noirq phase -- NOTE(review):
 * presumably so registers are restored before client IRQs can fire;
 * confirm against the MCP core's expectations.
 */
static const struct dev_pm_ops mcp_sa11x0_pm_ops = {
#ifdef CONFIG_PM_SLEEP
	.suspend	= mcp_sa11x0_suspend,
	.freeze		= mcp_sa11x0_suspend,
	.poweroff	= mcp_sa11x0_suspend,
	.resume_noirq	= mcp_sa11x0_resume,
	.thaw_noirq	= mcp_sa11x0_resume,
	.restore_noirq	= mcp_sa11x0_resume,
#endif
};
/* Platform driver glue, matched by name against the "sa11x0-mcp" device. */
static struct platform_driver mcp_sa11x0_driver = {
	.probe		= mcp_sa11x0_probe,
	.remove		= mcp_sa11x0_remove,
	.driver		= {
		.name	= DRIVER_NAME,
		.owner	= THIS_MODULE,
		.pm	= &mcp_sa11x0_pm_ops,
	},
};
/*
* This needs re-working
*/
module_platform_driver(mcp_sa11x0_driver);
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("SA11x0 multimedia communications port driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
adityak74/android_kernel_mediatek_sprout | drivers/staging/vt6656/wpactl.c | 4936 | 23925 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: wpactl.c
*
* Purpose: handle wpa supplicant ioctl input/out functions
*
* Author: Lyndon Chen
*
* Date: July 28, 2006
*
* Functions:
*
* Revision History:
*
*/
#include "wpactl.h"
#include "key.h"
#include "mac.h"
#include "device.h"
#include "wmgr.h"
#include "iocmd.h"
#include "iowpa.h"
#include "control.h"
#include "rndis.h"
#include "rf.h"
/*--------------------- Static Definitions -------------------------*/
#define VIAWGET_WPA_MAX_BUF_SIZE 1024
static const int frequency_list[] = {
2412, 2417, 2422, 2427, 2432, 2437, 2442,
2447, 2452, 2457, 2462, 2467, 2472, 2484
};
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
static int msglevel = MSG_LEVEL_INFO;
/*--------------------- Static Functions --------------------------*/
/*--------------------- Export Variables --------------------------*/
/*
 * net_device setup callback for the "vntwpa" management device: fill in
 * the link-level parameters before register_netdev().
 */
static void wpadev_setup(struct net_device *dev)
{
	/* 802.11 management device, ethernet-style addressing. */
	dev->type = ARPHRD_IEEE80211;
	dev->hard_header_len = ETH_HLEN;
	dev->addr_len = ETH_ALEN;
	dev->mtu = 2048;
	dev->tx_queue_len = 1000;

	dev->flags = IFF_BROADCAST | IFF_MULTICAST;
	memset(dev->broadcast, 0xFF, ETH_ALEN);
}
/*
* Description:
* register netdev for wpa supplicant deamon
*
* Parameters:
* In:
* pDevice -
* enable -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      register netdev for wpa supplicant daemon
 *
 * Parameters:
 *  In:
 *      pDevice - adapter whose state is mirrored into the new device's
 *                private area
 *  Out:
 *
 * Return Value: 0 on success, -ENOMEM or -1 on failure
 */
static int wpa_init_wpadev(PSDevice pDevice)
{
	PSDevice wpadev_priv;
	struct net_device *dev = pDevice->dev;
	int ret = 0;

	/*
	 * The private area receives a full copy of *pDevice below, so it
	 * must be sized for the whole structure.  The previous
	 * sizeof(PSDevice) reserved only pointer-size storage, and the
	 * structure copy overflowed the allocation.
	 */
	pDevice->wpadev = alloc_netdev(sizeof(*pDevice), "vntwpa", wpadev_setup);
	if (pDevice->wpadev == NULL)
		return -ENOMEM;

	wpadev_priv = netdev_priv(pDevice->wpadev);
	*wpadev_priv = *pDevice;

	/* Mirror the real interface's addressing and resources. */
	memcpy(pDevice->wpadev->dev_addr, dev->dev_addr, ETH_ALEN);
	pDevice->wpadev->base_addr = dev->base_addr;
	pDevice->wpadev->irq = dev->irq;
	pDevice->wpadev->mem_start = dev->mem_start;
	pDevice->wpadev->mem_end = dev->mem_end;

	ret = register_netdev(pDevice->wpadev);
	if (ret) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: register_netdev(WPA) failed!\n",
			dev->name);
		free_netdev(pDevice->wpadev);
		pDevice->wpadev = NULL;	/* don't leave a dangling pointer */
		return -1;
	}

	if (pDevice->skb == NULL) {
		pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
		if (pDevice->skb == NULL) {
			/* Unwind the registration instead of leaking it. */
			unregister_netdev(pDevice->wpadev);
			free_netdev(pDevice->wpadev);
			pDevice->wpadev = NULL;
			return -ENOMEM;
		}
	}

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Registered netdev %s for WPA management\n",
		dev->name, pDevice->wpadev->name);
	return 0;
}
/*
* Description:
* unregister net_device (wpadev)
*
* Parameters:
* In:
* pDevice -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      unregister net_device (wpadev) and drop the management skb
 *
 * Parameters:
 *  In:
 *      pDevice -
 *  Out:
 *
 * Return Value: always 0
 */
static int wpa_release_wpadev(PSDevice pDevice)
{
	struct net_device *wpadev = pDevice->wpadev;

	if (pDevice->skb != NULL) {
		dev_kfree_skb(pDevice->skb);
		pDevice->skb = NULL;
	}

	if (wpadev != NULL) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n",
			pDevice->dev->name, wpadev->name);
		unregister_netdev(wpadev);
		free_netdev(wpadev);
		pDevice->wpadev = NULL;
	}

	return 0;
}
/*
* Description:
* Set enable/disable dev for wpa supplicant deamon
*
* Parameters:
* In:
* pDevice -
* val -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      Set enable/disable dev for wpa supplicant daemon
 *
 * Parameters:
 *  In:
 *      pDevice -
 *      val     - non-zero to create the wpadev, zero to tear it down
 *  Out:
 *
 * Return Value: result of the init/release helper
 */
int wpa_set_wpadev(PSDevice pDevice, int val)
{
	return val ? wpa_init_wpadev(pDevice) : wpa_release_wpadev(pDevice);
}
/*
* Description:
* Set WPA algorithm & keys
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      Set WPA algorithm & keys
 *
 * Parameters:
 *  In:
 *      pDevice    -
 *      ctx        - struct viawget_wpa_param carrying the key request
 *      fcpfkernel - TRUE when the key material already lives in kernel
 *                   space (no copy_from_user() needed)
 *  Out:
 *
 * Return Value: 0 on success, negative errno on failure
 *
 * The caller holds pDevice->lock; it is dropped and retaken around the
 * userspace copies below.
 */
int wpa_set_keys(PSDevice pDevice, void *ctx, BOOL fcpfkernel)
{
	struct viawget_wpa_param *param = ctx;
	PSMgmtObject pMgmt = &pDevice->sMgmtObj;
	DWORD dwKeyIndex = 0;
	BYTE abyKey[MAX_KEY_LEN];
	BYTE abySeq[MAX_KEY_LEN];
	QWORD KeyRSC;
	BYTE byKeyDecMode = KEY_CTL_WEP;
	int ret = 0;
	int uu;
	int ii;

	if (param->u.wpa_key.alg_name > WPA_ALG_CCMP)
		return -EINVAL;

	/*
	 * Fix: KeyRSC is assembled below with |= on both halves, so it
	 * must start from zero.  It was previously used uninitialised,
	 * leaking stack garbage into the key's receive sequence counter.
	 */
	memset(&KeyRSC, 0, sizeof(KeyRSC));

	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "param->u.wpa_key.alg_name = %d \n",
		param->u.wpa_key.alg_name);
	if (param->u.wpa_key.alg_name == WPA_ALG_NONE) {
		/* Clear every key entry and switch encryption off. */
		pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
		pDevice->bEncryptionEnable = FALSE;
		pDevice->byKeyIndex = 0;
		pDevice->bTransmitKey = FALSE;
		for (uu = 0; uu < MAX_KEY_TABLE; uu++) {
			MACvDisableKeyEntry(pDevice, uu);
		}
		return ret;
	}

	if (param->u.wpa_key.key && param->u.wpa_key.key_len > sizeof(abyKey))
		return -EINVAL;

	/* Drop the lock while (possibly) touching userspace. */
	spin_unlock_irq(&pDevice->lock);
	if (param->u.wpa_key.key && fcpfkernel) {
		memcpy(&abyKey[0], param->u.wpa_key.key, param->u.wpa_key.key_len);
	} else {
		if (param->u.wpa_key.key &&
		    copy_from_user(&abyKey[0], param->u.wpa_key.key,
				   param->u.wpa_key.key_len)) {
			spin_lock_irq(&pDevice->lock);
			return -EINVAL;
		}
	}
	spin_lock_irq(&pDevice->lock);

	dwKeyIndex = (DWORD)(param->u.wpa_key.key_index);

	/* Legacy WEP: program a default key and we are done. */
	if (param->u.wpa_key.alg_name == WPA_ALG_WEP) {
		if (dwKeyIndex > 3) {
			return -EINVAL;
		} else {
			if (param->u.wpa_key.set_tx) {
				pDevice->byKeyIndex = (BYTE)dwKeyIndex;
				pDevice->bTransmitKey = TRUE;
				dwKeyIndex |= (1 << 31);
			}
			KeybSetDefaultKey(pDevice,
					  &(pDevice->sKey),
					  dwKeyIndex & ~(BIT30 | USE_KEYRSC),
					  param->u.wpa_key.key_len,
					  NULL,
					  abyKey,
					  KEY_CTL_WEP
					  );
		}
		pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
		pDevice->bEncryptionEnable = TRUE;
		return ret;
	}

	if (param->u.wpa_key.seq && param->u.wpa_key.seq_len > sizeof(abySeq))
		return -EINVAL;

	/* Same lock dance for the sequence counter bytes. */
	spin_unlock_irq(&pDevice->lock);
	if (param->u.wpa_key.seq && fcpfkernel) {
		memcpy(&abySeq[0], param->u.wpa_key.seq, param->u.wpa_key.seq_len);
	} else {
		if (param->u.wpa_key.seq &&
		    copy_from_user(&abySeq[0], param->u.wpa_key.seq,
				   param->u.wpa_key.seq_len)) {
			spin_lock_irq(&pDevice->lock);
			return -EINVAL;
		}
	}
	spin_lock_irq(&pDevice->lock);

	if (param->u.wpa_key.seq_len > 0) {
		/* Little-endian fold of the sequence bytes into the RSC. */
		for (ii = 0 ; ii < param->u.wpa_key.seq_len ; ii++) {
			if (ii < 4)
				LODWORD(KeyRSC) |= (abySeq[ii] << (ii * 8));
			else
				HIDWORD(KeyRSC) |= (abySeq[ii] << ((ii - 4) * 8));
		}
		dwKeyIndex |= 1 << 29;	/* RSC valid */
	}

	if (param->u.wpa_key.key_index >= MAX_GROUP_KEY) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return dwKeyIndex > 3\n");
		return -EINVAL;
	}

	if (param->u.wpa_key.alg_name == WPA_ALG_TKIP) {
		pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
	}

	if (param->u.wpa_key.alg_name == WPA_ALG_CCMP) {
		pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
	}

	if (param->u.wpa_key.set_tx)
		dwKeyIndex |= (1 << 31);

	if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled)
		byKeyDecMode = KEY_CTL_CCMP;
	else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled)
		byKeyDecMode = KEY_CTL_TKIP;
	else
		byKeyDecMode = KEY_CTL_WEP;

	// Fix HCT test that set 256 bits KEY and Ndis802_11Encryption3Enabled
	if (pDevice->eEncryptionStatus == Ndis802_11Encryption3Enabled) {
		if (param->u.wpa_key.key_len == MAX_KEY_LEN)
			byKeyDecMode = KEY_CTL_TKIP;
		else if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
			byKeyDecMode = KEY_CTL_WEP;
		else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
			byKeyDecMode = KEY_CTL_WEP;
	} else if (pDevice->eEncryptionStatus == Ndis802_11Encryption2Enabled) {
		if (param->u.wpa_key.key_len == WLAN_WEP40_KEYLEN)
			byKeyDecMode = KEY_CTL_WEP;
		else if (param->u.wpa_key.key_len == WLAN_WEP104_KEYLEN)
			byKeyDecMode = KEY_CTL_WEP;
	}

	// Check TKIP key length
	if ((byKeyDecMode == KEY_CTL_TKIP) &&
	    (param->u.wpa_key.key_len != MAX_KEY_LEN)) {
		// TKIP Key must be 256 bits
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return- TKIP Key must be 256 bits!\n");
		return -EINVAL;
	}
	// Check AES key length
	if ((byKeyDecMode == KEY_CTL_CCMP) &&
	    (param->u.wpa_key.key_len != AES_KEY_LEN)) {
		// AES Key must be 128 bits
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "return - AES Key must be 128 bits\n");
		return -EINVAL;
	}

	if (is_broadcast_ether_addr(&param->addr[0]) || (param->addr == NULL)) {
		/* if broadcast, set the key as every key entry's group key */
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Groupe Key Assign.\n");
		if ((KeybSetAllGroupKey(pDevice, &(pDevice->sKey), dwKeyIndex,
					param->u.wpa_key.key_len,
					(PQWORD) &(KeyRSC),
					(PBYTE)abyKey,
					byKeyDecMode
					) == TRUE) &&
		    (KeybSetDefaultKey(pDevice,
				       &(pDevice->sKey),
				       dwKeyIndex,
				       param->u.wpa_key.key_len,
				       (PQWORD) &(KeyRSC),
				       (PBYTE)abyKey,
				       byKeyDecMode
				       ) == TRUE) ) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "GROUP Key Assign.\n");
		} else {
			return -EINVAL;
		}
	} else {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Assign.\n");
		// BSSID not 0xffffffffffff
		// Pairwise Key can't be WEP
		if (byKeyDecMode == KEY_CTL_WEP) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key can't be WEP\n");
			return -EINVAL;
		}
		dwKeyIndex |= (1 << 30); // set pairwise key
		if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA) {
			//DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA - WMAC_CONFIG_IBSS_STA\n"));
			return -EINVAL;
		}
		if (KeybSetKey(pDevice, &(pDevice->sKey), &param->addr[0],
			       dwKeyIndex, param->u.wpa_key.key_len,
			       (PQWORD) &(KeyRSC), (PBYTE)abyKey, byKeyDecMode
			       ) == TRUE) {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Pairwise Key Set\n");
		} else {
			// Key Table Full
			if (!compare_ether_addr(&param->addr[0], pDevice->abyBSSID)) {
				//DBG_PRN_WLAN03(("return NDIS_STATUS_INVALID_DATA -Key Table Full.2\n"));
				return -EINVAL;
			} else {
				// Save Key and configure just before associate/reassociate to BSSID
				// we do not implement now
				return -EINVAL;
			}
		}
	} // BSSID not 0xffffffffffff

	if ((ret == 0) && ((param->u.wpa_key.set_tx) != 0)) {
		pDevice->byKeyIndex = (BYTE)param->u.wpa_key.key_index;
		pDevice->bTransmitKey = TRUE;
	}
	pDevice->bEncryptionEnable = TRUE;
	return ret;
}
/*
* Description:
* enable wpa auth & mode
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      enable wpa auth & mode
 *
 * Resets the management object to open authentication with shared-key
 * algorithm disabled; "param" is accepted for interface symmetry but
 * not consulted.
 *
 * Return Value: always 0
 */
static int wpa_set_wpa(PSDevice pDevice, struct viawget_wpa_param *param)
{
	PSMgmtObject pMgmt = &pDevice->sMgmtObj;

	pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
	pMgmt->bShareKeyAlgorithm = FALSE;
	return 0;
}
/*
* Description:
* set disassociate
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      set disassociate
 *
 * Schedules a disassociate command, but only when the link is up and
 * the supplied address matches the current BSSID.
 *
 * Return Value: always 0
 */
static int wpa_set_disassociate(PSDevice pDevice, struct viawget_wpa_param *param)
{
	PSMgmtObject pMgmt = &pDevice->sMgmtObj;

	spin_lock_irq(&pDevice->lock);
	if (pDevice->bLinkPass &&
	    !memcmp(param->addr, pMgmt->abyCurrBSSID, 6))
		bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
	spin_unlock_irq(&pDevice->lock);

	return 0;
}
/*
* Description:
* enable scan process
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      enable scan process
 *
 * Copies the requested (possibly hidden) SSID into the management
 * object and schedules a BSSID scan.
 *
 * Parameters:
 *  In:
 *      pDevice -
 *      param   - scan request from userspace
 *  Out:
 *
 * Return Value: always 0
 */
static int wpa_set_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
	/**set ap_scan=1&&scan_ssid=1 under hidden ssid mode**/
	PSMgmtObject pMgmt = &pDevice->sMgmtObj;
	PWLAN_IE_SSID pItemSSID;
	size_t ssid_len = param->u.scan_req.ssid_len;

	/*
	 * ssid_len comes straight from userspace: clamp it so the
	 * memcpy below cannot overflow abyDesireSSID, which only has
	 * room for WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1 bytes.
	 */
	if (ssid_len > WLAN_SSID_MAXLEN)
		ssid_len = WLAN_SSID_MAXLEN;

	printk("wpa_set_scan-->desired [ssid=%s,ssid_len=%d]\n",
	       param->u.scan_req.ssid, param->u.scan_req.ssid_len);

	// Set the SSID
	memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
	pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
	pItemSSID->byElementID = WLAN_EID_SSID;
	memcpy(pItemSSID->abySSID, param->u.scan_req.ssid, ssid_len);
	pItemSSID->len = ssid_len;

	spin_lock_irq(&pDevice->lock);
	BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass);
	bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN,
			 pMgmt->abyDesireSSID);
	spin_unlock_irq(&pDevice->lock);

	return 0;
}
/*
* Description:
* get bssid
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      get bssid
 *
 * Copies the current BSSID into the reply structure.
 *
 * Return Value: always 0
 */
static int wpa_get_bssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
	PSMgmtObject pMgmt = &pDevice->sMgmtObj;

	memcpy(param->u.wpa_associate.bssid, pMgmt->abyCurrBSSID, 6);
	return 0;
}
/*
* Description:
* get bssid
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      get ssid
 *
 * Copies the current SSID IE contents and length into the reply
 * structure.
 *
 * Return Value: always 0
 */
static int wpa_get_ssid(PSDevice pDevice, struct viawget_wpa_param *param)
{
	PSMgmtObject pMgmt = &pDevice->sMgmtObj;
	PWLAN_IE_SSID pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID;

	memcpy(param->u.wpa_associate.ssid, pItemSSID->abySSID, pItemSSID->len);
	param->u.wpa_associate.ssid_len = pItemSSID->len;
	return 0;
}
/*
* Description:
* get scan results
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      get scan results
 *
 * Bubble-sorts the BSS list so the strongest RSSI comes first, then
 * serialises every active entry into an array of viawget_scan_result
 * records and copies it to userspace.
 *
 * Parameters:
 *  In:
 *      pDevice -
 *      param   - results land in param->u.scan_results
 *  Out:
 *
 * Return Value: 0, -ENOMEM or -EFAULT
 */
static int wpa_get_scan(PSDevice pDevice, struct viawget_wpa_param *param)
{
	struct viawget_scan_result *scan_buf;
	PSMgmtObject pMgmt = &pDevice->sMgmtObj;
	PWLAN_IE_SSID pItemSSID;
	PKnownBSS pBSS;
	PBYTE pBuf;
	int ret = 0;
	u16 count = 0;
	u16 ii;
	u16 jj;
	long ldBm; //James //add

	//******mike:bubble sort by stronger RSSI*****//
	/* GFP_ATOMIC allocations -- NOTE(review): presumably because a
	 * caller can hold the device spinlock; confirm. */
	PBYTE ptempBSS;
	ptempBSS = kmalloc(sizeof(KnownBSS), GFP_ATOMIC);
	if (ptempBSS == NULL) {
		printk("bubble sort kmalloc memory fail@@@\n");
		ret = -ENOMEM;
		return ret;
	}
	/* Adjacent-swap passes: inactive or weaker entries sink down. */
	for (ii = 0; ii < MAX_BSS_NUM; ii++) {
		for (jj = 0; jj < MAX_BSS_NUM - ii - 1; jj++) {
			if ((pMgmt->sBSSList[jj].bActive != TRUE)
			    || ((pMgmt->sBSSList[jj].uRSSI > pMgmt->sBSSList[jj + 1].uRSSI)
				&& (pMgmt->sBSSList[jj + 1].bActive != FALSE))) {
				memcpy(ptempBSS,&pMgmt->sBSSList[jj], sizeof(KnownBSS));
				memcpy(&pMgmt->sBSSList[jj], &pMgmt->sBSSList[jj + 1],
				       sizeof(KnownBSS));
				memcpy(&pMgmt->sBSSList[jj + 1], ptempBSS, sizeof(KnownBSS));
			}
		}
	}
	kfree(ptempBSS);

	/* First pass: count active entries to size the result buffer. */
	count = 0;
	pBSS = &(pMgmt->sBSSList[0]);
	for (ii = 0; ii < MAX_BSS_NUM; ii++) {
		pBSS = &(pMgmt->sBSSList[ii]);
		if (!pBSS->bActive)
			continue;
		count++;
	}

	pBuf = kcalloc(count, sizeof(struct viawget_scan_result), GFP_ATOMIC);
	if (pBuf == NULL) {
		ret = -ENOMEM;
		return ret;
	}
	scan_buf = (struct viawget_scan_result *)pBuf;

	/* Second pass: serialise each active BSS into scan_buf. */
	pBSS = &(pMgmt->sBSSList[0]);
	for (ii = 0, jj = 0; ii < MAX_BSS_NUM; ii++) {
		pBSS = &(pMgmt->sBSSList[ii]);
		if (pBSS->bActive) {
			if (jj >= count)	/* never overrun the buffer */
				break;
			memcpy(scan_buf->bssid, pBSS->abyBSSID, WLAN_BSSID_LEN);
			pItemSSID = (PWLAN_IE_SSID)pBSS->abySSID;
			memcpy(scan_buf->ssid, pItemSSID->abySSID, pItemSSID->len);
			scan_buf->ssid_len = pItemSSID->len;
			scan_buf->freq = frequency_list[pBSS->uChannel-1];
			scan_buf->caps = pBSS->wCapInfo; // DavidWang for sharemode
			/* Map RSSI to dBm, then to 0..100 quality:
			 * >= -50 dBm is 100, <= -90 dBm is 0, linear between. */
			RFvRSSITodBm(pDevice, (BYTE)(pBSS->uRSSI), &ldBm);
			if (-ldBm < 50)
				scan_buf->qual = 100;
			else if (-ldBm > 90)
				scan_buf->qual = 0;
			else
				scan_buf->qual=(40-(-ldBm-50))*100/40;
			//James
			//scan_buf->caps = pBSS->wCapInfo;
			//scan_buf->qual =
			scan_buf->noise = 0;
			scan_buf->level = ldBm;
			//scan_buf->maxrate =
			if (pBSS->wWPALen != 0) {
				scan_buf->wpa_ie_len = pBSS->wWPALen;
				memcpy(scan_buf->wpa_ie, pBSS->byWPAIE, pBSS->wWPALen);
			}
			if (pBSS->wRSNLen != 0) {
				scan_buf->rsn_ie_len = pBSS->wRSNLen;
				memcpy(scan_buf->rsn_ie, pBSS->byRSNIE, pBSS->wRSNLen);
			}
			scan_buf = (struct viawget_scan_result *)((PBYTE)scan_buf + sizeof(struct viawget_scan_result));
			jj ++;
		}
	}

	if (jj < count)
		count = jj;

	if (copy_to_user(param->u.scan_results.buf, pBuf, sizeof(struct viawget_scan_result) * count))
		ret = -EFAULT;
	param->u.scan_results.scan_count = count;
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " param->u.scan_results.scan_count = %d\n", count);

	kfree(pBuf);
	return ret;
}
/*
* Description:
* set associate with AP
*
* Parameters:
* In:
* pDevice -
* param -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      set associate with AP
 *
 * Translates the wpa_supplicant association request into driver state
 * (config mode, auth mode, cipher selection, desired SSID/BSSID) and
 * schedules the SSID join command, preceded by a hidden-SSID site
 * survey when the target is not yet in the BSS list.
 *
 * Parameters:
 *  In:
 *      pDevice -
 *      param   - association parameters from userspace
 *  Out:
 *
 * Return Value: 0, -EINVAL or -EFAULT
 */
static int wpa_set_associate(PSDevice pDevice, struct viawget_wpa_param *param)
{
	PSMgmtObject pMgmt = &pDevice->sMgmtObj;
	PWLAN_IE_SSID pItemSSID;
	BYTE abyNullAddr[] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
	BYTE abyWPAIE[64];
	int ret = 0;
	BOOL bwepEnabled=FALSE;

	// set key type & algorithm
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pairwise_suite = %d\n", param->u.wpa_associate.pairwise_suite);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "group_suite = %d\n", param->u.wpa_associate.group_suite);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "key_mgmt_suite = %d\n", param->u.wpa_associate.key_mgmt_suite);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "auth_alg = %d\n", param->u.wpa_associate.auth_alg);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "mode = %d\n", param->u.wpa_associate.mode);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ie_len = %d\n", param->u.wpa_associate.wpa_ie_len);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Roaming dBm = %d\n", param->u.wpa_associate.roam_dbm); // Davidwang

	/* Bounded copy of the supplied WPA/RSN IE. */
	if (param->u.wpa_associate.wpa_ie) {
		if (param->u.wpa_associate.wpa_ie_len > sizeof(abyWPAIE))
			return -EINVAL;
		if (copy_from_user(&abyWPAIE[0], param->u.wpa_associate.wpa_ie,
				   param->u.wpa_associate.wpa_ie_len))
			return -EFAULT;
	}

	/* mode 1 selects ad-hoc, anything else infrastructure. */
	if (param->u.wpa_associate.mode == 1)
		pMgmt->eConfigMode = WMAC_CONFIG_IBSS_STA;
	else
		pMgmt->eConfigMode = WMAC_CONFIG_ESS_STA;

	// set bssid
	if (memcmp(param->u.wpa_associate.bssid, &abyNullAddr[0], 6) != 0)
		memcpy(pMgmt->abyDesireBSSID, param->u.wpa_associate.bssid, 6);
	// set ssid
	/* NOTE(review): ssid_len comes from userspace and is used for
	 * the memcpy below without being checked against
	 * WLAN_SSID_MAXLEN -- potential overflow of abyDesireSSID;
	 * confirm and clamp (cf. the bound applied to wpa_ie_len). */
	memset(pMgmt->abyDesireSSID, 0, WLAN_IEHDR_LEN + WLAN_SSID_MAXLEN + 1);
	pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID;
	pItemSSID->byElementID = WLAN_EID_SSID;
	pItemSSID->len = param->u.wpa_associate.ssid_len;
	memcpy(pItemSSID->abySSID, param->u.wpa_associate.ssid, pItemSSID->len);

	/* Pick the auth mode: no IE -> legacy open/shared key,
	 * RSN IE -> WPA2 variants, any other IE -> WPA variants. */
	if (param->u.wpa_associate.wpa_ie_len == 0) {
		if (param->u.wpa_associate.auth_alg & AUTH_ALG_SHARED_KEY)
			pMgmt->eAuthenMode = WMAC_AUTH_SHAREKEY;
		else
			pMgmt->eAuthenMode = WMAC_AUTH_OPEN;
	} else if (abyWPAIE[0] == RSN_INFO_ELEM) {
		if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
			pMgmt->eAuthenMode = WMAC_AUTH_WPA2PSK;
		else
			pMgmt->eAuthenMode = WMAC_AUTH_WPA2;
	} else {
		if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_WPA_NONE)
			pMgmt->eAuthenMode = WMAC_AUTH_WPANONE;
		else if (param->u.wpa_associate.key_mgmt_suite == KEY_MGMT_PSK)
			pMgmt->eAuthenMode = WMAC_AUTH_WPAPSK;
		else
			pMgmt->eAuthenMode = WMAC_AUTH_WPA;
	}

	/* Map the pairwise cipher onto the NDIS encryption status. */
	switch (param->u.wpa_associate.pairwise_suite) {
	case CIPHER_CCMP:
		pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
		break;
	case CIPHER_TKIP:
		pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
		break;
	case CIPHER_WEP40:
	case CIPHER_WEP104:
		pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
		bwepEnabled = TRUE;
		break;
	case CIPHER_NONE:
		/* group-only encryption: follow the group suite */
		if (param->u.wpa_associate.group_suite == CIPHER_CCMP)
			pDevice->eEncryptionStatus = Ndis802_11Encryption3Enabled;
		else
			pDevice->eEncryptionStatus = Ndis802_11Encryption2Enabled;
		break;
	default:
		pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
	}

	pMgmt->Roam_dbm = param->u.wpa_associate.roam_dbm;
	if (pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) { // @wep-sharekey
		pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
		pMgmt->bShareKeyAlgorithm = TRUE;
	} else if (pMgmt->eAuthenMode == WMAC_AUTH_OPEN) {
		if(bwepEnabled==TRUE) { //@open-wep
			pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
		} else {
			// @only open
			pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
		}
	}
	// mike save old encryption status
	pDevice->eOldEncryptionStatus = pDevice->eEncryptionStatus;

	if (pDevice->eEncryptionStatus != Ndis802_11EncryptionDisabled)
		pDevice->bEncryptionEnable = TRUE;
	else
		pDevice->bEncryptionEnable = FALSE;

	if ((pMgmt->eAuthenMode == WMAC_AUTH_SHAREKEY) ||
	    ((pMgmt->eAuthenMode == WMAC_AUTH_OPEN) && (bwepEnabled==TRUE))) {
		// mike re-comment:open-wep && sharekey-wep needn't do initial key!!
	} else {
		KeyvInitTable(pDevice,&pDevice->sKey);
	}

	/* Reset link state and kick off the join under the lock. */
	spin_lock_irq(&pDevice->lock);
	pDevice->bLinkPass = FALSE;
	ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW);
	memset(pMgmt->abyCurrBSSID, 0, 6);
	pMgmt->eCurrState = WMAC_STATE_IDLE;
	netif_stop_queue(pDevice->dev);

	/******* search if ap_scan=2, which is associating request in hidden ssid mode ****/
	{
		PKnownBSS pCurr = NULL;
		pCurr = BSSpSearchBSSList(pDevice,
					  pMgmt->abyDesireBSSID,
					  pMgmt->abyDesireSSID,
					  pDevice->eConfigPHYMode
					  );
		if (pCurr == NULL){
			printk("wpa_set_associate---->hidden mode site survey before associate.......\n");
			bScheduleCommand((void *)pDevice,
					 WLAN_CMD_BSSID_SCAN,
					 pMgmt->abyDesireSSID);
		}
	}
	/****************************************************************/

	bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
	spin_unlock_irq(&pDevice->lock);

	return ret;
}
/*
* Description:
* wpa_ioctl main function supported for wpa supplicant
*
* Parameters:
* In:
* pDevice -
* iw_point -
* Out:
*
* Return Value:
*
*/
/*
 * Description:
 *      wpa_ioctl main function supported for wpa supplicant
 *
 * Copies the viawget_wpa_param block in from userspace, dispatches on
 * param->cmd, and for the "get" commands copies the result back out.
 *
 * Parameters:
 *  In:
 *      pDevice -
 *      p       - iw_point descriptor of the userspace buffer
 *  Out:
 *
 * Return Value: 0 or negative errno
 */
int wpa_ioctl(PSDevice pDevice, struct iw_point *p)
{
	struct viawget_wpa_param *param;
	int ret = 0;
	/* Set for commands that return data; note this local deliberately
	 * (and confusingly) shadows the function name. */
	int wpa_ioctl = 0;

	/* Sanity-check the user-supplied length before allocating. */
	if (p->length < sizeof(struct viawget_wpa_param) ||
	    p->length > VIAWGET_WPA_MAX_BUF_SIZE || !p->pointer)
		return -EINVAL;

	param = kmalloc((int)p->length, GFP_KERNEL);
	if (param == NULL)
		return -ENOMEM;

	if (copy_from_user(param, p->pointer, p->length)) {
		ret = -EFAULT;
		goto out;
	}

	switch (param->cmd) {
	case VIAWGET_SET_WPA:
		ret = wpa_set_wpa(pDevice, param);
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_WPA \n");
		break;

	case VIAWGET_SET_KEY:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_KEY \n");
		/* wpa_set_keys() expects the lock held; it drops it
		 * itself around the user copies. */
		spin_lock_irq(&pDevice->lock);
		ret = wpa_set_keys(pDevice, param, FALSE);
		spin_unlock_irq(&pDevice->lock);
		break;

	case VIAWGET_SET_SCAN:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_SCAN \n");
		ret = wpa_set_scan(pDevice, param);
		break;

	case VIAWGET_GET_SCAN:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SCAN\n");
		ret = wpa_get_scan(pDevice, param);
		wpa_ioctl = 1;
		break;

	case VIAWGET_GET_SSID:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_SSID \n");
		ret = wpa_get_ssid(pDevice, param);
		wpa_ioctl = 1;
		break;

	case VIAWGET_GET_BSSID:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_GET_BSSID \n");
		ret = wpa_get_bssid(pDevice, param);
		wpa_ioctl = 1;
		break;

	case VIAWGET_SET_ASSOCIATE:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_ASSOCIATE \n");
		ret = wpa_set_associate(pDevice, param);
		break;

	case VIAWGET_SET_DISASSOCIATE:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DISASSOCIATE \n");
		ret = wpa_set_disassociate(pDevice, param);
		break;

	case VIAWGET_SET_DROP_UNENCRYPT:
		/* accepted but intentionally a no-op */
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DROP_UNENCRYPT \n");
		break;

	case VIAWGET_SET_DEAUTHENTICATE:
		/* accepted but intentionally a no-op */
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "VIAWGET_SET_DEAUTHENTICATE \n");
		break;

	default:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wpa_ioctl: unknown cmd=%d\n",
			param->cmd);
		kfree(param);
		return -EOPNOTSUPP;
	}

	/* Only the "get" commands copy the updated param block back. */
	if ((ret == 0) && wpa_ioctl) {
		if (copy_to_user(p->pointer, param, p->length)) {
			ret = -EFAULT;
			goto out;
		}
	}

out:
	kfree(param);
	return ret;
}
| gpl-2.0 |
NoelMacwan/SXDNickiSS | drivers/spi/spi-tle62x0.c | 5192 | 7668 | /*
* Support Infineon TLE62x0 driver chips
*
* Copyright (c) 2007 Simtec Electronics
* Ben Dooks, <ben@simtec.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/tle62x0.h>
#define CMD_READ 0x00
#define CMD_SET 0xff
#define DIAG_NORMAL 0x03
#define DIAG_OVERLOAD 0x02
#define DIAG_OPEN 0x01
#define DIAG_SHORTGND 0x00
/*
 * Driver state for one TLE62x0 chip.
 */
struct tle62x0_state {
	struct spi_device	*us;		/* SPI device we talk through */
	struct mutex		lock;		/* serialises bus access and gpio_state */
	unsigned int		nr_gpio;	/* channel count (16 selects the 3-byte command) */
	unsigned int		gpio_state;	/* cached output state, one bit per channel */
	unsigned char		tx_buff[4];	/* SPI transmit buffer */
	unsigned char		rx_buff[4];	/* SPI receive buffer */
};
static int to_gpio_num(struct device_attribute *attr);
/*
 * Push the cached gpio_state to the chip with a SET command.
 * 16-channel parts take a 3-byte command, smaller parts 2 bytes.
 * Returns the spi_write() result.
 */
static inline int tle62x0_write(struct tle62x0_state *st)
{
	unsigned char *buff = st->tx_buff;
	unsigned int state = st->gpio_state;
	int len;

	buff[0] = CMD_SET;
	if (st->nr_gpio == 16) {
		buff[1] = state >> 8;
		buff[2] = state;
		len = 3;
	} else {
		buff[1] = state;
		len = 2;
	}

	dev_dbg(&st->us->dev, "buff %02x,%02x,%02x\n",
		buff[0], buff[1], buff[2]);

	return spi_write(st->us, buff, len);
}
/*
 * Clock the diagnosis word out of the chip into st->rx_buff using a
 * full-duplex READ transfer (two status bits per channel).
 * Returns the spi_sync() result.
 */
static inline int tle62x0_read(struct tle62x0_state *st)
{
	struct spi_message msg;
	struct spi_transfer xfer = {
		.tx_buf = st->tx_buff,
		.rx_buf = st->rx_buff,
		.len = (st->nr_gpio * 2) / 8,
	};

	st->tx_buff[0] = CMD_READ;
	st->tx_buff[1] = st->tx_buff[2] = st->tx_buff[3] = 0x00;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer, &msg);
	return spi_sync(st->us, &msg);
}
/*
 * Map a two-bit per-channel diagnosis code onto its single-letter
 * status string: "N" normal, "V" overload, "O" open load, "G" short to
 * ground, "?" for anything unrecognised.
 *
 * Returns a pointer into a static string literal, so the return type is
 * now const char *; the previous unsigned char * discarded both the
 * literal's constness and its char type (an invalid pointer
 * conversion).
 */
static const char *decode_fault(unsigned int fault_code)
{
	switch (fault_code & 3) {
	case DIAG_NORMAL:
		return "N";
	case DIAG_OVERLOAD:
		return "V";
	case DIAG_OPEN:
		return "O";
	case DIAG_SHORTGND:
		return "G";
	}

	return "?";
}
/*
 * sysfs "status_show": read the diagnosis word from the chip and print
 * one letter per channel (see decode_fault()), space separated, with a
 * trailing newline.
 */
static ssize_t tle62x0_status_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct tle62x0_state *st = dev_get_drvdata(dev);
	char *bp = buf;
	unsigned char *buff = st->rx_buff;
	unsigned long fault = 0;
	int ptr;
	int ret;

	mutex_lock(&st->lock);
	ret = tle62x0_read(st);
	dev_dbg(dev, "tle62x0_read() returned %d\n", ret);
	if (ret < 0) {
		mutex_unlock(&st->lock);
		return ret;
	}

	/* Fold the received bytes, MSB first, into one fault word
	 * holding two diagnosis bits per channel. */
	for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) {
		fault <<= 8;
		fault |= ((unsigned long)buff[ptr]);

		dev_dbg(dev, "byte %d is %02x\n", ptr, buff[ptr]);
	}

	for (ptr = 0; ptr < st->nr_gpio; ptr++) {
		bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2)));
	}

	*bp++ = '\n';
	mutex_unlock(&st->lock);
	return bp - buf;
}
static DEVICE_ATTR(status_show, S_IRUGO, tle62x0_status_show, NULL);
static ssize_t tle62x0_gpio_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct tle62x0_state *st = dev_get_drvdata(dev);
int gpio_num = to_gpio_num(attr);
int value;
mutex_lock(&st->lock);
value = (st->gpio_state >> gpio_num) & 1;
mutex_unlock(&st->lock);
return snprintf(buf, PAGE_SIZE, "%d", value);
}
/*
 * tle62x0_gpio_store() - sysfs write of one output channel
 *
 * Parses a number from @buf (any non-zero value switches the channel on),
 * updates the cached gpio_state bit and writes the new state to the chip.
 *
 * Fix: reject an unmatched attribute (to_gpio_num() == -1) before using
 * it as a shift count, which would be undefined behaviour.
 */
static ssize_t tle62x0_gpio_store(struct device *dev,
		struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct tle62x0_state *st = dev_get_drvdata(dev);
	int gpio_num = to_gpio_num(attr);
	unsigned long val;
	char *endp;

	if (gpio_num < 0)
		return -EINVAL;

	val = simple_strtoul(buf, &endp, 0);
	if (buf == endp)
		return -EINVAL;

	dev_dbg(dev, "setting gpio %d to %ld\n", gpio_num, val);

	mutex_lock(&st->lock);

	if (val)
		st->gpio_state |= 1 << gpio_num;
	else
		st->gpio_state &= ~(1 << gpio_num);

	tle62x0_write(st);
	mutex_unlock(&st->lock);

	return len;
}
static DEVICE_ATTR(gpio1, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio2, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio3, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio4, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio5, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio6, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio7, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio8, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio9, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio10, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio11, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio12, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio13, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio14, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio15, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static DEVICE_ATTR(gpio16, S_IWUSR|S_IRUGO,
tle62x0_gpio_show, tle62x0_gpio_store);
static struct device_attribute *gpio_attrs[] = {
[0] = &dev_attr_gpio1,
[1] = &dev_attr_gpio2,
[2] = &dev_attr_gpio3,
[3] = &dev_attr_gpio4,
[4] = &dev_attr_gpio5,
[5] = &dev_attr_gpio6,
[6] = &dev_attr_gpio7,
[7] = &dev_attr_gpio8,
[8] = &dev_attr_gpio9,
[9] = &dev_attr_gpio10,
[10] = &dev_attr_gpio11,
[11] = &dev_attr_gpio12,
[12] = &dev_attr_gpio13,
[13] = &dev_attr_gpio14,
[14] = &dev_attr_gpio15,
[15] = &dev_attr_gpio16
};
/*
 * Map a gpioN device attribute pointer back to its 0-based channel
 * index, or -1 if the attribute is not one of ours.
 */
static int to_gpio_num(struct device_attribute *attr)
{
	int i = ARRAY_SIZE(gpio_attrs);

	while (--i >= 0)
		if (gpio_attrs[i] == attr)
			break;

	return i;	/* falls out at -1 when no entry matched */
}
/*
 * tle62x0_probe() - bind the driver to an SPI device
 *
 * Allocates per-device state from the board's platform data, then creates
 * the "status_show" attribute and one "gpioN" attribute per configured
 * channel.  On partial failure the attributes created so far are removed
 * in reverse order before the state is freed.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit tle62x0_probe(struct spi_device *spi)
{
	struct tle62x0_state *st;
	struct tle62x0_pdata *pdata;
	int ptr;
	int ret;

	pdata = spi->dev.platform_data;
	if (pdata == NULL) {
		dev_err(&spi->dev, "no device data specified\n");
		return -EINVAL;
	}

	st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
	if (st == NULL) {
		dev_err(&spi->dev, "no memory for device state\n");
		return -ENOMEM;
	}

	st->us = spi;
	st->nr_gpio = pdata->gpio_count;
	st->gpio_state = pdata->init_state;

	mutex_init(&st->lock);

	ret = device_create_file(&spi->dev, &dev_attr_status_show);
	if (ret) {
		dev_err(&spi->dev, "cannot create status attribute\n");
		goto err_status;
	}

	for (ptr = 0; ptr < pdata->gpio_count; ptr++) {
		ret = device_create_file(&spi->dev, gpio_attrs[ptr]);
		if (ret) {
			dev_err(&spi->dev, "cannot create gpio attribute\n");
			goto err_gpios;
		}
	}

	/* tle62x0_write(st); */
	spi_set_drvdata(spi, st);
	return 0;

 err_gpios:
	/* undo the gpio attributes created so far, then the status one */
	while (--ptr >= 0)
		device_remove_file(&spi->dev, gpio_attrs[ptr]);
	device_remove_file(&spi->dev, &dev_attr_status_show);

 err_status:
	kfree(st);
	return ret;
}
/* Unbind: remove every sysfs attribute probe created, then free the state. */
static int __devexit tle62x0_remove(struct spi_device *spi)
{
	struct tle62x0_state *st = spi_get_drvdata(spi);
	int i;

	for (i = 0; i < st->nr_gpio; i++)
		device_remove_file(&spi->dev, gpio_attrs[i]);

	device_remove_file(&spi->dev, &dev_attr_status_show);
	kfree(st);

	return 0;
}
static struct spi_driver tle62x0_driver = {
.driver = {
.name = "tle62x0",
.owner = THIS_MODULE,
},
.probe = tle62x0_probe,
.remove = __devexit_p(tle62x0_remove),
};
static __init int tle62x0_init(void)
{
return spi_register_driver(&tle62x0_driver);
}
static __exit void tle62x0_exit(void)
{
spi_unregister_driver(&tle62x0_driver);
}
module_init(tle62x0_init);
module_exit(tle62x0_exit);
MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
MODULE_DESCRIPTION("TLE62x0 SPI driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("spi:tle62x0");
| gpl-2.0 |
wpandroidios/android_kernel_htc_b2wlj_LP50_Sense7 | fs/jffs2/background.c | 7752 | 4311 | /*
* JFFS2 -- Journalling Flash File System, Version 2.
*
* Copyright © 2001-2007 Red Hat, Inc.
* Copyright © 2004-2010 David Woodhouse <dwmw2@infradead.org>
*
* Created by David Woodhouse <dwmw2@infradead.org>
*
* For licensing information, see the file 'LICENCE' in this directory.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/jffs2.h>
#include <linux/mtd/mtd.h>
#include <linux/completion.h>
#include <linux/sched.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "nodelist.h"
static int jffs2_garbage_collect_thread(void *);
/*
 * Wake the garbage-collector thread if one is running and there is work
 * for it to do.  Caller must hold c->erase_completion_lock.  SIGHUP is
 * the GC thread's wakeup signal (see jffs2_garbage_collect_thread()).
 */
void jffs2_garbage_collect_trigger(struct jffs2_sb_info *c)
{
	assert_spin_locked(&c->erase_completion_lock);
	if (c->gc_task && jffs2_thread_should_wake(c))
		send_sig(SIGHUP, c->gc_task, 1);
}
/* This must only ever be called when no GC thread is currently running */
/*
 * Spawn the per-filesystem garbage-collector kthread and wait until it
 * has announced itself via gc_thread_start.
 *
 * Returns the thread's pid on success, or the negative error from
 * kthread_run() on failure.  Must only be called when no GC thread is
 * currently running (BUG_ON enforces this).
 */
int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c)
{
	struct task_struct *tsk;
	int ret = 0;

	BUG_ON(c->gc_task);

	init_completion(&c->gc_thread_start);
	init_completion(&c->gc_thread_exit);

	tsk = kthread_run(jffs2_garbage_collect_thread, c, "jffs2_gcd_mtd%d", c->mtd->index);
	if (IS_ERR(tsk)) {
		pr_warn("fork failed for JFFS2 garbage collect thread: %ld\n",
			-PTR_ERR(tsk));
		/* let anyone waiting on gc_thread_exit proceed */
		complete(&c->gc_thread_exit);
		ret = PTR_ERR(tsk);
	} else {
		/* Wait for it... */
		jffs2_dbg(1, "Garbage collect thread is pid %d\n", tsk->pid);
		wait_for_completion(&c->gc_thread_start);
		ret = tsk->pid;
	}

	return ret;
}
/*
 * Ask a running GC thread to exit (via SIGKILL) and wait for it to
 * signal gc_thread_exit.  Safe to call when no thread is running: the
 * gc_task check is made under erase_completion_lock, the same lock the
 * thread holds while clearing gc_task on its way out.
 */
void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c)
{
	int wait = 0;
	spin_lock(&c->erase_completion_lock);
	if (c->gc_task) {
		jffs2_dbg(1, "Killing GC task %d\n", c->gc_task->pid);
		send_sig(SIGKILL, c->gc_task, 1);
		wait = 1;
	}
	spin_unlock(&c->erase_completion_lock);
	if (wait)
		wait_for_completion(&c->gc_thread_exit);
}
/*
 * Main loop of the garbage-collector kthread.
 *
 * The thread sleeps until jffs2_garbage_collect_trigger() sends SIGHUP,
 * then runs single GC passes.  SIGKILL (sent by
 * jffs2_stop_garbage_collect_thread()) or kthread_stop() terminates it;
 * SIGSTOP/SIGCONT suspend and resume it.
 */
static int jffs2_garbage_collect_thread(void *_c)
{
	struct jffs2_sb_info *c = _c;

	/* these signals drive the state machine below */
	allow_signal(SIGKILL);
	allow_signal(SIGSTOP);
	allow_signal(SIGCONT);

	c->gc_task = current;
	complete(&c->gc_thread_start);	/* unblock the starter */

	set_user_nice(current, 10);	/* run at background priority */

	set_freezable();
	for (;;) {
		/* SIGHUP (the wakeup signal) is only wanted while idle */
		allow_signal(SIGHUP);
	again:
		spin_lock(&c->erase_completion_lock);
		if (!jffs2_thread_should_wake(c)) {
			set_current_state (TASK_INTERRUPTIBLE);
			spin_unlock(&c->erase_completion_lock);
			jffs2_dbg(1, "%s(): sleeping...\n", __func__);
			schedule();
		} else
			spin_unlock(&c->erase_completion_lock);

		/* Problem - immediately after bootup, the GCD spends a lot
		 * of time in places like jffs2_kill_fragtree(); so much so
		 * that userspace processes (like gdm and X) are starved
		 * despite plenty of cond_resched()s and renicing. Yield()
		 * doesn't help, either (presumably because userspace and GCD
		 * are generally competing for a higher latency resource -
		 * disk).
		 * This forces the GCD to slow the hell down. Pulling an
		 * inode in with read_inode() is much preferable to having
		 * the GC thread get there first. */
		schedule_timeout_interruptible(msecs_to_jiffies(50));

		if (kthread_should_stop()) {
			jffs2_dbg(1, "%s(): kthread_stop() called\n", __func__);
			goto die;
		}

		/* Put_super will send a SIGKILL and then wait on the sem.
		 */
		while (signal_pending(current) || freezing(current)) {
			siginfo_t info;
			unsigned long signr;

			if (try_to_freeze())
				goto again;

			signr = dequeue_signal_lock(current, &current->blocked, &info);

			switch(signr) {
			case SIGSTOP:
				jffs2_dbg(1, "%s(): SIGSTOP received\n",
					  __func__);
				set_current_state(TASK_STOPPED);
				schedule();
				break;

			case SIGKILL:
				jffs2_dbg(1, "%s(): SIGKILL received\n",
					  __func__);
				goto die;

			case SIGHUP:
				jffs2_dbg(1, "%s(): SIGHUP received\n",
					  __func__);
				break;
			default:
				jffs2_dbg(1, "%s(): signal %ld received\n",
					  __func__, signr);
			}
		}
		/* We don't want SIGHUP to interrupt us. STOP and KILL are OK though. */
		disallow_signal(SIGHUP);

		jffs2_dbg(1, "%s(): pass\n", __func__);
		if (jffs2_garbage_collect_pass(c) == -ENOSPC) {
			pr_notice("No space for garbage collection. Aborting GC thread\n");
			goto die;
		}
	}

 die:
	/* clear gc_task so trigger/stop see the thread as gone */
	spin_lock(&c->erase_completion_lock);
	c->gc_task = NULL;
	spin_unlock(&c->erase_completion_lock);
	complete_and_exit(&c->gc_thread_exit, 0);
}
| gpl-2.0 |
profglavcho/mt6735-kernel-3.10.61 | arch/score/kernel/irq.c | 12104 | 2986 | /*
* arch/score/kernel/irq.c
*
* Score Processor version.
*
* Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
* Chen Liqin <liqin.chen@sunplusct.com>
* Lennox Wu <lennox.wu@sunplusct.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see the file COPYING, or write
* to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/seq_file.h>
#include <asm/io.h>
/* the interrupt controller is hardcoded at this address */
#define SCORE_PIC ((u32 __iomem __force *)0x95F50000)
#define INT_PNDL 0
#define INT_PNDH 1
#define INT_PRIORITY_M 2
#define INT_PRIORITY_SG0 4
#define INT_PRIORITY_SG1 5
#define INT_PRIORITY_SG2 6
#define INT_PRIORITY_SG3 7
#define INT_MASKL 8
#define INT_MASKH 9
/*
* handles all normal device IRQs
*/
/* Entry point from low-level interrupt code: dispatch one device IRQ. */
asmlinkage void do_IRQ(int irq)
{
	irq_enter();			/* account entry into hard-IRQ context */
	generic_handle_irq(irq);	/* run the flow handler set in init_IRQ() */
	irq_exit();
}
/*
 * Mask (disable) one interrupt source in the on-chip PIC.
 *
 * Linux IRQ numbers map to PIC sources as (63 - irq); sources 0..31 live
 * in INT_MASKL, 32..63 in INT_MASKH.  A set mask bit disables the source.
 *
 * Fix: use an unsigned constant for the shift — "1 << 31" on a signed
 * int is undefined behaviour.
 */
static void score_mask(struct irq_data *d)
{
	unsigned int irq_source = 63 - d->irq;

	if (irq_source < 32)
		__raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) |
			(1U << irq_source)), SCORE_PIC + INT_MASKL);
	else
		__raw_writel((__raw_readl(SCORE_PIC + INT_MASKH) |
			(1U << (irq_source - 32))), SCORE_PIC + INT_MASKH);
}
/*
 * Unmask (enable) one interrupt source in the on-chip PIC; mirror image
 * of score_mask().
 *
 * Fix: unsigned shift constant for the same reason as in score_mask().
 */
static void score_unmask(struct irq_data *d)
{
	unsigned int irq_source = 63 - d->irq;

	if (irq_source < 32)
		__raw_writel((__raw_readl(SCORE_PIC + INT_MASKL) &
			~(1U << irq_source)), SCORE_PIC + INT_MASKL);
	else
		__raw_writel((__raw_readl(SCORE_PIC + INT_MASKH) &
			~(1U << (irq_source - 32))), SCORE_PIC + INT_MASKH);
}
struct irq_chip score_irq_chip = {
.name = "Score7-level",
.irq_mask = score_mask,
.irq_mask_ack = score_mask,
.irq_unmask = score_unmask,
};
/*
* initialise the interrupt system
*/
void __init init_IRQ(void)
{
	int index;
	unsigned long target_addr;

	/* route every IRQ through the PIC chip with level-triggered handling */
	for (index = 0; index < NR_IRQS; ++index)
		irq_set_chip_and_handler(index, &score_irq_chip,
					 handle_level_irq);

	/* copy the common interrupt entry stub into each vector slot */
	for (target_addr = IRQ_VECTOR_BASE_ADDR;
		target_addr <= IRQ_VECTOR_END_ADDR;
		target_addr += IRQ_VECTOR_SIZE)
		memcpy((void *)target_addr, \
			interrupt_exception_vector, IRQ_VECTOR_SIZE);

	/* start with all 64 sources masked; score_unmask() enables them later */
	__raw_writel(0xffffffff, SCORE_PIC + INT_MASKL);
	__raw_writel(0xffffffff, SCORE_PIC + INT_MASKH);

	/* program cr3 with the exception vector base and offset mode */
	__asm__ __volatile__(
		"mtcr %0, cr3\n\t"
		: : "r" (EXCEPTION_VECTOR_BASE_ADDR | \
			VECTOR_ADDRESS_OFFSET_MODE16));
}
| gpl-2.0 |
BSydz/Triumph-Sharp-2.2.2-Custom-Kernel | net/rxrpc/ar-skbuff.c | 12872 | 3506 | /* ar-skbuff.c: socket buffer destruction handling
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
/*
* set up for the ACK at the end of the receive phase when we discard the final
* receive phase data packet
* - called with softirqs disabled
*/
static void rxrpc_request_final_ACK(struct rxrpc_call *call)
{
	/* the call may be aborted before we have a chance to ACK it */
	write_lock(&call->state_lock);

	switch (call->state) {
	case RXRPC_CALL_CLIENT_RECV_REPLY:
		call->state = RXRPC_CALL_CLIENT_FINAL_ACK;
		_debug("request final ACK");

		/* get an extra ref on the call for the final-ACK generator to
		 * release */
		rxrpc_get_call(call);
		set_bit(RXRPC_CALL_ACK_FINAL, &call->events);
		/* a non-negative return means the timer was idle or deleted,
		 * so the event must be driven by queueing the call instead */
		if (try_to_del_timer_sync(&call->ack_timer) >= 0)
			rxrpc_queue_call(call);
		break;

	case RXRPC_CALL_SERVER_RECV_REQUEST:
		call->state = RXRPC_CALL_SERVER_ACK_REQUEST;
		/* fall through - no further work for the server side here */
	default:
		break;
	}

	write_unlock(&call->state_lock);
}
/*
* drop the bottom ACK off of the call ACK window and advance the window
*/
static void rxrpc_hard_ACK_data(struct rxrpc_call *call,
				struct rxrpc_skb_priv *sp)
{
	int loop;
	u32 seq;

	spin_lock_bh(&call->lock);

	_debug("hard ACK #%u", ntohl(sp->hdr.seq));

	/* shift the whole ACK bitmap window down by one packet, pulling the
	 * low bit of each following word into the top of the previous one */
	for (loop = 0; loop < RXRPC_ACKR_WINDOW_ASZ; loop++) {
		call->ackr_window[loop] >>= 1;
		call->ackr_window[loop] |=
			call->ackr_window[loop + 1] << (BITS_PER_LONG - 1);
	}

	/* hard-ACKed data must be consumed strictly in sequence */
	seq = ntohl(sp->hdr.seq);
	ASSERTCMP(seq, ==, call->rx_data_eaten + 1);
	call->rx_data_eaten = seq;

	if (call->ackr_win_top < UINT_MAX)
		call->ackr_win_top++;

	ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
		    call->rx_data_post, >=, call->rx_data_recv);
	ASSERTIFCMP(call->state <= RXRPC_CALL_COMPLETE,
		    call->rx_data_recv, >=, call->rx_data_eaten);

	if (sp->hdr.flags & RXRPC_LAST_PACKET) {
		/* last data packet consumed: arrange the terminal ACK */
		rxrpc_request_final_ACK(call);
	} else if (atomic_dec_and_test(&call->ackr_not_idle) &&
		   test_and_clear_bit(RXRPC_CALL_TX_SOFT_ACK, &call->flags)) {
		_debug("send Rx idle ACK");
		__rxrpc_propose_ACK(call, RXRPC_ACK_IDLE, sp->hdr.serial,
				    true);
	}

	spin_unlock_bh(&call->lock);
}
/*
* destroy a packet that has an RxRPC control buffer
* - advance the hard-ACK state of the parent call (done here in case something
* in the kernel bypasses recvmsg() and steals the packet directly off of the
* socket receive queue)
*/
void rxrpc_packet_destructor(struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call = sp->call;

	_enter("%p{%p}", skb, call);

	if (call) {
		/* send the final ACK on a client call */
		if (sp->hdr.type == RXRPC_PACKET_TYPE_DATA)
			rxrpc_hard_ACK_data(call, sp);
		rxrpc_put_call(call);	/* drop the skb's reference on the call */
		sp->call = NULL;
	}

	if (skb->sk)
		sock_rfree(skb);	/* release socket receive-buffer accounting */
	_leave("");
}
/**
* rxrpc_kernel_free_skb - Free an RxRPC socket buffer
* @skb: The socket buffer to be freed
*
* Let RxRPC free its own socket buffer, permitting it to maintain debug
* accounting.
*/
void rxrpc_kernel_free_skb(struct sk_buff *skb)
{
rxrpc_free_skb(skb);
}
EXPORT_SYMBOL(rxrpc_kernel_free_skb);
| gpl-2.0 |
omegamoon/Rockchip-GPL-Kernel | drivers/media/dvb/firewire/firedtv-rc.c | 12872 | 4075 | /*
* FireDTV driver (formerly known as FireSAT)
*
* Copyright (C) 2004 Andreas Monitzer <andy@monitzer.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*/
#include <linux/bitops.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include "firedtv.h"
/* fixed table with older keycodes, geared towards MythTV */
static const u16 oldtable[] = {
/* code from device: 0x4501...0x451f */
KEY_ESC,
KEY_F9,
KEY_1,
KEY_2,
KEY_3,
KEY_4,
KEY_5,
KEY_6,
KEY_7,
KEY_8,
KEY_9,
KEY_I,
KEY_0,
KEY_ENTER,
KEY_RED,
KEY_UP,
KEY_GREEN,
KEY_F10,
KEY_SPACE,
KEY_F11,
KEY_YELLOW,
KEY_DOWN,
KEY_BLUE,
KEY_Z,
KEY_P,
KEY_PAGEDOWN,
KEY_LEFT,
KEY_W,
KEY_RIGHT,
KEY_P,
KEY_M,
/* code from device: 0x4540...0x4542 */
KEY_R,
KEY_V,
KEY_C,
};
/* user-modifiable table for a remote as sold in 2008 */
static const u16 keytable[] = {
/* code from device: 0x0300...0x031f */
[0x00] = KEY_POWER,
[0x01] = KEY_SLEEP,
[0x02] = KEY_STOP,
[0x03] = KEY_OK,
[0x04] = KEY_RIGHT,
[0x05] = KEY_1,
[0x06] = KEY_2,
[0x07] = KEY_3,
[0x08] = KEY_LEFT,
[0x09] = KEY_4,
[0x0a] = KEY_5,
[0x0b] = KEY_6,
[0x0c] = KEY_UP,
[0x0d] = KEY_7,
[0x0e] = KEY_8,
[0x0f] = KEY_9,
[0x10] = KEY_DOWN,
[0x11] = KEY_TITLE, /* "OSD" - fixme */
[0x12] = KEY_0,
[0x13] = KEY_F20, /* "16:9" - fixme */
[0x14] = KEY_SCREEN, /* "FULL" - fixme */
[0x15] = KEY_MUTE,
[0x16] = KEY_SUBTITLE,
[0x17] = KEY_RECORD,
[0x18] = KEY_TEXT,
[0x19] = KEY_AUDIO,
[0x1a] = KEY_RED,
[0x1b] = KEY_PREVIOUS,
[0x1c] = KEY_REWIND,
[0x1d] = KEY_PLAYPAUSE,
[0x1e] = KEY_NEXT,
[0x1f] = KEY_VOLUMEUP,
/* code from device: 0x0340...0x0354 */
[0x20] = KEY_CHANNELUP,
[0x21] = KEY_F21, /* "4:3" - fixme */
[0x22] = KEY_TV,
[0x23] = KEY_DVD,
[0x24] = KEY_VCR,
[0x25] = KEY_AUX,
[0x26] = KEY_GREEN,
[0x27] = KEY_YELLOW,
[0x28] = KEY_BLUE,
[0x29] = KEY_CHANNEL, /* "CH.LIST" */
[0x2a] = KEY_VENDOR, /* "CI" - fixme */
[0x2b] = KEY_VOLUMEDOWN,
[0x2c] = KEY_CHANNELDOWN,
[0x2d] = KEY_LAST,
[0x2e] = KEY_INFO,
[0x2f] = KEY_FORWARD,
[0x30] = KEY_LIST,
[0x31] = KEY_FAVORITES,
[0x32] = KEY_MENU,
[0x33] = KEY_EPG,
[0x34] = KEY_EXIT,
};
/*
 * fdtv_register_rc() - create and register the remote-control input device
 * @fdtv: owning FireDTV instance (remote_ctrl_dev is stored here)
 * @dev:  parent device for the new input device
 *
 * The keymap is a heap copy of the default @keytable so each device can
 * have its entries modified independently.  Returns 0 on success or a
 * negative errno; on failure everything allocated here is released.
 */
int fdtv_register_rc(struct firedtv *fdtv, struct device *dev)
{
	struct input_dev *idev;
	int i, err;

	idev = input_allocate_device();
	if (!idev)
		return -ENOMEM;

	fdtv->remote_ctrl_dev = idev;
	idev->name = "FireDTV remote control";
	idev->dev.parent = dev;
	idev->evbit[0] = BIT_MASK(EV_KEY);
	/* per-device writable copy of the default keymap */
	idev->keycode = kmemdup(keytable, sizeof(keytable), GFP_KERNEL);
	if (!idev->keycode) {
		err = -ENOMEM;
		goto fail;
	}
	idev->keycodesize = sizeof(keytable[0]);
	idev->keycodemax = ARRAY_SIZE(keytable);

	/* advertise every key the default map can generate */
	for (i = 0; i < ARRAY_SIZE(keytable); i++)
		set_bit(keytable[i], idev->keybit);

	err = input_register_device(idev);
	if (err)
		goto fail_free_keymap;

	return 0;

fail_free_keymap:
	kfree(idev->keycode);
fail:
	input_free_device(idev);
	return err;
}
void fdtv_unregister_rc(struct firedtv *fdtv)
{
cancel_work_sync(&fdtv->remote_ctrl_work);
kfree(fdtv->remote_ctrl_dev->keycode);
input_unregister_device(fdtv->remote_ctrl_dev);
}
/*
 * fdtv_handle_rc() - translate a raw remote scan code and inject a key
 * press immediately followed by a release.
 *
 * Scan-code ranges:
 *   0x0300..0x031f, 0x0340..0x0354: newer remotes, looked up in the
 *       per-device (modifiable) keymap; the second range continues at
 *       keymap index 0x20, hence the 0x0320 offset.
 *   0x4501..0x451f, 0x4540..0x4542: older remotes, looked up in the
 *       fixed oldtable; the second range continues at index 0x1f,
 *       hence the 0x4521 offset.
 * Anything else is logged and dropped.
 */
void fdtv_handle_rc(struct firedtv *fdtv, unsigned int code)
{
	struct input_dev *idev = fdtv->remote_ctrl_dev;
	u16 *keycode = idev->keycode;

	if (code >= 0x0300 && code <= 0x031f)
		code = keycode[code - 0x0300];
	else if (code >= 0x0340 && code <= 0x0354)
		code = keycode[code - 0x0320];
	else if (code >= 0x4501 && code <= 0x451f)
		code = oldtable[code - 0x4501];
	else if (code >= 0x4540 && code <= 0x4542)
		code = oldtable[code - 0x4521];
	else {
		printk(KERN_DEBUG "firedtv: invalid key code 0x%04x "
		       "from remote control\n", code);
		return;
	}

	input_report_key(idev, code, 1);
	input_sync(idev);
	input_report_key(idev, code, 0);
	input_sync(idev);
}
| gpl-2.0 |
robertoalcantara/linux-sunxi_craff | modules/wifi/ar6302/AR6K_SDK_ISC.build_3.1_RC.329/host/tools/athbtfilter/bluez/abtfilt_bluez_dbus_glib.c | 73 | 63865 | //------------------------------------------------------------------------------
// <copyright file="abtfilt_bt.c" company="Atheros">
// Copyright (c) 2008 Atheros Corporation. All rights reserved.
//
//
// Permission to use, copy, modify, and/or distribute this software for any
// purpose with or without fee is hereby granted, provided that the above
// copyright notice and this permission notice appear in all copies.
//
// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
//
//
//------------------------------------------------------------------------------
//==============================================================================
// Author(s): ="Atheros"
//==============================================================================
/*
* Bluetooth Filter - BT module
*
*/
static const char athId[] __attribute__ ((unused)) = "$Id: //depot/sw/releases/olca3.1-RC/host/tools/athbtfilter/bluez/abtfilt_bluez_dbus_glib.c#1 $";
#include "abtfilt_bluez_dbus_glib.h"
#include <dbus/dbus-glib.h>
#undef HCI_INQUIRY
#include <bluetooth.h>
#include <hci.h>
#include <hci_lib.h>
#include <sys/poll.h>
/* Definitions */
#define BLUEZ_NAME "org.bluez"
#define BLUEZ_PATH "/org/bluez"
#define ADAPTER_INTERFACE "org.bluez.Adapter"
#define MANAGER_INTERFACE "org.bluez.Manager"
#define AUDIO_MANAGER_PATH "/org/bluez/audio"
#define AUDIO_SINK_INTERFACE "org.bluez.audio.Sink"
#define AUDIO_SOURCE_INTERFACE "org.bluez.audio.Source"
#define AUDIO_MANAGER_INTERFACE "org.bluez.audio.Manager"
#define AUDIO_HEADSET_INTERFACE "org.bluez.audio.Headset"
#define AUDIO_GATEWAY_INTERFACE "org.bluez.audio.Gateway"
#define AUDIO_DEVICE_INTERFACE "org.bluez.audio.Device"
#define BTEV_GET_BT_CONN_LINK_TYPE(p) ((p)[9])
#define BTEV_GET_TRANS_INTERVAL(p) ((p)[10])
#define BTEV_GET_RETRANS_INTERVAL(p) ((p)[11])
#define BTEV_GET_RX_PKT_LEN(p) ((A_UINT16)((p)[12]) | (((A_UINT16)((p)[13])) << 8))
#define BTEV_GET_TX_PKT_LEN(p) ((A_UINT16)((p)[14]) | (((A_UINT16)((p)[15])) << 8))
#define BTEV_CMD_COMPLETE_GET_OPCODE(p) ((A_UINT16)((p)[1]) | (((A_UINT16)((p)[2])) << 8))
#define BTEV_CMD_COMPLETE_GET_STATUS(p) ((p)[3])
typedef enum {
BT_ADAPTER_ADDED = 0,
BT_ADAPTER_REMOVED,
DEVICE_DISCOVERY_STARTED,
DEVICE_DISCOVERY_FINISHED,
REMOTE_DEVICE_CONNECTED,
REMOTE_DEVICE_DISCONNECTED,
AUDIO_DEVICE_ADDED,
AUDIO_DEVICE_REMOVED,
AUDIO_HEADSET_CONNECTED,
AUDIO_HEADSET_DISCONNECTED,
AUDIO_HEADSET_STREAM_STARTED,
AUDIO_HEADSET_STREAM_STOPPED,
AUDIO_GATEWAY_CONNECTED, /* Not Implemented */
AUDIO_GATEWAY_DISCONNECTED, /* Not Implemented */
AUDIO_SINK_CONNECTED,
AUDIO_SINK_DISCONNECTED,
AUDIO_SINK_STREAM_STARTED,
AUDIO_SINK_STREAM_STOPPED,
AUDIO_SOURCE_CONNECTED, /* Not Implemented */
AUDIO_SOURCE_DISCONNECTED, /* Not Implemented */
BT_EVENTS_NUM_MAX,
} BT_STACK_EVENT;
typedef enum {
PROXY_INVALID = 0,
DEVICE_MANAGER,
DEVICE_ADAPTER,
AUDIO_MANAGER,
AUDIO_HEADSET,
AUDIO_GATEWAY,
AUDIO_SOURCE,
AUDIO_SINK,
} BT_PROXY_TYPE;
typedef enum {
ARG_INVALID = 0,
ARG_NONE,
ARG_STRING,
} BT_CB_TYPE;
typedef struct _BT_NOTIFICATION_CONFIG_PARAMS {
const char *name;
BT_PROXY_TYPE proxy;
BT_CB_TYPE arg;
} BT_NOTIFICATION_CONFIG_PARAMS;
static BT_NOTIFICATION_CONFIG_PARAMS g_NotificationConfig[BT_EVENTS_NUM_MAX] =
{
/* BT_ADAPTER_ADDED */
{"AdapterAdded", DEVICE_MANAGER, ARG_STRING},
/* BT_ADAPTER_REMOVED */
{"AdapterRemoved", DEVICE_MANAGER, ARG_STRING},
/* DEVICE_DISCOVERY_STARTED */
{"DiscoveryStarted", DEVICE_ADAPTER, ARG_NONE},
/* DEVICE_DISCOVERY_FINISHED */
{"DiscoveryCompleted", DEVICE_ADAPTER, ARG_NONE},
/* REMOTE_DEVICE_CONNECTED */
{"RemoteDeviceConnected", DEVICE_ADAPTER, ARG_STRING},
/* REMOTE_DEVICE_DISCONNECTED */
{"RemoteDeviceDisconnected", DEVICE_ADAPTER, ARG_STRING},
/* AUDIO_DEVICE_ADDED */
{"DeviceCreated", AUDIO_MANAGER, ARG_STRING},
/* AUDIO_DEVICE_REMOVED */
{"DeviceRemoved", AUDIO_MANAGER, ARG_STRING},
/* AUDIO_HEADSET_CONNECTED */
{"Connected", AUDIO_HEADSET, ARG_NONE},
/* AUDIO_HEADSET_DISCONNECTED */
{"Disconnected", AUDIO_HEADSET, ARG_NONE},
/* AUDIO_HEADSET_STREAM_STARTED */
{"Playing", AUDIO_HEADSET, ARG_NONE},
/* AUDIO_HEADSET_STREAM_STOPPED */
{"Stopped", AUDIO_HEADSET, ARG_NONE},
/* AUDIO_GATEWAY_CONNECTED */
{NULL, PROXY_INVALID, ARG_INVALID},
/* AUDIO_GATEWAY_DISCONNECTED */
{NULL, PROXY_INVALID, ARG_INVALID},
/* AUDIO_SINK_CONNECTED */
{"Connected", AUDIO_SINK, ARG_NONE},
/* AUDIO_SINK_DISCONNECTED */
{"Disconnected", AUDIO_SINK, ARG_NONE},
/* AUDIO_SINK_STREAM_STARTED */
{"Playing", AUDIO_SINK, ARG_NONE},
/* AUDIO_SINK_STREAM_STOPPED */
{"Stopped", AUDIO_SINK, ARG_NONE},
/* AUDIO_SOURCE_CONNECTED */
{NULL, PROXY_INVALID, ARG_INVALID},
/* AUDIO_SOURCE_DISCONNECTED */
{NULL, PROXY_INVALID, ARG_INVALID},
};
typedef struct {
char *str;
unsigned int val;
} hci_map;
static const hci_map ver_map[] = {
{ "1.0b", 0x00 },
{ "1.1", 0x01 },
{ "1.2", 0x02 },
{ "2.0", 0x03 },
{ "2.1", 0x04 },
{ NULL }
};
/* Function Prototypes */
static void BtAdapterAdded(DBusGProxy *proxy, const char *string,
gpointer user_data);
static void BtAdapterRemoved(DBusGProxy *proxy, const char *string,
gpointer user_data);
static A_STATUS AcquireBtAdapter(ABF_BT_INFO *pAbfBtInfo);
static void ReleaseBTAdapter(ABF_BT_INFO *pAbfBtInfo);
static void *BtEventThread(void *arg);
static void RegisterBtStackEventCb(ABF_BT_INFO *pAbfBtInfo,
BT_STACK_EVENT event, GCallback handler);
static void DeRegisterBtStackEventCb(ABF_BT_INFO *pAbfBtInfo,
BT_STACK_EVENT event, GCallback handler);
static A_STATUS GetAdapterInfo(ABF_BT_INFO *pAbfBtInfo);
static void RemoteDeviceDisconnected(DBusGProxy *proxy, const char *string,
gpointer user_data);
static void RemoteDeviceConnected(DBusGProxy *proxy, const char *string,
gpointer user_data);
static void AudioDeviceAdded(DBusGProxy *proxy, const char *string,
gpointer user_data);
static void AudioDeviceRemoved(DBusGProxy *proxy, const char *string,
gpointer user_data);
static void DeviceDiscoveryStarted(DBusGProxy *proxy, gpointer user_data);
static void DeviceDiscoveryFinished(DBusGProxy *proxy, gpointer user_data);
static void AudioHeadsetConnected(DBusGProxy *proxy, gpointer user_data);
static void AudioHeadsetDisconnected(DBusGProxy *proxy, gpointer user_data);
static void AudioHeadsetStreamStarted(DBusGProxy *proxy, gpointer user_data);
static void AudioHeadsetStreamStopped(DBusGProxy *proxy, gpointer user_data);
static void AudioGatewayConnected(DBusGProxy *proxy, gpointer user_data);
static void AudioGatewayDisconnected(DBusGProxy *proxy, gpointer user_data);
static void AudioSinkConnected(DBusGProxy *proxy, gpointer user_data);
static void AudioSinkDisconnected(DBusGProxy *proxy, gpointer user_data);
static void AudioSinkStreamStarted(DBusGProxy *proxy, gpointer user_data);
static void AudioSinkStreamStopped(DBusGProxy *proxy, gpointer user_data);
static void AudioSourceConnected(DBusGProxy *proxy, gpointer user_data);
static void AudioSourceDisconnected(DBusGProxy *proxy, gpointer user_data);
static A_STATUS CheckAndAcquireDefaultAdapter(ABF_BT_INFO *pAbfBtInfo);
static void ReleaseDefaultAdapter(ABF_BT_INFO *pAbfBtInfo);
static void AcquireDefaultAudioDevice(ABF_BT_INFO *pAbfBtInfo);
static void ReleaseDefaultAudioDevice(ABF_BT_INFO *pAbfBtInfo);
static void GetBtAudioConnectionProperties(ABF_BT_INFO *pAbfBtInfo,
ATHBT_STATE_INDICATION Indication);
static A_STATUS SetupHciEventFilter(ABF_BT_INFO *pAbfBtInfo);
static void CheckHciEventFilter(ABF_BT_INFO *pAbfBtInfo);
static A_STATUS IssueHCICommand(ABF_BT_INFO *pAbfBtInfo,
A_UINT16 OpCode,
A_UCHAR *pCmdData,
int CmdLength,
int EventRecvTimeoutMS,
A_UCHAR *pEventBuffer,
int MaxLength,
A_UCHAR **ppEventPtr,
int *pEventLength);
/* APIs exported to other modules */
/*
 * Initialize BlueZ stack notification: allocate the BT info context and
 * spawn the glib event thread (BtEventThread) that listens for D-Bus
 * signals from bluetoothd.  Idempotent: returns A_OK if already set up.
 *
 * Fixes over the original:
 *  - check the A_MALLOC() result before using it;
 *  - drop the second A_MEMZERO(), which ran *after* A_MUTEX_INIT()/
 *    A_COND_INIT() and therefore wiped the freshly initialized
 *    synchronization objects;
 *  - release the context instead of leaking it when thread creation fails.
 */
A_STATUS
Abf_BtStackNotificationInit(ATH_BT_FILTER_INSTANCE *pInstance, A_UINT32 Flags)
{
    A_STATUS status;
    GMainLoop *mainloop;
    ATHBT_FILTER_INFO *pInfo;
    ABF_BT_INFO *pAbfBtInfo;

    pInfo = (ATHBT_FILTER_INFO *)pInstance->pContext;
    if (pInfo->pBtInfo) {
        return A_OK;    /* already initialized */
    }

    pAbfBtInfo = (ABF_BT_INFO *)A_MALLOC(sizeof(ABF_BT_INFO));
    if (pAbfBtInfo == NULL) {
        A_ERR("[%s] Failed to allocate BT info\n", __FUNCTION__);
        return A_ERROR;
    }
    A_MEMZERO(pAbfBtInfo, sizeof(ABF_BT_INFO));
    A_MUTEX_INIT(&pAbfBtInfo->hWaitEventLock);
    A_COND_INIT(&pAbfBtInfo->hWaitEvent);

    pAbfBtInfo->Flags = Flags;
    if (pAbfBtInfo->Flags & ABF_ENABLE_AFH_CHANNEL_CLASSIFICATION) {
        A_INFO("AFH Classification Command will be issued on WLAN connect/disconnect \n");
    }

    /* Set up the main loop */
    mainloop = g_main_loop_new(NULL, FALSE);
    pAbfBtInfo->AdapterAvailable = FALSE;
    pAbfBtInfo->Mainloop = mainloop;
    pAbfBtInfo->Loop = TRUE;
    pAbfBtInfo->pInfo = pInfo;
    pAbfBtInfo->HCIEventListenerSocket = -1;

    /* Spawn a thread which will be used to process events from BT */
    status = A_TASK_CREATE(&pInfo->hBtThread, BtEventThread, pAbfBtInfo);
    if (A_FAILED(status)) {
        A_ERR("[%s] Failed to spawn a BT thread\n", __FUNCTION__);
        g_main_loop_unref(pAbfBtInfo->Mainloop);
        A_MUTEX_DEINIT(&pAbfBtInfo->hWaitEventLock);
        A_COND_DEINIT(&pAbfBtInfo->hWaitEvent);
        A_FREE(pAbfBtInfo);
        return A_ERROR;
    }
    pInfo->pBtInfo = pAbfBtInfo;
    A_INFO("BT Stack Notification init complete\n");

    return A_OK;
}
/*
 * Tear down BT stack notification: ask the glib event thread to stop,
 * wait for it to acknowledge, then free all resources.  No-op if init
 * never completed.
 */
void
Abf_BtStackNotificationDeInit(ATH_BT_FILTER_INSTANCE *pInstance)
{
    ATHBT_FILTER_INFO *pInfo = (ATHBT_FILTER_INFO *)pInstance->pContext;
    ABF_BT_INFO *pAbfBtInfo = pInfo->pBtInfo;

    if (!pAbfBtInfo) return;

    if (pAbfBtInfo->Mainloop != NULL) {
        /* Terminate and wait for the BT Event Handler task to finish */
        A_MUTEX_LOCK(&pAbfBtInfo->hWaitEventLock);
        if (pAbfBtInfo->Loop) {
            pAbfBtInfo->Loop = FALSE;
            /* BtEventThread signals hWaitEvent just before it returns */
            A_COND_WAIT(&pAbfBtInfo->hWaitEvent, &pAbfBtInfo->hWaitEventLock,
                        WAITFOREVER);
        }
        A_MUTEX_UNLOCK(&pAbfBtInfo->hWaitEventLock);
    }

    /* Flush all the BT actions from the filter core TODO */

    /* Free the remaining resources */
    g_main_loop_unref(pAbfBtInfo->Mainloop);
    pAbfBtInfo->AdapterAvailable = FALSE;
    pInfo->pBtInfo = NULL;
    A_MUTEX_DEINIT(&pAbfBtInfo->hWaitEventLock);
    A_COND_DEINIT(&pAbfBtInfo->hWaitEvent);
    A_MEMZERO(pAbfBtInfo, sizeof(ABF_BT_INFO));
    A_FREE(pAbfBtInfo);

    A_INFO("BT Stack Notification de-init complete\n");
}
/* Internal functions */
/*
 * Periodic glib timeout callback.  glib offers no external way to end a
 * main loop without an extra GSource, so the shutdown flag is polled here;
 * when it clears we quit the loop and return FALSE so glib removes this
 * timeout source.
 */
static gboolean MainLoopQuitCheck(gpointer arg)
{
    ABF_BT_INFO *pAbfBtInfo = (ABF_BT_INFO *)arg;

    if (pAbfBtInfo->Loop) {
        return TRUE;    /* still running: reschedule */
    }

    g_main_loop_quit(pAbfBtInfo->Mainloop);
    return FALSE;       /* one-shot: drop this source */
}
/*
 * Dedicated thread running the glib main loop.  Connects to the D-Bus
 * system bus, obtains the BlueZ Manager proxy, registers for adapter
 * added/removed signals and then loops until the Loop flag is cleared
 * (polled via MainLoopQuitCheck).  All proxies and the bus reference are
 * released before the thread signals hWaitEvent and exits.
 */
static void *
BtEventThread(void *arg)
{
    DBusGConnection *bus;
    GError *error = NULL;
    DBusGProxy *manager;
    GLogLevelFlags fatal_mask;
    ABF_BT_INFO *pAbfBtInfo = (ABF_BT_INFO *)arg;

    A_INFO("Starting the BT Event Handler task\n");
    g_type_init();

    /* promote glib warnings/criticals to fatal so misuse is caught early */
    fatal_mask = g_log_set_always_fatal(G_LOG_FATAL_MASK);
    fatal_mask |= G_LOG_LEVEL_WARNING | G_LOG_LEVEL_CRITICAL;
    g_log_set_always_fatal(fatal_mask);

    do {
        bus = dbus_g_bus_get(DBUS_BUS_SYSTEM, &error);
        if (!bus) {
            A_ERR("[%s] Couldn't connect to system bus: %d\n",
                  __FUNCTION__, error);
            break;
        }

        pAbfBtInfo->Bus = bus;
        manager = dbus_g_proxy_new_for_name(bus, BLUEZ_NAME, BLUEZ_PATH,
                                            MANAGER_INTERFACE);
        if (!manager) {
            A_ERR("[%s] Failed to get name owner\n", __FUNCTION__);
            dbus_g_connection_unref(bus);
            pAbfBtInfo->Bus = NULL;
            break;
        }
        pAbfBtInfo->DeviceManager = manager;

        /* check for default adapter at startup */
        CheckAndAcquireDefaultAdapter(pAbfBtInfo);

        RegisterBtStackEventCb(pAbfBtInfo, BT_ADAPTER_ADDED,
                               G_CALLBACK(BtAdapterAdded));

        RegisterBtStackEventCb(pAbfBtInfo, BT_ADAPTER_REMOVED,
                               G_CALLBACK(BtAdapterRemoved));

        /* poll the shutdown flag once a second (see MainLoopQuitCheck) */
        g_timeout_add(1000, MainLoopQuitCheck, pAbfBtInfo);

        g_main_loop_run(pAbfBtInfo->Mainloop);   /* blocks until quit */

        DeRegisterBtStackEventCb(pAbfBtInfo, BT_ADAPTER_ADDED,
                                 G_CALLBACK(BtAdapterAdded));

        DeRegisterBtStackEventCb(pAbfBtInfo, BT_ADAPTER_REMOVED,
                                 G_CALLBACK(BtAdapterRemoved));

        ReleaseDefaultAdapter(pAbfBtInfo);

        g_object_unref(pAbfBtInfo->DeviceManager);
        pAbfBtInfo->DeviceManager = NULL;

        /* Release the system bus */
        dbus_g_connection_unref(bus);
        pAbfBtInfo->Bus = NULL;

    } while (FALSE);

    /* Clean up the resources allocated in this task */
    A_INFO("Terminating the BT Event Handler task\n");

    /* wake anyone blocked in Abf_BtStackNotificationDeInit() */
    A_MUTEX_LOCK(&pAbfBtInfo->hWaitEventLock);
    pAbfBtInfo->Loop = FALSE;
    A_COND_SIGNAL(&pAbfBtInfo->hWaitEvent);
    A_MUTEX_UNLOCK(&pAbfBtInfo->hWaitEventLock);

    return NULL;
}
/*
 * Ensure a default BT adapter is held; acquire one if we don't
 * already have it.  Returns A_OK if an adapter is (now) available.
 */
static A_STATUS
CheckAndAcquireDefaultAdapter(ABF_BT_INFO *pAbfBtInfo)
{
    if (pAbfBtInfo->AdapterAvailable) {
        /* nothing to do, adapter already held */
        return A_OK;
    }
    return AcquireBtAdapter(pAbfBtInfo);
}
/*
 * Drop the currently-held BT adapter (if any) and forget its name.
 */
static void ReleaseDefaultAdapter(ABF_BT_INFO *pAbfBtInfo)
{
    if (pAbfBtInfo->AdapterAvailable) {
        /* Release the BT adapter */
        ReleaseBTAdapter(pAbfBtInfo);
        A_INFO("[%s] BT Adapter Removed\n",pAbfBtInfo->AdapterName);
    }
    /* clear the cached adapter name in all cases */
    A_MEMZERO(pAbfBtInfo->AdapterName, sizeof(pAbfBtInfo->AdapterName));
}
/* Event Notifications */
/*
 * BlueZ "AdapterAdded" signal handler.
 */
static void
BtAdapterAdded(DBusGProxy *proxy, const char *string, gpointer user_data)
{
    A_DEBUG("BtAdapterAdded Proxy Callback ... \n");
    /* NOTE: on some systems BlueZ emits this signal before a
     * "DefaultAdapter" query on the Manager interface would succeed,
     * even though an adapter was just added (there should be a default).
     * Work around the race by letting the stack settle before acquiring
     * the default adapter; adapter additions are rare so the delay is
     * harmless. */
    sleep(1);
    CheckAndAcquireDefaultAdapter((ABF_BT_INFO *)user_data);
}
/*
 * BlueZ "AdapterRemoved" signal handler.  Releases our adapter state
 * only if the removed adapter is the one we are tracking.
 */
static void
BtAdapterRemoved(DBusGProxy *proxy, const char *string, gpointer user_data)
{
    ABF_BT_INFO *pBtInfo = (ABF_BT_INFO *)user_data;

    A_DEBUG("BtAdapterRemoved Proxy Callback ... \n");
    if (pBtInfo->AdapterAvailable &&
        (strcmp(string,pBtInfo->AdapterName) == 0)) {
        /* the adapter we are watching has been removed */
        ReleaseDefaultAdapter(pBtInfo);
    }
}
/*
 * BlueZ signal handler: device discovery (inquiry) began.
 */
static void
DeviceDiscoveryStarted(DBusGProxy *proxy, gpointer user_data)
{
    ATHBT_FILTER_INFO *pFilterInfo =
        (ATHBT_FILTER_INFO *)((ABF_BT_INFO *)user_data)->pInfo;

    A_DEBUG("Device Inquiry Started\n");
    /* tell the filter core that an inquiry is in progress */
    AthBtIndicateState(pFilterInfo->pInstance, ATH_BT_INQUIRY, STATE_ON);
}
/*
 * BlueZ signal handler: device discovery (inquiry) completed.
 */
static void
DeviceDiscoveryFinished(DBusGProxy *proxy, gpointer user_data)
{
    ATHBT_FILTER_INFO *pFilterInfo =
        (ATHBT_FILTER_INFO *)((ABF_BT_INFO *)user_data)->pInfo;

    A_DEBUG("Device Inquiry Completed\n");
    /* inquiry over; clear the state in the filter core */
    AthBtIndicateState(pFilterInfo->pInstance, ATH_BT_INQUIRY, STATE_OFF);
}
/*
 * BlueZ signal handler: a remote device connected (ACL link up).
 * 'string' is the peer's address in text form.
 */
static void
RemoteDeviceConnected(DBusGProxy *proxy, const char *string, gpointer user_data)
{
    ABF_BT_INFO *pBtInfo = (ABF_BT_INFO *)user_data;
    ATHBT_FILTER_INFO *pFilterInfo = (ATHBT_FILTER_INFO *)pBtInfo->pInfo;

    A_DEBUG("Device Connected: %s\n", string);
    /* remember the peer address, then report the connection */
    A_STR2ADDR(string, pBtInfo->RemoteDevice);
    AthBtIndicateState(pFilterInfo->pInstance, ATH_BT_CONNECT, STATE_ON);
}
/*
 * BlueZ signal handler: a remote device disconnected (ACL link down).
 */
static void
RemoteDeviceDisconnected(DBusGProxy *proxy, const char *string, gpointer user_data)
{
    ABF_BT_INFO *pBtInfo = (ABF_BT_INFO *)user_data;
    ATHBT_FILTER_INFO *pFilterInfo = (ATHBT_FILTER_INFO *)pBtInfo->pInfo;

    A_DEBUG("Device Disconnected: %s\n", string);
    /* forget the peer address, then report the disconnect */
    A_MEMZERO(pBtInfo->RemoteDevice, sizeof(pBtInfo->RemoteDevice));
    AthBtIndicateState(pFilterInfo->pInstance, ATH_BT_CONNECT, STATE_OFF);
}
/*
 * Release everything associated with the default audio device:
 * de-register all audio signal handlers, drop every audio proxy, and
 * forget the device name.  Safe to call when nothing is held.
 */
static void ReleaseDefaultAudioDevice(ABF_BT_INFO *pAbfBtInfo)
{
    int i;
    /* (event, handler) pairs, in the original de-registration order */
    const struct {
        BT_STACK_EVENT event;
        GCallback handler;
    } audioEvents[] = {
        { AUDIO_HEADSET_CONNECTED,      G_CALLBACK(AudioHeadsetConnected) },
        { AUDIO_HEADSET_DISCONNECTED,   G_CALLBACK(AudioHeadsetDisconnected) },
        { AUDIO_HEADSET_STREAM_STARTED, G_CALLBACK(AudioHeadsetStreamStarted) },
        { AUDIO_HEADSET_STREAM_STOPPED, G_CALLBACK(AudioHeadsetStreamStopped) },
        { AUDIO_GATEWAY_CONNECTED,      G_CALLBACK(AudioGatewayConnected) },
        { AUDIO_GATEWAY_DISCONNECTED,   G_CALLBACK(AudioGatewayDisconnected) },
        { AUDIO_SINK_CONNECTED,         G_CALLBACK(AudioSinkConnected) },
        { AUDIO_SINK_DISCONNECTED,      G_CALLBACK(AudioSinkDisconnected) },
        { AUDIO_SINK_STREAM_STARTED,    G_CALLBACK(AudioSinkStreamStarted) },
        { AUDIO_SINK_STREAM_STOPPED,    G_CALLBACK(AudioSinkStreamStopped) },
        { AUDIO_SOURCE_CONNECTED,       G_CALLBACK(AudioSourceConnected) },
        { AUDIO_SOURCE_DISCONNECTED,    G_CALLBACK(AudioSourceDisconnected) },
    };
    /* proxies to unref, in the original release order */
    DBusGProxy **proxies[] = {
        &pAbfBtInfo->AudioHeadset,
        &pAbfBtInfo->AudioGateway,
        &pAbfBtInfo->AudioSource,
        &pAbfBtInfo->AudioSink,
        &pAbfBtInfo->AudioDevice,
    };

    if (pAbfBtInfo->AudioCbRegistered) {
        for (i = 0; i < (int)(sizeof(audioEvents) / sizeof(audioEvents[0])); i++) {
            DeRegisterBtStackEventCb(pAbfBtInfo, audioEvents[i].event,
                                     audioEvents[i].handler);
        }
        pAbfBtInfo->AudioCbRegistered = FALSE;
    }
    for (i = 0; i < (int)(sizeof(proxies) / sizeof(proxies[0])); i++) {
        if (*proxies[i] != NULL) {
            g_object_unref(*proxies[i]);
            *proxies[i] = NULL;
        }
    }
    if (pAbfBtInfo->DefaultAudioDeviceAvailable) {
        pAbfBtInfo->DefaultAudioDeviceAvailable = FALSE;
        A_DEBUG("Default Audio Device Removed: %s\n", pAbfBtInfo->DefaultAudioDeviceName);
        A_MEMZERO(pAbfBtInfo->DefaultAudioDeviceName,sizeof(pAbfBtInfo->DefaultAudioDeviceName));
    }
}
/*
 * Query BlueZ for the default audio device, create proxies for all of
 * its audio interfaces, and register for the audio-related signals.
 * On any failure everything partially acquired is released again via
 * ReleaseDefaultAudioDevice().  Idempotent when already acquired.
 */
static void AcquireDefaultAudioDevice(ABF_BT_INFO *pAbfBtInfo)
{
    A_BOOL success = FALSE;
    char *audioDevice = NULL;
    GError *error = NULL;

    do {
        if (pAbfBtInfo->DefaultAudioDeviceAvailable) {
            /* already acquired */
            success = TRUE;
            break;
        }
        A_INFO("Checking for a default audio device .. \n");
        if (!dbus_g_proxy_call(pAbfBtInfo->AudioManager,
                               "DefaultDevice",
                               &error,
                               G_TYPE_INVALID,
                               G_TYPE_STRING,
                               &audioDevice,
                               G_TYPE_INVALID)) {
            /* BUGFIX: dbus_g_proxy_call sets 'error' only on failure, so
             * the old post-success error check was dead code; also the
             * GError must be freed with g_error_free (g_free leaked the
             * message) */
            A_ERR("[%s] Failed to get default audio device: %s \n",
                  __FUNCTION__,
                  error != NULL ? error->message : "unknown");
            if (error != NULL) {
                g_error_free(error);
                error = NULL;
            }
            break;
        }
        /* BUGFIX: strncpy does not guarantee NUL termination; force it */
        strncpy(pAbfBtInfo->DefaultAudioDeviceName,
                audioDevice,
                sizeof(pAbfBtInfo->DefaultAudioDeviceName) - 1);
        pAbfBtInfo->DefaultAudioDeviceName[
            sizeof(pAbfBtInfo->DefaultAudioDeviceName) - 1] = '\0';
        g_free(audioDevice);
        audioDevice = NULL;
        A_INFO("Default Audio Device: %s \n", pAbfBtInfo->DefaultAudioDeviceName);
        pAbfBtInfo->DefaultAudioDeviceAvailable = TRUE;
        /* get various proxies for the audio device */
        pAbfBtInfo->AudioHeadset = dbus_g_proxy_new_for_name(pAbfBtInfo->Bus,
                                                             BLUEZ_NAME,
                                                             pAbfBtInfo->DefaultAudioDeviceName,
                                                             AUDIO_HEADSET_INTERFACE);
        if (NULL == pAbfBtInfo->AudioHeadset) {
            A_ERR("[%s] Failed to get audio headset interface \n", __FUNCTION__);
            break;
        }
        pAbfBtInfo->AudioGateway = dbus_g_proxy_new_for_name(pAbfBtInfo->Bus,
                                                             BLUEZ_NAME,
                                                             pAbfBtInfo->DefaultAudioDeviceName,
                                                             AUDIO_GATEWAY_INTERFACE);
        if (NULL == pAbfBtInfo->AudioGateway) {
            A_ERR("[%s] Failed to get audio gateway interface \n", __FUNCTION__);
            break;
        }
        pAbfBtInfo->AudioSource = dbus_g_proxy_new_for_name(pAbfBtInfo->Bus,
                                                            BLUEZ_NAME,
                                                            pAbfBtInfo->DefaultAudioDeviceName,
                                                            AUDIO_SOURCE_INTERFACE);
        if (NULL == pAbfBtInfo->AudioSource) {
            A_ERR("[%s] Failed to get audio source interface \n", __FUNCTION__);
            break;
        }
        pAbfBtInfo->AudioSink = dbus_g_proxy_new_for_name(pAbfBtInfo->Bus,
                                                          BLUEZ_NAME,
                                                          pAbfBtInfo->DefaultAudioDeviceName,
                                                          AUDIO_SINK_INTERFACE);
        if (NULL == pAbfBtInfo->AudioSink) {
            A_ERR("[%s] Failed to get audio sink interface \n", __FUNCTION__);
            break;
        }
        pAbfBtInfo->AudioDevice = dbus_g_proxy_new_for_name(pAbfBtInfo->Bus,
                                                            BLUEZ_NAME,
                                                            pAbfBtInfo->DefaultAudioDeviceName,
                                                            AUDIO_DEVICE_INTERFACE);
        if (NULL == pAbfBtInfo->AudioDevice) {
            A_ERR("[%s] Failed to get audio device interface \n", __FUNCTION__);
            break;
        }
        /* Register for audio specific events */
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_HEADSET_CONNECTED,
                               G_CALLBACK(AudioHeadsetConnected));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_HEADSET_DISCONNECTED,
                               G_CALLBACK(AudioHeadsetDisconnected));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_HEADSET_STREAM_STARTED,
                               G_CALLBACK(AudioHeadsetStreamStarted));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_HEADSET_STREAM_STOPPED,
                               G_CALLBACK(AudioHeadsetStreamStopped));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_GATEWAY_CONNECTED,
                               G_CALLBACK(AudioGatewayConnected));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_GATEWAY_DISCONNECTED,
                               G_CALLBACK(AudioGatewayDisconnected));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_SINK_CONNECTED,
                               G_CALLBACK(AudioSinkConnected));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_SINK_DISCONNECTED,
                               G_CALLBACK(AudioSinkDisconnected));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_SINK_STREAM_STARTED,
                               G_CALLBACK(AudioSinkStreamStarted));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_SINK_STREAM_STOPPED,
                               G_CALLBACK(AudioSinkStreamStopped));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_SOURCE_CONNECTED,
                               G_CALLBACK(AudioSourceConnected));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_SOURCE_DISCONNECTED,
                               G_CALLBACK(AudioSourceDisconnected));
        pAbfBtInfo->AudioCbRegistered = TRUE;
        success = TRUE;
    } while (FALSE);
    if (!success) {
        /* cleanup partially acquired state */
        ReleaseDefaultAudioDevice(pAbfBtInfo);
    }
}
/*
 * BlueZ signal handler: a new audio device appeared.
 */
static void
AudioDeviceAdded(DBusGProxy *proxy, const char *string, gpointer user_data)
{
    ABF_BT_INFO *pBtInfo = (ABF_BT_INFO *)user_data;

    A_DEBUG("Audio Device Added: %s\n", string);
    /* drop whatever default we currently hold, then re-acquire; the new
     * default may well be the device that was just added */
    ReleaseDefaultAudioDevice(pBtInfo);
    AcquireDefaultAudioDevice(pBtInfo);
}
/*
 * BlueZ signal handler: an audio device disappeared.  Only reacts if
 * the removed device is the default one we are tracking.
 */
static void
AudioDeviceRemoved(DBusGProxy *proxy, const char *string, gpointer user_data)
{
    ABF_BT_INFO *pBtInfo = (ABF_BT_INFO *)user_data;

    A_DEBUG("Audio Device Removed: %s\n", string);
    if (strcmp(string,pBtInfo->DefaultAudioDeviceName) != 0) {
        return; /* not the device we are tracking */
    }
    /* release the tracked device and try to pick up a new default */
    ReleaseDefaultAudioDevice(pBtInfo);
    AcquireDefaultAudioDevice(pBtInfo);
}
/*
 * BlueZ signal handler: headset connected.  Informational only; state
 * indications are driven by the stream start/stop events.
 */
static void
AudioHeadsetConnected(DBusGProxy *proxy, gpointer user_data)
{
    A_DEBUG("Audio Headset Connected\n");
}
/*
 * BlueZ signal handler: headset disconnected.  Informational only.
 */
static void
AudioHeadsetDisconnected(DBusGProxy *proxy, gpointer user_data)
{
    A_DEBUG("Audio Headset Disconnected\n");
}
/*
 * BlueZ signal handler: a headset (SCO/eSCO) audio stream started.
 * Refreshes connection properties (which records the SCO link type)
 * and reports the appropriate state to the filter core.
 */
static void
AudioHeadsetStreamStarted(DBusGProxy *proxy, gpointer user_data)
{
    ABF_BT_INFO *pBtInfo = (ABF_BT_INFO *)user_data;
    ATHBT_FILTER_INFO *pFilterInfo = (ATHBT_FILTER_INFO *)pBtInfo->pInfo;

    A_DEBUG("Audio Headset Stream Started\n");
    /* get properties of this headset connection; this also updates
     * CurrentSCOLinkType */
    GetBtAudioConnectionProperties(pBtInfo, ATH_BT_SCO);
    /* indicate SCO or eSCO depending on the captured link type */
    AthBtIndicateState(pFilterInfo->pInstance,
                       (pBtInfo->CurrentSCOLinkType == SCO_LINK) ?
                           ATH_BT_SCO : ATH_BT_ESCO,
                       STATE_ON);
}
/*
 * BlueZ signal handler: a headset (SCO/eSCO) audio stream stopped.
 * This event can also be used to indicate the SCO state.
 */
static void
AudioHeadsetStreamStopped(DBusGProxy *proxy, gpointer user_data)
{
    ABF_BT_INFO *pBtInfo = (ABF_BT_INFO *)user_data;
    ATHBT_FILTER_INFO *pFilterInfo = (ATHBT_FILTER_INFO *)pBtInfo->pInfo;

    A_DEBUG("Audio Headset Stream Stopped\n");
    AthBtIndicateState(pFilterInfo->pInstance,
                       (pBtInfo->CurrentSCOLinkType == SCO_LINK) ?
                           ATH_BT_SCO : ATH_BT_ESCO,
                       STATE_OFF);
}
/*
 * BlueZ signal handler: audio gateway connected.
 */
static void
AudioGatewayConnected(DBusGProxy *proxy, gpointer user_data)
{
    /* Not yet implemented */
    A_DEBUG("Audio Gateway Connected\n");
}
/*
 * BlueZ signal handler: audio gateway disconnected.
 */
static void
AudioGatewayDisconnected(DBusGProxy *proxy, gpointer user_data)
{
    /* Not yet implemented */
    A_DEBUG("Audio Gateway disconnected\n");
}
/*
 * BlueZ signal handler: A2DP sink connected.  Captures the connection
 * properties; the ON indication is made when the stream starts.
 */
static void
AudioSinkConnected(DBusGProxy *proxy, gpointer user_data)
{
    A_DEBUG("Audio Sink Connected\n");
    /* get connection properties */
    GetBtAudioConnectionProperties((ABF_BT_INFO *)user_data, ATH_BT_A2DP);
}
/*
 * BlueZ signal handler: A2DP sink disconnected.
 */
static void
AudioSinkDisconnected(DBusGProxy *proxy, gpointer user_data)
{
    ABF_BT_INFO *pBtInfo = (ABF_BT_INFO *)user_data;
    ATHBT_FILTER_INFO *pFilterInfo = (ATHBT_FILTER_INFO *)pBtInfo->pInfo;

    A_DEBUG("Audio Sink Disconnected\n");
    AthBtIndicateState(pFilterInfo->pInstance, ATH_BT_A2DP, STATE_OFF);
}
/*
 * BlueZ signal handler: A2DP stream started.
 */
static void
AudioSinkStreamStarted(DBusGProxy *proxy, gpointer user_data)
{
    ABF_BT_INFO *pBtInfo = (ABF_BT_INFO *)user_data;
    ATHBT_FILTER_INFO *pFilterInfo = (ATHBT_FILTER_INFO *)pBtInfo->pInfo;

    A_DEBUG("Audio Sink Stream Started\n");
    AthBtIndicateState(pFilterInfo->pInstance, ATH_BT_A2DP, STATE_ON);
}
/*
 * BlueZ signal handler: A2DP stream stopped.
 */
static void
AudioSinkStreamStopped(DBusGProxy *proxy, gpointer user_data)
{
    ABF_BT_INFO *pBtInfo = (ABF_BT_INFO *)user_data;
    ATHBT_FILTER_INFO *pFilterInfo = (ATHBT_FILTER_INFO *)pBtInfo->pInfo;

    A_DEBUG("Audio Sink Stream Stopped\n");
    AthBtIndicateState(pFilterInfo->pInstance, ATH_BT_A2DP, STATE_OFF);
}
/*
 * BlueZ signal handler: audio source connected.
 */
static void
AudioSourceConnected(DBusGProxy *proxy, gpointer user_data)
{
    /* Not yet implemented */
    A_DEBUG("Audio Source Connected\n");
}
/*
 * BlueZ signal handler: audio source disconnected.
 */
static void
AudioSourceDisconnected(DBusGProxy *proxy, gpointer user_data)
{
    /* Not yet implemented */
    A_DEBUG("Audio Source Disconnected\n");
}
/* (De)Registration */
/*
 * Map the proxy id configured for 'event' in g_NotificationConfig onto
 * the live DBusGProxy object held in the context.  Returns NULL (with a
 * log message) for an unknown proxy id.
 */
static DBusGProxy *
GetDBusProxy(ABF_BT_INFO *pAbfBtInfo, BT_STACK_EVENT event)
{
    BT_NOTIFICATION_CONFIG_PARAMS *pConfig = &g_NotificationConfig[event];

    switch (pConfig->proxy) {
    case DEVICE_MANAGER:
        return pAbfBtInfo->DeviceManager;
    case DEVICE_ADAPTER:
        return pAbfBtInfo->DeviceAdapter;
    case AUDIO_MANAGER:
        return pAbfBtInfo->AudioManager;
    case AUDIO_HEADSET:
        return pAbfBtInfo->AudioHeadset;
    case AUDIO_SINK:
        return pAbfBtInfo->AudioSink;
    default:
        A_ERR("[%s] Unknown proxy %d for event : %d \n", __FUNCTION__,
              pConfig->proxy, event);
        return NULL;
    }
}
/*
 * Register 'handler' for the BT stack event 'event' on the appropriate
 * D-Bus proxy.  The signal's argument signature is declared to
 * dbus-glib before connecting.  Silently ignores events whose proxy is
 * PROXY_INVALID (not supported yet).
 */
static void
RegisterBtStackEventCb(ABF_BT_INFO *pAbfBtInfo, BT_STACK_EVENT event,
                       GCallback handler)
{
    const char *name;
    DBusGProxy *proxy;
    BT_NOTIFICATION_CONFIG_PARAMS *pNotificationConfig;

    /* BUGFIX: validate 'event' BEFORE indexing g_NotificationConfig;
     * the old code read the array first, an out-of-bounds access for
     * bad event values */
    if (event >= BT_EVENTS_NUM_MAX) {
        A_ERR("[%s] Unknown Event: %d\n", __FUNCTION__, event);
        return;
    }
    pNotificationConfig = &g_NotificationConfig[event];
    name = pNotificationConfig->name;
    if (pNotificationConfig->proxy == PROXY_INVALID) {
        /* not supported yet, so ignore registration */
        return;
    }
    if ((proxy = GetDBusProxy(pAbfBtInfo, event)) == NULL) {
        A_ERR("[%s] Unknown Proxy: %d (event:%d) \n", __FUNCTION__,
              pNotificationConfig->proxy, event);
        return;
    }
    /* declare the signal signature to dbus-glib */
    if (pNotificationConfig->arg == ARG_NONE) {
        dbus_g_proxy_add_signal(proxy, name, G_TYPE_INVALID);
    } else if (pNotificationConfig->arg == ARG_STRING) {
        dbus_g_proxy_add_signal(proxy, name, G_TYPE_STRING,
                                G_TYPE_INVALID);
    } else {
        A_ERR("[%s] Unkown Arg Type: %d\n", __FUNCTION__,
              pNotificationConfig->arg);
        return;
    }
    dbus_g_proxy_connect_signal(proxy, name, handler, (void *)pAbfBtInfo,
                                NULL);
}
/*
 * Disconnect 'handler' from the BT stack event 'event'.  Mirror of
 * RegisterBtStackEventCb(); silently ignores unsupported events.
 */
static void
DeRegisterBtStackEventCb(ABF_BT_INFO *pAbfBtInfo, BT_STACK_EVENT event,
                         GCallback handler)
{
    const char *name;
    DBusGProxy *proxy;
    BT_NOTIFICATION_CONFIG_PARAMS *pNotificationConfig;

    /* BUGFIX: validate 'event' BEFORE indexing g_NotificationConfig;
     * the old code read the array first, an out-of-bounds access for
     * bad event values */
    if (event >= BT_EVENTS_NUM_MAX) {
        A_ERR("[%s] Unknown Event: %d\n", __FUNCTION__, event);
        return;
    }
    pNotificationConfig = &g_NotificationConfig[event];
    name = pNotificationConfig->name;
    if (pNotificationConfig->proxy == PROXY_INVALID) {
        /* not supported yet, so ignore de-registration */
        return;
    }
    if ((proxy = GetDBusProxy(pAbfBtInfo, event)) == NULL) {
        A_ERR("[%s] Unknown Proxy: %d\n", __FUNCTION__,
              pNotificationConfig->proxy);
        return;
    }
    dbus_g_proxy_disconnect_signal(proxy, name, handler, (void *)pAbfBtInfo);
}
/* Misc */
/*
 * Acquire the default BT adapter from the BlueZ manager: resolve its
 * object path, derive the hciX id, install the raw HCI event filter,
 * create the adapter and audio-manager proxies, register adapter-level
 * signal handlers, and acquire the default audio device.
 *
 * @return A_OK on success, A_ERROR on any failure (partially created
 *         proxies from earlier successful steps are torn down later by
 *         ReleaseBTAdapter()).
 */
static A_STATUS
AcquireBtAdapter(ABF_BT_INFO *pAbfBtInfo)
{
    DBusGProxy *DeviceAdapter, *AudioManager;
    DBusGConnection *bus = pAbfBtInfo->Bus;
    A_STATUS status = A_ERROR;
    char *adapterName = NULL;
    GError *error = NULL;
    char *hciName;

    do {
        if (!dbus_g_proxy_call(pAbfBtInfo->DeviceManager,
                               "DefaultAdapter",
                               &error,
                               G_TYPE_INVALID,
                               G_TYPE_STRING,
                               &adapterName,
                               G_TYPE_INVALID)) {
            /* BUGFIX: report the real failure reason and free the GError
             * with g_error_free() (g_free() leaked error->message) */
            A_ERR("[%s] DefaultAdapter Method call failure: %s \n",
                  __FUNCTION__,
                  error != NULL ? error->message : "unknown");
            if (error != NULL) {
                g_error_free(error);
                error = NULL;
            }
            break;
        }
        /* BUGFIX: bounded copy; strcpy could overflow AdapterName */
        strncpy(pAbfBtInfo->AdapterName, adapterName,
                sizeof(pAbfBtInfo->AdapterName) - 1);
        pAbfBtInfo->AdapterName[sizeof(pAbfBtInfo->AdapterName) - 1] = '\0';
        /* BUGFIX: free the reply immediately so that the early break on
         * SetupHciEventFilter failure no longer leaks it */
        g_free(adapterName);
        adapterName = NULL;
        /* assume ID 0 */
        pAbfBtInfo->AdapterId = 0;
        if ((hciName = strstr(pAbfBtInfo->AdapterName, "hci")) != NULL) {
            /* get the number following the hci name, this is the ID used for
             * socket calls to the HCI layer */
            pAbfBtInfo->AdapterId = (int)hciName[3] - (int)'0';
            if (pAbfBtInfo->AdapterId < 0) {
                pAbfBtInfo->AdapterId = 0;
            }
        }
        if (!A_SUCCESS(SetupHciEventFilter(pAbfBtInfo))) {
            break;
        }
        DeviceAdapter = dbus_g_proxy_new_for_name(bus, BLUEZ_NAME,
                                                  pAbfBtInfo->AdapterName,
                                                  ADAPTER_INTERFACE);
        if (!DeviceAdapter) {
            A_ERR("[%s] Failed to get device adapter (%s) \n", __FUNCTION__, pAbfBtInfo->AdapterName);
            break;
        }
        AudioManager = dbus_g_proxy_new_for_name(bus, BLUEZ_NAME,
                                                 AUDIO_MANAGER_PATH,
                                                 AUDIO_MANAGER_INTERFACE);
        if (!AudioManager) {
            A_ERR("[%s] Failed to get name owner\n", __FUNCTION__);
            break;
        }
        pAbfBtInfo->DeviceAdapter = DeviceAdapter;
        pAbfBtInfo->AudioManager = AudioManager;
        GetAdapterInfo(pAbfBtInfo);
        pAbfBtInfo->pInfo->LMPVersion = pAbfBtInfo->LMPVersion;
        pAbfBtInfo->AdapterAvailable = TRUE;
        /* Register to get notified of different stack events */
        RegisterBtStackEventCb(pAbfBtInfo, DEVICE_DISCOVERY_STARTED,
                               G_CALLBACK(DeviceDiscoveryStarted));
        RegisterBtStackEventCb(pAbfBtInfo, DEVICE_DISCOVERY_FINISHED,
                               G_CALLBACK(DeviceDiscoveryFinished));
        RegisterBtStackEventCb(pAbfBtInfo, REMOTE_DEVICE_CONNECTED,
                               G_CALLBACK(RemoteDeviceConnected));
        RegisterBtStackEventCb(pAbfBtInfo, REMOTE_DEVICE_DISCONNECTED,
                               G_CALLBACK(RemoteDeviceDisconnected));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_DEVICE_ADDED,
                               G_CALLBACK(AudioDeviceAdded));
        RegisterBtStackEventCb(pAbfBtInfo, AUDIO_DEVICE_REMOVED,
                               G_CALLBACK(AudioDeviceRemoved));
        pAbfBtInfo->AdapterCbRegistered = TRUE;
        A_INFO("[%s] BT Adapter Added\n",pAbfBtInfo->AdapterName);
        /* acquire default audio device */
        AcquireDefaultAudioDevice(pAbfBtInfo);
        status = A_OK;
    } while (FALSE);
    return status;
}
/*
 * Tear down all state created by AcquireBtAdapter(): adapter-level
 * signal handlers, the default audio device, the raw HCI event
 * listener socket, cached adapter identity strings, and the D-Bus
 * proxies.  Ends with AdapterAvailable = FALSE.
 */
static void
ReleaseBTAdapter(ABF_BT_INFO *pAbfBtInfo)
{
    if (pAbfBtInfo->AdapterCbRegistered) {
        pAbfBtInfo->AdapterCbRegistered = FALSE;
        /* Free the resources held for the event handlers */
        DeRegisterBtStackEventCb(pAbfBtInfo, DEVICE_DISCOVERY_STARTED,
                                 G_CALLBACK(DeviceDiscoveryStarted));
        DeRegisterBtStackEventCb(pAbfBtInfo, DEVICE_DISCOVERY_FINISHED,
                                 G_CALLBACK(DeviceDiscoveryFinished));
        DeRegisterBtStackEventCb(pAbfBtInfo, REMOTE_DEVICE_CONNECTED,
                                 G_CALLBACK(RemoteDeviceConnected));
        DeRegisterBtStackEventCb(pAbfBtInfo, REMOTE_DEVICE_DISCONNECTED,
                                 G_CALLBACK(RemoteDeviceDisconnected));
        DeRegisterBtStackEventCb(pAbfBtInfo, AUDIO_DEVICE_ADDED,
                                 G_CALLBACK(AudioDeviceAdded));
        DeRegisterBtStackEventCb(pAbfBtInfo, AUDIO_DEVICE_REMOVED,
                                 G_CALLBACK(AudioDeviceRemoved));
    }
    /* drop the default audio device and its signal handlers */
    ReleaseDefaultAudioDevice(pAbfBtInfo);
    /* close the raw HCI socket opened by SetupHciEventFilter() */
    if (pAbfBtInfo->HCIEventListenerSocket >= 0) {
        close(pAbfBtInfo->HCIEventListenerSocket);
        pAbfBtInfo->HCIEventListenerSocket = -1;
    }
    if (pAbfBtInfo->AudioManager != NULL) {
        g_object_unref(pAbfBtInfo->AudioManager);
        pAbfBtInfo->AudioManager = NULL;
    }
    /* forget the cached adapter identity gathered by GetAdapterInfo() */
    A_MEMZERO(pAbfBtInfo->DeviceAddress,
              sizeof(pAbfBtInfo->DeviceAddress));
    A_MEMZERO(pAbfBtInfo->DeviceName,
              sizeof(pAbfBtInfo->DeviceName));
    A_MEMZERO(pAbfBtInfo->ManufacturerName,
              sizeof(pAbfBtInfo->ManufacturerName));
    A_MEMZERO(pAbfBtInfo->ProtocolVersion,
              sizeof(pAbfBtInfo->ProtocolVersion));
    pAbfBtInfo->LMPVersion = 0;
    if (pAbfBtInfo->DeviceAdapter != NULL) {
        g_object_unref(pAbfBtInfo->DeviceAdapter);
        pAbfBtInfo->DeviceAdapter = NULL;
    }
    pAbfBtInfo->AdapterAvailable = FALSE;
}
static A_STATUS
GetAdapterInfo(ABF_BT_INFO *pAbfBtInfo)
{
int count;
char *reply;
GError *error = NULL;
DBusGProxy *DeviceAdapter;
if ((DeviceAdapter = pAbfBtInfo->DeviceAdapter) == NULL) return A_ERROR;
/* Device name */
if (!dbus_g_proxy_call(DeviceAdapter, "GetName", &error, G_TYPE_INVALID,
G_TYPE_STRING, &reply, G_TYPE_INVALID))
{
A_ERR("[%s] Failed to complete GetName: %d\n", __FUNCTION__, error);
return A_ERROR;
}
strcpy(pAbfBtInfo->DeviceName, reply);
g_free(reply);
/* Manufacturer name */
if (!dbus_g_proxy_call(DeviceAdapter, "GetManufacturer", &error,
G_TYPE_INVALID, G_TYPE_STRING, &reply,
G_TYPE_INVALID))
{
A_ERR("[%s] Failed to complete GetManufacturer: %d\n",
__FUNCTION__, error);
return A_ERROR;
}
strcpy(pAbfBtInfo->ManufacturerName, reply);
g_free(reply);
/* Bluetooth protocol Version */
if (!dbus_g_proxy_call(DeviceAdapter, "GetVersion", &error, G_TYPE_INVALID,
G_TYPE_STRING, &reply, G_TYPE_INVALID))
{
A_ERR("[%s] Failed to complete GetVersion: %d\n", __FUNCTION__, error);
return A_ERROR;
}
strcpy(pAbfBtInfo->ProtocolVersion, reply);
for (count = 0;
((count < sizeof(ver_map)/sizeof(hci_map)) && (ver_map[count].str));
count++)
{
if (strstr(pAbfBtInfo->ProtocolVersion, ver_map[count].str)) {
pAbfBtInfo->LMPVersion = ver_map[count].val;
break;
}
}
g_free(reply);
/* Device address */
if (!dbus_g_proxy_call(DeviceAdapter, "GetAddress", &error, G_TYPE_INVALID,
G_TYPE_STRING, &reply, G_TYPE_INVALID))
{
A_ERR("[%s] Failed to complete GetAddress: %d\n", __FUNCTION__, error);
return A_ERROR;
}
A_STR2ADDR(reply, pAbfBtInfo->DeviceAddress);
g_free(reply);
A_INFO("BT-HCI Device Address: (%2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X)\n",
pAbfBtInfo->DeviceAddress[0], pAbfBtInfo->DeviceAddress[1],
pAbfBtInfo->DeviceAddress[2], pAbfBtInfo->DeviceAddress[3],
pAbfBtInfo->DeviceAddress[4], pAbfBtInfo->DeviceAddress[5]);
A_INFO("BT-HCI Device Name: %s\n", pAbfBtInfo->DeviceName);
A_INFO("BT-HCI Manufacturer Name: %s\n", pAbfBtInfo->ManufacturerName);
A_INFO("BT-HCI Protocol Version: %s\n", pAbfBtInfo->ProtocolVersion);
A_INFO("BT-HCI LMP Version: %d\n", pAbfBtInfo->LMPVersion);
return A_OK;
}
#define ABTH_MAX_CONNECTIONS 16
/*
 * Look up the HCI connection to 'Address' (first SCO/eSCO connection
 * when IsSCO, else first ACL connection) and report our role on it.
 * As a side effect, records the SCO link type for SCO lookups.
 *
 * @param Address  peer address as text ("XX:XX:...").
 * @param IsSCO    TRUE to match a synchronous (non-ACL) link.
 * @param pRole    out: 0 = master, 1 = slave.
 * @return A_OK if a matching connection was found, A_ERROR otherwise.
 */
static A_STATUS GetConnectedDeviceRole(ABF_BT_INFO *pAbfBtInfo,
                                       A_CHAR *Address,
                                       A_BOOL IsSCO,
                                       A_UCHAR *pRole)
{
    A_STATUS status = A_ERROR;
    struct hci_conn_list_req *connList = NULL;
    struct hci_conn_info *connInfo = NULL;
    int i, sk = -1;
    int len;

    do {
        sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
        if (sk < 0) {
            A_ERR("[%s] Failed to get raw BT socket: %d \n", __FUNCTION__, errno);
            break;
        }
        /* room for up to ABTH_MAX_CONNECTIONS entries */
        len = (sizeof(*connInfo)) * ABTH_MAX_CONNECTIONS + sizeof(*connList);
        connList = (struct hci_conn_list_req *)A_MALLOC(len);
        if (connList == NULL) {
            break;
        }
        A_MEMZERO(connList,len);
        connList->dev_id = pAbfBtInfo->AdapterId;
        connList->conn_num = ABTH_MAX_CONNECTIONS;
        connInfo = connList->conn_info;
        if (ioctl(sk, HCIGETCONNLIST, (void *)connList)) {
            A_ERR("[%s] Failed to get connection list %d \n", __FUNCTION__, errno);
            break;
        }
        /* walk through connection list */
        for (i = 0; i < connList->conn_num; i++, connInfo++) {
            char addr[32];
            /* convert to a string to compare */
            ba2str(&connInfo->bdaddr, addr);
            if (strcmp(addr,Address) != 0) {
                continue;
            }
            if (IsSCO) {
                /* look for first non-ACL connection */
                if (connInfo->type == ACL_LINK) {
                    continue;
                }
                pAbfBtInfo->CurrentSCOLinkType = connInfo->type;
            } else {
                /* look for first ACL connection */
                if (connInfo->type != ACL_LINK) {
                    continue;
                }
            }
            /* if we get here we have a connection we are interested in */
            if (connInfo->link_mode & HCI_LM_MASTER) {
                /* master */
                *pRole = 0;
            } else {
                /* slave */
                *pRole = 1;
            }
            A_INFO("[%s] Found Connection (Link-Type : %d), found role:%d \n",
                   Address, connInfo->type, *pRole);
            break;
        }
        if (i == connList->conn_num) {
            /* BUGFIX: format string had an extra %d with no matching
             * argument (undefined behavior) */
            A_ERR("[%s] Could not find connection info for %s \n", __FUNCTION__, Address);
            break;
        }
        status = A_OK;
    } while (FALSE);
    if (sk >= 0) {
        close(sk);
    }
    if (connList != NULL) {
        A_FREE(connList);
    }
    return status;
}
/*
 * Gather properties of the current audio connection (A2DP or SCO):
 * remote address, remote LMP version (mapped from the BlueZ version
 * string) and our link role, then cache them in the shared filter info
 * for the indicated connection type.  For SCO connections also checks
 * the HCI event filter for a captured SYNCH connection complete event.
 */
static void GetBtAudioConnectionProperties(ABF_BT_INFO *pAbfBtInfo,
                                           ATHBT_STATE_INDICATION Indication)
{
    A_UCHAR role = 0;
    A_UCHAR lmpversion = 0;
    A_CHAR *pDescr = NULL;
    char *address = NULL;
    char *version = NULL;
    GError *error = NULL;
    A_STATUS status;

    do {
        /* get remote device address */
        if (!dbus_g_proxy_call(pAbfBtInfo->AudioDevice,
                               "GetAddress",
                               &error,
                               G_TYPE_INVALID,
                               G_TYPE_STRING,
                               &address,
                               G_TYPE_INVALID)) {
            /* BUGFIX: the GError is set on the failure path; log it and
             * free it with g_error_free (g_free leaked error->message) */
            A_ERR("[%s] Failed to GetAddress for audio device: %s \n",
                  __FUNCTION__,
                  error != NULL ? error->message : "unknown");
            if (error != NULL) {
                g_error_free(error);
                error = NULL;
            }
            break;
        }
        A_INFO("Connected audio device address: %s \n", address);
        if (!dbus_g_proxy_call(pAbfBtInfo->DeviceAdapter,
                               "GetRemoteVersion",
                               &error,
                               G_TYPE_STRING,
                               address,
                               G_TYPE_INVALID,
                               G_TYPE_STRING,
                               &version,
                               G_TYPE_INVALID)) {
            A_ERR("[%s] Failed to GetRemoteVersion for audio device: %s \n",
                  __FUNCTION__,
                  error != NULL ? error->message : "unknown");
            if (error != NULL) {
                g_error_free(error);
                error = NULL;
            }
            break;
        }
        A_INFO("Connected audio device remote version: %s \n", version);
        /* assume 2.1 or later */
        lmpversion = 4;
        if (strstr(version,"1.0") != NULL) {
            lmpversion = 0;
        } else if (strstr(version,"1.1") != NULL) {
            lmpversion = 1;
        } else if (strstr(version,"1.2") != NULL) {
            lmpversion = 2;
        } else if (strstr(version,"2.0") != NULL) {
            lmpversion = 3;
        }
        /* get role; for non-A2DP this also records the SCO link type */
        status = GetConnectedDeviceRole(pAbfBtInfo,
                                        address,
                                        Indication == ATH_BT_A2DP ? FALSE : TRUE,
                                        &role);
        if (A_FAILED(status)) {
            /* default to master when the role can't be determined */
            role = 0;
        }
        if (Indication == ATH_BT_A2DP) {
            pDescr = "A2DP";
            pAbfBtInfo->pInfo->A2DPConnection_LMPVersion = lmpversion;
            pAbfBtInfo->pInfo->A2DPConnection_Role = role;
        } else if (Indication == ATH_BT_SCO) {
            if (pAbfBtInfo->CurrentSCOLinkType == SCO_LINK) {
                pDescr = "SCO";
            } else {
                pDescr = "eSCO";
            }
            pAbfBtInfo->pInfo->SCOConnection_LMPVersion = lmpversion;
            pAbfBtInfo->pInfo->SCOConnection_Role = role;
            /* for SCO connections check if the event filter captured
             * the SYNCH connection complete event */
            CheckHciEventFilter(pAbfBtInfo);
        } else {
            pDescr = "UNKNOWN!!";
        }
        A_INFO("BT Audio connection properties: (%s) (role: %s, lmp version: %d) \n",
               pDescr, role ? "SLAVE" : "MASTER", lmpversion);
    } while (FALSE);
    if (address != NULL) {
        g_free(address);
    }
    if (version != NULL) {
        g_free(version);
    }
}
/*
 * Poll 'Socket' (a raw HCI socket with an event filter installed) for a
 * specific HCI event.
 *
 * @param TimeoutMs    per-poll timeout in milliseconds.
 * @param pBuffer      scratch buffer receiving raw packets.
 * @param MaxLength    size of pBuffer.
 * @param EventCode    HCI event code to wait for; when it is
 *                     EVT_CMD_COMPLETE, 'OpCode' must also match.
 * @param OpCode       command opcode to match (only for CMD_COMPLETE).
 * @param ppEventPtr   out: pointer past the event header inside pBuffer,
 *                     or NULL if nothing matched within the timeout.
 * @param pEventLength out: payload length (set only on a match).
 * @return A_OK on clean completion (matched or timed out), A_ERROR on a
 *         socket/packet error.
 */
static A_STATUS WaitForHCIEvent(int Socket,
                                int TimeoutMs,
                                A_UCHAR *pBuffer,
                                int MaxLength,
                                A_UCHAR EventCode,
                                A_UINT16 OpCode,
                                A_UCHAR **ppEventPtr,
                                int *pEventLength)
{
    int eventLen;
    hci_event_hdr *eventHdr;
    struct pollfd pfd;
    int result;
    A_UCHAR *eventPtr;
    A_STATUS status = A_OK;

    *ppEventPtr = NULL;
    A_MEMZERO(&pfd,sizeof(pfd));
    pfd.fd = Socket;
    pfd.events = POLLIN;
    if (EventCode == EVT_CMD_COMPLETE) {
        A_INFO("Waiting for HCI CMD Complete Event, Opcode:0x%4.4X (%d MS) \n",OpCode, TimeoutMs);
    } else {
        A_INFO("Waiting for HCI Event: %d (%d MS) \n",EventCode, TimeoutMs);
    }
    while (1) {
        /* check socket for a captured event using a short timeout
         * the caller usually calls this function when it knows there
         * is an event that is likely to be captured */
        result = poll(&pfd, 1, TimeoutMs);
        if (result < 0) {
            if ((errno == EAGAIN) || (errno == EINTR)) {
                /* interrupted */
                /* NOTE(review): an interrupted poll aborts the wait
                 * (status stays A_OK, no event) rather than retrying —
                 * confirm this is intended */
            } else {
                A_ERR("[%s] Socket Poll Failed! : %d \n", __FUNCTION__, errno);
                status = A_ERROR;
            }
            break;
        }
        if (result == 0) {
            /* no event*/
            break;
        }
        if (!(pfd.revents & POLLIN)) {
            break;
        }
        /* get the packet */
        eventLen = read(Socket, pBuffer, MaxLength);
        if (eventLen == 0) {
            /* no event */
            break;
        }
        /* need at least the packet-type octet plus the event header */
        if (eventLen < (1 + HCI_EVENT_HDR_SIZE)) {
            A_ERR("[%s] Unknown receive packet! len : %d \n", __FUNCTION__, eventLen);
            status = A_ERROR;
            break;
        }
        if (pBuffer[0] != HCI_EVENT_PKT) {
            A_ERR("[%s] Unsupported packet type : %d \n", __FUNCTION__, pBuffer[0]);
            status = A_ERROR;
            break;
        }
        /* skip the packet-type octet, then the event header */
        eventPtr = &pBuffer[1];
        eventLen--;
        eventHdr = (hci_event_hdr *)eventPtr;
        eventPtr += HCI_EVENT_HDR_SIZE;
        eventLen -= HCI_EVENT_HDR_SIZE;
        if (eventHdr->evt != EventCode) {
            /* not interested in this one */
            continue;
        }
        if (eventHdr->evt == EVT_CMD_COMPLETE) {
            if (eventLen < sizeof(evt_cmd_complete)) {
                A_ERR("[%s] EVT_CMD_COMPLETE event is too small! len=%d \n", __FUNCTION__, eventLen);
                status = A_ERROR;
                break;
            } else {
                /* opcode arrives little-endian on the wire */
                A_UINT16 evOpCode = btohs(BTEV_CMD_COMPLETE_GET_OPCODE(eventPtr));
                /* check for opCode match */
                if (OpCode != evOpCode) {
                    /* keep searching */
                    continue;
                }
            }
        }
        /* found it */
        *ppEventPtr = eventPtr;
        *pEventLength = eventLen;
        break;
    }
    return status;
}
/*
 * Briefly poll the raw HCI listener socket for a captured
 * SYNC_CONN_COMPLETE event and, when one is present, record the SCO
 * connection parameters in the shared filter info and mark them valid.
 */
static void CheckHciEventFilter(ABF_BT_INFO *pAbfBtInfo)
{
    A_UCHAR buffer[HCI_MAX_EVENT_SIZE];
    A_UCHAR *pEvent = NULL;
    int length = 0;

    if (A_FAILED(WaitForHCIEvent(pAbfBtInfo->HCIEventListenerSocket,
                                 100,
                                 buffer,
                                 sizeof(buffer),
                                 EVT_SYNC_CONN_COMPLETE,
                                 0,
                                 &pEvent,
                                 &length))) {
        return;
    }
    if (pEvent == NULL) {
        /* nothing was captured within the timeout */
        return;
    }
    if (length < sizeof(evt_sync_conn_complete)) {
        A_ERR("SYNC_CONN_COMPLETE Event is too small! : %d \n", length);
        return;
    }
    /* decode the synchronous connection parameters */
    pAbfBtInfo->pInfo->SCOConnectInfo.LinkType = BTEV_GET_BT_CONN_LINK_TYPE(pEvent);
    pAbfBtInfo->pInfo->SCOConnectInfo.TransmissionInterval = BTEV_GET_TRANS_INTERVAL(pEvent);
    pAbfBtInfo->pInfo->SCOConnectInfo.RetransmissionInterval = BTEV_GET_RETRANS_INTERVAL(pEvent);
    pAbfBtInfo->pInfo->SCOConnectInfo.RxPacketLength = BTEV_GET_RX_PKT_LEN(pEvent);
    pAbfBtInfo->pInfo->SCOConnectInfo.TxPacketLength = BTEV_GET_TX_PKT_LEN(pEvent);
    A_INFO("HCI SYNC_CONN_COMPLETE event captured, conn info (%d, %d, %d, %d, %d) \n",
           pAbfBtInfo->pInfo->SCOConnectInfo.LinkType,
           pAbfBtInfo->pInfo->SCOConnectInfo.TransmissionInterval,
           pAbfBtInfo->pInfo->SCOConnectInfo.RetransmissionInterval,
           pAbfBtInfo->pInfo->SCOConnectInfo.RxPacketLength,
           pAbfBtInfo->pInfo->SCOConnectInfo.TxPacketLength);
    /* now valid */
    pAbfBtInfo->pInfo->SCOConnectInfo.Valid = TRUE;
}
/*
 * Open a raw HCI socket bound to the current adapter and install a
 * filter that captures only SYNC_CONN_COMPLETE events; the socket is
 * stored in HCIEventListenerSocket for later use by
 * CheckHciEventFilter().  Any previously open listener socket is
 * closed first.  On failure the socket is closed and set to -1.
 *
 * @return A_OK on success, A_ERROR on socket/setsockopt/bind failure.
 */
static A_STATUS SetupHciEventFilter(ABF_BT_INFO *pAbfBtInfo)
{
    A_STATUS status = A_ERROR;
    struct hci_filter filterSetting;
    struct sockaddr_hci addr;

    do {
        if (pAbfBtInfo->HCIEventListenerSocket >= 0) {
            /* close previous */
            close(pAbfBtInfo->HCIEventListenerSocket);
        }
        pAbfBtInfo->HCIEventListenerSocket = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
        if (pAbfBtInfo->HCIEventListenerSocket < 0) {
            A_ERR("[%s] Failed to get raw BT socket: %d \n", __FUNCTION__, errno);
            break;
        }
        /* accept only HCI event packets ... */
        hci_filter_clear(&filterSetting);
        hci_filter_set_ptype(HCI_EVENT_PKT, &filterSetting);
        /* capture SYNC_CONN Complete */
        hci_filter_set_event(EVT_SYNC_CONN_COMPLETE, &filterSetting);
        if (setsockopt(pAbfBtInfo->HCIEventListenerSocket,
                       SOL_HCI,
                       HCI_FILTER,
                       &filterSetting,
                       sizeof(filterSetting)) < 0) {
            A_ERR("[%s] Failed to set socket opt: %d \n", __FUNCTION__, errno);
            break;
        }
        A_MEMZERO(&addr,sizeof(addr));
        /* bind to the current adapter */
        addr.hci_family = AF_BLUETOOTH;
        addr.hci_dev = pAbfBtInfo->AdapterId;
        if (bind(pAbfBtInfo->HCIEventListenerSocket,
                 (struct sockaddr *)&addr,
                 sizeof(addr)) < 0) {
            A_ERR("[%s] Can't bind to hci:%d (err:%d) \n", __FUNCTION__, pAbfBtInfo->AdapterId, errno);
            break;
        }
        A_INFO("BT Event Filter Set, Mask: 0x%8.8X:%8.8X \n",
               filterSetting.event_mask[1], filterSetting.event_mask[0]);
        status = A_OK;
    } while (FALSE);
    /* on any failure, don't leave a half-configured socket around */
    if (A_FAILED(status)) {
        if (pAbfBtInfo->HCIEventListenerSocket >= 0) {
            close(pAbfBtInfo->HCIEventListenerSocket);
            pAbfBtInfo->HCIEventListenerSocket = -1;
        }
    }
    return status;
}
/* issue HCI command, currently this ONLY supports simple commands that
 * only expect a command complete, the event pointer returned points to the command
 * complete event structure for the caller to decode */
/*
 * Send an HCI command on a temporary raw socket bound to the current
 * adapter and wait (up to EventRecvTimeoutMS) for the matching
 * CMD_COMPLETE event.
 *
 * @param OpCode        HCI opcode (host order; converted to LE here).
 * @param pCmdData      optional command parameters (NULL if none).
 * @param CmdLength     length of pCmdData in bytes.
 * @param pEventBuffer  scratch buffer for received packets.
 * @param MaxLength     size of pEventBuffer.
 * @param ppEventPtr    out: points at the CMD_COMPLETE payload inside
 *                      pEventBuffer, or NULL if none arrived.
 * @param pEventLength  out: payload length.
 * @return A_OK on success, A_ERROR on socket/write/wait failure.
 */
static A_STATUS IssueHCICommand(ABF_BT_INFO *pAbfBtInfo,
                                A_UINT16 OpCode,
                                A_UCHAR *pCmdData,
                                int CmdLength,
                                int EventRecvTimeoutMS,
                                A_UCHAR *pEventBuffer,
                                int MaxLength,
                                A_UCHAR **ppEventPtr,
                                int *pEventLength)
{
    A_STATUS status = A_ERROR;
    A_UCHAR hciType = HCI_COMMAND_PKT;
    hci_command_hdr hciCommandHdr;
    struct iovec iv[3];
    int ivcount = 0;
    int sk,result;
    struct hci_filter filterSetting;
    struct sockaddr_hci addr;

    do {
        sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
        if (sk < 0) {
            A_ERR("[%s] Failed to get raw BT socket: %d \n", __FUNCTION__, errno);
            break;
        }
        /* wire format: packet-type octet, command header, then params */
        hciCommandHdr.opcode = htobs(OpCode);
        hciCommandHdr.plen= CmdLength;
        iv[0].iov_base = &hciType;
        iv[0].iov_len = 1;
        ivcount++;
        iv[1].iov_base = &hciCommandHdr;
        iv[1].iov_len = HCI_COMMAND_HDR_SIZE;
        ivcount++;
        if (pCmdData != NULL) {
            iv[2].iov_base = pCmdData;
            iv[2].iov_len = CmdLength;
            ivcount++;
        }
        /* setup socket to capture the event */
        hci_filter_clear(&filterSetting);
        hci_filter_set_ptype(HCI_EVENT_PKT, &filterSetting);
        hci_filter_set_event(EVT_CMD_COMPLETE, &filterSetting);
        if (setsockopt(sk, SOL_HCI, HCI_FILTER, &filterSetting, sizeof(filterSetting)) < 0) {
            A_ERR("[%s] Failed to set socket opt: %d \n", __FUNCTION__, errno);
            break;
        }
        A_MEMZERO(&addr,sizeof(addr));
        addr.hci_family = AF_BLUETOOTH;
        addr.hci_dev = pAbfBtInfo->AdapterId;
        if (bind(sk,(struct sockaddr *)&addr, sizeof(addr)) < 0) {
            A_ERR("[%s] Can't bind to hci:%d (err:%d) \n", __FUNCTION__, pAbfBtInfo->AdapterId, errno);
            break;
        }
        /* send the command, retrying on EAGAIN/EINTR */
        while ((result = writev(sk, iv, ivcount)) < 0) {
            if (errno == EAGAIN || errno == EINTR) {
                continue;
            }
            break;
        }
        if (result <= 0) {
            A_ERR("[%s] Failed to write to hci:%d (err:%d) \n", __FUNCTION__, pAbfBtInfo->AdapterId, errno);
            break;
        }
        /* wait for the CMD_COMPLETE matching our opcode */
        status = WaitForHCIEvent(sk,
                                 EventRecvTimeoutMS,
                                 pEventBuffer,
                                 MaxLength,
                                 EVT_CMD_COMPLETE,
                                 OpCode,
                                 ppEventPtr,
                                 pEventLength);
        if (A_FAILED(status)) {
            break;
        }
        status = A_OK;
    } while (FALSE);
    if (sk >= 0) {
        close(sk);
    }
    return status;
}
/* Size of a Bluetooth AFH host channel classification map: 79 BT data
 * channels, one bit each, packed into 10 bytes (the MSB of the last byte
 * is unused -- see the 0x7F terminators in g_ChannelMapTable). */
#define AFH_CHANNEL_MAP_BYTES 10
/* One AFH channel classification bit map, passed as the parameter block
 * of the AFH vendor command. */
typedef struct _WLAN_CHANNEL_MAP {
    A_UCHAR Map[AFH_CHANNEL_MAP_BYTES];
} WLAN_CHANNEL_MAP;
/* Number of 2.4 GHz WLAN channels handled (channels 1..14) */
#define MAX_WLAN_CHANNELS 14
/* WLAN channel number together with its center frequency */
typedef struct _WLAN_CHANNEL_RANGE {
    int ChannelNumber;
    int Center; /* in Mhz */
} WLAN_CHANNEL_RANGE;
/* 2.4 GHz WLAN channel center frequencies, ordered by increasing
 * frequency; LookUpChannel() scans this in order. */
const WLAN_CHANNEL_RANGE g_ChannelTable[MAX_WLAN_CHANNELS] = {
    { 1 , 2412},
    { 2 , 2417},
    { 3 , 2422},
    { 4 , 2427},
    { 5 , 2432},
    { 6 , 2437},
    { 7 , 2442},
    { 8 , 2447},
    { 9 , 2452},
    { 10 , 2457},
    { 11 , 2462},
    { 12 , 2467},
    { 13 , 2472},
    { 14 , 2484},
};
/* AFH channel classification maps indexed by WLAN channel number
 * (index 0 = no WLAN connection, all BT channels usable).  Per the AFH
 * host channel classification encoding a cleared bit marks a BT channel
 * as "bad"; each entry blanks the run of BT channels overlapping the
 * corresponding WLAN channel. */
static WLAN_CHANNEL_MAP g_ChannelMapTable[MAX_WLAN_CHANNELS + 1] = {
    { {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x7F}}, /* 0 -- no WLAN */
    { {0x00,0x00,0xC0,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x7F}}, /* 1 */
    { {0x0F,0x00,0x00,0xF8,0xFF,0xFF,0xFF,0xFF,0xFF,0x7F}}, /* 2 */
    { {0xFF,0x01,0x00,0x00,0xFF,0xFF,0xFF,0xFF,0xFF,0x7F}}, /* 3 */
    { {0xFF,0x3F,0x00,0x00,0xE0,0xFF,0xFF,0xFF,0xFF,0x7F}}, /* 4 */
    { {0xFF,0xFF,0x07,0x00,0x00,0xFC,0xFF,0xFF,0xFF,0x7F}}, /* 5 */
    { {0xFF,0xFF,0xFF,0x00,0x00,0x80,0xFF,0xFF,0xFF,0x7F}}, /* 6 */
    { {0xFF,0xFF,0xFF,0x1F,0x00,0x00,0xF0,0xFF,0xFF,0x7F}}, /* 7 */
    { {0xFF,0xFF,0xFF,0xFF,0x03,0x00,0x00,0xFE,0xFF,0x7F}}, /* 8 */
    { {0xFF,0xFF,0xFF,0xFF,0x7F,0x00,0x00,0xC0,0xFF,0x7F}}, /* 9 */
    { {0xFF,0xFF,0xFF,0xFF,0xFF,0x0F,0x00,0x00,0xF8,0x7F}}, /* 10 */
    { {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x01,0x00,0x00,0x7F}}, /* 11 */
    { {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x3F,0x00,0x00,0x60}}, /* 12 */
    { {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x07,0x00,0x00}}, /* 13 */
    { {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0xFF,0x7F,0x00}}, /* 14 */
};
#define AFH_COMMAND_COMPLETE_TIMEOUT_MS 2000

/* Map a frequency in MHz onto a WLAN channel number.
 * Returns the first channel whose center frequency is >= FreqMhz,
 * or 0 when FreqMhz is zero (not connected) or above channel 14. */
static int LookUpChannel(int FreqMhz)
{
    int idx;

    if (FreqMhz == 0) {
        /* zero frequency means "not connected" */
        return 0;
    }
    for (idx = 0; idx < MAX_WLAN_CHANNELS; idx++) {
        if (g_ChannelTable[idx].Center >= FreqMhz) {
            return g_ChannelTable[idx].ChannelNumber;
        }
    }
    /* beyond the highest channel center -- treat as no channel */
    return 0;
}
/* Push an AFH channel classification map to the BT controller for the
 * given WLAN channel.  CurrentWLANChannel may be a channel number
 * (0..14, 0 meaning "no WLAN") or a frequency in MHz (>= 2412), which is
 * converted via LookUpChannel().  The map is sent with a vendor command
 * (OGF 0x3F, OCF 3) and the Command Complete status byte is checked.
 * Returns A_OK on success, A_ERROR otherwise. */
static A_STATUS IssueAFHChannelClassification(ABF_BT_INFO *pAbfBtInfo, int CurrentWLANChannel)
{
    A_UCHAR evtBuffer[HCI_MAX_EVENT_SIZE];
    A_STATUS status;
    A_UCHAR *eventPtr;
    int eventLen;
    A_UCHAR *pChannelMap;

    A_INFO("WLAN Operating Channel: %d \n", CurrentWLANChannel);

    if (CurrentWLANChannel < 0) {
        /* a negative value would index g_ChannelMapTable out of bounds */
        return A_ERROR;
    }
    if (CurrentWLANChannel > MAX_WLAN_CHANNELS) {
        /* check if this is expressed in Mhz */
        if (CurrentWLANChannel >= 2412) {
            /* convert Mhz into a channel number */
            CurrentWLANChannel = LookUpChannel(CurrentWLANChannel);
        } else {
            return A_ERROR;
        }
    }
    /* valid indices are 0..MAX_WLAN_CHANNELS after the checks above */
    pChannelMap = &(g_ChannelMapTable[CurrentWLANChannel].Map[0]);

    do {
        status = IssueHCICommand(pAbfBtInfo,
                                 cmd_opcode_pack(3,0x3F),
                                 pChannelMap,
                                 AFH_CHANNEL_MAP_BYTES,
                                 AFH_COMMAND_COMPLETE_TIMEOUT_MS,
                                 evtBuffer,
                                 sizeof(evtBuffer),
                                 &eventPtr,
                                 &eventLen);
        if (A_FAILED(status)) {
            break;
        }
        status = A_ERROR;
        if (eventPtr == NULL) {
            A_ERR("[%s] Failed to capture AFH command complete event \n", __FUNCTION__);
            break;
        }
        /* cast avoids the signed/unsigned promotion that would let a
         * negative eventLen slip past this length check */
        if (eventLen < (int)(sizeof(evt_cmd_complete) + 1)) {
            A_ERR("[%s] not enough bytes in AFH command complete event %d \n", __FUNCTION__, eventLen);
            break;
        }
        /* check status parameter that follows the command complete event body */
        if (eventPtr[sizeof(evt_cmd_complete)] != 0) {
            A_ERR("[%s] AFH command complete event indicated failure : %d \n", __FUNCTION__,
                  eventPtr[sizeof(evt_cmd_complete)]);
            break;
        }
        A_INFO(" AFH Command successfully issued \n");
        //A_DUMP_BUFFER(pChannelMap, AFH_CHANNEL_MAP_BYTES, "AFH Channel Classification Map");
        status = A_OK;
    } while (FALSE);

    return status;
}
/* Entry point called when the WLAN operating channel changes; forwards
 * the channel to the AFH classification path when the feature is enabled. */
void IndicateCurrentWLANOperatingChannel(ATHBT_FILTER_INFO *pFilterInfo, int CurrentWLANChannel)
{
    ABF_BT_INFO *pBtInfo = (ABF_BT_INFO *)pFilterInfo->pBtInfo;

    if (pBtInfo == NULL ||
        !(pBtInfo->Flags & ABF_ENABLE_AFH_CHANNEL_CLASSIFICATION)) {
        return;
    }
    IssueAFHChannelClassification(pBtInfo, CurrentWLANChannel);
}
| gpl-2.0 |
andrea9a/oslab | drivers/media/platform/soc_camera/rcar_vin.c | 329 | 40572 | /*
* SoC-camera host driver for Renesas R-Car VIN unit
*
* Copyright (C) 2011-2013 Renesas Solutions Corp.
* Copyright (C) 2013 Cogent Embedded, Inc., <source@cogentembedded.com>
*
* Based on V4L2 Driver for SuperH Mobile CEU interface "sh_mobile_ceu_camera.c"
*
* Copyright (C) 2008 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_data/camera-rcar.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <media/soc_camera.h>
#include <media/soc_mediabus.h>
#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-device.h>
#include <media/v4l2-mediabus.h>
#include <media/v4l2-of.h>
#include <media/v4l2-subdev.h>
#include <media/videobuf2-dma-contig.h>
#include "soc_scale_crop.h"
#define DRV_NAME "rcar_vin"

/* Register offsets for R-Car VIN */
#define VNMC_REG 0x00 /* Video n Main Control Register */
#define VNMS_REG 0x04 /* Video n Module Status Register */
#define VNFC_REG 0x08 /* Video n Frame Capture Register */
#define VNSLPRC_REG 0x0C /* Video n Start Line Pre-Clip Register */
#define VNELPRC_REG 0x10 /* Video n End Line Pre-Clip Register */
#define VNSPPRC_REG 0x14 /* Video n Start Pixel Pre-Clip Register */
#define VNEPPRC_REG 0x18 /* Video n End Pixel Pre-Clip Register */
#define VNSLPOC_REG 0x1C /* Video n Start Line Post-Clip Register */
#define VNELPOC_REG 0x20 /* Video n End Line Post-Clip Register */
#define VNSPPOC_REG 0x24 /* Video n Start Pixel Post-Clip Register */
#define VNEPPOC_REG 0x28 /* Video n End Pixel Post-Clip Register */
#define VNIS_REG 0x2C /* Video n Image Stride Register */
#define VNMB_REG(m) (0x30 + ((m) << 2)) /* Video n Memory Base m Register */
#define VNIE_REG 0x40 /* Video n Interrupt Enable Register */
#define VNINTS_REG 0x44 /* Video n Interrupt Status Register */
#define VNSI_REG 0x48 /* Video n Scanline Interrupt Register */
#define VNMTC_REG 0x4C /* Video n Memory Transfer Control Register */
#define VNYS_REG 0x50 /* Video n Y Scale Register */
#define VNXS_REG 0x54 /* Video n X Scale Register */
#define VNDMR_REG 0x58 /* Video n Data Mode Register */
#define VNDMR2_REG 0x5C /* Video n Data Mode Register 2 */
#define VNUVAOF_REG 0x60 /* Video n UV Address Offset Register */

/* Register bit fields for R-Car VIN */
/* Video n Main Control Register bits */
#define VNMC_FOC (1 << 21) /* field order control (set for INTERLACED_BT) */
#define VNMC_YCAL (1 << 19)
#define VNMC_INF_YUV8_BT656 (0 << 16) /* input interface selection */
#define VNMC_INF_YUV8_BT601 (1 << 16)
#define VNMC_INF_YUV10_BT656 (2 << 16)
#define VNMC_INF_YUV10_BT601 (3 << 16)
#define VNMC_INF_YUV16 (5 << 16)
#define VNMC_VUP (1 << 10) /* update registers on field change */
#define VNMC_IM_ODD (0 << 3) /* interlace/field capture mode */
#define VNMC_IM_ODD_EVEN (1 << 3)
#define VNMC_IM_EVEN (2 << 3)
#define VNMC_IM_FULL (3 << 3)
#define VNMC_BPS (1 << 1) /* bypass (no colorspace conversion) */
#define VNMC_ME (1 << 0) /* module enable */

/* Video n Module Status Register bits */
#define VNMS_FBS_MASK (3 << 3) /* current frame buffer slot */
#define VNMS_FBS_SHIFT 3
#define VNMS_AV (1 << 1) /* image data being processed */
#define VNMS_CA (1 << 0) /* capture active */

/* Video n Frame Capture Register bits */
#define VNFC_C_FRAME (1 << 1) /* continuous capture */
#define VNFC_S_FRAME (1 << 0) /* single frame capture */

/* Video n Interrupt Enable Register bits */
#define VNIE_FIE (1 << 4)
#define VNIE_EFE (1 << 1)

/* Video n Data Mode Register bits */
#define VNDMR_EXRGB (1 << 8)
#define VNDMR_BPSM (1 << 4)
#define VNDMR_DTMD_YCSEP (1 << 1)
#define VNDMR_DTMD_ARGB1555 (1 << 0)

/* Video n Data Mode Register 2 bits */
#define VNDMR2_VPS (1 << 30) /* VSYNC polarity select */
#define VNDMR2_HPS (1 << 29) /* HSYNC polarity select */
#define VNDMR2_FTEV (1 << 17)

/* Maximum geometry the VIN can capture */
#define VIN_MAX_WIDTH 2048
#define VIN_MAX_HEIGHT 2048
/* SoC generation hosting the VIN; affects RGB32 support and pixel
 * clipping (see rcar_vin_setup() and rcar_vin_set_rect()). */
enum chip_id {
    RCAR_GEN2,
    RCAR_H1,
    RCAR_M1,
    RCAR_E1,
};
/* Capture state machine: STOPPING is the transient window between a
 * stop request and the hardware deasserting capture-active. */
enum rcar_vin_state {
    STOPPED = 0,
    RUNNING,
    STOPPING,
};
struct rcar_vin_priv {
    void __iomem *base;          /* mapped VIN register base */
    spinlock_t lock;             /* protects state, capture list, queue_buf */
    int sequence;                /* frame sequence counter for v4l2_buf */
    /* State of the VIN module in capturing mode */
    enum rcar_vin_state state;
    struct soc_camera_host ici;
    struct list_head capture;    /* buffers queued, not yet in a HW slot */
#define MAX_BUFFER_NUM 3
    struct vb2_buffer *queue_buf[MAX_BUFFER_NUM]; /* buffers owned by HW slots */
    struct vb2_alloc_ctx *alloc_ctx;
    enum v4l2_field field;
    unsigned int pdata_flags;    /* RCAR_VIN_* flags from platform data */
    unsigned int vb_count;       /* number of vb2 buffers requested */
    unsigned int nr_hw_slots;    /* 1 (single) or MAX_BUFFER_NUM (continuous) */
    bool request_to_stop;        /* release path waiting for capture_stop */
    struct completion capture_stop;
    enum chip_id chip;
};

/* Continuous transfer needs more buffers than there are HW slots */
#define is_continuous_transfer(priv) (priv->vb_count > MAX_BUFFER_NUM)

/* Per-buffer bookkeeping: vb2 buffer plus the capture-list link */
struct rcar_vin_buffer {
    struct vb2_buffer vb;
    struct list_head list;
};

#define to_buf_list(vb2_buffer) (&container_of(vb2_buffer, \
                        struct rcar_vin_buffer, \
                        vb)->list)

struct rcar_vin_cam {
    /* VIN offsets within the camera output, before the VIN scaler */
    unsigned int vin_left;
    unsigned int vin_top;
    /* Client output, as seen by the VIN */
    unsigned int width;
    unsigned int height;
    /*
     * User window from S_CROP / G_CROP, produced by client cropping and
     * scaling, VIN scaling and VIN cropping, mapped back onto the client
     * input window
     */
    struct v4l2_rect subrect;
    /* Camera cropping rectangle */
    struct v4l2_rect rect;
    const struct soc_mbus_pixelfmt *extra_fmt;
};
/*
 * .queue_setup() is called to check whether the driver can accept the requested
 * number of buffers and to fill in plane sizes for the current frame format if
 * required
 */
static int rcar_vin_videobuf_setup(struct vb2_queue *vq,
                   const struct v4l2_format *fmt,
                   unsigned int *count,
                   unsigned int *num_planes,
                   unsigned int sizes[], void *alloc_ctxs[])
{
    struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct rcar_vin_priv *priv = ici->priv;

    if (fmt) {
        /* VIDIOC_CREATE_BUFS path: compute the plane size from the
         * requested format, never smaller than what it implies */
        const struct soc_camera_format_xlate *xlate;
        unsigned int bytes_per_line;
        int ret;

        xlate = soc_camera_xlate_by_fourcc(icd,
                           fmt->fmt.pix.pixelformat);
        if (!xlate)
            return -EINVAL;
        ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width,
                          xlate->host_fmt);
        if (ret < 0)
            return ret;

        bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret);

        ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line,
                      fmt->fmt.pix.height);
        if (ret < 0)
            return ret;

        sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret);
    } else {
        /* Called from VIDIOC_REQBUFS or in compatibility mode */
        sizes[0] = icd->sizeimage;
    }

    alloc_ctxs[0] = priv->alloc_ctx;

    /* restart the frame sequence when the queue starts empty */
    if (!vq->num_buffers)
        priv->sequence = 0;

    if (!*count)
        *count = 2;
    priv->vb_count = *count;

    *num_planes = 1;

    /* Number of hardware slots */
    if (is_continuous_transfer(priv))
        priv->nr_hw_slots = MAX_BUFFER_NUM;
    else
        priv->nr_hw_slots = 1;

    dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]);

    return 0;
}
/*
 * Program the VIN for the current field mode, input interface and output
 * format, ack+enable interrupts and enable the module.  Called (under
 * priv->lock) from the buf_queue path just before capture starts.
 * Returns 0, or -EINVAL for an unsupported output fourcc.
 */
static int rcar_vin_setup(struct rcar_vin_priv *priv)
{
    struct soc_camera_device *icd = priv->ici.icd;
    struct rcar_vin_cam *cam = icd->host_priv;
    u32 vnmc, dmr, interrupts;
    bool progressive = false, output_is_yuv = false;

    /* field capture mode */
    switch (priv->field) {
    case V4L2_FIELD_TOP:
        vnmc = VNMC_IM_ODD;
        break;
    case V4L2_FIELD_BOTTOM:
        vnmc = VNMC_IM_EVEN;
        break;
    case V4L2_FIELD_INTERLACED:
    case V4L2_FIELD_INTERLACED_TB:
        vnmc = VNMC_IM_FULL;
        break;
    case V4L2_FIELD_INTERLACED_BT:
        vnmc = VNMC_IM_FULL | VNMC_FOC;
        break;
    case V4L2_FIELD_NONE:
        /* progressive only works in continuous mode */
        if (is_continuous_transfer(priv)) {
            vnmc = VNMC_IM_ODD_EVEN;
            progressive = true;
        } else {
            vnmc = VNMC_IM_ODD;
        }
        break;
    default:
        vnmc = VNMC_IM_ODD;
        break;
    }

    /* input interface */
    switch (icd->current_fmt->code) {
    case V4L2_MBUS_FMT_YUYV8_1X16:
        /* BT.601/BT.1358 16bit YCbCr422 */
        vnmc |= VNMC_INF_YUV16;
        break;
    case V4L2_MBUS_FMT_YUYV8_2X8:
        /* BT.656 8bit YCbCr422 or BT.601 8bit YCbCr422 */
        vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
            VNMC_INF_YUV8_BT656 : VNMC_INF_YUV8_BT601;
        break;
    case V4L2_MBUS_FMT_YUYV10_2X10:
        /* BT.656 10bit YCbCr422 or BT.601 10bit YCbCr422 */
        vnmc |= priv->pdata_flags & RCAR_VIN_BT656 ?
            VNMC_INF_YUV10_BT656 : VNMC_INF_YUV10_BT601;
        break;
    default:
        break;
    }

    /* output format */
    switch (icd->current_fmt->host_fmt->fourcc) {
    case V4L2_PIX_FMT_NV16:
        /* semi-planar: program the chroma plane offset */
        iowrite32(ALIGN(cam->width * cam->height, 0x80),
              priv->base + VNUVAOF_REG);
        dmr = VNDMR_DTMD_YCSEP;
        output_is_yuv = true;
        break;
    case V4L2_PIX_FMT_YUYV:
        dmr = VNDMR_BPSM;
        output_is_yuv = true;
        break;
    case V4L2_PIX_FMT_UYVY:
        dmr = 0;
        output_is_yuv = true;
        break;
    case V4L2_PIX_FMT_RGB555X:
        dmr = VNDMR_DTMD_ARGB1555;
        break;
    case V4L2_PIX_FMT_RGB565:
        dmr = 0;
        break;
    case V4L2_PIX_FMT_RGB32:
        /* RGB32 only on these chips; otherwise fall through to error */
        if (priv->chip == RCAR_GEN2 || priv->chip == RCAR_H1 ||
            priv->chip == RCAR_E1) {
            dmr = VNDMR_EXRGB;
            break;
        }
    default:
        dev_warn(icd->parent, "Invalid fourcc format (0x%x)\n",
             icd->current_fmt->host_fmt->fourcc);
        return -EINVAL;
    }

    /* Always update on field change */
    vnmc |= VNMC_VUP;

    /* If input and output use the same colorspace, use bypass mode */
    if (output_is_yuv)
        vnmc |= VNMC_BPS;

    /* progressive or interlaced mode */
    interrupts = progressive ? VNIE_FIE | VNIE_EFE : VNIE_EFE;

    /* ack interrupts */
    iowrite32(interrupts, priv->base + VNINTS_REG);
    /* enable interrupts */
    iowrite32(interrupts, priv->base + VNIE_REG);
    /* start capturing */
    iowrite32(dmr, priv->base + VNDMR_REG);
    iowrite32(vnmc | VNMC_ME, priv->base + VNMC_REG);

    return 0;
}
static void rcar_vin_capture(struct rcar_vin_priv *priv)
{
if (is_continuous_transfer(priv))
/* Continuous Frame Capture Mode */
iowrite32(VNFC_C_FRAME, priv->base + VNFC_REG);
else
/* Single Frame Capture Mode */
iowrite32(VNFC_S_FRAME, priv->base + VNFC_REG);
}
/*
 * Ask the hardware to stop capturing.  Enters the STOPPING state; the
 * IRQ handler completes the transition, or we mark STOPPED immediately
 * when the capture-active bit is already clear.  Caller holds priv->lock.
 */
static void rcar_vin_request_capture_stop(struct rcar_vin_priv *priv)
{
    priv->state = STOPPING;

    /* set continuous & single transfer off */
    iowrite32(0, priv->base + VNFC_REG);
    /* disable capture (release DMA buffer), reset */
    iowrite32(ioread32(priv->base + VNMC_REG) & ~VNMC_ME,
          priv->base + VNMC_REG);

    /* update the status if stopped already */
    if (!(ioread32(priv->base + VNMS_REG) & VNMS_CA))
        priv->state = STOPPED;
}
/* Return the index of the first HW buffer slot without a buffer, or -1
 * when every slot is occupied. */
static int rcar_vin_get_free_hw_slot(struct rcar_vin_priv *priv)
{
    int i;

    for (i = 0; i < priv->nr_hw_slots; i++) {
        if (!priv->queue_buf[i])
            return i;
    }
    return -1;
}
/* The hardware is ready to start once every HW slot holds a buffer. */
static int rcar_vin_hw_ready(struct rcar_vin_priv *priv)
{
    /* no free slot means all are filled */
    return rcar_vin_get_free_hw_slot(priv) < 0;
}
/* Moves a buffer from the queue to the HW slots.  Returns 1 if a buffer
 * was programmed into a slot, 0 when the queue is empty or no slot is
 * free.  Caller holds priv->lock. */
static int rcar_vin_fill_hw_slot(struct rcar_vin_priv *priv)
{
    struct vb2_buffer *vb;
    dma_addr_t phys_addr_top;
    int slot;

    if (list_empty(&priv->capture))
        return 0;

    /* Find a free HW slot */
    slot = rcar_vin_get_free_hw_slot(priv);
    if (slot < 0)
        return 0;

    vb = &list_entry(priv->capture.next, struct rcar_vin_buffer, list)->vb;
    list_del_init(to_buf_list(vb));
    priv->queue_buf[slot] = vb;
    /* hand the buffer's DMA address to the VIN memory base register */
    phys_addr_top = vb2_dma_contig_plane_dma_addr(vb, 0);
    iowrite32(phys_addr_top, priv->base + VNMB_REG(slot));

    return 1;
}
/*
 * .buf_queue(): validate the buffer size, enqueue it and, once enough
 * buffers are available to fill every HW slot, program the VIN and start
 * capturing.  On failure the buffer is finished with VB2_BUF_STATE_ERROR.
 */
static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
{
    struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct rcar_vin_priv *priv = ici->priv;
    unsigned long size;

    size = icd->sizeimage;

    if (vb2_plane_size(vb, 0) < size) {
        dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
            vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
        goto error;
    }

    vb2_set_plane_payload(vb, 0, size);

    dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
        vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));

    spin_lock_irq(&priv->lock);

    list_add_tail(to_buf_list(vb), &priv->capture);
    rcar_vin_fill_hw_slot(priv);

    /* If we weren't running, and have enough buffers, start capturing! */
    if (priv->state != RUNNING && rcar_vin_hw_ready(priv)) {
        if (rcar_vin_setup(priv)) {
            /* Submit error */
            list_del_init(to_buf_list(vb));
            spin_unlock_irq(&priv->lock);
            goto error;
        }
        priv->request_to_stop = false;
        init_completion(&priv->capture_stop);
        priv->state = RUNNING;
        rcar_vin_capture(priv);
    }

    spin_unlock_irq(&priv->lock);

    return;

error:
    vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
/*
 * .buf_cleanup(): release a buffer.  If the buffer currently sits in a
 * HW slot we must first stop capturing (waiting on capture_stop when the
 * hardware is mid-frame), after which ALL slot buffers are returned with
 * an error state, since any of them may be the one being released.
 */
static void rcar_vin_videobuf_release(struct vb2_buffer *vb)
{
    struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct rcar_vin_priv *priv = ici->priv;
    unsigned int i;
    int buf_in_use = 0;

    spin_lock_irq(&priv->lock);

    /* Is the buffer in use by the VIN hardware? */
    for (i = 0; i < MAX_BUFFER_NUM; i++) {
        if (priv->queue_buf[i] == vb) {
            buf_in_use = 1;
            break;
        }
    }

    if (buf_in_use) {
        while (priv->state != STOPPED) {
            /* issue stop if running */
            if (priv->state == RUNNING)
                rcar_vin_request_capture_stop(priv);

            /* wait until capturing has been stopped */
            if (priv->state == STOPPING) {
                priv->request_to_stop = true;
                /* drop the lock: the IRQ handler needs it to
                 * signal the completion */
                spin_unlock_irq(&priv->lock);
                wait_for_completion(&priv->capture_stop);
                spin_lock_irq(&priv->lock);
            }
        }

        /*
         * Capturing has now stopped. The buffer we have been asked
         * to release could be any of the current buffers in use, so
         * release all buffers that are in use by HW
         */
        for (i = 0; i < MAX_BUFFER_NUM; i++) {
            if (priv->queue_buf[i]) {
                vb2_buffer_done(priv->queue_buf[i],
                        VB2_BUF_STATE_ERROR);
                priv->queue_buf[i] = NULL;
            }
        }
    } else {
        /* still on the software queue -- just unlink it */
        list_del_init(to_buf_list(vb));
    }

    spin_unlock_irq(&priv->lock);
}
/* .buf_init(): prepare the per-buffer list node used for queueing. */
static int rcar_vin_videobuf_init(struct vb2_buffer *vb)
{
    INIT_LIST_HEAD(to_buf_list(vb));
    return 0;
}
static void rcar_vin_stop_streaming(struct vb2_queue *vq)
{
struct soc_camera_device *icd = soc_camera_from_vb2q(vq);
struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
struct rcar_vin_priv *priv = ici->priv;
struct list_head *buf_head, *tmp;
spin_lock_irq(&priv->lock);
list_for_each_safe(buf_head, tmp, &priv->capture)
list_del_init(buf_head);
spin_unlock_irq(&priv->lock);
}
/* videobuf2 queue operations; wait_prepare/wait_finish reuse the
 * soc_camera host lock helpers. */
static struct vb2_ops rcar_vin_vb2_ops = {
    .queue_setup    = rcar_vin_videobuf_setup,
    .buf_init   = rcar_vin_videobuf_init,
    .buf_cleanup    = rcar_vin_videobuf_release,
    .buf_queue  = rcar_vin_videobuf_queue,
    .stop_streaming = rcar_vin_stop_streaming,
    .wait_prepare   = soc_camera_unlock,
    .wait_finish    = soc_camera_lock,
};
/*
 * VIN interrupt handler: completes the buffer for the just-captured
 * frame, refills the HW slot and either continues capturing, requests a
 * stop (continuous mode with an empty queue), or finishes a pending stop
 * request by signalling capture_stop.
 */
static irqreturn_t rcar_vin_irq(int irq, void *data)
{
    struct rcar_vin_priv *priv = data;
    u32 int_status;
    bool can_run = false, hw_stopped;
    int slot;
    unsigned int handled = 0;

    spin_lock(&priv->lock);

    int_status = ioread32(priv->base + VNINTS_REG);
    if (!int_status)
        goto done;
    /* ack interrupts */
    iowrite32(int_status, priv->base + VNINTS_REG);
    handled = 1;

    /* nothing to do if capture status is 'STOPPED' */
    if (priv->state == STOPPED)
        goto done;

    hw_stopped = !(ioread32(priv->base + VNMS_REG) & VNMS_CA);

    if (!priv->request_to_stop) {
        /* in continuous mode the status register tells us which HW
         * slot just completed; single mode always uses slot 0 */
        if (is_continuous_transfer(priv))
            slot = (ioread32(priv->base + VNMS_REG) &
                VNMS_FBS_MASK) >> VNMS_FBS_SHIFT;
        else
            slot = 0;

        priv->queue_buf[slot]->v4l2_buf.field = priv->field;
        priv->queue_buf[slot]->v4l2_buf.sequence = priv->sequence++;
        do_gettimeofday(&priv->queue_buf[slot]->v4l2_buf.timestamp);
        vb2_buffer_done(priv->queue_buf[slot], VB2_BUF_STATE_DONE);
        priv->queue_buf[slot] = NULL;

        if (priv->state != STOPPING)
            can_run = rcar_vin_fill_hw_slot(priv);

        if (hw_stopped || !can_run) {
            priv->state = STOPPED;
        } else if (is_continuous_transfer(priv) &&
               list_empty(&priv->capture) &&
               priv->state == RUNNING) {
            /*
             * The continuous capturing requires an explicit stop
             * operation when there is no buffer to be set into
             * the VnMBm registers.
             */
            rcar_vin_request_capture_stop(priv);
        } else {
            rcar_vin_capture(priv);
        }

    } else if (hw_stopped) {
        /* a stop was requested by the release path; hand over */
        priv->state = STOPPED;
        priv->request_to_stop = false;
        complete(&priv->capture_stop);
    }

done:
    spin_unlock(&priv->lock);

    return IRQ_RETVAL(handled);
}
/* Attach a client camera: clear all HW slot bookkeeping and power the
 * VIN up via runtime PM. */
static int rcar_vin_add_device(struct soc_camera_device *icd)
{
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct rcar_vin_priv *priv = ici->priv;
    int slot;

    for (slot = 0; slot < MAX_BUFFER_NUM; slot++)
        priv->queue_buf[slot] = NULL;

    pm_runtime_get_sync(ici->v4l2_dev.dev);

    dev_dbg(icd->parent, "R-Car VIN driver attached to camera %d\n",
        icd->devnum);

    return 0;
}
/*
 * Detach the client camera: force the capture engine off, cancel every
 * buffer still owned by the hardware, and drop the runtime PM reference
 * taken in rcar_vin_add_device().
 */
static void rcar_vin_remove_device(struct soc_camera_device *icd)
{
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct rcar_vin_priv *priv = ici->priv;
    struct vb2_buffer *vb;
    int i;

    /* disable capture, disable interrupts */
    iowrite32(ioread32(priv->base + VNMC_REG) & ~VNMC_ME,
          priv->base + VNMC_REG);
    iowrite32(0, priv->base + VNIE_REG);

    priv->state = STOPPED;
    priv->request_to_stop = false;

    /* make sure active buffer is cancelled */
    spin_lock_irq(&priv->lock);
    for (i = 0; i < MAX_BUFFER_NUM; i++) {
        vb = priv->queue_buf[i];
        if (vb) {
            list_del_init(to_buf_list(vb));
            vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
        }
    }
    spin_unlock_irq(&priv->lock);

    pm_runtime_put(ici->v4l2_dev.dev);

    dev_dbg(icd->parent, "R-Car VIN driver detached from camera %d\n",
        icd->devnum);
}
/* Called with .host_lock held */
static int rcar_vin_clock_start(struct soc_camera_host *ici)
{
    /* VIN does not have "mclk" -- nothing to enable for the client */
    return 0;
}
/* Called with .host_lock held */
static void rcar_vin_clock_stop(struct soc_camera_host *ici)
{
    /* VIN does not have "mclk" -- nothing to disable */
}
/* rect is guaranteed to not exceed the scaled camera rectangle */
/*
 * Program the VIN pre-clip (offset into the camera output) and post-clip
 * (user window) registers plus the line stride.  Interlaced modes halve
 * the vertical values since each field carries half the lines; RGB32 on
 * E1 doubles horizontal values (dsize).
 */
static int rcar_vin_set_rect(struct soc_camera_device *icd)
{
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct rcar_vin_cam *cam = icd->host_priv;
    struct rcar_vin_priv *priv = ici->priv;
    unsigned int left_offset, top_offset;
    unsigned char dsize = 0;
    struct v4l2_rect *cam_subrect = &cam->subrect;

    dev_dbg(icd->parent, "Crop %ux%u@%u:%u\n",
        icd->user_width, icd->user_height, cam->vin_left, cam->vin_top);

    left_offset = cam->vin_left;
    top_offset = cam->vin_top;

    /* RGB32 pixels on E1 occupy twice the horizontal register units */
    if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_RGB32 &&
        priv->chip == RCAR_E1)
        dsize = 1;

    dev_dbg(icd->parent, "Cam %ux%u@%u:%u\n",
        cam->width, cam->height, cam->vin_left, cam->vin_top);
    dev_dbg(icd->parent, "Cam subrect %ux%u@%u:%u\n",
        cam_subrect->width, cam_subrect->height,
        cam_subrect->left, cam_subrect->top);

    /* Set Start/End Pixel/Line Pre-Clip */
    iowrite32(left_offset << dsize, priv->base + VNSPPRC_REG);
    iowrite32((left_offset + cam->width - 1) << dsize,
          priv->base + VNEPPRC_REG);
    switch (priv->field) {
    case V4L2_FIELD_INTERLACED:
    case V4L2_FIELD_INTERLACED_TB:
    case V4L2_FIELD_INTERLACED_BT:
        /* per-field: half the lines */
        iowrite32(top_offset / 2, priv->base + VNSLPRC_REG);
        iowrite32((top_offset + cam->height) / 2 - 1,
              priv->base + VNELPRC_REG);
        break;
    default:
        iowrite32(top_offset, priv->base + VNSLPRC_REG);
        iowrite32(top_offset + cam->height - 1,
              priv->base + VNELPRC_REG);
        break;
    }

    /* Set Start/End Pixel/Line Post-Clip */
    iowrite32(0, priv->base + VNSPPOC_REG);
    iowrite32(0, priv->base + VNSLPOC_REG);
    iowrite32((cam_subrect->width - 1) << dsize, priv->base + VNEPPOC_REG);
    switch (priv->field) {
    case V4L2_FIELD_INTERLACED:
    case V4L2_FIELD_INTERLACED_TB:
    case V4L2_FIELD_INTERLACED_BT:
        iowrite32(cam_subrect->height / 2 - 1,
              priv->base + VNELPOC_REG);
        break;
    default:
        iowrite32(cam_subrect->height - 1, priv->base + VNELPOC_REG);
        break;
    }

    /* memory line stride, in 16-pixel units */
    iowrite32(ALIGN(cam->width, 0x10), priv->base + VNIS_REG);

    return 0;
}
/* Disable the capture module while saving the current VNMC value so
 * capture_restore() can bring it back unchanged. */
static void capture_stop_preserve(struct rcar_vin_priv *priv, u32 *vnmc)
{
    *vnmc = ioread32(priv->base + VNMC_REG);
    /* module disable */
    iowrite32(*vnmc & ~VNMC_ME, priv->base + VNMC_REG);
}
/* Rewrite VNMC with a value previously saved by capture_stop_preserve(),
 * but only after the hardware has finished processing the current frame
 * (VNMS_AV deasserts).  Gives up after ~10s and leaves VNMC untouched. */
static void capture_restore(struct rcar_vin_priv *priv, u32 vnmc)
{
    unsigned long timeout = jiffies + 10 * HZ;

    /*
     * Wait until the end of the current frame. It can take a long time,
     * but if it has been aborted by a MRST1 reset, it should exit sooner.
     */
    while ((ioread32(priv->base + VNMS_REG) & VNMS_AV) &&
        time_before(jiffies, timeout))
        msleep(1);

    if (time_after(jiffies, timeout)) {
        dev_err(priv->ici.v4l2_dev.dev,
            "Timeout waiting for frame end! Interface problem?\n");
        return;
    }

    iowrite32(vnmc, priv->base + VNMC_REG);
}
/* Media bus configurations the VIN can accept from a client */
#define VIN_MBUS_FLAGS (V4L2_MBUS_MASTER |      \
            V4L2_MBUS_PCLK_SAMPLE_RISING |  \
            V4L2_MBUS_HSYNC_ACTIVE_HIGH |   \
            V4L2_MBUS_HSYNC_ACTIVE_LOW |    \
            V4L2_MBUS_VSYNC_ACTIVE_HIGH |   \
            V4L2_MBUS_VSYNC_ACTIVE_LOW |    \
            V4L2_MBUS_DATA_ACTIVE_HIGH)

/*
 * Negotiate media bus parameters with the client, program the sync
 * polarities into VNDMR2 and apply the crop rectangle, restarting the
 * capture module afterwards.
 *
 * NOTE(review): the early `return` paths after capture_stop_preserve()
 * skip capture_restore(), leaving the module disabled on error -- verify
 * whether callers recover from this.
 */
static int rcar_vin_set_bus_param(struct soc_camera_device *icd)
{
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct rcar_vin_priv *priv = ici->priv;
    struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
    struct v4l2_mbus_config cfg;
    unsigned long common_flags;
    u32 vnmc;
    u32 val;
    int ret;

    capture_stop_preserve(priv, &vnmc);

    ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
    if (!ret) {
        common_flags = soc_mbus_config_compatible(&cfg, VIN_MBUS_FLAGS);
        if (!common_flags) {
            dev_warn(icd->parent,
                 "MBUS flags incompatible: camera 0x%x, host 0x%x\n",
                 cfg.flags, VIN_MBUS_FLAGS);
            return -EINVAL;
        }
    } else if (ret != -ENOIOCTLCMD) {
        return ret;
    } else {
        /* client cannot report -- assume everything we support */
        common_flags = VIN_MBUS_FLAGS;
    }

    /* Make choises, based on platform preferences */
    if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
        (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
        if (priv->pdata_flags & RCAR_VIN_HSYNC_ACTIVE_LOW)
            common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
        else
            common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
    }

    if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
        (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
        if (priv->pdata_flags & RCAR_VIN_VSYNC_ACTIVE_LOW)
            common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
        else
            common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
    }

    cfg.flags = common_flags;
    ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
    if (ret < 0 && ret != -ENOIOCTLCMD)
        return ret;

    val = priv->field == V4L2_FIELD_NONE ? VNDMR2_FTEV : 0;
    if (!(common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW))
        val |= VNDMR2_VPS;
    if (!(common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW))
        val |= VNDMR2_HPS;
    iowrite32(val, priv->base + VNDMR2_REG);

    ret = rcar_vin_set_rect(icd);
    if (ret < 0)
        return ret;

    capture_restore(priv, vnmc);

    return 0;
}
/* Probe whether the client's bus configuration and width are compatible
 * with the VIN without changing any hardware state. */
static int rcar_vin_try_bus_param(struct soc_camera_device *icd,
                  unsigned char buswidth)
{
    struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
    struct v4l2_mbus_config cfg;
    int err;

    err = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
    if (err == -ENOIOCTLCMD) {
        /* client cannot report its configuration -- accept it */
        return 0;
    }
    if (err)
        return err;

    if (buswidth > 24)
        return -EINVAL;

    /* check is there common mbus flags */
    if (soc_mbus_config_compatible(&cfg, VIN_MBUS_FLAGS))
        return 0;

    dev_warn(icd->parent,
         "MBUS flags incompatible: camera 0x%x, host 0x%x\n",
         cfg.flags, VIN_MBUS_FLAGS);

    return -EINVAL;
}
static bool rcar_vin_packing_supported(const struct soc_mbus_pixelfmt *fmt)
{
return fmt->packing == SOC_MBUS_PACKING_NONE ||
(fmt->bits_per_sample > 8 &&
fmt->packing == SOC_MBUS_PACKING_EXTEND16);
}
/* Output pixel formats the VIN itself can generate from a YUV422 input
 * (offered in addition to the client's native pass-through format). */
static const struct soc_mbus_pixelfmt rcar_vin_formats[] = {
    {
        .fourcc         = V4L2_PIX_FMT_NV16,
        .name           = "NV16",
        .bits_per_sample    = 8,
        .packing        = SOC_MBUS_PACKING_2X8_PADHI,
        .order          = SOC_MBUS_ORDER_LE,
        .layout         = SOC_MBUS_LAYOUT_PLANAR_Y_C,
    },
    {
        .fourcc         = V4L2_PIX_FMT_UYVY,
        .name           = "UYVY",
        .bits_per_sample    = 16,
        .packing        = SOC_MBUS_PACKING_NONE,
        .order          = SOC_MBUS_ORDER_LE,
        .layout         = SOC_MBUS_LAYOUT_PACKED,
    },
    {
        .fourcc         = V4L2_PIX_FMT_RGB565,
        .name           = "RGB565",
        .bits_per_sample    = 16,
        .packing        = SOC_MBUS_PACKING_NONE,
        .order          = SOC_MBUS_ORDER_LE,
        .layout         = SOC_MBUS_LAYOUT_PACKED,
    },
    {
        .fourcc         = V4L2_PIX_FMT_RGB555X,
        .name           = "ARGB1555",
        .bits_per_sample    = 16,
        .packing        = SOC_MBUS_PACKING_NONE,
        .order          = SOC_MBUS_ORDER_LE,
        .layout         = SOC_MBUS_LAYOUT_PACKED,
    },
    {
        .fourcc         = V4L2_PIX_FMT_RGB32,
        .name           = "RGB888",
        .bits_per_sample    = 32,
        .packing        = SOC_MBUS_PACKING_NONE,
        .order          = SOC_MBUS_ORDER_LE,
        .layout         = SOC_MBUS_LAYOUT_PACKED,
    },
};
/*
 * Enumerate host-side formats for the client's media bus code at index
 * idx.  On the first call it also caches the client geometry in
 * icd->host_priv, shrinking an oversized client format to fit
 * VIN_MAX_WIDTH/HEIGHT.  When xlate is non-NULL the translation entries
 * are filled in.  Returns the number of formats, 0 to skip this code, or
 * a negative error.
 *
 * Fix: removed the inner `struct device *dev = icd->parent;` that
 * shadowed the identical outer declaration.
 */
static int rcar_vin_get_formats(struct soc_camera_device *icd, unsigned int idx,
                struct soc_camera_format_xlate *xlate)
{
    struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
    struct device *dev = icd->parent;
    int ret, k, n;
    int formats = 0;
    struct rcar_vin_cam *cam;
    enum v4l2_mbus_pixelcode code;
    const struct soc_mbus_pixelfmt *fmt;

    ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code);
    if (ret < 0)
        return 0;

    fmt = soc_mbus_get_fmtdesc(code);
    if (!fmt) {
        dev_warn(dev, "unsupported format code #%u: %d\n", idx, code);
        return 0;
    }

    ret = rcar_vin_try_bus_param(icd, fmt->bits_per_sample);
    if (ret < 0)
        return 0;

    if (!icd->host_priv) {
        struct v4l2_mbus_framefmt mf;
        struct v4l2_rect rect;
        int shift;

        ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
        if (ret < 0)
            return ret;

        /* Cache current client geometry */
        ret = soc_camera_client_g_rect(sd, &rect);
        if (ret == -ENOIOCTLCMD) {
            /* Sensor driver doesn't support cropping */
            rect.left = 0;
            rect.top = 0;
            rect.width = mf.width;
            rect.height = mf.height;
        } else if (ret < 0) {
            return ret;
        }

        /*
         * If sensor proposes too large format then try smaller ones:
         * 1280x960, 640x480, 320x240
         */
        for (shift = 0; shift < 3; shift++) {
            if (mf.width <= VIN_MAX_WIDTH &&
                mf.height <= VIN_MAX_HEIGHT)
                break;

            mf.width = 1280 >> shift;
            mf.height = 960 >> shift;
            ret = v4l2_device_call_until_err(sd->v4l2_dev,
                             soc_camera_grp_id(icd),
                             video, s_mbus_fmt,
                             &mf);
            if (ret < 0)
                return ret;
        }

        if (shift == 3) {
            dev_err(dev,
                "Failed to configure the client below %ux%u\n",
                mf.width, mf.height);
            return -EIO;
        }

        dev_dbg(dev, "camera fmt %ux%u\n", mf.width, mf.height);

        cam = kzalloc(sizeof(*cam), GFP_KERNEL);
        if (!cam)
            return -ENOMEM;
        /*
         * We are called with current camera crop,
         * initialise subrect with it
         */
        cam->rect = rect;
        cam->subrect = rect;
        cam->width = mf.width;
        cam->height = mf.height;

        icd->host_priv = cam;
    } else {
        cam = icd->host_priv;
    }

    /* Beginning of a pass */
    if (!idx)
        cam->extra_fmt = NULL;

    switch (code) {
    case V4L2_MBUS_FMT_YUYV8_1X16:
    case V4L2_MBUS_FMT_YUYV8_2X8:
    case V4L2_MBUS_FMT_YUYV10_2X10:
        /* only add the VIN-generated formats once per pass */
        if (cam->extra_fmt)
            break;

        /* Add all our formats that can be generated by VIN */
        cam->extra_fmt = rcar_vin_formats;

        n = ARRAY_SIZE(rcar_vin_formats);
        formats += n;
        for (k = 0; xlate && k < n; k++, xlate++) {
            xlate->host_fmt = &rcar_vin_formats[k];
            xlate->code = code;
            dev_dbg(dev, "Providing format %s using code %d\n",
                rcar_vin_formats[k].name, code);
        }
        break;
    default:
        if (!rcar_vin_packing_supported(fmt))
            return 0;

        dev_dbg(dev, "Providing format %s in pass-through mode\n",
            fmt->name);
        break;
    }

    /* Generic pass-through */
    formats++;
    if (xlate) {
        xlate->host_fmt = fmt;
        xlate->code = code;
        xlate++;
    }

    return formats;
}
/* Drop the per-client state allocated in rcar_vin_get_formats(). */
static void rcar_vin_put_formats(struct soc_camera_device *icd)
{
    struct rcar_vin_cam *cam = icd->host_priv;

    icd->host_priv = NULL;
    kfree(cam);
}
/*
 * S_CROP: apply the requested crop by iterating the client's cropping
 * first, then using the VIN pre/post-clip to reach the exact window.
 * The capture module is stopped for the duration and restarted at the
 * end (re-enabled only if a buffer is still owned by a HW slot).
 *
 * NOTE(review): the error `return ret` paths after
 * capture_stop_preserve() skip capture_restore() -- confirm callers
 * tolerate the module staying disabled on failure.
 */
static int rcar_vin_set_crop(struct soc_camera_device *icd,
                 const struct v4l2_crop *a)
{
    struct v4l2_crop a_writable = *a;
    const struct v4l2_rect *rect = &a_writable.c;
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct rcar_vin_priv *priv = ici->priv;
    struct v4l2_crop cam_crop;
    struct rcar_vin_cam *cam = icd->host_priv;
    struct v4l2_rect *cam_rect = &cam_crop.c;
    struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
    struct device *dev = icd->parent;
    struct v4l2_mbus_framefmt mf;
    u32 vnmc;
    int ret, i;

    dev_dbg(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
        rect->left, rect->top);

    /* During camera cropping its output window can change too, stop VIN */
    capture_stop_preserve(priv, &vnmc);
    dev_dbg(dev, "VNMC_REG 0x%x\n", vnmc);

    /* Apply iterative camera S_CROP for new input window. */
    ret = soc_camera_client_s_crop(sd, &a_writable, &cam_crop,
                       &cam->rect, &cam->subrect);
    if (ret < 0)
        return ret;

    dev_dbg(dev, "camera cropped to %ux%u@%u:%u\n",
        cam_rect->width, cam_rect->height,
        cam_rect->left, cam_rect->top);

    /* On success cam_crop contains current camera crop */

    /* Retrieve camera output window */
    ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf);
    if (ret < 0)
        return ret;

    if (mf.width > VIN_MAX_WIDTH || mf.height > VIN_MAX_HEIGHT)
        return -EINVAL;

    /* Cache camera output window */
    cam->width = mf.width;
    cam->height = mf.height;

    icd->user_width = cam->width;
    icd->user_height = cam->height;

    /* VIN clip offsets must be even */
    cam->vin_left = rect->left & ~1;
    cam->vin_top = rect->top & ~1;

    /* Use VIN cropping to crop to the new window. */
    ret = rcar_vin_set_rect(icd);
    if (ret < 0)
        return ret;

    cam->subrect = *rect;

    dev_dbg(dev, "VIN cropped to %ux%u@%u:%u\n",
        icd->user_width, icd->user_height,
        cam->vin_left, cam->vin_top);

    /* Restore capture */
    for (i = 0; i < MAX_BUFFER_NUM; i++) {
        if (priv->queue_buf[i] && priv->state == STOPPED) {
            vnmc |= VNMC_ME;
            break;
        }
    }
    capture_restore(priv, vnmc);

    /* Even if only camera cropping succeeded */
    return ret;
}
static int rcar_vin_get_crop(struct soc_camera_device *icd,
struct v4l2_crop *a)
{
struct rcar_vin_cam *cam = icd->host_priv;
a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
a->c = cam->subrect;
return 0;
}
/* Similar to set_crop multistage iterative algorithm */
static int rcar_vin_set_fmt(struct soc_camera_device *icd,
			    struct v4l2_format *f)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct rcar_vin_priv *priv = ici->priv;
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct rcar_vin_cam *cam = icd->host_priv;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct v4l2_mbus_framefmt mf;
	struct device *dev = icd->parent;
	__u32 pixfmt = pix->pixelformat;
	const struct soc_camera_format_xlate *xlate;
	unsigned int vin_sub_width = 0, vin_sub_height = 0;
	int ret;
	bool can_scale;
	enum v4l2_field field;
	v4l2_std_id std;

	dev_dbg(dev, "S_FMT(pix=0x%x, %ux%u)\n",
		pixfmt, pix->width, pix->height);

	/*
	 * Normalise the requested field order: anything not explicitly
	 * supported collapses to NONE, and plain INTERLACED is resolved
	 * to _TB or _BT based on the detected video standard.
	 */
	switch (pix->field) {
	default:
		pix->field = V4L2_FIELD_NONE;
		/* fall-through */
	case V4L2_FIELD_NONE:
	case V4L2_FIELD_TOP:
	case V4L2_FIELD_BOTTOM:
	case V4L2_FIELD_INTERLACED_TB:
	case V4L2_FIELD_INTERLACED_BT:
		field = pix->field;
		break;
	case V4L2_FIELD_INTERLACED:
		/* Query for standard if not explicitly mentioned _TB/_BT */
		ret = v4l2_subdev_call(sd, video, querystd, &std);
		if (ret < 0)
			std = V4L2_STD_625_50;

		/* 50 Hz standards are top-field-first, others bottom-first */
		field = std & V4L2_STD_625_50 ? V4L2_FIELD_INTERLACED_TB :
			V4L2_FIELD_INTERLACED_BT;
		break;
	}

	/* Map the fourcc onto a host/sensor format pair */
	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
	if (!xlate) {
		dev_warn(dev, "Format %x not found\n", pixfmt);
		return -EINVAL;
	}

	/* Calculate client output geometry */
	soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf,
				      12);
	mf.field = pix->field;
	mf.colorspace = pix->colorspace;
	mf.code	 = xlate->code;

	/*
	 * Decide whether VIN scaling may be used for this output format;
	 * RGB32 scaling is not available on the E1 variant.
	 */
	switch (pixfmt) {
	case V4L2_PIX_FMT_RGB32:
		can_scale = priv->chip != RCAR_E1;
		break;
	case V4L2_PIX_FMT_UYVY:
	case V4L2_PIX_FMT_YUYV:
	case V4L2_PIX_FMT_RGB565:
	case V4L2_PIX_FMT_RGB555X:
		can_scale = true;
		break;
	default:
		can_scale = false;
		break;
	}

	dev_dbg(dev, "request camera output %ux%u\n", mf.width, mf.height);

	/* Negotiate the client window; may also program VIN scaling */
	ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect,
				      &mf, &vin_sub_width, &vin_sub_height,
				      can_scale, 12);

	/* Done with the camera. Now see if we can improve the result */
	dev_dbg(dev, "Camera %d fmt %ux%u, requested %ux%u\n",
		ret, mf.width, mf.height, pix->width, pix->height);
	if (ret == -ENOIOCTLCMD)
		dev_dbg(dev, "Sensor doesn't support scaling\n");
	else if (ret < 0)
		return ret;

	if (mf.code != xlate->code)
		return -EINVAL;

	/* Prepare VIN crop */
	cam->width = mf.width;
	cam->height = mf.height;

	/* Use VIN scaling to scale to the requested user window. */

	/* We cannot scale up */
	if (pix->width > vin_sub_width)
		vin_sub_width = pix->width;

	if (pix->height > vin_sub_height)
		vin_sub_height = pix->height;

	pix->colorspace = mf.colorspace;

	/* Without scaling the user window must equal the client window */
	if (!can_scale) {
		pix->width = vin_sub_width;
		pix->height = vin_sub_height;
	}

	/*
	 * We have calculated CFLCR, the actual configuration will be performed
	 * in rcar_vin_set_bus_param()
	 */

	dev_dbg(dev, "W: %u : %u, H: %u : %u\n",
		vin_sub_width, pix->width, vin_sub_height, pix->height);

	icd->current_fmt = xlate;
	priv->field = field;

	return 0;
}
/*
 * Validate a format without applying it: clamp the geometry to VIN limits,
 * let the client (sensor) adjust it, and account for VIN's ability to
 * down-scale exactly for NV16 output.
 */
static int rcar_vin_try_fmt(struct soc_camera_device *icd,
			    struct v4l2_format *f)
{
	const struct soc_camera_format_xlate *xlate;
	struct v4l2_pix_format *pix = &f->fmt.pix;
	struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
	struct v4l2_mbus_framefmt mf;
	__u32 pixfmt = pix->pixelformat;
	int width, height;
	int ret;

	xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
	if (!xlate) {
		/* Unknown fourcc: fall back to the currently set format */
		xlate = icd->current_fmt;
		dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
			pixfmt, xlate->host_fmt->fourcc);
		pixfmt = xlate->host_fmt->fourcc;
		pix->pixelformat = pixfmt;
		pix->colorspace = icd->colorspace;
	}

	/* FIXME: calculate using depth and bus width */
	v4l_bound_align_image(&pix->width, 2, VIN_MAX_WIDTH, 1,
			      &pix->height, 4, VIN_MAX_HEIGHT, 2, 0);

	/* Remember the user-requested (clamped) geometry */
	width = pix->width;
	height = pix->height;

	/* let soc-camera calculate these values */
	pix->bytesperline = 0;
	pix->sizeimage = 0;

	/* limit to sensor capabilities */
	mf.width = pix->width;
	mf.height = pix->height;
	mf.field = pix->field;
	mf.code = xlate->code;
	mf.colorspace = pix->colorspace;

	ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
					 video, try_mbus_fmt, &mf);
	if (ret < 0)
		return ret;

	/* Propagate the sensor-adjusted values back to the user */
	pix->width = mf.width;
	pix->height = mf.height;
	pix->field = mf.field;
	pix->colorspace = mf.colorspace;

	if (pixfmt == V4L2_PIX_FMT_NV16) {
		/* FIXME: check against rect_max after converting soc-camera */
		/* We can scale precisely, need a bigger image from camera */
		if (pix->width < width || pix->height < height) {
			/*
			 * We presume, the sensor behaves sanely, i.e. if
			 * requested a bigger rectangle, it will not return a
			 * smaller one.
			 */
			mf.width = VIN_MAX_WIDTH;
			mf.height = VIN_MAX_HEIGHT;
			ret = v4l2_device_call_until_err(sd->v4l2_dev,
							 soc_camera_grp_id(icd),
							 video, try_mbus_fmt,
							 &mf);
			if (ret < 0) {
				dev_err(icd->parent,
					"client try_fmt() = %d\n", ret);
				return ret;
			}
		}
		/* We will scale exactly */
		if (mf.width > width)
			pix->width = width;
		if (mf.height > height)
			pix->height = height;
	}

	return ret;
}
/* poll() file operation: delegate to videobuf2 on the capture queue. */
static unsigned int rcar_vin_poll(struct file *file, poll_table *pt)
{
	struct soc_camera_device *sdev = file->private_data;

	return vb2_poll(&sdev->vb2_vidq, file, pt);
}
/* VIDIOC_QUERYCAP: identify the host and advertise streaming capture. */
static int rcar_vin_querycap(struct soc_camera_host *ici,
			     struct v4l2_capability *cap)
{
	cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
	strlcpy(cap->card, "R_Car_VIN", sizeof(cap->card));

	return 0;
}
/* Configure and initialise the videobuf2 capture queue for one device. */
static int rcar_vin_init_videobuf2(struct vb2_queue *vq,
				   struct soc_camera_device *icd)
{
	/* Queue backed by contiguous DMA memory, mmap or userptr buffers */
	vq->drv_priv = icd;
	vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	vq->io_modes = VB2_MMAP | VB2_USERPTR;
	vq->mem_ops = &vb2_dma_contig_memops;
	vq->ops = &rcar_vin_vb2_ops;
	vq->buf_struct_size = sizeof(struct rcar_vin_buffer);
	vq->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;

	return vb2_queue_init(vq);
}
/* soc_camera host callbacks implemented by the R-Car VIN driver. */
static struct soc_camera_host_ops rcar_vin_host_ops = {
	.owner		= THIS_MODULE,
	.add		= rcar_vin_add_device,
	.remove		= rcar_vin_remove_device,
	.clock_start	= rcar_vin_clock_start,
	.clock_stop	= rcar_vin_clock_stop,
	.get_formats	= rcar_vin_get_formats,
	.put_formats	= rcar_vin_put_formats,
	.get_crop	= rcar_vin_get_crop,
	.set_crop	= rcar_vin_set_crop,
	.try_fmt	= rcar_vin_try_fmt,
	.set_fmt	= rcar_vin_set_fmt,
	.poll		= rcar_vin_poll,
	.querycap	= rcar_vin_querycap,
	.set_bus_param	= rcar_vin_set_bus_param,
	.init_videobuf2	= rcar_vin_init_videobuf2,
};
#ifdef CONFIG_OF
/*
 * OF match table; .data carries the SoC generation (enum chip_id).
 * Fix: the table is never modified, so declare it const as the
 * of_device_id API expects.
 */
static const struct of_device_id rcar_vin_of_table[] = {
	{ .compatible = "renesas,vin-r8a7791", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,vin-r8a7790", .data = (void *)RCAR_GEN2 },
	{ .compatible = "renesas,vin-r8a7779", .data = (void *)RCAR_H1 },
	{ .compatible = "renesas,vin-r8a7778", .data = (void *)RCAR_M1 },
	{ },
};
MODULE_DEVICE_TABLE(of, rcar_vin_of_table);
#endif
/*
 * Non-OF platform device IDs; driver_data carries the SoC generation.
 * Fix: declared const — platform_driver.id_table is const-qualified.
 */
static const struct platform_device_id rcar_vin_id_table[] = {
	{ "r8a7791-vin",  RCAR_GEN2 },
	{ "r8a7790-vin",  RCAR_GEN2 },
	{ "r8a7779-vin",  RCAR_H1 },
	{ "r8a7778-vin",  RCAR_M1 },
	{ "uPD35004-vin", RCAR_E1 },
	{},
};
MODULE_DEVICE_TABLE(platform, rcar_vin_id_table);
/*
 * Probe: parse bus flags from DT (or platform data), map registers,
 * request the IRQ, set up the DMA-contig allocator and register the
 * soc_camera host.
 *
 * Fix over the original code: when v4l2_of_parse_endpoint() failed, the
 * endpoint node reference taken by of_graph_get_next_endpoint() was
 * leaked; it is now dropped on that error path as well.
 */
static int rcar_vin_probe(struct platform_device *pdev)
{
	const struct of_device_id *match = NULL;
	struct rcar_vin_priv *priv;
	struct resource *mem;
	struct rcar_vin_platform_data *pdata;
	unsigned int pdata_flags;
	int irq, ret;

	if (pdev->dev.of_node) {
		struct v4l2_of_endpoint ep;
		struct device_node *np;

		match = of_match_device(of_match_ptr(rcar_vin_of_table),
					&pdev->dev);

		np = of_graph_get_next_endpoint(pdev->dev.of_node, NULL);
		if (!np) {
			dev_err(&pdev->dev, "could not find endpoint\n");
			return -EINVAL;
		}

		ret = v4l2_of_parse_endpoint(np, &ep);
		if (ret) {
			dev_err(&pdev->dev, "could not parse endpoint\n");
			of_node_put(np);
			return ret;
		}

		/* Translate the endpoint bus description into driver flags */
		if (ep.bus_type == V4L2_MBUS_BT656)
			pdata_flags = RCAR_VIN_BT656;
		else {
			pdata_flags = 0;
			if (ep.bus.parallel.flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)
				pdata_flags |= RCAR_VIN_HSYNC_ACTIVE_LOW;
			if (ep.bus.parallel.flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)
				pdata_flags |= RCAR_VIN_VSYNC_ACTIVE_LOW;
		}

		of_node_put(np);

		dev_dbg(&pdev->dev, "pdata_flags = %08x\n", pdata_flags);
	} else {
		pdata = pdev->dev.platform_data;
		if (!pdata || !pdata->flags) {
			dev_err(&pdev->dev, "platform data not set\n");
			return -EINVAL;
		}
		pdata_flags = pdata->flags;
	}

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (mem == NULL)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return -EINVAL;

	priv = devm_kzalloc(&pdev->dev, sizeof(struct rcar_vin_priv),
			    GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_ioremap_resource(&pdev->dev, mem);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	ret = devm_request_irq(&pdev->dev, irq, rcar_vin_irq, IRQF_SHARED,
			       dev_name(&pdev->dev), priv);
	if (ret)
		return ret;

	priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
	if (IS_ERR(priv->alloc_ctx))
		return PTR_ERR(priv->alloc_ctx);

	priv->ici.priv = priv;
	priv->ici.v4l2_dev.dev = &pdev->dev;
	priv->ici.drv_name = dev_name(&pdev->dev);
	priv->ici.ops = &rcar_vin_host_ops;

	priv->pdata_flags = pdata_flags;
	if (!match) {
		/* Non-OF: chip variant comes from the platform device ID */
		priv->ici.nr = pdev->id;
		priv->chip = pdev->id_entry->driver_data;
	} else {
		/* OF: chip variant comes from the match table .data */
		priv->ici.nr = of_alias_get_id(pdev->dev.of_node, "vin");
		priv->chip = (enum chip_id)match->data;
	}

	spin_lock_init(&priv->lock);
	INIT_LIST_HEAD(&priv->capture);

	priv->state = STOPPED;

	pm_suspend_ignore_children(&pdev->dev, true);
	pm_runtime_enable(&pdev->dev);

	ret = soc_camera_host_register(&priv->ici);
	if (ret)
		goto cleanup;

	return 0;

cleanup:
	pm_runtime_disable(&pdev->dev);
	vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);

	return ret;
}
/* Remove: tear everything down in reverse order of probe. */
static int rcar_vin_remove(struct platform_device *pdev)
{
	struct soc_camera_host *host = to_soc_camera_host(&pdev->dev);
	struct rcar_vin_priv *priv =
		container_of(host, struct rcar_vin_priv, ici);

	soc_camera_host_unregister(host);
	pm_runtime_disable(&pdev->dev);
	vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);

	return 0;
}
/* Platform driver glue; DRV_NAME is defined earlier in this file. */
static struct platform_driver rcar_vin_driver = {
	.probe		= rcar_vin_probe,
	.remove		= rcar_vin_remove,
	.driver		= {
		.name		= DRV_NAME,
		.owner		= THIS_MODULE,
		.of_match_table	= of_match_ptr(rcar_vin_of_table),
	},
	.id_table	= rcar_vin_id_table,
};
/* Module registration and metadata. */
module_platform_driver(rcar_vin_driver);

MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rcar_vin");
MODULE_DESCRIPTION("Renesas R-Car VIN camera host driver");
| gpl-2.0 |
Outernet-Project/rpi-linux | arch/arm/mach-imx/anatop.c | 329 | 3457 | /*
* Copyright (C) 2013 Freescale Semiconductor, Inc.
*
* The code contained herein is licensed under the GNU General Public
* License. You may obtain a copy of the GNU General Public License
* Version 2 or later at the following locations:
*
* http://www.opensource.org/licenses/gpl-license.html
* http://www.gnu.org/copyleft/gpl.html
*/
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include "common.h"
#include "hardware.h"
#define REG_SET 0x4
#define REG_CLR 0x8
#define ANADIG_REG_2P5 0x130
#define ANADIG_REG_CORE 0x140
#define ANADIG_ANA_MISC0 0x150
#define ANADIG_USB1_CHRG_DETECT 0x1b0
#define ANADIG_USB2_CHRG_DETECT 0x210
#define ANADIG_DIGPROG 0x260
#define ANADIG_DIGPROG_IMX6SL 0x280
#define BM_ANADIG_REG_2P5_ENABLE_WEAK_LINREG 0x40000
#define BM_ANADIG_REG_CORE_FET_ODRIVE 0x20000000
#define BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG 0x1000
#define BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B 0x80000
#define BM_ANADIG_USB_CHRG_DETECT_EN_B 0x100000
static struct regmap *anatop;
/* Enable/disable the weak 2P5 linear regulator via the anatop regmap. */
static void imx_anatop_enable_weak2p5(bool enable)
{
	u32 misc0;
	u32 offset;

	regmap_read(anatop, ANADIG_ANA_MISC0, &misc0);

	/* can only be enabled when stop_mode_config is clear. */
	if (enable && !(misc0 & BM_ANADIG_ANA_MISC0_STOP_MODE_CONFIG))
		offset = ANADIG_REG_2P5 + REG_SET;
	else
		offset = ANADIG_REG_2P5 + REG_CLR;

	regmap_write(anatop, offset, BM_ANADIG_REG_2P5_ENABLE_WEAK_LINREG);
}
/* Set or clear the core regulator FET overdrive bit. */
static void imx_anatop_enable_fet_odrive(bool enable)
{
	u32 offset = enable ? REG_SET : REG_CLR;

	regmap_write(anatop, ANADIG_REG_CORE + offset,
		     BM_ANADIG_REG_CORE_FET_ODRIVE);
}
/*
 * Called on the suspend path: enable the weak 2P5 regulator and the core
 * FET overdrive.  The resume path undoes both in reverse order.
 */
void imx_anatop_pre_suspend(void)
{
	imx_anatop_enable_weak2p5(true);
	imx_anatop_enable_fet_odrive(true);
}
/*
 * Called on the resume path: disable the FET overdrive and the weak 2P5
 * regulator, reversing imx_anatop_pre_suspend().
 */
void imx_anatop_post_resume(void)
{
	imx_anatop_enable_fet_odrive(false);
	imx_anatop_enable_weak2p5(false);
}
static void imx_anatop_usb_chrg_detect_disable(void)
{
regmap_write(anatop, ANADIG_USB1_CHRG_DETECT,
BM_ANADIG_USB_CHRG_DETECT_EN_B
| BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B);
regmap_write(anatop, ANADIG_USB2_CHRG_DETECT,
BM_ANADIG_USB_CHRG_DETECT_EN_B |
BM_ANADIG_USB_CHRG_DETECT_CHK_CHRG_B);
}
/*
 * Read the SoC type and silicon revision from the anatop DIGPROG register
 * and publish them via mxc_set_cpu_type()/imx_set_soc_revision().
 *
 * Fix over the original code: the device_node reference returned by
 * of_find_compatible_node() was never dropped; of_node_put() is now
 * called once the node is no longer needed.
 */
void __init imx_init_revision_from_anatop(void)
{
	struct device_node *np;
	void __iomem *anatop_base;
	unsigned int revision;
	u32 digprog;
	u16 offset = ANADIG_DIGPROG;

	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
	anatop_base = of_iomap(np, 0);
	WARN_ON(!anatop_base);
	/* i.MX6SL keeps DIGPROG at a different offset */
	if (of_device_is_compatible(np, "fsl,imx6sl-anatop"))
		offset = ANADIG_DIGPROG_IMX6SL;
	of_node_put(np);
	digprog = readl_relaxed(anatop_base + offset);
	iounmap(anatop_base);

	/* Low byte encodes the silicon revision */
	switch (digprog & 0xff) {
	case 0:
		revision = IMX_CHIP_REVISION_1_0;
		break;
	case 1:
		revision = IMX_CHIP_REVISION_1_1;
		break;
	case 2:
		revision = IMX_CHIP_REVISION_1_2;
		break;
	case 3:
		revision = IMX_CHIP_REVISION_1_3;
		break;
	case 4:
		revision = IMX_CHIP_REVISION_1_4;
		break;
	case 5:
		/*
		 * i.MX6DQ TO1.5 is defined as Rev 1.3 in Data Sheet, marked
		 * as 'D' in Part Number last character.
		 */
		revision = IMX_CHIP_REVISION_1_5;
		break;
	default:
		revision = IMX_CHIP_REVISION_UNKNOWN;
	}

	/* Bits 23..16 encode the CPU type */
	mxc_set_cpu_type(digprog >> 16 & 0xff);
	imx_set_soc_revision(revision);
}
/*
 * Look up the anatop syscon regmap (cached in the file-scope 'anatop'
 * pointer used by all helpers above) and disable USB charger detection.
 */
void __init imx_anatop_init(void)
{
	anatop = syscon_regmap_lookup_by_compatible("fsl,imx6q-anatop");
	if (IS_ERR(anatop)) {
		pr_err("%s: failed to find imx6q-anatop regmap!\n", __func__);
		return;
	}

	imx_anatop_usb_chrg_detect_disable();
}
| gpl-2.0 |
kref/linux-oxnas | drivers/pci/hotplug/cpqphp_core.c | 329 | 36552 | /*
* Compaq Hot Plug Controller Driver
*
* Copyright (C) 1995,2001 Compaq Computer Corporation
* Copyright (C) 2001 Greg Kroah-Hartman <greg@kroah.com>
* Copyright (C) 2001 IBM Corp.
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Send feedback to <greg@kroah.com>
*
* Jan 12, 2003 - Added 66/100/133MHz PCI-X support,
* Torben Mathiasen <torben.mathiasen@hp.com>
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/proc_fs.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <asm/uaccess.h>
#include "cpqphp.h"
#include "cpqphp_nvram.h"
/* Global variables */
int cpqhp_debug;
int cpqhp_legacy_mode;
struct controller *cpqhp_ctrl_list; /* = NULL */
struct pci_func *cpqhp_slot_list[256];
struct irq_routing_table *cpqhp_routing_table;
/* local variables */
static void __iomem *smbios_table;
static void __iomem *smbios_start;
static void __iomem *cpqhp_rom_start;
static bool power_mode;
static bool debug;
static int initialized;
#define DRIVER_VERSION "0.9.8"
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>"
#define DRIVER_DESC "Compaq Hot Plug PCI Controller Driver"
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
module_param(power_mode, bool, 0644);
MODULE_PARM_DESC(power_mode, "Power mode enabled or not");
module_param(debug, bool, 0644);
MODULE_PARM_DESC(debug, "Debugging mode enabled or not");
#define CPQHPC_MODULE_MINOR 208
/* SMBIOS slot-width byte 0x06 denotes a 64-bit slot. */
static inline int is_slot64bit(struct slot *slot)
{
	return readb(slot->p_sm_slot + SMBIOS_SLOT_WIDTH) == 0x06;
}
/* SMBIOS slot-type byte 0x0E denotes a 66 MHz capable slot. */
static inline int is_slot66mhz(struct slot *slot)
{
	return readb(slot->p_sm_slot + SMBIOS_SLOT_TYPE) == 0x0E;
}
/**
* detect_SMBIOS_pointer - find the System Management BIOS Table in mem region.
* @begin: begin pointer for region to be scanned.
* @end: end pointer for region to be scanned.
*
* Returns pointer to the head of the SMBIOS tables (or %NULL).
*/
static void __iomem *detect_SMBIOS_pointer(void __iomem *begin, void __iomem *end)
{
	void __iomem *fp;
	void __iomem *endp = end - sizeof(u32) + 1;

	/* The anchor string "_SM_" is aligned on a 16-byte boundary. */
	for (fp = begin; fp <= endp; fp += 16) {
		u8 c0 = readb(fp);
		u8 c1 = readb(fp + 1);
		u8 c2 = readb(fp + 2);
		u8 c3 = readb(fp + 3);

		if (c0 == '_' && c1 == 'S' && c2 == 'M' && c3 == '_') {
			dbg("Discovered SMBIOS Entry point at %p\n", fp);
			return fp;
		}
	}

	/* Not found: report NULL, matching the original debug output */
	fp = NULL;
	dbg("Discovered SMBIOS Entry point at %p\n", fp);
	return fp;
}
/**
* init_SERR - Initializes the per slot SERR generation.
* @ctrl: controller to use
*
* For unexpected switch opens
*/
/**
 * init_SERR - Initializes the per slot SERR generation.
 * @ctrl: controller to use
 *
 * For unexpected switch opens
 *
 * Fix over the original code: the locals tracking the physical slot
 * number were computed but never used (every iteration wrote the same
 * SLOT_SERR register); the dead code is removed, the write pattern is
 * unchanged.  NOTE(review): writing the same register once per slot
 * looks suspicious — confirm against the controller spec whether a
 * per-slot offset was intended.
 */
static int init_SERR(struct controller *ctrl)
{
	u32 number_of_slots;

	if (!ctrl)
		return 1;

	number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
	/* Loop through slots */
	while (number_of_slots--)
		writeb(0, ctrl->hpc_reg + SLOT_SERR);

	return 0;
}
/* Fetch the BIOS IRQ routing table; fail if it is absent or empty. */
static int init_cpqhp_routing_table(void)
{
	cpqhp_routing_table = pcibios_get_irq_routing_table();
	if (!cpqhp_routing_table)
		return -ENOMEM;

	if (cpqhp_routing_table_length() == 0) {
		/* An empty table is useless — discard it */
		kfree(cpqhp_routing_table);
		cpqhp_routing_table = NULL;
		return -1;
	}

	return 0;
}
/* nice debugging output */
static void pci_print_IRQ_route(void)
{
int len;
int loop;
u8 tbus, tdevice, tslot;
len = cpqhp_routing_table_length();
dbg("bus dev func slot\n");
for (loop = 0; loop < len; ++loop) {
tbus = cpqhp_routing_table->slots[loop].bus;
tdevice = cpqhp_routing_table->slots[loop].devfn;
tslot = cpqhp_routing_table->slots[loop].slot;
dbg("%d %d %d %d\n", tbus, tdevice >> 3, tdevice & 0x7, tslot);
}
return;
}
/**
* get_subsequent_smbios_entry: get the next entry from bios table.
* @smbios_start: where to start in the SMBIOS table
* @smbios_table: location of the SMBIOS table
* @curr: %NULL or pointer to previously returned structure
*
* Gets the first entry if previous == NULL;
* otherwise, returns the next entry.
* Uses global SMBIOS Table pointer.
*
* Returns a pointer to an SMBIOS structure or NULL if none found.
*/
static void __iomem *get_subsequent_smbios_entry(void __iomem *smbios_start,
						 void __iomem *smbios_table,
						 void __iomem *curr)
{
	u8 prev = 1;
	void __iomem *p;
	void __iomem *p_max;

	if (!smbios_table || !curr)
		return NULL;

	/* End of the whole SMBIOS table area */
	p_max = smbios_start + readw(smbios_table + ST_LENGTH);

	/* Skip the fixed-size part of the current structure */
	p = curr + readb(curr + SMBIOS_GENERIC_LENGTH);

	/* Scan for the double-NUL terminating the string section */
	while (p < p_max) {
		u8 byte = readb(p);

		p++;
		if (!prev && !byte)
			return p < p_max ? p : NULL;
		prev = byte;
	}

	return NULL;
}
/**
* get_SMBIOS_entry - return the requested SMBIOS entry or %NULL
* @smbios_start: where to start in the SMBIOS table
* @smbios_table: location of the SMBIOS table
* @type: SMBIOS structure type to be returned
* @previous: %NULL or pointer to previously returned structure
*
* Gets the first entry of the specified type if previous == %NULL;
* Otherwise, returns the next entry of the given type.
* Uses global SMBIOS Table pointer.
* Uses get_subsequent_smbios_entry.
*
* Returns a pointer to an SMBIOS structure or %NULL if none found.
*/
static void __iomem *get_SMBIOS_entry(void __iomem *smbios_start,
				      void __iomem *smbios_table,
				      u8 type,
				      void __iomem *previous)
{
	if (!smbios_table)
		return NULL;

	/* First call starts at the table head, otherwise advance one entry */
	if (!previous)
		previous = smbios_start;
	else
		previous = get_subsequent_smbios_entry(smbios_start,
						       smbios_table, previous);

	/* Walk forward until an entry of the requested type (or the end) */
	while (previous && readb(previous + SMBIOS_GENERIC_TYPE) != type)
		previous = get_subsequent_smbios_entry(smbios_start,
						       smbios_table, previous);

	return previous;
}
/* Release callback for pci_hp_deregister(): frees one slot's memory. */
static void release_slot(struct hotplug_slot *hotplug_slot)
{
	struct slot *s = hotplug_slot->private;

	dbg("%s - physical_slot = %s\n", __func__, slot_name(s));

	/* Free leaf-to-root: info, hotplug wrapper, then the slot itself */
	kfree(s->hotplug_slot->info);
	kfree(s->hotplug_slot);
	kfree(s);
}
/* Deregister every slot of a controller and release its resources. */
static int ctrl_slot_cleanup(struct controller *ctrl)
{
	struct slot *cur = ctrl->slot;

	ctrl->slot = NULL;

	while (cur) {
		/* Grab the link first: deregistration frees 'cur' via
		 * the release_slot callback. */
		struct slot *next = cur->next;

		pci_hp_deregister(cur->hotplug_slot);
		cur = next;
	}

	cpqhp_remove_debugfs_files(ctrl);

	/* Free IRQ associated with hot plug device */
	free_irq(ctrl->interrupt, ctrl);
	/* Unmap the memory */
	iounmap(ctrl->hpc_reg);
	/* Finally reclaim PCI mem */
	release_mem_region(pci_resource_start(ctrl->pci_dev, 0),
			   pci_resource_len(ctrl->pci_dev, 0));

	return 0;
}
/**
* get_slot_mapping - determine logical slot mapping for PCI device
*
* Won't work for more than one PCI-PCI bridge in a slot.
*
* @bus_num - bus number of PCI device
* @dev_num - device number of PCI device
* @slot - Pointer to u8 where slot number will be returned
*
* Output: SUCCESS or FAILURE
*/
static int
get_slot_mapping(struct pci_bus *bus, u8 bus_num, u8 dev_num, u8 *slot)
{
	u32 work;
	long len;
	long loop;

	u8 tbus, tdevice, tslot, bridgeSlot;

	dbg("%s: %p, %d, %d, %p\n", __func__, bus, bus_num, dev_num, slot);

	/* 0xFF = "no bridge slot recorded yet" sentinel */
	bridgeSlot = 0xFF;

	len = cpqhp_routing_table_length();
	for (loop = 0; loop < len; ++loop) {
		tbus = cpqhp_routing_table->slots[loop].bus;
		tdevice = cpqhp_routing_table->slots[loop].devfn >> 3;
		tslot = cpqhp_routing_table->slots[loop].slot;

		if ((tbus == bus_num) && (tdevice == dev_num)) {
			/* Exact match: direct mapping found */
			*slot = tslot;
			return 0;
		} else {
			/* Did not get a match on the target PCI device. Check
			 * if the current IRQ table entry is a PCI-to-PCI
			 * bridge device. If so, and it's secondary bus
			 * matches the bus number for the target device, I need
			 * to save the bridge's slot number. If I can not find
			 * an entry for the target device, I will have to
			 * assume it's on the other side of the bridge, and
			 * assign it the bridge's slot.
			 */
			/* NOTE(review): bus->number is overwritten here as a
			 * scratch value for the config-space reads; callers
			 * must not rely on it afterwards — confirm intended. */
			bus->number = tbus;
			pci_bus_read_config_dword(bus, PCI_DEVFN(tdevice, 0),
						  PCI_CLASS_REVISION, &work);

			if ((work >> 8) == PCI_TO_PCI_BRIDGE_CLASS) {
				pci_bus_read_config_dword(bus,
							  PCI_DEVFN(tdevice, 0),
							  PCI_PRIMARY_BUS, &work);
				/* See if bridge's secondary bus matches target bus. */
				if (((work >> 8) & 0x000000FF) == (long) bus_num)
					bridgeSlot = tslot;
			}
		}

	}

	/* If we got here, we didn't find an entry in the IRQ mapping table for
	 * the target PCI device. If we did determine that the target device
	 * is on the other side of a PCI-to-PCI bridge, return the slot number
	 * for the bridge.
	 */
	if (bridgeSlot != 0xFF) {
		*slot = bridgeSlot;
		return 0;
	}
	/* Couldn't find an entry in the routing table for this PCI device */
	return -1;
}
/**
* cpqhp_set_attention_status - Turns the Amber LED for a slot on or off
* @ctrl: struct controller to use
* @func: PCI device/function info
* @status: LED control flag: 1 = LED on, 0 = LED off
*/
static int
cpqhp_set_attention_status(struct controller *ctrl, struct pci_func *func,
			   u32 status)
{
	u8 hp_slot;

	if (func == NULL)
		return 1;

	hp_slot = func->device - ctrl->slot_device_offset;

	/* Wait for exclusive access to hardware */
	mutex_lock(&ctrl->crit_sect);

	switch (status) {
	case 0:
		amber_LED_off(ctrl, hp_slot);
		break;
	case 1:
		amber_LED_on(ctrl, hp_slot);
		break;
	default:
		/* Done with exclusive hardware access */
		mutex_unlock(&ctrl->crit_sect);
		return 1;
	}

	set_SOGO(ctrl);

	/* Wait for SOBS to be unset */
	wait_for_ctrl_irq(ctrl);

	/* Done with exclusive hardware access */
	mutex_unlock(&ctrl->crit_sect);

	return 0;
}
/**
* set_attention_status - Turns the Amber LED for a slot on or off
* @hotplug_slot: slot to change LED on
* @status: LED control flag
*/
static int set_attention_status(struct hotplug_slot *hotplug_slot, u8 status)
{
	struct slot *slot = hotplug_slot->private;
	struct controller *ctrl = slot->ctrl;
	struct pci_func *func;
	u8 bus, devfn, dev, fn;

	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));

	if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
		return -ENODEV;

	dev = devfn >> 3;
	fn = devfn & 0x7;
	dbg("bus, dev, fn = %d, %d, %d\n", bus, dev, fn);

	func = cpqhp_slot_find(bus, dev, fn);
	if (!func)
		return -ENODEV;

	return cpqhp_set_attention_status(ctrl, func, status);
}
/* enable_slot callback: insert/power up the board in this slot. */
static int process_SI(struct hotplug_slot *hotplug_slot)
{
	struct slot *slot = hotplug_slot->private;
	struct controller *ctrl = slot->ctrl;
	struct pci_func *func;
	u8 bus, devfn, dev, fn;

	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));

	if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
		return -ENODEV;

	dev = devfn >> 3;
	fn = devfn & 0x7;
	dbg("bus, dev, fn = %d, %d, %d\n", bus, dev, fn);

	func = cpqhp_slot_find(bus, dev, fn);
	if (!func)
		return -ENODEV;

	/* Record the location and mark the function unconfigured */
	func->bus = bus;
	func->device = dev;
	func->function = fn;
	func->configured = 0;
	dbg("board_added(%p, %p)\n", func, ctrl);

	return cpqhp_process_SI(ctrl, func);
}
/* disable_slot callback: remove/power down the board in this slot. */
static int process_SS(struct hotplug_slot *hotplug_slot)
{
	struct slot *slot = hotplug_slot->private;
	struct controller *ctrl = slot->ctrl;
	struct pci_func *func;
	u8 bus, devfn, dev, fn;

	dbg("%s - physical_slot = %s\n", __func__, slot_name(slot));

	if (cpqhp_get_bus_dev(ctrl, &bus, &devfn, slot->number) == -1)
		return -ENODEV;

	dev = devfn >> 3;
	fn = devfn & 0x7;
	dbg("bus, dev, fn = %d, %d, %d\n", bus, dev, fn);

	func = cpqhp_slot_find(bus, dev, fn);
	if (!func)
		return -ENODEV;

	dbg("In %s, slot_func = %p, ctrl = %p\n", __func__, func, ctrl);

	return cpqhp_process_SS(ctrl, func);
}
/* Run a hardware self-test on the slot's controller. */
static int hardware_test(struct hotplug_slot *hotplug_slot, u32 value)
{
	struct slot *s = hotplug_slot->private;

	dbg("%s - physical_slot = %s\n", __func__, slot_name(s));

	return cpqhp_hardware_test(s->ctrl, value);
}
/* Report whether the slot is powered/enabled. */
static int get_power_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
	struct slot *s = hotplug_slot->private;

	dbg("%s - physical_slot = %s\n", __func__, slot_name(s));

	*value = get_slot_enabled(s->ctrl, s);
	return 0;
}
/* Report the state of the slot's amber attention LED. */
static int get_attention_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
	struct slot *s = hotplug_slot->private;

	dbg("%s - physical_slot = %s\n", __func__, slot_name(s));

	*value = cpq_get_attention_status(s->ctrl, s);
	return 0;
}
/* Report the state of the slot's mechanical latch. */
static int get_latch_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
	struct slot *s = hotplug_slot->private;

	dbg("%s - physical_slot = %s\n", __func__, slot_name(s));

	*value = cpq_get_latch_status(s->ctrl, s);
	return 0;
}
/* Report whether an adapter card is present in the slot. */
static int get_adapter_status(struct hotplug_slot *hotplug_slot, u8 *value)
{
	struct slot *s = hotplug_slot->private;

	dbg("%s - physical_slot = %s\n", __func__, slot_name(s));

	*value = get_presence_status(s->ctrl, s);
	return 0;
}
/* Hotplug operations exported to the PCI hotplug core for each slot. */
static struct hotplug_slot_ops cpqphp_hotplug_slot_ops = {
	.set_attention_status =	set_attention_status,
	.enable_slot =		process_SI,
	.disable_slot =		process_SS,
	.hardware_test =	hardware_test,
	.get_power_status =	get_power_status,
	.get_attention_status =	get_attention_status,
	.get_latch_status =	get_latch_status,
	.get_adapter_status =	get_adapter_status,
};
#define SLOT_NAME_SIZE 10
/*
 * Allocate, populate and register a hotplug slot structure for each slot
 * reported by the controller's SLOT_MASK register.  On any allocation or
 * registration failure, the current slot's partial allocations are freed
 * (slots already linked into ctrl->slot are cleaned up later by
 * ctrl_slot_cleanup()).
 */
static int ctrl_slot_setup(struct controller *ctrl,
			   void __iomem *smbios_start,
			   void __iomem *smbios_table)
{
	struct slot *slot;
	struct hotplug_slot *hotplug_slot;
	struct hotplug_slot_info *hotplug_slot_info;
	struct pci_bus *bus = ctrl->pci_bus;
	u8 number_of_slots;
	u8 slot_device;
	u8 slot_number;
	u8 ctrl_slot;
	u32 tempdword;
	char name[SLOT_NAME_SIZE];
	void __iomem *slot_entry = NULL;
	int result;

	dbg("%s\n", __func__);

	/* Snapshot of the interrupt input register: presence/switch bits
	 * for all slots are decoded from it below. */
	tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);

	/* Low nibble = slot count, high nibble = first device number */
	number_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
	slot_device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
	slot_number = ctrl->first_slot;

	while (number_of_slots) {
		slot = kzalloc(sizeof(*slot), GFP_KERNEL);
		if (!slot) {
			result = -ENOMEM;
			goto error;
		}

		slot->hotplug_slot = kzalloc(sizeof(*(slot->hotplug_slot)),
						GFP_KERNEL);
		if (!slot->hotplug_slot) {
			result = -ENOMEM;
			goto error_slot;
		}
		hotplug_slot = slot->hotplug_slot;

		hotplug_slot->info = kzalloc(sizeof(*(hotplug_slot->info)),
							GFP_KERNEL);
		if (!hotplug_slot->info) {
			result = -ENOMEM;
			goto error_hpslot;
		}
		hotplug_slot_info = hotplug_slot->info;

		slot->ctrl = ctrl;
		slot->bus = ctrl->bus;
		slot->device = slot_device;
		slot->number = slot_number;
		dbg("slot->number = %u\n", slot->number);

		/* Find the SMBIOS type-9 (system slot) record matching this
		 * slot number, to read width/speed capabilities from. */
		slot_entry = get_SMBIOS_entry(smbios_start, smbios_table, 9,
					slot_entry);

		while (slot_entry && (readw(slot_entry + SMBIOS_SLOT_NUMBER) !=
				slot->number)) {
			slot_entry = get_SMBIOS_entry(smbios_start,
						smbios_table, 9, slot_entry);
		}

		slot->p_sm_slot = slot_entry;

		/* Debounce timer for the slot pushbutton (5 s) */
		init_timer(&slot->task_event);
		slot->task_event.expires = jiffies + 5 * HZ;
		slot->task_event.function = cpqhp_pushbutton_thread;

		/*FIXME: these capabilities aren't used but if they are
		 *	 they need to be correctly implemented
		 */
		slot->capabilities |= PCISLOT_REPLACE_SUPPORTED;
		slot->capabilities |= PCISLOT_INTERLOCK_SUPPORTED;

		if (is_slot64bit(slot))
			slot->capabilities |= PCISLOT_64_BIT_SUPPORTED;
		if (is_slot66mhz(slot))
			slot->capabilities |= PCISLOT_66_MHZ_SUPPORTED;
		if (bus->cur_bus_speed == PCI_SPEED_66MHz)
			slot->capabilities |= PCISLOT_66_MHZ_OPERATION;

		ctrl_slot =
			slot_device - (readb(ctrl->hpc_reg + SLOT_MASK) >> 4);

		/* Check presence */
		slot->capabilities |=
			((((~tempdword) >> 23) |
			 ((~tempdword) >> 15)) >> ctrl_slot) & 0x02;
		/* Check the switch state */
		slot->capabilities |=
			((~tempdword & 0xFF) >> ctrl_slot) & 0x01;
		/* Check the slot enable */
		slot->capabilities |=
			((read_slot_enable(ctrl) << 2) >> ctrl_slot) & 0x04;

		/* register this slot with the hotplug pci core */
		hotplug_slot->release = &release_slot;
		hotplug_slot->private = slot;
		snprintf(name, SLOT_NAME_SIZE, "%u", slot->number);
		hotplug_slot->ops = &cpqphp_hotplug_slot_ops;

		hotplug_slot_info->power_status = get_slot_enabled(ctrl, slot);
		hotplug_slot_info->attention_status =
			cpq_get_attention_status(ctrl, slot);
		hotplug_slot_info->latch_status =
			cpq_get_latch_status(ctrl, slot);
		hotplug_slot_info->adapter_status =
			get_presence_status(ctrl, slot);

		dbg("registering bus %d, dev %d, number %d, "
				"ctrl->slot_device_offset %d, slot %d\n",
				slot->bus, slot->device,
				slot->number, ctrl->slot_device_offset,
				slot_number);
		result = pci_hp_register(hotplug_slot,
					 ctrl->pci_dev->bus,
					 slot->device,
					 name);
		if (result) {
			err("pci_hp_register failed with error %d\n", result);
			goto error_info;
		}

		/* Push onto the controller's slot list */
		slot->next = ctrl->slot;
		ctrl->slot = slot;

		number_of_slots--;
		slot_device++;
		slot_number++;
	}

	return 0;
error_info:
	kfree(hotplug_slot_info);
error_hpslot:
	kfree(hotplug_slot);
error_slot:
	kfree(slot);
error:
	return result;
}
/*
 * Perform driver-wide one-shot initialization: IRQ routing table, event
 * thread, slot lists, and the ROM/SMBIOS mappings used for NVRAM and
 * resource discovery.  Guarded by the 'initialized' flag so repeated
 * probe calls are cheap no-ops.
 *
 * Returns 0 on success or a negative errno; on failure all mappings
 * created here are torn down.
 */
static int one_time_init(void)
{
	int loop;
	int retval = 0;

	if (initialized)
		return 0;

	power_mode = 0;

	retval = init_cpqhp_routing_table();
	if (retval)
		goto error;

	if (cpqhp_debug)
		pci_print_IRQ_route();

	dbg("Initialize + Start the notification mechanism \n");

	retval = cpqhp_event_start_thread();
	if (retval)
		goto error;

	dbg("Initialize slot lists\n");
	for (loop = 0; loop < 256; loop++)
		cpqhp_slot_list[loop] = NULL;

	/* FIXME: We also need to hook the NMI handler eventually.
	 * this also needs to be worked with Christoph
	 * register_NMI_handler();
	 */
	/* Map rom address */
	cpqhp_rom_start = ioremap(ROM_PHY_ADDR, ROM_PHY_LEN);
	if (!cpqhp_rom_start) {
		err("Could not ioremap memory region for ROM\n");
		retval = -EIO;
		goto error;
	}

	/* Now, map the int15 entry point if we are on compaq specific
	 * hardware
	 */
	compaq_nvram_init(cpqhp_rom_start);

	/* Map smbios table entry point structure */
	smbios_table = detect_SMBIOS_pointer(cpqhp_rom_start,
					cpqhp_rom_start + ROM_PHY_LEN);
	if (!smbios_table) {
		err("Could not find the SMBIOS pointer in memory\n");
		retval = -EIO;
		goto error_rom_start;
	}

	smbios_start = ioremap(readl(smbios_table + ST_ADDRESS),
					readw(smbios_table + ST_LENGTH));
	if (!smbios_start) {
		err("Could not ioremap memory region taken from SMBIOS values\n");
		retval = -EIO;
		/* Fix: previously jumped to a label that iounmap'd the very
		 * mapping that just failed (smbios_start == NULL).  Only the
		 * ROM mapping needs to be undone here. */
		goto error_rom_start;
	}

	initialized = 1;

	return retval;

error_rom_start:
	iounmap(cpqhp_rom_start);
error:
	return retval;
}
static int cpqhpc_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
u8 num_of_slots = 0;
u8 hp_slot = 0;
u8 device;
u8 bus_cap;
u16 temp_word;
u16 vendor_id;
u16 subsystem_vid;
u16 subsystem_deviceid;
u32 rc;
struct controller *ctrl;
struct pci_func *func;
struct pci_bus *bus;
int err;
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR MY_NAME ": cannot enable PCI device %s (%d)\n",
pci_name(pdev), err);
return err;
}
bus = pdev->subordinate;
if (!bus) {
dev_notice(&pdev->dev, "the device is not a bridge, "
"skipping\n");
rc = -ENODEV;
goto err_disable_device;
}
/* Need to read VID early b/c it's used to differentiate CPQ and INTC
* discovery
*/
vendor_id = pdev->vendor;
if ((vendor_id != PCI_VENDOR_ID_COMPAQ) &&
(vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_non_compaq_or_intel);
rc = -ENODEV;
goto err_disable_device;
}
dbg("Vendor ID: %x\n", vendor_id);
dbg("revision: %d\n", pdev->revision);
if ((vendor_id == PCI_VENDOR_ID_COMPAQ) && (!pdev->revision)) {
err(msg_HPC_rev_error);
rc = -ENODEV;
goto err_disable_device;
}
/* Check for the proper subsystem IDs
* Intel uses a different SSID programming model than Compaq.
* For Intel, each SSID bit identifies a PHP capability.
* Also Intel HPCs may have RID=0.
*/
if ((pdev->revision <= 2) && (vendor_id != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_not_supported);
return -ENODEV;
}
/* TODO: This code can be made to support non-Compaq or Intel
* subsystem IDs
*/
subsystem_vid = pdev->subsystem_vendor;
dbg("Subsystem Vendor ID: %x\n", subsystem_vid);
if ((subsystem_vid != PCI_VENDOR_ID_COMPAQ) && (subsystem_vid != PCI_VENDOR_ID_INTEL)) {
err(msg_HPC_non_compaq_or_intel);
rc = -ENODEV;
goto err_disable_device;
}
ctrl = kzalloc(sizeof(struct controller), GFP_KERNEL);
if (!ctrl) {
err("%s : out of memory\n", __func__);
rc = -ENOMEM;
goto err_disable_device;
}
subsystem_deviceid = pdev->subsystem_device;
info("Hot Plug Subsystem Device ID: %x\n", subsystem_deviceid);
/* Set Vendor ID, so it can be accessed later from other
* functions
*/
ctrl->vendor_id = vendor_id;
switch (subsystem_vid) {
case PCI_VENDOR_ID_COMPAQ:
if (pdev->revision >= 0x13) { /* CIOBX */
ctrl->push_flag = 1;
ctrl->slot_switch_type = 1;
ctrl->push_button = 1;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
ctrl->pcix_support = 1;
ctrl->pcix_speed_capability = 1;
pci_read_config_byte(pdev, 0x41, &bus_cap);
if (bus_cap & 0x80) {
dbg("bus max supports 133MHz PCI-X\n");
bus->max_bus_speed = PCI_SPEED_133MHz_PCIX;
break;
}
if (bus_cap & 0x40) {
dbg("bus max supports 100MHz PCI-X\n");
bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
break;
}
if (bus_cap & 20) {
dbg("bus max supports 66MHz PCI-X\n");
bus->max_bus_speed = PCI_SPEED_66MHz_PCIX;
break;
}
if (bus_cap & 10) {
dbg("bus max supports 66MHz PCI\n");
bus->max_bus_speed = PCI_SPEED_66MHz;
break;
}
break;
}
switch (subsystem_deviceid) {
case PCI_SUB_HPC_ID:
/* Original 6500/7000 implementation */
ctrl->slot_switch_type = 1;
bus->max_bus_speed = PCI_SPEED_33MHz;
ctrl->push_button = 0;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
ctrl->pcix_support = 0;
ctrl->pcix_speed_capability = 0;
break;
case PCI_SUB_HPC_ID2:
/* First Pushbutton implementation */
ctrl->push_flag = 1;
ctrl->slot_switch_type = 1;
bus->max_bus_speed = PCI_SPEED_33MHz;
ctrl->push_button = 1;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
ctrl->pcix_support = 0;
ctrl->pcix_speed_capability = 0;
break;
case PCI_SUB_HPC_ID_INTC:
/* Third party (6500/7000) */
ctrl->slot_switch_type = 1;
bus->max_bus_speed = PCI_SPEED_33MHz;
ctrl->push_button = 0;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
ctrl->pcix_support = 0;
ctrl->pcix_speed_capability = 0;
break;
case PCI_SUB_HPC_ID3:
/* First 66 Mhz implementation */
ctrl->push_flag = 1;
ctrl->slot_switch_type = 1;
bus->max_bus_speed = PCI_SPEED_66MHz;
ctrl->push_button = 1;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
ctrl->pcix_support = 0;
ctrl->pcix_speed_capability = 0;
break;
case PCI_SUB_HPC_ID4:
/* First PCI-X implementation, 100MHz */
ctrl->push_flag = 1;
ctrl->slot_switch_type = 1;
bus->max_bus_speed = PCI_SPEED_100MHz_PCIX;
ctrl->push_button = 1;
ctrl->pci_config_space = 1;
ctrl->defeature_PHP = 1;
ctrl->pcix_support = 1;
ctrl->pcix_speed_capability = 0;
break;
default:
err(msg_HPC_not_supported);
rc = -ENODEV;
goto err_free_ctrl;
}
break;
case PCI_VENDOR_ID_INTEL:
/* Check for speed capability (0=33, 1=66) */
if (subsystem_deviceid & 0x0001)
bus->max_bus_speed = PCI_SPEED_66MHz;
else
bus->max_bus_speed = PCI_SPEED_33MHz;
/* Check for push button */
if (subsystem_deviceid & 0x0002)
ctrl->push_button = 0;
else
ctrl->push_button = 1;
/* Check for slot switch type (0=mechanical, 1=not mechanical) */
if (subsystem_deviceid & 0x0004)
ctrl->slot_switch_type = 0;
else
ctrl->slot_switch_type = 1;
/* PHP Status (0=De-feature PHP, 1=Normal operation) */
if (subsystem_deviceid & 0x0008)
ctrl->defeature_PHP = 1; /* PHP supported */
else
ctrl->defeature_PHP = 0; /* PHP not supported */
/* Alternate Base Address Register Interface
* (0=not supported, 1=supported)
*/
if (subsystem_deviceid & 0x0010)
ctrl->alternate_base_address = 1;
else
ctrl->alternate_base_address = 0;
/* PCI Config Space Index (0=not supported, 1=supported) */
if (subsystem_deviceid & 0x0020)
ctrl->pci_config_space = 1;
else
ctrl->pci_config_space = 0;
/* PCI-X support */
if (subsystem_deviceid & 0x0080) {
ctrl->pcix_support = 1;
if (subsystem_deviceid & 0x0040)
/* 133MHz PCI-X if bit 7 is 1 */
ctrl->pcix_speed_capability = 1;
else
/* 100MHz PCI-X if bit 7 is 1 and bit 0 is 0, */
/* 66MHz PCI-X if bit 7 is 1 and bit 0 is 1 */
ctrl->pcix_speed_capability = 0;
} else {
/* Conventional PCI */
ctrl->pcix_support = 0;
ctrl->pcix_speed_capability = 0;
}
break;
default:
err(msg_HPC_not_supported);
rc = -ENODEV;
goto err_free_ctrl;
}
/* Tell the user that we found one. */
info("Initializing the PCI hot plug controller residing on PCI bus %d\n",
pdev->bus->number);
dbg("Hotplug controller capabilities:\n");
dbg(" speed_capability %d\n", bus->max_bus_speed);
dbg(" slot_switch_type %s\n", ctrl->slot_switch_type ?
"switch present" : "no switch");
dbg(" defeature_PHP %s\n", ctrl->defeature_PHP ?
"PHP supported" : "PHP not supported");
dbg(" alternate_base_address %s\n", ctrl->alternate_base_address ?
"supported" : "not supported");
dbg(" pci_config_space %s\n", ctrl->pci_config_space ?
"supported" : "not supported");
dbg(" pcix_speed_capability %s\n", ctrl->pcix_speed_capability ?
"supported" : "not supported");
dbg(" pcix_support %s\n", ctrl->pcix_support ?
"supported" : "not supported");
ctrl->pci_dev = pdev;
pci_set_drvdata(pdev, ctrl);
/* make our own copy of the pci bus structure,
* as we like tweaking it a lot */
ctrl->pci_bus = kmemdup(pdev->bus, sizeof(*ctrl->pci_bus), GFP_KERNEL);
if (!ctrl->pci_bus) {
err("out of memory\n");
rc = -ENOMEM;
goto err_free_ctrl;
}
ctrl->bus = pdev->bus->number;
ctrl->rev = pdev->revision;
dbg("bus device function rev: %d %d %d %d\n", ctrl->bus,
PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), ctrl->rev);
mutex_init(&ctrl->crit_sect);
init_waitqueue_head(&ctrl->queue);
/* initialize our threads if they haven't already been started up */
rc = one_time_init();
if (rc) {
goto err_free_bus;
}
dbg("pdev = %p\n", pdev);
dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
dbg("pci resource len %llx\n", (unsigned long long)pci_resource_len(pdev, 0));
if (!request_mem_region(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0), MY_NAME)) {
err("cannot reserve MMIO region\n");
rc = -ENOMEM;
goto err_free_bus;
}
ctrl->hpc_reg = ioremap(pci_resource_start(pdev, 0),
pci_resource_len(pdev, 0));
if (!ctrl->hpc_reg) {
err("cannot remap MMIO region %llx @ %llx\n",
(unsigned long long)pci_resource_len(pdev, 0),
(unsigned long long)pci_resource_start(pdev, 0));
rc = -ENODEV;
goto err_free_mem_region;
}
/* Check for 66Mhz operation */
bus->cur_bus_speed = get_controller_speed(ctrl);
/********************************************************
*
* Save configuration headers for this and
* subordinate PCI buses
*
********************************************************/
/* find the physical slot number of the first hot plug slot */
/* Get slot won't work for devices behind bridges, but
* in this case it will always be called for the "base"
* bus/dev/func of a slot.
* CS: this is leveraging the PCIIRQ routing code from the kernel
* (pci-pc.c: get_irq_routing_table) */
rc = get_slot_mapping(ctrl->pci_bus, pdev->bus->number,
(readb(ctrl->hpc_reg + SLOT_MASK) >> 4),
&(ctrl->first_slot));
dbg("get_slot_mapping: first_slot = %d, returned = %d\n",
ctrl->first_slot, rc);
if (rc) {
err(msg_initialization_err, rc);
goto err_iounmap;
}
/* Store PCI Config Space for all devices on this bus */
rc = cpqhp_save_config(ctrl, ctrl->bus, readb(ctrl->hpc_reg + SLOT_MASK));
if (rc) {
err("%s: unable to save PCI configuration data, error %d\n",
__func__, rc);
goto err_iounmap;
}
/*
* Get IO, memory, and IRQ resources for new devices
*/
/* The next line is required for cpqhp_find_available_resources */
ctrl->interrupt = pdev->irq;
if (ctrl->interrupt < 0x10) {
cpqhp_legacy_mode = 1;
dbg("System seems to be configured for Full Table Mapped MPS mode\n");
}
ctrl->cfgspc_irq = 0;
pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, &ctrl->cfgspc_irq);
rc = cpqhp_find_available_resources(ctrl, cpqhp_rom_start);
ctrl->add_support = !rc;
if (rc) {
dbg("cpqhp_find_available_resources = 0x%x\n", rc);
err("unable to locate PCI configuration resources for hot plug add.\n");
goto err_iounmap;
}
/*
* Finish setting up the hot plug ctrl device
*/
ctrl->slot_device_offset = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
dbg("NumSlots %d \n", ctrl->slot_device_offset);
ctrl->next_event = 0;
/* Setup the slot information structures */
rc = ctrl_slot_setup(ctrl, smbios_start, smbios_table);
if (rc) {
err(msg_initialization_err, 6);
err("%s: unable to save PCI configuration data, error %d\n",
__func__, rc);
goto err_iounmap;
}
/* Mask all general input interrupts */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_MASK);
/* set up the interrupt */
dbg("HPC interrupt = %d \n", ctrl->interrupt);
if (request_irq(ctrl->interrupt, cpqhp_ctrl_intr,
IRQF_SHARED, MY_NAME, ctrl)) {
err("Can't get irq %d for the hotplug pci controller\n",
ctrl->interrupt);
rc = -ENODEV;
goto err_iounmap;
}
/* Enable Shift Out interrupt and clear it, also enable SERR on power
* fault
*/
temp_word = readw(ctrl->hpc_reg + MISC);
temp_word |= 0x4006;
writew(temp_word, ctrl->hpc_reg + MISC);
/* Changed 05/05/97 to clear all interrupts at start */
writel(0xFFFFFFFFL, ctrl->hpc_reg + INT_INPUT_CLEAR);
ctrl->ctrl_int_comp = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
writel(0x0L, ctrl->hpc_reg + INT_MASK);
if (!cpqhp_ctrl_list) {
cpqhp_ctrl_list = ctrl;
ctrl->next = NULL;
} else {
ctrl->next = cpqhp_ctrl_list;
cpqhp_ctrl_list = ctrl;
}
/* turn off empty slots here unless command line option "ON" set
* Wait for exclusive access to hardware
*/
mutex_lock(&ctrl->crit_sect);
num_of_slots = readb(ctrl->hpc_reg + SLOT_MASK) & 0x0F;
/* find first device number for the ctrl */
device = readb(ctrl->hpc_reg + SLOT_MASK) >> 4;
while (num_of_slots) {
dbg("num_of_slots: %d\n", num_of_slots);
func = cpqhp_slot_find(ctrl->bus, device, 0);
if (!func)
break;
hp_slot = func->device - ctrl->slot_device_offset;
dbg("hp_slot: %d\n", hp_slot);
/* We have to save the presence info for these slots */
temp_word = ctrl->ctrl_int_comp >> 16;
func->presence_save = (temp_word >> hp_slot) & 0x01;
func->presence_save |= (temp_word >> (hp_slot + 7)) & 0x02;
if (ctrl->ctrl_int_comp & (0x1L << hp_slot))
func->switch_save = 0;
else
func->switch_save = 0x10;
if (!power_mode)
if (!func->is_a_board) {
green_LED_off(ctrl, hp_slot);
slot_disable(ctrl, hp_slot);
}
device++;
num_of_slots--;
}
if (!power_mode) {
set_SOGO(ctrl);
/* Wait for SOBS to be unset */
wait_for_ctrl_irq(ctrl);
}
rc = init_SERR(ctrl);
if (rc) {
err("init_SERR failed\n");
mutex_unlock(&ctrl->crit_sect);
goto err_free_irq;
}
/* Done with exclusive hardware access */
mutex_unlock(&ctrl->crit_sect);
cpqhp_create_debugfs_files(ctrl);
return 0;
err_free_irq:
free_irq(ctrl->interrupt, ctrl);
err_iounmap:
iounmap(ctrl->hpc_reg);
err_free_mem_region:
release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
err_free_bus:
kfree(ctrl->pci_bus);
err_free_ctrl:
kfree(ctrl);
err_disable_device:
pci_disable_device(pdev);
return rc;
}
static void __exit unload_cpqphpd(void)
{
struct pci_func *next;
struct pci_func *TempSlot;
int loop;
u32 rc;
struct controller *ctrl;
struct controller *tctrl;
struct pci_resource *res;
struct pci_resource *tres;
rc = compaq_nvram_store(cpqhp_rom_start);
ctrl = cpqhp_ctrl_list;
while (ctrl) {
if (ctrl->hpc_reg) {
u16 misc;
rc = read_slot_enable (ctrl);
writeb(0, ctrl->hpc_reg + SLOT_SERR);
writel(0xFFFFFFC0L | ~rc, ctrl->hpc_reg + INT_MASK);
misc = readw(ctrl->hpc_reg + MISC);
misc &= 0xFFFD;
writew(misc, ctrl->hpc_reg + MISC);
}
ctrl_slot_cleanup(ctrl);
res = ctrl->io_head;
while (res) {
tres = res;
res = res->next;
kfree(tres);
}
res = ctrl->mem_head;
while (res) {
tres = res;
res = res->next;
kfree(tres);
}
res = ctrl->p_mem_head;
while (res) {
tres = res;
res = res->next;
kfree(tres);
}
res = ctrl->bus_head;
while (res) {
tres = res;
res = res->next;
kfree(tres);
}
kfree (ctrl->pci_bus);
tctrl = ctrl;
ctrl = ctrl->next;
kfree(tctrl);
}
for (loop = 0; loop < 256; loop++) {
next = cpqhp_slot_list[loop];
while (next != NULL) {
res = next->io_head;
while (res) {
tres = res;
res = res->next;
kfree(tres);
}
res = next->mem_head;
while (res) {
tres = res;
res = res->next;
kfree(tres);
}
res = next->p_mem_head;
while (res) {
tres = res;
res = res->next;
kfree(tres);
}
res = next->bus_head;
while (res) {
tres = res;
res = res->next;
kfree(tres);
}
TempSlot = next;
next = next->next;
kfree(TempSlot);
}
}
/* Stop the notification mechanism */
if (initialized)
cpqhp_event_stop_thread();
/* unmap the rom address */
if (cpqhp_rom_start)
iounmap(cpqhp_rom_start);
if (smbios_start)
iounmap(smbios_start);
}
/* PCI ID table: match on class alone (any standard PCI hotplug
 * controller, any vendor); cpqhpc_probe() then filters by vendor and
 * subsystem IDs. */
static struct pci_device_id hpcd_pci_tbl[] = {
	{
	/* handle any PCI Hotplug controller */
	.class = ((PCI_CLASS_SYSTEM_PCI_HOTPLUG << 8) | 0x00),
	.class_mask = ~0,
	/* no matter who makes it */
	.vendor = PCI_ANY_ID,
	.device = PCI_ANY_ID,
	.subvendor = PCI_ANY_ID,
	.subdevice = PCI_ANY_ID,
	}, { /* end: all zeroes */ }
};

MODULE_DEVICE_TABLE(pci, hpcd_pci_tbl);
/* PCI driver glue.  Note there is deliberately no .remove callback:
 * controllers are only torn down all at once in unload_cpqphpd() at
 * module exit. */
static struct pci_driver cpqhpc_driver = {
	.name = "compaq_pci_hotplug",
	.id_table = hpcd_pci_tbl,
	.probe = cpqhpc_probe,
	/* remove:	cpqhpc_remove_one, */
};
/*
 * Module entry point: propagate the 'debug' module parameter, create the
 * debugfs root, then register the PCI driver.  Returns the result of
 * pci_register_driver() (0 on success, negative errno otherwise).
 */
static int __init cpqhpc_init(void)
{
	int result;

	cpqhp_debug = debug;

	info (DRIVER_DESC " version: " DRIVER_VERSION "\n");
	cpqhp_initialize_debugfs();

	result = pci_register_driver(&cpqhpc_driver);
	dbg("pci_register_driver = %d\n", result);
	return result;
}
/*
 * Module exit: tear down all controllers first (unload_cpqphpd), then
 * unregister the PCI driver, then remove the debugfs tree.  The order
 * matters — the debugfs files reference controller state freed by
 * unload_cpqphpd().
 */
static void __exit cpqhpc_cleanup(void)
{
	dbg("unload_cpqphpd()\n");
	unload_cpqphpd();

	dbg("pci_unregister_driver\n");
	pci_unregister_driver(&cpqhpc_driver);
	cpqhp_shutdown_debugfs();
}
module_init(cpqhpc_init);
module_exit(cpqhpc_cleanup);
| gpl-2.0 |
TomGiordano/kernel_zte_blade | drivers/telephony/ixj.c | 841 | 319895 | /****************************************************************************
* ixj.c
*
* Device Driver for Quicknet Technologies, Inc.'s Telephony cards
* including the Internet PhoneJACK, Internet PhoneJACK Lite,
* Internet PhoneJACK PCI, Internet LineJACK, Internet PhoneCARD and
* SmartCABLE
*
* (c) Copyright 1999-2001 Quicknet Technologies, Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Author: Ed Okerson, <eokerson@quicknet.net>
*
* Contributors: Greg Herlein, <gherlein@quicknet.net>
* David W. Erhart, <derhart@quicknet.net>
* John Sellers, <jsellers@quicknet.net>
* Mike Preston, <mpreston@quicknet.net>
*
* Fixes: David Huggins-Daines, <dhd@cepstral.com>
* Fabio Ferrari, <fabio.ferrari@digitro.com.br>
* Artis Kugevics, <artis@mt.lv>
* Daniele Bellucci, <bellucda@tiscali.it>
*
* More information about the hardware related to this driver can be found
* at our website: http://www.quicknet.net
*
* IN NO EVENT SHALL QUICKNET TECHNOLOGIES, INC. BE LIABLE TO ANY PARTY FOR
* DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
* OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF QUICKNET
* TECHNOLOGIES, INC. HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* QUICKNET TECHNOLOGIES, INC. SPECIFICALLY DISCLAIMS ANY WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
* ON AN "AS IS" BASIS, AND QUICKNET TECHNOLOGIES, INC. HAS NO OBLIGATION
* TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
*
***************************************************************************/
/*
* Revision 4.8 2003/07/09 19:39:00 Daniele Bellucci
* Audit some copy_*_user and minor cleanup.
*
* Revision 4.7 2001/08/13 06:19:33 craigs
* Added additional changes from Alan Cox and John Anderson for
* 2.2 to 2.4 cleanup and bounds checking
*
* Revision 4.6 2001/08/13 01:05:05 craigs
* Really fixed PHONE_QUERY_CODEC problem this time
*
* Revision 4.5 2001/08/13 00:11:03 craigs
* Fixed problem in handling of PHONE_QUERY_CODEC, thanks to Shane Anderson
*
* Revision 4.4 2001/08/07 07:58:12 craigs
* Changed back to three digit version numbers
* Added tagbuild target to allow automatic and easy tagging of versions
*
* Revision 4.3 2001/08/07 07:24:47 craigs
* Added ixj-ver.h to allow easy configuration management of driver
* Added display of version number in /prox/ixj
*
* Revision 4.2 2001/08/06 07:07:19 craigs
* Reverted IXJCTL_DSP_TYPE and IXJCTL_DSP_VERSION files to original
* behaviour of returning int rather than short *
*
* Revision 4.1 2001/08/05 00:17:37 craigs
* More changes for correct PCMCIA installation
* Start of changes for backward Linux compatibility
*
* Revision 4.0 2001/08/04 12:33:12 craigs
* New version using GNU autoconf
*
* Revision 3.105 2001/07/20 23:14:32 eokerson
* More work on CallerID generation when using ring cadences.
*
* Revision 3.104 2001/07/06 01:33:55 eokerson
* Some bugfixes from Robert Vojta <vojta@ipex.cz> and a few mods to the Makefile.
*
* Revision 3.103 2001/07/05 19:20:16 eokerson
* Updated HOWTO
* Changed mic gain to 30dB on Internet LineJACK mic/speaker port.
*
* Revision 3.102 2001/07/03 23:51:21 eokerson
* Un-mute mic on Internet LineJACK when in speakerphone mode.
*
* Revision 3.101 2001/07/02 19:26:56 eokerson
* Removed initialization of ixjdebug and ixj_convert_loaded so they will go in the .bss instead of the .data
*
* Revision 3.100 2001/07/02 19:18:27 eokerson
* Changed driver to make dynamic allocation possible. We now pass IXJ * between functions instead of array indexes.
* Fixed the way the POTS and PSTN ports interact during a PSTN call to allow local answering.
* Fixed speaker mode on Internet LineJACK.
*
* Revision 3.99 2001/05/09 14:11:16 eokerson
* Fixed kmalloc error in ixj_build_filter_cadence. Thanks David Chan <cat@waulogy.stanford.edu>.
*
* Revision 3.98 2001/05/08 19:55:33 eokerson
* Fixed POTS hookstate detection while it is connected to PSTN port.
*
* Revision 3.97 2001/05/08 00:01:04 eokerson
* Fixed kernel oops when sending caller ID data.
*
* Revision 3.96 2001/05/04 23:09:30 eokerson
* Now uses one kernel timer for each card, instead of one for the entire driver.
*
* Revision 3.95 2001/04/25 22:06:47 eokerson
* Fixed squawking at beginning of some G.723.1 calls.
*
* Revision 3.94 2001/04/03 23:42:00 eokerson
* Added linear volume ioctls
* Added raw filter load ioctl
*
* Revision 3.93 2001/02/27 01:00:06 eokerson
* Fixed blocking in CallerID.
* Reduced size of ixj structure for smaller driver footprint.
*
* Revision 3.92 2001/02/20 22:02:59 eokerson
* Fixed isapnp and pcmcia module compatibility for 2.4.x kernels.
* Improved PSTN ring detection.
* Fixed wink generation on POTS ports.
*
* Revision 3.91 2001/02/13 00:55:44 eokerson
* Turn AEC back on after changing frame sizes.
*
* Revision 3.90 2001/02/12 16:42:00 eokerson
* Added ALAW codec, thanks to Fabio Ferrari for the table based converters to make ALAW from ULAW.
*
* Revision 3.89 2001/02/12 15:41:16 eokerson
* Fix from Artis Kugevics - Tone gains were not being set correctly.
*
* Revision 3.88 2001/02/05 23:25:42 eokerson
* Fixed lockup bugs with deregister.
*
* Revision 3.87 2001/01/29 21:00:39 eokerson
* Fix from Fabio Ferrari <fabio.ferrari@digitro.com.br> to properly handle EAGAIN and EINTR during non-blocking write.
* Updated copyright date.
*
* Revision 3.86 2001/01/23 23:53:46 eokerson
* Fixes to G.729 compatibility.
*
* Revision 3.85 2001/01/23 21:30:36 eokerson
* Added verbage about cards supported.
* Removed commands that put the card in low power mode at some times that it should not be in low power mode.
*
* Revision 3.84 2001/01/22 23:32:10 eokerson
* Some bugfixes from David Huggins-Daines, <dhd@cepstral.com> and other cleanups.
*
* Revision 3.83 2001/01/19 14:51:41 eokerson
* Fixed ixj_WriteDSPCommand to decrement usage counter when command fails.
*
* Revision 3.82 2001/01/19 00:34:49 eokerson
* Added verbosity to write overlap errors.
*
* Revision 3.81 2001/01/18 23:56:54 eokerson
* Fixed PSTN line test functions.
*
* Revision 3.80 2001/01/18 22:29:27 eokerson
* Updated AEC/AGC values for different cards.
*
* Revision 3.79 2001/01/17 02:58:54 eokerson
* Fixed AEC reset after Caller ID.
* Fixed Codec lockup after Caller ID on Call Waiting when not using 30ms frames.
*
* Revision 3.78 2001/01/16 19:43:09 eokerson
* Added support for Linux 2.4.x kernels.
*
* Revision 3.77 2001/01/09 04:00:52 eokerson
* Linetest will now test the line, even if it has previously succeded.
*
* Revision 3.76 2001/01/08 19:27:00 eokerson
* Fixed problem with standard cable on Internet PhoneCARD.
*
* Revision 3.75 2000/12/22 16:52:14 eokerson
* Modified to allow hookstate detection on the POTS port when the PSTN port is selected.
*
* Revision 3.74 2000/12/08 22:41:50 eokerson
* Added capability for G729B.
*
* Revision 3.73 2000/12/07 23:35:16 eokerson
* Added capability to have different ring pattern before CallerID data.
* Added hookstate checks in CallerID routines to stop FSK.
*
* Revision 3.72 2000/12/06 19:31:31 eokerson
* Modified signal behavior to only send one signal per event.
*
* Revision 3.71 2000/12/06 03:23:08 eokerson
* Fixed CallerID on Call Waiting.
*
* Revision 3.70 2000/12/04 21:29:37 eokerson
* Added checking to Smart Cable gain functions.
*
* Revision 3.69 2000/12/04 21:05:20 eokerson
* Changed ixjdebug levels.
* Added ioctls to change gains in Internet Phone CARD Smart Cable.
*
* Revision 3.68 2000/12/04 00:17:21 craigs
* Changed mixer voice gain to +6dB rather than 0dB
*
* Revision 3.67 2000/11/30 21:25:51 eokerson
* Fixed write signal errors.
*
* Revision 3.66 2000/11/29 22:42:44 eokerson
* Fixed PSTN ring detect problems.
*
* Revision 3.65 2000/11/29 07:31:55 craigs
* Added new 425Hz filter co-efficients
* Added card-specific DTMF prescaler initialisation
*
* Revision 3.64 2000/11/28 14:03:32 craigs
* Changed certain mixer initialisations to be 0dB rather than 12dB
* Added additional information to /proc/ixj
*
* Revision 3.63 2000/11/28 11:38:41 craigs
* Added display of AEC modes in AUTO and AGC mode
*
* Revision 3.62 2000/11/28 04:05:44 eokerson
* Improved PSTN ring detection routine.
*
* Revision 3.61 2000/11/27 21:53:12 eokerson
* Fixed flash detection.
*
* Revision 3.60 2000/11/27 15:57:29 eokerson
* More work on G.729 load routines.
*
* Revision 3.59 2000/11/25 21:55:12 eokerson
* Fixed errors in G.729 load routine.
*
* Revision 3.58 2000/11/25 04:08:29 eokerson
* Added board locks around G.729 and TS85 load routines.
*
* Revision 3.57 2000/11/24 05:35:17 craigs
* Added ability to retrieve mixer values on LineJACK
* Added complete initialisation of all mixer values at startup
* Fixed spelling mistake
*
* Revision 3.56 2000/11/23 02:52:11 robertj
* Added cvs change log keyword.
* Fixed bug in capabilities list when using G.729 module.
*
*/
#include "ixj-ver.h"
#define PERFMON_STATS
#define IXJDEBUG 0
#define MAXRINGS 5
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/kernel.h> /* printk() */
#include <linux/fs.h> /* everything... */
#include <linux/errno.h> /* error codes */
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/poll.h>
#include <linux/timer.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/isapnp.h>
#include "ixj.h"
#define TYPE(inode) (iminor(inode) >> 4)
#define NUM(inode) (iminor(inode) & 0xf)
static int ixjdebug;
static int hertz = HZ;
static int samplerate = 100;
module_param(ixjdebug, int, 0);
static struct pci_device_id ixj_pci_tbl[] __devinitdata = {
{ PCI_VENDOR_ID_QUICKNET, PCI_DEVICE_ID_QUICKNET_XJ,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{ }
};
MODULE_DEVICE_TABLE(pci, ixj_pci_tbl);
/************************************************************************
*
* ixjdebug meanings are now bit mapped instead of level based
* Values can be or'ed together to turn on multiple messages
*
* bit 0 (0x0001) = any failure
* bit 1 (0x0002) = general messages
* bit 2 (0x0004) = POTS ringing related
* bit 3 (0x0008) = PSTN events
* bit 4 (0x0010) = PSTN Cadence state details
* bit 5 (0x0020) = Tone detection triggers
* bit 6 (0x0040) = Tone detection cadence details
* bit 7 (0x0080) = ioctl tracking
* bit 8 (0x0100) = signal tracking
* bit 9 (0x0200) = CallerID generation details
*
************************************************************************/
#ifdef IXJ_DYN_ALLOC
static IXJ *ixj[IXJMAX];
#define get_ixj(b) ixj[(b)]
/*
* Allocate a free IXJ device
*/
/*
 * Allocate a free IXJ device (dynamic-allocation build).
 *
 * Scans the ixj[] pointer table for the first unused entry (NULL, or a
 * previously allocated device whose DSP base was never set), kmallocs a
 * fresh IXJ there, and returns it.  Returns NULL when the table is full
 * or the allocation fails.
 *
 * Fix: the original used 'cnt' and 'j' without declaring them, so this
 * #ifdef IXJ_DYN_ALLOC branch could not compile; also modernized the
 * K&R empty parameter list to (void).
 */
static IXJ *ixj_alloc(void)
{
	int cnt;
	IXJ *j;

	for (cnt = 0; cnt < IXJMAX; cnt++) {
		if (ixj[cnt] == NULL || !ixj[cnt]->DSPbase) {
			j = kmalloc(sizeof(IXJ), GFP_KERNEL);
			if (j == NULL)
				return NULL;
			ixj[cnt] = j;
			return j;
		}
	}
	return NULL;
}
/* Release the caller-ID FSK buffer and clear the stale pointer so a
 * later ixj_fsk_alloc() can re-allocate it (kfree(NULL) is a no-op). */
static void ixj_fsk_free(IXJ *j)
{
	kfree(j->fskdata);
	j->fskdata = NULL;
}
/*
 * Lazily allocate the 8000-byte FSK (caller-ID) buffer for this board.
 * Does nothing if the buffer already exists; on allocation failure the
 * buffer pointer stays NULL and only a debug message is logged.
 */
static void ixj_fsk_alloc(IXJ *j)
{
	if (j->fskdata != NULL)
		return;

	j->fskdata = kmalloc(8000, GFP_KERNEL);
	if (j->fskdata == NULL) {
		if (ixjdebug & 0x0200)
			printk("IXJ phone%d - allocate failed\n", j->board);
		return;
	}

	j->fsksize = 8000;
	if (ixjdebug & 0x0200)
		printk("IXJ phone%d - allocate succeded\n", j->board);
}
#else
static IXJ ixj[IXJMAX];
#define get_ixj(b) (&ixj[(b)])
/*
* Allocate a free IXJ device
*/
/*
 * Allocate a free IXJ device (static-array build): return the first
 * table slot whose DSP base address has not been claimed yet, or NULL
 * when all IXJMAX slots are in use.
 */
static IXJ *ixj_alloc(void)
{
	int idx;

	for (idx = 0; idx < IXJMAX; idx++) {
		if (!ixj[idx].DSPbase)
			return &ixj[idx];
	}
	return NULL;
}
/* Static-array build: the FSK buffer is part of the IXJ struct, so
 * "free" is a no-op and "alloc" just records the fixed buffer size. */
static inline void ixj_fsk_free(IXJ *j) {;}

static inline void ixj_fsk_alloc(IXJ *j)
{
	j->fsksize = 8000;
}
#endif
#ifdef PERFMON_STATS
#define ixj_perfmon(x) ((x)++)
#else
#define ixj_perfmon(x) do { } while(0)
#endif
static int ixj_convert_loaded;
static int ixj_WriteDSPCommand(unsigned short, IXJ *j);
/************************************************************************
*
* These are function definitions to allow external modules to register
* enhanced functionality call backs.
*
************************************************************************/
/* Default no-op callback installed in the ixj_Pre/PostRead/Write hook
 * pointers until an external module registers real handlers; both
 * arguments are intentionally unused. */
static int Stub(IXJ * J, unsigned long arg)
{
	return 0;
}
static IXJ_REGFUNC ixj_PreRead = &Stub;
static IXJ_REGFUNC ixj_PostRead = &Stub;
static IXJ_REGFUNC ixj_PreWrite = &Stub;
static IXJ_REGFUNC ixj_PostWrite = &Stub;
static void ixj_read_frame(IXJ *j);
static void ixj_write_frame(IXJ *j);
static void ixj_init_timer(IXJ *j);
static void ixj_add_timer(IXJ * j);
static void ixj_timeout(unsigned long ptr);
static int read_filters(IXJ *j);
static int LineMonitor(IXJ *j);
static int ixj_fasync(int fd, struct file *, int mode);
static int ixj_set_port(IXJ *j, int arg);
static int ixj_set_pots(IXJ *j, int arg);
static int ixj_hookstate(IXJ *j);
static int ixj_record_start(IXJ *j);
static void ixj_record_stop(IXJ *j);
static void set_rec_volume(IXJ *j, int volume);
static int get_rec_volume(IXJ *j);
static int set_rec_codec(IXJ *j, int rate);
static void ixj_vad(IXJ *j, int arg);
static int ixj_play_start(IXJ *j);
static void ixj_play_stop(IXJ *j);
static int ixj_set_tone_on(unsigned short arg, IXJ *j);
static int ixj_set_tone_off(unsigned short, IXJ *j);
static int ixj_play_tone(IXJ *j, char tone);
static void ixj_aec_start(IXJ *j, int level);
static int idle(IXJ *j);
static void ixj_ring_on(IXJ *j);
static void ixj_ring_off(IXJ *j);
static void aec_stop(IXJ *j);
static void ixj_ringback(IXJ *j);
static void ixj_busytone(IXJ *j);
static void ixj_dialtone(IXJ *j);
static void ixj_cpt_stop(IXJ *j);
static char daa_int_read(IXJ *j);
static char daa_CR_read(IXJ *j, int cr);
static int daa_set_mode(IXJ *j, int mode);
static int ixj_linetest(IXJ *j);
static int ixj_daa_write(IXJ *j);
static int ixj_daa_cid_read(IXJ *j);
static void DAA_Coeff_US(IXJ *j);
static void DAA_Coeff_UK(IXJ *j);
static void DAA_Coeff_France(IXJ *j);
static void DAA_Coeff_Germany(IXJ *j);
static void DAA_Coeff_Australia(IXJ *j);
static void DAA_Coeff_Japan(IXJ *j);
static int ixj_init_filter(IXJ *j, IXJ_FILTER * jf);
static int ixj_init_filter_raw(IXJ *j, IXJ_FILTER_RAW * jfr);
static int ixj_init_tone(IXJ *j, IXJ_TONE * ti);
static int ixj_build_cadence(IXJ *j, IXJ_CADENCE __user * cp);
static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp);
/* Serial Control Interface functions */
static int SCI_Control(IXJ *j, int control);
static int SCI_Prepare(IXJ *j);
static int SCI_WaitHighSCI(IXJ *j);
static int SCI_WaitLowSCI(IXJ *j);
static DWORD PCIEE_GetSerialNumber(WORD wAddress);
static int ixj_PCcontrol_wait(IXJ *j);
static void ixj_pre_cid(IXJ *j);
static void ixj_write_cid(IXJ *j);
static void ixj_write_cid_bit(IXJ *j, int bit);
static int set_base_frame(IXJ *j, int size);
static int set_play_codec(IXJ *j, int rate);
static void set_rec_depth(IXJ *j, int depth);
static int ixj_mixer(long val, IXJ *j);
/************************************************************************
CT8020/CT8021 Host Programmers Model
Host address Function Access
DSPbase +
0-1 Aux Software Status Register (reserved) Read Only
2-3 Software Status Register Read Only
4-5 Aux Software Control Register (reserved) Read Write
6-7 Software Control Register Read Write
8-9 Hardware Status Register Read Only
A-B Hardware Control Register Read Write
C-D Host Transmit (Write) Data Buffer Access Port (buffer input)Write Only
E-F Host Receive (Read) Data Buffer Access Port (buffer input) Read Only
************************************************************************/
/*
 * Latch the DSP Hardware Status Register into the shadow copy in *j.
 * The 16-bit register is read as two byte-wide port reads (low byte at
 * DSPbase+8, high byte at DSPbase+9); keep the access order as-is.
 */
static inline void ixj_read_HSR(IXJ *j)
{
	j->hsr.bytes.low = inb_p(j->DSPbase + 8);
	j->hsr.bytes.high = inb_p(j->DSPbase + 9);
}
/* Refresh the HSR shadow and report whether the DSP can accept a command. */
static inline int IsControlReady(IXJ *j)
{
	ixj_read_HSR(j);
	return !!j->hsr.bits.controlrdy;
}
/* Poll the PhoneCARD (Smart Cable) control-ready flag from PCCR1. */
static inline int IsPCControlReady(IXJ *j)
{
	j->pccr1.byte = inb_p(j->XILINXbase + 3);
	return !!j->pccr1.bits.crr;
}
/* Refresh the HSR shadow and report whether a DSP response is ready. */
static inline int IsStatusReady(IXJ *j)
{
	ixj_read_HSR(j);
	return !!j->hsr.bits.statusrdy;
}
/* Report whether a received audio frame is available; counts the poll. */
static inline int IsRxReady(IXJ *j)
{
	ixj_read_HSR(j);
	ixj_perfmon(j->rxreadycheck);
	return !!j->hsr.bits.rxrdy;
}
/* Report whether the DSP can take another audio frame; counts the poll. */
static inline int IsTxReady(IXJ *j)
{
	ixj_read_HSR(j);
	ixj_perfmon(j->txreadycheck);
	return !!j->hsr.bits.txrdy;
}
/*
 * Program the raw DSP playback volume: command 0xCF02 selects "set play
 * volume", the value itself goes out as the following command word.
 */
static inline void set_play_volume(IXJ *j, int volume)
{
	if (ixjdebug & 0x0002)
		printk(KERN_INFO "IXJ: /dev/phone%d Setting Play Volume to 0x%4.4x\n", j->board, volume);
	ixj_WriteDSPCommand(0xCF02, j);
	ixj_WriteDSPCommand(volume, j);
}
/*
 * Set the playback volume on a linear 0..100 scale.
 *
 * The linear value is scaled by a per-card hardware maximum so the
 * perceived loudness matches across card types.  Returns 0 on success,
 * -1 for an out-of-range volume or an unknown card type.
 */
static int set_play_volume_linear(IXJ *j, int volume)
{
	int hwmax;

	if (ixjdebug & 0x0002)
		printk(KERN_INFO "IXJ: /dev/phone %d Setting Linear Play Volume to 0x%4.4x\n", j->board, volume);
	if (volume < 0 || volume > 100)
		return -1;

	/* This should normalize the perceived volumes between the different cards caused by differences in the hardware */
	switch (j->cardtype) {
	case QTI_PHONEJACK:
	case QTI_PHONEJACK_LITE:
		hwmax = 0x380;
		break;
	case QTI_LINEJACK:
		hwmax = (j->port == PORT_PSTN) ? 0x48 : 0x100;
		break;
	case QTI_PHONEJACK_PCI:
		hwmax = 0x6C;
		break;
	case QTI_PHONECARD:
		hwmax = 0x50;
		break;
	default:
		return -1;
	}
	set_play_volume(j, (hwmax * volume) / 100);
	return 0;
}
/* Clamp the play buffer depth to [0, 60] and issue DSP command 0x5280+depth. */
static inline void set_play_depth(IXJ *j, int depth)
{
	if (depth < 0)
		depth = 0;
	else if (depth > 60)
		depth = 60;
	ixj_WriteDSPCommand(0x5280 + depth, j);
}
/*
 * Query the raw DSP playback volume (command 0xCF00); the 16-bit result
 * is left in the software status register shadow and assembled here.
 */
static inline int get_play_volume(IXJ *j)
{
	ixj_WriteDSPCommand(0xCF00, j);
	return j->ssr.high << 8 | j->ssr.low;
}
/*
 * Return the current playback volume on a linear 0..100 scale.
 *
 * The raw DSP volume is divided by the per-card hardware maximum so the
 * scale matches set_play_volume_linear().  Returns -1 for an unknown
 * card type; results above 100 are clamped.
 *
 * Fix: QTI_PHONECARD previously used a maximum of 100 here while
 * set_play_volume_linear() uses 0x50, so a volume written through the
 * linear API did not read back at the same value.  Use 0x50 in both
 * directions so set/get round-trips.
 */
static int get_play_volume_linear(IXJ *j)
{
	int volume, newvolume, dspplaymax;

	/* This should normalize the perceived volumes between the different cards caused by differences in the hardware */
	switch (j->cardtype) {
	case QTI_PHONEJACK:
		dspplaymax = 0x380;
		break;
	case QTI_LINEJACK:
		if(j->port == PORT_PSTN) {
			dspplaymax = 0x48;
		} else {
			dspplaymax = 0x100;
		}
		break;
	case QTI_PHONEJACK_LITE:
		dspplaymax = 0x380;
		break;
	case QTI_PHONEJACK_PCI:
		dspplaymax = 0x6C;
		break;
	case QTI_PHONECARD:
		dspplaymax = 0x50;	/* was 100; match set_play_volume_linear() */
		break;
	default:
		return -1;
	}
	volume = get_play_volume(j);
	newvolume = (volume * 100) / dspplaymax;
	if(newvolume > 100)
		newvolume = 100;
	return newvolume;
}
/*
 * Read back the current SLIC line state as a PLD_SLIC_STATE_* code.
 *
 * On the PhoneCARD the state is reconstructed from the Smart Cable
 * pseudo-SLIC register (fetched through the XILINX control port); on
 * the other cards it is read directly from the SLIC read register.
 */
static inline BYTE SLIC_GetState(IXJ *j)
{
	if (j->cardtype == QTI_PHONECARD) {
		j->pccr1.byte = 0;
		j->psccr.bits.dev = 3;
		j->psccr.bits.rw = 1;	/* read cycle */
		outw_p(j->psccr.byte << 8, j->XILINXbase + 0x00);
		ixj_PCcontrol_wait(j);
		j->pslic.byte = inw_p(j->XILINXbase + 0x00) & 0xFF;
		ixj_PCcontrol_wait(j);
		/* Map the pseudo-SLIC bits onto the PLD state codes. */
		if (j->pslic.bits.powerdown)
			return PLD_SLIC_STATE_OC;
		else if (!j->pslic.bits.ring0 && !j->pslic.bits.ring1)
			return PLD_SLIC_STATE_ACTIVE;
		else
			return PLD_SLIC_STATE_RINGING;
	} else {
		j->pld_slicr.byte = inb_p(j->XILINXbase + 0x01);
	}
	return j->pld_slicr.bits.state;
}
/*
 * Drive the SLIC (or the PhoneCARD pseudo-SLIC) into the requested line
 * state.  Returns true when the state was recognized and programmed,
 * false for unsupported states (or, on the PhoneCARD, when ringing is
 * requested with no readers/writers, or no Smart Cable is attached).
 */
static bool SLIC_SetState(BYTE byState, IXJ *j)
{
	bool ok = false;
	int c1, c2, c3, b2en;

	if (j->cardtype == QTI_PHONECARD) {
		if (!j->flags.pcmciasct)
			return false;
		switch (byState) {
		case PLD_SLIC_STATE_TIPOPEN:
		case PLD_SLIC_STATE_OC:
			j->pslic.bits.powerdown = 1;
			j->pslic.bits.ring0 = j->pslic.bits.ring1 = 0;
			ok = true;
			break;
		case PLD_SLIC_STATE_RINGING:
			/* Only ring while someone has the device open. */
			if (j->readers || j->writers) {
				j->pslic.bits.powerdown = 0;
				j->pslic.bits.ring0 = 1;
				j->pslic.bits.ring1 = 0;
				ok = true;
			}
			break;
		case PLD_SLIC_STATE_OHT:	/* On-hook transmit */
		case PLD_SLIC_STATE_STANDBY:
		case PLD_SLIC_STATE_ACTIVE:
			/* Power the line only while the device is open. */
			j->pslic.bits.powerdown = (j->readers || j->writers) ? 0 : 1;
			j->pslic.bits.ring0 = j->pslic.bits.ring1 = 0;
			ok = true;
			break;
		case PLD_SLIC_STATE_APR:	/* Active polarity reversal */
		case PLD_SLIC_STATE_OHTPR:	/* OHT polarity reversal */
		default:
			/* Not supported on the PhoneCARD pseudo-SLIC. */
			break;
		}
		j->psccr.bits.dev = 3;
		j->psccr.bits.rw = 0;
		outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
		ixj_PCcontrol_wait(j);
		return ok;
	}

	/* Set the C1, C2, C3 & B2EN signals for the hardware SLIC. */
	switch (byState) {
	case PLD_SLIC_STATE_OC:
		c1 = 0; c2 = 0; c3 = 0; b2en = 0;
		break;
	case PLD_SLIC_STATE_RINGING:
		c1 = 1; c2 = 0; c3 = 0; b2en = 1;
		break;
	case PLD_SLIC_STATE_ACTIVE:
		c1 = 0; c2 = 1; c3 = 0; b2en = 0;
		break;
	case PLD_SLIC_STATE_OHT:	/* On-hook transmit */
		c1 = 1; c2 = 1; c3 = 0; b2en = 0;
		break;
	case PLD_SLIC_STATE_TIPOPEN:
		c1 = 0; c2 = 0; c3 = 1; b2en = 0;
		break;
	case PLD_SLIC_STATE_STANDBY:
		c1 = 1; c2 = 0; c3 = 1; b2en = 1;
		break;
	case PLD_SLIC_STATE_APR:	/* Active polarity reversal */
		c1 = 0; c2 = 1; c3 = 1; b2en = 0;
		break;
	case PLD_SLIC_STATE_OHTPR:	/* OHT polarity reversal */
		c1 = 1; c2 = 1; c3 = 1; b2en = 0;
		break;
	default:
		return false;
	}
	j->pld_slicw.bits.c1 = c1;
	j->pld_slicw.bits.c2 = c2;
	j->pld_slicw.bits.c3 = c3;
	j->pld_slicw.bits.b2en = b2en;
	outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
	return true;
}
/*
 * Generate a hook-flash ("wink") on the line: drop the SLIC to open
 * circuit for winktime milliseconds, then restore the prior state.
 */
static int ixj_wink(IXJ *j)
{
	BYTE prev_state = SLIC_GetState(j);

	j->pots_winkstart = jiffies;
	SLIC_SetState(PLD_SLIC_STATE_OC, j);
	msleep(jiffies_to_msecs(j->winktime));
	SLIC_SetState(prev_state, j);
	return 0;
}
/*
 * One-time setup of the per-board polling timer (legacy timer API):
 * ixj_timeout() is invoked with the IXJ pointer cast to unsigned long.
 */
static void ixj_init_timer(IXJ *j)
{
	init_timer(&j->timer);
	j->timer.function = ixj_timeout;
	j->timer.data = (unsigned long)j;
}
/* (Re)arm the polling timer one sample period (hertz/samplerate) from now. */
static void ixj_add_timer(IXJ *j)
{
	j->timer.expires = jiffies + (hertz / samplerate);
	add_timer(&j->timer);
}
/*
 * Advance the call-progress tone state machine one tick.
 *
 * tone_state is incremented each call and wraps at 3; on wrap, the next
 * element of the active tone cadence (j->cadence_t), if any, is loaded
 * and started.  When the cadence runs out, its termination mode decides
 * what happens: stop (PLAY_ONCE), repeat the final element, or restart
 * from element 0 (REPEAT_ALL).
 */
static void ixj_tone_timeout(IXJ *j)
{
	IXJ_TONE ti;

	j->tone_state++;
	if (j->tone_state == 3) {
		j->tone_state = 0;
		if (j->cadence_t) {
			j->tone_cadence_state++;
			if (j->tone_cadence_state >= j->cadence_t->elements_used) {
				/* Ran off the end of the cadence: apply the termination mode. */
				switch (j->cadence_t->termination) {
				case PLAY_ONCE:
					ixj_cpt_stop(j);
					break;
				case REPEAT_LAST_ELEMENT:
					j->tone_cadence_state--;
					ixj_play_tone(j, j->cadence_t->ce[j->tone_cadence_state].index);
					break;
				case REPEAT_ALL:
					j->tone_cadence_state = 0;
					/* A non-zero freq0 means element 0 carries new tone parameters. */
					if (j->cadence_t->ce[j->tone_cadence_state].freq0) {
						ti.tone_index = j->cadence_t->ce[j->tone_cadence_state].index;
						ti.freq0 = j->cadence_t->ce[j->tone_cadence_state].freq0;
						ti.gain0 = j->cadence_t->ce[j->tone_cadence_state].gain0;
						ti.freq1 = j->cadence_t->ce[j->tone_cadence_state].freq1;
						ti.gain1 = j->cadence_t->ce[j->tone_cadence_state].gain1;
						ixj_init_tone(j, &ti);
					}
					ixj_set_tone_on(j->cadence_t->ce[0].tone_on_time, j);
					ixj_set_tone_off(j->cadence_t->ce[0].tone_off_time, j);
					ixj_play_tone(j, j->cadence_t->ce[0].index);
					break;
				}
			} else {
				/* Start the next element (non-zero gain0 flags new parameters). */
				if (j->cadence_t->ce[j->tone_cadence_state].gain0) {
					ti.tone_index = j->cadence_t->ce[j->tone_cadence_state].index;
					ti.freq0 = j->cadence_t->ce[j->tone_cadence_state].freq0;
					ti.gain0 = j->cadence_t->ce[j->tone_cadence_state].gain0;
					ti.freq1 = j->cadence_t->ce[j->tone_cadence_state].freq1;
					ti.gain1 = j->cadence_t->ce[j->tone_cadence_state].gain1;
					ixj_init_tone(j, &ti);
				}
				ixj_set_tone_on(j->cadence_t->ce[j->tone_cadence_state].tone_on_time, j);
				ixj_set_tone_off(j->cadence_t->ce[j->tone_cadence_state].tone_off_time, j);
				ixj_play_tone(j, j->cadence_t->ce[j->tone_cadence_state].index);
			}
		}
	}
}
/* Deliver the async signal configured for @event to listeners, if any. */
static inline void ixj_kill_fasync(IXJ *j, IXJ_SIGEVENT event, int dir)
{
	if (!j->ixj_signals[event])
		return;
	if (ixjdebug & 0x0100)
		printk("Sending signal for event %d\n", event);
	/* Send apps notice of change */
	/* see config.h for macro definition */
	kill_fasync(&(j->async_queue), j->ixj_signals[event], dir);
}
/*
 * Service the DAA (PSTN line interface) on the LineJACK.
 *
 * Called from the timer path.  Reads the DAA interrupt flags, tracks
 * RMR (ring signal) edges, and runs the PSTN-side state machine:
 * SOP_PU_SLEEP (waiting for ring), SOP_PU_RINGING (validating the ring
 * cadence against cadence_f[4], reading caller ID), and
 * SOP_PU_CONVERSATION (wink detection via VDD_OK).
 */
static void ixj_pstn_state(IXJ *j)
{
	int var;
	union XOPXR0 XR0, daaint;

	var = 10;	/* +/- 10% tolerance window for ring cadence timing */
	XR0.reg = j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.reg;
	daaint.reg = 0;
	XR0.bitreg.RMR = j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR;
	j->pld_scrr.byte = inb_p(j->XILINXbase);
	if (j->pld_scrr.bits.daaflag) {
		/* The DAA raised an interrupt: latch which events fired into daaint. */
		daa_int_read(j);
		if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.RING) {
			if(time_after(jiffies, j->pstn_sleeptil) && !(j->flags.pots_pstn && j->hookstate)) {
				daaint.bitreg.RING = 1;
				if(ixjdebug & 0x0008) {
					printk(KERN_INFO "IXJ DAA Ring Interrupt /dev/phone%d at %ld\n", j->board, jiffies);
				}
			} else {
				daa_set_mode(j, SOP_PU_RESET);
			}
		}
		if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.Caller_ID) {
			daaint.bitreg.Caller_ID = 1;
			j->pstn_cid_intr = 1;
			j->pstn_cid_received = jiffies;
			if(ixjdebug & 0x0008) {
				printk(KERN_INFO "IXJ DAA Caller_ID Interrupt /dev/phone%d at %ld\n", j->board, jiffies);
			}
		}
		if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.Cadence) {
			daaint.bitreg.Cadence = 1;
			if(ixjdebug & 0x0008) {
				printk(KERN_INFO "IXJ DAA Cadence Interrupt /dev/phone%d at %ld\n", j->board, jiffies);
			}
		}
		if(j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK != XR0.bitreg.VDD_OK) {
			daaint.bitreg.VDD_OK = 1;
			daaint.bitreg.SI_0 = j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK;
		}
	}
	daa_CR_read(j, 1);
	/* Track RMR edges and timestamps for ring cadence measurement. */
	if(j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR != XR0.bitreg.RMR && time_after(jiffies, j->pstn_sleeptil) && !(j->flags.pots_pstn && j->hookstate)) {
		daaint.bitreg.RMR = 1;
		daaint.bitreg.SI_1 = j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR;
		if(ixjdebug & 0x0008) {
			printk(KERN_INFO "IXJ DAA RMR /dev/phone%d was %s for %ld\n", j->board, XR0.bitreg.RMR?"on":"off", jiffies - j->pstn_last_rmr);
		}
		j->pstn_prev_rmr = j->pstn_last_rmr;
		j->pstn_last_rmr = jiffies;
	}
	switch(j->daa_mode) {
	case SOP_PU_SLEEP:
		/* A qualified ring interrupt wakes us into the RINGING state. */
		if (daaint.bitreg.RING) {
			if (!j->flags.pstn_ringing) {
				if (j->daa_mode != SOP_PU_RINGING) {
					j->pstn_ring_int = jiffies;
					daa_set_mode(j, SOP_PU_RINGING);
				}
			}
		}
		break;
	case SOP_PU_RINGING:
		if (daaint.bitreg.RMR) {
			if (ixjdebug & 0x0008) {
				printk(KERN_INFO "IXJ Ring Cadence a state = %d /dev/phone%d at %ld\n", j->cadence_f[4].state, j->board, jiffies);
			}
			if (daaint.bitreg.SI_1) {	/* Rising edge of RMR */
				j->flags.pstn_rmr = 1;
				j->pstn_ring_start = jiffies;
				j->pstn_ring_stop = 0;
				j->ex.bits.pstn_ring = 0;
				/* Odd states (1,3,5) are "on" phases, even (2,4,6) are "off";
				   state 7 means the cadence matched completely. */
				if (j->cadence_f[4].state == 0) {
					j->cadence_f[4].state = 1;
					j->cadence_f[4].on1min = jiffies + (long)((j->cadence_f[4].on1 * hertz * (100 - var)) / 10000);
					j->cadence_f[4].on1dot = jiffies + (long)((j->cadence_f[4].on1 * hertz * (100)) / 10000);
					j->cadence_f[4].on1max = jiffies + (long)((j->cadence_f[4].on1 * hertz * (100 + var)) / 10000);
				} else if (j->cadence_f[4].state == 2) {
					if((time_after(jiffies, j->cadence_f[4].off1min) &&
					    time_before(jiffies, j->cadence_f[4].off1max))) {
						if (j->cadence_f[4].on2) {
							j->cadence_f[4].state = 3;
							j->cadence_f[4].on2min = jiffies + (long)((j->cadence_f[4].on2 * (hertz * (100 - var)) / 10000));
							j->cadence_f[4].on2dot = jiffies + (long)((j->cadence_f[4].on2 * (hertz * (100)) / 10000));
							j->cadence_f[4].on2max = jiffies + (long)((j->cadence_f[4].on2 * (hertz * (100 + var)) / 10000));
						} else {
							j->cadence_f[4].state = 7;
						}
					} else {
						if (ixjdebug & 0x0008) {
							printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
									j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
									j->cadence_f[4].off1);
						}
						j->cadence_f[4].state = 0;
					}
				} else if (j->cadence_f[4].state == 4) {
					if((time_after(jiffies, j->cadence_f[4].off2min) &&
					    time_before(jiffies, j->cadence_f[4].off2max))) {
						if (j->cadence_f[4].on3) {
							j->cadence_f[4].state = 5;
							j->cadence_f[4].on3min = jiffies + (long)((j->cadence_f[4].on3 * (hertz * (100 - var)) / 10000));
							j->cadence_f[4].on3dot = jiffies + (long)((j->cadence_f[4].on3 * (hertz * (100)) / 10000));
							j->cadence_f[4].on3max = jiffies + (long)((j->cadence_f[4].on3 * (hertz * (100 + var)) / 10000));
						} else {
							j->cadence_f[4].state = 7;
						}
					} else {
						if (ixjdebug & 0x0008) {
							printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
									j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
									j->cadence_f[4].off2);
						}
						j->cadence_f[4].state = 0;
					}
				} else if (j->cadence_f[4].state == 6) {
					if((time_after(jiffies, j->cadence_f[4].off3min) &&
					    time_before(jiffies, j->cadence_f[4].off3max))) {
						j->cadence_f[4].state = 7;
					} else {
						if (ixjdebug & 0x0008) {
							printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
									j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
									j->cadence_f[4].off3);
						}
						j->cadence_f[4].state = 0;
					}
				} else {
					j->cadence_f[4].state = 0;
				}
			} else {	/* Falling edge of RMR */
				j->pstn_ring_start = 0;
				j->pstn_ring_stop = jiffies;
				if (j->cadence_f[4].state == 1) {
					if(!j->cadence_f[4].on1) {
						j->cadence_f[4].state = 7;
					} else if((time_after(jiffies, j->cadence_f[4].on1min) &&
					           time_before(jiffies, j->cadence_f[4].on1max))) {
						if (j->cadence_f[4].off1) {
							j->cadence_f[4].state = 2;
							j->cadence_f[4].off1min = jiffies + (long)((j->cadence_f[4].off1 * (hertz * (100 - var)) / 10000));
							j->cadence_f[4].off1dot = jiffies + (long)((j->cadence_f[4].off1 * (hertz * (100)) / 10000));
							j->cadence_f[4].off1max = jiffies + (long)((j->cadence_f[4].off1 * (hertz * (100 + var)) / 10000));
						} else {
							j->cadence_f[4].state = 7;
						}
					} else {
						if (ixjdebug & 0x0008) {
							printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
									j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
									j->cadence_f[4].on1);
						}
						j->cadence_f[4].state = 0;
					}
				} else if (j->cadence_f[4].state == 3) {
					if((time_after(jiffies, j->cadence_f[4].on2min) &&
					    time_before(jiffies, j->cadence_f[4].on2max))) {
						if (j->cadence_f[4].off2) {
							j->cadence_f[4].state = 4;
							j->cadence_f[4].off2min = jiffies + (long)((j->cadence_f[4].off2 * (hertz * (100 - var)) / 10000));
							j->cadence_f[4].off2dot = jiffies + (long)((j->cadence_f[4].off2 * (hertz * (100)) / 10000));
							j->cadence_f[4].off2max = jiffies + (long)((j->cadence_f[4].off2 * (hertz * (100 + var)) / 10000));
						} else {
							j->cadence_f[4].state = 7;
						}
					} else {
						if (ixjdebug & 0x0008) {
							printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
									j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
									j->cadence_f[4].on2);
						}
						j->cadence_f[4].state = 0;
					}
				} else if (j->cadence_f[4].state == 5) {
					if((time_after(jiffies, j->cadence_f[4].on3min) &&
					    time_before(jiffies, j->cadence_f[4].on3max))) {
						if (j->cadence_f[4].off3) {
							j->cadence_f[4].state = 6;
							j->cadence_f[4].off3min = jiffies + (long)((j->cadence_f[4].off3 * (hertz * (100 - var)) / 10000));
							j->cadence_f[4].off3dot = jiffies + (long)((j->cadence_f[4].off3 * (hertz * (100)) / 10000));
							j->cadence_f[4].off3max = jiffies + (long)((j->cadence_f[4].off3 * (hertz * (100 + var)) / 10000));
						} else {
							j->cadence_f[4].state = 7;
						}
					} else {
						j->cadence_f[4].state = 0;
					}
				} else {
					if (ixjdebug & 0x0008) {
						printk(KERN_INFO "IXJ Ring Cadence fail state = %d /dev/phone%d at %ld should be %d\n",
								j->cadence_f[4].state, j->board, jiffies - j->pstn_prev_rmr,
								j->cadence_f[4].on3);
					}
					j->cadence_f[4].state = 0;
				}
			}
			if (ixjdebug & 0x0010) {
				printk(KERN_INFO "IXJ Ring Cadence b state = %d /dev/phone%d at %ld\n", j->cadence_f[4].state, j->board, jiffies);
			}
			if (ixjdebug & 0x0010) {
				switch(j->cadence_f[4].state) {
				case 1:
					printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
						j->cadence_f[4].on1, j->cadence_f[4].on1min, j->cadence_f[4].on1dot, j->cadence_f[4].on1max);
					break;
				case 2:
					printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
						j->cadence_f[4].off1, j->cadence_f[4].off1min, j->cadence_f[4].off1dot, j->cadence_f[4].off1max);
					break;
				case 3:
					printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
						j->cadence_f[4].on2, j->cadence_f[4].on2min, j->cadence_f[4].on2dot, j->cadence_f[4].on2max);
					break;
				case 4:
					printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
						j->cadence_f[4].off2, j->cadence_f[4].off2min, j->cadence_f[4].off2dot, j->cadence_f[4].off2max);
					break;
				case 5:
					printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
						j->cadence_f[4].on3, j->cadence_f[4].on3min, j->cadence_f[4].on3dot, j->cadence_f[4].on3max);
					break;
				case 6:
					printk(KERN_INFO "IXJ /dev/phone%d Next Ring Cadence state at %u min %ld - %ld - max %ld\n", j->board,
						j->cadence_f[4].off3, j->cadence_f[4].off3min, j->cadence_f[4].off3dot, j->cadence_f[4].off3max);
					break;
				}
			}
		}
		/* State 7: full cadence matched -- report the ring to userspace. */
		if (j->cadence_f[4].state == 7) {
			j->cadence_f[4].state = 0;
			j->pstn_ring_stop = jiffies;
			j->ex.bits.pstn_ring = 1;
			ixj_kill_fasync(j, SIG_PSTN_RING, POLL_IN);
			if(ixjdebug & 0x0008) {
				printk(KERN_INFO "IXJ Ring int set /dev/phone%d at %ld\n", j->board, jiffies);
			}
		}
		/* No ring activity for 5 seconds: give up and go back to sleep. */
		if((j->pstn_ring_int != 0 && time_after(jiffies, j->pstn_ring_int + (hertz * 5)) && !j->flags.pstn_rmr) ||
		   (j->pstn_ring_stop != 0 && time_after(jiffies, j->pstn_ring_stop + (hertz * 5)))) {
			if(ixjdebug & 0x0008) {
				printk("IXJ DAA no ring in 5 seconds /dev/phone%d at %ld\n", j->board, jiffies);
				printk("IXJ DAA pstn ring int /dev/phone%d at %ld\n", j->board, j->pstn_ring_int);
				printk("IXJ DAA pstn ring stop /dev/phone%d at %ld\n", j->board, j->pstn_ring_stop);
			}
			j->pstn_ring_stop = j->pstn_ring_int = 0;
			daa_set_mode(j, SOP_PU_SLEEP);
		}
		outb_p(j->pld_scrw.byte, j->XILINXbase);
		/* Caller ID data is read one ring-period (1 s) after the CID interrupt. */
		if (j->pstn_cid_intr && time_after(jiffies, j->pstn_cid_received + hertz)) {
			ixj_daa_cid_read(j);
			j->ex.bits.caller_id = 1;
			ixj_kill_fasync(j, SIG_CALLER_ID, POLL_IN);
			j->pstn_cid_intr = 0;
		}
		if (daaint.bitreg.Cadence) {
			if(ixjdebug & 0x0008) {
				printk("IXJ DAA Cadence interrupt going to sleep /dev/phone%d\n", j->board);
			}
			daa_set_mode(j, SOP_PU_SLEEP);
			j->ex.bits.pstn_ring = 0;
		}
		break;
	case SOP_PU_CONVERSATION:
		/* VDD_OK dropping marks a possible wink; time it against winktime. */
		if (daaint.bitreg.VDD_OK) {
			if(!daaint.bitreg.SI_0) {
				if (!j->pstn_winkstart) {
					if(ixjdebug & 0x0008) {
						printk("IXJ DAA possible wink /dev/phone%d %ld\n", j->board, jiffies);
					}
					j->pstn_winkstart = jiffies;
				}
			} else {
				if (j->pstn_winkstart) {
					if(ixjdebug & 0x0008) {
						printk("IXJ DAA possible wink end /dev/phone%d %ld\n", j->board, jiffies);
					}
					j->pstn_winkstart = 0;
				}
			}
		}
		if (j->pstn_winkstart && time_after(jiffies, j->pstn_winkstart + ((hertz * j->winktime) / 1000))) {
			if(ixjdebug & 0x0008) {
				printk("IXJ DAA wink detected going to sleep /dev/phone%d %ld\n", j->board, jiffies);
			}
			daa_set_mode(j, SOP_PU_SLEEP);
			j->pstn_winkstart = 0;
			j->ex.bits.pstn_wink = 1;
			ixj_kill_fasync(j, SIG_PSTN_WINK, POLL_IN);
		}
		break;
	}
}
/*
 * Per-board polling timer callback (installed by ixj_init_timer).
 *
 * Runs once per sample period and, when it can take the board's busy
 * bit and no DSP write is in flight, services everything the card
 * needs: tone playback timing, audio frame read/write, ring cadence
 * generation with caller-ID transmission, hook state tracking, and the
 * PSTN (DAA) state machine.  Always re-arms itself via ixj_add_timer().
 */
static void ixj_timeout(unsigned long ptr)
{
	int board;
	unsigned long jifon;
	IXJ *j = (IXJ *)ptr;

	board = j->board;
	/* Skip this tick entirely if the board is busy or a DSP write is active. */
	if (j->DSPbase && atomic_read(&j->DSPWrite) == 0 && test_and_set_bit(board, (void *)&j->busyflags) == 0) {
		ixj_perfmon(j->timerchecks);
		j->hookstate = ixj_hookstate(j);
		if (j->tone_state) {
			/* On-hook while a tone plays: stop it and report the hook change. */
			if (!(j->hookstate)) {
				ixj_cpt_stop(j);
				if (j->m_hook) {
					j->m_hook = 0;
					j->ex.bits.hookstate = 1;
					ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
				}
				clear_bit(board, &j->busyflags);
				ixj_add_timer(j);
				return;
			}
			/* Jiffies for the current tone phase (state 1 = on, else on+off). */
			if (j->tone_state == 1)
				jifon = ((hertz * j->tone_on_time) * 25 / 100000);
			else
				jifon = ((hertz * j->tone_on_time) * 25 / 100000) + ((hertz * j->tone_off_time) * 25 / 100000);
			if (time_before(jiffies, j->tone_start_jif + jifon)) {
				if (j->tone_state == 1) {
					ixj_play_tone(j, j->tone_index);
					if (j->dsp.low == 0x20) {	/* Internet PhoneJACK */
						clear_bit(board, &j->busyflags);
						ixj_add_timer(j);
						return;
					}
				} else {
					ixj_play_tone(j, 0);	/* silence during the off phase */
					if (j->dsp.low == 0x20) {
						clear_bit(board, &j->busyflags);
						ixj_add_timer(j);
						return;
					}
				}
			} else {
				/* Phase elapsed: advance the tone/cadence state machine. */
				ixj_tone_timeout(j);
				if (j->flags.dialtone) {
					ixj_dialtone(j);
				}
				if (j->flags.busytone) {
					ixj_busytone(j);
					if (j->dsp.low == 0x20) {
						clear_bit(board, &j->busyflags);
						ixj_add_timer(j);
						return;
					}
				}
				if (j->flags.ringback) {
					ixj_ringback(j);
					if (j->dsp.low == 0x20) {
						clear_bit(board, &j->busyflags);
						ixj_add_timer(j);
						return;
					}
				}
				if (!j->tone_state) {
					ixj_cpt_stop(j);
				}
			}
		}
		/* Move audio frames unless a PhoneJACK is busy generating a tone. */
		if (!(j->tone_state && j->dsp.low == 0x20)) {
			if (IsRxReady(j)) {
				ixj_read_frame(j);
			}
			if (IsTxReady(j)) {
				ixj_write_frame(j);
			}
		}
		/* Cadenced ringing of the local phone (cringing = cadence ringing). */
		if (j->flags.cringing) {
			if (j->hookstate & 1) {
				/* Answered: stop ringing. */
				j->flags.cringing = 0;
				ixj_ring_off(j);
			} else if(j->cadence_f[5].enable && ((!j->cadence_f[5].en_filter) || (j->cadence_f[5].en_filter && j->flags.firstring))) {
				/* Custom ring cadence from cadence_f[5]. */
				switch(j->cadence_f[5].state) {
				case 0:
					j->cadence_f[5].on1dot = jiffies + (long)((j->cadence_f[5].on1 * (hertz * 100) / 10000));
					if (time_before(jiffies, j->cadence_f[5].on1dot)) {
						if(ixjdebug & 0x0004) {
							printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
						}
						ixj_ring_on(j);
					}
					j->cadence_f[5].state = 1;
					break;
				case 1:
					if (time_after(jiffies, j->cadence_f[5].on1dot)) {
						j->cadence_f[5].off1dot = jiffies + (long)((j->cadence_f[5].off1 * (hertz * 100) / 10000));
						if(ixjdebug & 0x0004) {
							printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
						}
						ixj_ring_off(j);
						j->cadence_f[5].state = 2;
					}
					break;
				case 2:
					if (time_after(jiffies, j->cadence_f[5].off1dot)) {
						if(ixjdebug & 0x0004) {
							printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
						}
						ixj_ring_on(j);
						if (j->cadence_f[5].on2) {
							j->cadence_f[5].on2dot = jiffies + (long)((j->cadence_f[5].on2 * (hertz * 100) / 10000));
							j->cadence_f[5].state = 3;
						} else {
							j->cadence_f[5].state = 7;
						}
					}
					break;
				case 3:
					if (time_after(jiffies, j->cadence_f[5].on2dot)) {
						if(ixjdebug & 0x0004) {
							printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
						}
						ixj_ring_off(j);
						if (j->cadence_f[5].off2) {
							j->cadence_f[5].off2dot = jiffies + (long)((j->cadence_f[5].off2 * (hertz * 100) / 10000));
							j->cadence_f[5].state = 4;
						} else {
							j->cadence_f[5].state = 7;
						}
					}
					break;
				case 4:
					if (time_after(jiffies, j->cadence_f[5].off2dot)) {
						if(ixjdebug & 0x0004) {
							printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
						}
						ixj_ring_on(j);
						if (j->cadence_f[5].on3) {
							j->cadence_f[5].on3dot = jiffies + (long)((j->cadence_f[5].on3 * (hertz * 100) / 10000));
							j->cadence_f[5].state = 5;
						} else {
							j->cadence_f[5].state = 7;
						}
					}
					break;
				case 5:
					if (time_after(jiffies, j->cadence_f[5].on3dot)) {
						if(ixjdebug & 0x0004) {
							printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
						}
						ixj_ring_off(j);
						if (j->cadence_f[5].off3) {
							j->cadence_f[5].off3dot = jiffies + (long)((j->cadence_f[5].off3 * (hertz * 100) / 10000));
							j->cadence_f[5].state = 6;
						} else {
							j->cadence_f[5].state = 7;
						}
					}
					break;
				case 6:
					if (time_after(jiffies, j->cadence_f[5].off3dot)) {
						if(ixjdebug & 0x0004) {
							printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
						}
						j->cadence_f[5].state = 7;
					}
					break;
				case 7:
					if(ixjdebug & 0x0004) {
						printk("Ringing cadence state = %d - %ld\n", j->cadence_f[5].state, jiffies);
					}
					j->flags.cidring = 1;
					j->cadence_f[5].state = 0;
					break;
				}
				/* Send caller ID (FSK) in the silent window, once per ring burst. */
				if (j->flags.cidring && !j->flags.cidsent) {
					j->flags.cidsent = 1;
					if(j->fskdcnt) {
						SLIC_SetState(PLD_SLIC_STATE_OHT, j);
						ixj_pre_cid(j);
					}
					j->flags.cidring = 0;
				}
				clear_bit(board, &j->busyflags);
				ixj_add_timer(j);
				return;
			} else {
				/* Standard 16-slot ring cadence pattern, half a second per slot. */
				if (time_after(jiffies, j->ring_cadence_jif + (hertz / 2))) {
					if (j->flags.cidring && !j->flags.cidsent) {
						j->flags.cidsent = 1;
						if(j->fskdcnt) {
							SLIC_SetState(PLD_SLIC_STATE_OHT, j);
							ixj_pre_cid(j);
						}
						j->flags.cidring = 0;
					}
					j->ring_cadence_t--;
					if (j->ring_cadence_t == -1)
						j->ring_cadence_t = 15;
					j->ring_cadence_jif = jiffies;
					if (j->ring_cadence & 1 << j->ring_cadence_t) {
						if(j->flags.cidsent && j->cadence_f[5].en_filter)
							j->flags.firstring = 1;
						else
							ixj_ring_on(j);
					} else {
						ixj_ring_off(j);
						if(!j->flags.cidsent)
							j->flags.cidring = 1;
					}
				}
				clear_bit(board, &j->busyflags);
				ixj_add_timer(j);
				return;
			}
		}
		/* Steady-state hook tracking when not ringing. */
		if (!j->flags.ringing) {
			if (j->hookstate) {	/* & 1) { */
				if (j->dsp.low != 0x20 &&
				    SLIC_GetState(j) != PLD_SLIC_STATE_ACTIVE) {
					SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j);
				}
				LineMonitor(j);
				read_filters(j);
				ixj_WriteDSPCommand(0x511B, j);	/* query DSP processor load */
				j->proc_load = j->ssr.high << 8 | j->ssr.low;
				if (!j->m_hook && (j->hookstate & 1)) {
					j->m_hook = j->ex.bits.hookstate = 1;
					ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
				}
			} else {
				if (j->ex.bits.dtmf_ready) {
					j->dtmf_wp = j->dtmf_rp = j->ex.bits.dtmf_ready = 0;
				}
				if (j->m_hook) {
					j->m_hook = 0;
					j->ex.bits.hookstate = 1;
					ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
				}
			}
		}
		/* Service the PSTN side of a LineJACK when a line is attached. */
		if (j->cardtype == QTI_LINEJACK && !j->flags.pstncheck && j->flags.pstn_present) {
			ixj_pstn_state(j);
		}
		if (j->ex.bytes) {
			wake_up_interruptible(&j->poll_q);	/* Wake any blocked selects */
		}
		clear_bit(board, &j->busyflags);
	}
	ixj_add_timer(j);
}
/*
 * Busy-wait (up to 60/100 of a second) for the DSP status-ready flag.
 * Returns 0 on success, -1 on timeout.
 */
static int ixj_status_wait(IXJ *j)
{
	unsigned long deadline = jiffies + ((60 * hertz) / 100);

	while (!IsStatusReady(j)) {
		ixj_perfmon(j->statuswait);
		if (time_after(jiffies, deadline)) {
			ixj_perfmon(j->statuswaitfail);
			return -1;
		}
	}
	return 0;
}
/*
 * Busy-wait (up to 60/100 of a second) for the PhoneCARD control-ready
 * flag.  Returns 0 on success, -1 on timeout.
 */
static int ixj_PCcontrol_wait(IXJ *j)
{
	unsigned long deadline = jiffies + ((60 * hertz) / 100);

	while (!IsPCControlReady(j)) {
		ixj_perfmon(j->pcontrolwait);
		if (time_after(jiffies, deadline)) {
			ixj_perfmon(j->pcontrolwaitfail);
			return -1;
		}
	}
	return 0;
}
/*
 * Release our claim on the DSP command channel.  If concurrent writers
 * were detected while we held it, log the overlap (with @msg appended
 * verbatim so each call site keeps its original wording) and force the
 * counter back to zero.  Factored out of ixj_WriteDSPCommand(), which
 * previously repeated this sequence three times.
 */
static void ixj_DSPWrite_release(IXJ *j, unsigned short cmd, const char *msg)
{
	atomic_dec(&j->DSPWrite);
	if (atomic_read(&j->DSPWrite) > 0) {
		printk("IXJ %d DSP overlaped command 0x%4.4x%s\n", j->board, cmd, msg);
		while (atomic_read(&j->DSPWrite) > 0)
			atomic_dec(&j->DSPWrite);
	}
}

/*
 * Send one 16-bit command to the DSP and latch its 16-bit response into
 * j->ssr.  Overlapping writes are detected via the DSPWrite counter and
 * rejected.  Returns 0 on success; -1 on overlap, control-ready timeout,
 * or status-wait timeout (in which case ssr reads back as 0xFFFF).
 */
static int ixj_WriteDSPCommand(unsigned short cmd, IXJ *j)
{
	BYTES bytes;
	unsigned long jif;

	atomic_inc(&j->DSPWrite);
	if (atomic_read(&j->DSPWrite) > 1) {
		printk("IXJ %d DSP write overlap attempting command 0x%4.4x\n", j->board, cmd);
		return -1;
	}
	bytes.high = (cmd & 0xFF00) >> 8;
	bytes.low = cmd & 0x00FF;
	jif = jiffies + ((60 * hertz) / 100);
	while (!IsControlReady(j)) {
		ixj_perfmon(j->iscontrolready);
		if (time_after(jiffies, jif)) {
			ixj_perfmon(j->iscontrolreadyfail);
			ixj_DSPWrite_release(j, cmd, " during control ready failure.");
			return -1;
		}
	}
	outb(bytes.low, j->DSPbase + 6);
	outb(bytes.high, j->DSPbase + 7);
	if (ixj_status_wait(j)) {
		/* No response: poison the shadow registers so callers see 0xFFFF. */
		j->ssr.low = 0xFF;
		j->ssr.high = 0xFF;
		ixj_DSPWrite_release(j, cmd, " during status wait failure.");
		return -1;
	}
	/* Read Software Status Register */
	j->ssr.low = inb_p(j->DSPbase + 2);
	j->ssr.high = inb_p(j->DSPbase + 3);
	ixj_DSPWrite_release(j, cmd, "");
	return 0;
}
/***************************************************************************
*
* General Purpose IO Register read routine
*
***************************************************************************/
/* Read the GPIO pins (DSP command 0x5143) into j->gpio; -1 on failure. */
static inline int ixj_gpio_read(IXJ *j)
{
	if (ixj_WriteDSPCommand(0x5143, j) != 0)
		return -1;
	j->gpio.bytes.low = j->ssr.low;
	j->gpio.bytes.high = j->ssr.high;
	return 0;
}
/* Light the four LineJACK LEDs from the low four bits of @state. */
static inline void LED_SetState(int state, IXJ *j)
{
	if (j->cardtype != QTI_LINEJACK)
		return;
	j->pld_scrw.bits.led1 = !!(state & 0x1);
	j->pld_scrw.bits.led2 = !!(state & 0x2);
	j->pld_scrw.bits.led3 = !!(state & 0x4);
	j->pld_scrw.bits.led4 = !!(state & 0x8);
	outb(j->pld_scrw.byte, j->XILINXbase);
}
/*********************************************************************
* GPIO Pins are configured as follows on the Quicknet Internet
* PhoneJACK Telephony Cards
*
* POTS Select GPIO_6=0 GPIO_7=0
* Mic/Speaker Select GPIO_6=0 GPIO_7=1
* Handset Select GPIO_6=1 GPIO_7=0
*
* SLIC Active GPIO_1=0 GPIO_2=1 GPIO_5=0
* SLIC Ringing GPIO_1=1 GPIO_2=1 GPIO_5=0
* SLIC Open Circuit GPIO_1=0 GPIO_2=0 GPIO_5=0
*
* Hook Switch changes reported on GPIO_3
*********************************************************************/
/*
 * Route the audio path to the requested port (POTS / PSTN / speaker /
 * handset) with the per-card hardware sequence each requires.
 *
 * Returns 0 on success; small positive error codes otherwise:
 *   2  - CODEC configuration write to the DSP failed
 *   4  - PSTN requested on a card without a PSTN port
 *   5  - handset requested on a card without a handset port
 *   6  - unknown port
 *   10 - non-POTS port requested on a PhoneJACK Lite
 *   11 - POTS requested on a PhoneCARD with no Smart Cable
 */
static int ixj_set_port(IXJ *j, int arg)
{
	if (j->cardtype == QTI_PHONEJACK_LITE) {
		/* The Lite only has a POTS port. */
		if (arg != PORT_POTS)
			return 10;
		else
			return 0;
	}
	switch (arg) {
	case PORT_POTS:
		j->port = PORT_POTS;
		switch (j->cardtype) {
		case QTI_PHONECARD:
			if (j->flags.pcmciasct == 1)
				SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j);
			else
				return 11;
			break;
		case QTI_PHONEJACK_PCI:
			j->pld_slicw.pcib.mic = 0;
			j->pld_slicw.pcib.spk = 0;
			outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
			break;
		case QTI_LINEJACK:
			ixj_set_pots(j, 0);	/* Disconnect POTS/PSTN relay */
			if (ixj_WriteDSPCommand(0xC528, j))	/* Write CODEC config to
								   Software Control Register */
				return 2;
			j->pld_scrw.bits.daafsyncen = 0;	/* Turn off DAA Frame Sync */
			outb(j->pld_scrw.byte, j->XILINXbase);
			j->pld_clock.byte = 0;
			outb(j->pld_clock.byte, j->XILINXbase + 0x04);
			j->pld_slicw.bits.rly1 = 1;
			j->pld_slicw.bits.spken = 0;
			outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
			/* Route the mixer: SLIC (Mono1) in, mic out, masters muted. */
			ixj_mixer(0x1200, j);	/* Turn Off MIC switch on mixer left */
			ixj_mixer(0x1401, j);	/* Turn On Mono1 switch on mixer left */
			ixj_mixer(0x1300, j);	/* Turn Off MIC switch on mixer right */
			ixj_mixer(0x1501, j);	/* Turn On Mono1 switch on mixer right */
			ixj_mixer(0x0E80, j);	/*Mic mute */
			ixj_mixer(0x0F00, j);	/* Set mono out (SLIC) to 0dB */
			ixj_mixer(0x0080, j);	/* Mute Master Left volume */
			ixj_mixer(0x0180, j);	/* Mute Master Right volume */
			SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
/*			SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j); */
			break;
		case QTI_PHONEJACK:
			/* GPIO_6=0 GPIO_7=0 selects POTS (see table above). */
			j->gpio.bytes.high = 0x0B;
			j->gpio.bits.gpio6 = 0;
			j->gpio.bits.gpio7 = 0;
			ixj_WriteDSPCommand(j->gpio.word, j);
			break;
		}
		break;
	case PORT_PSTN:
		if (j->cardtype == QTI_LINEJACK) {
			ixj_WriteDSPCommand(0xC534, j);	/* Write CODEC config to Software Control Register */
			j->pld_slicw.bits.rly3 = 0;
			j->pld_slicw.bits.rly1 = 1;
			j->pld_slicw.bits.spken = 0;
			outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
			j->port = PORT_PSTN;
		} else {
			return 4;
		}
		break;
	case PORT_SPEAKER:
		j->port = PORT_SPEAKER;
		switch (j->cardtype) {
		case QTI_PHONECARD:
			if (j->flags.pcmciasct) {
				SLIC_SetState(PLD_SLIC_STATE_OC, j);
			}
			break;
		case QTI_PHONEJACK_PCI:
			j->pld_slicw.pcib.mic = 1;
			j->pld_slicw.pcib.spk = 1;
			outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
			break;
		case QTI_LINEJACK:
			ixj_set_pots(j, 0);	/* Disconnect POTS/PSTN relay */
			if (ixj_WriteDSPCommand(0xC528, j))	/* Write CODEC config to
								   Software Control Register */
				return 2;
			j->pld_scrw.bits.daafsyncen = 0;	/* Turn off DAA Frame Sync */
			outb(j->pld_scrw.byte, j->XILINXbase);
			j->pld_clock.byte = 0;
			outb(j->pld_clock.byte, j->XILINXbase + 0x04);
			j->pld_slicw.bits.rly1 = 1;
			j->pld_slicw.bits.spken = 1;
			outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
			/* Route the mixer: mic in, SLIC muted, masters at 0dB. */
			ixj_mixer(0x1201, j);	/* Turn On MIC switch on mixer left */
			ixj_mixer(0x1400, j);	/* Turn Off Mono1 switch on mixer left */
			ixj_mixer(0x1301, j);	/* Turn On MIC switch on mixer right */
			ixj_mixer(0x1500, j);	/* Turn Off Mono1 switch on mixer right */
			ixj_mixer(0x0E06, j);	/*Mic un-mute 0dB */
			ixj_mixer(0x0F80, j);	/* Mute mono out (SLIC) */
			ixj_mixer(0x0000, j);	/* Set Master Left volume to 0dB */
			ixj_mixer(0x0100, j);	/* Set Master Right volume to 0dB */
			break;
		case QTI_PHONEJACK:
			/* GPIO_6=0 GPIO_7=1 selects mic/speaker (see table above). */
			j->gpio.bytes.high = 0x0B;
			j->gpio.bits.gpio6 = 0;
			j->gpio.bits.gpio7 = 1;
			ixj_WriteDSPCommand(j->gpio.word, j);
			break;
		}
		break;
	case PORT_HANDSET:
		if (j->cardtype != QTI_PHONEJACK) {
			return 5;
		} else {
			/* GPIO_6=1 GPIO_7=0 selects the handset (see table above). */
			j->gpio.bytes.high = 0x0B;
			j->gpio.bits.gpio6 = 1;
			j->gpio.bits.gpio7 = 0;
			ixj_WriteDSPCommand(j->gpio.word, j);
			j->port = PORT_HANDSET;
		}
		break;
	default:
		return 6;
		break;
	}
	return 0;
}
/*
 * Connect (arg != 0) or disconnect the POTS<->PSTN relay on the
 * LineJACK.  Connecting is only performed while the PSTN port is
 * selected.  Returns 1 when the relay was switched, 0 otherwise.
 */
static int ixj_set_pots(IXJ *j, int arg)
{
	if (j->cardtype != QTI_LINEJACK)
		return 0;

	if (!arg) {
		/* Disconnect: relay released. */
		j->pld_slicw.bits.rly1 = 1;
		outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
		j->flags.pots_pstn = 0;
		return 1;
	}
	if (j->port != PORT_PSTN) {
		j->flags.pots_pstn = 0;
		return 0;
	}
	/* Connect POTS through to the PSTN line. */
	j->pld_slicw.bits.rly1 = 0;
	outb(j->pld_slicw.byte, j->XILINXbase + 0x01);
	j->flags.pots_pstn = 1;
	return 1;
}
/*
 * Start ringing the attached phone.
 *
 * The original Internet PhoneJACK (DSP id low byte 0x20) drives the
 * ringer directly through GPIO bits; all other cards (LineJACK,
 * PhoneJACK Lite, PhoneJACK PCI) ring through the SLIC.
 */
static void ixj_ring_on(IXJ *j)
{
	if (ixjdebug & 0x0004)
		printk(KERN_INFO "IXJ Ring On /dev/phone%d\n", j->board);

	if (j->dsp.low == 0x20) {
		/* Internet PhoneJACK: raise the ring signal via GPIO. */
		j->gpio.bytes.high = 0x0B;
		j->gpio.bytes.low = 0x00;
		j->gpio.bits.gpio1 = 1;
		j->gpio.bits.gpio2 = 1;
		j->gpio.bits.gpio5 = 0;
		ixj_WriteDSPCommand(j->gpio.word, j);	/* send the ring signal */
	} else {
		/* SLIC-based cards: switch the SLIC into its ringing state. */
		SLIC_SetState(PLD_SLIC_STATE_RINGING, j);
	}
}
/*
 * Get or set the Smart Cable ADC receive gain (Internet PhoneCARD only).
 *
 * val == -1 queries the current RX PGA gain; 0..0x1F programs a new gain
 * into Smart Cable register 6 and un-mutes handset and line out.
 * Returns the (new) gain, or -1 on a bad value or unsupported card.
 */
static int ixj_siadc(IXJ *j, int val)
{
	if (j->cardtype != QTI_PHONECARD || !j->flags.pcmciascp)
		return -1;
	if (val == -1)
		return j->siadc.bits.rxg;	/* query only */
	if (val < 0 || val > 0x1F)
		return -1;			/* gain out of range */

	j->siadc.bits.hom = 0;	/* Handset Out Mute off */
	j->siadc.bits.lom = 0;	/* Line Out Mute off */
	j->siadc.bits.rxg = val;	/* RX PGA Gain */
	j->psccr.bits.addr = 6;	/* Smart Cable register address */
	j->psccr.bits.rw = 0;	/* write */
	j->psccr.bits.dev = 0;
	outb(j->siadc.byte, j->XILINXbase + 0x00);
	outb(j->psccr.byte, j->XILINXbase + 0x01);
	ixj_PCcontrol_wait(j);
	return j->siadc.bits.rxg;
}
/*
 * Get or set the Smart Cable DAC transmit gain (Internet PhoneCARD only).
 *
 * val == -1 queries the current TX PGA gain; 0..0x1F programs a new gain
 * into Smart Cable register 7 with both speaker channels muted.
 * Returns the (new) gain, or -1 on a bad value or unsupported card.
 */
static int ixj_sidac(IXJ *j, int val)
{
	if (j->cardtype != QTI_PHONECARD || !j->flags.pcmciascp)
		return -1;
	if (val == -1)
		return j->sidac.bits.txg;	/* query only */
	if (val < 0 || val > 0x1F)
		return -1;			/* gain out of range */

	j->sidac.bits.srm = 1;	/* Speaker Right Mute */
	j->sidac.bits.slm = 1;	/* Speaker Left Mute */
	j->sidac.bits.txg = val;	/* TX PGA Gain */
	j->psccr.bits.addr = 7;	/* Smart Cable register address */
	j->psccr.bits.rw = 0;	/* write */
	j->psccr.bits.dev = 0;
	outb(j->sidac.byte, j->XILINXbase + 0x00);
	outb(j->psccr.byte, j->XILINXbase + 0x01);
	ixj_PCcontrol_wait(j);
	return j->sidac.bits.txg;
}
/*
 * Poll the Smart Cable on the Internet PhoneCARD (PCMCIA).
 *
 * In normal operation (pcmciastate == 0) this reports the hook state:
 * returns 1 for off hook / event detected, 0 otherwise.  When a reset or
 * data-ready condition is seen, j->flags.pcmciastate steps through a
 * re-initialisation sequence across successive calls:
 *   4 -> wait for the data-ready flag to clear
 *   3 -> assert reset and start a 2 second settle timer
 *   2 -> wait out the timer, then release reset and power
 *   1 -> probe the cable type and program all Smart Cable codec registers
 * All intermediate states return 0 (treated as on hook by the caller).
 * NOTE(review): register/bit semantics are taken from the inline
 * comments below; they are not independently verifiable from this file.
 */
static int ixj_pcmcia_cable_check(IXJ *j)
{
	j->pccr1.byte = inb_p(j->XILINXbase + 0x03);
	if (!j->flags.pcmciastate) {
		/* Normal operation: check for reset/data-ready, then hook state. */
		j->pccr2.byte = inb_p(j->XILINXbase + 0x02);
		if (j->pccr1.bits.drf || j->pccr2.bits.rstc) {
			j->flags.pcmciastate = 4;	/* card needs re-initialisation */
			return 0;
		}
		if (j->pccr1.bits.ed) {
			/* Event detect: latch detect bit into LED2 and report it. */
			j->pccr1.bits.ed = 0;
			j->psccr.bits.dev = 3;
			j->psccr.bits.rw = 1;	/* read the SLIC register */
			outw_p(j->psccr.byte << 8, j->XILINXbase + 0x00);
			ixj_PCcontrol_wait(j);
			j->pslic.byte = inw_p(j->XILINXbase + 0x00) & 0xFF;
			j->pslic.bits.led2 = j->pslic.bits.det ? 1 : 0;
			j->psccr.bits.dev = 3;
			j->psccr.bits.rw = 0;	/* write it back with LED2 updated */
			outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
			ixj_PCcontrol_wait(j);
			return j->pslic.bits.led2 ? 1 : 0;
		} else if (j->flags.pcmciasct) {
			return j->r_hook;	/* cable present: report last known hook state */
		} else {
			return 1;
		}
	} else if (j->flags.pcmciastate == 4) {
		/* State 4: wait until the data-ready flag drops. */
		if (!j->pccr1.bits.drf) {
			j->flags.pcmciastate = 3;
		}
		return 0;
	} else if (j->flags.pcmciastate == 3) {
		/* State 3: assert reset and arm a 2 second settle timer. */
		j->pccr2.bits.pwr = 0;
		j->pccr2.bits.rstc = 1;
		outb(j->pccr2.byte, j->XILINXbase + 0x02);
		j->checkwait = jiffies + (hertz * 2);
		j->flags.incheck = 1;
		j->flags.pcmciastate = 2;
		return 0;
	} else if (j->flags.pcmciastate == 2) {
		/* State 2: hold reset until the timer expires, then release it. */
		if (j->flags.incheck) {
			if (time_before(jiffies, j->checkwait)) {
				return 0;
			} else {
				j->flags.incheck = 0;
			}
		}
		j->pccr2.bits.pwr = 0;
		j->pccr2.bits.rstc = 0;
		outb_p(j->pccr2.byte, j->XILINXbase + 0x02);
		j->flags.pcmciastate = 1;
		return 0;
	} else if (j->flags.pcmciastate == 1) {
		/* State 1: probe the cable and program the Smart Cable codec. */
		j->flags.pcmciastate = 0;
		if (!j->pccr1.bits.drf) {
			j->psccr.bits.dev = 3;
			j->psccr.bits.rw = 1;
			outb_p(j->psccr.byte, j->XILINXbase + 0x01);
			ixj_PCcontrol_wait(j);
			j->flags.pcmciascp = 1;		/* Set Cable Present Flag */
			j->flags.pcmciasct = (inw_p(j->XILINXbase + 0x00) >> 8) & 0x03;	/* Get Cable Type */
			if (j->flags.pcmciasct == 3) {
				/* Unrecognised cable type: restart the sequence. */
				j->flags.pcmciastate = 4;
				return 0;
			} else if (j->flags.pcmciasct == 0) {
				j->pccr2.bits.pwr = 1;
				j->pccr2.bits.rstc = 0;
				outb_p(j->pccr2.byte, j->XILINXbase + 0x02);
				j->port = PORT_SPEAKER;
			} else {
				j->port = PORT_POTS;
			}
			/* Smart Cable register 1: power control. */
			j->sic1.bits.cpd = 0;	/* Chip Power Down */
			j->sic1.bits.mpd = 0;	/* MIC Bias Power Down */
			j->sic1.bits.hpd = 0;	/* Handset Bias Power Down */
			j->sic1.bits.lpd = 0;	/* Line Bias Power Down */
			j->sic1.bits.spd = 1;	/* Speaker Drive Power Down */
			j->psccr.bits.addr = 1;	/* R/W Smart Cable Register Address */
			j->psccr.bits.rw = 0;	/* Read / Write flag */
			j->psccr.bits.dev = 0;
			outb(j->sic1.byte, j->XILINXbase + 0x00);
			outb(j->psccr.byte, j->XILINXbase + 0x01);
			ixj_PCcontrol_wait(j);
			/* Smart Cable register 2: loopback / PLL / filter control. */
			j->sic2.bits.al = 0;	/* Analog Loopback DAC analog -> ADC analog */
			j->sic2.bits.dl2 = 0;	/* Digital Loopback DAC -> ADC one bit */
			j->sic2.bits.dl1 = 0;	/* Digital Loopback ADC -> DAC one bit */
			j->sic2.bits.pll = 0;	/* 1 = div 10, 0 = div 5 */
			j->sic2.bits.hpd = 0;	/* HPF disable */
			j->psccr.bits.addr = 2;	/* R/W Smart Cable Register Address */
			j->psccr.bits.rw = 0;	/* Read / Write flag */
			j->psccr.bits.dev = 0;
			outb(j->sic2.byte, j->XILINXbase + 0x00);
			outb(j->psccr.byte, j->XILINXbase + 0x01);
			ixj_PCcontrol_wait(j);
			/* Smart Cable registers 3/4: PLL divide and multiply. */
			j->psccr.bits.addr = 3;	/* R/W Smart Cable Register Address */
			j->psccr.bits.rw = 0;	/* Read / Write flag */
			j->psccr.bits.dev = 0;
			outb(0x00, j->XILINXbase + 0x00);	/* PLL Divide N1 */
			outb(j->psccr.byte, j->XILINXbase + 0x01);
			ixj_PCcontrol_wait(j);
			j->psccr.bits.addr = 4;	/* R/W Smart Cable Register Address */
			j->psccr.bits.rw = 0;	/* Read / Write flag */
			j->psccr.bits.dev = 0;
			outb(0x09, j->XILINXbase + 0x00);	/* PLL Multiply M1 */
			outb(j->psccr.byte, j->XILINXbase + 0x01);
			ixj_PCcontrol_wait(j);
			/* Smart Cable register 5: input gain / mute selection. */
			j->sirxg.bits.lig = 1;	/* Line In Gain */
			j->sirxg.bits.lim = 1;	/* Line In Mute */
			j->sirxg.bits.mcg = 0;	/* MIC In Gain was 3 */
			j->sirxg.bits.mcm = 0;	/* MIC In Mute */
			j->sirxg.bits.him = 0;	/* Handset In Mute */
			j->sirxg.bits.iir = 1;	/* IIR */
			j->psccr.bits.addr = 5;	/* R/W Smart Cable Register Address */
			j->psccr.bits.rw = 0;	/* Read / Write flag */
			j->psccr.bits.dev = 0;
			outb(j->sirxg.byte, j->XILINXbase + 0x00);
			outb(j->psccr.byte, j->XILINXbase + 0x01);
			ixj_PCcontrol_wait(j);
			/* Default ADC/DAC gains, then register 9: output attenuation. */
			ixj_siadc(j, 0x17);
			ixj_sidac(j, 0x1D);
			j->siaatt.bits.sot = 0;
			j->psccr.bits.addr = 9;	/* R/W Smart Cable Register Address */
			j->psccr.bits.rw = 0;	/* Read / Write flag */
			j->psccr.bits.dev = 0;
			outb(j->siaatt.byte, j->XILINXbase + 0x00);
			outb(j->psccr.byte, j->XILINXbase + 0x01);
			ixj_PCcontrol_wait(j);
			if (j->flags.pcmciasct == 1 && !j->readers && !j->writers) {
				/* POTS cable with no open file handles: power the SLIC down. */
				j->psccr.byte = j->pslic.byte = 0;
				j->pslic.bits.powerdown = 1;
				j->psccr.bits.dev = 3;
				j->psccr.bits.rw = 0;
				outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
				ixj_PCcontrol_wait(j);
			}
		}
		return 0;
	} else {
		j->flags.pcmciascp = 0;
		return 0;
	}
	return 0;
}
/*
 * Read and debounce the hook state of the attached phone.
 *
 * Returns a bit mask: bit 0 set = off hook, bit 1 set = the selected
 * port is considered "active" (speaker/handset port, PSTN in
 * conversation mode, or a powered PhoneCARD cable).  Also raises the
 * hookstate/flash exceptions towards user space when the state changes.
 */
static int ixj_hookstate(IXJ *j)
{
	int fOffHook = 0;

	switch (j->cardtype) {
	case QTI_PHONEJACK:
		/* Hook switch is wired to GPIO 3 on the original PhoneJACK. */
		ixj_gpio_read(j);
		fOffHook = j->gpio.bits.gpio3read ? 1 : 0;
		break;
	case QTI_LINEJACK:
	case QTI_PHONEJACK_LITE:
	case QTI_PHONEJACK_PCI:
		SLIC_GetState(j);
		if(j->cardtype == QTI_LINEJACK && j->flags.pots_pstn == 1 && (j->readers || j->writers)) {
			/* POTS port is relayed to the PSTN line; debounce the
			   potspstn hook bit for ~2 jiffies before trusting it. */
			fOffHook = j->pld_slicr.bits.potspstn ? 1 : 0;
			if(fOffHook != j->p_hook) {
				if(!j->checkwait) {
					j->checkwait = jiffies;
				}
				if(time_before(jiffies, j->checkwait + 2)) {
					fOffHook ^= 1;	/* too soon: report the old state */
				} else {
					j->checkwait = 0;
				}
				j->p_hook = fOffHook;
				printk("IXJ : /dev/phone%d pots-pstn hookstate check %d at %ld\n", j->board, fOffHook, jiffies);
			}
		} else {
			if (j->pld_slicr.bits.state == PLD_SLIC_STATE_ACTIVE ||
			    j->pld_slicr.bits.state == PLD_SLIC_STATE_STANDBY) {
				if (j->flags.ringing || j->flags.cringing) {
					/* While ringing, re-read after a short delay and
					   restart the ring if the SLIC dropped out of it. */
					if (!in_interrupt()) {
						msleep(20);
					}
					SLIC_GetState(j);
					if (j->pld_slicr.bits.state == PLD_SLIC_STATE_RINGING) {
						ixj_ring_on(j);
					}
				}
				if (j->cardtype == QTI_PHONEJACK_PCI) {
					j->pld_scrr.byte = inb_p(j->XILINXbase);
					fOffHook = j->pld_scrr.pcib.det ? 1 : 0;
				} else
					fOffHook = j->pld_slicr.bits.det ? 1 : 0;
			}
		}
		break;
	case QTI_PHONECARD:
		fOffHook = ixj_pcmcia_cable_check(j);
		break;
	}
	if (j->r_hook != fOffHook) {
		/* Hook state changed: signal user space, or start the flash
		   timer on an on-hook transition. */
		j->r_hook = fOffHook;
		if (j->port == PORT_SPEAKER || j->port == PORT_HANDSET) { // || (j->port == PORT_PSTN && j->flags.pots_pstn == 0)) {
			j->ex.bits.hookstate = 1;
			ixj_kill_fasync(j, SIG_HOOKSTATE, POLL_IN);
		} else if (!fOffHook) {
			j->flash_end = jiffies + ((60 * hertz) / 100);
		}
	}
	if (fOffHook) {
		/* Back off hook within the flash window: report a hook flash. */
		if(time_before(jiffies, j->flash_end)) {
			j->ex.bits.flash = 1;
			j->flash_end = 0;
			ixj_kill_fasync(j, SIG_FLASH, POLL_IN);
		}
	} else {
		/* Still inside the flash window: pretend we are off hook. */
		if(time_before(jiffies, j->flash_end)) {
			fOffHook = 1;
		}
	}
	if (j->port == PORT_PSTN && j->daa_mode == SOP_PU_CONVERSATION)
		fOffHook |= 2;
	if (j->port == PORT_SPEAKER) {
		if(j->cardtype == QTI_PHONECARD) {
			if(j->flags.pcmciascp && j->flags.pcmciasct) {
				fOffHook |= 2;
			}
		} else {
			fOffHook |= 2;
		}
	}
	if (j->port == PORT_HANDSET)
		fOffHook |= 2;
	return fOffHook;
}
/*
 * Stop ringing the attached phone.
 *
 * The original Internet PhoneJACK (DSP id low byte 0x20) clears the ring
 * via GPIO; SLIC-based cards return the SLIC to standby, unless a caller
 * ID burst is being played (the SLIC must stay put during CID).
 */
static void ixj_ring_off(IXJ *j)
{
	if (ixjdebug & 0x0004)
		printk(KERN_INFO "IXJ Ring Off\n");

	if (j->dsp.low == 0x20) {
		/* Internet PhoneJACK: drop the ring signal via GPIO. */
		j->gpio.bytes.high = 0x0B;
		j->gpio.bytes.low = 0x00;
		j->gpio.bits.gpio1 = 0;
		j->gpio.bits.gpio2 = 1;
		j->gpio.bits.gpio5 = 0;
		ixj_WriteDSPCommand(j->gpio.word, j);
	} else {
		/* SLIC-based cards. */
		if (!j->flags.cidplay)
			SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
		SLIC_GetState(j);
	}
}
/*
 * Begin cadence ringing on the attached phone.
 *
 * Sets flags.cringing and starts either the custom cadence in
 * j->cadence_f[5] or the fixed j->ring_cadence bit pattern (stepped by
 * the timer code from j->ring_cadence_t downwards).  If the phone is
 * already off hook, ringing is aborted immediately.
 */
static void ixj_ring_start(IXJ *j)
{
	j->flags.cringing = 1;
	if (ixjdebug & 0x0004)
		printk(KERN_INFO "IXJ Cadence Ringing Start /dev/phone%d\n", j->board);
	if (ixj_hookstate(j) & 1) {
		/* Already off hook: cancel the ring before it starts. */
		if (j->port == PORT_POTS)
			ixj_ring_off(j);
		j->flags.cringing = 0;
		if (ixjdebug & 0x0004)
			printk(KERN_INFO "IXJ Cadence Ringing Stopped /dev/phone%d off hook\n", j->board);
	} else if(j->cadence_f[5].enable && (!j->cadence_f[5].en_filter)) {
		/* User-supplied ring cadence in filter slot 5. */
		j->ring_cadence_jif = jiffies;
		j->flags.cidsent = j->flags.cidring = 0;
		j->cadence_f[5].state = 0;
		if(j->cadence_f[5].on1)
			ixj_ring_on(j);
	} else {
		/* Built-in ring cadence bit pattern, 16 steps from bit 15 down. */
		j->ring_cadence_jif = jiffies;
		j->ring_cadence_t = 15;
		if (j->ring_cadence & 1 << j->ring_cadence_t) {
			ixj_ring_on(j);
		} else {
			ixj_ring_off(j);
		}
		j->flags.cidsent = j->flags.cidring = j->flags.firstring = 0;
	}
}
/*
 * Ring the phone synchronously with a fixed 1 s on / 3 s off pattern
 * for up to j->maxrings cycles.
 *
 * Returns 1 as soon as the phone is answered (off hook, double-checked
 * after a 10 ms settle during the off phase), 0 if nobody answered.
 * Sleeps in 1-jiffy steps and bails out of each phase early on a
 * pending signal.
 */
static int ixj_ring(IXJ *j)
{
	char cntr;
	unsigned long jif;
	j->flags.ringing = 1;
	if (ixj_hookstate(j) & 1) {
		/* Already off hook: nothing to ring. */
		ixj_ring_off(j);
		j->flags.ringing = 0;
		return 1;
	}
	for (cntr = 0; cntr < j->maxrings; cntr++) {
		/* Ring-on phase: 1 second. */
		jif = jiffies + (1 * hertz);
		ixj_ring_on(j);
		while (time_before(jiffies, jif)) {
			if (ixj_hookstate(j) & 1) {
				ixj_ring_off(j);
				j->flags.ringing = 0;
				return 1;
			}
			schedule_timeout_interruptible(1);
			if (signal_pending(current))
				break;
		}
		/* Ring-off phase: 3 seconds. */
		jif = jiffies + (3 * hertz);
		ixj_ring_off(j);
		while (time_before(jiffies, jif)) {
			if (ixj_hookstate(j) & 1) {
				/* Debounce: confirm off hook after 10 ms. */
				msleep(10);
				if (ixj_hookstate(j) & 1) {
					j->flags.ringing = 0;
					return 1;
				}
			}
			schedule_timeout_interruptible(1);
			if (signal_pending(current))
				break;
		}
	}
	ixj_ring_off(j);
	j->flags.ringing = 0;
	return 0;
}
/*
 * Open handler for /dev/phoneX.
 *
 * Enforces single-reader/single-writer access (returns -EBUSY on a
 * second open for the same mode), powers up the PhoneCARD SLIC, and
 * resets per-open caller ID and frame counters.  Returns -ENODEV if
 * the board was never probed (no DSP base address).
 */
static int ixj_open(struct phone_device *p, struct file *file_p)
{
	IXJ *j = get_ixj(p->board);
	file_p->private_data = j;

	if (!j->DSPbase)
		return -ENODEV;

	if (file_p->f_mode & FMODE_READ) {
		if(!j->readers) {
	                j->readers++;
        	} else {
                	return -EBUSY;
		}
	}

	if (file_p->f_mode & FMODE_WRITE) {
		if(!j->writers) {
	                j->writers++;
        	} else {
			/* Roll back the reader count taken above before failing. */
			if (file_p->f_mode & FMODE_READ){
				j->readers--;
			}
                	return -EBUSY;
		}
	}

	if (j->cardtype == QTI_PHONECARD) {
		/* Wake the PhoneCARD SLIC out of powerdown. */
		j->pslic.bits.powerdown = 0;
		j->psccr.bits.dev = 3;
		j->psccr.bits.rw = 0;
		outw_p(j->psccr.byte << 8 | j->pslic.byte, j->XILINXbase + 0x00);
		ixj_PCcontrol_wait(j);
	}

	j->flags.cidplay = 0;
	j->flags.cidcw_ack = 0;

	if (ixjdebug & 0x0002)
		printk(KERN_INFO "Opening board %d\n", p->board);

	j->framesread = j->frameswritten = 0;
	return 0;
}
static int ixj_release(struct inode *inode, struct file *file_p)
{
IXJ_TONE ti;
int cnt;
IXJ *j = file_p->private_data;
int board = j->p.board;
/*
* Set up locks to ensure that only one process is talking to the DSP at a time.
* This is necessary to keep the DSP from locking up.
*/
while(test_and_set_bit(board, (void *)&j->busyflags) != 0)
schedule_timeout_interruptible(1);
if (ixjdebug & 0x0002)
printk(KERN_INFO "Closing board %d\n", NUM(inode));
if (j->cardtype == QTI_PHONECARD)
ixj_set_port(j, PORT_SPEAKER);
else
ixj_set_port(j, PORT_POTS);
aec_stop(j);
ixj_play_stop(j);
ixj_record_stop(j);
set_play_volume(j, 0x100);
set_rec_volume(j, 0x100);
ixj_ring_off(j);
/* Restore the tone table to default settings. */
ti.tone_index = 10;
ti.gain0 = 1;
ti.freq0 = hz941;
ti.gain1 = 0;
ti.freq1 = hz1209;
ixj_init_tone(j, &ti);
ti.tone_index = 11;
ti.gain0 = 1;
ti.freq0 = hz941;
ti.gain1 = 0;
ti.freq1 = hz1336;
ixj_init_tone(j, &ti);
ti.tone_index = 12;
ti.gain0 = 1;
ti.freq0 = hz941;
ti.gain1 = 0;
ti.freq1 = hz1477;
ixj_init_tone(j, &ti);
ti.tone_index = 13;
ti.gain0 = 1;
ti.freq0 = hz800;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 14;
ti.gain0 = 1;
ti.freq0 = hz1000;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 15;
ti.gain0 = 1;
ti.freq0 = hz1250;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 16;
ti.gain0 = 1;
ti.freq0 = hz950;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 17;
ti.gain0 = 1;
ti.freq0 = hz1100;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 18;
ti.gain0 = 1;
ti.freq0 = hz1400;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 19;
ti.gain0 = 1;
ti.freq0 = hz1500;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 20;
ti.gain0 = 1;
ti.freq0 = hz1600;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 21;
ti.gain0 = 1;
ti.freq0 = hz1800;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 22;
ti.gain0 = 1;
ti.freq0 = hz2100;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 23;
ti.gain0 = 1;
ti.freq0 = hz1300;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 24;
ti.gain0 = 1;
ti.freq0 = hz2450;
ti.gain1 = 0;
ti.freq1 = 0;
ixj_init_tone(j, &ti);
ti.tone_index = 25;
ti.gain0 = 1;
ti.freq0 = hz350;
ti.gain1 = 0;
ti.freq1 = hz440;
ixj_init_tone(j, &ti);
ti.tone_index = 26;
ti.gain0 = 1;
ti.freq0 = hz440;
ti.gain1 = 0;
ti.freq1 = hz480;
ixj_init_tone(j, &ti);
ti.tone_index = 27;
ti.gain0 = 1;
ti.freq0 = hz480;
ti.gain1 = 0;
ti.freq1 = hz620;
ixj_init_tone(j, &ti);
set_rec_depth(j, 2); /* Set Record Channel Limit to 2 frames */
set_play_depth(j, 2); /* Set Playback Channel Limit to 2 frames */
j->ex.bits.dtmf_ready = 0;
j->dtmf_state = 0;
j->dtmf_wp = j->dtmf_rp = 0;
j->rec_mode = j->play_mode = -1;
j->flags.ringing = 0;
j->maxrings = MAXRINGS;
j->ring_cadence = USA_RING_CADENCE;
if(j->cadence_f[5].enable) {
j->cadence_f[5].enable = j->cadence_f[5].en_filter = j->cadence_f[5].state = 0;
}
j->drybuffer = 0;
j->winktime = 320;
j->flags.dtmf_oob = 0;
for (cnt = 0; cnt < 4; cnt++)
j->cadence_f[cnt].enable = 0;
idle(j);
if(j->cardtype == QTI_PHONECARD) {
SLIC_SetState(PLD_SLIC_STATE_OC, j);
}
if (file_p->f_mode & FMODE_READ)
j->readers--;
if (file_p->f_mode & FMODE_WRITE)
j->writers--;
if (j->read_buffer && !j->readers) {
kfree(j->read_buffer);
j->read_buffer = NULL;
j->read_buffer_size = 0;
}
if (j->write_buffer && !j->writers) {
kfree(j->write_buffer);
j->write_buffer = NULL;
j->write_buffer_size = 0;
}
j->rec_codec = j->play_codec = 0;
j->rec_frame_size = j->play_frame_size = 0;
j->flags.cidsent = j->flags.cidring = 0;
if(j->cardtype == QTI_LINEJACK && !j->readers && !j->writers) {
ixj_set_port(j, PORT_PSTN);
daa_set_mode(j, SOP_PU_SLEEP);
ixj_set_pots(j, 1);
}
ixj_WriteDSPCommand(0x0FE3, j); /* Put the DSP in 1/5 power mode. */
/* Set up the default signals for events */
for (cnt = 0; cnt < 35; cnt++)
j->ixj_signals[cnt] = SIGIO;
/* Set the excetion signal enable flags */
j->ex_sig.bits.dtmf_ready = j->ex_sig.bits.hookstate = j->ex_sig.bits.flash = j->ex_sig.bits.pstn_ring =
j->ex_sig.bits.caller_id = j->ex_sig.bits.pstn_wink = j->ex_sig.bits.f0 = j->ex_sig.bits.f1 = j->ex_sig.bits.f2 =
j->ex_sig.bits.f3 = j->ex_sig.bits.fc0 = j->ex_sig.bits.fc1 = j->ex_sig.bits.fc2 = j->ex_sig.bits.fc3 = 1;
file_p->private_data = NULL;
clear_bit(board, &j->busyflags);
return 0;
}
/*
 * Poll the DSP tone filters and advance the four cadence detectors.
 *
 * Reads the DSP frame counter (command 0x5144); if no new frame has
 * arrived since the last call, or DTMF processing is in progress,
 * returns 1 without touching the filters.  Otherwise it reads each
 * filter's history word (commands 0x5154+n / 0x515C) and steps the
 * per-filter cadence state machine in j->cadence_f[n]:
 *   states 1/3/5 = inside the 1st/2nd/3rd "on" burst,
 *   states 2/4/6 = inside the following "off" gap,
 *   state  7     = full cadence matched (raises SIG_FCn),
 *   state  0     = idle / cadence broken.
 * Timing windows (onNmin/dot/max, offNmin/dot/max) are jiffy deadlines
 * with a +/- `var` percent tolerance.  Raw filter edges additionally
 * raise SIG_Fn when enabled in j->filter_en[].
 * Returns 0 on success, -1 on a DSP command failure.
 * NOTE(review): history bits appear to encode tone-on (bits 0-1) vs
 * tone-off (bits 2-3) edges, judging by the transitions below — confirm
 * against the Quicknet DSP documentation.
 */
static int read_filters(IXJ *j)
{
	unsigned short fc, cnt, trg;
	int var;

	trg = 0;
	/* Read the current frame counter from the DSP. */
	if (ixj_WriteDSPCommand(0x5144, j)) {
		if(ixjdebug & 0x0001) {
			printk(KERN_INFO "Read Frame Counter failed!\n");
		}
		return -1;
	}
	fc = j->ssr.high << 8 | j->ssr.low;
	if (fc == j->frame_count)
		return 1;	/* no new frame since last poll */

	j->frame_count = fc;
	if (j->dtmf_proc)
		return 1;	/* DTMF detection owns the DSP right now */

	var = 10;	/* cadence timing tolerance, percent */

	for (cnt = 0; cnt < 4; cnt++) {
		/* Select filter `cnt` and fetch its history word. */
		if (ixj_WriteDSPCommand(0x5154 + cnt, j)) {
			if(ixjdebug & 0x0001) {
				printk(KERN_INFO "Select Filter %d failed!\n", cnt);
			}
			return -1;
		}
		if (ixj_WriteDSPCommand(0x515C, j)) {
			if(ixjdebug & 0x0001) {
				printk(KERN_INFO "Read Filter History %d failed!\n", cnt);
			}
			return -1;
		}
		j->filter_hist[cnt] = j->ssr.high << 8 | j->ssr.low;

		if (j->cadence_f[cnt].enable) {
			if (j->filter_hist[cnt] & 3 && !(j->filter_hist[cnt] & 12)) {
				/* Tone turned on: start or continue the cadence. */
				if (j->cadence_f[cnt].state == 0) {
					j->cadence_f[cnt].state = 1;
					j->cadence_f[cnt].on1min = jiffies + (long)((j->cadence_f[cnt].on1 * (hertz * (100 - var)) / 10000));
					j->cadence_f[cnt].on1dot = jiffies + (long)((j->cadence_f[cnt].on1 * (hertz * (100)) / 10000));
					j->cadence_f[cnt].on1max = jiffies + (long)((j->cadence_f[cnt].on1 * (hertz * (100 + var)) / 10000));
				} else if (j->cadence_f[cnt].state == 2 &&
					   (time_after(jiffies, j->cadence_f[cnt].off1min) &&
					    time_before(jiffies, j->cadence_f[cnt].off1max))) {
					/* off1 gap landed in its window: expect on2 or done. */
					if (j->cadence_f[cnt].on2) {
						j->cadence_f[cnt].state = 3;
						j->cadence_f[cnt].on2min = jiffies + (long)((j->cadence_f[cnt].on2 * (hertz * (100 - var)) / 10000));
						j->cadence_f[cnt].on2dot = jiffies + (long)((j->cadence_f[cnt].on2 * (hertz * (100)) / 10000));
						j->cadence_f[cnt].on2max = jiffies + (long)((j->cadence_f[cnt].on2 * (hertz * (100 + var)) / 10000));
					} else {
						j->cadence_f[cnt].state = 7;
					}
				} else if (j->cadence_f[cnt].state == 4 &&
					   (time_after(jiffies, j->cadence_f[cnt].off2min) &&
					    time_before(jiffies, j->cadence_f[cnt].off2max))) {
					/* off2 gap landed in its window: expect on3 or done. */
					if (j->cadence_f[cnt].on3) {
						j->cadence_f[cnt].state = 5;
						j->cadence_f[cnt].on3min = jiffies + (long)((j->cadence_f[cnt].on3 * (hertz * (100 - var)) / 10000));
						j->cadence_f[cnt].on3dot = jiffies + (long)((j->cadence_f[cnt].on3 * (hertz * (100)) / 10000));
						j->cadence_f[cnt].on3max = jiffies + (long)((j->cadence_f[cnt].on3 * (hertz * (100 + var)) / 10000));
					} else {
						j->cadence_f[cnt].state = 7;
					}
				} else {
					j->cadence_f[cnt].state = 0;	/* out-of-window edge: reset */
				}
			} else if (j->filter_hist[cnt] & 12 && !(j->filter_hist[cnt] & 3)) {
				/* Tone turned off: check the "on" burst length. */
				if (j->cadence_f[cnt].state == 1) {
					if(!j->cadence_f[cnt].on1) {
						j->cadence_f[cnt].state = 7;
					} else if((time_after(jiffies, j->cadence_f[cnt].on1min) &&
					          time_before(jiffies, j->cadence_f[cnt].on1max))) {
						if(j->cadence_f[cnt].off1) {
							j->cadence_f[cnt].state = 2;
							j->cadence_f[cnt].off1min = jiffies + (long)((j->cadence_f[cnt].off1 * (hertz * (100 - var)) / 10000));
							j->cadence_f[cnt].off1dot = jiffies + (long)((j->cadence_f[cnt].off1 * (hertz * (100)) / 10000));
							j->cadence_f[cnt].off1max = jiffies + (long)((j->cadence_f[cnt].off1 * (hertz * (100 + var)) / 10000));
						} else {
							j->cadence_f[cnt].state = 7;
						}
					} else {
						j->cadence_f[cnt].state = 0;
					}
				} else if (j->cadence_f[cnt].state == 3) {
					if((time_after(jiffies, j->cadence_f[cnt].on2min) &&
					    time_before(jiffies, j->cadence_f[cnt].on2max))) {
						if(j->cadence_f[cnt].off2) {
							j->cadence_f[cnt].state = 4;
							j->cadence_f[cnt].off2min = jiffies + (long)((j->cadence_f[cnt].off2 * (hertz * (100 - var)) / 10000));
							j->cadence_f[cnt].off2dot = jiffies + (long)((j->cadence_f[cnt].off2 * (hertz * (100)) / 10000));
							j->cadence_f[cnt].off2max = jiffies + (long)((j->cadence_f[cnt].off2 * (hertz * (100 + var)) / 10000));
						} else {
							j->cadence_f[cnt].state = 7;
						}
					} else {
						j->cadence_f[cnt].state = 0;
					}
				} else if (j->cadence_f[cnt].state == 5) {
					if ((time_after(jiffies, j->cadence_f[cnt].on3min) &&
					     time_before(jiffies, j->cadence_f[cnt].on3max))) {
						if(j->cadence_f[cnt].off3) {
							j->cadence_f[cnt].state = 6;
							j->cadence_f[cnt].off3min = jiffies + (long)((j->cadence_f[cnt].off3 * (hertz * (100 - var)) / 10000));
							j->cadence_f[cnt].off3dot = jiffies + (long)((j->cadence_f[cnt].off3 * (hertz * (100)) / 10000));
							j->cadence_f[cnt].off3max = jiffies + (long)((j->cadence_f[cnt].off3 * (hertz * (100 + var)) / 10000));
						} else {
							j->cadence_f[cnt].state = 7;
						}
					} else {
						j->cadence_f[cnt].state = 0;
					}
				} else {
					j->cadence_f[cnt].state = 0;
				}
			} else {
				/* No edge this frame: finish a trailing "on" burst whose
				   nominal duration has elapsed and has nothing after it. */
				switch(j->cadence_f[cnt].state) {
				case 1:
					if(time_after(jiffies, j->cadence_f[cnt].on1dot) &&
					   !j->cadence_f[cnt].off1 &&
					   !j->cadence_f[cnt].on2 && !j->cadence_f[cnt].off2 &&
					   !j->cadence_f[cnt].on3 && !j->cadence_f[cnt].off3) {
						j->cadence_f[cnt].state = 7;
					}
					break;
				case 3:
					if(time_after(jiffies, j->cadence_f[cnt].on2dot) &&
					   !j->cadence_f[cnt].off2 &&
					   !j->cadence_f[cnt].on3 && !j->cadence_f[cnt].off3) {
						j->cadence_f[cnt].state = 7;
					}
					break;
				case 5:
					if(time_after(jiffies, j->cadence_f[cnt].on3dot) &&
					   !j->cadence_f[cnt].off3) {
						j->cadence_f[cnt].state = 7;
					}
					break;
				}
			}

			if (ixjdebug & 0x0040) {
				printk(KERN_INFO "IXJ Tone Cadence state = %d /dev/phone%d at %ld\n", j->cadence_f[cnt].state, j->board, jiffies);
				switch(j->cadence_f[cnt].state) {
				case 0:
					printk(KERN_INFO "IXJ /dev/phone%d No Tone detected\n", j->board);
					break;
				case 1:
					printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %u %ld - %ld - %ld\n", j->board,
				j->cadence_f[cnt].on1, j->cadence_f[cnt].on1min, j->cadence_f[cnt].on1dot, j->cadence_f[cnt].on1max);
					break;
				case 2:
					printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].off1min,
															j->cadence_f[cnt].off1max);
					break;
				case 3:
					printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].on2min,
															j->cadence_f[cnt].on2max);
					break;
				case 4:
					printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].off2min,
															j->cadence_f[cnt].off2max);
					break;
				case 5:
					printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].on3min,
															j->cadence_f[cnt].on3max);
					break;
				case 6:
					printk(KERN_INFO "IXJ /dev/phone%d Next Tone Cadence state at %ld - %ld\n", j->board, j->cadence_f[cnt].off3min,
															j->cadence_f[cnt].off3max);
					break;
				}
			}
		}

		if (j->cadence_f[cnt].state == 7) {
			/* Full cadence matched: notify user space. */
			j->cadence_f[cnt].state = 0;
			if (j->cadence_f[cnt].enable == 1)
				j->cadence_f[cnt].enable = 0;
			switch (cnt) {
			case 0:
				if(ixjdebug & 0x0020) {
					printk(KERN_INFO "Filter Cadence 0 triggered %ld\n", jiffies);
				}
				j->ex.bits.fc0 = 1;
				ixj_kill_fasync(j, SIG_FC0, POLL_IN);
				break;
			case 1:
				if(ixjdebug & 0x0020) {
					printk(KERN_INFO "Filter Cadence 1 triggered %ld\n", jiffies);
				}
				j->ex.bits.fc1 = 1;
				ixj_kill_fasync(j, SIG_FC1, POLL_IN);
				break;
			case 2:
				if(ixjdebug & 0x0020) {
					printk(KERN_INFO "Filter Cadence 2 triggered %ld\n", jiffies);
				}
				j->ex.bits.fc2 = 1;
				ixj_kill_fasync(j, SIG_FC2, POLL_IN);
				break;
			case 3:
				if(ixjdebug & 0x0020) {
					printk(KERN_INFO "Filter Cadence 3 triggered %ld\n", jiffies);
				}
				j->ex.bits.fc3 = 1;
				ixj_kill_fasync(j, SIG_FC3, POLL_IN);
				break;
			}
		}

		/* Raw filter edge notifications, independent of cadences. */
		if (j->filter_en[cnt] && ((j->filter_hist[cnt] & 3 && !(j->filter_hist[cnt] & 12)) ||
					  (j->filter_hist[cnt] & 12 && !(j->filter_hist[cnt] & 3)))) {
			if((j->filter_hist[cnt] & 3 && !(j->filter_hist[cnt] & 12))) {
				trg = 1;
			} else if((j->filter_hist[cnt] & 12 && !(j->filter_hist[cnt] & 3))) {
				trg = 0;
			}
			switch (cnt) {
			case 0:
				if(ixjdebug & 0x0020) {
					printk(KERN_INFO "Filter 0 triggered %d at %ld\n", trg, jiffies);
				}
				j->ex.bits.f0 = 1;
				ixj_kill_fasync(j, SIG_F0, POLL_IN);
				break;
			case 1:
				if(ixjdebug & 0x0020) {
					printk(KERN_INFO "Filter 1 triggered %d at %ld\n", trg, jiffies);
				}
				j->ex.bits.f1 = 1;
				ixj_kill_fasync(j, SIG_F1, POLL_IN);
				break;
			case 2:
				if(ixjdebug & 0x0020) {
					printk(KERN_INFO "Filter 2 triggered %d at %ld\n", trg, jiffies);
				}
				j->ex.bits.f2 = 1;
				ixj_kill_fasync(j, SIG_F2, POLL_IN);
				break;
			case 3:
				if(ixjdebug & 0x0020) {
					printk(KERN_INFO "Filter 3 triggered %d at %ld\n", trg, jiffies);
				}
				j->ex.bits.f3 = 1;
				ixj_kill_fasync(j, SIG_F3, POLL_IN);
				break;
			}
		}
	}
	return 0;
}
/*
 * Poll the DSP's line monitor (command 0x7000) for DTMF activity.
 *
 * A digit is latched when dtmf_valid rises and committed when it falls:
 * either appended to the circular dtmfbuffer (raising the dtmf_ready
 * exception), or — while waiting for a CIDCW acknowledgement — checked
 * against the ACK digits 0x00/0x0D.  j->dtmf_proc acts as a non-blocking
 * re-entrancy guard.  Returns 0 on success, -1 if busy or the DSP
 * command fails.
 */
static int LineMonitor(IXJ *j)
{
	if (j->dtmf_proc)
		return -1;	/* another pass is already running */
	j->dtmf_proc = 1;

	if (ixj_WriteDSPCommand(0x7000, j))	/* Line Monitor */
		return -1;

	j->dtmf.bytes.high = j->ssr.high;
	j->dtmf.bytes.low = j->ssr.low;

	if (!j->dtmf_state && j->dtmf.bits.dtmf_valid) {
		/* Rising edge: remember which digit is being pressed. */
		j->dtmf_state = 1;
		j->dtmf_current = j->dtmf.bits.digit;
	}

	if (j->dtmf_state && !j->dtmf.bits.dtmf_valid) {
		/* Falling edge: the digit is complete. */
		if (!j->cidcw_wait) {
			j->dtmfbuffer[j->dtmf_wp++] = j->dtmf_current;
			if (j->dtmf_wp == 79)
				j->dtmf_wp = 0;	/* wrap the circular buffer */
			j->ex.bits.dtmf_ready = 1;
			if (j->ex_sig.bits.dtmf_ready)
				ixj_kill_fasync(j, SIG_DTMF_READY, POLL_IN);
		} else if (j->dtmf_current == 0x00 || j->dtmf_current == 0x0D) {
			/* CIDCW acknowledgement from the CPE display. */
			if (ixjdebug & 0x0020)
				printk("IXJ phone%d saw CIDCW Ack DTMF %d from display at %ld\n", j->board, j->dtmf_current, jiffies);
			j->flags.cidcw_ack = 1;
		}
		j->dtmf_state = 0;
	}

	j->dtmf_proc = 0;
	return 0;
}
/************************************************************************
*
* Functions to allow alaw <-> ulaw conversions.
*
************************************************************************/
/*
 * Convert a buffer of u-law samples to A-law in place, one table
 * lookup per byte.
 */
static void ulaw2alaw(unsigned char *buff, unsigned long len)
{
	static unsigned char table_ulaw2alaw[] =
	{
		0x2A, 0x2B, 0x28, 0x29, 0x2E, 0x2F, 0x2C, 0x2D, 
		0x22, 0x23, 0x20, 0x21, 0x26, 0x27, 0x24, 0x25, 
		0x3A, 0x3B, 0x38, 0x39, 0x3E, 0x3F, 0x3C, 0x3D, 
		0x32, 0x33, 0x30, 0x31, 0x36, 0x37, 0x34, 0x35, 
		0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D, 0x02, 
		0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05, 0x1A, 
		0x1B, 0x18, 0x19, 0x1E, 0x1F, 0x1C, 0x1D, 0x12, 
		0x13, 0x10, 0x11, 0x16, 0x17, 0x14, 0x15, 0x6B, 
		0x68, 0x69, 0x6E, 0x6F, 0x6C, 0x6D, 0x62, 0x63, 
		0x60, 0x61, 0x66, 0x67, 0x64, 0x65, 0x7B, 0x79, 
		0x7E, 0x7F, 0x7C, 0x7D, 0x72, 0x73, 0x70, 0x71, 
		0x76, 0x77, 0x74, 0x75, 0x4B, 0x49, 0x4F, 0x4D, 
		0x42, 0x43, 0x40, 0x41, 0x46, 0x47, 0x44, 0x45, 
		0x5A, 0x5B, 0x58, 0x59, 0x5E, 0x5F, 0x5C, 0x5D, 
		0x52, 0x52, 0x53, 0x53, 0x50, 0x50, 0x51, 0x51, 
		0x56, 0x56, 0x57, 0x57, 0x54, 0x54, 0x55, 0xD5, 
		0xAA, 0xAB, 0xA8, 0xA9, 0xAE, 0xAF, 0xAC, 0xAD, 
		0xA2, 0xA3, 0xA0, 0xA1, 0xA6, 0xA7, 0xA4, 0xA5, 
		0xBA, 0xBB, 0xB8, 0xB9, 0xBE, 0xBF, 0xBC, 0xBD, 
		0xB2, 0xB3, 0xB0, 0xB1, 0xB6, 0xB7, 0xB4, 0xB5, 
		0x8B, 0x88, 0x89, 0x8E, 0x8F, 0x8C, 0x8D, 0x82, 
		0x83, 0x80, 0x81, 0x86, 0x87, 0x84, 0x85, 0x9A, 
		0x9B, 0x98, 0x99, 0x9E, 0x9F, 0x9C, 0x9D, 0x92, 
		0x93, 0x90, 0x91, 0x96, 0x97, 0x94, 0x95, 0xEB, 
		0xE8, 0xE9, 0xEE, 0xEF, 0xEC, 0xED, 0xE2, 0xE3, 
		0xE0, 0xE1, 0xE6, 0xE7, 0xE4, 0xE5, 0xFB, 0xF9, 
		0xFE, 0xFF, 0xFC, 0xFD, 0xF2, 0xF3, 0xF0, 0xF1, 
		0xF6, 0xF7, 0xF4, 0xF5, 0xCB, 0xC9, 0xCF, 0xCD, 
		0xC2, 0xC3, 0xC0, 0xC1, 0xC6, 0xC7, 0xC4, 0xC5, 
		0xDA, 0xDB, 0xD8, 0xD9, 0xDE, 0xDF, 0xDC, 0xDD, 
		0xD2, 0xD2, 0xD3, 0xD3, 0xD0, 0xD0, 0xD1, 0xD1, 
		0xD6, 0xD6, 0xD7, 0xD7, 0xD4, 0xD4, 0xD5, 0xD5
	};
	unsigned long i;

	for (i = 0; i < len; i++)
		buff[i] = table_ulaw2alaw[buff[i]];
}
/*
 * Convert a buffer of A-law samples to u-law in place, one table
 * lookup per byte.
 */
static void alaw2ulaw(unsigned char *buff, unsigned long len)
{
	static unsigned char table_alaw2ulaw[] =
	{
		0x29, 0x2A, 0x27, 0x28, 0x2D, 0x2E, 0x2B, 0x2C, 
		0x21, 0x22, 0x1F, 0x20, 0x25, 0x26, 0x23, 0x24, 
		0x39, 0x3A, 0x37, 0x38, 0x3D, 0x3E, 0x3B, 0x3C, 
		0x31, 0x32, 0x2F, 0x30, 0x35, 0x36, 0x33, 0x34, 
		0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D, 
		0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05, 
		0x1A, 0x1B, 0x18, 0x19, 0x1E, 0x1F, 0x1C, 0x1D, 
		0x12, 0x13, 0x10, 0x11, 0x16, 0x17, 0x14, 0x15, 
		0x62, 0x63, 0x60, 0x61, 0x66, 0x67, 0x64, 0x65, 
		0x5D, 0x5D, 0x5C, 0x5C, 0x5F, 0x5F, 0x5E, 0x5E, 
		0x74, 0x76, 0x70, 0x72, 0x7C, 0x7E, 0x78, 0x7A, 
		0x6A, 0x6B, 0x68, 0x69, 0x6E, 0x6F, 0x6C, 0x6D, 
		0x48, 0x49, 0x46, 0x47, 0x4C, 0x4D, 0x4A, 0x4B, 
		0x40, 0x41, 0x3F, 0x3F, 0x44, 0x45, 0x42, 0x43, 
		0x56, 0x57, 0x54, 0x55, 0x5A, 0x5B, 0x58, 0x59, 
		0x4F, 0x4F, 0x4E, 0x4E, 0x52, 0x53, 0x50, 0x51, 
		0xA9, 0xAA, 0xA7, 0xA8, 0xAD, 0xAE, 0xAB, 0xAC, 
		0xA1, 0xA2, 0x9F, 0xA0, 0xA5, 0xA6, 0xA3, 0xA4, 
		0xB9, 0xBA, 0xB7, 0xB8, 0xBD, 0xBE, 0xBB, 0xBC, 
		0xB1, 0xB2, 0xAF, 0xB0, 0xB5, 0xB6, 0xB3, 0xB4, 
		0x8A, 0x8B, 0x88, 0x89, 0x8E, 0x8F, 0x8C, 0x8D, 
		0x82, 0x83, 0x80, 0x81, 0x86, 0x87, 0x84, 0x85, 
		0x9A, 0x9B, 0x98, 0x99, 0x9E, 0x9F, 0x9C, 0x9D, 
		0x92, 0x93, 0x90, 0x91, 0x96, 0x97, 0x94, 0x95, 
		0xE2, 0xE3, 0xE0, 0xE1, 0xE6, 0xE7, 0xE4, 0xE5, 
		0xDD, 0xDD, 0xDC, 0xDC, 0xDF, 0xDF, 0xDE, 0xDE, 
		0xF4, 0xF6, 0xF0, 0xF2, 0xFC, 0xFE, 0xF8, 0xFA, 
		0xEA, 0xEB, 0xE8, 0xE9, 0xEE, 0xEF, 0xEC, 0xED, 
		0xC8, 0xC9, 0xC6, 0xC7, 0xCC, 0xCD, 0xCA, 0xCB, 
		0xC0, 0xC1, 0xBF, 0xBF, 0xC4, 0xC5, 0xC2, 0xC3, 
		0xD6, 0xD7, 0xD4, 0xD5, 0xDA, 0xDB, 0xD8, 0xD9, 
		0xCF, 0xCF, 0xCE, 0xCE, 0xD2, 0xD3, 0xD0, 0xD1
	};
	unsigned long i;

	for (i = 0; i < len; i++)
		buff[i] = table_alaw2ulaw[buff[i]];
}
/*
 * Read handler for /dev/phoneX: deliver one recorded frame to user space.
 *
 * Blocks until j->read_buffer_ready is set by ixj_read_frame() (or the
 * phone goes on hook, which returns 0), honouring O_NONBLOCK (-EAGAIN)
 * and pending signals (-EINTR).  When the recording codec is A-law the
 * buffer is converted from the DSP's u-law form in place before the
 * copy.  flags.inread rejects concurrent readers with -EALREADY.
 * NOTE(review): the interruptible_sleep_on() call alongside the manual
 * wait-queue setup is the historical (racy, long-deprecated) idiom —
 * left untouched here.
 */
static ssize_t ixj_read(struct file * file_p, char __user *buf, size_t length, loff_t * ppos)
{
	unsigned long i = *ppos;
	IXJ * j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
	DECLARE_WAITQUEUE(wait, current);

	if (j->flags.inread)
		return -EALREADY;

	j->flags.inread = 1;

	add_wait_queue(&j->read_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	mb();

	while (!j->read_buffer_ready || (j->dtmf_state && j->flags.dtmf_oob)) {
		++j->read_wait;
		if (file_p->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&j->read_q, &wait);
			j->flags.inread = 0;
			return -EAGAIN;
		}
		if (!ixj_hookstate(j)) {
			/* Phone went on hook: end of stream. */
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&j->read_q, &wait);
			j->flags.inread = 0;
			return 0;
		}
		interruptible_sleep_on(&j->read_q);
		if (signal_pending(current)) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&j->read_q, &wait);
			j->flags.inread = 0;
			return -EINTR;
		}
	}

	remove_wait_queue(&j->read_q, &wait);
	set_current_state(TASK_RUNNING);
	/* Don't ever copy more than the user asks */
	if(j->rec_codec == ALAW)
		ulaw2alaw(j->read_buffer, min(length, j->read_buffer_size));
	i = copy_to_user(buf, j->read_buffer, min(length, j->read_buffer_size));
	j->read_buffer_ready = 0;
	if (i) {
		j->flags.inread = 0;
		return -EFAULT;
	} else {
		j->flags.inread = 0;
		return min(length, j->read_buffer_size);
	}
}
/*
 * Read entry point with PreRead/PostRead proprietary hooks.
 *
 * ixj_PreRead() decides whether the real read runs and whether
 * ixj_PostRead() is called afterwards; any other return value is
 * propagated to the caller as an error.
 */
static ssize_t ixj_enhanced_read(struct file * file_p, char __user *buf, size_t length,
			  loff_t * ppos)
{
	IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
	ssize_t nread = 0;
	int pre = ixj_PreRead(j, 0L);

	if (pre == NORMAL) {
		nread = ixj_read(file_p, buf, length, ppos);
		ixj_PostRead(j, 0L);
	} else if (pre == NOPOST) {
		nread = ixj_read(file_p, buf, length, ppos);
	} else if (pre == POSTONLY) {
		ixj_PostRead(j, 0L);
	} else {
		nread = pre;	/* PreRead error code */
	}
	return nread;
}
/*
 * Write handler for /dev/phoneX: queue one playback frame.
 *
 * Blocks until j->write_buffers_empty is raised by the playback path
 * (or the phone goes on hook, which returns 0), honouring O_NONBLOCK
 * (-EAGAIN) and pending signals (-EINTR).  Data is copied to the
 * current write pointer (wrapping to the start of the ring when it
 * would run past the end) and converted A-law -> u-law in place when
 * the playback codec is ALAW.  flags.inwrite rejects concurrent
 * writers with -EALREADY.  Note: the write pointer and empty count are
 * advanced by the caller (ixj_enhanced_write), not here.
 * NOTE(review): interruptible_sleep_on() is the historical racy idiom,
 * preserved as-is.
 */
static ssize_t ixj_write(struct file *file_p, const char __user *buf, size_t count, loff_t * ppos)
{
	unsigned long i = *ppos;
	IXJ *j = file_p->private_data;
	DECLARE_WAITQUEUE(wait, current);

	if (j->flags.inwrite)
		return -EALREADY;

	j->flags.inwrite = 1;

	add_wait_queue(&j->write_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	mb();

	while (!j->write_buffers_empty) {
		++j->write_wait;
		if (file_p->f_flags & O_NONBLOCK) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&j->write_q, &wait);
			j->flags.inwrite = 0;
			return -EAGAIN;
		}
		if (!ixj_hookstate(j)) {
			/* Phone went on hook: nothing more to play. */
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&j->write_q, &wait);
			j->flags.inwrite = 0;
			return 0;
		}
		interruptible_sleep_on(&j->write_q);
		if (signal_pending(current)) {
			set_current_state(TASK_RUNNING);
			remove_wait_queue(&j->write_q, &wait);
			j->flags.inwrite = 0;
			return -EINTR;
		}
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(&j->write_q, &wait);
	if (j->write_buffer_wp + count >= j->write_buffer_end)
		j->write_buffer_wp = j->write_buffer;	/* wrap the ring */
	i = copy_from_user(j->write_buffer_wp, buf, min(count, j->write_buffer_size));
	if (i) {
		j->flags.inwrite = 0;
		return -EFAULT;
	}
       if(j->play_codec == ALAW)
               alaw2ulaw(j->write_buffer_wp, min(count, j->write_buffer_size));
	j->flags.inwrite = 0;
	return min(count, j->write_buffer_size);
}
/*
 * Write entry point with PreWrite/PostWrite proprietary hooks.
 *
 * ixj_PreWrite() decides whether the real write runs and whether
 * ixj_PostWrite() is called on success; any other return value is
 * propagated as an error.  On a successful write the buffer write
 * pointer is advanced and the empty-buffer count decremented here
 * (ixj_write itself leaves them untouched).
 */
static ssize_t ixj_enhanced_write(struct file * file_p, const char __user *buf, size_t count, loff_t * ppos)
{
	IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
	ssize_t nwritten = 0;
	int pre = ixj_PreWrite(j, 0L);

	if (pre == NORMAL || pre == NOPOST) {
		nwritten = ixj_write(file_p, buf, count, ppos);
		if (nwritten > 0) {
			if (pre == NORMAL)
				ixj_PostWrite(j, 0L);
			j->write_buffer_wp += nwritten;
			j->write_buffers_empty--;
		}
	} else if (pre == POSTONLY) {
		ixj_PostWrite(j, 0L);
	} else {
		nwritten = pre;	/* PreWrite error code */
	}
	return nwritten;
}
/*
 * Pull one recorded frame out of the DSP's read FIFO (ports 0x0E/0x0F).
 *
 * Every 8 word-pairs the RX-ready flag is re-checked, with up to ~60 us
 * of busy-wait grace.  For the 8021-flavoured G.729 stream the first
 * word of each 10-byte subframe is discarded to yield standard G.729.
 * If an intercom partner is configured the frame is pushed straight
 * into that card's write FIFO; otherwise the read buffer is marked
 * ready and blocked readers/pollers are woken.
 */
static void ixj_read_frame(IXJ *j)
{
	int cnt, dly;

	if (j->read_buffer) {
		for (cnt = 0; cnt < j->rec_frame_size * 2; cnt += 2) {
			if (!(cnt % 16) && !IsRxReady(j)) {
				/* FIFO not ready: give it up to ~60 us. */
				dly = 0;
				while (!IsRxReady(j)) {
					if (dly++ > 5) {
						dly = 0;
						break;
					}
					udelay(10);
				}
			}
			/* Throw away word 0 of the 8021 compressed format to get standard G.729. */
			if (j->rec_codec == G729 && (cnt == 0 || cnt == 10 || cnt == 20)) {
				inb_p(j->DSPbase + 0x0E);
				inb_p(j->DSPbase + 0x0F);
			}
			*(j->read_buffer + cnt) = inb_p(j->DSPbase + 0x0E);
			*(j->read_buffer + cnt + 1) = inb_p(j->DSPbase + 0x0F);
		}
		++j->framesread;
		if (j->intercom != -1) {
			/* Forward the frame directly to the intercom partner card. */
			if (IsTxReady(get_ixj(j->intercom))) {
				for (cnt = 0; cnt < j->rec_frame_size * 2; cnt += 2) {
					if (!(cnt % 16) && !IsTxReady(j)) {
						dly = 0;
						while (!IsTxReady(j)) {
							if (dly++ > 5) {
								dly = 0;
								break;
							}
							udelay(10);
						}
					}
					outb_p(*(j->read_buffer + cnt), get_ixj(j->intercom)->DSPbase + 0x0C);
					outb_p(*(j->read_buffer + cnt + 1), get_ixj(j->intercom)->DSPbase + 0x0D);
				}
				get_ixj(j->intercom)->frameswritten++;
			}
		} else {
			j->read_buffer_ready = 1;
			wake_up_interruptible(&j->read_q);	/* Wake any blocked readers */

			wake_up_interruptible(&j->poll_q);	/* Wake any blocked selects */

			if(j->ixj_signals[SIG_READ_READY])
				ixj_kill_fasync(j, SIG_READ_READY, POLL_OUT);
		}
	}
}
/*
 * Pre-computed FSK waveform tables for Caller-ID generation, indexed as
 * fsk[bit][phase][sample]: 2 tone tables (bit value 0 or 1), 6 phase
 * offsets each, 20 signed 16-bit samples per row.  ixj_write_cid_bit()
 * reads every third sample to produce the output rate.
 * NOTE(review): the second table mixes 16383 with the 16384 used
 * everywhere else - presumably a transcription artifact of the
 * generator; confirm before "fixing".
 */
static short fsk[][6][20] =
{
	{
		{
			0, 17846, 29934, 32364, 24351, 8481, -10126, -25465, -32587, -29196,
			-16384, 1715, 19260, 30591, 32051, 23170, 6813, -11743, -26509, -32722
		},
		{
			-28377, -14876, 3425, 20621, 31163, 31650, 21925, 5126, -13328, -27481,
			-32767, -27481, -13328, 5126, 21925, 31650, 31163, 20621, 3425, -14876
		},
		{
			-28377, -32722, -26509, -11743, 6813, 23170, 32051, 30591, 19260, 1715,
			-16384, -29196, -32587, -25465, -10126, 8481, 24351, 32364, 29934, 17846
		},
		{
			0, -17846, -29934, -32364, -24351, -8481, 10126, 25465, 32587, 29196,
			16384, -1715, -19260, -30591, -32051, -23170, -6813, 11743, 26509, 32722
		},
		{
			28377, 14876, -3425, -20621, -31163, -31650, -21925, -5126, 13328, 27481,
			32767, 27481, 13328, -5126, -21925, -31650, -31163, -20621, -3425, 14876
		},
		{
			28377, 32722, 26509, 11743, -6813, -23170, -32051, -30591, -19260, -1715,
			16384, 29196, 32587, 25465, 10126, -8481, -24351, -32364, -29934, -17846
		}
	},
	{
		{
			0, 10126, 19260, 26509, 31163, 32767, 31163, 26509, 19260, 10126,
			0, -10126, -19260, -26509, -31163, -32767, -31163, -26509, -19260, -10126
		},
		{
			-28377, -21925, -13328, -3425, 6813, 16384, 24351, 29934, 32587, 32051,
			28377, 21925, 13328, 3425, -6813, -16384, -24351, -29934, -32587, -32051
		},
		{
			-28377, -32051, -32587, -29934, -24351, -16384, -6813, 3425, 13328, 21925,
			28377, 32051, 32587, 29934, 24351, 16384, 6813, -3425, -13328, -21925
		},
		{
			0, -10126, -19260, -26509, -31163, -32767, -31163, -26509, -19260, -10126,
			0, 10126, 19260, 26509, 31163, 32767, 31163, 26509, 19260, 10126
		},
		{
			28377, 21925, 13328, 3425, -6813, -16383, -24351, -29934, -32587, -32051,
			-28377, -21925, -13328, -3425, 6813, 16383, 24351, 29934, 32587, 32051
		},
		{
			28377, 32051, 32587, 29934, 24351, 16384, 6813, -3425, -13328, -21925,
			-28377, -32051, -32587, -29934, -24351, -16384, -6813, 3425, 13328, 21925
		}
	}
};
/*
 * Append one Caller-ID bit to the FSK sample buffer: take every third
 * sample of the selected tone/phase row until the 20-sample row is
 * exhausted, carrying the sub-sample remainder in j->fskcnt.
 */
static void ixj_write_cid_bit(IXJ *j, int bit)
{
	for (; j->fskcnt < 20; j->fskcnt += 3) {
		if (j->fskdcnt < (j->fsksize - 1))
			j->fskdata[j->fskdcnt++] = fsk[bit][j->fskz][j->fskcnt];
	}
	/* Carry the fractional sample position into the next bit. */
	j->fskcnt %= 20;
	/* A zero bit advances the phase row; wrap after the sixth row. */
	if (!bit)
		j->fskz++;
	if (j->fskz >= 6)
		j->fskz = 0;
}
/*
 * Serialize one byte into the FSK stream, bit b0 first, framed by a
 * start bit (0) and a stop bit (1), asynchronous-serial style.
 */
static void ixj_write_cid_byte(IXJ *j, char byte)
{
	IXJ_CBYTE cb;
	int bits[8];
	int n;

	cb.cbyte = byte;
	bits[0] = cb.cbits.b0;
	bits[1] = cb.cbits.b1;
	bits[2] = cb.cbits.b2;
	bits[3] = cb.cbits.b3;
	bits[4] = cb.cbits.b4;
	bits[5] = cb.cbits.b5;
	bits[6] = cb.cbits.b6;
	bits[7] = cb.cbits.b7;

	ixj_write_cid_bit(j, 0);		/* start bit */
	for (n = 0; n < 8; n++)
		ixj_write_cid_bit(j, bits[n] ? 1 : 0);
	ixj_write_cid_bit(j, 1);		/* stop bit */
}
/*
 * Emit the on-hook Caller-ID preamble: 300 alternating bits (channel
 * seizure, 0/1 pairs) followed by 180 mark bits.
 */
static void ixj_write_cid_seize(IXJ *j)
{
	int i;

	for (i = 0; i < 300; i++)
		ixj_write_cid_bit(j, i & 1);
	for (i = 0; i < 180; i++)
		ixj_write_cid_bit(j, 1);
}
/*
 * Emit the Caller-ID-on-call-waiting preamble: just 80 mark bits (no
 * channel-seizure phase in the off-hook variant).
 */
static void ixj_write_cidcw_seize(IXJ *j)
{
	int remaining = 80;

	while (remaining-- > 0)
		ixj_write_cid_bit(j, 1);
}
/*
 * Serialize the NUL-terminated string s into the FSK stream and fold
 * each byte into the running MDMF checksum, which is returned.
 *
 * Fix: the original called strlen(s) in the loop condition, re-scanning
 * the string on every iteration; the length is now hoisted (and indexed
 * with size_t to match strlen's return type).
 */
static int ixj_write_cid_string(IXJ *j, char *s, int checksum)
{
	size_t cnt;
	size_t len = strlen(s);

	for (cnt = 0; cnt < len; cnt++) {
		ixj_write_cid_byte(j, s[cnt]);
		checksum = (checksum + s[cnt]);
	}
	return checksum;
}
/*
 * Pad the FSK buffer with 'pad' zero samples (frame alignment) plus a
 * fixed 720-sample tail of silence, never overrunning the buffer.
 */
static void ixj_pad_fsk(IXJ *j, int pad)
{
	int total = pad + 720;
	int i;

	for (i = 0; i < total; i++) {
		if (j->fskdcnt < (j->fsksize - 1))
			j->fskdata[j->fskdcnt++] = 0x0000;
	}
}
/*
 * Save the current playback/record configuration and switch the board
 * into the fixed Caller-ID playback mode (30ms base frame, LINEAR16,
 * volume 0x1B).  ixj_post_cid() restores everything saved here.
 */
static void ixj_pre_cid(IXJ *j)
{
	j->cid_play_codec = j->play_codec;
	j->cid_play_frame_size = j->play_frame_size;
	j->cid_play_volume = get_play_volume(j);
	j->cid_play_flag = j->flags.playing;
	j->cid_rec_codec = j->rec_codec;
	j->cid_rec_volume = get_rec_volume(j);
	j->cid_rec_flag = j->flags.recording;
	j->cid_play_aec_level = j->aec_level;
	/* Remember the base frame size via its DSP encoding (low byte). */
	switch(j->baseframe.low) {
	case 0xA0:
		j->cid_base_frame_size = 20;
		break;
	case 0x50:
		j->cid_base_frame_size = 10;
		break;
	case 0xF0:
		j->cid_base_frame_size = 30;
		break;
	}
	ixj_play_stop(j);
	ixj_cpt_stop(j);
	j->flags.cidplay = 1;
	set_base_frame(j, 30);
	set_play_codec(j, LINEAR16);
	set_play_volume(j, 0x1B);
	ixj_play_start(j);
}
/*
 * Undo ixj_pre_cid(): free the FSK buffer and restore the saved base
 * frame, codecs, volumes and AEC level, restarting record/playback if
 * they were active before the Caller-ID burst.
 *
 * Fix: the original tested j->cid_play_flag twice in a row (once to
 * restart playback, once to wake writers); the duplicated conditional
 * is merged into a single block with identical behavior.
 */
static void ixj_post_cid(IXJ *j)
{
	ixj_play_stop(j);
	/* A long CID burst implies we rang/seized the line; park the SLIC. */
	if(j->cidsize > 5000) {
		SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
	}
	j->flags.cidplay = 0;
	if(ixjdebug & 0x0200) {
		printk("IXJ phone%d Finished Playing CallerID data %ld\n", j->board, jiffies);
	}
	ixj_fsk_free(j);
	j->fskdcnt = 0;
	set_base_frame(j, j->cid_base_frame_size);
	set_play_codec(j, j->cid_play_codec);
	ixj_aec_start(j, j->cid_play_aec_level);
	set_play_volume(j, j->cid_play_volume);
	set_rec_codec(j, j->cid_rec_codec);
	set_rec_volume(j, j->cid_rec_volume);
	if(j->cid_rec_flag)
		ixj_record_start(j);
	if(j->cid_play_flag) {
		ixj_play_start(j);
		wake_up_interruptible(&j->write_q);	/* Wake any blocked writers */
	}
}
/*
 * Build and transmit an on-hook Caller-ID burst in MDMF format: seizure
 * preamble, message type 0x80, date/time (0x01), number (0x02) and name
 * (0x07) parameters, then the two's-complement checksum, padded to a
 * 240-sample boundary.  The while(1)/break construct is a structured
 * goto: each hookstate check aborts message assembly early if the
 * handset is picked up mid-burst.
 * NOTE(review): the strcpy/strcat calls assume the cid_send fields fit
 * in the 50/50/80-byte locals - sizes are enforced at the ioctl that
 * fills cid_send, presumably; confirm.
 */
static void ixj_write_cid(IXJ *j)
{
	char sdmf1[50];
	char sdmf2[50];
	char sdmf3[80];
	char mdmflen, len1, len2, len3;
	int pad;
	int checksum = 0;
	/* No CID on the old 0x20 DSP, and never re-enter mid-burst. */
	if (j->dsp.low == 0x20 || j->flags.cidplay)
		return;
	j->fskz = j->fskphase = j->fskcnt = j->fskdcnt = 0;
	j->cidsize = j->cidcnt = 0;
	ixj_fsk_alloc(j);
	strcpy(sdmf1, j->cid_send.month);
	strcat(sdmf1, j->cid_send.day);
	strcat(sdmf1, j->cid_send.hour);
	strcat(sdmf1, j->cid_send.min);
	strcpy(sdmf2, j->cid_send.number);
	strcpy(sdmf3, j->cid_send.name);
	len1 = strlen(sdmf1);
	len2 = strlen(sdmf2);
	len3 = strlen(sdmf3);
	/* 6 = three parameter headers (type+length) for the three fields. */
	mdmflen = len1 + len2 + len3 + 6;
	while(1){
		ixj_write_cid_seize(j);
		ixj_write_cid_byte(j, 0x80);
		checksum = 0x80;
		ixj_write_cid_byte(j, mdmflen);
		checksum = checksum + mdmflen;
		ixj_write_cid_byte(j, 0x01);
		checksum = checksum + 0x01;
		ixj_write_cid_byte(j, len1);
		checksum = checksum + len1;
		checksum = ixj_write_cid_string(j, sdmf1, checksum);
		if(ixj_hookstate(j) & 1)
			break;
		ixj_write_cid_byte(j, 0x02);
		checksum = checksum + 0x02;
		ixj_write_cid_byte(j, len2);
		checksum = checksum + len2;
		checksum = ixj_write_cid_string(j, sdmf2, checksum);
		if(ixj_hookstate(j) & 1)
			break;
		ixj_write_cid_byte(j, 0x07);
		checksum = checksum + 0x07;
		ixj_write_cid_byte(j, len3);
		checksum = checksum + len3;
		checksum = ixj_write_cid_string(j, sdmf3, checksum);
		if(ixj_hookstate(j) & 1)
			break;
		/* Two's complement of the low byte of the running sum. */
		checksum %= 256;
		checksum ^= 0xFF;
		checksum += 1;
		ixj_write_cid_byte(j, (char) checksum);
		pad = j->fskdcnt % 240;
		if (pad) {
			pad = 240 - pad;
		}
		ixj_pad_fsk(j, pad);
		break;
	}
	/* Kick the first frame out; the ISR drains the rest. */
	ixj_write_frame(j);
}
/*
 * Transmit Caller-ID on call waiting (off-hook CID): play the SAS tone
 * (440Hz) and the dual-tone CAS alert, wait up to half a second for the
 * CPE's DTMF ACK, then send the MDMF message.  Each tone/wait section
 * drops the board's busy bit so the ISR/timer path can run, then
 * re-acquires it - the clear_bit/test_and_set_bit pairs are a hand
 * rolled lock handoff, which is why this function is not restructured.
 */
static void ixj_write_cidcw(IXJ *j)
{
	IXJ_TONE ti;
	char sdmf1[50];
	char sdmf2[50];
	char sdmf3[80];
	char mdmflen, len1, len2, len3;
	int pad;
	int checksum = 0;
	/* No CID on the old 0x20 DSP, and never re-enter mid-burst. */
	if (j->dsp.low == 0x20 || j->flags.cidplay)
		return;
	j->fskz = j->fskphase = j->fskcnt = j->fskdcnt = 0;
	j->cidsize = j->cidcnt = 0;
	ixj_fsk_alloc(j);
	j->flags.cidcw_ack = 0;
	/* SAS: 440 Hz subscriber alerting signal for 1500 ms. */
	ti.tone_index = 23;
	ti.gain0 = 1;
	ti.freq0 = hz440;
	ti.gain1 = 0;
	ti.freq1 = 0;
	ixj_init_tone(j, &ti);
	ixj_set_tone_on(1500, j);
	ixj_set_tone_off(32, j);
	if(ixjdebug & 0x0200) {
		printk("IXJ cidcw phone%d first tone start at %ld\n", j->board, jiffies);
	}
	ixj_play_tone(j, 23);
	/* Release the board lock while the tone plays. */
	clear_bit(j->board, &j->busyflags);
	while(j->tone_state)
		schedule_timeout_interruptible(1);
	while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
		schedule_timeout_interruptible(1);
	if(ixjdebug & 0x0200) {
		printk("IXJ cidcw phone%d first tone end at %ld\n", j->board, jiffies);
	}
	/* CAS: dual tone 2130 + 2750 Hz for 600 ms. */
	ti.tone_index = 24;
	ti.gain0 = 1;
	ti.freq0 = hz2130;
	ti.gain1 = 0;
	ti.freq1 = hz2750;
	ixj_init_tone(j, &ti);
	ixj_set_tone_off(10, j);
	ixj_set_tone_on(600, j);
	if(ixjdebug & 0x0200) {
		printk("IXJ cidcw phone%d second tone start at %ld\n", j->board, jiffies);
	}
	ixj_play_tone(j, 24);
	clear_bit(j->board, &j->busyflags);
	while(j->tone_state)
		schedule_timeout_interruptible(1);
	while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
		schedule_timeout_interruptible(1);
	if(ixjdebug & 0x0200) {
		printk("IXJ cidcw phone%d sent second tone at %ld\n", j->board, jiffies);
	}
	/* Give the CPE up to 500 ms to ACK the CAS tone. */
	j->cidcw_wait = jiffies + ((50 * hertz) / 100);
	clear_bit(j->board, &j->busyflags);
	while(!j->flags.cidcw_ack && time_before(jiffies, j->cidcw_wait))
		schedule_timeout_interruptible(1);
	while(test_and_set_bit(j->board, (void *)&j->busyflags) != 0)
		schedule_timeout_interruptible(1);
	j->cidcw_wait = 0;
	if(!j->flags.cidcw_ack) {
		if(ixjdebug & 0x0200) {
			printk("IXJ cidcw phone%d did not receive ACK from display %ld\n", j->board, jiffies);
		}
		ixj_post_cid(j);
		if(j->cid_play_flag) {
			wake_up_interruptible(&j->write_q);	/* Wake any blocked writers */
		}
		return;
	} else {
		ixj_pre_cid(j);
	}
	j->flags.cidcw_ack = 0;
	strcpy(sdmf1, j->cid_send.month);
	strcat(sdmf1, j->cid_send.day);
	strcat(sdmf1, j->cid_send.hour);
	strcat(sdmf1, j->cid_send.min);
	strcpy(sdmf2, j->cid_send.number);
	strcpy(sdmf3, j->cid_send.name);
	len1 = strlen(sdmf1);
	len2 = strlen(sdmf2);
	len3 = strlen(sdmf3);
	/* 6 = three parameter headers (type+length) for the three fields. */
	mdmflen = len1 + len2 + len3 + 6;
	ixj_write_cidcw_seize(j);
	ixj_write_cid_byte(j, 0x80);
	checksum = 0x80;
	ixj_write_cid_byte(j, mdmflen);
	checksum = checksum + mdmflen;
	ixj_write_cid_byte(j, 0x01);
	checksum = checksum + 0x01;
	ixj_write_cid_byte(j, len1);
	checksum = checksum + len1;
	checksum = ixj_write_cid_string(j, sdmf1, checksum);
	ixj_write_cid_byte(j, 0x02);
	checksum = checksum + 0x02;
	ixj_write_cid_byte(j, len2);
	checksum = checksum + len2;
	checksum = ixj_write_cid_string(j, sdmf2, checksum);
	ixj_write_cid_byte(j, 0x07);
	checksum = checksum + 0x07;
	ixj_write_cid_byte(j, len3);
	checksum = checksum + len3;
	checksum = ixj_write_cid_string(j, sdmf3, checksum);
	/* Two's complement of the low byte of the running sum. */
	checksum %= 256;
	checksum ^= 0xFF;
	checksum += 1;
	ixj_write_cid_byte(j, (char) checksum);
	pad = j->fskdcnt % 240;
	if (pad) {
		pad = 240 - pad;
	}
	ixj_pad_fsk(j, pad);
	if(ixjdebug & 0x0200) {
		printk("IXJ cidcw phone%d sent FSK data at %ld\n", j->board, jiffies);
	}
}
/*
 * Transmit a Visual Message Waiting Indicator burst (MDMF message type
 * 0x82): msg != 0 sends 0xFF (lamp on), msg == 0 sends 0x00 (lamp off).
 */
static void ixj_write_vmwi(IXJ *j, int msg)
{
	char mdmflen;
	int pad;
	int checksum = 0;
	int lamp = msg ? 0xFF : 0x00;

	/* No CID on the old 0x20 DSP, and never re-enter mid-burst. */
	if (j->dsp.low == 0x20 || j->flags.cidplay)
		return;

	j->fskz = j->fskphase = j->fskcnt = j->fskdcnt = 0;
	j->cidsize = j->cidcnt = 0;
	ixj_fsk_alloc(j);
	mdmflen = 3;

	/* POTS port must be on-hook-transmitting to ring the lamp. */
	if (j->port == PORT_POTS)
		SLIC_SetState(PLD_SLIC_STATE_OHT, j);

	ixj_write_cid_seize(j);
	ixj_write_cid_byte(j, 0x82);	/* VMWI message type */
	checksum = 0x82;
	ixj_write_cid_byte(j, mdmflen);
	checksum = checksum + mdmflen;
	ixj_write_cid_byte(j, 0x0B);	/* parameter: message waiting */
	checksum = checksum + 0x0B;
	ixj_write_cid_byte(j, 1);	/* parameter length */
	checksum = checksum + 1;
	ixj_write_cid_byte(j, lamp);
	checksum = checksum + lamp;

	/* Two's complement of the low byte of the running sum. */
	checksum %= 256;
	checksum ^= 0xFF;
	checksum += 1;
	ixj_write_cid_byte(j, (char) checksum);

	pad = j->fskdcnt % 240;
	if (pad)
		pad = 240 - pad;
	ixj_pad_fsk(j, pad);
}
/*
 * Push one frame into the DSP's write FIFO (ports +0x0C low / +0x0D
 * high).  Two sources: the FSK Caller-ID buffer when cidplay is set,
 * otherwise the user-space write buffer.  Left untouched structurally:
 * the FIFO-readiness polling and pointer arithmetic are order-critical.
 */
static void ixj_write_frame(IXJ *j)
{
	int cnt, frame_count, dly;
	IXJ_WORD dat;
	frame_count = 0;
	if(j->flags.cidplay) {
		/* cnt counts bytes but each pass writes one 16-bit word and
		 * increments cnt again below, so this moves 240 words (one
		 * 30 ms LINEAR16 frame) per call. */
		for(cnt = 0; cnt < 480; cnt++) {
			if (!(cnt % 16) && !IsTxReady(j)) {
				dly = 0;
				while (!IsTxReady(j)) {
					if (dly++ > 5) {
						dly = 0;
						break;
					}
					udelay(10);
				}
			}
			dat.word = j->fskdata[j->cidcnt++];
			outb_p(dat.bytes.low, j->DSPbase + 0x0C);
			outb_p(dat.bytes.high, j->DSPbase + 0x0D);
			cnt++;	/* second increment: one word per two byte counts */
		}
		if(j->cidcnt >= j->fskdcnt) {
			ixj_post_cid(j);
		}
		/* This may seem rude, but if we just played one frame of FSK data for CallerID
		   and there is real audio data in the buffer, we need to throw it away because
		   we just used it's time slot */
		if (j->write_buffer_rp > j->write_buffer_wp) {
			j->write_buffer_rp += j->cid_play_frame_size * 2;
			if (j->write_buffer_rp >= j->write_buffer_end) {
				j->write_buffer_rp = j->write_buffer;
			}
			j->write_buffers_empty++;
			wake_up_interruptible(&j->write_q);	/* Wake any blocked writers */
			wake_up_interruptible(&j->poll_q);	/* Wake any blocked selects */
		}
	} else if (j->write_buffer && j->write_buffers_empty < 1) {
		/* Count whole frames available, handling ring wrap-around. */
		if (j->write_buffer_wp > j->write_buffer_rp) {
			frame_count =
			    (j->write_buffer_wp - j->write_buffer_rp) / (j->play_frame_size * 2);
		}
		if (j->write_buffer_rp > j->write_buffer_wp) {
			frame_count =
			    (j->write_buffer_wp - j->write_buffer) / (j->play_frame_size * 2) +
			    (j->write_buffer_end - j->write_buffer_rp) / (j->play_frame_size * 2);
		}
		if (frame_count >= 1) {
			/* Old (ver 0x12) DSPs need a silence word primer on the
			 * very first frame of a playback session. */
			if (j->ver.low == 0x12 && j->play_mode && j->flags.play_first_frame) {
				BYTES blankword;
				switch (j->play_mode) {
				case PLAYBACK_MODE_ULAW:
				case PLAYBACK_MODE_ALAW:
					blankword.low = blankword.high = 0xFF;
					break;
				case PLAYBACK_MODE_8LINEAR:
				case PLAYBACK_MODE_16LINEAR:
				default:
					blankword.low = blankword.high = 0x00;
					break;
				case PLAYBACK_MODE_8LINEAR_WSS:
					blankword.low = blankword.high = 0x80;
					break;
				}
				for (cnt = 0; cnt < 16; cnt++) {
					if (!(cnt % 16) && !IsTxReady(j)) {
						dly = 0;
						while (!IsTxReady(j)) {
							if (dly++ > 5) {
								dly = 0;
								break;
							}
							udelay(10);
						}
					}
					outb_p((blankword.low), j->DSPbase + 0x0C);
					outb_p((blankword.high), j->DSPbase + 0x0D);
				}
				j->flags.play_first_frame = 0;
			} else if (j->play_codec == G723_63 && j->flags.play_first_frame) {
				/* G.723.1 6.3 needs a 24-word SID-style primer frame. */
				for (cnt = 0; cnt < 24; cnt++) {
					BYTES blankword;
					if(cnt == 12) {
						blankword.low = 0x02;
						blankword.high = 0x00;
					}
					else {
						blankword.low = blankword.high = 0x00;
					}
					if (!(cnt % 16) && !IsTxReady(j)) {
						dly = 0;
						while (!IsTxReady(j)) {
							if (dly++ > 5) {
								dly = 0;
								break;
							}
							udelay(10);
						}
					}
					outb_p((blankword.low), j->DSPbase + 0x0C);
					outb_p((blankword.high), j->DSPbase + 0x0D);
				}
				j->flags.play_first_frame = 0;
			}
			for (cnt = 0; cnt < j->play_frame_size * 2; cnt += 2) {
				if (!(cnt % 16) && !IsTxReady(j)) {
					dly = 0;
					while (!IsTxReady(j)) {
						if (dly++ > 5) {
							dly = 0;
							break;
						}
						udelay(10);
					}
				}
				/* Add word 0 to G.729 frames for the 8021. Right now we don't do VAD/CNG */
				if (j->play_codec == G729 && (cnt == 0 || cnt == 10 || cnt == 20)) {
					if (j->write_buffer_rp[cnt] == 0 &&
					    j->write_buffer_rp[cnt + 1] == 0 &&
					    j->write_buffer_rp[cnt + 2] == 0 &&
					    j->write_buffer_rp[cnt + 3] == 0 &&
					    j->write_buffer_rp[cnt + 4] == 0 &&
					    j->write_buffer_rp[cnt + 5] == 0 &&
					    j->write_buffer_rp[cnt + 6] == 0 &&
					    j->write_buffer_rp[cnt + 7] == 0 &&
					    j->write_buffer_rp[cnt + 8] == 0 &&
					    j->write_buffer_rp[cnt + 9] == 0) {
						/* someone is trying to write silence lets make this a type 0 frame. */
						outb_p(0x00, j->DSPbase + 0x0C);
						outb_p(0x00, j->DSPbase + 0x0D);
					} else {
						/* so all other frames are type 1. */
						outb_p(0x01, j->DSPbase + 0x0C);
						outb_p(0x00, j->DSPbase + 0x0D);
					}
				}
				outb_p(*(j->write_buffer_rp + cnt), j->DSPbase + 0x0C);
				outb_p(*(j->write_buffer_rp + cnt + 1), j->DSPbase + 0x0D);
				/* Zero behind us so a stale frame is never replayed. */
				*(j->write_buffer_rp + cnt) = 0;
				*(j->write_buffer_rp + cnt + 1) = 0;
			}
			j->write_buffer_rp += j->play_frame_size * 2;
			if (j->write_buffer_rp >= j->write_buffer_end) {
				j->write_buffer_rp = j->write_buffer;
			}
			j->write_buffers_empty++;
			wake_up_interruptible(&j->write_q);	/* Wake any blocked writers */
			wake_up_interruptible(&j->poll_q);	/* Wake any blocked selects */
			++j->frameswritten;
		}
	} else {
		/* Nothing queued: the DSP ran dry this period. */
		j->drybuffer++;
	}
	if(j->ixj_signals[SIG_WRITE_READY]) {
		ixj_kill_fasync(j, SIG_WRITE_READY, POLL_OUT);
	}
}
/*
 * Put the DSP into idle (command 0x0000).  Returns 1 on success; on
 * success the cached play/record modes and flags are reset.  Returns 0
 * if the command write fails or the status register reports an error.
 */
static int idle(IXJ *j)
{
	if (ixj_WriteDSPCommand(0x0000, j))	/* DSP Idle */
		return 0;

	if (j->ssr.high || j->ssr.low)
		return 0;

	j->play_mode = -1;
	j->flags.playing = 0;
	j->rec_mode = -1;
	j->flags.recording = 0;
	return 1;
}
/*
 * Program the DSP base frame size (10, 20 or 30 ms).  AEC must be
 * stopped first and is restarted afterwards.  Returns the accepted size
 * or -1 on failure.  The old 0x20 DSP is fixed at 30 ms.
 */
static int set_base_frame(IXJ *j, int size)
{
	unsigned short cmd;
	int cnt;
	idle(j);
	/* Save the AEC level so it can be restored after reprogramming. */
	j->cid_play_aec_level = j->aec_level;
	aec_stop(j);
	/* Retry idling: the DSP may still be finishing a frame. */
	for (cnt = 0; cnt < 10; cnt++) {
		if (idle(j))
			break;
	}
	if (j->ssr.high || j->ssr.low)
		return -1;
	if (j->dsp.low != 0x20) {
		switch (size) {
		case 30:
			cmd = 0x07F0;
			/* Set Base Frame Size to 240 pg9-10 8021 */
			break;
		case 20:
			cmd = 0x07A0;
			/* Set Base Frame Size to 160 pg9-10 8021 */
			break;
		case 10:
			cmd = 0x0750;
			/* Set Base Frame Size to 80 pg9-10 8021 */
			break;
		default:
			return -1;
		}
	} else {
		/* 0x20 DSP: only the default 30 ms frame is supported. */
		if (size == 30)
			return size;
		else
			return -1;
	}
	if (ixj_WriteDSPCommand(cmd, j)) {
		j->baseframe.high = j->baseframe.low = 0xFF;
		return -1;
	} else {
		j->baseframe.high = j->ssr.high;
		j->baseframe.low = j->ssr.low;
		/* If the status returned is 0x0000 (pg9-9 8021) the call failed */
		if(j->baseframe.high == 0x00 && j->baseframe.low == 0x00) {
			return -1;
		}
	}
	ixj_aec_start(j, j->cid_play_aec_level);
	return size;
}
/*
 * Select the record codec: store the per-frame byte count and the DSP
 * record mode for the given rate, gated on the loaded firmware/DSP
 * revision.  Frame sizes for the byte-stream codecs scale with the base
 * frame (baseframe.low: 0xA0 = 20 ms, 0x50 = 10 ms, else 30 ms).
 * Returns 0 on success, 1 if the codec is unavailable on this board.
 */
static int set_rec_codec(IXJ *j, int rate)
{
	int retval = 0;
	j->rec_codec = rate;
	switch (rate) {
	case G723_63:
		/* ver 0x12 boards need the conversion firmware loaded. */
		if (j->ver.low != 0x12 || ixj_convert_loaded) {
			j->rec_frame_size = 12;
			j->rec_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case G723_53:
		if (j->ver.low != 0x12 || ixj_convert_loaded) {
			j->rec_frame_size = 10;
			j->rec_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case TS85:
		/* Native on the 0x20 DSP, otherwise needs TS85 firmware. */
		if (j->dsp.low == 0x20 || j->flags.ts85_loaded) {
			j->rec_frame_size = 16;
			j->rec_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case TS48:
		if (j->ver.low != 0x12 || ixj_convert_loaded) {
			j->rec_frame_size = 9;
			j->rec_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case TS41:
		if (j->ver.low != 0x12 || ixj_convert_loaded) {
			j->rec_frame_size = 8;
			j->rec_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case G728:
		if (j->dsp.low != 0x20) {
			j->rec_frame_size = 48;
			j->rec_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case G729:
		if (j->dsp.low != 0x20) {
			if (!j->flags.g729_loaded) {
				retval = 1;
				break;
			}
			switch (j->baseframe.low) {
			case 0xA0:
				j->rec_frame_size = 10;
				break;
			case 0x50:
				j->rec_frame_size = 5;
				break;
			default:
				j->rec_frame_size = 15;
				break;
			}
			j->rec_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case G729B:
		if (j->dsp.low != 0x20) {
			if (!j->flags.g729_loaded) {
				retval = 1;
				break;
			}
			switch (j->baseframe.low) {
			case 0xA0:
				j->rec_frame_size = 12;
				break;
			case 0x50:
				j->rec_frame_size = 6;
				break;
			default:
				j->rec_frame_size = 18;
				break;
			}
			j->rec_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case ULAW:
		switch (j->baseframe.low) {
		case 0xA0:
			j->rec_frame_size = 80;
			break;
		case 0x50:
			j->rec_frame_size = 40;
			break;
		default:
			j->rec_frame_size = 120;
			break;
		}
		j->rec_mode = 4;
		break;
	case ALAW:
		switch (j->baseframe.low) {
		case 0xA0:
			j->rec_frame_size = 80;
			break;
		case 0x50:
			j->rec_frame_size = 40;
			break;
		default:
			j->rec_frame_size = 120;
			break;
		}
		j->rec_mode = 4;
		break;
	case LINEAR16:
		switch (j->baseframe.low) {
		case 0xA0:
			j->rec_frame_size = 160;
			break;
		case 0x50:
			j->rec_frame_size = 80;
			break;
		default:
			j->rec_frame_size = 240;
			break;
		}
		j->rec_mode = 5;
		break;
	case LINEAR8:
		switch (j->baseframe.low) {
		case 0xA0:
			j->rec_frame_size = 80;
			break;
		case 0x50:
			j->rec_frame_size = 40;
			break;
		default:
			j->rec_frame_size = 120;
			break;
		}
		j->rec_mode = 6;
		break;
	case WSS:
		switch (j->baseframe.low) {
		case 0xA0:
			j->rec_frame_size = 80;
			break;
		case 0x50:
			j->rec_frame_size = 40;
			break;
		default:
			j->rec_frame_size = 120;
			break;
		}
		j->rec_mode = 7;
		break;
	default:
		/* Unknown codec: tear down any existing capture buffer. */
		kfree(j->read_buffer);
		j->rec_frame_size = 0;
		j->rec_mode = -1;
		j->read_buffer = NULL;
		j->read_buffer_size = 0;
		retval = 1;
		break;
	}
	return retval;
}
/*
 * Start recording with the codec selected by set_rec_codec(): power up
 * the DSP, load the codec (compressed modes only), allocate the capture
 * buffer, select poll-sync mode and issue the Record C1 command for the
 * record mode.  Returns 0 on success, 1 for an unknown codec, -ENOMEM
 * if the buffer allocation fails, -1 on a DSP command failure.
 *
 * Fix: the original nested a second, redundant `if (!j->read_buffer)`
 * inside the identical outer test; the duplicate check is removed with
 * no change in behavior.
 */
static int ixj_record_start(IXJ *j)
{
	unsigned short cmd = 0x0000;

	if (j->read_buffer) {
		ixj_record_stop(j);
	}
	j->flags.recording = 1;
	ixj_WriteDSPCommand(0x0FE0, j);	/* Put the DSP in full power mode. */

	if (ixjdebug & 0x0002)
		printk("IXJ %d Starting Record Codec %d at %ld\n", j->board, j->rec_codec, jiffies);

	/* rec_mode 0 = compressed codecs: load the codec microcode. */
	if (!j->rec_mode) {
		switch (j->rec_codec) {
		case G723_63:
			cmd = 0x5131;
			break;
		case G723_53:
			cmd = 0x5132;
			break;
		case TS85:
			cmd = 0x5130;	/* TrueSpeech 8.5 */
			break;
		case TS48:
			cmd = 0x5133;	/* TrueSpeech 4.8 */
			break;
		case TS41:
			cmd = 0x5134;	/* TrueSpeech 4.1 */
			break;
		case G728:
			cmd = 0x5135;
			break;
		case G729:
		case G729B:
			cmd = 0x5136;
			break;
		default:
			return 1;
		}
		if (ixj_WriteDSPCommand(cmd, j))
			return -1;
	}
	if (!j->read_buffer) {
		/* GFP_ATOMIC: may be called from the timer/interrupt path. */
		j->read_buffer = kmalloc(j->rec_frame_size * 2, GFP_ATOMIC);
		if (!j->read_buffer) {
			printk("Read buffer allocation for ixj board %d failed!\n", j->board);
			return -ENOMEM;
		}
	}
	j->read_buffer_size = j->rec_frame_size * 2;

	if (ixj_WriteDSPCommand(0x5102, j))	/* Set Poll sync mode */
		return -1;

	/* Record C1: command varies with mode and DSP revision. */
	switch (j->rec_mode) {
	case 0:
		cmd = 0x1C03;	/* Record C1 */
		break;
	case 4:
		if (j->ver.low == 0x12) {
			cmd = 0x1E03;	/* Record C1 */
		} else {
			cmd = 0x1E01;	/* Record C1 */
		}
		break;
	case 5:
		if (j->ver.low == 0x12) {
			cmd = 0x1E83;	/* Record C1 */
		} else {
			cmd = 0x1E81;	/* Record C1 */
		}
		break;
	case 6:
		if (j->ver.low == 0x12) {
			cmd = 0x1F03;	/* Record C1 */
		} else {
			cmd = 0x1F01;	/* Record C1 */
		}
		break;
	case 7:
		if (j->ver.low == 0x12) {
			cmd = 0x1F83;	/* Record C1 */
		} else {
			cmd = 0x1F81;	/* Record C1 */
		}
		break;
	}
	if (ixj_WriteDSPCommand(cmd, j))
		return -1;

	/* Full duplex: (re)arm echo cancellation. */
	if (j->flags.playing) {
		ixj_aec_start(j, j->aec_level);
	}
	return 0;
}
/*
 * Stop recording: free the capture buffer, tell the DSP to stop record
 * mode (0x5120) if one is active, and clear the driver state.  The
 * statement order (buffer first, then DSP stop) is kept as-is.
 */
static void ixj_record_stop(IXJ *j)
{
	if (ixjdebug & 0x0002)
		printk("IXJ %d Stopping Record Codec %d at %ld\n", j->board, j->rec_codec, jiffies);

	kfree(j->read_buffer);
	j->read_buffer = NULL;
	j->read_buffer_size = 0;
	if (j->rec_mode > -1) {
		ixj_WriteDSPCommand(0x5120, j);
		j->rec_mode = -1;
	}
	j->flags.recording = 0;
}
/*
 * Toggle the DSP's voice activity detector: 0x513F enables it,
 * 0x513E disables it.
 */
static void ixj_vad(IXJ *j, int arg)
{
	ixj_WriteDSPCommand(arg ? 0x513F : 0x513E, j);
}
/*
 * Program the record buffer depth, clamped to the DSP's valid range of
 * 0..60 (command base 0x5180).
 */
static void set_rec_depth(IXJ *j, int depth)
{
	if (depth < 0)
		depth = 0;
	else if (depth > 60)
		depth = 60;
	ixj_WriteDSPCommand(0x5180 + depth, j);
}
/*
 * Set the DTMF detector's input prescale gain: command 0xCF07 followed
 * by the 16-bit gain value.
 */
static void set_dtmf_prescale(IXJ *j, int volume)
{
	ixj_WriteDSPCommand(0xCF07, j);
	ixj_WriteDSPCommand(volume, j);
}
/*
 * Read back the DTMF prescale gain (command 0xCF05); the value arrives
 * in the software status register as high/low bytes.
 */
static int get_dtmf_prescale(IXJ *j)
{
	ixj_WriteDSPCommand(0xCF05, j);
	return j->ssr.high << 8 | j->ssr.low;
}
/*
 * Set the record gain.  When AEC is in AGC mode the value programs the
 * AGC threshold register (0xCF96) instead of the plain record volume
 * register (0xCF03).
 */
static void set_rec_volume(IXJ *j, int volume)
{
	int agc = (j->aec_level == AEC_AGC);

	if (ixjdebug & 0x0002) {
		if (agc)
			printk(KERN_INFO "IXJ: /dev/phone%d Setting AGC Threshold to 0x%4.4x\n", j->board, volume);
		else
			printk(KERN_INFO "IXJ: /dev/phone %d Setting Record Volume to 0x%4.4x\n", j->board, volume);
	}
	ixj_WriteDSPCommand(agc ? 0xCF96 : 0xCF03, j);
	ixj_WriteDSPCommand(volume, j);
}
/*
 * Set the record volume on a 0..100 linear scale, normalized per card
 * type so the different hardware sounds roughly alike.  Returns 0 on
 * success, -1 for an out-of-range volume or unknown card.
 */
static int set_rec_volume_linear(IXJ *j, int volume)
{
	int hwmax;

	if (ixjdebug & 0x0002)
		printk(KERN_INFO "IXJ: /dev/phone %d Setting Linear Record Volume to 0x%4.4x\n", j->board, volume);
	if (volume > 100 || volume < 0)
		return -1;

	/* This should normalize the perceived volumes between the different cards caused by differences in the hardware */
	switch (j->cardtype) {
	case QTI_PHONEJACK:
		hwmax = 0x440;
		break;
	case QTI_LINEJACK:
		hwmax = 0x180;
		/* The LineJACK also routes through the mixer. */
		ixj_mixer(0x0203, j);	/*Voice Left Volume unmute 6db */
		ixj_mixer(0x0303, j);	/*Voice Right Volume unmute 6db */
		ixj_mixer(0x0C00, j);	/*Mono1 unmute 12db */
		break;
	case QTI_PHONEJACK_LITE:
		hwmax = 0x4C0;
		break;
	case QTI_PHONEJACK_PCI:
		hwmax = 0x100;
		break;
	case QTI_PHONECARD:
		hwmax = 0x400;
		break;
	default:
		return -1;
	}
	set_rec_volume(j, (hwmax * volume) / 100);
	return 0;
}
/*
 * Read the record gain.  In AGC mode this reads the AGC threshold
 * (0xCF86), otherwise the record volume register (0xCF01); either way
 * the value comes back in the software status register.
 */
static int get_rec_volume(IXJ *j)
{
	if (j->aec_level == AEC_AGC) {
		if (ixjdebug & 0x0002)
			printk(KERN_INFO "Getting AGC Threshold\n");
		ixj_WriteDSPCommand(0xCF86, j);
		if (ixjdebug & 0x0002)
			printk(KERN_INFO "AGC Threshold is 0x%2.2x%2.2x\n", j->ssr.high, j->ssr.low);
		return j->ssr.high << 8 | j->ssr.low;
	}

	if (ixjdebug & 0x0002)
		printk(KERN_INFO "Getting Record Volume\n");
	ixj_WriteDSPCommand(0xCF01, j);
	return j->ssr.high << 8 | j->ssr.low;
}
/*
 * Read the record volume and convert it to the 0..100 linear scale,
 * using the same per-card maximum as set_rec_volume_linear().  Returns
 * -1 for an unknown card type.
 */
static int get_rec_volume_linear(IXJ *j)
{
	int hwmax;
	int linear;

	switch (j->cardtype) {
	case QTI_PHONEJACK:
		hwmax = 0x440;
		break;
	case QTI_LINEJACK:
		hwmax = 0x180;
		break;
	case QTI_PHONEJACK_LITE:
		hwmax = 0x4C0;
		break;
	case QTI_PHONEJACK_PCI:
		hwmax = 0x100;
		break;
	case QTI_PHONECARD:
		hwmax = 0x400;
		break;
	default:
		return -1;
	}
	linear = (get_rec_volume(j) * 100) / hwmax;
	return linear > 100 ? 100 : linear;
}
/*
 * Read the current record signal level (command 0xCF88) and rescale it
 * from the DSP's 0..240 range to 0..256.
 */
static int get_rec_level(IXJ *j)
{
	int raw;

	ixj_WriteDSPCommand(0xCF88, j);
	raw = j->ssr.high << 8 | j->ssr.low;
	return (raw * 256) / 240;
}
/*
 * Configure and enable the acoustic echo canceller at the requested
 * level (AEC_LOW/MED/HIGH use fixed thresholds; AEC_AGC additionally
 * programs and enables automatic gain control; AEC_AUTO sets adaptive
 * mode without AGC).  Level 0 disables AEC via aec_stop().  The command
 * sequences come from the Quicknet 8021/8022 DSP reference.
 */
static void ixj_aec_start(IXJ *j, int level)
{
	j->aec_level = level;
	if (ixjdebug & 0x0002)
		printk(KERN_INFO "AGC set = 0x%2.2x\n", j->aec_level);
	if (!level) {
		aec_stop(j);
	} else {
		/* G.729 shares DSP memory with the AEC filter; relocate it. */
		if (j->rec_codec == G729 || j->play_codec == G729 || j->rec_codec == G729B || j->play_codec == G729B) {
			ixj_WriteDSPCommand(0xE022, j);	/* Move AEC filter buffer */
			ixj_WriteDSPCommand(0x0300, j);
		}
		ixj_WriteDSPCommand(0xB001, j);	/* AEC On */
		ixj_WriteDSPCommand(0xE013, j);	/* Advanced AEC C1 */
		switch (level) {
		case AEC_LOW:
			ixj_WriteDSPCommand(0x0000, j);	/* Advanced AEC C2 = off */
			ixj_WriteDSPCommand(0xE011, j);
			ixj_WriteDSPCommand(0xFFFF, j);
			ixj_WriteDSPCommand(0xCF97, j);	/* Set AGC Enable */
			ixj_WriteDSPCommand(0x0000, j);	/* to off */
			break;
		case AEC_MED:
			ixj_WriteDSPCommand(0x0600, j);	/* Advanced AEC C2 = on medium */
			ixj_WriteDSPCommand(0xE011, j);
			ixj_WriteDSPCommand(0x0080, j);
			ixj_WriteDSPCommand(0xCF97, j);	/* Set AGC Enable */
			ixj_WriteDSPCommand(0x0000, j);	/* to off */
			break;
		case AEC_HIGH:
			ixj_WriteDSPCommand(0x0C00, j);	/* Advanced AEC C2 = on high */
			ixj_WriteDSPCommand(0xE011, j);
			ixj_WriteDSPCommand(0x0080, j);
			ixj_WriteDSPCommand(0xCF97, j);	/* Set AGC Enable */
			ixj_WriteDSPCommand(0x0000, j);	/* to off */
			break;
		case AEC_AGC:
			/* First we have to put the AEC into advance auto mode so that AGC will not conflict with it */
			ixj_WriteDSPCommand(0x0002, j);	/* Attenuation scaling factor of 2 */
			ixj_WriteDSPCommand(0xE011, j);
			ixj_WriteDSPCommand(0x0100, j);	/* Higher Threshold Floor */
			ixj_WriteDSPCommand(0xE012, j);	/* Set Train and Lock */
			if(j->cardtype == QTI_LINEJACK || j->cardtype == QTI_PHONECARD)
				ixj_WriteDSPCommand(0x0224, j);
			else
				ixj_WriteDSPCommand(0x1224, j);
			ixj_WriteDSPCommand(0xE014, j);
			ixj_WriteDSPCommand(0x0003, j);	/* Lock threshold at 3dB */
			ixj_WriteDSPCommand(0xE338, j);	/* Set Echo Suppresser Attenuation to 0dB */
			/* Now we can set the AGC initial parameters and turn it on */
			ixj_WriteDSPCommand(0xCF90, j);	/* Set AGC Minumum gain */
			ixj_WriteDSPCommand(0x0020, j);	/* to 0.125 (-18dB) */
			ixj_WriteDSPCommand(0xCF91, j);	/* Set AGC Maximum gain */
			ixj_WriteDSPCommand(0x1000, j);	/* to 16 (24dB) */
			ixj_WriteDSPCommand(0xCF92, j);	/* Set AGC start gain */
			ixj_WriteDSPCommand(0x0800, j);	/* to 8 (+18dB) */
			ixj_WriteDSPCommand(0xCF93, j);	/* Set AGC hold time */
			ixj_WriteDSPCommand(0x1F40, j);	/* to 2 seconds (units are 250us) */
			ixj_WriteDSPCommand(0xCF94, j);	/* Set AGC Attack Time Constant */
			ixj_WriteDSPCommand(0x0005, j);	/* to 8ms */
			ixj_WriteDSPCommand(0xCF95, j);	/* Set AGC Decay Time Constant */
			ixj_WriteDSPCommand(0x000D, j);	/* to 4096ms */
			ixj_WriteDSPCommand(0xCF96, j);	/* Set AGC Attack Threshold */
			ixj_WriteDSPCommand(0x1200, j);	/* to 25% */
			ixj_WriteDSPCommand(0xCF97, j);	/* Set AGC Enable */
			ixj_WriteDSPCommand(0x0001, j);	/* to on */
			break;
		case AEC_AUTO:
			ixj_WriteDSPCommand(0x0002, j);	/* Attenuation scaling factor of 2 */
			ixj_WriteDSPCommand(0xE011, j);
			ixj_WriteDSPCommand(0x0100, j);	/* Higher Threshold Floor */
			ixj_WriteDSPCommand(0xE012, j);	/* Set Train and Lock */
			if(j->cardtype == QTI_LINEJACK || j->cardtype == QTI_PHONECARD)
				ixj_WriteDSPCommand(0x0224, j);
			else
				ixj_WriteDSPCommand(0x1224, j);
			ixj_WriteDSPCommand(0xE014, j);
			ixj_WriteDSPCommand(0x0003, j);	/* Lock threshold at 3dB */
			ixj_WriteDSPCommand(0xE338, j);	/* Set Echo Suppresser Attenuation to 0dB */
			break;
		}
	}
}
/*
 * Disable the echo canceller.  If a G.729 variant is active, first move
 * the AEC filter buffer back to its default location (it was relocated
 * by ixj_aec_start to make room for the codec).  The stop command is
 * only issued while both play and record modes are active.
 */
static void aec_stop(IXJ *j)
{
	int g729_active = (j->rec_codec == G729 || j->rec_codec == G729B ||
			   j->play_codec == G729 || j->play_codec == G729B);

	j->aec_level = AEC_OFF;
	if (g729_active) {
		ixj_WriteDSPCommand(0xE022, j);	/* Move AEC filter buffer back */
		ixj_WriteDSPCommand(0x0700, j);
	}
	if (j->play_mode != -1 && j->rec_mode != -1)
		ixj_WriteDSPCommand(0xB002, j);	/* AEC Stop */
}
/*
 * Select the playback codec: the mirror image of set_rec_codec().
 * Stores the per-frame byte count and the DSP play mode for the given
 * rate, gated on the loaded firmware/DSP revision; byte-stream codec
 * frame sizes scale with the base frame (baseframe.low: 0xA0 = 20 ms,
 * 0x50 = 10 ms, else 30 ms).  Returns 0 on success, 1 if unavailable.
 */
static int set_play_codec(IXJ *j, int rate)
{
	int retval = 0;
	j->play_codec = rate;
	switch (rate) {
	case G723_63:
		/* ver 0x12 boards need the conversion firmware loaded. */
		if (j->ver.low != 0x12 || ixj_convert_loaded) {
			j->play_frame_size = 12;
			j->play_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case G723_53:
		if (j->ver.low != 0x12 || ixj_convert_loaded) {
			j->play_frame_size = 10;
			j->play_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case TS85:
		/* Native on the 0x20 DSP, otherwise needs TS85 firmware. */
		if (j->dsp.low == 0x20 || j->flags.ts85_loaded) {
			j->play_frame_size = 16;
			j->play_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case TS48:
		if (j->ver.low != 0x12 || ixj_convert_loaded) {
			j->play_frame_size = 9;
			j->play_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case TS41:
		if (j->ver.low != 0x12 || ixj_convert_loaded) {
			j->play_frame_size = 8;
			j->play_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case G728:
		if (j->dsp.low != 0x20) {
			j->play_frame_size = 48;
			j->play_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case G729:
		if (j->dsp.low != 0x20) {
			if (!j->flags.g729_loaded) {
				retval = 1;
				break;
			}
			switch (j->baseframe.low) {
			case 0xA0:
				j->play_frame_size = 10;
				break;
			case 0x50:
				j->play_frame_size = 5;
				break;
			default:
				j->play_frame_size = 15;
				break;
			}
			j->play_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case G729B:
		if (j->dsp.low != 0x20) {
			if (!j->flags.g729_loaded) {
				retval = 1;
				break;
			}
			switch (j->baseframe.low) {
			case 0xA0:
				j->play_frame_size = 12;
				break;
			case 0x50:
				j->play_frame_size = 6;
				break;
			default:
				j->play_frame_size = 18;
				break;
			}
			j->play_mode = 0;
		} else {
			retval = 1;
		}
		break;
	case ULAW:
		switch (j->baseframe.low) {
		case 0xA0:
			j->play_frame_size = 80;
			break;
		case 0x50:
			j->play_frame_size = 40;
			break;
		default:
			j->play_frame_size = 120;
			break;
		}
		j->play_mode = 2;
		break;
	case ALAW:
		switch (j->baseframe.low) {
		case 0xA0:
			j->play_frame_size = 80;
			break;
		case 0x50:
			j->play_frame_size = 40;
			break;
		default:
			j->play_frame_size = 120;
			break;
		}
		j->play_mode = 2;
		break;
	case LINEAR16:
		switch (j->baseframe.low) {
		case 0xA0:
			j->play_frame_size = 160;
			break;
		case 0x50:
			j->play_frame_size = 80;
			break;
		default:
			j->play_frame_size = 240;
			break;
		}
		j->play_mode = 6;
		break;
	case LINEAR8:
		switch (j->baseframe.low) {
		case 0xA0:
			j->play_frame_size = 80;
			break;
		case 0x50:
			j->play_frame_size = 40;
			break;
		default:
			j->play_frame_size = 120;
			break;
		}
		j->play_mode = 4;
		break;
	case WSS:
		switch (j->baseframe.low) {
		case 0xA0:
			j->play_frame_size = 80;
			break;
		case 0x50:
			j->play_frame_size = 40;
			break;
		default:
			j->play_frame_size = 120;
			break;
		}
		j->play_mode = 5;
		break;
	default:
		/* Unknown codec: tear down any existing playback buffer. */
		kfree(j->write_buffer);
		j->play_frame_size = 0;
		j->play_mode = -1;
		j->write_buffer = NULL;
		j->write_buffer_size = 0;
		retval = 1;
		break;
	}
	return retval;
}
/*
 * Start playback with the codec selected by set_play_codec(): power up
 * the DSP, load the codec (compressed modes only), allocate the
 * single-frame write buffer, select poll-sync mode and issue the
 * Playback C1/C2/C3 commands.  Returns 0 on success, 1 for an unknown
 * codec, -ENOMEM on allocation failure, -1 on a DSP command failure.
 */
static int ixj_play_start(IXJ *j)
{
	unsigned short cmd = 0x0000;
	if (j->write_buffer) {
		ixj_play_stop(j);
	}
	if(ixjdebug & 0x0002)
		printk("IXJ %d Starting Play Codec %d at %ld\n", j->board, j->play_codec, jiffies);
	j->flags.playing = 1;
	ixj_WriteDSPCommand(0x0FE0, j);	/* Put the DSP in full power mode. */
	/* ixj_write_frame() primes the DSP on the first frame. */
	j->flags.play_first_frame = 1;
	j->drybuffer = 0;
	/* play_mode 0 = compressed codecs: load the codec microcode. */
	if (!j->play_mode) {
		switch (j->play_codec) {
		case G723_63:
			cmd = 0x5231;
			break;
		case G723_53:
			cmd = 0x5232;
			break;
		case TS85:
			cmd = 0x5230;	/* TrueSpeech 8.5 */
			break;
		case TS48:
			cmd = 0x5233;	/* TrueSpeech 4.8 */
			break;
		case TS41:
			cmd = 0x5234;	/* TrueSpeech 4.1 */
			break;
		case G728:
			cmd = 0x5235;
			break;
		case G729:
		case G729B:
			cmd = 0x5236;
			break;
		default:
			return 1;
		}
		if (ixj_WriteDSPCommand(cmd, j))
			return -1;
	}
	/* GFP_ATOMIC: may be called from the timer/interrupt path. */
	j->write_buffer = kmalloc(j->play_frame_size * 2, GFP_ATOMIC);
	if (!j->write_buffer) {
		printk("Write buffer allocation for ixj board %d failed!\n", j->board);
		return -ENOMEM;
	}
/*	j->write_buffers_empty = 2; */
	j->write_buffers_empty = 1;
	j->write_buffer_size = j->play_frame_size * 2;
	j->write_buffer_end = j->write_buffer + j->play_frame_size * 2;
	j->write_buffer_rp = j->write_buffer_wp = j->write_buffer;
	if (ixj_WriteDSPCommand(0x5202, j))	/* Set Poll sync mode */
		return -1;
	/* Playback C1: command varies with mode and DSP revision. */
	switch (j->play_mode) {
	case 0:
		cmd = 0x2C03;
		break;
	case 2:
		if (j->ver.low == 0x12) {
			cmd = 0x2C23;
		} else {
			cmd = 0x2C21;
		}
		break;
	case 4:
		if (j->ver.low == 0x12) {
			cmd = 0x2C43;
		} else {
			cmd = 0x2C41;
		}
		break;
	case 5:
		if (j->ver.low == 0x12) {
			cmd = 0x2C53;
		} else {
			cmd = 0x2C51;
		}
		break;
	case 6:
		if (j->ver.low == 0x12) {
			cmd = 0x2C63;
		} else {
			cmd = 0x2C61;
		}
		break;
	}
	if (ixj_WriteDSPCommand(cmd, j))
		return -1;
	if (ixj_WriteDSPCommand(0x2000, j))	/* Playback C2 */
		return -1;
	if (ixj_WriteDSPCommand(0x2000 + j->play_frame_size, j))	/* Playback C3 */
		return -1;
	/* Full duplex: (re)arm echo cancellation. */
	if (j->flags.recording) {
		ixj_aec_start(j, j->aec_level);
	}
	return 0;
}
/*
 * Stop audio playback: free the staging buffer and, if a playback
 * session is active, tell the DSP to stop and flush its buffers.
 */
static void ixj_play_stop(IXJ *j)
{
if (ixjdebug & 0x0002)
printk("IXJ %d Stopping Play Codec %d at %ld\n", j->board, j->play_codec, jiffies);
/* kfree(NULL) is a no-op, so this is safe even if playback never started. */
kfree(j->write_buffer);
j->write_buffer = NULL;
j->write_buffer_size = 0;
if (j->play_mode > -1) {
ixj_WriteDSPCommand(0x5221, j);	/* Stop playback and flush buffers. 8022 reference page 9-40 */
j->play_mode = -1;
}
j->flags.playing = 0;
}
/*
 * Query the DSP for the current playback level (8022 reference page
 * 9-38) and return the raw 16-bit value from the software status
 * register.
 *
 * The original body contained an unreachable tail after the first
 * `return` (a retval re-read and a (retval * 256) / 240 rescale that
 * could never execute); that dead code has been removed.  Callers have
 * always received the raw SSR value, so behavior is unchanged.
 */
static inline int get_play_level(IXJ *j)
{
	ixj_WriteDSPCommand(0xCF8F, j);	/* 8022 Reference page 9-38 */
	return j->ssr.high << 8 | j->ssr.low;
}
/*
 * poll()/select() entry point: report readability, writability and
 * pending exception events for the board behind this device node.
 */
static unsigned int ixj_poll(struct file *file_p, poll_table * wait)
{
	IXJ *j = get_ixj(NUM(file_p->f_path.dentry->d_inode));
	unsigned int events = 0;

	poll_wait(file_p, &(j->poll_q), wait);
	if (j->read_buffer_ready > 0)
		events |= POLLIN | POLLRDNORM;	/* readable */
	if (j->write_buffers_empty > 0)
		events |= POLLOUT | POLLWRNORM;	/* writable */
	if (j->ex.bytes)
		events |= POLLPRI;	/* exception/event pending */
	return events;
}
/*
 * Start (or retarget) DSP tone generation for tone table entry `tone`.
 * Returns 0 on success, -1 if the DSP command write fails.
 */
static int ixj_play_tone(IXJ *j, char tone)
{
	if (!j->tone_state) {
		if (ixjdebug & 0x0002)
			printk("IXJ %d starting tone %d at %ld\n", j->board, tone, jiffies);
		if (j->dsp.low == 0x20)
			idle(j);
		j->tone_start_jif = jiffies;
		j->tone_state = 1;
	}
	j->tone_index = tone;
	return ixj_WriteDSPCommand(0x6000 + j->tone_index, j) ? -1 : 0;
}
/*
 * Program the tone-on period: command 0x6E04 followed by the period
 * value.  Returns 0 on success, -1 if either DSP write fails (the
 * second write is skipped when the first fails).
 */
static int ixj_set_tone_on(unsigned short arg, IXJ *j)
{
	j->tone_on_time = arg;
	if (ixj_WriteDSPCommand(0x6E04, j) || ixj_WriteDSPCommand(arg, j))
		return -1;
	return 0;
}
/*
 * Wait for the PLD's SCI bit to go high.  Polls up to 10 times at
 * 32 us intervals.  Returns 1 when SCI is seen high, 0 on timeout.
 */
static int SCI_WaitHighSCI(IXJ *j)
{
	int attempt;

	j->pld_scrr.byte = inb_p(j->XILINXbase);
	if (j->pld_scrr.bits.sci)
		return 1;
	for (attempt = 0; attempt < 10; attempt++) {
		udelay(32);
		j->pld_scrr.byte = inb_p(j->XILINXbase);
		if (j->pld_scrr.bits.sci)
			return 1;
	}
	if (ixjdebug & 0x0001)
		printk(KERN_INFO "SCI Wait High failed %x\n", j->pld_scrr.byte);
	return 0;
}
/*
 * Wait for the PLD's SCI bit to drop low.  Polls up to 10 times at
 * 32 us intervals.  Returns 1 when SCI is seen low, 0 on timeout.
 */
static int SCI_WaitLowSCI(IXJ *j)
{
	int attempt;

	j->pld_scrr.byte = inb_p(j->XILINXbase);
	if (!j->pld_scrr.bits.sci)
		return 1;
	for (attempt = 0; attempt < 10; attempt++) {
		udelay(32);
		j->pld_scrr.byte = inb_p(j->XILINXbase);
		if (!j->pld_scrr.bits.sci)
			return 1;
	}
	if (ixjdebug & 0x0001)
		printk(KERN_INFO "SCI Wait Low failed %x\n", j->pld_scrr.byte);
	return 0;
}
/*
 * Route the PLD serial control interface.  Bits c1:c0 select the
 * target (DAA, mixer, EEPROM) or deselect everything (SCI_End).  The
 * selection is latched to the PLD; for a real target we then wait for
 * SCI to assert.  Returns 1 on success, 0 on bad control code or SCI
 * timeout.
 */
static int SCI_Control(IXJ *j, int control)
{
	switch (control) {
	case SCI_End:
		j->pld_scrw.bits.c0 = 0;	/* Set PLD Serial control interface */
		j->pld_scrw.bits.c1 = 0;	/* to no selection */
		break;
	case SCI_Enable_DAA:
		j->pld_scrw.bits.c0 = 1;	/* Set PLD Serial control interface */
		j->pld_scrw.bits.c1 = 0;	/* to write to DAA */
		break;
	case SCI_Enable_Mixer:
		j->pld_scrw.bits.c0 = 0;	/* Set PLD Serial control interface */
		j->pld_scrw.bits.c1 = 1;	/* to write to mixer */
		break;
	case SCI_Enable_EEPROM:
		j->pld_scrw.bits.c0 = 1;	/* Set PLD Serial control interface */
		j->pld_scrw.bits.c1 = 1;	/* to write to EEPROM */
		break;
	default:
		return 0;
	}
	outb_p(j->pld_scrw.byte, j->XILINXbase);
	if (control == SCI_End)
		return 1;
	/* One of the three enables: wait for the interface to come up. */
	return SCI_WaitHighSCI(j) ? 1 : 0;
}
/*
 * Bring the serial control interface to its idle state: deselect all
 * targets and wait for SCI to drop.  Returns 1 on success, 0 on
 * failure of either step.
 */
static int SCI_Prepare(IXJ *j)
{
	return (SCI_Control(j, SCI_End) && SCI_WaitLowSCI(j)) ? 1 : 0;
}
/*
 * Return the cached value of a mixer register.  Bits 12:8 of `val`
 * select the register (same encoding ixj_mixer() writes with).
 */
static int ixj_get_mixer(long val, IXJ *j)
{
	return j->mix.vol[(val & 0x1F00) >> 8];
}
/*
 * Write a mixer register.  The high byte of `val` carries the register
 * address (only 5 bits are used), the low byte the data.  The value is
 * cached in j->mix.vol so ixj_get_mixer() can report it back later.
 * Always returns 0.
 */
static int ixj_mixer(long val, IXJ *j)
{
	BYTES b;

	b.high = (val & 0x1F00) >> 8;
	b.low = val & 0x00FF;
	/* save mixer value so we can get back later on */
	j->mix.vol[b.high] = b.low;
	outb_p(b.high & 0x1F, j->XILINXbase + 0x03);	/* Load Mixer Address */
	outb_p(b.low, j->XILINXbase + 0x02);	/* Load Mixer Data */
	SCI_Control(j, SCI_Enable_Mixer);
	SCI_Control(j, SCI_End);
	return 0;
}
/*
 * Latch an address/data byte pair into the PLD and clock it into the
 * DAA over the serial control interface.  Returns 1 on success, 0 if
 * the SCI handshake fails.
 */
static int daa_load(BYTES * p_bytes, IXJ *j)
{
	outb_p(p_bytes->high, j->XILINXbase + 0x03);
	outb_p(p_bytes->low, j->XILINXbase + 0x02);
	return SCI_Control(j, SCI_Enable_DAA) ? 1 : 0;
}
/*
 * Write control register 4 of the DAA.  The write address depends on
 * the DAA's current power-up mode.  Returns 1 on success, 0 on SCI
 * failure.
 *
 * Fix: the mode switch previously left bytes.high uninitialized when
 * j->daa_mode was anything other than the four named states (undefined
 * behavior, and a garbage address byte sent to the DAA).  A default
 * arm now shares the pulse-dialing address, mirroring daa_CR_read(),
 * which already treats unknown modes that way.
 */
static int ixj_daa_cr4(IXJ *j, char reg)
{
	BYTES bytes;

	switch (j->daa_mode) {
	case SOP_PU_SLEEP:
		bytes.high = 0x14;
		break;
	case SOP_PU_RINGING:
		bytes.high = 0x54;
		break;
	case SOP_PU_CONVERSATION:
		bytes.high = 0x94;
		break;
	case SOP_PU_PULSEDIALING:
	default:
		bytes.high = 0xD4;
		break;
	}
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = reg;
	/* Remap the AGX gain field onto AGR_Z: 0->0, 1->2, 2->1, 3->3. */
	switch (j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGX) {
	case 0:
		j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 0;
		break;
	case 1:
		j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 2;
		break;
	case 2:
		j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 1;
		break;
	case 3:
		j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.bitreg.AGR_Z = 3;
		break;
	}
	bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg;
	if (!daa_load(&bytes, j))
		return 0;
	if (!SCI_Prepare(j))
		return 0;
	return 1;
}
/*
 * Read the DAA interrupt register over the serial control interface
 * into the xr0 shadow register.  Returns 1 on success, 0 on failure.
 */
static char daa_int_read(IXJ *j)
{
BYTES bytes;
if (!SCI_Prepare(j))
return 0;
/* 0x38 is the command/address byte for this read; presumably it selects
 * the interrupt register — TODO confirm against the ALIS-A datasheet. */
bytes.high = 0x38;
bytes.low = 0x00;
outb_p(bytes.high, j->XILINXbase + 0x03);
outb_p(bytes.low, j->XILINXbase + 0x02);
if (!SCI_Control(j, SCI_Enable_DAA))
return 0;
/* The first readback must present the ALIS DAA ID byte in the low
 * register, otherwise the DAA did not respond. */
bytes.high = inb_p(j->XILINXbase + 0x03);
bytes.low = inb_p(j->XILINXbase + 0x02);
if (bytes.low != ALISDAA_ID_BYTE) {
if (ixjdebug & 0x0001)
printk("Cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
return 0;
}
/* Clock one more DAA cycle, deselect, then pick up the register value. */
if (!SCI_Control(j, SCI_Enable_DAA))
return 0;
if (!SCI_Control(j, SCI_End))
return 0;
bytes.high = inb_p(j->XILINXbase + 0x03);
bytes.low = inb_p(j->XILINXbase + 0x02);
j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.reg = bytes.high;
return 1;
}
/*
 * Read DAA control register `cr` (0..5) into its shadow copy.  The
 * read address depends on the DAA's current power-up mode; unknown
 * modes are treated like pulse dialing.  Returns 1 on success, 0 on
 * SCI failure, missing DAA ID byte, or out-of-range `cr`.
 */
static char daa_CR_read(IXJ *j, int cr)
{
IXJ_WORD wdata;
BYTES bytes;
if (!SCI_Prepare(j))
return 0;
/* Base address of the CR bank varies with the DAA power-up mode. */
switch (j->daa_mode) {
case SOP_PU_SLEEP:
bytes.high = 0x30 + cr;
break;
case SOP_PU_RINGING:
bytes.high = 0x70 + cr;
break;
case SOP_PU_CONVERSATION:
bytes.high = 0xB0 + cr;
break;
case SOP_PU_PULSEDIALING:
default:
bytes.high = 0xF0 + cr;
break;
}
bytes.low = 0x00;
outb_p(bytes.high, j->XILINXbase + 0x03);
outb_p(bytes.low, j->XILINXbase + 0x02);
if (!SCI_Control(j, SCI_Enable_DAA))
return 0;
/* First readback must carry the ALIS DAA ID byte or the DAA is absent. */
bytes.high = inb_p(j->XILINXbase + 0x03);
bytes.low = inb_p(j->XILINXbase + 0x02);
if (bytes.low != ALISDAA_ID_BYTE) {
if (ixjdebug & 0x0001)
printk("Cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
return 0;
}
/* Clock a second DAA cycle, deselect, then read the register word. */
if (!SCI_Control(j, SCI_Enable_DAA))
return 0;
if (!SCI_Control(j, SCI_End))
return 0;
wdata.word = inw_p(j->XILINXbase + 0x02);
/* The register value arrives in the high byte; stash it in the matching
 * shadow register. */
switch(cr){
case 5:
j->m_DAAShadowRegs.SOP_REGS.SOP.cr5.reg = wdata.bytes.high;
break;
case 4:
j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = wdata.bytes.high;
break;
case 3:
j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = wdata.bytes.high;
break;
case 2:
j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = wdata.bytes.high;
break;
case 1:
j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = wdata.bytes.high;
break;
case 0:
j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = wdata.bytes.high;
break;
default:
return 0;
}
return 1;
}
/*
 * Clear the DAA's caller-ID RAM by streaming zero bytes into it.
 * Returns 1 on success, 0 on any SCI failure.
 */
static int ixj_daa_cid_reset(IXJ *j)
{
int i;
BYTES bytes;
if (ixjdebug & 0x0002)
printk("DAA Clearing CID ram\n");
if (!SCI_Prepare(j))
return 0;
/* 0x58 presumably addresses the CID RAM for writing — TODO confirm
 * against the ALIS-A datasheet. */
bytes.high = 0x58;
bytes.low = 0x00;
outb_p(bytes.high, j->XILINXbase + 0x03);
outb_p(bytes.low, j->XILINXbase + 0x02);
if (!SCI_Control(j, SCI_Enable_DAA))
return 0;
if (!SCI_WaitHighSCI(j))
return 0;
/* Stream zeros two bytes per SCI cycle. */
for (i = 0; i < ALISDAA_CALLERID_SIZE - 1; i += 2) {
bytes.high = bytes.low = 0x00;
outb_p(bytes.high, j->XILINXbase + 0x03);
/* NOTE(review): this guard is always true given the loop bound above;
 * it only matters if the loop condition is ever widened to cover an
 * odd-sized CID RAM. */
if (i < ALISDAA_CALLERID_SIZE - 1)
outb_p(bytes.low, j->XILINXbase + 0x02);
if (!SCI_Control(j, SCI_Enable_DAA))
return 0;
if (!SCI_WaitHighSCI(j))
return 0;
}
if (!SCI_Control(j, SCI_End))
return 0;
if (ixjdebug & 0x0002)
printk("DAA CID ram cleared\n");
return 1;
}
/*
 * Read the caller-ID RAM out of the DAA, unpack the bit-shifted byte
 * stream, and populate j->cid (date, time, number, name).  The CID RAM
 * is cleared afterwards.  Returns 1 on success, 0 on failure.
 */
static int ixj_daa_cid_read(IXJ *j)
{
int i;
BYTES bytes;
char CID[ALISDAA_CALLERID_SIZE];
bool mContinue;
char *pIn, *pOut;
if (!SCI_Prepare(j))
return 0;
/* 0x78 presumably addresses the CID RAM for reading — TODO confirm
 * against the ALIS-A datasheet. */
bytes.high = 0x78;
bytes.low = 0x00;
outb_p(bytes.high, j->XILINXbase + 0x03);
outb_p(bytes.low, j->XILINXbase + 0x02);
if (!SCI_Control(j, SCI_Enable_DAA))
return 0;
if (!SCI_WaitHighSCI(j))
return 0;
/* First readback must present the ALIS DAA ID byte. */
bytes.high = inb_p(j->XILINXbase + 0x03);
bytes.low = inb_p(j->XILINXbase + 0x02);
if (bytes.low != ALISDAA_ID_BYTE) {
if (ixjdebug & 0x0001)
printk("DAA Get Version Cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
return 0;
}
/* Pull the whole CID RAM, two bytes per SCI cycle. */
for (i = 0; i < ALISDAA_CALLERID_SIZE; i += 2) {
bytes.high = bytes.low = 0x00;
outb_p(bytes.high, j->XILINXbase + 0x03);
outb_p(bytes.low, j->XILINXbase + 0x02);
if (!SCI_Control(j, SCI_Enable_DAA))
return 0;
if (!SCI_WaitHighSCI(j))
return 0;
CID[i + 0] = inb_p(j->XILINXbase + 0x03);
CID[i + 1] = inb_p(j->XILINXbase + 0x02);
}
if (!SCI_Control(j, SCI_End))
return 0;
/* The RAM stores data with 2-bit framing markers: every 5 input bytes
 * carry 4 payload bytes.  A group whose marker check fails terminates
 * the stream. */
pIn = CID;
pOut = j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID;
mContinue = true;
while (mContinue) {
if ((pIn[1] & 0x03) == 0x01) {
pOut[0] = pIn[0];
}
if ((pIn[2] & 0x0c) == 0x04) {
pOut[1] = ((pIn[2] & 0x03) << 6) | ((pIn[1] & 0xfc) >> 2);
}
if ((pIn[3] & 0x30) == 0x10) {
pOut[2] = ((pIn[3] & 0x0f) << 4) | ((pIn[2] & 0xf0) >> 4);
}
if ((pIn[4] & 0xc0) == 0x40) {
pOut[3] = ((pIn[4] & 0x3f) << 2) | ((pIn[3] & 0xc0) >> 6);
} else {
mContinue = false;
}
pIn += 5, pOut += 4;
}
/* Decode the unpacked stream into the per-board caller-ID record.
 * Layout (after a 4-byte header): MMDDHHMM, pad, numlen, number,
 * pad, namelen, name.
 * NOTE(review): numlen/namelen come straight from the device and are
 * not bounded against the size of j->cid.number / j->cid.name before
 * the strncpy — verify the destination fields are large enough for
 * the maximum the hardware can report. */
memset(&j->cid, 0, sizeof(PHONE_CID));
pOut = j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID;
pOut += 4;
strncpy(j->cid.month, pOut, 2);
pOut += 2;
strncpy(j->cid.day, pOut, 2);
pOut += 2;
strncpy(j->cid.hour, pOut, 2);
pOut += 2;
strncpy(j->cid.min, pOut, 2);
pOut += 3;
j->cid.numlen = *pOut;
pOut += 1;
strncpy(j->cid.number, pOut, j->cid.numlen);
pOut += j->cid.numlen + 1;
j->cid.namelen = *pOut;
pOut += 1;
strncpy(j->cid.name, pOut, j->cid.namelen);
ixj_daa_cid_reset(j);
return 1;
}
/*
 * Read the DAA version register into the cr5 shadow register.
 * Returns the version byte on success, 0 on failure.
 */
static char daa_get_version(IXJ *j)
{
BYTES bytes;
if (!SCI_Prepare(j))
return 0;
/* 0x35 is the version-read command/address byte — TODO confirm against
 * the ALIS-A datasheet. */
bytes.high = 0x35;
bytes.low = 0x00;
outb_p(bytes.high, j->XILINXbase + 0x03);
outb_p(bytes.low, j->XILINXbase + 0x02);
if (!SCI_Control(j, SCI_Enable_DAA))
return 0;
/* First readback must present the ALIS DAA ID byte. */
bytes.high = inb_p(j->XILINXbase + 0x03);
bytes.low = inb_p(j->XILINXbase + 0x02);
if (bytes.low != ALISDAA_ID_BYTE) {
if (ixjdebug & 0x0001)
printk("DAA Get Version Cannot read DAA ID Byte high = %d low = %d\n", bytes.high, bytes.low);
return 0;
}
/* Clock a second DAA cycle, deselect, then read the version byte. */
if (!SCI_Control(j, SCI_Enable_DAA))
return 0;
if (!SCI_Control(j, SCI_End))
return 0;
bytes.high = inb_p(j->XILINXbase + 0x03);
bytes.low = inb_p(j->XILINXbase + 0x02);
if (ixjdebug & 0x0002)
printk("DAA CR5 Byte high = 0x%x low = 0x%x\n", bytes.high, bytes.low);
j->m_DAAShadowRegs.SOP_REGS.SOP.cr5.reg = bytes.high;
return bytes.high;
}
/*
 * Switch the DAA to a new power-up mode (RESET, SLEEP, RINGING,
 * CONVERSATION or PULSEDIALING).  Returns 1 on success (and for
 * unknown modes, which are ignored), 0 if an SCI handshake fails.
 * See the hardware-safety notes below before touching the ordering.
 */
static int daa_set_mode(IXJ *j, int mode)
{
/* NOTE:
The DAA *MUST* be in the conversation mode if the
PSTN line is to be seized (PSTN line off-hook).
Taking the PSTN line off-hook while the DAA is in
a mode other than conversation mode will cause a
hardware failure of the ALIS-A part.
NOTE:
The DAA can only go to SLEEP, RINGING or PULSEDIALING modes
if the PSTN line is on-hook. Failure to have the PSTN line
in the on-hook state WILL CAUSE A HARDWARE FAILURE OF THE
ALIS-A part.
*/
BYTES bytes;
j->flags.pstn_rmr = 0;
if (!SCI_Prepare(j))
return 0;
switch (mode) {
case SOP_PU_RESET:
/* Drop frame sync and the line relay, then command the sleep state;
 * the driver records the resulting mode as SLEEP. */
j->pld_scrw.bits.daafsyncen = 0;	/* Turn off DAA Frame Sync */
outb_p(j->pld_scrw.byte, j->XILINXbase);
j->pld_slicw.bits.rly2 = 0;
outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
bytes.high = 0x10;
bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
daa_load(&bytes, j);
if (!SCI_Prepare(j))
return 0;
j->daa_mode = SOP_PU_SLEEP;
break;
case SOP_PU_SLEEP:
if(j->daa_mode == SOP_PU_SLEEP)
{
break;
}
if (ixjdebug & 0x0008)
printk(KERN_INFO "phone DAA: SOP_PU_SLEEP at %ld\n", jiffies);
/* if(j->daa_mode == SOP_PU_CONVERSATION) */
/* NOTE(review): the power-down sequence below is executed twice —
 * once inside this bare block (a remnant of the commented-out
 * conversation-mode check above) and once again after it.  Confirm
 * whether the repetition is required by the hardware or is dead
 * weight. */
{
j->pld_scrw.bits.daafsyncen = 0;	/* Turn off DAA Frame Sync */
outb_p(j->pld_scrw.byte, j->XILINXbase);
j->pld_slicw.bits.rly2 = 0;
outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
bytes.high = 0x10;
bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
daa_load(&bytes, j);
if (!SCI_Prepare(j))
return 0;
}
j->pld_scrw.bits.daafsyncen = 0;	/* Turn off DAA Frame Sync */
outb_p(j->pld_scrw.byte, j->XILINXbase);
j->pld_slicw.bits.rly2 = 0;
outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
bytes.high = 0x10;
bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
daa_load(&bytes, j);
if (!SCI_Prepare(j))
return 0;
j->daa_mode = SOP_PU_SLEEP;
j->flags.pstn_ringing = 0;
j->ex.bits.pstn_ring = 0;
/* Debounce: don't act on the PSTN line again for a quarter second. */
j->pstn_sleeptil = jiffies + (hertz / 4);
wake_up_interruptible(&j->read_q);      /* Wake any blocked readers */
wake_up_interruptible(&j->write_q);     /* Wake any blocked writers */
wake_up_interruptible(&j->poll_q);      /* Wake any blocked selects */
break;
case SOP_PU_RINGING:
if (ixjdebug & 0x0008)
printk(KERN_INFO "phone DAA: SOP_PU_RINGING at %ld\n", jiffies);
j->pld_scrw.bits.daafsyncen = 0;	/* Turn off DAA Frame Sync */
outb_p(j->pld_scrw.byte, j->XILINXbase);
j->pld_slicw.bits.rly2 = 0;
outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
bytes.high = 0x50;
bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
daa_load(&bytes, j);
if (!SCI_Prepare(j))
return 0;
j->daa_mode = SOP_PU_RINGING;
break;
case SOP_PU_CONVERSATION:
if (ixjdebug & 0x0008)
printk(KERN_INFO "phone DAA: SOP_PU_CONVERSATION at %ld\n", jiffies);
/* Enter conversation mode FIRST, then close the relay and enable
 * frame sync — see the safety note at the top of this function. */
bytes.high = 0x90;
bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
daa_load(&bytes, j);
if (!SCI_Prepare(j))
return 0;
j->pld_slicw.bits.rly2 = 1;
outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
j->pld_scrw.bits.daafsyncen = 1;	/* Turn on DAA Frame Sync */
outb_p(j->pld_scrw.byte, j->XILINXbase);
j->daa_mode = SOP_PU_CONVERSATION;
j->flags.pstn_ringing = 0;
j->ex.bits.pstn_ring = 0;
j->pstn_sleeptil = jiffies;
j->pstn_ring_start = j->pstn_ring_stop = j->pstn_ring_int = 0;
break;
case SOP_PU_PULSEDIALING:
if (ixjdebug & 0x0008)
printk(KERN_INFO "phone DAA: SOP_PU_PULSEDIALING at %ld\n", jiffies);
j->pld_scrw.bits.daafsyncen = 0;	/* Turn off DAA Frame Sync */
outb_p(j->pld_scrw.byte, j->XILINXbase);
j->pld_slicw.bits.rly2 = 0;
outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
bytes.high = 0xD0;
bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
daa_load(&bytes, j);
if (!SCI_Prepare(j))
return 0;
j->daa_mode = SOP_PU_PULSEDIALING;
break;
default:
break;
}
return 1;
}
/*
 * Stream one COP coefficient page into the DAA.
 *
 * Each page is sent as: (page address, coeff[len-1]), then descending
 * coefficient pairs (coeff[k], coeff[k-1]), and finally (coeff[0], 0x00)
 * — byte-for-byte the sequence the original inline code issued for every
 * page.  `coeff` is taken as void* because the shadow-register arrays'
 * char signedness is declared elsewhere; only the bit patterns matter.
 * `len` is 8 or 4 depending on the page.  Returns 1 on success, 0 on
 * SCI failure.
 */
static int daa_load_page(IXJ *j, unsigned char addr, const void *coeff, int len)
{
	const unsigned char *c = coeff;
	BYTES bytes;
	int k;

	bytes.high = addr;
	bytes.low = c[len - 1];
	if (!daa_load(&bytes, j))
		return 0;
	for (k = len - 2; k >= 2; k -= 2) {
		bytes.high = c[k];
		bytes.low = c[k - 1];
		if (!daa_load(&bytes, j))
			return 0;
	}
	bytes.high = c[0];
	bytes.low = 0x00;
	return daa_load(&bytes, j);
}

/*
 * Push the complete DAA shadow-register image to the hardware: the SOP
 * control registers, the XOP extended registers, and the sixteen COP
 * coefficient pages (0x00-0x0F).  The DAA is put to sleep first and
 * j->flags.pstncheck is held high for the duration.  Returns 1 on
 * success, 0 on any SCI/load failure.
 *
 * Refactor: the sixteen COP pages were previously ~350 lines of
 * copy-pasted load sequences; they now go through daa_load_page() driven
 * by a page table, preserving the exact page order, byte order, and the
 * SCI end/wait handshake after every page except the last (which keeps
 * its original udelay/status-read tail).
 */
static int ixj_daa_write(IXJ *j)
{
	BYTES bytes;
	int i;
	const struct {
		unsigned char addr;
		const void *coeff;
		int len;
	} pages[16] = {
		{ 0x00, j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1, 8 },
		{ 0x01, j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2, 8 },
		{ 0x02, j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3, 8 },
		{ 0x03, j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1, 8 },
		{ 0x04, j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1, 8 },
		{ 0x05, j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2, 8 },
		{ 0x06, j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2, 8 },
		{ 0x07, j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff, 8 },
		{ 0x08, j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff, 8 },
		{ 0x09, j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff, 4 },
		{ 0x0A, j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff, 4 },
		{ 0x0B, j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff, 4 },
		{ 0x0C, j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff, 4 },
		{ 0x0D, j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging, 4 },
		{ 0x0E, j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone, 8 },
		{ 0x0F, j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone, 8 },
	};

	j->flags.pstncheck = 1;
	daa_set_mode(j, SOP_PU_SLEEP);
	if (!SCI_Prepare(j))
		return 0;
	outb_p(j->pld_scrw.byte, j->XILINXbase);

	/* SOP control registers: cr4, cr3/cr2, cr1/cr0. */
	bytes.high = 0x14;
	bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg;
	if (!daa_load(&bytes, j))
		return 0;
	bytes.high = j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg;
	bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg;
	if (!daa_load(&bytes, j))
		return 0;
	bytes.high = j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg;
	bytes.low = j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg;
	if (!daa_load(&bytes, j))
		return 0;
	if (!SCI_Prepare(j))
		return 0;

	/* XOP extended registers: xr7 down to xr0 (xr6 and xr0 use their
	 * dedicated write shadows). */
	bytes.high = 0x1F;
	bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg;
	if (!daa_load(&bytes, j))
		return 0;
	bytes.high = j->m_DAAShadowRegs.XOP_xr6_W.reg;
	bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg;
	if (!daa_load(&bytes, j))
		return 0;
	bytes.high = j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg;
	bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg;
	if (!daa_load(&bytes, j))
		return 0;
	bytes.high = j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg;
	bytes.low = j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg;
	if (!daa_load(&bytes, j))
		return 0;
	bytes.high = j->m_DAAShadowRegs.XOP_xr0_W.reg;
	bytes.low = 0x00;
	if (!daa_load(&bytes, j))
		return 0;
	if (!SCI_Prepare(j))
		return 0;

	/* COP coefficient pages 0x00..0x0E, each followed by the SCI
	 * end/wait-low handshake the original code performed per page. */
	for (i = 0; i < 15; i++) {
		if (!daa_load_page(j, pages[i].addr, pages[i].coeff, pages[i].len))
			return 0;
		if (!SCI_Control(j, SCI_End))
			return 0;
		if (!SCI_WaitLowSCI(j))
			return 0;
	}

	/* Final page (0x0F, CallerID2ndTone) keeps its original tail:
	 * settle, snapshot the PLD status, deselect, re-latch the control
	 * byte. */
	if (!daa_load_page(j, pages[15].addr, pages[15].coeff, pages[15].len))
		return 0;
	udelay(32);
	j->pld_scrr.byte = inb_p(j->XILINXbase);
	if (!SCI_Control(j, SCI_End))
		return 0;
	outb_p(j->pld_scrw.byte, j->XILINXbase);
	if (ixjdebug & 0x0002)
		printk("DAA Coefficients Loaded\n");
	j->flags.pstncheck = 0;
	return 1;
}
/*
 * Program the tone-off period: command 0x6E05 followed by the period
 * value.  Returns 0 on success, -1 if either DSP write fails (the
 * second write is skipped when the first fails).
 */
static int ixj_set_tone_off(unsigned short arg, IXJ *j)
{
	j->tone_off_time = arg;
	if (ixj_WriteDSPCommand(0x6E05, j) || ixj_WriteDSPCommand(arg, j))
		return -1;
	return 0;
}
/*
 * Issue the DSP command to fetch the tone-on period (0x6E06).
 * Returns 0 if the command was accepted, -1 on DSP write failure.
 */
static int ixj_get_tone_on(IXJ *j)
{
	return ixj_WriteDSPCommand(0x6E06, j) ? -1 : 0;
}
/*
 * Issue the DSP command to fetch the tone-off period (0x6E07).
 * Returns 0 if the command was accepted, -1 on DSP write failure.
 */
static int ixj_get_tone_off(IXJ *j)
{
	return ixj_WriteDSPCommand(0x6E07, j) ? -1 : 0;
}
/*
 * Switch call-progress state to "busy" and start the busy cadence:
 * equal on/off periods of 0x07D0, tone table entry 27.
 */
static void ixj_busytone(IXJ *j)
{
	j->flags.dialtone = 0;
	j->flags.ringback = 0;
	j->flags.busytone = 1;
	ixj_set_tone_on(0x07D0, j);
	ixj_set_tone_off(0x07D0, j);
	ixj_play_tone(j, 27);
}
/*
 * Switch call-progress state to "dial tone" and start a continuous
 * tone (on period 0xFFFF, off period 0, tone table entry 25).  The
 * tone itself is skipped on the 0x20 DSP variant.
 */
static void ixj_dialtone(IXJ *j)
{
	j->flags.busytone = 0;
	j->flags.ringback = 0;
	j->flags.dialtone = 1;
	if (j->dsp.low == 0x20)
		return;	/* this DSP variant does not play the soft dial tone */
	ixj_set_tone_on(0xFFFF, j);
	ixj_set_tone_off(0x0000, j);
	ixj_play_tone(j, 25);
}
/*
 * Stop any in-progress call-progress tone (dial tone, busy, ringback
 * or a custom cadence), free any loaded cadence, then either idle the
 * DSP or restart record/playback depending on the current modes.
 */
static void ixj_cpt_stop(IXJ *j)
{
if(j->tone_state || j->tone_cadence_state)
{
/* Cancel whatever tone is sounding and silence the generator
 * (minimal on period, zero off period, tone index 0). */
j->flags.dialtone = 0;
j->flags.busytone = 0;
j->flags.ringback = 0;
ixj_set_tone_on(0x0001, j);
ixj_set_tone_off(0x0000, j);
ixj_play_tone(j, 0);
j->tone_state = j->tone_cadence_state = 0;
/* Release any custom cadence loaded via ixj_build_cadence(). */
if (j->cadence_t) {
kfree(j->cadence_t->ce);
kfree(j->cadence_t);
j->cadence_t = NULL;
}
}
/* With neither playback nor record active, drop the DSP to idle;
 * otherwise, on the 0x20 DSP, restart whichever direction is active. */
if (j->play_mode == -1 && j->rec_mode == -1)
idle(j);
if (j->play_mode != -1 && j->dsp.low == 0x20)
ixj_play_start(j);
if (j->rec_mode != -1 && j->dsp.low == 0x20)
ixj_record_start(j);
}
/*
 * Switch call-progress state to "ringback" and start its cadence:
 * on period 0x0FA0, off period 0x2EE0, tone table entry 26.
 */
static void ixj_ringback(IXJ *j)
{
	j->flags.dialtone = 0;
	j->flags.busytone = 0;
	j->flags.ringback = 1;
	ixj_set_tone_on(0x0FA0, j);
	ixj_set_tone_off(0x2EE0, j);
	ixj_play_tone(j, 26);
}
/* Kick off the DSP's external SRAM self-test (command 0x3001). */
static void ixj_testram(IXJ *j)
{
ixj_WriteDSPCommand(0x3001, j);		/* Test External SRAM */
}
/*
 * Copy a user-supplied tone cadence into kernel space, install it as
 * the board's active cadence, and start playing its first element.
 * Returns 1 on success, -EFAULT on a bad user pointer, -EINVAL if the
 * element count would overflow the allocation, -ENOMEM on allocation
 * failure.  Any previously-loaded cadence is freed.
 */
static int ixj_build_cadence(IXJ *j, IXJ_CADENCE __user * cp)
{
ixj_cadence *lcp;
IXJ_CADENCE_ELEMENT __user *cep;
IXJ_CADENCE_ELEMENT *lcep;
IXJ_TONE ti;
int err;
lcp = kmalloc(sizeof(ixj_cadence), GFP_KERNEL);
if (lcp == NULL)
return -ENOMEM;
err = -EFAULT;
/* Copy the header fields and the user pointer to the element array. */
if (copy_from_user(&lcp->elements_used,
&cp->elements_used, sizeof(int)))
goto out;
if (copy_from_user(&lcp->termination,
&cp->termination, sizeof(IXJ_CADENCE_TERM)))
goto out;
if (get_user(cep, &cp->ce))
goto out;
err = -EINVAL;
/* Reject element counts whose byte size would overflow the kmalloc
 * below (elements_used comes straight from user space). */
if ((unsigned)lcp->elements_used >= ~0U/sizeof(IXJ_CADENCE_ELEMENT))
goto out;
err = -ENOMEM;
lcep = kmalloc(sizeof(IXJ_CADENCE_ELEMENT) * lcp->elements_used, GFP_KERNEL);
if (!lcep)
goto out;
err = -EFAULT;
if (copy_from_user(lcep, cep, sizeof(IXJ_CADENCE_ELEMENT) * lcp->elements_used))
goto out1;
/* Swap in the new cadence, releasing the previous one. */
if (j->cadence_t) {
kfree(j->cadence_t->ce);
kfree(j->cadence_t);
}
lcp->ce = (void *) lcep;
j->cadence_t = lcp;
j->tone_cadence_state = 0;
/* Program the first element's timing, define its tone if it carries a
 * frequency, and start it. */
ixj_set_tone_on(lcp->ce[0].tone_on_time, j);
ixj_set_tone_off(lcp->ce[0].tone_off_time, j);
if (j->cadence_t->ce[j->tone_cadence_state].freq0) {
ti.tone_index = j->cadence_t->ce[j->tone_cadence_state].index;
ti.freq0 = j->cadence_t->ce[j->tone_cadence_state].freq0;
ti.gain0 = j->cadence_t->ce[j->tone_cadence_state].gain0;
ti.freq1 = j->cadence_t->ce[j->tone_cadence_state].freq1;
ti.gain1 = j->cadence_t->ce[j->tone_cadence_state].gain1;
ixj_init_tone(j, &ti);
}
ixj_play_tone(j, lcp->ce[0].index);
return 1;
out1:
kfree(lcep);
out:
kfree(lcp);
return err;
}
/*
 * Load a user-supplied filter cadence description into one of the six
 * per-board cadence slots, resetting its detection state and min/max
 * windows.  Returns 0 on success, -1 if the filter index is out of
 * range, or the memdup_user() error code on a bad copy/allocation.
 */
static int ixj_build_filter_cadence(IXJ *j, IXJ_FILTER_CADENCE __user * cp)
{
	IXJ_FILTER_CADENCE *fc;
	typeof(&j->cadence_f[0]) cad;

	fc = memdup_user(cp, sizeof(IXJ_FILTER_CADENCE));
	if (IS_ERR(fc)) {
		if (ixjdebug & 0x0001) {
			printk(KERN_INFO "Could not allocate memory for cadence or could not copy cadence to kernel\n");
		}
		return PTR_ERR(fc);
	}
	if (fc->filter > 5) {
		if (ixjdebug & 0x0001) {
			printk(KERN_INFO "Cadence out of range\n");
		}
		kfree(fc);
		return -1;
	}
	/* Alias the target slot once instead of re-indexing on every field. */
	cad = &j->cadence_f[fc->filter];
	cad->state = 0;
	cad->enable = fc->enable;
	j->filter_en[fc->filter] = cad->en_filter = fc->en_filter;
	cad->on1 = fc->on1;
	cad->on1min = cad->on1max = 0;
	cad->off1 = fc->off1;
	cad->off1min = cad->off1max = 0;
	cad->on2 = fc->on2;
	cad->on2min = cad->on2max = 0;
	cad->off2 = fc->off2;
	cad->off2min = cad->off2max = 0;
	cad->on3 = fc->on3;
	cad->on3min = cad->on3max = 0;
	cad->off3 = fc->off3;
	cad->off3min = cad->off3max = 0;
	if (ixjdebug & 0x0002) {
		printk(KERN_INFO "Cadence %d loaded\n", fc->filter);
	}
	kfree(fc);
	return 0;
}
/*
 * Append one capability entry to j->caplist[] and advance j->caps.
 * handle is the entry's own index, matching the original inline pattern
 * "j->caplist[j->caps].handle = j->caps++;".
 */
static void ixj_add_cap(IXJ *j, int captype, int cap, const char *desc)
{
	strcpy(j->caplist[j->caps].desc, desc);
	j->caplist[j->caps].captype = captype;
	j->caplist[j->caps].cap = cap;
	j->caplist[j->caps].handle = j->caps++;
}

/*
 * (Re)build the capability list for this board from scratch: vendor and
 * device identity, the ports the card type supports, and the codecs the
 * DSP variant / loaded firmware can handle.  Leaves the entry count in
 * j->caps.
 */
static void add_caps(IXJ *j)
{
	j->caps = 0;
	ixj_add_cap(j, vendor, PHONE_VENDOR_QUICKNET,
		    "Quicknet Technologies, Inc. (www.quicknet.net)");
	/* Device identity entry */
	switch (j->cardtype) {
	case QTI_PHONEJACK:
		ixj_add_cap(j, device, j->cardtype, "Quicknet Internet PhoneJACK");
		break;
	case QTI_LINEJACK:
		ixj_add_cap(j, device, j->cardtype, "Quicknet Internet LineJACK");
		break;
	case QTI_PHONEJACK_LITE:
		ixj_add_cap(j, device, j->cardtype, "Quicknet Internet PhoneJACK Lite");
		break;
	case QTI_PHONEJACK_PCI:
		ixj_add_cap(j, device, j->cardtype, "Quicknet Internet PhoneJACK PCI");
		break;
	case QTI_PHONECARD:
		ixj_add_cap(j, device, j->cardtype, "Quicknet Internet PhoneCARD");
		break;
	default:
		/* Previously an unknown card type left desc uninitialised
		 * (no default case) while still emitting the entry. */
		ixj_add_cap(j, device, j->cardtype, "Unknown Quicknet device");
		break;
	}
	/* every card has a POTS port */
	ixj_add_cap(j, port, pots, "POTS");
	/* add devices that can do speaker/mic */
	switch (j->cardtype) {
	case QTI_PHONEJACK:
	case QTI_LINEJACK:
	case QTI_PHONEJACK_PCI:
	case QTI_PHONECARD:
		ixj_add_cap(j, port, speaker, "SPEAKER");
		break;
	default:
		break;
	}
	/* add devices that can do handset */
	if (j->cardtype == QTI_PHONEJACK)
		ixj_add_cap(j, port, handset, "HANDSET");
	/* add devices that can do PSTN */
	if (j->cardtype == QTI_LINEJACK)
		ixj_add_cap(j, port, pstn, "PSTN");
	/* add codecs - all cards can do uLaw, linear 8/16, and Windows sound system */
	ixj_add_cap(j, codec, ULAW, "ULAW");
	ixj_add_cap(j, codec, LINEAR16, "LINEAR 16 bit");
	ixj_add_cap(j, codec, LINEAR8, "LINEAR 8 bit");
	ixj_add_cap(j, codec, WSS, "Windows Sound System");
	/* software ALAW codec, made from ULAW */
	ixj_add_cap(j, codec, ALAW, "ALAW");
	/* version 12 of the 8020 does the following codecs in a broken way */
	if (j->dsp.low != 0x20 || j->ver.low != 0x12) {
		ixj_add_cap(j, codec, G723_63, "G.723.1 6.3kbps");
		ixj_add_cap(j, codec, G723_53, "G.723.1 5.3kbps");
		ixj_add_cap(j, codec, TS48, "TrueSpeech 4.8kbps");
		ixj_add_cap(j, codec, TS41, "TrueSpeech 4.1kbps");
	}
	/* 8020 chips can do TS8.5 native, and 8021/8022 can load it */
	if (j->dsp.low == 0x20 || j->flags.ts85_loaded)
		ixj_add_cap(j, codec, TS85, "TrueSpeech 8.5kbps");
	/* 8021 chips can do G728 */
	if (j->dsp.low == 0x21)
		ixj_add_cap(j, codec, G728, "G.728 16kbps");
	/* 8021/8022 chips can do G729 if loaded */
	if (j->dsp.low != 0x20 && j->flags.g729_loaded)
		ixj_add_cap(j, codec, G729, "G.729A 8kbps");
	if (j->dsp.low != 0x20 && j->flags.g729_loaded)
		ixj_add_cap(j, codec, G729B, "G.729B 8kbps");
}
/*
 * Return 1 if the requested (captype, cap) pair appears anywhere in this
 * board's capability list, 0 otherwise.  Caller must have populated the
 * list via add_caps() first.
 */
static int capabilities_check(IXJ *j, struct phone_capability *pcreq)
{
	int idx;

	for (idx = 0; idx < j->caps; idx++) {
		if (pcreq->captype == j->caplist[idx].captype &&
		    pcreq->cap == j->caplist[idx].cap)
			return 1;
	}
	return 0;
}
/*
 * Main ioctl dispatcher for the phone devices.  Runs with the per-board
 * busyflags bit held so only one caller talks to the DSP at a time; every
 * exit path must drop that bit.
 *
 * Fixes relative to the previous version:
 *  - non-root callers of IXJCTL_TESTRAM / IXJCTL_HZ got -EPERM returned
 *    but the privileged command was still executed (the capability check
 *    fell through into the main switch);
 *  - IXJCTL_INTERCOM_STOP/START returned -EINVAL directly, leaking the
 *    busyflags bit and wedging the board forever;
 *  - IXJCTL_SET_FILTER called ixj_init_filter() even after
 *    copy_from_user() failed, overwriting the -EFAULT;
 *  - IXJCTL_SIGCTL wrote through j->ixj_signals[] with no bounds check on
 *    the user-supplied event index (the array has 35 entries);
 *  - IXJCTL_VERSION omitted the string's NUL terminator;
 *  - put_user() results are now checked.
 */
static long do_ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long arg)
{
	IXJ_TONE ti;
	IXJ_FILTER jf;
	IXJ_FILTER_RAW jfr;
	void __user *argp = (void __user *)arg;
	struct inode *inode = file_p->f_path.dentry->d_inode;
	unsigned int minor = iminor(inode);
	unsigned int raise, mant;
	int board = NUM(inode);
	IXJ *j = get_ixj(NUM(inode));
	int retval = 0;

	/*
	 * Set up locks to ensure that only one process is talking to the DSP
	 * at a time.  This is necessary to keep the DSP from locking up.
	 */
	while (test_and_set_bit(board, (void *)&j->busyflags) != 0)
		schedule_timeout_interruptible(1);
	if (ixjdebug & 0x0040)
		printk("phone%d ioctl, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
	if (minor >= IXJMAX) {
		clear_bit(board, &j->busyflags);
		return -ENODEV;
	}
	/*
	 * Check ioctls only root can use.  Skip the main switch entirely on
	 * failure; previously the command was still executed.
	 */
	if (!capable(CAP_SYS_ADMIN)) {
		switch (cmd) {
		case IXJCTL_TESTRAM:
		case IXJCTL_HZ:
			retval = -EPERM;
			goto done;
		}
	}
	switch (cmd) {
	case IXJCTL_TESTRAM:
		ixj_testram(j);
		retval = (j->ssr.high << 8) + j->ssr.low;
		break;
	case IXJCTL_CARDTYPE:
		retval = j->cardtype;
		break;
	case IXJCTL_SERIAL:
		retval = j->serial;
		break;
	case IXJCTL_VERSION:
		{
			char arg_str[100];

			snprintf(arg_str, sizeof(arg_str),
				 "\nDriver version %i.%i.%i", IXJ_VER_MAJOR,
				 IXJ_VER_MINOR, IXJ_BLD_VER);
			/* +1: include the NUL terminator for userspace */
			if (copy_to_user(argp, arg_str, strlen(arg_str) + 1))
				retval = -EFAULT;
		}
		break;
	case PHONE_RING_CADENCE:
		j->ring_cadence = arg;
		break;
	case IXJCTL_CIDCW:
		if (arg) {
			if (copy_from_user(&j->cid_send, argp, sizeof(PHONE_CID))) {
				retval = -EFAULT;
				break;
			}
		} else {
			memset(&j->cid_send, 0, sizeof(PHONE_CID));
		}
		ixj_write_cidcw(j);
		break;
	/* Binary compatbility */
	case OLD_PHONE_RING_START:
		arg = 0;
		/* Fall through */
	case PHONE_RING_START:
		if (arg) {
			if (copy_from_user(&j->cid_send, argp, sizeof(PHONE_CID))) {
				retval = -EFAULT;
				break;
			}
			ixj_write_cid(j);
		} else {
			memset(&j->cid_send, 0, sizeof(PHONE_CID));
		}
		ixj_ring_start(j);
		break;
	case PHONE_RING_STOP:
		j->flags.cringing = 0;
		if (j->cadence_f[5].enable) {
			j->cadence_f[5].state = 0;
		}
		ixj_ring_off(j);
		break;
	case PHONE_RING:
		retval = ixj_ring(j);
		break;
	case PHONE_EXCEPTION:
		/* Return the pending exception bits and acknowledge them. */
		retval = j->ex.bytes;
		if (j->ex.bits.flash) {
			j->flash_end = 0;
			j->ex.bits.flash = 0;
		}
		j->ex.bits.pstn_ring = 0;
		j->ex.bits.caller_id = 0;
		j->ex.bits.pstn_wink = 0;
		j->ex.bits.f0 = 0;
		j->ex.bits.f1 = 0;
		j->ex.bits.f2 = 0;
		j->ex.bits.f3 = 0;
		j->ex.bits.fc0 = 0;
		j->ex.bits.fc1 = 0;
		j->ex.bits.fc2 = 0;
		j->ex.bits.fc3 = 0;
		j->ex.bits.reserved = 0;
		break;
	case PHONE_HOOKSTATE:
		j->ex.bits.hookstate = 0;
		retval = j->hookstate;	/* j->r_hook; */
		break;
	case IXJCTL_SET_LED:
		LED_SetState(arg, j);
		break;
	case PHONE_FRAME:
		retval = set_base_frame(j, arg);
		break;
	case PHONE_REC_CODEC:
		retval = set_rec_codec(j, arg);
		break;
	case PHONE_VAD:
		ixj_vad(j, arg);
		break;
	case PHONE_REC_START:
		ixj_record_start(j);
		break;
	case PHONE_REC_STOP:
		ixj_record_stop(j);
		break;
	case PHONE_REC_DEPTH:
		set_rec_depth(j, arg);
		break;
	case PHONE_REC_VOLUME:
		/* arg == -1 means "query"; anything else sets and echoes back */
		if (arg == -1) {
			retval = get_rec_volume(j);
		} else {
			set_rec_volume(j, arg);
			retval = arg;
		}
		break;
	case PHONE_REC_VOLUME_LINEAR:
		if (arg == -1) {
			retval = get_rec_volume_linear(j);
		} else {
			set_rec_volume_linear(j, arg);
			retval = arg;
		}
		break;
	case IXJCTL_DTMF_PRESCALE:
		if (arg == -1) {
			retval = get_dtmf_prescale(j);
		} else {
			set_dtmf_prescale(j, arg);
			retval = arg;
		}
		break;
	case PHONE_REC_LEVEL:
		retval = get_rec_level(j);
		break;
	case IXJCTL_SC_RXG:
		retval = ixj_siadc(j, arg);
		break;
	case IXJCTL_SC_TXG:
		retval = ixj_sidac(j, arg);
		break;
	case IXJCTL_AEC_START:
		ixj_aec_start(j, arg);
		break;
	case IXJCTL_AEC_STOP:
		aec_stop(j);
		break;
	case IXJCTL_AEC_GET_LEVEL:
		retval = j->aec_level;
		break;
	case PHONE_PLAY_CODEC:
		retval = set_play_codec(j, arg);
		break;
	case PHONE_PLAY_START:
		retval = ixj_play_start(j);
		break;
	case PHONE_PLAY_STOP:
		ixj_play_stop(j);
		break;
	case PHONE_PLAY_DEPTH:
		set_play_depth(j, arg);
		break;
	case PHONE_PLAY_VOLUME:
		if (arg == -1) {
			retval = get_play_volume(j);
		} else {
			set_play_volume(j, arg);
			retval = arg;
		}
		break;
	case PHONE_PLAY_VOLUME_LINEAR:
		if (arg == -1) {
			retval = get_play_volume_linear(j);
		} else {
			set_play_volume_linear(j, arg);
			retval = arg;
		}
		break;
	case PHONE_PLAY_LEVEL:
		retval = get_play_level(j);
		break;
	case IXJCTL_DSP_TYPE:
		retval = (j->dsp.high << 8) + j->dsp.low;
		break;
	case IXJCTL_DSP_VERSION:
		retval = (j->ver.high << 8) + j->ver.low;
		break;
	case IXJCTL_HZ:
		hertz = arg;
		break;
	case IXJCTL_RATE:
		if (arg > hertz)
			retval = -1;
		else
			samplerate = arg;
		break;
	case IXJCTL_DRYBUFFER_READ:
		if (put_user(j->drybuffer, (unsigned long __user *) argp))
			retval = -EFAULT;
		break;
	case IXJCTL_DRYBUFFER_CLEAR:
		j->drybuffer = 0;
		break;
	case IXJCTL_FRAMES_READ:
		if (put_user(j->framesread, (unsigned long __user *) argp))
			retval = -EFAULT;
		break;
	case IXJCTL_FRAMES_WRITTEN:
		if (put_user(j->frameswritten, (unsigned long __user *) argp))
			retval = -EFAULT;
		break;
	case IXJCTL_READ_WAIT:
		if (put_user(j->read_wait, (unsigned long __user *) argp))
			retval = -EFAULT;
		break;
	case IXJCTL_WRITE_WAIT:
		if (put_user(j->write_wait, (unsigned long __user *) argp))
			retval = -EFAULT;
		break;
	case PHONE_MAXRINGS:
		j->maxrings = arg;
		break;
	case PHONE_SET_TONE_ON_TIME:
		ixj_set_tone_on(arg, j);
		break;
	case PHONE_SET_TONE_OFF_TIME:
		ixj_set_tone_off(arg, j);
		break;
	case PHONE_GET_TONE_ON_TIME:
		if (ixj_get_tone_on(j)) {
			retval = -1;
		} else {
			retval = (j->ssr.high << 8) + j->ssr.low;
		}
		break;
	case PHONE_GET_TONE_OFF_TIME:
		if (ixj_get_tone_off(j)) {
			retval = -1;
		} else {
			retval = (j->ssr.high << 8) + j->ssr.low;
		}
		break;
	case PHONE_PLAY_TONE:
		if (!j->tone_state)
			retval = ixj_play_tone(j, arg);
		else
			retval = -1;
		break;
	case PHONE_GET_TONE_STATE:
		retval = j->tone_state;
		break;
	case PHONE_DTMF_READY:
		retval = j->ex.bits.dtmf_ready;
		break;
	case PHONE_GET_DTMF:
		/* Pop one raw digit from the 79-entry DTMF ring buffer. */
		if (ixj_hookstate(j)) {
			if (j->dtmf_rp != j->dtmf_wp) {
				retval = j->dtmfbuffer[j->dtmf_rp];
				j->dtmf_rp++;
				if (j->dtmf_rp == 79)
					j->dtmf_rp = 0;
				if (j->dtmf_rp == j->dtmf_wp) {
					j->ex.bits.dtmf_ready = j->dtmf_rp = j->dtmf_wp = 0;
				}
			}
		}
		break;
	case PHONE_GET_DTMF_ASCII:
		/* Same as PHONE_GET_DTMF but translated to ASCII. */
		if (ixj_hookstate(j)) {
			if (j->dtmf_rp != j->dtmf_wp) {
				switch (j->dtmfbuffer[j->dtmf_rp]) {
				case 10:
					retval = 42;	/* '*'; */
					break;
				case 11:
					retval = 48;	/* '0'; */
					break;
				case 12:
					retval = 35;	/* '#'; */
					break;
				case 28:
					retval = 65;	/* 'A'; */
					break;
				case 29:
					retval = 66;	/* 'B'; */
					break;
				case 30:
					retval = 67;	/* 'C'; */
					break;
				case 31:
					retval = 68;	/* 'D'; */
					break;
				default:
					retval = 48 + j->dtmfbuffer[j->dtmf_rp];
					break;
				}
				j->dtmf_rp++;
				if (j->dtmf_rp == 79)
					j->dtmf_rp = 0;
				if (j->dtmf_rp == j->dtmf_wp) {
					j->ex.bits.dtmf_ready = j->dtmf_rp = j->dtmf_wp = 0;
				}
			}
		}
		break;
	case PHONE_DTMF_OOB:
		j->flags.dtmf_oob = arg;
		break;
	case PHONE_DIALTONE:
		ixj_dialtone(j);
		break;
	case PHONE_BUSY:
		ixj_busytone(j);
		break;
	case PHONE_RINGBACK:
		ixj_ringback(j);
		break;
	case PHONE_WINK:
		if (j->cardtype == QTI_PHONEJACK)
			retval = -1;
		else
			retval = ixj_wink(j);
		break;
	case PHONE_CPT_STOP:
		ixj_cpt_stop(j);
		break;
	case PHONE_QUERY_CODEC:
		{
			struct phone_codec_data pd;
			int val;
			int proto_size[] = {
				-1,
				12, 10, 16, 9, 8, 48, 5,
				40, 40, 80, 40, 40, 6
			};
			if (copy_from_user(&pd, argp, sizeof(pd))) {
				retval = -EFAULT;
				break;
			}
			if (pd.type < 1 || pd.type > 13) {
				retval = -EPROTONOSUPPORT;
				break;
			}
			if (pd.type < G729)
				val = proto_size[pd.type];
			else switch (j->baseframe.low) {
			case 0xA0:
				val = 2 * proto_size[pd.type];
				break;
			case 0x50:
				val = proto_size[pd.type];
				break;
			default:
				val = proto_size[pd.type] * 3;
				break;
			}
			pd.buf_min = pd.buf_max = pd.buf_opt = val;
			if (copy_to_user(argp, &pd, sizeof(pd)))
				retval = -EFAULT;
			break;
		}
	case IXJCTL_DSP_IDLE:
		idle(j);
		break;
	case IXJCTL_MIXER:
		if ((arg & 0xff) == 0xff)
			retval = ixj_get_mixer(arg, j);
		else
			ixj_mixer(arg, j);
		break;
	case IXJCTL_DAA_COEFF_SET:
		switch (arg) {
		case DAA_US:
			DAA_Coeff_US(j);
			retval = ixj_daa_write(j);
			break;
		case DAA_UK:
			DAA_Coeff_UK(j);
			retval = ixj_daa_write(j);
			break;
		case DAA_FRANCE:
			DAA_Coeff_France(j);
			retval = ixj_daa_write(j);
			break;
		case DAA_GERMANY:
			DAA_Coeff_Germany(j);
			retval = ixj_daa_write(j);
			break;
		case DAA_AUSTRALIA:
			DAA_Coeff_Australia(j);
			retval = ixj_daa_write(j);
			break;
		case DAA_JAPAN:
			DAA_Coeff_Japan(j);
			retval = ixj_daa_write(j);
			break;
		default:
			retval = 1;
			break;
		}
		break;
	case IXJCTL_DAA_AGAIN:
		ixj_daa_cr4(j, arg | 0x02);
		break;
	case IXJCTL_PSTN_LINETEST:
		retval = ixj_linetest(j);
		break;
	case IXJCTL_VMWI:
		ixj_write_vmwi(j, arg);
		break;
	case IXJCTL_CID:
		if (copy_to_user(argp, &j->cid, sizeof(PHONE_CID)))
			retval = -EFAULT;
		j->ex.bits.caller_id = 0;
		break;
	case IXJCTL_WINK_DURATION:
		j->winktime = arg;
		break;
	case IXJCTL_PORT:
		if (arg)
			retval = ixj_set_port(j, arg);
		else
			retval = j->port;
		break;
	case IXJCTL_POTS_PSTN:
		retval = ixj_set_pots(j, arg);
		break;
	case PHONE_CAPABILITIES:
		add_caps(j);
		retval = j->caps;
		break;
	case PHONE_CAPABILITIES_LIST:
		add_caps(j);
		if (copy_to_user(argp, j->caplist, sizeof(struct phone_capability) * j->caps))
			retval = -EFAULT;
		break;
	case PHONE_CAPABILITIES_CHECK:
		{
			struct phone_capability cap;
			if (copy_from_user(&cap, argp, sizeof(cap)))
				retval = -EFAULT;
			else {
				add_caps(j);
				retval = capabilities_check(j, &cap);
			}
		}
		break;
	case PHONE_PSTN_SET_STATE:
		daa_set_mode(j, arg);
		break;
	case PHONE_PSTN_GET_STATE:
		retval = j->daa_mode;
		j->ex.bits.pstn_ring = 0;
		break;
	case IXJCTL_SET_FILTER:
		/* Fix: previously fell through and called ixj_init_filter()
		 * even when the copy failed, clobbering the -EFAULT. */
		if (copy_from_user(&jf, argp, sizeof(jf)))
			retval = -EFAULT;
		else
			retval = ixj_init_filter(j, &jf);
		break;
	case IXJCTL_SET_FILTER_RAW:
		if (copy_from_user(&jfr, argp, sizeof(jfr)))
			retval = -EFAULT;
		else
			retval = ixj_init_filter_raw(j, &jfr);
		break;
	case IXJCTL_GET_FILTER_HIST:
		/* arg is unsigned, so only the upper bound needs checking */
		if (arg > 3)
			retval = -EINVAL;
		else
			retval = j->filter_hist[arg];
		break;
	case IXJCTL_INIT_TONE:
		if (copy_from_user(&ti, argp, sizeof(ti)))
			retval = -EFAULT;
		else
			retval = ixj_init_tone(j, &ti);
		break;
	case IXJCTL_TONE_CADENCE:
		retval = ixj_build_cadence(j, argp);
		break;
	case IXJCTL_FILTER_CADENCE:
		retval = ixj_build_filter_cadence(j, argp);
		break;
	case IXJCTL_SIGCTL:
		if (copy_from_user(&j->sigdef, argp, sizeof(IXJ_SIGDEF))) {
			retval = -EFAULT;
			break;
		}
		/* Fix: bounds-check the user-supplied event before using it
		 * to index ixj_signals[], which has 35 entries. */
		if (j->sigdef.event >= 35) {
			retval = -EINVAL;
			break;
		}
		j->ixj_signals[j->sigdef.event] = j->sigdef.signal;
		if (j->sigdef.event < 33) {
			raise = 1;
			for (mant = 0; mant < j->sigdef.event; mant++) {
				raise *= 2;
			}
			if (j->sigdef.signal)
				j->ex_sig.bytes |= raise;
			else
				j->ex_sig.bytes &= (raise ^ 0xffff);
		}
		break;
	case IXJCTL_INTERCOM_STOP:
		/* Fix: was "return -EINVAL", which left the busyflags bit
		 * set and wedged the board.  arg is unsigned, so only the
		 * upper bound needs checking. */
		if (arg >= IXJMAX) {
			retval = -EINVAL;
			break;
		}
		j->intercom = -1;
		ixj_record_stop(j);
		ixj_play_stop(j);
		idle(j);
		get_ixj(arg)->intercom = -1;
		ixj_record_stop(get_ixj(arg));
		ixj_play_stop(get_ixj(arg));
		idle(get_ixj(arg));
		break;
	case IXJCTL_INTERCOM_START:
		/* Fix: same busyflags leak as IXJCTL_INTERCOM_STOP. */
		if (arg >= IXJMAX) {
			retval = -EINVAL;
			break;
		}
		j->intercom = arg;
		ixj_record_start(j);
		ixj_play_start(j);
		get_ixj(arg)->intercom = board;
		ixj_play_start(get_ixj(arg));
		ixj_record_start(get_ixj(arg));
		break;
	}
done:
	if (ixjdebug & 0x0040)
		printk("phone%d ioctl end, cmd: 0x%x, arg: 0x%lx\n", minor, cmd, arg);
	clear_bit(board, &j->busyflags);
	return retval;
}
/*
 * unlocked_ioctl entry point: takes the Big Kernel Lock around the real
 * worker, do_ixj_ioctl(), preserving the serialization the old .ioctl
 * hook provided.  Per-board serialization against the DSP happens inside
 * do_ixj_ioctl() via j->busyflags.
 */
static long ixj_ioctl(struct file *file_p, unsigned int cmd, unsigned long arg)
{
	long ret;
	lock_kernel();
	ret = do_ixj_ioctl(file_p, cmd, arg);
	unlock_kernel();
	return ret;
}
/*
 * fasync file operation: (de)register this file on the board's async
 * notification queue so SIGIO can be delivered on phone events.
 */
static int ixj_fasync(int fd, struct file *file_p, int mode)
{
	struct inode *inode = file_p->f_path.dentry->d_inode;
	IXJ *board = get_ixj(NUM(inode));

	return fasync_helper(fd, file_p, mode, &board->async_queue);
}
/*
 * File operations for the phone character devices.  ixj_ioctl wraps
 * do_ixj_ioctl() under the BKL; read/write/poll are the "enhanced"
 * variants defined earlier in this file.
 */
static const struct file_operations ixj_fops =
{
	.owner = THIS_MODULE,
	.read = ixj_enhanced_read,
	.write = ixj_enhanced_write,
	.poll = ixj_poll,
	.unlocked_ioctl = ixj_ioctl,
	.release = ixj_release,
	.fasync = ixj_fasync
};
/*
 * Probe the LineJACK's relays and DAA to determine whether a POTS handset
 * and/or a live PSTN line are connected, reporting progress on the LEDs.
 * Sets j->flags.pots_correct and j->flags.pstn_present and returns the
 * latter.  The relay/outb/msleep sequencing below is hardware-dictated;
 * do not reorder.  NOTE(review): the two msleep(1000) calls make this
 * take roughly two seconds — must be called from process context.
 */
static int ixj_linetest(IXJ *j)
{
	j->flags.pstncheck = 1;	/* Testing */
	j->flags.pstn_present = 0;	/* Assume the line is not there */

	daa_int_read(j);	/*Clear DAA Interrupt flags */
	/* */
	/* Hold all relays in the normally de-energized position. */
	/* */

	j->pld_slicw.bits.rly1 = 0;
	j->pld_slicw.bits.rly2 = 0;
	j->pld_slicw.bits.rly3 = 0;
	outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
	j->pld_scrw.bits.daafsyncen = 0;	/* Turn off DAA Frame Sync */

	outb_p(j->pld_scrw.byte, j->XILINXbase);
	j->pld_slicr.byte = inb_p(j->XILINXbase + 0x01);
	if (j->pld_slicr.bits.potspstn) {
		/* POTS socket is wired through to the PSTN line */
		j->flags.pots_pstn = 1;
		j->flags.pots_correct = 0;
		LED_SetState(0x4, j);
	} else {
		j->flags.pots_pstn = 0;
		j->pld_slicw.bits.rly1 = 0;
		j->pld_slicw.bits.rly2 = 0;
		j->pld_slicw.bits.rly3 = 1;
		outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
		j->pld_scrw.bits.daafsyncen = 0;	/* Turn off DAA Frame Sync */

		outb_p(j->pld_scrw.byte, j->XILINXbase);
		daa_set_mode(j, SOP_PU_CONVERSATION);
		msleep(1000);
		daa_int_read(j);
		daa_set_mode(j, SOP_PU_RESET);
		if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) {
			j->flags.pots_correct = 0;	/* Should not be line voltage on POTS port. */
			LED_SetState(0x4, j);
			j->pld_slicw.bits.rly3 = 0;
			outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
		} else {
			j->flags.pots_correct = 1;
			LED_SetState(0x8, j);
			j->pld_slicw.bits.rly1 = 1;
			j->pld_slicw.bits.rly2 = 0;
			j->pld_slicw.bits.rly3 = 0;
			outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
		}
	}
	/* Now test the PSTN side: look for line voltage via the DAA. */
	j->pld_slicw.bits.rly3 = 0;
	outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
	daa_set_mode(j, SOP_PU_CONVERSATION);
	msleep(1000);
	daa_int_read(j);
	daa_set_mode(j, SOP_PU_RESET);
	if (j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK) {
		j->pstn_sleeptil = jiffies + (hertz / 4);
		j->flags.pstn_present = 1;
	} else {
		j->flags.pstn_present = 0;
	}
	/* Encode the (pstn_present, pots_correct) result on the LEDs. */
	if (j->flags.pstn_present) {
		if (j->flags.pots_correct) {
			LED_SetState(0xA, j);
		} else {
			LED_SetState(0x6, j);
		}
	} else {
		if (j->flags.pots_correct) {
			LED_SetState(0x9, j);
		} else {
			LED_SetState(0x5, j);
		}
	}
	j->flags.pstncheck = 0;	/* Testing */
	return j->flags.pstn_present;
}
/*
 * Probe and initialise one board: identify the DSP/card type, claim its
 * XILINX I/O region, configure CODEC timing, bring up card-specific
 * hardware (SLIC, DAA, mixer), set sane defaults, and register the
 * device with the telephony core.  Returns 0 on success, -1 on any
 * failure (driver-local convention).
 *
 * Fixes relative to the previous version:
 *  - "if (!j->dsp.low != 0x20)" compared the logical NOT of dsp.low
 *    against 0x20 and was therefore always true; the intent is clearly
 *    "the DSP ID is not 0x20";
 *  - the request_region() result for the QTI_PHONEJACK_PCI autodetect
 *    path was ignored, unlike every other path.
 */
static int ixj_selfprobe(IXJ *j)
{
	unsigned short cmd;
	int cnt;
	BYTES bytes;

	init_waitqueue_head(&j->poll_q);
	init_waitqueue_head(&j->read_q);
	init_waitqueue_head(&j->write_q);
	while (atomic_read(&j->DSPWrite) > 0)
		atomic_dec(&j->DSPWrite);
	if (ixjdebug & 0x0002)
		printk(KERN_INFO "Write IDLE to Software Control Register\n");
	ixj_WriteDSPCommand(0x0FE0, j);	/* Put the DSP in full power mode. */

	if (ixj_WriteDSPCommand(0x0000, j))	/* Write IDLE to Software Control Register */
		return -1;
	/* The read values of the SSR should be 0x00 for the IDLE command */
	if (j->ssr.low || j->ssr.high)
		return -1;
	if (ixjdebug & 0x0002)
		printk(KERN_INFO "Get Device ID Code\n");
	if (ixj_WriteDSPCommand(0x3400, j))	/* Get Device ID Code */
		return -1;
	j->dsp.low = j->ssr.low;
	j->dsp.high = j->ssr.high;
	if (ixjdebug & 0x0002)
		printk(KERN_INFO "Get Device Version Code\n");
	if (ixj_WriteDSPCommand(0x3800, j))	/* Get Device Version Code */
		return -1;
	j->ver.low = j->ssr.low;
	j->ver.high = j->ssr.high;
	if (!j->cardtype) {
		/* Card type not supplied: autodetect from the DSP ID. */
		if (j->dsp.low == 0x21) {
			bytes.high = bytes.low = inb_p(j->XILINXbase + 0x02);
			outb_p(bytes.low ^ 0xFF, j->XILINXbase + 0x02);
			/* Test for Internet LineJACK or Internet PhoneJACK Lite */
			bytes.low = inb_p(j->XILINXbase + 0x02);
			if (bytes.low == bytes.high)	/* Register is read only on */
				/* Internet PhoneJack Lite */
			{
				j->cardtype = QTI_PHONEJACK_LITE;
				if (!request_region(j->XILINXbase, 4, "ixj control")) {
					printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
					return -1;
				}
				j->pld_slicw.pcib.e1 = 1;
				outb_p(j->pld_slicw.byte, j->XILINXbase);
			} else {
				j->cardtype = QTI_LINEJACK;
				if (!request_region(j->XILINXbase, 8, "ixj control")) {
					printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
					return -1;
				}
			}
		} else if (j->dsp.low == 0x22) {
			j->cardtype = QTI_PHONEJACK_PCI;
			/* Fix: this request_region() result used to be ignored. */
			if (!request_region(j->XILINXbase, 4, "ixj control")) {
				printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
				return -1;
			}
			j->pld_slicw.pcib.e1 = 1;
			outb_p(j->pld_slicw.byte, j->XILINXbase);
		} else
			j->cardtype = QTI_PHONEJACK;
	} else {
		switch (j->cardtype) {
		case QTI_PHONEJACK:
			/* Fix: was "if (!j->dsp.low != 0x20)" (always true). */
			if (j->dsp.low != 0x20) {
				j->dsp.high = 0x80;
				j->dsp.low = 0x20;
				ixj_WriteDSPCommand(0x3800, j);
				j->ver.low = j->ssr.low;
				j->ver.high = j->ssr.high;
			}
			break;
		case QTI_LINEJACK:
			if (!request_region(j->XILINXbase, 8, "ixj control")) {
				printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
				return -1;
			}
			break;
		case QTI_PHONEJACK_LITE:
		case QTI_PHONEJACK_PCI:
			if (!request_region(j->XILINXbase, 4, "ixj control")) {
				printk(KERN_INFO "ixj: can't get I/O address 0x%x\n", j->XILINXbase);
				return -1;
			}
			j->pld_slicw.pcib.e1 = 1;
			outb_p(j->pld_slicw.byte, j->XILINXbase);
			break;
		case QTI_PHONECARD:
			break;
		}
	}
	if (j->dsp.low == 0x20 || j->cardtype == QTI_PHONEJACK_LITE || j->cardtype == QTI_PHONEJACK_PCI) {
		if (ixjdebug & 0x0002)
			printk(KERN_INFO "Write CODEC config to Software Control Register\n");
		if (ixj_WriteDSPCommand(0xC462, j))	/* Write CODEC config to Software Control Register */
			return -1;
		if (ixjdebug & 0x0002)
			printk(KERN_INFO "Write CODEC timing to Software Control Register\n");
		if (j->cardtype == QTI_PHONEJACK) {
			cmd = 0x9FF2;
		} else {
			cmd = 0x9FF5;
		}
		if (ixj_WriteDSPCommand(cmd, j))	/* Write CODEC timing to Software Control Register */
			return -1;
	} else {
		if (set_base_frame(j, 30) != 30)
			return -1;
		if (ixjdebug & 0x0002)
			printk(KERN_INFO "Write CODEC config to Software Control Register\n");
		if (j->cardtype == QTI_PHONECARD) {
			if (ixj_WriteDSPCommand(0xC528, j))	/* Write CODEC config to Software Control Register */
				return -1;
		}
		if (j->cardtype == QTI_LINEJACK) {
			if (ixj_WriteDSPCommand(0xC528, j))	/* Write CODEC config to Software Control Register */
				return -1;
			if (ixjdebug & 0x0002)
				printk(KERN_INFO "Turn on the PLD Clock at 8Khz\n");
			j->pld_clock.byte = 0;
			outb_p(j->pld_clock.byte, j->XILINXbase + 0x04);
		}
	}
	if (j->dsp.low == 0x20) {
		/* 8020: drive the SLIC directly through the GPIO pins. */
		if (ixjdebug & 0x0002)
			printk(KERN_INFO "Configure GPIO pins\n");
		j->gpio.bytes.high = 0x09;
		/* bytes.low = 0xEF;  0xF7 */
		j->gpio.bits.gpio1 = 1;
		j->gpio.bits.gpio2 = 1;
		j->gpio.bits.gpio3 = 0;
		j->gpio.bits.gpio4 = 1;
		j->gpio.bits.gpio5 = 1;
		j->gpio.bits.gpio6 = 1;
		j->gpio.bits.gpio7 = 1;
		ixj_WriteDSPCommand(j->gpio.word, j);	/* Set GPIO pin directions */
		if (ixjdebug & 0x0002)
			printk(KERN_INFO "Enable SLIC\n");
		j->gpio.bytes.high = 0x0B;
		j->gpio.bytes.low = 0x00;
		j->gpio.bits.gpio1 = 0;
		j->gpio.bits.gpio2 = 1;
		j->gpio.bits.gpio5 = 0;
		ixj_WriteDSPCommand(j->gpio.word, j);	/* send the ring stop signal */
		j->port = PORT_POTS;
	} else {
		if (j->cardtype == QTI_LINEJACK) {
			/* LED walk, then DAA + mixer bring-up. */
			LED_SetState(0x1, j);
			msleep(100);
			LED_SetState(0x2, j);
			msleep(100);
			LED_SetState(0x4, j);
			msleep(100);
			LED_SetState(0x8, j);
			msleep(100);
			LED_SetState(0x0, j);
			daa_get_version(j);
			if (ixjdebug & 0x0002)
				printk("Loading DAA Coefficients\n");
			DAA_Coeff_US(j);
			if (!ixj_daa_write(j)) {
				printk("DAA write failed on board %d\n", j->board);
				return -1;
			}
			if (!ixj_daa_cid_reset(j)) {
				printk("DAA CID reset failed on board %d\n", j->board);
				return -1;
			}
			j->flags.pots_correct = 0;
			j->flags.pstn_present = 0;
			ixj_linetest(j);
			if (j->flags.pots_correct) {
				j->pld_scrw.bits.daafsyncen = 0;	/* Turn off DAA Frame Sync */

				outb_p(j->pld_scrw.byte, j->XILINXbase);
				j->pld_slicw.bits.rly1 = 1;
				j->pld_slicw.bits.spken = 1;
				outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
				SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
/*				SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j); */
				j->port = PORT_POTS;
			}
			ixj_set_port(j, PORT_PSTN);
			ixj_set_pots(j, 1);
			if (ixjdebug & 0x0002)
				printk(KERN_INFO "Enable Mixer\n");
			ixj_mixer(0x0000, j);	/*Master Volume Left unmute 0db */
			ixj_mixer(0x0100, j);	/*Master Volume Right unmute 0db */

			ixj_mixer(0x0203, j);	/*Voice Left Volume unmute 6db */
			ixj_mixer(0x0303, j);	/*Voice Right Volume unmute 6db */

			ixj_mixer(0x0480, j);	/*FM Left mute */
			ixj_mixer(0x0580, j);	/*FM Right mute */

			ixj_mixer(0x0680, j);	/*CD Left mute */
			ixj_mixer(0x0780, j);	/*CD Right mute */

			ixj_mixer(0x0880, j);	/*Line Left mute */
			ixj_mixer(0x0980, j);	/*Line Right mute */

			ixj_mixer(0x0A80, j);	/*Aux left mute  */
			ixj_mixer(0x0B80, j);	/*Aux right mute */

			ixj_mixer(0x0C00, j);	/*Mono1 unmute 12db */
			ixj_mixer(0x0D80, j);	/*Mono2 mute */

			ixj_mixer(0x0E80, j);	/*Mic mute */

			ixj_mixer(0x0F00, j);	/*Mono Out Volume unmute 0db */

			ixj_mixer(0x1000, j);	/*Voice Left and Right out only */
			ixj_mixer(0x110C, j);

			ixj_mixer(0x1200, j);	/*Mono1 switch on mixer left */
			ixj_mixer(0x1401, j);

			ixj_mixer(0x1300, j);	/*Mono1 switch on mixer right */
			ixj_mixer(0x1501, j);

			ixj_mixer(0x1700, j);	/*Clock select */

			ixj_mixer(0x1800, j);	/*ADC input from mixer */

			ixj_mixer(0x1901, j);	/*Mic gain 30db */

			if (ixjdebug & 0x0002)
				printk(KERN_INFO "Setting Default US Ring Cadence Detection\n");
			j->cadence_f[4].state = 0;
			j->cadence_f[4].on1 = 0;	/*Cadence Filter 4 is used for PSTN ring cadence */
			j->cadence_f[4].off1 = 0;
			j->cadence_f[4].on2 = 0;
			j->cadence_f[4].off2 = 0;
			j->cadence_f[4].on3 = 0;
			j->cadence_f[4].off3 = 0;	/* These should represent standard US ring pulse. */
			j->pstn_last_rmr = jiffies;

		} else {
			if (j->cardtype == QTI_PHONECARD) {
				ixj_WriteDSPCommand(0xCF07, j);
				ixj_WriteDSPCommand(0x00B0, j);
				ixj_set_port(j, PORT_SPEAKER);
			} else {
				ixj_set_port(j, PORT_POTS);
				SLIC_SetState(PLD_SLIC_STATE_STANDBY, j);
/*				SLIC_SetState(PLD_SLIC_STATE_ACTIVE, j); */
			}
		}
	}

	j->intercom = -1;
	j->framesread = j->frameswritten = 0;
	j->read_wait = j->write_wait = 0;
	j->rxreadycheck = j->txreadycheck = 0;

	/* initialise the DTMF prescale to a sensible value */
	if (j->cardtype == QTI_LINEJACK) {
		set_dtmf_prescale(j, 0x10);
	} else {
		set_dtmf_prescale(j, 0x40);
	}
	set_play_volume(j, 0x100);
	set_rec_volume(j, 0x100);

	if (ixj_WriteDSPCommand(0x0000, j))	/* Write IDLE to Software Control Register */
		return -1;
	/* The read values of the SSR should be 0x00 for the IDLE command */
	if (j->ssr.low || j->ssr.high)
		return -1;

	if (ixjdebug & 0x0002)
		printk(KERN_INFO "Enable Line Monitor\n");

	if (ixjdebug & 0x0002)
		printk(KERN_INFO "Set Line Monitor to Asyncronous Mode\n");

	if (ixj_WriteDSPCommand(0x7E01, j))	/* Asynchronous Line Monitor */
		return -1;

	if (ixjdebug & 0x002)
		printk(KERN_INFO "Enable DTMF Detectors\n");

	if (ixj_WriteDSPCommand(0x5151, j))	/* Enable DTMF detection */
		return -1;

	if (ixj_WriteDSPCommand(0x6E01, j))	/* Set Asyncronous Tone Generation */
		return -1;

	set_rec_depth(j, 2);	/* Set Record Channel Limit to 2 frames */

	set_play_depth(j, 2);	/* Set Playback Channel Limit to 2 frames */

	j->ex.bits.dtmf_ready = 0;
	j->dtmf_state = 0;
	j->dtmf_wp = j->dtmf_rp = 0;
	j->rec_mode = j->play_mode = -1;
	j->flags.ringing = 0;
	j->maxrings = MAXRINGS;
	j->ring_cadence = USA_RING_CADENCE;
	j->drybuffer = 0;
	j->winktime = 320;
	j->flags.dtmf_oob = 0;
	for (cnt = 0; cnt < 4; cnt++)
		j->cadence_f[cnt].enable = 0;
	/* must be a device on the specified address */
	ixj_WriteDSPCommand(0x0FE3, j);	/* Put the DSP in 1/5 power mode. */

	/* Set up the default signals for events */
	for (cnt = 0; cnt < 35; cnt++)
		j->ixj_signals[cnt] = SIGIO;

	/* Set the excetion signal enable flags */
	j->ex_sig.bits.dtmf_ready = j->ex_sig.bits.hookstate = j->ex_sig.bits.flash = j->ex_sig.bits.pstn_ring =
	j->ex_sig.bits.caller_id = j->ex_sig.bits.pstn_wink = j->ex_sig.bits.f0 = j->ex_sig.bits.f1 = j->ex_sig.bits.f2 =
	j->ex_sig.bits.f3 = j->ex_sig.bits.fc0 = j->ex_sig.bits.fc1 = j->ex_sig.bits.fc2 = j->ex_sig.bits.fc3 = 1;
#ifdef IXJ_DYN_ALLOC
	j->fskdata = NULL;
#endif
	j->fskdcnt = 0;
	j->cidcw_wait = 0;

	/* Register with the Telephony for Linux subsystem */
	j->p.f_op = &ixj_fops;
	j->p.open = ixj_open;
	j->p.board = j->board;
	phone_register_device(&j->p, PHONE_UNIT_ANY);

	ixj_init_timer(j);
	ixj_add_timer(j);
	return 0;
}
/*
* Exported service for pcmcia card handling
*/
IXJ *ixj_pcmcia_probe(unsigned long dsp, unsigned long xilinx)
{
IXJ *j = ixj_alloc();
j->board = 0;
j->DSPbase = dsp;
j->XILINXbase = xilinx;
j->cardtype = QTI_PHONECARD;
ixj_selfprobe(j);
return j;
}
EXPORT_SYMBOL(ixj_pcmcia_probe); /* Fpr PCMCIA */
static int ixj_get_status_proc(char *buf)
{
int len;
int cnt;
IXJ *j;
len = 0;
len += sprintf(buf + len, "\nDriver version %i.%i.%i", IXJ_VER_MAJOR, IXJ_VER_MINOR, IXJ_BLD_VER);
len += sprintf(buf + len, "\nsizeof IXJ struct %Zd bytes", sizeof(IXJ));
len += sprintf(buf + len, "\nsizeof DAA struct %Zd bytes", sizeof(DAA_REGS));
len += sprintf(buf + len, "\nUsing old telephony API");
len += sprintf(buf + len, "\nDebug Level %d\n", ixjdebug);
for (cnt = 0; cnt < IXJMAX; cnt++) {
j = get_ixj(cnt);
if(j==NULL)
continue;
if (j->DSPbase) {
len += sprintf(buf + len, "\nCard Num %d", cnt);
len += sprintf(buf + len, "\nDSP Base Address 0x%4.4x", j->DSPbase);
if (j->cardtype != QTI_PHONEJACK)
len += sprintf(buf + len, "\nXILINX Base Address 0x%4.4x", j->XILINXbase);
len += sprintf(buf + len, "\nDSP Type %2.2x%2.2x", j->dsp.high, j->dsp.low);
len += sprintf(buf + len, "\nDSP Version %2.2x.%2.2x", j->ver.high, j->ver.low);
len += sprintf(buf + len, "\nSerial Number %8.8x", j->serial);
switch (j->cardtype) {
case (QTI_PHONEJACK):
len += sprintf(buf + len, "\nCard Type = Internet PhoneJACK");
break;
case (QTI_LINEJACK):
len += sprintf(buf + len, "\nCard Type = Internet LineJACK");
if (j->flags.g729_loaded)
len += sprintf(buf + len, " w/G.729 A/B");
len += sprintf(buf + len, " Country = %d", j->daa_country);
break;
case (QTI_PHONEJACK_LITE):
len += sprintf(buf + len, "\nCard Type = Internet PhoneJACK Lite");
if (j->flags.g729_loaded)
len += sprintf(buf + len, " w/G.729 A/B");
break;
case (QTI_PHONEJACK_PCI):
len += sprintf(buf + len, "\nCard Type = Internet PhoneJACK PCI");
if (j->flags.g729_loaded)
len += sprintf(buf + len, " w/G.729 A/B");
break;
case (QTI_PHONECARD):
len += sprintf(buf + len, "\nCard Type = Internet PhoneCARD");
if (j->flags.g729_loaded)
len += sprintf(buf + len, " w/G.729 A/B");
len += sprintf(buf + len, "\nSmart Cable %spresent", j->pccr1.bits.drf ? "not " : "");
if (!j->pccr1.bits.drf)
len += sprintf(buf + len, "\nSmart Cable type %d", j->flags.pcmciasct);
len += sprintf(buf + len, "\nSmart Cable state %d", j->flags.pcmciastate);
break;
default:
len += sprintf(buf + len, "\nCard Type = %d", j->cardtype);
break;
}
len += sprintf(buf + len, "\nReaders %d", j->readers);
len += sprintf(buf + len, "\nWriters %d", j->writers);
add_caps(j);
len += sprintf(buf + len, "\nCapabilities %d", j->caps);
if (j->dsp.low != 0x20)
len += sprintf(buf + len, "\nDSP Processor load %d", j->proc_load);
if (j->flags.cidsent)
len += sprintf(buf + len, "\nCaller ID data sent");
else
len += sprintf(buf + len, "\nCaller ID data not sent");
len += sprintf(buf + len, "\nPlay CODEC ");
switch (j->play_codec) {
case G723_63:
len += sprintf(buf + len, "G.723.1 6.3");
break;
case G723_53:
len += sprintf(buf + len, "G.723.1 5.3");
break;
case TS85:
len += sprintf(buf + len, "TrueSpeech 8.5");
break;
case TS48:
len += sprintf(buf + len, "TrueSpeech 4.8");
break;
case TS41:
len += sprintf(buf + len, "TrueSpeech 4.1");
break;
case G728:
len += sprintf(buf + len, "G.728");
break;
case G729:
len += sprintf(buf + len, "G.729");
break;
case G729B:
len += sprintf(buf + len, "G.729B");
break;
case ULAW:
len += sprintf(buf + len, "uLaw");
break;
case ALAW:
len += sprintf(buf + len, "aLaw");
break;
case LINEAR16:
len += sprintf(buf + len, "16 bit Linear");
break;
case LINEAR8:
len += sprintf(buf + len, "8 bit Linear");
break;
case WSS:
len += sprintf(buf + len, "Windows Sound System");
break;
default:
len += sprintf(buf + len, "NO CODEC CHOSEN");
break;
}
len += sprintf(buf + len, "\nRecord CODEC ");
switch (j->rec_codec) {
case G723_63:
len += sprintf(buf + len, "G.723.1 6.3");
break;
case G723_53:
len += sprintf(buf + len, "G.723.1 5.3");
break;
case TS85:
len += sprintf(buf + len, "TrueSpeech 8.5");
break;
case TS48:
len += sprintf(buf + len, "TrueSpeech 4.8");
break;
case TS41:
len += sprintf(buf + len, "TrueSpeech 4.1");
break;
case G728:
len += sprintf(buf + len, "G.728");
break;
case G729:
len += sprintf(buf + len, "G.729");
break;
case G729B:
len += sprintf(buf + len, "G.729B");
break;
case ULAW:
len += sprintf(buf + len, "uLaw");
break;
case ALAW:
len += sprintf(buf + len, "aLaw");
break;
case LINEAR16:
len += sprintf(buf + len, "16 bit Linear");
break;
case LINEAR8:
len += sprintf(buf + len, "8 bit Linear");
break;
case WSS:
len += sprintf(buf + len, "Windows Sound System");
break;
default:
len += sprintf(buf + len, "NO CODEC CHOSEN");
break;
}
len += sprintf(buf + len, "\nAEC ");
switch (j->aec_level) {
case AEC_OFF:
len += sprintf(buf + len, "Off");
break;
case AEC_LOW:
len += sprintf(buf + len, "Low");
break;
case AEC_MED:
len += sprintf(buf + len, "Med");
break;
case AEC_HIGH:
len += sprintf(buf + len, "High");
break;
case AEC_AUTO:
len += sprintf(buf + len, "Auto");
break;
case AEC_AGC:
len += sprintf(buf + len, "AEC/AGC");
break;
default:
len += sprintf(buf + len, "unknown(%i)", j->aec_level);
break;
}
len += sprintf(buf + len, "\nRec volume 0x%x", get_rec_volume(j));
len += sprintf(buf + len, "\nPlay volume 0x%x", get_play_volume(j));
len += sprintf(buf + len, "\nDTMF prescale 0x%x", get_dtmf_prescale(j));
len += sprintf(buf + len, "\nHook state %d", j->hookstate); /* j->r_hook); */
if (j->cardtype == QTI_LINEJACK) {
len += sprintf(buf + len, "\nPOTS Correct %d", j->flags.pots_correct);
len += sprintf(buf + len, "\nPSTN Present %d", j->flags.pstn_present);
len += sprintf(buf + len, "\nPSTN Check %d", j->flags.pstncheck);
len += sprintf(buf + len, "\nPOTS to PSTN %d", j->flags.pots_pstn);
switch (j->daa_mode) {
case SOP_PU_SLEEP:
len += sprintf(buf + len, "\nDAA PSTN On Hook");
break;
case SOP_PU_RINGING:
len += sprintf(buf + len, "\nDAA PSTN Ringing");
len += sprintf(buf + len, "\nRinging state = %d", j->cadence_f[4].state);
break;
case SOP_PU_CONVERSATION:
len += sprintf(buf + len, "\nDAA PSTN Off Hook");
break;
case SOP_PU_PULSEDIALING:
len += sprintf(buf + len, "\nDAA PSTN Pulse Dialing");
break;
}
len += sprintf(buf + len, "\nDAA RMR = %d", j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.bitreg.RMR);
len += sprintf(buf + len, "\nDAA VDD OK = %d", j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.bitreg.VDD_OK);
len += sprintf(buf + len, "\nDAA CR0 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg);
len += sprintf(buf + len, "\nDAA CR1 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg);
len += sprintf(buf + len, "\nDAA CR2 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg);
len += sprintf(buf + len, "\nDAA CR3 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg);
len += sprintf(buf + len, "\nDAA CR4 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg);
len += sprintf(buf + len, "\nDAA CR5 = 0x%02x", j->m_DAAShadowRegs.SOP_REGS.SOP.cr5.reg);
len += sprintf(buf + len, "\nDAA XR0 = 0x%02x", j->m_DAAShadowRegs.XOP_REGS.XOP.xr0.reg);
len += sprintf(buf + len, "\nDAA ringstop %ld - jiffies %ld", j->pstn_ring_stop, jiffies);
}
switch (j->port) {
case PORT_POTS:
len += sprintf(buf + len, "\nPort POTS");
break;
case PORT_PSTN:
len += sprintf(buf + len, "\nPort PSTN");
break;
case PORT_SPEAKER:
len += sprintf(buf + len, "\nPort SPEAKER/MIC");
break;
case PORT_HANDSET:
len += sprintf(buf + len, "\nPort HANDSET");
break;
}
if (j->dsp.low == 0x21 || j->dsp.low == 0x22) {
len += sprintf(buf + len, "\nSLIC state ");
switch (SLIC_GetState(j)) {
case PLD_SLIC_STATE_OC:
len += sprintf(buf + len, "OC");
break;
case PLD_SLIC_STATE_RINGING:
len += sprintf(buf + len, "RINGING");
break;
case PLD_SLIC_STATE_ACTIVE:
len += sprintf(buf + len, "ACTIVE");
break;
case PLD_SLIC_STATE_OHT: /* On-hook transmit */
len += sprintf(buf + len, "OHT");
break;
case PLD_SLIC_STATE_TIPOPEN:
len += sprintf(buf + len, "TIPOPEN");
break;
case PLD_SLIC_STATE_STANDBY:
len += sprintf(buf + len, "STANDBY");
break;
case PLD_SLIC_STATE_APR: /* Active polarity reversal */
len += sprintf(buf + len, "APR");
break;
case PLD_SLIC_STATE_OHTPR: /* OHT polarity reversal */
len += sprintf(buf + len, "OHTPR");
break;
default:
len += sprintf(buf + len, "%d", SLIC_GetState(j));
break;
}
}
len += sprintf(buf + len, "\nBase Frame %2.2x.%2.2x", j->baseframe.high, j->baseframe.low);
len += sprintf(buf + len, "\nCID Base Frame %2d", j->cid_base_frame_size);
#ifdef PERFMON_STATS
len += sprintf(buf + len, "\nTimer Checks %ld", j->timerchecks);
len += sprintf(buf + len, "\nRX Ready Checks %ld", j->rxreadycheck);
len += sprintf(buf + len, "\nTX Ready Checks %ld", j->txreadycheck);
len += sprintf(buf + len, "\nFrames Read %ld", j->framesread);
len += sprintf(buf + len, "\nFrames Written %ld", j->frameswritten);
len += sprintf(buf + len, "\nDry Buffer %ld", j->drybuffer);
len += sprintf(buf + len, "\nRead Waits %ld", j->read_wait);
len += sprintf(buf + len, "\nWrite Waits %ld", j->write_wait);
len += sprintf(buf + len, "\nStatus Waits %ld", j->statuswait);
len += sprintf(buf + len, "\nStatus Wait Fails %ld", j->statuswaitfail);
len += sprintf(buf + len, "\nPControl Waits %ld", j->pcontrolwait);
len += sprintf(buf + len, "\nPControl Wait Fails %ld", j->pcontrolwaitfail);
len += sprintf(buf + len, "\nIs Control Ready Checks %ld", j->iscontrolready);
len += sprintf(buf + len, "\nIs Control Ready Check failures %ld", j->iscontrolreadyfail);
#endif
len += sprintf(buf + len, "\n");
}
}
return len;
}
/*
 * /proc/ixj read handler (legacy read_proc interface): render the full
 * status text into @page, then return the window [@off, @off+@count)
 * of it, setting *@eof once the remainder fits in this read.
 */
static int ixj_read_proc(char *page, char **start, off_t off,
                         int count, int *eof, void *data)
{
	int total = ixj_get_status_proc(page);
	int avail;

	if (total <= off + count)
		*eof = 1;
	*start = page + off;
	avail = total - off;
	if (avail > count)
		avail = count;
	if (avail < 0)
		avail = 0;
	return avail;
}
/*
 * Module-unload teardown: for every probed card, stop its timer, quiesce
 * the card-specific glue logic, free its buffers, detach any PnP device,
 * unregister it from the phone (LTAPI) layer and release its I/O regions.
 * Finally remove the /proc/ixj entry.
 */
static void cleanup(void)
{
int cnt;
IXJ *j;
for (cnt = 0; cnt < IXJMAX; cnt++) {
j = get_ixj(cnt);
/* Only slots that actually probed a DSP base address are live. */
if(j != NULL && j->DSPbase) {
if (ixjdebug & 0x0002)
printk(KERN_INFO "IXJ: Deleting timer for /dev/phone%d\n", cnt);
del_timer(&j->timer);
if (j->cardtype == QTI_LINEJACK) {
/* LineJACK: shut down the DAA and open all relays before
 * giving back the 8-byte XILINX region. */
j->pld_scrw.bits.daafsyncen = 0; /* Turn off DAA Frame Sync */
outb_p(j->pld_scrw.byte, j->XILINXbase);
j->pld_slicw.bits.rly1 = 0;
j->pld_slicw.bits.rly2 = 0;
j->pld_slicw.bits.rly3 = 0;
outb_p(j->pld_slicw.byte, j->XILINXbase + 0x01);
LED_SetState(0x0, j);
if (ixjdebug & 0x0002)
printk(KERN_INFO "IXJ: Releasing XILINX address for /dev/phone%d\n", cnt);
release_region(j->XILINXbase, 8);
} else if (j->cardtype == QTI_PHONEJACK_LITE || j->cardtype == QTI_PHONEJACK_PCI) {
/* These cards only claimed a 4-byte XILINX region. */
if (ixjdebug & 0x0002)
printk(KERN_INFO "IXJ: Releasing XILINX address for /dev/phone%d\n", cnt);
release_region(j->XILINXbase, 4);
}
kfree(j->read_buffer);
kfree(j->write_buffer);
if (j->dev)
pnp_device_detach(j->dev);
if (ixjdebug & 0x0002)
printk(KERN_INFO "IXJ: Unregistering /dev/phone%d from LTAPI\n", cnt);
phone_unregister_device(&j->p);
if (ixjdebug & 0x0002)
printk(KERN_INFO "IXJ: Releasing DSP address for /dev/phone%d\n", cnt);
release_region(j->DSPbase, 16);
#ifdef IXJ_DYN_ALLOC
/* Dynamic-allocation build: the IXJ itself was kmalloc'd. */
if (ixjdebug & 0x0002)
printk(KERN_INFO "IXJ: Freeing memory for /dev/phone%d\n", cnt);
kfree(j);
ixj[cnt] = NULL;
#endif
}
}
if (ixjdebug & 0x0002)
printk(KERN_INFO "IXJ: Removing /proc/ixj\n");
remove_proc_entry ("ixj", NULL);
}
/* Typedefs */
/*
 * Length-prefixed bit string (up to 32 bits in .bits, .length bits valid).
 * NOTE(review): DATABLOCK is not referenced anywhere in this part of the
 * file; it looks related to the PCIEE_* EEPROM helpers below — confirm it
 * is still needed before removing.
 */
typedef struct {
BYTE length;
DWORD bits;
} DATABLOCK;
/*
 * Clock one bit out to the serial EEPROM behind the control register at
 * @wEEPROMAddress (three-wire / Microwire style interface).
 *
 * Register layout as used here: bit 0 = SK (clock), bit 2 = DO (data out).
 * @lastLCC is the last value written to the register (passed by value, so
 * the caller's copy is deliberately untouched); @byData is the bit to send
 * (any non-zero value counts as 1).
 *
 * The removed statement "byData = byData << 1;" was dead code: byData is a
 * by-value parameter and was never read again after the shift, so dropping
 * it changes no I/O.
 */
static void PCIEE_WriteBit(WORD wEEPROMAddress, BYTE lastLCC, BYTE byData)
{
	lastLCC = lastLCC & 0xfb;
	lastLCC = lastLCC | (byData ? 4 : 0);
	outb(lastLCC, wEEPROMAddress);	/* set data out bit as appropriate */
	mdelay(1);
	lastLCC = lastLCC | 0x01;
	outb(lastLCC, wEEPROMAddress);	/* SK rising edge */
	lastLCC = lastLCC & 0xfe;
	mdelay(1);
	outb(lastLCC, wEEPROMAddress);	/* after delay, SK falling edge */
}
/*
 * Clock one bit in from the serial EEPROM: pulse SK (bit 0 of the control
 * register) high then low, then sample DI on bit 3 of the same register.
 * Returns 0 or 1. @lastLCC is by value — the caller's copy is unchanged.
 */
static BYTE PCIEE_ReadBit(WORD wEEPROMAddress, BYTE lastLCC)
{
mdelay(1);
lastLCC = lastLCC | 0x01;
outb(lastLCC, wEEPROMAddress); /*SK rising edge */
lastLCC = lastLCC & 0xfe;
mdelay(1);
outb(lastLCC, wEEPROMAddress); /*after delay, SK falling edge */
/* Sample the data-in line (bit 3). */
return ((inb(wEEPROMAddress) >> 3) & 1);
}
/*
 * Read one 16-bit word at EEPROM address @wLoc from the serial EEPROM whose
 * control register sits at @wAddress + 3, storing the result in *@pwResult.
 *
 * Sequence: assert CS, clock out a start bit plus the READ opcode (1,1,0),
 * then 8 address bits MSB-first, then clock in 16 data bits MSB-first,
 * finally de-assert CS.
 *
 * Always returns 0 (false, i.e. "no error") — callers such as
 * PCIEE_GetSerialNumber() treat a non-zero return as failure, so the error
 * path is currently unreachable.
 * NOTE(review): PCIEE_WriteBit/ReadBit take lastLCC by value, so the local
 * lastLCC here never reflects the SK/DO toggling they do; the final writes
 * below re-impose a consistent register state, which appears intentional —
 * confirm against the hardware documentation.
 */
static bool PCIEE_ReadWord(WORD wAddress, WORD wLoc, WORD * pwResult)
{
BYTE lastLCC;
WORD wEEPROMAddress = wAddress + 3;
DWORD i;
BYTE byResult;
*pwResult = 0;
lastLCC = inb(wEEPROMAddress);
lastLCC = lastLCC | 0x02;
lastLCC = lastLCC & 0xfe;
outb(lastLCC, wEEPROMAddress); /* CS hi, SK lo */
mdelay(1); /* delay */
/* Start bit + READ opcode: 1, 1, 0. */
PCIEE_WriteBit(wEEPROMAddress, lastLCC, 1);
PCIEE_WriteBit(wEEPROMAddress, lastLCC, 1);
PCIEE_WriteBit(wEEPROMAddress, lastLCC, 0);
/* 8 address bits, MSB first. */
for (i = 0; i < 8; i++) {
PCIEE_WriteBit(wEEPROMAddress, lastLCC, wLoc & 0x80 ? 1 : 0);
wLoc <<= 1;
}
/* 16 data bits, MSB first. */
for (i = 0; i < 16; i++) {
byResult = PCIEE_ReadBit(wEEPROMAddress, lastLCC);
*pwResult = (*pwResult << 1) | byResult;
}
mdelay(1); /* another delay */
lastLCC = lastLCC & 0xfd;
outb(lastLCC, wEEPROMAddress); /* negate CS */
return 0;
}
/*
 * Fetch the 32-bit board serial number from EEPROM words 62 (low half)
 * and 63 (high half). Returns 0 if either read reports failure.
 */
static DWORD PCIEE_GetSerialNumber(WORD wAddress)
{
	WORD wLo, wHi;

	/* Short-circuit keeps the original order: word 62 first, then 63. */
	if (PCIEE_ReadWord(wAddress, 62, &wLo) ||
	    PCIEE_ReadWord(wAddress, 63, &wHi))
		return 0;
	return (((DWORD) wHi << 16) | wLo);
}
/*
 * Module parameters for manually probed ISA cards (no PnP):
 * dspio[n] is the DSP base I/O port of card n, xio[n] the matching
 * XILINX base port. Zero entries are ignored by ixj_probe_isa().
 */
static int dspio[IXJMAX + 1] =
{
0,
};
static int xio[IXJMAX + 1] =
{
0,
};
module_param_array(dspio, int, NULL, 0);
module_param_array(xio, int, NULL, 0);
MODULE_DESCRIPTION("Quicknet VoIP Telephony card module - www.quicknet.net");
MODULE_AUTHOR("Ed Okerson <eokerson@quicknet.net>");
MODULE_LICENSE("GPL");
/* Module unload entry point — all real work happens in cleanup(). */
static void __exit ixj_exit(void)
{
cleanup();
}
/*
 * Claim the 16-byte DSP I/O region at @port and allocate a fresh IXJ
 * descriptor for it. Returns the descriptor with DSPbase set, or NULL
 * on failure (region released again if the allocation failed).
 */
static IXJ *new_ixj(unsigned long port)
{
	IXJ *j;

	if (!request_region(port, 16, "ixj DSP")) {
		printk(KERN_INFO "ixj: can't get I/O address 0x%lx\n", port);
		return NULL;
	}
	j = ixj_alloc();
	if (j) {
		j->DSPbase = port;
		return j;
	}
	release_region(port, 16);
	printk(KERN_INFO "ixj: out of memory\n");
	return NULL;
}
/*
 * Probe for ISA-PnP Quicknet cards. Iterates the three known PnP function
 * IDs in order — 0x110 (PhoneJACK), 0x310 (LineJACK), 0x410 (PhoneJACK
 * Lite) — attaching and activating every matching device, allocating an
 * IXJ per card and self-probing it. *@cnt is advanced for each card found.
 * Returns the last ixj_selfprobe() result (0 on success), or a negative
 * errno on PnP activation/resource failure.
 */
static int __init ixj_probe_isapnp(int *cnt)
{
int probe = 0;
int func = 0x110;
struct pnp_dev *dev = NULL, *old_dev = NULL;
while (1) {
do {
IXJ *j;
int result;
old_dev = dev;
dev = pnp_find_dev(NULL, ISAPNP_VENDOR('Q', 'T', 'I'),
ISAPNP_FUNCTION(func), old_dev);
if (!dev || !dev->card)
break;
result = pnp_device_attach(dev);
if (result < 0) {
printk("pnp attach failed %d \n", result);
break;
}
if (pnp_activate_dev(dev) < 0) {
printk("pnp activate failed (out of resources?)\n");
pnp_device_detach(dev);
return -ENOMEM;
}
if (!pnp_port_valid(dev, 0)) {
pnp_device_detach(dev);
return -ENODEV;
}
/* NOTE(review): if new_ixj() fails here the attached PnP device is
 * never detached — looks like a leak on the error path; confirm. */
j = new_ixj(pnp_port_start(dev, 0));
if (!j)
break;
/* Port 1 is the XILINX region on everything but the PhoneJACK. */
if (func != 0x110)
j->XILINXbase = pnp_port_start(dev, 1); /* get real port */
switch (func) {
case (0x110):
j->cardtype = QTI_PHONEJACK;
break;
case (0x310):
j->cardtype = QTI_LINEJACK;
break;
case (0x410):
j->cardtype = QTI_PHONEJACK_LITE;
break;
}
j->board = *cnt;
probe = ixj_selfprobe(j);
if(!probe) {
j->serial = dev->card->serial;
j->dev = dev;
switch (func) {
case 0x110:
printk(KERN_INFO "ixj: found Internet PhoneJACK at 0x%x\n", j->DSPbase);
break;
case 0x310:
printk(KERN_INFO "ixj: found Internet LineJACK at 0x%x\n", j->DSPbase);
break;
case 0x410:
printk(KERN_INFO "ixj: found Internet PhoneJACK Lite at 0x%x\n", j->DSPbase);
break;
}
}
++*cnt;
} while (dev);
/* Advance to the next function ID; stop after 0x410. */
if (func == 0x410)
break;
if (func == 0x310)
func = 0x410;
if (func == 0x110)
func = 0x310;
dev = NULL;
}
return probe;
}
/*
 * Probe ISA cards at the I/O ports passed in via the dspio[]/xio[] module
 * parameters (for systems without PnP). *@cnt is advanced per card tried.
 * Always returns 0: a failed self-probe of a manually specified port is
 * non-fatal, matching the original behavior.
 *
 * Fix: the original stored ixj_selfprobe()'s result in a local 'probe'
 * that was never read (set-but-unused warning); the dead store is removed
 * and the result deliberately discarded.
 */
static int __init ixj_probe_isa(int *cnt)
{
	int i;

	/* Use passed parameters for older kernels without PnP */
	for (i = 0; i < IXJMAX; i++) {
		if (dspio[i]) {
			IXJ *j = new_ixj(dspio[i]);
			if (!j)
				break;
			j->XILINXbase = xio[i];
			j->cardtype = 0;
			j->board = *cnt;
			/* Result intentionally ignored — see function comment. */
			ixj_selfprobe(j);
			j->dev = NULL;
			++*cnt;
		}
	}
	return 0;
}
/*
 * Probe PCI PhoneJACK cards. For each Quicknet XJ device: enable it,
 * claim BAR 0 as the DSP region, read the serial number from the EEPROM
 * behind BAR 2, and self-probe. *@cnt is advanced per card.
 * Returns the last ixj_selfprobe() result (0 on success).
 *
 * Fix: the serial-number line was written as a cast,
 *   j->serial = (PCIEE_GetSerialNumber)pci_resource_start(pci, 2);
 * which is invalid C (cast to a function designator) and clearly meant to
 * be a call — corrected to invoke PCIEE_GetSerialNumber() on the BAR 2
 * base address.
 */
static int __init ixj_probe_pci(int *cnt)
{
	struct pci_dev *pci = NULL;
	int i, probe = 0;
	IXJ *j = NULL;

	for (i = 0; i < IXJMAX - *cnt; i++) {
		pci = pci_get_device(PCI_VENDOR_ID_QUICKNET,
				     PCI_DEVICE_ID_QUICKNET_XJ, pci);
		if (!pci)
			break;
		if (pci_enable_device(pci))
			break;
		j = new_ixj(pci_resource_start(pci, 0));
		if (!j)
			break;
		j->serial = PCIEE_GetSerialNumber(pci_resource_start(pci, 2));
		j->XILINXbase = j->DSPbase + 0x10;
		j->cardtype = QTI_PHONEJACK_PCI;
		j->board = *cnt;
		probe = ixj_selfprobe(j);
		if (!probe)
			printk(KERN_INFO "ixj: found Internet PhoneJACK PCI at 0x%x\n", j->DSPbase);
		++*cnt;
	}
	/* Drop the reference held on the last device returned (NULL-safe). */
	pci_dev_put(pci);
	return probe;
}
/*
 * Module load entry point: run the ISA-PnP, manual-ISA and PCI probes in
 * turn (each advances 'cnt' past the slots it filled), then publish
 * /proc/ixj. Returns the first negative probe error, else the last
 * probe's result.
 *
 * Fix: removed a duplicate 'cnt = 0;' — cnt is already initialized at its
 * declaration, so the second assignment was a no-op.
 */
static int __init ixj_init(void)
{
	int cnt = 0;
	int probe = 0;

	/* These might be no-ops, see above. */
	if ((probe = ixj_probe_isapnp(&cnt)) < 0) {
		return probe;
	}
	if ((probe = ixj_probe_isa(&cnt)) < 0) {
		return probe;
	}
	if ((probe = ixj_probe_pci(&cnt)) < 0) {
		return probe;
	}
	printk(KERN_INFO "ixj driver initialized.\n");
	/* NOTE(review): create_proc_read_entry() can fail; the return value
	 * is ignored here as in the original — confirm that is acceptable. */
	create_proc_read_entry ("ixj", 0, NULL, ixj_read_proc, NULL);
	return probe;
}
/* Register the module load/unload entry points. */
module_init(ixj_init);
module_exit(ixj_exit);
/*
 * Load the DAA shadow-register set with the US country coefficients:
 * filter coefficient tables (COP), configuration registers (SOP cr0-cr4)
 * and extended registers (XOP xr0-xr7). Only the in-memory shadow copy is
 * written here; a separate transfer pushes it to the DAA chip.
 *
 * NOTE(review): several of the original byte-list comments (e.g. the
 * "IM-filter part 1 (04): 0E,32,..." line) do not match the values
 * actually assigned below; the values are kept untouched — confirm the
 * comments against the coefficient source before trusting them.
 */
static void DAA_Coeff_US(IXJ *j)
{
int i;
j->daa_country = DAA_US;
/*----------------------------------------------- */
/* CAO */
/* Clear the caller-ID receive buffer. */
for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
}
/* Bytes for IM-filter part 1 (04): 0E,32,E2,2F,C2,5A,C0,00 */
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x03;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0x4B;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0x5D;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0xCD;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0x24;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0xC5;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
/* Bytes for IM-filter part 2 (05): 72,85,00,0E,2B,3A,D0,08 */
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x71;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x1A;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0A;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0xB5;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
/* Bytes for FRX-filter (08): 03,8F,48,F2,8F,48,70,08 */
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x05;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0xA3;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0x72;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x34;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x3F;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x3B;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0x30;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
/* Bytes for FRR-filter (07): 04,8F,38,7F,9B,EA,B0,08 */
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x05;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x87;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xF9;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x3E;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x32;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xDA;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0xB0;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
/* Bytes for AX-filter (0A): 16,55,DD,CA */
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x41;
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xB5;
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
/* Bytes for AR-filter (09): 52,D3,11,42 */
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x25;
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0xC7;
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
/* Bytes for TH-filter part 1 (00): 00,42,48,81,B3,80,00,98 */
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x42;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xA5;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
/* Bytes for TH-filter part 2 (01): 02,F2,33,A0,68,AB,8A,AD */
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xA2;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x2B;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0xB0;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0xE8;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0xAB;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x81;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0xCC;
/* Bytes for TH-filter part 3 (02): 00,88,DA,54,A4,BA,2D,BB */
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0xD2;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x24;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0xBA;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0xA9;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x3B;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0xA6;
/* ; (10K, 0.68uF) */
/* */
/* Bytes for Ringing part 1 (03):1B,3B,9B,BA,D4,1C,B3,23 */
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
/* Bytes for Ringing part 2 (06):13,42,A6,BA,D4,73,CA,D5 */
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
/* Levelmetering Ringing (0D):B2,45,0F,8E */
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA;
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x35;
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
/* Retained alternate coefficient sets below (commented out in the
 * original); kept for reference. */
/* Bytes for Ringing part 1 (03):1B,3B,9B,BA,D4,1C,B3,23 */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1C; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0xB3; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0xAB; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0xAB; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x54; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x2D; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0x62; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x2D; */
/* Bytes for Ringing part 2 (06):13,42,A6,BA,D4,73,CA,D5 */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x2D; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0x62; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBB; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x2A; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7D; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A; */
/* j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD4; */
/* */
/* Levelmetering Ringing (0D):B2,45,0F,8E */
/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA; */
/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x05; */
/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F; */
/* j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E; */
/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
/* */
/* ;CR Registers */
/* Config. Reg. 0 (filters) (cr0):FE ; CLK gen. by crystal */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
/* Config. Reg. 1 (dialing) (cr1):05 */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
/* Config. Reg. 2 (caller ID) (cr2):04 */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
/* Config. Reg. 3 (testloops) (cr3):03 ; SEL Bit==0, HP-disabled */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
/* Config. Reg. 4 (analog gain) (cr4):02 */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
/* Config. Reg. 5 (Version) (cr5):02 */
/* Config. Reg. 6 (Reserved) (cr6):00 */
/* Config. Reg. 7 (Reserved) (cr7):00 */
/* */
/* ;xr Registers */
/* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
/* Ext. Reg. 1 (Interrupt enable) (xr1):3C Cadence, RING, Caller ID, VDD_OK */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x3C;
/* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
/* Ext. Reg. 3 (DC Char) (xr3):32 ; B-Filter Off == 1 */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x3B; /*0x32; */
/* Ext. Reg. 4 (Cadence) (xr4):00 */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
/* Ext. Reg. 5 (Ring timer) (xr5):22 */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
/* Ext. Reg. 6 (Power State) (xr6):00 */
j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
/* Ext. Reg. 7 (Vdd) (xr7):40 */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40; /* 0x40 ??? Should it be 0x00? */
/* */
/* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
/* 12,33,5A,C3 ; 770 Hz */
/* 13,3C,5B,32 ; 852 Hz */
/* 1D,1B,5C,CC ; 941 Hz */
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
/* EC,1D,52,22 ; 1336 Hz */
/* AA,AC,51,D2 ; 1477 Hz */
/* 9B,3B,51,25 ; 1633 Hz */
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
}
/*
 * Load the DAA shadow-register set with the UK country coefficients —
 * same layout as DAA_Coeff_US(): COP filter tables, SOP configuration
 * registers cr0-cr4, and XOP extended registers xr0-xr7. Only the
 * in-memory shadow copy is written here.
 */
static void DAA_Coeff_UK(IXJ *j)
{
int i;
j->daa_country = DAA_UK;
/*----------------------------------------------- */
/* CAO */
/* Clear the caller-ID receive buffer. */
for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
}
/* Bytes for IM-filter part 1 (04): 00,C2,BB,A8,CB,81,A0,00 */
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xC2;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xBB;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0xA8;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xCB;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0x81;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
/* Bytes for IM-filter part 2 (05): 40,00,00,0A,A4,33,E0,08 */
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x40;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0A;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0xA4;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
/* Bytes for FRX-filter (08): 07,9B,ED,24,B2,A2,A0,08 */
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x9B;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0xED;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x24;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0xB2;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0xA2;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0xA0;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
/* Bytes for FRR-filter (07): 0F,92,F2,B2,87,D2,30,08 */
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x0F;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x92;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xF2;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0xB2;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x87;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xD2;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0x30;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
/* Bytes for AX-filter (0A): 1B,A5,DD,CA */
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x1B;
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xA5;
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
/* Bytes for AR-filter (09): E2,27,10,D6 */
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0xE2;
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0x27;
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
/* Bytes for TH-filter part 1 (00): 80,2D,38,8B,D0,00,00,98 */
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x80;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x2D;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x38;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x8B;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xD0;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
/* Bytes for TH-filter part 2 (01): 02,5A,53,F0,0B,5F,84,D4 */
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0x5A;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x53;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0xF0;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x0B;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x5F;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x84;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0xD4;
/* Bytes for TH-filter part 3 (02): 00,88,6A,A4,8F,52,F5,32 */
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0x6A;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0xA4;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x8F;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0x52;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0xF5;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0x32;
/* ; idle */
/* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
/* Levelmetering Ringing (0D):AA,35,0F,8E ; 25Hz 30V less possible? */
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA;
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x35;
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
/* ;CR Registers */
/* Config. Reg. 0 (filters) (cr0):FF */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
/* Config. Reg. 1 (dialing) (cr1):05 */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
/* Config. Reg. 2 (caller ID) (cr2):04 */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
/* Config. Reg. 3 (testloops) (cr3):00 ; */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
/* Config. Reg. 4 (analog gain) (cr4):02 */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
/* Config. Reg. 5 (Version) (cr5):02 */
/* Config. Reg. 6 (Reserved) (cr6):00 */
/* Config. Reg. 7 (Reserved) (cr7):00 */
/* ;xr Registers */
/* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02; /* SO_1 set to '1' because it is inverted. */
/* Ext. Reg. 1 (Interrupt enable) (xr1):1C */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C; /* RING, Caller ID, VDD_OK */
/* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
/* Ext. Reg. 3 (DC Char) (xr3):36 ; */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x36;
/* Ext. Reg. 4 (Cadence) (xr4):00 */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
/* Ext. Reg. 5 (Ring timer) (xr5):22 */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
/* Ext. Reg. 6 (Power State) (xr6):00 */
j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
/* Ext. Reg. 7 (Vdd) (xr7):46 */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x46; /* 0x46 ??? Should it be 0x00? */
/* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
/* 12,33,5A,C3 ; 770 Hz */
/* 13,3C,5B,32 ; 852 Hz */
/* 1D,1B,5C,CC ; 941 Hz */
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
/* EC,1D,52,22 ; 1336 Hz */
/* AA,AC,51,D2 ; 1477 Hz */
/* 9B,3B,51,25 ; 1633 Hz */
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
}
/*
 * DAA_Coeff_France - load the France-specific coefficient set for the
 * DAA (Data Access Arrangement) line-interface chip into the driver's
 * shadow register image (j->m_DAAShadowRegs).
 *
 * Only the in-memory shadow copy (and j->daa_country) is written here;
 * the values are transferred to the hardware elsewhere.  The hex byte
 * lists in the comments mirror the vendor coefficient tables; bytes are
 * stored with index [7] (or [3]) holding the first byte of the list.
 */
static void DAA_Coeff_France(IXJ *j)
{
	int i;
	j->daa_country = DAA_FRANCE;
	/*----------------------------------------------- */
	/* CAO */
	for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
		j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
	}
	/* Bytes for IM-filter part 1 (04): 02,A2,43,2C,22,AF,A0,00 */
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x02;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xA2;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0x43;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0x2C;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0x22;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0xAF;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
	/* Bytes for IM-filter part 2 (05): 67,CE,00,0C,22,33,E0,08 */
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x67;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0xCE;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
	/* NOTE(review): 0x2C disagrees with the "0C" in the byte list above -
	   confirm against the DAA coefficient datasheet before changing */
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x2C;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0x22;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
	/* Bytes for FRX-filter (08): 07,9A,28,F6,23,4A,B0,08 */
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x9A;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0x28;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0xF6;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x23;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x4A;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0xB0;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
	/* Bytes for FRR-filter (07): 03,8F,F9,2F,9E,FA,20,08 */
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x03;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x8F;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xF9;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x2F;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x9E;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xFA;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0x20;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
	/* Bytes for AX-filter (0A): 16,B5,DD,CA */
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x16;
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xB5;
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
	/* Bytes for AR-filter (09): 52,C7,10,D6 */
	/* NOTE(review): 0xE2 disagrees with the "52" in the byte list above -
	   confirm which value is intended before changing either */
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0xE2;
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0xC7;
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
	/* Bytes for TH-filter part 1 (00): 00,42,48,81,A6,80,00,98 */
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x42;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xA6;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
	/* Bytes for TH-filter part 2 (01): 02,AC,2A,30,78,AC,8A,2C */
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xAC;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x2A;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0x30;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x78;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0xAC;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x8A;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0x2C;
	/* Bytes for TH-filter part 3 (02): 00,88,DA,A5,22,BA,2C,45 */
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0xDA;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0xA5;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x22;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0xBA;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x2C;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0x45;
	/* ; idle */
	/* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
	/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
	/* Levelmetering Ringing (0D):32,45,B5,84 ; 50Hz 20V */
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0x32;
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x45;
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0xB5;
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x84;
	/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
	/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
	/* ;CR Registers */
	/* Config. Reg. 0 (filters)        (cr0):FF */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
	/* Config. Reg. 1 (dialing)        (cr1):05 */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
	/* Config. Reg. 2 (caller ID)      (cr2):04 */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
	/* Config. Reg. 3 (testloops)      (cr3):00 ; */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
	/* Config. Reg. 4 (analog gain)    (cr4):02 */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
	/* Config. Reg. 5 (Version)        (cr5):02 */
	/* Config. Reg. 6 (Reserved)       (cr6):00 */
	/* Config. Reg. 7 (Reserved)       (cr7):00 */
	/* ;xr Registers */
	/* Ext. Reg. 0 (Interrupt Reg.)    (xr0):02 */
	j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02;	/* SO_1 set to '1' because it is inverted. */
	/* Ext. Reg. 1 (Interrupt enable)  (xr1):1C */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C;		/* RING, Caller ID, VDD_OK */
	/* Ext. Reg. 2 (Cadence Time Out)  (xr2):7D */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
	/* Ext. Reg. 3 (DC Char)           (xr3):36 ; */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x36;
	/* Ext. Reg. 4 (Cadence)           (xr4):00 */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
	/* Ext. Reg. 5 (Ring timer)        (xr5):22 */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
	/* Ext. Reg. 6 (Power State)       (xr6):00 */
	j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
	/* Ext. Reg. 7 (Vdd)               (xr7):46 */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x46;		/* 0x46 ??? Should it be 0x00? */
	/* DTMF Tone 1 (0B): 11,B3,5A,2C ;  697 Hz */
	/*                   12,33,5A,C3 ;  770 Hz */
	/*                   13,3C,5B,32 ;  852 Hz */
	/*                   1D,1B,5C,CC ;  941 Hz */
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
	/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
	/*                   EC,1D,52,22 ; 1336 Hz */
	/*                   AA,AC,51,D2 ; 1477 Hz */
	/*                   9B,3B,51,25 ; 1633 Hz */
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
}
/*
 * DAA_Coeff_Germany - load the Germany-specific coefficient set for the
 * DAA (Data Access Arrangement) line-interface chip into the driver's
 * shadow register image (j->m_DAAShadowRegs).
 *
 * Only the in-memory shadow copy (and j->daa_country) is written here;
 * the values are transferred to the hardware elsewhere.  The hex byte
 * lists in the comments mirror the vendor coefficient tables; bytes are
 * stored with index [7] (or [3]) holding the first byte of the list.
 */
static void DAA_Coeff_Germany(IXJ *j)
{
	int i;
	j->daa_country = DAA_GERMANY;
	/*----------------------------------------------- */
	/* CAO */
	for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
		j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
	}
	/* Bytes for IM-filter part 1 (04): 00,CE,BB,B8,D2,81,B0,00 */
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xCE;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xBB;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0xB8;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xD2;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0x81;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xB0;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
	/* Bytes for IM-filter part 2 (05): 45,8F,00,0C,D2,3A,D0,08 */
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x45;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x8F;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0C;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0xD2;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x3A;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xD0;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
	/* Bytes for FRX-filter (08): 07,AA,E2,34,24,89,20,08 */
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0xAA;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0xE2;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x34;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x24;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x89;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0x20;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
	/* Bytes for FRR-filter (07): 02,87,FA,37,9A,CA,B0,08 */
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x02;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x87;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xFA;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x37;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x9A;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xCA;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0xB0;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
	/* Bytes for AX-filter (0A): 72,D5,DD,CA */
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x72;
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xD5;
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
	/* Bytes for AR-filter (09): 72,42,13,4B */
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x72;
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0x42;
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x13;
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0x4B;
	/* Bytes for TH-filter part 1 (00): 80,52,48,81,AD,80,00,98 */
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x80;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x52;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xAD;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
	/* Bytes for TH-filter part 2 (01): 02,42,5A,20,E8,1A,81,27 */
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0x42;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x5A;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0x20;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0xE8;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x1A;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x81;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0x27;
	/* Bytes for TH-filter part 3 (02): 00,88,63,26,BD,4B,A3,C2 */
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0x63;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x26;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0xBD;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0x4B;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0xA3;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0xC2;
	/* ; (10K, 0.68uF) */
	/* Bytes for Ringing part 1 (03):1B,3B,9B,BA,D4,1C,B3,23 */
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3B;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x9B;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0xBA;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0xD4;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x1C;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xB3;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
	/* Bytes for Ringing part 2 (06):13,42,A6,BA,D4,73,CA,D5 */
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x13;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0x42;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0xD4;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x73;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0xCA;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
	/* Levelmetering Ringing (0D):B2,45,0F,8E */
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xB2;
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x45;
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
	/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
	/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
	/* ;CR Registers */
	/* Config. Reg. 0 (filters)        (cr0):FF ; all Filters enabled, CLK from ext. source */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
	/* Config. Reg. 1 (dialing)        (cr1):05 ; Manual Ring, Ring metering enabled */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
	/* Config. Reg. 2 (caller ID)      (cr2):04 ; Analog Gain 0dB, FSC internal */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
	/* Config. Reg. 3 (testloops)      (cr3):00 ; SEL Bit==0, HP-enabled */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
	/* Config. Reg. 4 (analog gain)    (cr4):02 */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
	/* Config. Reg. 5 (Version)        (cr5):02 */
	/* Config. Reg. 6 (Reserved)       (cr6):00 */
	/* Config. Reg. 7 (Reserved)       (cr7):00 */
	/* ;xr Registers */
	/* Ext. Reg. 0 (Interrupt Reg.)    (xr0):02 */
	j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02;	/* SO_1 set to '1' because it is inverted. */
	/* Ext. Reg. 1 (Interrupt enable)  (xr1):1C ; Ring, CID, VDDOK Interrupts enabled */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C;		/* RING, Caller ID, VDD_OK */
	/* Ext. Reg. 2 (Cadence Time Out)  (xr2):7D */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
	/* Ext. Reg. 3 (DC Char)           (xr3):32 ; B-Filter Off==1, U0=3.5V, R=200Ohm */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x32;
	/* Ext. Reg. 4 (Cadence)           (xr4):00 */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
	/* Ext. Reg. 5 (Ring timer)        (xr5):22 */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
	/* Ext. Reg. 6 (Power State)       (xr6):00 */
	j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
	/* Ext. Reg. 7 (Vdd)               (xr7):40 ; VDD=4.25 V */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40;		/* 0x40 ??? Should it be 0x00? */
	/* DTMF Tone 1 (0B): 11,B3,5A,2C ;  697 Hz */
	/*                   12,33,5A,C3 ;  770 Hz */
	/*                   13,3C,5B,32 ;  852 Hz */
	/*                   1D,1B,5C,CC ;  941 Hz */
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
	/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
	/*                   EC,1D,52,22 ; 1336 Hz */
	/*                   AA,AC,51,D2 ; 1477 Hz */
	/*                   9B,3B,51,25 ; 1633 Hz */
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
}
/*
 * DAA_Coeff_Australia - load the Australia-specific coefficient set for
 * the DAA (Data Access Arrangement) line-interface chip into the
 * driver's shadow register image (j->m_DAAShadowRegs).
 *
 * Only the in-memory shadow copy (and j->daa_country) is written here;
 * the values are transferred to the hardware elsewhere.  The hex byte
 * lists in the comments mirror the vendor coefficient tables; bytes are
 * stored with index [7] (or [3]) holding the first byte of the list.
 */
static void DAA_Coeff_Australia(IXJ *j)
{
	int i;
	j->daa_country = DAA_AUSTRALIA;
	/*----------------------------------------------- */
	/* CAO */
	for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
		j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
	}
	/* Bytes for IM-filter part 1 (04): 00,A3,AA,28,B3,82,D0,00 */
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xA3;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xAA;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0x28;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xB3;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0x82;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xD0;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
	/* Bytes for IM-filter part 2 (05): 70,96,00,09,32,6B,C0,08 */
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x70;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0x96;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x09;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0x32;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x6B;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xC0;
	j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
	/* Bytes for FRX-filter (08): 07,96,E2,34,32,9B,30,08 */
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x07;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x96;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0xE2;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x34;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x32;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x9B;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0x30;
	j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
	/* Bytes for FRR-filter (07): 0F,9A,E9,2F,22,CC,A0,08 */
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x0F;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x9A;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0xE9;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x2F;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x22;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xCC;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0xA0;
	j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
	/* Bytes for AX-filter (0A): CB,45,DD,CA */
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0xCB;
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0x45;
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
	j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
	/* Bytes for AR-filter (09): 1B,67,10,D6 */
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x1B;
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0x67;
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
	j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
	/* Bytes for TH-filter part 1 (00): 80,52,48,81,AF,80,00,98 */
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x80;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x52;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xAF;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
	/* Bytes for TH-filter part 2 (01): 02,DB,52,B0,38,01,82,AC */
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xDB;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x52;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0xB0;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x38;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x01;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x82;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0xAC;
	/* Bytes for TH-filter part 3 (02): 00,88,4A,3E,2C,3B,24,46 */
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0x4A;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x3E;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x2C;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0x3B;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x24;
	j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0x46;
	/* ; idle */
	/* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
	/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
	j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
	/* Levelmetering Ringing (0D):32,45,B5,84 ; 50Hz 20V */
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0x32;
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x45;
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0xB5;
	j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x84;
	/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
	/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
	j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
	/* ;CR Registers */
	/* Config. Reg. 0 (filters)        (cr0):FF */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
	/* Config. Reg. 1 (dialing)        (cr1):05 */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
	/* Config. Reg. 2 (caller ID)      (cr2):04 */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
	/* Config. Reg. 3 (testloops)      (cr3):00 ; */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
	/* Config. Reg. 4 (analog gain)    (cr4):02 */
	j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
	/* Config. Reg. 5 (Version)        (cr5):02 */
	/* Config. Reg. 6 (Reserved)       (cr6):00 */
	/* Config. Reg. 7 (Reserved)       (cr7):00 */
	/* ;xr Registers */
	/* Ext. Reg. 0 (Interrupt Reg.)    (xr0):02 */
	j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02;	/* SO_1 set to '1' because it is inverted. */
	/* Ext. Reg. 1 (Interrupt enable)  (xr1):1C */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C;		/* RING, Caller ID, VDD_OK */
	/* Ext. Reg. 2 (Cadence Time Out)  (xr2):7D */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
	/* Ext. Reg. 3 (DC Char)           (xr3):2B ; */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x2B;
	/* Ext. Reg. 4 (Cadence)           (xr4):00 */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
	/* Ext. Reg. 5 (Ring timer)        (xr5):22 */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
	/* Ext. Reg. 6 (Power State)       (xr6):00 */
	j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
	/* Ext. Reg. 7 (Vdd)               (xr7):40 */
	j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40;		/* 0x40 ??? Should it be 0x00? */
	/* DTMF Tone 1 (0B): 11,B3,5A,2C ;  697 Hz */
	/*                   12,33,5A,C3 ;  770 Hz */
	/*                   13,3C,5B,32 ;  852 Hz */
	/*                   1D,1B,5C,CC ;  941 Hz */
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
	/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
	/*                   EC,1D,52,22 ; 1336 Hz */
	/*                   AA,AC,51,D2 ; 1477 Hz */
	/*                   9B,3B,51,25 ; 1633 Hz */
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
	j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
}
/*
 * DAA_Coeff_Japan() - stage the Japan-specific DAA coefficient set.
 *
 * Fills the shadow-register image (j->m_DAAShadowRegs) with the filter
 * coefficients (COP block), configuration registers cr0-cr4 (SOP block),
 * extended registers xr0-xr7 (XOP block) and DTMF tone coefficients for
 * the Japanese PSTN, and records the selection in j->daa_country.
 *
 * NOTE(review): only the in-memory shadow copy is written here; the
 * actual transfer to the DAA chip presumably happens elsewhere in the
 * driver - confirm against the callers of this function.
 */
static void DAA_Coeff_Japan(IXJ *j)
{
int i;
j->daa_country = DAA_JAPAN;
/*----------------------------------------------- */
/* CAO */
/* Clear the caller-ID capture buffer before loading new country data. */
for (i = 0; i < ALISDAA_CALLERID_SIZE; i++) {
j->m_DAAShadowRegs.CAO_REGS.CAO.CallerID[i] = 0;
}
/* Coefficient bytes below are stored in reverse order: the comment
 * lists them MSB-first as they appear in the vendor tables, while the
 * array is filled from index [7] (or [3]) down to [0]. */
/* Bytes for IM-filter part 1 (04): 06,BD,E2,2D,BA,F9,A0,00 */
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[7] = 0x06;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[6] = 0xBD;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[5] = 0xE2;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[4] = 0x2D;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[3] = 0xBA;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[2] = 0xF9;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[1] = 0xA0;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_1[0] = 0x00;
/* Bytes for IM-filter part 2 (05): 6F,F7,00,0E,34,33,E0,08 */
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[7] = 0x6F;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[6] = 0xF7;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[5] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[4] = 0x0E;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[3] = 0x34;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[2] = 0x33;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[1] = 0xE0;
j->m_DAAShadowRegs.COP_REGS.COP.IMFilterCoeff_2[0] = 0x08;
/* Bytes for FRX-filter (08): 02,8F,68,77,9C,58,F0,08 */
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[7] = 0x02;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[6] = 0x8F;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[5] = 0x68;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[4] = 0x77;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[3] = 0x9C;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[2] = 0x58;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[1] = 0xF0;
j->m_DAAShadowRegs.COP_REGS.COP.FRXFilterCoeff[0] = 0x08;
/* Bytes for FRR-filter (07): 03,8F,38,73,87,EA,20,08 */
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[7] = 0x03;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[6] = 0x8F;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[5] = 0x38;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[4] = 0x73;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[3] = 0x87;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[2] = 0xEA;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[1] = 0x20;
j->m_DAAShadowRegs.COP_REGS.COP.FRRFilterCoeff[0] = 0x08;
/* Bytes for AX-filter (0A): 51,C5,DD,CA */
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[3] = 0x51;
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[2] = 0xC5;
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[1] = 0xDD;
j->m_DAAShadowRegs.COP_REGS.COP.AXFilterCoeff[0] = 0xCA;
/* Bytes for AR-filter (09): 25,A7,10,D6 */
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[3] = 0x25;
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[2] = 0xA7;
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[1] = 0x10;
j->m_DAAShadowRegs.COP_REGS.COP.ARFilterCoeff[0] = 0xD6;
/* Bytes for TH-filter part 1 (00): 00,42,48,81,AE,80,00,98 */
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[7] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[6] = 0x42;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[5] = 0x48;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[4] = 0x81;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[3] = 0xAE;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[2] = 0x80;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[1] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_1[0] = 0x98;
/* Bytes for TH-filter part 2 (01): 02,AB,2A,20,99,5B,89,28 */
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[7] = 0x02;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[6] = 0xAB;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[5] = 0x2A;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[4] = 0x20;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[3] = 0x99;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[2] = 0x5B;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[1] = 0x89;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_2[0] = 0x28;
/* Bytes for TH-filter part 3 (02): 00,88,DA,25,34,C5,4C,BA */
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[7] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[6] = 0x88;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[5] = 0xDA;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[4] = 0x25;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[3] = 0x34;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[2] = 0xC5;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[1] = 0x4C;
j->m_DAAShadowRegs.COP_REGS.COP.THFilterCoeff_3[0] = 0xBA;
/* ; idle */
/* Bytes for Ringing part 1 (03):1B,3C,93,3A,22,12,A3,23 */
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[7] = 0x1B;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[6] = 0x3C;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[5] = 0x93;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[4] = 0x3A;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[3] = 0x22;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[2] = 0x12;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[1] = 0xA3;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_1[0] = 0x23;
/* Bytes for Ringing part 2 (06):12,A2,A6,BA,22,7A,0A,D5 */
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[7] = 0x12;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[6] = 0xA2;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[5] = 0xA6;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[4] = 0xBA;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[3] = 0x22;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[2] = 0x7A;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[1] = 0x0A;
j->m_DAAShadowRegs.COP_REGS.COP.RingerImpendance_2[0] = 0xD5;
/* Levelmetering Ringing (0D):AA,35,0F,8E ; 25Hz 30V ????????? */
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[3] = 0xAA;
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[2] = 0x35;
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[1] = 0x0F;
j->m_DAAShadowRegs.COP_REGS.COP.LevelmeteringRinging[0] = 0x8E;
/* Caller ID 1st Tone (0E):CA,0E,CA,09,99,99,99,99 */
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[7] = 0xCA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[6] = 0x0E;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[5] = 0xCA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[4] = 0x09;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[3] = 0x99;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[2] = 0x99;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[1] = 0x99;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID1stTone[0] = 0x99;
/* Caller ID 2nd Tone (0F):FD,B5,BA,07,DA,00,00,00 */
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[7] = 0xFD;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[6] = 0xB5;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[5] = 0xBA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[4] = 0x07;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[3] = 0xDA;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[2] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[1] = 0x00;
j->m_DAAShadowRegs.COP_REGS.COP.CallerID2ndTone[0] = 0x00;
/* ;CR Registers */
/* Config. Reg. 0 (filters) (cr0):FF */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr0.reg = 0xFF;
/* Config. Reg. 1 (dialing) (cr1):05 */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr1.reg = 0x05;
/* Config. Reg. 2 (caller ID) (cr2):04 */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr2.reg = 0x04;
/* Config. Reg. 3 (testloops) (cr3):00 ; */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr3.reg = 0x00;
/* Config. Reg. 4 (analog gain) (cr4):02 */
j->m_DAAShadowRegs.SOP_REGS.SOP.cr4.reg = 0x02;
/* cr5-cr7 (Version / Reserved) are deliberately left untouched. */
/* Config. Reg. 5 (Version) (cr5):02 */
/* Config. Reg. 6 (Reserved) (cr6):00 */
/* Config. Reg. 7 (Reserved) (cr7):00 */
/* ;xr Registers */
/* Ext. Reg. 0 (Interrupt Reg.) (xr0):02 */
j->m_DAAShadowRegs.XOP_xr0_W.reg = 0x02;	/* SO_1 set to '1' because it is inverted. */
/* Ext. Reg. 1 (Interrupt enable) (xr1):1C */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr1.reg = 0x1C;	/* RING, Caller ID, VDD_OK */
/* Ext. Reg. 2 (Cadence Time Out) (xr2):7D */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr2.reg = 0x7D;
/* Ext. Reg. 3 (DC Char) (xr3):22 ; */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr3.reg = 0x22;
/* Ext. Reg. 4 (Cadence) (xr4):00 */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr4.reg = 0x00;
/* Ext. Reg. 5 (Ring timer) (xr5):22 */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr5.reg = 0x22;
/* Ext. Reg. 6 (Power State) (xr6):00 */
j->m_DAAShadowRegs.XOP_xr6_W.reg = 0x00;
/* Ext. Reg. 7 (Vdd) (xr7):40 */
j->m_DAAShadowRegs.XOP_REGS.XOP.xr7.reg = 0x40;	/* 0x40 ??? Should it be 0x00? */
/* DTMF Tone 1 (0B): 11,B3,5A,2C ; 697 Hz */
/*                   12,33,5A,C3 ; 770 Hz */
/*                   13,3C,5B,32 ; 852 Hz */
/*                   1D,1B,5C,CC ; 941 Hz */
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[3] = 0x11;
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[2] = 0xB3;
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[1] = 0x5A;
j->m_DAAShadowRegs.COP_REGS.COP.Tone1Coeff[0] = 0x2C;
/* DTMF Tone 2 (0C): 32,32,52,B3 ; 1209 Hz */
/*                   EC,1D,52,22 ; 1336 Hz */
/*                   AA,AC,51,D2 ; 1477 Hz */
/*                   9B,3B,51,25 ; 1633 Hz */
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[3] = 0x32;
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[2] = 0x32;
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[1] = 0x52;
j->m_DAAShadowRegs.COP_REGS.COP.Tone2Coeff[0] = 0xB3;
}
static s16 tone_table[][19] =
{
{ /* f20_50[] 11 */
32538, /* A1 = 1.985962 */
-32325, /* A2 = -0.986511 */
-343, /* B2 = -0.010493 */
0, /* B1 = 0 */
343, /* B0 = 0.010493 */
32619, /* A1 = 1.990906 */
-32520, /* A2 = -0.992462 */
19179, /* B2 = 0.585327 */
-19178, /* B1 = -1.170593 */
19179, /* B0 = 0.585327 */
32723, /* A1 = 1.997314 */
-32686, /* A2 = -0.997528 */
9973, /* B2 = 0.304352 */
-9955, /* B1 = -0.607605 */
9973, /* B0 = 0.304352 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f133_200[] 12 */
32072, /* A1 = 1.95752 */
-31896, /* A2 = -0.973419 */
-435, /* B2 = -0.013294 */
0, /* B1 = 0 */
435, /* B0 = 0.013294 */
32188, /* A1 = 1.9646 */
-32400, /* A2 = -0.98877 */
15139, /* B2 = 0.462036 */
-14882, /* B1 = -0.908356 */
15139, /* B0 = 0.462036 */
32473, /* A1 = 1.981995 */
-32524, /* A2 = -0.992584 */
23200, /* B2 = 0.708008 */
-23113, /* B1 = -1.410706 */
23200, /* B0 = 0.708008 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f300 13 */
31769, /* A1 = -1.939026 */
-32584, /* A2 = 0.994385 */
-475, /* B2 = -0.014522 */
0, /* B1 = 0.000000 */
475, /* B0 = 0.014522 */
31789, /* A1 = -1.940247 */
-32679, /* A2 = 0.997284 */
17280, /* B2 = 0.527344 */
-16865, /* B1 = -1.029358 */
17280, /* B0 = 0.527344 */
31841, /* A1 = -1.943481 */
-32681, /* A2 = 0.997345 */
543, /* B2 = 0.016579 */
-525, /* B1 = -0.032097 */
543, /* B0 = 0.016579 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f300_420[] 14 */
30750, /* A1 = 1.876892 */
-31212, /* A2 = -0.952515 */
-804, /* B2 = -0.024541 */
0, /* B1 = 0 */
804, /* B0 = 0.024541 */
30686, /* A1 = 1.872925 */
-32145, /* A2 = -0.980988 */
14747, /* B2 = 0.450043 */
-13703, /* B1 = -0.836395 */
14747, /* B0 = 0.450043 */
31651, /* A1 = 1.931824 */
-32321, /* A2 = -0.986389 */
24425, /* B2 = 0.745422 */
-23914, /* B1 = -1.459595 */
24427, /* B0 = 0.745483 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f330 15 */
31613, /* A1 = -1.929565 */
-32646, /* A2 = 0.996277 */
-185, /* B2 = -0.005657 */
0, /* B1 = 0.000000 */
185, /* B0 = 0.005657 */
31620, /* A1 = -1.929932 */
-32713, /* A2 = 0.998352 */
19253, /* B2 = 0.587585 */
-18566, /* B1 = -1.133179 */
19253, /* B0 = 0.587585 */
31674, /* A1 = -1.933228 */
-32715, /* A2 = 0.998413 */
2575, /* B2 = 0.078590 */
-2495, /* B1 = -0.152283 */
2575, /* B0 = 0.078590 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f300_425[] 16 */
30741, /* A1 = 1.876282 */
-31475, /* A2 = -0.960541 */
-703, /* B2 = -0.021484 */
0, /* B1 = 0 */
703, /* B0 = 0.021484 */
30688, /* A1 = 1.873047 */
-32248, /* A2 = -0.984161 */
14542, /* B2 = 0.443787 */
-13523, /* B1 = -0.825439 */
14542, /* B0 = 0.443817 */
31494, /* A1 = 1.922302 */
-32366, /* A2 = -0.987762 */
21577, /* B2 = 0.658508 */
-21013, /* B1 = -1.282532 */
21577, /* B0 = 0.658508 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f330_440[] 17 */
30627, /* A1 = 1.869324 */
-31338, /* A2 = -0.95636 */
-843, /* B2 = -0.025749 */
0, /* B1 = 0 */
843, /* B0 = 0.025749 */
30550, /* A1 = 1.864685 */
-32221, /* A2 = -0.983337 */
13594, /* B2 = 0.414886 */
-12589, /* B1 = -0.768402 */
13594, /* B0 = 0.414886 */
31488, /* A1 = 1.921936 */
-32358, /* A2 = -0.987518 */
24684, /* B2 = 0.753296 */
-24029, /* B1 = -1.466614 */
24684, /* B0 = 0.753296 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f340 18 */
31546, /* A1 = -1.925476 */
-32646, /* A2 = 0.996277 */
-445, /* B2 = -0.013588 */
0, /* B1 = 0.000000 */
445, /* B0 = 0.013588 */
31551, /* A1 = -1.925781 */
-32713, /* A2 = 0.998352 */
23884, /* B2 = 0.728882 */
-22979, /* B1 = -1.402527 */
23884, /* B0 = 0.728882 */
31606, /* A1 = -1.929138 */
-32715, /* A2 = 0.998413 */
863, /* B2 = 0.026367 */
-835, /* B1 = -0.050985 */
863, /* B0 = 0.026367 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f350_400[] 19 */
31006, /* A1 = 1.892517 */
-32029, /* A2 = -0.977448 */
-461, /* B2 = -0.014096 */
0, /* B1 = 0 */
461, /* B0 = 0.014096 */
30999, /* A1 = 1.892029 */
-32487, /* A2 = -0.991455 */
11325, /* B2 = 0.345612 */
-10682, /* B1 = -0.651978 */
11325, /* B0 = 0.345612 */
31441, /* A1 = 1.919067 */
-32526, /* A2 = -0.992615 */
24324, /* B2 = 0.74231 */
-23535, /* B1 = -1.436523 */
24324, /* B0 = 0.74231 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f350_440[] */
30634, /* A1 = 1.869751 */
-31533, /* A2 = -0.962341 */
-680, /* B2 = -0.020782 */
0, /* B1 = 0 */
680, /* B0 = 0.020782 */
30571, /* A1 = 1.865906 */
-32277, /* A2 = -0.985016 */
12894, /* B2 = 0.393524 */
-11945, /* B1 = -0.729065 */
12894, /* B0 = 0.393524 */
31367, /* A1 = 1.91449 */
-32379, /* A2 = -0.988129 */
23820, /* B2 = 0.726929 */
-23104, /* B1 = -1.410217 */
23820, /* B0 = 0.726929 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f350_450[] */
30552, /* A1 = 1.864807 */
-31434, /* A2 = -0.95929 */
-690, /* B2 = -0.021066 */
0, /* B1 = 0 */
690, /* B0 = 0.021066 */
30472, /* A1 = 1.859924 */
-32248, /* A2 = -0.984161 */
13385, /* B2 = 0.408478 */
-12357, /* B1 = -0.754242 */
13385, /* B0 = 0.408478 */
31358, /* A1 = 1.914001 */
-32366, /* A2 = -0.987732 */
26488, /* B2 = 0.80835 */
-25692, /* B1 = -1.568176 */
26490, /* B0 = 0.808411 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f360 */
31397, /* A1 = -1.916321 */
-32623, /* A2 = 0.995605 */
-117, /* B2 = -0.003598 */
0, /* B1 = 0.000000 */
117, /* B0 = 0.003598 */
31403, /* A1 = -1.916687 */
-32700, /* A2 = 0.997925 */
3388, /* B2 = 0.103401 */
-3240, /* B1 = -0.197784 */
3388, /* B0 = 0.103401 */
31463, /* A1 = -1.920410 */
-32702, /* A2 = 0.997986 */
13346, /* B2 = 0.407288 */
-12863, /* B1 = -0.785126 */
13346, /* B0 = 0.407288 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f380_420[] */
30831, /* A1 = 1.881775 */
-32064, /* A2 = -0.978546 */
-367, /* B2 = -0.01122 */
0, /* B1 = 0 */
367, /* B0 = 0.01122 */
30813, /* A1 = 1.880737 */
-32456, /* A2 = -0.990509 */
11068, /* B2 = 0.337769 */
-10338, /* B1 = -0.631042 */
11068, /* B0 = 0.337769 */
31214, /* A1 = 1.905212 */
-32491, /* A2 = -0.991577 */
16374, /* B2 = 0.499695 */
-15781, /* B1 = -0.963196 */
16374, /* B0 = 0.499695 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f392 */
31152, /* A1 = -1.901428 */
-32613, /* A2 = 0.995300 */
-314, /* B2 = -0.009605 */
0, /* B1 = 0.000000 */
314, /* B0 = 0.009605 */
31156, /* A1 = -1.901672 */
-32694, /* A2 = 0.997742 */
28847, /* B2 = 0.880371 */
-2734, /* B1 = -0.166901 */
28847, /* B0 = 0.880371 */
31225, /* A1 = -1.905823 */
-32696, /* A2 = 0.997803 */
462, /* B2 = 0.014108 */
-442, /* B1 = -0.027019 */
462, /* B0 = 0.014108 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f400_425[] */
30836, /* A1 = 1.882141 */
-32296, /* A2 = -0.985596 */
-324, /* B2 = -0.009903 */
0, /* B1 = 0 */
324, /* B0 = 0.009903 */
30825, /* A1 = 1.881409 */
-32570, /* A2 = -0.993958 */
16847, /* B2 = 0.51416 */
-15792, /* B1 = -0.963898 */
16847, /* B0 = 0.51416 */
31106, /* A1 = 1.89856 */
-32584, /* A2 = -0.994415 */
9579, /* B2 = 0.292328 */
-9164, /* B1 = -0.559357 */
9579, /* B0 = 0.292328 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f400_440[] */
30702, /* A1 = 1.873962 */
-32134, /* A2 = -0.980682 */
-517, /* B2 = -0.015793 */
0, /* B1 = 0 */
517, /* B0 = 0.015793 */
30676, /* A1 = 1.872375 */
-32520, /* A2 = -0.992462 */
8144, /* B2 = 0.24855 */
-7596, /* B1 = -0.463684 */
8144, /* B0 = 0.24855 */
31084, /* A1 = 1.897217 */
-32547, /* A2 = -0.993256 */
22713, /* B2 = 0.693176 */
-21734, /* B1 = -1.326599 */
22713, /* B0 = 0.693176 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f400_450[] */
30613, /* A1 = 1.86853 */
-32031, /* A2 = -0.977509 */
-618, /* B2 = -0.018866 */
0, /* B1 = 0 */
618, /* B0 = 0.018866 */
30577, /* A1 = 1.866272 */
-32491, /* A2 = -0.991577 */
9612, /* B2 = 0.293335 */
-8935, /* B1 = -0.54541 */
9612, /* B0 = 0.293335 */
31071, /* A1 = 1.896484 */
-32524, /* A2 = -0.992584 */
21596, /* B2 = 0.659058 */
-20667, /* B1 = -1.261414 */
21596, /* B0 = 0.659058 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f420 */
30914, /* A1 = -1.886841 */
-32584, /* A2 = 0.994385 */
-426, /* B2 = -0.013020 */
0, /* B1 = 0.000000 */
426, /* B0 = 0.013020 */
30914, /* A1 = -1.886841 */
-32679, /* A2 = 0.997314 */
17520, /* B2 = 0.534668 */
-16471, /* B1 = -1.005310 */
17520, /* B0 = 0.534668 */
31004, /* A1 = -1.892334 */
-32683, /* A2 = 0.997406 */
819, /* B2 = 0.025023 */
-780, /* B1 = -0.047619 */
819, /* B0 = 0.025023 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
#if 0
{ /* f425 */
30881, /* A1 = -1.884827 */
-32603, /* A2 = 0.994965 */
-496, /* B2 = -0.015144 */
0, /* B1 = 0.000000 */
496, /* B0 = 0.015144 */
30880, /* A1 = -1.884766 */
-32692, /* A2 = 0.997711 */
24767, /* B2 = 0.755859 */
-23290, /* B1 = -1.421509 */
24767, /* B0 = 0.755859 */
30967, /* A1 = -1.890076 */
-32694, /* A2 = 0.997772 */
728, /* B2 = 0.022232 */
-691, /* B1 = -0.042194 */
728, /* B0 = 0.022232 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
#else
{
30850,
-32534,
-504,
0,
504,
30831,
-32669,
24303,
-22080,
24303,
30994,
-32673,
1905,
-1811,
1905,
5,
129,
17,
0xff5
},
#endif
{ /* f425_450[] */
30646, /* A1 = 1.870544 */
-32327, /* A2 = -0.986572 */
-287, /* B2 = -0.008769 */
0, /* B1 = 0 */
287, /* B0 = 0.008769 */
30627, /* A1 = 1.869324 */
-32607, /* A2 = -0.995087 */
13269, /* B2 = 0.404968 */
-12376, /* B1 = -0.755432 */
13269, /* B0 = 0.404968 */
30924, /* A1 = 1.887512 */
-32619, /* A2 = -0.995453 */
19950, /* B2 = 0.608826 */
-18940, /* B1 = -1.156006 */
19950, /* B0 = 0.608826 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f425_475[] */
30396, /* A1 = 1.855225 */
-32014, /* A2 = -0.97699 */
-395, /* B2 = -0.012055 */
0, /* B1 = 0 */
395, /* B0 = 0.012055 */
30343, /* A1 = 1.85199 */
-32482, /* A2 = -0.991302 */
17823, /* B2 = 0.543945 */
-16431, /* B1 = -1.002869 */
17823, /* B0 = 0.543945 */
30872, /* A1 = 1.884338 */
-32516, /* A2 = -0.99231 */
18124, /* B2 = 0.553101 */
-17246, /* B1 = -1.052673 */
18124, /* B0 = 0.553101 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f435 */
30796, /* A1 = -1.879639 */
-32603, /* A2 = 0.994965 */
-254, /* B2 = -0.007762 */
0, /* B1 = 0.000000 */
254, /* B0 = 0.007762 */
30793, /* A1 = -1.879456 */
-32692, /* A2 = 0.997711 */
18934, /* B2 = 0.577820 */
-17751, /* B1 = -1.083496 */
18934, /* B0 = 0.577820 */
30882, /* A1 = -1.884888 */
-32694, /* A2 = 0.997772 */
1858, /* B2 = 0.056713 */
-1758, /* B1 = -0.107357 */
1858, /* B0 = 0.056713 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f440_450[] */
30641, /* A1 = 1.870239 */
-32458, /* A2 = -0.99057 */
-155, /* B2 = -0.004735 */
0, /* B1 = 0 */
155, /* B0 = 0.004735 */
30631, /* A1 = 1.869568 */
-32630, /* A2 = -0.995789 */
11453, /* B2 = 0.349548 */
-10666, /* B1 = -0.651001 */
11453, /* B0 = 0.349548 */
30810, /* A1 = 1.880554 */
-32634, /* A2 = -0.995941 */
12237, /* B2 = 0.373474 */
-11588, /* B1 = -0.707336 */
12237, /* B0 = 0.373474 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f440_480[] */
30367, /* A1 = 1.853455 */
-32147, /* A2 = -0.981079 */
-495, /* B2 = -0.015113 */
0, /* B1 = 0 */
495, /* B0 = 0.015113 */
30322, /* A1 = 1.850769 */
-32543, /* A2 = -0.993134 */
10031, /* B2 = 0.306152 */
-9252, /* B1 = -0.564728 */
10031, /* B0 = 0.306152 */
30770, /* A1 = 1.878052 */
-32563, /* A2 = -0.993774 */
22674, /* B2 = 0.691956 */
-21465, /* B1 = -1.31012 */
22674, /* B0 = 0.691956 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f445 */
30709, /* A1 = -1.874329 */
-32603, /* A2 = 0.994965 */
-83, /* B2 = -0.002545 */
0, /* B1 = 0.000000 */
83, /* B0 = 0.002545 */
30704, /* A1 = -1.874084 */
-32692, /* A2 = 0.997711 */
10641, /* B2 = 0.324738 */
-9947, /* B1 = -0.607147 */
10641, /* B0 = 0.324738 */
30796, /* A1 = -1.879639 */
-32694, /* A2 = 0.997772 */
10079, /* B2 = 0.307587 */
9513, /* B1 = 0.580688 */
10079, /* B0 = 0.307587 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f450 */
30664, /* A1 = -1.871643 */
-32603, /* A2 = 0.994965 */
-164, /* B2 = -0.005029 */
0, /* B1 = 0.000000 */
164, /* B0 = 0.005029 */
30661, /* A1 = -1.871399 */
-32692, /* A2 = 0.997711 */
15294, /* B2 = 0.466736 */
-14275, /* B1 = -0.871307 */
15294, /* B0 = 0.466736 */
30751, /* A1 = -1.876953 */
-32694, /* A2 = 0.997772 */
3548, /* B2 = 0.108284 */
-3344, /* B1 = -0.204155 */
3548, /* B0 = 0.108284 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f452 */
30653, /* A1 = -1.870911 */
-32615, /* A2 = 0.995361 */
-209, /* B2 = -0.006382 */
0, /* B1 = 0.000000 */
209, /* B0 = 0.006382 */
30647, /* A1 = -1.870605 */
-32702, /* A2 = 0.997986 */
18971, /* B2 = 0.578979 */
-17716, /* B1 = -1.081299 */
18971, /* B0 = 0.578979 */
30738, /* A1 = -1.876099 */
-32702, /* A2 = 0.998016 */
2967, /* B2 = 0.090561 */
-2793, /* B1 = -0.170502 */
2967, /* B0 = 0.090561 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f475 */
30437, /* A1 = -1.857727 */
-32603, /* A2 = 0.994965 */
-264, /* B2 = -0.008062 */
0, /* B1 = 0.000000 */
264, /* B0 = 0.008062 */
30430, /* A1 = -1.857300 */
-32692, /* A2 = 0.997711 */
21681, /* B2 = 0.661682 */
-20082, /* B1 = -1.225708 */
21681, /* B0 = 0.661682 */
30526, /* A1 = -1.863220 */
-32694, /* A2 = 0.997742 */
1559, /* B2 = 0.047600 */
-1459, /* B1 = -0.089096 */
1559, /* B0 = 0.047600 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f480_620[] */
28975, /* A1 = 1.768494 */
-30955, /* A2 = -0.944672 */
-1026, /* B2 = -0.03133 */
0, /* B1 = 0 */
1026, /* B0 = 0.03133 */
28613, /* A1 = 1.746399 */
-32089, /* A2 = -0.979309 */
14214, /* B2 = 0.433807 */
-12202, /* B1 = -0.744812 */
14214, /* B0 = 0.433807 */
30243, /* A1 = 1.845947 */
-32238, /* A2 = -0.983856 */
24825, /* B2 = 0.757629 */
-23402, /* B1 = -1.428345 */
24825, /* B0 = 0.757629 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f494 */
30257, /* A1 = -1.846741 */
-32605, /* A2 = 0.995056 */
-249, /* B2 = -0.007625 */
0, /* B1 = 0.000000 */
249, /* B0 = 0.007625 */
30247, /* A1 = -1.846191 */
-32694, /* A2 = 0.997772 */
18088, /* B2 = 0.552002 */
-16652, /* B1 = -1.016418 */
18088, /* B0 = 0.552002 */
30348, /* A1 = -1.852295 */
-32696, /* A2 = 0.997803 */
2099, /* B2 = 0.064064 */
-1953, /* B1 = -0.119202 */
2099, /* B0 = 0.064064 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f500 */
30202, /* A1 = -1.843431 */
-32624, /* A2 = 0.995622 */
-413, /* B2 = -0.012622 */
0, /* B1 = 0.000000 */
413, /* B0 = 0.012622 */
30191, /* A1 = -1.842721 */
-32714, /* A2 = 0.998364 */
25954, /* B2 = 0.792057 */
-23890, /* B1 = -1.458131 */
25954, /* B0 = 0.792057 */
30296, /* A1 = -1.849172 */
-32715, /* A2 = 0.998397 */
2007, /* B2 = 0.061264 */
-1860, /* B1 = -0.113568 */
2007, /* B0 = 0.061264 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f520 */
30001, /* A1 = -1.831116 */
-32613, /* A2 = 0.995270 */
-155, /* B2 = -0.004750 */
0, /* B1 = 0.000000 */
155, /* B0 = 0.004750 */
29985, /* A1 = -1.830200 */
-32710, /* A2 = 0.998260 */
6584, /* B2 = 0.200928 */
-6018, /* B1 = -0.367355 */
6584, /* B0 = 0.200928 */
30105, /* A1 = -1.837524 */
-32712, /* A2 = 0.998291 */
23812, /* B2 = 0.726685 */
-21936, /* B1 = -1.338928 */
23812, /* B0 = 0.726685 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f523 */
29964, /* A1 = -1.828918 */
-32601, /* A2 = 0.994904 */
-101, /* B2 = -0.003110 */
0, /* B1 = 0.000000 */
101, /* B0 = 0.003110 */
29949, /* A1 = -1.827942 */
-32700, /* A2 = 0.997925 */
11041, /* B2 = 0.336975 */
-10075, /* B1 = -0.614960 */
11041, /* B0 = 0.336975 */
30070, /* A1 = -1.835388 */
-32702, /* A2 = 0.997986 */
16762, /* B2 = 0.511536 */
-15437, /* B1 = -0.942230 */
16762, /* B0 = 0.511536 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f525 */
29936, /* A1 = -1.827209 */
-32584, /* A2 = 0.994415 */
-91, /* B2 = -0.002806 */
0, /* B1 = 0.000000 */
91, /* B0 = 0.002806 */
29921, /* A1 = -1.826233 */
-32688, /* A2 = 0.997559 */
11449, /* B2 = 0.349396 */
-10426, /* B1 = -0.636383 */
11449, /* B0 = 0.349396 */
30045, /* A1 = -1.833862 */
-32688, /* A2 = 0.997589 */
13055, /* B2 = 0.398407 */
-12028, /* B1 = -0.734161 */
13055, /* B0 = 0.398407 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f540_660[] */
28499, /* A1 = 1.739441 */
-31129, /* A2 = -0.949982 */
-849, /* B2 = -0.025922 */
0, /* B1 = 0 */
849, /* B0 = 0.025922 */
28128, /* A1 = 1.716797 */
-32130, /* A2 = -0.98056 */
14556, /* B2 = 0.444214 */
-12251, /* B1 = -0.747772 */
14556, /* B0 = 0.444244 */
29667, /* A1 = 1.81073 */
-32244, /* A2 = -0.984039 */
23038, /* B2 = 0.703064 */
-21358, /* B1 = -1.303589 */
23040, /* B0 = 0.703125 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f587 */
29271, /* A1 = -1.786560 */
-32599, /* A2 = 0.994873 */
-490, /* B2 = -0.014957 */
0, /* B1 = 0.000000 */
490, /* B0 = 0.014957 */
29246, /* A1 = -1.785095 */
-32700, /* A2 = 0.997925 */
28961, /* B2 = 0.883850 */
-25796, /* B1 = -1.574463 */
28961, /* B0 = 0.883850 */
29383, /* A1 = -1.793396 */
-32700, /* A2 = 0.997955 */
1299, /* B2 = 0.039650 */
-1169, /* B1 = -0.071396 */
1299, /* B0 = 0.039650 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f590 */
29230, /* A1 = -1.784058 */
-32584, /* A2 = 0.994415 */
-418, /* B2 = -0.012757 */
0, /* B1 = 0.000000 */
418, /* B0 = 0.012757 */
29206, /* A1 = -1.782593 */
-32688, /* A2 = 0.997559 */
36556, /* B2 = 1.115601 */
-32478, /* B1 = -1.982300 */
36556, /* B0 = 1.115601 */
29345, /* A1 = -1.791077 */
-32688, /* A2 = 0.997589 */
897, /* B2 = 0.027397 */
-808, /* B1 = -0.049334 */
897, /* B0 = 0.027397 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f600 */
29116, /* A1 = -1.777100 */
-32603, /* A2 = 0.994965 */
-165, /* B2 = -0.005039 */
0, /* B1 = 0.000000 */
165, /* B0 = 0.005039 */
29089, /* A1 = -1.775452 */
-32708, /* A2 = 0.998199 */
6963, /* B2 = 0.212494 */
-6172, /* B1 = -0.376770 */
6963, /* B0 = 0.212494 */
29237, /* A1 = -1.784485 */
-32710, /* A2 = 0.998230 */
24197, /* B2 = 0.738464 */
-21657, /* B1 = -1.321899 */
24197, /* B0 = 0.738464 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f660 */
28376, /* A1 = -1.731934 */
-32567, /* A2 = 0.993896 */
-363, /* B2 = -0.011102 */
0, /* B1 = 0.000000 */
363, /* B0 = 0.011102 */
28337, /* A1 = -1.729614 */
-32683, /* A2 = 0.997434 */
21766, /* B2 = 0.664246 */
-18761, /* B1 = -1.145081 */
21766, /* B0 = 0.664246 */
28513, /* A1 = -1.740356 */
-32686, /* A2 = 0.997498 */
2509, /* B2 = 0.076584 */
-2196, /* B1 = -0.134041 */
2509, /* B0 = 0.076584 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f700 */
27844, /* A1 = -1.699463 */
-32563, /* A2 = 0.993744 */
-366, /* B2 = -0.011187 */
0, /* B1 = 0.000000 */
366, /* B0 = 0.011187 */
27797, /* A1 = -1.696655 */
-32686, /* A2 = 0.997498 */
22748, /* B2 = 0.694214 */
-19235, /* B1 = -1.174072 */
22748, /* B0 = 0.694214 */
27995, /* A1 = -1.708740 */
-32688, /* A2 = 0.997559 */
2964, /* B2 = 0.090477 */
-2546, /* B1 = -0.155449 */
2964, /* B0 = 0.090477 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f740 */
27297, /* A1 = -1.666077 */
-32551, /* A2 = 0.993408 */
-345, /* B2 = -0.010540 */
0, /* B1 = 0.000000 */
345, /* B0 = 0.010540 */
27240, /* A1 = -1.662598 */
-32683, /* A2 = 0.997406 */
22560, /* B2 = 0.688477 */
-18688, /* B1 = -1.140625 */
22560, /* B0 = 0.688477 */
27461, /* A1 = -1.676147 */
-32684, /* A2 = 0.997467 */
3541, /* B2 = 0.108086 */
-2985, /* B1 = -0.182220 */
3541, /* B0 = 0.108086 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f750 */
27155, /* A1 = -1.657410 */
-32551, /* A2 = 0.993408 */
-462, /* B2 = -0.014117 */
0, /* B1 = 0.000000 */
462, /* B0 = 0.014117 */
27097, /* A1 = -1.653870 */
-32683, /* A2 = 0.997406 */
32495, /* B2 = 0.991699 */
-26776, /* B1 = -1.634338 */
32495, /* B0 = 0.991699 */
27321, /* A1 = -1.667542 */
-32684, /* A2 = 0.997467 */
1835, /* B2 = 0.056007 */
-1539, /* B1 = -0.093948 */
1835, /* B0 = 0.056007 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f750_1450[] */
19298, /* A1 = 1.177917 */
-24471, /* A2 = -0.746796 */
-4152, /* B2 = -0.126709 */
0, /* B1 = 0 */
4152, /* B0 = 0.126709 */
12902, /* A1 = 0.787476 */
-29091, /* A2 = -0.887817 */
12491, /* B2 = 0.38121 */
-1794, /* B1 = -0.109528 */
12494, /* B0 = 0.381317 */
26291, /* A1 = 1.604736 */
-30470, /* A2 = -0.929901 */
28859, /* B2 = 0.880737 */
-26084, /* B1 = -1.592102 */
28861, /* B0 = 0.880798 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f770 */
26867, /* A1 = -1.639832 */
-32551, /* A2 = 0.993408 */
-123, /* B2 = -0.003755 */
0, /* B1 = 0.000000 */
123, /* B0 = 0.003755 */
26805, /* A1 = -1.636108 */
-32683, /* A2 = 0.997406 */
17297, /* B2 = 0.527863 */
-14096, /* B1 = -0.860382 */
17297, /* B0 = 0.527863 */
27034, /* A1 = -1.650085 */
-32684, /* A2 = 0.997467 */
12958, /* B2 = 0.395477 */
-10756, /* B1 = -0.656525 */
12958, /* B0 = 0.395477 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f800 */
26413, /* A1 = -1.612122 */
-32547, /* A2 = 0.993286 */
-223, /* B2 = -0.006825 */
0, /* B1 = 0.000000 */
223, /* B0 = 0.006825 */
26342, /* A1 = -1.607849 */
-32686, /* A2 = 0.997498 */
6391, /* B2 = 0.195053 */
-5120, /* B1 = -0.312531 */
6391, /* B0 = 0.195053 */
26593, /* A1 = -1.623108 */
-32688, /* A2 = 0.997559 */
23681, /* B2 = 0.722717 */
-19328, /* B1 = -1.179688 */
23681, /* B0 = 0.722717 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f816 */
26168, /* A1 = -1.597209 */
-32528, /* A2 = 0.992706 */
-235, /* B2 = -0.007182 */
0, /* B1 = 0.000000 */
235, /* B0 = 0.007182 */
26092, /* A1 = -1.592590 */
-32675, /* A2 = 0.997192 */
20823, /* B2 = 0.635498 */
-16510, /* B1 = -1.007751 */
20823, /* B0 = 0.635498 */
26363, /* A1 = -1.609070 */
-32677, /* A2 = 0.997253 */
6739, /* B2 = 0.205688 */
-5459, /* B1 = -0.333206 */
6739, /* B0 = 0.205688 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f850 */
25641, /* A1 = -1.565063 */
-32536, /* A2 = 0.992950 */
-121, /* B2 = -0.003707 */
0, /* B1 = 0.000000 */
121, /* B0 = 0.003707 */
25560, /* A1 = -1.560059 */
-32684, /* A2 = 0.997437 */
18341, /* B2 = 0.559753 */
-14252, /* B1 = -0.869904 */
18341, /* B0 = 0.559753 */
25837, /* A1 = -1.577026 */
-32684, /* A2 = 0.997467 */
16679, /* B2 = 0.509003 */
-13232, /* B1 = -0.807648 */
16679, /* B0 = 0.509003 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f857_1645[] */
16415, /* A1 = 1.001953 */
-23669, /* A2 = -0.722321 */
-4549, /* B2 = -0.138847 */
0, /* B1 = 0 */
4549, /* B0 = 0.138847 */
8456, /* A1 = 0.516174 */
-28996, /* A2 = -0.884918 */
13753, /* B2 = 0.419724 */
-12, /* B1 = -0.000763 */
13757, /* B0 = 0.419846 */
24632, /* A1 = 1.503418 */
-30271, /* A2 = -0.923828 */
29070, /* B2 = 0.887146 */
-25265, /* B1 = -1.542114 */
29073, /* B0 = 0.887268 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f900 */
24806, /* A1 = -1.514099 */
-32501, /* A2 = 0.991852 */
-326, /* B2 = -0.009969 */
0, /* B1 = 0.000000 */
326, /* B0 = 0.009969 */
24709, /* A1 = -1.508118 */
-32659, /* A2 = 0.996674 */
20277, /* B2 = 0.618835 */
-15182, /* B1 = -0.926636 */
20277, /* B0 = 0.618835 */
25022, /* A1 = -1.527222 */
-32661, /* A2 = 0.996735 */
4320, /* B2 = 0.131836 */
-3331, /* B1 = -0.203339 */
4320, /* B0 = 0.131836 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f900_1300[] */
19776, /* A1 = 1.207092 */
-27437, /* A2 = -0.837341 */
-2666, /* B2 = -0.081371 */
0, /* B1 = 0 */
2666, /* B0 = 0.081371 */
16302, /* A1 = 0.995026 */
-30354, /* A2 = -0.926361 */
10389, /* B2 = 0.317062 */
-3327, /* B1 = -0.203064 */
10389, /* B0 = 0.317062 */
24299, /* A1 = 1.483154 */
-30930, /* A2 = -0.943909 */
25016, /* B2 = 0.763428 */
-21171, /* B1 = -1.292236 */
25016, /* B0 = 0.763428 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f935_1215[] */
20554, /* A1 = 1.254517 */
-28764, /* A2 = -0.877838 */
-2048, /* B2 = -0.062515 */
0, /* B1 = 0 */
2048, /* B0 = 0.062515 */
18209, /* A1 = 1.11145 */
-30951, /* A2 = -0.94458 */
9390, /* B2 = 0.286575 */
-3955, /* B1 = -0.241455 */
9390, /* B0 = 0.286575 */
23902, /* A1 = 1.458923 */
-31286, /* A2 = -0.954803 */
23252, /* B2 = 0.709595 */
-19132, /* B1 = -1.167725 */
23252, /* B0 = 0.709595 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f941_1477[] */
17543, /* A1 = 1.07074 */
-26220, /* A2 = -0.800201 */
-3298, /* B2 = -0.100647 */
0, /* B1 = 0 */
3298, /* B0 = 0.100647 */
12423, /* A1 = 0.75827 */
-30036, /* A2 = -0.916626 */
12651, /* B2 = 0.386078 */
-2444, /* B1 = -0.14917 */
12653, /* B0 = 0.386154 */
23518, /* A1 = 1.435425 */
-30745, /* A2 = -0.938293 */
27282, /* B2 = 0.832581 */
-22529, /* B1 = -1.375122 */
27286, /* B0 = 0.832703 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f942 */
24104, /* A1 = -1.471252 */
-32507, /* A2 = 0.992065 */
-351, /* B2 = -0.010722 */
0, /* B1 = 0.000000 */
351, /* B0 = 0.010722 */
23996, /* A1 = -1.464600 */
-32671, /* A2 = 0.997040 */
22848, /* B2 = 0.697266 */
-16639, /* B1 = -1.015564 */
22848, /* B0 = 0.697266 */
24332, /* A1 = -1.485168 */
-32673, /* A2 = 0.997101 */
4906, /* B2 = 0.149727 */
-3672, /* B1 = -0.224174 */
4906, /* B0 = 0.149727 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f950 */
23967, /* A1 = -1.462830 */
-32507, /* A2 = 0.992065 */
-518, /* B2 = -0.015821 */
0, /* B1 = 0.000000 */
518, /* B0 = 0.015821 */
23856, /* A1 = -1.456055 */
-32671, /* A2 = 0.997040 */
26287, /* B2 = 0.802246 */
-19031, /* B1 = -1.161560 */
26287, /* B0 = 0.802246 */
24195, /* A1 = -1.476746 */
-32673, /* A2 = 0.997101 */
2890, /* B2 = 0.088196 */
-2151, /* B1 = -0.131317 */
2890, /* B0 = 0.088196 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f950_1400[] */
18294, /* A1 = 1.116638 */
-26962, /* A2 = -0.822845 */
-2914, /* B2 = -0.088936 */
0, /* B1 = 0 */
2914, /* B0 = 0.088936 */
14119, /* A1 = 0.861786 */
-30227, /* A2 = -0.922455 */
11466, /* B2 = 0.349945 */
-2833, /* B1 = -0.172943 */
11466, /* B0 = 0.349945 */
23431, /* A1 = 1.430115 */
-30828, /* A2 = -0.940796 */
25331, /* B2 = 0.773071 */
-20911, /* B1 = -1.276367 */
25331, /* B0 = 0.773071 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f975 */
23521, /* A1 = -1.435608 */
-32489, /* A2 = 0.991516 */
-193, /* B2 = -0.005915 */
0, /* B1 = 0.000000 */
193, /* B0 = 0.005915 */
23404, /* A1 = -1.428467 */
-32655, /* A2 = 0.996582 */
17740, /* B2 = 0.541412 */
-12567, /* B1 = -0.767029 */
17740, /* B0 = 0.541412 */
23753, /* A1 = -1.449829 */
-32657, /* A2 = 0.996613 */
9090, /* B2 = 0.277405 */
-6662, /* B1 = -0.406647 */
9090, /* B0 = 0.277405 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1000 */
23071, /* A1 = -1.408203 */
-32489, /* A2 = 0.991516 */
-293, /* B2 = -0.008965 */
0, /* B1 = 0.000000 */
293, /* B0 = 0.008965 */
22951, /* A1 = -1.400818 */
-32655, /* A2 = 0.996582 */
5689, /* B2 = 0.173645 */
-3951, /* B1 = -0.241150 */
5689, /* B0 = 0.173645 */
23307, /* A1 = -1.422607 */
-32657, /* A2 = 0.996613 */
18692, /* B2 = 0.570435 */
-13447, /* B1 = -0.820770 */
18692, /* B0 = 0.570435 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1020 */
22701, /* A1 = -1.385620 */
-32474, /* A2 = 0.991058 */
-292, /* B2 = -0.008933 */
0, /* B1 = 0.000000 */
292, /* B0 = 0.008933 */
22564, /* A1 = -1.377258 */
-32655, /* A2 = 0.996552 */
20756, /* B2 = 0.633423 */
-14176, /* B1 = -0.865295 */
20756, /* B0 = 0.633423 */
22960, /* A1 = -1.401428 */
-32657, /* A2 = 0.996613 */
6520, /* B2 = 0.198990 */
-4619, /* B1 = -0.281937 */
6520, /* B0 = 0.198990 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1050 */
22142, /* A1 = -1.351501 */
-32474, /* A2 = 0.991058 */
-147, /* B2 = -0.004493 */
0, /* B1 = 0.000000 */
147, /* B0 = 0.004493 */
22000, /* A1 = -1.342834 */
-32655, /* A2 = 0.996552 */
15379, /* B2 = 0.469360 */
-10237, /* B1 = -0.624847 */
15379, /* B0 = 0.469360 */
22406, /* A1 = -1.367554 */
-32657, /* A2 = 0.996613 */
17491, /* B2 = 0.533783 */
-12096, /* B1 = -0.738312 */
17491, /* B0 = 0.533783 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1100_1750[] */
12973, /* A1 = 0.79184 */
-24916, /* A2 = -0.760376 */
6655, /* B2 = 0.203102 */
367, /* B1 = 0.0224 */
6657, /* B0 = 0.203171 */
5915, /* A1 = 0.361053 */
-29560, /* A2 = -0.90213 */
-7777, /* B2 = -0.23735 */
0, /* B1 = 0 */
7777, /* B0 = 0.23735 */
20510, /* A1 = 1.251892 */
-30260, /* A2 = -0.923462 */
26662, /* B2 = 0.81366 */
-20573, /* B1 = -1.255737 */
26668, /* B0 = 0.813843 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1140 */
20392, /* A1 = -1.244629 */
-32460, /* A2 = 0.990601 */
-270, /* B2 = -0.008240 */
0, /* B1 = 0.000000 */
270, /* B0 = 0.008240 */
20218, /* A1 = -1.234009 */
-32655, /* A2 = 0.996582 */
21337, /* B2 = 0.651154 */
-13044, /* B1 = -0.796143 */
21337, /* B0 = 0.651154 */
20684, /* A1 = -1.262512 */
-32657, /* A2 = 0.996643 */
8572, /* B2 = 0.261612 */
-5476, /* B1 = -0.334244 */
8572, /* B0 = 0.261612 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1200 */
19159, /* A1 = -1.169373 */
-32456, /* A2 = 0.990509 */
-335, /* B2 = -0.010252 */
0, /* B1 = 0.000000 */
335, /* B0 = 0.010252 */
18966, /* A1 = -1.157593 */
-32661, /* A2 = 0.996735 */
6802, /* B2 = 0.207588 */
-3900, /* B1 = -0.238098 */
6802, /* B0 = 0.207588 */
19467, /* A1 = -1.188232 */
-32661, /* A2 = 0.996765 */
25035, /* B2 = 0.764008 */
-15049, /* B1 = -0.918579 */
25035, /* B0 = 0.764008 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1209 */
18976, /* A1 = -1.158264 */
-32439, /* A2 = 0.989990 */
-183, /* B2 = -0.005588 */
0, /* B1 = 0.000000 */
183, /* B0 = 0.005588 */
18774, /* A1 = -1.145874 */
-32650, /* A2 = 0.996429 */
15468, /* B2 = 0.472076 */
-8768, /* B1 = -0.535217 */
15468, /* B0 = 0.472076 */
19300, /* A1 = -1.177979 */
-32652, /* A2 = 0.996490 */
19840, /* B2 = 0.605499 */
-11842, /* B1 = -0.722809 */
19840, /* B0 = 0.605499 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1330 */
16357, /* A1 = -0.998413 */
-32368, /* A2 = 0.987793 */
-217, /* B2 = -0.006652 */
0, /* B1 = 0.000000 */
217, /* B0 = 0.006652 */
16107, /* A1 = -0.983126 */
-32601, /* A2 = 0.994904 */
11602, /* B2 = 0.354065 */
-5555, /* B1 = -0.339111 */
11602, /* B0 = 0.354065 */
16722, /* A1 = -1.020630 */
-32603, /* A2 = 0.994965 */
15574, /* B2 = 0.475311 */
-8176, /* B1 = -0.499069 */
15574, /* B0 = 0.475311 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1336 */
16234, /* A1 = -0.990875 */
32404, /* A2 = -0.988922 */
-193, /* B2 = -0.005908 */
0, /* B1 = 0.000000 */
193, /* B0 = 0.005908 */
15986, /* A1 = -0.975769 */
-32632, /* A2 = 0.995880 */
18051, /* B2 = 0.550903 */
-8658, /* B1 = -0.528473 */
18051, /* B0 = 0.550903 */
16591, /* A1 = -1.012695 */
-32634, /* A2 = 0.995941 */
15736, /* B2 = 0.480240 */
-8125, /* B1 = -0.495926 */
15736, /* B0 = 0.480240 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1366 */
15564, /* A1 = -0.949982 */
-32404, /* A2 = 0.988922 */
-269, /* B2 = -0.008216 */
0, /* B1 = 0.000000 */
269, /* B0 = 0.008216 */
15310, /* A1 = -0.934479 */
-32632, /* A2 = 0.995880 */
10815, /* B2 = 0.330063 */
-4962, /* B1 = -0.302887 */
10815, /* B0 = 0.330063 */
15924, /* A1 = -0.971924 */
-32634, /* A2 = 0.995941 */
18880, /* B2 = 0.576172 */
-9364, /* B1 = -0.571594 */
18880, /* B0 = 0.576172 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1380 */
15247, /* A1 = -0.930603 */
-32397, /* A2 = 0.988708 */
-244, /* B2 = -0.007451 */
0, /* B1 = 0.000000 */
244, /* B0 = 0.007451 */
14989, /* A1 = -0.914886 */
-32627, /* A2 = 0.995697 */
18961, /* B2 = 0.578644 */
-8498, /* B1 = -0.518707 */
18961, /* B0 = 0.578644 */
15608, /* A1 = -0.952667 */
-32628, /* A2 = 0.995758 */
11145, /* B2 = 0.340134 */
-5430, /* B1 = -0.331467 */
11145, /* B0 = 0.340134 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1400 */
14780, /* A1 = -0.902130 */
-32393, /* A2 = 0.988586 */
-396, /* B2 = -0.012086 */
0, /* B1 = 0.000000 */
396, /* B0 = 0.012086 */
14510, /* A1 = -0.885651 */
-32630, /* A2 = 0.995819 */
6326, /* B2 = 0.193069 */
-2747, /* B1 = -0.167671 */
6326, /* B0 = 0.193069 */
15154, /* A1 = -0.924957 */
-32632, /* A2 = 0.995850 */
23235, /* B2 = 0.709076 */
-10983, /* B1 = -0.670380 */
23235, /* B0 = 0.709076 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1477 */
13005, /* A1 = -0.793793 */
-32368, /* A2 = 0.987823 */
-500, /* B2 = -0.015265 */
0, /* B1 = 0.000000 */
500, /* B0 = 0.015265 */
12708, /* A1 = -0.775665 */
-32615, /* A2 = 0.995331 */
11420, /* B2 = 0.348526 */
-4306, /* B1 = -0.262833 */
11420, /* B0 = 0.348526 */
13397, /* A1 = -0.817688 */
-32615, /* A2 = 0.995361 */
9454, /* B2 = 0.288528 */
-3981, /* B1 = -0.243027 */
9454, /* B0 = 0.288528 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1600 */
10046, /* A1 = -0.613190 */
-32331, /* A2 = 0.986694 */
-455, /* B2 = -0.013915 */
0, /* B1 = 0.000000 */
455, /* B0 = 0.013915 */
9694, /* A1 = -0.591705 */
-32601, /* A2 = 0.994934 */
6023, /* B2 = 0.183815 */
-1708, /* B1 = -0.104279 */
6023, /* B0 = 0.183815 */
10478, /* A1 = -0.639587 */
-32603, /* A2 = 0.994965 */
22031, /* B2 = 0.672333 */
-7342, /* B1 = -0.448151 */
22031, /* B0 = 0.672333 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1633_1638[] */
9181, /* A1 = 0.560394 */
-32256, /* A2 = -0.984375 */
-556, /* B2 = -0.016975 */
0, /* B1 = 0 */
556, /* B0 = 0.016975 */
8757, /* A1 = 0.534515 */
-32574, /* A2 = -0.99408 */
8443, /* B2 = 0.25769 */
-2135, /* B1 = -0.130341 */
8443, /* B0 = 0.25769 */
9691, /* A1 = 0.591522 */
-32574, /* A2 = -0.99411 */
15446, /* B2 = 0.471375 */
-4809, /* B1 = -0.293579 */
15446, /* B0 = 0.471375 */
7, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1800 */
5076, /* A1 = -0.309875 */
-32304, /* A2 = 0.985840 */
-508, /* B2 = -0.015503 */
0, /* B1 = 0.000000 */
508, /* B0 = 0.015503 */
4646, /* A1 = -0.283600 */
-32605, /* A2 = 0.995026 */
6742, /* B2 = 0.205780 */
-878, /* B1 = -0.053635 */
6742, /* B0 = 0.205780 */
5552, /* A1 = -0.338928 */
-32605, /* A2 = 0.995056 */
23667, /* B2 = 0.722260 */
-4297, /* B1 = -0.262329 */
23667, /* B0 = 0.722260 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
{ /* f1860 */
3569, /* A1 = -0.217865 */
-32292, /* A2 = 0.985504 */
-239, /* B2 = -0.007322 */
0, /* B1 = 0.000000 */
239, /* B0 = 0.007322 */
3117, /* A1 = -0.190277 */
-32603, /* A2 = 0.994965 */
18658, /* B2 = 0.569427 */
-1557, /* B1 = -0.095032 */
18658, /* B0 = 0.569427 */
4054, /* A1 = -0.247437 */
-32603, /* A2 = 0.994965 */
18886, /* B2 = 0.576385 */
-2566, /* B1 = -0.156647 */
18886, /* B0 = 0.576385 */
5, /* Internal filter scaling */
159, /* Minimum in-band energy threshold */
21, /* 21/32 in-band to broad-band ratio */
0x0FF5 /* shift-mask 0x0FF (look at 16 half-frames) bit count = 5 */
},
};
/*
 * Program one of the DSP's four call-progress filters (f0 - f3).
 *
 * j:  per-board state used for all DSP command writes.
 * jf: requested filter number (0-3), enable flag and frequency selector.
 *
 * jf->freq 4..11 selects one of the DSP's built-in frequencies; jf->freq
 * >= 12 indexes tone_table[] (at jf->freq - 12) and downloads those
 * coefficients into the programmable set with the same number as the filter.
 *
 * Returns 0 on success, -1 on an out-of-range filter number or any failed
 * DSP command write.
 */
static int ixj_init_filter(IXJ *j, IXJ_FILTER * jf)
{
unsigned short cmd;
int cnt, max;
if (jf->filter > 3) {
return -1;
}
if (ixj_WriteDSPCommand(0x5154 + jf->filter, j)) /* Select Filter */
return -1;
if (!jf->enable) {
/* Disabled: turn the filter off and stop; no frequency programming. */
if (ixj_WriteDSPCommand(0x5152, j)) /* Disable Filter */
return -1;
else
return 0;
} else {
if (ixj_WriteDSPCommand(0x5153, j)) /* Enable Filter */
return -1;
/* Select the filter (f0 - f3) to use. */
if (ixj_WriteDSPCommand(0x5154 + jf->filter, j))
return -1;
}
if (jf->freq < 12 && jf->freq > 3) {
/* Select the frequency for the selected filter. */
if (ixj_WriteDSPCommand(0x5170 + jf->freq, j))
return -1;
} else if (jf->freq > 11) {
/* We need to load a programmable filter set for undefined */
/* frequencies. So we will point the filter to a programmable set. */
/* Since there are only 4 filters and 4 programmable sets, we will */
/* just point the filter to the same number set and program it for the */
/* frequency we want. */
if (ixj_WriteDSPCommand(0x5170 + jf->filter, j))
return -1;
/* Coefficient load command and word count differ by DSP revision:
 * 19 words for non-0x12 revisions, 15 for revision 0x12 -
 * NOTE(review): confirm against the DSP command set documentation. */
if (j->ver.low != 0x12) {
cmd = 0x515B;
max = 19;
} else {
cmd = 0x515E;
max = 15;
}
if (ixj_WriteDSPCommand(cmd, j))
return -1;
for (cnt = 0; cnt < max; cnt++) {
if (ixj_WriteDSPCommand(tone_table[jf->freq - 12][cnt], j))
return -1;
}
}
/* Record the enable state so the rest of the driver can query it. */
j->filter_en[jf->filter] = jf->enable;
return 0;
}
/*
 * Load caller-supplied raw coefficients into one of the DSP's four
 * call-progress filters (0-3).  Same command sequence as ixj_init_filter()
 * except that the coefficient words come straight from jfr->coeff[]
 * instead of tone_table[].
 *
 * Returns 0 on success, -1 on a bad filter number or a failed DSP write.
 */
static int ixj_init_filter_raw(IXJ *j, IXJ_FILTER_RAW * jfr)
{
	unsigned short load_cmd;
	int i, words;

	if (jfr->filter > 3)
		return -1;

	if (ixj_WriteDSPCommand(0x5154 + jfr->filter, j))	/* Select Filter */
		return -1;

	if (!jfr->enable) {
		/* Disable Filter and stop; nothing else to program. */
		return ixj_WriteDSPCommand(0x5152, j) ? -1 : 0;
	}

	if (ixj_WriteDSPCommand(0x5153, j))	/* Enable Filter */
		return -1;
	/* Select the filter (f0 - f3) to use. */
	if (ixj_WriteDSPCommand(0x5154 + jfr->filter, j))
		return -1;

	/*
	 * There are only 4 filters and 4 programmable coefficient sets, so
	 * point the filter at the set with the same number and then program
	 * that set with the caller's coefficients.
	 */
	if (ixj_WriteDSPCommand(0x5170 + jfr->filter, j))
		return -1;

	/* Load command and word count depend on the DSP revision. */
	if (j->ver.low != 0x12) {
		load_cmd = 0x515B;
		words = 19;
	} else {
		load_cmd = 0x515E;
		words = 15;
	}
	if (ixj_WriteDSPCommand(load_cmd, j))
		return -1;
	for (i = 0; i < words; i++) {
		if (ixj_WriteDSPCommand(jfr->coeff[i], j))
			return -1;
	}

	j->filter_en[jfr->filter] = jfr->enable;
	return 0;
}
/*
 * Program a tone generator slot (indices 13-27 only) with the two
 * frequencies and gains from *ti.  A zero frequency is replaced by the
 * sentinel 0x7FFF before programming.
 *
 * Returns the (possibly substituted) first frequency on success, or -1
 * if a DSP command write fails.  Out-of-range tone indices are silently
 * skipped and the frequency is still returned.
 */
static int ixj_init_tone(IXJ *j, IXJ_TONE * ti)
{
	int freq0 = ti->freq0 ? ti->freq0 : 0x7FFF;
	int freq1 = ti->freq1 ? ti->freq1 : 0x7FFF;

	if (ti->tone_index > 12 && ti->tone_index < 28) {
		unsigned short word;

		/* Select the tone slot to program. */
		if (ixj_WriteDSPCommand(0x6800 + ti->tone_index, j))
			return -1;
		/* Gains: gain1 in the high nibble, gain0 in the low nibble. */
		if (ixj_WriteDSPCommand(0x6000 + (ti->gain1 << 4) + ti->gain0, j))
			return -1;
		word = freq0;
		if (ixj_WriteDSPCommand(word, j))
			return -1;
		word = freq1;
		if (ixj_WriteDSPCommand(word, j))
			return -1;
	}
	return freq0;
}
| gpl-2.0 |
TeslaProject/android_kernel_moto_shamu | drivers/gpu/drm/nouveau/nouveau_sgdma.c | 2377 | 2499 | #include <linux/pagemap.h>
#include <linux/slab.h>
#include <subdev/fb.h>
#include "nouveau_drm.h"
#include "nouveau_ttm.h"
/* Nouveau's ttm_tt backend wrapper; one per bound translation table. */
struct nouveau_sgdma_be {
/* this has to be the first field so populate/unpopulate in
 * nouveau_bo.c work properly, otherwise they would have to be
 * moved here
 */
struct ttm_dma_tt ttm;
struct drm_device *dev; /* owning DRM device */
struct nouveau_mem *node; /* memory node recorded by nv04_sgdma_bind() */
};
/* ttm_tt destroy hook: tear down the DMA ttm and free the wrapper. */
static void
nouveau_sgdma_destroy(struct ttm_tt *ttm)
{
	if (ttm) {
		struct nouveau_sgdma_be *nvbe =
			(struct nouveau_sgdma_be *)ttm;

		ttm_dma_tt_fini(&nvbe->ttm);
		kfree(nvbe);
	}
}
static int
nv04_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
struct nouveau_mem *node = mem->mm_node;
u64 size = mem->num_pages << 12;
if (ttm->sg) {
node->sg = ttm->sg;
nouveau_vm_map_sg_table(&node->vma[0], 0, size, node);
} else {
node->pages = nvbe->ttm.dma_address;
nouveau_vm_map_sg(&node->vma[0], 0, size, node);
}
nvbe->node = node;
return 0;
}
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
nouveau_vm_unmap(&nvbe->node->vma[0]);
return 0;
}
/* ttm backend ops for pre-NV50 chips: mapping is done at bind time. */
static struct ttm_backend_func nv04_sgdma_backend = {
.bind = nv04_sgdma_bind,
.unbind = nv04_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
/*
 * Bind hook for NV50+ chips.  The actual VM mapping is performed in
 * move_notify(); here we only record where the backing storage lives.
 */
static int
nv50_sgdma_bind(struct ttm_tt *ttm, struct ttm_mem_reg *mem)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;
	struct nouveau_mem *node = mem->mm_node;

	if (ttm->sg)
		node->sg = ttm->sg;
	else
		node->pages = nvbe->ttm.dma_address;
	return 0;
}
/* Unbind hook for NV50+: a no-op, unmapping happens in move_notify(). */
static int
nv50_sgdma_unbind(struct ttm_tt *ttm)
{
	return 0;
}
/* ttm backend ops for NV50+ chips: map/unmap deferred to move_notify(). */
static struct ttm_backend_func nv50_sgdma_backend = {
.bind = nv50_sgdma_bind,
.unbind = nv50_sgdma_unbind,
.destroy = nouveau_sgdma_destroy
};
/*
 * Allocate and initialise a TTM translation table for @bdev, choosing the
 * NV04 or NV50 backend ops from the card generation.
 *
 * Returns the embedded ttm_tt on success, NULL if allocation or
 * ttm_dma_tt_init() fails.
 */
struct ttm_tt *
nouveau_sgdma_create_ttm(struct ttm_bo_device *bdev,
			 unsigned long size, uint32_t page_flags,
			 struct page *dummy_read_page)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_sgdma_be *nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL);

	if (!nvbe)
		return NULL;

	nvbe->dev = drm->dev;
	nvbe->ttm.ttm.func = nv_device(drm->device)->card_type < NV_50 ?
			     &nv04_sgdma_backend : &nv50_sgdma_backend;

	if (ttm_dma_tt_init(&nvbe->ttm, bdev, size, page_flags,
			    dummy_read_page)) {
		kfree(nvbe);
		return NULL;
	}
	return &nvbe->ttm.ttm;
}
| gpl-2.0 |
TimmyTossPot/rk30-kernel | drivers/net/xen-netfront.c | 2377 | 48165 | /*
* Virtual network driver for conversing with remote driver backends.
*
* Copyright (c) 2002-2005, K A Fraser
* Copyright (c) 2005, XenSource Ltd
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
#include <xen/events.h>
#include <xen/page.h>
#include <xen/grant_table.h>
#include <xen/interface/io/netif.h>
#include <xen/interface/memory.h>
#include <xen/interface/grant_table.h>
static const struct ethtool_ops xennet_ethtool_ops;
struct netfront_cb {
struct page *page;
unsigned offset;
};
#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
#define RX_COPY_THRESHOLD 256
#define GRANT_INVALID_REF 0
#define NET_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, PAGE_SIZE)
#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
/* Per-device state for one Xen virtual network frontend. */
struct netfront_info {
struct list_head list;
struct net_device *netdev; /* the Linux net device we registered */
struct napi_struct napi; /* NAPI context for rx processing */
unsigned int evtchn; /* event channel shared with the backend */
struct xenbus_device *xbdev; /* xenbus handle for this frontend */
spinlock_t tx_lock; /* protects the tx ring and tx bookkeeping */
struct xen_netif_tx_front_ring tx;
int tx_ring_ref; /* grant reference of the shared tx ring page */
/*
* {tx,rx}_skbs store outstanding skbuffs. Free tx_skb entries
* are linked from tx_skb_freelist through skb_entry.link.
*
* NB. Freelist index entries are always going to be less than
* PAGE_OFFSET, whereas pointers to skbs will always be equal or
* greater than PAGE_OFFSET: we use this property to distinguish
* them.
*/
union skb_entry {
struct sk_buff *skb;
unsigned long link;
} tx_skbs[NET_TX_RING_SIZE];
grant_ref_t gref_tx_head; /* head of the pre-allocated tx grant pool */
grant_ref_t grant_tx_ref[NET_TX_RING_SIZE]; /* grant per tx slot */
unsigned tx_skb_freelist; /* first free index in tx_skbs[] */
spinlock_t rx_lock ____cacheline_aligned_in_smp; /* protects rx state */
struct xen_netif_rx_front_ring rx;
int rx_ring_ref; /* grant reference of the shared rx ring page */
/* Receive-ring batched refills. */
#define RX_MIN_TARGET 8
#define RX_DFL_MIN_TARGET 64
#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
unsigned rx_min_target, rx_max_target, rx_target; /* refill targets */
struct sk_buff_head rx_batch; /* skbs queued for a batched ring refill */
struct timer_list rx_refill_timer; /* retries refill when memory is tight */
struct sk_buff *rx_skbs[NET_RX_RING_SIZE]; /* skb per outstanding rx slot */
grant_ref_t gref_rx_head; /* head of the pre-allocated rx grant pool */
grant_ref_t grant_rx_ref[NET_RX_RING_SIZE]; /* grant per rx slot */
unsigned long rx_pfn_array[NET_RX_RING_SIZE];
struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
struct mmu_update rx_mmu[NET_RX_RING_SIZE];
/* Statistics */
unsigned long rx_gso_checksum_fixup;
};
/* One rx response together with any extra-info slots that follow it. */
struct netfront_rx_info {
struct xen_netif_rx_response rx;
struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
};
/* Store a freelist index in a tx_skbs[] slot (overlays the skb pointer). */
static void skb_entry_set_link(union skb_entry *entry, unsigned short id)
{
	entry->link = id;
}
/*
 * True if this tx_skbs[] slot currently holds a freelist index rather
 * than an skb pointer; indices are always below PAGE_OFFSET while kernel
 * pointers are at or above it.
 */
static int skb_entry_is_link(const union skb_entry *entry)
{
	BUILD_BUG_ON(sizeof(entry->skb) != sizeof(entry->link));
	return (unsigned long)entry->skb < PAGE_OFFSET;
}
/*
 * Freelist helpers for tx_skbs[] slots.
 */
/* Push slot @id onto the free list headed by *head. */
static void add_id_to_freelist(unsigned *head, union skb_entry *slots,
			       unsigned short id)
{
	/* The new entry links to the old head, then becomes the head. */
	skb_entry_set_link(&slots[id], *head);
	*head = id;
}
/* Pop and return the slot index at the head of the freelist. */
static unsigned short get_id_from_freelist(unsigned *head,
					   union skb_entry *list)
{
	unsigned int slot = *head;

	*head = list[slot].link;
	return slot;
}
/* Map a free-running ring index onto an rx slot (ring size is a power of 2). */
static int xennet_rxidx(RING_IDX idx)
{
	return idx & (NET_RX_RING_SIZE - 1);
}
/* Detach and return the skb parked in the rx slot for ring index @ri. */
static struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
					 RING_IDX ri)
{
	int slot = xennet_rxidx(ri);
	struct sk_buff *skb;

	skb = np->rx_skbs[slot];
	np->rx_skbs[slot] = NULL;
	return skb;
}
/* Detach and return the grant reference held by the rx slot for @ri. */
static grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
				     RING_IDX ri)
{
	int slot = xennet_rxidx(ri);
	grant_ref_t ref;

	ref = np->grant_rx_ref[slot];
	np->grant_rx_ref[slot] = GRANT_INVALID_REF;
	return ref;
}
#ifdef CONFIG_SYSFS
static int xennet_sysfs_addif(struct net_device *netdev);
static void xennet_sysfs_delif(struct net_device *netdev);
#else /* !CONFIG_SYSFS */
#define xennet_sysfs_addif(dev) (0)
#define xennet_sysfs_delif(dev) do { } while (0)
#endif
/*
 * Non-zero when scatter-gather is enabled on the device.  Note this
 * returns the raw feature bit, not 0/1; callers only test for truth.
 */
static int xennet_can_sg(struct net_device *dev)
{
	return dev->features & NETIF_F_SG;
}
/*
 * Timer callback armed when an rx refill failed to allocate memory:
 * kick NAPI so xennet_poll() retries the refill.
 */
static void rx_refill_timeout(unsigned long data)
{
	struct net_device *dev = (struct net_device *)data;
	struct netfront_info *np = netdev_priv(dev);
	napi_schedule(&np->napi);
}
/*
 * True while enough tx ring slots remain for a maximally-fragmented
 * skb (one slot per fragment plus slack for the linear head spanning
 * page boundaries and a GSO extra-info slot).
 */
static int netfront_tx_slot_available(struct netfront_info *np)
{
	return (np->tx.req_prod_pvt - np->tx.rsp_cons) <
		(TX_MAX_TARGET - MAX_SKB_FRAGS - 2);
}
/* Restart a stopped tx queue once ring slots are available again. */
static void xennet_maybe_wake_tx(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	if (unlikely(netif_queue_stopped(dev)) &&
	    netfront_tx_slot_available(np) &&
	    likely(netif_running(dev)))
		netif_wake_queue(dev);
}
/*
 * Refill the rx ring with freshly allocated skbs, each backed by one
 * granted page, and notify the backend if it is waiting for requests.
 * Called with np->rx_lock held.
 */
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourself and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align ip header to a 16 bytes boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		/* Backend data lands in this page; head data is copied out later. */
		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		/* Grant the backend access to the page and queue the request. */
		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();	/* barrier so backend sees requests before the producer update */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}
/* ndo_open: enable NAPI, prime the rx ring, and start the tx queue. */
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		/* Responses may already be pending; let NAPI drain them. */
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}
/*
 * Reclaim tx slots the backend has responded to: end the grants, free
 * the skbs, and return slot ids to the freelist.  Runs under tx_lock.
 */
static void xennet_tx_buf_gc(struct net_device *dev)
{
	RING_IDX cons, prod;
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;

	BUG_ON(!netif_carrier_ok(dev));

	do {
		prod = np->tx.sring->rsp_prod;
		rmb(); /* Ensure we see responses up to 'rp'. */

		for (cons = np->tx.rsp_cons; cons != prod; cons++) {
			struct xen_netif_tx_response *txrsp;

			txrsp = RING_GET_RESPONSE(&np->tx, cons);
			if (txrsp->status == XEN_NETIF_RSP_NULL)
				continue;

			id = txrsp->id;
			skb = np->tx_skbs[id].skb;
			/* The backend must have released the grant by now. */
			if (unlikely(gnttab_query_foreign_access(
				np->grant_tx_ref[id]) != 0)) {
				printk(KERN_ALERT "xennet_tx_buf_gc: warning "
				       "-- grant still in use by backend "
				       "domain.\n");
				BUG();
			}
			gnttab_end_foreign_access_ref(
				np->grant_tx_ref[id], GNTMAP_readonly);
			gnttab_release_grant_reference(
				&np->gref_tx_head, np->grant_tx_ref[id]);
			np->grant_tx_ref[id] = GRANT_INVALID_REF;
			add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, id);
			dev_kfree_skb_irq(skb);
		}

		np->tx.rsp_cons = prod;

		/*
		 * Set a new event, then check for race with update of tx_cons.
		 * Note that it is essential to schedule a callback, no matter
		 * how few buffers are pending. Even if there is space in the
		 * transmit ring, higher layers may be blocked because too much
		 * data is outstanding: in such cases notification from Xen is
		 * likely to be the only kick that we'll get.
		 */
		np->tx.sring->rsp_event =
			prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
		mb();		/* update shared area */
	} while ((cons == prod) && (prod != np->tx.sring->rsp_prod));

	xennet_maybe_wake_tx(dev);
}
/*
 * Queue the follow-on tx requests for an skb: split a linear head that
 * crosses page boundaries into page-sized chunks, then grant one
 * request per page fragment.  @tx is the request for the chunk already
 * queued; each additional chunk sets XEN_NETTXF_more_data on its
 * predecessor.  Each chunk takes its own skb reference, released when
 * its response is collected in xennet_tx_buf_gc().
 */
static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
			      struct xen_netif_tx_request *tx)
{
	struct netfront_info *np = netdev_priv(dev);
	char *data = skb->data;
	unsigned long mfn;
	RING_IDX prod = np->tx.req_prod_pvt;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);
	unsigned int id;
	grant_ref_t ref;
	int i;

	/* While the header overlaps a page boundary (including being
	   larger than a page), split it into page-sized chunks. */
	while (len > PAGE_SIZE - offset) {
		tx->size = PAGE_SIZE - offset;
		tx->flags |= XEN_NETTXF_more_data;
		len -= tx->size;
		data += tx->size;
		offset = 0;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = virt_to_mfn(data);
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = offset;
		tx->size = len;
		tx->flags = 0;
	}

	/* Grant backend access to each skb fragment page. */
	for (i = 0; i < frags; i++) {
		skb_frag_t *frag = skb_shinfo(skb)->frags + i;

		tx->flags |= XEN_NETTXF_more_data;

		id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
		np->tx_skbs[id].skb = skb_get(skb);
		tx = RING_GET_REQUEST(&np->tx, prod++);
		tx->id = id;
		ref = gnttab_claim_grant_reference(&np->gref_tx_head);
		BUG_ON((signed short)ref < 0);

		mfn = pfn_to_mfn(page_to_pfn(frag->page));
		gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
						mfn, GNTMAP_readonly);

		tx->gref = np->grant_tx_ref[id] = ref;
		tx->offset = frag->page_offset;
		tx->size = frag->size;
		tx->flags = 0;
	}

	np->tx.req_prod_pvt = prod;
}
/*
 * ndo_start_xmit: map the skb's linear head and fragments into tx ring
 * requests, append a GSO extra-info slot if needed, then kick the
 * backend.  Oversized or unsupported skbs are dropped (with stats)
 * rather than requeued.
 */
static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct xen_netif_tx_request *tx;
	struct xen_netif_extra_info *extra;
	char *data = skb->data;
	RING_IDX i;
	grant_ref_t ref;
	unsigned long mfn;
	int notify;
	int frags = skb_shinfo(skb)->nr_frags;
	unsigned int offset = offset_in_page(data);
	unsigned int len = skb_headlen(skb);

	/* Slots needed: one per page the linear head spans, plus the frags. */
	frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
	if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
		printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
		       frags);
		dump_stack();
		goto drop;
	}

	spin_lock_irq(&np->tx_lock);

	if (unlikely(!netif_carrier_ok(dev) ||
		     (frags > 1 && !xennet_can_sg(dev)) ||
		     netif_needs_gso(skb, netif_skb_features(skb)))) {
		spin_unlock_irq(&np->tx_lock);
		goto drop;
	}

	i = np->tx.req_prod_pvt;

	id = get_id_from_freelist(&np->tx_skb_freelist, np->tx_skbs);
	np->tx_skbs[id].skb = skb;

	tx = RING_GET_REQUEST(&np->tx, i);

	tx->id = id;
	ref = gnttab_claim_grant_reference(&np->gref_tx_head);
	BUG_ON((signed short)ref < 0);
	mfn = virt_to_mfn(data);
	gnttab_grant_foreign_access_ref(
		ref, np->xbdev->otherend_id, mfn, GNTMAP_readonly);
	tx->gref = np->grant_tx_ref[id] = ref;
	tx->offset = offset;
	tx->size = len;
	extra = NULL;

	tx->flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		/* local packet? */
		tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
	else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
		/* remote but checksummed. */
		tx->flags |= XEN_NETTXF_data_validated;

	if (skb_shinfo(skb)->gso_size) {
		struct xen_netif_extra_info *gso;

		/* GSO metadata occupies its own slot right after the head. */
		gso = (struct xen_netif_extra_info *)
			RING_GET_REQUEST(&np->tx, ++i);

		if (extra)
			extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
		else
			tx->flags |= XEN_NETTXF_extra_info;

		gso->u.gso.size = skb_shinfo(skb)->gso_size;
		gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
		gso->u.gso.pad = 0;
		gso->u.gso.features = 0;

		gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
		gso->flags = 0;
		extra = gso;
	}

	np->tx.req_prod_pvt = i + 1;

	xennet_make_frags(skb, dev, tx);
	tx->size = skb->len;	/* first request carries the total frame length */

	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);

	dev->stats.tx_bytes += skb->len;
	dev->stats.tx_packets++;

	/* Note: It is not safe to access skb after xennet_tx_buf_gc()! */
	xennet_tx_buf_gc(dev);

	if (!netfront_tx_slot_available(np))
		netif_stop_queue(dev);

	spin_unlock_irq(&np->tx_lock);

	return NETDEV_TX_OK;

 drop:
	dev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
/* ndo_stop: quiesce the tx queue and NAPI; ring teardown happens elsewhere. */
static int xennet_close(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	netif_stop_queue(np->netdev);
	napi_disable(&np->napi);
	return 0;
}
/*
 * Re-post an (skb, grant ref) pair into the next free rx request slot,
 * used when the original response could not be consumed.
 */
static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
				grant_ref_t ref)
{
	int new = xennet_rxidx(np->rx.req_prod_pvt);

	BUG_ON(np->rx_skbs[new]);
	np->rx_skbs[new] = skb;
	np->grant_rx_ref[new] = ref;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
	RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
	np->rx.req_prod_pvt++;
}
/*
 * Consume the chain of extra-info slots following an rx response,
 * copying each into @extras indexed by type.  The ring slots used by
 * extras carry no packet data, so their skb/grant pairs are recycled
 * via xennet_move_rx_slot().  Advances np->rx.rsp_cons past the chain.
 * Returns 0, or a negative errno on a truncated/invalid chain.
 */
static int xennet_get_extras(struct netfront_info *np,
			     struct xen_netif_extra_info *extras,
			     RING_IDX rp)

{
	struct xen_netif_extra_info *extra;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	int err = 0;

	do {
		struct sk_buff *skb;
		grant_ref_t ref;

		/* The producer promised more data but didn't deliver it. */
		if (unlikely(cons + 1 == rp)) {
			if (net_ratelimit())
				dev_warn(dev, "Missing extra info\n");
			err = -EBADR;
			break;
		}

		extra = (struct xen_netif_extra_info *)
			RING_GET_RESPONSE(&np->rx, ++cons);

		if (unlikely(!extra->type ||
			     extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
			if (net_ratelimit())
				dev_warn(dev, "Invalid extra type: %d\n",
					extra->type);
			err = -EINVAL;
		} else {
			memcpy(&extras[extra->type - 1], extra,
			       sizeof(*extra));
		}

		skb = xennet_get_rx_skb(np, cons);
		ref = xennet_get_rx_ref(np, cons);
		xennet_move_rx_slot(np, skb, ref);
	} while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);

	np->rx.rsp_cons = cons;
	return err;
}
/*
 * Collect the full chain of rx responses (head + more_data fragments)
 * for one packet.  Verified pages are queued on @list; bad slots are
 * re-posted to the ring.  On error, rsp_cons is advanced past the
 * whole chain so the caller can skip it.  Returns 0 or -errno.
 */
static int xennet_get_responses(struct netfront_info *np,
				struct netfront_rx_info *rinfo, RING_IDX rp,
				struct sk_buff_head *list)
{
	struct xen_netif_rx_response *rx = &rinfo->rx;
	struct xen_netif_extra_info *extras = rinfo->extras;
	struct device *dev = &np->netdev->dev;
	RING_IDX cons = np->rx.rsp_cons;
	struct sk_buff *skb = xennet_get_rx_skb(np, cons);
	grant_ref_t ref = xennet_get_rx_ref(np, cons);
	/* A small head copied out below threshold frees up one frag slot. */
	int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
	int frags = 1;
	int err = 0;
	unsigned long ret;

	if (rx->flags & XEN_NETRXF_extra_info) {
		err = xennet_get_extras(np, extras, rp);
		cons = np->rx.rsp_cons;
	}

	for (;;) {
		/* status is the data length when non-negative. */
		if (unlikely(rx->status < 0 ||
			     rx->offset + rx->status > PAGE_SIZE)) {
			if (net_ratelimit())
				dev_warn(dev, "rx->offset: %x, size: %u\n",
					 rx->offset, rx->status);
			xennet_move_rx_slot(np, skb, ref);
			err = -EINVAL;
			goto next;
		}

		/*
		 * This definitely indicates a bug, either in this driver or in
		 * the backend driver. In future this should flag the bad
		 * situation to the system controller to reboot the backend.
		 */
		if (ref == GRANT_INVALID_REF) {
			if (net_ratelimit())
				dev_warn(dev, "Bad rx response id %d.\n",
					 rx->id);
			err = -EINVAL;
			goto next;
		}

		ret = gnttab_end_foreign_access_ref(ref, 0);
		BUG_ON(!ret);

		gnttab_release_grant_reference(&np->gref_rx_head, ref);

		__skb_queue_tail(list, skb);

next:
		if (!(rx->flags & XEN_NETRXF_more_data))
			break;

		/* Chain continues, but the producer hasn't delivered it yet. */
		if (cons + frags == rp) {
			if (net_ratelimit())
				dev_warn(dev, "Need more frags\n");
			err = -ENOENT;
			break;
		}

		rx = RING_GET_RESPONSE(&np->rx, cons + frags);
		skb = xennet_get_rx_skb(np, cons + frags);
		ref = xennet_get_rx_ref(np, cons + frags);
		frags++;
	}

	if (unlikely(frags > max)) {
		if (net_ratelimit())
			dev_warn(dev, "Too many frags\n");
		err = -E2BIG;
	}

	if (unlikely(err))
		np->rx.rsp_cons = cons + frags;

	return err;
}
/*
 * Apply a GSO extra-info slot to @skb.  Only TCPv4 segmentation
 * offload is supported; gso_segs is left 0 so the stack recomputes it
 * (hence SKB_GSO_DODGY).  Returns 0, or -EINVAL on a malformed slot.
 */
static int xennet_set_skb_gso(struct sk_buff *skb,
			      struct xen_netif_extra_info *gso)
{
	if (!gso->u.gso.size) {
		if (net_ratelimit())
			printk(KERN_WARNING "GSO size must not be zero.\n");
		return -EINVAL;
	}

	/* Currently only TCPv4 S.O. is supported. */
	if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
		if (net_ratelimit())
			printk(KERN_WARNING "Bad GSO type %d.\n", gso->u.gso.type);
		return -EINVAL;
	}

	skb_shinfo(skb)->gso_size = gso->u.gso.size;
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	/* Header must be checked, and gso_segs computed. */
	skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
	skb_shinfo(skb)->gso_segs = 0;

	return 0;
}
/*
 * Attach the queued follow-on pages in @list to @skb as page frags,
 * consuming one rx response per page.  Returns the updated consumer
 * index (one past the last response used).
 */
static RING_IDX xennet_fill_frags(struct netfront_info *np,
				  struct sk_buff *skb,
				  struct sk_buff_head *list)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);
	int nr_frags = shinfo->nr_frags;
	RING_IDX cons = np->rx.rsp_cons;
	skb_frag_t *frag = shinfo->frags + nr_frags;
	struct sk_buff *nskb;

	while ((nskb = __skb_dequeue(list))) {
		struct xen_netif_rx_response *rx =
			RING_GET_RESPONSE(&np->rx, ++cons);

		/* Steal the backing page from nskb, then discard the shell. */
		frag->page = skb_shinfo(nskb)->frags[0].page;
		frag->page_offset = rx->offset;
		frag->size = rx->status;

		skb->data_len += rx->status;

		skb_shinfo(nskb)->nr_frags = 0;
		kfree_skb(nskb);

		frag++;
		nr_frags++;
	}

	shinfo->nr_frags = nr_frags;
	return cons;
}
/*
 * Fix up checksum offload state for a received packet: point
 * csum_start/csum_offset at the TCP/UDP checksum field, and for GSO
 * frames that arrived without NETRXF_csum_blank, force
 * CHECKSUM_PARTIAL and recompute the pseudo-header checksum.
 * Returns 0 on success, -EPROTO for unsupported or short packets.
 */
static int checksum_setup(struct net_device *dev, struct sk_buff *skb)
{
	struct iphdr *iph;
	unsigned char *th;
	int err = -EPROTO;
	int recalculate_partial_csum = 0;

	/*
	 * A GSO SKB must be CHECKSUM_PARTIAL. However some buggy
	 * peers can fail to set NETRXF_csum_blank when sending a GSO
	 * frame. In this case force the SKB to CHECKSUM_PARTIAL and
	 * recalculate the partial checksum.
	 */
	if (skb->ip_summed != CHECKSUM_PARTIAL && skb_is_gso(skb)) {
		struct netfront_info *np = netdev_priv(dev);
		np->rx_gso_checksum_fixup++;
		skb->ip_summed = CHECKSUM_PARTIAL;
		recalculate_partial_csum = 1;
	}

	/* A non-CHECKSUM_PARTIAL SKB does not require setup. */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		return 0;

	if (skb->protocol != htons(ETH_P_IP))
		goto out;

	/*
	 * NOTE(review): iph->ihl is read before any explicit check that a
	 * full IP header is present in the linear area; this appears to
	 * rely on the head copy done in handle_incoming_queue() covering
	 * the headers — confirm against backend-supplied packet layout.
	 */
	iph = (void *)skb->data;
	th = skb->data + 4 * iph->ihl;
	if (th >= skb_tail_pointer(skb))
		goto out;

	skb->csum_start = th - skb->head;
	switch (iph->protocol) {
	case IPPROTO_TCP:
		skb->csum_offset = offsetof(struct tcphdr, check);

		if (recalculate_partial_csum) {
			struct tcphdr *tcph = (struct tcphdr *)th;
			tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_TCP, 0);
		}
		break;
	case IPPROTO_UDP:
		skb->csum_offset = offsetof(struct udphdr, check);

		if (recalculate_partial_csum) {
			struct udphdr *udph = (struct udphdr *)th;
			udph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - iph->ihl*4,
							 IPPROTO_UDP, 0);
		}
		break;
	default:
		if (net_ratelimit())
			printk(KERN_ERR "Attempting to checksum a non-"
			       "TCP/UDP packet, dropping a protocol"
			       " %d packet", iph->protocol);
		goto out;
	}

	/* The checksum field itself must also lie within the packet. */
	if ((th + skb->csum_offset + 2) > skb_tail_pointer(skb))
		goto out;

	err = 0;

out:
	return err;
}
/*
 * Final per-packet processing: copy the head data out of the granted
 * page into the skb's linear area, set the protocol, fix checksum
 * state, and hand the packet to the stack.  Returns the count dropped.
 */
static int handle_incoming_queue(struct net_device *dev,
				 struct sk_buff_head *rxq)
{
	int packets_dropped = 0;
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(rxq)) != NULL) {
		struct page *page = NETFRONT_SKB_CB(skb)->page;
		void *vaddr = page_address(page);
		unsigned offset = NETFRONT_SKB_CB(skb)->offset;

		memcpy(skb->data, vaddr + offset,
		       skb_headlen(skb));

		/* The page is only kept when it still backs frag[0]. */
		if (page != skb_shinfo(skb)->frags[0].page)
			__free_page(page);

		/* Ethernet work: Delayed to here as it peeks the header. */
		skb->protocol = eth_type_trans(skb, dev);

		if (checksum_setup(dev, skb)) {
			kfree_skb(skb);
			packets_dropped++;
			dev->stats.rx_errors++;
			continue;
		}

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += skb->len;

		/* Pass it up. */
		netif_receive_skb(skb);
	}

	return packets_dropped;
}
/*
 * NAPI poll: consume up to @budget rx packets from the shared ring,
 * assemble multi-fragment/GSO skbs, deliver them, then refill the
 * ring.  Runs under rx_lock.
 */
static int xennet_poll(struct napi_struct *napi, int budget)
{
	struct netfront_info *np = container_of(napi, struct netfront_info, napi);
	struct net_device *dev = np->netdev;
	struct sk_buff *skb;
	struct netfront_rx_info rinfo;
	struct xen_netif_rx_response *rx = &rinfo.rx;
	struct xen_netif_extra_info *extras = rinfo.extras;
	RING_IDX i, rp;
	int work_done;
	struct sk_buff_head rxq;
	struct sk_buff_head errq;
	struct sk_buff_head tmpq;
	unsigned long flags;
	unsigned int len;
	int err;

	spin_lock(&np->rx_lock);

	skb_queue_head_init(&rxq);
	skb_queue_head_init(&errq);
	skb_queue_head_init(&tmpq);

	rp = np->rx.sring->rsp_prod;
	rmb(); /* Ensure we see queued responses up to 'rp'. */

	i = np->rx.rsp_cons;
	work_done = 0;
	while ((i != rp) && (work_done < budget)) {
		memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
		memset(extras, 0, sizeof(rinfo.extras));

		err = xennet_get_responses(np, &rinfo, rp, &tmpq);

		if (unlikely(err)) {
err:
			/* Discard the whole (partial) packet. */
			while ((skb = __skb_dequeue(&tmpq)))
				__skb_queue_tail(&errq, skb);
			dev->stats.rx_errors++;
			i = np->rx.rsp_cons;
			continue;
		}

		skb = __skb_dequeue(&tmpq);

		if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
			struct xen_netif_extra_info *gso;
			gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

			if (unlikely(xennet_set_skb_gso(skb, gso))) {
				__skb_queue_head(&tmpq, skb);
				np->rx.rsp_cons += skb_queue_len(&tmpq);
				goto err;
			}
		}

		/* Remember where the head data lives; it is copied out later. */
		NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
		NETFRONT_SKB_CB(skb)->offset = rx->offset;

		len = rx->status;
		if (len > RX_COPY_THRESHOLD)
			len = RX_COPY_THRESHOLD;
		skb_put(skb, len);

		if (rx->status > len) {
			/* Remainder stays in the granted page as frag 0. */
			skb_shinfo(skb)->frags[0].page_offset =
				rx->offset + len;
			skb_shinfo(skb)->frags[0].size = rx->status - len;
			skb->data_len = rx->status - len;
		} else {
			skb_shinfo(skb)->frags[0].page = NULL;
			skb_shinfo(skb)->nr_frags = 0;
		}

		i = xennet_fill_frags(np, skb, &tmpq);

		/*
		 * Truesize approximates the size of true data plus
		 * any supervisor overheads. Adding hypervisor
		 * overheads has been shown to significantly reduce
		 * achievable bandwidth with the default receive
		 * buffer size. It is therefore not wise to account
		 * for it here.
		 *
		 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set
		 * to RX_COPY_THRESHOLD + the supervisor
		 * overheads. Here, we add the size of the data pulled
		 * in xennet_fill_frags().
		 *
		 * We also adjust for any unused space in the main
		 * data area by subtracting (RX_COPY_THRESHOLD -
		 * len). This is especially important with drivers
		 * which split incoming packets into header and data,
		 * using only 66 bytes of the main data area (see the
		 * e1000 driver for example.) On such systems,
		 * without this last adjustment, our achievable
		 * receive throughput using the standard receive
		 * buffer size was cut by 25%(!!!).
		 */
		skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
		skb->len += skb->data_len;

		if (rx->flags & XEN_NETRXF_csum_blank)
			skb->ip_summed = CHECKSUM_PARTIAL;
		else if (rx->flags & XEN_NETRXF_data_validated)
			skb->ip_summed = CHECKSUM_UNNECESSARY;

		__skb_queue_tail(&rxq, skb);

		np->rx.rsp_cons = ++i;
		work_done++;
	}

	__skb_queue_purge(&errq);

	work_done -= handle_incoming_queue(dev, &rxq);

	/* If we get a callback with very few responses, reduce fill target. */
	/* NB. Note exponential increase, linear decrease. */
	if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
	     ((3*np->rx_target) / 4)) &&
	    (--np->rx_target < np->rx_min_target))
		np->rx_target = np->rx_min_target;

	xennet_alloc_rx_buffers(dev);

	if (work_done < budget) {
		int more_to_do = 0;

		local_irq_save(flags);

		RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
		if (!more_to_do)
			__napi_complete(napi);

		local_irq_restore(flags);
	}

	spin_unlock(&np->rx_lock);

	return work_done;
}
/*
 * ndo_change_mtu: validate and apply a new MTU.  With scatter-gather
 * the frontend accepts up to 64KiB minus the Ethernet header;
 * otherwise only the standard Ethernet payload size.
 */
static int xennet_change_mtu(struct net_device *dev, int mtu)
{
	int limit;

	if (xennet_can_sg(dev))
		limit = 65535 - ETH_HLEN;
	else
		limit = ETH_DATA_LEN;

	if (mtu > limit)
		return -EINVAL;

	dev->mtu = mtu;
	return 0;
}
/*
 * Release every tx skb and grant still outstanding (e.g. at teardown),
 * returning the slot ids to the freelist.  Freelist entries are
 * recognized via skb_entry_is_link() and skipped.
 */
static void xennet_release_tx_bufs(struct netfront_info *np)
{
	struct sk_buff *skb;
	int i;

	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		/* Skip over entries which are actually freelist references */
		if (skb_entry_is_link(&np->tx_skbs[i]))
			continue;

		skb = np->tx_skbs[i].skb;
		gnttab_end_foreign_access_ref(np->grant_tx_ref[i],
					      GNTMAP_readonly);
		gnttab_release_grant_reference(&np->gref_tx_head,
					       np->grant_tx_ref[i]);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
		add_id_to_freelist(&np->tx_skb_freelist, np->tx_skbs, i);
		dev_kfree_skb_irq(skb);
	}
}
/*
 * Release outstanding rx buffers.  NOTE: currently stubbed out — the
 * unconditional return below makes everything after it dead code.  The
 * remaining body is left over from the page-flipping receive path and
 * does not apply to the copying receiver (see the dev_warn).
 */
static void xennet_release_rx_bufs(struct netfront_info *np)
{
	struct mmu_update      *mmu = np->rx_mmu;
	struct multicall_entry *mcl = np->rx_mcl;
	struct sk_buff_head free_list;
	struct sk_buff *skb;
	unsigned long mfn;
	int xfer = 0, noxfer = 0, unused = 0;
	int id, ref;

	dev_warn(&np->netdev->dev, "%s: fix me for copying receiver.\n",
			 __func__);
	return;

	/* --- dead code below: flipping-receiver cleanup --- */
	skb_queue_head_init(&free_list);

	spin_lock_bh(&np->rx_lock);

	for (id = 0; id < NET_RX_RING_SIZE; id++) {
		ref = np->grant_rx_ref[id];
		if (ref == GRANT_INVALID_REF) {
			unused++;
			continue;
		}

		skb = np->rx_skbs[id];
		mfn = gnttab_end_foreign_transfer_ref(ref);
		gnttab_release_grant_reference(&np->gref_rx_head, ref);
		np->grant_rx_ref[id] = GRANT_INVALID_REF;

		if (0 == mfn) {
			skb_shinfo(skb)->nr_frags = 0;
			dev_kfree_skb(skb);
			noxfer++;
			continue;
		}

		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Remap the page. */
			struct page *page = skb_shinfo(skb)->frags[0].page;
			unsigned long pfn = page_to_pfn(page);
			void *vaddr = page_address(page);

			MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
						mfn_pte(mfn, PAGE_KERNEL),
						0);
			mcl++;
			mmu->ptr = ((u64)mfn << PAGE_SHIFT)
				| MMU_MACHPHYS_UPDATE;
			mmu->val = pfn;
			mmu++;

			set_phys_to_machine(pfn, mfn);
		}
		__skb_queue_tail(&free_list, skb);
		xfer++;
	}

	dev_info(&np->netdev->dev, "%s: %d xfer, %d noxfer, %d unused\n",
		 __func__, xfer, noxfer, unused);

	if (xfer) {
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Do all the remapping work and M2P updates. */
			MULTI_mmu_update(mcl, np->rx_mmu, mmu - np->rx_mmu,
					 NULL, DOMID_SELF);
			mcl++;
			HYPERVISOR_multicall(np->rx_mcl, mcl - np->rx_mcl);
		}
	}

	__skb_queue_purge(&free_list);

	spin_unlock_bh(&np->rx_lock);
}
/* ndo_uninit: drop all outstanding buffers and the grant-ref pools. */
static void xennet_uninit(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	xennet_release_tx_bufs(np);
	xennet_release_rx_bufs(np);
	gnttab_free_grant_references(np->gref_tx_head);
	gnttab_free_grant_references(np->gref_rx_head);
}
/*
 * ndo_fix_features: mask out SG/TSO unless the backend advertises the
 * corresponding feature flags in xenstore.
 */
static u32 xennet_fix_features(struct net_device *dev, u32 features)
{
	struct netfront_info *np = netdev_priv(dev);
	int val;

	if (features & NETIF_F_SG) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
				 "%d", &val) < 0)
			val = 0;	/* absent node means unsupported */

		if (!val)
			features &= ~NETIF_F_SG;
	}

	if (features & NETIF_F_TSO) {
		if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
				 "feature-gso-tcpv4", "%d", &val) < 0)
			val = 0;

		if (!val)
			features &= ~NETIF_F_TSO;
	}

	return features;
}
/*
 * ndo_set_features: if SG is being turned off, clamp the MTU back to
 * the standard Ethernet payload size, since larger frames require SG.
 */
static int xennet_set_features(struct net_device *dev, u32 features)
{
	int has_sg = !!(features & NETIF_F_SG);

	if (!has_sg && dev->mtu > ETH_DATA_LEN) {
		netdev_info(dev, "Reducing MTU because no SG offload");
		dev->mtu = ETH_DATA_LEN;
	}

	return 0;
}
/* net_device callbacks implemented by this frontend. */
static const struct net_device_ops xennet_netdev_ops = {
	.ndo_open            = xennet_open,
	.ndo_uninit          = xennet_uninit,
	.ndo_stop            = xennet_close,
	.ndo_start_xmit      = xennet_start_xmit,
	.ndo_change_mtu      = xennet_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_fix_features    = xennet_fix_features,
	.ndo_set_features    = xennet_set_features,
};
/*
 * Allocate and initialize the net_device and per-device netfront
 * state: locks, refill timer, tx freelist, rx slot tables, grant-ref
 * pools, NAPI and feature flags.  Returns the netdev or an ERR_PTR.
 */
static struct net_device * __devinit xennet_create_dev(struct xenbus_device *dev)
{
	int i, err;
	struct net_device *netdev;
	struct netfront_info *np;

	netdev = alloc_etherdev(sizeof(struct netfront_info));
	if (!netdev) {
		printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
		       __func__);
		return ERR_PTR(-ENOMEM);
	}

	np = netdev_priv(netdev);
	np->xbdev = dev;

	spin_lock_init(&np->tx_lock);
	spin_lock_init(&np->rx_lock);

	skb_queue_head_init(&np->rx_batch);
	np->rx_target     = RX_DFL_MIN_TARGET;
	np->rx_min_target = RX_DFL_MIN_TARGET;
	np->rx_max_target = RX_MAX_TARGET;

	init_timer(&np->rx_refill_timer);
	np->rx_refill_timer.data = (unsigned long)netdev;
	np->rx_refill_timer.function = rx_refill_timeout;

	/* Initialise tx_skbs as a free chain containing every entry. */
	np->tx_skb_freelist = 0;
	for (i = 0; i < NET_TX_RING_SIZE; i++) {
		skb_entry_set_link(&np->tx_skbs[i], i+1);
		np->grant_tx_ref[i] = GRANT_INVALID_REF;
	}

	/* Clear out rx_skbs */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		np->rx_skbs[i] = NULL;
		np->grant_rx_ref[i] = GRANT_INVALID_REF;
	}

	/* A grant for every tx ring slot */
	if (gnttab_alloc_grant_references(TX_MAX_TARGET,
					  &np->gref_tx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
		err = -ENOMEM;
		goto exit;
	}
	/* A grant for every rx ring slot */
	if (gnttab_alloc_grant_references(RX_MAX_TARGET,
					  &np->gref_rx_head) < 0) {
		printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
		err = -ENOMEM;
		goto exit_free_tx;
	}

	netdev->netdev_ops = &xennet_netdev_ops;

	netif_napi_add(netdev, &np->napi, xennet_poll, 64);
	netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
			   NETIF_F_GSO_ROBUST;
	netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;

	/*
	 * Assume that all hw features are available for now. This set
	 * will be adjusted by the call to netdev_update_features() in
	 * xennet_connect() which is the earliest point where we can
	 * negotiate with the backend regarding supported features.
	 */
	netdev->features |= netdev->hw_features;

	SET_ETHTOOL_OPS(netdev, &xennet_ethtool_ops);
	SET_NETDEV_DEV(netdev, &dev->dev);

	np->netdev = netdev;

	/* Carrier stays off until the backend connection is established. */
	netif_carrier_off(netdev);

	return netdev;

 exit_free_tx:
	gnttab_free_grant_references(np->gref_tx_head);
 exit:
	free_netdev(netdev);
	return ERR_PTR(err);
}
/**
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffers for communication with the backend, and
* inform the backend of the appropriate details for those.
*/
static int __devinit netfront_probe(struct xenbus_device *dev,
				    const struct xenbus_device_id *id)
{
	int err;
	struct net_device *netdev;
	struct netfront_info *info;

	netdev = xennet_create_dev(dev);
	if (IS_ERR(netdev)) {
		err = PTR_ERR(netdev);
		xenbus_dev_fatal(dev, err, "creating netdev");
		return err;
	}

	info = netdev_priv(netdev);
	dev_set_drvdata(&dev->dev, info);

	err = register_netdev(info->netdev);
	if (err) {
		printk(KERN_WARNING "%s: register_netdev err=%d\n",
		       __func__, err);
		goto fail;
	}

	err = xennet_sysfs_addif(info->netdev);
	if (err) {
		/* sysfs failure is fatal; back out the registration. */
		unregister_netdev(info->netdev);
		printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
		       __func__, err);
		goto fail;
	}

	return 0;

 fail:
	free_netdev(netdev);
	dev_set_drvdata(&dev->dev, NULL);
	return err;
}
/*
 * Revoke a ring-page grant; the page itself is freed as a side-effect
 * of gnttab_end_foreign_access.  No-op for an invalid reference.
 */
static void xennet_end_access(int ref, void *page)
{
	if (ref == GRANT_INVALID_REF)
		return;

	/* This frees the page as a side-effect */
	gnttab_end_foreign_access(ref, 0, (unsigned long)page);
}
/*
 * Tear down the shared-memory/event-channel connection to the backend:
 * mark carrier off, unbind the irq, and revoke/free both ring pages.
 */
static void xennet_disconnect_backend(struct netfront_info *info)
{
	/* Stop old i/f to prevent errors whilst we rebuild the state. */
	spin_lock_bh(&info->rx_lock);
	spin_lock_irq(&info->tx_lock);
	netif_carrier_off(info->netdev);
	spin_unlock_irq(&info->tx_lock);
	spin_unlock_bh(&info->rx_lock);

	if (info->netdev->irq)
		unbind_from_irqhandler(info->netdev->irq, info->netdev);
	info->evtchn = info->netdev->irq = 0;

	/* End access and free the pages */
	xennet_end_access(info->tx_ring_ref, info->tx.sring);
	xennet_end_access(info->rx_ring_ref, info->rx.sring);

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->tx.sring = NULL;
	info->rx.sring = NULL;
}
/**
* We are reconnecting to the backend, due to a suspend/resume, or a backend
* driver restart. We tear down our netif structure and recreate it, but
* leave the device-layer structures intact so that this is transparent to the
* rest of the kernel.
*/
static int netfront_resume(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	/* Drop the old connection; the xenbus state machine reconnects us. */
	xennet_disconnect_backend(info);
	return 0;
}
/*
 * Parse the "mac" xenstore node ("xx:xx:xx:xx:xx:xx") into @mac.
 * Returns 0, a PTR_ERR from xenbus_read, or -ENOENT if the string is
 * malformed.
 */
static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
{
	char *s, *e, *macstr;
	int i;

	macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
	if (IS_ERR(macstr))
		return PTR_ERR(macstr);

	for (i = 0; i < ETH_ALEN; i++) {
		mac[i] = simple_strtoul(s, &e, 16);
		/* Each byte must be followed by ':' (NUL after the last). */
		if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
			kfree(macstr);
			return -ENOENT;
		}
		s = e+1;
	}

	kfree(macstr);
	return 0;
}
/*
 * Event-channel interrupt: reclaim completed tx slots and, if rx
 * responses are pending, schedule NAPI to process them.
 */
static irqreturn_t xennet_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct netfront_info *np = netdev_priv(dev);
	unsigned long flags;

	spin_lock_irqsave(&np->tx_lock, flags);

	if (likely(netif_carrier_ok(dev))) {
		xennet_tx_buf_gc(dev);
		/* Under tx_lock: protects access to rx shared-ring indexes. */
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}

	spin_unlock_irqrestore(&np->tx_lock, flags);

	return IRQ_HANDLED;
}
/*
 * Allocate and share the tx/rx rings with the backend and bind the
 * event channel.  On success, grant refs, ring pointers and the irq
 * are recorded in @info.
 *
 * NOTE(review): if a later step fails (e.g. rx ring allocation after
 * the tx ring was granted), the already-granted resources are not
 * released here — confirm the caller's error path (which tears down
 * via xennet_disconnect_backend) covers every failure mode.
 */
static int setup_netfront(struct xenbus_device *dev, struct netfront_info *info)
{
	struct xen_netif_tx_sring *txs;
	struct xen_netif_rx_sring *rxs;
	int err;
	struct net_device *netdev = info->netdev;

	info->tx_ring_ref = GRANT_INVALID_REF;
	info->rx_ring_ref = GRANT_INVALID_REF;
	info->rx.sring = NULL;
	info->tx.sring = NULL;
	netdev->irq = 0;

	err = xen_net_read_mac(dev, netdev->dev_addr);
	if (err) {
		xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
		goto fail;
	}

	txs = (struct xen_netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!txs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating tx ring page");
		goto fail;
	}
	SHARED_RING_INIT(txs);
	FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(txs));
	if (err < 0) {
		free_page((unsigned long)txs);
		goto fail;
	}

	info->tx_ring_ref = err;	/* xenbus_grant_ring returns the ref */
	rxs = (struct xen_netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
	if (!rxs) {
		err = -ENOMEM;
		xenbus_dev_fatal(dev, err, "allocating rx ring page");
		goto fail;
	}
	SHARED_RING_INIT(rxs);
	FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);

	err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
	if (err < 0) {
		free_page((unsigned long)rxs);
		goto fail;
	}
	info->rx_ring_ref = err;

	err = xenbus_alloc_evtchn(dev, &info->evtchn);
	if (err)
		goto fail;

	err = bind_evtchn_to_irqhandler(info->evtchn, xennet_interrupt,
					0, netdev->name, netdev);
	if (err < 0)
		goto fail;
	netdev->irq = err;
	return 0;

 fail:
	return err;
}
/* Common code used when first setting up, and when resuming. */
/*
 * Set up the rings/event channel, then publish their details and this
 * frontend's feature flags to xenstore in a transaction (retried on
 * -EAGAIN).  On failure the ring state is torn down again.
 */
static int talk_to_netback(struct xenbus_device *dev,
			   struct netfront_info *info)
{
	const char *message;
	struct xenbus_transaction xbt;
	int err;

	/* Create shared ring, alloc event channel. */
	err = setup_netfront(dev, info);
	if (err)
		goto out;

again:
	err = xenbus_transaction_start(&xbt);
	if (err) {
		xenbus_dev_fatal(dev, err, "starting transaction");
		goto destroy_ring;
	}

	err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref", "%u",
			    info->tx_ring_ref);
	if (err) {
		message = "writing tx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref", "%u",
			    info->rx_ring_ref);
	if (err) {
		message = "writing rx ring-ref";
		goto abort_transaction;
	}
	err = xenbus_printf(xbt, dev->nodename,
			    "event-channel", "%u", info->evtchn);
	if (err) {
		message = "writing event-channel";
		goto abort_transaction;
	}

	/* This driver only supports the copying (not flipping) receiver. */
	err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
			    1);
	if (err) {
		message = "writing request-rx-copy";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
	if (err) {
		message = "writing feature-rx-notify";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
	if (err) {
		message = "writing feature-sg";
		goto abort_transaction;
	}

	err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d", 1);
	if (err) {
		message = "writing feature-gso-tcpv4";
		goto abort_transaction;
	}

	err = xenbus_transaction_end(xbt, 0);
	if (err) {
		if (err == -EAGAIN)
			goto again;
		xenbus_dev_fatal(dev, err, "completing transaction");
		goto destroy_ring;
	}

	return 0;

 abort_transaction:
	xenbus_transaction_end(xbt, 1);
	xenbus_dev_fatal(dev, err, "%s", message);
 destroy_ring:
	xennet_disconnect_backend(info);
 out:
	return err;
}
/*
 * (Re-)establish the connection to the backend.  Used both on first
 * connect and on resume after migration.  Requires the backend to
 * support the rx-copy receive path; refuses to connect otherwise.
 * Holds both rx_lock (BH) and tx_lock (IRQ) while rebuilding ring state.
 */
static int xennet_connect(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);
	int i, requeue_idx, err;
	struct sk_buff *skb;
	grant_ref_t ref;
	struct xen_netif_rx_request *req;
	unsigned int feature_rx_copy;

	err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
			   "feature-rx-copy", "%u", &feature_rx_copy);
	if (err != 1)
		feature_rx_copy = 0;	/* key absent: treat as unsupported */

	if (!feature_rx_copy) {
		dev_info(&dev->dev,
			 "backend does not support copying receive path\n");
		return -ENODEV;
	}

	err = talk_to_netback(np->xbdev, np);
	if (err)
		return err;

	/* Re-negotiate offload features now that the backend is known. */
	rtnl_lock();
	netdev_update_features(dev);
	rtnl_unlock();

	spin_lock_bh(&np->rx_lock);
	spin_lock_irq(&np->tx_lock);

	/* Step 1: Discard all pending TX packet fragments. */
	xennet_release_tx_bufs(np);

	/* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
	for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
		if (!np->rx_skbs[i])
			continue;	/* slot empty: skip */

		/* Compact surviving skbs/grants to the front and re-grant
		 * each page to the (possibly new) backend domain. */
		skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
		ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
		req = RING_GET_REQUEST(&np->rx, requeue_idx);

		gnttab_grant_foreign_access_ref(
			ref, np->xbdev->otherend_id,
			pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
					       frags->page)),
			0);
		req->gref = ref;
		req->id   = requeue_idx;

		requeue_idx++;
	}

	np->rx.req_prod_pvt = requeue_idx;

	/*
	 * Step 3: All public and private state should now be sane.  Get
	 * ready to start sending and receiving packets and give the driver
	 * domain a kick because we've probably just requeued some
	 * packets.
	 */
	netif_carrier_on(np->netdev);
	notify_remote_via_irq(np->netdev->irq);
	xennet_tx_buf_gc(dev);
	xennet_alloc_rx_buffers(dev);

	spin_unlock_irq(&np->tx_lock);
	spin_unlock_bh(&np->rx_lock);

	return 0;
}
/**
 * Callback received when the backend's state changes.
 *
 * Drives the frontend state machine: when the backend reaches InitWait
 * (and we are still Initialising) establish the connection and switch
 * ourselves to Connected; when the backend starts Closing, close too.
 * All other backend states require no action here.
 */
static void netback_changed(struct xenbus_device *dev,
			    enum xenbus_state backend_state)
{
	struct netfront_info *np = dev_get_drvdata(&dev->dev);
	struct net_device *netdev = np->netdev;

	dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state));

	switch (backend_state) {
	case XenbusStateInitialising:
	case XenbusStateInitialised:
	case XenbusStateReconfiguring:
	case XenbusStateReconfigured:
	case XenbusStateConnected:
	case XenbusStateUnknown:
	case XenbusStateClosed:
		break;

	case XenbusStateInitWait:
		if (dev->state != XenbusStateInitialising)
			break;	/* already past initial handshake */
		if (xennet_connect(netdev) != 0)
			break;
		xenbus_switch_state(dev, XenbusStateConnected);
		/* Nudge peers (gratuitous ARP) so traffic resumes quickly,
		 * e.g. after live migration. */
		netif_notify_peers(netdev);
		break;

	case XenbusStateClosing:
		xenbus_frontend_closed(dev);
		break;
	}
}
/*
 * Table of driver-private ethtool statistics.  Each entry maps a stat
 * name to the byte offset of its counter within struct netfront_info;
 * xennet_get_ethtool_stats() reads the counters through these offsets.
 */
static const struct xennet_stat {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} xennet_stats[] = {
	{
		"rx_gso_checksum_fixup",
		offsetof(struct netfront_info, rx_gso_checksum_fixup)
	},
};
/*
 * ethtool .get_sset_count: report how many private stats we expose.
 * Only the ETH_SS_STATS string set is supported.
 */
static int xennet_get_sset_count(struct net_device *dev, int string_set)
{
	if (string_set == ETH_SS_STATS)
		return ARRAY_SIZE(xennet_stats);
	return -EINVAL;
}
/*
 * ethtool .get_ethtool_stats: copy each private counter out of the
 * netfront_info structure using the offsets in xennet_stats[].
 */
static void xennet_get_ethtool_stats(struct net_device *dev,
				     struct ethtool_stats *stats, u64 *data)
{
	void *priv = netdev_priv(dev);
	size_t idx;

	for (idx = 0; idx < ARRAY_SIZE(xennet_stats); idx++)
		data[idx] = *(unsigned long *)(priv + xennet_stats[idx].offset);
}
/*
 * ethtool .get_strings: emit the stat names, one fixed-width
 * ETH_GSTRING_LEN slot per statistic.  Other string sets are ignored.
 */
static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
{
	unsigned int idx;

	if (stringset != ETH_SS_STATS)
		return;

	for (idx = 0; idx < ARRAY_SIZE(xennet_stats); idx++)
		memcpy(data + idx * ETH_GSTRING_LEN,
		       xennet_stats[idx].name, ETH_GSTRING_LEN);
}
/* ethtool entry points: link state plus the private stats above. */
static const struct ethtool_ops xennet_ethtool_ops =
{
	.get_link = ethtool_op_get_link,

	.get_sset_count = xennet_get_sset_count,
	.get_ethtool_stats = xennet_get_ethtool_stats,
	.get_strings = xennet_get_strings,
};
#ifdef CONFIG_SYSFS
/* sysfs: read the minimum rx buffer target. */
static ssize_t show_rxbuf_min(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netfront_info *np = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%u\n", np->rx_min_target);
}
/*
 * sysfs: set the minimum rx buffer target.  The value is clamped to
 * [RX_MIN_TARGET, RX_MAX_TARGET]; raising the minimum may also raise
 * the maximum and the current target.  Requires CAP_NET_ADMIN.
 */
static ssize_t store_rxbuf_min(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);
	unsigned long val;
	char *end;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &end, 0);
	if (end == buf)
		return -EBADMSG;	/* no digits consumed */

	if (val < RX_MIN_TARGET)
		val = RX_MIN_TARGET;
	else if (val > RX_MAX_TARGET)
		val = RX_MAX_TARGET;

	spin_lock_bh(&info->rx_lock);
	if (val > info->rx_max_target)
		info->rx_max_target = val;
	info->rx_min_target = val;
	if (val > info->rx_target)
		info->rx_target = val;
	xennet_alloc_rx_buffers(netdev);
	spin_unlock_bh(&info->rx_lock);

	return len;
}
/* sysfs: read the maximum rx buffer target. */
static ssize_t show_rxbuf_max(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netfront_info *np = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%u\n", np->rx_max_target);
}
/*
 * sysfs: set the maximum rx buffer target.  The value is clamped to
 * [RX_MIN_TARGET, RX_MAX_TARGET]; lowering the maximum may also lower
 * the minimum and the current target.  Requires CAP_NET_ADMIN.
 */
static ssize_t store_rxbuf_max(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t len)
{
	struct net_device *netdev = to_net_dev(dev);
	struct netfront_info *info = netdev_priv(netdev);
	unsigned long val;
	char *end;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	val = simple_strtoul(buf, &end, 0);
	if (end == buf)
		return -EBADMSG;	/* no digits consumed */

	if (val < RX_MIN_TARGET)
		val = RX_MIN_TARGET;
	else if (val > RX_MAX_TARGET)
		val = RX_MAX_TARGET;

	spin_lock_bh(&info->rx_lock);
	if (val < info->rx_min_target)
		info->rx_min_target = val;
	info->rx_max_target = val;
	if (val < info->rx_target)
		info->rx_target = val;
	xennet_alloc_rx_buffers(netdev);
	spin_unlock_bh(&info->rx_lock);

	return len;
}
/* sysfs: read the current rx buffer target (read-only attribute). */
static ssize_t show_rxbuf_cur(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct netfront_info *np = netdev_priv(to_net_dev(dev));

	return sprintf(buf, "%u\n", np->rx_target);
}
/* sysfs attributes exposing the rx buffer targets; rxbuf_cur is read-only. */
static struct device_attribute xennet_attrs[] = {
	__ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
	__ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
	__ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
};
/*
 * Register all xennet sysfs attributes for @netdev.  On failure, remove
 * the attributes created so far and return the error.
 */
static int xennet_sysfs_addif(struct net_device *netdev)
{
	int err;
	int i = 0;

	while (i < ARRAY_SIZE(xennet_attrs)) {
		err = device_create_file(&netdev->dev, &xennet_attrs[i]);
		if (err)
			goto rollback;
		i++;
	}
	return 0;

rollback:
	/* Undo the files created before the failing index. */
	while (i-- > 0)
		device_remove_file(&netdev->dev, &xennet_attrs[i]);
	return err;
}
/* Remove every xennet sysfs attribute previously registered for @netdev. */
static void xennet_sysfs_delif(struct net_device *netdev)
{
	unsigned int idx;

	for (idx = 0; idx < ARRAY_SIZE(xennet_attrs); idx++)
		device_remove_file(&netdev->dev, &xennet_attrs[idx]);
}
#endif /* CONFIG_SYSFS */
/* xenbus device IDs this driver binds to ("vif" = virtual interface). */
static struct xenbus_device_id netfront_ids[] = {
	{ "vif" },
	{ "" }
};
/*
 * xenbus .remove: tear the device down in the reverse order of probe —
 * unregister from the network stack, disconnect from the backend, stop
 * the rx refill timer, drop sysfs files, then free the netdev.
 */
static int __devexit xennet_remove(struct xenbus_device *dev)
{
	struct netfront_info *info = dev_get_drvdata(&dev->dev);

	dev_dbg(&dev->dev, "%s\n", dev->nodename);

	unregister_netdev(info->netdev);

	xennet_disconnect_backend(info);

	/* Timer must be dead before the netdev (and info) is freed. */
	del_timer_sync(&info->rx_refill_timer);

	xennet_sysfs_delif(info->netdev);

	free_netdev(info->netdev);

	return 0;
}
/* xenbus driver glue; netfront_probe/netfront_resume are defined earlier. */
static struct xenbus_driver netfront_driver = {
	.name = "vif",
	.owner = THIS_MODULE,
	.ids = netfront_ids,
	.probe = netfront_probe,
	.remove = __devexit_p(xennet_remove),
	.resume = netfront_resume,
	.otherend_changed = netback_changed,
};
/*
 * Module init: register the frontend driver, but only when running as a
 * Xen guest.  Dom0 has real hardware, so the frontend is pointless there
 * and init succeeds as a no-op.
 */
static int __init netif_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	if (xen_initial_domain())
		return 0;

	printk(KERN_INFO "Initialising Xen virtual ethernet driver.\n");

	return xenbus_register_frontend(&netfront_driver);
}
module_init(netif_init);
module_init(netif_init);
/* Module exit: mirror of netif_init — nothing to undo in dom0. */
static void __exit netif_exit(void)
{
	if (xen_initial_domain())
		return;

	xenbus_unregister_driver(&netfront_driver);
}
module_exit(netif_exit);
MODULE_DESCRIPTION("Xen virtual network device frontend");
MODULE_LICENSE("GPL");
MODULE_ALIAS("xen:vif");
MODULE_ALIAS("xennet");
| gpl-2.0 |
SimpleAOSP-Kernel/kernel_grouper | arch/powerpc/kvm/booke_emulate.c | 3145 | 6920 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*
* Copyright IBM Corp. 2008
*
* Authors: Hollis Blanchard <hollisb@us.ibm.com>
*/
#include <linux/kvm_host.h>
#include <asm/disassemble.h>
#include "booke.h"
#define OP_19_XOP_RFI 50
#define OP_31_XOP_MFMSR 83
#define OP_31_XOP_WRTEE 131
#define OP_31_XOP_MTMSR 146
#define OP_31_XOP_WRTEEI 163
/*
 * Emulate the rfi (return from interrupt) instruction: restore the guest
 * PC from SRR0 and the guest MSR from SRR1.
 */
static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pc = vcpu->arch.shared->srr0;
	kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1);
}
/*
 * Emulate BookE privileged instructions the guest cannot run directly:
 * rfi, mfmsr, mtmsr, wrtee and wrteei.
 *
 * @inst:    raw instruction word being emulated
 * @advance: set to 0 when the PC must NOT be advanced past the
 *           instruction (rfi already redirected it)
 *
 * Returns EMULATE_DONE on success or EMULATE_FAIL for opcodes this
 * handler does not know (caller falls back to generic emulation).
 */
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rs;
	int rt;

	switch (get_op(inst)) {
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFI:
			kvmppc_emul_rfi(vcpu);
			kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
			*advance = 0;	/* rfi set the new PC itself */
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_MFMSR:
			rt = get_rt(inst);
			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
			kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
			break;

		case OP_31_XOP_MTMSR:
			rs = get_rs(inst);
			/* Account the exit before kvmppc_set_msr(), which
			 * may itself change guest state. */
			kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;

		case OP_31_XOP_WRTEE:
			/* Copy only the external-interrupt enable bit. */
			rs = get_rs(inst);
			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
					| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
			break;

		case OP_31_XOP_WRTEEI:
			/* EE comes from the immediate field of the insn. */
			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
							 | (inst & MSR_EE);
			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
			break;

		default:
			emulated = EMULATE_FAIL;
		}
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}
/*
 * Emulate mtspr (move to special-purpose register) for the BookE SPRs
 * KVM virtualizes.  The new value is taken from guest GPR @rs.
 *
 * Returns EMULATE_DONE, or EMULATE_FAIL for SPRs not handled here.
 */
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	int emulated = EMULATE_DONE;
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_DEAR:
		vcpu->arch.shared->dar = spr_val; break;
	case SPRN_ESR:
		vcpu->arch.esr = spr_val; break;
	case SPRN_DBCR0:
		vcpu->arch.dbcr0 = spr_val; break;
	case SPRN_DBCR1:
		vcpu->arch.dbcr1 = spr_val; break;
	case SPRN_DBSR:
		/* DBSR/TSR are write-one-to-clear registers. */
		vcpu->arch.dbsr &= ~spr_val; break;
	case SPRN_TSR:
		vcpu->arch.tsr &= ~spr_val; break;
	case SPRN_TCR:
		vcpu->arch.tcr = spr_val;
		/* Timer configuration changed: recompute the decrementer. */
		kvmppc_emulate_dec(vcpu);
		break;

	/* Note: SPRG4-7 are user-readable. These values are
	 * loaded into the real SPRGs when resuming the
	 * guest. */
	case SPRN_SPRG4:
		vcpu->arch.sprg4 = spr_val; break;
	case SPRN_SPRG5:
		vcpu->arch.sprg5 = spr_val; break;
	case SPRN_SPRG6:
		vcpu->arch.sprg6 = spr_val; break;
	case SPRN_SPRG7:
		vcpu->arch.sprg7 = spr_val; break;

	case SPRN_IVPR:
		vcpu->arch.ivpr = spr_val;
		break;

	/* IVOR0-15: per-exception interrupt vector offsets, stored in
	 * the ivor[] array indexed by KVM's interrupt priority. */
	case SPRN_IVOR0:
		vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
		break;
	case SPRN_IVOR1:
		vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
		break;
	case SPRN_IVOR2:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
		break;
	case SPRN_IVOR3:
		vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
		break;
	case SPRN_IVOR4:
		vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val;
		break;
	case SPRN_IVOR5:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val;
		break;
	case SPRN_IVOR6:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val;
		break;
	case SPRN_IVOR7:
		vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR8:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
		break;
	case SPRN_IVOR9:
		vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR10:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val;
		break;
	case SPRN_IVOR11:
		vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val;
		break;
	case SPRN_IVOR12:
		vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val;
		break;
	case SPRN_IVOR13:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val;
		break;
	case SPRN_IVOR14:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val;
		break;
	case SPRN_IVOR15:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}
/*
 * Emulate mfspr (move from special-purpose register) for the BookE SPRs
 * KVM virtualizes.  The value is written into guest GPR @rt.  Mirror of
 * kvmppc_booke_emulate_mtspr() above.
 *
 * Returns EMULATE_DONE, or EMULATE_FAIL for SPRs not handled here.
 */
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IVPR:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break;
	case SPRN_DEAR:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
	case SPRN_ESR:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break;
	case SPRN_DBCR0:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
	case SPRN_DBCR1:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
	case SPRN_DBSR:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;

	/* IVOR0-15 are read back from the ivor[] array. */
	case SPRN_IVOR0:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
		break;
	case SPRN_IVOR1:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]);
		break;
	case SPRN_IVOR2:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
		break;
	case SPRN_IVOR3:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]);
		break;
	case SPRN_IVOR4:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]);
		break;
	case SPRN_IVOR5:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]);
		break;
	case SPRN_IVOR6:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]);
		break;
	case SPRN_IVOR7:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]);
		break;
	case SPRN_IVOR8:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
		break;
	case SPRN_IVOR9:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]);
		break;
	case SPRN_IVOR10:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]);
		break;
	case SPRN_IVOR11:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]);
		break;
	case SPRN_IVOR12:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]);
		break;
	case SPRN_IVOR13:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]);
		break;
	case SPRN_IVOR14:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]);
		break;
	case SPRN_IVOR15:
		kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}
| gpl-2.0 |
androidarmv6/android_kernel_lge_msm7x27-3.0.x | arch/sparc/prom/p1275.c | 3145 | 1269 | /*
* p1275.c: Sun IEEE 1275 PROM low level interface routines
*
* Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/spinlock.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/system.h>
#include <asm/spitfire.h>
#include <asm/pstate.h>
#include <asm/ldc.h>
/*
 * Shared state between this file and the low-level PROM entry assembly.
 * The hex offsets in the comments are the layout the assembly relies on
 * — do not reorder or repack these fields.
 */
struct {
	long prom_callback;			/* 0x00 */
	void (*prom_cif_handler)(long *);	/* 0x08 */
	unsigned long prom_cif_stack;		/* 0x10 */
} p1275buf;
extern void prom_world(int);
extern void prom_cif_direct(unsigned long *args);
extern void prom_cif_callback(void);
/*
* This provides SMP safety on the p1275buf.
*/
DEFINE_RAW_SPINLOCK(prom_entry_lock);
/*
 * Issue a call into the OpenFirmware PROM with the argument array @args.
 * Serialized by prom_entry_lock; interrupts are effectively masked for
 * the duration by writing PIL_NMI into the processor interrupt level
 * via raw_local_irq_restore() (sparc64: %pil is part of the saved
 * flags), and the original level is restored afterwards.
 */
void p1275_cmd_direct(unsigned long *args)
{
	unsigned long flags;

	raw_local_save_flags(flags);
	/* Raise PIL to NMI level rather than a plain irq-disable. */
	raw_local_irq_restore((unsigned long)PIL_NMI);
	raw_spin_lock(&prom_entry_lock);

	prom_world(1);		/* switch MMU context to the PROM's world */
	prom_cif_direct(args);
	prom_world(0);		/* and back to the kernel's */

	raw_spin_unlock(&prom_entry_lock);
	raw_local_irq_restore(flags);
}
/*
 * Record the PROM client-interface entry point and the stack the
 * low-level trampoline should switch to when calling into the PROM.
 */
void prom_cif_init(void *cif_handler, void *cif_stack)
{
	p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
	p1275buf.prom_cif_stack = (unsigned long)cif_stack;
}
| gpl-2.0 |
yamahata/linux-umem | net/ipv4/netfilter/nf_nat_standalone.c | 3145 | 8411 | /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2006 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/icmp.h>
#include <linux/gfp.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <linux/spinlock.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_nat.h>
#include <net/netfilter/nf_nat_rule.h>
#include <net/netfilter/nf_nat_protocol.h>
#include <net/netfilter/nf_nat_core.h>
#include <net/netfilter/nf_nat_helper.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#ifdef CONFIG_XFRM
/*
 * IPsec (XFRM) helper: rewrite the flow key of @skb so policy lookup
 * sees the addresses/ports as they will appear after NAT.  Uses the
 * packet's conntrack entry to decide which direction was translated.
 */
static void nat_decode_session(struct sk_buff *skb, struct flowi *fl)
{
	struct flowi4 *fl4 = &fl->u.ip4;
	const struct nf_conn *ct;
	const struct nf_conntrack_tuple *t;
	enum ip_conntrack_info ctinfo;
	enum ip_conntrack_dir dir;
	unsigned long statusbit;

	ct = nf_ct_get(skb, &ctinfo);
	if (ct == NULL)
		return;		/* untracked: nothing to translate */
	dir = CTINFO2DIR(ctinfo);
	t = &ct->tuplehash[dir].tuple;

	/* Which NAT flavour affects this direction of the flow? */
	if (dir == IP_CT_DIR_ORIGINAL)
		statusbit = IPS_DST_NAT;
	else
		statusbit = IPS_SRC_NAT;

	if (ct->status & statusbit) {
		fl4->daddr = t->dst.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_dport = t->dst.u.tcp.port;
	}

	/* Flip to the opposite NAT bit for the source side. */
	statusbit ^= IPS_NAT_MASK;

	if (ct->status & statusbit) {
		fl4->saddr = t->src.u3.ip;
		if (t->dst.protonum == IPPROTO_TCP ||
		    t->dst.protonum == IPPROTO_UDP ||
		    t->dst.protonum == IPPROTO_UDPLITE ||
		    t->dst.protonum == IPPROTO_DCCP ||
		    t->dst.protonum == IPPROTO_SCTP)
			fl4->fl4_sport = t->src.u.tcp.port;
	}
}
#endif
/*
 * Core NAT hook, shared by all four registration points.  Looks up the
 * packet's conntrack entry, sets up a NAT binding for NEW connections
 * (or hands ICMP errors to the reply translator), and finally rewrites
 * the packet via nf_nat_packet().
 */
static unsigned int
nf_nat_fn(unsigned int hooknum,
	  struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	struct nf_conn_nat *nat;
	/* maniptype == SRC for postrouting. */
	enum nf_nat_manip_type maniptype = HOOK2MANIP(hooknum);

	/* We never see fragments: conntrack defrags on pre-routing
	   and local-out, and nf_nat_out protects post-routing. */
	NF_CT_ASSERT(!ip_is_fragment(ip_hdr(skb)));

	ct = nf_ct_get(skb, &ctinfo);
	/* Can't track?  It's not due to stress, or conntrack would
	   have dropped it.  Hence it's the user's responsibilty to
	   packet filter it out, or implement conntrack/NAT for that
	   protocol. 8) --RR */
	if (!ct)
		return NF_ACCEPT;

	/* Don't try to NAT if this packet is not conntracked */
	if (nf_ct_is_untracked(ct))
		return NF_ACCEPT;

	nat = nfct_nat(ct);
	if (!nat) {
		/* NAT module was loaded late. */
		if (nf_ct_is_confirmed(ct))
			return NF_ACCEPT;
		nat = nf_ct_ext_add(ct, NF_CT_EXT_NAT, GFP_ATOMIC);
		if (nat == NULL) {
			pr_debug("failed to add NAT extension\n");
			return NF_ACCEPT;
		}
	}

	switch (ctinfo) {
	case IP_CT_RELATED:
	case IP_CT_RELATED_REPLY:
		/* ICMP errors carry the embedded original header and are
		 * translated specially. */
		if (ip_hdr(skb)->protocol == IPPROTO_ICMP) {
			if (!nf_nat_icmp_reply_translation(ct, ctinfo,
							   hooknum, skb))
				return NF_DROP;
			else
				return NF_ACCEPT;
		}
		/* Fall thru... (Only ICMPs can be IP_CT_IS_REPLY) */
	case IP_CT_NEW:

		/* Seen it before?  This can happen for loopback, retrans,
		   or local packets.. */
		if (!nf_nat_initialized(ct, maniptype)) {
			unsigned int ret;

			/* Consult the iptables nat table for a binding. */
			ret = nf_nat_rule_find(skb, hooknum, in, out, ct);
			if (ret != NF_ACCEPT)
				return ret;
		} else
			pr_debug("Already setup manip %s for ct %p\n",
				 maniptype == NF_NAT_MANIP_SRC ? "SRC" : "DST",
				 ct);
		break;

	default:
		/* ESTABLISHED */
		NF_CT_ASSERT(ctinfo == IP_CT_ESTABLISHED ||
			     ctinfo == IP_CT_ESTABLISHED_REPLY);
	}

	return nf_nat_packet(ct, ctinfo, hooknum, skb);
}
/*
 * PRE_ROUTING hook: run the core NAT function and, if the destination
 * address was rewritten (DNAT), drop the cached dst entry so the packet
 * is re-routed toward its new destination.
 */
static unsigned int
nf_nat_in(unsigned int hooknum,
	  struct sk_buff *skb,
	  const struct net_device *in,
	  const struct net_device *out,
	  int (*okfn)(struct sk_buff *))
{
	__be32 orig_daddr = ip_hdr(skb)->daddr;
	unsigned int verdict;

	verdict = nf_nat_fn(hooknum, skb, in, out, okfn);
	if (verdict == NF_DROP || verdict == NF_STOLEN)
		return verdict;

	if (ip_hdr(skb)->daddr != orig_daddr)
		skb_dst_drop(skb);

	return verdict;
}
/*
 * POST_ROUTING hook: apply source NAT.  With XFRM enabled, if NAT
 * changed the source address/port, re-run IPsec policy handling via
 * ip_xfrm_me_harder() so the packet matches post-NAT policies.
 */
static unsigned int
nf_nat_out(unsigned int hooknum,
	   struct sk_buff *skb,
	   const struct net_device *in,
	   const struct net_device *out,
	   int (*okfn)(struct sk_buff *))
{
#ifdef CONFIG_XFRM
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
#endif
	unsigned int ret;

	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr) ||
	    ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;

	ret = nf_nat_fn(hooknum, skb, in, out, okfn);
#ifdef CONFIG_XFRM
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		/* SNAT happened iff the per-direction tuples differ. */
		if ((ct->tuplehash[dir].tuple.src.u3.ip !=
		     ct->tuplehash[!dir].tuple.dst.u3.ip) ||
		    (ct->tuplehash[dir].tuple.src.u.all !=
		     ct->tuplehash[!dir].tuple.dst.u.all)
		   )
			return ip_xfrm_me_harder(skb) == 0 ? ret : NF_DROP;
	}
#endif
	return ret;
}
/*
 * LOCAL_OUT hook: apply destination NAT to locally generated packets.
 * If DNAT changed the destination address, re-route the packet; if only
 * the port changed, re-check IPsec policy (XFRM builds only).
 */
static unsigned int
nf_nat_local_fn(unsigned int hooknum,
		struct sk_buff *skb,
		const struct net_device *in,
		const struct net_device *out,
		int (*okfn)(struct sk_buff *))
{
	const struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	unsigned int ret;

	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr) ||
	    ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;

	ret = nf_nat_fn(hooknum, skb, in, out, okfn);
	if (ret != NF_DROP && ret != NF_STOLEN &&
	    (ct = nf_ct_get(skb, &ctinfo)) != NULL) {
		enum ip_conntrack_dir dir = CTINFO2DIR(ctinfo);

		if (ct->tuplehash[dir].tuple.dst.u3.ip !=
		    ct->tuplehash[!dir].tuple.src.u3.ip) {
			/* Destination address changed: pick a new route. */
			if (ip_route_me_harder(skb, RTN_UNSPEC))
				ret = NF_DROP;
		}
#ifdef CONFIG_XFRM
		else if (ct->tuplehash[dir].tuple.dst.u.all !=
			 ct->tuplehash[!dir].tuple.src.u.all)
			/* Only the port changed: redo IPsec policy. */
			if (ip_xfrm_me_harder(skb))
				ret = NF_DROP;
#endif
	}
	return ret;
}
/* We must be after connection tracking and before packet filtering.
 * DNAT hooks (PRE_ROUTING / LOCAL_OUT) run at NAT_DST priority, SNAT
 * hooks (POST_ROUTING / LOCAL_IN) at NAT_SRC priority. */
static struct nf_hook_ops nf_nat_ops[] __read_mostly = {
	/* Before packet filtering, change destination */
	{
		.hook		= nf_nat_in,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP_PRI_NAT_DST,
	},
	/* After packet filtering, change source */
	{
		.hook		= nf_nat_out,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_POST_ROUTING,
		.priority	= NF_IP_PRI_NAT_SRC,
	},
	/* Before packet filtering, change destination */
	{
		.hook		= nf_nat_local_fn,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP_PRI_NAT_DST,
	},
	/* After packet filtering, change source */
	{
		.hook		= nf_nat_fn,
		.owner		= THIS_MODULE,
		.pf		= NFPROTO_IPV4,
		.hooknum	= NF_INET_LOCAL_IN,
		.priority	= NF_IP_PRI_NAT_SRC,
	},
};
/*
 * Module init: install the XFRM decode-session hook (if built), set up
 * the iptables nat table, then register the four netfilter hooks.
 * Failures unwind in reverse order via the goto ladder.
 */
static int __init nf_nat_standalone_init(void)
{
	int ret = 0;

	need_ipv4_conntrack();

#ifdef CONFIG_XFRM
	BUG_ON(ip_nat_decode_session != NULL);
	RCU_INIT_POINTER(ip_nat_decode_session, nat_decode_session);
#endif
	ret = nf_nat_rule_init();
	if (ret < 0) {
		pr_err("nf_nat_init: can't setup rules.\n");
		goto cleanup_decode_session;
	}
	ret = nf_register_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
	if (ret < 0) {
		pr_err("nf_nat_init: can't register hooks.\n");
		goto cleanup_rule_init;
	}
	return ret;

 cleanup_rule_init:
	nf_nat_rule_cleanup();
 cleanup_decode_session:
#ifdef CONFIG_XFRM
	RCU_INIT_POINTER(ip_nat_decode_session, NULL);
	synchronize_net();	/* wait out readers of the old pointer */
#endif
	return ret;
}
/* Module exit: mirror of init — hooks, rules, then the XFRM pointer. */
static void __exit nf_nat_standalone_fini(void)
{
	nf_unregister_hooks(nf_nat_ops, ARRAY_SIZE(nf_nat_ops));
	nf_nat_rule_cleanup();
#ifdef CONFIG_XFRM
	RCU_INIT_POINTER(ip_nat_decode_session, NULL);
	synchronize_net();	/* wait out readers of the old pointer */
#endif
	/* Conntrack caches are unregistered in nf_conntrack_cleanup */
}
module_init(nf_nat_standalone_init);
module_exit(nf_nat_standalone_fini);
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_nat");
| gpl-2.0 |
austinkelleher/linux | fs/lockd/svc4proc.c | 3913 | 14324 | /*
* linux/fs/lockd/svc4proc.c
*
* Lockd server procedures. We don't implement the NLM_*_RES
* procedures because we don't use the async procedures.
*
* Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
*/
#include <linux/types.h>
#include <linux/time.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/share.h>
#include <linux/sunrpc/svc_xprt.h>
#define NLMDBG_FACILITY NLMDBG_CLIENT
/*
* Obtain client and file from arguments
*/
/*
 * Obtain client and file from arguments.
 *
 * Looks up (and references) the NLM host for the caller and, unless
 * @filp is NULL (FREE_ALL), the NLM file for the lock's filehandle,
 * then fills in the kernel file_lock fields of the request.  On success
 * the caller owns a reference on both *hostp and *filp and must release
 * them.  Returns 0 or an NLM status.
 */
static __be32
nlm4svc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp,
			struct nlm_host **hostp, struct nlm_file **filp)
{
	struct nlm_host		*host = NULL;
	struct nlm_file		*file = NULL;
	struct nlm_lock		*lock = &argp->lock;
	__be32			error = 0;

	/* nfsd callbacks must have been installed for this procedure */
	if (!nlmsvc_ops)
		return nlm_lck_denied_nolocks;

	/* Obtain host handle */
	if (!(host = nlmsvc_lookup_host(rqstp, lock->caller, lock->len))
	 || (argp->monitor && nsm_monitor(host) < 0))
		goto no_locks;
	*hostp = host;

	/* Obtain file pointer. Not used by FREE_ALL call. */
	if (filp != NULL) {
		if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0)
			goto no_locks;
		*filp = file;

		/* Set up the missing parts of the file_lock structure */
		lock->fl.fl_file  = file->f_file;
		lock->fl.fl_owner = (fl_owner_t) host;
		lock->fl.fl_lmops = &nlmsvc_lock_operations;
	}

	return 0;

no_locks:
	nlmsvc_release_host(host);	/* tolerates NULL host */
	if (error)
		return error;
	return nlm_lck_denied_nolocks;
}
/*
* NULL: Test for presence of service
*/
/*
 * NULL: Test for presence of service — standard RPC ping, no work done.
 */
static __be32
nlm4svc_proc_null(struct svc_rqst *rqstp, void *argp, void *resp)
{
	dprintk("lockd: NULL          called\n");
	return rpc_success;
}
/*
* TEST: Check for conflicting lock
*/
/*
 * TEST: Check for conflicting lock.  Resolves host/file, asks
 * nlmsvc_testlock() for a conflict, and may defer the reply
 * (rpc_drop_reply) when the answer needs a blocking operation.
 */
static __be32
nlm4svc_proc_test(struct svc_rqst *rqstp, struct nlm_args *argp,
				         struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;
	__be32 rc = rpc_success;

	dprintk("lockd: TEST4        called\n");
	resp->cookie = argp->cookie;

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;

	/* Now check for conflicting locks */
	resp->status = nlmsvc_testlock(rqstp, file, host, &argp->lock, &resp->lock, &resp->cookie);
	if (resp->status == nlm_drop_reply)
		rc = rpc_drop_reply;
	else
		dprintk("lockd: TEST4        status %d\n", ntohl(resp->status));

	nlmsvc_release_host(host);
	nlm_release_file(file);
	return rc;
}
/*
 * LOCK: grant (or queue) a lock request via nlmsvc_lock().  May defer
 * the reply for blocking requests.  The #if 0 block is historical
 * state-mismatch handling kept for reference.
 */
static __be32
nlm4svc_proc_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
				         struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;
	__be32 rc = rpc_success;

	dprintk("lockd: LOCK          called\n");

	resp->cookie = argp->cookie;

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;

#if 0
	/* If supplied state doesn't match current state, we assume it's
	 * an old request that time-warped somehow. Any error return would
	 * do in this case because it's irrelevant anyway.
	 *
	 * NB: We don't retrieve the remote host's state yet.
	 */
	if (host->h_nsmstate && host->h_nsmstate != argp->state) {
		resp->status = nlm_lck_denied_nolocks;
	} else
#endif

	/* Now try to lock the file */
	resp->status = nlmsvc_lock(rqstp, file, host, &argp->lock,
					argp->block, &argp->cookie,
					argp->reclaim);
	if (resp->status == nlm_drop_reply)
		rc = rpc_drop_reply;
	else
		dprintk("lockd: LOCK         status %d\n", ntohl(resp->status));

	nlmsvc_release_host(host);
	nlm_release_file(file);
	return rc;
}
/*
 * CANCEL: cancel a previously queued blocking lock request.  Rejected
 * during the post-restart grace period.
 */
static __be32
nlm4svc_proc_cancel(struct svc_rqst *rqstp, struct nlm_args *argp,
				           struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: CANCEL        called\n");

	resp->cookie = argp->cookie;

	/* Don't accept requests during grace period */
	if (locks_in_grace(SVC_NET(rqstp))) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;

	/* Try to cancel request. */
	resp->status = nlmsvc_cancel_blocked(SVC_NET(rqstp), file, &argp->lock);

	dprintk("lockd: CANCEL        status %d\n", ntohl(resp->status));
	nlmsvc_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
/*
* UNLOCK: release a lock
*/
/*
 * UNLOCK: release a lock.  Rejected during the grace period (only lock
 * reclaims are allowed then).
 */
static __be32
nlm4svc_proc_unlock(struct svc_rqst *rqstp, struct nlm_args *argp,
				           struct nlm_res  *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: UNLOCK        called\n");

	resp->cookie = argp->cookie;

	/* Don't accept new lock requests during grace period */
	if (locks_in_grace(SVC_NET(rqstp))) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	if ((resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file)))
		return resp->status == nlm_drop_reply ? rpc_drop_reply :rpc_success;

	/* Now try to remove the lock */
	resp->status = nlmsvc_unlock(SVC_NET(rqstp), file, &argp->lock);

	dprintk("lockd: UNLOCK        status %d\n", ntohl(resp->status));
	nlmsvc_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
/*
* GRANTED: A server calls us to tell that a process' lock request
* was granted
*/
/*
 * GRANTED: A server calls us to tell that a process' lock request
 * was granted.  Forwarded to the client-side grant handler.
 */
static __be32
nlm4svc_proc_granted(struct svc_rqst *rqstp, struct nlm_args *argp,
					    struct nlm_res  *resp)
{
	resp->cookie = argp->cookie;

	dprintk("lockd: GRANTED       called\n");
	resp->status = nlmclnt_grant(svc_addr(rqstp), &argp->lock);
	dprintk("lockd: GRANTED       status %d\n", ntohl(resp->status));
	return rpc_success;
}
/*
* This is the generic lockd callback for async RPC calls
*/
/*
 * This is the generic lockd callback for async RPC calls: just log the
 * completion status of the callback task.
 */
static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
{
	dprintk("lockd: %5u callback returned %d\n", task->tk_pid,
			-task->tk_status);
}
/* Drop the nlm_rqst reference held for the duration of the async call. */
static void nlm4svc_callback_release(void *data)
{
	nlmsvc_release_call(data);
}
/* rpc_call_ops for the async *_RES callbacks issued by nlm4svc_callback(). */
static const struct rpc_call_ops nlm4svc_callback_ops = {
	.rpc_call_done = nlm4svc_callback_exit,
	.rpc_release = nlm4svc_callback_release,
};
/*
* `Async' versions of the above service routines. They aren't really,
* because we send the callback before the reply proper. I hope this
* doesn't break any clients.
*/
/*
 * `Async' versions of the above service routines. They aren't really,
 * because we send the callback before the reply proper. I hope this
 * doesn't break any clients.
 *
 * Runs the synchronous handler @func, then sends its result back to the
 * caller as a separate *_RES RPC.  On success the nlm_rqst reference is
 * handed to the async task and released by nlm4svc_callback_release().
 */
static __be32 nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_args *argp,
		__be32 (*func)(struct svc_rqst *, struct nlm_args *, struct nlm_res  *))
{
	struct nlm_host	*host;
	struct nlm_rqst	*call;
	__be32 stat;

	host = nlmsvc_lookup_host(rqstp,
				  argp->lock.caller,
				  argp->lock.len);
	if (host == NULL)
		return rpc_system_err;

	call = nlm_alloc_call(host);
	nlmsvc_release_host(host);	/* call holds its own host ref now */
	if (call == NULL)
		return rpc_system_err;

	stat = func(rqstp, argp, &call->a_res);
	if (stat != 0) {
		nlmsvc_release_call(call);
		return stat;
	}

	call->a_flags = RPC_TASK_ASYNC;
	if (nlm_async_reply(call, proc, &nlm4svc_callback_ops) < 0)
		return rpc_system_err;
	return rpc_success;
}
/* TEST_MSG: async variant of TEST; result is sent via TEST_RES callback. */
static __be32 nlm4svc_proc_test_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
					     void	     *resp)
{
	dprintk("lockd: TEST_MSG      called\n");
	return nlm4svc_callback(rqstp, NLMPROC_TEST_RES, argp, nlm4svc_proc_test);
}
static __be32 nlm4svc_proc_lock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
dprintk("lockd: LOCK_MSG called\n");
return nlm4svc_callback(rqstp, NLMPROC_LOCK_RES, argp, nlm4svc_proc_lock);
}
static __be32 nlm4svc_proc_cancel_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
dprintk("lockd: CANCEL_MSG called\n");
return nlm4svc_callback(rqstp, NLMPROC_CANCEL_RES, argp, nlm4svc_proc_cancel);
}
static __be32 nlm4svc_proc_unlock_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
dprintk("lockd: UNLOCK_MSG called\n");
return nlm4svc_callback(rqstp, NLMPROC_UNLOCK_RES, argp, nlm4svc_proc_unlock);
}
static __be32 nlm4svc_proc_granted_msg(struct svc_rqst *rqstp, struct nlm_args *argp,
void *resp)
{
dprintk("lockd: GRANTED_MSG called\n");
return nlm4svc_callback(rqstp, NLMPROC_GRANTED_RES, argp, nlm4svc_proc_granted);
}
/*
 * SHARE: create a DOS share or alter an existing one.  New shares are
 * refused during the server's grace period unless the client is
 * reclaiming a share it held before the reboot.
 */
static __be32
nlm4svc_proc_share(struct svc_rqst *rqstp, struct nlm_args *argp,
				          struct nlm_res *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: SHARE called\n");

	resp->cookie = argp->cookie;

	/* Don't accept new lock requests during grace period */
	if (locks_in_grace(SVC_NET(rqstp)) && !argp->reclaim) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file);
	if (resp->status != 0) {
		if (resp->status == nlm_drop_reply)
			return rpc_drop_reply;
		return rpc_success;
	}

	/* Now try to create the share */
	resp->status = nlmsvc_share_file(host, file, argp);
	dprintk("lockd: SHARE status %d\n", ntohl(resp->status));

	nlmsvc_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
/*
 * UNSHARE: Release a DOS share.  Always refused while the server is in
 * its grace period (there is no reclaim variant for unshare).
 */
static __be32
nlm4svc_proc_unshare(struct svc_rqst *rqstp, struct nlm_args *argp,
				            struct nlm_res *resp)
{
	struct nlm_host	*host;
	struct nlm_file	*file;

	dprintk("lockd: UNSHARE called\n");

	resp->cookie = argp->cookie;

	/* Don't accept requests during grace period */
	if (locks_in_grace(SVC_NET(rqstp))) {
		resp->status = nlm_lck_denied_grace_period;
		return rpc_success;
	}

	/* Obtain client and file */
	resp->status = nlm4svc_retrieve_args(rqstp, argp, &host, &file);
	if (resp->status != 0) {
		if (resp->status == nlm_drop_reply)
			return rpc_drop_reply;
		return rpc_success;
	}

	/* Now try to drop the share */
	resp->status = nlmsvc_unshare_file(host, file, argp);
	dprintk("lockd: UNSHARE status %d\n", ntohl(resp->status));

	nlmsvc_release_host(host);
	nlm_release_file(file);
	return rpc_success;
}
/*
 * NM_LOCK: Create an unmonitored lock
 */
static __be32
nlm4svc_proc_nm_lock(struct svc_rqst *rqstp, struct nlm_args *argp,
				            struct nlm_res *resp)
{
	dprintk("lockd: NM_LOCK called\n");

	/* Identical to LOCK except the client is not monitored by statd */
	argp->monitor = 0; /* just clean the monitor flag */
	return nlm4svc_proc_lock(rqstp, argp, resp);
}
/*
 * FREE_ALL: Release all locks and shares held by client
 */
static __be32
nlm4svc_proc_free_all(struct svc_rqst *rqstp, struct nlm_args *argp,
					     void *resp)
{
	struct nlm_host	*host;

	/* Obtain client */
	if (nlm4svc_retrieve_args(rqstp, argp, &host, NULL))
		/* Unknown client: nothing to free, still report success */
		return rpc_success;

	nlmsvc_free_host_resources(host);
	nlmsvc_release_host(host);
	return rpc_success;
}
/*
 * SM_NOTIFY: private callback from statd (not part of official NLM proto)
 */
static __be32
nlm4svc_proc_sm_notify(struct svc_rqst *rqstp, struct nlm_reboot *argp,
					      void *resp)
{
	dprintk("lockd: SM_NOTIFY called\n");

	/* Only the local, privileged statd may tell us a peer rebooted */
	if (!nlm_privileged_requester(rqstp)) {
		char buf[RPC_MAX_ADDRBUFLEN];
		printk(KERN_WARNING "lockd: rejected NSM callback from %s\n",
				svc_print_addr(rqstp, buf, sizeof(buf)));
		return rpc_system_err;
	}

	nlm_host_rebooted(argp);
	return rpc_success;
}
/*
 * client sent a GRANTED_RES, let's remove the associated block
 */
static __be32
nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
					        void *resp)
{
	/* Ignore the callback if lockd is not (or no longer) active */
	if (!nlmsvc_ops)
		return rpc_success;

	dprintk("lockd: GRANTED_RES called\n");

	/* Match the cookie against the pending block and dispose of it */
	nlmsvc_grant_reply(&argp->cookie, argp->status);
	return rpc_success;
}
/*
 * NLM Server procedures.
 */

/* Procedures that send no reply body encode/decode a void result */
#define nlm4svc_encode_norep nlm4svc_encode_void
#define nlm4svc_decode_norep nlm4svc_decode_void
#define nlm4svc_decode_testres nlm4svc_decode_void
#define nlm4svc_decode_lockres nlm4svc_decode_void
#define nlm4svc_decode_unlockres nlm4svc_decode_void
#define nlm4svc_decode_cancelres nlm4svc_decode_void
#define nlm4svc_decode_grantedres nlm4svc_decode_void

/* *_res callbacks other than granted_res are ignored: alias to null */
#define nlm4svc_proc_none nlm4svc_proc_null
#define nlm4svc_proc_test_res nlm4svc_proc_null
#define nlm4svc_proc_lock_res nlm4svc_proc_null
#define nlm4svc_proc_cancel_res nlm4svc_proc_null
#define nlm4svc_proc_unlock_res nlm4svc_proc_null

/* Dummy argument/result type for procedures that carry no data */
struct nlm_void { int dummy; };

/* Build one svc_procedure table entry; respsize is in 32-bit XDR words */
#define PROC(name, xargt, xrest, argt, rest, respsize) \
 { .pc_func = (svc_procfunc) nlm4svc_proc_##name, \
 .pc_decode = (kxdrproc_t) nlm4svc_decode_##xargt, \
 .pc_encode = (kxdrproc_t) nlm4svc_encode_##xrest, \
 .pc_release = NULL, \
 .pc_argsize = sizeof(struct nlm_##argt), \
 .pc_ressize = sizeof(struct nlm_##rest), \
 .pc_xdrressize = respsize, \
 }

/* Reply-size building blocks, in 32-bit XDR words */
#define Ck (1+XDR_QUADLEN(NLM_MAXCOOKIELEN)) /* cookie */
#define No (1+1024/4) /* netobj */
#define St 1 /* status */
#define Rg 4 /* range (offset + length) */

/* Procedure table indexed by NLMv4 procedure number */
struct svc_procedure nlmsvc_procedures4[] = {
  PROC(null, void, void, void, void, 1),
  PROC(test, testargs, testres, args, res, Ck+St+2+No+Rg),
  PROC(lock, lockargs, res, args, res, Ck+St),
  PROC(cancel, cancargs, res, args, res, Ck+St),
  PROC(unlock, unlockargs, res, args, res, Ck+St),
  PROC(granted, testargs, res, args, res, Ck+St),
  PROC(test_msg, testargs, norep, args, void, 1),
  PROC(lock_msg, lockargs, norep, args, void, 1),
  PROC(cancel_msg, cancargs, norep, args, void, 1),
  PROC(unlock_msg, unlockargs, norep, args, void, 1),
  PROC(granted_msg, testargs, norep, args, void, 1),
  PROC(test_res, testres, norep, res, void, 1),
  PROC(lock_res, lockres, norep, res, void, 1),
  PROC(cancel_res, cancelres, norep, res, void, 1),
  PROC(unlock_res, unlockres, norep, res, void, 1),
  PROC(granted_res, res, norep, res, void, 1),
  /* statd callback */
  PROC(sm_notify, reboot, void, reboot, void, 1),
  /* procedures 17-19 are unused placeholders */
  PROC(none, void, void, void, void, 0),
  PROC(none, void, void, void, void, 0),
  PROC(none, void, void, void, void, 0),
  PROC(share, shareargs, shareres, args, res, Ck+St+1),
  PROC(unshare, shareargs, shareres, args, res, Ck+St+1),
  PROC(nm_lock, lockargs, res, args, res, Ck+St),
  PROC(free_all, notify, void, args, void, 1),
};
| gpl-2.0 |
yank555-lu/N3-CM11 | arch/x86/kernel/time.c | 4681 | 2220 | /*
* Copyright (c) 1991,1992,1995 Linus Torvalds
* Copyright (c) 1994 Alan Modra
* Copyright (c) 1995 Markus Kuhn
* Copyright (c) 1996 Ingo Molnar
* Copyright (c) 1998 Andrea Arcangeli
* Copyright (c) 2002,2006 Vojtech Pavlik
* Copyright (c) 2003 Andi Kleen
*
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/i8253.h>
#include <linux/time.h>
#include <linux/export.h>
#include <linux/mca.h>
#include <asm/vsyscall.h>
#include <asm/x86_init.h>
#include <asm/i8259.h>
#include <asm/timer.h>
#include <asm/hpet.h>
#include <asm/time.h>
#ifdef CONFIG_X86_64
DEFINE_VVAR(volatile unsigned long, jiffies) = INITIAL_JIFFIES;
#endif
/*
 * Return the PC to attribute a profiling tick to.  When the tick lands
 * inside a lock function, report the caller's address instead so
 * profiles show who contended the lock, not the lock code itself.
 */
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	if (!user_mode_vm(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
		/* With frame pointers the return address sits just above
		 * the saved bp */
		return *(unsigned long *)(regs->bp + sizeof(long));
#else
		unsigned long *sp =
			(unsigned long *)kernel_stack_pointer(regs);
		/*
		 * Return address is either directly at stack pointer
		 * or above a saved flags. Eflags has bits 22-31 zero,
		 * kernel addresses don't.
		 */
		if (sp[0] >> 22)
			return sp[0];
		if (sp[1] >> 22)
			return sp[1];
#endif
	}
	return pc;
}
EXPORT_SYMBOL(profile_pc);
/*
 * Default timer interrupt handler for PIT/HPET
 */
static irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	/* Forward the tick to whatever clock event device is global */
	global_clock_event->event_handler(global_clock_event);

	/* MCA bus quirk: Acknowledge irq0 by setting bit 7 in port 0x61 */
	if (MCA_bus)
		outb_p(inb_p(0x61)| 0x80, 0x61);

	return IRQ_HANDLED;
}

/* irqaction for IRQ0; IRQF_TIMER keeps it out of irq balancing/suspend */
static struct irqaction irq0 = {
	.handler = timer_interrupt,
	.flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_IRQPOLL | IRQF_TIMER,
	.name = "timer"
};
/* Wire the default timer handler to IRQ0 */
void __init setup_default_timer_irq(void)
{
	setup_irq(0, &irq0);
}

/* Default timer init function */
void __init hpet_time_init(void)
{
	/* Fall back to the legacy PIT when the HPET is absent/disabled */
	if (!hpet_enable())
		setup_pit_timer();
	setup_default_timer_irq();
}

/* Runs from late_time_init: pick the platform timer, then calibrate TSC */
static __init void x86_late_time_init(void)
{
	x86_init.timers.timer_init();
	tsc_init();
}
/*
 * Initialize TSC and delay the periodic timer init to
 * late x86_late_time_init() so ioremap works.
 */
void __init time_init(void)
{
	late_time_init = x86_late_time_init;
}
| gpl-2.0 |
thanhphat11/android_kernel_pantech_910 | drivers/mtd/nand/txx9ndfmc.c | 4937 | 12325 | /*
* TXx9 NAND flash memory controller driver
* Based on RBTX49xx patch from CELF patch archive.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* (C) Copyright TOSHIBA CORPORATION 2004-2007
* All Rights Reserved.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/io.h>
#include <asm/txx9/ndfmc.h>
/* TXX9 NDFMC Registers */
#define TXX9_NDFDTR 0x00
#define TXX9_NDFMCR 0x04
#define TXX9_NDFSR 0x08
#define TXX9_NDFISR 0x0c
#define TXX9_NDFIMR 0x10
#define TXX9_NDFSPR 0x14
#define TXX9_NDFRSTR 0x18 /* not TX4939 */
/* NDFMCR : NDFMC Mode Control */
#define TXX9_NDFMCR_WE 0x80
#define TXX9_NDFMCR_ECC_ALL 0x60
#define TXX9_NDFMCR_ECC_RESET 0x60
#define TXX9_NDFMCR_ECC_READ 0x40
#define TXX9_NDFMCR_ECC_ON 0x20
#define TXX9_NDFMCR_ECC_OFF 0x00
#define TXX9_NDFMCR_CE 0x10
#define TXX9_NDFMCR_BSPRT 0x04 /* TX4925/TX4926 only */
#define TXX9_NDFMCR_ALE 0x02
#define TXX9_NDFMCR_CLE 0x01
/* TX4939 only */
#define TXX9_NDFMCR_X16 0x0400
#define TXX9_NDFMCR_DMAREQ_MASK 0x0300
#define TXX9_NDFMCR_DMAREQ_NODMA 0x0000
#define TXX9_NDFMCR_DMAREQ_128 0x0100
#define TXX9_NDFMCR_DMAREQ_256 0x0200
#define TXX9_NDFMCR_DMAREQ_512 0x0300
#define TXX9_NDFMCR_CS_MASK 0x0c
#define TXX9_NDFMCR_CS(ch) ((ch) << 2)
/* NDFMCR : NDFMC Status */
#define TXX9_NDFSR_BUSY 0x80
/* TX4939 only */
#define TXX9_NDFSR_DMARUN 0x40
/* NDFMCR : NDFMC Reset */
#define TXX9_NDFRSTR_RST 0x01
/* Per-chip (per chip-select) state */
struct txx9ndfmc_priv {
	struct platform_device *dev;	/* owning controller device */
	struct nand_chip chip;
	struct mtd_info mtd;
	int cs;				/* chip select, or -1 for single-chip setups */
	const char *mtdname;		/* kmalloc'd MTD name, freed on remove */
};

/* Maximum number of chip selects the controller supports */
#define MAX_TXX9NDFMC_DEV 4

/* Per-controller state, shared by all chips behind it */
struct txx9ndfmc_drvdata {
	struct mtd_info *mtds[MAX_TXX9NDFMC_DEV];
	void __iomem *base;		/* mapped register window */
	unsigned char hold; /* in gbusclock */
	unsigned char spw; /* in gbusclock */
	struct nand_hw_control hw_control;	/* serializes chip access */
};
/* Map an mtd_info back to the platform device that owns it. */
static struct platform_device *mtd_to_platdev(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd->priv;
	struct txx9ndfmc_priv *txx9_priv = nand->priv;

	return txx9_priv->dev;
}
/* Compute the mapped address of NDFMC register @reg, honouring the
 * platform-specified register stride (shift). */
static void __iomem *ndregaddr(struct platform_device *dev, unsigned int reg)
{
	struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
	struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);

	return drvdata->base + (reg << plat->shift);
}
/* Raw 32-bit register read */
static u32 txx9ndfmc_read(struct platform_device *dev, unsigned int reg)
{
	return __raw_readl(ndregaddr(dev, reg));
}

/* Raw 32-bit register write */
static void txx9ndfmc_write(struct platform_device *dev,
			    u32 val, unsigned int reg)
{
	__raw_writel(val, ndregaddr(dev, reg));
}

/* nand_chip.read_byte: one byte comes from the low bits of NDFDTR */
static uint8_t txx9ndfmc_read_byte(struct mtd_info *mtd)
{
	struct platform_device *dev = mtd_to_platdev(mtd);

	return txx9ndfmc_read(dev, TXX9_NDFDTR);
}
/*
 * nand_chip.write_buf: push @len bytes to the flash through NDFDTR.
 * The WE bit in NDFMCR must be asserted for the duration of the burst
 * and restored afterwards.
 */
static void txx9ndfmc_write_buf(struct mtd_info *mtd, const uint8_t *buf,
				int len)
{
	struct platform_device *dev = mtd_to_platdev(mtd);
	void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
	u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);

	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_WE, TXX9_NDFMCR);
	while (len--)
		__raw_writel(*buf++, ndfdtr);
	/* restore the original mode (drops WE) */
	txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
}
/* nand_chip.read_buf: each 32-bit read of NDFDTR yields one data byte. */
static void txx9ndfmc_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct platform_device *dev = mtd_to_platdev(mtd);
	void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);
	int i;

	for (i = 0; i < len; i++)
		buf[i] = __raw_readl(ndfdtr);
}
/*
 * nand_chip.verify_buf: re-read @len bytes and compare against @buf.
 * Returns 0 on match, -EFAULT on the first mismatch.
 */
static int txx9ndfmc_verify_buf(struct mtd_info *mtd, const uint8_t *buf,
				int len)
{
	struct platform_device *dev = mtd_to_platdev(mtd);
	void __iomem *ndfdtr = ndregaddr(dev, TXX9_NDFDTR);

	while (len--)
		if (*buf++ != (uint8_t)__raw_readl(ndfdtr))
			return -EFAULT;
	return 0;
}
/*
 * nand_chip.cmd_ctrl: drive the CLE/ALE/CE lines via NDFMCR and push
 * command/address bytes through NDFDTR.
 */
static void txx9ndfmc_cmd_ctrl(struct mtd_info *mtd, int cmd,
			       unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;
	struct txx9ndfmc_priv *txx9_priv = chip->priv;
	struct platform_device *dev = txx9_priv->dev;
	struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;

	if (ctrl & NAND_CTRL_CHANGE) {
		u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);

		/* rebuild the control-line state from @ctrl */
		mcr &= ~(TXX9_NDFMCR_CLE | TXX9_NDFMCR_ALE | TXX9_NDFMCR_CE);
		mcr |= ctrl & NAND_CLE ? TXX9_NDFMCR_CLE : 0;
		mcr |= ctrl & NAND_ALE ? TXX9_NDFMCR_ALE : 0;
		/* TXX9_NDFMCR_CE bit is 0:high 1:low */
		mcr |= ctrl & NAND_NCE ? TXX9_NDFMCR_CE : 0;
		if (txx9_priv->cs >= 0 && (ctrl & NAND_NCE)) {
			/* route the access to this chip's chip-select */
			mcr &= ~TXX9_NDFMCR_CS_MASK;
			mcr |= TXX9_NDFMCR_CS(txx9_priv->cs);
		}
		txx9ndfmc_write(dev, mcr, TXX9_NDFMCR);
	}
	if (cmd != NAND_CMD_NONE)
		txx9ndfmc_write(dev, cmd & 0xff, TXX9_NDFDTR);
	if (plat->flags & NDFMC_PLAT_FLAG_DUMMYWRITE) {
		/* dummy write to update external latch */
		if ((ctrl & NAND_CTRL_CHANGE) && cmd == NAND_CMD_NONE)
			txx9ndfmc_write(dev, 0, TXX9_NDFDTR);
	}
	/* order MMIO before any following spin_unlock etc. */
	mmiowb();
}
/* nand_chip.dev_ready: ready when the BUSY bit in NDFSR is clear */
static int txx9ndfmc_dev_ready(struct mtd_info *mtd)
{
	struct platform_device *dev = mtd_to_platdev(mtd);

	return !(txx9ndfmc_read(dev, TXX9_NDFSR) & TXX9_NDFSR_BUSY);
}
/*
 * nand_chip.ecc.calculate: read the hardware-computed ECC bytes out of
 * the controller, 3 bytes per 256 bytes of data.  The ecc_code byte
 * order ([1], [0], [2]) follows the controller's read-out order.
 */
static int txx9ndfmc_calculate_ecc(struct mtd_info *mtd, const uint8_t *dat,
		uint8_t *ecc_code)
{
	struct platform_device *dev = mtd_to_platdev(mtd);
	struct nand_chip *chip = mtd->priv;
	int eccbytes;
	u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);

	mcr &= ~TXX9_NDFMCR_ECC_ALL;
	/* turn ECC generation off, then switch to ECC read-out mode */
	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_READ, TXX9_NDFMCR);
	for (eccbytes = chip->ecc.bytes; eccbytes > 0; eccbytes -= 3) {
		ecc_code[1] = txx9ndfmc_read(dev, TXX9_NDFDTR);
		ecc_code[0] = txx9ndfmc_read(dev, TXX9_NDFDTR);
		ecc_code[2] = txx9ndfmc_read(dev, TXX9_NDFDTR);
		ecc_code += 3;
	}
	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
	return 0;
}
/*
 * nand_chip.ecc.correct: run software correction over each 256-byte
 * sub-page (3 ECC bytes each).  Returns the number of corrected bit
 * errors, or a negative value on an uncorrectable sub-page.
 */
static int txx9ndfmc_correct_data(struct mtd_info *mtd, unsigned char *buf,
		unsigned char *read_ecc, unsigned char *calc_ecc)
{
	struct nand_chip *chip = mtd->priv;
	int remaining;
	int total = 0;

	for (remaining = chip->ecc.size; remaining > 0; remaining -= 256) {
		int stat = __nand_correct_data(buf, read_ecc, calc_ecc, 256);

		if (stat < 0)
			return stat;
		total += stat;
		buf += 256;
		read_ecc += 3;
		calc_ecc += 3;
	}
	return total;
}
/*
 * nand_chip.ecc.hwctl: reset the hardware ECC engine and (re)enable it
 * before a data transfer; @mode is unused by this controller.
 */
static void txx9ndfmc_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct platform_device *dev = mtd_to_platdev(mtd);
	u32 mcr = txx9ndfmc_read(dev, TXX9_NDFMCR);

	mcr &= ~TXX9_NDFMCR_ECC_ALL;
	/* reset -> off -> on sequence restarts ECC accumulation */
	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_RESET, TXX9_NDFMCR);
	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_OFF, TXX9_NDFMCR);
	txx9ndfmc_write(dev, mcr | TXX9_NDFMCR_ECC_ON, TXX9_NDFMCR);
}
/*
 * Reset the controller (where it has a reset register) and program the
 * timing and bus-protect settings.  Also called on resume.
 */
static void txx9ndfmc_initialize(struct platform_device *dev)
{
	struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
	struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
	int tmout = 100;

	if (plat->flags & NDFMC_PLAT_FLAG_NO_RSTR)
		; /* no NDFRSTR. Write to NDFSPR resets the NDFMC. */
	else {
		/* reset NDFMC */
		txx9ndfmc_write(dev,
				txx9ndfmc_read(dev, TXX9_NDFRSTR) |
				TXX9_NDFRSTR_RST,
				TXX9_NDFRSTR);
		/* poll for self-clearing reset bit, up to ~100us */
		while (txx9ndfmc_read(dev, TXX9_NDFRSTR) & TXX9_NDFRSTR_RST) {
			if (--tmout == 0) {
				dev_err(&dev->dev, "reset failed.\n");
				break;
			}
			udelay(1);
		}
	}
	/* setup Hold Time, Strobe Pulse Width */
	txx9ndfmc_write(dev, (drvdata->hold << 4) | drvdata->spw, TXX9_NDFSPR);
	txx9ndfmc_write(dev,
			(plat->flags & NDFMC_PLAT_FLAG_USE_BSPRT) ?
			TXX9_NDFMCR_BSPRT : 0, TXX9_NDFMCR);
}
#define TXX9NDFMC_NS_TO_CYC(gbusclk, ns) \
DIV_ROUND_UP((ns) * DIV_ROUND_UP(gbusclk, 1000), 1000000)
/*
 * Two-phase scan wrapper: after identification the ECC geometry is
 * upgraded to 6 ECC bytes per 512-byte step for large-page devices,
 * then the scan is completed.
 */
static int txx9ndfmc_nand_scan(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd->priv;
	int ret;

	ret = nand_scan_ident(mtd, 1, NULL);
	if (ret)
		return ret;

	if (mtd->writesize >= 512) {
		/* Hardware ECC 6 byte ECC per 512 Byte data */
		chip->ecc.size = 512;
		chip->ecc.bytes = 6;
	}
	return nand_scan_tail(mtd);
}
/*
 * Probe: map the controller, derive timing from the gbus clock, reset
 * the hardware, then register one NAND chip per bit set in ch_mask.
 * A failure on one chip only skips that chip, never the whole probe.
 */
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
	struct txx9ndfmc_platform_data *plat = dev->dev.platform_data;
	int hold, spw;
	int i;
	struct txx9ndfmc_drvdata *drvdata;
	unsigned long gbusclk = plat->gbus_clock;
	struct resource *res;

	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	/* devm allocations/mappings are released automatically on detach */
	drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;
	drvdata->base = devm_request_and_ioremap(&dev->dev, res);
	if (!drvdata->base)
		return -EBUSY;
	/* timing defaults in ns when the platform leaves them zero */
	hold = plat->hold ?: 20; /* tDH */
	spw = plat->spw ?: 90; /* max(tREADID, tWP, tRP) */
	/* convert ns to gbus clock cycles */
	hold = TXX9NDFMC_NS_TO_CYC(gbusclk, hold);
	spw = TXX9NDFMC_NS_TO_CYC(gbusclk, spw);
	if (plat->flags & NDFMC_PLAT_FLAG_HOLDADD)
		hold -= 2; /* actual hold time : (HOLD + 2) BUSCLK */
	spw -= 1; /* actual wait time : (SPW + 1) BUSCLK */
	hold = clamp(hold, 1, 15);
	drvdata->hold = hold;
	spw = clamp(spw, 1, 15);
	drvdata->spw = spw;
	dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
		 (gbusclk + 500000) / 1000000, hold, spw);
	/* one hw_control serializes access for all chips on this controller */
	spin_lock_init(&drvdata->hw_control.lock);
	init_waitqueue_head(&drvdata->hw_control.wq);
	platform_set_drvdata(dev, drvdata);
	txx9ndfmc_initialize(dev);
	for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
		struct txx9ndfmc_priv *txx9_priv;
		struct nand_chip *chip;
		struct mtd_info *mtd;

		if (!(plat->ch_mask & (1 << i)))
			continue;
		txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
				    GFP_KERNEL);
		if (!txx9_priv) {
			dev_err(&dev->dev, "Unable to allocate "
				"TXx9 NDFMC MTD device structure.\n");
			continue;
		}
		chip = &txx9_priv->chip;
		mtd = &txx9_priv->mtd;
		mtd->owner = THIS_MODULE;
		mtd->priv = chip;
		/* hook up the controller-specific nand_chip operations */
		chip->read_byte = txx9ndfmc_read_byte;
		chip->read_buf = txx9ndfmc_read_buf;
		chip->write_buf = txx9ndfmc_write_buf;
		chip->verify_buf = txx9ndfmc_verify_buf;
		chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
		chip->dev_ready = txx9ndfmc_dev_ready;
		chip->ecc.calculate = txx9ndfmc_calculate_ecc;
		chip->ecc.correct = txx9ndfmc_correct_data;
		chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
		chip->ecc.mode = NAND_ECC_HW;
		/* txx9ndfmc_nand_scan will overwrite ecc.size and ecc.bytes */
		chip->ecc.size = 256;
		chip->ecc.bytes = 3;
		chip->ecc.strength = 1;
		chip->chip_delay = 100;
		chip->controller = &drvdata->hw_control;
		chip->priv = txx9_priv;
		txx9_priv->dev = dev;
		if (plat->ch_mask != 1) {
			/* multiple chips: suffix the name with the CS number */
			txx9_priv->cs = i;
			txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
						       dev_name(&dev->dev), i);
		} else {
			txx9_priv->cs = -1;
			txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
						     GFP_KERNEL);
		}
		if (!txx9_priv->mtdname) {
			kfree(txx9_priv);
			dev_err(&dev->dev, "Unable to allocate MTD name.\n");
			continue;
		}
		if (plat->wide_mask & (1 << i))
			chip->options |= NAND_BUSWIDTH_16;
		if (txx9ndfmc_nand_scan(mtd)) {
			kfree(txx9_priv->mtdname);
			kfree(txx9_priv);
			continue;
		}
		mtd->name = txx9_priv->mtdname;
		mtd_device_parse_register(mtd, NULL, NULL, NULL, 0);
		drvdata->mtds[i] = mtd;
	}
	return 0;
}
/*
 * Remove: unregister every successfully-probed chip and free its
 * per-chip allocations.  drvdata itself is devm-managed.
 */
static int __exit txx9ndfmc_remove(struct platform_device *dev)
{
	struct txx9ndfmc_drvdata *drvdata = platform_get_drvdata(dev);
	int i;

	platform_set_drvdata(dev, NULL);
	if (!drvdata)
		return 0;
	for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
		struct mtd_info *mtd = drvdata->mtds[i];
		struct nand_chip *chip;
		struct txx9ndfmc_priv *txx9_priv;

		/* slots that failed probe were never filled in */
		if (!mtd)
			continue;
		chip = mtd->priv;
		txx9_priv = chip->priv;

		nand_release(mtd);
		kfree(txx9_priv->mtdname);
		kfree(txx9_priv);
	}
	return 0;
}
#ifdef CONFIG_PM
/* Resume: re-run the hardware init to restore timing/reset state */
static int txx9ndfmc_resume(struct platform_device *dev)
{
	/* drvdata is only set once probe has succeeded */
	if (platform_get_drvdata(dev))
		txx9ndfmc_initialize(dev);
	return 0;
}
#else
#define txx9ndfmc_resume NULL
#endif
/* No .probe here: registration goes through platform_driver_probe(),
 * which allows the probe routine to live in __init memory */
static struct platform_driver txx9ndfmc_driver = {
	.remove = __exit_p(txx9ndfmc_remove),
	.resume = txx9ndfmc_resume,
	.driver = {
		.name = "txx9ndfmc",
		.owner = THIS_MODULE,
	},
};

static int __init txx9ndfmc_init(void)
{
	return platform_driver_probe(&txx9ndfmc_driver, txx9ndfmc_probe);
}

static void __exit txx9ndfmc_exit(void)
{
	platform_driver_unregister(&txx9ndfmc_driver);
}
module_init(txx9ndfmc_init);
module_exit(txx9ndfmc_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TXx9 SoC NAND flash controller driver");
MODULE_ALIAS("platform:txx9ndfmc");
| gpl-2.0 |
MoKee/android_kernel_sony_tianchi | drivers/mtd/nand/tmio_nand.c | 4937 | 14494 | /*
* Toshiba TMIO NAND flash controller driver
*
* Slightly murky pre-git history of the driver:
*
* Copyright (c) Ian Molton 2004, 2005, 2008
* Original work, independent of sharps code. Included hardware ECC support.
* Hard ECC did not work for writes in the early revisions.
* Copyright (c) Dirk Opfer 2005.
* Modifications developed from sharps code but
* NOT containing any, ported onto Ians base.
* Copyright (c) Chris Humbert 2005
* Copyright (c) Dmitry Baryshkov 2008
* Minor fixes
*
* Parts copyright Sebastian Carlier
*
* This file is licensed under
* the terms of the GNU General Public License version 2. This program
* is licensed "as is" without any warranty of any kind, whether express
* or implied.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/mfd/core.h>
#include <linux/mfd/tmio.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_ecc.h>
#include <linux/mtd/partitions.h>
#include <linux/slab.h>
/*--------------------------------------------------------------------------*/
/*
* NAND Flash Host Controller Configuration Register
*/
#define CCR_COMMAND 0x04 /* w Command */
#define CCR_BASE 0x10 /* l NAND Flash Control Reg Base Addr */
#define CCR_INTP 0x3d /* b Interrupt Pin */
#define CCR_INTE 0x48 /* b Interrupt Enable */
#define CCR_EC 0x4a /* b Event Control */
#define CCR_ICC 0x4c /* b Internal Clock Control */
#define CCR_ECCC 0x5b /* b ECC Control */
#define CCR_NFTC 0x60 /* b NAND Flash Transaction Control */
#define CCR_NFM 0x61 /* b NAND Flash Monitor */
#define CCR_NFPSC 0x62 /* b NAND Flash Power Supply Control */
#define CCR_NFDC 0x63 /* b NAND Flash Detect Control */
/*
* NAND Flash Control Register
*/
#define FCR_DATA 0x00 /* bwl Data Register */
#define FCR_MODE 0x04 /* b Mode Register */
#define FCR_STATUS 0x05 /* b Status Register */
#define FCR_ISR 0x06 /* b Interrupt Status Register */
#define FCR_IMR 0x07 /* b Interrupt Mask Register */
/* FCR_MODE Register Command List */
#define FCR_MODE_DATA 0x94 /* Data Data_Mode */
#define FCR_MODE_COMMAND 0x95 /* Data Command_Mode */
#define FCR_MODE_ADDRESS 0x96 /* Data Address_Mode */
#define FCR_MODE_HWECC_CALC 0xB4 /* HW-ECC Data */
#define FCR_MODE_HWECC_RESULT 0xD4 /* HW-ECC Calc result Read_Mode */
#define FCR_MODE_HWECC_RESET 0xF4 /* HW-ECC Reset */
#define FCR_MODE_POWER_ON 0x0C /* Power Supply ON to SSFDC card */
#define FCR_MODE_POWER_OFF 0x08 /* Power Supply OFF to SSFDC card */
#define FCR_MODE_LED_OFF 0x00 /* LED OFF */
#define FCR_MODE_LED_ON 0x04 /* LED ON */
#define FCR_MODE_EJECT_ON 0x68 /* Ejection events active */
#define FCR_MODE_EJECT_OFF 0x08 /* Ejection events ignored */
#define FCR_MODE_LOCK 0x6C /* Lock_Mode. Eject Switch Invalid */
#define FCR_MODE_UNLOCK 0x0C /* UnLock_Mode. Eject Switch is valid */
#define FCR_MODE_CONTROLLER_ID 0x40 /* Controller ID Read */
#define FCR_MODE_STANDBY 0x00 /* SSFDC card Changes Standby State */
#define FCR_MODE_WE 0x80
#define FCR_MODE_ECC1 0x40
#define FCR_MODE_ECC0 0x20
#define FCR_MODE_CE 0x10
#define FCR_MODE_PCNT1 0x08
#define FCR_MODE_PCNT0 0x04
#define FCR_MODE_ALE 0x02
#define FCR_MODE_CLE 0x01
#define FCR_STATUS_BUSY 0x80
/*--------------------------------------------------------------------------*/
/* Per-device state for the TMIO NAND controller */
struct tmio_nand {
	struct mtd_info mtd;
	struct nand_chip chip;
	struct platform_device *dev;
	void __iomem *ccr;		/* host controller config registers */
	void __iomem *fcr;		/* NAND flash control registers */
	unsigned long fcr_base;		/* FCR bus address programmed into CCR_BASE */
	unsigned int irq;
	/* for tmio_nand_read_byte */
	u8 read;			/* saved high byte of the last 16-bit read */
	unsigned read_good:1;		/* 1: return tmio->read instead of reading */
};

#define mtd_to_tmio(m) container_of(m, struct tmio_nand, mtd)
/*--------------------------------------------------------------------------*/
/*
 * nand_chip.cmd_ctrl: translate CLE/ALE/NCE state into the FCR mode
 * register and write command/address bytes to the data register.
 */
static void tmio_nand_hwcontrol(struct mtd_info *mtd, int cmd,
				unsigned int ctrl)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);
	struct nand_chip *chip = mtd->priv;

	if (ctrl & NAND_CTRL_CHANGE) {
		u8 mode;

		if (ctrl & NAND_NCE) {
			mode = FCR_MODE_DATA;

			if (ctrl & NAND_CLE)
				mode |= FCR_MODE_CLE;
			else
				mode &= ~FCR_MODE_CLE;

			if (ctrl & NAND_ALE)
				mode |= FCR_MODE_ALE;
			else
				mode &= ~FCR_MODE_ALE;
		} else {
			/* chip deselected: drop into standby */
			mode = FCR_MODE_STANDBY;
		}

		tmio_iowrite8(mode, tmio->fcr + FCR_MODE);
		/* mode change invalidates the buffered high byte */
		tmio->read_good = 0;
	}

	if (cmd != NAND_CMD_NONE)
		tmio_iowrite8(cmd, chip->IO_ADDR_W);
}
/* nand_chip.dev_ready: ready when the BUSY bit in FCR_STATUS is clear */
static int tmio_nand_dev_ready(struct mtd_info *mtd)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);

	return !(tmio_ioread8(tmio->fcr + FCR_STATUS) & FCR_STATUS_BUSY);
}
/*
 * RDYREQ interrupt handler: masks the interrupt again and wakes
 * whoever is sleeping in tmio_nand_wait().
 */
static irqreturn_t tmio_irq(int irq, void *__tmio)
{
	struct tmio_nand *tmio = __tmio;
	struct nand_chip *nand_chip = &tmio->chip;

	/* disable RDYREQ interrupt */
	tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);

	if (unlikely(!waitqueue_active(&nand_chip->controller->wq)))
		dev_warn(&tmio->dev->dev, "spurious interrupt\n");

	wake_up(&nand_chip->controller->wq);
	return IRQ_HANDLED;
}
/*
 *The TMIO core has a RDYREQ interrupt on the posedge of #SMRB.
 *This interrupt is normally disabled, but for long operations like
 *erase and write, we enable it to wake us up. The irq handler
 *disables the interrupt.
 */
static int
tmio_nand_wait(struct mtd_info *mtd, struct nand_chip *nand_chip)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);
	long timeout;

	/* enable RDYREQ interrupt */
	tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);
	tmio_iowrite8(0x81, tmio->fcr + FCR_IMR);

	/* 400ms budget for erase, 20ms for program */
	timeout = wait_event_timeout(nand_chip->controller->wq,
		tmio_nand_dev_ready(mtd),
		msecs_to_jiffies(nand_chip->state == FL_ERASING ? 400 : 20));

	if (unlikely(!tmio_nand_dev_ready(mtd))) {
		/* device still busy after the timeout: mask IRQ and warn */
		tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
		dev_warn(&tmio->dev->dev, "still busy with %s after %d ms\n",
			nand_chip->state == FL_ERASING ? "erase" : "program",
			nand_chip->state == FL_ERASING ? 400 : 20);

	} else if (unlikely(!timeout)) {
		/* device became ready but the wakeup never arrived */
		tmio_iowrite8(0x00, tmio->fcr + FCR_IMR);
		dev_warn(&tmio->dev->dev, "timeout waiting for interrupt\n");
	}

	/* return the NAND status byte, as the framework expects */
	nand_chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
	return nand_chip->read_byte(mtd);
}
/*
 *The TMIO controller combines two 8-bit data bytes into one 16-bit
 *word. This function separates them so nand_base.c works as expected,
 *especially its NAND_CMD_READID routines.
 *
 *To prevent stale data from being read, tmio_nand_hwcontrol() clears
 *tmio->read_good.
 */
static u_char tmio_nand_read_byte(struct mtd_info *mtd)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);
	unsigned int data;

	/* read_good is a 1-bit field: when it is 1 we return the buffered
	 * high byte and the post-decrement clears it; when it is 0 the
	 * post-decrement wraps it back to 1, flagging that the next call
	 * must consume the buffered byte from the fresh read below */
	if (tmio->read_good--)
		return tmio->read;

	data = tmio_ioread16(tmio->fcr + FCR_DATA);
	tmio->read = data >> 8;		/* stash high byte for next call */
	return data;			/* low byte now */
}
/*
 *The TMIO controller converts an 8-bit NAND interface to a 16-bit
 *bus interface, so all data reads and writes must be 16-bit wide.
 *Thus, we implement 16-bit versions of the read, write, and verify
 *buffer functions.
 */
static void
tmio_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);

	/* len is in bytes; the burst helper takes a 16-bit word count */
	tmio_iowrite16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
}

static void tmio_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);

	tmio_ioread16_rep(tmio->fcr + FCR_DATA, buf, len >> 1);
}
/*
 * nand_chip.verify_buf: re-read the data as 16-bit words and compare
 * against @buf.  Returns 0 on match, -EFAULT on the first mismatch.
 */
static int
tmio_nand_verify_buf(struct mtd_info *mtd, const u_char *buf, int len)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);
	const u16 *expected = (const u16 *) buf;
	int words = len >> 1;

	while (words--) {
		if (tmio_ioread16(tmio->fcr + FCR_DATA) != *expected++)
			return -EFAULT;
	}
	return 0;
}
/* nand_chip.ecc.hwctl: reset the ECC engine and start accumulation */
static void tmio_nand_enable_hwecc(struct mtd_info *mtd, int mode)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);

	tmio_iowrite8(FCR_MODE_HWECC_RESET, tmio->fcr + FCR_MODE);
	tmio_ioread8(tmio->fcr + FCR_DATA);	/* dummy read */
	tmio_iowrite8(FCR_MODE_HWECC_CALC, tmio->fcr + FCR_MODE);
}
/*
 * nand_chip.ecc.calculate: read back the 6 hardware-computed ECC bytes
 * (3 per 256-byte half) from three 16-bit data-register reads.  The
 * ecc_code byte placement follows the controller's packing, noted per
 * line below.
 */
static int tmio_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat,
							u_char *ecc_code)
{
	struct tmio_nand *tmio = mtd_to_tmio(mtd);
	unsigned int ecc;

	tmio_iowrite8(FCR_MODE_HWECC_RESULT, tmio->fcr + FCR_MODE);

	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
	ecc_code[1] = ecc;	/* 000-255 LP7-0 */
	ecc_code[0] = ecc >> 8;	/* 000-255 LP15-8 */
	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
	ecc_code[2] = ecc;	/* 000-255 CP5-0,11b */
	ecc_code[4] = ecc >> 8;	/* 256-511 LP7-0 */
	ecc = tmio_ioread16(tmio->fcr + FCR_DATA);
	ecc_code[3] = ecc;	/* 256-511 LP15-8 */
	ecc_code[5] = ecc >> 8;	/* 256-511 CP5-0,11b */

	/* back to normal data mode */
	tmio_iowrite8(FCR_MODE_DATA, tmio->fcr + FCR_MODE);
	return 0;
}
/*
 * nand_chip.ecc.correct: software correction over two 256-byte halves
 * (3 ECC bytes each).  Returns the number of corrected bit errors, or
 * a negative value if either half is uncorrectable.
 */
static int tmio_nand_correct_data(struct mtd_info *mtd, unsigned char *buf,
		unsigned char *read_ecc, unsigned char *calc_ecc)
{
	int corrected = 0;
	int half;

	/* assume ecc.size = 512 and ecc.bytes = 6 */
	for (half = 0; half < 2; half++) {
		int stat = __nand_correct_data(buf + half * 256,
					       read_ecc + half * 3,
					       calc_ecc + half * 3, 256);
		if (stat < 0)
			return stat;
		corrected += stat;
	}
	return corrected;
}
/*
 * Power up and configure the TMIO cell, point its FCR base register at
 * the flash control block, reset the media, and leave it in standby.
 */
static int tmio_hw_init(struct platform_device *dev, struct tmio_nand *tmio)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);
	int ret;

	/* let the MFD core power/clock the cell first, if it provides one */
	if (cell->enable) {
		ret = cell->enable(dev);
		if (ret)
			return ret;
	}

	/* (4Ch) CLKRUN Enable 1st spcrunc */
	tmio_iowrite8(0x81, tmio->ccr + CCR_ICC);

	/* (10h)BaseAddress 0x1000 spba.spba2 */
	tmio_iowrite16(tmio->fcr_base, tmio->ccr + CCR_BASE);
	tmio_iowrite16(tmio->fcr_base >> 16, tmio->ccr + CCR_BASE + 2);

	/* (04h)Command Register I/O spcmd */
	tmio_iowrite8(0x02, tmio->ccr + CCR_COMMAND);

	/* (62h) Power Supply Control ssmpwc */
	/* HardPowerOFF - SuspendOFF - PowerSupplyWait_4MS */
	tmio_iowrite8(0x02, tmio->ccr + CCR_NFPSC);

	/* (63h) Detect Control ssmdtc */
	tmio_iowrite8(0x02, tmio->ccr + CCR_NFDC);

	/* Interrupt status register clear sintst */
	tmio_iowrite8(0x0f, tmio->fcr + FCR_ISR);

	/* After power supply, Media are reset smode */
	tmio_iowrite8(FCR_MODE_POWER_ON, tmio->fcr + FCR_MODE);
	tmio_iowrite8(FCR_MODE_COMMAND, tmio->fcr + FCR_MODE);
	tmio_iowrite8(NAND_CMD_RESET, tmio->fcr + FCR_DATA);

	/* Standby Mode smode */
	tmio_iowrite8(FCR_MODE_STANDBY, tmio->fcr + FCR_MODE);

	mdelay(5);

	return 0;
}
/* Power the media off, then let the MFD core shut the cell down */
static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);

	tmio_iowrite8(FCR_MODE_POWER_OFF, tmio->fcr + FCR_MODE);
	if (cell->disable)
		cell->disable(dev);
}
static int tmio_probe(struct platform_device *dev)
{
struct tmio_nand_data *data = dev->dev.platform_data;
struct resource *fcr = platform_get_resource(dev,
IORESOURCE_MEM, 0);
struct resource *ccr = platform_get_resource(dev,
IORESOURCE_MEM, 1);
int irq = platform_get_irq(dev, 0);
struct tmio_nand *tmio;
struct mtd_info *mtd;
struct nand_chip *nand_chip;
int retval;
if (data == NULL)
dev_warn(&dev->dev, "NULL platform data!\n");
tmio = kzalloc(sizeof *tmio, GFP_KERNEL);
if (!tmio) {
retval = -ENOMEM;
goto err_kzalloc;
}
tmio->dev = dev;
platform_set_drvdata(dev, tmio);
mtd = &tmio->mtd;
nand_chip = &tmio->chip;
mtd->priv = nand_chip;
mtd->name = "tmio-nand";
tmio->ccr = ioremap(ccr->start, resource_size(ccr));
if (!tmio->ccr) {
retval = -EIO;
goto err_iomap_ccr;
}
tmio->fcr_base = fcr->start & 0xfffff;
tmio->fcr = ioremap(fcr->start, resource_size(fcr));
if (!tmio->fcr) {
retval = -EIO;
goto err_iomap_fcr;
}
retval = tmio_hw_init(dev, tmio);
if (retval)
goto err_hwinit;
/* Set address of NAND IO lines */
nand_chip->IO_ADDR_R = tmio->fcr;
nand_chip->IO_ADDR_W = tmio->fcr;
/* Set address of hardware control function */
nand_chip->cmd_ctrl = tmio_nand_hwcontrol;
nand_chip->dev_ready = tmio_nand_dev_ready;
nand_chip->read_byte = tmio_nand_read_byte;
nand_chip->write_buf = tmio_nand_write_buf;
nand_chip->read_buf = tmio_nand_read_buf;
nand_chip->verify_buf = tmio_nand_verify_buf;
/* set eccmode using hardware ECC */
nand_chip->ecc.mode = NAND_ECC_HW;
nand_chip->ecc.size = 512;
nand_chip->ecc.bytes = 6;
nand_chip->ecc.strength = 2;
nand_chip->ecc.hwctl = tmio_nand_enable_hwecc;
nand_chip->ecc.calculate = tmio_nand_calculate_ecc;
nand_chip->ecc.correct = tmio_nand_correct_data;
if (data)
nand_chip->badblock_pattern = data->badblock_pattern;
/* 15 us command delay time */
nand_chip->chip_delay = 15;
retval = request_irq(irq, &tmio_irq,
IRQF_DISABLED, dev_name(&dev->dev), tmio);
if (retval) {
dev_err(&dev->dev, "request_irq error %d\n", retval);
goto err_irq;
}
tmio->irq = irq;
nand_chip->waitfunc = tmio_nand_wait;
/* Scan to find existence of the device */
if (nand_scan(mtd, 1)) {
retval = -ENODEV;
goto err_scan;
}
/* Register the partitions */
retval = mtd_device_parse_register(mtd, NULL, NULL,
data ? data->partition : NULL,
data ? data->num_partitions : 0);
if (!retval)
return retval;
nand_release(mtd);
err_scan:
if (tmio->irq)
free_irq(tmio->irq, tmio);
err_irq:
tmio_hw_stop(dev, tmio);
err_hwinit:
iounmap(tmio->fcr);
err_iomap_fcr:
iounmap(tmio->ccr);
err_iomap_ccr:
kfree(tmio);
err_kzalloc:
return retval;
}
/*
 * Tear down on device removal: unregister the MTD, release the IRQ,
 * power down the controller and unmap/free everything.  The order
 * mirrors the error unwind in tmio_probe() and must be preserved.
 */
static int tmio_remove(struct platform_device *dev)
{
	struct tmio_nand *tmio = platform_get_drvdata(dev);

	nand_release(&tmio->mtd);
	if (tmio->irq)
		free_irq(tmio->irq, tmio);
	tmio_hw_stop(dev, tmio);
	iounmap(tmio->fcr);
	iounmap(tmio->ccr);
	kfree(tmio);
	return 0;
}
#ifdef CONFIG_PM
/*
 * Suspend: let the MFD parent run its per-cell suspend hook, then power
 * the NAND controller down.  Always reports success.
 */
static int tmio_suspend(struct platform_device *dev, pm_message_t state)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);

	if (cell->suspend)
		cell->suspend(dev);

	tmio_hw_stop(dev, platform_get_drvdata(dev));
	return 0;
}

/*
 * Resume: re-run the full hardware init (the registers are assumed lost
 * across suspend), then the parent's resume hook.  Return value of
 * tmio_hw_init() is ignored here — original behavior, kept as-is.
 */
static int tmio_resume(struct platform_device *dev)
{
	const struct mfd_cell *cell = mfd_get_cell(dev);

	/* FIXME - is this required or merely another attack of the broken
	 * SHARP platform? Looks suspicious.
	 */
	tmio_hw_init(dev, platform_get_drvdata(dev));

	if (cell->resume)
		cell->resume(dev);

	return 0;
}
#else
/* No power management: leave the driver's PM hooks unset. */
#define tmio_suspend NULL
#define tmio_resume NULL
#endif
/* Platform driver glue; matches devices named "tmio-nand". */
static struct platform_driver tmio_driver = {
	.driver.name	= "tmio-nand",
	.driver.owner	= THIS_MODULE,
	.probe		= tmio_probe,
	.remove		= tmio_remove,
	.suspend	= tmio_suspend,
	.resume		= tmio_resume,
};

module_platform_driver(tmio_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Ian Molton, Dirk Opfer, Chris Humbert, Dmitry Baryshkov");
MODULE_DESCRIPTION("NAND flash driver on Toshiba Mobile IO controller");
MODULE_ALIAS("platform:tmio-nand");
| gpl-2.0 |
charles1018/Nexus_5 | drivers/char/nwflash.c | 4937 | 13964 | /*
* Flash memory interface rev.5 driver for the Intel
* Flash chips used on the NetWinder.
*
* 20/08/2000 RMK use __ioremap to map flash into virtual memory
* make a few more places use "volatile"
* 22/05/2001 RMK - Lock read against write
* - merge printk level changes (with mods) from Alan Cox.
* - use *ppos as the file position, not file->f_pos.
* - fix check for out of range pos and r/w size
*
* Please note that we are tampering with the only flash chip in the
* machine, which contains the bootup code. We therefore have the
* power to convert these machines into doorstops...
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/init.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <asm/hardware/dec21285.h>
#include <asm/io.h>
#include <asm/leds.h>
#include <asm/mach-types.h>
#include <asm/uaccess.h>
/*****************************************************************************/
#include <asm/nwflash.h>
#define NWFLASH_VERSION "6.4"
static DEFINE_MUTEX(flash_mutex);
static void kick_open(void);
static int get_flash_id(void);
static int erase_block(int nBlock);
static int write_block(unsigned long p, const char __user *buf, int count);
#define KFLASH_SIZE 1024*1024 //1 Meg
#define KFLASH_SIZE4 4*1024*1024 //4 Meg
#define KFLASH_ID 0x89A6 //Intel flash
#define KFLASH_ID4 0xB0D4 //Intel flash 4Meg
static bool flashdebug; //if set - we will display progress msgs
static int gbWriteEnable;
static int gbWriteBase64Enable;
static volatile unsigned char *FLASH_BASE;
static int gbFlashSize = KFLASH_SIZE;
static DEFINE_MUTEX(nwflash_mutex);
/*
 * Read the flash chip's manufacturer/device ID word.
 * Issues the Intel "read identifier" command (0x90), assembles the two
 * ID bytes into a 16-bit value, and restores the chip to read-array
 * mode (0xFF).  As a side effect, sets gbFlashSize to 4MB if the 4MB
 * part is detected.  The inb(0x80) reads look like bus-settling dummy
 * I/O — NOTE(review): presumed, not documented here.
 */
static int get_flash_id(void)
{
	volatile unsigned int c1, c2;

	/*
	 * try to get flash chip ID
	 */
	kick_open();
	c2 = inb(0x80);
	*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x90;
	udelay(15);
	c1 = *(volatile unsigned char *) FLASH_BASE;
	c2 = inb(0x80);

	/*
	 * on 4 Meg flash the second byte is actually at offset 2...
	 */
	if (c1 == 0xB0)
		c2 = *(volatile unsigned char *) (FLASH_BASE + 2);
	else
		c2 = *(volatile unsigned char *) (FLASH_BASE + 1);

	/* combine manufacturer (high) and device (low) bytes */
	c2 += (c1 << 8);

	/*
	 * set it back to read mode
	 */
	*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;

	if (c2 == KFLASH_ID4)
		gbFlashSize = KFLASH_SIZE4;

	return c2;
}
/*
 * ioctl handler: toggles the two write-enable flags under flash_mutex.
 * CMD_WRITE_DISABLE clears both enables; CMD_WRITE_ENABLE permits writes
 * above 64K; CMD_WRITE_BASE64K_ENABLE additionally permits writes to the
 * first 64K.  Any unknown command clears both flags and fails -EINVAL.
 */
static long flash_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	long ret = 0;

	mutex_lock(&flash_mutex);
	switch (cmd) {
	case CMD_WRITE_DISABLE:
		gbWriteBase64Enable = 0;
		gbWriteEnable = 0;
		break;
	case CMD_WRITE_ENABLE:
		gbWriteEnable = 1;
		break;
	case CMD_WRITE_BASE64K_ENABLE:
		gbWriteBase64Enable = 1;
		break;
	default:
		/* unknown command: fail safe by revoking write access */
		gbWriteBase64Enable = 0;
		gbWriteEnable = 0;
		ret = -EINVAL;
		break;
	}
	mutex_unlock(&flash_mutex);
	return ret;
}
/*
 * Read from the memory-mapped flash.  The flash is treated as a simple
 * in-memory buffer of gbFlashSize bytes; simple_read_from_buffer() does
 * the bounds checking, copy_to_user and *ppos update.  Serialised
 * against writers via nwflash_mutex.
 */
static ssize_t flash_read(struct file *file, char __user *buf, size_t size,
			  loff_t *ppos)
{
	ssize_t ret;

	if (flashdebug)
		printk(KERN_DEBUG "flash_read: flash_read: offset=0x%llx, "
		       "buffer=%p, count=0x%zx.\n", *ppos, buf, size);
	/*
	 * We now lock against reads and writes. --rmk
	 */
	if (mutex_lock_interruptible(&nwflash_mutex))
		return -ERESTARTSYS;

	ret = simple_read_from_buffer(buf, size, ppos, (void *)FLASH_BASE, gbFlashSize);
	mutex_unlock(&nwflash_mutex);

	return ret;
}
/*
 * Write to the flash.  Writes are only permitted once enabled via ioctl,
 * and writes into the first 64K (which holds the boot code) need the
 * separate base-64K enable.  The affected 64K block(s) are erased and
 * then programmed; both erase and program are retried up to 10 times
 * before giving up.  Returns the number of bytes written (possibly
 * short on error) or a negative errno for invalid requests.
 */
static ssize_t flash_write(struct file *file, const char __user *buf,
			   size_t size, loff_t * ppos)
{
	unsigned long p = *ppos;
	unsigned int count = size;
	int written;
	int nBlock, temp, rc;
	int i, j;

	if (flashdebug)
		printk("flash_write: offset=0x%lX, buffer=0x%p, count=0x%X.\n",
		       p, buf, count);

	if (!gbWriteEnable)
		return -EINVAL;

	/* protect the boot area unless explicitly enabled */
	if (p < 64 * 1024 && (!gbWriteBase64Enable))
		return -EINVAL;

	/*
	 * check for out of range pos or count
	 */
	if (p >= gbFlashSize)
		return count ? -ENXIO : 0;

	if (count > gbFlashSize - p)
		count = gbFlashSize - p;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/*
	 * We now lock against reads and writes. --rmk
	 */
	if (mutex_lock_interruptible(&nwflash_mutex))
		return -ERESTARTSYS;

	written = 0;

	leds_event(led_claim);
	leds_event(led_green_on);

	nBlock = (int) p >> 16;	//block # of 64K bytes

	/*
	 * # of 64K blocks to erase and write
	 */
	temp = ((int) (p + count) >> 16) - nBlock + 1;

	/*
	 * write ends at exactly 64k boundary?
	 */
	if (((int) (p + count) & 0xFFFF) == 0)
		temp -= 1;

	if (flashdebug)
		printk(KERN_DEBUG "flash_write: writing %d block(s) "
		       "starting at %d.\n", temp, nBlock);

	for (; temp; temp--, nBlock++) {
		if (flashdebug)
			printk(KERN_DEBUG "flash_write: erasing block %d.\n", nBlock);

		/*
		 * first we have to erase the block(s), where we will write...
		 */
		i = 0;
		j = 0;
	      RetryBlock:
		do {
			rc = erase_block(nBlock);
			i++;
		} while (rc && i < 10);

		if (rc) {
			printk(KERN_ERR "flash_write: erase error %x\n", rc);
			break;
		}
		if (flashdebug)
			printk(KERN_DEBUG "flash_write: writing offset %lX, "
			       "from buf %p, bytes left %X.\n", p, buf,
			       count - written);

		/*
		 * write_block will limit write to space left in this block
		 */
		rc = write_block(p, buf, count - written);
		j++;

		/*
		 * if somehow write verify failed? Can't happen??
		 */
		if (!rc) {
			/*
			 * retry up to 10 times
			 */
			if (j < 10)
				goto RetryBlock;
			else
				/*
				 * else quit with error...
				 */
				rc = -1;
		}
		if (rc < 0) {
			printk(KERN_ERR "flash_write: write error %X\n", rc);
			break;
		}
		/* advance source, destination and file position */
		p += rc;
		buf += rc;
		written += rc;
		*ppos += rc;

		if (flashdebug)
			printk(KERN_DEBUG "flash_write: written 0x%X bytes OK.\n", written);
	}

	/*
	 * restore reg on exit
	 */
	leds_event(led_release);

	mutex_unlock(&nwflash_mutex);

	return written;
}
/*
* The memory devices use the full 32/64 bits of the offset, and so we cannot
* check against negative addresses: they are ok. The return value is weird,
* though, in that case (0).
*
* also note that seeking relative to the "end of file" isn't supported:
* it has no meaning, so it returns -EINVAL.
*/
/*
 * llseek for the flash device.  Only SEEK_SET (0) and SEEK_CUR (1) are
 * supported; seeking relative to end-of-file has no meaning here, so
 * anything else — and any out-of-range result — yields -EINVAL.
 */
static loff_t flash_llseek(struct file *file, loff_t offset, int orig)
{
	loff_t ret;

	mutex_lock(&flash_mutex);
	if (flashdebug)
		printk(KERN_DEBUG "flash_llseek: offset=0x%X, orig=0x%X.\n",
		       (unsigned int) offset, orig);

	switch (orig) {
	case 0:
		/* absolute: must lie within [0, gbFlashSize] */
		if (offset < 0 || (unsigned int) offset > gbFlashSize) {
			ret = -EINVAL;
			break;
		}
		file->f_pos = (unsigned int) offset;
		ret = file->f_pos;
		break;
	case 1:
		/* relative: resulting position must stay in range */
		if ((file->f_pos + offset) > gbFlashSize ||
		    (file->f_pos + offset) < 0) {
			ret = -EINVAL;
			break;
		}
		file->f_pos += offset;
		ret = file->f_pos;
		break;
	default:
		ret = -EINVAL;
	}
	mutex_unlock(&flash_mutex);
	return ret;
}
/*
* assume that main Write routine did the parameter checking...
* so just go ahead and erase, what requested!
*/
/*
 * Erase one 64K flash block.  Issues the Intel erase (0x20) + confirm
 * (0xD0) sequence, polls the status register for up to 10 seconds for
 * the ready bit (0x80), returns the chip to read-array mode and then
 * verifies the whole block really reads back as 0xFFFFFFFF words.
 * Returns 0 on success, -2 on a reported erase error, -1 on verify
 * failure.  The caller (flash_write) has already validated nBlock.
 */
static int erase_block(int nBlock)
{
	volatile unsigned int c1;
	volatile unsigned char *pWritePtr;
	unsigned long timeout;
	int temp, temp1;

	/*
	 * orange LED == erase
	 */
	leds_event(led_amber_on);

	/*
	 * reset footbridge to the correct offset 0 (...0..3)
	 */
	*CSR_ROMWRITEREG = 0;

	/*
	 * dummy ROM read
	 */
	c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);

	kick_open();
	/*
	 * reset status if old errors
	 */
	*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;

	/*
	 * erase a block...
	 * aim at the middle of a current block...
	 */
	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + 0x8000 + (nBlock << 16)));
	/*
	 * dummy read
	 */
	c1 = *pWritePtr;

	kick_open();
	/*
	 * erase
	 */
	*(volatile unsigned char *) pWritePtr = 0x20;

	/*
	 * confirm
	 */
	*(volatile unsigned char *) pWritePtr = 0xD0;

	/*
	 * wait 10 ms
	 */
	msleep(10);

	/*
	 * wait while erasing in process (up to 10 sec)
	 */
	timeout = jiffies + 10 * HZ;
	c1 = 0;
	while (!(c1 & 0x80) && time_before(jiffies, timeout)) {
		msleep(10);
		/*
		 * read any address
		 */
		c1 = *(volatile unsigned char *) (pWritePtr);
		//              printk("Flash_erase: status=%X.\n",c1);
	}

	/*
	 * set flash for normal read access
	 */
	kick_open();
//      *(volatile unsigned char*)(FLASH_BASE+0x8000) = 0xFF;
	*(volatile unsigned char *) pWritePtr = 0xFF;	//back to normal operation

	/*
	 * check if erase errors were reported
	 */
	if (c1 & 0x20) {
		printk(KERN_ERR "flash_erase: err at %p\n", pWritePtr);

		/*
		 * reset error
		 */
		*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;
		return -2;
	}

	/*
	 * just to make sure - verify if erased OK...
	 */
	msleep(10);

	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + (nBlock << 16)));

	/* read the block back word by word: 16K words == 64K bytes */
	for (temp = 0; temp < 16 * 1024; temp++, pWritePtr += 4) {
		if ((temp1 = *(volatile unsigned int *) pWritePtr) != 0xFFFFFFFF) {
			printk(KERN_ERR "flash_erase: verify err at %p = %X\n",
			       pWritePtr, temp1);
			return -1;
		}
	}

	return 0;
}
/*
* write_block will limit number of bytes written to the space in this block
*/
/*
 * Program bytes into the flash starting at offset @p, clamped so the
 * write never crosses the current 64K block boundary.  Each byte is
 * programmed with the Intel 0x40 command and its status polled (up to
 * 1 sec per byte, 30 sec per block), retrying on timeout or reported
 * error.  Afterwards the block is read back and verified against the
 * user buffer.  Returns the number of bytes written, 0 on a verify
 * mismatch (the caller retries the whole block), -EFAULT on a bad user
 * pointer, or -2 on a hard timeout.
 *
 * Only code change vs. the original: the retry debug printk ended in a
 * literal ")n" instead of the intended ")\n" newline escape.
 */
static int write_block(unsigned long p, const char __user *buf, int count)
{
	volatile unsigned int c1;
	volatile unsigned int c2;
	unsigned char *pWritePtr;
	unsigned int uAddress;
	unsigned int offset;
	unsigned long timeout;
	unsigned long timeout1;

	/*
	 * red LED == write
	 */
	leds_event(led_amber_off);
	leds_event(led_red_on);

	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));

	/*
	 * check if write will end in this block....
	 */
	offset = p & 0xFFFF;

	if (offset + count > 0x10000)
		count = 0x10000 - offset;

	/*
	 * wait up to 30 sec for this block
	 */
	timeout = jiffies + 30 * HZ;

	for (offset = 0; offset < count; offset++, pWritePtr++) {
		/* word-aligned address used for command/data cycles */
		uAddress = (unsigned int) pWritePtr;
		uAddress &= 0xFFFFFFFC;
		if (__get_user(c2, buf + offset))
			return -EFAULT;

	      WriteRetry:
	  	/*
		 * dummy read
		 */
		c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);

		/*
		 * kick open the write gate
		 */
		kick_open();

		/*
		 * program footbridge to the correct offset...0..3
		 */
		*CSR_ROMWRITEREG = (unsigned int) pWritePtr & 3;

		/*
		 * write cmd
		 */
		*(volatile unsigned char *) (uAddress) = 0x40;

		/*
		 * data to write
		 */
		*(volatile unsigned char *) (uAddress) = c2;

		/*
		 * get status
		 */
		*(volatile unsigned char *) (FLASH_BASE + 0x10000) = 0x70;

		c1 = 0;

		/*
		 * wait up to 1 sec for this byte
		 */
		timeout1 = jiffies + 1 * HZ;

		/*
		 * while not ready...
		 */
		while (!(c1 & 0x80) && time_before(jiffies, timeout1))
			c1 = *(volatile unsigned char *) (FLASH_BASE + 0x8000);

		/*
		 * if timeout getting status
		 */
		if (time_after_eq(jiffies, timeout1)) {
			kick_open();
			/*
			 * reset err
			 */
			*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;

			goto WriteRetry;
		}
		/*
		 * switch on read access, as a default flash operation mode
		 */
		kick_open();
		/*
		 * read access
		 */
		*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0xFF;

		/*
		 * if hardware reports an error writing, and not timeout -
		 * reset the chip and retry
		 */
		if (c1 & 0x10) {
			kick_open();
			/*
			 * reset err
			 */
			*(volatile unsigned char *) (FLASH_BASE + 0x8000) = 0x50;

			/*
			 * before timeout?
			 */
			if (time_before(jiffies, timeout)) {
				if (flashdebug)
					printk(KERN_DEBUG "write_block: Retrying write at 0x%X)\n",
					       pWritePtr - FLASH_BASE);

				/*
				 * no LED == waiting
				 */
				leds_event(led_amber_off);
				/*
				 * wait couple ms
				 */
				msleep(10);
				/*
				 * red LED == write
				 */
				leds_event(led_red_on);

				goto WriteRetry;
			} else {
				printk(KERN_ERR "write_block: timeout at 0x%X\n",
				       pWritePtr - FLASH_BASE);
				/*
				 * return error -2
				 */
				return -2;
			}
		}
	}

	/*
	 * green LED == read/verify
	 */
	leds_event(led_amber_off);
	leds_event(led_green_on);

	msleep(10);

	pWritePtr = (unsigned char *) ((unsigned int) (FLASH_BASE + p));

	/* verify what we just programmed against the user buffer */
	for (offset = 0; offset < count; offset++) {
		char c, c1;
		if (__get_user(c, buf))
			return -EFAULT;
		buf++;
		if ((c1 = *pWritePtr++) != c) {
			printk(KERN_ERR "write_block: verify error at 0x%X (%02X!=%02X)\n",
			       pWritePtr - FLASH_BASE, c1, c);
			return 0;
		}
	}

	return count;
}
/*
 * Pulse the CPLD write-enable bit to open the flash write gate.
 * The gate stays open for roughly the next 2ms after the bit pattern
 * XXX1 is written to the Xilinx; the trailing udelay lets the (slow)
 * ISA bus catch up before the caller touches the flash.
 */
static void kick_open(void)
{
	unsigned long irq_state;

	spin_lock_irqsave(&nw_gpio_lock, irq_state);
	nw_cpld_modify(CPLD_FLASH_WR_ENABLE, CPLD_FLASH_WR_ENABLE);
	spin_unlock_irqrestore(&nw_gpio_lock, irq_state);

	udelay(25);
}
static const struct file_operations flash_fops =
{
.owner = THIS_MODULE,
.llseek = flash_llseek,
.read = flash_read,
.write = flash_write,
.unlocked_ioctl = flash_ioctl,
};
static struct miscdevice flash_miscdev =
{
FLASH_MINOR,
"nwflash",
&flash_fops
};
/*
 * Module init: only on a NetWinder, map the flash window (always the
 * 4MB size; gbFlashSize is corrected by get_flash_id()), validate the
 * chip ID and register the misc device.  Returns 0 on success,
 * -ENODEV on the wrong machine, -ENXIO on an unknown chip, or the
 * misc_register() error.
 *
 * Fix: the two printk calls were missing explicit log levels; they now
 * use KERN_ERR / KERN_INFO per kernel convention.
 */
static int __init nwflash_init(void)
{
	int ret = -ENODEV;

	if (machine_is_netwinder()) {
		int id;

		FLASH_BASE = ioremap(DC21285_FLASH, KFLASH_SIZE4);
		if (!FLASH_BASE)
			goto out;

		id = get_flash_id();
		if ((id != KFLASH_ID) && (id != KFLASH_ID4)) {
			ret = -ENXIO;
			iounmap((void *)FLASH_BASE);
			printk(KERN_ERR "Flash: incorrect ID 0x%04X.\n", id);
			goto out;
		}

		printk(KERN_INFO "Flash ROM driver v.%s, flash device ID 0x%04X, size %d Mb.\n",
		       NWFLASH_VERSION, id, gbFlashSize / (1024 * 1024));

		ret = misc_register(&flash_miscdev);
		if (ret < 0) {
			iounmap((void *)FLASH_BASE);
		}
	}
out:
	return ret;
}
/* Module exit: unregister the misc device and unmap the flash window. */
static void __exit nwflash_exit(void)
{
	misc_deregister(&flash_miscdev);
	iounmap((void *)FLASH_BASE);
}
MODULE_LICENSE("GPL");
module_param(flashdebug, bool, 0644);
module_init(nwflash_init);
module_exit(nwflash_exit);
| gpl-2.0 |
lawnn/kernel10c | sound/pci/lola/lola_pcm.c | 8009 | 18922 | /*
* Support for Digigram Lola PCI-e boards
*
* Copyright (c) 2011 Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "lola.h"
#define LOLA_MAX_BDL_ENTRIES 8
#define LOLA_MAX_BUF_SIZE (1024*1024*1024)
#define LOLA_BDL_ENTRY_SIZE (16 * 16)
/* Map a substream to its chip's per-direction lola_pcm structure. */
static struct lola_pcm *lola_get_pcm(struct snd_pcm_substream *substream)
{
	struct lola *dev = snd_pcm_substream_chip(substream);
	return dev->pcm + substream->stream;
}
/* Map a substream to its lola_stream slot, indexed by substream number. */
static struct lola_stream *lola_get_stream(struct snd_pcm_substream *substream)
{
	struct lola_pcm *p = lola_get_pcm(substream);
	return p->streams + substream->number;
}
/* Read the hardware LRC (sample counter) register from BAR1. */
static unsigned int lola_get_lrc(struct lola *chip)
{
	return lola_readl(chip, BAR1, LRC);
}
/*
 * Compute a start/stop timestamp from the current LRC value.  When the
 * chip has a granularity configured, the counter (LRC >> 8) is rounded
 * up to the next granularity boundary, optionally padded by 8 extra
 * "wait banks" when synchronised start is required (quick_no_sync is
 * false).  The result is shifted back into LRC format (<< 8).
 * NOTE(review): the low 8 bits of LRC appear to be non-counter bits —
 * assumed from the >>8 / <<8 handling; confirm against hardware docs.
 */
static unsigned int lola_get_tstamp(struct lola *chip, bool quick_no_sync)
{
	unsigned int tstamp = lola_get_lrc(chip) >> 8;
	if (chip->granularity) {
		unsigned int wait_banks = quick_no_sync ? 0 : 8;
		/* round up to the next multiple of chip->granularity */
		tstamp += (wait_banks + 1) * chip->granularity - 1;
		tstamp -= tstamp % chip->granularity;
	}
	return tstamp << 8;
}
/* clear any pending interrupt status */
static void lola_stream_clear_pending_irq(struct lola *chip,
struct lola_stream *str)
{
unsigned int val = lola_dsd_read(chip, str->dsd, STS);
val &= LOLA_DSD_STS_DESE | LOLA_DSD_STS_BCIS;
if (val)
lola_dsd_write(chip, str->dsd, STS, val);
}
/*
 * Kick a stream running at the given LRC timestamp: clear stale IRQ
 * status, then set SRUN together with IRQ enables and the timestamp
 * (VLRCV latches the tstamp field).
 */
static void lola_stream_start(struct lola *chip, struct lola_stream *str,
			      unsigned int tstamp)
{
	lola_stream_clear_pending_irq(chip, str);
	lola_dsd_write(chip, str->dsd, CTL,
		       LOLA_DSD_CTL_SRUN |
		       LOLA_DSD_CTL_IOCE |
		       LOLA_DSD_CTL_DEIE |
		       LOLA_DSD_CTL_VLRCV |
		       tstamp);
}
/*
 * Stop a stream at the given LRC timestamp: rewrite CTL without the
 * SRUN bit (keeping IRQ enables), then clear any interrupt status the
 * stop may have raised.
 */
static void lola_stream_stop(struct lola *chip, struct lola_stream *str,
			     unsigned int tstamp)
{
	lola_dsd_write(chip, str->dsd, CTL,
		       LOLA_DSD_CTL_IOCE |
		       LOLA_DSD_CTL_DEIE |
		       LOLA_DSD_CTL_VLRCV |
		       tstamp);
	lola_stream_clear_pending_irq(chip, str);
}
/*
 * Poll (up to 200ms) for the stream-reset bit to self-clear after a
 * SRST write.  Only warns on timeout; callers proceed regardless.
 */
static void wait_for_srst_clear(struct lola *chip, struct lola_stream *str)
{
	unsigned long end_time = jiffies + msecs_to_jiffies(200);
	while (time_before(jiffies, end_time)) {
		unsigned int val;
		val = lola_dsd_read(chip, str->dsd, CTL);
		if (!(val & LOLA_DSD_CTL_SRST))
			return;
		msleep(1);
	}
	printk(KERN_WARNING SFX "SRST not clear (stream %d)\n", str->dsd);
}
/*
 * Poll (up to 200ms) for the stream FIFO to reach the requested state:
 * ready when @ready is true, empty otherwise.  Returns 0 on success or
 * -EIO on timeout.
 */
static int lola_stream_wait_for_fifo(struct lola *chip,
				     struct lola_stream *str,
				     bool ready)
{
	unsigned int val = ready ? LOLA_DSD_STS_FIFORDY : 0;
	unsigned long end_time = jiffies + msecs_to_jiffies(200);
	while (time_before(jiffies, end_time)) {
		unsigned int reg = lola_dsd_read(chip, str->dsd, STS);
		if ((reg & LOLA_DSD_STS_FIFORDY) == val)
			return 0;
		msleep(1);
	}
	printk(KERN_WARNING SFX "FIFO not ready (stream %d)\n", str->dsd);
	return -EIO;
}
/* sync for FIFO ready/empty for all linked streams;
* clear paused flag when FIFO gets ready again
*/
/*
 * Poll (up to 200ms) until every prepared+paused stream linked to
 * @substream (on the same card) has its FIFO in the requested state.
 * When waiting for ready, each stream's paused flag is cleared as soon
 * as its FIFO comes up.  Returns 0 on success or -EIO on timeout
 * (logging the dsd index of the stream still pending).
 */
static int lola_sync_wait_for_fifo(struct lola *chip,
				   struct snd_pcm_substream *substream,
				   bool ready)
{
	unsigned int val = ready ? LOLA_DSD_STS_FIFORDY : 0;
	unsigned long end_time = jiffies + msecs_to_jiffies(200);
	struct snd_pcm_substream *s;
	int pending = 0;
	while (time_before(jiffies, end_time)) {
		pending = 0;
		snd_pcm_group_for_each_entry(s, substream) {
			struct lola_stream *str;
			/* skip linked substreams that belong to other cards */
			if (s->pcm->card != substream->pcm->card)
				continue;
			str = lola_get_stream(s);
			if (str->prepared && str->paused) {
				unsigned int reg;
				reg = lola_dsd_read(chip, str->dsd, STS);
				if ((reg & LOLA_DSD_STS_FIFORDY) != val) {
					/* remember which stream blocks us (dsd+1 so 0 means none) */
					pending = str->dsd + 1;
					break;
				}
				if (ready)
					str->paused = 0;
			}
		}
		if (!pending)
			return 0;
		msleep(1);
	}
	printk(KERN_WARNING SFX "FIFO not ready (pending %d)\n", pending - 1);
	return -EIO;
}
/* finish pause - prepare for a new resume */
/* finish pause - prepare for a new resume */
/*
 * Wait for all linked paused streams' FIFOs to drain, restart their
 * DMA engines (SRUN) without a timestamp, then wait for the FIFOs to
 * come back up so a subsequent resume can start cleanly.
 */
static void lola_sync_pause(struct lola *chip,
			    struct snd_pcm_substream *substream)
{
	struct snd_pcm_substream *s;
	lola_sync_wait_for_fifo(chip, substream, false);
	snd_pcm_group_for_each_entry(s, substream) {
		struct lola_stream *str;
		if (s->pcm->card != substream->pcm->card)
			continue;
		str = lola_get_stream(s);
		if (str->paused && str->prepared)
			lola_dsd_write(chip, str->dsd, CTL, LOLA_DSD_CTL_SRUN |
				       LOLA_DSD_CTL_IOCE | LOLA_DSD_CTL_DEIE);
	}
	lola_sync_wait_for_fifo(chip, substream, true);
}
/*
 * Fully reset a prepared stream: finish any pending pause, stop the
 * DMA engine, wait for the FIFO to drain, assert SRST, clear the BDL
 * pointers and wait for the reset bit to self-clear.  No-op if the
 * stream was never prepared.
 */
static void lola_stream_reset(struct lola *chip, struct lola_stream *str)
{
	if (str->prepared) {
		if (str->paused)
			lola_sync_pause(chip, str->substream);
		str->prepared = 0;
		lola_dsd_write(chip, str->dsd, CTL,
			       LOLA_DSD_CTL_IOCE | LOLA_DSD_CTL_DEIE);
		lola_stream_wait_for_fifo(chip, str, false);
		lola_stream_clear_pending_irq(chip, str);
		lola_dsd_write(chip, str->dsd, CTL, LOLA_DSD_CTL_SRST);
		lola_dsd_write(chip, str->dsd, LVI, 0);
		lola_dsd_write(chip, str->dsd, BDPU, 0);
		lola_dsd_write(chip, str->dsd, BDPL, 0);
		wait_for_srst_clear(chip, str);
	}
}
/* Hardware capabilities advertised to the ALSA PCM core. */
static struct snd_pcm_hardware lola_pcm_hw = {
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_PAUSE),
	.formats =		(SNDRV_PCM_FMTBIT_S16_LE |
				 SNDRV_PCM_FMTBIT_S24_LE |
				 SNDRV_PCM_FMTBIT_S32_LE |
				 SNDRV_PCM_FMTBIT_FLOAT_LE),
	.rates =		SNDRV_PCM_RATE_8000_192000,
	.rate_min =		8000,
	.rate_max =		192000,
	.channels_min =		1,
	.channels_max =		2,	/* raised per-open from num_streams */
	.buffer_bytes_max =	LOLA_MAX_BUF_SIZE,
	.period_bytes_min =	128,
	.period_bytes_max =	LOLA_MAX_BUF_SIZE / 2,
	.periods_min =		2,
	.periods_max =		LOLA_MAX_BDL_ENTRIES,	/* one BDL entry per period */
	.fifo_size =		0,
};
/*
 * PCM open callback.  Claims the stream slot under open_mutex, sets up
 * the runtime hardware caps (locking the rate range if another stream
 * already fixed the sample rate) and applies the controller's period
 * granularity constraints.  Returns -EBUSY if the slot is taken.
 */
static int lola_pcm_open(struct snd_pcm_substream *substream)
{
	struct lola *chip = snd_pcm_substream_chip(substream);
	struct lola_pcm *pcm = lola_get_pcm(substream);
	struct lola_stream *str = lola_get_stream(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;

	mutex_lock(&chip->open_mutex);
	if (str->opened) {
		mutex_unlock(&chip->open_mutex);
		return -EBUSY;
	}
	str->substream = substream;
	str->master = NULL;
	str->opened = 1;
	runtime->hw = lola_pcm_hw;
	/* multi-channel uses consecutive stream slots after this one */
	runtime->hw.channels_max = pcm->num_streams - str->index;
	if (chip->sample_rate) {
		/* sample rate is locked */
		runtime->hw.rate_min = chip->sample_rate;
		runtime->hw.rate_max = chip->sample_rate;
	} else {
		runtime->hw.rate_min = chip->sample_rate_min;
		runtime->hw.rate_max = chip->sample_rate_max;
	}
	chip->ref_count_rate++;
	snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS);
	/* period size = multiple of chip->granularity (8, 16 or 32 frames)*/
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				   chip->granularity);
	snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
				   chip->granularity);
	mutex_unlock(&chip->open_mutex);
	return 0;
}
/*
 * Detach the run of slave streams that were chained to @str for a
 * previous multi-channel configuration, marking each one closed again.
 * Stops at the first stream that is not a slave of @str.
 */
static void lola_cleanup_slave_streams(struct lola_pcm *pcm,
				       struct lola_stream *str)
{
	int idx = str->index + 1;

	while (idx < pcm->num_streams) {
		struct lola_stream *slave = &pcm->streams[idx];
		if (slave->master != str)
			break;
		slave->master = NULL;
		slave->opened = 0;
		idx++;
	}
}
/*
 * PCM close callback.  Releases the stream slot (only if still owned
 * by this substream — slaves may have been reassigned) and drops the
 * rate reference; the locked sample rate is freed once the last opener
 * goes away.
 */
static int lola_pcm_close(struct snd_pcm_substream *substream)
{
	struct lola *chip = snd_pcm_substream_chip(substream);
	struct lola_stream *str = lola_get_stream(substream);

	mutex_lock(&chip->open_mutex);
	if (str->substream == substream) {
		str->substream = NULL;
		str->opened = 0;
	}
	if (--chip->ref_count_rate == 0) {
		/* release sample rate */
		chip->sample_rate = 0;
	}
	mutex_unlock(&chip->open_mutex);
	return 0;
}
/*
 * hw_params callback: invalidate the cached stream configuration (it is
 * recomputed in prepare) and allocate the DMA buffer pages.
 */
static int lola_pcm_hw_params(struct snd_pcm_substream *substream,
			      struct snd_pcm_hw_params *hw_params)
{
	struct lola_stream *str = lola_get_stream(substream);

	str->bufsize = 0;
	str->period_bytes = 0;
	str->format_verb = 0;
	return snd_pcm_lib_malloc_pages(substream,
					params_buffer_bytes(hw_params));
}
/*
 * hw_free callback: reset the hardware stream, detach any slave
 * streams and release the DMA buffer pages.
 */
static int lola_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct lola *chip = snd_pcm_substream_chip(substream);
	struct lola_pcm *pcm = lola_get_pcm(substream);
	struct lola_stream *str = lola_get_stream(substream);

	mutex_lock(&chip->open_mutex);
	lola_stream_reset(chip, str);
	lola_cleanup_slave_streams(pcm, str);
	mutex_unlock(&chip->open_mutex);
	return snd_pcm_lib_free_pages(substream);
}
/*
* set up a BDL entry
*/
/*
 * set up a BDL entry
 */
/*
 * Append buffer-descriptor entries covering @size bytes starting at
 * buffer offset @ofs.  The SG buffer may be discontiguous, so one
 * period may need several entries; the IOC bit is set only on the last
 * entry of the period so one interrupt fires per period.  Advances
 * *bdlp and str->frags; returns the new offset, or -EINVAL once the
 * hardware entry limit is exceeded.
 */
static int setup_bdle(struct snd_pcm_substream *substream,
		      struct lola_stream *str, u32 **bdlp,
		      int ofs, int size)
{
	u32 *bdl = *bdlp;

	while (size > 0) {
		dma_addr_t addr;
		int chunk;

		if (str->frags >= LOLA_MAX_BDL_ENTRIES)
			return -EINVAL;

		addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		/* program the address field of the BDL entry */
		bdl[0] = cpu_to_le32((u32)addr);
		bdl[1] = cpu_to_le32(upper_32_bits(addr));
		/* program the size field of the BDL entry */
		chunk = snd_pcm_sgbuf_get_chunk_size(substream, ofs, size);
		bdl[2] = cpu_to_le32(chunk);
		/* program the IOC to enable interrupt
		 * only when the whole fragment is processed
		 */
		size -= chunk;
		bdl[3] = size ? 0 : cpu_to_le32(0x01);
		bdl += 4;
		str->frags++;
		ofs += chunk;
	}
	*bdlp = bdl;
	return ofs;
}
/*
* set up BDL entries
*/
/*
 * set up BDL entries
 */
/*
 * Build the full buffer descriptor list for a stream: one setup_bdle()
 * call per period, writing into this stream's slice of the shared BDL
 * area.  Returns 0, or -EINVAL when the buffer/period combination
 * needs more than LOLA_MAX_BDL_ENTRIES.
 */
static int lola_setup_periods(struct lola *chip, struct lola_pcm *pcm,
			      struct snd_pcm_substream *substream,
			      struct lola_stream *str)
{
	u32 *bdl;
	int i, ofs, periods, period_bytes;

	period_bytes = str->period_bytes;
	periods = str->bufsize / period_bytes;

	/* program the initial BDL entries */
	bdl = (u32 *)(pcm->bdl.area + LOLA_BDL_ENTRY_SIZE * str->index);
	ofs = 0;
	str->frags = 0;
	for (i = 0; i < periods; i++) {
		ofs = setup_bdle(substream, str, &bdl, ofs, period_bytes);
		if (ofs < 0)
			goto error;
	}
	return 0;

 error:
	snd_printk(KERN_ERR SFX "Too many BDL entries: buffer=%d, period=%d\n",
		   str->bufsize, period_bytes);
	return -EINVAL;
}
/*
 * Translate the runtime sample format into the codec's stream-format
 * verb, with the channel count OR'ed into the low bits.  Returns 0 for
 * any format the hardware does not support.
 */
static unsigned int lola_get_format_verb(struct snd_pcm_substream *substream)
{
	unsigned int base;

	switch (substream->runtime->format) {
	case SNDRV_PCM_FORMAT_S16_LE:
		base = 0x00000000;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		base = 0x00000200;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		base = 0x00000300;
		break;
	case SNDRV_PCM_FORMAT_FLOAT_LE:
		base = 0x00001300;
		break;
	default:
		return 0;	/* unsupported format */
	}
	return base | substream->runtime->channels;
}
/*
 * Push the stream format and stream/channel routing to the codec.
 * The format verb is sent once on the first channel's node; then each
 * channel node is bound to this stream index via SET_CHANNEL_STREAMID.
 * Returns 0 or the first codec error.
 */
static int lola_set_stream_config(struct lola *chip,
				  struct lola_stream *str,
				  int channels)
{
	int i, err;
	unsigned int verb, val;

	/* set format info for all channels
	 * (with only one command for the first channel)
	 */
	err = lola_codec_read(chip, str->nid, LOLA_VERB_SET_STREAM_FORMAT,
			      str->format_verb, 0, &val, NULL);
	if (err < 0) {
		printk(KERN_ERR SFX "Cannot set stream format 0x%x\n",
		       str->format_verb);
		return err;
	}

	/* update stream - channel config */
	for (i = 0; i < channels; i++) {
		/* verb payload: stream index in bits 6+, channel in low bits */
		verb = (str->index << 6) | i;
		err = lola_codec_read(chip, str[i].nid,
				      LOLA_VERB_SET_CHANNEL_STREAMID, 0, verb,
				      &val, NULL);
		if (err < 0) {
			printk(KERN_ERR SFX "Cannot set stream channel %d\n", i);
			return err;
		}
	}
	return 0;
}
/*
* set up the SD for streaming
*/
/*
 * Program the stream DMA engine: point BDPL/BDPU at this stream's BDL,
 * set the last-valid-index, clear stale IRQs and start the engine
 * (SRUN) so the FIFO prefills.  Marks the stream prepared and waits
 * for the FIFO to become ready.  -EINVAL if already prepared.
 */
static int lola_setup_controller(struct lola *chip, struct lola_pcm *pcm,
				 struct lola_stream *str)
{
	dma_addr_t bdl;

	if (str->prepared)
		return -EINVAL;

	/* set up BDL */
	bdl = pcm->bdl.addr + LOLA_BDL_ENTRY_SIZE * str->index;
	lola_dsd_write(chip, str->dsd, BDPL, (u32)bdl);
	lola_dsd_write(chip, str->dsd, BDPU, upper_32_bits(bdl));
	/* program the stream LVI (last valid index) of the BDL */
	lola_dsd_write(chip, str->dsd, LVI, str->frags - 1);
	lola_stream_clear_pending_irq(chip, str);

	lola_dsd_write(chip, str->dsd, CTL,
		       LOLA_DSD_CTL_IOCE | LOLA_DSD_CTL_DEIE | LOLA_DSD_CTL_SRUN);

	str->prepared = 1;

	return lola_stream_wait_for_fifo(chip, str, true);
}
/*
 * PCM prepare callback.  Resets any previous configuration, claims the
 * additional stream slots needed for multi-channel (marking them as
 * slaves of this one), rebuilds the BDL, locks the card's sample rate,
 * pushes the codec stream config and finally arms the DMA engine.
 * Returns 0 or a negative errno (with the stream reset on the last
 * failure path).
 */
static int lola_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct lola *chip = snd_pcm_substream_chip(substream);
	struct lola_pcm *pcm = lola_get_pcm(substream);
	struct lola_stream *str = lola_get_stream(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int bufsize, period_bytes, format_verb;
	int i, err;

	mutex_lock(&chip->open_mutex);
	lola_stream_reset(chip, str);
	lola_cleanup_slave_streams(pcm, str);
	/* need one stream slot per channel */
	if (str->index + runtime->channels > pcm->num_streams) {
		mutex_unlock(&chip->open_mutex);
		return -EINVAL;
	}
	for (i = 1; i < runtime->channels; i++) {
		str[i].master = str;
		str[i].opened = 1;
	}
	mutex_unlock(&chip->open_mutex);

	bufsize = snd_pcm_lib_buffer_bytes(substream);
	period_bytes = snd_pcm_lib_period_bytes(substream);
	format_verb = lola_get_format_verb(substream);

	str->bufsize = bufsize;
	str->period_bytes = period_bytes;
	str->format_verb = format_verb;

	err = lola_setup_periods(chip, pcm, substream, str);
	if (err < 0)
		return err;

	err = lola_set_sample_rate(chip, runtime->rate);
	if (err < 0)
		return err;
	chip->sample_rate = runtime->rate;	/* sample rate gets locked */

	err = lola_set_stream_config(chip, str, runtime->channels);
	if (err < 0)
		return err;

	err = lola_setup_controller(chip, pcm, str);
	if (err < 0) {
		lola_stream_reset(chip, str);
		return err;
	}

	return 0;
}
/*
 * PCM trigger callback.  Maps start/resume/pause-release to a start and
 * stop/suspend/pause-push to a stop, computes a common hardware
 * timestamp (with extra settle time only when several linked streams
 * must start sample-synchronously), and applies it to every linked
 * substream of this card under reg_lock.
 */
static int lola_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct lola *chip = snd_pcm_substream_chip(substream);
	struct lola_stream *str;
	struct snd_pcm_substream *s;
	unsigned int start;
	unsigned int tstamp;
	bool sync_streams;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
	case SNDRV_PCM_TRIGGER_RESUME:
		start = 1;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_STOP:
		start = 0;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * sample correct synchronization is only needed starting several
	 * streams. On stop or if only one stream do as quick as possible
	 */
	sync_streams = (start && snd_pcm_stream_linked(substream));
	tstamp = lola_get_tstamp(chip, !sync_streams);
	spin_lock(&chip->reg_lock);
	snd_pcm_group_for_each_entry(s, substream) {
		if (s->pcm->card != substream->pcm->card)
			continue;
		str = lola_get_stream(s);
		if (start)
			lola_stream_start(chip, str, tstamp);
		else
			lola_stream_stop(chip, str, tstamp);
		str->running = start;
		str->paused = !start;
		snd_pcm_trigger_done(s, substream);
	}
	spin_unlock(&chip->reg_lock);
	return 0;
}
/*
 * PCM pointer callback: report the DMA position from the link position
 * register, wrapped to 0 if the hardware momentarily reports a value
 * at/past the buffer end.
 */
static snd_pcm_uframes_t lola_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct lola *chip = snd_pcm_substream_chip(substream);
	struct lola_stream *str = lola_get_stream(substream);
	unsigned int pos = lola_dsd_read(chip, str->dsd, LPIB);

	if (pos >= str->bufsize)
		pos = 0;
	return bytes_to_frames(substream->runtime, pos);
}
/*
 * Called from the interrupt handler with a bitmask of streams that
 * completed a period; report period-elapsed for each running stream
 * whose bit is set.  Bits are consumed as they are handled so the loop
 * can bail out early once the mask is empty.
 */
void lola_pcm_update(struct lola *chip, struct lola_pcm *pcm, unsigned int bits)
{
	unsigned int remain = bits;
	int idx = 0;

	while (remain && idx < pcm->num_streams) {
		if (remain & (1 << idx)) {
			struct lola_stream *str = &pcm->streams[idx];
			if (str->substream && str->running)
				snd_pcm_period_elapsed(str->substream);
			remain &= ~(1 << idx);
		}
		idx++;
	}
}
/* ALSA PCM operation table, shared by playback and capture. */
static struct snd_pcm_ops lola_pcm_ops = {
	.open = lola_pcm_open,
	.close = lola_pcm_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = lola_pcm_hw_params,
	.hw_free = lola_pcm_hw_free,
	.prepare = lola_pcm_prepare,
	.trigger = lola_pcm_trigger,
	.pointer = lola_pcm_pointer,
	.page = snd_pcm_sgbuf_ops_page,
};
/*
 * Create the PCM device: allocate one page of BDL memory per direction,
 * register the PCM with one substream per hardware stream, and set up
 * SG-buffer preallocation.
 * NOTE(review): if the second snd_dma_alloc_pages() fails, the first
 * page is not freed here — presumably the caller's teardown path calls
 * lola_free_pcm(); confirm before changing.
 */
int __devinit lola_create_pcm(struct lola *chip)
{
	struct snd_pcm *pcm;
	int i, err;

	for (i = 0; i < 2; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  snd_dma_pci_data(chip->pci),
					  PAGE_SIZE, &chip->pcm[i].bdl);
		if (err < 0)
			return err;
	}

	err = snd_pcm_new(chip->card, "Digigram Lola", 0,
			  chip->pcm[SNDRV_PCM_STREAM_PLAYBACK].num_streams,
			  chip->pcm[SNDRV_PCM_STREAM_CAPTURE].num_streams,
			  &pcm);
	if (err < 0)
		return err;
	strlcpy(pcm->name, "Digigram Lola", sizeof(pcm->name));
	pcm->private_data = chip;
	for (i = 0; i < 2; i++) {
		if (chip->pcm[i].num_streams)
			snd_pcm_set_ops(pcm, i, &lola_pcm_ops);
	}
	/* buffer pre-allocation */
	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
					      snd_dma_pci_data(chip->pci),
					      1024 * 64, 32 * 1024 * 1024);
	return 0;
}
/* Release the BDL pages allocated by lola_create_pcm(). */
void lola_free_pcm(struct lola *chip)
{
	int dir;

	for (dir = 0; dir < 2; dir++)
		snd_dma_free_pages(&chip->pcm[dir].bdl);
}
/*
*/
/*
 * Initialize a single stream widget: record its NID and DSD index and
 * validate the widget / stream-format capabilities read from hardware.
 *
 * @chip: card instance
 * @str:  stream state to fill in
 * @idx:  stream index within its direction
 * @nid:  widget node id
 * @dir:  PLAY or CAPT
 *
 * Returns 0 on success, a negative error code if the parameters cannot
 * be read or the capabilities are inconsistent.
 */
static int lola_init_stream(struct lola *chip, struct lola_stream *str,
			    int idx, int nid, int dir)
{
	unsigned int val;
	int err;

	str->nid = nid;
	str->index = idx;
	/* DSD registers for playback streams follow all capture DSDs */
	str->dsd = idx;
	if (dir == PLAY)
		str->dsd += MAX_STREAM_IN_COUNT;

	err = lola_read_param(chip, nid, LOLA_PAR_AUDIO_WIDGET_CAP, &val);
	if (err < 0) {
		printk(KERN_ERR SFX "Can't read wcaps for 0x%x\n", nid);
		return err;
	}

	if (dir == PLAY) {
		/* test TYPE and bits 0..11 (no test bit9 : Digital = 0/1) */
		if ((val & 0x00f00dff) != 0x00000010) {
			printk(KERN_ERR SFX "Invalid wcaps 0x%x for 0x%x\n",
			       val, nid);
			return -EINVAL;
		}
	} else {
		/* test TYPE and bits 0..11 (no test bit9 : Digital = 0/1)
		 * (bug : ignore bit8: Conn list = 0/1)
		 */
		if ((val & 0x00f00cff) != 0x00100010) {
			printk(KERN_ERR SFX "Invalid wcaps 0x%x for 0x%x\n",
			       val, nid);
			return -EINVAL;
		}
		/* test bit9:DIGITAL and bit12:SRC_PRESENT*/
		if ((val & 0x00001200) == 0x00001200)
			chip->input_src_caps_mask |= (1 << idx);
	}

	err = lola_read_param(chip, nid, LOLA_PAR_STREAM_FORMATS, &val);
	if (err < 0) {
		printk(KERN_ERR SFX "Can't read FORMATS 0x%x\n", nid);
		return err;
	}
	val &= 3;
	if (val == 3)		/* supports both integer PCM and float */
		str->can_float = true;
	if (!(val & 1)) {
		/* fix: log message previously lacked its '\n' terminator */
		printk(KERN_ERR SFX "Invalid formats 0x%x for 0x%x\n",
		       val, nid);
		return -EINVAL;
	}
	return 0;
}
/*
 * Initialize every stream of one direction, assigning consecutive widget
 * NIDs starting at *nidp.  On success *nidp is advanced past the last
 * NID consumed; on failure it is left untouched.
 */
int __devinit lola_init_pcm(struct lola *chip, int dir, int *nidp)
{
	struct lola_pcm *pcm = &chip->pcm[dir];
	int nid = *nidp;
	int idx, err;

	for (idx = 0; idx < pcm->num_streams; idx++, nid++) {
		err = lola_init_stream(chip, &pcm->streams[idx], idx, nid, dir);
		if (err < 0)
			return err;
	}
	*nidp = nid;	/* hand back the next free NID */
	return 0;
}
| gpl-2.0 |
TeamWin/android_kernel_huawei_mt2l03 | drivers/message/i2o/device.c | 10569 | 15238 | /*
* Functions to handle I2O devices
*
* Copyright (C) 2004 Markus Lidel <Markus.Lidel@shadowconnect.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Fixes/additions:
* Markus Lidel <Markus.Lidel@shadowconnect.com>
* initial version.
*/
#include <linux/module.h>
#include <linux/i2o.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "core.h"
/**
* i2o_device_issue_claim - claim or release a device
* @dev: I2O device to claim or release
* @cmd: claim or release command
* @type: type of claim
*
* Issue I2O UTIL_CLAIM or UTIL_RELEASE messages. The message to be sent
* is set by cmd. dev is the I2O device which should be claim or
* released and the type is the claim type (see the I2O spec).
*
* Returs 0 on success or negative error code on failure.
*/
static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
					 u32 type)
{
	struct i2o_controller *iop = dev->iop;
	struct i2o_message *msg;

	/* Obtain a message frame, waiting up to the standard timeout. */
	msg = i2o_msg_get_wait(iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	/* Five-word message, no scatter/gather list. */
	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	/* Command code in bits 31:24, host TID, then the target's TID. */
	msg->u.head[1] = cpu_to_le32(cmd << 24 | HOST_TID << 12 |
				     dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(type);

	/* Post and wait up to 60 seconds for the controller's reply. */
	return i2o_msg_post_wait(iop, msg, 60);
}
/**
* i2o_device_claim - claim a device for use by an OSM
* @dev: I2O device to claim
*
* Do the leg work to assign a device to a given OSM. If the claim succeeds,
* the owner is the primary. If the attempt fails a negative errno code
* is returned. On success zero is returned.
*/
int i2o_device_claim(struct i2o_device *dev)
{
	int rc;

	mutex_lock(&dev->lock);

	rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_CLAIM, I2O_CLAIM_PRIMARY);
	if (rc)
		pr_debug("i2o: claim of device %d failed %d\n",
			 dev->lct_data.tid, rc);
	else
		pr_debug("i2o: claim of device %d succeeded\n",
			 dev->lct_data.tid);

	mutex_unlock(&dev->lock);
	return rc;
}
/**
* i2o_device_claim_release - release a device that the OSM is using
* @dev: device to release
*
* Drop a claim by an OSM on a given I2O device.
*
* AC - some devices seem to want to refuse an unclaim until they have
* finished internal processing. It makes sense since you don't want a
* new device to go reconfiguring the entire system until you are done.
* Thus we are prepared to wait briefly.
*
* Returns 0 on success or negative error code on failure.
*/
int i2o_device_claim_release(struct i2o_device *dev)
{
	int attempt;
	int rc = 0;

	mutex_lock(&dev->lock);

	/*
	 * Some controllers handle releases asynchronously and refuse the
	 * first attempts; retry with a one-second sleep between attempts,
	 * up to ten times.
	 */
	for (attempt = 0; attempt < 10; attempt++) {
		rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_RELEASE,
					    I2O_CLAIM_PRIMARY);
		if (!rc)
			break;
		ssleep(1);
	}

	if (rc)
		pr_debug("i2o: claim release of device %d failed %d\n",
			 dev->lct_data.tid, rc);
	else
		pr_debug("i2o: claim release of device %d succeeded\n",
			 dev->lct_data.tid);

	mutex_unlock(&dev->lock);
	return rc;
}
/**
* i2o_device_release - release the memory for a I2O device
* @dev: I2O device which should be released
*
* Release the allocated memory. This function is called if refcount of
* device reaches 0 automatically.
*/
/* Final put: free the i2o_device once its embedded device refcount hits 0. */
static void i2o_device_release(struct device *dev)
{
	pr_debug("i2o: device %s released\n", dev_name(dev));
	kfree(to_i2o_device(dev));
}
/**
* i2o_device_show_class_id - Displays class id of I2O device
* @dev: device of which the class id should be displayed
* @attr: pointer to device attribute
* @buf: buffer into which the class id should be printed
*
* Returns the number of bytes which are printed into the buffer.
*/
/*
 * Show the device's I2O class id as "0x%03x\n".
 *
 * Fix: return the sprintf() character count directly.  The old code
 * returned strlen(buf) + 1, which wrongly counted the terminating NUL
 * as attribute content; sysfs show() methods return the number of
 * bytes printed.
 */
static ssize_t i2o_device_show_class_id(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);

	return sprintf(buf, "0x%03x\n", i2o_dev->lct_data.class_id);
}
/**
* i2o_device_show_tid - Displays TID of I2O device
* @dev: device of which the TID should be displayed
* @attr: pointer to device attribute
* @buf: buffer into which the TID should be printed
*
* Returns the number of bytes which are printed into the buffer.
*/
/*
 * Show the device's I2O TID as "0x%03x\n".
 *
 * Fix: return the sprintf() character count per sysfs convention
 * instead of strlen(buf) + 1, which included the terminating NUL.
 */
static ssize_t i2o_device_show_tid(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);

	return sprintf(buf, "0x%03x\n", i2o_dev->lct_data.tid);
}
/* I2O device attributes */
/* Read-only sysfs attributes registered for every I2O device. */
struct device_attribute i2o_device_attrs[] = {
	__ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL),
	__ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL),
	__ATTR_NULL	/* terminator */
};
/**
* i2o_device_alloc - Allocate a I2O device and initialize it
*
* Allocate the memory for a I2O device and initialize locks and lists
*
* Returns the allocated I2O device or a negative error code if the device
* could not be allocated.
*/
static struct i2o_device *i2o_device_alloc(void)
{
	struct i2o_device *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->list);
	mutex_init(&dev->lock);

	/* hook into the I2O bus; release callback frees the structure */
	dev->device.bus = &i2o_bus_type;
	dev->device.release = &i2o_device_release;

	return dev;
}
/**
* i2o_device_add - allocate a new I2O device and add it to the IOP
* @c: I2O controller that the device is on
* @entry: LCT entry of the I2O device
*
* Allocate a new I2O device and initialize it with the LCT entry. The
* device is appended to the device list of the controller.
*
* Returns zero on success, or a -ve errno.
*/
/*
 * Allocate and register a new I2O device from its LCT entry, add it to
 * the controller's device list, create the bidirectional "user" and
 * "parent" sysfs links, and notify all OSM drivers.
 *
 * Returns zero on success, or a -ve errno.
 *
 * Fix: the unreg_dev unwind path must NOT fall through to err's
 * kfree().  device_unregister() drops the last reference, which invokes
 * i2o_device_release() and frees i2o_dev; a subsequent kfree() was a
 * double free.  We now return directly after device_unregister().
 */
static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry)
{
	struct i2o_device *i2o_dev, *tmp;
	int rc;

	i2o_dev = i2o_device_alloc();
	if (IS_ERR(i2o_dev)) {
		printk(KERN_ERR "i2o: unable to allocate i2o device\n");
		return PTR_ERR(i2o_dev);
	}

	i2o_dev->lct_data = *entry;

	/* device name is "<controller unit>:<tid>" */
	dev_set_name(&i2o_dev->device, "%d:%03x", c->unit,
		     i2o_dev->lct_data.tid);

	i2o_dev->iop = c;
	i2o_dev->device.parent = &c->device;

	rc = device_register(&i2o_dev->device);
	if (rc)
		goto err;

	list_add_tail(&i2o_dev->list, &c->devices);

	/* create user entries for this device */
	tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
	if (tmp && (tmp != i2o_dev)) {
		rc = sysfs_create_link(&i2o_dev->device.kobj,
				       &tmp->device.kobj, "user");
		if (rc)
			goto unreg_dev;
	}

	/* create user entries referring to this device */
	list_for_each_entry(tmp, &c->devices, list)
		if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
		    && (tmp != i2o_dev)) {
			rc = sysfs_create_link(&tmp->device.kobj,
					       &i2o_dev->device.kobj, "user");
			if (rc)
				goto rmlink1;
		}

	/* create parent entries for this device */
	tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
	if (tmp && (tmp != i2o_dev)) {
		rc = sysfs_create_link(&i2o_dev->device.kobj,
				       &tmp->device.kobj, "parent");
		if (rc)
			goto rmlink1;
	}

	/* create parent entries referring to this device */
	list_for_each_entry(tmp, &c->devices, list)
		if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
		    && (tmp != i2o_dev)) {
			rc = sysfs_create_link(&tmp->device.kobj,
					       &i2o_dev->device.kobj, "parent");
			if (rc)
				goto rmlink2;
		}

	i2o_driver_notify_device_add_all(i2o_dev);

	pr_debug("i2o: device %s added\n", dev_name(&i2o_dev->device));

	return 0;

rmlink2:
	/* If link creating failed halfway, we loop whole list to cleanup.
	 * And we don't care wrong removing of link, because sysfs_remove_link
	 * will take care of it.
	 */
	list_for_each_entry(tmp, &c->devices, list) {
		if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
			sysfs_remove_link(&tmp->device.kobj, "parent");
	}
	sysfs_remove_link(&i2o_dev->device.kobj, "parent");
rmlink1:
	list_for_each_entry(tmp, &c->devices, list)
		if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
			sysfs_remove_link(&tmp->device.kobj, "user");
	sysfs_remove_link(&i2o_dev->device.kobj, "user");
unreg_dev:
	list_del(&i2o_dev->list);
	/* drops the final reference; the release callback frees i2o_dev */
	device_unregister(&i2o_dev->device);
	return rc;
err:
	kfree(i2o_dev);
	return rc;
}
/**
* i2o_device_remove - remove an I2O device from the I2O core
* @i2o_dev: I2O device which should be released
*
* Is used on I2O controller removal or LCT modification, when the device
* is removed from the system. Note that the device could still hang
* around until the refcount reaches 0.
*/
void i2o_device_remove(struct i2o_device *i2o_dev)
{
	struct i2o_controller *c = i2o_dev->iop;
	struct i2o_device *iter;

	/* Tell interested OSMs first, then tear down the sysfs links. */
	i2o_driver_notify_device_remove_all(i2o_dev);

	sysfs_remove_link(&i2o_dev->device.kobj, "parent");
	sysfs_remove_link(&i2o_dev->device.kobj, "user");

	/* Drop links on peer devices that pointed at this one. */
	list_for_each_entry(iter, &c->devices, list) {
		if (iter->lct_data.parent_tid == i2o_dev->lct_data.tid)
			sysfs_remove_link(&iter->device.kobj, "parent");
		if (iter->lct_data.user_tid == i2o_dev->lct_data.tid)
			sysfs_remove_link(&iter->device.kobj, "user");
	}

	list_del(&i2o_dev->list);
	device_unregister(&i2o_dev->device);
}
/**
* i2o_device_parse_lct - Parse a previously fetched LCT and create devices
* @c: I2O controller from which the LCT should be parsed.
*
* The Logical Configuration Table tells us what we can talk to on the
* board. For every entry we create an I2O device, which is registered in
* the I2O core.
*
* Returns 0 on success or negative error code on failure.
*/
int i2o_device_parse_lct(struct i2o_controller *c)
{
	struct i2o_device *dev, *tmp;
	i2o_lct *lct;
	u32 *dlct = c->dlct.virt;	/* little-endian LCT as fetched from the IOP */
	int max = 0, i = 0;		/* max counts parsed entries */
	u16 table_size;
	u32 buf;

	mutex_lock(&c->lct_lock);

	/* drop any previously cached copy before reparsing */
	kfree(c->lct);

	/* first word: table size (in 32-bit words) in the low 16 bits */
	buf = le32_to_cpu(*dlct++);
	table_size = buf & 0xffff;

	/* table_size is in words, hence * 4 for the byte count */
	lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL);
	if (!lct) {
		mutex_unlock(&c->lct_lock);
		return -ENOMEM;
	}

	lct->lct_ver = buf >> 28;
	lct->boot_tid = buf >> 16 & 0xfff;
	lct->table_size = table_size;
	lct->change_ind = le32_to_cpu(*dlct++);
	lct->iop_flags = le32_to_cpu(*dlct++);

	/* three header words consumed so far */
	table_size -= 3;

	/* NOTE(review): max is still 0 here, so this prints "0 entries";
	 * the real count is only known after the loop below — verify. */
	pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max,
		 lct->table_size);

	/* each LCT entry occupies 9 words */
	while (table_size > 0) {
		i2o_lct_entry *entry = &lct->lct_entry[max];
		int found = 0;

		buf = le32_to_cpu(*dlct++);
		entry->entry_size = buf & 0xffff;
		entry->tid = buf >> 16 & 0xfff;

		entry->change_ind = le32_to_cpu(*dlct++);
		entry->device_flags = le32_to_cpu(*dlct++);

		buf = le32_to_cpu(*dlct++);
		entry->class_id = buf & 0xfff;
		entry->version = buf >> 12 & 0xf;
		entry->vendor_id = buf >> 16;

		entry->sub_class = le32_to_cpu(*dlct++);

		buf = le32_to_cpu(*dlct++);
		entry->user_tid = buf & 0xfff;
		entry->parent_tid = buf >> 12 & 0xfff;
		entry->bios_info = buf >> 24;

		/* 8-byte identity tag, copied raw (two words) */
		memcpy(&entry->identity_tag, dlct, 8);
		dlct += 2;

		entry->event_capabilities = le32_to_cpu(*dlct++);

		/* add new devices, which are new in the LCT */
		list_for_each_entry_safe(dev, tmp, &c->devices, list) {
			if (entry->tid == dev->lct_data.tid) {
				found = 1;
				break;
			}
		}

		if (!found)
			i2o_device_add(c, entry);

		table_size -= 9;
		max++;
	}

	/* remove devices, which are not in the LCT anymore */
	list_for_each_entry_safe(dev, tmp, &c->devices, list) {
		int found = 0;

		for (i = 0; i < max; i++) {
			if (lct->lct_entry[i].tid == dev->lct_data.tid) {
				found = 1;
				break;
			}
		}

		if (!found)
			i2o_device_remove(dev);
	}

	mutex_unlock(&c->lct_lock);

	return 0;
}
/*
* Run time support routines
*/
/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
*
* This function can be used for all UtilParamsGet/Set operations.
* The OperationList is given in oplist-buffer,
* and results are returned in reslist-buffer.
* Note that the minimum sized reslist is 8 bytes and contains
* ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
*/
/*
 * Issue a UTIL_PARAMS_GET/SET message: the operation list in @oplist is
 * copied into the message body, and the controller DMAs its result list
 * into a bounce buffer which is copied back to @reslist.
 *
 * Returns the i2o_msg_post_wait_mem() status (0 on success, -ve errno).
 */
int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
		   int oplen, void *reslist, int reslen)
{
	struct i2o_message *msg;
	int i = 0;
	int rc;
	struct i2o_dma res;	/* DMA bounce buffer for the result list */
	struct i2o_controller *c = i2o_dev->iop;
	struct device *dev = &c->pdev->dev;

	res.virt = NULL;

	if (i2o_dma_alloc(dev, &res, reslen))
		return -ENOMEM;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg)) {
		i2o_dma_free(dev, &res);
		return PTR_ERR(msg);
	}

	i = 0;
	msg->u.head[1] =
	    cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid);
	msg->body[i++] = cpu_to_le32(0x00000000);
	msg->body[i++] = cpu_to_le32(0x4C000000 | oplen);	/* OperationList */
	memcpy(&msg->body[i], oplist, oplen);
	/* advance past the operation list, rounding up to whole words */
	i += (oplen / 4 + (oplen % 4 ? 1 : 0));
	msg->body[i++] = cpu_to_le32(0xD0000000 | res.len);	/* ResultList */
	msg->body[i++] = cpu_to_le32(res.phys);
	/* head[0] is written last because it encodes the final size i */
	msg->u.head[0] =
	    cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
			SGL_OFFSET_5);

	rc = i2o_msg_post_wait_mem(c, msg, 10, &res);

	/* This only looks like a memory leak - don't "fix" it. */
	/* (on timeout, post_wait_mem keeps ownership of the DMA buffer
	 * because the controller may still write into it later) */
	if (rc == -ETIMEDOUT)
		return rc;

	memcpy(reslist, res.virt, res.len);
	i2o_dma_free(dev, &res);

	return rc;
}
/*
* Query one field group value or a whole scalar group.
*/
/*
 * Query one field group value or a whole scalar group.
 *
 * Builds a one-operation FIELD_GET list and copies the payload (after
 * the 8-byte result header) into @buf.
 *
 * Fix: only copy the result into @buf when the query succeeded.  The
 * result block is kmalloc'd (uninitialized), so on failure the old code
 * copied garbage into the caller's buffer.
 */
int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
		       void *buf, int buflen)
{
	u32 opblk[] = { cpu_to_le32(0x00000001),
		cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET),
		cpu_to_le32((s16) field << 16 | 0x00000001)
	};
	u8 *resblk;		/* 8 bytes for header */
	int rc;

	resblk = kmalloc(buflen + 8, GFP_KERNEL);
	if (!resblk)
		return -ENOMEM;

	rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
			    sizeof(opblk), resblk, buflen + 8);

	if (rc >= 0)
		memcpy(buf, resblk + 8, buflen);	/* cut off header */

	kfree(resblk);

	return rc;
}
/*
* if oper == I2O_PARAMS_TABLE_GET, get from all rows
* if fieldcount == -1 return all fields
* ibuf and ibuflen are unused (use NULL, 0)
* else return specific fields
* ibuf contains fieldindexes
*
* if oper == I2O_PARAMS_LIST_GET, get from specific rows
* if fieldcount == -1 return all fields
* ibuf contains rowcount, keyvalues
* else return specific fields
* fieldcount is # of fieldindexes
* ibuf contains fieldindexes, rowcount, keyvalues
*
* You could also use directly function i2o_issue_params().
*/
/*
 * Issue a table/list PARAMS_GET (see the comment block above for the
 * @oper / @fieldcount / @ibuf conventions).
 *
 * Returns the number of result bytes, clamped to @reslen, or a -ve
 * errno from i2o_parm_issue().
 *
 * Fix: the documented "use NULL, 0" calling convention for @ibuf made
 * the old code call memcpy() with a NULL source, which is undefined
 * behavior per the C standard; the copy is now guarded on @ibuflen.
 */
int i2o_parm_table_get(struct i2o_device *dev, int oper, int group,
		       int fieldcount, void *ibuf, int ibuflen, void *resblk,
		       int reslen)
{
	u16 *opblk;
	int size;

	/* 10 bytes of fixed operation header + caller parameters, padded
	 * up to a multiple of 4 bytes */
	size = 10 + ibuflen;
	if (size % 4)
		size += 4 - size % 4;

	opblk = kmalloc(size, GFP_KERNEL);
	if (opblk == NULL) {
		printk(KERN_ERR "i2o: no memory for query buffer.\n");
		return -ENOMEM;
	}

	opblk[0] = 1;		/* operation count */
	opblk[1] = 0;		/* pad */
	opblk[2] = oper;
	opblk[3] = group;
	opblk[4] = fieldcount;
	if (ibuflen > 0)
		memcpy(opblk + 5, ibuf, ibuflen);	/* other params */

	size = i2o_parm_issue(dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
			      size, resblk, reslen);

	kfree(opblk);

	if (size > reslen)
		return reslen;	/* result was truncated to the caller's buffer */

	return size;
}
EXPORT_SYMBOL(i2o_device_claim);
EXPORT_SYMBOL(i2o_device_claim_release);
EXPORT_SYMBOL(i2o_parm_field_get);
EXPORT_SYMBOL(i2o_parm_table_get);
EXPORT_SYMBOL(i2o_parm_issue);
| gpl-2.0 |
XileForce/Vindicator | fs/nfs/symlink.c | 12617 | 1661 | /*
* linux/fs/nfs/symlink.c
*
* Copyright (C) 1992 Rick Sladkey
*
* Optimization changes Copyright (C) 1994 Florian La Roche
*
* Jun 7 1999, cache symlink lookups in the page cache. -DaveM
*
* nfs symlink handling code
*/
#include <linux/time.h>
#include <linux/errno.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs.h>
#include <linux/nfs2.h>
#include <linux/nfs_fs.h>
#include <linux/pagemap.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/namei.h>
/* Symlink caching in the page cache is even more simplistic
* and straight-forward than readdir caching.
*/
static int nfs_symlink_filler(struct inode *inode, struct page *page)
{
int error;
error = NFS_PROTO(inode)->readlink(inode, page, 0, PAGE_SIZE);
if (error < 0)
goto error;
SetPageUptodate(page);
unlock_page(page);
return 0;
error:
SetPageError(page);
unlock_page(page);
return -EIO;
}
/*
 * ->follow_link: look the link text up in the page cache (filling via
 * nfs_symlink_filler if needed), hand the kmap'd text to the VFS with
 * nd_set_link(), and return the page so nfs's ->put_link can kunmap and
 * release it.  On failure an ERR_PTR is stored via nd_set_link() and
 * NULL is returned (nothing for put_link to release).
 */
static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
{
	struct inode *inode = dentry->d_inode;
	struct page *page;
	void *err;

	/* ERR_PTR(0) == NULL, so a zero return falls through to the read */
	err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping));
	if (err)
		goto read_failed;
	page = read_cache_page(&inode->i_data, 0,
				(filler_t *)nfs_symlink_filler, inode);
	if (IS_ERR(page)) {
		err = page;
		goto read_failed;
	}
	nd_set_link(nd, kmap(page));
	return page;

read_failed:
	nd_set_link(nd, err);
	return NULL;
}
/*
* symlinks can't do much...
*/
/* Inode operations for NFS symlinks; link text is cached in page 0 of
 * the inode's mapping, so the generic page_put_link() can release it. */
const struct inode_operations nfs_symlink_inode_operations = {
	.readlink	= generic_readlink,
	.follow_link	= nfs_follow_link,
	.put_link	= page_put_link,
	.getattr	= nfs_getattr,
	.setattr	= nfs_setattr,
};
| gpl-2.0 |
aloksinha2001/rk3066-kernel | drivers/media/video/cx18/cx18-scb.c | 13897 | 5794 | /*
* cx18 System Control Block initialization
*
* Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl>
* Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
* 02111-1307 USA
*/
#include "cx18-driver.h"
#include "cx18-io.h"
#include "cx18-scb.h"
/*
 * Lay out the System Control Block in device memory: clear it, then
 * publish the IRQ vector assignments and the offsets of every
 * inter-processor mailbox so the firmware processors (CPU/APU/HPU/PPU)
 * and the host (EPU) agree on where to exchange messages.
 */
void cx18_init_scb(struct cx18 *cx)
{
	cx18_setup_page(cx, SCB_OFFSET);
	cx18_memset_io(cx, cx->scb, 0, 0x10000);

	/* IRQ vectors: for each destination processor, the vector each
	 * source raises and the matching acknowledge vector. */
	cx18_writel(cx, IRQ_APU_TO_CPU, &cx->scb->apu2cpu_irq);
	cx18_writel(cx, IRQ_CPU_TO_APU_ACK, &cx->scb->cpu2apu_irq_ack);
	cx18_writel(cx, IRQ_HPU_TO_CPU, &cx->scb->hpu2cpu_irq);
	cx18_writel(cx, IRQ_CPU_TO_HPU_ACK, &cx->scb->cpu2hpu_irq_ack);
	cx18_writel(cx, IRQ_PPU_TO_CPU, &cx->scb->ppu2cpu_irq);
	cx18_writel(cx, IRQ_CPU_TO_PPU_ACK, &cx->scb->cpu2ppu_irq_ack);
	cx18_writel(cx, IRQ_EPU_TO_CPU, &cx->scb->epu2cpu_irq);
	cx18_writel(cx, IRQ_CPU_TO_EPU_ACK, &cx->scb->cpu2epu_irq_ack);

	cx18_writel(cx, IRQ_CPU_TO_APU, &cx->scb->cpu2apu_irq);
	cx18_writel(cx, IRQ_APU_TO_CPU_ACK, &cx->scb->apu2cpu_irq_ack);
	cx18_writel(cx, IRQ_HPU_TO_APU, &cx->scb->hpu2apu_irq);
	cx18_writel(cx, IRQ_APU_TO_HPU_ACK, &cx->scb->apu2hpu_irq_ack);
	cx18_writel(cx, IRQ_PPU_TO_APU, &cx->scb->ppu2apu_irq);
	cx18_writel(cx, IRQ_APU_TO_PPU_ACK, &cx->scb->apu2ppu_irq_ack);
	cx18_writel(cx, IRQ_EPU_TO_APU, &cx->scb->epu2apu_irq);
	cx18_writel(cx, IRQ_APU_TO_EPU_ACK, &cx->scb->apu2epu_irq_ack);

	cx18_writel(cx, IRQ_CPU_TO_HPU, &cx->scb->cpu2hpu_irq);
	cx18_writel(cx, IRQ_HPU_TO_CPU_ACK, &cx->scb->hpu2cpu_irq_ack);
	cx18_writel(cx, IRQ_APU_TO_HPU, &cx->scb->apu2hpu_irq);
	cx18_writel(cx, IRQ_HPU_TO_APU_ACK, &cx->scb->hpu2apu_irq_ack);
	cx18_writel(cx, IRQ_PPU_TO_HPU, &cx->scb->ppu2hpu_irq);
	cx18_writel(cx, IRQ_HPU_TO_PPU_ACK, &cx->scb->hpu2ppu_irq_ack);
	cx18_writel(cx, IRQ_EPU_TO_HPU, &cx->scb->epu2hpu_irq);
	cx18_writel(cx, IRQ_HPU_TO_EPU_ACK, &cx->scb->hpu2epu_irq_ack);

	cx18_writel(cx, IRQ_CPU_TO_PPU, &cx->scb->cpu2ppu_irq);
	cx18_writel(cx, IRQ_PPU_TO_CPU_ACK, &cx->scb->ppu2cpu_irq_ack);
	cx18_writel(cx, IRQ_APU_TO_PPU, &cx->scb->apu2ppu_irq);
	cx18_writel(cx, IRQ_PPU_TO_APU_ACK, &cx->scb->ppu2apu_irq_ack);
	cx18_writel(cx, IRQ_HPU_TO_PPU, &cx->scb->hpu2ppu_irq);
	cx18_writel(cx, IRQ_PPU_TO_HPU_ACK, &cx->scb->ppu2hpu_irq_ack);
	cx18_writel(cx, IRQ_EPU_TO_PPU, &cx->scb->epu2ppu_irq);
	cx18_writel(cx, IRQ_PPU_TO_EPU_ACK, &cx->scb->ppu2epu_irq_ack);

	cx18_writel(cx, IRQ_CPU_TO_EPU, &cx->scb->cpu2epu_irq);
	cx18_writel(cx, IRQ_EPU_TO_CPU_ACK, &cx->scb->epu2cpu_irq_ack);
	cx18_writel(cx, IRQ_APU_TO_EPU, &cx->scb->apu2epu_irq);
	cx18_writel(cx, IRQ_EPU_TO_APU_ACK, &cx->scb->epu2apu_irq_ack);
	cx18_writel(cx, IRQ_HPU_TO_EPU, &cx->scb->hpu2epu_irq);
	cx18_writel(cx, IRQ_EPU_TO_HPU_ACK, &cx->scb->epu2hpu_irq_ack);
	cx18_writel(cx, IRQ_PPU_TO_EPU, &cx->scb->ppu2epu_irq);
	cx18_writel(cx, IRQ_EPU_TO_PPU_ACK, &cx->scb->epu2ppu_irq_ack);

	/* Mailbox locations: each offset field holds the device-memory
	 * offset of the corresponding mailbox within the SCB page. */
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, apu2cpu_mb),
			&cx->scb->apu2cpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, hpu2cpu_mb),
			&cx->scb->hpu2cpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, ppu2cpu_mb),
			&cx->scb->ppu2cpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, epu2cpu_mb),
			&cx->scb->epu2cpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu2apu_mb),
			&cx->scb->cpu2apu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, hpu2apu_mb),
			&cx->scb->hpu2apu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, ppu2apu_mb),
			&cx->scb->ppu2apu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, epu2apu_mb),
			&cx->scb->epu2apu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu2hpu_mb),
			&cx->scb->cpu2hpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, apu2hpu_mb),
			&cx->scb->apu2hpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, ppu2hpu_mb),
			&cx->scb->ppu2hpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, epu2hpu_mb),
			&cx->scb->epu2hpu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu2ppu_mb),
			&cx->scb->cpu2ppu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, apu2ppu_mb),
			&cx->scb->apu2ppu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, hpu2ppu_mb),
			&cx->scb->hpu2ppu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, epu2ppu_mb),
			&cx->scb->epu2ppu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu2epu_mb),
			&cx->scb->cpu2epu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, apu2epu_mb),
			&cx->scb->apu2epu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, hpu2epu_mb),
			&cx->scb->hpu2epu_mb_offset);
	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, ppu2epu_mb),
			&cx->scb->ppu2epu_mb_offset);

	cx18_writel(cx, SCB_OFFSET + offsetof(struct cx18_scb, cpu_state),
			&cx->scb->ipc_offset);

	/* mark the EPU (host driver) side as up */
	cx18_writel(cx, 1, &cx->scb->epu_state);
}
| gpl-2.0 |
OpenLD/linux-wetek-3.10.y | arch/ia64/oprofile/perfmon.c | 14153 | 1863 | /**
* @file perfmon.c
*
* @remark Copyright 2003 OProfile authors
* @remark Read the file COPYING
*
* @author John Levon <levon@movementarian.org>
*/
#include <linux/kernel.h>
#include <linux/oprofile.h>
#include <linux/sched.h>
#include <asm/perfmon.h>
#include <asm/ptrace.h>
#include <asm/errno.h>
static int allow_ints;
/*
 * Perfmon overflow callback: record one oprofile sample for the
 * overflowed PMD's event and ask perfmon to re-arm the counter.
 */
static int
perfmon_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
		struct pt_regs *regs, unsigned long stamp)
{
	arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;

	/* the owner of the oprofile event buffer may have exited
	 * without perfmon being shutdown (e.g. SIGSEGV)
	 */
	if (allow_ints)
		oprofile_add_sample(regs, arg->pmd_eventid);

	return 0;
}
/* oprofile start hook: allow the overflow handler to record samples. */
static int perfmon_start(void)
{
	allow_ints = 1;
	return 0;
}
/* oprofile stop hook: make the overflow handler drop further samples. */
static void perfmon_stop(void)
{
	allow_ints = 0;
}
/* UUID identifying this buffer format to the perfmon subsystem. */
#define OPROFILE_FMT_UUID { \
	0x77, 0x7a, 0x6e, 0x61, 0x20, 0x65, 0x73, 0x69, 0x74, 0x6e, 0x72, 0x20, 0x61, 0x65, 0x0a, 0x6c }

/* Buffer format registered with perfmon; only the overflow handler is
 * provided, since sampling goes straight into oprofile's buffer. */
static pfm_buffer_fmt_t oprofile_fmt = {
	.fmt_name = "oprofile_format",
	.fmt_uuid = OPROFILE_FMT_UUID,
	.fmt_handler = perfmon_handler,
};
/* Map the local CPU family to an oprofile cpu_type string. */
static char *get_cpu_type(void)
{
	switch (local_cpu_data->family) {
	case 0x07:
		return "ia64/itanium";
	case 0x1f:
		return "ia64/itanium2";
	default:
		/* unknown family: fall back to the generic ia64 type */
		return "ia64/ia64";
	}
}
/* all the ops are handled via userspace for IA64 perfmon */
static int using_perfmon;
/*
 * Register the oprofile buffer format with perfmon and fill in the
 * oprofile operations.  Returns 0 on success, -ENODEV if perfmon
 * rejects the format registration.
 */
int perfmon_init(struct oprofile_operations *ops)
{
	if (pfm_register_buffer_fmt(&oprofile_fmt))
		return -ENODEV;

	ops->cpu_type = get_cpu_type();
	ops->start = perfmon_start;
	ops->stop = perfmon_stop;
	using_perfmon = 1;	/* remember so perfmon_exit() unregisters */

	printk(KERN_INFO "oprofile: using perfmon.\n");
	return 0;
}
/* Unregister the buffer format, but only if perfmon_init() succeeded. */
void perfmon_exit(void)
{
	if (using_perfmon)
		pfm_unregister_buffer_fmt(oprofile_fmt.fmt_uuid);
}
| gpl-2.0 |
mpe/powerpc | drivers/hv/hv_snapshot.c | 74 | 11964 | // SPDX-License-Identifier: GPL-2.0-only
/*
* An implementation of host initiated guest snapshot.
*
* Copyright (C) 2013, Microsoft, Inc.
* Author : K. Y. Srinivasan <kys@microsoft.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/net.h>
#include <linux/nls.h>
#include <linux/connector.h>
#include <linux/workqueue.h>
#include <linux/hyperv.h>
#include <asm/hyperv-tlfs.h>
#include "hyperv_vmbus.h"
#include "hv_utils_transport.h"
#define VSS_MAJOR 5
#define VSS_MINOR 0
#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
#define VSS_VER_COUNT 1
static const int vss_versions[] = {
VSS_VERSION
};
#define FW_VER_COUNT 1
static const int fw_versions[] = {
UTIL_FW_VERSION
};
/*
* Timeout values are based on expecations from host
*/
#define VSS_FREEZE_TIMEOUT (15 * 60)
/*
* Global state maintained for transaction that is being processed. For a class
* of integration services, including the "VSS service", the specified protocol
* is a "request/response" protocol which means that there can only be single
* outstanding transaction from the host at any given point in time. We use
* this to simplify memory management in this driver - we cache and process
* only one message at a time.
*
* While the request/response protocol is guaranteed by the host, we further
* ensure this by serializing packet processing in this driver - we do not
* read additional packets from the VMBUs until the current packet is fully
* handled.
*/
static struct {
int state; /* hvutil_device_state */
int recv_len; /* number of bytes received. */
struct vmbus_channel *recv_channel; /* chn we got the request */
u64 recv_req_id; /* request ID. */
struct hv_vss_msg *msg; /* current message */
} vss_transaction;
static void vss_respond_to_host(int error);
/*
* This state maintains the version number registered by the daemon.
*/
static int dm_reg_value;
static const char vss_devname[] = "vmbus/hv_vss";
static __u8 *recv_buffer;
static struct hvutil_transport *hvt;
static void vss_timeout_func(struct work_struct *dummy);
static void vss_handle_request(struct work_struct *dummy);
static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
static DECLARE_WORK(vss_handle_request_work, vss_handle_request);
static void vss_poll_wrapper(void *channel)
{
	struct vmbus_channel *chan = channel;

	/* Transaction is finished, reset the state here to avoid races. */
	vss_transaction.state = HVUTIL_READY;
	tasklet_schedule(&chan->callback_event);
}
/*
* Callback when data is received from user mode.
*/
/* Delayed-work callback: the daemon missed its deadline, so fail the
 * pending host request and re-enable channel processing. */
static void vss_timeout_func(struct work_struct *dummy)
{
	/*
	 * Timeout waiting for userspace component to reply happened.
	 */
	pr_warn("VSS: timeout waiting for daemon to reply\n");
	vss_respond_to_host(HV_E_FAIL);

	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}
/* Completion callback for the registration reply sent to the daemon:
 * resume channel processing once the version message went out. */
static void vss_register_done(void)
{
	hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
	pr_debug("VSS: userspace daemon registered\n");
}
/*
 * Process a registration message from the userspace daemon and record
 * which protocol revision it speaks.  Returns 0 on success, -EFAULT if
 * the version reply could not be sent, -EINVAL for unknown operations.
 */
static int vss_handle_handshake(struct hv_vss_msg *vss_msg)
{
	u32 our_ver = VSS_OP_REGISTER1;
	int op = vss_msg->vss_hdr.operation;

	if (op == VSS_OP_REGISTER) {
		/* Legacy daemon: it does not expect a reply from us. */
		dm_reg_value = VSS_OP_REGISTER;
	} else if (op == VSS_OP_REGISTER1) {
		/* Newer daemon: reply with the protocol version we speak. */
		if (hvutil_transport_send(hvt, &our_ver, sizeof(our_ver),
					  vss_register_done))
			return -EFAULT;
		dm_reg_value = VSS_OP_REGISTER1;
	} else {
		return -EINVAL;
	}

	pr_info("VSS: userspace daemon ver. %d connected\n", dm_reg_value);
	return 0;
}
/*
 * Callback for messages arriving from the userspace daemon.  Either a
 * (re)registration handshake, or the daemon's reply to the transaction
 * we forwarded in vss_send_op().
 */
static int vss_on_msg(void *msg, int len)
{
	struct hv_vss_msg *vss_msg = (struct hv_vss_msg *)msg;

	if (len != sizeof(*vss_msg)) {
		pr_debug("VSS: Message size does not match length\n");
		return -EINVAL;
	}

	if (vss_msg->vss_hdr.operation == VSS_OP_REGISTER ||
	    vss_msg->vss_hdr.operation == VSS_OP_REGISTER1) {
		/*
		 * Don't process registration messages if we're in the middle
		 * of a transaction processing.
		 */
		if (vss_transaction.state > HVUTIL_READY) {
			pr_debug("VSS: Got unexpected registration request\n");
			return -EINVAL;
		}

		return vss_handle_handshake(vss_msg);
	} else if (vss_transaction.state == HVUTIL_USERSPACE_REQ) {
		vss_transaction.state = HVUTIL_USERSPACE_RECV;

		if (vss_msg->vss_hdr.operation == VSS_OP_HOT_BACKUP)
			vss_transaction.msg->vss_cf.flags =
				VSS_HBU_NO_AUTO_RECOVERY;

		/* Only respond if the timeout work had not already fired;
		 * cancel_delayed_work_sync() returning true means we won
		 * the race against vss_timeout_func(). */
		if (cancel_delayed_work_sync(&vss_timeout_work)) {
			vss_respond_to_host(vss_msg->error);
			/* Transaction is finished, reset the state. */
			hv_poll_channel(vss_transaction.recv_channel,
					vss_poll_wrapper);
		}
	} else {
		/* This is a spurious call! */
		pr_debug("VSS: Transaction not active\n");
		return -EINVAL;
	}
	return 0;
}
/*
 * Forward the current host transaction to the userspace daemon and arm
 * the reply timeout (longer for FREEZE, which can legitimately take
 * minutes).  On send failure, the host is answered with HV_E_FAIL —
 * but only if the timeout work had not fired in the meantime.
 */
static void vss_send_op(void)
{
	int op = vss_transaction.msg->vss_hdr.operation;
	int rc;
	struct hv_vss_msg *vss_msg;

	/* The transaction state is wrong. */
	if (vss_transaction.state != HVUTIL_HOSTMSG_RECEIVED) {
		pr_debug("VSS: Unexpected attempt to send to daemon\n");
		return;
	}

	vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
	if (!vss_msg)
		return;

	vss_msg->vss_hdr.operation = op;

	vss_transaction.state = HVUTIL_USERSPACE_REQ;

	/* arm the timeout before sending so a fast reply can cancel it */
	schedule_delayed_work(&vss_timeout_work, op == VSS_OP_FREEZE ?
			      VSS_FREEZE_TIMEOUT * HZ : HV_UTIL_TIMEOUT * HZ);

	rc = hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);
	if (rc) {
		pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
		if (cancel_delayed_work_sync(&vss_timeout_work)) {
			vss_respond_to_host(HV_E_FAIL);
			vss_transaction.state = HVUTIL_READY;
		}
	}

	kfree(vss_msg);
}
/*
 * Workqueue handler for a host request stashed in vss_transaction.msg.
 * Freeze/thaw/hot-backup are delegated to the daemon (reply deferred);
 * everything else is answered inline with success.
 */
static void vss_handle_request(struct work_struct *dummy)
{
    switch (vss_transaction.msg->vss_hdr.operation) {
    /*
     * Initiate a "freeze/thaw" operation in the guest.
     * We respond to the host once the operation is complete.
     *
     * We send the message to the user space daemon and the operation is
     * performed in the daemon.
     */
    case VSS_OP_THAW:
    case VSS_OP_FREEZE:
    case VSS_OP_HOT_BACKUP:
        if (vss_transaction.state < HVUTIL_READY) {
            /* Userspace is not registered yet */
            pr_debug("VSS: Not ready for request.\n");
            vss_respond_to_host(HV_E_FAIL);
            return;
        }

        pr_debug("VSS: Received request for op code: %d\n",
            vss_transaction.msg->vss_hdr.operation);
        vss_transaction.state = HVUTIL_HOSTMSG_RECEIVED;
        vss_send_op();
        return;
    case VSS_OP_GET_DM_INFO:
        /* Report no special capabilities to the host. */
        vss_transaction.msg->dm_info.flags = 0;
        break;
    default:
        break;
    }

    /* Synchronous ops fall through here: ack the host and re-poll. */
    vss_respond_to_host(0);
    hv_poll_channel(vss_transaction.recv_channel, vss_poll_wrapper);
}
/*
* Send a response back to the host.
*/
/*
 * Send a response back to the host.
 * @error: ICMSG status code to report (0 for success, HV_E_FAIL etc.).
 * Completes the transaction captured in the vss_transaction globals by
 * echoing recv_buffer back over VMBus with the status filled in.
 */
static void
vss_respond_to_host(int error)
{
    struct icmsg_hdr *icmsghdrp;
    u32 buf_len;
    struct vmbus_channel *channel;
    u64 req_id;

    /*
     * Copy the global state for completing the transaction. Note that
     * only one transaction can be active at a time.
     */

    buf_len = vss_transaction.recv_len;
    channel = vss_transaction.recv_channel;
    req_id = vss_transaction.recv_req_id;

    icmsghdrp = (struct icmsg_hdr *)
            &recv_buffer[sizeof(struct vmbuspipe_hdr)];

    if (channel->onchannel_callback == NULL)
        /*
         * We have raced with util driver being unloaded;
         * silently return.
         */
        return;

    icmsghdrp->status = error;

    icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;

    vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
                VM_PKT_DATA_INBAND, 0);
}
/*
* This callback is invoked when we get a VSS message from the host.
* The host ensures that only one VSS transaction can be active at a time.
*/
/*
 * This callback is invoked when we get a VSS message from the host.
 * The host ensures that only one VSS transaction can be active at a time.
 * Negotiation packets are answered inline; VSS operation packets are
 * stashed in vss_transaction and deferred to vss_handle_request_work.
 */
void hv_vss_onchannelcallback(void *context)
{
    struct vmbus_channel *channel = context;
    u32 recvlen;
    u64 requestid;
    struct hv_vss_msg *vss_msg;
    int vss_srv_version;

    struct icmsg_hdr *icmsghdrp;

    /* A transaction is already in flight; leave the packet queued. */
    if (vss_transaction.state > HVUTIL_READY)
        return;

    if (vmbus_recvpacket(channel, recv_buffer, HV_HYP_PAGE_SIZE * 2, &recvlen, &requestid)) {
        pr_err_ratelimited("VSS request received. Could not read into recv buf\n");
        return;
    }

    if (!recvlen)
        return;

    /* Ensure recvlen is big enough to read header data */
    if (recvlen < ICMSG_HDR) {
        pr_err_ratelimited("VSS request received. Packet length too small: %d\n",
                   recvlen);
        return;
    }

    icmsghdrp = (struct icmsg_hdr *)&recv_buffer[sizeof(struct vmbuspipe_hdr)];

    if (icmsghdrp->icmsgtype == ICMSGTYPE_NEGOTIATE) {
        if (vmbus_prep_negotiate_resp(icmsghdrp,
                 recv_buffer, recvlen,
                 fw_versions, FW_VER_COUNT,
                 vss_versions, VSS_VER_COUNT,
                 NULL, &vss_srv_version)) {

            pr_info("VSS IC version %d.%d\n",
                vss_srv_version >> 16,
                vss_srv_version & 0xFFFF);
        }
    } else if (icmsghdrp->icmsgtype == ICMSGTYPE_VSS) {
        /* Ensure recvlen is big enough to contain hv_vss_msg */
        if (recvlen < ICMSG_HDR + sizeof(struct hv_vss_msg)) {
            pr_err_ratelimited("Invalid VSS msg. Packet length too small: %u\n",
                       recvlen);
            return;
        }
        vss_msg = (struct hv_vss_msg *)&recv_buffer[ICMSG_HDR];

        /*
         * Stash away this global state for completing the
         * transaction; note transactions are serialized.
         */

        vss_transaction.recv_len = recvlen;
        vss_transaction.recv_req_id = requestid;
        vss_transaction.msg = (struct hv_vss_msg *)vss_msg;

        /* Reply happens later from the workqueue context. */
        schedule_work(&vss_handle_request_work);
        return;
    } else {
        pr_err_ratelimited("VSS request received. Invalid msg type: %d\n",
                   icmsghdrp->icmsgtype);
        return;
    }

    /* Negotiation path: acknowledge immediately. */
    icmsghdrp->icflags = ICMSGHDRFLAG_TRANSACTION |
        ICMSGHDRFLAG_RESPONSE;
    vmbus_sendpacket(channel, recv_buffer, recvlen, requestid,
             VM_PKT_DATA_INBAND, 0);
}
/*
 * Transport reset (daemon closed its device): fail any in-flight host
 * request and go back to waiting for a new daemon registration.
 */
static void vss_on_reset(void)
{
    if (cancel_delayed_work_sync(&vss_timeout_work))
        vss_respond_to_host(HV_E_FAIL);
    vss_transaction.state = HVUTIL_DEVICE_INIT;
}
/*
 * Initialize the VSS integration service.
 * @srv: util-service descriptor providing the channel and recv buffer.
 * Returns 0 on success, -ENOTSUPP on pre-Win8.1 hosts, -EFAULT if the
 * userspace transport cannot be created.
 */
int
hv_vss_init(struct hv_util_service *srv)
{
    if (vmbus_proto_version < VERSION_WIN8_1) {
        pr_warn("Integration service 'Backup (volume snapshot)'"
            " not supported on this host version.\n");
        return -ENOTSUPP;
    }
    recv_buffer = srv->recv_buffer;
    vss_transaction.recv_channel = srv->channel;
    /* Receive buffer sized for two hypervisor pages (see callback). */
    vss_transaction.recv_channel->max_pkt_size = HV_HYP_PAGE_SIZE * 2;

    /*
     * When this driver loads, the user level daemon that
     * processes the host requests may not yet be running.
     * Defer processing channel callbacks until the daemon
     * has registered.
     */
    vss_transaction.state = HVUTIL_DEVICE_INIT;

    hvt = hvutil_transport_init(vss_devname, CN_VSS_IDX, CN_VSS_VAL,
                    vss_on_msg, vss_on_reset);
    if (!hvt) {
        pr_warn("VSS: Failed to initialize transport\n");
        return -EFAULT;
    }

    return 0;
}
/* Synchronously flush both deferred-work items (timeout + request). */
static void hv_vss_cancel_work(void)
{
    cancel_delayed_work_sync(&vss_timeout_work);
    cancel_work_sync(&vss_handle_request_work);
}
/*
 * Prepare for hibernation/suspend: thaw the daemon's file systems and
 * quiesce the channel.  Returns 0 on success, -ENOMEM on allocation
 * failure.  The channel tasklet stays disabled until hv_vss_pre_resume().
 */
int hv_vss_pre_suspend(void)
{
    struct vmbus_channel *channel = vss_transaction.recv_channel;
    struct hv_vss_msg *vss_msg;

    /*
     * Fake a THAW message for the user space daemon in case the daemon
     * has frozen the file systems. It doesn't matter if there is already
     * a message pending to be delivered to the user space since we force
     * vss_transaction.state to be HVUTIL_READY, so the user space daemon's
     * write() will fail with EINVAL (see vss_on_msg()), and the daemon
     * will reset the device by closing and re-opening it.
     */
    vss_msg = kzalloc(sizeof(*vss_msg), GFP_KERNEL);
    if (!vss_msg)
        return -ENOMEM;

    /* Block the channel callback while we rewrite the global state. */
    tasklet_disable(&channel->callback_event);

    vss_msg->vss_hdr.operation = VSS_OP_THAW;

    /* Cancel any possible pending work. */
    hv_vss_cancel_work();

    /* We don't care about the return value. */
    hvutil_transport_send(hvt, vss_msg, sizeof(*vss_msg), NULL);

    kfree(vss_msg);

    vss_transaction.state = HVUTIL_READY;

    /* tasklet_enable() will be called in hv_vss_pre_resume(). */
    return 0;
}
/* Resume counterpart of hv_vss_pre_suspend(): re-enable the channel
 * tasklet that was disabled before suspend.  Always returns 0. */
int hv_vss_pre_resume(void)
{
    struct vmbus_channel *channel = vss_transaction.recv_channel;

    tasklet_enable(&channel->callback_event);

    return 0;
}
/* Module teardown: mark the device dying, flush pending work, then
 * destroy the userspace transport. */
void hv_vss_deinit(void)
{
    vss_transaction.state = HVUTIL_DEVICE_DYING;

    hv_vss_cancel_work();

    hvutil_transport_destroy(hvt);
}
| gpl-2.0 |
felipesanches/linux-media | drivers/power/reset/restart-poweroff.c | 74 | 1714 | /*
* Power off by restarting and let u-boot keep hold of the machine
* until the user presses a button for example.
*
* Andrew Lunn <andrew@lunn.ch>
*
* Copyright (C) 2012 Andrew Lunn
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <asm/system_misc.h>
/*
 * pm_power_off hook: "power off" by doing a hard restart, letting the
 * bootloader (e.g. u-boot) hold the machine until user interaction.
 */
static void restart_poweroff_do_poweroff(void)
{
    reboot_mode = REBOOT_HARD;
    machine_restart(NULL);
}
/*
 * Probe: install restart_poweroff_do_poweroff as the system power-off
 * handler.  Refuses with -EBUSY if another driver already claimed
 * pm_power_off, so we never silently override it.
 */
static int restart_poweroff_probe(struct platform_device *pdev)
{
    /* If a pm_power_off function has already been added, leave it alone */
    if (pm_power_off != NULL) {
        /* Fix: printk-style messages must be newline-terminated. */
        dev_err(&pdev->dev,
            "pm_power_off function already registered\n");
        return -EBUSY;
    }

    pm_power_off = &restart_poweroff_do_poweroff;
    return 0;
}
/*
 * Remove: drop our power-off handler, but never clobber a handler that
 * some other driver installed after us.  Always succeeds.
 */
static int restart_poweroff_remove(struct platform_device *pdev)
{
    if (pm_power_off == restart_poweroff_do_poweroff)
        pm_power_off = NULL;

    return 0;
}
/* Device-tree match table: binds to "restart-poweroff" compatible nodes. */
static const struct of_device_id of_restart_poweroff_match[] = {
    { .compatible = "restart-poweroff", },
    {},
};

static struct platform_driver restart_poweroff_driver = {
    .probe = restart_poweroff_probe,
    .remove = restart_poweroff_remove,
    .driver = {
        .name = "poweroff-restart",
        .of_match_table = of_restart_poweroff_match,
    },
};
module_platform_driver(restart_poweroff_driver);

/* Fix: the author's email address was missing its closing '>'. */
MODULE_AUTHOR("Andrew Lunn <andrew@lunn.ch>");
MODULE_DESCRIPTION("restart poweroff driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:poweroff-restart");
| gpl-2.0 |
alex-zhang/git | builtin/push.c | 74 | 18163 | /*
* "git push"
*/
#include "cache.h"
#include "refs.h"
#include "run-command.h"
#include "builtin.h"
#include "remote.h"
#include "transport.h"
#include "parse-options.h"
#include "submodule.h"
#include "send-pack.h"
static const char * const push_usage[] = {
N_("git push [<options>] [<repository> [<refspec>...]]"),
NULL,
};
static int thin = 1;
static int deleterefs;
static const char *receivepack;
static int verbosity;
static int progress = -1;
static struct push_cas_option cas;
static const char **refspec;
static int refspec_nr;
static int refspec_alloc;
/* Append one refspec string to the global refspec list, growing the
 * backing array on demand. */
static void add_refspec(const char *ref)
{
    ALLOC_GROW(refspec, refspec_nr + 1, refspec_alloc);
    refspec[refspec_nr++] = ref;
}
/*
 * Expand a bare ref name on the command line into a full "src:dst"
 * refspec, using either the remote's configured push refspecs or, under
 * push.default=upstream, the branch's upstream tracking info.  Returns
 * the original string unchanged when no mapping applies (the mapped
 * string is freshly allocated; the caller keeps it).
 */
static const char *map_refspec(const char *ref,
                   struct remote *remote, struct ref *local_refs)
{
    struct ref *matched = NULL;

    /* Does "ref" uniquely name our ref? */
    if (count_refspec_match(ref, local_refs, &matched) != 1)
        return ref;

    if (remote->push) {
        struct refspec query;
        memset(&query, 0, sizeof(struct refspec));
        query.src = matched->name;
        if (!query_refspecs(remote->push, remote->push_refspec_nr, &query) &&
            query.dst) {
            struct strbuf buf = STRBUF_INIT;
            strbuf_addf(&buf, "%s%s:%s",
                    query.force ? "+" : "",
                    query.src, query.dst);
            return strbuf_detach(&buf, NULL);
        }
    }

    if (push_default == PUSH_DEFAULT_UPSTREAM &&
        starts_with(matched->name, "refs/heads/")) {
        /* +11 skips the "refs/heads/" prefix just matched above. */
        struct branch *branch = branch_get(matched->name + 11);
        if (branch->merge_nr == 1 && branch->merge[0]->src) {
            struct strbuf buf = STRBUF_INIT;
            strbuf_addf(&buf, "%s:%s",
                    ref, branch->merge[0]->src);
            return strbuf_detach(&buf, NULL);
        }
    }

    return ref;
}
/*
 * Convert the raw command-line ref arguments into refspecs:
 * "tag <t>" becomes "refs/tags/<t>", --delete targets become ":<ref>",
 * and bare names are expanded via map_refspec().  Each result is added
 * to the global refspec list.
 */
static void set_refspecs(const char **refs, int nr, const char *repo)
{
    struct remote *remote = NULL;
    struct ref *local_refs = NULL;
    int i;

    for (i = 0; i < nr; i++) {
        const char *ref = refs[i];
        if (!strcmp("tag", ref)) {
            /* "tag" shorthand consumes the following argument. */
            struct strbuf tagref = STRBUF_INIT;
            if (nr <= ++i)
                die(_("tag shorthand without <tag>"));
            ref = refs[i];
            if (deleterefs)
                strbuf_addf(&tagref, ":refs/tags/%s", ref);
            else
                strbuf_addf(&tagref, "refs/tags/%s", ref);
            ref = strbuf_detach(&tagref, NULL);
        } else if (deleterefs) {
            struct strbuf delref = STRBUF_INIT;
            if (strchr(ref, ':'))
                die(_("--delete only accepts plain target ref names"));
            strbuf_addf(&delref, ":%s", ref);
            ref = strbuf_detach(&delref, NULL);
        } else if (!strchr(ref, ':')) {
            if (!remote) {
                /* lazily grab remote and local_refs */
                remote = remote_get(repo);
                local_refs = get_local_heads();
            }
            ref = map_refspec(ref, remote, local_refs);
        }
        add_refspec(ref);
    }
}
/*
 * Pick the URL list to push to: the dedicated push URLs when the remote
 * has any, otherwise the fetch URLs.  Stores the list in *url_p and
 * returns its length.
 */
static int push_url_of_remote(struct remote *remote, const char ***url_p)
{
    int have_pushurl = remote->pushurl_nr != 0;

    *url_p = have_pushurl ? remote->pushurl : remote->url;
    return have_pushurl ? remote->pushurl_nr : remote->url_nr;
}
/*
 * Die with guidance when push.default=simple refuses to push because the
 * upstream branch name differs from the local branch name.  Never returns.
 */
static NORETURN int die_push_simple(struct branch *branch, struct remote *remote) {
    /*
     * There's no point in using shorten_unambiguous_ref here,
     * as the ambiguity would be on the remote side, not what
     * we have locally. Plus, this is supposed to be the simple
     * mode. If the user is doing something crazy like setting
     * upstream to a non-branch, we should probably be showing
     * them the big ugly fully qualified ref.
     */
    const char *advice_maybe = "";
    const char *short_upstream = branch->merge[0]->src;

    skip_prefix(short_upstream, "refs/heads/", &short_upstream);

    /*
     * Don't show advice for people who explicitly set
     * push.default.
     */
    if (push_default == PUSH_DEFAULT_UNSPECIFIED)
        advice_maybe = _("\n"
                 "To choose either option permanently, "
                 "see push.default in 'git help config'.");
    die(_("The upstream branch of your current branch does not match\n"
          "the name of your current branch.  To push to the upstream branch\n"
          "on the remote, use\n"
          "\n"
          "    git push %s HEAD:%s\n"
          "\n"
          "To push to the branch of the same name on the remote, use\n"
          "\n"
          "    git push %s %s\n"
          "%s"),
        remote->name, short_upstream,
        remote->name, branch->name, advice_maybe);
}
static const char message_detached_head_die[] =
N_("You are not currently on a branch.\n"
"To push the history leading to the current (detached HEAD)\n"
"state now, use\n"
"\n"
" git push %s HEAD:<name-of-remote-branch>\n");
/*
 * Build the refspec for push.default=upstream/simple: push the current
 * branch to its configured upstream.  Dies with explanatory messages on
 * detached HEAD, missing/multiple upstreams, triangular workflows, and
 * (in simple mode) upstream-name mismatch.
 */
static void setup_push_upstream(struct remote *remote, struct branch *branch,
                int triangular, int simple)
{
    struct strbuf refspec = STRBUF_INIT;

    if (!branch)
        die(_(message_detached_head_die), remote->name);
    if (!branch->merge_nr || !branch->merge || !branch->remote_name)
        die(_("The current branch %s has no upstream branch.\n"
            "To push the current branch and set the remote as upstream, use\n"
            "\n"
            "    git push --set-upstream %s %s\n"),
            branch->name,
            remote->name,
            branch->name);
    if (branch->merge_nr != 1)
        die(_("The current branch %s has multiple upstream branches, "
            "refusing to push."), branch->name);
    if (triangular)
        die(_("You are pushing to remote '%s', which is not the upstream of\n"
              "your current branch '%s', without telling me what to push\n"
              "to update which remote branch."),
            remote->name, branch->name);

    if (simple) {
        /* Additional safety */
        if (strcmp(branch->refname, branch->merge[0]->src))
            die_push_simple(branch, remote);
    }

    strbuf_addf(&refspec, "%s:%s", branch->name, branch->merge[0]->src);
    add_refspec(refspec.buf);
}
static void setup_push_current(struct remote *remote, struct branch *branch)
{
if (!branch)
die(_(message_detached_head_die), remote->name);
add_refspec(branch->name);
}
static char warn_unspecified_push_default_msg[] =
N_("push.default is unset; its implicit value has changed in\n"
"Git 2.0 from 'matching' to 'simple'. To squelch this message\n"
"and maintain the traditional behavior, use:\n"
"\n"
" git config --global push.default matching\n"
"\n"
"To squelch this message and adopt the new behavior now, use:\n"
"\n"
" git config --global push.default simple\n"
"\n"
"When push.default is set to 'matching', git will push local branches\n"
"to the remote branches that already exist with the same name.\n"
"\n"
"Since Git 2.0, Git defaults to the more conservative 'simple'\n"
"behavior, which only pushes the current branch to the corresponding\n"
"remote branch that 'git pull' uses to update the current branch.\n"
"\n"
"See 'git help config' and search for 'push.default' for further information.\n"
"(the 'simple' mode was introduced in Git 1.7.11. Use the similar mode\n"
"'current' instead of 'simple' if you sometimes use older versions of Git)");
/* Emit the push.default transition warning, at most once per process. */
static void warn_unspecified_push_default_configuration(void)
{
    static int warn_once;

    if (!warn_once) {
        warn_once = 1;
        warning("%s\n", _(warn_unspecified_push_default_msg));
    }
}
/*
 * A workflow is "triangular" when the remote being pushed to differs
 * from the default remote we would fetch from.
 */
static int is_workflow_triangular(struct remote *remote)
{
    struct remote *fetch_remote = remote_get(NULL);

    return fetch_remote != NULL && fetch_remote != remote;
}
/*
 * No refspecs were given on the command line or configured for the
 * remote: synthesize them according to push.default.
 */
static void setup_default_push_refspecs(struct remote *remote)
{
    struct branch *branch = branch_get(NULL);
    int triangular = is_workflow_triangular(remote);

    switch (push_default) {
    default:
    case PUSH_DEFAULT_MATCHING:
        /* ":" pushes all branches that exist on both sides. */
        add_refspec(":");
        break;

    case PUSH_DEFAULT_UNSPECIFIED:
        warn_unspecified_push_default_configuration();
        /* fallthru */

    case PUSH_DEFAULT_SIMPLE:
        /* In triangular setups "simple" degrades to "current". */
        if (triangular)
            setup_push_current(remote, branch);
        else
            setup_push_upstream(remote, branch, triangular, 1);
        break;

    case PUSH_DEFAULT_UPSTREAM:
        setup_push_upstream(remote, branch, triangular, 0);
        break;

    case PUSH_DEFAULT_CURRENT:
        setup_push_current(remote, branch);
        break;

    case PUSH_DEFAULT_NOTHING:
        die(_("You didn't specify any refspecs to push, and "
              "push.default is \"nothing\"."));
        break;
    }
}
static const char message_advice_pull_before_push[] =
N_("Updates were rejected because the tip of your current branch is behind\n"
"its remote counterpart. Integrate the remote changes (e.g.\n"
"'git pull ...') before pushing again.\n"
"See the 'Note about fast-forwards' in 'git push --help' for details.");
static const char message_advice_checkout_pull_push[] =
N_("Updates were rejected because a pushed branch tip is behind its remote\n"
"counterpart. Check out this branch and integrate the remote changes\n"
"(e.g. 'git pull ...') before pushing again.\n"
"See the 'Note about fast-forwards' in 'git push --help' for details.");
static const char message_advice_ref_fetch_first[] =
N_("Updates were rejected because the remote contains work that you do\n"
"not have locally. This is usually caused by another repository pushing\n"
"to the same ref. You may want to first integrate the remote changes\n"
"(e.g., 'git pull ...') before pushing again.\n"
"See the 'Note about fast-forwards' in 'git push --help' for details.");
static const char message_advice_ref_already_exists[] =
N_("Updates were rejected because the tag already exists in the remote.");
static const char message_advice_ref_needs_force[] =
N_("You cannot update a remote ref that points at a non-commit object,\n"
"or update a remote ref to make it point at a non-commit object,\n"
"without using the '--force' option.\n");
/*
 * Rejection-advice helpers: each prints its hint only when both the
 * specific advice flag and the umbrella push.updateRejected flag are on.
 */
static void advise_pull_before_push(void)
{
    if (advice_push_non_ff_current && advice_push_update_rejected)
        advise(_(message_advice_pull_before_push));
}

static void advise_checkout_pull_push(void)
{
    if (advice_push_non_ff_matching && advice_push_update_rejected)
        advise(_(message_advice_checkout_pull_push));
}

static void advise_ref_already_exists(void)
{
    if (advice_push_already_exists && advice_push_update_rejected)
        advise(_(message_advice_ref_already_exists));
}

static void advise_ref_fetch_first(void)
{
    if (advice_push_fetch_first && advice_push_update_rejected)
        advise(_(message_advice_ref_fetch_first));
}

static void advise_ref_needs_force(void)
{
    if (advice_push_needs_force && advice_push_update_rejected)
        advise(_(message_advice_ref_needs_force));
}
/*
 * Push the global refspec list over one transport.  Returns 0 on
 * success, 1 on failure (after printing the matching advice for the
 * first applicable rejection reason).
 */
static int push_with_options(struct transport *transport, int flags)
{
    int err;
    unsigned int reject_reasons;

    transport_set_verbosity(transport, verbosity, progress);

    if (receivepack)
        transport_set_option(transport,
                     TRANS_OPT_RECEIVEPACK, receivepack);
    transport_set_option(transport, TRANS_OPT_THIN, thin ? "yes" : NULL);

    if (!is_empty_cas(&cas)) {
        /* --force-with-lease needs a smart transport. */
        if (!transport->smart_options)
            die("underlying transport does not support --%s option",
                CAS_OPT_NAME);
        transport->smart_options->cas = &cas;
    }

    if (verbosity > 0)
        fprintf(stderr, _("Pushing to %s\n"), transport->url);
    err = transport_push(transport, refspec_nr, refspec, flags,
                 &reject_reasons);
    if (err != 0)
        error(_("failed to push some refs to '%s'"), transport->url);

    /* Disconnect errors also count as failure. */
    err |= transport_disconnect(transport);
    if (!err)
        return 0;

    if (reject_reasons & REJECT_NON_FF_HEAD) {
        advise_pull_before_push();
    } else if (reject_reasons & REJECT_NON_FF_OTHER) {
        advise_checkout_pull_push();
    } else if (reject_reasons & REJECT_ALREADY_EXISTS) {
        advise_ref_already_exists();
    } else if (reject_reasons & REJECT_FETCH_FIRST) {
        advise_ref_fetch_first();
    } else if (reject_reasons & REJECT_NEEDS_FORCE) {
        advise_ref_needs_force();
    }

    return 1;
}
/*
 * Resolve the remote, validate option/refspec combinations, fill in
 * default refspecs if none were given, then push to every configured
 * URL.  Returns 0 if all pushes succeeded, 1 otherwise.
 */
static int do_push(const char *repo, int flags)
{
    int i, errs;
    struct remote *remote = pushremote_get(repo);
    const char **url;
    int url_nr;

    if (!remote) {
        if (repo)
            die(_("bad repository '%s'"), repo);
        die(_("No configured push destination.\n"
            "Either specify the URL from the command-line or configure a remote repository using\n"
            "\n"
            "    git remote add <name> <url>\n"
            "\n"
            "and then push using the remote name\n"
            "\n"
            "    git push <name>\n"));
    }

    if (remote->mirror)
        flags |= (TRANSPORT_PUSH_MIRROR|TRANSPORT_PUSH_FORCE);

    /* --all/--mirror conflict with explicit refspecs and each other. */
    if ((flags & TRANSPORT_PUSH_ALL) && refspec) {
        if (!strcmp(*refspec, "refs/tags/*"))
            return error(_("--all and --tags are incompatible"));
        return error(_("--all can't be combined with refspecs"));
    }

    if ((flags & TRANSPORT_PUSH_MIRROR) && refspec) {
        if (!strcmp(*refspec, "refs/tags/*"))
            return error(_("--mirror and --tags are incompatible"));
        return error(_("--mirror can't be combined with refspecs"));
    }

    if ((flags & (TRANSPORT_PUSH_ALL|TRANSPORT_PUSH_MIRROR)) ==
                (TRANSPORT_PUSH_ALL|TRANSPORT_PUSH_MIRROR)) {
        return error(_("--all and --mirror are incompatible"));
    }

    /* Fall back to remote.<name>.push, then to push.default rules. */
    if (!refspec && !(flags & TRANSPORT_PUSH_ALL)) {
        if (remote->push_refspec_nr) {
            refspec = remote->push_refspec;
            refspec_nr = remote->push_refspec_nr;
        } else if (!(flags & TRANSPORT_PUSH_MIRROR))
            setup_default_push_refspecs(remote);
    }
    errs = 0;
    url_nr = push_url_of_remote(remote, &url);
    if (url_nr) {
        for (i = 0; i < url_nr; i++) {
            struct transport *transport =
                transport_get(remote, url[i]);
            if (push_with_options(transport, flags))
                errs++;
        }
    } else {
        struct transport *transport =
            transport_get(remote, NULL);

        if (push_with_options(transport, flags))
            errs++;
    }
    return !!errs;
}
/*
 * parse-options callback for --recurse-submodules=<check|on-demand>.
 * Sets the corresponding TRANSPORT_RECURSE_SUBMODULES_* bit in the
 * flags word; dies on repeated use, missing, or unknown argument.
 */
static int option_parse_recurse_submodules(const struct option *opt,
                   const char *arg, int unset)
{
    int *flags = opt->value;

    if (*flags & (TRANSPORT_RECURSE_SUBMODULES_CHECK |
              TRANSPORT_RECURSE_SUBMODULES_ON_DEMAND))
        die("%s can only be used once.", opt->long_name);

    if (arg) {
        if (!strcmp(arg, "check"))
            *flags |= TRANSPORT_RECURSE_SUBMODULES_CHECK;
        else if (!strcmp(arg, "on-demand"))
            *flags |= TRANSPORT_RECURSE_SUBMODULES_ON_DEMAND;
        else
            die("bad %s argument: %s", opt->long_name, arg);
    } else
        die("option %s needs an argument (check|on-demand)",
                opt->long_name);

    return 0;
}
/*
 * Translate the tri-state --signed / push.gpgsign setting into the
 * transport flag bits, keeping ALWAYS and IF_ASKED mutually exclusive.
 * Any other value (e.g. the unset default of -1) leaves flags untouched.
 */
static void set_push_cert_flags(int *flags, int v)
{
    if (v == SEND_PACK_PUSH_CERT_NEVER) {
        *flags &= ~(TRANSPORT_PUSH_CERT_ALWAYS | TRANSPORT_PUSH_CERT_IF_ASKED);
    } else if (v == SEND_PACK_PUSH_CERT_ALWAYS) {
        *flags |= TRANSPORT_PUSH_CERT_ALWAYS;
        *flags &= ~TRANSPORT_PUSH_CERT_IF_ASKED;
    } else if (v == SEND_PACK_PUSH_CERT_IF_ASKED) {
        *flags |= TRANSPORT_PUSH_CERT_IF_ASKED;
        *flags &= ~TRANSPORT_PUSH_CERT_ALWAYS;
    }
}
/*
 * Config callback for "git push": handles push.followtags and
 * push.gpgsign (plus GPG settings), chaining everything else to the
 * default config handler.  cb points at the flags word.
 */
static int git_push_config(const char *k, const char *v, void *cb)
{
    int *flags = cb;
    int status;

    /* Let the GPG layer pick up its own keys first. */
    status = git_gpg_config(k, v, NULL);
    if (status)
        return status;

    if (!strcmp(k, "push.followtags")) {
        if (git_config_bool(k, v))
            *flags |= TRANSPORT_PUSH_FOLLOW_TAGS;
        else
            *flags &= ~TRANSPORT_PUSH_FOLLOW_TAGS;
        return 0;
    } else if (!strcmp(k, "push.gpgsign")) {
        const char *value;
        if (!git_config_get_value("push.gpgsign", &value)) {
            /* Accepts bool or the literal "if-asked". */
            switch (git_config_maybe_bool("push.gpgsign", value)) {
            case 0:
                set_push_cert_flags(flags, SEND_PACK_PUSH_CERT_NEVER);
                break;
            case 1:
                set_push_cert_flags(flags, SEND_PACK_PUSH_CERT_ALWAYS);
                break;
            default:
                if (value && !strcasecmp(value, "if-asked"))
                    set_push_cert_flags(flags, SEND_PACK_PUSH_CERT_IF_ASKED);
                else
                    return error("Invalid value for '%s'", k);
            }
        }
    }

    return git_default_config(k, v, NULL);
}
/*
 * Entry point for "git push": parse options and config, validate the
 * --delete/--tags/--all/--mirror combinations, collect refspecs, and
 * hand off to do_push().  Return value is the exit status.
 */
int cmd_push(int argc, const char **argv, const char *prefix)
{
    int flags = 0;
    int tags = 0;
    int push_cert = -1;    /* -1 = unset; see set_push_cert_flags() */
    int rc;
    const char *repo = NULL;    /* default repository */
    struct option options[] = {
        OPT__VERBOSITY(&verbosity),
        OPT_STRING( 0 , "repo", &repo, N_("repository"), N_("repository")),
        OPT_BIT( 0 , "all", &flags, N_("push all refs"), TRANSPORT_PUSH_ALL),
        OPT_BIT( 0 , "mirror", &flags, N_("mirror all refs"),
                (TRANSPORT_PUSH_MIRROR|TRANSPORT_PUSH_FORCE)),
        OPT_BOOL( 0, "delete", &deleterefs, N_("delete refs")),
        OPT_BOOL( 0 , "tags", &tags, N_("push tags (can't be used with --all or --mirror)")),
        OPT_BIT('n' , "dry-run", &flags, N_("dry run"), TRANSPORT_PUSH_DRY_RUN),
        OPT_BIT( 0,  "porcelain", &flags, N_("machine-readable output"), TRANSPORT_PUSH_PORCELAIN),
        OPT_BIT('f', "force", &flags, N_("force updates"), TRANSPORT_PUSH_FORCE),
        { OPTION_CALLBACK,
          0, CAS_OPT_NAME, &cas, N_("refname>:<expect"),
          N_("require old value of ref to be at this value"),
          PARSE_OPT_OPTARG, parseopt_push_cas_option },
        { OPTION_CALLBACK, 0, "recurse-submodules", &flags, "check|on-demand",
            N_("control recursive pushing of submodules"),
            PARSE_OPT_OPTARG, option_parse_recurse_submodules },
        OPT_BOOL( 0 , "thin", &thin, N_("use thin pack")),
        OPT_STRING( 0 , "receive-pack", &receivepack, "receive-pack", N_("receive pack program")),
        OPT_STRING( 0 , "exec", &receivepack, "receive-pack", N_("receive pack program")),
        OPT_BIT('u', "set-upstream", &flags, N_("set upstream for git pull/status"),
            TRANSPORT_PUSH_SET_UPSTREAM),
        OPT_BOOL(0, "progress", &progress, N_("force progress reporting")),
        OPT_BIT(0, "prune", &flags, N_("prune locally removed refs"),
            TRANSPORT_PUSH_PRUNE),
        OPT_BIT(0, "no-verify", &flags, N_("bypass pre-push hook"), TRANSPORT_PUSH_NO_HOOK),
        OPT_BIT(0, "follow-tags", &flags, N_("push missing but relevant tags"),
            TRANSPORT_PUSH_FOLLOW_TAGS),
        { OPTION_CALLBACK,
          0, "signed", &push_cert, "yes|no|if-asked", N_("GPG sign the push"),
          PARSE_OPT_OPTARG, option_parse_push_signed },
        OPT_BIT(0, "atomic", &flags, N_("request atomic transaction on remote side"), TRANSPORT_PUSH_ATOMIC),
        OPT_END()
    };

    packet_trace_identity("push");
    git_config(git_push_config, &flags);
    argc = parse_options(argc, argv, prefix, options, push_usage, 0);
    set_push_cert_flags(&flags, push_cert);

    if (deleterefs && (tags || (flags & (TRANSPORT_PUSH_ALL | TRANSPORT_PUSH_MIRROR))))
        die(_("--delete is incompatible with --all, --mirror and --tags"));
    if (deleterefs && argc < 2)
        die(_("--delete doesn't make sense without any refs"));

    if (tags)
        add_refspec("refs/tags/*");

    /* First positional argument is the repository; the rest refspecs. */
    if (argc > 0) {
        repo = argv[0];
        set_refspecs(argv + 1, argc - 1, repo);
    }

    rc = do_push(repo, flags);
    /* usage_with_options() does not return. */
    if (rc == -1)
        usage_with_options(push_usage, options);
    else
        return rc;
}
| gpl-2.0 |
kmihelich/linux-espressobin | drivers/base/soc.c | 330 | 4011 | /*
* Copyright (C) ST-Ericsson SA 2011
*
* Author: Lee Jones <lee.jones@linaro.org> for ST-Ericsson.
* License terms: GNU General Public License (GPL), version 2
*/
#include <linux/sysfs.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/sys_soc.h>
#include <linux/err.h>
static DEFINE_IDA(soc_ida);
static ssize_t soc_info_get(struct device *dev,
struct device_attribute *attr,
char *buf);
struct soc_device {
struct device dev;
struct soc_device_attribute *attr;
int soc_dev_num;
};
static struct bus_type soc_bus_type = {
.name = "soc",
};
static DEVICE_ATTR(machine, S_IRUGO, soc_info_get, NULL);
static DEVICE_ATTR(family, S_IRUGO, soc_info_get, NULL);
static DEVICE_ATTR(soc_id, S_IRUGO, soc_info_get, NULL);
static DEVICE_ATTR(revision, S_IRUGO, soc_info_get, NULL);
/* Accessor: return the embedded struct device of a soc_device. */
struct device *soc_device_to_device(struct soc_device *soc_dev)
{
    return &soc_dev->dev;
}
/*
 * sysfs is_visible hook: expose a SoC attribute file only when the
 * platform supplied the corresponding string; hide it (mode 0) otherwise.
 */
static umode_t soc_attribute_mode(struct kobject *kobj,
                  struct attribute *attr,
                  int index)
{
    struct device *dev = container_of(kobj, struct device, kobj);
    struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);

    if (attr == &dev_attr_machine.attr && soc_dev->attr->machine)
        return attr->mode;
    if (attr == &dev_attr_family.attr && soc_dev->attr->family)
        return attr->mode;
    if (attr == &dev_attr_revision.attr && soc_dev->attr->revision)
        return attr->mode;
    if (attr == &dev_attr_soc_id.attr && soc_dev->attr->soc_id)
        return attr->mode;

    /* Unknown or unfilled attribute: keep it hidden. */
    return 0;
}
/*
 * sysfs show handler shared by all four SoC attributes: print the string
 * matching the attribute being read, or -EINVAL for an unknown attribute.
 * (soc_attribute_mode() guarantees the string is non-NULL when visible.)
 */
static ssize_t soc_info_get(struct device *dev,
                struct device_attribute *attr,
                char *buf)
{
    struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);
    const char *s;

    if (attr == &dev_attr_machine)
        s = soc_dev->attr->machine;
    else if (attr == &dev_attr_family)
        s = soc_dev->attr->family;
    else if (attr == &dev_attr_revision)
        s = soc_dev->attr->revision;
    else if (attr == &dev_attr_soc_id)
        s = soc_dev->attr->soc_id;
    else
        return -EINVAL;

    return sprintf(buf, "%s\n", s);
}
static struct attribute *soc_attr[] = {
&dev_attr_machine.attr,
&dev_attr_family.attr,
&dev_attr_soc_id.attr,
&dev_attr_revision.attr,
NULL,
};
static const struct attribute_group soc_attr_group = {
.attrs = soc_attr,
.is_visible = soc_attribute_mode,
};
static const struct attribute_group *soc_attr_groups[] = {
&soc_attr_group,
NULL,
};
/* Device release callback: frees the soc_device container.  The attr
 * struct is owned by the caller of soc_device_register(). */
static void soc_release(struct device *dev)
{
    struct soc_device *soc_dev = container_of(dev, struct soc_device, dev);

    kfree(soc_dev);
}
/*
 * Register a SoC device on the soc bus with the given attribute strings.
 * Returns the new soc_device or an ERR_PTR on failure; the caller keeps
 * ownership of soc_dev_attr.
 */
struct soc_device *soc_device_register(struct soc_device_attribute *soc_dev_attr)
{
    struct soc_device *soc_dev;
    int ret;

    soc_dev = kzalloc(sizeof(*soc_dev), GFP_KERNEL);
    if (!soc_dev) {
        ret = -ENOMEM;
        goto out1;
    }

    /* Fetch a unique (reclaimable) SOC ID. */
    ret = ida_simple_get(&soc_ida, 0, 0, GFP_KERNEL);
    if (ret < 0)
        goto out2;
    soc_dev->soc_dev_num = ret;

    soc_dev->attr = soc_dev_attr;
    soc_dev->dev.bus = &soc_bus_type;
    soc_dev->dev.groups = soc_attr_groups;
    soc_dev->dev.release = soc_release;

    dev_set_name(&soc_dev->dev, "soc%d", soc_dev->soc_dev_num);

    ret = device_register(&soc_dev->dev);
    if (ret)
        goto out3;

    return soc_dev;

out3:
    /*
     * NOTE(review): after a failed device_register() the device ref
     * should normally be dropped with put_device() (which would invoke
     * soc_release and free soc_dev) rather than kfree'd via out2 —
     * confirm against the device core rules for this kernel version.
     */
    ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);
out2:
    kfree(soc_dev);
out1:
    return ERR_PTR(ret);
}
/* Ensure soc_dev->attr is freed prior to calling soc_device_unregister. */
void soc_device_unregister(struct soc_device *soc_dev)
{
    /* Release the numeric id, then drop the device (frees soc_dev via
     * soc_release once the last reference goes away). */
    ida_simple_remove(&soc_ida, soc_dev->soc_dev_num);

    device_unregister(&soc_dev->dev);
}
/* Register the "soc" bus early (core_initcall) so SoC drivers can use it. */
static int __init soc_bus_register(void)
{
    return bus_register(&soc_bus_type);
}
/* Module exit: drop the id allocator's cache and unregister the bus. */
static void __exit soc_bus_unregister(void)
{
    ida_destroy(&soc_ida);

    bus_unregister(&soc_bus_type);
}
| gpl-2.0 |
x13thangelx/droid2we-kernel | arch/parisc/kernel/pci-dma.c | 586 | 15993 | /*
** PARISC 1.1 Dynamic DMA mapping support.
** This implementation is for PA-RISC platforms that do not support
** I/O TLBs (aka DMA address translation hardware).
** See Documentation/PCI/PCI-DMA-mapping.txt for interface definitions.
**
** (c) Copyright 1999,2000 Hewlett-Packard Company
** (c) Copyright 2000 Grant Grundler
** (c) Copyright 2000 Philipp Rumpf <prumpf@tux.org>
** (c) Copyright 2000 John Marvin
**
** "leveraged" from 2.3.47: arch/ia64/kernel/pci-dma.c.
** (I assume it's from David Mosberger-Tang but there was no Copyright)
**
** AFAIK, all PA7100LC and PA7300LC platforms can use this code.
**
** - ggg
*/
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/dma.h> /* for DMA_CHUNK_SIZE */
#include <asm/io.h>
#include <asm/page.h> /* get_order */
#include <asm/pgalloc.h>
#include <asm/uaccess.h>
#include <asm/tlbflush.h> /* for purge_tlb_*() macros */
static struct proc_dir_entry * proc_gsc_root __read_mostly = NULL;
static unsigned long pcxl_used_bytes __read_mostly = 0;
static unsigned long pcxl_used_pages __read_mostly = 0;
extern unsigned long pcxl_dma_start; /* Start of pcxl dma mapping area */
static spinlock_t pcxl_res_lock;
static char *pcxl_res_map;
static int pcxl_res_hint;
static int pcxl_res_size;
#ifdef DEBUG_PCXL_RESOURCE
#define DBG_RES(x...) printk(x)
#else
#define DBG_RES(x...)
#endif
/*
** Dump a hex representation of the resource map.
*/
#ifdef DUMP_RESMAP
static
void dump_resmap(void)
{
u_long *res_ptr = (unsigned long *)pcxl_res_map;
u_long i = 0;
printk("res_map: ");
for(; i < (pcxl_res_size / sizeof(unsigned long)); ++i, ++res_ptr)
printk("%08lx ", *res_ptr);
printk("\n");
}
#else
static inline void dump_resmap(void) {;}
#endif
/* DMA mask check for PA1.1: every mask is accepted on these platforms. */
static int pa11_dma_supported( struct device *dev, u64 mask)
{
    return 1;
}
/*
 * Fill one PTE table with uncached (PAGE_KERNEL_UNC) mappings for the
 * physical range starting at *paddr_ptr, advancing *paddr_ptr as pages
 * are mapped and purging the TLB entry for each virtual page.
 * Handles at most one PMD's worth of the range; always returns 0.
 */
static inline int map_pte_uncached(pte_t * pte,
        unsigned long vaddr,
        unsigned long size, unsigned long *paddr_ptr)
{
    unsigned long end;
    unsigned long orig_vaddr = vaddr;

    vaddr &= ~PMD_MASK;
    end = vaddr + size;
    if (end > PMD_SIZE)
        end = PMD_SIZE;
    do {
        unsigned long flags;

        if (!pte_none(*pte))
            printk(KERN_ERR "map_pte_uncached: page already exists\n");
        set_pte(pte, __mk_pte(*paddr_ptr, PAGE_KERNEL_UNC));
        purge_tlb_start(flags);
        pdtlb_kernel(orig_vaddr);
        purge_tlb_end(flags);
        vaddr += PAGE_SIZE;
        orig_vaddr += PAGE_SIZE;
        (*paddr_ptr) += PAGE_SIZE;
        pte++;
    } while (vaddr < end);
    return 0;
}
/*
 * Walk/allocate the PTE tables under one PGD entry and map up to
 * PGDIR_SIZE of the range uncached via map_pte_uncached().
 * Returns 0 on success, -ENOMEM if a PTE table cannot be allocated.
 */
static inline int map_pmd_uncached(pmd_t * pmd, unsigned long vaddr,
        unsigned long size, unsigned long *paddr_ptr)
{
    unsigned long end;
    unsigned long orig_vaddr = vaddr;

    vaddr &= ~PGDIR_MASK;
    end = vaddr + size;
    if (end > PGDIR_SIZE)
        end = PGDIR_SIZE;
    do {
        pte_t * pte = pte_alloc_kernel(pmd, vaddr);
        if (!pte)
            return -ENOMEM;
        if (map_pte_uncached(pte, orig_vaddr, end - vaddr, paddr_ptr))
            return -ENOMEM;
        /* Round up to the next PMD boundary. */
        vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
        orig_vaddr += PMD_SIZE;
        pmd++;
    } while (vaddr < end);
    return 0;
}
/*
 * Top-level page-table walk: map [vaddr, vaddr+size) to the physical
 * range at paddr with caching disabled.  Returns 0 on success,
 * -ENOMEM if any table allocation fails.
 */
static inline int map_uncached_pages(unsigned long vaddr, unsigned long size,
        unsigned long paddr)
{
    pgd_t * dir;
    unsigned long end = vaddr + size;

    dir = pgd_offset_k(vaddr);
    do {
        pmd_t *pmd;

        pmd = pmd_alloc(NULL, dir, vaddr);
        if (!pmd)
            return -ENOMEM;
        if (map_pmd_uncached(pmd, vaddr, end - vaddr, &paddr))
            return -ENOMEM;
        vaddr = vaddr + PGDIR_SIZE;
        dir++;
    } while (vaddr && (vaddr < end));
    return 0;
}
/*
 * Clear the PTE entries for @size bytes (clamped to the end of this
 * PMD) starting at @vaddr, purging the kernel TLB entry for each page.
 */
static inline void unmap_uncached_pte(pmd_t * pmd, unsigned long vaddr,
		unsigned long size)
{
	pte_t * pte;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pmd_none(*pmd))
		return;
	if (pmd_bad(*pmd)) {
		/* Corrupt entry: report and wipe it rather than walk it. */
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		return;
	}
	pte = pte_offset_map(pmd, vaddr);
	/* Work only with the offset inside the current PMD. */
	vaddr &= ~PMD_MASK;
	end = vaddr + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	do {
		unsigned long flags;
		pte_t page = *pte;

		pte_clear(&init_mm, vaddr, pte);
		purge_tlb_start(flags);
		pdtlb_kernel(orig_vaddr);
		purge_tlb_end(flags);
		vaddr += PAGE_SIZE;
		orig_vaddr += PAGE_SIZE;
		pte++;
		/* A kernel PTE must be either empty or present; anything
		 * else (a swap entry) indicates corruption. */
		if (pte_none(page) || pte_present(page))
			continue;
		printk(KERN_CRIT "Whee.. Swapped out page in kernel page table\n");
	} while (vaddr < end);
}
/*
 * Walk the PMDs under one PGD entry, unmapping @size bytes (clamped to
 * the end of the PGD entry) starting at @vaddr.
 */
static inline void unmap_uncached_pmd(pgd_t * dir, unsigned long vaddr,
		unsigned long size)
{
	pmd_t * pmd;
	unsigned long end;
	unsigned long orig_vaddr = vaddr;

	if (pgd_none(*dir))
		return;
	if (pgd_bad(*dir)) {
		/* Corrupt entry: report and wipe it rather than walk it. */
		pgd_ERROR(*dir);
		pgd_clear(dir);
		return;
	}
	pmd = pmd_offset(dir, vaddr);
	/* Work only with the offset inside the current PGD entry. */
	vaddr &= ~PGDIR_MASK;
	end = vaddr + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	do {
		unmap_uncached_pte(pmd, orig_vaddr, end - vaddr);
		vaddr = (vaddr + PMD_SIZE) & PMD_MASK;
		orig_vaddr += PMD_SIZE;
		pmd++;
	} while (vaddr < end);
}
/*
 * Tear down the uncached kernel mappings created by map_uncached_pages()
 * for @size bytes starting at @vaddr.
 */
static void unmap_uncached_pages(unsigned long vaddr, unsigned long size)
{
	pgd_t * dir;
	unsigned long end = vaddr + size;

	dir = pgd_offset_k(vaddr);
	do {
		unmap_uncached_pmd(dir, vaddr, end - vaddr);
		vaddr = vaddr + PGDIR_SIZE;
		dir++;
	} while (vaddr && (vaddr < end));	/* 'vaddr &&' guards address wrap */
}
/*
 * PCXL_SEARCH_LOOP - scan the resource map in 'size'-bit words for a
 * word with all 'mask' bits clear; claim those bits, record the byte
 * index in 'idx', advance the search hint, and jump to the caller's
 * resource_found label.  Falls through if nothing was found.
 */
#define PCXL_SEARCH_LOOP(idx, mask, size) \
	for(; res_ptr < res_end; ++res_ptr) \
	{ \
		if(0 == ((*res_ptr) & mask)) { \
			*res_ptr |= mask; \
			idx = (int)((u_long)res_ptr - (u_long)pcxl_res_map); \
			pcxl_res_hint = idx + (size >> 3); /* next search starts here */ \
			goto resource_found; \
		} \
	}

/*
 * PCXL_FIND_FREE_MAPPING - two-pass search: first from the rotating
 * hint (aligned down to a 'size'-bit word) to the end of the map, then
 * from the beginning.  Uses the caller's resource_found label.
 */
#define PCXL_FIND_FREE_MAPPING(idx, mask, size) { \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[pcxl_res_hint & ~((size >> 3) - 1)]); \
	u##size *res_end = (u##size *)&pcxl_res_map[pcxl_res_size]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
	res_ptr = (u##size *)&pcxl_res_map[0]; \
	PCXL_SEARCH_LOOP(idx, mask, size); \
}
/*
 * Allocate a contiguous range of the pcxl DMA map large enough for
 * @size bytes and return the corresponding virtual address inside the
 * mapping area.  One resource-map bit covers one page, so a map byte
 * covers 8 pages; ranges larger than 32 pages are unsupported.
 * Panics if the request cannot be satisfied.
 */
unsigned long
pcxl_alloc_range(size_t size)
{
	int res_idx;
	u_long mask, flags;
	unsigned int pages_needed = size >> PAGE_SHIFT;

	/* Build a mask with 'pages_needed' low bits set. */
	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_needed;

	DBG_RES("pcxl_alloc_range() size: %d pages_needed %d pages_mask 0x%08lx\n",
		size, pages_needed, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	/* Search with the word width that matches the request size;
	 * each macro jumps to resource_found on success. */
	if(pages_needed <= 8) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 8);
	} else if(pages_needed <= 16) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 16);
	} else if(pages_needed <= 32) {
		PCXL_FIND_FREE_MAPPING(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_alloc_range() Too many pages to map.\n",
		      __FILE__);
	}

	/* Both search passes failed. */
	dump_resmap();
	panic("%s: pcxl_alloc_range() out of dma mapping resources\n",
	      __FILE__);

resource_found:
	DBG_RES("pcxl_alloc_range() res_idx %d mask 0x%08lx res_hint: %d\n",
		res_idx, mask, pcxl_res_hint);

	pcxl_used_pages += pages_needed;
	pcxl_used_bytes += ((pages_needed >> 3) ? (pages_needed >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();

	/*
	** return the corresponding vaddr in the pcxl dma map
	** (res_idx is a byte offset; each byte covers 8 pages,
	** hence the extra +3 in the shift).
	*/
	return (pcxl_dma_start + (res_idx << (PAGE_SHIFT + 3)));
}
/*
 * PCXL_FREE_MAPPINGS - clear the 'm' bits of the 'size'-bit resource-map
 * word at byte offset 'idx' (the offset recorded by PCXL_FIND_FREE_MAPPING).
 *
 * NOTE(review): the added term (((size >> 3) - 1) & (~((size >> 3) - 1)))
 * is x & ~x, which is identically zero for every size — it looks like an
 * alignment expression that was meant to apply to idx.  Harmless as
 * written, but worth confirming against the allocator's index layout.
 */
#define PCXL_FREE_MAPPINGS(idx, m, size) \
	u##size *res_ptr = (u##size *)&(pcxl_res_map[(idx) + (((size >> 3) - 1) & (~((size >> 3) - 1)))]); \
	/* BUG_ON((*res_ptr & m) != m); */ \
	*res_ptr &= ~m;

/*
** clear bits in the pcxl resource map
*/
/*
 * Release a range previously handed out by pcxl_alloc_range().
 * @vaddr: virtual address returned by pcxl_alloc_range()
 * @size:  the same (page-rounded) size that was allocated
 */
static void
pcxl_free_range(unsigned long vaddr, size_t size)
{
	u_long mask, flags;
	/* Invert the vaddr computation from pcxl_alloc_range(). */
	unsigned int res_idx = (vaddr - pcxl_dma_start) >> (PAGE_SHIFT + 3);
	unsigned int pages_mapped = size >> PAGE_SHIFT;

	/* Rebuild the bit mask that was set at allocation time. */
	mask = (u_long) -1L;
	mask >>= BITS_PER_LONG - pages_mapped;

	DBG_RES("pcxl_free_range() res_idx: %d size: %d pages_mapped %d mask 0x%08lx\n",
		res_idx, size, pages_mapped, mask);

	spin_lock_irqsave(&pcxl_res_lock, flags);

	if(pages_mapped <= 8) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 8);
	} else if(pages_mapped <= 16) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 16);
	} else if(pages_mapped <= 32) {
		PCXL_FREE_MAPPINGS(res_idx, mask, 32);
	} else {
		panic("%s: pcxl_free_range() Too many pages to unmap.\n",
		      __FILE__);
	}

	pcxl_used_pages -= (pages_mapped ? pages_mapped : 1);
	pcxl_used_bytes -= ((pages_mapped >> 3) ? (pages_mapped >> 3) : 1);

	spin_unlock_irqrestore(&pcxl_res_lock, flags);

	dump_resmap();
}
/*
 * seq_file show routine for /proc/gsc/pcxl_dma: prints the size of the
 * DMA mapping area and usage statistics of the resource bitmap.
 */
static int proc_pcxl_dma_show(struct seq_file *m, void *v)
{
#if 0
	u_long i = 0;
	unsigned long *res_ptr = (u_long *)pcxl_res_map;
#endif
	unsigned long total_pages = pcxl_res_size << 3;   /* 8 bits per byte */

	seq_printf(m, "\nDMA Mapping Area size    : %d bytes (%ld pages)\n",
		PCXL_DMA_MAP_SIZE, total_pages);

	seq_printf(m, "Resource bitmap : %d bytes\n", pcxl_res_size);

	seq_puts(m,  "            total:    free:    used:   % used:\n");
	seq_printf(m, "blocks  %8d %8ld %8ld %8ld%%\n", pcxl_res_size,
		pcxl_res_size - pcxl_used_bytes, pcxl_used_bytes,
		(pcxl_used_bytes * 100) / pcxl_res_size);

	seq_printf(m, "pages   %8ld %8ld %8ld %8ld%%\n", total_pages,
		total_pages - pcxl_used_pages, pcxl_used_pages,
		(pcxl_used_pages * 100 / total_pages));

	/* NOTE(review): this disabled dump references an undefined 'buf';
	 * it would need fixing before being re-enabled. */
#if 0
	seq_puts(m, "\nResource bitmap:");

	for(; i < (pcxl_res_size / sizeof(u_long)); ++i, ++res_ptr) {
		if ((i & 7) == 0)
			seq_puts(m,"\n   ");
		seq_printf(m, "%s %08lx", buf, *res_ptr);
	}
#endif
	seq_putc(m, '\n');
	return 0;
}
/* Standard single_open() boilerplate for the read-only proc entry. */
static int proc_pcxl_dma_open(struct inode *inode, struct file *file)
{
	return single_open(file, proc_pcxl_dma_show, NULL);
}

/* File operations for /proc/gsc/pcxl_dma (read-only seq_file). */
static const struct file_operations proc_pcxl_dma_ops = {
	.owner		= THIS_MODULE,
	.open		= proc_pcxl_dma_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
/*
 * Boot-time setup for the pcxl DMA allocator: initialize the resource
 * bitmap and create the /proc/gsc/pcxl_dma statistics entry.  Does
 * nothing when no pcxl DMA area was reserved (pcxl_dma_start == 0).
 */
static int __init
pcxl_dma_init(void)
{
	if (pcxl_dma_start == 0)
		return 0;

	spin_lock_init(&pcxl_res_lock);
	/* One bitmap bit per DMA page -> bytes = pages / 8. */
	pcxl_res_size = PCXL_DMA_MAP_SIZE >> (PAGE_SHIFT + 3);
	pcxl_res_hint = 0;
	pcxl_res_map = (char *)__get_free_pages(GFP_KERNEL,
					    get_order(pcxl_res_size));
	/*
	 * Previously the result was memset() unchecked; without the map
	 * every later consistent allocation would oops anyway, so fail
	 * loudly and early.
	 */
	if (!pcxl_res_map)
		panic("pcxl_dma_init: unable to allocate DMA resource map\n");
	memset(pcxl_res_map, 0, pcxl_res_size);
	proc_gsc_root = proc_mkdir("gsc", NULL);
	if (!proc_gsc_root)
		printk(KERN_WARNING
			"pcxl_dma_init: Unable to create gsc /proc dir entry\n");
	else {
		struct proc_dir_entry* ent;
		ent = proc_create("pcxl_dma", 0, proc_gsc_root,
				  &proc_pcxl_dma_ops);
		if (!ent)
			printk(KERN_WARNING
				"pci-dma.c: Unable to create pcxl_dma /proc entry.\n");
	}
	return 0;
}

__initcall(pcxl_dma_init);
/*
 * Allocate DMA-consistent memory: grab real pages, flush them from the
 * data cache, and map them a second time — uncached — inside the pcxl
 * DMA area.  Returns the uncached virtual address and stores the
 * physical address in *dma_handle, or returns NULL on allocation
 * failure (previously a failed __get_free_pages() was used unchecked,
 * mapping physical page 0 and returning a bogus buffer).
 */
static void * pa11_dma_alloc_consistent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flag)
{
	unsigned long vaddr;
	unsigned long paddr;
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);	/* round up to the order */
	vaddr = pcxl_alloc_range(size);
	paddr = __get_free_pages(flag, order);
	if (!paddr) {
		/* Give the mapping range back before failing. */
		pcxl_free_range(vaddr, size);
		return NULL;
	}
	flush_kernel_dcache_range(paddr, size);	/* no stale cached copies */
	paddr = __pa(paddr);
	map_uncached_pages(vaddr, size, paddr);
	*dma_handle = (dma_addr_t) paddr;

#if 0
/* This probably isn't needed to support EISA cards.
** ISA cards will certainly only support 24-bit DMA addressing.
** Not clear if we can, want, or need to support ISA.
*/
	if (!dev || *dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;
#endif
	return (void *)vaddr;
}
/*
 * Free memory from pa11_dma_alloc_consistent(): tear down the uncached
 * mapping, release the pcxl range, and free the underlying pages
 * (recovered from the physical dma_handle via __va()).
 */
static void pa11_dma_free_consistent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
{
	int order;

	order = get_order(size);
	size = 1 << (order + PAGE_SHIFT);	/* same rounding as alloc */
	unmap_uncached_pages((unsigned long)vaddr, size);
	pcxl_free_range((unsigned long)vaddr, size);

	free_pages((unsigned long)__va(dma_handle), order);
}
/*
 * Map a kernel buffer for streaming DMA: flush it from the data cache
 * and hand the device its physical address.
 */
static dma_addr_t pa11_dma_map_single(struct device *dev, void *addr, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) addr, size);
	return virt_to_phys(addr);
}
/*
 * Unmap a streaming DMA buffer.  Only DMA_FROM_DEVICE (and
 * bidirectional) buffers need a cache flush here; see comment below.
 */
static void pa11_dma_unmap_single(struct device *dev, dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
	    return;

	/*
	 * For PCI_DMA_FROMDEVICE this flush is not necessary for the
	 * simple map/unmap case. However, it IS necessary if
	 * pci_dma_sync_single_* has been called and the buffer reused.
	 */

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle), size);
	return;
}
/*
 * Map a scatterlist for streaming DMA: for each entry, record the
 * physical address/length and flush the buffer from the data cache.
 * Returns the number of entries mapped (always @nents here).
 */
static int pa11_dma_map_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sglist++ ) {
		unsigned long vaddr = sg_virt_addr(sglist);
		sg_dma_address(sglist) = (dma_addr_t) virt_to_phys(vaddr);
		sg_dma_len(sglist) = sglist->length;
		flush_kernel_dcache_range(vaddr, sglist->length);
	}
	return nents;
}
/*
 * Unmap a streaming scatterlist.  As with unmap_single, only
 * device-to-memory transfers need the cache flushed on unmap.
 */
static void pa11_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
	    return;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
	return;
}
/*
 * Single-buffer sync hooks: PA 1.1 has no coherency hardware, so both
 * directions reduce to flushing the affected range from the data cache.
 */
static void pa11_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}

static void pa11_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	flush_kernel_dcache_range((unsigned long) phys_to_virt(dma_handle) + offset, size);
}
/*
 * Scatterlist sync hooks: flush every entry from the data cache.
 * NOTE(review): unlike the other callbacks these have no
 * BUG_ON(direction == DMA_NONE) — presumably intentional, but worth
 * confirming for consistency.
 */
static void pa11_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}

static void pa11_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sglist, int nents, enum dma_data_direction direction)
{
	int i;

	/* once we do combining we'll need to use phys_to_virt(sg_dma_address(sglist)) */

	for (i = 0; i < nents; i++, sglist++ )
		flush_kernel_dcache_range(sg_virt_addr(sglist), sglist->length);
}
/*
 * DMA operations for PCX-L machines: consistent allocations come from
 * the uncached pcxl mapping area; streaming ops flush the data cache.
 */
struct hppa_dma_ops pcxl_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	pa11_dma_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_consistent,
	.free_consistent =	pa11_dma_free_consistent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu = pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device = pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu = pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device = pa11_dma_sync_sg_for_device,
};
/*
 * Stub for machines that cannot provide consistent memory at all:
 * every request fails so drivers fall back to noncoherent allocation.
 */
static void *fail_alloc_consistent(struct device *dev, size_t size,
				   dma_addr_t *dma_handle, gfp_t flag)
{
	return NULL;
}
/*
 * Noncoherent allocation: ordinary cached pages; the DMA handle is
 * simply the physical address of the buffer.  Returns NULL when the
 * page allocator fails.
 */
static void *pa11_dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	void *addr = (void *)__get_free_pages(flag, get_order(size));

	if (addr != NULL)
		*dma_handle = (dma_addr_t)virt_to_phys(addr);

	return addr;
}
/* Release pages obtained from pa11_dma_alloc_noncoherent(). */
static void pa11_dma_free_noncoherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t iova)
{
	unsigned long pages = (unsigned long)vaddr;

	free_pages(pages, get_order(size));
}
/*
 * DMA operations for PCX machines without consistent-memory support:
 * consistent allocation always fails; noncoherent uses plain pages.
 */
struct hppa_dma_ops pcx_dma_ops = {
	.dma_supported =	pa11_dma_supported,
	.alloc_consistent =	fail_alloc_consistent,
	.alloc_noncoherent =	pa11_dma_alloc_noncoherent,
	.free_consistent =	pa11_dma_free_noncoherent,
	.map_single =		pa11_dma_map_single,
	.unmap_single =		pa11_dma_unmap_single,
	.map_sg =		pa11_dma_map_sg,
	.unmap_sg =		pa11_dma_unmap_sg,
	.dma_sync_single_for_cpu =	pa11_dma_sync_single_for_cpu,
	.dma_sync_single_for_device =	pa11_dma_sync_single_for_device,
	.dma_sync_sg_for_cpu =		pa11_dma_sync_sg_for_cpu,
	.dma_sync_sg_for_device =	pa11_dma_sync_sg_for_device,
};
| gpl-2.0 |
tinyclub/linux-loongson-community | scripts/kconfig/conf.c | 842 | 16080 | /*
* Copyright (C) 2002 Roman Zippel <zippel@linux-m68k.org>
* Released under the terms of the GNU GPL v2.0.
*/
#include <locale.h>
#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <unistd.h>
#include <getopt.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <errno.h>
#include "lkc.h"
static void conf(struct menu *menu);
static void check_conf(struct menu *menu);
static void xfgets(char *str, int size, FILE *in);

/* The front-end mode selected on the command line (one per long option). */
enum input_mode {
	oldaskconfig,		/* ask every question, line oriented */
	silentoldconfig,	/* oldconfig, quiet, also updates deps */
	oldconfig,		/* update .config, ask only new questions */
	allnoconfig,
	allyesconfig,
	allmodconfig,
	alldefconfig,
	randconfig,
	defconfig,
	savedefconfig,
	listnewconfig,
	olddefconfig,
} input_mode = oldaskconfig;

static int indent = 1;		/* current prompt indentation level */
static int tty_stdio;		/* stdin/stdout/stderr are all a tty */
static int valid_stdin = 1;	/* may we read answers from stdin? */
static int sync_kconfig;	/* silentoldconfig: also write autoconf */
static int conf_cnt;		/* symbols (re)asked in the current pass */
static char line[128];		/* shared user-input buffer */
static struct menu *rootEntry;	/* menu subtree currently being asked */
/* Print the extended help text for @menu (shown on a '?' answer). */
static void print_help(struct menu *menu)
{
	struct gstr help = str_new();

	menu_get_ext_help(menu, &help);
	printf("\n%s\n", str_get(&help));
	str_free(&help);
}
/*
 * strip() - remove leading and trailing whitespace from @str in place.
 *
 * Leading whitespace is removed by shifting the string left; trailing
 * whitespace is overwritten with NUL bytes.
 */
static void strip(char *str)
{
	char *p = str;
	int l;

	/*
	 * Cast through unsigned char: passing a plain (possibly negative)
	 * char to isspace() is undefined behavior (CERT STR37-C).
	 */
	while ((isspace((unsigned char)*p)))
		p++;
	l = strlen(p);
	if (p != str)
		memmove(str, p, l + 1);		/* +1 copies the NUL too */
	if (!l)
		return;
	p = str + l - 1;
	while ((isspace((unsigned char)*p)))
		*p-- = 0;
}
/*
 * Abort when an interactive answer is required but stdin is not a
 * usable terminal (e.g. output redirected during a build).
 */
static void check_stdin(void)
{
	if (!valid_stdin) {
		printf(_("aborted!\n\n"));
		printf(_("Console input/output is redirected. "));
		printf(_("Run 'make oldconfig' to update configuration.\n\n"));
		exit(1);
	}
}
/*
 * Ask the user for a value for @sym, or echo the default, depending on
 * input_mode.  On return the global 'line' buffer holds the answer.
 * Returns 0 when the caller should keep the existing value, 1 when
 * 'line' contains (possibly new) input to parse.
 */
static int conf_askvalue(struct symbol *sym, const char *def)
{
	enum symbol_type type = sym_get_type(sym);

	if (!sym_has_value(sym))
		printf(_("(NEW) "));

	/* Preset 'line' to "just newline" == accept the default. */
	line[0] = '\n';
	line[1] = 0;

	if (!sym_is_changable(sym)) {
		/* Value is forced by dependencies; just show it. */
		printf("%s\n", def);
		line[0] = '\n';
		line[1] = 0;
		return 0;
	}

	switch (input_mode) {
	case oldconfig:
	case silentoldconfig:
		if (sym_has_value(sym)) {
			/* Already configured: keep silently. */
			printf("%s\n", def);
			return 0;
		}
		check_stdin();
		/* fall through */
	case oldaskconfig:
		fflush(stdout);
		xfgets(line, 128, stdin);
		if (!tty_stdio)
			printf("\n");
		return 1;
	default:
		break;
	}

	/* Non-interactive modes: accept the computed default. */
	switch (type) {
	case S_INT:
	case S_HEX:
	case S_STRING:
		printf("%s\n", def);
		return 1;
	default:
		;
	}
	printf("%s", line);
	return 1;
}
/*
 * Prompt loop for string/int/hex symbols.  Repeats until a valid value
 * is accepted; '?' on its own line prints the help text.
 * Returns 0 once the symbol has been set.
 */
static int conf_string(struct menu *menu)
{
	struct symbol *sym = menu->sym;
	const char *def;

	while (1) {
		printf("%*s%s ", indent - 1, "", _(menu->prompt->text));
		printf("(%s) ", sym->name);
		def = sym_get_string_value(sym);
		if (sym_get_string_value(sym))
			printf("[%s] ", def);
		if (!conf_askvalue(sym, def))
			return 0;
		switch (line[0]) {
		case '\n':
			/* Empty answer: keep the shown default. */
			break;
		case '?':
			/* print help */
			if (line[1] == '\n') {
				print_help(menu);
				def = NULL;
				break;
			}
			/* fall through */
		default:
			/* Strip the trailing newline and use the input. */
			line[strlen(line)-1] = 0;
			def = line;
		}
		if (def && sym_set_string_value(sym, def))
			return 0;
	}
}
/*
 * Prompt loop for bool/tristate symbols.  Shows the current value in
 * caps and the other values reachable within the symbol's range
 * (e.g. "[N/m/y/?]"), then parses n/m/y answers; '?' prints help.
 * Returns 0 once a value has been accepted.
 */
static int conf_sym(struct menu *menu)
{
	struct symbol *sym = menu->sym;
	tristate oldval, newval;

	while (1) {
		printf("%*s%s ", indent - 1, "", _(menu->prompt->text));
		if (sym->name)
			printf("(%s) ", sym->name);
		putchar('[');
		oldval = sym_get_tristate_value(sym);
		switch (oldval) {
		case no:
			putchar('N');
			break;
		case mod:
			putchar('M');
			break;
		case yes:
			putchar('Y');
			break;
		}
		/* Offer only the values the dependencies allow. */
		if (oldval != no && sym_tristate_within_range(sym, no))
			printf("/n");
		if (oldval != mod && sym_tristate_within_range(sym, mod))
			printf("/m");
		if (oldval != yes && sym_tristate_within_range(sym, yes))
			printf("/y");
		if (menu_has_help(menu))
			printf("/?");
		printf("] ");
		if (!conf_askvalue(sym, sym_get_string_value(sym)))
			return 0;
		strip(line);

		switch (line[0]) {
		case 'n':
		case 'N':
			newval = no;
			if (!line[1] || !strcmp(&line[1], "o"))
				break;
			continue;	/* garbage after 'n': re-ask */
		case 'm':
		case 'M':
			newval = mod;
			if (!line[1])
				break;
			continue;
		case 'y':
		case 'Y':
			newval = yes;
			if (!line[1] || !strcmp(&line[1], "es"))
				break;
			continue;
		case 0:
			/* Empty answer: keep the old value. */
			newval = oldval;
			break;
		case '?':
			goto help;
		default:
			continue;
		}
		/* Rejected when out of range; then fall through to help. */
		if (sym_set_tristate_value(sym, newval))
			return 0;
help:
		print_help(menu);
	}
}
/*
 * Prompt loop for a choice group: first settle the choice symbol itself
 * (n/m/y), then — for =y choices — list the visible alternatives and
 * ask the user to pick one by number, recursing into the selected
 * entry's children.  Returns 1 when the choice is fully resolved,
 * 0 when it stays =m (children are asked individually later).
 */
static int conf_choice(struct menu *menu)
{
	struct symbol *sym, *def_sym;
	struct menu *child;
	bool is_new;

	sym = menu->sym;
	is_new = !sym_has_value(sym);
	if (sym_is_changable(sym)) {
		conf_sym(menu);
		sym_calc_value(sym);
		switch (sym_get_tristate_value(sym)) {
		case no:
			return 1;
		case mod:
			return 0;
		case yes:
			break;
		}
	} else {
		switch (sym_get_tristate_value(sym)) {
		case no:
			return 1;
		case mod:
			printf("%*s%s\n", indent - 1, "", _(menu_get_prompt(menu)));
			return 0;
		case yes:
			break;
		}
	}

	while (1) {
		int cnt, def;

		printf("%*s%s\n", indent - 1, "", _(menu_get_prompt(menu)));
		def_sym = sym_get_choice_value(sym);
		cnt = def = 0;
		line[0] = 0;
		/* Enumerate the visible alternatives; '>' marks the default. */
		for (child = menu->list; child; child = child->next) {
			if (!menu_is_visible(child))
				continue;
			if (!child->sym) {
				printf("%*c %s\n", indent, '*', _(menu_get_prompt(child)));
				continue;
			}
			cnt++;
			if (child->sym == def_sym) {
				def = cnt;
				printf("%*c", indent, '>');
			} else
				printf("%*c", indent, ' ');
			printf(" %d. %s", cnt, _(menu_get_prompt(child)));
			if (child->sym->name)
				printf(" (%s)", child->sym->name);
			if (!sym_has_value(child->sym))
				printf(_(" (NEW)"));
			printf("\n");
		}
		printf(_("%*schoice"), indent - 1, "");
		if (cnt == 1) {
			/* Only one option: select it without asking. */
			printf("[1]: 1\n");
			goto conf_childs;
		}
		printf("[1-%d", cnt);
		if (menu_has_help(menu))
			printf("?");
		printf("]: ");
		switch (input_mode) {
		case oldconfig:
		case silentoldconfig:
			if (!is_new) {
				cnt = def;
				printf("%d\n", cnt);
				break;
			}
			check_stdin();
			/* fall through */
		case oldaskconfig:
			fflush(stdout);
			xfgets(line, 128, stdin);
			strip(line);
			if (line[0] == '?') {
				print_help(menu);
				continue;
			}
			if (!line[0])
				cnt = def;	/* empty answer: default */
			else if (isdigit(line[0]))
				cnt = atoi(line);
			else
				continue;
			break;
		default:
			break;
		}

	conf_childs:
		/* Walk to the cnt-th visible alternative. */
		for (child = menu->list; child; child = child->next) {
			if (!child->sym || !menu_is_visible(child))
				continue;
			if (!--cnt)
				break;
		}
		if (!child)
			continue;	/* number out of range: re-ask */
		if (line[0] && line[strlen(line) - 1] == '?') {
			/* "<n>?" asks for help on option n. */
			print_help(child);
			continue;
		}
		sym_set_choice_value(sym, child->sym);
		/* Ask the selected entry's sub-options. */
		for (child = child->list; child; child = child->next) {
			indent += 2;
			conf(child);
			indent -= 2;
		}
		return 1;
	}
}
/*
 * Recursively walk the (visible) menu tree and ask for every symbol,
 * dispatching to conf_choice/conf_string/conf_sym by symbol type.
 * In the non-interactive update modes, submenus other than the current
 * root are handled by check_conf() instead.
 */
static void conf(struct menu *menu)
{
	struct symbol *sym;
	struct property *prop;
	struct menu *child;

	if (!menu_is_visible(menu))
		return;

	sym = menu->sym;
	prop = menu->prompt;
	if (prop) {
		const char *prompt;

		switch (prop->type) {
		case P_MENU:
			/* In update modes, only re-ask below the current
			 * root; other submenus are scanned, not asked. */
			if ((input_mode == silentoldconfig ||
			     input_mode == listnewconfig ||
			     input_mode == olddefconfig) &&
			    rootEntry != menu) {
				check_conf(menu);
				return;
			}
			/* fall through */
		case P_COMMENT:
			prompt = menu_get_prompt(menu);
			if (prompt)
				printf("%*c\n%*c %s\n%*c\n",
					indent, '*',
					indent, '*', _(prompt),
					indent, '*');
		default:
			;
		}
	}

	if (!sym)
		goto conf_childs;

	if (sym_is_choice(sym)) {
		conf_choice(menu);
		/* For =m choices the children are asked individually. */
		if (sym->curr.tri != mod)
			return;
		goto conf_childs;
	}

	switch (sym->type) {
	case S_INT:
	case S_HEX:
	case S_STRING:
		conf_string(menu);
		break;
	default:
		conf_sym(menu);
		break;
	}

conf_childs:
	if (sym)
		indent += 2;
	for (child = menu->list; child; child = child->next)
		conf(child);
	if (sym)
		indent -= 2;
}
/*
 * Scan the menu tree for symbols that still have no value.  Depending
 * on input_mode this either lists them (listnewconfig) or restarts an
 * interactive pass over the enclosing menu; olddefconfig leaves them
 * at their defaults.  conf_cnt counts restarts so main() can loop
 * until everything is settled.
 */
static void check_conf(struct menu *menu)
{
	struct symbol *sym;
	struct menu *child;

	if (!menu_is_visible(menu))
		return;

	sym = menu->sym;
	if (sym && !sym_has_value(sym)) {
		if (sym_is_changable(sym) ||
		    (sym_is_choice(sym) && sym_get_tristate_value(sym) == yes)) {
			if (input_mode == listnewconfig) {
				if (sym->name && !sym_is_choice_value(sym)) {
					printf("%s%s\n", CONFIG_, sym->name);
				}
			} else if (input_mode != olddefconfig) {
				if (!conf_cnt++)
					printf(_("*\n* Restart config...\n*\n"));
				rootEntry = menu_get_parent_menu(menu);
				conf(rootEntry);
			}
		}
	}

	for (child = menu->list; child; child = child->next)
		check_conf(child);
}
/*
 * Command-line options; each option's 'val' is the matching
 * enum input_mode value, so getopt_long() returns the mode directly.
 */
static struct option long_opts[] = {
	{"oldaskconfig",    no_argument,       NULL, oldaskconfig},
	{"oldconfig",       no_argument,       NULL, oldconfig},
	{"silentoldconfig", no_argument,       NULL, silentoldconfig},
	{"defconfig",       optional_argument, NULL, defconfig},
	{"savedefconfig",   required_argument, NULL, savedefconfig},
	{"allnoconfig",     no_argument,       NULL, allnoconfig},
	{"allyesconfig",    no_argument,       NULL, allyesconfig},
	{"allmodconfig",    no_argument,       NULL, allmodconfig},
	{"alldefconfig",    no_argument,       NULL, alldefconfig},
	{"randconfig",      no_argument,       NULL, randconfig},
	{"listnewconfig",   no_argument,       NULL, listnewconfig},
	{"olddefconfig",    no_argument,       NULL, olddefconfig},
	/*
	 * oldnoconfig is an alias of olddefconfig, because people already
	 * are dependent on its behavior(sets new symbols to their default
	 * value but not 'n') with the counter-intuitive name.
	 */
	{"oldnoconfig",     no_argument,       NULL, olddefconfig},
	{NULL, 0, NULL, 0}
};
/* Print command-line usage; one line per supported mode option. */
static void conf_usage(const char *progname)
{

	printf("Usage: %s [-s] [option] <kconfig-file>\n", progname);
	printf("[option] is _one_ of the following:\n");
	printf("  --listnewconfig         List new options\n");
	printf("  --oldaskconfig          Start a new configuration using a line-oriented program\n");
	printf("  --oldconfig             Update a configuration using a provided .config as base\n");
	printf("  --silentoldconfig       Same as oldconfig, but quietly, additionally update deps\n");
	printf("  --olddefconfig          Same as silentoldconfig but sets new symbols to their default value\n");
	printf("  --oldnoconfig           An alias of olddefconfig\n");
	printf("  --defconfig <file>      New config with default defined in <file>\n");
	printf("  --savedefconfig <file>  Save the minimal current configuration to <file>\n");
	printf("  --allnoconfig           New config where all options are answered with no\n");
	printf("  --allyesconfig          New config where all options are answered with yes\n");
	printf("  --allmodconfig          New config where all options are answered with mod\n");
	printf("  --alldefconfig          New config with all symbols set to default\n");
	printf("  --randconfig            New config with random answer to all options\n");
}
/*
 * Entry point: parse the mode option, read the Kconfig tree and any
 * existing configuration, apply the selected mode (interactive ask,
 * all{no,yes,mod,def}config, randconfig, defconfig, ...), then write
 * the resulting configuration back out.
 */
int main(int ac, char **av)
{
	const char *progname = av[0];
	int opt;
	const char *name, *defconfig_file = NULL /* gcc uninit */;
	struct stat tmpstat;

	setlocale(LC_ALL, "");
	bindtextdomain(PACKAGE, LOCALEDIR);
	textdomain(PACKAGE);

	tty_stdio = isatty(0) && isatty(1) && isatty(2);

	/* --- option parsing: each long option selects an input_mode --- */
	while ((opt = getopt_long(ac, av, "s", long_opts, NULL)) != -1) {
		if (opt == 's') {
			conf_set_message_callback(NULL);	/* silent */
			continue;
		}
		input_mode = (enum input_mode)opt;
		switch (opt) {
		case silentoldconfig:
			sync_kconfig = 1;
			break;
		case defconfig:
		case savedefconfig:
			defconfig_file = optarg;
			break;
		case randconfig:
		{
			struct timeval now;
			unsigned int seed;
			char *seed_env;

			/*
			 * Use microseconds derived seed,
			 * compensate for systems where it may be zero
			 */
			gettimeofday(&now, NULL);
			seed = (unsigned int)((now.tv_sec + 1) * (now.tv_usec + 1));

			/* KCONFIG_SEED overrides for reproducible runs. */
			seed_env = getenv("KCONFIG_SEED");
			if( seed_env && *seed_env ) {
				char *endp;
				int tmp = (int)strtol(seed_env, &endp, 0);
				if (*endp == '\0') {
					seed = tmp;
				}
			}
			fprintf( stderr, "KCONFIG_SEED=0x%X\n", seed );
			srand(seed);
			break;
		}
		case oldaskconfig:
		case oldconfig:
		case allnoconfig:
		case allyesconfig:
		case allmodconfig:
		case alldefconfig:
		case listnewconfig:
		case olddefconfig:
			break;
		case '?':
			conf_usage(progname);
			exit(1);
			break;
		}
	}
	if (ac == optind) {
		printf(_("%s: Kconfig file missing\n"), av[0]);
		conf_usage(progname);
		exit(1);
	}
	name = av[optind];
	conf_parse(name);
	//zconfdump(stdout);

	/* silentoldconfig requires an existing .config to sync from. */
	if (sync_kconfig) {
		name = conf_get_configname();
		if (stat(name, &tmpstat)) {
			fprintf(stderr, _("***\n"
				"*** Configuration file \"%s\" not found!\n"
				"***\n"
				"*** Please run some configurator (e.g. \"make oldconfig\" or\n"
				"*** \"make menuconfig\" or \"make xconfig\").\n"
				"***\n"), name);
			exit(1);
		}
	}

	/* --- load the base configuration for the selected mode --- */
	switch (input_mode) {
	case defconfig:
		if (!defconfig_file)
			defconfig_file = conf_get_default_confname();
		if (conf_read(defconfig_file)) {
			printf(_("***\n"
				"*** Can't find default configuration \"%s\"!\n"
				"***\n"), defconfig_file);
			exit(1);
		}
		break;
	case savedefconfig:
	case silentoldconfig:
	case oldaskconfig:
	case oldconfig:
	case listnewconfig:
	case olddefconfig:
		conf_read(NULL);
		break;
	case allnoconfig:
	case allyesconfig:
	case allmodconfig:
	case alldefconfig:
	case randconfig:
		/* KCONFIG_ALLCONFIG may supply a seed config file. */
		name = getenv("KCONFIG_ALLCONFIG");
		if (!name)
			break;
		if ((strcmp(name, "") != 0) && (strcmp(name, "1") != 0)) {
			if (conf_read_simple(name, S_DEF_USER)) {
				fprintf(stderr,
					_("*** Can't read seed configuration \"%s\"!\n"),
					name);
				exit(1);
			}
			break;
		}
		/* "" or "1": look for a mode-specific default file name. */
		switch (input_mode) {
		case allnoconfig:	name = "allno.config"; break;
		case allyesconfig:	name = "allyes.config"; break;
		case allmodconfig:	name = "allmod.config"; break;
		case alldefconfig:	name = "alldef.config"; break;
		case randconfig:	name = "allrandom.config"; break;
		default: break;
		}
		if (conf_read_simple(name, S_DEF_USER) &&
		    conf_read_simple("all.config", S_DEF_USER)) {
			fprintf(stderr,
				_("*** KCONFIG_ALLCONFIG set, but no \"%s\" or \"all.config\" file found\n"),
				name);
			exit(1);
		}
		break;
	default:
		break;
	}

	if (sync_kconfig) {
		/* KCONFIG_NOSILENTUPDATE forbids implicit updates. */
		if (conf_get_changed()) {
			name = getenv("KCONFIG_NOSILENTUPDATE");
			if (name && *name) {
				fprintf(stderr,
					_("\n*** The configuration requires explicit update.\n\n"));
				return 1;
			}
		}
		valid_stdin = tty_stdio;
	}

	/* --- apply the mode --- */
	switch (input_mode) {
	case allnoconfig:
		conf_set_all_new_symbols(def_no);
		break;
	case allyesconfig:
		conf_set_all_new_symbols(def_yes);
		break;
	case allmodconfig:
		conf_set_all_new_symbols(def_mod);
		break;
	case alldefconfig:
		conf_set_all_new_symbols(def_default);
		break;
	case randconfig:
		/* Really nothing to do in this loop */
		while (conf_set_all_new_symbols(def_random)) ;
		break;
	case defconfig:
		conf_set_all_new_symbols(def_default);
		break;
	case savedefconfig:
		break;
	case oldaskconfig:
		rootEntry = &rootmenu;
		conf(&rootmenu);
		input_mode = silentoldconfig;
		/* fall through */
	case oldconfig:
	case listnewconfig:
	case olddefconfig:
	case silentoldconfig:
		/* Update until a loop caused no more changes */
		do {
			conf_cnt = 0;
			check_conf(&rootmenu);
		} while (conf_cnt &&
			 (input_mode != listnewconfig &&
			  input_mode != olddefconfig));
		break;
	}

	/* --- write the result --- */
	if (sync_kconfig) {
		/* silentoldconfig is used during the build so we shall update autoconf.
		 * All other commands are only used to generate a config.
		 */
		if (conf_get_changed() && conf_write(NULL)) {
			fprintf(stderr, _("\n*** Error during writing of the configuration.\n\n"));
			exit(1);
		}
		if (conf_write_autoconf()) {
			fprintf(stderr, _("\n*** Error during update of the configuration.\n\n"));
			return 1;
		}
	} else if (input_mode == savedefconfig) {
		if (conf_write_defconfig(defconfig_file)) {
			fprintf(stderr, _("n*** Error while saving defconfig to: %s\n\n"),
				defconfig_file);
			return 1;
		}
	} else if (input_mode != listnewconfig) {
		if (conf_write(NULL)) {
			fprintf(stderr, _("\n*** Error during writing of the configuration.\n\n"));
			exit(1);
		}
	}
	return 0;
}
/*
 * Helper function to facilitate fgets() by Jean Sacren.
 *
 * xfgets() - fgets() wrapper that treats read errors and EOF as fatal.
 *
 * The interactive prompt loops (conf_sym(), conf_string(), conf_choice())
 * re-read input based on the contents of the buffer; merely reporting
 * the failure and returning with a stale buffer would make them spin
 * forever once stdin is exhausted, so bail out of the program instead.
 */
void xfgets(char *str, int size, FILE *in)
{
	if (fgets(str, size, in) == NULL) {
		fprintf(stderr, "\nError in reading or end of file.\n");
		exit(1);
	}
}
| gpl-2.0 |
moulecorp/greezly | arch/arm/plat-samsung/dma-ops.c | 842 | 3716 | /* linux/arch/arm/plat-samsung/dma-ops.c
*
* Copyright (c) 2011 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* Samsung DMA Operations
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/amba/pl330.h>
#include <linux/scatterlist.h>
#include <linux/export.h>
#include <mach/dma.h>
/* Select the dmaengine channel-filter matching the configured controller. */
#if defined(CONFIG_PL330_DMA)
#define dma_filter pl330_filter
#elif defined(CONFIG_S3C64XX_PL080)
#define dma_filter pl08x_filter_id
#endif
/*
 * Acquire a DMA channel with capability @param->cap.  DT platforms look
 * the channel up by @ch_name; non-DT platforms use the filter function
 * with @dma_ch as the match token.  The struct dma_chan pointer is
 * returned squeezed into an 'unsigned' (legacy samsung_dma_ops ABI).
 */
static unsigned samsung_dmadev_request(enum dma_ch dma_ch,
				struct samsung_dma_req *param,
				struct device *dev, char *ch_name)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(param->cap, mask);

	if (dev->of_node)
		return (unsigned)dma_request_slave_channel(dev, ch_name);
	else
		return (unsigned)dma_request_channel(mask, dma_filter,
							(void *)dma_ch);
}
/* Give back a channel obtained via samsung_dmadev_request(). */
static int samsung_dmadev_release(unsigned ch, void *param)
{
	dma_release_channel((struct dma_chan *)ch);

	return 0;
}
static int samsung_dmadev_config(unsigned ch,
struct samsung_dma_config *param)
{
struct dma_chan *chan = (struct dma_chan *)ch;
struct dma_slave_config slave_config;
if (param->direction == DMA_DEV_TO_MEM) {
memset(&slave_config, 0, sizeof(struct dma_slave_config));
slave_config.direction = param->direction;
slave_config.src_addr = param->fifo;
slave_config.src_addr_width = param->width;
slave_config.src_maxburst = 1;
dmaengine_slave_config(chan, &slave_config);
} else if (param->direction == DMA_MEM_TO_DEV) {
memset(&slave_config, 0, sizeof(struct dma_slave_config));
slave_config.direction = param->direction;
slave_config.dst_addr = param->fifo;
slave_config.dst_addr_width = param->width;
slave_config.dst_maxburst = 1;
dmaengine_slave_config(chan, &slave_config);
} else {
pr_warn("unsupported direction\n");
return -EINVAL;
}
return 0;
}
/*
 * Build and submit a transfer descriptor on channel @ch.
 * DMA_SLAVE requests become a single-entry scatterlist transfer;
 * DMA_CYCLIC requests become a cyclic (ring-buffer) transfer.
 * The caller's completion callback is attached before submission.
 * Returns 0 on success, -EFAULT on an unknown capability or if the
 * engine refuses the descriptor.
 */
static int samsung_dmadev_prepare(unsigned ch,
			struct samsung_dma_prep *param)
{
	struct scatterlist sg;
	struct dma_chan *chan = (struct dma_chan *)ch;
	struct dma_async_tx_descriptor *desc;

	switch (param->cap) {
	case DMA_SLAVE:
		/* Wrap the flat buffer in a one-entry scatterlist. */
		sg_init_table(&sg, 1);
		sg_dma_len(&sg) = param->len;
		sg_set_page(&sg, pfn_to_page(PFN_DOWN(param->buf)),
			    param->len, offset_in_page(param->buf));
		sg_dma_address(&sg) = param->buf;
		desc = dmaengine_prep_slave_sg(chan,
			&sg, 1, param->direction, DMA_PREP_INTERRUPT);
		break;
	case DMA_CYCLIC:
		desc = dmaengine_prep_dma_cyclic(chan, param->buf,
			param->len, param->period, param->direction,
			DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		break;
	default:
		dev_err(&chan->dev->device, "unsupported format\n");
		return -EFAULT;
	}

	if (!desc) {
		dev_err(&chan->dev->device, "cannot prepare cyclic dma\n");
		return -EFAULT;
	}

	/* Hook up the caller's completion notification. */
	desc->callback = param->fp;
	desc->callback_param = param->fp_param;

	dmaengine_submit((struct dma_async_tx_descriptor *)desc);

	return 0;
}
/* Kick off any descriptors queued on the channel. */
static inline int samsung_dmadev_trigger(unsigned ch)
{
	dma_async_issue_pending((struct dma_chan *)ch);

	return 0;
}

/* Abort all in-flight and queued transfers on the channel. */
static inline int samsung_dmadev_flush(unsigned ch)
{
	return dmaengine_terminate_all((struct dma_chan *)ch);
}
/*
 * dmaengine-backed implementation of the samsung_dma_ops interface.
 * .stop reuses flush since terminate_all also stops the channel.
 */
static struct samsung_dma_ops dmadev_ops = {
	.request	= samsung_dmadev_request,
	.release	= samsung_dmadev_release,
	.config		= samsung_dmadev_config,
	.prepare	= samsung_dmadev_prepare,
	.trigger	= samsung_dmadev_trigger,
	.started	= NULL,		/* not supported by this backend */
	.flush		= samsung_dmadev_flush,
	.stop		= samsung_dmadev_flush,
};

/* Accessor used by platform code to obtain the ops table. */
void *samsung_dmadev_get_ops(void)
{
	return &dmadev_ops;
}
EXPORT_SYMBOL(samsung_dmadev_get_ops);
| gpl-2.0 |
zeferot/test | drivers/watchdog/pika_wdt.c | 2122 | 6878 | /*
* PIKA FPGA based Watchdog Timer
*
* Copyright (c) 2008 PIKA Technologies
* Sean MacLennan <smaclennan@pikatech.com>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/reboot.h>
#include <linux/jiffies.h>
#include <linux/timer.h>
#include <linux/bitops.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#define DRV_NAME "PIKA-WDT"

/* Hardware timeout in seconds */
#define WDT_HW_TIMEOUT 2

/* Timer heartbeat (500ms) */
#define WDT_TIMEOUT (HZ/2)

/* User land timeout */
#define WDT_HEARTBEAT 15

static int heartbeat = WDT_HEARTBEAT;
module_param(heartbeat, int, 0);
MODULE_PARM_DESC(heartbeat, "Watchdog heartbeats in seconds. "
	"(default = " __MODULE_STRING(WDT_HEARTBEAT) ")");

static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started "
	"(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

/* Driver-private state; single instance, so a static struct suffices. */
static struct {
	void __iomem *fpga;		/* mapped FPGA register window */
	unsigned long next_heartbeat;	/* the next_heartbeat for the timer */
	unsigned long open;		/* bit 0 set while /dev/watchdog is open */
	char expect_close;		/* magic-close character seen */
	int bootstatus;			/* WDIOF_CARDRESET etc. at boot */
	struct timer_list timer;	/* The timer that pings the watchdog */
} pikawdt_private;

/* Capabilities reported through WDIOC_GETSUPPORT. */
static struct watchdog_info ident = {
	.identity	= DRV_NAME,
	.options	= WDIOF_CARDRESET |
			  WDIOF_SETTIMEOUT |
			  WDIOF_KEEPALIVEPING |
			  WDIOF_MAGICCLOSE,
};
/*
 * Reload the watchdog timer. (ie, pat the watchdog)
 */
static inline void pikawdt_reset(void)
{
	/* -- FPGA: Reset Control Register (32bit R/W) (Offset: 0x14) --
	 * Bit 7,    WTCHDG_EN: When set to 1, the watchdog timer is enabled.
	 *           Once enabled, it cannot be disabled. The watchdog can be
	 *           kicked by performing any write access to the reset
	 *           control register (this register).
	 * Bit 8-11, WTCHDG_TIMEOUT_SEC: Sets the watchdog timeout value in
	 *           seconds. Valid ranges are 1 to 15 seconds. The value can
	 *           be modified dynamically.
	 */
	unsigned reset = in_be32(pikawdt_private.fpga + 0x14);
	/* enable, with the WDT_HW_TIMEOUT (2 second) hardware timeout */
	reset |= (1 << 7) + (WDT_HW_TIMEOUT << 8);
	out_be32(pikawdt_private.fpga + 0x14, reset);
}
/*
* Timer tick
*/
static void pikawdt_ping(unsigned long data)
{
if (time_before(jiffies, pikawdt_private.next_heartbeat) ||
(!nowayout && !pikawdt_private.open)) {
pikawdt_reset();
mod_timer(&pikawdt_private.timer, jiffies + WDT_TIMEOUT);
} else
pr_crit("I will reset your machine !\n");
}
/* Push the userland deadline 'heartbeat' seconds into the future. */
static void pikawdt_keepalive(void)
{
	unsigned long deadline = jiffies + heartbeat * HZ;

	pikawdt_private.next_heartbeat = deadline;
}
/* Arm the userland deadline and start the internal ping timer. */
static void pikawdt_start(void)
{
	pikawdt_keepalive();
	mod_timer(&pikawdt_private.timer, jiffies + WDT_TIMEOUT);
}
/*
 * Watchdog device is opened, and watchdog starts running.
 * Only one opener is allowed at a time.
 */
static int pikawdt_open(struct inode *inode, struct file *file)
{
	/* /dev/watchdog can only be opened once */
	if (test_and_set_bit(0, &pikawdt_private.open) != 0)
		return -EBUSY;

	pikawdt_start();

	return nonseekable_open(inode, file);
}
/*
 * Close the watchdog device.  Without a preceding magic 'V' write the
 * internal ping timer is stopped, so the hardware watchdog will expire
 * and reset the machine.
 */
static int pikawdt_release(struct inode *inode, struct file *file)
{
	char graceful = pikawdt_private.expect_close;

	/* stop internal ping unless this was a magic close */
	if (!graceful)
		del_timer(&pikawdt_private.timer);

	clear_bit(0, &pikawdt_private.open);
	pikawdt_private.expect_close = 0;
	return 0;
}
/*
 * Pat the watchdog whenever the device is written to.  Unless nowayout
 * is set, the data is scanned for the magic close character 'V'.
 */
static ssize_t pikawdt_write(struct file *file, const char __user *data,
			     size_t len, loff_t *ppos)
{
	size_t pos;

	if (len == 0)
		return 0;

	if (!nowayout) {
		/* a write without 'V' re-arms the reboot-on-close */
		pikawdt_private.expect_close = 0;

		for (pos = 0; pos < len; pos++) {
			char ch;

			if (get_user(ch, data + pos))
				return -EFAULT;
			if (ch == 'V') {
				pikawdt_private.expect_close = 42;
				break;
			}
		}
	}

	pikawdt_keepalive();

	return len;
}
/*
* Handle commands from user-space.
*/
static long pikawdt_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
void __user *argp = (void __user *)arg;
int __user *p = argp;
int new_value;
switch (cmd) {
case WDIOC_GETSUPPORT:
return copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
case WDIOC_GETSTATUS:
return put_user(0, p);
case WDIOC_GETBOOTSTATUS:
return put_user(pikawdt_private.bootstatus, p);
case WDIOC_KEEPALIVE:
pikawdt_keepalive();
return 0;
case WDIOC_SETTIMEOUT:
if (get_user(new_value, p))
return -EFAULT;
heartbeat = new_value;
pikawdt_keepalive();
return put_user(new_value, p); /* return current value */
case WDIOC_GETTIMEOUT:
return put_user(heartbeat, p);
}
return -ENOTTY;
}
/* File operations backing the /dev/watchdog misc device. */
static const struct file_operations pikawdt_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.open = pikawdt_open,
	.release = pikawdt_release,
	.write = pikawdt_write,
	.unlocked_ioctl = pikawdt_ioctl,
};
/* Standard watchdog misc device node (minor WATCHDOG_MINOR). */
static struct miscdevice pikawdt_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &pikawdt_fops,
};
/*
 * Probe the PIKA FPGA, read the firmware version and the POST
 * watchdog-reset flag, then register the watchdog misc device.
 *
 * Returns 0 on success or a negative errno.
 */
static int __init pikawdt_init(void)
{
	struct device_node *np;
	void __iomem *fpga;
	u32 post1;	/* always assigned before use; no need for static storage */
	int ret;

	np = of_find_compatible_node(NULL, NULL, "pika,fpga");
	if (np == NULL) {
		pr_err("Unable to find fpga\n");
		return -ENOENT;
	}

	pikawdt_private.fpga = of_iomap(np, 0);
	of_node_put(np);
	if (pikawdt_private.fpga == NULL) {
		pr_err("Unable to map fpga\n");
		return -ENOMEM;
	}

	ident.firmware_version = in_be32(pikawdt_private.fpga + 0x1c) & 0xffff;

	/* POST information is in the sd area. */
	np = of_find_compatible_node(NULL, NULL, "pika,fpga-sd");
	if (np == NULL) {
		pr_err("Unable to find fpga-sd\n");
		ret = -ENOENT;
		goto out;
	}

	fpga = of_iomap(np, 0);
	of_node_put(np);
	if (fpga == NULL) {
		pr_err("Unable to map fpga-sd\n");
		ret = -ENOMEM;
		goto out;
	}

	/* -- FPGA: POST Test Results Register 1 (32bit R/W) (Offset: 0x4040) --
	 * Bit 31, WDOG: Set to 1 when the last reset was caused by a watchdog
	 *		timeout.
	 * (the sd mapping starts at 0x4000, hence the +0x40 below)
	 */
	post1 = in_be32(fpga + 0x40);
	if (post1 & 0x80000000)
		pikawdt_private.bootstatus = WDIOF_CARDRESET;

	iounmap(fpga);

	/* prepared but not armed: the timer is first started on open() */
	setup_timer(&pikawdt_private.timer, pikawdt_ping, 0);

	ret = misc_register(&pikawdt_miscdev);
	if (ret) {
		pr_err("Unable to register miscdev\n");
		goto out;
	}

	pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
		heartbeat, nowayout);
	return 0;

out:
	iounmap(pikawdt_private.fpga);
	return ret;
}
/*
 * Tear down the driver: unregister the device, stop the internal ping
 * timer, then unmap the FPGA registers.
 *
 * del_timer_sync() is required: after a magic-character close
 * (expect_close set) pikawdt_ping() keeps re-arming itself, so without
 * this the callback could fire after iounmap() and access unmapped
 * I/O memory.
 */
static void __exit pikawdt_exit(void)
{
	misc_deregister(&pikawdt_miscdev);
	del_timer_sync(&pikawdt_private.timer);
	iounmap(pikawdt_private.fpga);
}
module_init(pikawdt_init);
module_exit(pikawdt_exit);
MODULE_AUTHOR("Sean MacLennan <smaclennan@pikatech.com>");
MODULE_DESCRIPTION("PIKA FPGA based Watchdog Timer");
MODULE_LICENSE("GPL");
| gpl-2.0 |
identisoft-rashid/ec3_kernel_pre_4.1 | drivers/net/ethernet/i825xx/lasi_82596.c | 2122 | 6815 | /* lasi_82596.c -- driver for the intel 82596 ethernet controller, as
munged into HPPA boxen .
This driver is based upon 82596.c, original credits are below...
but there were too many hoops which HP wants jumped through to
keep this code in there in a sane manner.
3 primary sources of the mess --
1) hppa needs *lots* of cacheline flushing to keep this kind of
MMIO running.
2) The 82596 needs to see all of its pointers as their physical
address. Thus virt_to_bus/bus_to_virt are *everywhere*.
3) The implementation HP is using seems to be significantly pickier
about when and how the command and RX units are started. some
command ordering was changed.
Examination of the mach driver leads one to believe that there
might be a saner way to pull this off... anyone who feels like a
full rewrite can be my guest.
Split 02/13/2000 Sam Creasey (sammy@oh.verio.com)
02/01/2000 Initial modifications for parisc by Helge Deller (deller@gmx.de)
03/02/2000 changes for better/correct(?) cache-flushing (deller)
*/
/* 82596.c: A generic 82596 ethernet driver for linux. */
/*
Based on Apricot.c
Written 1994 by Mark Evans.
This driver is for the Apricot 82596 bus-master interface
Modularised 12/94 Mark Evans
Modified to support the 82596 ethernet chips on 680x0 VME boards.
by Richard Hirst <richard@sleepie.demon.co.uk>
Renamed to be 82596.c
980825: Changed to receive directly in to sk_buffs which are
allocated at open() time. Eliminates copy on incoming frames
(small ones are still copied). Shared data now held in a
non-cached page, so we can run on 68060 in copyback mode.
TBD:
* look at deferring rx frames rather than discarding (as per tulip)
* handle tx ring full as per tulip
* performance test to tune rx_copybreak
Most of my modifications relate to the braindead big-endian
implementation by Intel. When the i596 is operating in
'big-endian' mode, it thinks a 32 bit value of 0x12345678
should be stored as 0x56781234. This is a real pain, when
you have linked lists which are shared by the 680x0 and the
i596.
Driver skeleton
Written 1993 by Donald Becker.
Copyright 1993 United States Government as represented by the Director,
National Security Agency. This software may only be used and distributed
according to the terms of the GNU General Public License as modified by SRC,
incorporated herein by reference.
The author may be reached as becker@scyld.com, or C/O
Scyld Computing Corporation, 410 Severn Ave., Suite 210, Annapolis MD 21403
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/pdc.h>
#include <asm/parisc-device.h>
#define LASI_82596_DRIVER_VERSION "LASI 82596 driver - Revision: 1.30"
#define PA_I82596_RESET 0 /* Offsets relative to LASI-LAN-Addr.*/
#define PA_CPU_PORT_L_ACCESS 4
#define PA_CHANNEL_ATTENTION 8
#define OPT_SWAP_PORT 0x0001 /* Need to wordswp on the MPU port */
#define DMA_ALLOC dma_alloc_noncoherent
#define DMA_FREE dma_free_noncoherent
#define DMA_WBACK(ndev, addr, len) \
do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_TO_DEVICE); } while (0)
#define DMA_INV(ndev, addr, len) \
do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_FROM_DEVICE); } while (0)
#define DMA_WBACK_INV(ndev, addr, len) \
do { dma_cache_sync((ndev)->dev.parent, (void *)addr, len, DMA_BIDIRECTIONAL); } while (0)
#define SYSBUS 0x0000006c;
/* big endian CPU, 82596 "big" endian mode */
#define SWAP32(x) (((u32)(x)<<16) | ((((u32)(x)))>>16))
#define SWAP16(x) (x)
#include "lib82596.c"
MODULE_AUTHOR("Richard Hirst");
MODULE_DESCRIPTION("i82596 driver");
MODULE_LICENSE("GPL");
module_param(i596_debug, int, 0);
MODULE_PARM_DESC(i596_debug, "lasi_82596 debug mask");
/* Strobe the 82596 channel-attention line so it re-reads the SCB. */
static inline void ca(struct net_device *dev)
{
	unsigned long ca_addr = dev->base_addr + PA_CHANNEL_ATTENTION;

	gsc_writel(0, ca_addr);
}
/*
 * Write a 32-bit command/address word to the 82596 PORT register as
 * two 16-bit halves.  Boards flagged with OPT_SWAP_PORT need the
 * halves in the opposite order.
 */
static void mpu_port(struct net_device *dev, int c, dma_addr_t x)
{
	struct i596_private *lp = netdev_priv(dev);
	u32 word = (u32)c | (u32)x;
	u16 first, second;

	if (lp->options & OPT_SWAP_PORT) {
		first = word >> 16;
		second = word & 0xffff;
	} else {
		first = word & 0xffff;
		second = word >> 16;
	}

	gsc_writel(first, dev->base_addr + PA_CPU_PORT_L_ACCESS);
	udelay(1);
	gsc_writel(second, dev->base_addr + PA_CPU_PORT_L_ACCESS);
}
#define LAN_PROM_ADDR 0xF0810000
/*
 * Probe one LASI/ASP i82596 LAN device: allocate the net_device, fetch
 * the MAC address (from PDC, falling back to the LAN PROM) and hand
 * off to the generic i82596 probe.
 *
 * Returns 0 on success or a negative errno.
 */
static int
lan_init_chip(struct parisc_device *dev)
{
	struct net_device *netdevice;
	struct i596_private *lp;
	int retval;
	int i;

	if (!dev->irq) {
		printk(KERN_ERR "%s: IRQ not found for i82596 at 0x%lx\n",
			__FILE__, (unsigned long)dev->hpa.start);
		return -ENODEV;
	}

	printk(KERN_INFO "Found i82596 at 0x%lx, IRQ %d\n",
	       (unsigned long)dev->hpa.start, dev->irq);

	netdevice = alloc_etherdev(sizeof(struct i596_private));
	if (!netdevice)
		return -ENOMEM;
	SET_NETDEV_DEV(netdevice, &dev->dev);
	parisc_set_drvdata (dev, netdevice);

	netdevice->base_addr = dev->hpa.start;
	netdevice->irq = dev->irq;

	if (pdc_lan_station_id(netdevice->dev_addr, netdevice->base_addr)) {
		/* PDC has no station address; read it from the LAN PROM */
		for (i = 0; i < 6; i++)
			netdevice->dev_addr[i] = gsc_readb(LAN_PROM_ADDR + i);
		printk(KERN_INFO
		       "%s: MAC of HP700 LAN read from EEPROM\n", __FILE__);
	}

	lp = netdev_priv(netdevice);
	lp->options = dev->id.sversion == 0x72 ? OPT_SWAP_PORT : 0;

	retval = i82596_probe(netdevice);
	if (retval) {
		/*
		 * Propagate the real error code instead of collapsing
		 * everything (including -ENOMEM) to -ENODEV.
		 */
		free_netdev(netdevice);
		return retval;
	}
	return retval;
}
/*
 * Undo lan_init_chip(): unregister the net device first (so nothing can
 * still be transmitting), then release the non-coherent DMA area that
 * the generic i82596 code allocated, and finally free the net_device.
 * The teardown order matters - the DMA area may be in use until
 * unregister_netdev() returns.
 */
static int lan_remove_chip(struct parisc_device *pdev)
{
	struct net_device *dev = parisc_get_drvdata(pdev);
	struct i596_private *lp = netdev_priv(dev);

	unregister_netdev (dev);
	DMA_FREE(&pdev->dev, sizeof(struct i596_private),
		 (void *)lp->dma, lp->dma_addr);
	free_netdev (dev);
	return 0;
}
/*
 * HP sversion IDs this driver binds to: 0x8a (LASI LAN) and
 * 0x72 (ASP LAN, which needs the word-swapped MPU port - see
 * lan_init_chip()).
 */
static struct parisc_device_id lan_tbl[] = {
	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x0008a },
	{ HPHW_FIO, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00072 },
	{ 0, }
};

MODULE_DEVICE_TABLE(parisc, lan_tbl);

/* Binding of probe/remove callbacks to the parisc driver core. */
static struct parisc_driver lan_driver = {
	.name = "lasi_82596",
	.id_table = lan_tbl,
	.probe = lan_init_chip,
	.remove = lan_remove_chip,
};
static int lasi_82596_init(void)
{
printk(KERN_INFO LASI_82596_DRIVER_VERSION "\n");
return register_parisc_driver(&lan_driver);
}
module_init(lasi_82596_init);
/* Module exit point: detach from the parisc driver core. */
static void __exit lasi_82596_exit(void)
{
	unregister_parisc_driver(&lan_driver);
}
module_exit(lasi_82596_exit);
| gpl-2.0 |
parheliamm/SCH-i939_Kernel | drivers/staging/xgifb/vb_setmode.c | 2378 | 234058 |
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/types.h>
#include <linux/version.h>
#include "XGIfb.h"
#include "vb_def.h"
#include "vgatypes.h"
#include "vb_struct.h"
#include "vb_util.h"
#include "vb_table.h"
#include "vb_setmode.h"
#define IndexMask 0xff
#ifndef XGI_MASK_DUAL_CHIP
#define XGI_MASK_DUAL_CHIP 0x04 /* SR3A */
#endif
/* Default DAC palette for MDA-compatible modes (6-bit RGB components). */
static unsigned short XGINew_MDA_DAC[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
	0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
	0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
	0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15, 0x15,
	0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F, 0x3F};

/* Default DAC palette for CGA-compatible modes. */
static unsigned short XGINew_CGA_DAC[] = {
	0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
	0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
	0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
	0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
	0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
	0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
	0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
	0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F};

/* Default DAC palette for EGA-compatible modes. */
static unsigned short XGINew_EGA_DAC[] = {
	0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x05, 0x15,
	0x20, 0x30, 0x24, 0x34, 0x21, 0x31, 0x25, 0x35,
	0x08, 0x18, 0x0C, 0x1C, 0x09, 0x19, 0x0D, 0x1D,
	0x28, 0x38, 0x2C, 0x3C, 0x29, 0x39, 0x2D, 0x3D,
	0x02, 0x12, 0x06, 0x16, 0x03, 0x13, 0x07, 0x17,
	0x22, 0x32, 0x26, 0x36, 0x23, 0x33, 0x27, 0x37,
	0x0A, 0x1A, 0x0E, 0x1E, 0x0B, 0x1B, 0x0F, 0x1F,
	0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F};

/* Default DAC palette (first entries) for VGA modes. */
static unsigned short XGINew_VGA_DAC[] = {
	0x00, 0x10, 0x04, 0x14, 0x01, 0x11, 0x09, 0x15,
	0x2A, 0x3A, 0x2E, 0x3E, 0x2B, 0x3B, 0x2F, 0x3F,
	0x00, 0x05, 0x08, 0x0B, 0x0E, 0x11, 0x14, 0x18,
	0x1C, 0x20, 0x24, 0x28, 0x2D, 0x32, 0x38, 0x3F,
	0x00, 0x10, 0x1F, 0x2F, 0x3F, 0x1F, 0x27, 0x2F,
	0x37, 0x3F, 0x2D, 0x31, 0x36, 0x3A, 0x3F, 0x00,
	0x07, 0x0E, 0x15, 0x1C, 0x0E, 0x11, 0x15, 0x18,
	0x1C, 0x14, 0x16, 0x18, 0x1A, 0x1C, 0x00, 0x04,
	0x08, 0x0C, 0x10, 0x08, 0x0A, 0x0C, 0x0E, 0x10,
	0x0B, 0x0C, 0x0D, 0x0F, 0x10};
/*
 * InitTo330Pointer - wire every lookup-table pointer in vb_device_info
 * to the XGI330-family static tables, then override selected entries
 * for newer chips (XG20/XG21/XG27) and for LVDS-capable video bridges.
 * Must run before any of the mode-setting routines that dereference
 * these pointers.
 */
void InitTo330Pointer(unsigned char ChipType, struct vb_device_info *pVBInfo)
{
	/* Mode and CRTC lookup tables shared by the whole family. */
	pVBInfo->SModeIDTable = (struct XGI_StStruct *) XGI330_SModeIDTable;
	pVBInfo->StandTable = (struct XGI_StandTableStruct *) XGI330_StandTable;
	pVBInfo->EModeIDTable = (struct XGI_ExtStruct *) XGI330_EModeIDTable;
	pVBInfo->RefIndex = (struct XGI_Ext2Struct *) XGI330_RefIndex;
	pVBInfo->XGINEWUB_CRT1Table
			= (struct XGI_CRT1TableStruct *) XGI_CRT1Table;

	/* add for new UNIVGABIOS */
	/* XGINew_UBLCDDataTable =
	 * (struct XGI_LCDDataTablStruct *) XGI_LCDDataTable; */
	/* XGINew_UBTVDataTable = (XGI_TVDataTablStruct *) XGI_TVDataTable; */

	/* Clock tables (memory, engine, pixel clocks). */
	pVBInfo->MCLKData = (struct XGI_MCLKDataStruct *) XGI340New_MCLKData;
	pVBInfo->ECLKData = (struct XGI_ECLKDataStruct *) XGI340_ECLKData;
	pVBInfo->VCLKData = (struct XGI_VCLKDataStruct *) XGI_VCLKData;
	pVBInfo->VBVCLKData = (struct XGI_VBVCLKDataStruct *) XGI_VBVCLKData;
	pVBInfo->ScreenOffset = XGI330_ScreenOffset;
	pVBInfo->StResInfo = (struct XGI_StResInfoStruct *) XGI330_StResInfo;
	pVBInfo->ModeResInfo
			= (struct XGI_ModeResInfoStruct *) XGI330_ModeResInfo;

	pVBInfo->pOutputSelect = &XGI330_OutputSelect;
	pVBInfo->pSoftSetting = &XGI330_SoftSetting;
	pVBInfo->pSR07 = &XGI330_SR07;

	/* Runtime display state starts out cleared. */
	pVBInfo->LCDResInfo = 0;
	pVBInfo->LCDTypeInfo = 0;
	pVBInfo->LCDInfo = 0;
	pVBInfo->VBInfo = 0;
	pVBInfo->TVInfo = 0;

	/* Default register value tables (340 series base). */
	pVBInfo->SR15 = XGI340_SR13;
	pVBInfo->CR40 = XGI340_cr41;
	pVBInfo->SR25 = XGI330_sr25;
	pVBInfo->pSR31 = &XGI330_sr31;
	pVBInfo->pSR32 = &XGI330_sr32;
	pVBInfo->CR6B = XGI340_CR6B;
	pVBInfo->CR6E = XGI340_CR6E;
	pVBInfo->CR6F = XGI340_CR6F;
	pVBInfo->CR89 = XGI340_CR89;
	pVBInfo->AGPReg = XGI340_AGPReg;
	pVBInfo->SR16 = XGI340_SR16;
	pVBInfo->pCRCF = &XG40_CRCF;
	pVBInfo->pXGINew_DRAMTypeDefinition = &XG40_DRAMTypeDefinition;

	pVBInfo->CR49 = XGI330_CR49;
	pVBInfo->pSR1F = &XGI330_SR1F;
	pVBInfo->pSR21 = &XGI330_SR21;
	pVBInfo->pSR22 = &XGI330_SR22;
	pVBInfo->pSR23 = &XGI330_SR23;
	pVBInfo->pSR24 = &XGI330_SR24;
	pVBInfo->pSR33 = &XGI330_SR33;

	pVBInfo->pCRT2Data_1_2 = &XGI330_CRT2Data_1_2;
	pVBInfo->pCRT2Data_4_D = &XGI330_CRT2Data_4_D;
	pVBInfo->pCRT2Data_4_E = &XGI330_CRT2Data_4_E;
	pVBInfo->pCRT2Data_4_10 = &XGI330_CRT2Data_4_10;

	/* Sense data used for CRT2/TV output detection. */
	pVBInfo->pRGBSenseData = &XGI330_RGBSenseData;
	pVBInfo->pVideoSenseData = &XGI330_VideoSenseData;
	pVBInfo->pYCSenseData = &XGI330_YCSenseData;
	pVBInfo->pRGBSenseData2 = &XGI330_RGBSenseData2;
	pVBInfo->pVideoSenseData2 = &XGI330_VideoSenseData2;
	pVBInfo->pYCSenseData2 = &XGI330_YCSenseData2;

	/* TV timing tables for the supported standards. */
	pVBInfo->NTSCTiming = XGI330_NTSCTiming;
	pVBInfo->PALTiming = XGI330_PALTiming;
	pVBInfo->HiTVExtTiming = XGI330_HiTVExtTiming;
	pVBInfo->HiTVSt1Timing = XGI330_HiTVSt1Timing;
	pVBInfo->HiTVSt2Timing = XGI330_HiTVSt2Timing;
	pVBInfo->HiTVTextTiming = XGI330_HiTVTextTiming;
	pVBInfo->YPbPr750pTiming = XGI330_YPbPr750pTiming;
	pVBInfo->YPbPr525pTiming = XGI330_YPbPr525pTiming;
	pVBInfo->YPbPr525iTiming = XGI330_YPbPr525iTiming;
	pVBInfo->HiTVGroup3Data = XGI330_HiTVGroup3Data;
	pVBInfo->HiTVGroup3Simu = XGI330_HiTVGroup3Simu;
	pVBInfo->HiTVGroup3Text = XGI330_HiTVGroup3Text;
	pVBInfo->Ren525pGroup3 = XGI330_Ren525pGroup3;
	pVBInfo->Ren750pGroup3 = XGI330_Ren750pGroup3;

	pVBInfo->TimingH = (struct XGI_TimingHStruct *) XGI_TimingH;
	pVBInfo->TimingV = (struct XGI_TimingVStruct *) XGI_TimingV;
	pVBInfo->UpdateCRT1 = (struct XGI_XG21CRT1Struct *) XGI_UpdateCRT1Table;

	pVBInfo->CHTVVCLKUNTSC = XGI330_CHTVVCLKUNTSC;
	pVBInfo->CHTVVCLKONTSC = XGI330_CHTVVCLKONTSC;
	pVBInfo->CHTVVCLKUPAL = XGI330_CHTVVCLKUPAL;
	pVBInfo->CHTVVCLKOPAL = XGI330_CHTVVCLKOPAL;

	/* 310 customization related */
	/* LVDS-capable bridges (301LV/302LV) use the DL capability list. */
	if ((pVBInfo->VBType & VB_XGI301LV) || (pVBInfo->VBType & VB_XGI302LV))
		pVBInfo->LCDCapList = XGI_LCDDLCapList;
	else
		pVBInfo->LCDCapList = XGI_LCDCapList;

	if ((ChipType == XG21) || (ChipType == XG27))
		pVBInfo->XG21_LVDSCapList = XGI21_LCDCapList;

	pVBInfo->XGI_TVDelayList = XGI301TVDelayList;
	pVBInfo->XGI_TVDelayList2 = XGI301TVDelayList2;

	pVBInfo->pXGINew_I2CDefinition = &XG40_I2CDefinition;

	/* Chip-specific overrides for the newer parts. */
	if (ChipType >= XG20)
		pVBInfo->pXGINew_CR97 = &XG20_CR97;

	if (ChipType == XG27) {
		pVBInfo->MCLKData
			= (struct XGI_MCLKDataStruct *) XGI27New_MCLKData;
		pVBInfo->CR40 = XGI27_cr41;
		pVBInfo->pXGINew_CR97 = &XG27_CR97;
		pVBInfo->pSR36 = &XG27_SR36;
		pVBInfo->pCR8F = &XG27_CR8F;
		pVBInfo->pCRD0 = XG27_CRD0;
		pVBInfo->pCRDE = XG27_CRDE;
		pVBInfo->pSR40 = &XG27_SR40;
		pVBInfo->pSR41 = &XG27_SR41;
	}

	if (ChipType >= XG20) {
		pVBInfo->pDVOSetting = &XG21_DVOSetting;
		pVBInfo->pCR2E = &XG21_CR2E;
		pVBInfo->pCR2F = &XG21_CR2F;
		pVBInfo->pCR46 = &XG21_CR46;
		pVBInfo->pCR47 = &XG21_CR47;
	}
}
/*
 * Look up the StandTable index for a mode.  Standard VGA modes
 * (ModeNo <= 0x13) carry their index in SModeIDTable; extended modes
 * use one of two fixed entries depending on the mode class.
 */
static unsigned char XGI_GetModePtr(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned char tbl_index;

	if (ModeNo > 0x13) {
		/* extended mode: 0x1B for text/EGA class, 0x0F otherwise */
		tbl_index = (pVBInfo->ModeType <= 0x02) ? 0x1B : 0x0F;
	} else {
		tbl_index = pVBInfo->SModeIDTable[ModeIdIndex].St_StTableIndex;
	}

	return tbl_index; /* Get pVBInfo->StandTable index */
}
/*
unsigned char XGI_SetBIOSData(unsigned short ModeNo,
unsigned short ModeIdIndex) {
return (0);
}
*/
/* unsigned char XGI_ClearBankRegs(unsigned short ModeNo,
unsigned short ModeIdIndex) {
return( 0 ) ;
}
*/
/*
 * Program the VGA sequencer (SR0-SR4) from the standard-mode table.
 * SR1 additionally gets bit 0 forced when CRT2 drives LCDA, or LCD/TV
 * in slave mode, and bit 5 (screen off) set while the mode is changed.
 *
 * The original code also computed an unused 'modeflag' from the mode
 * tables and dead-stored SetCRT2ToLCDA into the loop counter; both
 * were side-effect free and have been removed.  ModeNo/ModeIdIndex are
 * kept for call-site compatibility.
 */
static void XGI_SetSeqRegs(unsigned short ModeNo,
		unsigned short StandTableIndex,
		unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned char tempah, SRdata;
	unsigned short i;

	xgifb_reg_set(pVBInfo->P3c4, 0x00, 0x03); /* Set SR0 */
	tempah = pVBInfo->StandTable[StandTableIndex].SR[0];

	if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
		tempah |= 0x01;
	} else if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD)) {
		if (pVBInfo->VBInfo & SetInSlaveMode)
			tempah |= 0x01;
	}

	tempah |= 0x20; /* screen off */
	xgifb_reg_set(pVBInfo->P3c4, 0x01, tempah); /* Set SR1 */

	for (i = 2; i <= 4; i++) {
		/* Get SR2,3,4 from file */
		SRdata = pVBInfo->StandTable[StandTableIndex].SR[i - 1];
		xgifb_reg_set(pVBInfo->P3c4, i, SRdata); /* Set SR2 3 4 */
	}
}
/*
 * Load the VGA miscellaneous output register (port 3c2) from the
 * standard-mode table.  (A long-dead commented-out LCDA special case
 * has been removed.)
 */
static void XGI_SetMiscRegs(unsigned short StandTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned char Miscdata;

	/* Get Misc from file */
	Miscdata = pVBInfo->StandTable[StandTableIndex].MISC;

	outb(Miscdata, pVBInfo->P3c2); /* Set Misc(3c2) */
}
/*
 * Unlock the CRT controller (clear CR11 bit 7) and program CRTC
 * registers 0x00-0x18 from the standard-mode table.  The
 * HwDeviceExtension parameter is unused but kept for the call sites.
 * (A dead commented-out XGI_630 workaround has been removed.)
 */
static void XGI_SetCRTCRegs(struct xgi_hw_device_info *HwDeviceExtension,
		unsigned short StandTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned char CRTCdata;
	unsigned short i;

	CRTCdata = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
	CRTCdata &= 0x7f;
	xgifb_reg_set(pVBInfo->P3d4, 0x11, CRTCdata); /* Unlock CRTC */

	for (i = 0; i <= 0x18; i++) {
		/* Get CRTC from file */
		CRTCdata = pVBInfo->StandTable[StandTableIndex].CRTC[i];
		xgifb_reg_set(pVBInfo->P3d4, i, CRTCdata); /* Set CRTC(3d4) */
	}
}
/*
 * Program attribute controller registers 0x00-0x13 from the
 * standard-mode table, then AR14 = 0 and re-enable the attribute
 * palette.  Each write must be preceded by an input-status-1 read
 * (3DA) to reset the 3C0 index/data flip-flop - the read/write
 * ordering here is deliberate and must not be changed.
 */
static void XGI_SetATTRegs(unsigned short ModeNo,
		unsigned short StandTableIndex,
		unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned char ARdata;
	unsigned short i, modeflag;

	if (ModeNo <= 0x13)
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	else
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	for (i = 0; i <= 0x13; i++) {
		ARdata = pVBInfo->StandTable[StandTableIndex].ATTR[i];
		if (modeflag & Charx8Dot) { /* ifndef Dot9 */
			/* AR13 (pixel panning) forced to 0 when CRT2 drives
			 * LCDA, or LCD/TV in slave mode */
			if (i == 0x13) {
				if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
					ARdata = 0;
				} else {
					if (pVBInfo->VBInfo & (SetCRT2ToTV
							| SetCRT2ToLCD)) {
						if (pVBInfo->VBInfo &
						    SetInSlaveMode)
							ARdata = 0;
					}
				}
			}
		}

		inb(pVBInfo->P3da); /* reset 3da */
		outb(i, pVBInfo->P3c0); /* set index */
		outb(ARdata, pVBInfo->P3c0); /* set data */
	}

	inb(pVBInfo->P3da); /* reset 3da */
	outb(0x14, pVBInfo->P3c0); /* set index */
	outb(0x00, pVBInfo->P3c0); /* set data */
	inb(pVBInfo->P3da); /* Enable Attribute */
	outb(0x20, pVBInfo->P3c0);
}
/*
 * Program graphics controller registers GR0-GR8 from the
 * standard-mode table; for modes above VGA class, clear GR5 bit 6
 * to disable the 256-colour shift mode.
 */
static void XGI_SetGRCRegs(unsigned short StandTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short reg;
	unsigned char value;

	for (reg = 0; reg <= 0x08; reg++) {
		/* Get GR from file */
		value = pVBInfo->StandTable[StandTableIndex].GRC[reg];
		xgifb_reg_set(pVBInfo->P3ce, reg, value); /* Set GR(3ce) */
	}

	if (pVBInfo->ModeType > ModeVGA) {
		value = (unsigned char) xgifb_reg_get(pVBInfo->P3ce, 0x05);
		value &= 0xBF; /* 256 color disable */
		xgifb_reg_set(pVBInfo->P3ce, 0x05, value);
	}
}
/* Zero the extended sequencer registers SR0A through SR0E. */
static void XGI_ClearExt1Regs(struct vb_device_info *pVBInfo)
{
	unsigned short reg;

	for (reg = 0x0A; reg <= 0x0E; reg++)
		xgifb_reg_set(pVBInfo->P3c4, reg, 0x00); /* Clear SR0A-SR0E */
}
/*
 * Program the two default pixel-clock PLL entries (VCLKData[0] and
 * VCLKData[1]) into SR2B/SR2C, steering each via the SR31[5:4] bits
 * (presumably a VCLK bank/index select - verify against XGI register
 * docs), then clear those bits again.  Always returns 0.
 */
static unsigned char XGI_SetDefaultVCLK(struct vb_device_info *pVBInfo)
{
	/* SR31[5:4] = 10b, load entry 0 */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, 0x20);
	xgifb_reg_set(pVBInfo->P3c4, 0x2B, pVBInfo->VCLKData[0].SR2B);
	xgifb_reg_set(pVBInfo->P3c4, 0x2C, pVBInfo->VCLKData[0].SR2C);

	/* SR31[5:4] = 01b, load entry 1 */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, 0x10);
	xgifb_reg_set(pVBInfo->P3c4, 0x2B, pVBInfo->VCLKData[1].SR2B);
	xgifb_reg_set(pVBInfo->P3c4, 0x2C, pVBInfo->VCLKData[1].SR2C);

	/* restore SR31[5:4] = 00b */
	xgifb_reg_and(pVBInfo->P3c4, 0x31, ~0x30);
	return 0;
}
/*
 * Decide whether the refresh-rate table entry at index
 * RefreshRateTableIndex + *i is usable for the current CRT2
 * configuration.  First a support mask (tempax) is built from the
 * bridge type, output routing (RAMDAC/LCD/TV/HiVision/YPbPr or LVDS)
 * and panel/resolution restrictions; then *i is walked downwards, and
 * if necessary upwards from 0, until an entry for the same mode ID
 * whose Ext_InfoFlag matches the mask is found.
 *
 * Returns 1 and leaves *i at the matching entry on success, 0 when no
 * entry of this mode supports the configuration.
 *
 * NOTE(review): the "tempax = 0;" before the early "return 0;" in the
 * LCDNonExpanding branch is a dead store, and the final "return 1;" is
 * unreachable (the second loop can only exit via return) - both are
 * harmless but could be cleaned up.
 */
static unsigned char XGI_AjustCRT2Rate(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex, unsigned short *i,
		struct vb_device_info *pVBInfo)
{
	unsigned short tempax, tempbx, resinfo, modeflag, infoflag;

	if (ModeNo <= 0x13)
		/* si+St_ModeFlag */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	else
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	resinfo = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
	/* tempbx: mode ID of the entry we start from; the search below
	 * only considers entries with the same ID */
	tempbx = pVBInfo->RefIndex[RefreshRateTableIndex + (*i)].ModeID;
	tempax = 0;

	if (pVBInfo->IF_DEF_LVDS == 0) {
		if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
			tempax |= SupportRAMDAC2;

			if (pVBInfo->VBType & VB_XGI301C)
				tempax |= SupportCRT2in301C;
		}

		/* 301b */
		if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
			tempax |= SupportLCD;

			/* non-expanding panels below 1280x1024/960 cannot
			 * display the high-resolution modes at all */
			if (pVBInfo->LCDResInfo != Panel1280x1024) {
				if (pVBInfo->LCDResInfo != Panel1280x960) {
					if (pVBInfo->LCDInfo &
					    LCDNonExpanding) {
						if (resinfo >= 9) {
							tempax = 0;
							return 0;
						}
					}
				}
			}
		}

		if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) { /* for HiTV */
			if ((pVBInfo->VBType & VB_XGI301LV) &&
			    (pVBInfo->VBExtInfo == VB_YPbPr1080i)) {
				tempax |= SupportYPbPr;
				if (pVBInfo->VBInfo & SetInSlaveMode) {
					if (resinfo == 4)
						return 0;

					if (resinfo == 3)
						return 0;

					if (resinfo > 7)
						return 0;
				}
			} else {
				tempax |= SupportHiVisionTV;
				if (pVBInfo->VBInfo & SetInSlaveMode) {
					if (resinfo == 4)
						return 0;

					if (resinfo == 3) {
						if (pVBInfo->SetFlag
								& TVSimuMode)
							return 0;
					}

					if (resinfo > 7)
						return 0;
				}
			}
		} else {
			if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO |
					       SetCRT2ToSVIDEO |
					       SetCRT2ToSCART |
					       SetCRT2ToYPbPr |
					       SetCRT2ToHiVisionTV)) {
				tempax |= SupportTV;

				if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
						| VB_XGI301LV | VB_XGI302LV
						| VB_XGI301C)) {
					tempax |= SupportTV1024;
				}

				if (!(pVBInfo->VBInfo & SetPALTV)) {
					if (modeflag & NoSupportSimuTV) {
						if (pVBInfo->VBInfo &
						    SetInSlaveMode) {
							if (!(pVBInfo->VBInfo &
							      SetNotSimuMode)) {
								return 0;
							}
						}
					}
				}
			}
		}
	} else { /* for LVDS */
		if (pVBInfo->IF_DEF_CH7005 == 1) {
			if (pVBInfo->VBInfo & SetCRT2ToTV)
				tempax |= SupportCHTV;
		}

		if (pVBInfo->VBInfo & SetCRT2ToLCD) {
			tempax |= SupportLCD;

			if (resinfo > 0x08)
				return 0; /* 1024x768 */

			if (pVBInfo->LCDResInfo < Panel1024x768) {
				if (resinfo > 0x07)
					return 0; /* 800x600 */

				if (resinfo == 0x04)
					return 0; /* 512x384 */
			}
		}
	}

	/* Walk downwards from the requested rate looking for a match. */
	for (; pVBInfo->RefIndex[RefreshRateTableIndex + (*i)].ModeID ==
			tempbx; (*i)--) {
		infoflag = pVBInfo->RefIndex[RefreshRateTableIndex + (*i)].
				Ext_InfoFlag;
		if (infoflag & tempax)
			return 1;

		if ((*i) == 0)
			break;
	}

	/* Nothing below matched: walk upwards from the lowest rate. */
	for ((*i) = 0;; (*i)++) {
		infoflag = pVBInfo->RefIndex[RefreshRateTableIndex + (*i)].
				Ext_InfoFlag;
		if (pVBInfo->RefIndex[RefreshRateTableIndex + (*i)].ModeID
				!= tempbx) {
			return 0;
		}

		if (infoflag & tempax)
			return 1;
	}
	return 1;
}
/*
 * Drive the HSync/VSync polarity bits (bits 7:6) of the misc output
 * register (3c2) from the refresh-rate table, on top of the standard
 * 0x2F base value.
 */
static void XGI_SetSync(unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short polarity;

	/* di+0x00 */
	polarity = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_InfoFlag >> 8;
	polarity &= 0xC0;

	outb(0x2F | polarity, pVBInfo->P3c2); /* Set Misc(3c2) */
}
/*
 * Program the horizontal CRT1 timing from the staged TimingH[0] data:
 * CR00, CR02-CR05 and the extended SR0B/SR0C/SR0E registers.  For
 * XG20 and newer chips, apply an additional fixup that shifts the
 * horizontal blank/retrace values down by one character clock
 * (presumably a silicon quirk of these parts - not documented here).
 * Register access order is significant throughout.
 */
static void XGI_SetCRT1Timing_H(struct vb_device_info *pVBInfo,
		struct xgi_hw_device_info *HwDeviceExtension)
{
	unsigned char data, data1, pushax;
	unsigned short i, j;

	/* xgifb_reg_set(pVBInfo->P3d4, 0x51, 0); */
	/* xgifb_reg_set(pVBInfo->P3d4, 0x56, 0); */
	/* xgifb_reg_and_or(pVBInfo->P3d4, 0x11, 0x7f, 0x00); */

	/* unlock cr0-7 */
	data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
	data &= 0x7F;
	xgifb_reg_set(pVBInfo->P3d4, 0x11, data);

	/* CR00: horizontal total */
	data = pVBInfo->TimingH[0].data[0];
	xgifb_reg_set(pVBInfo->P3d4, 0, data);

	/* entries 1-4 go to CR02-CR05 */
	for (i = 0x01; i <= 0x04; i++) {
		data = pVBInfo->TimingH[0].data[i];
		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 1), data);
	}

	/* entries 5-6 go to SR0B-SR0C (extended overflow bits) */
	for (i = 0x05; i <= 0x06; i++) {
		data = pVBInfo->TimingH[0].data[i];
		xgifb_reg_set(pVBInfo->P3c4, (unsigned short) (i + 6), data);
	}

	/* merge entry 7's upper bits into SR0E, keeping SR0E[4:0] */
	j = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x0e);
	j &= 0x1F;
	data = pVBInfo->TimingH[0].data[7];
	data &= 0xE0;
	data |= j;
	xgifb_reg_set(pVBInfo->P3c4, 0x0e, data);

	if (HwDeviceExtension->jChipType >= XG20) {
		/* CR04 (h-sync start) minus one character clock */
		data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x04);
		data = data - 1;
		xgifb_reg_set(pVBInfo->P3d4, 0x04, data);
		data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x05);
		data1 = data;
		data1 &= 0xE0;
		data &= 0x1F;
		if (data == 0) {
			/* low field would underflow: clear SR0C bit 2
			 * before wrapping (pushax preserves the zero) */
			pushax = data;
			data = (unsigned char) xgifb_reg_get(pVBInfo->P3c4,
					0x0c);
			data &= 0xFB;
			xgifb_reg_set(pVBInfo->P3c4, 0x0c, data);
			data = pushax;
		}
		data = data - 1;
		data |= data1;
		xgifb_reg_set(pVBInfo->P3d4, 0x05, data);
		/* rotate SR0E[7:5] forward by 3 (modulo 7) */
		data = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x0e);
		data = data >> 5;
		data = data + 3;
		if (data > 7)
			data = data - 7;
		data = data << 5;
		xgifb_reg_and_or(pVBInfo->P3c4, 0x0e, ~0xE0, data);
	}
}
/*
 * Program the vertical CRT1 timing from the staged TimingV[0] data:
 * CR06-CR07, CR10-CR11, CR15-CR16, the SR0A overflow bits, and the
 * maximum-scanline/double-scan handling in CR09.  Register access
 * order is significant.
 */
static void XGI_SetCRT1Timing_V(unsigned short ModeIdIndex,
		unsigned short ModeNo,
		struct vb_device_info *pVBInfo)
{
	unsigned char data;
	unsigned short i, j;

	/* xgifb_reg_set(pVBInfo->P3d4, 0x51, 0); */
	/* xgifb_reg_set(pVBInfo->P3d4, 0x56, 0); */
	/* xgifb_reg_and_or(pVBInfo->P3d4, 0x11, 0x7f, 0x00); */

	/* entries 0-1 go to CR06-CR07 (v-total, overflow) */
	for (i = 0x00; i <= 0x01; i++) {
		data = pVBInfo->TimingV[0].data[i];
		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 6), data);
	}

	/* entries 2-3 go to CR10-CR11 (v-retrace start/end) */
	for (i = 0x02; i <= 0x03; i++) {
		data = pVBInfo->TimingV[0].data[i];
		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 0x0e), data);
	}

	/* entries 4-5 go to CR15-CR16 (v-blank start/end) */
	for (i = 0x04; i <= 0x05; i++) {
		data = pVBInfo->TimingV[0].data[i];
		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 0x11), data);
	}

	/* merge entry 6's low bits into SR0A, keeping SR0A[7:6] */
	j = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x0a);
	j &= 0xC0;
	data = pVBInfo->TimingV[0].data[6];
	data &= 0x3F;
	data |= j;
	xgifb_reg_set(pVBInfo->P3c4, 0x0a, data);

	/* entry 6 bit 7 -> CR09 bit 5; set CR09 bit 7 for
	 * double-scanned modes, preserving CR09[6] and CR09[4:0] */
	data = pVBInfo->TimingV[0].data[6];
	data &= 0x80;
	data = data >> 2;

	if (ModeNo <= 0x13)
		i = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	else
		i = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	i &= DoubleScanMode;
	if (i)
		data |= 0x80;

	j = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x09);
	j &= 0x5F;
	data |= j;
	xgifb_reg_set(pVBInfo->P3d4, 0x09, data);
}
/*
 * Fetch the CRT1 CRTC parameter set selected by the refresh-rate
 * table, stage it into TimingH[0]/TimingV[0], and program both the
 * horizontal and vertical halves.  Graphics modes (ModeType > 0x03)
 * additionally get CR14 = 0x4F.
 */
static void XGI_SetCRT1CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo,
		struct xgi_hw_device_info *HwDeviceExtension)
{
	unsigned char tbl, lock;
	unsigned short n;

	/* Get index */
	tbl = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
	tbl &= IndexMask;

	lock = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
	lock &= 0x7F;
	xgifb_reg_set(pVBInfo->P3d4, 0x11, lock); /* Unlock CRTC */

	for (n = 0; n < 8; n++)
		pVBInfo->TimingH[0].data[n]
			= pVBInfo->XGINEWUB_CRT1Table[tbl].CR[n];

	for (n = 0; n < 7; n++)
		pVBInfo->TimingV[0].data[n]
			= pVBInfo->XGINEWUB_CRT1Table[tbl].CR[n + 8];

	XGI_SetCRT1Timing_H(pVBInfo, HwDeviceExtension);
	XGI_SetCRT1Timing_V(ModeIdIndex, ModeNo, pVBInfo);

	if (pVBInfo->ModeType > 0x03)
		xgifb_reg_set(pVBInfo->P3d4, 0x14, 0x4F);
}
/* --------------------------------------------------------------------- */
/* Function : XGI_SetXG21CRTC */
/* Input : Stand or enhance CRTC table */
/* Output : Fill CRT Hsync/Vsync to SR2E/SR2F/SR30/SR33/SR34/SR3F */
/* Description : Set LCD timing */
/* --------------------------------------------------------------------- */
static void XGI_SetXG21CRTC(unsigned short ModeNo, unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned char StandTableIndex, index, Tempax, Tempbx, Tempcx, Tempdx;
	unsigned short Temp1, Temp2, Temp3;

	if (ModeNo <= 0x13) {
		/* Standard VGA mode: sync values come from the StandTable. */
		StandTableIndex = XGI_GetModePtr(ModeNo, ModeIdIndex, pVBInfo);
		/* CR04 HRS */
		Tempax = pVBInfo->StandTable[StandTableIndex].CRTC[4];
		/* SR2E [7:0]->HRS */
		xgifb_reg_set(pVBInfo->P3c4, 0x2E, Tempax);
		/* Tempbx: CR05 HRE */
		Tempbx = pVBInfo->StandTable[StandTableIndex].CRTC[5];
		Tempbx &= 0x1F; /* Tempbx: HRE[4:0] */
		Tempcx = Tempax;
		Tempcx &= 0xE0; /* Tempcx: HRS[7:5] */
		Tempdx = Tempcx | Tempbx; /* Tempdx(HRE): HRS[7:5]HRE[4:0] */
		/* HRE wraps below HRS within the 5-bit field: carry into
		 * bit 5 so the programmed end stays after the start. */
		if (Tempbx < (Tempax & 0x1F)) /* IF HRE < HRS */
			Tempdx |= 0x20; /* Tempdx: HRE = HRE + 0x20 */
		Tempdx <<= 2; /* Tempdx << 2 */
		/* SR2F [7:2]->HRE */
		xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempdx);
		xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00);
		/* Tempax: CR16 VRS */
		Tempax = pVBInfo->StandTable[StandTableIndex].CRTC[16];
		Tempbx = Tempax; /* Tempbx=Tempax */
		Tempax &= 0x01; /* Tempax: VRS[0] */
		xgifb_reg_or(pVBInfo->P3c4, 0x33, Tempax); /* SR33[0]->VRS */
		/* Tempax: CR7 VRS */
		Tempax = pVBInfo->StandTable[StandTableIndex].CRTC[7];
		Tempdx = Tempbx >> 1; /* Tempdx: VRS[7:1] */
		Tempcx = Tempax & 0x04; /* Tempcx: CR7[2] */
		Tempcx <<= 5; /* Tempcx[7]: VRS[8] */
		Tempdx |= Tempcx; /* Tempdx: VRS[8:1] */
		/* SR34[7:0]: VRS[8:1] */
		xgifb_reg_set(pVBInfo->P3c4, 0x34, Tempdx);
		/* Temp1[8]: VRS[8] unsigned char -> unsigned short */
		Temp1 = Tempcx << 1;
		Temp1 |= Tempbx; /* Temp1[8:0]: VRS[8:0] */
		Tempax &= 0x80; /* Tempax[7]: CR7[7] */
		Temp2 = Tempax << 2; /* Temp2[9]: VRS[9] */
		Temp1 |= Temp2; /* Temp1[9:0]: VRS[9:0] */
		/* CR16 VRE */
		Tempax = pVBInfo->StandTable[StandTableIndex].CRTC[17];
		Tempax &= 0x0F; /* Tempax[3:0]: VRE[3:0] */
		Temp2 = Temp1 & 0x3F0; /* Temp2[9:4]: VRS[9:4] */
		Temp2 |= Tempax; /* Temp2[9:0]: VRE[9:0] */
		Temp3 = Temp1 & 0x0F; /* Temp3[3:0]: VRS[3:0] */
		/* Same wrap handling for the 4-bit VRE field. */
		if (Tempax < Temp3) /* VRE[3:0]<VRS[3:0] */
			Temp2 |= 0x10; /* Temp2: VRE + 0x10 */
		Temp2 &= 0xFF; /* Temp2[7:0]: VRE[7:0] */
		Tempax = (unsigned char) Temp2; /* Tempax[7:0]: VRE[7:0] */
		Tempax <<= 2; /* Tempax << 2: VRE[5:0] */
		Temp1 &= 0x600; /* Temp1[10:9]: VRS[10:9] */
		Temp1 >>= 9; /* [10:9]->[1:0] */
		Tempbx = (unsigned char) Temp1; /* Tempbx[1:0]: VRS[10:9] */
		Tempax |= Tempbx; /* VRE[5:0]VRS[10:9] */
		Tempax &= 0x7F;
		/* SR3F D[7:2]->VRE D[1:0]->VRS */
		xgifb_reg_set(pVBInfo->P3c4, 0x3F, Tempax);
	} else {
		/* Extended mode: sync values come from the CRT1 table. */
		index = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
		/* Tempax: CR4 HRS */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[3];
		Tempcx = Tempax; /* Tempcx: HRS */
		/* SR2E[7:0]->HRS */
		xgifb_reg_set(pVBInfo->P3c4, 0x2E, Tempax);
		Tempdx = pVBInfo->XGINEWUB_CRT1Table[index].CR[5]; /* SRB */
		Tempdx &= 0xC0; /* Tempdx[7:6]: SRB[7:6] */
		Temp1 = Tempdx; /* Temp1[7:6]: HRS[9:8] */
		Temp1 <<= 2; /* Temp1[9:8]: HRS[9:8] */
		Temp1 |= Tempax; /* Temp1[9:0]: HRS[9:0] */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[4]; /* CR5 HRE */
		Tempax &= 0x1F; /* Tempax[4:0]: HRE[4:0] */
		Tempbx = pVBInfo->XGINEWUB_CRT1Table[index].CR[6]; /* SRC */
		Tempbx &= 0x04; /* Tempbx[2]: HRE[5] */
		Tempbx <<= 3; /* Tempbx[5]: HRE[5] */
		Tempax |= Tempbx; /* Tempax[5:0]: HRE[5:0] */
		Temp2 = Temp1 & 0x3C0; /* Temp2[9:6]: HRS[9:6] */
		Temp2 |= Tempax; /* Temp2[9:0]: HRE[9:0] */
		Tempcx &= 0x3F; /* Tempcx[5:0]: HRS[5:0] */
		/* HRE wraps below HRS within its field: carry upward. */
		if (Tempax < Tempcx) /* HRE < HRS */
			Temp2 |= 0x40; /* Temp2 + 0x40 */
		Temp2 &= 0xFF;
		Tempax = (unsigned char) Temp2; /* Tempax: HRE[7:0] */
		Tempax <<= 2; /* Tempax[7:2]: HRE[5:0] */
		Tempdx >>= 6; /* Tempdx[7:6]->[1:0] HRS[9:8] */
		Tempax |= Tempdx; /* HRE[5:0]HRS[9:8] */
		/* SR2F D[7:2]->HRE, D[1:0]->HRS */
		xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempax);
		xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00);
		/* CR10 VRS */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[10];
		Tempbx = Tempax; /* Tempbx: VRS */
		Tempax &= 0x01; /* Tempax[0]: VRS[0] */
		xgifb_reg_or(pVBInfo->P3c4, 0x33, Tempax); /* SR33[0]->VRS[0] */
		/* CR7[2][7] VRE */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[9];
		Tempcx = Tempbx >> 1; /* Tempcx[6:0]: VRS[7:1] */
		Tempdx = Tempax & 0x04; /* Tempdx[2]: CR7[2] */
		Tempdx <<= 5; /* Tempdx[7]: VRS[8] */
		Tempcx |= Tempdx; /* Tempcx[7:0]: VRS[8:1] */
		xgifb_reg_set(pVBInfo->P3c4, 0x34, Tempcx); /* SR34[8:1]->VRS */
		Temp1 = Tempdx; /* Temp1[7]: Tempdx[7] */
		Temp1 <<= 1; /* Temp1[8]: VRS[8] */
		Temp1 |= Tempbx; /* Temp1[8:0]: VRS[8:0] */
		Tempax &= 0x80;
		Temp2 = Tempax << 2; /* Temp2[9]: VRS[9] */
		Temp1 |= Temp2; /* Temp1[9:0]: VRS[9:0] */
		/* Tempax: SRA */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[14];
		Tempax &= 0x08; /* Tempax[3]: VRS[3] */
		Temp2 = Tempax;
		Temp2 <<= 7; /* Temp2[10]: VRS[10] */
		Temp1 |= Temp2; /* Temp1[10:0]: VRS[10:0] */
		/* Tempax: CR11 VRE */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[11];
		Tempax &= 0x0F; /* Tempax[3:0]: VRE[3:0] */
		/* Tempbx: SRA */
		Tempbx = pVBInfo->XGINEWUB_CRT1Table[index].CR[14];
		Tempbx &= 0x20; /* Tempbx[5]: VRE[5] */
		Tempbx >>= 1; /* Tempbx[4]: VRE[4] */
		Tempax |= Tempbx; /* Tempax[4:0]: VRE[4:0] */
		Temp2 = Temp1 & 0x7E0; /* Temp2[10:5]: VRS[10:5] */
		Temp2 |= Tempax; /* Temp2[10:5]: VRE[10:5] */
		Temp3 = Temp1 & 0x1F; /* Temp3[4:0]: VRS[4:0] */
		/* VRE wraps below VRS within its field: carry upward. */
		if (Tempax < Temp3) /* VRE < VRS */
			Temp2 |= 0x20; /* VRE + 0x20 */
		Temp2 &= 0xFF;
		Tempax = (unsigned char) Temp2; /* Tempax: VRE[7:0] */
		Tempax <<= 2; /* Tempax[7:0]; VRE[5:0]00 */
		Temp1 &= 0x600; /* Temp1[10:9]: VRS[10:9] */
		Temp1 >>= 9; /* Temp1[1:0]: VRS[10:9] */
		Tempbx = (unsigned char) Temp1;
		Tempax |= Tempbx; /* Tempax[7:0]: VRE[5:0]VRS[10:9] */
		Tempax &= 0x7F;
		/* SR3F D[7:2]->VRE D[1:0]->VRS */
		xgifb_reg_set(pVBInfo->P3c4, 0x3F, Tempax);
	}
}
/* XG27 variant of XGI_SetXG21CRTC: fills SR2E/SR2F/SR30/SR34/SR35/SR3F
 * from either the StandTable (standard modes) or the CRT1 table. */
static void XGI_SetXG27CRTC(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short StandTableIndex, index, Tempax, Tempbx, Tempcx, Tempdx;

	if (ModeNo <= 0x13) {
		/* Standard VGA mode: sync values come from the StandTable. */
		StandTableIndex = XGI_GetModePtr(ModeNo, ModeIdIndex, pVBInfo);
		/* CR04 HRS */
		Tempax = pVBInfo->StandTable[StandTableIndex].CRTC[4];
		/* SR2E [7:0]->HRS */
		xgifb_reg_set(pVBInfo->P3c4, 0x2E, Tempax);
		/* Tempbx: CR05 HRE */
		Tempbx = pVBInfo->StandTable[StandTableIndex].CRTC[5];
		Tempbx &= 0x1F; /* Tempbx: HRE[4:0] */
		Tempcx = Tempax;
		Tempcx &= 0xE0; /* Tempcx: HRS[7:5] */
		Tempdx = Tempcx | Tempbx; /* Tempdx(HRE): HRS[7:5]HRE[4:0] */
		/* HRE wraps below HRS in the 5-bit field: carry upward. */
		if (Tempbx < (Tempax & 0x1F)) /* IF HRE < HRS */
			Tempdx |= 0x20; /* Tempdx: HRE = HRE + 0x20 */
		Tempdx <<= 2; /* Tempdx << 2 */
		/* SR2F [7:2]->HRE */
		xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempdx);
		xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00);
		/* Tempax: CR10 VRS */
		Tempax = pVBInfo->StandTable[StandTableIndex].CRTC[16];
		xgifb_reg_set(pVBInfo->P3c4, 0x34, Tempax); /* SR34[7:0]->VRS */
		Tempcx = Tempax; /* Tempcx=Tempax=VRS[7:0] */
		/* Tempax[7][2]: CR7[7][2] VRS[9][8] */
		Tempax = pVBInfo->StandTable[StandTableIndex].CRTC[7];
		Tempbx = Tempax; /* Tempbx=CR07 */
		Tempax &= 0x04; /* Tempax[2]: CR07[2] VRS[8] */
		Tempax >>= 2;
		/* SR35 D[0]->VRS D[8] */
		xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x01, Tempax);
		Tempcx |= (Tempax << 8); /* Tempcx[8] |= VRS[8] */
		Tempcx |= (Tempbx & 0x80) << 2; /* Tempcx[9] |= VRS[9] */
		/* CR11 VRE */
		Tempax = pVBInfo->StandTable[StandTableIndex].CRTC[17];
		Tempax &= 0x0F; /* Tempax: VRE[3:0] */
		Tempbx = Tempcx; /* Tempbx=Tempcx=VRS[9:0] */
		Tempbx &= 0x3F0; /* Tempbx[9:4]: VRS[9:4] */
		Tempbx |= Tempax; /* Tempbx[9:0]: VRE[9:0] */
		/* VRE wraps at or below VRS in the 4-bit field: carry. */
		if (Tempax <= (Tempcx & 0x0F)) /* VRE[3:0]<=VRS[3:0] */
			Tempbx |= 0x10; /* Tempbx: VRE + 0x10 */
		/* Tempax[7:0]: VRE[7:0] */
		Tempax = (unsigned char) Tempbx & 0xFF;
		Tempax <<= 2; /* Tempax << 2: VRE[5:0] */
		Tempcx = (Tempcx & 0x600) >> 8; /* Tempcx VRS[10:9] */
		/* SR3F D[7:2]->VRE D[5:0] */
		xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC, Tempax);
		/* SR35 D[2:1]->VRS[10:9] */
		xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x06, Tempcx);
	} else {
		/* Extended mode: sync values come from the CRT1 table. */
		index = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
		/* Tempax: CR4 HRS */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[3];
		Tempbx = Tempax; /* Tempbx: HRS[7:0] */
		/* SR2E[7:0]->HRS */
		xgifb_reg_set(pVBInfo->P3c4, 0x2E, Tempax);
		/* SR0B */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[5];
		Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/
		Tempbx |= (Tempax << 2); /* Tempbx: HRS[9:0] */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[4]; /* CR5 HRE */
		Tempax &= 0x1F; /* Tempax[4:0]: HRE[4:0] */
		Tempcx = Tempax; /* Tempcx: HRE[4:0] */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[6]; /* SRC */
		Tempax &= 0x04; /* Tempax[2]: HRE[5] */
		Tempax <<= 3; /* Tempax[5]: HRE[5] */
		Tempcx |= Tempax; /* Tempcx[5:0]: HRE[5:0] */
		Tempbx = Tempbx & 0x3C0; /* Tempbx[9:6]: HRS[9:6] */
		Tempbx |= Tempcx; /* Tempbx: HRS[9:6]HRE[5:0] */
		/* Tempax: CR4 HRS */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[3];
		Tempax &= 0x3F; /* Tempax: HRS[5:0] */
		/* HRE wraps at or below HRS in its field: carry upward. */
		if (Tempcx <= Tempax) /* HRE[5:0] < HRS[5:0] */
			Tempbx += 0x40; /* Tempbx= Tempbx + 0x40 : HRE[9:0]*/
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[5]; /* SR0B */
		Tempax &= 0xC0; /* Tempax[7:6]: SR0B[7:6]: HRS[9:8]*/
		Tempax >>= 6; /* Tempax[1:0]: HRS[9:8]*/
		Tempax |= ((Tempbx << 2) & 0xFF); /* Tempax[7:2]: HRE[5:0] */
		/* SR2F [7:2][1:0]: HRE[5:0]HRS[9:8] */
		xgifb_reg_set(pVBInfo->P3c4, 0x2F, Tempax);
		xgifb_reg_and_or(pVBInfo->P3c4, 0x30, 0xE3, 00);
		/* CR10 VRS */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[10];
		/* SR34[7:0]->VRS[7:0] */
		xgifb_reg_set(pVBInfo->P3c4, 0x34, Tempax);
		Tempcx = Tempax; /* Tempcx <= VRS[7:0] */
		/* CR7[7][2] VRS[9][8] */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[9];
		Tempbx = Tempax; /* Tempbx <= CR07[7:0] */
		Tempax = Tempax & 0x04; /* Tempax[2]: CR7[2]: VRS[8] */
		Tempax >>= 2; /* Tempax[0]: VRS[8] */
		/* SR35[0]: VRS[8] */
		xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x01, Tempax);
		Tempcx |= (Tempax << 8); /* Tempcx <= VRS[8:0] */
		Tempcx |= ((Tempbx & 0x80) << 2); /* Tempcx <= VRS[9:0] */
		/* Tempax: SR0A */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[14];
		Tempax &= 0x08; /* SR0A[3] VRS[10] */
		Tempcx |= (Tempax << 7); /* Tempcx <= VRS[10:0] */
		/* Tempax: CR11 VRE */
		Tempax = pVBInfo->XGINEWUB_CRT1Table[index].CR[11];
		Tempax &= 0x0F; /* Tempax[3:0]: VRE[3:0] */
		/* Tempbx: SR0A */
		Tempbx = pVBInfo->XGINEWUB_CRT1Table[index].CR[14];
		Tempbx &= 0x20; /* Tempbx[5]: SR0A[5]: VRE[4] */
		Tempbx >>= 1; /* Tempbx[4]: VRE[4] */
		Tempax |= Tempbx; /* Tempax[4:0]: VRE[4:0] */
		Tempbx = Tempcx; /* Tempbx: VRS[10:0] */
		Tempbx &= 0x7E0; /* Tempbx[10:5]: VRS[10:5] */
		Tempbx |= Tempax; /* Tempbx: VRS[10:5]VRE[4:0] */
		/* VRE wraps at or below VRS in its field: carry upward. */
		if (Tempbx <= Tempcx) /* VRE <= VRS */
			Tempbx |= 0x20; /* VRE + 0x20 */
		/* Tempax: Tempax[7:0]; VRE[5:0]00 */
		Tempax = (Tempbx << 2) & 0xFF;
		/* SR3F[7:2]:VRE[5:0] */
		xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC, Tempax);
		Tempax = Tempcx >> 8;
		/* SR35[2:0]:VRS[10:8] */
		xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07, Tempax);
	}
}
/* --------------------------------------------------------------------- */
/* Function : XGI_SetXG21LCD */
/* Input : */
/* Output : FCLK duty cycle, FCLK delay compensation */
/* Description : All values set zero */
/* --------------------------------------------------------------------- */
static void XGI_SetXG21LCD(struct vb_device_info *pVBInfo,
		unsigned short RefreshRateTableIndex, unsigned short ModeNo)
{
	unsigned short info_flag, cr37, misc;
	unsigned short port_3cc;

	port_3cc = pVBInfo->P3cc;

	/* Default: clear the DVO timing fixup registers. */
	xgifb_reg_set(pVBInfo->P3d4, 0x2E, 0x00);
	xgifb_reg_set(pVBInfo->P3d4, 0x2F, 0x00);
	xgifb_reg_set(pVBInfo->P3d4, 0x46, 0x00);
	xgifb_reg_set(pVBInfo->P3d4, 0x47, 0x00);

	/* DVO setting 0xC0: load the board-specific CR2E/2F/46/47 values. */
	if (((*pVBInfo->pDVOSetting) & 0xC0) == 0xC0) {
		xgifb_reg_set(pVBInfo->P3d4, 0x2E, *pVBInfo->pCR2E);
		xgifb_reg_set(pVBInfo->P3d4, 0x2F, *pVBInfo->pCR2F);
		xgifb_reg_set(pVBInfo->P3d4, 0x46, *pVBInfo->pCR46);
		xgifb_reg_set(pVBInfo->P3d4, 0x47, *pVBInfo->pCR47);
	}

	cr37 = xgifb_reg_get(pVBInfo->P3d4, 0x37);
	if (cr37 & 0x01) {
		xgifb_reg_or(pVBInfo->P3c4, 0x06, 0x40); /* 18 bits FP */
		xgifb_reg_or(pVBInfo->P3c4, 0x09, 0x40);
	}

	xgifb_reg_or(pVBInfo->P3c4, 0x1E, 0x01); /* Negative blank polarity */

	/* Default both sync polarities to cleared, then set as needed. */
	xgifb_reg_and(pVBInfo->P3c4, 0x30, ~0x20);
	xgifb_reg_and(pVBInfo->P3c4, 0x35, ~0x80);

	if (ModeNo <= 0x13) {
		/* Standard modes: polarities from the misc output register. */
		misc = (unsigned char) inb(port_3cc);
		if (misc & 0x40)
			/* Hsync polarity */
			xgifb_reg_or(pVBInfo->P3c4, 0x30, 0x20);
		if (misc & 0x80)
			/* Vsync polarity */
			xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
	} else {
		/* Extended modes: polarities from the refresh-rate entry. */
		info_flag = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
		if (info_flag & 0x4000)
			/* Hsync polarity */
			xgifb_reg_or(pVBInfo->P3c4, 0x30, 0x20);
		if (info_flag & 0x8000)
			/* Vsync polarity */
			xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
	}
}
static void XGI_SetXG27LCD(struct vb_device_info *pVBInfo,
		unsigned short RefreshRateTableIndex,
		unsigned short ModeNo)
{
	unsigned short info_flag, cr37, misc;
	unsigned short port_3cc;

	port_3cc = pVBInfo->P3cc;

	/* Default: clear the DVO timing fixup registers. */
	xgifb_reg_set(pVBInfo->P3d4, 0x2E, 0x00);
	xgifb_reg_set(pVBInfo->P3d4, 0x2F, 0x00);
	xgifb_reg_set(pVBInfo->P3d4, 0x46, 0x00);
	xgifb_reg_set(pVBInfo->P3d4, 0x47, 0x00);

	cr37 = xgifb_reg_get(pVBInfo->P3d4, 0x37);
	if ((cr37 & 0x03) == 0) { /* dual 12 */
		xgifb_reg_set(pVBInfo->P3d4, 0x46, 0x13);
		xgifb_reg_set(pVBInfo->P3d4, 0x47, 0x13);
	}

	/* DVO setting 0xC0: load the board-specific CR2E/2F/46/47 values. */
	if (((*pVBInfo->pDVOSetting) & 0xC0) == 0xC0) {
		xgifb_reg_set(pVBInfo->P3d4, 0x2E, *pVBInfo->pCR2E);
		xgifb_reg_set(pVBInfo->P3d4, 0x2F, *pVBInfo->pCR2F);
		xgifb_reg_set(pVBInfo->P3d4, 0x46, *pVBInfo->pCR46);
		xgifb_reg_set(pVBInfo->P3d4, 0x47, *pVBInfo->pCR47);
	}

	XGI_SetXG27FPBits(pVBInfo);

	xgifb_reg_or(pVBInfo->P3c4, 0x1E, 0x01); /* Negative blank polarity */

	/* Default both sync polarities to cleared, then set as needed. */
	xgifb_reg_and(pVBInfo->P3c4, 0x30, ~0x20); /* Hsync polarity */
	xgifb_reg_and(pVBInfo->P3c4, 0x35, ~0x80); /* Vsync polarity */

	if (ModeNo <= 0x13) {
		/* Standard modes: polarities from the misc output register. */
		misc = (unsigned char) inb(port_3cc);
		if (misc & 0x40)
			/* Hsync polarity */
			xgifb_reg_or(pVBInfo->P3c4, 0x30, 0x20);
		if (misc & 0x80)
			/* Vsync polarity */
			xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
	} else {
		/* Extended modes: polarities from the refresh-rate entry. */
		info_flag = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
		if (info_flag & 0x4000)
			/* Hsync polarity */
			xgifb_reg_or(pVBInfo->P3c4, 0x30, 0x20);
		if (info_flag & 0x8000)
			/* Vsync polarity */
			xgifb_reg_or(pVBInfo->P3c4, 0x35, 0x80);
	}
}
/* --------------------------------------------------------------------- */
/* Function : XGI_UpdateXG21CRTC */
/* Input : */
/* Output : CRT1 CRTC */
/* Description : Modify CRT1 Hsync/Vsync to fix LCD mode timing */
/* --------------------------------------------------------------------- */
/* Patch CRT1 H/V blank registers (CR02/03/15/16) from the UpdateCRT1
 * fixup table to correct LCD mode timings; no-op for unlisted modes. */
static void XGI_UpdateXG21CRTC(unsigned short ModeNo,
		struct vb_device_info *pVBInfo,
		unsigned short RefreshRateTableIndex)
{
	int n, tbl = -1;

	xgifb_reg_and(pVBInfo->P3d4, 0x11, 0x7F); /* Unlock CR0~7 */

	if (ModeNo <= 0x13) {
		/* Standard modes: scan the first 12 fixup entries by ID. */
		for (n = 0; n < 12; n++) {
			if (ModeNo == pVBInfo->UpdateCRT1[n].ModeID)
				tbl = n;
		}
	} else if (ModeNo == 0x2E) {
		/* 640x480: entry depends on the refresh rate. */
		unsigned short crt1 = pVBInfo->RefIndex[RefreshRateTableIndex].
				Ext_CRT1CRTC;

		if (crt1 == RES640x480x60)
			tbl = 12;
		else if (crt1 == RES640x480x72)
			tbl = 13;
	} else if (ModeNo == 0x2F) {
		tbl = 14;
	} else if (ModeNo == 0x50) {
		tbl = 15;
	} else if (ModeNo == 0x59) {
		tbl = 16;
	}

	if (tbl != -1) {
		xgifb_reg_set(pVBInfo->P3d4, 0x02,
				pVBInfo->UpdateCRT1[tbl].CR02);
		xgifb_reg_set(pVBInfo->P3d4, 0x03,
				pVBInfo->UpdateCRT1[tbl].CR03);
		xgifb_reg_set(pVBInfo->P3d4, 0x15,
				pVBInfo->UpdateCRT1[tbl].CR15);
		xgifb_reg_set(pVBInfo->P3d4, 0x16,
				pVBInfo->UpdateCRT1[tbl].CR16);
	}
}
/* Program the CRT1 display-end (active width/height) registers. */
static void XGI_SetCRT1DE(struct xgi_hw_device_info *HwDeviceExtension,
		unsigned short ModeNo, unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short resindex, tempax, tempbx, tempcx, temp, modeflag;
	unsigned char data;

	resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);

	if (ModeNo <= 0x13) {
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
		tempax = pVBInfo->StResInfo[resindex].HTotal;
		tempbx = pVBInfo->StResInfo[resindex].VTotal;
	} else {
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		tempax = pVBInfo->ModeResInfo[resindex].HTotal;
		tempbx = pVBInfo->ModeResInfo[resindex].VTotal;
	}

	/* For ModeNo > 0x13 the halving below is undone again just after,
	 * so HalfDCLK effectively only shrinks standard modes here. */
	if (modeflag & HalfDCLK)
		tempax = tempax >> 1;

	if (ModeNo > 0x13) {
		if (modeflag & HalfDCLK)
			tempax = tempax << 1;
		temp = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
		if (temp & InterlaceMode)
			tempbx = tempbx >> 1;
		if (modeflag & DoubleScanMode)
			tempbx = tempbx << 1;
	}

	tempcx = 8;	/* 8 pixels per character clock */

	/* if (!(modeflag & Charx8Dot)) */
	/* tempcx = 9; */

	tempax /= tempcx;	/* pixels -> character clocks */
	tempax -= 1;		/* CRTC registers hold (value - 1) */
	tempbx -= 1;
	tempcx = tempax;
	/* Save CR11 so its lock bit state can be restored at the end. */
	temp = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
	data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
	data &= 0x7F;
	xgifb_reg_set(pVBInfo->P3d4, 0x11, data); /* Unlock CRTC */
	/* CR01: horizontal display end, low byte */
	xgifb_reg_set(pVBInfo->P3d4, 0x01, (unsigned short) (tempcx & 0xff));
	/* NOTE(review): the >> 10 looks odd for placing HDE bits 8-9 into
	 * CR0B[3:2] (>> 6 would do that) — confirm against XGI hw docs. */
	xgifb_reg_and_or(pVBInfo->P3d4, 0x0b, ~0x0c,
			(unsigned short) ((tempcx & 0x0ff00) >> 10));
	/* CR12: vertical display end, low byte */
	xgifb_reg_set(pVBInfo->P3d4, 0x12, (unsigned short) (tempbx & 0xff));
	/* VDE bits 8 and 9 go into CR07 bits 1 and 6. */
	tempax = 0;
	tempbx = tempbx >> 8;

	if (tempbx & 0x01)
		tempax |= 0x02;

	if (tempbx & 0x02)
		tempax |= 0x40;

	xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x42, tempax);
	data = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x07);
	data &= 0xFF;
	/* VDE bit 10 goes into CR0A bit 1. */
	tempax = 0;

	if (tempbx & 0x04)
		tempax |= 0x02;

	xgifb_reg_and_or(pVBInfo->P3d4, 0x0a, ~0x02, tempax);
	/* Restore the original CR11 value (re-locks if it was locked). */
	xgifb_reg_set(pVBInfo->P3d4, 0x11, temp);
}
/* Return the resolution-info index for ModeNo, selecting the standard
 * (St_ResInfo) or extended (Ext_RESINFO) mode-ID table as appropriate. */
unsigned short XGI_GetResInfo(unsigned short ModeNo,
		unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
	return (ModeNo <= 0x13)
		? pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo
		: pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
}
/*
 * XGI_SetCRT1Offset - program the CRT1 logical screen offset and the
 * display-unit threshold.
 * Looks up the base offset for the mode's resolution, scales it by the
 * bytes-per-pixel factor derived from the colour depth, writes it to
 * CR13 (low byte) / SR0E[3:0] (high bits), then derives SR10.
 */
static void XGI_SetCRT1Offset(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	unsigned short temp, ah, al, temp2, i, DisplayUnit;

	/* GetOffset: base value indexed by the mode's resolution info. */
	temp = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeInfo;
	temp = temp >> 8;
	temp = pVBInfo->ScreenOffset[temp];

	temp2 = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
	temp2 &= InterlaceMode;

	if (temp2)
		temp = temp << 1;

	/* Scale factor from the colour-depth class (ModeEGA-relative). */
	temp2 = pVBInfo->ModeType - ModeEGA;
	switch (temp2) {
	case 0:
		temp2 = 1;
		break;
	case 1:
		temp2 = 2;
		break;
	case 2:	/* cases 2 and 3 share the same factor */
	case 3:
		temp2 = 4;
		break;
	case 4:
		temp2 = 6;
		break;
	case 5:
		temp2 = 8;
		break;
	default:
		break;
	}

	if ((ModeNo >= 0x26) && (ModeNo <= 0x28))
		temp = temp * temp2 + temp2 / 2;
	else
		temp *= temp2;

	/* SetOffset: high nibble to SR0E[3:0], low byte to CR13. */
	DisplayUnit = temp;
	temp2 = temp;
	temp = temp >> 8; /* ah */
	temp &= 0x0F;
	i = xgifb_reg_get(pVBInfo->P3c4, 0x0E);
	i &= 0xF0;
	i |= temp;
	xgifb_reg_set(pVBInfo->P3c4, 0x0E, i);

	temp = (unsigned char) temp2;
	temp &= 0xFF; /* al */
	xgifb_reg_set(pVBInfo->P3d4, 0x13, temp);

	/* SetDisplayUnit: derive the SR10 threshold from the offset. */
	temp2 = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_InfoFlag;
	temp2 &= InterlaceMode;
	if (temp2)
		DisplayUnit >>= 1;

	DisplayUnit = DisplayUnit << 5;
	ah = (DisplayUnit & 0xff00) >> 8;
	al = DisplayUnit & 0x00ff;
	if (al == 0)
		ah += 1;
	else
		ah += 2;

	/* Use logical OR (was bitwise '|'): same result for these 0/1
	 * operands, but matches the intent of comparing mode numbers. */
	if (HwDeviceExtension->jChipType >= XG20)
		if ((ModeNo == 0x4A) || (ModeNo == 0x49))
			ah -= 1;
	xgifb_reg_set(pVBInfo->P3c4, 0x10, ah);
}
/*
 * XGI_GetVCLK2Ptr - resolve the VCLK table index for the CRT2 path.
 * Selects among LCD translation tables, TV clock constants, the current
 * misc-output clock, or LVDS/CH7005 tables depending on VBInfo/VBType.
 * Fixes over the vendor code: the identical if/else on ModeNo in the
 * LVDS path is collapsed, the redundant nested SetCRT2ToTV re-check is
 * removed, and the unused local 'resinfo' is dropped. Behavior is
 * unchanged.
 */
static unsigned short XGI_GetVCLK2Ptr(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	unsigned short tempbx;
	/* Per-panel VCLK translation tables, indexed by CRT2Index[1:0]. */
	unsigned short LCDXlat1VCLK[4] = { VCLK65 + 2,
			VCLK65 + 2,
			VCLK65 + 2,
			VCLK65 + 2 };
	unsigned short LCDXlat2VCLK[4] = { VCLK108_2 + 5,
			VCLK108_2 + 5,
			VCLK108_2 + 5,
			VCLK108_2 + 5 };
	unsigned short LVDSXlat1VCLK[4] = { VCLK40, VCLK40, VCLK40, VCLK40 };
	unsigned short LVDSXlat2VCLK[4] = { VCLK65 + 2,
			VCLK65 + 2,
			VCLK65 + 2,
			VCLK65 + 2 };
	unsigned short LVDSXlat3VCLK[4] = { VCLK65 + 2,
			VCLK65 + 2,
			VCLK65 + 2,
			VCLK65 + 2 };
	unsigned short CRT2Index, VCLKIndex;
	unsigned short modeflag;
	unsigned char *CHTVVCLKPtr = NULL;

	if (ModeNo <= 0x13) {
		/* si+St_ResInfo */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
		CRT2Index = pVBInfo->SModeIDTable[ModeIdIndex].St_CRT2CRTC;
	} else {
		/* si+Ext_ResInfo */
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		CRT2Index = pVBInfo->RefIndex[RefreshRateTableIndex].
				Ext_CRT2CRTC;
	}

	if (pVBInfo->IF_DEF_LVDS == 0) {
		CRT2Index = CRT2Index >> 6; /* for LCD */
		if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) { /*301b*/
			if (pVBInfo->LCDResInfo != Panel1024x768)
				VCLKIndex = LCDXlat2VCLK[CRT2Index];
			else
				VCLKIndex = LCDXlat1VCLK[CRT2Index];
		} else if (pVBInfo->VBInfo & SetCRT2ToTV) { /* for TV */
			if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
				if (pVBInfo->SetFlag & RPLLDIV2XO)
					VCLKIndex = HiTVVCLKDIV2 + 25;
				else
					VCLKIndex = HiTVVCLK + 25;

				if (pVBInfo->SetFlag & TVSimuMode) {
					if (modeflag & Charx8Dot)
						VCLKIndex = HiTVSimuVCLK + 25;
					else
						VCLKIndex = HiTVTextVCLK + 25;
				}

				/* 301lv: YPbPr output overrides the above. */
				if (pVBInfo->VBType & VB_XGI301LV) {
					if (!(pVBInfo->VBExtInfo ==
							VB_YPbPr1080i)) {
						VCLKIndex = YPbPr750pVCLK;
						if (!(pVBInfo->VBExtInfo ==
								VB_YPbPr750p)) {
							VCLKIndex =
								YPbPr525pVCLK;
							if (!(pVBInfo->VBExtInfo
									== VB_YPbPr525p)) {
								VCLKIndex =
									YPbPr525iVCLK_2;
								if (!(pVBInfo->SetFlag
										& RPLLDIV2XO))
									VCLKIndex =
										YPbPr525iVCLK;
							}
						}
					}
				}
			} else {
				/* Plain TV (the SetCRT2ToTV re-check the
				 * vendor code did here was always true). */
				if (pVBInfo->SetFlag & RPLLDIV2XO)
					VCLKIndex = TVVCLKDIV2 + 25;
				else
					VCLKIndex = TVVCLK + 25;
			}
		} else { /* for CRT2 */
			/* Port 3cch: current clock-select bits. */
			VCLKIndex = (unsigned char) inb(
					(pVBInfo->P3ca + 0x02));
			VCLKIndex = ((VCLKIndex >> 2) & 0x03);
			if (ModeNo > 0x13) {
				/* di+Ext_CRTVCLK */
				VCLKIndex = pVBInfo->RefIndex[
						RefreshRateTableIndex].
						Ext_CRTVCLK;
				VCLKIndex &= IndexMask;
			}
		}
	} else { /* LVDS */
		/* Both the standard- and extended-mode branches of the
		 * vendor code assigned the same value here. */
		VCLKIndex = CRT2Index;

		if (pVBInfo->IF_DEF_CH7005 == 1) {
			if (!(pVBInfo->VBInfo & SetCRT2ToLCD)) {
				VCLKIndex &= 0x1f;
				/* tempbx: 0=NTSC, 1=NTSC+overscan,
				 * 2=PAL, 3=PAL+overscan. */
				tempbx = 0;

				if (pVBInfo->VBInfo & SetPALTV)
					tempbx += 2;

				if (pVBInfo->VBInfo & SetCHTVOverScan)
					tempbx += 1;

				switch (tempbx) {
				case 0:
					CHTVVCLKPtr = pVBInfo->CHTVVCLKUNTSC;
					break;
				case 1:
					CHTVVCLKPtr = pVBInfo->CHTVVCLKONTSC;
					break;
				case 2:
					CHTVVCLKPtr = pVBInfo->CHTVVCLKUPAL;
					break;
				case 3:
					CHTVVCLKPtr = pVBInfo->CHTVVCLKOPAL;
					break;
				default:
					break;
				}

				VCLKIndex = CHTVVCLKPtr[VCLKIndex];
			}
		} else {
			VCLKIndex = VCLKIndex >> 6;
			if ((pVBInfo->LCDResInfo == Panel800x600) ||
			    (pVBInfo->LCDResInfo == Panel320x480))
				VCLKIndex = LVDSXlat1VCLK[VCLKIndex];
			else if ((pVBInfo->LCDResInfo == Panel1024x768) ||
				 (pVBInfo->LCDResInfo == Panel1024x768x75))
				VCLKIndex = LVDSXlat2VCLK[VCLKIndex];
			else
				VCLKIndex = LVDSXlat3VCLK[VCLKIndex];
		}
	}

	/* VCLKIndex = VCLKIndex&IndexMask; */
	return VCLKIndex;
}
/*
 * XGI_SetCRT1VCLK - program the CRT1 pixel clock PLL (SR2B/SR2C/SR2D).
 * The vendor code had three branches of which the first (LVDS) and the
 * last (plain CRT) were byte-identical; they are merged here, with the
 * bridge/LCDA case kept as the exception. Behavior is unchanged.
 */
static void XGI_SetCRT1VCLK(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		struct xgi_hw_device_info *HwDeviceExtension,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned char index, data;
	unsigned short vclkindex;

	if ((pVBInfo->IF_DEF_LVDS != 1) &&
	    (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV |
				VB_XGI302LV | VB_XGI301C)) &&
	    (pVBInfo->VBInfo & SetCRT2ToLCDA)) {
		/* LCDA through a 301-series bridge: Part4 PLL values. */
		vclkindex = XGI_GetVCLK2Ptr(ModeNo, ModeIdIndex,
				RefreshRateTableIndex, HwDeviceExtension,
				pVBInfo);
		data = xgifb_reg_get(pVBInfo->P3c4, 0x31) & 0xCF;
		xgifb_reg_set(pVBInfo->P3c4, 0x31, data);
		data = pVBInfo->VBVCLKData[vclkindex].Part4_A;
		xgifb_reg_set(pVBInfo->P3c4, 0x2B, data);
		data = pVBInfo->VBVCLKData[vclkindex].Part4_B;
		xgifb_reg_set(pVBInfo->P3c4, 0x2C, data);
		xgifb_reg_set(pVBInfo->P3c4, 0x2D, 0x01);
	} else {
		/* LVDS and plain CRT share the same programming: SR2B/SR2C
		 * from the CRT VCLK table for this refresh rate. */
		index = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
		data = xgifb_reg_get(pVBInfo->P3c4, 0x31) & 0xCF;
		xgifb_reg_set(pVBInfo->P3c4, 0x31, data);
		xgifb_reg_set(pVBInfo->P3c4, 0x2B,
				pVBInfo->VCLKData[index].SR2B);
		xgifb_reg_set(pVBInfo->P3c4, 0x2C,
				pVBInfo->VCLKData[index].SR2C);
		xgifb_reg_set(pVBInfo->P3c4, 0x2D, 0x01);
	}

	if (HwDeviceExtension->jChipType >= XG20) {
		if (pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag &
				HalfDCLK) {
			/* Halve the dot clock: SR2C[4:0] -> 2N+1, keeping
			 * the top three bits. (The SR2B read-back/write is
			 * kept from the vendor code.) */
			data = xgifb_reg_get(pVBInfo->P3c4, 0x2B);
			xgifb_reg_set(pVBInfo->P3c4, 0x2B, data);
			data = xgifb_reg_get(pVBInfo->P3c4, 0x2C);
			index = data;
			index &= 0xE0;
			data &= 0x1F;
			data = data << 1;
			data += 1;
			data |= index;
			xgifb_reg_set(pVBInfo->P3c4, 0x2C, data);
		}
	}
}
/* Configure the CRT1 display FIFO thresholds (SR08/SR09/SR3D). */
static void XGI_SetCRT1FIFO(unsigned short ModeNo,
		struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	unsigned short data;

	data = xgifb_reg_get(pVBInfo->P3c4, 0x3D);
	data &= 0xfe;
	xgifb_reg_set(pVBInfo->P3c4, 0x3D, data); /* disable auto-threshold */

	if (ModeNo > 0x13) {
		/* Extended modes: fixed thresholds, then re-enable
		 * auto-threshold (SR3D[0]). */
		xgifb_reg_set(pVBInfo->P3c4, 0x08, 0x34);
		data = xgifb_reg_get(pVBInfo->P3c4, 0x09);
		data &= 0xC0;
		xgifb_reg_set(pVBInfo->P3c4, 0x09, data | 0x30);
		data = xgifb_reg_get(pVBInfo->P3c4, 0x3D);
		data |= 0x01;
		xgifb_reg_set(pVBInfo->P3c4, 0x3D, data);
	} else {
		/* Standard modes: chip-specific fixed thresholds. */
		if (HwDeviceExtension->jChipType == XG27) {
			xgifb_reg_set(pVBInfo->P3c4, 0x08, 0x0E);
			data = xgifb_reg_get(pVBInfo->P3c4, 0x09);
			data &= 0xC0;
			xgifb_reg_set(pVBInfo->P3c4, 0x09, data | 0x20);
		} else {
			xgifb_reg_set(pVBInfo->P3c4, 0x08, 0xAE);
			data = xgifb_reg_get(pVBInfo->P3c4, 0x09);
			data &= 0xF0;
			xgifb_reg_set(pVBInfo->P3c4, 0x09, data);
		}
	}

	if (HwDeviceExtension->jChipType == XG21)
		XGI_SetXG21FPBits(pVBInfo); /* Fix SR9[7:6] can't read back */
}
/* Adjust clock-rate-dependent state (SR32/SR1F/SR07/SR40) for the mode's
 * pixel clock. */
static void XGI_SetVCLKState(struct xgi_hw_device_info *HwDeviceExtension,
		unsigned short ModeNo, unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short data, data2 = 0;
	short VCLK;
	unsigned char index;

	if (ModeNo <= 0x13)
		VCLK = 0;	/* standard modes: treat as a slow clock */
	else {
		index = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
		index &= IndexMask;
		VCLK = pVBInfo->VCLKData[index].CLOCK;
	}

	data = xgifb_reg_get(pVBInfo->P3c4, 0x32);
	data &= 0xf3;
	if (VCLK >= 200)
		data |= 0x0c; /* VCLK >= 200 */

	if (HwDeviceExtension->jChipType >= XG20)
		data &= ~0x04; /* 2 pixel mode */

	xgifb_reg_set(pVBInfo->P3c4, 0x32, data);

	if (HwDeviceExtension->jChipType < XG20) {
		data = xgifb_reg_get(pVBInfo->P3c4, 0x1F);
		data &= 0xE7;
		if (VCLK < 200)
			data |= 0x10;
		xgifb_reg_set(pVBInfo->P3c4, 0x1F, data);
	}

	/* The graded VCLK ranges below were disabled in favour of a fixed
	 * value (Advantech LCD ripple issue, per the vendor comment). */
	/* Jong for Adavantech LCD ripple issue
	if ((VCLK >= 0) && (VCLK < 135))
		data2 = 0x03;
	else if ((VCLK >= 135) && (VCLK < 160))
		data2 = 0x02;
	else if ((VCLK >= 160) && (VCLK < 260))
		data2 = 0x01;
	else if (VCLK > 260)
		data2 = 0x00;
	*/
	data2 = 0x00;

	xgifb_reg_and_or(pVBInfo->P3c4, 0x07, 0xFC, data2);
	if (HwDeviceExtension->jChipType >= XG27)
		xgifb_reg_and_or(pVBInfo->P3c4, 0x40, 0xFC, data2 & 0x03);
}
/* Program miscellaneous CRT1 mode registers: SR06 (mode-type bits),
 * CR19 (interlace fixups), SR01/SR0F/SR21, then delegate clock-state
 * setup and finish with chip-specific CR51/CR52 values. */
static void XGI_SetCRT1ModeRegs(struct xgi_hw_device_info *HwDeviceExtension,
		unsigned short ModeNo, unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short data, data2, data3, infoflag = 0, modeflag, resindex,
			xres;

	if (ModeNo > 0x13) {
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		infoflag = pVBInfo->RefIndex[RefreshRateTableIndex].
				Ext_InfoFlag;
	} else
		/* si+St_ModeFlag */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;

	if (xgifb_reg_get(pVBInfo->P3d4, 0x31) & 0x01)
		xgifb_reg_and_or(pVBInfo->P3c4, 0x1F, 0x3F, 0x00);

	if (ModeNo > 0x13)
		data = infoflag;
	else
		data = 0;

	data2 = 0;

	if (ModeNo > 0x13) {
		if (pVBInfo->ModeType > 0x02) {
			data2 |= 0x02;
			/* Mode-type class shifted into SR06 bits [4:2]. */
			data3 = pVBInfo->ModeType - ModeVGA;
			data3 = data3 << 2;
			data2 |= data3;
		}
	}

	data &= InterlaceMode;

	if (data)
		data2 |= 0x20;

	xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x3F, data2);
	/* xgifb_reg_set(pVBInfo->P3c4,0x06,data2); */
	resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
	if (ModeNo <= 0x13)
		xres = pVBInfo->StResInfo[resindex].HTotal;
	else
		xres = pVBInfo->ModeResInfo[resindex].HTotal; /* xres->ax */

	/* CR19 fixup for interlaced 1024/1280-wide modes. */
	data = 0x0000;
	if (infoflag & InterlaceMode) {
		if (xres == 1024)
			data = 0x0035;
		else if (xres == 1280)
			data = 0x0048;
	}

	data2 = data & 0x00FF;
	/* NOTE(review): the 0xFF AND-mask makes the first write a plain OR;
	 * the 0xFC mask below then clears CR19[1:0] again — confirm this
	 * double write is intentional. */
	xgifb_reg_and_or(pVBInfo->P3d4, 0x19, 0xFF, data2);
	data2 = (data & 0xFF00) >> 8;
	xgifb_reg_and_or(pVBInfo->P3d4, 0x19, 0xFC, data2);

	if (modeflag & HalfDCLK)
		xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xF7, 0x08);

	data2 = 0;

	if (modeflag & LineCompareOff)
		data2 |= 0x08;

	if (ModeNo > 0x13) {
		if (pVBInfo->ModeType == ModeEGA)
			data2 |= 0x40;
	}

	xgifb_reg_and_or(pVBInfo->P3c4, 0x0F, ~0x48, data2);
	/* SR21[7:5]: derived from the mode class by XOR ladder. */
	data = 0x60;
	if (pVBInfo->ModeType != ModeText) {
		data = data ^ 0x60;
		if (pVBInfo->ModeType != ModeEGA)
			data = data ^ 0xA0;
	}
	xgifb_reg_and_or(pVBInfo->P3c4, 0x21, 0x1F, data);

	XGI_SetVCLKState(HwDeviceExtension, ModeNo, RefreshRateTableIndex,
			pVBInfo);

	/* if (modeflag&HalfDCLK) //030305 fix lowresolution bug */
	/* if (XGINew_IF_DEF_NEW_LOWRES) */
	/* XGI_VesaLowResolution(ModeNo, ModeIdIndex);
	 * //030305 fix lowresolution bug */

	/* CR52 (and CR51 on newer chips): chip-specific value keyed off
	 * CR31 bit 6. */
	data = xgifb_reg_get(pVBInfo->P3d4, 0x31);

	if (HwDeviceExtension->jChipType == XG27) {
		if (data & 0x40)
			data = 0x2c;
		else
			data = 0x6c;
		xgifb_reg_set(pVBInfo->P3d4, 0x52, data);
		xgifb_reg_or(pVBInfo->P3d4, 0x51, 0x10);
	} else if (HwDeviceExtension->jChipType >= XG20) {
		if (data & 0x40)
			data = 0x33;
		else
			data = 0x73;
		xgifb_reg_set(pVBInfo->P3d4, 0x52, data);
		xgifb_reg_set(pVBInfo->P3d4, 0x51, 0x02);
	} else {
		if (data & 0x40)
			data = 0x2c;
		else
			data = 0x6c;
		xgifb_reg_set(pVBInfo->P3d4, 0x52, data);
	}
}
/*
void XGI_VesaLowResolution(unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short modeflag;
if (ModeNo > 0x13)
modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
else
modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
if (ModeNo > 0x13) {
if (modeflag & DoubleScanMode) {
if (modeflag & HalfDCLK) {
if (pVBInfo->VBType & VB_XGI301B |
VB_XGI302B |
VB_XGI301LV |
VB_XGI302LV |
VB_XGI301C)) {
if (!(pVBInfo->VBInfo &
SetCRT2ToRAMDAC)) {
if (pVBInfo->VBInfo &
SetInSlaveMode) {
xgifb_reg_and_or(
pVBInfo->P3c4,
0x01,
0xf7,
0x00);
xgifb_reg_and_or(
pVBInfo->P3c4,
0x0f,
0x7f,
0x00);
return;
}
}
}
xgifb_reg_and_or(pVBInfo->P3c4,
0x0f,
0xff,
0x80);
xgifb_reg_and_or(pVBInfo->P3c4,
0x01,
0xf7,
0x00);
return;
}
}
}
xgifb_reg_and_or(pVBInfo->P3c4, 0x0f, 0x7f, 0x00);
}
*/
/*
 * XGI_WriteDAC - emit one three-component entry to the DAC data port.
 * dl selects a permutation of the three values (0, 1, or other) so the
 * same table bytes can be written in different component orders.
 */
static void XGI_WriteDAC(unsigned short dl,
		unsigned short ah,
		unsigned short al,
		unsigned short dh,
		struct vb_device_info *pVBInfo)
{
	unsigned short swap, first, second;

	first = ah;
	second = al;
	if (dl != 0) {
		/* dl==1 and dl>1 rotate the triple in opposite ways. */
		swap = first;
		first = dh;
		dh = swap;
		if (dl == 1) {
			swap = second;
			second = dh;
			dh = swap;
		} else {
			swap = second;
			second = first;
			first = swap;
		}
	}
	outb((unsigned short) dh, pVBInfo->P3c9);
	outb((unsigned short) first, pVBInfo->P3c9);
	outb((unsigned short) second, pVBInfo->P3c9);
}
/*
 * XGI_LoadDAC - load the palette DAC for the given mode.
 * Picks the MDA/CGA/EGA/VGA default table from the mode's DAC info
 * flags; VGA modes additionally write a greyscale block and then walk
 * the table with three cursors (si/di/bx), emitting permuted triples
 * via XGI_WriteDAC.
 */
static void XGI_LoadDAC(unsigned short ModeNo, unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short data, data2, time, i, j, k, m, n, o, si, di, bx, dl, al,
			ah, dh, *table = NULL;

	if (ModeNo <= 0x13)
		data = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	else
		data = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	data &= DACInfoFlag;
	time = 64;

	if (data == 0x00)
		table = XGINew_MDA_DAC;
	else if (data == 0x08)
		table = XGINew_CGA_DAC;
	else if (data == 0x10)
		table = XGINew_EGA_DAC;
	else if (data == 0x18) {
		time = 256;
		table = XGINew_VGA_DAC;
	}

	if (time == 256)
		j = 16;
	else
		j = time;

	/* Open the full pel mask, start writing at palette index 0. */
	outb(0xFF, pVBInfo->P3c6);
	outb(0x00, pVBInfo->P3c8);

	for (i = 0; i < j; i++) {
		data = table[i];
		/* Each table byte packs three 2-bit components; expand each
		 * to a 6-bit DAC value in 0x15/0x2A steps. */
		for (k = 0; k < 3; k++) {
			data2 = 0;

			if (data & 0x01)
				data2 = 0x2A;

			if (data & 0x02)
				data2 += 0x15;

			outb(data2, pVBInfo->P3c9);
			data = data >> 2;
		}
	}

	if (time == 256) {
		/* Entries 16-31: same table value for all three components. */
		for (i = 16; i < 32; i++) {
			data = table[i];

			for (k = 0; k < 3; k++)
				outb(data, pVBInfo->P3c9);
		}

		/* Remaining entries: three cursors walk the table; dl
		 * rotates the component order inside XGI_WriteDAC. The
		 * si increments/decrements are order-critical. */
		si = 32;

		for (m = 0; m < 9; m++) {
			di = si;
			bx = si + 0x04;
			dl = 0;

			for (n = 0; n < 3; n++) {
				for (o = 0; o < 5; o++) {
					dh = table[si];
					ah = table[di];
					al = table[bx];
					si++;
					XGI_WriteDAC(dl, ah, al, dh, pVBInfo);
				}

				si -= 2;

				for (o = 0; o < 3; o++) {
					dh = table[bx];
					ah = table[di];
					al = table[si];
					si--;
					XGI_WriteDAC(dl, ah, al, dh, pVBInfo);
				}

				dl++;
			}

			si += 5;
		}
	}
}
/* Fill pVBInfo->{VGAHDE,HDE,VGAVDE,VDE} with the mode's display size. */
static void XGI_GetLVDSResInfo(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short resindex, xres, yres, modeflag;

	/* NOTE(review): 'modeflag' is loaded from the *_ResInfo fields,
	 * not *_ModeFlag (cf. the commented-out lines below), yet it is
	 * tested against HalfDCLK/DoubleScanMode further down. Looks like
	 * a long-standing vendor bug — confirm before changing, since a
	 * "fix" alters behavior. */
	if (ModeNo <= 0x13)
		/* si+St_ResInfo */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
	else
		/* si+Ext_ResInfo */
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;

	/* if (ModeNo > 0x13) */
	/* modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag; */
	/* else */
	/* modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag; */

	if (ModeNo <= 0x13)
		/* si+St_ResInfo */
		resindex = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
	else
		/* si+Ext_ResInfo */
		resindex = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;

	/* resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo); */
	if (ModeNo <= 0x13) {
		xres = pVBInfo->StResInfo[resindex].HTotal;
		yres = pVBInfo->StResInfo[resindex].VTotal;
	} else {
		xres = pVBInfo->ModeResInfo[resindex].HTotal;
		yres = pVBInfo->ModeResInfo[resindex].VTotal;
	}

	if (ModeNo > 0x13) {
		if (modeflag & HalfDCLK)
			xres = xres << 1;

		if (modeflag & DoubleScanMode)
			yres = yres << 1;
	}

	/* 720-wide text modes are treated as 640 for the panel. */
	/* if (modeflag & Charx8Dot) */
	/* { */
	if (xres == 720)
		xres = 640;

	/* } */
	pVBInfo->VGAHDE = xres;
	pVBInfo->HDE = xres;
	pVBInfo->VGAVDE = yres;
	pVBInfo->VDE = yres;
}
/*
 * Look up a pointer into one of the LCD parameter tables.
 *
 * BX selects which family of tables is consulted (0/1: CRT1 H/V timing,
 * 2: LVDS data, 3: LVDS "Des" data, 4: LCD data, 5: LCD "Des" data,
 * 6: CH-chip register data); bit 7 switches the panel match from
 * LCDResInfo to LCDTypeInfo (OEM utility).  The per-mode CRTC index
 * (tempal) then selects the entry inside the chosen table.
 *
 * Returns NULL when no table exists for BX or no dispatch entry matches.
 * Callers must check for NULL before dereferencing.
 *
 * Cleanup vs. original: the unreachable `break` statements that followed
 * every `return` have been removed; behavior is unchanged.
 */
static void *XGI_GetLcdPtr(unsigned short BX, unsigned short ModeNo,
		unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short i, tempdx, tempcx, tempbx, tempal, modeflag, table;
	struct XGI330_LCDDataTablStruct *tempdi = NULL;

	tempbx = BX;

	if (ModeNo <= 0x13) {
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
		tempal = pVBInfo->SModeIDTable[ModeIdIndex].St_CRT2CRTC;
	} else {
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		tempal = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
	}

	tempal = tempal & 0x0f;

	if (tempbx <= 1) { /* ExpLink: CRT1 H/V timing tables */
		if (ModeNo <= 0x13) {
			/* find no Ext_CRT2CRTC2 */
			tempal = pVBInfo->SModeIDTable[ModeIdIndex].St_CRT2CRTC;
		} else {
			tempal = pVBInfo->RefIndex[RefreshRateTableIndex].
					Ext_CRT2CRTC;
		}

		if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
			if (ModeNo <= 0x13)
				tempal = pVBInfo->SModeIDTable[ModeIdIndex].
						St_CRT2CRTC2;
			else
				tempal = pVBInfo->RefIndex[
						RefreshRateTableIndex].
						Ext_CRT2CRTC2;
		}

		/* BX==1 (vertical) uses the high nibble of the index. */
		if (tempbx & 0x01)
			tempal = (tempal >> 4);

		tempal = (tempal & 0x0f);
	}

	/* NOTE(review): tempcx is computed here but never used afterwards --
	 * a leftover from the asm origin, kept to avoid behavior questions
	 * and unused-variable churn. */
	tempcx = LCDLenList[tempbx]; /* mov cl,byte ptr cs:LCDLenList[bx] */

	if (pVBInfo->LCDInfo & EnableScalingLCD) { /* ScaleLCD */
		if ((tempbx == 5) || (tempbx) == 7)
			tempcx = LCDDesDataLen2;
		else if ((tempbx == 3) || (tempbx == 8))
			tempcx = LVDSDesDataLen2;
	}

	/* Pick the dispatch table for this BX family. */
	switch (tempbx) {
	case 0:
		tempdi = XGI_EPLLCDCRT1Ptr_H;
		break;
	case 1:
		tempdi = XGI_EPLLCDCRT1Ptr_V;
		break;
	case 2:
		tempdi = XGI_EPLLCDDataPtr;
		break;
	case 3:
		tempdi = XGI_EPLLCDDesDataPtr;
		break;
	case 4:
		tempdi = XGI_LCDDataTable;
		break;
	case 5:
		tempdi = XGI_LCDDesDataTable;
		break;
	case 6:
		tempdi = XGI_EPLCHLCDRegPtr;
		break;
	case 7:
	case 8:
	case 9:
		tempdi = NULL;
		break;
	default:
		break;
	}

	if (tempdi == NULL) /* OEMUtil */
		return NULL;

	table = tempbx;
	i = 0;

	/* Scan the dispatch table for an entry matching panel id and
	 * capability mask.  tempbx is reused as scratch inside the loop. */
	while (tempdi[i].PANELID != 0xff) {
		tempdx = pVBInfo->LCDResInfo;

		if (tempbx & 0x0080) { /* OEMUtil: match on panel type */
			tempbx &= (~0x0080);
			tempdx = pVBInfo->LCDTypeInfo;
		}

		if (pVBInfo->LCDInfo & EnableScalingLCD)
			tempdx &= (~PanelResInfo);

		if (tempdi[i].PANELID == tempdx) {
			tempbx = tempdi[i].MASK;
			tempdx = pVBInfo->LCDInfo;

			if (ModeNo <= 0x13) /* alan 09/10/2003 */
				tempdx |= SetLCDStdMode;

			if (modeflag & HalfDCLK)
				tempdx |= SetLCDLowResolution;

			tempbx &= tempdx;
			if (tempbx == tempdi[i].CAP)
				break;
		}
		i++;
	}

	if (table == 0) {
		/* CRT1 horizontal timing */
		switch (tempdi[i].DATAPTR) {
		case 0: return &XGI_LVDSCRT11024x768_1_H[tempal];
		case 1: return &XGI_LVDSCRT11024x768_2_H[tempal];
		case 2: return &XGI_LVDSCRT11280x1024_1_H[tempal];
		case 3: return &XGI_LVDSCRT11280x1024_2_H[tempal];
		case 4: return &XGI_LVDSCRT11400x1050_1_H[tempal];
		case 5: return &XGI_LVDSCRT11400x1050_2_H[tempal];
		case 6: return &XGI_LVDSCRT11600x1200_1_H[tempal];
		case 7: return &XGI_LVDSCRT11024x768_1_Hx75[tempal];
		case 8: return &XGI_LVDSCRT11024x768_2_Hx75[tempal];
		case 9: return &XGI_LVDSCRT11280x1024_1_Hx75[tempal];
		case 10: return &XGI_LVDSCRT11280x1024_2_Hx75[tempal];
		default:
			break;
		}
	} else if (table == 1) {
		/* CRT1 vertical timing */
		switch (tempdi[i].DATAPTR) {
		case 0: return &XGI_LVDSCRT11024x768_1_V[tempal];
		case 1: return &XGI_LVDSCRT11024x768_2_V[tempal];
		case 2: return &XGI_LVDSCRT11280x1024_1_V[tempal];
		case 3: return &XGI_LVDSCRT11280x1024_2_V[tempal];
		case 4: return &XGI_LVDSCRT11400x1050_1_V[tempal];
		case 5: return &XGI_LVDSCRT11400x1050_2_V[tempal];
		case 6: return &XGI_LVDSCRT11600x1200_1_V[tempal];
		case 7: return &XGI_LVDSCRT11024x768_1_Vx75[tempal];
		case 8: return &XGI_LVDSCRT11024x768_2_Vx75[tempal];
		case 9: return &XGI_LVDSCRT11280x1024_1_Vx75[tempal];
		case 10: return &XGI_LVDSCRT11280x1024_2_Vx75[tempal];
		default:
			break;
		}
	} else if (table == 2) {
		/* LVDS data */
		switch (tempdi[i].DATAPTR) {
		case 0: return &XGI_LVDS1024x768Data_1[tempal];
		case 1: return &XGI_LVDS1024x768Data_2[tempal];
		case 2: return &XGI_LVDS1280x1024Data_1[tempal];
		case 3: return &XGI_LVDS1280x1024Data_2[tempal];
		case 4: return &XGI_LVDS1400x1050Data_1[tempal];
		case 5: return &XGI_LVDS1400x1050Data_2[tempal];
		case 6: return &XGI_LVDS1600x1200Data_1[tempal];
		case 7: return &XGI_LVDSNoScalingData[tempal];
		case 8: return &XGI_LVDS1024x768Data_1x75[tempal];
		case 9: return &XGI_LVDS1024x768Data_2x75[tempal];
		case 10: return &XGI_LVDS1280x1024Data_1x75[tempal];
		case 11: return &XGI_LVDS1280x1024Data_2x75[tempal];
		case 12: return &XGI_LVDSNoScalingDatax75[tempal];
		default:
			break;
		}
	} else if (table == 3) {
		/* LVDS "Des" (display enable start) data */
		switch (tempdi[i].DATAPTR) {
		case 0: return &XGI_LVDS1024x768Des_1[tempal];
		case 1: return &XGI_LVDS1024x768Des_3[tempal];
		case 2: return &XGI_LVDS1024x768Des_2[tempal];
		case 3: return &XGI_LVDS1280x1024Des_1[tempal];
		case 4: return &XGI_LVDS1280x1024Des_2[tempal];
		case 5: return &XGI_LVDS1400x1050Des_1[tempal];
		case 6: return &XGI_LVDS1400x1050Des_2[tempal];
		case 7: return &XGI_LVDS1600x1200Des_1[tempal];
		case 8: return &XGI_LVDSNoScalingDesData[tempal];
		case 9: return &XGI_LVDS1024x768Des_1x75[tempal];
		case 10: return &XGI_LVDS1024x768Des_3x75[tempal];
		case 11: return &XGI_LVDS1024x768Des_2x75[tempal];
		case 12: return &XGI_LVDS1280x1024Des_1x75[tempal];
		case 13: return &XGI_LVDS1280x1024Des_2x75[tempal];
		case 14: return &XGI_LVDSNoScalingDesDatax75[tempal];
		default:
			break;
		}
	} else if (table == 4) {
		/* LCD data.
		 * NOTE(review): cases 12 and 13 both return
		 * XGI_ExtLCD1024x768x75Data; by the pattern of the other
		 * entries case 13 looks like it should be the "St" table --
		 * preserved as-is, confirm against the vendor source. */
		switch (tempdi[i].DATAPTR) {
		case 0: return &XGI_ExtLCD1024x768Data[tempal];
		case 1: return &XGI_StLCD1024x768Data[tempal];
		case 2: return &XGI_CetLCD1024x768Data[tempal];
		case 3: return &XGI_ExtLCD1280x1024Data[tempal];
		case 4: return &XGI_StLCD1280x1024Data[tempal];
		case 5: return &XGI_CetLCD1280x1024Data[tempal];
		case 6: return &XGI_ExtLCD1400x1050Data[tempal];
		case 7: return &XGI_StLCD1400x1050Data[tempal];
		case 8: return &XGI_CetLCD1400x1050Data[tempal];
		case 9: return &XGI_ExtLCD1600x1200Data[tempal];
		case 10: return &XGI_StLCD1600x1200Data[tempal];
		case 11: return &XGI_NoScalingData[tempal];
		case 12: return &XGI_ExtLCD1024x768x75Data[tempal];
		case 13: return &XGI_ExtLCD1024x768x75Data[tempal];
		case 14: return &XGI_CetLCD1024x768x75Data[tempal];
		case 15: return &XGI_ExtLCD1280x1024x75Data[tempal];
		case 16: return &XGI_StLCD1280x1024x75Data[tempal];
		case 17: return &XGI_CetLCD1280x1024x75Data[tempal];
		case 18: return &XGI_NoScalingDatax75[tempal];
		default:
			break;
		}
	} else if (table == 5) {
		/* LCD "Des" data; LV-class bridges (30xLV) use the
		 * dual-link (DL) variants of several tables. */
		switch (tempdi[i].DATAPTR) {
		case 0: return &XGI_ExtLCDDes1024x768Data[tempal];
		case 1: return &XGI_StLCDDes1024x768Data[tempal];
		case 2: return &XGI_CetLCDDes1024x768Data[tempal];
		case 3:
			if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV))
				return &XGI_ExtLCDDLDes1280x1024Data[tempal];
			return &XGI_ExtLCDDes1280x1024Data[tempal];
		case 4:
			if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV))
				return &XGI_StLCDDLDes1280x1024Data[tempal];
			return &XGI_StLCDDes1280x1024Data[tempal];
		case 5:
			if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV))
				return &XGI_CetLCDDLDes1280x1024Data[tempal];
			return &XGI_CetLCDDes1280x1024Data[tempal];
		case 6:
			if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV))
				return &XGI_ExtLCDDLDes1400x1050Data[tempal];
			return &XGI_ExtLCDDes1400x1050Data[tempal];
		case 7:
			if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV))
				return &XGI_StLCDDLDes1400x1050Data[tempal];
			return &XGI_StLCDDes1400x1050Data[tempal];
		case 8: return &XGI_CetLCDDes1400x1050Data[tempal];
		case 9: return &XGI_CetLCDDes1400x1050Data2[tempal];
		case 10:
			if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV))
				return &XGI_ExtLCDDLDes1600x1200Data[tempal];
			return &XGI_ExtLCDDes1600x1200Data[tempal];
		case 11:
			if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV))
				return &XGI_StLCDDLDes1600x1200Data[tempal];
			return &XGI_StLCDDes1600x1200Data[tempal];
		case 12: return &XGI_NoScalingDesData[tempal];
		case 13: return &XGI_ExtLCDDes1024x768x75Data[tempal];
		case 14: return &XGI_StLCDDes1024x768x75Data[tempal];
		case 15: return &XGI_CetLCDDes1024x768x75Data[tempal];
		case 16:
			if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV))
				return &XGI_ExtLCDDLDes1280x1024x75Data[tempal];
			return &XGI_ExtLCDDes1280x1024x75Data[tempal];
		case 17:
			if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV))
				return &XGI_StLCDDLDes1280x1024x75Data[tempal];
			return &XGI_StLCDDes1280x1024x75Data[tempal];
		case 18:
			if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV))
				return &XGI_CetLCDDLDes1280x1024x75Data[tempal];
			return &XGI_CetLCDDes1280x1024x75Data[tempal];
		case 19: return &XGI_NoScalingDesDatax75[tempal];
		default:
			break;
		}
	} else if (table == 6) {
		/* CH7017 register data */
		switch (tempdi[i].DATAPTR) {
		case 0: return &XGI_CH7017LV1024x768[tempal];
		case 1: return &XGI_CH7017LV1400x1050[tempal];
		default:
			break;
		}
	}
	return NULL;
}
/*
 * Look up a pointer into one of the TV parameter tables.
 *
 * BX selects the table family (0/1: CH7007 CRT1 timing, 2: CH TV data,
 * 4: generic TV data, 6: CH TV register data); the mode's CRT2 CRTC
 * index selects the entry.  The dispatch entry is matched against the
 * current TVInfo (plus lock/low-res bits derived from slave mode and
 * HalfDCLK).
 *
 * Returns NULL when no table exists for BX, or when the matched family
 * has no data branch here (tables 0, 1 and 6 only locate the dispatch
 * entry for other callers' bookkeeping in the original code and fall
 * through to NULL).  Callers must check for NULL.
 *
 * Cleanup vs. original: unreachable `break` statements after `return`
 * removed; behavior unchanged.
 */
static void *XGI_GetTVPtr(unsigned short BX, unsigned short ModeNo,
		unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short i, tempdx, tempbx, tempal, modeflag, table;
	struct XGI330_TVDataTablStruct *tempdi = NULL;

	tempbx = BX;

	if (ModeNo <= 0x13) {
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
		tempal = pVBInfo->SModeIDTable[ModeIdIndex].St_CRT2CRTC;
	} else {
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		tempal = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
	}

	tempal = tempal & 0x3f;
	table = tempbx;

	switch (tempbx) {
	case 0:
		tempdi = NULL; /*EPLCHTVCRT1Ptr_H;*/
		if (pVBInfo->IF_DEF_CH7007 == 1)
			tempdi = XGI_EPLCHTVCRT1Ptr;
		break;
	case 1:
		tempdi = NULL; /*EPLCHTVCRT1Ptr_V;*/
		if (pVBInfo->IF_DEF_CH7007 == 1)
			tempdi = XGI_EPLCHTVCRT1Ptr;
		break;
	case 2:
		tempdi = XGI_EPLCHTVDataPtr;
		break;
	case 3:
		tempdi = NULL;
		break;
	case 4:
		tempdi = XGI_TVDataTable;
		break;
	case 5:
		tempdi = NULL;
		break;
	case 6:
		tempdi = XGI_EPLCHTVRegPtr;
		break;
	default:
		break;
	}

	if (tempdi == NULL) /* OEMUtil */
		return NULL;

	tempdx = pVBInfo->TVInfo;

	if (pVBInfo->VBInfo & SetInSlaveMode)
		tempdx = tempdx | SetTVLockMode;

	if (modeflag & HalfDCLK)
		tempdx = tempdx | SetTVLowResolution;

	/* Find the first dispatch entry whose masked TVInfo matches. */
	i = 0;
	while (tempdi[i].MASK != 0xffff) {
		if ((tempdx & tempdi[i].MASK) == tempdi[i].CAP)
			break;
		i++;
	}

	/* 07/05/22 */
	if (table == 0x00) {
		/* intentionally empty (CH7007 CRT1 H handled elsewhere) */
	} else if (table == 0x01) {
		/* intentionally empty (CH7007 CRT1 V handled elsewhere) */
	} else if (table == 0x04) {
		switch (tempdi[i].DATAPTR) {
		case 0: return &XGI_ExtPALData[tempal];
		case 1: return &XGI_ExtNTSCData[tempal];
		case 2: return &XGI_StPALData[tempal];
		case 3: return &XGI_StNTSCData[tempal];
		case 4: return &XGI_ExtHiTVData[tempal];
		case 5: return &XGI_St2HiTVData[tempal];
		case 6: return &XGI_ExtYPbPr525iData[tempal];
		case 7: return &XGI_ExtYPbPr525pData[tempal];
		case 8: return &XGI_ExtYPbPr750pData[tempal];
		case 9: return &XGI_StYPbPr525iData[tempal];
		case 10: return &XGI_StYPbPr525pData[tempal];
		case 11: return &XGI_StYPbPr750pData[tempal];
		case 12: /* avoid system hang */
			return &XGI_ExtNTSCData[tempal];
		case 13: return &XGI_St1HiTVData[tempal];
		default:
			break;
		}
	} else if (table == 0x02) {
		switch (tempdi[i].DATAPTR) {
		case 0: return &XGI_CHTVUNTSCData[tempal];
		case 1: return &XGI_CHTVONTSCData[tempal];
		case 2: return &XGI_CHTVUPALData[tempal];
		case 3: return &XGI_CHTVOPALData[tempal];
		default:
			break;
		}
	} else if (table == 0x06) {
		/* intentionally empty (CH register data handled elsewhere) */
	}
	return NULL;
}
static void XGI_GetLVDSData(unsigned short ModeNo, unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct vb_device_info *pVBInfo)
{
unsigned short tempbx;
struct XGI330_LVDSDataStruct *LCDPtr = NULL;
struct XGI330_CHTVDataStruct *TVPtr = NULL;
tempbx = 2;
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
LCDPtr = (struct XGI330_LVDSDataStruct *) XGI_GetLcdPtr(tempbx,
ModeNo, ModeIdIndex, RefreshRateTableIndex,
pVBInfo);
pVBInfo->VGAHT = LCDPtr->VGAHT;
pVBInfo->VGAVT = LCDPtr->VGAVT;
pVBInfo->HT = LCDPtr->LCDHT;
pVBInfo->VT = LCDPtr->LCDVT;
}
if (pVBInfo->IF_DEF_CH7017 == 1) {
if (pVBInfo->VBInfo & SetCRT2ToTV) {
TVPtr = (struct XGI330_CHTVDataStruct *) XGI_GetTVPtr(
tempbx, ModeNo, ModeIdIndex,
RefreshRateTableIndex, pVBInfo);
pVBInfo->VGAHT = TVPtr->VGAHT;
pVBInfo->VGAVT = TVPtr->VGAVT;
pVBInfo->HT = TVPtr->LCDHT;
pVBInfo->VT = TVPtr->LCDVT;
}
}
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
if (!(pVBInfo->LCDInfo & (SetLCDtoNonExpanding
| EnableScalingLCD))) {
if ((pVBInfo->LCDResInfo == Panel1024x768) ||
(pVBInfo->LCDResInfo == Panel1024x768x75)) {
pVBInfo->HDE = 1024;
pVBInfo->VDE = 768;
} else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
(pVBInfo->LCDResInfo == Panel1280x1024x75)) {
pVBInfo->HDE = 1280;
pVBInfo->VDE = 1024;
} else if (pVBInfo->LCDResInfo == Panel1400x1050) {
pVBInfo->HDE = 1400;
pVBInfo->VDE = 1050;
} else {
pVBInfo->HDE = 1600;
pVBInfo->VDE = 1200;
}
}
}
}
static void XGI_ModCRT1Regs(unsigned short ModeNo, unsigned short ModeIdIndex,
unsigned short RefreshRateTableIndex,
struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned char index;
unsigned short tempbx, i;
struct XGI_LVDSCRT1HDataStruct *LCDPtr = NULL;
struct XGI_LVDSCRT1VDataStruct *LCDPtr1 = NULL;
/* struct XGI330_CHTVDataStruct *TVPtr = NULL; */
struct XGI_CH7007TV_TimingHStruct *CH7007TV_TimingHPtr = NULL;
struct XGI_CH7007TV_TimingVStruct *CH7007TV_TimingVPtr = NULL;
if (ModeNo <= 0x13)
index = pVBInfo->SModeIDTable[ModeIdIndex].St_CRT2CRTC;
else
index = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_CRT2CRTC;
index = index & IndexMask;
if ((pVBInfo->IF_DEF_ScaleLCD == 0) ||
((pVBInfo->IF_DEF_ScaleLCD == 1) &&
(!(pVBInfo->LCDInfo & EnableScalingLCD)))) {
tempbx = 0;
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
LCDPtr = (struct XGI_LVDSCRT1HDataStruct *)
XGI_GetLcdPtr(tempbx, ModeNo,
ModeIdIndex,
RefreshRateTableIndex,
pVBInfo);
for (i = 0; i < 8; i++)
pVBInfo->TimingH[0].data[i] = LCDPtr[0].Reg[i];
}
if (pVBInfo->IF_DEF_CH7007 == 1) {
if (pVBInfo->VBInfo & SetCRT2ToTV) {
CH7007TV_TimingHPtr =
(struct XGI_CH7007TV_TimingHStruct *)
XGI_GetTVPtr(
tempbx,
ModeNo,
ModeIdIndex,
RefreshRateTableIndex,
pVBInfo);
for (i = 0; i < 8; i++)
pVBInfo->TimingH[0].data[i] =
CH7007TV_TimingHPtr[0].data[i];
}
}
/* if (pVBInfo->IF_DEF_CH7017 == 1) {
if (pVBInfo->VBInfo & SetCRT2ToTV)
TVPtr = (struct XGI330_CHTVDataStruct *)
XGI_GetTVPtr(
tempbx,
ModeNo,
ModeIdIndex,
RefreshRateTableIndex,
pVBInfo);
}
*/
XGI_SetCRT1Timing_H(pVBInfo, HwDeviceExtension);
if (pVBInfo->IF_DEF_CH7007 == 1) {
xgifb_reg_set(pVBInfo->P3c4, 0x2E,
CH7007TV_TimingHPtr[0].data[8]);
xgifb_reg_set(pVBInfo->P3c4, 0x2F,
CH7007TV_TimingHPtr[0].data[9]);
}
tempbx = 1;
if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
LCDPtr1 = (struct XGI_LVDSCRT1VDataStruct *)
XGI_GetLcdPtr(
tempbx,
ModeNo,
ModeIdIndex,
RefreshRateTableIndex,
pVBInfo);
for (i = 0; i < 7; i++)
pVBInfo->TimingV[0].data[i] = LCDPtr1[0].Reg[i];
}
if (pVBInfo->IF_DEF_CH7007 == 1) {
if (pVBInfo->VBInfo & SetCRT2ToTV) {
CH7007TV_TimingVPtr =
(struct XGI_CH7007TV_TimingVStruct *)
XGI_GetTVPtr(
tempbx,
ModeNo,
ModeIdIndex,
RefreshRateTableIndex,
pVBInfo);
for (i = 0; i < 7; i++)
pVBInfo->TimingV[0].data[i] =
CH7007TV_TimingVPtr[0].data[i];
}
}
/* if (pVBInfo->IF_DEF_CH7017 == 1) {
if (pVBInfo->VBInfo & SetCRT2ToTV)
TVPtr = (struct XGI330_CHTVDataStruct *)
XGI_GetTVPtr(tempbx,
ModeNo,
ModeIdIndex,
RefreshRateTableIndex,
pVBInfo);
}
*/
XGI_SetCRT1Timing_V(ModeIdIndex, ModeNo, pVBInfo);
if (pVBInfo->IF_DEF_CH7007 == 1) {
xgifb_reg_and_or(pVBInfo->P3c4, 0x33, ~0x01,
CH7007TV_TimingVPtr[0].data[7] & 0x01);
xgifb_reg_set(pVBInfo->P3c4, 0x34,
CH7007TV_TimingVPtr[0].data[8]);
xgifb_reg_set(pVBInfo->P3c4, 0x3F,
CH7007TV_TimingVPtr[0].data[9]);
}
}
}
/*
 * Find the LCDCapList entry for the panel identified by CR36.
 *
 * The low nibble of CR36 is the panel id; entries whose LCD_ID has bit 7
 * set (OEM entries) are matched against the high nibble (panel type)
 * instead -- and from that point on the search key stays switched.
 * Returns the index of the matching entry, or of the 0xFF terminator
 * when nothing matches.
 */
static unsigned short XGI_GetLCDCapPtr(struct vb_device_info *pVBInfo)
{
	unsigned char cr36, key, type_key, entry;
	unsigned char idx = 0;

	cr36 = xgifb_reg_get(pVBInfo->P3d4, 0x36);
	key = cr36 & 0x0F;
	type_key = cr36 & 0xF0;

	for (entry = pVBInfo->LCDCapList[idx].LCD_ID; entry != 0xFF;
	     entry = pVBInfo->LCDCapList[++idx].LCD_ID) {
		if (entry & 0x80) { /* OEMUtil: compare the type nibble */
			key = type_key;
			entry &= ~0x80;
		}
		if (key == entry)
			break;
	}
	return idx;
}
/*
 * Find the LCDCapList entry matching LCDResInfo (or, for OEM entries
 * with bit 7 set and not equal to 0x80, LCDTypeInfo -- the key stays
 * switched once an OEM entry is seen).  When no entry matches, the
 * panel info in pVBInfo is reset to the default 1024x768 panel and
 * index 0 is returned.
 */
static unsigned short XGI_GetLCDCapPtr1(struct vb_device_info *pVBInfo)
{
	unsigned short key = pVBInfo->LCDResInfo;
	unsigned short oem_key = pVBInfo->LCDTypeInfo;
	unsigned short idx = 0;
	unsigned short entry = pVBInfo->LCDCapList[0].LCD_ID;

	while (entry != 0xFF) {
		if ((entry & 0x80) && (entry != 0x80)) {
			key = oem_key;
			entry &= ~0x80;
		}
		if (key == entry)
			break;
		idx++;
		entry = pVBInfo->LCDCapList[idx].LCD_ID;
	}

	if (entry == 0xFF) {
		/* Unknown panel: fall back to the default entry. */
		pVBInfo->LCDResInfo = Panel1024x768;
		pVBInfo->LCDTypeInfo = 0;
		idx = 0;
	}

	return idx;
}
/*
 * Return the panel's H/V sync pulse widths from its LCDCapList entry.
 */
static void XGI_GetLCDSync(unsigned short *HSyncWidth,
			   unsigned short *VSyncWidth,
			   struct vb_device_info *pVBInfo)
{
	unsigned short idx = XGI_GetLCDCapPtr(pVBInfo);

	*HSyncWidth = pVBInfo->LCDCapList[idx].LCD_HSyncWidth;
	*VSyncWidth = pVBInfo->LCDCapList[idx].LCD_VSyncWidth;
}
/*
 * Program the LVDS panel-link timing registers (Part1 0x14..0x23,
 * 0x35..0x37, and for 301C also Part4 0x3a..0x3c): display-enable
 * start/end, sync start/width for both axes, plus the vertical and
 * horizontal scaling factors.  A direct translation of the BIOS asm;
 * the register names tempax/tempbx/... mirror the original registers.
 *
 * NOTE(review): LCDPtr/LCDPtr1 come from XGI_GetLcdPtr(), which can
 * return NULL; the dereferences below assume a table entry always
 * matches -- confirm for exotic panels.
 */
static void XGI_SetLVDSRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short tempbx, tempax, tempcx, tempdx, push1, push2, modeflag;
	unsigned long temp, temp1, temp2, temp3, push3;
	struct XGI330_LCDDataDesStruct *LCDPtr = NULL;
	struct XGI330_LCDDataDesStruct2 *LCDPtr1 = NULL;

	if (ModeNo > 0x13)
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
	else
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;

	if (!(pVBInfo->SetFlag & Win9xDOSMode)) {
		if ((pVBInfo->IF_DEF_CH7017 == 0) || (pVBInfo->VBInfo
				& (SetCRT2ToLCD | SetCRT2ToLCDA))) {
			/* Prefer the OEM "Des" table (family 8) when the OEM
			 * utility is enabled; fall back to family 3. */
			if (pVBInfo->IF_DEF_OEMUtil == 1) {
				tempbx = 8;
				LCDPtr = (struct XGI330_LCDDataDesStruct *)
						XGI_GetLcdPtr(tempbx,
							ModeNo,
							ModeIdIndex,
							RefreshRateTableIndex,
							pVBInfo);
			}

			if ((pVBInfo->IF_DEF_OEMUtil == 0) ||
					(LCDPtr == NULL)) {
				tempbx = 3;
				if (pVBInfo->LCDInfo & EnableScalingLCD)
					LCDPtr1 = (struct XGI330_LCDDataDesStruct2 *)
							XGI_GetLcdPtr(
								tempbx,
								ModeNo,
								ModeIdIndex,
								RefreshRateTableIndex,
								pVBInfo);
				else
					LCDPtr = (struct XGI330_LCDDataDesStruct *)
							XGI_GetLcdPtr(
								tempbx,
								ModeNo,
								ModeIdIndex,
								RefreshRateTableIndex,
								pVBInfo);
			}

			/* push2 = H sync width, push1 = V sync width */
			XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
			push1 = tempbx;
			push2 = tempax;

			/* GetLCDResInfo: panel native size */
			if ((pVBInfo->LCDResInfo == Panel1024x768) ||
			    (pVBInfo->LCDResInfo == Panel1024x768x75)) {
				tempax = 1024;
				tempbx = 768;
			} else if ((pVBInfo->LCDResInfo == Panel1280x1024) ||
				   (pVBInfo->LCDResInfo == Panel1280x1024x75)) {
				tempax = 1280;
				tempbx = 1024;
			} else if (pVBInfo->LCDResInfo == Panel1400x1050) {
				tempax = 1400;
				tempbx = 1050;
			} else {
				tempax = 1600;
				tempbx = 1200;
			}

			if (pVBInfo->LCDInfo & SetLCDtoNonExpanding) {
				pVBInfo->HDE = tempax;
				pVBInfo->VDE = tempbx;
				pVBInfo->VGAHDE = tempax;
				pVBInfo->VGAVDE = tempbx;
			}

			if ((pVBInfo->IF_DEF_ScaleLCD == 1) &&
			    (pVBInfo->LCDInfo & EnableScalingLCD)) {
				tempax = pVBInfo->HDE;
				tempbx = pVBInfo->VDE;
			}

			/* Horizontal display-enable start/end (modulo HT). */
			tempax = pVBInfo->HT;

			if (pVBInfo->LCDInfo & EnableScalingLCD)
				tempbx = LCDPtr1->LCDHDES;
			else
				tempbx = LCDPtr->LCDHDES;

			tempcx = pVBInfo->HDE;
			tempbx = tempbx & 0x0fff;
			tempcx += tempbx;

			if (tempcx >= tempax)
				tempcx -= tempax;

			xgifb_reg_set(pVBInfo->Part1Port, 0x1A, tempbx & 0x07);

			tempcx = tempcx >> 3;
			tempbx = tempbx >> 3;

			xgifb_reg_set(pVBInfo->Part1Port, 0x16,
					(unsigned short) (tempbx & 0xff));
			xgifb_reg_set(pVBInfo->Part1Port, 0x17,
					(unsigned short) (tempcx & 0xff));

			/* Horizontal sync start / end (modulo HT). */
			tempax = pVBInfo->HT;

			if (pVBInfo->LCDInfo & EnableScalingLCD)
				tempbx = LCDPtr1->LCDHRS;
			else
				tempbx = LCDPtr->LCDHRS;

			tempcx = push2;	/* H sync width */

			if (pVBInfo->LCDInfo & EnableScalingLCD)
				tempcx = LCDPtr1->LCDHSync;

			tempcx += tempbx;

			if (tempcx >= tempax)
				tempcx -= tempax;

			/* NOTE(review): (tempbx & 0x07) >> 5 is always zero;
			 * looks like a leftover from the asm translation --
			 * confirm before changing. */
			tempax = tempbx & 0x07;
			tempax = tempax >> 5;
			tempcx = tempcx >> 3;
			tempbx = tempbx >> 3;

			tempcx &= 0x1f;
			tempax |= tempcx;

			xgifb_reg_set(pVBInfo->Part1Port, 0x15, tempax);
			xgifb_reg_set(pVBInfo->Part1Port, 0x14,
					(unsigned short) (tempbx & 0xff));

			/* Vertical display-enable start/end (modulo VT). */
			tempax = pVBInfo->VT;

			if (pVBInfo->LCDInfo & EnableScalingLCD)
				tempbx = LCDPtr1->LCDVDES;
			else
				tempbx = LCDPtr->LCDVDES;

			tempcx = pVBInfo->VDE;

			tempbx = tempbx & 0x0fff;
			tempcx += tempbx;
			if (tempcx >= tempax)
				tempcx -= tempax;

			xgifb_reg_set(pVBInfo->Part1Port, 0x1b,
					(unsigned short) (tempbx & 0xff));
			xgifb_reg_set(pVBInfo->Part1Port, 0x1c,
					(unsigned short) (tempcx & 0xff));

			tempbx = (tempbx >> 8) & 0x07;
			tempcx = (tempcx >> 8) & 0x07;

			xgifb_reg_set(pVBInfo->Part1Port, 0x1d,
					(unsigned short) ((tempcx << 3)
						| tempbx));

			/* Vertical sync start / end (modulo VT). */
			tempax = pVBInfo->VT;

			if (pVBInfo->LCDInfo & EnableScalingLCD)
				tempbx = LCDPtr1->LCDVRS;
			else
				tempbx = LCDPtr->LCDVRS;

			/* tempbx = tempbx >> 4; */
			tempcx = push1;	/* V sync width */

			if (pVBInfo->LCDInfo & EnableScalingLCD)
				tempcx = LCDPtr1->LCDVSync;

			tempcx += tempbx;
			if (tempcx >= tempax)
				tempcx -= tempax;

			xgifb_reg_set(pVBInfo->Part1Port, 0x18,
					(unsigned short) (tempbx & 0xff));
			xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, ~0x0f,
					(unsigned short) (tempcx & 0x0f));

			tempax = ((tempbx >> 8) & 0x07) << 3;

			/* Enable the DDA when vertical scaling is needed. */
			tempbx = pVBInfo->VGAVDE;
			if (tempbx != pVBInfo->VDE)
				tempax |= 0x40;

			if (pVBInfo->LCDInfo & EnableLVDSDDA)
				tempax |= 0x40;

			xgifb_reg_and_or(pVBInfo->Part1Port, 0x1a, 0x07,
					tempax);

			/* Vertical scaling factor:
			 * ceil(VGAVDE << 18 / VDE), split over 0x35..0x37. */
			tempcx = pVBInfo->VGAVT;
			tempbx = pVBInfo->VDE;
			tempax = pVBInfo->VGAVDE;
			tempcx -= tempax;

			temp = tempax; /* 0430 ylshieh */
			temp1 = (temp << 18) / tempbx;

			tempdx = (unsigned short) ((temp << 18) % tempbx);

			if (tempdx != 0)
				temp1 += 1;

			temp2 = temp1;
			push3 = temp2;

			xgifb_reg_set(pVBInfo->Part1Port, 0x37,
					(unsigned short) (temp2 & 0xff));
			xgifb_reg_set(pVBInfo->Part1Port, 0x36,
					(unsigned short) ((temp2 >> 8) & 0xff));

			tempbx = (unsigned short) (temp2 >> 16);
			tempax = tempbx & 0x03;

			/* bit 2: no vertical scaling needed */
			tempbx = pVBInfo->VGAVDE;
			if (tempbx == pVBInfo->VDE)
				tempax |= 0x04;

			xgifb_reg_set(pVBInfo->Part1Port, 0x35, tempax);

			/* 301C bridge mirrors the factor into Part4. */
			if (pVBInfo->VBType & VB_XGI301C) {
				temp2 = push3;
				xgifb_reg_set(pVBInfo->Part4Port,
						0x3c,
						(unsigned short) (temp2 & 0xff));
				xgifb_reg_set(pVBInfo->Part4Port,
						0x3b,
						(unsigned short) ((temp2 >> 8) &
							0xff));
				tempbx = (unsigned short) (temp2 >> 16);
				xgifb_reg_and_or(pVBInfo->Part4Port, 0x3a,
						~0xc0,
						(unsigned short) ((tempbx &
							0xff) << 6));

				tempcx = pVBInfo->VGAVDE;
				if (tempcx == pVBInfo->VDE)
					xgifb_reg_and_or(pVBInfo->Part4Port,
							0x30, ~0x0c, 0x00);
				else
					xgifb_reg_and_or(pVBInfo->Part4Port,
							0x30, ~0x0c, 0x08);
			}

			/* Horizontal scaling factor (VGAHDE<<16 / HDE),
			 * 65535 when no scaling, spread over 0x1f..0x23. */
			tempcx = pVBInfo->VGAHDE;
			tempbx = pVBInfo->HDE;

			temp1 = tempcx << 16;

			tempax = (unsigned short) (temp1 / tempbx);

			if ((tempbx & 0xffff) == (tempcx & 0xffff))
				tempax = 65535;

			temp3 = tempax;
			temp1 = pVBInfo->VGAHDE << 16;

			temp1 /= temp3;
			temp3 = temp3 << 16;
			temp1 -= 1;

			temp3 = (temp3 & 0xffff0000) + (temp1 & 0xffff);

			tempax = (unsigned short) (temp3 & 0xff);
			xgifb_reg_set(pVBInfo->Part1Port, 0x1f, tempax);

			temp1 = pVBInfo->VGAVDE << 18;
			temp1 = temp1 / push3;
			tempbx = (unsigned short) (temp1 & 0xffff);

			if (pVBInfo->LCDResInfo == Panel1024x768)
				tempbx -= 1;

			tempax = ((tempbx >> 8) & 0xff) << 3;
			tempax |= (unsigned short) ((temp3 >> 8) & 0x07);
			xgifb_reg_set(pVBInfo->Part1Port, 0x20,
					(unsigned short) (tempax & 0xff));
			xgifb_reg_set(pVBInfo->Part1Port, 0x21,
					(unsigned short) (tempbx & 0xff));

			temp3 = temp3 >> 16;

			if (modeflag & HalfDCLK)
				temp3 = temp3 >> 1;

			xgifb_reg_set(pVBInfo->Part1Port, 0x22,
					(unsigned short) ((temp3 >> 8) & 0xff));
			xgifb_reg_set(pVBInfo->Part1Port, 0x23,
					(unsigned short) (temp3 & 0xff));
		}
	}
}
/* --------------------------------------------------------------------- */
/* Function : XGI_GETLCDVCLKPtr */
/* Input : */
/* Output : al -> VCLK Index */
/* Description : */
/* --------------------------------------------------------------------- */
/*
 * Override the VCLK register pair (*di_0/*di_1) with the panel-specific
 * values from the LCD capability table.  Does nothing when CRT2 is not
 * driving an LCD, or when the third-party scaler handles the clock.
 */
static void XGI_GetLCDVCLKPtr(unsigned char *di_0, unsigned char *di_1,
		struct vb_device_info *pVBInfo)
{
	unsigned short cap;

	if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)))
		return;

	if (pVBInfo->IF_DEF_ScaleLCD == 1 &&
	    (pVBInfo->LCDInfo & EnableScalingLCD))
		return;

	cap = XGI_GetLCDCapPtr1(pVBInfo);

	if (pVBInfo->VBInfo & SetCRT2ToLCD) { /* LCDB */
		*di_0 = pVBInfo->LCDCapList[cap].LCUCHAR_VCLKData1;
		*di_1 = pVBInfo->LCDCapList[cap].LCUCHAR_VCLKData2;
	} else { /* LCDA */
		*di_0 = pVBInfo->LCDCapList[cap].LCDA_VCLKData1;
		*di_1 = pVBInfo->LCDCapList[cap].LCDA_VCLKData2;
	}
}
/*
 * Select the VCLK table index for the current target.
 *
 * Priority order (as established by the early returns below):
 *  1. CRT2 LCD programming -> panel's LCD_VCLK from the capability list;
 *  2. CRT2 TV on a 30x bridge -> HiTV / YPbPr / NTSC-PAL clock constants;
 *  3. CH7007 TV -> index from the CH7007 per-standard clock tables;
 *  4. otherwise the clock currently selected in the misc output register
 *     (ModeNo <= 0x13) or the refresh-rate table's Ext_CRTVCLK.
 */
static unsigned char XGI_GetVCLKPtr(unsigned short RefreshRateTableIndex,
		unsigned short ModeNo, unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short index, modeflag;
	unsigned short tempbx;
	unsigned char tempal;
	unsigned char *CHTVVCLKPtr = NULL;

	if (ModeNo <= 0x13)
		/* si+St_ResInfo */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	else
		/* si+Ext_ResInfo */
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	if ((pVBInfo->SetFlag & ProgrammingCRT2) &&
	    (!(pVBInfo->LCDInfo & EnableScalingLCD))) { /* {LCDA/LCDB} */
		index = XGI_GetLCDCapPtr(pVBInfo);
		tempal = pVBInfo->LCDCapList[index].LCD_VCLK;

		if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
			return tempal;

		/* {TV} */
		if (pVBInfo->VBType &
		    (VB_XGI301B |
		     VB_XGI302B |
		     VB_XGI301LV |
		     VB_XGI302LV |
		     VB_XGI301C)) {
			if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
				tempal = HiTVVCLKDIV2;

				if (!(pVBInfo->TVInfo & RPLLDIV2XO))
					tempal = HiTVVCLK;

				if (pVBInfo->TVInfo & TVSimuMode) {
					tempal = HiTVSimuVCLK;

					if (!(modeflag & Charx8Dot))
						tempal = HiTVTextVCLK;
				}
				return tempal;
			}

			if (pVBInfo->TVInfo & SetYPbPrMode750p) {
				tempal = YPbPr750pVCLK;
				return tempal;
			}

			if (pVBInfo->TVInfo & SetYPbPrMode525p) {
				tempal = YPbPr525pVCLK;
				return tempal;
			}

			tempal = NTSC1024VCLK;

			if (!(pVBInfo->TVInfo & NTSC1024x768)) {
				tempal = TVVCLKDIV2;

				if (!(pVBInfo->TVInfo & RPLLDIV2XO))
					tempal = TVVCLK;
			}

			if (pVBInfo->VBInfo & SetCRT2ToTV)
				return tempal;
		}
		/* else if ((pVBInfo->IF_DEF_CH7017==1) &&
		   (pVBInfo->VBType&VB_CH7017)) {
		   if (ModeNo<=0x13)
		   *tempal = pVBInfo->SModeIDTable[ModeIdIndex].
		   St_CRT2CRTC;
		   else
		   *tempal = pVBInfo->RefIndex[
		   RefreshRateTableIndex].Ext_CRT2CRTC;
		   *tempal = *tempal & 0x1F;
		   tempbx = 0;
		   if (pVBInfo->TVInfo & SetPALTV)
		   tempbx = tempbx + 2;
		   if (pVBInfo->TVInfo & SetCHTVOverScan)
		   tempbx++;
		   tempbx = tempbx << 1;
		   } */
	} /* {End of VB} */

	if ((pVBInfo->IF_DEF_CH7007 == 1) &&
	    (pVBInfo->VBType & VB_CH7007)) { /* [Billy] 07/05/08 CH7007 */
		/* VideoDebugPrint((
		   0,
		   "XGI_GetVCLKPtr: pVBInfo->IF_DEF_CH7007==1\n")); */
		if ((pVBInfo->VBInfo & SetCRT2ToTV)) {
			if (ModeNo <= 0x13) {
				tempal = pVBInfo->SModeIDTable[ModeIdIndex].
						St_CRT2CRTC;
			} else {
				tempal = pVBInfo->RefIndex[
						RefreshRateTableIndex].Ext_CRT2CRTC;
			}

			tempal = tempal & 0x0F;

			/* tempbx encodes the TV standard:
			 * 0/1 = NTSC under/overscan, 2/3 = PAL. */
			tempbx = 0;

			if (pVBInfo->TVInfo & SetPALTV)
				tempbx = tempbx + 2;

			if (pVBInfo->TVInfo & SetCHTVOverScan)
				tempbx++;

			/** tempbx = tempbx << 1; CH7007 ? **/

			/* [Billy]07/05/29 CH7007 */
			if (pVBInfo->IF_DEF_CH7007 == 1) {
				switch (tempbx) {
				case 0:
					CHTVVCLKPtr = XGI7007_CHTVVCLKUNTSC;
					break;
				case 1:
					CHTVVCLKPtr = XGI7007_CHTVVCLKONTSC;
					break;
				case 2:
					CHTVVCLKPtr = XGI7007_CHTVVCLKUPAL;
					break;
				case 3:
					CHTVVCLKPtr = XGI7007_CHTVVCLKOPAL;
					break;
				default:
					break;
				}
			}
			/* else {
			   switch(tempbx) {
			   case 0:
			   CHTVVCLKPtr = pVBInfo->CHTVVCLKUNTSC;
			   break;
			   case 1:
			   CHTVVCLKPtr = pVBInfo->CHTVVCLKONTSC;
			   break;
			   case 2:
			   CHTVVCLKPtr = pVBInfo->CHTVVCLKUPAL;
			   break;
			   case 3:
			   CHTVVCLKPtr = pVBInfo->CHTVVCLKOPAL;
			   break;
			   default:
			   break;
			   }
			   }
			 */

			/* tempbx is always 0..3 here, so CHTVVCLKPtr is
			 * never NULL at this point. */
			tempal = CHTVVCLKPtr[tempal];
			return tempal;
		}
	}

	/* Default: read the clock select bits from the misc output
	 * register (3cc). */
	tempal = (unsigned char) inb((pVBInfo->P3ca + 0x02));
	tempal = tempal >> 2;
	tempal &= 0x03;

	/* for Dot8 Scaling LCD */
	if ((pVBInfo->LCDInfo & EnableScalingLCD) && (modeflag & Charx8Dot))
		tempal = tempal ^ tempal; /* ; set to VCLK25MHz always */

	if (ModeNo <= 0x13)
		return tempal;

	tempal = pVBInfo->RefIndex[RefreshRateTableIndex].Ext_CRTVCLK;
	return tempal;
}
/*
 * Translate a VCLK table index into the SR2B/SR2C PLL register pair,
 * choosing the CH7007, video-bridge or plain-VGA clock table.
 *
 * NOTE(review): in the 30x-bridge branch the outputs are written only
 * when CRT2 is being programmed and the target is not LCDA; on the
 * other path *di_0/*di_1 are left untouched and the caller keeps its
 * previous values -- looks intentional, but confirm.
 */
static void XGI_GetVCLKLen(unsigned char tempal, unsigned char *di_0,
		unsigned char *di_1, struct vb_device_info *pVBInfo)
{
	if (pVBInfo->IF_DEF_CH7007 == 1) { /* [Billy] 2007/05/16 */
		/* VideoDebugPrint((
		   0,
		   "XGI_GetVCLKLen: pVBInfo->IF_DEF_CH7007==1\n")); */
		*di_0 = (unsigned char) XGI_CH7007VCLKData[tempal].SR2B;
		*di_1 = (unsigned char) XGI_CH7007VCLKData[tempal].SR2C;
	} else if (pVBInfo->VBType & (VB_XGI301 | VB_XGI301B | VB_XGI302B
			| VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
		if ((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) && (pVBInfo->SetFlag
				& ProgrammingCRT2)) {
			*di_0 = (unsigned char) XGI_VBVCLKData[tempal].SR2B;
			*di_1 = XGI_VBVCLKData[tempal].SR2C;
		}
	} else {
		*di_0 = XGI_VCLKData[tempal].SR2B;
		*di_1 = XGI_VCLKData[tempal].SR2C;
	}
}
/*
 * Program the CRT2 pixel-clock PLL.  The SR2B/SR2C pair is resolved via
 * the VCLK tables (with a panel-specific override) and then written into
 * all four clock banks selected through CR31[5:4].  CH7007, LCDA and
 * slave-mode targets use SR2B/SR2C; a standalone CRT2 uses SR2E/SR2F.
 */
static void XGI_SetCRT2ECLK(unsigned short ModeNo, unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned char di_0, di_1, vclk_idx;
	int bank;

	vclk_idx = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeNo, ModeIdIndex,
			pVBInfo);
	XGI_GetVCLKLen(vclk_idx, &di_0, &di_1, pVBInfo);
	XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);

	for (bank = 0; bank < 4; bank++) {
		/* select clock bank via CR31 bits 5:4 */
		xgifb_reg_and_or(pVBInfo->P3d4, 0x31, ~0x30,
				(unsigned short) (0x10 * bank));

		if (pVBInfo->IF_DEF_CH7007 == 1 ||
		    (pVBInfo->VBInfo & SetCRT2ToLCDA) ||
		    (pVBInfo->VBInfo & SetInSlaveMode)) {
			xgifb_reg_set(pVBInfo->P3c4, 0x2b, di_0);
			xgifb_reg_set(pVBInfo->P3c4, 0x2c, di_1);
		} else {
			xgifb_reg_set(pVBInfo->P3c4, 0x2e, di_0);
			xgifb_reg_set(pVBInfo->P3c4, 0x2f, di_1);
		}
	}
}
static void XGI_UpdateModeInfo(struct xgi_hw_device_info *HwDeviceExtension,
struct vb_device_info *pVBInfo)
{
unsigned short tempcl, tempch, temp, tempbl, tempax;
if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
| VB_XGI302LV | VB_XGI301C)) {
tempcl = 0;
tempch = 0;
temp = xgifb_reg_get(pVBInfo->P3c4, 0x01);
if (!(temp & 0x20)) {
temp = xgifb_reg_get(pVBInfo->P3d4, 0x17);
if (temp & 0x80) {
temp = xgifb_reg_get(pVBInfo->P3d4, 0x53);
if (!(temp & 0x40))
tempcl |= ActiveCRT1;
}
}
temp = xgifb_reg_get(pVBInfo->Part1Port, 0x2e);
temp &= 0x0f;
if (!(temp == 0x08)) {
/* Check ChannelA by Part1_13 [2003/10/03] */
tempax = xgifb_reg_get(pVBInfo->Part1Port, 0x13);
if (tempax & 0x04)
tempcl = tempcl | ActiveLCD;
temp &= 0x05;
if (!(tempcl & ActiveLCD))
if (temp == 0x01)
tempcl |= ActiveCRT2;
if (temp == 0x04)
tempcl |= ActiveLCD;
if (temp == 0x05) {
temp = xgifb_reg_get(pVBInfo->Part2Port, 0x00);
if (!(temp & 0x08))
tempch |= ActiveAVideo;
if (!(temp & 0x04))
tempch |= ActiveSVideo;
if (temp & 0x02)
tempch |= ActiveSCART;
if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
if (temp & 0x01)
tempch |= ActiveHiTV;
}
if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
temp = xgifb_reg_get(
pVBInfo->Part2Port,
0x4d);
if (temp & 0x10)
tempch |= ActiveYPbPr;
}
if (tempch != 0)
tempcl |= ActiveTV;
}
}
temp = xgifb_reg_get(pVBInfo->P3d4, 0x3d);
if (tempcl & ActiveLCD) {
if ((pVBInfo->SetFlag & ReserveTVOption)) {
if (temp & ActiveTV)
tempcl |= ActiveTV;
}
}
temp = tempcl;
tempbl = ~ModeSwitchStatus;
xgifb_reg_and_or(pVBInfo->P3d4, 0x3d, tempbl, temp);
if (!(pVBInfo->SetFlag & ReserveTVOption))
xgifb_reg_set(pVBInfo->P3d4, 0x3e, tempch);
} else {
return;
}
}
/*
 * XGI_GetVGAType - record the chip generation for later mode-setting
 * decisions.  Simply mirrors the chip type supplied by the platform
 * layer into the per-device state.
 */
void XGI_GetVGAType(struct xgi_hw_device_info *HwDeviceExtension,
        struct vb_device_info *pVBInfo)
{
    pVBInfo->Set_VGAType = HwDeviceExtension->jChipType;
}
/*
 * XGI_GetVBType - identify which video bridge chip is attached.
 *
 * For CH7007 configurations the type is fixed.  Otherwise, for the
 * non-LVDS case, the bridge identity is derived from the Part4 registers:
 * reg 0x00 == 0x02 means a 302B, else reg 0x01 holds a revision byte
 * that selects 301 / 301B / 301C / 301LV / 302LV (with reg 0x39
 * distinguishing a 301C from a 302LV, and reg 0x23 bit1 reporting LCD
 * support for the 301B/302B).  The result is stored in pVBInfo->VBType.
 * LVDS configurations leave VBType untouched here.
 */
void XGI_GetVBType(struct vb_device_info *pVBInfo)
{
    unsigned short id, rev, tempah, vbtype;

    if (pVBInfo->IF_DEF_CH7007 == 1) {
        pVBInfo->VBType = VB_CH7007;
        return;
    }

    if (pVBInfo->IF_DEF_LVDS != 0)
        return;

    vbtype = VB_XGI302B;
    id = xgifb_reg_get(pVBInfo->Part4Port, 0x00);
    if (id != 0x02) {
        rev = xgifb_reg_get(pVBInfo->Part4Port, 0x01);
        if (rev < 0xB0) {
            vbtype = VB_XGI301;
        } else if (rev < 0xC0) {
            vbtype = VB_XGI301B;
        } else if (rev < 0xD0) {
            vbtype = VB_XGI301C;
        } else if (rev < 0xE0) {
            vbtype = VB_XGI301LV;
        } else {
            vbtype = VB_XGI302LV;
            tempah = xgifb_reg_get(pVBInfo->Part4Port, 0x39);
            if (tempah != 0xFF)
                vbtype = VB_XGI301C;
        }

        if (vbtype & (VB_XGI301B | VB_XGI302B)) {
            rev = xgifb_reg_get(pVBInfo->Part4Port, 0x23);
            if (!(rev & 0x02))
                vbtype = vbtype | VB_NoLCD;
        }
    }
    pVBInfo->VBType = vbtype;
}
/*
 * XGI_GetVBInfo - decode the CRT2/bridge routing for the requested mode.
 *
 * Reads the BIOS scratch registers CR30/CR31 (device selection) and CR38
 * (dual-edge / LCDA / YPbPr options), filters them against the configured
 * bridge capabilities, and resolves conflicts (slave mode, simultaneous
 * scan, LCDA vs LCDB, RAMDAC vs TV, ...).  The final routing mask is
 * stored in pVBInfo->VBInfo; pVBInfo->SetFlag may gain EnableVCMode or
 * ReserveTVOption as side effects.
 */
void XGI_GetVBInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
        struct xgi_hw_device_info *HwDeviceExtension,
        struct vb_device_info *pVBInfo)
{
    unsigned short tempax, push, tempbx, temp, modeflag;

    if (ModeNo <= 0x13)
        modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
    else
        modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

    pVBInfo->SetFlag = 0;
    pVBInfo->ModeType = modeflag & ModeInfoFlag;
    tempbx = 0;

    if (pVBInfo->VBType & 0xFFFF) {
        /* Check Display Device: CR30 = low byte, CR31 = high byte */
        temp = xgifb_reg_get(pVBInfo->P3d4, 0x30);
        tempbx = tempbx | temp;
        temp = xgifb_reg_get(pVBInfo->P3d4, 0x31);
        push = temp;
        push = push << 8;  /* saved raw CR31<<8, restored below */
        tempax = temp << 8;
        tempbx = tempbx | tempax;
        /* Strip the bits this routine recomputes itself. */
        temp = (SetCRT2ToDualEdge | SetCRT2ToYPbPr | SetCRT2ToLCDA
                | SetInSlaveMode | DisableCRT2Display);
        temp = 0xFFFF ^ temp;
        tempbx &= temp;

        temp = xgifb_reg_get(pVBInfo->P3d4, 0x38);

        if (pVBInfo->IF_DEF_LCDA == 1) {
            /* NOTE(review): with '||' this condition is true
             * whenever Set_VGAType >= min(XG20, XG40); one of the
             * comparisons is redundant — possibly '&&' or a
             * different bound was intended.  Preserved as-is. */
            if ((pVBInfo->Set_VGAType >= XG20)
                    || (pVBInfo->Set_VGAType >= XG40)) {
                if (pVBInfo->IF_DEF_LVDS == 0) {
                    if (pVBInfo->VBType &
                        (VB_XGI302B |
                         VB_XGI301LV |
                         VB_XGI302LV |
                         VB_XGI301C)) {
                        /* CR38: dual-edge / LCDA enables */
                        if (temp & EnableDualEdge) {
                            tempbx |=
                                SetCRT2ToDualEdge;
                            if (temp & SetToLCDA)
                                tempbx |=
                                    SetCRT2ToLCDA;
                        }
                    }
                } else if (pVBInfo->IF_DEF_CH7017 == 1) {
                    if (pVBInfo->VBType & VB_CH7017) {
                        if (temp & EnableDualEdge) {
                            tempbx |=
                                SetCRT2ToDualEdge;
                            if (temp & SetToLCDA)
                                tempbx |=
                                    SetCRT2ToLCDA;
                        }
                    }
                }
            }
        }

        if (pVBInfo->IF_DEF_YPbPr == 1) {
            /* [Billy] 07/05/04 */
            if (((pVBInfo->IF_DEF_LVDS == 0) &&
                 ((pVBInfo->VBType & VB_XGI301LV) ||
                  (pVBInfo->VBType & VB_XGI302LV) ||
                  (pVBInfo->VBType & VB_XGI301C))) ||
                ((pVBInfo->IF_DEF_CH7017 == 1) &&
                 (pVBInfo->VBType & VB_CH7017)) ||
                ((pVBInfo->IF_DEF_CH7007 == 1) &&
                 (pVBInfo->VBType & VB_CH7007))) {
                if (temp & SetYPbPr) { /* temp = CR38 */
                    if (pVBInfo->IF_DEF_HiVision == 1) {
                        /* CR35 holds the YPbPr mode;
                         * 1080i is treated as HiVision,
                         * everything else as YPbPr. */
                        temp = xgifb_reg_get(
                                pVBInfo->P3d4,
                                0x35);
                        temp &= YPbPrMode;
                        tempbx |= SetCRT2ToHiVisionTV;
                        if (temp != YPbPrMode1080i) {
                            tempbx &=
                                (~SetCRT2ToHiVisionTV);
                            tempbx |=
                                SetCRT2ToYPbPr;
                        }
                    }
                }
            }
        }

        tempax = push; /* restore CR31 */

        /* Build the mask of CRT2 targets this configuration can
         * legally drive (bridge vs. third-party chip). */
        if (pVBInfo->IF_DEF_LVDS == 0) {
            if (pVBInfo->IF_DEF_YPbPr == 1) {
                if (pVBInfo->IF_DEF_HiVision == 1)
                    temp = 0x09FC;
                else
                    temp = 0x097C;
            } else {
                if (pVBInfo->IF_DEF_HiVision == 1)
                    temp = 0x01FC;
                else
                    temp = 0x017C;
            }
        } else { /* 3rd party chip */
            if (pVBInfo->IF_DEF_CH7017 == 1)
                temp = (SetCRT2ToTV |
                    SetCRT2ToLCD |
                    SetCRT2ToLCDA);
            /* [Billy] 07/05/03 */
            else if (pVBInfo->IF_DEF_CH7007 == 1)
                temp = SetCRT2ToTV;
            else
                temp = SetCRT2ToLCD;
        }

        /* No supported CRT2 target requested: disable CRT2. */
        if (!(tempbx & temp)) {
            tempax |= DisableCRT2Display;
            tempbx = 0;
        }

        if (pVBInfo->IF_DEF_LCDA == 1) { /* Select Display Device */
            if (!(pVBInfo->VBType & VB_NoLCD)) {
                if (tempbx & SetCRT2ToLCDA) {
                    /* LCDA wins over the other targets;
                     * TV survives only in simu-scan. */
                    if (tempbx & SetSimuScanMode)
                        tempbx &= (~(SetCRT2ToLCD |
                                 SetCRT2ToRAMDAC |
                                 SwitchToCRT2));
                    else
                        tempbx &= (~(SetCRT2ToLCD |
                                 SetCRT2ToRAMDAC |
                                 SetCRT2ToTV |
                                 SwitchToCRT2));
                }
            }
        }

        /* shampoo add */
        /* for driver abnormal */
        if (!(tempbx & (SwitchToCRT2 | SetSimuScanMode))) {
            if (pVBInfo->IF_DEF_CRT2Monitor == 1) {
                if (tempbx & SetCRT2ToRAMDAC) {
                    /* NOTE(review): '0x00FF | ~flag' keeps
                     * every low-byte bit and clears only
                     * the flag's high-byte bit(s); assumes
                     * SetCRT2ToYPbPr lives in the high
                     * byte — confirm against the flag
                     * definitions. */
                    tempbx &= (0xFF00 |
                           SetCRT2ToRAMDAC |
                           SwitchToCRT2 |
                           SetSimuScanMode);
                    tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
                }
            } else {
                tempbx &= (~(SetCRT2ToRAMDAC |
                         SetCRT2ToLCD |
                         SetCRT2ToTV));
            }
        }

        if (!(pVBInfo->VBType & VB_NoLCD)) {
            if (tempbx & SetCRT2ToLCD) {
                tempbx &= (0xFF00 |
                       SetCRT2ToLCD |
                       SwitchToCRT2 |
                       SetSimuScanMode);
                tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
            }
        }

        if (tempbx & SetCRT2ToSCART) {
            tempbx &= (0xFF00 |
                   SetCRT2ToSCART |
                   SwitchToCRT2 |
                   SetSimuScanMode);
            tempbx &= (0x00FF | (~SetCRT2ToYPbPr));
        }

        if (pVBInfo->IF_DEF_YPbPr == 1) {
            if (tempbx & SetCRT2ToYPbPr)
                tempbx &= (0xFF00 |
                       SwitchToCRT2 |
                       SetSimuScanMode);
        }

        if (pVBInfo->IF_DEF_HiVision == 1) {
            if (tempbx & SetCRT2ToHiVisionTV)
                tempbx &= (0xFF00 |
                       SetCRT2ToHiVisionTV |
                       SwitchToCRT2 |
                       SetSimuScanMode);
        }

        if (tempax & DisableCRT2Display) { /* Set Display Device Info */
            if (!(tempbx & (SwitchToCRT2 | SetSimuScanMode)))
                tempbx = DisableCRT2Display;
        }

        if (!(tempbx & DisableCRT2Display)) {
            /* Non-driver mode or a non-CRT2 mode forces
             * slave / simultaneous-scan operation. */
            if ((!(tempbx & DriverMode)) ||
                (!(modeflag & CRT2Mode))) {
                if (pVBInfo->IF_DEF_LCDA == 1) {
                    if (!(tempbx & SetCRT2ToLCDA))
                        tempbx |= (SetInSlaveMode |
                               SetSimuScanMode);
                }

                if (pVBInfo->IF_DEF_VideoCapture == 1) {
                    if (((HwDeviceExtension->jChipType ==
                          XG40) &&
                         (pVBInfo->Set_VGAType == XG40)) ||
                        ((HwDeviceExtension->jChipType ==
                          XG41) &&
                         (pVBInfo->Set_VGAType == XG41)) ||
                        ((HwDeviceExtension->jChipType ==
                          XG42) &&
                         (pVBInfo->Set_VGAType == XG42)) ||
                        ((HwDeviceExtension->jChipType ==
                          XG45) &&
                         (pVBInfo->Set_VGAType == XG45))) {
                        if (ModeNo <= 13) {
                            if (!(tempbx &
                                  SetCRT2ToRAMDAC)) {
                                /* CRT2 not need
                                 * to support */
                                tempbx &=
                                    (0x00FF |
                                    (~SetInSlaveMode));
                                pVBInfo->SetFlag
                                    |= EnableVCMode;
                            }
                        }
                    }
                }
            }

            /* LCD+TV can't support in slave mode
             * (Force LCDA+TV->LCDB) */
            if ((tempbx & SetInSlaveMode) &&
                (tempbx & SetCRT2ToLCDA)) {
                tempbx ^= (SetCRT2ToLCD |
                       SetCRT2ToLCDA |
                       SetCRT2ToDualEdge);
                pVBInfo->SetFlag |= ReserveTVOption;
            }
        }
    }

    pVBInfo->VBInfo = tempbx;
}
/*
 * XGI_GetTVInfo - derive the TV standard/options for the requested mode.
 *
 * Only does work when CRT2 is routed to a TV.  Reads CR35 (TV standard
 * and overscan scratch register) and combines it with the mode tables,
 * YPbPr mode bits, HiVision and slave-mode state into pVBInfo->TVInfo
 * (SetPALTV, SetYPbPrMode*, TVSimuMode, NTSC1024x768, RPLLDIV2XO, ...).
 */
void XGI_GetTVInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
        struct vb_device_info *pVBInfo)
{
    unsigned short temp, tempbx = 0, resinfo = 0, modeflag, index1;

    tempbx = 0;
    resinfo = 0;

    if (pVBInfo->VBInfo & SetCRT2ToTV) {
        if (ModeNo <= 0x13) {
            modeflag = pVBInfo->SModeIDTable[ModeIdIndex].
                    St_ModeFlag; /* si+St_ModeFlag */
            resinfo = pVBInfo->SModeIDTable[ModeIdIndex].
                    St_ResInfo; /* si+St_ResInfo */
        } else {
            modeflag = pVBInfo->EModeIDTable[ModeIdIndex].
                    Ext_ModeFlag;
            resinfo = pVBInfo->EModeIDTable[ModeIdIndex].
                    Ext_RESINFO; /* si+Ext_ResInfo */
        }

        if (pVBInfo->VBInfo & SetCRT2ToTV) {
            /* CR35: TV standard selection */
            temp = xgifb_reg_get(pVBInfo->P3d4, 0x35);
            tempbx = temp;
            if (tempbx & SetPALTV) {
                tempbx &= (SetCHTVOverScan |
                       SetPALMTV |
                       SetPALNTV |
                       SetPALTV);
                if (tempbx & SetPALMTV)
                    /* set to NTSC if PAL-M */
                    tempbx &= ~SetPALTV;
            } else
                tempbx &= (SetCHTVOverScan |
                       SetNTSCJ |
                       SetPALTV);
        }

        /* CH7017/CH7007: overscan flag comes from CR35 as well
         * (note: this overwrites the bits gathered above). */
        if (pVBInfo->IF_DEF_CH7017 == 1) {
            tempbx = xgifb_reg_get(pVBInfo->P3d4, 0x35);
            if (tempbx & TVOverScan)
                tempbx |= SetCHTVOverScan;
        }

        if (pVBInfo->IF_DEF_CH7007 == 1) { /* [Billy] 07/05/04 */
            tempbx = xgifb_reg_get(pVBInfo->P3d4, 0x35);
            if (tempbx & TVOverScan)
                tempbx |= SetCHTVOverScan;
        }

        if (pVBInfo->IF_DEF_LVDS == 0) {
            /* SCART is always a PAL-timing output */
            if (pVBInfo->VBInfo & SetCRT2ToSCART)
                tempbx |= SetPALTV;
        }

        if (pVBInfo->IF_DEF_YPbPr == 1) {
            if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
                index1 = xgifb_reg_get(pVBInfo->P3d4, 0x35);
                index1 &= YPbPrMode;

                if (index1 == YPbPrMode525i)
                    tempbx |= SetYPbPrMode525i;

                if (index1 == YPbPrMode525p)
                    tempbx = tempbx | SetYPbPrMode525p;
                if (index1 == YPbPrMode750p)
                    tempbx = tempbx | SetYPbPrMode750p;
            }
        }

        if (pVBInfo->IF_DEF_HiVision == 1) {
            if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
                tempbx = tempbx | SetYPbPrMode1080i | SetPALTV;
        }

        if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
            if ((pVBInfo->VBInfo & SetInSlaveMode) &&
                (!(pVBInfo->VBInfo & SetNotSimuMode)))
                tempbx |= TVSimuMode;

            /* NOTE(review): 'modeflag > 13' compares the flag
             * word, not the mode number; 'ModeNo > 0x13' may
             * have been intended — confirm before changing. */
            if (!(tempbx & SetPALTV) &&
                (modeflag > 13) &&
                (resinfo == 8)) /* NTSC 1024x768, */
                tempbx |= NTSC1024x768;

            tempbx |= RPLLDIV2XO;

            if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
                if (pVBInfo->VBInfo & SetInSlaveMode)
                    tempbx &= (~RPLLDIV2XO);
            } else {
                if (tempbx &
                    (SetYPbPrMode525p | SetYPbPrMode750p))
                    tempbx &= (~RPLLDIV2XO);
                else if (!(pVBInfo->VBType &
                       (VB_XGI301B |
                        VB_XGI302B |
                        VB_XGI301LV |
                        VB_XGI302LV |
                        VB_XGI301C))) {
                    if (tempbx & TVSimuMode)
                        tempbx &= (~RPLLDIV2XO);
                }
            }
        }
    }
    pVBInfo->TVInfo = tempbx;
}
/*
 * XGI_GetLCDInfo - decode panel type and LCD options for the requested mode.
 *
 * Reads CR36 (panel resolution / type), CR33 (refresh selection) and CR37
 * (scaling / expanding / sync / PWD bits), combines them with the LCD
 * capability table, and fills pVBInfo->LCDResInfo, LCDTypeInfo and LCDInfo.
 * May also rewrite pVBInfo->VBInfo for the locked LCDB->LCDA case.
 *
 * Returns 0 when CRT2 is not routed to an LCD (LCDInfo not computed),
 * 1 otherwise.
 */
unsigned char XGI_GetLCDInfo(unsigned short ModeNo, unsigned short ModeIdIndex,
        struct vb_device_info *pVBInfo)
{
    unsigned short temp, tempax, tempbx, modeflag, resinfo = 0, LCDIdIndex;

    pVBInfo->LCDResInfo = 0;
    pVBInfo->LCDTypeInfo = 0;
    pVBInfo->LCDInfo = 0;

    if (ModeNo <= 0x13) {
        /* si+St_ModeFlag // */
        modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
    } else {
        modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
        /* si+Ext_ResInfo // */
        resinfo = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
    }

    temp = xgifb_reg_get(pVBInfo->P3d4, 0x36); /* Get LCD Res.Info */
    tempbx = temp & 0x0F;

    if (tempbx == 0)
        tempbx = Panel1024x768; /* default */

    /* LCD75 [2003/8/22] Vicent: upgrade to the 75Hz panel variant when
     * CR33 requests a high enough refresh index for this resolution. */
    if ((tempbx == Panel1024x768) || (tempbx == Panel1280x1024)) {
        if (pVBInfo->VBInfo & DriverMode) {
            tempax = xgifb_reg_get(pVBInfo->P3d4, 0x33);
            if (pVBInfo->VBInfo & SetCRT2ToLCDA)
                tempax &= 0x0F;
            else
                tempax = tempax >> 4;

            if ((resinfo == 6) || (resinfo == 9)) {
                if (tempax >= 3)
                    tempbx |= PanelRef75Hz;
            } else if ((resinfo == 7) || (resinfo == 8)) {
                if (tempax >= 4)
                    tempbx |= PanelRef75Hz;
            }
        }
    }

    pVBInfo->LCDResInfo = tempbx;

    /* End of LCD75 */
    if (pVBInfo->IF_DEF_OEMUtil == 1)
        pVBInfo->LCDTypeInfo = (temp & 0xf0) >> 4;

    if (!(pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)))
        return 0;

    tempbx = 0;

    temp = xgifb_reg_get(pVBInfo->P3d4, 0x37);

    temp &= (ScalingLCD | LCDNonExpanding | LCDSyncBit | SetPWDEnable);

    if ((pVBInfo->IF_DEF_ScaleLCD == 1) && (temp & LCDNonExpanding))
        temp &= ~EnableScalingLCD;

    tempbx |= temp;

    LCDIdIndex = XGI_GetLCDCapPtr1(pVBInfo);

    tempax = pVBInfo->LCDCapList[LCDIdIndex].LCD_Capability;

    if (pVBInfo->IF_DEF_LVDS == 0) { /* shampoo */
        if (((pVBInfo->VBType & VB_XGI302LV) || (pVBInfo->VBType
                & VB_XGI301C)) && (tempax & LCDDualLink)) {
            tempbx |= SetLCDDualLink;
        }
    }

    if (pVBInfo->IF_DEF_CH7017 == 1) {
        if (tempax & LCDDualLink)
            tempbx |= SetLCDDualLink;
    }

    if (pVBInfo->IF_DEF_LVDS == 0) {
        if ((pVBInfo->LCDResInfo == Panel1400x1050) && (pVBInfo->VBInfo
                & SetCRT2ToLCD) && (ModeNo > 0x13) && (resinfo
                == 9) && (!(tempbx & EnableScalingLCD)))
            /* set to center in 1280x1024 LCDB for Panel1400x1050 */
            tempbx |= SetLCDtoNonExpanding;
    }

    if (pVBInfo->IF_DEF_ExpLink == 1) {
        if (modeflag & HalfDCLK) {
            /* Half dot clock: let the LVDS DDA compensate,
             * except for non-expanding panels (where only the
             * 512x384-on-1024x768 case still needs it). */
            if (!(tempbx & SetLCDtoNonExpanding)) {
                tempbx |= EnableLVDSDDA;
            } else {
                if (ModeNo > 0x13) {
                    if (pVBInfo->LCDResInfo
                            == Panel1024x768) {
                        if (resinfo == 4) {/* 512x384 */
                            tempbx |= EnableLVDSDDA;
                        }
                    }
                }
            }
        }
    }

    if (pVBInfo->VBInfo & SetInSlaveMode) {
        if (pVBInfo->VBInfo & SetNotSimuMode)
            tempbx |= LCDVESATiming;
    } else {
        tempbx |= LCDVESATiming;
    }

    pVBInfo->LCDInfo = tempbx;

    if (pVBInfo->IF_DEF_PWD == 1) {
        if (pVBInfo->LCDInfo & SetPWDEnable) {
            if ((pVBInfo->VBType & VB_XGI302LV) ||
                (pVBInfo->VBType & VB_XGI301C)) {
                if (!(tempax & PWDEnable))
                    pVBInfo->LCDInfo &= ~SetPWDEnable;
            }
        }
    }

    if (pVBInfo->IF_DEF_LVDS == 0) {
        /* Locked LCDB->LCDA: reroute standard modes to LCDA
         * dual-edge instead of LCDB slave mode. */
        if (tempax & (LockLCDBToA | StLCDBToA)) {
            if (pVBInfo->VBInfo & SetInSlaveMode) {
                if (!(tempax & LockLCDBToA)) {
                    if (ModeNo <= 0x13) {
                        pVBInfo->VBInfo &=
                            ~(SetSimuScanMode |
                              SetInSlaveMode |
                              SetCRT2ToLCD);
                        pVBInfo->VBInfo |=
                            SetCRT2ToLCDA |
                            SetCRT2ToDualEdge;
                    }
                }
            }
        }
    }

    return 1;
}
/*
 * XGI_SearchModeID - locate the mode table entry for a mode number.
 *
 * Standard modes (<= 0x13) are searched in SModeIDTable, extended modes
 * in EModeIDTable; both tables are terminated by a 0xFF mode ID.  On
 * success *ModeIdIndex is set to the matching entry (standard modes 0x07
 * and <= 3 are bumped to their 400-line variants) and 1 is returned;
 * 0 is returned when the mode number is unknown.
 */
unsigned char XGI_SearchModeID(unsigned short ModeNo,
        unsigned short *ModeIdIndex, struct vb_device_info *pVBInfo)
{
    unsigned short i;

    /* Modes 0-5 come in odd/even pairs; normalize to the odd one. */
    if (ModeNo <= 5)
        ModeNo |= 1;

    if (ModeNo <= 0x13) {
        for (i = 0; ; i++) {
            if (pVBInfo->SModeIDTable[i].St_ModeID == ModeNo)
                break;
            if (pVBInfo->SModeIDTable[i].St_ModeID == 0xFF) {
                *ModeIdIndex = i;
                return 0;
            }
        }

        if (ModeNo == 0x07)
            i++;        /* 400 lines */
        if (ModeNo <= 3)
            i += 2;     /* 400 lines */
        /* else 350 lines */
        *ModeIdIndex = i;
    } else {
        for (i = 0; ; i++) {
            if (pVBInfo->EModeIDTable[i].Ext_ModeID == ModeNo)
                break;
            if (pVBInfo->EModeIDTable[i].Ext_ModeID == 0xFF) {
                *ModeIdIndex = i;
                return 0;
            }
        }
        *ModeIdIndex = i;
    }

    return 1;
}
/* win2000 MM adapter not support standard mode! */
#if 0
/*
 * XGINew_CheckMemorySize - verify the installed DRAM suffices for a mode.
 * (Currently compiled out: "win2000 MM adapter not support standard mode".)
 *
 * Derives the required size from the mode flags and the installed size
 * from SR14 (per-channel size in bits [7:4], channel count in the low
 * bits, decoded per chip type).  Returns 1 when memory is sufficient,
 * 0 otherwise.
 */
static unsigned char XGINew_CheckMemorySize(
        struct xgi_hw_device_info *HwDeviceExtension,
        unsigned short ModeNo,
        unsigned short ModeIdIndex,
        struct vb_device_info *pVBInfo)
{
    unsigned short memorysize, modeflag, temp, temp1, tmp;

    if (ModeNo <= 0x13)
        modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
    else
        modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

    memorysize = modeflag & MemoryInfoFlag;
    /*
     * Bug fix: this previously read 'memorysize > MemorySizeShift' —
     * the comparison operator instead of the right shift — which
     * collapsed the required-memory field to 0 or 1 and let the check
     * pass for almost any mode.
     */
    memorysize = memorysize >> MemorySizeShift;
    memorysize++; /* Get memory size */

    temp = xgifb_reg_get(pVBInfo->P3c4, 0x14); /* Get DRAM Size */
    tmp = temp;

    if (HwDeviceExtension->jChipType == XG40) {
        /* memory size per channel SR14[7:4] */
        temp = 1 << ((temp & 0x0F0) >> 4);
        if ((tmp & 0x0c) == 0x0C) { /* Quad channels */
            temp <<= 2;
        } else if ((tmp & 0x0c) == 0x08) { /* Dual channels */
            temp <<= 1;
        }
    } else if (HwDeviceExtension->jChipType == XG42) {
        /* memory size per channel SR14[7:4] */
        temp = 1 << ((temp & 0x0F0) >> 4);
        if ((tmp & 0x04) == 0x04) { /* Dual channels */
            temp <<= 1;
        }
    } else if (HwDeviceExtension->jChipType == XG45) {
        /* memory size per channel SR14[7:4] */
        temp = 1 << ((temp & 0x0F0) >> 4);
        if ((tmp & 0x0c) == 0x0C) { /* Quad channels */
            temp <<= 2;
        } else if ((tmp & 0x0c) == 0x08) { /* triple channels */
            temp1 = temp;
            temp <<= 1;
            temp += temp1;
        } else if ((tmp & 0x0c) == 0x04) { /* Dual channels */
            temp <<= 1;
        }
    }

    if (temp < memorysize)
        return 0;
    else
        return 1;
}
#endif
/*
void XGINew_IsLowResolution(unsigned short ModeNo,
unsigned short ModeIdIndex,
struct vb_device_info *pVBInfo)
{
unsigned short data ;
unsigned short ModeFlag ;
data = xgifb_reg_get(pVBInfo->P3c4, 0x0F);
data &= 0x7F;
xgifb_reg_set(pVBInfo->P3c4, 0x0F, data);
if (ModeNo > 0x13) {
ModeFlag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
if ((ModeFlag & HalfDCLK) && (ModeFlag & DoubleScanMode)) {
data = xgifb_reg_get(pVBInfo->P3c4, 0x0F);
data |= 0x80;
xgifb_reg_set(pVBInfo->P3c4, 0x0F, data);
data = xgifb_reg_get(pVBInfo->P3c4, 0x01);
data &= 0xF7;
xgifb_reg_set(pVBInfo->P3c4, 0x01, data);
}
}
}
*/
/*
 * XG21GPIODataTransfer - mirror the bit order of a byte.
 *
 * Returns the input with bit0 <-> bit7, bit1 <-> bit6, etc. swapped;
 * the XG21 GPIO read-back value apparently needs this mirroring before
 * individual pins can be tested (see XGI_XG21GetPSCValue).
 */
static unsigned char XG21GPIODataTransfer(unsigned char ujDate)
{
    unsigned char mirrored = 0;
    unsigned char bit;

    for (bit = 0; bit < 8; bit++) {
        if (ujDate & (1u << bit))
            mirrored |= (unsigned char)(0x80u >> bit);
    }

    return mirrored;
}
/*----------------------------------------------------------------------------*/
/* output */
/* bl[5] : LVDS signal */
/* bl[1] : LVDS backlight */
/* bl[0] : LVDS VDD */
/*----------------------------------------------------------------------------*/
static unsigned char XGI_XG21GetPSCValue(struct vb_device_info *pVBInfo)
{
    unsigned char saved_cr4a;
    unsigned char value;

    /* Unlock the GPIO bits in CR4A, sample CR48 (mirrored bit order),
     * keep only the LVDS signal/backlight/VDD bits, restore CR4A. */
    saved_cr4a = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
    xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x23); /* enable GPIO write */

    value = XG21GPIODataTransfer(xgifb_reg_get(pVBInfo->P3d4, 0x48));
    value &= 0x23;

    xgifb_reg_set(pVBInfo->P3d4, 0x4A, saved_cr4a);

    return value;
}
/*----------------------------------------------------------------------------*/
/* output */
/* bl[5] : LVDS signal */
/* bl[1] : LVDS backlight */
/* bl[0] : LVDS VDD */
/*----------------------------------------------------------------------------*/
static unsigned char XGI_XG27GetPSCValue(struct vb_device_info *pVBInfo)
{
    unsigned char saved_cr4a;
    unsigned char crb4;
    unsigned char value;

    /* Unlock the GPIO bits in CR4A, read bits [3:2] of CR48 down into
     * [1:0], restore CR4A, then merge CRB4 bit2 in as bit5. */
    saved_cr4a = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
    xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x0C); /* enable GPIO write */

    value = (xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x0C) >> 2;

    xgifb_reg_set(pVBInfo->P3d4, 0x4A, saved_cr4a);

    crb4 = xgifb_reg_get(pVBInfo->P3d4, 0xB4);
    value |= (crb4 & 0x04) << 3;

    return value;
}
/*
 * XGI_DisplayOn - unblank the screen and power up the panel outputs.
 *
 * Clears the SR01 blank bit, then for XG21/XG27 LVDS panels runs the
 * power-up sequence (VDD -> delay -> signal -> delay -> backlight),
 * skipping steps that the PSC GPIO read-back reports as already on.
 * Non-LVDS paths just enable the DVO/DVI signal.
 */
void XGI_DisplayOn(struct xgi_hw_device_info *pXGIHWDE,
        struct vb_device_info *pVBInfo)
{
    xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x00);
    if (pXGIHWDE->jChipType == XG21) {
        if (pVBInfo->IF_DEF_LVDS == 1) {
            if (!(XGI_XG21GetPSCValue(pVBInfo) & 0x1)) {
                /* LVDS VDD on */
                XGI_XG21BLSignalVDD(0x01, 0x01, pVBInfo);
                XGI_XG21SetPanelDelay(2, pVBInfo);
            }
            if (!(XGI_XG21GetPSCValue(pVBInfo) & 0x20))
                /* LVDS signal on */
                XGI_XG21BLSignalVDD(0x20, 0x20, pVBInfo);
            /* NOTE(review): this delay is NOT inside the if
             * above (no braces), so it always runs; the
             * indentation suggests it may have been meant to
             * be conditional — confirm before changing. */
            XGI_XG21SetPanelDelay(3, pVBInfo);
            /* LVDS backlight on */
            XGI_XG21BLSignalVDD(0x02, 0x02, pVBInfo);
        } else {
            /* DVO/DVI signal on */
            XGI_XG21BLSignalVDD(0x20, 0x20, pVBInfo);
        }
    }
    /* [Billy] 07/05/23 For CH7007 (empty placeholder) */
    if (pVBInfo->IF_DEF_CH7007 == 1) {
    }
    if (pXGIHWDE->jChipType == XG27) {
        if (pVBInfo->IF_DEF_LVDS == 1) {
            if (!(XGI_XG27GetPSCValue(pVBInfo) & 0x1)) {
                /* LVDS VDD on */
                XGI_XG27BLSignalVDD(0x01, 0x01, pVBInfo);
                XGI_XG21SetPanelDelay(2, pVBInfo);
            }
            if (!(XGI_XG27GetPSCValue(pVBInfo) & 0x20))
                /* LVDS signal on */
                XGI_XG27BLSignalVDD(0x20, 0x20, pVBInfo);
            /* NOTE(review): unconditional, same as the XG21
             * branch above. */
            XGI_XG21SetPanelDelay(3, pVBInfo);
            /* LVDS backlight on */
            XGI_XG27BLSignalVDD(0x02, 0x02, pVBInfo);
        } else {
            /* DVO/DVI signal on */
            XGI_XG27BLSignalVDD(0x20, 0x20, pVBInfo);
        }
    }
}
/*
 * XGI_DisplayOff - power down the panel outputs and blank the screen.
 *
 * Reverse of XGI_DisplayOn: turns off the LVDS backlight (with a panel
 * delay) or the DVO/DVI signal for XG21/XG27, then sets the SR01 blank
 * bit last.
 */
void XGI_DisplayOff(struct xgi_hw_device_info *pXGIHWDE,
        struct vb_device_info *pVBInfo)
{
    if (pXGIHWDE->jChipType == XG21) {
        if (pVBInfo->IF_DEF_LVDS == 1) {
            /* LVDS backlight off */
            XGI_XG21BLSignalVDD(0x02, 0x00, pVBInfo);
            XGI_XG21SetPanelDelay(3, pVBInfo);
        } else {
            /* DVO/DVI signal off */
            XGI_XG21BLSignalVDD(0x20, 0x00, pVBInfo);
        }
    }

    /* Empty CH7007 placeholder, kept from the original code. */
    if (pVBInfo->IF_DEF_CH7007 == 1) { /* [Billy] 07/05/23 For CH7007 */
        /* if (IsCH7007TVMode(pVBInfo) == 0) */
        {
        }
    }

    if (pXGIHWDE->jChipType == XG27) {
        if ((XGI_XG27GetPSCValue(pVBInfo) & 0x2)) {
            /* LVDS backlight off */
            XGI_XG27BLSignalVDD(0x02, 0x00, pVBInfo);
            XGI_XG21SetPanelDelay(3, pVBInfo);
        }

        if (pVBInfo->IF_DEF_LVDS == 0)
            /* DVO/DVI signal off */
            XGI_XG27BLSignalVDD(0x20, 0x00, pVBInfo);
    }

    xgifb_reg_and_or(pVBInfo->P3c4, 0x01, 0xDF, 0x20);
}
/*
 * XGI_WaitDisply - sample the VGA input status register (port 3DAh) twice.
 *
 * NOTE(review): both loops contain an unconditional "break", so each body
 * executes at most once — this does NOT actually poll for the display
 * enable / retrace bit the way the disabled XGI_WaitDisplay variant below
 * does.  Reading 3DAh also resets the attribute-controller flip-flop,
 * which may be the only effect relied upon here; confirm before turning
 * these into real busy-waits.
 */
static void XGI_WaitDisply(struct vb_device_info *pVBInfo)
{
    while ((inb(pVBInfo->P3da) & 0x01))
        break;

    while (!(inb(pVBInfo->P3da) & 0x01))
        break;
}
#if 0
/*
 * XGI_WaitDisplay - busy-wait for a full display period on port 3DAh.
 * Currently compiled out; see XGI_WaitDisply above for the live variant.
 *
 * NOTE(review): both loops spin without any timeout — if this is ever
 * re-enabled it can hang forever on hardware that never toggles bit 0.
 */
static void XGI_WaitDisplay(struct vb_device_info *pVBInfo)
{
    while (!(inb(pVBInfo->P3da) & 0x01))
        ;
    while (inb(pVBInfo->P3da) & 0x01)
        ;
}
#endif
/*
 * XGI_AutoThreshold - re-enable the automatic FIFO threshold (Part1 reg
 * 0x01 bit 6), unless the Win9x DOS-mode workaround is in effect.
 */
static void XGI_AutoThreshold(struct vb_device_info *pVBInfo)
{
    if (pVBInfo->SetFlag & Win9xDOSMode)
        return;

    xgifb_reg_or(pVBInfo->Part1Port, 0x01, 0x40);
}
/*
 * XGI_SaveCRT2Info - persist mode state in the BIOS scratch registers.
 *
 * CR34 remembers the CRT1 mode number across mode switches, and the
 * SetInSlaveMode flag from VBInfo is mirrored into its bit in CR31.
 */
static void XGI_SaveCRT2Info(unsigned short ModeNo,
        struct vb_device_info *pVBInfo)
{
    unsigned short slave_bit, keep_mask;

    /* reserve CR34 for CRT1 Mode No */
    xgifb_reg_set(pVBInfo->P3d4, 0x34, ModeNo);

    slave_bit = (pVBInfo->VBInfo & SetInSlaveMode) >> 8;
    keep_mask = ~(SetInSlaveMode >> 8);
    xgifb_reg_and_or(pVBInfo->P3d4, 0x31, keep_mask, slave_bit);
}
/*
 * XGI_GetCRT2ResInfo - compute the CRT2 source/destination resolution.
 *
 * Looks up the mode's horizontal/vertical totals, doubles them for
 * half-dot-clock / double-scan extended modes, applies a set of panel
 * specific fix-ups for non-VESA LCD timings, and stores the result in
 * pVBInfo->VGAHDE/HDE and VGAVDE/VDE (source and destination start out
 * identical here; scaling code adjusts them later).
 */
static void XGI_GetCRT2ResInfo(unsigned short ModeNo,
        unsigned short ModeIdIndex,
        struct vb_device_info *pVBInfo)
{
    /* Note: modeflag is only assigned and used in the ModeNo > 0x13
     * branch below. */
    unsigned short xres, yres, modeflag, resindex;

    resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);

    if (ModeNo <= 0x13) {
        xres = pVBInfo->StResInfo[resindex].HTotal;
        yres = pVBInfo->StResInfo[resindex].VTotal;
        /* si+St_ResInfo */
    } else {
        xres = pVBInfo->ModeResInfo[resindex].HTotal; /* xres->ax */
        yres = pVBInfo->ModeResInfo[resindex].VTotal; /* yres->bx */
        /* si+St_ModeFlag */
        modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

        if (modeflag & HalfDCLK)
            xres *= 2;

        if (modeflag & DoubleScanMode)
            yres *= 2;
    }

    if (pVBInfo->VBInfo & SetCRT2ToLCD) {
        if (pVBInfo->IF_DEF_LVDS == 0) {
            /* Panel-specific line-count fix-ups for modes that
             * the panel timing tables expect with stretched
             * vertical totals (non-VESA timings). */
            if (pVBInfo->LCDResInfo == Panel1600x1200) {
                if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
                    if (yres == 1024)
                        yres = 1056;
                }
            }

            if (pVBInfo->LCDResInfo == Panel1280x1024) {
                if (yres == 400)
                    yres = 405;
                else if (yres == 350)
                    yres = 360;

                if (pVBInfo->LCDInfo & LCDVESATiming) {
                    if (yres == 360)
                        yres = 375;
                }
            }

            if (pVBInfo->LCDResInfo == Panel1024x768) {
                if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
                    if (!(pVBInfo->LCDInfo
                            & LCDNonExpanding)) {
                        if (yres == 350)
                            yres = 357;
                        else if (yres == 400)
                            yres = 420;
                        else if (yres == 480)
                            yres = 525;
                    }
                }
            }
        }

        /* 720-wide modes are displayed as 640 on LCD */
        if (xres == 720)
            xres = 640;
    }

    pVBInfo->VGAHDE = xres;
    pVBInfo->HDE = xres;
    pVBInfo->VGAVDE = yres;
    pVBInfo->VDE = yres;
}
/*
 * XGI_IsLCDDualLink - return 1 when CRT2 drives an LCD (LCDA or LCDB)
 * that is configured for dual-link operation, 0 otherwise.
 */
static unsigned char XGI_IsLCDDualLink(struct vb_device_info *pVBInfo)
{
    unsigned char lcd_active, dual_link;

    lcd_active = (pVBInfo->VBInfo &
            (SetCRT2ToLCD | SetCRT2ToLCDA)) != 0; /* shampoo0129 */
    dual_link = (pVBInfo->LCDInfo & SetLCDDualLink) != 0;

    return (lcd_active && dual_link) ? 1 : 0;
}
/*
 * XGI_GetRAMDAC2DATA - derive CRT2 totals from the CRT1 CRTC values.
 *
 * Used for the CRT2-to-RAMDAC path: reconstructs the horizontal total
 * (from CRTC chars + overflow bits) and vertical total (CR6 plus the
 * CR7/CR14 overflow bits) out of either the standard-mode table or the
 * XGINEWUB_CRT1Table entry, and stores them in pVBInfo->VGAHT/HT and
 * VGAVT/VT.  RVBHCMAX/RVBHCFACT are forced to 1 (no scaling).
 */
static void XGI_GetRAMDAC2DATA(unsigned short ModeNo,
        unsigned short ModeIdIndex,
        unsigned short RefreshRateTableIndex,
        struct vb_device_info *pVBInfo)
{
    /* temp2 is only assigned and used in the extended-mode branch. */
    unsigned short tempax, tempbx, temp1, temp2, modeflag = 0, tempcx,
            StandTableIndex, CRT1Index;

    pVBInfo->RVBHCMAX = 1;
    pVBInfo->RVBHCFACT = 1;

    if (ModeNo <= 0x13) {
        modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
        StandTableIndex = XGI_GetModePtr(ModeNo, ModeIdIndex, pVBInfo);
        tempax = pVBInfo->StandTable[StandTableIndex].CRTC[0];
        tempbx = pVBInfo->StandTable[StandTableIndex].CRTC[6];
        temp1 = pVBInfo->StandTable[StandTableIndex].CRTC[7];
    } else {
        modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
        CRT1Index = pVBInfo->RefIndex[RefreshRateTableIndex].
                Ext_CRT1CRTC;
        CRT1Index &= IndexMask;
        /* CR0 + CR5 bits [1:0] = 10-bit horizontal total */
        temp1 = (unsigned short) pVBInfo->
                XGINEWUB_CRT1Table[CRT1Index].CR[0];
        temp2 = (unsigned short) pVBInfo->
                XGINEWUB_CRT1Table[CRT1Index].CR[5];
        tempax = (temp1 & 0xFF) | ((temp2 & 0x03) << 8);
        /* CR8 = vertical total low, CR14 bit0 -> VT bit 10 */
        tempbx = (unsigned short) pVBInfo->
                XGINEWUB_CRT1Table[CRT1Index].CR[8];
        tempcx = (unsigned short) pVBInfo->
                XGINEWUB_CRT1Table[CRT1Index].CR[14] << 8;
        tempcx &= 0x0100;
        tempcx = tempcx << 2;
        tempbx |= tempcx;
        temp1 = (unsigned short) pVBInfo->
                XGINEWUB_CRT1Table[CRT1Index].CR[9];
    }

    /* CR7 overflow bits: bit0 -> VT bit 8, bit5 -> VT bit 9 */
    if (temp1 & 0x01)
        tempbx |= 0x0100;

    if (temp1 & 0x20)
        tempbx |= 0x0200;

    /* Character clocks to pixels: (chars + 5) * 8 or 9 dot width */
    tempax += 5;

    if (modeflag & Charx8Dot)
        tempax *= 8;
    else
        tempax *= 9;

    pVBInfo->VGAHT = tempax;
    pVBInfo->HT = tempax;
    tempbx++;
    pVBInfo->VGAVT = tempbx;
    pVBInfo->VT = tempbx;
}
/*
 * XGI_GetCRT2Data - fill in the CRT2 timing parameters for this mode.
 *
 * Dispatches on the CRT2 target:
 *   - RAMDAC: totals come straight from the CRT1 CRTC values.
 *   - LCD:    totals/scaling factors come from the LCD data table, and
 *             the destination size (HDE/VDE) is fixed up per panel and
 *             per non-VESA source resolution.
 *   - TV:     totals come from the TV data table, with HiVision/YPbPr/
 *             PAL/NTSC specific overrides and flicker-filter selection.
 * Results land in pVBInfo->RVBHCMAX/RVBHCFACT/VGAHT/VGAVT/HT/VT/HDE/VDE,
 * RVBHRS and NewFlickerMode.
 */
static void XGI_GetCRT2Data(unsigned short ModeNo, unsigned short ModeIdIndex,
        unsigned short RefreshRateTableIndex,
        struct vb_device_info *pVBInfo)
{
    unsigned short tempax = 0, tempbx, modeflag, resinfo;

    struct XGI_LCDDataStruct *LCDPtr = NULL;
    struct XGI_TVDataStruct *TVPtr = NULL;

    if (ModeNo <= 0x13) {
        /* si+St_ResInfo */
        modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
        resinfo = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
    } else {
        /* si+Ext_ResInfo */
        modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
        resinfo = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
    }

    pVBInfo->NewFlickerMode = 0;
    pVBInfo->RVBHRS = 50;

    if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
        XGI_GetRAMDAC2DATA(ModeNo, ModeIdIndex, RefreshRateTableIndex,
                pVBInfo);
        return;
    }

    tempbx = 4;

    if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
        LCDPtr = (struct XGI_LCDDataStruct *) XGI_GetLcdPtr(tempbx,
                ModeNo, ModeIdIndex, RefreshRateTableIndex,
                pVBInfo);

        pVBInfo->RVBHCMAX = LCDPtr->RVBHCMAX;
        pVBInfo->RVBHCFACT = LCDPtr->RVBHCFACT;
        pVBInfo->VGAHT = LCDPtr->VGAHT;
        pVBInfo->VGAVT = LCDPtr->VGAVT;
        pVBInfo->HT = LCDPtr->LCDHT;
        pVBInfo->VT = LCDPtr->LCDVT;

        /* Per-panel destination size; the VGAVDE checks map the
         * stretched non-VESA source heights (see
         * XGI_GetCRT2ResInfo) to the matching panel heights. */
        if (pVBInfo->LCDResInfo == Panel1024x768) {
            tempax = 1024;
            tempbx = 768;

            if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
                if (pVBInfo->VGAVDE == 357)
                    tempbx = 527;
                else if (pVBInfo->VGAVDE == 420)
                    tempbx = 620;
                else if (pVBInfo->VGAVDE == 525)
                    tempbx = 775;
                else if (pVBInfo->VGAVDE == 600)
                    tempbx = 775;
                else
                    tempbx = 768;
            } else
                tempbx = 768;
        } else if (pVBInfo->LCDResInfo == Panel1024x768x75) {
            tempax = 1024;
            tempbx = 768;
        } else if (pVBInfo->LCDResInfo == Panel1280x1024) {
            tempax = 1280;
            if (pVBInfo->VGAVDE == 360)
                tempbx = 768;
            else if (pVBInfo->VGAVDE == 375)
                tempbx = 800;
            else if (pVBInfo->VGAVDE == 405)
                tempbx = 864;
            else
                tempbx = 1024;
        } else if (pVBInfo->LCDResInfo == Panel1280x1024x75) {
            tempax = 1280;
            tempbx = 1024;
        } else if (pVBInfo->LCDResInfo == Panel1280x960) {
            tempax = 1280;
            if (pVBInfo->VGAVDE == 350)
                tempbx = 700;
            else if (pVBInfo->VGAVDE == 400)
                tempbx = 800;
            else if (pVBInfo->VGAVDE == 1024)
                tempbx = 960;  /* redundant: same as default */
            else
                tempbx = 960;
        } else if (pVBInfo->LCDResInfo == Panel1400x1050) {
            tempax = 1400;
            tempbx = 1050;

            if (pVBInfo->VGAVDE == 1024) {
                tempax = 1280;
                tempbx = 1024;
            }
        } else if (pVBInfo->LCDResInfo == Panel1600x1200) {
            tempax = 1600;
            tempbx = 1200; /* alan 10/14/2003 */
            if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
                if (pVBInfo->VGAVDE == 350)
                    tempbx = 875;
                else if (pVBInfo->VGAVDE == 400)
                    tempbx = 1000;
            }
        }

        /* Non-expanding: display 1:1 instead of panel-native */
        if (pVBInfo->LCDInfo & LCDNonExpanding) {
            tempax = pVBInfo->VGAHDE;
            tempbx = pVBInfo->VGAVDE;
        }

        pVBInfo->HDE = tempax;
        pVBInfo->VDE = tempbx;
        return;
    }

    if (pVBInfo->VBInfo & (SetCRT2ToTV)) {
        tempbx = 4;
        TVPtr = (struct XGI_TVDataStruct *) XGI_GetTVPtr(tempbx,
                ModeNo, ModeIdIndex, RefreshRateTableIndex,
                pVBInfo);

        pVBInfo->RVBHCMAX = TVPtr->RVBHCMAX;
        pVBInfo->RVBHCFACT = TVPtr->RVBHCFACT;
        pVBInfo->VGAHT = TVPtr->VGAHT;
        pVBInfo->VGAVT = TVPtr->VGAVT;
        pVBInfo->HDE = TVPtr->TVHDE;
        pVBInfo->VDE = TVPtr->TVVDE;
        pVBInfo->RVBHRS = TVPtr->RVBHRS;
        pVBInfo->NewFlickerMode = TVPtr->FlickerMode;

        if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
            /* High resolutions disable the flicker filter */
            if (resinfo == 0x08)
                pVBInfo->NewFlickerMode = 0x40;
            else if (resinfo == 0x09)
                pVBInfo->NewFlickerMode = 0x40;
            else if (resinfo == 0x12)
                pVBInfo->NewFlickerMode = 0x40;

            if (pVBInfo->VGAVDE == 350)
                pVBInfo->TVInfo |= TVSimuMode;

            tempax = ExtHiTVHT;
            tempbx = ExtHiTVVT;

            if (pVBInfo->VBInfo & SetInSlaveMode) {
                if (pVBInfo->TVInfo & TVSimuMode) {
                    tempax = StHiTVHT;
                    tempbx = StHiTVVT;

                    if (!(modeflag & Charx8Dot)) {
                        tempax = StHiTextTVHT;
                        tempbx = StHiTextTVVT;
                    }
                }
            }
        } else if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
            if (pVBInfo->TVInfo & SetYPbPrMode750p) {
                tempax = YPbPrTV750pHT; /* Ext750pTVHT */
                tempbx = YPbPrTV750pVT; /* Ext750pTVVT */
            }

            if (pVBInfo->TVInfo & SetYPbPrMode525p) {
                tempax = YPbPrTV525pHT; /* Ext525pTVHT */
                tempbx = YPbPrTV525pVT; /* Ext525pTVVT */
            } else if (pVBInfo->TVInfo & SetYPbPrMode525i) {
                tempax = YPbPrTV525iHT; /* Ext525iTVHT */
                tempbx = YPbPrTV525iVT; /* Ext525iTVVT */
                if (pVBInfo->TVInfo & NTSC1024x768)
                    tempax = NTSC1024x768HT;
            }
        } else {
            tempax = PALHT;
            tempbx = PALVT;
            if (!(pVBInfo->TVInfo & SetPALTV)) {
                tempax = NTSCHT;
                tempbx = NTSCVT;
                if (pVBInfo->TVInfo & NTSC1024x768)
                    tempax = NTSC1024x768HT;
            }
        }

        pVBInfo->HT = tempax;
        pVBInfo->VT = tempbx;
        return;
    }
}
/*
 * XGI_SetCRT2VCLK - program the CRT2 video clock PLL in the bridge.
 *
 * Looks up the VCLK divider pair for this mode (overridden by the LCD
 * tables where applicable) and writes it to Part4 0x0A/0x0B.  The plain
 * 301 bridge needs a 0x10 priming write to 0x0A first and takes the
 * bytes in the opposite order.  Finally strobes Part4 0x00 and sets the
 * clock-source bits in Part4 0x12 (RAMDAC path gets an extra bit).
 */
static void XGI_SetCRT2VCLK(unsigned short ModeNo, unsigned short ModeIdIndex,
        unsigned short RefreshRateTableIndex,
        struct vb_device_info *pVBInfo)
{
    unsigned char di_0, di_1, tempal;

    tempal = XGI_GetVCLKPtr(RefreshRateTableIndex, ModeNo, ModeIdIndex,
            pVBInfo);
    XGI_GetVCLKLen(tempal, &di_0, &di_1, pVBInfo);
    XGI_GetLCDVCLKPtr(&di_0, &di_1, pVBInfo);

    if (pVBInfo->VBType & VB_XGI301) { /* shampoo 0129 */
        /* 301 */
        xgifb_reg_set(pVBInfo->Part4Port, 0x0A, 0x10);
        xgifb_reg_set(pVBInfo->Part4Port, 0x0B, di_1);
        xgifb_reg_set(pVBInfo->Part4Port, 0x0A, di_0);
    } else { /* 301b/302b/301lv/302lv */
        xgifb_reg_set(pVBInfo->Part4Port, 0x0A, di_0);
        xgifb_reg_set(pVBInfo->Part4Port, 0x0B, di_1);
    }

    xgifb_reg_set(pVBInfo->Part4Port, 0x00, 0x12);

    if (pVBInfo->VBInfo & SetCRT2ToRAMDAC)
        xgifb_reg_or(pVBInfo->Part4Port, 0x12, 0x28);
    else
        xgifb_reg_or(pVBInfo->Part4Port, 0x12, 0x08);
}
/*
 * XGI_GetColorDepth - bytes-per-pixel-group factor for offset math.
 *
 * Maps the mode's ModeInfoFlag field (relative to ModeEGA) onto a small
 * lookup table; anything below EGA is clamped to the first entry.
 */
static unsigned short XGI_GetColorDepth(unsigned short ModeNo,
        unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
    static const unsigned short depth_table[6] = { 1, 2, 4, 4, 6, 8 };
    unsigned short modeflag;
    short idx;

    if (ModeNo <= 0x13)
        modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
    else
        modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

    idx = (modeflag & ModeInfoFlag) - ModeEGA;
    if (idx < 0)
        idx = 0;

    return depth_table[idx];
}
/*
 * XGI_GetOffset - compute the display line offset (pitch) for a mode.
 *
 * Normal modes: ScreenOffset table entry (indexed by the mode-info high
 * byte) times the mode's color depth.  Modes 0x7C-0x7E use a fixed base
 * offset of 0x6B with their own depth table.  The offset is doubled for
 * interlaced modes in both cases.
 */
static unsigned short XGI_GetOffset(unsigned short ModeNo,
        unsigned short ModeIdIndex,
        unsigned short RefreshRateTableIndex,
        struct xgi_hw_device_info *HwDeviceExtension,
        struct vb_device_info *pVBInfo)
{
    static const unsigned short special_depth[] = { 0x01, 0x02, 0x04 };
    unsigned short modeinfo, infoflag, offset, colordepth;

    modeinfo = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeInfo;

    if (ModeNo <= 0x14)
        infoflag = 0;
    else
        infoflag = pVBInfo->
            RefIndex[RefreshRateTableIndex].Ext_InfoFlag;

    if ((ModeNo >= 0x7C) && (ModeNo <= 0x7E)) {
        offset = 0x6B;
        colordepth = special_depth[ModeNo - 0x7C];
    } else {
        offset = pVBInfo->ScreenOffset[(modeinfo >> 8) & 0xFF];
        colordepth = XGI_GetColorDepth(ModeNo, ModeIdIndex, pVBInfo);
    }

    if (infoflag & InterlaceMode)
        offset = offset << 1;

    return offset * colordepth;
}
/*
 * XGI_SetCRT2Offset - program the CRT2 line offset registers.
 *
 * No-op in slave mode (CRT1 timing drives the bridge).  Otherwise writes
 * the pitch computed by XGI_GetOffset to Part1 0x07/0x09 (low/high byte)
 * and a derived value to Part1 0x03.
 */
static void XGI_SetCRT2Offset(unsigned short ModeNo,
        unsigned short ModeIdIndex,
        unsigned short RefreshRateTableIndex,
        struct xgi_hw_device_info *HwDeviceExtension,
        struct vb_device_info *pVBInfo)
{
    unsigned short offset;

    if (pVBInfo->VBInfo & SetInSlaveMode)
        return;

    offset = XGI_GetOffset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
            HwDeviceExtension, pVBInfo);

    xgifb_reg_set(pVBInfo->Part1Port, 0x07,
            (unsigned char) (offset & 0xFF));
    xgifb_reg_set(pVBInfo->Part1Port, 0x09,
            (unsigned char) ((offset & 0xFF00) >> 8));
    xgifb_reg_set(pVBInfo->Part1Port, 0x03,
            (unsigned char) (((offset >> 3) & 0xFF) + 1));
}
/*
 * XGI_SetCRT2FIFO - set a fixed CRT2 FIFO configuration: high threshold
 * with automatic threshold disabled (Part1 0x01), and the default low
 * threshold of 0x04 in the low 6 bits of Part1 0x02.
 */
static void XGI_SetCRT2FIFO(struct vb_device_info *pVBInfo)
{
    /* threshold high ,disable auto threshold */
    xgifb_reg_set(pVBInfo->Part1Port, 0x01, 0x3B);
    /* threshold low default 04h */
    xgifb_reg_and_or(pVBInfo->Part1Port, 0x02, ~(0x3F), 0x04);
}
/*
 * XGI_PreSetGroup1 - prepare the Part1 register group before the main
 * CRT2 timing programming: sets the line offset and FIFO thresholds,
 * clears registers 0x04-0x06 and 0x50, and loads 0x02 with 0x44.
 */
static void XGI_PreSetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
        struct xgi_hw_device_info *HwDeviceExtension,
        unsigned short RefreshRateTableIndex,
        struct vb_device_info *pVBInfo)
{
    unsigned short reg;
    unsigned short CRT1Index = 0, resinfo = 0;

    /* CRT1Index/resinfo are looked up but currently unused here;
     * kept to mirror the original flow. */
    if (ModeNo > 0x13) {
        CRT1Index = pVBInfo->
            RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
        CRT1Index &= IndexMask;
        resinfo = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
    }

    XGI_SetCRT2Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
            HwDeviceExtension, pVBInfo);
    XGI_SetCRT2FIFO(pVBInfo);
    /* XGI_SetCRT2Sync(ModeNo,RefreshRateTableIndex); */

    for (reg = 4; reg < 7; reg++)
        xgifb_reg_set(pVBInfo->Part1Port, reg, 0x0);

    xgifb_reg_set(pVBInfo->Part1Port, 0x50, 0x00);
    xgifb_reg_set(pVBInfo->Part1Port, 0x02, 0x44); /* temp 0206 */
}
/*
 * Program the Part1 (group 1) CRT2 horizontal/vertical timing registers
 * from the VGA timing values in pVBInfo, halving the horizontal numbers
 * when the mode uses HalfDCLK.  Registers written: 0x08-0x12 and 0x2C.
 *
 * NOTE(review): resinfo is computed below but never used in this
 * function — looks like leftover code; confirm before removing.
 */
static void XGI_SetGroup1(unsigned short ModeNo, unsigned short ModeIdIndex,
		struct xgi_hw_device_info *HwDeviceExtension,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short temp = 0, tempax = 0, tempbx = 0, tempcx = 0,
			pushbx = 0, CRT1Index = 0, modeflag, resinfo = 0;

	if (ModeNo > 0x13) {
		CRT1Index = pVBInfo->
			RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
		CRT1Index &= IndexMask;
		resinfo = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
	}

	/* Mode flags come from the standard or extended table. */
	if (ModeNo <= 0x13)
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	else
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	/* bainy change table name */
	if (modeflag & HalfDCLK) {
		/* HalfDCLK: all horizontal values are divided by two. */
		/* BTVGA2HT 0x08,0x09 */
		temp = (pVBInfo->VGAHT / 2 - 1) & 0x0FF;
		xgifb_reg_set(pVBInfo->Part1Port, 0x08, temp);
		temp = (((pVBInfo->VGAHT / 2 - 1) & 0xFF00) >> 8) << 4;
		xgifb_reg_and_or(pVBInfo->Part1Port, 0x09, ~0x0F0, temp);
		/* BTVGA2HDEE 0x0A,0x0C */
		temp = (pVBInfo->VGAHDE / 2 + 16) & 0x0FF;
		xgifb_reg_set(pVBInfo->Part1Port, 0x0A, temp);
		tempcx = ((pVBInfo->VGAHT - pVBInfo->VGAHDE) / 2) >> 2;
		pushbx = pVBInfo->VGAHDE / 2 + 16;
		tempcx = tempcx >> 1;
		tempbx = pushbx + tempcx; /* bx BTVGA@HRS 0x0B,0x0C */
		tempcx += tempbx;

		if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
			/* RAMDAC output: take sync positions from the
			 * CRT1 CRTC table instead of the computed ones. */
			tempbx = pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[4];
			tempbx |= ((pVBInfo->
				XGINEWUB_CRT1Table[CRT1Index].CR[14] &
					0xC0) << 2);
			tempbx = (tempbx - 3) << 3; /* (VGAHRS-3)*8 */
			tempcx = pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[5];
			tempcx &= 0x1F;
			temp = pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[15];
			temp = (temp & 0x04) << (5 - 2); /* VGAHRE D[5] */
			tempcx = ((tempcx | temp) - 3) << 3; /* (VGAHRE-3)*8 */
		}

		tempbx += 4;
		tempcx += 4;

		/* Clamp retrace end to half the horizontal total. */
		if (tempcx > (pVBInfo->VGAHT / 2))
			tempcx = pVBInfo->VGAHT / 2;

		temp = tempbx & 0x00FF;
		xgifb_reg_set(pVBInfo->Part1Port, 0x0B, temp);
	} else {
		/* Full-dot-clock path: same sequence, unhalved values. */
		temp = (pVBInfo->VGAHT - 1) & 0x0FF; /* BTVGA2HT 0x08,0x09 */
		xgifb_reg_set(pVBInfo->Part1Port, 0x08, temp);
		temp = (((pVBInfo->VGAHT - 1) & 0xFF00) >> 8) << 4;
		xgifb_reg_and_or(pVBInfo->Part1Port, 0x09, ~0x0F0, temp);
		/* BTVGA2HDEE 0x0A,0x0C */
		temp = (pVBInfo->VGAHDE + 16) & 0x0FF;
		xgifb_reg_set(pVBInfo->Part1Port, 0x0A, temp);
		tempcx = (pVBInfo->VGAHT - pVBInfo->VGAHDE) >> 2; /* cx */
		pushbx = pVBInfo->VGAHDE + 16;
		tempcx = tempcx >> 1;
		tempbx = pushbx + tempcx; /* bx BTVGA@HRS 0x0B,0x0C */
		tempcx += tempbx;

		if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
			/* Note: uses different CR indices (3/5/4/6) than
			 * the HalfDCLK branch (4/14/5/15). */
			tempbx = pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[3];
			tempbx |= ((pVBInfo->
				XGINEWUB_CRT1Table[CRT1Index].CR[5] &
					0xC0) << 2);
			tempbx = (tempbx - 3) << 3; /* (VGAHRS-3)*8 */
			tempcx = pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[4];
			tempcx &= 0x1F;
			temp = pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[6];
			temp = (temp & 0x04) << (5 - 2); /* VGAHRE D[5] */
			tempcx = ((tempcx | temp) - 3) << 3; /* (VGAHRE-3)*8 */
			tempbx += 16;
			tempcx += 16;
		}

		if (tempcx > pVBInfo->VGAHT)
			tempcx = pVBInfo->VGAHT;

		temp = tempbx & 0x00FF;
		xgifb_reg_set(pVBInfo->Part1Port, 0x0B, temp);
	}

	/* Assemble the overflow bits for register 0x0C from the
	 * retrace-start (tempbx) and display-end (pushbx) high bytes. */
	tempax = (tempax & 0x00FF) | (tempbx & 0xFF00);
	tempbx = pushbx;
	tempbx = (tempbx & 0x00FF) | ((tempbx & 0xFF00) << 4);
	tempax |= (tempbx & 0xFF00);
	temp = (tempax & 0xFF00) >> 8;
	xgifb_reg_set(pVBInfo->Part1Port, 0x0C, temp);
	temp = tempcx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part1Port, 0x0D, temp);

	/* Vertical total - 1 into 0x0E. */
	tempcx = (pVBInfo->VGAVT - 1);
	temp = tempcx & 0x00FF;

	if (pVBInfo->IF_DEF_CH7005 == 1) {
		/* CH7005 TV encoder present: adjust by one when VBInfo
		 * has either of bits 0x0C set. */
		if (pVBInfo->VBInfo & 0x0C)
			temp--;
	}

	xgifb_reg_set(pVBInfo->Part1Port, 0x0E, temp);

	/* Vertical display end - 1 into 0x0F, high bits into 0x12. */
	tempbx = pVBInfo->VGAVDE - 1;
	temp = tempbx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part1Port, 0x0F, temp);
	temp = ((tempbx & 0xFF00) << 3) >> 8;
	temp |= ((tempcx & 0xFF00) >> 8);
	xgifb_reg_set(pVBInfo->Part1Port, 0x12, temp);

	tempax = pVBInfo->VGAVDE;
	tempbx = pVBInfo->VGAVDE;
	tempcx = pVBInfo->VGAVT;
	/* BTVGA2VRS 0x10,0x11 */
	tempbx = (pVBInfo->VGAVT + pVBInfo->VGAVDE) >> 1;
	/* BTVGA2VRE 0x11 */
	tempcx = ((pVBInfo->VGAVT - pVBInfo->VGAVDE) >> 4) + tempbx + 1;

	if (pVBInfo->VBInfo & SetCRT2ToRAMDAC) {
		/* RAMDAC: vertical retrace values from the CRT1 table,
		 * with overflow bits pulled from CR[9] and CR[14]. */
		tempbx = pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[10];
		temp = pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[9];

		if (temp & 0x04)
			tempbx |= 0x0100;

		if (temp & 0x080)
			tempbx |= 0x0200;

		temp = pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[14];

		if (temp & 0x08)
			tempbx |= 0x0400;

		temp = pVBInfo->XGINEWUB_CRT1Table[CRT1Index].CR[11];
		tempcx = (tempcx & 0xFF00) | (temp & 0x00FF);
	}

	temp = tempbx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part1Port, 0x10, temp);
	temp = ((tempbx & 0xFF00) >> 8) << 4;
	temp = ((tempcx & 0x000F) | (temp));
	xgifb_reg_set(pVBInfo->Part1Port, 0x11, temp);

	/* 0x2C bits 7/6: double scan and half dot clock flags. */
	tempax = 0;

	if (modeflag & DoubleScanMode)
		tempax |= 0x80;

	if (modeflag & HalfDCLK)
		tempax |= 0x40;

	xgifb_reg_and_or(pVBInfo->Part1Port, 0x2C, ~0x0C0, tempax);
}
/*
 * Derive the horizontal total in the VGA clock domain from the CRT2
 * timing: (VT - VDE) * RVBHCFACT * HT / ((VGAVT - VGAVDE) * RVBHCMAX).
 */
static unsigned short XGI_GetVGAHT2(struct vb_device_info *pVBInfo)
{
	unsigned long vga_vblank, crt2_vblank;

	vga_vblank = ((pVBInfo->VGAVT - pVBInfo->VGAVDE) * pVBInfo->RVBHCMAX)
			& 0xFFFF;
	crt2_vblank = (pVBInfo->VT - pVBInfo->VDE) * pVBInfo->RVBHCFACT;

	return (unsigned short) ((crt2_vblank * pVBInfo->HT) / vga_vblank);
}
/*
 * Program the CRT2 "lock" timing registers in Part1 (slave mode only):
 * horizontal display/blank/retrace (0x03-0x08) and vertical timing
 * (0x09-0x12, 0x16-0x1A), including many per-mode TV fix-ups.
 * Returns immediately when CRT2 is not in slave mode.
 */
static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
		struct xgi_hw_device_info *HwDeviceExtension,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short push1, push2, tempax, tempbx = 0, tempcx, temp, resinfo,
			modeflag, CRT1Index;

	if (ModeNo <= 0x13) {
		/* si+St_ResInfo */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
		resinfo = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
	} else {
		/* si+Ext_ResInfo */
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		resinfo = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
		CRT1Index = pVBInfo->
				RefIndex[RefreshRateTableIndex].Ext_CRT1CRTC;
		CRT1Index &= IndexMask;
	}

	/* Only relevant when CRT2 runs as slave of CRT1. */
	if (!(pVBInfo->VBInfo & SetInSlaveMode))
		return;

	temp = 0xFF; /* set MAX HT */
	xgifb_reg_set(pVBInfo->Part1Port, 0x03, temp);
	/* if (modeflag & Charx8Dot) */
	/* tempcx = 0x08; */
	/* else */
	tempcx = 0x08;	/* character cell width used as divisor below */

	if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
		modeflag |= Charx8Dot;

	tempax = pVBInfo->VGAHDE; /* 0x04 Horizontal Display End */

	if (modeflag & HalfDCLK)
		tempax = tempax >> 1;

	/* Horizontal display end in character clocks, minus one. */
	tempax = (tempax / tempcx) - 1;
	tempbx |= ((tempax & 0x00FF) << 8);
	temp = tempax & 0x00FF;
	xgifb_reg_set(pVBInfo->Part1Port, 0x04, temp);

	temp = (tempbx & 0xFF00) >> 8;

	if (pVBInfo->VBInfo & SetCRT2ToTV) {
		/* Non-B/C/LV bridges need the start shifted by 2. */
		if (!(pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
				| VB_XGI302LV | VB_XGI301C)))
			temp += 2;

		if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
			if (pVBInfo->VBType & VB_XGI301LV) {
				if (pVBInfo->VBExtInfo == VB_YPbPr1080i) {
					if (resinfo == 7)
						temp -= 2;
				}
			} else if (resinfo == 7) {
				temp -= 2;
			}
		}
	}

	/* 0x05 Horizontal Display Start */
	xgifb_reg_set(pVBInfo->Part1Port, 0x05, temp);
	/* 0x06 Horizontal Blank end */
	xgifb_reg_set(pVBInfo->Part1Port, 0x06, 0x03);

	if (!(pVBInfo->VBInfo & DisableCRT2Display)) { /* 030226 bainy */
		if (pVBInfo->VBInfo & SetCRT2ToTV)
			tempax = pVBInfo->VGAHT;
		else
			tempax = XGI_GetVGAHT2(pVBInfo);
	}

	if (tempax >= pVBInfo->VGAHT)
		tempax = pVBInfo->VGAHT;

	if (modeflag & HalfDCLK)
		tempax = tempax >> 1;

	tempax = (tempax / tempcx) - 5;
	tempcx = tempax; /* 20030401 0x07 horizontal Retrace Start */
	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
		/* HiVision: retrace start from display end with fix-ups. */
		temp = (tempbx & 0x00FF) - 1;
		if (!(modeflag & HalfDCLK)) {
			temp -= 6;
			if (pVBInfo->TVInfo & TVSimuMode) {
				temp -= 4;
				if (ModeNo > 0x13)
					temp -= 10;
			}
		}
	} else {
		/* tempcx = tempbx & 0x00FF ; */
		/* Midpoint between display end and computed retrace. */
		tempbx = (tempbx & 0xFF00) >> 8;
		tempcx = (tempcx + tempbx) >> 1;
		temp = (tempcx & 0x00FF) + 2;

		if (pVBInfo->VBInfo & SetCRT2ToTV) {
			temp -= 1;
			if (!(modeflag & HalfDCLK)) {
				if ((modeflag & Charx8Dot)) {
					temp += 4;
					if (pVBInfo->VGAHDE >= 800)
						temp -= 6;
				}
			}
		} else {
			if (!(modeflag & HalfDCLK)) {
				temp -= 4;
				/* Empirical per-panel/per-mode fix-ups. */
				if (pVBInfo->LCDResInfo != Panel1280x960) {
					if (pVBInfo->VGAHDE >= 800) {
						temp -= 7;
						if (pVBInfo->ModeType ==
							ModeEGA) {
							if (pVBInfo->VGAVDE ==
								1024) {
								temp += 15;
								if (pVBInfo->LCDResInfo != Panel1280x1024) {
									temp +=
										7;
								}
							}
						}
						if (pVBInfo->VGAHDE >= 1280) {
							if (pVBInfo->LCDResInfo
								!= Panel1280x960) {
								if (pVBInfo->LCDInfo
									& LCDNonExpanding) {
									temp
										+= 28;
								}
							}
						}
					}
				}
			}
		}
	}

	/* 0x07 Horizontal Retrace Start */
	xgifb_reg_set(pVBInfo->Part1Port, 0x07, temp);
	/* 0x08 Horizontal Retrace End */
	xgifb_reg_set(pVBInfo->Part1Port, 0x08, 0);

	if (pVBInfo->VBInfo & SetCRT2ToTV) {
		if (pVBInfo->TVInfo & TVSimuMode) {
			/* TV simulation mode: hard-coded retrace values
			 * per mode number (text/graphics VGA modes). */
			if ((ModeNo == 0x06) || (ModeNo == 0x10) || (ModeNo
					== 0x11) || (ModeNo == 0x13) || (ModeNo
					== 0x0F)) {
				xgifb_reg_set(pVBInfo->Part1Port, 0x07, 0x5b);
				xgifb_reg_set(pVBInfo->Part1Port, 0x08, 0x03);
			}

			if ((ModeNo == 0x00) || (ModeNo == 0x01)) {
				if (pVBInfo->TVInfo & SetNTSCTV) {
					xgifb_reg_set(pVBInfo->Part1Port,
							0x07, 0x2A);
					xgifb_reg_set(pVBInfo->Part1Port,
							0x08, 0x61);
				} else {
					xgifb_reg_set(pVBInfo->Part1Port,
							0x07, 0x2A);
					xgifb_reg_set(pVBInfo->Part1Port,
							0x08, 0x41);
					xgifb_reg_set(pVBInfo->Part1Port,
							0x0C, 0xF0);
				}
			}

			if ((ModeNo == 0x02) || (ModeNo == 0x03) || (ModeNo
					== 0x07)) {
				if (pVBInfo->TVInfo & SetNTSCTV) {
					xgifb_reg_set(pVBInfo->Part1Port,
							0x07, 0x54);
					xgifb_reg_set(pVBInfo->Part1Port,
							0x08, 0x00);
				} else {
					xgifb_reg_set(pVBInfo->Part1Port,
							0x07, 0x55);
					xgifb_reg_set(pVBInfo->Part1Port,
							0x08, 0x00);
					xgifb_reg_set(pVBInfo->Part1Port,
							0x0C, 0xF0);
				}
			}

			if ((ModeNo == 0x04) || (ModeNo == 0x05) || (ModeNo
					== 0x0D) || (ModeNo == 0x50)) {
				if (pVBInfo->TVInfo & SetNTSCTV) {
					xgifb_reg_set(pVBInfo->Part1Port,
							0x07, 0x30);
					xgifb_reg_set(pVBInfo->Part1Port,
							0x08, 0x03);
				} else {
					xgifb_reg_set(pVBInfo->Part1Port,
							0x07, 0x2f);
					xgifb_reg_set(pVBInfo->Part1Port,
							0x08, 0x02);
				}
			}
		}
	}

	xgifb_reg_set(pVBInfo->Part1Port, 0x18, 0x03); /* 0x18 SR0B */
	xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0xF0, 0x00);
	xgifb_reg_set(pVBInfo->Part1Port, 0x09, 0xFF); /* 0x09 Set Max VT */

	tempbx = pVBInfo->VGAVT;
	push1 = tempbx;
	tempcx = 0x121;
	tempbx = pVBInfo->VGAVDE; /* 0x0E Virtical Display End */

	/* Snap odd vertical display ends to canonical values. */
	if (tempbx == 357)
		tempbx = 350;
	if (tempbx == 360)
		tempbx = 350;
	if (tempbx == 375)
		tempbx = 350;
	if (tempbx == 405)
		tempbx = 400;
	if (tempbx == 525)
		tempbx = 480;

	push2 = tempbx;

	if (pVBInfo->VBInfo & SetCRT2ToLCD) {
		if (pVBInfo->LCDResInfo == Panel1024x768) {
			if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
				if (tempbx == 350)
					tempbx += 5;
				if (tempbx == 480)
					tempbx += 5;
			}
		}
	}

	/* NOTE(review): tempbx is decremented twice here and the first
	 * temp assignment is overwritten — looks like leftover code from
	 * the original assembly translation; confirm before cleaning up. */
	tempbx--;
	temp = tempbx & 0x00FF;
	tempbx--;
	temp = tempbx & 0x00FF;
	/* 0x10 vertical Blank Start */
	xgifb_reg_set(pVBInfo->Part1Port, 0x10, temp);
	tempbx = push2;
	tempbx--;
	temp = tempbx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part1Port, 0x0E, temp);

	if (tempbx & 0x0100)
		tempcx |= 0x0002;

	tempax = 0x000B;

	if (modeflag & DoubleScanMode)
		tempax |= 0x08000;

	if (tempbx & 0x0200)
		tempcx |= 0x0040;

	temp = (tempax & 0xFF00) >> 8;
	xgifb_reg_set(pVBInfo->Part1Port, 0x0B, temp);

	if (tempbx & 0x0400)
		tempcx |= 0x0600;

	/* 0x11 Vertival Blank End */
	xgifb_reg_set(pVBInfo->Part1Port, 0x11, 0x00);

	/* Vertical retrace start = VDE + (VGAVT - VDE) / 2, roughly. */
	tempax = push1;
	tempax -= tempbx; /* 0x0C Vertical Retrace Start */
	tempax = tempax >> 2;
	push1 = tempax; /* push ax */

	if (resinfo != 0x09) {
		tempax = tempax << 1;
		tempbx += tempax;
	}

	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
		if (pVBInfo->VBType & VB_XGI301LV) {
			if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
				tempbx -= 10;
			} else {
				if (pVBInfo->TVInfo & TVSimuMode) {
					if (pVBInfo->TVInfo & SetPALTV) {
						if (pVBInfo->VBType &
							VB_XGI301LV) {
							if (!(pVBInfo->TVInfo &
								(SetYPbPrMode525p |
								SetYPbPrMode750p |
								SetYPbPrMode1080i)))
								tempbx += 40;
						} else {
							tempbx += 40;
						}
					}
				}
			}
		} else {
			tempbx -= 10;
		}
	} else {
		if (pVBInfo->TVInfo & TVSimuMode) {
			if (pVBInfo->TVInfo & SetPALTV) {
				if (pVBInfo->VBType & VB_XGI301LV) {
					if (!(pVBInfo->TVInfo &
						(SetYPbPrMode525p |
						SetYPbPrMode750p |
						SetYPbPrMode1080i)))
						tempbx += 40;
				} else {
					tempbx += 40;
				}
			}
		}
	}

	tempax = push1;
	tempax = tempax >> 2;
	tempax++;
	tempax += tempbx;
	push1 = tempax; /* push ax */

	/* PAL: clamp retrace start to 513 when retrace end crosses it. */
	if ((pVBInfo->TVInfo & SetPALTV)) {
		if (tempbx <= 513) {
			if (tempax >= 513)
				tempbx = 513;
		}
	}

	temp = tempbx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part1Port, 0x0C, temp);
	tempbx--;
	temp = tempbx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part1Port, 0x10, temp);

	if (tempbx & 0x0100)
		tempcx |= 0x0008;

	if (tempbx & 0x0200)
		xgifb_reg_and_or(pVBInfo->Part1Port, 0x0B, 0x0FF, 0x20);

	tempbx++;

	if (tempbx & 0x0100)
		tempcx |= 0x0004;

	if (tempbx & 0x0200)
		tempcx |= 0x0080;

	if (tempbx & 0x0400)
		tempcx |= 0x0C00;

	tempbx = push1; /* pop ax */
	temp = tempbx & 0x00FF;
	temp &= 0x0F;
	/* 0x0D vertical Retrace End */
	xgifb_reg_set(pVBInfo->Part1Port, 0x0D, temp);

	if (tempbx & 0x0010)
		tempcx |= 0x2000;

	/* Flush the accumulated overflow bits. */
	temp = tempcx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part1Port, 0x0A, temp); /* 0x0A CR07 */
	temp = (tempcx & 0x0FF00) >> 8;
	xgifb_reg_set(pVBInfo->Part1Port, 0x17, temp); /* 0x17 SR0A */
	tempax = modeflag;
	temp = (tempax & 0xFF00) >> 8;
	temp = (temp >> 1) & 0x09;

	if (pVBInfo->VBType & (VB_XGI301LV | VB_XGI302LV | VB_XGI301C))
		temp |= 0x01;

	xgifb_reg_set(pVBInfo->Part1Port, 0x16, temp); /* 0x16 SR01 */
	xgifb_reg_set(pVBInfo->Part1Port, 0x0F, 0); /* 0x0F CR14 */
	xgifb_reg_set(pVBInfo->Part1Port, 0x12, 0); /* 0x12 CR17 */

	/* 0x1A bit 7: 18-bit RGB LCD interface. */
	if (pVBInfo->LCDInfo & LCDRGB18Bit)
		temp = 0x80;
	else
		temp = 0x00;

	xgifb_reg_set(pVBInfo->Part1Port, 0x1A, temp); /* 0x1A SR0E */

	return;
}
/*
 * Program the Part2 (group 2) registers of the video bridge: TV output
 * routing (register 0x00), TV timing tables, flicker filter, blank and
 * retrace positions, vertical scaling factors, and TV-standard specific
 * fix-ups (PAL/PALM/NTSC/YPbPr/HiVision).
 */
static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	unsigned short i, j, tempax, tempbx, tempcx, temp, push1, push2,
			modeflag, resinfo, crt2crtc;
	unsigned char *TimingPoint;

	unsigned long longtemp, tempeax, tempebx, temp2, tempecx;

	if (ModeNo <= 0x13) {
		/* si+St_ResInfo */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
		resinfo = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
		crt2crtc = pVBInfo->SModeIDTable[ModeIdIndex].St_CRT2CRTC;
	} else {
		/* si+Ext_ResInfo */
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		resinfo = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
		crt2crtc = pVBInfo->RefIndex[RefreshRateTableIndex].
				Ext_CRT2CRTC;
	}

	/* Build Part2 0x00: enable bits for the selected TV outputs.
	 * Bits are written from the HIGH byte of tempax. */
	tempax = 0;

	if (!(pVBInfo->VBInfo & SetCRT2ToAVIDEO))
		tempax |= 0x0800;

	if (!(pVBInfo->VBInfo & SetCRT2ToSVIDEO))
		tempax |= 0x0400;

	if (pVBInfo->VBInfo & SetCRT2ToSCART)
		tempax |= 0x0200;

	if (!(pVBInfo->TVInfo & SetPALTV))
		tempax |= 0x1000;

	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
		tempax |= 0x0100;

	if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))
		tempax &= 0xfe00;

	tempax = (tempax & 0xff00) >> 8;

	xgifb_reg_set(pVBInfo->Part2Port, 0x0, tempax);

	/* Pick the TV timing table matching the output standard. */
	TimingPoint = pVBInfo->NTSCTiming;

	if (pVBInfo->TVInfo & SetPALTV)
		TimingPoint = pVBInfo->PALTiming;

	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
		TimingPoint = pVBInfo->HiTVExtTiming;

		if (pVBInfo->VBInfo & SetInSlaveMode)
			TimingPoint = pVBInfo->HiTVSt2Timing;

		if (pVBInfo->SetFlag & TVSimuMode)
			TimingPoint = pVBInfo->HiTVSt1Timing;

		if (!(modeflag & Charx8Dot))
			TimingPoint = pVBInfo->HiTVTextTiming;
	}

	if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
		if (pVBInfo->TVInfo & SetYPbPrMode525i)
			TimingPoint = pVBInfo->YPbPr525iTiming;

		if (pVBInfo->TVInfo & SetYPbPrMode525p)
			TimingPoint = pVBInfo->YPbPr525pTiming;

		if (pVBInfo->TVInfo & SetYPbPrMode750p)
			TimingPoint = pVBInfo->YPbPr750pTiming;
	}

	/* Copy the table into Part2 0x01-0x2D and 0x39-0x45; j keeps
	 * advancing across both loops (and is reused further below). */
	for (i = 0x01, j = 0; i <= 0x2D; i++, j++)
		xgifb_reg_set(pVBInfo->Part2Port, i, TimingPoint[j]);

	for (i = 0x39; i <= 0x45; i++, j++)
		/* di->temp2[j] */
		xgifb_reg_set(pVBInfo->Part2Port, i, TimingPoint[j]);

	if (pVBInfo->VBInfo & SetCRT2ToTV)
		xgifb_reg_and_or(pVBInfo->Part2Port, 0x3A, 0x1F, 0x00);

	/* Flicker filter: keep only bit 7 of NewFlickerMode. */
	temp = pVBInfo->NewFlickerMode;
	temp &= 0x80;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0xFF, temp);

	/* NOTE(review): the HiVision value 950 assigned here is always
	 * overwritten by the PAL/NTSC if/else that follows — looks like a
	 * missing "else"; confirm against the original vendor code. */
	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
		tempax = 950;

	if (pVBInfo->TVInfo & SetPALTV)
		tempax = 520;
	else
		tempax = 440;

	if (pVBInfo->VDE <= tempax) {
		/* Center the picture vertically: spread the spare lines
		 * and fold them into Part2 0x01/0x02. */
		tempax -= pVBInfo->VDE;
		tempax = tempax >> 2;
		tempax = (tempax & 0x00FF) | ((tempax & 0x00FF) << 8);
		push1 = tempax;
		temp = (tempax & 0xFF00) >> 8;
		temp += (unsigned short) TimingPoint[0];

		if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
				| VB_XGI302LV | VB_XGI301C)) {
			if (pVBInfo->VBInfo & (SetCRT2ToAVIDEO
					| SetCRT2ToSVIDEO | SetCRT2ToSCART
					| SetCRT2ToYPbPr)) {
				tempcx = pVBInfo->VGAHDE;
				if (tempcx >= 1024) {
					temp = 0x17; /* NTSC */
					if (pVBInfo->TVInfo & SetPALTV)
						temp = 0x19; /* PAL */
				}
			}
		}

		xgifb_reg_set(pVBInfo->Part2Port, 0x01, temp);
		tempax = push1;
		temp = (tempax & 0xFF00) >> 8;
		temp += TimingPoint[1];

		if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
				| VB_XGI302LV | VB_XGI301C)) {
			if ((pVBInfo->VBInfo & (SetCRT2ToAVIDEO
					| SetCRT2ToSVIDEO | SetCRT2ToSCART
					| SetCRT2ToYPbPr))) {
				tempcx = pVBInfo->VGAHDE;
				if (tempcx >= 1024) {
					temp = 0x1D; /* NTSC */
					if (pVBInfo->TVInfo & SetPALTV)
						temp = 0x52; /* PAL */
				}
			}
		}
		xgifb_reg_set(pVBInfo->Part2Port, 0x02, temp);
	}

	/* 301b */
	/* Horizontal total - 2 into Part2 0x1B/0x1D (halved for dual
	 * link LCD). */
	tempcx = pVBInfo->HT;

	if (XGI_IsLCDDualLink(pVBInfo))
		tempcx = tempcx >> 1;

	tempcx -= 2;
	temp = tempcx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part2Port, 0x1B, temp);

	temp = (tempcx & 0xFF00) >> 8;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x1D, ~0x0F, temp);

	/* Blank/retrace window around HT/2, offset by table entries. */
	tempcx = pVBInfo->HT >> 1;
	push1 = tempcx; /* push cx */
	tempcx += 7;

	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
		tempcx -= 4;

	temp = tempcx & 0x00FF;
	temp = temp << 4;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x22, 0x0F, temp);

	tempbx = TimingPoint[j] | ((TimingPoint[j + 1]) << 8);
	tempbx += tempcx;
	push2 = tempbx;
	temp = tempbx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part2Port, 0x24, temp);
	temp = (tempbx & 0xFF00) >> 8;
	temp = temp << 4;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x25, 0x0F, temp);

	tempbx = push2;
	tempbx = tempbx + 8;

	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
		tempbx = tempbx - 4;
		tempcx = tempbx;
	}

	temp = (tempbx & 0x00FF) << 4;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x29, 0x0F, temp);

	j += 2;
	tempcx += (TimingPoint[j] | ((TimingPoint[j + 1]) << 8));
	temp = tempcx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part2Port, 0x27, temp);
	temp = ((tempcx & 0xFF00) >> 8) << 4;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x28, 0x0F, temp);

	tempcx += 8;

	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
		tempcx -= 4;

	temp = tempcx & 0xFF;
	temp = temp << 4;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x2A, 0x0F, temp);

	tempcx = push1; /* pop cx */
	j += 2;
	temp = TimingPoint[j] | ((TimingPoint[j + 1]) << 8);
	tempcx -= temp;
	temp = tempcx & 0x00FF;
	temp = temp << 4;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x2D, 0x0F, temp);

	tempcx -= 11;

	if (!(pVBInfo->VBInfo & SetCRT2ToTV)) {
		tempax = XGI_GetVGAHT2(pVBInfo);
		tempcx = tempax - 1;
	}
	temp = tempcx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part2Port, 0x2E, temp);

	/* Vertical display end with per-resolution replacements, halved
	 * for interlaced-style TV output on non-progressive modes. */
	tempbx = pVBInfo->VDE;

	if (pVBInfo->VGAVDE == 360)
		tempbx = 746;
	if (pVBInfo->VGAVDE == 375)
		tempbx = 746;
	if (pVBInfo->VGAVDE == 405)
		tempbx = 853;

	if (pVBInfo->VBInfo & SetCRT2ToTV) {
		if (pVBInfo->VBType &
		    (VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
			if (!(pVBInfo->TVInfo &
			    (SetYPbPrMode525p | SetYPbPrMode750p)))
				tempbx = tempbx >> 1;
		} else
			tempbx = tempbx >> 1;
	}

	tempbx -= 2;
	temp = tempbx & 0x00FF;

	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
		if (pVBInfo->VBType & VB_XGI301LV) {
			if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
				if (pVBInfo->VBInfo & SetInSlaveMode) {
					if (ModeNo == 0x2f)
						temp += 1;
				}
			}
		} else {
			if (pVBInfo->VBInfo & SetInSlaveMode) {
				if (ModeNo == 0x2f)
					temp += 1;
			}
		}
	}

	xgifb_reg_set(pVBInfo->Part2Port, 0x2F, temp);

	temp = (tempcx & 0xFF00) >> 8;
	temp |= ((tempbx & 0xFF00) >> 8) << 6;

	if (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)) {
		if (pVBInfo->VBType & VB_XGI301LV) {
			if (pVBInfo->TVInfo & SetYPbPrMode1080i) {
				temp |= 0x10;

				if (!(pVBInfo->VBInfo & SetCRT2ToSVIDEO))
					temp |= 0x20;
			}
		} else {
			temp |= 0x10;
			if (!(pVBInfo->VBInfo & SetCRT2ToSVIDEO))
				temp |= 0x20;
		}
	}

	xgifb_reg_set(pVBInfo->Part2Port, 0x30, temp);

	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
			| VB_XGI302LV | VB_XGI301C)) { /* TV gatingno */
		tempbx = pVBInfo->VDE;
		tempcx = tempbx - 2;

		if (pVBInfo->VBInfo & SetCRT2ToTV) {
			if (!(pVBInfo->TVInfo & (SetYPbPrMode525p
					| SetYPbPrMode750p)))
				tempbx = tempbx >> 1;
		}

		if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
			temp = 0;
			if (tempcx & 0x0400)
				temp |= 0x20;

			if (tempbx & 0x0400)
				temp |= 0x40;

			xgifb_reg_set(pVBInfo->Part4Port, 0x10, temp);
		}

		temp = (((tempbx - 3) & 0x0300) >> 8) << 5;
		xgifb_reg_set(pVBInfo->Part2Port, 0x46, temp);
		temp = (tempbx - 3) & 0x00FF;
		xgifb_reg_set(pVBInfo->Part2Port, 0x47, temp);
	}

	/* Vertical scaling: bit 0x2000 in tempbx means "no scaling". */
	tempbx = tempbx & 0x00FF;

	if (!(modeflag & HalfDCLK)) {
		tempcx = pVBInfo->VGAHDE;
		if (tempcx >= pVBInfo->HDE) {
			tempbx |= 0x2000;
			tempax &= 0x00FF;
		}
	}

	tempcx = 0x0101;

	if (pVBInfo->VBInfo & SetCRT2ToTV) { /*301b*/
		if (pVBInfo->VGAHDE >= 1024) {
			tempcx = 0x1920;
			if (pVBInfo->VGAHDE >= 1280) {
				tempcx = 0x1420;
				tempbx = tempbx & 0xDFFF;
			}
		}
	}

	if (!(tempbx & 0x2000)) {
		/* Compute the scaling factor:
		 * VGAHDE * (tempcx hi / tempcx lo) * 8K(*8) / HDE,
		 * rounded up, then split across registers 0x44-0x46. */
		if (modeflag & HalfDCLK)
			tempcx = (tempcx & 0xFF00) | ((tempcx & 0x00FF) << 1);

		push1 = tempbx;
		tempeax = pVBInfo->VGAHDE;
		tempebx = (tempcx & 0xFF00) >> 8;
		longtemp = tempeax * tempebx;
		tempecx = tempcx & 0x00FF;
		longtemp = longtemp / tempecx;

		/* 301b */
		tempecx = 8 * 1024;

		if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
				| VB_XGI302LV | VB_XGI301C)) {
			tempecx = tempecx * 8;
		}

		longtemp = longtemp * tempecx;
		tempecx = pVBInfo->HDE;
		temp2 = longtemp % tempecx;
		tempeax = longtemp / tempecx;
		if (temp2 != 0)
			tempeax += 1;

		tempax = (unsigned short) tempeax;

		/* 301b */
		if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
				| VB_XGI302LV | VB_XGI301C)) {
			tempcx = ((tempax & 0xFF00) >> 5) >> 8;
		}
		/* end 301b */

		tempbx = push1;
		tempbx = (unsigned short) (((tempeax & 0x0000FF00) & 0x1F00)
				| (tempbx & 0x00FF));
		tempax = (unsigned short) (((tempeax & 0x000000FF) << 8)
				| (tempax & 0x00FF));
		temp = (tempax & 0xFF00) >> 8;
	} else {
		/* NOTE(review): (tempax & 0x00FF) >> 8 is always 0 — the
		 * mask and shift cancel; likely a translation artifact. */
		temp = (tempax & 0x00FF) >> 8;
	}

	xgifb_reg_set(pVBInfo->Part2Port, 0x44, temp);
	temp = (tempbx & 0xFF00) >> 8;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x45, ~0x03F, temp);
	temp = tempcx & 0x00FF;

	if (tempbx & 0x2000)
		temp = 0;

	if (!(pVBInfo->VBInfo & SetCRT2ToLCD))
		temp |= 0x18;

	xgifb_reg_and_or(pVBInfo->Part2Port, 0x46, ~0x1F, temp);

	/* TV subcarrier-related constants: PAL vs NTSC. */
	if (pVBInfo->TVInfo & SetPALTV) {
		tempbx = 0x0382;
		tempcx = 0x007e;
	} else {
		tempbx = 0x0369;
		tempcx = 0x0061;
	}

	temp = tempbx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part2Port, 0x4b, temp);
	temp = tempcx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part2Port, 0x4c, temp);

	temp = ((tempcx & 0xFF00) >> 8) & 0x03;
	temp = temp << 2;
	temp |= ((tempbx & 0xFF00) >> 8) & 0x03;

	if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
		temp |= 0x10;

		if (pVBInfo->TVInfo & SetYPbPrMode525p)
			temp |= 0x20;

		if (pVBInfo->TVInfo & SetYPbPrMode750p)
			temp |= 0x60;
	}

	xgifb_reg_set(pVBInfo->Part2Port, 0x4d, temp);
	temp = xgifb_reg_get(pVBInfo->Part2Port, 0x43); /* 301b change */
	xgifb_reg_set(pVBInfo->Part2Port, 0x43, (unsigned short) (temp - 3));

	if (!(pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))) {
		if (pVBInfo->TVInfo & NTSC1024x768) {
			/* NTSC 1024x768: override 0x1C-0x30 with the
			 * dedicated adjustment table. */
			TimingPoint = XGI_NTSC1024AdjTime;
			for (i = 0x1c, j = 0; i <= 0x30; i++, j++) {
				xgifb_reg_set(pVBInfo->Part2Port, i,
						TimingPoint[j]);
			}
			xgifb_reg_set(pVBInfo->Part2Port, 0x43, 0x72);
		}
	}

	/* [ycchen] 01/14/03 Modify for 301C PALM Support */
	if (pVBInfo->VBType & VB_XGI301C) {
		if (pVBInfo->TVInfo & SetPALMTV)
			xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x08,
					0x08); /* PALM Mode */
	}

	if (pVBInfo->TVInfo & SetPALMTV) {
		tempax = (unsigned char) xgifb_reg_get(pVBInfo->Part2Port,
				0x01);
		tempax--;
		xgifb_reg_and(pVBInfo->Part2Port, 0x01, tempax);

		/* if ( !( pVBInfo->VBType & VB_XGI301C ) ) */
		xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xEF);
	}

	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV) {
		if (!(pVBInfo->VBInfo & SetInSlaveMode))
			xgifb_reg_set(pVBInfo->Part2Port, 0x0B, 0x00);
	}

	/* NOTE(review): this trailing early-return is a no-op — it is
	 * the last statement of the function either way. */
	if (pVBInfo->VBInfo & SetCRT2ToTV)
		return;
}
/*
 * Program the Part2 LCD timing registers: active width/height, vertical
 * and horizontal display/retrace positions derived from the panel's LCD
 * descriptor table (XGI_GetLcdPtr), plus non-VESA fix-ups for 525- and
 * 420-line sources.  No-op unless CRT2 drives the LCD.
 *
 * NOTE(review): resinfo, CRT1Index and modeflag are computed below but
 * never used afterwards — apparent leftovers; confirm before removal.
 */
static void XGI_SetLCDRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
		struct xgi_hw_device_info *HwDeviceExtension,
		unsigned short RefreshRateTableIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short push1, push2, pushbx, tempax, tempbx, tempcx, temp,
			tempah, tempbh, tempch, resinfo, modeflag, CRT1Index;

	struct XGI_LCDDesStruct *LCDBDesPtr = NULL;

	if (ModeNo <= 0x13) {
		/* si+St_ResInfo */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
		resinfo = pVBInfo->SModeIDTable[ModeIdIndex].St_ResInfo;
	} else {
		/* si+Ext_ResInfo */
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
		resinfo = pVBInfo->EModeIDTable[ModeIdIndex].Ext_RESINFO;
		CRT1Index = pVBInfo->RefIndex[RefreshRateTableIndex].
				Ext_CRT1CRTC;
		CRT1Index &= IndexMask;
	}

	if (!(pVBInfo->VBInfo & SetCRT2ToLCD))
		return;

	/* Horizontal active: HDE - 1, halved for dual-link panels. */
	tempbx = pVBInfo->HDE; /* RHACTE=HDE-1 */

	if (XGI_IsLCDDualLink(pVBInfo))
		tempbx = tempbx >> 1;

	tempbx -= 1;
	temp = tempbx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part2Port, 0x2C, temp);
	temp = (tempbx & 0xFF00) >> 8;
	temp = temp << 4;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x2B, 0x0F, temp);
	temp = 0x01;

	if (pVBInfo->LCDResInfo == Panel1280x1024) {
		if (pVBInfo->ModeType == ModeEGA) {
			if (pVBInfo->VGAHDE >= 1024) {
				temp = 0x02;
				if (pVBInfo->LCDInfo & LCDVESATiming)
					temp = 0x01;
			}
		}
	}

	xgifb_reg_set(pVBInfo->Part2Port, 0x0B, temp);

	/* Vertical active: VDE - 1 into 0x03, overflow bits into 0x0C. */
	tempbx = pVBInfo->VDE; /* RTVACTEO=(VDE-1)&0xFF */
	push1 = tempbx;
	tempbx--;
	temp = tempbx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part2Port, 0x03, temp);
	temp = ((tempbx & 0xFF00) >> 8) & 0x07;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x0C, ~0x07, temp);

	/* Vertical total: VT - 1 into 0x19/0x1A. */
	tempcx = pVBInfo->VT - 1;
	push2 = tempcx + 1;
	temp = tempcx & 0x00FF; /* RVTVT=VT-1 */
	xgifb_reg_set(pVBInfo->Part2Port, 0x19, temp);
	temp = (tempcx & 0xFF00) >> 8;
	temp = temp << 5;
	xgifb_reg_set(pVBInfo->Part2Port, 0x1A, temp);
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x09, 0xF0, 0x00);
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0xF0, 0x00);
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x17, 0xFB, 0x00);
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x18, 0xDF, 0x00);

	/* Customized LCDB Des no add */
	tempbx = 5;
	LCDBDesPtr = (struct XGI_LCDDesStruct *) XGI_GetLcdPtr(tempbx, ModeNo,
			ModeIdIndex, RefreshRateTableIndex, pVBInfo);

	/* Native panel size by LCDResInfo (fallback 1600x1200). */
	tempah = pVBInfo->LCDResInfo;
	tempah &= PanelResInfo;

	if ((tempah == Panel1024x768) || (tempah == Panel1024x768x75)) {
		tempbx = 1024;
		tempcx = 768;
	} else if ((tempah == Panel1280x1024) ||
		   (tempah == Panel1280x1024x75)) {
		tempbx = 1280;
		tempcx = 1024;
	} else if (tempah == Panel1400x1050) {
		tempbx = 1400;
		tempcx = 1050;
	} else {
		tempbx = 1600;
		tempcx = 1200;
	}

	if (pVBInfo->LCDInfo & EnableScalingLCD) {
		tempbx = pVBInfo->HDE;
		tempcx = pVBInfo->VDE;
	}

	pushbx = tempbx;
	tempax = pVBInfo->VT;

	/* Cache the panel descriptor values in pVBInfo. */
	pVBInfo->LCDHDES = LCDBDesPtr->LCDHDES;
	pVBInfo->LCDHRS = LCDBDesPtr->LCDHRS;
	pVBInfo->LCDVDES = LCDBDesPtr->LCDVDES;
	pVBInfo->LCDVRS = LCDBDesPtr->LCDVRS;

	/* Vertical display start/end (wrap at VT). */
	tempbx = pVBInfo->LCDVDES;
	tempcx += tempbx;

	if (tempcx >= tempax)
		tempcx -= tempax; /* lcdvdes */

	temp = tempbx & 0x00FF; /* RVEQ1EQ=lcdvdes */
	xgifb_reg_set(pVBInfo->Part2Port, 0x05, temp);
	temp = tempcx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part2Port, 0x06, temp);
	tempch = ((tempcx & 0xFF00) >> 8) & 0x07;
	tempbh = ((tempbx & 0xFF00) >> 8) & 0x07;
	tempah = tempch;
	tempah = tempah << 3;
	tempah |= tempbh;
	xgifb_reg_set(pVBInfo->Part2Port, 0x02, tempah);

	/* getlcdsync() */
	/* Vertical retrace start/end (wrap at VT). */
	XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
	tempcx = tempbx;
	tempax = pVBInfo->VT;
	tempbx = pVBInfo->LCDVRS;

	/* if (SetLCD_Info & EnableScalingLCD) */
	tempcx += tempbx;
	if (tempcx >= tempax)
		tempcx -= tempax;

	temp = tempbx & 0x00FF; /* RTVACTEE=lcdvrs */
	xgifb_reg_set(pVBInfo->Part2Port, 0x04, temp);
	temp = (tempbx & 0xFF00) >> 8;
	temp = temp << 4;
	temp |= (tempcx & 0x000F);
	xgifb_reg_set(pVBInfo->Part2Port, 0x01, temp);

	/* Horizontal display start/end (wrap at HT; halved for dual
	 * link; +1 on 302LV and +1 again on 301C for the tap4 filter). */
	tempcx = pushbx;
	tempax = pVBInfo->HT;
	tempbx = pVBInfo->LCDHDES;
	tempbx &= 0x0FFF;

	if (XGI_IsLCDDualLink(pVBInfo)) {
		tempax = tempax >> 1;
		tempbx = tempbx >> 1;
		tempcx = tempcx >> 1;
	}

	if (pVBInfo->VBType & VB_XGI302LV)
		tempbx += 1;

	if (pVBInfo->VBType & VB_XGI301C) /* tap4 */
		tempbx += 1;

	tempcx += tempbx;

	if (tempcx >= tempax)
		tempcx -= tempax;

	temp = tempbx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part2Port, 0x1F, temp); /* RHBLKE=lcdhdes */
	temp = ((tempbx & 0xFF00) >> 8) << 4;
	xgifb_reg_set(pVBInfo->Part2Port, 0x20, temp);
	temp = tempcx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part2Port, 0x23, temp); /* RHEQPLE=lcdhdee */
	temp = (tempcx & 0xFF00) >> 8;
	xgifb_reg_set(pVBInfo->Part2Port, 0x25, temp);

	/* getlcdsync() */
	/* Horizontal retrace start/end (wrap at HT). */
	XGI_GetLCDSync(&tempax, &tempbx, pVBInfo);
	tempcx = tempax;
	tempax = pVBInfo->HT;
	tempbx = pVBInfo->LCDHRS;

	/* if ( SetLCD_Info & EnableScalingLCD) */
	if (XGI_IsLCDDualLink(pVBInfo)) {
		tempax = tempax >> 1;
		tempbx = tempbx >> 1;
		tempcx = tempcx >> 1;
	}

	if (pVBInfo->VBType & VB_XGI302LV)
		tempbx += 1;

	tempcx += tempbx;

	if (tempcx >= tempax)
		tempcx -= tempax;

	temp = tempbx & 0x00FF; /* RHBURSTS=lcdhrs */
	xgifb_reg_set(pVBInfo->Part2Port, 0x1C, temp);
	temp = (tempbx & 0xFF00) >> 8;
	temp = temp << 4;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x1D, ~0x0F0, temp);
	temp = tempcx & 0x00FF; /* RHSYEXP2S=lcdhre */
	xgifb_reg_set(pVBInfo->Part2Port, 0x21, temp);

	if (!(pVBInfo->LCDInfo & LCDVESATiming)) {
		/* Non-VESA panel timing: fixed 0x2f/0x30 values for 525-
		 * and 420-line sources, depending on bridge generation. */
		if (pVBInfo->VGAVDE == 525) {
			if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
					| VB_XGI301LV | VB_XGI302LV
					| VB_XGI301C)) {
				temp = 0xC6;
			} else
				temp = 0xC4;

			xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
			xgifb_reg_set(pVBInfo->Part2Port, 0x30, 0xB3);
		}

		if (pVBInfo->VGAVDE == 420) {
			if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
					| VB_XGI301LV | VB_XGI302LV
					| VB_XGI301C)) {
				temp = 0x4F;
			} else
				temp = 0x4E;
			xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
		}
	}
}
/* --------------------------------------------------------------------- */
/* Function : XGI_GetTap4Ptr */
/* Input : tempcx - 0 selects horizontal scaling, non-zero vertical */
/* Output : pointer to the Tap4 register timing entry for this mode */
/* Description : picks the Tap4 scaler timing table (enlarge, 1:1, or */
/* a TV-standard downscale table) and finds the matching entry */
/* --------------------------------------------------------------------- */
static struct XGI301C_Tap4TimingStruct *XGI_GetTap4Ptr(unsigned short tempcx,
		struct vb_device_info *pVBInfo)
{
	unsigned short src, dst, idx;
	struct XGI301C_Tap4TimingStruct *table;

	/* tempcx selects the scaling axis: 0 = horizontal, else vertical. */
	if (tempcx == 0) {
		src = pVBInfo->VGAHDE;
		dst = pVBInfo->HDE;
	} else {
		src = pVBInfo->VGAVDE;
		dst = pVBInfo->VDE;
	}

	if (src < dst)
		return &EnlargeTap4Timing[0];	/* upscaling */

	if (src == dst)
		return &NoScaleTap4Timing[0];	/* 1:1 */

	/* Downscaling: choose the table for the TV output standard;
	 * later matches override earlier ones. */
	table = NTSCTap4Timing;

	if (pVBInfo->TVInfo & SetPALTV)
		table = PALTap4Timing;

	if (pVBInfo->VBInfo & SetCRT2ToYPbPr) {
		if (pVBInfo->TVInfo & SetYPbPrMode525i)
			table = YPbPr525iTap4Timing;
		if (pVBInfo->TVInfo & SetYPbPrMode525p)
			table = YPbPr525pTap4Timing;
		if (pVBInfo->TVInfo & SetYPbPrMode750p)
			table = YPbPr750pTap4Timing;
	}

	if (pVBInfo->VBInfo & SetCRT2ToHiVisionTV)
		table = HiTVTap4Timing;

	/* Scan for the entry matching the source size; a DE of 0xFFFF
	 * terminates the table and acts as the default entry. */
	for (idx = 0; table[idx].DE != 0xFFFF; idx++) {
		if (table[idx].DE == src)
			break;
	}

	return &table[idx];
}
/*
 * Program the 301C bridge's Tap4 (4-tap scaling filter) registers.
 * With the Tap4 macro undefined (the default here) only the disable
 * path is compiled, so i/j/Tap4TimingPtr go unused in that build.
 */
static void XGI_SetTap4Regs(struct vb_device_info *pVBInfo)
{
	unsigned short i, j;

	struct XGI301C_Tap4TimingStruct *Tap4TimingPtr;

	/* Tap4 hardware exists only on the XGI301C bridge. */
	if (!(pVBInfo->VBType & VB_XGI301C))
		return;

#ifndef Tap4
	xgifb_reg_and(pVBInfo->Part2Port, 0x4E, 0xEB); /* Disable Tap4 */
#else /* Tap4 Setting */

	Tap4TimingPtr = XGI_GetTap4Ptr(0, pVBInfo); /* Set Horizontal Scaling */
	/* Horizontal coefficients live in Part2 0x80-0xBF. */
	for (i = 0x80, j = 0; i <= 0xBF; i++, j++)
		xgifb_reg_set(pVBInfo->Part2Port, i, Tap4TimingPtr->Reg[j]);

	if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
	    (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV))) {
		/* Set Vertical Scaling */
		/* Vertical coefficients live in Part2 0xC0-0xFE. */
		Tap4TimingPtr = XGI_GetTap4Ptr(1, pVBInfo);
		for (i = 0xC0, j = 0; i < 0xFF; i++, j++)
			xgifb_reg_set(pVBInfo->Part2Port,
					i,
					Tap4TimingPtr->Reg[j]);
	}

	if ((pVBInfo->VBInfo & SetCRT2ToTV) &&
	    (!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)))
		/* Enable V.Scaling */
		xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x04);
	else
		/* Enable H.Scaling */
		xgifb_reg_and_or(pVBInfo->Part2Port, 0x4E, ~0x14, 0x10);
#endif
}
/*
 * Program the Part3 (group 3) TV registers: base PAL/NTSC constants,
 * PALM overrides, and — for HiVision or YPbPr output — a full 0x00-0x3E
 * register dump from the matching table.
 */
static void XGI_SetGroup3(unsigned short ModeNo, unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short reg;
	unsigned char *table;
	unsigned short modeflag;

	/* Mode flags from the standard or extended mode table. */
	modeflag = (ModeNo <= 0x13) ?
			pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag :
			pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	xgifb_reg_set(pVBInfo->Part3Port, 0x00, 0x00);

	/* Base 0x13/0x14 constants differ between PAL and NTSC. */
	if (pVBInfo->TVInfo & SetPALTV) {
		xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
		xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
	} else {
		xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xF5);
		xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xB7);
	}

	if (!(pVBInfo->VBInfo & SetCRT2ToTV))
		return;

	if (pVBInfo->TVInfo & SetPALMTV) {
		xgifb_reg_set(pVBInfo->Part3Port, 0x13, 0xFA);
		xgifb_reg_set(pVBInfo->Part3Port, 0x14, 0xC8);
		xgifb_reg_set(pVBInfo->Part3Port, 0x3D, 0xA8);
	}

	if (pVBInfo->VBInfo & (SetCRT2ToHiVisionTV | SetCRT2ToYPbPr)) {
		if (pVBInfo->TVInfo & SetYPbPrMode525i)
			return;

		/* Pick the register table; later matches win. */
		table = pVBInfo->HiTVGroup3Data;

		if (pVBInfo->SetFlag & TVSimuMode) {
			table = pVBInfo->HiTVGroup3Simu;
			if (!(modeflag & Charx8Dot))
				table = pVBInfo->HiTVGroup3Text;
		}

		if (pVBInfo->TVInfo & SetYPbPrMode525p)
			table = pVBInfo->Ren525pGroup3;

		if (pVBInfo->TVInfo & SetYPbPrMode750p)
			table = pVBInfo->Ren750pGroup3;

		for (reg = 0; reg <= 0x3E; reg++)
			xgifb_reg_set(pVBInfo->Part3Port, reg, table[reg]);

		/* 301C Macrovision tweak for 525p. */
		if (pVBInfo->VBType & VB_XGI301C) {
			if (pVBInfo->TVInfo & SetYPbPrMode525p)
				xgifb_reg_set(pVBInfo->Part3Port, 0x28, 0x3f);
		}
	}
} /* end of XGI_SetGroup3 */
/*
 * XGI_SetGroup4 - program the bridge Part4 (scaler / CRT2 clock) registers.
 *
 * Writes the scaling factors (RVBHCFACT/RVBHCMAX), VGA H/V totals, the
 * back-end vertical scaling ratio derived from VDE/VGAVDE, and, on
 * 301B-class bridges, the CRT2 horizontal retrace/total values.  Finally
 * sets the CRT2 VCLK unless running under XPDOS.
 */
static void XGI_SetGroup4(unsigned short ModeNo, unsigned short ModeIdIndex,
		unsigned short RefreshRateTableIndex,
		struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	unsigned short tempax, tempcx, tempbx, modeflag, temp, temp2;

	unsigned long tempebx, tempeax, templong;

	if (ModeNo <= 0x13)
		/* si+St_ResInfo */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	else
		/* si+Ext_ResInfo */
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	/* Part4 0x13/0x14: horizontal scaling factor and max */
	temp = pVBInfo->RVBHCFACT;
	xgifb_reg_set(pVBInfo->Part4Port, 0x13, temp);

	tempbx = pVBInfo->RVBHCMAX;
	temp = tempbx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part4Port, 0x14, temp);
	/* collect the overflow bits for Part4 0x15 in temp2 */
	temp2 = ((tempbx & 0xFF00) >> 8) << 7;
	tempcx = pVBInfo->VGAHT - 1;
	temp = tempcx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part4Port, 0x16, temp);

	temp = ((tempcx & 0xFF00) >> 8) << 3;
	temp2 |= temp;

	tempcx = pVBInfo->VGAVT - 1;
	if (!(pVBInfo->VBInfo & SetCRT2ToTV))
		tempcx -= 5;

	temp = tempcx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part4Port, 0x17, temp);
	temp = temp2 | ((tempcx & 0xFF00) >> 8);
	xgifb_reg_set(pVBInfo->Part4Port, 0x15, temp);
	xgifb_reg_or(pVBInfo->Part4Port, 0x0D, 0x08);
	tempcx = pVBInfo->VBInfo;
	tempbx = pVBInfo->VGAHDE;

	if (modeflag & HalfDCLK)
		tempbx = tempbx >> 1;

	if (XGI_IsLCDDualLink(pVBInfo))
		tempbx = tempbx >> 1;

	/* choose the Part4 0x0E value from output type and width */
	if (tempcx & SetCRT2ToHiVisionTV) {
		temp = 0;
		if (tempbx <= 1024)
			temp = 0xA0;
		if (tempbx == 1280)
			temp = 0xC0;
	} else if (tempcx & SetCRT2ToTV) {
		temp = 0xA0;
		if (tempbx <= 800)
			temp = 0x80;
	} else {
		temp = 0x80;
		if (pVBInfo->VBInfo & SetCRT2ToLCD) {
			temp = 0;
			if (tempbx > 800)
				temp = 0x60;
		}
	}

	if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p)) {
		temp = 0x00;
		if (pVBInfo->VGAHDE == 1280)
			temp = 0x40;
		if (pVBInfo->VGAHDE == 1024)
			temp = 0x20;
	}
	xgifb_reg_and_or(pVBInfo->Part4Port, 0x0E, ~0xEF, temp);

	tempebx = pVBInfo->VDE;

	if (tempcx & SetCRT2ToHiVisionTV) {
		/*
		 * NOTE(review): temp only ever holds byte-range values at
		 * this point, so (temp & 0xE000) is always 0 and tempbx is
		 * unconditionally halved - and tempbx is overwritten below
		 * before any further use.  Looks like dead legacy code;
		 * confirm against the original BIOS source before removing.
		 */
		if (!(temp & 0xE000))
			tempbx = tempbx >> 1;
	}

	tempcx = pVBInfo->RVBHRS;
	temp = tempcx & 0x00FF;
	xgifb_reg_set(pVBInfo->Part4Port, 0x18, temp);

	tempeax = pVBInfo->VGAVDE;
	tempcx |= 0x04000;

	if (tempeax <= tempebx) {
		tempcx = (tempcx & (~0x4000));
		tempeax = pVBInfo->VGAVDE;
	} else {
		tempeax -= tempebx;
	}

	/* vertical scaling ratio = VGAVDE * 256 * 1024 / VDE, rounded up */
	templong = (tempeax * 256 * 1024) % tempebx;
	tempeax = (tempeax * 256 * 1024) / tempebx;
	tempebx = tempeax;

	if (templong != 0)
		tempebx++;

	temp = (unsigned short) (tempebx & 0x000000FF);
	xgifb_reg_set(pVBInfo->Part4Port, 0x1B, temp);

	temp = (unsigned short) ((tempebx & 0x0000FF00) >> 8);
	xgifb_reg_set(pVBInfo->Part4Port, 0x1A, temp);
	tempbx = (unsigned short) (tempebx >> 16);
	temp = tempbx & 0x00FF;
	temp = temp << 4;
	temp |= ((tempcx & 0xFF00) >> 8);
	xgifb_reg_set(pVBInfo->Part4Port, 0x19, temp);

	/* 301b */
	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
			| VB_XGI302LV | VB_XGI301C)) {
		temp = 0x0028;
		xgifb_reg_set(pVBInfo->Part4Port, 0x1C, temp);
		tempax = pVBInfo->VGAHDE;
		if (modeflag & HalfDCLK)
			tempax = tempax >> 1;

		if (XGI_IsLCDDualLink(pVBInfo))
			tempax = tempax >> 1;

		/* if((pVBInfo->VBInfo&(SetCRT2ToLCD)) ||
		   ((pVBInfo->TVInfo&SetYPbPrMode525p) ||
		   (pVBInfo->TVInfo&SetYPbPrMode750p))) { */
		if (pVBInfo->VBInfo & SetCRT2ToLCD) {
			if (tempax > 800)
				tempax -= 800;
		} else {
			if (pVBInfo->VGAHDE > 800) {
				if (pVBInfo->VGAHDE == 1024)
					tempax = (tempax * 25 / 32) - 1;
				else
					tempax = (tempax * 20 / 32) - 1;
			}
		}
		tempax -= 1;

		/*
		if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVisionTV)) {
			if (pVBInfo->VBType & VB_XGI301LV) {
				if (!(pVBInfo->TVInfo &
				    (SetYPbPrMode525p |
				    SetYPbPrMode750p |
				    SetYPbPrMode1080i))) {
					if (pVBInfo->VGAHDE > 800) {
						if (pVBInfo->VGAHDE == 1024)
							tempax =(tempax * 25 /
							32) - 1;
						else
							tempax = (tempax * 20 /
							32) - 1;
					}
				}
			} else {
				if (pVBInfo->VGAHDE > 800) {
					if (pVBInfo->VGAHDE == 1024)
						tempax = (tempax * 25 / 32) - 1;
					else
						tempax = (tempax * 20 / 32) - 1;
				}
			}
		}
		*/

		temp = (tempax & 0xFF00) >> 8;
		temp = ((temp & 0x0003) << 4);
		xgifb_reg_set(pVBInfo->Part4Port, 0x1E, temp);
		temp = (tempax & 0x00FF);
		xgifb_reg_set(pVBInfo->Part4Port, 0x1D, temp);

		if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToHiVisionTV)) {
			if (pVBInfo->VGAHDE > 800)
				xgifb_reg_or(pVBInfo->Part4Port, 0x1E, 0x08);
		}
		temp = 0x0036;

		if (pVBInfo->VBInfo & SetCRT2ToTV) {
			if (!(pVBInfo->TVInfo & (NTSC1024x768
					| SetYPbPrMode525p | SetYPbPrMode750p
					| SetYPbPrMode1080i))) {
				temp |= 0x0001;
				if ((pVBInfo->VBInfo & SetInSlaveMode)
						&& (!(pVBInfo->TVInfo
							& TVSimuMode)))
					temp &= (~0x0001);
			}
		}

		xgifb_reg_and_or(pVBInfo->Part4Port, 0x1F, 0x00C0, temp);
		tempbx = pVBInfo->HT;
		if (XGI_IsLCDDualLink(pVBInfo))
			tempbx = tempbx >> 1;
		tempbx = (tempbx >> 1) - 2;
		temp = ((tempbx & 0x0700) >> 8) << 3;
		xgifb_reg_and_or(pVBInfo->Part4Port, 0x21, 0x00C0, temp);
		temp = tempbx & 0x00FF;
		xgifb_reg_set(pVBInfo->Part4Port, 0x22, temp);
	}
	/* end 301b */

	if (pVBInfo->ISXPDOS == 0)
		XGI_SetCRT2VCLK(ModeNo, ModeIdIndex, RefreshRateTableIndex,
				pVBInfo);
}
/* Enable the CRT2 engine by setting SR1E bit 5. */
static void XGINew_EnableCRT2(struct vb_device_info *pVBInfo)
{
	xgifb_reg_and_or(pVBInfo->P3c4, 0x1E, 0xFF, 0x20);
}
/*
 * XGI_SetGroup5 - Part5 (DAC) setup step for CRT2.
 *
 * For VGA-class modes not running in slave/simu/disabled configurations,
 * enable the CRT2 engine.  The DAC load itself (LoadDAC2) is disabled;
 * ModeNo/ModeIdIndex are kept only for interface compatibility.  The
 * unused Pindex/Pdata port locals of the original have been removed.
 */
static void XGI_SetGroup5(unsigned short ModeNo, unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	if (pVBInfo->ModeType == ModeVGA) {
		if (!(pVBInfo->VBInfo & (SetInSlaveMode | LoadDACFlag
				| CRT2DisplayFlag))) {
			XGINew_EnableCRT2(pVBInfo);
			/* LoadDAC2(pVBInfo->Part5Port, ModeNo, ModeIdIndex); */
		}
	}
}
/* Enable CRT gating: set CR63 bit 6.  HwDeviceExtension is unused. */
static void XGI_EnableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	xgifb_reg_and_or(pVBInfo->P3d4, 0x63, 0xBF, 0x40);
}
/* Disable CRT gating: clear CR63 bit 6.  HwDeviceExtension is unused. */
static void XGI_DisableGatingCRT(struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	xgifb_reg_and_or(pVBInfo->P3d4, 0x63, 0xBF, 0x00);
}
/*----------------------------------------------------------------------------*/
/* input */
/* bl[5] : 1;LVDS signal on */
/* bl[1] : 1;LVDS backlight on */
/* bl[0] : 1:LVDS VDD on */
/* bh: 100000b : clear bit 5, to set bit5 */
/* 000010b : clear bit 1, to set bit1 */
/* 000001b : clear bit 0, to set bit0 */
/*----------------------------------------------------------------------------*/
/*
 * XGI_XG21BLSignalVDD - drive the XG21 LVDS power GPIOs via CR48.
 * tempbh selects which bits to change (bit5 = LVDS signal, bit1 =
 * backlight, bit0 = VDD); tempbl supplies the new values for those bits.
 */
void XGI_XG21BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
		struct vb_device_info *pVBInfo)
{
	unsigned char CR4A, temp;

	/*
	 * NOTE(review): CR4A is read but never used afterwards - possibly a
	 * leftover or an intentional dummy read; confirm before removing.
	 */
	CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
	/* only the signal/backlight/VDD bits are meaningful */
	tempbh &= 0x23;
	tempbl &= 0x23;
	xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */

	if (tempbh & 0x20) {
		/* backlight value is mirrored into CR B4[1] */
		temp = (tempbl >> 4) & 0x02;

		/* CR B4[1] */
		xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);

	}
	temp = xgifb_reg_get(pVBInfo->P3d4, 0x48);

	temp = XG21GPIODataTransfer(temp);
	/* merge the requested bits into the current GPIO data */
	temp &= ~tempbh;
	temp |= tempbl;
	xgifb_reg_set(pVBInfo->P3d4, 0x48, temp);
}
/*
 * XGI_XG27BLSignalVDD - XG27 variant of the LVDS power GPIO control.
 * Bit 5 (LVDS signal) is routed through CR B4[2]; bits 1:0 (backlight,
 * VDD) go to the GPIOC/GPIOD bits of CR48.
 */
void XGI_XG27BLSignalVDD(unsigned short tempbh, unsigned short tempbl,
		struct vb_device_info *pVBInfo)
{
	unsigned char CR4A, temp;
	unsigned short tempbh0, tempbl0;

	/* isolate bit 5 and shift it to the CR B4[2] position */
	tempbh0 = tempbh;
	tempbl0 = tempbl;
	tempbh0 &= 0x20;
	tempbl0 &= 0x20;
	tempbh0 >>= 3;
	tempbl0 >>= 3;

	if (tempbh & 0x20) {
		/* backlight value is mirrored into CR B4[1] */
		temp = (tempbl >> 4) & 0x02;

		/* CR B4[1] */
		xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~0x02, temp);

	}
	xgifb_reg_and_or(pVBInfo->P3d4, 0xB4, ~tempbh0, tempbl0);

	/*
	 * NOTE(review): CR4A is read but never used afterwards - possibly a
	 * leftover or an intentional dummy read; confirm before removing.
	 */
	CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A);
	tempbh &= 0x03;
	tempbl &= 0x03;
	tempbh <<= 2;
	tempbl <<= 2; /* GPIOC,GPIOD */
	xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~tempbh); /* enable GPIO write */
	xgifb_reg_and_or(pVBInfo->P3d4, 0x48, ~tempbh, tempbl);
}
/* --------------------------------------------------------------------- */
/*
 * Read the OEM LCD capability table index from CR36.
 * Out-of-range values fall back to entry 0.
 */
unsigned short XGI_GetLVDSOEMTableIndex(struct vb_device_info *pVBInfo)
{
	const unsigned short count = sizeof(XGI21_LCDCapList)
			/ sizeof(struct XGI21_LVDSCapStruct);
	unsigned short idx = xgifb_reg_get(pVBInfo->P3d4, 0x36);

	if (idx >= count)
		idx = 0;

	return idx;
}
/* --------------------------------------------------------------------- */
/* Function : XGI_XG21SetPanelDelay */
/* Input : */
/* Output : */
/* Description : */
/* I/P : bl : 1 ; T1 : the duration between CPL on and signal on */
/* : bl : 2 ; T2 : the duration signal on and Vdd on */
/* : bl : 3 ; T3 : the duration between CPL off and signal off */
/* : bl : 4 ; T4 : the duration signal off and Vdd off */
/* --------------------------------------------------------------------- */
/*
 * Apply one of the four OEM LCD power-sequencing delays.
 * tempbl selects the delay: 1 = T1 (CPL on -> signal on),
 * 2 = T2 (signal on -> Vdd on), 3 = T3 (CPL off -> signal off),
 * 4 = T4 (signal off -> Vdd off).  Other values do nothing.
 */
void XGI_XG21SetPanelDelay(unsigned short tempbl,
		struct vb_device_info *pVBInfo)
{
	unsigned short idx = XGI_GetLVDSOEMTableIndex(pVBInfo);

	switch (tempbl) {
	case 1:
		mdelay(pVBInfo->XG21_LVDSCapList[idx].PSC_S1);
		break;
	case 2:
		mdelay(pVBInfo->XG21_LVDSCapList[idx].PSC_S2);
		break;
	case 3:
		mdelay(pVBInfo->XG21_LVDSCapList[idx].PSC_S3);
		break;
	case 4:
		mdelay(pVBInfo->XG21_LVDSCapList[idx].PSC_S4);
		break;
	default:
		break;
	}
}
/*
 * XGI_XG21CheckLVDSMode - check whether a mode fits the XG21 LVDS panel.
 *
 * Returns 1 when the mode's effective resolution (after the 9-dot,
 * HalfDCLK and DoubleScan adjustments) fits inside the OEM panel, 0
 * otherwise.  Extended modes that do not exactly match the panel size
 * are only accepted at color-depth index <= 2.
 *
 * Fix: removed the redundant inner (ModeNo > 0x13) re-checks that were
 * already guaranteed by the enclosing if.
 */
unsigned char XGI_XG21CheckLVDSMode(unsigned short ModeNo,
		unsigned short ModeIdIndex, struct vb_device_info *pVBInfo)
{
	unsigned short xres, yres, colordepth, modeflag, resindex,
			lvdstableindex;

	resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
	if (ModeNo <= 0x13) {
		xres = pVBInfo->StResInfo[resindex].HTotal;
		yres = pVBInfo->StResInfo[resindex].VTotal;
		/* si+St_ResInfo */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	} else {
		xres = pVBInfo->ModeResInfo[resindex].HTotal; /* xres->ax */
		yres = pVBInfo->ModeResInfo[resindex].VTotal; /* yres->bx */
		/* si+St_ModeFlag */
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
	}

	/* 9-dot character clock: usable width is 8/9 of the total */
	if (!(modeflag & Charx8Dot)) {
		xres /= 9;
		xres *= 8;
	}

	if (ModeNo > 0x13) {
		if (modeflag & HalfDCLK)
			xres *= 2;
		if (modeflag & DoubleScanMode)
			yres *= 2;
	}

	lvdstableindex = XGI_GetLVDSOEMTableIndex(pVBInfo);

	if (xres > (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE))
		return 0;
	if (yres > (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE))
		return 0;

	if (ModeNo > 0x13) {
		/* non-native extended modes need scaling: limit color depth */
		if ((xres != (pVBInfo->XG21_LVDSCapList[lvdstableindex].
				LVDSHDE)) ||
		    (yres != (pVBInfo->XG21_LVDSCapList[lvdstableindex].
				LVDSVDE))) {
			colordepth = XGI_GetColorDepth(ModeNo, ModeIdIndex,
					pVBInfo);
			if (colordepth > 2)
				return 0;
		}
	}
	return 1;
}
/* Propagate the CR37 18-bit-panel flag into the SR06/SR09 flat-panel bits. */
void XGI_SetXG21FPBits(struct vb_device_info *pVBInfo)
{
	unsigned char fpbits;

	/* CR37 D[0] == 1 means an 18-bit panel */
	fpbits = (xgifb_reg_get(pVBInfo->P3d4, 0x37) & 0x01) << 6;
	/* SR06[6]: 18-bit dither */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0x40, fpbits);
	/* SR09[7]: enable FP output; SR09[6]: 1 = single 18 bits, 0 = dual 12 bits */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, fpbits | 0x80);
}
/* Propagate the CR37 panel-type bits into the XG27 SR06/SR09 FP bits. */
void XGI_SetXG27FPBits(struct vb_device_info *pVBInfo)
{
	unsigned char fpbits;

	/* CR37 D[1:0]: 01 = 18 bit, 00 = dual 12, 10 = single 24 */
	fpbits = (xgifb_reg_get(pVBInfo->P3d4, 0x37) & 0x03) << 6;
	/* SR06[7] 0: dual 12 / 1: single 24; [6] 18-bit dither kept 0 (h/w recommendation) */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x06, ~0xc0, fpbits & 0x80);
	/* SR09[7] enable FP output; SR09[6] 1: single 18 bits, 0: 24 bits */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0xc0, fpbits | 0x80);
}
/*
 * XGI_SetXG21LVDSPara - program XG21 CRTC/panel timing for LVDS output.
 *
 * Sets the sync polarities from the OEM LVDS capability word, then
 * derives blank/retrace start/end positions by centering the requested
 * mode inside the panel's native timing, writes them to the CRTC and SR
 * timing registers, programs the panel VCLK into all three SR31[5:4]
 * clock banks, and applies the attribute-controller panning fixup for
 * 9-dot text modes.
 */
static void XGI_SetXG21LVDSPara(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned char temp, Miscdata;
	unsigned short xres, yres, modeflag, resindex, lvdstableindex;
	unsigned short LVDSHT, LVDSHBS, LVDSHRS, LVDSHRE, LVDSHBE;
	unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE;
	unsigned short value;

	lvdstableindex = XGI_GetLVDSOEMTableIndex(pVBInfo);
	/* sync polarity bits come from the high byte of LVDS_Capability */
	temp = (unsigned char) ((pVBInfo->XG21_LVDSCapList[lvdstableindex].
				LVDS_Capability &
			(LCDPolarity << 8)) >> 8);
	temp &= LCDPolarity;
	/* merge the polarity bits into the misc output register */
	Miscdata = (unsigned char) inb(pVBInfo->P3cc);

	outb((Miscdata & 0x3F) | temp, pVBInfo->P3c2);

	temp = (unsigned char) (pVBInfo->XG21_LVDSCapList[lvdstableindex].
			LVDS_Capability & LCDPolarity);
	/* SR35[7] FP VSync polarity */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x80, temp & 0x80);
	/* SR30[5] FP HSync polarity */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x30, ~0x20, (temp & 0x40) >> 1);

	XGI_SetXG21FPBits(pVBInfo);
	resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
	if (ModeNo <= 0x13) {
		xres = pVBInfo->StResInfo[resindex].HTotal;
		yres = pVBInfo->StResInfo[resindex].VTotal;
		/* si+St_ResInfo */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	} else {
		xres = pVBInfo->ModeResInfo[resindex].HTotal; /* xres->ax */
		yres = pVBInfo->ModeResInfo[resindex].VTotal; /* yres->bx */
		/* si+St_ModeFlag */
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
	}

	/* 9-dot character clock: usable width is 8/9 of the total */
	if (!(modeflag & Charx8Dot))
		xres = xres * 8 / 9;

	LVDSHT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHT;

	/* center the mode horizontally inside the panel's active width */
	LVDSHBS = xres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE
			- xres) / 2;
	if ((ModeNo <= 0x13) && (modeflag & HalfDCLK))
		LVDSHBS -= xres / 4;

	if (LVDSHBS > LVDSHT)
		LVDSHBS -= LVDSHT;

	LVDSHRS = LVDSHBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHFP;
	if (LVDSHRS > LVDSHT)
		LVDSHRS -= LVDSHT;

	LVDSHRE = LVDSHRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHSYNC;
	if (LVDSHRE > LVDSHT)
		LVDSHRE -= LVDSHT;

	LVDSHBE = LVDSHBS + LVDSHT
			- pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE;

	LVDSVT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVT;

	/* center the mode vertically inside the panel's active height */
	LVDSVBS = yres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE
			- yres) / 2;
	if ((ModeNo > 0x13) && (modeflag & DoubleScanMode))
		LVDSVBS += yres / 2;

	if (LVDSVBS > LVDSVT)
		LVDSVBS -= LVDSVT;

	LVDSVRS = LVDSVBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVFP;
	if (LVDSVRS > LVDSVT)
		LVDSVRS -= LVDSVT;

	LVDSVRE = LVDSVRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVSYNC;
	if (LVDSVRE > LVDSVT)
		LVDSVRE -= LVDSVT;

	LVDSVBE = LVDSVBS + LVDSVT
			- pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE;

	temp = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
	xgifb_reg_set(pVBInfo->P3d4, 0x11, temp & 0x7f); /* Unlock CRTC */

	if (!(modeflag & Charx8Dot))
		xgifb_reg_or(pVBInfo->P3c4, 0x1, 0x1);

	/* HT SR0B[1:0] CR00 */
	value = (LVDSHT >> 3) - 5;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0x03, (value & 0x300) >> 8);
	xgifb_reg_set(pVBInfo->P3d4, 0x0, (value & 0xFF));

	/* HBS SR0B[5:4] CR02 */
	value = (LVDSHBS >> 3) - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0x30, (value & 0x300) >> 4);
	xgifb_reg_set(pVBInfo->P3d4, 0x2, (value & 0xFF));

	/* HBE SR0C[1:0] CR05[7] CR03[4:0] */
	value = (LVDSHBE >> 3) - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0C, ~0x03, (value & 0xC0) >> 6);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x05, ~0x80, (value & 0x20) << 2);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x03, ~0x1F, value & 0x1F);

	/* HRS SR0B[7:6] CR04 */
	value = (LVDSHRS >> 3) + 2;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0xC0, (value & 0x300) >> 2);
	xgifb_reg_set(pVBInfo->P3d4, 0x4, (value & 0xFF));

	/* Panel HRS SR2F[1:0] SR2E[7:0] */
	value--;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x2F, ~0x03, (value & 0x300) >> 8);
	xgifb_reg_set(pVBInfo->P3c4, 0x2E, (value & 0xFF));

	/* HRE SR0C[2] CR05[4:0] */
	value = (LVDSHRE >> 3) + 2;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0C, ~0x04, (value & 0x20) >> 3);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x05, ~0x1F, value & 0x1F);

	/* Panel HRE SR2F[7:2] */
	value--;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x2F, ~0xFC, value << 2);

	/* VT SR0A[0] CR07[5][0] CR06 */
	value = LVDSVT - 2;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x01, (value & 0x400) >> 10);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x20, (value & 0x200) >> 4);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x01, (value & 0x100) >> 8);
	xgifb_reg_set(pVBInfo->P3d4, 0x06, (value & 0xFF));

	/* VBS SR0A[2] CR09[5] CR07[3] CR15 */
	value = LVDSVBS - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x04, (value & 0x400) >> 8);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x09, ~0x20, (value & 0x200) >> 4);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x08, (value & 0x100) >> 5);
	xgifb_reg_set(pVBInfo->P3d4, 0x15, (value & 0xFF));

	/* VBE SR0A[4] CR16 */
	value = LVDSVBE - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x10, (value & 0x100) >> 4);
	xgifb_reg_set(pVBInfo->P3d4, 0x16, (value & 0xFF));

	/* VRS SR0A[3] CR7[7][2] CR10 */
	value = LVDSVRS - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x08, (value & 0x400) >> 7);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x80, (value & 0x200) >> 2);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x04, (value & 0x100) >> 6);
	xgifb_reg_set(pVBInfo->P3d4, 0x10, (value & 0xFF));

	/* Panel VRS SR3F[1:0] SR34[7:0] SR33[0] */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0x03, (value & 0x600) >> 9);
	xgifb_reg_set(pVBInfo->P3c4, 0x34, (value >> 1) & 0xFF);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x33, ~0x01, value & 0x01);

	/* VRE SR0A[5] CR11[3:0] */
	value = LVDSVRE - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x20, (value & 0x10) << 1);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x11, ~0x0F, value & 0x0F);

	/* Panel VRE SR3F[7:2] *//* SR3F[7] has to be 0, h/w bug */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC, (value << 2) & 0x7C);

	/* program the panel VCLK into all three SR31[5:4] clock banks */
	for (temp = 0, value = 0; temp < 3; temp++) {

		xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
		xgifb_reg_set(pVBInfo->P3c4,
				0x2B,
				pVBInfo->XG21_LVDSCapList[lvdstableindex].
						VCLKData1);
		xgifb_reg_set(pVBInfo->P3c4,
				0x2C,
				pVBInfo->XG21_LVDSCapList[lvdstableindex].
						VCLKData2);
		value += 0x10;
	}

	if (!(modeflag & Charx8Dot)) {
		inb(pVBInfo->P3da); /* reset 3da */
		outb(0x13, pVBInfo->P3c0); /* set index */
		/* set data, panning = 0, shift left 1 dot*/
		outb(0x00, pVBInfo->P3c0);

		inb(pVBInfo->P3da); /* Enable Attribute */
		outb(0x20, pVBInfo->P3c0);

		inb(pVBInfo->P3da); /* reset 3da */
	}

}
/* no shadow case */
/*
 * XGI_SetXG27LVDSPara - program XG27 CRTC/panel timing for LVDS output.
 *
 * XG27 variant of XGI_SetXG21LVDSPara: same centering computation and
 * CRTC/SR programming, but uses the XG27 FP bits and the XG27 panel-VRS
 * register layout (SR35[2:0]/SR34 instead of SR3F/SR34/SR33).
 */
static void XGI_SetXG27LVDSPara(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned char temp, Miscdata;
	unsigned short xres, yres, modeflag, resindex, lvdstableindex;
	unsigned short LVDSHT, LVDSHBS, LVDSHRS, LVDSHRE, LVDSHBE;
	unsigned short LVDSVT, LVDSVBS, LVDSVRS, LVDSVRE, LVDSVBE;
	unsigned short value;

	lvdstableindex = XGI_GetLVDSOEMTableIndex(pVBInfo);
	/* sync polarity bits come from the high byte of LVDS_Capability */
	temp = (unsigned char) ((pVBInfo->XG21_LVDSCapList[lvdstableindex].
				LVDS_Capability &
			(LCDPolarity << 8)) >> 8);
	temp &= LCDPolarity;
	/* merge the polarity bits into the misc output register */
	Miscdata = (unsigned char) inb(pVBInfo->P3cc);

	outb((Miscdata & 0x3F) | temp, pVBInfo->P3c2);

	temp = (unsigned char) (pVBInfo->XG21_LVDSCapList[lvdstableindex].
			LVDS_Capability & LCDPolarity);
	/* SR35[7] FP VSync polarity */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x80, temp & 0x80);
	/* SR30[5] FP HSync polarity */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x30, ~0x20, (temp & 0x40) >> 1);

	XGI_SetXG27FPBits(pVBInfo);
	resindex = XGI_GetResInfo(ModeNo, ModeIdIndex, pVBInfo);
	if (ModeNo <= 0x13) {
		xres = pVBInfo->StResInfo[resindex].HTotal;
		yres = pVBInfo->StResInfo[resindex].VTotal;
		/* si+St_ResInfo */
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	} else {
		xres = pVBInfo->ModeResInfo[resindex].HTotal; /* xres->ax */
		yres = pVBInfo->ModeResInfo[resindex].VTotal; /* yres->bx */
		/* si+St_ModeFlag */
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
	}

	/* 9-dot character clock: usable width is 8/9 of the total */
	if (!(modeflag & Charx8Dot))
		xres = xres * 8 / 9;

	LVDSHT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHT;

	/* center the mode horizontally inside the panel's active width */
	LVDSHBS = xres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE
			- xres) / 2;
	if ((ModeNo <= 0x13) && (modeflag & HalfDCLK))
		LVDSHBS -= xres / 4;

	if (LVDSHBS > LVDSHT)
		LVDSHBS -= LVDSHT;

	LVDSHRS = LVDSHBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHFP;
	if (LVDSHRS > LVDSHT)
		LVDSHRS -= LVDSHT;

	LVDSHRE = LVDSHRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHSYNC;
	if (LVDSHRE > LVDSHT)
		LVDSHRE -= LVDSHT;

	LVDSHBE = LVDSHBS + LVDSHT
			- pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSHDE;

	LVDSVT = pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVT;

	/* center the mode vertically inside the panel's active height */
	LVDSVBS = yres + (pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE
			- yres) / 2;
	if ((ModeNo > 0x13) && (modeflag & DoubleScanMode))
		LVDSVBS += yres / 2;

	if (LVDSVBS > LVDSVT)
		LVDSVBS -= LVDSVT;

	LVDSVRS = LVDSVBS + pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVFP;
	if (LVDSVRS > LVDSVT)
		LVDSVRS -= LVDSVT;

	LVDSVRE = LVDSVRS + pVBInfo->XG21_LVDSCapList[lvdstableindex].
			LVDSVSYNC;
	if (LVDSVRE > LVDSVT)
		LVDSVRE -= LVDSVT;

	LVDSVBE = LVDSVBS + LVDSVT
			- pVBInfo->XG21_LVDSCapList[lvdstableindex].LVDSVDE;

	temp = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x11);
	xgifb_reg_set(pVBInfo->P3d4, 0x11, temp & 0x7f); /* Unlock CRTC */

	if (!(modeflag & Charx8Dot))
		xgifb_reg_or(pVBInfo->P3c4, 0x1, 0x1);

	/* HT SR0B[1:0] CR00 */
	value = (LVDSHT >> 3) - 5;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0x03, (value & 0x300) >> 8);
	xgifb_reg_set(pVBInfo->P3d4, 0x0, (value & 0xFF));

	/* HBS SR0B[5:4] CR02 */
	value = (LVDSHBS >> 3) - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0x30, (value & 0x300) >> 4);
	xgifb_reg_set(pVBInfo->P3d4, 0x2, (value & 0xFF));

	/* HBE SR0C[1:0] CR05[7] CR03[4:0] */
	value = (LVDSHBE >> 3) - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0C, ~0x03, (value & 0xC0) >> 6);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x05, ~0x80, (value & 0x20) << 2);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x03, ~0x1F, value & 0x1F);

	/* HRS SR0B[7:6] CR04 */
	value = (LVDSHRS >> 3) + 2;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0B, ~0xC0, (value & 0x300) >> 2);
	xgifb_reg_set(pVBInfo->P3d4, 0x4, (value & 0xFF));

	/* Panel HRS SR2F[1:0] SR2E[7:0] */
	value--;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x2F, ~0x03, (value & 0x300) >> 8);
	xgifb_reg_set(pVBInfo->P3c4, 0x2E, (value & 0xFF));

	/* HRE SR0C[2] CR05[4:0] */
	value = (LVDSHRE >> 3) + 2;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0C, ~0x04, (value & 0x20) >> 3);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x05, ~0x1F, value & 0x1F);

	/* Panel HRE SR2F[7:2] */
	value--;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x2F, ~0xFC, value << 2);

	/* VT SR0A[0] CR07[5][0] CR06 */
	value = LVDSVT - 2;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x01, (value & 0x400) >> 10);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x20, (value & 0x200) >> 4);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x01, (value & 0x100) >> 8);
	xgifb_reg_set(pVBInfo->P3d4, 0x06, (value & 0xFF));

	/* VBS SR0A[2] CR09[5] CR07[3] CR15 */
	value = LVDSVBS - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x04, (value & 0x400) >> 8);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x09, ~0x20, (value & 0x200) >> 4);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x08, (value & 0x100) >> 5);
	xgifb_reg_set(pVBInfo->P3d4, 0x15, (value & 0xFF));

	/* VBE SR0A[4] CR16 */
	value = LVDSVBE - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x10, (value & 0x100) >> 4);
	xgifb_reg_set(pVBInfo->P3d4, 0x16, (value & 0xFF));

	/* VRS SR0A[3] CR7[7][2] CR10 */
	value = LVDSVRS - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x08, (value & 0x400) >> 7);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x80, (value & 0x200) >> 2);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x07, ~0x04, (value & 0x100) >> 6);
	xgifb_reg_set(pVBInfo->P3d4, 0x10, (value & 0xFF));

	/* Panel VRS SR35[2:0] SR34[7:0] */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x35, ~0x07, (value & 0x700) >> 8);
	xgifb_reg_set(pVBInfo->P3c4, 0x34, value & 0xFF);

	/* VRE SR0A[5] CR11[3:0] */
	value = LVDSVRE - 1;
	xgifb_reg_and_or(pVBInfo->P3c4, 0x0A, ~0x20, (value & 0x10) << 1);
	xgifb_reg_and_or(pVBInfo->P3d4, 0x11, ~0x0F, value & 0x0F);

	/* Panel VRE SR3F[7:2] */
	xgifb_reg_and_or(pVBInfo->P3c4, 0x3F, ~0xFC, (value << 2) & 0xFC);

	/* program the panel VCLK into all three SR31[5:4] clock banks */
	for (temp = 0, value = 0; temp < 3; temp++) {

		xgifb_reg_and_or(pVBInfo->P3c4, 0x31, ~0x30, value);
		xgifb_reg_set(pVBInfo->P3c4,
				0x2B,
				pVBInfo->XG21_LVDSCapList[lvdstableindex].
						VCLKData1);
		xgifb_reg_set(pVBInfo->P3c4,
				0x2C,
				pVBInfo->XG21_LVDSCapList[lvdstableindex].
						VCLKData2);
		value += 0x10;
	}

	if (!(modeflag & Charx8Dot)) {
		inb(pVBInfo->P3da); /* reset 3da */
		outb(0x13, pVBInfo->P3c0); /* set index */
		/* set data, panning = 0, shift left 1 dot*/
		outb(0x00, pVBInfo->P3c0);

		inb(pVBInfo->P3da); /* Enable Attribute */
		outb(0x20, pVBInfo->P3c0);

		inb(pVBInfo->P3da); /* reset 3da */
	}

}
/* --------------------------------------------------------------------- */
/* Function : XGI_IsLCDON */
/* Input : */
/* Output : 0 : Skip PSC Control */
/* 1: Disable PSC */
/* Description : */
/* --------------------------------------------------------------------- */
/*
 * Decide whether panel power-sequencing control should be disabled.
 * Returns 1 to disable PSC, 0 to skip PSC control (see header above).
 * Dual-edge configurations always skip PSC control.
 */
static unsigned char XGI_IsLCDON(struct vb_device_info *pVBInfo)
{
	unsigned short flags = pVBInfo->VBInfo;

	if (flags & SetCRT2ToDualEdge)
		return 0;
	if (flags & (DisableCRT2Display | SwitchToCRT2 | SetSimuScanMode))
		return 1;
	return 0;
}
/* --------------------------------------------------------------------- */
/* Function : XGI_DisableChISLCD */
/* Input : */
/* Output : 0 -> Not LCD Mode */
/* Description : */
/* --------------------------------------------------------------------- */
/*
 * XGI_DisableChISLCD - returns 1 when the channel being disabled is in
 * LCD mode (channel B), 0 otherwise.
 * NOTE(review): tempbx is masked with (DisableChA | DisableChB), so the
 * EnableChA/EnableChB bits tested below can never be set here; the tests
 * effectively reduce to DisableChA/DisableChB.  Likely copied from
 * XGI_EnableChISLCD - confirm intent before simplifying.
 */
static unsigned char XGI_DisableChISLCD(struct vb_device_info *pVBInfo)
{
	unsigned short tempbx, tempah;

	tempbx = pVBInfo->SetFlag & (DisableChA | DisableChB);
	/* tempah: bitwise complement of Part1 index 0x2E */
	tempah = ~((unsigned short) xgifb_reg_get(pVBInfo->Part1Port, 0x2E));

	if (tempbx & (EnableChA | DisableChA)) {
		if (!(tempah & 0x08)) /* Chk LCDA Mode */
			return 0;
	}

	if (!(tempbx & (EnableChB | DisableChB)))
		return 0;

	if (tempah & 0x01) /* Chk LCDB Mode */
		return 1;

	return 0;
}
/* --------------------------------------------------------------------- */
/* Function : XGI_EnableChISLCD */
/* Input : */
/* Output : 0 -> Not LCD mode */
/* Description : */
/* --------------------------------------------------------------------- */
/*
 * XGI_EnableChISLCD - returns 1 when the channel being enabled is in
 * LCD mode (channel B), 0 otherwise.
 * NOTE(review): tempbx is masked with (EnableChA | EnableChB), so the
 * DisableChA/DisableChB bits tested below can never be set here; the
 * tests effectively reduce to EnableChA/EnableChB - confirm intent.
 */
static unsigned char XGI_EnableChISLCD(struct vb_device_info *pVBInfo)
{
	unsigned short tempbx, tempah;

	tempbx = pVBInfo->SetFlag & (EnableChA | EnableChB);
	/* tempah: bitwise complement of Part1 index 0x2E */
	tempah = ~((unsigned short) xgifb_reg_get(pVBInfo->Part1Port, 0x2E));

	if (tempbx & (EnableChA | DisableChA)) {
		if (!(tempah & 0x08)) /* Chk LCDA Mode */
			return 0;
	}

	if (!(tempbx & (EnableChB | DisableChB)))
		return 0;

	if (tempah & 0x01) /* Chk LCDB Mode */
		return 1;

	return 0;
}
/*
 * XGI_DisableBridge - power down / blank the CRT2 path on the video
 * bridge before a mode switch or display-off.
 *
 * On 301B/302B/301LV/302LV/301C bridges this selectively disables
 * channel A and/or B (Part4 0x1F), powers down the LVDS driver on
 * LV/C parts, gates or blanks the CRT/CRT2 screens and stops the CRT2
 * engine while preserving Part1 index 0.  On the plain 301 it simply
 * blanks and disables CRT2.  No-op in Win9x DOS mode.
 */
void XGI_DisableBridge(struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	unsigned short tempah = 0;

	if (pVBInfo->SetFlag == Win9xDOSMode)
		return;

	/*
	if (CH7017) {
		if (!(pVBInfo->VBInfo &
			(SetCRT2ToLCD | SetCRT2toLCDA)) ||
			(XGI_DisableChISLCD(pVBInfo))) {
			if (!XGI_IsLCDON(pVBInfo)) {
				if (DISCHARGE) {
					tempbx = XGINew_GetCH7005(0x61);
					// first time we power up
					if (tempbx < 0x01)
						// and disable power sequence
						XGINew_SetCH7005(0x0066);
					else
						// leave VDD on - disable power
						XGINew_SetCH7005(0x5f66);
				}
			}
		}
	}
	*/

	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
			| VB_XGI302LV | VB_XGI301C)) {
		tempah = 0x3F;
		if (!(pVBInfo->VBInfo &
		    (DisableCRT2Display | SetSimuScanMode))) {
			if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
				if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
					tempah = 0x7F; /* Disable Channel A */
					/*
					 * NOTE(review): the enclosing branch
					 * already requires SetCRT2ToLCDA, so
					 * this condition is always false and
					 * the 0xBF assignment is unreachable
					 * - confirm against BIOS source.
					 */
					if (!(pVBInfo->VBInfo & SetCRT2ToLCDA))
						/* Disable Channel B */
						tempah = 0xBF;

					if (pVBInfo->SetFlag & DisableChB)
						/* force to disable Channel */
						tempah &= 0xBF;

					if (pVBInfo->SetFlag & DisableChA)
						/* Force to disable Channel B */
						tempah &= 0x7F;
				}
			}
		}

		/* disable part4_1f */
		xgifb_reg_and(pVBInfo->Part4Port, 0x1F, tempah);

		if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
			if (((pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)))
					|| (XGI_DisableChISLCD(pVBInfo))
					|| (XGI_IsLCDON(pVBInfo)))
				/* LVDS Driver power down */
				xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x80);
		}

		if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
				& (DisableCRT2Display | SetCRT2ToLCDA
						| SetSimuScanMode))) {
			if (pVBInfo->SetFlag & GatingCRT)
				XGI_EnableGatingCRT(HwDeviceExtension, pVBInfo);
			XGI_DisplayOff(HwDeviceExtension, pVBInfo);
		}

		if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
			if ((pVBInfo->SetFlag & DisableChA) || (pVBInfo->VBInfo
					& SetCRT2ToLCDA))
				/* Power down */
				xgifb_reg_and(pVBInfo->Part1Port, 0x1e, 0xdf);
		}

		/* disable TV as primary VGA swap */
		xgifb_reg_and(pVBInfo->P3c4, 0x32, 0xdf);

		if ((pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToDualEdge)))
			xgifb_reg_and(pVBInfo->Part2Port, 0x00, 0xdf);

		if ((pVBInfo->SetFlag & DisableChB) ||
		    (pVBInfo->VBInfo &
			(DisableCRT2Display | SetSimuScanMode)) ||
		    ((!(pVBInfo->VBInfo & SetCRT2ToLCDA)) &&
			(pVBInfo->VBInfo &
			(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))))
			/* BScreenOff=1 */
			xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);

		if ((pVBInfo->SetFlag & DisableChB) ||
		    (pVBInfo->VBInfo &
			(DisableCRT2Display | SetSimuScanMode)) ||
		    (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) ||
		    (pVBInfo->VBInfo &
			(SetCRT2ToRAMDAC | SetCRT2ToLCD | SetCRT2ToTV))) {
			/* save Part1 index 0 */
			tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x00);
			/* BTDAC = 1, avoid VB reset */
			xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x10);
			/* disable CRT2 */
			xgifb_reg_and(pVBInfo->Part1Port, 0x1E, 0xDF);
			/* restore Part1 index 0 */
			xgifb_reg_set(pVBInfo->Part1Port, 0x00, tempah);
		}
	} else { /* {301} */
		if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToTV)) {
			/* BScreenOff=1 */
			xgifb_reg_or(pVBInfo->Part1Port, 0x00, 0x80);
			/* Disable CRT2 */
			xgifb_reg_and(pVBInfo->Part1Port, 0x1E, 0xDF);
			/* Disable TV as primary VGA swap */
			xgifb_reg_and(pVBInfo->P3c4, 0x32, 0xDF);
		}

		if (pVBInfo->VBInfo & (DisableCRT2Display | SetCRT2ToLCDA
				| SetSimuScanMode))
			XGI_DisplayOff(HwDeviceExtension, pVBInfo);
	}
}
/* --------------------------------------------------------------------- */
/* Function : XGI_GetTVPtrIndex */
/* Input : */
/* Output : */
/* Description : bx 0 : ExtNTSC */
/* 1 : StNTSC */
/* 2 : ExtPAL */
/* 3 : StPAL */
/* 4 : ExtHiTV */
/* 5 : StHiTV */
/* 6 : Ext525i */
/* 7 : St525i */
/* 8 : Ext525p */
/* 9 : St525p */
/* A : Ext750p */
/* B : St750p */
/* --------------------------------------------------------------------- */
/*
 * Map the current TV standard to its delay/filter table index
 * (see the value table in the header comment above).  Even entries are
 * the "Ext" variants; TVSimuMode selects the odd "St" variant.
 */
static unsigned short XGI_GetTVPtrIndex(struct vb_device_info *pVBInfo)
{
	unsigned short idx;

	/* highest-priority standard wins: 750p > 525p > 525i > 1080i > PAL */
	if (pVBInfo->TVInfo & SetYPbPrMode750p)
		idx = 10;
	else if (pVBInfo->TVInfo & SetYPbPrMode525p)
		idx = 8;
	else if (pVBInfo->TVInfo & SetYPbPrMode525i)
		idx = 6;
	else if (pVBInfo->TVInfo & SetYPbPrMode1080i)
		idx = 4;
	else if (pVBInfo->TVInfo & SetPALTV)
		idx = 2;
	else
		idx = 0;

	if (pVBInfo->TVInfo & TVSimuMode)
		idx++;

	return idx;
}
/* --------------------------------------------------------------------- */
/* Function : XGI_GetTVPtrIndex2 */
/* Input : */
/* Output : bx 0 : NTSC */
/* 1 : PAL */
/* 2 : PALM */
/* 3 : PALN */
/* 4 : NTSC1024x768 */
/* 5 : PAL-M 1024x768 */
/* 6-7: reserved */
/* cl 0 : YFilter1 */
/* 1 : YFilter2 */
/* ch 0 : 301A */
/* 1 : 301B/302B/301LV/302LV */
/* Description : */
/* --------------------------------------------------------------------- */
/*
 * Compute the TV-standard index (*tempbx), Y-filter selector (*tempcl)
 * and bridge-class flag (*tempch) documented in the header above.
 */
static void XGI_GetTVPtrIndex2(unsigned short *tempbx, unsigned char *tempcl,
		unsigned char *tempch, struct vb_device_info *pVBInfo)
{
	unsigned short standard = 0;

	if (pVBInfo->TVInfo & SetPALTV)
		standard = 1;
	if (pVBInfo->TVInfo & SetPALMTV)
		standard = 2;
	if (pVBInfo->TVInfo & SetPALNTV)
		standard = 3;
	if (pVBInfo->TVInfo & NTSC1024x768)
		standard = (pVBInfo->TVInfo & SetPALMTV) ? 5 : 4;

	*tempbx = standard;
	*tempcl = 0;
	*tempch = 0;

	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
			| VB_XGI302LV | VB_XGI301C)) {
		if ((!(pVBInfo->VBInfo & SetInSlaveMode)) ||
		    (pVBInfo->TVInfo & TVSimuMode)) {
			*tempbx += 8;
			*tempcl += 1;
		}
		/* B/LV/C-class bridges use the second filter set */
		(*tempch)++;
	}
}
/*
 * XGI_SetDelayComp - program the CRT2 delay compensation (Part1 0x2D).
 *
 * For 301B-class bridges the TV delay goes in the low nibble (channel B)
 * and the LCD delay in the high nibble (channel A); for LVDS only the
 * LCD delay is written.
 */
static void XGI_SetDelayComp(struct vb_device_info *pVBInfo)
{
	unsigned short index;

	unsigned char tempah, tempbl, tempbh;

	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
			| VB_XGI302LV | VB_XGI301C)) {
		if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA
				| SetCRT2ToTV | SetCRT2ToRAMDAC)) {
			tempbl = 0;
			tempbh = 0;

			index = XGI_GetTVPtrIndex(pVBInfo); /* Get TV Delay */
			tempbl = pVBInfo->XGI_TVDelayList[index];

			/*
			 * NOTE(review): the outer if already matched this
			 * same VBType mask, so this branch is always taken
			 * and the XGI_TVDelayList value above is always
			 * overwritten by XGI_TVDelayList2 - confirm which
			 * table is actually intended.
			 */
			if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
					| VB_XGI301LV | VB_XGI302LV
					| VB_XGI301C))
				tempbl = pVBInfo->XGI_TVDelayList2[index];

			if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
				tempbl = tempbl >> 4;
			/*
			if (pVBInfo->VBInfo & SetCRT2ToRAMDAC)
				tempbl = CRT2Delay1; // Get CRT2 Delay
			if (pVBInfo->VBType &
			(VB_XGI301B |
			VB_XGI302B |
			VB_XGI301LV |
			VB_XGI302LV |
			VB_XGI301C))
				tempbl = CRT2Delay2;
			*/
			if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
				/* Get LCD Delay */
				index = XGI_GetLCDCapPtr(pVBInfo);
				tempbh = pVBInfo->LCDCapList[index].
						LCD_DelayCompensation;

				if (!(pVBInfo->VBInfo & SetCRT2ToLCDA))
					tempbl = tempbh;
			}

			tempbl &= 0x0F;
			tempbh &= 0xF0;
			tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x2D);

			if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToLCD
					| SetCRT2ToTV)) { /* Channel B */
				tempah &= 0xF0;
				tempah |= tempbl;
			}

			if (pVBInfo->VBInfo & SetCRT2ToLCDA) { /* Channel A */
				tempah &= 0x0F;
				tempah |= tempbh;
			}
			xgifb_reg_set(pVBInfo->Part1Port, 0x2D, tempah);
		}
	} else if (pVBInfo->IF_DEF_LVDS == 1) {
		tempbl = 0;
		tempbh = 0;
		if (pVBInfo->VBInfo & SetCRT2ToLCD) {
			/* / Get LCD Delay */
			tempah = pVBInfo->LCDCapList[
					XGI_GetLCDCapPtr(pVBInfo)].
						LCD_DelayCompensation;
			tempah &= 0x0f;
			tempah = tempah << 4;
			xgifb_reg_and_or(pVBInfo->Part1Port, 0x2D, 0x0f,
					tempah);
		}
	}
}
/*
 * XGI_SetLCDCap_A - program the channel-A (Part1) LCD capability bits:
 * dither enable and the 18/24-bit selection, from CR37 and the LCD
 * capability word in tempcx.
 */
static void XGI_SetLCDCap_A(unsigned short tempcx,
		struct vb_device_info *pVBInfo)
{
	unsigned short reg19, reg1a;

	if (xgifb_reg_get(pVBInfo->P3d4, 0x37) & LCDRGB18Bit) {
		/* 18-bit panel: enable dither */
		reg19 = 0x20 | (tempcx & 0x00C0);
		reg1a = 0x80;
	} else {
		reg19 = 0x30 | (tempcx & 0x00C0);
		reg1a = 0x00;
	}

	xgifb_reg_and_or(pVBInfo->Part1Port, 0x19, 0x0F, reg19);
	xgifb_reg_and_or(pVBInfo->Part1Port, 0x1A, 0x7F, reg1a);
}
/* --------------------------------------------------------------------- */
/* Function : XGI_SetLCDCap_B */
/* Input : cx -> LCD Capability */
/* Output : */
/* Description : */
/* --------------------------------------------------------------------- */
/*
 * XGI_SetLCDCap_B - apply LCD capability bits for the channel-B (plain
 * CRT2-to-LCD) path.
 *
 * Part2 0x1A keeps its top three bits (mask 0xE0); the low bits carry
 * capability bits 6-7 (shifted down) plus 0x0c for 24-bit panels or
 * 0x18 (dither enabled) otherwise.
 */
static void XGI_SetLCDCap_B(unsigned short tempcx,
		struct vb_device_info *pVBInfo)
{
	unsigned short value = (tempcx & 0x00ff) >> 6;

	if (tempcx & EnableLCD24bpp)	/* 24 bits */
		value |= 0x0c;
	else
		value |= 0x18;		/* Enable Dither */

	xgifb_reg_and_or(pVBInfo->Part2Port, 0x1A, 0xE0, value);
}
/*
 * SetSpectrum - program and enable the spread-spectrum generator from
 * the current panel's LCD capability entry.
 *
 * Sequence (order matters, with long waits between steps): disable the
 * down-spectrum bit, reset the spectrum block, load the four Spectrum_3x
 * parameters into Part4 0x31-0x34, then set the enable bit.
 */
static void SetSpectrum(struct vb_device_info *pVBInfo)
{
	unsigned short cap_idx = XGI_GetLCDCapPtr(pVBInfo);

	/* disable down spectrum D[4] */
	xgifb_reg_and(pVBInfo->Part4Port, 0x30, 0x8F);
	XGI_LongWait(pVBInfo);
	xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x20); /* reset spectrum */
	XGI_LongWait(pVBInfo);

	xgifb_reg_set(pVBInfo->Part4Port, 0x31,
			pVBInfo->LCDCapList[cap_idx].Spectrum_31);
	xgifb_reg_set(pVBInfo->Part4Port, 0x32,
			pVBInfo->LCDCapList[cap_idx].Spectrum_32);
	xgifb_reg_set(pVBInfo->Part4Port, 0x33,
			pVBInfo->LCDCapList[cap_idx].Spectrum_33);
	xgifb_reg_set(pVBInfo->Part4Port, 0x34,
			pVBInfo->LCDCapList[cap_idx].Spectrum_34);

	XGI_LongWait(pVBInfo);
	xgifb_reg_or(pVBInfo->Part4Port, 0x30, 0x40); /* enable spectrum */
}
/*
 * XGI_SetLCDCap - dispatch LCD capability programming based on bridge
 * type and CRT2 routing.
 *
 * Reads the LCD_Capability word for the current panel, programs the
 * LV-bridge drive/PLL bits, then hands off to the channel-A or
 * channel-B helper and (for 302LV/301C) enables spread spectrum when
 * the capability word requests it.
 */
static void XGI_SetLCDCap(struct vb_device_info *pVBInfo)
{
	unsigned short tempcx;

	tempcx = pVBInfo->LCDCapList[XGI_GetLCDCapPtr(pVBInfo)].LCD_Capability;

	if (pVBInfo->VBType &
	    (VB_XGI301B |
	     VB_XGI302B |
	     VB_XGI301LV |
	     VB_XGI302LV |
	     VB_XGI301C)) { /* 301LV/302LV only */
		if (pVBInfo->VBType &
		    (VB_XGI301LV | VB_XGI302LV | VB_XGI301C)) {
			/* Set 301LV Capability: low 5 bits into Part4 0x24 */
			xgifb_reg_set(pVBInfo->Part4Port, 0x24,
					(unsigned char) (tempcx & 0x1F));
		}
		/* VB Driving: VBCLK drive / PLL SP bits into Part4 0x0D */
		xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D,
				~((EnableVBCLKDRVLOW | EnablePLLSPLOW) >> 8),
				(unsigned short) ((tempcx & (EnableVBCLKDRVLOW
						| EnablePLLSPLOW)) >> 8));
	}

	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
			| VB_XGI302LV | VB_XGI301C)) {
		/* LCD over channel B takes priority over LCDA. */
		if (pVBInfo->VBInfo & SetCRT2ToLCD)
			XGI_SetLCDCap_B(tempcx, pVBInfo);
		else if (pVBInfo->VBInfo & SetCRT2ToLCDA)
			XGI_SetLCDCap_A(tempcx, pVBInfo);

		if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
			if (tempcx & EnableSpectrum)
				SetSpectrum(pVBInfo);
		}
	} else {
		/* LVDS,CH7017: always the channel-A style programming */
		XGI_SetLCDCap_A(tempcx, pVBInfo);
	}
}
/* --------------------------------------------------------------------- */
/* Function : XGI_SetAntiFlicker */
/* Input : */
/* Output : */
/* Description : Set TV Customized Param. */
/* --------------------------------------------------------------------- */
/*
 * XGI_SetAntiFlicker - select the TV anti-flicker level for the current
 * mode and write it to Part2 0x0A bits 6:4.
 *
 * Skipped for progressive YPbPr (525p/750p) where flicker filtering
 * does not apply.  The table row comes from the TV-standard index
 * (forced even) plus a per-mode flicker index.
 */
static void XGI_SetAntiFlicker(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short tbl_idx;
	unsigned char level;

	if (pVBInfo->TVInfo & (SetYPbPrMode525p | SetYPbPrMode750p))
		return;

	tbl_idx = XGI_GetTVPtrIndex(pVBInfo) & 0xFE;

	if (ModeNo <= 0x13)
		tbl_idx += pVBInfo->SModeIDTable[ModeIdIndex].
				VB_StTVFlickerIndex;
	else
		tbl_idx += pVBInfo->EModeIDTable[ModeIdIndex].
				VB_ExtTVFlickerIndex;

	level = TVAntiFlickList[tbl_idx] << 4;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x0A, 0x8F, level);
}
/*
 * XGI_SetEdgeEnhance - select the TV edge-enhancement level for the
 * current mode and write it to Part2 0x3A (mask 0x1F keeps the low
 * bits, so the value lands in bits 7:5).
 *
 * Table row = TV-standard index (forced even) + per-mode edge index.
 */
static void XGI_SetEdgeEnhance(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short tbl_idx;
	unsigned char level;

	tbl_idx = XGI_GetTVPtrIndex(pVBInfo) & 0xFE;

	if (ModeNo <= 0x13)
		tbl_idx += pVBInfo->SModeIDTable[ModeIdIndex].VB_StTVEdgeIndex;
	else
		tbl_idx += pVBInfo->EModeIDTable[ModeIdIndex].VB_ExtTVEdgeIndex;

	level = TVEdgeList[tbl_idx] << 5;
	xgifb_reg_and_or(pVBInfo->Part2Port, 0x3A, 0x1F, level);
}
/*
 * XGI_SetPhaseIncr - load the 32-bit TV phase increment for the current
 * TV standard into Part2 registers 0x31-0x34, least-significant byte
 * first.
 */
static void XGI_SetPhaseIncr(struct vb_device_info *pVBInfo)
{
	unsigned short tempbx, i;
	unsigned char tempcl, tempch;
	unsigned long phase;

	/* bx, cl, ch */
	XGI_GetTVPtrIndex2(&tempbx, &tempcl, &tempch, pVBInfo);
	phase = TVPhaseList[tempbx];

	for (i = 0; i < 4; i++)
		xgifb_reg_set(pVBInfo->Part2Port, 0x31 + i,
				(unsigned short) ((phase >> (8 * i)) & 0xFF));
}
/*
 * XGI_SetYFilter - program the TV luma (Y) filter coefficients.
 *
 * The combined TV index from XGI_GetTVPtrIndex2() selects one of the
 * per-standard filter tables; a per-mode filter index then selects a
 * row inside that table (4 entries per row in one layout, 7 in the
 * other).  Coefficients go into Part2 0x35-0x38, with three extra taps
 * (0x48-0x4A) on B-series and later bridges.
 */
static void XGI_SetYFilter(unsigned short ModeNo, unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short tempbx, index;
	unsigned char tempcl, tempch, tempal, *filterPtr;

	XGI_GetTVPtrIndex2(&tempbx, &tempcl, &tempch, pVBInfo); /* bx, cl, ch */

	/* Pick the filter table for this TV standard / bridge combo. */
	switch (tempbx) {
	case 0x00:
	case 0x04:
		filterPtr = NTSCYFilter1;
		break;

	case 0x01:
		filterPtr = PALYFilter1;
		break;

	case 0x02:
	case 0x05:
	case 0x0D:
		filterPtr = PALMYFilter1;
		break;

	case 0x03:
		filterPtr = PALNYFilter1;
		break;

	case 0x08:
	case 0x0C:
		filterPtr = NTSCYFilter2;
		break;

	case 0x0A:
		filterPtr = PALMYFilter2;
		break;

	case 0x0B:
		filterPtr = PALNYFilter2;
		break;

	case 0x09:
		filterPtr = PALYFilter2;
		break;

	default:
		return;
	}

	/* Per-mode row index into the selected table. */
	if (ModeNo <= 0x13)
		tempal = pVBInfo->SModeIDTable[ModeIdIndex].
				VB_StTVYFilterIndex;
	else
		tempal = pVBInfo->EModeIDTable[ModeIdIndex].
				VB_ExtTVYFilterIndex;

	/* Row stride: 4 coefficients in one layout, 7 in the other. */
	if (tempcl == 0)
		index = tempal * 4;
	else
		index = tempal * 7;

	if ((tempcl == 0) && (tempch == 1)) {
		/* Only the last tap is loaded; first three forced to 0. */
		xgifb_reg_set(pVBInfo->Part2Port, 0x35, 0);
		xgifb_reg_set(pVBInfo->Part2Port, 0x36, 0);
		xgifb_reg_set(pVBInfo->Part2Port, 0x37, 0);
		xgifb_reg_set(pVBInfo->Part2Port, 0x38, filterPtr[index++]);
	} else {
		xgifb_reg_set(pVBInfo->Part2Port, 0x35, filterPtr[index++]);
		xgifb_reg_set(pVBInfo->Part2Port, 0x36, filterPtr[index++]);
		xgifb_reg_set(pVBInfo->Part2Port, 0x37, filterPtr[index++]);
		xgifb_reg_set(pVBInfo->Part2Port, 0x38, filterPtr[index++]);
	}

	/* Extra filter taps on B-series and later bridges. */
	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
			| VB_XGI302LV | VB_XGI301C)) {
		xgifb_reg_set(pVBInfo->Part2Port, 0x48, filterPtr[index++]);
		xgifb_reg_set(pVBInfo->Part2Port, 0x49, filterPtr[index++]);
		xgifb_reg_set(pVBInfo->Part2Port, 0x4A, filterPtr[index++]);
	}
}
/* --------------------------------------------------------------------- */
/* Function : XGI_OEM310Setting */
/* Input : */
/* Output : */
/* Description : Customized Param. for 301 */
/* --------------------------------------------------------------------- */
/*
 * XGI_OEM310Setting - apply the 301-family OEM tweaks after a mode set:
 * delay compensation always, LCD capability bits when CRT2 drives an
 * LCD, and the TV phase / Y-filter / anti-flicker (plus 301-only edge
 * enhancement) group when CRT2 drives a TV.  Skipped entirely in
 * Win9x DOS mode.
 */
static void XGI_OEM310Setting(unsigned short ModeNo,
		unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	if (pVBInfo->SetFlag & Win9xDOSMode)
		return;

	/* GetPart1IO(); */
	XGI_SetDelayComp(pVBInfo);

	if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA))
		XGI_SetLCDCap(pVBInfo);

	if (pVBInfo->VBInfo & SetCRT2ToTV) {
		/* GetPart2IO() */
		XGI_SetPhaseIncr(pVBInfo);
		XGI_SetYFilter(ModeNo, ModeIdIndex, pVBInfo);
		XGI_SetAntiFlicker(ModeNo, ModeIdIndex, pVBInfo);

		/* Edge enhancement exists only on the original 301. */
		if (pVBInfo->VBType & VB_XGI301)
			XGI_SetEdgeEnhance(ModeNo, ModeIdIndex, pVBInfo);
	}
}
/* --------------------------------------------------------------------- */
/* Function : XGI_SetCRT2ModeRegs */
/* Input : */
/* Output : */
/* Description : Origin code for crt2group */
/* --------------------------------------------------------------------- */
/*
 * XGI_SetCRT2ModeRegs - program the CRT2 routing and mode registers
 * (Part1 0x00/0x2e/0x13/0x2c, Part4 0x0D/0x0C/0x21/0x23/0x27/0x34)
 * from the decoded VBInfo/VBType state.
 *
 * The register values are built up incrementally in tempah/tempbl, and
 * several writes depend on the value left over from an earlier section,
 * so statement order is significant throughout.
 */
void XGI_SetCRT2ModeRegs(unsigned short ModeNo,
		struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	unsigned short tempbl;
	short tempcl;
	unsigned char tempah;

	/* // fix write part1 index 0 BTDRAM bit Bug
	 * xgifb_reg_set(pVBInfo->Part1Port, 0x03, 0x00); */
	tempah = 0;
	if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
		tempah = xgifb_reg_get(pVBInfo->Part1Port, 0x00);
		tempah &= ~0x10; /* BTRAMDAC */
		tempah |= 0x40; /* BTRAM */

		if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV
				| SetCRT2ToLCD)) {
			tempah = 0x40; /* BTDRAM */
			if (ModeNo > 0x13) {
				tempcl = pVBInfo->ModeType;
				tempcl -= ModeVGA;
				if (tempcl >= 0) {
					/* BT Color: depth-dependent bit,
					 * clamped to at least 1 */
					tempah = (0x008 >> tempcl);
					if (tempah == 0)
						tempah = 1;
					tempah |= 0x040;
				}
			}
			if (pVBInfo->VBInfo & SetInSlaveMode)
				tempah ^= 0x50; /* BTDAC */
		}
	}

	/* 0210 shampoo
	   if (pVBInfo->VBInfo & DisableCRT2Display) {
	   tempah = 0;
	   }

	   xgifb_reg_set(pVBInfo->Part1Port, 0x00, tempah);
	   if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD)) {
	   tempcl = pVBInfo->ModeType;
	   if (ModeNo > 0x13) {
	   tempcl -= ModeVGA;
	   if ((tempcl > 0) || (tempcl == 0)) {
	   tempah=(0x008>>tempcl) ;
	   if (tempah == 0)
	   tempah = 1;
	   tempah |= 0x040;
	   }
	   } else {
	   tempah = 0x040;
	   }

	   if (pVBInfo->VBInfo & SetInSlaveMode) {
	   tempah = (tempah ^ 0x050);
	   }
	   }
	 */

	xgifb_reg_set(pVBInfo->Part1Port, 0x00, tempah);
	tempah = 0x08;
	tempbl = 0xf0;

	if (pVBInfo->VBInfo & DisableCRT2Display) {
		xgifb_reg_and_or(pVBInfo->Part1Port, 0x2e, tempbl, tempah);
	} else {
		tempah = 0x00;
		tempbl = 0xff;

		if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV
				| SetCRT2ToLCD | SetCRT2ToLCDA)) {
			/* Pure LCDA (no simultaneous scan): bit 0 only. */
			if ((pVBInfo->VBInfo & SetCRT2ToLCDA) &&
					(!(pVBInfo->VBInfo
							& SetSimuScanMode))) {
				tempbl &= 0xf7;
				tempah |= 0x01;
				xgifb_reg_and_or(pVBInfo->Part1Port, 0x2e,
						tempbl, tempah);
			} else {
				if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
					tempbl &= 0xf7;
					tempah |= 0x01;
				}

				if (pVBInfo->VBInfo &
				    (SetCRT2ToRAMDAC |
				     SetCRT2ToTV |
				     SetCRT2ToLCD)) {
					tempbl &= 0xf8;
					tempah = 0x01;

					if (!(pVBInfo->VBInfo & SetInSlaveMode))
						tempah |= 0x02;

					/* XOR tricks encode the RAMDAC/LCD
					 * combinations into bits 0 and 2. */
					if (!(pVBInfo->VBInfo &
					      SetCRT2ToRAMDAC)) {
						tempah = tempah ^ 0x05;
						if (!(pVBInfo->VBInfo &
						      SetCRT2ToLCD))
							tempah = tempah ^ 0x01;
					}

					if (!(pVBInfo->VBInfo &
					      SetCRT2ToDualEdge))
						tempah |= 0x08;
					xgifb_reg_and_or(pVBInfo->Part1Port,
							0x2e, tempbl, tempah);
				} else {
					xgifb_reg_and_or(pVBInfo->Part1Port,
							0x2e, tempbl, tempah);
				}
			}
		} else {
			xgifb_reg_and_or(pVBInfo->Part1Port, 0x2e, tempbl,
					tempah);
		}
	}

	if (pVBInfo->VBInfo & (SetCRT2ToRAMDAC | SetCRT2ToTV | SetCRT2ToLCD
			| SetCRT2ToLCDA)) {
		/* tempah still holds the value written to Part1 0x2e. */
		tempah &= (~0x08);
		if ((pVBInfo->ModeType == ModeVGA) && (!(pVBInfo->VBInfo
				& SetInSlaveMode))) {
			tempah |= 0x010;
		}
		tempah |= 0x080;

		if (pVBInfo->VBInfo & SetCRT2ToTV) {
			/* if (!(pVBInfo->TVInfo &
			     (SetYPbPrMode525p | SetYPbPrMode750p))) { */
			tempah |= 0x020;
			if (ModeNo > 0x13) {
				if (pVBInfo->VBInfo & DriverMode)
					tempah = tempah ^ 0x20;
			}
			/* } */
		}

		xgifb_reg_and_or(pVBInfo->Part4Port, 0x0D, ~0x0BF, tempah);
		tempah = 0;

		if (pVBInfo->LCDInfo & SetLCDDualLink)
			tempah |= 0x40;

		if (pVBInfo->VBInfo & SetCRT2ToTV) {
			/* if ((!(pVBInfo->VBInfo & SetCRT2ToHiVisionTV)) &&
			     (!(pVBInfo->TVInfo &
			       (SetYPbPrMode525p | SetYPbPrMode750p)))) { */
			if (pVBInfo->TVInfo & RPLLDIV2XO)
				tempah |= 0x40;
			/* } */
		}

		if ((pVBInfo->LCDResInfo == Panel1280x1024)
				|| (pVBInfo->LCDResInfo == Panel1280x1024x75))
			tempah |= 0x80;

		if (pVBInfo->LCDResInfo == Panel1280x960)
			tempah |= 0x80;

		xgifb_reg_set(pVBInfo->Part4Port, 0x0C, tempah);
	}

	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
			| VB_XGI302LV | VB_XGI301C)) {
		/* Dual-edge handling on B-series and later bridges. */
		tempah = 0;
		tempbl = 0xfb;

		if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
			tempbl = 0xff;
			if (pVBInfo->VBInfo & SetCRT2ToLCDA)
				tempah |= 0x04; /* shampoo 0129 */
		}

		xgifb_reg_and_or(pVBInfo->Part1Port, 0x13, tempbl, tempah);
		tempah = 0x00;
		tempbl = 0xcf;
		if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
			if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
				tempah |= 0x30;
		}

		xgifb_reg_and_or(pVBInfo->Part1Port, 0x2c, tempbl, tempah);
		tempah = 0;
		tempbl = 0x3f;

		if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
			if (pVBInfo->VBInfo & SetCRT2ToDualEdge)
				tempah |= 0xc0;
		}
		xgifb_reg_and_or(pVBInfo->Part4Port, 0x21, tempbl, tempah);
	}

	tempah = 0;
	tempbl = 0x7f;
	if (!(pVBInfo->VBInfo & SetCRT2ToLCDA)) {
		tempbl = 0xff;
		if (!(pVBInfo->VBInfo & SetCRT2ToDualEdge))
			tempah |= 0x80;
	}

	xgifb_reg_and_or(pVBInfo->Part4Port, 0x23, tempbl, tempah);

	if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
		if (pVBInfo->LCDInfo & SetLCDDualLink) {
			xgifb_reg_or(pVBInfo->Part4Port, 0x27, 0x20);
			xgifb_reg_or(pVBInfo->Part4Port, 0x34, 0x10);
		}
	}
}
/*
 * XGI_CloseCRTC - CRT2 close hook; currently a no-op.
 *
 * The previous body computed a value (0x08A0 when CRT2 drove the LCD
 * over channel A) into a local that was never written to hardware or
 * read anywhere — a dead store that only generated compiler warnings.
 * The dead code has been removed; the function is kept so existing
 * call sites and the close-hook interface remain unchanged.
 */
static void XGI_CloseCRTC(struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
}
/*
 * XGI_OpenCRTC - CRT2 open hook; currently a no-op.
 *
 * The previous body only zeroed a local variable that was never used
 * (a dead store).  The dead code has been removed; the function is
 * kept so existing call sites and the open-hook interface remain
 * unchanged.
 */
void XGI_OpenCRTC(struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
}
/*
 * XGI_UnLockCRT2 - set the CRT2 register-unlock bit (Part1 0x2f bit 0)
 * so the Part1..Part5 CRT2 registers accept writes.
 */
void XGI_UnLockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	xgifb_reg_and_or(pVBInfo->Part1Port, 0x2f, 0xFF, 0x01);
}
/*
 * XGI_LockCRT2 - clear the CRT2 register-unlock bit (Part1 0x2F bit 0),
 * write-protecting the CRT2 registers again.
 */
void XGI_LockCRT2(struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	xgifb_reg_and_or(pVBInfo->Part1Port, 0x2F, 0xFE, 0x00);
}
/*
 * XGI_BridgeIsOn - probe for a video bridge.
 *
 * LVDS configurations report success unconditionally (there is no
 * bridge chip to probe).  Otherwise Part4 index 0x00 is read; values
 * 1 and 2 indicate a present bridge (301b per the original comment).
 *
 * Returns 1 when a bridge is present, 0 otherwise.
 */
unsigned char XGI_BridgeIsOn(struct vb_device_info *pVBInfo)
{
	unsigned short id;

	if (pVBInfo->IF_DEF_LVDS == 1)
		return 1;

	id = xgifb_reg_get(pVBInfo->Part4Port, 0x00);
	return (id == 1 || id == 2) ? 1 : 0; /* 301b */
}
/*
 * XGI_LongWait - busy-wait for roughly one vertical retrace.
 *
 * Skipped when SR1F bits 7:6 are set (presumably a power-down/blanked
 * state — confirm against the SR1F definition).  Otherwise polls the
 * 3DA status register: first waits for the vertical-retrace bit (0x08)
 * to clear, then for it to set again, each with a 0xFFFF-iteration
 * bail-out so the loop cannot hang.
 */
void XGI_LongWait(struct vb_device_info *pVBInfo)
{
	unsigned short i;

	i = xgifb_reg_get(pVBInfo->P3c4, 0x1F);

	if (!(i & 0xC0)) {
		/* wait for retrace bit to drop... */
		for (i = 0; i < 0xFFFF; i++) {
			if (!(inb(pVBInfo->P3da) & 0x08))
				break;
		}

		/* ...then for the next retrace to begin */
		for (i = 0; i < 0xFFFF; i++) {
			if ((inb(pVBInfo->P3da) & 0x08))
				break;
		}
	}
}
/*
 * XGI_VBLongWait - vertical-blank wait stub; currently a no-op.
 *
 * The original body was disabled long ago by an unconditional "return;"
 * placed before any other statement, which left the vertical-blank
 * polling loops below it unreachable and its locals unused (dead code
 * that only produced compiler warnings).  The unreachable code has
 * been removed; the function stays a no-op so the many existing call
 * sites keep their current (do-nothing) behaviour.
 */
static void XGI_VBLongWait(struct vb_device_info *pVBInfo)
{
}
/*
 * XGI_GetRatePtrCRT2 - find the refresh-rate table entry for a mode on
 * CRT2.
 *
 * Starts from the rate preference stored in CR33 (shifted by
 * SelectCRT2Rate), clamps it by the LCD refresh limits when CRT2
 * programming targets an LCD, then walks the RefIndex table from the
 * mode's first entry until either the mode ID changes, the color depth
 * drops below the requested ModeType, or the preference count runs out.
 *
 * Returns the RefIndex table index of the chosen rate, 0 for a
 * CH7005+TV half-DCLK mode, or 0xFFFF for standard modes (< 0x14).
 */
unsigned short XGI_GetRatePtrCRT2(struct xgi_hw_device_info *pXGIHWDE,
		unsigned short ModeNo, unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	/* Max refresh-table offset per LCD panel type (index by
	 * LCDResInfo & 0x0F). */
	short LCDRefreshIndex[] = { 0x00, 0x00, 0x03, 0x01 },
			LCDARefreshIndex[] = { 0x00, 0x00, 0x03, 0x01, 0x01,
					0x01, 0x01 };

	unsigned short RefreshRateTableIndex, i, modeflag, index, temp;

	if (ModeNo <= 0x13)
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	else
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;

	if (pVBInfo->IF_DEF_CH7005 == 1) {
		if (pVBInfo->VBInfo & SetCRT2ToTV) {
			if (modeflag & HalfDCLK)
				return 0;
		}
	}

	/* Standard VGA modes have no refresh-rate table entry. */
	if (ModeNo < 0x14)
		return 0xFFFF;

	/* CR33 holds the user's rate preference (nibble selected by
	 * SelectCRT2Rate). */
	index = xgifb_reg_get(pVBInfo->P3d4, 0x33);
	index = index >> pVBInfo->SelectCRT2Rate;
	index &= 0x0F;

	if (pVBInfo->LCDInfo & LCDNonExpanding)
		index = 0;

	if (index > 0)
		index--;

	if (pVBInfo->SetFlag & ProgrammingCRT2) {
		if (pVBInfo->IF_DEF_CH7005 == 1) {
			if (pVBInfo->VBInfo & SetCRT2ToTV)
				index = 0;
		}

		if (pVBInfo->VBInfo & (SetCRT2ToLCD | SetCRT2ToLCDA)) {
			if (pVBInfo->IF_DEF_LVDS == 0) {
				/* Clamp the preference to what the panel
				 * supports. */
				if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B
						| VB_XGI301LV | VB_XGI302LV
						| VB_XGI301C))
					/* 301b */
					temp = LCDARefreshIndex[
						pVBInfo->LCDResInfo & 0x0F];
				else
					temp = LCDRefreshIndex[
						pVBInfo->LCDResInfo & 0x0F];

				if (index > temp)
					index = temp;
			} else {
				index = 0;
			}
		}
	}

	RefreshRateTableIndex = pVBInfo->EModeIDTable[ModeIdIndex].REFindex;
	ModeNo = pVBInfo->RefIndex[RefreshRateTableIndex].ModeID;
	if (pXGIHWDE->jChipType >= XG20) { /* for XG20, XG21, XG27 */
		/*
		if (pVBInfo->RefIndex[RefreshRateTableIndex].Ext_InfoFlag &
		    XG2xNotSupport) {
			index++;
		}
		*/
		/* Skip the first table entry for these resolutions. */
		if ((pVBInfo->RefIndex[RefreshRateTableIndex].XRes == 800) &&
		    (pVBInfo->RefIndex[RefreshRateTableIndex].YRes == 600)) {
			index++;
		}
		/* Alan 10/19/2007;
		 * do the similar adjustment like XGISearchCRT1Rate() */
		if ((pVBInfo->RefIndex[RefreshRateTableIndex].XRes == 1024) &&
		    (pVBInfo->RefIndex[RefreshRateTableIndex].YRes == 768)) {
			index++;
		}
		if ((pVBInfo->RefIndex[RefreshRateTableIndex].XRes == 1280) &&
		    (pVBInfo->RefIndex[RefreshRateTableIndex].YRes == 1024)) {
			index++;
		}
	}

	/* Walk forward while entries still belong to this mode and meet
	 * the requested color depth; 'index' counts preferences down and
	 * wraps to 0xFFFF when exhausted. */
	i = 0;
	do {
		if (pVBInfo->RefIndex[RefreshRateTableIndex + i].
			ModeID != ModeNo)
			break;
		temp = pVBInfo->RefIndex[RefreshRateTableIndex + i].
			Ext_InfoFlag;
		temp &= ModeInfoFlag;
		if (temp < pVBInfo->ModeType)
			break;
		i++;
		index--;
	} while (index != 0xFFFF);

	/* Slave mode without RAMDAC: step past an interlaced entry. */
	if (!(pVBInfo->VBInfo & SetCRT2ToRAMDAAC)) {
		if (pVBInfo->VBInfo & SetInSlaveMode) {
			temp = pVBInfo->RefIndex[RefreshRateTableIndex + i - 1].
				Ext_InfoFlag;
			if (temp & InterlaceMode)
				i++;
		}
	}

	i--;
	if ((pVBInfo->SetFlag & ProgrammingCRT2)) {
		temp = XGI_AjustCRT2Rate(ModeNo, ModeIdIndex,
				RefreshRateTableIndex, &i, pVBInfo);
	}
	return RefreshRateTableIndex + i; /* return (0x01 | (temp1<<1)); */
}
/*
 * XGI_SetLCDAGroup - program the full LCDA (LVDS channel A) register
 * group for a mode: pick the refresh-rate entry, gather LVDS
 * resolution/timing data, patch the CRT1 registers, then set the LVDS
 * registers and the CRT2 engine clock.  Leaves ProgrammingCRT2 set in
 * SetFlag for the helpers it calls.
 */
static void XGI_SetLCDAGroup(unsigned short ModeNo, unsigned short ModeIdIndex,
		struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	unsigned short RefreshRateTableIndex;
	/* unsigned short temp ; */

	/* pVBInfo->SelectCRT2Rate = 0; */

	pVBInfo->SetFlag |= ProgrammingCRT2;
	RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
			ModeIdIndex, pVBInfo);
	XGI_GetLVDSResInfo(ModeNo, ModeIdIndex, pVBInfo);
	XGI_GetLVDSData(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
	XGI_ModCRT1Regs(ModeNo, ModeIdIndex, RefreshRateTableIndex,
			HwDeviceExtension, pVBInfo);
	XGI_SetLVDSRegs(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
	XGI_SetCRT2ECLK(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
}
/*
 * XGI_SetCRT2Group301 - program the complete CRT2 register set for a
 * 301-family bridge: resolve the mode and rate, then run through the
 * Group1-Group5, lock, LCD, Tap4 and VCLK programming helpers in the
 * required order, finishing with the auto-threshold setup.
 *
 * Fix: removed the dead store "tempbx = pVBInfo->VBInfo" — the local
 * was assigned but never read anywhere in the function.
 *
 * Always returns 1 (success).
 */
unsigned char XGI_SetCRT2Group301(unsigned short ModeNo,
		struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	unsigned short ModeIdIndex, RefreshRateTableIndex;

	pVBInfo->SetFlag |= ProgrammingCRT2;
	XGI_SearchModeID(ModeNo, &ModeIdIndex, pVBInfo);
	pVBInfo->SelectCRT2Rate = 4;
	RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
			ModeIdIndex, pVBInfo);
	XGI_SaveCRT2Info(ModeNo, pVBInfo);
	XGI_GetCRT2ResInfo(ModeNo, ModeIdIndex, pVBInfo);
	XGI_GetCRT2Data(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
	XGI_PreSetGroup1(ModeNo, ModeIdIndex, HwDeviceExtension,
			RefreshRateTableIndex, pVBInfo);
	XGI_SetGroup1(ModeNo, ModeIdIndex, HwDeviceExtension,
			RefreshRateTableIndex, pVBInfo);
	XGI_SetLockRegs(ModeNo, ModeIdIndex, HwDeviceExtension,
			RefreshRateTableIndex, pVBInfo);
	XGI_SetGroup2(ModeNo, ModeIdIndex, RefreshRateTableIndex,
			HwDeviceExtension, pVBInfo);
	XGI_SetLCDRegs(ModeNo, ModeIdIndex, HwDeviceExtension,
			RefreshRateTableIndex, pVBInfo);
	XGI_SetTap4Regs(pVBInfo);
	XGI_SetGroup3(ModeNo, ModeIdIndex, pVBInfo);
	XGI_SetGroup4(ModeNo, ModeIdIndex, RefreshRateTableIndex,
			HwDeviceExtension, pVBInfo);
	XGI_SetCRT2VCLK(ModeNo, ModeIdIndex, RefreshRateTableIndex, pVBInfo);
	XGI_SetGroup5(ModeNo, ModeIdIndex, pVBInfo);
	XGI_AutoThreshold(pVBInfo);
	return 1;
}
/*
 * XGI_SenseCRT1 - detect whether a CRT monitor is attached to CRT1.
 *
 * Saves a handful of SR/CR registers, loads a known CRTC timing and a
 * mid-grey DAC palette, waits a few frames, then samples the monitor
 * sense bit in the 3C2 status register: bit 4 set means a monitor was
 * sensed and CR32 bit 5 is set accordingly.  The DAC is then blanked
 * and most of the saved registers restored.
 *
 * NOTE(review): CR17, SR06 and SR07 are saved and modified but never
 * restored on exit — verify whether a later mode set is relied on to
 * reprogram them.
 */
void XGI_SenseCRT1(struct vb_device_info *pVBInfo)
{
	/* Fixed CRTC timing used only for the sense sequence. */
	unsigned char CRTCData[17] = { 0x5F, 0x4F, 0x50, 0x82, 0x55, 0x81,
			0x0B, 0x3E, 0xE9, 0x0B, 0xDF, 0xE7, 0x04, 0x00, 0x00,
			0x05, 0x00 };

	unsigned char SR01 = 0, SR1F = 0, SR07 = 0, SR06 = 0;
	unsigned char CR17, CR63, SR31;
	unsigned short temp;
	unsigned char DAC_TEST_PARMS[3] = { 0x0F, 0x0F, 0x0F };

	int i;
	xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86); /* unlock extended regs */

	/* [2004/05/06] Vicent to fix XG42 single LCD sense to CRT+LCD */
	xgifb_reg_set(pVBInfo->P3d4, 0x57, 0x4A);
	xgifb_reg_set(pVBInfo->P3d4, 0x53, (unsigned char) (xgifb_reg_get(
			pVBInfo->P3d4, 0x53) | 0x02));

	/* Save registers that the sense sequence clobbers. */
	SR31 = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x31);
	CR63 = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x63);
	SR01 = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x01);

	xgifb_reg_set(pVBInfo->P3c4, 0x01, (unsigned char) (SR01 & 0xDF));
	xgifb_reg_set(pVBInfo->P3d4, 0x63, (unsigned char) (CR63 & 0xBF));

	CR17 = (unsigned char) xgifb_reg_get(pVBInfo->P3d4, 0x17);
	xgifb_reg_set(pVBInfo->P3d4, 0x17, (unsigned char) (CR17 | 0x80));

	SR1F = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x1F);
	xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) (SR1F | 0x04));

	SR07 = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x07);
	xgifb_reg_set(pVBInfo->P3c4, 0x07, (unsigned char) (SR07 & 0xFB));
	SR06 = (unsigned char) xgifb_reg_get(pVBInfo->P3c4, 0x06);
	xgifb_reg_set(pVBInfo->P3c4, 0x06, (unsigned char) (SR06 & 0xC3));

	xgifb_reg_set(pVBInfo->P3d4, 0x11, 0x00);

	/* Load the sense timing into the CRTC/SR registers; the index
	 * offsets map table slots onto non-contiguous registers. */
	for (i = 0; i < 8; i++)
		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) i, CRTCData[i]);

	for (i = 8; i < 11; i++)
		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 8),
				CRTCData[i]);

	for (i = 11; i < 13; i++)
		xgifb_reg_set(pVBInfo->P3d4, (unsigned short) (i + 4),
				CRTCData[i]);

	for (i = 13; i < 16; i++)
		xgifb_reg_set(pVBInfo->P3c4, (unsigned short) (i - 3),
				CRTCData[i]);

	xgifb_reg_set(pVBInfo->P3c4, 0x0E, (unsigned char) (CRTCData[16]
			& 0xE0));

	xgifb_reg_set(pVBInfo->P3c4, 0x31, 0x00);
	xgifb_reg_set(pVBInfo->P3c4, 0x2B, 0x1B);
	xgifb_reg_set(pVBInfo->P3c4, 0x2C, 0xE1);

	/* Fill the whole DAC with the grey test level. */
	outb(0x00, pVBInfo->P3c8);

	for (i = 0; i < 256; i++) {
		outb((unsigned char) DAC_TEST_PARMS[0], (pVBInfo->P3c8 + 1));
		outb((unsigned char) DAC_TEST_PARMS[1], (pVBInfo->P3c8 + 1));
		outb((unsigned char) DAC_TEST_PARMS[2], (pVBInfo->P3c8 + 1));
	}

	XGI_VBLongWait(pVBInfo);
	XGI_VBLongWait(pVBInfo);
	XGI_VBLongWait(pVBInfo);

	mdelay(1);

	XGI_WaitDisply(pVBInfo);
	temp = inb(pVBInfo->P3c2);

	/* 3C2 bit 4 = monitor sense; record the result in CR32 bit 5. */
	if (temp & 0x10)
		xgifb_reg_and_or(pVBInfo->P3d4, 0x32, 0xDF, 0x20);
	else
		xgifb_reg_and_or(pVBInfo->P3d4, 0x32, 0xDF, 0x00);

	/* alan, avoid display something, set BLACK DAC if not restore DAC */
	outb(0x00, pVBInfo->P3c8);

	for (i = 0; i < 256; i++) {
		outb(0, (pVBInfo->P3c8 + 1));
		outb(0, (pVBInfo->P3c8 + 1));
		outb(0, (pVBInfo->P3c8 + 1));
	}

	/* Restore the saved registers. */
	xgifb_reg_set(pVBInfo->P3c4, 0x01, SR01);
	xgifb_reg_set(pVBInfo->P3d4, 0x63, CR63);
	xgifb_reg_set(pVBInfo->P3c4, 0x31, SR31);

	/* [2004/05/11] Vicent */
	xgifb_reg_set(pVBInfo->P3d4, 0x53, (unsigned char) (xgifb_reg_get(
			pVBInfo->P3d4, 0x53) & 0xFD));
	xgifb_reg_set(pVBInfo->P3c4, 0x1F, (unsigned char) SR1F);
}
/*
 * XGI_EnableBridge - power up and enable the CRT2 output path.
 *
 * For 301-family bridges: powers on channel A and/or channel B per the
 * SetFlag enable/disable bits and the VBInfo routing, un-blanks the
 * screen, powers the LVDS PLL/driver on LV/C bridges, programs the
 * Part4 0x1F enable bits, and finally turns the display on (with VB
 * waits around un-gating CRT).  For LVDS-only hardware it simply
 * enables CRT2, un-blanks and turns the display on.
 *
 * NOTE(review): the first test uses "SetFlag == Win9xDOSMode" (exact
 * equality) while the same flag is tested with '&' later in this
 * function and elsewhere — confirm whether the equality is intended.
 */
void XGI_EnableBridge(struct xgi_hw_device_info *HwDeviceExtension,
		struct vb_device_info *pVBInfo)
{
	unsigned short tempah;

	if (pVBInfo->SetFlag == Win9xDOSMode) {
		if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
				| VB_XGI302LV | VB_XGI301C)) {
			XGI_DisplayOn(HwDeviceExtension, pVBInfo);
			return;
		} else
			/* LVDS or CH7017 */
			return;
	}

	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
			| VB_XGI302LV | VB_XGI301C)) {
		/* Channel A power-up. */
		if (!(pVBInfo->SetFlag & DisableChA)) {
			if (pVBInfo->SetFlag & EnableChA) {
				/* Power on */
				xgifb_reg_set(pVBInfo->Part1Port, 0x1E, 0x20);
			} else {
				/* SetCRT2ToLCDA ) */
				if (pVBInfo->VBInfo & SetCRT2ToDualEdge) {
					/* Power on */
					xgifb_reg_set(pVBInfo->Part1Port,
							0x1E, 0x20);
				}
			}
		}

		/* Channel B power-up. */
		if (!(pVBInfo->SetFlag & DisableChB)) {
			if ((pVBInfo->SetFlag & EnableChB) || (pVBInfo->VBInfo
					& (SetCRT2ToLCD | SetCRT2ToTV
						| SetCRT2ToRAMDAC))) {
				tempah = (unsigned char) xgifb_reg_get(
						pVBInfo->P3c4, 0x32);
				tempah &= 0xDF;
				if (pVBInfo->VBInfo & SetInSlaveMode) {
					if (!(pVBInfo->VBInfo &
					      SetCRT2ToRAMDAC))
						tempah |= 0x20;
				}
				xgifb_reg_set(pVBInfo->P3c4, 0x32, tempah);
				xgifb_reg_or(pVBInfo->P3c4, 0x1E, 0x20);

				tempah = (unsigned char) xgifb_reg_get(
						pVBInfo->Part1Port, 0x2E);

				if (!(tempah & 0x80))
					/* BVBDOENABLE = 1 */
					xgifb_reg_or(pVBInfo->Part1Port,
							0x2E, 0x80);
				/* BScreenOFF = 0 */
				xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
			}
		}

		/* LVDS PLL / driver power on LV and C bridges. */
		if ((pVBInfo->SetFlag & (EnableChA | EnableChB))
				|| (!(pVBInfo->VBInfo & DisableCRT2Display))) {
			xgifb_reg_and_or(pVBInfo->Part2Port, 0x00, ~0xE0,
					0x20); /* shampoo 0129 */
			if (pVBInfo->VBType & (VB_XGI302LV | VB_XGI301C)) {
				if (!XGI_DisableChISLCD(pVBInfo)) {
					if (XGI_EnableChISLCD(pVBInfo) ||
					    (pVBInfo->VBInfo &
					    (SetCRT2ToLCD | SetCRT2ToLCDA)))
						/* LVDS PLL power on */
						xgifb_reg_and(
							pVBInfo->Part4Port,
							0x2A,
							0x7F);
				}
				/* LVDS Driver power on */
				xgifb_reg_and(pVBInfo->Part4Port, 0x30, 0x7F);
			}
		}

		/* Build the Part4 0x1F channel-enable bits. */
		tempah = 0x00;

		if (!(pVBInfo->VBInfo & DisableCRT2Display)) {
			tempah = 0xc0;

			if (!(pVBInfo->VBInfo & SetSimuScanMode)) {
				if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
					if (pVBInfo->VBInfo &
					    SetCRT2ToDualEdge) {
						tempah = tempah & 0x40;
						if (pVBInfo->VBInfo &
						    SetCRT2ToLCDA)
							tempah = tempah ^ 0xC0;

						if (pVBInfo->SetFlag &
						    DisableChB)
							tempah &= 0xBF;

						if (pVBInfo->SetFlag &
						    DisableChA)
							tempah &= 0x7F;

						if (pVBInfo->SetFlag &
						    EnableChB)
							tempah |= 0x40;

						if (pVBInfo->SetFlag &
						    EnableChA)
							tempah |= 0x80;
					}
				}
			}
		}

		/* EnablePart4_1F */
		xgifb_reg_or(pVBInfo->Part4Port, 0x1F, tempah);

		if (pVBInfo->SetFlag & Win9xDOSMode) {
			XGI_DisplayOn(HwDeviceExtension, pVBInfo);
			return;
		}

		if (!(pVBInfo->SetFlag & DisableChA)) {
			XGI_VBLongWait(pVBInfo);
			if (!(pVBInfo->SetFlag & GatingCRT)) {
				XGI_DisableGatingCRT(HwDeviceExtension,
						pVBInfo);
				XGI_DisplayOn(HwDeviceExtension, pVBInfo);
				XGI_VBLongWait(pVBInfo);
			}
		}
	} /* 301 */
	else { /* LVDS */
		if (pVBInfo->VBInfo & (SetCRT2ToTV | SetCRT2ToLCD
				| SetCRT2ToLCDA))
			/* enable CRT2 */
			xgifb_reg_or(pVBInfo->Part1Port, 0x1E, 0x20);

		tempah = (unsigned char) xgifb_reg_get(pVBInfo->Part1Port,
				0x2E);
		if (!(tempah & 0x80))
			/* BVBDOENABLE = 1 */
			xgifb_reg_or(pVBInfo->Part1Port, 0x2E, 0x80);

		xgifb_reg_and(pVBInfo->Part1Port, 0x00, 0x7F);
		XGI_DisplayOn(HwDeviceExtension, pVBInfo);
	} /* End of VB */
}
/*
 * XGI_SetCRT1Group - program the complete CRT1 register group for a
 * mode: sequencer/misc/CRTC/attribute/graphics registers, the per-rate
 * CRTC timing and VCLK, the XG20 DCLK/2 workaround, XG21/XG27 native
 * CRTC/LCD/LVDS programming, and finally the FIFO, mode registers and
 * DAC.
 *
 * Fix: the mode-number checks in the XG20 DCLK/2 workaround used
 * bitwise '|' between boolean comparisons; replaced with logical '||'
 * (identical behavior for 0/1 operands, correct idiom, no accidental
 * evaluation-order surprises).
 */
static void XGI_SetCRT1Group(struct xgi_hw_device_info *HwDeviceExtension,
		unsigned short ModeNo, unsigned short ModeIdIndex,
		struct vb_device_info *pVBInfo)
{
	unsigned short StandTableIndex, RefreshRateTableIndex, b3CC, temp;

	unsigned short XGINew_P3cc = pVBInfo->P3cc;

	/* XGINew_CRT1Mode = ModeNo; // SaveModeID */
	StandTableIndex = XGI_GetModePtr(ModeNo, ModeIdIndex, pVBInfo);
	/* XGI_SetBIOSData(ModeNo, ModeIdIndex); */
	/* XGI_ClearBankRegs(ModeNo, ModeIdIndex); */
	XGI_SetSeqRegs(ModeNo, StandTableIndex, ModeIdIndex, pVBInfo);
	XGI_SetMiscRegs(StandTableIndex, pVBInfo);
	XGI_SetCRTCRegs(HwDeviceExtension, StandTableIndex, pVBInfo);
	XGI_SetATTRegs(ModeNo, StandTableIndex, ModeIdIndex, pVBInfo);
	XGI_SetGRCRegs(StandTableIndex, pVBInfo);
	XGI_ClearExt1Regs(pVBInfo);

	/* if (pVBInfo->IF_DEF_ExpLink) */
	if (HwDeviceExtension->jChipType == XG27) {
		if (pVBInfo->IF_DEF_LVDS == 0)
			XGI_SetDefaultVCLK(pVBInfo);
	}

	temp = ~ProgrammingCRT2;
	pVBInfo->SetFlag &= temp;
	pVBInfo->SelectCRT2Rate = 0;

	/* Bridge in simultaneous/LCDA/slave mode: rate selection must
	 * treat this as CRT2 programming. */
	if (pVBInfo->VBType & (VB_XGI301B | VB_XGI302B | VB_XGI301LV
			| VB_XGI302LV | VB_XGI301C)) {
		if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA
				| SetInSlaveMode)) {
			pVBInfo->SetFlag |= ProgrammingCRT2;
		}
	}

	RefreshRateTableIndex = XGI_GetRatePtrCRT2(HwDeviceExtension, ModeNo,
			ModeIdIndex, pVBInfo);
	if (RefreshRateTableIndex != 0xFFFF) {
		XGI_SetSync(RefreshRateTableIndex, pVBInfo);
		XGI_SetCRT1CRTC(ModeNo, ModeIdIndex, RefreshRateTableIndex,
				pVBInfo, HwDeviceExtension);
		XGI_SetCRT1DE(HwDeviceExtension, ModeNo, ModeIdIndex,
				RefreshRateTableIndex, pVBInfo);
		XGI_SetCRT1Offset(ModeNo, ModeIdIndex, RefreshRateTableIndex,
				HwDeviceExtension, pVBInfo);
		XGI_SetCRT1VCLK(ModeNo, ModeIdIndex, HwDeviceExtension,
				RefreshRateTableIndex, pVBInfo);
	}

	if ((HwDeviceExtension->jChipType >= XG20) &&
	    (HwDeviceExtension->jChipType < XG27)) { /* fix H/W DCLK/2 bug */
		if ((ModeNo == 0x00) || (ModeNo == 0x01)) {
			xgifb_reg_set(pVBInfo->P3c4, 0x2B, 0x4E);
			xgifb_reg_set(pVBInfo->P3c4, 0x2C, 0xE9);
			b3CC = (unsigned char) inb(XGINew_P3cc);
			outb((b3CC |= 0x0C), XGINew_P3cc);
		} else if ((ModeNo == 0x04) || (ModeNo == 0x05) ||
			   (ModeNo == 0x0D)) {
			xgifb_reg_set(pVBInfo->P3c4, 0x2B, 0x1B);
			xgifb_reg_set(pVBInfo->P3c4, 0x2C, 0xE3);
			b3CC = (unsigned char) inb(XGINew_P3cc);
			outb((b3CC |= 0x0C), XGINew_P3cc);
		}
	}

	if (HwDeviceExtension->jChipType >= XG21) {
		temp = xgifb_reg_get(pVBInfo->P3d4, 0x38);
		if (temp & 0xA0) {
			/* Enable write GPIOF */
			/* xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x20); */
			/* P. DWN */
			/* xgifb_reg_and(pVBInfo->P3d4, 0x48, ~0x20); */

			/* XG21 CRT1 Timing */
			if (HwDeviceExtension->jChipType == XG27)
				XGI_SetXG27CRTC(ModeNo, ModeIdIndex,
						RefreshRateTableIndex,
						pVBInfo);
			else
				XGI_SetXG21CRTC(ModeNo, ModeIdIndex,
						RefreshRateTableIndex,
						pVBInfo);

			XGI_UpdateXG21CRTC(ModeNo, pVBInfo,
					RefreshRateTableIndex);

			if (HwDeviceExtension->jChipType == XG27)
				XGI_SetXG27LCD(pVBInfo, RefreshRateTableIndex,
						ModeNo);
			else
				XGI_SetXG21LCD(pVBInfo, RefreshRateTableIndex,
						ModeNo);

			if (pVBInfo->IF_DEF_LVDS == 1) {
				if (HwDeviceExtension->jChipType == XG27)
					XGI_SetXG27LVDSPara(ModeNo,
							ModeIdIndex, pVBInfo);
				else
					XGI_SetXG21LVDSPara(ModeNo,
							ModeIdIndex, pVBInfo);
			}
			/* P. ON */
			/* xgifb_reg_or(pVBInfo->P3d4, 0x48, 0x20); */
		}
	}

	pVBInfo->SetFlag &= (~ProgrammingCRT2);
	XGI_SetCRT1FIFO(ModeNo, HwDeviceExtension, pVBInfo);
	XGI_SetCRT1ModeRegs(HwDeviceExtension, ModeNo, ModeIdIndex,
			RefreshRateTableIndex, pVBInfo);

	/* XGI_LoadCharacter(); //dif ifdef TVFont */

	XGI_LoadDAC(ModeNo, ModeIdIndex, pVBInfo);
	/* XGI_ClearBuffer(HwDeviceExtension, ModeNo, pVBInfo); */
}
/*
 * XGISetModeNew - program the XGI graphics core for BIOS mode 'ModeNo'.
 *
 * Builds a vb_device_info context on the stack, derives the legacy VGA I/O
 * port addresses from the mapped I/O base, detects an LVDS panel on
 * XG21/XG27, then programs CRT1 (and, on pre-XG20 chips, CRT2 through the
 * 301/302 video bridge).
 *
 * @HwDeviceExtension: per-device hardware description (chip type, ROM and
 *                     I/O base addresses, bridge chip id)
 * @ModeNo: BIOS mode number; bit 7 is masked off before use
 *
 * Returns 1 on success, 0 when the mode is rejected by the LVDS check.
 */
unsigned char XGISetModeNew(struct xgi_hw_device_info *HwDeviceExtension,
		unsigned short ModeNo)
{
	unsigned short ModeIdIndex;
	/* unsigned char *pVBInfo->FBAddr =
		HwDeviceExtension->pjVideoMemoryAddress; */
	struct vb_device_info VBINF;
	struct vb_device_info *pVBInfo = &VBINF;

	pVBInfo->ROMAddr = HwDeviceExtension->pjVirtualRomBase;
	pVBInfo->BaseAddr = (unsigned long) HwDeviceExtension->pjIOAddress;

	/* external-encoder feature flags; all cleared by default here */
	pVBInfo->IF_DEF_LVDS = 0;
	pVBInfo->IF_DEF_CH7005 = 0;
	pVBInfo->IF_DEF_LCDA = 1;
	pVBInfo->IF_DEF_CH7017 = 0;
	pVBInfo->IF_DEF_CH7007 = 0; /* [Billy] 2007/05/14 */
	pVBInfo->IF_DEF_VideoCapture = 0;
	pVBInfo->IF_DEF_ScaleLCD = 0;
	pVBInfo->IF_DEF_OEMUtil = 0;
	pVBInfo->IF_DEF_PWD = 0;

	if (HwDeviceExtension->jChipType >= XG20) { /* kuku 2004/06/25 */
		/* XG20 and newer: no TV/HiVision/CRT2-monitor paths */
		pVBInfo->IF_DEF_YPbPr = 0;
		pVBInfo->IF_DEF_HiVision = 0;
		pVBInfo->IF_DEF_CRT2Monitor = 0;
		pVBInfo->VBType = 0; /*set VBType default 0*/
	} else {
		pVBInfo->IF_DEF_YPbPr = 1;
		pVBInfo->IF_DEF_HiVision = 1;
		pVBInfo->IF_DEF_CRT2Monitor = 1;
	}

	/* legacy VGA register ports relative to the relocated I/O base */
	pVBInfo->P3c4 = pVBInfo->BaseAddr + 0x14;
	pVBInfo->P3d4 = pVBInfo->BaseAddr + 0x24;
	pVBInfo->P3c0 = pVBInfo->BaseAddr + 0x10;
	pVBInfo->P3ce = pVBInfo->BaseAddr + 0x1e;
	pVBInfo->P3c2 = pVBInfo->BaseAddr + 0x12;
	pVBInfo->P3cc = pVBInfo->BaseAddr + 0x1C;
	pVBInfo->P3ca = pVBInfo->BaseAddr + 0x1a;
	pVBInfo->P3c6 = pVBInfo->BaseAddr + 0x16;
	pVBInfo->P3c7 = pVBInfo->BaseAddr + 0x17;
	pVBInfo->P3c8 = pVBInfo->BaseAddr + 0x18;
	pVBInfo->P3c9 = pVBInfo->BaseAddr + 0x19;
	pVBInfo->P3da = pVBInfo->BaseAddr + 0x2A;
	pVBInfo->Part0Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_00;
	pVBInfo->Part1Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_04;
	pVBInfo->Part2Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_10;
	pVBInfo->Part3Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_12;
	pVBInfo->Part4Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14;
	pVBInfo->Part5Port = pVBInfo->BaseAddr + XGI_CRT2_PORT_14 + 2;

	/* for x86 Linux, XG21 LVDS */
	if (HwDeviceExtension->jChipType == XG21) {
		if ((xgifb_reg_get(pVBInfo->P3d4, 0x38) & 0xE0) == 0xC0)
			pVBInfo->IF_DEF_LVDS = 1;
	}

	if (HwDeviceExtension->jChipType == XG27) {
		if ((xgifb_reg_get(pVBInfo->P3d4, 0x38) & 0xE0) == 0xC0) {
			/* NOTE(review): XG27 additionally gates LVDS on
			 * CR30 bit 5 - confirm against the datasheet */
			if (xgifb_reg_get(pVBInfo->P3d4, 0x30) & 0x20)
				pVBInfo->IF_DEF_LVDS = 1;
		}
	}

	if (HwDeviceExtension->jChipType < XG20) /* kuku 2004/06/25 */
		XGI_GetVBType(pVBInfo);

	InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo);

	/* bit 7 of the BIOS mode number is a modifier flag; strip it */
	if (ModeNo & 0x80) {
		ModeNo = ModeNo & 0x7F;
		/* XGINew_flag_clearbuffer = 0; */
	}
	/* else {
		XGINew_flag_clearbuffer = 1;
	}
	*/

	/* SR05 = 0x86: presumably unlocks extended registers - confirm */
	xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86);

	if (HwDeviceExtension->jChipType < XG20) /* kuku 2004/06/25 1.Openkey */
		XGI_UnLockCRT2(HwDeviceExtension, pVBInfo);

	XGI_SearchModeID(ModeNo, &ModeIdIndex, pVBInfo);
	XGI_GetVGAType(HwDeviceExtension, pVBInfo);

	if (HwDeviceExtension->jChipType < XG20) { /* kuku 2004/06/25 */
		XGI_GetVBInfo(ModeNo, ModeIdIndex, HwDeviceExtension, pVBInfo);
		XGI_GetTVInfo(ModeNo, ModeIdIndex, pVBInfo);
		XGI_GetLCDInfo(ModeNo, ModeIdIndex, pVBInfo);
		/* keep the bridge disabled while CRT1/CRT2 are reprogrammed */
		XGI_DisableBridge(HwDeviceExtension, pVBInfo);
		/* XGI_OpenCRTC(HwDeviceExtension, pVBInfo); */

		if (pVBInfo->VBInfo & (SetSimuScanMode | SetCRT2ToLCDA)) {
			XGI_SetCRT1Group(HwDeviceExtension, ModeNo,
					ModeIdIndex, pVBInfo);

			if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
				XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
						HwDeviceExtension, pVBInfo);
			}
		} else {
			if (!(pVBInfo->VBInfo & SwitchToCRT2)) {
				XGI_SetCRT1Group(HwDeviceExtension, ModeNo,
						ModeIdIndex, pVBInfo);
				if (pVBInfo->VBInfo & SetCRT2ToLCDA) {
					XGI_SetLCDAGroup(ModeNo, ModeIdIndex,
							HwDeviceExtension,
							pVBInfo);
				}
			}
		}

		if (pVBInfo->VBInfo & (SetSimuScanMode | SwitchToCRT2)) {
			/* 301 and 302 bridges share the same CRT2 setup */
			switch (HwDeviceExtension->ujVBChipID) {
			case VB_CHIP_301:
				XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
						pVBInfo); /*add for CRT2 */
				break;

			case VB_CHIP_302:
				XGI_SetCRT2Group301(ModeNo, HwDeviceExtension,
						pVBInfo); /*add for CRT2 */
				break;

			default:
				break;
			}
		}

		XGI_SetCRT2ModeRegs(ModeNo, HwDeviceExtension, pVBInfo);
		XGI_OEM310Setting(ModeNo, ModeIdIndex, pVBInfo); /*0212*/
		XGI_CloseCRTC(HwDeviceExtension, pVBInfo);
		XGI_EnableBridge(HwDeviceExtension, pVBInfo);
	} /* !XG20 */
	else {
		/* XG20 and newer: CRT1 only, optionally through LVDS */
		if (pVBInfo->IF_DEF_LVDS == 1)
			if (!XGI_XG21CheckLVDSMode(ModeNo,
						   ModeIdIndex,
						   pVBInfo))
				return 0;

		if (ModeNo <= 0x13) {
			pVBInfo->ModeType = pVBInfo->SModeIDTable[ModeIdIndex].
						St_ModeFlag & ModeInfoFlag;
		} else {
			pVBInfo->ModeType = pVBInfo->EModeIDTable[ModeIdIndex].
						Ext_ModeFlag & ModeInfoFlag;
		}

		pVBInfo->SetFlag = 0;
		if (pVBInfo->IF_DEF_CH7007 != 1)
			pVBInfo->VBInfo = DisableCRT2Display;

		/* blank the display while the CRT1 group is reprogrammed */
		XGI_DisplayOff(HwDeviceExtension, pVBInfo);
		XGI_SetCRT1Group(HwDeviceExtension, ModeNo, ModeIdIndex,
				pVBInfo);
		XGI_DisplayOn(HwDeviceExtension, pVBInfo);
		/*
		if (HwDeviceExtension->jChipType == XG21)
			xgifb_reg_and_or(pVBInfo->P3c4, 0x09, ~0x80, 0x80);
		*/
	}

	/*
	if (ModeNo <= 0x13) {
		modeflag = pVBInfo->SModeIDTable[ModeIdIndex].St_ModeFlag;
	} else {
		modeflag = pVBInfo->EModeIDTable[ModeIdIndex].Ext_ModeFlag;
	}

	pVBInfo->ModeType = modeflag&ModeInfoFlag;
	pVBInfo->SetFlag = 0x00;
	pVBInfo->VBInfo = DisableCRT2Display;
	temp = XGINew_CheckMemorySize(HwDeviceExtension,
				      ModeNo,
				      ModeIdIndex,
				      pVBInfo);
	if (temp == 0)
		return (0);

	XGI_DisplayOff(HwDeviceExtension, pVBInfo) ;
	XGI_SetCRT1Group(HwDeviceExtension, ModeNo, ModeIdIndex, pVBInfo);
	XGI_DisplayOn(HwDeviceExtension, pVBInfo);
	*/

	XGI_UpdateModeInfo(HwDeviceExtension, pVBInfo);

	if (HwDeviceExtension->jChipType < XG20) { /* kuku 2004/06/25 */
		XGI_LockCRT2(HwDeviceExtension, pVBInfo);
	}

	return 1;
}
| gpl-2.0 |
crypta-io/android_kernel_samsung_prevail2spr-stock-galaxy-rush | net/batman-adv/vis.c | 2378 | 26429 | /*
* Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*
*/
#include "main.h"
#include "send.h"
#include "translation-table.h"
#include "vis.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "hash.h"
#include "originator.h"
#define MAX_VIS_PACKET_SIZE 1000
/* Returns the smallest signed integer in two's complement with the sizeof x */
#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
/* Checks if a sequence number x is a predecessor/successor of y.
* they handle overflows/underflows and can correctly check for a
* predecessor/successor unless the variable sequence number has grown by
* more then 2**(bitwidth(x)-1)-1.
* This means that for a uint8_t with the maximum value 255, it would think:
* - when adding nothing - it is neither a predecessor nor a successor
* - before adding more than 127 to the starting value - it is a predecessor,
* - when adding 128 - it is neither a predecessor nor a successor,
* - after adding more than 127 to the starting value - it is a successor */
#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
_dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)
static void start_vis_timer(struct bat_priv *bat_priv);
/* free the info */
/* kref release callback: tear down a vis_info once its last ref is gone */
static void free_info(struct kref *ref)
{
	struct vis_info *info = container_of(ref, struct vis_info, refcount);
	struct bat_priv *bat_priv = info->bat_priv;
	struct recvlist_node *recv_entry;
	struct recvlist_node *safe;

	/* drop the packet from any pending send queue */
	list_del_init(&info->send_list);

	/* release every receive-list node under the list lock */
	spin_lock_bh(&bat_priv->vis_list_lock);
	list_for_each_entry_safe(recv_entry, safe, &info->recv_list, list) {
		list_del(&recv_entry->list);
		kfree(recv_entry);
	}
	spin_unlock_bh(&bat_priv->vis_list_lock);

	kfree_skb(info->skb_packet);
	kfree(info);
}
/* Compare two vis packets, used by the hashing algorithm */
/* Compare two vis packets by originator address; used by the hash table */
static int vis_info_cmp(struct hlist_node *node, void *data2)
{
	struct vis_info *info1 = container_of(node, struct vis_info,
					      hash_entry);
	struct vis_info *info2 = data2;
	struct vis_packet *pkt1 =
		(struct vis_packet *)info1->skb_packet->data;
	struct vis_packet *pkt2 =
		(struct vis_packet *)info2->skb_packet->data;

	return compare_eth(pkt1->vis_orig, pkt2->vis_orig);
}
/* hash function to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
/* hash function to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
static int vis_info_choose(void *data, int size)
{
	struct vis_info *vis_info = data;
	struct vis_packet *packet;
	unsigned char *addr;
	uint32_t sum = 0;
	size_t pos;

	packet = (struct vis_packet *)vis_info->skb_packet->data;
	addr = packet->vis_orig;

	/* one-at-a-time mixing over the 6-byte originator address */
	for (pos = 0; pos < ETH_ALEN; pos++) {
		sum += addr[pos];
		sum += (sum << 10);
		sum ^= (sum >> 6);
	}

	/* final avalanche */
	sum += (sum << 3);
	sum ^= (sum >> 11);
	sum += (sum << 15);

	return sum % size;
}
/* look up the vis_info for the originator carried in 'data', or NULL */
static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
				      void *data)
{
	struct hashtable_t *hash = bat_priv->vis_hash;
	struct hlist_head *head;
	struct hlist_node *walk;
	struct vis_info *candidate;
	struct vis_info *found = NULL;

	if (!hash)
		return NULL;

	head = &hash->table[vis_info_choose(data, hash->size)];

	rcu_read_lock();
	hlist_for_each_entry_rcu(candidate, walk, head, hash_entry) {
		if (vis_info_cmp(walk, data)) {
			found = candidate;
			break;
		}
	}
	rcu_read_unlock();

	return found;
}
/* insert interface to the list of interfaces of one originator, if it
* does not already exist in the list */
/* insert interface to the list of interfaces of one originator, if it
 * does not already exist in the list */
static void vis_data_insert_interface(const uint8_t *interface,
				      struct hlist_head *if_list,
				      bool primary)
{
	struct if_list_entry *new_entry;
	struct if_list_entry *cur;
	struct hlist_node *walk;

	/* bail out when the address is already present */
	hlist_for_each_entry(cur, walk, if_list, list)
		if (compare_eth(cur->addr, (void *)interface))
			return;

	/* its a new address, add it to the list */
	new_entry = kmalloc(sizeof(*new_entry), GFP_ATOMIC);
	if (!new_entry)
		return;

	memcpy(new_entry->addr, interface, ETH_ALEN);
	new_entry->primary = primary;
	hlist_add_head(&new_entry->list, if_list);
}
/* append "PRIMARY"/"SEC" records for every interface in if_list to buff;
 * returns the number of characters written */
static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
{
	struct if_list_entry *if_entry;
	struct hlist_node *walk;
	size_t written = 0;

	hlist_for_each_entry(if_entry, walk, if_list, list) {
		if (if_entry->primary)
			written += sprintf(buff + written, "PRIMARY, ");
		else
			written += sprintf(buff + written, "SEC %pM, ",
					   if_entry->addr);
	}

	return written;
}
static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
{
struct if_list_entry *entry;
struct hlist_node *pos;
size_t count = 0;
hlist_for_each_entry(entry, pos, if_list, list) {
if (entry->primary)
count += 9;
else
count += 23;
}
return count;
}
/* read an entry */
/* format one vis entry into buff; returns characters written (0 if the
 * entry does not belong to 'src') */
static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
				   uint8_t *src, bool primary)
{
	/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
	if (primary && entry->quality == 0)
		return sprintf(buff, "TT %pM, ", entry->dest);

	if (compare_eth(entry->src, src))
		return sprintf(buff, "TQ %pM %d, ", entry->dest,
			       entry->quality);

	return 0;
}
/*
 * vis_seq_print_text - dump the whole vis hash to a debugfs seq_file.
 *
 * Two passes over the hash under vis_hash_lock: the first only estimates
 * the required buffer size, the second formats into a single kmalloc'd
 * buffer which is then handed to seq_printf() in one shot.
 *
 * Returns 0 on success (also when vis is in client mode and nothing is
 * printed), -ENOMEM if the buffer allocation fails.
 */
int vis_seq_print_text(struct seq_file *seq, void *offset)
{
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	struct vis_info *info;
	struct vis_packet *packet;
	struct vis_info_entry *entries;
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->vis_hash;
	HLIST_HEAD(vis_if_list);
	struct if_list_entry *entry;
	struct hlist_node *pos, *n;
	int i, j, ret = 0;
	int vis_server = atomic_read(&bat_priv->vis_mode);
	size_t buff_pos, buf_size;
	char *buff;
	int compare;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* clients don't hold the full topology - nothing to print */
	if (vis_server == VIS_TYPE_CLIENT_UPDATE)
		goto out;

	buf_size = 1;	/* room for the terminating NUL */
	/* Estimate length */
	spin_lock_bh(&bat_priv->vis_hash_lock);
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(info, node, head, hash_entry) {
			packet = (struct vis_packet *)info->skb_packet->data;
			/* entries are appended directly after the header */
			entries = (struct vis_info_entry *)
				((char *)packet + sizeof(struct vis_packet));

			/* collect the interfaces this originator uses */
			for (j = 0; j < packet->entries; j++) {
				if (entries[j].quality == 0)
					continue;
				compare =
				 compare_eth(entries[j].src, packet->vis_orig);
				vis_data_insert_interface(entries[j].src,
							  &vis_if_list,
							  compare);
			}

			/* one output line per interface: 18 bytes for the
			 * MAC prefix plus up to 26 per entry */
			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
				buf_size += 18 + 26 * packet->entries;

				/* add primary/secondary records */
				if (compare_eth(entry->addr, packet->vis_orig))
					buf_size +=
					  vis_data_count_prim_sec(&vis_if_list);

				buf_size += 1;
			}

			/* reset the interface list for the next originator */
			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
						  list) {
				hlist_del(&entry->list);
				kfree(entry);
			}
		}
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->vis_hash_lock);
		ret = -ENOMEM;
		goto out;
	}
	buff[0] = '\0';
	buff_pos = 0;

	/* second pass: same traversal, now formatting into buff */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(info, node, head, hash_entry) {
			packet = (struct vis_packet *)info->skb_packet->data;
			entries = (struct vis_info_entry *)
				((char *)packet + sizeof(struct vis_packet));

			for (j = 0; j < packet->entries; j++) {
				if (entries[j].quality == 0)
					continue;
				compare =
				 compare_eth(entries[j].src, packet->vis_orig);
				vis_data_insert_interface(entries[j].src,
							  &vis_if_list,
							  compare);
			}

			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
				buff_pos += sprintf(buff + buff_pos, "%pM,",
						entry->addr);

				for (j = 0; j < packet->entries; j++)
					buff_pos += vis_data_read_entry(
							buff + buff_pos,
							&entries[j],
							entry->addr,
							entry->primary);

				/* add primary/secondary records */
				if (compare_eth(entry->addr, packet->vis_orig))
					buff_pos +=
					 vis_data_read_prim_sec(buff + buff_pos,
								&vis_if_list);

				buff_pos += sprintf(buff + buff_pos, "\n");
			}

			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
						  list) {
				hlist_del(&entry->list);
				kfree(entry);
			}
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->vis_hash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
/* add the info packet to the send list, if it was not
* already linked in. */
/* add the info packet to the send list, if it was not
 * already linked in; takes a reference for the list */
static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
{
	if (!list_empty(&info->send_list))
		return;

	kref_get(&info->refcount);
	list_add_tail(&info->send_list, &bat_priv->vis_send_list);
}
/* delete the info packet from the send list, if it was
* linked in. */
/* delete the info packet from the send list, if it was
 * linked in; drops the list's reference */
static void send_list_del(struct vis_info *info)
{
	if (list_empty(&info->send_list))
		return;

	list_del_init(&info->send_list);
	kref_put(&info->refcount, free_info);
}
/* tries to add one entry to the receive list. */
/* tries to add one entry to the receive list.
 *
 * Allocation failure is swallowed deliberately: a missing recv-list entry
 * only means the packet may be re-sent to a node that already has it. */
static void recv_list_add(struct bat_priv *bat_priv,
			  struct list_head *recv_list, char *mac)
{
	struct recvlist_node *entry;

	/* kernel idiom: size the allocation from the object, not the type
	 * (robust against future type renames) */
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return;

	memcpy(entry->mac, mac, ETH_ALEN);
	spin_lock_bh(&bat_priv->vis_list_lock);
	list_add_tail(&entry->list, recv_list);
	spin_unlock_bh(&bat_priv->vis_list_lock);
}
/* returns 1 if this mac is in the recv_list */
/* returns 1 if this mac is in the recv_list */
static int recv_list_is_in(struct bat_priv *bat_priv,
			   struct list_head *recv_list, char *mac)
{
	struct recvlist_node *node;
	int found = 0;

	spin_lock_bh(&bat_priv->vis_list_lock);
	list_for_each_entry(node, recv_list, list) {
		if (!compare_eth(node->mac, mac))
			continue;

		found = 1;
		break;
	}
	spin_unlock_bh(&bat_priv->vis_list_lock);

	return found;
}
/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
* broken.. ). vis hash must be locked outside. is_new is set when the packet
* is newer than old entries in the hash. */
/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
 * broken.. ). vis hash must be locked outside. is_new is set when the packet
 * is newer than old entries in the hash.
 *
 * A stack-local dummy vis_info (with a throwaway skb holding only the
 * originator address) is used as the hash lookup key. On success the
 * returned vis_info is owned by the hash; callers must not free it. */
static struct vis_info *add_packet(struct bat_priv *bat_priv,
				   struct vis_packet *vis_packet,
				   int vis_info_len, int *is_new,
				   int make_broadcast)
{
	struct vis_info *info, *old_info;
	struct vis_packet *search_packet, *old_packet;
	struct vis_info search_elem;
	struct vis_packet *packet;
	int hash_added;

	*is_new = 0;
	/* sanity check */
	if (!bat_priv->vis_hash)
		return NULL;

	/* see if the packet is already in vis_hash */
	search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
	if (!search_elem.skb_packet)
		return NULL;
	search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
						     sizeof(struct vis_packet));

	memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
	old_info = vis_hash_find(bat_priv, &search_elem);
	kfree_skb(search_elem.skb_packet);

	if (old_info) {
		old_packet = (struct vis_packet *)old_info->skb_packet->data;
		/* seq_after handles sequence-number wraparound */
		if (!seq_after(ntohl(vis_packet->seqno),
			       ntohl(old_packet->seqno))) {
			if (old_packet->seqno == vis_packet->seqno) {
				/* duplicate: only remember who sent it */
				recv_list_add(bat_priv, &old_info->recv_list,
					      vis_packet->sender_orig);
				return old_info;
			} else {
				/* newer packet is already in hash. */
				return NULL;
			}
		}
		/* remove old entry */
		hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			    old_info);
		send_list_del(old_info);
		kref_put(&old_info->refcount, free_info);
	}

	info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
	if (!info)
		return NULL;

	/* headroom for the ethernet header added on transmit */
	info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
					 vis_info_len + sizeof(struct ethhdr));
	if (!info->skb_packet) {
		kfree(info);
		return NULL;
	}
	skb_reserve(info->skb_packet, sizeof(struct ethhdr));
	packet = (struct vis_packet *)skb_put(info->skb_packet,
					      sizeof(struct vis_packet) +
					      vis_info_len);

	kref_init(&info->refcount);
	INIT_LIST_HEAD(&info->send_list);
	INIT_LIST_HEAD(&info->recv_list);
	info->first_seen = jiffies;
	info->bat_priv = bat_priv;
	memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);

	/* initialize and add new packet. */
	*is_new = 1;

	/* Make it a broadcast packet, if required */
	if (make_broadcast)
		memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);

	/* repair if entries is longer than packet. */
	if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
		packet->entries = vis_info_len / sizeof(struct vis_info_entry);

	recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);

	/* try to add it */
	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			      info, &info->hash_entry);
	if (hash_added < 0) {
		/* did not work (for some reason) */
		kref_put(&info->refcount, free_info);
		info = NULL;
	}

	return info;
}
/* handle the server sync packet, forward if needed. */
/* handle the server sync packet, forward if needed. */
void receive_server_sync_packet(struct bat_priv *bat_priv,
				struct vis_packet *vis_packet,
				int vis_info_len)
{
	struct vis_info *info;
	int is_new;
	int vis_server = atomic_read(&bat_priv->vis_mode);
	int we_are_server = (vis_server == VIS_TYPE_SERVER_SYNC);

	spin_lock_bh(&bat_priv->vis_hash_lock);
	info = add_packet(bat_priv, vis_packet, vis_info_len,
			  &is_new, we_are_server);

	/* only if we are server ourselves and packet is newer than the one in
	 * hash.*/
	if (info && we_are_server && is_new)
		send_list_add(bat_priv, info);
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* handle an incoming client update packet and schedule forward if needed. */
/* handle an incoming client update packet and schedule forward if needed.
 *
 * If this node is the sync server the packet is targeted at, the packet
 * is upgraded to a server-sync packet and scheduled; otherwise it is
 * forwarded unless we ourselves are the recipient. */
void receive_client_update_packet(struct bat_priv *bat_priv,
				  struct vis_packet *vis_packet,
				  int vis_info_len)
{
	struct vis_info *info;
	struct vis_packet *packet;
	int is_new;
	int vis_server = atomic_read(&bat_priv->vis_mode);
	int are_target = 0;

	/* clients shall not broadcast. */
	if (is_broadcast_ether_addr(vis_packet->target_orig))
		return;

	/* Are we the target for this VIS packet? */
	if (vis_server == VIS_TYPE_SERVER_SYNC &&
	    is_my_mac(vis_packet->target_orig))
		are_target = 1;

	spin_lock_bh(&bat_priv->vis_hash_lock);
	/* are_target doubles as make_broadcast: our answer is broadcast */
	info = add_packet(bat_priv, vis_packet, vis_info_len,
			  &is_new, are_target);

	if (!info)
		goto end;
	/* note that outdated packets will be dropped at this point. */

	packet = (struct vis_packet *)info->skb_packet->data;

	/* send only if we're the target server or ... */
	if (are_target && is_new) {
		packet->vis_type = VIS_TYPE_SERVER_SYNC;	/* upgrade! */
		send_list_add(bat_priv, info);

		/* ... we're not the recipient (and thus need to forward). */
	} else if (!is_my_mac(packet->target_orig)) {
		send_list_add(bat_priv, info);
	}

end:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* Walk the originators and find the VIS server with the best tq. Set the packet
* address to its address and return the best_tq.
*
* Must be called with the originator hash locked */
/* Walk the originators and find the VIS server with the best tq. Set the packet
 * address to its address and return the best_tq.
 *
 * Must be called with the originator hash locked.
 *
 * Returns -1 when no reachable VIS server exists. */
static int find_best_vis_server(struct bat_priv *bat_priv,
				struct vis_info *info)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct neigh_node *router;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct vis_packet *packet;
	int best_tq = -1, i;

	packet = (struct vis_packet *)info->skb_packet->data;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			/* skip originators without a usable next hop */
			router = orig_node_get_router(orig_node);
			if (!router)
				continue;

			if ((orig_node->flags & VIS_SERVER) &&
			    (router->tq_avg > best_tq)) {
				best_tq = router->tq_avg;
				memcpy(packet->target_orig, orig_node->orig,
				       ETH_ALEN);
			}
			/* balance the ref taken by orig_node_get_router() */
			neigh_node_free_ref(router);
		}
		rcu_read_unlock();
	}

	return best_tq;
}
/* Return true if the vis packet is full. */
static bool vis_packet_full(struct vis_info *info)
{
struct vis_packet *packet;
packet = (struct vis_packet *)info->skb_packet->data;
if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
< packet->entries + 1)
return true;
return false;
}
/* generates a packet of own vis data,
* returns 0 on success, -1 if no packet could be generated */
/* generates a packet of own vis data,
 * returns 0 on success, -1 if no packet could be generated
 *
 * Fills our own vis_info with one entry per best-next-hop neighbour and
 * one zero-quality entry per local translation-table client, stopping
 * early when the packet is full. */
static int generate_vis_packet(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct neigh_node *router;
	struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
	struct vis_info_entry *entry;
	struct tt_local_entry *tt_local_entry;
	int best_tq = -1, i;

	info->first_seen = jiffies;
	packet->vis_type = atomic_read(&bat_priv->vis_mode);

	/* reset the packet: new seqno, broadcast target, no entries yet */
	memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
	packet->ttl = TTL;
	packet->seqno = htonl(ntohl(packet->seqno) + 1);
	packet->entries = 0;
	skb_trim(info->skb_packet, sizeof(struct vis_packet));

	if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
		/* clients unicast to the best server instead of broadcasting */
		best_tq = find_best_vis_server(bat_priv, info);

		if (best_tq < 0)
			return -1;
	}

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			router = orig_node_get_router(orig_node);
			if (!router)
				continue;

			/* only direct (single-hop) neighbours are reported */
			if (!compare_eth(router->addr, orig_node->orig))
				goto next;

			if (router->if_incoming->if_status != IF_ACTIVE)
				goto next;

			if (router->tq_avg < 1)
				goto next;

			/* fill one entry into buffer. */
			entry = (struct vis_info_entry *)
				      skb_put(info->skb_packet, sizeof(*entry));
			memcpy(entry->src,
			       router->if_incoming->net_dev->dev_addr,
			       ETH_ALEN);
			memcpy(entry->dest, orig_node->orig, ETH_ALEN);
			entry->quality = router->tq_avg;
			packet->entries++;

next:
			neigh_node_free_ref(router);

			if (vis_packet_full(info))
				goto unlock;
		}
		rcu_read_unlock();
	}

	/* append the locally announced (translation table) clients */
	hash = bat_priv->tt_local_hash;

	spin_lock_bh(&bat_priv->tt_lhash_lock);
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(tt_local_entry, node, head, hash_entry) {
			entry = (struct vis_info_entry *)
					skb_put(info->skb_packet,
						sizeof(*entry));
			memset(entry->src, 0, ETH_ALEN);
			memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
			entry->quality = 0; /* 0 means TT */
			packet->entries++;

			if (vis_packet_full(info)) {
				spin_unlock_bh(&bat_priv->tt_lhash_lock);
				return 0;
			}
		}
	}

	spin_unlock_bh(&bat_priv->tt_lhash_lock);
	return 0;

unlock:
	rcu_read_unlock();
	return 0;
}
/* free old vis packets. Must be called with this vis_hash_lock
* held */
/* free old vis packets. Must be called with this vis_hash_lock
 * held */
static void purge_vis_packets(struct bat_priv *bat_priv)
{
	int bucket;
	struct hashtable_t *hash = bat_priv->vis_hash;
	struct hlist_node *walk, *safe;
	struct hlist_head *head;
	struct vis_info *info;

	for (bucket = 0; bucket < hash->size; bucket++) {
		head = &hash->table[bucket];
		hlist_for_each_entry_safe(info, walk, safe,
					  head, hash_entry) {
			/* never purge own data. */
			if (info == bat_priv->my_vis_info)
				continue;

			/* keep entries seen within the timeout window */
			if (!time_after(jiffies,
					info->first_seen + VIS_TIMEOUT * HZ))
				continue;

			hlist_del(walk);
			send_list_del(info);
			kref_put(&info->refcount, free_info);
		}
	}
}
/* "broadcast" a vis packet: unicast a clone to every reachable originator
 * that advertises itself as a VIS server and has not already sent us this
 * packet (tracked via the recv_list). */
static void broadcast_vis_packet(struct bat_priv *bat_priv,
				 struct vis_info *info)
{
	struct neigh_node *router;
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct vis_packet *packet;
	struct sk_buff *skb;
	struct hard_iface *hard_iface;
	uint8_t dstaddr[ETH_ALEN];
	int i;

	packet = (struct vis_packet *)info->skb_packet->data;

	/* send to all routers in range. */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			/* if it's a vis server and reachable, send it. */
			if (!(orig_node->flags & VIS_SERVER))
				continue;

			router = orig_node_get_router(orig_node);
			if (!router)
				continue;

			/* don't send it if we already received the packet from
			 * this node. */
			if (recv_list_is_in(bat_priv, &info->recv_list,
					    orig_node->orig)) {
				neigh_node_free_ref(router);
				continue;
			}

			memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
			/* copy what we need, then drop the router ref before
			 * the potentially sleeping clone/send below */
			hard_iface = router->if_incoming;
			memcpy(dstaddr, router->addr, ETH_ALEN);

			neigh_node_free_ref(router);

			/* clone: the same skb is re-targeted per recipient */
			skb = skb_clone(info->skb_packet, GFP_ATOMIC);
			if (skb)
				send_skb_packet(skb, hard_iface, dstaddr);

		}
		rcu_read_unlock();
	}
}
/* send a clone of the vis packet to the router of its target originator */
static void unicast_vis_packet(struct bat_priv *bat_priv,
			       struct vis_info *info)
{
	struct vis_packet *packet =
		(struct vis_packet *)info->skb_packet->data;
	struct orig_node *dest;
	struct neigh_node *next_hop;
	struct sk_buff *clone;

	dest = orig_hash_find(bat_priv, packet->target_orig);
	if (!dest)
		return;

	next_hop = orig_node_get_router(dest);
	if (next_hop) {
		clone = skb_clone(info->skb_packet, GFP_ATOMIC);
		if (clone)
			send_skb_packet(clone, next_hop->if_incoming,
					next_hop->addr);
		neigh_node_free_ref(next_hop);
	}

	orig_node_free_ref(dest);
}
/* only send one vis packet. called from send_vis_packets() */
/* only send one vis packet. called from send_vis_packets() */
static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
{
	struct hard_iface *primary_if = primary_if_get_selected(bat_priv);
	struct vis_packet *packet;

	if (!primary_if)
		return;

	packet = (struct vis_packet *)info->skb_packet->data;
	if (packet->ttl < 2) {
		pr_debug("Error - can't send vis packet: ttl exceeded\n");
		goto out;
	}

	memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	/* consume one hop for the transmit ... */
	packet->ttl--;

	if (is_broadcast_ether_addr(packet->target_orig))
		broadcast_vis_packet(bat_priv, info);
	else
		unicast_vis_packet(bat_priv, info);
	/* ... and restore it: the stored packet keeps its original TTL */
	packet->ttl++; /* restore TTL */

out:
	hardif_free_ref(primary_if);
}
/* called from timer; send (and maybe generate) vis packet. */
/* called from timer; send (and maybe generate) vis packet.
 *
 * Drains bat_priv->vis_send_list; the hash lock is dropped around the
 * actual transmit, so an extra reference is held per packet to keep it
 * alive across the unlocked window. Re-arms itself via start_vis_timer(). */
static void send_vis_packets(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, vis_work);
	struct vis_info *info;

	spin_lock_bh(&bat_priv->vis_hash_lock);
	purge_vis_packets(bat_priv);

	if (generate_vis_packet(bat_priv) == 0) {
		/* schedule if generation was successful */
		send_list_add(bat_priv, bat_priv->my_vis_info);
	}

	while (!list_empty(&bat_priv->vis_send_list)) {
		info = list_first_entry(&bat_priv->vis_send_list,
					typeof(*info), send_list);

		/* pin the info while the lock is dropped for sending */
		kref_get(&info->refcount);
		spin_unlock_bh(&bat_priv->vis_hash_lock);

		send_vis_packet(bat_priv, info);

		spin_lock_bh(&bat_priv->vis_hash_lock);
		send_list_del(info);
		kref_put(&info->refcount, free_info);
	}
	spin_unlock_bh(&bat_priv->vis_hash_lock);
	start_vis_timer(bat_priv);
}
/* init the vis server. this may only be called when if_list is already
* initialized (e.g. bat0 is initialized, interfaces have been added) */
/* init the vis server. this may only be called when if_list is already
 * initialized (e.g. bat0 is initialized, interfaces have been added)
 *
 * Allocates the vis hash and our own vis_info, prefills its header and
 * adds it to the hash, then arms the periodic send timer.
 * Returns 1 on success, 0 on failure (all partial state torn down). */
int vis_init(struct bat_priv *bat_priv)
{
	struct vis_packet *packet;
	int hash_added;

	/* already initialized - nothing to do */
	if (bat_priv->vis_hash)
		return 1;

	spin_lock_bh(&bat_priv->vis_hash_lock);

	bat_priv->vis_hash = hash_new(256);
	if (!bat_priv->vis_hash) {
		pr_err("Can't initialize vis_hash\n");
		goto err;
	}

	bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
	if (!bat_priv->my_vis_info) {
		pr_err("Can't initialize vis packet\n");
		goto err;
	}

	/* room for header + max payload + ethernet header (reserved) */
	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
						sizeof(struct vis_packet) +
						MAX_VIS_PACKET_SIZE +
						sizeof(struct ethhdr));
	if (!bat_priv->my_vis_info->skb_packet)
		goto free_info;

	skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
	packet = (struct vis_packet *)skb_put(
					bat_priv->my_vis_info->skb_packet,
					sizeof(struct vis_packet));

	/* prefill the vis info */
	/* backdate first_seen so the first periodic send fires immediately */
	bat_priv->my_vis_info->first_seen = jiffies -
						msecs_to_jiffies(VIS_INTERVAL);
	INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
	INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
	kref_init(&bat_priv->my_vis_info->refcount);
	bat_priv->my_vis_info->bat_priv = bat_priv;
	packet->version = COMPAT_VERSION;
	packet->packet_type = BAT_VIS;
	packet->ttl = TTL;
	packet->seqno = 0;
	packet->entries = 0;

	INIT_LIST_HEAD(&bat_priv->vis_send_list);

	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			      bat_priv->my_vis_info,
			      &bat_priv->my_vis_info->hash_entry);
	if (hash_added < 0) {
		pr_err("Can't add own vis packet into hash\n");
		/* not in hash, need to remove it manually. */
		kref_put(&bat_priv->my_vis_info->refcount, free_info);
		goto err;
	}

	spin_unlock_bh(&bat_priv->vis_hash_lock);
	start_vis_timer(bat_priv);
	return 1;

free_info:
	kfree(bat_priv->my_vis_info);
	bat_priv->my_vis_info = NULL;
err:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
	/* vis_quit() frees whatever was set up before the failure */
	vis_quit(bat_priv);
	return 0;
}
/* Decrease the reference count on a hash item info */
/* Decrease the reference count on a hash item info;
 * hash_delete() callback used while tearing down the vis hash */
static void free_info_ref(struct hlist_node *node, void *arg)
{
	struct vis_info *info = container_of(node, struct vis_info,
					     hash_entry);

	send_list_del(info);
	kref_put(&info->refcount, free_info);
}
/* shutdown vis-server */
/* shutdown vis-server: cancel the periodic work, then drop every entry
 * in the vis hash (including our own vis_info) via free_info_ref() */
void vis_quit(struct bat_priv *bat_priv)
{
	if (!bat_priv->vis_hash)
		return;

	/* must run outside the lock: the work item takes vis_hash_lock */
	cancel_delayed_work_sync(&bat_priv->vis_work);

	spin_lock_bh(&bat_priv->vis_hash_lock);
	/* properly remove, kill timers ... */
	hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
	bat_priv->vis_hash = NULL;

	bat_priv->my_vis_info = NULL;
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* schedule packets for (re)transmission */
/* schedule packets for (re)transmission: (re-)arm the delayed work that
 * runs send_vis_packets() after VIS_INTERVAL milliseconds */
static void start_vis_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
	queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
			   msecs_to_jiffies(VIS_INTERVAL));
}
| gpl-2.0 |
stevegaron/android-kernel-tuna | net/batman-adv/vis.c | 2378 | 26429 | /*
* Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
*
* Simon Wunderlich
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*
*/
#include "main.h"
#include "send.h"
#include "translation-table.h"
#include "vis.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "hash.h"
#include "originator.h"
#define MAX_VIS_PACKET_SIZE 1000
/* Returns the smallest signed integer in two's complement with the sizeof x */
#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))
/* Checks if a sequence number x is a predecessor/successor of y.
* they handle overflows/underflows and can correctly check for a
* predecessor/successor unless the variable sequence number has grown by
* more then 2**(bitwidth(x)-1)-1.
* This means that for a uint8_t with the maximum value 255, it would think:
* - when adding nothing - it is neither a predecessor nor a successor
* - before adding more than 127 to the starting value - it is a predecessor,
* - when adding 128 - it is neither a predecessor nor a successor,
* - after adding more than 127 to the starting value - it is a successor */
#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
_dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)
static void start_vis_timer(struct bat_priv *bat_priv);
/* free the info - kref release callback, invoked once the last reference
 * to a vis_info is dropped. Tears down the receive list, the backing skb
 * and the vis_info itself. */
static void free_info(struct kref *ref)
{
	struct vis_info *info = container_of(ref, struct vis_info, refcount);
	struct bat_priv *bat_priv = info->bat_priv;
	struct recvlist_node *entry, *tmp;

	/* detach from the send list (no-op if already unlinked) */
	list_del_init(&info->send_list);

	/* free every recorded receiver under the recv-list lock */
	spin_lock_bh(&bat_priv->vis_list_lock);
	list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}
	spin_unlock_bh(&bat_priv->vis_list_lock);
	kfree_skb(info->skb_packet);
	kfree(info);
}
/* Compare two vis packets, used by the hashing algorithm.
 * Two entries are considered equal iff they originate from the same node. */
static int vis_info_cmp(struct hlist_node *node, void *data2)
{
	struct vis_info *info1 = container_of(node, struct vis_info,
					      hash_entry);
	struct vis_info *info2 = data2;
	struct vis_packet *pkt1 = (struct vis_packet *)info1->skb_packet->data;
	struct vis_packet *pkt2 = (struct vis_packet *)info2->skb_packet->data;

	return compare_eth(pkt1->vis_orig, pkt2->vis_orig);
}
/* hash function to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
static int vis_info_choose(void *data, int size)
{
	struct vis_info *vis_info = data;
	struct vis_packet *packet;
	unsigned char *key;
	uint32_t hash = 0;
	size_t byte;

	packet = (struct vis_packet *)vis_info->skb_packet->data;

	/* one-at-a-time hash over the originator MAC address */
	key = packet->vis_orig;
	for (byte = 0; byte < ETH_ALEN; byte++) {
		hash += key[byte];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	/* final avalanche */
	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}
/* Look up a vis_info in the vis hash that matches @data (per vis_info_cmp,
 * i.e. same originator). Returns the entry or NULL if absent.
 * NOTE(review): the returned pointer is not reference-counted here - the
 * caller (add_packet) holds vis_hash_lock, which presumably keeps the entry
 * alive; confirm before using from other contexts. */
static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
				      void *data)
{
	struct hashtable_t *hash = bat_priv->vis_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct vis_info *vis_info, *vis_info_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	/* hash to the bucket, then scan it under RCU */
	index = vis_info_choose(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
		if (!vis_info_cmp(node, data))
			continue;

		vis_info_tmp = vis_info;
		break;
	}
	rcu_read_unlock();

	return vis_info_tmp;
}
/* insert interface to the list of interfaces of one originator, if it
 * does not already exist in the list */
static void vis_data_insert_interface(const uint8_t *interface,
				      struct hlist_head *if_list,
				      bool primary)
{
	struct if_list_entry *entry;
	struct hlist_node *pos;

	/* already known? then nothing to do */
	hlist_for_each_entry(entry, pos, if_list, list) {
		if (compare_eth(entry->addr, (void *)interface))
			return;
	}

	/* its a new address, add it to the list */
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return;	/* best effort: on OOM the interface is just omitted */
	memcpy(entry->addr, interface, ETH_ALEN);
	entry->primary = primary;
	hlist_add_head(&entry->list, if_list);
}
/* Append a "PRIMARY, " or "SEC <mac>, " record for each interface in
 * @if_list to @buff; returns the number of characters written. */
static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
{
	size_t written = 0;
	struct if_list_entry *entry;
	struct hlist_node *pos;

	hlist_for_each_entry(entry, pos, if_list, list) {
		if (entry->primary) {
			written += sprintf(buff + written, "PRIMARY, ");
			continue;
		}
		written += sprintf(buff + written, "SEC %pM, ", entry->addr);
	}

	return written;
}
static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
{
struct if_list_entry *entry;
struct hlist_node *pos;
size_t count = 0;
hlist_for_each_entry(entry, pos, if_list, list) {
if (entry->primary)
count += 9;
else
count += 23;
}
return count;
}
/* read an entry: format one vis_info_entry into @buff.
 * Emits a "TT" record for local clients (quality 0 on the primary
 * interface), a "TQ" record for links seen from @src, nothing otherwise.
 * Returns the number of characters written. */
static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
				   uint8_t *src, bool primary)
{
	/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
	if (primary && entry->quality == 0)
		return sprintf(buff, "TT %pM, ", entry->dest);

	if (compare_eth(entry->src, src))
		return sprintf(buff, "TQ %pM %d, ", entry->dest,
			       entry->quality);

	return 0;
}
/* seq_file show handler for the debugfs "vis_data" file.
 * Runs two passes over the vis hash under vis_hash_lock: pass one
 * estimates an upper bound for the output size, pass two formats the
 * text into a single buffer which is then handed to seq_printf().
 * Returns 0 on success or -ENOMEM. */
int vis_seq_print_text(struct seq_file *seq, void *offset)
{
	struct hard_iface *primary_if;
	struct hlist_node *node;
	struct hlist_head *head;
	struct vis_info *info;
	struct vis_packet *packet;
	struct vis_info_entry *entries;
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->vis_hash;
	HLIST_HEAD(vis_if_list);
	struct if_list_entry *entry;
	struct hlist_node *pos, *n;
	int i, j, ret = 0;
	int vis_server = atomic_read(&bat_priv->vis_mode);
	size_t buff_pos, buf_size;
	char *buff;
	int compare;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	/* clients don't hold the full picture - nothing to print */
	if (vis_server == VIS_TYPE_CLIENT_UPDATE)
		goto out;

	buf_size = 1;	/* room for the trailing NUL */
	/* Estimate length */
	spin_lock_bh(&bat_priv->vis_hash_lock);
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(info, node, head, hash_entry) {
			packet = (struct vis_packet *)info->skb_packet->data;
			entries = (struct vis_info_entry *)
				((char *)packet + sizeof(struct vis_packet));

			/* collect the originator's interfaces; "compare"
			 * flags the primary interface */
			for (j = 0; j < packet->entries; j++) {
				if (entries[j].quality == 0)
					continue;
				compare =
				compare_eth(entries[j].src, packet->vis_orig);
				vis_data_insert_interface(entries[j].src,
							  &vis_if_list,
							  compare);
			}

			/* worst case: 18 per MAC prefix + 26 per entry */
			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
				buf_size += 18 + 26 * packet->entries;

				/* add primary/secondary records */
				if (compare_eth(entry->addr, packet->vis_orig))
					buf_size +=
					 vis_data_count_prim_sec(&vis_if_list);

				buf_size += 1;	/* newline */
			}

			/* reset the temporary interface list per packet */
			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
						  list) {
				hlist_del(&entry->list);
				kfree(entry);
			}
		}
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->vis_hash_lock);
		ret = -ENOMEM;
		goto out;
	}
	buff[0] = '\0';
	buff_pos = 0;

	/* second pass: same walk, this time formatting into buff */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(info, node, head, hash_entry) {
			packet = (struct vis_packet *)info->skb_packet->data;
			entries = (struct vis_info_entry *)
				((char *)packet + sizeof(struct vis_packet));

			for (j = 0; j < packet->entries; j++) {
				if (entries[j].quality == 0)
					continue;
				compare =
				compare_eth(entries[j].src, packet->vis_orig);
				vis_data_insert_interface(entries[j].src,
							  &vis_if_list,
							  compare);
			}

			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
				buff_pos += sprintf(buff + buff_pos, "%pM,",
						entry->addr);

				for (j = 0; j < packet->entries; j++)
					buff_pos += vis_data_read_entry(
							buff + buff_pos,
							&entries[j],
							entry->addr,
							entry->primary);

				/* add primary/secondary records */
				if (compare_eth(entry->addr, packet->vis_orig))
					buff_pos +=
					 vis_data_read_prim_sec(buff + buff_pos,
								&vis_if_list);

				buff_pos += sprintf(buff + buff_pos, "\n");
			}

			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
						  list) {
				hlist_del(&entry->list);
				kfree(entry);
			}
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->vis_hash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);

out:
	if (primary_if)
		hardif_free_ref(primary_if);
	return ret;
}
/* add the info packet to the send list, if it was not
 * already linked in. Takes a reference that send_list_del() drops. */
static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
{
	if (!list_empty(&info->send_list))
		return;

	kref_get(&info->refcount);
	list_add_tail(&info->send_list, &bat_priv->vis_send_list);
}
/* delete the info packet from the send list, if it was
 * linked in, and drop the reference send_list_add() took. */
static void send_list_del(struct vis_info *info)
{
	if (list_empty(&info->send_list))
		return;

	list_del_init(&info->send_list);
	kref_put(&info->refcount, free_info);
}
/* tries to add one entry to the receive list.
 * Records @mac as a node we already received this vis packet from, so
 * broadcast_vis_packet() can skip it. Best effort: on allocation failure
 * the receiver is simply not recorded (worst case: one redundant resend). */
static void recv_list_add(struct bat_priv *bat_priv,
			  struct list_head *recv_list, char *mac)
{
	struct recvlist_node *entry;

	/* sizeof(*entry) instead of sizeof(struct recvlist_node): stays
	 * correct if the pointer's type ever changes */
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return;

	memcpy(entry->mac, mac, ETH_ALEN);
	spin_lock_bh(&bat_priv->vis_list_lock);
	list_add_tail(&entry->list, recv_list);
	spin_unlock_bh(&bat_priv->vis_list_lock);
}
/* returns 1 if this mac is in the recv_list, 0 otherwise */
static int recv_list_is_in(struct bat_priv *bat_priv,
			   struct list_head *recv_list, char *mac)
{
	struct recvlist_node *entry;
	int found = 0;

	spin_lock_bh(&bat_priv->vis_list_lock);
	list_for_each_entry(entry, recv_list, list) {
		if (!compare_eth(entry->mac, mac))
			continue;

		found = 1;
		break;
	}
	spin_unlock_bh(&bat_priv->vis_list_lock);

	return found;
}
/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
 * broken.. ). vis hash must be locked outside. is_new is set when the packet
 * is newer than old entries in the hash.
 *
 * On success the returned vis_info is owned by the hash (no extra
 * reference is handed to the caller). */
static struct vis_info *add_packet(struct bat_priv *bat_priv,
				   struct vis_packet *vis_packet,
				   int vis_info_len, int *is_new,
				   int make_broadcast)
{
	struct vis_info *info, *old_info;
	struct vis_packet *search_packet, *old_packet;
	struct vis_info search_elem;
	struct vis_packet *packet;
	int hash_added;

	*is_new = 0;
	/* sanity check */
	if (!bat_priv->vis_hash)
		return NULL;

	/* see if the packet is already in vis_hash; build a throwaway
	 * search key that only carries the originator address */
	search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
	if (!search_elem.skb_packet)
		return NULL;
	search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
						     sizeof(struct vis_packet));

	memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
	old_info = vis_hash_find(bat_priv, &search_elem);
	kfree_skb(search_elem.skb_packet);

	if (old_info) {
		old_packet = (struct vis_packet *)old_info->skb_packet->data;
		/* seq_after() handles sequence number wrap-around */
		if (!seq_after(ntohl(vis_packet->seqno),
			       ntohl(old_packet->seqno))) {
			if (old_packet->seqno == vis_packet->seqno) {
				/* duplicate: just note who sent it */
				recv_list_add(bat_priv, &old_info->recv_list,
					      vis_packet->sender_orig);
				return old_info;
			} else {
				/* newer packet is already in hash. */
				return NULL;
			}
		}
		/* remove old entry */
		hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			    old_info);
		send_list_del(old_info);
		kref_put(&old_info->refcount, free_info);
	}

	info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
	if (!info)
		return NULL;

	/* headroom for the ethernet header send_skb_packet() will push */
	info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
					 vis_info_len + sizeof(struct ethhdr));
	if (!info->skb_packet) {
		kfree(info);
		return NULL;
	}
	skb_reserve(info->skb_packet, sizeof(struct ethhdr));
	packet = (struct vis_packet *)skb_put(info->skb_packet,
					      sizeof(struct vis_packet) +
					      vis_info_len);

	kref_init(&info->refcount);
	INIT_LIST_HEAD(&info->send_list);
	INIT_LIST_HEAD(&info->recv_list);
	info->first_seen = jiffies;
	info->bat_priv = bat_priv;
	memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);

	/* initialize and add new packet. */
	*is_new = 1;

	/* Make it a broadcast packet, if required */
	if (make_broadcast)
		memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);

	/* repair if entries is longer than packet. */
	if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
		packet->entries = vis_info_len / sizeof(struct vis_info_entry);

	recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);

	/* try to add it */
	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			      info, &info->hash_entry);
	if (hash_added < 0) {
		/* did not work (for some reason) */
		kref_put(&info->refcount, free_info);
		info = NULL;
	}

	return info;
}
/* handle the server sync packet, forward if needed.
 * Only a node that is itself a sync server re-broadcasts and schedules
 * the packet for transmission. */
void receive_server_sync_packet(struct bat_priv *bat_priv,
				struct vis_packet *vis_packet,
				int vis_info_len)
{
	struct vis_info *info;
	int is_new, make_broadcast;
	int vis_server = atomic_read(&bat_priv->vis_mode);

	make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);

	spin_lock_bh(&bat_priv->vis_hash_lock);
	info = add_packet(bat_priv, vis_packet, vis_info_len,
			  &is_new, make_broadcast);
	if (!info)
		goto end;

	/* only if we are server ourselves and packet is newer than the one in
	 * hash.*/
	if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
		send_list_add(bat_priv, info);
end:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* handle an incoming client update packet and schedule forward if needed.
 * If we are the targeted sync server the packet is upgraded to a server
 * sync packet and re-scheduled; otherwise it is forwarded towards its
 * target unless we are that target ourselves. */
void receive_client_update_packet(struct bat_priv *bat_priv,
				  struct vis_packet *vis_packet,
				  int vis_info_len)
{
	struct vis_info *info;
	struct vis_packet *packet;
	int is_new;
	int vis_server = atomic_read(&bat_priv->vis_mode);
	int are_target = 0;

	/* clients shall not broadcast. */
	if (is_broadcast_ether_addr(vis_packet->target_orig))
		return;

	/* Are we the target for this VIS packet? */
	if (vis_server == VIS_TYPE_SERVER_SYNC &&
	    is_my_mac(vis_packet->target_orig))
		are_target = 1;

	spin_lock_bh(&bat_priv->vis_hash_lock);
	info = add_packet(bat_priv, vis_packet, vis_info_len,
			  &is_new, are_target);

	if (!info)
		goto end;
	/* note that outdated packets will be dropped at this point. */

	packet = (struct vis_packet *)info->skb_packet->data;

	/* send only if we're the target server or ... */
	if (are_target && is_new) {
		packet->vis_type = VIS_TYPE_SERVER_SYNC;	/* upgrade! */
		send_list_add(bat_priv, info);

		/* ... we're not the recipient (and thus need to forward). */
	} else if (!is_my_mac(packet->target_orig)) {
		send_list_add(bat_priv, info);
	}

end:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* Walk the originators and find the VIS server with the best tq. Set the packet
 * address to its address and return the best_tq.
 *
 * Must be called with the originator hash locked.
 * Returns -1 when no reachable VIS server exists. */
static int find_best_vis_server(struct bat_priv *bat_priv,
				struct vis_info *info)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct neigh_node *router;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct vis_packet *packet;
	int best_tq = -1, i;

	packet = (struct vis_packet *)info->skb_packet->data;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			router = orig_node_get_router(orig_node);
			if (!router)
				continue;	/* unreachable originator */

			if ((orig_node->flags & VIS_SERVER) &&
			    (router->tq_avg > best_tq)) {
				best_tq = router->tq_avg;
				memcpy(packet->target_orig, orig_node->orig,
				       ETH_ALEN);
			}

			/* drop the reference taken by orig_node_get_router */
			neigh_node_free_ref(router);
		}
		rcu_read_unlock();
	}

	return best_tq;
}
/* Return true if the vis packet is full, i.e. appending one more
 * vis_info_entry would exceed MAX_VIS_PACKET_SIZE. */
static bool vis_packet_full(struct vis_info *info)
{
	struct vis_packet *packet;

	packet = (struct vis_packet *)info->skb_packet->data;

	return packet->entries + 1 >
	       MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry);
}
/* generates a packet of own vis data,
 * returns 0 on success, -1 if no packet could be generated
 *
 * Rebuilds my_vis_info's skb in place: first one entry per neighbour we
 * route to directly, then one TT entry per local client. Entries are
 * capped by vis_packet_full(). */
static int generate_vis_packet(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct neigh_node *router;
	struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
	struct vis_info_entry *entry;
	struct tt_local_entry *tt_local_entry;
	int best_tq = -1, i;

	info->first_seen = jiffies;
	packet->vis_type = atomic_read(&bat_priv->vis_mode);

	/* reset the packet: new seqno, no entries yet */
	memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
	packet->ttl = TTL;
	packet->seqno = htonl(ntohl(packet->seqno) + 1);
	packet->entries = 0;
	skb_trim(info->skb_packet, sizeof(struct vis_packet));

	/* clients unicast their data to the best reachable vis server */
	if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
		best_tq = find_best_vis_server(bat_priv, info);

		if (best_tq < 0)
			return -1;
	}

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			router = orig_node_get_router(orig_node);
			if (!router)
				continue;

			/* only count direct (single-hop) neighbours */
			if (!compare_eth(router->addr, orig_node->orig))
				goto next;

			if (router->if_incoming->if_status != IF_ACTIVE)
				goto next;

			if (router->tq_avg < 1)
				goto next;

			/* fill one entry into buffer. */
			entry = (struct vis_info_entry *)
				      skb_put(info->skb_packet, sizeof(*entry));
			memcpy(entry->src,
			       router->if_incoming->net_dev->dev_addr,
			       ETH_ALEN);
			memcpy(entry->dest, orig_node->orig, ETH_ALEN);
			entry->quality = router->tq_avg;
			packet->entries++;

next:
			neigh_node_free_ref(router);

			if (vis_packet_full(info))
				goto unlock;
		}
		rcu_read_unlock();
	}

	/* append one TT entry per locally attached client */
	hash = bat_priv->tt_local_hash;

	spin_lock_bh(&bat_priv->tt_lhash_lock);
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(tt_local_entry, node, head, hash_entry) {
			entry = (struct vis_info_entry *)
					skb_put(info->skb_packet,
						sizeof(*entry));
			memset(entry->src, 0, ETH_ALEN);
			memcpy(entry->dest, tt_local_entry->addr, ETH_ALEN);
			entry->quality = 0; /* 0 means TT */
			packet->entries++;

			if (vis_packet_full(info)) {
				spin_unlock_bh(&bat_priv->tt_lhash_lock);
				return 0;
			}
		}
	}

	spin_unlock_bh(&bat_priv->tt_lhash_lock);
	return 0;

unlock:
	rcu_read_unlock();
	return 0;
}
/* free old vis packets. Must be called with this vis_hash_lock
 * held
 *
 * Drops every entry older than VIS_TIMEOUT seconds, except our own. */
static void purge_vis_packets(struct bat_priv *bat_priv)
{
	int i;
	struct hashtable_t *hash = bat_priv->vis_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct vis_info *info;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		hlist_for_each_entry_safe(info, node, node_tmp,
					  head, hash_entry) {
			/* never purge own data. */
			if (info == bat_priv->my_vis_info)
				continue;

			/* time_after() is jiffies-wraparound safe */
			if (time_after(jiffies,
				       info->first_seen + VIS_TIMEOUT * HZ)) {
				hlist_del(node);
				send_list_del(info);
				kref_put(&info->refcount, free_info);
			}
		}
	}
}
/* "Broadcast" a vis packet: unicast a clone to every reachable vis
 * server that has not already received it (per the info's recv_list). */
static void broadcast_vis_packet(struct bat_priv *bat_priv,
				 struct vis_info *info)
{
	struct neigh_node *router;
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct vis_packet *packet;
	struct sk_buff *skb;
	struct hard_iface *hard_iface;
	uint8_t dstaddr[ETH_ALEN];
	int i;

	packet = (struct vis_packet *)info->skb_packet->data;

	/* send to all routers in range. */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			/* if it's a vis server and reachable, send it. */
			if (!(orig_node->flags & VIS_SERVER))
				continue;

			router = orig_node_get_router(orig_node);
			if (!router)
				continue;

			/* don't send it if we already received the packet from
			 * this node. */
			if (recv_list_is_in(bat_priv, &info->recv_list,
					    orig_node->orig)) {
				neigh_node_free_ref(router);
				continue;
			}

			memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
			hard_iface = router->if_incoming;
			/* copy the next-hop address before dropping the
			 * router reference */
			memcpy(dstaddr, router->addr, ETH_ALEN);

			neigh_node_free_ref(router);

			skb = skb_clone(info->skb_packet, GFP_ATOMIC);
			if (skb)
				send_skb_packet(skb, hard_iface, dstaddr);

		}
		rcu_read_unlock();
	}
}
/* Unicast a clone of the vis packet to the router towards its
 * target_orig; silently drops if the target is unknown/unreachable. */
static void unicast_vis_packet(struct bat_priv *bat_priv,
			       struct vis_info *info)
{
	struct orig_node *orig_node;
	struct neigh_node *router = NULL;
	struct sk_buff *skb;
	struct vis_packet *packet;

	packet = (struct vis_packet *)info->skb_packet->data;

	orig_node = orig_hash_find(bat_priv, packet->target_orig);
	if (!orig_node)
		goto out;

	router = orig_node_get_router(orig_node);
	if (!router)
		goto out;

	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, router->if_incoming, router->addr);

out:
	/* release the references taken above, in reverse order */
	if (router)
		neigh_node_free_ref(router);
	if (orig_node)
		orig_node_free_ref(orig_node);
}
/* only send one vis packet. called from send_vis_packets()
 * Stamps the sender address, decrements the TTL for transmission and
 * restores it afterwards (the local copy keeps its original TTL). */
static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
{
	struct hard_iface *primary_if;
	struct vis_packet *packet;

	primary_if = primary_if_get_selected(bat_priv);
	if (!primary_if)
		goto out;

	packet = (struct vis_packet *)info->skb_packet->data;
	if (packet->ttl < 2) {
		pr_debug("Error - can't send vis packet: ttl exceeded\n");
		goto out;
	}

	memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN);
	packet->ttl--;

	if (is_broadcast_ether_addr(packet->target_orig))
		broadcast_vis_packet(bat_priv, info);
	else
		unicast_vis_packet(bat_priv, info);
	packet->ttl++; /* restore TTL */

out:
	if (primary_if)
		hardif_free_ref(primary_if);
}
/* called from timer; send (and maybe generate) vis packet.
 * Purges stale entries, regenerates our own packet, then drains the
 * send list. vis_hash_lock is dropped around the actual transmit
 * (send_vis_packet may sleep/allocate); the extra kref keeps the info
 * alive across that window. */
static void send_vis_packets(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, vis_work);
	struct vis_info *info;

	spin_lock_bh(&bat_priv->vis_hash_lock);
	purge_vis_packets(bat_priv);

	if (generate_vis_packet(bat_priv) == 0) {
		/* schedule if generation was successful */
		send_list_add(bat_priv, bat_priv->my_vis_info);
	}

	while (!list_empty(&bat_priv->vis_send_list)) {
		info = list_first_entry(&bat_priv->vis_send_list,
					typeof(*info), send_list);

		kref_get(&info->refcount);
		spin_unlock_bh(&bat_priv->vis_hash_lock);

		send_vis_packet(bat_priv, info);

		spin_lock_bh(&bat_priv->vis_hash_lock);
		send_list_del(info);
		kref_put(&info->refcount, free_info);
	}
	spin_unlock_bh(&bat_priv->vis_hash_lock);

	/* rearm the periodic worker */
	start_vis_timer(bat_priv);
}
/* init the vis server. this may only be called when if_list is already
 * initialized (e.g. bat0 is initialized, interfaces have been added)
 *
 * Returns 1 on success (or if already initialized), 0 on failure;
 * on failure everything is torn down again via vis_quit(). */
int vis_init(struct bat_priv *bat_priv)
{
	struct vis_packet *packet;
	int hash_added;

	if (bat_priv->vis_hash)
		return 1;

	spin_lock_bh(&bat_priv->vis_hash_lock);

	bat_priv->vis_hash = hash_new(256);
	if (!bat_priv->vis_hash) {
		pr_err("Can't initialize vis_hash\n");
		goto err;
	}

	/* NOTE(review): my_vis_info is a struct vis_info, yet it is
	 * allocated with MAX_VIS_PACKET_SIZE bytes - this looks oversized
	 * (sizeof(*bat_priv->my_vis_info) would presumably suffice since
	 * the packet data lives in the separate skb below); confirm struct
	 * size before changing. */
	bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
	if (!bat_priv->my_vis_info) {
		pr_err("Can't initialize vis packet\n");
		goto err;
	}

	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
						sizeof(struct vis_packet) +
						MAX_VIS_PACKET_SIZE +
						sizeof(struct ethhdr));
	if (!bat_priv->my_vis_info->skb_packet)
		goto free_info;

	skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
	packet = (struct vis_packet *)skb_put(
					bat_priv->my_vis_info->skb_packet,
					sizeof(struct vis_packet));

	/* prefill the vis info */
	/* backdate first_seen so the first worker run regenerates it */
	bat_priv->my_vis_info->first_seen = jiffies -
						msecs_to_jiffies(VIS_INTERVAL);
	INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
	INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
	kref_init(&bat_priv->my_vis_info->refcount);
	bat_priv->my_vis_info->bat_priv = bat_priv;
	packet->version = COMPAT_VERSION;
	packet->packet_type = BAT_VIS;
	packet->ttl = TTL;
	packet->seqno = 0;
	packet->entries = 0;

	INIT_LIST_HEAD(&bat_priv->vis_send_list);

	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			      bat_priv->my_vis_info,
			      &bat_priv->my_vis_info->hash_entry);
	if (hash_added < 0) {
		pr_err("Can't add own vis packet into hash\n");
		/* not in hash, need to remove it manually. */
		kref_put(&bat_priv->my_vis_info->refcount, free_info);
		goto err;
	}

	spin_unlock_bh(&bat_priv->vis_hash_lock);
	start_vis_timer(bat_priv);
	return 1;

free_info:
	kfree(bat_priv->my_vis_info);
	bat_priv->my_vis_info = NULL;
err:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
	vis_quit(bat_priv);
	return 0;
}
/* Decrease the reference count on a hash item info.
 * hash_delete() callback used from vis_quit(). */
static void free_info_ref(struct hlist_node *node, void *arg)
{
	struct vis_info *info = container_of(node, struct vis_info,
					     hash_entry);

	send_list_del(info);
	kref_put(&info->refcount, free_info);
}
/* shutdown vis-server
 * Stops the periodic worker (synchronously, so no work runs after this)
 * and releases every entry in the vis hash, including our own. */
void vis_quit(struct bat_priv *bat_priv)
{
	if (!bat_priv->vis_hash)
		return;

	cancel_delayed_work_sync(&bat_priv->vis_work);

	spin_lock_bh(&bat_priv->vis_hash_lock);
	/* properly remove, kill timers ... */
	hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
	bat_priv->vis_hash = NULL;
	bat_priv->my_vis_info = NULL;
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}
/* schedule packets for (re)transmission */
static void start_vis_timer(struct bat_priv *bat_priv)
{
	/* NOTE(review): the work item is re-initialized on every rearm;
	 * callers only invoke this when the work is not queued (init and
	 * end of the handler itself), so it appears safe - confirm no
	 * other caller exists before relying on that. */
	INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
	queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
			   msecs_to_jiffies(VIS_INTERVAL));
}
| gpl-2.0 |
Pafcholini/emotion_tw_511_COI3 | drivers/staging/rtl8187se/ieee80211/ieee80211_module.c | 2634 | 5367 | /*******************************************************************************
Copyright(c) 2004 Intel Corporation. All rights reserved.
Portions of this file are based on the WEP enablement code provided by the
Host AP project hostap-drivers v0.1.3
Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
<jkmaline@cc.hut.fi>
Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA 02111-1307, USA.
The full GNU General Public License is included in this distribution in the
file called LICENSE.
Contact Information:
James P. Ketrenos <ipw2100-admin@linux.intel.com>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*******************************************************************************/
#include <linux/compiler.h>
//#include <linux/config.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h>
#include <net/arp.h>
#include <net/net_namespace.h>
#include "ieee80211.h"
MODULE_DESCRIPTION("802.11 data/management/control stack");
MODULE_AUTHOR("Copyright (C) 2004 Intel Corporation <jketreno@linux.intel.com>");
MODULE_LICENSE("GPL");
#define DRV_NAME "ieee80211"
/* Allocate the zero-initialized network table (MAX_NETWORK_COUNT slots).
 * Idempotent: returns 0 if already allocated; -ENOMEM on failure. */
static inline int ieee80211_networks_allocate(struct ieee80211_device *ieee)
{
	if (ieee->networks)
		return 0;

	ieee->networks = kcalloc(MAX_NETWORK_COUNT,
				 sizeof(struct ieee80211_network),
				 GFP_KERNEL);

	return ieee->networks ? 0 : -ENOMEM;
}
/* Release the network table allocated by ieee80211_networks_allocate().
 * kfree(NULL) is a no-op, so the previous NULL guard was redundant;
 * the pointer is cleared to keep the function idempotent. */
static inline void ieee80211_networks_free(struct ieee80211_device *ieee)
{
	kfree(ieee->networks);
	ieee->networks = NULL;
}
/* Reset both network lists and place every slot of the network table on
 * the free list. */
static inline void ieee80211_networks_initialize(struct ieee80211_device *ieee)
{
	int slot;

	INIT_LIST_HEAD(&ieee->network_free_list);
	INIT_LIST_HEAD(&ieee->network_list);

	for (slot = 0; slot < MAX_NETWORK_COUNT; slot++)
		list_add_tail(&ieee->networks[slot].list,
			      &ieee->network_free_list);
}
/* Allocate and initialize an ieee80211 net_device with @sizeof_priv bytes
 * of driver-private data appended. Returns the net_device or NULL on
 * failure (everything allocated so far is released).
 *
 * Fixes: error message previously read "Unable to network device." (missing
 * "allocate"); duplicate assignment of ieee->ieee802_1x removed. */
struct net_device *alloc_ieee80211(int sizeof_priv)
{
	struct ieee80211_device *ieee;
	struct net_device *dev;
	int i, err;

	IEEE80211_DEBUG_INFO("Initializing...\n");

	dev = alloc_etherdev(sizeof(struct ieee80211_device) + sizeof_priv);
	if (!dev) {
		IEEE80211_ERROR("Unable to allocate network device.\n");
		goto failed;
	}

	ieee = netdev_priv(dev);
	ieee->dev = dev;

	err = ieee80211_networks_allocate(ieee);
	if (err) {
		IEEE80211_ERROR("Unable to allocate beacon storage: %d\n",
				err);
		goto failed;
	}
	ieee80211_networks_initialize(ieee);

	/* Default fragmentation threshold is maximum payload size */
	ieee->fts = DEFAULT_FTS;
	ieee->scan_age = DEFAULT_MAX_SCAN_AGE;
	ieee->open_wep = 1;

	/* Default to enabling full open WEP with host based encrypt/decrypt */
	ieee->host_encrypt = 1;
	ieee->host_decrypt = 1;
	ieee->ieee802_1x = 1; /* Default to supporting 802.1x */

	INIT_LIST_HEAD(&ieee->crypt_deinit_list);
	init_timer(&ieee->crypt_deinit_timer);
	ieee->crypt_deinit_timer.data = (unsigned long)ieee;
	ieee->crypt_deinit_timer.function = ieee80211_crypt_deinit_handler;

	spin_lock_init(&ieee->lock);
	spin_lock_init(&ieee->wpax_suitlist_lock);

	ieee->wpax_type_set = 0;
	ieee->wpa_enabled = 0;
	ieee->tkip_countermeasures = 0;
	ieee->drop_unencrypted = 0;
	ieee->privacy_invoked = 0;
	/* note: ieee802_1x already defaulted to 1 above */
	ieee->raw_tx = 0;

	ieee80211_softmac_init(ieee);

	for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++)
		INIT_LIST_HEAD(&ieee->ibss_mac_hash[i]);

	/* per-TID duplicate/fragment tracking state */
	for (i = 0; i < 17; i++) {
		ieee->last_rxseq_num[i] = -1;
		ieee->last_rxfrag_num[i] = -1;
		ieee->last_packet_time[i] = 0;
	}

	/* These function were added to load crypto modules automatically */
	ieee80211_tkip_null();
	ieee80211_wep_null();
	ieee80211_ccmp_null();

	return dev;

failed:
	if (dev)
		free_netdev(dev);
	return NULL;
}
/* Tear down an ieee80211 net_device allocated by alloc_ieee80211():
 * stop the crypt-deinit timer, release crypto contexts, the network
 * table and the IBSS hash chains, then free the net_device itself.
 *
 * Fixes a use-after-free: the IBSS hash entries were kfree()d *before*
 * list_del(p), but list_del dereferences p->next/p->prev which live
 * inside the just-freed object. Unlink first, then free. */
void free_ieee80211(struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	int i;
	struct list_head *p, *q;

	ieee80211_softmac_free(ieee);
	del_timer_sync(&ieee->crypt_deinit_timer);
	ieee80211_crypt_deinit_entries(ieee, 1);

	for (i = 0; i < WEP_KEYS; i++) {
		struct ieee80211_crypt_data *crypt = ieee->crypt[i];
		if (crypt) {
			if (crypt->ops)
				crypt->ops->deinit(crypt->priv);
			kfree(crypt);
			ieee->crypt[i] = NULL;
		}
	}

	ieee80211_networks_free(ieee);

	for (i = 0; i < IEEE_IBSS_MAC_HASH_SIZE; i++) {
		list_for_each_safe(p, q, &ieee->ibss_mac_hash[i]) {
			list_del(p);
			kfree(list_entry(p, struct ieee_ibss_seq, list));
		}
	}

	free_netdev(dev);
}
| gpl-2.0 |
sultanxda/sultan-kernel-pyramid | drivers/staging/tidspbridge/core/tiomap3430_pwr.c | 3146 | 16438 | /*
* tiomap_pwr.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* Implementation of DSP wake/sleep routines.
*
* Copyright (C) 2007-2008 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
#include <plat/dsp.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
#include <dspbridge/drv.h>
#include <dspbridge/io_sm.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/brddefs.h>
#include <dspbridge/dev.h>
#include <dspbridge/io.h>
/* ------------------------------------ Hardware Abstraction Layer */
#include <hw_defs.h>
#include <hw_mmu.h>
#include <dspbridge/pwr.h>
/* ----------------------------------- Bridge Driver */
#include <dspbridge/dspdeh.h>
#include <dspbridge/wdt.h>
/* ----------------------------------- specific to this file */
#include "_tiomap.h"
#include "_tiomap_pwr.h"
#include <mach-omap2/prm-regbits-34xx.h>
#include <mach-omap2/cm-regbits-34xx.h>
#define PWRSTST_TIMEOUT 200
/*
 *  ======== handle_constraints_set ========
 *  	Sets new DSP constraint
 *
 *  pargs points at a u32 pair; the requested OPP level is the second
 *  element and is forwarded to the platform's dsp_set_min_opp hook.
 *  Compiled to a no-op (returns 0) without CONFIG_TIDSPBRIDGE_DVFS.
 */
int handle_constraints_set(struct bridge_dev_context *dev_context,
			   void *pargs)
{
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 *constraint_val;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	constraint_val = (u32 *) (pargs);
	/* Read the target value requested by DSP */
	dev_dbg(bridge, "OPP: %s opp requested = 0x%x\n", __func__,
		(u32) *(constraint_val + 1));

	/* Set the new opp value */
	if (pdata->dsp_set_min_opp)
		(*pdata->dsp_set_min_opp) ((u32) *(constraint_val + 1));
#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
	return 0;
}
/*
 *  ======== handle_hibernation_from_dsp ========
 *  	Handle Hibernation requested from DSP
 *
 *  Polls the IVA2 power-state register (10 ms steps, up to
 *  PWRSTST_TIMEOUT ms) until the DSP reaches OFF, then saves mailbox
 *  context, gates DSP peripheral clocks and the watchdog, and marks the
 *  bridge hibernated. Returns 0 on success, -EPERM if interrupted,
 *  -ETIMEDOUT on timeout. Compiled to a no-op without CONFIG_PM.
 */
int handle_hibernation_from_dsp(struct bridge_dev_context *dev_context)
{
	int status = 0;
#ifdef CONFIG_PM
	u16 timeout = PWRSTST_TIMEOUT / 10;
	u32 pwr_state;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 opplevel;
	struct io_mgr *hio_mgr;
#endif
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
						OMAP_POWERSTATEST_MASK;
	/* Wait for DSP to move into OFF state */
	while ((pwr_state != PWRDM_POWER_OFF) && --timeout) {
		if (msleep_interruptible(10)) {
			pr_err("Waiting for DSP OFF mode interrupted\n");
			return -EPERM;
		}
		pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
					OMAP2_PM_PWSTST) &
					OMAP_POWERSTATEST_MASK;
	}
	if (timeout == 0) {
		pr_err("%s: Timed out waiting for DSP off mode\n", __func__);
		status = -ETIMEDOUT;
		return status;
	} else {
		/* Save mailbox settings */
		omap_mbox_save_ctx(dev_context->mbox);

		/* Turn off DSP Peripheral clocks and DSP Load monitor timer */
		status = dsp_clock_disable_all(dev_context->dsp_per_clks);

		/* Disable wdt on hibernation. */
		dsp_wdt_enable(false);

		if (!status) {
			/* Update the Bridger Driver state */
			dev_context->brd_state = BRD_DSP_HIBERNATION;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
			status =
			    dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
			if (!hio_mgr) {
				status = DSP_EHANDLE;
				return status;
			}
			io_sh_msetting(hio_mgr, SHM_GETOPP, &opplevel);

			/*
			 * Set the OPP to low level before moving to OFF
			 * mode
			 */
			if (pdata->dsp_set_min_opp)
				(*pdata->dsp_set_min_opp) (VDD1_OPP1);
			status = 0;
#endif /* CONFIG_TIDSPBRIDGE_DVFS */
		}
	}
#endif
	return status;
}
/*
 * ======== sleep_dsp ========
 * Put DSP in low power consuming state.
 *
 * dw_cmd selects PWR_DEEPSLEEP or PWR_EMERGENCYDEEPSLEEP; anything else
 * is rejected with -EINVAL. Depending on the current board state a
 * hibernate or retention mailbox command is sent, then the IVA2 power
 * domain is polled until it reaches the target state. pargs is unused.
 */
int sleep_dsp(struct bridge_dev_context *dev_context, u32 dw_cmd,
	      void *pargs)
{
	int status = 0;
#ifdef CONFIG_PM
#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
	struct deh_mgr *hdeh_mgr;
#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
	/* Poll budget: PWRSTST_TIMEOUT ms total, checked in 10 ms steps */
	u16 timeout = PWRSTST_TIMEOUT / 10;
	u32 pwr_state, target_pwr_state;
	struct omap_dsp_platform_data *pdata =
		omap_dspbridge_dev->dev.platform_data;

	/* Check if sleep code is valid */
	if ((dw_cmd != PWR_DEEPSLEEP) && (dw_cmd != PWR_EMERGENCYDEEPSLEEP))
		return -EINVAL;

	switch (dev_context->brd_state) {
	case BRD_RUNNING:
		omap_mbox_save_ctx(dev_context->mbox);
		if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
			sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
			dev_dbg(bridge, "PM: %s - sent hibernate cmd to DSP\n",
				__func__);
			target_pwr_state = PWRDM_POWER_OFF;
		} else {
			sm_interrupt_dsp(dev_context, MBX_PM_DSPRETENTION);
			target_pwr_state = PWRDM_POWER_RET;
		}
		break;
	case BRD_RETENTION:
		omap_mbox_save_ctx(dev_context->mbox);
		if (dsp_test_sleepstate == PWRDM_POWER_OFF) {
			sm_interrupt_dsp(dev_context, MBX_PM_DSPHIBERNATE);
			target_pwr_state = PWRDM_POWER_OFF;
		} else
			/* Already in retention; nothing deeper requested */
			return 0;
		break;
	case BRD_HIBERNATION:
	case BRD_DSP_HIBERNATION:
		/* Already in Hibernation, so just return */
		dev_dbg(bridge, "PM: %s - DSP already in hibernation\n",
			__func__);
		return 0;
	case BRD_STOPPED:
		dev_dbg(bridge, "PM: %s - Board in STOP state\n", __func__);
		return 0;
	default:
		dev_dbg(bridge, "PM: %s - Bridge in Illegal state\n", __func__);
		return -EPERM;
	}

	/* Get the PRCM DSP power domain status */
	pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST) &
						OMAP_POWERSTATEST_MASK;
	/* Wait for DSP to move into target power state */
	while ((pwr_state != target_pwr_state) && --timeout) {
		if (msleep_interruptible(10)) {
			pr_err("Waiting for DSP to Suspend interrupted\n");
			return -EPERM;
		}
		pwr_state = (*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
				OMAP2_PM_PWSTST) & OMAP_POWERSTATEST_MASK;
	}
	if (!timeout) {
		pr_err("%s: Timed out waiting for DSP off mode, state %x\n",
		       __func__, pwr_state);
#ifdef CONFIG_TIDSPBRIDGE_NTFY_PWRERR
		/* Notify the exception handler about the power failure */
		dev_get_deh_mgr(dev_context->dev_obj, &hdeh_mgr);
		bridge_deh_notify(hdeh_mgr, DSP_PWRERROR, 0);
#endif /* CONFIG_TIDSPBRIDGE_NTFY_PWRERR */
		return -ETIMEDOUT;
	} else {
		/* Update the Bridger Driver state */
		if (dsp_test_sleepstate == PWRDM_POWER_OFF)
			dev_context->brd_state = BRD_HIBERNATION;
		else
			dev_context->brd_state = BRD_RETENTION;
		/* Disable wdt on hibernation. */
		dsp_wdt_enable(false);
		/* Turn off DSP Peripheral clocks */
		status = dsp_clock_disable_all(dev_context->dsp_per_clks);
		if (status)
			return status;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
		else if (target_pwr_state == PWRDM_POWER_OFF) {
			/*
			 * Set the OPP to low level before moving to OFF
			 * mode
			 */
			if (pdata->dsp_set_min_opp)
				(*pdata->dsp_set_min_opp) (VDD1_OPP1);
		}
#endif /* CONFIG_TIDSPBRIDGE_DVFS */
	}
#endif /* CONFIG_PM */
	return status;
}
/*
 * ======== wake_dsp ========
 * Wake up DSP from sleep.
 *
 * A wakeup mailbox command is only issued when the bridge believes the
 * board is actually in a low power state; RUNNING and STOPPED boards
 * are left untouched. pargs is unused. Always returns 0.
 */
int wake_dsp(struct bridge_dev_context *dev_context, void *pargs)
{
#ifdef CONFIG_PM
	/* RUNNING or STOPPED means there is nothing to wake up */
	if (dev_context->brd_state != BRD_RUNNING &&
	    dev_context->brd_state != BRD_STOPPED) {
		/* Kick the DSP out of its low power state via the mailbox */
		sm_interrupt_dsp(dev_context, MBX_PM_DSPWAKEUP);
		/* Board is considered running again */
		dev_context->brd_state = BRD_RUNNING;
	}
#endif /* CONFIG_PM */
	return 0;
}
/*
 * ======== dsp_peripheral_clk_ctrl ========
 * Enable/Disable the DSP peripheral clocks as needed..
 *
 * pargs points at the raw 32-bit power message word from the DSP: the
 * low bits carry the clock id (MBX_PM_CLK_IDMASK), the command field is
 * extracted with MBX_PM_CLK_CMDSHIFT/MBX_PM_CLK_CMDMASK. Returns -EPERM
 * for an unknown clock id, otherwise the enable/disable status.
 */
int dsp_peripheral_clk_ctrl(struct bridge_dev_context *dev_context,
			    void *pargs)
{
	/* Removed dead local `dsp_per_clks_before` (assigned, never read)
	 * and redundant zero-initializers/casts from the original. */
	u32 ext_clk;
	u32 ext_clk_id;
	u32 ext_clk_cmd;
	u32 clk_id_index = MBX_PM_MAX_RESOURCES;
	u32 tmp_index;
	int status = 0;

	ext_clk = *((u32 *) pargs);
	ext_clk_id = ext_clk & MBX_PM_CLK_IDMASK;

	/* process the power message -- TODO, keep it in a separate function */
	for (tmp_index = 0; tmp_index < MBX_PM_MAX_RESOURCES; tmp_index++) {
		if (ext_clk_id == bpwr_clkid[tmp_index]) {
			clk_id_index = tmp_index;
			break;
		}
	}
	/* TODO -- Assert may be a too hard restriction here.. May be we should
	 * just return with failure when the CLK ID does not match */
	/* DBC_ASSERT(clk_id_index < MBX_PM_MAX_RESOURCES); */
	if (clk_id_index == MBX_PM_MAX_RESOURCES) {
		/* return with a more meaningful error code */
		return -EPERM;
	}

	ext_clk_cmd = (ext_clk >> MBX_PM_CLK_CMDSHIFT) & MBX_PM_CLK_CMDMASK;
	switch (ext_clk_cmd) {
	case BPWR_DISABLE_CLOCK:
		status = dsp_clk_disable(bpwr_clks[clk_id_index].clk);
		dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id,
					  false);
		if (!status) {
			/* Clear this clock's bit in the enabled-clock mask */
			(dev_context->dsp_per_clks) &=
			    (~((u32) (1 << bpwr_clks[clk_id_index].clk)));
		}
		break;
	case BPWR_ENABLE_CLOCK:
		status = dsp_clk_enable(bpwr_clks[clk_id_index].clk);
		dsp_clk_wakeup_event_ctrl(bpwr_clks[clk_id_index].clk_id, true);
		if (!status)
			/* Record this clock as enabled in the mask */
			(dev_context->dsp_per_clks) |=
			    (1 << bpwr_clks[clk_id_index].clk);
		break;
	default:
		dev_dbg(bridge, "%s: Unsupported CMD\n", __func__);
		/* unsupported cmd */
		/* TODO -- provide support for AUTOIDLE Enable/Disable
		 * commands */
	}
	return status;
}
/*
 * ========pre_scale_dsp========
 * Sends prescale notification to DSP
 *
 * pargs points at two u32 values: the voltage domain and the new OPP
 * level. If the DSP is asleep no message is sent; if it is running a
 * MBX_PM_SETPOINT_PRENOTIFY mailbox command is issued; any other board
 * state is an error (-EPERM). No-op without CONFIG_TIDSPBRIDGE_DVFS.
 */
int pre_scale_dsp(struct bridge_dev_context *dev_context, void *pargs)
{
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 level;
	u32 voltage_domain;

	voltage_domain = *((u32 *) pargs);
	level = *((u32 *) pargs + 1);

	dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
		__func__, voltage_domain, level);
	if ((dev_context->brd_state == BRD_HIBERNATION) ||
	    (dev_context->brd_state == BRD_RETENTION) ||
	    (dev_context->brd_state == BRD_DSP_HIBERNATION)) {
		/* Fix: original format string had a %s but passed no
		 * argument; supply __func__ like every other trace here. */
		dev_dbg(bridge, "OPP: %s IVA in sleep. No message to DSP\n",
			__func__);
		return 0;
	} else if (dev_context->brd_state == BRD_RUNNING) {
		/* Send a prenotification to DSP */
		dev_dbg(bridge, "OPP: %s sent notification to DSP\n", __func__);
		sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_PRENOTIFY);
		return 0;
	} else {
		return -EPERM;
	}
#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
	return 0;
}
/*
 * ========post_scale_dsp========
 * Sends postscale notification to DSP
 *
 * pargs points at two u32 values: the voltage domain and the new OPP
 * level. The level is written into shared memory; when the DSP is
 * running a MBX_PM_SETPOINT_POSTNOTIFY mailbox command is also sent.
 * No-op without CONFIG_TIDSPBRIDGE_DVFS.
 */
int post_scale_dsp(struct bridge_dev_context *dev_context,
		   void *pargs)
{
	int status = 0;
#ifdef CONFIG_TIDSPBRIDGE_DVFS
	u32 level;
	u32 voltage_domain;
	struct io_mgr *hio_mgr;

	status = dev_get_io_mgr(dev_context->dev_obj, &hio_mgr);
	if (!hio_mgr)
		return -EFAULT;

	voltage_domain = ((u32 *) pargs)[0];
	level = ((u32 *) pargs)[1];
	dev_dbg(bridge, "OPP: %s voltage_domain = %x, level = 0x%x\n",
		__func__, voltage_domain, level);

	switch (dev_context->brd_state) {
	case BRD_HIBERNATION:
	case BRD_RETENTION:
	case BRD_DSP_HIBERNATION:
		/* Update the OPP value in shared memory */
		io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
		dev_dbg(bridge, "OPP: %s IVA in sleep. Wrote to shm\n",
			__func__);
		break;
	case BRD_RUNNING:
		/* Update the OPP value in shared memory */
		io_sh_msetting(hio_mgr, SHM_CURROPP, &level);
		/* Send a post notification to DSP */
		sm_interrupt_dsp(dev_context, MBX_PM_SETPOINT_POSTNOTIFY);
		dev_dbg(bridge, "OPP: %s wrote to shm. Sent post notification "
			"to DSP\n", __func__);
		break;
	default:
		status = -EPERM;
		break;
	}
#endif /* #ifdef CONFIG_TIDSPBRIDGE_DVFS */
	return status;
}
/*
 * ======== dsp_clk_wakeup_event_ctrl ========
 * Route a DSP peripheral's wakeup event to the IVA2 (enable) or back to
 * the MPU (disable) by updating the GRPSEL registers at offsets 0xA8
 * (IVA2) and 0xA4 (MPU) of the peripheral's PRCM base. All peripherals
 * live behind per_pm_base except MCBSP1, which sits behind core_pm_base.
 *
 * Refactor: the original repeated the identical read/modify/write
 * sequence in nine switch arms; the switch now only selects the GRPSEL
 * mask and base, and the register update is done once.
 */
void dsp_clk_wakeup_event_ctrl(u32 clock_id, bool enable)
{
	struct cfg_hostres *resources;
	int status = 0;
	u32 iva2_grpsel;
	u32 mpu_grpsel;
	u32 mask;
	bool in_core_pwrdm = false;	/* MCBSP1 is in the CORE domain */
	struct dev_object *hdev_object = NULL;
	struct bridge_dev_context *bridge_context = NULL;

	hdev_object = (struct dev_object *)drv_get_first_dev_object();
	if (!hdev_object)
		return;

	status = dev_get_bridge_context(hdev_object, &bridge_context);
	if (!bridge_context)
		return;

	resources = bridge_context->resources;
	if (!resources)
		return;

	/* Map the bridge clock id onto its GRPSEL mask and power domain */
	switch (clock_id) {
	case BPWR_GP_TIMER5:
		mask = OMAP3430_GRPSEL_GPT5_MASK;
		break;
	case BPWR_GP_TIMER6:
		mask = OMAP3430_GRPSEL_GPT6_MASK;
		break;
	case BPWR_GP_TIMER7:
		mask = OMAP3430_GRPSEL_GPT7_MASK;
		break;
	case BPWR_GP_TIMER8:
		mask = OMAP3430_GRPSEL_GPT8_MASK;
		break;
	case BPWR_MCBSP1:
		mask = OMAP3430_GRPSEL_MCBSP1_MASK;
		in_core_pwrdm = true;
		break;
	case BPWR_MCBSP2:
		mask = OMAP3430_GRPSEL_MCBSP2_MASK;
		break;
	case BPWR_MCBSP3:
		mask = OMAP3430_GRPSEL_MCBSP3_MASK;
		break;
	case BPWR_MCBSP4:
		mask = OMAP3430_GRPSEL_MCBSP4_MASK;
		break;
	case BPWR_MCBSP5:
		mask = OMAP3430_GRPSEL_MCBSP5_MASK;
		break;
	default:
		/* Unknown clock id: nothing to do (matches old behavior) */
		return;
	}

	if (in_core_pwrdm) {
		iva2_grpsel = readl(resources->core_pm_base + 0xA8);
		mpu_grpsel = readl(resources->core_pm_base + 0xA4);
	} else {
		iva2_grpsel = readl(resources->per_pm_base + 0xA8);
		mpu_grpsel = readl(resources->per_pm_base + 0xA4);
	}

	if (enable) {
		iva2_grpsel |= mask;
		mpu_grpsel &= ~mask;
	} else {
		mpu_grpsel |= mask;
		iva2_grpsel &= ~mask;
	}

	if (in_core_pwrdm) {
		writel(iva2_grpsel, resources->core_pm_base + 0xA8);
		writel(mpu_grpsel, resources->core_pm_base + 0xA4);
	} else {
		writel(iva2_grpsel, resources->per_pm_base + 0xA8);
		writel(mpu_grpsel, resources->per_pm_base + 0xA4);
	}
}
| gpl-2.0 |
Root-Box/kernel_samsung_smdk4412 | drivers/staging/tidspbridge/rmgr/node.c | 3146 | 87658 | /*
* node.c
*
* DSP-BIOS Bridge driver support functions for TI OMAP processors.
*
* DSP/BIOS Bridge Node Manager.
*
* Copyright (C) 2005-2006 Texas Instruments, Inc.
*
* This package is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
* WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*/
#include <linux/types.h>
#include <linux/bitmap.h>
#include <linux/list.h>
/* ----------------------------------- Host OS */
#include <dspbridge/host_os.h>
/* ----------------------------------- DSP/BIOS Bridge */
#include <dspbridge/dbdefs.h>
/* ----------------------------------- Trace & Debug */
#include <dspbridge/dbc.h>
/* ----------------------------------- OS Adaptation Layer */
#include <dspbridge/memdefs.h>
#include <dspbridge/proc.h>
#include <dspbridge/strm.h>
#include <dspbridge/sync.h>
#include <dspbridge/ntfy.h>
/* ----------------------------------- Platform Manager */
#include <dspbridge/cmm.h>
#include <dspbridge/cod.h>
#include <dspbridge/dev.h>
#include <dspbridge/msg.h>
/* ----------------------------------- Resource Manager */
#include <dspbridge/dbdcd.h>
#include <dspbridge/disp.h>
#include <dspbridge/rms_sh.h>
/* ----------------------------------- Link Driver */
#include <dspbridge/dspdefs.h>
#include <dspbridge/dspioctl.h>
/* ----------------------------------- Others */
#include <dspbridge/uuidutil.h>
/* ----------------------------------- This */
#include <dspbridge/nodepriv.h>
#include <dspbridge/node.h>
#include <dspbridge/dmm.h>
/* Static/Dynamic Loader includes */
#include <dspbridge/dbll.h>
#include <dspbridge/nldr.h>
#include <dspbridge/drv.h>
#include <dspbridge/resourcecleanup.h>
#include <_tiomap.h>
#include <dspbridge/dspdeh.h>
#define HOSTPREFIX "/host"
#define PIPEPREFIX "/dbpipe"
#define MAX_INPUTS(h) \
((h)->dcd_props.obj_data.node_obj.ndb_props.num_input_streams)
#define MAX_OUTPUTS(h) \
((h)->dcd_props.obj_data.node_obj.ndb_props.num_output_streams)
#define NODE_GET_PRIORITY(h) ((h)->prio)
#define NODE_SET_PRIORITY(hnode, prio) ((hnode)->prio = prio)
#define NODE_SET_STATE(hnode, state) ((hnode)->node_state = state)
#define MAXPIPES 100 /* Max # of /pipe connections (CSL limit) */
#define MAXDEVSUFFIXLEN 2 /* Max(Log base 10 of MAXPIPES, MAXSTREAMS) */
#define PIPENAMELEN (sizeof(PIPEPREFIX) + MAXDEVSUFFIXLEN)
#define HOSTNAMELEN (sizeof(HOSTPREFIX) + MAXDEVSUFFIXLEN)
#define MAXDEVNAMELEN 32 /* dsp_ndbprops.ac_name size */
#define CREATEPHASE 1
#define EXECUTEPHASE 2
#define DELETEPHASE 3
/* Define default STRM parameters */
/*
* TBD: Put in header file, make global DSP_STRMATTRS with defaults,
* or make defaults configurable.
*/
#define DEFAULTBUFSIZE 32
#define DEFAULTNBUFS 2
#define DEFAULTSEGID 0
#define DEFAULTALIGNMENT 0
#define DEFAULTTIMEOUT 10000
#define RMSQUERYSERVER 0
#define RMSCONFIGURESERVER 1
#define RMSCREATENODE 2
#define RMSEXECUTENODE 3
#define RMSDELETENODE 4
#define RMSCHANGENODEPRIORITY 5
#define RMSREADMEMORY 6
#define RMSWRITEMEMORY 7
#define RMSCOPY 8
#define MAXTIMEOUT 2000
#define NUMRMSFXNS 9
#define PWR_TIMEOUT 500 /* default PWR timeout in msec */
#define STACKSEGLABEL "L1DSRAM_HEAP" /* Label for DSP Stack Segment Addr */
/*
 * ======== node_mgr ========
 * Per-device bookkeeping for all nodes managed on one DSP.
 */
struct node_mgr {
	struct dev_object *dev_obj;	/* Device object */
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	struct dcd_manager *dcd_mgr;	/* Proc/Node data manager */
	struct disp_object *disp_obj;	/* Node dispatcher */
	struct list_head node_list;	/* List of all allocated nodes */
	u32 num_nodes;		/* Number of nodes in node_list */
	u32 num_created;	/* Number of nodes *created* on DSP */
	DECLARE_BITMAP(pipe_map, MAXPIPES);	/* Pipe connection bitmap */
	DECLARE_BITMAP(pipe_done_map, MAXPIPES);	/* Pipes that are half free */
	/* Channel allocation bitmap */
	DECLARE_BITMAP(chnl_map, CHNL_MAXCHANNELS);
	/* DMA Channel allocation bitmap */
	DECLARE_BITMAP(dma_chnl_map, CHNL_MAXCHANNELS);
	/* Zero-Copy Channel alloc bitmap */
	DECLARE_BITMAP(zc_chnl_map, CHNL_MAXCHANNELS);
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	struct mutex node_mgr_lock;	/* For critical sections */
	u32 fxn_addrs[NUMRMSFXNS];	/* RMS function addresses */
	struct msg_mgr *msg_mgr_obj;
	/* Processor properties needed by Node Dispatcher */
	u32 num_chnls;		/* Total number of channels */
	u32 chnl_offset;	/* Offset of chnl ids rsvd for RMS */
	u32 chnl_buf_size;	/* Buffer size for data to RMS */
	int proc_family;	/* eg, 5000 */
	int proc_type;		/* eg, 5510 */
	u32 dsp_word_size;	/* Size of DSP word on host bytes */
	u32 dsp_data_mau_size;	/* Size of DSP data MAU */
	u32 dsp_mau_size;	/* Size of MAU */
	s32 min_pri;		/* Minimum runtime priority for node */
	s32 max_pri;		/* Maximum runtime priority for node */
	struct strm_mgr *strm_mgr_obj;	/* STRM manager */
	/* Loader properties */
	struct nldr_object *nldr_obj;	/* Handle to loader */
	struct node_ldr_fxns nldr_fxns;	/* Handle to loader functions */
	bool loader_init;	/* Loader Init function succeeded? */
};
/*
 * ======== connecttype ========
 * Kind of endpoint a node stream is attached to.
 */
enum connecttype {
	NOTCONNECTED = 0,	/* stream not connected */
	NODECONNECT,		/* node-to-node connection */
	HOSTCONNECT,		/* connection to/from the GPP host */
	DEVICECONNECT,		/* connection to a device node */
};
/*
 * ======== stream_chnl ========
 * One endpoint of a node's stream connection.
 */
struct stream_chnl {
	enum connecttype type;	/* Type of stream connection */
	u32 dev_id;		/* pipe or channel id */
};
/*
 * ======== node_object ========
 * State for one node allocated through node_allocate().
 */
struct node_object {
	struct list_head list_elem;
	struct node_mgr *node_mgr;	/* The manager of this node */
	struct proc_object *processor;	/* Back pointer to processor */
	struct dsp_uuid node_uuid;	/* Node's ID */
	s32 prio;		/* Node's current priority */
	u32 timeout;		/* Timeout for blocking NODE calls */
	u32 heap_size;		/* Node heap size in bytes */
	u32 dsp_heap_virt_addr;	/* DSP-side heap virtual address
				 * (original comment said "Heap Size") */
	u32 gpp_heap_virt_addr;	/* GPP-side heap virtual address
				 * (original comment said "Heap Size") */
	enum node_type ntype;	/* Type of node: message, task, etc */
	enum node_state node_state;	/* NODE_ALLOCATED, NODE_CREATED, ... */
	u32 num_inputs;		/* Current number of inputs */
	u32 num_outputs;	/* Current number of outputs */
	u32 max_input_index;	/* Current max input stream index */
	u32 max_output_index;	/* Current max output stream index */
	struct stream_chnl *inputs;	/* Node's input streams */
	struct stream_chnl *outputs;	/* Node's output streams */
	struct node_createargs create_args;	/* Args for node create func */
	nodeenv node_env;	/* Environment returned by RMS */
	struct dcd_genericobj dcd_props;	/* Node properties from DCD */
	struct dsp_cbdata *args;	/* Optional args to pass to node */
	struct ntfy_object *ntfy_obj;	/* Manages registered notifications */
	char *str_dev_name;	/* device name, if device node */
	struct sync_object *sync_done;	/* Synchronize node_terminate */
	s32 exit_status;	/* execute function return status */
	/* Information needed for node_get_attr() */
	void *device_owner;	/* If dev node, task that owns it */
	u32 num_gpp_inputs;	/* Current # of from GPP streams */
	u32 num_gpp_outputs;	/* Current # of to GPP streams */
	/* Current stream connections */
	struct dsp_streamconnect *stream_connect;
	/* Message queue */
	struct msg_queue *msg_queue_obj;
	/* These fields used for SM messaging */
	struct cmm_xlatorobject *xlator;	/* Node's SM addr translator */
	/* Handle to pass to dynamic loader */
	struct nldr_nodeobject *nldr_node_obj;
	bool loaded;		/* Code is (dynamically) loaded */
	bool phase_split;	/* Phases split in many libs or ovly */
};
/* Default buffer attributes (used by node_alloc_msg_buf when the caller
 * passes no attributes). */
static struct dsp_bufferattr node_dfltbufattrs = {
	.cb_struct = 0,
	.segment_id = 1,	/* nodes support a single SM segment (id 1) */
	.buf_alignment = 0,	/* no specific alignment requested */
};
static void delete_node(struct node_object *hnode,
struct process_context *pr_ctxt);
static void delete_node_mgr(struct node_mgr *hnode_mgr);
static void fill_stream_connect(struct node_object *node1,
struct node_object *node2, u32 stream1,
u32 stream2);
static void fill_stream_def(struct node_object *hnode,
struct node_strmdef *pstrm_def,
struct dsp_strmattr *pattrs);
static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream);
static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
u32 phase);
static int get_node_props(struct dcd_manager *hdcd_mgr,
struct node_object *hnode,
const struct dsp_uuid *node_uuid,
struct dcd_genericobj *dcd_prop);
static int get_proc_props(struct node_mgr *hnode_mgr,
struct dev_object *hdev_obj);
static int get_rms_fxns(struct node_mgr *hnode_mgr);
static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
u32 ul_num_bytes, u32 mem_space);
static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
u32 ul_num_bytes, u32 mem_space);
static u32 refs;		/* module reference count */

/* Dynamic loader functions. Member order must match the
 * node_ldr_fxns declaration. */
static struct node_ldr_fxns nldr_fxns = {
	nldr_allocate,
	nldr_create,
	nldr_delete,
	nldr_exit,
	nldr_get_fxn_addr,
	nldr_init,
	nldr_load,
	nldr_unload,
};
/* Return the node's current state, or -1 when no node handle is given. */
enum node_state node_get_state(void *hnode)
{
	struct node_object *node = hnode;

	return node ? node->node_state : -1;
}
/*
 * ======== node_allocate ========
 * Purpose:
 *      Allocate GPP resources to manage a node on the DSP.
 *
 * Looks the node up in the DCD database, allocates a node_object and
 * its stream/message/translator resources, optionally maps a caller
 * supplied heap into DSP address space, and registers the node with
 * the node manager and the process context. On any failure the
 * partially built node is torn down via delete_node().
 */
int node_allocate(struct proc_object *hprocessor,
		  const struct dsp_uuid *node_uuid,
		  const struct dsp_cbdata *pargs,
		  const struct dsp_nodeattrin *attr_in,
		  struct node_res_object **noderes,
		  struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct dev_object *hdev_obj;
	struct node_object *pnode = NULL;
	enum node_type node_type = NODE_TASK;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *ptask_args;
	u32 num_streams;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	struct cmm_object *hcmm_mgr = NULL;	/* Shared memory manager hndl */
	u32 proc_id;
	u32 pul_value;
	u32 dynext_base;
	u32 off_set = 0;
	u32 ul_stack_seg_addr, ul_stack_seg_val;
	u32 ul_gpp_mem_base;
	struct cfg_hostres *host_res;
	struct bridge_dev_context *pbridge_context;
	u32 mapped_addr = 0;
	u32 map_attrs = 0x0;
	struct dsp_processorstate proc_state;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
#endif
	void *node_res;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hprocessor != NULL);
	DBC_REQUIRE(noderes != NULL);
	DBC_REQUIRE(node_uuid != NULL);

	*noderes = NULL;

	status = proc_get_processor_id(hprocessor, &proc_id);
	if (proc_id != DSP_UNIT)
		goto func_end;

	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (!status) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL)
			status = -EPERM;
	}

	if (status)
		goto func_end;

	status = dev_get_bridge_context(hdev_obj, &pbridge_context);
	if (!pbridge_context) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt
	   to send the message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}

	/* Assuming that 0 is not a valid function address */
	if (hnode_mgr->fxn_addrs[0] == 0) {
		/* No RMS on target - we currently can't handle this */
		pr_err("%s: Failed, no RMS in base image\n", __func__);
		status = -EPERM;
	} else {
		/* Validate attr_in fields, if non-NULL */
		if (attr_in) {
			/* Check if attr_in->prio is within range */
			if (attr_in->prio < hnode_mgr->min_pri ||
			    attr_in->prio > hnode_mgr->max_pri)
				status = -EDOM;
		}
	}
	/* Allocate node object and fill in */
	if (status)
		goto func_end;

	pnode = kzalloc(sizeof(struct node_object), GFP_KERNEL);
	if (pnode == NULL) {
		status = -ENOMEM;
		goto func_end;
	}
	pnode->node_mgr = hnode_mgr;
	/* This critical section protects get_node_props */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Get dsp_ndbprops from node database */
	status = get_node_props(hnode_mgr->dcd_mgr, pnode, node_uuid,
				&(pnode->dcd_props));
	if (status)
		goto func_cont;

	pnode->node_uuid = *node_uuid;
	pnode->processor = hprocessor;
	pnode->ntype = pnode->dcd_props.obj_data.node_obj.ndb_props.ntype;
	pnode->timeout = pnode->dcd_props.obj_data.node_obj.ndb_props.timeout;
	pnode->prio = pnode->dcd_props.obj_data.node_obj.ndb_props.prio;

	/* Currently only C64 DSP builds support Node Dynamic * heaps */
	/* Allocate memory for node heap */
	pnode->create_args.asa.task_arg_obj.heap_size = 0;
	pnode->create_args.asa.task_arg_obj.dsp_heap_addr = 0;
	pnode->create_args.asa.task_arg_obj.dsp_heap_res_addr = 0;
	pnode->create_args.asa.task_arg_obj.gpp_heap_addr = 0;
	if (!attr_in)
		goto func_cont;

	/* Check if we have a user allocated node heap */
	if (!(attr_in->pgpp_virt_addr))
		goto func_cont;

	/* check for page aligned Heap size */
	if (((attr_in->heap_size) & (PG_SIZE4K - 1))) {
		pr_err("%s: node heap size not aligned to 4K, size = 0x%x \n",
		       __func__, attr_in->heap_size);
		status = -EINVAL;
	} else {
		pnode->create_args.asa.task_arg_obj.heap_size =
		    attr_in->heap_size;
		pnode->create_args.asa.task_arg_obj.gpp_heap_addr =
		    (u32) attr_in->pgpp_virt_addr;
	}
	if (status)
		goto func_cont;

	/* Reserve DSP virtual space for the heap; + PAGE_SIZE presumably
	 * adds a guard page — TODO confirm against the DMM layer */
	status = proc_reserve_memory(hprocessor,
				     pnode->create_args.asa.task_arg_obj.
				     heap_size + PAGE_SIZE,
				     (void **)&(pnode->create_args.asa.
						task_arg_obj.dsp_heap_res_addr),
				     pr_ctxt);
	if (status) {
		pr_err("%s: Failed to reserve memory for heap: 0x%x\n",
		       __func__, status);
		goto func_cont;
	}
#ifdef DSP_DMM_DEBUG
	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = DSP_EHANDLE;
		goto func_cont;
	}

	dmm_mem_map_dump(dmm_mgr);
#endif

	map_attrs |= DSP_MAPLITTLEENDIAN;
	map_attrs |= DSP_MAPELEMSIZE32;
	map_attrs |= DSP_MAPVIRTUALADDR;
	status = proc_map(hprocessor, (void *)attr_in->pgpp_virt_addr,
			  pnode->create_args.asa.task_arg_obj.heap_size,
			  (void *)pnode->create_args.asa.task_arg_obj.
			  dsp_heap_res_addr, (void **)&mapped_addr, map_attrs,
			  pr_ctxt);
	if (status)
		pr_err("%s: Failed to map memory for Heap: 0x%x\n",
		       __func__, status);
	else
		pnode->create_args.asa.task_arg_obj.dsp_heap_addr =
		    (u32) mapped_addr;

func_cont:
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (attr_in != NULL) {
		/* Overrides of NBD properties */
		pnode->timeout = attr_in->timeout;
		pnode->prio = attr_in->prio;
	}
	/* Create object to manage notifications */
	if (!status) {
		pnode->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					  GFP_KERNEL);
		if (pnode->ntfy_obj)
			ntfy_init(pnode->ntfy_obj);
		else
			status = -ENOMEM;
	}

	if (!status) {
		node_type = node_get_type(pnode);
		/* Allocate dsp_streamconnect array for device, task, and
		 * dais socket nodes. */
		if (node_type != NODE_MESSAGE) {
			num_streams = MAX_INPUTS(pnode) + MAX_OUTPUTS(pnode);
			pnode->stream_connect = kzalloc(num_streams *
					sizeof(struct dsp_streamconnect),
					GFP_KERNEL);
			if (num_streams > 0 && pnode->stream_connect == NULL)
				status = -ENOMEM;
		}
		if (!status && (node_type == NODE_TASK ||
				node_type == NODE_DAISSOCKET)) {
			/* Allocate arrays for maintainig stream connections */
			pnode->inputs = kzalloc(MAX_INPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			pnode->outputs = kzalloc(MAX_OUTPUTS(pnode) *
					sizeof(struct stream_chnl), GFP_KERNEL);
			ptask_args = &(pnode->create_args.asa.task_arg_obj);
			ptask_args->strm_in_def = kzalloc(MAX_INPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			ptask_args->strm_out_def = kzalloc(MAX_OUTPUTS(pnode) *
						sizeof(struct node_strmdef),
						GFP_KERNEL);
			if ((MAX_INPUTS(pnode) > 0 && (pnode->inputs == NULL ||
						       ptask_args->strm_in_def
						       == NULL))
			    || (MAX_OUTPUTS(pnode) > 0
				&& (pnode->outputs == NULL
				    || ptask_args->strm_out_def == NULL)))
				status = -ENOMEM;
		}
	}
	if (!status && (node_type != NODE_DEVICE)) {
		/* Create an event that will be posted when RMS_EXIT is
		 * received. */
		pnode->sync_done = kzalloc(sizeof(struct sync_object),
					   GFP_KERNEL);
		if (pnode->sync_done)
			sync_init_event(pnode->sync_done);
		else
			status = -ENOMEM;

		if (!status) {
			/*Get the shared mem mgr for this nodes dev object */
			status = cmm_get_handle(hprocessor, &hcmm_mgr);
			if (!status) {
				/* Allocate a SM addr translator for this node
				 * w/ deflt attr */
				status = cmm_xlator_create(&pnode->xlator,
							   hcmm_mgr, NULL);
			}
		}
		if (!status) {
			/* Fill in message args */
			if ((pargs != NULL) && (pargs->cb_data > 0)) {
				pmsg_args =
				    &(pnode->create_args.asa.node_msg_args);
				pmsg_args->pdata = kzalloc(pargs->cb_data,
							   GFP_KERNEL);
				if (pmsg_args->pdata == NULL) {
					status = -ENOMEM;
				} else {
					pmsg_args->arg_length = pargs->cb_data;
					memcpy(pmsg_args->pdata,
					       pargs->node_data,
					       pargs->cb_data);
				}
			}
		}
	}

	if (!status && node_type != NODE_DEVICE) {
		/* Create a message queue for this node */
		intf_fxns = hnode_mgr->intf_fxns;
		status =
		    (*intf_fxns->msg_create_queue) (hnode_mgr->msg_mgr_obj,
						    &pnode->msg_queue_obj,
						    0,
						    pnode->create_args.asa.
						    node_msg_args.max_msgs,
						    pnode);
	}

	if (!status) {
		/* Create object for dynamic loading */
		status = hnode_mgr->nldr_fxns.allocate(hnode_mgr->nldr_obj,
						       (void *)pnode,
						       &pnode->dcd_props.
						       obj_data.node_obj,
						       &pnode->
						       nldr_node_obj,
						       &pnode->phase_split);
	}

	/* Compare value read from Node Properties and check if it is same as
	 * STACKSEGLABEL, if yes read the Address of STACKSEGLABEL, calculate
	 * GPP Address, Read the value in that address and override the
	 * stack_seg value in task args */
	if (!status &&
	    (char *)pnode->dcd_props.obj_data.node_obj.ndb_props.
	    stack_seg_name != NULL) {
		if (strcmp((char *)
			   pnode->dcd_props.obj_data.node_obj.ndb_props.
			   stack_seg_name, STACKSEGLABEL) == 0) {
			status =
			    hnode_mgr->nldr_fxns.
			    get_fxn_addr(pnode->nldr_node_obj, "DYNEXT_BEG",
					 &dynext_base);
			if (status)
				pr_err("%s: Failed to get addr for DYNEXT_BEG"
				       " status = 0x%x\n", __func__, status);

			status =
			    hnode_mgr->nldr_fxns.
			    get_fxn_addr(pnode->nldr_node_obj,
					 "L1DSRAM_HEAP", &pul_value);

			if (status)
				pr_err("%s: Failed to get addr for L1DSRAM_HEAP"
				       " status = 0x%x\n", __func__, status);

			host_res = pbridge_context->resources;
			if (!host_res)
				status = -EPERM;

			if (status) {
				pr_err("%s: Failed to get host resource, status"
				       " = 0x%x\n", __func__, status);
				goto func_end;
			}

			/* Translate the DSP symbol offset into a GPP address
			 * and read the stack segment value stored there */
			ul_gpp_mem_base = (u32) host_res->mem_base[1];
			off_set = pul_value - dynext_base;
			ul_stack_seg_addr = ul_gpp_mem_base + off_set;
			ul_stack_seg_val = readl(ul_stack_seg_addr);

			dev_dbg(bridge, "%s: StackSegVal = 0x%x, StackSegAddr ="
				" 0x%x\n", __func__, ul_stack_seg_val,
				ul_stack_seg_addr);

			pnode->create_args.asa.task_arg_obj.stack_seg =
			    ul_stack_seg_val;
		}
	}

	if (!status) {
		/* Add the node to the node manager's list of allocated
		 * nodes. */
		NODE_SET_STATE(pnode, NODE_ALLOCATED);

		mutex_lock(&hnode_mgr->node_mgr_lock);
		list_add_tail(&pnode->list_elem, &hnode_mgr->node_list);
		++(hnode_mgr->num_nodes);

		/* Exit critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);

		/* Preset this to assume phases are split
		 * (for overlay and dll) */
		pnode->phase_split = true;

		/* Notify all clients registered for DSP_NODESTATECHANGE. */
		proc_notify_all_clients(hprocessor, DSP_NODESTATECHANGE);
	} else {
		/* Cleanup */
		if (pnode)
			delete_node(pnode, pr_ctxt);
	}

	if (!status) {
		status = drv_insert_node_res_element(pnode, &node_res, pr_ctxt);
		if (status) {
			delete_node(pnode, pr_ctxt);
			goto func_end;
		}

		*noderes = (struct node_res_object *)node_res;
		drv_proc_node_update_heap_status(node_res, true);
		drv_proc_node_update_status(node_res, true);
	}
	DBC_ENSURE((status && *noderes == NULL) || (!status && *noderes));
func_end:
	dev_dbg(bridge, "%s: hprocessor: %p pNodeId: %p pargs: %p attr_in: %p "
		"node_res: %p status: 0x%x\n", __func__, hprocessor,
		node_uuid, pargs, attr_in, noderes, status);
	return status;
}
/*
 * ======== node_alloc_msg_buf ========
 * Purpose:
 *      Allocates buffer for zero copy messaging.
 *
 * pattr may be NULL, in which case node_dfltbufattrs is used. When the
 * segment id carries MEM_SETVIRTUALSEGID/MEM_GETVIRTUALSEGID the call
 * only sets/queries the node's SM translator; otherwise a physical
 * buffer is allocated from segment 1 via the translator.
 */
DBAPI node_alloc_msg_buf(struct node_object *hnode, u32 usize,
			 struct dsp_bufferattr *pattr,
			 u8 **pbuffer)
{
	struct node_object *pnode = (struct node_object *)hnode;
	int status = 0;
	bool va_flag = false;	/* true: virtual-address (translator) path */
	bool set_info;
	u32 proc_id;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pbuffer != NULL);
	DBC_REQUIRE(usize > 0);

	if (!pnode)
		status = -EFAULT;
	else if (node_get_type(pnode) == NODE_DEVICE)
		status = -EPERM;

	if (status)
		goto func_end;

	if (pattr == NULL)
		pattr = &node_dfltbufattrs;	/* set defaults */

	status = proc_get_processor_id(pnode->processor, &proc_id);
	if (proc_id != DSP_UNIT) {
		/* NOTE(review): returns the proc_get_processor_id status
		 * unchanged here, which may be 0 */
		DBC_ASSERT(NULL);
		goto func_end;
	}
	/* If segment ID includes MEM_SETVIRTUALSEGID then pbuffer is a
	 * virt address, so set this info in this node's translator
	 * object for future ref. If MEM_GETVIRTUALSEGID then retrieve
	 * virtual address from node's translator. */
	if ((pattr->segment_id & MEM_SETVIRTUALSEGID) ||
	    (pattr->segment_id & MEM_GETVIRTUALSEGID)) {
		va_flag = true;
		set_info = (pattr->segment_id & MEM_SETVIRTUALSEGID) ?
		    true : false;
		/* Clear mask bits */
		pattr->segment_id &= ~MEM_MASKVIRTUALSEGID;
		/* Set/get this node's translators virtual address base/size */
		status = cmm_xlator_info(pnode->xlator, pbuffer, usize,
					 pattr->segment_id, set_info);
	}
	if (!status && (!va_flag)) {
		if (pattr->segment_id != 1) {
			/* Node supports single SM segment only. */
			status = -EBADR;
		}
		/* Arbitrary SM buffer alignment not supported for host side
		 * allocs, but guaranteed for the following alignment
		 * values. */
		switch (pattr->buf_alignment) {
		case 0:
		case 1:
		case 2:
		case 4:
			break;
		default:
			/* alignment value not supported */
			status = -EPERM;
			break;
		}
		if (!status) {
			/* allocate physical buffer from seg_id in node's
			 * translator */
			(void)cmm_xlator_alloc_buf(pnode->xlator, pbuffer,
						   usize);
			if (*pbuffer == NULL) {
				pr_err("%s: error - Out of shared memory\n",
				       __func__);
				status = -ENOMEM;
			}
		}
	}
func_end:
	return status;
}
/*
* ======== node_change_priority ========
* Purpose:
* Change the priority of a node in the allocated state, or that is
* currently running or paused on the target.
*/
int node_change_priority(struct node_object *hnode, s32 prio)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	enum node_state state;
	int status = 0;
	u32 proc_id;

	DBC_REQUIRE(refs > 0);

	/* Validate handle, node type and the requested priority range. */
	if (!hnode || !hnode->node_mgr) {
		status = -EFAULT;
	} else {
		hnode_mgr = hnode->node_mgr;
		node_type = node_get_type(hnode);
		/* Only task and DAIS socket nodes carry a priority. */
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
		else if (prio < hnode_mgr->min_pri || prio > hnode_mgr->max_pri)
			status = -EDOM;
	}
	if (status)
		goto func_end;

	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	state = node_get_state(hnode);
	if (state == NODE_ALLOCATED || state == NODE_PAUSED) {
		/* Node is not running on the DSP: just record the new
		 * priority locally. */
		NODE_SET_PRIORITY(hnode, prio);
	} else {
		if (state != NODE_RUNNING) {
			status = -EBADR;
			goto func_cont;
		}
		status = proc_get_processor_id(pnode->processor, &proc_id);
		if (proc_id == DSP_UNIT) {
			/* Ask the dispatcher to change the priority on the
			 * DSP side as well. */
			status =
			    disp_node_change_priority(hnode_mgr->disp_obj,
						      hnode,
						      hnode_mgr->fxn_addrs
						      [RMSCHANGENODEPRIORITY],
						      hnode->node_env, prio);
		}
		/* Mirror the new priority locally only on success. */
		if (status >= 0)
			NODE_SET_PRIORITY(hnode, prio);
	}
func_cont:
	/* Leave critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	return status;
}
/*
* ======== node_connect ========
* Purpose:
* Connect two nodes on the DSP, or a node on the DSP to the GPP.
*/
int node_connect(struct node_object *node1, u32 stream1,
		 struct node_object *node2,
		 u32 stream2, struct dsp_strmattr *pattrs,
		 struct dsp_cbdata *conn_param)
{
	struct node_mgr *hnode_mgr;
	char *pstr_dev_name = NULL;
	enum node_type node1_type = NODE_TASK;
	enum node_type node2_type = NODE_TASK;
	enum dsp_strmmode strm_mode;
	struct node_strmdef *pstrm_def;
	struct node_strmdef *input = NULL;
	struct node_strmdef *output = NULL;
	struct node_object *dev_node_obj;
	struct node_object *hnode;
	struct stream_chnl *pstream;
	u32 pipe_id;
	u32 chnl_id;
	s8 chnl_mode;
	u32 dw_length;
	int status = 0;

	DBC_REQUIRE(refs > 0);

	if (!node1 || !node2)
		return -EFAULT;
	/* The two nodes must be on the same processor */
	if (node1 != (struct node_object *)DSP_HGPPNODE &&
	    node2 != (struct node_object *)DSP_HGPPNODE &&
	    node1->node_mgr != node2->node_mgr)
		return -EPERM;
	/* Cannot connect a node to itself */
	if (node1 == node2)
		return -EPERM;
	/* node_get_type() will return NODE_GPP if hnode = DSP_HGPPNODE. */
	node1_type = node_get_type(node1);
	node2_type = node_get_type(node2);
	/* Check stream indices ranges */
	if ((node1_type != NODE_GPP && node1_type != NODE_DEVICE &&
	     stream1 >= MAX_OUTPUTS(node1)) ||
	    (node2_type != NODE_GPP && node2_type != NODE_DEVICE &&
	     stream2 >= MAX_INPUTS(node2)))
		return -EINVAL;
	/*
	 * Only the following types of connections are allowed:
	 * task/dais socket < == > task/dais socket
	 * task/dais socket < == > device
	 * task/dais socket < == > GPP
	 *
	 * ie, no message nodes, and at least one task or dais
	 * socket node.
	 */
	if (node1_type == NODE_MESSAGE || node2_type == NODE_MESSAGE ||
	    (node1_type != NODE_TASK &&
	     node1_type != NODE_DAISSOCKET &&
	     node2_type != NODE_TASK &&
	     node2_type != NODE_DAISSOCKET))
		return -EPERM;
	/*
	 * Check stream mode. Default is STRMMODE_PROCCOPY.
	 */
	if (pattrs && pattrs->strm_mode != STRMMODE_PROCCOPY)
		return -EPERM;	/* illegal stream mode */

	/* Pick the manager from whichever side is a real DSP node. */
	if (node1_type != NODE_GPP) {
		hnode_mgr = node1->node_mgr;
	} else {
		DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
		hnode_mgr = node2->node_mgr;
	}

	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Nodes must be in the allocated state */
	if (node1_type != NODE_GPP &&
	    node_get_state(node1) != NODE_ALLOCATED) {
		status = -EBADR;
		goto out_unlock;
	}

	if (node2_type != NODE_GPP &&
	    node_get_state(node2) != NODE_ALLOCATED) {
		status = -EBADR;
		goto out_unlock;
	}

	/*
	 * Check that stream indices for task and dais socket nodes
	 * are not already in use. (Device nodes checked later)
	 */
	if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
		output = &(node1->create_args.asa.
			   task_arg_obj.strm_out_def[stream1]);
		/* A non-NULL sz_device means this output is connected. */
		if (output->sz_device) {
			status = -EISCONN;
			goto out_unlock;
		}
	}

	if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
		input = &(node2->create_args.asa.
			  task_arg_obj.strm_in_def[stream2]);
		if (input->sz_device) {
			status = -EISCONN;
			goto out_unlock;
		}
	}

	/* Connecting two task nodes? */
	if ((node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) &&
	    (node2_type == NODE_TASK ||
	     node2_type == NODE_DAISSOCKET)) {
		/* Find available pipe */
		pipe_id = find_first_zero_bit(hnode_mgr->pipe_map, MAXPIPES);
		if (pipe_id == MAXPIPES) {
			status = -ECONNREFUSED;
			goto out_unlock;
		}
		set_bit(pipe_id, hnode_mgr->pipe_map);
		node1->outputs[stream1].type = NODECONNECT;
		node2->inputs[stream2].type = NODECONNECT;
		node1->outputs[stream1].dev_id = pipe_id;
		node2->inputs[stream2].dev_id = pipe_id;
		output->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
		input->sz_device = kzalloc(PIPENAMELEN + 1, GFP_KERNEL);
		if (!output->sz_device || !input->sz_device) {
			/* Undo the connection */
			kfree(output->sz_device);
			kfree(input->sz_device);
			clear_bit(pipe_id, hnode_mgr->pipe_map);
			status = -ENOMEM;
			goto out_unlock;
		}
		/* Copy "/dbpipe<pipe_id>" name to device names */
		sprintf(output->sz_device, "%s%d", PIPEPREFIX, pipe_id);
		strcpy(input->sz_device, output->sz_device);
	}
	/* Connecting task node to host? */
	if (node1_type == NODE_GPP || node2_type == NODE_GPP) {
		pstr_dev_name = kzalloc(HOSTNAMELEN + 1, GFP_KERNEL);
		if (!pstr_dev_name) {
			status = -ENOMEM;
			goto out_unlock;
		}

		DBC_ASSERT((node1_type == NODE_GPP) ||
			   (node2_type == NODE_GPP));

		/* NOTE(review): chnl_mode is computed here but never read
		 * again in this function — looks like a dead store; confirm
		 * before removing. */
		chnl_mode = (node1_type == NODE_GPP) ?
		    CHNL_MODETODSP : CHNL_MODEFROMDSP;

		/*
		 * Reserve a channel id. We need to put the name "/host<id>"
		 * in the node's create_args, but the host
		 * side channel will not be opened until DSPStream_Open is
		 * called for this node.
		 */
		strm_mode = pattrs ? pattrs->strm_mode : STRMMODE_PROCCOPY;
		switch (strm_mode) {
		case STRMMODE_RDMA:
			chnl_id = find_first_zero_bit(hnode_mgr->dma_chnl_map,
						      CHNL_MAXCHANNELS);
			if (chnl_id < CHNL_MAXCHANNELS) {
				set_bit(chnl_id, hnode_mgr->dma_chnl_map);
				/* dma chans are 2nd transport chnl set
				 * ids(e.g. 16-31) */
				chnl_id = chnl_id + hnode_mgr->num_chnls;
			}
			break;
		case STRMMODE_ZEROCOPY:
			chnl_id = find_first_zero_bit(hnode_mgr->zc_chnl_map,
						      CHNL_MAXCHANNELS);
			if (chnl_id < CHNL_MAXCHANNELS) {
				set_bit(chnl_id, hnode_mgr->zc_chnl_map);
				/* zero-copy chans are 3rd transport set
				 * (e.g. 32-47) */
				chnl_id = chnl_id +
				    (2 * hnode_mgr->num_chnls);
			}
			break;
		case STRMMODE_PROCCOPY:
			chnl_id = find_first_zero_bit(hnode_mgr->chnl_map,
						      CHNL_MAXCHANNELS);
			if (chnl_id < CHNL_MAXCHANNELS)
				set_bit(chnl_id, hnode_mgr->chnl_map);
			break;
		default:
			status = -EINVAL;
			goto out_unlock;
		}
		if (chnl_id == CHNL_MAXCHANNELS) {
			status = -ECONNREFUSED;
			goto out_unlock;
		}

		if (node1 == (struct node_object *)DSP_HGPPNODE) {
			node2->inputs[stream2].type = HOSTCONNECT;
			node2->inputs[stream2].dev_id = chnl_id;
			input->sz_device = pstr_dev_name;
		} else {
			node1->outputs[stream1].type = HOSTCONNECT;
			node1->outputs[stream1].dev_id = chnl_id;
			output->sz_device = pstr_dev_name;
		}
		sprintf(pstr_dev_name, "%s%d", HOSTPREFIX, chnl_id);
	}
	/* Connecting task node to device node? */
	if ((node1_type == NODE_DEVICE) || (node2_type == NODE_DEVICE)) {
		if (node2_type == NODE_DEVICE) {
			/* node1 == > device */
			dev_node_obj = node2;
			hnode = node1;
			pstream = &(node1->outputs[stream1]);
			pstrm_def = output;
		} else {
			/* device == > node2 */
			dev_node_obj = node1;
			hnode = node2;
			pstream = &(node2->inputs[stream2]);
			pstrm_def = input;
		}
		/* Set up create args */
		pstream->type = DEVICECONNECT;
		dw_length = strlen(dev_node_obj->str_dev_name);
		if (conn_param)
			pstrm_def->sz_device = kzalloc(dw_length + 1 +
						       conn_param->cb_data,
						       GFP_KERNEL);
		else
			pstrm_def->sz_device = kzalloc(dw_length + 1,
						       GFP_KERNEL);
		if (!pstrm_def->sz_device) {
			status = -ENOMEM;
			goto out_unlock;
		}
		/* Copy device name */
		strncpy(pstrm_def->sz_device,
			dev_node_obj->str_dev_name, dw_length);
		if (conn_param)
			strncat(pstrm_def->sz_device,
				(char *)conn_param->node_data,
				(u32) conn_param->cb_data);
		dev_node_obj->device_owner = hnode;
	}
	/* Fill in create args */
	if (node1_type == NODE_TASK || node1_type == NODE_DAISSOCKET) {
		node1->create_args.asa.task_arg_obj.num_outputs++;
		fill_stream_def(node1, output, pattrs);
	}
	if (node2_type == NODE_TASK || node2_type == NODE_DAISSOCKET) {
		node2->create_args.asa.task_arg_obj.num_inputs++;
		fill_stream_def(node2, input, pattrs);
	}
	/* Update node1 and node2 stream_connect */
	if (node1_type != NODE_GPP && node1_type != NODE_DEVICE) {
		node1->num_outputs++;
		if (stream1 > node1->max_output_index)
			node1->max_output_index = stream1;
	}
	if (node2_type != NODE_GPP && node2_type != NODE_DEVICE) {
		node2->num_inputs++;
		if (stream2 > node2->max_input_index)
			node2->max_input_index = stream2;
	}
	fill_stream_connect(node1, node2, stream1, stream2);
	/* end of sync_enter_cs */
	/* Exit critical section */
out_unlock:
	/* NOTE(review): if a later step fails after pstr_dev_name was
	 * stored into input/output->sz_device, this kfree leaves that
	 * sz_device pointer dangling — verify cleanup elsewhere resets
	 * it before reuse. */
	if (status && pstr_dev_name)
		kfree(pstr_dev_name);
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	dev_dbg(bridge, "%s: node1: %p stream1: %d node2: %p stream2: %d"
		"pattrs: %p status: 0x%x\n", __func__, node1,
		stream1, node2, stream2, pattrs, status);
	return status;
}
/*
* ======== node_create ========
* Purpose:
* Create a node on the DSP by remotely calling the node's create function.
*/
int node_create(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 ul_create_fxn;
	enum node_type node_type;
	int status = 0;
	int status1 = 0;
	struct dsp_cbdata cb_data;
	u32 proc_id = 255;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

	DBC_REQUIRE(refs > 0);

	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to create
	   new node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR calls */
	cb_data.cb_data = PWR_TIMEOUT;
	node_type = node_get_type(hnode);
	hnode_mgr = hnode->node_mgr;
	intf_fxns = hnode_mgr->intf_fxns;
	/* Get access to node dispatcher */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	/* Check node state */
	if (node_get_state(hnode) != NODE_ALLOCATED)
		status = -EBADR;

	if (!status)
		status = proc_get_processor_id(pnode->processor, &proc_id);

	if (status)
		goto func_cont2;

	/* Only DSP nodes run the create phase below. */
	if (proc_id != DSP_UNIT)
		goto func_cont2;

	/* Make sure streams are properly connected */
	if ((hnode->num_inputs && hnode->max_input_index >
	     hnode->num_inputs - 1) ||
	    (hnode->num_outputs && hnode->max_output_index >
	     hnode->num_outputs - 1))
		status = -ENOTCONN;

	if (!status) {
		/* If node's create function is not loaded, load it */
		/* Boost the OPP level to max level that DSP can be requested */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP3]);
#endif
		status = hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
						   NLDR_CREATE);
		/* Get address of node's create function */
		if (!status) {
			hnode->loaded = true;
			if (node_type != NODE_DEVICE) {
				status = get_fxn_address(hnode, &ul_create_fxn,
							 CREATEPHASE);
			}
		} else {
			pr_err("%s: failed to load create code: 0x%x\n",
			       __func__, status);
		}
		/* Request the lowest OPP level */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
		/* Get address of iAlg functions, if socket node */
		if (!status) {
			if (node_type == NODE_DAISSOCKET) {
				status = hnode_mgr->nldr_fxns.get_fxn_addr
				    (hnode->nldr_node_obj,
				     hnode->dcd_props.obj_data.node_obj.
				     str_i_alg_name,
				     &hnode->create_args.asa.
				     task_arg_obj.dais_arg);
			}
		}
	}
	if (!status) {
		if (node_type != NODE_DEVICE) {
			/* Remotely invoke the node's create phase via the
			 * dispatcher; node_env is filled in on success. */
			status = disp_node_create(hnode_mgr->disp_obj, hnode,
						  hnode_mgr->fxn_addrs
						  [RMSCREATENODE],
						  ul_create_fxn,
						  &(hnode->create_args),
						  &(hnode->node_env));
			if (status >= 0) {
				/* Set the message queue id to the node env
				 * pointer */
				intf_fxns = hnode_mgr->intf_fxns;
				(*intf_fxns->msg_set_queue_id) (hnode->
							msg_queue_obj,
							hnode->node_env);
			}
		}
	}
	/* Phase II/Overlays: Create, execute, delete phases possibly in
	 * different files/sections. */
	if (hnode->loaded && hnode->phase_split) {
		/* If create code was dynamically loaded, we can now unload
		 * it. */
		status1 = hnode_mgr->nldr_fxns.unload(hnode->nldr_node_obj,
						      NLDR_CREATE);
		hnode->loaded = false;
	}
	if (status1)
		pr_err("%s: Failed to unload create code: 0x%x\n",
		       __func__, status1);
func_cont2:
	/* Update node state and node manager state */
	if (status >= 0) {
		NODE_SET_STATE(hnode, NODE_CREATED);
		hnode_mgr->num_created++;
		goto func_cont;
	}
	if (status != -EBADR) {
		/* Put back in NODE_ALLOCATED state if error occurred */
		NODE_SET_STATE(hnode, NODE_ALLOCATED);
	}
func_cont:
	/* Free access to node dispatcher */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	if (status >= 0) {
		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}

	dev_dbg(bridge, "%s: hnode: %p status: 0x%x\n", __func__,
		hnode, status);
	return status;
}
/*
* ======== node_create_mgr ========
* Purpose:
* Create a NODE Manager object.
*/
int node_create_mgr(struct node_mgr **node_man,
		    struct dev_object *hdev_obj)
{
	u32 i;
	struct node_mgr *node_mgr_obj = NULL;
	struct disp_attr disp_attr_obj;
	char *sz_zl_file = "";
	struct nldr_attrs nldr_attrs_obj;
	int status = 0;
	u8 dev_type;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(node_man != NULL);
	DBC_REQUIRE(hdev_obj != NULL);

	*node_man = NULL;
	/* Allocate Node manager object */
	node_mgr_obj = kzalloc(sizeof(struct node_mgr), GFP_KERNEL);
	if (!node_mgr_obj)
		return -ENOMEM;

	node_mgr_obj->dev_obj = hdev_obj;

	node_mgr_obj->ntfy_obj = kmalloc(sizeof(struct ntfy_object),
					 GFP_KERNEL);
	if (!node_mgr_obj->ntfy_obj) {
		status = -ENOMEM;
		goto out_err;
	}
	ntfy_init(node_mgr_obj->ntfy_obj);

	INIT_LIST_HEAD(&node_mgr_obj->node_list);

	dev_get_dev_type(hdev_obj, &dev_type);

	status = dcd_create_manager(sz_zl_file, &node_mgr_obj->dcd_mgr);
	if (status)
		goto out_err;

	status = get_proc_props(node_mgr_obj, hdev_obj);
	if (status)
		goto out_err;

	/* Create NODE Dispatcher */
	disp_attr_obj.chnl_offset = node_mgr_obj->chnl_offset;
	disp_attr_obj.chnl_buf_size = node_mgr_obj->chnl_buf_size;
	disp_attr_obj.proc_family = node_mgr_obj->proc_family;
	disp_attr_obj.proc_type = node_mgr_obj->proc_type;

	status = disp_create(&node_mgr_obj->disp_obj, hdev_obj, &disp_attr_obj);
	if (status)
		goto out_err;

	/* Create a STRM Manager */
	status = strm_create(&node_mgr_obj->strm_mgr_obj, hdev_obj);
	if (status)
		goto out_err;

	dev_get_intf_fxns(hdev_obj, &node_mgr_obj->intf_fxns);
	/* Get msg_ctrl queue manager */
	dev_get_msg_mgr(hdev_obj, &node_mgr_obj->msg_mgr_obj);
	mutex_init(&node_mgr_obj->node_mgr_lock);

	/* Block out reserved channels */
	for (i = 0; i < node_mgr_obj->chnl_offset; i++)
		set_bit(i, node_mgr_obj->chnl_map);

	/* Block out channels reserved for RMS */
	set_bit(node_mgr_obj->chnl_offset, node_mgr_obj->chnl_map);
	set_bit(node_mgr_obj->chnl_offset + 1, node_mgr_obj->chnl_map);

	/* NO RM Server on the IVA */
	if (dev_type != IVA_UNIT) {
		/* Get addresses of any RMS functions loaded */
		status = get_rms_fxns(node_mgr_obj);
		if (status)
			goto out_err;
	}

	/* Get loader functions and create loader */
	node_mgr_obj->nldr_fxns = nldr_fxns;	/* Dyn loader funcs */

	nldr_attrs_obj.ovly = ovly;
	nldr_attrs_obj.write = mem_write;
	nldr_attrs_obj.dsp_word_size = node_mgr_obj->dsp_word_size;
	nldr_attrs_obj.dsp_mau_size = node_mgr_obj->dsp_mau_size;
	/* NOTE(review): init() return value is stored but not checked
	 * here — confirm failure is handled by nldr create below. */
	node_mgr_obj->loader_init = node_mgr_obj->nldr_fxns.init();
	status = node_mgr_obj->nldr_fxns.create(&node_mgr_obj->nldr_obj,
						hdev_obj,
						&nldr_attrs_obj);
	if (status)
		goto out_err;

	*node_man = node_mgr_obj;

	DBC_ENSURE((status && *node_man == NULL) || (!status && *node_man));

	return status;
out_err:
	/* delete_node_mgr() tears down whatever was set up above. */
	delete_node_mgr(node_mgr_obj);
	return status;
}
/*
* ======== node_delete ========
* Purpose:
* Delete a node on the DSP by remotely calling the node's delete function.
* Loads the node's delete function if necessary. Free GPP side resources
* after node's delete function returns.
*/
int node_delete(struct node_res_object *noderes,
		struct process_context *pr_ctxt)
{
	struct node_object *pnode = noderes->node;
	struct node_mgr *hnode_mgr;
	struct proc_object *hprocessor;
	struct disp_object *disp_obj;
	u32 ul_delete_fxn;
	enum node_type node_type;
	enum node_state state;
	int status = 0;
	int status1 = 0;
	struct dsp_cbdata cb_data;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;
	void *node_res = noderes;
	struct dsp_processorstate proc_state;

	DBC_REQUIRE(refs > 0);

	if (!pnode) {
		status = -EFAULT;
		goto func_end;
	}
	/* create struct dsp_cbdata struct for PWR call */
	cb_data.cb_data = PWR_TIMEOUT;
	hnode_mgr = pnode->node_mgr;
	hprocessor = pnode->processor;
	disp_obj = hnode_mgr->disp_obj;
	node_type = node_get_type(pnode);
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);

	state = node_get_state(pnode);
	/* Execute delete phase code for non-device node in all cases
	 * except when the node was only allocated. Delete phase must be
	 * executed even if create phase was executed, but failed.
	 * If the node environment pointer is non-NULL, the delete phase
	 * code must be executed. */
	if (!(state == NODE_ALLOCATED && pnode->node_env == (u32) NULL) &&
	    node_type != NODE_DEVICE) {
		status = proc_get_processor_id(pnode->processor, &proc_id);
		if (status)
			goto func_cont1;

		if (proc_id == DSP_UNIT || proc_id == IVA_UNIT) {
			/* If node has terminated, execute phase code will
			 * have already been unloaded in node_on_exit(). If the
			 * node is PAUSED, the execute phase is loaded, and it
			 * is now ok to unload it. If the node is running, we
			 * will unload the execute phase only after deleting
			 * the node. */
			if (state == NODE_PAUSED && pnode->loaded &&
			    pnode->phase_split) {
				/* Ok to unload execute code as long as node
				 * is not * running */
				status1 =
				    hnode_mgr->nldr_fxns.
				    unload(pnode->nldr_node_obj,
					   NLDR_EXECUTE);
				pnode->loaded = false;
				NODE_SET_STATE(pnode, NODE_DONE);
			}
			/* Load delete phase code if not loaded or if haven't
			 * * unloaded EXECUTE phase */
			if ((!(pnode->loaded) || (state == NODE_RUNNING)) &&
			    pnode->phase_split) {
				status =
				    hnode_mgr->nldr_fxns.
				    load(pnode->nldr_node_obj, NLDR_DELETE);
				if (!status)
					pnode->loaded = true;
				else
					pr_err("%s: fail - load delete code:"
					       " 0x%x\n", __func__, status);
			}
		}
func_cont1:
		if (!status) {
			/* Unblock a thread trying to terminate the node */
			(void)sync_set_event(pnode->sync_done);
			if (proc_id == DSP_UNIT) {
				/* ul_delete_fxn = address of node's delete
				 * function */
				status = get_fxn_address(pnode, &ul_delete_fxn,
							 DELETEPHASE);
			} else if (proc_id == IVA_UNIT)
				ul_delete_fxn = (u32) pnode->node_env;
			if (!status) {
				status = proc_get_state(hprocessor,
						&proc_state,
						sizeof(struct
						       dsp_processorstate));
				if (proc_state.proc_state != PROC_ERROR) {
					/* Remotely run the node's delete
					 * phase via the dispatcher. */
					status =
					    disp_node_delete(disp_obj, pnode,
							     hnode_mgr->
							     fxn_addrs
							     [RMSDELETENODE],
							     ul_delete_fxn,
							     pnode->node_env);
				} else
					NODE_SET_STATE(pnode, NODE_DONE);

				/* Unload execute, if not unloaded, and delete
				 * function */
				if (state == NODE_RUNNING &&
				    pnode->phase_split) {
					status1 =
					    hnode_mgr->nldr_fxns.
					    unload(pnode->nldr_node_obj,
						   NLDR_EXECUTE);
				}
				if (status1)
					pr_err("%s: fail - unload execute code:"
					       " 0x%x\n", __func__, status1);

				status1 =
				    hnode_mgr->nldr_fxns.unload(pnode->
								nldr_node_obj,
								NLDR_DELETE);
				pnode->loaded = false;
				if (status1)
					pr_err("%s: fail - unload delete code: "
					       "0x%x\n", __func__, status1);
			}
		}
	}
	/* Free host side resources even if a failure occurred */
	/* Remove node from hnode_mgr->node_list */
	list_del(&pnode->list_elem);
	hnode_mgr->num_nodes--;
	/* Decrement count of nodes created on DSP */
	if ((state != NODE_ALLOCATED) || ((state == NODE_ALLOCATED) &&
					  (pnode->node_env != (u32) NULL)))
		hnode_mgr->num_created--;
	/* Free host-side resources allocated by node_create()
	 * delete_node() fails if SM buffers not freed by client! */

	drv_proc_node_update_status(node_res, false);
	delete_node(pnode, pr_ctxt);

	/*
	 * Release all Node resources and its context
	 */
	idr_remove(pr_ctxt->node_id, ((struct node_res_object *)node_res)->id);
	kfree(node_res);

	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	proc_notify_clients(hprocessor, DSP_NODESTATECHANGE);
func_end:
	dev_dbg(bridge, "%s: pnode: %p status 0x%x\n", __func__, pnode, status);
	return status;
}
/*
* ======== node_delete_mgr ========
* Purpose:
* Delete the NODE Manager.
*/
int node_delete_mgr(struct node_mgr *hnode_mgr)
{
	DBC_REQUIRE(refs > 0);

	/* A NULL manager handle is the only failure mode. */
	if (hnode_mgr) {
		delete_node_mgr(hnode_mgr);
		return 0;
	}

	return -EFAULT;
}
/*
* ======== node_enum_nodes ========
* Purpose:
* Enumerate currently allocated nodes.
*/
int node_enum_nodes(struct node_mgr *hnode_mgr, void **node_tab,
		    u32 node_tab_size, u32 *pu_num_nodes,
		    u32 *pu_allocated)
{
	struct node_object *node_obj;
	u32 idx = 0;
	int status = 0;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(node_tab != NULL || node_tab_size == 0);
	DBC_REQUIRE(pu_num_nodes != NULL);
	DBC_REQUIRE(pu_allocated != NULL);

	if (!hnode_mgr)
		return -EFAULT;

	/* Walk the allocated-node list under the manager lock. */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	if (hnode_mgr->num_nodes > node_tab_size) {
		/* Caller's table is too small: report only the count
		 * needed, copy nothing. */
		*pu_allocated = hnode_mgr->num_nodes;
		*pu_num_nodes = 0;
		status = -EINVAL;
	} else {
		list_for_each_entry(node_obj, &hnode_mgr->node_list,
				    list_elem)
			node_tab[idx++] = node_obj;
		*pu_allocated = *pu_num_nodes = hnode_mgr->num_nodes;
	}
	mutex_unlock(&hnode_mgr->node_mgr_lock);

	return status;
}
/*
* ======== node_exit ========
* Purpose:
* Discontinue usage of NODE module.
*/
void node_exit(void)
{
	DBC_REQUIRE(refs > 0);

	/* Drop one module reference. */
	refs -= 1;

	DBC_ENSURE(refs >= 0);
}
/*
 *  ======== node_free_msg_buf ========
 *  Purpose:
 *      Frees the message buffer.
 *  Parameters:
 *      hnode:      Node handle the buffer was allocated for.
 *      pbuffer:    Client virtual address of the buffer to free.
 *      pattr:      Buffer attributes; NULL selects node_dfltbufattrs.
 *  Returns:
 *      0 on success, negative errno on failure (-EBADR for a segment
 *      other than the single supported SM segment).
 */
int node_free_msg_buf(struct node_object *hnode, u8 * pbuffer,
		      struct dsp_bufferattr *pattr)
{
	struct node_object *pnode = (struct node_object *)hnode;
	int status = 0;
	u32 proc_id;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pbuffer != NULL);
	DBC_REQUIRE(pnode != NULL);
	DBC_REQUIRE(pnode->xlator != NULL);

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}

	status = proc_get_processor_id(pnode->processor, &proc_id);
	if (status) {
		/* proc_id is not valid when the lookup fails; don't
		 * read it uninitialized. */
		goto func_end;
	}
	if (proc_id != DSP_UNIT) {
		DBC_ASSERT(NULL);	/* BUG */
		goto func_end;
	}

	if (pattr == NULL) {
		/* set defaults */
		pattr = &node_dfltbufattrs;
	}
	/* Node supports single SM segment only; report the bad segment
	 * instead of silently overwriting the error with the free
	 * result (previous behavior lost the -EBADR). */
	if (pattr->segment_id != 1) {
		status = -EBADR;
		goto func_end;
	}
	/* pbuffer is clients Va. */
	status = cmm_xlator_free_buf(pnode->xlator, pbuffer);
func_end:
	return status;
}
/*
* ======== node_get_attr ========
* Purpose:
* Copy the current attributes of the specified node into a dsp_nodeattr
* structure.
*/
int node_get_attr(struct node_object *hnode,
		  struct dsp_nodeattr *pattr, u32 attr_size)
{
	struct node_mgr *hnode_mgr;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pattr != NULL);
	DBC_REQUIRE(attr_size >= sizeof(struct dsp_nodeattr));

	if (!hnode)
		return -EFAULT;

	hnode_mgr = hnode->node_mgr;
	/* Enter hnode_mgr critical section (since we're accessing
	 * data that could be changed by node_change_priority() and
	 * node_connect(). */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	pattr->cb_struct = sizeof(struct dsp_nodeattr);
	/* dsp_nodeattrin: snapshot of the node's input attributes */
	pattr->in_node_attr_in.cb_struct =
	    sizeof(struct dsp_nodeattrin);
	pattr->in_node_attr_in.prio = hnode->prio;
	pattr->in_node_attr_in.timeout = hnode->timeout;
	pattr->in_node_attr_in.heap_size =
	    hnode->create_args.asa.task_arg_obj.heap_size;
	pattr->in_node_attr_in.pgpp_virt_addr = (void *)
	    hnode->create_args.asa.task_arg_obj.gpp_heap_addr;
	/* GPP-side stream counts */
	pattr->node_attr_inputs = hnode->num_gpp_inputs;
	pattr->node_attr_outputs = hnode->num_gpp_outputs;
	/* dsp_nodeinfo */
	get_node_info(hnode, &(pattr->node_info));
	/* end of sync_enter_cs */
	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	return 0;
}
/*
* ======== node_get_channel_id ========
* Purpose:
* Get the channel index reserved for a stream connection between the
* host and a node.
*/
int node_get_channel_id(struct node_object *hnode, u32 dir, u32 index,
			u32 *chan_id)
{
	enum node_type node_type;
	int status = -EINVAL;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(dir == DSP_TONODE || dir == DSP_FROMNODE);
	DBC_REQUIRE(chan_id != NULL);

	if (!hnode)
		return -EFAULT;

	/* Only task/DAIS socket nodes have host stream connections. */
	node_type = node_get_type(hnode);
	if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
		return -EPERM;

	if (dir == DSP_TONODE) {
		/* Look up the channel behind the requested input stream. */
		if (index < MAX_INPUTS(hnode) &&
		    hnode->inputs[index].type == HOSTCONNECT) {
			*chan_id = hnode->inputs[index].dev_id;
			status = 0;
		}
	} else {
		DBC_ASSERT(dir == DSP_FROMNODE);
		/* Look up the channel behind the requested output stream. */
		if (index < MAX_OUTPUTS(hnode) &&
		    hnode->outputs[index].type == HOSTCONNECT) {
			*chan_id = hnode->outputs[index].dev_id;
			status = 0;
		}
	}

	return status;
}
/*
* ======== node_get_message ========
* Purpose:
* Retrieve a message from a node on the DSP.
*/
int node_get_message(struct node_object *hnode,
		     struct dsp_msg *message, u32 utimeout)
{
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	void *tmp_buf;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(message != NULL);

	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to get the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	hnode_mgr = hnode->node_mgr;
	node_type = node_get_type(hnode);
	/* Only message-capable node types may receive messages. */
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET) {
		status = -EPERM;
		goto func_end;
	}
	/* This function will block unless a message is available. Since
	 * DSPNode_RegisterNotify() allows notification when a message
	 * is available, the system can be designed so that
	 * DSPNode_GetMessage() is only called when a message is
	 * available. */
	intf_fxns = hnode_mgr->intf_fxns;
	status =
	    (*intf_fxns->msg_get) (hnode->msg_queue_obj, message, utimeout);
	/* Check if message contains SM descriptor */
	if (status || !(message->cmd & DSP_RMSBUFDESC))
		goto func_end;

	/* Translate DSP byte addr to GPP Va. */
	tmp_buf = cmm_xlator_translate(hnode->xlator,
				       (void *)(message->arg1 *
						hnode->node_mgr->
						dsp_word_size), CMM_DSPPA2PA);
	if (tmp_buf != NULL) {
		/* now convert this GPP Pa to Va */
		tmp_buf = cmm_xlator_translate(hnode->xlator, tmp_buf,
					       CMM_PA2VA);
		if (tmp_buf != NULL) {
			/* Adjust SM size in msg: rewrite arg1 with the GPP
			 * virtual address and scale arg2 to bytes. */
			message->arg1 = (u32) tmp_buf;
			message->arg2 *= hnode->node_mgr->dsp_word_size;
		} else {
			status = -ESRCH;
		}
	} else {
		status = -ESRCH;
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p message: %p utimeout: 0x%x\n", __func__,
		hnode, message, utimeout);
	return status;
}
/*
* ======== node_get_nldr_obj ========
*/
int node_get_nldr_obj(struct node_mgr *hnode_mgr,
		      struct nldr_object **nldr_ovlyobj)
{
	int status = 0;

	DBC_REQUIRE(nldr_ovlyobj != NULL);

	/* Hand back the manager's loader object, if there is a manager. */
	if (hnode_mgr)
		*nldr_ovlyobj = hnode_mgr->nldr_obj;
	else
		status = -EFAULT;

	DBC_ENSURE(!status || (nldr_ovlyobj != NULL && *nldr_ovlyobj == NULL));
	return status;
}
/*
* ======== node_get_strm_mgr ========
* Purpose:
* Returns the Stream manager.
*/
int node_get_strm_mgr(struct node_object *hnode,
		      struct strm_mgr **strm_man)
{
	DBC_REQUIRE(refs > 0);

	if (!hnode)
		return -EFAULT;

	/* The stream manager lives on the node's manager. */
	*strm_man = hnode->node_mgr->strm_mgr_obj;
	return 0;
}
/*
* ======== node_get_load_type ========
*/
enum nldr_loadtype node_get_load_type(struct node_object *hnode)
{
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hnode);

	if (hnode)
		return hnode->dcd_props.obj_data.node_obj.load_type;

	/* NULL handle: log and signal failure with -1. */
	dev_dbg(bridge, "%s: Failed. hnode: %p\n", __func__, hnode);
	return -1;
}
/*
* ======== node_get_timeout ========
* Purpose:
* Returns the timeout value for this node.
*/
u32 node_get_timeout(struct node_object *hnode)
{
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hnode);

	if (hnode)
		return hnode->timeout;

	/* NULL handle: log and report a zero timeout. */
	dev_dbg(bridge, "%s: failed. hnode: %p\n", __func__, hnode);
	return 0;
}
/*
* ======== node_get_type ========
* Purpose:
* Returns the node type.
*/
enum node_type node_get_type(struct node_object *hnode)
{
	/* The GPP pseudo-handle maps to NODE_GPP. */
	if (hnode == (struct node_object *)DSP_HGPPNODE)
		return NODE_GPP;

	/* NULL is reported as -1, any real node yields its stored type. */
	if (!hnode)
		return -1;

	return hnode->ntype;
}
/*
* ======== node_init ========
* Purpose:
* Initialize the NODE module.
*/
bool node_init(void)
{
	DBC_REQUIRE(refs >= 0);

	/* Take one module reference; initialization cannot fail. */
	refs += 1;

	return true;
}
/*
* ======== node_on_exit ========
* Purpose:
* Gets called when RMS_EXIT is received for a node.
*/
void node_on_exit(struct node_object *hnode, s32 node_status)
{
	if (!hnode)
		return;

	/* Set node state to done */
	NODE_SET_STATE(hnode, NODE_DONE);
	hnode->exit_status = node_status;
	if (hnode->loaded && hnode->phase_split) {
		/* The execute phase is no longer needed once the node has
		 * exited; unload it (best effort, result ignored). */
		(void)hnode->node_mgr->nldr_fxns.unload(hnode->
							nldr_node_obj,
							NLDR_EXECUTE);
		hnode->loaded = false;
	}
	/* Unblock call to node_terminate */
	(void)sync_set_event(hnode->sync_done);
	/* Notify clients */
	proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
	ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
}
/*
 *  ======== node_pause ========
 *  Purpose:
 *      Suspend execution of a node currently running on the DSP. The node
 *      must be a task or DAIS socket node in the RUNNING state; pausing is
 *      implemented by dropping the node to NODE_SUSPENDEDPRI on the DSP.
 *  Parameters:
 *      hnode:  Node handle.
 *  Returns:
 *      0 on success, negative errno on failure (-ENOSYS on IVA).
 */
int node_pause(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	enum node_type node_type;
	enum node_state state;
	struct node_mgr *hnode_mgr;
	int status = 0;
	u32 proc_id;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;

	DBC_REQUIRE(refs > 0);

	if (!hnode) {
		status = -EFAULT;
	} else {
		node_type = node_get_type(hnode);
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (status)
		goto func_end;

	status = proc_get_processor_id(pnode->processor, &proc_id);
	if (status) {
		/* proc_id is not valid when the lookup fails; bail out
		 * instead of reading an uninitialized value. */
		goto func_end;
	}
	if (proc_id == IVA_UNIT)
		status = -ENOSYS;

	if (!status) {
		hnode_mgr = hnode->node_mgr;
		/* Enter critical section */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		/* Check node state: only a running node can be paused. */
		if (state != NODE_RUNNING)
			status = -EBADR;

		if (status)
			goto func_cont;
		hprocessor = hnode->processor;
		status = proc_get_state(hprocessor, &proc_state,
					sizeof(struct dsp_processorstate));
		if (status)
			goto func_cont;
		/* If processor is in error state then don't attempt
		   to send the message */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}

		/* Pause by lowering the node to the suspended priority on
		 * the DSP side. */
		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
			hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY],
			hnode->node_env, NODE_SUSPENDEDPRI);

		/* Update state */
		if (status >= 0)
			NODE_SET_STATE(hnode, NODE_PAUSED);

func_cont:
		/* End of sync_enter_cs */
		/* Leave critical section */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
		if (status >= 0) {
			proc_notify_clients(hnode->processor,
					    DSP_NODESTATECHANGE);
			ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
		}
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
/*
* ======== node_put_message ========
* Purpose:
* Send a message to a message node, task node, or XDAIS socket node. This
* function will block until the message stream can accommodate the
* message, or a timeout occurs.
*/
int node_put_message(struct node_object *hnode,
		     const struct dsp_msg *pmsg, u32 utimeout)
{
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	int status = 0;
	void *tmp_buf;
	struct dsp_msg new_msg;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pmsg != NULL);
	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	/* Refuse to talk to a processor that is in an error state. */
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in bad state then don't attempt sending the
	   message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* Only message, task and XDAIS socket nodes accept messages. */
	hnode_mgr = hnode->node_mgr;
	node_type = node_get_type(hnode);
	if (node_type != NODE_MESSAGE && node_type != NODE_TASK &&
	    node_type != NODE_DAISSOCKET)
		status = -EPERM;
	if (!status) {
		/* Check node state. Can't send messages to a node after
		 * we've sent the RMS_EXIT command. There is still the
		 * possibility that node_terminate can be called after we've
		 * checked the state. Could add another SYNC object to
		 * prevent this (can't use node_mgr_lock, since we don't
		 * want to block other NODE functions). However, the node may
		 * still exit on its own, before this message is sent. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state == NODE_TERMINATING || state == NODE_DONE)
			status = -EBADR;
		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (status)
		goto func_end;
	/* assign pmsg values to new msg */
	new_msg = *pmsg;
	/* Now, check if message contains a SM buffer descriptor */
	if (pmsg->cmd & DSP_RMSBUFDESC) {
		/* Translate GPP Va to DSP physical buf Ptr. */
		tmp_buf = cmm_xlator_translate(hnode->xlator,
					       (void *)new_msg.arg1,
					       CMM_VA2DSPPA);
		if (tmp_buf != NULL) {
			/* got translation, convert to MAUs in msg */
			if (hnode->node_mgr->dsp_word_size != 0) {
				new_msg.arg1 =
				    (u32) tmp_buf /
				    hnode->node_mgr->dsp_word_size;
				/* MAUs */
				new_msg.arg2 /= hnode->node_mgr->
				    dsp_word_size;
			} else {
				pr_err("%s: dsp_word_size is zero!\n",
				       __func__);
				status = -EPERM;	/* bad DSPWordSize */
			}
		} else {	/* failed to translate buffer address */
			status = -ESRCH;
		}
	}
	if (!status) {
		/* Hand the (possibly translated) message to the bridge
		 * driver's message queue; blocks up to utimeout (presumably
		 * milliseconds -- confirm against msg_put implementation). */
		intf_fxns = hnode_mgr->intf_fxns;
		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj,
						&new_msg, utimeout);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p pmsg: %p utimeout: 0x%x, "
		"status 0x%x\n", __func__, hnode, pmsg, utimeout, status);
	return status;
}
/*
* ======== node_register_notify ========
* Purpose:
* Register to be notified on specific events for this node.
*/
int node_register_notify(struct node_object *hnode, u32 event_mask,
			 u32 notify_type,
			 struct dsp_notification *hnotification)
{
	struct bridge_drv_interface *intf_fxns;
	int status = 0;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hnotification != NULL);
	if (!hnode) {
		status = -EFAULT;
	} else {
		/* Check if event mask is a valid node related event */
		if (event_mask & ~(DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
			status = -EINVAL;
		/* Check if notify type is valid */
		if (notify_type != DSP_SIGNALEVENT)
			status = -EINVAL;
		/* Only one Notification can be registered at a
		 * time - Limitation */
		if (event_mask == (DSP_NODESTATECHANGE | DSP_NODEMESSAGEREADY))
			status = -EINVAL;
	}
	if (!status) {
		/* State-change notifications are handled locally via ntfy;
		 * message-ready notifications are delegated to the bridge
		 * driver's message module. */
		if (event_mask == DSP_NODESTATECHANGE) {
			status = ntfy_register(hnode->ntfy_obj, hnotification,
					       event_mask & DSP_NODESTATECHANGE,
					       notify_type);
		} else {
			/* Send Message part of event mask to msg_ctrl */
			intf_fxns = hnode->node_mgr->intf_fxns;
			status = (*intf_fxns->msg_register_notify)
			    (hnode->msg_queue_obj,
			     event_mask & DSP_NODEMESSAGEREADY, notify_type,
			     hnotification);
		}
	}
	dev_dbg(bridge, "%s: hnode: %p event_mask: 0x%x notify_type: 0x%x "
		"hnotification: %p status 0x%x\n", __func__, hnode,
		event_mask, notify_type, hnotification, status);
	return status;
}
/*
* ======== node_run ========
* Purpose:
* Start execution of a node's execute phase, or resume execution of a node
* that has been suspended (via NODE_NodePause()) on the DSP. Load the
* node's execute function if necessary.
*/
int node_run(struct node_object *hnode)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr;
	enum node_type node_type;
	enum node_state state;
	u32 ul_execute_fxn;
	u32 ul_fxn_addr;
	int status = 0;
	u32 proc_id;
	struct bridge_drv_interface *intf_fxns;
	struct dsp_processorstate proc_state;
	struct proc_object *hprocessor;
	DBC_REQUIRE(refs > 0);
	if (!hnode) {
		status = -EFAULT;
		goto func_end;
	}
	hprocessor = hnode->processor;
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt to run the node */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	/* Device nodes have no execute phase to run. */
	node_type = node_get_type(hnode);
	if (node_type == NODE_DEVICE)
		status = -EPERM;
	if (status)
		goto func_end;
	hnode_mgr = hnode->node_mgr;
	if (!hnode_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	intf_fxns = hnode_mgr->intf_fxns;
	/* Enter critical section */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	/* Only a freshly created or a paused node can be (re)started. */
	state = node_get_state(hnode);
	if (state != NODE_CREATED && state != NODE_PAUSED)
		status = -EBADR;
	if (!status)
		status = proc_get_processor_id(pnode->processor, &proc_id);
	if (status)
		goto func_cont1;
	if ((proc_id != DSP_UNIT) && (proc_id != IVA_UNIT))
		goto func_cont1;
	if (state == NODE_CREATED) {
		/* If node's execute function is not loaded, load it */
		if (!(hnode->loaded) && hnode->phase_split) {
			status =
			    hnode_mgr->nldr_fxns.load(hnode->nldr_node_obj,
						      NLDR_EXECUTE);
			if (!status) {
				hnode->loaded = true;
			} else {
				pr_err("%s: fail - load execute code: 0x%x\n",
				       __func__, status);
			}
		}
		if (!status) {
			/* Get address of node's execute function */
			if (proc_id == IVA_UNIT)
				ul_execute_fxn = (u32) hnode->node_env;
			else {
				status = get_fxn_address(hnode, &ul_execute_fxn,
							 EXECUTEPHASE);
			}
		}
		if (!status) {
			/* Ask the RMS server on the DSP to start the node. */
			ul_fxn_addr = hnode_mgr->fxn_addrs[RMSEXECUTENODE];
			status =
			    disp_node_run(hnode_mgr->disp_obj, hnode,
					  ul_fxn_addr, ul_execute_fxn,
					  hnode->node_env);
		}
	} else if (state == NODE_PAUSED) {
		/* Resume: restore the node's own priority via RMS. */
		ul_fxn_addr = hnode_mgr->fxn_addrs[RMSCHANGENODEPRIORITY];
		status = disp_node_change_priority(hnode_mgr->disp_obj, hnode,
						   ul_fxn_addr, hnode->node_env,
						   NODE_GET_PRIORITY(hnode));
	} else {
		/* We should never get here */
		DBC_ASSERT(false);
	}
func_cont1:
	/* Update node state. */
	if (status >= 0)
		NODE_SET_STATE(hnode, NODE_RUNNING);
	else		/* Set state back to previous value */
		NODE_SET_STATE(hnode, state);
	/*End of sync_enter_cs */
	/* Exit critical section */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
	if (status >= 0) {
		proc_notify_clients(hnode->processor, DSP_NODESTATECHANGE);
		ntfy_notify(hnode->ntfy_obj, DSP_NODESTATECHANGE);
	}
func_end:
	dev_dbg(bridge, "%s: hnode: %p status 0x%x\n", __func__, hnode, status);
	return status;
}
/*
* ======== node_terminate ========
* Purpose:
* Signal a node running on the DSP that it should exit its execute phase
* function.
*/
int node_terminate(struct node_object *hnode, int *pstatus)
{
	struct node_object *pnode = (struct node_object *)hnode;
	struct node_mgr *hnode_mgr = NULL;
	enum node_type node_type;
	struct bridge_drv_interface *intf_fxns;
	enum node_state state;
	struct dsp_msg msg, killmsg;
	int status = 0;
	u32 proc_id, kill_time_out;
	struct deh_mgr *hdeh_mgr;
	struct dsp_processorstate proc_state;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(pstatus != NULL);
	if (!hnode || !hnode->node_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	if (pnode->processor == NULL) {
		status = -EFAULT;
		goto func_end;
	}
	status = proc_get_processor_id(pnode->processor, &proc_id);
	if (!status) {
		/* Only task and XDAIS socket nodes can be terminated. */
		hnode_mgr = hnode->node_mgr;
		node_type = node_get_type(hnode);
		if (node_type != NODE_TASK && node_type != NODE_DAISSOCKET)
			status = -EPERM;
	}
	if (!status) {
		/* Check node state */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		state = node_get_state(hnode);
		if (state != NODE_RUNNING) {
			status = -EBADR;
			/* Set the exit status if node terminated on
			 * its own. */
			if (state == NODE_DONE)
				*pstatus = hnode->exit_status;
		} else {
			NODE_SET_STATE(hnode, NODE_TERMINATING);
		}
		/* end of sync_enter_cs */
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}
	if (!status) {
		/*
		 * Send exit message. Do not change state to NODE_DONE
		 * here. That will be done in callback.
		 */
		status = proc_get_state(pnode->processor, &proc_state,
					sizeof(struct dsp_processorstate));
		if (status)
			goto func_cont;
		/* If processor is in error state then don't attempt to send
		 * A kill task command */
		if (proc_state.proc_state == PROC_ERROR) {
			status = -EPERM;
			goto func_cont;
		}
		/* Two-stage shutdown: first a cooperative RMS_EXIT, then a
		 * forced RMS_KILLTASK if the node does not exit in time. */
		msg.cmd = RMS_EXIT;
		msg.arg1 = hnode->node_env;
		killmsg.cmd = RMS_KILLTASK;
		killmsg.arg1 = hnode->node_env;
		intf_fxns = hnode_mgr->intf_fxns;
		if (hnode->timeout > MAXTIMEOUT)
			kill_time_out = MAXTIMEOUT;
		else
			kill_time_out = (hnode->timeout) * 2;
		status = (*intf_fxns->msg_put) (hnode->msg_queue_obj, &msg,
						hnode->timeout);
		if (status)
			goto func_cont;
		/*
		 * Wait on synchronization object that will be
		 * posted in the callback on receiving RMS_EXIT
		 * message, or by node_delete. Check for valid hnode,
		 * in case posted by node_delete().
		 */
		/* NOTE(review): this relies on sync_wait_on_event()
		 * returning positive ETIME on timeout -- confirm against
		 * its implementation. */
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		if (status != ETIME)
			goto func_cont;
		status = (*intf_fxns->msg_put)(hnode->msg_queue_obj,
						&killmsg, hnode->timeout);
		if (status)
			goto func_cont;
		status = sync_wait_on_event(hnode->sync_done,
					    kill_time_out / 2);
		if (status) {
			/*
			 * Here it goes the part of the simulation of
			 * the DSP exception.
			 */
			dev_get_deh_mgr(hnode_mgr->dev_obj, &hdeh_mgr);
			if (!hdeh_mgr)
				goto func_cont;
			bridge_deh_notify(hdeh_mgr, DSP_SYSERROR, DSP_EXCEPTIONABORT);
		}
	}
func_cont:
	if (!status) {
		/* Enter CS before getting exit status, in case node was
		 * deleted. */
		mutex_lock(&hnode_mgr->node_mgr_lock);
		/* Make sure node wasn't deleted while we blocked */
		/* NOTE(review): hnode is a local copy of the caller's
		 * pointer, so this NULL check cannot observe a concurrent
		 * node_delete() -- the guard is ineffective as written. */
		if (!hnode) {
			status = -EPERM;
		} else {
			*pstatus = hnode->exit_status;
			dev_dbg(bridge, "%s: hnode: %p env 0x%x status 0x%x\n",
				__func__, hnode, hnode->node_env, status);
		}
		mutex_unlock(&hnode_mgr->node_mgr_lock);
	}			/*End of sync_enter_cs */
func_end:
	return status;
}
/*
* ======== delete_node ========
* Purpose:
* Free GPP resources allocated in node_allocate() or node_connect().
*/
static void delete_node(struct node_object *hnode,
			struct process_context *pr_ctxt)
{
	struct node_mgr *hnode_mgr;
	struct bridge_drv_interface *intf_fxns;
	u32 i;
	enum node_type node_type;
	struct stream_chnl stream;
	struct node_msgargs node_msg_args;
	struct node_taskargs task_arg_obj;
#ifdef DSP_DMM_DEBUG
	struct dmm_object *dmm_mgr;
	struct proc_object *p_proc_object =
	    (struct proc_object *)hnode->processor;
#endif
	int status;
	if (!hnode)
		goto func_end;
	hnode_mgr = hnode->node_mgr;
	if (!hnode_mgr)
		goto func_end;
	node_type = node_get_type(hnode);
	/* Device nodes have no message queue, streams or task args. */
	if (node_type != NODE_DEVICE) {
		node_msg_args = hnode->create_args.asa.node_msg_args;
		kfree(node_msg_args.pdata);
		/* Free msg_ctrl queue */
		if (hnode->msg_queue_obj) {
			intf_fxns = hnode_mgr->intf_fxns;
			(*intf_fxns->msg_delete_queue) (hnode->
							msg_queue_obj);
			hnode->msg_queue_obj = NULL;
		}
		kfree(hnode->sync_done);
		/* Free all stream info */
		if (hnode->inputs) {
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				stream = hnode->inputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->inputs);
			hnode->inputs = NULL;
		}
		if (hnode->outputs) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				stream = hnode->outputs[i];
				free_stream(hnode_mgr, stream);
			}
			kfree(hnode->outputs);
			hnode->outputs = NULL;
		}
		/* Free per-stream device-name strings, then the stream
		 * definition arrays themselves. */
		task_arg_obj = hnode->create_args.asa.task_arg_obj;
		if (task_arg_obj.strm_in_def) {
			for (i = 0; i < MAX_INPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_in_def[i].sz_device);
				task_arg_obj.strm_in_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_in_def);
			task_arg_obj.strm_in_def = NULL;
		}
		if (task_arg_obj.strm_out_def) {
			for (i = 0; i < MAX_OUTPUTS(hnode); i++) {
				kfree(task_arg_obj.strm_out_def[i].sz_device);
				task_arg_obj.strm_out_def[i].sz_device = NULL;
			}
			kfree(task_arg_obj.strm_out_def);
			task_arg_obj.strm_out_def = NULL;
		}
		/* Unmap and unreserve the node's DSP heap, if one was set
		 * up.  Failures are ignored: teardown must continue. */
		if (task_arg_obj.dsp_heap_res_addr) {
			status = proc_un_map(hnode->processor, (void *)
					     task_arg_obj.dsp_heap_addr,
					     pr_ctxt);
			status = proc_un_reserve_memory(hnode->processor,
							(void *)
							task_arg_obj.
							dsp_heap_res_addr,
							pr_ctxt);
#ifdef DSP_DMM_DEBUG
			status = dmm_get_handle(p_proc_object, &dmm_mgr);
			if (dmm_mgr)
				dmm_mem_map_dump(dmm_mgr);
			else
				status = DSP_EHANDLE;
#endif
		}
	}
	if (node_type != NODE_MESSAGE) {
		kfree(hnode->stream_connect);
		hnode->stream_connect = NULL;
	}
	kfree(hnode->str_dev_name);
	hnode->str_dev_name = NULL;
	if (hnode->ntfy_obj) {
		ntfy_delete(hnode->ntfy_obj);
		kfree(hnode->ntfy_obj);
		hnode->ntfy_obj = NULL;
	}
	/* These were allocated in dcd_get_object_def (via node_allocate) */
	kfree(hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn = NULL;
	kfree(hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn = NULL;
	kfree(hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn);
	hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn = NULL;
	kfree(hnode->dcd_props.obj_data.node_obj.str_i_alg_name);
	hnode->dcd_props.obj_data.node_obj.str_i_alg_name = NULL;
	/* Free all SM address translator resources */
	kfree(hnode->xlator);
	kfree(hnode->nldr_node_obj);
	hnode->nldr_node_obj = NULL;
	hnode->node_mgr = NULL;
	kfree(hnode);
	hnode = NULL;
func_end:
	return;
}
/*
* ======== delete_node_mgr ========
* Purpose:
* Frees the node manager.
*/
static void delete_node_mgr(struct node_mgr *hnode_mgr)
{
	struct node_object *hnode, *tmp;
	if (hnode_mgr) {
		/* Free resources */
		if (hnode_mgr->dcd_mgr)
			dcd_destroy_manager(hnode_mgr->dcd_mgr);
		/* Remove any elements remaining in lists */
		list_for_each_entry_safe(hnode, tmp, &hnode_mgr->node_list,
				list_elem) {
			list_del(&hnode->list_elem);
			delete_node(hnode, NULL);
		}
		mutex_destroy(&hnode_mgr->node_mgr_lock);
		if (hnode_mgr->ntfy_obj) {
			ntfy_delete(hnode_mgr->ntfy_obj);
			kfree(hnode_mgr->ntfy_obj);
		}
		if (hnode_mgr->disp_obj)
			disp_delete(hnode_mgr->disp_obj);
		if (hnode_mgr->strm_mgr_obj)
			strm_delete(hnode_mgr->strm_mgr_obj);
		/* Delete the loader */
		if (hnode_mgr->nldr_obj)
			hnode_mgr->nldr_fxns.delete(hnode_mgr->nldr_obj);
		/* Drop the loader module reference taken at init time. */
		if (hnode_mgr->loader_init)
			hnode_mgr->nldr_fxns.exit();
		kfree(hnode_mgr);
	}
}
/*
* ======== fill_stream_connect ========
* Purpose:
* Fills stream information.
*/
static void fill_stream_connect(struct node_object *node1,
				struct node_object *node2,
				u32 stream1, u32 stream2)
{
	u32 strm_index;
	struct dsp_streamconnect *strm1 = NULL;
	struct dsp_streamconnect *strm2 = NULL;
	enum node_type node1_type = NODE_TASK;
	enum node_type node2_type = NODE_TASK;
	node1_type = node_get_type(node1);
	node2_type = node_get_type(node2);
	/* node1 is the producer side; the GPP pseudo-handle means the
	 * data comes from / goes to the host instead of another node. */
	if (node1 != (struct node_object *)DSP_HGPPNODE) {
		if (node1_type != NODE_DEVICE) {
			/* The connect entry for the stream just added is the
			 * last occupied slot in node1's connect table. */
			strm_index = node1->num_inputs +
			    node1->num_outputs - 1;
			strm1 = &(node1->stream_connect[strm_index]);
			strm1->cb_struct = sizeof(struct dsp_streamconnect);
			strm1->this_node_stream_index = stream1;
		}
		if (node2 != (struct node_object *)DSP_HGPPNODE) {
			/* NODE == > NODE */
			if (node1_type != NODE_DEVICE) {
				strm1->connected_node = node2;
				strm1->ui_connected_node_id = node2->node_uuid;
				strm1->connected_node_stream_index = stream2;
				strm1->connect_type = CONNECTTYPE_NODEOUTPUT;
			}
			if (node2_type != NODE_DEVICE) {
				strm_index = node2->num_inputs +
				    node2->num_outputs - 1;
				strm2 = &(node2->stream_connect[strm_index]);
				strm2->cb_struct =
				    sizeof(struct dsp_streamconnect);
				strm2->this_node_stream_index = stream2;
				strm2->connected_node = node1;
				strm2->ui_connected_node_id = node1->node_uuid;
				strm2->connected_node_stream_index = stream1;
				strm2->connect_type = CONNECTTYPE_NODEINPUT;
			}
		} else if (node1_type != NODE_DEVICE)
			strm1->connect_type = CONNECTTYPE_GPPOUTPUT;
	} else {
		/* GPP == > NODE */
		DBC_ASSERT(node2 != (struct node_object *)DSP_HGPPNODE);
		strm_index = node2->num_inputs + node2->num_outputs - 1;
		strm2 = &(node2->stream_connect[strm_index]);
		strm2->cb_struct = sizeof(struct dsp_streamconnect);
		strm2->this_node_stream_index = stream2;
		strm2->connect_type = CONNECTTYPE_GPPINPUT;
	}
}
/*
* ======== fill_stream_def ========
* Purpose:
* Fills Stream attributes.
*/
static void fill_stream_def(struct node_object *hnode,
struct node_strmdef *pstrm_def,
struct dsp_strmattr *pattrs)
{
struct node_mgr *hnode_mgr = hnode->node_mgr;
if (pattrs != NULL) {
pstrm_def->num_bufs = pattrs->num_bufs;
pstrm_def->buf_size =
pattrs->buf_size / hnode_mgr->dsp_data_mau_size;
pstrm_def->seg_id = pattrs->seg_id;
pstrm_def->buf_alignment = pattrs->buf_alignment;
pstrm_def->timeout = pattrs->timeout;
} else {
pstrm_def->num_bufs = DEFAULTNBUFS;
pstrm_def->buf_size =
DEFAULTBUFSIZE / hnode_mgr->dsp_data_mau_size;
pstrm_def->seg_id = DEFAULTSEGID;
pstrm_def->buf_alignment = DEFAULTALIGNMENT;
pstrm_def->timeout = DEFAULTTIMEOUT;
}
}
/*
* ======== free_stream ========
* Purpose:
* Updates the channel mask and frees the pipe id.
*/
static void free_stream(struct node_mgr *hnode_mgr, struct stream_chnl stream)
{
/* Free up the pipe id unless other node has not yet been deleted. */
if (stream.type == NODECONNECT) {
if (test_bit(stream.dev_id, hnode_mgr->pipe_done_map)) {
/* The other node has already been deleted */
clear_bit(stream.dev_id, hnode_mgr->pipe_done_map);
clear_bit(stream.dev_id, hnode_mgr->pipe_map);
} else {
/* The other node has not been deleted yet */
set_bit(stream.dev_id, hnode_mgr->pipe_done_map);
}
} else if (stream.type == HOSTCONNECT) {
if (stream.dev_id < hnode_mgr->num_chnls) {
clear_bit(stream.dev_id, hnode_mgr->chnl_map);
} else if (stream.dev_id < (2 * hnode_mgr->num_chnls)) {
/* dsp-dma */
clear_bit(stream.dev_id - (1 * hnode_mgr->num_chnls),
hnode_mgr->dma_chnl_map);
} else if (stream.dev_id < (3 * hnode_mgr->num_chnls)) {
/* zero-copy */
clear_bit(stream.dev_id - (2 * hnode_mgr->num_chnls),
hnode_mgr->zc_chnl_map);
}
}
}
/*
* ======== get_fxn_address ========
* Purpose:
* Retrieves the address for create, execute or delete phase for a node.
*/
static int get_fxn_address(struct node_object *hnode, u32 * fxn_addr,
			   u32 phase)
{
	char *pstr_fxn_name = NULL;
	struct node_mgr *hnode_mgr = hnode->node_mgr;
	int status = 0;
	DBC_REQUIRE(node_get_type(hnode) == NODE_TASK ||
		    node_get_type(hnode) == NODE_DAISSOCKET ||
		    node_get_type(hnode) == NODE_MESSAGE);
	/* Pick the phase-specific symbol name recorded in the node's
	 * DCD properties. */
	switch (phase) {
	case CREATEPHASE:
		pstr_fxn_name =
		    hnode->dcd_props.obj_data.node_obj.str_create_phase_fxn;
		break;
	case EXECUTEPHASE:
		pstr_fxn_name =
		    hnode->dcd_props.obj_data.node_obj.str_execute_phase_fxn;
		break;
	case DELETEPHASE:
		pstr_fxn_name =
		    hnode->dcd_props.obj_data.node_obj.str_delete_phase_fxn;
		break;
	default:
		/* Previously fell through and handed a NULL symbol name to
		 * the loader; reject unknown phases explicitly instead. */
		DBC_ASSERT(false);
		return -EINVAL;
	}
	status =
	    hnode_mgr->nldr_fxns.get_fxn_addr(hnode->nldr_node_obj,
					      pstr_fxn_name, fxn_addr);
	return status;
}
/*
* ======== get_node_info ========
* Purpose:
* Retrieves the node information.
*/
/*
 * Fill a dsp_nodeinfo snapshot from the node object: database
 * properties, priority, owner, stream counts/connections, RMS
 * environment and current execution state.
 */
void get_node_info(struct node_object *hnode, struct dsp_nodeinfo *node_info)
{
	u32 strm;
	u32 num_streams;

	DBC_REQUIRE(hnode);
	DBC_REQUIRE(node_info != NULL);

	num_streams = hnode->num_inputs + hnode->num_outputs;

	node_info->cb_struct = sizeof(struct dsp_nodeinfo);
	node_info->nb_node_database_props =
	    hnode->dcd_props.obj_data.node_obj.ndb_props;
	node_info->execution_priority = hnode->prio;
	node_info->device_owner = hnode->device_owner;
	node_info->number_streams = num_streams;
	node_info->node_env = hnode->node_env;
	node_info->ns_execution_state = node_get_state(hnode);

	/* Copy stream connect data */
	for (strm = 0; strm < num_streams; strm++)
		node_info->sc_stream_connection[strm] =
		    hnode->stream_connect[strm];
}
/*
* ======== get_node_props ========
* Purpose:
* Retrieve node properties.
*/
static int get_node_props(struct dcd_manager *hdcd_mgr,
			  struct node_object *hnode,
			  const struct dsp_uuid *node_uuid,
			  struct dcd_genericobj *dcd_prop)
{
	u32 len;
	struct node_msgargs *pmsg_args;
	struct node_taskargs *task_arg_obj;
	enum node_type node_type = NODE_TASK;
	struct dsp_ndbprops *pndb_props =
	    &(dcd_prop->obj_data.node_obj.ndb_props);
	int status = 0;
	char sz_uuid[MAXUUIDLEN];
	/* Pull the node's definition out of the DCD database. */
	status = dcd_get_object_def(hdcd_mgr, (struct dsp_uuid *)node_uuid,
				    DSP_DCDNODETYPE, dcd_prop);
	if (!status) {
		hnode->ntype = node_type = pndb_props->ntype;
		/* Create UUID value to set in registry. */
		uuid_uuid_to_string((struct dsp_uuid *)node_uuid, sz_uuid,
				    MAXUUIDLEN);
		dev_dbg(bridge, "(node) UUID: %s\n", sz_uuid);
		/* Fill in message args that come from NDB */
		if (node_type != NODE_DEVICE) {
			pmsg_args = &(hnode->create_args.asa.node_msg_args);
			pmsg_args->seg_id =
			    dcd_prop->obj_data.node_obj.msg_segid;
			pmsg_args->notify_type =
			    dcd_prop->obj_data.node_obj.msg_notify_type;
			pmsg_args->max_msgs = pndb_props->message_depth;
			dev_dbg(bridge, "(node) Max Number of Messages: 0x%x\n",
				pmsg_args->max_msgs);
		} else {
			/* Copy device name */
			DBC_REQUIRE(pndb_props->ac_name);
			len = strlen(pndb_props->ac_name);
			DBC_ASSERT(len < MAXDEVNAMELEN);
			/* kzalloc(len + 1) zero-fills, so the copied name is
			 * always NUL-terminated despite strncpy's semantics. */
			hnode->str_dev_name = kzalloc(len + 1, GFP_KERNEL);
			if (hnode->str_dev_name == NULL) {
				status = -ENOMEM;
			} else {
				strncpy(hnode->str_dev_name,
					pndb_props->ac_name, len);
			}
		}
	}
	if (!status) {
		/* Fill in create args that come from NDB */
		if (node_type == NODE_TASK || node_type == NODE_DAISSOCKET) {
			task_arg_obj = &(hnode->create_args.asa.task_arg_obj);
			task_arg_obj->prio = pndb_props->prio;
			task_arg_obj->stack_size = pndb_props->stack_size;
			task_arg_obj->sys_stack_size =
			    pndb_props->sys_stack_size;
			task_arg_obj->stack_seg = pndb_props->stack_seg;
			dev_dbg(bridge, "(node) Priority: 0x%x Stack Size: "
				"0x%x words System Stack Size: 0x%x words "
				"Stack Segment: 0x%x profile count : 0x%x\n",
				task_arg_obj->prio, task_arg_obj->stack_size,
				task_arg_obj->sys_stack_size,
				task_arg_obj->stack_seg,
				pndb_props->count_profiles);
		}
	}
	return status;
}
/*
* ======== get_proc_props ========
* Purpose:
* Retrieve the processor properties.
*/
/*
 * Copy processor/channel properties from the bridge device context
 * into the node manager; remaining fields are hard-coded defaults
 * until PROC exposes a dsp_processorinfo query.
 */
static int get_proc_props(struct node_mgr *hnode_mgr,
			  struct dev_object *hdev_obj)
{
	struct bridge_dev_context *bridge_ctx;
	struct cfg_hostres *host_res;
	int status = 0;

	status = dev_get_bridge_context(hdev_obj, &bridge_ctx);
	if (!bridge_ctx)
		status = -EFAULT;
	if (status)
		return status;

	host_res = bridge_ctx->resources;
	if (!host_res)
		return -EPERM;

	hnode_mgr->chnl_offset = host_res->chnl_offset;
	hnode_mgr->chnl_buf_size = host_res->chnl_buf_size;
	hnode_mgr->num_chnls = host_res->num_chnls;

	/*
	 * PROC will add an API to get dsp_processorinfo.
	 * Fill in default values for now.
	 */
	/* TODO -- Instead of hard coding, take from registry */
	hnode_mgr->proc_family = 6000;
	hnode_mgr->proc_type = 6410;
	hnode_mgr->min_pri = DSP_NODE_MIN_PRIORITY;
	hnode_mgr->max_pri = DSP_NODE_MAX_PRIORITY;
	hnode_mgr->dsp_word_size = DSPWORDSIZE;
	hnode_mgr->dsp_data_mau_size = DSPWORDSIZE;
	hnode_mgr->dsp_mau_size = 1;

	return status;
}
/*
* ======== node_get_uuid_props ========
* Purpose:
* Fetch Node UUID properties from DCD/DOF file.
*/
/*
 * Fetch a node's NDB properties from the DCD/DOF database identified
 * by its UUID.  Returns 0 on success or a negative error code.
 */
int node_get_uuid_props(void *hprocessor,
			const struct dsp_uuid *node_uuid,
			struct dsp_ndbprops *node_props)
{
	struct node_mgr *hnode_mgr = NULL;
	struct dev_object *hdev_obj;
	int status = 0;
	struct dcd_nodeprops dcd_node_props;
	struct dsp_processorstate proc_state;
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(hprocessor != NULL);
	DBC_REQUIRE(node_uuid != NULL);
	if (hprocessor == NULL || node_uuid == NULL) {
		status = -EFAULT;
		goto func_end;
	}
	status = proc_get_state(hprocessor, &proc_state,
				sizeof(struct dsp_processorstate));
	if (status)
		goto func_end;
	/* If processor is in error state then don't attempt
	   to send the message */
	if (proc_state.proc_state == PROC_ERROR) {
		status = -EPERM;
		goto func_end;
	}
	status = proc_get_dev_object(hprocessor, &hdev_obj);
	if (hdev_obj) {
		status = dev_get_node_manager(hdev_obj, &hnode_mgr);
		if (hnode_mgr == NULL) {
			status = -EFAULT;
			goto func_end;
		}
	}
	/*
	 * Guard against a NULL manager: if proc_get_dev_object() failed or
	 * returned a NULL device object, hnode_mgr was never assigned and
	 * the mutex_lock below would dereference a NULL pointer.
	 */
	if (!hnode_mgr) {
		status = -EFAULT;
		goto func_end;
	}
	/*
	 * Enter the critical section. This is needed because
	 * dcd_get_object_def will ultimately end up calling dbll_open/close,
	 * which needs to be protected in order to not corrupt the zlib manager
	 * (COD).
	 */
	mutex_lock(&hnode_mgr->node_mgr_lock);
	dcd_node_props.str_create_phase_fxn = NULL;
	dcd_node_props.str_execute_phase_fxn = NULL;
	dcd_node_props.str_delete_phase_fxn = NULL;
	dcd_node_props.str_i_alg_name = NULL;
	status = dcd_get_object_def(hnode_mgr->dcd_mgr,
				(struct dsp_uuid *)node_uuid, DSP_DCDNODETYPE,
				(struct dcd_genericobj *)&dcd_node_props);
	if (!status) {
		*node_props = dcd_node_props.ndb_props;
		/* Only the ndb_props are returned; the phase-function name
		 * strings allocated by the DCD layer are freed here. */
		kfree(dcd_node_props.str_create_phase_fxn);
		kfree(dcd_node_props.str_execute_phase_fxn);
		kfree(dcd_node_props.str_delete_phase_fxn);
		kfree(dcd_node_props.str_i_alg_name);
	}
	/* Leave the critical section, we're done. */
	mutex_unlock(&hnode_mgr->node_mgr_lock);
func_end:
	return status;
}
/*
* ======== get_rms_fxns ========
* Purpose:
* Retrieve the RMS functions.
*/
/*
 * Resolve the DSP-side RMS server entry points into
 * hnode_mgr->fxn_addrs[].  A symbol reported as -ESPIPE is logged and
 * skipped (it may be loaded dynamically later); any other lookup
 * failure aborts the scan and is returned to the caller.
 */
static int get_rms_fxns(struct node_mgr *hnode_mgr)
{
	/* Indexed by the RMS* enumerators used throughout this file. */
	static char *psz_fxns[NUMRMSFXNS] = {
		"RMS_queryServer",	/* RMSQUERYSERVER */
		"RMS_configureServer",	/* RMSCONFIGURESERVER */
		"RMS_createNode",	/* RMSCREATENODE */
		"RMS_executeNode",	/* RMSEXECUTENODE */
		"RMS_deleteNode",	/* RMSDELETENODE */
		"RMS_changeNodePriority",	/* RMSCHANGENODEPRIORITY */
		"RMS_readMemory",	/* RMSREADMEMORY */
		"RMS_writeMemory",	/* RMSWRITEMEMORY */
		"RMS_copy",	/* RMSCOPY */
	};
	struct dev_object *dev_obj = hnode_mgr->dev_obj;
	int status = 0;
	s32 idx;

	for (idx = 0; idx < NUMRMSFXNS; idx++) {
		status = dev_get_symbol(dev_obj, psz_fxns[idx],
					&(hnode_mgr->fxn_addrs[idx]));
		if (!status)
			continue;
		if (status != -ESPIPE) {
			dev_dbg(bridge, "%s: Symbol not found: %s "
				"status = 0x%x\n", __func__,
				psz_fxns[idx], status);
			break;
		}
		/*
		 * May be loaded dynamically (in the future),
		 * but return an error for now.
		 */
		dev_dbg(bridge, "%s: RMS function: %s currently"
			" not loaded\n", __func__, psz_fxns[idx]);
	}
	return status;
}
/*
* ======== ovly ========
* Purpose:
* Called during overlay.Sends command to RMS to copy a block of data.
*/
static u32 ovly(void *priv_ref, u32 dsp_run_addr, u32 dsp_load_addr,
		u32 ul_num_bytes, u32 mem_space)
{
	struct node_object *hnode = (struct node_object *)priv_ref;
	struct node_mgr *hnode_mgr;
	u32 ul_bytes = 0;
	int status = 0;
	struct bridge_dev_context *hbridge_context;
	/* Function interface to Bridge driver*/
	struct bridge_drv_interface *intf_fxns;
	DBC_REQUIRE(hnode);
	hnode_mgr = hnode->node_mgr;
	/*
	 * The previous version also computed ul_num_bytes /
	 * hnode_mgr->dsp_word_size and hnode->timeout into locals that were
	 * never used; the division was a needless divide-by-zero hazard and
	 * both have been removed.
	 */
	/* Call new MemCopy function */
	intf_fxns = hnode_mgr->intf_fxns;
	status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
	if (!status) {
		status =
		    (*intf_fxns->brd_mem_copy) (hbridge_context,
						dsp_run_addr, dsp_load_addr,
						ul_num_bytes, (u32) mem_space);
		/* Report bytes copied only on success; 0 signals failure. */
		if (!status)
			ul_bytes = ul_num_bytes;
		else
			pr_debug("%s: failed to copy brd memory, status 0x%x\n",
				 __func__, status);
	} else {
		pr_debug("%s: failed to get Bridge context, status 0x%x\n",
			 __func__, status);
	}
	return ul_bytes;
}
/*
* ======== mem_write ========
*/
static u32 mem_write(void *priv_ref, u32 dsp_add, void *pbuf,
		     u32 ul_num_bytes, u32 mem_space)
{
	struct node_object *hnode = (struct node_object *)priv_ref;
	struct node_mgr *hnode_mgr;
	u16 mem_sect_type;
	int status = 0;
	struct bridge_dev_context *hbridge_context;
	/* Function interface to Bridge driver */
	struct bridge_drv_interface *intf_fxns;
	DBC_REQUIRE(hnode);
	DBC_REQUIRE(mem_space & DBLL_CODE || mem_space & DBLL_DATA);
	hnode_mgr = hnode->node_mgr;
	/* Map the loader's section flag onto the RMS section type. */
	mem_sect_type = (mem_space & DBLL_CODE) ? RMS_CODE : RMS_DATA;
	/* Call new MemWrite function */
	intf_fxns = hnode_mgr->intf_fxns;
	status = dev_get_bridge_context(hnode_mgr->dev_obj, &hbridge_context);
	/*
	 * Previously the result of dev_get_bridge_context() was ignored and
	 * a possibly invalid context pointer was handed to brd_mem_write().
	 * Fail the write (0 bytes) instead of dereferencing garbage.
	 */
	if (status)
		return 0;
	status = (*intf_fxns->brd_mem_write) (hbridge_context, pbuf,
					dsp_add, ul_num_bytes, mem_sect_type);
	return ul_num_bytes;
}
#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
/*
* ======== node_find_addr ========
*/
/*
 * Look for the node whose loader can resolve sym_addr (within
 * offset_range) and return that resolution; -ENOENT when no node
 * matches (or the list is empty).
 */
int node_find_addr(struct node_mgr *node_mgr, u32 sym_addr,
		   u32 offset_range, void *sym_addr_output, char *sym_name)
{
	struct node_object *node_obj;
	int status = -ENOENT;

	/* Print pointers with %p: casting them to unsigned int for %x
	 * truncates on 64-bit kernels. */
	pr_debug("%s(%p, 0x%x, 0x%x, %p, %s)\n", __func__,
		 node_mgr, sym_addr, offset_range, sym_addr_output, sym_name);

	list_for_each_entry(node_obj, &node_mgr->node_list, list_elem) {
		status = nldr_find_addr(node_obj->nldr_node_obj, sym_addr,
					offset_range, sym_addr_output,
					sym_name);
		/* Stop at the first node whose loader resolves the symbol. */
		if (!status)
			break;
	}
	return status;
}
#endif
| gpl-2.0 |
thederekjay/kernel_lge_mako | drivers/i2c/busses/i2c-piix4.c | 4170 | 16002 | /*
Copyright (c) 1998 - 2002 Frodo Looijaard <frodol@dds.nl> and
Philip Edelbrock <phil@netroedge.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Supports:
Intel PIIX4, 440MX
Serverworks OSB4, CSB5, CSB6, HT-1000, HT-1100
ATI IXP200, IXP300, IXP400, SB600, SB700, SB800
AMD Hudson-2
SMSC Victory66
Note: we assume there can only be one device, with one SMBus interface.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/dmi.h>
#include <linux/acpi.h>
#include <linux/io.h>
/* PIIX4 SMBus address offsets */
/* All register macros are relative to piix4_smba, the I/O base found
 * (or forced) at probe time. */
#define SMBHSTSTS	(0 + piix4_smba)
#define SMBHSLVSTS	(1 + piix4_smba)
#define SMBHSTCNT	(2 + piix4_smba)
#define SMBHSTCMD	(3 + piix4_smba)
#define SMBHSTADD	(4 + piix4_smba)
#define SMBHSTDAT0	(5 + piix4_smba)
#define SMBHSTDAT1	(6 + piix4_smba)
#define SMBBLKDAT	(7 + piix4_smba)
#define SMBSLVCNT	(8 + piix4_smba)
#define SMBSHDWCMD	(9 + piix4_smba)
#define SMBSLVEVT	(0xA + piix4_smba)
#define SMBSLVDAT	(0xC + piix4_smba)
/* count for request_region */
#define SMBIOSIZE	8
/* PCI Address Constants */
/* Offsets into the device's PCI configuration space. */
#define SMBBA		0x090
#define SMBHSTCFG	0x0D2
#define SMBSLVC		0x0D3
#define SMBSHDW1	0x0D4
#define SMBSHDW2	0x0D5
#define SMBREV		0x0D6
/* Other settings */
#define MAX_TIMEOUT	500
#define ENABLE_INT9	0
/* PIIX4 constants */
/* Transaction type codes -- presumably written to SMBHSTCNT per the
 * PIIX4 datasheet; the transaction code is below this chunk, confirm there. */
#define PIIX4_QUICK		0x00
#define PIIX4_BYTE		0x04
#define PIIX4_BYTE_DATA		0x08
#define PIIX4_WORD_DATA		0x0C
#define PIIX4_BLOCK_DATA	0x14
/* insmod parameters */
/* If force is set to anything different from 0, we forcibly enable the
   PIIX4. DANGEROUS! */
static int force;
module_param (force, int, 0);
MODULE_PARM_DESC(force, "Forcibly enable the PIIX4. DANGEROUS!");
/* If force_addr is set to anything different from 0, we forcibly enable
   the PIIX4 at the given address. VERY DANGEROUS! */
static int force_addr;
module_param (force_addr, int, 0);
MODULE_PARM_DESC(force_addr,
		 "Forcibly enable the PIIX4 at the given address. "
		 "EXTREMELY DANGEROUS!");
/* Single-instance state: the driver assumes one device with one SMBus
 * interface (see note in the header comment). */
static unsigned short piix4_smba;
static int srvrworks_csb5_delay;
static struct pci_driver piix4_driver;
static struct i2c_adapter piix4_adapter;
/* Boards on which merely touching the SMBus has been reported to cause
 * severe hardware problems; piix4_setup() refuses to run on them. */
static struct dmi_system_id __devinitdata piix4_dmi_blacklist[] = {
	{
		.ident = "Sapphire AM2RD790",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "SAPPHIRE Inc."),
			DMI_MATCH(DMI_BOARD_NAME, "PC-AM2RD790"),
		},
	},
	{
		.ident = "DFI Lanparty UT 790FX",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "DFI Inc."),
			DMI_MATCH(DMI_BOARD_NAME, "LP UT 790FX"),
		},
	},
	{ }
};
/* The IBM entry is in a separate table because we only check it
on Intel-based systems */
static struct dmi_system_id __devinitdata piix4_dmi_ibm[] = {
{
.ident = "IBM",
.matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), },
},
{ },
};
/*
 * Set up the "classic" PIIX4-family SMBus host (Intel PIIX4/440MX,
 * Serverworks, pre-SB800 ATI parts).  Reads (or force-programs) the
 * SMBus I/O base from PCI config space, reserves the I/O region and
 * checks that the host controller is enabled.
 *
 * Side effect: stores the I/O base in the module-global piix4_smba.
 * Returns 0 on success or a negative errno.
 */
static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
				 const struct pci_device_id *id)
{
	unsigned char temp;

	/* The Serverworks CSB5 needs a longer per-transaction delay
	 * (see piix4_transaction()) */
	if ((PIIX4_dev->vendor == PCI_VENDOR_ID_SERVERWORKS) &&
	    (PIIX4_dev->device == PCI_DEVICE_ID_SERVERWORKS_CSB5))
		srvrworks_csb5_delay = 1;

	/* On some motherboards, it was reported that accessing the SMBus
	   caused severe hardware problems */
	if (dmi_check_system(piix4_dmi_blacklist)) {
		dev_err(&PIIX4_dev->dev,
			"Accessing the SMBus on this system is unsafe!\n");
		return -EPERM;
	}

	/* Don't access SMBus on IBM systems which get corrupted eeproms */
	if (dmi_check_system(piix4_dmi_ibm) &&
			PIIX4_dev->vendor == PCI_VENDOR_ID_INTEL) {
		dev_err(&PIIX4_dev->dev, "IBM system detected; this module "
			"may corrupt your serial eeprom! Refusing to load "
			"module!\n");
		return -EPERM;
	}

	/* Determine the address of the SMBus areas */
	if (force_addr) {
		/* user override; low 4 bits are not part of the base */
		piix4_smba = force_addr & 0xfff0;
		force = 0;
	} else {
		pci_read_config_word(PIIX4_dev, SMBBA, &piix4_smba);
		piix4_smba &= 0xfff0;
		if(piix4_smba == 0) {
			dev_err(&PIIX4_dev->dev, "SMBus base address "
				"uninitialized - upgrade BIOS or use "
				"force_addr=0xaddr\n");
			return -ENODEV;
		}
	}

	/* Back off if ACPI/firmware has claimed the same I/O range */
	if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
		return -ENODEV;

	if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
		dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
			piix4_smba);
		return -EBUSY;
	}

	pci_read_config_byte(PIIX4_dev, SMBHSTCFG, &temp);

	/* If force_addr is set, we program the new address here. Just to make
	   sure, we disable the PIIX4 first. */
	if (force_addr) {
		pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp & 0xfe);
		pci_write_config_word(PIIX4_dev, SMBBA, piix4_smba);
		pci_write_config_byte(PIIX4_dev, SMBHSTCFG, temp | 0x01);
		dev_info(&PIIX4_dev->dev, "WARNING: SMBus interface set to "
			"new address %04x!\n", piix4_smba);
	} else if ((temp & 1) == 0) {	/* host controller disabled */
		if (force) {
			/* This should never need to be done, but has been
			 * noted that many Dell machines have the SMBus
			 * interface on the PIIX4 disabled!? NOTE: This assumes
			 * I/O space and other allocations WERE done by the
			 * Bios! Don't complain if your hardware does weird
			 * things after enabling this. :') Check for Bios
			 * updates before resorting to this.
			 */
			pci_write_config_byte(PIIX4_dev, SMBHSTCFG,
					      temp | 1);
			dev_printk(KERN_NOTICE, &PIIX4_dev->dev,
				"WARNING: SMBus interface has been "
				"FORCEFULLY ENABLED!\n");
		} else {
			dev_err(&PIIX4_dev->dev,
				"Host SMBus controller not enabled!\n");
			release_region(piix4_smba, SMBIOSIZE);
			piix4_smba = 0;
			return -ENODEV;
		}
	}

	/* SMBHSTCFG bits 3:1 select the interrupt routing */
	if (((temp & 0x0E) == 8) || ((temp & 0x0E) == 2))
		dev_dbg(&PIIX4_dev->dev, "Using Interrupt 9 for SMBus.\n");
	else if ((temp & 0x0E) == 0)
		dev_dbg(&PIIX4_dev->dev, "Using Interrupt SMI# for SMBus.\n");
	else
		dev_err(&PIIX4_dev->dev, "Illegal Interrupt configuration "
			"(or code out of date)!\n");

	pci_read_config_byte(PIIX4_dev, SMBREV, &temp);
	dev_info(&PIIX4_dev->dev,
		 "SMBus Host Controller at 0x%x, revision %d\n",
		 piix4_smba, temp);

	return 0;
}
/*
 * Set up the SMBus host on SB800-and-later ATI/AMD southbridges, where
 * the SMBus base address lives behind the PM index/data port pair at
 * 0xcd6/0xcd7 instead of in PCI config space.  Address forcing is not
 * supported on these parts and is refused.
 *
 * Side effect: stores the I/O base in the module-global piix4_smba.
 * Returns 0 on success or a negative errno.
 */
static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
				const struct pci_device_id *id)
{
	unsigned short smba_idx = 0xcd6;	/* PM index port (data at +1) */
	u8 smba_en_lo, smba_en_hi, i2ccfg, i2ccfg_offset = 0x10, smb_en = 0x2c;

	/* SB800 and later SMBus does not support forcing address */
	if (force || force_addr) {
		dev_err(&PIIX4_dev->dev, "SMBus does not support "
			"forcing address!\n");
		return -EINVAL;
	}

	/* Determine the address of the SMBus areas */
	if (!request_region(smba_idx, 2, "smba_idx")) {
		dev_err(&PIIX4_dev->dev, "SMBus base address index region "
			"0x%x already in use!\n", smba_idx);
		return -EBUSY;
	}
	/* read the two SMB_EN bytes through the index/data pair */
	outb_p(smb_en, smba_idx);
	smba_en_lo = inb_p(smba_idx + 1);
	outb_p(smb_en + 1, smba_idx);
	smba_en_hi = inb_p(smba_idx + 1);
	release_region(smba_idx, 2);

	if ((smba_en_lo & 1) == 0) {	/* bit 0 = SMBus host enabled */
		dev_err(&PIIX4_dev->dev,
			"Host SMBus controller not enabled!\n");
		return -ENODEV;
	}

	piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
	/* back off if ACPI/firmware has claimed the same I/O range */
	if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
		return -ENODEV;

	if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
		dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
			piix4_smba);
		return -EBUSY;
	}

	/* Request the SMBus I2C bus config region */
	if (!request_region(piix4_smba + i2ccfg_offset, 1, "i2ccfg")) {
		dev_err(&PIIX4_dev->dev, "SMBus I2C bus config region "
			"0x%x already in use!\n", piix4_smba + i2ccfg_offset);
		release_region(piix4_smba, SMBIOSIZE);
		piix4_smba = 0;
		return -EBUSY;
	}
	i2ccfg = inb_p(piix4_smba + i2ccfg_offset);
	release_region(piix4_smba + i2ccfg_offset, 1);

	if (i2ccfg & 1)		/* bit 0 selects IRQ vs. SMI# delivery */
		dev_dbg(&PIIX4_dev->dev, "Using IRQ for SMBus.\n");
	else
		dev_dbg(&PIIX4_dev->dev, "Using SMI# for SMBus.\n");

	dev_info(&PIIX4_dev->dev,
		 "SMBus Host Controller at 0x%x, revision %d\n",
		 piix4_smba, i2ccfg >> 4);

	return 0;
}
/*
 * Execute the transaction the caller has already programmed into the
 * host registers: clear any stale status, start the engine, poll for
 * completion and translate the resulting status bits into an errno.
 * Returns 0 on success or a negative errno.
 */
static int piix4_transaction(void)
{
	int temp;
	int result = 0;
	int timeout = 0;

	dev_dbg(&piix4_adapter.dev, "Transaction (pre): CNT=%02x, CMD=%02x, "
		"ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT),
		inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0),
		inb_p(SMBHSTDAT1));

	/* Make sure the SMBus host is ready to start transmitting */
	if ((temp = inb_p(SMBHSTSTS)) != 0x00) {
		dev_dbg(&piix4_adapter.dev, "SMBus busy (%02x). "
			"Resetting...\n", temp);
		/* status bits are cleared by writing them back */
		outb_p(temp, SMBHSTSTS);
		if ((temp = inb_p(SMBHSTSTS)) != 0x00) {
			dev_err(&piix4_adapter.dev, "Failed! (%02x)\n", temp);
			return -EBUSY;
		} else {
			dev_dbg(&piix4_adapter.dev, "Successful!\n");
		}
	}

	/* start the transaction by setting bit 6 */
	outb_p(inb(SMBHSTCNT) | 0x040, SMBHSTCNT);

	/* We will always wait for a fraction of a second! (See PIIX4 docs errata) */
	if (srvrworks_csb5_delay) /* Extra delay for SERVERWORKS_CSB5 */
		msleep(2);
	else
		msleep(1);

	/* poll the host-busy bit (bit 0) once per ms */
	while ((++timeout < MAX_TIMEOUT) &&
	       ((temp = inb_p(SMBHSTSTS)) & 0x01))
		msleep(1);

	/* If the SMBus is still busy, we give up */
	if (timeout == MAX_TIMEOUT) {
		dev_err(&piix4_adapter.dev, "SMBus Timeout!\n");
		result = -ETIMEDOUT;
	}

	if (temp & 0x10) {	/* transaction failed/killed */
		result = -EIO;
		dev_err(&piix4_adapter.dev, "Error: Failed bus transaction\n");
	}

	if (temp & 0x08) {	/* bus collision */
		result = -EIO;
		dev_dbg(&piix4_adapter.dev, "Bus collision! SMBus may be "
			"locked until next hard reset. (sorry!)\n");
		/* Clock stops and slave is stuck in mid-transmission */
	}

	if (temp & 0x04) {	/* device error: slave did not respond */
		result = -ENXIO;
		dev_dbg(&piix4_adapter.dev, "Error: no response!\n");
	}

	/* clear whatever status bits remain so the next caller starts clean */
	if (inb_p(SMBHSTSTS) != 0x00)
		outb_p(inb(SMBHSTSTS), SMBHSTSTS);

	if ((temp = inb_p(SMBHSTSTS)) != 0x00) {
		dev_err(&piix4_adapter.dev, "Failed reset at end of "
			"transaction (%02x)\n", temp);
	}
	dev_dbg(&piix4_adapter.dev, "Transaction (post): CNT=%02x, CMD=%02x, "
		"ADD=%02x, DAT0=%02x, DAT1=%02x\n", inb_p(SMBHSTCNT),
		inb_p(SMBHSTCMD), inb_p(SMBHSTADD), inb_p(SMBHSTDAT0),
		inb_p(SMBHSTDAT1));
	return result;
}
/*
 * i2c_algorithm.smbus_xfer implementation: program one SMBus transaction
 * into the host registers according to the requested protocol, run it via
 * piix4_transaction(), and for reads copy the result back into *data.
 * Return negative errno on error.
 */
static s32 piix4_access(struct i2c_adapter * adap, u16 addr,
		 unsigned short flags, char read_write,
		 u8 command, int size, union i2c_smbus_data * data)
{
	int i, len;
	int status;

	/* 'size' arrives as an I2C_SMBUS_* protocol id and is rewritten
	 * below into the corresponding PIIX4_* host-control encoding. */
	switch (size) {
	case I2C_SMBUS_QUICK:
		/* 7-bit slave address in bits 7:1, R/W# in bit 0 */
		outb_p((addr << 1) | read_write,
		       SMBHSTADD);
		size = PIIX4_QUICK;
		break;
	case I2C_SMBUS_BYTE:
		outb_p((addr << 1) | read_write,
		       SMBHSTADD);
		if (read_write == I2C_SMBUS_WRITE)
			outb_p(command, SMBHSTCMD);
		size = PIIX4_BYTE;
		break;
	case I2C_SMBUS_BYTE_DATA:
		outb_p((addr << 1) | read_write,
		       SMBHSTADD);
		outb_p(command, SMBHSTCMD);
		if (read_write == I2C_SMBUS_WRITE)
			outb_p(data->byte, SMBHSTDAT0);
		size = PIIX4_BYTE_DATA;
		break;
	case I2C_SMBUS_WORD_DATA:
		outb_p((addr << 1) | read_write,
		       SMBHSTADD);
		outb_p(command, SMBHSTCMD);
		if (read_write == I2C_SMBUS_WRITE) {
			/* low byte in DAT0, high byte in DAT1 */
			outb_p(data->word & 0xff, SMBHSTDAT0);
			outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1);
		}
		size = PIIX4_WORD_DATA;
		break;
	case I2C_SMBUS_BLOCK_DATA:
		outb_p((addr << 1) | read_write,
		       SMBHSTADD);
		outb_p(command, SMBHSTCMD);
		if (read_write == I2C_SMBUS_WRITE) {
			len = data->block[0];	/* block[0] carries the length */
			if (len == 0 || len > I2C_SMBUS_BLOCK_MAX)
				return -EINVAL;
			outb_p(len, SMBHSTDAT0);
			i = inb_p(SMBHSTCNT);	/* Reset SMBBLKDAT */
			for (i = 1; i <= len; i++)
				outb_p(data->block[i], SMBBLKDAT);
		}
		size = PIIX4_BLOCK_DATA;
		break;
	default:
		dev_warn(&adap->dev, "Unsupported transaction %d\n", size);
		return -EOPNOTSUPP;
	}

	/* protocol select bits plus optional interrupt-9 enable (bit 0) */
	outb_p((size & 0x1C) + (ENABLE_INT9 & 1), SMBHSTCNT);

	status = piix4_transaction();
	if (status)
		return status;

	/* writes and QUICK have no data phase to read back */
	if ((read_write == I2C_SMBUS_WRITE) || (size == PIIX4_QUICK))
		return 0;


	switch (size) {
	case PIIX4_BYTE:
	case PIIX4_BYTE_DATA:
		data->byte = inb_p(SMBHSTDAT0);
		break;
	case PIIX4_WORD_DATA:
		data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8);
		break;
	case PIIX4_BLOCK_DATA:
		data->block[0] = inb_p(SMBHSTDAT0);
		/* sanity-check the length the slave reported */
		if (data->block[0] == 0 || data->block[0] > I2C_SMBUS_BLOCK_MAX)
			return -EPROTO;
		i = inb_p(SMBHSTCNT);	/* Reset SMBBLKDAT */
		for (i = 1; i <= data->block[0]; i++)
			data->block[i] = inb_p(SMBBLKDAT);
		break;
	}
	return 0;
}
/* Report the set of SMBus protocol operations this host supports. */
static u32 piix4_func(struct i2c_adapter *adapter)
{
	u32 func = I2C_FUNC_SMBUS_QUICK;

	func |= I2C_FUNC_SMBUS_BYTE;
	func |= I2C_FUNC_SMBUS_BYTE_DATA;
	func |= I2C_FUNC_SMBUS_WORD_DATA;
	func |= I2C_FUNC_SMBUS_BLOCK_DATA;

	return func;
}
/* Transfer and capability callbacks handed to the i2c core. */
static const struct i2c_algorithm smbus_algorithm = {
	.smbus_xfer	= piix4_access,
	.functionality	= piix4_func,
};
/* Single global adapter - the driver assumes at most one PIIX4-style
 * SMBus host per system (see the header comment). */
static struct i2c_adapter piix4_adapter = {
	.owner		= THIS_MODULE,
	.class		= I2C_CLASS_HWMON | I2C_CLASS_SPD,
	.algo		= &smbus_algorithm,
};
static DEFINE_PCI_DEVICE_TABLE(piix4_ids) = {
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443MX_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_EFAR, PCI_DEVICE_ID_EFAR_SLC90E66_3) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP200_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP300_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP400_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SMBUS) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_OSB4) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_CSB5) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_CSB6) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_HT1000SB) },
{ PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS,
PCI_DEVICE_ID_SERVERWORKS_HT1100LD) },
{ 0, }
};
MODULE_DEVICE_TABLE (pci, piix4_ids);
/*
 * PCI probe: choose the register-access scheme (SB800-and-later parts
 * keep the SMBus base behind the PM index/data ports, older parts in
 * PCI config space), initialise the hardware and register the adapter.
 * Returns 0 on success or a negative errno.
 */
static int __devinit piix4_probe(struct pci_dev *dev,
				const struct pci_device_id *id)
{
	int retval;

	if ((dev->vendor == PCI_VENDOR_ID_ATI &&
	     dev->device == PCI_DEVICE_ID_ATI_SBX00_SMBUS &&
	     dev->revision >= 0x40) ||
	    dev->vendor == PCI_VENDOR_ID_AMD)
		/* base address location etc changed in SB800 */
		retval = piix4_setup_sb800(dev, id);
	else
		retval = piix4_setup(dev, id);

	if (retval)
		return retval;

	/* set up the sysfs linkage to our parent device */
	piix4_adapter.dev.parent = &dev->dev;

	snprintf(piix4_adapter.name, sizeof(piix4_adapter.name),
		"SMBus PIIX4 adapter at %04x", piix4_smba);

	if ((retval = i2c_add_adapter(&piix4_adapter))) {
		dev_err(&dev->dev, "Couldn't register adapter!\n");
		/* undo piix4_setup*()'s region claim on failure */
		release_region(piix4_smba, SMBIOSIZE);
		piix4_smba = 0;
	}

	return retval;
}
/* PCI remove: unregister the adapter and release the I/O region.
 * piix4_smba doubles as the "setup succeeded" flag. */
static void __devexit piix4_remove(struct pci_dev *dev)
{
	if (piix4_smba) {
		i2c_del_adapter(&piix4_adapter);
		release_region(piix4_smba, SMBIOSIZE);
		piix4_smba = 0;
	}
}
/* PCI driver glue; .name is also used when reserving I/O regions. */
static struct pci_driver piix4_driver = {
	.name		= "piix4_smbus",
	.id_table	= piix4_ids,
	.probe		= piix4_probe,
	.remove		= __devexit_p(piix4_remove),
};
/* Module entry point: register the PCI driver. */
static int __init i2c_piix4_init(void)
{
	return pci_register_driver(&piix4_driver);
}
/* Module exit point: unregister the PCI driver. */
static void __exit i2c_piix4_exit(void)
{
	pci_unregister_driver(&piix4_driver);
}
MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl> and "
"Philip Edelbrock <phil@netroedge.com>");
MODULE_DESCRIPTION("PIIX4 SMBus driver");
MODULE_LICENSE("GPL");
module_init(i2c_piix4_init);
module_exit(i2c_piix4_exit);
| gpl-2.0 |
BrateloSlava/SaveEnergy-2 | drivers/video/cirrusfb.c | 4938 | 78566 | /*
* drivers/video/cirrusfb.c - driver for Cirrus Logic chipsets
*
* Copyright 1999-2001 Jeff Garzik <jgarzik@pobox.com>
*
* Contributors (thanks, all!)
*
* David Eger:
* Overhaul for Linux 2.6
*
* Jeff Rugen:
* Major contributions; Motorola PowerStack (PPC and PCI) support,
* GD54xx, 1280x1024 mode support, change MCLK based on VCLK.
*
* Geert Uytterhoeven:
* Excellent code review.
*
* Lars Hecking:
* Amiga updates and testing.
*
* Original cirrusfb author: Frank Neumann
*
* Based on retz3fb.c and cirrusfb.c:
* Copyright (C) 1997 Jes Sorensen
* Copyright (C) 1996 Frank Neumann
*
***************************************************************
*
* Format this code with GNU indent '-kr -i8 -pcs' options.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <asm/pgtable.h>
#ifdef CONFIG_ZORRO
#include <linux/zorro.h>
#endif
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif
#ifdef CONFIG_PPC_PREP
#include <asm/machdep.h>
#define isPReP machine_is(prep)
#else
#define isPReP 0
#endif
#include <video/vga.h>
#include <video/cirrus.h>
/*****************************************************************
*
* debugging and utility macros
*
*/
/* disable runtime assertions? */
/* #define CIRRUSFB_NDEBUG */
/* debugging assertions */
#ifndef CIRRUSFB_NDEBUG
#define assert(expr) \
if (!(expr)) { \
printk("Assertion failed! %s,%s,%s,line=%d\n", \
#expr, __FILE__, __func__, __LINE__); \
}
#else
#define assert(expr)
#endif
#define MB_ (1024 * 1024)
/*****************************************************************
*
* chipset information
*
*/
/* board types */
enum cirrus_board {
BT_NONE = 0,
BT_SD64, /* GD5434 */
BT_PICCOLO, /* GD5426 */
BT_PICASSO, /* GD5426 or GD5428 */
BT_SPECTRUM, /* GD5426 or GD5428 */
BT_PICASSO4, /* GD5446 */
BT_ALPINE, /* GD543x/4x */
BT_GD5480,
BT_LAGUNA, /* GD5462/64 */
BT_LAGUNAB, /* GD5465 */
};
/*
* per-board-type information, used for enumerating and abstracting
* chip-specific information
* NOTE: MUST be in the same order as enum cirrus_board in order to
* use direct indexing on this array
* NOTE: '__initdata' cannot be used as some of this info
* is required at runtime. Maybe separate into an init-only and
* a run-time table?
*/
static const struct cirrusfb_board_info_rec {
char *name; /* ASCII name of chipset */
long maxclock[5]; /* maximum video clock */
/* for 1/4bpp, 8bpp 15/16bpp, 24bpp, 32bpp - numbers from xorg code */
bool init_sr07 : 1; /* init SR07 during init_vgachip() */
bool init_sr1f : 1; /* write SR1F during init_vgachip() */
/* construct bit 19 of screen start address */
bool scrn_start_bit19 : 1;
/* initial SR07 value, then for each mode */
unsigned char sr07;
unsigned char sr07_1bpp;
unsigned char sr07_1bpp_mux;
unsigned char sr07_8bpp;
unsigned char sr07_8bpp_mux;
unsigned char sr1f; /* SR1F VGA initial register value */
} cirrusfb_board_info[] = {
[BT_SD64] = {
.name = "CL SD64",
.maxclock = {
/* guess */
/* the SD64/P4 have a higher max. videoclock */
135100, 135100, 85500, 85500, 0
},
.init_sr07 = true,
.init_sr1f = true,
.scrn_start_bit19 = true,
.sr07 = 0xF0,
.sr07_1bpp = 0xF0,
.sr07_1bpp_mux = 0xF6,
.sr07_8bpp = 0xF1,
.sr07_8bpp_mux = 0xF7,
.sr1f = 0x1E
},
[BT_PICCOLO] = {
.name = "CL Piccolo",
.maxclock = {
/* guess */
90000, 90000, 90000, 90000, 90000
},
.init_sr07 = true,
.init_sr1f = true,
.scrn_start_bit19 = false,
.sr07 = 0x80,
.sr07_1bpp = 0x80,
.sr07_8bpp = 0x81,
.sr1f = 0x22
},
[BT_PICASSO] = {
.name = "CL Picasso",
.maxclock = {
/* guess */
90000, 90000, 90000, 90000, 90000
},
.init_sr07 = true,
.init_sr1f = true,
.scrn_start_bit19 = false,
.sr07 = 0x20,
.sr07_1bpp = 0x20,
.sr07_8bpp = 0x21,
.sr1f = 0x22
},
[BT_SPECTRUM] = {
.name = "CL Spectrum",
.maxclock = {
/* guess */
90000, 90000, 90000, 90000, 90000
},
.init_sr07 = true,
.init_sr1f = true,
.scrn_start_bit19 = false,
.sr07 = 0x80,
.sr07_1bpp = 0x80,
.sr07_8bpp = 0x81,
.sr1f = 0x22
},
[BT_PICASSO4] = {
.name = "CL Picasso4",
.maxclock = {
135100, 135100, 85500, 85500, 0
},
.init_sr07 = true,
.init_sr1f = false,
.scrn_start_bit19 = true,
.sr07 = 0xA0,
.sr07_1bpp = 0xA0,
.sr07_1bpp_mux = 0xA6,
.sr07_8bpp = 0xA1,
.sr07_8bpp_mux = 0xA7,
.sr1f = 0
},
[BT_ALPINE] = {
.name = "CL Alpine",
.maxclock = {
/* for the GD5430. GD5446 can do more... */
85500, 85500, 50000, 28500, 0
},
.init_sr07 = true,
.init_sr1f = true,
.scrn_start_bit19 = true,
.sr07 = 0xA0,
.sr07_1bpp = 0xA0,
.sr07_1bpp_mux = 0xA6,
.sr07_8bpp = 0xA1,
.sr07_8bpp_mux = 0xA7,
.sr1f = 0x1C
},
[BT_GD5480] = {
.name = "CL GD5480",
.maxclock = {
135100, 200000, 200000, 135100, 135100
},
.init_sr07 = true,
.init_sr1f = true,
.scrn_start_bit19 = true,
.sr07 = 0x10,
.sr07_1bpp = 0x11,
.sr07_8bpp = 0x11,
.sr1f = 0x1C
},
[BT_LAGUNA] = {
.name = "CL Laguna",
.maxclock = {
/* taken from X11 code */
170000, 170000, 170000, 170000, 135100,
},
.init_sr07 = false,
.init_sr1f = false,
.scrn_start_bit19 = true,
},
[BT_LAGUNAB] = {
.name = "CL Laguna AGP",
.maxclock = {
/* taken from X11 code */
170000, 250000, 170000, 170000, 135100,
},
.init_sr07 = false,
.init_sr1f = false,
.scrn_start_bit19 = true,
}
};
#ifdef CONFIG_PCI
#define CHIP(id, btype) \
{ PCI_VENDOR_ID_CIRRUS, id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (btype) }
static struct pci_device_id cirrusfb_pci_table[] = {
CHIP(PCI_DEVICE_ID_CIRRUS_5436, BT_ALPINE),
CHIP(PCI_DEVICE_ID_CIRRUS_5434_8, BT_SD64),
CHIP(PCI_DEVICE_ID_CIRRUS_5434_4, BT_SD64),
CHIP(PCI_DEVICE_ID_CIRRUS_5430, BT_ALPINE), /* GD-5440 is same id */
CHIP(PCI_DEVICE_ID_CIRRUS_7543, BT_ALPINE),
CHIP(PCI_DEVICE_ID_CIRRUS_7548, BT_ALPINE),
CHIP(PCI_DEVICE_ID_CIRRUS_5480, BT_GD5480), /* MacPicasso likely */
CHIP(PCI_DEVICE_ID_CIRRUS_5446, BT_PICASSO4), /* Picasso 4 is 5446 */
CHIP(PCI_DEVICE_ID_CIRRUS_5462, BT_LAGUNA), /* CL Laguna */
CHIP(PCI_DEVICE_ID_CIRRUS_5464, BT_LAGUNA), /* CL Laguna 3D */
CHIP(PCI_DEVICE_ID_CIRRUS_5465, BT_LAGUNAB), /* CL Laguna 3DA*/
{ 0, }
};
MODULE_DEVICE_TABLE(pci, cirrusfb_pci_table);
#undef CHIP
#endif /* CONFIG_PCI */
#ifdef CONFIG_ZORRO
struct zorrocl {
enum cirrus_board type; /* Board type */
u32 regoffset; /* Offset of registers in first Zorro device */
u32 ramsize; /* Size of video RAM in first Zorro device */
/* If zero, use autoprobe on RAM device */
u32 ramoffset; /* Offset of video RAM in first Zorro device */
zorro_id ramid; /* Zorro ID of RAM device */
zorro_id ramid2; /* Zorro ID of optional second RAM device */
};
static const struct zorrocl zcl_sd64 __devinitconst = {
.type = BT_SD64,
.ramid = ZORRO_PROD_HELFRICH_SD64_RAM,
};
static const struct zorrocl zcl_piccolo __devinitconst = {
.type = BT_PICCOLO,
.ramid = ZORRO_PROD_HELFRICH_PICCOLO_RAM,
};
static const struct zorrocl zcl_picasso __devinitconst = {
.type = BT_PICASSO,
.ramid = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_RAM,
};
static const struct zorrocl zcl_spectrum __devinitconst = {
.type = BT_SPECTRUM,
.ramid = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_RAM,
};
static const struct zorrocl zcl_picasso4_z3 __devinitconst = {
.type = BT_PICASSO4,
.regoffset = 0x00600000,
.ramsize = 4 * MB_,
.ramoffset = 0x01000000, /* 0x02000000 for 64 MiB boards */
};
static const struct zorrocl zcl_picasso4_z2 __devinitconst = {
.type = BT_PICASSO4,
.regoffset = 0x10000,
.ramid = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM1,
.ramid2 = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_RAM2,
};
static const struct zorro_device_id cirrusfb_zorro_table[] __devinitconst = {
{
.id = ZORRO_PROD_HELFRICH_SD64_REG,
.driver_data = (unsigned long)&zcl_sd64,
}, {
.id = ZORRO_PROD_HELFRICH_PICCOLO_REG,
.driver_data = (unsigned long)&zcl_piccolo,
}, {
.id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_II_II_PLUS_REG,
.driver_data = (unsigned long)&zcl_picasso,
}, {
.id = ZORRO_PROD_GVP_EGS_28_24_SPECTRUM_REG,
.driver_data = (unsigned long)&zcl_spectrum,
}, {
.id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z3,
.driver_data = (unsigned long)&zcl_picasso4_z3,
}, {
.id = ZORRO_PROD_VILLAGE_TRONIC_PICASSO_IV_Z2_REG,
.driver_data = (unsigned long)&zcl_picasso4_z2,
},
{ 0 }
};
MODULE_DEVICE_TABLE(zorro, cirrusfb_zorro_table);
#endif /* CONFIG_ZORRO */
#ifdef CIRRUSFB_DEBUG
enum cirrusfb_dbg_reg_class {
CRT,
SEQ
};
#endif /* CIRRUSFB_DEBUG */
/* info about board */
struct cirrusfb_info {
u8 __iomem *regbase;
u8 __iomem *laguna_mmio;
enum cirrus_board btype;
unsigned char SFR; /* Shadow of special function register */
int multiplexing;
int doubleVCLK;
int blank_mode;
u32 pseudo_palette[16];
void (*unmap)(struct fb_info *info);
};
static bool noaccel __devinitdata;
static char *mode_option __devinitdata = "640x480@60";
/****************************************************************************/
/**** BEGIN PROTOTYPES ******************************************************/
/*--- Interface used by the world ------------------------------------------*/
static int cirrusfb_pan_display(struct fb_var_screeninfo *var,
struct fb_info *info);
/*--- Internal routines ----------------------------------------------------*/
static void init_vgachip(struct fb_info *info);
static void switch_monitor(struct cirrusfb_info *cinfo, int on);
static void WGen(const struct cirrusfb_info *cinfo,
int regnum, unsigned char val);
static unsigned char RGen(const struct cirrusfb_info *cinfo, int regnum);
static void AttrOn(const struct cirrusfb_info *cinfo);
static void WHDR(const struct cirrusfb_info *cinfo, unsigned char val);
static void WSFR(struct cirrusfb_info *cinfo, unsigned char val);
static void WSFR2(struct cirrusfb_info *cinfo, unsigned char val);
static void WClut(struct cirrusfb_info *cinfo, unsigned char regnum,
unsigned char red, unsigned char green, unsigned char blue);
#if 0
static void RClut(struct cirrusfb_info *cinfo, unsigned char regnum,
unsigned char *red, unsigned char *green,
unsigned char *blue);
#endif
static void cirrusfb_WaitBLT(u8 __iomem *regbase);
static void cirrusfb_BitBLT(u8 __iomem *regbase, int bits_per_pixel,
u_short curx, u_short cury,
u_short destx, u_short desty,
u_short width, u_short height,
u_short line_length);
static void cirrusfb_RectFill(u8 __iomem *regbase, int bits_per_pixel,
u_short x, u_short y,
u_short width, u_short height,
u32 fg_color, u32 bg_color,
u_short line_length, u_char blitmode);
static void bestclock(long freq, int *nom, int *den, int *div);
#ifdef CIRRUSFB_DEBUG
static void cirrusfb_dbg_reg_dump(struct fb_info *info, caddr_t regbase);
static void cirrusfb_dbg_print_regs(struct fb_info *info,
caddr_t regbase,
enum cirrusfb_dbg_reg_class reg_class, ...);
#endif /* CIRRUSFB_DEBUG */
/*** END PROTOTYPES ********************************************************/
/*****************************************************************************/
/*** BEGIN Interface Used by the World ***************************************/
/* Returns nonzero for the Laguna family (GD5462/64 and GD5465), which
 * is special-cased in several places of this driver. */
static inline int is_laguna(const struct cirrusfb_info *cinfo)
{
	switch (cinfo->btype) {
	case BT_LAGUNA:
	case BT_LAGUNAB:
		return 1;
	default:
		return 0;
	}
}
static int opencount;
/*--- Open /dev/fbx ---------------------------------------------------------*/
/* First open of the device switches the monitor on.
 * NOTE(review): opencount is a plain int with no locking; presumably
 * the fb core serializes open/release - confirm. */
static int cirrusfb_open(struct fb_info *info, int user)
{
	if (opencount++ == 0)
		switch_monitor(info->par, 1);
	return 0;
}
/*--- Close /dev/fbx --------------------------------------------------------*/
/* Last close of the device switches the monitor back off. */
static int cirrusfb_release(struct fb_info *info, int user)
{
	if (--opencount == 0)
		switch_monitor(info->par, 0);
	return 0;
}
/**** END Interface used by the World *************************************/
/****************************************************************************/
/**** BEGIN Hardware specific Routines **************************************/
/* Check if the MCLK is not a better clock source */
/* Check if the MCLK is not a better clock source.
 * Returns the divider to apply to MCLK to obtain VCLK (1 or 2) when
 * MCLK (or MCLK/2) is within 250 kHz of the requested frequency,
 * or 0 when the VCLK PLL should be used instead. */
static int cirrusfb_check_mclk(struct fb_info *info, long freq)
{
	struct cirrusfb_info *cinfo = info->par;
	long mclk = vga_rseq(cinfo->regbase, CL_SEQR1F) & 0x3f;

	/* Read MCLK value: low 6 bits of SR1F scale a 14318 kHz reference
	 * by n/8 */
	mclk = (14318 * mclk) >> 3;
	dev_dbg(info->device, "Read MCLK of %ld kHz\n", mclk);

	/* Determine if we should use MCLK instead of VCLK, and if so, what we
	 * should divide it by to get VCLK
	 */
	if (abs(freq - mclk) < 250) {
		dev_dbg(info->device, "Using VCLK = MCLK\n");
		return 1;
	} else if (abs(freq - (mclk / 2)) < 250) {
		dev_dbg(info->device, "Using VCLK = MCLK/2\n");
		return 2;
	}

	return 0;
}
/*
 * Validate the requested pixel clock against the board's per-depth
 * limits and decide whether DAC multiplexing or VCLK doubling is
 * required for the mode.
 * Side effects: sets cinfo->multiplexing and cinfo->doubleVCLK.
 * Returns 0 if the clock is achievable, -EINVAL otherwise.
 */
static int cirrusfb_check_pixclock(const struct fb_var_screeninfo *var,
				   struct fb_info *info)
{
	long freq;
	long maxclock;
	struct cirrusfb_info *cinfo = info->par;
	/* maxclock[] is indexed by bytes per pixel */
	unsigned maxclockidx = var->bits_per_pixel >> 3;

	/* convert from ps to kHz */
	freq = PICOS2KHZ(var->pixclock);

	dev_dbg(info->device, "desired pixclock: %ld kHz\n", freq);

	maxclock = cirrusfb_board_info[cinfo->btype].maxclock[maxclockidx];
	cinfo->multiplexing = 0;

	/* If the frequency is greater than we can support, we might be able
	 * to use multiplexing for the video mode */
	if (freq > maxclock) {
		dev_err(info->device,
			"Frequency greater than maxclock (%ld kHz)\n",
			maxclock);
		return -EINVAL;
	}
	/*
	 * Additional constraint: 8bpp uses DAC clock doubling to allow maximum
	 * pixel clock
	 */
	if (var->bits_per_pixel == 8) {
		switch (cinfo->btype) {
		case BT_ALPINE:
		case BT_SD64:
		case BT_PICASSO4:
			if (freq > 85500)
				cinfo->multiplexing = 1;
			break;
		case BT_GD5480:
			if (freq > 135100)
				cinfo->multiplexing = 1;
			break;

		default:
			break;
		}
	}

	/* If we have a 1MB 5434, we need to put ourselves in a mode where
	 * the VCLK is double the pixel clock. */
	cinfo->doubleVCLK = 0;
	if (cinfo->btype == BT_SD64 && info->fix.smem_len <= MB_ &&
	    var->bits_per_pixel == 16) {
		cinfo->doubleVCLK = 1;
	}

	return 0;
}
/*
 * fb_ops.fb_check_var: validate and adjust a requested video mode.
 * Fills in the RGB bitfield layout for the requested depth, clamps
 * virtual resolution and panning offsets to what fits in video memory,
 * and rejects modes whose pixel clock the chip cannot generate.
 * Returns 0 when the (possibly adjusted) *var is usable, else -EINVAL.
 */
static int cirrusfb_check_var(struct fb_var_screeninfo *var,
			      struct fb_info *info)
{
	int yres;
	/* memory size in pixels */
	unsigned pixels = info->screen_size * 8 / var->bits_per_pixel;
	struct cirrusfb_info *cinfo = info->par;

	switch (var->bits_per_pixel) {
	case 1:
		var->red.offset = 0;
		var->red.length = 1;
		var->green = var->red;
		var->blue = var->red;
		break;

	case 8:
		var->red.offset = 0;
		var->red.length = 8;
		var->green = var->red;
		var->blue = var->red;
		break;

	case 16:
		if (isPReP) {
			/* byte-swapped layout for PReP machines */
			var->red.offset = 2;
			/* NOTE(review): a negative bitfield offset looks
			 * suspicious - verify the PReP 16bpp layout before
			 * relying on it. */
			var->green.offset = -3;
			var->blue.offset = 8;
		} else {
			/* standard little-endian RGB565 */
			var->red.offset = 11;
			var->green.offset = 5;
			var->blue.offset = 0;
		}
		var->red.length = 5;
		var->green.length = 6;
		var->blue.length = 5;
		break;

	case 24:
		if (isPReP) {
			var->red.offset = 0;
			var->green.offset = 8;
			var->blue.offset = 16;
		} else {
			var->red.offset = 16;
			var->green.offset = 8;
			var->blue.offset = 0;
		}
		var->red.length = 8;
		var->green.length = 8;
		var->blue.length = 8;
		break;

	default:
		dev_dbg(info->device,
			"Unsupported bpp size: %d\n", var->bits_per_pixel);
		return -EINVAL;
	}

	if (var->xres_virtual < var->xres)
		var->xres_virtual = var->xres;
	/* use highest possible virtual resolution */
	if (var->yres_virtual == -1) {
		var->yres_virtual = pixels / var->xres_virtual;

		dev_info(info->device,
			 "virtual resolution set to maximum of %dx%d\n",
			 var->xres_virtual, var->yres_virtual);
	}
	if (var->yres_virtual < var->yres)
		var->yres_virtual = var->yres;

	if (var->xres_virtual * var->yres_virtual > pixels) {
		dev_err(info->device, "mode %dx%dx%d rejected... "
			"virtual resolution too high to fit into video memory!\n",
			var->xres_virtual, var->yres_virtual,
			var->bits_per_pixel);
		return -EINVAL;
	}

	/* NOTE(review): xoffset/yoffset appear to be unsigned in
	 * fb_var_screeninfo, which would make these two tests dead code -
	 * confirm against <linux/fb.h>. */
	if (var->xoffset < 0)
		var->xoffset = 0;
	if (var->yoffset < 0)
		var->yoffset = 0;

	/* truncate xoffset and yoffset to maximum if too high */
	/* NOTE(review): the "- 1" clamps one pixel short of the true
	 * maximum pan offset - looks like an off-by-one; confirm. */
	if (var->xoffset > var->xres_virtual - var->xres)
		var->xoffset = var->xres_virtual - var->xres - 1;
	if (var->yoffset > var->yres_virtual - var->yres)
		var->yoffset = var->yres_virtual - var->yres - 1;

	/* no right-justified MSBs, no transparency channel */
	var->red.msb_right =
	    var->green.msb_right =
	    var->blue.msb_right =
	    var->transp.offset =
	    var->transp.length =
	    var->transp.msb_right = 0;

	/* convert to the frame height actually scanned out */
	yres = var->yres;
	if (var->vmode & FB_VMODE_DOUBLE)
		yres *= 2;
	else if (var->vmode & FB_VMODE_INTERLACED)
		yres = (yres + 1) / 2;

	if (yres >= 1280) {
		dev_err(info->device, "ERROR: VerticalTotal >= 1280; "
			"special treatment required! (TODO)\n");
		return -EINVAL;
	}

	if (cirrusfb_check_pixclock(var, info))
		return -EINVAL;

	if (!is_laguna(cinfo))
		var->accel_flags = FB_ACCELF_TEXT;

	return 0;
}
/*
 * Program SR1F/SR1E so the pixel clock is derived from MCLK instead of
 * the VCLK PLL.  div == 0 keeps the VCLK PLL as the source, div == 1
 * selects MCLK, div == 2 selects MCLK/2 (matching the return values of
 * cirrusfb_check_mclk()).
 */
static void cirrusfb_set_mclk_as_source(const struct fb_info *info, int div)
{
	struct cirrusfb_info *cinfo = info->par;
	unsigned char old1f, old1e;

	assert(cinfo != NULL);
	/* bit 6 of SR1F selects MCLK as the VCLK source */
	old1f = vga_rseq(cinfo->regbase, CL_SEQR1F) & ~0x40;

	if (div) {
		dev_dbg(info->device, "Set %s as pixclock source.\n",
			(div == 2) ? "MCLK/2" : "MCLK");
		old1f |= 0x40;
		/* bit 0 of SR1E divides MCLK by two */
		old1e = vga_rseq(cinfo->regbase, CL_SEQR1E) & ~0x1;
		if (div == 2)
			old1e |= 1;

		vga_wseq(cinfo->regbase, CL_SEQR1E, old1e);
	}
	vga_wseq(cinfo->regbase, CL_SEQR1F, old1f);
}
/*************************************************************************
	cirrusfb_set_par_foo()

	Worker for cirrusfb_set_par(): translates the (already validated)
	mode in info->var into CRTC, sequencer, graphics-controller and
	hidden-DAC register values and programs them into the chip.
	Always returns 0.
**************************************************************************/
static int cirrusfb_set_par_foo(struct fb_info *info)
{
	struct cirrusfb_info *cinfo = info->par;
	struct fb_var_screeninfo *var = &info->var;
	u8 __iomem *regbase = cinfo->regbase;
	unsigned char tmp;
	int pitch;
	const struct cirrusfb_board_info_rec *bi;
	int hdispend, hsyncstart, hsyncend, htotal;
	int yres, vdispend, vsyncstart, vsyncend, vtotal;
	long freq;
	int nom, den, div;
	unsigned int control = 0, format = 0, threshold = 0;

	dev_dbg(info->device, "Requested mode: %dx%dx%d\n",
	       var->xres, var->yres, var->bits_per_pixel);

	/* fix.line_length and fix.visual follow directly from the depth */
	switch (var->bits_per_pixel) {
	case 1:
		info->fix.line_length = var->xres_virtual / 8;
		info->fix.visual = FB_VISUAL_MONO10;
		break;

	case 8:
		info->fix.line_length = var->xres_virtual;
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
		break;

	case 16:
	case 24:
		info->fix.line_length = var->xres_virtual *
					var->bits_per_pixel >> 3;
		info->fix.visual = FB_VISUAL_TRUECOLOR;
		break;
	}
	info->fix.type = FB_TYPE_PACKED_PIXELS;

	/* bring the chip into a known default state first */
	init_vgachip(info);

	bi = &cirrusfb_board_info[cinfo->btype];

	/* horizontal timings are programmed in character-clock (8 pixel)
	 * units */
	hsyncstart = var->xres + var->right_margin;
	hsyncend = hsyncstart + var->hsync_len;
	htotal = (hsyncend + var->left_margin) / 8;
	hdispend = var->xres / 8;
	hsyncstart = hsyncstart / 8;
	hsyncend = hsyncend / 8;

	vdispend = var->yres;
	vsyncstart = vdispend + var->lower_margin;
	vsyncend = vsyncstart + var->vsync_len;
	vtotal = vsyncend + var->upper_margin;

	/* doublescan doubles, interlace halves the vertical counts */
	if (var->vmode & FB_VMODE_DOUBLE) {
		vdispend *= 2;
		vsyncstart *= 2;
		vsyncend *= 2;
		vtotal *= 2;
	} else if (var->vmode & FB_VMODE_INTERLACED) {
		vdispend = (vdispend + 1) / 2;
		vsyncstart = (vsyncstart + 1) / 2;
		vsyncend = (vsyncend + 1) / 2;
		vtotal = (vtotal + 1) / 2;
	}
	yres = vdispend;
	/* >= 1024 active lines: halve the counts; the matching "double
	 * scanline counting" CRTC mode (0xc7) is selected further down */
	if (yres >= 1024) {
		vtotal /= 2;
		vsyncstart /= 2;
		vsyncend /= 2;
		vdispend /= 2;
	}

	/* CRTC registers hold value - 1 (total holds value - 2) */
	vdispend -= 1;
	vsyncstart -= 1;
	vsyncend -= 1;
	vtotal -= 2;

	if (cinfo->multiplexing) {
		htotal /= 2;
		hsyncstart /= 2;
		hsyncend /= 2;
		hdispend /= 2;
	}

	htotal -= 5;
	hdispend -= 1;
	hsyncstart += 1;
	hsyncend += 1;

	/* unlock register VGA_CRTC_H_TOTAL..CRT7 */
	vga_wcrt(regbase, VGA_CRTC_V_SYNC_END, 0x20);	/* previously: 0x00) */

	/* if debugging is enabled, all parameters get output before writing */
	dev_dbg(info->device, "CRT0: %d\n", htotal);
	vga_wcrt(regbase, VGA_CRTC_H_TOTAL, htotal);

	dev_dbg(info->device, "CRT1: %d\n", hdispend);
	vga_wcrt(regbase, VGA_CRTC_H_DISP, hdispend);

	dev_dbg(info->device, "CRT2: %d\n", var->xres / 8);
	vga_wcrt(regbase, VGA_CRTC_H_BLANK_START, var->xres / 8);

	/* + 128: Compatible read */
	dev_dbg(info->device, "CRT3: 128+%d\n", (htotal + 5) % 32);
	vga_wcrt(regbase, VGA_CRTC_H_BLANK_END,
		 128 + ((htotal + 5) % 32));

	dev_dbg(info->device, "CRT4: %d\n", hsyncstart);
	vga_wcrt(regbase, VGA_CRTC_H_SYNC_START, hsyncstart);

	/* sync end low 5 bits; bit 5 of h-blank-end lands in bit 7 here */
	tmp = hsyncend % 32;
	if ((htotal + 5) & 32)
		tmp += 128;
	dev_dbg(info->device, "CRT5: %d\n", tmp);
	vga_wcrt(regbase, VGA_CRTC_H_SYNC_END, tmp);

	dev_dbg(info->device, "CRT6: %d\n", vtotal & 0xff);
	vga_wcrt(regbase, VGA_CRTC_V_TOTAL, vtotal & 0xff);

	/* overflow register: bits 8/9 of the vertical counts */
	tmp = 16;	/* LineCompare bit #9 */
	if (vtotal & 256)
		tmp |= 1;
	if (vdispend & 256)
		tmp |= 2;
	if (vsyncstart & 256)
		tmp |= 4;
	if ((vdispend + 1) & 256)
		tmp |= 8;
	if (vtotal & 512)
		tmp |= 32;
	if (vdispend & 512)
		tmp |= 64;
	if (vsyncstart & 512)
		tmp |= 128;
	dev_dbg(info->device, "CRT7: %d\n", tmp);
	vga_wcrt(regbase, VGA_CRTC_OVERFLOW, tmp);

	tmp = 0x40;	/* LineCompare bit #8 */
	if ((vdispend + 1) & 512)
		tmp |= 0x20;
	if (var->vmode & FB_VMODE_DOUBLE)
		tmp |= 0x80;
	dev_dbg(info->device, "CRT9: %d\n", tmp);
	vga_wcrt(regbase, VGA_CRTC_MAX_SCAN, tmp);

	dev_dbg(info->device, "CRT10: %d\n", vsyncstart & 0xff);
	vga_wcrt(regbase, VGA_CRTC_V_SYNC_START, vsyncstart & 0xff);

	dev_dbg(info->device, "CRT11: 64+32+%d\n", vsyncend % 16);
	vga_wcrt(regbase, VGA_CRTC_V_SYNC_END, vsyncend % 16 + 64 + 32);

	dev_dbg(info->device, "CRT12: %d\n", vdispend & 0xff);
	vga_wcrt(regbase, VGA_CRTC_V_DISP_END, vdispend & 0xff);

	dev_dbg(info->device, "CRT15: %d\n", (vdispend + 1) & 0xff);
	vga_wcrt(regbase, VGA_CRTC_V_BLANK_START, (vdispend + 1) & 0xff);

	dev_dbg(info->device, "CRT16: %d\n", vtotal & 0xff);
	vga_wcrt(regbase, VGA_CRTC_V_BLANK_END, vtotal & 0xff);

	dev_dbg(info->device, "CRT18: 0xff\n");
	vga_wcrt(regbase, VGA_CRTC_LINE_COMPARE, 0xff);

	/* Cirrus extension CR1A: interlace enable plus high bits of the
	 * horizontal/vertical counts */
	tmp = 0;
	if (var->vmode & FB_VMODE_INTERLACED)
		tmp |= 1;
	if ((htotal + 5) & 64)
		tmp |= 16;
	if ((htotal + 5) & 128)
		tmp |= 32;
	if (vtotal & 256)
		tmp |= 64;
	if (vtotal & 512)
		tmp |= 128;

	dev_dbg(info->device, "CRT1a: %d\n", tmp);
	vga_wcrt(regbase, CL_CRT1A, tmp);

	freq = PICOS2KHZ(var->pixclock);
	/* 24bpp on Alpine/SD64 runs the pixel pipeline at 3x the dot clock */
	if (var->bits_per_pixel == 24)
		if (cinfo->btype == BT_ALPINE || cinfo->btype == BT_SD64)
			freq *= 3;
	if (cinfo->multiplexing)
		freq /= 2;
	if (cinfo->doubleVCLK)
		freq *= 2;

	bestclock(freq, &nom, &den, &div);

	dev_dbg(info->device, "VCLK freq: %ld kHz nom: %d den: %d div: %d\n",
		freq, nom, den, div);

	/* set VCLK0 */
	/* hardware RefClock: 14.31818 MHz */
	/* formula: VClk = (OSC * N) / (D * (1+P)) */
	/* Example: VClk = (14.31818 * 91) / (23 * (1+1)) = 28.325 MHz */

	if (cinfo->btype == BT_ALPINE || cinfo->btype == BT_PICASSO4 ||
	    cinfo->btype == BT_SD64) {
		/* if freq is close to mclk or mclk/2 select mclk
		 * as clock source
		 */
		int divMCLK = cirrusfb_check_mclk(info, freq);
		/* nom == 0 suppresses the VCLK programming below */
		if (divMCLK)
			nom = 0;
		cirrusfb_set_mclk_as_source(info, divMCLK);
	}
	if (is_laguna(cinfo)) {
		/* Laguna: disable tiling and capture the current framebuffer
		 * control/threshold values; the per-depth branches below OR
		 * their format bits in before the final writeback */
		long pcifc = fb_readl(cinfo->laguna_mmio + 0x3fc);
		unsigned char tile = fb_readb(cinfo->laguna_mmio + 0x407);
		unsigned short tile_control;

		if (cinfo->btype == BT_LAGUNAB) {
			tile_control = fb_readw(cinfo->laguna_mmio + 0x2c4);
			tile_control &= ~0x80;
			fb_writew(tile_control, cinfo->laguna_mmio + 0x2c4);
		}

		fb_writel(pcifc | 0x10000000l, cinfo->laguna_mmio + 0x3fc);
		fb_writeb(tile & 0x3f, cinfo->laguna_mmio + 0x407);
		control = fb_readw(cinfo->laguna_mmio + 0x402);
		threshold = fb_readw(cinfo->laguna_mmio + 0xea);
		control &= ~0x6800;
		format = 0;
		threshold &= 0xffc0 & 0x3fbf;
	}
	if (nom) {
		/* program VCLK numerator/denominator (post-divide in bit 0) */
		tmp = den << 1;
		if (div != 0)
			tmp |= 1;
		/* 6 bit denom; ONLY 5434!!! (bugged me 10 days) */
		if ((cinfo->btype == BT_SD64) ||
		    (cinfo->btype == BT_ALPINE) ||
		    (cinfo->btype == BT_GD5480))
			tmp |= 0x80;

		/* Laguna chipset has reversed clock registers */
		if (is_laguna(cinfo)) {
			vga_wseq(regbase, CL_SEQRE, tmp);
			vga_wseq(regbase, CL_SEQR1E, nom);
		} else {
			vga_wseq(regbase, CL_SEQRE, nom);
			vga_wseq(regbase, CL_SEQR1E, tmp);
		}
	}

	if (yres >= 1024)
		/* 1280x1024 */
		vga_wcrt(regbase, VGA_CRTC_MODE, 0xc7);
	else
		/* mode control: VGA_CRTC_START_HI enable, ROTATE(?), 16bit
		 * address wrap, no compat. */
		vga_wcrt(regbase, VGA_CRTC_MODE, 0xc3);

	/* don't know if it would hurt to also program this if no interlaced */
	/* mode is used, but I feel better this way.. :-) */
	if (var->vmode & FB_VMODE_INTERLACED)
		vga_wcrt(regbase, VGA_CRTC_REGS, htotal / 2);
	else
		vga_wcrt(regbase, VGA_CRTC_REGS, 0x00);	/* interlace control */

	/* adjust horizontal/vertical sync type (low/high), use VCLK3 */
	/* enable display memory & CRTC I/O address for color mode */
	tmp = 0x03 | 0xc;
	if (var->sync & FB_SYNC_HOR_HIGH_ACT)
		tmp |= 0x40;
	if (var->sync & FB_SYNC_VERT_HIGH_ACT)
		tmp |= 0x80;
	WGen(cinfo, VGA_MIS_W, tmp);

	/* text cursor on and start line */
	vga_wcrt(regbase, VGA_CRTC_CURSOR_START, 0);
	/* text cursor end line */
	vga_wcrt(regbase, VGA_CRTC_CURSOR_END, 31);

	/******************************************************
	 *
	 * 1 bpp
	 *
	 */

	/* programming for different color depths */
	if (var->bits_per_pixel == 1) {
		dev_dbg(info->device, "preparing for 1 bit deep display\n");
		vga_wgfx(regbase, VGA_GFX_MODE, 0);	/* mode register */

		/* SR07 */
		switch (cinfo->btype) {
		case BT_SD64:
		case BT_PICCOLO:
		case BT_PICASSO:
		case BT_SPECTRUM:
		case BT_PICASSO4:
		case BT_ALPINE:
		case BT_GD5480:
			vga_wseq(regbase, CL_SEQR7,
				 cinfo->multiplexing ?
					bi->sr07_1bpp_mux : bi->sr07_1bpp);
			break;
		case BT_LAGUNA:
		case BT_LAGUNAB:
			vga_wseq(regbase, CL_SEQR7,
				 vga_rseq(regbase, CL_SEQR7) & ~0x01);
			break;
		default:
			dev_warn(info->device, "unknown Board\n");
			break;
		}

		/* Extended Sequencer Mode */
		switch (cinfo->btype) {

		case BT_PICCOLO:
		case BT_SPECTRUM:
			/* evtl d0 bei 1 bit? avoid FIFO underruns..? */
			vga_wseq(regbase, CL_SEQRF, 0xb0);
			break;

		case BT_PICASSO:
			/* ## vorher d0 avoid FIFO underruns..? */
			vga_wseq(regbase, CL_SEQRF, 0xd0);
			break;

		case BT_SD64:
		case BT_PICASSO4:
		case BT_ALPINE:
		case BT_GD5480:
		case BT_LAGUNA:
		case BT_LAGUNAB:
			/* do nothing */
			break;

		default:
			dev_warn(info->device, "unknown Board\n");
			break;
		}

		/* pixel mask: pass-through for first plane */
		WGen(cinfo, VGA_PEL_MSK, 0x01);
		if (cinfo->multiplexing)
			/* hidden dac reg: 1280x1024 */
			WHDR(cinfo, 0x4a);
		else
			/* hidden dac: nothing */
			WHDR(cinfo, 0);
		/* memory mode: odd/even, ext. memory */
		vga_wseq(regbase, VGA_SEQ_MEMORY_MODE, 0x06);
		/* plane mask: only write to first plane */
		vga_wseq(regbase, VGA_SEQ_PLANE_WRITE, 0x01);
	}

	/******************************************************
	 *
	 * 8 bpp
	 *
	 */

	else if (var->bits_per_pixel == 8) {
		dev_dbg(info->device, "preparing for 8 bit deep display\n");
		switch (cinfo->btype) {
		case BT_SD64:
		case BT_PICCOLO:
		case BT_PICASSO:
		case BT_SPECTRUM:
		case BT_PICASSO4:
		case BT_ALPINE:
		case BT_GD5480:
			vga_wseq(regbase, CL_SEQR7,
				 cinfo->multiplexing ?
					bi->sr07_8bpp_mux : bi->sr07_8bpp);
			break;
		case BT_LAGUNA:
		case BT_LAGUNAB:
			vga_wseq(regbase, CL_SEQR7,
				vga_rseq(regbase, CL_SEQR7) | 0x01);
			threshold |= 0x10;
			break;
		default:
			dev_warn(info->device, "unknown Board\n");
			break;
		}

		switch (cinfo->btype) {
		case BT_PICCOLO:
		case BT_PICASSO:
		case BT_SPECTRUM:
			/* Fast Page-Mode writes */
			vga_wseq(regbase, CL_SEQRF, 0xb0);
			break;

		case BT_PICASSO4:
#ifdef CONFIG_ZORRO
			/* ### INCOMPLETE!! */
			vga_wseq(regbase, CL_SEQRF, 0xb8);
#endif
			/* fall through */
		case BT_ALPINE:
		case BT_SD64:
		case BT_GD5480:
		case BT_LAGUNA:
		case BT_LAGUNAB:
			/* do nothing */
			break;

		default:
			dev_warn(info->device, "unknown board\n");
			break;
		}

		/* mode register: 256 color mode */
		vga_wgfx(regbase, VGA_GFX_MODE, 64);
		if (cinfo->multiplexing)
			/* hidden dac reg: 1280x1024 */
			WHDR(cinfo, 0x4a);
		else
			/* hidden dac: nothing */
			WHDR(cinfo, 0);
	}

	/******************************************************
	 *
	 * 16 bpp
	 *
	 */

	else if (var->bits_per_pixel == 16) {
		dev_dbg(info->device, "preparing for 16 bit deep display\n");
		switch (cinfo->btype) {
		case BT_PICCOLO:
		case BT_SPECTRUM:
			vga_wseq(regbase, CL_SEQR7, 0x87);
			/* Fast Page-Mode writes */
			vga_wseq(regbase, CL_SEQRF, 0xb0);
			break;

		case BT_PICASSO:
			vga_wseq(regbase, CL_SEQR7, 0x27);
			/* Fast Page-Mode writes */
			vga_wseq(regbase, CL_SEQRF, 0xb0);
			break;

		case BT_SD64:
		case BT_PICASSO4:
		case BT_ALPINE:
			/* Extended Sequencer Mode: 256c col. mode */
			vga_wseq(regbase, CL_SEQR7,
				 cinfo->doubleVCLK ? 0xa3 : 0xa7);
			break;

		case BT_GD5480:
			vga_wseq(regbase, CL_SEQR7, 0x17);
			/* We already set SRF and SR1F */
			break;

		case BT_LAGUNA:
		case BT_LAGUNAB:
			vga_wseq(regbase, CL_SEQR7,
				vga_rseq(regbase, CL_SEQR7) & ~0x01);
			control |= 0x2000;
			format |= 0x1400;
			threshold |= 0x10;
			break;

		default:
			dev_warn(info->device, "unknown Board\n");
			break;
		}

		/* mode register: 256 color mode */
		vga_wgfx(regbase, VGA_GFX_MODE, 64);
#ifdef CONFIG_PCI
		WHDR(cinfo, cinfo->doubleVCLK ? 0xe1 : 0xc1);
#elif defined(CONFIG_ZORRO)
		/* FIXME: CONFIG_PCI and CONFIG_ZORRO may be defined both */
		WHDR(cinfo, 0xa0);	/* hidden dac reg: nothing special */
#endif
	}

	/******************************************************
	 *
	 * 24 bpp
	 *
	 */

	else if (var->bits_per_pixel == 24) {
		dev_dbg(info->device, "preparing for 24 bit deep display\n");
		switch (cinfo->btype) {
		case BT_PICCOLO:
		case BT_SPECTRUM:
			vga_wseq(regbase, CL_SEQR7, 0x85);
			/* Fast Page-Mode writes */
			vga_wseq(regbase, CL_SEQRF, 0xb0);
			break;

		case BT_PICASSO:
			vga_wseq(regbase, CL_SEQR7, 0x25);
			/* Fast Page-Mode writes */
			vga_wseq(regbase, CL_SEQRF, 0xb0);
			break;

		case BT_SD64:
		case BT_PICASSO4:
		case BT_ALPINE:
			/* Extended Sequencer Mode: 256c col. mode */
			vga_wseq(regbase, CL_SEQR7, 0xa5);
			break;

		case BT_GD5480:
			vga_wseq(regbase, CL_SEQR7, 0x15);
			/* We already set SRF and SR1F */
			break;

		case BT_LAGUNA:
		case BT_LAGUNAB:
			vga_wseq(regbase, CL_SEQR7,
				vga_rseq(regbase, CL_SEQR7) & ~0x01);
			control |= 0x4000;
			format |= 0x2400;
			threshold |= 0x20;
			break;

		default:
			dev_warn(info->device, "unknown Board\n");
			break;
		}

		/* mode register: 256 color mode */
		vga_wgfx(regbase, VGA_GFX_MODE, 64);
		/* hidden dac reg: 8-8-8 mode (24 or 32) */
		WHDR(cinfo, 0xc5);
	}

	/******************************************************
	 *
	 * unknown/unsupported bpp
	 *
	 */

	else
		dev_err(info->device,
			"What's this? requested color depth == %d.\n",
			var->bits_per_pixel);

	/* the CRTC offset (row pitch) is programmed in 8-byte units */
	pitch = info->fix.line_length >> 3;
	vga_wcrt(regbase, VGA_CRTC_OFFSET, pitch & 0xff);

	tmp = 0x22;
	if (pitch & 0x100)
		tmp |= 0x10;	/* offset overflow bit */

	/* screen start addr #16-18, fastpagemode cycles */
	vga_wcrt(regbase, CL_CRT1B, tmp);

	/* screen start address bit 19 */
	if (cirrusfb_board_info[cinfo->btype].scrn_start_bit19)
		vga_wcrt(regbase, CL_CRT1D, (pitch >> 9) & 1);

	if (is_laguna(cinfo)) {
		/* Laguna CR1E: bit 10 overflow of the timing counts */
		tmp = 0;
		if ((htotal + 5) & 256)
			tmp |= 128;
		if (hdispend & 256)
			tmp |= 64;
		if (hsyncstart & 256)
			tmp |= 48;
		if (vtotal & 1024)
			tmp |= 8;
		if (vdispend & 1024)
			tmp |= 4;
		if (vsyncstart & 1024)
			tmp |= 3;

		vga_wcrt(regbase, CL_CRT1E, tmp);
		dev_dbg(info->device, "CRT1e: %d\n", tmp);
	}

	/* pixel panning */
	vga_wattr(regbase, CL_AR33, 0);

	/* [ EGS: SetOffset(); ] */
	/* From SetOffset(): Turn on VideoEnable bit in Attribute controller */
	AttrOn(cinfo);

	if (is_laguna(cinfo)) {
		/* no tiles */
		fb_writew(control | 0x1000, cinfo->laguna_mmio + 0x402);
		fb_writew(format, cinfo->laguna_mmio + 0xc0);
		fb_writew(threshold, cinfo->laguna_mmio + 0xea);
	}
	/* finally, turn on everything - turn off "FullBandwidth" bit */
	/* also, set "DotClock%2" bit where requested */
	tmp = 0x01;

/*** FB_VMODE_CLOCK_HALVE in linux/fb.h not defined anymore ?
	if (var->vmode & FB_VMODE_CLOCK_HALVE)
		tmp |= 0x08;
*/

	vga_wseq(regbase, VGA_SEQ_CLOCK_MODE, tmp);
	dev_dbg(info->device, "CL_SEQR1: %d\n", tmp);

#ifdef CIRRUSFB_DEBUG
	cirrusfb_dbg_reg_dump(info, NULL);
#endif

	return 0;
}
/*
 * Hardware quirk: the registers must be programmed twice before the new
 * settings reliably take effect ("cirrusfb requires that you write the
 * registers twice for the settings to take..grr. -dte"), so run the
 * worker back to back and report the second pass' status.
 */
static int cirrusfb_set_par(struct fb_info *info)
{
	(void) cirrusfb_set_par_foo(info);

	return cirrusfb_set_par_foo(info);
}
/*
 * Set one color register.  For truecolor visuals only the 16-entry
 * software pseudo palette is filled; for 8 bpp the hardware CLUT is
 * programmed (DAC takes 6 bits per component, hence the >> 10).
 * Returns 0 on success, -EINVAL for an out-of-range register, 1 when a
 * truecolor index beyond the pseudo palette is requested.
 */
static int cirrusfb_setcolreg(unsigned regno, unsigned red, unsigned green,
			      unsigned blue, unsigned transp,
			      struct fb_info *info)
{
	struct cirrusfb_info *cinfo = info->par;

	if (regno > 255)
		return -EINVAL;

	if (info->fix.visual == FB_VISUAL_TRUECOLOR) {
		u32 entry;

		/* scale each 16-bit component down to its field width */
		red   >>= (16 - info->var.red.length);
		green >>= (16 - info->var.green.length);
		blue  >>= (16 - info->var.blue.length);

		if (regno >= 16)
			return 1;

		entry  = red   << info->var.red.offset;
		entry |= green << info->var.green.offset;
		entry |= blue  << info->var.blue.offset;
		cinfo->pseudo_palette[regno] = entry;
		return 0;
	}

	if (info->var.bits_per_pixel == 8)
		WClut(cinfo, regno, red >> 10, green >> 10, blue >> 10);

	return 0;
}
/*************************************************************************
	cirrusfb_pan_display()

	performs display panning - provided hardware permits this
**************************************************************************/
static int cirrusfb_pan_display(struct fb_var_screeninfo *var,
				struct fb_info *info)
{
	int xoffset;
	unsigned long base;
	unsigned char tmp, xpix;
	struct cirrusfb_info *cinfo = info->par;

	/* no range checks for xoffset and yoffset,   */
	/* as fb_pan_display has already done this */
	if (var->vmode & FB_VMODE_YWRAP)
		return -EINVAL;

	/* byte offset of the top-left visible pixel in video memory */
	xoffset = var->xoffset * info->var.bits_per_pixel / 8;

	base = var->yoffset * info->fix.line_length + xoffset;

	if (info->var.bits_per_pixel == 1) {
		/* base is already correct */
		xpix = (unsigned char) (var->xoffset % 8);
	} else {
		/* start address is programmed in dword units; the remaining
		 * sub-dword shift goes to the pixel-panning register */
		base /= 4;
		xpix = (unsigned char) ((xoffset % 4) * 2);
	}

	if (!is_laguna(cinfo))
		cirrusfb_WaitBLT(cinfo->regbase);

	/* lower 8 + 8 bits of screen start address */
	vga_wcrt(cinfo->regbase, VGA_CRTC_START_LO, base & 0xff);
	vga_wcrt(cinfo->regbase, VGA_CRTC_START_HI, (base >> 8) & 0xff);

	/* 0xf2 is %11110010, exclude tmp bits */
	tmp = vga_rcrt(cinfo->regbase, CL_CRT1B) & 0xf2;
	/* construct bits 16, 17 and 18 of screen start address */
	if (base & 0x10000)
		tmp |= 0x01;
	if (base & 0x20000)
		tmp |= 0x04;
	if (base & 0x40000)
		tmp |= 0x08;

	vga_wcrt(cinfo->regbase, CL_CRT1B, tmp);

	/* construct bit 19 of screen start address
	 * (Laguna keeps it in CR1D bits 3..4, others in bit 7) */
	if (cirrusfb_board_info[cinfo->btype].scrn_start_bit19) {
		tmp = vga_rcrt(cinfo->regbase, CL_CRT1D);
		if (is_laguna(cinfo))
			tmp = (tmp & ~0x18) | ((base >> 16) & 0x18);
		else
			tmp = (tmp & ~0x80) | ((base >> 12) & 0x80);
		vga_wcrt(cinfo->regbase, CL_CRT1D, tmp);
	}

	/* write pixel panning value to AR33; this does not quite work in 8bpp
	 *
	 * ### Piccolo..? Will this work?
	 */
	if (info->var.bits_per_pixel == 1)
		vga_wattr(cinfo->regbase, CL_AR33, xpix);

	return 0;
}
static int cirrusfb_blank(int blank_mode, struct fb_info *info)
{
	/*
	 * Blank the screen if blank_mode != 0, else unblank. If blank == NULL
	 * then the caller blanks by setting the CLUT (Color Look Up Table)
	 * to all black. Return 0 if blanking succeeded, != 0 if un-/blanking
	 * failed due to e.g. a video mode which doesn't support it.
	 * Implements VESA suspend and powerdown modes on hardware that
	 * supports disabling hsync/vsync:
	 *   blank_mode == 2: suspend vsync
	 *   blank_mode == 3: suspend hsync
	 *   blank_mode == 4: powerdown
	 */
	unsigned char val;
	struct cirrusfb_info *cinfo = info->par;
	int current_mode = cinfo->blank_mode;

	dev_dbg(info->device, "ENTER, blank mode = %d\n", blank_mode);

	/* nothing to do if suspended or already in the requested state */
	if (info->state != FBINFO_STATE_RUNNING ||
	    current_mode == blank_mode) {
		dev_dbg(info->device, "EXIT, returning 0\n");
		return 0;
	}

	/* Undo current */
	if (current_mode == FB_BLANK_NORMAL ||
	    current_mode == FB_BLANK_UNBLANK)
		/* clear "FullBandwidth" bit */
		val = 0;
	else
		/* set "FullBandwidth" bit */
		val = 0x20;

	/* read-modify-write SR1, preserving everything but bit 5 */
	val |= vga_rseq(cinfo->regbase, VGA_SEQ_CLOCK_MODE) & 0xdf;
	vga_wseq(cinfo->regbase, VGA_SEQ_CLOCK_MODE, val);

	/* map the VESA blank level onto the GRE hsync/vsync-off bits */
	switch (blank_mode) {
	case FB_BLANK_UNBLANK:
	case FB_BLANK_NORMAL:
		val = 0x00;
		break;
	case FB_BLANK_VSYNC_SUSPEND:
		val = 0x04;
		break;
	case FB_BLANK_HSYNC_SUSPEND:
		val = 0x02;
		break;
	case FB_BLANK_POWERDOWN:
		val = 0x06;
		break;
	default:
		dev_dbg(info->device, "EXIT, returning 1\n");
		return 1;
	}

	vga_wgfx(cinfo->regbase, CL_GRE, val);

	cinfo->blank_mode = blank_mode;
	dev_dbg(info->device, "EXIT, returning 0\n");

	/* Let fbcon do a soft blank for us */
	return (blank_mode == FB_BLANK_NORMAL) ? 1 : 0;
}
/**** END Hardware specific Routines **************************************/
/****************************************************************************/
/**** BEGIN Internal Routines ***********************************************/
/*
 * Bring the chip into a sane default state: reset the board, unlock the
 * Cirrus extension registers and program the VGA-core defaults that every
 * mode relies on.  Called from cirrusfb_set_par_foo() before mode setup.
 */
static void init_vgachip(struct fb_info *info)
{
	struct cirrusfb_info *cinfo = info->par;
	const struct cirrusfb_board_info_rec *bi;

	assert(cinfo != NULL);

	bi = &cirrusfb_board_info[cinfo->btype];

	/* reset board globally */
	switch (cinfo->btype) {
	case BT_PICCOLO:
		WSFR(cinfo, 0x01);
		udelay(500);
		WSFR(cinfo, 0x51);
		udelay(500);
		break;
	case BT_PICASSO:
		WSFR2(cinfo, 0xff);
		udelay(500);
		break;
	case BT_SD64:
	case BT_SPECTRUM:
		WSFR(cinfo, 0x1f);
		udelay(500);
		WSFR(cinfo, 0x4f);
		udelay(500);
		break;
	case BT_PICASSO4:
		/* disable flickerfixer */
		vga_wcrt(cinfo->regbase, CL_CRT51, 0x00);
		mdelay(100);
		/* mode */
		vga_wgfx(cinfo->regbase, CL_GR31, 0x00);
	case BT_GD5480:	/* fall through */
		/* from Klaus' NetBSD driver: */
		vga_wgfx(cinfo->regbase, CL_GR2F, 0x00);
	case BT_ALPINE:	/* fall through */
		/* put blitter into 542x compat */
		vga_wgfx(cinfo->regbase, CL_GR33, 0x00);
		break;

	case BT_LAGUNA:
	case BT_LAGUNAB:
		/* Nothing to do to reset the board. */
		break;

	default:
		dev_err(info->device, "Warning: Unknown board type\n");
		break;
	}

	/* make sure RAM size set by this point */
	assert(info->screen_size > 0);

	/* the P4 is not fully initialized here; I rely on it having been */
	/* inited under AmigaOS already, which seems to work just fine    */
	/* (Klaus advised to do it this way) */

	if (cinfo->btype != BT_PICASSO4) {
		WGen(cinfo, CL_VSSM, 0x10);	/* EGS: 0x16 */
		WGen(cinfo, CL_POS102, 0x01);
		WGen(cinfo, CL_VSSM, 0x08);	/* EGS: 0x0e */

		if (cinfo->btype != BT_SD64)
			WGen(cinfo, CL_VSSM2, 0x01);

		/* reset sequencer logic */
		vga_wseq(cinfo->regbase, VGA_SEQ_RESET, 0x03);

		/* FullBandwidth (video off) and 8/9 dot clock */
		vga_wseq(cinfo->regbase, VGA_SEQ_CLOCK_MODE, 0x21);

		/* "magic cookie" - doesn't make any sense to me.. */
/*		vga_wgfx(cinfo->regbase, CL_GRA, 0xce); */
		/* unlock all extension registers */
		vga_wseq(cinfo->regbase, CL_SEQR6, 0x12);

		/* per-chip extended sequencer (DRAM control) defaults */
		switch (cinfo->btype) {
		case BT_GD5480:
			vga_wseq(cinfo->regbase, CL_SEQRF, 0x98);
			break;
		case BT_ALPINE:
		case BT_LAGUNA:
		case BT_LAGUNAB:
			break;
		case BT_SD64:
#ifdef CONFIG_ZORRO
			vga_wseq(cinfo->regbase, CL_SEQRF, 0xb8);
#endif
			break;
		default:
			vga_wseq(cinfo->regbase, CL_SEQR16, 0x0f);
			vga_wseq(cinfo->regbase, CL_SEQRF, 0xb0);
			break;
		}
	}
	/* plane mask: nothing */
	vga_wseq(cinfo->regbase, VGA_SEQ_PLANE_WRITE, 0xff);
	/* character map select: doesn't even matter in gx mode */
	vga_wseq(cinfo->regbase, VGA_SEQ_CHARACTER_MAP, 0x00);
	/* memory mode: chain4, ext. memory */
	vga_wseq(cinfo->regbase, VGA_SEQ_MEMORY_MODE, 0x0a);

	/* controller-internal base address of video memory */
	if (bi->init_sr07)
		vga_wseq(cinfo->regbase, CL_SEQR7, bi->sr07);

	/* vga_wseq(cinfo->regbase, CL_SEQR8, 0x00); */
	/* EEPROM control: shouldn't be necessary to write to this at all.. */

	/* graphics cursor X position (incomplete; position gives rem. 3 bits */
	vga_wseq(cinfo->regbase, CL_SEQR10, 0x00);
	/* graphics cursor Y position (..."... ) */
	vga_wseq(cinfo->regbase, CL_SEQR11, 0x00);
	/* graphics cursor attributes */
	vga_wseq(cinfo->regbase, CL_SEQR12, 0x00);
	/* graphics cursor pattern address */
	vga_wseq(cinfo->regbase, CL_SEQR13, 0x00);

	/* writing these on a P4 might give problems..  */
	if (cinfo->btype != BT_PICASSO4) {
		/* configuration readback and ext. color */
		vga_wseq(cinfo->regbase, CL_SEQR17, 0x00);
		/* signature generator */
		vga_wseq(cinfo->regbase, CL_SEQR18, 0x02);
	}

	/* Screen A preset row scan: none */
	vga_wcrt(cinfo->regbase, VGA_CRTC_PRESET_ROW, 0x00);
	/* Text cursor start: disable text cursor */
	vga_wcrt(cinfo->regbase, VGA_CRTC_CURSOR_START, 0x20);
	/* Text cursor end: - */
	vga_wcrt(cinfo->regbase, VGA_CRTC_CURSOR_END, 0x00);
	/* text cursor location high: 0 */
	vga_wcrt(cinfo->regbase, VGA_CRTC_CURSOR_HI, 0x00);
	/* text cursor location low: 0 */
	vga_wcrt(cinfo->regbase, VGA_CRTC_CURSOR_LO, 0x00);

	/* Underline Row scanline: - */
	vga_wcrt(cinfo->regbase, VGA_CRTC_UNDERLINE, 0x00);

	/* ### add 0x40 for text modes with > 30 MHz pixclock */
	/* ext. display controls: ext.adr. wrap */
	vga_wcrt(cinfo->regbase, CL_CRT1B, 0x02);

	/* Set/Reset registes: - */
	vga_wgfx(cinfo->regbase, VGA_GFX_SR_VALUE, 0x00);
	/* Set/Reset enable: - */
	vga_wgfx(cinfo->regbase, VGA_GFX_SR_ENABLE, 0x00);
	/* Color Compare: - */
	vga_wgfx(cinfo->regbase, VGA_GFX_COMPARE_VALUE, 0x00);
	/* Data Rotate: - */
	vga_wgfx(cinfo->regbase, VGA_GFX_DATA_ROTATE, 0x00);
	/* Read Map Select: - */
	vga_wgfx(cinfo->regbase, VGA_GFX_PLANE_READ, 0x00);
	/* Mode: conf. for 16/4/2 color mode, no odd/even, read/write mode 0 */
	vga_wgfx(cinfo->regbase, VGA_GFX_MODE, 0x00);
	/* Miscellaneous: memory map base address, graphics mode */
	vga_wgfx(cinfo->regbase, VGA_GFX_MISC, 0x01);
	/* Color Don't care: involve all planes */
	vga_wgfx(cinfo->regbase, VGA_GFX_COMPARE_MASK, 0x0f);
	/* Bit Mask: no mask at all */
	vga_wgfx(cinfo->regbase, VGA_GFX_BIT_MASK, 0xff);

	if (cinfo->btype == BT_ALPINE || cinfo->btype == BT_SD64 ||
	    is_laguna(cinfo))
		/* (5434 can't have bit 3 set for bitblt) */
		vga_wgfx(cinfo->regbase, CL_GRB, 0x20);
	else
		/* Graphics controller mode extensions: finer granularity,
		 * 8byte data latches
		 */
		vga_wgfx(cinfo->regbase, CL_GRB, 0x28);

	vga_wgfx(cinfo->regbase, CL_GRC, 0xff);	/* Color Key compare: - */
	vga_wgfx(cinfo->regbase, CL_GRD, 0x00);	/* Color Key compare mask: - */
	vga_wgfx(cinfo->regbase, CL_GRE, 0x00);	/* Miscellaneous control: - */

	/* Background color byte 1: - */
	/* vga_wgfx (cinfo->regbase, CL_GR10, 0x00); */
	/* vga_wgfx (cinfo->regbase, CL_GR11, 0x00); */

	/* Attribute Controller palette registers: "identity mapping" */
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTE0, 0x00);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTE1, 0x01);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTE2, 0x02);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTE3, 0x03);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTE4, 0x04);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTE5, 0x05);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTE6, 0x06);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTE7, 0x07);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTE8, 0x08);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTE9, 0x09);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTEA, 0x0a);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTEB, 0x0b);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTEC, 0x0c);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTED, 0x0d);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTEE, 0x0e);
	vga_wattr(cinfo->regbase, VGA_ATC_PALETTEF, 0x0f);

	/* Attribute Controller mode: graphics mode */
	vga_wattr(cinfo->regbase, VGA_ATC_MODE, 0x01);
	/* Overscan color reg.: reg. 0 */
	vga_wattr(cinfo->regbase, VGA_ATC_OVERSCAN, 0x00);
	/* Color Plane enable: Enable all 4 planes */
	vga_wattr(cinfo->regbase, VGA_ATC_PLANE_ENABLE, 0x0f);
	/* Color Select: - */
	vga_wattr(cinfo->regbase, VGA_ATC_COLOR_PAGE, 0x00);

	WGen(cinfo, VGA_PEL_MSK, 0xff);	/* Pixel mask: no mask */

	/* BLT Start/status: Blitter reset */
	vga_wgfx(cinfo->regbase, CL_GR31, 0x04);
	/* - " -	   : "end-of-reset" */
	vga_wgfx(cinfo->regbase, CL_GR31, 0x00);

	/* misc... */
	WHDR(cinfo, 0);	/* Hidden DAC register: - */
	return;
}
/*
 * Switch the monitor output on Zorro (Amiga) boards via the board's
 * special function register; a no-op everywhere else and for boards
 * that have nothing to switch.
 */
static void switch_monitor(struct cirrusfb_info *cinfo, int on)
{
#ifdef CONFIG_ZORRO /* only works on Zorro boards */
	static int IsOn = 0;	/* XXX not ok for multiple boards */

	if (cinfo->btype == BT_PICASSO4)
		return;		/* nothing to switch */
	if (cinfo->btype == BT_ALPINE)
		return;		/* nothing to switch */
	if (cinfo->btype == BT_GD5480)
		return;		/* nothing to switch */
	if (cinfo->btype == BT_PICASSO) {
		/* NOTE(review): IsOn is never written anywhere, so this
		 * condition reduces to plain "on" — confirm whether the
		 * toggle state was meant to be tracked. */
		if ((on && !IsOn) || (!on && IsOn))
			WSFR(cinfo, 0xff);
		return;
	}
	if (on) {
		switch (cinfo->btype) {
		case BT_SD64:
			WSFR(cinfo, cinfo->SFR | 0x21);
			break;
		case BT_PICCOLO:
			WSFR(cinfo, cinfo->SFR | 0x28);
			break;
		case BT_SPECTRUM:
			WSFR(cinfo, 0x6f);
			break;
		default: /* do nothing */ break;
		}
	} else {
		switch (cinfo->btype) {
		case BT_SD64:
			WSFR(cinfo, cinfo->SFR & 0xde);
			break;
		case BT_PICCOLO:
			WSFR(cinfo, cinfo->SFR & 0xd7);
			break;
		case BT_SPECTRUM:
			WSFR(cinfo, 0x4f);
			break;
		default: /* do nothing */
			break;
		}
	}
#endif /* CONFIG_ZORRO */
}
/******************************************/
/* Linux 2.6-style accelerated functions */
/******************************************/
/*
 * Wait for the accelerator to go idle.  Laguna boards need no explicit
 * wait here; on the other chips, spin until the blitter status bits in
 * GR31 report idle.
 */
static int cirrusfb_sync(struct fb_info *info)
{
	struct cirrusfb_info *cinfo = info->par;

	if (is_laguna(cinfo))
		return 0;

	while (vga_rgfx(cinfo->regbase, CL_GR31) & 0x03)
		cpu_relax();

	return 0;
}
/*
 * Accelerated solid fill.  Falls back to cfb_fillrect() when acceleration
 * is disabled; otherwise clips the rectangle against the virtual screen
 * and hands it to the blitter.
 *
 * BUG FIX: the clipped rectangle in `modded` was computed but the
 * unclipped `region->` values were passed to cirrusfb_RectFill(), making
 * the clamping dead code.  Pass the clipped values instead (dx/dy are
 * unchanged by the clamp, only width/height can shrink).
 */
static void cirrusfb_fillrect(struct fb_info *info,
			      const struct fb_fillrect *region)
{
	struct fb_fillrect modded;
	int vxres, vyres;
	struct cirrusfb_info *cinfo = info->par;
	int m = info->var.bits_per_pixel;
	/* resolve the color through the pseudo palette for truecolor */
	u32 color = (info->fix.visual == FB_VISUAL_TRUECOLOR) ?
		cinfo->pseudo_palette[region->color] : region->color;

	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, region);
		return;
	}

	vxres = info->var.xres_virtual;
	vyres = info->var.yres_virtual;

	memcpy(&modded, region, sizeof(struct fb_fillrect));

	/* reject empty or fully off-screen rectangles */
	if (!modded.width || !modded.height ||
	    modded.dx >= vxres || modded.dy >= vyres)
		return;

	/* clip against the virtual screen */
	if (modded.dx + modded.width > vxres)
		modded.width = vxres - modded.dx;
	if (modded.dy + modded.height > vyres)
		modded.height = vyres - modded.dy;

	cirrusfb_RectFill(cinfo->regbase,
			  info->var.bits_per_pixel,
			  (modded.dx * m) / 8, modded.dy,
			  (modded.width * m) / 8, modded.height,
			  color, color,
			  info->fix.line_length, 0x40);
}
/*
 * Accelerated screen-to-screen copy.  Falls back to cfb_copyarea() when
 * acceleration is disabled; otherwise clips the area against the virtual
 * screen and hands it to the blitter.
 *
 * BUG FIX: the clipped area in `modded` was computed but the unclipped
 * `area->` values were passed to cirrusfb_BitBLT(), making the clamping
 * dead code.  Pass the clipped values instead (sx/sy/dx/dy are unchanged
 * by the clamp, only width/height can shrink).
 */
static void cirrusfb_copyarea(struct fb_info *info,
			      const struct fb_copyarea *area)
{
	struct fb_copyarea modded;
	u32 vxres, vyres;
	struct cirrusfb_info *cinfo = info->par;
	int m = info->var.bits_per_pixel;

	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_copyarea(info, area);
		return;
	}

	vxres = info->var.xres_virtual;
	vyres = info->var.yres_virtual;

	memcpy(&modded, area, sizeof(struct fb_copyarea));

	/* reject empty copies and fully off-screen source/destination */
	if (!modded.width || !modded.height ||
	    modded.sx >= vxres || modded.sy >= vyres ||
	    modded.dx >= vxres || modded.dy >= vyres)
		return;

	/* clip both source and destination against the virtual screen */
	if (modded.sx + modded.width > vxres)
		modded.width = vxres - modded.sx;
	if (modded.dx + modded.width > vxres)
		modded.width = vxres - modded.dx;
	if (modded.sy + modded.height > vyres)
		modded.height = vyres - modded.sy;
	if (modded.dy + modded.height > vyres)
		modded.height = vyres - modded.dy;

	cirrusfb_BitBLT(cinfo->regbase, info->var.bits_per_pixel,
			(modded.sx * m) / 8, modded.sy,
			(modded.dx * m) / 8, modded.dy,
			(modded.width * m) / 8, modded.height,
			info->fix.line_length);
}
/*
 * Blit a 1-bit-deep image using the blitter's color-expansion mode; any
 * other depth (or disabled acceleration) falls back to cfb_imageblit().
 */
static void cirrusfb_imageblit(struct fb_info *info,
			       const struct fb_image *image)
{
	struct cirrusfb_info *cinfo = info->par;
	/* 24 bpp needs the transparent color-expand op (0xc), else 0x4 */
	unsigned char op = (info->var.bits_per_pixel == 24) ? 0xc : 0x4;

	if (info->state != FBINFO_STATE_RUNNING)
		return;
	/* Alpine/SD64 does not work at 24bpp ??? */
	if (info->flags & FBINFO_HWACCEL_DISABLED || image->depth != 1)
		cfb_imageblit(info, image);
	else if ((cinfo->btype == BT_ALPINE || cinfo->btype == BT_SD64) &&
		  op == 0xc)
		cfb_imageblit(info, image);
	else {
		/* bytes in the source monochrome bitmap (rows padded to 8) */
		unsigned size = ((image->width + 7) >> 3) * image->height;
		int m = info->var.bits_per_pixel;
		u32 fg, bg;

		/* 8 bpp uses raw palette indices; truecolor goes through
		 * the pseudo palette */
		if (info->var.bits_per_pixel == 8) {
			fg = image->fg_color;
			bg = image->bg_color;
		} else {
			fg = ((u32 *)(info->pseudo_palette))[image->fg_color];
			bg = ((u32 *)(info->pseudo_palette))[image->bg_color];
		}
		if (info->var.bits_per_pixel == 24) {
			/* clear background first */
			cirrusfb_RectFill(cinfo->regbase,
					  info->var.bits_per_pixel,
					  (image->dx * m) / 8, image->dy,
					  (image->width * m) / 8,
					  image->height,
					  bg, bg,
					  info->fix.line_length, 0x40);
		}
		cirrusfb_RectFill(cinfo->regbase,
				  info->var.bits_per_pixel,
				  (image->dx * m) / 8, image->dy,
				  (image->width * m) / 8, image->height,
				  fg, bg,
				  info->fix.line_length, op);
		/* feed the monochrome source bitmap to the blitter through
		 * the framebuffer aperture (system-to-screen expansion) */
		memcpy(info->screen_base, image->data, size);
	}
}
#ifdef CONFIG_PPC_PREP
#define PREP_VIDEO_BASE ((volatile unsigned long) 0xC0000000)
#define PREP_IO_BASE ((volatile unsigned char *) 0x80000000)
/*
 * PReP boards keep the frame buffer and the VGA I/O window at fixed
 * physical addresses; report both to the caller.
 */
static void get_prep_addrs(unsigned long *display, unsigned long *registers)
{
	*registers = (unsigned long) PREP_IO_BASE;
	*display = PREP_VIDEO_BASE;
}
#ifdef CONFIG_PCI
static int release_io_ports;
/* Pulled the logic from XFree86 Cirrus driver to get the memory size,
 * based on the DRAM bandwidth bit and DRAM bank switching bit. This
 * works with 1MB, 2MB and 4MB configurations (which the Motorola boards
 * seem to have. */
static unsigned int __devinit cirrusfb_get_memsize(struct fb_info *info,
						   u8 __iomem *regbase)
{
	unsigned long mem;
	unsigned char srf;
	struct cirrusfb_info *cinfo = info->par;

	if (is_laguna(cinfo)) {
		/* Laguna: SR14 low 3 bits encode (megabytes - 1) */
		unsigned char sr14 = vga_rseq(regbase, CL_SEQR14);

		return ((sr14 & 7) + 1) << 20;
	}

	srf = vga_rseq(regbase, CL_SEQRF);
	switch (srf & 0x18) {
	case 0x08:
		mem = 512 * 1024;
		break;
	case 0x10:
		mem = 1024 * 1024;
		break;
	case 0x18:
		/* 64-bit DRAM data bus width; assume 2MB.
		 * Also indicates 2MB memory on the 5430.
		 */
		mem = 2048 * 1024;
		break;
	default:
		dev_warn(info->device, "Unknown memory size!\n");
		mem = 1024 * 1024;
		break;
	}

	/* If DRAM bank switching is enabled, there must be
	 * twice as much memory installed. (4MB on the 5434)
	 */
	if (cinfo->btype != BT_ALPINE && (srf & 0x80) != 0)
		mem *= 2;

	/* TODO: Handling of GD5446/5480 (see XF86 sources ...) */
	return mem;
}
/*
 * Work out which PCI BARs hold the frame buffer and the register
 * aperture.  Best guess: if BAR0 is an I/O port range the framebuffer
 * must be behind BAR1, otherwise BAR0 holds the framebuffer.
 */
static void get_pci_addrs(const struct pci_dev *pdev,
			  unsigned long *display, unsigned long *registers)
{
	assert(pdev != NULL);
	assert(display != NULL);
	assert(registers != NULL);

	if (pci_resource_flags(pdev, 0) & IORESOURCE_IO) {
		*registers = pci_resource_start(pdev, 0);
		*display = pci_resource_start(pdev, 1);
	} else {
		*registers = pci_resource_start(pdev, 1);
		*display = pci_resource_start(pdev, 0);
	}

	assert(*display != 0);
}
/*
 * Tear down the PCI mappings and regions claimed at probe time.
 *
 * BUG FIX: the NULL test was inverted (== NULL), so iounmap() was only
 * ever called with a NULL pointer (a no-op) and a real Laguna MMIO
 * mapping was never unmapped, leaking the ioremap on driver removal.
 */
static void cirrusfb_pci_unmap(struct fb_info *info)
{
	struct pci_dev *pdev = to_pci_dev(info->device);
	struct cirrusfb_info *cinfo = info->par;

	if (cinfo->laguna_mmio != NULL)
		iounmap(cinfo->laguna_mmio);
	iounmap(info->screen_base);
#if 0 /* if system didn't claim this region, we would... */
	release_mem_region(0xA0000, 65535);
#endif
	if (release_io_ports)
		release_region(0x3C0, 32);
	pci_release_regions(pdev);
}
#endif /* CONFIG_PCI */
#ifdef CONFIG_ZORRO
/*
 * Tear down the Zorro mappings.  Apertures above the 16MB Zorro II
 * space were ioremap()ed at probe time and must be unmapped; lower
 * regions were used directly and need no iounmap.
 */
static void cirrusfb_zorro_unmap(struct fb_info *info)
{
	struct zorro_dev *zdev = to_zorro_dev(info->device);
	struct cirrusfb_info *cinfo = info->par;

	if (info->fix.smem_start > 16 * MB_)
		iounmap(info->screen_base);
	if (info->fix.mmio_start > 16 * MB_)
		iounmap(cinfo->regbase);

	zorro_release_device(zdev);
}
#endif /* CONFIG_ZORRO */
/* fb_ops dispatch table: hooks the frame buffer core into the cirrusfb
 * implementations defined earlier in this file. */
static struct fb_ops cirrusfb_ops = {
	.owner = THIS_MODULE,
	.fb_open = cirrusfb_open,
	.fb_release = cirrusfb_release,
	.fb_setcolreg = cirrusfb_setcolreg,
	.fb_check_var = cirrusfb_check_var,
	.fb_set_par = cirrusfb_set_par,
	.fb_pan_display = cirrusfb_pan_display,
	.fb_blank = cirrusfb_blank,
	.fb_fillrect = cirrusfb_fillrect,
	.fb_copyarea = cirrusfb_copyarea,
	.fb_sync = cirrusfb_sync,
	.fb_imageblit = cirrusfb_imageblit,
};
/*
 * Fill in the bus-independent parts of the fb_info: acceleration flags,
 * fb_ops, fixed-info fields and the colormap.  Always returns 0.
 */
static int __devinit cirrusfb_set_fbinfo(struct fb_info *info)
{
	struct cirrusfb_info *cinfo = info->par;
	struct fb_var_screeninfo *var = &info->var;

	info->pseudo_palette = cinfo->pseudo_palette;
	info->flags = FBINFO_DEFAULT
		    | FBINFO_HWACCEL_XPAN
		    | FBINFO_HWACCEL_YPAN
		    | FBINFO_HWACCEL_FILLRECT
		    | FBINFO_HWACCEL_IMAGEBLIT
		    | FBINFO_HWACCEL_COPYAREA;
	/* acceleration is disabled when "noaccel" is set and on Laguna boards */
	if (noaccel || is_laguna(cinfo)) {
		info->flags |= FBINFO_HWACCEL_DISABLED;
		info->fix.accel = FB_ACCEL_NONE;
	} else
		info->fix.accel = FB_ACCEL_CIRRUS_ALPINE;

	info->fbops = &cirrusfb_ops;

	/* GD5480: shift the mapping base for 16/32 bpp
	 * (presumably separate apertures per depth - TODO confirm) */
	if (cinfo->btype == BT_GD5480) {
		if (var->bits_per_pixel == 16)
			info->screen_base += 1 * MB_;
		if (var->bits_per_pixel == 32)
			info->screen_base += 2 * MB_;
	}

	/* Fill fix common fields */
	strlcpy(info->fix.id, cirrusfb_board_info[cinfo->btype].name,
		sizeof(info->fix.id));

	/* monochrome: only 1 memory plane */
	/* 8 bit and above: Use whole memory area */
	info->fix.smem_len = info->screen_size;
	if (var->bits_per_pixel == 1)
		info->fix.smem_len /= 4;
	info->fix.type_aux = 0;
	info->fix.xpanstep = 1;
	info->fix.ypanstep = 1;
	info->fix.ywrapstep = 0;

	/* FIXME: map region at 0xB8000 if available, fill in here */
	info->fix.mmio_len = 0;

	fb_alloc_cmap(&info->cmap, 256, 0);

	return 0;
}
/*
 * Final, bus-independent part of probing: fill in the fb_info, pick an
 * initial video mode and register the frame buffer with the core.
 * Returns 0 on success or a negative errno; on error the colormap
 * allocated by cirrusfb_set_fbinfo() is released again.
 */
static int __devinit cirrusfb_register(struct fb_info *info)
{
	struct cirrusfb_info *cinfo = info->par;
	int err;

	/* sanity checks */
	assert(cinfo->btype != BT_NONE);

	/* set all the vital stuff */
	cirrusfb_set_fbinfo(info);

	dev_dbg(info->device, "(RAM start set to: 0x%p)\n", info->screen_base);

	/* fb_find_mode() returns 0 only when no usable mode was found */
	err = fb_find_mode(&info->var, info, mode_option, NULL, 0, NULL, 8);
	if (!err) {
		dev_dbg(info->device, "wrong initial video mode\n");
		err = -EINVAL;
		goto err_dealloc_cmap;
	}

	info->var.activate = FB_ACTIVATE_NOW;

	err = cirrusfb_check_var(&info->var, info);
	if (err < 0) {
		/* should never happen */
		dev_dbg(info->device,
			"choking on default var... umm, no good.\n");
		goto err_dealloc_cmap;
	}

	err = register_framebuffer(info);
	if (err < 0) {
		dev_err(info->device,
			"could not register fb device; err = %d!\n", err);
		goto err_dealloc_cmap;
	}

	return 0;

err_dealloc_cmap:
	fb_dealloc_cmap(&info->cmap);
	return err;
}
/*
 * Common teardown shared by the PCI and Zorro remove paths: unregister
 * the frame buffer, free the colormap, and let the bus-specific
 * ->unmap() hook undo the mappings before releasing the fb_info.
 */
static void __devexit cirrusfb_cleanup(struct fb_info *info)
{
	struct cirrusfb_info *cinfo = info->par;

	switch_monitor(cinfo, 0);
	unregister_framebuffer(info);
	fb_dealloc_cmap(&info->cmap);
	dev_dbg(info->device, "Framebuffer unregistered\n");
	cinfo->unmap(info);
	framebuffer_release(info);
}
#ifdef CONFIG_PCI
/*
 * PCI probe: enable the device, discover the display aperture and
 * register addresses, reserve the regions, map the video RAM and hand
 * over to the common cirrusfb_register().  Errors unwind in reverse
 * order through the labels at the bottom.
 */
static int __devinit cirrusfb_pci_register(struct pci_dev *pdev,
					   const struct pci_device_id *ent)
{
	struct cirrusfb_info *cinfo;
	struct fb_info *info;
	unsigned long board_addr, board_size;
	int ret;

	ret = pci_enable_device(pdev);
	if (ret < 0) {
		printk(KERN_ERR "cirrusfb: Cannot enable PCI device\n");
		goto err_out;
	}

	info = framebuffer_alloc(sizeof(struct cirrusfb_info), &pdev->dev);
	if (!info) {
		printk(KERN_ERR "cirrusfb: could not allocate memory\n");
		ret = -ENOMEM;
		goto err_out;
	}

	cinfo = info->par;
	/* the board type was stashed in the PCI id table entry */
	cinfo->btype = (enum cirrus_board) ent->driver_data;

	dev_dbg(info->device,
		" Found PCI device, base address 0 is 0x%Lx, btype set to %d\n",
		(unsigned long long)pdev->resource[0].start, cinfo->btype);
	dev_dbg(info->device, " base address 1 is 0x%Lx\n",
		(unsigned long long)pdev->resource[1].start);

	if (isPReP) {
		pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, 0x00000000);
#ifdef CONFIG_PPC_PREP
		get_prep_addrs(&board_addr, &info->fix.mmio_start);
#endif
		/* PReP dies if we ioremap the IO registers, but it works w/out... */
		cinfo->regbase = (char __iomem *) info->fix.mmio_start;
	} else {
		dev_dbg(info->device,
			"Attempt to get PCI info for Cirrus Graphics Card\n");
		get_pci_addrs(pdev, &board_addr, &info->fix.mmio_start);
		/* FIXME: this forces VGA.  alternatives? */
		cinfo->regbase = NULL;
		cinfo->laguna_mmio = ioremap(info->fix.mmio_start, 0x1000);
	}

	dev_dbg(info->device, "Board address: 0x%lx, register address: 0x%lx\n",
		board_addr, info->fix.mmio_start);

	/* GD5480 uses a fixed 32MB size; everything else is probed */
	board_size = (cinfo->btype == BT_GD5480) ?
		32 * MB_ : cirrusfb_get_memsize(info, cinfo->regbase);

	ret = pci_request_regions(pdev, "cirrusfb");
	if (ret < 0) {
		dev_err(info->device, "cannot reserve region 0x%lx, abort\n",
			board_addr);
		goto err_release_fb;
	}
#if 0 /* if the system didn't claim this region, we would... */
	if (!request_mem_region(0xA0000, 65535, "cirrusfb")) {
		dev_err(info->device, "cannot reserve region 0x%lx, abort\n",
			0xA0000L);
		ret = -EBUSY;
		goto err_release_regions;
	}
#endif
	/* legacy VGA I/O ports; remembered so unmap releases them again */
	if (request_region(0x3C0, 32, "cirrusfb"))
		release_io_ports = 1;

	info->screen_base = ioremap(board_addr, board_size);
	if (!info->screen_base) {
		ret = -EIO;
		goto err_release_legacy;
	}

	info->fix.smem_start = board_addr;
	info->screen_size = board_size;
	cinfo->unmap = cirrusfb_pci_unmap;

	dev_info(info->device,
		 "Cirrus Logic chipset on PCI bus, RAM (%lu kB) at 0x%lx\n",
		 info->screen_size >> 10, board_addr);
	pci_set_drvdata(pdev, info);

	ret = cirrusfb_register(info);
	if (!ret)
		return 0;

	pci_set_drvdata(pdev, NULL);
	iounmap(info->screen_base);
err_release_legacy:
	if (release_io_ports)
		release_region(0x3C0, 32);
#if 0
	release_mem_region(0xA0000, 65535);
err_release_regions:
#endif
	pci_release_regions(pdev);
err_release_fb:
	if (cinfo->laguna_mmio != NULL)
		iounmap(cinfo->laguna_mmio);
	framebuffer_release(info);
err_out:
	return ret;
}
/* PCI remove callback: tear down the framebuffer bound to this device. */
static void __devexit cirrusfb_pci_unregister(struct pci_dev *pdev)
{
	cirrusfb_cleanup(pci_get_drvdata(pdev));
}
/* PCI bus glue; power management hooks are stubbed out. */
static struct pci_driver cirrusfb_pci_driver = {
	.name = "cirrusfb",
	.id_table = cirrusfb_pci_table,
	.probe = cirrusfb_pci_register,
	.remove = __devexit_p(cirrusfb_pci_unregister),
#ifdef CONFIG_PM
#if 0
	.suspend = cirrusfb_pci_suspend,
	.resume = cirrusfb_pci_resume,
#endif
#endif
};
#endif /* CONFIG_PCI */
#ifdef CONFIG_ZORRO
/*
 * Zorro probe: locate registers and video RAM (either at a fixed offset
 * in this device or on a separate RAM board), reserve the device, map
 * everything and hand over to the common cirrusfb_register().  Errors
 * unwind in reverse order through the labels at the bottom.
 */
static int __devinit cirrusfb_zorro_register(struct zorro_dev *z,
					     const struct zorro_device_id *ent)
{
	struct fb_info *info;
	int error;
	const struct zorrocl *zcl;
	enum cirrus_board btype;
	unsigned long regbase, ramsize, rambase;
	struct cirrusfb_info *cinfo;

	info = framebuffer_alloc(sizeof(struct cirrusfb_info), &z->dev);
	if (!info) {
		printk(KERN_ERR "cirrusfb: could not allocate memory\n");
		return -ENOMEM;
	}

	/* board description was stashed in the Zorro id table entry */
	zcl = (const struct zorrocl *)ent->driver_data;
	btype = zcl->type;
	regbase = zorro_resource_start(z) + zcl->regoffset;
	ramsize = zcl->ramsize;
	if (ramsize) {
		/* RAM lives at a fixed offset inside this device */
		rambase = zorro_resource_start(z) + zcl->ramoffset;
		if (zorro_resource_len(z) == 64 * MB_) {
			/* Quirk for 64 MiB Picasso IV */
			rambase += zcl->ramoffset;
		}
	} else {
		/* RAM is a separate Zorro device, possibly two boards */
		struct zorro_dev *ram = zorro_find_device(zcl->ramid, NULL);
		if (!ram || !zorro_resource_len(ram)) {
			dev_err(info->device, "No video RAM found\n");
			error = -ENODEV;
			goto err_release_fb;
		}
		rambase = zorro_resource_start(ram);
		ramsize = zorro_resource_len(ram);
		/* a second RAM board only counts if it is contiguous */
		if (zcl->ramid2 &&
		    (ram = zorro_find_device(zcl->ramid2, NULL))) {
			if (zorro_resource_start(ram) != rambase + ramsize) {
				dev_warn(info->device,
					 "Skipping non-contiguous RAM at %pR\n",
					 &ram->resource);
			} else {
				ramsize += zorro_resource_len(ram);
			}
		}
	}

	dev_info(info->device,
		 "%s board detected, REG at 0x%lx, %lu MiB RAM at 0x%lx\n",
		 cirrusfb_board_info[btype].name, regbase, ramsize / MB_,
		 rambase);

	if (!zorro_request_device(z, "cirrusfb")) {
		dev_err(info->device, "Cannot reserve %pR\n", &z->resource);
		error = -EBUSY;
		goto err_release_fb;
	}

	cinfo = info->par;
	cinfo->btype = btype;

	/* above 16MB Zorro space we must ioremap; below it ZTWO_VADDR works */
	info->fix.mmio_start = regbase;
	cinfo->regbase = regbase > 16 * MB_ ? ioremap(regbase, 64 * 1024)
					    : (caddr_t)ZTWO_VADDR(regbase);
	if (!cinfo->regbase) {
		dev_err(info->device, "Cannot map registers\n");
		error = -EIO;
		goto err_release_dev;
	}

	info->fix.smem_start = rambase;
	info->screen_size = ramsize;
	info->screen_base = rambase > 16 * MB_ ? ioremap(rambase, ramsize)
					       : (caddr_t)ZTWO_VADDR(rambase);
	if (!info->screen_base) {
		dev_err(info->device, "Cannot map video RAM\n");
		error = -EIO;
		goto err_unmap_reg;
	}

	cinfo->unmap = cirrusfb_zorro_unmap;

	dev_info(info->device,
		 "Cirrus Logic chipset on Zorro bus, RAM (%lu MiB) at 0x%lx\n",
		 ramsize / MB_, rambase);

	/* MCLK select etc. */
	if (cirrusfb_board_info[btype].init_sr1f)
		vga_wseq(cinfo->regbase, CL_SEQR1F,
			 cirrusfb_board_info[btype].sr1f);

	error = cirrusfb_register(info);
	if (error) {
		dev_err(info->device, "Failed to register device, error %d\n",
			error);
		goto err_unmap_ram;
	}

	zorro_set_drvdata(z, info);
	return 0;

err_unmap_ram:
	if (rambase > 16 * MB_)
		iounmap(info->screen_base);
err_unmap_reg:
	if (regbase > 16 * MB_)
		iounmap(cinfo->regbase);
err_release_dev:
	zorro_release_device(z);
err_release_fb:
	framebuffer_release(info);
	return error;
}
/*
 * Zorro remove callback: tear down the framebuffer bound to this device.
 *
 * Made static for consistency with cirrusfb_zorro_register(): it is only
 * referenced through cirrusfb_zorro_driver below, and the previous
 * external linkage triggered a missing-prototype warning.
 */
static void __devexit cirrusfb_zorro_unregister(struct zorro_dev *z)
{
	struct fb_info *info = zorro_get_drvdata(z);

	cirrusfb_cleanup(info);
	zorro_set_drvdata(z, NULL);
}
/* Zorro bus glue */
static struct zorro_driver cirrusfb_zorro_driver = {
	.name = "cirrusfb",
	.id_table = cirrusfb_zorro_table,
	.probe = cirrusfb_zorro_register,
	.remove = __devexit_p(cirrusfb_zorro_unregister),
};
#endif /* CONFIG_ZORRO */
#ifndef MODULE
/*
 * Parse the built-in "video=cirrusfb:..." option string: "noaccel"
 * disables acceleration, "mode:<mode>" (or any bare token) selects the
 * initial video mode.
 */
static int __init cirrusfb_setup(char *options)
{
	char *opt;

	if (!options || !*options)
		return 0;

	while ((opt = strsep(&options, ",")) != NULL) {
		if (*opt == '\0')
			continue;
		if (strcmp(opt, "noaccel") == 0)
			noaccel = 1;
		else if (strncmp(opt, "mode:", 5) == 0)
			mode_option = opt + 5;
		else
			mode_option = opt;
	}
	return 0;
}
#endif
/*
 * Modularization: module metadata and init/exit glue.
 */
MODULE_AUTHOR("Copyright 1999,2000 Jeff Garzik <jgarzik@pobox.com>");
MODULE_DESCRIPTION("Accelerated FBDev driver for Cirrus Logic chips");
MODULE_LICENSE("GPL");
/*
 * Module/built-in entry point: parse any "video=cirrusfb:..." options
 * when built in, then register whichever bus drivers are configured.
 */
static int __init cirrusfb_init(void)
{
	int error = 0;

#ifndef MODULE
	char *option = NULL;

	if (fb_get_options("cirrusfb", &option))
		return -ENODEV;
	cirrusfb_setup(option);
#endif
	/* NOTE(review): OR-ing the two negative errnos together yields a
	 * mangled error code if both registrations fail - confirm this
	 * is acceptable before changing it. */
#ifdef CONFIG_ZORRO
	error |= zorro_register_driver(&cirrusfb_zorro_driver);
#endif
#ifdef CONFIG_PCI
	error |= pci_register_driver(&cirrusfb_pci_driver);
#endif
	return error;
}
/* Unregister the bus drivers in reverse order of registration. */
static void __exit cirrusfb_exit(void)
{
#ifdef CONFIG_PCI
	pci_unregister_driver(&cirrusfb_pci_driver);
#endif
#ifdef CONFIG_ZORRO
	zorro_unregister_driver(&cirrusfb_zorro_driver);
#endif
}
module_init(cirrusfb_init);

/* Module parameters; the same options are parsed from
 * "video=cirrusfb:..." by cirrusfb_setup() when built in. */
module_param(mode_option, charp, 0);
/* BUGFIX: the example mode read "648x480" - there is no such mode */
MODULE_PARM_DESC(mode_option, "Initial video mode e.g. '640x480-8@60'");
module_param(noaccel, bool, 0);
MODULE_PARM_DESC(noaccel, "Disable acceleration");

#ifdef MODULE
module_exit(cirrusfb_exit);
#endif
/**********************************************************************/
/* about the following functions - I have used the same names for the */
/* functions as Markus Wild did in his Retina driver for NetBSD as */
/* they just made sense for this purpose. Apart from that, I wrote */
/* these functions myself. */
/**********************************************************************/
/*** WGen() - write into one of the external/general registers ***/
static void WGen(const struct cirrusfb_info *cinfo,
		 int regnum, unsigned char val)
{
	unsigned long offset = 0;

	/* Picasso II specific hack: the DAC index/data registers are
	 * mapped 0xfff higher on that board. */
	if (cinfo->btype == BT_PICASSO &&
	    (regnum == VGA_PEL_IR || regnum == VGA_PEL_D))
		offset = 0xfff;

	vga_w(cinfo->regbase, offset + regnum, val);
}
/*** RGen() - read out one of the external/general registers ***/
static unsigned char RGen(const struct cirrusfb_info *cinfo, int regnum)
{
	unsigned long offset = 0;

	/* Picasso II specific hack: the DAC index/data registers are
	 * mapped 0xfff higher on that board. */
	if (cinfo->btype == BT_PICASSO &&
	    (regnum == VGA_PEL_IR || regnum == VGA_PEL_D))
		offset = 0xfff;

	return vga_r(cinfo->regbase, offset + regnum);
}
/*** AttrOn() - turn on VideoEnable for Attribute controller ***/
/*
 * Turn on the VideoEnable bit of the attribute controller, taking care
 * to leave the index/data flip-flop in "write index" state afterwards.
 */
static void AttrOn(const struct cirrusfb_info *cinfo)
{
	assert(cinfo != NULL);

	if (vga_rcrt(cinfo->regbase, CL_CRT24) & 0x80) {
		/* if we're just in "write value" mode, write back the */
		/* same value as before to not modify anything */
		vga_w(cinfo->regbase, VGA_ATT_IW,
		      vga_r(cinfo->regbase, VGA_ATT_R));
	}
	/* turn on video bit */
	/* vga_w(cinfo->regbase, VGA_ATT_IW, 0x20); */
	vga_w(cinfo->regbase, VGA_ATT_IW, 0x33);

	/* dummy write on Reg0 to be on "write index" mode next time */
	vga_w(cinfo->regbase, VGA_ATT_IW, 0x00);
}
/*** WHDR() - write into the Hidden DAC register ***/
/* as the HDR is the only extension register that requires special treatment
* (the other extension registers are accessible just like the "ordinary"
* registers of their functional group) here is a specialized routine for
* accessing the HDR
*/
static void WHDR(const struct cirrusfb_info *cinfo, unsigned char val)
{
	unsigned char dummy;

	/* Laguna boards: nothing to do here */
	if (is_laguna(cinfo))
		return;
	if (cinfo->btype == BT_PICASSO) {
		/* Klaus' hint for correct access to HDR on some boards */
		/* first write 0 to pixel mask (3c6) */
		WGen(cinfo, VGA_PEL_MSK, 0x00);
		udelay(200);
		/* next read dummy from pixel address (3c8) */
		dummy = RGen(cinfo, VGA_PEL_IW);
		udelay(200);
	}
	/* now do the usual stuff to access the HDR: four consecutive
	 * reads of the pixel mask register, then the fifth access (the
	 * write below) hits the hidden DAC register instead */
	dummy = RGen(cinfo, VGA_PEL_MSK);
	udelay(200);
	dummy = RGen(cinfo, VGA_PEL_MSK);
	udelay(200);
	dummy = RGen(cinfo, VGA_PEL_MSK);
	udelay(200);
	dummy = RGen(cinfo, VGA_PEL_MSK);
	udelay(200);

	WGen(cinfo, VGA_PEL_MSK, val);
	udelay(200);

	if (cinfo->btype == BT_PICASSO) {
		/* now first reset HDR access counter */
		dummy = RGen(cinfo, VGA_PEL_IW);
		udelay(200);

		/* and at the end, restore the mask value */
		/* ## is this mask always 0xff? */
		WGen(cinfo, VGA_PEL_MSK, 0xff);
		udelay(200);
	}
}
/*** WSFR() - write to the "special function register" (SFR) ***/
/* Zorro (Amiga) boards only: the value is cached in cinfo->SFR because
 * the register itself is write-only as used here. */
static void WSFR(struct cirrusfb_info *cinfo, unsigned char val)
{
#ifdef CONFIG_ZORRO
	assert(cinfo->regbase != NULL);
	cinfo->SFR = val;
	z_writeb(val, cinfo->regbase + 0x8000);
#endif
}
/* The Picasso has a second register for switching the monitor bit */
static void WSFR2(struct cirrusfb_info *cinfo, unsigned char val)
{
#ifdef CONFIG_ZORRO
	/* writing an arbitrary value to this one causes the monitor switcher */
	/* to flip to Amiga display */
	assert(cinfo->regbase != NULL);
	/* cache the last written value, same as WSFR() */
	cinfo->SFR = val;
	z_writeb(val, cinfo->regbase + 0x9000);
#endif
}
/*** WClut - set CLUT entry (range: 0..63) ***/
static void WClut(struct cirrusfb_info *cinfo, unsigned char regnum, unsigned char red,
		  unsigned char green, unsigned char blue)
{
	unsigned int data = VGA_PEL_D;

	/* address write mode register is not translated.. */
	vga_w(cinfo->regbase, VGA_PEL_IW, regnum);

	if (cinfo->btype == BT_PICASSO || cinfo->btype == BT_PICASSO4 ||
	    cinfo->btype == BT_ALPINE || cinfo->btype == BT_GD5480 ||
	    cinfo->btype == BT_SD64 || is_laguna(cinfo)) {
		/* but DAC data register IS, at least for Picasso II */
		if (cinfo->btype == BT_PICASSO)
			data += 0xfff;
		/* these boards take the components in R, G, B order */
		vga_w(cinfo->regbase, data, red);
		vga_w(cinfo->regbase, data, green);
		vga_w(cinfo->regbase, data, blue);
	} else {
		/* the remaining boards take B, G, R */
		vga_w(cinfo->regbase, data, blue);
		vga_w(cinfo->regbase, data, green);
		vga_w(cinfo->regbase, data, red);
	}
}
#if 0
/*** RClut - read CLUT entry (range 0..63) ***/
/* Compiled out: kept as the read-side counterpart of WClut() above. */
static void RClut(struct cirrusfb_info *cinfo, unsigned char regnum, unsigned char *red,
		  unsigned char *green, unsigned char *blue)
{
	unsigned int data = VGA_PEL_D;

	vga_w(cinfo->regbase, VGA_PEL_IR, regnum);

	if (cinfo->btype == BT_PICASSO || cinfo->btype == BT_PICASSO4 ||
	    cinfo->btype == BT_ALPINE || cinfo->btype == BT_GD5480) {
		if (cinfo->btype == BT_PICASSO)
			data += 0xfff;
		*red = vga_r(cinfo->regbase, data);
		*green = vga_r(cinfo->regbase, data);
		*blue = vga_r(cinfo->regbase, data);
	} else {
		*blue = vga_r(cinfo->regbase, data);
		*green = vga_r(cinfo->regbase, data);
		*red = vga_r(cinfo->regbase, data);
	}
}
#endif
/*******************************************************************
cirrusfb_WaitBLT()
Wait for the BitBLT engine to complete a possible earlier job
*********************************************************************/
/* FIXME: use interrupts instead */
static void cirrusfb_WaitBLT(u8 __iomem *regbase)
{
	/* busy-poll GR31 bit 3, which stays set while the blitter runs */
	while (vga_rgfx(regbase, CL_GR31) & 0x08)
		cpu_relax();
}
/*******************************************************************
	cirrusfb_set_blitter()

	program the BitBLT engine registers and start the operation
	(the original header wrongly named cirrusfb_BitBLT here)
********************************************************************/
static void cirrusfb_set_blitter(u8 __iomem *regbase,
				 u_short nwidth, u_short nheight,
				 u_long nsrc, u_long ndest,
				 u_short bltmode, u_short line_length)
{
	/* pitch: set to line_length */
	/* dest pitch low */
	vga_wgfx(regbase, CL_GR24, line_length & 0xff);
	/* dest pitch hi */
	vga_wgfx(regbase, CL_GR25, line_length >> 8);
	/* source pitch low */
	vga_wgfx(regbase, CL_GR26, line_length & 0xff);
	/* source pitch hi */
	vga_wgfx(regbase, CL_GR27, line_length >> 8);

	/* BLT width: actual number of pixels - 1 */
	/* BLT width low */
	vga_wgfx(regbase, CL_GR20, nwidth & 0xff);
	/* BLT width hi */
	vga_wgfx(regbase, CL_GR21, nwidth >> 8);

	/* BLT height: actual number of lines -1 */
	/* BLT height low */
	vga_wgfx(regbase, CL_GR22, nheight & 0xff);
	/* BLT height hi (original comment wrongly said "width hi") */
	vga_wgfx(regbase, CL_GR23, nheight >> 8);

	/* BLT destination */
	/* BLT dest low */
	vga_wgfx(regbase, CL_GR28, (u_char) (ndest & 0xff));
	/* BLT dest mid */
	vga_wgfx(regbase, CL_GR29, (u_char) (ndest >> 8));
	/* BLT dest hi */
	vga_wgfx(regbase, CL_GR2A, (u_char) (ndest >> 16));

	/* BLT source */
	/* BLT src low */
	vga_wgfx(regbase, CL_GR2C, (u_char) (nsrc & 0xff));
	/* BLT src mid */
	vga_wgfx(regbase, CL_GR2D, (u_char) (nsrc >> 8));
	/* BLT src hi */
	vga_wgfx(regbase, CL_GR2E, (u_char) (nsrc >> 16));

	/* BLT mode */
	vga_wgfx(regbase, CL_GR30, bltmode);	/* BLT mode */
	/* BLT ROP: SrcCopy */
	vga_wgfx(regbase, CL_GR32, 0x0d);	/* BLT ROP */
	/* and finally: GO! */
	vga_wgfx(regbase, CL_GR31, 0x02);	/* BLT Start/status */
}
/*******************************************************************
cirrusfb_BitBLT()
perform accelerated "scrolling"
********************************************************************/
static void cirrusfb_BitBLT(u8 __iomem *regbase, int bits_per_pixel,
			    u_short curx, u_short cury,
			    u_short destx, u_short desty,
			    u_short width, u_short height,
			    u_short line_length)
{
	u_short w = width - 1;
	u_short h = height - 1;
	u_long src, dst;
	u_char mode = 0x00;

	/* If the source address is lower than the destination, run the
	 * blit backwards so overlapping areas copy correctly. */
	if (cury < desty || (cury == desty && curx < destx))
		mode |= 0x01;

	/* standard case: forward blitting */
	src = (cury * line_length) + curx;
	dst = (desty * line_length) + destx;
	if (mode) {
		/* backwards: start addresses point at the last pixel */
		u_long tail = h * line_length + w;

		src += tail;
		dst += tail;
	}

	cirrusfb_WaitBLT(regbase);

	cirrusfb_set_blitter(regbase, w, h, src, dst, mode, line_length);
}
/*******************************************************************
cirrusfb_RectFill()
perform accelerated rectangle fill
********************************************************************/
static void cirrusfb_RectFill(u8 __iomem *regbase, int bits_per_pixel,
			      u_short x, u_short y, u_short width, u_short height,
			      u32 fg_color, u32 bg_color, u_short line_length,
			      u_char blitmode)
{
	u_long ndest = (y * line_length) + x;
	u_char op;

	cirrusfb_WaitBLT(regbase);

	/* This is a ColorExpand Blt, using the */
	/* same color for foreground and background */
	vga_wgfx(regbase, VGA_GFX_SR_VALUE, bg_color);
	vga_wgfx(regbase, VGA_GFX_SR_ENABLE, fg_color);

	/* op selects the color-expand depth; the extra color bytes for
	 * the deeper modes are programmed below */
	op = 0x80;
	if (bits_per_pixel >= 16) {
		vga_wgfx(regbase, CL_GR10, bg_color >> 8);
		vga_wgfx(regbase, CL_GR11, fg_color >> 8);
		op = 0x90;
	}
	if (bits_per_pixel >= 24) {
		vga_wgfx(regbase, CL_GR12, bg_color >> 16);
		vga_wgfx(regbase, CL_GR13, fg_color >> 16);
		op = 0xa0;
	}
	if (bits_per_pixel == 32) {
		vga_wgfx(regbase, CL_GR14, bg_color >> 24);
		vga_wgfx(regbase, CL_GR15, fg_color >> 24);
		op = 0xb0;
	}
	cirrusfb_set_blitter(regbase, width - 1, height - 1,
			     0, ndest, op | blitmode, line_length);
}
/**************************************************************************
* bestclock() - determine closest possible clock lower(?) than the
* desired pixel clock
**************************************************************************/
/*
 * Search all PLL settings (numerator 32..127, denominator 7..63 with an
 * optional post-divide-by-2 for denominators above 31) for the one whose
 * output is closest to the requested pixel clock.  For each numerator
 * both the truncated denominator and its successor are tried.
 */
static void bestclock(long freq, int *nom, int *den, int *div)
{
	long best_err;
	int n;

	assert(nom != NULL);
	assert(den != NULL);
	assert(div != NULL);

	*nom = 0;
	*den = 0;
	*div = 0;

	if (freq < 8000)
		freq = 8000;

	best_err = freq;
	for (n = 32; n < 128; n++) {
		long base = 14318 * n;
		int first = (int)(base / freq);
		int k;

		/* candidate denominators: floor(base/freq) and the next one */
		for (k = 0; k < 2; k++) {
			int d = first + k;
			int post = 0;
			long err;

			if (d < 7 || d > 63)
				continue;
			if (d > 31) {
				post = 1;
				d >>= 1;
			}
			err = (base / d) >> post;
			err = err > freq ? err - freq : freq - err;
			if (err < best_err) {
				best_err = err;
				*nom = n;
				*den = d;
				*div = post;
			}
		}
	}
}
/* -------------------------------------------------------------------------
*
* debugging functions
*
* -------------------------------------------------------------------------
*/
#ifdef CIRRUSFB_DEBUG
/**
* cirrusfb_dbg_print_regs
* @base: If using newmmio, the newmmio base address, otherwise %NULL
* @reg_class: type of registers to read: %CRT, or %SEQ
*
* DESCRIPTION:
* Dumps the given list of VGA CRTC registers. If @base is %NULL,
* old-style I/O ports are queried for information, otherwise MMIO is
* used at the given @base address to query the information.
*/
static void cirrusfb_dbg_print_regs(struct fb_info *info,
				    caddr_t regbase,
				    enum cirrusfb_dbg_reg_class reg_class, ...)
{
	va_list list;
	unsigned char val = 0;
	unsigned reg;
	char *name;

	va_start(list, reg_class);

	/* the variadic tail is a NULL-terminated sequence of
	 * (name string, register index) pairs */
	name = va_arg(list, char *);
	while (name != NULL) {
		reg = va_arg(list, int);

		switch (reg_class) {
		case CRT:
			val = vga_rcrt(regbase, (unsigned char) reg);
			break;
		case SEQ:
			val = vga_rseq(regbase, (unsigned char) reg);
			break;
		default:
			/* should never occur */
			assert(false);
			break;
		}

		dev_dbg(info->device, "%8s = 0x%02X\n", name, val);

		name = va_arg(list, char *);
	}

	va_end(list);
}
/**
* cirrusfb_dbg_reg_dump
* @base: If using newmmio, the newmmio base address, otherwise %NULL
*
* DESCRIPTION:
* Dumps a list of interesting VGA and CIRRUSFB registers. If @base is %NULL,
* old-style I/O ports are queried for information, otherwise MMIO is
* used at the given @base address to query the information.
*/
static void cirrusfb_dbg_reg_dump(struct fb_info *info, caddr_t regbase)
{
	/* standard VGA CRTC registers plus the Cirrus extensions */
	dev_dbg(info->device, "VGA CRTC register dump:\n");

	cirrusfb_dbg_print_regs(info, regbase, CRT,
				"CR00", 0x00,
				"CR01", 0x01,
				"CR02", 0x02,
				"CR03", 0x03,
				"CR04", 0x04,
				"CR05", 0x05,
				"CR06", 0x06,
				"CR07", 0x07,
				"CR08", 0x08,
				"CR09", 0x09,
				"CR0A", 0x0A,
				"CR0B", 0x0B,
				"CR0C", 0x0C,
				"CR0D", 0x0D,
				"CR0E", 0x0E,
				"CR0F", 0x0F,
				"CR10", 0x10,
				"CR11", 0x11,
				"CR12", 0x12,
				"CR13", 0x13,
				"CR14", 0x14,
				"CR15", 0x15,
				"CR16", 0x16,
				"CR17", 0x17,
				"CR18", 0x18,
				"CR22", 0x22,
				"CR24", 0x24,
				"CR26", 0x26,
				"CR2D", 0x2D,
				"CR2E", 0x2E,
				"CR2F", 0x2F,
				"CR30", 0x30,
				"CR31", 0x31,
				"CR32", 0x32,
				"CR33", 0x33,
				"CR34", 0x34,
				"CR35", 0x35,
				"CR36", 0x36,
				"CR37", 0x37,
				"CR38", 0x38,
				"CR39", 0x39,
				"CR3A", 0x3A,
				"CR3B", 0x3B,
				"CR3C", 0x3C,
				"CR3D", 0x3D,
				"CR3E", 0x3E,
				"CR3F", 0x3F,
				NULL);

	dev_dbg(info->device, "\n");

	/* sequencer registers, again including the Cirrus extensions */
	dev_dbg(info->device, "VGA SEQ register dump:\n");

	cirrusfb_dbg_print_regs(info, regbase, SEQ,
				"SR00", 0x00,
				"SR01", 0x01,
				"SR02", 0x02,
				"SR03", 0x03,
				"SR04", 0x04,
				"SR08", 0x08,
				"SR09", 0x09,
				"SR0A", 0x0A,
				"SR0B", 0x0B,
				"SR0D", 0x0D,
				"SR10", 0x10,
				"SR11", 0x11,
				"SR12", 0x12,
				"SR13", 0x13,
				"SR14", 0x14,
				"SR15", 0x15,
				"SR16", 0x16,
				"SR17", 0x17,
				"SR18", 0x18,
				"SR19", 0x19,
				"SR1A", 0x1A,
				"SR1B", 0x1B,
				"SR1C", 0x1C,
				"SR1D", 0x1D,
				"SR1E", 0x1E,
				"SR1F", 0x1F,
				NULL);

	dev_dbg(info->device, "\n");
}
#endif /* CIRRUSFB_DEBUG */
| gpl-2.0 |
mp3deviant721/boeffla-kernel-cm-bacon-mod | drivers/power/da9052-battery.c | 4938 | 15480 | /*
* Batttery Driver for Dialog DA9052 PMICs
*
* Copyright(c) 2011 Dialog Semiconductor Ltd.
*
* Author: David Dajun Chen <dchen@diasemi.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/fs.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/mfd/da9052/da9052.h>
#include <linux/mfd/da9052/pdata.h>
#include <linux/mfd/da9052/reg.h>
/* STATIC CONFIGURATION */
/* DA9052_BAT_TSH gates battery presence (see da9052_bat_check_presence());
 * DA9052_BAT_LOW_CAP is the capacity (%) below which health reads DEAD. */
#define DA9052_BAT_CUTOFF_VOLT 2800
#define DA9052_BAT_TSH 62000
#define DA9052_BAT_LOW_CAP 4
#define DA9052_AVG_SZ 4
/* vc_tbl dimensions: rows per table, number of reference temperatures */
#define DA9052_VC_TBL_SZ 68
#define DA9052_VC_TBL_REF_SZ 3
#define DA9052_ISET_USB_MASK 0x0F
#define DA9052_CHG_USB_ILIM_MASK 0x40
#define DA9052_CHG_LIM_COLS 16
/* truncating arithmetic mean */
#define DA9052_MEAN(x, y) ((x + y) / 2)

/* markers stored in bat->charger_type */
enum charger_type_enum {
	DA9052_NOCHARGER = 1,
	DA9052_CHARGER,
};

/* selectable charge-current limit steps, two variants
 * (NOTE(review): the table is not indexed in this chunk - confirm row
 * semantics against the USB notifier code) */
static const u16 da9052_chg_current_lim[2][DA9052_CHG_LIM_COLS] = {
	{70, 80, 90, 100, 110, 120, 400, 450,
	500, 550, 600, 650, 700, 900, 1100, 1300},
	{80, 90, 100, 110, 120, 400, 450, 500,
	550, 600, 800, 1000, 1200, 1400, 1600, 1800},
};

/* reference temperatures (degree Celsius) for the three vc_tbl sets */
static const u16 vc_tbl_ref[3] = {10, 25, 40};
/* Lookup table for voltage vs capacity: {voltage (mV), capacity (%)}
 * pairs sorted by strictly decreasing voltage, one 68-row table per
 * reference temperature in vc_tbl_ref.  da9052_bat_read_capacity()
 * interpolates linearly between adjacent rows. */
static u32 const vc_tbl[3][68][2] = {
	/* For temperature 10 degree Celsius */
	{
		{4082, 100}, {4036, 98},
		{4020, 96}, {4008, 95},
		{3997, 93}, {3983, 91},
		{3964, 90}, {3943, 88},
		{3926, 87}, {3912, 85},
		{3900, 84}, {3890, 82},
		{3881, 80}, {3873, 79},
		{3865, 77}, {3857, 76},
		{3848, 74}, {3839, 73},
		{3829, 71}, {3820, 70},
		{3811, 68}, {3802, 67},
		{3794, 65}, {3785, 64},
		{3778, 62}, {3770, 61},
		{3763, 59}, {3756, 58},
		{3750, 56}, {3744, 55},
		{3738, 53}, {3732, 52},
		{3727, 50}, {3722, 49},
		{3717, 47}, {3712, 46},
		{3708, 44}, {3703, 43},
		{3700, 41}, {3696, 40},
		{3693, 38}, {3691, 37},
		{3688, 35}, {3686, 34},
		{3683, 32}, {3681, 31},
		{3678, 29}, {3675, 28},
		{3672, 26}, {3669, 25},
		{3665, 23}, {3661, 22},
		{3656, 21}, {3651, 19},
		{3645, 18}, {3639, 16},
		{3631, 15}, {3622, 13},
		{3611, 12}, {3600, 10},
		{3587, 9}, {3572, 7},
		{3548, 6}, {3503, 5},
		{3420, 3}, {3268, 2},
		{2992, 1}, {2746, 0}
	},
	/* For temperature 25 degree Celsius */
	{
		{4102, 100}, {4065, 98},
		{4048, 96}, {4034, 95},
		{4021, 93}, {4011, 92},
		{4001, 90}, {3986, 88},
		{3968, 87}, {3952, 85},
		{3938, 84}, {3926, 82},
		{3916, 81}, {3908, 79},
		{3900, 77}, {3892, 76},
		{3883, 74}, {3874, 73},
		{3864, 71}, {3855, 70},
		{3846, 68}, {3836, 67},
		{3827, 65}, {3819, 64},
		{3810, 62}, {3801, 61},
		{3793, 59}, {3786, 58},
		{3778, 56}, {3772, 55},
		{3765, 53}, {3759, 52},
		{3754, 50}, {3748, 49},
		{3743, 47}, {3738, 46},
		{3733, 44}, {3728, 43},
		{3724, 41}, {3720, 40},
		{3716, 38}, {3712, 37},
		{3709, 35}, {3706, 34},
		{3703, 33}, {3701, 31},
		{3698, 30}, {3696, 28},
		{3693, 27}, {3690, 25},
		{3687, 24}, {3683, 22},
		{3680, 21}, {3675, 19},
		{3671, 18}, {3666, 17},
		{3660, 15}, {3654, 14},
		{3647, 12}, {3639, 11},
		{3630, 9}, {3621, 8},
		{3613, 6}, {3606, 5},
		{3597, 4}, {3582, 2},
		{3546, 1}, {2747, 0}
	},
	/* For temperature 40 degree Celsius */
	{
		{4114, 100}, {4081, 98},
		{4065, 96}, {4050, 95},
		{4036, 93}, {4024, 92},
		{4013, 90}, {4002, 88},
		{3990, 87}, {3976, 85},
		{3962, 84}, {3950, 82},
		{3939, 81}, {3930, 79},
		{3921, 77}, {3912, 76},
		{3902, 74}, {3893, 73},
		{3883, 71}, {3874, 70},
		{3865, 68}, {3856, 67},
		{3847, 65}, {3838, 64},
		{3829, 62}, {3820, 61},
		{3812, 59}, {3803, 58},
		{3795, 56}, {3787, 55},
		{3780, 53}, {3773, 52},
		{3767, 50}, {3761, 49},
		{3756, 47}, {3751, 46},
		{3746, 44}, {3741, 43},
		{3736, 41}, {3732, 40},
		{3728, 38}, {3724, 37},
		{3720, 35}, {3716, 34},
		{3713, 33}, {3710, 31},
		{3707, 30}, {3704, 28},
		{3701, 27}, {3698, 25},
		{3695, 24}, {3691, 22},
		{3686, 21}, {3681, 19},
		{3676, 18}, {3671, 17},
		{3666, 15}, {3661, 14},
		{3655, 12}, {3648, 11},
		{3640, 9}, {3632, 8},
		{3622, 6}, {3616, 5},
		{3611, 4}, {3604, 2},
		{3594, 1}, {2747, 0}
	}
};
/* Per-device driver state */
struct da9052_battery {
	struct da9052 *da9052;		/* parent PMIC (MFD) device */
	struct power_supply psy;
	struct notifier_block nb;
	int charger_type;		/* enum charger_type_enum */
	int status;			/* cached POWER_SUPPLY_STATUS_* */
	int health;			/* cached POWER_SUPPLY_HEALTH_* */
};
/* Convert a raw VBAT ADC reading to millivolts:
 * 2500 mV offset plus 1000/512 mV per LSB. */
static inline int volt_reg_to_mV(int value)
{
	return 2500 + (value * 1000) / 512;
}
/* Convert a raw charge-current reading to mA (3.9 mA per LSB). */
static inline int ichg_reg_to_mA(int value)
{
	int scaled = value * 3900;

	return scaled / 1000;
}
/*
 * Read the end-of-charge current threshold (mA) from the PMIC.
 * Returns -EINVAL while discharging (the value is only meaningful with
 * a charger attached), a negative errno on register-read failure, else 0.
 */
static int da9052_read_chgend_current(struct da9052_battery *bat,
				      int *current_mA)
{
	int ret;

	if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING)
		return -EINVAL;

	ret = da9052_reg_read(bat->da9052, DA9052_ICHG_END_REG);
	if (ret < 0)
		return ret;

	*current_mA = ichg_reg_to_mA(ret & DA9052_ICHGEND_ICHGEND);
	return 0;
}
/*
 * Read the averaged charging current (mA) from the PMIC.
 * Returns -EINVAL while discharging, a negative errno on register-read
 * failure, else 0.
 */
static int da9052_read_chg_current(struct da9052_battery *bat, int *current_mA)
{
	int ret;

	if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING)
		return -EINVAL;

	ret = da9052_reg_read(bat->da9052, DA9052_ICHG_AV_REG);
	if (ret < 0)
		return ret;

	*current_mA = ichg_reg_to_mA(ret & DA9052_ICHGAV_ICHGAV);
	return 0;
}
/*
 * Derive charger_type and charging status from the PMIC status
 * registers.  The result is cached in *bat and, when @status is
 * non-NULL, also copied out.  Returns 0 or a negative errno.
 */
static int da9052_bat_check_status(struct da9052_battery *bat, int *status)
{
	u8 v[2] = {0, 0};
	u8 bat_status;
	u8 chg_end;
	int ret;
	int chg_current;
	int chg_end_current;
	bool dcinsel;
	bool dcindet;
	bool vbussel;
	bool vbusdet;
	bool dc;
	bool vbus;

	/* STATUS_A and STATUS_B are read in one burst */
	ret = da9052_group_read(bat->da9052, DA9052_STATUS_A_REG, 2, v);
	if (ret < 0)
		return ret;

	bat_status = v[0];
	chg_end = v[1];

	/* a supply counts only if it is both detected and selected */
	dcinsel = bat_status & DA9052_STATUSA_DCINSEL;
	dcindet = bat_status & DA9052_STATUSA_DCINDET;
	vbussel = bat_status & DA9052_STATUSA_VBUSSEL;
	vbusdet = bat_status & DA9052_STATUSA_VBUSDET;
	dc = dcinsel && dcindet;
	vbus = vbussel && vbusdet;

	/* Preference to WALL(DCIN) charger unit */
	if (dc || vbus) {
		bat->charger_type = DA9052_CHARGER;

		/* If charging end flag is set and Charging current is greater
		 * than charging end limit then battery is charging
		 */
		if ((chg_end & DA9052_STATUSB_CHGEND) != 0) {
			ret = da9052_read_chg_current(bat, &chg_current);
			if (ret < 0)
				return ret;
			ret = da9052_read_chgend_current(bat, &chg_end_current);
			if (ret < 0)
				return ret;
			if (chg_current >= chg_end_current)
				bat->status = POWER_SUPPLY_STATUS_CHARGING;
			else
				bat->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
		} else {
			/* If Charging end flag is cleared then battery is
			 * charging
			 */
			bat->status = POWER_SUPPLY_STATUS_CHARGING;
		}
	} else if (dcindet || vbusdet) {
		/* supply detected but not selected */
		bat->charger_type = DA9052_CHARGER;
		bat->status = POWER_SUPPLY_STATUS_NOT_CHARGING;
	} else {
		bat->charger_type = DA9052_NOCHARGER;
		bat->status = POWER_SUPPLY_STATUS_DISCHARGING;
	}

	if (status != NULL)
		*status = bat->status;
	return 0;
}
/*
 * Read the battery voltage (mV) via a manual ADC conversion.
 * Returns 0 on success or the negative errno from the ADC read.
 */
static int da9052_bat_read_volt(struct da9052_battery *bat, int *volt_mV)
{
	int raw = da9052_adc_manual_read(bat->da9052,
					 DA9052_ADC_MAN_MUXSEL_VBAT);

	if (raw < 0)
		return raw;

	*volt_mV = volt_reg_to_mV(raw);
	return 0;
}
/*
 * Decide whether a legal battery is attached: an ADC temperature
 * reading above DA9052_BAT_TSH marks the pack as illegal/absent.
 * Returns 0 on success or the negative errno from the ADC read.
 */
static int da9052_bat_check_presence(struct da9052_battery *bat, int *illegal)
{
	int temp = da9052_adc_read_temp(bat->da9052);

	if (temp < 0)
		return temp;

	*illegal = (temp > DA9052_BAT_TSH) ? 1 : 0;
	return 0;
}
/*
 * Linear interpolation between two (voltage, capacity) points using
 * fixed-point slope scaled by 1000; integer truncation matches the
 * original implementation.
 */
static int da9052_bat_interpolate(int vbat_lower, int vbat_upper,
				  int level_lower, int level_upper,
				  int bat_voltage)
{
	int slope = ((level_upper - level_lower) * 1000) /
		    (vbat_upper - vbat_lower);

	return level_lower + ((bat_voltage - vbat_lower) * slope) / 1000;
}
/*
 * Pick the vc_tbl[] set whose reference temperature is closest to the
 * measured ADC temperature.
 *
 * Fixes versus the original:
 *  - the loop ran up to i == DA9052_VC_TBL_REF_SZ - 1 and read
 *    vc_tbl_ref[i + 1], one element past the end of the array;
 *  - the second range test compared against vc_tbl_ref[i] instead of
 *    vc_tbl_ref[i + 1]; a value above the interval midpoint can never
 *    also be <= the interval's lower bound, so temperatures in the
 *    upper half of an interval fell off the end of this non-void
 *    function (undefined behaviour / missing return).
 */
unsigned char da9052_determine_vc_tbl_index(unsigned char adc_temp)
{
	int i;

	if (adc_temp <= vc_tbl_ref[0])
		return 0;

	if (adc_temp > vc_tbl_ref[DA9052_VC_TBL_REF_SZ - 1])
		return DA9052_VC_TBL_REF_SZ - 1;

	for (i = 0; i < DA9052_VC_TBL_REF_SZ - 1; i++) {
		/* lower half of (ref[i], ref[i+1]] -> nearer ref[i] */
		if ((adc_temp > vc_tbl_ref[i]) &&
		    (adc_temp <= DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1])))
			return i;
		/* upper half -> nearer ref[i+1] */
		if ((adc_temp > DA9052_MEAN(vc_tbl_ref[i], vc_tbl_ref[i + 1]))
		     && (adc_temp <= vc_tbl_ref[i + 1]))
			return i + 1;
	}
	/* unreachable given the clamps above; keep the compiler happy
	 * and fail safe */
	return DA9052_VC_TBL_REF_SZ - 1;
}
/*
 * Estimate the remaining battery capacity in percent.
 *
 * Reads the battery voltage and the junction temperature, selects the
 * temperature-matched column of the voltage/capacity lookup table via
 * da9052_determine_vc_tbl_index(), clamps to 100%/0% outside the table
 * range, and otherwise linearly interpolates between the two bracketing
 * table rows.  Returns 0 on success, a negative errno from the ADC
 * reads, or -EIO if no bracketing pair is found (malformed table).
 */
static int da9052_bat_read_capacity(struct da9052_battery *bat, int *capacity)
{
	int adc_temp;
	int bat_voltage;
	int vbat_lower;
	int vbat_upper;
	int level_upper;
	int level_lower;
	int ret;
	int flag;
	int i = 0;
	int j;

	ret = da9052_bat_read_volt(bat, &bat_voltage);
	if (ret < 0)
		return ret;

	adc_temp = da9052_adc_read_temp(bat->da9052);
	if (adc_temp < 0)
		return adc_temp;

	i = da9052_determine_vc_tbl_index(adc_temp);

	/* at or above the highest tabulated voltage: report full */
	if (bat_voltage >= vc_tbl[i][0][0]) {
		*capacity = 100;
		return 0;
	}
	/* at or below the lowest tabulated voltage: report empty */
	if (bat_voltage <= vc_tbl[i][DA9052_VC_TBL_SZ - 1][0]) {
		*capacity = 0;
		return 0;
	}

	/* find the adjacent table rows whose voltages bracket the reading */
	flag = 0;
	for (j = 0; j < (DA9052_VC_TBL_SZ - 1); j++) {
		if ((bat_voltage <= vc_tbl[i][j][0]) &&
		    (bat_voltage >= vc_tbl[i][j + 1][0])) {
			vbat_upper = vc_tbl[i][j][0];
			vbat_lower = vc_tbl[i][j + 1][0];
			level_upper = vc_tbl[i][j][1];
			level_lower = vc_tbl[i][j + 1][1];
			flag = 1;
			break;
		}
	}
	if (!flag)
		return -EIO;

	*capacity = da9052_bat_interpolate(vbat_lower, vbat_upper, level_lower,
					   level_upper, bat_voltage);
	return 0;
}
/*
 * Evaluate battery health.  An illegal (over-temperature) pack is marked
 * UNKNOWN and *health is deliberately left untouched, matching the
 * original behavior.  Otherwise, unless a latched OVERHEAT condition is
 * present, health becomes DEAD below the low-capacity threshold and
 * GOOD above it.
 */
static int da9052_bat_check_health(struct da9052_battery *bat, int *health)
{
	int bat_illegal;
	int capacity;
	int ret;

	ret = da9052_bat_check_presence(bat, &bat_illegal);
	if (ret < 0)
		return ret;

	if (bat_illegal) {
		bat->health = POWER_SUPPLY_HEALTH_UNKNOWN;
		return 0;
	}

	/* never overwrite a latched overheat condition */
	if (bat->health != POWER_SUPPLY_HEALTH_OVERHEAT) {
		ret = da9052_bat_read_capacity(bat, &capacity);
		if (ret < 0)
			return ret;
		bat->health = (capacity < DA9052_BAT_LOW_CAP) ?
			POWER_SUPPLY_HEALTH_DEAD : POWER_SUPPLY_HEALTH_GOOD;
	}

	*health = bat->health;
	return 0;
}
/*
 * Threaded handler shared by all battery-related IRQs.  Charge-end
 * marks the battery full; any other event triggers a full status
 * re-evaluation.  Charger attach/detach, VBUS and temperature events
 * additionally notify the power-supply core.
 */
static irqreturn_t da9052_bat_irq(int irq, void *data)
{
	struct da9052_battery *bat = data;
	int virq = irq - bat->da9052->irq_base;

	if (virq == DA9052_IRQ_CHGEND)
		bat->status = POWER_SUPPLY_STATUS_FULL;
	else
		da9052_bat_check_status(bat, NULL);

	switch (virq) {
	case DA9052_IRQ_CHGEND:
	case DA9052_IRQ_DCIN:
	case DA9052_IRQ_VBUS:
	case DA9052_IRQ_TBAT:
		power_supply_changed(&bat->psy);
		break;
	default:
		break;
	}

	return IRQ_HANDLED;
}
/*
 * Notifier callback that programs the USB charge-current limit.
 * Refuses while discharging or while the hardware USB current limit is
 * active, validates the requested current against the chip-specific
 * limit table, then writes the matching column index to the ISET
 * register.
 *
 * Fix over the previous version: the return value of da9052_reg_read()
 * was used as a register bitmask without checking for a negative error
 * code first.
 */
static int da9052_USB_current_notifier(struct notifier_block *nb,
				       unsigned long events, void *data)
{
	u8 row;
	u8 col;
	int *current_mA = data;
	int ret;
	struct da9052_battery *bat = container_of(nb, struct da9052_battery,
						  nb);

	if (bat->status == POWER_SUPPLY_STATUS_DISCHARGING)
		return -EPERM;

	ret = da9052_reg_read(bat->da9052, DA9052_CHGBUCK_REG);
	if (ret < 0)
		return ret;
	if (ret & DA9052_CHG_USB_ILIM_MASK)
		return -EPERM;

	/* the limit table differs between DA9052 and its siblings */
	if (bat->da9052->chip_id == DA9052)
		row = 0;
	else
		row = 1;

	if (*current_mA < da9052_chg_current_lim[row][0] ||
	    *current_mA > da9052_chg_current_lim[row][DA9052_CHG_LIM_COLS - 1])
		return -EINVAL;

	/* first table entry >= requested current */
	for (col = 0; col <= DA9052_CHG_LIM_COLS - 1; col++) {
		if (*current_mA <= da9052_chg_current_lim[row][col])
			break;
	}

	return da9052_reg_update(bat->da9052, DA9052_ISET_REG,
				 DA9052_ISET_USB_MASK, col);
}
/*
 * power_supply ->get_property callback.
 *
 * Every query first re-checks battery presence; an illegal battery
 * makes all properties except PRESENT return -ENODEV.  Note that for
 * the simple assignment cases (ONLINE, VOLTAGE_MIN_DESIGN, TECHNOLOGY)
 * the returned 'ret' is the 0 left over from the presence check.
 */
static int da9052_bat_get_property(struct power_supply *psy,
				   enum power_supply_property psp,
				   union power_supply_propval *val)
{
	int ret;
	int illegal;
	struct da9052_battery *bat = container_of(psy, struct da9052_battery,
						  psy);

	ret = da9052_bat_check_presence(bat, &illegal);
	if (ret < 0)
		return ret;

	if (illegal && psp != POWER_SUPPLY_PROP_PRESENT)
		return -ENODEV;

	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		ret = da9052_bat_check_status(bat, &val->intval);
		break;
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval =
			(bat->charger_type == DA9052_NOCHARGER) ? 0 : 1;
		break;
	case POWER_SUPPLY_PROP_PRESENT:
		ret = da9052_bat_check_presence(bat, &val->intval);
		break;
	case POWER_SUPPLY_PROP_HEALTH:
		ret = da9052_bat_check_health(bat, &val->intval);
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
		val->intval = DA9052_BAT_CUTOFF_VOLT * 1000;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_AVG:
		ret = da9052_bat_read_volt(bat, &val->intval);
		break;
	case POWER_SUPPLY_PROP_CURRENT_AVG:
		ret = da9052_read_chg_current(bat, &val->intval);
		break;
	case POWER_SUPPLY_PROP_CAPACITY:
		ret = da9052_bat_read_capacity(bat, &val->intval);
		break;
	case POWER_SUPPLY_PROP_TEMP:
		/*
		 * NOTE(review): on success 'ret' ends up holding the
		 * positive temperature value; the power-supply sysfs layer
		 * only treats negative returns as errors, so this works,
		 * but returning 0 explicitly would be cleaner -- confirm.
		 */
		val->intval = da9052_adc_read_temp(bat->da9052);
		ret = val->intval;
		break;
	case POWER_SUPPLY_PROP_TECHNOLOGY:
		val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
/* Properties exposed to the power-supply core by this driver. */
static enum power_supply_property da9052_bat_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_PRESENT,
	POWER_SUPPLY_PROP_HEALTH,
	POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
	POWER_SUPPLY_PROP_VOLTAGE_AVG,
	POWER_SUPPLY_PROP_CURRENT_AVG,
	POWER_SUPPLY_PROP_CAPACITY,
	POWER_SUPPLY_PROP_TEMP,
	POWER_SUPPLY_PROP_TECHNOLOGY,
};

/* Template copied into each probed instance's psy in da9052_bat_probe(). */
static struct power_supply template_battery = {
	.name		= "da9052-bat",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= da9052_bat_props,
	.num_properties	= ARRAY_SIZE(da9052_bat_props),
	.get_property	= da9052_bat_get_property,
};

/* IRQ resource names looked up with platform_get_irq_byname() in probe. */
static const char *const da9052_bat_irqs[] = {
	"BATT TEMP",
	"DCIN DET",
	"DCIN REM",
	"VBUS DET",
	"VBUS REM",
	"CHG END",
};
/*
 * Probe: allocate driver state, request all battery IRQs by name and
 * register the power supply.  On failure, every IRQ acquired so far is
 * released and the state is freed.
 *
 * Fixes over the previous version:
 *  - platform_get_irq_byname() results are checked before use;
 *  - the unwind loop starts at the last *successfully* requested IRQ.
 *    Previously it also freed the IRQ whose request had just failed
 *    and, when power_supply_register() failed, indexed one past the
 *    end of da9052_bat_irqs[].
 */
static s32 __devinit da9052_bat_probe(struct platform_device *pdev)
{
	struct da9052_pdata *pdata;
	struct da9052_battery *bat;
	int ret;
	int irq;
	int i;

	bat = kzalloc(sizeof(struct da9052_battery), GFP_KERNEL);
	if (!bat)
		return -ENOMEM;

	bat->da9052 = dev_get_drvdata(pdev->dev.parent);
	bat->psy = template_battery;
	bat->charger_type = DA9052_NOCHARGER;
	bat->status = POWER_SUPPLY_STATUS_UNKNOWN;
	bat->health = POWER_SUPPLY_HEALTH_UNKNOWN;
	bat->nb.notifier_call = da9052_USB_current_notifier;

	pdata = bat->da9052->dev->platform_data;
	if (pdata != NULL && pdata->use_for_apm)
		bat->psy.use_for_apm = pdata->use_for_apm;
	else
		bat->psy.use_for_apm = 1;

	for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
		irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
		if (irq < 0) {
			ret = irq;
			dev_err(bat->da9052->dev,
				"DA9052 failed to get %s IRQ: %d\n",
				da9052_bat_irqs[i], ret);
			goto err;
		}
		ret = request_threaded_irq(bat->da9052->irq_base + irq,
					   NULL, da9052_bat_irq,
					   IRQF_TRIGGER_LOW | IRQF_ONESHOT,
					   da9052_bat_irqs[i], bat);
		if (ret != 0) {
			dev_err(bat->da9052->dev,
				"DA9052 failed to request %s IRQ %d: %d\n",
				da9052_bat_irqs[i], irq, ret);
			goto err;
		}
	}

	ret = power_supply_register(&pdev->dev, &bat->psy);
	if (ret)
		goto err;

	platform_set_drvdata(pdev, bat);
	return 0;

err:
	/* release only the IRQs successfully requested before index i */
	while (--i >= 0) {
		irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
		free_irq(bat->da9052->irq_base + irq, bat);
	}
	kfree(bat);
	return ret;
}
/*
 * Teardown: release every battery IRQ, unregister the power supply and
 * free the driver state.
 */
static int __devexit da9052_bat_remove(struct platform_device *pdev)
{
	struct da9052_battery *bat = platform_get_drvdata(pdev);
	int irq;
	int i;

	for (i = 0; i < ARRAY_SIZE(da9052_bat_irqs); i++) {
		irq = platform_get_irq_byname(pdev, da9052_bat_irqs[i]);
		free_irq(bat->da9052->irq_base + irq, bat);
	}

	power_supply_unregister(&bat->psy);
	kfree(bat);

	return 0;
}
/* Platform driver glue; matched by the "da9052-bat" platform device. */
static struct platform_driver da9052_bat_driver = {
	.probe = da9052_bat_probe,
	.remove = __devexit_p(da9052_bat_remove),
	.driver = {
		.name = "da9052-bat",
		.owner = THIS_MODULE,
	},
};
module_platform_driver(da9052_bat_driver);

MODULE_DESCRIPTION("DA9052 BAT Device Driver");
MODULE_AUTHOR("David Dajun Chen <dchen@diasemi.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:da9052-bat");
| gpl-2.0 |
micchie/mptcp | sound/soc/omap/zoom2.c | 4938 | 5957 | /*
* zoom2.c -- SoC audio for Zoom2
*
* Author: Misael Lopez Cruz <x0052729@ti.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*
*/
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
#include <mach/hardware.h>
#include <mach/gpio.h>
#include <mach/board-zoom.h>
#include <plat/mcbsp.h>
/* Register descriptions for twl4030 codec part */
#include <linux/mfd/twl4030-audio.h>
#include <linux/module.h>
#include "omap-mcbsp.h"
#include "omap-pcm.h"
#define ZOOM2_HEADSET_MUX_GPIO (OMAP_MAX_GPIO_LINES + 15)
/*
 * hw_params hook: feed the TWL4030 codec its 26 MHz system clock for
 * both DAC and ADC paths.
 */
static int zoom2_hw_params(struct snd_pcm_substream *substream,
			   struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	int ret;

	ret = snd_soc_dai_set_sysclk(rtd->codec_dai, 0, 26000000,
				     SND_SOC_CLOCK_IN);
	if (ret < 0) {
		printk(KERN_ERR "can't set codec system clock\n");
		return ret;
	}

	return 0;
}
static struct snd_soc_ops zoom2_ops = {
	.hw_params = zoom2_hw_params,
};

/* Zoom2 machine DAPM widgets: board-level audio endpoints. */
static const struct snd_soc_dapm_widget zoom2_twl4030_dapm_widgets[] = {
	SND_SOC_DAPM_MIC("Ext Mic", NULL),
	SND_SOC_DAPM_SPK("Ext Spk", NULL),
	SND_SOC_DAPM_MIC("Headset Mic", NULL),
	SND_SOC_DAPM_HP("Headset Stereophone", NULL),
	SND_SOC_DAPM_LINE("Aux In", NULL),
};

/* Routing between the board widgets and the TWL4030 codec pins. */
static const struct snd_soc_dapm_route audio_map[] = {
	/* External Mics: MAINMIC, SUBMIC with bias*/
	{"MAINMIC", NULL, "Mic Bias 1"},
	{"SUBMIC", NULL, "Mic Bias 2"},
	{"Mic Bias 1", NULL, "Ext Mic"},
	{"Mic Bias 2", NULL, "Ext Mic"},

	/* External Speakers: HFL, HFR */
	{"Ext Spk", NULL, "HFL"},
	{"Ext Spk", NULL, "HFR"},

	/* Headset Stereophone: HSOL, HSOR */
	{"Headset Stereophone", NULL, "HSOL"},
	{"Headset Stereophone", NULL, "HSOR"},

	/* Headset Mic: HSMIC with bias */
	{"HSMIC", NULL, "Headset Mic Bias"},
	{"Headset Mic Bias", NULL, "Headset Mic"},

	/* Aux In: AUXL, AUXR */
	{"Aux In", NULL, "AUXL"},
	{"Aux In", NULL, "AUXR"},
};
/*
 * DAI-link init: mark all TWL4030 codec pins that are not wired up on
 * the Zoom2 board as not-connected so DAPM can power them down.
 */
static int zoom2_twl4030_init(struct snd_soc_pcm_runtime *rtd)
{
	static const char * const nc_pins[] = {
		"CARKITMIC", "DIGIMIC0", "DIGIMIC1", "EARPIECE",
		"PREDRIVEL", "PREDRIVER", "CARKITL", "CARKITR",
	};
	struct snd_soc_dapm_context *dapm = &rtd->codec->dapm;
	int i;

	for (i = 0; i < ARRAY_SIZE(nc_pins); i++)
		snd_soc_dapm_nc_pin(dapm, nc_pins[i]);

	return 0;
}
/*
 * Voice DAI-link init: turn on the TWL4030 voice interface by setting
 * the DIN/DOUT/interface enable bits in TWL4030_REG_VOICE_IF.
 *
 * NOTE(review): this pokes the register through the codec driver's raw
 * ->read/->write ops, bypassing the usual ASoC update helpers -- verify
 * this stays coherent with the codec driver's register cache.
 */
static int zoom2_twl4030_voice_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	unsigned short reg;

	/* Enable voice interface */
	reg = codec->driver->read(codec, TWL4030_REG_VOICE_IF);
	reg |= TWL4030_VIF_DIN_EN | TWL4030_VIF_DOUT_EN | TWL4030_VIF_EN;
	codec->driver->write(codec, TWL4030_REG_VOICE_IF, reg);

	return 0;
}
/* Digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link zoom2_dai[] = {
	{
		/* hi-fi audio over McBSP2, codec is clock/frame master */
		.name = "TWL4030 I2S",
		.stream_name = "TWL4030 Audio",
		.cpu_dai_name = "omap-mcbsp.2",
		.codec_dai_name = "twl4030-hifi",
		.platform_name = "omap-pcm-audio",
		.codec_name = "twl4030-codec",
		.dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
			   SND_SOC_DAIFMT_CBM_CFM,
		.init = zoom2_twl4030_init,
		.ops = &zoom2_ops,
	},
	{
		/* voice path over McBSP3 in DSP-A mode, inverted bit clock */
		.name = "TWL4030 PCM",
		.stream_name = "TWL4030 Voice",
		.cpu_dai_name = "omap-mcbsp.3",
		.codec_dai_name = "twl4030-voice",
		.platform_name = "omap-pcm-audio",
		.codec_name = "twl4030-codec",
		.dai_fmt = SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_NF |
			   SND_SOC_DAIFMT_CBM_CFM,
		.init = zoom2_twl4030_voice_init,
		.ops = &zoom2_ops,
	},
};

/* Audio machine driver */
static struct snd_soc_card snd_soc_zoom2 = {
	.name = "Zoom2",
	.owner = THIS_MODULE,
	.dai_link = zoom2_dai,
	.num_links = ARRAY_SIZE(zoom2_dai),

	.dapm_widgets = zoom2_twl4030_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(zoom2_twl4030_dapm_widgets),
	.dapm_routes = audio_map,
	.num_dapm_routes = ARRAY_SIZE(audio_map),
};
static struct platform_device *zoom2_snd_device;

/*
 * Module init: register the soc-audio platform device and claim the two
 * headset GPIOs (mux and external mute), driving both low initially.
 *
 * Fix over the previous version: gpio_request() failures were handled
 * with BUG_ON(), crashing the kernel instead of failing module init,
 * and left the platform device and first GPIO behind.  Failures now
 * unwind cleanly and return the error.
 */
static int __init zoom2_soc_init(void)
{
	int ret;

	if (!machine_is_omap_zoom2())
		return -ENODEV;
	printk(KERN_INFO "Zoom2 SoC init\n");

	zoom2_snd_device = platform_device_alloc("soc-audio", -1);
	if (!zoom2_snd_device) {
		printk(KERN_ERR "Platform device allocation failed\n");
		return -ENOMEM;
	}

	platform_set_drvdata(zoom2_snd_device, &snd_soc_zoom2);
	ret = platform_device_add(zoom2_snd_device);
	if (ret) {
		printk(KERN_ERR "Unable to add platform device\n");
		goto err_put;
	}

	ret = gpio_request(ZOOM2_HEADSET_MUX_GPIO, "hs_mux");
	if (ret) {
		printk(KERN_ERR "hs_mux GPIO request failed\n");
		goto err_unregister;
	}
	gpio_direction_output(ZOOM2_HEADSET_MUX_GPIO, 0);

	ret = gpio_request(ZOOM2_HEADSET_EXTMUTE_GPIO, "ext_mute");
	if (ret) {
		printk(KERN_ERR "ext_mute GPIO request failed\n");
		goto err_free_mux;
	}
	gpio_direction_output(ZOOM2_HEADSET_EXTMUTE_GPIO, 0);

	return 0;

err_free_mux:
	gpio_free(ZOOM2_HEADSET_MUX_GPIO);
err_unregister:
	platform_device_unregister(zoom2_snd_device);
	return ret;

err_put:
	platform_device_put(zoom2_snd_device);
	return ret;
}
module_init(zoom2_soc_init);

/* Module exit: release the headset GPIOs and remove the sound device. */
static void __exit zoom2_soc_exit(void)
{
	gpio_free(ZOOM2_HEADSET_MUX_GPIO);
	gpio_free(ZOOM2_HEADSET_EXTMUTE_GPIO);
	platform_device_unregister(zoom2_snd_device);
}
module_exit(zoom2_soc_exit);

MODULE_AUTHOR("Misael Lopez Cruz <x0052729@ti.com>");
MODULE_DESCRIPTION("ALSA SoC Zoom2");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ztemt/NX507J_Lollipop_kernel | drivers/media/video/saa7110.c | 7242 | 14039 | /*
* saa7110 - Philips SAA7110(A) video decoder driver
*
* Copyright (C) 1998 Pauline Middelink <middelin@polyware.nl>
*
* Copyright (C) 1999 Wolfgang Scherr <scherr@net4you.net>
* Copyright (C) 2000 Serguei Miridonov <mirsev@cicese.mx>
* - some corrections for Pinnacle Systems Inc. DC10plus card.
*
* Changes by Ronald Bultje <rbultje@ronald.bitfreak.net>
* - moved over to linux>=2.4.x i2c protocol (1/1/2003)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-ctrls.h>
MODULE_DESCRIPTION("Philips SAA7110 video decoder driver");
MODULE_AUTHOR("Pauline Middelink");
MODULE_LICENSE("GPL");
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
#define SAA7110_MAX_INPUT 9 /* 6 CVBS, 3 SVHS */
#define SAA7110_MAX_OUTPUT 1 /* 1 YUV */
#define SAA7110_NR_REG 0x35
/* Per-device state for the SAA7110 decoder subdevice. */
struct saa7110 {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
	u8 reg[SAA7110_NR_REG];		/* shadow copy of chip registers */

	v4l2_std_id norm;		/* currently selected video standard */
	int input;			/* currently selected input channel */
	int enable;			/* YUV output enabled flag */

	wait_queue_head_t wq;		/* used only for timed sleeps */
};

/* Container helpers: subdev / control handler -> driver state. */
static inline struct saa7110 *to_saa7110(struct v4l2_subdev *sd)
{
	return container_of(sd, struct saa7110, sd);
}

static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
	return &container_of(ctrl->handler, struct saa7110, hdl)->sd;
}
/* ----------------------------------------------------------------------- */
/* I2C support functions */
/* ----------------------------------------------------------------------- */
/*
 * Write one register over SMBus and mirror the value into the local
 * register shadow.  Returns the i2c_smbus_write_byte_data() result.
 */
static int saa7110_write(struct v4l2_subdev *sd, u8 reg, u8 value)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct saa7110 *decoder = to_saa7110(sd);

	decoder->reg[reg] = value;
	return i2c_smbus_write_byte_data(client, reg, value);
}
/*
 * Write a block of registers.  data[0] is the first register number,
 * followed by len-1 data bytes.  Uses the chip's register auto-increment
 * via a raw I2C transfer when the adapter supports it, otherwise falls
 * back to one SMBus write per register.  Returns the last transfer
 * result, or -1 if the block would run past the register map.
 */
static int saa7110_write_block(struct v4l2_subdev *sd, const u8 *data, unsigned int len)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct saa7110 *decoder = to_saa7110(sd);
	int ret = -1;
	u8 reg = *data;		/* first register to write to */

	/* Sanity check: reject blocks that would overflow the shadow array */
	if (reg + (len - 1) > SAA7110_NR_REG)
		return ret;

	/* the saa7110 has an autoincrement function, use it if
	 * the adapter understands raw I2C */
	if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
		ret = i2c_master_send(client, data, len);

		/* Cache the written data */
		memcpy(decoder->reg + reg, data + 1, len - 1);
	} else {
		for (++data, --len; len; len--) {
			ret = saa7110_write(sd, reg++, *data++);
			if (ret < 0)
				break;
		}
	}

	return ret;
}
/* Read the chip's status byte (the register last addressed). */
static inline int saa7110_read(struct v4l2_subdev *sd)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return i2c_smbus_read_byte(client);
}
/* ----------------------------------------------------------------------- */
/* SAA7110 functions */
/* ----------------------------------------------------------------------- */
#define FRESP_06H_COMPST 0x03 /*0x13*/
#define FRESP_06H_SVIDEO 0x83 /*0xC0*/
/*
 * Select one of the 9 input channels (6 CVBS, 3 S-Video) by loading the
 * per-mode analog/mixer register set below.  Values are taken verbatim
 * from the original driver; their derivation is not documented here.
 */
static int saa7110_selmux(struct v4l2_subdev *sd, int chan)
{
	static const unsigned char modes[9][8] = {
		/* mode 0 */
		{FRESP_06H_COMPST, 0xD9, 0x17, 0x40, 0x03,
					0x44, 0x75, 0x16},
		/* mode 1 */
		{FRESP_06H_COMPST, 0xD8, 0x17, 0x40, 0x03,
					0x44, 0x75, 0x16},
		/* mode 2 */
		{FRESP_06H_COMPST, 0xBA, 0x07, 0x91, 0x03,
					0x60, 0xB5, 0x05},
		/* mode 3 */
		{FRESP_06H_COMPST, 0xB8, 0x07, 0x91, 0x03,
					0x60, 0xB5, 0x05},
		/* mode 4 */
		{FRESP_06H_COMPST, 0x7C, 0x07, 0xD2, 0x83,
					0x60, 0xB5, 0x03},
		/* mode 5 */
		{FRESP_06H_COMPST, 0x78, 0x07, 0xD2, 0x83,
					0x60, 0xB5, 0x03},
		/* mode 6 */
		{FRESP_06H_SVIDEO, 0x59, 0x17, 0x42, 0xA3,
					0x44, 0x75, 0x12},
		/* mode 7 */
		{FRESP_06H_SVIDEO, 0x9A, 0x17, 0xB1, 0x13,
					0x60, 0xB5, 0x14},
		/* mode 8 */
		{FRESP_06H_SVIDEO, 0x3C, 0x27, 0xC1, 0x23,
					0x44, 0x75, 0x21}
	};
	struct saa7110 *decoder = to_saa7110(sd);
	const unsigned char *ptr = modes[chan];

	saa7110_write(sd, 0x06, ptr[0]);	/* Luminance control */
	saa7110_write(sd, 0x20, ptr[1]);	/* Analog Control #1 */
	saa7110_write(sd, 0x21, ptr[2]);	/* Analog Control #2 */
	saa7110_write(sd, 0x22, ptr[3]);	/* Mixer Control #1 */
	saa7110_write(sd, 0x2C, ptr[4]);	/* Mixer Control #2 */
	saa7110_write(sd, 0x30, ptr[5]);	/* ADCs gain control */
	saa7110_write(sd, 0x31, ptr[6]);	/* Mixer Control #3 */
	saa7110_write(sd, 0x21, ptr[7]);	/* Analog Control #2 */
	decoder->input = chan;

	return 0;
}
/*
 * Power-on initialization block: byte 0 is the start register (0),
 * followed by one value for each of the SAA7110_NR_REG registers,
 * written in one go by saa7110_write_block().
 */
static const unsigned char initseq[1 + SAA7110_NR_REG] = {
	0, 0x4C, 0x3C, 0x0D, 0xEF, 0xBD, 0xF2, 0x03, 0x00,
	/* 0x08 */ 0xF8, 0xF8, 0x60, 0x60, 0x00, 0x86, 0x18, 0x90,
	/* 0x10 */ 0x00, 0x59, 0x40, 0x46, 0x42, 0x1A, 0xFF, 0xDA,
	/* 0x18 */ 0xF2, 0x8B, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* 0x20 */ 0xD9, 0x16, 0x40, 0x41, 0x80, 0x41, 0x80, 0x4F,
	/* 0x28 */ 0xFE, 0x01, 0xCF, 0x0F, 0x03, 0x01, 0x03, 0x0C,
	/* 0x30 */ 0x44, 0x71, 0x02, 0x8C, 0x02
};
/*
 * Autodetect the incoming video standard.
 *
 * Reloads the init sequence and the current input mux, waits ~250 ms
 * for the chip to lock, then decodes the status byte: no signal keeps
 * the previous norm; the 50/60 Hz bit separates NTSC from PAL; for
 * 50 Hz input a second 250 ms settle distinguishes SECAM from PAL.
 * Standard-specific register values are written as a side effect.
 */
static v4l2_std_id determine_norm(struct v4l2_subdev *sd)
{
	DEFINE_WAIT(wait);
	struct saa7110 *decoder = to_saa7110(sd);
	int status;

	/* mode changed, start automatic detection */
	saa7110_write_block(sd, initseq, sizeof(initseq));
	saa7110_selmux(sd, decoder->input);
	prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(250));
	finish_wait(&decoder->wq, &wait);
	status = saa7110_read(sd);
	if (status & 0x40) {
		v4l2_dbg(1, debug, sd, "status=0x%02x (no signal)\n", status);
		return decoder->norm;	/* no change*/
	}
	if ((status & 3) == 0) {
		/* no color carrier detected */
		saa7110_write(sd, 0x06, 0x83);
		if (status & 0x20) {
			v4l2_dbg(1, debug, sd, "status=0x%02x (NTSC/no color)\n", status);
			/*saa7110_write(sd,0x2E,0x81);*/
			return V4L2_STD_NTSC;
		}
		v4l2_dbg(1, debug, sd, "status=0x%02x (PAL/no color)\n", status);
		/*saa7110_write(sd,0x2E,0x9A);*/
		return V4L2_STD_PAL;
	}
	/*saa7110_write(sd,0x06,0x03);*/
	if (status & 0x20) {	/* 60Hz */
		v4l2_dbg(1, debug, sd, "status=0x%02x (NTSC)\n", status);
		saa7110_write(sd, 0x0D, 0x86);
		saa7110_write(sd, 0x0F, 0x50);
		saa7110_write(sd, 0x11, 0x2C);
		/*saa7110_write(sd,0x2E,0x81);*/
		return V4L2_STD_NTSC;
	}

	/* 50Hz -> PAL/SECAM */
	saa7110_write(sd, 0x0D, 0x86);
	saa7110_write(sd, 0x0F, 0x10);
	saa7110_write(sd, 0x11, 0x59);
	/*saa7110_write(sd,0x2E,0x9A);*/

	prepare_to_wait(&decoder->wq, &wait, TASK_UNINTERRUPTIBLE);
	schedule_timeout(msecs_to_jiffies(250));
	finish_wait(&decoder->wq, &wait);

	status = saa7110_read(sd);
	if ((status & 0x03) == 0x01) {
		v4l2_dbg(1, debug, sd, "status=0x%02x (SECAM)\n", status);
		saa7110_write(sd, 0x0D, 0x87);
		return V4L2_STD_SECAM;
	}
	v4l2_dbg(1, debug, sd, "status=0x%02x (PAL)\n", status);
	return V4L2_STD_PAL;
}
/*
 * Report the input status flags derived from the chip status byte:
 * bit 6 set means no signal, bits 0-1 clear mean no color carrier.
 */
static int saa7110_g_input_status(struct v4l2_subdev *sd, u32 *pstatus)
{
	struct saa7110 *decoder = to_saa7110(sd);
	int status = saa7110_read(sd);
	int res = 0;

	v4l2_dbg(1, debug, sd, "status=0x%02x norm=%llx\n",
		 status, (unsigned long long)decoder->norm);

	if (status & 0x40)
		res |= V4L2_IN_ST_NO_SIGNAL;
	if (!(status & 0x03))
		res |= V4L2_IN_ST_NO_COLOR;

	*pstatus = res;
	return 0;
}
/* Run standard autodetection and report the result. */
static int saa7110_querystd(struct v4l2_subdev *sd, v4l2_std_id *std)
{
	*std = determine_norm(sd);
	return 0;
}
/*
 * Program the decoder for an explicitly requested standard.  Only acts
 * when the norm actually changes; writes the standard-specific values
 * to registers 0x0D/0x0F/0x11.  Returns -EINVAL for standards other
 * than NTSC/PAL/SECAM.
 */
static int saa7110_s_std(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct saa7110 *decoder = to_saa7110(sd);

	if (decoder->norm != std) {
		decoder->norm = std;
		/*saa7110_write(sd, 0x06, 0x03);*/
		if (std & V4L2_STD_NTSC) {
			saa7110_write(sd, 0x0D, 0x86);
			saa7110_write(sd, 0x0F, 0x50);
			saa7110_write(sd, 0x11, 0x2C);
			/*saa7110_write(sd, 0x2E, 0x81);*/
			v4l2_dbg(1, debug, sd, "switched to NTSC\n");
		} else if (std & V4L2_STD_PAL) {
			saa7110_write(sd, 0x0D, 0x86);
			saa7110_write(sd, 0x0F, 0x10);
			saa7110_write(sd, 0x11, 0x59);
			/*saa7110_write(sd, 0x2E, 0x9A);*/
			v4l2_dbg(1, debug, sd, "switched to PAL\n");
		} else if (std & V4L2_STD_SECAM) {
			saa7110_write(sd, 0x0D, 0x87);
			saa7110_write(sd, 0x0F, 0x10);
			saa7110_write(sd, 0x11, 0x59);
			/*saa7110_write(sd, 0x2E, 0x9A);*/
			v4l2_dbg(1, debug, sd, "switched to SECAM\n");
		} else {
			return -EINVAL;
		}
	}
	return 0;
}
/*
 * Select an input channel (0-8); output/config are unused.  A request
 * for the already-active input is a no-op.
 */
static int saa7110_s_routing(struct v4l2_subdev *sd,
			     u32 input, u32 output, u32 config)
{
	struct saa7110 *decoder = to_saa7110(sd);

	if (input >= SAA7110_MAX_INPUT) {
		v4l2_dbg(1, debug, sd, "input=%d not available\n", input);
		return -EINVAL;
	}

	if (decoder->input == input)
		return 0;

	saa7110_selmux(sd, input);
	v4l2_dbg(1, debug, sd, "switched to input=%d\n", input);
	return 0;
}
/* Enable or disable the YUV output (register 0x0E). */
static int saa7110_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct saa7110 *decoder = to_saa7110(sd);

	if (decoder->enable == enable)
		return 0;

	decoder->enable = enable;
	saa7110_write(sd, 0x0E, enable ? 0x18 : 0x80);
	v4l2_dbg(1, debug, sd, "YUV %s\n", enable ? "on" : "off");
	return 0;
}
/*
 * Control handler: map the standard picture controls onto their chip
 * registers (brightness 0x19, contrast 0x13, saturation 0x12,
 * hue 0x07).
 */
static int saa7110_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_sd(ctrl);

	switch (ctrl->id) {
	case V4L2_CID_BRIGHTNESS:
		saa7110_write(sd, 0x19, ctrl->val);
		break;
	case V4L2_CID_CONTRAST:
		saa7110_write(sd, 0x13, ctrl->val);
		break;
	case V4L2_CID_SATURATION:
		saa7110_write(sd, 0x12, ctrl->val);
		break;
	case V4L2_CID_HUE:
		saa7110_write(sd, 0x07, ctrl->val);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/* Report the chip identity for the VIDIOC_DBG_G_CHIP_IDENT ioctl. */
static int saa7110_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA7110, 0);
}
/* ----------------------------------------------------------------------- */
/* v4l2 operation tables wiring the callbacks above into the subdev. */
static const struct v4l2_ctrl_ops saa7110_ctrl_ops = {
	.s_ctrl = saa7110_s_ctrl,
};

static const struct v4l2_subdev_core_ops saa7110_core_ops = {
	.g_chip_ident = saa7110_g_chip_ident,
	/* control ioctls are forwarded to the control handler framework */
	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
	.g_ctrl = v4l2_subdev_g_ctrl,
	.s_ctrl = v4l2_subdev_s_ctrl,
	.queryctrl = v4l2_subdev_queryctrl,
	.querymenu = v4l2_subdev_querymenu,
	.s_std = saa7110_s_std,
};

static const struct v4l2_subdev_video_ops saa7110_video_ops = {
	.s_routing = saa7110_s_routing,
	.s_stream = saa7110_s_stream,
	.querystd = saa7110_querystd,
	.g_input_status = saa7110_g_input_status,
};

static const struct v4l2_subdev_ops saa7110_ops = {
	.core = &saa7110_core_ops,
	.video = &saa7110_video_ops,
};
/* ----------------------------------------------------------------------- */
/*
 * I2C probe: allocate and register the subdevice, create the four
 * picture controls, then load the init sequence and the PAL register
 * defaults.  A failed init sequence write is only logged; probing
 * still succeeds (the original behavior).
 */
static int saa7110_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct saa7110 *decoder;
	struct v4l2_subdev *sd;
	int rv;

	/* Check if the adapter supports the needed features */
	if (!i2c_check_functionality(client->adapter,
		I2C_FUNC_SMBUS_READ_BYTE | I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
		return -ENODEV;

	v4l_info(client, "chip found @ 0x%x (%s)\n",
			client->addr << 1, client->adapter->name);

	decoder = kzalloc(sizeof(struct saa7110), GFP_KERNEL);
	if (!decoder)
		return -ENOMEM;
	sd = &decoder->sd;
	v4l2_i2c_subdev_init(sd, client, &saa7110_ops);
	decoder->norm = V4L2_STD_PAL;
	decoder->input = 0;
	decoder->enable = 1;
	v4l2_ctrl_handler_init(&decoder->hdl, 2);
	v4l2_ctrl_new_std(&decoder->hdl, &saa7110_ctrl_ops,
		V4L2_CID_BRIGHTNESS, 0, 255, 1, 128);
	v4l2_ctrl_new_std(&decoder->hdl, &saa7110_ctrl_ops,
		V4L2_CID_CONTRAST, 0, 127, 1, 64);
	v4l2_ctrl_new_std(&decoder->hdl, &saa7110_ctrl_ops,
		V4L2_CID_SATURATION, 0, 127, 1, 64);
	v4l2_ctrl_new_std(&decoder->hdl, &saa7110_ctrl_ops,
		V4L2_CID_HUE, -128, 127, 1, 0);
	sd->ctrl_handler = &decoder->hdl;
	if (decoder->hdl.error) {
		int err = decoder->hdl.error;

		v4l2_ctrl_handler_free(&decoder->hdl);
		kfree(decoder);
		return err;
	}
	v4l2_ctrl_handler_setup(&decoder->hdl);

	init_waitqueue_head(&decoder->wq);

	rv = saa7110_write_block(sd, initseq, sizeof(initseq));
	if (rv < 0) {
		v4l2_dbg(1, debug, sd, "init status %d\n", rv);
	} else {
		int ver, status;

		/* read out the chip version, then restore normal operation */
		saa7110_write(sd, 0x21, 0x10);
		saa7110_write(sd, 0x0e, 0x18);
		saa7110_write(sd, 0x0D, 0x04);
		ver = saa7110_read(sd);
		saa7110_write(sd, 0x0D, 0x06);
		/*mdelay(150);*/
		status = saa7110_read(sd);
		v4l2_dbg(1, debug, sd, "version %x, status=0x%02x\n",
			       ver, status);
		saa7110_write(sd, 0x0D, 0x86);
		saa7110_write(sd, 0x0F, 0x10);
		saa7110_write(sd, 0x11, 0x59);
		/*saa7110_write(sd, 0x2E, 0x9A);*/
	}

	/*saa7110_selmux(sd,0);*/
	/*determine_norm(sd);*/
	/* setup and implicit mode 0 select has been performed */

	return 0;
}
/* I2C remove: unregister the subdevice and free all resources. */
static int saa7110_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct saa7110 *decoder = to_saa7110(sd);

	v4l2_device_unregister_subdev(sd);
	v4l2_ctrl_handler_free(&decoder->hdl);
	kfree(decoder);
	return 0;
}

/* ----------------------------------------------------------------------- */

static const struct i2c_device_id saa7110_id[] = {
	{ "saa7110", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, saa7110_id);

static struct i2c_driver saa7110_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "saa7110",
	},
	.probe		= saa7110_probe,
	.remove		= saa7110_remove,
	.id_table	= saa7110_id,
};

module_i2c_driver(saa7110_driver);
| gpl-2.0 |
ShinySide/SM-G361H | drivers/media/i2c/wm8739.c | 7242 | 7739 | /*
* wm8739
*
* Copyright (C) 2005 T. Adachi <tadachi@tadachi-net.com>
*
* Copyright (C) 2005 Hans Verkuil <hverkuil@xs4all.nl>
* - Cleanup
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/uaccess.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-ctrls.h>
MODULE_DESCRIPTION("wm8739 driver");
MODULE_AUTHOR("T. Adachi, Hans Verkuil");
MODULE_LICENSE("GPL");
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/* ------------------------------------------------------------------------ */
/* Register indices of the WM8739 used by this driver. */
enum {
	R0 = 0, R1,
	R5 = 5, R6, R7, R8, R9, R15 = 15,
	TOT_REGS
};

/* Per-device state for the WM8739 stereo A/D converter subdevice. */
struct wm8739_state {
	struct v4l2_subdev sd;
	struct v4l2_ctrl_handler hdl;
	struct {
		/* audio cluster: updates arrive together via s_ctrl */
		struct v4l2_ctrl *volume;
		struct v4l2_ctrl *mute;
		struct v4l2_ctrl *balance;
	};
	u32 clock_freq;		/* current sample clock in Hz */
};

/* Container helpers: subdev / control handler -> driver state. */
static inline struct wm8739_state *to_state(struct v4l2_subdev *sd)
{
	return container_of(sd, struct wm8739_state, sd);
}

static inline struct v4l2_subdev *to_sd(struct v4l2_ctrl *ctrl)
{
	return &container_of(ctrl->handler, struct wm8739_state, hdl)->sd;
}
/*
 * Write a 9-bit value to a WM8739 register.  The chip takes the
 * register number and value MSB packed into the SMBus command byte
 * ((reg << 1) | bit8), with the low 8 value bits as the data byte.
 * Retries up to 3 times; returns 0 on success, -1 on failure.
 */
static int wm8739_write(struct v4l2_subdev *sd, int reg, u16 val)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	int i;

	if (reg < 0 || reg >= TOT_REGS) {
		v4l2_err(sd, "Invalid register R%d\n", reg);
		return -1;
	}

	v4l2_dbg(1, debug, sd, "write: %02x %02x\n", reg, val);

	for (i = 0; i < 3; i++)
		if (i2c_smbus_write_byte_data(client,
				(reg << 1) | (val >> 8), val & 0xff) == 0)
			return 0;
	v4l2_err(sd, "I2C: cannot write %03x to register R%d\n", val, reg);
	return -1;
}
/*
 * Control handler for the volume/mute/balance cluster.  Because the
 * three controls are clustered (see probe), all updates arrive under
 * the volume control's id.  The 16-bit volume and balance are combined
 * into per-channel 5-bit gains and written with the mute bit to R0/R1.
 */
static int wm8739_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct v4l2_subdev *sd = to_sd(ctrl);
	struct wm8739_state *state = to_state(sd);
	unsigned int work_l, work_r;
	u8 vol_l;	/* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */
	u8 vol_r;	/* +12dB to -34.5dB 1.5dB step (5bit) def:0dB */
	u16 mute;

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_VOLUME:
		break;

	default:
		return -EINVAL;
	}

	/* normalize ( 65535 to 0 -> 31 to 0 (12dB to -34.5dB) ) */
	work_l = (min(65536 - state->balance->val, 32768) * state->volume->val) / 32768;
	work_r = (min(state->balance->val, 32768) * state->volume->val) / 32768;

	vol_l = (long)work_l * 31 / 65535;
	vol_r = (long)work_r * 31 / 65535;

	/* set audio volume etc. */
	mute = state->mute->val ? 0x80 : 0;

	/* Volume setting: bits 0-4, 0x1f = 12 dB, 0x00 = -34.5 dB
	 * Default setting: 0x17 = 0 dB
	 */
	wm8739_write(sd, R0, (vol_l & 0x1f) | mute);
	wm8739_write(sd, R1, (vol_r & 0x1f) | mute);
	return 0;
}
/* ------------------------------------------------------------------------ */
/*
 * Switch the sampling rate.  The codec is de-activated, the sampling
 * control register R8 is reprogrammed for 256fs at the new rate (an
 * unrecognized rate leaves R8 untouched, as before), and the codec is
 * re-activated.
 */
static int wm8739_s_clock_freq(struct v4l2_subdev *sd, u32 audiofreq)
{
	struct wm8739_state *state = to_state(sd);

	state->clock_freq = audiofreq;

	/* de-activate */
	wm8739_write(sd, R9, 0x000);

	if (audiofreq == 44100)
		wm8739_write(sd, R8, 0x020);	/* 256fps, fs=44.1k */
	else if (audiofreq == 48000)
		wm8739_write(sd, R8, 0x000);	/* 256fps, fs=48k */
	else if (audiofreq == 32000)
		wm8739_write(sd, R8, 0x018);	/* 256fps, fs=32k */

	/* activate */
	wm8739_write(sd, R9, 0x001);
	return 0;
}
/* Report the chip identity for the VIDIOC_DBG_G_CHIP_IDENT ioctl. */
static int wm8739_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_WM8739, 0);
}

/* Dump the current sample clock and control values to the kernel log. */
static int wm8739_log_status(struct v4l2_subdev *sd)
{
	struct wm8739_state *state = to_state(sd);

	v4l2_info(sd, "Frequency: %u Hz\n", state->clock_freq);
	v4l2_ctrl_handler_log_status(&state->hdl, sd->name);
	return 0;
}

/* ----------------------------------------------------------------------- */

/* v4l2 operation tables wiring the callbacks above into the subdev. */
static const struct v4l2_ctrl_ops wm8739_ctrl_ops = {
	.s_ctrl = wm8739_s_ctrl,
};

static const struct v4l2_subdev_core_ops wm8739_core_ops = {
	.log_status = wm8739_log_status,
	.g_chip_ident = wm8739_g_chip_ident,
	/* control ioctls are forwarded to the control handler framework */
	.g_ext_ctrls = v4l2_subdev_g_ext_ctrls,
	.try_ext_ctrls = v4l2_subdev_try_ext_ctrls,
	.s_ext_ctrls = v4l2_subdev_s_ext_ctrls,
	.g_ctrl = v4l2_subdev_g_ctrl,
	.s_ctrl = v4l2_subdev_s_ctrl,
	.queryctrl = v4l2_subdev_queryctrl,
	.querymenu = v4l2_subdev_querymenu,
};

static const struct v4l2_subdev_audio_ops wm8739_audio_ops = {
	.s_clock_freq = wm8739_s_clock_freq,
};

static const struct v4l2_subdev_ops wm8739_ops = {
	.core = &wm8739_core_ops,
	.audio = &wm8739_audio_ops,
};
/* i2c implementation */
/*
 * I2C probe: allocate state, register the subdevice, create the
 * volume/mute/balance controls as one cluster, then reset and program
 * the codec for 24-bit master-mode I2S at 48 kHz / 256fs.
 */
static int wm8739_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	struct wm8739_state *state;
	struct v4l2_subdev *sd;

	/* Check if the adapter supports the needed features */
	if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA))
		return -EIO;

	v4l_info(client, "chip found @ 0x%x (%s)\n",
			client->addr << 1, client->adapter->name);

	state = kzalloc(sizeof(struct wm8739_state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;
	sd = &state->sd;
	v4l2_i2c_subdev_init(sd, client, &wm8739_ops);
	v4l2_ctrl_handler_init(&state->hdl, 2);
	state->volume = v4l2_ctrl_new_std(&state->hdl, &wm8739_ctrl_ops,
			V4L2_CID_AUDIO_VOLUME, 0, 65535, 65535 / 100, 50736);
	state->mute = v4l2_ctrl_new_std(&state->hdl, &wm8739_ctrl_ops,
			V4L2_CID_AUDIO_MUTE, 0, 1, 1, 0);
	state->balance = v4l2_ctrl_new_std(&state->hdl, &wm8739_ctrl_ops,
			V4L2_CID_AUDIO_BALANCE, 0, 65535, 65535 / 100, 32768);
	sd->ctrl_handler = &state->hdl;
	if (state->hdl.error) {
		int err = state->hdl.error;

		v4l2_ctrl_handler_free(&state->hdl);
		kfree(state);
		return err;
	}
	/* all three controls update together through wm8739_s_ctrl() */
	v4l2_ctrl_cluster(3, &state->volume);

	state->clock_freq = 48000;

	/* Initialize wm8739 */

	/* reset */
	wm8739_write(sd, R15, 0x00);
	/* filter setting, high path, offet clear */
	wm8739_write(sd, R5, 0x000);
	/* ADC, OSC, Power Off mode Disable */
	wm8739_write(sd, R6, 0x000);

	/* Digital Audio interface format:
	   Enable Master mode, 24 bit, MSB first/left justified */
	wm8739_write(sd, R7, 0x049);

	/* sampling control: normal, 256fs, 48KHz sampling rate */
	wm8739_write(sd, R8, 0x000);

	/* activate */
	wm8739_write(sd, R9, 0x001);

	/* set volume/mute */
	v4l2_ctrl_handler_setup(&state->hdl);
	return 0;
}
/*
 * i2c remove: unregister the subdevice and release the control handler
 * and driver state allocated in wm8739_probe().
 */
static int wm8739_remove(struct i2c_client *client)
{
	struct v4l2_subdev *sd = i2c_get_clientdata(client);
	struct wm8739_state *state = to_state(sd);

	v4l2_device_unregister_subdev(sd);
	v4l2_ctrl_handler_free(&state->hdl);
	/* was kfree(to_state(sd)): same pointer, drop the redundant lookup */
	kfree(state);
	return 0;
}
/* I2C device ids handled by this driver; exported for module autoloading. */
static const struct i2c_device_id wm8739_id[] = {
	{ "wm8739", 0 },
	{ }	/* sentinel */
};
MODULE_DEVICE_TABLE(i2c, wm8739_id);
/* I2C driver glue; module_i2c_driver() generates the init/exit boilerplate. */
static struct i2c_driver wm8739_driver = {
	.driver = {
		.owner	= THIS_MODULE,
		.name	= "wm8739",
	},
	.probe		= wm8739_probe,
	.remove		= wm8739_remove,
	.id_table	= wm8739_id,
};

module_i2c_driver(wm8739_driver);
| gpl-2.0 |
pst1337/omni_mkc_boeffla | sound/pci/lola/lola_clock.c | 8010 | 8181 | /*
* Support for Digigram Lola PCI-e boards
*
* Copyright (c) 2011 Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 59
* Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include "lola.h"
/*
 * Decode a coded sample-rate value into a frequency in Hz.
 *
 * Bits 0-1 select the base frequency, bits 2-4 a multiplier/divisor and
 * bits 5-6 a pull-up/down adjustment.  Returns 0 for any reserved field
 * combination.
 */
unsigned int lola_sample_rate_convert(unsigned int coded)
{
	static const unsigned int base_freq[4] = { 48000, 44100, 32000, 0 };
	unsigned int freq = base_freq[coded & 0x3];
	unsigned int mult = (coded >> 2) & 0x7;
	unsigned int adjust = (coded >> 5) & 0x3;

	if (!freq)
		return 0;	/* reserved base-frequency code */

	/* multiplier / divisor */
	switch (mult) {
	case 0:
	case 4:
		break;
	case 1:
		freq *= 2;
		break;
	case 2:
		freq *= 4;
		break;
	case 5:
		freq /= 2;
		break;
	case 6:
		freq /= 4;
		break;
	default:
		return 0;	/* reserved multiplier code */
	}

	/* adjustment (pull-down / pull-up) */
	switch (adjust) {
	case 0:
		break;
	case 1:
		freq = (freq * 999) / 1000;
		break;
	case 2:
		freq = (freq * 1001) / 1000;
		break;
	default:
		return 0;	/* reserved adjustment code */
	}
	return freq;
}
/*
 * Granularity
 *
 * The clock granularity limits the highest usable sample rate:
 * the minimum granularity only supports rates up to 48 kHz, any
 * granularity below the maximum supports up to 96 kHz.
 */
#define LOLA_MAXFREQ_AT_GRANULARITY_MIN		48000
#define LOLA_MAXFREQ_AT_GRANULARITY_BELOW_MAX	96000

/* Check whether granularity @val is usable at sample rate @freq. */
static bool check_gran_clock_compatibility(struct lola *chip,
					   unsigned int val,
					   unsigned int freq)
{
	/* Hardware without granularity support accepts anything. */
	if (!chip->granularity)
		return true;

	/* Must lie in range and on a step boundary. */
	if (val < LOLA_GRANULARITY_MIN || val > LOLA_GRANULARITY_MAX ||
	    (val % LOLA_GRANULARITY_STEP) != 0)
		return false;

	if (val == LOLA_GRANULARITY_MIN) {
		if (freq > LOLA_MAXFREQ_AT_GRANULARITY_MIN)
			return false;
	} else if (val < LOLA_GRANULARITY_MAX) {
		if (freq > LOLA_MAXFREQ_AT_GRANULARITY_BELOW_MAX)
			return false;
	}
	return true;
}
/*
 * Program a new stream granularity on the audio function group.
 *
 * Unless @force is set, the request is a no-op when @val is already
 * active and is rejected (-EINVAL) when incompatible with the current
 * sample clock.  Returns 0 or a negative error code.
 */
int lola_set_granularity(struct lola *chip, unsigned int val, bool force)
{
	int err;

	if (!force) {
		if (val == chip->granularity)
			return 0;
#if 0
		/* change Gran only if there are no streams allocated ! */
		if (chip->audio_in_alloc_mask || chip->audio_out_alloc_mask)
			return -EBUSY;
#endif
		if (!check_gran_clock_compatibility(chip, val,
						    chip->clock.cur_freq))
			return -EINVAL;
	}

	chip->granularity = val;
	/* The verb takes the granularity in units of the step size. */
	val /= LOLA_GRANULARITY_STEP;

	/* audio function group */
	err = lola_codec_write(chip, 1, LOLA_VERB_SET_GRANULARITY_STEPS,
			       val, 0);
	if (err < 0)
		return err;

	/* this can be a very slow function !!! */
	usleep_range(400 * val, 20000);
	return lola_codec_flush(chip);
}
/*
* Clock widget handling
*/
/*
 * Probe the clock widget at @nid and build the table of usable sample
 * clocks (internal rates plus external/video sources).
 *
 * Returns 0 on success (also when @nid turns out not to be a clock
 * widget) or a negative error code.
 */
int __devinit lola_init_clock_widget(struct lola *chip, int nid)
{
	unsigned int val;
	int i, j, nitems, nb_verbs, idx, idx_list;
	int err;

	err = lola_read_param(chip, nid, LOLA_PAR_AUDIO_WIDGET_CAP, &val);
	if (err < 0) {
		printk(KERN_ERR SFX "Can't read wcaps for 0x%x\n", nid);
		return err;
	}

	if ((val & 0xfff00000) != 0x01f00000) { /* test SubType and Type */
		snd_printdd("No valid clock widget\n");
		return 0;
	}

	chip->clock.nid = nid;
	chip->clock.items = val & 0xff;
	snd_printdd("clock_list nid=%x, entries=%d\n", nid,
		    chip->clock.items);
	if (chip->clock.items > MAX_SAMPLE_CLOCK_COUNT) {
		printk(KERN_ERR SFX "CLOCK_LIST too big: %d\n",
		       chip->clock.items);
		return -EINVAL;
	}

	nitems = chip->clock.items;
	/* Each GET_CLOCK_LIST verb returns four 12-bit entries. */
	nb_verbs = (nitems + 3) / 4;
	idx = 0;
	idx_list = 0;
	for (i = 0; i < nb_verbs; i++) {
		unsigned int res_ex;
		unsigned short items[4];

		err = lola_codec_read(chip, nid, LOLA_VERB_GET_CLOCK_LIST,
				      idx, 0, &val, &res_ex);
		if (err < 0) {
			printk(KERN_ERR SFX "Can't read CLOCK_LIST\n");
			return -EINVAL;
		}

		items[0] = val & 0xfff;
		items[1] = (val >> 16) & 0xfff;
		items[2] = res_ex & 0xfff;
		items[3] = (res_ex >> 16) & 0xfff;

		for (j = 0; j < 4; j++) {
			/* entry layout: type in the upper 4 bits, coded
			 * frequency in the lower 8 */
			unsigned char type = items[j] >> 8;
			unsigned int freq = items[j] & 0xff;
			int format = LOLA_CLOCK_FORMAT_NONE;
			bool add_clock = true;

			if (type == LOLA_CLOCK_TYPE_INTERNAL) {
				freq = lola_sample_rate_convert(freq);
				if (freq < chip->sample_rate_min)
					add_clock = false;
				else if (freq == 48000) {
					/* prefer 48 kHz as initial clock */
					chip->clock.cur_index = idx_list;
					chip->clock.cur_freq = 48000;
					chip->clock.cur_valid = true;
				}
			} else if (type == LOLA_CLOCK_TYPE_VIDEO) {
				freq = lola_sample_rate_convert(freq);
				if (freq < chip->sample_rate_min)
					add_clock = false;
				/* video clock has a format (0:NTSC, 1:PAL)*/
				/* NOTE(review): the comment above says
				 * 0:NTSC/1:PAL but the code maps a set bit
				 * to NTSC — verify against hardware docs. */
				if (items[j] & 0x80)
					format = LOLA_CLOCK_FORMAT_NTSC;
				else
					format = LOLA_CLOCK_FORMAT_PAL;
			}
			if (add_clock) {
				struct lola_sample_clock *sc;
				sc = &chip->clock.sample_clock[idx_list];
				sc->type = type;
				sc->format = format;
				sc->freq = freq;
				/* keep the index used with the board */
				chip->clock.idx_lookup[idx_list] = idx;
				idx_list++;
			} else {
				/* skipped entries shrink the visible list */
				chip->clock.items--;
			}
			if (++idx >= nitems)
				break;
		}
	}
	return 0;
}
/* Enable unsolicited (tagged) event reporting on the clock widget. */
int lola_enable_clock_events(struct lola *chip)
{
	unsigned int res;
	int err = lola_codec_read(chip, chip->clock.nid,
				  LOLA_VERB_SET_UNSOLICITED_ENABLE,
				  LOLA_UNSOLICITED_ENABLE | LOLA_UNSOLICITED_TAG,
				  0, &res, NULL);

	if (err < 0)
		return err;
	if (!res)
		return 0;

	printk(KERN_WARNING SFX "error in enable_clock_events %d\n",
	       res);
	return -EINVAL;
}
/* Select sample clock source @idx (translated to the board's own index). */
int lola_set_clock_index(struct lola *chip, unsigned int idx)
{
	unsigned int res;
	int err = lola_codec_read(chip, chip->clock.nid,
				  LOLA_VERB_SET_CLOCK_SELECT,
				  chip->clock.idx_lookup[idx],
				  0, &res, NULL);

	if (err < 0)
		return err;
	if (!res)
		return 0;

	printk(KERN_WARNING SFX "error in set_clock %d\n", res);
	return -EINVAL;
}
/*
 * Handle an unsolicited response word that reports the external clock
 * state.  Returns true when the response carried our clock tag.
 */
bool lola_update_ext_clock_freq(struct lola *chip, unsigned int val)
{
	/* the current EXTERNAL clock information gets updated by interrupt
	 * with an unsolicited response
	 */
	if (!val ||
	    ((val >> LOLA_UNSOL_RESP_TAG_OFFSET) &
	     LOLA_UNSOLICITED_TAG_MASK) != LOLA_UNSOLICITED_TAG)
		return false;

	/* only track the frequency while an external clock is selected */
	if (chip->clock.sample_clock[chip->clock.cur_index].type !=
	    LOLA_CLOCK_TYPE_INTERNAL) {
		chip->clock.cur_freq = lola_sample_rate_convert(val & 0x7f);
		chip->clock.cur_valid = (val & 0x100) != 0;
	}
	return true;
}
/*
 * Switch to sample clock @idx.
 *
 * Only the currently selected clock or an internal clock may be chosen;
 * other external clocks are rejected because their rate is not under our
 * control.  Returns 0 or a negative error code.
 */
int lola_set_clock(struct lola *chip, int idx)
{
	int freq = 0;
	bool valid = false;

	if (idx == chip->clock.cur_index) {
		/* current clock is allowed */
		freq = chip->clock.cur_freq;
		valid = chip->clock.cur_valid;
	} else if (chip->clock.sample_clock[idx].type ==
		   LOLA_CLOCK_TYPE_INTERNAL) {
		/* internal clocks allowed */
		freq = chip->clock.sample_clock[idx].freq;
		valid = true;
	}

	if (!freq || !valid)
		return -EINVAL;

	/* the granularity must remain compatible with the new rate */
	if (!check_gran_clock_compatibility(chip, chip->granularity, freq))
		return -EINVAL;

	if (idx != chip->clock.cur_index) {
		int err = lola_set_clock_index(chip, idx);
		if (err < 0)
			return err;
		/* update new settings */
		chip->clock.cur_index = idx;
		chip->clock.cur_freq = freq;
		chip->clock.cur_valid = true;
	}
	return 0;
}
/*
 * Switch to an internal clock source running at @rate Hz.
 * Returns -EINVAL when no matching internal clock entry exists.
 */
int lola_set_sample_rate(struct lola *chip, int rate)
{
	int i;

	/* nothing to do when the current clock already runs at @rate */
	if (chip->clock.cur_freq == rate && chip->clock.cur_valid)
		return 0;

	/* search for new dwClockIndex */
	for (i = 0; i < chip->clock.items; i++) {
		if (chip->clock.sample_clock[i].type ==
		    LOLA_CLOCK_TYPE_INTERNAL &&
		    chip->clock.sample_clock[i].freq == rate)
			return lola_set_clock(chip, i);
	}
	return -EINVAL;
}
| gpl-2.0 |
SaatvikShukla/android_kernel_oneplus_msm8974 | arch/sh/kernel/topology.c | 8778 | 1820 | /*
* arch/sh/kernel/topology.c
*
* Copyright (C) 2007 Paul Mundt
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/topology.h>
#include <linux/node.h>
#include <linux/nodemask.h>
#include <linux/export.h>
/* Per-CPU device structures registered with the driver core. */
static DEFINE_PER_CPU(struct cpu, cpu_devices);

/* Core-sibling map, rebuilt by arch_update_cpu_topology(). */
cpumask_t cpu_core_map[NR_CPUS];
EXPORT_SYMBOL(cpu_core_map);
/* Compute the core-sibling mask for @cpu. */
static cpumask_t cpu_coregroup_map(unsigned int cpu)
{
	/*
	 * Presently all SH-X3 SMP cores are multi-cores, so just keep it
	 * simple until we have a method for determining topology..
	 */
	return *cpu_possible_mask;
}
/* Return the cached core-sibling mask for @cpu. */
const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
{
	return &cpu_core_map[cpu];
}
/* Rebuild cpu_core_map for every possible CPU; always reports success. */
int arch_update_cpu_topology(void)
{
	unsigned int cpu;

	for_each_possible_cpu(cpu)
		cpu_core_map[cpu] = cpu_coregroup_map(cpu);

	return 0;
}
/* Register NUMA nodes and all present CPUs with the driver core at boot. */
static int __init topology_init(void)
{
	int i, ret;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i)
		register_one_node(i);
#endif

	for_each_present_cpu(i) {
		struct cpu *c = &per_cpu(cpu_devices, i);

		/* every CPU is reported as hot-pluggable */
		c->hotpluggable = 1;

		ret = register_cpu(c, i);
		if (unlikely(ret))
			printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
			       __func__, i, ret);
	}

#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
	/*
	 * In the UP case, make sure the CPU association is still
	 * registered under each node. Without this, sysfs fails
	 * to make the connection between nodes other than node0
	 * and cpu0.
	 */
	for_each_online_node(i)
		if (i != numa_node_id())
			register_cpu_under_node(raw_smp_processor_id(), i);
#endif

	return 0;
}
subsys_initcall(topology_init);
| gpl-2.0 |
xaxaxa/linux-3.13-socfpga-vserver-aufs | net/irda/irlmp_frame.c | 11594 | 14337 | /*********************************************************************
*
* Filename: irlmp_frame.c
* Version: 0.9
* Description: IrLMP frame implementation
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Tue Aug 19 02:09:59 1997
* Modified at: Mon Dec 13 13:41:12 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>
* All Rights Reserved.
* Copyright (c) 2000-2003 Jean Tourrilhes <jt@hpl.hp.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither Dag Brattli nor University of Tromsø admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
********************************************************************/
#include <linux/skbuff.h>
#include <linux/kernel.h>
#include <net/irda/irda.h>
#include <net/irda/irlap.h>
#include <net/irda/timer.h>
#include <net/irda/irlmp.h>
#include <net/irda/irlmp_frame.h>
#include <net/irda/discovery.h>
static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap,
__u8 slsap, int status, hashbin_t *);
/* Fill in the LM-PDU address pair and hand the frame down to IrLAP. */
inline void irlmp_send_data_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
				int expedited, struct sk_buff *skb)
{
	/* the first two bytes of an LM-PDU carry the address pair */
	skb->data[0] = dlsap;
	skb->data[1] = slsap;

	if (expedited)
		IRDA_DEBUG(4, "%s(), sending expedited data\n", __func__);

	irlap_data_request(self->irlap, skb, expedited ? TRUE : FALSE);
}
/*
* Function irlmp_send_lcf_pdu (dlsap, slsap, opcode,skb)
*
* Send Link Control Frame to IrLAP
*/
void irlmp_send_lcf_pdu(struct lap_cb *self, __u8 dlsap, __u8 slsap,
			__u8 opcode, struct sk_buff *skb)
{
	__u8 *frame;

	IRDA_DEBUG(2, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
	IRDA_ASSERT(skb != NULL, return;);

	/* control frame: address pair, opcode, then one parameter byte */
	frame = skb->data;
	frame[0] = dlsap | CONTROL_BIT;
	frame[1] = slsap;
	frame[2] = opcode;
	frame[3] = (opcode == DISCONNECT) ?
		0x01 /* Service user request */ : 0x00 /* rsvd */;

	irlap_data_request(self->irlap, skb, FALSE);
}
/*
* Function irlmp_input (skb)
*
* Used by IrLAP to pass received data frames to IrLMP layer
*
*/
/*
 * Entry point for frames handed up from IrLAP.
 *
 * Decodes the LM-PDU address pair, locates the LSAP the frame belongs
 * to, then dispatches control frames to the LSAP state machine and data
 * frames to the (possibly fast-pathed) data indication handlers.
 */
void irlmp_link_data_indication(struct lap_cb *self, struct sk_buff *skb,
				int unreliable)
{
	struct lsap_cb *lsap;
	__u8 slsap_sel;		/* Source (this) LSAP address */
	__u8 dlsap_sel;		/* Destination LSAP address */
	__u8 *fp;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
	IRDA_ASSERT(skb->len > 2, return;);

	fp = skb->data;

	/*
	 * The next statements may be confusing, but we do this so that
	 * destination LSAP of received frame is source LSAP in our view
	 */
	slsap_sel = fp[0] & LSAP_MASK;
	dlsap_sel = fp[1];

	/*
	 * Check if this is an incoming connection, since we must deal with
	 * it in a different way than other established connections.
	 */
	if ((fp[0] & CONTROL_BIT) && (fp[2] == CONNECT_CMD)) {
		IRDA_DEBUG(3, "%s(), incoming connection, "
			   "source LSAP=%d, dest LSAP=%d\n",
			   __func__, slsap_sel, dlsap_sel);

		/* Try to find LSAP among the unconnected LSAPs */
		lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, CONNECT_CMD,
				       irlmp->unconnected_lsaps);

		/* Maybe LSAP was already connected, so try one more time */
		if (!lsap) {
			IRDA_DEBUG(1, "%s(), incoming connection for LSAP already connected\n", __func__);
			lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0,
					       self->lsaps);
		}
	} else
		lsap = irlmp_find_lsap(self, dlsap_sel, slsap_sel, 0,
				       self->lsaps);

	if (lsap == NULL) {
		/* No owner: the frame is dropped after some diagnostics. */
		IRDA_DEBUG(2, "IrLMP, Sorry, no LSAP for received frame!\n");
		IRDA_DEBUG(2, "%s(), slsap_sel = %02x, dlsap_sel = %02x\n",
			   __func__, slsap_sel, dlsap_sel);
		if (fp[0] & CONTROL_BIT) {
			IRDA_DEBUG(2, "%s(), received control frame %02x\n",
				   __func__, fp[2]);
		} else {
			IRDA_DEBUG(2, "%s(), received data frame\n", __func__);
		}
		return;
	}

	/*
	 *  Check if we received a control frame?
	 */
	if (fp[0] & CONTROL_BIT) {
		switch (fp[2]) {
		case CONNECT_CMD:
			lsap->lap = self;
			irlmp_do_lsap_event(lsap, LM_CONNECT_INDICATION, skb);
			break;
		case CONNECT_CNF:
			irlmp_do_lsap_event(lsap, LM_CONNECT_CONFIRM, skb);
			break;
		case DISCONNECT:
			IRDA_DEBUG(4, "%s(), Disconnect indication!\n",
				   __func__);
			irlmp_do_lsap_event(lsap, LM_DISCONNECT_INDICATION,
					    skb);
			break;
		case ACCESSMODE_CMD:
			IRDA_DEBUG(0, "Access mode cmd not implemented!\n");
			break;
		case ACCESSMODE_CNF:
			IRDA_DEBUG(0, "Access mode cnf not implemented!\n");
			break;
		default:
			IRDA_DEBUG(0, "%s(), Unknown control frame %02x\n",
				   __func__, fp[2]);
			break;
		}
	} else if (unreliable) {
		/* Optimize and bypass the state machine if possible */
		if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY)
			irlmp_udata_indication(lsap, skb);
		else
			irlmp_do_lsap_event(lsap, LM_UDATA_INDICATION, skb);
	} else {
		/* Optimize and bypass the state machine if possible */
		if (lsap->lsap_state == LSAP_DATA_TRANSFER_READY)
			irlmp_data_indication(lsap, skb);
		else
			irlmp_do_lsap_event(lsap, LM_DATA_INDICATION, skb);
	}
}
/*
* Function irlmp_link_unitdata_indication (self, skb)
*
*
*
*/
#ifdef CONFIG_IRDA_ULTRA
/*
 * Deliver a connectionless (Ultra) frame to the matching connectionless
 * LSAP, if any.  Frames with a PID extension bit or a non-connectionless
 * address pair are dropped.
 */
void irlmp_link_unitdata_indication(struct lap_cb *self, struct sk_buff *skb)
{
	struct lsap_cb *lsap;
	__u8 slsap_sel;		/* Source (this) LSAP address */
	__u8 dlsap_sel;		/* Destination LSAP address */
	__u8 pid;		/* Protocol identifier */
	__u8 *fp;
	unsigned long flags;

	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
	IRDA_ASSERT(skb->len > 2, return;);

	fp = skb->data;

	/*
	 * The next statements may be confusing, but we do this so that
	 * destination LSAP of received frame is source LSAP in our view
	 */
	slsap_sel = fp[0] & LSAP_MASK;
	dlsap_sel = fp[1];
	pid = fp[2];

	if (pid & 0x80) {
		IRDA_DEBUG(0, "%s(), extension in PID not supp!\n",
			   __func__);
		return;
	}

	/* Check if frame is addressed to the connectionless LSAP */
	if ((slsap_sel != LSAP_CONNLESS) || (dlsap_sel != LSAP_CONNLESS)) {
		IRDA_DEBUG(0, "%s(), dropping frame!\n", __func__);
		return;
	}

	/* Search the connectionless LSAP */
	spin_lock_irqsave(&irlmp->unconnected_lsaps->hb_spinlock, flags);
	lsap = (struct lsap_cb *) hashbin_get_first(irlmp->unconnected_lsaps);
	while (lsap != NULL) {
		/*
		 * Check if source LSAP and dest LSAP selectors and PID match.
		 */
		if ((lsap->slsap_sel == slsap_sel) &&
		    (lsap->dlsap_sel == dlsap_sel) &&
		    (lsap->pid == pid))
		{
			break;
		}
		lsap = (struct lsap_cb *) hashbin_get_next(irlmp->unconnected_lsaps);
	}
	spin_unlock_irqrestore(&irlmp->unconnected_lsaps->hb_spinlock, flags);

	if (lsap)
		irlmp_connless_data_indication(lsap, skb);
	else {
		IRDA_DEBUG(0, "%s(), found no matching LSAP!\n", __func__);
	}
}
#endif /* CONFIG_IRDA_ULTRA */
/*
* Function irlmp_link_disconnect_indication (reason, userdata)
*
* IrLAP has disconnected
*
*/
/*
 * IrLAP has disconnected: record the reason, forget the peer address and
 * notify the LM-LAP state machine.
 */
void irlmp_link_disconnect_indication(struct lap_cb *lap,
				      struct irlap_cb *irlap,
				      LAP_REASON reason,
				      struct sk_buff *skb)
{
	IRDA_DEBUG(2, "%s()\n", __func__);

	IRDA_ASSERT(lap != NULL, return;);
	IRDA_ASSERT(lap->magic == LMP_LAP_MAGIC, return;);

	lap->reason = reason;
	lap->daddr = DEV_ADDR_ANY;

	/* FIXME: must do something with the skb if any */

	/*
	 * Inform station state machine
	 */
	irlmp_do_lap_event(lap, LM_LAP_DISCONNECT_INDICATION, NULL);
}
/*
* Function irlmp_link_connect_indication (qos)
*
* Incoming LAP connection!
*
*/
/*
 * Incoming LAP connection: store the session QoS and peer address, then
 * forward the event (with the frame) to the LM-LAP state machine.
 */
void irlmp_link_connect_indication(struct lap_cb *self, __u32 saddr,
				   __u32 daddr, struct qos_info *qos,
				   struct sk_buff *skb)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	/* Copy QoS settings for this session */
	self->qos = qos;

	/* Update destination device address */
	self->daddr = daddr;
	IRDA_ASSERT(self->saddr == saddr, return;);

	irlmp_do_lap_event(self, LM_LAP_CONNECT_INDICATION, skb);
}
/*
* Function irlmp_link_connect_confirm (qos)
*
* LAP connection confirmed!
*
*/
/*
 * Our LAP connect request succeeded: store the negotiated QoS and tell
 * the LM-LAP state machine.  The skb itself is not used.
 */
void irlmp_link_connect_confirm(struct lap_cb *self, struct qos_info *qos,
				struct sk_buff *skb)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);
	IRDA_ASSERT(qos != NULL, return;);

	/* Don't need use the skb for now */

	/* Copy QoS settings for this session */
	self->qos = qos;

	irlmp_do_lap_event(self, LM_LAP_CONNECT_CONFIRM, NULL);
}
/*
* Function irlmp_link_discovery_indication (self, log)
*
* Device is discovering us
*
* It's not an answer to our own discoveries, just another device trying
* to perform discovery, but we don't want to miss the opportunity
* to exploit this information, because :
* o We may not actively perform discovery (just passive discovery)
* o This type of discovery is much more reliable. In some cases, it
* seem that less than 50% of our discoveries get an answer, while
* we always get ~100% of these.
* o Make faster discovery, statistically divide time of discovery
* events by 2 (important for the latency aspect and user feel)
* o Even is we do active discovery, the other node might not
* answer our discoveries (ex: Palm). The Palm will just perform
* one active discovery and connect directly to us.
*
* However, when both devices discover each other, they might attempt to
* connect to each other following the discovery event, and it would create
* collisions on the medium (SNRM battle).
* The "fix" for that is to disable all connection requests in IrLAP
* for 100ms after a discovery indication by setting the media_busy flag.
* Previously, we used to postpone the event which was quite ugly. Now
* that IrLAP takes care of this problem, just pass the event up...
*
* Jean II
*/
/*
 * A peer device is discovering us: record the discovery in the cache log
 * and propagate it as a passive discovery confirmation (rationale in the
 * comment block above).
 */
void irlmp_link_discovery_indication(struct lap_cb *self,
				     discovery_t *discovery)
{
	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);

	/* Add to main log, cleanup */
	irlmp_add_discovery(irlmp->cachelog, discovery);

	/* Just handle it the same way as a discovery confirm,
	 * bypass the LM_LAP state machine (see below) */
	irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_PASSIVE);
}
/*
* Function irlmp_link_discovery_confirm (self, log)
*
* Called by IrLAP with a list of discoveries after the discovery
* request has been carried out. A NULL log is received if IrLAP
* was unable to carry out the discovery request
*
*/
/*
 * IrLAP completed a discovery request: merge the returned log into the
 * main cache and notify interested LSAPs directly.
 */
void irlmp_link_discovery_confirm(struct lap_cb *self, hashbin_t *log)
{
	IRDA_DEBUG(4, "%s()\n", __func__);

	IRDA_ASSERT(self != NULL, return;);
	IRDA_ASSERT(self->magic == LMP_LAP_MAGIC, return;);

	/* Add to main log, cleanup */
	irlmp_add_discovery_log(irlmp->cachelog, log);

	/* Propagate event to various LSAPs registered for it.
	 * We bypass the LM_LAP state machine because
	 * 1) We do it regardless of the LM_LAP state
	 * 2) It doesn't affect the LM_LAP state
	 * 3) Faster, slimer, simpler, ...
	 * Jean II */
	irlmp_discovery_confirm(irlmp->cachelog, DISCOVERY_ACTIVE);
}
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
/*
 * Remember the last matched LSAP so the next frame on the same
 * connection can skip the linear search in irlmp_find_lsap().
 */
static inline void irlmp_update_cache(struct lap_cb *lap,
				      struct lsap_cb *lsap)
{
	/* Prevent concurrent read to get garbage */
	lap->cache.valid = FALSE;
	/* Update cache entry */
	lap->cache.dlsap_sel = lsap->dlsap_sel;
	lap->cache.slsap_sel = lsap->slsap_sel;
	lap->cache.lsap = lsap;
	lap->cache.valid = TRUE;
}
#endif
/*
* Function irlmp_find_handle (self, dlsap_sel, slsap_sel, status, queue)
*
* Find handle associated with destination and source LSAP
*
* Any IrDA connection (LSAP/TSAP) is uniquely identified by
* 3 parameters, the local lsap, the remote lsap and the remote address.
* We may initiate multiple connections to the same remote service
* (they will have different local lsap), a remote device may initiate
* multiple connections to the same local service (they will have
* different remote lsap), or multiple devices may connect to the same
* service and may use the same remote lsap (and they will have
* different remote address).
* So, where is the remote address ? Each LAP connection is made with
* a single remote device, so imply a specific remote address.
* Jean II
*/
/*
 * Find the LSAP bound to the given address pair on this LAP connection.
 *
 * A one-entry cache short-circuits the common case of consecutive frames
 * on the same connection.  For incoming connect requests (@status ==
 * CONNECT_CMD) a listening LSAP bound to LSAP_ANY also matches, and its
 * destination selector is fixed up to the peer's.  Returns the matching
 * LSAP or NULL.
 */
static struct lsap_cb *irlmp_find_lsap(struct lap_cb *self, __u8 dlsap_sel,
				       __u8 slsap_sel, int status,
				       hashbin_t *queue)
{
	struct lsap_cb *lsap;
	unsigned long flags;

	/*
	 * Optimize for the common case. We assume that the last frame
	 * received is in the same connection as the last one, so check in
	 * cache first to avoid the linear search
	 */
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
	if ((self->cache.valid) &&
	    (self->cache.slsap_sel == slsap_sel) &&
	    (self->cache.dlsap_sel == dlsap_sel))
	{
		return self->cache.lsap;
	}
#endif

	spin_lock_irqsave(&queue->hb_spinlock, flags);

	lsap = (struct lsap_cb *) hashbin_get_first(queue);
	while (lsap != NULL) {
		/*
		 * If this is an incoming connection, then the destination
		 * LSAP selector may have been specified as LM_ANY so that
		 * any client can connect. In that case we only need to check
		 * if the source LSAP (in our view!) match!
		 */
		if ((status == CONNECT_CMD) &&
		    (lsap->slsap_sel == slsap_sel) &&
		    (lsap->dlsap_sel == LSAP_ANY)) {
			/* This is where the dest lsap sel is set on incoming
			 * lsaps */
			lsap->dlsap_sel = dlsap_sel;
			break;
		}
		/*
		 * Check if source LSAP and dest LSAP selectors match.
		 */
		if ((lsap->slsap_sel == slsap_sel) &&
		    (lsap->dlsap_sel == dlsap_sel))
			break;

		lsap = (struct lsap_cb *) hashbin_get_next(queue);
	}
#ifdef CONFIG_IRDA_CACHE_LAST_LSAP
	if(lsap)
		irlmp_update_cache(self, lsap);
#endif
	spin_unlock_irqrestore(&queue->hb_spinlock, flags);

	/* Return what we've found or NULL */
	return lsap;
}
| gpl-2.0 |
lollipop-og/kernel_google | net/netfilter/xt_multiport.c | 13386 | 4664 | /* Kernel module to match one of a list of TCP/UDP(-Lite)/SCTP/DCCP ports:
ports are in the same place so we can treat them as equal. */
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/types.h>
#include <linux/udp.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/netfilter/xt_multiport.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
MODULE_DESCRIPTION("Xtables: multiple port matching for TCP, UDP, UDP-Lite, SCTP and DCCP");
MODULE_ALIAS("ipt_multiport");
MODULE_ALIAS("ip6t_multiport");
/*
 * Test @src/@dst against the configured list of single ports and port
 * ranges.  The result is XORed with the user-supplied invert flag.
 */
static inline bool
ports_match_v1(const struct xt_multiport_v1 *minfo,
	       u_int16_t src, u_int16_t dst)
{
	unsigned int i;

	for (i = 0; i < minfo->count; i++) {
		u_int16_t s = minfo->ports[i];
		bool hit;

		if (minfo->pflags[i]) {
			/* range matching: the next slot is the upper bound */
			u_int16_t e = minfo->ports[++i];

			pr_debug("src or dst matches with %d-%d?\n", s, e);
			switch (minfo->flags) {
			case XT_MULTIPORT_SOURCE:
				hit = src >= s && src <= e;
				break;
			case XT_MULTIPORT_DESTINATION:
				hit = dst >= s && dst <= e;
				break;
			case XT_MULTIPORT_EITHER:
				hit = (src >= s && src <= e) ||
				      (dst >= s && dst <= e);
				break;
			default:
				hit = false;
				break;
			}
		} else {
			/* exact port matching */
			pr_debug("src or dst matches with %d?\n", s);
			switch (minfo->flags) {
			case XT_MULTIPORT_SOURCE:
				hit = src == s;
				break;
			case XT_MULTIPORT_DESTINATION:
				hit = dst == s;
				break;
			case XT_MULTIPORT_EITHER:
				hit = src == s || dst == s;
				break;
			default:
				hit = false;
				break;
			}
		}

		if (hit)
			return true ^ minfo->invert;
	}

	return minfo->invert;
}
/*
 * Match routine: pull the transport-layer source/destination ports out
 * of the packet and test them against the configured port list.
 */
static bool
multiport_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	const __be16 *pptr;
	__be16 _ports[2];
	const struct xt_multiport_v1 *multiinfo = par->matchinfo;

	/* non-first fragments carry no port numbers */
	if (par->fragoff != 0)
		return false;

	pptr = skb_header_pointer(skb, par->thoff, sizeof(_ports), _ports);
	if (pptr == NULL) {
		/* We've been asked to examine this packet, and we
		 * can't. Hence, no choice but to drop.
		 */
		pr_debug("Dropping evil offset=0 tinygram.\n");
		par->hotdrop = true;
		return false;
	}

	return ports_match_v1(multiinfo, ntohs(pptr[0]), ntohs(pptr[1]));
}
/* Must specify supported protocol, no unknown flags or bad count */
static inline bool
check(u_int16_t proto,
      u_int8_t ip_invflags,
      u_int8_t match_flags,
      u_int8_t count)
{
	bool proto_ok = proto == IPPROTO_TCP || proto == IPPROTO_UDP ||
			proto == IPPROTO_UDPLITE ||
			proto == IPPROTO_SCTP || proto == IPPROTO_DCCP;
	bool flags_ok = match_flags == XT_MULTIPORT_SOURCE ||
			match_flags == XT_MULTIPORT_DESTINATION ||
			match_flags == XT_MULTIPORT_EITHER;

	return proto_ok && !(ip_invflags & XT_INV_PROTO) && flags_ok &&
	       count <= XT_MULTI_PORTS;
}
/* checkentry hook for the IPv4 variant. */
static int multiport_mt_check(const struct xt_mtchk_param *par)
{
	const struct ipt_ip *ip = par->entryinfo;
	const struct xt_multiport_v1 *multiinfo = par->matchinfo;

	if (!check(ip->proto, ip->invflags, multiinfo->flags,
		   multiinfo->count))
		return -EINVAL;
	return 0;
}
/* checkentry hook for the IPv6 variant. */
static int multiport_mt6_check(const struct xt_mtchk_param *par)
{
	const struct ip6t_ip6 *ip = par->entryinfo;
	const struct xt_multiport_v1 *multiinfo = par->matchinfo;

	if (!check(ip->proto, ip->invflags, multiinfo->flags,
		   multiinfo->count))
		return -EINVAL;
	return 0;
}
/* Match registrations: revision 1 of "multiport" for IPv4 and IPv6.
 * Both share the match routine; only the checkentry hook differs because
 * the per-family entry layouts do. */
static struct xt_match multiport_mt_reg[] __read_mostly = {
	{
		.name		= "multiport",
		.family		= NFPROTO_IPV4,
		.revision	= 1,
		.checkentry	= multiport_mt_check,
		.match		= multiport_mt,
		.matchsize	= sizeof(struct xt_multiport_v1),
		.me		= THIS_MODULE,
	},
	{
		.name		= "multiport",
		.family		= NFPROTO_IPV6,
		.revision	= 1,
		.checkentry	= multiport_mt6_check,
		.match		= multiport_mt,
		.matchsize	= sizeof(struct xt_multiport_v1),
		.me		= THIS_MODULE,
	},
};
/* Register both family variants on module load. */
static int __init multiport_mt_init(void)
{
	return xt_register_matches(multiport_mt_reg,
				   ARRAY_SIZE(multiport_mt_reg));
}

/* Unregister on module unload. */
static void __exit multiport_mt_exit(void)
{
	xt_unregister_matches(multiport_mt_reg, ARRAY_SIZE(multiport_mt_reg));
}

module_init(multiport_mt_init);
module_exit(multiport_mt_exit);
| gpl-2.0 |
blitztech/rev4 | src/server/shared/Logging/AppenderFile.cpp | 75 | 2747 | /*
* Copyright (C) 2008-2013 TrinityCore <http://www.trinitycore.org/>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "AppenderFile.h"
#include "Common.h"
/// Construct a file appender.  When the configured filename contains a
/// "%s" placeholder the actual file is opened lazily per message;
/// otherwise it is opened immediately (backing up the old file first
/// when requested and opening in truncate mode).
AppenderFile::AppenderFile(uint8 id, std::string const& name, LogLevel level, const char* _filename, const char* _logDir, const char* _mode, AppenderFlags _flags, uint64 fileSize):
    Appender(id, name, APPENDER_FILE, level, _flags),
    logfile(NULL),
    filename(_filename),
    logDir(_logDir),
    mode(_mode),
    maxFileSize(fileSize),
    fileSize(0)
{
    // "%s" in the name means the target file depends on the message (param1)
    dynamicName = std::string::npos != filename.find("%s");
    backup = _flags & APPENDER_FLAGS_MAKE_FILE_BACKUP;

    logfile = !dynamicName ? OpenFile(_filename, _mode, mode == "w" && backup) : NULL;
}
/// Destructor: ensure any open log handle is flushed and closed.
AppenderFile::~AppenderFile()
{
    CloseFile();
}
/// Append one formatted message to the log file, reopening/rotating as
/// dictated by the dynamic-name and max-size settings.
void AppenderFile::_write(LogMessage const& message)
{
    bool exceedMaxSize = maxFileSize > 0 && (fileSize + message.Size()) > maxFileSize;

    if (dynamicName)
    {
        // Resolve the "%s" placeholder with the message parameter and
        // open the target file just for this one message.
        char namebuf[TRINITY_PATH_MAX];
        snprintf(namebuf, TRINITY_PATH_MAX, filename.c_str(), message.param1.c_str());
        logfile = OpenFile(namebuf, mode, backup || exceedMaxSize);
    }
    else if (exceedMaxSize)
        logfile = OpenFile(filename, "w", true);  // rotate: back up, then truncate

    if (!logfile)
        return;

    fprintf(logfile, "%s%s", message.prefix.c_str(), message.text.c_str());
    fflush(logfile);
    fileSize += message.Size();

    if (dynamicName)
        CloseFile();
}
/// Open (and optionally back up) the log file relative to logDir.
/// A backup renames the existing file with a timestamp suffix first.
/// On success the cached fileSize is refreshed; returns NULL on failure.
FILE* AppenderFile::OpenFile(std::string const &filename, std::string const &mode, bool backup)
{
    std::string fullName(logDir + filename);
    if (backup)
    {
        CloseFile();
        std::string newName(fullName);
        newName.push_back('.');
        newName.append(LogMessage::getTimeStr(time(NULL)));
        rename(fullName.c_str(), newName.c_str()); // no error handling... if we couldn't make a backup, just ignore
    }

    if (FILE* ret = fopen(fullName.c_str(), mode.c_str()))
    {
        fileSize = ftell(ret);
        return ret;
    }
    return NULL;
}
// Close the currently open log file, if any, and reset the handle so a
// later open starts from a clean state.
void AppenderFile::CloseFile()
{
    if (!logfile)
        return;

    fclose(logfile);
    logfile = NULL;
}
| gpl-2.0 |
kgene/linux-samsung | drivers/staging/lustre/lustre/libcfs/hash.c | 331 | 57435 | /*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
* GPL HEADER END
*/
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
* libcfs/libcfs/hash.c
*
* Implement a hash class for hash process in lustre system.
*
* Author: YuZhangyong <yzy@clusterfs.com>
*
* 2008-08-15: Brian Behlendorf <behlendorf1@llnl.gov>
* - Simplified API and improved documentation
* - Added per-hash feature flags:
* * CFS_HASH_DEBUG additional validation
* * CFS_HASH_REHASH dynamic rehashing
* - Added per-hash statistics
* - General performance enhancements
*
* 2009-07-31: Liang Zhen <zhen.liang@sun.com>
* - move all stuff to libcfs
* - don't allow cur_bits != max_bits without setting of CFS_HASH_REHASH
* - ignore hs_rwlock if without CFS_HASH_REHASH setting
* - buckets are allocated one by one(instead of contiguous memory),
* to avoid unnecessary cacheline conflict
*
* 2010-03-01: Liang Zhen <zhen.liang@sun.com>
* - "bucket" is a group of hlist_head now, user can specify bucket size
* by bkt_bits of cfs_hash_create(), all hlist_heads in a bucket share
* one lock for reducing memory overhead.
*
* - support lockless hash, caller will take care of locks:
* avoid lock overhead for hash tables that are already protected
* by locking in the caller for another reason
*
* - support both spin_lock/rwlock for bucket:
* overhead of spinlock contention is lower than read/write
* contention of rwlock, so using spinlock to serialize operations on
* bucket is more reasonable for those frequently changed hash tables
*
* - support one-single lock mode:
* one lock to protect all hash operations to avoid overhead of
* multiple locks if hash table is always small
*
* - removed a lot of unnecessary addref & decref on hash element:
* addref & decref are atomic operations in many use-cases which
* are expensive.
*
* - support non-blocking cfs_hash_add() and cfs_hash_findadd():
* some lustre use-cases require these functions to be strictly
* non-blocking, we need to schedule required rehash on a different
* thread on those cases.
*
* - safer rehash on large hash table
* In old implementation, rehash function will exclusively lock the
* hash table and finish rehash in one batch, it's dangerous on SMP
* system because rehash millions of elements could take long time.
* New implemented rehash can release lock and relax CPU in middle
* of rehash, it's safe for another thread to search/change on the
* hash table even it's in rehasing.
*
* - support two different refcount modes
* . hash table has refcount on element
* . hash table doesn't change refcount on adding/removing element
*
* - support long name hash table (for param-tree)
*
* - fix a bug for cfs_hash_rehash_key:
* in old implementation, cfs_hash_rehash_key could screw up the
* hash-table because @key is overwritten without any protection.
* Now we need user to define hs_keycpy for those rehash enabled
* hash tables, cfs_hash_rehash_key will overwrite hash-key
* inside lock by calling hs_keycpy.
*
* - better hash iteration:
* Now we support both locked iteration & lockless iteration of hash
* table. Also, user can break the iteration by return 1 in callback.
*/
#include "../../include/linux/libcfs/libcfs.h"
#include <linux/seq_file.h>
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
static unsigned int warn_on_depth = 8;
module_param(warn_on_depth, uint, 0644);
MODULE_PARM_DESC(warn_on_depth, "warning when hash depth is high.");
#endif
struct cfs_wi_sched *cfs_sched_rehash;
/* no-op lock callbacks, used by CFS_HASH_NO_LOCK hash tables */
static inline void
cfs_hash_nl_lock(union cfs_hash_lock *lock, int exclusive) {}

static inline void
cfs_hash_nl_unlock(union cfs_hash_lock *lock, int exclusive) {}

/* spinlock callbacks; @exclusive is ignored, a spinlock is always exclusive */
static inline void
cfs_hash_spin_lock(union cfs_hash_lock *lock, int exclusive)
{
	spin_lock(&lock->spin);
}

static inline void
cfs_hash_spin_unlock(union cfs_hash_lock *lock, int exclusive)
{
	spin_unlock(&lock->spin);
}

/* rwlock callbacks: shared (read) side unless @exclusive is set */
static inline void
cfs_hash_rw_lock(union cfs_hash_lock *lock, int exclusive)
{
	if (!exclusive)
		read_lock(&lock->rw);
	else
		write_lock(&lock->rw);
}

static inline void
cfs_hash_rw_unlock(union cfs_hash_lock *lock, int exclusive)
{
	if (!exclusive)
		read_unlock(&lock->rw);
	else
		write_unlock(&lock->rw);
}
/*
 * Lock-operation vectors; cfs_hash_lock_setup() installs exactly one of
 * these into hs->hs_lops based on the hash creation flags.
 */

/** No lock hash */
static cfs_hash_lock_ops_t cfs_hash_nl_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_nl_lock,
	.hs_bkt_unlock	= cfs_hash_nl_unlock,
};

/** no bucket lock, one spinlock to protect everything */
static cfs_hash_lock_ops_t cfs_hash_nbl_lops = {
	.hs_lock	= cfs_hash_spin_lock,
	.hs_unlock	= cfs_hash_spin_unlock,
	.hs_bkt_lock	= cfs_hash_nl_lock,
	.hs_bkt_unlock	= cfs_hash_nl_unlock,
};

/** spin bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_spin_lops = {
	.hs_lock	= cfs_hash_rw_lock,
	.hs_unlock	= cfs_hash_rw_unlock,
	.hs_bkt_lock	= cfs_hash_spin_lock,
	.hs_bkt_unlock	= cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is enabled */
static cfs_hash_lock_ops_t cfs_hash_bkt_rw_lops = {
	.hs_lock	= cfs_hash_rw_lock,
	.hs_unlock	= cfs_hash_rw_unlock,
	.hs_bkt_lock	= cfs_hash_rw_lock,
	.hs_bkt_unlock	= cfs_hash_rw_unlock,
};

/** spin bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_spin_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_spin_lock,
	.hs_bkt_unlock	= cfs_hash_spin_unlock,
};

/** rw bucket lock, rehash is disabled */
static cfs_hash_lock_ops_t cfs_hash_nr_bkt_rw_lops = {
	.hs_lock	= cfs_hash_nl_lock,
	.hs_unlock	= cfs_hash_nl_unlock,
	.hs_bkt_lock	= cfs_hash_rw_lock,
	.hs_bkt_unlock	= cfs_hash_rw_unlock,
};
/* pick the lock operations for @hs according to its feature flags */
static void
cfs_hash_lock_setup(struct cfs_hash *hs)
{
	if (cfs_hash_with_no_lock(hs)) {
		hs->hs_lops = &cfs_hash_nl_lops;

	} else if (cfs_hash_with_no_bktlock(hs)) {
		hs->hs_lops = &cfs_hash_nbl_lops;
		spin_lock_init(&hs->hs_lock.spin);

	} else if (cfs_hash_with_rehash(hs)) {
		/* rehash needs the table-wide rwlock on top of bucket locks */
		rwlock_init(&hs->hs_lock.rw);

		if (cfs_hash_with_rw_bktlock(hs))
			hs->hs_lops = &cfs_hash_bkt_rw_lops;
		else if (cfs_hash_with_spin_bktlock(hs))
			hs->hs_lops = &cfs_hash_bkt_spin_lops;
		else
			LBUG(); /* invalid flag combination */
	} else {
		if (cfs_hash_with_rw_bktlock(hs))
			hs->hs_lops = &cfs_hash_nr_bkt_rw_lops;
		else if (cfs_hash_with_spin_bktlock(hs))
			hs->hs_lops = &cfs_hash_nr_bkt_spin_lops;
		else
			LBUG();
	}
}
/**
 * Simple hash head without depth tracking
 * new element is always added to head of hlist
 */
typedef struct {
	struct hlist_head	hh_head;	/**< entries list */
} cfs_hash_head_t;

/* per-hlist head size for the "hh" (head-add, no depth) variant */
static int
cfs_hash_hh_hhead_size(struct cfs_hash *hs)
{
	return sizeof(cfs_hash_head_t);
}

/* map bucket descriptor @bd (bucket + offset) to its hlist head */
static struct hlist_head *
cfs_hash_hh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	cfs_hash_head_t *head = (cfs_hash_head_t *)&bd->bd_bucket->hsb_head[0];

	return &head[bd->bd_offset].hh_head;
}

static int
cfs_hash_hh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	hlist_add_head(hnode, cfs_hash_hh_hhead(hs, bd));
	return -1; /* unknown depth */
}

static int
cfs_hash_hh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	hlist_del_init(hnode);
	return -1; /* unknown depth */
}

/**
 * Simple hash head with depth tracking
 * new element is always added to head of hlist
 */
typedef struct {
	struct hlist_head	hd_head;	/**< entries list */
	unsigned int		hd_depth;	/**< list length */
} cfs_hash_head_dep_t;

static int
cfs_hash_hd_hhead_size(struct cfs_hash *hs)
{
	return sizeof(cfs_hash_head_dep_t);
}

static struct hlist_head *
cfs_hash_hd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	cfs_hash_head_dep_t *head;

	head = (cfs_hash_head_dep_t *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].hd_head;
}

/* add @hnode and return the new chain depth (feeds depth warnings) */
static int
cfs_hash_hd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
					       cfs_hash_head_dep_t, hd_head);
	hlist_add_head(hnode, &hh->hd_head);
	return ++hh->hd_depth;
}

static int
cfs_hash_hd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	cfs_hash_head_dep_t *hh = container_of(cfs_hash_hd_hhead(hs, bd),
					       cfs_hash_head_dep_t, hd_head);
	hlist_del_init(hnode);
	return --hh->hd_depth;
}
/**
 * double links hash head without depth tracking
 * new element is always added to tail of hlist
 */
typedef struct {
	struct hlist_head	dh_head;	/**< entries list */
	struct hlist_node	*dh_tail;	/**< the last entry */
} cfs_hash_dhead_t;

static int
cfs_hash_dh_hhead_size(struct cfs_hash *hs)
{
	return sizeof(cfs_hash_dhead_t);
}

static struct hlist_head *
cfs_hash_dh_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	cfs_hash_dhead_t *head;

	head = (cfs_hash_dhead_t *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].dh_head;
}

/* tail-insert @hnode, keeping dh_tail pointing at the last element */
static int
cfs_hash_dh_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
					    cfs_hash_dhead_t, dh_head);

	if (dh->dh_tail != NULL) /* not empty */
		hlist_add_behind(hnode, dh->dh_tail);
	else /* empty list */
		hlist_add_head(hnode, &dh->dh_head);
	dh->dh_tail = hnode;
	return -1; /* unknown depth */
}

static int
cfs_hash_dh_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnd)
{
	cfs_hash_dhead_t *dh = container_of(cfs_hash_dh_hhead(hs, bd),
					    cfs_hash_dhead_t, dh_head);

	if (hnd->next == NULL) { /* it's the tail */
		/* new tail is the predecessor, found via pprev; NULL if the
		 * list becomes empty */
		dh->dh_tail = (hnd->pprev == &dh->dh_head.first) ? NULL :
			      container_of(hnd->pprev, struct hlist_node, next);
	}
	hlist_del_init(hnd);
	return -1; /* unknown depth */
}

/**
 * double links hash head with depth tracking
 * new element is always added to tail of hlist
 */
typedef struct {
	struct hlist_head	dd_head;	/**< entries list */
	struct hlist_node	*dd_tail;	/**< the last entry */
	unsigned int		dd_depth;	/**< list length */
} cfs_hash_dhead_dep_t;

static int
cfs_hash_dd_hhead_size(struct cfs_hash *hs)
{
	return sizeof(cfs_hash_dhead_dep_t);
}

static struct hlist_head *
cfs_hash_dd_hhead(struct cfs_hash *hs, struct cfs_hash_bd *bd)
{
	cfs_hash_dhead_dep_t *head;

	head = (cfs_hash_dhead_dep_t *)&bd->bd_bucket->hsb_head[0];
	return &head[bd->bd_offset].dd_head;
}

static int
cfs_hash_dd_hnode_add(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnode)
{
	cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
						cfs_hash_dhead_dep_t, dd_head);

	if (dh->dd_tail != NULL) /* not empty */
		hlist_add_behind(hnode, dh->dd_tail);
	else /* empty list */
		hlist_add_head(hnode, &dh->dd_head);
	dh->dd_tail = hnode;
	return ++dh->dd_depth;
}

static int
cfs_hash_dd_hnode_del(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		      struct hlist_node *hnd)
{
	cfs_hash_dhead_dep_t *dh = container_of(cfs_hash_dd_hhead(hs, bd),
						cfs_hash_dhead_dep_t, dd_head);

	if (hnd->next == NULL) { /* it's the tail */
		dh->dd_tail = (hnd->pprev == &dh->dd_head.first) ? NULL :
			      container_of(hnd->pprev, struct hlist_node, next);
	}
	hlist_del_init(hnd);
	return --dh->dd_depth;
}
/* hlist-operation vectors for the four head layouts above */
static cfs_hash_hlist_ops_t cfs_hash_hh_hops = {
	.hop_hhead	= cfs_hash_hh_hhead,
	.hop_hhead_size	= cfs_hash_hh_hhead_size,
	.hop_hnode_add	= cfs_hash_hh_hnode_add,
	.hop_hnode_del	= cfs_hash_hh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_hd_hops = {
	.hop_hhead	= cfs_hash_hd_hhead,
	.hop_hhead_size	= cfs_hash_hd_hhead_size,
	.hop_hnode_add	= cfs_hash_hd_hnode_add,
	.hop_hnode_del	= cfs_hash_hd_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dh_hops = {
	.hop_hhead	= cfs_hash_dh_hhead,
	.hop_hhead_size	= cfs_hash_dh_hhead_size,
	.hop_hnode_add	= cfs_hash_dh_hnode_add,
	.hop_hnode_del	= cfs_hash_dh_hnode_del,
};

static cfs_hash_hlist_ops_t cfs_hash_dd_hops = {
	.hop_hhead	= cfs_hash_dd_hhead,
	.hop_hhead_size	= cfs_hash_dd_hhead_size,
	.hop_hnode_add	= cfs_hash_dd_hnode_add,
	.hop_hnode_del	= cfs_hash_dd_hnode_del,
};

/* choose hlist operations for @hs: tail-add and/or depth tracking */
static void
cfs_hash_hlist_setup(struct cfs_hash *hs)
{
	if (cfs_hash_with_add_tail(hs)) {
		hs->hs_hops = cfs_hash_with_depth(hs) ?
			      &cfs_hash_dd_hops : &cfs_hash_dh_hops;
	} else {
		hs->hs_hops = cfs_hash_with_depth(hs) ?
			      &cfs_hash_hd_hops : &cfs_hash_hh_hops;
	}
}
/*
 * Compute the bucket descriptor for @key in a table of 2^@bits slots:
 * the low bits select the bucket, the high bits the offset within it.
 */
static void
cfs_hash_bd_from_key(struct cfs_hash *hs, struct cfs_hash_bucket **bkts,
		     unsigned int bits, const void *key, struct cfs_hash_bd *bd)
{
	unsigned int index = cfs_hash_id(hs, key, (1U << bits) - 1);

	LASSERT(bits == hs->hs_cur_bits || bits == hs->hs_rehash_bits);

	bd->bd_bucket = bkts[index & ((1U << (bits - hs->hs_bkt_bits)) - 1)];
	bd->bd_offset = index >> (bits - hs->hs_bkt_bits);
}

/* resolve @key against the current table, or the rehash table if one
 * is being built */
void
cfs_hash_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bd)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
	if (likely(hs->hs_rehash_buckets == NULL)) {
		cfs_hash_bd_from_key(hs, hs->hs_buckets,
				     hs->hs_cur_bits, key, bd);
	} else {
		LASSERT(hs->hs_rehash_bits != 0);
		cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
				     hs->hs_rehash_bits, key, bd);
	}
}
EXPORT_SYMBOL(cfs_hash_bd_get);
/*
 * Record a new maximum chain depth for the bucket; in debug builds also
 * schedule a workitem to warn when the depth exceeds warn_on_depth.
 */
static inline void
cfs_hash_bd_dep_record(struct cfs_hash *hs, struct cfs_hash_bd *bd, int dep_cur)
{
	if (likely(dep_cur <= bd->bd_bucket->hsb_depmax))
		return;

	bd->bd_bucket->hsb_depmax = dep_cur;
# if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
	if (likely(warn_on_depth == 0 ||
		   max(warn_on_depth, hs->hs_dep_max) >= dep_cur))
		return;

	spin_lock(&hs->hs_dep_lock);
	hs->hs_dep_max  = dep_cur;
	hs->hs_dep_bkt  = bd->bd_bucket->hsb_index;
	hs->hs_dep_off  = bd->bd_offset;
	hs->hs_dep_bits = hs->hs_cur_bits;
	spin_unlock(&hs->hs_dep_lock);

	cfs_wi_schedule(cfs_sched_rehash, &hs->hs_dep_wi);
# endif
}
/*
 * Add @hnode to bucket @bd; caller holds the bucket lock. Bumps the
 * bucket version (skipping 0, which means "unset") and the optional
 * table counter, and takes an item ref unless NO_ITEMREF is set.
 */
void
cfs_hash_bd_add_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		       struct hlist_node *hnode)
{
	int rc;

	rc = hs->hs_hops->hop_hnode_add(hs, bd, hnode);
	cfs_hash_bd_dep_record(hs, bd, rc);
	bd->bd_bucket->hsb_version++;
	if (unlikely(bd->bd_bucket->hsb_version == 0))
		bd->bd_bucket->hsb_version++;
	bd->bd_bucket->hsb_count++;

	if (cfs_hash_with_counter(hs))
		atomic_inc(&hs->hs_count);
	if (!cfs_hash_with_no_itemref(hs))
		cfs_hash_get(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_add_locked);

/*
 * Remove @hnode from bucket @bd; mirror of cfs_hash_bd_add_locked().
 * Caller holds the bucket lock.
 */
void
cfs_hash_bd_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
		       struct hlist_node *hnode)
{
	hs->hs_hops->hop_hnode_del(hs, bd, hnode);

	LASSERT(bd->bd_bucket->hsb_count > 0);
	bd->bd_bucket->hsb_count--;
	bd->bd_bucket->hsb_version++;
	if (unlikely(bd->bd_bucket->hsb_version == 0))
		bd->bd_bucket->hsb_version++;

	if (cfs_hash_with_counter(hs)) {
		LASSERT(atomic_read(&hs->hs_count) > 0);
		atomic_dec(&hs->hs_count);
	}
	if (!cfs_hash_with_no_itemref(hs))
		cfs_hash_put_locked(hs, hnode);
}
EXPORT_SYMBOL(cfs_hash_bd_del_locked);
/*
 * Move @hnode from bucket @bd_old to @bd_new (used during rehash).
 * Both buckets are locked by the caller; counts/versions are updated
 * directly to avoid the refcount traffic of del_locked + add_locked.
 */
void
cfs_hash_bd_move_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd_old,
			struct cfs_hash_bd *bd_new, struct hlist_node *hnode)
{
	struct cfs_hash_bucket *obkt = bd_old->bd_bucket;
	struct cfs_hash_bucket *nbkt = bd_new->bd_bucket;
	int		    rc;

	if (cfs_hash_bd_compare(bd_old, bd_new) == 0)
		return; /* same bucket: nothing to move */

	/* use cfs_hash_bd_hnode_add/del, to avoid atomic & refcount ops
	 * in cfs_hash_bd_del/add_locked */
	hs->hs_hops->hop_hnode_del(hs, bd_old, hnode);
	rc = hs->hs_hops->hop_hnode_add(hs, bd_new, hnode);
	cfs_hash_bd_dep_record(hs, bd_new, rc);

	LASSERT(obkt->hsb_count > 0);
	obkt->hsb_count--;
	obkt->hsb_version++;
	if (unlikely(obkt->hsb_version == 0))
		obkt->hsb_version++;
	nbkt->hsb_count++;
	nbkt->hsb_version++;
	if (unlikely(nbkt->hsb_version == 0))
		nbkt->hsb_version++;
}
EXPORT_SYMBOL(cfs_hash_bd_move_locked);
/* lookup-intent bit masks; combined into the intents below */
enum {
	/** always set, for sanity (avoid ZERO intent) */
	CFS_HS_LOOKUP_MASK_FIND	= 1 << 0,
	/** return entry with a ref */
	CFS_HS_LOOKUP_MASK_REF	= 1 << 1,
	/** add entry if not existing */
	CFS_HS_LOOKUP_MASK_ADD	= 1 << 2,
	/** delete entry, ignore other masks */
	CFS_HS_LOOKUP_MASK_DEL	= 1 << 3,
};

typedef enum cfs_hash_lookup_intent {
	/** return item w/o refcount */
	CFS_HS_LOOKUP_IT_PEEK	 = CFS_HS_LOOKUP_MASK_FIND,
	/** return item with refcount */
	CFS_HS_LOOKUP_IT_FIND	 = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_REF),
	/** return item w/o refcount if existed, otherwise add */
	CFS_HS_LOOKUP_IT_ADD	 = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_ADD),
	/** return item with refcount if existed, otherwise add */
	CFS_HS_LOOKUP_IT_FINDADD = (CFS_HS_LOOKUP_IT_FIND |
				    CFS_HS_LOOKUP_MASK_ADD),
	/** delete if existed */
	CFS_HS_LOOKUP_IT_FINDDEL = (CFS_HS_LOOKUP_MASK_FIND |
				    CFS_HS_LOOKUP_MASK_DEL)
} cfs_hash_lookup_intent_t;
/*
 * Core single-bucket lookup: scan the chain for @key and act on the
 * @intent bits (peek / take ref / delete / add-if-missing). Caller holds
 * the bucket lock. When @hnode is non-NULL and ADD is not requested, only
 * that exact node may match (used by finddel).
 */
static struct hlist_node *
cfs_hash_bd_lookup_intent(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			  const void *key, struct hlist_node *hnode,
			  cfs_hash_lookup_intent_t intent)

{
	struct hlist_head  *hhead = cfs_hash_bd_hhead(hs, bd);
	struct hlist_node  *ehnode;
	struct hlist_node  *match;
	int  intent_add = (intent & CFS_HS_LOOKUP_MASK_ADD) != 0;

	/* with this function, we can avoid a lot of useless refcount ops,
	 * which are expensive atomic operations most time. */
	match = intent_add ? NULL : hnode;
	hlist_for_each(ehnode, hhead) {
		if (!cfs_hash_keycmp(hs, key, ehnode))
			continue;

		if (match != NULL && match != ehnode) /* can't match */
			continue;

		/* match and ... */
		if ((intent & CFS_HS_LOOKUP_MASK_DEL) != 0) {
			cfs_hash_bd_del_locked(hs, bd, ehnode);
			return ehnode;
		}

		/* caller wants refcount? */
		if ((intent & CFS_HS_LOOKUP_MASK_REF) != 0)
			cfs_hash_get(hs, ehnode);
		return ehnode;
	}
	/* no match item */
	if (!intent_add)
		return NULL;

	LASSERT(hnode != NULL);
	cfs_hash_bd_add_locked(hs, bd, hnode);
	return hnode;
}
/* lookup @key in bucket @bd and return the item with a ref taken */
struct hlist_node *
cfs_hash_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
{
	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
					 CFS_HS_LOOKUP_IT_FIND);
}
EXPORT_SYMBOL(cfs_hash_bd_lookup_locked);

/* lookup @key in bucket @bd without taking a ref */
struct hlist_node *
cfs_hash_bd_peek_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd, const void *key)
{
	return cfs_hash_bd_lookup_intent(hs, bd, key, NULL,
					 CFS_HS_LOOKUP_IT_PEEK);
}
EXPORT_SYMBOL(cfs_hash_bd_peek_locked);

/* find @key, or add @hnode if absent; @noref suppresses the item ref */
struct hlist_node *
cfs_hash_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			   const void *key, struct hlist_node *hnode,
			   int noref)
{
	return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
					 CFS_HS_LOOKUP_IT_ADD |
					 (!noref * CFS_HS_LOOKUP_MASK_REF));
}
EXPORT_SYMBOL(cfs_hash_bd_findadd_locked);

/* find and unlink the item matching @key (and @hnode, if given) */
struct hlist_node *
cfs_hash_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			   const void *key, struct hlist_node *hnode)
{
	/* hnode can be NULL, we find the first item with @key */
	return cfs_hash_bd_lookup_intent(hs, bd, key, hnode,
					 CFS_HS_LOOKUP_IT_FINDDEL);
}
EXPORT_SYMBOL(cfs_hash_bd_finddel_locked);
/*
 * Lock several bucket descriptors in index order, skipping duplicates
 * that point at the same bucket, to make the locking deadlock-free.
 */
static void
cfs_hash_multi_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
		       unsigned n, int excl)
{
	struct cfs_hash_bucket *prev = NULL;
	int		i;

	/**
	 * bds must be ascendantly ordered by bd->bd_bucket->hsb_index.
	 * NB: it's possible that several bds point to the same bucket but
	 * have different bd::bd_offset, so need take care of deadlock.
	 */
	cfs_hash_for_each_bd(bds, n, i) {
		if (prev == bds[i].bd_bucket)
			continue;

		LASSERT(prev == NULL ||
			prev->hsb_index < bds[i].bd_bucket->hsb_index);
		cfs_hash_bd_lock(hs, &bds[i], excl);
		prev = bds[i].bd_bucket;
	}
}

/* unlock counterpart of cfs_hash_multi_bd_lock(); same dedup rule */
static void
cfs_hash_multi_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds,
			 unsigned n, int excl)
{
	struct cfs_hash_bucket *prev = NULL;
	int		i;

	cfs_hash_for_each_bd(bds, n, i) {
		if (prev != bds[i].bd_bucket) {
			cfs_hash_bd_unlock(hs, &bds[i], excl);
			prev = bds[i].bd_bucket;
		}
	}
}
/* lookup @key across @n locked bucket descriptors; ref is taken */
static struct hlist_node *
cfs_hash_multi_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				unsigned n, const void *key)
{
	struct hlist_node  *ehnode;
	unsigned	   i;

	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, NULL,
						   CFS_HS_LOOKUP_IT_FIND);
		if (ehnode != NULL)
			return ehnode;
	}
	return NULL;
}

/*
 * Find @key across @n buckets, or add @hnode if no bucket has it. When
 * more than one descriptor is in play (rehash in progress), the add goes
 * to the bucket cfs_hash_bd_get() currently maps the key to.
 */
static struct hlist_node *
cfs_hash_multi_bd_findadd_locked(struct cfs_hash *hs,
				 struct cfs_hash_bd *bds, unsigned n, const void *key,
				 struct hlist_node *hnode, int noref)
{
	struct hlist_node  *ehnode;
	int		intent;
	unsigned	   i;

	LASSERT(hnode != NULL);
	intent = CFS_HS_LOOKUP_IT_PEEK | (!noref * CFS_HS_LOOKUP_MASK_REF);

	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key,
						   NULL, intent);
		if (ehnode != NULL)
			return ehnode;
	}

	if (i == 1) { /* only one bucket */
		cfs_hash_bd_add_locked(hs, &bds[0], hnode);
	} else {
		struct cfs_hash_bd      mybd;

		cfs_hash_bd_get(hs, key, &mybd);
		cfs_hash_bd_add_locked(hs, &mybd, hnode);
	}

	return hnode;
}

/* find and unlink @key (optionally a specific @hnode) across @n buckets */
static struct hlist_node *
cfs_hash_multi_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				 unsigned n, const void *key,
				 struct hlist_node *hnode)
{
	struct hlist_node  *ehnode;
	unsigned	   i;

	cfs_hash_for_each_bd(bds, n, i) {
		ehnode = cfs_hash_bd_lookup_intent(hs, &bds[i], key, hnode,
						   CFS_HS_LOOKUP_IT_FINDDEL);
		if (ehnode != NULL)
			return ehnode;
	}
	return NULL;
}
/*
 * Normalize a pair of bucket descriptors: sort ascending by bucket index
 * (required by cfs_hash_multi_bd_lock) and collapse duplicates by
 * NULL-ing bd2 when both refer to the same bucket.
 */
static void
cfs_hash_bd_order(struct cfs_hash_bd *bd1, struct cfs_hash_bd *bd2)
{
	int     rc;

	if (bd2->bd_bucket == NULL)
		return;

	if (bd1->bd_bucket == NULL) {
		*bd1 = *bd2;
		bd2->bd_bucket = NULL;
		return;
	}

	rc = cfs_hash_bd_compare(bd1, bd2);
	if (rc == 0) {
		bd2->bd_bucket = NULL;

	} else if (rc > 0) { /* swab bd1 and bd2 */
		struct cfs_hash_bd tmp;

		tmp = *bd2;
		*bd2 = *bd1;
		*bd1 = tmp;
	}
}
/*
 * Resolve @key to (up to) two bucket descriptors: one in the current
 * table and, while a rehash is in flight, one in the rehash table.
 * bds[1].bd_bucket is NULL when a single descriptor suffices.
 */
void
cfs_hash_dual_bd_get(struct cfs_hash *hs, const void *key, struct cfs_hash_bd *bds)
{
	/* NB: caller should hold hs_lock.rw if REHASH is set */
	cfs_hash_bd_from_key(hs, hs->hs_buckets,
			     hs->hs_cur_bits, key, &bds[0]);
	if (likely(hs->hs_rehash_buckets == NULL)) {
		/* no rehash or not rehashing */
		bds[1].bd_bucket = NULL;
		return;
	}

	LASSERT(hs->hs_rehash_bits != 0);
	cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
			     hs->hs_rehash_bits, key, &bds[1]);

	cfs_hash_bd_order(&bds[0], &bds[1]);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_get);

/* lock both descriptors of a dual-bd pair (deadlock-safe ordering) */
void
cfs_hash_dual_bd_lock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
	cfs_hash_multi_bd_lock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lock);

void
cfs_hash_dual_bd_unlock(struct cfs_hash *hs, struct cfs_hash_bd *bds, int excl)
{
	cfs_hash_multi_bd_unlock(hs, bds, 2, excl);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_unlock);

struct hlist_node *
cfs_hash_dual_bd_lookup_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
			       const void *key)
{
	return cfs_hash_multi_bd_lookup_locked(hs, bds, 2, key);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_lookup_locked);

struct hlist_node *
cfs_hash_dual_bd_findadd_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				const void *key, struct hlist_node *hnode,
				int noref)
{
	return cfs_hash_multi_bd_findadd_locked(hs, bds, 2, key,
						hnode, noref);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_findadd_locked);

struct hlist_node *
cfs_hash_dual_bd_finddel_locked(struct cfs_hash *hs, struct cfs_hash_bd *bds,
				const void *key, struct hlist_node *hnode)
{
	return cfs_hash_multi_bd_finddel_locked(hs, bds, 2, key, hnode);
}
EXPORT_SYMBOL(cfs_hash_dual_bd_finddel_locked);
/*
 * Free buckets [@prev_size, @size) plus the bucket-pointer array itself.
 * @prev_size is non-zero when only the newly grown tail of a failed
 * realloc must be released.
 */
static void
cfs_hash_buckets_free(struct cfs_hash_bucket **buckets,
		      int bkt_size, int prev_size, int size)
{
	int i = prev_size;

	while (i < size) {
		struct cfs_hash_bucket *bkt = buckets[i++];

		if (bkt != NULL)
			LIBCFS_FREE(bkt, bkt_size);
	}

	LIBCFS_FREE(buckets, sizeof(buckets[0]) * size);
}
/*
 * Create or grow bucket memory. Return old_buckets if no allocation was
 * needed, the newly allocated buckets if allocation was needed and
 * successful, and NULL on error.
 */
static struct cfs_hash_bucket **
cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
			 unsigned int old_size, unsigned int new_size)
{
	struct cfs_hash_bucket **new_bkts;
	int		     i;

	LASSERT(old_size == 0 || old_bkts != NULL);

	if (old_bkts != NULL && old_size == new_size)
		return old_bkts;

	LIBCFS_ALLOC(new_bkts, sizeof(new_bkts[0]) * new_size);
	if (new_bkts == NULL)
		return NULL;

	if (old_bkts != NULL) {
		/* existing buckets are shared with the old array */
		memcpy(new_bkts, old_bkts,
		       min(old_size, new_size) * sizeof(*old_bkts));
	}

	for (i = old_size; i < new_size; i++) {
		struct hlist_head *hhead;
		struct cfs_hash_bd     bd;

		LIBCFS_ALLOC(new_bkts[i], cfs_hash_bkt_size(hs));
		if (new_bkts[i] == NULL) {
			/* free only the buckets allocated in this call,
			 * the shared old ones stay owned by old_bkts */
			cfs_hash_buckets_free(new_bkts, cfs_hash_bkt_size(hs),
					      old_size, new_size);
			return NULL;
		}

		new_bkts[i]->hsb_index   = i;
		new_bkts[i]->hsb_version = 1;  /* shouldn't be zero */
		new_bkts[i]->hsb_depmax  = -1; /* unknown */
		bd.bd_bucket = new_bkts[i];
		cfs_hash_bd_for_each_hlist(hs, &bd, hhead)
			INIT_HLIST_HEAD(hhead);

		if (cfs_hash_with_no_lock(hs) ||
		    cfs_hash_with_no_bktlock(hs))
			continue;

		if (cfs_hash_with_rw_bktlock(hs))
			rwlock_init(&new_bkts[i]->hsb_lock.rw);
		else if (cfs_hash_with_spin_bktlock(hs))
			spin_lock_init(&new_bkts[i]->hsb_lock.spin);
		else
			LBUG(); /* invalid use-case */
	}
	return new_bkts;
}
/**
 * Initialize new libcfs hash, where:
 * @name     - Descriptive hash name
 * @cur_bits - Initial hash table size, in bits
 * @max_bits - Maximum allowed hash table resize, in bits
 * @ops      - Registered hash table operations
 * @flags    - CFS_HASH_REHASH enable synamic hash resizing
 *	     - CFS_HASH_SORT enable chained hash sort
 */
static int cfs_hash_rehash_worker(cfs_workitem_t *wi);

#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
/*
 * Debug-only workitem: print a console warning when a hash chain grows
 * unusually deep; hs_dep_bits is reset to 0 to mark the work done.
 */
static int cfs_hash_dep_print(cfs_workitem_t *wi)
{
	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
	int	 dep;
	int	 bkt;
	int	 off;
	int	 bits;

	spin_lock(&hs->hs_dep_lock);
	dep  = hs->hs_dep_max;
	bkt  = hs->hs_dep_bkt;
	off  = hs->hs_dep_off;
	bits = hs->hs_dep_bits;
	spin_unlock(&hs->hs_dep_lock);

	LCONSOLE_WARN("#### HASH %s (bits: %d): max depth %d at bucket %d/%d\n",
		      hs->hs_name, bits, dep, bkt, off);
	spin_lock(&hs->hs_dep_lock);
	hs->hs_dep_bits = 0; /* mark as workitem done */
	spin_unlock(&hs->hs_dep_lock);
	return 0;
}

static void cfs_hash_depth_wi_init(struct cfs_hash *hs)
{
	spin_lock_init(&hs->hs_dep_lock);
	cfs_wi_init(&hs->hs_dep_wi, hs, cfs_hash_dep_print);
}

/* cancel the depth workitem, spinning until an in-flight print finishes */
static void cfs_hash_depth_wi_cancel(struct cfs_hash *hs)
{
	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_dep_wi))
		return;

	spin_lock(&hs->hs_dep_lock);
	while (hs->hs_dep_bits != 0) {
		spin_unlock(&hs->hs_dep_lock);
		cond_resched();
		spin_lock(&hs->hs_dep_lock);
	}
	spin_unlock(&hs->hs_dep_lock);
}

#else /* CFS_HASH_DEBUG_LEVEL < CFS_HASH_DEBUG_1 */

static inline void cfs_hash_depth_wi_init(struct cfs_hash *hs) {}
static inline void cfs_hash_depth_wi_cancel(struct cfs_hash *hs) {}

#endif /* CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1 */
/*
 * Allocate and initialize a libcfs hash table (see the block comment
 * above for parameter meaning). Returns NULL on allocation failure.
 * REHASH implies COUNTER, since rehash decisions need the element count.
 */
struct cfs_hash *
cfs_hash_create(char *name, unsigned cur_bits, unsigned max_bits,
		unsigned bkt_bits, unsigned extra_bytes,
		unsigned min_theta, unsigned max_theta,
		cfs_hash_ops_t *ops, unsigned flags)
{
	struct cfs_hash *hs;
	int	 len;

	CLASSERT(CFS_HASH_THETA_BITS < 15);

	LASSERT(name != NULL);
	LASSERT(ops != NULL);
	LASSERT(ops->hs_key);
	LASSERT(ops->hs_hash);
	LASSERT(ops->hs_object);
	LASSERT(ops->hs_keycmp);
	LASSERT(ops->hs_get != NULL);
	LASSERT(ops->hs_put_locked != NULL);

	if ((flags & CFS_HASH_REHASH) != 0)
		flags |= CFS_HASH_COUNTER; /* must have counter */

	LASSERT(cur_bits > 0);
	LASSERT(cur_bits >= bkt_bits);
	LASSERT(max_bits >= cur_bits && max_bits < 31);
	LASSERT(ergo((flags & CFS_HASH_REHASH) == 0, cur_bits == max_bits));
	LASSERT(ergo((flags & CFS_HASH_REHASH) != 0,
		     (flags & CFS_HASH_NO_LOCK) == 0));
	LASSERT(ergo((flags & CFS_HASH_REHASH_KEY) != 0,
		      ops->hs_keycpy != NULL));

	len = (flags & CFS_HASH_BIGNAME) == 0 ?
	      CFS_HASH_NAME_LEN : CFS_HASH_BIGNAME_LEN;
	LIBCFS_ALLOC(hs, offsetof(struct cfs_hash, hs_name[len]));
	if (hs == NULL)
		return NULL;

	/* explicit termination: strncpy alone does not guarantee it */
	strncpy(hs->hs_name, name, len);
	hs->hs_name[len - 1] = '\0';
	hs->hs_flags = flags;

	atomic_set(&hs->hs_refcount, 1);
	atomic_set(&hs->hs_count, 0);

	cfs_hash_lock_setup(hs);
	cfs_hash_hlist_setup(hs);

	hs->hs_cur_bits = (__u8)cur_bits;
	hs->hs_min_bits = (__u8)cur_bits;
	hs->hs_max_bits = (__u8)max_bits;
	hs->hs_bkt_bits = (__u8)bkt_bits;

	hs->hs_ops	   = ops;
	hs->hs_extra_bytes = extra_bytes;
	hs->hs_rehash_bits = 0;
	cfs_wi_init(&hs->hs_rehash_wi, hs, cfs_hash_rehash_worker);
	cfs_hash_depth_wi_init(hs);

	if (cfs_hash_with_rehash(hs))
		__cfs_hash_set_theta(hs, min_theta, max_theta);

	hs->hs_buckets = cfs_hash_buckets_realloc(hs, NULL, 0,
						  CFS_HASH_NBKT(hs));
	if (hs->hs_buckets != NULL)
		return hs;

	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[len]));
	return NULL;
}
EXPORT_SYMBOL(cfs_hash_create);
/**
 * Cleanup libcfs hash @hs.
 * Cancels any pending rehash/depth work, drains every bucket (asserting
 * emptiness unless the table tolerates leftovers), then frees buckets
 * and the hash descriptor. Called only when the last ref is dropped.
 */
static void
cfs_hash_destroy(struct cfs_hash *hs)
{
	struct hlist_node     *hnode;
	struct hlist_node     *pos;
	struct cfs_hash_bd	 bd;
	int		   i;

	LASSERT(hs != NULL);
	LASSERT(!cfs_hash_is_exiting(hs) &&
		!cfs_hash_is_iterating(hs));

	/**
	 * prohibit further rehashes, don't need any lock because
	 * I'm the only (last) one can change it.
	 */
	hs->hs_exiting = 1;
	if (cfs_hash_with_rehash(hs))
		cfs_hash_rehash_cancel(hs);

	cfs_hash_depth_wi_cancel(hs);
	/* rehash should be done/canceled */
	LASSERT(hs->hs_buckets != NULL &&
		hs->hs_rehash_buckets == NULL);

	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;

		LASSERT(bd.bd_bucket != NULL);
		/* no need to take this lock, just for consistent code */
		cfs_hash_bd_lock(hs, &bd, 1);

		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			hlist_for_each_safe(hnode, pos, hhead) {
				LASSERTF(!cfs_hash_with_assert_empty(hs),
					 "hash %s bucket %u(%u) is not "
					 " empty: %u items left\n",
					 hs->hs_name, bd.bd_bucket->hsb_index,
					 bd.bd_offset, bd.bd_bucket->hsb_count);
				/* can't assert key valicate, because we
				 * can interrupt rehash */
				cfs_hash_bd_del_locked(hs, &bd, hnode);
				cfs_hash_exit(hs, hnode);
			}
		}
		LASSERT(bd.bd_bucket->hsb_count == 0);
		cfs_hash_bd_unlock(hs, &bd, 1);
		cond_resched();
	}

	LASSERT(atomic_read(&hs->hs_count) == 0);

	cfs_hash_buckets_free(hs->hs_buckets, cfs_hash_bkt_size(hs),
			      0, CFS_HASH_NBKT(hs));
	i = cfs_hash_with_bigname(hs) ?
	    CFS_HASH_BIGNAME_LEN : CFS_HASH_NAME_LEN;
	LIBCFS_FREE(hs, offsetof(struct cfs_hash, hs_name[i]));
}
/* take a reference on @hs; returns NULL if it is already being torn down */
struct cfs_hash *cfs_hash_getref(struct cfs_hash *hs)
{
	return atomic_inc_not_zero(&hs->hs_refcount) ? hs : NULL;
}
EXPORT_SYMBOL(cfs_hash_getref);

/* drop a reference; the last put destroys the hash */
void cfs_hash_putref(struct cfs_hash *hs)
{
	if (!atomic_dec_and_test(&hs->hs_refcount))
		return;

	cfs_hash_destroy(hs);
}
EXPORT_SYMBOL(cfs_hash_putref);
/*
 * Decide whether @hs should be resized: returns the new bit count to
 * grow/shrink to, 0 for "no change", or a negative errno when rehash is
 * unsupported or cannot run right now.
 */
static inline int
cfs_hash_rehash_bits(struct cfs_hash *hs)
{
	if (cfs_hash_with_no_lock(hs) ||
	    !cfs_hash_with_rehash(hs))
		return -EOPNOTSUPP;

	if (unlikely(cfs_hash_is_exiting(hs)))
		return -ESRCH;

	if (unlikely(cfs_hash_is_rehashing(hs)))
		return -EALREADY;

	if (unlikely(cfs_hash_is_iterating(hs)))
		return -EAGAIN;

	/* XXX: need to handle case with max_theta != 2.0
	 *      and the case with min_theta != 0.5 */
	if ((hs->hs_cur_bits < hs->hs_max_bits) &&
	    (__cfs_hash_theta(hs) > hs->hs_max_theta))
		return hs->hs_cur_bits + 1;

	if (!cfs_hash_with_shrink(hs))
		return 0;

	if ((hs->hs_cur_bits > hs->hs_min_bits) &&
	    (__cfs_hash_theta(hs) < hs->hs_min_theta))
		return hs->hs_cur_bits - 1;

	return 0;
}
/**
 * Decide whether a rehash may run inline (in the caller's context).
 * It may not when the user asked for non-blocking add/del, or when the
 * table holds too many elements for an inline pass to be safe.
 */
static inline int
cfs_hash_rehash_inline(struct cfs_hash *hs)
{
	if (cfs_hash_with_nblk_change(hs))
		return 0;

	return atomic_read(&hs->hs_count) < CFS_HASH_LOOP_HOG;
}
/**
 * Add item @hnode to libcfs hash @hs using @key.  The registered
 * ops->hs_get function will be called when the item is added.
 * May trigger a (possibly inline) rehash afterwards if the load factor
 * crossed its threshold.
 */
void
cfs_hash_add(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	struct cfs_hash_bd   bd;
	int	     bits;

	LASSERT(hlist_unhashed(hnode));

	cfs_hash_lock(hs, 0);
	cfs_hash_bd_get_and_lock(hs, key, &bd, 1);

	cfs_hash_key_validate(hs, key, hnode);
	cfs_hash_bd_add_locked(hs, &bd, hnode);

	cfs_hash_bd_unlock(hs, &bd, 1);

	bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
}
EXPORT_SYMBOL(cfs_hash_add);
/*
 * Look up @key and, if absent, insert @hnode. Returns the node that
 * ends up in the table: @hnode itself when it was inserted, or the
 * pre-existing node on a key collision. @noref != 0 suppresses taking
 * a reference on a found node.
 */
static struct hlist_node *
cfs_hash_find_or_add(struct cfs_hash *hs, const void *key,
		     struct hlist_node *hnode, int noref)
{
	struct hlist_node *ehnode;
	struct cfs_hash_bd bds[2];
	int bits = 0;

	LASSERT(hlist_unhashed(hnode));
	cfs_hash_lock(hs, 0);
	/* lock both candidate buckets in case a rehash is in progress */
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
	cfs_hash_key_validate(hs, key, hnode);
	ehnode = cfs_hash_dual_bd_findadd_locked(hs, bds, key,
						 hnode, noref);
	cfs_hash_dual_bd_unlock(hs, bds, 1);
	if (ehnode == hnode) /* new item added */
		bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
	return ehnode;
}
/**
* Add item @hnode to libcfs hash @hs using @key. The registered
* ops->hs_get function will be called if the item was added.
* Returns 0 on success or -EALREADY on key collisions.
*/
/*
 * Insert @hnode under @key only if the key is not already present.
 * Returns 0 on success or -EALREADY on a key collision.
 */
int
cfs_hash_add_unique(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	if (cfs_hash_find_or_add(hs, key, hnode, 1) == hnode)
		return 0;
	return -EALREADY;
}
EXPORT_SYMBOL(cfs_hash_add_unique);
/**
* Add item @hnode to libcfs hash @hs using @key. If this @key
* already exists in the hash then ops->hs_get will be called on the
* conflicting entry and that entry will be returned to the caller.
* Otherwise ops->hs_get is called on the item which was added.
*/
/*
 * Insert @hnode under @key, or find the conflicting entry. Returns the
 * object (not the hlist node) that is in the table afterwards; a
 * reference is taken on it via ops->hs_get.
 */
void *
cfs_hash_findadd_unique(struct cfs_hash *hs, const void *key,
			struct hlist_node *hnode)
{
	struct hlist_node *ehnode;

	ehnode = cfs_hash_find_or_add(hs, key, hnode, 0);
	return cfs_hash_object(hs, ehnode);
}
EXPORT_SYMBOL(cfs_hash_findadd_unique);
/**
* Delete item @hnode from the libcfs hash @hs using @key. The @key
* is required to ensure the correct hash bucket is locked since there
* is no direct linkage from the item to the bucket. The object
* removed from the hash will be returned and obs->hs_put is called
* on the removed object.
*/
void *
cfs_hash_del(struct cfs_hash *hs, const void *key, struct hlist_node *hnode)
{
	void *obj = NULL;
	int bits = 0;
	struct cfs_hash_bd bds[2];

	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 1);
	/* NB: do nothing if @hnode is not in hash table */
	if (hnode == NULL || !hlist_unhashed(hnode)) {
		if (bds[1].bd_bucket == NULL && hnode != NULL) {
			/* no rehash in flight: delete from the one bucket */
			cfs_hash_bd_del_locked(hs, &bds[0], hnode);
		} else {
			/* search both buckets by key, then delete */
			hnode = cfs_hash_dual_bd_finddel_locked(hs, bds,
								key, hnode);
		}
	}
	if (hnode != NULL) {
		obj = cfs_hash_object(hs, hnode);
		bits = cfs_hash_rehash_bits(hs);	/* maybe shrink now */
	}
	cfs_hash_dual_bd_unlock(hs, bds, 1);
	cfs_hash_unlock(hs, 0);
	if (bits > 0)
		cfs_hash_rehash(hs, cfs_hash_rehash_inline(hs));
	return obj;
}
EXPORT_SYMBOL(cfs_hash_del);
/**
* Delete item given @key in libcfs hash @hs. The first @key found in
* the hash will be removed, if the key exists multiple times in the hash
* @hs this function must be called once per key. The removed object
* will be returned and ops->hs_put is called on the removed object.
*/
void *
cfs_hash_del_key(struct cfs_hash *hs, const void *key)
{
	/* NULL hnode: cfs_hash_del() deletes the first item matching @key */
	return cfs_hash_del(hs, key, NULL);
}
EXPORT_SYMBOL(cfs_hash_del_key);
/**
* Lookup an item using @key in the libcfs hash @hs and return it.
* If the @key is found in the hash hs->hs_get() is called and the
* matching objects is returned. It is the callers responsibility
* to call the counterpart ops->hs_put using the cfs_hash_put() macro
* when when finished with the object. If the @key was not found
* in the hash @hs NULL is returned.
*/
void *
cfs_hash_lookup(struct cfs_hash *hs, const void *key)
{
	void *obj = NULL;
	struct hlist_node *hnode;
	struct cfs_hash_bd bds[2];

	cfs_hash_lock(hs, 0);
	/* read-lock both candidate buckets (a rehash may be in flight) */
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
	hnode = cfs_hash_dual_bd_lookup_locked(hs, bds, key);
	if (hnode != NULL)
		obj = cfs_hash_object(hs, hnode);	/* takes a reference */
	cfs_hash_dual_bd_unlock(hs, bds, 0);
	cfs_hash_unlock(hs, 0);
	return obj;
}
EXPORT_SYMBOL(cfs_hash_lookup);
/*
 * Prepare @hs for a table-wide iteration: mark the hash as iterating
 * and cancel any rehash in flight so bucket membership stays stable
 * while the iterator runs.
 */
static void
cfs_hash_for_each_enter(struct cfs_hash *hs)
{
	LASSERT(!cfs_hash_is_exiting(hs));
	if (!cfs_hash_with_rehash(hs))
		return;
	/*
	 * NB: it's race on cfs_has_t::hs_iterating, but doesn't matter
	 * because it's just an unreliable signal to rehash-thread,
	 * rehash-thread will try to finish rehash ASAP when seeing this.
	 */
	hs->hs_iterating = 1;
	cfs_hash_lock(hs, 1);
	hs->hs_iterators++;
	/* NB: iteration is mostly called by service thread,
	 * we tend to cancel pending rehash-request, instead of
	 * blocking service thread, we will relaunch rehash request
	 * after iteration */
	if (cfs_hash_is_rehashing(hs))
		cfs_hash_rehash_cancel_locked(hs);
	cfs_hash_unlock(hs, 1);
}
/*
 * Undo cfs_hash_for_each_enter(): drop the iterator count, clear the
 * iterating flag once the last iterator leaves, and relaunch a rehash
 * if one is warranted now.
 */
static void
cfs_hash_for_each_exit(struct cfs_hash *hs)
{
	int remained;
	int bits;

	if (!cfs_hash_with_rehash(hs))
		return;
	cfs_hash_lock(hs, 1);
	remained = --hs->hs_iterators;
	bits = cfs_hash_rehash_bits(hs);
	cfs_hash_unlock(hs, 1);
	/* NB: it's race on cfs_has_t::hs_iterating, see above */
	if (remained == 0)
		hs->hs_iterating = 0;
	if (bits > 0) {
		/* rehash inline only while the table is still small */
		cfs_hash_rehash(hs, atomic_read(&hs->hs_count) <
				    CFS_HASH_LOOP_HOG);
	}
}
/**
* For each item in the libcfs hash @hs call the passed callback @func
* and pass to it as an argument each hash item and the private @data.
*
* a) the function may sleep!
* b) during the callback:
* . the bucket lock is held so the callback must never sleep.
* . if @removal_safe is true, use can remove current item by
* cfs_hash_bd_del_locked
*/
static __u64
cfs_hash_for_each_tight(struct cfs_hash *hs, cfs_hash_for_each_cb_t func,
			void *data, int remove_safe)
{
	struct hlist_node *hnode;
	struct hlist_node *pos;
	struct cfs_hash_bd bd;
	__u64 count = 0;
	int excl = !!remove_safe;	/* exclusive bucket lock iff removal allowed */
	int loop = 0;
	int i;

	cfs_hash_for_each_enter(hs);
	cfs_hash_lock(hs, 0);
	LASSERT(!cfs_hash_is_rehashing(hs));
	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, excl);
		if (func == NULL) { /* only glimpse size */
			count += bd.bd_bucket->hsb_count;
			cfs_hash_bd_unlock(hs, &bd, excl);
			continue;
		}
		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			hlist_for_each_safe(hnode, pos, hhead) {
				cfs_hash_bucket_validate(hs, &bd, hnode);
				count++;
				loop++;
				/* non-zero return: callback requests stop */
				if (func(hs, &bd, hnode, data)) {
					cfs_hash_bd_unlock(hs, &bd, excl);
					goto out;
				}
			}
		}
		cfs_hash_bd_unlock(hs, &bd, excl);
		if (loop < CFS_HASH_LOOP_HOG)
			continue;
		/* long run: drop the table lock and yield the CPU */
		loop = 0;
		cfs_hash_unlock(hs, 0);
		cond_resched();
		cfs_hash_lock(hs, 0);
	}
out:
	cfs_hash_unlock(hs, 0);
	cfs_hash_for_each_exit(hs);
	return count;
}
/* closure passed through cfs_hash_for_each_tight() by cfs_hash_cond_del() */
typedef struct {
	cfs_hash_cond_opt_cb_t func;
	void *arg;
} cfs_hash_cond_arg_t;

/*
 * Per-item callback for cfs_hash_cond_del(): delete the item when the
 * user predicate returns true. Always returns 0 so iteration continues.
 */
static int
cfs_hash_cond_del_locked(struct cfs_hash *hs, struct cfs_hash_bd *bd,
			 struct hlist_node *hnode, void *data)
{
	cfs_hash_cond_arg_t *cond = data;

	if (cond->func(cfs_hash_object(hs, hnode), cond->arg))
		cfs_hash_bd_del_locked(hs, bd, hnode);
	return 0;
}
/**
* Delete item from the libcfs hash @hs when @func return true.
* The write lock being hold during loop for each bucket to avoid
* any object be reference.
*/
/*
 * Walk every item of @hs and delete those for which @func(obj, @data)
 * returns true; buckets are write-locked for the whole scan.
 */
void
cfs_hash_cond_del(struct cfs_hash *hs, cfs_hash_cond_opt_cb_t func, void *data)
{
	cfs_hash_cond_arg_t arg;

	arg.func = func;
	arg.arg = data;
	cfs_hash_for_each_tight(hs, cfs_hash_cond_del_locked, &arg, 1);
}
EXPORT_SYMBOL(cfs_hash_cond_del);
/* Call @func on every item of @hs; @func must not remove items. */
void
cfs_hash_for_each(struct cfs_hash *hs,
		  cfs_hash_for_each_cb_t func, void *data)
{
	cfs_hash_for_each_tight(hs, func, data, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each);
/* Like cfs_hash_for_each(), but @func may delete the current item. */
void
cfs_hash_for_each_safe(struct cfs_hash *hs,
		       cfs_hash_for_each_cb_t func, void *data)
{
	cfs_hash_for_each_tight(hs, func, data, 1);
}
EXPORT_SYMBOL(cfs_hash_for_each_safe);
/*
 * Iteration callback for cfs_hash_is_empty(): the presence of any item
 * clears the "empty" flag and stops the walk immediately.
 */
static int
cfs_hash_peek(struct cfs_hash *hs, struct cfs_hash_bd *bd,
	      struct hlist_node *hnode, void *data)
{
	int *empty = data;

	*empty = 0;
	return 1; /* non-zero stops the iteration */
}
/* Return 1 when @hs holds no items, 0 otherwise. */
int
cfs_hash_is_empty(struct cfs_hash *hs)
{
	int empty = 1;

	/* cfs_hash_peek() clears @empty and aborts on the first item */
	cfs_hash_for_each_tight(hs, cfs_hash_peek, &empty, 0);
	return empty;
}
EXPORT_SYMBOL(cfs_hash_is_empty);
/*
 * Return the number of items in @hs: O(1) via the item counter when
 * the hash keeps one, otherwise O(n) by walking every bucket.
 */
__u64
cfs_hash_size_get(struct cfs_hash *hs)
{
	if (cfs_hash_with_counter(hs))
		return atomic_read(&hs->hs_count);
	return cfs_hash_for_each_tight(hs, NULL, NULL, 0);
}
EXPORT_SYMBOL(cfs_hash_size_get);
/*
* cfs_hash_for_each_relax:
* Iterate the hash table and call @func on each item without
* any lock. This function can't guarantee to finish iteration
* if these features are enabled:
*
* a. if rehash_key is enabled, an item can be moved from
* one bucket to another bucket
* b. user can remove non-zero-ref item from hash-table,
* so the item can be removed from hash-table, even worse,
* it's possible that user changed key and insert to another
* hash bucket.
* there's no way for us to finish iteration correctly on previous
* two cases, so iteration has to be stopped on change.
*/
/*
 * Core of the "nolock" iterators: pin each item with cfs_hash_get()
 * and drop all locks around the callback so @func may sleep. Returns
 * the number of callback invocations made.
 */
static int
cfs_hash_for_each_relax(struct cfs_hash *hs, cfs_hash_for_each_cb_t func, void *data)
{
	struct hlist_node *hnode;
	struct hlist_node *tmp;
	struct cfs_hash_bd bd;
	__u32 version;
	int count = 0;
	int stop_on_change;
	int rc;
	int i;

	/* must restart a bucket on change when items may move or vanish */
	stop_on_change = cfs_hash_with_rehash_key(hs) ||
			 !cfs_hash_with_no_itemref(hs) ||
			 CFS_HOP(hs, put_locked) == NULL;
	cfs_hash_lock(hs, 0);
	LASSERT(!cfs_hash_is_rehashing(hs));
	cfs_hash_for_each_bucket(hs, &bd, i) {
		struct hlist_head *hhead;

		cfs_hash_bd_lock(hs, &bd, 0);
		version = cfs_hash_bd_version_get(&bd);
		cfs_hash_bd_for_each_hlist(hs, &bd, hhead) {
			for (hnode = hhead->first; hnode != NULL;) {
				cfs_hash_bucket_validate(hs, &bd, hnode);
				cfs_hash_get(hs, hnode);	/* pin the item */
				cfs_hash_bd_unlock(hs, &bd, 0);
				cfs_hash_unlock(hs, 0);
				rc = func(hs, &bd, hnode, data);
				if (stop_on_change)
					cfs_hash_put(hs, hnode);
				cond_resched();
				count++;
				cfs_hash_lock(hs, 0);
				cfs_hash_bd_lock(hs, &bd, 0);
				if (!stop_on_change) {
					/* item still pinned: next link is valid */
					tmp = hnode->next;
					cfs_hash_put_locked(hs, hnode);
					hnode = tmp;
				} else { /* bucket changed? */
					if (version !=
					    cfs_hash_bd_version_get(&bd))
						break;
					/* safe to continue because no change */
					hnode = hnode->next;
				}
				if (rc) /* callback wants to break iteration */
					break;
			}
		}
		cfs_hash_bd_unlock(hs, &bd, 0);
	}
	cfs_hash_unlock(hs, 0);
	return count;
}
/*
 * Iterate @hs without holding any lock while the callback runs (see
 * the cfs_hash_for_each_relax() comment above for the caveats).
 * Returns 0, or -EOPNOTSUPP when the table's configuration can't
 * support a lockless walk.
 */
int
cfs_hash_for_each_nolock(struct cfs_hash *hs,
			 cfs_hash_for_each_cb_t func, void *data)
{
	if (cfs_hash_with_no_lock(hs) ||
	    cfs_hash_with_rehash_key(hs) ||
	    !cfs_hash_with_no_itemref(hs))
		return -EOPNOTSUPP;
	/* get/put ops are required to pin items across the unlocked callback */
	if (CFS_HOP(hs, get) == NULL ||
	    (CFS_HOP(hs, put) == NULL &&
	     CFS_HOP(hs, put_locked) == NULL))
		return -EOPNOTSUPP;
	cfs_hash_for_each_enter(hs);
	cfs_hash_for_each_relax(hs, func, data);
	cfs_hash_for_each_exit(hs);
	return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_nolock);
/**
* For each hash bucket in the libcfs hash @hs call the passed callback
* @func until all the hash buckets are empty. The passed callback @func
* or the previously registered callback hs->hs_put must remove the item
* from the hash. You may either use the cfs_hash_del() or hlist_del()
* functions. No rwlocks will be held during the callback @func it is
* safe to sleep if needed. This function will not terminate until the
* hash is empty. Note it is still possible to concurrently add new
* items in to the hash. It is the callers responsibility to ensure
* the required locking is in place to prevent concurrent insertions.
*/
int
cfs_hash_for_each_empty(struct cfs_hash *hs,
			cfs_hash_for_each_cb_t func, void *data)
{
	unsigned i = 0;

	if (cfs_hash_with_no_lock(hs))
		return -EOPNOTSUPP;
	/* get/put ops are required to pin items across the unlocked callback */
	if (CFS_HOP(hs, get) == NULL ||
	    (CFS_HOP(hs, put) == NULL &&
	     CFS_HOP(hs, put_locked) == NULL))
		return -EOPNOTSUPP;
	cfs_hash_for_each_enter(hs);
	/* keep walking until one full pass visits nothing */
	while (cfs_hash_for_each_relax(hs, func, data)) {
		CDEBUG(D_INFO, "Try to empty hash: %s, loop: %u\n",
		       hs->hs_name, i++);
	}
	cfs_hash_for_each_exit(hs);
	return 0;
}
EXPORT_SYMBOL(cfs_hash_for_each_empty);
/*
 * Walk only hash list @hindex of @hs, calling @func on each item with
 * the bucket read-locked; stop early when @func returns non-zero.
 * Out-of-range @hindex is silently ignored.
 */
void
cfs_hash_hlist_for_each(struct cfs_hash *hs, unsigned hindex,
			cfs_hash_for_each_cb_t func, void *data)
{
	struct hlist_head *hhead;
	struct hlist_node *hnode;
	struct cfs_hash_bd bd;

	cfs_hash_for_each_enter(hs);
	cfs_hash_lock(hs, 0);
	if (hindex >= CFS_HASH_NHLIST(hs))
		goto out;
	cfs_hash_bd_index_set(hs, hindex, &bd);
	cfs_hash_bd_lock(hs, &bd, 0);
	hhead = cfs_hash_bd_hhead(hs, &bd);
	hlist_for_each(hnode, hhead) {
		if (func(hs, &bd, hnode, data))
			break;
	}
	cfs_hash_bd_unlock(hs, &bd, 0);
out:
	cfs_hash_unlock(hs, 0);
	cfs_hash_for_each_exit(hs);
}
EXPORT_SYMBOL(cfs_hash_hlist_for_each);
/*
* For each item in the libcfs hash @hs which matches the @key call
* the passed callback @func and pass to it as an argument each hash
* item and the private @data. During the callback the bucket lock
* is held so the callback must never sleep.
*/
void
cfs_hash_for_each_key(struct cfs_hash *hs, const void *key,
		      cfs_hash_for_each_cb_t func, void *data)
{
	struct hlist_node *hnode;
	struct cfs_hash_bd bds[2];
	unsigned i;

	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get_and_lock(hs, key, bds, 0);
	/* scan both candidate buckets (second exists while rehashing) */
	cfs_hash_for_each_bd(bds, 2, i) {
		struct hlist_head *hlist = cfs_hash_bd_hhead(hs, &bds[i]);

		hlist_for_each(hnode, hlist) {
			cfs_hash_bucket_validate(hs, &bds[i], hnode);
			if (cfs_hash_keycmp(hs, key, hnode)) {
				/* non-zero return stops this bucket's scan */
				if (func(hs, &bds[i], hnode, data))
					break;
			}
		}
	}
	cfs_hash_dual_bd_unlock(hs, bds, 0);
	cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_for_each_key);
/**
* Rehash the libcfs hash @hs to the given @bits. This can be used
* to grow the hash size when excessive chaining is detected, or to
* shrink the hash when it is larger than needed. When the CFS_HASH_REHASH
* flag is set in @hs the libcfs hash may be dynamically rehashed
* during addition or removal if the hash's theta value exceeds
* either the hs->hs_min_theta or hs->max_theta values. By default
* these values are tuned to keep the chained hash depth small, and
* this approach assumes a reasonably uniform hashing function. The
* theta thresholds for @hs are tunable via cfs_hash_set_theta().
*/
/*
 * Cancel an in-flight rehash. Caller must hold cfs_hash_lock(hs, 1);
 * the lock is dropped and re-taken while waiting for a rehash worker
 * that could not be descheduled.
 */
void
cfs_hash_rehash_cancel_locked(struct cfs_hash *hs)
{
	int i;

	/* need hold cfs_hash_lock(hs, 1) */
	LASSERT(cfs_hash_with_rehash(hs) &&
		!cfs_hash_with_no_lock(hs));
	if (!cfs_hash_is_rehashing(hs))
		return;
	/* fast path: the workitem had not started running yet */
	if (cfs_wi_deschedule(cfs_sched_rehash, &hs->hs_rehash_wi)) {
		hs->hs_rehash_bits = 0;
		return;
	}
	/* slow path: worker is running; poll until it finishes */
	for (i = 2; cfs_hash_is_rehashing(hs); i++) {
		cfs_hash_unlock(hs, 1);
		/* raise console warning while waiting too long */
		CDEBUG(IS_PO2(i >> 3) ? D_WARNING : D_INFO,
		       "hash %s is still rehashing, rescheded %d\n",
		       hs->hs_name, i - 1);
		cond_resched();
		cfs_hash_lock(hs, 1);
	}
}
EXPORT_SYMBOL(cfs_hash_rehash_cancel_locked);
/* Cancel a pending/running rehash; takes the exclusive table lock. */
void
cfs_hash_rehash_cancel(struct cfs_hash *hs)
{
	cfs_hash_lock(hs, 1);
	cfs_hash_rehash_cancel_locked(hs);
	cfs_hash_unlock(hs, 1);
}
EXPORT_SYMBOL(cfs_hash_rehash_cancel);
/*
 * Start (or perform) a resize of @hs. When @do_rehash is zero the work
 * is queued to the rehash workitem scheduler and 0 is returned;
 * otherwise the worker runs synchronously and its result is returned.
 * A negative errno from cfs_hash_rehash_bits() means no resize is
 * possible/needed right now.
 */
int
cfs_hash_rehash(struct cfs_hash *hs, int do_rehash)
{
	int rc;

	LASSERT(cfs_hash_with_rehash(hs) && !cfs_hash_with_no_lock(hs));
	cfs_hash_lock(hs, 1);
	rc = cfs_hash_rehash_bits(hs);
	if (rc <= 0) {
		cfs_hash_unlock(hs, 1);
		return rc;
	}
	/* non-zero hs_rehash_bits marks the hash as "rehashing" */
	hs->hs_rehash_bits = rc;
	if (!do_rehash) {
		/* launch and return */
		cfs_wi_schedule(cfs_sched_rehash, &hs->hs_rehash_wi);
		cfs_hash_unlock(hs, 1);
		return 0;
	}
	/* rehash right now */
	cfs_hash_unlock(hs, 1);
	return cfs_hash_rehash_worker(&hs->hs_rehash_wi);
}
EXPORT_SYMBOL(cfs_hash_rehash);
/*
 * Migrate every item of bucket @old into its new bucket, computed from
 * hs->hs_rehash_bits over hs->hs_rehash_buckets. Returns the number of
 * items moved.
 */
static int
cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
{
	struct cfs_hash_bd new;
	struct hlist_head *hhead;
	struct hlist_node *hnode;
	struct hlist_node *pos;
	void *key;
	int c = 0;

	/* hold cfs_hash_lock(hs, 1), so don't need any bucket lock */
	cfs_hash_bd_for_each_hlist(hs, old, hhead) {
		hlist_for_each_safe(hnode, pos, hhead) {
			key = cfs_hash_key(hs, hnode);
			LASSERT(key != NULL);
			/* Validate hnode is in the correct bucket. */
			cfs_hash_bucket_validate(hs, old, hnode);
			/*
			 * Delete from old hash bucket; move to new bucket.
			 * ops->hs_key must be defined.
			 */
			cfs_hash_bd_from_key(hs, hs->hs_rehash_buckets,
					     hs->hs_rehash_bits, key, &new);
			cfs_hash_bd_move_locked(hs, old, &new, hnode);
			c++;
		}
	}
	return c;
}
/*
 * Workitem handler doing the actual resize: allocate a bucket table of
 * the new size, migrate every item into it, then install the new table
 * and free the old one. Returns 1 only when cfs_wi_exit() was called
 * (the hash is being destroyed), otherwise 0.
 */
static int
cfs_hash_rehash_worker(cfs_workitem_t *wi)
{
	struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
	struct cfs_hash_bucket **bkts;
	struct cfs_hash_bd bd;
	unsigned int old_size;
	unsigned int new_size;
	int bsize;
	int count = 0;
	int rc = 0;
	int i;

	LASSERT (hs != NULL && cfs_hash_with_rehash(hs));
	cfs_hash_lock(hs, 0);
	LASSERT(cfs_hash_is_rehashing(hs));
	old_size = CFS_HASH_NBKT(hs);
	new_size = CFS_HASH_RH_NBKT(hs);
	cfs_hash_unlock(hs, 0);
	/*
	 * don't need hs::hs_rwlock for hs::hs_buckets,
	 * because nobody can change bkt-table except me.
	 */
	bkts = cfs_hash_buckets_realloc(hs, hs->hs_buckets,
					old_size, new_size);
	cfs_hash_lock(hs, 1);
	if (bkts == NULL) {
		rc = -ENOMEM;
		goto out;
	}
	if (bkts == hs->hs_buckets) {
		bkts = NULL; /* do nothing */
		goto out;
	}
	/* re-check the load factor: it may have normalized meanwhile */
	rc = __cfs_hash_theta(hs);
	if ((rc >= hs->hs_min_theta) && (rc <= hs->hs_max_theta)) {
		/* free the new allocated bkt-table */
		old_size = new_size;
		new_size = CFS_HASH_NBKT(hs);
		rc = -EALREADY;
		goto out;
	}
	LASSERT(hs->hs_rehash_buckets == NULL);
	hs->hs_rehash_buckets = bkts;
	rc = 0;
	/* migrate items bucket by bucket, yielding periodically */
	cfs_hash_for_each_bucket(hs, &bd, i) {
		if (cfs_hash_is_exiting(hs)) {
			rc = -ESRCH;
			/* someone wants to destroy the hash, abort now */
			if (old_size < new_size) /* OK to free old bkt-table */
				break;
			/* it's shrinking, need free new bkt-table */
			hs->hs_rehash_buckets = NULL;
			old_size = new_size;
			new_size = CFS_HASH_NBKT(hs);
			goto out;
		}
		count += cfs_hash_rehash_bd(hs, &bd);
		if (count < CFS_HASH_LOOP_HOG ||
		    cfs_hash_is_iterating(hs)) { /* need to finish ASAP */
			continue;
		}
		count = 0;
		cfs_hash_unlock(hs, 1);
		cond_resched();
		cfs_hash_lock(hs, 1);
	}
	/* commit: swap the new table in, keep the old one for freeing */
	hs->hs_rehash_count++;
	bkts = hs->hs_buckets;
	hs->hs_buckets = hs->hs_rehash_buckets;
	hs->hs_rehash_buckets = NULL;
	hs->hs_cur_bits = hs->hs_rehash_bits;
out:
	hs->hs_rehash_bits = 0;	/* clears the "rehashing" state */
	if (rc == -ESRCH) /* never be scheduled again */
		cfs_wi_exit(cfs_sched_rehash, wi);
	bsize = cfs_hash_bkt_size(hs);
	cfs_hash_unlock(hs, 1);
	/* can't refer to @hs anymore because it could be destroyed */
	if (bkts != NULL)
		cfs_hash_buckets_free(bkts, bsize, new_size, old_size);
	if (rc != 0)
		CDEBUG(D_INFO, "early quit of rehashing: %d\n", rc);
	/* return 1 only if cfs_wi_exit is called */
	return rc == -ESRCH;
}
/**
* Rehash the object referenced by @hnode in the libcfs hash @hs. The
* @old_key must be provided to locate the objects previous location
* in the hash, and the @new_key will be used to reinsert the object.
* Use this function instead of a cfs_hash_add() + cfs_hash_del()
* combo when it is critical that there is no window in time where the
* object is missing from the hash. When an object is being rehashed
* the registered cfs_hash_get() and cfs_hash_put() functions will
* not be called.
*/
void cfs_hash_rehash_key(struct cfs_hash *hs, const void *old_key,
			 void *new_key, struct hlist_node *hnode)
{
	struct cfs_hash_bd bds[3];
	struct cfs_hash_bd old_bds[2];
	struct cfs_hash_bd new_bd;

	LASSERT(!hlist_unhashed(hnode));
	cfs_hash_lock(hs, 0);
	cfs_hash_dual_bd_get(hs, old_key, old_bds);
	cfs_hash_bd_get(hs, new_key, &new_bd);
	bds[0] = old_bds[0];
	bds[1] = old_bds[1];
	bds[2] = new_bd;
	/* NB: bds[0] and bds[1] are ordered already */
	cfs_hash_bd_order(&bds[1], &bds[2]);
	cfs_hash_bd_order(&bds[0], &bds[1]);
	/* lock all three buckets in sorted order to avoid deadlock */
	cfs_hash_multi_bd_lock(hs, bds, 3, 1);
	if (likely(old_bds[1].bd_bucket == NULL)) {
		/* no rehash in flight: simple move between two buckets */
		cfs_hash_bd_move_locked(hs, &old_bds[0], &new_bd, hnode);
	} else {
		cfs_hash_dual_bd_finddel_locked(hs, old_bds, old_key, hnode);
		cfs_hash_bd_add_locked(hs, &new_bd, hnode);
	}
	/* overwrite key inside locks, otherwise may screw up with
	 * other operations, i.e: rehash */
	cfs_hash_keycpy(hs, new_key, hnode);
	cfs_hash_multi_bd_unlock(hs, bds, 3, 1);
	cfs_hash_unlock(hs, 0);
}
EXPORT_SYMBOL(cfs_hash_rehash_key);
/* Emit the column-header line matching cfs_hash_debug_str() rows. */
int cfs_hash_debug_header(struct seq_file *m)
{
	return seq_printf(m, "%-*s%6s%6s%6s%6s%6s%6s%6s%7s%8s%8s%8s%s\n",
			  CFS_HASH_BIGNAME_LEN,
			  "name", "cur", "min", "max", "theta", "t-min", "t-max",
			  "flags", "rehash", "count", "maxdep", "maxdepb",
			  " distribution");
}
EXPORT_SYMBOL(cfs_hash_debug_header);
/*
 * Return the bucket table that currently holds (or will hold) every
 * item: the bigger of the live and the rehash-target tables.
 */
static struct cfs_hash_bucket **
cfs_hash_full_bkts(struct cfs_hash *hs)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
	if (hs->hs_rehash_buckets == NULL)
		return hs->hs_buckets;
	LASSERT(hs->hs_rehash_bits != 0);
	if (hs->hs_rehash_bits > hs->hs_cur_bits)
		return hs->hs_rehash_buckets;
	return hs->hs_buckets;
}
/*
 * Return the bucket count matching cfs_hash_full_bkts(): the bigger of
 * the live and the rehash-target table sizes.
 */
static unsigned int
cfs_hash_full_nbkt(struct cfs_hash *hs)
{
	/* NB: caller should hold hs->hs_rwlock if REHASH is set */
	if (hs->hs_rehash_buckets == NULL)
		return CFS_HASH_NBKT(hs);
	LASSERT(hs->hs_rehash_bits != 0);
	if (hs->hs_rehash_bits > hs->hs_cur_bits)
		return CFS_HASH_RH_NBKT(hs);
	return CFS_HASH_NBKT(hs);
}
/* Dump one statistics row for @hs; pairs with cfs_hash_debug_header(). */
int cfs_hash_debug_str(struct cfs_hash *hs, struct seq_file *m)
{
	int dist[8] = { 0, };
	int maxdep = -1;
	int maxdepb = -1;
	int total = 0;
	int theta;
	int i;

	cfs_hash_lock(hs, 0);
	theta = __cfs_hash_theta(hs);
	seq_printf(m, "%-*s %5d %5d %5d %d.%03d %d.%03d %d.%03d 0x%02x %6d ",
		   CFS_HASH_BIGNAME_LEN, hs->hs_name,
		   1 << hs->hs_cur_bits, 1 << hs->hs_min_bits,
		   1 << hs->hs_max_bits,
		   __cfs_hash_theta_int(theta), __cfs_hash_theta_frac(theta),
		   __cfs_hash_theta_int(hs->hs_min_theta),
		   __cfs_hash_theta_frac(hs->hs_min_theta),
		   __cfs_hash_theta_int(hs->hs_max_theta),
		   __cfs_hash_theta_frac(hs->hs_max_theta),
		   hs->hs_flags, hs->hs_rehash_count);
	/*
	 * The distribution is a summary of the chained hash depth in
	 * each of the libcfs hash buckets. Each buckets hsb_count is
	 * divided by the hash theta value and used to generate a
	 * histogram of the hash distribution. A uniform hash will
	 * result in all hash buckets being close to the average thus
	 * only the first few entries in the histogram will be non-zero.
	 * If your hash function results in a non-uniform hash this will
	 * be observable by outlier buckets in the distribution histogram.
	 *
	 * Uniform hash distribution:      128/128/0/0/0/0/0/0
	 * Non-Uniform hash distribution:  128/125/0/0/0/0/2/1
	 */
	for (i = 0; i < cfs_hash_full_nbkt(hs); i++) {
		struct cfs_hash_bd bd;

		bd.bd_bucket = cfs_hash_full_bkts(hs)[i];
		cfs_hash_bd_lock(hs, &bd, 0);
		if (maxdep < bd.bd_bucket->hsb_depmax) {
			maxdep = bd.bd_bucket->hsb_depmax;
			maxdepb = ffz(~maxdep);
		}
		total += bd.bd_bucket->hsb_count;
		dist[min(fls(bd.bd_bucket->hsb_count / max(theta, 1)), 7)]++;
		cfs_hash_bd_unlock(hs, &bd, 0);
	}
	seq_printf(m, "%7d %7d %7d ", total, maxdep, maxdepb);
	for (i = 0; i < 8; i++)
		seq_printf(m, "%d%c", dist[i], (i == 7) ? '\n' : '/');
	cfs_hash_unlock(hs, 0);
	return 0;
}
EXPORT_SYMBOL(cfs_hash_debug_str);
| gpl-2.0 |
Digilent/linux-Digilent-Dev | drivers/staging/comedi/drivers/pcl812.c | 331 | 35482 | /*
* comedi/drivers/pcl812.c
*
* Author: Michal Dobes <dobes@tesnet.cz>
*
* hardware driver for Advantech cards
* card: PCL-812, PCL-812PG, PCL-813, PCL-813B
* driver: pcl812, pcl812pg, pcl813, pcl813b
* and for ADlink cards
* card: ACL-8112DG, ACL-8112HG, ACL-8112PG, ACL-8113, ACL-8216
* driver: acl8112dg, acl8112hg, acl8112pg, acl8113, acl8216
* and for ICP DAS cards
* card: ISO-813, A-821PGH, A-821PGL, A-821PGL-NDA, A-822PGH, A-822PGL,
* driver: iso813, a821pgh, a-821pgl, a-821pglnda, a822pgh, a822pgl,
* card: A-823PGH, A-823PGL, A-826PG
* driver: a823pgh, a823pgl, a826pg
*/
/*
* Driver: pcl812
* Description: Advantech PCL-812/PG, PCL-813/B,
* ADLink ACL-8112DG/HG/PG, ACL-8113, ACL-8216,
* ICP DAS A-821PGH/PGL/PGL-NDA, A-822PGH/PGL, A-823PGH/PGL, A-826PG,
* ICP DAS ISO-813
* Author: Michal Dobes <dobes@tesnet.cz>
* Devices: [Advantech] PCL-812 (pcl812), PCL-812PG (pcl812pg),
* PCL-813 (pcl813), PCL-813B (pcl813b), [ADLink] ACL-8112DG (acl8112dg),
* ACL-8112HG (acl8112hg), ACL-8113 (acl-8113), ACL-8216 (acl8216),
* [ICP] ISO-813 (iso813), A-821PGH (a821pgh), A-821PGL (a821pgl),
* A-821PGL-NDA (a821pclnda), A-822PGH (a822pgh), A-822PGL (a822pgl),
* A-823PGH (a823pgh), A-823PGL (a823pgl), A-826PG (a826pg)
* Updated: Mon, 06 Aug 2007 12:03:15 +0100
 * Status: works (I hope. My board caught fire under my hands,
 * so I can't test all features.)
*
 * This driver supports insn and cmd interfaces. Some boards support only insn
 * because their hardware doesn't allow more (PCL-813/B, ACL-8113, ISO-813).
 * Data transfer over DMA is supported only when you measure only one
 * channel; this is a hardware limitation of these boards.
*
* Options for PCL-812:
* [0] - IO Base
* [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
* [2] - DMA (0=disable, 1, 3)
* [3] - 0=trigger source is internal 8253 with 2MHz clock
* 1=trigger source is external
* [4] - 0=A/D input range is +/-10V
* 1=A/D input range is +/-5V
* 2=A/D input range is +/-2.5V
* 3=A/D input range is +/-1.25V
* 4=A/D input range is +/-0.625V
* 5=A/D input range is +/-0.3125V
* [5] - 0=D/A outputs 0-5V (internal reference -5V)
* 1=D/A outputs 0-10V (internal reference -10V)
* 2=D/A outputs unknown (external reference)
*
* Options for PCL-812PG, ACL-8112PG:
* [0] - IO Base
* [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
* [2] - DMA (0=disable, 1, 3)
* [3] - 0=trigger source is internal 8253 with 2MHz clock
* 1=trigger source is external
* [4] - 0=A/D have max +/-5V input
* 1=A/D have max +/-10V input
* [5] - 0=D/A outputs 0-5V (internal reference -5V)
* 1=D/A outputs 0-10V (internal reference -10V)
* 2=D/A outputs unknown (external reference)
*
* Options for ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH, ACL-8216, A-826PG:
* [0] - IO Base
* [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7; 10, 11, 12, 14, 15)
* [2] - DMA (0=disable, 1, 3)
* [3] - 0=trigger source is internal 8253 with 2MHz clock
* 1=trigger source is external
* [4] - 0=A/D channels are S.E.
* 1=A/D channels are DIFF
* [5] - 0=D/A outputs 0-5V (internal reference -5V)
* 1=D/A outputs 0-10V (internal reference -10V)
* 2=D/A outputs unknown (external reference)
*
* Options for A-821PGL/PGH:
* [0] - IO Base
* [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
* [2] - 0=A/D channels are S.E.
* 1=A/D channels are DIFF
* [3] - 0=D/A output 0-5V (internal reference -5V)
* 1=D/A output 0-10V (internal reference -10V)
*
* Options for A-821PGL-NDA:
* [0] - IO Base
* [1] - IRQ (0=disable, 2, 3, 4, 5, 6, 7)
* [2] - 0=A/D channels are S.E.
* 1=A/D channels are DIFF
*
* Options for PCL-813:
* [0] - IO Base
*
* Options for PCL-813B:
* [0] - IO Base
* [1] - 0= bipolar inputs
* 1= unipolar inputs
*
* Options for ACL-8113, ISO-813:
* [0] - IO Base
* [1] - 0= 10V bipolar inputs
* 1= 10V unipolar inputs
* 2= 20V bipolar inputs
* 3= 20V unipolar inputs
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>
#include "../comedidev.h"
#include <linux/delay.h>
#include <linux/io.h>
#include <asm/dma.h>
#include "comedi_fc.h"
#include "8253.h"
/* hardware types of the cards */
#define boardPCL812PG 0 /* and ACL-8112PG */
#define boardPCL813B 1
#define boardPCL812 2
#define boardPCL813 3
#define boardISO813 5
#define boardACL8113 6
#define boardACL8112 7 /* ACL-8112DG/HG, A-822PGL/PGH, A-823PGL/PGH */
#define boardACL8216 8 /* and ICP DAS A-826PG */
#define boardA821 9 /* PGH, PGL, PGL/NDA versions */
/*
* Register I/O map
*/
#define PCL812_TIMER_BASE 0x00
#define PCL812_AI_LSB_REG 0x04
#define PCL812_AI_MSB_REG 0x05
#define PCL812_AI_MSB_DRDY (1 << 4)
#define PCL812_AO_LSB_REG(x) (0x04 + ((x) * 2))
#define PCL812_AO_MSB_REG(x) (0x05 + ((x) * 2))
#define PCL812_DI_LSB_REG 0x06
#define PCL812_DI_MSB_REG 0x07
#define PCL812_STATUS_REG 0x08
#define PCL812_STATUS_DRDY (1 << 5)
#define PCL812_RANGE_REG 0x09
#define PCL812_MUX_REG 0x0a
#define PCL812_MUX_CHAN(x) ((x) << 0)
#define PCL812_MUX_CS0 (1 << 4)
#define PCL812_MUX_CS1 (1 << 5)
#define PCL812_CTRL_REG 0x0b
#define PCL812_CTRL_DISABLE_TRIG (0 << 0)
#define PCL812_CTRL_SOFT_TRIG (1 << 0)
#define PCL812_CTRL_PACER_DMA_TRIG (2 << 0)
#define PCL812_CTRL_PACER_EOC_TRIG (6 << 0)
#define PCL812_SOFTTRIG_REG 0x0c
#define PCL812_DO_LSB_REG 0x0d
#define PCL812_DO_MSB_REG 0x0e
#define MAX_CHANLIST_LEN 256 /* length of scan list */
/* Analog input range tables for the supported board variants. */
static const struct comedi_lrange range_pcl812pg_ai = {
	5, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625),
		BIP_RANGE(0.3125)
	}
};
static const struct comedi_lrange range_pcl812pg2_ai = {
	5, {
		BIP_RANGE(10),
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625)
	}
};
/* single fixed ranges selected via the PCL-812 board options */
static const struct comedi_lrange range812_bipolar1_25 = {
	1, {
		BIP_RANGE(1.25)
	}
};
static const struct comedi_lrange range812_bipolar0_625 = {
	1, {
		BIP_RANGE(0.625)
	}
};
static const struct comedi_lrange range812_bipolar0_3125 = {
	1, {
		BIP_RANGE(0.3125)
	}
};
static const struct comedi_lrange range_pcl813b_ai = {
	4, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625)
	}
};
static const struct comedi_lrange range_pcl813b2_ai = {
	4, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5),
		UNI_RANGE(1.25)
	}
};
static const struct comedi_lrange range_iso813_1_ai = {
	5, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625),
		BIP_RANGE(0.3125)
	}
};
static const struct comedi_lrange range_iso813_1_2_ai = {
	5, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5),
		UNI_RANGE(1.25),
		UNI_RANGE(0.625)
	}
};
static const struct comedi_lrange range_iso813_2_ai = {
	4, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625)
	}
};
static const struct comedi_lrange range_iso813_2_2_ai = {
	4, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5),
		UNI_RANGE(1.25)
	}
};
static const struct comedi_lrange range_acl8113_1_ai = {
	4, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625)
	}
};
static const struct comedi_lrange range_acl8113_1_2_ai = {
	4, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5),
		UNI_RANGE(1.25)
	}
};
static const struct comedi_lrange range_acl8113_2_ai = {
	3, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25)
	}
};
static const struct comedi_lrange range_acl8113_2_2_ai = {
	3, {
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5)
	}
};
static const struct comedi_lrange range_acl8112dg_ai = {
	9, {
		BIP_RANGE(5),
		BIP_RANGE(2.5),
		BIP_RANGE(1.25),
		BIP_RANGE(0.625),
		UNI_RANGE(10),
		UNI_RANGE(5),
		UNI_RANGE(2.5),
		UNI_RANGE(1.25),
		BIP_RANGE(10)
	}
};
/* high-gain variant: ranges down to +/-5mV */
static const struct comedi_lrange range_acl8112hg_ai = {
	12, {
		BIP_RANGE(5),
		BIP_RANGE(0.5),
		BIP_RANGE(0.05),
		BIP_RANGE(0.005),
		UNI_RANGE(10),
		UNI_RANGE(1),
		UNI_RANGE(0.1),
		UNI_RANGE(0.01),
		BIP_RANGE(10),
		BIP_RANGE(1),
		BIP_RANGE(0.1),
		BIP_RANGE(0.01)
	}
};
static const struct comedi_lrange range_a821pgh_ai = {
	4, {
		BIP_RANGE(5),
		BIP_RANGE(0.5),
		BIP_RANGE(0.05),
		BIP_RANGE(0.005)
	}
};
/* static description of one supported board model */
struct pcl812_board {
	const char *name;		/* board/driver name */
	int board_type;			/* one of the board* constants above */
	int n_aichan;			/* number of AI channels */
	int n_aochan;			/* number of AO channels */
	unsigned int ai_ns_min;		/* minimum AI timing in ns (per name; TODO confirm against cmd test) */
	const struct comedi_lrange *rangelist_ai;	/* AI range table */
	unsigned int IRQbits;		/* bitmask of usable IRQ lines */
	unsigned int has_dma:1;		/* board supports DMA transfers */
	unsigned int has_16bit_ai:1;	/* 16-bit (vs 12-bit) AI converter */
	unsigned int has_mpc508_mux:1;	/* uses MPC508 analog multiplexer */
	unsigned int has_dio:1;		/* has digital I/O subdevices */
};
/* one entry per supported board; matched by name at attach time */
static const struct pcl812_board boardtypes[] = {
	{
		.name = "pcl812",
		.board_type = boardPCL812,
		.n_aichan = 16,
		.n_aochan = 2,
		.ai_ns_min = 33000,
		.rangelist_ai = &range_bipolar10,
		.IRQbits = 0xdcfc,
		.has_dma = 1,
		.has_dio = 1,
	}, {
		.name = "pcl812pg",
		.board_type = boardPCL812PG,
		.n_aichan = 16,
		.n_aochan = 2,
		.ai_ns_min = 33000,
		.rangelist_ai = &range_pcl812pg_ai,
		.IRQbits = 0xdcfc,
		.has_dma = 1,
		.has_dio = 1,
	}, {
		.name = "acl8112pg",
		.board_type = boardPCL812PG,
		.n_aichan = 16,
		.n_aochan = 2,
		.ai_ns_min = 10000,
		.rangelist_ai = &range_pcl812pg_ai,
		.IRQbits = 0xdcfc,
		.has_dma = 1,
		.has_dio = 1,
	}, {
		.name = "acl8112dg",
		.board_type = boardACL8112,
		.n_aichan = 16,	/* 8 differential */
		.n_aochan = 2,
		.ai_ns_min = 10000,
		.rangelist_ai = &range_acl8112dg_ai,
		.IRQbits = 0xdcfc,
		.has_dma = 1,
		.has_mpc508_mux = 1,
		.has_dio = 1,
	}, {
		.name = "acl8112hg",
		.board_type = boardACL8112,
		.n_aichan = 16,	/* 8 differential */
		.n_aochan = 2,
		.ai_ns_min = 10000,
		.rangelist_ai = &range_acl8112hg_ai,
		.IRQbits = 0xdcfc,
		.has_dma = 1,
		.has_mpc508_mux = 1,
		.has_dio = 1,
	}, {
		.name = "a821pgl",
		.board_type = boardA821,
		.n_aichan = 16,	/* 8 differential */
		.n_aochan = 1,
		.ai_ns_min = 10000,
		.rangelist_ai = &range_pcl813b_ai,
		.IRQbits = 0x000c,
		.has_dio = 1,
	}, {
		.name = "a821pglnda",
		.board_type = boardA821,
		.n_aichan = 16,	/* 8 differential */
		.ai_ns_min = 10000,
		.rangelist_ai = &range_pcl813b_ai,
		.IRQbits = 0x000c,
	}, {
		.name = "a821pgh",
		.board_type = boardA821,
		.n_aichan = 16,	/* 8 differential */
		.n_aochan = 1,
		.ai_ns_min = 10000,
		.rangelist_ai = &range_a821pgh_ai,
		.IRQbits = 0x000c,
		.has_dio = 1,
	}, {
		.name = "a822pgl",
		.board_type = boardACL8112,
		.n_aichan = 16,	/* 8 differential */
		.n_aochan = 2,
		.ai_ns_min = 10000,
		.rangelist_ai = &range_acl8112dg_ai,
		.IRQbits = 0xdcfc,
		.has_dma = 1,
		.has_dio = 1,
	}, {
		.name = "a822pgh",
		.board_type = boardACL8112,
		.n_aichan = 16,	/* 8 differential */
		.n_aochan = 2,
		.ai_ns_min = 10000,
		.rangelist_ai = &range_acl8112hg_ai,
		.IRQbits = 0xdcfc,
		.has_dma = 1,
		.has_dio = 1,
	}, {
		.name = "a823pgl",
		.board_type = boardACL8112,
		.n_aichan = 16,	/* 8 differential */
		.n_aochan = 2,
		.ai_ns_min = 8000,
		.rangelist_ai = &range_acl8112dg_ai,
		.IRQbits = 0xdcfc,
		.has_dma = 1,
		.has_dio = 1,
	}, {
		.name = "a823pgh",
		.board_type = boardACL8112,
		.n_aichan = 16,	/* 8 differential */
		.n_aochan = 2,
		.ai_ns_min = 8000,
		.rangelist_ai = &range_acl8112hg_ai,
		.IRQbits = 0xdcfc,
		.has_dma = 1,
		.has_dio = 1,
	}, {
		.name = "pcl813",
		.board_type = boardPCL813,
		.n_aichan = 32,
		.rangelist_ai = &range_pcl813b_ai,
	}, {
		.name = "pcl813b",
		.board_type = boardPCL813B,
		.n_aichan = 32,
		.rangelist_ai = &range_pcl813b_ai,
	}, {
		.name = "acl8113",
		.board_type = boardACL8113,
		.n_aichan = 32,
		.rangelist_ai = &range_acl8113_1_ai,
	}, {
		.name = "iso813",
		.board_type = boardISO813,
		.n_aichan = 32,
		.rangelist_ai = &range_iso813_1_ai,
	}, {
		.name = "acl8216",
		.board_type = boardACL8216,
		.n_aichan = 16,	/* 8 differential */
		.n_aochan = 2,
		.ai_ns_min = 10000,
		.rangelist_ai = &range_pcl813b2_ai,
		.IRQbits = 0xdcfc,
		.has_dma = 1,
		.has_16bit_ai = 1,
		.has_mpc508_mux = 1,
		.has_dio = 1,
	}, {
		.name = "a826pg",
		.board_type = boardACL8216,
		.n_aichan = 16,	/* 8 differential */
		.n_aochan = 2,
		.ai_ns_min = 10000,
		.rangelist_ai = &range_pcl813b2_ai,
		.IRQbits = 0xdcfc,
		.has_dma = 1,
		.has_16bit_ai = 1,
		.has_dio = 1,
	},
};
/* Run-time state of one attached pcl812-family board. */
struct pcl812_private {
	unsigned char dma;	/* >0 = ISA DMA channel in use, 0 = no DMA */
	unsigned char range_correction;	/* =1 we must add 1 to range number */
	unsigned int last_ai_chanspec;	/* last chan/range written to the hw mux */
	unsigned char mode_reg_int;	/* stored INT number for some cards */
	unsigned int ai_poll_ptr;	/* how many samples transferred by (*poll) */
	unsigned int ai_act_scan;	/* how many scans we have finished */
	unsigned int dmapages;		/* page allocation order of each DMA buffer */
	unsigned int hwdmasize;		/* size of each DMA buffer in bytes */
	unsigned long dmabuf[2];	/* virtual addresses of the two DMA buffers */
	unsigned int hwdmaptr[2];	/* bus addresses of the two DMA buffers */
	unsigned int dmabytestomove[2];	/* how many bytes each DMA transfer moves */
	int next_dma_buf;		/* which buffer is next to use */
	unsigned int dma_runs_to_end;	/* how many times we must switch DMA buffers */
	unsigned int last_dma_run;	/* how many bytes to transfer on last DMA buffer */
	unsigned int max_812_ai_mode0_rangewait;	/* settling time (us) after gain change */
	unsigned int divisor1;		/* 8254 pacer divisor, counter 1 */
	unsigned int divisor2;		/* 8254 pacer divisor, counter 2 */
	unsigned int use_diff:1;	/* differential analog inputs selected */
	unsigned int use_mpc508:1;	/* board routes the mux through an MPC508 */
	unsigned int use_ext_trg:1;	/* conversions paced by external trigger */
	unsigned int ai_dma:1;		/* flag: current AI command uses DMA */
	unsigned int ai_eos:1;		/* flag: wake up at every end-of-scan */
};
/*
 * Program the two cascaded 8254 pacer counters for rate generation
 * (mode 2) and, when requested, load the previously computed divisors.
 */
static void pcl812_start_pacer(struct comedi_device *dev, bool load_timers)
{
	struct pcl812_private *devpriv = dev->private;
	unsigned long timer_io = dev->iobase + PCL812_TIMER_BASE;

	i8254_set_mode(timer_io, 0, 2, I8254_MODE2 | I8254_BINARY);
	i8254_set_mode(timer_io, 0, 1, I8254_MODE2 | I8254_BINARY);
	udelay(1);

	if (!load_timers)
		return;

	/* load the divisors; counter 2 feeds counter 1 */
	i8254_write(timer_io, 0, 2, devpriv->divisor2);
	i8254_write(timer_io, 0, 1, devpriv->divisor1);
}
/*
 * Size the double-buffered transfer for an analog input command and
 * program/start the first ISA DMA transfer into buffer 0.
 */
static void pcl812_ai_setup_dma(struct comedi_device *dev,
				struct comedi_subdevice *s)
{
	struct pcl812_private *devpriv = dev->private;
	struct comedi_cmd *cmd = &s->async->cmd;
	unsigned int dma_flags;
	unsigned int bytes;

	/* we use EOS, so adapt DMA buffer to one scan */
	if (devpriv->ai_eos) {
		devpriv->dmabytestomove[0] = cfc_bytes_per_scan(s);
		devpriv->dmabytestomove[1] = cfc_bytes_per_scan(s);
		devpriv->dma_runs_to_end = 1;
	} else {
		devpriv->dmabytestomove[0] = devpriv->hwdmasize;
		devpriv->dmabytestomove[1] = devpriv->hwdmasize;
		/* never transfer more than fits in the comedi buffer */
		if (s->async->prealloc_bufsz < devpriv->hwdmasize) {
			devpriv->dmabytestomove[0] =
				s->async->prealloc_bufsz;
			devpriv->dmabytestomove[1] =
				s->async->prealloc_bufsz;
		}
		if (cmd->stop_src == TRIG_NONE) {
			devpriv->dma_runs_to_end = 1;
		} else {
			/* how many samples we must transfer? */
			bytes = cmd->stop_arg * cfc_bytes_per_scan(s);

			/* how many DMA pages we must fill */
			devpriv->dma_runs_to_end =
				bytes / devpriv->dmabytestomove[0];

			/* on last dma transfer must be moved */
			devpriv->last_dma_run =
				bytes % devpriv->dmabytestomove[0];
			if (devpriv->dma_runs_to_end == 0)
				devpriv->dmabytestomove[0] =
					devpriv->last_dma_run;
			devpriv->dma_runs_to_end--;
		}
	}

	/* clamp both halves to the physical DMA buffer size */
	if (devpriv->dmabytestomove[0] > devpriv->hwdmasize) {
		devpriv->dmabytestomove[0] = devpriv->hwdmasize;
		devpriv->ai_eos = 0;
	}
	if (devpriv->dmabytestomove[1] > devpriv->hwdmasize) {
		devpriv->dmabytestomove[1] = devpriv->hwdmasize;
		devpriv->ai_eos = 0;
	}

	/* program and start the first transfer into buffer 0 */
	devpriv->next_dma_buf = 0;
	set_dma_mode(devpriv->dma, DMA_MODE_READ);
	dma_flags = claim_dma_lock();
	clear_dma_ff(devpriv->dma);
	set_dma_addr(devpriv->dma, devpriv->hwdmaptr[0]);
	set_dma_count(devpriv->dma, devpriv->dmabytestomove[0]);
	release_dma_lock(dma_flags);
	enable_dma(devpriv->dma);
}
/*
 * Restart DMA on the other ping-pong buffer.  In EOS mode every run
 * moves a full buffer; otherwise the final run moves only the
 * remaining bytes.
 */
static void pcl812_ai_setup_next_dma(struct comedi_device *dev,
				     struct comedi_subdevice *s)
{
	struct pcl812_private *devpriv = dev->private;
	unsigned long flags;
	unsigned int buf;

	/* flip to the other ping-pong buffer */
	buf = 1 - devpriv->next_dma_buf;
	devpriv->next_dma_buf = buf;

	disable_dma(devpriv->dma);
	set_dma_mode(devpriv->dma, DMA_MODE_READ);

	flags = claim_dma_lock();
	set_dma_addr(devpriv->dma, devpriv->hwdmaptr[buf]);
	if (devpriv->ai_eos) {
		set_dma_count(devpriv->dma, devpriv->dmabytestomove[buf]);
	} else {
		unsigned int count = devpriv->dma_runs_to_end ?
			devpriv->dmabytestomove[buf] : devpriv->last_dma_run;

		set_dma_count(devpriv->dma, count);
		devpriv->dma_runs_to_end--;
	}
	release_dma_lock(flags);

	enable_dma(devpriv->dma);
}
/*
 * Program the input multiplexer and gain for a chan/range spec.
 * The hardware writes are skipped when the spec did not change.
 */
static void pcl812_ai_set_chan_range(struct comedi_device *dev,
				     unsigned int chanspec, char wait)
{
	struct pcl812_private *devpriv = dev->private;
	unsigned int chan = CR_CHAN(chanspec);
	unsigned int range = CR_RANGE(chanspec);
	unsigned int mux = 0;

	if (chanspec == devpriv->last_ai_chanspec)
		return;
	devpriv->last_ai_chanspec = chanspec;

	if (devpriv->use_mpc508) {
		if (devpriv->use_diff)
			mux = PCL812_MUX_CS0 | PCL812_MUX_CS1;
		else
			mux = (chan < 8) ? PCL812_MUX_CS0 : PCL812_MUX_CS1;
	}

	outb(mux | PCL812_MUX_CHAN(chan), dev->iobase + PCL812_MUX_REG);
	outb(range + devpriv->range_correction, dev->iobase + PCL812_RANGE_REG);

	/*
	 * XXX the settling time depends on the selected range and can be
	 * very long for some high gain ranges!
	 */
	if (wait)
		udelay(devpriv->max_812_ai_mode0_rangewait);
}
/* Any write to the status register acknowledges the interrupt request. */
static void pcl812_ai_clear_eoc(struct comedi_device *dev)
{
	outb(0, dev->iobase + PCL812_STATUS_REG);
}
/* Any write to the soft-trigger register starts an A/D conversion. */
static void pcl812_ai_soft_trig(struct comedi_device *dev)
{
	outb(255, dev->iobase + PCL812_SOFTTRIG_REG);
}
/*
 * Read one A/D sample.  The MSB register is read first, then the LSB,
 * and the result is masked to the subdevice resolution.
 */
static unsigned int pcl812_ai_get_sample(struct comedi_device *dev,
					 struct comedi_subdevice *s)
{
	unsigned int msb = inb(dev->iobase + PCL812_AI_MSB_REG);
	unsigned int lsb = inb(dev->iobase + PCL812_AI_LSB_REG);

	return ((msb << 8) | lsb) & s->maxdata;
}
/*
 * comedi_timeout() callback: 0 when the conversion has finished,
 * -EBUSY while the data-ready bit is still set.  16-bit boards report
 * DRDY in the status register, 12-bit boards in the A/D MSB register.
 */
static int pcl812_ai_eoc(struct comedi_device *dev,
			 struct comedi_subdevice *s,
			 struct comedi_insn *insn,
			 unsigned long context)
{
	unsigned int busy;

	if (s->maxdata > 0x0fff)
		busy = inb(dev->iobase + PCL812_STATUS_REG) &
		       PCL812_STATUS_DRDY;
	else
		busy = inb(dev->iobase + PCL812_AI_MSB_REG) &
		       PCL812_AI_MSB_DRDY;

	return busy ? -EBUSY : 0;
}
/*
 * Comedi (*do_cmdtest) for the analog input subdevice.  Validates an
 * async command in the standard comedi steps.  Conversions are paced
 * either by the on-board timer or, if the board was configured for
 * one, by an external trigger -- never both.
 */
static int pcl812_ai_cmdtest(struct comedi_device *dev,
			     struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
	const struct pcl812_board *board = dev->board_ptr;
	struct pcl812_private *devpriv = dev->private;
	int err = 0;
	unsigned int flags;
	unsigned int arg;

	/* Step 1 : check if triggers are trivially valid */

	err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
	err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_FOLLOW);

	if (devpriv->use_ext_trg)
		flags = TRIG_EXT;
	else
		flags = TRIG_TIMER;
	err |= cfc_check_trigger_src(&cmd->convert_src, flags);

	err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
	err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);

	if (err)
		return 1;

	/* Step 2a : make sure trigger sources are unique */

	err |= cfc_check_trigger_is_unique(cmd->stop_src);

	/* Step 2b : and mutually compatible */

	if (err)
		return 2;

	/* Step 3: check if arguments are trivially valid */

	err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
	err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);

	if (cmd->convert_src == TRIG_TIMER)
		err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
						 board->ai_ns_min);
	else	/* TRIG_EXT */
		err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);

	err |= cfc_check_trigger_arg_min(&cmd->chanlist_len, 1);
	err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);

	if (cmd->stop_src == TRIG_COUNT)
		err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
	else	/* TRIG_NONE */
		err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);

	if (err)
		return 3;

	/* step 4: fix up any arguments */

	if (cmd->convert_src == TRIG_TIMER) {
		/* round the requested period to what the pacer can do */
		arg = cmd->convert_arg;
		i8253_cascade_ns_to_timer(I8254_OSC_BASE_2MHZ,
					  &devpriv->divisor1,
					  &devpriv->divisor2,
					  &arg, cmd->flags);
		err |= cfc_check_trigger_arg_is(&cmd->convert_arg, arg);
	}

	if (err)
		return 4;

	return 0;
}
/*
 * Comedi (*do_cmd) for the analog input subdevice.  Decides between
 * DMA and per-sample interrupt transfers, optionally starts the
 * pacer, then enables the chosen trigger in the control register.
 */
static int pcl812_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct pcl812_private *devpriv = dev->private;
	struct comedi_cmd *cmd = &s->async->cmd;
	unsigned int ctrl = 0;
	unsigned int i;

	pcl812_start_pacer(dev, false);

	pcl812_ai_set_chan_range(dev, cmd->chanlist[0], 1);

	if (devpriv->dma) {	/* check if we can use DMA transfer */
		devpriv->ai_dma = 1;
		/* DMA only works when every entry is the same chan/range */
		for (i = 1; i < cmd->chanlist_len; i++)
			if (cmd->chanlist[0] != cmd->chanlist[i]) {
				/* we can't use DMA :-( */
				devpriv->ai_dma = 0;
				break;
			}
	} else {
		devpriv->ai_dma = 0;
	}

	devpriv->ai_act_scan = 0;
	devpriv->ai_poll_ptr = 0;
	s->async->cur_chan = 0;

	/* don't we want wake up every scan? */
	if (cmd->flags & CMDF_WAKE_EOS) {
		devpriv->ai_eos = 1;

		/* DMA is useless for this situation */
		if (cmd->chanlist_len == 1)
			devpriv->ai_dma = 0;
	}

	if (devpriv->ai_dma)
		pcl812_ai_setup_dma(dev, s);

	switch (cmd->convert_src) {
	case TRIG_TIMER:
		pcl812_start_pacer(dev, true);
		break;
	}

	/* select the transfer mechanism: DMA request or end-of-conversion IRQ */
	if (devpriv->ai_dma)
		ctrl |= PCL812_CTRL_PACER_DMA_TRIG;
	else
		ctrl |= PCL812_CTRL_PACER_EOC_TRIG;
	outb(devpriv->mode_reg_int | ctrl, dev->iobase + PCL812_CTRL_REG);

	return 0;
}
static bool pcl812_ai_next_chan(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct pcl812_private *devpriv = dev->private;
struct comedi_cmd *cmd = &s->async->cmd;
s->async->events |= COMEDI_CB_BLOCK;
s->async->cur_chan++;
if (s->async->cur_chan >= cmd->chanlist_len) {
s->async->cur_chan = 0;
devpriv->ai_act_scan++;
s->async->events |= COMEDI_CB_EOS;
}
if (cmd->stop_src == TRIG_COUNT &&
devpriv->ai_act_scan >= cmd->stop_arg) {
/* all data sampled */
s->async->events |= COMEDI_CB_EOA;
return false;
}
return true;
}
/*
 * Handle one end-of-conversion interrupt in non-DMA mode: read the
 * sample, pre-program the mux/range for the next chanlist entry and
 * update the async state.
 */
static void pcl812_handle_eoc(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct comedi_cmd *cmd = &s->async->cmd;
	unsigned int next_chan;

	/* the interrupt fired but the sample is not ready: abort with error */
	if (pcl812_ai_eoc(dev, s, NULL, 0)) {
		dev_dbg(dev->class_dev, "A/D cmd IRQ without DRDY!\n");
		s->async->events |= COMEDI_CB_EOA | COMEDI_CB_ERROR;
		return;
	}

	comedi_buf_put(s, pcl812_ai_get_sample(dev, s));

	/* Set up next channel. Added by abbotti 2010-01-20, but untested. */
	next_chan = s->async->cur_chan + 1;
	if (next_chan >= cmd->chanlist_len)
		next_chan = 0;
	if (cmd->chanlist[s->async->cur_chan] != cmd->chanlist[next_chan])
		pcl812_ai_set_chan_range(dev, cmd->chanlist[next_chan], 0);

	pcl812_ai_next_chan(dev, s);
}
/*
 * Copy 'len' samples starting at 'bufptr' from a DMA buffer into the
 * comedi buffer, stopping early if the acquisition completes.
 */
static void transfer_from_dma_buf(struct comedi_device *dev,
				  struct comedi_subdevice *s,
				  unsigned short *ptr,
				  unsigned int bufptr, unsigned int len)
{
	unsigned int n;

	for (n = 0; n < len; n++) {
		comedi_buf_put(s, ptr[bufptr + n]);
		if (!pcl812_ai_next_chan(dev, s))
			return;
	}
}
/*
 * Handle a DMA terminal-count interrupt: restart DMA on the other
 * ping-pong buffer, then drain the completed buffer (minus any
 * samples already consumed by (*poll)) into the comedi buffer.
 */
static void pcl812_handle_dma(struct comedi_device *dev,
			      struct comedi_subdevice *s)
{
	struct pcl812_private *devpriv = dev->private;
	int len, bufptr;
	unsigned short *ptr;

	ptr = (unsigned short *)devpriv->dmabuf[devpriv->next_dma_buf];
	/* samples in this buffer not yet consumed by pcl812_ai_poll() */
	len = (devpriv->dmabytestomove[devpriv->next_dma_buf] >> 1) -
	      devpriv->ai_poll_ptr;

	pcl812_ai_setup_next_dma(dev, s);

	bufptr = devpriv->ai_poll_ptr;
	devpriv->ai_poll_ptr = 0;

	transfer_from_dma_buf(dev, s, ptr, bufptr, len);
}
/*
 * IRQ handler shared by DMA and end-of-conversion transfer modes.
 * Writing the status register acknowledges the interrupt.
 */
static irqreturn_t pcl812_interrupt(int irq, void *d)
{
	struct comedi_device *dev = d;
	struct comedi_subdevice *s = dev->read_subdev;
	struct pcl812_private *devpriv = dev->private;

	/* the interrupt may fire before the device is fully attached */
	if (!dev->attached) {
		pcl812_ai_clear_eoc(dev);
		return IRQ_HANDLED;
	}

	if (devpriv->ai_dma)
		pcl812_handle_dma(dev, s);
	else
		pcl812_handle_eoc(dev, s);

	pcl812_ai_clear_eoc(dev);

	cfc_handle_events(dev, s);

	return IRQ_HANDLED;
}
/*
 * Comedi (*poll) for the analog input subdevice: drain any samples
 * the current DMA transfer has already deposited into the active
 * buffer without waiting for the terminal-count interrupt.
 *
 * Fix: the DMA channel number is devpriv->dma.  The original code
 * passed devpriv->ai_dma -- a one-bit "command uses DMA" flag -- to
 * get_dma_residue(), so the residue was read from the wrong DMA
 * channel and the reported progress was garbage.
 */
static int pcl812_ai_poll(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct pcl812_private *devpriv = dev->private;
	unsigned long flags;
	unsigned int top1, top2, i;

	/* poll is valid only for DMA transfer */
	if (!devpriv->ai_dma)
		return 0;

	spin_lock_irqsave(&dev->spinlock, flags);

	/*
	 * Read the residue twice until it is stable; a mismatch means the
	 * controller updated the count between the two byte reads.
	 */
	for (i = 0; i < 10; i++) {
		top1 = get_dma_residue(devpriv->dma);
		top2 = get_dma_residue(devpriv->dma);
		if (top1 == top2)
			break;
	}
	if (top1 != top2) {
		spin_unlock_irqrestore(&dev->spinlock, flags);
		return 0;
	}

	/* convert the residue into a sample position within the buffer */
	top1 = devpriv->dmabytestomove[1 - devpriv->next_dma_buf] - top1;
	top1 >>= 1;		/* sample position */
	top2 = top1 - devpriv->ai_poll_ptr;
	if (top2 < 1) {		/* no new samples */
		spin_unlock_irqrestore(&dev->spinlock, flags);
		return 0;
	}

	transfer_from_dma_buf(dev, s,
			      (void *)devpriv->dmabuf[1 -
						      devpriv->next_dma_buf],
			      devpriv->ai_poll_ptr, top2);

	devpriv->ai_poll_ptr = top1;	/* new buffer position */

	spin_unlock_irqrestore(&dev->spinlock, flags);

	return comedi_buf_n_bytes_ready(s);
}
/*
 * Comedi (*cancel) for the analog input subdevice: stop any DMA in
 * flight, disable the trigger, stop the pacer and ack the interrupt.
 */
static int pcl812_ai_cancel(struct comedi_device *dev,
			    struct comedi_subdevice *s)
{
	struct pcl812_private *devpriv = dev->private;

	if (devpriv->ai_dma)
		disable_dma(devpriv->dma);

	outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
	     dev->iobase + PCL812_CTRL_REG);

	pcl812_start_pacer(dev, false);
	pcl812_ai_clear_eoc(dev);

	return 0;
}
/*
 * Comedi (*insn_read) for the analog input subdevice: software-
 * triggered single conversions, one per requested sample.
 */
static int pcl812_ai_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn,
			       unsigned int *data)
{
	struct pcl812_private *devpriv = dev->private;
	int ret = 0;
	int i;

	/* enable software triggering and select the channel/range */
	outb(devpriv->mode_reg_int | PCL812_CTRL_SOFT_TRIG,
	     dev->iobase + PCL812_CTRL_REG);
	pcl812_ai_set_chan_range(dev, insn->chanspec, 1);

	for (i = 0; i < insn->n; i++) {
		pcl812_ai_clear_eoc(dev);
		pcl812_ai_soft_trig(dev);

		ret = comedi_timeout(dev, s, insn, pcl812_ai_eoc, 0);
		if (ret)
			break;

		data[i] = pcl812_ai_get_sample(dev, s);
	}

	/* return the board to the idle (trigger disabled) state */
	outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
	     dev->iobase + PCL812_CTRL_REG);
	pcl812_ai_clear_eoc(dev);

	return ret ? ret : insn->n;
}
/*
 * Comedi (*insn_write) for the analog output subdevice.  Each 12-bit
 * value is written LSB first, then the high nibble; the last value
 * written is cached for readback.
 */
static int pcl812_ao_insn_write(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned long lsb_reg = dev->iobase + PCL812_AO_LSB_REG(chan);
	unsigned long msb_reg = dev->iobase + PCL812_AO_MSB_REG(chan);
	unsigned int val = s->readback[chan];
	int i;

	for (i = 0; i < insn->n; i++) {
		val = data[i];
		outb(val & 0xff, lsb_reg);
		outb((val >> 8) & 0x0f, msb_reg);
	}
	s->readback[chan] = val;

	return insn->n;
}
/* Comedi (*insn_bits) for the digital inputs: read all 16 lines. */
static int pcl812_di_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn,
			       unsigned int *data)
{
	unsigned int val;

	val = inb(dev->iobase + PCL812_DI_LSB_REG);
	val |= inb(dev->iobase + PCL812_DI_MSB_REG) << 8;
	data[1] = val;

	return insn->n;
}
/*
 * Comedi (*insn_bits) for the digital outputs: update the hardware
 * only when some output bits actually changed.
 */
static int pcl812_do_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn,
			       unsigned int *data)
{
	if (comedi_dio_update_state(s, data)) {
		outb(s->state & 0xff, dev->iobase + PCL812_DO_LSB_REG);
		outb(s->state >> 8, dev->iobase + PCL812_DO_MSB_REG);
	}

	data[1] = s->state;

	return insn->n;
}
/*
 * Put the board into a known quiescent state: triggers disabled,
 * pacer stopped, mux on channel 0/range 0, analog outputs at 0 and
 * digital outputs low.
 */
static void pcl812_reset(struct comedi_device *dev)
{
	const struct pcl812_board *board = dev->board_ptr;
	struct pcl812_private *devpriv = dev->private;
	unsigned int chan;

	/* disable analog input trigger */
	outb(devpriv->mode_reg_int | PCL812_CTRL_DISABLE_TRIG,
	     dev->iobase + PCL812_CTRL_REG);
	pcl812_ai_clear_eoc(dev);

	/* stop pacer (present only on boards that can generate interrupts) */
	if (board->IRQbits)
		pcl812_start_pacer(dev, false);

	/*
	 * Invalidate last_ai_chanspec then set analog input to
	 * known channel/range.
	 */
	devpriv->last_ai_chanspec = CR_PACK(16, 0, 0);
	pcl812_ai_set_chan_range(dev, CR_PACK(0, 0, 0), 0);

	/* set analog output channels to 0V */
	for (chan = 0; chan < board->n_aochan; chan++) {
		outb(0, dev->iobase + PCL812_AO_LSB_REG(chan));
		outb(0, dev->iobase + PCL812_AO_MSB_REG(chan));
	}

	/* set all digital outputs low */
	if (board->has_dio) {
		outb(0, dev->iobase + PCL812_DO_MSB_REG);
		outb(0, dev->iobase + PCL812_DO_LSB_REG);
	}
}
/*
 * Select the analog input range table.  The default comes from the
 * boardinfo; some board types allow a user configuration option to
 * pick an alternate table.  For the "2" variants of the ISO813 and
 * ACL8113 tables a +1 correction is added to the range code written
 * to the hardware.
 */
static void pcl812_set_ai_range_table(struct comedi_device *dev,
				      struct comedi_subdevice *s,
				      struct comedi_devconfig *it)
{
	const struct pcl812_board *board = dev->board_ptr;
	struct pcl812_private *devpriv = dev->private;

	/* default to the range table from the boardinfo */
	s->range_table = board->rangelist_ai;

	/* now check the user config option based on the boardtype */
	switch (board->board_type) {
	case boardPCL812PG:
		if (it->options[4] == 1)
			s->range_table = &range_pcl812pg2_ai;
		break;
	case boardPCL812:
		switch (it->options[4]) {
		case 0:
			s->range_table = &range_bipolar10;
			break;
		case 1:
			s->range_table = &range_bipolar5;
			break;
		case 2:
			s->range_table = &range_bipolar2_5;
			break;
		case 3:
			s->range_table = &range812_bipolar1_25;
			break;
		case 4:
			s->range_table = &range812_bipolar0_625;
			break;
		case 5:
			s->range_table = &range812_bipolar0_3125;
			break;
		default:
			s->range_table = &range_bipolar10;
			break;
		}
		break;
	case boardPCL813B:
		if (it->options[1] == 1)
			s->range_table = &range_pcl813b2_ai;
		break;
	case boardISO813:
		switch (it->options[1]) {
		case 0:
			s->range_table = &range_iso813_1_ai;
			break;
		case 1:
			s->range_table = &range_iso813_1_2_ai;
			break;
		case 2:
			s->range_table = &range_iso813_2_ai;
			devpriv->range_correction = 1;
			break;
		case 3:
			s->range_table = &range_iso813_2_2_ai;
			devpriv->range_correction = 1;
			break;
		default:
			s->range_table = &range_iso813_1_ai;
			break;
		}
		break;
	case boardACL8113:
		switch (it->options[1]) {
		case 0:
			s->range_table = &range_acl8113_1_ai;
			break;
		case 1:
			s->range_table = &range_acl8113_1_2_ai;
			break;
		case 2:
			s->range_table = &range_acl8113_2_ai;
			devpriv->range_correction = 1;
			break;
		case 3:
			s->range_table = &range_acl8113_2_2_ai;
			devpriv->range_correction = 1;
			break;
		default:
			s->range_table = &range_acl8113_1_ai;
			break;
		}
		break;
	}
}
/*
 * Legacy (*attach) handler.  it->options[] carries the user
 * configuration: [0] = I/O base, [1] = IRQ, and the remaining options
 * are board dependent (see the switch statements below).  Resources
 * acquired here are released by pcl812_detach() on any failure path.
 */
static int pcl812_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	const struct pcl812_board *board = dev->board_ptr;
	struct pcl812_private *devpriv;
	struct comedi_subdevice *s;
	int n_subdevices;
	int subdev;
	int ret;
	int i;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	ret = comedi_request_region(dev, it->options[0], 0x10);
	if (ret)
		return ret;

	/* request the IRQ only if the board supports it; failure is not fatal */
	if ((1 << it->options[1]) & board->IRQbits) {
		ret = request_irq(it->options[1], pcl812_interrupt, 0,
				  dev->board_name, dev);
		if (ret == 0)
			dev->irq = it->options[1];
	}

	/* we need an IRQ to do DMA on channel 3 or 1 */
	if (dev->irq && board->has_dma &&
	    (it->options[2] == 3 || it->options[2] == 1)) {
		ret = request_dma(it->options[2], dev->board_name);
		if (ret) {
			dev_err(dev->class_dev,
				"unable to request DMA channel %d\n",
				it->options[2]);
			return -EBUSY;
		}
		devpriv->dma = it->options[2];

		devpriv->dmapages = 1;	/* we want 8KB */
		devpriv->hwdmasize = (1 << devpriv->dmapages) * PAGE_SIZE;

		/* allocate the two ping-pong DMA buffers */
		for (i = 0; i < 2; i++) {
			unsigned long dmabuf;

			dmabuf = __get_dma_pages(GFP_KERNEL, devpriv->dmapages);
			if (!dmabuf)
				return -ENOMEM;

			devpriv->dmabuf[i] = dmabuf;
			devpriv->hwdmaptr[i] = virt_to_bus((void *)dmabuf);
		}
	}

	/* differential analog inputs? */
	switch (board->board_type) {
	case boardA821:
		if (it->options[2] == 1)
			devpriv->use_diff = 1;
		break;
	case boardACL8112:
	case boardACL8216:
		if (it->options[4] == 1)
			devpriv->use_diff = 1;
		break;
	}

	n_subdevices = 1;	/* all boardtypes have analog inputs */
	if (board->n_aochan > 0)
		n_subdevices++;
	if (board->has_dio)
		n_subdevices += 2;

	ret = comedi_alloc_subdevices(dev, n_subdevices);
	if (ret)
		return ret;

	subdev = 0;

	/* Analog Input subdevice */
	s = &dev->subdevices[subdev];
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE;
	if (devpriv->use_diff) {
		s->subdev_flags |= SDF_DIFF;
		s->n_chan = board->n_aichan / 2;
	} else {
		s->subdev_flags |= SDF_GROUND;
		s->n_chan = board->n_aichan;
	}
	s->maxdata = board->has_16bit_ai ? 0xffff : 0x0fff;
	pcl812_set_ai_range_table(dev, s, it);
	s->insn_read = pcl812_ai_insn_read;
	/* async command support requires an interrupt */
	if (dev->irq) {
		dev->read_subdev = s;
		s->subdev_flags |= SDF_CMD_READ;
		s->len_chanlist = MAX_CHANLIST_LEN;
		s->do_cmdtest = pcl812_ai_cmdtest;
		s->do_cmd = pcl812_ai_cmd;
		s->poll = pcl812_ai_poll;
		s->cancel = pcl812_ai_cancel;
	}
	devpriv->use_mpc508 = board->has_mpc508_mux;

	subdev++;

	/* analog output */
	if (board->n_aochan > 0) {
		s = &dev->subdevices[subdev];
		s->type = COMEDI_SUBD_AO;
		s->subdev_flags = SDF_WRITABLE | SDF_GROUND;
		s->n_chan = board->n_aochan;
		s->maxdata = 0xfff;
		s->range_table = &range_unipolar5;
		/* a user option may select an alternate output range */
		switch (board->board_type) {
		case boardA821:
			if (it->options[3] == 1)
				s->range_table = &range_unipolar10;
			break;
		case boardPCL812:
		case boardACL8112:
		case boardPCL812PG:
		case boardACL8216:
			if (it->options[5] == 1)
				s->range_table = &range_unipolar10;
			if (it->options[5] == 2)
				s->range_table = &range_unknown;
			break;
		}
		s->insn_write = pcl812_ao_insn_write;
		s->insn_read = comedi_readback_insn_read;

		ret = comedi_alloc_subdev_readback(s);
		if (ret)
			return ret;

		subdev++;
	}

	if (board->has_dio) {
		/* Digital Input subdevice */
		s = &dev->subdevices[subdev];
		s->type = COMEDI_SUBD_DI;
		s->subdev_flags = SDF_READABLE;
		s->n_chan = 16;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_bits = pcl812_di_insn_bits;
		subdev++;

		/* Digital Output subdevice */
		s = &dev->subdevices[subdev];
		s->type = COMEDI_SUBD_DO;
		s->subdev_flags = SDF_WRITABLE;
		s->n_chan = 16;
		s->maxdata = 1;
		s->range_table = &range_digital;
		s->insn_bits = pcl812_do_insn_bits;
		subdev++;
	}

	/* board-type specific settling time and trigger/mode setup */
	switch (board->board_type) {
	case boardACL8216:
	case boardPCL812PG:
	case boardPCL812:
	case boardACL8112:
		devpriv->max_812_ai_mode0_rangewait = 1;
		if (it->options[3] > 0)
			/* we use external trigger */
			devpriv->use_ext_trg = 1;
		break;
	case boardA821:
		devpriv->max_812_ai_mode0_rangewait = 1;
		devpriv->mode_reg_int = (dev->irq << 4) & 0xf0;
		break;
	case boardPCL813B:
	case boardPCL813:
	case boardISO813:
	case boardACL8113:
		/* these boards may need a longer settling time */
		devpriv->max_812_ai_mode0_rangewait = 5;
		break;
	}

	pcl812_reset(dev);

	return 0;
}
/*
 * Release the DMA buffers and channel (if any) before the generic
 * legacy cleanup frees the I/O region and IRQ.
 */
static void pcl812_detach(struct comedi_device *dev)
{
	struct pcl812_private *devpriv = dev->private;

	if (devpriv) {
		int i;

		for (i = 0; i < 2; i++) {
			if (devpriv->dmabuf[i])
				free_pages(devpriv->dmabuf[i],
					   devpriv->dmapages);
		}
		if (devpriv->dma)
			free_dma(devpriv->dma);
	}
	comedi_legacy_detach(dev);
}
/*
 * Legacy comedi driver registration: the board is identified by name
 * from the boardtypes table, stepping by sizeof(struct pcl812_board).
 */
static struct comedi_driver pcl812_driver = {
	.driver_name	= "pcl812",
	.module		= THIS_MODULE,
	.attach		= pcl812_attach,
	.detach		= pcl812_detach,
	.board_name	= &boardtypes[0].name,
	.num_names	= ARRAY_SIZE(boardtypes),
	.offset		= sizeof(struct pcl812_board),
};
module_comedi_driver(pcl812_driver);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
msdx321/android_kernel_xiaomi_msm8996 | drivers/staging/comedi/drivers/s526.c | 331 | 15868 | /*
comedi/drivers/s526.c
Sensoray s526 Comedi driver
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
*/
/*
Driver: s526
Description: Sensoray 526 driver
Devices: [Sensoray] 526 (s526)
Author: Richie
Everett Wang <everett.wang@everteq.com>
Updated: Thu, 14 Sep. 2006
Status: experimental
Encoder works
Analog input works
Analog output works
PWM output works
Commands are not supported yet.
Configuration Options:
comedi_config /dev/comedi0 s526 0x2C0,0x3
*/
#include <linux/module.h>
#include "../comedidev.h"
#include <asm/byteorder.h>
#define S526_START_AI_CONV	0
#define S526_AI_READ		0

/* Ports */
#define S526_NUM_PORTS		27

/* registers (16-bit words, offsets from dev->iobase) */
#define REG_TCR 0x00
#define REG_WDC 0x02
#define REG_DAC 0x04		/* DAC channel select / conversion start */
#define REG_ADC 0x06		/* ADC channel select / conversion start */
#define REG_ADD 0x08		/* ADC/DAC data register */
#define REG_DIO 0x0A		/* digital I/O state */
#define REG_IER 0x0C		/* interrupt enable register */
#define REG_ISR 0x0E		/* interrupt status register */
#define REG_MSC 0x10
/* counter channel n register block at 0x12 + n*8: low word, high word,
 * mode and control registers (driver code adds chan*8 to the C0x base) */
#define REG_C0L 0x12
#define REG_C0H 0x14
#define REG_C0M 0x16
#define REG_C0C 0x18
#define REG_C1L 0x1A
#define REG_C1H 0x1C
#define REG_C1M 0x1E
#define REG_C1C 0x20
#define REG_C2L 0x22
#define REG_C2H 0x24
#define REG_C2M 0x26
#define REG_C2C 0x28
#define REG_C3L 0x2A
#define REG_C3H 0x2C
#define REG_C3M 0x2E
#define REG_C3C 0x30
#define REG_EED 0x32
#define REG_EEC 0x34
/*
 * Counter mode register layout.  The two bitfield orders mirror each
 * other so the register can be filled in field-by-field on either
 * endianness and then written as a single 16-bit value through the
 * union below.
 */
struct counter_mode_register_t {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	unsigned short coutSource:1;
	unsigned short coutPolarity:1;
	unsigned short autoLoadResetRcap:3;
	unsigned short hwCtEnableSource:2;
	unsigned short ctEnableCtrl:2;
	unsigned short clockSource:2;
	unsigned short countDir:1;
	unsigned short countDirCtrl:1;
	unsigned short outputRegLatchCtrl:1;
	unsigned short preloadRegSel:1;
	unsigned short reserved:1;
#elif defined(__BIG_ENDIAN_BITFIELD)
	unsigned short reserved:1;
	unsigned short preloadRegSel:1;
	unsigned short outputRegLatchCtrl:1;
	unsigned short countDirCtrl:1;
	unsigned short countDir:1;
	unsigned short clockSource:2;
	unsigned short ctEnableCtrl:2;
	unsigned short hwCtEnableSource:2;
	unsigned short autoLoadResetRcap:3;
	unsigned short coutPolarity:1;
	unsigned short coutSource:1;
#else
#error Unknown bit field order
#endif
};

/* Access the counter mode register by field or as one raw word. */
union cmReg {
	struct counter_mode_register_t reg;
	unsigned short value;
};
/* Per-device private data. */
struct s526_private {
	unsigned int gpct_config[4];	/* configured application type per counter */
	unsigned short ai_config;	/* cached ADC configuration bits */
};
/*
 * Read the 24-bit value of a counter channel, low word first, for
 * each requested sample.
 */
static int s526_gpct_rinsn(struct comedi_device *dev,
			   struct comedi_subdevice *s,
			   struct comedi_insn *insn,
			   unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned long chan_iobase = dev->iobase + chan * 8;
	int i;

	for (i = 0; i < insn->n; i++) {
		/* Read the low word first */
		unsigned int lo = inw(chan_iobase + REG_C0L) & 0xffff;
		unsigned int hi = inw(chan_iobase + REG_C0H) & 0xff;

		data[i] = (hi << 16) | lo;
	}

	return insn->n;
}
/*
 * Configure a general purpose counter/timer channel.  data[0] selects
 * the application type (quadrature encoder, single pulse generator or
 * pulse train generator); the remaining data[] words carry the mode,
 * pre-load and control register values documented in each case.
 */
static int s526_gpct_insn_config(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn,
				 unsigned int *data)
{
	struct s526_private *devpriv = dev->private;
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned long chan_iobase = dev->iobase + chan * 8;
	unsigned int val;
	union cmReg cmReg;

	/*
	 * Check what type of Counter the user requested;
	 * data[0] contains the Application type.
	 */
	switch (data[0]) {
	case INSN_CONFIG_GPCT_QUADRATURE_ENCODER:
		/*
		 * data[0]: Application Type
		 * data[1]: Counter Mode Register Value
		 * data[2]: Pre-load Register Value
		 * data[3]: Counter Control Register
		 */
		devpriv->gpct_config[chan] = data[0];

#if 0
		/* Example of Counter Application */
		/* One-shot (software trigger) */
		cmReg.reg.coutSource = 0;	/* out RCAP */
		cmReg.reg.coutPolarity = 1;	/* Polarity inverted */
		cmReg.reg.autoLoadResetRcap = 0;/* Auto load disabled */
		cmReg.reg.hwCtEnableSource = 3;	/* NOT RCAP */
		cmReg.reg.ctEnableCtrl = 2;	/* Hardware */
		cmReg.reg.clockSource = 2;	/* Internal */
		cmReg.reg.countDir = 1;	/* Down */
		cmReg.reg.countDirCtrl = 1;	/* Software */
		cmReg.reg.outputRegLatchCtrl = 0;	/* latch on read */
		cmReg.reg.preloadRegSel = 0;	/* PR0 */
		cmReg.reg.reserved = 0;

		outw(cmReg.value, chan_iobase + REG_C0M);

		outw(0x0001, chan_iobase + REG_C0H);
		outw(0x3C68, chan_iobase + REG_C0L);

		/* Reset the counter */
		outw(0x8000, chan_iobase + REG_C0C);
		/* Load the counter from PR0 */
		outw(0x4000, chan_iobase + REG_C0C);

		/* Reset RCAP (fires one-shot) */
		outw(0x0008, chan_iobase + REG_C0C);
#endif

#if 1
		/* Set Counter Mode Register */
		cmReg.value = data[1] & 0xffff;
		outw(cmReg.value, chan_iobase + REG_C0M);

		/* Reset the counter if it is software preload */
		if (cmReg.reg.autoLoadResetRcap == 0) {
			/* Reset the counter */
			outw(0x8000, chan_iobase + REG_C0C);
			/* Load the counter from PR0
			 * outw(0x4000, chan_iobase + REG_C0C);
			 */
		}
#else
		/* 0 quadrature, 1 software control */
		cmReg.reg.countDirCtrl = 0;

		/* data[1] contains GPCT_X1, GPCT_X2 or GPCT_X4 */
		if (data[1] == GPCT_X2)
			cmReg.reg.clockSource = 1;
		else if (data[1] == GPCT_X4)
			cmReg.reg.clockSource = 2;
		else
			cmReg.reg.clockSource = 0;

		/* When to take into account the indexpulse: */
		/*if (data[2] == GPCT_IndexPhaseLowLow) {
		} else if (data[2] == GPCT_IndexPhaseLowHigh) {
		} else if (data[2] == GPCT_IndexPhaseHighLow) {
		} else if (data[2] == GPCT_IndexPhaseHighHigh) {
		}*/
		/* Take into account the index pulse? */
		if (data[3] == GPCT_RESET_COUNTER_ON_INDEX)
			/* Auto load with INDEX^ */
			cmReg.reg.autoLoadResetRcap = 4;

		/* Set Counter Mode Register */
		cmReg.value = data[1] & 0xffff;
		outw(cmReg.value, chan_iobase + REG_C0M);

		/* Load the pre-load register high word */
		val = (data[2] >> 16) & 0xffff;
		outw(val, chan_iobase + REG_C0H);

		/* Load the pre-load register low word */
		val = data[2] & 0xffff;
		outw(val, chan_iobase + REG_C0L);

		/* Write the Counter Control Register */
		if (data[3]) {
			val = data[3] & 0xffff;
			outw(val, chan_iobase + REG_C0C);
		}

		/* Reset the counter if it is software preload */
		if (cmReg.reg.autoLoadResetRcap == 0) {
			/* Reset the counter */
			outw(0x8000, chan_iobase + REG_C0C);
			/* Load the counter from PR0 */
			outw(0x4000, chan_iobase + REG_C0C);
		}
#endif
		break;

	case INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR:
		/*
		 * data[0]: Application Type
		 * data[1]: Counter Mode Register Value
		 * data[2]: Pre-load Register 0 Value
		 * data[3]: Pre-load Register 1 Value
		 * data[4]: Counter Control Register
		 */
		devpriv->gpct_config[chan] = data[0];

		/* Set Counter Mode Register */
		cmReg.value = data[1] & 0xffff;
		cmReg.reg.preloadRegSel = 0;	/* PR0 */
		outw(cmReg.value, chan_iobase + REG_C0M);

		/* Load the pre-load register 0 high word */
		val = (data[2] >> 16) & 0xffff;
		outw(val, chan_iobase + REG_C0H);

		/* Load the pre-load register 0 low word */
		val = data[2] & 0xffff;
		outw(val, chan_iobase + REG_C0L);

		/* Set Counter Mode Register */
		cmReg.value = data[1] & 0xffff;
		cmReg.reg.preloadRegSel = 1;	/* PR1 */
		outw(cmReg.value, chan_iobase + REG_C0M);

		/* Load the pre-load register 1 high word */
		val = (data[3] >> 16) & 0xffff;
		outw(val, chan_iobase + REG_C0H);

		/* Load the pre-load register 1 low word */
		val = data[3] & 0xffff;
		outw(val, chan_iobase + REG_C0L);

		/* Write the Counter Control Register */
		if (data[4]) {
			val = data[4] & 0xffff;
			outw(val, chan_iobase + REG_C0C);
		}
		break;

	case INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR:
		/*
		 * data[0]: Application Type
		 * data[1]: Counter Mode Register Value
		 * data[2]: Pre-load Register 0 Value
		 * data[3]: Pre-load Register 1 Value
		 * data[4]: Counter Control Register
		 */
		devpriv->gpct_config[chan] = data[0];

		/* Set Counter Mode Register */
		cmReg.value = data[1] & 0xffff;
		cmReg.reg.preloadRegSel = 0;	/* PR0 */
		outw(cmReg.value, chan_iobase + REG_C0M);

		/* Load the pre-load register 0 high word */
		val = (data[2] >> 16) & 0xffff;
		outw(val, chan_iobase + REG_C0H);

		/* Load the pre-load register 0 low word */
		val = data[2] & 0xffff;
		outw(val, chan_iobase + REG_C0L);

		/* Set Counter Mode Register */
		cmReg.value = data[1] & 0xffff;
		cmReg.reg.preloadRegSel = 1;	/* PR1 */
		outw(cmReg.value, chan_iobase + REG_C0M);

		/* Load the pre-load register 1 high word */
		val = (data[3] >> 16) & 0xffff;
		outw(val, chan_iobase + REG_C0H);

		/* Load the pre-load register 1 low word */
		val = data[3] & 0xffff;
		outw(val, chan_iobase + REG_C0L);

		/* Write the Counter Control Register */
		if (data[4]) {
			val = data[4] & 0xffff;
			outw(val, chan_iobase + REG_C0C);
		}
		break;

	default:
		return -EINVAL;
	}

	return insn->n;
}
/*
 * Write to a counter channel.  For the pulse train generator the two
 * data words are first validated as pulse width/period; all supported
 * application types then load data[0] into the 24-bit pre-load
 * register (high word, then low word).
 */
static int s526_gpct_winsn(struct comedi_device *dev,
			   struct comedi_subdevice *s,
			   struct comedi_insn *insn,
			   unsigned int *data)
{
	struct s526_private *devpriv = dev->private;
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned long chan_iobase = dev->iobase + chan * 8;

	inw(chan_iobase + REG_C0M);	/* Is this read required? */

	/* Check what Application of Counter this channel is configured for */
	switch (devpriv->gpct_config[chan]) {
	case INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR:
		/*
		 * data[0] contains the PULSE_WIDTH
		 * data[1] contains the PULSE_PERIOD
		 * @pre PULSE_PERIOD > PULSE_WIDTH > 0
		 * The above periods must be expressed as a multiple of the
		 * pulse frequency on the selected source
		 */
		if ((data[1] <= data[0]) || !data[0])
			return -EINVAL;
		/* Fall thru to write the PULSE_WIDTH */
	case INSN_CONFIG_GPCT_QUADRATURE_ENCODER:
	case INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR:
		outw((data[0] >> 16) & 0xffff, chan_iobase + REG_C0H);
		outw(data[0] & 0xffff, chan_iobase + REG_C0L);
		break;

	default:
		return -EINVAL;
	}

	return insn->n;
}
#define ISR_ADC_DONE 0x4	/* REG_ISR/REG_IER bit: ADC conversion complete */
/*
 * Comedi (*insn_config) for the analog input subdevice.
 * data[0]: bitmask of channels to enable (10 bits)
 * data[1]: delay (non-zero sets the delay bit)
 *
 * COMMENT: abbotti 2008-07-24: I don't know why you'd want to
 * enable channels here. The channel should be enabled in the
 * INSN_READ handler.
 */
static int s526_ai_insn_config(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	struct s526_private *devpriv = dev->private;
	unsigned short config;

	if (insn->n < 1)
		return -EINVAL;

	/* Enable ADC interrupt */
	outw(ISR_ADC_DONE, dev->iobase + REG_IER);

	config = (data[0] & 0x3ff) << 5;	/* channel enable bits */
	if (data[1] > 0)
		config |= 0x8000;		/* set the delay */
	config |= 0x0001;			/* ADC start bit */
	devpriv->ai_config = config;

	return insn->n;
}
/*
 * comedi_timeout() callback: has the current ADC conversion finished?
 * Returns 0 once the ADC-done bit is set in the interrupt status
 * register, -EBUSY while the conversion is still in flight.
 */
static int s526_ai_eoc(struct comedi_device *dev,
		       struct comedi_subdevice *s,
		       struct comedi_insn *insn,
		       unsigned long context)
{
	unsigned int isr = inw(dev->iobase + REG_ISR);

	return (isr & ISR_ADC_DONE) ? 0 : -EBUSY;
}
/*
 * Analog input read handler: performs insn->n conversions on the
 * requested channel and returns the (offset-binary-munged) samples.
 */
static int s526_ai_rinsn(struct comedi_device *dev, struct comedi_subdevice *s,
			 struct comedi_insn *insn, unsigned int *data)
{
	struct s526_private *devpriv = dev->private;
	unsigned int chan = CR_CHAN(insn->chanspec);
	int n;
	unsigned short value;
	unsigned int d;
	int ret;

	/* Set configured delay, enable channel for this channel only,
	 * select "ADC read" channel, set "ADC start" bit. */
	value = (devpriv->ai_config & 0x8000) |
		((1 << 5) << chan) | (chan << 1) | 0x0001;

	/* convert n samples */
	for (n = 0; n < insn->n; n++) {
		/* trigger conversion */
		outw(value, dev->iobase + REG_ADC);

		/* wait for conversion to end */
		ret = comedi_timeout(dev, s, insn, s526_ai_eoc, 0);
		if (ret)
			return ret;

		/* acknowledge the ADC-done interrupt */
		outw(ISR_ADC_DONE, dev->iobase + REG_ISR);

		/* read data */
		d = inw(dev->iobase + REG_ADD);

		/* munge data: flip the sign bit (two's complement -> offset binary) */
		data[n] = d ^ 0x8000;
	}

	/* return the number of samples read/written */
	return n;
}
/*
 * Analog output write handler: writes each sample to the DAC data
 * register and pulses the start bit to trigger the conversion.  The
 * last value written is cached in s->readback[] for readback support.
 */
static int s526_ao_insn_write(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn,
			      unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int val = s->readback[chan];
	int i;

	/* select the DAC channel (start bit clear) */
	outw(chan << 1, dev->iobase + REG_DAC);

	for (i = 0; i < insn->n; i++) {
		val = data[i];
		outw(val, dev->iobase + REG_ADD);
		/* starts the D/A conversion */
		outw((chan << 1) | 1, dev->iobase + REG_DAC);
	}
	s->readback[chan] = val;

	return insn->n;
}
/*
 * DIO bits handler: update output latches if any masked bits changed,
 * then return the current pin state (lower 8 bits) in data[1].
 */
static int s526_dio_insn_bits(struct comedi_device *dev,
			      struct comedi_subdevice *s,
			      struct comedi_insn *insn,
			      unsigned int *data)
{
	/* only touch the hardware when state actually changed */
	if (comedi_dio_update_state(s, data))
		outw(s->state, dev->iobase + REG_DIO);

	data[1] = inw(dev->iobase + REG_DIO) & 0xff;

	return insn->n;
}
/*
 * DIO config handler.  The 8 channels are split into two groups of 4
 * whose direction is controlled as a unit: bits 10/11 of the DIO
 * register select output mode for group 1 (chans 0-3) / group 2
 * (chans 4-7) respectively.
 */
static int s526_dio_insn_config(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn,
				unsigned int *data)
{
	unsigned int chan = CR_CHAN(insn->chanspec);
	unsigned int mask;
	int ret;

	/* direction is per 4-channel group, not per channel */
	if (chan < 4)
		mask = 0x0f;
	else
		mask = 0xf0;

	ret = comedi_dio_insn_config(dev, s, insn, data, mask);
	if (ret)
		return ret;

	/* bit 10/11 set the group 1/2's mode */
	if (s->io_bits & 0x0f)
		s->state |= (1 << 10);
	else
		s->state &= ~(1 << 10);
	if (s->io_bits & 0xf0)
		s->state |= (1 << 11);
	else
		s->state &= ~(1 << 11);

	outw(s->state, dev->iobase + REG_DIO);

	return insn->n;
}
/*
 * Attach handler: claims the 0x40-byte I/O region given in
 * it->options[0] and registers the four subdevices (GPCT, AI, AO, DIO).
 *
 * Returns 0 on success or a negative errno from the comedi core helpers.
 */
static int s526_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct s526_private *devpriv;
	struct comedi_subdevice *s;
	int ret;

	ret = comedi_request_region(dev, it->options[0], 0x40);
	if (ret)
		return ret;

	devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
	if (!devpriv)
		return -ENOMEM;

	ret = comedi_alloc_subdevices(dev, 4);
	if (ret)
		return ret;

	s = &dev->subdevices[0];
	/* GENERAL-PURPOSE COUNTER/TIME (GPCT) */
	s->type = COMEDI_SUBD_COUNTER;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_LSAMPL;
	s->n_chan = 4;
	s->maxdata = 0x00ffffff;	/* 24 bit counter */
	s->insn_read = s526_gpct_rinsn;
	s->insn_config = s526_gpct_insn_config;
	s->insn_write = s526_gpct_winsn;

	s = &dev->subdevices[1];
	/* analog input subdevice */
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_DIFF;
	/* channels 0 to 7 are the regular differential inputs */
	/* channel 8 is "reference 0" (+10V), channel 9 is "reference 1" (0V) */
	s->n_chan = 10;
	s->maxdata = 0xffff;
	s->range_table = &range_bipolar10;
	s->len_chanlist = 16;
	s->insn_read = s526_ai_rinsn;
	s->insn_config = s526_ai_insn_config;

	s = &dev->subdevices[2];
	/* analog output subdevice */
	s->type = COMEDI_SUBD_AO;
	s->subdev_flags = SDF_WRITABLE;
	s->n_chan = 4;
	s->maxdata = 0xffff;
	s->range_table = &range_bipolar10;
	s->insn_write = s526_ao_insn_write;
	s->insn_read = comedi_readback_insn_read;

	/* allocate the readback buffer used by s526_ao_insn_write() */
	ret = comedi_alloc_subdev_readback(s);
	if (ret)
		return ret;

	s = &dev->subdevices[3];
	/* digital i/o subdevice */
	s->type = COMEDI_SUBD_DIO;
	s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
	s->n_chan = 8;
	s->maxdata = 1;
	s->range_table = &range_digital;
	s->insn_bits = s526_dio_insn_bits;
	s->insn_config = s526_dio_insn_config;

	return 0;
}
/* comedi driver registration for the Sensoray model 526 board */
static struct comedi_driver s526_driver = {
	.driver_name	= "s526",
	.module		= THIS_MODULE,
	.attach		= s526_attach,
	.detach		= comedi_legacy_detach,
};
module_comedi_driver(s526_driver);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
KimLemon/AKL-Kernel | drivers/misc/mei/client.c | 843 | 18100 | /*
*
* Intel Management Engine Interface (Intel MEI) Linux driver
* Copyright (c) 2003-2012, Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
*/
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/mei.h>
#include "mei_dev.h"
#include "hbm.h"
#include "client.h"
/**
 * mei_me_cl_by_uuid - locate index of me client
 *
 * @dev: mei device
 * @uuid: me client protocol uuid to look up
 *
 * returns me client index or -ENOENT if not found
 */
int mei_me_cl_by_uuid(const struct mei_device *dev, const uuid_le *uuid)
{
	int i, res = -ENOENT;

	for (i = 0; i < dev->me_clients_num; ++i)
		if (uuid_le_cmp(*uuid,
				dev->me_clients[i].props.protocol_name) == 0) {
			res = i;
			break;
		}

	return res;
}
/**
 * mei_me_cl_by_id - return index to me_clients for client_id
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns index on success, -ENOENT on failure.
 */
int mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{
	int i;

	/*
	 * Fix: the original ran WARN_ON(dev->me_clients[i].client_id !=
	 * client_id) *before* checking i == dev->me_clients_num, so a
	 * lookup miss dereferenced one element past the end of the
	 * me_clients array.  Return directly from the loop instead.
	 */
	for (i = 0; i < dev->me_clients_num; i++)
		if (dev->me_clients[i].client_id == client_id)
			return i;

	return -ENOENT;
}
/**
 * mei_io_list_flush - removes list entry belonging to cl.
 *
 * @list: An instance of our list structure
 * @cl: host client
 *
 * Entries are only unlinked, not freed; the caller retains ownership
 * of the removed callbacks.
 */
void mei_io_list_flush(struct mei_cl_cb *list, struct mei_cl *cl)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *next;

	list_for_each_entry_safe(cb, next, &list->list, list) {
		if (cb->cl && mei_cl_cmp_id(cl, cb->cl))
			list_del(&cb->list);
	}
}
/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct, may be NULL
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	kfree(cb->request_buffer.data);
	kfree(cb->response_buffer.data);
	kfree(cb);
}
/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client the callback belongs to
 * @fp: pointer to file structure (may be NULL for internal requests)
 *
 * returns mei_cl_cb pointer or NULL on allocation failure;
 */
struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	mei_io_list_init(cb);

	cb->file_object = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	return cb;
}
/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success (a zero length is a no-op)
 *	-EINVAL if cb is NULL
 *	-ENOMEM if allocation failed
 */
int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->request_buffer.data)
		return -ENOMEM;
	cb->request_buffer.size = length;
	return 0;
}
/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success (a zero length is a no-op)
 *	-EINVAL if cb is NULL
 *	-ENOMEM if allocation failed
 */
int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->response_buffer.data)
		return -ENOMEM;
	cb->response_buffer.size = length;
	return 0;
}
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 *
 * Removes cl's entries from every device io list (read, write,
 * control and amthif queues).  Returns 0, or -EINVAL if cl is invalid.
 */
int mei_cl_flush_queues(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev_dbg(&cl->dev->pdev->dev, "remove list entry belonging to cl\n");
	mei_io_list_flush(&cl->dev->read_list, cl);
	mei_io_list_flush(&cl->dev->write_list, cl);
	mei_io_list_flush(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_cmd_list, cl);
	mei_io_list_flush(&cl->dev->amthif_rd_complete_list, cl);
	return 0;
}
/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device the client is bound to
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	INIT_LIST_HEAD(&cl->link);
	INIT_LIST_HEAD(&cl->device_link);
	cl->reading_state = MEI_IDLE;
	cl->writing_state = MEI_IDLE;
	cl->dev = dev;
}
/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * returns The allocated file or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	/* mei_cl_init zeroes the structure, so kmalloc is sufficient */
	mei_cl_init(cl, dev);

	return cl;
}
/**
 * mei_cl_find_read_cb - find this cl's callback in the read list
 *
 * @cl: host client
 *
 * returns the first matching cb on success, NULL if none is queued
 */
struct mei_cl_cb *mei_cl_find_read_cb(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;
	struct mei_cl_cb *cb = NULL;
	struct mei_cl_cb *next = NULL;

	list_for_each_entry_safe(cb, next, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, cb->cl))
			return cb;
	return NULL;
}
/** mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 * @id: fixed host id or MEI_HOST_CLIENT_ID_ANY (-1) for generating one
 *
 * returns 0 on success
 *	-EINVAL on incorrect values
 *	-ENOENT if no free host id is available
 */
int mei_cl_link(struct mei_cl *cl, int id)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* If Id is not assigned get one*/
	if (id == MEI_HOST_CLIENT_ID_ANY)
		id = find_first_zero_bit(dev->host_clients_map,
					MEI_CLIENTS_MAX);

	/* find_first_zero_bit returns MEI_CLIENTS_MAX when the map is full */
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "id exceded %d", MEI_CLIENTS_MAX) ;
		return -ENOENT;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	dev_dbg(&dev->pdev->dev, "link cl host id = %d\n", cl->host_client_id);
	return 0;
}
/**
 * mei_cl_unlink - remove me_cl from the list
 *
 * @cl: host client (NULL or unbound clients are tolerated)
 *
 * Always returns 0.
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl *pos, *next;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* wd and amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	/* unlink the entry in the file list with a matching host id */
	list_for_each_entry_safe(pos, next, &dev->file_list, link) {
		if (cl->host_client_id == pos->host_client_id) {
			dev_dbg(&dev->pdev->dev, "remove host client = %d, ME client = %d\n",
				pos->host_client_id, pos->me_client_id);
			list_del_init(&pos->link);
			break;
		}
	}
	return 0;
}
/*
 * mei_host_client_init - work item that initializes the fixed host
 * clients (amthif, watchdog, nfc) once me client enumeration is done,
 * then marks the device enabled.
 */
void mei_host_client_init(struct work_struct *work)
{
	struct mei_device *dev = container_of(work,
					      struct mei_device, init_work);
	struct mei_client_properties *client_props;
	int i;

	mutex_lock(&dev->device_lock);

	bitmap_zero(dev->host_clients_map, MEI_CLIENTS_MAX);
	dev->open_handle_count = 0;

	/*
	 * Reserving the first three client IDs
	 * 0: Reserved for MEI Bus Message communications
	 * 1: Reserved for Watchdog
	 * 2: Reserved for AMTHI
	 */
	bitmap_set(dev->host_clients_map, 0, 3);

	/* probe the well-known protocols advertised by the firmware */
	for (i = 0; i < dev->me_clients_num; i++) {
		client_props = &dev->me_clients[i].props;

		if (!uuid_le_cmp(client_props->protocol_name, mei_amthif_guid))
			mei_amthif_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_wd_guid))
			mei_wd_host_init(dev);
		else if (!uuid_le_cmp(client_props->protocol_name, mei_nfc_guid))
			mei_nfc_host_init(dev);
	}

	dev->dev_state = MEI_DEV_ENABLED;

	mutex_unlock(&dev->device_lock);
}
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets, err;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	/* nothing to do unless a disconnect is actually in progress */
	if (cl->state != MEI_FILE_DISCONNECTING)
		return 0;

	cb = mei_io_cb_init(cl, NULL);
	if (!cb)
		return -ENOMEM;

	cb->fop_type = MEI_FOP_CLOSE;
	if (dev->hbuf_is_ready) {
		/* host buffer is free: send the disconnect request now */
		dev->hbuf_is_ready = false;
		if (mei_hbm_cl_disconnect_req(dev, cl)) {
			rets = -ENODEV;
			dev_err(&dev->pdev->dev, "failed to disconnect.\n");
			goto free;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		mdelay(10); /* Wait for hardware disconnection ready */
		/* NOTE(review): 10ms busy-wait while holding device_lock
		 * looks heavy -- confirm the hw really requires it */
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		/* otherwise queue it for the irq thread to send later */
		dev_dbg(&dev->pdev->dev, "add disconnect cb to control write list\n");
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}
	mutex_unlock(&dev->device_lock);

	err = wait_event_timeout(dev->wait_recvd_msg,
			MEI_FILE_DISCONNECTED == cl->state,
			mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));

	mutex_lock(&dev->device_lock);
	if (MEI_FILE_DISCONNECTED == cl->state) {
		rets = 0;
		dev_dbg(&dev->pdev->dev, "successfully disconnected from FW client.\n");
	} else {
		rets = -ENODEV;
		if (MEI_FILE_DISCONNECTED != cl->state)
			dev_dbg(&dev->pdev->dev, "wrong status client disconnect.\n");

		if (err)
			dev_dbg(&dev->pdev->dev,
					"wait failed disconnect err=%08x\n",
					err);

		dev_dbg(&dev->pdev->dev, "failed to disconnect from FW client.\n");
	}

	/* drop any control cbs still queued for this client */
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
free:
	mei_io_cb_free(cb);
	return rets;
}
/**
 * mei_cl_is_other_connecting - checks whether another host client is
 *	currently connecting to the same me client
 *
 * @cl: private data of the file object
 *
 * returns true if another client is connecting, false otherwise.
 */
bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl *pos;
	struct mei_cl *next;

	if (WARN_ON(!cl || !cl->dev))
		return false;

	dev = cl->dev;

	list_for_each_entry_safe(pos, next, &dev->file_list, link) {
		if (pos == cl)
			continue;
		if (pos->state != MEI_FILE_CONNECTING)
			continue;
		if (pos->me_client_id == cl->me_client_id)
			return true;
	}

	return false;
}
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @file: file structure the connect request originates from
 *
 * Locking: called under "dev->device_lock" lock
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct file *file)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	long timeout = mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT);
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cb = mei_io_cb_init(cl, file);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	cb->fop_type = MEI_FOP_IOCTL;

	/*
	 * If the host buffer is free and nobody else is connecting to the
	 * same me client, send the connect request now; otherwise queue it
	 * on the control write list for the irq thread.
	 */
	if (dev->hbuf_is_ready && !mei_cl_is_other_connecting(cl)) {
		dev->hbuf_is_ready = false;

		if (mei_hbm_cl_connect_req(dev, cl)) {
			rets = -ENODEV;
			goto out;
		}
		cl->timer_count = MEI_CONNECT_TIMEOUT;
		list_add_tail(&cb->list, &dev->ctrl_rd_list.list);
	} else {
		cl->state = MEI_FILE_INITIALIZING;
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}

	mutex_unlock(&dev->device_lock);
	/*
	 * Fix: timeout is already in jiffies (mei_secs_to_jiffies above);
	 * the original passed "timeout * HZ", converting a second time and
	 * inflating the wait by a factor of HZ.
	 */
	rets = wait_event_timeout(dev->wait_recvd_msg,
				  (cl->state == MEI_FILE_CONNECTED ||
				   cl->state == MEI_FILE_DISCONNECTED),
				  timeout);
	mutex_lock(&dev->device_lock);

	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -EFAULT;

		/* drop the stale control requests for this client */
		mei_io_list_flush(&dev->ctrl_rd_list, cl);
		mei_io_list_flush(&dev->ctrl_wr_list, cl);
		goto out;
	}

	rets = cl->status;

out:
	mei_io_cb_free(cb);
	return rets;
}
/**
 * mei_cl_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: private data of the file object
 *
 * returns 1 if mei_flow_ctrl_creds >0, 0 - otherwise.
 *	-ENOENT if mei_cl is not present
 *	-EINVAL if single_recv_buf == 0
 */
int mei_cl_flow_ctrl_creds(struct mei_cl *cl)
{
	struct mei_device *dev;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* no enumerated me clients yet: report no credits */
	if (!dev->me_clients_num)
		return 0;

	/* per-host-client credits take precedence */
	if (cl->mei_flow_ctrl_creds > 0)
		return 1;

	/* otherwise look for shared (single receive buffer) credits */
	for (i = 0; i < dev->me_clients_num; i++) {
		struct mei_me_client  *me_cl = &dev->me_clients[i];
		if (me_cl->client_id == cl->me_client_id) {
			if (me_cl->mei_flow_ctrl_creds) {
				if (WARN_ON(me_cl->props.single_recv_buf == 0))
					return -EINVAL;
				return 1;
			} else {
				return 0;
			}
		}
	}
	return -ENOENT;
}
/**
 * mei_cl_flow_ctrl_reduce - reduces flow_control.
 *
 * @cl: private data of the file object
 *
 * @returns
 *	0 on success
 *	-ENOENT when me client is not found
 *	-EINVAL when ctrl credits are <= 0
 */
int mei_cl_flow_ctrl_reduce(struct mei_cl *cl)
{
	struct mei_device *dev;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	if (!dev->me_clients_num)
		return -ENOENT;

	for (i = 0; i < dev->me_clients_num; i++) {
		struct mei_me_client  *me_cl = &dev->me_clients[i];
		if (me_cl->client_id == cl->me_client_id) {
			/* shared-buffer clients use the me-side counter,
			 * otherwise decrement the per-host-client counter */
			if (me_cl->props.single_recv_buf != 0) {
				if (WARN_ON(me_cl->mei_flow_ctrl_creds <= 0))
					return -EINVAL;
				dev->me_clients[i].mei_flow_ctrl_creds--;
			} else {
				if (WARN_ON(cl->mei_flow_ctrl_creds <= 0))
					return -EINVAL;
				cl->mei_flow_ctrl_creds--;
			}
			return 0;
		}
	}
	return -ENOENT;
}
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: requested read size; grown to the me client's max message
 *	length if smaller
 *
 * returns 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (cl->state != MEI_FILE_CONNECTED)
		return -ENODEV;

	if (dev->dev_state != MEI_DEV_ENABLED)
		return -ENODEV;

	/* only one outstanding read per client */
	if (cl->read_cb) {
		dev_dbg(&dev->pdev->dev, "read is pending.\n");
		return -EBUSY;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		dev_err(&dev->pdev->dev, "no such me client %d\n",
			cl->me_client_id);
		return  -ENODEV;
	}

	cb = mei_io_cb_init(cl, NULL);
	if (!cb)
		return -ENOMEM;

	/* always allocate at least client max message */
	length = max_t(size_t, length, dev->me_clients[i].props.max_msg_length);
	rets = mei_io_cb_alloc_resp_buf(cb, length);
	if (rets)
		goto err;

	cb->fop_type = MEI_FOP_READ;
	if (dev->hbuf_is_ready) {
		/* host buffer free: grant the fw a flow-control credit now */
		dev->hbuf_is_ready = false;
		if (mei_hbm_cl_flow_control_req(dev, cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&cb->list, &dev->read_list.list);
	} else {
		/* otherwise queue the request for the irq thread */
		list_add_tail(&cb->list, &dev->ctrl_wr_list.list);
	}
	cl->read_cb = cb;
	return rets;
err:
	mei_io_cb_free(cb);
	return rets;
}
/**
 * mei_cl_write - submit a write cb to mei device
 *	assumes device_lock is locked
 *
 * @cl: host client
 * @cb: write callback with filled data
 * @blocking: if true, wait for the write to complete before returning
 *
 * returns number of bytes sent on success, <0 on failure.
 */
int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, bool blocking)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr mei_hdr;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	if (WARN_ON(!cb))
		return -EINVAL;

	dev = cl->dev;

	buf = &cb->request_buffer;

	dev_dbg(&dev->pdev->dev, "mei_cl_write %d\n", buf->size);

	cb->fop_type = MEI_FOP_WRITE;

	rets = mei_cl_flow_ctrl_creds(cl);
	if (rets < 0)
		goto err;

	/* Host buffer is not ready, we queue the request */
	if (rets == 0 || !dev->hbuf_is_ready) {
		cb->buf_idx = 0;
		/* unseting complete will enqueue the cb for write */
		mei_hdr.msg_complete = 0;
		cl->writing_state = MEI_WRITING;
		rets = buf->size;
		goto out;
	}

	dev->hbuf_is_ready = false;

	/* Check for a maximum length; only a partial first fragment is
	 * sent now, the rest is pushed by the irq thread */
	if (buf->size > mei_hbuf_max_len(dev)) {
		mei_hdr.length = mei_hbuf_max_len(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = buf->size;
		mei_hdr.msg_complete = 1;
	}

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;

	dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(&mei_hdr));

	if (mei_write_message(dev, &mei_hdr, buf->data)) {
		rets = -EIO;
		goto err;
	}

	cl->writing_state = MEI_WRITING;
	cb->buf_idx = mei_hdr.length;
	rets = buf->size;

out:
	if (mei_hdr.msg_complete) {
		/* whole message sent: consume one flow-control credit */
		if (mei_cl_flow_ctrl_reduce(cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&cb->list, &dev->write_list.list);
	}


	if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {

		mutex_unlock(&dev->device_lock);
		if (wait_event_interruptible(cl->tx_wait,
			cl->writing_state == MEI_WRITE_COMPLETE)) {
				if (signal_pending(current))
					rets = -EINTR;
				else
					rets = -ERESTARTSYS;
		}
		mutex_lock(&dev->device_lock);
	}
err:
	return rets;
}
/**
 * mei_cl_all_disconnect - disconnect forcefully all connected clients
 *
 * @dev: mei device
 *
 * Marks every linked client disconnected and clears its credits and
 * timer; no messages are sent to the firmware.
 */
void mei_cl_all_disconnect(struct mei_device *dev)
{
	struct mei_cl *cl, *next;

	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
		cl->state = MEI_FILE_DISCONNECTED;
		cl->mei_flow_ctrl_creds = 0;
		cl->timer_count = 0;
	}
}
/**
 * mei_cl_all_read_wakeup - wake up all readings so they can be interrupted
 *
 * @dev: mei device
 */
void mei_cl_all_read_wakeup(struct mei_device *dev)
{
	struct mei_cl *cl, *next;
	list_for_each_entry_safe(cl, next, &dev->file_list, link) {
		if (waitqueue_active(&cl->rx_wait)) {
			dev_dbg(&dev->pdev->dev, "Waking up client!\n");
			wake_up_interruptible(&cl->rx_wait);
		}
	}
}
/**
* mei_cl_all_write_clear - clear all pending writes
* @dev - mei device
*/
void mei_cl_all_write_clear(struct mei_device *dev)
{
struct mei_cl_cb *cb, *next;
struct list_head *list;
list = &dev->write_list.list;
list_for_each_entry_safe(cb, next, list, list) {
list_del(&cb->list);
mei_io_cb_free(cb);
}
list = &dev->write_waiting_list.list;
list_for_each_entry_safe(cb, next, list, list) {
list_del(&cb->list);
mei_io_cb_free(cb);
}
}
| gpl-2.0 |
sexmachine/msm | drivers/input/keyboard/qpnp-keypad.c | 843 | 20737 | /* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/input/matrix_keypad.h>
#include <linux/spmi.h>
#define QPNP_MAX_ROWS 10
#define QPNP_MAX_COLS 8
#define QPNP_MIN_ROWS 2
#define QPNP_MIN_COLS 1
#define QPNP_ROW_SHIFT 3
#define QPNP_MATRIX_MAX_SIZE (QPNP_MAX_ROWS * QPNP_MAX_COLS)
/* in ms */
#define MAX_SCAN_DELAY 128
#define MIN_SCAN_DELAY 1
#define KEYP_DEFAULT_SCAN_DELAY 32
/* in ns */
#define MAX_ROW_HOLD_DELAY 250000
#define MIN_ROW_HOLD_DELAY 31250
/* in ms */
#define MAX_DEBOUNCE_TIME 20
#define MIN_DEBOUNCE_TIME 5
#define KEYP_DEFAULT_DEBOUNCE 15
/* register offsets */
#define KEYP_STATUS(base) (base + 0x08)
#define KEYP_SIZE_CTRL(base) (base + 0x40)
#define KEYP_SCAN_CTRL(base) (base + 0x42)
#define KEYP_FSM_CNTL(base) (base + 0x44)
#define KEYP_EN_CTRL(base) (base + 0x46)
#define KEYP_CTRL_KEYP_EN BIT(7)
#define KEYP_CTRL_EVNTS BIT(0)
#define KEYP_CTRL_EVNTS_MASK 0x3
#define KEYP_SIZE_COLS_SHIFT 4
#define KEYP_SIZE_COLS_MASK 0x70
#define KEYP_SIZE_ROWS_MASK 0x0F
#define KEYP_SCAN_DBC_MASK 0x03
#define KEYP_SCAN_SCNP_MASK 0x38
#define KEYP_SCAN_ROWP_MASK 0xC0
#define KEYP_SCAN_SCNP_SHIFT 3
#define KEYP_SCAN_ROWP_SHIFT 6
#define KEYP_CTRL_SCAN_ROWS_BITS 0x7
#define KEYP_SCAN_DBOUNCE_SHIFT 1
#define KEYP_SCAN_PAUSE_SHIFT 3
#define KEYP_SCAN_ROW_HOLD_SHIFT 6
#define KEYP_FSM_READ_EN BIT(0)
/* bits of these registers represent
* '0' for key press
* '1' for key release
*/
#define KEYP_RECENT_DATA(base) (base + 0x7C)
#define KEYP_OLD_DATA(base) (base + 0x5C)
#define KEYP_CLOCK_FREQ 32768
/* Per-device state for the QPNP keypad controller */
struct qpnp_kp {
	const struct matrix_keymap_data *keymap_data;
	struct input_dev *input;	/* input device events are reported on */
	struct spmi_device *spmi;	/* SPMI handle used for register I/O */

	int key_sense_irq;		/* key press/release event irq */
	int key_stuck_irq;		/* stuck-key irq */

	u16 base;			/* keypad register block base address */
	u32 num_rows;
	u32 num_cols;
	u32 debounce_ms;
	u32 row_hold_ns;
	u32 scan_delay_ms;
	bool wakeup;
	bool rep;

	unsigned short keycodes[QPNP_MATRIX_MAX_SIZE];
	u16 keystate[QPNP_MAX_ROWS];	/* last state seen by the sense irq */
	u16 stuckstate[QPNP_MAX_ROWS];	/* last state seen by the stuck irq */
};
/* Write one byte to a keypad register over SPMI; returns 0 or -errno. */
static int qpnp_kp_write_u8(struct qpnp_kp *kp, u8 data, u16 reg)
{
	int rc;

	rc = spmi_ext_register_writel(kp->spmi->ctrl, kp->spmi->sid,
							reg, &data, 1);
	if (rc < 0)
		dev_err(&kp->spmi->dev,
			"Error writing to address: %X - ret %d\n", reg, rc);
	return rc;
}
/* Read num_bytes from a keypad register over SPMI; returns 0 or -errno. */
static int qpnp_kp_read(struct qpnp_kp *kp,
				u8 *data, u16 reg, unsigned num_bytes)
{
	int rc;

	rc = spmi_ext_register_readl(kp->spmi->ctrl, kp->spmi->sid,
							reg, data, num_bytes);
	if (rc < 0)
		dev_err(&kp->spmi->dev,
			"Error reading from address : %X - ret %d\n", reg, rc);
	return rc;
}
/* Single-byte convenience wrapper around qpnp_kp_read(). */
static int qpnp_kp_read_u8(struct qpnp_kp *kp, u8 *data, u16 reg)
{
	int rc;

	rc = qpnp_kp_read(kp, data, reg, 1);
	if (rc < 0)
		dev_err(&kp->spmi->dev, "Error reading qpnp: %X - ret %d\n",
				reg, rc);
	return rc;
}
/*
 * Normalize a raw column register value to the configured column count.
 * A raw value of 0 (all column bits low) is mapped to the out-of-range
 * sentinel bit (1 << num_cols) rather than to "no keys".
 */
static u8 qpnp_col_state(struct qpnp_kp *kp, u8 col)
{
	/* all keys pressed on that particular row? */
	if (col == 0x00)
		return 1 << kp->num_cols;
	else
		return col & ((1 << kp->num_cols) - 1);
}
/*
 * Synchronous read protocol
 *
 * 1. Write '1' to ReadState bit in KEYP_FSM_CNTL register
 * 2. Wait 2*32KHz clocks, so that HW can successfully enter read mode
 *    synchronously
 * 3. Read rows in old array first if events are more than one
 * 4. Read rows in recent array
 * 5. Wait 4*32KHz clocks
 * 6. Write '0' to ReadState bit of KEYP_FSM_CNTL register so that hw can
 *    synchronously exit read mode.
 */

/* Enter (enable=true) or leave (enable=false) synchronous read mode. */
static int qpnp_sync_read(struct qpnp_kp *kp, bool enable)
{
	int rc;
	u8 fsm_ctl;

	rc = qpnp_kp_read_u8(kp, &fsm_ctl, KEYP_FSM_CNTL(kp->base));
	if (rc < 0) {
		dev_err(&kp->spmi->dev,
			"Error reading KEYP_FSM_CNTL reg, rc=%d\n", rc);
		return rc;
	}

	if (enable)
		fsm_ctl |= KEYP_FSM_READ_EN;
	else
		fsm_ctl &= ~KEYP_FSM_READ_EN;

	rc = qpnp_kp_write_u8(kp, fsm_ctl, KEYP_FSM_CNTL(kp->base));
	if (rc < 0) {
		dev_err(&kp->spmi->dev,
			"Error writing KEYP_FSM_CNTL reg, rc=%d\n", rc);
		return rc;
	}

	/* 2 * 32KHz clocks */
	udelay((2 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);

	return rc;
}
/*
 * Read the per-row column state from either the recent or the old data
 * register array into state[].  NOTE(review): the read_rows parameter
 * is currently unused; rows are taken from kp->num_rows.
 */
static int qpnp_kp_read_data(struct qpnp_kp *kp, u16 *state,
					u16 data_reg, int read_rows)
{
	int rc, row;
	u8 new_data[QPNP_MAX_ROWS];

	/*
	 * Check if last row will be scanned. If not, scan to clear key event
	 * counter
	 */
	if (kp->num_rows < QPNP_MAX_ROWS) {
		rc = qpnp_kp_read_u8(kp, &new_data[QPNP_MAX_ROWS - 1],
				data_reg + (QPNP_MAX_ROWS - 1) * 2);
		if (rc)
			return rc;
	}

	/* row registers are spaced 2 bytes apart */
	for (row = 0; row < kp->num_rows; row++) {
		rc = qpnp_kp_read_u8(kp, &new_data[row], data_reg + row * 2);
		if (rc)
			return rc;
		dev_dbg(&kp->spmi->dev, "new_data[%d] = %d\n", row,
					new_data[row]);
		state[row] = qpnp_col_state(kp, new_data[row]);
	}

	return 0;
}
/*
 * Read the full key matrix using the synchronous read protocol (see the
 * comment above qpnp_sync_read).  Fills new_state[], and old_state[]
 * too when it is non-NULL.
 */
static int qpnp_kp_read_matrix(struct qpnp_kp *kp, u16 *new_state,
					 u16 *old_state)
{
	int rc, read_rows;

	read_rows = kp->num_rows;

	/* enter synchronous read mode */
	rc = qpnp_sync_read(kp, true);
	if (rc < 0) {
		dev_err(&kp->spmi->dev,
			"Error setting the FSM read enable bit rc=%d\n", rc);
		return rc;
	}

	if (old_state) {
		rc = qpnp_kp_read_data(kp, old_state, KEYP_OLD_DATA(kp->base),
						read_rows);
		if (rc < 0) {
			dev_err(&kp->spmi->dev,
				"Error reading KEYP_OLD_DATA, rc=%d\n", rc);
			return rc;
		}
	}

	rc = qpnp_kp_read_data(kp, new_state, KEYP_RECENT_DATA(kp->base),
					 read_rows);
	if (rc < 0) {
		dev_err(&kp->spmi->dev,
			"Error reading KEYP_RECENT_DATA, rc=%d\n", rc);
		return rc;
	}

	/* 4 * 32KHz clocks */
	udelay((4 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1);

	/* leave synchronous read mode */
	rc = qpnp_sync_read(kp, false);
	if (rc < 0) {
		dev_err(&kp->spmi->dev,
			"Error resetting the FSM read enable bit rc=%d\n", rc);
		return rc;
	}

	return rc;
}
/*
 * Diff two matrix snapshots and report an input event for every key
 * whose state changed.  A cleared bit in the state word means the key
 * is pressed (active-low hardware).
 */
static void __qpnp_kp_scan_matrix(struct qpnp_kp *kp, u16 *new_state,
					 u16 *old_state)
{
	int row, col, code;

	for (row = 0; row < kp->num_rows; row++) {
		int bits_changed = new_state[row] ^ old_state[row];

		if (!bits_changed)
			continue;

		for (col = 0; col < kp->num_cols; col++) {
			if (!(bits_changed & (1 << col)))
				continue;

			dev_dbg(&kp->spmi->dev, "key [%d:%d] %s\n", row, col,
					!(new_state[row] & (1 << col)) ?
					"pressed" : "released");

			code = MATRIX_SCAN_CODE(row, col, QPNP_ROW_SHIFT);

			input_event(kp->input, EV_MSC, MSC_SCAN, code);
			input_report_key(kp->input,
					kp->keycodes[code],
					!(new_state[row] & (1 << col)));

			input_sync(kp->input);
		}
	}
}
/*
 * Ghost-key detection: if two rows each have more than one key down and
 * those rows share a pressed column, the matrix readout is ambiguous
 * and the event must be discarded.  Returns true when a ghost is found.
 */
static bool qpnp_detect_ghost_keys(struct qpnp_kp *kp, u16 *new_state)
{
	int row, found_first = -1;
	u16 check, row_state;

	check = 0;
	for (row = 0; row < kp->num_rows; row++) {
		/* invert: a set bit now means "key pressed" */
		row_state = (~new_state[row]) &
			    ((1 << kp->num_cols) - 1);

		if (hweight16(row_state) > 1) {
			if (found_first == -1)
				found_first = row;
			if (check & row_state) {
				dev_dbg(&kp->spmi->dev,
					"detected ghost key row[%d],row[%d]\n",
					found_first, row);
				return true;
			}
		}
		check |= row_state;
	}
	return false;
}
/*
 * Process the pending key events according to the gray-coded hardware
 * event counter: one event (0x1) needs only the recent snapshot; two
 * events (0x3) and the overflow case (0x2) replay the old snapshot
 * first so no transition is lost.
 */
static int qpnp_kp_scan_matrix(struct qpnp_kp *kp, unsigned int events)
{
	u16 new_state[QPNP_MAX_ROWS];
	u16 old_state[QPNP_MAX_ROWS];
	int rc;

	switch (events) {
	case 0x1:
		rc = qpnp_kp_read_matrix(kp, new_state, NULL);
		if (rc < 0)
			return rc;

		/* detecting ghost key is not an error */
		if (qpnp_detect_ghost_keys(kp, new_state))
			return 0;
		__qpnp_kp_scan_matrix(kp, new_state, kp->keystate);
		memcpy(kp->keystate, new_state, sizeof(new_state));
		break;
	case 0x3: /* two events - eventcounter is gray-coded */
		rc = qpnp_kp_read_matrix(kp, new_state, old_state);
		if (rc < 0)
			return rc;

		/* replay old->keystate diff first, then new->old */
		__qpnp_kp_scan_matrix(kp, old_state, kp->keystate);
		__qpnp_kp_scan_matrix(kp, new_state, old_state);
		memcpy(kp->keystate, new_state, sizeof(new_state));
		break;
	case 0x2:
		dev_dbg(&kp->spmi->dev, "Some key events were lost\n");
		rc = qpnp_kp_read_matrix(kp, new_state, old_state);
		if (rc < 0)
			return rc;
		__qpnp_kp_scan_matrix(kp, old_state, kp->keystate);
		__qpnp_kp_scan_matrix(kp, new_state, old_state);
		memcpy(kp->keystate, new_state, sizeof(new_state));
		break;
	default:
		rc = -EINVAL;
	}
	return rc;
}
/*
 * NOTE: We are reading recent and old data registers blindly
 * whenever key-stuck interrupt happens, because events counter doesn't
 * get updated when this interrupt happens due to key stuck doesn't get
 * considered as key state change.
 *
 * We are not using old data register contents after they are being read
 * because it might report the key which was pressed before the key being stuck
 * as stuck key because it's pressed status is stored in the old data
 * register.
 */
/* Stuck-key irq handler: report changes against the stuckstate snapshot. */
static irqreturn_t qpnp_kp_stuck_irq(int irq, void *data)
{
	u16 new_state[QPNP_MAX_ROWS];
	u16 old_state[QPNP_MAX_ROWS];
	int rc;
	struct qpnp_kp *kp = data;

	rc = qpnp_kp_read_matrix(kp, new_state, old_state);
	if (rc < 0) {
		dev_err(&kp->spmi->dev, "failed to read keypad matrix\n");
		return IRQ_HANDLED;
	}

	__qpnp_kp_scan_matrix(kp, new_state, kp->stuckstate);

	return IRQ_HANDLED;
}
/*
 * Key-sense irq handler: read the event counter from the status
 * register and process the corresponding number of pending events.
 */
static irqreturn_t qpnp_kp_irq(int irq, void *data)
{
	struct qpnp_kp *kp = data;
	u8 ctrl_val, events;
	int rc;

	rc = qpnp_kp_read_u8(kp, &ctrl_val, KEYP_STATUS(kp->base));
	if (rc < 0) {
		dev_err(&kp->spmi->dev,
			"Error reading KEYP_STATUS register\n");
		return IRQ_HANDLED;
	}

	events = ctrl_val & KEYP_CTRL_EVNTS_MASK;

	rc = qpnp_kp_scan_matrix(kp, events);
	if (rc < 0)
		dev_err(&kp->spmi->dev, "failed to scan matrix\n");

	return IRQ_HANDLED;
}
/*
 * qpnp_kpd_init - program keypad size and scan timing registers
 *
 * Writes the matrix dimensions to KEYP_SIZE_CTRL and the debounce
 * time, scan pause and row hold time to KEYP_SCAN_CTRL.
 *
 * Returns 0 on success or a negative errno from the SPMI accessors.
 */
static int qpnp_kpd_init(struct qpnp_kp *kp)
{
	int bits, rc, cycles;
	u8 kpd_scan_cntl, kpd_size_cntl;

	/* Configure the SIZE register, #rows and #columns */
	rc = qpnp_kp_read_u8(kp, &kpd_size_cntl, KEYP_SIZE_CTRL(kp->base));
	if (rc < 0) {
		dev_err(&kp->spmi->dev,
			"Error reading KEYP_SIZE_CTRL reg, rc=%d\n", rc);
		return rc;
	}

	/*
	 * Fix: the original used (~KEYP_SIZE_COLS_MASK |
	 * ~KEYP_SIZE_ROWS_MASK), which evaluates to 0xFF for these
	 * non-overlapping masks and therefore cleared nothing, leaving
	 * stale row/column bits ORed into the new value.  Clear the
	 * union of both fields instead.
	 */
	kpd_size_cntl &= ~(KEYP_SIZE_COLS_MASK | KEYP_SIZE_ROWS_MASK);
	kpd_size_cntl |= (((kp->num_cols - 1) << KEYP_SIZE_COLS_SHIFT) &
				KEYP_SIZE_COLS_MASK);
	kpd_size_cntl |= ((kp->num_rows - 1) & KEYP_SIZE_ROWS_MASK);

	rc = qpnp_kp_write_u8(kp, kpd_size_cntl, KEYP_SIZE_CTRL(kp->base));
	if (rc < 0) {
		dev_err(&kp->spmi->dev,
			"Error writing to KEYP_SIZE_CTRL reg, rc=%d\n", rc);
		return rc;
	}

	/* Configure the SCAN CTL register, debounce, row pause, scan delay */
	rc = qpnp_kp_read_u8(kp, &kpd_scan_cntl, KEYP_SCAN_CTRL(kp->base));
	if (rc < 0) {
		dev_err(&kp->spmi->dev,
			"Error reading KEYP_SCAN_CTRL reg, rc=%d\n", rc);
		return rc;
	}

	/* Same fix as above: clear all three fields before setting them. */
	kpd_scan_cntl &= ~(KEYP_SCAN_DBC_MASK | KEYP_SCAN_SCNP_MASK |
				KEYP_SCAN_ROWP_MASK);

	/* debounce is in 5 ms steps, encoded as (ms / 5) - 1 */
	kpd_scan_cntl |= (((kp->debounce_ms / 5) - 1) & KEYP_SCAN_DBC_MASK);

	/* scan pause is a power-of-two ms value, encoded as its log2 */
	bits = fls(kp->scan_delay_ms) - 1;
	kpd_scan_cntl |= ((bits << KEYP_SCAN_SCNP_SHIFT) & KEYP_SCAN_SCNP_MASK);

	/* Row hold time is a multiple of 32KHz cycles. */
	cycles = (kp->row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC;
	if (cycles)
		cycles = ilog2(cycles);

	kpd_scan_cntl |= ((cycles << KEYP_SCAN_ROW_HOLD_SHIFT) &
				KEYP_SCAN_ROWP_MASK);

	rc = qpnp_kp_write_u8(kp, kpd_scan_cntl, KEYP_SCAN_CTRL(kp->base));
	if (rc)
		dev_err(&kp->spmi->dev,
			"Error writing KEYP_SCAN reg, rc=%d\n", rc);

	return rc;
}
/* Set the keypad-enable bit in the EN_CTRL register (read-modify-write). */
static int qpnp_kp_enable(struct qpnp_kp *kp)
{
	u8 val;
	int ret;

	ret = qpnp_kp_read_u8(kp, &val, KEYP_EN_CTRL(kp->base));
	if (ret < 0) {
		dev_err(&kp->spmi->dev,
			"Error reading KEYP_EN_CTRL reg, rc=%d\n", ret);
		return ret;
	}

	ret = qpnp_kp_write_u8(kp, val | KEYP_CTRL_KEYP_EN,
				KEYP_EN_CTRL(kp->base));
	if (ret < 0)
		dev_err(&kp->spmi->dev,
			"Error writing KEYP_CTRL reg, rc=%d\n", ret);

	return ret;
}
/* Clear the keypad-enable bit in the EN_CTRL register (read-modify-write). */
static int qpnp_kp_disable(struct qpnp_kp *kp)
{
	u8 val;
	int ret;

	ret = qpnp_kp_read_u8(kp, &val, KEYP_EN_CTRL(kp->base));
	if (ret < 0) {
		dev_err(&kp->spmi->dev,
			"Error reading KEYP_EN_CTRL reg, rc=%d\n", ret);
		return ret;
	}

	ret = qpnp_kp_write_u8(kp, val & ~KEYP_CTRL_KEYP_EN,
				KEYP_EN_CTRL(kp->base));
	if (ret < 0)
		dev_err(&kp->spmi->dev,
			"Error writing KEYP_CTRL reg, rc=%d\n", ret);

	return ret;
}
/* input_dev open(): power up the keypad controller. */
static int qpnp_kp_open(struct input_dev *dev)
{
	return qpnp_kp_enable(input_get_drvdata(dev));
}
/* input_dev close(): power down the keypad controller. */
static void qpnp_kp_close(struct input_dev *dev)
{
	qpnp_kp_disable(input_get_drvdata(dev));
}
/*
 * Parse keypad properties from the device-tree node and build the
 * matrix keymap.
 *
 * Required properties: "keypad,num-rows", "keypad,num-cols" and
 * "linux,keymap".  "qcom,scan-delay-ms", "qcom,row-hold-ns",
 * "qcom,debounce-ms", "qcom,wakeup" and "linux,keypad-no-autorepeat"
 * are optional: -EINVAL from of_property_read_u32() means "property
 * absent" and is deliberately ignored for those.
 *
 * All allocations are devm-managed (freed automatically on driver
 * detach).  Returns 0 on success or a negative errno.
 */
static int qpnp_keypad_parse_dt(struct qpnp_kp *kp)
{
	struct matrix_keymap_data *keymap_data;
	int rc, keymap_len, i;
	u32 *keymap;
	const __be32 *map;

	rc = of_property_read_u32(kp->spmi->dev.of_node,
				"keypad,num-rows", &kp->num_rows);
	if (rc) {
		dev_err(&kp->spmi->dev, "Unable to parse 'num-rows'\n");
		return rc;
	}

	rc = of_property_read_u32(kp->spmi->dev.of_node,
				"keypad,num-cols", &kp->num_cols);
	if (rc) {
		dev_err(&kp->spmi->dev, "Unable to parse 'num-cols'\n");
		return rc;
	}

	/* Optional timing properties: only a real read error is fatal. */
	rc = of_property_read_u32(kp->spmi->dev.of_node,
				"qcom,scan-delay-ms", &kp->scan_delay_ms);
	if (rc && rc != -EINVAL) {
		dev_err(&kp->spmi->dev, "Unable to parse 'scan-delay-ms'\n");
		return rc;
	}

	rc = of_property_read_u32(kp->spmi->dev.of_node,
				"qcom,row-hold-ns", &kp->row_hold_ns);
	if (rc && rc != -EINVAL) {
		dev_err(&kp->spmi->dev, "Unable to parse 'row-hold-ns'\n");
		return rc;
	}

	rc = of_property_read_u32(kp->spmi->dev.of_node,
				"qcom,debounce-ms", &kp->debounce_ms);
	if (rc && rc != -EINVAL) {
		dev_err(&kp->spmi->dev, "Unable to parse 'debounce-ms'\n");
		return rc;
	}

	kp->wakeup = of_property_read_bool(kp->spmi->dev.of_node,
					"qcom,wakeup");

	/* Autorepeat is on by default; DT flag turns it off. */
	kp->rep = !of_property_read_bool(kp->spmi->dev.of_node,
					"linux,keypad-no-autorepeat");

	/* keymap_len is returned in bytes; each entry is one u32 cell. */
	map = of_get_property(kp->spmi->dev.of_node,
				"linux,keymap", &keymap_len);
	if (!map) {
		dev_err(&kp->spmi->dev, "Keymap not specified\n");
		return -EINVAL;
	}

	keymap_data = devm_kzalloc(&kp->spmi->dev,
				sizeof(*keymap_data), GFP_KERNEL);
	if (!keymap_data) {
		dev_err(&kp->spmi->dev, "Unable to allocate memory\n");
		return -ENOMEM;
	}

	keymap_data->keymap_size = keymap_len / sizeof(u32);

	keymap = devm_kzalloc(&kp->spmi->dev,
		sizeof(uint32_t) * keymap_data->keymap_size, GFP_KERNEL);
	if (!keymap) {
		dev_err(&kp->spmi->dev, "could not allocate memory for keymap\n");
		return -ENOMEM;
	}

	/*
	 * Each big-endian cell packs row (bits 31-24), column (bits
	 * 23-16) and keycode (bits 15-0); repack via the KEY() macro.
	 */
	for (i = 0; i < keymap_data->keymap_size; i++) {
		unsigned int key = be32_to_cpup(map + i);
		int keycode, row, col;

		row = (key >> 24) & 0xff;
		col = (key >> 16) & 0xff;
		keycode = key & 0xffff;
		keymap[i] = KEY(row, col, keycode);
	}

	keymap_data->keymap = keymap;
	kp->keymap_data = keymap_data;

	return 0;
}
/*
 * Probe: allocate driver state, parse the device tree, validate the
 * parameters (applying defaults for optional ones), set up the input
 * device, program the keypad controller and hook up the sense/stuck
 * interrupts.
 *
 * Fixes over the original:
 *  - the device allocated with input_allocate_device() is now freed
 *    (or unregistered) on every failure path; it used to leak;
 *  - dev_set_drvdata() is called so qpnp_kp_remove() and the PM
 *    callbacks, which use dev_get_drvdata(), see the driver state;
 *  - IRQ/resource lookups happen before the input allocation so their
 *    failure paths need no cleanup.
 */
static int qpnp_kp_probe(struct spmi_device *spmi)
{
	struct qpnp_kp *kp;
	struct resource *keypad_base;
	int rc = 0;

	kp = devm_kzalloc(&spmi->dev, sizeof(struct qpnp_kp), GFP_KERNEL);
	if (!kp) {
		dev_err(&spmi->dev, "%s: Can't allocate qpnp_kp\n",
			__func__);
		return -ENOMEM;
	}

	kp->spmi = spmi;

	rc = qpnp_keypad_parse_dt(kp);
	if (rc < 0) {
		dev_err(&spmi->dev, "Error parsing device tree\n");
		return rc;
	}

	/* the #rows and #columns are compulsory */
	if (!kp->num_cols || !kp->num_rows ||
		kp->num_cols > QPNP_MAX_COLS ||
		kp->num_rows > QPNP_MAX_ROWS ||
		kp->num_cols < QPNP_MIN_COLS ||
		kp->num_rows < QPNP_MIN_ROWS) {
		dev_err(&spmi->dev, "invalid rows/cols input data\n");
		return -EINVAL;
	}

	if (!kp->keymap_data) {
		dev_err(&spmi->dev, "keymap not specified\n");
		return -EINVAL;
	}

	/* the parameters below are optional: zero means "use default" */
	if (!kp->scan_delay_ms) {
		kp->scan_delay_ms = KEYP_DEFAULT_SCAN_DELAY;
	} else if (kp->scan_delay_ms > MAX_SCAN_DELAY ||
		   kp->scan_delay_ms < MIN_SCAN_DELAY) {
		dev_err(&spmi->dev,
			"invalid keypad scan time supplied\n");
		return -EINVAL;
	}

	if (!kp->row_hold_ns) {
		kp->row_hold_ns = MIN_ROW_HOLD_DELAY;
	} else if (kp->row_hold_ns > MAX_ROW_HOLD_DELAY ||
		   kp->row_hold_ns < MIN_ROW_HOLD_DELAY) {
		dev_err(&spmi->dev,
			"invalid keypad row hold time supplied\n");
		return -EINVAL;
	}

	/* Debounce must lie in range and be a multiple of the 5 ms step. */
	if (!kp->debounce_ms) {
		kp->debounce_ms = KEYP_DEFAULT_DEBOUNCE;
	} else if (kp->debounce_ms > MAX_DEBOUNCE_TIME ||
		   kp->debounce_ms < MIN_DEBOUNCE_TIME ||
		   (kp->debounce_ms % 5 != 0)) {
		dev_err(&spmi->dev,
			"invalid debounce time supplied\n");
		return -EINVAL;
	}

	/*
	 * Look up the IRQs and the register base before allocating the
	 * input device; these lookups have no cleanup requirements.
	 */
	kp->key_sense_irq = spmi_get_irq_byname(spmi, NULL, "kp-sense");
	if (kp->key_sense_irq < 0) {
		dev_err(&spmi->dev, "Unable to get keypad sense irq\n");
		return kp->key_sense_irq;
	}

	kp->key_stuck_irq = spmi_get_irq_byname(spmi, NULL, "kp-stuck");
	if (kp->key_stuck_irq < 0) {
		dev_err(&spmi->dev, "Unable to get stuck irq\n");
		return kp->key_stuck_irq;
	}

	keypad_base = spmi_get_resource(spmi, NULL, IORESOURCE_MEM, 0);
	if (!keypad_base) {
		dev_err(&spmi->dev, "Unable to get keypad base address\n");
		return -ENXIO;
	}
	kp->base = keypad_base->start;

	kp->input = input_allocate_device();
	if (!kp->input) {
		dev_err(&spmi->dev, "Can't allocate keypad input device\n");
		return -ENOMEM;
	}

	kp->input->name = "qpnp_keypad";
	kp->input->phys = "qpnp_keypad/input0";

	kp->input->id.version = 0x0001;
	kp->input->id.product = 0x0001;
	kp->input->id.vendor = 0x0001;

	kp->input->evbit[0] = BIT_MASK(EV_KEY);
	if (kp->rep)
		set_bit(EV_REP, kp->input->evbit);

	kp->input->keycode = kp->keycodes;
	kp->input->keycodemax = QPNP_MATRIX_MAX_SIZE;
	/*
	 * NOTE(review): keycodesize is conventionally the size of one
	 * keycode entry; sizeof(kp->keycodes) is the whole array if
	 * keycodes is an array.  Kept as-is to avoid a behavior change
	 * here -- verify against struct qpnp_kp.
	 */
	kp->input->keycodesize = sizeof(kp->keycodes);
	kp->input->open = qpnp_kp_open;
	kp->input->close = qpnp_kp_close;

	matrix_keypad_build_keymap(kp->keymap_data, NULL,
					kp->num_rows, kp->num_cols,
					kp->keycodes, kp->input);

	input_set_capability(kp->input, EV_MSC, MSC_SCAN);
	input_set_drvdata(kp->input, kp);

	/* initialize keypad state: 0xff == all keys released */
	memset(kp->keystate, 0xff, sizeof(kp->keystate));
	memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate));

	rc = qpnp_kpd_init(kp);
	if (rc < 0) {
		dev_err(&spmi->dev, "unable to initialize keypad controller\n");
		goto err_free_input;
	}

	rc = input_register_device(kp->input);
	if (rc < 0) {
		dev_err(&spmi->dev, "unable to register keypad input device\n");
		goto err_free_input;
	}

	rc = devm_request_irq(&spmi->dev, kp->key_sense_irq, qpnp_kp_irq,
			      IRQF_TRIGGER_RISING, "qpnp-keypad-sense", kp);
	if (rc < 0) {
		dev_err(&spmi->dev, "failed to request keypad sense irq\n");
		goto err_unregister;
	}

	rc = devm_request_irq(&spmi->dev, kp->key_stuck_irq, qpnp_kp_stuck_irq,
			      IRQF_TRIGGER_RISING, "qpnp-keypad-stuck", kp);
	if (rc < 0) {
		dev_err(&spmi->dev, "failed to request keypad stuck irq\n");
		goto err_unregister;
	}

	/* remove() and the PM callbacks retrieve kp via dev_get_drvdata() */
	dev_set_drvdata(&spmi->dev, kp);
	device_init_wakeup(&spmi->dev, kp->wakeup);

	return 0;

err_unregister:
	/* unregister also frees the input device */
	input_unregister_device(kp->input);
	return rc;

err_free_input:
	input_free_device(kp->input);
	return rc;
}
/*
 * Driver removal: disable system wakeup and tear down the input device.
 *
 * NOTE(review): relies on dev_get_drvdata() returning the qpnp_kp
 * allocated in probe; probe must have stored it with dev_set_drvdata()
 * for this (and the PM callbacks) to work -- verify against probe.
 */
static int qpnp_kp_remove(struct spmi_device *spmi)
{
	struct qpnp_kp *kp = dev_get_drvdata(&spmi->dev);

	device_init_wakeup(&spmi->dev, 0);
	input_unregister_device(kp->input);

	return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
 * System suspend: if the keypad is a wakeup source, arm the sense IRQ
 * for wake; otherwise power the controller down while users hold the
 * device open.
 */
static int qpnp_kp_suspend(struct device *dev)
{
	struct qpnp_kp *kp = dev_get_drvdata(dev);
	struct input_dev *input_dev = kp->input;

	if (!device_may_wakeup(dev)) {
		mutex_lock(&input_dev->mutex);
		if (input_dev->users)
			qpnp_kp_disable(kp);
		mutex_unlock(&input_dev->mutex);
	} else {
		enable_irq_wake(kp->key_sense_irq);
	}

	return 0;
}
/*
 * System resume: undo whatever qpnp_kp_suspend() did -- disarm the
 * wake IRQ, or re-enable the controller if it was powered down.
 */
static int qpnp_kp_resume(struct device *dev)
{
	struct qpnp_kp *kp = dev_get_drvdata(dev);
	struct input_dev *input_dev = kp->input;

	if (!device_may_wakeup(dev)) {
		mutex_lock(&input_dev->mutex);
		if (input_dev->users)
			qpnp_kp_enable(kp);
		mutex_unlock(&input_dev->mutex);
	} else {
		disable_irq_wake(kp->key_sense_irq);
	}

	return 0;
}
#endif
/* PM callbacks are only populated when CONFIG_PM_SLEEP is set. */
static SIMPLE_DEV_PM_OPS(qpnp_kp_pm_ops,
			qpnp_kp_suspend, qpnp_kp_resume);

/*
 * Device-tree match table.
 * NOTE(review): there is no MODULE_DEVICE_TABLE(of, ...) entry, so
 * module autoloading from DT will not work -- confirm this is
 * intentional.
 */
static struct of_device_id spmi_match_table[] = {
	{	.compatible = "qcom,qpnp-keypad",
	},
	{}
};

static struct spmi_driver qpnp_kp_driver = {
	.probe		= qpnp_kp_probe,
	.remove		= qpnp_kp_remove,
	.driver		= {
		.name		= "qcom,qpnp-keypad",
		.of_match_table	= spmi_match_table,
		.owner		= THIS_MODULE,
		.pm		= &qpnp_kp_pm_ops,
	},
};
/* Register the SPMI driver at module load time. */
static int __init qpnp_kp_init(void)
{
	return spmi_driver_register(&qpnp_kp_driver);
}
module_init(qpnp_kp_init);
/* Unregister the SPMI driver at module unload time. */
static void __exit qpnp_kp_exit(void)
{
	spmi_driver_unregister(&qpnp_kp_driver);
}
module_exit(qpnp_kp_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("QPNP keypad driver");
| gpl-2.0 |
mbrugg/MC-EWIO-KERNEL-ORG | fs/logfs/compr.c | 1099 | 1792 | /*
* fs/logfs/compr.c - compression routines
*
* As should be obvious for Linux kernel code, license is GPLv2
*
* Copyright (c) 2005-2008 Joern Engel <joern@logfs.org>
*/
#include "logfs.h"
#include <linux/vmalloc.h>
#include <linux/zlib.h>
#define COMPR_LEVEL 3
static DEFINE_MUTEX(compr_mutex);
static struct z_stream_s stream;
/*
 * Deflate @inlen bytes from @in into @out (capacity @outlen).
 *
 * Returns the compressed length, or -EIO on any zlib error or when the
 * output did not end up strictly smaller than the input.  The shared
 * z_stream is serialized by compr_mutex.
 */
int logfs_compress(void *in, void *out, size_t inlen, size_t outlen)
{
	int zerr, result = -EIO;

	mutex_lock(&compr_mutex);

	zerr = zlib_deflateInit(&stream, COMPR_LEVEL);
	if (zerr != Z_OK)
		goto unlock;

	stream.next_in = in;
	stream.avail_in = inlen;
	stream.total_in = 0;
	stream.next_out = out;
	stream.avail_out = outlen;
	stream.total_out = 0;

	zerr = zlib_deflate(&stream, Z_FINISH);
	if (zerr != Z_STREAM_END)
		goto unlock;

	zerr = zlib_deflateEnd(&stream);
	if (zerr != Z_OK)
		goto unlock;

	/* Compression is only useful if it actually shrank the data. */
	if (stream.total_out < stream.total_in)
		result = stream.total_out;
unlock:
	mutex_unlock(&compr_mutex);
	return result;
}
/*
 * Inflate @inlen bytes from @in into @out (capacity @outlen).
 *
 * Returns 0 on success or -EIO on any zlib error.  The shared z_stream
 * is serialized by compr_mutex.
 */
int logfs_uncompress(void *in, void *out, size_t inlen, size_t outlen)
{
	int zerr, result = -EIO;

	mutex_lock(&compr_mutex);

	zerr = zlib_inflateInit(&stream);
	if (zerr != Z_OK)
		goto unlock;

	stream.next_in = in;
	stream.avail_in = inlen;
	stream.total_in = 0;
	stream.next_out = out;
	stream.avail_out = outlen;
	stream.total_out = 0;

	zerr = zlib_inflate(&stream, Z_FINISH);
	if (zerr != Z_STREAM_END)
		goto unlock;

	zerr = zlib_inflateEnd(&stream);
	if (zerr == Z_OK)
		result = 0;
unlock:
	mutex_unlock(&compr_mutex);
	return result;
}
/*
 * Allocate one workspace big enough for both deflate and inflate; the
 * two paths never run concurrently thanks to compr_mutex.
 */
int __init logfs_compr_init(void)
{
	size_t deflate_sz = zlib_deflate_workspacesize();
	size_t inflate_sz = zlib_inflate_workspacesize();

	stream.workspace = vmalloc(max(deflate_sz, inflate_sz));
	return stream.workspace ? 0 : -ENOMEM;
}
/* Release the shared zlib workspace allocated in logfs_compr_init(). */
void logfs_compr_exit(void)
{
	vfree(stream.workspace);
}
| gpl-2.0 |
emwno/android_kernel_konaxx | drivers/media/video/cx23885/cx23885-core.c | 2379 | 60020 | /*
* Driver for the Conexant CX23885 PCIe bridge
*
* Copyright (c) 2006 Steven Toth <stoth@linuxtv.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kmod.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <asm/div64.h>
#include <linux/firmware.h>
#include "cx23885.h"
#include "cimax2.h"
#include "altera-ci.h"
#include "cx23888-ir.h"
#include "cx23885-ir.h"
#include "cx23885-av.h"
#include "cx23885-input.h"
MODULE_DESCRIPTION("Driver for cx23885 based TV cards");
MODULE_AUTHOR("Steven Toth <stoth@linuxtv.org>");
MODULE_LICENSE("GPL");
static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "enable debug messages");
static unsigned int card[] = {[0 ... (CX23885_MAXBOARDS - 1)] = UNSET };
module_param_array(card, int, NULL, 0444);
MODULE_PARM_DESC(card, "card type");
#define dprintk(level, fmt, arg...)\
do { if (debug >= level)\
printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\
} while (0)
static unsigned int cx23885_devcount;
#define NO_SYNC_LINE (-1U)
/* FIXME, these allocations will change when
* analog arrives. The be reviewed.
* CX23887 Assumptions
* 1 line = 16 bytes of CDT
* cmds size = 80
* cdt size = 16 * linesize
* iqsize = 64
* maxlines = 6
*
* Address Space:
* 0x00000000 0x00008fff FIFO clusters
* 0x00010000 0x000104af Channel Management Data Structures
* 0x000104b0 0x000104ff Free
* 0x00010500 0x000108bf 15 channels * iqsize
* 0x000108c0 0x000108ff Free
* 0x00010900 0x00010e9f IQ's + Cluster Descriptor Tables
* 15 channels * (iqsize + (maxlines * linesize))
* 0x00010ea0 0x00010xxx Free
*/
static struct sram_channel cx23885_sram_channels[] = {
[SRAM_CH01] = {
.name = "VID A",
.cmds_start = 0x10000,
.ctrl_start = 0x10380,
.cdt = 0x104c0,
.fifo_start = 0x40,
.fifo_size = 0x2800,
.ptr1_reg = DMA1_PTR1,
.ptr2_reg = DMA1_PTR2,
.cnt1_reg = DMA1_CNT1,
.cnt2_reg = DMA1_CNT2,
},
[SRAM_CH02] = {
.name = "ch2",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA2_PTR1,
.ptr2_reg = DMA2_PTR2,
.cnt1_reg = DMA2_CNT1,
.cnt2_reg = DMA2_CNT2,
},
[SRAM_CH03] = {
.name = "TS1 B",
.cmds_start = 0x100A0,
.ctrl_start = 0x10400,
.cdt = 0x10580,
.fifo_start = 0x5000,
.fifo_size = 0x1000,
.ptr1_reg = DMA3_PTR1,
.ptr2_reg = DMA3_PTR2,
.cnt1_reg = DMA3_CNT1,
.cnt2_reg = DMA3_CNT2,
},
[SRAM_CH04] = {
.name = "ch4",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA4_PTR1,
.ptr2_reg = DMA4_PTR2,
.cnt1_reg = DMA4_CNT1,
.cnt2_reg = DMA4_CNT2,
},
[SRAM_CH05] = {
.name = "ch5",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA5_PTR1,
.ptr2_reg = DMA5_PTR2,
.cnt1_reg = DMA5_CNT1,
.cnt2_reg = DMA5_CNT2,
},
[SRAM_CH06] = {
.name = "TS2 C",
.cmds_start = 0x10140,
.ctrl_start = 0x10440,
.cdt = 0x105e0,
.fifo_start = 0x6000,
.fifo_size = 0x1000,
.ptr1_reg = DMA5_PTR1,
.ptr2_reg = DMA5_PTR2,
.cnt1_reg = DMA5_CNT1,
.cnt2_reg = DMA5_CNT2,
},
[SRAM_CH07] = {
.name = "ch7",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA6_PTR1,
.ptr2_reg = DMA6_PTR2,
.cnt1_reg = DMA6_CNT1,
.cnt2_reg = DMA6_CNT2,
},
[SRAM_CH08] = {
.name = "ch8",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA7_PTR1,
.ptr2_reg = DMA7_PTR2,
.cnt1_reg = DMA7_CNT1,
.cnt2_reg = DMA7_CNT2,
},
[SRAM_CH09] = {
.name = "ch9",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA8_PTR1,
.ptr2_reg = DMA8_PTR2,
.cnt1_reg = DMA8_CNT1,
.cnt2_reg = DMA8_CNT2,
},
};
static struct sram_channel cx23887_sram_channels[] = {
[SRAM_CH01] = {
.name = "VID A",
.cmds_start = 0x10000,
.ctrl_start = 0x105b0,
.cdt = 0x107b0,
.fifo_start = 0x40,
.fifo_size = 0x2800,
.ptr1_reg = DMA1_PTR1,
.ptr2_reg = DMA1_PTR2,
.cnt1_reg = DMA1_CNT1,
.cnt2_reg = DMA1_CNT2,
},
[SRAM_CH02] = {
.name = "ch2",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA2_PTR1,
.ptr2_reg = DMA2_PTR2,
.cnt1_reg = DMA2_CNT1,
.cnt2_reg = DMA2_CNT2,
},
[SRAM_CH03] = {
.name = "TS1 B",
.cmds_start = 0x100A0,
.ctrl_start = 0x10630,
.cdt = 0x10870,
.fifo_start = 0x5000,
.fifo_size = 0x1000,
.ptr1_reg = DMA3_PTR1,
.ptr2_reg = DMA3_PTR2,
.cnt1_reg = DMA3_CNT1,
.cnt2_reg = DMA3_CNT2,
},
[SRAM_CH04] = {
.name = "ch4",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA4_PTR1,
.ptr2_reg = DMA4_PTR2,
.cnt1_reg = DMA4_CNT1,
.cnt2_reg = DMA4_CNT2,
},
[SRAM_CH05] = {
.name = "ch5",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA5_PTR1,
.ptr2_reg = DMA5_PTR2,
.cnt1_reg = DMA5_CNT1,
.cnt2_reg = DMA5_CNT2,
},
[SRAM_CH06] = {
.name = "TS2 C",
.cmds_start = 0x10140,
.ctrl_start = 0x10670,
.cdt = 0x108d0,
.fifo_start = 0x6000,
.fifo_size = 0x1000,
.ptr1_reg = DMA5_PTR1,
.ptr2_reg = DMA5_PTR2,
.cnt1_reg = DMA5_CNT1,
.cnt2_reg = DMA5_CNT2,
},
[SRAM_CH07] = {
.name = "ch7",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA6_PTR1,
.ptr2_reg = DMA6_PTR2,
.cnt1_reg = DMA6_CNT1,
.cnt2_reg = DMA6_CNT2,
},
[SRAM_CH08] = {
.name = "ch8",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA7_PTR1,
.ptr2_reg = DMA7_PTR2,
.cnt1_reg = DMA7_CNT1,
.cnt2_reg = DMA7_CNT2,
},
[SRAM_CH09] = {
.name = "ch9",
.cmds_start = 0x0,
.ctrl_start = 0x0,
.cdt = 0x0,
.fifo_start = 0x0,
.fifo_size = 0x0,
.ptr1_reg = DMA8_PTR1,
.ptr2_reg = DMA8_PTR2,
.cnt1_reg = DMA8_CNT1,
.cnt2_reg = DMA8_CNT2,
},
};
/*
 * Add @mask to the set of PCI interrupt sources this driver manages.
 * Only updates the software shadow; does not touch PCI_INT_MSK.
 */
void cx23885_irq_add(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
	dev->pci_irqmask |= mask;
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
/*
 * Add @mask to the managed set and simultaneously enable those bits in
 * the hardware PCI_INT_MSK register.
 */
void cx23885_irq_add_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
	dev->pci_irqmask |= mask;
	cx_set(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
/*
 * Enable in PCI_INT_MSK only those bits of @mask that the driver has
 * previously registered via cx23885_irq_add*().
 */
void cx23885_irq_enable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;
	u32 allowed;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
	allowed = mask & dev->pci_irqmask;
	if (allowed)
		cx_set(PCI_INT_MSK, allowed);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
/* Enable every interrupt source the driver has registered. */
static inline void cx23885_irq_enable_all(struct cx23885_dev *dev)
{
	cx23885_irq_enable(dev, 0xffffffff);
}
/*
 * Disable @mask bits in PCI_INT_MSK.  The software shadow is left
 * unchanged so the bits can be re-enabled later.
 */
void cx23885_irq_disable(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
	cx_clear(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
/* Disable every interrupt source in the hardware mask register. */
static inline void cx23885_irq_disable_all(struct cx23885_dev *dev)
{
	cx23885_irq_disable(dev, 0xffffffff);
}
/*
 * Remove @mask from the managed set and disable those bits in
 * PCI_INT_MSK (inverse of cx23885_irq_add_enable()).
 */
void cx23885_irq_remove(struct cx23885_dev *dev, u32 mask)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
	dev->pci_irqmask &= ~mask;
	cx_clear(PCI_INT_MSK, mask);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);
}
/* Read back the current hardware interrupt mask under the irqmask lock. */
static u32 cx23885_irq_get_mask(struct cx23885_dev *dev)
{
	unsigned long flags;
	u32 cur;

	spin_lock_irqsave(&dev->pci_irqmask_lock, flags);
	cur = cx_read(PCI_INT_MSK);
	spin_unlock_irqrestore(&dev->pci_irqmask_lock, flags);

	return cur;
}
/*
 * Pretty-print one RISC instruction word to the kernel log and return
 * the number of 32-bit words the instruction occupies (1 for unknown
 * opcodes) so callers can step through an instruction queue.
 */
static int cx23885_risc_decode(u32 risc)
{
	/* Mnemonics indexed by the opcode in bits 31-28. */
	static char *instr[16] = {
		[RISC_SYNC >> 28] = "sync",
		[RISC_WRITE >> 28] = "write",
		[RISC_WRITEC >> 28] = "writec",
		[RISC_READ >> 28] = "read",
		[RISC_READC >> 28] = "readc",
		[RISC_JUMP >> 28] = "jump",
		[RISC_SKIP >> 28] = "skip",
		[RISC_WRITERM >> 28] = "writerm",
		[RISC_WRITECM >> 28] = "writecm",
		[RISC_WRITECR >> 28] = "writecr",
	};
	/* Instruction length in 32-bit words, per opcode (0 -> 1). */
	static int incr[16] = {
		[RISC_WRITE >> 28] = 3,
		[RISC_JUMP >> 28] = 3,
		[RISC_SKIP >> 28] = 1,
		[RISC_SYNC >> 28] = 1,
		[RISC_WRITERM >> 28] = 3,
		[RISC_WRITECM >> 28] = 3,
		[RISC_WRITECR >> 28] = 4,
	};
	/* Names of flag bits 12..27, lowest bit first. */
	static char *bits[] = {
		"12", "13", "14", "resync",
		"cnt0", "cnt1", "18", "19",
		"20", "21", "22", "23",
		"irq1", "irq2", "eol", "sol",
	};
	int i;

	printk("0x%08x [ %s", risc,
	       instr[risc >> 28] ? instr[risc >> 28] : "INVALID");
	for (i = ARRAY_SIZE(bits) - 1; i >= 0; i--)
		if (risc & (1 << (i + 12)))
			printk(" %s", bits[i]);
	printk(" count=%d ]\n", risc & 0xfff);
	return incr[risc >> 28] ? incr[risc >> 28] : 1;
}
/*
 * Complete finished buffers on a TS port's DMA queue.  @count is the
 * general-purpose counter value from the hardware; every buffer at the
 * head of the active list whose ->count has been passed is marked
 * VIDEOBUF_DONE, removed from the queue and its waiters woken.  The
 * stall timer is re-armed while buffers remain queued.
 */
void cx23885_wakeup(struct cx23885_tsport *port,
		    struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_buffer *buf;
	int bc;

	for (bc = 0;; bc++) {
		if (list_empty(&q->active))
			break;
		buf = list_entry(q->active.next,
				 struct cx23885_buffer, vb.queue);
		/* count comes from the hw and is 16bit wide --
		 * this trick handles wrap-arounds correctly for
		 * up to 32767 buffers in flight... */
		if ((s16) (count - buf->count) < 0)
			break;
		do_gettimeofday(&buf->vb.ts);
		dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.i,
			count, buf->count);
		buf->vb.state = VIDEOBUF_DONE;
		list_del(&buf->vb.queue);
		wake_up(&buf->vb.done);
	}
	/* Re-arm (or cancel) the DMA stall timeout. */
	if (list_empty(&q->active))
		del_timer(&q->timeout);
	else
		mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
	if (bc != 1)
		printk(KERN_WARNING "%s: %d buffers handled (should be 1)\n",
		       __func__, bc);
}
/*
 * Program one SRAM DMA channel: build the cluster descriptor table
 * (CDT), fill in the channel management data structure (CMDS) and load
 * the pointer/count registers for transfers of @bpl bytes per line.
 * A channel with cmds_start == 0 is erased instead of configured.
 * @risc is the initial RISC program address written into the CMDS
 * (replaced by a stub for jumponly channels).  Always returns 0.
 */
int cx23885_sram_channel_setup(struct cx23885_dev *dev,
			       struct sram_channel *ch,
			       unsigned int bpl, u32 risc)
{
	unsigned int i, lines;
	u32 cdt;

	if (ch->cmds_start == 0) {
		/* Unused channel: just zero its DMA registers. */
		dprintk(1, "%s() Erasing channel [%s]\n", __func__,
			ch->name);
		cx_write(ch->ptr1_reg, 0);
		cx_write(ch->ptr2_reg, 0);
		cx_write(ch->cnt2_reg, 0);
		cx_write(ch->cnt1_reg, 0);
		return 0;
	} else {
		dprintk(1, "%s() Configuring channel [%s]\n", __func__,
			ch->name);
	}

	bpl = (bpl + 7) & ~7; /* alignment */
	cdt = ch->cdt;
	/* Up to 6 FIFO lines fit in the cluster descriptor table. */
	lines = ch->fifo_size / bpl;
	if (lines > 6)
		lines = 6;
	BUG_ON(lines < 2);

	/* Stub RISC program at address 8: jump-to-self. */
	cx_write(8 + 0, RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	cx_write(8 + 4, 8);
	cx_write(8 + 8, 0);

	/* write CDT: one 16-byte entry per FIFO line */
	for (i = 0; i < lines; i++) {
		dprintk(2, "%s() 0x%08x <- 0x%08x\n", __func__, cdt + 16*i,
			ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i, ch->fifo_start + bpl*i);
		cx_write(cdt + 16*i + 4, 0);
		cx_write(cdt + 16*i + 8, 0);
		cx_write(cdt + 16*i + 12, 0);
	}

	/* write CMDS */
	if (ch->jumponly)
		cx_write(ch->cmds_start + 0, 8);
	else
		cx_write(ch->cmds_start + 0, risc);
	cx_write(ch->cmds_start + 4, 0); /* 64 bits 63-32 */
	cx_write(ch->cmds_start + 8, cdt);
	cx_write(ch->cmds_start + 12, (lines*16) >> 3);
	cx_write(ch->cmds_start + 16, ch->ctrl_start);
	if (ch->jumponly)
		cx_write(ch->cmds_start + 20, 0x80000000 | (64 >> 2));
	else
		cx_write(ch->cmds_start + 20, 64 >> 2);
	/* zero the remainder of the 80-byte CMDS structure */
	for (i = 24; i < 80; i += 4)
		cx_write(ch->cmds_start + i, 0);

	/* fill registers */
	cx_write(ch->ptr1_reg, ch->fifo_start);
	cx_write(ch->ptr2_reg, cdt);
	cx_write(ch->cnt2_reg, (lines*16) >> 3);
	cx_write(ch->cnt1_reg, (bpl >> 3) - 1);

	dprintk(2, "[bridge %d] sram setup %s: bpl=%d lines=%d\n",
		dev->bridge,
		ch->name,
		bpl,
		lines);

	return 0;
}
/*
 * Dump one SRAM channel's CMDS fields, RISC words, instruction queue
 * and DMA pointer/count registers to the kernel log for debugging.
 */
void cx23885_sram_channel_dump(struct cx23885_dev *dev,
			       struct sram_channel *ch)
{
	/* Names of the 14 CMDS words, in register order. */
	static char *name[] = {
		"init risc lo",
		"init risc hi",
		"cdt base",
		"cdt size",
		"iq base",
		"iq size",
		"risc pc lo",
		"risc pc hi",
		"iq wr ptr",
		"iq rd ptr",
		"cdt current",
		"pci target lo",
		"pci target hi",
		"line / byte",
	};
	u32 risc;
	unsigned int i, j, n;

	printk(KERN_WARNING "%s: %s - dma channel status dump\n",
	       dev->name, ch->name);
	for (i = 0; i < ARRAY_SIZE(name); i++)
		printk(KERN_WARNING "%s: cmds: %-15s: 0x%08x\n",
		       dev->name, name[i],
		       cx_read(ch->cmds_start + 4*i));

	/* Decode the four RISC words stored right after the CMDS fields. */
	for (i = 0; i < 4; i++) {
		risc = cx_read(ch->cmds_start + 4 * (i + 14));
		printk(KERN_WARNING "%s: risc%d: ", dev->name, i);
		cx23885_risc_decode(risc);
	}

	/* Walk the instruction queue, printing each instruction and its
	 * argument words; cx23885_risc_decode() returns the step size. */
	for (i = 0; i < (64 >> 2); i += n) {
		risc = cx_read(ch->ctrl_start + 4 * i);
		/* No consideration for bits 63-32 */
		printk(KERN_WARNING "%s: (0x%08x) iq %x: ", dev->name,
		       ch->ctrl_start + 4 * i, i);
		n = cx23885_risc_decode(risc);
		for (j = 1; j < n; j++) {
			risc = cx_read(ch->ctrl_start + 4 * (i + j));
			printk(KERN_WARNING "%s: iq %x: 0x%08x [ arg #%d ]\n",
			       dev->name, i+j, risc, j);
		}
	}

	printk(KERN_WARNING "%s: fifo: 0x%08x -> 0x%x\n",
	       dev->name, ch->fifo_start, ch->fifo_start+ch->fifo_size);
	printk(KERN_WARNING "%s: ctrl: 0x%08x -> 0x%x\n",
	       dev->name, ch->ctrl_start, ch->ctrl_start + 6*16);
	printk(KERN_WARNING "%s: ptr1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr1_reg));
	printk(KERN_WARNING "%s: ptr2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->ptr2_reg));
	printk(KERN_WARNING "%s: cnt1_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt1_reg));
	printk(KERN_WARNING "%s: cnt2_reg: 0x%08x\n",
	       dev->name, cx_read(ch->cnt2_reg));
}
/*
 * Disassemble a RISC program held in host memory to the kernel log,
 * stopping at the first bare RISC_JUMP opcode word.
 */
static void cx23885_risc_disasm(struct cx23885_tsport *port,
				struct btcx_riscmem *risc)
{
	struct cx23885_dev *dev = port->dev;
	unsigned int i, j, n;

	printk(KERN_INFO "%s: risc disasm: %p [dma=0x%08lx]\n",
	       dev->name, risc->cpu, (unsigned long)risc->dma);
	for (i = 0; i < (risc->size >> 2); i += n) {
		printk(KERN_INFO "%s: %04d: ", dev->name, i);
		n = cx23885_risc_decode(le32_to_cpu(risc->cpu[i]));
		for (j = 1; j < n; j++)
			printk(KERN_INFO "%s: %04d: 0x%08x [ arg #%d ]\n",
			       dev->name, i + j, risc->cpu[i + j], j);
		if (risc->cpu[i] == cpu_to_le32(RISC_JUMP))
			break;
	}
}
/*
 * Quiesce the bridge: stop the RISC controller, all video/audio DMA
 * engines, IR and the UART, then mask every interrupt source.
 */
static void cx23885_shutdown(struct cx23885_dev *dev)
{
	/* disable RISC controller */
	cx_write(DEV_CNTRL2, 0);

	/* Disable all IR activity */
	cx_write(IR_CNTRL_REG, 0);

	/* Disable Video A/B activity */
	cx_write(VID_A_DMA_CTL, 0);
	cx_write(VID_B_DMA_CTL, 0);
	cx_write(VID_C_DMA_CTL, 0);

	/* Disable Audio activity */
	cx_write(AUD_INT_DMA_CTL, 0);
	cx_write(AUD_EXT_DMA_CTL, 0);

	/* Disable Serial port */
	cx_write(UART_CTL, 0);

	/* Disable Interrupts */
	cx23885_irq_disable_all(dev);
	cx_write(VID_A_INT_MSK, 0);
	cx_write(VID_B_INT_MSK, 0);
	cx_write(VID_C_INT_MSK, 0);
	cx_write(AUDIO_INT_INT_MSK, 0);
	cx_write(AUDIO_EXT_INT_MSK, 0);
}
/*
 * Full bridge reset: shut everything down, acknowledge all pending
 * interrupt status, restore clock/pad defaults, then reprogram every
 * SRAM channel with its default bytes-per-line.
 */
static void cx23885_reset(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	cx23885_shutdown(dev);

	/* Writing 1s acknowledges any latched interrupt status. */
	cx_write(PCI_INT_STAT, 0xffffffff);
	cx_write(VID_A_INT_STAT, 0xffffffff);
	cx_write(VID_B_INT_STAT, 0xffffffff);
	cx_write(VID_C_INT_STAT, 0xffffffff);
	cx_write(AUDIO_INT_INT_STAT, 0xffffffff);
	cx_write(AUDIO_EXT_INT_STAT, 0xffffffff);
	cx_write(CLK_DELAY, cx_read(CLK_DELAY) & 0x80000000);
	cx_write(PAD_CTRL, 0x00500300);

	/* let the hardware settle before reprogramming */
	mdelay(100);

	/* VID A: 720 pixels x 4; TS ports: 4 x 188-byte packets;
	 * remaining channels erased with a dummy 128-byte line. */
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH01],
		720*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH03],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH04], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH05], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH06],
		188*4, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH07], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH08], 128, 0);
	cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH09], 128, 0);

	cx23885_gpio_setup(dev);
}
/* Apply bridge-specific PCI errata workarounds.  Always returns 0. */
static int cx23885_pci_quirks(struct cx23885_dev *dev)
{
	dprintk(1, "%s()\n", __func__);

	/* The cx23885 bridge has a weird bug which causes NMI to be asserted
	 * when DMA begins if RDR_TLCTL0 bit4 is not cleared. It does not
	 * occur on the cx23887 bridge.
	 */
	if (dev->bridge == CX23885_BRIDGE_885)
		cx_clear(RDR_TLCTL0, 1 << 4);

	return 0;
}
/*
 * Claim the device's BAR0 MMIO region.  Returns 0 on success or
 * -EBUSY when another driver already owns it.
 */
static int get_resources(struct cx23885_dev *dev)
{
	struct resource *res;

	res = request_mem_region(pci_resource_start(dev->pci, 0),
				 pci_resource_len(dev->pci, 0),
				 dev->name);
	if (!res) {
		printk(KERN_ERR "%s: can't get MMIO memory @ 0x%llx\n",
		       dev->name,
		       (unsigned long long)pci_resource_start(dev->pci, 0));
		return -EBUSY;
	}

	return 0;
}
static void cx23885_timeout(unsigned long data);
int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
u32 reg, u32 mask, u32 value);
/*
 * Initialise one transport-stream port structure: common DMA control
 * defaults, queues/locks/timeout, frontend list, and the per-port
 * register map and SRAM channel selected by @portno (1 = VID_B,
 * 2 = VID_C; anything else is a programming error).  Always returns 0.
 */
static int cx23885_init_tsport(struct cx23885_dev *dev,
			       struct cx23885_tsport *port, int portno)
{
	dprintk(1, "%s(portno=%d)\n", __func__, portno);

	/* Transport bus init dma queue - Common settings */
	port->dma_ctl_val = 0x11; /* Enable RISC controller and Fifo */
	port->ts_int_msk_val = 0x1111; /* TS port bits for RISC */
	port->vld_misc_val = 0x0;
	port->hw_sop_ctrl_val = (0x47 << 16 | 188 << 4);

	spin_lock_init(&port->slock);
	port->dev = dev;
	port->nr = portno;

	INIT_LIST_HEAD(&port->mpegq.active);
	INIT_LIST_HEAD(&port->mpegq.queued);
	port->mpegq.timeout.function = cx23885_timeout;
	port->mpegq.timeout.data = (unsigned long)port;
	init_timer(&port->mpegq.timeout);

	mutex_init(&port->frontends.lock);
	INIT_LIST_HEAD(&port->frontends.felist);
	port->frontends.active_fe_id = 0;

	/* This should be hardcoded allow a single frontend
	 * attachment to this tsport, keeping the -dvb.c
	 * code clean and safe.
	 */
	if (!port->num_frontends)
		port->num_frontends = 1;

	switch (portno) {
	case 1:
		port->reg_gpcnt = VID_B_GPCNT;
		port->reg_gpcnt_ctl = VID_B_GPCNT_CTL;
		port->reg_dma_ctl = VID_B_DMA_CTL;
		port->reg_lngth = VID_B_LNGTH;
		port->reg_hw_sop_ctrl = VID_B_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_B_GEN_CTL;
		port->reg_bd_pkt_status = VID_B_BD_PKT_STATUS;
		port->reg_sop_status = VID_B_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_B_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_B_VLD_MISC;
		port->reg_ts_clk_en = VID_B_TS_CLK_EN;
		port->reg_src_sel = VID_B_SRC_SEL;
		port->reg_ts_int_msk = VID_B_INT_MSK;
		port->reg_ts_int_stat = VID_B_INT_STAT;
		port->sram_chno = SRAM_CH03; /* VID_B */
		port->pci_irqmask = 0x02; /* VID_B bit1 */
		break;
	case 2:
		port->reg_gpcnt = VID_C_GPCNT;
		port->reg_gpcnt_ctl = VID_C_GPCNT_CTL;
		port->reg_dma_ctl = VID_C_DMA_CTL;
		port->reg_lngth = VID_C_LNGTH;
		port->reg_hw_sop_ctrl = VID_C_HW_SOP_CTL;
		port->reg_gen_ctrl = VID_C_GEN_CTL;
		port->reg_bd_pkt_status = VID_C_BD_PKT_STATUS;
		port->reg_sop_status = VID_C_SOP_STATUS;
		port->reg_fifo_ovfl_stat = VID_C_FIFO_OVFL_STAT;
		port->reg_vld_misc = VID_C_VLD_MISC;
		port->reg_ts_clk_en = VID_C_TS_CLK_EN;
		port->reg_src_sel = 0;
		port->reg_ts_int_msk = VID_C_INT_MSK;
		port->reg_ts_int_stat = VID_C_INT_STAT;
		port->sram_chno = SRAM_CH06; /* VID_C */
		port->pci_irqmask = 0x04; /* VID_C bit2 */
		break;
	default:
		BUG();
	}

	/* RISC "stopper" program that halts this port's DMA engine. */
	cx23885_risc_stopper(dev->pci, &port->mpegq.stopper,
			     port->reg_dma_ctl, port->dma_ctl_val, 0x00);

	return 0;
}
/*
 * Derive dev->hwrevision from the low byte of RDR_CFG2, disambiguating
 * between 885/887/888 parts via the PCI device ID where necessary.
 *
 * NOTE(review): in the default branch dev->hwrevision is logged before
 * this function ever assigns it, so the message shows a stale value
 * rather than the unrecognised RDR_CFG2 byte -- likely meant to print
 * the register value instead; verify before changing.
 */
static void cx23885_dev_checkrevision(struct cx23885_dev *dev)
{
	switch (cx_read(RDR_CFG2) & 0xff) {
	case 0x00:
		/* cx23885 */
		dev->hwrevision = 0xa0;
		break;
	case 0x01:
		/* CX23885-12Z */
		dev->hwrevision = 0xa1;
		break;
	case 0x02:
		/* CX23885-13Z/14Z */
		dev->hwrevision = 0xb0;
		break;
	case 0x03:
		if (dev->pci->device == 0x8880) {
			/* CX23888-21Z/22Z */
			dev->hwrevision = 0xc0;
		} else {
			/* CX23885-14Z */
			dev->hwrevision = 0xa4;
		}
		break;
	case 0x04:
		if (dev->pci->device == 0x8880) {
			/* CX23888-31Z */
			dev->hwrevision = 0xd0;
		} else {
			/* CX23885-15Z, CX23888-31Z */
			dev->hwrevision = 0xa5;
		}
		break;
	case 0x0e:
		/* CX23887-15Z */
		dev->hwrevision = 0xc0;
		break;
	case 0x0f:
		/* CX23887-14Z */
		dev->hwrevision = 0xb1;
		break;
	default:
		printk(KERN_ERR "%s() New hardware revision found 0x%x\n",
		       __func__, dev->hwrevision);
	}
	if (dev->hwrevision)
		printk(KERN_INFO "%s() Hardware revision = 0x%02x\n",
		       __func__, dev->hwrevision);
	else
		printk(KERN_ERR "%s() Hardware revision unknown 0x%x\n",
		       __func__, dev->hwrevision);
}
/*
 * Return the first registered v4l2_subdev whose group id matches @hw,
 * or NULL if none does.  The subdev list is walked under the
 * v4l2_device lock.
 */
struct v4l2_subdev *cx23885_find_hw(struct cx23885_dev *dev, u32 hw)
{
	struct v4l2_subdev *sd;
	struct v4l2_subdev *match = NULL;

	spin_lock(&dev->v4l2_dev.lock);
	v4l2_device_for_each_subdev(sd, &dev->v4l2_dev) {
		if (sd->grp_id != hw)
			continue;
		match = sd;
		break;
	}
	spin_unlock(&dev->v4l2_dev.lock);

	return match;
}
/*
 * One-time device initialisation: identify the PCIe bridge and board,
 * configure the three I2C masters and any TS ports the board uses,
 * claim and map BAR0, then register the analog/DVB/417/IR sub-drivers
 * the detected board requires.
 *
 * Returns 0 on success or -ENODEV if the PCI memory region could not
 * be reserved.  Sub-driver registration failures are logged but do not
 * fail the setup.
 */
static int cx23885_dev_setup(struct cx23885_dev *dev)
{
	int i;

	spin_lock_init(&dev->pci_irqmask_lock);
	mutex_init(&dev->lock);
	mutex_init(&dev->gpio_lock);

	atomic_inc(&dev->refcount);

	dev->nr = cx23885_devcount++;
	sprintf(dev->name, "cx23885[%d]", dev->nr);

	/* Configure the internal memory */
	if (dev->pci->device == 0x8880) {
		/* Could be 887 or 888, assume a default */
		dev->bridge = CX23885_BRIDGE_887;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 25000000;
		dev->sram_channels = cx23887_sram_channels;
	} else
	if (dev->pci->device == 0x8852) {
		dev->bridge = CX23885_BRIDGE_885;
		/* Apply a sensible clock frequency for the PCIe bridge */
		dev->clk_freq = 28000000;
		dev->sram_channels = cx23885_sram_channels;
	} else
		BUG();

	dprintk(1, "%s() Memory configured for PCIe bridge type %d\n",
		__func__, dev->bridge);

	/* board config: the insmod "card=" option wins, otherwise match
	 * on the PCI subsystem IDs, otherwise fall back to UNKNOWN */
	dev->board = UNSET;
	if (card[dev->nr] < cx23885_bcount)
		dev->board = card[dev->nr];
	for (i = 0; UNSET == dev->board && i < cx23885_idcount; i++)
		if (dev->pci->subsystem_vendor == cx23885_subids[i].subvendor &&
		    dev->pci->subsystem_device == cx23885_subids[i].subdevice)
			dev->board = cx23885_subids[i].card;
	if (UNSET == dev->board) {
		dev->board = CX23885_BOARD_UNKNOWN;
		cx23885_card_list(dev);
	}

	/* If the user specified a clk freq override, apply it */
	if (cx23885_boards[dev->board].clk_freq > 0)
		dev->clk_freq = cx23885_boards[dev->board].clk_freq;

	dev->pci_bus = dev->pci->bus->number;
	dev->pci_slot = PCI_SLOT(dev->pci->devfn);
	cx23885_irq_add(dev, 0x001f00);

	/* External Master 1 Bus */
	dev->i2c_bus[0].nr = 0;
	dev->i2c_bus[0].dev = dev;
	dev->i2c_bus[0].reg_stat = I2C1_STAT;
	dev->i2c_bus[0].reg_ctrl = I2C1_CTRL;
	dev->i2c_bus[0].reg_addr = I2C1_ADDR;
	dev->i2c_bus[0].reg_rdata = I2C1_RDATA;
	dev->i2c_bus[0].reg_wdata = I2C1_WDATA;
	dev->i2c_bus[0].i2c_period = (0x9d << 24); /* 100kHz */

	/* External Master 2 Bus */
	dev->i2c_bus[1].nr = 1;
	dev->i2c_bus[1].dev = dev;
	dev->i2c_bus[1].reg_stat = I2C2_STAT;
	dev->i2c_bus[1].reg_ctrl = I2C2_CTRL;
	dev->i2c_bus[1].reg_addr = I2C2_ADDR;
	dev->i2c_bus[1].reg_rdata = I2C2_RDATA;
	dev->i2c_bus[1].reg_wdata = I2C2_WDATA;
	dev->i2c_bus[1].i2c_period = (0x9d << 24); /* 100kHz */

	/* Internal Master 3 Bus */
	dev->i2c_bus[2].nr = 2;
	dev->i2c_bus[2].dev = dev;
	dev->i2c_bus[2].reg_stat = I2C3_STAT;
	dev->i2c_bus[2].reg_ctrl = I2C3_CTRL;
	dev->i2c_bus[2].reg_addr = I2C3_ADDR;
	dev->i2c_bus[2].reg_rdata = I2C3_RDATA;
	dev->i2c_bus[2].reg_wdata = I2C3_WDATA;
	dev->i2c_bus[2].i2c_period = (0x07 << 24); /* 1.95MHz */

	/* Only initialise the TS ports the board actually uses */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts1, 1);

	if ((cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_init_tsport(dev, &dev->ts2, 2);

	if (get_resources(dev) < 0) {
		printk(KERN_ERR "CORE %s No more PCIe resources for "
			"subsystem: %04x:%04x\n",
			dev->name, dev->pci->subsystem_vendor,
			dev->pci->subsystem_device);
		/* undo the devcount bump taken above */
		cx23885_devcount--;
		return -ENODEV;
	}

	/* PCIe stuff: map BAR0; bmmio aliases the same mapping for
	 * byte-granular access */
	dev->lmmio = ioremap(pci_resource_start(dev->pci, 0),
			     pci_resource_len(dev->pci, 0));

	dev->bmmio = (u8 __iomem *)dev->lmmio;

	printk(KERN_INFO "CORE %s: subsystem: %04x:%04x, board: %s [card=%d,%s]\n",
	       dev->name, dev->pci->subsystem_vendor,
	       dev->pci->subsystem_device, cx23885_boards[dev->board].name,
	       dev->board, card[dev->nr] == dev->board ?
	       "insmod option" : "autodetected");

	cx23885_pci_quirks(dev);

	/* Assume some sensible defaults */
	dev->tuner_type = cx23885_boards[dev->board].tuner_type;
	dev->tuner_addr = cx23885_boards[dev->board].tuner_addr;
	dev->tuner_bus = cx23885_boards[dev->board].tuner_bus;
	dev->radio_type = cx23885_boards[dev->board].radio_type;
	dev->radio_addr = cx23885_boards[dev->board].radio_addr;

	dprintk(1, "%s() tuner_type = 0x%x tuner_addr = 0x%x tuner_bus = %d\n",
		__func__, dev->tuner_type, dev->tuner_addr, dev->tuner_bus);
	dprintk(1, "%s() radio_type = 0x%x radio_addr = 0x%x\n",
		__func__, dev->radio_type, dev->radio_addr);

	/* The cx23417 encoder has GPIO's that need to be initialised
	 * before DVB, so that demodulators and tuners are out of
	 * reset before DVB uses them.
	 */
	if ((cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) ||
		(cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER))
		cx23885_mc417_init(dev);

	/* init hardware */
	cx23885_reset(dev);

	cx23885_i2c_register(&dev->i2c_bus[0]);
	cx23885_i2c_register(&dev->i2c_bus[1]);
	cx23885_i2c_register(&dev->i2c_bus[2]);
	cx23885_card_setup(dev);
	call_all(dev, core, s_power, 0);
	cx23885_ir_init(dev);

	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO) {
		if (cx23885_video_register(dev) < 0) {
			printk(KERN_ERR "%s() Failed to register analog "
				"video adapters on VID_A\n", __func__);
		}
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portb)
			dev->ts1.num_frontends =
				cx23885_boards[dev->board].num_fds_portb;
		if (cx23885_dvb_register(&dev->ts1) < 0) {
			printk(KERN_ERR "%s() Failed to register dvb adapters on VID_B\n",
			       __func__);
		}
	} else
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
				"%s() Failed to register 417 on VID_B\n",
				__func__);
		}
	}

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		if (cx23885_boards[dev->board].num_fds_portc)
			dev->ts2.num_frontends =
				cx23885_boards[dev->board].num_fds_portc;
		if (cx23885_dvb_register(&dev->ts2) < 0) {
			printk(KERN_ERR
				"%s() Failed to register dvb on VID_C\n",
				__func__);
		}
	} else
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER) {
		if (cx23885_417_register(dev) < 0) {
			printk(KERN_ERR
				"%s() Failed to register 417 on VID_C\n",
				__func__);
		}
	}

	cx23885_dev_checkrevision(dev);

	/* disable MSI for NetUP cards, otherwise CI is not working */
	if (cx23885_boards[dev->board].ci_type > 0)
		cx_clear(RDR_RDRCTL1, 1 << 8);

	return 0;
}
/*
 * Drop one reference on the device and, on the final put, unregister
 * all sub-drivers and I2C masters and unmap the registers.
 *
 * NOTE(review): the MMIO mem region is released on every call, before
 * the refcount test, while iounmap() only runs on the final put —
 * confirm this asymmetry is intentional.
 */
static void cx23885_dev_unregister(struct cx23885_dev *dev)
{
	release_mem_region(pci_resource_start(dev->pci, 0),
			   pci_resource_len(dev->pci, 0));

	if (!atomic_dec_and_test(&dev->refcount))
		return;

	/* Tear down the per-port sub-drivers that setup registered */
	if (cx23885_boards[dev->board].porta == CX23885_ANALOG_VIDEO)
		cx23885_video_unregister(dev);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts1);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
		cx23885_dvb_unregister(&dev->ts2);

	if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
		cx23885_417_unregister(dev);

	/* I2C masters go away in reverse registration order */
	cx23885_i2c_unregister(&dev->i2c_bus[2]);
	cx23885_i2c_unregister(&dev->i2c_bus[1]);
	cx23885_i2c_unregister(&dev->i2c_bus[0]);

	iounmap(dev->lmmio);
}
/*
 * Emit RISC write instructions that transfer @lines scan lines of @bpl
 * bytes each from the scatter-gather list, splitting a line across SG
 * chunks where necessary.  Each write is 3 dwords: opcode+length,
 * low 32 address bits, upper address bits (always 0 here).  @offset is
 * the starting byte offset into the SG data for the first line and
 * @padding is added between lines.  Returns the next free slot so the
 * caller can append further instructions.
 */
static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
				  unsigned int offset, u32 sync_line,
				  unsigned int bpl, unsigned int padding,
				  unsigned int lines)
{
	struct scatterlist *sg;
	unsigned int line, todo;

	/* sync instruction (skipped for NO_SYNC_LINE) */
	if (sync_line != NO_SYNC_LINE)
		*(rp++) = cpu_to_le32(RISC_RESYNC | sync_line);

	/* scan lines */
	sg = sglist;
	for (line = 0; line < lines; line++) {
		/* advance past SG chunks wholly before the current offset */
		while (offset && offset >= sg_dma_len(sg)) {
			offset -= sg_dma_len(sg);
			sg++;
		}
		if (bpl <= sg_dma_len(sg)-offset) {
			/* fits into current chunk */
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|RISC_EOL|bpl);
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += bpl;
		} else {
			/* scanline needs to be split: SOL write for the
			 * head of the line, plain writes for any chunks
			 * fully covered, EOL write for the tail */
			todo = bpl;
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_SOL|
					      (sg_dma_len(sg)-offset));
			*(rp++) = cpu_to_le32(sg_dma_address(sg)+offset);
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			todo -= (sg_dma_len(sg)-offset);
			offset = 0;
			sg++;
			while (todo > sg_dma_len(sg)) {
				*(rp++) = cpu_to_le32(RISC_WRITE|
						      sg_dma_len(sg));
				*(rp++) = cpu_to_le32(sg_dma_address(sg));
				*(rp++) = cpu_to_le32(0); /* bits 63-32 */
				todo -= sg_dma_len(sg);
				sg++;
			}
			*(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
			*(rp++) = cpu_to_le32(sg_dma_address(sg));
			*(rp++) = cpu_to_le32(0); /* bits 63-32 */
			offset += todo;
		}
		/* inter-line padding advances the source offset only */
		offset += padding;
	}

	return rp;
}
/*
 * Build a RISC program that DMAs one or two interlaced fields described
 * by @sglist.  top_offset/bottom_offset select which fields are present
 * (UNSET = absent); the bottom field resyncs on sync line 0x200.
 * Returns 0 or a negative errno from the riscmem allocation.
 */
int cx23885_risc_buffer(struct pci_dev *pci, struct btcx_riscmem *risc,
			struct scatterlist *sglist, unsigned int top_offset,
			unsigned int bottom_offset, unsigned int bpl,
			unsigned int padding, unsigned int lines)
{
	u32 fields = 0;
	u32 instructions;
	__le32 *rp;
	int rc;

	if (top_offset != UNSET)
		fields++;
	if (bottom_offset != UNSET)
		fields++;

	/*
	 * Worst-case estimate: one write per page border plus one per
	 * scan line, plus sync, plus a trailing jump — all 3-dword
	 * (12-byte) instructions.  Padding can push the next bpl close
	 * to a page border and the first DMA region may be smaller than
	 * PAGE_SIZE.
	 */
	instructions = fields * (1 + ((bpl + padding) * lines)
			/ PAGE_SIZE + lines);
	instructions += 2;
	rc = btcx_riscmem_alloc(pci, risc, instructions * 12);
	if (rc < 0)
		return rc;

	/* Emit the per-field programs back to back */
	rp = risc->cpu;
	if (top_offset != UNSET)
		rp = cx23885_risc_field(rp, sglist, top_offset, 0,
					bpl, padding, lines);
	if (bottom_offset != UNSET)
		rp = cx23885_risc_field(rp, sglist, bottom_offset, 0x200,
					bpl, padding, lines);

	/* Remember where the jump instruction gets patched in later */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
/*
 * Build a RISC program for a plain data buffer (no field sync, no
 * padding): @lines transfers of @bpl bytes from @sglist.
 * Returns 0 or a negative errno from the riscmem allocation.
 */
static int cx23885_risc_databuffer(struct pci_dev *pci,
				   struct btcx_riscmem *risc,
				   struct scatterlist *sglist,
				   unsigned int bpl,
				   unsigned int lines)
{
	__le32 *rp;
	u32 instructions;
	int rc;

	/*
	 * Worst-case estimate: one write per page border plus one write
	 * per scan line; the first DMA region may be smaller than
	 * PAGE_SIZE.  One extra instruction is reserved for the
	 * trailing jump.
	 */
	instructions = 1 + (bpl * lines) / PAGE_SIZE + lines;
	instructions += 1;

	rc = btcx_riscmem_alloc(pci, risc, instructions * 12);
	if (rc < 0)
		return rc;

	/* Emit the transfer program starting at the buffer head */
	rp = cx23885_risc_field(risc->cpu, sglist, 0, NO_SYNC_LINE,
				bpl, 0, lines);

	/* Remember where the jump instruction gets patched in later */
	risc->jmp = rp;
	BUG_ON((risc->jmp - risc->cpu + 2) * sizeof(*risc->cpu) > risc->size);
	return 0;
}
/*
 * Build the tiny "stopper" RISC program: a WRITECR that raises IRQ2
 * and touches @reg with @value under @mask, followed by a jump back to
 * itself.  Buffer programs chain in front of it, so reaching the
 * stopper means the queue has drained.
 */
int cx23885_risc_stopper(struct pci_dev *pci, struct btcx_riscmem *risc,
			 u32 reg, u32 mask, u32 value)
{
	__le32 *rp;
	int rc;

	rc = btcx_riscmem_alloc(pci, risc, 4 * 16);
	if (rc < 0)
		return rc;

	/* write risc instructions */
	rp = risc->cpu;
	*rp++ = cpu_to_le32(RISC_WRITECR | RISC_IRQ2);
	*rp++ = cpu_to_le32(reg);
	*rp++ = cpu_to_le32(value);
	*rp++ = cpu_to_le32(mask);
	*rp++ = cpu_to_le32(RISC_JUMP);
	*rp++ = cpu_to_le32(risc->dma);	/* loop forever on ourselves */
	*rp++ = cpu_to_le32(0);		/* bits 63-32 */
	return 0;
}
/*
 * Release a capture buffer: wait for any in-flight DMA, unmap and free
 * the videobuf DMA area and its RISC program, then mark the buffer as
 * needing re-initialisation.  Must not be called from interrupt
 * context (videobuf_waiton can sleep).
 */
void cx23885_free_buffer(struct videobuf_queue *q, struct cx23885_buffer *buf)
{
	struct videobuf_dmabuf *vbuf_dma = videobuf_to_dma(&buf->vb);

	BUG_ON(in_interrupt());

	videobuf_waiton(q, &buf->vb, 0, 0);
	videobuf_dma_unmap(q->dev, vbuf_dma);
	videobuf_dma_free(vbuf_dma);
	btcx_riscmem_free(to_pci_dev(q->dev), &buf->risc);
	buf->vb.state = VIDEOBUF_NEEDS_INIT;
}
/*
 * Debug helper: dump all bridge-wide and per-port TS registers for
 * @port at dprintk level 1.  Pure reads; no hardware state is changed.
 */
static void cx23885_tsport_reg_dump(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s() Register Dump\n", __func__);
	dprintk(1, "%s() DEV_CNTRL2 0x%08X\n", __func__,
		cx_read(DEV_CNTRL2));
	dprintk(1, "%s() PCI_INT_MSK 0x%08X\n", __func__,
		cx23885_irq_get_mask(dev));
	dprintk(1, "%s() AUD_INT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_INT_INT_MSK));
	dprintk(1, "%s() AUD_INT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_INT_DMA_CTL));
	dprintk(1, "%s() AUD_EXT_INT_MSK 0x%08X\n", __func__,
		cx_read(AUDIO_EXT_INT_MSK));
	dprintk(1, "%s() AUD_EXT_DMA_CTL 0x%08X\n", __func__,
		cx_read(AUD_EXT_DMA_CTL));
	dprintk(1, "%s() PAD_CTRL 0x%08X\n", __func__,
		cx_read(PAD_CTRL));
	dprintk(1, "%s() ALT_PIN_OUT_SEL 0x%08X\n", __func__,
		cx_read(ALT_PIN_OUT_SEL));
	dprintk(1, "%s() GPIO2 0x%08X\n", __func__,
		cx_read(GPIO2));
	/* Per-port registers: print the register address and its value */
	dprintk(1, "%s() gpcnt(0x%08X) 0x%08X\n", __func__,
		port->reg_gpcnt, cx_read(port->reg_gpcnt));
	dprintk(1, "%s() gpcnt_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_gpcnt_ctl, cx_read(port->reg_gpcnt_ctl));
	dprintk(1, "%s() dma_ctl(0x%08X) 0x%08x\n", __func__,
		port->reg_dma_ctl, cx_read(port->reg_dma_ctl));
	/* src_sel is 0 for ports without a source-select register */
	if (port->reg_src_sel)
		dprintk(1, "%s() src_sel(0x%08X) 0x%08x\n", __func__,
			port->reg_src_sel, cx_read(port->reg_src_sel));
	dprintk(1, "%s() lngth(0x%08X) 0x%08x\n", __func__,
		port->reg_lngth, cx_read(port->reg_lngth));
	dprintk(1, "%s() hw_sop_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_hw_sop_ctrl, cx_read(port->reg_hw_sop_ctrl));
	dprintk(1, "%s() gen_ctrl(0x%08X) 0x%08x\n", __func__,
		port->reg_gen_ctrl, cx_read(port->reg_gen_ctrl));
	dprintk(1, "%s() bd_pkt_status(0x%08X) 0x%08x\n", __func__,
		port->reg_bd_pkt_status, cx_read(port->reg_bd_pkt_status));
	dprintk(1, "%s() sop_status(0x%08X) 0x%08x\n", __func__,
		port->reg_sop_status, cx_read(port->reg_sop_status));
	dprintk(1, "%s() fifo_ovfl_stat(0x%08X) 0x%08x\n", __func__,
		port->reg_fifo_ovfl_stat, cx_read(port->reg_fifo_ovfl_stat));
	dprintk(1, "%s() vld_misc(0x%08X) 0x%08x\n", __func__,
		port->reg_vld_misc, cx_read(port->reg_vld_misc));
	dprintk(1, "%s() ts_clk_en(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_clk_en, cx_read(port->reg_ts_clk_en));
	dprintk(1, "%s() ts_int_msk(0x%08X) 0x%08x\n", __func__,
		port->reg_ts_int_msk, cx_read(port->reg_ts_int_msk));
}
/*
 * Program the SRAM channel and port registers and start RISC/DMA on
 * @buf.  Also (re)configures pad direction, source select and clocking
 * depending on whether the port runs DVB or the 417 encoder.  The
 * exact register write order below matters — do not reorder.
 * Returns 0, or -EINVAL for an unsupported port configuration.
 */
static int cx23885_start_dma(struct cx23885_tsport *port,
			     struct cx23885_dmaqueue *q,
			     struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s() w: %d, h: %d, f: %d\n", __func__,
		buf->vb.width, buf->vb.height, buf->vb.field);

	/* Stop the fifo and risc engine for this port */
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	/* setup fifo + format */
	cx23885_sram_channel_setup(dev,
				   &dev->sram_channels[port->sram_chno],
				   port->ts_packet_size, buf->risc.dma);
	if (debug > 5) {
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_risc_disasm(port, &buf->risc);
	}

	/* write TS length to chip */
	cx_write(port->reg_lngth, buf->vb.width);

	/* NOTE(review): bitwise '&' here while the port mode is compared
	 * with '==' everywhere else — confirm mode values are single
	 * bits so this test behaves as intended. */
	if ((!(cx23885_boards[dev->board].portb & CX23885_MPEG_DVB)) &&
		(!(cx23885_boards[dev->board].portc & CX23885_MPEG_DVB))) {
		printk("%s() Unsupported .portb/c (0x%08x)/(0x%08x)\n",
			__func__,
			cx23885_boards[dev->board].portb,
			cx23885_boards[dev->board].portc);
		return -EINVAL;
	}

	/* Gate the a/v clock off while reprogramming an encoder port */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 0);

	udelay(100);

	/* If the port supports SRC SELECT, configure it */
	if (port->reg_src_sel)
		cx_write(port->reg_src_sel, port->src_sel_val);

	cx_write(port->reg_hw_sop_ctrl, port->hw_sop_ctrl_val);
	cx_write(port->reg_ts_clk_en, port->ts_clk_en_val);
	cx_write(port->reg_vld_misc, port->vld_misc_val);
	cx_write(port->reg_gen_ctrl, port->gen_ctrl_val);
	udelay(100);

	/* NOTE: this is 2 (reserved) for portb, does it matter? */
	/* reset counter to zero */
	cx_write(port->reg_gpcnt_ctl, 3);
	q->count = 1;

	/* Set VIDB pins to input */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x3; /* Clear TS1_OE & TS1_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	/* Set VIDC pins to input */
	if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB) {
		reg = cx_read(PAD_CTRL);
		reg &= ~0x4; /* Clear TS2_SOP_OE */
		cx_write(PAD_CTRL, reg);
	}

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		reg = cx_read(PAD_CTRL);
		reg = reg & ~0x1; /* Clear TS1_OE */
		/* FIXME, bit 2 writing here is questionable */
		/* set TS1_SOP_OE and TS1_OE_HI */
		reg = reg | 0xa;
		cx_write(PAD_CTRL, reg);
		/* FIXME and these two registers should be documented. */
		cx_write(CLK_DELAY, cx_read(CLK_DELAY) | 0x80000011);
		cx_write(ALT_PIN_OUT_SEL, 0x10100045);
	}

	switch (dev->bridge) {
	case CX23885_BRIDGE_885:
	case CX23885_BRIDGE_887:
	case CX23885_BRIDGE_888:
		/* enable irqs */
		dprintk(1, "%s() enabling TS int's and DMA\n", __func__);
		cx_set(port->reg_ts_int_msk, port->ts_int_msk_val);
		cx_set(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_irq_add(dev, port->pci_irqmask);
		cx23885_irq_enable_all(dev);
		break;
	default:
		BUG();
	}

	cx_set(DEV_CNTRL2, (1<<5)); /* Enable RISC controller */

	/* ... and re-enable the a/v clock once the port is running */
	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
		cx23885_av_clk(dev, 1);

	if (debug > 4)
		cx23885_tsport_reg_dump(port);

	return 0;
}
/*
 * Stop TS interrupts and DMA on @port.  When port B drives the 417
 * encoder, additionally restore the pad direction, clear the source
 * select, park the port in gen_ctrl mode 8 and gate the a/v clock off.
 * Always returns 0.
 *
 * Cleanup: the original tested portb == CX23885_MPEG_ENCODER twice;
 * the cx23885_av_clk(dev, 0) call is merged into the single test
 * (still executed last, preserving the original write order).
 */
static int cx23885_stop_dma(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	u32 reg;

	dprintk(1, "%s()\n", __func__);

	/* Stop interrupts and DMA */
	cx_clear(port->reg_ts_int_msk, port->ts_int_msk_val);
	cx_clear(port->reg_dma_ctl, port->dma_ctl_val);

	if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER) {
		reg = cx_read(PAD_CTRL);
		/* Set TS1_OE */
		reg = reg | 0x1;
		/* clear TS1_SOP_OE and TS1_OE_HI */
		reg = reg & ~0xa;
		cx_write(PAD_CTRL, reg);
		cx_write(port->reg_src_sel, 0);
		cx_write(port->reg_gen_ctrl, 8);
		cx23885_av_clk(dev, 0);
	}

	return 0;
}
int cx23885_restart_queue(struct cx23885_tsport *port,
struct cx23885_dmaqueue *q)
{
struct cx23885_dev *dev = port->dev;
struct cx23885_buffer *buf;
dprintk(5, "%s()\n", __func__);
if (list_empty(&q->active)) {
struct cx23885_buffer *prev;
prev = NULL;
dprintk(5, "%s() queue is empty\n", __func__);
for (;;) {
if (list_empty(&q->queued))
return 0;
buf = list_entry(q->queued.next, struct cx23885_buffer,
vb.queue);
if (NULL == prev) {
list_del(&buf->vb.queue);
list_add_tail(&buf->vb.queue, &q->active);
cx23885_start_dma(port, q, buf);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT);
dprintk(5, "[%p/%d] restart_queue - f/active\n",
buf, buf->vb.i);
} else if (prev->vb.width == buf->vb.width &&
prev->vb.height == buf->vb.height &&
prev->fmt == buf->fmt) {
list_del(&buf->vb.queue);
list_add_tail(&buf->vb.queue, &q->active);
buf->vb.state = VIDEOBUF_ACTIVE;
buf->count = q->count++;
prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
/* 64 bit bits 63-32 */
prev->risc.jmp[2] = cpu_to_le32(0);
dprintk(5, "[%p/%d] restart_queue - m/active\n",
buf, buf->vb.i);
} else {
return 0;
}
prev = buf;
}
return 0;
}
buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue);
dprintk(2, "restart_queue [%p/%d]: restart dma\n",
buf, buf->vb.i);
cx23885_start_dma(port, q, buf);
list_for_each_entry(buf, &q->active, vb.queue)
buf->count = q->count++;
mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
return 0;
}
/* ------------------------------------------------------------------ */
/*
 * videobuf prepare hook for TS capture: validate the buffer size,
 * perform first-time iolock and RISC program construction, then mark
 * the buffer PREPARED.  Returns 0 or a negative errno (the buffer is
 * freed on failure).
 */
int cx23885_buf_prepare(struct videobuf_queue *q, struct cx23885_tsport *port,
			struct cx23885_buffer *buf, enum v4l2_field field)
{
	struct cx23885_dev *dev = port->dev;
	int size = port->ts_packet_size * port->ts_packet_count;
	int err;

	dprintk(1, "%s: %p\n", __func__, buf);

	/* A userspace-supplied buffer must hold a full set of packets */
	if (buf->vb.baddr != 0 && buf->vb.bsize < size)
		return -EINVAL;

	if (buf->vb.state == VIDEOBUF_NEEDS_INIT) {
		buf->vb.width = port->ts_packet_size;
		buf->vb.height = port->ts_packet_count;
		buf->vb.size = size;
		buf->vb.field = field /*V4L2_FIELD_TOP*/;

		err = videobuf_iolock(q, &buf->vb, NULL);
		if (err != 0)
			goto fail;
		cx23885_risc_databuffer(dev->pci, &buf->risc,
					videobuf_to_dma(&buf->vb)->sglist,
					buf->vb.width, buf->vb.height);
	}
	buf->vb.state = VIDEOBUF_PREPARED;
	return 0;

fail:
	cx23885_free_buffer(q, buf);
	return err;
}
/*
 * videobuf queue hook: terminate the buffer's RISC program with a jump
 * to the stopper, then either start DMA (empty queue) or splice the
 * buffer onto the previous tail's jump instruction (running queue).
 */
void cx23885_buf_queue(struct cx23885_tsport *port, struct cx23885_buffer *buf)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;
	struct cx23885_buffer *tail;

	/* add jump to stopper */
	buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC);
	buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma);
	buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */

	if (list_empty(&q->active)) {
		dprintk(1, "queue is empty - first active\n");
		list_add_tail(&buf->vb.queue, &q->active);
		cx23885_start_dma(port, q, buf);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count = q->count++;
		mod_timer(&q->timeout, jiffies + BUFFER_TIMEOUT);
		dprintk(1, "[%p/%d] %s - first active\n",
			buf, buf->vb.i, __func__);
	} else {
		dprintk(1, "queue is not empty - append to active\n");
		tail = list_entry(q->active.prev, struct cx23885_buffer,
				  vb.queue);
		list_add_tail(&buf->vb.queue, &q->active);
		buf->vb.state = VIDEOBUF_ACTIVE;
		buf->count = q->count++;
		/* Patch the former tail's jump to flow into this buffer */
		tail->risc.jmp[1] = cpu_to_le32(buf->risc.dma);
		tail->risc.jmp[2] = cpu_to_le32(0); /* 64 bit bits 63-32 */
		dprintk(1, "[%p/%d] %s - append to active\n",
			buf, buf->vb.i, __func__);
	}
}
/* ----------------------------------------------------------- */
/*
 * Under the port spinlock, fail every buffer still on the active list
 * (state = VIDEOBUF_ERROR) and wake its waiters; optionally restart
 * the queue afterwards.  @reason is only used for the debug trace.
 */
static void do_cancel_buffers(struct cx23885_tsport *port, char *reason,
			      int restart)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *q = &port->mpegq;
	unsigned long flags;

	spin_lock_irqsave(&port->slock, flags);
	while (!list_empty(&q->active)) {
		struct cx23885_buffer *buf =
			list_entry(q->active.next, struct cx23885_buffer,
				   vb.queue);

		list_del(&buf->vb.queue);
		buf->vb.state = VIDEOBUF_ERROR;
		wake_up(&buf->vb.done);
		dprintk(1, "[%p/%d] %s - dma=0x%08lx\n",
			buf, buf->vb.i, reason, (unsigned long)buf->risc.dma);
	}
	if (restart) {
		dprintk(1, "restarting queue\n");
		cx23885_restart_queue(port, q);
	}
	spin_unlock_irqrestore(&port->slock, flags);
}
/*
 * Hard-cancel all buffers on a port: quiesce the timeout timer and the
 * DMA engine first, then discard the active buffers without a restart.
 */
void cx23885_cancel_buffers(struct cx23885_tsport *port)
{
	struct cx23885_dev *dev = port->dev;
	struct cx23885_dmaqueue *dmaq = &port->mpegq;

	dprintk(1, "%s()\n", __func__);

	del_timer_sync(&dmaq->timeout);
	cx23885_stop_dma(port);
	do_cancel_buffers(port, "cancel", 0);
}
/*
 * DMA timeout timer callback: dump channel state at high debug levels,
 * stop the stalled port, drop the stale buffers and ask for a restart.
 */
static void cx23885_timeout(unsigned long data)
{
	struct cx23885_tsport *port = (struct cx23885_tsport *)data;
	struct cx23885_dev *dev = port->dev;

	dprintk(1, "%s()\n", __func__);

	if (debug > 5)
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	cx23885_stop_dma(port);
	do_cancel_buffers(port, "timeout", 1);
}
/*
 * Service a VID_B interrupt when the port drives the cx23417 encoder.
 * Error conditions stop DMA, dump SRAM state and re-check the encoder;
 * RISCI1 completes the head buffer, RISCI2 restarts a drained queue.
 * Any non-zero status is acked by writing it back to the TS interrupt
 * status register.  Returns 1 if the interrupt was handled.
 */
int cx23885_irq_417(struct cx23885_dev *dev, u32 status)
{
	/* FIXME: port1 assumption here. */
	struct cx23885_tsport *port = &dev->ts1;
	int count = 0;
	int handled = 0;

	if (status == 0)
		return handled;

	count = cx_read(port->reg_gpcnt);
	dprintk(7, "status: 0x%08x mask: 0x%08x count: 0x%x\n",
		status, cx_read(port->reg_ts_int_msk), count);

	if ((status & VID_B_MSK_BAD_PKT) ||
		(status & VID_B_MSK_OPC_ERR) ||
		(status & VID_B_MSK_VBI_OPC_ERR) ||
		(status & VID_B_MSK_SYNC) ||
		(status & VID_B_MSK_VBI_SYNC) ||
		(status & VID_B_MSK_OF) ||
		(status & VID_B_MSK_VBI_OF)) {
		printk(KERN_ERR "%s: V4L mpeg risc op code error, status "
			"= 0x%x\n", dev->name, status);
		/* name each error bit individually for the debug log */
		if (status & VID_B_MSK_BAD_PKT)
			dprintk(1, " VID_B_MSK_BAD_PKT\n");
		if (status & VID_B_MSK_OPC_ERR)
			dprintk(1, " VID_B_MSK_OPC_ERR\n");
		if (status & VID_B_MSK_VBI_OPC_ERR)
			dprintk(1, " VID_B_MSK_VBI_OPC_ERR\n");
		if (status & VID_B_MSK_SYNC)
			dprintk(1, " VID_B_MSK_SYNC\n");
		if (status & VID_B_MSK_VBI_SYNC)
			dprintk(1, " VID_B_MSK_VBI_SYNC\n");
		if (status & VID_B_MSK_OF)
			dprintk(1, " VID_B_MSK_OF\n");
		if (status & VID_B_MSK_VBI_OF)
			dprintk(1, " VID_B_MSK_VBI_OF\n");

		/* stop DMA, dump state, and let the 417 logic recover */
		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);
		cx23885_417_check_encoder(dev);
	} else if (status & VID_B_MSK_RISCI1) {
		dprintk(7, " VID_B_MSK_RISCI1\n");
		spin_lock(&port->slock);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	} else if (status & VID_B_MSK_RISCI2) {
		dprintk(7, " VID_B_MSK_RISCI2\n");
		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* ack the bits we saw */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
/*
 * Service a TS port (VID_B/VID_C) interrupt in DVB mode.  RISC opcode,
 * packet, sync or overflow errors stop DMA and dump SRAM state; RISCI1
 * signals buffer completion, RISCI2 requests a queue restart.  Status
 * bits are acked by writing them back.  Returns 1 if anything was
 * handled.
 */
static int cx23885_irq_ts(struct cx23885_tsport *port, u32 status)
{
	struct cx23885_dev *dev = port->dev;
	int handled = 0;
	u32 count;

	if ((status & VID_BC_MSK_OPC_ERR) ||
		(status & VID_BC_MSK_BAD_PKT) ||
		(status & VID_BC_MSK_SYNC) ||
		(status & VID_BC_MSK_OF)) {
		/* name each error bit individually for the debug log */
		if (status & VID_BC_MSK_OPC_ERR)
			dprintk(7, " (VID_BC_MSK_OPC_ERR 0x%08x)\n",
				VID_BC_MSK_OPC_ERR);
		if (status & VID_BC_MSK_BAD_PKT)
			dprintk(7, " (VID_BC_MSK_BAD_PKT 0x%08x)\n",
				VID_BC_MSK_BAD_PKT);
		if (status & VID_BC_MSK_SYNC)
			dprintk(7, " (VID_BC_MSK_SYNC 0x%08x)\n",
				VID_BC_MSK_SYNC);
		if (status & VID_BC_MSK_OF)
			dprintk(7, " (VID_BC_MSK_OF 0x%08x)\n",
				VID_BC_MSK_OF);

		printk(KERN_ERR "%s: mpeg risc op code error\n", dev->name);

		cx_clear(port->reg_dma_ctl, port->dma_ctl_val);
		cx23885_sram_channel_dump(dev,
			&dev->sram_channels[port->sram_chno]);

	} else if (status & VID_BC_MSK_RISCI1) {
		dprintk(7, " (RISCI1 0x%08x)\n", VID_BC_MSK_RISCI1);
		spin_lock(&port->slock);
		count = cx_read(port->reg_gpcnt);
		cx23885_wakeup(port, &port->mpegq, count);
		spin_unlock(&port->slock);
	} else if (status & VID_BC_MSK_RISCI2) {
		dprintk(7, " (RISCI2 0x%08x)\n", VID_BC_MSK_RISCI2);
		spin_lock(&port->slock);
		cx23885_restart_queue(port, &port->mpegq);
		spin_unlock(&port->slock);
	}
	if (status) {
		/* ack the bits we saw */
		cx_write(port->reg_ts_int_stat, status);
		handled = 1;
	}

	return handled;
}
/*
 * Top-level shared interrupt handler.  Reads all bridge status
 * registers, traces each status bit at high debug levels, then
 * dispatches to the CI, TS port, analog video and IR sub-handlers as
 * indicated.  AV core servicing is deferred to a workqueue with its
 * interrupt source disabled.  PCI_INT_STAT is acked only when
 * something was handled.
 */
static irqreturn_t cx23885_irq(int irq, void *dev_id)
{
	struct cx23885_dev *dev = dev_id;
	struct cx23885_tsport *ts1 = &dev->ts1;
	struct cx23885_tsport *ts2 = &dev->ts2;
	u32 pci_status, pci_mask;
	u32 vida_status, vida_mask;
	u32 ts1_status, ts1_mask;
	u32 ts2_status, ts2_mask;
	int vida_count = 0, ts1_count = 0, ts2_count = 0, handled = 0;
	bool subdev_handled;

	pci_status = cx_read(PCI_INT_STAT);
	pci_mask = cx23885_irq_get_mask(dev);
	vida_status = cx_read(VID_A_INT_STAT);
	vida_mask = cx_read(VID_A_INT_MSK);
	ts1_status = cx_read(VID_B_INT_STAT);
	ts1_mask = cx_read(VID_B_INT_MSK);
	ts2_status = cx_read(VID_C_INT_STAT);
	ts2_mask = cx_read(VID_C_INT_MSK);

	/* NOTE(review): vida_status is not part of this early-out test —
	 * confirm VID_A events always raise a PCI status bit too. */
	if ((pci_status == 0) && (ts2_status == 0) && (ts1_status == 0))
		goto out;

	vida_count = cx_read(VID_A_GPCNT);
	ts1_count = cx_read(ts1->reg_gpcnt);
	ts2_count = cx_read(ts2->reg_gpcnt);
	dprintk(7, "pci_status: 0x%08x pci_mask: 0x%08x\n",
		pci_status, pci_mask);
	dprintk(7, "vida_status: 0x%08x vida_mask: 0x%08x count: 0x%x\n",
		vida_status, vida_mask, vida_count);
	dprintk(7, "ts1_status: 0x%08x ts1_mask: 0x%08x count: 0x%x\n",
		ts1_status, ts1_mask, ts1_count);
	dprintk(7, "ts2_status: 0x%08x ts2_mask: 0x%08x count: 0x%x\n",
		ts2_status, ts2_mask, ts2_count);

	/* trace each individual PCI status bit */
	if (pci_status & (PCI_MSK_RISC_RD | PCI_MSK_RISC_WR |
			  PCI_MSK_AL_RD | PCI_MSK_AL_WR | PCI_MSK_APB_DMA |
			  PCI_MSK_VID_C | PCI_MSK_VID_B | PCI_MSK_VID_A |
			  PCI_MSK_AUD_INT | PCI_MSK_AUD_EXT |
			  PCI_MSK_GPIO0 | PCI_MSK_GPIO1 |
			  PCI_MSK_AV_CORE | PCI_MSK_IR)) {

		if (pci_status & PCI_MSK_RISC_RD)
			dprintk(7, " (PCI_MSK_RISC_RD 0x%08x)\n",
				PCI_MSK_RISC_RD);

		if (pci_status & PCI_MSK_RISC_WR)
			dprintk(7, " (PCI_MSK_RISC_WR 0x%08x)\n",
				PCI_MSK_RISC_WR);

		if (pci_status & PCI_MSK_AL_RD)
			dprintk(7, " (PCI_MSK_AL_RD 0x%08x)\n",
				PCI_MSK_AL_RD);

		if (pci_status & PCI_MSK_AL_WR)
			dprintk(7, " (PCI_MSK_AL_WR 0x%08x)\n",
				PCI_MSK_AL_WR);

		if (pci_status & PCI_MSK_APB_DMA)
			dprintk(7, " (PCI_MSK_APB_DMA 0x%08x)\n",
				PCI_MSK_APB_DMA);

		if (pci_status & PCI_MSK_VID_C)
			dprintk(7, " (PCI_MSK_VID_C 0x%08x)\n",
				PCI_MSK_VID_C);

		if (pci_status & PCI_MSK_VID_B)
			dprintk(7, " (PCI_MSK_VID_B 0x%08x)\n",
				PCI_MSK_VID_B);

		if (pci_status & PCI_MSK_VID_A)
			dprintk(7, " (PCI_MSK_VID_A 0x%08x)\n",
				PCI_MSK_VID_A);

		if (pci_status & PCI_MSK_AUD_INT)
			dprintk(7, " (PCI_MSK_AUD_INT 0x%08x)\n",
				PCI_MSK_AUD_INT);

		if (pci_status & PCI_MSK_AUD_EXT)
			dprintk(7, " (PCI_MSK_AUD_EXT 0x%08x)\n",
				PCI_MSK_AUD_EXT);

		if (pci_status & PCI_MSK_GPIO0)
			dprintk(7, " (PCI_MSK_GPIO0 0x%08x)\n",
				PCI_MSK_GPIO0);

		if (pci_status & PCI_MSK_GPIO1)
			dprintk(7, " (PCI_MSK_GPIO1 0x%08x)\n",
				PCI_MSK_GPIO1);

		if (pci_status & PCI_MSK_AV_CORE)
			dprintk(7, " (PCI_MSK_AV_CORE 0x%08x)\n",
				PCI_MSK_AV_CORE);

		if (pci_status & PCI_MSK_IR)
			dprintk(7, " (PCI_MSK_IR 0x%08x)\n",
				PCI_MSK_IR);
	}

	/* CI slot status on NetUP boards arrives via GPIO interrupts */
	if (cx23885_boards[dev->board].ci_type == 1 &&
			(pci_status & (PCI_MSK_GPIO1 | PCI_MSK_GPIO0)))
		handled += netup_ci_slot_status(dev, pci_status);

	if (cx23885_boards[dev->board].ci_type == 2 &&
			(pci_status & PCI_MSK_GPIO0))
		handled += altera_ci_irq(dev);

	/* TS port events go to the DVB or 417 handler per board config */
	if (ts1_status) {
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts1, ts1_status);
		else
		if (cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts1_status);
	}

	if (ts2_status) {
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_DVB)
			handled += cx23885_irq_ts(ts2, ts2_status);
		else
		if (cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER)
			handled += cx23885_irq_417(dev, ts2_status);
	}

	if (vida_status)
		handled += cx23885_video_irq(dev, vida_status);

	if (pci_status & PCI_MSK_IR) {
		subdev_handled = false;
		v4l2_subdev_call(dev->sd_ir, core, interrupt_service_routine,
				 pci_status, &subdev_handled);
		if (subdev_handled)
			handled++;
	}

	if ((pci_status & pci_mask) & PCI_MSK_AV_CORE) {
		/* Disable the AV Core interrupt source and defer
		 * servicing to the cx25840 work handler. */
		cx23885_irq_disable(dev, PCI_MSK_AV_CORE);
		if (!schedule_work(&dev->cx25840_work))
			printk(KERN_ERR "%s: failed to set up deferred work for"
			       " AV Core/IR interrupt. Interrupt is disabled"
			       " and won't be re-enabled\n", dev->name);
		handled++;
	}

	if (handled)
		cx_write(PCI_INT_STAT, pci_status);
out:
	return IRQ_RETVAL(handled);
}
/*
 * v4l2_device notify callback: forward IR RX/TX notifications coming
 * from the IR subdevice to the driver's IR handlers.  Possibly called
 * in an IRQ context.  Notifications from other subdevices (or other
 * notification types) are ignored.
 */
static void cx23885_v4l2_dev_notify(struct v4l2_subdev *sd,
				    unsigned int notification, void *arg)
{
	struct cx23885_dev *dev;

	if (sd == NULL)
		return;

	dev = to_cx23885(sd->v4l2_dev);

	/* Only the IR subdevice's notifications are of interest */
	if (sd != dev->sd_ir)
		return;

	switch (notification) {
	case V4L2_SUBDEV_IR_RX_NOTIFY:
		cx23885_ir_rx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	case V4L2_SUBDEV_IR_TX_NOTIFY:
		cx23885_ir_tx_v4l2_dev_notify(sd, *(u32 *)arg);
		break;
	}
}
/*
 * Set up the deferred-work handlers (AV core, IR RX/TX) and install
 * the v4l2_device notify callback so subdevice notifications can be
 * routed by cx23885_v4l2_dev_notify().
 */
static void cx23885_v4l2_dev_notify_init(struct cx23885_dev *dev)
{
	INIT_WORK(&dev->cx25840_work, cx23885_av_work_handler);
	INIT_WORK(&dev->ir_rx_work, cx23885_ir_rx_work_handler);
	INIT_WORK(&dev->ir_tx_work, cx23885_ir_tx_work_handler);
	dev->v4l2_dev.notify = cx23885_v4l2_dev_notify;
}
/* True when the board routes TS port B to the cx23417 MPEG encoder */
static inline int encoder_on_portb(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portb == CX23885_MPEG_ENCODER;
}
/* True when the board routes TS port C to the cx23417 MPEG encoder */
static inline int encoder_on_portc(struct cx23885_dev *dev)
{
	return cx23885_boards[dev->board].portc == CX23885_MPEG_ENCODER;
}
/* The mask represents 32 different GPIOs. GPIOs are split across multiple
 * registers depending on the board configuration (and on whether the
 * 417 encoder, with its own GPIOs, is present). Each GPIO bit will
 * be pushed into the correct hardware register, regardless of the
 * physical location. Certain registers are shared, so we sanity check
 * and report errors if we think we're tampering with a GPIO that might
 * be assigned to the encoder (and used for the host bus).
 *
 * GPIO 2 thru 0 - On the cx23885 bridge
 * GPIO 18 thru 3 - On the cx23417 host bus interface
 * GPIO 23 thru 19 - On the cx25840 a/v core
 */
/*
 * Drive high the GPIO bits selected by @mask, routing each bit to the
 * register that owns it (bridge GP0_IO for bits 2:0, MC417_RWD for
 * bits 18:3).  Bits 23:19 (cx25840) are not yet supported.
 */
void cx23885_gpio_set(struct cx23885_dev *dev, u32 mask)
{
	/* GPIO 2:0 live on the bridge itself */
	if (mask & 0x00000007)
		cx_set(GP0_IO, mask & 0x00000007);

	/* GPIO 18:3 sit on the cx23417 host bus interface */
	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Setting GPIO on encoder ports\n",
				dev->name);
		cx_set(MC417_RWD, (mask & 0x0007fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);
}
/*
 * Drive low the GPIO bits selected by @mask, routing each bit to the
 * register that owns it (bridge GP0_IO for bits 2:0, MC417_RWD for
 * bits 18:3).  Bits 23:19 (cx25840) are not yet supported.
 */
void cx23885_gpio_clear(struct cx23885_dev *dev, u32 mask)
{
	/* GPIO 2:0 live on the bridge itself */
	if (mask & 0x00000007)
		cx_clear(GP0_IO, mask & 0x00000007);

	/* GPIO 18:3 sit on the cx23417 host bus interface */
	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Clearing GPIO moving on encoder ports\n",
				dev->name);
		cx_clear(MC417_RWD, (mask & 0x0007fff8) >> 3);
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);
}
/*
 * Read GPIO input state for the bits in @mask.  Only one register
 * range is serviced per call: bridge GPIOs (2:0) take precedence, so
 * a mask spanning both the bridge and MC417 ranges returns bridge
 * bits only.  Values are returned in their logical bit positions.
 */
u32 cx23885_gpio_get(struct cx23885_dev *dev, u32 mask)
{
	/* Bridge GPIO input levels are reported at GP0_IO bits 10:8 */
	if (mask & 0x00000007)
		return (cx_read(GP0_IO) >> 8) & mask & 0x7;

	if (mask & 0x0007fff8) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Reading GPIO moving on encoder ports\n",
				dev->name);
		/* shift MC417 bits back to their logical positions 18:3 */
		return (cx_read(MC417_RWD) & ((mask & 0x7fff8) >> 3)) << 3;
	}

	/* TODO: 23-19 */
	if (mask & 0x00f80000)
		printk(KERN_INFO "%s: Unsupported\n", dev->name);

	return 0;
}
/*
 * Configure the direction of the GPIO bits in @mask: output when
 * @asoutput is non-zero, input otherwise.  Bridge direction bits live
 * at GP0_IO[18:16]; MC417 direction is controlled by MC417_OEN.
 * Bits 23:19 (cx25840) are not yet supported.
 */
void cx23885_gpio_enable(struct cx23885_dev *dev, u32 mask, int asoutput)
{
	u32 bridge_bits = mask & 0x00000007;
	u32 mc417_bits = mask & 0x0007fff8;

	/* Bridge GPIO direction bits sit 16 positions up in GP0_IO */
	if (bridge_bits) {
		if (asoutput)
			cx_set(GP0_IO, bridge_bits << 16);
		else
			cx_clear(GP0_IO, bridge_bits << 16);
	}

	if (mc417_bits) {
		if (encoder_on_portb(dev) || encoder_on_portc(dev))
			printk(KERN_ERR
				"%s: Enabling GPIO on encoder ports\n",
				dev->name);
	}

	/* MC417_OEN is active low for output, write 1 for an input */
	if (mc417_bits) {
		if (asoutput)
			cx_clear(MC417_OEN, mc417_bits >> 3);
		else
			cx_set(MC417_OEN, mc417_bits >> 3);
	}

	/* TODO: 23-19 */
}
/*
 * PCI probe: allocate per-device state, register the v4l2 device, map
 * and set up the hardware, hook the shared IRQ and unmask any
 * board-specific GPIO interrupt sources.
 *
 * Error unwinding is strictly reverse order of setup:
 * fail_irq -> fail_unreg -> fail_free.
 */
static int __devinit cx23885_initdev(struct pci_dev *pci_dev,
				     const struct pci_device_id *pci_id)
{
	struct cx23885_dev *dev;
	int err;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (NULL == dev)
		return -ENOMEM;

	err = v4l2_device_register(&pci_dev->dev, &dev->v4l2_dev);
	if (err < 0)
		goto fail_free;

	/* Prepare to handle notifications from subdevices */
	cx23885_v4l2_dev_notify_init(dev);

	/* pci init */
	dev->pci = pci_dev;
	if (pci_enable_device(pci_dev)) {
		err = -EIO;
		goto fail_unreg;
	}

	if (cx23885_dev_setup(dev) < 0) {
		err = -EINVAL;
		goto fail_unreg;
	}

	/* print pci info */
	dev->pci_rev = pci_dev->revision;
	pci_read_config_byte(pci_dev, PCI_LATENCY_TIMER, &dev->pci_lat);
	printk(KERN_INFO "%s/0: found at %s, rev: %d, irq: %d, "
	       "latency: %d, mmio: 0x%llx\n", dev->name,
	       pci_name(pci_dev), dev->pci_rev, pci_dev->irq,
	       dev->pci_lat,
	       (unsigned long long)pci_resource_start(pci_dev, 0));

	pci_set_master(pci_dev);
	if (!pci_dma_supported(pci_dev, 0xffffffff)) {
		printk("%s/0: Oops: no 32bit PCI DMA ???\n", dev->name);
		err = -EIO;
		goto fail_irq;
	}

	err = request_irq(pci_dev->irq, cx23885_irq,
			  IRQF_SHARED | IRQF_DISABLED, dev->name, dev);
	if (err < 0) {
		printk(KERN_ERR "%s: can't get IRQ %d\n",
		       dev->name, pci_dev->irq);
		goto fail_irq;
	}

	/* NetUP boards deliver CI/CAM events on GPIO pins - unmask them */
	switch (dev->board) {
	case CX23885_BOARD_NETUP_DUAL_DVBS2_CI:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO1 | PCI_MSK_GPIO0);
		break;
	case CX23885_BOARD_NETUP_DUAL_DVB_T_C_CI_RF:
		cx23885_irq_add_enable(dev, PCI_MSK_GPIO0);
		break;
	}

	/*
	 * The CX2388[58] IR controller can start firing interrupts when
	 * enabled, so these have to take place after the cx23885_irq() handler
	 * is hooked up by the call to request_irq() above.
	 */
	cx23885_ir_pci_int_enable(dev);
	cx23885_input_init(dev);

	return 0;

fail_irq:
	cx23885_dev_unregister(dev);
fail_unreg:
	v4l2_device_unregister(&dev->v4l2_dev);
fail_free:
	kfree(dev);
	return err;
}
/*
 * PCI remove: quiesce input/IR and the bridge, then release resources
 * in reverse order of cx23885_initdev().
 */
static void __devexit cx23885_finidev(struct pci_dev *pci_dev)
{
	struct v4l2_device *v4l2_dev = pci_get_drvdata(pci_dev);
	struct cx23885_dev *dev = to_cx23885(v4l2_dev);

	cx23885_input_fini(dev);
	cx23885_ir_fini(dev);

	cx23885_shutdown(dev);

	pci_disable_device(pci_dev);

	/* unregister stuff */
	free_irq(pci_dev->irq, dev);

	cx23885_dev_unregister(dev);
	v4l2_device_unregister(v4l2_dev);
	kfree(dev);
}
/* PCI IDs this driver binds to; the exact board is probed separately. */
static struct pci_device_id cx23885_pci_tbl[] = {
	{
		/* CX23885 */
		.vendor       = 0x14f1,
		.device       = 0x8852,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* CX23887 Rev 2 */
		.vendor       = 0x14f1,
		.device       = 0x8880,
		.subvendor    = PCI_ANY_ID,
		.subdevice    = PCI_ANY_ID,
	}, {
		/* --- end of list --- */
	}
};
MODULE_DEVICE_TABLE(pci, cx23885_pci_tbl);
/* PM support is not implemented; suspend/resume deliberately NULL. */
static struct pci_driver cx23885_pci_driver = {
	.name     = "cx23885",
	.id_table = cx23885_pci_tbl,
	.probe    = cx23885_initdev,
	.remove   = __devexit_p(cx23885_finidev),
	/* TODO */
	.suspend  = NULL,
	.resume   = NULL,
};
/* Module load: announce the driver version, then register with PCI. */
static int __init cx23885_init(void)
{
	int major = (CX23885_VERSION_CODE >> 16) & 0xff;
	int minor = (CX23885_VERSION_CODE >> 8) & 0xff;
	int patch = CX23885_VERSION_CODE & 0xff;

	printk(KERN_INFO "cx23885 driver version %d.%d.%d loaded\n",
	       major, minor, patch);
#ifdef SNAPSHOT
	printk(KERN_INFO "cx23885: snapshot date %04d-%02d-%02d\n",
	       SNAPSHOT/10000, (SNAPSHOT/100)%100, SNAPSHOT%100);
#endif
	return pci_register_driver(&cx23885_pci_driver);
}
/* Module unload: drop the PCI driver registration. */
static void __exit cx23885_fini(void)
{
	pci_unregister_driver(&cx23885_pci_driver);
}

module_init(cx23885_init);
module_exit(cx23885_fini);
/* ----------------------------------------------------------- */
| gpl-2.0 |
sattarvoybek/android_kernel_zte_p839f30 | drivers/platform/x86/toshiba_bluetooth.c | 2379 | 3535 | /*
* Toshiba Bluetooth Enable Driver
*
* Copyright (C) 2009 Jes Sorensen <Jes.Sorensen@gmail.com>
*
* Thanks to Matthew Garrett for background info on ACPI innards which
* normal people aren't meant to understand :-)
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Note the Toshiba Bluetooth RFKill switch seems to be a strange
* fish. It only provides a BT event when the switch is flipped to
* the 'on' position. When flipping it to 'off', the USB device is
* simply pulled away underneath us, without any BT event being
* delivered.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <acpi/acpi_bus.h>
#include <acpi/acpi_drivers.h>
MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>");
MODULE_DESCRIPTION("Toshiba Laptop ACPI Bluetooth Enable Driver");
MODULE_LICENSE("GPL");
static int toshiba_bt_rfkill_add(struct acpi_device *device);
static int toshiba_bt_rfkill_remove(struct acpi_device *device);
static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event);

/* TOS6205 is the ACPI node for the Toshiba Bluetooth enable switch. */
static const struct acpi_device_id bt_device_ids[] = {
	{ "TOS6205", 0},
	{ "", 0},
};
MODULE_DEVICE_TABLE(acpi, bt_device_ids);

#ifdef CONFIG_PM_SLEEP
static int toshiba_bt_resume(struct device *dev);
#endif
/* No suspend hook needed: only resume must re-enable the device. */
static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume);

static struct acpi_driver toshiba_bt_rfkill_driver = {
	.name =		"Toshiba BT",
	.class =	"Toshiba",
	.ids =		bt_device_ids,
	.ops =		{
				.add =		toshiba_bt_rfkill_add,
				.remove =	toshiba_bt_rfkill_remove,
				.notify =	toshiba_bt_rfkill_notify,
			},
	.owner =	THIS_MODULE,
	.drv.pm =	&toshiba_bt_pm,
};
/*
 * Power up the Bluetooth USB device behind the ACPI rfkill switch.
 *
 * Returns 0 when the switch is off (nothing to do) or when the device
 * was re-enabled, a negative errno on failure.
 *
 * Fixes: the old code reported success if EITHER the USB attach (AUSB)
 * or the power-on (BTPO) method succeeded, so a device that attached
 * but never powered up was still treated as enabled; both must succeed.
 * It also returned a raw positive acpi_status where callers expect a
 * negative errno.
 */
static int toshiba_bluetooth_enable(acpi_handle handle)
{
	acpi_status res1, res2;
	u64 result;

	/*
	 * Query ACPI to verify RFKill switch is set to 'on'.
	 * If not, we return silently, no need to report it as
	 * an error.
	 */
	res1 = acpi_evaluate_integer(handle, "BTST", NULL, &result);
	if (ACPI_FAILURE(res1))
		return -ENXIO;	/* don't leak acpi_status as an errno */
	if (!(result & 0x01))
		return 0;

	pr_info("Re-enabling Toshiba Bluetooth\n");
	res1 = acpi_evaluate_object(handle, "AUSB", NULL, NULL);
	res2 = acpi_evaluate_object(handle, "BTPO", NULL, NULL);
	/* Both the USB attach and the power-on must have worked. */
	if (!ACPI_FAILURE(res1) && !ACPI_FAILURE(res2))
		return 0;

	pr_warn("Failed to re-enable Toshiba Bluetooth\n");

	return -ENODEV;
}
/* ACPI notify: the switch only signals 'on', so just try to enable. */
static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event)
{
	toshiba_bluetooth_enable(device->handle);
}

#ifdef CONFIG_PM_SLEEP
/* Resume: re-run the enable sequence for the rfkill device. */
static int toshiba_bt_resume(struct device *dev)
{
	return toshiba_bluetooth_enable(to_acpi_device(dev)->handle);
}
#endif
/*
 * Bind to the TOS6205 node.  Returns 0 only when the device is really
 * present (per _STA) and could be enabled; -ENODEV otherwise.
 */
static int toshiba_bt_rfkill_add(struct acpi_device *device)
{
	acpi_status status;
	u64 bt_present;
	int result = -ENODEV;

	/*
	 * Some Toshiba laptops may have a fake TOS6205 device in
	 * their ACPI BIOS, so query the _STA method to see if there
	 * is really anything there, before trying to enable it.
	 */
	status = acpi_evaluate_integer(device->handle, "_STA", NULL,
				       &bt_present);

	if (!ACPI_FAILURE(status) && bt_present) {
		pr_info("Detected Toshiba ACPI Bluetooth device - "
			"installing RFKill handler\n");
		result = toshiba_bluetooth_enable(device->handle);
	}

	return result;
}
/* Nothing is allocated at add time, so removal is a no-op. */
static int toshiba_bt_rfkill_remove(struct acpi_device *device)
{
	/* clean up */
	return 0;
}

module_acpi_driver(toshiba_bt_rfkill_driver);
| gpl-2.0 |
Coolexe/shooter-ics-crc-3.0.16-294f767 | arch/cris/arch-v32/kernel/time.c | 2891 | 8850 | /*
* linux/arch/cris/arch-v32/kernel/time.c
*
* Copyright (C) 2003-2010 Axis Communications AB
*
*/
#include <linux/timex.h>
#include <linux/time.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/swap.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/cpufreq.h>
#include <asm/types.h>
#include <asm/signal.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/rtc.h>
#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <hwregs/reg_map.h>
#include <hwregs/reg_rdwr.h>
#include <hwregs/timer_defs.h>
#include <hwregs/intr_vect_defs.h>
#ifdef CONFIG_CRIS_MACH_ARTPEC3
#include <hwregs/clkgen_defs.h>
#endif
/* Watchdog defines */
#define ETRAX_WD_KEY_MASK 0x7F /* key is 7 bit */
#define ETRAX_WD_HZ 763 /* watchdog counts at 763 Hz */
/* Number of 763 counts before watchdog bites */
#define ETRAX_WD_CNT ((2*ETRAX_WD_HZ)/HZ + 1)
/* Register the continuos readonly timer available in FS and ARTPEC-3. */
/* Read the free-running, read-only 32-bit hardware time counter. */
static cycle_t read_cont_rotime(struct clocksource *cs)
{
	return (u32)REG_RD(timer, regi_timer0, r_time);
}

/* Continuous counter exported as a clocksource (rating 300). */
static struct clocksource cont_rotime = {
	.name   = "crisv32_rotime",
	.rating = 300,
	.read   = read_cont_rotime,
	.mask   = CLOCKSOURCE_MASK(32),
	.shift  = 10,
	.flags  = CLOCK_SOURCE_IS_CONTINUOUS,
};
/* Register the rotime clocksource; the counter runs at 100000 kHz. */
static int __init etrax_init_cont_rotime(void)
{
	cont_rotime.mult = clocksource_khz2mult(100000, cont_rotime.shift);
	clocksource_register(&cont_rotime);
	return 0;
}
arch_initcall(etrax_init_cont_rotime);
/* Per-CPU timer register banks: timer0 for CPU0, timer2 for CPU1 (SMP). */
unsigned long timer_regs[NR_CPUS] =
{
	regi_timer0,
#ifdef CONFIG_SMP
	regi_timer2
#endif
};

extern int set_rtc_mmss(unsigned long nowtime);
extern int have_rtc;

#ifdef CONFIG_CPU_FREQ
/* Rescales the tick timer divider on CPU frequency changes (see below). */
static int
cris_time_freq_notifier(struct notifier_block *nb, unsigned long val,
			void *data);

static struct notifier_block cris_time_freq_notifier_block = {
	.notifier_call = cris_time_freq_notifier,
};
#endif
/*
 * Nanoseconds elapsed within the current jiffy, derived from how far
 * the down-counting tick timer has moved from its TIMER0_DIV reload.
 */
unsigned long get_ns_in_jiffie(void)
{
	reg_timer_r_tmr0_data count;

	count = REG_RD(timer, regi_timer0, r_tmr0_data);
	/* counter runs at 100 MHz => 10 ns per tick */
	return (TIMER0_DIV - count) * 10;
}
/* From timer MDS describing the hardware watchdog:
* 4.3.1 Watchdog Operation
* The watchdog timer is an 8-bit timer with a configurable start value.
* Once started the watchdog counts downwards with a frequency of 763 Hz
* (100/131072 MHz). When the watchdog counts down to 1, it generates an
* NMI (Non Maskable Interrupt), and when it counts down to 0, it resets the
* chip.
*/
/* This gives us 1.3 ms to do something useful when the NMI comes */
/* Right now, starting the watchdog is the same as resetting it */
#define start_watchdog reset_watchdog
#if defined(CONFIG_ETRAX_WATCHDOG)
static short int watchdog_key = 42; /* arbitrary 7 bit number */
#endif
/* Number of pages to consider "out of memory". It is normal that the memory
* is used though, so set this really low. */
#define WATCHDOG_MIN_FREE_PAGES 8
/*
 * Kick (or start) the hardware watchdog.  Each write must carry the
 * bitwise inverse of the previous 7-bit key, so the key is flipped on
 * every kick.  Feeding deliberately stops when free memory falls to
 * WATCHDOG_MIN_FREE_PAGES or below, forcing a reset on OOM.
 */
void reset_watchdog(void)
{
#if defined(CONFIG_ETRAX_WATCHDOG)
	reg_timer_rw_wd_ctrl wd_ctrl = { 0 };

	/* Only keep watchdog happy as long as we have memory left! */
	if(nr_free_pages() > WATCHDOG_MIN_FREE_PAGES) {
		/* Reset the watchdog with the inverse of the old key */
		/* Invert key, which is 7 bits */
		watchdog_key ^= ETRAX_WD_KEY_MASK;
		wd_ctrl.cnt = ETRAX_WD_CNT;
		wd_ctrl.cmd = regk_timer_start;
		wd_ctrl.key = watchdog_key;
		REG_WR(timer, regi_timer0, rw_wd_ctrl, wd_ctrl);
	}
#endif
}
/* stop the watchdog - we still need the correct key */
/* The stop command requires a freshly inverted key, just like a kick. */
void stop_watchdog(void)
{
#if defined(CONFIG_ETRAX_WATCHDOG)
	reg_timer_rw_wd_ctrl wd_ctrl = { 0 };
	watchdog_key ^= ETRAX_WD_KEY_MASK; /* invert key, which is 7 bits */
	wd_ctrl.cnt = ETRAX_WD_CNT;
	wd_ctrl.cmd = regk_timer_stop;
	wd_ctrl.key = watchdog_key;
	REG_WR(timer, regi_timer0, rw_wd_ctrl, wd_ctrl);
#endif
}
extern void show_registers(struct pt_regs *regs);

/*
 * NMI path entered ~1.3 ms before the watchdog resets the chip (see
 * timer MDS notes above).  cause_of_death == 0xbedead marks a forced
 * restart: just spin until the reset fires.  Otherwise stop the dog,
 * dump registers, and (unless WATCHDOG_NICE_DOGGY) restart it so the
 * machine still resets afterwards.
 */
void handle_watchdog_bite(struct pt_regs *regs)
{
#if defined(CONFIG_ETRAX_WATCHDOG)
	extern int cause_of_death;

	oops_in_progress = 1;
	printk(KERN_WARNING "Watchdog bite\n");

	/* Check if forced restart or unexpected watchdog */
	if (cause_of_death == 0xbedead) {
#ifdef CONFIG_CRIS_MACH_ARTPEC3
		/* There is a bug in Artpec-3 (voodoo TR 78) that requires
		 * us to go to lower frequency for the reset to be reliable
		 */
		reg_clkgen_rw_clk_ctrl ctrl =
			REG_RD(clkgen, regi_clkgen, rw_clk_ctrl);
		ctrl.pll = 0;
		REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, ctrl);
#endif
		while(1);
	}

	/* Unexpected watchdog, stop the watchdog and dump registers. */
	stop_watchdog();
	printk(KERN_WARNING "Oops: bitten by watchdog\n");
	show_registers(regs);
	oops_in_progress = 0;
#ifndef CONFIG_ETRAX_WATCHDOG_NICE_DOGGY
	reset_watchdog();
#endif
	while(1) /* nothing */;
#endif
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "xtime_update()" routine every clocktick.
*/
extern void cris_do_profile(struct pt_regs *regs);

/*
 * Per-CPU tick handler.  Each CPU acks its own timer bank and feeds
 * the watchdog; only CPU0 advances wall-clock time via xtime_update().
 * Returns IRQ_NONE when the shared interrupt wasn't a tmr0 event.
 */
static inline irqreturn_t timer_interrupt(int irq, void *dev_id)
{
	struct pt_regs *regs = get_irq_regs();
	int cpu = smp_processor_id();
	reg_timer_r_masked_intr masked_intr;
	reg_timer_rw_ack_intr ack_intr = { 0 };

	/* Check if the timer interrupt is for us (a tmr0 int) */
	masked_intr = REG_RD(timer, timer_regs[cpu], r_masked_intr);
	if (!masked_intr.tmr0)
		return IRQ_NONE;

	/* Acknowledge the timer irq. */
	ack_intr.tmr0 = 1;
	REG_WR(timer, timer_regs[cpu], rw_ack_intr, ack_intr);

	/* Reset watchdog otherwise it resets us! */
	reset_watchdog();

	/* Update statistics. */
	update_process_times(user_mode(regs));

	cris_do_profile(regs); /* Save profiling information */

	/* The master CPU is responsible for the time keeping. */
	if (cpu != 0)
		return IRQ_HANDLED;

	/* Call the real timer interrupt handler */
	xtime_update(1);
	return IRQ_HANDLED;
}
/* Timer is IRQF_SHARED so drivers can add stuff to the timer irq chain.
* It needs to be IRQF_DISABLED to make the jiffies update work properly.
*/
/* Shared so other drivers can chain onto the timer irq (see above). */
static struct irqaction irq_timer = {
	.handler = timer_interrupt,
	.flags   = IRQF_SHARED | IRQF_DISABLED,
	.name    = "timer"
};
/*
 * Program this CPU's tick timer: load the divider, start the counter
 * and unmask its interrupt in the local timer bank.
 */
void __init cris_timer_init(void)
{
	int cpu = smp_processor_id();
	reg_timer_rw_tmr0_ctrl tmr0_ctrl = { 0 };
	reg_timer_rw_tmr0_div tmr0_div = TIMER0_DIV;
	reg_timer_rw_intr_mask timer_intr_mask;

	/* Setup the etrax timers.
	 * Base frequency is 100MHz, divider 1000000 -> 100 HZ
	 * We use timer0, so timer1 is free.
	 * The trig timer is used by the fasttimer API if enabled.
	 */

	tmr0_ctrl.op = regk_timer_ld;
	tmr0_ctrl.freq = regk_timer_f100;
	REG_WR(timer, timer_regs[cpu], rw_tmr0_div, tmr0_div);
	REG_WR(timer, timer_regs[cpu], rw_tmr0_ctrl, tmr0_ctrl); /* Load */
	tmr0_ctrl.op = regk_timer_run;
	REG_WR(timer, timer_regs[cpu], rw_tmr0_ctrl, tmr0_ctrl); /* Start */

	/* Enable the timer irq. */
	timer_intr_mask = REG_RD(timer, timer_regs[cpu], rw_intr_mask);
	timer_intr_mask.tmr0 = 1;
	REG_WR(timer, timer_regs[cpu], rw_intr_mask, timer_intr_mask);
}
/*
 * Boot-time clock setup: probe the RTC, start the per-CPU tick timer,
 * unmask timer0 in the global interrupt config, install the tick
 * handler, then optionally arm the watchdog and cpufreq notifier.
 */
void __init time_init(void)
{
	reg_intr_vect_rw_mask intr_mask;

	/* Probe for the RTC and read it if it exists.
	 * Before the RTC can be probed the loops_per_usec variable needs
	 * to be initialized to make usleep work. A better value for
	 * loops_per_usec is calculated by the kernel later once the
	 * clock has started.
	 */
	loops_per_usec = 50;

	if(RTC_INIT() < 0)
		have_rtc = 0;
	else
		have_rtc = 1;

	/* Start CPU local timer. */
	cris_timer_init();

	/* Enable the timer irq in global config. */
	intr_mask = REG_RD_VECT(intr_vect, regi_irq, rw_mask, 1);
	intr_mask.timer0 = 1;
	REG_WR_VECT(intr_vect, regi_irq, rw_mask, 1, intr_mask);

	/* Now actually register the timer irq handler that calls
	 * timer_interrupt(). */
	setup_irq(TIMER0_INTR_VECT, &irq_timer);

	/* Enable watchdog if we should use one. */

#if defined(CONFIG_ETRAX_WATCHDOG)
	printk(KERN_INFO "Enabling watchdog...\n");
	start_watchdog();

	/* If we use the hardware watchdog, we want to trap it as an NMI
	 * and dump registers before it resets us. For this to happen, we
	 * must set the "m" NMI enable flag (which once set, is unset only
	 * when an NMI is taken). */
	{
		unsigned long flags;
		local_save_flags(flags);
		flags |= (1<<30); /* NMI M flag is at bit 30 */
		local_irq_restore(flags);
	}
#endif

#ifdef CONFIG_CPU_FREQ
	cpufreq_register_notifier(&cris_time_freq_notifier_block,
		CPUFREQ_TRANSITION_NOTIFIER);
#endif
}
#ifdef CONFIG_CPU_FREQ
/*
 * CPU frequency transition hook: after a frequency change, rescale the
 * tick timer divider (presumably freqs->new is in kHz, per cpufreq
 * convention - confirm) so the tick rate stays at HZ.
 *
 * Fix: the local register-snapshot variable was named "data", shadowing
 * the "void *data" callback argument that this function also consumes.
 * Renamed to tmr_data; behavior unchanged.
 */
static int
cris_time_freq_notifier(struct notifier_block *nb, unsigned long val,
			void *data)
{
	struct cpufreq_freqs *freqs = data;

	if (val == CPUFREQ_POSTCHANGE) {
		reg_timer_r_tmr0_data tmr_data;
		reg_timer_rw_tmr0_div div = (freqs->new * 500) / HZ;

		/*
		 * Wait until the down-counter is close to reload before
		 * swapping the divider, to avoid one distorted tick.
		 */
		do {
			tmr_data = REG_RD(timer, timer_regs[freqs->cpu],
				r_tmr0_data);
		} while (tmr_data > 20);

		REG_WR(timer, timer_regs[freqs->cpu], rw_tmr0_div, div);
	}

	return 0;
}
#endif
| gpl-2.0 |
LeeDroid-/LeeDrOiD-Hima-M9 | drivers/dma/bestcomm/bestcomm.c | 3147 | 13417 | /*
* Driver for MPC52xx processor BestComm peripheral controller
*
*
* Copyright (C) 2006-2007 Sylvain Munaut <tnt@246tNt.com>
* Copyright (C) 2005 Varma Electronics Oy,
* ( by Andrey Volkov <avolkov@varma-el.com> )
* Copyright (C) 2003-2004 MontaVista, Software, Inc.
* ( by Dale Farnsworth <dfarnsworth@mvista.com> )
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
* kind, whether express or implied.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/mpc52xx.h>
#include <linux/fsl/bestcomm/sram.h>
#include <linux/fsl/bestcomm/bestcomm_priv.h>
#include "linux/fsl/bestcomm/bestcomm.h"
#define DRIVER_NAME "bestcomm-core"
/* MPC5200 device tree match tables */
static struct of_device_id mpc52xx_sram_ids[] = {
	{ .compatible = "fsl,mpc5200-sram", },
	{ .compatible = "mpc5200-sram", },
	{}
};

/* Single global engine instance; stays NULL until probe succeeds. */
struct bcom_engine *bcom_eng = NULL;
EXPORT_SYMBOL_GPL(bcom_eng);	/* needed for inline functions */
/* ======================================================================== */
/* Public and private API */
/* ======================================================================== */
/* Private API */
/*
 * Allocate a BestComm task slot plus, optionally, its buffer
 * descriptor ring and trailing private data.
 *
 * @bd_count:  number of buffer descriptors (0 = none)
 * @bd_size:   size in bytes of one descriptor
 * @priv_size: extra private bytes appended after struct bcom_task
 *
 * A TDT entry's 'stop' word doubles as an in-use marker: a dummy
 * non-zero address written under bcom_eng->lock reserves the slot.
 * Returns NULL on failure (no free slot, no memory, no IRQ mapping).
 */
struct bcom_task *
bcom_task_alloc(int bd_count, int bd_size, int priv_size)
{
	int i, tasknum = -1;
	struct bcom_task *tsk;

	/* Don't try to do anything if bestcomm init failed */
	if (!bcom_eng)
		return NULL;

	/* Get and reserve a task num */
	spin_lock(&bcom_eng->lock);

	for (i=0; i<BCOM_MAX_TASKS; i++)
		if (!bcom_eng->tdt[i].stop) {	/* we use stop as a marker */
			bcom_eng->tdt[i].stop = 0xfffffffful; /* dummy addr */
			tasknum = i;
			break;
		}

	spin_unlock(&bcom_eng->lock);

	if (tasknum < 0)
		return NULL;

	/* Allocate our structure */
	tsk = kzalloc(sizeof(struct bcom_task) + priv_size, GFP_KERNEL);
	if (!tsk)
		goto error;

	tsk->tasknum = tasknum;
	if (priv_size)
		tsk->priv = (void*)tsk + sizeof(struct bcom_task);

	/* Get IRQ of that task */
	tsk->irq = irq_of_parse_and_map(bcom_eng->ofnode, tsk->tasknum);
	if (tsk->irq == NO_IRQ)
		goto error;

	/* Init the BDs, if needed */
	if (bd_count) {
		tsk->cookie = kmalloc(sizeof(void*) * bd_count, GFP_KERNEL);
		if (!tsk->cookie)
			goto error;

		tsk->bd = bcom_sram_alloc(bd_count * bd_size, 4, &tsk->bd_pa);
		if (!tsk->bd)
			goto error;
		memset(tsk->bd, 0x00, bd_count * bd_size);

		tsk->num_bd = bd_count;
		tsk->bd_size = bd_size;
	}

	return tsk;

	/* Error path: undo partial allocation and release the slot */
error:
	if (tsk) {
		if (tsk->irq != NO_IRQ)
			irq_dispose_mapping(tsk->irq);
		bcom_sram_free(tsk->bd);
		kfree(tsk->cookie);
		kfree(tsk);
	}

	bcom_eng->tdt[tasknum].stop = 0;

	return NULL;
}
EXPORT_SYMBOL_GPL(bcom_task_alloc);
/*
 * Release a task from bcom_task_alloc(): stop it, clear its TDT entry
 * (start = stop = 0 marks the slot free again) and free its memory.
 */
void
bcom_task_free(struct bcom_task *tsk)
{
	/* Stop the task */
	bcom_disable_task(tsk->tasknum);

	/* Clear TDT */
	bcom_eng->tdt[tsk->tasknum].start = 0;
	bcom_eng->tdt[tsk->tasknum].stop  = 0;

	/* Free everything */
	irq_dispose_mapping(tsk->irq);
	bcom_sram_free(tsk->bd);
	kfree(tsk->cookie);
	kfree(tsk);
}
EXPORT_SYMBOL_GPL(bcom_task_free);
/*
 * Load (or reload) task microcode and hook up the TDT entry.
 *
 * @task:       task number (0 .. BCOM_MAX_TASKS-1)
 * @task_image: image starting with a struct bcom_task_header
 *
 * On first load the descriptor area is allocated from SRAM and
 * tdt->start/stop are set; on reload the image must carry the same
 * descriptor count as before.  Returns 0 or a negative errno.
 */
int
bcom_load_image(int task, u32 *task_image)
{
	struct bcom_task_header *hdr = (struct bcom_task_header *)task_image;
	struct bcom_tdt *tdt;
	u32 *desc, *var, *inc;
	u32 *desc_src, *var_src, *inc_src;

	/* Safety checks */
	if (hdr->magic != BCOM_TASK_MAGIC) {
		printk(KERN_ERR DRIVER_NAME
			": Trying to load invalid microcode\n");
		return -EINVAL;
	}

	if ((task < 0) || (task >= BCOM_MAX_TASKS)) {
		printk(KERN_ERR DRIVER_NAME
			": Trying to load invalid task %d\n", task);
		return -EINVAL;
	}

	/* Initial load or reload */
	tdt = &bcom_eng->tdt[task];

	if (tdt->start) {
		desc = bcom_task_desc(task);
		if (hdr->desc_size != bcom_task_num_descs(task)) {
			printk(KERN_ERR DRIVER_NAME
				": Trying to reload wrong task image "
				"(%d size %d/%d)!\n",
				task,
				hdr->desc_size,
				bcom_task_num_descs(task));
			return -EINVAL;
		}
	} else {
		phys_addr_t start_pa;

		desc = bcom_sram_alloc(hdr->desc_size * sizeof(u32), 4, &start_pa);
		if (!desc)
			return -ENOMEM;

		/* stop points at the last descriptor word */
		tdt->start = start_pa;
		tdt->stop = start_pa + ((hdr->desc_size-1) * sizeof(u32));
	}

	var = bcom_task_var(task);
	inc = bcom_task_inc(task);

	/* Clear & copy */
	memset(var, 0x00, BCOM_VAR_SIZE);
	memset(inc, 0x00, BCOM_INC_SIZE);

	/* Image layout: header, then descriptors, vars, increments */
	desc_src = (u32 *)(hdr + 1);
	var_src = desc_src + hdr->desc_size;
	inc_src = var_src + hdr->var_size;

	memcpy(desc, desc_src, hdr->desc_size * sizeof(u32));
	memcpy(var + hdr->first_var, var_src, hdr->var_size * sizeof(u32));
	memcpy(inc, inc_src, hdr->inc_size * sizeof(u32));

	return 0;
}
EXPORT_SYMBOL_GPL(bcom_load_image);
/*
 * Set the initiator (request line) of a task, both in its TCR and in
 * every non-ALWAYS DRD of the loaded microcode (setting the TCR alone
 * is not sufficient - see the comment in the body).
 */
void
bcom_set_initiator(int task, int initiator)
{
	int i;
	int num_descs;
	u32 *desc;
	int next_drd_has_initiator;

	bcom_set_tcr_initiator(task, initiator);

	/* Just setting tcr is apparently not enough due to some problem */
	/* with it. So we just go thru all the microcode and replace in  */
	/* the DRD directly */

	desc = bcom_task_desc(task);
	next_drd_has_initiator = 1;
	num_descs = bcom_task_num_descs(task);

	for (i=0; i<num_descs; i++, desc++) {
		if (!bcom_desc_is_drd(*desc))
			continue;
		if (next_drd_has_initiator)
			if (bcom_desc_initiator(*desc) != BCOM_INITIATOR_ALWAYS)
				bcom_set_desc_initiator(desc, initiator);
		/* an extended DRD's successor carries no initiator field */
		next_drd_has_initiator = !bcom_drd_is_extended(*desc);
	}
}
EXPORT_SYMBOL_GPL(bcom_set_initiator);
/* Public API */

/* Start execution of @tsk on the BestComm engine. */
void
bcom_enable(struct bcom_task *tsk)
{
	bcom_enable_task(tsk->tasknum);
}
EXPORT_SYMBOL_GPL(bcom_enable);

/* Stop execution of @tsk. */
void
bcom_disable(struct bcom_task *tsk)
{
	bcom_disable_task(tsk->tasknum);
}
EXPORT_SYMBOL_GPL(bcom_disable);
/* ======================================================================== */
/* Engine init/cleanup */
/* ======================================================================== */
/* Function Descriptor table */
/* this will need to be updated if Freescale changes their task code FDT */
/*
 * EU#3 function-descriptor opcodes, copied verbatim into SRAM at
 * FDT[48..63] by bcom_engine_init().  Values come from Freescale's
 * task-code build (see note above about keeping them in sync).
 */
static u32 fdt_ops[] = {
	0xa0045670,	/* FDT[48] - load_acc()   */
	0x80045670,	/* FDT[49] - unload_acc() */
	0x21800000,	/* FDT[50] - and()        */
	0x21e00000,	/* FDT[51] - or()         */
	0x21500000,	/* FDT[52] - xor()        */
	0x21400000,	/* FDT[53] - andn()       */
	0x21500000,	/* FDT[54] - not()        */
	0x20400000,	/* FDT[55] - add()        */
	0x20500000,	/* FDT[56] - sub()        */
	0x20800000,	/* FDT[57] - lsh()        */
	0x20a00000,	/* FDT[58] - rsh()        */
	0xc0170000,	/* FDT[59] - crc8()       */
	0xc0145670,	/* FDT[60] - crc16()      */
	0xc0345670,	/* FDT[61] - crc32()      */
	0xa0076540,	/* FDT[62] - endian32()   */
	0xa0000760,	/* FDT[63] - endian16()   */
};
/*
 * One-time engine bring-up: carve FDT/TDT/context/var zones out of
 * SRAM, point every task's TDT entry at its slices, install the EU#3
 * FDT ops and publish the TDT base address to the hardware.
 */
static int bcom_engine_init(void)
{
	int task;
	phys_addr_t tdt_pa, ctx_pa, var_pa, fdt_pa;
	unsigned int tdt_size, ctx_size, var_size, fdt_size;

	/* Allocate & clear SRAM zones for FDT, TDTs, contexts and vars/incs */
	tdt_size = BCOM_MAX_TASKS * sizeof(struct bcom_tdt);
	ctx_size = BCOM_MAX_TASKS * BCOM_CTX_SIZE;
	var_size = BCOM_MAX_TASKS * (BCOM_VAR_SIZE + BCOM_INC_SIZE);
	fdt_size = BCOM_FDT_SIZE;

	bcom_eng->tdt = bcom_sram_alloc(tdt_size, sizeof(u32), &tdt_pa);
	bcom_eng->ctx = bcom_sram_alloc(ctx_size, BCOM_CTX_ALIGN, &ctx_pa);
	bcom_eng->var = bcom_sram_alloc(var_size, BCOM_VAR_ALIGN, &var_pa);
	bcom_eng->fdt = bcom_sram_alloc(fdt_size, BCOM_FDT_ALIGN, &fdt_pa);

	if (!bcom_eng->tdt || !bcom_eng->ctx || !bcom_eng->var || !bcom_eng->fdt) {
		printk(KERN_ERR "DMA: SRAM alloc failed in engine init !\n");

		/* NOTE(review): assumes bcom_sram_free(NULL) is a no-op */
		bcom_sram_free(bcom_eng->tdt);
		bcom_sram_free(bcom_eng->ctx);
		bcom_sram_free(bcom_eng->var);
		bcom_sram_free(bcom_eng->fdt);

		return -ENOMEM;
	}

	memset(bcom_eng->tdt, 0x00, tdt_size);
	memset(bcom_eng->ctx, 0x00, ctx_size);
	memset(bcom_eng->var, 0x00, var_size);
	memset(bcom_eng->fdt, 0x00, fdt_size);

	/* Copy the FDT for the EU#3 */
	memcpy(&bcom_eng->fdt[48], fdt_ops, sizeof(fdt_ops));

	/* Initialize Task base structure */
	for (task=0; task<BCOM_MAX_TASKS; task++)
	{
		out_be16(&bcom_eng->regs->tcr[task], 0);
		out_8(&bcom_eng->regs->ipr[task], 0);

		bcom_eng->tdt[task].context	= ctx_pa;
		bcom_eng->tdt[task].var	= var_pa;
		bcom_eng->tdt[task].fdt	= fdt_pa;

		var_pa += BCOM_VAR_SIZE + BCOM_INC_SIZE;
		ctx_pa += BCOM_CTX_SIZE;
	}

	out_be32(&bcom_eng->regs->taskBar, tdt_pa);

	/* Init 'always' initiator */
	out_8(&bcom_eng->regs->ipr[BCOM_INITIATOR_ALWAYS], BCOM_IPR_ALWAYS);

	/* Disable COMM Bus Prefetch on the original 5200; it's broken */
	if ((mfspr(SPRN_SVR) & MPC5200_SVR_MASK) == MPC5200_SVR)
		bcom_disable_prefetch();

	/* Init lock */
	spin_lock_init(&bcom_eng->lock);

	return 0;
}
/* Reverse of bcom_engine_init(): stop every task, return the SRAM. */
static void
bcom_engine_cleanup(void)
{
	int task;

	/* Stop all tasks */
	for (task=0; task<BCOM_MAX_TASKS; task++)
	{
		out_be16(&bcom_eng->regs->tcr[task], 0);
		out_8(&bcom_eng->regs->ipr[task], 0);
	}

	out_be32(&bcom_eng->regs->taskBar, 0ul);

	/* Release the SRAM zones */
	bcom_sram_free(bcom_eng->tdt);
	bcom_sram_free(bcom_eng->ctx);
	bcom_sram_free(bcom_eng->var);
	bcom_sram_free(bcom_eng->fdt);
}
/* ======================================================================== */
/* OF platform driver */
/* ======================================================================== */
/*
 * Probe: initialise SRAM, allocate engine state, map the SDMA register
 * block and bring the engine up.  Error paths unwind in reverse order.
 *
 * Fixes:
 *  - error_release used sizeof(struct mpc52xx_sdma) although the region
 *    was requested with resource_size(&res_bcom); releasing with a
 *    different length than requested leaves the region busy.  Use the
 *    matching length.
 *  - reset the global bcom_eng to NULL after kfree() so the NULL check
 *    in bcom_task_alloc() keeps working after a failed probe.
 */
static int mpc52xx_bcom_probe(struct platform_device *op)
{
	struct device_node *ofn_sram;
	struct resource res_bcom;

	int rv;

	/* Inform user we're ok so far */
	printk(KERN_INFO "DMA: MPC52xx BestComm driver\n");

	/* Get the bestcomm node */
	of_node_get(op->dev.of_node);

	/* Prepare SRAM */
	ofn_sram = of_find_matching_node(NULL, mpc52xx_sram_ids);
	if (!ofn_sram) {
		printk(KERN_ERR DRIVER_NAME ": "
			"No SRAM found in device tree\n");
		rv = -ENODEV;
		goto error_ofput;
	}
	rv = bcom_sram_init(ofn_sram, DRIVER_NAME);
	of_node_put(ofn_sram);

	if (rv) {
		printk(KERN_ERR DRIVER_NAME ": "
			"Error in SRAM init\n");
		goto error_ofput;
	}

	/* Get a clean struct */
	bcom_eng = kzalloc(sizeof(struct bcom_engine), GFP_KERNEL);
	if (!bcom_eng) {
		printk(KERN_ERR DRIVER_NAME ": "
			"Can't allocate state structure\n");
		rv = -ENOMEM;
		goto error_sramclean;
	}

	/* Save the node */
	bcom_eng->ofnode = op->dev.of_node;

	/* Get, reserve & map io */
	if (of_address_to_resource(op->dev.of_node, 0, &res_bcom)) {
		printk(KERN_ERR DRIVER_NAME ": "
			"Can't get resource\n");
		rv = -EINVAL;
		goto error_sramclean;
	}

	if (!request_mem_region(res_bcom.start, resource_size(&res_bcom),
				DRIVER_NAME)) {
		printk(KERN_ERR DRIVER_NAME ": "
			"Can't request registers region\n");
		rv = -EBUSY;
		goto error_sramclean;
	}
	bcom_eng->regs_base = res_bcom.start;

	bcom_eng->regs = ioremap(res_bcom.start, sizeof(struct mpc52xx_sdma));
	if (!bcom_eng->regs) {
		printk(KERN_ERR DRIVER_NAME ": "
			"Can't map registers\n");
		rv = -ENOMEM;
		goto error_release;
	}

	/* Now, do the real init */
	rv = bcom_engine_init();
	if (rv)
		goto error_unmap;

	/* Done ! */
	printk(KERN_INFO "DMA: MPC52xx BestComm engine @%08lx ok !\n",
		(long)bcom_eng->regs_base);

	return 0;

	/* Error path */
error_unmap:
	iounmap(bcom_eng->regs);
error_release:
	/* must match the length passed to request_mem_region() above */
	release_mem_region(res_bcom.start, resource_size(&res_bcom));
error_sramclean:
	kfree(bcom_eng);
	bcom_eng = NULL;	/* bcom_task_alloc() tests this pointer */
	bcom_sram_cleanup();
error_ofput:
	of_node_put(op->dev.of_node);

	printk(KERN_ERR "DMA: MPC52xx BestComm init failed !\n");
	return rv;
}
/*
 * Remove: tear the engine down and release everything probe acquired.
 *
 * NOTE(review): the region is released with sizeof(struct mpc52xx_sdma)
 * while probe requested it with resource_size(); if the device-tree reg
 * length differs, this release is mismatched - worth confirming.
 */
static int mpc52xx_bcom_remove(struct platform_device *op)
{
	/* Clean up the engine */
	bcom_engine_cleanup();

	/* Cleanup SRAM */
	bcom_sram_cleanup();

	/* Release regs */
	iounmap(bcom_eng->regs);
	release_mem_region(bcom_eng->regs_base, sizeof(struct mpc52xx_sdma));

	/* Release the node */
	of_node_put(bcom_eng->ofnode);

	/* Release memory */
	kfree(bcom_eng);
	bcom_eng = NULL;

	return 0;
}
/* Device-tree match: both fsl,-prefixed and legacy compatibles. */
static struct of_device_id mpc52xx_bcom_of_match[] = {
	{ .compatible = "fsl,mpc5200-bestcomm", },
	{ .compatible = "mpc5200-bestcomm", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match);

static struct platform_driver mpc52xx_bcom_of_platform_driver = {
	.probe	= mpc52xx_bcom_probe,
	.remove	= mpc52xx_bcom_remove,
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = mpc52xx_bcom_of_match,
	},
};
/* ======================================================================== */
/* Module */
/* ======================================================================== */
/* Register the platform driver (invoked via subsys_initcall below). */
static int __init
mpc52xx_bcom_init(void)
{
	return platform_driver_register(&mpc52xx_bcom_of_platform_driver);
}

static void __exit
mpc52xx_bcom_exit(void)
{
	platform_driver_unregister(&mpc52xx_bcom_of_platform_driver);
}

/* If we're not a module, we must make sure everything is setup before */
/* anyone tries to use us ... that's why we use subsys_initcall instead */
/* of module_init. */

subsys_initcall(mpc52xx_bcom_init);
module_exit(mpc52xx_bcom_exit);

MODULE_DESCRIPTION("Freescale MPC52xx BestComm DMA");
MODULE_AUTHOR("Sylvain Munaut <tnt@246tNt.com>");
MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
MODULE_AUTHOR("Dale Farnsworth <dfarnsworth@mvista.com>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
whdgmawkd/furnace_kernel_lge_hammerhead | sound/soc/blackfin/bf5xx-ad1836.c | 4939 | 3066 | /*
* File: sound/soc/blackfin/bf5xx-ad1836.c
* Author: Barry Song <Barry.Song@analog.com>
*
* Created: Aug 4 2009
* Description: Board driver for ad1836 sound chip
*
* Bugs: Enter bugs at http://blackfin.uclinux.org/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <sound/pcm_params.h>
#include <asm/blackfin.h>
#include <asm/cacheflush.h>
#include <asm/irq.h>
#include <asm/dma.h>
#include <asm/portmux.h>
#include "../codecs/ad1836.h"
#include "bf5xx-tdm-pcm.h"
#include "bf5xx-tdm.h"
static struct snd_soc_card bf5xx_ad1836;

/*
 * hw_params hook: program the SPORT TDM slot-to-channel routing; the
 * same 8-entry map is used for both TX and RX.
 */
static int bf5xx_ad1836_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	unsigned int channel_map[] = {0, 4, 1, 5, 2, 6, 3, 7};
	int err;

	/* set cpu DAI channel mapping */
	err = snd_soc_dai_set_channel_map(cpu_dai, ARRAY_SIZE(channel_map),
			channel_map, ARRAY_SIZE(channel_map), channel_map);

	return err < 0 ? err : 0;
}
/* Only hw_params is needed; everything else uses ASoC defaults. */
static struct snd_soc_ops bf5xx_ad1836_ops = {
	.hw_params = bf5xx_ad1836_hw_params,
};

/* DSP-A TDM, inverted BCLK/FRM, codec is bit+frame clock master. */
#define BF5XX_AD1836_DAIFMT (SND_SOC_DAIFMT_DSP_A | SND_SOC_DAIFMT_IB_IF | \
				SND_SOC_DAIFMT_CBM_CFM)
/* One DAI link per SPORT (bfin-tdm.0 / bfin-tdm.1); the board config
 * selects which entry is used - only one is ever registered. */
static struct snd_soc_dai_link bf5xx_ad1836_dai[] = {
	{
		.name = "ad1836",
		.stream_name = "AD1836",
		.cpu_dai_name = "bfin-tdm.0",
		.codec_dai_name = "ad1836-hifi",
		.platform_name = "bfin-tdm-pcm-audio",
		.codec_name = "spi0.4",
		.ops = &bf5xx_ad1836_ops,
		.dai_fmt = BF5XX_AD1836_DAIFMT,
	},
	{
		.name = "ad1836",
		.stream_name = "AD1836",
		.cpu_dai_name = "bfin-tdm.1",
		.codec_dai_name = "ad1836-hifi",
		.platform_name = "bfin-tdm-pcm-audio",
		.codec_name = "spi0.4",
		.ops = &bf5xx_ad1836_ops,
		.dai_fmt = BF5XX_AD1836_DAIFMT,
	},
};
/* The card binds exactly one link, chosen by CONFIG_SND_BF5XX_SPORT_NUM. */
static struct snd_soc_card bf5xx_ad1836 = {
	.name = "bfin-ad1836",
	.owner = THIS_MODULE,
	.dai_link = &bf5xx_ad1836_dai[CONFIG_SND_BF5XX_SPORT_NUM],
	.num_links = 1,
};

static struct platform_device *bfxx_ad1836_snd_device;
/* Create the "soc-audio" platform device that instantiates the card. */
static int __init bf5xx_ad1836_init(void)
{
	int ret;

	bfxx_ad1836_snd_device = platform_device_alloc("soc-audio", -1);
	if (!bfxx_ad1836_snd_device)
		return -ENOMEM;

	platform_set_drvdata(bfxx_ad1836_snd_device, &bf5xx_ad1836);
	ret = platform_device_add(bfxx_ad1836_snd_device);

	/* _add failure: drop the reference taken by _alloc */
	if (ret)
		platform_device_put(bfxx_ad1836_snd_device);

	return ret;
}
/* Unregister the sound card device on module unload. */
static void __exit bf5xx_ad1836_exit(void)
{
	platform_device_unregister(bfxx_ad1836_snd_device);
}

module_init(bf5xx_ad1836_init);
module_exit(bf5xx_ad1836_exit);

/* Module information */
MODULE_AUTHOR("Barry Song");
MODULE_DESCRIPTION("ALSA SoC AD1836 board driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
MaxiCM/android_kernel_motorola_msm8226 | drivers/staging/rtl8192u/ieee80211/ieee80211_tx.c | 5451 | 27282 | /******************************************************************************
Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA 02111-1307, USA.
The full GNU General Public License is included in this distribution in the
file called LICENSE.
Contact Information:
James P. Ketrenos <ipw2100-admin@linux.intel.com>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
Andrea Merello <andreamrl@tiscali.it>
A special thanks goes to Realtek for their support !
******************************************************************************/
#include <linux/compiler.h>
//#include <linux/config.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/if_vlan.h>
#include "ieee80211.h"
/*
802.11 Data Frame
802.11 frame_contorl for data frames - 2 bytes
,-----------------------------------------------------------------------------------------.
bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e |
|----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x |
|----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep |
| | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | |
'-----------------------------------------------------------------------------------------'
/\
|
802.11 Data Frame |
,--------- 'ctrl' expands to >-----------'
|
,--'---,-------------------------------------------------------------.
Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
|------|------|---------|---------|---------|------|---------|------|
Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
| | tion | (BSSID) | | | ence | data | |
`--------------------------------------------------| |------'
Total: 28 non-data bytes `----.----'
|
.- 'Frame data' expands to <---------------------------'
|
V
,---------------------------------------------------.
Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
|------|------|---------|----------|------|---------|
Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
| DSAP | SSAP | | | | Packet |
| 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
`-----------------------------------------| |
Total: 8 non-data bytes `----.----'
|
.- 'IP Packet' expands, if WEP enabled, to <--'
|
V
,-----------------------.
Bytes | 4 | 0-2296 | 4 |
|-----|-----------|-----|
Desc. | IV | Encrypted | ICV |
| | IP Packet | |
`-----------------------'
Total: 8 non-data bytes
802.3 Ethernet Data Frame
,-----------------------------------------.
Bytes | 6 | 6 | 2 | Variable | 4 |
|-------|-------|------|-----------|------|
Desc. | Dest. | Source| Type | IP Packet | fcs |
| MAC | MAC | | | |
`-----------------------------------------'
Total: 18 non-data bytes
In the event that fragmentation is required, the incoming payload is split into
N parts of size ieee->fts. The first fragment contains the SNAP header and the
remaining packets are just data.
If encryption is enabled, each fragment payload size is reduced by enough space
to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP)
So if you have 1500 bytes of payload with ieee->fts set to 500 without
encryption it will take 3 frames. With WEP it will take 4 frames as the
payload of each frame is reduced to 492 bytes.
* SKB visualization
*
* ,- skb->data
* |
* | ETHERNET HEADER ,-<-- PAYLOAD
* | | 14 bytes from skb->data
* | 2 bytes for Type --> ,T. | (sizeof ethhdr)
* | | | |
* |,-Dest.--. ,--Src.---. | | |
* | 6 bytes| | 6 bytes | | | |
* v | | | | | |
* 0 | v 1 | v | v 2
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
* ^ | ^ | ^ |
* | | | | | |
* | | | | `T' <---- 2 bytes for Type
* | | | |
* | | '---SNAP--' <-------- 6 bytes for SNAP
* | |
* `-IV--' <-------------------- 4 bytes for IV (WEP)
*
* SNAP HEADER
*
*/
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
/*
 * Write an 802.2 LLC/SNAP header plus ethertype for @h_proto at @data.
 *
 * The two protocols that require Bridge-Tunnel encapsulation (IPX
 * 0x8137 and AppleTalk AARP 0x80f3) get the 802.1H OUI; everything
 * else gets standard RFC 1042 encapsulation.
 *
 * Returns the number of bytes written (SNAP header + 2-byte ethertype).
 */
static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
{
	struct ieee80211_snap_hdr *hdr = (struct ieee80211_snap_hdr *)data;
	const u8 *oui = (h_proto == 0x8137 || h_proto == 0x80f3) ?
			P802_1H_OUI : RFC1042_OUI;

	hdr->dsap = 0xaa;
	hdr->ssap = 0xaa;
	hdr->ctrl = 0x03;	/* UI frame */
	hdr->oui[0] = oui[0];
	hdr->oui[1] = oui[1];
	hdr->oui[2] = oui[2];

	*(u16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}
int ieee80211_encrypt_fragment(
struct ieee80211_device *ieee,
struct sk_buff *frag,
int hdr_len)
{
struct ieee80211_crypt_data* crypt = ieee->crypt[ieee->tx_keyidx];
int res;
if (!(crypt && crypt->ops))
{
printk("=========>%s(), crypt is null\n", __FUNCTION__);
return -1;
}
#ifdef CONFIG_IEEE80211_CRYPT_TKIP
struct ieee80211_hdr *header;
if (ieee->tkip_countermeasures &&
crypt && crypt->ops && strcmp(crypt->ops->name, "TKIP") == 0) {
header = (struct ieee80211_hdr *) frag->data;
if (net_ratelimit()) {
printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
"TX packet to %pM\n",
ieee->dev->name, header->addr1);
}
return -1;
}
#endif
/* To encrypt, frame format is:
* IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */
// PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
* call both MSDU and MPDU encryption functions from here. */
atomic_inc(&crypt->refcnt);
res = 0;
if (crypt->ops->encrypt_msdu)
res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
if (res == 0 && crypt->ops->encrypt_mpdu)
res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
atomic_dec(&crypt->refcnt);
if (res < 0) {
printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
ieee->dev->name, frag->len);
ieee->ieee_stats.tx_discards++;
return -1;
}
return 0;
}
/*
 * Free a TX buffer descriptor obtained from ieee80211_alloc_txb().
 * NULL is tolerated and ignored.
 *
 * NOTE(review): only the descriptor itself is freed here; the fragment
 * skbs are presumably consumed and freed by the xmit path — confirm
 * against callers before reusing this helper on an unsent txb.
 */
void ieee80211_txb_free(struct ieee80211_txb *txb) {
	//int i;
	if (unlikely(!txb))
		return;
	kfree(txb);
}
/*
 * Allocate a TX buffer descriptor holding @nr_frags fragment skbs of
 * @txb_size bytes each.  Every fragment is pre-allocated and has its
 * control buffer cleared.
 *
 * @nr_frags: number of fragment skbs to allocate
 * @txb_size: size, in bytes, of each fragment skb
 * @gfp_mask: allocation flags forwarded to the allocators
 *
 * Returns the descriptor, or NULL on any allocation failure (in which
 * case nothing is leaked).
 *
 * Fix vs. original: kmalloc()+memset(sizeof(struct ieee80211_txb))
 * left the trailing fragment-pointer array uninitialised; kzalloc()
 * zeroes the whole allocation including that array.
 */
struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
					  int gfp_mask)
{
	struct ieee80211_txb *txb;
	int i;

	txb = kzalloc(sizeof(struct ieee80211_txb) +
		      (sizeof(u8 *) * nr_frags), gfp_mask);
	if (!txb)
		return NULL;

	txb->nr_frags = nr_frags;
	txb->frag_size = txb_size;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i])) {
			i--;
			break;
		}
		memset(txb->fragments[i]->cb, 0, sizeof(txb->fragments[i]->cb));
	}

	/* Partial allocation: unwind every skb we did get. */
	if (unlikely(i != nr_frags)) {
		while (i >= 0)
			dev_kfree_skb_any(txb->fragments[i--]);
		kfree(txb);
		return NULL;
	}

	return txb;
}
// Classify the to-be send data packet
// Need to acquire the sent queue index.
static int
ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
{
struct ethhdr *eth;
struct iphdr *ip;
eth = (struct ethhdr *)skb->data;
if (eth->h_proto != htons(ETH_P_IP))
return 0;
// IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
ip = ip_hdr(skb);
switch (ip->tos & 0xfc) {
case 0x20:
return 2;
case 0x40:
return 1;
case 0x60:
return 3;
case 0x80:
return 4;
case 0xa0:
return 5;
case 0xc0:
return 6;
case 0xe0:
return 7;
default:
return 0;
}
}
#define SN_LESS(a, b) (((a-b)&0x800)!=0)
/*
 * Decide whether this QoS data frame may be sent inside an A-MPDU
 * aggregate and record the aggregation parameters in @tcb_desc.
 * Starts the ADDBA handshake for the traffic stream when no block-ack
 * session exists yet; a forced policy in pHTInfo->ForcedAMPDUMode can
 * override the negotiated result either way.
 */
void ieee80211_tx_query_agg_cap(struct ieee80211_device* ieee, struct sk_buff* skb, cb_desc* tcb_desc)
{
	PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;
	PTX_TS_RECORD pTxTs = NULL;
	struct ieee80211_hdr_1addr* hdr = (struct ieee80211_hdr_1addr*)skb->data;

	/* Aggregation is an HT feature and only applies to unicast QoS data. */
	if (!pHTInfo->bCurrentHTSupport||!pHTInfo->bEnableHT)
		return;
	if (!IsQoSDataFrame(skb->data))
		return;

	if (is_multicast_ether_addr(hdr->addr1) || is_broadcast_ether_addr(hdr->addr1))
		return;
	//check packet and mode later
#ifdef TO_DO_LIST
	if(pTcb->PacketLength >= 4096)
		return;
	// For RTL819X, if pairwisekey = wep/tkip, we don't aggrregation.
	if(!Adapter->HalFunc.GetNmodeSupportBySecCfgHandler(Adapter))
		return;
#endif
	/* Per the note above, WEP/TKIP pairwise keys rule out N-mode. */
	if(!ieee->GetNmodeSupportBySecCfg(ieee->dev))
	{
		return;
	}
	if(pHTInfo->bCurrentAMPDUEnable)
	{
		if (!GetTs(ieee, (PTS_COMMON_INFO*)(&pTxTs), hdr->addr1, skb->priority, TX_DIR, true))
		{
			printk("===>can't get TS\n");
			return;
		}
		/* No admitted block-ack session yet: kick off ADDBA and fall
		 * through to any forced policy for this frame. */
		if (pTxTs->TxAdmittedBARecord.bValid == false)
		{
			TsStartAddBaProcess(ieee, pTxTs);
			goto FORCED_AGG_SETTING;
		}
		else if (pTxTs->bUsingBa == false)
		{
			/* Start using BA once our next sequence number has
			 * passed the negotiated starting sequence. */
			if (SN_LESS(pTxTs->TxAdmittedBARecord.BaStartSeqCtrl.field.SeqNum, (pTxTs->TxCurSeq+1)%4096))
				pTxTs->bUsingBa = true;
			else
				goto FORCED_AGG_SETTING;
		}

		if (ieee->iw_mode == IW_MODE_INFRA)
		{
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_factor = pHTInfo->CurrentAMPDUFactor;
			tcb_desc->ampdu_density = pHTInfo->CurrentMPDUDensity;
		}
	}
FORCED_AGG_SETTING:
	/* Administrative override of the negotiated aggregation state. */
	switch(pHTInfo->ForcedAMPDUMode )
	{
		case HT_AGG_AUTO:
			break;

		case HT_AGG_FORCE_ENABLE:
			tcb_desc->bAMPDUEnable = true;
			tcb_desc->ampdu_density = pHTInfo->ForcedMPDUDensity;
			tcb_desc->ampdu_factor = pHTInfo->ForcedAMPDUFactor;
			break;

		case HT_AGG_FORCE_DISABLE:
			tcb_desc->bAMPDUEnable = false;
			tcb_desc->ampdu_density = 0;
			tcb_desc->ampdu_factor = 0;
			break;

	}
	return;
}
/*
 * Decide whether this frame may use the short PHY preamble.  The 1M
 * rate (data_rate == 2, in units of 500 kbps) is restricted to the
 * long preamble by the 802.11b spec; otherwise we follow the network's
 * short-preamble capability bit.  (The "qurey" typo in the name is
 * kept — external callers reference it.)
 */
extern void ieee80211_qurey_ShortPreambleMode(struct ieee80211_device* ieee, cb_desc* tcb_desc)
{
	if (tcb_desc->data_rate == 2) {
		/* 1M can only use the long preamble (11b spec). */
		tcb_desc->bUseShortPreamble = false;
		return;
	}

	tcb_desc->bUseShortPreamble =
		(ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE) ?
		true : false;
}
/*
 * Decide whether the short guard interval may be used for this frame,
 * based on negotiated HT support, any administrative override, and the
 * short-GI capability for the current channel bandwidth.
 */
extern void
ieee80211_query_HTCapShortGI(struct ieee80211_device *ieee, cb_desc *tcb_desc)
{
	PRT_HIGH_THROUGHPUT ht = ieee->pHTInfo;

	tcb_desc->bUseShortGI = false;

	/* Short GI is an HT-only feature. */
	if (!ht->bCurrentHTSupport || !ht->bEnableHT)
		return;

	/* Administrative override wins. */
	if (ht->bForcedShortGI) {
		tcb_desc->bUseShortGI = true;
		return;
	}

	if ((ht->bCurBW40MHz == true) && ht->bCurShortGI40MHz)
		tcb_desc->bUseShortGI = true;
	else if ((ht->bCurBW40MHz == false) && ht->bCurShortGI20MHz)
		tcb_desc->bUseShortGI = true;
}
/*
 * Decide whether this frame is transmitted at 40MHz (bPacketBW).
 * Group-addressed frames and legacy (non-MCS) rates always use 20MHz;
 * otherwise 40MHz is used when the channel and peer allow it and the
 * auto-switch logic has not forced us down to 20MHz for range.
 */
void ieee80211_query_BandwidthMode(struct ieee80211_device* ieee, cb_desc *tcb_desc)
{
	PRT_HIGH_THROUGHPUT ht = ieee->pHTInfo;

	tcb_desc->bPacketBW = false;

	if (!ht->bCurrentHTSupport || !ht->bEnableHT)
		return;

	/* Broadcast/multicast always go out at 20MHz. */
	if (tcb_desc->bMulticast || tcb_desc->bBroadcast)
		return;

	/* Legacy rates (high bit clear) are 20MHz only. */
	if ((tcb_desc->data_rate & 0x80) == 0)
		return;

	if (ht->bCurBW40MHz && ht->bCurTxBW40MHz &&
	    !ieee->bandwidth_auto_switch.bforced_tx20Mhz)
		tcb_desc->bPacketBW = true;
}
/*
 * Select the protection mechanism (RTS/CTS, CTS-to-self, or none) and
 * the protection rate for a unicast data frame, recording the result
 * in @tcb_desc.  b/g mode uses the classic RTS threshold plus ERP
 * protection; 11n mode additionally considers the HT operating mode,
 * A-MPDU aggregation and IOT workarounds.
 *
 * Fix vs. original: removed the dead `if (0) { ... }` "CTS replace
 * with RTS" test block — it could never execute.
 */
void ieee80211_query_protectionmode(struct ieee80211_device* ieee, cb_desc* tcb_desc, struct sk_buff* skb)
{
	// Common Settings
	tcb_desc->bRTSSTBC = false;
	tcb_desc->bRTSUseShortGI = false;	/* protection frames use legacy rates, never short GI */
	tcb_desc->bCTSEnable = false;		/* most protection uses RTS/CTS */
	tcb_desc->RTSSC = 0;			/* 20MHz: don't care; 40MHz: duplicate */
	tcb_desc->bRTSBW = false;		/* RTS frames are always 20MHz */

	/* Only unicast frames are protected. */
	if (tcb_desc->bBroadcast || tcb_desc->bMulticast)
		return;

	if (is_broadcast_ether_addr(skb->data + 16))	/* addr3 (DA in infrastructure) */
		return;

	if (ieee->mode < IEEE_N_24G) {	/* b/g mode */
		/* (1) RTS_Threshold is compared to the MPDU, not MSDU.
		 * (2) If there is more than one frag in this MSDU, only the
		 *     first frag uses a protection frame; later fragments are
		 *     protected by the preceding one, so only the first
		 *     fragment's length needs checking. */
		if (skb->len > ieee->rts) {
			tcb_desc->bRTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		} else if (ieee->current_network.buseprotection) {
			/* ERP protection: CTS-to-self. */
			tcb_desc->bRTSEnable = true;
			tcb_desc->bCTSEnable = true;
			tcb_desc->rts_rate = MGN_24M;
		}
		/* otherwise: no protection */
		return;
	} else {	/* 11n high-throughput case */
		PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo;

		/* The loop runs once; break == "decision made". */
		while (true) {
			/* ERP protection: CTS-to-self. */
			if (ieee->current_network.buseprotection) {
				tcb_desc->bRTSEnable = true;
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				break;
			}
			/* HT operating modes 2/3 require protection. */
			if (pHTInfo->bCurrentHTSupport && pHTInfo->bEnableHT) {
				u8 HTOpMode = pHTInfo->CurrentOpMode;

				if ((pHTInfo->bCurBW40MHz && (HTOpMode == 2 || HTOpMode == 3)) ||
				    (!pHTInfo->bCurBW40MHz && HTOpMode == 3)) {
					tcb_desc->rts_rate = MGN_24M;	/* rate is 24Mbps */
					tcb_desc->bRTSEnable = true;
					break;
				}
			}
			/* RTS threshold. */
			if (skb->len > ieee->rts) {
				tcb_desc->rts_rate = MGN_24M;	/* rate is 24Mbps */
				tcb_desc->bRTSEnable = true;
				break;
			}
			/* TODO: check MIMO power-save condition. */
			/* A-MPDU aggregation for TXOP. */
			if (tcb_desc->bAMPDUEnable) {
				tcb_desc->rts_rate = MGN_24M;
				/* According to the 8190 design, firmware sends
				 * CF-End only if RTS/CTS is enabled; that
				 * degrades throughput around 10M, so the
				 * mechanism is disabled. 2007.08.03 by Emily */
				tcb_desc->bRTSEnable = false;
				break;
			}
			/* IOT workaround: some peers need forced CTS-to-self. */
			if (pHTInfo->IOTAction & HT_IOT_ACT_FORCED_CTS2SELF) {
				tcb_desc->bCTSEnable = true;
				tcb_desc->rts_rate = MGN_24M;
				tcb_desc->bRTSEnable = true;
				break;
			}
			/* Totally no protection case!! */
			goto NO_PROTECTION;
		}
	}
	if (ieee->current_network.capability & WLAN_CAPABILITY_SHORT_PREAMBLE)
		tcb_desc->bUseShortPreamble = true;
	if (ieee->mode == IW_MODE_MASTER)
		goto NO_PROTECTION;
	return;
NO_PROTECTION:
	tcb_desc->bRTSEnable = false;
	tcb_desc->bCTSEnable = false;
	tcb_desc->rts_rate = 0;
	tcb_desc->RTSSC = 0;
	tcb_desc->bRTSBW = false;
}
/*
 * Copy the device-wide TX rate policy flags into this frame's control
 * descriptor and select the rate-adaptation (RATR) table index.
 */
void ieee80211_txrate_selectmode(struct ieee80211_device* ieee, cb_desc* tcb_desc)
{
#ifdef TO_DO_LIST
	if(!IsDataFrame(pFrame))
	{
		pTcb->bTxDisableRateFallBack = TRUE;
		pTcb->bTxUseDriverAssingedRate = TRUE;
		pTcb->RATRIndex = 7;
		return;
	}

	if(pMgntInfo->ForcedDataRate!= 0)
	{
		pTcb->bTxDisableRateFallBack = TRUE;
		pTcb->bTxUseDriverAssingedRate = TRUE;
		return;
	}
#endif
	if(ieee->bTxDisableRateFallBack)
		tcb_desc->bTxDisableRateFallBack = true;

	if(ieee->bTxUseDriverAssingedRate)
		tcb_desc->bTxUseDriverAssingedRate = true;

	/* Unless the driver fully pinned the rate, use RATR table 0 for
	 * infrastructure and ad-hoc links. */
	if(!tcb_desc->bTxDisableRateFallBack || !tcb_desc->bTxUseDriverAssingedRate)
	{
		if (ieee->iw_mode == IW_MODE_INFRA || ieee->iw_mode == IW_MODE_ADHOC)
			tcb_desc->RATRIndex = 0;
	}
}
/*
 * Advance the per-traffic-stream sequence counter for a unicast QoS
 * data frame headed to @dst.  Group-addressed and non-QoS frames do
 * not use the per-TS sequence space and are left alone.
 */
void ieee80211_query_seqnum(struct ieee80211_device*ieee, struct sk_buff* skb, u8* dst)
{
	PTX_TS_RECORD ts = NULL;

	if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst))
		return;

	/* We deal with QoS data frames only. */
	if (!IsQoSDataFrame(skb->data))
		return;

	if (!GetTs(ieee, (PTS_COMMON_INFO*)(&ts), dst, skb->priority, TX_DIR, true))
		return;

	/* 12-bit sequence number, wraps at 4096. */
	ts->TxCurSeq = (ts->TxCurSeq + 1) % 4096;
}
/*
 * Main 802.11 transmit entry point (net_device hard_start_xmit).
 *
 * Converts an outgoing Ethernet frame into one or more 802.11 data
 * fragments collected in an ieee80211_txb, fills in the per-packet
 * cb_desc (rate selection, aggregation, protection, sequence number),
 * then hands the txb to either the softmac queue or the driver's
 * hard_start_xmit hook.  In raw_tx mode the skb is assumed to already
 * contain a complete 802.11 frame and is forwarded as-is.
 *
 * Returns 0 on success (the skb is consumed); 1 on TXB allocation
 * failure, in which case the queue is stopped.
 */
int ieee80211_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;
	struct ieee80211_crypt_data* crypt;
	cb_desc *tcb_desc;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, dont' bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	   ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	if(likely(ieee->raw_tx == 0)){
		/* Frame must at least hold a SNAP header + ethertype. */
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		/* EAPOL frames bypass encryption during 802.1X negotiation. */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
#endif
		/* Save source and destination addresses */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_WEP;
		else
			fc = IEEE80211_FTYPE_DATA;

		//if(ieee->current_network.QoS_Enable)
		if(qos_actived)
			fc |= IEEE80211_STYPE_QOS_DATA;
		else
			fc |= IEEE80211_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			   Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			   Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented) */
		if (is_multicast_ether_addr(header.addr1) ||
		    is_broadcast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		}
		else {
			frag_size = ieee->fts;//default:392
			qos_ctl = 0;
		}

		//if (ieee->current_network.QoS_Enable)
		if(qos_actived)
		{
			hdr_len = IEEE80211_3ADDR_LEN + 2;

			skb->priority = ieee80211_classify(skb, &ieee->current_network);
			qos_ctl |= skb->priority; //set in the ieee80211_classify
			header.qos_ctl = cpu_to_le16(qos_ctl & IEEE80211_QOS_TID);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}

		/* Determine amount of payload per fragment.  Regardless of if
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account
		 * for it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		/* Each fragment may need to have room for encryption pre/postfix */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		/* Number of fragments is the total bytes_per_frag /
		 * payload_per_fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.) */
		txb = ieee80211_alloc_txb(nr_frags, frag_size + ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		//if (ieee->current_network.QoS_Enable)
		if(qos_actived)
		{
			txb->queue_index = UP2AC(skb->priority);
		} else {
			txb->queue_index = WME_AC_BK;
		}

		/* Build each fragment: reserve headroom, copy the 802.11
		 * header, add SNAP (first fragment only) and payload, then
		 * encrypt. */
		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (cb_desc *)(skb_frag->cb + MAX_DEV_ADDR_SIZE);
			if(qos_actived){
				skb_frag->priority = skb->priority;//UP2AC(skb->priority);
				tcb_desc->queue_index = UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BK;
				tcb_desc->queue_index = WME_AC_BK;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt){
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);
			}
			else
			{
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment takes the remaining length */
				bytes = bytes_last_frag;
			}
			//if(ieee->current_network.QoS_Enable)
			if(qos_actived)
			{
				// add 1 only indicate to corresponding seq number control 2006/7/12
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}

			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		/* Advance the 12-bit sequence counter used above. */
		if(qos_actived)
		{
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	}else{
		/* Raw TX: caller supplies a complete 802.11 frame. */
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}

 success:
	//WB add to fill data tcb_desc here. only first fragment is considered, need to change, and you may remove to other place.
	if (txb)
	{
		cb_desc *tcb_desc = (cb_desc *)(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		if (is_multicast_ether_addr(header.addr1))
			tcb_desc->bMulticast = 1;
		if (is_broadcast_ether_addr(header.addr1))
			tcb_desc->bBroadcast = 1;
		/* Fill in rate/aggregation/protection policy for the frame. */
		ieee80211_txrate_selectmode(ieee, tcb_desc);
		if ( tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
			tcb_desc->data_rate = ieee->basic_rate;
		else
			//tcb_desc->data_rate = CURRENT_RATE(ieee->current_network.mode, ieee->rate, ieee->HTCurrentOperaRate);
			tcb_desc->data_rate = CURRENT_RATE(ieee->mode, ieee->rate, ieee->HTCurrentOperaRate);
		ieee80211_qurey_ShortPreambleMode(ieee, tcb_desc);
		ieee80211_tx_query_agg_cap(ieee, txb->fragments[0], tcb_desc);
		ieee80211_query_HTCapShortGI(ieee, tcb_desc);
		ieee80211_query_BandwidthMode(ieee, tcb_desc);
		ieee80211_query_protectionmode(ieee, tcb_desc, txb->fragments[0]);
		ieee80211_query_seqnum(ieee, txb->fragments[0], header.addr1);
//		IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, txb->fragments[0]->data, txb->fragments[0]->len);
		//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, tcb_desc, sizeof(cb_desc));
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			ieee80211_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
EXPORT_SYMBOL(ieee80211_txb_free);
| gpl-2.0 |
saydulk/linux | arch/arm/common/sa1111.c | 76 | 37478 | /*
* linux/arch/arm/common/sa1111.c
*
* SA1111 support
*
* Original code by John Dorsey
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This file contains all generic SA1111 support.
*
* All initialization functions provided here are intended to be called
* from machine specific code with proper arguments when required.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/mach/irq.h>
#include <asm/mach-types.h>
#include <asm/sizes.h>
#include <asm/hardware/sa1111.h>
/* SA1111 IRQs */
#define IRQ_GPAIN0 (0)
#define IRQ_GPAIN1 (1)
#define IRQ_GPAIN2 (2)
#define IRQ_GPAIN3 (3)
#define IRQ_GPBIN0 (4)
#define IRQ_GPBIN1 (5)
#define IRQ_GPBIN2 (6)
#define IRQ_GPBIN3 (7)
#define IRQ_GPBIN4 (8)
#define IRQ_GPBIN5 (9)
#define IRQ_GPCIN0 (10)
#define IRQ_GPCIN1 (11)
#define IRQ_GPCIN2 (12)
#define IRQ_GPCIN3 (13)
#define IRQ_GPCIN4 (14)
#define IRQ_GPCIN5 (15)
#define IRQ_GPCIN6 (16)
#define IRQ_GPCIN7 (17)
#define IRQ_MSTXINT (18)
#define IRQ_MSRXINT (19)
#define IRQ_MSSTOPERRINT (20)
#define IRQ_TPTXINT (21)
#define IRQ_TPRXINT (22)
#define IRQ_TPSTOPERRINT (23)
#define SSPXMTINT (24)
#define SSPRCVINT (25)
#define SSPROR (26)
#define AUDXMTDMADONEA (32)
#define AUDRCVDMADONEA (33)
#define AUDXMTDMADONEB (34)
#define AUDRCVDMADONEB (35)
#define AUDTFSR (36)
#define AUDRFSR (37)
#define AUDTUR (38)
#define AUDROR (39)
#define AUDDTS (40)
#define AUDRDD (41)
#define AUDSTO (42)
#define IRQ_USBPWR (43)
#define IRQ_HCIM (44)
#define IRQ_HCIBUFFACC (45)
#define IRQ_HCIRMTWKP (46)
#define IRQ_NHCIMFCIR (47)
#define IRQ_USB_PORT_RESUME (48)
#define IRQ_S0_READY_NINT (49)
#define IRQ_S1_READY_NINT (50)
#define IRQ_S0_CD_VALID (51)
#define IRQ_S1_CD_VALID (52)
#define IRQ_S0_BVD1_STSCHG (53)
#define IRQ_S1_BVD1_STSCHG (54)
#define SA1111_IRQ_NR (55)
extern void sa1110_mb_enable(void);
extern void sa1110_mb_disable(void);
/*
* We keep the following data for the overall SA1111. Note that the
* struct device and struct resource are "fake"; they should be supplied
* by the bus above us. However, in the interests of getting all SA1111
* drivers converted over to the device model, we provide this as an
* anchor point for all the other drivers.
*/
/* Per-chip state for one SA1111 companion device. */
struct sa1111 {
	struct device *dev;
	struct clk *clk;
	unsigned long phys;		/* physical base address of the chip */
	int irq;			/* parent IRQ we cascade from */
	int irq_base;	/* base for cascaded on-chip IRQs */
	spinlock_t lock;
	void __iomem *base;		/* virtual mapping of the register window */
	struct sa1111_platform_data *pdata;
#ifdef CONFIG_PM
	void *saved_state;		/* presumably register snapshot for suspend/resume — allocated elsewhere */
#endif
};
/*
* We _really_ need to eliminate this. Its only users
* are the PWM and DMA checking code.
*/
static struct sa1111 *g_sa1111;
/* Static description of one SA1111 on-chip function block. */
struct sa1111_dev_info {
	unsigned long	offset;		/* register offset from the chip base */
	unsigned long	skpcr_mask;	/* SKPCR clock-enable bits for this block */
	bool		dma;		/* block performs DMA */
	unsigned int	devid;		/* SA1111_DEVID_* identifier */
	unsigned int	irq[6];		/* chip-local IRQ numbers used by the block */
};
/*
 * Table of SA1111 function blocks registered as child devices:
 * register offset, clock-enable bits, DMA capability, device id and
 * the chip-local IRQs each block uses.
 */
static struct sa1111_dev_info sa1111_devices[] = {
	{
		.offset		= SA1111_USB,
		.skpcr_mask	= SKPCR_UCLKEN,
		.dma		= true,
		.devid		= SA1111_DEVID_USB,
		.irq = {
			IRQ_USBPWR,
			IRQ_HCIM,
			IRQ_HCIBUFFACC,
			IRQ_HCIRMTWKP,
			IRQ_NHCIMFCIR,
			IRQ_USB_PORT_RESUME
		},
	},
	{
		.offset		= 0x0600,
		.skpcr_mask	= SKPCR_I2SCLKEN | SKPCR_L3CLKEN,
		.dma		= true,
		.devid		= SA1111_DEVID_SAC,
		.irq = {
			AUDXMTDMADONEA,
			AUDXMTDMADONEB,
			AUDRCVDMADONEA,
			AUDRCVDMADONEB
		},
	},
	{
		.offset		= 0x0800,
		.skpcr_mask	= SKPCR_SCLKEN,
		.devid		= SA1111_DEVID_SSP,
	},
	{
		.offset		= SA1111_KBD,
		.skpcr_mask	= SKPCR_PTCLKEN,
		.devid		= SA1111_DEVID_PS2_KBD,
		.irq = {
			IRQ_TPRXINT,
			IRQ_TPTXINT
		},
	},
	{
		.offset		= SA1111_MSE,
		.skpcr_mask	= SKPCR_PMCLKEN,
		.devid		= SA1111_DEVID_PS2_MSE,
		.irq = {
			IRQ_MSRXINT,
			IRQ_MSTXINT
		},
	},
	{
		.offset		= 0x1800,
		.skpcr_mask	= 0,
		.devid		= SA1111_DEVID_PCMCIA,
		.irq = {
			IRQ_S0_READY_NINT,
			IRQ_S0_CD_VALID,
			IRQ_S0_BVD1_STSCHG,
			IRQ_S1_READY_NINT,
			IRQ_S1_CD_VALID,
			IRQ_S1_BVD1_STSCHG,
		},
	},
};
/*
 * SA1111 interrupt support.  Since clearing an IRQ while there are
 * active IRQs causes the interrupt output to pulse, the upper levels
 * will call us again if there are more interrupts to process.
 */
static void
sa1111_irq_handler(unsigned int __irq, struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	unsigned int stat0, stat1, i;
	struct sa1111 *sachip = irq_desc_get_handler_data(desc);
	void __iomem *mapbase = sachip->base + SA1111_INTC;

	/* Snapshot both pending banks, then clear them — bank 0, parent
	 * ack, bank 1, in this exact order. */
	stat0 = sa1111_readl(mapbase + SA1111_INTSTATCLR0);
	stat1 = sa1111_readl(mapbase + SA1111_INTSTATCLR1);

	sa1111_writel(stat0, mapbase + SA1111_INTSTATCLR0);

	desc->irq_data.chip->irq_ack(&desc->irq_data);

	sa1111_writel(stat1, mapbase + SA1111_INTSTATCLR1);

	/* Spurious: cascaded with nothing pending. */
	if (stat0 == 0 && stat1 == 0) {
		do_bad_IRQ(irq, desc);
		return;
	}

	/* Bank 0 covers chip-local IRQs 0-31, bank 1 covers 32 upward;
	 * dispatch each set bit through the generic IRQ layer. */
	for (i = 0; stat0; i++, stat0 >>= 1)
		if (stat0 & 1)
			generic_handle_irq(i + sachip->irq_base);

	for (i = 32; stat1; i++, stat1 >>= 1)
		if (stat1 & 1)
			generic_handle_irq(i + sachip->irq_base);

	/* For level-based interrupts */
	desc->irq_data.chip->irq_unmask(&desc->irq_data);
}
#define SA1111_IRQMASK_LO(x) (1 << (x - sachip->irq_base))
#define SA1111_IRQMASK_HI(x) (1 << (x - sachip->irq_base - 32))
/*
 * Per-IRQ ack is a no-op: the demux handler (sa1111_irq_handler)
 * already clears the status bits in bulk via INTSTATCLR0/1.
 */
static void sa1111_ack_irq(struct irq_data *d)
{
}
/*
 * Mask one of the low (0-31) SA1111 interrupt sources by clearing its
 * enable bit in INTEN0.
 *
 * Fix vs. original: use the sa1111_writel() accessor like every other
 * register access in this file (was a bare writel()).
 */
static void sa1111_mask_lowirq(struct irq_data *d)
{
	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
	void __iomem *mapbase = sachip->base + SA1111_INTC;
	unsigned long ie0;

	ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
	ie0 &= ~SA1111_IRQMASK_LO(d->irq);
	sa1111_writel(ie0, mapbase + SA1111_INTEN0);
}
static void sa1111_unmask_lowirq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned long ie0;
ie0 = sa1111_readl(mapbase + SA1111_INTEN0);
ie0 |= SA1111_IRQMASK_LO(d->irq);
sa1111_writel(ie0, mapbase + SA1111_INTEN0);
}
/*
* Attempt to re-trigger the interrupt. The SA1111 contains a register
* (INTSET) which claims to do this. However, in practice no amount of
* manipulation of INTEN and INTSET guarantees that the interrupt will
* be triggered. In fact, its very difficult, if not impossible to get
* INTSET to re-trigger the interrupt.
*/
static int sa1111_retrigger_lowirq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned int mask = SA1111_IRQMASK_LO(d->irq);
unsigned long ip0;
int i;
ip0 = sa1111_readl(mapbase + SA1111_INTPOL0);
for (i = 0; i < 8; i++) {
sa1111_writel(ip0 ^ mask, mapbase + SA1111_INTPOL0);
sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
if (sa1111_readl(mapbase + SA1111_INTSTATCLR0) & mask)
break;
}
if (i == 8)
pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
d->irq);
return i == 8 ? -1 : 0;
}
static int sa1111_type_lowirq(struct irq_data *d, unsigned int flags)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned int mask = SA1111_IRQMASK_LO(d->irq);
unsigned long ip0;
if (flags == IRQ_TYPE_PROBE)
return 0;
if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
return -EINVAL;
ip0 = sa1111_readl(mapbase + SA1111_INTPOL0);
if (flags & IRQ_TYPE_EDGE_RISING)
ip0 &= ~mask;
else
ip0 |= mask;
sa1111_writel(ip0, mapbase + SA1111_INTPOL0);
sa1111_writel(ip0, mapbase + SA1111_WAKEPOL0);
return 0;
}
static int sa1111_wake_lowirq(struct irq_data *d, unsigned int on)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned int mask = SA1111_IRQMASK_LO(d->irq);
unsigned long we0;
we0 = sa1111_readl(mapbase + SA1111_WAKEEN0);
if (on)
we0 |= mask;
else
we0 &= ~mask;
sa1111_writel(we0, mapbase + SA1111_WAKEEN0);
return 0;
}
/* irq_chip for the low 32 cascaded SA1111 interrupt sources. */
static struct irq_chip sa1111_low_chip = {
	.name		= "SA1111-l",
	.irq_ack	= sa1111_ack_irq,
	.irq_mask	= sa1111_mask_lowirq,
	.irq_unmask	= sa1111_unmask_lowirq,
	.irq_retrigger	= sa1111_retrigger_lowirq,
	.irq_set_type	= sa1111_type_lowirq,
	.irq_set_wake	= sa1111_wake_lowirq,
};
static void sa1111_mask_highirq(struct irq_data *d)
{
struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
void __iomem *mapbase = sachip->base + SA1111_INTC;
unsigned long ie1;
ie1 = sa1111_readl(mapbase + SA1111_INTEN1);
ie1 &= ~SA1111_IRQMASK_HI(d->irq);
sa1111_writel(ie1, mapbase + SA1111_INTEN1);
}
/* Unmask (enable) a high-bank (32+) SA1111 interrupt via INTEN1. */
static void sa1111_unmask_highirq(struct irq_data *d)
{
	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
	void __iomem *mapbase = sachip->base + SA1111_INTC;
	unsigned long enables;

	enables = sa1111_readl(mapbase + SA1111_INTEN1);
	enables |= SA1111_IRQMASK_HI(d->irq);
	sa1111_writel(enables, mapbase + SA1111_INTEN1);
}
/*
 * Attempt to re-trigger the interrupt.  The SA1111 contains a register
 * (INTSET) which claims to do this.  However, in practice no amount of
 * manipulation of INTEN and INTSET guarantees that the interrupt will
 * be triggered.  In fact, its very difficult, if not impossible to get
 * INTSET to re-trigger the interrupt.  Instead, briefly invert the
 * interrupt polarity so the hardware sees an edge.
 *
 * Returns 1 when the interrupt was successfully re-triggered, 0 on
 * failure.  The genirq core treats a zero return from irq_retrigger
 * as "hardware resend failed" and falls back to a software resend;
 * the previous "return -1 on failure, 0 on success" had that exactly
 * inverted, so a failed retrigger was silently dropped.
 */
static int sa1111_retrigger_highirq(struct irq_data *d)
{
	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
	void __iomem *mapbase = sachip->base + SA1111_INTC;
	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
	unsigned long ip1;
	int i;

	ip1 = sa1111_readl(mapbase + SA1111_INTPOL1);
	for (i = 0; i < 8; i++) {
		/* flip the polarity and restore it to synthesise an edge */
		sa1111_writel(ip1 ^ mask, mapbase + SA1111_INTPOL1);
		sa1111_writel(ip1, mapbase + SA1111_INTPOL1);
		if (sa1111_readl(mapbase + SA1111_INTSTATCLR1) & mask)
			break;
	}

	if (i == 8) {
		pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
		       d->irq);
		return 0;
	}

	return 1;
}
/*
 * Configure the trigger edge for a high-bank (32+) SA1111 interrupt.
 * Exactly one of rising/falling edge must be requested.
 */
static int sa1111_type_highirq(struct irq_data *d, unsigned int flags)
{
	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
	void __iomem *mapbase = sachip->base + SA1111_INTC;
	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
	unsigned long pol;

	if (flags == IRQ_TYPE_PROBE)
		return 0;

	/* reject "none", "both edges" and level types */
	if ((!(flags & IRQ_TYPE_EDGE_RISING) ^ !(flags & IRQ_TYPE_EDGE_FALLING)) == 0)
		return -EINVAL;

	pol = sa1111_readl(mapbase + SA1111_INTPOL1);
	if (flags & IRQ_TYPE_EDGE_RISING)
		pol &= ~mask;	/* polarity bit clear == rising edge */
	else
		pol |= mask;	/* polarity bit set == falling edge */

	/* keep the wake-up polarity in step with the interrupt polarity */
	sa1111_writel(pol, mapbase + SA1111_INTPOL1);
	sa1111_writel(pol, mapbase + SA1111_WAKEPOL1);

	return 0;
}
/* Enable or disable wake-up capability for a high-bank SA1111 interrupt. */
static int sa1111_wake_highirq(struct irq_data *d, unsigned int on)
{
	struct sa1111 *sachip = irq_data_get_irq_chip_data(d);
	void __iomem *mapbase = sachip->base + SA1111_INTC;
	unsigned int mask = SA1111_IRQMASK_HI(d->irq);
	unsigned long wake;

	wake = sa1111_readl(mapbase + SA1111_WAKEEN1);
	wake = on ? (wake | mask) : (wake & ~mask);
	sa1111_writel(wake, mapbase + SA1111_WAKEEN1);

	return 0;
}
/*
 * irq_chip for SA1111 interrupt sources 32+ (the "high" bank, driven
 * through the INT*1/WAKE*1 register set).  Edge triggered, mirrors
 * sa1111_low_chip above.
 */
static struct irq_chip sa1111_high_chip = {
.name = "SA1111-h",
.irq_ack = sa1111_ack_irq,
.irq_mask = sa1111_mask_highirq,
.irq_unmask = sa1111_unmask_highirq,
.irq_retrigger = sa1111_retrigger_highirq,
.irq_set_type = sa1111_type_highirq,
.irq_set_wake = sa1111_wake_highirq,
};
/*
 * sa1111_setup_irq - initialise the SA1111 interrupt controller.
 * @sachip: chip instance
 * @irq_base: preferred base for the Linux IRQ descriptor allocation
 *
 * Allocates SA1111_IRQ_NR descriptors, quiesces and acks every source,
 * installs the low/high irq_chips and chains the whole block off the
 * parent interrupt.  Returns 0 on success or a negative errno when
 * descriptor allocation fails.
 */
static int sa1111_setup_irq(struct sa1111 *sachip, unsigned irq_base)
{
	void __iomem *irqbase = sachip->base + SA1111_INTC;
	unsigned i, irq;
	int ret;

	/*
	 * We're guaranteed that this region hasn't been taken.
	 */
	request_mem_region(sachip->phys + SA1111_INTC, 512, "irq");

	/* -1 for irq: let the core pick; irq_base is only a hint */
	ret = irq_alloc_descs(-1, irq_base, SA1111_IRQ_NR, -1);
	if (ret <= 0) {
		dev_err(sachip->dev, "unable to allocate %u irqs: %d\n",
			SA1111_IRQ_NR, ret);
		if (ret == 0)
			ret = -EINVAL;
		return ret;
	}

	sachip->irq_base = ret;

	/* disable all IRQs */
	sa1111_writel(0, irqbase + SA1111_INTEN0);
	sa1111_writel(0, irqbase + SA1111_INTEN1);
	sa1111_writel(0, irqbase + SA1111_WAKEEN0);
	sa1111_writel(0, irqbase + SA1111_WAKEEN1);

	/*
	 * detect on rising edge. Note: Feb 2001 Errata for SA1111
	 * specifies that S0ReadyInt and S1ReadyInt should be '1'.
	 */
	sa1111_writel(0, irqbase + SA1111_INTPOL0);
	sa1111_writel(SA1111_IRQMASK_HI(IRQ_S0_READY_NINT) |
		      SA1111_IRQMASK_HI(IRQ_S1_READY_NINT),
		      irqbase + SA1111_INTPOL1);

	/* clear all IRQs */
	sa1111_writel(~0, irqbase + SA1111_INTSTATCLR0);
	sa1111_writel(~0, irqbase + SA1111_INTSTATCLR1);

	/* low bank: sources 0-31 */
	for (i = IRQ_GPAIN0; i <= SSPROR; i++) {
		irq = sachip->irq_base + i;
		irq_set_chip_and_handler(irq, &sa1111_low_chip,
					 handle_edge_irq);
		irq_set_chip_data(irq, sachip);
		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
	}

	/* high bank: sources 32+ */
	for (i = AUDXMTDMADONEA; i <= IRQ_S1_BVD1_STSCHG; i++) {
		irq = sachip->irq_base + i;
		irq_set_chip_and_handler(irq, &sa1111_high_chip,
					 handle_edge_irq);
		irq_set_chip_data(irq, sachip);
		irq_clear_status_flags(irq, IRQ_NOREQUEST | IRQ_NOPROBE);
	}

	/*
	 * Register SA1111 interrupt
	 */
	irq_set_irq_type(sachip->irq, IRQ_TYPE_EDGE_RISING);
	irq_set_chained_handler_and_data(sachip->irq, sa1111_irq_handler,
					 sachip);

	dev_info(sachip->dev, "Providing IRQ%u-%u\n",
		 sachip->irq_base, sachip->irq_base + SA1111_IRQ_NR - 1);

	return 0;
}
/*
 * Bring the SA1111 out of reset. This requires a set procedure:
 *  1. nRESET asserted (by hardware)
 *  2. CLK turned on from SA1110
 *  3. nRESET deasserted
 *  4. VCO turned on, PLL_BYPASS turned off
 *  5. Wait lock time, then assert RCLKEn
 *  7. PCR set to allow clocking of individual functions
 *
 * Until we've done this, the only registers we can access are:
 *   SBI_SKCR
 *   SBI_SMCR
 *   SBI_SKID
 *
 * NOTE(review): the exact write ordering below implements the
 * procedure above -- do not reorder.
 */
static void sa1111_wake(struct sa1111 *sachip)
{
	unsigned long flags, r;

	spin_lock_irqsave(&sachip->lock, flags);

	clk_enable(sachip->clk);

	/*
	 * Turn VCO on, and disable PLL Bypass.
	 */
	r = sa1111_readl(sachip->base + SA1111_SKCR);
	r &= ~SKCR_VCO_OFF;
	sa1111_writel(r, sachip->base + SA1111_SKCR);
	r |= SKCR_PLL_BYPASS | SKCR_OE_EN;
	sa1111_writel(r, sachip->base + SA1111_SKCR);

	/*
	 * Wait lock time. SA1111 manual _doesn't_
	 * specify a figure for this! We choose 100us.
	 */
	udelay(100);

	/*
	 * Enable RCLK. We also ensure that RDYEN is set.
	 */
	r |= SKCR_RCLKEN | SKCR_RDYEN;
	sa1111_writel(r, sachip->base + SA1111_SKCR);

	/*
	 * Wait 14 RCLK cycles for the chip to finish coming out
	 * of reset. (RCLK=24MHz).  This is 590ns.
	 */
	udelay(1);

	/*
	 * Ensure all clocks are initially off.
	 */
	sa1111_writel(0, sachip->base + SA1111_SKPCR);

	spin_unlock_irqrestore(&sachip->lock, flags);
}
#ifdef CONFIG_ARCH_SA1100

/*
 * Per-DRAC DMA address masks used to work around SA1111 DMA erratum #7
 * (see comment in sa1111_configure_smc below).  Indexed by drac >> 2.
 */
static u32 sa1111_dma_mask[] = {
	~0,
	~(1 << 20),
	~(1 << 23),
	~(1 << 24),
	~(1 << 25),
	~(1 << 20),
	~(1 << 20),
	0,
};

/*
 * Configure the SA1111 shared memory controller.
 *
 * @sachip: chip instance
 * @sdram: nonzero for SDRAM (currently unused by the body -- NOTE(review):
 *         presumably kept for interface symmetry; verify against callers)
 * @drac: DRAM row address count field
 * @cas_latency: CAS latency; only the value 3 sets SMCR_CLAT
 */
void
sa1111_configure_smc(struct sa1111 *sachip, int sdram, unsigned int drac,
		     unsigned int cas_latency)
{
	unsigned int smcr = SMCR_DTIM | SMCR_MBGE | FInsrt(drac, SMCR_DRAC);

	if (cas_latency == 3)
		smcr |= SMCR_CLAT;

	sa1111_writel(smcr, sachip->base + SA1111_SMCR);

	/*
	 * Now clear the bits in the DMA mask to work around the SA1111
	 * DMA erratum (Intel StrongARM SA-1111 Microprocessor Companion
	 * Chip Specification Update, June 2000, Erratum #7).
	 */
	if (sachip->dev->dma_mask)
		*sachip->dev->dma_mask &= sa1111_dma_mask[drac >> 2];

	sachip->dev->coherent_dma_mask &= sa1111_dma_mask[drac >> 2];
}
#endif
/* Device release callback: frees the containing sa1111_dev allocation. */
static void sa1111_dev_release(struct device *_dev)
{
	kfree(SA1111_DEV(_dev));
}
/*
 * Create and register one SA1111 child function-block device.
 *
 * @sachip: parent chip instance
 * @parent: parent memory resource the child's 512-byte window nests in
 * @info: static description (offset, devid, skpcr mask, irqs, dma flag)
 *
 * Returns 0 on success or a negative errno; on failure everything
 * allocated here is released again (see the unwind labels).
 */
static int
sa1111_init_one_child(struct sa1111 *sachip, struct resource *parent,
		      struct sa1111_dev_info *info)
{
	struct sa1111_dev *dev;
	unsigned i;
	int ret;

	dev = kzalloc(sizeof(struct sa1111_dev), GFP_KERNEL);
	if (!dev) {
		ret = -ENOMEM;
		goto err_alloc;
	}

	device_initialize(&dev->dev);
	dev_set_name(&dev->dev, "%4.4lx", info->offset);
	dev->devid = info->devid;
	dev->dev.parent = sachip->dev;
	dev->dev.bus = &sa1111_bus_type;
	dev->dev.release = sa1111_dev_release;
	/* each function block occupies a 512-byte register window */
	dev->res.start = sachip->phys + info->offset;
	dev->res.end = dev->res.start + 511;
	dev->res.name = dev_name(&dev->dev);
	dev->res.flags = IORESOURCE_MEM;
	dev->mapbase = sachip->base + info->offset;
	dev->skpcr_mask = info->skpcr_mask;

	/* translate the block's hardware irq indices to Linux irq numbers */
	for (i = 0; i < ARRAY_SIZE(info->irq); i++)
		dev->irq[i] = sachip->irq_base + info->irq[i];

	/*
	 * If the parent device has a DMA mask associated with it, and
	 * this child supports DMA, propagate it down to the children.
	 */
	if (info->dma && sachip->dev->dma_mask) {
		dev->dma_mask = *sachip->dev->dma_mask;
		dev->dev.dma_mask = &dev->dma_mask;
		dev->dev.coherent_dma_mask = sachip->dev->coherent_dma_mask;
	}

	ret = request_resource(parent, &dev->res);
	if (ret) {
		dev_err(sachip->dev, "failed to allocate resource for %s\n",
			dev->res.name);
		goto err_resource;
	}

	ret = device_add(&dev->dev);
	if (ret)
		goto err_add;
	return 0;

 err_add:
	release_resource(&dev->res);
 err_resource:
	/* put_device() drops the device_initialize() ref and frees dev */
	put_device(&dev->dev);
 err_alloc:
	return ret;
}
/**
 * sa1111_probe - probe for a single SA1111 chip.
 * @phys_addr: physical address of device.
 *
 * Probe for a SA1111 chip.  This must be called
 * before any other SA1111-specific code.
 *
 * Returns:
 *	%-ENODEV	device not found.
 *	%-EBUSY		physical address already marked in-use.
 *	%-EINVAL	no platform data passed
 *	%0		successful.
 */
static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
{
	struct sa1111_platform_data *pd = me->platform_data;
	struct sa1111 *sachip;
	unsigned long id;
	unsigned int has_devs;
	int i, ret = -ENODEV;

	if (!pd)
		return -EINVAL;

	sachip = kzalloc(sizeof(struct sa1111), GFP_KERNEL);
	if (!sachip)
		return -ENOMEM;

	sachip->clk = clk_get(me, "SA1111_CLK");
	if (IS_ERR(sachip->clk)) {
		ret = PTR_ERR(sachip->clk);
		goto err_free;
	}

	ret = clk_prepare(sachip->clk);
	if (ret)
		goto err_clkput;

	spin_lock_init(&sachip->lock);

	sachip->dev = me;
	dev_set_drvdata(sachip->dev, sachip);

	sachip->pdata = pd;
	sachip->phys = mem->start;
	sachip->irq = irq;

	/*
	 * Map the whole region.  This also maps the
	 * registers for our children.
	 */
	sachip->base = ioremap(mem->start, PAGE_SIZE * 2);
	if (!sachip->base) {
		ret = -ENOMEM;
		goto err_clk_unprep;
	}

	/*
	 * Probe for the chip.  Only touch the SBI registers.
	 */
	id = sa1111_readl(sachip->base + SA1111_SKID);
	if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
		printk(KERN_DEBUG "SA1111 not detected: ID = %08lx\n", id);
		ret = -ENODEV;
		goto err_unmap;
	}

	pr_info("SA1111 Microprocessor Companion Chip: silicon revision %lx, metal revision %lx\n",
		(id & SKID_SIREV_MASK) >> 4, id & SKID_MTREV_MASK);

	/*
	 * We found it.  Wake the chip up, and initialise.
	 */
	sa1111_wake(sachip);

	/*
	 * The interrupt controller must be initialised before any
	 * other device to ensure that the interrupts are available.
	 */
	if (sachip->irq != NO_IRQ) {
		ret = sa1111_setup_irq(sachip, pd->irq_base);
		if (ret)
			goto err_unmap;
	}

#ifdef CONFIG_ARCH_SA1100
	{
	unsigned int val;

	/*
	 * The SDRAM configuration of the SA1110 and the SA1111 must
	 * match.  This is very important to ensure that SA1111 accesses
	 * don't corrupt the SDRAM.  Note that this ungates the SA1111's
	 * MBGNT signal, so we must have called sa1110_mb_disable()
	 * beforehand.
	 */
	sa1111_configure_smc(sachip, 1,
			     FExtr(MDCNFG, MDCNFG_SA1110_DRAC0),
			     FExtr(MDCNFG, MDCNFG_SA1110_TDL0));

	/*
	 * We only need to turn on DCLK whenever we want to use the
	 * DMA.  It can otherwise be held firmly in the off position.
	 * (currently, we always enable it.)
	 */
	val = sa1111_readl(sachip->base + SA1111_SKPCR);
	sa1111_writel(val | SKPCR_DCLKEN, sachip->base + SA1111_SKPCR);

	/*
	 * Enable the SA1110 memory bus request and grant signals.
	 */
	sa1110_mb_enable();
	}
#endif

	g_sa1111 = sachip;

	/* create children for every function block not disabled by the
	 * platform data (failures here are deliberately non-fatal) */
	has_devs = ~0;
	if (pd)
		has_devs &= ~pd->disable_devs;

	for (i = 0; i < ARRAY_SIZE(sa1111_devices); i++)
		if (sa1111_devices[i].devid & has_devs)
			sa1111_init_one_child(sachip, mem, &sa1111_devices[i]);

	return 0;

 err_unmap:
	iounmap(sachip->base);
 err_clk_unprep:
	clk_unprepare(sachip->clk);
 err_clkput:
	clk_put(sachip->clk);
 err_free:
	kfree(sachip);
	return ret;
}
/* device_for_each_child() callback: tear down one SA1111 child device. */
static int sa1111_remove_one(struct device *dev, void *data)
{
	struct sa1111_dev *child = SA1111_DEV(dev);

	device_del(&child->dev);
	release_resource(&child->res);
	put_device(&child->dev);

	return 0;
}
/*
 * Tear down a SA1111 instance: remove all children, silence the
 * interrupt controller, stop the clock and release every resource
 * taken in __sa1111_probe()/sa1111_setup_irq().  Order matters:
 * children go first (they may still use IRQs), IRQ resources are
 * released before the register mapping is dropped.
 */
static void __sa1111_remove(struct sa1111 *sachip)
{
	void __iomem *irqbase = sachip->base + SA1111_INTC;

	device_for_each_child(sachip->dev, NULL, sa1111_remove_one);

	/* disable all IRQs */
	sa1111_writel(0, irqbase + SA1111_INTEN0);
	sa1111_writel(0, irqbase + SA1111_INTEN1);
	sa1111_writel(0, irqbase + SA1111_WAKEEN0);
	sa1111_writel(0, irqbase + SA1111_WAKEEN1);

	clk_disable(sachip->clk);
	clk_unprepare(sachip->clk);

	if (sachip->irq != NO_IRQ) {
		irq_set_chained_handler_and_data(sachip->irq, NULL, NULL);
		irq_free_descs(sachip->irq_base, SA1111_IRQ_NR);

		release_mem_region(sachip->phys + SA1111_INTC, 512);
	}

	iounmap(sachip->base);
	clk_put(sachip->clk);
	kfree(sachip);
}
/*
 * Register state preserved across suspend/resume; filled in by
 * sa1111_suspend() and written back by sa1111_resume().
 */
struct sa1111_save_data {
	unsigned int skcr;	/* control register */
	unsigned int skpcr;	/* power (function clock gating) register */
	unsigned int skcdr;	/* clock divider register */
	unsigned char skaud;	/* audio clock divider */
	unsigned char skpwm0;	/* PWM0 output */
	unsigned char skpwm1;	/* PWM1 output */

	/*
	 * Interrupt controller
	 */
	unsigned int intpol0;
	unsigned int intpol1;
	unsigned int inten0;
	unsigned int inten1;
	unsigned int wakepol0;
	unsigned int wakepol1;
	unsigned int wakeen0;
	unsigned int wakeen1;
};
#ifdef CONFIG_PM
/*
 * sa1111_suspend - save SA1111 state and power the chip down.
 *
 * Snapshots the control and interrupt-controller registers into a
 * kmalloc'd sa1111_save_data (freed on resume or final remove), kills
 * the PWMs, puts the chip to sleep via SKCR_SLEEP and gates its clock.
 */
static int sa1111_suspend(struct platform_device *dev, pm_message_t state)
{
	struct sa1111 *sachip = platform_get_drvdata(dev);
	struct sa1111_save_data *save;
	unsigned long flags;
	unsigned int val;
	void __iomem *base;

	save = kmalloc(sizeof(struct sa1111_save_data), GFP_KERNEL);
	if (!save)
		return -ENOMEM;
	sachip->saved_state = save;

	spin_lock_irqsave(&sachip->lock, flags);

	/*
	 * Save state.
	 */
	base = sachip->base;
	save->skcr = sa1111_readl(base + SA1111_SKCR);
	save->skpcr = sa1111_readl(base + SA1111_SKPCR);
	save->skcdr = sa1111_readl(base + SA1111_SKCDR);
	save->skaud = sa1111_readl(base + SA1111_SKAUD);
	save->skpwm0 = sa1111_readl(base + SA1111_SKPWM0);
	save->skpwm1 = sa1111_readl(base + SA1111_SKPWM1);

	/* stop the PWM outputs while suspended */
	sa1111_writel(0, sachip->base + SA1111_SKPWM0);
	sa1111_writel(0, sachip->base + SA1111_SKPWM1);

	base = sachip->base + SA1111_INTC;
	save->intpol0 = sa1111_readl(base + SA1111_INTPOL0);
	save->intpol1 = sa1111_readl(base + SA1111_INTPOL1);
	save->inten0 = sa1111_readl(base + SA1111_INTEN0);
	save->inten1 = sa1111_readl(base + SA1111_INTEN1);
	save->wakepol0 = sa1111_readl(base + SA1111_WAKEPOL0);
	save->wakepol1 = sa1111_readl(base + SA1111_WAKEPOL1);
	save->wakeen0 = sa1111_readl(base + SA1111_WAKEEN0);
	save->wakeen1 = sa1111_readl(base + SA1111_WAKEEN1);

	/*
	 * Disable.
	 */
	val = sa1111_readl(sachip->base + SA1111_SKCR);
	sa1111_writel(val | SKCR_SLEEP, sachip->base + SA1111_SKCR);

	clk_disable(sachip->clk);

	spin_unlock_irqrestore(&sachip->lock, flags);

#ifdef CONFIG_ARCH_SA1100
	sa1110_mb_disable();
#endif

	return 0;
}
/*
 *	sa1111_resume - Restore the SA1111 device state.
 *	@dev: device to restore
 *
 *	Restore the general state of the SA1111; clock control and
 *	interrupt controller.  Other parts of the SA1111 must be
 *	restored by their respective drivers, and must be called
 *	via LDM after this function.
 */
static int sa1111_resume(struct platform_device *dev)
{
	struct sa1111 *sachip = platform_get_drvdata(dev);
	struct sa1111_save_data *save;
	unsigned long flags, id;
	void __iomem *base;

	save = sachip->saved_state;
	if (!save)
		return 0;

	/*
	 * Ensure that the SA1111 is still here.
	 * FIXME: shouldn't do this here.
	 */
	id = sa1111_readl(sachip->base + SA1111_SKID);
	if ((id & SKID_ID_MASK) != SKID_SA1111_ID) {
		/* chip vanished across suspend: tear everything down */
		__sa1111_remove(sachip);
		platform_set_drvdata(dev, NULL);
		kfree(save);
		return 0;
	}

	/*
	 * First of all, wake up the chip.
	 */
	sa1111_wake(sachip);

#ifdef CONFIG_ARCH_SA1100
	/* Enable the memory bus request/grant signals */
	sa1110_mb_enable();
#endif

	/*
	 * Only lock for write ops. Also, sa1111_wake must be called with
	 * released spinlock!
	 */
	spin_lock_irqsave(&sachip->lock, flags);
	/* keep interrupts disabled until all state is written back */
	sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN0);
	sa1111_writel(0, sachip->base + SA1111_INTC + SA1111_INTEN1);

	base = sachip->base;
	sa1111_writel(save->skcr, base + SA1111_SKCR);
	sa1111_writel(save->skpcr, base + SA1111_SKPCR);
	sa1111_writel(save->skcdr, base + SA1111_SKCDR);
	sa1111_writel(save->skaud, base + SA1111_SKAUD);
	sa1111_writel(save->skpwm0, base + SA1111_SKPWM0);
	sa1111_writel(save->skpwm1, base + SA1111_SKPWM1);

	base = sachip->base + SA1111_INTC;
	sa1111_writel(save->intpol0, base + SA1111_INTPOL0);
	sa1111_writel(save->intpol1, base + SA1111_INTPOL1);
	sa1111_writel(save->inten0, base + SA1111_INTEN0);
	sa1111_writel(save->inten1, base + SA1111_INTEN1);
	sa1111_writel(save->wakepol0, base + SA1111_WAKEPOL0);
	sa1111_writel(save->wakepol1, base + SA1111_WAKEPOL1);
	sa1111_writel(save->wakeen0, base + SA1111_WAKEEN0);
	sa1111_writel(save->wakeen1, base + SA1111_WAKEEN1);
	spin_unlock_irqrestore(&sachip->lock, flags);

	sachip->saved_state = NULL;
	kfree(save);

	return 0;
}
#else
#define sa1111_suspend NULL
#define sa1111_resume NULL
#endif
/*
 * Platform-bus glue: fetch our memory window and interrupt, then hand
 * off to __sa1111_probe() to do the real work.
 *
 * Returns 0 on success, -EINVAL when the memory resource is missing,
 * or the error from platform_get_irq()/__sa1111_probe().
 */
static int sa1111_probe(struct platform_device *pdev)
{
	struct resource *mem;
	int irq;

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -EINVAL;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		/* propagate the real error (e.g. -EPROBE_DEFER) instead
		 * of flattening everything to -ENXIO */
		return irq;

	return __sa1111_probe(&pdev->dev, mem, irq);
}
/* Platform-bus removal: drop any saved PM state, then tear the chip down. */
static int sa1111_remove(struct platform_device *pdev)
{
	struct sa1111 *sachip = platform_get_drvdata(pdev);

	if (!sachip)
		return 0;

#ifdef CONFIG_PM
	kfree(sachip->saved_state);
	sachip->saved_state = NULL;
#endif
	__sa1111_remove(sachip);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
/*
 * Not sure if this should be on the system bus or not yet.
 * We really want some way to register a system device at
 * the per-machine level, and then have this driver pick
 * up the registered devices.
 *
 * We also need to handle the SDRAM configuration for
 * PXA250/SA1110 machine classes.
 */
/* Platform driver binding for the "sa1111" platform device. */
static struct platform_driver sa1111_device_driver = {
	.probe = sa1111_probe,
	.remove = sa1111_remove,
	.suspend = sa1111_suspend,	/* NULL when !CONFIG_PM */
	.resume = sa1111_resume,	/* NULL when !CONFIG_PM */
	.driver = {
		.name = "sa1111",
	},
};
/*
 * Get the parent device driver (us) structure
 * from a child function device
 */
static inline struct sa1111 *sa1111_chip_driver(struct sa1111_dev *sadev)
{
	/* drvdata on the parent was set in __sa1111_probe() */
	return dev_get_drvdata(sadev->dev.parent);
}
/*
 * The bits in the opdiv field are non-linear.
 */
/* Maps the 2-bit SKCDR opdiv field to the actual output divisor. */
static unsigned char opdiv_table[] = { 1, 4, 2, 8 };

/*
 * Compute the PLL output frequency in Hz from the SKCDR divider
 * fields, assuming a 3.6864MHz input clock (see sa1111_pll_clock's
 * kernel-doc note below).
 */
static unsigned int __sa1111_pll_clock(struct sa1111 *sachip)
{
	unsigned int skcdr, fbdiv, ipdiv, opdiv;

	skcdr = sa1111_readl(sachip->base + SA1111_SKCDR);

	fbdiv = (skcdr & 0x007f) + 2;		/* feedback divider */
	ipdiv = ((skcdr & 0x0f80) >> 7) + 2;	/* input divider */
	opdiv = opdiv_table[(skcdr & 0x3000) >> 12];	/* output divider */

	return 3686400 * fbdiv / (ipdiv * opdiv);
}
/**
 *	sa1111_pll_clock - return the current PLL clock frequency.
 *	@sadev: SA1111 function block
 *
 *	BUG: we should look at SKCR. We also blindly believe that
 *	the chip is being fed with the 3.6864MHz clock.
 *
 *	Returns the PLL clock in Hz.
 */
unsigned int sa1111_pll_clock(struct sa1111_dev *sadev)
{
	return __sa1111_pll_clock(sa1111_chip_driver(sadev));
}
EXPORT_SYMBOL(sa1111_pll_clock);
/**
 *	sa1111_select_audio_mode - select I2S or AC link mode
 *	@sadev: SA1111 function block
 *	@mode: One of %SA1111_AUDIO_ACLINK or %SA1111_AUDIO_I2S
 *
 *	Frob the SKCR to select AC Link mode or I2S mode for
 *	the audio block.
 */
void sa1111_select_audio_mode(struct sa1111_dev *sadev, int mode)
{
	struct sa1111 *sachip = sa1111_chip_driver(sadev);
	unsigned long flags;
	unsigned int skcr;

	spin_lock_irqsave(&sachip->lock, flags);

	skcr = sa1111_readl(sachip->base + SA1111_SKCR);
	if (mode == SA1111_AUDIO_I2S)
		skcr &= ~SKCR_SELAC;	/* SELAC clear == I2S */
	else
		skcr |= SKCR_SELAC;	/* SELAC set == AC Link */
	sa1111_writel(skcr, sachip->base + SA1111_SKCR);

	spin_unlock_irqrestore(&sachip->lock, flags);
}
EXPORT_SYMBOL(sa1111_select_audio_mode);
/**
 *	sa1111_set_audio_rate - set the audio sample rate
 *	@sadev: SA1111 SAC function block
 *	@rate: sample rate to select, in Hz (must be positive)
 *
 *	Programs SKAUD so the audio clock divider best approximates
 *	@rate (PLL clock / 256 / divider, divider clamped to 1..128).
 *
 *	Returns 0 on success, or -EINVAL when @sadev is not the SAC
 *	block or @rate is not positive (a zero rate previously caused
 *	a division by zero below).
 */
int sa1111_set_audio_rate(struct sa1111_dev *sadev, int rate)
{
	struct sa1111 *sachip = sa1111_chip_driver(sadev);
	unsigned int div;

	if (sadev->devid != SA1111_DEVID_SAC)
		return -EINVAL;

	if (rate <= 0)
		return -EINVAL;

	/* round to nearest by adding rate/2 before dividing */
	div = (__sa1111_pll_clock(sachip) / 256 + rate / 2) / rate;
	if (div == 0)
		div = 1;
	if (div > 128)
		div = 128;

	/* SKAUD holds divider - 1 */
	sa1111_writel(div - 1, sachip->base + SA1111_SKAUD);

	return 0;
}
EXPORT_SYMBOL(sa1111_set_audio_rate);
/**
 *	sa1111_get_audio_rate - get the audio sample rate
 *	@sadev: SA1111 SAC function block device
 *
 *	Derives the current sample rate from the PLL clock and the
 *	SKAUD divider; -EINVAL when @sadev is not the SAC block.
 */
int sa1111_get_audio_rate(struct sa1111_dev *sadev)
{
	struct sa1111 *sachip = sa1111_chip_driver(sadev);
	unsigned long divisor;

	if (sadev->devid != SA1111_DEVID_SAC)
		return -EINVAL;

	/* SKAUD holds divider - 1 */
	divisor = sa1111_readl(sachip->base + SA1111_SKAUD) + 1;

	return __sa1111_pll_clock(sachip) / (256 * divisor);
}
EXPORT_SYMBOL(sa1111_get_audio_rate);
/*
 * sa1111_set_io_dir - configure GPIO direction and sleep-state direction.
 * @sadev: SA1111 function block device
 * @bits: mask of GPIO lines to change (port A: bits 0-3, B: 8-15, C: 16-23)
 * @dir: new direction bits for the masked lines
 * @sleep_dir: direction the masked lines take in sleep mode
 */
void sa1111_set_io_dir(struct sa1111_dev *sadev,
		       unsigned int bits, unsigned int dir,
		       unsigned int sleep_dir)
{
	struct sa1111 *sachip = sa1111_chip_driver(sadev);
	unsigned long flags;
	unsigned int val;
	void __iomem *gpio = sachip->base + SA1111_GPIO;

	/*
	 * Read-modify-write @mask bits of register @port with @dir.
	 * NOTE: deliberately file-visible -- this macro is also used by
	 * sa1111_set_io() and sa1111_set_sleep_io() below, and relies on
	 * a local "val" being in scope.
	 */
#define MODIFY_BITS(port, mask, dir)		\
	if (mask) {				\
		val = sa1111_readl(port);	\
		val &= ~(mask);			\
		val |= (dir) & (mask);		\
		sa1111_writel(val, port);	\
	}

	spin_lock_irqsave(&sachip->lock, flags);
	MODIFY_BITS(gpio + SA1111_GPIO_PADDR, bits & 15, dir);
	MODIFY_BITS(gpio + SA1111_GPIO_PBDDR, (bits >> 8) & 255, dir >> 8);
	MODIFY_BITS(gpio + SA1111_GPIO_PCDDR, (bits >> 16) & 255, dir >> 16);

	MODIFY_BITS(gpio + SA1111_GPIO_PASDR, bits & 15, sleep_dir);
	MODIFY_BITS(gpio + SA1111_GPIO_PBSDR, (bits >> 8) & 255, sleep_dir >> 8);
	MODIFY_BITS(gpio + SA1111_GPIO_PCSDR, (bits >> 16) & 255, sleep_dir >> 16);
	spin_unlock_irqrestore(&sachip->lock, flags);
}
EXPORT_SYMBOL(sa1111_set_io_dir);
/*
 * sa1111_set_io - drive the output state of GPIO lines.
 * @sadev: SA1111 function block device
 * @bits: mask of lines to change (port A: bits 0-3, B: 8-15, C: 16-23)
 * @v: new output values for the masked lines
 *
 * Open-coded read-modify-write of the three data-write registers
 * (equivalent to the MODIFY_BITS macro defined in sa1111_set_io_dir).
 */
void sa1111_set_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
{
	struct sa1111 *sachip = sa1111_chip_driver(sadev);
	unsigned long flags;
	unsigned int val;
	void __iomem *gpio = sachip->base + SA1111_GPIO;

	spin_lock_irqsave(&sachip->lock, flags);
	if (bits & 15) {	/* port A */
		val = sa1111_readl(gpio + SA1111_GPIO_PADWR);
		val = (val & ~(bits & 15)) | (v & bits & 15);
		sa1111_writel(val, gpio + SA1111_GPIO_PADWR);
	}
	if ((bits >> 8) & 255) {	/* port B */
		val = sa1111_readl(gpio + SA1111_GPIO_PBDWR);
		val = (val & ~((bits >> 8) & 255)) |
		      ((v >> 8) & (bits >> 8) & 255);
		sa1111_writel(val, gpio + SA1111_GPIO_PBDWR);
	}
	if ((bits >> 16) & 255) {	/* port C */
		val = sa1111_readl(gpio + SA1111_GPIO_PCDWR);
		val = (val & ~((bits >> 16) & 255)) |
		      ((v >> 16) & (bits >> 16) & 255);
		sa1111_writel(val, gpio + SA1111_GPIO_PCDWR);
	}
	spin_unlock_irqrestore(&sachip->lock, flags);
}
EXPORT_SYMBOL(sa1111_set_io);
/*
 * sa1111_set_sleep_io - set the state GPIO lines take while in sleep mode.
 * @sadev: SA1111 function block device
 * @bits: mask of lines to change (port A: bits 0-3, B: 8-15, C: 16-23)
 * @v: sleep-state values for the masked lines
 *
 * Open-coded read-modify-write of the three sleep-state registers
 * (equivalent to the MODIFY_BITS macro defined in sa1111_set_io_dir).
 */
void sa1111_set_sleep_io(struct sa1111_dev *sadev, unsigned int bits, unsigned int v)
{
	struct sa1111 *sachip = sa1111_chip_driver(sadev);
	unsigned long flags;
	unsigned int val;
	void __iomem *gpio = sachip->base + SA1111_GPIO;

	spin_lock_irqsave(&sachip->lock, flags);
	if (bits & 15) {	/* port A */
		val = sa1111_readl(gpio + SA1111_GPIO_PASSR);
		val = (val & ~(bits & 15)) | (v & bits & 15);
		sa1111_writel(val, gpio + SA1111_GPIO_PASSR);
	}
	if ((bits >> 8) & 255) {	/* port B */
		val = sa1111_readl(gpio + SA1111_GPIO_PBSSR);
		val = (val & ~((bits >> 8) & 255)) |
		      ((v >> 8) & (bits >> 8) & 255);
		sa1111_writel(val, gpio + SA1111_GPIO_PBSSR);
	}
	if ((bits >> 16) & 255) {	/* port C */
		val = sa1111_readl(gpio + SA1111_GPIO_PCSSR);
		val = (val & ~((bits >> 16) & 255)) |
		      ((v >> 16) & (bits >> 16) & 255);
		sa1111_writel(val, gpio + SA1111_GPIO_PCSSR);
	}
	spin_unlock_irqrestore(&sachip->lock, flags);
}
EXPORT_SYMBOL(sa1111_set_sleep_io);
/*
 * Individual device operations.
 */

/**
 *	sa1111_enable_device - enable an on-chip SA1111 function block
 *	@sadev: SA1111 function block device to enable
 *
 *	Gates on the block's clocks via SKPCR, after giving the platform
 *	a chance to veto through its enable hook.  Returns 0 on success
 *	or the platform hook's error code.
 */
int sa1111_enable_device(struct sa1111_dev *sadev)
{
	struct sa1111 *sachip = sa1111_chip_driver(sadev);
	unsigned long flags;
	unsigned int skpcr;
	int ret = 0;

	if (sachip->pdata && sachip->pdata->enable)
		ret = sachip->pdata->enable(sachip->pdata->data, sadev->devid);
	if (ret)
		return ret;

	spin_lock_irqsave(&sachip->lock, flags);
	skpcr = sa1111_readl(sachip->base + SA1111_SKPCR);
	sa1111_writel(skpcr | sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
	spin_unlock_irqrestore(&sachip->lock, flags);

	return 0;
}
EXPORT_SYMBOL(sa1111_enable_device);
/**
 *	sa1111_disable_device - disable an on-chip SA1111 function block
 *	@sadev: SA1111 function block device to disable
 *
 *	Gates off the block's clocks via SKPCR, then notifies the
 *	platform through its disable hook.
 */
void sa1111_disable_device(struct sa1111_dev *sadev)
{
	struct sa1111 *sachip = sa1111_chip_driver(sadev);
	unsigned long flags;
	unsigned int skpcr;

	spin_lock_irqsave(&sachip->lock, flags);
	skpcr = sa1111_readl(sachip->base + SA1111_SKPCR);
	sa1111_writel(skpcr & ~sadev->skpcr_mask, sachip->base + SA1111_SKPCR);
	spin_unlock_irqrestore(&sachip->lock, flags);

	if (sachip->pdata && sachip->pdata->disable)
		sachip->pdata->disable(sachip->pdata->data, sadev->devid);
}
EXPORT_SYMBOL(sa1111_disable_device);
/*
* SA1111 "Register Access Bus."
*
* We model this as a regular bus type, and hang devices directly
* off this.
*/
static int sa1111_match(struct device *_dev, struct device_driver *_drv)
{
struct sa1111_dev *dev = SA1111_DEV(_dev);
struct sa1111_driver *drv = SA1111_DRV(_drv);
return dev->devid & drv->devid;
}
/* Bus-level suspend: forward to the bound driver's hook, if any. */
static int sa1111_bus_suspend(struct device *dev, pm_message_t state)
{
	struct sa1111_driver *drv = SA1111_DRV(dev->driver);

	if (!drv || !drv->suspend)
		return 0;
	return drv->suspend(SA1111_DEV(dev), state);
}
/* Bus-level resume: forward to the bound driver's hook, if any. */
static int sa1111_bus_resume(struct device *dev)
{
	struct sa1111_driver *drv = SA1111_DRV(dev->driver);

	if (!drv || !drv->resume)
		return 0;
	return drv->resume(SA1111_DEV(dev));
}
/* Bus-level shutdown: forward to the bound driver's hook, if any. */
static void sa1111_bus_shutdown(struct device *dev)
{
	struct sa1111_driver *drv = SA1111_DRV(dev->driver);

	if (drv && drv->shutdown)
		drv->shutdown(SA1111_DEV(dev));
}
/* Bus-level probe: a bound driver without a probe hook cannot attach. */
static int sa1111_bus_probe(struct device *dev)
{
	struct sa1111_driver *drv = SA1111_DRV(dev->driver);

	if (!drv->probe)
		return -ENODEV;
	return drv->probe(SA1111_DEV(dev));
}
/* Bus-level remove: forward to the bound driver's hook, if any. */
static int sa1111_bus_remove(struct device *dev)
{
	struct sa1111_driver *drv = SA1111_DRV(dev->driver);

	if (!drv->remove)
		return 0;
	return drv->remove(SA1111_DEV(dev));
}
/*
 * The SA1111 "Register Access Bus" type.  Child function-block devices
 * created in sa1111_init_one_child() and drivers registered through
 * sa1111_driver_register() hang off this bus.
 */
struct bus_type sa1111_bus_type = {
.name = "sa1111-rab",
.match = sa1111_match,
.probe = sa1111_bus_probe,
.remove = sa1111_bus_remove,
.suspend = sa1111_bus_suspend,
.resume = sa1111_bus_resume,
.shutdown = sa1111_bus_shutdown,
};
EXPORT_SYMBOL(sa1111_bus_type);
/*
 * Register a driver on the SA1111 "Register Access Bus".
 * Returns the result of driver_register().
 */
int sa1111_driver_register(struct sa1111_driver *driver)
{
	driver->drv.bus = &sa1111_bus_type;
	return driver_register(&driver->drv);
}
EXPORT_SYMBOL(sa1111_driver_register);
/* Unregister a driver previously added with sa1111_driver_register(). */
void sa1111_driver_unregister(struct sa1111_driver *driver)
{
	driver_unregister(&driver->drv);
}
EXPORT_SYMBOL(sa1111_driver_unregister);
#ifdef CONFIG_DMABOUNCE
/*
* According to the "Intel StrongARM SA-1111 Microprocessor Companion
* Chip Specification Update" (June 2000), erratum #7, there is a
* significant bug in the SA1111 SDRAM shared memory controller. If
* an access to a region of memory above 1MB relative to the bank base,
* it is important that address bit 10 _NOT_ be asserted. Depending
* on the configuration of the RAM, bit 10 may correspond to one
* of several different (processor-relative) address bits.
*
* This routine only identifies whether or not a given DMA address
* is susceptible to the bug.
*
* This should only get called for sa1111_device types due to the
* way we configure our device dma_masks.
*/
static int sa1111_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
{
	/*
	 * Section 4.6 of the "Intel StrongARM SA-1111 Development Module
	 * User's Guide" mentions that jumpers R51 and R52 control the
	 * target of SA-1111 DMA (either SDRAM bank 0 on Assabet, or
	 * SDRAM bank 1 on Neponset). The default configuration selects
	 * Assabet, so any address in bank 1 is necessarily invalid.
	 */
	if (!machine_is_assabet() && !machine_is_pfs168())
		return 0;

	/* bounce if the buffer starts in, or extends into, bank 1 */
	return addr >= 0xc8000000 || (addr + size) >= 0xc8000000;
}
/*
 * Bus notifier: register/unregister a child with dmabounce as it is
 * added to or removed from the bus.  Only children whose DMA mask was
 * clipped below 32 bits (i.e. affected by erratum #7) need bouncing.
 */
static int sa1111_notifier_call(struct notifier_block *n, unsigned long action,
	void *data)
{
	struct sa1111_dev *dev = SA1111_DEV(data);
	int err;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		if (!dev->dev.dma_mask || dev->dma_mask >= 0xffffffffUL)
			break;
		err = dmabounce_register_dev(&dev->dev, 1024, 4096,
					     sa1111_needs_bounce);
		if (err)
			dev_err(&dev->dev, "failed to register with dmabounce: %d\n", err);
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		if (dev->dev.dma_mask && dev->dma_mask < 0xffffffffUL)
			dmabounce_unregister_dev(&dev->dev);
		break;
	}
	return NOTIFY_OK;
}
/* Hooks device add/remove on the SA1111 bus for dmabounce handling. */
static struct notifier_block sa1111_bus_notifier = {
	.notifier_call = sa1111_notifier_call,
};
#endif
/*
 * Module init: register the SA1111 bus type, the dmabounce notifier
 * (when configured) and the platform driver.  Unlike the previous
 * version, a platform_driver_register() failure is no longer ignored:
 * the bus/notifier registration is unwound and the error returned.
 */
static int __init sa1111_init(void)
{
	int ret = bus_register(&sa1111_bus_type);
	if (ret)
		return ret;
#ifdef CONFIG_DMABOUNCE
	bus_register_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
#endif
	ret = platform_driver_register(&sa1111_device_driver);
	if (ret) {
#ifdef CONFIG_DMABOUNCE
		bus_unregister_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
#endif
		bus_unregister(&sa1111_bus_type);
	}
	return ret;
}
/* Module exit: tear down in reverse order of sa1111_init(). */
static void __exit sa1111_exit(void)
{
	platform_driver_unregister(&sa1111_device_driver);
#ifdef CONFIG_DMABOUNCE
	bus_unregister_notifier(&sa1111_bus_type, &sa1111_bus_notifier);
#endif
	bus_unregister(&sa1111_bus_type);
}
subsys_initcall(sa1111_init);
module_exit(sa1111_exit);
MODULE_DESCRIPTION("Intel Corporation SA1111 core driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
fastly/linux | sound/usb/mixer.c | 332 | 66873 | /*
* (Tentative) USB Audio Driver for ALSA
*
* Mixer control part
*
* Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
*
* Many codes borrowed from audio.c by
* Alan Cox (alan@lxorguk.ukuu.org.uk)
* Thomas Sailer (sailer@ife.ee.ethz.ch)
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
* TODOs, for both the mixer and the streaming interfaces:
*
* - support for UAC2 effect units
* - support for graphical equalizers
* - RANGE and MEM set commands (UAC2)
* - RANGE and MEM interrupt dispatchers (UAC2)
* - audio channel clustering (UAC2)
* - audio sample rate converter units (UAC2)
* - proper handling of clock multipliers (UAC2)
* - dispatch clock change notifications (UAC2)
* - stop PCM streams which use a clock that became invalid
* - stop PCM streams which use a clock selector that has changed
* - parse available sample rates again when clock sources changed
*/
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/usb.h>
#include <linux/usb/audio.h>
#include <linux/usb/audio-v2.h>
#include <sound/core.h>
#include <sound/control.h>
#include <sound/hwdep.h>
#include <sound/info.h>
#include <sound/tlv.h>
#include "usbaudio.h"
#include "mixer.h"
#include "helper.h"
#include "mixer_quirks.h"
#include "power.h"
/* highest unit id representable in the per-mixer unit bitmap */
#define MAX_ID_ELEMS 256

/* Parsed description of one audio-class terminal. */
struct usb_audio_term {
	int id;			/* terminal/unit id */
	int type;		/* terminal type code */
	int channels;		/* number of logical channels */
	unsigned int chconfig;	/* channel configuration bitmap */
	int name;		/* string descriptor index for the name */
};

struct usbmix_name_map;

/* Transient state used while parsing one mixer interface. */
struct mixer_build {
	struct snd_usb_audio *chip;
	struct usb_mixer_interface *mixer;
	unsigned char *buffer;	/* raw class-specific descriptor data */
	unsigned int buflen;
	DECLARE_BITMAP(unitbitmap, MAX_ID_ELEMS);	/* units already parsed */
	struct usb_audio_term oterm;	/* current output terminal */
	const struct usbmix_name_map *map;	/* per-device name overrides */
	const struct usbmix_selector_map *selector_map;
};
/*E-mu 0202/0404/0204 eXtension Unit(XU) control*/
/* XU unit ids as reported by the E-mu devices */
enum {
	USB_XU_CLOCK_RATE = 0xe301,
	USB_XU_CLOCK_SOURCE = 0xe302,
	USB_XU_DIGITAL_IO_STATUS = 0xe303,
	USB_XU_DEVICE_OPTIONS = 0xe304,
	USB_XU_DIRECT_MONITORING = 0xe305,
	USB_XU_METERING = 0xe306
};
/* selector indices within the XU units above */
enum {
	USB_XU_CLOCK_SOURCE_SELECTOR = 0x02,	/* clock source*/
	USB_XU_CLOCK_RATE_SELECTOR = 0x03,	/* clock rate */
	USB_XU_DIGITAL_FORMAT_SELECTOR = 0x01,	/* the spdif format */
	USB_XU_SOFT_LIMIT_SELECTOR = 0x03	/* soft limiter */
};
/*
* manual mapping of mixer names
* if the mixer topology is too complicated and the parsed names are
* ambiguous, add the entries in usbmixer_maps.c.
*/
#include "mixer_maps.c"
/*
 * Look up the per-device name-map entry for @unitid (and optionally a
 * specific @control).  Returns NULL when no override applies.
 */
static const struct usbmix_name_map *
find_map(struct mixer_build *state, int unitid, int control)
{
	const struct usbmix_name_map *p;

	for (p = state->map; p && p->id; p++) {
		if (p->id != unitid)
			continue;
		/* a control-specific entry only matches its own control */
		if (control && p->control && control != p->control)
			continue;
		return p;
	}
	return NULL;
}
/* get the mapped name if the unit matches */
static int
check_mapped_name(const struct usbmix_name_map *p, char *buf, int buflen)
{
	if (!p || !p->name)
		return 0;
	/* reserve one byte, matching the historic behaviour */
	return strlcpy(buf, p->name, buflen - 1);
}
/* check whether the control should be ignored */
static inline int
check_ignored_ctl(const struct usbmix_name_map *p)
{
	/* an entry with neither a name nor a dB override means "ignore" */
	return p && !p->name && !p->dB;
}
/* dB mapping: apply a per-device dB range override, if one exists */
static inline void check_mapped_dB(const struct usbmix_name_map *p,
	struct usb_mixer_elem_info *cval)
{
	if (!p || !p->dB)
		return;

	cval->dBmin = p->dB->min;
	cval->dBmax = p->dB->max;
	cval->initialized = 1;	/* skip probing the device for the range */
}
/* get the mapped selector source name */
static int check_mapped_selector_name(struct mixer_build *state, int unitid,
	int index, char *buf, int buflen)
{
	const struct usbmix_selector_map *p = state->selector_map;

	for (; p && p->id; p++)
		if (p->id == unitid && index < p->count)
			return strlcpy(buf, p->names[index], buflen);
	return 0;
}
/*
 * find an audio control unit with the given unit id
 */
static void *find_audio_control_unit(struct mixer_build *state, unsigned char unit)
{
	/* only the common header fields are examined here */
	struct uac_feature_unit_descriptor *desc = NULL;

	for (;;) {
		desc = snd_usb_find_desc(state->buffer, state->buflen, desc,
					 USB_DT_CS_INTERFACE);
		if (!desc)
			return NULL;
		if (desc->bLength >= 4 &&
		    desc->bDescriptorSubtype >= UAC_INPUT_TERMINAL &&
		    desc->bDescriptorSubtype <= UAC2_SAMPLE_RATE_CONVERTER &&
		    desc->bUnitID == unit)
			return desc;
	}
}
/*
 * copy a string with the given id
 *
 * Fetches USB string descriptor @index into @buf (at most maxlen - 1
 * bytes) and NUL-terminates it.  Returns the string length, or 0 when
 * the descriptor cannot be read.
 *
 * Fix: usb_string() returns a negative errno on failure; the original
 * code used that value unchecked as an array index (buf[len] = 0),
 * writing before the start of the buffer.
 */
static int snd_usb_copy_string_desc(struct mixer_build *state, int index, char *buf, int maxlen)
{
	int len = usb_string(state->chip->dev, index, buf, maxlen - 1);

	if (len < 0)
		return 0;
	buf[len] = 0;
	return len;
}
/*
 * convert the raw byte/word read from the device into a zero-based
 * signed native integer, according to the element's value type
 */
static int convert_signed_value(struct usb_mixer_elem_info *cval, int val)
{
	switch (cval->val_type) {
	case USB_MIXER_BOOLEAN:
		return val ? 1 : 0;
	case USB_MIXER_INV_BOOLEAN:
		return val ? 0 : 1;
	case USB_MIXER_U8:
		return val & 0xff;
	case USB_MIXER_S8:
		/* sign-extend from 8 bits */
		return (val & 0x7f) - (val & 0x80);
	case USB_MIXER_U16:
		return val & 0xffff;
	case USB_MIXER_S16:
		/* sign-extend from 16 bits */
		return (val & 0x7fff) - (val & 0x8000);
	}
	return val;	/* unknown type: pass through unchanged */
}
/*
 * convert a zero-based signed integer back into the raw byte/word
 * representation to be sent to the device
 */
static int convert_bytes_value(struct usb_mixer_elem_info *cval, int val)
{
	switch (cval->val_type) {
	case USB_MIXER_BOOLEAN:
		return val ? 1 : 0;
	case USB_MIXER_INV_BOOLEAN:
		return val ? 0 : 1;
	case USB_MIXER_S8:
	case USB_MIXER_U8:
		return val & 0xff;
	case USB_MIXER_S16:
	case USB_MIXER_U16:
		return val & 0xffff;
	default:
		return 0; /* not reached */
	}
}
/*
 * translate an absolute device value into the zero-based control index
 * reported to ALSA, clamping to the [min, max] range
 */
static int get_relative_value(struct usb_mixer_elem_info *cval, int val)
{
	if (!cval->res)
		cval->res = 1;	/* guard against division by zero */

	if (val < cval->min)
		return 0;
	if (val >= cval->max)
		return (cval->max - cval->min + cval->res - 1) / cval->res;
	return (val - cval->min) / cval->res;
}
/*
 * translate a zero-based control index back into the absolute device
 * value, clamping to the [min, max] range
 */
static int get_abs_value(struct usb_mixer_elem_info *cval, int val)
{
	int abs_val;

	if (val < 0)
		return cval->min;
	if (!cval->res)
		cval->res = 1;	/* guard against multiplying by zero */
	abs_val = val * cval->res + cval->min;
	if (abs_val > cval->max)
		return cval->max;
	return abs_val;
}
/*
 * retrieve a mixer value (UAC version 1)
 *
 * Issues a class-specific GET request on the control endpoint and
 * converts the returned little-endian value into a signed int.
 * The request is retried up to 10 times (some devices apparently fail
 * transiently).  Returns 0 on success (value in *value_ret) or a
 * negative error code.
 */
static int get_ctl_value_v1(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
{
	struct snd_usb_audio *chip = cval->mixer->chip;
	unsigned char buf[2];
	/* 16-bit value types need two bytes, everything smaller needs one */
	int val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
	int timeout = 10;
	int idx = 0, err;

	/* keep the device resumed while talking to it */
	err = snd_usb_autoresume(cval->mixer->chip);
	if (err < 0)
		return -EIO;
	/* hold the shutdown lock so a disconnect cannot race with the I/O */
	down_read(&chip->shutdown_rwsem);
	while (timeout-- > 0) {
		if (chip->shutdown)
			break;
		/* wIndex: control interface in the low byte, unit id in the high */
		idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
		if (snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), request,
				    USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
				    validx, idx, buf, val_len) >= val_len) {
			*value_ret = convert_signed_value(cval, snd_usb_combine_bytes(buf, val_len));
			err = 0;
			goto out;
		}
	}
	snd_printdd(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
		    request, validx, idx, cval->val_type);
	err = -EINVAL;
 out:
	up_read(&chip->shutdown_rwsem);
	snd_usb_autosuspend(cval->mixer->chip);
	return err;
}
/*
 * retrieve a mixer value (UAC version 2)
 *
 * GET_CUR maps to a single CS_CUR request; MIN/MAX/RES are all served
 * by one CS_RANGE request whose reply holds a (min, max, res) triplet,
 * from which the wanted member is picked below.  Returns 0 on success
 * (value in *value_ret) or a negative error code.
 */
static int get_ctl_value_v2(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
{
	struct snd_usb_audio *chip = cval->mixer->chip;
	unsigned char buf[2 + 3*sizeof(__u16)]; /* enough space for one range */
	unsigned char *val;
	int idx = 0, ret, size;
	__u8 bRequest;

	if (request == UAC_GET_CUR) {
		bRequest = UAC2_CS_CUR;
		size = sizeof(__u16);
	} else {
		bRequest = UAC2_CS_RANGE;
		size = sizeof(buf);
	}
	memset(buf, 0, sizeof(buf));
	ret = snd_usb_autoresume(chip) ? -EIO : 0;
	if (ret)
		goto error;
	/* hold the shutdown lock so a disconnect cannot race with the I/O */
	down_read(&chip->shutdown_rwsem);
	if (chip->shutdown)
		ret = -ENODEV;
	else {
		/* wIndex: control interface in the low byte, unit id in the high */
		idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
		ret = snd_usb_ctl_msg(chip->dev, usb_rcvctrlpipe(chip->dev, 0), bRequest,
				      USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_IN,
				      validx, idx, buf, size);
	}
	up_read(&chip->shutdown_rwsem);
	snd_usb_autosuspend(chip);
	if (ret < 0) {
error:	/* note: also reached by the goto above, before the I/O */
		snd_printk(KERN_ERR "cannot get ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n",
			   request, validx, idx, cval->val_type);
		return ret;
	}
	/* FIXME: how should we handle multiple triplets here? */
	/* pick the requested member out of the RANGE reply:
	 * 2 bytes of sub-range count, then min, max, res words */
	switch (request) {
	case UAC_GET_CUR:
		val = buf;
		break;
	case UAC_GET_MIN:
		val = buf + sizeof(__u16);
		break;
	case UAC_GET_MAX:
		val = buf + sizeof(__u16) * 2;
		break;
	case UAC_GET_RES:
		val = buf + sizeof(__u16) * 3;
		break;
	default:
		return -EINVAL;
	}
	*value_ret = convert_signed_value(cval, snd_usb_combine_bytes(val, sizeof(__u16)));
	return 0;
}
/*
 * protocol-dispatching GET: applies the per-element index offset and
 * forwards to the UAC1 or UAC2 implementation
 */
static int get_ctl_value(struct usb_mixer_elem_info *cval, int request, int validx, int *value_ret)
{
	validx += cval->idx_off;

	if (cval->mixer->protocol == UAC_VERSION_1)
		return get_ctl_value_v1(cval, request, validx, value_ret);
	return get_ctl_value_v2(cval, request, validx, value_ret);
}
/* convenience wrapper: issue a GET_CUR for the given value index */
static int get_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int *value)
{
	return get_ctl_value(cval, UAC_GET_CUR, validx, value);
}
/* channel = 0: master, 1 = first channel */
static inline int get_cur_mix_raw(struct usb_mixer_elem_info *cval,
				  int channel, int *value)
{
	/* wValue encodes the control selector in the high byte and the
	 * channel number in the low byte */
	return get_ctl_value(cval, UAC_GET_CUR, (cval->control << 8) | channel, value);
}
/*
 * read the current value of one channel, going through the per-channel
 * cache to avoid repeated USB round-trips
 */
static int get_cur_mix_value(struct usb_mixer_elem_info *cval,
			     int channel, int index, int *value)
{
	int err;

	/* serve from the cache when this channel was already read */
	if (cval->cached & (1 << channel)) {
		*value = cval->cache_val[index];
		return 0;
	}

	err = get_cur_mix_raw(cval, channel, value);
	if (err >= 0) {
		/* remember the freshly read value */
		cval->cached |= 1 << channel;
		cval->cache_val[index] = *value;
		return 0;
	}

	if (!cval->mixer->ignore_ctl_error)
		snd_printd(KERN_ERR "cannot get current value for control %d ch %d: err = %d\n",
			   cval->control, channel, err);
	return err;
}
/*
 * set a mixer value
 *
 * Issues a class-specific SET request; the value is sent low byte
 * first.  For UAC2 only SET_CUR is supported.  The request is retried
 * up to 10 times.  Returns 0 on success or a negative error code.
 */
int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval,
				int request, int validx, int value_set)
{
	struct snd_usb_audio *chip = cval->mixer->chip;
	unsigned char buf[2];
	int idx = 0, val_len, err, timeout = 10;

	validx += cval->idx_off;
	if (cval->mixer->protocol == UAC_VERSION_1) {
		val_len = cval->val_type >= USB_MIXER_S16 ? 2 : 1;
	} else { /* UAC_VERSION_2 */
		/* audio class v2 controls are always 2 bytes in size */
		val_len = sizeof(__u16);
		/* FIXME */
		if (request != UAC_SET_CUR) {
			snd_printdd(KERN_WARNING "RANGE setting not yet supported\n");
			return -EINVAL;
		}
		request = UAC2_CS_CUR;
	}
	value_set = convert_bytes_value(cval, value_set);
	buf[0] = value_set & 0xff;
	buf[1] = (value_set >> 8) & 0xff;
	err = snd_usb_autoresume(chip);
	if (err < 0)
		return -EIO;
	/* hold the shutdown lock so a disconnect cannot race with the I/O */
	down_read(&chip->shutdown_rwsem);
	while (timeout-- > 0) {
		if (chip->shutdown)
			break;
		/* wIndex: control interface in the low byte, unit id in the high */
		idx = snd_usb_ctrl_intf(chip) | (cval->id << 8);
		if (snd_usb_ctl_msg(chip->dev,
				    usb_sndctrlpipe(chip->dev, 0), request,
				    USB_RECIP_INTERFACE | USB_TYPE_CLASS | USB_DIR_OUT,
				    validx, idx, buf, val_len) >= 0) {
			err = 0;
			goto out;
		}
	}
	snd_printdd(KERN_ERR "cannot set ctl value: req = %#x, wValue = %#x, wIndex = %#x, type = %d, data = %#x/%#x\n",
		    request, validx, idx, cval->val_type, buf[0], buf[1]);
	err = -EINVAL;
 out:
	up_read(&chip->shutdown_rwsem);
	snd_usb_autosuspend(chip);
	return err;
}
/* convenience wrapper: issue a SET_CUR for the given value index */
static int set_cur_ctl_value(struct usb_mixer_elem_info *cval, int validx, int value)
{
	return snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR, validx, value);
}
/*
 * write the current value of one channel and keep the cache coherent;
 * silently succeeds without any I/O on read-only channels
 */
static int set_cur_mix_value(struct usb_mixer_elem_info *cval, int channel,
			     int index, int value)
{
	unsigned int read_only;
	int err;

	/* the master (channel 0) and per-channel read-only flags live apart */
	if (channel == 0)
		read_only = cval->master_readonly;
	else
		read_only = cval->ch_readonly & (1 << (channel - 1));

	if (read_only) {
		snd_printdd(KERN_INFO "%s(): channel %d of control %d is read_only\n",
			    __func__, channel, cval->control);
		return 0;
	}

	err = snd_usb_mixer_set_ctl_value(cval, UAC_SET_CUR,
					  (cval->control << 8) | channel, value);
	if (err < 0)
		return err;

	/* record what was just written */
	cval->cached |= 1 << channel;
	cval->cache_val[index] = value;
	return 0;
}
/*
 * TLV callback for mixer volume controls
 *
 * Reports the control's dB range (in 1/100 dB units, as stored in
 * dBmin/dBmax) to user space.
 */
int snd_usb_mixer_vol_tlv(struct snd_kcontrol *kcontrol, int op_flag,
			  unsigned int size, unsigned int __user *_tlv)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	DECLARE_TLV_DB_MINMAX(scale, 0, 0);

	if (size < sizeof(scale))
		return -ENOMEM;
	/* slots 2/3 of the TLV_DB_MINMAX block carry the min/max values */
	scale[2] = cval->dBmin;
	scale[3] = cval->dBmax;
	if (copy_to_user(_tlv, scale, sizeof(scale)))
		return -EFAULT;
	return 0;
}
/*
* parser routines begin here...
*/
static int parse_audio_unit(struct mixer_build *state, int unitid);
/*
 * check if the input/output channel routing is enabled on the given bitmap.
 * used for mixer unit parser
 *
 * The bitmap is packed MSB-first per byte: bit (ich * num_outs + och)
 * selects the routing from input channel ich to output channel och.
 */
static int check_matrix_bitmap(unsigned char *bmap, int ich, int och, int num_outs)
{
	int pos = ich * num_outs + och;

	return bmap[pos / 8] & (0x80 >> (pos % 8));
}
/*
 * add an alsa control element
 * search and increment the index until an empty slot is found.
 *
 * if failed, give up and free the control instance.
 */
int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer,
			      struct snd_kcontrol *kctl)
{
	struct usb_mixer_elem_info *cval = kctl->private_data;
	int err;

	/* bump the index until the id no longer collides */
	while (snd_ctl_find_id(mixer->chip->card, &kctl->id))
		kctl->id.index++;

	err = snd_ctl_add(mixer->chip->card, kctl);
	if (err < 0) {
		snd_printd(KERN_ERR "cannot add control (err = %d)\n", err);
		return err;
	}

	/* chain the element into the per-unit list for later lookups */
	cval->elem_id = &kctl->id;
	cval->next_id_elem = mixer->id_elems[cval->id];
	mixer->id_elems[cval->id] = cval;
	return 0;
}
/*
 * get a terminal name string
 */

/*
 * Fallback table mapping terminal-type codes to readable names; the
 * codes apparently follow the USB audio Terminal Types (termt10)
 * document.  Consulted by get_term_name() when no string descriptor
 * and no well-known prefix matches.
 */
static struct iterm_name_combo {
	int type;	/* terminal type code */
	char *name;	/* human-readable name */
} iterm_names[] = {
	{ 0x0300, "Output" },
	{ 0x0301, "Speaker" },
	{ 0x0302, "Headphone" },
	{ 0x0303, "HMD Audio" },
	{ 0x0304, "Desktop Speaker" },
	{ 0x0305, "Room Speaker" },
	{ 0x0306, "Com Speaker" },
	{ 0x0307, "LFE" },
	{ 0x0600, "External In" },
	{ 0x0601, "Analog In" },
	{ 0x0602, "Digital In" },
	{ 0x0603, "Line" },
	{ 0x0604, "Legacy In" },
	{ 0x0605, "IEC958 In" },
	{ 0x0606, "1394 DA Stream" },
	{ 0x0607, "1394 DV Stream" },
	{ 0x0700, "Embedded" },
	{ 0x0701, "Noise Source" },
	{ 0x0702, "Equalization Noise" },
	{ 0x0703, "CD" },
	{ 0x0704, "DAT" },
	{ 0x0705, "DCC" },
	{ 0x0706, "MiniDisk" },
	{ 0x0707, "Analog Tape" },
	{ 0x0708, "Phonograph" },
	{ 0x0709, "VCR Audio" },
	{ 0x070a, "Video Disk Audio" },
	{ 0x070b, "DVD Audio" },
	{ 0x070c, "TV Tuner Audio" },
	{ 0x070d, "Satellite Rec Audio" },
	{ 0x070e, "Cable Tuner Audio" },
	{ 0x070f, "DSS Audio" },
	{ 0x0710, "Radio Receiver" },
	{ 0x0711, "Radio Transmitter" },
	{ 0x0712, "Multi-Track Recorder" },
	{ 0x0713, "Synthesizer" },
	{ 0 },	/* terminator */
};
/*
 * Resolve a human-readable name for the given terminal into @name.
 * Preference order: the terminal's own string descriptor, a synthesized
 * name for virtual (unit) types, a well-known type-class prefix, then
 * the iterm_names table.  Returns the name length, or 0 if no name.
 * With @term_only set, virtual (non-terminal) types yield no name.
 *
 * NOTE(review): the fixed strcpy()/sprintf() calls assume @name holds
 * at least ~13 bytes regardless of @maxlen; callers pass
 * kctl->id.name-sized buffers — confirm before reusing elsewhere.
 */
static int get_term_name(struct mixer_build *state, struct usb_audio_term *iterm,
			 unsigned char *name, int maxlen, int term_only)
{
	struct iterm_name_combo *names;

	if (iterm->name)
		return snd_usb_copy_string_desc(state, iterm->name, name, maxlen);
	/* virtual type - not a real terminal */
	if (iterm->type >> 16) {
		if (term_only)
			return 0;
		switch (iterm->type >> 16) {
		case UAC_SELECTOR_UNIT:
			strcpy(name, "Selector"); return 8;
		case UAC1_PROCESSING_UNIT:
			strcpy(name, "Process Unit"); return 12;
		case UAC1_EXTENSION_UNIT:
			strcpy(name, "Ext Unit"); return 8;
		case UAC_MIXER_UNIT:
			strcpy(name, "Mixer"); return 5;
		default:
			return sprintf(name, "Unit %d", iterm->id);
		}
	}
	/* well-known terminal type classes (high byte of the type code) */
	switch (iterm->type & 0xff00) {
	case 0x0100:
		strcpy(name, "PCM"); return 3;
	case 0x0200:
		strcpy(name, "Mic"); return 3;
	case 0x0400:
		strcpy(name, "Headset"); return 7;
	case 0x0500:
		strcpy(name, "Phone"); return 5;
	}
	/* last resort: full-code lookup in the static table */
	for (names = iterm_names; names->type; names++)
		if (names->type == iterm->type) {
			strcpy(name, names->name);
			return strlen(names->name);
		}
	return 0;
}
/*
* parse the source unit recursively until it reaches to a terminal
* or a branched unit.
*/
static int check_input_term(struct mixer_build *state, int id, struct usb_audio_term *term)
{
int err;
void *p1;
memset(term, 0, sizeof(*term));
while ((p1 = find_audio_control_unit(state, id)) != NULL) {
unsigned char *hdr = p1;
term->id = id;
switch (hdr[2]) {
case UAC_INPUT_TERMINAL:
if (state->mixer->protocol == UAC_VERSION_1) {
struct uac_input_terminal_descriptor *d = p1;
term->type = le16_to_cpu(d->wTerminalType);
term->channels = d->bNrChannels;
term->chconfig = le16_to_cpu(d->wChannelConfig);
term->name = d->iTerminal;
} else { /* UAC_VERSION_2 */
struct uac2_input_terminal_descriptor *d = p1;
term->type = le16_to_cpu(d->wTerminalType);
term->channels = d->bNrChannels;
term->chconfig = le32_to_cpu(d->bmChannelConfig);
term->name = d->iTerminal;
/* call recursively to get the clock selectors */
err = check_input_term(state, d->bCSourceID, term);
if (err < 0)
return err;
}
return 0;
case UAC_FEATURE_UNIT: {
/* the header is the same for v1 and v2 */
struct uac_feature_unit_descriptor *d = p1;
id = d->bSourceID;
break; /* continue to parse */
}
case UAC_MIXER_UNIT: {
struct uac_mixer_unit_descriptor *d = p1;
term->type = d->bDescriptorSubtype << 16; /* virtual type */
term->channels = uac_mixer_unit_bNrChannels(d);
term->chconfig = uac_mixer_unit_wChannelConfig(d, state->mixer->protocol);
term->name = uac_mixer_unit_iMixer(d);
return 0;
}
case UAC_SELECTOR_UNIT:
case UAC2_CLOCK_SELECTOR: {
struct uac_selector_unit_descriptor *d = p1;
/* call recursively to retrieve the channel info */
err = check_input_term(state, d->baSourceID[0], term);
if (err < 0)
return err;
term->type = d->bDescriptorSubtype << 16; /* virtual type */
term->id = id;
term->name = uac_selector_unit_iSelector(d);
return 0;
}
case UAC1_PROCESSING_UNIT:
case UAC1_EXTENSION_UNIT:
/* UAC2_PROCESSING_UNIT_V2 */
/* UAC2_EFFECT_UNIT */
case UAC2_EXTENSION_UNIT_V2: {
struct uac_processing_unit_descriptor *d = p1;
if (state->mixer->protocol == UAC_VERSION_2 &&
hdr[2] == UAC2_EFFECT_UNIT) {
/* UAC2/UAC1 unit IDs overlap here in an
* uncompatible way. Ignore this unit for now.
*/
return 0;
}
if (d->bNrInPins) {
id = d->baSourceID[0];
break; /* continue to parse */
}
term->type = d->bDescriptorSubtype << 16; /* virtual type */
term->channels = uac_processing_unit_bNrChannels(d);
term->chconfig = uac_processing_unit_wChannelConfig(d, state->mixer->protocol);
term->name = uac_processing_unit_iProcessing(d, state->mixer->protocol);
return 0;
}
case UAC2_CLOCK_SOURCE: {
struct uac_clock_source_descriptor *d = p1;
term->type = d->bDescriptorSubtype << 16; /* virtual type */
term->id = id;
term->name = d->iClockSource;
return 0;
}
default:
return -ENODEV;
}
}
return -ENODEV;
}
/*
 * Feature Unit
 */

/* feature unit control information */
struct usb_feature_control_info {
	const char *name;	/* default control name */
	unsigned int type;	/* control type (mute, volume, etc.) */
};

/*
 * Indexed by (control selector - 1); the order matches the UAC_FU_*
 * control selector values used by build_feature_ctl().
 */
static struct usb_feature_control_info audio_feature_info[] = {
	{ "Mute", USB_MIXER_INV_BOOLEAN },
	{ "Volume", USB_MIXER_S16 },
	{ "Tone Control - Bass", USB_MIXER_S8 },
	{ "Tone Control - Mid", USB_MIXER_S8 },
	{ "Tone Control - Treble", USB_MIXER_S8 },
	{ "Graphic Equalizer", USB_MIXER_S8 }, /* FIXME: not implemeted yet */
	{ "Auto Gain Control", USB_MIXER_BOOLEAN },
	{ "Delay Control", USB_MIXER_U16 },
	{ "Bass Boost", USB_MIXER_BOOLEAN },
	{ "Loudness", USB_MIXER_BOOLEAN },
	/* UAC2 specific */
	{ "Input Gain Control", USB_MIXER_U16 },
	{ "Input Gain Pad Control", USB_MIXER_BOOLEAN },
	{ "Phase Inverter Control", USB_MIXER_BOOLEAN },
};
/* private_free callback: release the usb_mixer_elem_info attached to
 * the control as private data */
static void usb_mixer_elem_free(struct snd_kcontrol *kctl)
{
	kfree(kctl->private_data);
	kctl->private_data = NULL;
}
/*
 * interface to ALSA control for feature/mixer units
 */

/* volume control quirks */

/*
 * Override broken or missing min/max/res values for specific devices,
 * keyed by USB vendor:product ID and the already-built control name.
 * Called from get_min_max_with_quirks() after probing the device.
 */
static void volume_control_quirks(struct usb_mixer_elem_info *cval,
				  struct snd_kcontrol *kctl)
{
	switch (cval->mixer->chip->usb_id) {
	case USB_ID(0x0763, 0x2030): /* M-Audio Fast Track C400 */
	case USB_ID(0x0763, 0x2031): /* M-Audio Fast Track C600 */
		if (strcmp(kctl->id.name, "Effect Duration") == 0) {
			cval->min = 0x0000;
			cval->max = 0xffff;
			cval->res = 0x00e6;
			break;
		}
		if (strcmp(kctl->id.name, "Effect Volume") == 0 ||
		    strcmp(kctl->id.name, "Effect Feedback Volume") == 0) {
			cval->min = 0x00;
			cval->max = 0xff;
			break;
		}
		if (strstr(kctl->id.name, "Effect Return") != NULL) {
			cval->min = 0xb706;
			cval->max = 0xff7b;
			cval->res = 0x0073;
			break;
		}
		if ((strstr(kctl->id.name, "Playback Volume") != NULL) ||
		    (strstr(kctl->id.name, "Effect Send") != NULL)) {
			cval->min = 0xb5fb; /* -73 dB = 0xb6ff */
			cval->max = 0xfcfe;
			cval->res = 0x0073;
		}
		break;
	case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */
	case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */
		if (strcmp(kctl->id.name, "Effect Duration") == 0) {
			snd_printk(KERN_INFO
				   "usb-audio: set quirk for FTU Effect Duration\n");
			cval->min = 0x0000;
			cval->max = 0x7f00;
			cval->res = 0x0100;
			break;
		}
		if (strcmp(kctl->id.name, "Effect Volume") == 0 ||
		    strcmp(kctl->id.name, "Effect Feedback Volume") == 0) {
			snd_printk(KERN_INFO
				   "usb-audio: set quirks for FTU Effect Feedback/Volume\n");
			cval->min = 0x00;
			cval->max = 0x7f;
			break;
		}
		break;
	case USB_ID(0x0471, 0x0101):
	case USB_ID(0x0471, 0x0104):
	case USB_ID(0x0471, 0x0105):
	case USB_ID(0x0672, 0x1041):
	/* quirk for UDA1321/N101.
	 * note that detection between firmware 2.1.1.7 (N101)
	 * and later 2.1.1.21 is not very clear from datasheets.
	 * I hope that the min value is -15360 for newer firmware --jk
	 */
		if (!strcmp(kctl->id.name, "PCM Playback Volume") &&
		    cval->min == -15616) {
			snd_printk(KERN_INFO
				   "set volume quirk for UDA1321/N101 chip\n");
			cval->max = -256;
		}
		break;
	case USB_ID(0x046d, 0x09a4):
		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
			snd_printk(KERN_INFO
				   "set volume quirk for QuickCam E3500\n");
			cval->min = 6080;
			cval->max = 8768;
			cval->res = 192;
		}
		break;
	case USB_ID(0x046d, 0x0807): /* Logitech Webcam C500 */
	case USB_ID(0x046d, 0x0808):
	case USB_ID(0x046d, 0x0809):
	case USB_ID(0x046d, 0x081b): /* HD Webcam c310 */
	case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
	case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
	case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
	case USB_ID(0x046d, 0x0991):
	/* Most audio usb devices lie about volume resolution.
	 * Most Logitech webcams have res = 384.
	 * Proboly there is some logitech magic behind this number --fishor
	 */
		if (!strcmp(kctl->id.name, "Mic Capture Volume")) {
			snd_printk(KERN_INFO
				   "set resolution quirk: cval->res = 384\n");
			cval->res = 384;
		}
		break;
	}
}
/*
 * retrieve the minimum and maximum values for the specified control
 *
 * Probes GET_MIN/GET_MAX/GET_RES on the device, then tries to detect a
 * wrong (too fine) reported resolution by test-writing values and
 * reading them back.  Applies per-device quirks and derives the dB
 * range.  Returns 0 on success or a negative error code.
 *
 * Fix: the resolution read-back check used 'saved' even when the
 * initial get_cur_mix_raw() failed, i.e. an uninitialized value was
 * compared against and written back to the device.  Skip the check
 * when the current value cannot be read.
 */
static int get_min_max_with_quirks(struct usb_mixer_elem_info *cval,
				   int default_min, struct snd_kcontrol *kctl)
{
	/* for failsafe */
	cval->min = default_min;
	cval->max = cval->min + 1;
	cval->res = 1;
	cval->dBmin = cval->dBmax = 0;

	if (cval->val_type == USB_MIXER_BOOLEAN ||
	    cval->val_type == USB_MIXER_INV_BOOLEAN) {
		cval->initialized = 1;
	} else {
		int minchn = 0;
		if (cval->cmask) {
			int i;
			/* query through the first channel present in the mask */
			for (i = 0; i < MAX_CHANNELS; i++)
				if (cval->cmask & (1 << i)) {
					minchn = i + 1;
					break;
				}
		}
		if (get_ctl_value(cval, UAC_GET_MAX, (cval->control << 8) | minchn, &cval->max) < 0 ||
		    get_ctl_value(cval, UAC_GET_MIN, (cval->control << 8) | minchn, &cval->min) < 0) {
			snd_printd(KERN_ERR "%d:%d: cannot get min/max values for control %d (id %d)\n",
				   cval->id, snd_usb_ctrl_intf(cval->mixer->chip), cval->control, cval->id);
			return -EINVAL;
		}
		if (get_ctl_value(cval, UAC_GET_RES, (cval->control << 8) | minchn, &cval->res) < 0) {
			cval->res = 1;
		} else {
			int last_valid_res = cval->res;
			/* some devices report a coarser resolution than they
			 * accept: keep halving it while SET_RES succeeds */
			while (cval->res > 1) {
				if (snd_usb_mixer_set_ctl_value(cval, UAC_SET_RES,
								(cval->control << 8) | minchn, cval->res / 2) < 0)
					break;
				cval->res /= 2;
			}
			if (get_ctl_value(cval, UAC_GET_RES, (cval->control << 8) | minchn, &cval->res) < 0)
				cval->res = last_valid_res;
		}
		if (cval->res == 0)
			cval->res = 1;

		/* Additional checks for the proper resolution
		 *
		 * Some devices report smaller resolutions than actually
		 * reacting. They don't return errors but simply clip
		 * to the lower aligned value.
		 */
		if (cval->min + cval->res < cval->max) {
			int last_valid_res = cval->res;
			int saved, test, check;
			/* skip the probe if the current value can't be read;
			 * 'saved' would otherwise be used uninitialized */
			if (get_cur_mix_raw(cval, minchn, &saved) < 0)
				goto no_res_check;
			for (;;) {
				test = saved;
				if (test < cval->max)
					test += cval->res;
				else
					test -= cval->res;
				if (test < cval->min || test > cval->max ||
				    set_cur_mix_value(cval, minchn, 0, test) ||
				    get_cur_mix_raw(cval, minchn, &check)) {
					cval->res = last_valid_res;
					break;
				}
				if (test == check)
					break;
				cval->res *= 2;
			}
			/* restore the original value */
			set_cur_mix_value(cval, minchn, 0, saved);
		}

 no_res_check:
		cval->initialized = 1;
	}

	if (kctl)
		volume_control_quirks(cval, kctl);

	/* USB descriptions contain the dB scale in 1/256 dB unit
	 * while ALSA TLV contains in 1/100 dB unit
	 */
	cval->dBmin = (convert_signed_value(cval, cval->min) * 100) / 256;
	cval->dBmax = (convert_signed_value(cval, cval->max) * 100) / 256;
	if (cval->dBmin > cval->dBmax) {
		/* something is wrong; assume it's either from/to 0dB */
		if (cval->dBmin < 0)
			cval->dBmax = 0;
		else if (cval->dBmin > 0)
			cval->dBmin = 0;
		if (cval->dBmin > cval->dBmax) {
			/* totally crap, return an error */
			return -EINVAL;
		}
	}
	return 0;
}

/* shorthand used when no kcontrol-based quirk handling is wanted */
#define get_min_max(cval, def) get_min_max_with_quirks(cval, def, NULL)
/* get a feature/mixer unit info */
static int mixer_ctl_feature_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;

	if (cval->val_type == USB_MIXER_BOOLEAN ||
	    cval->val_type == USB_MIXER_INV_BOOLEAN)
		uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
	else
		uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = cval->channels;
	if (cval->val_type == USB_MIXER_BOOLEAN ||
	    cval->val_type == USB_MIXER_INV_BOOLEAN) {
		uinfo->value.integer.min = 0;
		uinfo->value.integer.max = 1;
	} else {
		if (!cval->initialized) {
			/* the min/max probe was deferred; do it now.  If it
			 * yields no usable dB range, drop the TLV access bits
			 * and tell user space the control info changed */
			get_min_max_with_quirks(cval, 0, kcontrol);
			if (cval->initialized && cval->dBmin >= cval->dBmax) {
				kcontrol->vd[0].access &=
					~(SNDRV_CTL_ELEM_ACCESS_TLV_READ |
					  SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK);
				snd_ctl_notify(cval->mixer->chip->card,
					       SNDRV_CTL_EVENT_MASK_INFO,
					       &kcontrol->id);
			}
		}
		uinfo->value.integer.min = 0;
		uinfo->value.integer.max =
			(cval->max - cval->min + cval->res - 1) / cval->res;
	}
	return 0;
}
/* get the current value from feature/mixer unit */
static int mixer_ctl_feature_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int c, cnt, val, err;

	/* failsafe default in case the read below fails */
	ucontrol->value.integer.value[0] = cval->min;

	if (!cval->cmask) {
		/* master channel only */
		err = get_cur_mix_value(cval, 0, 0, &val);
		if (err < 0)
			return cval->mixer->ignore_ctl_error ? 0 : err;
		ucontrol->value.integer.value[0] = get_relative_value(cval, val);
		return 0;
	}

	/* one value slot per channel present in the mask */
	cnt = 0;
	for (c = 0; c < MAX_CHANNELS; c++) {
		if (!(cval->cmask & (1 << c)))
			continue;
		err = get_cur_mix_value(cval, c + 1, cnt, &val);
		if (err < 0)
			return cval->mixer->ignore_ctl_error ? 0 : err;
		ucontrol->value.integer.value[cnt] = get_relative_value(cval, val);
		cnt++;
	}
	return 0;
}
/* put the current value to feature/mixer unit */
static int mixer_ctl_feature_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int c, cnt, val, oval, err;
	int changed = 0;

	if (!cval->cmask) {
		/* master channel only */
		err = get_cur_mix_value(cval, 0, 0, &oval);
		if (err < 0)
			return cval->mixer->ignore_ctl_error ? 0 : err;
		val = get_abs_value(cval, ucontrol->value.integer.value[0]);
		if (val != oval) {
			set_cur_mix_value(cval, 0, 0, val);
			changed = 1;
		}
		return changed;
	}

	/* one value slot per channel present in the mask; only write
	 * channels whose value actually changed */
	cnt = 0;
	for (c = 0; c < MAX_CHANNELS; c++) {
		if (!(cval->cmask & (1 << c)))
			continue;
		err = get_cur_mix_value(cval, c + 1, cnt, &oval);
		if (err < 0)
			return cval->mixer->ignore_ctl_error ? 0 : err;
		val = get_abs_value(cval, ucontrol->value.integer.value[cnt]);
		if (val != oval) {
			set_cur_mix_value(cval, c + 1, cnt, val);
			changed = 1;
		}
		cnt++;
	}
	return changed;
}
/* kcontrol template for read-write feature/mixer unit controls */
static struct snd_kcontrol_new usb_feature_unit_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "", /* will be filled later manually */
	.info = mixer_ctl_feature_info,
	.get = mixer_ctl_feature_get,
	.put = mixer_ctl_feature_put,
};
/* the read-only variant */
static struct snd_kcontrol_new usb_feature_unit_ctl_ro = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "", /* will be filled later manually */
	.info = mixer_ctl_feature_info,
	.get = mixer_ctl_feature_get,
	.put = NULL, /* no put callback: the control cannot be written */
};

/* This symbol is exported in order to allow the mixer quirks to
 * hook up to the standard feature unit control mechanism */
struct snd_kcontrol_new *snd_usb_feature_unit_ctl = &usb_feature_unit_ctl;
/*
 * build a feature control
 */

/* append a suffix to the control name; returns the resulting total
 * length (strlcat semantics, bounded by the id.name buffer size) */
static size_t append_ctl_name(struct snd_kcontrol *kctl, const char *str)
{
	return strlcat(kctl->id.name, str, sizeof(kctl->id.name));
}
/* A lot of headsets/headphones have a "Speaker" mixer. Make sure we
rename it to "Headphone". We determine if something is a headphone
similar to how udev determines form factor. */
static void check_no_speaker_on_headset(struct snd_kcontrol *kctl,
struct snd_card *card)
{
const char *names_to_check[] = {
"Headset", "headset", "Headphone", "headphone", NULL};
const char **s;
bool found = 0;
if (strcmp("Speaker", kctl->id.name))
return;
for (s = names_to_check; *s; s++)
if (strstr(card->shortname, *s)) {
found = 1;
break;
}
if (!found)
return;
strlcpy(kctl->id.name, "Headphone", sizeof(kctl->id.name));
}
/*
 * Create and register one ALSA control for a feature-unit control
 * (mute, volume, ...).
 *
 * @ctl_mask: bitmask of the channels covered (0 = master channel)
 * @control: zero-based control selector on entry (converted to the
 *	1-based UAC value below)
 * @iterm: the input terminal feeding this unit (used for naming)
 * @readonly_mask: per-channel read-only bits from the descriptor
 */
static void build_feature_ctl(struct mixer_build *state, void *raw_desc,
			      unsigned int ctl_mask, int control,
			      struct usb_audio_term *iterm, int unitid,
			      int readonly_mask)
{
	struct uac_feature_unit_descriptor *desc = raw_desc;
	unsigned int len = 0;
	int mapped_name = 0;
	int nameid = uac_feature_unit_iFeature(desc);
	struct snd_kcontrol *kctl;
	struct usb_mixer_elem_info *cval;
	const struct usbmix_name_map *map;
	unsigned int range;

	control++; /* change from zero-based to 1-based value */
	if (control == UAC_FU_GRAPHIC_EQUALIZER) {
		/* FIXME: not supported yet */
		return;
	}
	map = find_map(state, unitid, control);
	if (check_ignored_ctl(map))
		return;
	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
	if (! cval) {
		snd_printk(KERN_ERR "cannot malloc kcontrol\n");
		return;
	}
	cval->mixer = state->mixer;
	cval->id = unitid;
	cval->control = control;
	cval->cmask = ctl_mask;
	cval->val_type = audio_feature_info[control-1].type;
	if (ctl_mask == 0) {
		cval->channels = 1; /* master channel */
		cval->master_readonly = readonly_mask;
	} else {
		/* count the channels selected by the mask */
		int i, c = 0;
		for (i = 0; i < 16; i++)
			if (ctl_mask & (1 << i))
				c++;
		cval->channels = c;
		cval->ch_readonly = readonly_mask;
	}
	/* if all channels in the mask are marked read-only, make the control
	 * read-only. set_cur_mix_value() will check the mask again and won't
	 * issue write commands to read-only channels. */
	if (cval->channels == readonly_mask)
		kctl = snd_ctl_new1(&usb_feature_unit_ctl_ro, cval);
	else
		kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
	if (! kctl) {
		snd_printk(KERN_ERR "cannot malloc kcontrol\n");
		kfree(cval);
		return;
	}
	kctl->private_free = usb_mixer_elem_free;
	/* naming: an explicit map entry wins, then the unit's own string id */
	len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
	mapped_name = len != 0;
	if (! len && nameid)
		len = snd_usb_copy_string_desc(state, nameid,
				kctl->id.name, sizeof(kctl->id.name));
	switch (control) {
	case UAC_FU_MUTE:
	case UAC_FU_VOLUME:
		/* determine the control name. the rule is:
		 * - if a name id is given in descriptor, use it.
		 * - if the connected input can be determined, then use the name
		 *   of terminal type.
		 * - if the connected output can be determined, use it.
		 * - otherwise, anonymous name.
		 */
		if (! len) {
			len = get_term_name(state, iterm, kctl->id.name, sizeof(kctl->id.name), 1);
			if (! len)
				len = get_term_name(state, &state->oterm, kctl->id.name, sizeof(kctl->id.name), 1);
			if (! len)
				len = snprintf(kctl->id.name, sizeof(kctl->id.name),
					       "Feature %d", unitid);
		}
		if (!mapped_name)
			check_no_speaker_on_headset(kctl, state->mixer->chip->card);
		/* determine the stream direction:
		 * if the connected output is USB stream, then it's likely a
		 * capture stream. otherwise it should be playback (hopefully :)
		 */
		if (! mapped_name && ! (state->oterm.type >> 16)) {
			if ((state->oterm.type & 0xff00) == 0x0100) {
				len = append_ctl_name(kctl, " Capture");
			} else {
				len = append_ctl_name(kctl, " Playback");
			}
		}
		append_ctl_name(kctl, control == UAC_FU_MUTE ?
				" Switch" : " Volume");
		break;
	default:
		if (! len)
			strlcpy(kctl->id.name, audio_feature_info[control-1].name,
				sizeof(kctl->id.name));
		break;
	}
	/* get min/max values */
	get_min_max_with_quirks(cval, 0, kctl);
	if (control == UAC_FU_VOLUME) {
		/* volume controls get a dB scale (TLV) when available */
		check_mapped_dB(map, cval);
		if (cval->dBmin < cval->dBmax || !cval->initialized) {
			kctl->tlv.c = snd_usb_mixer_vol_tlv;
			kctl->vd[0].access |=
				SNDRV_CTL_ELEM_ACCESS_TLV_READ |
				SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK;
		}
	}
	range = (cval->max - cval->min) / cval->res;
	/* Are there devices with volume range more than 255? I use a bit more
	 * to be sure. 384 is a resolution magic number found on Logitech
	 * devices. It will definitively catch all buggy Logitech devices.
	 */
	if (range > 384) {
		snd_printk(KERN_WARNING "usb_audio: Warning! Unlikely big "
			   "volume range (=%u), cval->res is probably wrong.",
			   range);
		snd_printk(KERN_WARNING "usb_audio: [%d] FU [%s] ch = %d, "
			   "val = %d/%d/%d", cval->id,
			   kctl->id.name, cval->channels,
			   cval->min, cval->max, cval->res);
	}
	snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n",
		    cval->id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res);
	snd_usb_mixer_add_control(state->mixer, kctl);
}
/*
 * parse a feature unit
 *
 * most of controls are defined here.
 *
 * Validates the descriptor, recursively parses the source unit, and
 * builds one ALSA control per (control selector, channel set) pair.
 * Returns 0 on success or a negative error code.
 */
static int parse_audio_feature_unit(struct mixer_build *state, int unitid, void *_ftr)
{
	int channels, i, j;
	struct usb_audio_term iterm;
	unsigned int master_bits, first_ch_bits;
	int err, csize;
	struct uac_feature_unit_descriptor *hdr = _ftr;
	__u8 *bmaControls;

	if (state->mixer->protocol == UAC_VERSION_1) {
		csize = hdr->bControlSize;
		if (!csize) {
			snd_printdd(KERN_ERR "usbaudio: unit %u: "
				    "invalid bControlSize == 0\n", unitid);
			return -EINVAL;
		}
		channels = (hdr->bLength - 7) / csize - 1;
		bmaControls = hdr->bmaControls;
		if (hdr->bLength < 7 + csize) {
			snd_printk(KERN_ERR "usbaudio: unit %u: "
				   "invalid UAC_FEATURE_UNIT descriptor\n",
				   unitid);
			return -EINVAL;
		}
	} else {
		struct uac2_feature_unit_descriptor *ftr = _ftr;
		csize = 4;	/* UAC2 bmaControls entries are 4 bytes wide */
		channels = (hdr->bLength - 6) / 4 - 1;
		bmaControls = ftr->bmaControls;
		if (hdr->bLength < 6 + csize) {
			snd_printk(KERN_ERR "usbaudio: unit %u: "
				   "invalid UAC_FEATURE_UNIT descriptor\n",
				   unitid);
			return -EINVAL;
		}
	}
	/* parse the source unit */
	if ((err = parse_audio_unit(state, hdr->bSourceID)) < 0)
		return err;
	/* determine the input source type and name */
	err = check_input_term(state, hdr->bSourceID, &iterm);
	if (err < 0)
		return err;
	/* bmaControls[0] describes the master channel */
	master_bits = snd_usb_combine_bytes(bmaControls, csize);
	/* master configuration quirks */
	switch (state->chip->usb_id) {
	case USB_ID(0x08bb, 0x2702):
		snd_printk(KERN_INFO
			   "usbmixer: master volume quirk for PCM2702 chip\n");
		/* disable non-functional volume control */
		master_bits &= ~UAC_CONTROL_BIT(UAC_FU_VOLUME);
		break;
	case USB_ID(0x1130, 0xf211):
		snd_printk(KERN_INFO
			   "usbmixer: volume control quirk for Tenx TP6911 Audio Headset\n");
		/* disable non-functional volume control */
		channels = 0;
		break;
	}
	/* NOTE(review): first_ch_bits is computed here but not referenced
	 * again within this function */
	if (channels > 0)
		first_ch_bits = snd_usb_combine_bytes(bmaControls + csize, csize);
	else
		first_ch_bits = 0;
	if (state->mixer->protocol == UAC_VERSION_1) {
		/* check all control types */
		for (i = 0; i < 10; i++) {
			unsigned int ch_bits = 0;
			for (j = 0; j < channels; j++) {
				unsigned int mask = snd_usb_combine_bytes(bmaControls + csize * (j+1), csize);
				if (mask & (1 << i))
					ch_bits |= (1 << j);
			}
			/* audio class v1 controls are never read-only */
			if (ch_bits & 1) /* the first channel must be set (for ease of programming) */
				build_feature_ctl(state, _ftr, ch_bits, i, &iterm, unitid, 0);
			if (master_bits & (1 << i))
				build_feature_ctl(state, _ftr, 0, i, &iterm, unitid, 0);
		}
	} else { /* UAC_VERSION_2 */
		for (i = 0; i < ARRAY_SIZE(audio_feature_info); i++) {
			unsigned int ch_bits = 0;
			unsigned int ch_read_only = 0;
			for (j = 0; j < channels; j++) {
				unsigned int mask = snd_usb_combine_bytes(bmaControls + csize * (j+1), csize);
				if (uac2_control_is_readable(mask, i)) {
					ch_bits |= (1 << j);
					if (!uac2_control_is_writeable(mask, i))
						ch_read_only |= (1 << j);
				}
			}
			/* NOTE: build_feature_ctl() will mark the control read-only if all channels
			 * are marked read-only in the descriptors. Otherwise, the control will be
			 * reported as writeable, but the driver will not actually issue a write
			 * command for read-only channels */
			if (ch_bits & 1) /* the first channel must be set (for ease of programming) */
				build_feature_ctl(state, _ftr, ch_bits, i, &iterm, unitid, ch_read_only);
			if (uac2_control_is_readable(master_bits, i))
				build_feature_ctl(state, _ftr, 0, i, &iterm, unitid,
						  !uac2_control_is_writeable(master_bits, i));
		}
	}
	return 0;
}
/*
 * Mixer Unit
 */

/*
 * build a mixer unit control
 *
 * the callbacks are identical with feature unit.
 * input channel number (zero based) is given in control field instead.
 */
static void build_mixer_unit_ctl(struct mixer_build *state,
				 struct uac_mixer_unit_descriptor *desc,
				 int in_pin, int in_ch, int unitid,
				 struct usb_audio_term *iterm)
{
	struct usb_mixer_elem_info *cval;
	unsigned int num_outs = uac_mixer_unit_bNrChannels(desc);
	unsigned int i, len;
	struct snd_kcontrol *kctl;
	const struct usbmix_name_map *map;

	map = find_map(state, unitid, 0);
	if (check_ignored_ctl(map))
		return;
	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
	if (! cval)
		return;
	cval->mixer = state->mixer;
	cval->id = unitid;
	cval->control = in_ch + 1; /* based on 1 */
	cval->val_type = USB_MIXER_S16;
	/* enable every output channel routed from this input channel */
	for (i = 0; i < num_outs; i++) {
		if (check_matrix_bitmap(uac_mixer_unit_bmControls(desc, state->mixer->protocol), in_ch, i, num_outs)) {
			cval->cmask |= (1 << i);
			cval->channels++;
		}
	}
	/* get min/max values */
	get_min_max(cval, 0);
	kctl = snd_ctl_new1(&usb_feature_unit_ctl, cval);
	if (! kctl) {
		snd_printk(KERN_ERR "cannot malloc kcontrol\n");
		kfree(cval);
		return;
	}
	kctl->private_free = usb_mixer_elem_free;
	/* pick a name: explicit map, the input terminal, or a generic fallback */
	len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
	if (! len)
		len = get_term_name(state, iterm, kctl->id.name, sizeof(kctl->id.name), 0);
	if (! len)
		len = sprintf(kctl->id.name, "Mixer Source %d", in_ch + 1);
	append_ctl_name(kctl, " Volume");
	snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n",
		    cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
	snd_usb_mixer_add_control(state->mixer, kctl);
}
/*
 * Parse a mixer unit descriptor and create a control for every input
 * channel that has at least one switch set in the mixing matrix.
 */
static int parse_audio_mixer_unit(struct mixer_build *state, int unitid, void *raw_desc)
{
	struct uac_mixer_unit_descriptor *desc = raw_desc;
	struct usb_audio_term iterm;
	int input_pins, num_ins, num_outs;
	int pin, ich, err;

	if (desc->bLength < 11 || ! (input_pins = desc->bNrInPins) || ! (num_outs = uac_mixer_unit_bNrChannels(desc))) {
		snd_printk(KERN_ERR "invalid MIXER UNIT descriptor %d\n", unitid);
		return -EINVAL;
	}
	/* no bmControls field (e.g. Maya44) -> ignore */
	if (desc->bLength <= 10 + input_pins) {
		snd_printdd(KERN_INFO "MU %d has no bmControls field\n", unitid);
		return 0;
	}

	num_ins = 0;
	ich = 0;
	for (pin = 0; pin < input_pins; pin++) {
		err = parse_audio_unit(state, desc->baSourceID[pin]);
		if (err < 0)
			continue;	/* deliberately skip broken source units */
		err = check_input_term(state, desc->baSourceID[pin], &iterm);
		if (err < 0)
			return err;
		num_ins += iterm.channels;
		/* walk the input channels contributed by this pin */
		for (; ich < num_ins; ++ich) {
			int och, ich_has_controls = 0;

			for (och = 0; och < num_outs; ++och) {
				if (check_matrix_bitmap(uac_mixer_unit_bmControls(desc, state->mixer->protocol),
							ich, och, num_outs)) {
					ich_has_controls = 1;
					break;
				}
			}
			if (ich_has_controls)
				build_mixer_unit_ctl(state, desc, pin, ich,
						     unitid, &iterm);
		}
	}
	return 0;
}
/*
* Processing Unit / Extension Unit
*/
/* get callback for processing/extension unit controls */
static int mixer_ctl_procunit_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int err, val;

	err = get_cur_ctl_value(cval, cval->control << 8, &val);
	if (err < 0) {
		if (cval->mixer->ignore_ctl_error) {
			/* report the minimum instead of failing */
			ucontrol->value.integer.value[0] = cval->min;
			return 0;
		}
		return err;
	}
	ucontrol->value.integer.value[0] = get_relative_value(cval, val);
	return 0;
}
/* put callback for processing/extension unit controls */
static int mixer_ctl_procunit_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int new_val, cur_val, err;

	err = get_cur_ctl_value(cval, cval->control << 8, &cur_val);
	if (err < 0)
		return cval->mixer->ignore_ctl_error ? 0 : err;

	new_val = get_abs_value(cval, ucontrol->value.integer.value[0]);
	if (new_val == cur_val)
		return 0;	/* unchanged */

	set_cur_ctl_value(cval, cval->control << 8, new_val);
	return 1;		/* value changed */
}
/*
 * ALSA control template for processing/extension unit controls;
 * reuses the feature-unit info callback (both are integer controls).
 */
static struct snd_kcontrol_new mixer_procunit_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "", /* will be filled later */
	.info = mixer_ctl_feature_info,
	.get = mixer_ctl_procunit_get,
	.put = mixer_ctl_procunit_put,
};
/*
* predefined data for processing units
*/
struct procunit_value_info {
int control;
char *suffix;
int val_type;
int min_value;
};
struct procunit_info {
int type;
char *name;
struct procunit_value_info *values;
};
/* Up/Down-mix processing unit */
static struct procunit_value_info updown_proc_info[] = {
	{ UAC_UD_ENABLE, "Switch", USB_MIXER_BOOLEAN },
	{ UAC_UD_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 },
	{ 0 }
};
/* Dolby Prologic processing unit */
static struct procunit_value_info prologic_proc_info[] = {
	{ UAC_DP_ENABLE, "Switch", USB_MIXER_BOOLEAN },
	{ UAC_DP_MODE_SELECT, "Mode Select", USB_MIXER_U8, 1 },
	{ 0 }
};
/* 3D stereo extender processing unit */
static struct procunit_value_info threed_enh_proc_info[] = {
	{ UAC_3D_ENABLE, "Switch", USB_MIXER_BOOLEAN },
	{ UAC_3D_SPACE, "Spaciousness", USB_MIXER_U8 },
	{ 0 }
};
/* Reverberation processing unit */
static struct procunit_value_info reverb_proc_info[] = {
	{ UAC_REVERB_ENABLE, "Switch", USB_MIXER_BOOLEAN },
	{ UAC_REVERB_LEVEL, "Level", USB_MIXER_U8 },
	{ UAC_REVERB_TIME, "Time", USB_MIXER_U16 },
	{ UAC_REVERB_FEEDBACK, "Feedback", USB_MIXER_U8 },
	{ 0 }
};
/* Chorus processing unit */
static struct procunit_value_info chorus_proc_info[] = {
	{ UAC_CHORUS_ENABLE, "Switch", USB_MIXER_BOOLEAN },
	{ UAC_CHORUS_LEVEL, "Level", USB_MIXER_U8 },
	{ UAC_CHORUS_RATE, "Rate", USB_MIXER_U16 },
	{ UAC_CHORUS_DEPTH, "Depth", USB_MIXER_U16 },
	{ 0 }
};
/* Dynamic range compressor processing unit */
static struct procunit_value_info dcr_proc_info[] = {
	{ UAC_DCR_ENABLE, "Switch", USB_MIXER_BOOLEAN },
	{ UAC_DCR_RATE, "Ratio", USB_MIXER_U16 },
	{ UAC_DCR_MAXAMPL, "Max Amp", USB_MIXER_S16 },
	{ UAC_DCR_THRESHOLD, "Threshold", USB_MIXER_S16 },
	{ UAC_DCR_ATTACK_TIME, "Attack Time", USB_MIXER_U16 },
	{ UAC_DCR_RELEASE_TIME, "Release Time", USB_MIXER_U16 },
	{ 0 }
};
/* maps wProcessType to the matching control table; zero-terminated */
static struct procunit_info procunits[] = {
	{ UAC_PROCESS_UP_DOWNMIX, "Up Down", updown_proc_info },
	{ UAC_PROCESS_DOLBY_PROLOGIC, "Dolby Prologic", prologic_proc_info },
	{ UAC_PROCESS_STEREO_EXTENDER, "3D Stereo Extender", threed_enh_proc_info },
	{ UAC_PROCESS_REVERB, "Reverb", reverb_proc_info },
	{ UAC_PROCESS_CHORUS, "Chorus", chorus_proc_info },
	{ UAC_PROCESS_DYN_RANGE_COMP, "DCR", dcr_proc_info },
	{ 0 },
};
/*
* predefined data for extension units
*/
/* vendor extension unit: sample clock rate selector */
static struct procunit_value_info clock_rate_xu_info[] = {
	{ USB_XU_CLOCK_RATE_SELECTOR, "Selector", USB_MIXER_U8, 0 },
	{ 0 }
};
/* vendor extension unit: internal/external clock source switch */
static struct procunit_value_info clock_source_xu_info[] = {
	{ USB_XU_CLOCK_SOURCE_SELECTOR, "External", USB_MIXER_BOOLEAN },
	{ 0 }
};
/* vendor extension unit: SPDIF/AC3 digital output format */
static struct procunit_value_info spdif_format_xu_info[] = {
	{ USB_XU_DIGITAL_FORMAT_SELECTOR, "SPDIF/AC3", USB_MIXER_BOOLEAN },
	{ 0 }
};
/* vendor extension unit: analogue input soft limiter */
static struct procunit_value_info soft_limit_xu_info[] = {
	{ USB_XU_SOFT_LIMIT_SELECTOR, " ", USB_MIXER_BOOLEAN },
	{ 0 }
};
/* maps extension unit codes to their control tables; zero-terminated */
static struct procunit_info extunits[] = {
	{ USB_XU_CLOCK_RATE, "Clock rate", clock_rate_xu_info },
	{ USB_XU_CLOCK_SOURCE, "DigitalIn CLK source", clock_source_xu_info },
	{ USB_XU_DIGITAL_IO_STATUS, "DigitalOut format:", spdif_format_xu_info },
	{ USB_XU_DEVICE_OPTIONS, "AnalogueIn Soft Limit", soft_limit_xu_info },
	{ 0 }
};
/*
 * Build the controls for a processing or extension unit.
 *
 * @state:    parser state for the current mixer interface
 * @unitid:   unit ID of the processing/extension unit
 * @raw_desc: raw class-specific descriptor of the unit
 * @list:     table of known unit types (procunits[] or extunits[])
 * @name:     fallback control name ("Processing Unit"/"Extension Unit")
 *
 * Returns zero or a negative error code.
 */
static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw_desc, struct procunit_info *list, char *name)
{
	struct uac_processing_unit_descriptor *desc = raw_desc;
	int num_ins = desc->bNrInPins;
	struct usb_mixer_elem_info *cval;
	struct snd_kcontrol *kctl;
	int i, err, nameid, type, len;
	struct procunit_info *info;
	struct procunit_value_info *valinfo;
	const struct usbmix_name_map *map;
	/* used for unit types we have no dedicated table entry for */
	static struct procunit_value_info default_value_info[] = {
		{ 0x01, "Switch", USB_MIXER_BOOLEAN },
		{ 0 }
	};
	static struct procunit_info default_info = {
		0, NULL, default_value_info
	};

	/* sanity-check the length before touching variable-sized parts */
	if (desc->bLength < 13 || desc->bLength < 13 + num_ins ||
	    desc->bLength < num_ins + uac_processing_unit_bControlSize(desc, state->mixer->protocol)) {
		snd_printk(KERN_ERR "invalid %s descriptor (id %d)\n", name, unitid);
		return -EINVAL;
	}

	/* parse all input pins first so upstream units are registered */
	for (i = 0; i < num_ins; i++) {
		if ((err = parse_audio_unit(state, desc->baSourceID[i])) < 0)
			return err;
	}

	type = le16_to_cpu(desc->wProcessType);
	for (info = list; info && info->type; info++)
		if (info->type == type)
			break;
	if (! info || ! info->type)
		info = &default_info;

	for (valinfo = info->values; valinfo->control; valinfo++) {
		__u8 *controls = uac_processing_unit_bmControls(desc, state->mixer->protocol);

		/*
		 * Controls are numbered from 1; bit (control - 1) of the
		 * bmControls bitmap tells whether the control is present.
		 * The previous form, controls[control / 8] &
		 * (1 << ((control % 8) - 1)), produced a shift of -1
		 * (undefined behavior) for control == 8; for controls 1-7
		 * both forms are equivalent.
		 */
		if (! (controls[(valinfo->control - 1) / 8] &
		       (1 << ((valinfo->control - 1) % 8))))
			continue;
		map = find_map(state, unitid, valinfo->control);
		if (check_ignored_ctl(map))
			continue;
		cval = kzalloc(sizeof(*cval), GFP_KERNEL);
		if (! cval) {
			snd_printk(KERN_ERR "cannot malloc kcontrol\n");
			return -ENOMEM;
		}
		cval->mixer = state->mixer;
		cval->id = unitid;
		cval->control = valinfo->control;
		cval->val_type = valinfo->val_type;
		cval->channels = 1;

		/* get min/max values */
		if (type == UAC_PROCESS_UP_DOWNMIX && cval->control == UAC_UD_MODE_SELECT) {
			__u8 *control_spec = uac_processing_unit_specific(desc, state->mixer->protocol);
			/* FIXME: hard-coded */
			cval->min = 1;
			cval->max = control_spec[0];
			cval->res = 1;
			cval->initialized = 1;
		} else {
			if (type == USB_XU_CLOCK_RATE) {
				/* E-Mu USB 0404/0202/TrackerPre/0204
				 * samplerate control quirk
				 */
				cval->min = 0;
				cval->max = 5;
				cval->res = 1;
				cval->initialized = 1;
			} else
				get_min_max(cval, valinfo->min_value);
		}

		kctl = snd_ctl_new1(&mixer_procunit_ctl, cval);
		if (! kctl) {
			snd_printk(KERN_ERR "cannot malloc kcontrol\n");
			kfree(cval);
			return -ENOMEM;
		}
		/* cval is owned by the kcontrol from here on */
		kctl->private_free = usb_mixer_elem_free;

		/* name priority: map -> table -> iProcessing string -> fallback */
		if (check_mapped_name(map, kctl->id.name,
						sizeof(kctl->id.name)))
			/* nothing */ ;
		else if (info->name)
			strlcpy(kctl->id.name, info->name, sizeof(kctl->id.name));
		else {
			nameid = uac_processing_unit_iProcessing(desc, state->mixer->protocol);
			len = 0;
			if (nameid)
				len = snd_usb_copy_string_desc(state, nameid, kctl->id.name, sizeof(kctl->id.name));
			if (! len)
				strlcpy(kctl->id.name, name, sizeof(kctl->id.name));
		}
		append_ctl_name(kctl, " ");
		append_ctl_name(kctl, valinfo->suffix);

		snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n",
			    cval->id, kctl->id.name, cval->channels, cval->min, cval->max);
		if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0)
			return err;
	}
	return 0;
}
/* parse a Processing Unit descriptor using the known-type table */
static int parse_audio_processing_unit(struct mixer_build *state, int unitid, void *raw_desc)
{
	return build_audio_procunit(state, unitid, raw_desc, procunits, "Processing Unit");
}
/* parse an Extension Unit descriptor.
 * Note that we parse extension units with processing unit descriptors.
 * That's ok as the layout is the same */
static int parse_audio_extension_unit(struct mixer_build *state, int unitid, void *raw_desc)
{
	return build_audio_procunit(state, unitid, raw_desc, extunits, "Extension Unit");
}
/*
* Selector Unit
*/
/* info callback for selector unit controls;
 * presented as an enumerator for source routing */
static int mixer_ctl_selector_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	const char **names = (const char **)kcontrol->private_value;

	/* the item list must have been attached at build time */
	if (snd_BUG_ON(!names))
		return -EINVAL;
	return snd_ctl_enum_info(uinfo, 1, cval->max, names);
}
/* get callback for selector unit controls */
static int mixer_ctl_selector_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int cur, err;

	err = get_cur_ctl_value(cval, cval->control << 8, &cur);
	if (err < 0) {
		if (!cval->mixer->ignore_ctl_error)
			return err;
		/* report the first item instead of failing */
		ucontrol->value.enumerated.item[0] = 0;
		return 0;
	}
	ucontrol->value.enumerated.item[0] = get_relative_value(cval, cur);
	return 0;
}
/* put callback for selector unit controls */
static int mixer_ctl_selector_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct usb_mixer_elem_info *cval = kcontrol->private_data;
	int new_val, cur_val, err;

	err = get_cur_ctl_value(cval, cval->control << 8, &cur_val);
	if (err < 0)
		return cval->mixer->ignore_ctl_error ? 0 : err;

	new_val = get_abs_value(cval, ucontrol->value.enumerated.item[0]);
	if (new_val == cur_val)
		return 0;	/* unchanged */

	set_cur_ctl_value(cval, cval->control << 8, new_val);
	return 1;		/* value changed */
}
/*
 * ALSA control template for selector units; private_value holds the
 * item-name array, freed by usb_mixer_selector_elem_free().
 */
static struct snd_kcontrol_new mixer_selectunit_ctl = {
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.name = "", /* will be filled later */
	.info = mixer_ctl_selector_info,
	.get = mixer_ctl_selector_get,
	.put = mixer_ctl_selector_put,
};
/*
 * Private free callback for selector controls: releases both the
 * element info (private_data) and the item-name list (private_value).
 */
static void usb_mixer_selector_elem_free(struct snd_kcontrol *kctl)
{
	struct usb_mixer_elem_info *cval = kctl->private_data;
	char **names = (char **)kctl->private_value;
	int count = 0;

	if (cval) {
		count = cval->max;	/* number of item names allocated */
		kfree(cval);
		kctl->private_data = NULL;
	}
	if (names) {
		int i;

		for (i = 0; i < count; i++)
			kfree(names[i]);
		kfree(names);
		kctl->private_value = 0;
	}
}
/*
 * Parse a selector unit (or UAC2 clock selector) and build an
 * enumerated ALSA control for choosing among its input sources.
 *
 * Returns zero or a negative error code.
 */
static int parse_audio_selector_unit(struct mixer_build *state, int unitid, void *raw_desc)
{
	struct uac_selector_unit_descriptor *desc = raw_desc;
	unsigned int i, nameid, len;
	int err;
	struct usb_mixer_elem_info *cval;
	struct snd_kcontrol *kctl;
	const struct usbmix_name_map *map;
	char **namelist;

	if (!desc->bNrInPins || desc->bLength < 5 + desc->bNrInPins) {
		snd_printk(KERN_ERR "invalid SELECTOR UNIT descriptor %d\n", unitid);
		return -EINVAL;
	}

	/* parse all upstream units first */
	for (i = 0; i < desc->bNrInPins; i++) {
		if ((err = parse_audio_unit(state, desc->baSourceID[i])) < 0)
			return err;
	}

	if (desc->bNrInPins == 1) /* only one ? nonsense! */
		return 0;

	map = find_map(state, unitid, 0);
	if (check_ignored_ctl(map))
		return 0;

	cval = kzalloc(sizeof(*cval), GFP_KERNEL);
	if (! cval) {
		snd_printk(KERN_ERR "cannot malloc kcontrol\n");
		return -ENOMEM;
	}
	cval->mixer = state->mixer;
	cval->id = unitid;
	cval->val_type = USB_MIXER_U8;
	cval->channels = 1;
	cval->min = 1;
	cval->max = desc->bNrInPins;
	cval->res = 1;
	cval->initialized = 1;

	if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
		cval->control = UAC2_CX_CLOCK_SELECTOR;
	else
		cval->control = 0;

	namelist = kmalloc(sizeof(char *) * desc->bNrInPins, GFP_KERNEL);
	if (! namelist) {
		snd_printk(KERN_ERR "cannot malloc\n");
		kfree(cval);
		return -ENOMEM;
	}
#define MAX_ITEM_NAME_LEN	64
	for (i = 0; i < desc->bNrInPins; i++) {
		struct usb_audio_term iterm;
		len = 0;
		namelist[i] = kmalloc(MAX_ITEM_NAME_LEN, GFP_KERNEL);
		if (! namelist[i]) {
			snd_printk(KERN_ERR "cannot malloc\n");
			while (i--)
				kfree(namelist[i]);
			kfree(namelist);
			kfree(cval);
			return -ENOMEM;
		}
		/* item-name priority: map -> input terminal name -> "Input %d" */
		len = check_mapped_selector_name(state, unitid, i, namelist[i],
						 MAX_ITEM_NAME_LEN);
		if (! len && check_input_term(state, desc->baSourceID[i], &iterm) >= 0)
			len = get_term_name(state, &iterm, namelist[i], MAX_ITEM_NAME_LEN, 0);
		if (! len)
			sprintf(namelist[i], "Input %d", i);
	}

	kctl = snd_ctl_new1(&mixer_selectunit_ctl, cval);
	if (! kctl) {
		snd_printk(KERN_ERR "cannot malloc kcontrol\n");
		/* previously the item strings leaked here; free them too */
		for (i = 0; i < desc->bNrInPins; i++)
			kfree(namelist[i]);
		kfree(namelist);
		kfree(cval);
		return -ENOMEM;
	}
	kctl->private_value = (unsigned long)namelist;
	kctl->private_free = usb_mixer_selector_elem_free;

	/*
	 * Control-name priority: explicit map -> iSelector string ->
	 * output terminal name -> "USB".  The return value of
	 * snd_usb_copy_string_desc() used to be discarded, so a failed
	 * string read left the control with an empty name and no
	 * fallback; now we fall through to the terminal-name path.
	 */
	nameid = uac_selector_unit_iSelector(desc);
	len = check_mapped_name(map, kctl->id.name, sizeof(kctl->id.name));
	if (! len && nameid)
		len = snd_usb_copy_string_desc(state, nameid, kctl->id.name,
					       sizeof(kctl->id.name));
	if (! len) {
		len = get_term_name(state, &state->oterm,
				    kctl->id.name, sizeof(kctl->id.name), 0);
		if (! len)
			strlcpy(kctl->id.name, "USB", sizeof(kctl->id.name));

		if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR)
			append_ctl_name(kctl, " Clock Source");
		else if ((state->oterm.type & 0xff00) == 0x0100)
			append_ctl_name(kctl, " Capture Source");
		else
			append_ctl_name(kctl, " Playback Source");
	}

	snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n",
		    cval->id, kctl->id.name, desc->bNrInPins);
	if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0)
		return err;

	return 0;
}
/*
 * Parse one unit of the audio topology, recursing into its sources.
 * Each unit ID is visited at most once (tracked in state->unitbitmap).
 */
static int parse_audio_unit(struct mixer_build *state, int unitid)
{
	unsigned char *p1;

	if (test_and_set_bit(unitid, state->unitbitmap))
		return 0; /* the unit already visited */

	p1 = find_audio_control_unit(state, unitid);
	if (!p1) {
		snd_printk(KERN_ERR "usbaudio: unit %d not found!\n", unitid);
		return -EINVAL;
	}

	/* p1[2] is the bDescriptorSubtype of the unit descriptor */
	switch (p1[2]) {
	case UAC_INPUT_TERMINAL:
	case UAC2_CLOCK_SOURCE:
		return 0; /* NOP */
	case UAC_MIXER_UNIT:
		return parse_audio_mixer_unit(state, unitid, p1);
	case UAC_SELECTOR_UNIT:
	case UAC2_CLOCK_SELECTOR:
		return parse_audio_selector_unit(state, unitid, p1);
	case UAC_FEATURE_UNIT:
		return parse_audio_feature_unit(state, unitid, p1);
	case UAC1_PROCESSING_UNIT:
	/* UAC2_EFFECT_UNIT has the same value */
		if (state->mixer->protocol == UAC_VERSION_1)
			return parse_audio_processing_unit(state, unitid, p1);
		else
			return 0; /* FIXME - effect units not implemented yet */
	case UAC1_EXTENSION_UNIT:
	/* UAC2_PROCESSING_UNIT_V2 has the same value */
		if (state->mixer->protocol == UAC_VERSION_1)
			return parse_audio_extension_unit(state, unitid, p1);
		else /* UAC_VERSION_2 */
			return parse_audio_processing_unit(state, unitid, p1);
	case UAC2_EXTENSION_UNIT_V2:
		return parse_audio_extension_unit(state, unitid, p1);
	default:
		snd_printk(KERN_ERR "usbaudio: unit %u: unexpected type 0x%02x\n", unitid, p1[2]);
		return -EINVAL;
	}
}
/*
 * Release everything owned by a mixer interface: the per-unit element
 * lists, the status URB with its transfer buffer, the remote-control
 * URB and setup packet, and the mixer object itself.
 */
static void snd_usb_mixer_free(struct usb_mixer_interface *mixer)
{
	kfree(mixer->id_elems);
	if (mixer->urb) {
		kfree(mixer->urb->transfer_buffer);
		usb_free_urb(mixer->urb);
	}
	usb_free_urb(mixer->rc_urb);
	kfree(mixer->rc_setup_packet);
	kfree(mixer);
}
/* snd_device_ops dev_free hook: tear down the attached mixer */
static int snd_usb_mixer_dev_free(struct snd_device *device)
{
	snd_usb_mixer_free(device->device_data);
	return 0;
}
/*
 * create mixer controls
 *
 * walk through all UAC_OUTPUT_TERMINAL descriptors to search for mixers
 */
static int snd_usb_mixer_controls(struct usb_mixer_interface *mixer)
{
	struct mixer_build state;
	int err;
	const struct usbmix_ctl_map *map;
	void *p;

	memset(&state, 0, sizeof(state));
	state.chip = mixer->chip;
	state.mixer = mixer;
	state.buffer = mixer->hostif->extra;
	state.buflen = mixer->hostif->extralen;

	/* check the mapping table: device-specific name maps and quirks */
	for (map = usbmix_ctl_maps; map->id; map++) {
		if (map->id == state.chip->usb_id) {
			state.map = map->map;
			state.selector_map = map->selector_map;
			mixer->ignore_ctl_error = map->ignore_ctl_error;
			break;
		}
	}

	/* walk every output terminal and parse its source chain */
	p = NULL;
	while ((p = snd_usb_find_csint_desc(mixer->hostif->extra, mixer->hostif->extralen,
					    p, UAC_OUTPUT_TERMINAL)) != NULL) {
		if (mixer->protocol == UAC_VERSION_1) {
			struct uac1_output_terminal_descriptor *desc = p;

			if (desc->bLength < sizeof(*desc))
				continue; /* invalid descriptor? */
			set_bit(desc->bTerminalID, state.unitbitmap); /* mark terminal ID as visited */
			state.oterm.id = desc->bTerminalID;
			state.oterm.type = le16_to_cpu(desc->wTerminalType);
			state.oterm.name = desc->iTerminal;
			err = parse_audio_unit(&state, desc->bSourceID);
			/* -EINVAL means an unparsable unit; keep going */
			if (err < 0 && err != -EINVAL)
				return err;
		} else { /* UAC_VERSION_2 */
			struct uac2_output_terminal_descriptor *desc = p;

			if (desc->bLength < sizeof(*desc))
				continue; /* invalid descriptor? */
			set_bit(desc->bTerminalID, state.unitbitmap); /* mark terminal ID as visited */
			state.oterm.id = desc->bTerminalID;
			state.oterm.type = le16_to_cpu(desc->wTerminalType);
			state.oterm.name = desc->iTerminal;
			err = parse_audio_unit(&state, desc->bSourceID);
			if (err < 0 && err != -EINVAL)
				return err;

			/* for UAC2, use the same approach to also add the clock selectors */
			err = parse_audio_unit(&state, desc->bCSourceID);
			if (err < 0 && err != -EINVAL)
				return err;
		}
	}

	return 0;
}
/* notify a value change on every control element bound to a unit ID */
void snd_usb_mixer_notify_id(struct usb_mixer_interface *mixer, int unitid)
{
	struct usb_mixer_elem_info *elem;

	for (elem = mixer->id_elems[unitid]; elem; elem = elem->next_id_elem)
		snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       elem->elem_id);
}
/* dump one mixer element for the /proc "usbmixer" file */
static void snd_usb_mixer_dump_cval(struct snd_info_buffer *buffer,
				    int unitid,
				    struct usb_mixer_elem_info *cval)
{
	/* indexed by the element's USB_MIXER_* value type */
	static char *val_types[] = {"BOOLEAN", "INV_BOOLEAN",
				    "S8", "U8", "S16", "U16"};
	snd_iprintf(buffer, " Unit: %i\n", unitid);
	if (cval->elem_id)
		snd_iprintf(buffer, " Control: name=\"%s\", index=%i\n",
			    cval->elem_id->name, cval->elem_id->index);
	snd_iprintf(buffer, " Info: id=%i, control=%i, cmask=0x%x, "
		    "channels=%i, type=\"%s\"\n", cval->id,
		    cval->control, cval->cmask, cval->channels,
		    val_types[cval->val_type]);
	snd_iprintf(buffer, " Volume: min=%i, max=%i, dBmin=%i, dBmax=%i\n",
		    cval->min, cval->max, cval->dBmin, cval->dBmax);
}
/* /proc "usbmixer" read handler: dump all mixers of the card */
static void snd_usb_mixer_proc_read(struct snd_info_entry *entry,
				    struct snd_info_buffer *buffer)
{
	struct snd_usb_audio *chip = entry->private_data;
	struct usb_mixer_interface *mixer;
	struct usb_mixer_elem_info *elem;
	int id;

	list_for_each_entry(mixer, &chip->mixer_list, list) {
		snd_iprintf(buffer,
			    "USB Mixer: usb_id=0x%08x, ctrlif=%i, ctlerr=%i\n",
			    chip->usb_id, snd_usb_ctrl_intf(chip),
			    mixer->ignore_ctl_error);
		snd_iprintf(buffer, "Card: %s\n", chip->card->longname);
		/* walk every element hanging off each unit ID */
		for (id = 0; id < MAX_ID_ELEMS; id++)
			for (elem = mixer->id_elems[id]; elem;
			     elem = elem->next_id_elem)
				snd_usb_mixer_dump_cval(buffer, id, elem);
	}
}
/*
 * Handle one UAC2 interrupt data message.
 * wIndex carries the unit ID in its high byte; wValue carries the
 * control selector (high byte) and the channel number (low byte).
 */
static void snd_usb_mixer_interrupt_v2(struct usb_mixer_interface *mixer,
				       int attribute, int value, int index)
{
	struct usb_mixer_elem_info *info;
	__u8 unitid = (index >> 8) & 0xff;
	__u8 control = (value >> 8) & 0xff;
	__u8 channel = value & 0xff;

	if (channel >= MAX_CHANNELS) {
		snd_printk(KERN_DEBUG "%s(): bogus channel number %d\n",
			   __func__, channel);
		return;
	}

	/* notify only the elements matching this control selector */
	for (info = mixer->id_elems[unitid]; info; info = info->next_id_elem) {
		if (info->control != control)
			continue;

		switch (attribute) {
		case UAC2_CS_CUR:
			/* invalidate cache, so the value is read from the device */
			if (channel)
				info->cached &= ~(1 << channel);
			else /* master channel */
				info->cached = 0;

			snd_ctl_notify(mixer->chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
				       info->elem_id);
			break;

		case UAC2_CS_RANGE:
			/* TODO */
			break;

		case UAC2_CS_MEM:
			/* TODO */
			break;

		default:
			snd_printk(KERN_DEBUG "unknown attribute %d in interrupt\n",
				   attribute);
			break;
		} /* switch */
	}
}
/*
 * URB completion handler for the status interrupt endpoint.
 * Decodes UAC1 status words or UAC2 interrupt data messages and
 * resubmits the URB unless it was unlinked or the device is gone.
 */
static void snd_usb_mixer_interrupt(struct urb *urb)
{
	struct usb_mixer_interface *mixer = urb->context;
	int len = urb->actual_length;
	int ustatus = urb->status;

	if (ustatus != 0)
		goto requeue;

	if (mixer->protocol == UAC_VERSION_1) {
		struct uac1_status_word *status;

		for (status = urb->transfer_buffer;
		     len >= sizeof(*status);
		     len -= sizeof(*status), status++) {
			snd_printd(KERN_DEBUG "status interrupt: %02x %02x\n",
				   status->bStatusType,
				   status->bOriginator);

			/* ignore any notifications not from the control interface */
			if ((status->bStatusType & UAC1_STATUS_TYPE_ORIG_MASK) !=
			    UAC1_STATUS_TYPE_ORIG_AUDIO_CONTROL_IF)
				continue;

			if (status->bStatusType & UAC1_STATUS_TYPE_MEM_CHANGED)
				snd_usb_mixer_rc_memory_change(mixer, status->bOriginator);
			else
				snd_usb_mixer_notify_id(mixer, status->bOriginator);
		}
	} else { /* UAC_VERSION_2 */
		struct uac2_interrupt_data_msg *msg;

		for (msg = urb->transfer_buffer;
		     len >= sizeof(*msg);
		     len -= sizeof(*msg), msg++) {
			/* drop vendor specific and endpoint requests */
			if ((msg->bInfo & UAC2_INTERRUPT_DATA_MSG_VENDOR) ||
			    (msg->bInfo & UAC2_INTERRUPT_DATA_MSG_EP))
				continue;

			snd_usb_mixer_interrupt_v2(mixer, msg->bAttribute,
						   le16_to_cpu(msg->wValue),
						   le16_to_cpu(msg->wIndex));
		}
	}

requeue:
	/* do not resubmit after unlink, reset or device removal */
	if (ustatus != -ENOENT && ustatus != -ECONNRESET && ustatus != -ESHUTDOWN) {
		urb->dev = mixer->chip->dev;
		usb_submit_urb(urb, GFP_ATOMIC);
	}
}
/* stop any bus activity of a mixer: kill the status and RC URBs */
void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer)
{
	usb_kill_urb(mixer->urb);
	usb_kill_urb(mixer->rc_urb);
}
/* restart the status interrupt URB after snd_usb_mixer_inactivate() */
int snd_usb_mixer_activate(struct usb_mixer_interface *mixer)
{
	int err = 0;

	/* nothing to do when no status endpoint was set up */
	if (mixer->urb)
		err = usb_submit_urb(mixer->urb, GFP_NOIO);

	return err < 0 ? err : 0;
}
/*
 * Create the handler for the optional status interrupt endpoint.
 *
 * If the control interface has an interrupt-IN endpoint, allocate a
 * transfer buffer and URB and start listening for status messages.
 * Returns 0 also when no such endpoint exists; -ENOMEM on allocation
 * failure.
 */
static int snd_usb_mixer_status_create(struct usb_mixer_interface *mixer)
{
	struct usb_endpoint_descriptor *ep;
	void *transfer_buffer;
	int buffer_length;
	unsigned int epnum;

	/* we need one interrupt input endpoint */
	if (get_iface_desc(mixer->hostif)->bNumEndpoints < 1)
		return 0;
	ep = get_endpoint(mixer->hostif, 0);
	if (!usb_endpoint_dir_in(ep) || !usb_endpoint_xfer_int(ep))
		return 0;

	epnum = usb_endpoint_num(ep);
	buffer_length = le16_to_cpu(ep->wMaxPacketSize);
	transfer_buffer = kmalloc(buffer_length, GFP_KERNEL);
	if (!transfer_buffer)
		return -ENOMEM;
	mixer->urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!mixer->urb) {
		kfree(transfer_buffer);
		return -ENOMEM;
	}
	usb_fill_int_urb(mixer->urb, mixer->chip->dev,
			 usb_rcvintpipe(mixer->chip->dev, epnum),
			 transfer_buffer, buffer_length,
			 snd_usb_mixer_interrupt, mixer, ep->bInterval);
	/* NOTE(review): the usb_submit_urb() result is discarded here;
	 * presumably a failed submission only loses notifications --
	 * confirm this is intended */
	usb_submit_urb(mixer->urb, GFP_KERNEL);
	return 0;
}
/*
 * Create and register the mixer for the given control interface.
 *
 * Parses all units reachable from the output terminals, sets up the
 * optional status interrupt endpoint, applies device quirks and hooks
 * the mixer into the card.  On failure everything allocated so far is
 * released through snd_usb_mixer_free().
 */
int snd_usb_create_mixer(struct snd_usb_audio *chip, int ctrlif,
			 int ignore_error)
{
	static struct snd_device_ops dev_ops = {
		.dev_free = snd_usb_mixer_dev_free
	};
	struct usb_mixer_interface *mixer;
	struct snd_info_entry *entry;
	int err;

	strcpy(chip->card->mixername, "USB Mixer");

	mixer = kzalloc(sizeof(*mixer), GFP_KERNEL);
	if (!mixer)
		return -ENOMEM;
	mixer->chip = chip;
	mixer->ignore_ctl_error = ignore_error;
	mixer->id_elems = kcalloc(MAX_ID_ELEMS, sizeof(*mixer->id_elems),
				  GFP_KERNEL);
	if (!mixer->id_elems) {
		kfree(mixer);
		return -ENOMEM;
	}

	mixer->hostif = &usb_ifnum_to_if(chip->dev, ctrlif)->altsetting[0];
	/* bInterfaceProtocol of the control interface selects UAC1 vs UAC2 */
	switch (get_iface_desc(mixer->hostif)->bInterfaceProtocol) {
	case UAC_VERSION_1:
	default:
		mixer->protocol = UAC_VERSION_1;
		break;
	case UAC_VERSION_2:
		mixer->protocol = UAC_VERSION_2;
		break;
	}

	if ((err = snd_usb_mixer_controls(mixer)) < 0 ||
	    (err = snd_usb_mixer_status_create(mixer)) < 0)
		goto _error;

	snd_usb_mixer_apply_create_quirk(mixer);

	err = snd_device_new(chip->card, SNDRV_DEV_LOWLEVEL, mixer, &dev_ops);
	if (err < 0)
		goto _error;

	/* create the proc entry only once, for the first mixer of the card */
	if (list_empty(&chip->mixer_list) &&
	    !snd_card_proc_new(chip->card, "usbmixer", &entry))
		snd_info_set_text_ops(entry, chip, snd_usb_mixer_proc_read);

	list_add(&mixer->list, &chip->mixer_list);
	return 0;

_error:
	snd_usb_mixer_free(mixer);
	return err;
}
/* disconnect hook: stop all pending URB traffic of the mixer */
void snd_usb_mixer_disconnect(struct list_head *p)
{
	struct usb_mixer_interface *mixer =
		list_entry(p, struct usb_mixer_interface, list);

	usb_kill_urb(mixer->urb);
	usb_kill_urb(mixer->rc_urb);
}
| gpl-2.0 |
t0mm13b/ZTE-Blade-2.6.35.11 | drivers/i2c/busses/i2c-designware.c | 1100 | 22733 | /*
* Synopsys DesignWare I2C adapter driver (master only).
*
* Based on the TI DAVINCI I2C adapter driver.
*
* Copyright (C) 2006 Texas Instruments.
* Copyright (C) 2007 MontaVista Software Inc.
* Copyright (C) 2009 Provigent Ltd.
*
* ----------------------------------------------------------------------------
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
* ----------------------------------------------------------------------------
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/clk.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
/*
* Registers offset
*/
#define DW_IC_CON 0x0
#define DW_IC_TAR 0x4
#define DW_IC_DATA_CMD 0x10
#define DW_IC_SS_SCL_HCNT 0x14
#define DW_IC_SS_SCL_LCNT 0x18
#define DW_IC_FS_SCL_HCNT 0x1c
#define DW_IC_FS_SCL_LCNT 0x20
#define DW_IC_INTR_STAT 0x2c
#define DW_IC_INTR_MASK 0x30
#define DW_IC_RAW_INTR_STAT 0x34
#define DW_IC_RX_TL 0x38
#define DW_IC_TX_TL 0x3c
#define DW_IC_CLR_INTR 0x40
#define DW_IC_CLR_RX_UNDER 0x44
#define DW_IC_CLR_RX_OVER 0x48
#define DW_IC_CLR_TX_OVER 0x4c
#define DW_IC_CLR_RD_REQ 0x50
#define DW_IC_CLR_TX_ABRT 0x54
#define DW_IC_CLR_RX_DONE 0x58
#define DW_IC_CLR_ACTIVITY 0x5c
#define DW_IC_CLR_STOP_DET 0x60
#define DW_IC_CLR_START_DET 0x64
#define DW_IC_CLR_GEN_CALL 0x68
#define DW_IC_ENABLE 0x6c
#define DW_IC_STATUS 0x70
#define DW_IC_TXFLR 0x74
#define DW_IC_RXFLR 0x78
#define DW_IC_COMP_PARAM_1 0xf4
#define DW_IC_TX_ABRT_SOURCE 0x80
#define DW_IC_CON_MASTER 0x1
#define DW_IC_CON_SPEED_STD 0x2
#define DW_IC_CON_SPEED_FAST 0x4
#define DW_IC_CON_10BITADDR_MASTER 0x10
#define DW_IC_CON_RESTART_EN 0x20
#define DW_IC_CON_SLAVE_DISABLE 0x40
#define DW_IC_INTR_RX_UNDER 0x001
#define DW_IC_INTR_RX_OVER 0x002
#define DW_IC_INTR_RX_FULL 0x004
#define DW_IC_INTR_TX_OVER 0x008
#define DW_IC_INTR_TX_EMPTY 0x010
#define DW_IC_INTR_RD_REQ 0x020
#define DW_IC_INTR_TX_ABRT 0x040
#define DW_IC_INTR_RX_DONE 0x080
#define DW_IC_INTR_ACTIVITY 0x100
#define DW_IC_INTR_STOP_DET 0x200
#define DW_IC_INTR_START_DET 0x400
#define DW_IC_INTR_GEN_CALL 0x800
#define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \
DW_IC_INTR_TX_EMPTY | \
DW_IC_INTR_TX_ABRT | \
DW_IC_INTR_STOP_DET)
#define DW_IC_STATUS_ACTIVITY 0x1
#define DW_IC_ERR_TX_ABRT 0x1
/*
* status codes
*/
#define STATUS_IDLE 0x0
#define STATUS_WRITE_IN_PROGRESS 0x1
#define STATUS_READ_IN_PROGRESS 0x2
#define TIMEOUT 20 /* ms */
/*
* hardware abort codes from the DW_IC_TX_ABRT_SOURCE register
*
* only expected abort codes are listed here
* refer to the datasheet for the full list
*/
#define ABRT_7B_ADDR_NOACK 0
#define ABRT_10ADDR1_NOACK 1
#define ABRT_10ADDR2_NOACK 2
#define ABRT_TXDATA_NOACK 3
#define ABRT_GCALL_NOACK 4
#define ABRT_GCALL_READ 5
#define ABRT_SBYTE_ACKDET 7
#define ABRT_SBYTE_NORSTRT 9
#define ABRT_10B_RD_NORSTRT 10
#define ABRT_MASTER_DIS 11
#define ARB_LOST 12
#define DW_IC_TX_ABRT_7B_ADDR_NOACK (1UL << ABRT_7B_ADDR_NOACK)
#define DW_IC_TX_ABRT_10ADDR1_NOACK (1UL << ABRT_10ADDR1_NOACK)
#define DW_IC_TX_ABRT_10ADDR2_NOACK (1UL << ABRT_10ADDR2_NOACK)
#define DW_IC_TX_ABRT_TXDATA_NOACK (1UL << ABRT_TXDATA_NOACK)
#define DW_IC_TX_ABRT_GCALL_NOACK (1UL << ABRT_GCALL_NOACK)
#define DW_IC_TX_ABRT_GCALL_READ (1UL << ABRT_GCALL_READ)
#define DW_IC_TX_ABRT_SBYTE_ACKDET (1UL << ABRT_SBYTE_ACKDET)
#define DW_IC_TX_ABRT_SBYTE_NORSTRT (1UL << ABRT_SBYTE_NORSTRT)
#define DW_IC_TX_ABRT_10B_RD_NORSTRT (1UL << ABRT_10B_RD_NORSTRT)
#define DW_IC_TX_ABRT_MASTER_DIS (1UL << ABRT_MASTER_DIS)
#define DW_IC_TX_ARB_LOST (1UL << ARB_LOST)
#define DW_IC_TX_ABRT_NOACK (DW_IC_TX_ABRT_7B_ADDR_NOACK | \
DW_IC_TX_ABRT_10ADDR1_NOACK | \
DW_IC_TX_ABRT_10ADDR2_NOACK | \
DW_IC_TX_ABRT_TXDATA_NOACK | \
DW_IC_TX_ABRT_GCALL_NOACK)
/* human-readable abort reasons, indexed by the bit number set in the
 * DW_IC_TX_ABRT_SOURCE register */
static char *abort_sources[] = {
	[ABRT_7B_ADDR_NOACK] =
		"slave address not acknowledged (7bit mode)",
	[ABRT_10ADDR1_NOACK] =
		"first address byte not acknowledged (10bit mode)",
	[ABRT_10ADDR2_NOACK] =
		"second address byte not acknowledged (10bit mode)",
	[ABRT_TXDATA_NOACK] =
		"data not acknowledged",
	[ABRT_GCALL_NOACK] =
		"no acknowledgement for a general call",
	[ABRT_GCALL_READ] =
		"read after general call",
	[ABRT_SBYTE_ACKDET] =
		"start byte acknowledged",
	[ABRT_SBYTE_NORSTRT] =
		"trying to send start byte when restart is disabled",
	[ABRT_10B_RD_NORSTRT] =
		"trying to read when restart is disabled (10bit mode)",
	[ABRT_MASTER_DIS] =
		"trying to use disabled adapter",
	[ARB_LOST] =
		"lost arbitration",
};
/**
 * struct dw_i2c_dev - private i2c-designware data
 * @dev: driver model device node
 * @base: IO registers pointer
 * @cmd_complete: tx completion indicator
 * @lock: protect this struct and IO registers
 * @clk: input reference clock
 * @cmd_err: run time hardware error code
 * @msgs: points to an array of messages currently being transferred
 * @msgs_num: the number of elements in msgs
 * @msg_write_idx: the element index of the current tx message in the msgs
 * array
 * @tx_buf_len: the length of the current tx buffer
 * @tx_buf: the current tx buffer
 * @msg_read_idx: the element index of the current rx message in the msgs
 * array
 * @rx_buf_len: the length of the current rx buffer
 * @rx_buf: the current rx buffer
 * @msg_err: error status of the current transfer
 * @status: i2c master status, one of STATUS_*
 * @abort_source: copy of the TX_ABRT_SOURCE register
 * @irq: interrupt number for the i2c master
 * @adapter: i2c subsystem adapter node
 * @tx_fifo_depth: depth of the hardware tx fifo
 * @rx_fifo_depth: depth of the hardware rx fifo
 */
struct dw_i2c_dev {
	struct device *dev;
	void __iomem *base;
	struct completion cmd_complete;
	struct mutex lock;
	struct clk *clk;
	int cmd_err;
	struct i2c_msg *msgs;
	int msgs_num;
	int msg_write_idx;
	u32 tx_buf_len;
	u8 *tx_buf;
	int msg_read_idx;
	u32 rx_buf_len;
	u8 *rx_buf;
	int msg_err;
	unsigned int status;
	u32 abort_source;
	int irq;
	struct i2c_adapter adapter;
	unsigned int tx_fifo_depth;
	unsigned int rx_fifo_depth;
};
/*
 * Compute the SCL high-period count for the IC_[FS]S_SCL_HCNT register.
 *
 * @ic_clk:  core clock in kHz
 * @tSYMBOL: tHIGH (and tHD;STA) in hundreds of ns (e.g. 40 -> 4.0 us)
 * @tf:      SCL fall time in hundreds of ns
 * @cond:    1 selects the "ideal" DW-manual formula, 0 the conservative
 *           one that also satisfies tHD;STA
 * @offset:  correction added to the computed count
 *
 * The DesignWare I2C core doesn't seem to have a solid strategy to meet
 * the tHD;STA timing spec.  Configuring _HCNT based on the tHIGH spec
 * alone will result in violation of the tHD;STA spec.
 */
static u32
i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset)
{
	u32 hcnt;

	if (cond) {
		/*
		 * Conditional expression:
		 *
		 *   IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH
		 *
		 * Based on the DW manuals; an ideal configuration giving
		 * the fastest bus speed of the two.  Use it if your
		 * hardware is free from the tHD;STA issue.
		 */
		hcnt = (ic_clk * tSYMBOL + 5000) / 10000 - 8;
	} else {
		/*
		 * Conditional expression:
		 *
		 *   IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf)
		 *
		 * An experimental rule: the tHD;STA period turned out to
		 * be proportional to (_HCNT + 3), so this setting meets
		 * both the tHIGH and tHD;STA specs.  If unsure, take this
		 * alternative.  "tf" is included here for the same reason
		 * as described in i2c_dw_scl_lcnt().
		 */
		hcnt = (ic_clk * (tSYMBOL + tf) + 5000) / 10000 - 3;
	}

	return hcnt + offset;
}
/*
 * Compute the SCL low-period count for the IC_[FS]S_SCL_LCNT register.
 *
 * Conditional expression:
 *
 *   IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf)
 *
 * The DW I2C core starts counting the SCL CNTs for the LOW period of
 * the SCL clock (tLOW) as soon as it pulls the SCL line, so the fall
 * time of the SCL signal (tf, default 0.3 us for safety) must be
 * added in order to meet the tLOW timing spec.
 */
static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset)
{
	u32 lcnt = (ic_clk * (tLOW + tf) + 5000) / 10000;

	return lcnt - 1 + offset;
}
/**
 * i2c_dw_init() - initialize the designware i2c master hardware
 * @dev: device private data
 *
 * This functions configures and enables the I2C master.
 * This function is called during I2C init function, and in case of timeout at
 * run time.
 */
static void i2c_dw_init(struct dw_i2c_dev *dev)
{
	/* HCNT/LCNT formulas below take the bus clock in kHz */
	u32 input_clock_khz = clk_get_rate(dev->clk) / 1000;
	u32 ic_con, hcnt, lcnt;

	/* Disable the adapter before (re)programming timing parameters */
	writel(0, dev->base + DW_IC_ENABLE);

	/* set standard and fast speed deviders for high/low periods */

	/* Standard-mode */
	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
				40,	/* tHD;STA = tHIGH = 4.0 us */
				3,	/* tf = 0.3 us */
				0,	/* 0: DW default, 1: Ideal */
				0);	/* No offset */
	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
				47,	/* tLOW = 4.7 us */
				3,	/* tf = 0.3 us */
				0);	/* No offset */
	writel(hcnt, dev->base + DW_IC_SS_SCL_HCNT);
	writel(lcnt, dev->base + DW_IC_SS_SCL_LCNT);
	dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);

	/* Fast-mode */
	hcnt = i2c_dw_scl_hcnt(input_clock_khz,
				6,	/* tHD;STA = tHIGH = 0.6 us */
				3,	/* tf = 0.3 us */
				0,	/* 0: DW default, 1: Ideal */
				0);	/* No offset */
	lcnt = i2c_dw_scl_lcnt(input_clock_khz,
				13,	/* tLOW = 1.3 us */
				3,	/* tf = 0.3 us */
				0);	/* No offset */
	writel(hcnt, dev->base + DW_IC_FS_SCL_HCNT);
	writel(lcnt, dev->base + DW_IC_FS_SCL_LCNT);
	dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt);

	/* Configure Tx/Rx FIFO threshold levels */
	writel(dev->tx_fifo_depth - 1, dev->base + DW_IC_TX_TL);
	writel(0, dev->base + DW_IC_RX_TL);

	/* configure the i2c master */
	ic_con = DW_IC_CON_MASTER | DW_IC_CON_SLAVE_DISABLE |
		DW_IC_CON_RESTART_EN | DW_IC_CON_SPEED_FAST;
	writel(ic_con, dev->base + DW_IC_CON);
}
/*
 * Poll IC_STATUS until the bus-activity bit clears, sleeping 1 ms
 * between samples.  Gives up after TIMEOUT iterations.
 *
 * Returns 0 when the bus is idle, -ETIMEDOUT otherwise.
 */
static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev)
{
	int remaining;

	for (remaining = TIMEOUT; ; remaining--) {
		if (!(readl(dev->base + DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY))
			return 0;
		if (remaining <= 0) {
			dev_warn(dev->dev, "timeout waiting for bus ready\n");
			return -ETIMEDOUT;
		}
		mdelay(1);
	}
}
/*
 * Program the target address and addressing mode for the upcoming
 * transfer, then enable the adapter and unmask its interrupts.  The
 * adapter is held disabled while IC_TAR / IC_CON are updated.
 */
static void i2c_dw_xfer_init(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	u32 ic_con;

	/* Disable the adapter */
	writel(0, dev->base + DW_IC_ENABLE);

	/* set the slave (target) address */
	writel(msgs[dev->msg_write_idx].addr, dev->base + DW_IC_TAR);

	/* if the slave address is ten bit address, enable 10BITADDR */
	ic_con = readl(dev->base + DW_IC_CON);
	if (msgs[dev->msg_write_idx].flags & I2C_M_TEN)
		ic_con |= DW_IC_CON_10BITADDR_MASTER;
	else
		ic_con &= ~DW_IC_CON_10BITADDR_MASTER;
	writel(ic_con, dev->base + DW_IC_CON);

	/* Enable the adapter */
	writel(1, dev->base + DW_IC_ENABLE);

	/* Enable interrupts */
	writel(DW_IC_INTR_DEFAULT_MASK, dev->base + DW_IC_INTR_MASK);
}
/*
* Initiate (and continue) low level master read/write transaction.
* This function is only called from i2c_dw_isr, and pumping i2c_msg
* messages into the tx buffer. Even if the size of i2c_msg data is
* longer than the size of the tx buffer, it handles everything.
*/
static void
i2c_dw_xfer_msg(struct dw_i2c_dev *dev)
{
struct i2c_msg *msgs = dev->msgs;
u32 intr_mask;
int tx_limit, rx_limit;
u32 addr = msgs[dev->msg_write_idx].addr;
u32 buf_len = dev->tx_buf_len;
u8 *buf = dev->tx_buf;;
intr_mask = DW_IC_INTR_DEFAULT_MASK;
for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) {
/*
* if target address has changed, we need to
* reprogram the target address in the i2c
* adapter when we are done with this transfer
*/
if (msgs[dev->msg_write_idx].addr != addr) {
dev_err(dev->dev,
"%s: invalid target address\n", __func__);
dev->msg_err = -EINVAL;
break;
}
if (msgs[dev->msg_write_idx].len == 0) {
dev_err(dev->dev,
"%s: invalid message length\n", __func__);
dev->msg_err = -EINVAL;
break;
}
if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) {
/* new i2c_msg */
buf = msgs[dev->msg_write_idx].buf;
buf_len = msgs[dev->msg_write_idx].len;
}
tx_limit = dev->tx_fifo_depth - readl(dev->base + DW_IC_TXFLR);
rx_limit = dev->rx_fifo_depth - readl(dev->base + DW_IC_RXFLR);
while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) {
if (msgs[dev->msg_write_idx].flags & I2C_M_RD) {
writel(0x100, dev->base + DW_IC_DATA_CMD);
rx_limit--;
} else
writel(*buf++, dev->base + DW_IC_DATA_CMD);
tx_limit--; buf_len--;
}
dev->tx_buf = buf;
dev->tx_buf_len = buf_len;
if (buf_len > 0) {
/* more bytes to be written */
dev->status |= STATUS_WRITE_IN_PROGRESS;
break;
} else
dev->status &= ~STATUS_WRITE_IN_PROGRESS;
}
/*
* If i2c_msg index search is completed, we don't need TX_EMPTY
* interrupt any more.
*/
if (dev->msg_write_idx == dev->msgs_num)
intr_mask &= ~DW_IC_INTR_TX_EMPTY;
if (dev->msg_err)
intr_mask = 0;
writel(intr_mask, dev->base + DW_IC_INTR_MASK);
}
/*
 * Drain the rx FIFO into the buffers of pending I2C_M_RD messages.
 * Called from the interrupt path on RX_FULL; progress across calls is
 * kept in dev->msg_read_idx / dev->rx_buf / dev->rx_buf_len.
 */
static void
i2c_dw_read(struct dw_i2c_dev *dev)
{
	struct i2c_msg *msgs = dev->msgs;
	int rx_valid;

	for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) {
		u32 len;
		u8 *buf;

		/* only read messages consume rx data */
		if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD))
			continue;

		/* resume a partially-filled message, or start a new one */
		if (!(dev->status & STATUS_READ_IN_PROGRESS)) {
			len = msgs[dev->msg_read_idx].len;
			buf = msgs[dev->msg_read_idx].buf;
		} else {
			len = dev->rx_buf_len;
			buf = dev->rx_buf;
		}

		/* number of bytes currently waiting in the rx FIFO */
		rx_valid = readl(dev->base + DW_IC_RXFLR);

		for (; len > 0 && rx_valid > 0; len--, rx_valid--)
			*buf++ = readl(dev->base + DW_IC_DATA_CMD);

		if (len > 0) {
			/* FIFO ran dry before this message was complete */
			dev->status |= STATUS_READ_IN_PROGRESS;
			dev->rx_buf_len = len;
			dev->rx_buf = buf;
			return;
		} else
			dev->status &= ~STATUS_READ_IN_PROGRESS;
	}
}
/*
 * Translate the saved IC_TX_ABRT_SOURCE bits into an errno for the
 * caller.  NAK-type aborts are only logged at debug level; all other
 * abort causes are logged as errors.
 */
static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev)
{
	unsigned long abort_source = dev->abort_source;
	int i;

	if (abort_source & DW_IC_TX_ABRT_NOACK) {
		for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
			dev_dbg(dev->dev,
				"%s: %s\n", __func__, abort_sources[i]);
		return -EREMOTEIO;
	}

	for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources))
		dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]);

	if (abort_source & DW_IC_TX_ARB_LOST)
		return -EAGAIN;
	else if (abort_source & DW_IC_TX_ABRT_GCALL_READ)
		return -EINVAL; /* wrong msgs[] data */
	else
		return -EIO;
}
/*
 * Prepare controller for a transaction and call i2c_dw_xfer_msg
 */
static int
i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num)
{
	struct dw_i2c_dev *dev = i2c_get_adapdata(adap);
	int ret;

	dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num);

	mutex_lock(&dev->lock);

	INIT_COMPLETION(dev->cmd_complete);

	/* publish the transfer state consumed by the interrupt handler */
	dev->msgs = msgs;
	dev->msgs_num = num;
	dev->cmd_err = 0;
	dev->msg_write_idx = 0;
	dev->msg_read_idx = 0;
	dev->msg_err = 0;
	dev->status = STATUS_IDLE;
	dev->abort_source = 0;

	ret = i2c_dw_wait_bus_not_busy(dev);
	if (ret < 0)
		goto done;

	/* start the transfers */
	i2c_dw_xfer_init(dev);

	/* wait for tx to complete */
	ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ);
	if (ret == 0) {
		dev_err(dev->dev, "controller timed out\n");
		/* reprogram the hardware - its state is unknown after timeout */
		i2c_dw_init(dev);
		ret = -ETIMEDOUT;
		goto done;
	} else if (ret < 0)
		goto done;

	if (dev->msg_err) {
		ret = dev->msg_err;
		goto done;
	}

	/* no error */
	if (likely(!dev->cmd_err)) {
		/* Disable the adapter */
		writel(0, dev->base + DW_IC_ENABLE);
		ret = num;
		goto done;
	}

	/* We have an error */
	if (dev->cmd_err == DW_IC_ERR_TX_ABRT) {
		ret = i2c_dw_handle_tx_abort(dev);
		goto done;
	}
	ret = -EIO;

done:
	mutex_unlock(&dev->lock);

	return ret;
}
/* Report the I2C/SMBus feature set supported by this adapter. */
static u32 i2c_dw_func(struct i2c_adapter *adap)
{
	u32 functionality = I2C_FUNC_I2C | I2C_FUNC_10BIT_ADDR;

	functionality |= I2C_FUNC_SMBUS_BYTE |
			 I2C_FUNC_SMBUS_BYTE_DATA |
			 I2C_FUNC_SMBUS_WORD_DATA |
			 I2C_FUNC_SMBUS_I2C_BLOCK;

	return functionality;
}
/*
 * Read IC_INTR_STAT and acknowledge every pending interrupt via the
 * per-source IC_CLR_* registers.  Returns the status bits that were set.
 */
static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev)
{
	u32 stat;

	/*
	 * The IC_INTR_STAT register just indicates "enabled" interrupts.
	 * The unmasked raw version of interrupt status bits are available
	 * in the IC_RAW_INTR_STAT register.
	 *
	 * That is,
	 *   stat = readl(IC_INTR_STAT);
	 * equals to,
	 *   stat = readl(IC_RAW_INTR_STAT) & readl(IC_INTR_MASK);
	 *
	 * The raw version might be useful for debugging purposes.
	 */
	stat = readl(dev->base + DW_IC_INTR_STAT);

	/*
	 * Do not use the IC_CLR_INTR register to clear interrupts, or
	 * you'll miss some interrupts, triggered during the period from
	 * readl(IC_INTR_STAT) to readl(IC_CLR_INTR).
	 *
	 * Instead, use the separately-prepared IC_CLR_* registers.
	 */
	if (stat & DW_IC_INTR_RX_UNDER)
		readl(dev->base + DW_IC_CLR_RX_UNDER);
	if (stat & DW_IC_INTR_RX_OVER)
		readl(dev->base + DW_IC_CLR_RX_OVER);
	if (stat & DW_IC_INTR_TX_OVER)
		readl(dev->base + DW_IC_CLR_TX_OVER);
	if (stat & DW_IC_INTR_RD_REQ)
		readl(dev->base + DW_IC_CLR_RD_REQ);
	if (stat & DW_IC_INTR_TX_ABRT) {
		/*
		 * The IC_TX_ABRT_SOURCE register is cleared whenever
		 * the IC_CLR_TX_ABRT is read.  Preserve it beforehand.
		 */
		dev->abort_source = readl(dev->base + DW_IC_TX_ABRT_SOURCE);
		readl(dev->base + DW_IC_CLR_TX_ABRT);
	}
	if (stat & DW_IC_INTR_RX_DONE)
		readl(dev->base + DW_IC_CLR_RX_DONE);
	if (stat & DW_IC_INTR_ACTIVITY)
		readl(dev->base + DW_IC_CLR_ACTIVITY);
	if (stat & DW_IC_INTR_STOP_DET)
		readl(dev->base + DW_IC_CLR_STOP_DET);
	if (stat & DW_IC_INTR_START_DET)
		readl(dev->base + DW_IC_CLR_START_DET);
	if (stat & DW_IC_INTR_GEN_CALL)
		readl(dev->base + DW_IC_CLR_GEN_CALL);

	return stat;
}
/*
 * Interrupt service routine.  This gets called whenever an I2C interrupt
 * occurs.
 */
static irqreturn_t i2c_dw_isr(int this_irq, void *dev_id)
{
	struct dw_i2c_dev *dev = dev_id;
	u32 stat;

	stat = i2c_dw_read_clear_intrbits(dev);
	dev_dbg(dev->dev, "%s: stat=0x%x\n", __func__, stat);

	if (stat & DW_IC_INTR_TX_ABRT) {
		dev->cmd_err |= DW_IC_ERR_TX_ABRT;
		dev->status = STATUS_IDLE;

		/*
		 * Anytime TX_ABRT is set, the contents of the tx/rx
		 * buffers are flushed.  Make sure to skip them.
		 */
		writel(0, dev->base + DW_IC_INTR_MASK);
		goto tx_aborted;
	}

	/* drain rx FIFO first, then refill tx FIFO */
	if (stat & DW_IC_INTR_RX_FULL)
		i2c_dw_read(dev);

	if (stat & DW_IC_INTR_TX_EMPTY)
		i2c_dw_xfer_msg(dev);

	/*
	 * No need to modify or disable the interrupt mask here.
	 * i2c_dw_xfer_msg() will take care of it according to
	 * the current transmit status.
	 */

tx_aborted:
	/* wake the sleeping i2c_dw_xfer() on abort, stop, or msg error */
	if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
		complete(&dev->cmd_complete);

	return IRQ_HANDLED;
}
static struct i2c_algorithm i2c_dw_algo = {
.master_xfer = i2c_dw_xfer,
.functionality = i2c_dw_func,
};
/*
 * Probe: claim and map the register window, enable the reference clock,
 * read FIFO depths out of IC_COMP_PARAM_1, initialize the hardware and
 * register the I2C adapter.  Error paths unwind in reverse order of
 * acquisition via the err_* labels.
 */
static int __devinit dw_i2c_probe(struct platform_device *pdev)
{
	struct dw_i2c_dev *dev;
	struct i2c_adapter *adap;
	struct resource *mem, *ioarea;
	int irq, r;

	/* NOTE: driver uses the static register mapping */
	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&pdev->dev, "no mem resource?\n");
		return -EINVAL;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "no irq resource?\n");
		return irq; /* -ENXIO */
	}

	ioarea = request_mem_region(mem->start, resource_size(mem),
			pdev->name);
	if (!ioarea) {
		dev_err(&pdev->dev, "I2C region already claimed\n");
		return -EBUSY;
	}

	dev = kzalloc(sizeof(struct dw_i2c_dev), GFP_KERNEL);
	if (!dev) {
		r = -ENOMEM;
		goto err_release_region;
	}

	init_completion(&dev->cmd_complete);
	mutex_init(&dev->lock);
	dev->dev = get_device(&pdev->dev);
	dev->irq = irq;
	platform_set_drvdata(pdev, dev);

	dev->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(dev->clk)) {
		r = -ENODEV;
		goto err_free_mem;
	}
	clk_enable(dev->clk);

	dev->base = ioremap(mem->start, resource_size(mem));
	if (dev->base == NULL) {
		dev_err(&pdev->dev, "failure mapping io resources\n");
		r = -EBUSY;
		goto err_unuse_clocks;
	}

	/* FIFO depths are encoded (minus one) in IC_COMP_PARAM_1 */
	{
		u32 param1 = readl(dev->base + DW_IC_COMP_PARAM_1);

		dev->tx_fifo_depth = ((param1 >> 16) & 0xff) + 1;
		dev->rx_fifo_depth = ((param1 >> 8) & 0xff) + 1;
	}

	i2c_dw_init(dev);

	/* keep interrupts masked until the handler is installed */
	writel(0, dev->base + DW_IC_INTR_MASK); /* disable IRQ */
	r = request_irq(dev->irq, i2c_dw_isr, IRQF_DISABLED, pdev->name, dev);
	if (r) {
		dev_err(&pdev->dev, "failure requesting irq %i\n", dev->irq);
		goto err_iounmap;
	}

	adap = &dev->adapter;
	i2c_set_adapdata(adap, dev);
	adap->owner = THIS_MODULE;
	adap->class = I2C_CLASS_HWMON;
	strlcpy(adap->name, "Synopsys DesignWare I2C adapter",
			sizeof(adap->name));
	adap->algo = &i2c_dw_algo;
	adap->dev.parent = &pdev->dev;

	/* bus number comes from platform device id */
	adap->nr = pdev->id;
	r = i2c_add_numbered_adapter(adap);
	if (r) {
		dev_err(&pdev->dev, "failure adding adapter\n");
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
err_iounmap:
	iounmap(dev->base);
err_unuse_clocks:
	clk_disable(dev->clk);
	clk_put(dev->clk);
	dev->clk = NULL;
err_free_mem:
	platform_set_drvdata(pdev, NULL);
	put_device(&pdev->dev);
	kfree(dev);
err_release_region:
	release_mem_region(mem->start, resource_size(mem));

	return r;
}
/*
 * Tear down in reverse order of dw_i2c_probe().
 *
 * Fix: the register mapping created by ioremap() in probe was never
 * unmapped here, leaking the mapping on every remove; iounmap() is now
 * called after the hardware is quiesced and the IRQ is freed.
 */
static int __devexit dw_i2c_remove(struct platform_device *pdev)
{
	struct dw_i2c_dev *dev = platform_get_drvdata(pdev);
	struct resource *mem;

	platform_set_drvdata(pdev, NULL);
	i2c_del_adapter(&dev->adapter);
	put_device(&pdev->dev);

	clk_disable(dev->clk);
	clk_put(dev->clk);
	dev->clk = NULL;

	/* quiesce the controller before releasing the interrupt */
	writel(0, dev->base + DW_IC_ENABLE);
	free_irq(dev->irq, dev);
	iounmap(dev->base);
	kfree(dev);

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(mem->start, resource_size(mem));
	return 0;
}
/* work with hotplug and coldplug */
MODULE_ALIAS("platform:i2c_designware");

/*
 * Platform driver glue.  No .probe here: the probe routine is bound via
 * platform_driver_probe() below, which allows it to live in __devinit.
 */
static struct platform_driver dw_i2c_driver = {
	.remove		= __devexit_p(dw_i2c_remove),
	.driver		= {
		.name	= "i2c_designware",
		.owner	= THIS_MODULE,
	},
};

static int __init dw_i2c_init_driver(void)
{
	return platform_driver_probe(&dw_i2c_driver, dw_i2c_probe);
}
module_init(dw_i2c_init_driver);

static void __exit dw_i2c_exit_driver(void)
{
	platform_driver_unregister(&dw_i2c_driver);
}
module_exit(dw_i2c_exit_driver);
MODULE_AUTHOR("Baruch Siach <baruch@tkos.co.il>");
MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter");
MODULE_LICENSE("GPL");
| gpl-2.0 |
anwarMov/android_kernel_asus_a500cg | drivers/watchdog/ts72xx_wdt.c | 1868 | 11554 | /*
* Watchdog driver for Technologic Systems TS-72xx based SBCs
* (TS-7200, TS-7250 and TS-7260). These boards have external
* glue logic CPLD chip, which includes programmable watchdog
* timer.
*
* Copyright (c) 2009 Mika Westerberg <mika.westerberg@iki.fi>
*
* This driver is based on ep93xx_wdt and wm831x_wdt drivers.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/watchdog.h>
#include <linux/uaccess.h>
#define TS72XX_WDT_FEED_VAL 0x05
#define TS72XX_WDT_DEFAULT_TIMEOUT 8
static int timeout = TS72XX_WDT_DEFAULT_TIMEOUT;
module_param(timeout, int, 0);
MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. "
"(1 <= timeout <= 8, default="
__MODULE_STRING(TS72XX_WDT_DEFAULT_TIMEOUT)
")");
static bool nowayout = WATCHDOG_NOWAYOUT;
module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Disable watchdog shutdown on close");
/**
 * struct ts72xx_wdt - watchdog control structure
 * @lock: lock that protects this structure
 * @regval: watchdog timeout value suitable for control register
 * @flags: flags controlling watchdog device state
 * @control_reg: watchdog control register
 * @feed_reg: watchdog feed register
 * @pdev: back pointer to platform dev
 */
struct ts72xx_wdt {
	struct mutex	lock;
	int		regval;

/* @flags bits: device is open / userland wrote the magic 'V' */
#define TS72XX_WDT_BUSY_FLAG		1
#define TS72XX_WDT_EXPECT_CLOSE_FLAG	2
	int		flags;

	void __iomem	*control_reg;
	void __iomem	*feed_reg;

	struct platform_device *pdev;
};
struct platform_device *ts72xx_wdt_pdev;
/*
* TS-72xx Watchdog supports following timeouts (value written
* to control register):
* value description
* -------------------------
* 0x00 watchdog disabled
* 0x01 250ms
* 0x02 500ms
* 0x03 1s
* 0x04 reserved
* 0x05 2s
* 0x06 4s
* 0x07 8s
*
* Timeouts below 1s are not very usable so we don't
* allow them at all.
*
* We provide two functions that convert between these:
* timeout_to_regval() and regval_to_timeout().
*/
/* Timeout (seconds) <-> control-register value lookup table, ascending */
static const struct {
	int	timeout;
	int	regval;
} ts72xx_wdt_map[] = {
	{ 1, 3 },
	{ 2, 5 },
	{ 4, 6 },
	{ 8, 7 },
};
/**
* timeout_to_regval() - converts given timeout to control register value
* @new_timeout: timeout in seconds to be converted
*
* Function converts given @new_timeout into valid value that can
* be programmed into watchdog control register. When conversion is
* not possible, function returns %-EINVAL.
*/
static int timeout_to_regval(int new_timeout)
{
int i;
/* first limit it to 1 - 8 seconds */
new_timeout = clamp_val(new_timeout, 1, 8);
for (i = 0; i < ARRAY_SIZE(ts72xx_wdt_map); i++) {
if (ts72xx_wdt_map[i].timeout >= new_timeout)
return ts72xx_wdt_map[i].regval;
}
return -EINVAL;
}
/**
* regval_to_timeout() - converts control register value to timeout
* @regval: control register value to be converted
*
* Function converts given @regval to timeout in seconds (1, 2, 4 or 8).
* If @regval cannot be converted, function returns %-EINVAL.
*/
static int regval_to_timeout(int regval)
{
int i;
for (i = 0; i < ARRAY_SIZE(ts72xx_wdt_map); i++) {
if (ts72xx_wdt_map[i].regval == regval)
return ts72xx_wdt_map[i].timeout;
}
return -EINVAL;
}
/**
 * ts72xx_wdt_kick() - kick the watchdog
 * @wdt: watchdog to be kicked
 *
 * Called with @wdt->lock held.
 */
static inline void ts72xx_wdt_kick(struct ts72xx_wdt *wdt)
{
	__raw_writeb(TS72XX_WDT_FEED_VAL, wdt->feed_reg);
}

/**
 * ts72xx_wdt_start() - starts the watchdog timer
 * @wdt: watchdog to be started
 *
 * This function programs timeout to watchdog timer
 * and starts it.
 *
 * Called with @wdt->lock held.
 */
static void ts72xx_wdt_start(struct ts72xx_wdt *wdt)
{
	/*
	 * To program the wdt, it first must be "fed" and
	 * only after that (within 30 usecs) the configuration
	 * can be changed.
	 */
	ts72xx_wdt_kick(wdt);
	__raw_writeb((u8)wdt->regval, wdt->control_reg);
}

/**
 * ts72xx_wdt_stop() - stops the watchdog timer
 * @wdt: watchdog to be stopped
 *
 * Called with @wdt->lock held.
 */
static void ts72xx_wdt_stop(struct ts72xx_wdt *wdt)
{
	/* writing 0 to the control register disables the timer */
	ts72xx_wdt_kick(wdt);
	__raw_writeb(0, wdt->control_reg);
}
/*
 * Open the watchdog device: convert the module "timeout" parameter into
 * a register value, enforce single-open via TS72XX_WDT_BUSY_FLAG, then
 * program and start the timer.
 */
static int ts72xx_wdt_open(struct inode *inode, struct file *file)
{
	struct ts72xx_wdt *wdt = platform_get_drvdata(ts72xx_wdt_pdev);
	int regval;

	/*
	 * Try to convert default timeout to valid register
	 * value first.
	 */
	regval = timeout_to_regval(timeout);
	if (regval < 0) {
		dev_err(&wdt->pdev->dev,
			"failed to convert timeout (%d) to register value\n",
			timeout);
		return -EINVAL;
	}

	if (mutex_lock_interruptible(&wdt->lock))
		return -ERESTARTSYS;

	if ((wdt->flags & TS72XX_WDT_BUSY_FLAG) != 0) {
		mutex_unlock(&wdt->lock);
		return -EBUSY;
	}

	/* plain assignment also clears any stale EXPECT_CLOSE flag */
	wdt->flags = TS72XX_WDT_BUSY_FLAG;
	wdt->regval = regval;
	file->private_data = wdt;

	ts72xx_wdt_start(wdt);

	mutex_unlock(&wdt->lock);
	return nonseekable_open(inode, file);
}
/*
 * Close the watchdog device.  The timer is stopped only when the magic
 * 'V' character was written beforehand (see ts72xx_wdt_write());
 * otherwise it keeps running so an unexpected daemon death still
 * resets the board.
 *
 * Fix: corrected the misspelled warning text ("unexpectly").
 */
static int ts72xx_wdt_release(struct inode *inode, struct file *file)
{
	struct ts72xx_wdt *wdt = file->private_data;

	if (mutex_lock_interruptible(&wdt->lock))
		return -ERESTARTSYS;

	if ((wdt->flags & TS72XX_WDT_EXPECT_CLOSE_FLAG) != 0) {
		ts72xx_wdt_stop(wdt);
	} else {
		dev_warn(&wdt->pdev->dev,
			 "TS-72XX WDT device closed unexpectedly. "
			 "Watchdog timer will not stop!\n");
		/*
		 * Kick it one more time, to give userland some time
		 * to recover (for example, respawning the kicker
		 * daemon).
		 */
		ts72xx_wdt_kick(wdt);
	}

	wdt->flags = 0;
	mutex_unlock(&wdt->lock);
	return 0;
}
/*
 * Any write kicks the watchdog.  Unless "nowayout" is set, the buffer
 * is also scanned for the magic 'V' character which arms the
 * expect-close flag consumed by ts72xx_wdt_release().
 *
 * Improvements: the EXPECT_CLOSE flag clear is loop-invariant and is
 * now done once before the scan (previously re-executed for every
 * byte), and the loop index is size_t to match @len.
 */
static ssize_t ts72xx_wdt_write(struct file *file,
				const char __user *data,
				size_t len,
				loff_t *ppos)
{
	struct ts72xx_wdt *wdt = file->private_data;

	if (!len)
		return 0;

	if (mutex_lock_interruptible(&wdt->lock))
		return -ERESTARTSYS;

	ts72xx_wdt_kick(wdt);

	/*
	 * Support for magic character closing.  User process
	 * writes 'V' into the device, just before it is closed.
	 * This means that we know that the wdt timer can be
	 * stopped after user closes the device.
	 */
	if (!nowayout) {
		size_t i;

		/* In case it was set long ago */
		wdt->flags &= ~TS72XX_WDT_EXPECT_CLOSE_FLAG;

		for (i = 0; i < len; i++) {
			char c;

			if (get_user(c, data + i)) {
				mutex_unlock(&wdt->lock);
				return -EFAULT;
			}
			if (c == 'V') {
				wdt->flags |= TS72XX_WDT_EXPECT_CLOSE_FLAG;
				break;
			}
		}
	}

	mutex_unlock(&wdt->lock);
	return len;
}
/* Capabilities advertised to userspace via WDIOC_GETSUPPORT */
static const struct watchdog_info winfo = {
	.options		= WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT |
				  WDIOF_MAGICCLOSE,
	.firmware_version	= 1,
	.identity		= "TS-72XX WDT",
};
/*
 * Watchdog character-device ioctl handler implementing the standard
 * Linux watchdog API (WDIOC_*).  All operations are serialized by
 * @wdt->lock.
 *
 * Fix: WDIOC_GETSUPPORT previously assigned the raw copy_to_user()
 * return value (number of bytes NOT copied) to @error, so a partial
 * copy leaked a positive byte count to userspace instead of an errno.
 * Any copy failure now yields -EFAULT.
 */
static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd,
			     unsigned long arg)
{
	struct ts72xx_wdt *wdt = file->private_data;
	void __user *argp = (void __user *)arg;
	int __user *p = (int __user *)argp;
	int error = 0;

	if (mutex_lock_interruptible(&wdt->lock))
		return -ERESTARTSYS;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		if (copy_to_user(argp, &winfo, sizeof(winfo)))
			error = -EFAULT;
		break;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		error = put_user(0, p);
		break;

	case WDIOC_KEEPALIVE:
		ts72xx_wdt_kick(wdt);
		break;

	case WDIOC_SETOPTIONS: {
		int options;

		if (get_user(options, p)) {
			error = -EFAULT;
			break;
		}

		/* at least one of the two options must be given */
		error = -EINVAL;

		if ((options & WDIOS_DISABLECARD) != 0) {
			ts72xx_wdt_stop(wdt);
			error = 0;
		}

		if ((options & WDIOS_ENABLECARD) != 0) {
			ts72xx_wdt_start(wdt);
			error = 0;
		}

		break;
	}

	case WDIOC_SETTIMEOUT: {
		int new_timeout;

		if (get_user(new_timeout, p)) {
			error = -EFAULT;
		} else {
			int regval;

			regval = timeout_to_regval(new_timeout);
			if (regval < 0) {
				error = -EINVAL;
			} else {
				/* restart the timer with the new timeout */
				ts72xx_wdt_stop(wdt);
				wdt->regval = regval;
				ts72xx_wdt_start(wdt);
			}
		}
		if (error)
			break;

		/*
		 * Intentional fallthrough: on success the (possibly
		 * rounded) timeout is reported back, per watchdog API.
		 */
		/*FALLTHROUGH*/
	}

	case WDIOC_GETTIMEOUT:
		if (put_user(regval_to_timeout(wdt->regval), p))
			error = -EFAULT;
		break;

	default:
		error = -ENOTTY;
		break;
	}

	mutex_unlock(&wdt->lock);
	return error;
}
/* Character-device entry points for the watchdog node */
static const struct file_operations ts72xx_wdt_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.open		= ts72xx_wdt_open,
	.release	= ts72xx_wdt_release,
	.write		= ts72xx_wdt_write,
	.unlocked_ioctl	= ts72xx_wdt_ioctl,
};

/* Registered as the standard /dev/watchdog misc device */
static struct miscdevice ts72xx_wdt_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &ts72xx_wdt_fops,
};
/*
 * Probe: claim and map the two CPLD register windows (0 = control,
 * 1 = feed), make sure the watchdog starts disabled, and register the
 * misc device.  The fail_* labels unwind in reverse order of
 * acquisition.
 */
static int ts72xx_wdt_probe(struct platform_device *pdev)
{
	struct ts72xx_wdt *wdt;
	struct resource *r1, *r2;
	int error = 0;

	wdt = kzalloc(sizeof(struct ts72xx_wdt), GFP_KERNEL);
	if (!wdt) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	/* resource 0: watchdog control register */
	r1 = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r1) {
		dev_err(&pdev->dev, "failed to get memory resource\n");
		error = -ENODEV;
		goto fail;
	}

	r1 = request_mem_region(r1->start, resource_size(r1), pdev->name);
	if (!r1) {
		dev_err(&pdev->dev, "cannot request memory region\n");
		error = -EBUSY;
		goto fail;
	}

	wdt->control_reg = ioremap(r1->start, resource_size(r1));
	if (!wdt->control_reg) {
		dev_err(&pdev->dev, "failed to map memory\n");
		error = -ENODEV;
		goto fail_free_control;
	}

	/* resource 1: watchdog feed register */
	r2 = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!r2) {
		dev_err(&pdev->dev, "failed to get memory resource\n");
		error = -ENODEV;
		goto fail_unmap_control;
	}

	r2 = request_mem_region(r2->start, resource_size(r2), pdev->name);
	if (!r2) {
		dev_err(&pdev->dev, "cannot request memory region\n");
		error = -EBUSY;
		goto fail_unmap_control;
	}

	wdt->feed_reg = ioremap(r2->start, resource_size(r2));
	if (!wdt->feed_reg) {
		dev_err(&pdev->dev, "failed to map memory\n");
		error = -ENODEV;
		goto fail_free_feed;
	}

	/* ts72xx_wdt_pdev lets the open() path find us via drvdata */
	platform_set_drvdata(pdev, wdt);
	ts72xx_wdt_pdev = pdev;
	wdt->pdev = pdev;
	mutex_init(&wdt->lock);

	/* make sure that the watchdog is disabled */
	ts72xx_wdt_stop(wdt);

	error = misc_register(&ts72xx_wdt_miscdev);
	if (error) {
		dev_err(&pdev->dev, "failed to register miscdev\n");
		goto fail_unmap_feed;
	}

	dev_info(&pdev->dev, "TS-72xx Watchdog driver\n");

	return 0;

fail_unmap_feed:
	platform_set_drvdata(pdev, NULL);
	iounmap(wdt->feed_reg);
fail_free_feed:
	release_mem_region(r2->start, resource_size(r2));
fail_unmap_control:
	iounmap(wdt->control_reg);
fail_free_control:
	release_mem_region(r1->start, resource_size(r1));
fail:
	kfree(wdt);
	return error;
}
/*
 * Remove: deregister the misc device, then unmap and release both
 * register windows in reverse probe order.
 */
static int ts72xx_wdt_remove(struct platform_device *pdev)
{
	struct ts72xx_wdt *wdt = platform_get_drvdata(pdev);
	struct resource *res;
	int error;

	error = misc_deregister(&ts72xx_wdt_miscdev);

	platform_set_drvdata(pdev, NULL);

	/* resource 1: feed register */
	iounmap(wdt->feed_reg);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	release_mem_region(res->start, resource_size(res));

	/* resource 0: control register */
	iounmap(wdt->control_reg);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));

	kfree(wdt);
	return error;
}
/* Platform driver glue; module init/exit generated by the helper macro */
static struct platform_driver ts72xx_wdt_driver = {
	.probe		= ts72xx_wdt_probe,
	.remove		= ts72xx_wdt_remove,
	.driver		= {
		.name	= "ts72xx-wdt",
		.owner	= THIS_MODULE,
	},
};

module_platform_driver(ts72xx_wdt_driver);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("TS-72xx SBC Watchdog");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:ts72xx-wdt");
| gpl-2.0 |
percy-g2/android_kernel_sony_u8500_OLD | drivers/net/sunhme.c | 2380 | 93641 | /* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching,
* auto carrier detecting ethernet driver. Also known as the
* "Happy Meal Ethernet" found on SunSwift SBUS cards.
*
* Copyright (C) 1996, 1998, 1999, 2002, 2003,
* 2006, 2008 David S. Miller (davem@davemloft.net)
*
* Changes :
* 2000/11/11 Willy Tarreau <willy AT meta-x.org>
* - port to non-sparc architectures. Tested only on x86 and
* only currently works with QFE PCI cards.
* - ability to specify the MAC address at module load time by passing this
* argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/crc32.h>
#include <linux/random.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#ifdef CONFIG_SPARC
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/auxio.h>
#endif
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#ifdef CONFIG_PCI
#include <linux/pci.h>
#endif
#include "sunhme.h"
#define DRV_NAME "sunhme"
#define DRV_VERSION "3.10"
#define DRV_RELDATE "August 26, 2008"
#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
static char version[] =
DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver");
MODULE_LICENSE("GPL");
static int macaddr[6];
/* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */
module_param_array(macaddr, int, NULL, 0);
MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set");
#ifdef CONFIG_SBUS
static struct quattro *qfe_sbus_list;
#endif
#ifdef CONFIG_PCI
static struct quattro *qfe_pci_list;
#endif
#undef HMEDEBUG
#undef SXDEBUG
#undef RXDEBUG
#undef TXDEBUG
#undef TXLOGGING
#ifdef TXLOGGING
/*
 * Optional TX debugging aid (compiled in when TXLOGGING is defined):
 * a fixed-size circular log of TX events, recording the jiffies
 * timestamp, ring indices, the triggering action and a status word.
 */
struct hme_tx_logent {
	unsigned int tstamp;
	int tx_new, tx_old;
	unsigned int action;
#define TXLOG_ACTION_IRQ	0x01
#define TXLOG_ACTION_TXMIT	0x02
#define TXLOG_ACTION_TBUSY	0x04
#define TXLOG_ACTION_NBUFS	0x08
	unsigned int status;
};
#define TX_LOG_LEN	128
static struct hme_tx_logent tx_log[TX_LOG_LEN];
static int txlog_cur_entry;

/* Append one entry to the circular TX log; IRQs off around the update */
static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s)
{
	struct hme_tx_logent *tlp;
	unsigned long flags;

	local_irq_save(flags);
	tlp = &tx_log[txlog_cur_entry];
	tlp->tstamp = (unsigned int)jiffies;
	tlp->tx_new = hp->tx_new;
	tlp->tx_old = hp->tx_old;
	tlp->action = a;
	tlp->status = s;
	txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1);
	local_irq_restore(flags);
}

/* Dump the whole TX log, oldest entry first */
static __inline__ void tx_dump_log(void)
{
	int i, this;

	this = txlog_cur_entry;
	for (i = 0; i < TX_LOG_LEN; i++) {
		printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i,
		       tx_log[this].tstamp,
		       tx_log[this].tx_new, tx_log[this].tx_old,
		       tx_log[this].action, tx_log[this].status);
		this = (this + 1) & (TX_LOG_LEN - 1);
	}
}

/* Dump the hardware TX descriptor ring, four descriptors per line */
static __inline__ void tx_dump_ring(struct happy_meal *hp)
{
	struct hmeal_init_block *hb = hp->happy_block;
	struct happy_meal_txd *tp = &hb->happy_meal_txd[0];
	int i;

	for (i = 0; i < TX_RING_SIZE; i+=4) {
		printk("TXD[%d..%d]: [%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n",
		       i, i + 4,
		       le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr),
		       le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr),
		       le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr),
		       le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr));
	}
}
#else
#define tx_add_log(hp, a, s) do { } while(0)
#define tx_dump_log() do { } while(0)
#define tx_dump_ring(hp) do { } while(0)
#endif
#ifdef HMEDEBUG
#define HMD(x) printk x
#else
#define HMD(x)
#endif
/* #define AUTO_SWITCH_DEBUG */
#ifdef AUTO_SWITCH_DEBUG
#define ASD(x) printk x
#else
#define ASD(x)
#endif
#define DEFAULT_IPG0 16 /* For lance-mode only */
#define DEFAULT_IPG1 8 /* For all modes */
#define DEFAULT_IPG2 4 /* For all modes */
#define DEFAULT_JAMSIZE 4 /* Toe jam */
/* NOTE: In the descriptor writes one _must_ write the address
* member _first_. The card must not be allowed to see
* the updated descriptor flags until the address is
* correct. I've added a write memory barrier between
* the two stores so that I can sleep well at night... -DaveM
*/
#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
static void sbus_hme_write32(void __iomem *reg, u32 val)
{
sbus_writel(val, reg);
}
static u32 sbus_hme_read32(void __iomem *reg)
{
return sbus_readl(reg);
}
static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
{
rxd->rx_addr = (__force hme32)addr;
wmb();
rxd->rx_flags = (__force hme32)flags;
}
static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
{
txd->tx_addr = (__force hme32)addr;
wmb();
txd->tx_flags = (__force hme32)flags;
}
static u32 sbus_hme_read_desc32(hme32 *p)
{
return (__force u32)*p;
}
static void pci_hme_write32(void __iomem *reg, u32 val)
{
writel(val, reg);
}
static u32 pci_hme_read32(void __iomem *reg)
{
return readl(reg);
}
/* Publish an RX descriptor (PCI, little-endian descriptors).  Address is
 * stored before flags with a wmb() in between — see the NOTE above about
 * descriptor write ordering.
 */
static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr)
{
	rxd->rx_addr = (__force hme32)cpu_to_le32(addr);
	wmb();
	rxd->rx_flags = (__force hme32)cpu_to_le32(flags);
}
/* Publish a TX descriptor (PCI, little-endian descriptors).  Address first,
 * barrier, then flags.
 */
static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr)
{
	txd->tx_addr = (__force hme32)cpu_to_le32(addr);
	wmb();
	txd->tx_flags = (__force hme32)cpu_to_le32(flags);
}
/* Descriptor words are little-endian on PCI; convert to CPU byte order. */
static u32 pci_hme_read_desc32(hme32 *p)
{
	return le32_to_cpup((__le32 *)p);
}
#define hme_write32(__hp, __reg, __val) \
((__hp)->write32((__reg), (__val)))
#define hme_read32(__hp, __reg) \
((__hp)->read32(__reg))
#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
((__hp)->write_rxd((__rxd), (__flags), (__addr)))
#define hme_write_txd(__hp, __txd, __flags, __addr) \
((__hp)->write_txd((__txd), (__flags), (__addr)))
#define hme_read_desc32(__hp, __p) \
((__hp)->read_desc32(__p))
#define hme_dma_map(__hp, __ptr, __size, __dir) \
((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir)))
#define hme_dma_unmap(__hp, __addr, __size, __dir) \
((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir)))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)))
#else
#ifdef CONFIG_SBUS
/* SBUS only compilation */
#define hme_write32(__hp, __reg, __val) \
sbus_writel((__val), (__reg))
#define hme_read32(__hp, __reg) \
sbus_readl(__reg)
#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \
wmb(); \
(__rxd)->rx_flags = (__force hme32)(u32)(__flags); \
} while(0)
#define hme_write_txd(__hp, __txd, __flags, __addr) \
do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \
wmb(); \
(__txd)->tx_flags = (__force hme32)(u32)(__flags); \
} while(0)
#define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p))
#define hme_dma_map(__hp, __ptr, __size, __dir) \
dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
#define hme_dma_unmap(__hp, __addr, __size, __dir) \
dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
/* DMA sync helpers for the SBUS-only build.  These previously expanded to
 * the non-existent dma_dma_sync_single_for_{cpu,device}() symbols — a typo
 * that breaks compilation of any SBUS-only configuration that uses them.
 * The correct DMA API names are dma_sync_single_for_cpu()/_for_device().
 */
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
	dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
	dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
#else
/* PCI only compilation */
#define hme_write32(__hp, __reg, __val) \
writel((__val), (__reg))
#define hme_read32(__hp, __reg) \
readl(__reg)
#define hme_write_rxd(__hp, __rxd, __flags, __addr) \
do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \
wmb(); \
(__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \
} while(0)
#define hme_write_txd(__hp, __txd, __flags, __addr) \
do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \
wmb(); \
(__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \
} while(0)
/* PCI-only build: descriptors are little-endian, convert on read.  The hp
 * argument is unused here but keeps the call signature uniform with the
 * dual SBUS/PCI build's method-pointer variant.
 */
static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p)
{
	return le32_to_cpup((__le32 *)p);
}
#define hme_dma_map(__hp, __ptr, __size, __dir) \
pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir))
#define hme_dma_unmap(__hp, __addr, __size, __dir) \
pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \
pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \
pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))
#endif
#endif
/* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */
/* Clock one bit out over the MIF bit-bang interface: present the data bit
 * on BBDATA, then pulse BBCLOCK low->high to latch it into the PHY.
 */
static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit)
{
	hme_write32(hp, tregs + TCVR_BBDATA, bit);
	hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
	hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
}
#if 0
/* Unused (kept under #if 0) variant of the bit-bang input clocker: pulse
 * the clock, then sample the MDIO line selected by `internal`.  Returns
 * the raw masked bit (non-zero/zero), not a 0/1 value.
 */
static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal)
{
	u32 ret;

	hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
	hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
	ret = hme_read32(hp, tregs + TCVR_CFG);
	if (internal)
		ret &= TCV_CFG_MDIO0;
	else
		ret &= TCV_CFG_MDIO1;
	return ret;
}
#endif
/* Clock one bit in over the MIF bit-bang interface: drop the clock, wait,
 * sample the MDIO line for the selected transceiver (MDIO0 = internal,
 * MDIO1 = external), then raise the clock again.  Returns the raw masked
 * bit value (non-zero when the line is high), not a normalized 0/1.
 */
static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal)
{
	u32 retval;

	hme_write32(hp, tregs + TCVR_BBCLOCK, 0);
	udelay(1);
	retval = hme_read32(hp, tregs + TCVR_CFG);
	if (internal)
		retval &= TCV_CFG_MDIO0;
	else
		retval &= TCV_CFG_MDIO1;
	hme_write32(hp, tregs + TCVR_BBCLOCK, 1);
	return retval;
}
#define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */
/* Read a PHY register by bit-banging an MII management read frame
 * (preamble, start/read opcode, 5-bit PHY address, 5-bit register,
 * turnaround, 16 data bits).  Used when the MIF frame engine is not
 * enabled (HFLAG_FENABLE clear).
 */
static int happy_meal_bb_read(struct happy_meal *hp,
			      void __iomem *tregs, int reg)
{
	u32 tmp;
	int retval = 0;
	int i;

	ASD(("happy_meal_bb_read: reg=%d ", reg));

	/* Enable the MIF BitBang outputs. */
	hme_write32(hp, tregs + TCVR_BBOENAB, 1);

	/* Force BitBang into the idle state: 32 ones = MII preamble. */
	for (i = 0; i < 32; i++)
		BB_PUT_BIT(hp, tregs, 1);

	/* Give it the read sequence (start + read opcode: 0110). */
	BB_PUT_BIT(hp, tregs, 0);
	BB_PUT_BIT(hp, tregs, 1);
	BB_PUT_BIT(hp, tregs, 1);
	BB_PUT_BIT(hp, tregs, 0);

	/* Give it the PHY address, MSB first. */
	tmp = hp->paddr & 0xff;
	for (i = 4; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));

	/* Tell it what register we want to read, MSB first. */
	tmp = (reg & 0xff);
	for (i = 4; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));

	/* Close down the MIF BitBang outputs so the PHY can drive MDIO. */
	hme_write32(hp, tregs + TCVR_BBOENAB, 0);

	/* Now read in the value (one turnaround bit, then 16 data bits,
	 * then idle clocks).
	 *
	 * NOTE(review): BB_GET_BIT2() returns the raw MDIO mask, and no
	 * shift is applied here, so retval accumulates the mask rather than
	 * a per-position bit — verify against the hardware usage before
	 * relying on the exact value.
	 */
	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
	for (i = 15; i >= 0; i--)
		retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));
	(void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal));

	ASD(("value=%x\n", retval));
	return retval;
}
/* Write a PHY register by bit-banging an MII management write frame
 * (preamble, start/write opcode, PHY address, register, turnaround,
 * 16 data bits).  Counterpart of happy_meal_bb_read().
 */
static void happy_meal_bb_write(struct happy_meal *hp,
				void __iomem *tregs, int reg,
				unsigned short value)
{
	u32 tmp;
	int i;

	ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value));

	/* Enable the MIF BitBang outputs. */
	hme_write32(hp, tregs + TCVR_BBOENAB, 1);

	/* Force BitBang into the idle state: 32 ones = MII preamble. */
	for (i = 0; i < 32; i++)
		BB_PUT_BIT(hp, tregs, 1);

	/* Give it write sequence (start + write opcode: 0101). */
	BB_PUT_BIT(hp, tregs, 0);
	BB_PUT_BIT(hp, tregs, 1);
	BB_PUT_BIT(hp, tregs, 0);
	BB_PUT_BIT(hp, tregs, 1);

	/* Give it the PHY address, MSB first. */
	tmp = (hp->paddr & 0xff);
	for (i = 4; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));

	/* Tell it what register we will be writing, MSB first. */
	tmp = (reg & 0xff);
	for (i = 4; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1));

	/* Tell it to become ready for the bits (turnaround: 10). */
	BB_PUT_BIT(hp, tregs, 1);
	BB_PUT_BIT(hp, tregs, 0);

	/* Shift out the 16 data bits, MSB first. */
	for (i = 15; i >= 0; i--)
		BB_PUT_BIT(hp, tregs, ((value >> i) & 1));

	/* Close down the MIF BitBang outputs. */
	hme_write32(hp, tregs + TCVR_BBOENAB, 0);
}
#define TCVR_READ_TRIES 16
/* Read an MII PHY register.  Uses the MIF frame engine when enabled
 * (HFLAG_FENABLE), otherwise falls back to software bit-banging.
 * Returns the 16-bit register value, or TCVR_FAILURE on timeout or when
 * no transceiver is present.
 */
static int happy_meal_tcvr_read(struct happy_meal *hp,
				void __iomem *tregs, int reg)
{
	int tries = TCVR_READ_TRIES;
	int retval;

	ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg));
	if (hp->tcvr_type == none) {
		ASD(("no transceiver, value=TCVR_FAILURE\n"));
		return TCVR_FAILURE;
	}

	if (!(hp->happy_flags & HFLAG_FENABLE)) {
		ASD(("doing bit bang\n"));
		return happy_meal_bb_read(hp, tregs, reg);
	}

	/* Kick off a read frame, then poll bit 16 (frame-complete). */
	hme_write32(hp, tregs + TCVR_FRAME,
		    (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18)));
	while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
		udelay(20);
	if (!tries) {
		printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n");
		return TCVR_FAILURE;
	}
	retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff;
	ASD(("value=%04x\n", retval));
	return retval;
}
#define TCVR_WRITE_TRIES 16
/* Write an MII PHY register.  Uses the MIF frame engine when enabled,
 * otherwise bit-bangs the frame.  Failures are logged but not returned to
 * the caller.
 */
static void happy_meal_tcvr_write(struct happy_meal *hp,
				  void __iomem *tregs, int reg,
				  unsigned short value)
{
	int tries = TCVR_WRITE_TRIES;

	ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value));

	/* Welcome to Sun Microsystems, can I take your order please? */
	if (!(hp->happy_flags & HFLAG_FENABLE)) {
		happy_meal_bb_write(hp, tregs, reg, value);
		return;
	}

	/* Would you like fries with that?  (Start the write frame.) */
	hme_write32(hp, tregs + TCVR_FRAME,
		    (FRAME_WRITE | (hp->paddr << 23) |
		     ((reg & 0xff) << 18) | (value & 0xffff)));
	while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries)
		udelay(20);

	/* Anything else? */
	if (!tries)
		printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n");

	/* Fifty-two cents is your change, have a nice day. */
}
/* Auto negotiation. The scheme is very simple. We have a timer routine
* that keeps watching the auto negotiation process as it progresses.
* The DP83840 is first told to start doing its thing, we set up the time
* and place the timer state machine in its initial state.
*
* Here the timer peeks at the DP83840 status registers at each click to see
* if the auto negotiation has completed, we assume here that the DP83840 PHY
* will time out at some point and just tell us what (didn't) happen. For
* complete coverage we only allow so many of the ticks at this level to run,
* when this has expired we print a warning message and try another strategy.
* This "other" strategy is to force the interface into various speed/duplex
* configurations and we stop when we see a link-up condition before the
* maximum number of "peek" ticks have occurred.
*
* Once a valid link status has been detected we configure the BigMAC and
* the rest of the Happy Meal to speak the most efficient protocol we could
* get a clean link for. The priority for link configurations, highest first
* is:
* 100 Base-T Full Duplex
* 100 Base-T Half Duplex
* 10 Base-T Full Duplex
* 10 Base-T Half Duplex
*
* We start a new timer now, after a successful auto negotiation status has
* been detected. This timer just waits for the link-up bit to get set in
* the BMCR of the DP83840. When this occurs we print a kernel log message
* describing the link type in use and the fact that it is up.
*
* If a fatal error of some sort is signalled and detected in the interrupt
* service routine, and the chip is reset, or the link is ifconfig'd down
* and then back up, this entire process repeats itself all over again.
*/
/* Step the forced link mode down by one notch per call: first drop from
 * full to half duplex, then from 100Mbit to 10Mbit.  Returns 0 after a
 * downgrade was programmed into BMCR, -1 once every combination has been
 * exhausted.
 */
static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs)
{
	/* Ordered from highest to lowest priority; only the first bit that
	 * is still set gets cleared on each invocation.
	 */
	static const unsigned int downgrade[] = { BMCR_FULLDPLX, BMCR_SPEED100 };
	int i;

	hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);

	for (i = 0; i < 2; i++) {
		if (hp->sw_bmcr & downgrade[i]) {
			hp->sw_bmcr &= ~downgrade[i];
			happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);
			return 0;
		}
	}

	/* We've tried everything. */
	return -1;
}
/* Log the auto-negotiated link configuration (transceiver location, speed
 * and duplex as reported by the partner's MII_LPA ability bits).
 */
static void display_link_mode(struct happy_meal *hp, void __iomem *tregs)
{
	printk(KERN_INFO "%s: Link is up using ", hp->dev->name);
	printk(hp->tcvr_type == external ? "external " : "internal ");
	printk("transceiver at ");

	hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
	if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL))
		printk(hp->sw_lpa & LPA_100FULL ?
		       "100Mb/s, Full Duplex.\n" : "100Mb/s, Half Duplex.\n");
	else
		printk(hp->sw_lpa & LPA_10FULL ?
		       "10Mb/s, Full Duplex.\n" : "10Mb/s, Half Duplex.\n");
}
/* Log the link configuration after force-mode selection, reading the speed
 * and duplex back out of our own BMCR settings.
 */
static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs)
{
	printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name);
	printk(hp->tcvr_type == external ? "external " : "internal ");
	printk("transceiver at ");

	hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
	printk(hp->sw_bmcr & BMCR_SPEED100 ? "100Mb/s, " : "10Mb/s, ");
	printk(hp->sw_bmcr & BMCR_FULLDPLX ? "Full Duplex.\n" : "Half Duplex.\n");
}
/* Program the BigMAC duplex bit to match the link mode: in arbwait the
 * partner's MII_LPA ability bits decide, otherwise our forced BMCR does.
 * Returns 0 on success, 1 if the partner advertised no usable abilities.
 *
 * hp->happy_lock must be held (only called from the link timer).
 */
static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs)
{
	int full;

	/* All we care about is making sure the bigmac tx_cfg has a
	 * proper duplex setting.
	 */
	if (hp->timer_state == arbwait) {
		hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);
		if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL)))
			goto no_response;
		if (hp->sw_lpa & LPA_100FULL)
			full = 1;
		else if (hp->sw_lpa & LPA_100HALF)
			full = 0;
		else if (hp->sw_lpa & LPA_10FULL)
			full = 1;
		else
			full = 0;
	} else {
		/* Forcing a link mode. */
		hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (hp->sw_bmcr & BMCR_FULLDPLX)
			full = 1;
		else
			full = 0;
	}

	/* Before changing other bits in the tx_cfg register, and in
	 * general any of other the TX config registers too, you
	 * must:
	 * 1) Clear Enable
	 * 2) Poll with reads until that bit reads back as zero
	 * 3) Make TX configuration changes
	 * 4) Set Enable once more
	 */
	hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
		    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
		    ~(BIGMAC_TXCFG_ENABLE));
	while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE)
		barrier();
	if (full) {
		hp->happy_flags |= HFLAG_FULL;
		hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
			    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
			    BIGMAC_TXCFG_FULLDPLX);
	} else {
		hp->happy_flags &= ~(HFLAG_FULL);
		hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
			    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) &
			    ~(BIGMAC_TXCFG_FULLDPLX));
	}
	hme_write32(hp, hp->bigmacregs + BMAC_TXCFG,
		    hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) |
		    BIGMAC_TXCFG_ENABLE);
	return 0;

no_response:
	return 1;
}
static int happy_meal_init(struct happy_meal *hp);
/* Identify a Lucent PHY from its MII ID registers (registers 2 and 3).
 * Lucent parts need special handling: the DP83840 CSCONFIG register is
 * skipped for them elsewhere in this driver.  Returns 1 for Lucent,
 * 0 otherwise.
 */
static int is_lucent_phy(struct happy_meal *hp)
{
	void __iomem *tregs = hp->tcvregs;
	unsigned short id_high, id_low;

	id_high = happy_meal_tcvr_read(hp, tregs, 2);
	id_low = happy_meal_tcvr_read(hp, tregs, 3);

	if ((id_high & 0xffff) == 0x0180 && ((id_low & 0xffff) >> 10) == 0x1d)
		return 1;

	return 0;
}
/* Link management state machine, run every 1.2 seconds while active.
 * States: arbwait (waiting for autoneg to complete), lupwait (autoneg done,
 * waiting for link-up), ltrywait (forcing modes one by one), asleep (link
 * settled, timer should be idle).  See the block comment above
 * try_next_permutation() for the overall strategy.
 */
static void happy_meal_timer(unsigned long data)
{
	struct happy_meal *hp = (struct happy_meal *) data;
	void __iomem *tregs = hp->tcvregs;
	int restart_timer = 0;

	spin_lock_irq(&hp->happy_lock);

	hp->timer_ticks++;
	switch(hp->timer_state) {
	case arbwait:
		/* Give auto-negotiation 10 ticks (~12 seconds at the 1.2s
		 * timer period) before giving up and forcing a link mode.
		 */
		if (hp->timer_ticks >= 10) {
			/* Enter force mode. */
	do_force_mode:
			hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
			printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
			       hp->dev->name);
			hp->sw_bmcr = BMCR_SPEED100;
			happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

			if (!is_lucent_phy(hp)) {
				/* Clears CSCONFIG_TCVDISAB so the transceiver
				 * is active for the ltrywait sequence.
				 * NOTE(review): the historical comment here
				 * said "disable", which contradicts the
				 * bit operation — the code is kept as-is.
				 */
				hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
				hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
			}
			hp->timer_state = ltrywait;
			hp->timer_ticks = 0;
			restart_timer = 1;
		} else {
			/* Anything interesting happen? */
			hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
			if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
				int ret;

				/* Just what we've been waiting for... */
				ret = set_happy_link_modes(hp, tregs);
				if (ret) {
					/* Ooops, something bad happened, go to force
					 * mode.
					 *
					 * XXX Broken hubs which don't support 802.3u
					 * XXX auto-negotiation make this happen as well.
					 */
					goto do_force_mode;
				}

				/* Success, at least so far, advance our state engine. */
				hp->timer_state = lupwait;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case lupwait:
		/* Auto negotiation was successful and we are awaiting a
		 * link up status. I have decided to let this timer run
		 * forever until some sort of error is signalled, reporting
		 * a message to the user at 10 second intervals.
		 */
		hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
		if (hp->sw_bmsr & BMSR_LSTATUS) {
			/* Wheee, it's up, display the link mode in use and put
			 * the timer to sleep.
			 */
			display_link_mode(hp, tregs);
			hp->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (hp->timer_ticks >= 10) {
				printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
				       "not completely up.\n", hp->dev->name);
				hp->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case ltrywait:
		/* Making the timeout here too long can make it take
		 * annoyingly long to attempt all of the link mode
		 * permutations, but then again this is essentially
		 * error recovery code for the most part.
		 */
		hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
		hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
		if (hp->timer_ticks == 1) {
			if (!is_lucent_phy(hp)) {
				/* Tick 1: set CSCONFIG_TCVDISAB; it is cleared
				 * again on tick 2 and link state is checked on
				 * the following ticks.
				 */
				hp->sw_csconfig |= CSCONFIG_TCVDISAB;
				happy_meal_tcvr_write(hp, tregs,
						      DP83840_CSCONFIG, hp->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (hp->timer_ticks == 2) {
			if (!is_lucent_phy(hp)) {
				hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				happy_meal_tcvr_write(hp, tregs,
						      DP83840_CSCONFIG, hp->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (hp->sw_bmsr & BMSR_LSTATUS) {
			/* Force mode selection success. */
			display_forced_link_mode(hp, tregs);
			set_happy_link_modes(hp, tregs); /* XXX error? then what? */
			hp->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
				int ret;

				ret = try_next_permutation(hp, tregs);
				if (ret == -1) {
					/* Aieee, tried them all, reset the
					 * chip and try all over again.
					 */

					/* Let the user know... */
					printk(KERN_NOTICE "%s: Link down, cable problem?\n",
					       hp->dev->name);

					ret = happy_meal_init(hp);
					if (ret) {
						/* ho hum... */
						printk(KERN_ERR "%s: Error, cannot re-init the "
						       "Happy Meal.\n", hp->dev->name);
					}
					goto out;
				}
				if (!is_lucent_phy(hp)) {
					hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
									       DP83840_CSCONFIG);
					hp->sw_csconfig |= CSCONFIG_TCVDISAB;
					happy_meal_tcvr_write(hp, tregs,
							      DP83840_CSCONFIG, hp->sw_csconfig);
				}
				hp->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case asleep:
	default:
		/* Can't happen... */
		printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
		       hp->dev->name);
		restart_timer = 0;
		hp->timer_ticks = 0;
		hp->timer_state = asleep; /* foo on you */
		break;
	}

	if (restart_timer) {
		hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
		add_timer(&hp->happy_timer);
	}

out:
	spin_unlock_irq(&hp->happy_lock);
}
#define TX_RESET_TRIES 32
#define RX_RESET_TRIES 32
/* hp->happy_lock must be held */
/* Software-reset the BigMAC transmitter and poll until the self-clearing
 * reset bit drops, logging an error if the hardware never acknowledges.
 */
static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
{
	int limit = TX_RESET_TRIES;

	HMD(("happy_meal_tx_reset: reset, "));

	/* Would you like to try our SMCC Delux? */
	hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
	while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --limit)
		udelay(20);

	/* Lettuce, tomato, buggy hardware (no extra charge)? */
	if (limit == 0)
		printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");

	/* Take care. */
	HMD(("done\n"));
}
/* hp->happy_lock must be held */
/* Software-reset the BigMAC receiver, mirroring happy_meal_tx_reset():
 * write the reset bit and poll until it self-clears.
 */
static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
{
	int limit = RX_RESET_TRIES;

	HMD(("happy_meal_rx_reset: reset, "));

	/* We have a special on GNU/Viking hardware bugs today. */
	hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
	while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --limit)
		udelay(20);

	/* Will that be all? */
	if (limit == 0)
		printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");

	/* Don't forget your vik_1137125_wa. Have a nice day. */
	HMD(("done\n"));
}
#define STOP_TRIES 16
/* hp->happy_lock must be held */
/* Global software reset of the whole chip: assert GREG_RESET_ALL and wait
 * for the reset register to read back zero.
 */
static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
{
	int limit = STOP_TRIES;

	HMD(("happy_meal_stop: reset, "));

	/* We're consolidating our STB products, it's your lucky day. */
	hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
	while (hme_read32(hp, gregs + GREG_SWRESET) && --limit)
		udelay(20);

	/* Come back next week when we are "Sun Microelectronics". */
	if (limit == 0)
		printk(KERN_ERR "happy meal: Fry guys.");

	/* Remember: "Different name, same old buggy as shit hardware." */
	HMD(("done\n"));
}
/* hp->happy_lock must be held */
/* Fold the BigMAC hardware error counters into the software stats and
 * zero them, so counts accumulate correctly across calls.
 */
static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
{
	struct net_device_stats *stats = &hp->net_stats;

	stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
	hme_write32(hp, bregs + BMAC_RCRCECTR, 0);

	stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
	hme_write32(hp, bregs + BMAC_UNALECTR, 0);

	stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
	hme_write32(hp, bregs + BMAC_GLECTR, 0);

	/* NOTE(review): EXCTR is read twice before being cleared — once for
	 * tx_aborted_errors and once for collisions — so both totals include
	 * the same excess-collision count.
	 */
	stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);

	stats->collisions +=
		(hme_read32(hp, bregs + BMAC_EXCTR) +
		 hme_read32(hp, bregs + BMAC_LTCTR));
	hme_write32(hp, bregs + BMAC_EXCTR, 0);
	hme_write32(hp, bregs + BMAC_LTCTR, 0);
}
/* hp->happy_lock must be held */
/* Stop MIF polling mode if it is both enabled and currently active:
 * mask MIF interrupts, clear the poll-enable bit, and drop HFLAG_POLL.
 */
static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
{
	ASD(("happy_meal_poll_stop: "));

	/* If polling disabled or not polling already, nothing to do. */
	if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
	    (HFLAG_POLLENABLE | HFLAG_POLL)) {
		HMD(("not polling, return\n"));
		return;
	}

	/* Shut up the MIF. */
	ASD(("were polling, mif ints off, "));
	hme_write32(hp, tregs + TCVR_IMASK, 0xffff);

	/* Turn off polling. */
	ASD(("polling off, "));
	hme_write32(hp, tregs + TCVR_CFG,
		    hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));

	/* We are no longer polling. */
	hp->happy_flags &= ~(HFLAG_POLL);

	/* Let the bits set. */
	udelay(200);
	ASD(("done\n"));
}
/* Only Sun can take such nice parts and fuck up the programming interface
* like this. Good job guys...
*/
#define TCVR_RESET_TRIES 16 /* It should reset quickly */
#define TCVR_UNISOLATE_TRIES 32 /* Dis-isolation can take longer. */
/* hp->happy_lock must be held */
/* Reset the transceiver: briefly switch the MIF port selection to isolate
 * the other PHY, issue BMCR_RESET and wait for it to self-clear, refresh
 * our cached PHY register copies, un-isolate the PHY, and (for non-Lucent
 * parts) set CSCONFIG_DFBYPASS.  Returns 0 on success, -1 on any MIF read
 * failure or timeout.
 */
static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
{
	u32 tconfig;
	int result, tries = TCVR_RESET_TRIES;

	tconfig = hme_read32(hp, tregs + TCVR_CFG);
	/* NOTE(review): %08lx with a u32 argument — debug-only, compiled
	 * out unless AUTO_SWITCH_DEBUG is defined.
	 */
	ASD(("happy_meal_tcvr_reset: tcfg<%08lx> ", tconfig));
	if (hp->tcvr_type == external) {
		/* Isolate the internal PHY while the external one is active. */
		ASD(("external<"));
		hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
		hp->tcvr_type = internal;
		hp->paddr = TCV_PADDR_ITX;
		ASD(("ISOLATE,"));
		happy_meal_tcvr_write(hp, tregs, MII_BMCR,
				      (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (result == TCVR_FAILURE) {
			ASD(("phyread_fail>\n"));
			return -1;
		}
		ASD(("phyread_ok,PSELECT>"));
		hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
		hp->tcvr_type = external;
		hp->paddr = TCV_PADDR_ETX;
	} else {
		if (tconfig & TCV_CFG_MDIO1) {
			/* Isolate the external PHY while the internal one is
			 * active.
			 */
			ASD(("internal<PSELECT,"));
			hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
			ASD(("ISOLATE,"));
			happy_meal_tcvr_write(hp, tregs, MII_BMCR,
					      (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
			result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
			if (result == TCVR_FAILURE) {
				ASD(("phyread_fail>\n"));
				return -1;
			}
			ASD(("phyread_ok,~PSELECT>"));
			hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
			hp->tcvr_type = internal;
			hp->paddr = TCV_PADDR_ITX;
		}
	}

	/* Issue the PHY reset and wait for BMCR_RESET to self-clear. */
	ASD(("BMCR_RESET "));
	happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);

	while (--tries) {
		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (result == TCVR_FAILURE)
			return -1;
		hp->sw_bmcr = result;
		if (!(result & BMCR_RESET))
			break;
		udelay(20);
	}
	if (!tries) {
		ASD(("BMCR RESET FAILED!\n"));
		return -1;
	}
	ASD(("RESET_OK\n"));

	/* Get fresh copies of the PHY registers. */
	hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
	hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
	hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
	hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);

	/* Clear ISOLATE and wait for the PHY to come out of isolation. */
	ASD(("UNISOLATE"));
	hp->sw_bmcr &= ~(BMCR_ISOLATE);
	happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

	tries = TCVR_UNISOLATE_TRIES;
	while (--tries) {
		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (result == TCVR_FAILURE)
			return -1;
		if (!(result & BMCR_ISOLATE))
			break;
		udelay(20);
	}
	if (!tries) {
		ASD((" FAILED!\n"));
		return -1;
	}
	ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
	if (!is_lucent_phy(hp)) {
		result = happy_meal_tcvr_read(hp, tregs,
					      DP83840_CSCONFIG);
		happy_meal_tcvr_write(hp, tregs,
				      DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
	}
	return 0;
}
/* Figure out whether we have an internal or external transceiver.
*
* hp->happy_lock must be held
*/
/* Figure out whether we have an internal or external transceiver and set
 * hp->tcvr_type / hp->paddr / the MIF port-select bit accordingly.  When
 * MIF polling is active the decision comes from the poll status; otherwise
 * the MDIO0/MDIO1 attached-PHY bits in TCVR_CFG decide.
 */
static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
{
	unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);

	ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
	if (hp->happy_flags & HFLAG_POLL) {
		/* If we are polling, we must stop to get the transceiver type. */
		ASD(("<polling> "));
		if (hp->tcvr_type == internal) {
			if (tconfig & TCV_CFG_MDIO1) {
				/* An external PHY appeared: switch over to it. */
				ASD(("<internal> <poll stop> "));
				happy_meal_poll_stop(hp, tregs);
				hp->paddr = TCV_PADDR_ETX;
				hp->tcvr_type = external;
				ASD(("<external>\n"));
				tconfig &= ~(TCV_CFG_PENABLE);
				tconfig |= TCV_CFG_PSELECT;
				hme_write32(hp, tregs + TCVR_CFG, tconfig);
			}
		} else {
			if (hp->tcvr_type == external) {
				/* External PHY no longer responding: fall back
				 * to the internal one.
				 */
				ASD(("<external> "));
				if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
					ASD(("<poll stop> "));
					happy_meal_poll_stop(hp, tregs);
					hp->paddr = TCV_PADDR_ITX;
					hp->tcvr_type = internal;
					ASD(("<internal>\n"));
					hme_write32(hp, tregs + TCVR_CFG,
						    hme_read32(hp, tregs + TCVR_CFG) &
						    ~(TCV_CFG_PSELECT));
				}
				ASD(("\n"));
			} else {
				ASD(("<none>\n"));
			}
		}
	} else {
		u32 reread = hme_read32(hp, tregs + TCVR_CFG);

		/* Else we can just work off of the MDIO bits. */
		ASD(("<not polling> "));
		if (reread & TCV_CFG_MDIO1) {
			hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
			hp->paddr = TCV_PADDR_ETX;
			hp->tcvr_type = external;
			ASD(("<external>\n"));
		} else {
			if (reread & TCV_CFG_MDIO0) {
				hme_write32(hp, tregs + TCVR_CFG,
					    tconfig & ~(TCV_CFG_PSELECT));
				hp->paddr = TCV_PADDR_ITX;
				hp->tcvr_type = internal;
				ASD(("<internal>\n"));
			} else {
				printk(KERN_ERR "happy meal: Transceiver and a coke please.");
				hp->tcvr_type = none; /* Grrr... */
				ASD(("<none>\n"));
			}
		}
	}
}
/* The receive ring buffers are a bit tricky to get right. Here goes...
*
* The buffers we dma into must be 64 byte aligned. So we use a special
* alloc_skb() routine for the happy meal to allocate 64 bytes more than
* we really need.
*
* We use skb_reserve() to align the data block we get in the skb. We
* also program the etxregs->cfg register to use an offset of 2. This
* empirical constant plus the ethernet header size will always leave
* us with a nicely aligned ip header once we pass things up to the
* protocol layers.
*
* The numbers work out to:
*
* Max ethernet frame size 1518
* Ethernet header size 14
* Happy Meal base offset 2
*
* Say a skb data area is at 0xf001b010, and its size alloced is
* (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
*
* First our alloc_skb() routine aligns the data base to a 64 byte
* boundary. We now have 0xf001b040 as our skb data address. We
* plug this into the receive descriptor address.
*
* Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
* So now the data we will end up looking at starts at 0xf001b042. When
* the packet arrives, we will check out the size received and subtract
* this from the skb->length. Then we just pass the packet up to the
* protocols as is, and allocate a new skb to replace this slot we have
* just received from.
*
* The ethernet layer will strip the ether header from the front of the
* skb we just sent to it, this leaves us with the ip header sitting
* nicely aligned at 0xf001b050. Also, for tcp and udp packets the
* Happy Meal has even checksummed the tcp/udp data for us. The 16
* bit checksum is obtained from the low bits of the receive descriptor
* flags, thus:
*
* skb->csum = rxd->rx_flags & 0xffff;
* skb->ip_summed = CHECKSUM_COMPLETE;
*
* before sending off the skb to the protocols, and we are good as gold.
*/
/* Release every skb still attached to the RX and TX rings, unmapping its
 * DMA buffers first.  DMA addresses and lengths are recovered from the
 * descriptors themselves (via hme_read_desc32).
 */
static void happy_meal_clean_rings(struct happy_meal *hp)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (hp->rx_skbs[i] != NULL) {
			struct sk_buff *skb = hp->rx_skbs[i];
			struct happy_meal_rxd *rxd;
			u32 dma_addr;

			rxd = &hp->happy_block->happy_meal_rxd[i];
			dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
			dma_unmap_single(hp->dma_dev, dma_addr,
					 RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			hp->rx_skbs[i] = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (hp->tx_skbs[i] != NULL) {
			struct sk_buff *skb = hp->tx_skbs[i];
			struct happy_meal_txd *txd;
			u32 dma_addr;
			int frag;

			hp->tx_skbs[i] = NULL;

			/* A fragmented skb occupies nr_frags+1 consecutive
			 * descriptors: the head (frag 0) was mapped with
			 * dma_map_single(), the page fragments with
			 * dma_map_page().  Lengths sit in the low bits
			 * (TXFLAG_SIZE) of the flags word.
			 */
			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				txd = &hp->happy_block->happy_meal_txd[i];
				dma_addr = hme_read_desc32(hp, &txd->tx_addr);
				if (!frag)
					dma_unmap_single(hp->dma_dev, dma_addr,
							 (hme_read_desc32(hp, &txd->tx_flags)
							  & TXFLAG_SIZE),
							 DMA_TO_DEVICE);
				else
					dma_unmap_page(hp->dma_dev, dma_addr,
						       (hme_read_desc32(hp, &txd->tx_flags)
							& TXFLAG_SIZE),
						       DMA_TO_DEVICE);

				/* Advance the outer ring index over the extra
				 * descriptors, but not past the last one.
				 */
				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}

			dev_kfree_skb_any(skb);
		}
	}
}
/* hp->happy_lock must be held */
/* (Re)initialize the RX and TX rings: reset the ring indices, free any
 * leftover skbs, then allocate and map a fresh receive buffer for every RX
 * slot (see the long block comment above for the 64-byte alignment and
 * RX_OFFSET scheme).  A slot whose allocation fails is published with a
 * zeroed descriptor (no OWN bit) so the chip skips it.
 */
static void happy_meal_init_rings(struct happy_meal *hp)
{
	struct hmeal_init_block *hb = hp->happy_block;
	struct net_device *dev = hp->dev;
	int i;

	HMD(("happy_meal_init_rings: counters to zero, "));
	hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;

	/* Free any skippy bufs left around in the rings. */
	HMD(("clean, "));
	happy_meal_clean_rings(hp);

	/* Now get new skippy bufs for the receive ring. */
	HMD(("init rxring, "));
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
		if (!skb) {
			/* No OWN bit: chip will not use this slot. */
			hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
			continue;
		}
		hp->rx_skbs[i] = skb;
		skb->dev = dev;

		/* Because we reserve afterwards. */
		skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
		hme_write_rxd(hp, &hb->happy_meal_rxd[i],
			      (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
			      dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
					     DMA_FROM_DEVICE));
		skb_reserve(skb, RX_OFFSET);
	}

	HMD(("init txring, "));
	for (i = 0; i < TX_RING_SIZE; i++)
		hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);
	HMD(("done\n"));
}
/* hp->happy_lock must be held */
/* Kick off link establishment: either start 802.3u auto-negotiation with
 * everything the PHY supports advertised (ep NULL or autoneg enabled), or
 * force a specific speed/duplex per the ethtool request.  In both cases
 * the link timer state machine (happy_meal_timer) is armed at the end.
 */
static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
					      void __iomem *tregs,
					      struct ethtool_cmd *ep)
{
	int timeout;

	/* Read all of the registers we are interested in now. */
	hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
	hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
	hp->sw_physid1 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
	hp->sw_physid2 = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);

	/* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */

	hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
	if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
		/* Advertise everything we can support. */
		if (hp->sw_bmsr & BMSR_10HALF)
			hp->sw_advertise |= (ADVERTISE_10HALF);
		else
			hp->sw_advertise &= ~(ADVERTISE_10HALF);

		if (hp->sw_bmsr & BMSR_10FULL)
			hp->sw_advertise |= (ADVERTISE_10FULL);
		else
			hp->sw_advertise &= ~(ADVERTISE_10FULL);
		if (hp->sw_bmsr & BMSR_100HALF)
			hp->sw_advertise |= (ADVERTISE_100HALF);
		else
			hp->sw_advertise &= ~(ADVERTISE_100HALF);
		if (hp->sw_bmsr & BMSR_100FULL)
			hp->sw_advertise |= (ADVERTISE_100FULL);
		else
			hp->sw_advertise &= ~(ADVERTISE_100FULL);
		happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);

		/* XXX Currently no Happy Meal cards I know off support 100BaseT4,
		 * XXX and this is because the DP83840 does not support it, changes
		 * XXX would need to be made to the tx/rx logic in the driver as well
		 * XXX so I completely skip checking for it in the BMSR for now.
		 */

#ifdef AUTO_SWITCH_DEBUG
		ASD(("%s: Advertising [ ", hp->dev->name));
		if (hp->sw_advertise & ADVERTISE_10HALF)
			ASD(("10H "));
		if (hp->sw_advertise & ADVERTISE_10FULL)
			ASD(("10F "));
		if (hp->sw_advertise & ADVERTISE_100HALF)
			ASD(("100H "));
		if (hp->sw_advertise & ADVERTISE_100FULL)
			ASD(("100F "));
#endif

		/* Enable Auto-Negotiation, this is usually on already... */
		hp->sw_bmcr |= BMCR_ANENABLE;
		happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

		/* Restart it to make sure it is going. */
		hp->sw_bmcr |= BMCR_ANRESTART;
		happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

		/* BMCR_ANRESTART self clears when the process has begun. */

		timeout = 64;  /* More than enough. */
		while (--timeout) {
			hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
			if (!(hp->sw_bmcr & BMCR_ANRESTART))
				break; /* got it. */
			udelay(10);
		}
		if (!timeout) {
			printk(KERN_ERR "%s: Happy Meal would not start auto negotiation "
			       "BMCR=0x%04x\n", hp->dev->name, hp->sw_bmcr);
			printk(KERN_NOTICE "%s: Performing force link detection.\n",
			       hp->dev->name);
			goto force_link;
		} else {
			hp->timer_state = arbwait;
		}
	} else {
force_link:
		/* Force the link up, trying first a particular mode.
		 * Either we are here at the request of ethtool or
		 * because the Happy Meal would not start to autoneg.
		 */

		/* Disable auto-negotiation in BMCR, enable the duplex and
		 * speed setting, init the timer state machine, and fire it off.
		 */
		/* NOTE(review): this re-checks autoneg even though we can
		 * arrive here with autoneg enabled via the goto above; in
		 * that case 100Mbit is forced unconditionally — confirm this
		 * is the intended fallback.
		 */
		if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
			hp->sw_bmcr = BMCR_SPEED100;
		} else {
			if (ethtool_cmd_speed(ep) == SPEED_100)
				hp->sw_bmcr = BMCR_SPEED100;
			else
				hp->sw_bmcr = 0;
			if (ep->duplex == DUPLEX_FULL)
				hp->sw_bmcr |= BMCR_FULLDPLX;
		}
		happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

		if (!is_lucent_phy(hp)) {
			/* OK, seems we need do disable the transceiver for the first
			 * tick to make sure we get an accurate link state at the
			 * second tick.
			 */
			hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
							       DP83840_CSCONFIG);
			hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
			happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
					      hp->sw_csconfig);
		}
		hp->timer_state = ltrywait;
	}

	/* Arm the link timer: first tick in 1.2 seconds. */
	hp->timer_ticks = 0;
	hp->happy_timer.expires = jiffies + (12 * HZ)/10; /* 1.2 sec. */
	hp->happy_timer.data = (unsigned long) hp;
	hp->happy_timer.function = happy_meal_timer;
	add_timer(&hp->happy_timer);
}
/* Full hardware (re)initialisation: stop the chip, rebuild the descriptor
 * rings, program MAC address, multicast filter and DMA burst sizes,
 * re-enable TX/RX and restart link autonegotiation.
 *
 * Returns 0 on success, -EAGAIN if the transceiver type is unknown or the
 * transceiver reset fails.  The ordering of register writes below follows
 * the hardware's required bring-up sequence; do not reorder.
 *
 * hp->happy_lock must be held */
static int happy_meal_init(struct happy_meal *hp)
{
	void __iomem *gregs = hp->gregs;
	void __iomem *etxregs = hp->etxregs;
	void __iomem *erxregs = hp->erxregs;
	void __iomem *bregs = hp->bigmacregs;
	void __iomem *tregs = hp->tcvregs;
	u32 regtmp, rxcfg;
	unsigned char *e = &hp->dev->dev_addr[0];

	/* If auto-negotiation timer is running, kill it. */
	del_timer(&hp->happy_timer);

	HMD(("happy_meal_init: happy_flags[%08x] ",
	     hp->happy_flags));
	if (!(hp->happy_flags & HFLAG_INIT)) {
		HMD(("set HFLAG_INIT, "));
		hp->happy_flags |= HFLAG_INIT;
		/* Snapshot the error counters so they are not lost by the
		 * reset below.
		 */
		happy_meal_get_counters(hp, bregs);
	}

	/* Stop polling. */
	HMD(("to happy_meal_poll_stop\n"));
	happy_meal_poll_stop(hp, tregs);

	/* Stop transmitter and receiver. */
	HMD(("happy_meal_init: to happy_meal_stop\n"));
	happy_meal_stop(hp, gregs);

	/* Alloc and reset the tx/rx descriptor chains. */
	HMD(("happy_meal_init: to happy_meal_init_rings\n"));
	happy_meal_init_rings(hp);

	/* Shut up the MIF. */
	HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
	     hme_read32(hp, tregs + TCVR_IMASK)));
	hme_write32(hp, tregs + TCVR_IMASK, 0xffff);

	/* See if we can enable the MIF frame on this card to speak to the DP83840. */
	if (hp->happy_flags & HFLAG_FENABLE) {
		HMD(("use frame old[%08x], ",
		     hme_read32(hp, tregs + TCVR_CFG)));
		hme_write32(hp, tregs + TCVR_CFG,
			    hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
	} else {
		HMD(("use bitbang old[%08x], ",
		     hme_read32(hp, tregs + TCVR_CFG)));
		hme_write32(hp, tregs + TCVR_CFG,
			    hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
	}

	/* Check the state of the transceiver. */
	HMD(("to happy_meal_transceiver_check\n"));
	happy_meal_transceiver_check(hp, tregs);

	/* Put the Big Mac into a sane state. */
	HMD(("happy_meal_init: "));
	switch(hp->tcvr_type) {
	case none:
		/* Cannot operate if we don't know the transceiver type! */
		HMD(("AAIEEE no transceiver type, EAGAIN"));
		return -EAGAIN;

	case internal:
		/* Using the MII buffers. */
		HMD(("internal, using MII, "));
		hme_write32(hp, bregs + BMAC_XIFCFG, 0);
		break;

	case external:
		/* Not using the MII, disable it. */
		HMD(("external, disable MII, "));
		hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
		break;
	}

	if (happy_meal_tcvr_reset(hp, tregs))
		return -EAGAIN;

	/* Reset the Happy Meal Big Mac transceiver and the receiver. */
	HMD(("tx/rx reset, "));
	happy_meal_tx_reset(hp, bregs);
	happy_meal_rx_reset(hp, bregs);

	/* Set jam size and inter-packet gaps to reasonable defaults. */
	HMD(("jsize/ipg1/ipg2, "));
	hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
	hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
	hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);

	/* Load up the MAC address and random seed. */
	HMD(("rseed/macaddr, "));

	/* The docs recommend to use the 10LSB of our MAC here. */
	hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));

	hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
	hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
	hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));

	/* Program the 64-bit multicast hash filter: all-ones for
	 * ALLMULTI/large lists, a CRC-based hash otherwise, all-zero
	 * when promiscuous (the PMISC bit handles that case below).
	 */
	HMD(("htable, "));
	if ((hp->dev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(hp->dev) > 64)) {
		hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
	} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
		u16 hash_table[4];
		struct netdev_hw_addr *ha;
		char *addrs;
		u32 crc;

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, hp->dev) {
			addrs = ha->addr;

			/* Skip non-multicast entries. */
			if (!(*addrs & 1))
				continue;

			/* Top 6 bits of the little-endian CRC index the
			 * 64-bit hash filter.
			 */
			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
		hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
		hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
		hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
	} else {
		hme_write32(hp, bregs + BMAC_HTABLE3, 0);
		hme_write32(hp, bregs + BMAC_HTABLE2, 0);
		hme_write32(hp, bregs + BMAC_HTABLE1, 0);
		hme_write32(hp, bregs + BMAC_HTABLE0, 0);
	}

	/* Set the RX and TX ring ptrs. */
	HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
	     ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
	     ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
	hme_write32(hp, erxregs + ERX_RING,
		    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
	hme_write32(hp, etxregs + ETX_RING,
		    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));

	/* Parity issues in the ERX unit of some HME revisions can cause some
	 * registers to not be written unless their parity is even.  Detect such
	 * lost writes and simply rewrite with a low bit set (which will be ignored
	 * since the rxring needs to be 2K aligned).
	 */
	if (hme_read32(hp, erxregs + ERX_RING) !=
	    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
		hme_write32(hp, erxregs + ERX_RING,
			    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
			    | 0x4);

	/* Set the supported burst sizes. */
	HMD(("happy_meal_init: old[%08x] bursts<",
	     hme_read32(hp, gregs + GREG_CFG)));

#ifndef CONFIG_SPARC
	/* It is always PCI and can handle 64byte bursts. */
	hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
#else
	if ((hp->happy_bursts & DMA_BURST64) &&
	    ((hp->happy_flags & HFLAG_PCI) != 0
#ifdef CONFIG_SBUS
	     || sbus_can_burst64()
#endif
	     || 0)) {
		u32 gcfg = GREG_CFG_BURST64;

		/* I have no idea if I should set the extended
		 * transfer mode bit for Cheerio, so for now I
		 * do not.  -DaveM
		 */
#ifdef CONFIG_SBUS
		if ((hp->happy_flags & HFLAG_PCI) == 0) {
			struct platform_device *op = hp->happy_dev;
			if (sbus_can_dma_64bit()) {
				sbus_set_sbus64(&op->dev,
						hp->happy_bursts);
				gcfg |= GREG_CFG_64BIT;
			}
		}
#endif
		HMD(("64>"));
		hme_write32(hp, gregs + GREG_CFG, gcfg);
	} else if (hp->happy_bursts & DMA_BURST32) {
		HMD(("32>"));
		hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
	} else if (hp->happy_bursts & DMA_BURST16) {
		HMD(("16>"));
		hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
	} else {
		HMD(("XXX>"));
		hme_write32(hp, gregs + GREG_CFG, 0);
	}
#endif /* CONFIG_SPARC */

	/* Turn off interrupts we do not want to hear. */
	HMD((", enable global interrupts, "));
	hme_write32(hp, gregs + GREG_IMASK,
		    (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
		     GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));

	/* Set the transmit ring buffer size. */
	HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
	     hme_read32(hp, etxregs + ETX_RSIZE)));
	hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);

	/* Enable transmitter DVMA. */
	HMD(("tx dma enable old[%08x], ",
	     hme_read32(hp, etxregs + ETX_CFG)));
	hme_write32(hp, etxregs + ETX_CFG,
		    hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);

	/* This chip really rots, for the receiver sometimes when you
	 * write to its control registers not all the bits get there
	 * properly.  I cannot think of a sane way to provide complete
	 * coverage for this hardware bug yet.
	 */
	HMD(("erx regs bug old[%08x]\n",
	     hme_read32(hp, erxregs + ERX_CFG)));
	hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
	regtmp = hme_read32(hp, erxregs + ERX_CFG);
	hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
	if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
		printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
		printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
		       ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
		/* XXX Should return failure here... */
	}

	/* Enable Big Mac hash table filter. */
	HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
	     hme_read32(hp, bregs + BMAC_RXCFG)));
	rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
	if (hp->dev->flags & IFF_PROMISC)
		rxcfg |= BIGMAC_RXCFG_PMISC;
	hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);

	/* Let the bits settle in the chip. */
	udelay(10);

	/* Ok, configure the Big Mac transmitter. */
	HMD(("BIGMAC init, "));
	regtmp = 0;
	if (hp->happy_flags & HFLAG_FULL)
		regtmp |= BIGMAC_TXCFG_FULLDPLX;

	/* Don't turn on the "don't give up" bit for now.  It could cause hme
	 * to deadlock with the PHY if a Jabber occurs.
	 */
	hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);

	/* Give up after 16 TX attempts. */
	hme_write32(hp, bregs + BMAC_ALIMIT, 16);

	/* Enable the output drivers no matter what. */
	regtmp = BIGMAC_XCFG_ODENABLE;

	/* If card can do lance mode, enable it. */
	if (hp->happy_flags & HFLAG_LANCE)
		regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;

	/* Disable the MII buffers if using external transceiver. */
	if (hp->tcvr_type == external)
		regtmp |= BIGMAC_XCFG_MIIDISAB;

	HMD(("XIF config old[%08x], ",
	     hme_read32(hp, bregs + BMAC_XIFCFG)));
	hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);

	/* Start things up. */
	HMD(("tx old[%08x] and rx [%08x] ON!\n",
	     hme_read32(hp, bregs + BMAC_TXCFG),
	     hme_read32(hp, bregs + BMAC_RXCFG)));

	/* Set larger TX/RX size to allow for 802.1q */
	hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
	hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);

	hme_write32(hp, bregs + BMAC_TXCFG,
		    hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
	hme_write32(hp, bregs + BMAC_RXCFG,
		    hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);

	/* Get the autonegotiation started, and the watch timer ticking. */
	happy_meal_begin_auto_negotiation(hp, tregs, NULL);

	/* Success. */
	return 0;
}
/* Program the PHY's advertisement register to everything the link partner
 * and our BMSR say we can do, without bringing the interface up.  Called
 * at probe time so that a later open() starts from sane PHY state.
 *
 * hp->happy_lock must be held */
static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
{
	void __iomem *tregs = hp->tcvregs;
	void __iomem *bregs = hp->bigmacregs;
	void __iomem *gregs = hp->gregs;

	/* Quiesce the chip and the MIF interrupt sources first. */
	happy_meal_stop(hp, gregs);
	hme_write32(hp, tregs + TCVR_IMASK, 0xffff);

	/* Select MIF frame mode if the card supports it, bit-bang otherwise. */
	if (hp->happy_flags & HFLAG_FENABLE)
		hme_write32(hp, tregs + TCVR_CFG,
			    hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
	else
		hme_write32(hp, tregs + TCVR_CFG,
			    hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);

	happy_meal_transceiver_check(hp, tregs);
	switch(hp->tcvr_type) {
	case none:
		/* No usable transceiver detected; nothing to advertise. */
		return;
	case internal:
		hme_write32(hp, bregs + BMAC_XIFCFG, 0);
		break;
	case external:
		hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
		break;
	}
	if (happy_meal_tcvr_reset(hp, tregs))
		return;

	/* Latch PHY registers as of now. */
	hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
	hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);

	/* Advertise everything we can support. */
	if (hp->sw_bmsr & BMSR_10HALF)
		hp->sw_advertise |= (ADVERTISE_10HALF);
	else
		hp->sw_advertise &= ~(ADVERTISE_10HALF);

	if (hp->sw_bmsr & BMSR_10FULL)
		hp->sw_advertise |= (ADVERTISE_10FULL);
	else
		hp->sw_advertise &= ~(ADVERTISE_10FULL);
	if (hp->sw_bmsr & BMSR_100HALF)
		hp->sw_advertise |= (ADVERTISE_100HALF);
	else
		hp->sw_advertise &= ~(ADVERTISE_100HALF);
	if (hp->sw_bmsr & BMSR_100FULL)
		hp->sw_advertise |= (ADVERTISE_100FULL);
	else
		hp->sw_advertise &= ~(ADVERTISE_100FULL);

	/* Update the PHY advertisement register. */
	happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
}
/* Decode and report latched error interrupt bits, and decide whether the
 * chip needs a full reinit.
 *
 * @hp:     driver private state
 * @status: the GREG_STAT value latched by the interrupt handler
 *
 * Returns 1 if the chip was reset (caller must bail out of further
 * interrupt processing), 0 otherwise.
 *
 * Fix applied: the TX parity-error log message read "ParityErro " — a
 * truncated word; corrected to "ParityError " to match the RX path.
 *
 * Once status is latched (by happy_meal_interrupt) it is cleared by
 * the hardware, so we cannot re-read it and get a correct value.
 *
 * hp->happy_lock must be held
 */
static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
{
	int reset = 0;

	/* Only print messages for non-counter related interrupts. */
	if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
		      GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
		      GREG_STAT_RXPERR | GREG_STAT_RXTERR | GREG_STAT_EOPERR |
		      GREG_STAT_MIFIRQ | GREG_STAT_TXEACK | GREG_STAT_TXLERR |
		      GREG_STAT_TXPERR | GREG_STAT_TXTERR | GREG_STAT_SLVERR |
		      GREG_STAT_SLVPERR))
		printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
		       hp->dev->name, status);

	if (status & GREG_STAT_RFIFOVF) {
		/* Receive FIFO overflow is harmless and the hardware will take
		   care of it, just some packets are lost. Who cares. */
		printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n", hp->dev->name);
	}

	if (status & GREG_STAT_STSTERR) {
		/* BigMAC SQE link test failed. */
		printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n", hp->dev->name);
		reset = 1;
	}

	if (status & GREG_STAT_TFIFO_UND) {
		/* Transmit FIFO underrun, again DMA error likely. */
		printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
		       hp->dev->name);
		reset = 1;
	}

	if (status & GREG_STAT_MAXPKTERR) {
		/* Driver error, tried to transmit something larger
		 * than ethernet max mtu.
		 */
		printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n", hp->dev->name);
		reset = 1;
	}

	if (status & GREG_STAT_NORXD) {
		/* This is harmless, it just means the system is
		 * quite loaded and the incoming packet rate was
		 * faster than the interrupt handler could keep up
		 * with.
		 */
		printk(KERN_INFO "%s: Happy Meal out of receive "
		       "descriptors, packet dropped.\n",
		       hp->dev->name);
	}

	if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
		/* All sorts of DMA receive errors. */
		printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ", hp->dev->name);
		if (status & GREG_STAT_RXERR)
			printk("GenericError ");
		if (status & GREG_STAT_RXPERR)
			printk("ParityError ");
		if (status & GREG_STAT_RXTERR)
			printk("RxTagBotch ");
		printk("]\n");
		reset = 1;
	}

	if (status & GREG_STAT_EOPERR) {
		/* Driver bug, didn't set EOP bit in tx descriptor given
		 * to the happy meal.
		 */
		printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
		       hp->dev->name);
		reset = 1;
	}

	if (status & GREG_STAT_MIFIRQ) {
		/* MIF signalled an interrupt, were we polling it? */
		printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n", hp->dev->name);
	}

	if (status &
	    (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
		/* All sorts of transmit DMA errors. */
		printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ", hp->dev->name);
		if (status & GREG_STAT_TXEACK)
			printk("GenericError ");
		if (status & GREG_STAT_TXLERR)
			printk("LateError ");
		if (status & GREG_STAT_TXPERR)
			printk("ParityError ");
		if (status & GREG_STAT_TXTERR)
			printk("TagBotch ");
		printk("]\n");
		reset = 1;
	}

	if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
		/* Bus or parity error when cpu accessed happy meal registers
		 * or it's internal FIFO's.  Should never see this.
		 */
		printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
		       hp->dev->name,
		       (status & GREG_STAT_SLVPERR) ? "parity" : "generic");
		reset = 1;
	}

	if (reset) {
		printk(KERN_NOTICE "%s: Resetting...\n", hp->dev->name);
		happy_meal_init(hp);
		return 1;
	}
	return 0;
}
/* Handle a MIF (link status change) interrupt: re-read the link partner
 * ability register, pick the fastest mode both ends support, program the
 * PHY accordingly and stop MIF polling.
 *
 * Fix applied: the four per-mode KERN_INFO messages lacked a trailing
 * newline, so unrelated log output was appended to them; "\n" added.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_mif_interrupt(struct happy_meal *hp)
{
	void __iomem *tregs = hp->tcvregs;

	printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
	hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
	hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);

	/* Use the fastest transmission protocol possible. */
	if (hp->sw_lpa & LPA_100FULL) {
		printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.\n",
		       hp->dev->name);
		hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
	} else if (hp->sw_lpa & LPA_100HALF) {
		printk(KERN_INFO "%s: Switching to 100MBps at half duplex.\n",
		       hp->dev->name);
		hp->sw_bmcr |= BMCR_SPEED100;
	} else if (hp->sw_lpa & LPA_10FULL) {
		printk(KERN_INFO "%s: Switching to 10MBps at full duplex.\n",
		       hp->dev->name);
		hp->sw_bmcr |= BMCR_FULLDPLX;
	} else {
		printk(KERN_INFO "%s: Using 10Mbps at half duplex.\n",
		       hp->dev->name);
	}
	happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

	/* Finally stop polling and shut up the MIF. */
	happy_meal_poll_stop(hp, tregs);
}
#ifdef TXDEBUG
#define TXD(x) printk x
#else
#define TXD(x)
#endif
/* Reclaim completed transmit descriptors: walk the ring from tx_old to
 * tx_new, unmap each finished buffer/fragment, free the skb and update
 * stats.  Wakes the queue if enough ring space became available.
 *
 * hp->happy_lock must be held */
static void happy_meal_tx(struct happy_meal *hp)
{
	struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
	struct happy_meal_txd *this;
	struct net_device *dev = hp->dev;
	int elem;

	elem = hp->tx_old;
	TXD(("TX<"));
	while (elem != hp->tx_new) {
		struct sk_buff *skb;
		u32 flags, dma_addr, dma_len;
		int frag;

		TXD(("[%d]", elem));
		this = &txbase[elem];
		flags = hme_read_desc32(hp, &this->tx_flags);
		/* Chip still owns this descriptor; nothing more to reclaim. */
		if (flags & TXFLAG_OWN)
			break;
		skb = hp->tx_skbs[elem];
		if (skb_shinfo(skb)->nr_frags) {
			int last;

			/* A fragmented skb is only done when its LAST
			 * fragment's descriptor has been released by the chip.
			 */
			last = elem + skb_shinfo(skb)->nr_frags;
			last &= (TX_RING_SIZE - 1);
			flags = hme_read_desc32(hp, &txbase[last].tx_flags);
			if (flags & TXFLAG_OWN)
				break;
		}
		hp->tx_skbs[elem] = NULL;
		hp->net_stats.tx_bytes += skb->len;

		/* Unmap head (frag 0, dma_unmap_single) and each page
		 * fragment (dma_unmap_page), advancing through the ring.
		 */
		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			dma_addr = hme_read_desc32(hp, &this->tx_addr);
			dma_len = hme_read_desc32(hp, &this->tx_flags);

			dma_len &= TXFLAG_SIZE;
			if (!frag)
				dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
			else
				dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);

			elem = NEXT_TX(elem);
			this = &txbase[elem];
		}

		dev_kfree_skb_irq(skb);
		hp->net_stats.tx_packets++;
	}
	hp->tx_old = elem;
	TXD((">"));

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
}
#ifdef RXDEBUG
#define RXD(x) printk x
#else
#define RXD(x)
#endif
/* Originally I used to handle the allocation failure by just giving back just
* that one ring buffer to the happy meal. Problem is that usually when that
* condition is triggered, the happy meal expects you to do something reasonable
* with all of the packets it has DMA'd in. So now I just drop the entire
* ring when we cannot get a new skb and give them all back to the happy meal,
* maybe things will be "happier" now.
*
* hp->happy_lock must be held
*/
/* Service the receive ring: for each descriptor the chip has released,
 * validate the frame, hand a new or copied skb up the stack, and give the
 * ring buffer back to the hardware.  Large frames swap in a fresh ring
 * buffer; small ones are copied so the original mapping is reused.
 *
 * hp->happy_lock must be held */
static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
{
	struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0];
	struct happy_meal_rxd *this;
	int elem = hp->rx_new, drops = 0;
	u32 flags;

	RXD(("RX<"));
	this = &rxbase[elem];
	while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) {
		struct sk_buff *skb;
		int len = flags >> 16;		/* frame length in the high half */
		u16 csum = flags & RXFLAG_CSUM;	/* hardware checksum in the low half */
		u32 dma_addr = hme_read_desc32(hp, &this->rx_addr);

		RXD(("[%d ", elem));

		/* Check for errors. */
		if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) {
			RXD(("ERR(%08x)]", flags));
			hp->net_stats.rx_errors++;
			if (len < ETH_ZLEN)
				hp->net_stats.rx_length_errors++;
			if (len & (RXFLAG_OVERFLOW >> 16)) {
				hp->net_stats.rx_over_errors++;
				hp->net_stats.rx_fifo_errors++;
			}

			/* Return it to the Happy meal. */
	drop_it:
			hp->net_stats.rx_dropped++;
			hme_write_rxd(hp, this,
				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
				      dma_addr);
			goto next;
		}
		skb = hp->rx_skbs[elem];
		if (len > RX_COPY_THRESHOLD) {
			struct sk_buff *new_skb;

			/* Now refill the entry, if we can. */
			new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
			if (new_skb == NULL) {
				drops++;
				goto drop_it;
			}
			dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
			hp->rx_skbs[elem] = new_skb;
			new_skb->dev = dev;
			skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
			hme_write_rxd(hp, this,
				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
				      dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE,
						     DMA_FROM_DEVICE));
			skb_reserve(new_skb, RX_OFFSET);

			/* Trim the original skb for the netif. */
			skb_trim(skb, len);
		} else {
			struct sk_buff *copy_skb = dev_alloc_skb(len + 2);

			if (copy_skb == NULL) {
				drops++;
				goto drop_it;
			}

			/* 2-byte reserve aligns the IP header on a word boundary. */
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, copy_skb->data, len);
			dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE);

			/* Reuse original ring buffer. */
			hme_write_rxd(hp, this,
				      (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
				      dma_addr);

			skb = copy_skb;
		}

		/* The chip supplies a ones-complement checksum of the frame
		 * in the descriptor; hand it to the stack as
		 * CHECKSUM_COMPLETE after folding it into sk_buff form.
		 */
		skb->csum = csum_unfold(~(__force __sum16)htons(csum));
		skb->ip_summed = CHECKSUM_COMPLETE;

		RXD(("len=%d csum=%4x]", len, csum));
		skb->protocol = eth_type_trans(skb, dev);
		netif_rx(skb);

		hp->net_stats.rx_packets++;
		hp->net_stats.rx_bytes += len;
	next:
		elem = NEXT_RX(elem);
		this = &rxbase[elem];
	}
	hp->rx_new = elem;
	if (drops)
		printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name);
	RXD((">"));
}
/* Top-level interrupt handler for a single (non-Quattro-SBUS) port.
 * Reads the latched status register once (the read clears it), then
 * dispatches to the error, MIF, TX-reclaim and RX handlers in turn.
 * If the error path reset the chip, the remaining work is skipped.
 */
static irqreturn_t happy_meal_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct happy_meal *hp = netdev_priv(dev);
	u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);

	HMD(("happy_meal_interrupt: status=%08x ", happy_status));

	spin_lock(&hp->happy_lock);

	if (happy_status & GREG_STAT_ERRORS) {
		HMD(("ERRORS "));
		if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status))
			goto out;
	}

	if (happy_status & GREG_STAT_MIFIRQ) {
		HMD(("MIFIRQ "));
		happy_meal_mif_interrupt(hp);
	}

	if (happy_status & GREG_STAT_TXALL) {
		HMD(("TXALL "));
		happy_meal_tx(hp);
	}

	if (happy_status & GREG_STAT_RXTOHOST) {
		HMD(("RXTOHOST "));
		happy_meal_rx(hp, dev);
	}

	HMD(("done\n"));
out:
	spin_unlock(&hp->happy_lock);
	return IRQ_HANDLED;
}
#ifdef CONFIG_SBUS
/* Shared interrupt handler for SBUS Quattro (QFE) cards: all four ports
 * raise the same IRQ, so poll each port's status register and service
 * whichever ones have pending work, mirroring happy_meal_interrupt().
 */
static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie)
{
	struct quattro *qp = (struct quattro *) cookie;
	int i;

	for (i = 0; i < 4; i++) {
		struct net_device *dev = qp->happy_meals[i];
		struct happy_meal *hp = netdev_priv(dev);
		u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT);

		HMD(("quattro_interrupt: status=%08x ", happy_status));

		/* Skip ports with nothing pending — the read already
		 * cleared their latched status.
		 */
		if (!(happy_status & (GREG_STAT_ERRORS |
				      GREG_STAT_MIFIRQ |
				      GREG_STAT_TXALL |
				      GREG_STAT_RXTOHOST)))
			continue;

		spin_lock(&hp->happy_lock);

		if (happy_status & GREG_STAT_ERRORS) {
			HMD(("ERRORS "));
			if (happy_meal_is_not_so_happy(hp, happy_status))
				goto next;
		}

		if (happy_status & GREG_STAT_MIFIRQ) {
			HMD(("MIFIRQ "));
			happy_meal_mif_interrupt(hp);
		}

		if (happy_status & GREG_STAT_TXALL) {
			HMD(("TXALL "));
			happy_meal_tx(hp);
		}

		if (happy_status & GREG_STAT_RXTOHOST) {
			HMD(("RXTOHOST "));
			happy_meal_rx(hp, dev);
		}

	next:
		spin_unlock(&hp->happy_lock);
	}
	HMD(("done\n"));

	return IRQ_HANDLED;
}
#endif
/* ndo_open: request the per-port IRQ (unless this is an SBUS Quattro
 * port, whose shared IRQ was registered at probe time) and run the full
 * hardware init.  On init failure the IRQ is released again.
 */
static int happy_meal_open(struct net_device *dev)
{
	struct happy_meal *hp = netdev_priv(dev);
	int res;

	HMD(("happy_meal_open: "));

	/* On SBUS Quattro QFE cards, all hme interrupts are concentrated
	 * into a single source which we register handling at probe time.
	 */
	if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
		if (request_irq(dev->irq, happy_meal_interrupt,
				IRQF_SHARED, dev->name, (void *)dev)) {
			HMD(("EAGAIN\n"));
			printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n",
			       dev->irq);

			return -EAGAIN;
		}
	}

	HMD(("to happy_meal_init\n"));

	spin_lock_irq(&hp->happy_lock);
	res = happy_meal_init(hp);
	spin_unlock_irq(&hp->happy_lock);

	/* Undo the IRQ registration if init failed (same Quattro rule). */
	if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO))
		free_irq(dev->irq, dev);
	return res;
}
/* ndo_stop: quiesce the chip, release ring buffers, stop the link timer,
 * and free the IRQ unless it is the shared SBUS Quattro one.
 */
static int happy_meal_close(struct net_device *dev)
{
	struct happy_meal *hp = netdev_priv(dev);

	spin_lock_irq(&hp->happy_lock);
	happy_meal_stop(hp, hp->gregs);
	happy_meal_clean_rings(hp);

	/* If auto-negotiation timer is running, kill it. */
	del_timer(&hp->happy_timer);

	spin_unlock_irq(&hp->happy_lock);

	/* On Quattro QFE cards, all hme interrupts are concentrated
	 * into a single source which we register handling at probe
	 * time and never unregister.
	 */
	if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)
		free_irq(dev->irq, dev);

	return 0;
}
#ifdef SXDEBUG
#define SXD(x) printk x
#else
#define SXD(x)
#endif
/* ndo_tx_timeout: dump diagnostic state, reinitialise the whole chip and
 * restart the transmit queue.
 */
static void happy_meal_tx_timeout(struct net_device *dev)
{
	struct happy_meal *hp = netdev_priv(dev);

	printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
	tx_dump_log();
	printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name,
		hme_read32(hp, hp->gregs + GREG_STAT),
		hme_read32(hp, hp->etxregs + ETX_CFG),
		hme_read32(hp, hp->bigmacregs + BMAC_TXCFG));

	spin_lock_irq(&hp->happy_lock);
	happy_meal_init(hp);
	spin_unlock_irq(&hp->happy_lock);

	netif_wake_queue(dev);
}
/* ndo_start_xmit: map the skb (head plus any page fragments) into TX
 * descriptors and kick the DMA engine.  For fragmented skbs the head
 * descriptor is handed to the chip LAST so the hardware never sees a
 * partially built chain.  Returns NETDEV_TX_BUSY only on the "queue awake
 * but ring full" driver-bug case.
 */
static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
					 struct net_device *dev)
{
	struct happy_meal *hp = netdev_priv(dev);
	int entry;
	u32 tx_flags;

	tx_flags = TXFLAG_OWN;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		/* Tell the chip where to start checksumming and where to
		 * stuff the result, encoded into the descriptor flags.
		 */
		const u32 csum_start_off = skb_checksum_start_offset(skb);
		const u32 csum_stuff_off = csum_start_off + skb->csum_offset;

		tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
			    ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) |
			    ((csum_stuff_off << 20) & TXFLAG_CSLOCATION));
	}

	spin_lock_irq(&hp->happy_lock);

	if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&hp->happy_lock);
		printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return NETDEV_TX_BUSY;
	}

	entry = hp->tx_new;
	SXD(("SX<l[%d]e[%d]>", len, entry));
	hp->tx_skbs[entry] = skb;

	if (skb_shinfo(skb)->nr_frags == 0) {
		/* Linear skb: a single SOP+EOP descriptor suffices. */
		u32 mapping, len;

		len = skb->len;
		mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
		tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
			      (tx_flags | (len & TXFLAG_SIZE)),
			      mapping);
		entry = NEXT_TX(entry);
	} else {
		u32 first_len, first_mapping;
		int frag, first_entry = entry;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_len = skb_headlen(skb);
		first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
					       DMA_TO_DEVICE);
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len, mapping, this_txflags;

			len = this_frag->size;
			mapping = dma_map_page(hp->dma_dev, this_frag->page,
					       this_frag->page_offset, len,
					       DMA_TO_DEVICE);
			this_txflags = tx_flags;
			if (frag == skb_shinfo(skb)->nr_frags - 1)
				this_txflags |= TXFLAG_EOP;
			hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
				      (this_txflags | (len & TXFLAG_SIZE)),
				      mapping);
			entry = NEXT_TX(entry);
		}
		/* Now it is safe to hand the head descriptor to the chip. */
		hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry],
			      (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)),
			      first_mapping);
	}

	hp->tx_new = entry;

	if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	/* Get it going. */
	hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP);

	spin_unlock_irq(&hp->happy_lock);

	tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
	return NETDEV_TX_OK;
}
static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
{
struct happy_meal *hp = netdev_priv(dev);
spin_lock_irq(&hp->happy_lock);
happy_meal_get_counters(hp, hp->bigmacregs);
spin_unlock_irq(&hp->happy_lock);
return &hp->net_stats;
}
/* ndo_set_rx_mode: reprogram the BigMAC receive filter.  ALLMULTI or a
 * large list opens the hash filter completely; promiscuous mode sets
 * PMISC; otherwise a 64-bit CRC hash filter is built from the list.
 */
static void happy_meal_set_multicast(struct net_device *dev)
{
	struct happy_meal *hp = netdev_priv(dev);
	void __iomem *bregs = hp->bigmacregs;
	struct netdev_hw_addr *ha;
	char *addrs;
	u32 crc;

	spin_lock_irq(&hp->happy_lock);

	if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
		hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
	} else if (dev->flags & IFF_PROMISC) {
		hme_write32(hp, bregs + BMAC_RXCFG,
			    hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC);
	} else {
		u16 hash_table[4];

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, dev) {
			addrs = ha->addr;

			/* Skip non-multicast entries. */
			if (!(*addrs & 1))
				continue;

			/* Top 6 CRC bits index the 64-bit hash filter. */
			crc = ether_crc_le(6, addrs);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
		hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
		hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
		hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
	}

	spin_unlock_irq(&hp->happy_lock);
}
/* Ethtool support... */
/* ethtool get_settings: report supported modes, then refresh BMCR/LPA
 * from the PHY and derive the current speed/duplex/autoneg state.
 */
static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct happy_meal *hp = netdev_priv(dev);
	u32 speed;

	cmd->supported =
		(SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full |
		 SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
		 SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII);

	/* XXX hardcoded stuff for now */
	cmd->port = PORT_TP; /* XXX no MII support */
	cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */
	cmd->phy_address = 0; /* XXX fixed PHYAD */

	/* Record PHY settings. */
	spin_lock_irq(&hp->happy_lock);
	hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
	hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA);
	spin_unlock_irq(&hp->happy_lock);

	if (hp->sw_bmcr & BMCR_ANENABLE) {
		/* Autoneg enabled: derive the result from the link
		 * partner ability register.
		 */
		cmd->autoneg = AUTONEG_ENABLE;
		speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ?
			 SPEED_100 : SPEED_10);
		if (speed == SPEED_100)
			cmd->duplex =
				(hp->sw_lpa & (LPA_100FULL)) ?
				DUPLEX_FULL : DUPLEX_HALF;
		else
			cmd->duplex =
				(hp->sw_lpa & (LPA_10FULL)) ?
				DUPLEX_FULL : DUPLEX_HALF;
	} else {
		/* Forced mode: read speed/duplex straight from BMCR. */
		cmd->autoneg = AUTONEG_DISABLE;
		speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10;
		cmd->duplex =
			(hp->sw_bmcr & BMCR_FULLDPLX) ?
			DUPLEX_FULL : DUPLEX_HALF;
	}
	ethtool_cmd_speed_set(cmd, speed);
	return 0;
}
static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
struct happy_meal *hp = netdev_priv(dev);
/* Verify the settings we care about. */
if (cmd->autoneg != AUTONEG_ENABLE &&
cmd->autoneg != AUTONEG_DISABLE)
return -EINVAL;
if (cmd->autoneg == AUTONEG_DISABLE &&
((ethtool_cmd_speed(cmd) != SPEED_100 &&
ethtool_cmd_speed(cmd) != SPEED_10) ||
(cmd->duplex != DUPLEX_HALF &&
cmd->duplex != DUPLEX_FULL)))
return -EINVAL;
/* Ok, do it to it. */
spin_lock_irq(&hp->happy_lock);
del_timer(&hp->happy_timer);
happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
spin_unlock_irq(&hp->happy_lock);
return 0;
}
/* ethtool get_drvinfo: fill in driver name/version and bus location.
 *
 * Fix applied: the original used unbounded strcpy()/sprintf() into the
 * fixed-size ethtool_drvinfo buffers; replaced with snprintf() bounded
 * by each destination's size so the copies can never overrun.
 */
static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct happy_meal *hp = netdev_priv(dev);

	snprintf(info->driver, sizeof(info->driver), "sunhme");
	snprintf(info->version, sizeof(info->version), "2.02");
	if (hp->happy_flags & HFLAG_PCI) {
		struct pci_dev *pdev = hp->happy_dev;
		snprintf(info->bus_info, sizeof(info->bus_info), "%s",
			 pci_name(pdev));
	}
#ifdef CONFIG_SBUS
	else {
		const struct linux_prom_registers *regs;
		struct platform_device *op = hp->happy_dev;
		regs = of_get_property(op->dev.of_node, "regs", NULL);
		if (regs)
			snprintf(info->bus_info, sizeof(info->bus_info),
				 "SBUS:%d", regs->which_io);
	}
#endif
}
static u32 hme_get_link(struct net_device *dev)
{
struct happy_meal *hp = netdev_priv(dev);
spin_lock_irq(&hp->happy_lock);
hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
spin_unlock_irq(&hp->happy_lock);
return hp->sw_bmsr & BMSR_LSTATUS;
}
/* ethtool operations exported via dev->ethtool_ops. */
static const struct ethtool_ops hme_ethtool_ops = {
	.get_settings		= hme_get_settings,
	.set_settings		= hme_set_settings,
	.get_drvinfo		= hme_get_drvinfo,
	.get_link		= hme_get_link,
};
static int hme_version_printed;
#ifdef CONFIG_SBUS
/* Given a happy meal sbus device, find it's quattro parent.
 * If none exist, allocate and return a new one.
 *
 * Return NULL on failure.
 */
static struct quattro * __devinit quattro_sbus_find(struct platform_device *child)
{
	struct device *parent = child->dev.parent;
	struct platform_device *op;
	struct quattro *qp;

	op = to_platform_device(parent);

	/* Reuse a quattro already registered on this parent, if any. */
	qp = dev_get_drvdata(&op->dev);
	if (qp)
		return qp;

	/* kzalloc() zero-fills the happy_meals[] slot array (and every
	 * other field), replacing the old kmalloc() plus the explicit
	 * NULL-assignment loop.
	 */
	qp = kzalloc(sizeof(struct quattro), GFP_KERNEL);
	if (qp != NULL) {
		qp->quattro_dev = child;
		qp->next = qfe_sbus_list;
		qfe_sbus_list = qp;
		dev_set_drvdata(&op->dev, qp);
	}
	return qp;
}
/* After all quattro cards have been probed, we call these functions
 * to register the IRQ handlers for the cards that have been
 * successfully probed and skip the cards that failed to initialize
 */
static int __init quattro_sbus_register_irqs(void)
{
	struct quattro *qp;

	for (qp = qfe_sbus_list; qp; qp = qp->next) {
		struct platform_device *op = qp->quattro_dev;
		int err, slot, complete = 1;

		/* Only hook the shared IRQ when all four ports of this
		 * card probed successfully.
		 */
		for (slot = 0; slot < 4; slot++) {
			if (!qp->happy_meals[slot]) {
				complete = 0;
				break;
			}
		}
		if (!complete)
			continue;

		err = request_irq(op->archdata.irqs[0],
				  quattro_sbus_interrupt,
				  IRQF_SHARED, "Quattro", qp);
		if (err) {
			printk(KERN_ERR "Quattro HME: IRQ registration "
			       "error %d.\n", err);
			return err;
		}
	}

	return 0;
}
/* Release the per-card shared IRQs taken by quattro_sbus_register_irqs(). */
static void quattro_sbus_free_irqs(void)
{
	struct quattro *qp;

	for (qp = qfe_sbus_list; qp; qp = qp->next) {
		struct platform_device *op = qp->quattro_dev;
		int slot, complete = 1;

		/* An IRQ was only requested for fully-populated cards. */
		for (slot = 0; slot < 4; slot++) {
			if (!qp->happy_meals[slot]) {
				complete = 0;
				break;
			}
		}
		if (complete)
			free_irq(op->archdata.irqs[0], qp);
	}
}
#endif /* CONFIG_SBUS */
#ifdef CONFIG_PCI
/* Find (or allocate) the quattro descriptor for a PCI HME function.
 * A quattro is identified by the PCI-PCI bridge its four ports sit
 * behind.  Returns NULL when pdev has no parent bridge or allocation
 * fails.
 */
static struct quattro * __devinit quattro_pci_find(struct pci_dev *pdev)
{
	struct pci_dev *bdev = pdev->bus->self;
	struct quattro *qp;

	if (!bdev)
		return NULL;

	for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
		struct pci_dev *qpdev = qp->quattro_dev;

		if (qpdev == bdev)
			return qp;
	}

	/* kzalloc() zero-fills happy_meals[] and nranges, replacing the
	 * kmalloc() + manual initialization of the original.
	 */
	qp = kzalloc(sizeof(struct quattro), GFP_KERNEL);
	if (qp != NULL) {
		qp->quattro_dev = bdev;
		qp->next = qfe_pci_list;
		qfe_pci_list = qp;

		/* No range tricks necessary on PCI. */
	}
	return qp;
}
#endif /* CONFIG_PCI */
/* net_device callbacks: the stack's entry points into this driver. */
static const struct net_device_ops hme_netdev_ops = {
	.ndo_open		= happy_meal_open,
	.ndo_stop		= happy_meal_close,
	.ndo_start_xmit		= happy_meal_start_xmit,
	.ndo_tx_timeout		= happy_meal_tx_timeout,
	.ndo_get_stats		= happy_meal_get_stats,
	.ndo_set_multicast_list	= happy_meal_set_multicast,
	.ndo_change_mtu		= eth_change_mtu,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
};
#ifdef CONFIG_SBUS
/* Probe one SBUS HME/QFE port: allocate the netdev, map the five
 * register banks, pick a MAC address, allocate the descriptor block
 * and register with the network stack.  Returns 0 or a negative errno.
 */
static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe)
{
	struct device_node *dp = op->dev.of_node, *sbus_dp;
	struct quattro *qp = NULL;
	struct happy_meal *hp;
	struct net_device *dev;
	int i, qfe_slot = -1;
	int err = -ENODEV;

	sbus_dp = op->dev.parent->of_node;

	/* We can match PCI devices too, do not accept those here. */
	if (strcmp(sbus_dp->name, "sbus"))
		return err;

	/* QFE ports attach to a shared per-card quattro descriptor and
	 * claim the first free slot of its four.
	 */
	if (is_qfe) {
		qp = quattro_sbus_find(op);
		if (qp == NULL)
			goto err_out;
		for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
			if (qp->happy_meals[qfe_slot] == NULL)
				break;
		if (qfe_slot == 4)
			goto err_out;
	}

	err = -ENOMEM;
	dev = alloc_etherdev(sizeof(struct happy_meal));
	if (!dev)
		goto err_out;
	SET_NETDEV_DEV(dev, &op->dev);

	if (hme_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* If user did not specify a MAC address specifically, use
	 * the Quattro local-mac-address property...
	 */
	for (i = 0; i < 6; i++) {
		if (macaddr[i] != 0)
			break;
	}
	if (i < 6) { /* a mac address was given */
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = macaddr[i];
		/* Bump the module parameter so the next port probed gets
		 * the following address.
		 */
		macaddr[5]++;
	} else {
		const unsigned char *addr;
		int len;

		addr = of_get_property(dp, "local-mac-address", &len);

		/* Fall back to the machine-wide IDPROM address. */
		if (qfe_slot != -1 && addr && len == 6)
			memcpy(dev->dev_addr, addr, 6);
		else
			memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
	}

	hp = netdev_priv(dev);

	hp->happy_dev = op;
	hp->dma_dev = &op->dev;

	spin_lock_init(&hp->happy_lock);

	err = -ENODEV;
	if (qp != NULL) {
		hp->qfe_parent = qp;
		hp->qfe_ent = qfe_slot;
		qp->happy_meals[qfe_slot] = dev;
	}

	/* Map the five register banks described by the OF resources. */
	hp->gregs = of_ioremap(&op->resource[0], 0,
			       GREG_REG_SIZE, "HME Global Regs");
	if (!hp->gregs) {
		printk(KERN_ERR "happymeal: Cannot map global registers.\n");
		/* NOTE(review): this path skips the err_out_iounmap code
		 * that clears qp->happy_meals[qfe_slot], leaving a stale
		 * pointer to the about-to-be-freed netdev in the quattro
		 * table — verify and consider clearing it here too.
		 */
		goto err_out_free_netdev;
	}

	hp->etxregs = of_ioremap(&op->resource[1], 0,
				 ETX_REG_SIZE, "HME TX Regs");
	if (!hp->etxregs) {
		printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n");
		goto err_out_iounmap;
	}

	hp->erxregs = of_ioremap(&op->resource[2], 0,
				 ERX_REG_SIZE, "HME RX Regs");
	if (!hp->erxregs) {
		printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n");
		goto err_out_iounmap;
	}

	hp->bigmacregs = of_ioremap(&op->resource[3], 0,
				    BMAC_REG_SIZE, "HME BIGMAC Regs");
	if (!hp->bigmacregs) {
		printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n");
		goto err_out_iounmap;
	}

	hp->tcvregs = of_ioremap(&op->resource[4], 0,
				 TCVR_REG_SIZE, "HME Tranceiver Regs");
	if (!hp->tcvregs) {
		printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n");
		goto err_out_iounmap;
	}

	/* 0xff means "no hm-rev property"; treat as revision A0. */
	hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
	if (hp->hm_revision == 0xff)
		hp->hm_revision = 0xa0;

	/* Now enable the feature flags we can. */
	if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
		hp->happy_flags = HFLAG_20_21;
	else if (hp->hm_revision != 0xa0)
		hp->happy_flags = HFLAG_NOT_A0;

	if (qp != NULL)
		hp->happy_flags |= HFLAG_QUATTRO;

	/* Get the supported DVMA burst sizes from our Happy SBUS. */
	hp->happy_bursts = of_getintprop_default(sbus_dp,
						 "burst-sizes", 0x00);

	/* One page of coherent DMA holds all TX/RX descriptors. */
	hp->happy_block = dma_alloc_coherent(hp->dma_dev,
					     PAGE_SIZE,
					     &hp->hblock_dvma,
					     GFP_ATOMIC);
	err = -ENOMEM;
	if (!hp->happy_block) {
		printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n");
		goto err_out_iounmap;
	}

	/* Force check of the link first time we are brought up. */
	hp->linkcheck = 0;

	/* Force timer state to 'asleep' with count of zero. */
	hp->timer_state = asleep;
	hp->timer_ticks = 0;

	init_timer(&hp->happy_timer);

	hp->dev = dev;
	dev->netdev_ops = &hme_netdev_ops;
	dev->watchdog_timeo = 5*HZ;
	dev->ethtool_ops = &hme_ethtool_ops;

	/* Happy Meal can do it all... */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;

	dev->irq = op->archdata.irqs[0];

#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
	/* Hook up SBUS register/descriptor accessors. */
	hp->read_desc32 = sbus_hme_read_desc32;
	hp->write_txd = sbus_hme_write_txd;
	hp->write_rxd = sbus_hme_write_rxd;
	hp->read32 = sbus_hme_read32;
	hp->write32 = sbus_hme_write32;
#endif

	/* Grrr, Happy Meal comes up by default not advertising
	 * full duplex 100baseT capabilities, fix this.
	 */
	spin_lock_irq(&hp->happy_lock);
	happy_meal_set_initial_advertisement(hp);
	spin_unlock_irq(&hp->happy_lock);

	err = register_netdev(hp->dev);
	if (err) {
		printk(KERN_ERR "happymeal: Cannot register net device, "
		       "aborting.\n");
		goto err_out_free_coherent;
	}

	dev_set_drvdata(&op->dev, hp);

	if (qfe_slot != -1)
		printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ",
		       dev->name, qfe_slot);
	else
		printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ",
		       dev->name);

	printk("%pM\n", dev->dev_addr);

	return 0;

	/* Error unwinding, in reverse order of acquisition. */
err_out_free_coherent:
	dma_free_coherent(hp->dma_dev,
			  PAGE_SIZE,
			  hp->happy_block,
			  hp->hblock_dvma);

err_out_iounmap:
	if (hp->gregs)
		of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
	if (hp->etxregs)
		of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
	if (hp->erxregs)
		of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
	if (hp->bigmacregs)
		of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
	if (hp->tcvregs)
		of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);

	if (qp)
		qp->happy_meals[qfe_slot] = NULL;

err_out_free_netdev:
	free_netdev(dev);

err_out:
	return err;
}
#endif
#ifdef CONFIG_PCI
#ifndef CONFIG_SPARC
static int is_quattro_p(struct pci_dev *pdev)
{
struct pci_dev *busdev = pdev->bus->self;
struct list_head *tmp;
int n_hmes;
if (busdev == NULL ||
busdev->vendor != PCI_VENDOR_ID_DEC ||
busdev->device != PCI_DEVICE_ID_DEC_21153)
return 0;
n_hmes = 0;
tmp = pdev->bus->devices.next;
while (tmp != &pdev->bus->devices) {
struct pci_dev *this_pdev = pci_dev_b(tmp);
if (this_pdev->vendor == PCI_VENDOR_ID_SUN &&
this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL)
n_hmes++;
tmp = tmp->next;
}
if (n_hmes != 4)
return 0;
return 1;
}
/* Fetch MAC address from vital product data of PCI ROM. */
static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr)
{
	/* VPD network-address descriptor signature bytes. */
	static const unsigned char vpd_sig[6] = {
		0x90, 0x00, 0x09, 0x4e, 0x41, 0x06
	};
	int off;

	for (off = 0x20; off < len; off++) {
		void __iomem *p = rom_base + off;
		int i, match = 1;

		for (i = 0; i < 6; i++) {
			if (readb(p + i) != vpd_sig[i]) {
				match = 0;
				break;
			}
		}
		if (!match)
			continue;

		off += 6;
		p += 6;

		/* One descriptor per port: skip entries until we reach
		 * the slot the caller asked for.
		 */
		if (index == 0) {
			for (i = 0; i < 6; i++)
				dev_addr[i] = readb(p + i);
			return 1;
		}
		index--;
	}
	return 0;
}
/* Obtain a MAC address on non-sparc hosts: first try the VPD in the
 * card's expansion ROM, and fall back to the Sun OUI plus three random
 * bytes when no usable VPD entry is found.
 */
static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr)
{
	void __iomem *p;
	size_t size;
	int found = 0;

	p = pci_map_rom(pdev, &size);
	if (p) {
		int index = 0;

		/* On a quattro each port owns the VPD entry matching
		 * its PCI slot number.
		 */
		if (is_quattro_p(pdev))
			index = PCI_SLOT(pdev->devfn);

		/* Only trust the ROM if the 0x55aa expansion-ROM
		 * signature is present.
		 */
		if (readb(p) == 0x55 && readb(p + 1) == 0xaa)
			found = find_eth_addr_in_vpd(p, (64 * 1024), index,
						     dev_addr);
		pci_unmap_rom(pdev, p);
	}
	if (found)
		return;

	/* Sun MAC prefix then 3 random bytes. */
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(&dev_addr[3], 3);
}
#endif /* !(CONFIG_SPARC) */
/* Probe one PCI HME/QFE function: enable the device, map its single
 * 32KB register BAR, pick a MAC address, allocate the descriptor block
 * and register with the network stack.  Returns 0 or a negative errno.
 */
static int __devinit happy_meal_pci_probe(struct pci_dev *pdev,
					  const struct pci_device_id *ent)
{
	struct quattro *qp = NULL;
#ifdef CONFIG_SPARC
	struct device_node *dp;
#endif
	struct happy_meal *hp;
	struct net_device *dev;
	void __iomem *hpreg_base;
	unsigned long hpreg_res;
	int i, qfe_slot = -1;
	char prom_name[64];
	int err;

	/* Now make sure pci_dev cookie is there. */
#ifdef CONFIG_SPARC
	dp = pci_device_to_OF_node(pdev);
	strcpy(prom_name, dp->name);
#else
	if (is_quattro_p(pdev))
		strcpy(prom_name, "SUNW,qfe");
	else
		strcpy(prom_name, "SUNW,hme");
#endif

	err = -ENODEV;

	if (pci_enable_device(pdev))
		goto err_out;
	pci_set_master(pdev);

	/* QFE ports share a per-card quattro descriptor keyed by their
	 * parent bridge; claim the first free one of its four slots.
	 */
	if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) {
		qp = quattro_pci_find(pdev);
		if (qp == NULL)
			goto err_out;
		for (qfe_slot = 0; qfe_slot < 4; qfe_slot++)
			if (qp->happy_meals[qfe_slot] == NULL)
				break;
		if (qfe_slot == 4)
			goto err_out;
	}

	dev = alloc_etherdev(sizeof(struct happy_meal));
	err = -ENOMEM;
	if (!dev)
		goto err_out;
	SET_NETDEV_DEV(dev, &pdev->dev);

	if (hme_version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	dev->base_addr = (long) pdev;

	hp = netdev_priv(dev);

	hp->happy_dev = pdev;
	hp->dma_dev = &pdev->dev;

	spin_lock_init(&hp->happy_lock);

	if (qp != NULL) {
		hp->qfe_parent = qp;
		hp->qfe_ent = qfe_slot;
		qp->happy_meals[qfe_slot] = dev;
	}

	hpreg_res = pci_resource_start(pdev, 0);
	err = -ENODEV;
	/* BAR 0 must be a memory resource; an I/O-port BAR is wrong. */
	if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) {
		printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n");
		goto err_out_clear_quattro;
	}
	if (pci_request_regions(pdev, DRV_NAME)) {
		printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, "
		       "aborting.\n");
		goto err_out_clear_quattro;
	}

	if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == NULL) {
		printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n");
		goto err_out_free_res;
	}

	/* A non-zero "macaddr=" module parameter overrides firmware. */
	for (i = 0; i < 6; i++) {
		if (macaddr[i] != 0)
			break;
	}
	if (i < 6) { /* a mac address was given */
		for (i = 0; i < 6; i++)
			dev->dev_addr[i] = macaddr[i];
		macaddr[5]++;
	} else {
#ifdef CONFIG_SPARC
		const unsigned char *addr;
		int len;

		if (qfe_slot != -1 &&
		    (addr = of_get_property(dp, "local-mac-address", &len))
			!= NULL &&
		    len == 6) {
			memcpy(dev->dev_addr, addr, 6);
		} else {
			memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
		}
#else
		get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]);
#endif
	}

	/* Layout registers. */
	hp->gregs      = (hpreg_base + 0x0000UL);
	hp->etxregs    = (hpreg_base + 0x2000UL);
	hp->erxregs    = (hpreg_base + 0x4000UL);
	hp->bigmacregs = (hpreg_base + 0x6000UL);
	hp->tcvregs    = (hpreg_base + 0x7000UL);

#ifdef CONFIG_SPARC
	/* 0xff means "no hm-rev property"; synthesize 0xCn from the PCI
	 * revision id.
	 */
	hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff);
	if (hp->hm_revision == 0xff)
		hp->hm_revision = 0xc0 | (pdev->revision & 0x0f);
#else
	/* works with this on non-sparc hosts */
	hp->hm_revision = 0x20;
#endif

	/* Now enable the feature flags we can. */
	if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21)
		hp->happy_flags = HFLAG_20_21;
	else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0)
		hp->happy_flags = HFLAG_NOT_A0;

	if (qp != NULL)
		hp->happy_flags |= HFLAG_QUATTRO;

	/* And of course, indicate this is PCI. */
	hp->happy_flags |= HFLAG_PCI;

#ifdef CONFIG_SPARC
	/* Assume PCI happy meals can handle all burst sizes. */
	hp->happy_bursts = DMA_BURSTBITS;
#endif

	/* One page of coherent DMA holds all TX/RX descriptors. */
	hp->happy_block = (struct hmeal_init_block *)
		dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL);

	/* NOTE(review): -ENOMEM would describe this failure better than
	 * -ENODEV (the SBUS probe path uses -ENOMEM here) — confirm.
	 */
	err = -ENODEV;
	if (!hp->happy_block) {
		printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n");
		goto err_out_iounmap;
	}

	hp->linkcheck = 0;
	hp->timer_state = asleep;
	hp->timer_ticks = 0;

	init_timer(&hp->happy_timer);

	hp->dev = dev;
	dev->netdev_ops = &hme_netdev_ops;
	dev->watchdog_timeo = 5*HZ;
	dev->ethtool_ops = &hme_ethtool_ops;
	dev->irq = pdev->irq;
	dev->dma = 0;

	/* Happy Meal can do it all... */
	dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM;
	dev->features |= dev->hw_features | NETIF_F_RXCSUM;

#if defined(CONFIG_SBUS) && defined(CONFIG_PCI)
	/* Hook up PCI register/descriptor accessors. */
	hp->read_desc32 = pci_hme_read_desc32;
	hp->write_txd = pci_hme_write_txd;
	hp->write_rxd = pci_hme_write_rxd;
	hp->read32 = pci_hme_read32;
	hp->write32 = pci_hme_write32;
#endif

	/* Grrr, Happy Meal comes up by default not advertising
	 * full duplex 100baseT capabilities, fix this.
	 */
	spin_lock_irq(&hp->happy_lock);
	happy_meal_set_initial_advertisement(hp);
	spin_unlock_irq(&hp->happy_lock);

	err = register_netdev(hp->dev);
	if (err) {
		printk(KERN_ERR "happymeal(PCI): Cannot register net device, "
		       "aborting.\n");
		goto err_out_iounmap;
	}

	dev_set_drvdata(&pdev->dev, hp);

	/* Slot 0 of a quattro additionally announces the bridge once
	 * per card (qfe_slot == 0 implies qp != NULL).
	 */
	if (!qfe_slot) {
		struct pci_dev *qpdev = qp->quattro_dev;

		prom_name[0] = 0;
		if (!strncmp(dev->name, "eth", 3)) {
			int i = simple_strtoul(dev->name + 3, NULL, 10);
			sprintf(prom_name, "-%d", i + 3);
		}
		printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name);
		if (qpdev->vendor == PCI_VENDOR_ID_DEC &&
		    qpdev->device == PCI_DEVICE_ID_DEC_21153)
			printk("DEC 21153 PCI Bridge\n");
		else
			printk("unknown bridge %04x.%04x\n",
			       qpdev->vendor, qpdev->device);
	}

	if (qfe_slot != -1)
		printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ",
		       dev->name, qfe_slot);
	else
		printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ",
		       dev->name);

	printk("%pM\n", dev->dev_addr);

	return 0;

	/* Error unwinding, in reverse order of acquisition. */
err_out_iounmap:
	iounmap(hp->gregs);

err_out_free_res:
	pci_release_regions(pdev);

err_out_clear_quattro:
	if (qp != NULL)
		qp->happy_meals[qfe_slot] = NULL;

	free_netdev(dev);

err_out:
	return err;
}
/* PCI ->remove: undo happy_meal_pci_probe() in reverse order —
 * unregister the netdev, release the DMA block, unmap the registers,
 * drop the PCI regions and free the netdev.
 */
static void __devexit happy_meal_pci_remove(struct pci_dev *pdev)
{
	struct happy_meal *hme = dev_get_drvdata(&pdev->dev);
	struct net_device *ndev = hme->dev;

	unregister_netdev(ndev);

	dma_free_coherent(hme->dma_dev, PAGE_SIZE,
			  hme->happy_block, hme->hblock_dvma);
	iounmap(hme->gregs);
	pci_release_regions(hme->happy_dev);

	free_netdev(ndev);

	dev_set_drvdata(&pdev->dev, NULL);
}
/* PCI IDs this driver binds to: every Sun HME function. */
static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) },
	{ }			/* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, happymeal_pci_ids);

/* PCI driver glue. */
static struct pci_driver hme_pci_driver = {
	.name		= "hme",
	.id_table	= happymeal_pci_ids,
	.probe		= happy_meal_pci_probe,
	.remove		= __devexit_p(happy_meal_pci_remove),
};
/* Register the PCI driver with the PCI core. */
static int __init happy_meal_pci_init(void)
{
	return pci_register_driver(&hme_pci_driver);
}
static void happy_meal_pci_exit(void)
{
pci_unregister_driver(&hme_pci_driver);
while (qfe_pci_list) {
struct quattro *qfe = qfe_pci_list;
struct quattro *next = qfe->next;
kfree(qfe);
qfe_pci_list = next;
}
}
#endif
#ifdef CONFIG_SBUS
static const struct of_device_id hme_sbus_match[];

/* Platform ->probe: decide whether the node is a QFE port and hand off
 * to the common SBUS probe routine.
 */
static int __devinit hme_sbus_probe(struct platform_device *op)
{
	const struct of_device_id *match;
	struct device_node *dp = op->dev.of_node;
	const char *model;
	int is_qfe;

	match = of_match_device(hme_sbus_match, &op->dev);
	if (!match)
		return -EINVAL;

	/* A port is a QFE port if the match table says so, or if its
	 * "model" property identifies it as one.
	 */
	is_qfe = (match->data != NULL);
	if (!is_qfe) {
		model = of_get_property(dp, "model", NULL);
		if (model && !strcmp(model, "SUNW,sbus-qfe"))
			is_qfe = 1;
	}

	return happy_meal_sbus_probe_one(op, is_qfe);
}
/* Platform ->remove: unregister the netdev, unmap the five register
 * banks, release the descriptor DMA block and free the netdev.
 */
static int __devexit hme_sbus_remove(struct platform_device *op)
{
	struct happy_meal *hp = dev_get_drvdata(&op->dev);
	struct net_device *net_dev = hp->dev;

	unregister_netdev(net_dev);

	/* XXX qfe parent interrupt... */

	of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE);
	of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE);
	of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE);
	of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE);
	of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE);

	dma_free_coherent(hp->dma_dev,
			  PAGE_SIZE,
			  hp->happy_block,
			  hp->hblock_dvma);

	free_netdev(net_dev);

	dev_set_drvdata(&op->dev, NULL);

	return 0;
}
/* OF match table: a non-NULL .data marks the node as a QFE port. */
static const struct of_device_id hme_sbus_match[] = {
	{
		.name = "SUNW,hme",
	},
	{
		.name = "SUNW,qfe",
		.data = (void *) 1,
	},
	{
		.name = "qfe",
		.data = (void *) 1,
	},
	{},
};

MODULE_DEVICE_TABLE(of, hme_sbus_match);
/* Platform (SBUS/OF) driver glue. */
static struct platform_driver hme_sbus_driver = {
	.driver = {
		.name = "hme",
		.owner = THIS_MODULE,
		.of_match_table = hme_sbus_match,
	},
	.probe		= hme_sbus_probe,
	.remove		= __devexit_p(hme_sbus_remove),
};
/* Register the SBUS platform driver, then wire up the quattro IRQs —
 * those can only be requested once every port on each card has probed.
 */
static int __init happy_meal_sbus_init(void)
{
	int err = platform_driver_register(&hme_sbus_driver);

	if (!err)
		err = quattro_sbus_register_irqs();

	return err;
}
static void happy_meal_sbus_exit(void)
{
platform_driver_unregister(&hme_sbus_driver);
quattro_sbus_free_irqs();
while (qfe_sbus_list) {
struct quattro *qfe = qfe_sbus_list;
struct quattro *next = qfe->next;
kfree(qfe);
qfe_sbus_list = next;
}
}
#endif
/* Module init: bring up the SBUS side first (when configured), then the
 * PCI side; if PCI registration fails after SBUS succeeded, the SBUS
 * side is torn down again so the module load fails cleanly.
 */
static int __init happy_meal_probe(void)
{
	int err = 0;

#ifdef CONFIG_SBUS
	err = happy_meal_sbus_init();
#endif
#ifdef CONFIG_PCI
	if (!err) {
		err = happy_meal_pci_init();
#ifdef CONFIG_SBUS
		if (err)
			happy_meal_sbus_exit();
#endif
	}
#endif
	return err;
}
/* Module exit: tear down whichever bus front-ends were configured in. */
static void __exit happy_meal_exit(void)
{
#ifdef CONFIG_SBUS
	happy_meal_sbus_exit();
#endif
#ifdef CONFIG_PCI
	happy_meal_pci_exit();
#endif
}

module_init(happy_meal_probe);
module_exit(happy_meal_exit);
| gpl-2.0 |
NookieDevs/android_kernel_bn_encore | drivers/regulator/db8500-prcmu.c | 2636 | 12887 | /*
* Copyright (C) ST-Ericsson SA 2010
*
* License Terms: GNU General Public License v2
* Authors: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
* Bengt Jonsson <bengt.g.jonsson@stericsson.com> for ST-Ericsson
*
* Power domain regulators on DB8500
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/mfd/db8500-prcmu.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/db8500-prcmu.h>
/*
 * power state reference count
 */
/* Number of currently-enabled regulators that require the "active" power
 * state; protected by power_state_active_lock. */
static int power_state_active_cnt; /* will initialize to zero */
static DEFINE_SPINLOCK(power_state_active_lock);
/* Take one reference on the "active" power state.  Called whenever a
 * non-exempt regulator is enabled. */
static void power_state_active_enable(void)
{
	unsigned long flags;

	spin_lock_irqsave(&power_state_active_lock, flags);
	power_state_active_cnt++;
	spin_unlock_irqrestore(&power_state_active_lock, flags);
}
/* Drop one reference on the "active" power state.  Returns 0, or
 * -EINVAL on an unbalanced disable (count already at zero), in which
 * case the count is left untouched.
 */
static int power_state_active_disable(void)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&power_state_active_lock, flags);
	if (power_state_active_cnt <= 0) {
		pr_err("power state: unbalanced enable/disable calls\n");
		ret = -EINVAL;
	} else {
		power_state_active_cnt--;
	}
	spin_unlock_irqrestore(&power_state_active_lock, flags);

	return ret;
}
/*
 * Exported interface for CPUIdle only. This function is called when interrupts
 * are turned off. Hence, no locking.
 */
/* Returns non-zero while at least one regulator holds the active state. */
int power_state_active_is_enabled(void)
{
	return (power_state_active_cnt > 0);
}
/**
 * struct db8500_regulator_info - db8500 regulator information
 * @dev: device pointer
 * @desc: regulator description
 * @rdev: regulator device pointer
 * @is_enabled: status of the regulator
 * @epod_id: id for EPOD (power domain)
 * @is_ramret: RAM retention switch for EPOD (power domain)
 * @exclude_from_power_state: do not take an "active" power state
 *	reference while this regulator is enabled
 * @operating_point: operating point (only for vape, to be removed)
 *
 */
struct db8500_regulator_info {
	struct device *dev;
	struct regulator_desc desc;
	struct regulator_dev *rdev;
	bool is_enabled;
	u16 epod_id;
	bool is_ramret;
	bool exclude_from_power_state;
	unsigned int operating_point;
};
/* ->enable for the plain (non-switch) regulators: mark the regulator on
 * and, unless exempt, pin the chip in the "active" power state.
 */
static int db8500_regulator_enable(struct regulator_dev *rdev)
{
	struct db8500_regulator_info *ri = rdev_get_drvdata(rdev);

	if (!ri)
		return -EINVAL;

	dev_vdbg(rdev_get_dev(rdev), "regulator-%s-enable\n", ri->desc.name);

	ri->is_enabled = true;
	if (!ri->exclude_from_power_state)
		power_state_active_enable();

	return 0;
}
/* ->disable for the plain regulators: mark the regulator off and drop
 * the "active" power-state reference unless exempt.
 */
static int db8500_regulator_disable(struct regulator_dev *rdev)
{
	struct db8500_regulator_info *ri = rdev_get_drvdata(rdev);

	if (!ri)
		return -EINVAL;

	dev_vdbg(rdev_get_dev(rdev), "regulator-%s-disable\n", ri->desc.name);

	ri->is_enabled = false;
	if (ri->exclude_from_power_state)
		return 0;

	return power_state_active_disable();
}
/* ->is_enabled for the plain regulators: report the software state
 * tracked by enable/disable.
 */
static int db8500_regulator_is_enabled(struct regulator_dev *rdev)
{
	struct db8500_regulator_info *ri = rdev_get_drvdata(rdev);

	if (!ri)
		return -EINVAL;

	dev_vdbg(rdev_get_dev(rdev), "regulator-%s-is_enabled (is_enabled): %i\n",
		ri->desc.name, ri->is_enabled);

	return ri->is_enabled;
}
/* db8500 regulator operations */
static struct regulator_ops db8500_regulator_ops = {
	.enable			= db8500_regulator_enable,
	.disable		= db8500_regulator_disable,
	.is_enabled		= db8500_regulator_is_enabled,
};
/*
 * EPOD control
 */
/* Per-EPOD software bookkeeping: "on" (full power) and "ramret" (RAM
 * retention) requests are tracked independently so either can be dropped
 * without losing the other. */
static bool epod_on[NUM_EPOD_ID];
static bool epod_ramret[NUM_EPOD_ID];
/* Request an EPOD (power domain) state via the PRCMU.  With ramret set,
 * only RAM retention is requested; otherwise the domain is powered on.
 * Returns 0 or the negative error from prcmu_set_epod().
 */
static int enable_epod(u16 epod_id, bool ramret)
{
	int err;

	if (!ramret) {
		err = prcmu_set_epod(epod_id, EPOD_STATE_ON);
		if (err < 0)
			return err;
		epod_on[epod_id] = true;
		return 0;
	}

	/* Only drop to RAM retention if the domain is not already fully
	 * powered on — full power always satisfies a retention request.
	 */
	if (!epod_on[epod_id]) {
		err = prcmu_set_epod(epod_id, EPOD_STATE_RAMRET);
		if (err < 0)
			return err;
	}
	epod_ramret[epod_id] = true;

	return 0;
}
/* Release an EPOD (power domain) request.  With ramret set the RAM
 * retention request is dropped; otherwise the full-power request is
 * dropped, falling back to RAM retention if still wanted or off.
 * Returns 0 or the negative error from prcmu_set_epod().
 */
static int disable_epod(u16 epod_id, bool ramret)
{
	int err;

	if (ramret) {
		/* Power off only when no full-power request holds the
		 * domain up.
		 */
		if (!epod_on[epod_id]) {
			err = prcmu_set_epod(epod_id, EPOD_STATE_OFF);
			if (err < 0)
				return err;
		}
		epod_ramret[epod_id] = false;
		return 0;
	}

	/* Leaving full power: keep RAM retention if it is still
	 * requested, otherwise turn the domain off entirely.
	 */
	err = prcmu_set_epod(epod_id, epod_ramret[epod_id] ?
			     EPOD_STATE_RAMRET : EPOD_STATE_OFF);
	if (err < 0)
		return err;
	epod_on[epod_id] = false;

	return 0;
}
/*
 * Regulator switch
 */
/* ->enable for the EPOD switch regulators: power the domain (or its RAM
 * retention) through the PRCMU.
 */
static int db8500_regulator_switch_enable(struct regulator_dev *rdev)
{
	struct db8500_regulator_info *ri = rdev_get_drvdata(rdev);
	int err;

	if (!ri)
		return -EINVAL;

	dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-enable\n",
		ri->desc.name);

	err = enable_epod(ri->epod_id, ri->is_ramret);
	if (err < 0) {
		dev_err(rdev_get_dev(rdev),
			"regulator-switch-%s-enable: prcmu call failed\n",
			ri->desc.name);
		return err;
	}

	ri->is_enabled = true;
	return 0;
}
static int db8500_regulator_switch_disable(struct regulator_dev *rdev)
{
struct db8500_regulator_info *info = rdev_get_drvdata(rdev);
int ret;
if (info == NULL)
return -EINVAL;
dev_vdbg(rdev_get_dev(rdev), "regulator-switch-%s-disable\n",
info->desc.name);
ret = disable_epod(info->epod_id, info->is_ramret);
if (ret < 0) {
dev_err(rdev_get_dev(rdev),
"regulator_switch-%s-disable: prcmu call failed\n",
info->desc.name);
goto out;
}
info->is_enabled = 0;
out:
return ret;
}
/* ->is_enabled for the EPOD switch regulators: report the software
 * state tracked by the switch enable/disable callbacks.
 */
static int db8500_regulator_switch_is_enabled(struct regulator_dev *rdev)
{
	struct db8500_regulator_info *ri = rdev_get_drvdata(rdev);

	if (!ri)
		return -EINVAL;

	dev_vdbg(rdev_get_dev(rdev),
		"regulator-switch-%s-is_enabled (is_enabled): %i\n",
		ri->desc.name, ri->is_enabled);

	return ri->is_enabled;
}
/* Operations for the EPOD (power domain) switch regulators. */
static struct regulator_ops db8500_regulator_switch_ops = {
	.enable			= db8500_regulator_switch_enable,
	.disable		= db8500_regulator_switch_disable,
	.is_enabled		= db8500_regulator_switch_is_enabled,
};
/*
 * Regulator information
 */
/* Static table of all db8500 regulators, indexed by regulator id.  The
 * first eight are plain voltage regulators using db8500_regulator_ops;
 * the rest are EPOD power-domain switches using
 * db8500_regulator_switch_ops (is_ramret selects the RAM-retention
 * variant of a domain). */
static struct db8500_regulator_info
		db8500_regulator_info[DB8500_NUM_REGULATORS] = {
	[DB8500_REGULATOR_VAPE] = {
		.desc = {
			.name	= "db8500-vape",
			.id	= DB8500_REGULATOR_VAPE,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VARM] = {
		.desc = {
			.name	= "db8500-varm",
			.id	= DB8500_REGULATOR_VARM,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VMODEM] = {
		.desc = {
			.name	= "db8500-vmodem",
			.id	= DB8500_REGULATOR_VMODEM,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VPLL] = {
		.desc = {
			.name	= "db8500-vpll",
			.id	= DB8500_REGULATOR_VPLL,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VSMPS1] = {
		.desc = {
			.name	= "db8500-vsmps1",
			.id	= DB8500_REGULATOR_VSMPS1,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	/* VSMPS2 stays available in low-power states, so it does not pin
	 * the chip in the active power state while enabled. */
	[DB8500_REGULATOR_VSMPS2] = {
		.desc = {
			.name	= "db8500-vsmps2",
			.id	= DB8500_REGULATOR_VSMPS2,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.exclude_from_power_state = true,
	},
	[DB8500_REGULATOR_VSMPS3] = {
		.desc = {
			.name	= "db8500-vsmps3",
			.id	= DB8500_REGULATOR_VSMPS3,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	[DB8500_REGULATOR_VRF1] = {
		.desc = {
			.name	= "db8500-vrf1",
			.id	= DB8500_REGULATOR_VRF1,
			.ops	= &db8500_regulator_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
	},
	/* EPOD power-domain switches follow. */
	[DB8500_REGULATOR_SWITCH_SVAMMDSP] = {
		.desc = {
			.name	= "db8500-sva-mmdsp",
			.id	= DB8500_REGULATOR_SWITCH_SVAMMDSP,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SVAMMDSP,
	},
	[DB8500_REGULATOR_SWITCH_SVAMMDSPRET] = {
		.desc = {
			.name	= "db8500-sva-mmdsp-ret",
			.id	= DB8500_REGULATOR_SWITCH_SVAMMDSPRET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SVAMMDSP,
		.is_ramret = true,
	},
	[DB8500_REGULATOR_SWITCH_SVAPIPE] = {
		.desc = {
			.name	= "db8500-sva-pipe",
			.id	= DB8500_REGULATOR_SWITCH_SVAPIPE,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SVAPIPE,
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSP] = {
		.desc = {
			.name	= "db8500-sia-mmdsp",
			.id	= DB8500_REGULATOR_SWITCH_SIAMMDSP,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SIAMMDSP,
	},
	[DB8500_REGULATOR_SWITCH_SIAMMDSPRET] = {
		.desc = {
			.name	= "db8500-sia-mmdsp-ret",
			.id	= DB8500_REGULATOR_SWITCH_SIAMMDSPRET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SIAMMDSP,
		.is_ramret = true,
	},
	[DB8500_REGULATOR_SWITCH_SIAPIPE] = {
		.desc = {
			.name	= "db8500-sia-pipe",
			.id	= DB8500_REGULATOR_SWITCH_SIAPIPE,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SIAPIPE,
	},
	[DB8500_REGULATOR_SWITCH_SGA] = {
		.desc = {
			.name	= "db8500-sga",
			.id	= DB8500_REGULATOR_SWITCH_SGA,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_SGA,
	},
	[DB8500_REGULATOR_SWITCH_B2R2_MCDE] = {
		.desc = {
			.name	= "db8500-b2r2-mcde",
			.id	= DB8500_REGULATOR_SWITCH_B2R2_MCDE,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_B2R2_MCDE,
	},
	/* The ESRAM banks start out enabled (is_enabled = true). */
	[DB8500_REGULATOR_SWITCH_ESRAM12] = {
		.desc = {
			.name	= "db8500-esram12",
			.id	= DB8500_REGULATOR_SWITCH_ESRAM12,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id	= EPOD_ID_ESRAM12,
		.is_enabled	= true,
	},
	[DB8500_REGULATOR_SWITCH_ESRAM12RET] = {
		.desc = {
			.name	= "db8500-esram12-ret",
			.id	= DB8500_REGULATOR_SWITCH_ESRAM12RET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_ESRAM12,
		.is_ramret = true,
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34] = {
		.desc = {
			.name	= "db8500-esram34",
			.id	= DB8500_REGULATOR_SWITCH_ESRAM34,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id	= EPOD_ID_ESRAM34,
		.is_enabled	= true,
	},
	[DB8500_REGULATOR_SWITCH_ESRAM34RET] = {
		.desc = {
			.name	= "db8500-esram34-ret",
			.id	= DB8500_REGULATOR_SWITCH_ESRAM34RET,
			.ops	= &db8500_regulator_switch_ops,
			.type	= REGULATOR_VOLTAGE,
			.owner	= THIS_MODULE,
		},
		.epod_id = EPOD_ID_ESRAM34,
		.is_ramret = true,
	},
};
/* Platform ->probe: register every regulator in the static table with
 * the regulator framework, pairing each with the matching init_data
 * entry from the platform data.  On any failure, every regulator that
 * was already registered is unregistered again.
 */
static int __devinit db8500_regulator_probe(struct platform_device *pdev)
{
	struct regulator_init_data *init_data = dev_get_platdata(&pdev->dev);
	int i, err;

	for (i = 0; i < ARRAY_SIZE(db8500_regulator_info); i++) {
		struct db8500_regulator_info *info =
			&db8500_regulator_info[i];

		info->dev = &pdev->dev;

		info->rdev = regulator_register(&info->desc, &pdev->dev,
						&init_data[i], info);
		if (IS_ERR(info->rdev)) {
			err = PTR_ERR(info->rdev);
			dev_err(&pdev->dev, "failed to register %s: err %i\n",
				info->desc.name, err);

			/* Unwind every earlier registration. */
			while (--i >= 0)
				regulator_unregister(
					db8500_regulator_info[i].rdev);
			return err;
		}

		dev_dbg(rdev_get_dev(info->rdev),
			"regulator-%s-probed\n", info->desc.name);
	}

	return 0;
}
/* Platform ->remove: unregister every regulator registered at probe. */
static int __exit db8500_regulator_remove(struct platform_device *pdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(db8500_regulator_info); i++) {
		struct db8500_regulator_info *info =
			&db8500_regulator_info[i];

		dev_vdbg(rdev_get_dev(info->rdev),
			"regulator-%s-remove\n", info->desc.name);

		regulator_unregister(info->rdev);
	}

	return 0;
}
/* Platform driver glue; matched by name against the PRCMU MFD cell. */
static struct platform_driver db8500_regulator_driver = {
	.driver = {
		.name = "db8500-prcmu-regulators",
		.owner = THIS_MODULE,
	},
	.probe = db8500_regulator_probe,
	.remove = __exit_p(db8500_regulator_remove),
};
static int __init db8500_regulator_init(void)
{
	return platform_driver_register(&db8500_regulator_driver);
}

static void __exit db8500_regulator_exit(void)
{
	platform_driver_unregister(&db8500_regulator_driver);
}

/* arch_initcall: these regulators must exist before ordinary drivers
 * (plain module_init/device_initcall level) start probing. */
arch_initcall(db8500_regulator_init);
module_exit(db8500_regulator_exit);

MODULE_AUTHOR("STMicroelectronics/ST-Ericsson");
MODULE_DESCRIPTION("DB8500 regulator driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
kevin0100/android_kernel_cyanogen_msm8916 | arch/arm/mach-at91/board-kafa.c | 2636 | 2844 | /*
* linux/arch/arm/mach-at91/board-kafa.c
*
* Copyright (C) 2006 Sperry-Sun
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/types.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <mach/hardware.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>
#include <mach/cpu.h>
#include "at91_aic.h"
#include "board.h"
#include "generic.h"
/* Early machine init: fix the CPU package variant and start the clock
 * framework from the board's crystal frequency. */
static void __init kafa_init_early(void)
{
	/* Set cpu type: PQFP */
	at91rm9200_set_type(ARCH_REVISON_9200_PQFP);

	/* Initialize processor: 18.432 MHz crystal */
	at91_initialize(18432000);
}
/* Ethernet (MACB): PHY interrupt on PC4, MII mode (not RMII). */
static struct macb_platform_data __initdata kafa_eth_data = {
	.phy_irq_pin	= AT91_PIN_PC4,
	.is_rmii	= 0,
};
/* USB host: one port, no VBUS or overcurrent GPIOs wired on this board. */
static struct at91_usbh_data __initdata kafa_usbh_data = {
	.ports		= 1,
	.vbus_pin	= {-EINVAL, -EINVAL},
	.overcurrent_pin= {-EINVAL, -EINVAL},
};
/* USB device: VBUS sense on PB6, D+ pull-up control on PB7. */
static struct at91_udc_data __initdata kafa_udc_data = {
	.vbus_pin	= AT91_PIN_PB6,
	.pullup_pin	= AT91_PIN_PB7,
};
/*
* LEDs
*/
/* Board LEDs: single active-low LED on PB4 driven by the heartbeat trigger. */
static struct gpio_led kafa_leds[] = {
	{	/* D1 */
		.name			= "led1",
		.gpio			= AT91_PIN_PB4,
		.active_low		= 1,
		.default_trigger	= "heartbeat",
	},
};
/*
 * Register all on-board peripherals with the AT91 platform device layer.
 * Called from the machine descriptor's .init_machine hook.
 */
static void __init kafa_board_init(void)
{
	/* Serial */
	/* DBGU on ttyS0. (Rx & Tx only) */
	at91_register_uart(0, 0, 0);

	/* USART0 on ttyS1 (Rx, Tx, CTS, RTS) */
	at91_register_uart(AT91RM9200_ID_US0, 1, ATMEL_UART_CTS | ATMEL_UART_RTS);
	at91_add_device_serial();
	/* Ethernet */
	at91_add_device_eth(&kafa_eth_data);
	/* USB Host */
	at91_add_device_usbh(&kafa_usbh_data);
	/* USB Device */
	at91_add_device_udc(&kafa_udc_data);
	/* I2C */
	at91_add_device_i2c(NULL, 0);
	/* SPI */
	at91_add_device_spi(NULL, 0);
	/* LEDs */
	at91_gpio_leds(kafa_leds, ARRAY_SIZE(kafa_leds));
}
/* Machine descriptor wiring the KAFA board into the ARM boot sequence. */
MACHINE_START(KAFA, "Sperry-Sun KAFA")
	/* Maintainer: Sergei Sharonov */
	.init_time	= at91rm9200_timer_init,
	.map_io		= at91_map_io,
	.handle_irq	= at91_aic_handle_irq,
	.init_early	= kafa_init_early,
	.init_irq	= at91_init_irq_default,
	.init_machine	= kafa_board_init,
MACHINE_END
| gpl-2.0 |
hiikezoe/android_kernel_panasonic_p02e | net/can/gw.c | 3660 | 24103 | /*
* gw.c - CAN frame Gateway/Router/Bridge with netlink interface
*
* Copyright (c) 2011 Volkswagen Group Electronic Research
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Neither the name of Volkswagen nor the names of its contributors
* may be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* Alternatively, provided that this notice is retained in full, this
* software may be distributed under the terms of the GNU General
* Public License ("GPL") version 2, in which case the provisions of the
* GPL apply INSTEAD OF those given above.
*
* The provided data structures and external interfaces from this code
* are not restricted to be used by modules with a GPL compatible license.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/rculist.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <linux/can.h>
#include <linux/can/core.h>
#include <linux/can/gw.h>
#include <net/rtnetlink.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#define CAN_GW_VERSION "20101209"

/* Banner printed once from cgw_module_init(). */
static __initdata const char banner[] =
	KERN_INFO "can: netlink gateway (rev " CAN_GW_VERSION ")\n";

MODULE_DESCRIPTION("PF_CAN netlink gateway");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>");
MODULE_ALIAS("can-gw");

/*
 * List of active gateway jobs. Writers hold the RTNL lock (see the
 * ASSERT_RTNL() calls below); readers traverse it under RCU.
 */
HLIST_HEAD(cgw_list);
static struct notifier_block notifier;

/* Slab cache for struct cgw_job allocations. */
static struct kmem_cache *cgw_cache __read_mostly;
/* structure that contains the (on-the-fly) CAN frame modifications */
struct cf_mod {
	struct {
		struct can_frame and;
		struct can_frame or;
		struct can_frame xor;
		struct can_frame set;
	} modframe;		/* operand frames for each modification type */
	struct {
		u8 and;
		u8 or;
		u8 xor;
		u8 set;
	} modtype;		/* CGW_MOD_{ID,DLC,DATA} bits per operation */

	/*
	 * Preselected modification functions, filled by cgw_parse_attr()
	 * and executed in order in the receive hot path until the first
	 * NULL entry.
	 */
	void (*modfunc[MAX_MODFUNCTIONS])(struct can_frame *cf,
					  struct cf_mod *mod);

	/* CAN frame checksum calculation after CAN frame modifications */
	struct {
		struct cgw_csum_xor xor;
		struct cgw_csum_crc8 crc8;
	} csum;
	struct {
		/* NULL when the respective checksum update is not configured */
		void (*xor)(struct can_frame *cf, struct cgw_csum_xor *xor);
		void (*crc8)(struct can_frame *cf, struct cgw_csum_crc8 *crc8);
	} csumfunc;
};
/*
* So far we just support CAN -> CAN routing and frame modifications.
*
* The internal can_can_gw structure contains data and attributes for
* a CAN -> CAN gateway job.
*/
/* Data and attributes for a CAN -> CAN gateway job. */
struct can_can_gw {
	struct can_filter filter;	/* can_id/can_mask receive filter */
	int src_idx;			/* source netdevice ifindex */
	int dst_idx;			/* destination netdevice ifindex */
};
/* list entry for CAN gateways jobs */
struct cgw_job {
	struct hlist_node list;		/* membership in cgw_list */
	struct rcu_head rcu;
	u32 handled_frames;		/* frames successfully routed */
	u32 dropped_frames;		/* frames dropped (dev down, no mem, ...) */
	struct cf_mod mod;		/* frame modification/checksum setup */
	union {
		/* CAN frame data source */
		struct net_device *dev;
	} src;
	union {
		/* CAN frame data destination */
		struct net_device *dev;
	} dst;
	union {
		struct can_can_gw ccgw;
		/* tbc */
	};
	u8 gwtype;			/* CGW_TYPE_* (only CAN_CAN so far) */
	u16 flags;			/* CGW_FLAGS_* */
};
/* modification functions that are invoked in the hot path in can_can_gw_rcv */
#define MODFUNC(func, op) static void func(struct can_frame *cf, \
					   struct cf_mod *mod) { op ; }

/*
 * Generate one tiny function per (operation x frame-member) combination,
 * so the receive path can run a preselected list of function pointers
 * instead of re-testing modtype bits for every frame. The data variants
 * operate on all 8 payload bytes at once via a u64 access.
 */
MODFUNC(mod_and_id, cf->can_id &= mod->modframe.and.can_id)
MODFUNC(mod_and_dlc, cf->can_dlc &= mod->modframe.and.can_dlc)
MODFUNC(mod_and_data, *(u64 *)cf->data &= *(u64 *)mod->modframe.and.data)
MODFUNC(mod_or_id, cf->can_id |= mod->modframe.or.can_id)
MODFUNC(mod_or_dlc, cf->can_dlc |= mod->modframe.or.can_dlc)
MODFUNC(mod_or_data, *(u64 *)cf->data |= *(u64 *)mod->modframe.or.data)
MODFUNC(mod_xor_id, cf->can_id ^= mod->modframe.xor.can_id)
MODFUNC(mod_xor_dlc, cf->can_dlc ^= mod->modframe.xor.can_dlc)
MODFUNC(mod_xor_data, *(u64 *)cf->data ^= *(u64 *)mod->modframe.xor.data)
MODFUNC(mod_set_id, cf->can_id = mod->modframe.set.can_id)
MODFUNC(mod_set_dlc, cf->can_dlc = mod->modframe.set.can_dlc)
MODFUNC(mod_set_data, *(u64 *)cf->data = *(u64 *)mod->modframe.set.data)
/* Copy a CAN frame member-by-member, skipping the struct's padding hole. */
static inline void canframecpy(struct can_frame *dst, struct can_frame *src)
{
	/*
	 * Copy the struct members separately to ensure that no uninitialized
	 * data are copied in the 3 bytes hole of the struct. This is needed
	 * to make easy compares of the data in the struct cf_mod.
	 */

	dst->can_id = src->can_id;
	dst->can_dlc = src->can_dlc;
	*(u64 *)dst->data = *(u64 *)src->data;
}
/*
 * Validate a (from, to, result) checksum index triple.
 *
 * Each index must address a frame data byte either absolutely (0 .. 7)
 * or relative to the received dlc (-1 .. -8, where -1 is the last byte,
 * e.g. for dlc = 8: -1 -> data[7], -3 -> data[5], -8 -> data[0]).
 *
 * Returns 0 when all three indices are in range, -EINVAL otherwise.
 */
static int cgw_chk_csum_parms(s8 fr, s8 to, s8 re)
{
	if (fr <= -9 || fr >= 8)
		return -EINVAL;

	if (to <= -9 || to >= 8)
		return -EINVAL;

	if (re <= -9 || re >= 8)
		return -EINVAL;

	return 0;
}
/*
 * Map a checksum index to an absolute offset into the frame data.
 * Non-negative indices are absolute already; negative indices count
 * backwards from the received dlc (-1 == last data byte). The result
 * can still be negative for out-of-range relative indices, which the
 * callers check for.
 */
static inline int calc_idx(int idx, int rx_dlc)
{
	return (idx < 0) ? rx_dlc + idx : idx;
}
/*
 * XOR checksum update for configurations with at least one dlc-relative
 * (negative) index: resolve the indices against the received dlc first
 * and bail out silently if any of them falls outside the frame.
 */
static void cgw_csum_xor_rel(struct can_frame *cf, struct cgw_csum_xor *xor)
{
	int from = calc_idx(xor->from_idx, cf->can_dlc);
	int to = calc_idx(xor->to_idx, cf->can_dlc);
	int res = calc_idx(xor->result_idx, cf->can_dlc);
	u8 val = xor->init_xor_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	/* iterate upwards or downwards depending on the resolved range */
	if (from <= to) {
		for (i = from; i <= to; i++)
			val ^= cf->data[i];
	} else {
		for (i = from; i >= to; i--)
			val ^= cf->data[i];
	}

	cf->data[res] = val;
}
/*
 * XOR checksum update for absolute indices with from_idx <= to_idx:
 * fold data[from_idx..to_idx] into the init value and store the result.
 */
static void cgw_csum_xor_pos(struct can_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i = xor->from_idx;

	while (i <= xor->to_idx)
		val ^= cf->data[i++];

	cf->data[xor->result_idx] = val;
}
/*
 * XOR checksum update for absolute indices with from_idx > to_idx:
 * same as cgw_csum_xor_pos() but walking the data bytes downwards.
 */
static void cgw_csum_xor_neg(struct can_frame *cf, struct cgw_csum_xor *xor)
{
	u8 val = xor->init_xor_val;
	int i = xor->from_idx;

	while (i >= xor->to_idx)
		val ^= cf->data[i--];

	cf->data[xor->result_idx] = val;
}
/*
 * CRC8 checksum update for configurations with at least one dlc-relative
 * (negative) index: resolve the indices against the received dlc first
 * and bail out silently if any of them falls outside the frame.
 *
 * Fix: the loops and the result store must use the resolved indices
 * (from/to/res), not the raw crc8->*_idx values. A relative index is
 * negative by definition, so indexing cf->data[] with the raw value
 * accessed memory before the data array (and the result byte was never
 * written to the resolved position). The sibling cgw_csum_xor_rel()
 * already does this correctly.
 */
static void cgw_csum_crc8_rel(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
{
	int from = calc_idx(crc8->from_idx, cf->can_dlc);
	int to = calc_idx(crc8->to_idx, cf->can_dlc);
	int res = calc_idx(crc8->result_idx, cf->can_dlc);
	u8 crc = crc8->init_crc_val;
	int i;

	if (from < 0 || to < 0 || res < 0)
		return;

	/* iterate upwards or downwards depending on the resolved range */
	if (from <= to) {
		for (i = from; i <= to; i++)
			crc = crc8->crctab[crc^cf->data[i]];
	} else {
		for (i = from; i >= to; i--)
			crc = crc8->crctab[crc^cf->data[i]];
	}

	/* fold in the optional checksum profile data */
	switch (crc8->profile) {

	case  CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc^crc8->profile_data[0]];
		break;

	case  CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[res] = crc^crc8->final_xor_val;
}
/*
 * CRC8 checksum update for absolute indices with from_idx <= to_idx:
 * table-driven CRC over data[from_idx..to_idx], optionally extended by
 * a checksum profile, XORed with final_xor_val and stored at result_idx.
 */
static void cgw_csum_crc8_pos(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i <= crc8->to_idx; i++)
		crc = crc8->crctab[crc^cf->data[i]];

	/* fold in the optional checksum profile data */
	switch (crc8->profile) {

	case  CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc^crc8->profile_data[0]];
		break;

	case  CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
}
/*
 * CRC8 checksum update for absolute indices with from_idx > to_idx:
 * same as cgw_csum_crc8_pos() but walking the data bytes downwards.
 */
static void cgw_csum_crc8_neg(struct can_frame *cf, struct cgw_csum_crc8 *crc8)
{
	u8 crc = crc8->init_crc_val;
	int i;

	for (i = crc8->from_idx; i >= crc8->to_idx; i--)
		crc = crc8->crctab[crc^cf->data[i]];

	/* fold in the optional checksum profile data */
	switch (crc8->profile) {

	case  CGW_CRC8PRF_1U8:
		crc = crc8->crctab[crc^crc8->profile_data[0]];
		break;

	case  CGW_CRC8PRF_16U8:
		crc = crc8->crctab[crc^crc8->profile_data[cf->data[1] & 0xF]];
		break;

	case CGW_CRC8PRF_SFFID_XOR:
		crc = crc8->crctab[crc^(cf->can_id & 0xFF)^
				   (cf->can_id >> 8 & 0xFF)];
		break;
	}

	cf->data[crc8->result_idx] = crc^crc8->final_xor_val;
}
/* the receive & process & send function */
static void can_can_gw_rcv(struct sk_buff *skb, void *data)
{
	struct cgw_job *gwj = (struct cgw_job *)data;
	struct can_frame *cf;
	struct sk_buff *nskb;
	int modidx = 0;

	/* do not handle already routed frames - see comment below */
	if (skb_mac_header_was_set(skb))
		return;

	/* destination device must be up; count the frame as dropped if not */
	if (!(gwj->dst.dev->flags & IFF_UP)) {
		gwj->dropped_frames++;
		return;
	}

	/*
	 * clone the given skb, which has not been done in can_rcv()
	 *
	 * When there is at least one modification function activated,
	 * we need to copy the skb as we want to modify skb->data.
	 */
	if (gwj->mod.modfunc[0])
		nskb = skb_copy(skb, GFP_ATOMIC);
	else
		nskb = skb_clone(skb, GFP_ATOMIC);

	if (!nskb) {
		gwj->dropped_frames++;
		return;
	}

	/*
	 * Mark routed frames by setting some mac header length which is
	 * not relevant for the CAN frames located in the skb->data section.
	 *
	 * As dev->header_ops is not set in CAN netdevices no one is ever
	 * accessing the various header offsets in the CAN skbuffs anyway.
	 * E.g. using the packet socket to read CAN frames is still working.
	 */
	skb_set_mac_header(nskb, 8);
	nskb->dev = gwj->dst.dev;

	/* pointer to modifiable CAN frame */
	cf = (struct can_frame *)nskb->data;

	/* perform preprocessed modification functions if there are any */
	while (modidx < MAX_MODFUNCTIONS && gwj->mod.modfunc[modidx])
		(*gwj->mod.modfunc[modidx++])(cf, &gwj->mod);

	/* check for checksum updates when the CAN frame has been modified */
	if (modidx) {
		if (gwj->mod.csumfunc.crc8)
			(*gwj->mod.csumfunc.crc8)(cf, &gwj->mod.csum.crc8);

		if (gwj->mod.csumfunc.xor)
			(*gwj->mod.csumfunc.xor)(cf, &gwj->mod.csum.xor);
	}

	/* clear the skb timestamp if not configured the other way */
	if (!(gwj->flags & CGW_FLAGS_CAN_SRC_TSTAMP))
		nskb->tstamp.tv64 = 0;

	/* send to netdevice */
	if (can_send(nskb, gwj->flags & CGW_FLAGS_CAN_ECHO))
		gwj->dropped_frames++;
	else
		gwj->handled_frames++;
}
/* Attach the job's receive filter to its source device. Returns 0 or a
 * negative error from can_rx_register(). */
static inline int cgw_register_filter(struct cgw_job *gwj)
{
	return can_rx_register(gwj->src.dev, gwj->ccgw.filter.can_id,
			       gwj->ccgw.filter.can_mask, can_can_gw_rcv,
			       gwj, "gw");
}
/* Detach the job's receive filter from its source device (counterpart of
 * cgw_register_filter(); must use the identical filter parameters). */
static inline void cgw_unregister_filter(struct cgw_job *gwj)
{
	can_rx_unregister(gwj->src.dev, gwj->ccgw.filter.can_id,
			  gwj->ccgw.filter.can_mask, can_can_gw_rcv, gwj);
}
/*
 * Netdevice notifier: when a CAN device in the initial namespace goes
 * away, tear down every gateway job that references it as source or
 * destination. Runs under RTNL, which protects cgw_list writers.
 */
static int cgw_notifier(struct notifier_block *nb,
			unsigned long msg, void *data)
{
	struct net_device *dev = (struct net_device *)data;

	if (!net_eq(dev_net(dev), &init_net))
		return NOTIFY_DONE;
	if (dev->type != ARPHRD_CAN)
		return NOTIFY_DONE;

	if (msg == NETDEV_UNREGISTER) {

		struct cgw_job *gwj = NULL;
		struct hlist_node *n, *nx;

		ASSERT_RTNL();

		hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {

			if (gwj->src.dev == dev || gwj->dst.dev == dev) {
				hlist_del(&gwj->list);
				cgw_unregister_filter(gwj);
				kfree(gwj);
			}
		}
	}

	return NOTIFY_DONE;
}
/*
 * Fill one netlink message describing a gateway job: rtcanmsg header,
 * statistics, frame modifications, checksum setups and (for CAN->CAN
 * jobs) filter and interface indices. Only non-default attributes are
 * emitted; nlmsg_len is grown by hand for each attribute added.
 * Returns skb->len on success or -EMSGSIZE if the skb ran out of room.
 */
static int cgw_put_job(struct sk_buff *skb, struct cgw_job *gwj)
{
	struct cgw_frame_mod mb;
	struct rtcanmsg *rtcan;
	struct nlmsghdr *nlh = nlmsg_put(skb, 0, 0, 0, sizeof(*rtcan), 0);
	if (!nlh)
		return -EMSGSIZE;

	rtcan = nlmsg_data(nlh);
	rtcan->can_family = AF_CAN;
	rtcan->gwtype = gwj->gwtype;
	rtcan->flags = gwj->flags;

	/* add statistics if available */

	if (gwj->handled_frames) {
		if (nla_put_u32(skb, CGW_HANDLED, gwj->handled_frames) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
	}

	if (gwj->dropped_frames) {
		if (nla_put_u32(skb, CGW_DROPPED, gwj->dropped_frames) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
	}

	/* check non default settings of attributes */

	if (gwj->mod.modtype.and) {
		memcpy(&mb.cf, &gwj->mod.modframe.and, sizeof(mb.cf));
		mb.modtype = gwj->mod.modtype.and;
		if (nla_put(skb, CGW_MOD_AND, sizeof(mb), &mb) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
	}

	if (gwj->mod.modtype.or) {
		memcpy(&mb.cf, &gwj->mod.modframe.or, sizeof(mb.cf));
		mb.modtype = gwj->mod.modtype.or;
		if (nla_put(skb, CGW_MOD_OR, sizeof(mb), &mb) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
	}

	if (gwj->mod.modtype.xor) {
		memcpy(&mb.cf, &gwj->mod.modframe.xor, sizeof(mb.cf));
		mb.modtype = gwj->mod.modtype.xor;
		if (nla_put(skb, CGW_MOD_XOR, sizeof(mb), &mb) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
	}

	if (gwj->mod.modtype.set) {
		memcpy(&mb.cf, &gwj->mod.modframe.set, sizeof(mb.cf));
		mb.modtype = gwj->mod.modtype.set;
		if (nla_put(skb, CGW_MOD_SET, sizeof(mb), &mb) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(mb));
	}

	/* a non-NULL csumfunc pointer means the checksum op is configured */
	if (gwj->mod.csumfunc.crc8) {
		if (nla_put(skb, CGW_CS_CRC8, CGW_CS_CRC8_LEN,
			    &gwj->mod.csum.crc8) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + \
				NLA_ALIGN(CGW_CS_CRC8_LEN);
	}

	if (gwj->mod.csumfunc.xor) {
		if (nla_put(skb, CGW_CS_XOR, CGW_CS_XOR_LEN,
			    &gwj->mod.csum.xor) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + \
				NLA_ALIGN(CGW_CS_XOR_LEN);
	}

	if (gwj->gwtype == CGW_TYPE_CAN_CAN) {

		if (gwj->ccgw.filter.can_id || gwj->ccgw.filter.can_mask) {
			if (nla_put(skb, CGW_FILTER, sizeof(struct can_filter),
				    &gwj->ccgw.filter) < 0)
				goto cancel;
			else
				nlh->nlmsg_len += NLA_HDRLEN +
					NLA_ALIGN(sizeof(struct can_filter));
		}

		if (nla_put_u32(skb, CGW_SRC_IF, gwj->ccgw.src_idx) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));

		if (nla_put_u32(skb, CGW_DST_IF, gwj->ccgw.dst_idx) < 0)
			goto cancel;
		else
			nlh->nlmsg_len += NLA_HDRLEN + NLA_ALIGN(sizeof(u32));
	}

	return skb->len;

cancel:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
/* Dump information about all CAN gateway jobs, in response to RTM_GETROUTE */
static int cgw_dump_jobs(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct cgw_job *gwj = NULL;
	struct hlist_node *n;
	int idx = 0;
	/* resume position from a previous (partial) dump */
	int s_idx = cb->args[0];

	rcu_read_lock();
	hlist_for_each_entry_rcu(gwj, n, &cgw_list, list) {
		if (idx < s_idx)
			goto cont;

		/* stop when the skb is full; idx is saved for the next call */
		if (cgw_put_job(skb, gwj) < 0)
			break;
cont:
		idx++;
	}
	rcu_read_unlock();

	cb->args[0] = idx;

	return skb->len;
}
/* check for common and gwtype specific attributes */
/*
 * Parse the netlink attributes of an RTM_NEWROUTE/RTM_DELROUTE request
 * into a fully initialized struct cf_mod plus the gwtype specific data
 * (struct can_can_gw for CGW_TYPE_CAN_CAN, passed via gwtypeattr).
 * Modification functions and checksum functions are preselected here so
 * the receive hot path only walks function pointer arrays.
 * Returns 0 on success or a negative errno.
 */
static int cgw_parse_attr(struct nlmsghdr *nlh, struct cf_mod *mod,
			  u8 gwtype, void *gwtypeattr)
{
	struct nlattr *tb[CGW_MAX+1];
	struct cgw_frame_mod mb;
	int modidx = 0;
	int err = 0;

	/* initialize modification & checksum data space */
	memset(mod, 0, sizeof(*mod));

	err = nlmsg_parse(nlh, sizeof(struct rtcanmsg), tb, CGW_MAX, NULL);
	if (err < 0)
		return err;

	/* check for AND/OR/XOR/SET modifications */

	if (tb[CGW_MOD_AND] &&
	    nla_len(tb[CGW_MOD_AND]) == CGW_MODATTR_LEN) {
		nla_memcpy(&mb, tb[CGW_MOD_AND], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.and, &mb.cf);
		mod->modtype.and = mb.modtype;

		if (mb.modtype & CGW_MOD_ID)
			mod->modfunc[modidx++] = mod_and_id;

		if (mb.modtype & CGW_MOD_DLC)
			mod->modfunc[modidx++] = mod_and_dlc;

		if (mb.modtype & CGW_MOD_DATA)
			mod->modfunc[modidx++] = mod_and_data;
	}

	if (tb[CGW_MOD_OR] &&
	    nla_len(tb[CGW_MOD_OR]) == CGW_MODATTR_LEN) {
		nla_memcpy(&mb, tb[CGW_MOD_OR], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.or, &mb.cf);
		mod->modtype.or = mb.modtype;

		if (mb.modtype & CGW_MOD_ID)
			mod->modfunc[modidx++] = mod_or_id;

		if (mb.modtype & CGW_MOD_DLC)
			mod->modfunc[modidx++] = mod_or_dlc;

		if (mb.modtype & CGW_MOD_DATA)
			mod->modfunc[modidx++] = mod_or_data;
	}

	if (tb[CGW_MOD_XOR] &&
	    nla_len(tb[CGW_MOD_XOR]) == CGW_MODATTR_LEN) {
		nla_memcpy(&mb, tb[CGW_MOD_XOR], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.xor, &mb.cf);
		mod->modtype.xor = mb.modtype;

		if (mb.modtype & CGW_MOD_ID)
			mod->modfunc[modidx++] = mod_xor_id;

		if (mb.modtype & CGW_MOD_DLC)
			mod->modfunc[modidx++] = mod_xor_dlc;

		if (mb.modtype & CGW_MOD_DATA)
			mod->modfunc[modidx++] = mod_xor_data;
	}

	if (tb[CGW_MOD_SET] &&
	    nla_len(tb[CGW_MOD_SET]) == CGW_MODATTR_LEN) {
		nla_memcpy(&mb, tb[CGW_MOD_SET], CGW_MODATTR_LEN);

		canframecpy(&mod->modframe.set, &mb.cf);
		mod->modtype.set = mb.modtype;

		if (mb.modtype & CGW_MOD_ID)
			mod->modfunc[modidx++] = mod_set_id;

		if (mb.modtype & CGW_MOD_DLC)
			mod->modfunc[modidx++] = mod_set_dlc;

		if (mb.modtype & CGW_MOD_DATA)
			mod->modfunc[modidx++] = mod_set_data;
	}

	/* check for checksum operations after CAN frame modifications */
	if (modidx) {

		if (tb[CGW_CS_CRC8] &&
		    nla_len(tb[CGW_CS_CRC8]) == CGW_CS_CRC8_LEN) {

			struct cgw_csum_crc8 *c = (struct cgw_csum_crc8 *)\
				nla_data(tb[CGW_CS_CRC8]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx);
			if (err)
				return err;

			nla_memcpy(&mod->csum.crc8, tb[CGW_CS_CRC8],
				   CGW_CS_CRC8_LEN);

			/*
			 * select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.crc8 = cgw_csum_crc8_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.crc8 = cgw_csum_crc8_pos;
			else
				mod->csumfunc.crc8 = cgw_csum_crc8_neg;
		}

		if (tb[CGW_CS_XOR] &&
		    nla_len(tb[CGW_CS_XOR]) == CGW_CS_XOR_LEN) {

			struct cgw_csum_xor *c = (struct cgw_csum_xor *)\
				nla_data(tb[CGW_CS_XOR]);

			err = cgw_chk_csum_parms(c->from_idx, c->to_idx,
						 c->result_idx);
			if (err)
				return err;

			nla_memcpy(&mod->csum.xor, tb[CGW_CS_XOR],
				   CGW_CS_XOR_LEN);

			/*
			 * select dedicated processing function to reduce
			 * runtime operations in receive hot path.
			 */
			if (c->from_idx < 0 || c->to_idx < 0 ||
			    c->result_idx < 0)
				mod->csumfunc.xor = cgw_csum_xor_rel;
			else if (c->from_idx <= c->to_idx)
				mod->csumfunc.xor = cgw_csum_xor_pos;
			else
				mod->csumfunc.xor = cgw_csum_xor_neg;
		}
	}

	if (gwtype == CGW_TYPE_CAN_CAN) {

		/* check CGW_TYPE_CAN_CAN specific attributes */

		struct can_can_gw *ccgw = (struct can_can_gw *)gwtypeattr;
		memset(ccgw, 0, sizeof(*ccgw));

		/* check for can_filter in attributes */
		if (tb[CGW_FILTER] &&
		    nla_len(tb[CGW_FILTER]) == sizeof(struct can_filter))
			nla_memcpy(&ccgw->filter, tb[CGW_FILTER],
				   sizeof(struct can_filter));

		err = -ENODEV;

		/* specifying two interfaces is mandatory */
		if (!tb[CGW_SRC_IF] || !tb[CGW_DST_IF])
			return err;

		if (nla_len(tb[CGW_SRC_IF]) == sizeof(u32))
			nla_memcpy(&ccgw->src_idx, tb[CGW_SRC_IF],
				   sizeof(u32));

		if (nla_len(tb[CGW_DST_IF]) == sizeof(u32))
			nla_memcpy(&ccgw->dst_idx, tb[CGW_DST_IF],
				   sizeof(u32));

		/* both indices set to 0 for flushing all routing entries */
		if (!ccgw->src_idx && !ccgw->dst_idx)
			return 0;

		/* only one index set to 0 is an error */
		if (!ccgw->src_idx || !ccgw->dst_idx)
			return err;
	}

	/* add the checks for other gwtypes here */

	return 0;
}
/*
 * RTM_NEWROUTE handler: allocate and register a new CAN->CAN gateway
 * job from the netlink request. The source/destination netdevices must
 * exist, be CAN devices and have no header_ops (see can_can_gw_rcv()).
 * The device references are only held during setup and dropped again
 * via the goto cleanup chain. Returns 0 or a negative errno.
 */
static int cgw_create_job(struct sk_buff *skb,  struct nlmsghdr *nlh,
			  void *arg)
{
	struct rtcanmsg *r;
	struct cgw_job *gwj;
	int err = 0;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	gwj = kmem_cache_alloc(cgw_cache, GFP_KERNEL);
	if (!gwj)
		return -ENOMEM;

	gwj->handled_frames = 0;
	gwj->dropped_frames = 0;
	gwj->flags = r->flags;
	gwj->gwtype = r->gwtype;

	err = cgw_parse_attr(nlh, &gwj->mod, CGW_TYPE_CAN_CAN, &gwj->ccgw);
	if (err < 0)
		goto out;

	err = -ENODEV;

	/* ifindex == 0 is not allowed for job creation */
	if (!gwj->ccgw.src_idx || !gwj->ccgw.dst_idx)
		goto out;

	gwj->src.dev = dev_get_by_index(&init_net, gwj->ccgw.src_idx);

	if (!gwj->src.dev)
		goto out;

	/* check for CAN netdev not using header_ops - see gw_rcv() */
	if (gwj->src.dev->type != ARPHRD_CAN || gwj->src.dev->header_ops)
		goto put_src_out;

	gwj->dst.dev = dev_get_by_index(&init_net, gwj->ccgw.dst_idx);

	if (!gwj->dst.dev)
		goto put_src_out;

	/* check for CAN netdev not using header_ops - see gw_rcv() */
	if (gwj->dst.dev->type != ARPHRD_CAN || gwj->dst.dev->header_ops)
		goto put_src_dst_out;

	ASSERT_RTNL();

	err = cgw_register_filter(gwj);
	if (!err)
		hlist_add_head_rcu(&gwj->list, &cgw_list);

put_src_dst_out:
	dev_put(gwj->dst.dev);
put_src_out:
	dev_put(gwj->src.dev);
out:
	if (err)
		kmem_cache_free(cgw_cache, gwj);

	return err;
}
/* Remove and free every gateway job. Caller must hold the RTNL lock. */
static void cgw_remove_all_jobs(void)
{
	struct cgw_job *gwj = NULL;
	struct hlist_node *n, *nx;

	ASSERT_RTNL();

	hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {
		hlist_del(&gwj->list);
		cgw_unregister_filter(gwj);
		kfree(gwj);
	}
}
/*
 * RTM_DELROUTE handler: remove the first job whose flags, modification
 * setup and CAN->CAN parameters all match the request. Both ifindices
 * set to 0 flushes every job instead. Returns 0 or a negative errno.
 */
static int cgw_remove_job(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)
{
	struct cgw_job *gwj = NULL;
	struct hlist_node *n, *nx;
	struct rtcanmsg *r;
	struct cf_mod mod;
	struct can_can_gw ccgw;
	int err = 0;

	if (nlmsg_len(nlh) < sizeof(*r))
		return -EINVAL;

	r = nlmsg_data(nlh);
	if (r->can_family != AF_CAN)
		return -EPFNOSUPPORT;

	/* so far we only support CAN -> CAN routings */
	if (r->gwtype != CGW_TYPE_CAN_CAN)
		return -EINVAL;

	err = cgw_parse_attr(nlh, &mod, CGW_TYPE_CAN_CAN, &ccgw);
	if (err < 0)
		return err;

	/* two interface indices both set to 0 => remove all entries */
	if (!ccgw.src_idx && !ccgw.dst_idx) {
		cgw_remove_all_jobs();
		return 0;
	}

	err = -EINVAL;

	ASSERT_RTNL();

	/* remove only the first matching entry */
	hlist_for_each_entry_safe(gwj, n, nx, &cgw_list, list) {

		if (gwj->flags != r->flags)
			continue;

		/* whole-struct compare works because cgw_parse_attr()
		 * zeroes the structs and canframecpy() skips padding */
		if (memcmp(&gwj->mod, &mod, sizeof(mod)))
			continue;

		/* if (r->gwtype == CGW_TYPE_CAN_CAN) - is made sure here */
		if (memcmp(&gwj->ccgw, &ccgw, sizeof(ccgw)))
			continue;

		hlist_del(&gwj->list);
		cgw_unregister_filter(gwj);
		kfree(gwj);
		err = 0;
		break;
	}

	return err;
}
/*
 * Module init: create the job slab cache, hook the netdevice notifier
 * and register the three rtnetlink handlers for PF_CAN.
 */
static __init int cgw_module_init(void)
{
	printk(banner);

	cgw_cache = kmem_cache_create("can_gw", sizeof(struct cgw_job),
				      0, 0, NULL);

	if (!cgw_cache)
		return -ENOMEM;

	/* set notifier */
	notifier.notifier_call = cgw_notifier;
	register_netdevice_notifier(&notifier);

	if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
		unregister_netdevice_notifier(&notifier);
		kmem_cache_destroy(cgw_cache);
		return -ENOBUFS;
	}

	/* Only the first call to __rtnl_register can fail */
	__rtnl_register(PF_CAN, RTM_NEWROUTE, cgw_create_job, NULL, NULL);
	__rtnl_register(PF_CAN, RTM_DELROUTE, cgw_remove_job, NULL, NULL);

	return 0;
}
/* Module exit: undo cgw_module_init() and free all remaining jobs. */
static __exit void cgw_module_exit(void)
{
	rtnl_unregister_all(PF_CAN);

	unregister_netdevice_notifier(&notifier);

	rtnl_lock();
	cgw_remove_all_jobs();
	rtnl_unlock();

	rcu_barrier(); /* Wait for completion of call_rcu()'s */

	kmem_cache_destroy(cgw_cache);
}
module_init(cgw_module_init);
module_exit(cgw_module_exit);
| gpl-2.0 |
hallovveen31/ICED_KERNEL | arch/arm/mach-u300/padmux.c | 4428 | 7965 | /*
*
* arch/arm/mach-u300/padmux.c
*
*
* Copyright (C) 2009 ST-Ericsson AB
* License terms: GNU General Public License (GPL) version 2
* U300 PADMUX functions
* Author: Martin Persson <martin.persson@stericsson.com>
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/string.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <mach/u300-regs.h>
#include <mach/syscon.h>
#include "padmux.h"
/* Serializes all accesses to the pmx settings and PMC registers. */
static DEFINE_MUTEX(pmx_mutex);

/*
 * Virtual addresses of the five PMC pad-mux control registers, indexed
 * in the same order as the onmask[] array in struct pmx.
 */
const u32 pmx_registers[] = {
	(U300_SYSCON_VBASE + U300_SYSCON_PMC1LR),
	(U300_SYSCON_VBASE + U300_SYSCON_PMC1HR),
	(U300_SYSCON_VBASE + U300_SYSCON_PMC2R),
	(U300_SYSCON_VBASE + U300_SYSCON_PMC3R),
	(U300_SYSCON_VBASE + U300_SYSCON_PMC4R)
};
/* High level functionality */
/* Lazy dog:
* onmask = {
* {"PMC1LR" mask, "PMC1LR" value},
* {"PMC1HR" mask, "PMC1HR" value},
* {"PMC2R" mask, "PMC2R" value},
* {"PMC3R" mask, "PMC3R" value},
* {"PMC4R" mask, "PMC4R" value}
* }
*/
/* Pad-mux setting routing the pads to the MMC/SD controller. */
static struct pmx mmc_setting = {
	.setting = U300_APP_PMX_MMC_SETTING,
	.default_on = false,
	.activated = false,
	.name = "MMC",
	.onmask = {
		   {U300_SYSCON_PMC1LR_MMCSD_MASK,
		    U300_SYSCON_PMC1LR_MMCSD_MMCSD},
		   {0, 0},
		   {0, 0},
		   {0, 0},
		   {U300_SYSCON_PMC4R_APP_MISC_12_MASK,
		    U300_SYSCON_PMC4R_APP_MISC_12_APP_GPIO}
		   },
};
/* Pad-mux setting routing the pads to the SPI controller (port 2 + CS1/CS2). */
static struct pmx spi_setting = {
	.setting = U300_APP_PMX_SPI_SETTING,
	.default_on = false,
	.activated = false,
	.name = "SPI",
	.onmask = {{0, 0},
		   {U300_SYSCON_PMC1HR_APP_SPI_2_MASK |
		    U300_SYSCON_PMC1HR_APP_SPI_CS_1_MASK |
		    U300_SYSCON_PMC1HR_APP_SPI_CS_2_MASK,
		    U300_SYSCON_PMC1HR_APP_SPI_2_SPI |
		    U300_SYSCON_PMC1HR_APP_SPI_CS_1_SPI |
		    U300_SYSCON_PMC1HR_APP_SPI_CS_2_SPI},
		   {0, 0},
		   {0, 0},
		   {0, 0}
		   },
};
/* Available padmux settings */
static struct pmx *pmx_settings[] = {
	&mmc_setting,
	&spi_setting,
};
/*
 * Apply (or clear, when activate is false) a pad-mux setting in the
 * hardware: for every PMC register touched by the setting, read-modify-
 * write only the bits covered by that register's mask.
 * Caller must hold pmx_mutex.
 */
static void update_registers(struct pmx *pmx, bool activate)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pmx_registers); i++) {
		u16 mask = pmx->onmask[i].mask;
		u16 regval;

		/* setting does not touch this register */
		if (mask == 0)
			continue;

		regval = readw(pmx_registers[i]) & ~mask;
		if (activate)
			regval |= pmx->onmask[i].val;
		writew(regval, pmx_registers[i]);
	}
}
/*
 * Look up a pmx setting by id and bind it to the given consumer device.
 * Returns the bound setting, ERR_PTR(-EINVAL) for a NULL dev, or
 * ERR_PTR(-ENOENT) if the setting does not exist or (with a WARN) is
 * already bound to another consumer.
 */
struct pmx *pmx_get(struct device *dev, enum pmx_settings setting)
{
	int i;
	struct pmx *pmx = ERR_PTR(-ENOENT);

	if (dev == NULL)
		return ERR_PTR(-EINVAL);

	mutex_lock(&pmx_mutex);
	for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
		if (setting == pmx_settings[i]->setting) {
			if (pmx_settings[i]->dev != NULL) {
				WARN(1, "padmux: required setting "
				     "in use by another consumer\n");
			} else {
				pmx = pmx_settings[i];
				pmx->dev = dev;
				dev_dbg(dev, "padmux: setting nr %d is now "
					"bound to %s and ready to use\n",
					setting, dev_name(dev));
				break;
			}
		}
	}
	mutex_unlock(&pmx_mutex);

	return pmx;
}
EXPORT_SYMBOL(pmx_get);
/*
 * Release a pmx handle previously obtained via pmx_get(). Only the
 * consumer that owns the binding may release it. Returns 0 on success,
 * -EINVAL for bad arguments or foreign ownership, -ENOENT if the
 * setting is unknown.
 */
int pmx_put(struct device *dev, struct pmx *pmx)
{
	int i;
	int ret = -ENOENT;

	if (pmx == NULL || dev == NULL)
		return -EINVAL;

	mutex_lock(&pmx_mutex);
	for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
		if (pmx->setting == pmx_settings[i]->setting) {

			if (dev != pmx->dev) {
				WARN(1, "padmux: cannot release handle as "
					"it is bound to another consumer\n");
				ret = -EINVAL;
				break;
			} else {
				pmx_settings[i]->dev = NULL;
				ret = 0;
				break;
			}
		}
	}
	mutex_unlock(&pmx_mutex);

	return ret;
}
EXPORT_SYMBOL(pmx_put);
/*
 * Program a bound pmx setting into the hardware. Fails with -EUSERS
 * (and a WARN) if any other bound setting already claims bits this one
 * needs, so two consumers can never fight over the same pad.
 * Returns 0 on success or a negative errno.
 */
int pmx_activate(struct device *dev, struct pmx *pmx)
{
	int i, j, ret;
	ret = 0;

	if (pmx == NULL || dev == NULL)
		return -EINVAL;

	mutex_lock(&pmx_mutex);

	/* Make sure the required bits are not used */
	for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
		if (pmx_settings[i]->dev == NULL || pmx_settings[i] == pmx)
			continue;

		for (j = 0; j < ARRAY_SIZE(pmx_registers); j++) {

			if (pmx_settings[i]->onmask[j].mask & pmx->
				onmask[j].mask) {
				/* More than one entry on the same bits */
				WARN(1, "padmux: cannot activate "
					"setting. Bit conflict with "
					"an active setting\n");

				ret = -EUSERS;
				goto exit;
			}
		}
	}
	update_registers(pmx, true);
	pmx->activated = true;
	dev_dbg(dev, "padmux: setting nr %d is activated\n",
		pmx->setting);

exit:
	mutex_unlock(&pmx_mutex);
	return ret;
}
EXPORT_SYMBOL(pmx_activate);
/*
 * Clear an activated pmx setting from the hardware. Only the consumer
 * that owns the binding may deactivate it. Returns 0 on success,
 * -EBUSY for foreign ownership, -ENOENT if no matching bound setting
 * was found, -EINVAL for bad arguments.
 */
int pmx_deactivate(struct device *dev, struct pmx *pmx)
{
	int i;
	int ret = -ENOENT;

	if (pmx == NULL || dev == NULL)
		return -EINVAL;

	mutex_lock(&pmx_mutex);
	for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
		/* only bound settings are candidates */
		if (pmx_settings[i]->dev == NULL)
			continue;

		if (pmx->setting == pmx_settings[i]->setting) {

			if (dev != pmx->dev) {
				WARN(1, "padmux: cannot deactivate "
				     "pmx setting as it was activated "
				     "by another consumer\n");

				ret = -EBUSY;
				continue;
			} else {
				update_registers(pmx, false);
				pmx_settings[i]->dev = NULL;
				pmx->activated = false;
				ret = 0;

				dev_dbg(dev, "padmux: setting nr %d is deactivated",
					pmx->setting);
				break;
			}
		}
	}
	mutex_unlock(&pmx_mutex);
	return ret;
}
EXPORT_SYMBOL(pmx_deactivate);
/*
* For internal use only. If it is to be exported,
* it should be reentrant. Notice that pmx_activate
* (i.e. runtime settings) always override default settings.
*/
/*
 * For internal use only. If it is to be exported,
 * it should be reentrant. Notice that pmx_activate
 * (i.e. runtime settings) always override default settings.
 */
/* Program every setting marked default_on, refusing overlapping bits. */
static int pmx_set_default(void)
{
	/* Used to identify several entries on the same bits */
	u16 modbits[ARRAY_SIZE(pmx_registers)];

	int i, j;

	memset(modbits, 0, ARRAY_SIZE(pmx_registers) * sizeof(u16));

	for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {

		if (!pmx_settings[i]->default_on)
			continue;

		for (j = 0; j < ARRAY_SIZE(pmx_registers); j++) {

			/* Make sure there is only one entry on the same bits */
			if (modbits[j] & pmx_settings[i]->onmask[j].mask) {
				BUG();
				return -EUSERS;
			}
			modbits[j] |= pmx_settings[i]->onmask[j].mask;
		}
		update_registers(pmx_settings[i], true);
	}
	return 0;
}
#if (defined(CONFIG_DEBUG_FS) && defined(CONFIG_U300_DEBUG))
/* seq_file show callback: print a fixed-width table of all pmx settings,
 * their bound consumer (if any) and activation state. */
static int pmx_show(struct seq_file *s, void *data)
{
	int i;
	seq_printf(s, "-------------------------------------------------\n");
	seq_printf(s, "SETTING     BOUND TO DEVICE                STATE\n");
	seq_printf(s, "-------------------------------------------------\n");
	mutex_lock(&pmx_mutex);
	for (i = 0; i < ARRAY_SIZE(pmx_settings); i++) {
		/* Format pmx and device name nicely */
		char cdp[33];
		int chars;

		/* pad the setting name to 16 columns with spaces */
		chars = snprintf(&cdp[0], 17, "%s", pmx_settings[i]->name);
		while (chars < 16) {
			cdp[chars] = ' ';
			chars++;
		}
		/* pad the consumer name into the next 16 columns */
		chars = snprintf(&cdp[16], 17, "%s", pmx_settings[i]->dev ?
				dev_name(pmx_settings[i]->dev) : "N/A");
		while (chars < 16) {
			cdp[chars+16] = ' ';
			chars++;
		}
		cdp[32] = '\0';

		seq_printf(s,
			"%s\t%s\n",
			&cdp[0],
			pmx_settings[i]->activated ?
			"ACTIVATED" : "DEACTIVATED"
			);

	}
	mutex_unlock(&pmx_mutex);
	return 0;
}
/* debugfs open callback: wire the file up to pmx_show() via seq_file. */
static int pmx_open(struct inode *inode, struct file *file)
{
	return single_open(file, pmx_show, NULL);
}
/* File operations for the read-only "padmux" debugfs entry. */
static const struct file_operations pmx_operations = {
	.owner		= THIS_MODULE,
	.open		= pmx_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init init_pmx_read_debugfs(void)
{
	/* Expose a simple debugfs interface to view pmx settings */
	/* Failure is deliberately ignored: debugfs is optional. */
	(void) debugfs_create_file("padmux", S_IFREG | S_IRUGO,
				   NULL, NULL,
				   &pmx_operations);
	return 0;
}
/*
* This needs to come in after the core_initcall(),
* because debugfs is not available until
* the subsystems come up.
*/
module_init(init_pmx_read_debugfs);
#endif
/* Apply the compile-time default pad-mux settings at boot; log loudly on
 * failure but keep booting. */
static int __init pmx_init(void)
{
	int ret;

	ret = pmx_set_default();

	if (IS_ERR_VALUE(ret))
		pr_crit("padmux: default settings could not be set\n");

	return 0;
}
/* Should be initialized before consumers */
core_initcall(pmx_init);
| gpl-2.0 |
SlimSaber/kernel_sony_msm8974 | net/mac80211/mesh_pathtbl.c | 4684 | 29862 | /*
* Copyright (c) 2008, 2009 open80211s Ltd.
* Author: Luis Carlos Cobo <luisca@cozybit.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"
#ifdef CONFIG_MAC80211_VERBOSE_MPATH_DEBUG
#define mpath_dbg(fmt, args...) printk(KERN_DEBUG fmt, ##args)
#else
#define mpath_dbg(fmt, args...) do { (void)(0); } while (0)
#endif
/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER 2
/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN 2
#define MPATH_EXPIRED(mpath) ((mpath->flags & MESH_PATH_ACTIVE) && \
time_after(jiffies, mpath->exp_time) && \
!(mpath->flags & MESH_PATH_FIXED))
/* Hash-chain wrapper around a mesh_path.
 * This indirection allows two different tables to point to the same
 * mesh_path structure, which is useful while resizing the table. */
struct mpath_node {
	struct hlist_node list;		/* chain in a hash bucket / gate list */
	struct rcu_head rcu;		/* for deferred (RCU) freeing */
	struct mesh_path *mpath;	/* the path this node refers to */
};
static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */
int mesh_paths_generation;
/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires we take
 * the read lock or we risk operating on an old table. The write lock is only
 * needed when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
/* Fetch the mesh path table; caller must hold pathtbl_resize_lock. */
static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}
/* Fetch the MPP (proxy) path table; caller must hold pathtbl_resize_lock. */
static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}
/*
* CAREFUL -- "tbl" must not be an expression,
* in particular not an rcu_dereference(), since
* it's used twice. So it is illegal to do
* for_each_mesh_entry(rcu_dereference(...), ...)
*/
#define for_each_mesh_entry(tbl, p, node, i) \
for (i = 0; i <= tbl->hash_mask; i++) \
hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)
/*
 * Allocate a mesh table with 2^size_order hash buckets and one spinlock
 * per bucket.  The hash seed is randomized.  Returns NULL on failure.
 */
static struct mesh_table *mesh_table_alloc(int size_order)
{
	struct mesh_table *tbl;
	int nbuckets = 1 << size_order;
	int i;

	tbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!tbl)
		return NULL;

	tbl->hash_buckets = kzalloc(sizeof(struct hlist_head) * nbuckets,
				    GFP_ATOMIC);
	if (!tbl->hash_buckets)
		goto free_tbl;

	tbl->hashwlock = kmalloc(sizeof(spinlock_t) * nbuckets, GFP_ATOMIC);
	if (!tbl->hashwlock)
		goto free_buckets;

	tbl->size_order = size_order;
	tbl->hash_mask = nbuckets - 1;
	atomic_set(&tbl->entries, 0);
	get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
	for (i = 0; i <= tbl->hash_mask; i++)
		spin_lock_init(&tbl->hashwlock[i]);
	spin_lock_init(&tbl->gates_lock);

	return tbl;

free_buckets:
	kfree(tbl->hash_buckets);
free_tbl:
	kfree(tbl);
	return NULL;
}
/* Release a table's bucket array, per-bucket locks and the table itself. */
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hashwlock);
	kfree(tbl->hash_buckets);
	kfree(tbl);
}
/* Destroy a mesh table.  When @free_leafs is true the mesh_path leaves
 * and the shared gate list are destroyed too; when false only the node
 * wrappers go (the leaves live on in a replacement table). */
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	/* Tear down every hash chain; free_node() honours free_leafs. */
	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	/* The gate list is shared between old and new tables during a
	 * resize (see mesh_table_grow), so only destroy it with the
	 * leaves. */
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}
	__mesh_table_free(tbl);
}
/* Copy all entries of @oldtbl into the (larger) @newtbl.
 * Returns 0 on success, -EAGAIN when growing is no longer warranted,
 * -ENOMEM when copying a node fails (the partial copy is undone). */
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	/* Re-check the threshold: entries may have been removed since
	 * the grow was scheduled. */
	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	/* The new table inherits the callbacks and shares the gate list. */
	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	/* Undo the partial copy; free only the node wrappers, the leaves
	 * are still owned by the old table (free_leafs == 0). */
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}
/* Bucket index for @addr on @sdata: the last four bytes of the hardware
 * address are mixed with the interface index and the table's random
 * seed, then folded into the bucket range. */
static u32 mesh_table_hash(u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	u32 hash = jhash_2words(*(u32 *)(addr + 2), sdata->dev->ifindex,
				tbl->hash_rnd);

	return hash & tbl->hash_mask;
}
/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	struct sk_buff_head tmpq;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	/* Re-address every frame already queued on this path (RA = new
	 * next hop, TA = our own address).  Frames are drained into a
	 * temporary list and spliced back so each is touched only once. */
	__skb_queue_head_init(&tmpq);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);

	while ((skb = __skb_dequeue(&mpath->frame_queue)) != NULL) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		__skb_queue_tail(&tmpq, skb);
	}

	skb_queue_splice(&tmpq, &mpath->frame_queue);
	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
/* Re-address a queued frame so it travels via a mesh gate: add the
 * 802.11s Address Extension (A5/A6) field when the frame does not
 * already carry one, then point addr1 at the gate's next hop, addr2 at
 * ourselves and addr3 at @dst_addr. */
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop; the next_hop pointer is RCU-protected */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath.  The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue.  When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *cp_skb = NULL;
	struct sk_buff_head gateq, failq;
	unsigned long flags;
	int num_skbs;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&gateq);
	__skb_queue_head_init(&failq);

	/* Drain the failed path's queue into a private list first, so its
	 * queue lock is not held while frames are rewritten. */
	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	/* Snapshot the length: copies appended below must not be
	 * re-processed by this loop. */
	num_skbs = skb_queue_len(&failq);

	while (num_skbs--) {
		skb = __skb_dequeue(&failq);
		if (copy) {
			/* Keep a duplicate on the failed path; the original
			 * is re-addressed and handed to the gate. */
			cp_skb = skb_copy(skb, GFP_ATOMIC);
			if (cp_skb)
				__skb_queue_tail(&failq, cp_skb);
		}

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		__skb_queue_tail(&gateq, skb);
	}

	spin_lock_irqsave(&gate_mpath->frame_queue.lock, flags);
	skb_queue_splice(&gateq, &gate_mpath->frame_queue);
	mpath_dbg("Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst,
		  skb_queue_len(&gate_mpath->frame_queue));
	spin_unlock_irqrestore(&gate_mpath->frame_queue.lock, flags);

	if (!copy)
		return;

	/* Return the retained duplicates to the failed path's queue. */
	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
/* Find the path for (@dst, @sdata) in @tbl under RCU.  Entries whose
 * lifetime ran out are lazily marked inactive before being returned. */
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct hlist_head *bucket;
	struct hlist_node *n;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		struct mesh_path *mpath = node->mpath;

		if (mpath->sdata != sdata ||
		    compare_ether_addr(dst, mpath->dst) != 0)
			continue;

		if (MPATH_EXPIRED(mpath)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			spin_unlock_bh(&mpath->state_lock);
		}
		return mpath;
	}
	return NULL;
}
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 *
 * Thin wrapper: resolves the current mesh table under RCU and delegates
 * to mpath_lookup().
 */
struct mesh_path *mesh_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}
/**
 * mpp_path_lookup - look up a proxy (MPP) path by destination
 * @dst: hardware address (ETH_ALEN length) of destination
 * @sdata: local subif
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mpp_path_lookup(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *mesh_path_lookup_by_idx(int idx, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int count = 0;

	/* Walk every bucket, counting entries that match @sdata until the
	 * requested index is reached. */
	for_each_mesh_entry(tbl, p, node, i) {
		struct mesh_path *mpath = node->mpath;

		if (sdata && mpath->sdata != sdata)
			continue;
		if (count++ != idx)
			continue;

		/* Lazily deactivate an expired entry before returning it. */
		if (MPATH_EXPIRED(mpath)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			spin_unlock_bh(&mpath->state_lock);
		}
		return mpath;
	}
	return NULL;
}
/**
 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
 * @mpath: gate path to add to table
 *
 * Returns: 0 on success, -EEXIST if the path is already listed as a
 * gate, -ENOMEM on allocation failure.
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	/* Refuse to list the same path twice.
	 * NOTE(review): this scan runs under RCU only, while the insertion
	 * below is serialized by gates_lock; two concurrent adders could
	 * both pass the check -- verify callers serialize gate addition. */
	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	rcu_read_unlock();
	mpath_dbg("Mesh path (%s): Recorded new gate: %pM. %d known gates\n",
		  mpath->sdata->name, mpath->dst,
		  mpath->sdata->u.mesh.num_gates);
	return 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Returns: 0 unconditionally (a path that is not listed is not an error)
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static int mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			/* Unlink under gates_lock; the node itself is freed
			 * after the RCU grace period. */
			spin_lock_bh(&tbl->gates_lock);
			hlist_del_rcu(&gate->list);
			kfree_rcu(gate, rcu);
			spin_unlock_bh(&tbl->gates_lock);
			mpath->sdata->u.mesh.num_gates--;
			mpath->is_gate = false;
			mpath_dbg("Mesh path (%s): Deleted gate: %pM. "
				  "%d known gates\n", mpath->sdata->name,
				  mpath->dst, mpath->sdata->u.mesh.num_gates);
			break;
		}

	return 0;
}
/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 *
 * Returns: the current gate count maintained by add_gate/gate_del.
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}
/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success, -ENOTSUPP for our own/multicast addresses,
 * -ENOSPC when MESH_MAX_MPATHS is reached, -ENOMEM or -EEXIST otherwise.
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(u8 *dst, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	/* Reserve a slot against the per-interface path limit. */
	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	/* Read side of the resize lock: the table cannot be swapped out
	 * from under us while we insert. */
	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	/* Refuse duplicates: same destination on the same interface. */
	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    compare_ether_addr(dst, mpath->dst) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		/* Chains are getting long; ask the workqueue to grow
		 * the table (cannot be done here, we hold no sleepable
		 * context). */
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	/* Release the slot reserved above. */
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}
/* RCU callback: tear down a table replaced by a resize once all readers
 * are done.  The leaves are shared with the new table, so they are kept
 * (free_leafs == false). */
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}
/* Replace the mesh path table with one of twice the size.  Runs from
 * the interface work item after an insertion set MESH_WORK_GROW_MPATH_TABLE. */
void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	/* Writer side of the resize lock: excludes concurrent add/del. */
	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	/* Publish the new table, then free the old one after the RCU
	 * grace period (its leaves now belong to the new table). */
	rcu_assign_pointer(mesh_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}
/* Same as mesh_mpath_table_grow(), but for the proxy (MPP) path table. */
void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	/* Publish first, defer freeing the old table past the RCU grace
	 * period. */
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
	write_unlock_bh(&pathtbl_resize_lock);
}
/* Add a proxy path: @dst is reachable through mesh proxy @mpp.
 * Mirrors mesh_path_add(), but proxy paths have no timer and do not
 * count against the MESH_MAX_MPATHS limit.
 * Returns 0 on success, -ENOTSUPP/-ENOMEM/-EEXIST otherwise. */
int mpp_path_add(u8 *dst, u8 *mpp, struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (compare_ether_addr(dst, sdata->vif.addr) == 0)
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	/* Read side of the resize lock while touching the table. */
	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	/* Refuse duplicates: same destination on the same interface. */
	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    compare_ether_addr(dst, mpath->dst) == 0)
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		/* Defer the resize to the interface workqueue. */
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	/* Deactivate every active, non-fixed path whose next hop is the
	 * broken peer, bump its sequence number, and broadcast a PERR so
	 * downstream nodes also invalidate the path. */
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, cpu_to_le32(mpath->sn),
					   reason, bcast, sdata);
		}
	}
	rcu_read_unlock();
}
/* RCU callback: final teardown of a deleted path node, scheduled by
 * __mesh_path_del() via call_rcu().
 * NOTE(review): this runs in RCU callback (softirq) context; calling
 * del_timer_sync() here looks unsafe -- later mainline kernels moved
 * timer deletion out of the reclaim path.  Verify against the target
 * kernel before relying on this. */
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	/* Release the slot reserved in mesh_path_add(). */
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}
/* Unlink a path node from @tbl and schedule its reclaim.
 * needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	/* Mark the path so concurrent users treat it as unusable. */
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	/* Actual freeing happens after the RCU grace period. */
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta - mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			/* __mesh_path_del() requires the bucket lock. */
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
/* Delete every path in @tbl that belongs to @sdata.  Caller must hold
 * rcu_read_lock() (asserted below) and pathtbl_resize_lock for reading. */
static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		/* __mesh_path_del() requires the bucket lock. */
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}
/**
* mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
*
* This function deletes both mesh paths as well as mesh portal paths.
*
* @sdata - interface data to match
*
*/
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *path_tbl, *proxy_tbl;

	/* Both tables are flushed under the same RCU section and the read
	 * side of the resize lock. */
	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	path_tbl = resize_dereference_mesh_paths();
	proxy_tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(path_tbl, sdata);
	table_flush_by_iface(proxy_tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful, -ENXIO if no such path exists
 */
int mesh_path_del(u8 *addr, struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    compare_ether_addr(addr, mpath->dst) == 0) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	/* The generation counter is bumped whether or not a path was
	 * removed, invalidating userspace dump iterators either way. */
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}
/**
* mesh_path_tx_pending - sends pending frames in a mesh path queue
*
* @mpath: mesh path to activate
*
* Locking: the state_lock of the mpath structure must NOT be held when calling
* this function.
*/
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	/* Queued frames are only released once the path is usable. */
	if (!(mpath->flags & MESH_PATH_ACTIVE))
		return;

	ieee80211_add_pending_skbs(mpath->sdata->local, &mpath->frame_queue);
}
/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue.  If there are more than one gates, the frames
 * are copied from each gate to the next.  After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 *
 * Returns: 0 when at least one active gate accepted the frames,
 * -EHOSTUNREACH otherwise.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();
	/* NOTE(review): known_gates is captured under RCU but walked after
	 * rcu_read_unlock().  The gate list head is shared across table
	 * resizes (see mesh_table_grow), which may be what makes this
	 * safe -- verify before changing the locking here. */

	if (!known_gates)
		return -EHOSTUNREACH;

	/* First pass: hand the frames to the first active gate, then copy
	 * them from gate to gate so each active gate gets a set. */
	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg("Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg("Not forwarding %p\n", gate->mpath);
			mpath_dbg("flags %x\n", gate->mpath->flags);
		}
	}

	/* Second pass: flush each gate's queue onto the TX path. */
	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg("Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	/* from_mpath unchanged means no active gate took the frames. */
	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}
/**
* mesh_path_discard_frame - discard a frame whose path could not be resolved
*
* @skb: frame to discard
* @sdata: network subif the frame was to be sent through
*
* Locking: the function must me called within a rcu_read_lock region
*/
void mesh_path_discard_frame(struct sk_buff *skb,
			     struct ieee80211_sub_if_data *sdata)
{
	/* Account for the drop, then release the frame. */
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
	kfree_skb(skb);
}
/**
* mesh_path_flush_pending - free the pending queue of a mesh path
*
* @mpath: mesh path whose queue has to be freed
*
* Locking: the function must me called within a rcu_read_lock region
*/
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	/* Drop every frame still waiting on this path. */
	for (skb = skb_dequeue(&mpath->frame_queue); skb != NULL;
	     skb = skb_dequeue(&mpath->frame_queue))
		mesh_path_discard_frame(skb, mpath->sdata);
}
/**
* mesh_path_fix_nexthop - force a specific next hop for a mesh path
*
* @mpath: the mesh path to modify
* @next_hop: the next hop to force
*
* Locking: this function must be called holding mpath->state_lock
*/
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	/* Reset the path metrics and pin it so expiry/HWMP leave it alone. */
	mpath->exp_time = 0;
	mpath->hop_count = 0;
	mpath->metric = 0;
	mpath->sn = 0xffff;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	/* Release anything that queued up while the path was unresolved. */
	mesh_path_tx_pending(mpath);
}
/* free_node callback: unlink a node and, when @free_leafs is set,
 * destroy the mesh_path it wraps as well. */
static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
	struct mesh_path *mpath = node->mpath;

	hlist_del_rcu(p);
	if (free_leafs) {
		/* Stop the path timer before the path itself goes away. */
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}
/* copy_node callback: create a new wrapper node in @newtbl pointing at
 * the same mesh_path leaf.  Returns 0 on success, -ENOMEM on failure. */
static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mpath_node *old_node = hlist_entry(p, struct mpath_node, list);
	struct mesh_path *mpath = old_node->mpath;
	struct mpath_node *copy;
	u32 bucket;

	copy = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (copy == NULL)
		return -ENOMEM;

	copy->mpath = mpath;
	bucket = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&copy->list, &newtbl->hash_buckets[bucket]);
	return 0;
}
/* Allocate and publish the initial mesh and proxy (MPP) path tables.
 * Returns 0 on success, -ENOMEM on any allocation failure (everything
 * already allocated is torn down again). */
int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	/* The gate list head lives on its own allocation because it is
	 * shared across table resizes. */
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

	/* free_mpp falls through to free_path: both tables go on the
	 * later failure. */
free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}
/* Walk @sdata's paths and delete those whose expiry time has passed.
 * Paths being resolved or pinned with MESH_PATH_FIXED are skipped. */
void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			/* Deletion only unlinks and defers the free via
			 * RCU, so continuing this RCU walk is safe. */
			mesh_path_del(mpath->dst, mpath->sdata);
	}
	rcu_read_unlock();
}
void mesh_pathtbl_unregister(void)
{
	struct mesh_table *tbl;

	/* No concurrent users remain on the exit path, so "1" tells RCU
	 * that skipping the usual protection checks is intentional. */
	tbl = rcu_dereference_protected(mesh_paths, 1);
	mesh_table_free(tbl, true);
	tbl = rcu_dereference_protected(mpp_paths, 1);
	mesh_table_free(tbl, true);
}
| gpl-2.0 |
slayher/android_kernel_lge_zee | drivers/video/w100fb.c | 4940 | 48927 | /*
* linux/drivers/video/w100fb.c
*
* Frame Buffer Device for ATI Imageon w100 (Wallaby)
*
* Copyright (C) 2002, ATI Corp.
* Copyright (C) 2004-2006 Richard Purdie
* Copyright (c) 2005 Ian Molton
* Copyright (c) 2006 Alberto Mardegan
*
* Rewritten for 2.6 by Richard Purdie <rpurdie@rpsys.net>
*
* Generic platform support by Ian Molton <spyro@f2s.com>
* and Richard Purdie <rpurdie@rpsys.net>
*
* w32xx support by Ian Molton
*
* Hardware acceleration support by Alberto Mardegan
* <mardy@users.sourceforge.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <video/w100fb.h>
#include "w100fb.h"
/*
* Prototypes
*/
static void w100_suspend(u32 mode);
static void w100_vsync(void);
static void w100_hw_init(struct w100fb_par*);
static void w100_pwm_setup(struct w100fb_par*);
static void w100_init_clocks(struct w100fb_par*);
static void w100_setup_memory(struct w100fb_par*);
static void w100_init_lcd(struct w100fb_par*);
static void w100_set_dispregs(struct w100fb_par*);
static void w100_update_enable(void);
static void w100_update_disable(void);
static void calc_hsync(struct w100fb_par *par);
static void w100_init_graphic_engine(struct w100fb_par *par);
struct w100_pll_info *w100_get_xtal_table(unsigned int freq) __devinit;
/* Pseudo palette size */
#define MAX_PALETTES 16
#define W100_SUSPEND_EXTMEM 0
#define W100_SUSPEND_ALL 1
#define BITS_PER_PIXEL 16
/* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */
static void *remapped_base;
static void *remapped_regs;
static void *remapped_fbuf;
#define REMAPPED_FB_LEN 0x15ffff
/* This is the offset in the w100's address space we map the current
framebuffer memory to. We use the position of external memory as
we can remap internal memory to there if external isn't present. */
#define W100_FB_BASE MEM_EXT_BASE_VALUE
/*
* Sysfs functions
*/
/* sysfs "flip" read: report the current flip state as a decimal. */
static ssize_t flip_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par = info->par;

	return sprintf(buf, "%d\n", par->flip);
}
/* sysfs "flip" write: any positive value enables display flipping,
 * everything else disables it; the display registers are re-programmed
 * with the new orientation. */
static ssize_t flip_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par = info->par;

	par->flip = (simple_strtoul(buf, NULL, 10) > 0) ? 1 : 0;

	w100_update_disable();
	w100_set_dispregs(par);
	w100_update_enable();
	calc_hsync(par);

	return count;
}
static DEVICE_ATTR(flip, 0644, flip_show, flip_store);
/* sysfs "reg_read" store handler: the written string is parsed as a hex
 * register offset, the register is read and the value goes to the kernel
 * log.  (It is a store callback because the offset must come from the
 * user; the result is retrieved via dmesg.) */
static ssize_t w100fb_reg_read(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long regs, param;
	regs = simple_strtoul(buf, NULL, 16);
	param = readl(remapped_regs + regs);
	printk("Read Register 0x%08lX: 0x%08lX\n", regs, param);
	return count;
}
static DEVICE_ATTR(reg_read, 0200, NULL, w100fb_reg_read);
/* sysfs "reg_write" store handler: expects "<hex offset> <hex value>"
 * and writes the value to the given register.
 * NOTE(review): the bound check accepts offsets up to and including
 * 0x2000 -- confirm the register window really extends that far. */
static ssize_t w100fb_reg_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	unsigned long regs, param;
	/* The argument addresses were mojibake ("®s, ¶m") from an
	 * HTML-entity mangling of "&regs, &param"; restored here. */
	sscanf(buf, "%lx %lx", &regs, &param);
	if (regs <= 0x2000) {
		printk("Write Register 0x%08lX: 0x%08lX\n", regs, param);
		writel(param, remapped_regs + regs);
	}
	return count;
}
static DEVICE_ATTR(reg_write, 0200, NULL, w100fb_reg_write);
/* sysfs "fastpllclk" read: report whether fast PLL mode is selected. */
static ssize_t fastpllclk_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par = info->par;

	return sprintf(buf, "%d\n", par->fastpll_mode);
}
/*
 * sysfs "fastpllclk" attribute: switch between the fast and normal system
 * clock (takes effect via w100_init_clocks).
 */
static ssize_t fastpllclk_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
	struct fb_info *fbinfo = dev_get_drvdata(dev);
	struct w100fb_par *w100par = fbinfo->par;
	int fast = simple_strtoul(buf, NULL, 10) > 0;

	w100par->fastpll_mode = fast ? 1 : 0;
	if (fast)
		printk("w100fb: Using fast system clock (if possible)\n");
	else
		printk("w100fb: Using normal system clock\n");

	w100_init_clocks(w100par);
	calc_hsync(w100par);

	return count;
}
static DEVICE_ATTR(fastpllclk, 0644, fastpllclk_show, fastpllclk_store);
/*
* Some touchscreens need hsync information from the video driver to
* function correctly. We export it here.
*/
/*
 * Report the hsync pulse length to interested drivers (e.g. touchscreens).
 * Returns 0 while the display is blanked/suspended, since no hsync is
 * generated then.
 */
unsigned long w100fb_get_hsynclen(struct device *dev)
{
	struct fb_info *info = dev_get_drvdata(dev);
	struct w100fb_par *par = info->par;

	return par->blanked ? 0 : par->hsync_len;
}
EXPORT_SYMBOL(w100fb_get_hsynclen);
/*
 * Zero the visible framebuffer. The framebuffer lives at W100_FB_BASE in
 * the chip's address space; remapped_fbuf maps from MEM_WINDOW_BASE, so
 * the difference gives the offset into the mapping.
 */
static void w100fb_clear_screen(struct w100fb_par *par)
{
	memset_io(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), 0, (par->xres * par->yres * BITS_PER_PIXEL/8));
}
/*
* Set a palette value from rgb components
*/
/*
 * Store one palette entry, encoding the RGB components as 16-bit RGB565
 * in the pseudo-palette. Returns 0 on success, 1 if regno is out of range.
 */
static int w100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
			    u_int trans, struct fb_info *info)
{
	u32 *palette = info->pseudo_palette;

	/* Grayscale visuals: collapse RGB to luma (ITU-R 601 weights). */
	if (info->var.grayscale)
		red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16;

	if (regno >= MAX_PALETTES)
		return 1;

	/* Pack the 16-bit components down to 5:6:5. */
	palette[regno] = (red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11);
	return 0;
}
/*
* Blank the display based on value in blank_mode
*/
/*
 * Blank or unblank the display, delegating the panel power sequencing to
 * the machine's tg (timing generator) hooks when present.
 */
static int w100fb_blank(int blank_mode, struct fb_info *info)
{
	struct w100fb_par *par = info->par;
	struct w100_tg_info *tg = par->mach->tg;

	switch (blank_mode) {
	case FB_BLANK_NORMAL:		/* normal blanking */
	case FB_BLANK_VSYNC_SUSPEND:	/* VESA blank, vsync off */
	case FB_BLANK_HSYNC_SUSPEND:	/* VESA blank, hsync off */
	case FB_BLANK_POWERDOWN:	/* power off */
		if (!par->blanked) {
			if (tg && tg->suspend)
				tg->suspend(par);
			par->blanked = 1;
		}
		break;
	case FB_BLANK_UNBLANK:		/* unblank */
		if (par->blanked) {
			if (tg && tg->resume)
				tg->resume(par);
			par->blanked = 0;
		}
		break;
	}
	return 0;
}
/*
 * Busy-wait until the command FIFO has room for 'entries' words.
 * Gives up (with an error message) after ~2 seconds of polling.
 */
static void w100_fifo_wait(int entries)
{
	union rbbm_status_u status;
	int tries;

	for (tries = 0; tries < 2000000; tries++) {
		status.val = readl(remapped_regs + mmRBBM_STATUS);
		if (status.f.cmdfifo_avail >= entries)
			return;
		udelay(1);
	}
	printk(KERN_ERR "w100fb: FIFO Timeout!\n");
}
/*
 * Wait for the graphics engine to go idle. Returns 0 when idle, -EBUSY
 * if it is still busy after ~2 seconds of polling.
 */
static int w100fb_sync(struct fb_info *info)
{
	union rbbm_status_u status;
	int tries;

	for (tries = 0; tries < 2000000; tries++) {
		status.val = readl(remapped_regs + mmRBBM_STATUS);
		if (!status.f.gui_active)
			return 0;
		udelay(1);
	}
	printk(KERN_ERR "w100fb: Graphic engine timeout!\n");
	return -EBUSY;
}
/*
 * Program the 2D engine defaults used by the fillrect/copyarea accel
 * paths: source/destination surface layout, clipping, and the datatype/
 * mix registers. The writes are grouped behind w100_fifo_wait() calls
 * sized for each group; ordering follows the hardware programming
 * sequence, so do not reorder.
 */
static void w100_init_graphic_engine(struct w100fb_par *par)
{
	union dp_gui_master_cntl_u gmc;
	union dp_mix_u dp_mix;
	union dp_datatype_u dp_datatype;
	union dp_cntl_u dp_cntl;
	/* Source and destination both describe the visible framebuffer. */
	w100_fifo_wait(4);
	writel(W100_FB_BASE, remapped_regs + mmDST_OFFSET);
	writel(par->xres, remapped_regs + mmDST_PITCH);
	writel(W100_FB_BASE, remapped_regs + mmSRC_OFFSET);
	writel(par->xres, remapped_regs + mmSRC_PITCH);
	/* Destination clip = whole screen; source clip effectively unbounded. */
	w100_fifo_wait(3);
	writel(0, remapped_regs + mmSC_TOP_LEFT);
	writel((par->yres << 16) | par->xres, remapped_regs + mmSC_BOTTOM_RIGHT);
	writel(0x1fff1fff, remapped_regs + mmSRC_SC_BOTTOM_RIGHT);
	w100_fifo_wait(4);
	/* Blit direction: left-to-right, top-to-bottom, X major. */
	dp_cntl.val = 0;
	dp_cntl.f.dst_x_dir = 1;
	dp_cntl.f.dst_y_dir = 1;
	dp_cntl.f.src_x_dir = 1;
	dp_cntl.f.src_y_dir = 1;
	dp_cntl.f.dst_major_x = 1;
	dp_cntl.f.src_major_x = 1;
	writel(dp_cntl.val, remapped_regs + mmDP_CNTL);
	/* Master control: 16bpp copy from rectangular memory, ROP SRCCOPY. */
	gmc.val = 0;
	gmc.f.gmc_src_pitch_offset_cntl = 1;
	gmc.f.gmc_dst_pitch_offset_cntl = 1;
	gmc.f.gmc_src_clipping = 1;
	gmc.f.gmc_dst_clipping = 1;
	gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE;
	gmc.f.gmc_dst_datatype = 3; /* from DstType_16Bpp_444 */
	gmc.f.gmc_src_datatype = SRC_DATATYPE_EQU_DST;
	gmc.f.gmc_byte_pix_order = 1;
	gmc.f.gmc_default_sel = 0;
	gmc.f.gmc_rop3 = ROP3_SRCCOPY;
	gmc.f.gmc_dp_src_source = DP_SRC_MEM_RECTANGULAR;
	gmc.f.gmc_clr_cmp_fcn_dis = 1;
	gmc.f.gmc_wr_msk_dis = 1;
	gmc.f.gmc_dp_op = DP_OP_ROP;
	writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
	/* Mirror the master-control choices into DP_DATATYPE and DP_MIX. */
	dp_datatype.val = dp_mix.val = 0;
	dp_datatype.f.dp_dst_datatype = gmc.f.gmc_dst_datatype;
	dp_datatype.f.dp_brush_datatype = gmc.f.gmc_brush_datatype;
	dp_datatype.f.dp_src2_type = 0;
	dp_datatype.f.dp_src2_datatype = gmc.f.gmc_src_datatype;
	dp_datatype.f.dp_src_datatype = gmc.f.gmc_src_datatype;
	dp_datatype.f.dp_byte_pix_order = gmc.f.gmc_byte_pix_order;
	writel(dp_datatype.val, remapped_regs + mmDP_DATATYPE);
	dp_mix.f.dp_src_source = gmc.f.gmc_dp_src_source;
	dp_mix.f.dp_src2_source = 1;
	dp_mix.f.dp_rop3 = gmc.f.gmc_rop3;
	dp_mix.f.dp_op = gmc.f.gmc_dp_op;
	writel(dp_mix.val, remapped_regs + mmDP_MIX);
}
/*
 * Accelerated solid fill. Falls back to cfb_fillrect() when acceleration
 * is disabled; otherwise programs the engine for a PATCOPY with a solid
 * brush and kicks off the fill. Register write order matters: writing
 * DST_WIDTH_HEIGHT last is what starts the operation.
 */
static void w100fb_fillrect(struct fb_info *info,
			    const struct fb_fillrect *rect)
{
	union dp_gui_master_cntl_u gmc;
	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_fillrect(info, rect);
		return;
	}
	/* Switch the engine from the default SRCCOPY to a solid-brush fill. */
	gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL);
	gmc.f.gmc_rop3 = ROP3_PATCOPY;
	gmc.f.gmc_brush_datatype = GMC_BRUSH_SOLID_COLOR;
	w100_fifo_wait(2);
	writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
	writel(rect->color, remapped_regs + mmDP_BRUSH_FRGD_CLR);
	w100_fifo_wait(2);
	writel((rect->dy << 16) | (rect->dx & 0xffff), remapped_regs + mmDST_Y_X);
	writel((rect->width << 16) | (rect->height & 0xffff),
	       remapped_regs + mmDST_WIDTH_HEIGHT);
}
/*
 * Accelerated screen-to-screen copy. Falls back to cfb_copyarea() when
 * acceleration is disabled. The final DST_WIDTH_HEIGHT write launches
 * the blit, so the write order must be preserved.
 */
static void w100fb_copyarea(struct fb_info *info,
			    const struct fb_copyarea *area)
{
	u32 dx = area->dx, dy = area->dy, sx = area->sx, sy = area->sy;
	u32 h = area->height, w = area->width;
	union dp_gui_master_cntl_u gmc;
	if (info->state != FBINFO_STATE_RUNNING)
		return;
	if (info->flags & FBINFO_HWACCEL_DISABLED) {
		cfb_copyarea(info, area);
		return;
	}
	/* Restore SRCCOPY with no brush (fillrect may have changed these). */
	gmc.val = readl(remapped_regs + mmDP_GUI_MASTER_CNTL);
	gmc.f.gmc_rop3 = ROP3_SRCCOPY;
	gmc.f.gmc_brush_datatype = GMC_BRUSH_NONE;
	w100_fifo_wait(1);
	writel(gmc.val, remapped_regs + mmDP_GUI_MASTER_CNTL);
	w100_fifo_wait(3);
	writel((sy << 16) | (sx & 0xffff), remapped_regs + mmSRC_Y_X);
	writel((dy << 16) | (dx & 0xffff), remapped_regs + mmDST_Y_X);
	writel((w << 16) | (h & 0xffff), remapped_regs + mmDST_WIDTH_HEIGHT);
}
/*
* Change the resolution by calling the appropriate hardware functions
*/
/*
 * Change the resolution by calling the appropriate hardware functions.
 * The sequence (power management, memory, clocks, clear, LCD timing,
 * display registers, 2D engine) follows the hardware bring-up order —
 * do not reorder the calls.
 */
static void w100fb_activate_var(struct w100fb_par *par)
{
	struct w100_tg_info *tg = par->mach->tg;
	w100_pwm_setup(par);
	w100_setup_memory(par);
	w100_init_clocks(par);
	w100fb_clear_screen(par);
	w100_vsync();
	/* Hold off double-buffer updates while the LCD registers change. */
	w100_update_disable();
	w100_init_lcd(par);
	w100_set_dispregs(par);
	w100_update_enable();
	w100_init_graphic_engine(par);
	calc_hsync(par);
	/* Let the board's timing generator react, unless we're blanked. */
	if (!par->blanked && tg && tg->change)
		tg->change(par);
}
/* Select the smallest mode that allows the desired resolution to be
* displayed. If desired, the x and y parameters can be rounded up to
* match the selected mode.
*/
/*
 * Select the smallest mode that can display at least *x by *y pixels,
 * considering each mode both in its native orientation and rotated by
 * 90 degrees. If saveval is set and a mode was found, *x and *y are
 * rounded up to the selected (possibly rotated) mode's dimensions.
 * Returns NULL when no mode fits.
 */
static struct w100_mode *w100fb_get_mode(struct w100fb_par *par, unsigned int *x, unsigned int *y, int saveval)
{
	struct w100_mode *modelist = par->mach->modelist;
	struct w100_mode *best = NULL;
	unsigned int best_x = 0xffffffff, best_y = 0xffffffff;
	unsigned int i;

	for (i = 0; i < par->mach->num_modes; i++) {
		struct w100_mode *m = &modelist[i];

		if (m->xres >= *x && m->yres >= *y &&
		    m->xres < best_x && m->yres < best_y) {
			/* Fits in native orientation and is smaller than
			 * the best candidate so far. */
			best_x = m->xres;
			best_y = m->yres;
			best = m;
		} else if (m->xres >= *y && m->yres >= *x &&
			   m->xres < best_y && m->yres < best_x) {
			/* Fits when rotated 90 degrees; record the swapped
			 * dimensions so *x/*y stay in caller orientation. */
			best_x = m->yres;
			best_y = m->xres;
			best = m;
		}
	}

	if (best && saveval) {
		*x = best_x;
		*y = best_y;
	}
	return best;
}
/*
* w100fb_check_var():
* Get the video params out of 'var'. If a value doesn't fit, round it up,
* if it's too big, return -EINVAL.
*/
/*
 * w100fb_check_var():
 * Validate and adjust 'var': round the geometry up to the nearest
 * supported mode, verify it fits in video memory, and force the fixed
 * RGB565 pixel format. Returns -EINVAL if no mode fits or the request
 * exceeds memory/bpp limits.
 */
static int w100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct w100fb_par *par = info->par;
	unsigned int fb_bytes;

	if (!w100fb_get_mode(par, &var->xres, &var->yres, 1))
		return -EINVAL;

	/* External memory when the machine provides it, else internal. */
	fb_bytes = var->xres * var->yres * BITS_PER_PIXEL / 8;
	if (par->mach->mem) {
		if (fb_bytes > par->mach->mem->size + 1)
			return -EINVAL;
	} else if (fb_bytes > MEM_INT_SIZE + 1) {
		return -EINVAL;
	}

	var->xres_virtual = max(var->xres_virtual, var->xres);
	var->yres_virtual = max(var->yres_virtual, var->yres);

	/* Only 16bpp RGB565 is supported. */
	if (var->bits_per_pixel > BITS_PER_PIXEL)
		return -EINVAL;
	var->bits_per_pixel = BITS_PER_PIXEL;
	var->red.offset = 11;
	var->red.length = 5;
	var->green.offset = 5;
	var->green.length = 6;
	var->blue.offset = 0;
	var->blue.length = 5;
	var->transp.offset = var->transp.length = 0;

	var->nonstd = 0;
	var->height = -1;
	var->width = -1;
	var->vmode = FB_VMODE_NONINTERLACED;
	var->sync = 0;
	var->pixclock = 0x04; /* 171521; */
	return 0;
}
/*
* w100fb_set_par():
* Set the user defined part of the display for the specified console
* by looking at the values in info.var
*/
/*
 * w100fb_set_par():
 * Set the user defined part of the display for the specified console
 * by looking at the values in info.var. Only reprograms the hardware
 * when the resolution actually changed.
 */
static int w100fb_set_par(struct fb_info *info)
{
	struct w100fb_par *par=info->par;
	if (par->xres != info->var.xres || par->yres != info->var.yres) {
		par->xres = info->var.xres;
		par->yres = info->var.yres;
		/* saveval=0: keep par->xres/yres as requested, not rounded. */
		par->mode = w100fb_get_mode(par, &par->xres, &par->yres, 0);
		info->fix.visual = FB_VISUAL_TRUECOLOR;
		info->fix.ypanstep = 0;
		info->fix.ywrapstep = 0;
		info->fix.line_length = par->xres * BITS_PER_PIXEL / 8;
		/* smem_len changes must be serialised against mmap(). */
		mutex_lock(&info->mm_lock);
		/* Spill to external memory when the mode exceeds internal
		 * memory; check_var already verified external memory exists
		 * and is large enough in that case. */
		if ((par->xres*par->yres*BITS_PER_PIXEL/8) > (MEM_INT_SIZE+1)) {
			par->extmem_active = 1;
			info->fix.smem_len = par->mach->mem->size+1;
		} else {
			par->extmem_active = 0;
			info->fix.smem_len = MEM_INT_SIZE+1;
		}
		mutex_unlock(&info->mm_lock);
		w100fb_activate_var(par);
	}
	return 0;
}
/*
* Frame buffer operations
*/
/*
 * Frame buffer operations. Fills and copies are accelerated; image
 * blits use the generic cfb helper.
 */
static struct fb_ops w100fb_ops = {
	.owner        = THIS_MODULE,
	.fb_check_var = w100fb_check_var,
	.fb_set_par   = w100fb_set_par,
	.fb_setcolreg = w100fb_setcolreg,
	.fb_blank     = w100fb_blank,
	.fb_fillrect  = w100fb_fillrect,
	.fb_copyarea  = w100fb_copyarea,
	.fb_imageblit = cfb_imageblit,
	.fb_sync      = w100fb_sync,
};
#ifdef CONFIG_PM
/*
 * Snapshot video memory into vmalloc buffers before suspend. External
 * memory (when active) is saved from the normal framebuffer window;
 * internal memory is then saved from its own window. When external
 * memory is NOT active, the internal memory is remapped at the normal
 * framebuffer position, hence the different offset in the else branch.
 * NOTE(review): vmalloc failures are silently tolerated — the memory
 * simply isn't restored on resume.
 */
static void w100fb_save_vidmem(struct w100fb_par *par)
{
	int memsize;
	if (par->extmem_active) {
		memsize=par->mach->mem->size;
		par->saved_extmem = vmalloc(memsize);
		if (par->saved_extmem)
			memcpy_fromio(par->saved_extmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize);
	}
	memsize=MEM_INT_SIZE;
	par->saved_intmem = vmalloc(memsize);
	if (par->saved_intmem && par->extmem_active)
		memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), memsize);
	else if (par->saved_intmem)
		memcpy_fromio(par->saved_intmem, remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), memsize);
}
static void w100fb_restore_vidmem(struct w100fb_par *par)
{
int memsize;
if (par->extmem_active && par->saved_extmem) {
memsize=par->mach->mem->size;
memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_extmem, memsize);
vfree(par->saved_extmem);
}
if (par->saved_intmem) {
memsize=MEM_INT_SIZE;
if (par->extmem_active)
memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_INT_BASE_VALUE), par->saved_intmem, memsize);
else
memcpy_toio(remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE), par->saved_intmem, memsize);
vfree(par->saved_intmem);
}
}
/*
 * Platform suspend: save video memory (chip RAM loses content), let the
 * board's timing generator power the panel down, then put the chip into
 * full suspend. The blanked flag keeps w100fb_get_hsynclen() honest.
 */
static int w100fb_suspend(struct platform_device *dev, pm_message_t state)
{
	struct fb_info *info = platform_get_drvdata(dev);
	struct w100fb_par *par=info->par;
	struct w100_tg_info *tg = par->mach->tg;
	w100fb_save_vidmem(par);
	if(tg && tg->suspend)
		tg->suspend(par);
	w100_suspend(W100_SUSPEND_ALL);
	par->blanked = 1;
	return 0;
}
/*
 * Platform resume: reinitialise the chip from scratch, reprogram the
 * current mode, restore the saved video memory, and power the panel
 * back up via the timing generator hook.
 */
static int w100fb_resume(struct platform_device *dev)
{
	struct fb_info *info = platform_get_drvdata(dev);
	struct w100fb_par *par=info->par;
	struct w100_tg_info *tg = par->mach->tg;
	w100_hw_init(par);
	w100fb_activate_var(par);
	w100fb_restore_vidmem(par);
	if(tg && tg->resume)
		tg->resume(par);
	par->blanked = 0;
	return 0;
}
#else
#define w100fb_suspend NULL
#define w100fb_resume NULL
#endif
/*
 * Probe: map the chip's config/register/framebuffer windows, identify the
 * chip revision, allocate and fill in the fb_info, initialise the
 * hardware and register the framebuffer plus its sysfs attributes.
 *
 * FIX: the flipped-mode test used "inf->init_mode &= INIT_MODE_FLIPPED"
 * (compound assignment), which destructively cleared every other bit of
 * init_mode as a side effect of the test. Use a plain bitwise AND.
 */
int __devinit w100fb_probe(struct platform_device *pdev)
{
	int err = -EIO;
	struct w100fb_mach_info *inf;
	struct fb_info *info = NULL;
	struct w100fb_par *par;
	struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	unsigned int chip_id;

	if (!mem)
		return -EINVAL;

	/* Remap the chip base address */
	remapped_base = ioremap_nocache(mem->start+W100_CFG_BASE, W100_CFG_LEN);
	if (remapped_base == NULL)
		goto out;

	/* Map the register space */
	remapped_regs = ioremap_nocache(mem->start+W100_REG_BASE, W100_REG_LEN);
	if (remapped_regs == NULL)
		goto out;

	/* Identify the chip */
	printk("Found ");
	chip_id = readl(remapped_regs + mmCHIP_ID);
	switch(chip_id) {
	case CHIP_ID_W100:  printk("w100");  break;
	case CHIP_ID_W3200: printk("w3200"); break;
	case CHIP_ID_W3220: printk("w3220"); break;
	default:
		printk("Unknown imageon chip ID\n");
		err = -ENODEV;
		goto out;
	}
	printk(" at 0x%08lx.\n", (unsigned long) mem->start+W100_CFG_BASE);

	/* Remap the framebuffer */
	remapped_fbuf = ioremap_nocache(mem->start+MEM_WINDOW_BASE, MEM_WINDOW_SIZE);
	if (remapped_fbuf == NULL)
		goto out;

	info = framebuffer_alloc(sizeof(struct w100fb_par), &pdev->dev);
	if (!info) {
		err = -ENOMEM;
		goto out;
	}

	par = info->par;
	platform_set_drvdata(pdev, info);

	inf = pdev->dev.platform_data;
	par->chip_id = chip_id;
	par->mach = inf;
	par->fastpll_mode = 0;
	par->blanked = 0;

	/* Pick the PLL table matching the board's crystal frequency. */
	par->pll_table = w100_get_xtal_table(inf->xtal_freq);
	if (!par->pll_table) {
		printk(KERN_ERR "No matching Xtal definition found\n");
		err = -EINVAL;
		goto out;
	}

	info->pseudo_palette = kmalloc(sizeof(u32) * MAX_PALETTES, GFP_KERNEL);
	if (!info->pseudo_palette) {
		err = -ENOMEM;
		goto out;
	}

	info->fbops = &w100fb_ops;
	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
		FBINFO_HWACCEL_FILLRECT;
	info->node = -1;
	info->screen_base = remapped_fbuf + (W100_FB_BASE-MEM_WINDOW_BASE);
	info->screen_size = REMAPPED_FB_LEN;

	strcpy(info->fix.id, "w100fb");
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	info->fix.type_aux = 0;
	info->fix.accel = FB_ACCEL_NONE;
	info->fix.smem_start = mem->start+W100_FB_BASE;
	info->fix.mmio_start = mem->start+W100_REG_BASE;
	info->fix.mmio_len = W100_REG_LEN;

	if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
		err = -ENOMEM;
		goto out;
	}

	/* Default to the first mode, honouring the requested orientation. */
	par->mode = &inf->modelist[0];
	if (inf->init_mode & INIT_MODE_ROTATED) {
		info->var.xres = par->mode->yres;
		info->var.yres = par->mode->xres;
	}
	else {
		info->var.xres = par->mode->xres;
		info->var.yres = par->mode->yres;
	}

	/* FIX: was "inf->init_mode &= INIT_MODE_FLIPPED" (see above). */
	if (inf->init_mode & INIT_MODE_FLIPPED)
		par->flip = 1;
	else
		par->flip = 0;

	info->var.xres_virtual = info->var.xres;
	info->var.yres_virtual = info->var.yres;
	info->var.pixclock = 0x04; /* 171521; */
	info->var.sync = 0;
	info->var.grayscale = 0;
	info->var.xoffset = info->var.yoffset = 0;
	info->var.accel_flags = 0;
	info->var.activate = FB_ACTIVATE_NOW;

	w100_hw_init(par);

	if (w100fb_check_var(&info->var, info) < 0) {
		err = -EINVAL;
		goto out;
	}

	if (register_framebuffer(info) < 0) {
		err = -EINVAL;
		goto out;
	}

	/* Attribute failures are reported but non-fatal. */
	err = device_create_file(&pdev->dev, &dev_attr_fastpllclk);
	err |= device_create_file(&pdev->dev, &dev_attr_reg_read);
	err |= device_create_file(&pdev->dev, &dev_attr_reg_write);
	err |= device_create_file(&pdev->dev, &dev_attr_flip);
	if (err != 0)
		printk(KERN_WARNING "fb%d: failed to register attributes (%d)\n",
		       info->node, err);

	printk(KERN_INFO "fb%d: %s frame buffer device\n", info->node, info->fix.id);
	return 0;
out:
	if (info) {
		fb_dealloc_cmap(&info->cmap);
		kfree(info->pseudo_palette);
	}
	if (remapped_fbuf != NULL)
		iounmap(remapped_fbuf);
	if (remapped_regs != NULL)
		iounmap(remapped_regs);
	if (remapped_base != NULL)
		iounmap(remapped_base);
	if (info)
		framebuffer_release(info);
	return err;
}
/*
 * Remove: tear down sysfs attributes, unregister the framebuffer and
 * release every resource acquired in probe. The vfree() calls release
 * any leftover suspend snapshots (vfree(NULL) is a no-op).
 * NOTE(review): this relies on the suspend snapshot pointers being
 * NULLed after they are freed on resume — verify against
 * w100fb_restore_vidmem.
 */
static int __devexit w100fb_remove(struct platform_device *pdev)
{
	struct fb_info *info = platform_get_drvdata(pdev);
	struct w100fb_par *par=info->par;
	device_remove_file(&pdev->dev, &dev_attr_fastpllclk);
	device_remove_file(&pdev->dev, &dev_attr_reg_read);
	device_remove_file(&pdev->dev, &dev_attr_reg_write);
	device_remove_file(&pdev->dev, &dev_attr_flip);
	unregister_framebuffer(info);
	vfree(par->saved_intmem);
	vfree(par->saved_extmem);
	kfree(info->pseudo_palette);
	fb_dealloc_cmap(&info->cmap);
	iounmap(remapped_base);
	iounmap(remapped_regs);
	iounmap(remapped_fbuf);
	framebuffer_release(info);
	return 0;
}
/* ------------------- chipset specific functions -------------------------- */
/*
 * Soft-reset the chip via the config-space status register: assert the
 * reset bit, wait, then deassert. The udelay() calls are part of the
 * reset timing — do not remove or reorder.
 */
static void w100_soft_reset(void)
{
	u16 val = readw((u16 *) remapped_base + cfgSTATUS);
	writew(val | 0x08, (u16 *) remapped_base + cfgSTATUS);
	udelay(100);
	writew(0x00, (u16 *) remapped_base + cfgSTATUS);
	udelay(100);
}
/*
 * Prevent display updates (hold the double-buffer registers).
 *
 * FIX: the union was written to the hardware with only some bitfields
 * assigned, so any remaining bits carried uninitialized stack garbage
 * (undefined behavior in C, and a nondeterministic register value).
 * Zero-initialise .val first so the register write is fully defined.
 * NOTE(review): assumes the remaining bits are safe as 0 — confirm
 * against the DISP_DB_BUF_CNTL register layout.
 */
static void w100_update_disable(void)
{
	union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;

	disp_db_buf_wr_cntl.val = 0;
	disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
	disp_db_buf_wr_cntl.f.update_db_buf = 0;
	disp_db_buf_wr_cntl.f.en_db_buf = 0;
	writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
}
/*
 * Re-enable display updates (release the double-buffer registers).
 *
 * FIX: as in w100_update_disable, zero-initialise the union before
 * setting individual bitfields so no uninitialized bits reach the
 * hardware. NOTE(review): assumes the remaining bits are safe as 0 —
 * confirm against the DISP_DB_BUF_CNTL register layout.
 */
static void w100_update_enable(void)
{
	union disp_db_buf_cntl_wr_u disp_db_buf_wr_cntl;

	disp_db_buf_wr_cntl.val = 0;
	disp_db_buf_wr_cntl.f.db_buf_cntl = 0x1e;
	disp_db_buf_wr_cntl.f.update_db_buf = 1;
	disp_db_buf_wr_cntl.f.en_db_buf = 1;
	writel((u32) (disp_db_buf_wr_cntl.val), remapped_regs + mmDISP_DB_BUF_CNTL);
}
/* Read the data register of the selected GPIO port (A or B). */
unsigned long w100fb_gpio_read(int port)
{
	unsigned long reg;

	reg = (port == W100_GPIO_PORT_A) ? mmGPIO_DATA : mmGPIO_DATA2;
	return readl(remapped_regs + reg);
}
/* Write the data register of the selected GPIO port (A or B). */
void w100fb_gpio_write(int port, unsigned long value)
{
	unsigned long reg;

	reg = (port == W100_GPIO_PORT_A) ? mmGPIO_DATA : mmGPIO_DATA2;
	writel(value, remapped_regs + reg);
}
EXPORT_SYMBOL(w100fb_gpio_read);
EXPORT_SYMBOL(w100fb_gpio_write);
/*
* Initialization of critical w100 hardware
*/
/*
 * Bring the chip from reset into a usable state: CIF (company interface)
 * setup, CPU interface defaults, aperture configuration, 565 colour
 * mode, and board GPIO initial values. The sequence mirrors the vendor
 * bring-up order; do not reorder the register writes.
 */
static void w100_hw_init(struct w100fb_par *par)
{
	u32 temp32;
	union cif_cntl_u cif_cntl;
	union intf_cntl_u intf_cntl;
	union cfgreg_base_u cfgreg_base;
	union wrap_top_dir_u wrap_top_dir;
	union cif_read_dbg_u cif_read_dbg;
	union cpu_defaults_u cpu_default;
	union cif_write_dbg_u cif_write_dbg;
	union wrap_start_dir_u wrap_start_dir;
	union cif_io_u cif_io;
	struct w100_gpio_regs *gpio = par->mach->gpio;
	w100_soft_reset();
	/* This is what the fpga_init code does on reset. May be wrong
	   but there is little info available */
	writel(0x31, remapped_regs + mmSCRATCH_UMSK);
	for (temp32 = 0; temp32 < 10000; temp32++)
		readl(remapped_regs + mmSCRATCH_UMSK);
	writel(0x30, remapped_regs + mmSCRATCH_UMSK);
	/* Set up CIF */
	cif_io.val = defCIF_IO;
	writel((u32)(cif_io.val), remapped_regs + mmCIF_IO);
	cif_write_dbg.val = readl(remapped_regs + mmCIF_WRITE_DBG);
	cif_write_dbg.f.dis_packer_ful_during_rbbm_timeout = 0;
	cif_write_dbg.f.en_dword_split_to_rbbm = 1;
	cif_write_dbg.f.dis_timeout_during_rbbm = 1;
	writel((u32) (cif_write_dbg.val), remapped_regs + mmCIF_WRITE_DBG);
	cif_read_dbg.val = readl(remapped_regs + mmCIF_READ_DBG);
	cif_read_dbg.f.dis_rd_same_byte_to_trig_fetch = 1;
	writel((u32) (cif_read_dbg.val), remapped_regs + mmCIF_READ_DBG);
	cif_cntl.val = readl(remapped_regs + mmCIF_CNTL);
	cif_cntl.f.dis_system_bits = 1;
	cif_cntl.f.dis_mr = 1;
	cif_cntl.f.en_wait_to_compensate_dq_prop_dly = 0;
	cif_cntl.f.intb_oe = 1;
	cif_cntl.f.interrupt_active_high = 1;
	writel((u32) (cif_cntl.val), remapped_regs + mmCIF_CNTL);
	/* Setup cfgINTF_CNTL and cfgCPU defaults */
	intf_cntl.val = defINTF_CNTL;
	intf_cntl.f.ad_inc_a = 1;
	intf_cntl.f.ad_inc_b = 1;
	intf_cntl.f.rd_data_rdy_a = 0;
	intf_cntl.f.rd_data_rdy_b = 0;
	writeb((u8) (intf_cntl.val), remapped_base + cfgINTF_CNTL);
	cpu_default.val = defCPU_DEFAULTS;
	cpu_default.f.access_ind_addr_a = 1;
	cpu_default.f.access_ind_addr_b = 1;
	cpu_default.f.access_scratch_reg = 1;
	cpu_default.f.transition_size = 0;
	writeb((u8) (cpu_default.val), remapped_base + cfgCPU_DEFAULTS);
	/* set up the apertures */
	writeb((u8) (W100_REG_BASE >> 16), remapped_base + cfgREG_BASE);
	cfgreg_base.val = defCFGREG_BASE;
	cfgreg_base.f.cfgreg_base = W100_CFG_BASE;
	writel((u32) (cfgreg_base.val), remapped_regs + mmCFGREG_BASE);
	wrap_start_dir.val = defWRAP_START_DIR;
	wrap_start_dir.f.start_addr = WRAP_BUF_BASE_VALUE >> 1;
	writel((u32) (wrap_start_dir.val), remapped_regs + mmWRAP_START_DIR);
	wrap_top_dir.val = defWRAP_TOP_DIR;
	wrap_top_dir.f.top_addr = WRAP_BUF_TOP_VALUE >> 1;
	writel((u32) (wrap_top_dir.val), remapped_regs + mmWRAP_TOP_DIR);
	writel((u32) 0x2440, remapped_regs + mmRBBM_CNTL);
	/* Set the hardware to 565 colour */
	temp32 = readl(remapped_regs + mmDISP_DEBUG2);
	temp32 &= 0xff7fffff;
	temp32 |= 0x00800000;
	writel(temp32, remapped_regs + mmDISP_DEBUG2);
	/* Initialise the GPIO lines */
	if (gpio) {
		writel(gpio->init_data1, remapped_regs + mmGPIO_DATA);
		writel(gpio->init_data2, remapped_regs + mmGPIO_DATA2);
		writel(gpio->gpio_dir1, remapped_regs + mmGPIO_CNTL1);
		writel(gpio->gpio_oe1, remapped_regs + mmGPIO_CNTL2);
		writel(gpio->gpio_dir2, remapped_regs + mmGPIO_CNTL3);
		writel(gpio->gpio_oe2, remapped_regs + mmGPIO_CNTL4);
	}
}
/*
 * Shadow copy of the chip's clock/power registers. The hardware registers
 * are write-only in practice here, so the driver keeps the last-written
 * values and modifies fields before writing a whole register back.
 */
struct power_state {
	union clk_pin_cntl_u clk_pin_cntl;
	union pll_ref_fb_div_u pll_ref_fb_div;
	union pll_cntl_u pll_cntl;
	union sclk_cntl_u sclk_cntl;
	union pclk_cntl_u pclk_cntl;
	union pwrmgt_cntl_u pwrmgt_cntl;
	int auto_mode;  /* system clock auto changing? */
};
/* Single global instance; this driver supports one device. */
static struct power_state w100_pwr_state;
/* The PLL Fout is determined by (XtalFreq/(M+1)) * ((N_int+1) + (N_fac/8)) */
/* 12.5MHz Crystal PLL Table */
static struct w100_pll_info xtal_12500000[] = {
	/*freq     M   N_int    N_fac  tfgoal  lock_time */
	{ 50,      0,   1,       0,     0xe0,        56},  /*  50.00 MHz */
	{ 75,      0,   5,       0,     0xde,        37},  /*  75.00 MHz */
	{100,      0,   7,       0,     0xe0,        28},  /* 100.00 MHz */
	{125,      0,   9,       0,     0xe0,        22},  /* 125.00 MHz */
	{150,      0,   11,      0,     0xe0,        17},  /* 150.00 MHz */
	{  0,      0,   0,       0,        0,         0},  /* Terminator */
};
/* 14.318MHz Crystal PLL Table */
static struct w100_pll_info xtal_14318000[] = {
	/*freq     M   N_int    N_fac  tfgoal  lock_time */
	{ 40,      4,   13,      0,     0xe0,        80}, /* tfgoal guessed */
	{ 50,      1,   6,       0,     0xe0,        64}, /*  50.05 MHz */
	{ 57,      2,   11,      0,     0xe0,        53}, /* tfgoal guessed */
	{ 75,      0,   4,       3,     0xe0,        43}, /*  75.08 MHz */
	{100,      0,   6,       0,     0xe0,        32}, /* 100.10 MHz */
	{  0,      0,   0,       0,        0,         0},
};
/* 16MHz Crystal PLL Table */
static struct w100_pll_info xtal_16000000[] = {
	/*freq     M   N_int    N_fac  tfgoal  lock_time */
	{ 72,      1,   8,       0,     0xe0,        48}, /* tfgoal guessed */
	{ 80,      1,   9,       0,     0xe0,        13}, /* tfgoal guessed */
	{ 95,      1,   10,      7,     0xe0,        38}, /* tfgoal guessed */
	{ 96,      1,   11,      0,     0xe0,        36}, /* tfgoal guessed */
	{  0,      0,   0,       0,        0,         0},
};
/* Crystal frequency -> PLL table lookup; zero xtal_freq terminates. */
static struct pll_entries {
	int xtal_freq;
	struct w100_pll_info *pll_table;
} w100_pll_tables[] = {
	{ 12500000, &xtal_12500000[0] },
	{ 14318000, &xtal_14318000[0] },
	{ 16000000, &xtal_16000000[0] },
	{ 0 },
};
/*
 * Look up the PLL parameter table for the given crystal frequency.
 * Returns NULL when no table matches (callers check for a NULL result).
 * FIX: return NULL rather than the integer literal 0 for a pointer.
 */
struct w100_pll_info __devinit *w100_get_xtal_table(unsigned int freq)
{
	struct pll_entries *pll_entry = w100_pll_tables;

	do {
		if (freq == pll_entry->xtal_freq)
			return pll_entry->pll_table;
		pll_entry++;
	} while (pll_entry->xtal_freq);
	return NULL;
}
/*
 * Run the chip's built-in clock frequency test against the selected test
 * clock source and return the resulting test count.
 *
 * FIX: the control union was written to the hardware with only a few
 * bitfields assigned, leaving the rest as uninitialized stack garbage
 * (undefined behavior, nondeterministic register writes). Start from
 * zero so every write is fully defined. NOTE(review): assumes the
 * unset CLK_TEST_CNTL bits are safe as 0 — confirm against the
 * register layout.
 */
static unsigned int w100_get_testcount(unsigned int testclk_sel)
{
	union clk_test_cntl_u clk_test_cntl;

	udelay(5);

	/* Select the test clock source and reset */
	clk_test_cntl.val = 0;
	clk_test_cntl.f.start_check_freq = 0x0;
	clk_test_cntl.f.testclk_sel = testclk_sel;
	clk_test_cntl.f.tstcount_rst = 0x1; /* set reset */
	writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);
	clk_test_cntl.f.tstcount_rst = 0x0; /* clear reset */
	writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);

	/* Run clock test */
	clk_test_cntl.f.start_check_freq = 0x1;
	writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);

	/* Give the test time to complete */
	udelay(20);

	/* Return the result */
	clk_test_cntl.val = readl(remapped_regs + mmCLK_TEST_CNTL);
	clk_test_cntl.f.start_check_freq = 0x0;
	writel((u32) (clk_test_cntl.val), remapped_regs + mmCLK_TEST_CNTL);

	return clk_test_cntl.f.test_count;
}
/*
 * Iteratively tune the PLL's analog parameters (ioffset, pvg, vcofr)
 * until the measured test count brackets the target: at 0.8*VDD VCO
 * input the count must reach tfgoal, and at 0.2*VDD it must not exceed
 * it. Returns 1 on success, 0 when the parameter space is exhausted.
 */
static int w100_pll_adjust(struct w100_pll_info *pll)
{
	unsigned int tf80;
	unsigned int tf20;
	/* Initial Settings */
	w100_pwr_state.pll_cntl.f.pll_pwdn = 0x0;     /* power down */
	w100_pwr_state.pll_cntl.f.pll_reset = 0x0;    /* not reset */
	w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x1;   /* Hi-Z */
	w100_pwr_state.pll_cntl.f.pll_pvg = 0x0;      /* VCO gain = 0 */
	w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0;    /* VCO frequency range control = off */
	w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;  /* current offset inside VCO = 0 */
	w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;
	/* Wai Ming 80 percent of VDD 1.3V gives 1.04V, minimum operating voltage is 1.08V
	 * therefore, commented out the following lines
	 * tf80 meant tf100
	 */
	do {
		/* set VCO input = 0.8 * VDD */
		w100_pwr_state.pll_cntl.f.pll_dactal = 0xd;
		writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
		tf80 = w100_get_testcount(TESTCLK_SRC_PLL);
		if (tf80 >= (pll->tfgoal)) {
			/* set VCO input = 0.2 * VDD */
			w100_pwr_state.pll_cntl.f.pll_dactal = 0x7;
			writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
			tf20 = w100_get_testcount(TESTCLK_SRC_PLL);
			if (tf20 <= (pll->tfgoal))
				return 1;  /* Success */
			if ((w100_pwr_state.pll_cntl.f.pll_vcofr == 0x0) &&
			    ((w100_pwr_state.pll_cntl.f.pll_pvg == 0x7) ||
			     (w100_pwr_state.pll_cntl.f.pll_ioffset == 0x0))) {
				/* slow VCO config */
				w100_pwr_state.pll_cntl.f.pll_vcofr = 0x1;
				w100_pwr_state.pll_cntl.f.pll_pvg = 0x0;
				w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
				continue;
			}
		}
		/* Step the search: bump ioffset first, then pvg; give up
		 * when both are at their maximum. */
		if ((w100_pwr_state.pll_cntl.f.pll_ioffset) < 0x3) {
			w100_pwr_state.pll_cntl.f.pll_ioffset += 0x1;
		} else if ((w100_pwr_state.pll_cntl.f.pll_pvg) < 0x7) {
			w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
			w100_pwr_state.pll_cntl.f.pll_pvg += 0x1;
		} else {
			return 0;  /* Error */
		}
	} while(1);
}
/*
* w100_pll_calibration
*/
/*
 * Calibrate the PLL (via w100_pll_adjust) and then run the reset/lock
 * sequence: mid-rail VCO input, enable charge pump, release the DAC and
 * wait the lock time. Returns the adjust status (1 = success, 0 = fail);
 * the lock sequence runs regardless. The udelay() values are part of the
 * hardware timing.
 */
static int w100_pll_calibration(struct w100_pll_info *pll)
{
	int status;
	status = w100_pll_adjust(pll);
	/* PLL Reset And Lock */
	/* set VCO input = 0.5 * VDD */
	w100_pwr_state.pll_cntl.f.pll_dactal = 0xa;
	writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
	udelay(1);  /* reset time */
	/* enable charge pump */
	w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0;  /* normal */
	writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
	/* set VCO input = Hi-Z, disable DAC */
	w100_pwr_state.pll_cntl.f.pll_dactal = 0x0;
	writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
	udelay(400);  /* lock time */
	/* PLL locked */
	return status;
}
/*
 * Program the PLL with the given table entry and calibrate it. While the
 * PLL is being retuned, the system clock is switched to the crystal and
 * (in auto mode) the hardware fast/normal transitions are disabled, then
 * re-enabled afterwards. Returns the calibration status.
 */
static int w100_pll_set_clk(struct w100_pll_info *pll)
{
	int status;
	if (w100_pwr_state.auto_mode == 1)  /* auto mode */
	{
		w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0;  /* disable fast to normal */
		w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0;  /* disable normal to fast */
		writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
	}
	/* Set system clock source to XTAL whilst adjusting the PLL! */
	w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL;
	writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
	/* Load the divider values from the PLL table entry. */
	w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = pll->M;
	w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = pll->N_int;
	w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = pll->N_fac;
	w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = pll->lock_time;
	writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV);
	w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0;
	writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
	status = w100_pll_calibration(pll);
	if (w100_pwr_state.auto_mode == 1)  /* auto mode */
	{
		w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x1;  /* reenable fast to normal */
		w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x1;  /* reenable normal to fast */
		writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
	}
	return status;
}
/* freq = target frequency of the PLL */
static int w100_set_pll_freq(struct w100fb_par *par, unsigned int freq)
{
struct w100_pll_info *pll = par->pll_table;
do {
if (freq == pll->freq) {
return w100_pll_set_clk(pll);
}
pll++;
} while(pll->freq);
return 0;
}
/* Set up an initial state. Some values/fields set
here will be overwritten. */
/* Set up an initial state. Some values/fields set
   here will be overwritten. Programs the oscillator pin control, system
   and pixel clock sources (both XTAL initially), PLL dividers/analog
   settings, and leaves power management disabled (manual mode). */
static void w100_pwm_setup(struct w100fb_par *par)
{
	/* Oscillator: enabled, full gain; optional frequency doubler per board. */
	w100_pwr_state.clk_pin_cntl.f.osc_en = 0x1;
	w100_pwr_state.clk_pin_cntl.f.osc_gain = 0x1f;
	w100_pwr_state.clk_pin_cntl.f.dont_use_xtalin = 0x0;
	w100_pwr_state.clk_pin_cntl.f.xtalin_pm_en = 0x0;
	w100_pwr_state.clk_pin_cntl.f.xtalin_dbl_en = par->mach->xtal_dbl ? 1 : 0;
	w100_pwr_state.clk_pin_cntl.f.cg_debug = 0x0;
	writel((u32) (w100_pwr_state.clk_pin_cntl.val), remapped_regs + mmCLK_PIN_CNTL);
	/* System clock from the crystal, all gates dynamic. */
	w100_pwr_state.sclk_cntl.f.sclk_src_sel = CLK_SRC_XTAL;
	w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = 0x0;  /* Pfast = 1 */
	w100_pwr_state.sclk_cntl.f.sclk_clkon_hys = 0x3;
	w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = 0x0;  /* Pslow = 1 */
	w100_pwr_state.sclk_cntl.f.disp_cg_ok2switch_en = 0x0;
	w100_pwr_state.sclk_cntl.f.sclk_force_reg = 0x0;    /* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_disp = 0x0;   /* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_mc = 0x0;     /* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_extmc = 0x0;  /* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_cp = 0x0;     /* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_e2 = 0x0;     /* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_e3 = 0x0;     /* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_idct = 0x0;   /* Dynamic */
	w100_pwr_state.sclk_cntl.f.sclk_force_bist = 0x0;   /* Dynamic */
	w100_pwr_state.sclk_cntl.f.busy_extend_cp = 0x0;
	w100_pwr_state.sclk_cntl.f.busy_extend_e2 = 0x0;
	w100_pwr_state.sclk_cntl.f.busy_extend_e3 = 0x0;
	w100_pwr_state.sclk_cntl.f.busy_extend_idct = 0x0;
	writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
	/* Pixel clock from the crystal, divide-by-two. */
	w100_pwr_state.pclk_cntl.f.pclk_src_sel = CLK_SRC_XTAL;
	w100_pwr_state.pclk_cntl.f.pclk_post_div = 0x1;    /* P = 2 */
	w100_pwr_state.pclk_cntl.f.pclk_force_disp = 0x0;  /* Dynamic */
	writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL);
	/* PLL dividers at unity; real values come from the PLL tables. */
	w100_pwr_state.pll_ref_fb_div.f.pll_ref_div = 0x0;     /* M = 1 */
	w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_int = 0x0;  /* N = 1.0 */
	w100_pwr_state.pll_ref_fb_div.f.pll_fb_div_frac = 0x0;
	w100_pwr_state.pll_ref_fb_div.f.pll_reset_time = 0x5;
	w100_pwr_state.pll_ref_fb_div.f.pll_lock_time = 0xff;
	writel((u32) (w100_pwr_state.pll_ref_fb_div.val), remapped_regs + mmPLL_REF_FB_DIV);
	/* PLL powered down and held in reset until first use. */
	w100_pwr_state.pll_cntl.f.pll_pwdn = 0x1;
	w100_pwr_state.pll_cntl.f.pll_reset = 0x1;
	w100_pwr_state.pll_cntl.f.pll_pm_en = 0x0;
	w100_pwr_state.pll_cntl.f.pll_mode = 0x0;  /* uses VCO clock */
	w100_pwr_state.pll_cntl.f.pll_refclk_sel = 0x0;
	w100_pwr_state.pll_cntl.f.pll_fbclk_sel = 0x0;
	w100_pwr_state.pll_cntl.f.pll_tcpoff = 0x0;
	w100_pwr_state.pll_cntl.f.pll_pcp = 0x4;
	w100_pwr_state.pll_cntl.f.pll_pvg = 0x0;
	w100_pwr_state.pll_cntl.f.pll_vcofr = 0x0;
	w100_pwr_state.pll_cntl.f.pll_ioffset = 0x0;
	w100_pwr_state.pll_cntl.f.pll_pecc_mode = 0x0;
	w100_pwr_state.pll_cntl.f.pll_pecc_scon = 0x0;
	w100_pwr_state.pll_cntl.f.pll_dactal = 0x0;  /* Hi-Z */
	w100_pwr_state.pll_cntl.f.pll_cp_clip = 0x3;
	w100_pwr_state.pll_cntl.f.pll_conf = 0x2;
	w100_pwr_state.pll_cntl.f.pll_mbctrl = 0x2;
	w100_pwr_state.pll_cntl.f.pll_ring_off = 0x0;
	writel((u32) (w100_pwr_state.pll_cntl.val), remapped_regs + mmPLL_CNTL);
	/* Power management off; timers maxed for when it is enabled. */
	w100_pwr_state.pwrmgt_cntl.f.pwm_enable = 0x0;
	w100_pwr_state.pwrmgt_cntl.f.pwm_mode_req = 0x1;  /* normal mode (0, 1, 3) */
	w100_pwr_state.pwrmgt_cntl.f.pwm_wakeup_cond = 0x0;
	w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_hw_en = 0x0;
	w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_hw_en = 0x0;
	w100_pwr_state.pwrmgt_cntl.f.pwm_fast_noml_cond = 0x1;  /* PM4,ENG */
	w100_pwr_state.pwrmgt_cntl.f.pwm_noml_fast_cond = 0x1;  /* PM4,ENG */
	w100_pwr_state.pwrmgt_cntl.f.pwm_idle_timer = 0xFF;
	w100_pwr_state.pwrmgt_cntl.f.pwm_busy_timer = 0xFF;
	writel((u32) (w100_pwr_state.pwrmgt_cntl.val), remapped_regs + mmPWRMGT_CNTL);
	w100_pwr_state.auto_mode = 0;  /* manual mode */
}
/*
* Setup the w100 clocks for the specified mode
*/
/*
 * Set up the w100 clocks for the current mode: program the PLL when
 * either clock is sourced from it (honouring fast-PLL mode when a fast
 * frequency is defined), then select the system clock source/dividers.
 */
static void w100_init_clocks(struct w100fb_par *par)
{
	struct w100_mode *mode = par->mode;

	if (mode->pixclk_src == CLK_SRC_PLL || mode->sysclk_src == CLK_SRC_PLL) {
		unsigned int freq;

		freq = (par->fastpll_mode && mode->fast_pll_freq) ?
			mode->fast_pll_freq : mode->pll_freq;
		w100_set_pll_freq(par, freq);
	}

	w100_pwr_state.sclk_cntl.f.sclk_src_sel = mode->sysclk_src;
	w100_pwr_state.sclk_cntl.f.sclk_post_div_fast = mode->sysclk_divider;
	w100_pwr_state.sclk_cntl.f.sclk_post_div_slow = mode->sysclk_divider;
	writel((u32) (w100_pwr_state.sclk_cntl.val), remapped_regs + mmSCLK_CNTL);
}
/*
 * Program the CRTC timing and LCD interface registers for the current
 * mode, using the board-specific register values from par->mach->regs.
 */
static void w100_init_lcd(struct w100fb_par *par)
{
u32 temp32;
struct w100_mode *mode = par->mode;
struct w100_gen_regs *regs = par->mach->regs;
union active_h_disp_u active_h_disp;
union active_v_disp_u active_v_disp;
union graphic_h_disp_u graphic_h_disp;
union graphic_v_disp_u graphic_v_disp;
union crtc_total_u crtc_total;
/* w3200 doesn't like undefined bits being set so zero register values first */
/* Horizontal/vertical active window: margin .. margin + resolution. */
active_h_disp.val = 0;
active_h_disp.f.active_h_start=mode->left_margin;
active_h_disp.f.active_h_end=mode->left_margin + mode->xres;
writel(active_h_disp.val, remapped_regs + mmACTIVE_H_DISP);
active_v_disp.val = 0;
active_v_disp.f.active_v_start=mode->upper_margin;
active_v_disp.f.active_v_end=mode->upper_margin + mode->yres;
writel(active_v_disp.val, remapped_regs + mmACTIVE_V_DISP);
/* Graphic window matches the active window exactly. */
graphic_h_disp.val = 0;
graphic_h_disp.f.graphic_h_start=mode->left_margin;
graphic_h_disp.f.graphic_h_end=mode->left_margin + mode->xres;
writel(graphic_h_disp.val, remapped_regs + mmGRAPHIC_H_DISP);
graphic_v_disp.val = 0;
graphic_v_disp.f.graphic_v_start=mode->upper_margin;
graphic_v_disp.f.graphic_v_end=mode->upper_margin + mode->yres;
writel(graphic_v_disp.val, remapped_regs + mmGRAPHIC_V_DISP);
/* Frame totals include both margins on each axis. */
crtc_total.val = 0;
crtc_total.f.crtc_h_total=mode->left_margin + mode->xres + mode->right_margin;
crtc_total.f.crtc_v_total=mode->upper_margin + mode->yres + mode->lower_margin;
writel(crtc_total.val, remapped_regs + mmCRTC_TOTAL);
/* Per-mode CRTC signal timings, taken verbatim from the mode table. */
writel(mode->crtc_ss, remapped_regs + mmCRTC_SS);
writel(mode->crtc_ls, remapped_regs + mmCRTC_LS);
writel(mode->crtc_gs, remapped_regs + mmCRTC_GS);
writel(mode->crtc_vpos_gs, remapped_regs + mmCRTC_VPOS_GS);
writel(mode->crtc_rev, remapped_regs + mmCRTC_REV);
writel(mode->crtc_dclk, remapped_regs + mmCRTC_DCLK);
writel(mode->crtc_gclk, remapped_regs + mmCRTC_GCLK);
writel(mode->crtc_goe, remapped_regs + mmCRTC_GOE);
writel(mode->crtc_ps1_active, remapped_regs + mmCRTC_PS1_ACTIVE);
/* Board-specific LCD panel interface settings. */
writel(regs->lcd_format, remapped_regs + mmLCD_FORMAT);
writel(regs->lcdd_cntl1, remapped_regs + mmLCDD_CNTL1);
writel(regs->lcdd_cntl2, remapped_regs + mmLCDD_CNTL2);
writel(regs->genlcd_cntl1, remapped_regs + mmGENLCD_CNTL1);
writel(regs->genlcd_cntl2, remapped_regs + mmGENLCD_CNTL2);
writel(regs->genlcd_cntl3, remapped_regs + mmGENLCD_CNTL3);
writel(0x00000000, remapped_regs + mmCRTC_FRAME);
writel(0x00000000, remapped_regs + mmCRTC_FRAME_VPOS);
writel(0x00000000, remapped_regs + mmCRTC_DEFAULT_COUNT);
writel(0x0000FF00, remapped_regs + mmLCD_BACKGROUND_COLOR);
/* Hack for overlay in ext memory */
temp32 = readl(remapped_regs + mmDISP_DEBUG2);
temp32 |= 0xc0000000;
writel(temp32, remapped_regs + mmDISP_DEBUG2);
}
/*
 * Map either internal or external video memory at the framebuffer base
 * address (W100_FB_BASE) and, for external memory, run the SDRAM mode
 * register programming sequence.  The write ordering and the udelay()s
 * in the external-memory path are part of the init sequence - do not
 * reorder.
 */
static void w100_setup_memory(struct w100fb_par *par)
{
union mc_ext_mem_location_u extmem_location;
union mc_fb_location_u intmem_location;
struct w100_mem_info *mem = par->mach->mem;
struct w100_bm_mem_info *bm_mem = par->mach->bm_mem;
if (!par->extmem_active) {
/* External memory unused: power it down first. */
w100_suspend(W100_SUSPEND_EXTMEM);
/* Map Internal Memory at FB Base */
intmem_location.f.mc_fb_start = W100_FB_BASE >> 8;
intmem_location.f.mc_fb_top = (W100_FB_BASE+MEM_INT_SIZE) >> 8;
writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION);
/* Unmap External Memory - value is *probably* irrelevant but may have meaning
to acceleration libraries */
extmem_location.f.mc_ext_mem_start = MEM_EXT_BASE_VALUE >> 8;
extmem_location.f.mc_ext_mem_top = (MEM_EXT_BASE_VALUE-1) >> 8;
writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION);
} else {
/* Map Internal Memory to its default location */
intmem_location.f.mc_fb_start = MEM_INT_BASE_VALUE >> 8;
intmem_location.f.mc_fb_top = (MEM_INT_BASE_VALUE+MEM_INT_SIZE) >> 8;
writel((u32) (intmem_location.val), remapped_regs + mmMC_FB_LOCATION);
/* Map External Memory at FB Base */
extmem_location.f.mc_ext_mem_start = W100_FB_BASE >> 8;
extmem_location.f.mc_ext_mem_top = (W100_FB_BASE+par->mach->mem->size) >> 8;
writel((u32) (extmem_location.val), remapped_regs + mmMC_EXT_MEM_LOCATION);
/* SDRAM mode-register init sequence; udelay(100) between steps. */
writel(0x00007800, remapped_regs + mmMC_BIST_CTRL);
writel(mem->ext_cntl, remapped_regs + mmMEM_EXT_CNTL);
writel(0x00200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
udelay(100);
writel(0x80200021, remapped_regs + mmMEM_SDRAM_MODE_REG);
udelay(100);
writel(mem->sdram_mode_reg, remapped_regs + mmMEM_SDRAM_MODE_REG);
udelay(100);
writel(mem->ext_timing_cntl, remapped_regs + mmMEM_EXT_TIMING_CNTL);
writel(mem->io_cntl, remapped_regs + mmMEM_IO_CNTL);
/* Optional bus-master memory configuration (board-specific). */
if (bm_mem) {
writel(bm_mem->ext_mem_bw, remapped_regs + mmBM_EXT_MEM_BANDWIDTH);
writel(bm_mem->offset, remapped_regs + mmBM_OFFSET);
writel(bm_mem->ext_timing_ctl, remapped_regs + mmBM_MEM_EXT_TIMING_CNTL);
writel(bm_mem->ext_cntl, remapped_regs + mmBM_MEM_EXT_CNTL);
writel(bm_mem->mode_reg, remapped_regs + mmBM_MEM_MODE_REG);
writel(bm_mem->io_cntl, remapped_regs + mmBM_MEM_IO_CNTL);
writel(bm_mem->config, remapped_regs + mmBM_CONFIG);
}
}
}
/*
 * Program the graphic controller, pixel clock and framebuffer
 * offset/pitch registers, taking the current rotation into account.
 * Rotation is inferred by comparing the user-visible xres against the
 * native mode xres; par->flip selects the 180/270 variants.
 */
static void w100_set_dispregs(struct w100fb_par *par)
{
unsigned long rot=0, divider, offset=0;
union graphic_ctrl_u graphic_ctrl;
/* See if the mode has been rotated */
if (par->xres == par->mode->xres) {
if (par->flip) {
rot=3; /* 180 degree */
offset=(par->xres * par->yres) - 1;
} /* else 0 degree */
divider = par->mode->pixclk_divider;
} else {
if (par->flip) {
rot=2; /* 270 degree */
offset=par->xres - 1;
} else {
rot=1; /* 90 degree */
offset=par->xres * (par->yres - 1);
}
/* Rotated scan-out needs its own pixel clock divider. */
divider = par->mode->pixclk_divider_rotated;
}
graphic_ctrl.val = 0; /* w32xx doesn't like undefined bits */
/* The w100 and w32xx lay out GRAPHIC_CTRL differently, hence the
 * two parallel bitfield views (f_w100 / f_w32xx). */
switch (par->chip_id) {
case CHIP_ID_W100:
graphic_ctrl.f_w100.color_depth=6;
graphic_ctrl.f_w100.en_crtc=1;
graphic_ctrl.f_w100.en_graphic_req=1;
graphic_ctrl.f_w100.en_graphic_crtc=1;
graphic_ctrl.f_w100.lcd_pclk_on=1;
graphic_ctrl.f_w100.lcd_sclk_on=1;
graphic_ctrl.f_w100.low_power_on=0;
graphic_ctrl.f_w100.req_freq=0;
graphic_ctrl.f_w100.portrait_mode=rot;
/* Zaurus needs this */
switch(par->xres) {
case 240:
case 320:
default:
graphic_ctrl.f_w100.total_req_graphic=0xa0;
break;
case 480:
case 640:
switch(rot) {
case 0: /* 0 */
case 3: /* 180 */
graphic_ctrl.f_w100.low_power_on=1;
graphic_ctrl.f_w100.req_freq=5;
break;
case 1: /* 90 */
case 2: /* 270 */
graphic_ctrl.f_w100.req_freq=4;
break;
default:
break;
}
graphic_ctrl.f_w100.total_req_graphic=0xf0;
break;
}
break;
case CHIP_ID_W3200:
case CHIP_ID_W3220:
graphic_ctrl.f_w32xx.color_depth=6;
graphic_ctrl.f_w32xx.en_crtc=1;
graphic_ctrl.f_w32xx.en_graphic_req=1;
graphic_ctrl.f_w32xx.en_graphic_crtc=1;
graphic_ctrl.f_w32xx.lcd_pclk_on=1;
graphic_ctrl.f_w32xx.lcd_sclk_on=1;
graphic_ctrl.f_w32xx.low_power_on=0;
graphic_ctrl.f_w32xx.req_freq=0;
graphic_ctrl.f_w32xx.total_req_graphic=par->mode->xres >> 1; /* panel xres, not mode */
graphic_ctrl.f_w32xx.portrait_mode=rot;
break;
}
/* Set the pixel clock source and divider */
w100_pwr_state.pclk_cntl.f.pclk_src_sel = par->mode->pixclk_src;
w100_pwr_state.pclk_cntl.f.pclk_post_div = divider;
writel((u32) (w100_pwr_state.pclk_cntl.val), remapped_regs + mmPCLK_CNTL);
writel(graphic_ctrl.val, remapped_regs + mmGRAPHIC_CTRL);
/* Byte offset of the first visible pixel, rounded down to a word. */
writel(W100_FB_BASE + ((offset * BITS_PER_PIXEL/8)&~0x03UL), remapped_regs + mmGRAPHIC_OFFSET);
writel((par->xres*BITS_PER_PIXEL/8), remapped_regs + mmGRAPHIC_PITCH);
}
/*
* Work out how long the sync pulse lasts
* Value is 1/(time in seconds)
*/
/*
 * Work out how long the sync pulse lasts.
 * par->hsync_len is stored as 1/(time in seconds).
 *
 * The pixel clock frequency is taken from the crystal or the PLL
 * (fast_pll_freq overrides pll_freq when fastpll_mode is on), then
 * divided by the programmed post-divider.  The sync pulse width in
 * pixels is read back from the CRTC_SS register.
 *
 * Fix: the original divided by (ss_end - ss_start) whenever CRTC_SS
 * was non-zero; a register value with ss_end == ss_start would divide
 * by zero.  Guard the width as well and fall back to 0.
 */
static void calc_hsync(struct w100fb_par *par)
{
	unsigned long hsync;
	unsigned long ss_len;
	struct w100_mode *mode = par->mode;
	union crtc_ss_u crtc_ss;

	if (mode->pixclk_src == CLK_SRC_XTAL)
		hsync = par->mach->xtal_freq;
	else
		hsync = ((par->fastpll_mode && mode->fast_pll_freq) ?
			 mode->fast_pll_freq : mode->pll_freq) * 100000;

	hsync /= (w100_pwr_state.pclk_cntl.f.pclk_post_div + 1);

	crtc_ss.val = readl(remapped_regs + mmCRTC_SS);
	ss_len = crtc_ss.f.ss_end - crtc_ss.f.ss_start;
	if (crtc_ss.val && ss_len)
		par->hsync_len = hsync / ss_len;
	else
		par->hsync_len = 0;
}
/*
 * Put the memory controller (and, for full suspend, the clocks/PLL)
 * into a low-power state.  W100_SUSPEND_EXTMEM only tri-states CKE and
 * stops the external memory clock; any other mode additionally gates
 * SCLK, powers down the PLL and enters power management via
 * PWRMGT_CNTL.  The bit-level comments document the datasheet fields.
 */
static void w100_suspend(u32 mode)
{
u32 val;
/* Park the external memory aperture and performance counters. */
writel(0x7FFF8000, remapped_regs + mmMC_EXT_MEM_LOCATION);
writel(0x00FF0000, remapped_regs + mmMC_PERF_MON_CNTL);
val = readl(remapped_regs + mmMEM_EXT_TIMING_CNTL);
val &= ~(0x00100000); /* bit20=0 */
val |= 0xFF000000; /* bit31:24=0xff */
writel(val, remapped_regs + mmMEM_EXT_TIMING_CNTL);
val = readl(remapped_regs + mmMEM_EXT_CNTL);
val &= ~(0x00040000); /* bit18=0 */
val |= 0x00080000; /* bit19=1 */
writel(val, remapped_regs + mmMEM_EXT_CNTL);
udelay(1); /* wait 1us */
if (mode == W100_SUSPEND_EXTMEM) {
/* CKE: Tri-State */
val = readl(remapped_regs + mmMEM_EXT_CNTL);
val |= 0x40000000; /* bit30=1 */
writel(val, remapped_regs + mmMEM_EXT_CNTL);
/* CLK: Stop */
val = readl(remapped_regs + mmMEM_EXT_CNTL);
val &= ~(0x00000001); /* bit0=0 */
writel(val, remapped_regs + mmMEM_EXT_CNTL);
} else {
/* Full suspend: stop SCLK, gate clock pins, enter PM mode,
 * then power the PLL down. */
writel(0x00000000, remapped_regs + mmSCLK_CNTL);
writel(0x000000BF, remapped_regs + mmCLK_PIN_CNTL);
writel(0x00000015, remapped_regs + mmPWRMGT_CNTL);
udelay(5);
val = readl(remapped_regs + mmPLL_CNTL);
val |= 0x00000004; /* bit2=1 */
writel(val, remapped_regs + mmPLL_CNTL);
writel(0x0000001d, remapped_regs + mmPWRMGT_CNTL);
}
}
/*
 * Busy-wait for the next vertical sync, using the vline interrupt
 * status bit (GEN_INT_STATUS bit 1) as the marker.  The irq is enabled
 * only for the duration of the poll; the enable/clear/poll/disable
 * ordering is deliberate so a stale status bit cannot satisfy the wait.
 */
static void w100_vsync(void)
{
u32 tmp;
int timeout = 30000; /* VSync timeout = 30[ms] > 16.8[ms] */
tmp = readl(remapped_regs + mmACTIVE_V_DISP);
/* set vline pos */
/* Trigger on the end of the active vertical display window. */
writel((tmp >> 16) & 0x3ff, remapped_regs + mmDISP_INT_CNTL);
/* disable vline irq */
tmp = readl(remapped_regs + mmGEN_INT_CNTL);
tmp &= ~0x00000002;
writel(tmp, remapped_regs + mmGEN_INT_CNTL);
/* clear vline irq status */
writel(0x00000002, remapped_regs + mmGEN_INT_STATUS);
/* enable vline irq */
writel((tmp | 0x00000002), remapped_regs + mmGEN_INT_CNTL);
/* clear vline irq status */
writel(0x00000002, remapped_regs + mmGEN_INT_STATUS);
/* Poll in 1us steps until the vline status bit latches. */
while(timeout > 0) {
if (readl(remapped_regs + mmGEN_INT_STATUS) & 0x00000002)
break;
udelay(1);
timeout--;
}
/* disable vline irq */
writel(tmp, remapped_regs + mmGEN_INT_CNTL);
/* clear vline irq status */
writel(0x00000002, remapped_regs + mmGEN_INT_STATUS);
}
/* Platform driver glue: probe/remove/suspend/resume are defined earlier
 * in this file.  __devexit_p() compiles the remove hook out when hotplug
 * removal is not configured (pre-3.8 kernel API). */
static struct platform_driver w100fb_driver = {
.probe = w100fb_probe,
.remove = __devexit_p(w100fb_remove),
.suspend = w100fb_suspend,
.resume = w100fb_resume,
.driver = {
.name = "w100fb",
},
};
/* Registers/unregisters the driver; expands to module init/exit. */
module_platform_driver(w100fb_driver);
MODULE_DESCRIPTION("ATI Imageon w100 framebuffer driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
RenderBroken/msm8974_Victara_render_kernel | drivers/scsi/bfa/bfa_fcs_rport.c | 4940 | 78075 | /*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*
* Linux driver for Brocade Fibre Channel Host Bus Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* rport.c Remote port implementation.
*/
#include "bfad_drv.h"
#include "bfad_im.h"
#include "bfa_fcs.h"
#include "bfa_fcbuild.h"
BFA_TRC_FILE(FCS, RPORT);
static u32
bfa_fcs_rport_del_timeout = BFA_FCS_RPORT_DEF_DEL_TIMEOUT * 1000;
/* In millisecs */
/*
* forward declarations
*/
static struct bfa_fcs_rport_s *bfa_fcs_rport_alloc(
struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid);
static void bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport,
struct fc_logi_s *plogi);
static void bfa_fcs_rport_timeout(void *arg);
static void bfa_fcs_rport_send_plogi(void *rport_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_rport_send_plogiacc(void *rport_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_rport_plogi_response(void *fcsarg,
struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs);
static void bfa_fcs_rport_send_adisc(void *rport_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_rport_adisc_response(void *fcsarg,
struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs);
static void bfa_fcs_rport_send_nsdisc(void *rport_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_rport_gidpn_response(void *fcsarg,
struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs);
static void bfa_fcs_rport_gpnid_response(void *fcsarg,
struct bfa_fcxp_s *fcxp, void *cbarg,
bfa_status_t req_status, u32 rsp_len,
u32 resid_len, struct fchs_s *rsp_fchs);
static void bfa_fcs_rport_send_logo(void *rport_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_rport_send_logo_acc(void *rport_cbarg);
static void bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
struct fchs_s *rx_fchs, u16 len);
static void bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport,
struct fchs_s *rx_fchs, u8 reason_code,
u8 reason_code_expl);
static void bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
struct fchs_s *rx_fchs, u16 len);
static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
static void bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
enum rport_event event);
static void bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
enum rport_event event);
/* Maps each state-machine handler to the externally visible
 * BFA_RPORT_* state reported to management/ioctl queries. */
static struct bfa_sm_table_s rport_sm_table[] = {
{BFA_SM(bfa_fcs_rport_sm_uninit), BFA_RPORT_UNINIT},
{BFA_SM(bfa_fcs_rport_sm_plogi_sending), BFA_RPORT_PLOGI},
{BFA_SM(bfa_fcs_rport_sm_plogiacc_sending), BFA_RPORT_ONLINE},
{BFA_SM(bfa_fcs_rport_sm_plogi_retry), BFA_RPORT_PLOGI_RETRY},
{BFA_SM(bfa_fcs_rport_sm_plogi), BFA_RPORT_PLOGI},
{BFA_SM(bfa_fcs_rport_sm_hal_online), BFA_RPORT_ONLINE},
{BFA_SM(bfa_fcs_rport_sm_online), BFA_RPORT_ONLINE},
{BFA_SM(bfa_fcs_rport_sm_nsquery_sending), BFA_RPORT_NSQUERY},
{BFA_SM(bfa_fcs_rport_sm_nsquery), BFA_RPORT_NSQUERY},
{BFA_SM(bfa_fcs_rport_sm_adisc_sending), BFA_RPORT_ADISC},
{BFA_SM(bfa_fcs_rport_sm_adisc), BFA_RPORT_ADISC},
{BFA_SM(bfa_fcs_rport_sm_fc4_logorcv), BFA_RPORT_LOGORCV},
{BFA_SM(bfa_fcs_rport_sm_fc4_logosend), BFA_RPORT_LOGO},
{BFA_SM(bfa_fcs_rport_sm_fc4_offline), BFA_RPORT_OFFLINE},
{BFA_SM(bfa_fcs_rport_sm_hcb_offline), BFA_RPORT_OFFLINE},
{BFA_SM(bfa_fcs_rport_sm_hcb_logorcv), BFA_RPORT_LOGORCV},
{BFA_SM(bfa_fcs_rport_sm_hcb_logosend), BFA_RPORT_LOGO},
{BFA_SM(bfa_fcs_rport_sm_logo_sending), BFA_RPORT_LOGO},
{BFA_SM(bfa_fcs_rport_sm_offline), BFA_RPORT_OFFLINE},
{BFA_SM(bfa_fcs_rport_sm_nsdisc_sending), BFA_RPORT_NSDISC},
{BFA_SM(bfa_fcs_rport_sm_nsdisc_retry), BFA_RPORT_NSDISC},
{BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
};
/*
* Beginning state.
*/
/* Initial state: the rport exists but no login has been attempted. */
static void
bfa_fcs_rport_sm_uninit(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_PLOGI_SEND:
/* We initiate the login: start sending PLOGI. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
rport->plogi_retries = 0;
bfa_fcs_rport_send_plogi(rport, NULL);
break;
case RPSM_EVENT_PLOGI_RCVD:
/* Remote initiated the login: accept it. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
bfa_fcs_rport_send_plogiacc(rport, NULL);
break;
case RPSM_EVENT_PLOGI_COMP:
/* Login already completed elsewhere; go straight to HAL online. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_fcs_rport_hal_online(rport);
break;
case RPSM_EVENT_ADDRESS_CHANGE:
case RPSM_EVENT_ADDRESS_DISC:
/* PID unknown/stale: query the name server first. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
rport->ns_retries = 0;
bfa_fcs_rport_send_nsdisc(rport, NULL);
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
* PLOGI is being sent.
*/
/* Waiting for the fcxp allocation so the PLOGI can actually go out. */
static void
bfa_fcs_rport_sm_plogi_sending(struct bfa_fcs_rport_s *rport,
enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_FCXP_SENT:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi);
break;
case RPSM_EVENT_DELETE:
/* Cancel the pending fcxp wait before freeing. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_free(rport);
break;
case RPSM_EVENT_PLOGI_RCVD:
/* Remote beat us to it; abandon our PLOGI and accept theirs. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_send_plogiacc(rport, NULL);
break;
case RPSM_EVENT_ADDRESS_CHANGE:
case RPSM_EVENT_SCN:
/* query the NS */
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
rport->ns_retries = 0;
bfa_fcs_rport_send_nsdisc(rport, NULL);
break;
case RPSM_EVENT_LOGO_IMP:
/* Implicit logout: park offline with the delete timer armed. */
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
* PLOGI is being sent.
*/
/* Waiting for the fcxp allocation so the PLOGI accept can go out. */
static void
bfa_fcs_rport_sm_plogiacc_sending(struct bfa_fcs_rport_s *rport,
enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_FCXP_SENT:
/* Accept sent: bring the HAL rport online. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_fcs_rport_hal_online(rport);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_free(rport);
break;
case RPSM_EVENT_PLOGI_RCVD:
case RPSM_EVENT_PLOGI_COMP:
case RPSM_EVENT_SCN:
/*
* Ignore, SCN is possibly online notification.
*/
break;
case RPSM_EVENT_ADDRESS_CHANGE:
/* PID changed under us: re-discover via the name server. */
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
rport->ns_retries = 0;
bfa_fcs_rport_send_nsdisc(rport, NULL);
break;
case RPSM_EVENT_LOGO_IMP:
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
break;
case RPSM_EVENT_HCB_OFFLINE:
/*
* Ignore BFA callback, on a PLOGI receive we call bfa offline.
*/
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
* PLOGI is sent.
*/
/* PLOGI failed; a retry timer is running before the next attempt. */
static void
bfa_fcs_rport_sm_plogi_retry(struct bfa_fcs_rport_s *rport,
enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_TIMEOUT:
/* Retry interval elapsed: resend PLOGI. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
bfa_fcs_rport_send_plogi(rport, NULL);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
bfa_timer_stop(&rport->timer);
bfa_fcs_rport_free(rport);
break;
case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_LOGO_RCVD:
/* Not logged in yet; nothing to tear down. */
break;
case RPSM_EVENT_PLOGI_RCVD:
/* Remote initiated instead: stop retrying and accept. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
bfa_timer_stop(&rport->timer);
bfa_fcs_rport_send_plogiacc(rport, NULL);
break;
case RPSM_EVENT_ADDRESS_CHANGE:
case RPSM_EVENT_SCN:
bfa_timer_stop(&rport->timer);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
rport->ns_retries = 0;
bfa_fcs_rport_send_nsdisc(rport, NULL);
break;
case RPSM_EVENT_LOGO_IMP:
/* Swap the retry timer for the offline delete timer. */
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_timer_stop(&rport->timer);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
break;
case RPSM_EVENT_PLOGI_COMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_timer_stop(&rport->timer);
bfa_fcs_rport_hal_online(rport);
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
* PLOGI is sent.
*/
/*
 * PLOGI has been sent; waiting for the response.  Note the two
 * deliberate fall-through chains: LOGO_RCVD -> PRLO_RCVD -> FAILED
 * all end in the retry/give-up logic.
 */
static void
bfa_fcs_rport_sm_plogi(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_ACCEPTED:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
rport->plogi_retries = 0;
bfa_fcs_rport_hal_online(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
bfa_fcs_rport_send_logo_acc(rport);
/*
* !! fall through !!
*/
case RPSM_EVENT_PRLO_RCVD:
if (rport->prlo == BFA_TRUE)
bfa_fcs_rport_send_prlo_acc(rport);
bfa_fcxp_discard(rport->fcxp);
/*
* !! fall through !!
*/
case RPSM_EVENT_FAILED:
/* Retry up to the limit, then park offline with delete timer. */
if (rport->plogi_retries < BFA_FCS_RPORT_MAX_RETRIES) {
rport->plogi_retries++;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
BFA_FCS_RETRY_TIMEOUT);
} else {
bfa_stats(rport->port, rport_del_max_plogi_retry);
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
}
break;
case RPSM_EVENT_PLOGI_RETRY:
/* Remote asked us to retry later (e.g. LS_RJT logical busy). */
rport->plogi_retries = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_retry);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
(FC_RA_TOV * 1000));
break;
case RPSM_EVENT_LOGO_IMP:
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_fcxp_discard(rport->fcxp);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
break;
case RPSM_EVENT_ADDRESS_CHANGE:
case RPSM_EVENT_SCN:
/* Address may be stale: drop the PLOGI and query the NS. */
bfa_fcxp_discard(rport->fcxp);
bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
rport->ns_retries = 0;
bfa_fcs_rport_send_nsdisc(rport, NULL);
break;
case RPSM_EVENT_PLOGI_RCVD:
/* Crossing PLOGIs: drop ours and accept the remote's. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_send_plogiacc(rport, NULL);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_free(rport);
break;
case RPSM_EVENT_PLOGI_COMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_hal_online(rport);
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
* PLOGI is complete. Awaiting BFA rport online callback. FC-4s
* are offline.
*/
/* PLOGI complete; waiting for the BFA (HAL) rport online callback. */
static void
bfa_fcs_rport_sm_hal_online(struct bfa_fcs_rport_s *rport,
enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_HCB_ONLINE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
bfa_fcs_rport_online_action(rport);
break;
case RPSM_EVENT_PRLO_RCVD:
case RPSM_EVENT_PLOGI_COMP:
break;
case RPSM_EVENT_LOGO_RCVD:
/* Remember the LOGO while waiting for the HAL to go offline. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
break;
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_ADDRESS_CHANGE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
break;
case RPSM_EVENT_PLOGI_RCVD:
/* Defer the accept until the HAL rport has gone offline. */
rport->plogi_pending = BFA_TRUE;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
break;
case RPSM_EVENT_SCN:
/*
* @todo
* Ignore SCN - PLOGI just completed, FC-4 login should detect
* device failures.
*/
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
* Rport is ONLINE. FC-4s active.
*/
/* Steady state: rport is online and the FC-4 modules are active. */
static void
bfa_fcs_rport_sm_online(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_SCN:
/* On a fabric, verify via the name server before ADISC;
 * point-to-point goes straight to ADISC. */
if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
bfa_sm_set_state(rport,
bfa_fcs_rport_sm_nsquery_sending);
rport->ns_retries = 0;
bfa_fcs_rport_send_nsdisc(rport, NULL);
} else {
bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending);
bfa_fcs_rport_send_adisc(rport, NULL);
}
break;
case RPSM_EVENT_PLOGI_RCVD:
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_ADDRESS_CHANGE:
/* Take the FC-4s down first; offline_action drives that. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_PLOGI_COMP:
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
* An SCN event is received in ONLINE state. NS query is being sent
* prior to ADISC authentication with rport. FC-4s are paused.
*/
/* SCN received while online; waiting for an fcxp to send the NS query. */
static void
bfa_fcs_rport_sm_nsquery_sending(struct bfa_fcs_rport_s *rport,
enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_FCXP_SENT:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsquery);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_SCN:
/*
* ignore SCN, wait for response to query itself
*/
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_LOGO_IMP:
/* Implicit logout: drop to offline with the delete timer. */
rport->pid = 0;
bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_timer_start(rport->fcs->bfa, &rport->timer,
bfa_fcs_rport_timeout, rport,
bfa_fcs_rport_del_timeout);
break;
case RPSM_EVENT_PLOGI_RCVD:
case RPSM_EVENT_ADDRESS_CHANGE:
case RPSM_EVENT_PLOGI_COMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
* An SCN event is received in ONLINE state. NS query is sent to rport.
* FC-4s are paused.
*/
/* NS query is in flight; FC-4s are paused until it resolves. */
static void
bfa_fcs_rport_sm_nsquery(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_ACCEPTED:
/* NS still knows the rport: authenticate with ADISC. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc_sending);
bfa_fcs_rport_send_adisc(rport, NULL);
break;
case RPSM_EVENT_FAILED:
/* Retry the query up to the limit, then take the rport down. */
rport->ns_retries++;
if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
bfa_sm_set_state(rport,
bfa_fcs_rport_sm_nsquery_sending);
bfa_fcs_rport_send_nsdisc(rport, NULL);
} else {
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcs_rport_offline_action(rport);
}
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_SCN:
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_PLOGI_COMP:
case RPSM_EVENT_ADDRESS_CHANGE:
case RPSM_EVENT_PLOGI_RCVD:
case RPSM_EVENT_LOGO_IMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
* An SCN event is received in ONLINE state. ADISC is being sent for
* authenticating with rport. FC-4s are paused.
*/
/* Waiting for an fcxp so the ADISC authentication can be sent. */
static void
bfa_fcs_rport_sm_adisc_sending(struct bfa_fcs_rport_s *rport,
enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_FCXP_SENT:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_adisc);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_LOGO_IMP:
case RPSM_EVENT_ADDRESS_CHANGE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_SCN:
/* Already re-validating; a further SCN changes nothing. */
break;
case RPSM_EVENT_PLOGI_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
bfa_fcs_rport_offline_action(rport);
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
* An SCN event is received in ONLINE state. ADISC is to rport.
* FC-4s are paused.
*/
/* ADISC is in flight to re-authenticate the rport after an SCN. */
static void
bfa_fcs_rport_sm_adisc(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
bfa_trc(rport->fcs, rport->pwwn);
bfa_trc(rport->fcs, rport->pid);
bfa_trc(rport->fcs, event);
switch (event) {
case RPSM_EVENT_ACCEPTED:
/* Same device is still there: resume the online state. */
bfa_sm_set_state(rport, bfa_fcs_rport_sm_online);
break;
case RPSM_EVENT_PLOGI_RCVD:
/*
* Too complex to cleanup FC-4 & rport and then acc to PLOGI.
* At least go offline when a PLOGI is received.
*/
bfa_fcxp_discard(rport->fcxp);
/*
* !!! fall through !!!
*/
case RPSM_EVENT_FAILED:
case RPSM_EVENT_ADDRESS_CHANGE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_DELETE:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_SCN:
/*
* already processing RSCN
*/
break;
case RPSM_EVENT_LOGO_IMP:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_offline);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
break;
case RPSM_EVENT_LOGO_RCVD:
case RPSM_EVENT_PRLO_RCVD:
bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logorcv);
bfa_fcxp_discard(rport->fcxp);
bfa_fcs_rport_offline_action(rport);
break;
default:
bfa_sm_fault(rport->fcs, event);
}
}
/*
 * Rport has received a LOGO. Awaiting FC-4 offline completion callback
 * before taking the BFA rport offline.
 */
static void
bfa_fcs_rport_sm_fc4_logorcv(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FC4_OFFLINE:
		/* FC-4s are down; now take the hardware rport offline. */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logorcv);
		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
		break;

	case RPSM_EVENT_DELETE:
		/* Switch to the delete path; keep waiting for FC-4 offline. */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
		break;

	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_PRLO_RCVD:
	case RPSM_EVENT_ADDRESS_CHANGE:
		/* Already processing a logout; ignore. */
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * LOGO needs to be sent to the rport. Awaiting FC-4 offline completion
 * callback before the BFA rport can be taken offline.
 */
static void
bfa_fcs_rport_sm_fc4_logosend(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FC4_OFFLINE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * Rport is going offline. Awaiting FC-4 offline completion callback.
 */
static void
bfa_fcs_rport_sm_fc4_offline(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FC4_OFFLINE:
		/* FC-4s done; move on to BFA rport offline. */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
		bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_OFFLINE);
		break;

	case RPSM_EVENT_SCN:
	case RPSM_EVENT_LOGO_IMP:
	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_PRLO_RCVD:
	case RPSM_EVENT_ADDRESS_CHANGE:
		/*
		 * rport is already going offline.
		 * SCN - ignore and wait till transitioning to offline state
		 */
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_fc4_logosend);
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
 * callback (HCB). On completion, decide whether to re-login,
 * re-discover, or park the rport with a delete timer.
 */
static void
bfa_fcs_rport_sm_hcb_offline(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_HCB_OFFLINE:
		/*
		 * A PLOGI arrived while we were going offline; now that
		 * the hardware rport is down, accept it.
		 */
		if (bfa_fcs_lport_is_online(rport->port) &&
		    (rport->plogi_pending)) {
			rport->plogi_pending = BFA_FALSE;
			bfa_sm_set_state(rport,
				bfa_fcs_rport_sm_plogiacc_sending);
			bfa_fcs_rport_send_plogiacc(rport, NULL);
			break;
		}
		/*
		 * !! fall through !!
		 */
	case RPSM_EVENT_ADDRESS_CHANGE:
		if (bfa_fcs_lport_is_online(rport->port)) {
			if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
				/* Fabric: re-query the name server first. */
				bfa_sm_set_state(rport,
					bfa_fcs_rport_sm_nsdisc_sending);
				rport->ns_retries = 0;
				bfa_fcs_rport_send_nsdisc(rport, NULL);
			} else {
				/* Direct attach: re-login directly. */
				bfa_sm_set_state(rport,
					bfa_fcs_rport_sm_plogi_sending);
				rport->plogi_retries = 0;
				bfa_fcs_rport_send_plogi(rport, NULL);
			}
		} else {
			/* Local port down: park and arm stale-rport timer. */
			rport->pid = 0;
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
			bfa_timer_start(rport->fcs->bfa, &rport->timer,
					bfa_fcs_rport_timeout, rport,
					bfa_fcs_rport_del_timeout);
		}
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_SCN:
	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_PRLO_RCVD:
	case RPSM_EVENT_PLOGI_RCVD:
	case RPSM_EVENT_LOGO_IMP:
		/*
		 * Ignore, already offline.
		 */
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * Rport is offline. FC-4s are offline. Awaiting BFA rport offline
 * callback, after which the pending LOGO/PRLO accept is sent out.
 */
static void
bfa_fcs_rport_sm_hcb_logorcv(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_HCB_OFFLINE:
	case RPSM_EVENT_ADDRESS_CHANGE:
		/* Ack the logout: PRLO gets a PRLO acc, LOGO a LOGO acc. */
		if (rport->pid && (rport->prlo == BFA_TRUE))
			bfa_fcs_rport_send_prlo_acc(rport);
		if (rport->pid && (rport->prlo == BFA_FALSE))
			bfa_fcs_rport_send_logo_acc(rport);
		/*
		 * If the lport is online and if the rport is not a well
		 * known address port,
		 * we try to re-discover the r-port.
		 */
		if (bfa_fcs_lport_is_online(rport->port) &&
		    (!BFA_FCS_PID_IS_WKA(rport->pid))) {
			if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
				bfa_sm_set_state(rport,
					bfa_fcs_rport_sm_nsdisc_sending);
				rport->ns_retries = 0;
				bfa_fcs_rport_send_nsdisc(rport, NULL);
			} else {
				/* For N2N Direct Attach, try to re-login */
				bfa_sm_set_state(rport,
					bfa_fcs_rport_sm_plogi_sending);
				rport->plogi_retries = 0;
				bfa_fcs_rport_send_plogi(rport, NULL);
			}
		} else {
			/*
			 * if it is not a well known address, reset the
			 * pid to 0.
			 */
			if (!BFA_FCS_PID_IS_WKA(rport->pid))
				rport->pid = 0;
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
			bfa_timer_start(rport->fcs->bfa, &rport->timer,
					bfa_fcs_rport_timeout, rport,
					bfa_fcs_rport_del_timeout);
		}
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_logosend);
		break;

	case RPSM_EVENT_LOGO_IMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hcb_offline);
		break;

	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_PRLO_RCVD:
		/*
		 * Ignore - already processing a LOGO.
		 */
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * Rport is being deleted. FC-4s are offline.
 * Awaiting BFA rport offline
 * callback, after which a LOGO will be sent.
 */
static void
bfa_fcs_rport_sm_hcb_logosend(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_HCB_OFFLINE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_logo_sending);
		bfa_fcs_rport_send_logo(rport, NULL);
		break;

	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_PRLO_RCVD:
	case RPSM_EVENT_ADDRESS_CHANGE:
		/* Deletion already in progress; ignore. */
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * Rport is being deleted. FC-4s are offline. LOGO is being sent.
 */
static void
bfa_fcs_rport_sm_logo_sending(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FCXP_SENT:
		/* Once LOGO is sent, we donot wait for the response */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_SCN:
	case RPSM_EVENT_ADDRESS_CHANGE:
		break;

	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_PRLO_RCVD:
		/*
		 * Peer logged out first: cancel our queued LOGO send and
		 * free the rport right away.
		 */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_free(rport);
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * Rport is offline. FC-4s are offline. BFA rport is offline.
 * Timer active to delete stale rport.
 */
static void
bfa_fcs_rport_sm_offline(struct bfa_fcs_rport_s *rport, enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_TIMEOUT:
		/* Stale-rport timer fired: free the rport. */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_SCN:
	case RPSM_EVENT_ADDRESS_CHANGE:
		/* Device may be back: stop the timer, re-discover via NS. */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
		bfa_timer_stop(&rport->timer);
		rport->ns_retries = 0;
		bfa_fcs_rport_send_nsdisc(rport, NULL);
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_PRLO_RCVD:
	case RPSM_EVENT_LOGO_IMP:
		/* Already offline; nothing to do. */
		break;

	case RPSM_EVENT_PLOGI_COMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_hal_online(rport);
		break;

	case RPSM_EVENT_PLOGI_SEND:
		bfa_timer_stop(&rport->timer);
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
		rport->plogi_retries = 0;
		bfa_fcs_rport_send_plogi(rport, NULL);
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * Rport address has changed. Nameserver discovery request is being sent
 * (waiting for an fcxp, then for the send to go out on the wire).
 */
static void
bfa_fcs_rport_sm_nsdisc_sending(struct bfa_fcs_rport_s *rport,
		 enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_FCXP_SENT:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sent);
		break;

	case RPSM_EVENT_DELETE:
		/* Cancel the queued fcxp allocation before freeing. */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_SCN:
	case RPSM_EVENT_LOGO_RCVD:
	case RPSM_EVENT_PRLO_RCVD:
	case RPSM_EVENT_PLOGI_SEND:
		break;

	case RPSM_EVENT_ADDRESS_CHANGE:
		rport->ns_retries = 0; /* reset the retry count */
		break;

	case RPSM_EVENT_LOGO_IMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_timer_start(rport->fcs->bfa, &rport->timer,
				bfa_fcs_rport_timeout, rport,
				bfa_fcs_rport_del_timeout);
		break;

	case RPSM_EVENT_PLOGI_COMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		bfa_fcxp_walloc_cancel(rport->fcs->bfa, &rport->fcxp_wqe);
		bfa_fcs_rport_hal_online(rport);
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * Nameserver discovery failed. Waiting for timeout to retry.
 */
static void
bfa_fcs_rport_sm_nsdisc_retry(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_TIMEOUT:
		/* Retry timer fired: resend the NS query. */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
		bfa_fcs_rport_send_nsdisc(rport, NULL);
		break;

	case RPSM_EVENT_SCN:
	case RPSM_EVENT_ADDRESS_CHANGE:
		/* Fresh state change: restart discovery immediately. */
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_nsdisc_sending);
		bfa_timer_stop(&rport->timer);
		rport->ns_retries = 0;
		bfa_fcs_rport_send_nsdisc(rport, NULL);
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_LOGO_IMP:
		/* Swap the retry timer for the stale-rport delete timer. */
		rport->pid = 0;
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
		bfa_timer_stop(&rport->timer);
		bfa_timer_start(rport->fcs->bfa, &rport->timer,
				bfa_fcs_rport_timeout, rport,
				bfa_fcs_rport_del_timeout);
		break;

	case RPSM_EVENT_LOGO_RCVD:
		bfa_fcs_rport_send_logo_acc(rport);
		break;
	case RPSM_EVENT_PRLO_RCVD:
		bfa_fcs_rport_send_prlo_acc(rport);
		break;

	case RPSM_EVENT_PLOGI_COMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		bfa_timer_stop(&rport->timer);
		bfa_fcs_rport_hal_online(rport);
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * Rport address has changed. Nameserver discovery request is sent;
 * waiting for the NS response (or an overriding event).
 */
static void
bfa_fcs_rport_sm_nsdisc_sent(struct bfa_fcs_rport_s *rport,
			enum rport_event event)
{
	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPSM_EVENT_ACCEPTED:
	case RPSM_EVENT_ADDRESS_CHANGE:
		if (rport->pid) {
			/* NS resolved a pid: proceed to login. */
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogi_sending);
			bfa_fcs_rport_send_plogi(rport, NULL);
		} else {
			/* No pid yet: query the name server again. */
			bfa_sm_set_state(rport,
				 bfa_fcs_rport_sm_nsdisc_sending);
			rport->ns_retries = 0;
			bfa_fcs_rport_send_nsdisc(rport, NULL);
		}
		break;

	case RPSM_EVENT_FAILED:
		rport->ns_retries++;
		if (rport->ns_retries < BFA_FCS_RPORT_MAX_RETRIES) {
			bfa_sm_set_state(rport,
				 bfa_fcs_rport_sm_nsdisc_sending);
			bfa_fcs_rport_send_nsdisc(rport, NULL);
		} else {
			/* Retries exhausted: park and arm delete timer. */
			rport->pid = 0;
			bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
			bfa_timer_start(rport->fcs->bfa, &rport->timer,
					bfa_fcs_rport_timeout, rport,
					bfa_fcs_rport_del_timeout);
		}
		break;

	case RPSM_EVENT_DELETE:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_free(rport);
		break;

	case RPSM_EVENT_PLOGI_RCVD:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_plogiacc_sending);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_send_plogiacc(rport, NULL);
		break;

	case RPSM_EVENT_LOGO_IMP:
		rport->pid = 0;
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
		bfa_fcxp_discard(rport->fcxp);
		bfa_timer_start(rport->fcs->bfa, &rport->timer,
				bfa_fcs_rport_timeout, rport,
				bfa_fcs_rport_del_timeout);
		break;

	case RPSM_EVENT_PRLO_RCVD:
		bfa_fcs_rport_send_prlo_acc(rport);
		break;

	case RPSM_EVENT_SCN:
		/*
		 * ignore, wait for NS query response
		 */
		break;

	case RPSM_EVENT_LOGO_RCVD:
		/*
		 * Not logged-in yet. Accept LOGO.
		 */
		bfa_fcs_rport_send_logo_acc(rport);
		break;

	case RPSM_EVENT_PLOGI_COMP:
		bfa_sm_set_state(rport, bfa_fcs_rport_sm_hal_online);
		bfa_fcxp_discard(rport->fcxp);
		bfa_fcs_rport_hal_online(rport);
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
* fcs_rport_private FCS RPORT provate functions
*/
/*
 * Allocate an fcxp and send a PLOGI to the rport. If no fcxp is
 * available, queue on the fcxp wait queue and retry from the callback.
 * Raises RPSM_EVENT_FCXP_SENT into the rport state machine on success.
 */
static void
bfa_fcs_rport_send_plogi(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_rport_s *rport = rport_cbarg;
	struct bfa_fcs_lport_s *port = rport->port;
	struct fchs_s	fchs;
	int		len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(rport->fcs, rport->pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* No fcxp free: park this request and retry when one is. */
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
					bfa_fcs_rport_send_plogi, rport);
		return;
	}
	rport->fcxp = fcxp;

	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
				bfa_fcs_lport_get_fcid(port), 0,
				port->port_cfg.pwwn, port->port_cfg.nwwn,
				bfa_fcport_get_maxfrsize(port->fcs->bfa),
				bfa_fcport_get_rx_bbcredit(port->fcs->bfa));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
			(void *)rport, FC_MAX_PDUSZ, FC_ELS_TOV);

	rport->stats.plogis++;
	bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
}
/*
 * PLOGI response handler. Translates transport errors and LS_RJT into
 * state-machine events; on accept, detects "evil twin" rports (a known
 * device that came back with a new FC address) and merges into the
 * existing rport instead of creating a duplicate.
 */
static void
bfa_fcs_rport_plogi_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
				bfa_status_t req_status, u32 rsp_len,
				u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
	struct fc_logi_s	*plogi_rsp;
	struct fc_ls_rjt_s	*ls_rjt;
	struct bfa_fcs_rport_s *twin;
	struct list_head	*qe;

	bfa_trc(rport->fcs, rport->pwwn);

	/*
	 * Sanity Checks
	 */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(rport->fcs, req_status);
		rport->stats.plogi_failed++;
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		return;
	}

	plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);

	/*
	 * Check for failure first.
	 */
	if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
		bfa_trc(rport->fcs, ls_rjt->reason_code);
		bfa_trc(rport->fcs, ls_rjt->reason_code_expl);

		/* "Unable to perform / insufficient resources" is retried. */
		if ((ls_rjt->reason_code == FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD) &&
		 (ls_rjt->reason_code_expl == FC_LS_RJT_EXP_INSUFF_RES)) {
			rport->stats.rjt_insuff_res++;
			bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RETRY);
			return;
		}

		rport->stats.plogi_rejects++;
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		return;
	}

	/*
	 * PLOGI is complete. Make sure this device is not one of the known
	 * device with a new FC port address.
	 */
	list_for_each(qe, &rport->port->rport_q) {
		twin = (struct bfa_fcs_rport_s *) qe;
		if (twin == rport)
			continue;
		if (!rport->pwwn && (plogi_rsp->port_name == twin->pwwn)) {
			bfa_trc(rport->fcs, twin->pid);
			bfa_trc(rport->fcs, rport->pid);

			/* Update plogi stats in twin */
			twin->stats.plogis  += rport->stats.plogis;
			twin->stats.plogi_rejects  +=
				 rport->stats.plogi_rejects;
			twin->stats.plogi_timeouts  +=
				 rport->stats.plogi_timeouts;
			twin->stats.plogi_failed +=
				 rport->stats.plogi_failed;
			twin->stats.plogi_rcvd	  += rport->stats.plogi_rcvd;
			twin->stats.plogi_accs++;

			/* Delete this rport; the twin takes over the pid. */
			bfa_sm_send_event(rport, RPSM_EVENT_DELETE);

			bfa_fcs_rport_update(twin, plogi_rsp);
			twin->pid = rsp_fchs->s_id;
			bfa_sm_send_event(twin, RPSM_EVENT_PLOGI_COMP);
			return;
		}
	}

	/*
	 * Normal login path -- no evil twins.
	 */
	rport->stats.plogi_accs++;
	bfa_fcs_rport_update(rport, plogi_rsp);
	bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
}
/*
 * Send a PLOGI accept to the rport, using the OX_ID saved from the
 * received PLOGI (rport->reply_oxid). No response callback is installed;
 * completion is driven purely by RPSM_EVENT_FCXP_SENT.
 */
static void
bfa_fcs_rport_send_plogiacc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_rport_s *rport = rport_cbarg;
	struct bfa_fcs_lport_s *port = rport->port;
	struct fchs_s		fchs;
	int		len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->reply_oxid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* Queue for an fcxp and retry from the wait callback. */
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
					bfa_fcs_rport_send_plogiacc, rport);
		return;
	}
	rport->fcxp = fcxp;

	len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				 rport->pid, bfa_fcs_lport_get_fcid(port),
				 rport->reply_oxid, port->port_cfg.pwwn,
				 port->port_cfg.nwwn,
				 bfa_fcport_get_maxfrsize(port->fcs->bfa),
				 bfa_fcport_get_rx_bbcredit(port->fcs->bfa));

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);

	bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
}
/*
 * Send an ADISC to the rport to verify its identity (used after SCN).
 * Response is handled by bfa_fcs_rport_adisc_response().
 */
static void
bfa_fcs_rport_send_adisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_rport_s *rport = rport_cbarg;
	struct bfa_fcs_lport_s *port = rport->port;
	struct fchs_s		fchs;
	int		len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(rport->fcs, rport->pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* Queue for an fcxp and retry from the wait callback. */
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
					bfa_fcs_rport_send_adisc, rport);
		return;
	}
	rport->fcxp = fcxp;

	len = fc_adisc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
				bfa_fcs_lport_get_fcid(port), 0,
				port->port_cfg.pwwn, port->port_cfg.nwwn);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			FC_CLASS_3, len, &fchs, bfa_fcs_rport_adisc_response,
			rport, FC_MAX_PDUSZ, FC_ELS_TOV);

	rport->stats.adisc_sent++;
	bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
}
/*
 * ADISC response handler. ACCEPTED is raised only if the response parses
 * OK and the WWNs match this rport; anything else (transport error,
 * parse mismatch, LS_RJT) becomes FAILED.
 */
static void
bfa_fcs_rport_adisc_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
				bfa_status_t req_status, u32 rsp_len,
				u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
	void		*pld = bfa_fcxp_get_rspbuf(fcxp);
	struct fc_ls_rjt_s	*ls_rjt;

	if (req_status != BFA_STATUS_OK) {
		bfa_trc(rport->fcs, req_status);
		rport->stats.adisc_failed++;
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		return;
	}

	if (fc_adisc_rsp_parse((struct fc_adisc_s *)pld, rsp_len, rport->pwwn,
				rport->nwwn)  ==  FC_PARSE_OK) {
		rport->stats.adisc_accs++;
		bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
		return;
	}

	/* Not a matching ACC: treat the payload as an LS_RJT for tracing. */
	rport->stats.adisc_rejects++;
	ls_rjt = pld;
	bfa_trc(rport->fcs, ls_rjt->els_cmd.els_code);
	bfa_trc(rport->fcs, ls_rjt->reason_code);
	bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
	bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
}
/*
 * Send a name-server discovery query for this rport. If the pwwn is
 * known, query GID_PN (pwwn -> pid); otherwise query GPN_ID
 * (pid -> pwwn). The matching response handler is selected accordingly.
 */
static void
bfa_fcs_rport_send_nsdisc(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_rport_s *rport = rport_cbarg;
	struct bfa_fcs_lport_s *port = rport->port;
	struct fchs_s	fchs;
	struct bfa_fcxp_s *fcxp;
	int		len;
	bfa_cb_fcxp_send_t cbfn;

	bfa_trc(rport->fcs, rport->pid);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* Queue for an fcxp and retry from the wait callback. */
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
					bfa_fcs_rport_send_nsdisc, rport);
		return;
	}
	rport->fcxp = fcxp;

	if (rport->pwwn) {
		len = fc_gidpn_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				bfa_fcs_lport_get_fcid(port), 0, rport->pwwn);
		cbfn = bfa_fcs_rport_gidpn_response;
	} else {
		len = fc_gpnid_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				bfa_fcs_lport_get_fcid(port), 0, rport->pid);
		cbfn = bfa_fcs_rport_gpnid_response;
	}

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			FC_CLASS_3, len, &fchs, cbfn,
			(void *)rport, FC_MAX_PDUSZ, FC_FCCT_TOV);

	bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
}
/*
 * GID_PN (pwwn -> pid) name-server response handler. On accept with an
 * unchanged pid the rport is simply ACCEPTED; a changed pid triggers a
 * re-login via ADDRESS_CHANGE, evicting any other rport that currently
 * holds the new pid. Rejects map to TIMEOUT (retry) or FAILED.
 */
static void
bfa_fcs_rport_gidpn_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
				bfa_status_t req_status, u32 rsp_len,
				u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
	struct ct_hdr_s	*cthdr;
	struct fcgs_gidpn_resp_s	*gidpn_rsp;
	struct bfa_fcs_rport_s	*twin;
	struct list_head	*qe;

	bfa_trc(rport->fcs, rport->pwwn);

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		/* Check if the pid is the same as before. */
		gidpn_rsp = (struct fcgs_gidpn_resp_s *) (cthdr + 1);

		if (gidpn_rsp->dap == rport->pid) {
			/* Device is online  */
			bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
		} else {
			/*
			 * Device's PID has changed. We need to cleanup
			 * and re-login. If there is another device with
			 * the newly discovered pid, send an scn notice
			 * so that its new pid can be discovered.
			 */
			list_for_each(qe, &rport->port->rport_q) {
				twin = (struct bfa_fcs_rport_s *) qe;
				if (twin == rport)
					continue;
				if (gidpn_rsp->dap == twin->pid) {
					bfa_trc(rport->fcs, twin->pid);
					bfa_trc(rport->fcs, rport->pid);

					twin->pid = 0;
					bfa_sm_send_event(twin,
					 RPSM_EVENT_ADDRESS_CHANGE);
				}
			}
			rport->pid = gidpn_rsp->dap;
			bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_CHANGE);
		}
		return;
	}

	/*
	 * Reject Response
	 */
	switch (cthdr->reason_code) {
	case CT_RSN_LOGICAL_BUSY:
		/*
		 * Need to retry
		 */
		bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
		break;

	case CT_RSN_UNABLE_TO_PERF:
		/*
		 * device doesn't exist : Start timer to cleanup this later.
		 */
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		break;

	default:
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		break;
	}
}
/*
 * GPN_ID (pid -> pwwn) name-server response handler. Accept maps to
 * ACCEPTED; rejects map to TIMEOUT (logical busy, retried) or FAILED.
 */
static void
bfa_fcs_rport_gpnid_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
				bfa_status_t req_status, u32 rsp_len,
				u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
	struct ct_hdr_s	*cthdr;

	bfa_trc(rport->fcs, rport->pwwn);

	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);

	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
		bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
		return;
	}

	/*
	 * Reject Response
	 */
	switch (cthdr->reason_code) {
	case CT_RSN_LOGICAL_BUSY:
		/*
		 * Need to retry
		 */
		bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
		break;

	case CT_RSN_UNABLE_TO_PERF:
		/*
		 * device doesn't exist : Start timer to cleanup this later.
		 */
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		break;

	default:
		bfa_sm_send_event(rport, RPSM_EVENT_FAILED);
		break;
	}
}
/*
 * Called to send a logout to the rport.
 */
static void
bfa_fcs_rport_send_logo(void *rport_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_rport_s *rport = rport_cbarg;
	struct bfa_fcs_lport_s *port;
	struct fchs_s	fchs;
	struct bfa_fcxp_s *fcxp;
	u16	len;

	bfa_trc(rport->fcs, rport->pid);

	port = rport->port;

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* Queue for an fcxp and retry from the wait callback. */
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rport->fcxp_wqe,
					bfa_fcs_rport_send_logo, rport);
		return;
	}
	rport->fcxp = fcxp;

	len = fc_logo_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
				bfa_fcs_lport_get_fcid(port), 0,
				bfa_fcs_lport_get_pwwn(port));

	/* No response callback: the LOGO response is not waited for
	 * (see logo_sending state), so the fcxp is discarded right away.
	 */
	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			FC_CLASS_3, len, &fchs, NULL,
			rport, FC_MAX_PDUSZ, FC_ELS_TOV);

	rport->stats.logos++;
	bfa_fcxp_discard(rport->fcxp);
	bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
}
/*
 * Send ACC for a LOGO received. Best-effort: if no fcxp is available
 * the accept is silently dropped (no wait queue is used here).
 */
static void
bfa_fcs_rport_send_logo_acc(void *rport_cbarg)
{
	struct bfa_fcs_rport_s *rport = rport_cbarg;
	struct bfa_fcs_lport_s *port;
	struct fchs_s	fchs;
	struct bfa_fcxp_s *fcxp;
	u16	len;

	bfa_trc(rport->fcs, rport->pid);

	port = rport->port;

	fcxp = bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp)
		return;

	rport->stats.logo_rcvd++;
	len = fc_logo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				rport->pid, bfa_fcs_lport_get_fcid(port),
				rport->reply_oxid);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
}
/*
 * brief
 * This routine will be called by bfa_timer on timer timeouts.
 *
 * param[in]	arg - pointer to the bfa_fcs_rport_s instance whose
 *		timer expired (cast back from the void * timer argument)
 *
 * return
 *	void
 *
 * Special Considerations:
 *	Raises RPSM_EVENT_TIMEOUT into the rport state machine; the
 *	current state decides whether this means retry or delete.
 *
 * note
 */
static void
bfa_fcs_rport_timeout(void *arg)
{
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) arg;

	rport->stats.plogi_timeouts++;
	bfa_stats(rport->port, rport_plogi_timeouts);
	bfa_sm_send_event(rport, RPSM_EVENT_TIMEOUT);
}
/*
 * Process a received PRLI: record whether the peer is a target or an
 * initiator, then send a PRLI accept (best-effort; dropped if no fcxp).
 */
static void
bfa_fcs_rport_process_prli(struct bfa_fcs_rport_s *rport,
			struct fchs_s *rx_fchs, u16 len)
{
	struct bfa_fcxp_s *fcxp;
	struct fchs_s	fchs;
	struct bfa_fcs_lport_s *port = rport->port;
	struct fc_prli_s	*prli;

	bfa_trc(port->fcs, rx_fchs->s_id);
	bfa_trc(port->fcs, rx_fchs->d_id);

	rport->stats.prli_rcvd++;

	/*
	 * We are in Initiator Mode
	 */
	prli = (struct fc_prli_s *) (rx_fchs + 1);

	if (prli->parampage.servparams.target) {
		/*
		 * PRLI from a target ?
		 * Send the Acc.
		 * PRLI sent by us will be used to transition the IT nexus,
		 * once the response is received from the target.
		 */
		bfa_trc(port->fcs, rx_fchs->s_id);
		rport->scsi_function = BFA_RPORT_TARGET;
	} else {
		bfa_trc(rport->fcs, prli->parampage.type);
		rport->scsi_function = BFA_RPORT_INITIATOR;
		bfa_fcs_itnim_is_initiator(rport->itnim);
	}

	fcxp = bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp)
		return;

	len = fc_prli_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
				rx_fchs->ox_id, port->port_cfg.roles);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
}
/*
 * Process a received RPSC (Report Port Speed Capabilities): reply with
 * this port's speed capabilities and current operating speed.
 */
static void
bfa_fcs_rport_process_rpsc(struct bfa_fcs_rport_s *rport,
			struct fchs_s *rx_fchs, u16 len)
{
	struct bfa_fcxp_s *fcxp;
	struct fchs_s	fchs;
	struct bfa_fcs_lport_s *port = rport->port;
	struct fc_rpsc_speed_info_s speeds;
	struct bfa_port_attr_s pport_attr;

	bfa_trc(port->fcs, rx_fchs->s_id);
	bfa_trc(port->fcs, rx_fchs->d_id);

	rport->stats.rpsc_rcvd++;
	speeds.port_speed_cap =
		RPSC_SPEED_CAP_1G | RPSC_SPEED_CAP_2G | RPSC_SPEED_CAP_4G |
		RPSC_SPEED_CAP_8G;

	/*
	 * get curent speed from pport attributes from BFA
	 */
	bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);

	speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);

	fcxp = bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp)
		return;

	len = fc_rpsc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
				rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
				rx_fchs->ox_id, &speeds);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
}
/*
 * Process a received ADISC: accept if the itnim for this rport is
 * online, otherwise reject with "login required".
 */
static void
bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
			struct fchs_s *rx_fchs, u16 len)
{
	struct bfa_fcxp_s *fcxp;
	struct fchs_s	fchs;
	struct bfa_fcs_lport_s *port = rport->port;
	struct fc_adisc_s	*adisc;

	bfa_trc(port->fcs, rx_fchs->s_id);
	bfa_trc(port->fcs, rx_fchs->d_id);

	rport->stats.adisc_rcvd++;

	adisc = (struct fc_adisc_s *) (rx_fchs + 1);

	/*
	 * Accept if the itnim for this rport is online.
	 * Else reject the ADISC.
	 */
	if (bfa_fcs_itnim_get_online_state(rport->itnim) == BFA_STATUS_OK) {

		fcxp = bfa_fcs_fcxp_alloc(port->fcs);
		if (!fcxp)
			return;

		len = fc_adisc_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
			 rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
			 rx_fchs->ox_id, port->port_cfg.pwwn,
			 port->port_cfg.nwwn);

		bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
				BFA_FALSE, FC_CLASS_3, len, &fchs, NULL, NULL,
				FC_MAX_PDUSZ, 0);
	} else {
		rport->stats.adisc_rejected++;
		bfa_fcs_rport_send_ls_rjt(rport, rx_fchs,
					  FC_LS_RJT_RSN_UNABLE_TO_PERF_CMD,
					  FC_LS_RJT_EXP_LOGIN_REQUIRED);
	}
}
/*
 * Bring the BFA (hardware) rport online: populate the rport_info from
 * the FCS rport/lport/fabric state and hand it to the BFA layer.
 */
static void
bfa_fcs_rport_hal_online(struct bfa_fcs_rport_s *rport)
{
	struct bfa_fcs_lport_s *port = rport->port;
	struct bfa_rport_info_s rport_info;

	rport_info.pid = rport->pid;
	rport_info.local_pid = port->pid;
	rport_info.lp_tag = port->lp_tag;
	rport_info.vf_id = port->fabric->vf_id;
	rport_info.vf_en = port->fabric->is_vf;
	rport_info.fc_class = rport->fc_cos;
	rport_info.cisc = rport->cisc;
	rport_info.max_frmsz = rport->maxfrsize;

	bfa_rport_online(rport->bfa_rport, &rport_info);
}
/*
 * Allocate and initialize an FCS rport: driver-level rport, BFA rport,
 * FC-4 (itnim) module, and RPF sub-module. On any failure the partial
 * allocations are unwound and NULL is returned.
 */
static struct bfa_fcs_rport_s *
bfa_fcs_rport_alloc(struct bfa_fcs_lport_s *port, wwn_t pwwn, u32 rpid)
{
	struct bfa_fcs_s	*fcs = port->fcs;
	struct bfa_fcs_rport_s *rport;
	struct bfad_rport_s	*rport_drv;

	/*
	 * allocate rport
	 */
	if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
		!= BFA_STATUS_OK) {
		bfa_trc(fcs, rpid);
		return NULL;
	}

	/*
	 * Initialize r-port
	 */
	rport->port = port;
	rport->fcs = fcs;
	rport->rp_drv = rport_drv;
	rport->pid = rpid;
	rport->pwwn = pwwn;

	/*
	 * allocate BFA rport
	 */
	rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
	if (!rport->bfa_rport) {
		bfa_trc(fcs, rpid);
		/* unwind: free the driver rport allocated above */
		kfree(rport_drv);
		return NULL;
	}

	/*
	 * allocate FC-4s
	 */
	WARN_ON(!bfa_fcs_lport_is_initiator(port));

	if (bfa_fcs_lport_is_initiator(port)) {
		rport->itnim = bfa_fcs_itnim_create(rport);
		if (!rport->itnim) {
			bfa_trc(fcs, rpid);
			/* unwind: delete the BFA rport, then the driver one */
			bfa_sm_send_event(rport->bfa_rport,
						BFA_RPORT_SM_DELETE);
			kfree(rport_drv);
			return NULL;
		}
	}

	bfa_fcs_lport_add_rport(port, rport);

	bfa_sm_set_state(rport, bfa_fcs_rport_sm_uninit);

	/* Initialize the Rport Features(RPF) Sub Module */
	if (!BFA_FCS_PID_IS_WKA(rport->pid))
		bfa_fcs_rpf_init(rport);

	return rport;
}
/*
 * Tear down an FCS rport: delete FC-4s, delete the BFA rport, remove it
 * from the lport's rport queue, and free the driver-level structure.
 */
static void
bfa_fcs_rport_free(struct bfa_fcs_rport_s *rport)
{
	struct bfa_fcs_lport_s *port = rport->port;

	/*
	 * - delete FC-4s
	 * - delete BFA rport
	 * - remove from queue of rports
	 */
	if (bfa_fcs_lport_is_initiator(port)) {
		bfa_fcs_itnim_delete(rport->itnim);
		if (rport->pid != 0 && !BFA_FCS_PID_IS_WKA(rport->pid))
			bfa_fcs_rpf_rport_offline(rport);
	}

	bfa_sm_send_event(rport->bfa_rport, BFA_RPORT_SM_DELETE);
	bfa_fcs_lport_del_rport(port, rport);
	kfree(rport->rp_drv);
}
/*
 * Post an asynchronous event notification (AEN) for this rport.
 * Copies the QoS payload from @data for QoS events; for other events
 * @data may be NULL (callers pass NULL for online/offline events).
 */
static void
bfa_fcs_rport_aen_post(struct bfa_fcs_rport_s *rport,
			enum bfa_rport_aen_event event,
			struct bfa_rport_aen_data_s *data)
{
	struct bfa_fcs_lport_s *port = rport->port;
	struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
	struct bfa_aen_entry_s	*aen_entry;

	bfad_get_aen_entry(bfad, aen_entry);
	if (!aen_entry)
		return;

	/*
	 * Both QoS events carry the same payload; the original code
	 * duplicated this assignment across two identical branches.
	 */
	if (event == BFA_RPORT_AEN_QOS_PRIO ||
	    event == BFA_RPORT_AEN_QOS_FLOWID)
		aen_entry->aen_data.rport.priv.qos = data->priv.qos;

	aen_entry->aen_data.rport.vf_id = rport->port->fabric->vf_id;
	aen_entry->aen_data.rport.ppwwn = bfa_fcs_lport_get_pwwn(
					bfa_fcs_get_base_port(rport->fcs));
	aen_entry->aen_data.rport.lpwwn = bfa_fcs_lport_get_pwwn(rport->port);
	aen_entry->aen_data.rport.rpwwn = rport->pwwn;

	/* Send the AEN notification */
	bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq,
				  BFA_AEN_CAT_RPORT, event);
}
/*
 * Common actions when an rport goes online: bump stats, bring the
 * itnim and RPF modules online, and (for non-WKA ports) log and post
 * an ONLINE AEN. A zero pid or pwwn at this point is a driver bug.
 */
static void
bfa_fcs_rport_online_action(struct bfa_fcs_rport_s *rport)
{
	struct bfa_fcs_lport_s *port = rport->port;
	struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
	char	lpwwn_buf[BFA_STRING_32];
	char	rpwwn_buf[BFA_STRING_32];

	rport->stats.onlines++;

	/* An online rport must have both an address and a port name. */
	if ((!rport->pid) || (!rport->pwwn)) {
		bfa_trc(rport->fcs, rport->pid);
		bfa_sm_fault(rport->fcs, rport->pid);
	}

	if (bfa_fcs_lport_is_initiator(port)) {
		bfa_fcs_itnim_rport_online(rport->itnim);
		if (!BFA_FCS_PID_IS_WKA(rport->pid))
			bfa_fcs_rpf_rport_online(rport);
	}

	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
	wwn2str(rpwwn_buf, rport->pwwn);
	if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
		"Remote port (WWN = %s) online for logical port (WWN = %s)\n",
		rpwwn_buf, lpwwn_buf);
		bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_ONLINE, NULL);
	}
}
/*
 * Common actions when an rport goes offline: bump stats, clear any
 * pending-PLOGI flag, log and post the appropriate AEN (DISCONNECT if
 * the local port is still online, else OFFLINE), then take the itnim
 * and RPF modules offline.
 */
static void
bfa_fcs_rport_offline_action(struct bfa_fcs_rport_s *rport)
{
	struct bfa_fcs_lport_s *port = rport->port;
	struct bfad_s *bfad = (struct bfad_s *)port->fcs->bfad;
	char	lpwwn_buf[BFA_STRING_32];
	char	rpwwn_buf[BFA_STRING_32];

	rport->stats.offlines++;
	rport->plogi_pending = BFA_FALSE;

	wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(port));
	wwn2str(rpwwn_buf, rport->pwwn);
	if (!BFA_FCS_PID_IS_WKA(rport->pid)) {
		if (bfa_fcs_lport_is_online(rport->port) == BFA_TRUE) {
			/* Local port up, remote gone: connectivity lost. */
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Remote port (WWN = %s) connectivity lost for "
				"logical port (WWN = %s)\n",
				rpwwn_buf, lpwwn_buf);
			bfa_fcs_rport_aen_post(rport,
				BFA_RPORT_AEN_DISCONNECT, NULL);
		} else {
			/* Local port down: we offlined the rport ourselves. */
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Remote port (WWN = %s) offlined by "
				"logical port (WWN = %s)\n",
				rpwwn_buf, lpwwn_buf);
			bfa_fcs_rport_aen_post(rport,
				BFA_RPORT_AEN_OFFLINE, NULL);
		}
	}

	if (bfa_fcs_lport_is_initiator(port)) {
		bfa_fcs_itnim_rport_offline(rport->itnim);
		if (!BFA_FCS_PID_IS_WKA(rport->pid))
			bfa_fcs_rpf_rport_offline(rport);
	}
}
/*
 * Update rport parameters from PLOGI or PLOGI accept.
 */
static void
bfa_fcs_rport_update(struct bfa_fcs_rport_s *rport, struct fc_logi_s *plogi)
{
	bfa_fcs_lport_t *port = rport->port;

	/*
	 * - port name
	 * - node name
	 */
	rport->pwwn = plogi->port_name;
	rport->nwwn = plogi->node_name;

	/*
	 * - class of service
	 */
	rport->fc_cos = 0;
	if (plogi->class3.class_valid)
		rport->fc_cos = FC_CLASS_3;

	if (plogi->class2.class_valid)
		rport->fc_cos |= FC_CLASS_2;

	/*
	 * - CISC
	 * - MAX receive frame size
	 *   (use the smaller of the class-3 and common rx sizes)
	 */
	rport->cisc = plogi->csp.cisc;
	if (be16_to_cpu(plogi->class3.rxsz) < be16_to_cpu(plogi->csp.rxsz))
		rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
	else
		rport->maxfrsize = be16_to_cpu(plogi->csp.rxsz);

	bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
	bfa_trc(port->fcs, port->fabric->bb_credit);
	/*
	 * Direct Attach P2P mode :
	 * This is to handle a bug (233476) in IBM targets in Direct Attach
	 * Mode. Basically, in FLOGI Accept the target would have
	 * erroneously set the BB Credit to the value used in the FLOGI
	 * sent by the HBA. It uses the correct value (its own BB credit)
	 * in PLOGI.
	 */
	if ((!bfa_fcs_fabric_is_switched(port->fabric))	&&
		(be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) {

		bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
		bfa_trc(port->fcs, port->fabric->bb_credit);

		port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
		bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
					  port->fabric->bb_credit, 0);
	}
}
/*
* Called to handle LOGO received from an existing remote port.
*/
static void
bfa_fcs_rport_process_logo(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs)
{
	/* Save the originator exchange id so the accept can echo it back. */
	rport->reply_oxid = fchs->ox_id;
	bfa_trc(rport->fcs, rport->reply_oxid);

	/* This is a LOGO, not a PRLO -- relevant when building the accept. */
	rport->prlo = BFA_FALSE;

	rport->stats.logo_rcvd++;
	bfa_sm_send_event(rport, RPSM_EVENT_LOGO_RCVD);
}
/*
* fcs_rport_public FCS rport public interfaces
*/
/*
* Called by bport/vport to create a remote port instance for a discovered
* remote device.
*
* @param[in] port - base port or vport
* @param[in] rpid - remote port ID
*
* @return None
*/
/*
 * Allocate a new rport for a discovered remote device (identified by
 * PID only) and start it off by sending PLOGI.  Returns NULL if the
 * rport could not be allocated.
 */
struct bfa_fcs_rport_s *
bfa_fcs_rport_create(struct bfa_fcs_lport_s *port, u32 rpid)
{
	struct bfa_fcs_rport_s *new_rport;

	bfa_trc(port->fcs, rpid);

	new_rport = bfa_fcs_rport_alloc(port, WWN_NULL, rpid);
	if (new_rport)
		bfa_sm_send_event(new_rport, RPSM_EVENT_PLOGI_SEND);
	return new_rport;
}
/*
* Called to create a rport for which only the wwn is known.
*
* @param[in] port - base port
* @param[in] rpwwn - remote port wwn
*
* @return None
*/
/*
 * Allocate a new rport for a device of which only the port WWN is
 * known; the state machine starts with address discovery.  Returns
 * NULL if the rport could not be allocated.
 */
struct bfa_fcs_rport_s *
bfa_fcs_rport_create_by_wwn(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
{
	struct bfa_fcs_rport_s *new_rport;

	bfa_trc(port->fcs, rpwwn);

	new_rport = bfa_fcs_rport_alloc(port, rpwwn, 0);
	if (new_rport)
		bfa_sm_send_event(new_rport, RPSM_EVENT_ADDRESS_DISC);
	return new_rport;
}
/*
* Called by bport in private loop topology to indicate that a
* rport has been discovered and plogi has been completed.
*
* @param[in] port - base port or vport
* @param[in] rpid - remote port ID
*/
void
bfa_fcs_rport_start(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
struct fc_logi_s *plogi)
{
struct bfa_fcs_rport_s *rport;
rport = bfa_fcs_rport_alloc(port, WWN_NULL, fchs->s_id);
if (!rport)
return;
bfa_fcs_rport_update(rport, plogi);
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
}
/*
* Called by bport/vport to handle PLOGI received from a new remote port.
* If an existing rport does a plogi, it will be handled separately.
*/
void
bfa_fcs_rport_plogi_create(struct bfa_fcs_lport_s *port, struct fchs_s *fchs,
struct fc_logi_s *plogi)
{
struct bfa_fcs_rport_s *rport;
rport = bfa_fcs_rport_alloc(port, plogi->port_name, fchs->s_id);
if (!rport)
return;
bfa_fcs_rport_update(rport, plogi);
rport->reply_oxid = fchs->ox_id;
bfa_trc(rport->fcs, rport->reply_oxid);
rport->stats.plogi_rcvd++;
bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
}
/*
* Called by bport/vport to handle PLOGI received from an existing
* remote port.
*/
void
bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
			struct fc_logi_s *plogi)
{
	/*
	 * @todo Handle P2P and initiator-initiator.
	 */
	/* Refresh wwn/cos/frame-size parameters from the new PLOGI payload. */
	bfa_fcs_rport_update(rport, plogi);

	/* Remember the originator exchange id so the accept can echo it. */
	rport->reply_oxid = rx_fchs->ox_id;
	bfa_trc(rport->fcs, rport->reply_oxid);

	/* The peer may have been re-addressed; take the PID from this frame. */
	rport->pid = rx_fchs->s_id;
	bfa_trc(rport->fcs, rport->pid);

	rport->stats.plogi_rcvd++;
	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
}
/*
* Called by bport/vport to notify SCN for the remote port
*/
void
bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport)
{
	/* Count the RSCN and let the state machine re-validate the peer. */
	rport->stats.rscns++;
	bfa_sm_send_event(rport, RPSM_EVENT_SCN);
}
/*
* brief
* This routine BFA callback for bfa_rport_online() call.
*
* param[in] cb_arg - rport struct.
*
* return
* void
*
* Special Considerations:
*
* note
*/
void
bfa_cb_rport_online(void *cbarg)
{
	/* cbarg is the FCS rport registered with the BFA hardware rport. */
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;

	bfa_trc(rport->fcs, rport->pwwn);
	/* Hardware rport is up; advance the FCS rport state machine. */
	bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
}
/*
* brief
* This routine BFA callback for bfa_rport_offline() call.
*
* param[in] rport -
*
* return
* void
*
* Special Considerations:
*
* note
*/
void
bfa_cb_rport_offline(void *cbarg)
{
	/* cbarg is the FCS rport registered with the BFA hardware rport. */
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;

	bfa_trc(rport->fcs, rport->pwwn);
	/* Hardware rport is down; advance the FCS rport state machine. */
	bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
}
/*
* brief
* This routine is a static BFA callback when there is a QoS flow_id
* change notification
*
* param[in] rport -
*
* return
* void
*
* Special Considerations:
*
* note
*/
void
bfa_cb_rport_qos_scn_flowid(void *cbarg,
		struct bfa_rport_qos_attr_s old_qos_attr,
		struct bfa_rport_qos_attr_s new_qos_attr)
{
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
	struct bfa_rport_aen_data_s aen_data;

	bfa_trc(rport->fcs, rport->pwwn);
	/* Only the new attributes are reported; old_qos_attr is unused. */
	aen_data.priv.qos = new_qos_attr;
	bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_FLOWID, &aen_data);
}
/*
* brief
* This routine is a static BFA callback when there is a QoS priority
* change notification
*
* param[in] rport -
*
* return
* void
*
* Special Considerations:
*
* note
*/
void
bfa_cb_rport_qos_scn_prio(void *cbarg,
		struct bfa_rport_qos_attr_s old_qos_attr,
		struct bfa_rport_qos_attr_s new_qos_attr)
{
	struct bfa_fcs_rport_s *rport = (struct bfa_fcs_rport_s *) cbarg;
	struct bfa_rport_aen_data_s aen_data;

	bfa_trc(rport->fcs, rport->pwwn);
	/* Only the new attributes are reported; old_qos_attr is unused. */
	aen_data.priv.qos = new_qos_attr;
	bfa_fcs_rport_aen_post(rport, BFA_RPORT_AEN_QOS_PRIO, &aen_data);
}
/*
* Called to process any unsolicted frames from this remote port
*/
void
bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport,
	 struct fchs_s *fchs, u16 len)
{
	struct bfa_fcs_lport_s *port = rport->port;
	struct fc_els_cmd_s *els_cmd;

	bfa_trc(rport->fcs, fchs->s_id);
	bfa_trc(rport->fcs, fchs->d_id);
	bfa_trc(rport->fcs, fchs->type);

	/* Only ELS frames are handled here; others are silently dropped. */
	if (fchs->type != FC_TYPE_ELS)
		return;

	/* The ELS command code immediately follows the FC header. */
	els_cmd = (struct fc_els_cmd_s *) (fchs + 1);

	bfa_trc(rport->fcs, els_cmd->els_code);

	switch (els_cmd->els_code) {
	case FC_ELS_LOGO:
		/* NOTE(review): this bumps the plogi_rcvd port stat for a
		 * LOGO frame -- looks like a copy/paste slip; confirm the
		 * port stats structure before changing the counter. */
		bfa_stats(port, plogi_rcvd);
		bfa_fcs_rport_process_logo(rport, fchs);
		break;

	case FC_ELS_ADISC:
		bfa_stats(port, adisc_rcvd);
		bfa_fcs_rport_process_adisc(rport, fchs, len);
		break;

	case FC_ELS_PRLO:
		bfa_stats(port, prlo_rcvd);
		if (bfa_fcs_lport_is_initiator(port))
			bfa_fcs_fcpim_uf_recv(rport->itnim, fchs, len);
		break;

	case FC_ELS_PRLI:
		bfa_stats(port, prli_rcvd);
		bfa_fcs_rport_process_prli(rport, fchs, len);
		break;

	case FC_ELS_RPSC:
		bfa_stats(port, rpsc_rcvd);
		bfa_fcs_rport_process_rpsc(rport, fchs, len);
		break;

	default:
		/* Anything else is rejected: command not supported. */
		bfa_stats(port, un_handled_els_rcvd);
		bfa_fcs_rport_send_ls_rjt(rport, fchs,
					  FC_LS_RJT_RSN_CMD_NOT_SUPP,
					  FC_LS_RJT_EXP_NO_ADDL_INFO);
		break;
	}
}
/* send best case acc to prlo */
static void
bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport)
{
struct bfa_fcs_lport_s *port = rport->port;
struct fchs_s fchs;
struct bfa_fcxp_s *fcxp;
int len;
bfa_trc(rport->fcs, rport->pid);
fcxp = bfa_fcs_fcxp_alloc(port->fcs);
if (!fcxp)
return;
len = fc_prlo_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
rport->pid, bfa_fcs_lport_get_fcid(port),
rport->reply_oxid, 0);
bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id,
port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs,
NULL, NULL, FC_MAX_PDUSZ, 0);
}
/*
* Send a LS reject
*/
/*
 * Send a LS reject
 */
static void
bfa_fcs_rport_send_ls_rjt(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
				u8 reason_code, u8 reason_code_expl)
{
	struct bfa_fcs_lport_s *port = rport->port;
	struct bfa_fcxp_s *fcxp;
	struct fchs_s rjt_fchs;
	int pld_len;

	bfa_trc(rport->fcs, rx_fchs->s_id);

	/* Best effort only: drop the reject if no fcxp is available. */
	fcxp = bfa_fcs_fcxp_alloc(rport->fcs);
	if (fcxp == NULL)
		return;

	pld_len = fc_ls_rjt_build(&rjt_fchs, bfa_fcxp_get_reqbuf(fcxp),
			rx_fchs->s_id, bfa_fcs_lport_get_fcid(port),
			rx_fchs->ox_id, reason_code, reason_code_expl);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag,
			BFA_FALSE, FC_CLASS_3, pld_len, &rjt_fchs, NULL, NULL,
			FC_MAX_PDUSZ, 0);
}
/*
* Return state of rport.
*/
int
bfa_fcs_rport_get_state(struct bfa_fcs_rport_s *rport)
{
	/* Map the current state-machine function pointer to its enum value. */
	return bfa_sm_to_state(rport_sm_table, rport->sm);
}
/*
* brief
* Called by the Driver to set rport delete/ageout timeout
*
* param[in] rport timeout value in seconds.
*
* return None
*/
void
bfa_fcs_rport_set_del_timeout(u8 rport_tmo)
{
	/* convert to Millisecs */
	if (rport_tmo > 0)
		bfa_fcs_rport_del_timeout = rport_tmo * 1000;
	/* A zero argument leaves the previously configured timeout intact. */
}
void
bfa_fcs_rport_prlo(struct bfa_fcs_rport_s *rport, __be16 ox_id)
{
	bfa_trc(rport->fcs, rport->pid);

	/* Record that the pending logout is a PRLO (vs. LOGO) and save the
	 * originator exchange id so the accept can be matched to it. */
	rport->prlo = BFA_TRUE;
	rport->reply_oxid = ox_id;
	bfa_sm_send_event(rport, RPSM_EVENT_PRLO_RCVD);
}
/*
 * Fill *rport_attr with a snapshot of this rport's identity, negotiated
 * parameters, QoS attributes and rate-limit enforcement status.
 */
void
bfa_fcs_rport_get_attr(struct bfa_fcs_rport_s *rport,
		struct bfa_rport_attr_s *rport_attr)
{
	struct bfa_rport_qos_attr_s qos_attr;
	struct bfa_fcs_lport_s *port = rport->port;
	bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;

	memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
	memset(&qos_attr, 0, sizeof(struct bfa_rport_qos_attr_s));

	/* Identity and PLOGI-negotiated parameters. */
	rport_attr->pid = rport->pid;
	rport_attr->pwwn = rport->pwwn;
	rport_attr->nwwn = rport->nwwn;
	rport_attr->cos_supported = rport->fc_cos;
	rport_attr->df_sz = rport->maxfrsize;
	rport_attr->state = bfa_fcs_rport_get_state(rport);
	rport_attr->fc_cos = rport->fc_cos;
	rport_attr->cisc = rport->cisc;
	rport_attr->scsi_function = rport->scsi_function;
	rport_attr->curr_speed = rport->rpf.rpsc_speed;
	rport_attr->assigned_speed = rport->rpf.assigned_speed;

	/* QoS attributes come from the hardware rport; flow id is kept
	 * big-endian in the attribute structure. */
	qos_attr.qos_priority = rport->bfa_rport->qos_attr.qos_priority;
	qos_attr.qos_flow_id =
		cpu_to_be32(rport->bfa_rport->qos_attr.qos_flow_id);
	rport_attr->qos_attr = qos_attr;

	/* Rate limiting is enforced only against slower targets. */
	rport_attr->trl_enforced = BFA_FALSE;
	if (bfa_fcport_is_ratelim(port->fcs->bfa) &&
	    (rport->scsi_function == BFA_RPORT_TARGET)) {
		if (rport_speed == BFA_PORT_SPEED_UNKNOWN)
			rport_speed =
				bfa_fcport_get_ratelim_speed(rport->fcs->bfa);

		if (rport_speed < bfa_fcs_lport_get_rport_max_speed(port))
			rport_attr->trl_enforced = BFA_TRUE;
	}
}
/*
* Remote port implementation.
*/
/*
* fcs_rport_api FCS rport API.
*/
/*
 * Look up an rport on the given port by its port WWN.  A NULL return
 * simply means no such rport is known (TBD: error handling).
 */
struct bfa_fcs_rport_s *
bfa_fcs_rport_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn)
{
	return bfa_fcs_lport_get_rport_by_pwwn(port, rpwwn);
}
/*
 * Look up an rport on the given port by its node WWN.  A NULL return
 * simply means no such rport is known (TBD: error handling).
 */
struct bfa_fcs_rport_s *
bfa_fcs_rport_lookup_by_nwwn(struct bfa_fcs_lport_s *port, wwn_t rnwwn)
{
	return bfa_fcs_lport_get_rport_by_nwwn(port, rnwwn);
}
/*
* Remote port features (RPF) implementation.
*/
#define BFA_FCS_RPF_RETRIES (3)
#define BFA_FCS_RPF_RETRY_TIMEOUT (1000) /* 1 sec (In millisecs) */
static void bfa_fcs_rpf_send_rpsc2(void *rport_cbarg,
struct bfa_fcxp_s *fcxp_alloced);
static void bfa_fcs_rpf_rpsc2_response(void *fcsarg,
struct bfa_fcxp_s *fcxp,
void *cbarg,
bfa_status_t req_status,
u32 rsp_len,
u32 resid_len,
struct fchs_s *rsp_fchs);
static void bfa_fcs_rpf_timeout(void *arg);
/*
* fcs_rport_ftrs_sm FCS rport state machine events
*/
enum rpf_event {
RPFSM_EVENT_RPORT_OFFLINE = 1, /* Rport offline */
RPFSM_EVENT_RPORT_ONLINE = 2, /* Rport online */
RPFSM_EVENT_FCXP_SENT = 3, /* Frame from has been sent */
RPFSM_EVENT_TIMEOUT = 4, /* Rport SM timeout event */
RPFSM_EVENT_RPSC_COMP = 5,
RPFSM_EVENT_RPSC_FAIL = 6,
RPFSM_EVENT_RPSC_ERROR = 7,
};
static void bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf,
enum rpf_event event);
static void bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf,
enum rpf_event event);
static void bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf,
enum rpf_event event);
static void bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf,
enum rpf_event event);
static void bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf,
enum rpf_event event);
static void bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf,
enum rpf_event event);
/*
 * RPF initial state: nothing has happened yet.  On rport online,
 * decide whether RPSC2 speed discovery should be started.
 */
static void
bfa_fcs_rpf_sm_uninit(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;
	struct bfa_fcs_fabric_s *fabric = &rport->fcs->fabric;

	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPORT_ONLINE:
		/* Send RPSC2 to a Brocade fabric only. */
		if ((!BFA_FCS_PID_IS_WKA(rport->pid)) &&
			((rport->port->fabric->lps->brcd_switch) ||
			(bfa_fcs_fabric_get_switch_oui(fabric) ==
					BFA_FCS_BRCD_SWITCH_OUI))) {
			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
			rpf->rpsc_retries = 0;
			bfa_fcs_rpf_send_rpsc2(rpf, NULL);
		}
		/* Otherwise stay uninitialized: no speed discovery. */
		break;

	case RPFSM_EVENT_RPORT_OFFLINE:
		/* Never came online; nothing to undo. */
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * RPF state: an RPSC2 request is queued for transmission.
 */
static void
bfa_fcs_rpf_sm_rpsc_sending(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rp = rpf->rport;

	bfa_trc(rp->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPORT_OFFLINE:
		/* Cancel the pending fcxp wait and go quiescent. */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
		bfa_fcxp_walloc_cancel(rp->fcs->bfa, &rpf->fcxp_wqe);
		rpf->rpsc_retries = 0;
		break;

	case RPFSM_EVENT_FCXP_SENT:
		/* Frame is on the wire; wait for the response. */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc);
		break;

	default:
		bfa_sm_fault(rp->fcs, event);
	}
}
/*
 * RPF state: RPSC2 has been sent; waiting for the response.
 */
static void
bfa_fcs_rpf_sm_rpsc(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPSC_COMP:
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
		/* Update speed info in f/w via BFA */
		if (rpf->rpsc_speed != BFA_PORT_SPEED_UNKNOWN)
			bfa_rport_speed(rport->bfa_rport, rpf->rpsc_speed);
		else if (rpf->assigned_speed != BFA_PORT_SPEED_UNKNOWN)
			bfa_rport_speed(rport->bfa_rport, rpf->assigned_speed);
		break;

	case RPFSM_EVENT_RPSC_FAIL:
		/* RPSC not supported by rport */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
		break;

	case RPFSM_EVENT_RPSC_ERROR:
		/* need to retry...delayed a bit. */
		if (rpf->rpsc_retries++ < BFA_FCS_RPF_RETRIES) {
			bfa_timer_start(rport->fcs->bfa, &rpf->timer,
				    bfa_fcs_rpf_timeout, rpf,
				    BFA_FCS_RPF_RETRY_TIMEOUT);
			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_retry);
		} else {
			/* Retries exhausted: give up, go online without
			 * discovered speed. */
			bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_online);
		}
		break;

	case RPFSM_EVENT_RPORT_OFFLINE:
		/* Drop the outstanding exchange and go quiescent. */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
		bfa_fcxp_discard(rpf->fcxp);
		rpf->rpsc_retries = 0;
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * RPF state: the previous RPSC2 errored; a back-off timer is running
 * before the next attempt.
 */
static void
bfa_fcs_rpf_sm_rpsc_retry(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rp = rpf->rport;

	bfa_trc(rp->fcs, rp->pid);
	bfa_trc(rp->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPORT_OFFLINE:
		/* Abandon the retry: stop the timer, reset the counter. */
		bfa_timer_stop(&rpf->timer);
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
		rpf->rpsc_retries = 0;
		break;

	case RPFSM_EVENT_TIMEOUT:
		/* Back-off elapsed -- issue the RPSC again. */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
		bfa_fcs_rpf_send_rpsc2(rpf, NULL);
		break;

	default:
		bfa_sm_fault(rp->fcs, event);
	}
}
/*
 * RPF state: speed discovery done (or abandoned); rport is online.
 */
static void
bfa_fcs_rpf_sm_online(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, rport->pwwn);
	bfa_trc(rport->fcs, rport->pid);
	bfa_trc(rport->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPORT_OFFLINE:
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_offline);
		rpf->rpsc_retries = 0;
		break;

	default:
		bfa_sm_fault(rport->fcs, event);
	}
}
/*
 * RPF state: rport is offline; restart discovery when it returns.
 */
static void
bfa_fcs_rpf_sm_offline(struct bfa_fcs_rpf_s *rpf, enum rpf_event event)
{
	struct bfa_fcs_rport_s *rp = rpf->rport;

	bfa_trc(rp->fcs, rp->pwwn);
	bfa_trc(rp->fcs, rp->pid);
	bfa_trc(rp->fcs, event);

	switch (event) {
	case RPFSM_EVENT_RPORT_OFFLINE:
		/* Already offline -- nothing to do. */
		break;

	case RPFSM_EVENT_RPORT_ONLINE:
		/* Peer is back: restart speed discovery. */
		bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_rpsc_sending);
		bfa_fcs_rpf_send_rpsc2(rpf, NULL);
		break;

	default:
		bfa_sm_fault(rp->fcs, event);
	}
}
/*
* Called when Rport is created.
*/
void
bfa_fcs_rpf_init(struct bfa_fcs_rport_s *rport)
{
	struct bfa_fcs_rpf_s *rpf = &rport->rpf;

	bfa_trc(rport->fcs, rport->pid);

	/* Link back to the owning rport and start in the uninit state. */
	rpf->rport = rport;
	bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
}
/*
* Called when Rport becomes online
*/
void
bfa_fcs_rpf_rport_online(struct bfa_fcs_rport_s *rport)
{
	bfa_trc(rport->fcs, rport->pid);

	/* RPF is disabled in min-config mode. */
	if (__fcs_min_cfg(rport->port->fcs))
		return;

	/* Speed discovery only makes sense on a switched fabric. */
	if (bfa_fcs_fabric_is_switched(rport->port->fabric))
		bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
}
/*
* Called when Rport becomes offline
*/
void
bfa_fcs_rpf_rport_offline(struct bfa_fcs_rport_s *rport)
{
	bfa_trc(rport->fcs, rport->pid);

	/* RPF is disabled in min-config mode. */
	if (__fcs_min_cfg(rport->port->fcs))
		return;

	/* Forget the discovered speed; it must be rediscovered on online. */
	rport->rpf.rpsc_speed = 0;
	bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_OFFLINE);
}
static void
bfa_fcs_rpf_timeout(void *arg)
{
	/* arg is the rpf whose retry back-off timer just fired. */
	struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) arg;
	struct bfa_fcs_rport_s *rport = rpf->rport;

	bfa_trc(rport->fcs, rport->pid);
	bfa_sm_send_event(rpf, RPFSM_EVENT_TIMEOUT);
}
/*
 * Build and send an RPSC2 (report port speed capabilities) request for
 * this rport.  If no fcxp is available, queue ourselves to be called
 * again (as the fcxp_alloced argument) when one frees up.
 */
static void
bfa_fcs_rpf_send_rpsc2(void *rpf_cbarg, struct bfa_fcxp_s *fcxp_alloced)
{
	struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *)rpf_cbarg;
	struct bfa_fcs_rport_s *rport = rpf->rport;
	struct bfa_fcs_lport_s *port = rport->port;
	struct fchs_s fchs;
	int len;
	struct bfa_fcxp_s *fcxp;

	bfa_trc(rport->fcs, rport->pwwn);

	fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs);
	if (!fcxp) {
		/* Park on the wait queue; re-entered with the fcxp later. */
		bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &rpf->fcxp_wqe,
					bfa_fcs_rpf_send_rpsc2, rpf);
		return;
	}
	rpf->fcxp = fcxp;

	/* Query a single PID: the rport's own. */
	len = fc_rpsc2_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
			    bfa_fcs_lport_get_fcid(port), &rport->pid, 1);

	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
			  FC_CLASS_3, len, &fchs, bfa_fcs_rpf_rpsc2_response,
			  rpf, FC_MAX_PDUSZ, FC_ELS_TOV);
	rport->stats.rpsc_sent++;
	bfa_sm_send_event(rpf, RPFSM_EVENT_FCXP_SENT);
}
/*
 * RPSC2 response handler: extract the peer's operating speed from the
 * accept, or classify the failure (unsupported vs. retryable error).
 */
static void
bfa_fcs_rpf_rpsc2_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg,
			    bfa_status_t req_status, u32 rsp_len,
			    u32 resid_len, struct fchs_s *rsp_fchs)
{
	struct bfa_fcs_rpf_s *rpf = (struct bfa_fcs_rpf_s *) cbarg;
	struct bfa_fcs_rport_s *rport = rpf->rport;
	struct fc_ls_rjt_s *ls_rjt;
	struct fc_rpsc2_acc_s *rpsc2_acc;
	u16 num_ents;

	bfa_trc(rport->fcs, req_status);

	/* Transport-level failure (e.g. timeout): treat as retryable. */
	if (req_status != BFA_STATUS_OK) {
		bfa_trc(rport->fcs, req_status);
		if (req_status == BFA_STATUS_ETIMER)
			rport->stats.rpsc_failed++;
		bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
		return;
	}

	rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
	if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
		rport->stats.rpsc_accs++;
		num_ents = be16_to_cpu(rpsc2_acc->num_pids);
		bfa_trc(rport->fcs, num_ents);
		if (num_ents > 0) {
			/* NOTE(review): warns when the reported pid EQUALS
			 * this rport's pid, yet the request asked about that
			 * very pid; possibly an inverted condition or a
			 * byte-order mismatch (pid on the wire is
			 * big-endian) -- verify before changing. */
			WARN_ON(rpsc2_acc->port_info[0].pid == rport->pid);
			bfa_trc(rport->fcs,
				be16_to_cpu(rpsc2_acc->port_info[0].pid));
			bfa_trc(rport->fcs,
				be16_to_cpu(rpsc2_acc->port_info[0].speed));
			bfa_trc(rport->fcs,
				be16_to_cpu(rpsc2_acc->port_info[0].index));
			bfa_trc(rport->fcs,
				rpsc2_acc->port_info[0].type);

			/* Speed 0 means the switch has no valid info yet. */
			if (rpsc2_acc->port_info[0].speed == 0) {
				bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
				return;
			}

			rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
				be16_to_cpu(rpsc2_acc->port_info[0].speed));

			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
		}
		/* NOTE(review): num_ents == 0 sends no event, leaving the
		 * RPF state machine waiting in the rpsc state -- confirm
		 * whether this is intentional. */
	} else {
		ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp);
		bfa_trc(rport->fcs, ls_rjt->reason_code);
		bfa_trc(rport->fcs, ls_rjt->reason_code_expl);
		rport->stats.rpsc_rejects++;
		/* "Not supported" is terminal; anything else is retried. */
		if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP)
			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_FAIL);
		else
			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_ERROR);
	}
}
| gpl-2.0 |
djmax81/android_kernel_samsung_exynos5420 | drivers/staging/vt6655/device_main.c | 4940 | 119221 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* File: device_main.c
*
* Purpose: driver entry for initial, open, close, tx and rx.
*
* Author: Lyndon Chen
*
* Date: Jan 8, 2003
*
* Functions:
*
* vt6655_probe - module initial (insmod) driver entry
* vt6655_remove - module remove entry
* vt6655_init_info - device structure resource allocation function
* device_free_info - device structure resource free function
* device_get_pci_info - get allocated pci io/mem resource
* device_print_info - print out resource
* device_open - allocate dma/descriptor resource & initial mac/bbp function
* device_xmit - asynchronous data tx function
* device_intr - interrupt handle function
* device_set_multi - set mac filter
* device_ioctl - ioctl entry
* device_close - shutdown mac/bbp & free dma/descriptor resource
* device_rx_srv - rx service function
* device_receive_frame - rx data function
* device_alloc_rx_buf - rx buffer pre-allocated function
* device_alloc_frag_buf - rx fragment pre-allocated function
* device_free_tx_buf - free tx buffer function
* device_free_frag_buf- free de-fragment buffer
* device_dma0_tx_80211- tx 802.11 frame via dma0
* device_dma0_xmit- tx PS buffered frame via dma0
* device_init_rd0_ring- initial rd dma0 ring
* device_init_rd1_ring- initial rd dma1 ring
* device_init_td0_ring- initial tx dma0 ring buffer
* device_init_td1_ring- initial tx dma1 ring buffer
* device_init_registers- initial MAC & BBP & RF internal registers.
* device_init_rings- initial tx/rx ring buffer
* device_init_defrag_cb- initial & allocate de-fragment buffer.
* device_free_rings- free all allocated ring buffer
* device_tx_srv- tx interrupt service function
*
* Revision History:
*/
#undef __NO_VERSION__
#include "device.h"
#include "card.h"
#include "channel.h"
#include "baseband.h"
#include "mac.h"
#include "tether.h"
#include "wmgr.h"
#include "wctl.h"
#include "power.h"
#include "wcmd.h"
#include "iocmd.h"
#include "tcrc.h"
#include "rxtx.h"
#include "wroute.h"
#include "bssdb.h"
#include "hostap.h"
#include "wpactl.h"
#include "ioctl.h"
#include "iwctl.h"
#include "dpc.h"
#include "datarate.h"
#include "rf.h"
#include "iowpa.h"
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/slab.h>
//#define DEBUG
/*--------------------- Static Definitions -------------------------*/
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel = MSG_LEVEL_INFO;
//#define PLICE_DEBUG
//
// Define module options
//
MODULE_AUTHOR("VIA Networking Technologies, Inc., <lyndonchen@vntek.com.tw>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("VIA Networking Solomon-A/B/G Wireless LAN Adapter Driver");
//PLICE_DEBUG ->
static int mlme_kill;
//static struct task_struct * mlme_task;
//PLICE_DEBUG <-
#define DEVICE_PARAM(N,D)
/*
static const int N[MAX_UINTS]=OPTION_DEFAULT;\
MODULE_PARM(N, "1-" __MODULE_STRING(MAX_UINTS) "i");\
MODULE_PARM_DESC(N, D);
*/
#define RX_DESC_MIN0 16
#define RX_DESC_MAX0 128
#define RX_DESC_DEF0 32
DEVICE_PARAM(RxDescriptors0,"Number of receive descriptors0");
#define RX_DESC_MIN1 16
#define RX_DESC_MAX1 128
#define RX_DESC_DEF1 32
DEVICE_PARAM(RxDescriptors1,"Number of receive descriptors1");
#define TX_DESC_MIN0 16
#define TX_DESC_MAX0 128
#define TX_DESC_DEF0 32
DEVICE_PARAM(TxDescriptors0,"Number of transmit descriptors0");
#define TX_DESC_MIN1 16
#define TX_DESC_MAX1 128
#define TX_DESC_DEF1 64
DEVICE_PARAM(TxDescriptors1,"Number of transmit descriptors1");
#define IP_ALIG_DEF 0
/* IP_byte_align[] controls unsigned long byte alignment of the IP header:
0: the IP header will not be unsigned long byte aligned (default).
1: the IP header will be unsigned long byte aligned.
In some environments the IP header must be unsigned long byte aligned,
or received packets will be dropped (e.g. IPVS).
*/
DEVICE_PARAM(IP_byte_align,"Enable IP header dword aligned");
#define INT_WORKS_DEF 20
#define INT_WORKS_MIN 10
#define INT_WORKS_MAX 64
DEVICE_PARAM(int_works,"Number of packets per interrupt services");
#define CHANNEL_MIN 1
#define CHANNEL_MAX 14
#define CHANNEL_DEF 6
DEVICE_PARAM(Channel, "Channel number");
/* PreambleType[] is the preamble length used for transmit.
0: indicate allows long preamble type
1: indicate allows short preamble type
*/
#define PREAMBLE_TYPE_DEF 1
DEVICE_PARAM(PreambleType, "Preamble Type");
#define RTS_THRESH_MIN 512
#define RTS_THRESH_MAX 2347
#define RTS_THRESH_DEF 2347
DEVICE_PARAM(RTSThreshold, "RTS threshold");
#define FRAG_THRESH_MIN 256
#define FRAG_THRESH_MAX 2346
#define FRAG_THRESH_DEF 2346
DEVICE_PARAM(FragThreshold, "Fragmentation threshold");
#define DATA_RATE_MIN 0
#define DATA_RATE_MAX 13
#define DATA_RATE_DEF 13
/* datarate[] index
0: indicate 1 Mbps 0x02
1: indicate 2 Mbps 0x04
2: indicate 5.5 Mbps 0x0B
3: indicate 11 Mbps 0x16
4: indicate 6 Mbps 0x0c
5: indicate 9 Mbps 0x12
6: indicate 12 Mbps 0x18
7: indicate 18 Mbps 0x24
8: indicate 24 Mbps 0x30
9: indicate 36 Mbps 0x48
10: indicate 48 Mbps 0x60
11: indicate 54 Mbps 0x6c
12: indicate 72 Mbps 0x90
13: indicate auto rate
*/
DEVICE_PARAM(ConnectionRate, "Connection data rate");
#define OP_MODE_DEF 0
DEVICE_PARAM(OPMode, "Infrastruct, adhoc, AP mode ");
/* OpMode[] is used for transmit.
0: indicate infrastruct mode used
1: indicate adhoc mode used
2: indicate AP mode used
*/
/* PSMode[]
0: indicate disable power saving mode
1: indicate enable power saving mode
*/
#define PS_MODE_DEF 0
DEVICE_PARAM(PSMode, "Power saving mode");
#define SHORT_RETRY_MIN 0
#define SHORT_RETRY_MAX 31
#define SHORT_RETRY_DEF 8
DEVICE_PARAM(ShortRetryLimit, "Short frame retry limits");
#define LONG_RETRY_MIN 0
#define LONG_RETRY_MAX 15
#define LONG_RETRY_DEF 4
DEVICE_PARAM(LongRetryLimit, "long frame retry limits");
/* BasebandType[] baseband type selected
0: indicate 802.11a type
1: indicate 802.11b type
2: indicate 802.11g type
*/
#define BBP_TYPE_MIN 0
#define BBP_TYPE_MAX 2
#define BBP_TYPE_DEF 2
DEVICE_PARAM(BasebandType, "baseband type");
/* 80211hEnable[]
0: indicate disable 802.11h
1: indicate enable 802.11h
*/
#define X80211h_MODE_DEF 0
DEVICE_PARAM(b80211hEnable, "802.11h mode");
/* bDiversityANTEnable[]
0: indicate disable antenna diversity
1: indicate enable antenna diversity
*/
#define DIVERSITY_ANT_DEF 0
DEVICE_PARAM(bDiversityANTEnable, "ANT diversity mode");
//
// Static vars definitions
//
static int device_nics =0;
static PSDevice pDevice_Infos =NULL;
static struct net_device *root_device_dev = NULL;
static CHIP_INFO chip_info_table[]= {
{ VT3253, "VIA Networking Solomon-A/B/G Wireless LAN Adapter ",
256, 1, DEVICE_FLAGS_IP_ALIGN|DEVICE_FLAGS_TX_ALIGN },
{0,NULL}
};
DEFINE_PCI_DEVICE_TABLE(vt6655_pci_id_table) = {
{ PCI_VDEVICE(VIA, 0x3253), (kernel_ulong_t)chip_info_table},
{ 0, }
};
/*--------------------- Static Functions --------------------------*/
static int vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent);
static bool vt6655_init_info(struct pci_dev* pcid, PSDevice* ppDevice, PCHIP_INFO);
static void device_free_info(PSDevice pDevice);
static bool device_get_pci_info(PSDevice, struct pci_dev* pcid);
static void device_print_info(PSDevice pDevice);
static struct net_device_stats *device_get_stats(struct net_device *dev);
static void device_init_diversity_timer(PSDevice pDevice);
static int device_open(struct net_device *dev);
static int device_xmit(struct sk_buff *skb, struct net_device *dev);
static irqreturn_t device_intr(int irq, void*dev_instance);
static void device_set_multi(struct net_device *dev);
static int device_close(struct net_device *dev);
static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
#ifdef CONFIG_PM
static int device_notify_reboot(struct notifier_block *, unsigned long event, void *ptr);
static int viawget_suspend(struct pci_dev *pcid, pm_message_t state);
static int viawget_resume(struct pci_dev *pcid);
struct notifier_block device_notifier = {
.notifier_call = device_notify_reboot,
.next = NULL,
.priority = 0,
};
#endif
static void device_init_rd0_ring(PSDevice pDevice);
static void device_init_rd1_ring(PSDevice pDevice);
static void device_init_defrag_cb(PSDevice pDevice);
static void device_init_td0_ring(PSDevice pDevice);
static void device_init_td1_ring(PSDevice pDevice);
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev);
//2008-0714<Add>by Mike Liu
static bool device_release_WPADEV(PSDevice pDevice);
static int ethtool_ioctl(struct net_device *dev, void *useraddr);
static int device_rx_srv(PSDevice pDevice, unsigned int uIdx);
static int device_tx_srv(PSDevice pDevice, unsigned int uIdx);
static bool device_alloc_rx_buf(PSDevice pDevice, PSRxDesc pDesc);
static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType);
static void device_free_tx_buf(PSDevice pDevice, PSTxDesc pDesc);
static void device_free_td0_ring(PSDevice pDevice);
static void device_free_td1_ring(PSDevice pDevice);
static void device_free_rd0_ring(PSDevice pDevice);
static void device_free_rd1_ring(PSDevice pDevice);
static void device_free_rings(PSDevice pDevice);
static void device_free_frag_buf(PSDevice pDevice);
static int Config_FileGetParameter(unsigned char *string,
unsigned char *dest, unsigned char *source);
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*
 * Map a PCI chip id to its display name using chip_info_table; the
 * table's {0, NULL} sentinel is returned (name == NULL) on no match.
 */
static char* get_chip_name(int chip_id) {
	int idx = 0;

	while (chip_info_table[idx].name != NULL &&
	       chip_info_table[idx].chip_id != chip_id)
		idx++;
	return chip_info_table[idx].name;
}
/*
 * PCI remove entry: tear down the device context that probe attached
 * to this PCI device, if any.
 */
static void __devexit vt6655_remove(struct pci_dev *pcid)
{
	PSDevice pDevice = pci_get_drvdata(pcid);

	if (pDevice != NULL)
		device_free_info(pDevice);
}
/*
static void
device_set_int_opt(int *opt, int val, int min, int max, int def,char* name,char* devname) {
if (val==-1)
*opt=def;
else if (val<min || val>max) {
DBG_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: the value of parameter %s is invalid, the valid range is (%d-%d)\n" ,
devname,name, min,max);
*opt=def;
} else {
DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: set value of parameter %s to %d\n",
devname, name, val);
*opt=val;
}
}
static void
device_set_bool_opt(unsigned int *opt, int val,bool def,u32 flag, char* name,char* devname) {
(*opt)&=(~flag);
if (val==-1)
*opt|=(def ? flag : 0);
else if (val<0 || val>1) {
DBG_PRT(MSG_LEVEL_INFO, KERN_NOTICE
"%s: the value of parameter %s is invalid, the valid range is (0-1)\n",devname,name);
*opt|=(def ? flag : 0);
} else {
DBG_PRT(MSG_LEVEL_INFO, KERN_NOTICE "%s: set parameter %s to %s\n",
devname,name , val ? "true" : "false");
*opt|=(val ? flag : 0);
}
}
*/
/*
 * Populate the per-device options with compile-time defaults.  The
 * module-parameter machinery (DEVICE_PARAM) is stubbed out, so index
 * and devname are currently unused.
 */
static void
device_get_options(PSDevice pDevice, int index, char* devname) {
	POPTIONS pOpts = &(pDevice->sOpts);

	/* rx/tx descriptor ring sizes */
	pOpts->nRxDescs0 = RX_DESC_DEF0;
	pOpts->nRxDescs1 = RX_DESC_DEF1;
	pOpts->nTxDescs[0] = TX_DESC_DEF0;
	pOpts->nTxDescs[1] = TX_DESC_DEF1;

	/* MAC/radio defaults */
	pOpts->int_works = INT_WORKS_DEF;
	pOpts->rts_thresh = RTS_THRESH_DEF;
	pOpts->frag_thresh = FRAG_THRESH_DEF;
	pOpts->data_rate = DATA_RATE_DEF;
	pOpts->channel_num = CHANNEL_DEF;
	pOpts->short_retry = SHORT_RETRY_DEF;
	pOpts->long_retry = LONG_RETRY_DEF;
	pOpts->bbp_type = BBP_TYPE_DEF;

	/* feature flags, OR-ed in one shot (PS_MODE stays off by default) */
	pOpts->flags |= DEVICE_FLAGS_IP_ALIGN |
			DEVICE_FLAGS_PREAMBLE_TYPE |
			DEVICE_FLAGS_OP_MODE |
			DEVICE_FLAGS_80211h_MODE |
			DEVICE_FLAGS_DiversityANT;
}
/*
 * device_set_options - propagate the parsed option block (pDevice->sOpts)
 * into the live device fields, seed the fixed broadcast/SNAP addresses
 * used by the RX/TX paths, and dump the resulting configuration.
 */
static void
device_set_options(PSDevice pDevice) {
	unsigned char abyBroadcastAddr[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	unsigned char abySNAP_RFC1042[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0x00};
	unsigned char abySNAP_Bridgetunnel[ETH_ALEN] = {0xAA, 0xAA, 0x03, 0x00, 0x00, 0xF8};

	memcpy(pDevice->abyBroadcastAddr, abyBroadcastAddr, ETH_ALEN);
	memcpy(pDevice->abySNAP_RFC1042, abySNAP_RFC1042, ETH_ALEN);
	memcpy(pDevice->abySNAP_Bridgetunnel, abySNAP_Bridgetunnel, ETH_ALEN);

	pDevice->uChannel = pDevice->sOpts.channel_num;
	pDevice->wRTSThreshold = pDevice->sOpts.rts_thresh;
	pDevice->wFragmentationThreshold = pDevice->sOpts.frag_thresh;
	pDevice->byShortRetryLimit = pDevice->sOpts.short_retry;
	pDevice->byLongRetryLimit = pDevice->sOpts.long_retry;
	pDevice->wMaxTransmitMSDULifetime = DEFAULT_MSDU_LIFETIME;
	// Each option flag collapses to a 0/1 device field.
	pDevice->byShortPreamble = (pDevice->sOpts.flags & DEVICE_FLAGS_PREAMBLE_TYPE) ? 1 : 0;
	pDevice->byOpMode = (pDevice->sOpts.flags & DEVICE_FLAGS_OP_MODE) ? 1 : 0;
	pDevice->ePSMode = (pDevice->sOpts.flags & DEVICE_FLAGS_PS_MODE) ? 1 : 0;
	pDevice->b11hEnable = (pDevice->sOpts.flags & DEVICE_FLAGS_80211h_MODE) ? 1 : 0;
	pDevice->bDiversityRegCtlON = (pDevice->sOpts.flags & DEVICE_FLAGS_DiversityANT) ? 1 : 0;
	pDevice->uConnectionRate = pDevice->sOpts.data_rate;
	// Any explicit rate below RATE_AUTO pins the transmit rate.
	if (pDevice->uConnectionRate < RATE_AUTO) pDevice->bFixRate = true;
	pDevice->byBBType = pDevice->sOpts.bbp_type;
	pDevice->byPacketType = pDevice->byBBType;
//PLICE_DEBUG->
	pDevice->byAutoFBCtrl = AUTO_FB_0;
	//pDevice->byAutoFBCtrl = AUTO_FB_1;
//PLICE_DEBUG<-
	pDevice->bUpdateBBVGA = true;
	pDevice->byFOETuning = 0;
	pDevice->wCTSDuration = 0;
	pDevice->byPreambleType = 0;
	// Dump the effective configuration at debug level.
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" uChannel= %d\n",(int)pDevice->uChannel);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byOpMode= %d\n",(int)pDevice->byOpMode);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" ePSMode= %d\n",(int)pDevice->ePSMode);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" wRTSThreshold= %d\n",(int)pDevice->wRTSThreshold);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byShortRetryLimit= %d\n",(int)pDevice->byShortRetryLimit);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byLongRetryLimit= %d\n",(int)pDevice->byLongRetryLimit);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byPreambleType= %d\n",(int)pDevice->byPreambleType);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byShortPreamble= %d\n",(int)pDevice->byShortPreamble);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" uConnectionRate= %d\n",(int)pDevice->uConnectionRate);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" byBBType= %d\n",(int)pDevice->byBBType);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->b11hEnable= %d\n",(int)pDevice->b11hEnable);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pDevice->bDiversityRegCtlON= %d\n",(int)pDevice->bDiversityRegCtlON);
}
/*
 * s_vCompleteCurrentMeasure - finish the current 802.11h measurement:
 * normalise the per-bin RPI counters, report the result to the WiFi
 * management layer, and (if more EIDs are queued) start the next one.
 */
static void s_vCompleteCurrentMeasure (PSDevice pDevice, unsigned char byResult)
{
	unsigned int ii;
	unsigned long dwDuration = 0;
	unsigned char byRPI0 = 0;

	for(ii=1;ii<8;ii++) {
		pDevice->dwRPIs[ii] *= 255;
		// NOTE(review): abyDuration is read through an unsigned short
		// cast (alignment/endianness dependent), and dwDuration is
		// OR-ed then shifted *cumulatively* across iterations, so the
		// divisor grows each pass.  Looks suspicious — confirm against
		// the intended duration-in-TU (<<10) scaling before changing.
		dwDuration |= *((unsigned short *) (pDevice->pCurrMeasureEID->sReq.abyDuration));
		dwDuration <<= 10;
		pDevice->dwRPIs[ii] /= dwDuration;
		pDevice->abyRPIs[ii] = (unsigned char) pDevice->dwRPIs[ii];
		byRPI0 += pDevice->abyRPIs[ii];
	}
	// Bin 0 takes the remainder so all eight bins sum to 0xFF.
	pDevice->abyRPIs[0] = (0xFF - byRPI0);

	if (pDevice->uNumOfMeasureEIDs == 0) {
		// No queued EIDs left: final report.
		VNTWIFIbMeasureReport( pDevice->pMgmt,
				       true,
				       pDevice->pCurrMeasureEID,
				       byResult,
				       pDevice->byBasicMap,
				       pDevice->byCCAFraction,
				       pDevice->abyRPIs
			);
	} else {
		// Intermediate report, then kick off the next measurement.
		VNTWIFIbMeasureReport( pDevice->pMgmt,
				       false,
				       pDevice->pCurrMeasureEID,
				       byResult,
				       pDevice->byBasicMap,
				       pDevice->byCCAFraction,
				       pDevice->abyRPIs
			);
		// NOTE(review): the post-increment passes the just-completed
		// EID and only then advances the pointer — verify that
		// CARDbStartMeasure really expects the old element here.
		CARDbStartMeasure (pDevice, pDevice->pCurrMeasureEID++, pDevice->uNumOfMeasureEIDs);
	}
}
//
// Initialization of MAC & BBP registers
//
/*
 * device_init_registers - bring the MAC and baseband up.  For COLD/DXPL
 * init this also re-reads the EEPROM (antenna config, zone type, RF type,
 * power tables) and applies it; the common tail resets the TX/RX rings,
 * enables RX DMA, and starts the adapter with the queue stopped.
 *
 * Register accesses are order-sensitive hardware I/O — do not reorder.
 */
static void device_init_registers(PSDevice pDevice, DEVICE_INIT_TYPE InitType)
{
	unsigned int ii;
	unsigned char byValue;
	unsigned char byValue1;
	unsigned char byCCKPwrdBm = 0;
	unsigned char byOFDMPwrdBm = 0;
	int zonetype=0;
	PSMgmtObject pMgmt = &(pDevice->sMgmtObj);

	MACbShutdown(pDevice->PortOffset);
	BBvSoftwareReset(pDevice->PortOffset);

	if ((InitType == DEVICE_INIT_COLD) ||
	    (InitType == DEVICE_INIT_DXPL)) {
		// Do MACbSoftwareReset in MACvInitialize
		MACbSoftwareReset(pDevice->PortOffset);
		// force CCK
		pDevice->bCCK = true;
		pDevice->bAES = false;
		pDevice->bProtectMode = false;      //Only used in 11g type, sync with ERP IE
		pDevice->bNonERPPresent = false;
		pDevice->bBarkerPreambleMd = false;
		pDevice->wCurrentRate = RATE_1M;
		pDevice->byTopOFDMBasicRate = RATE_24M;
		pDevice->byTopCCKBasicRate = RATE_1M;
		pDevice->byRevId = 0;               //Target to IF pin while programming to RF chip.
		// init MAC
		MACvInitialize(pDevice->PortOffset);
		// Get Local ID
		VNSvInPortB(pDevice->PortOffset + MAC_REG_LOCALID, &(pDevice->byLocalID));
		spin_lock_irq(&pDevice->lock);
		SROMvReadAllContents(pDevice->PortOffset,pDevice->abyEEPROM);
		spin_unlock_irq(&pDevice->lock);
		// Get Channel range
		pDevice->byMinChannel = 1;
		pDevice->byMaxChannel = CB_MAX_CHANNEL;
		// Get Antena
		byValue = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ANTENNA);
		if (byValue & EEP_ANTINV)
			pDevice->bTxRxAntInv = true;
		else
			pDevice->bTxRxAntInv = false;
#ifdef PLICE_DEBUG
		//printk("init_register:TxRxAntInv is %d,byValue is %d\n",pDevice->bTxRxAntInv,byValue);
#endif
		byValue &= (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
		if (byValue == 0) // if not set default is All
			byValue = (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN);
#ifdef PLICE_DEBUG
		//printk("init_register:byValue is %d\n",byValue);
#endif
		// Diversity tuning constants are hard-coded here; the SROM
		// reads they replaced are preserved in trailing comments.
		pDevice->ulDiversityNValue = 100*260;//100*SROMbyReadEmbedded(pDevice->PortOffset, 0x51);
		pDevice->ulDiversityMValue = 100*16;//SROMbyReadEmbedded(pDevice->PortOffset, 0x52);
		pDevice->byTMax = 1;//SROMbyReadEmbedded(pDevice->PortOffset, 0x53);
		pDevice->byTMax2 = 4;//SROMbyReadEmbedded(pDevice->PortOffset, 0x54);
		pDevice->ulSQ3TH = 0;//(unsigned long) SROMbyReadEmbedded(pDevice->PortOffset, 0x55);
		pDevice->byTMax3 = 64;//SROMbyReadEmbedded(pDevice->PortOffset, 0x56);
		if (byValue == (EEP_ANTENNA_AUX | EEP_ANTENNA_MAIN)) {
			// Both antennas present: TX on B, RX on the other
			// (or swapped when the inversion bit is set).
			pDevice->byAntennaCount = 2;
			pDevice->byTxAntennaMode = ANT_B;
			pDevice->dwTxAntennaSel = 1;
			pDevice->dwRxAntennaSel = 1;
			if (pDevice->bTxRxAntInv == true)
				pDevice->byRxAntennaMode = ANT_A;
			else
				pDevice->byRxAntennaMode = ANT_B;
			// chester for antenna
			byValue1 = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ANTENNA);
			// if (pDevice->bDiversityRegCtlON)
			if((byValue1&0x08)==0)
				pDevice->bDiversityEnable = false;//SROMbyReadEmbedded(pDevice->PortOffset, 0x50);
			else
				pDevice->bDiversityEnable = true;
#ifdef PLICE_DEBUG
			//printk("aux |main antenna: RxAntennaMode is %d\n",pDevice->byRxAntennaMode);
#endif
		} else {
			// Single antenna: no diversity possible.
			pDevice->bDiversityEnable = false;
			pDevice->byAntennaCount = 1;
			pDevice->dwTxAntennaSel = 0;
			pDevice->dwRxAntennaSel = 0;
			if (byValue & EEP_ANTENNA_AUX) {
				pDevice->byTxAntennaMode = ANT_A;
				if (pDevice->bTxRxAntInv == true)
					pDevice->byRxAntennaMode = ANT_B;
				else
					pDevice->byRxAntennaMode = ANT_A;
			} else {
				pDevice->byTxAntennaMode = ANT_B;
				if (pDevice->bTxRxAntInv == true)
					pDevice->byRxAntennaMode = ANT_A;
				else
					pDevice->byRxAntennaMode = ANT_B;
			}
		}
#ifdef PLICE_DEBUG
		//printk("init registers: TxAntennaMode is %d\n",pDevice->byTxAntennaMode);
#endif
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "bDiversityEnable=[%d],NValue=[%d],MValue=[%d],TMax=[%d],TMax2=[%d]\n",
			pDevice->bDiversityEnable,(int)pDevice->ulDiversityNValue,(int)pDevice->ulDiversityMValue,pDevice->byTMax,pDevice->byTMax2);
//#ifdef ZoneType_DefaultSetting
//2008-8-4 <add> by chester
		//zonetype initial
		// Reconcile the zonetype config file with the EEPROM copy;
		// a mismatch in the 0/1/2 (USA/Japan/Europe) cases overrides
		// the EEPROM zone and max-channel fields in RAM.
		pDevice->byOriginalZonetype = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
		zonetype = Config_FileOperation(pDevice,false,NULL);
		if (zonetype >= 0) { //read zonetype file ok!
			if ((zonetype == 0)&&
			    (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] !=0x00)){ //for USA
				pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0;
				pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0B;
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Init Zone Type :USA\n");
			}
			else if((zonetype == 1)&&
				(pDevice->abyEEPROM[EEP_OFS_ZONETYPE]!=0x01)){ //for Japan
				pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x01;
				pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
			}
			else if((zonetype == 2)&&
				(pDevice->abyEEPROM[EEP_OFS_ZONETYPE]!=0x02)){ //for Europe
				pDevice->abyEEPROM[EEP_OFS_ZONETYPE] = 0x02;
				pDevice->abyEEPROM[EEP_OFS_MAXCHANNEL] = 0x0D;
				DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Init Zone Type :Europe\n");
			}
			else
			{
				if(zonetype!=pDevice->abyEEPROM[EEP_OFS_ZONETYPE])
					printk("zonetype in file[%02x] mismatch with in EEPROM[%02x]\n",zonetype,pDevice->abyEEPROM[EEP_OFS_ZONETYPE]);
				else
					printk("Read Zonetype file success,use default zonetype setting[%02x]\n",zonetype);
			}
		}
		else
			printk("Read Zonetype file fail,use default zonetype setting[%02x]\n",SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_ZONETYPE));
		// Get RFType
		pDevice->byRFType = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RFTYPE);
		if ((pDevice->byRFType & RF_EMU) != 0) {
			// force change RevID for VT3253 emu
			pDevice->byRevId = 0x80;
		}
		pDevice->byRFType &= RF_MASK;
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRFType = %x\n", pDevice->byRFType);
		if (pDevice->bZoneRegExist == false) {
			pDevice->byZoneType = pDevice->abyEEPROM[EEP_OFS_ZONETYPE];
		}
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byZoneType = %x\n", pDevice->byZoneType);
		//Init RF module
		RFbInit(pDevice);
		//Get Desire Power Value
		pDevice->byCurPwr = 0xFF;
		pDevice->byCCKPwr = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_PWR_CCK);
		pDevice->byOFDMPwrG = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_PWR_OFDMG);
		//byCCKPwrdBm = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_CCK_PWR_dBm);
		//byOFDMPwrdBm = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_OFDM_PWR_dBm);
		//printk("CCKPwrdBm is 0x%x,byOFDMPwrdBm is 0x%x\n",byCCKPwrdBm,byOFDMPwrdBm);
		// Load power Table
		// Per-channel SROM entries; a zero entry falls back to the
		// global CCK/OFDM power value read above.
		for (ii=0;ii<CB_MAX_CHANNEL_24G;ii++) {
			pDevice->abyCCKPwrTbl[ii+1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_CCK_PWR_TBL));
			if (pDevice->abyCCKPwrTbl[ii+1] == 0) {
				pDevice->abyCCKPwrTbl[ii+1] = pDevice->byCCKPwr;
			}
			pDevice->abyOFDMPwrTbl[ii+1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_OFDM_PWR_TBL));
			if (pDevice->abyOFDMPwrTbl[ii+1] == 0) {
				pDevice->abyOFDMPwrTbl[ii+1] = pDevice->byOFDMPwrG;
			}
			// byCCKPwrdBm/byOFDMPwrdBm remain 0 (their SROM reads
			// are commented out above), so the default-power
			// tables are effectively zero-filled here.
			pDevice->abyCCKDefaultPwr[ii+1] = byCCKPwrdBm;
			pDevice->abyOFDMDefaultPwr[ii+1] = byOFDMPwrdBm;
		}
		//2008-8-4 <add> by chester
		//recover 12,13 ,14channel for EUROPE by 11 channel
		if(((pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Japan) ||
		    (pDevice->abyEEPROM[EEP_OFS_ZONETYPE] == ZoneType_Europe))&&
		   (pDevice->byOriginalZonetype == ZoneType_USA)) {
			for(ii=11;ii<14;ii++) {
				pDevice->abyCCKPwrTbl[ii] = pDevice->abyCCKPwrTbl[10];
				pDevice->abyOFDMPwrTbl[ii] = pDevice->abyOFDMPwrTbl[10];
			}
		}
		// Load OFDM A Power Table
		for (ii=0;ii<CB_MAX_CHANNEL_5G;ii++) { //RobertYu:20041224, bug using CB_MAX_CHANNEL
			pDevice->abyOFDMPwrTbl[ii+CB_MAX_CHANNEL_24G+1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_OFDMA_PWR_TBL));
			pDevice->abyOFDMDefaultPwr[ii+CB_MAX_CHANNEL_24G+1] = SROMbyReadEmbedded(pDevice->PortOffset, (unsigned char)(ii + EEP_OFS_OFDMA_PWR_dBm));
		}
		init_channel_table((void *)pDevice);
		if (pDevice->byLocalID > REV_ID_VT3253_B1) {
			MACvSelectPage1(pDevice->PortOffset);
			VNSvOutPortB(pDevice->PortOffset + MAC_REG_MSRCTL + 1, (MSRCTL1_TXPWR | MSRCTL1_CSAPAREN));
			MACvSelectPage0(pDevice->PortOffset);
		}
		// use relative tx timeout and 802.11i D4
		MACvWordRegBitsOn(pDevice->PortOffset, MAC_REG_CFG, (CFG_TKIPOPT | CFG_NOTXTIMEOUT));
		// set performance parameter by registry
		MACvSetShortRetryLimit(pDevice->PortOffset, pDevice->byShortRetryLimit);
		MACvSetLongRetryLimit(pDevice->PortOffset, pDevice->byLongRetryLimit);
		// reset TSF counter
		VNSvOutPortB(pDevice->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTRST);
		// enable TSF counter
		VNSvOutPortB(pDevice->PortOffset + MAC_REG_TFTCTL, TFTCTL_TSFCNTREN);
		// initialize BBP registers
		BBbVT3253Init(pDevice);
		if (pDevice->bUpdateBBVGA) {
			pDevice->byBBVGACurrent = pDevice->abyBBVGA[0];
			pDevice->byBBVGANew = pDevice->byBBVGACurrent;
			BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]);
		}
#ifdef PLICE_DEBUG
		//printk("init registers:RxAntennaMode is %x,TxAntennaMode is %x\n",pDevice->byRxAntennaMode,pDevice->byTxAntennaMode);
#endif
		BBvSetRxAntennaMode(pDevice->PortOffset, pDevice->byRxAntennaMode);
		BBvSetTxAntennaMode(pDevice->PortOffset, pDevice->byTxAntennaMode);
		pDevice->byCurrentCh = 0;
		//pDevice->NetworkType = Ndis802_11Automode;
		// Set BB and packet type at the same time.
		// Set Short Slot Time, xIFS, and RSPINF.
		if (pDevice->uConnectionRate == RATE_AUTO) {
			pDevice->wCurrentRate = RATE_54M;
		} else {
			pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
		}
		// default G Mode
		VNTWIFIbConfigPhyMode(pDevice->pMgmt, PHY_TYPE_11G);
		VNTWIFIbConfigPhyMode(pDevice->pMgmt, PHY_TYPE_AUTO);
		pDevice->bRadioOff = false;
		pDevice->byRadioCtl = SROMbyReadEmbedded(pDevice->PortOffset, EEP_OFS_RADIOCTL);
		pDevice->bHWRadioOff = false;
		if (pDevice->byRadioCtl & EEP_RADIOCTL_ENABLE) {
			// Get GPIO
			MACvGPIOIn(pDevice->PortOffset, &pDevice->byGPIO);
//2008-4-14 <add> by chester for led issue
			// NOTE(review): the braces below only balance
			// per-preprocessor-branch — each #ifdef arm closes a
			// different set of enclosing blocks.  Edit with care.
#ifdef FOR_LED_ON_NOTEBOOK
			if (pDevice->byGPIO & GPIO0_DATA){pDevice->bHWRadioOff = true;}
			if ( !(pDevice->byGPIO & GPIO0_DATA)){pDevice->bHWRadioOff = false;}
		}
		if ( (pDevice->bRadioControlOff == true)) {
			CARDbRadioPowerOff(pDevice);
		}
		else CARDbRadioPowerOn(pDevice);
#else
			if (((pDevice->byGPIO & GPIO0_DATA) && !(pDevice->byRadioCtl & EEP_RADIOCTL_INV)) ||
			    ( !(pDevice->byGPIO & GPIO0_DATA) && (pDevice->byRadioCtl & EEP_RADIOCTL_INV))) {
				pDevice->bHWRadioOff = true;
			}
		}
		if ((pDevice->bHWRadioOff == true) || (pDevice->bRadioControlOff == true)) {
			CARDbRadioPowerOff(pDevice);
		}
#endif
	}
	// --- Common tail: runs for every init type. ---
	pMgmt->eScanType = WMAC_SCAN_PASSIVE;
	// get Permanent network address
	SROMvReadEtherAddress(pDevice->PortOffset, pDevice->abyCurrentNetAddr);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Network address = %pM\n",
		pDevice->abyCurrentNetAddr);
	// reset Tx pointer
	CARDvSafeResetRx(pDevice);
	// reset Rx pointer
	CARDvSafeResetTx(pDevice);
	if (pDevice->byLocalID <= REV_ID_VT3253_A1) {
		MACvRegBitsOn(pDevice->PortOffset, MAC_REG_RCR, RCR_WPAERR);
	}
	pDevice->eEncryptionStatus = Ndis802_11EncryptionDisabled;
	// Turn On Rx DMA
	MACvReceive0(pDevice->PortOffset);
	MACvReceive1(pDevice->PortOffset);
	// start the adapter
	MACvStart(pDevice->PortOffset);
	netif_stop_queue(pDevice->dev);
}
/*
 * device_init_diversity_timer - prepare the three antenna-diversity
 * timers.  Tmax1/Tmax2 share one callback; Tmax3 drives the state-1
 * path.  All expire one second out and carry pDevice as their data.
 */
static void device_init_diversity_timer(PSDevice pDevice) {
	init_timer(&pDevice->TimerSQ3Tmax1);
	pDevice->TimerSQ3Tmax1.function = (TimerFunction)TimerSQ3CallBack;
	pDevice->TimerSQ3Tmax1.data = (unsigned long)pDevice;
	pDevice->TimerSQ3Tmax1.expires = RUN_AT(HZ);

	init_timer(&pDevice->TimerSQ3Tmax2);
	pDevice->TimerSQ3Tmax2.function = (TimerFunction)TimerSQ3CallBack;
	pDevice->TimerSQ3Tmax2.data = (unsigned long)pDevice;
	pDevice->TimerSQ3Tmax2.expires = RUN_AT(HZ);

	init_timer(&pDevice->TimerSQ3Tmax3);
	pDevice->TimerSQ3Tmax3.function = (TimerFunction)TimerState1CallBack;
	pDevice->TimerSQ3Tmax3.data = (unsigned long)pDevice;
	pDevice->TimerSQ3Tmax3.expires = RUN_AT(HZ);
}
/*
 * device_release_WPADEV - notify the wpa_supplicant layer that the
 * device is closing, then poll (about one second total) for the WPA
 * pseudo-device to go down.  Always returns true.
 */
static bool device_release_WPADEV(PSDevice pDevice)
{
	viawget_wpa_header *hdr;
	int tries;

	if (pDevice->bWPADEVUp == true) {
		/* Build a DEVICECLOSE message in the pre-allocated skb and
		 * inject it toward the supplicant via the wpa netdev. */
		hdr = (viawget_wpa_header *)pDevice->skb->data;
		hdr->type = VIAWGET_DEVICECLOSE_MSG;
		hdr->resp_ie_len = 0;
		hdr->req_ie_len = 0;
		skb_put(pDevice->skb, sizeof(viawget_wpa_header));
		pDevice->skb->dev = pDevice->wpadev;
		skb_reset_mac_header(pDevice->skb);
		pDevice->skb->pkt_type = PACKET_HOST;
		pDevice->skb->protocol = htons(ETH_P_802_2);
		memset(pDevice->skb->cb, 0, sizeof(pDevice->skb->cb));
		netif_rx(pDevice->skb);
		pDevice->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);

		/* Up to 21 passes of ~50ms each while waiting for teardown. */
		for (tries = 0; pDevice->bWPADEVUp == true && tries <= 20; tries++) {
			set_current_state(TASK_UNINTERRUPTIBLE);
			schedule_timeout (HZ/20); //wait 50ms
		}
	}
	return true;
}
/* net_device callback table wiring this driver into the network stack. */
static const struct net_device_ops device_netdev_ops = {
	.ndo_open		= device_open,
	.ndo_stop		= device_close,
	.ndo_do_ioctl		= device_ioctl,
	.ndo_get_stats		= device_get_stats,
	.ndo_start_xmit		= device_xmit,
	.ndo_set_rx_mode	= device_set_multi,
};
/*
 * vt6655_probe - PCI probe: allocate the net_device and per-adapter
 * state, map the chip registers, reset the MAC, read the permanent MAC
 * address, apply module options, and register the netdev.
 *
 * Returns 0 on success, -ENODEV/-ENOMEM on failure (partial state is
 * torn down via device_free_info() once pDevice is linked).
 */
static int __devinit
vt6655_probe(struct pci_dev *pcid, const struct pci_device_id *ent)
{
	static bool bFirst = true;
	struct net_device* dev = NULL;
	PCHIP_INFO pChip_info = (PCHIP_INFO)ent->driver_data;
	PSDevice pDevice;
	int rc;

	if (device_nics++ >= MAX_UINTS) {
		printk(KERN_NOTICE DEVICE_NAME ": already found %d NICs\n", device_nics);
		return -ENODEV;
	}

	dev = alloc_etherdev(sizeof(DEVICE_INFO));
	if (dev == NULL) {
		printk(KERN_ERR DEVICE_NAME ": allocate net device failed \n");
		return -ENODEV;
	}
	/* BUGFIX: fetch the private area only after the NULL check above;
	 * the original called netdev_priv(dev) before checking dev. */
	pDevice = (PSDevice) netdev_priv(dev);

	// Chain it all together
	// SET_MODULE_OWNER(dev);
	SET_NETDEV_DEV(dev, &pcid->dev);

	if (bFirst) {
		printk(KERN_NOTICE "%s Ver. %s\n",DEVICE_FULL_DRV_NAM, DEVICE_VERSION);
		printk(KERN_NOTICE "Copyright (c) 2003 VIA Networking Technologies, Inc.\n");
		bFirst=false;
	}

	if (!vt6655_init_info(pcid, &pDevice, pChip_info)) {
		/* BUGFIX: free the netdev on this early failure path —
		 * pDevice is not yet in the global list, so nothing else
		 * can release it. */
		free_netdev(dev);
		return -ENOMEM;
	}
	pDevice->dev = dev;
	pDevice->next_module = root_device_dev;
	root_device_dev = dev;

	if (pci_enable_device(pcid)) {
		device_free_info(pDevice);
		return -ENODEV;
	}
	dev->irq = pcid->irq;

#ifdef DEBUG
	printk("Before get pci_info memaddr is %x\n",pDevice->memaddr);
#endif
	if (device_get_pci_info(pDevice,pcid) == false) {
		printk(KERN_ERR DEVICE_NAME ": Failed to find PCI device.\n");
		device_free_info(pDevice);
		return -ENODEV;
	}

#if 1
#ifdef DEBUG
	/* Dump every BAR (address + decoded length) for bring-up. */
	//pci_read_config_byte(pcid, PCI_BASE_ADDRESS_0, &pDevice->byRevId);
	printk("after get pci_info memaddr is %x, io addr is %x,io_size is %d\n",pDevice->memaddr,pDevice->ioaddr,pDevice->io_size);
	{
		int i;
		u32 bar,len;
		u32 address[] = {
			PCI_BASE_ADDRESS_0,
			PCI_BASE_ADDRESS_1,
			PCI_BASE_ADDRESS_2,
			PCI_BASE_ADDRESS_3,
			PCI_BASE_ADDRESS_4,
			PCI_BASE_ADDRESS_5,
			0};
		for (i=0;address[i];i++)
		{
			//pci_write_config_dword(pcid,address[i], 0xFFFFFFFF);
			pci_read_config_dword(pcid, address[i], &bar);
			printk("bar %d is %x\n",i,bar);
			if (!bar)
			{
				printk("bar %d not implemented\n",i);
				continue;
			}
			if (bar & PCI_BASE_ADDRESS_SPACE_IO) {
				/* This is IO */
				len = bar & (PCI_BASE_ADDRESS_IO_MASK & 0xFFFF);
				len = len & ~(len - 1);
				printk("IO space:  len in IO %x, BAR %d\n", len, i);
			}
			else
			{
				len = bar & 0xFFFFFFF0;
				len = ~len + 1;
				printk("len in MEM %x, BAR %d\n", len, i);
			}
		}
	}
#endif
#endif

#ifdef DEBUG
	//return  0 ;
#endif
	pDevice->PortOffset = (unsigned long)ioremap(pDevice->memaddr & PCI_BASE_ADDRESS_MEM_MASK, pDevice->io_size);
	//pDevice->PortOffset = (unsigned long)ioremap(pDevice->ioaddr & PCI_BASE_ADDRESS_IO_MASK, pDevice->io_size);

	if(pDevice->PortOffset == 0) {
		printk(KERN_ERR DEVICE_NAME ": Failed to IO remapping ..\n");
		device_free_info(pDevice);
		return -ENODEV;
	}

	rc = pci_request_regions(pcid, DEVICE_NAME);
	if (rc) {
		printk(KERN_ERR DEVICE_NAME ": Failed to find PCI device\n");
		device_free_info(pDevice);
		return -ENODEV;
	}

	dev->base_addr = pDevice->ioaddr;
#ifdef PLICE_DEBUG
	unsigned char value;

	VNSvInPortB(pDevice->PortOffset+0x4F, &value);
	printk("Before write: value is %x\n",value);
	//VNSvInPortB(pDevice->PortOffset+0x3F, 0x00);
	VNSvOutPortB(pDevice->PortOffset,value);
	VNSvInPortB(pDevice->PortOffset+0x4F, &value);
	printk("After write: value is %x\n",value);
#endif

#ifdef IO_MAP
	pDevice->PortOffset = pDevice->ioaddr;
#endif
	// do reset
	if (!MACbSoftwareReset(pDevice->PortOffset)) {
		printk(KERN_ERR DEVICE_NAME ": Failed to access MAC hardware..\n");
		device_free_info(pDevice);
		return -ENODEV;
	}
	// initial to reload eeprom
	MACvInitialize(pDevice->PortOffset);
	MACvReadEtherAddress(pDevice->PortOffset, dev->dev_addr);

	device_get_options(pDevice, device_nics-1, dev->name);
	device_set_options(pDevice);
	//Mask out the options cannot be set to the chip
	pDevice->sOpts.flags &= pChip_info->flags;

	//Enable the chip specified capbilities
	pDevice->flags = pDevice->sOpts.flags | (pChip_info->flags & 0xFF000000UL);
	pDevice->tx_80211 = device_dma0_tx_80211;
	pDevice->sMgmtObj.pAdapter = (void *)pDevice;
	pDevice->pMgmt = &(pDevice->sMgmtObj);

	dev->irq = pcid->irq;
	dev->netdev_ops = &device_netdev_ops;
	dev->wireless_handlers = (struct iw_handler_def *)&iwctl_handler_def;

	rc = register_netdev(dev);
	if (rc)
	{
		printk(KERN_ERR DEVICE_NAME " Failed to register netdev\n");
		device_free_info(pDevice);
		return -ENODEV;
	}
//2008-07-21-01<Add>by MikeLiu
//register wpadev
#if 0
	if(wpa_set_wpadev(pDevice, 1)!=0) {
		printk("Fail to Register WPADEV?\n");
		unregister_netdev(pDevice->dev);
		free_netdev(dev);
	}
#endif
	device_print_info(pDevice);
	pci_set_drvdata(pcid, pDevice);
	return 0;
}
/*
 * device_print_info - log the chip name, MAC address, and the bound
 * I/O / memory ranges plus IRQ for this adapter.
 */
static void device_print_info(PSDevice pDevice)
{
	struct net_device* dev=pDevice->dev;

	DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: %s\n",dev->name, get_chip_name(pDevice->chip_id));
	DBG_PRT(MSG_LEVEL_INFO, KERN_INFO "%s: MAC=%pM", dev->name, dev->dev_addr);
#ifdef IO_MAP
	// Port-I/O build: only the I/O base is meaningful.
	DBG_PRT(MSG_LEVEL_INFO, KERN_INFO" IO=0x%lx ",(unsigned long) pDevice->ioaddr);
	DBG_PRT(MSG_LEVEL_INFO, KERN_INFO" IRQ=%d \n", pDevice->dev->irq);
#else
	// Memory-mapped build: show both the I/O base and the remapped
	// register window (PortOffset).
	DBG_PRT(MSG_LEVEL_INFO, KERN_INFO" IO=0x%lx Mem=0x%lx ",
		(unsigned long) pDevice->ioaddr,(unsigned long) pDevice->PortOffset);
	DBG_PRT(MSG_LEVEL_INFO, KERN_INFO" IRQ=%d \n", pDevice->dev->irq);
#endif
}
/*
 * vt6655_init_info - zero the freshly-allocated per-device state, append
 * it to the global device list, and seed the chip-derived fields.
 * Always returns true.
 */
static bool __devinit vt6655_init_info(struct pci_dev* pcid, PSDevice* ppDevice,
				       PCHIP_INFO pChip_info) {
	PSDevice tail;

	memset(*ppDevice, 0, sizeof(DEVICE_INFO));

	if (pDevice_Infos == NULL) {
		pDevice_Infos = *ppDevice;
	} else {
		/* Walk to the end of the list and link the new node in. */
		for (tail = pDevice_Infos; tail->next != NULL; tail = tail->next)
			;
		tail->next = *ppDevice;
		(*ppDevice)->prev = tail;
	}

	(*ppDevice)->pcid = pcid;
	(*ppDevice)->chip_id = pChip_info->chip_id;
	(*ppDevice)->io_size = pChip_info->io_size;
	(*ppDevice)->nTxQueues = pChip_info->nTxQueue;
	(*ppDevice)->multicast_limit = 32;
	spin_lock_init(&((*ppDevice)->lock));
	return true;
}
/*
 * device_get_pci_info - cache PCI config data (revision, subsystem IDs)
 * and the BAR start addresses into the adapter state, and enable bus
 * mastering.  Always returns true.
 */
static bool device_get_pci_info(PSDevice pDevice, struct pci_dev* pcid) {
	u16 pci_cmd;
	u8 b;
	unsigned int cis_addr;
#ifdef PLICE_DEBUG
	unsigned char pci_config[256];
	unsigned char value =0x00;
	int ii,j;
	u16 max_lat=0x0000;
	memset(pci_config,0x00,256);
#endif
	pci_read_config_byte(pcid, PCI_REVISION_ID, &pDevice->byRevId);
	pci_read_config_word(pcid, PCI_SUBSYSTEM_ID,&pDevice->SubSystemID);
	pci_read_config_word(pcid, PCI_SUBSYSTEM_VENDOR_ID, &pDevice->SubVendorID);
	// NOTE(review): pci_cmd is read here but never used afterwards.
	pci_read_config_word(pcid, PCI_COMMAND, (u16 *) & (pci_cmd));

	pci_set_master(pcid);

	// BAR0 = memory-mapped registers, BAR1 = I/O ports, BAR2 = CIS.
	pDevice->memaddr = pci_resource_start(pcid,0);
	pDevice->ioaddr = pci_resource_start(pcid,1);
#ifdef DEBUG
//	pDevice->ioaddr = pci_resource_start(pcid, 0);
//	pDevice->memaddr = pci_resource_start(pcid,1);
#endif
	cis_addr = pci_resource_start(pcid,2);
	pDevice->pcid = pcid;
	// NOTE(review): PCI_COMMAND is a 16-bit register but is accessed as
	// a byte here (the MASTER bit does live in the low byte), and
	// pci_set_master() above already set it — confirm this pair of
	// accesses is still needed.
	pci_read_config_byte(pcid, PCI_COMMAND, &b);
	pci_write_config_byte(pcid, PCI_COMMAND, (b|PCI_COMMAND_MASTER));
#ifdef PLICE_DEBUG
	//pci_read_config_word(pcid,PCI_MAX_LAT,&max_lat);
	//printk("max lat is %x,SubSystemID is %x\n",max_lat,pDevice->SubSystemID);
	//for (ii=0;ii<0xFF;ii++)
	//pci_read_config_word(pcid,PCI_MAX_LAT,&max_lat);
	//max_lat = 0x20;
	//pci_write_config_word(pcid,PCI_MAX_LAT,max_lat);
	//pci_read_config_word(pcid,PCI_MAX_LAT,&max_lat);
	//printk("max lat is %x\n",max_lat);
	// Snapshot the config space, then dump it 16 bytes per line.
	// NOTE(review): the read loop stops at index 0xFE, so
	// pci_config[255] stays zero, yet the dump prints all 256 entries.
	for (ii=0;ii<0xFF;ii++)
	{
		pci_read_config_byte(pcid,ii,&value);
		pci_config[ii] = value;
	}
	for (ii=0,j=1;ii<0x100;ii++,j++)
	{
		if (j %16 == 0)
		{
			printk("%x:",pci_config[ii]);
			printk("\n");
		}
		else
		{
			printk("%x:",pci_config[ii]);
		}
	}
#endif
	return true;
}
/*
 * device_free_info - full teardown of one adapter: shut down the WPA
 * pseudo-device, unlink the state from the global list, unregister the
 * netdev, unmap registers, release PCI regions, and free the netdev.
 */
static void device_free_info(PSDevice pDevice) {
	PSDevice cursor;
	struct net_device* dev = pDevice->dev;

	ASSERT(pDevice);
	/* Tell wpa_supplicant the device is going away, then drop the
	 * registered wpadev. */
	device_release_WPADEV(pDevice);
	if (wpa_set_wpadev(pDevice, 0) != 0)
		printk("unregister wpadev fail?\n");

	if (pDevice_Infos == NULL)
		return;

	/* Locate pDevice in the global list; bail out if it is absent. */
	for (cursor = pDevice_Infos; cursor && (cursor != pDevice); cursor = cursor->next)
		;
	if (cursor != pDevice) {
		DBG_PRT(MSG_LEVEL_ERR, KERN_ERR "info struct not found\n");
		return;
	}
	if (cursor == pDevice_Infos)
		pDevice_Infos = cursor->next;
	else
		cursor->prev->next = cursor->next;

#ifdef HOSTAP
	if (dev)
		vt6655_hostap_set_hostapd(pDevice, 0, 0);
#endif
	if (dev)
		unregister_netdev(dev);

	if (pDevice->PortOffset)
		iounmap((void *)pDevice->PortOffset);

	if (pDevice->pcid)
		pci_release_regions(pDevice->pcid);

	if (dev)
		free_netdev(dev);

	if (pDevice->pcid) {
		pci_set_drvdata(pDevice->pcid, NULL);
	}
}
/*
 * device_init_rings - allocate and lay out the DMA-coherent descriptor
 * rings (RD0/RD1 rx, TD0/TD1 tx) from a single pool, plus the tx bounce
 * buffers (both tx queues, the beacon buffer, and one max-size spare).
 *
 * Returns true on success; false if either coherent allocation fails
 * (the descriptor pool is released again in that case).
 */
static bool device_init_rings(PSDevice pDevice) {
	void* vir_pool;
	/* Byte sizes of each ring; the pool is carved in the order
	 * RD0 | RD1 | TD0 | TD1.  Hoisted into named locals so the
	 * allocate / zero / free sites cannot drift apart. */
	const size_t rx0_size = pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc);
	const size_t rx1_size = pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc);
	const size_t tx0_size = pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc);
	const size_t tx1_size = pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc);
	const size_t pool_size = rx0_size + rx1_size + tx0_size + tx1_size;
	/* Bounce buffers: tx0 | tx1 | beacon | one max-size spare. */
	const size_t bufs_size = pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
				 pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
				 CB_BEACON_BUF_SIZE +
				 CB_MAX_BUF_SIZE;

	/*allocate all RD/TD rings a single pool*/
	vir_pool = pci_alloc_consistent(pDevice->pcid, pool_size, &pDevice->pool_dma);
	if (vir_pool == NULL) {
		DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s : allocate desc dma memory failed\n", pDevice->dev->name);
		return false;
	}
	memset(vir_pool, 0, pool_size);

	pDevice->aRD0Ring = vir_pool;
	pDevice->aRD1Ring = vir_pool + rx0_size;

	pDevice->rd0_pool_dma = pDevice->pool_dma;
	pDevice->rd1_pool_dma = pDevice->rd0_pool_dma + rx0_size;

	pDevice->tx0_bufs = pci_alloc_consistent(pDevice->pcid, bufs_size,
						 &pDevice->tx_bufs_dma0);
	if (pDevice->tx0_bufs == NULL) {
		DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: allocate buf dma memory failed\n", pDevice->dev->name);
		pci_free_consistent(pDevice->pcid, pool_size, vir_pool, pDevice->pool_dma);
		return false;
	}
	memset(pDevice->tx0_bufs, 0, bufs_size);

	pDevice->td0_pool_dma = pDevice->rd1_pool_dma + rx1_size;
	pDevice->td1_pool_dma = pDevice->td0_pool_dma + tx0_size;

	/* vir_pool is void*, so the arithmetic below is byte arithmetic. */
	pDevice->apTD0Rings = vir_pool + rx0_size + rx1_size;
	pDevice->apTD1Rings = vir_pool + rx0_size + rx1_size + tx0_size;

	pDevice->tx1_bufs = pDevice->tx0_bufs +
		pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ;
	pDevice->tx_beacon_bufs = pDevice->tx1_bufs +
		pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ;
	pDevice->pbyTmpBuff = pDevice->tx_beacon_bufs +
		CB_BEACON_BUF_SIZE;

	pDevice->tx_bufs_dma1 = pDevice->tx_bufs_dma0 +
		pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ;
	pDevice->tx_beacon_dma = pDevice->tx_bufs_dma1 +
		pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ;

	return true;
}
/*
 * device_free_rings - mirror of device_init_rings(): release the
 * descriptor pool and, if it was allocated, the tx bounce-buffer pool.
 */
static void device_free_rings(PSDevice pDevice) {
	const size_t pool_size = pDevice->sOpts.nRxDescs0 * sizeof(SRxDesc) +
				 pDevice->sOpts.nRxDescs1 * sizeof(SRxDesc) +
				 pDevice->sOpts.nTxDescs[0] * sizeof(STxDesc) +
				 pDevice->sOpts.nTxDescs[1] * sizeof(STxDesc);

	pci_free_consistent(pDevice->pcid, pool_size,
			    pDevice->aRD0Ring, pDevice->pool_dma);

	if (pDevice->tx0_bufs) {
		const size_t bufs_size = pDevice->sOpts.nTxDescs[0] * PKT_BUF_SZ +
					 pDevice->sOpts.nTxDescs[1] * PKT_BUF_SZ +
					 CB_BEACON_BUF_SIZE +
					 CB_MAX_BUF_SIZE;

		pci_free_consistent(pDevice->pcid, bufs_size,
				    pDevice->tx0_bufs, pDevice->tx_bufs_dma0);
	}
}
/*
 * device_init_rd0_ring - build the RD0 receive ring: allocate the
 * per-descriptor info and rx skb for each entry, and chain the entries
 * both virtually (->next) and by DMA address (->next_desc).
 */
static void device_init_rd0_ring(PSDevice pDevice) {
	int i;
	dma_addr_t curr = pDevice->rd0_pool_dma;
	PSRxDesc desc;

	for (i = 0; i < pDevice->sOpts.nRxDescs0; i++, curr += sizeof(SRxDesc)) {
		desc = &pDevice->aRD0Ring[i];
		desc->pRDInfo = alloc_rd_info();
		ASSERT(desc->pRDInfo);
		if (!device_alloc_rx_buf(pDevice, desc)) {
			DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc rx bufs\n",
				pDevice->dev->name);
		}
		desc->next = &pDevice->aRD0Ring[(i + 1) % pDevice->sOpts.nRxDescs0];
		desc->pRDInfo->curr_desc = cpu_to_le32(curr);
		desc->next_desc = cpu_to_le32(curr + sizeof(SRxDesc));
	}
	/* Close the ring: the last entry's DMA link points at the first. */
	if (i > 0)
		pDevice->aRD0Ring[i - 1].next_desc = cpu_to_le32(pDevice->rd0_pool_dma);
	pDevice->pCurrRD[0] = &pDevice->aRD0Ring[0];
}
/*
 * device_init_rd1_ring - build the RD1 receive ring: allocate the
 * per-descriptor info and rx skb for each entry, and chain the entries
 * both virtually (->next) and by DMA address (->next_desc).
 */
static void device_init_rd1_ring(PSDevice pDevice) {
	int i;
	dma_addr_t curr = pDevice->rd1_pool_dma;
	PSRxDesc desc;

	for (i = 0; i < pDevice->sOpts.nRxDescs1; i++, curr += sizeof(SRxDesc)) {
		desc = &pDevice->aRD1Ring[i];
		desc->pRDInfo = alloc_rd_info();
		ASSERT(desc->pRDInfo);
		if (!device_alloc_rx_buf(pDevice, desc)) {
			DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc rx bufs\n",
				pDevice->dev->name);
		}
		desc->next = &pDevice->aRD1Ring[(i + 1) % pDevice->sOpts.nRxDescs1];
		desc->pRDInfo->curr_desc = cpu_to_le32(curr);
		desc->next_desc = cpu_to_le32(curr + sizeof(SRxDesc));
	}
	/* Close the ring: the last entry's DMA link points at the first. */
	if (i > 0)
		pDevice->aRD1Ring[i - 1].next_desc = cpu_to_le32(pDevice->rd1_pool_dma);
	pDevice->pCurrRD[1] = &pDevice->aRD1Ring[0];
}
/*
 * device_init_defrag_cb - pre-allocate an skb for every defragment
 * control block and mark all CB_MAX_RX_FRAG entries free.
 */
static void device_init_defrag_cb(PSDevice pDevice) {
	int i;

	for (i = 0; i < CB_MAX_RX_FRAG; i++) {
		PSDeFragControlBlock ctl = &pDevice->sRxDFCB[i];

		if (!device_alloc_frag_buf(pDevice, ctl)) {
			DBG_PRT(MSG_LEVEL_ERR,KERN_ERR "%s: can not alloc frag bufs\n",
				pDevice->dev->name);
		}
	}
	pDevice->cbDFCB = CB_MAX_RX_FRAG;
	pDevice->cbFreeDFCB = pDevice->cbDFCB;
}
/*
 * device_free_rd0_ring - release every RD0 entry's rx skb (unmapping its
 * DMA buffer first) and free the per-descriptor info structure.
 */
static void device_free_rd0_ring(PSDevice pDevice) {
	int i;

	for (i = 0; i < pDevice->sOpts.nRxDescs0; i++) {
		PSRxDesc pDesc = &(pDevice->aRD0Ring[i]);
		PDEVICE_RD_INFO pRDInfo = pDesc->pRDInfo;

		/* BUGFIX: device_alloc_rx_buf() can fail, leaving skb NULL
		 * and skb_dma stale — only unmap/free when an skb was
		 * actually attached. */
		if (pRDInfo->skb) {
			pci_unmap_single(pDevice->pcid, pRDInfo->skb_dma,
					 pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(pRDInfo->skb);
		}
		kfree((void *)pDesc->pRDInfo);
	}
}
/*
 * device_free_rd1_ring - release every RD1 entry's rx skb (unmapping its
 * DMA buffer first) and free the per-descriptor info structure.
 */
static void device_free_rd1_ring(PSDevice pDevice) {
	int i;

	for (i = 0; i < pDevice->sOpts.nRxDescs1; i++) {
		PSRxDesc pDesc = &(pDevice->aRD1Ring[i]);
		PDEVICE_RD_INFO pRDInfo = pDesc->pRDInfo;

		/* BUGFIX: device_alloc_rx_buf() can fail, leaving skb NULL
		 * and skb_dma stale — only unmap/free when an skb was
		 * actually attached. */
		if (pRDInfo->skb) {
			pci_unmap_single(pDevice->pcid, pRDInfo->skb_dma,
					 pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
			dev_kfree_skb(pRDInfo->skb);
		}
		kfree((void *)pDesc->pRDInfo);
	}
}
/*
 * device_free_frag_buf - release any skbs still held by the defragment
 * control blocks.
 */
static void device_free_frag_buf(PSDevice pDevice) {
	int idx;

	for (idx = 0; idx < CB_MAX_RX_FRAG; idx++) {
		PSDeFragControlBlock ctl = &pDevice->sRxDFCB[idx];

		if (ctl->skb != NULL)
			dev_kfree_skb(ctl->skb);
	}
}
/*
 * device_init_td0_ring - build the TD0 transmit ring.  With
 * DEVICE_FLAGS_TX_ALIGN each entry also receives its pre-mapped bounce
 * buffer carved from tx0_bufs.
 */
static void device_init_td0_ring(PSDevice pDevice) {
	int i;
	dma_addr_t curr = pDevice->td0_pool_dma;
	PSTxDesc desc;

	for (i = 0; i < pDevice->sOpts.nTxDescs[0]; i++, curr += sizeof(STxDesc)) {
		desc = &pDevice->apTD0Rings[i];
		desc->pTDInfo = alloc_td_info();
		ASSERT(desc->pTDInfo);
		if (pDevice->flags & DEVICE_FLAGS_TX_ALIGN) {
			desc->pTDInfo->buf = pDevice->tx0_bufs + i * PKT_BUF_SZ;
			desc->pTDInfo->buf_dma = pDevice->tx_bufs_dma0 + i * PKT_BUF_SZ;
		}
		desc->next = &pDevice->apTD0Rings[(i + 1) % pDevice->sOpts.nTxDescs[0]];
		desc->pTDInfo->curr_desc = cpu_to_le32(curr);
		desc->next_desc = cpu_to_le32(curr + sizeof(STxDesc));
	}
	/* Close the ring; tail and current both start at entry 0. */
	if (i > 0)
		pDevice->apTD0Rings[i - 1].next_desc = cpu_to_le32(pDevice->td0_pool_dma);
	pDevice->apTailTD[0] = pDevice->apCurrTD[0] = &pDevice->apTD0Rings[0];
}
/*
 * device_init_td1_ring - build the TD1 transmit ring.  With
 * DEVICE_FLAGS_TX_ALIGN each entry also receives its pre-mapped bounce
 * buffer carved from tx1_bufs.
 */
static void device_init_td1_ring(PSDevice pDevice) {
	int i;
	dma_addr_t curr = pDevice->td1_pool_dma;
	PSTxDesc desc;

	for (i = 0; i < pDevice->sOpts.nTxDescs[1]; i++, curr += sizeof(STxDesc)) {
		desc = &pDevice->apTD1Rings[i];
		desc->pTDInfo = alloc_td_info();
		ASSERT(desc->pTDInfo);
		if (pDevice->flags & DEVICE_FLAGS_TX_ALIGN) {
			desc->pTDInfo->buf = pDevice->tx1_bufs + i * PKT_BUF_SZ;
			desc->pTDInfo->buf_dma = pDevice->tx_bufs_dma1 + i * PKT_BUF_SZ;
		}
		desc->next = &pDevice->apTD1Rings[(i + 1) % pDevice->sOpts.nTxDescs[1]];
		desc->pTDInfo->curr_desc = cpu_to_le32(curr);
		desc->next_desc = cpu_to_le32(curr + sizeof(STxDesc));
	}
	/* Close the ring; tail and current both start at entry 0. */
	if (i > 0)
		pDevice->apTD1Rings[i - 1].next_desc = cpu_to_le32(pDevice->td1_pool_dma);
	pDevice->apTailTD[1] = pDevice->apCurrTD[1] = &pDevice->apTD1Rings[0];
}
/*
 * device_free_td0_ring - release every TD0 entry's in-flight skb
 * (unmapping it first when it was DMA-mapped separately from the bounce
 * buffer) and free the per-descriptor info structure.
 */
static void device_free_td0_ring(PSDevice pDevice) {
	int i;

	for (i = 0; i < pDevice->sOpts.nTxDescs[0]; i++) {
		PSTxDesc pDesc = &(pDevice->apTD0Rings[i]);
		PDEVICE_TD_INFO pTDInfo = pDesc->pTDInfo;

		/* BUGFIX: pTDInfo->skb->len was read whenever skb_dma was
		 * set, even with skb == NULL — guard the whole unmap/free
		 * on the skb actually being present. */
		if (pTDInfo->skb) {
			if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma))
				pci_unmap_single(pDevice->pcid, pTDInfo->skb_dma,
						 pTDInfo->skb->len, PCI_DMA_TODEVICE);
			dev_kfree_skb(pTDInfo->skb);
		}
		kfree((void *)pDesc->pTDInfo);
	}
}
/*
 * Tear down the TYPE_AC0DMA transmit ring.  Mirrors device_free_td0_ring()
 * for ring index 1.
 *
 * Fix: check pTDInfo->skb before dereferencing pTDInfo->skb->len in the
 * unmap path (potential NULL pointer dereference in the original).
 */
static void device_free_td1_ring(PSDevice pDevice) {
    int i;
    for (i = 0; i < pDevice->sOpts.nTxDescs[1]; i++) {
        PSTxDesc pDesc = &(pDevice->apTD1Rings[i]);
        PDEVICE_TD_INFO pTDInfo = pDesc->pTDInfo;

        if (pTDInfo->skb) {
            /* pre-allocated buf_dma mappings must not be unmapped */
            if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma))
                pci_unmap_single(pDevice->pcid, pTDInfo->skb_dma,
                                 pTDInfo->skb->len, PCI_DMA_TODEVICE);
            dev_kfree_skb(pTDInfo->skb);
        }
        kfree(pTDInfo);
    }
}
/*-----------------------------------------------------------------*/
/*
 * Service the rx descriptor ring uIdx: walk descriptors the NIC has handed
 * back to the host, pass each one to device_receive_frame(), then re-arm
 * the descriptor for the NIC.  At most 16 descriptors are processed per
 * call so the caller (interrupt path) cannot monopolise the CPU.
 * Returns the number of descriptors processed.
 */
static int device_rx_srv(PSDevice pDevice, unsigned int uIdx) {
PSRxDesc pRD;
int works = 0;
/* f1Owner == OWNED_BY_HOST means the NIC has finished with this entry */
for (pRD = pDevice->pCurrRD[uIdx];
pRD->m_rd0RD0.f1Owner == OWNED_BY_HOST;
pRD = pRD->next) {
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->pCurrRD = %x, works = %d\n", pRD, works);
if (works++>15)
break;
if (device_receive_frame(pDevice, pRD)) {
/* the frame's skb was consumed; attach a fresh rx buffer */
if (!device_alloc_rx_buf(pDevice,pRD)) {
DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
"%s: can not allocate rx buf\n", pDevice->dev->name);
break;
}
}
/* hand the descriptor back to the NIC */
pRD->m_rd0RD0.f1Owner = OWNED_BY_NIC;
pDevice->dev->last_rx = jiffies;
}
/* remember where to resume on the next service call */
pDevice->pCurrRD[uIdx]=pRD;
return works;
}
/*
 * Attach a freshly allocated skb (and its streaming DMA mapping) to one rx
 * descriptor and mark the descriptor as owned by the NIC.
 * Returns false when dev_alloc_skb() fails; the descriptor is then left
 * untouched (with pRDInfo->skb == NULL).
 */
static bool device_alloc_rx_buf(PSDevice pDevice, PSRxDesc pRD) {
PDEVICE_RD_INFO pRDInfo=pRD->pRDInfo;
pRDInfo->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
#ifdef PLICE_DEBUG
//printk("device_alloc_rx_buf:skb is %x\n",pRDInfo->skb);
#endif
if (pRDInfo->skb==NULL)
return false;
ASSERT(pRDInfo->skb);
pRDInfo->skb->dev = pDevice->dev;
/* map the skb data area for device-to-host DMA */
pRDInfo->skb_dma = pci_map_single(pDevice->pcid, skb_tail_pointer(pRDInfo->skb),
pDevice->rx_buf_sz, PCI_DMA_FROMDEVICE);
/* clear the whole RD0 status word in one store before refilling fields */
*((unsigned int *) &(pRD->m_rd0RD0)) = 0; /* FIX cast */
pRD->m_rd0RD0.wResCount = cpu_to_le16(pDevice->rx_buf_sz);
pRD->m_rd0RD0.f1Owner = OWNED_BY_NIC;
pRD->m_rd1RD1.wReqCount = cpu_to_le16(pDevice->rx_buf_sz);
pRD->buff_addr = cpu_to_le32(pRDInfo->skb_dma);
return true;
}
/*
 * Allocate an skb for one entry of the de-fragmentation control block.
 * Returns false when the allocation fails, true otherwise.
 *
 * Fix: dropped the dead ASSERT(pDeF->skb) that immediately followed the
 * explicit NULL check — it could never fire.
 */
bool device_alloc_frag_buf(PSDevice pDevice, PSDeFragControlBlock pDeF) {
    pDeF->skb = dev_alloc_skb((int)pDevice->rx_buf_sz);
    if (pDeF->skb == NULL)
        return false;
    pDeF->skb->dev = pDevice->dev;
    return true;
}
/*
 * Reclaim completed transmit descriptors on ring uIdx (called from the
 * interrupt path).  For each descriptor the NIC has returned: update the
 * tx statistics, hand hostapd-private skbs back to the stack, re-queue
 * frames that failed because the destination STA is power-saving, then
 * free the tx buffer and release the descriptor.  At most 16 descriptors
 * are processed per call.  Returns the number processed.
 */
static int device_tx_srv(PSDevice pDevice, unsigned int uIdx) {
PSTxDesc pTD;
bool bFull=false;
int works = 0;
unsigned char byTsr0;
unsigned char byTsr1;
unsigned int uFrameSize, uFIFOHeaderSize;
PSTxBufHead pTxBufHead;
struct net_device_stats* pStats = &pDevice->stats;
struct sk_buff* skb;
unsigned int uNodeIndex;
PSMgmtObject pMgmt = pDevice->pMgmt;
/* walk from the oldest in-flight descriptor while any are outstanding */
for (pTD = pDevice->apTailTD[uIdx]; pDevice->iTDUsed[uIdx] >0; pTD = pTD->next) {
if (pTD->m_td0TD0.f1Owner == OWNED_BY_NIC)
break;
if (works++>15)
break;
byTsr0 = pTD->m_td0TD0.byTSR0;
byTsr1 = pTD->m_td0TD0.byTSR1;
//Only the status of first TD in the chain is correct
if (pTD->m_td1TD1.byTCR & TCR_STP) {
if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) {
uFIFOHeaderSize = pTD->pTDInfo->dwHeaderLength;
uFrameSize = pTD->pTDInfo->dwReqCount - uFIFOHeaderSize;
pTxBufHead = (PSTxBufHead) (pTD->pTDInfo->buf);
// Update the statistics based on the Transmit status
// now, we DO'NT check TSR0_CDH
STAvUpdateTDStatCounter(&pDevice->scStatistic,
byTsr0, byTsr1,
(unsigned char *)(pTD->pTDInfo->buf + uFIFOHeaderSize),
uFrameSize, uIdx);
BSSvUpdateNodeTxCounter(pDevice,
byTsr0, byTsr1,
(unsigned char *)(pTD->pTDInfo->buf),
uFIFOHeaderSize
);
/* TSR1_TERR clear: the frame went out (possibly with retries) */
if ( !(byTsr1 & TSR1_TERR)) {
if (byTsr0 != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X].\n",
(int)uIdx, byTsr1, byTsr0);
}
if ((pTxBufHead->wFragCtl & FRAGCTL_ENDFRAG) != FRAGCTL_NONFRAG) {
pDevice->s802_11Counter.TransmittedFragmentCount ++;
}
pStats->tx_packets++;
pStats->tx_bytes += pTD->pTDInfo->skb->len;
}
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Tx[%d] dropped & tsr1[%02X] tsr0[%02X].\n",
(int)uIdx, byTsr1, byTsr0);
pStats->tx_errors++;
pStats->tx_dropped++;
}
}
/* hostapd mode: loop private skbs back into the stack on the AP netdev */
if ((pTD->pTDInfo->byFlags & TD_FLAGS_PRIV_SKB) != 0) {
if (pDevice->bEnableHostapd) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "tx call back netif.. \n");
skb = pTD->pTDInfo->skb;
skb->dev = pDevice->apdev;
skb_reset_mac_header(skb);
skb->pkt_type = PACKET_OTHERHOST;
//skb->protocol = htons(ETH_P_802_2);
memset(skb->cb, 0, sizeof(skb->cb));
netif_rx(skb);
}
}
/* transmit error: in AP mode, re-queue the frame for a power-saving STA */
if (byTsr1 & TSR1_TERR) {
if ((pTD->pTDInfo->byFlags & TD_FLAGS_PRIV_SKB) != 0) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X].\n",
(int)uIdx, byTsr1, byTsr0);
}
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Tx[%d] fail has error. tsr1[%02X] tsr0[%02X].\n",
// (int)uIdx, byTsr1, byTsr0);
if ((pMgmt->eCurrMode == WMAC_MODE_ESS_AP) &&
(pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)) {
unsigned short wAID;
unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
skb = pTD->pTDInfo->skb;
if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data), &uNodeIndex)) {
if (pMgmt->sNodeDBTable[uNodeIndex].bPSEnable) {
/* park the skb on the STA's PS queue and flag it in the TIM map */
skb_queue_tail(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue, skb);
pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt++;
// set tx map
wAID = pMgmt->sNodeDBTable[uNodeIndex].wAID;
pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
/* clear NETIF_SKB so device_free_tx_buf() won't free the re-queued skb */
pTD->pTDInfo->byFlags &= ~(TD_FLAGS_NETIF_SKB);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "tx_srv:tx fail re-queue sta index= %d, QueCnt= %d\n"
,(int)uNodeIndex, pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt);
/* undo the error accounting done above for this frame */
pStats->tx_errors--;
pStats->tx_dropped--;
}
}
}
}
device_free_tx_buf(pDevice,pTD);
pDevice->iTDUsed[uIdx]--;
}
}
if (uIdx == TYPE_AC0DMA) {
// RESERV_AC0DMA reserved for relay
if (AVAIL_TD(pDevice, uIdx) < RESERV_AC0DMA) {
bFull = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " AC0DMA is Full = %d\n", pDevice->iTDUsed[uIdx]);
}
/* descriptors were reclaimed: restart the stack's tx queue if possible */
if (netif_queue_stopped(pDevice->dev) && (bFull==false)){
netif_wake_queue(pDevice->dev);
}
}
pDevice->apTailTD[uIdx] = pTD;
return works;
}
/*
 * React to a fatal-error interrupt status: stop the tx queue, cancel the
 * pending command and second-tick timers, and shut the MAC down.  Any
 * status without ISR_FETALERR set is ignored.
 */
static void device_error(PSDevice pDevice, unsigned short status) {
    if (!(status & ISR_FETALERR))
        return;

    DBG_PRT(MSG_LEVEL_ERR, KERN_ERR
            "%s: Hardware fatal error.\n",
            pDevice->dev->name);
    netif_stop_queue(pDevice->dev);
    del_timer(&pDevice->sTimerCommand);
    del_timer(&(pDevice->pMgmt->sTimerSecondCallback));
    pDevice->bCmdRunning = false;
    MACbShutdown(pDevice->PortOffset);
}
/*
 * Release the skb attached to a reclaimed tx descriptor and reset the
 * descriptor's bookkeeping.  Per-skb streaming DMA mappings are unmapped;
 * the pre-allocated aligned bounce buffer (buf_dma) is not.  Only skbs
 * that came from the network stack (TD_FLAGS_NETIF_SKB) are freed here —
 * re-queued/private skbs are owned elsewhere.
 *
 * Fix: reset the skb pointer with NULL rather than the integer literal 0.
 */
static void device_free_tx_buf(PSDevice pDevice, PSTxDesc pDesc) {
    PDEVICE_TD_INFO pTDInfo = pDesc->pTDInfo;
    struct sk_buff *skb = pTDInfo->skb;

    // pre-allocated buf_dma can't be unmapped.
    if (pTDInfo->skb_dma && (pTDInfo->skb_dma != pTDInfo->buf_dma)) {
        pci_unmap_single(pDevice->pcid, pTDInfo->skb_dma, skb->len,
                         PCI_DMA_TODEVICE);
    }
    if ((pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0)
        dev_kfree_skb_irq(skb);
    pTDInfo->skb_dma = 0;
    pTDInfo->skb = NULL;
    pTDInfo->byFlags = 0;
}
//PLICE_DEBUG ->
/* Reset the management-frame receive queue to its empty state. */
void InitRxManagementQueue(PSDevice pDevice)
{
    pDevice->rxManeQueue.head = 0;
    pDevice->rxManeQueue.tail = 0;
    pDevice->rxManeQueue.packet_num = 0;
}
//PLICE_DEBUG<-
//PLICE_DEBUG ->
/*
 * Kernel thread that drains the management-frame receive queue: under the
 * device lock it dequeues every pending packet and feeds it to
 * vMgrRxManagePacket(), then yields.  The thread exits (returning 0) once
 * the global mlme_kill flag is cleared by device_close().
 */
int MlmeThread(
void * Context)
{
PSDevice pDevice = (PSDevice) Context;
PSRxMgmtPacket pRxMgmtPacket;
// int i ;
//complete(&pDevice->notify);
//printk("Enter MngWorkItem,Queue packet num is %d\n",pDevice->rxManeQueue.packet_num);
//printk("Enter MlmeThread,packet _num is %d\n",pDevice->rxManeQueue.packet_num);
//i = 0;
#if 1
while (1)
{
//printk("DDDD\n");
//down(&pDevice->mlme_semaphore);
// pRxMgmtPacket = DeQueue(pDevice);
#if 1
/* drain the whole queue under the device lock */
spin_lock_irq(&pDevice->lock);
while(pDevice->rxManeQueue.packet_num != 0)
{
pRxMgmtPacket = DeQueue(pDevice);
//pDevice;
//DequeueManageObject(pDevice->FirstRecvMngList, pDevice->LastRecvMngList);
vMgrRxManagePacket(pDevice, pDevice->pMgmt, pRxMgmtPacket);
//printk("packet_num is %d\n",pDevice->rxManeQueue.packet_num);
}
spin_unlock_irq(&pDevice->lock);
/* mlme_kill cleared => device is closing, exit the thread */
if (mlme_kill == 0)
break;
//udelay(200);
#endif
//printk("Before schedule thread jiffies is %x\n",jiffies);
/* yield between drain passes; re-check the kill flag afterwards */
schedule();
//printk("after schedule thread jiffies is %x\n",jiffies);
if (mlme_kill == 0)
break;
//printk("i is %d\n",i);
}
#endif
return 0;
}
/*
 * net_device open handler: allocate and initialise the rx/tx descriptor
 * rings, hook the shared interrupt, start the MLME worker, bring the
 * MAC/BBP registers up, arm the management timers and schedule the initial
 * scan (station mode) or AP start.  Returns 0 on success or a negative
 * errno.
 *
 * Fixes: on request_irq() failure the descriptor pools are now freed
 * instead of leaked; on kthread_run() failure the rings and the irq are
 * released and the real error code is propagated instead of a bare -1.
 * Dead #if 0 / commented-out debug code removed.
 */
static int device_open(struct net_device *dev) {
    PSDevice pDevice = (PSDevice) netdev_priv(dev);
    int i;
#ifdef WPA_SM_Transtatus
    extern SWPAResult wpa_Result;
#endif
    pDevice->rx_buf_sz = PKT_BUF_SZ;
    if (!device_init_rings(pDevice)) {
        return -ENOMEM;
    }
    //2008-5-13 <add> by chester
    i = request_irq(pDevice->pcid->irq, &device_intr, IRQF_SHARED, dev->name, dev);
    if (i) {
        /* fix: don't leak the DMA descriptor pools when the irq is unavailable */
        device_free_rings(pDevice);
        return i;
    }
#ifdef WPA_SM_Transtatus
    memset(wpa_Result.ifname, 0, sizeof(wpa_Result.ifname));
    wpa_Result.proto = 0;
    wpa_Result.key_mgmt = 0;
    wpa_Result.eap_type = 0;
    wpa_Result.authenticated = false;
    pDevice->fWPA_Authened = false;
#endif
    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "call device init rd0 ring\n");
    device_init_rd0_ring(pDevice);
    device_init_rd1_ring(pDevice);
    device_init_defrag_cb(pDevice);
    device_init_td0_ring(pDevice);
    device_init_td1_ring(pDevice);
    if (pDevice->bDiversityRegCtlON) {
        device_init_diversity_timer(pDevice);
    }
    vMgrObjectInit(pDevice);
    vMgrTimerInit(pDevice);
#ifdef TASK_LET
    tasklet_init(&pDevice->RxMngWorkItem, (void *)MngWorkItem, (unsigned long)pDevice);
#endif
#ifdef THREAD
    InitRxManagementQueue(pDevice);
    mlme_kill = 0;
    mlme_task = kthread_run(MlmeThread, (void *)pDevice, "MLME");
    if (IS_ERR(mlme_task)) {
        printk("thread create fail\n");
        /* fix: undo everything acquired so far instead of leaking it,
         * and return the real error code */
        device_free_td0_ring(pDevice);
        device_free_td1_ring(pDevice);
        device_free_rd0_ring(pDevice);
        device_free_rd1_ring(pDevice);
        device_free_frag_buf(pDevice);
        device_free_rings(pDevice);
        free_irq(pDevice->pcid->irq, dev);
        return PTR_ERR(mlme_task);
    }
    mlme_kill = 1;
#endif
    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "call device_init_registers\n");
    device_init_registers(pDevice, DEVICE_INIT_COLD);
    MACvReadEtherAddress(pDevice->PortOffset, pDevice->abyCurrentNetAddr);
    memcpy(pDevice->pMgmt->abyMACAddr, pDevice->abyCurrentNetAddr, ETH_ALEN);
    device_set_multi(pDevice->dev);
    // Init for Key Management
    KeyvInitTable(&pDevice->sKey, pDevice->PortOffset);
    add_timer(&(pDevice->pMgmt->sTimerSecondCallback));
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
    pDevice->bwextcount = 0;
    pDevice->bWPASuppWextEnabled = false;
#endif
    pDevice->byReAssocCount = 0;
    pDevice->bWPADEVUp = false;
    // Patch: if WEP key already set by iwconfig but device not yet open
    if ((pDevice->bEncryptionEnable == true) && (pDevice->bTransmitKey == true)) {
        KeybSetDefaultKey(&(pDevice->sKey),
                          (unsigned long)(pDevice->byKeyIndex | (1 << 31)),
                          pDevice->uKeyLength,
                          NULL,
                          pDevice->abyKey,
                          KEY_CTL_WEP,
                          pDevice->PortOffset,
                          pDevice->byLocalID
        );
        pDevice->eEncryptionStatus = Ndis802_11Encryption1Enabled;
    }
    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "call MACvIntEnable\n");
    MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
    if (pDevice->pMgmt->eConfigMode == WMAC_CONFIG_AP) {
        bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
    }
    else {
        bScheduleCommand((void *)pDevice, WLAN_CMD_BSSID_SCAN, NULL);
        bScheduleCommand((void *)pDevice, WLAN_CMD_SSID, NULL);
    }
    pDevice->flags |= DEVICE_FLAGS_OPENED;
    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_open success.. \n");
    return 0;
}
/*
 * net_device stop handler: signal the MLME thread to exit, disassociate if
 * a link is up, cancel all timers, quiesce and reset the MAC, power the
 * radio off, and release the descriptor rings, the node database and the
 * interrupt line.  Always returns 0.
 */
static int device_close(struct net_device *dev) {
PSDevice pDevice=(PSDevice) netdev_priv(dev);
PSMgmtObject pMgmt = pDevice->pMgmt;
//PLICE_DEBUG->
#ifdef THREAD
/* MlmeThread polls this flag and exits when it reads 0 */
mlme_kill = 0;
#endif
//PLICE_DEBUG<-
//2007-1121-02<Add>by EinsnLiu
if (pDevice->bLinkPass) {
/* tell the AP we are leaving; the mdelay gives the frame time to go out */
bScheduleCommand((void *)pDevice, WLAN_CMD_DISASSOCIATE, NULL);
mdelay(30);
}
#ifdef TxInSleep
del_timer(&pDevice->sTimerTxData);
#endif
del_timer(&pDevice->sTimerCommand);
del_timer(&pMgmt->sTimerSecondCallback);
if (pDevice->bDiversityRegCtlON) {
del_timer(&pDevice->TimerSQ3Tmax1);
del_timer(&pDevice->TimerSQ3Tmax2);
del_timer(&pDevice->TimerSQ3Tmax3);
}
#ifdef TASK_LET
tasklet_kill(&pDevice->RxMngWorkItem);
#endif
netif_stop_queue(dev);
pDevice->bCmdRunning = false;
/* stop and reset the hardware before tearing down the rings it DMAs into */
MACbShutdown(pDevice->PortOffset);
MACbSoftwareReset(pDevice->PortOffset);
CARDbRadioPowerOff(pDevice);
pDevice->bLinkPass = false;
memset(pMgmt->abyCurrBSSID, 0, 6);
pMgmt->eCurrState = WMAC_STATE_IDLE;
device_free_td0_ring(pDevice);
device_free_td1_ring(pDevice);
device_free_rd0_ring(pDevice);
device_free_rd1_ring(pDevice);
device_free_frag_buf(pDevice);
device_free_rings(pDevice);
BSSvClearNodeDBTable(pDevice, 0);
free_irq(dev->irq, dev);
pDevice->flags &=(~DEVICE_FLAGS_OPENED);
//2008-0714-01<Add>by chester
device_release_WPADEV(pDevice);
//PLICE_DEBUG->
//tasklet_kill(&pDevice->RxMngWorkItem);
//PLICE_DEBUG<-
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close.. \n");
return 0;
}
/*
 * Queue one raw 802.11 frame on the TYPE_TXDMA0 ring.  The frame is
 * silently dropped (skb freed) when no descriptor is available or when
 * tx on ring 0 is administratively stopped.  Always reports success to
 * the caller.
 */
static int device_dma0_tx_80211(struct sk_buff *skb, struct net_device *dev) {
    PSDevice pDevice = netdev_priv(dev);

    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_tx_80211\n");
    spin_lock_irq(&pDevice->lock);
    if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
        /* no free descriptor on ring 0: drop the frame */
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_tx_80211, td0 <=0\n");
        dev_kfree_skb_irq(skb);
    } else if (pDevice->bStopTx0Pkt == true) {
        /* ring 0 tx administratively disabled: drop the frame */
        dev_kfree_skb_irq(skb);
    } else {
        vDMA0_tx_80211(pDevice, skb, skb->data, skb->len);
    }
    spin_unlock_irq(&pDevice->lock);
    return 0;
}
/*
 * Transmit one Ethernet-framed packet on the TYPE_TXDMA0 ring (caller
 * holds the device lock).  Builds the FIFO/MAC headers, selects rate,
 * preamble and packet type, optionally attaches the host-WEP key for the
 * destination node, flips descriptor ownership to the NIC and kicks DMA0.
 * Returns false (with the skb freed) when tx is stopped, no descriptors
 * are free, or no station is associated in AP mode; true on success.
 */
bool device_dma0_xmit(PSDevice pDevice, struct sk_buff *skb, unsigned int uNodeIndex) {
PSMgmtObject pMgmt = pDevice->pMgmt;
PSTxDesc pHeadTD, pLastTD;
unsigned int cbFrameBodySize;
unsigned int uMACfragNum;
unsigned char byPktType;
bool bNeedEncryption = false;
PSKeyItem pTransmitKey = NULL;
unsigned int cbHeaderSize;
unsigned int ii;
SKeyItem STempKey;
// unsigned char byKeyIndex = 0;
if (pDevice->bStopTx0Pkt == true) {
dev_kfree_skb_irq(skb);
return false;
}
if (AVAIL_TD(pDevice, TYPE_TXDMA0) <= 0) {
dev_kfree_skb_irq(skb);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_xmit, td0 <=0\n");
return false;
}
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
if (pDevice->uAssocCount == 0) {
dev_kfree_skb_irq(skb);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_dma0_xmit, assocCount = 0\n");
return false;
}
}
pHeadTD = pDevice->apCurrTD[TYPE_TXDMA0];
pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
/* cache the Ethernet header; sTxEthHeader.wType is read from this copy */
memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)(skb->data), ETH_HLEN);
cbFrameBodySize = skb->len - ETH_HLEN;
// 802.1H
/* EtherType frames gain an 8-byte SNAP header on the air */
if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
cbFrameBodySize += 8;
}
uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
if ( uMACfragNum > AVAIL_TD(pDevice, TYPE_TXDMA0)) {
dev_kfree_skb_irq(skb);
return false;
}
byPktType = (unsigned char)pDevice->byPacketType;
/* rate selection: clamp a fixed rate to the current PHY's maximum,
 * otherwise use the per-node data rate from the node database */
if (pDevice->bFixRate) {
if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
if (pDevice->uConnectionRate >= RATE_11M) {
pDevice->wCurrentRate = RATE_11M;
} else {
pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
} else {
if (pDevice->uConnectionRate >= RATE_54M)
pDevice->wCurrentRate = RATE_54M;
else
pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
}
else {
pDevice->wCurrentRate = pDevice->pMgmt->sNodeDBTable[uNodeIndex].wTxDataRate;
}
//preamble type
if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble) {
pDevice->byPreambleType = pDevice->byShortPreamble;
}
else {
pDevice->byPreambleType = PREAMBLE_LONG;
}
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dma0: pDevice->wCurrentRate = %d \n", pDevice->wCurrentRate);
/* derive the on-air packet type from rate and PHY */
if (pDevice->wCurrentRate <= RATE_11M) {
byPktType = PK_TYPE_11B;
} else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
byPktType = PK_TYPE_11A;
} else {
if (pDevice->bProtectMode == true) {
byPktType = PK_TYPE_11GB;
} else {
byPktType = PK_TYPE_11GA;
}
}
if (pDevice->bEncryptionEnable == true)
bNeedEncryption = true;
/* host-WEP mode: build a temporary key from the destination node entry */
if (pDevice->bEnableHostWEP) {
pTransmitKey = &STempKey;
pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
memcpy(pTransmitKey->abyKey,
&pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
pTransmitKey->uKeyLength
);
}
vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
cbFrameBodySize, TYPE_TXDMA0, pHeadTD,
&pDevice->sTxEthHeader, (unsigned char *)skb->data, pTransmitKey, uNodeIndex,
&uMACfragNum,
&cbHeaderSize
);
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
pDevice->bPWBitOn = false;
pLastTD = pHeadTD;
/* hand every fragment's descriptor to the NIC; wmb() orders the header
 * writes before the ownership flip the hardware polls */
for (ii = 0; ii < uMACfragNum; ii++) {
// Poll Transmit the adapter
wmb();
pHeadTD->m_td0TD0.f1Owner=OWNED_BY_NIC;
wmb();
if (ii == (uMACfragNum - 1))
pLastTD = pHeadTD;
pHeadTD = pHeadTD->next;
}
// Save the information needed by the tx interrupt handler
// to complete the Send request
pLastTD->pTDInfo->skb = skb;
pLastTD->pTDInfo->byFlags = 0;
pLastTD->pTDInfo->byFlags |= TD_FLAGS_NETIF_SKB;
pDevice->apCurrTD[TYPE_TXDMA0] = pHeadTD;
MACvTransmit0(pDevice->PortOffset);
return true;
}
//TYPE_AC0DMA data tx
/*
 * ndo_start_xmit handler: transmit one data frame on the TYPE_AC0DMA ring.
 * Drops the frame (skb freed, 0 returned) when the link is down, data tx
 * is stopped, or — in AP mode — the destination STA is unknown; queues it
 * on the STA's power-save queue instead of transmitting when the STA is
 * dozing.  Otherwise selects key/rate/preamble/packet type, builds the
 * FIFO headers, flips descriptor ownership to the NIC and kicks AC0 DMA.
 * Always returns 0 to the network stack.
 */
static int device_xmit(struct sk_buff *skb, struct net_device *dev) {
PSDevice pDevice=netdev_priv(dev);
PSMgmtObject pMgmt = pDevice->pMgmt;
PSTxDesc pHeadTD, pLastTD;
unsigned int uNodeIndex = 0;
unsigned char byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80};
unsigned short wAID;
unsigned int uMACfragNum = 1;
unsigned int cbFrameBodySize;
unsigned char byPktType;
unsigned int cbHeaderSize;
bool bNeedEncryption = false;
PSKeyItem pTransmitKey = NULL;
SKeyItem STempKey;
unsigned int ii;
bool bTKIP_UseGTK = false;
bool bNeedDeAuth = false;
unsigned char *pbyBSSID;
bool bNodeExist = false;
spin_lock_irq(&pDevice->lock);
if (pDevice->bLinkPass == false) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
if (pDevice->bStopDataPkt) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
/* AP mode: resolve the destination STA and honour power-save queueing */
if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) {
if (pDevice->uAssocCount == 0) {
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
if (is_multicast_ether_addr((unsigned char *)(skb->data))) {
uNodeIndex = 0;
bNodeExist = true;
if (pMgmt->sNodeDBTable[0].bPSEnable) {
/* some STA is dozing: buffer broadcast/multicast on queue 0 */
skb_queue_tail(&(pMgmt->sNodeDBTable[0].sTxPSQueue), skb);
pMgmt->sNodeDBTable[0].wEnQueueCnt++;
// set tx map
pMgmt->abyPSTxMap[0] |= byMask[0];
spin_unlock_irq(&pDevice->lock);
return 0;
}
}else {
if (BSSDBbIsSTAInNodeDB(pMgmt, (unsigned char *)(skb->data), &uNodeIndex)) {
if (pMgmt->sNodeDBTable[uNodeIndex].bPSEnable) {
/* destination STA is dozing: buffer and flag it in the TIM map */
skb_queue_tail(&pMgmt->sNodeDBTable[uNodeIndex].sTxPSQueue, skb);
pMgmt->sNodeDBTable[uNodeIndex].wEnQueueCnt++;
// set tx map
wAID = pMgmt->sNodeDBTable[uNodeIndex].wAID;
pMgmt->abyPSTxMap[wAID >> 3] |= byMask[wAID & 7];
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Set:pMgmt->abyPSTxMap[%d]= %d\n",
(wAID >> 3), pMgmt->abyPSTxMap[wAID >> 3]);
spin_unlock_irq(&pDevice->lock);
return 0;
}
if (pMgmt->sNodeDBTable[uNodeIndex].bShortPreamble) {
pDevice->byPreambleType = pDevice->byShortPreamble;
}else {
pDevice->byPreambleType = PREAMBLE_LONG;
}
bNodeExist = true;
}
}
if (bNodeExist == false) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"Unknown STA not found in node DB \n");
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
}
pHeadTD = pDevice->apCurrTD[TYPE_AC0DMA];
pHeadTD->m_td1TD1.byTCR = (TCR_EDP|TCR_STP);
/* cache the Ethernet header; sTxEthHeader.wType is read from this copy */
memcpy(pDevice->sTxEthHeader.abyDstAddr, (unsigned char *)(skb->data), ETH_HLEN);
cbFrameBodySize = skb->len - ETH_HLEN;
// 802.1H
/* EtherType frames gain an 8-byte SNAP header on the air */
if (ntohs(pDevice->sTxEthHeader.wType) > ETH_DATA_LEN) {
cbFrameBodySize += 8;
}
/* select the transmit key: pairwise first, then group, per current mode */
if (pDevice->bEncryptionEnable == true) {
bNeedEncryption = true;
// get Transmit key
do {
if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) &&
(pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
pbyBSSID = pDevice->abyBSSID;
// get pairwise key
if (KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == false) {
// get group key
if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == true) {
bTKIP_UseGTK = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"Get GTK.\n");
break;
}
} else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"Get PTK.\n");
break;
}
}else if (pDevice->pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
pbyBSSID = pDevice->sTxEthHeader.abyDstAddr; //TO_DS = 0 and FROM_DS = 0 --> 802.11 MAC Address1
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"IBSS Serach Key: \n");
for (ii = 0; ii< 6; ii++)
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"%x \n", *(pbyBSSID+ii));
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"\n");
// get pairwise key
if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, PAIRWISE_KEY, &pTransmitKey) == true)
break;
}
// get group key
pbyBSSID = pDevice->abyBroadcastAddr;
if(KeybGetTransmitKey(&(pDevice->sKey), pbyBSSID, GROUP_KEY, &pTransmitKey) == false) {
pTransmitKey = NULL;
if (pDevice->pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"IBSS and KEY is NULL. [%d]\n", pDevice->pMgmt->eCurrMode);
}
else
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"NOT IBSS and KEY is NULL. [%d]\n", pDevice->pMgmt->eCurrMode);
} else {
bTKIP_UseGTK = true;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"Get GTK.\n");
}
} while(false);
}
/* host-WEP mode: build a temporary key from the destination node entry */
if (pDevice->bEnableHostWEP) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_DEBUG"acdma0: STA index %d\n", uNodeIndex);
if (pDevice->bEncryptionEnable == true) {
pTransmitKey = &STempKey;
pTransmitKey->byCipherSuite = pMgmt->sNodeDBTable[uNodeIndex].byCipherSuite;
pTransmitKey->dwKeyIndex = pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex;
pTransmitKey->uKeyLength = pMgmt->sNodeDBTable[uNodeIndex].uWepKeyLength;
pTransmitKey->dwTSC47_16 = pMgmt->sNodeDBTable[uNodeIndex].dwTSC47_16;
pTransmitKey->wTSC15_0 = pMgmt->sNodeDBTable[uNodeIndex].wTSC15_0;
memcpy(pTransmitKey->abyKey,
&pMgmt->sNodeDBTable[uNodeIndex].abyWepKey[0],
pTransmitKey->uKeyLength
);
}
}
uMACfragNum = cbGetFragCount(pDevice, pTransmitKey, cbFrameBodySize, &pDevice->sTxEthHeader);
if (uMACfragNum > AVAIL_TD(pDevice, TYPE_AC0DMA)) {
DBG_PRT(MSG_LEVEL_ERR, KERN_DEBUG "uMACfragNum > AVAIL_TD(TYPE_AC0DMA) = %d\n", uMACfragNum);
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
if (pTransmitKey != NULL) {
if ((pTransmitKey->byCipherSuite == KEY_CTL_WEP) &&
(pTransmitKey->uKeyLength == WLAN_WEP232_KEYLEN)) {
uMACfragNum = 1; //WEP256 doesn't support fragment
}
}
byPktType = (unsigned char)pDevice->byPacketType;
/* rate selection: clamp a fixed rate to the current PHY's limits,
 * otherwise ask the rate-adaptation code for this destination */
if (pDevice->bFixRate) {
#ifdef PLICE_DEBUG
printk("Fix Rate: PhyType is %d,ConnectionRate is %d\n",pDevice->eCurrentPHYType,pDevice->uConnectionRate);
#endif
if (pDevice->eCurrentPHYType == PHY_TYPE_11B) {
if (pDevice->uConnectionRate >= RATE_11M) {
pDevice->wCurrentRate = RATE_11M;
} else {
pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
} else {
if ((pDevice->eCurrentPHYType == PHY_TYPE_11A) &&
(pDevice->uConnectionRate <= RATE_6M)) {
pDevice->wCurrentRate = RATE_6M;
} else {
if (pDevice->uConnectionRate >= RATE_54M)
pDevice->wCurrentRate = RATE_54M;
else
pDevice->wCurrentRate = (unsigned short)pDevice->uConnectionRate;
}
}
pDevice->byACKRate = (unsigned char) pDevice->wCurrentRate;
pDevice->byTopCCKBasicRate = RATE_1M;
pDevice->byTopOFDMBasicRate = RATE_6M;
}
else {
//auto rate
/* EAPOL frames always go at the lowest basic rate for reliability */
if (pDevice->sTxEthHeader.wType == TYPE_PKT_802_1x) {
if (pDevice->eCurrentPHYType != PHY_TYPE_11A) {
pDevice->wCurrentRate = RATE_1M;
pDevice->byACKRate = RATE_1M;
pDevice->byTopCCKBasicRate = RATE_1M;
pDevice->byTopOFDMBasicRate = RATE_6M;
} else {
pDevice->wCurrentRate = RATE_6M;
pDevice->byACKRate = RATE_6M;
pDevice->byTopCCKBasicRate = RATE_1M;
pDevice->byTopOFDMBasicRate = RATE_6M;
}
}
else {
VNTWIFIvGetTxRate( pDevice->pMgmt,
pDevice->sTxEthHeader.abyDstAddr,
&(pDevice->wCurrentRate),
&(pDevice->byACKRate),
&(pDevice->byTopCCKBasicRate),
&(pDevice->byTopOFDMBasicRate));
#if 0
printk("auto rate:Rate : %d,AckRate:%d,TopCCKRate:%d,TopOFDMRate:%d\n",
pDevice->wCurrentRate,pDevice->byACKRate,
pDevice->byTopCCKBasicRate,pDevice->byTopOFDMBasicRate);
#endif
#if 0
pDevice->wCurrentRate = 11;
pDevice->byACKRate = 8;
pDevice->byTopCCKBasicRate = 3;
pDevice->byTopOFDMBasicRate = 8;
#endif
}
}
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "acdma0: pDevice->wCurrentRate = %d \n", pDevice->wCurrentRate);
/* derive the on-air packet type from rate and PHY */
if (pDevice->wCurrentRate <= RATE_11M) {
byPktType = PK_TYPE_11B;
} else if (pDevice->eCurrentPHYType == PHY_TYPE_11A) {
byPktType = PK_TYPE_11A;
} else {
if (pDevice->bProtectMode == true) {
byPktType = PK_TYPE_11GB;
} else {
byPktType = PK_TYPE_11GA;
}
}
//#ifdef PLICE_DEBUG
// printk("FIX RATE:CurrentRate is %d");
//#endif
/* EAPOL frames are normally sent in the clear; re-enable encryption only
 * when an established PTK (not a GTK) is available */
if (bNeedEncryption == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ntohs Pkt Type=%04x\n", ntohs(pDevice->sTxEthHeader.wType));
if ((pDevice->sTxEthHeader.wType) == TYPE_PKT_802_1x) {
bNeedEncryption = false;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Pkt Type=%04x\n", (pDevice->sTxEthHeader.wType));
if ((pDevice->pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pDevice->pMgmt->eCurrState == WMAC_STATE_ASSOC)) {
if (pTransmitKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Don't Find TX KEY\n");
}
else {
if (bTKIP_UseGTK == true) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"error: KEY is GTK!!~~\n");
}
else {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
bNeedEncryption = true;
}
}
}
if (pDevice->byCntMeasure == 2) {
bNeedDeAuth = true;
pDevice->s802_11Counter.TKIPCounterMeasuresInvoked++;
}
if (pDevice->bEnableHostWEP) {
if ((uNodeIndex != 0) &&
(pMgmt->sNodeDBTable[uNodeIndex].dwKeyIndex & PAIRWISE_KEY)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Find PTK [%lX]\n", pTransmitKey->dwKeyIndex);
bNeedEncryption = true;
}
}
}
else {
/* non-EAPOL data with encryption enabled but no key: drop */
if (pTransmitKey == NULL) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"return no tx key\n");
dev_kfree_skb_irq(skb);
spin_unlock_irq(&pDevice->lock);
return 0;
}
}
}
#ifdef PLICE_DEBUG
//if (skb->len == 98)
//{
// printk("ping:len is %d\n");
//}
#endif
vGenerateFIFOHeader(pDevice, byPktType, pDevice->pbyTmpBuff, bNeedEncryption,
cbFrameBodySize, TYPE_AC0DMA, pHeadTD,
&pDevice->sTxEthHeader, (unsigned char *)skb->data, pTransmitKey, uNodeIndex,
&uMACfragNum,
&cbHeaderSize
);
if (MACbIsRegBitsOn(pDevice->PortOffset, MAC_REG_PSCTL, PSCTL_PS)) {
// Disable PS
MACbPSWakeup(pDevice->PortOffset);
}
pDevice->bPWBitOn = false;
pLastTD = pHeadTD;
/* hand every fragment's descriptor to the NIC; wmb() orders the header
 * writes before the ownership flip the hardware polls */
for (ii = 0; ii < uMACfragNum; ii++) {
// Poll Transmit the adapter
wmb();
pHeadTD->m_td0TD0.f1Owner=OWNED_BY_NIC;
wmb();
if (ii == uMACfragNum - 1)
pLastTD = pHeadTD;
pHeadTD = pHeadTD->next;
}
// Save the information needed by the tx interrupt handler
// to complete the Send request
pLastTD->pTDInfo->skb = skb;
pLastTD->pTDInfo->byFlags = 0;
pLastTD->pTDInfo->byFlags |= TD_FLAGS_NETIF_SKB;
#ifdef TxInSleep
pDevice->nTxDataTimeCout=0; //2008-8-21 chester <add> for send null packet
#endif
/* stop the stack before the ring runs completely dry */
if (AVAIL_TD(pDevice, TYPE_AC0DMA) <= 1) {
netif_stop_queue(dev);
}
pDevice->apCurrTD[TYPE_AC0DMA] = pHeadTD;
//#ifdef PLICE_DEBUG
if (pDevice->bFixRate)
{
printk("FixRate:Rate is %d,TxPower is %d\n",pDevice->wCurrentRate,pDevice->byCurPwr);
}
else
{
//printk("Auto Rate:Rate is %d,TxPower is %d\n",pDevice->wCurrentRate,pDevice->byCurPwr);
}
//#endif
/* sniff outgoing EAPOL key frames to detect WPA handshake completion.
 * NOTE(review): these skb->data[ETH_HLEN+...] reads are not bounded by a
 * skb->len check — looks like a potential over-read for very short
 * frames; confirm against callers before relying on it. */
{
unsigned char Protocol_Version; //802.1x Authentication
unsigned char Packet_Type; //802.1x Authentication
unsigned char Descriptor_type;
unsigned short Key_info;
bool bTxeapol_key = false;
Protocol_Version = skb->data[ETH_HLEN];
Packet_Type = skb->data[ETH_HLEN+1];
Descriptor_type = skb->data[ETH_HLEN+1+1+2];
Key_info = (skb->data[ETH_HLEN+1+1+2+1] << 8)|(skb->data[ETH_HLEN+1+1+2+2]);
if (pDevice->sTxEthHeader.wType == TYPE_PKT_802_1x) {
if(((Protocol_Version==1) ||(Protocol_Version==2)) &&
(Packet_Type==3)) { //802.1x OR eapol-key challenge frame transfer
bTxeapol_key = true;
if((Descriptor_type==254)||(Descriptor_type==2)) { //WPA or RSN
if(!(Key_info & BIT3) && //group-key challenge
(Key_info & BIT8) && (Key_info & BIT9)) { //send 2/2 key
pDevice->fWPA_Authened = true;
if(Descriptor_type==254)
printk("WPA ");
else
printk("WPA2 ");
printk("Authentication completed!!\n");
}
}
}
}
}
MACvTransmitAC0(pDevice->PortOffset);
// DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "acdma0:pDevice->apCurrTD= %p\n", pHeadTD);
dev->trans_start = jiffies;
spin_unlock_irq(&pDevice->lock);
return 0;
}
static irqreturn_t device_intr(int irq, void *dev_instance) {
struct net_device* dev=dev_instance;
PSDevice pDevice=(PSDevice) netdev_priv(dev);
int max_count=0;
unsigned long dwMIBCounter=0;
PSMgmtObject pMgmt = pDevice->pMgmt;
unsigned char byOrgPageSel=0;
int handled = 0;
unsigned char byData = 0;
int ii= 0;
// unsigned char byRSSI;
MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
if (pDevice->dwIsr == 0)
return IRQ_RETVAL(handled);
if (pDevice->dwIsr == 0xffffffff) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "dwIsr = 0xffff\n");
return IRQ_RETVAL(handled);
}
/*
// 2008-05-21 <mark> by Richardtai, we can't read RSSI here, because no packet bound with RSSI
if ((pDevice->dwIsr & ISR_RXDMA0) &&
(pDevice->byLocalID != REV_ID_VT3253_B0) &&
(pDevice->bBSSIDFilter == true)) {
// update RSSI
//BBbReadEmbeded(pDevice->PortOffset, 0x3E, &byRSSI);
//pDevice->uCurrRSSI = byRSSI;
}
*/
handled = 1;
MACvIntDisable(pDevice->PortOffset);
spin_lock_irq(&pDevice->lock);
//Make sure current page is 0
VNSvInPortB(pDevice->PortOffset + MAC_REG_PAGE1SEL, &byOrgPageSel);
if (byOrgPageSel == 1) {
MACvSelectPage0(pDevice->PortOffset);
}
else
byOrgPageSel = 0;
MACvReadMIBCounter(pDevice->PortOffset, &dwMIBCounter);
// TBD....
// Must do this after doing rx/tx, cause ISR bit is slow
// than RD/TD write back
// update ISR counter
STAvUpdate802_11Counter(&pDevice->s802_11Counter, &pDevice->scStatistic , dwMIBCounter);
while (pDevice->dwIsr != 0) {
STAvUpdateIsrStatCounter(&pDevice->scStatistic, pDevice->dwIsr);
MACvWriteISR(pDevice->PortOffset, pDevice->dwIsr);
if (pDevice->dwIsr & ISR_FETALERR){
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " ISR_FETALERR \n");
VNSvOutPortB(pDevice->PortOffset + MAC_REG_SOFTPWRCTL, 0);
VNSvOutPortW(pDevice->PortOffset + MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPECTI);
device_error(pDevice, pDevice->dwIsr);
}
if (pDevice->byLocalID > REV_ID_VT3253_B1) {
if (pDevice->dwIsr & ISR_MEASURESTART) {
// 802.11h measure start
pDevice->byOrgChannel = pDevice->byCurrentCh;
VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byOrgRCR));
VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, (RCR_RXALLTYPE | RCR_UNICAST | RCR_BROADCAST | RCR_MULTICAST | RCR_WPAERR));
MACvSelectPage1(pDevice->PortOffset);
VNSvInPortD(pDevice->PortOffset + MAC_REG_MAR0, &(pDevice->dwOrgMAR0));
VNSvInPortD(pDevice->PortOffset + MAC_REG_MAR4, &(pDevice->dwOrgMAR4));
MACvSelectPage0(pDevice->PortOffset);
//xxxx
// WCMDbFlushCommandQueue(pDevice->pMgmt, true);
if (set_channel(pDevice, pDevice->pCurrMeasureEID->sReq.byChannel) == true) {
pDevice->bMeasureInProgress = true;
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_READY);
MACvSelectPage0(pDevice->PortOffset);
pDevice->byBasicMap = 0;
pDevice->byCCAFraction = 0;
for(ii=0;ii<8;ii++) {
pDevice->dwRPIs[ii] = 0;
}
} else {
// can not measure because set channel fail
// WCMDbResetCommandQueue(pDevice->pMgmt);
// clear measure control
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
s_vCompleteCurrentMeasure(pDevice, MEASURE_MODE_INCAPABLE);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
MACvSelectPage0(pDevice->PortOffset);
}
}
if (pDevice->dwIsr & ISR_MEASUREEND) {
// 802.11h measure end
pDevice->bMeasureInProgress = false;
VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byOrgRCR);
MACvSelectPage1(pDevice->PortOffset);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, pDevice->dwOrgMAR0);
VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR4, pDevice->dwOrgMAR4);
VNSvInPortB(pDevice->PortOffset + MAC_REG_MSRBBSTS, &byData);
pDevice->byBasicMap |= (byData >> 4);
VNSvInPortB(pDevice->PortOffset + MAC_REG_CCAFRACTION, &pDevice->byCCAFraction);
VNSvInPortB(pDevice->PortOffset + MAC_REG_MSRCTL, &byData);
// clear measure control
MACvRegBitsOff(pDevice->PortOffset, MAC_REG_MSRCTL, MSRCTL_EN);
MACvSelectPage0(pDevice->PortOffset);
set_channel(pDevice, pDevice->byOrgChannel);
// WCMDbResetCommandQueue(pDevice->pMgmt);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
MACvSelectPage0(pDevice->PortOffset);
if (byData & MSRCTL_FINISH) {
// measure success
s_vCompleteCurrentMeasure(pDevice, 0);
} else {
// can not measure because not ready before end of measure time
s_vCompleteCurrentMeasure(pDevice, MEASURE_MODE_LATE);
}
}
if (pDevice->dwIsr & ISR_QUIETSTART) {
do {
;
} while (CARDbStartQuiet(pDevice) == false);
}
}
if (pDevice->dwIsr & ISR_TBTT) {
if (pDevice->bEnableFirstQuiet == true) {
pDevice->byQuietStartCount--;
if (pDevice->byQuietStartCount == 0) {
pDevice->bEnableFirstQuiet = false;
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL, (MSRCTL_QUIETTXCHK | MSRCTL_QUIETEN));
MACvSelectPage0(pDevice->PortOffset);
}
}
if ((pDevice->bChannelSwitch == true) &&
(pDevice->eOPMode == OP_MODE_INFRASTRUCTURE)) {
pDevice->byChannelSwitchCount--;
if (pDevice->byChannelSwitchCount == 0) {
pDevice->bChannelSwitch = false;
set_channel(pDevice, pDevice->byNewChannel);
VNTWIFIbChannelSwitch(pDevice->pMgmt, pDevice->byNewChannel);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
MACvSelectPage0(pDevice->PortOffset);
CARDbStartTxPacket(pDevice, PKT_TYPE_802_11_ALL);
}
}
if (pDevice->eOPMode == OP_MODE_ADHOC) {
//pDevice->bBeaconSent = false;
} else {
if ((pDevice->bUpdateBBVGA) && (pDevice->bLinkPass == true) && (pDevice->uCurrRSSI != 0)) {
long ldBm;
RFvRSSITodBm(pDevice, (unsigned char) pDevice->uCurrRSSI, &ldBm);
for (ii=0;ii<BB_VGA_LEVEL;ii++) {
if (ldBm < pDevice->ldBmThreshold[ii]) {
pDevice->byBBVGANew = pDevice->abyBBVGA[ii];
break;
}
}
if (pDevice->byBBVGANew != pDevice->byBBVGACurrent) {
pDevice->uBBVGADiffCount++;
if (pDevice->uBBVGADiffCount == 1) {
// first VGA diff gain
BBvSetVGAGainOffset(pDevice, pDevice->byBBVGANew);
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"First RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
(int)ldBm, pDevice->byBBVGANew, pDevice->byBBVGACurrent, (int)pDevice->uBBVGADiffCount);
}
if (pDevice->uBBVGADiffCount >= BB_VGA_CHANGE_THRESHOLD) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RSSI[%d] NewGain[%d] OldGain[%d] Count[%d]\n",
(int)ldBm, pDevice->byBBVGANew, pDevice->byBBVGACurrent, (int)pDevice->uBBVGADiffCount);
BBvSetVGAGainOffset(pDevice, pDevice->byBBVGANew);
}
} else {
pDevice->uBBVGADiffCount = 1;
}
}
}
pDevice->bBeaconSent = false;
if (pDevice->bEnablePSMode) {
PSbIsNextTBTTWakeUp((void *)pDevice);
}
if ((pDevice->eOPMode == OP_MODE_AP) ||
(pDevice->eOPMode == OP_MODE_ADHOC)) {
MACvOneShotTimer1MicroSec(pDevice->PortOffset,
(pMgmt->wIBSSBeaconPeriod - MAKE_BEACON_RESERVED) << 10);
}
if (pDevice->eOPMode == OP_MODE_ADHOC && pDevice->pMgmt->wCurrATIMWindow > 0) {
// todo adhoc PS mode
}
}
if (pDevice->dwIsr & ISR_BNTX) {
if (pDevice->eOPMode == OP_MODE_ADHOC) {
pDevice->bIsBeaconBufReadySet = false;
pDevice->cbBeaconBufReadySetCnt = 0;
}
if (pDevice->eOPMode == OP_MODE_AP) {
if(pMgmt->byDTIMCount > 0) {
pMgmt->byDTIMCount --;
pMgmt->sNodeDBTable[0].bRxPSPoll = false;
}
else {
if(pMgmt->byDTIMCount == 0) {
// check if mutltcast tx bufferring
pMgmt->byDTIMCount = pMgmt->byDTIMPeriod - 1;
pMgmt->sNodeDBTable[0].bRxPSPoll = true;
bScheduleCommand((void *)pDevice, WLAN_CMD_RX_PSPOLL, NULL);
}
}
}
pDevice->bBeaconSent = true;
if (pDevice->bChannelSwitch == true) {
pDevice->byChannelSwitchCount--;
if (pDevice->byChannelSwitchCount == 0) {
pDevice->bChannelSwitch = false;
set_channel(pDevice, pDevice->byNewChannel);
VNTWIFIbChannelSwitch(pDevice->pMgmt, pDevice->byNewChannel);
MACvSelectPage1(pDevice->PortOffset);
MACvRegBitsOn(pDevice->PortOffset, MAC_REG_MSRCTL+1, MSRCTL1_TXPAUSE);
MACvSelectPage0(pDevice->PortOffset);
//VNTWIFIbSendBeacon(pDevice->pMgmt);
CARDbStartTxPacket(pDevice, PKT_TYPE_802_11_ALL);
}
}
}
if (pDevice->dwIsr & ISR_RXDMA0) {
max_count += device_rx_srv(pDevice, TYPE_RXDMA0);
}
if (pDevice->dwIsr & ISR_RXDMA1) {
max_count += device_rx_srv(pDevice, TYPE_RXDMA1);
}
if (pDevice->dwIsr & ISR_TXDMA0){
max_count += device_tx_srv(pDevice, TYPE_TXDMA0);
}
if (pDevice->dwIsr & ISR_AC0DMA){
max_count += device_tx_srv(pDevice, TYPE_AC0DMA);
}
if (pDevice->dwIsr & ISR_SOFTTIMER) {
}
if (pDevice->dwIsr & ISR_SOFTTIMER1) {
if (pDevice->eOPMode == OP_MODE_AP) {
if (pDevice->bShortSlotTime)
pMgmt->wCurrCapInfo |= WLAN_SET_CAP_INFO_SHORTSLOTTIME(1);
else
pMgmt->wCurrCapInfo &= ~(WLAN_SET_CAP_INFO_SHORTSLOTTIME(1));
}
bMgrPrepareBeaconToSend(pDevice, pMgmt);
pDevice->byCntMeasure = 0;
}
MACvReadISR(pDevice->PortOffset, &pDevice->dwIsr);
MACvReceive0(pDevice->PortOffset);
MACvReceive1(pDevice->PortOffset);
if (max_count>pDevice->sOpts.int_works)
break;
}
if (byOrgPageSel == 1) {
MACvSelectPage1(pDevice->PortOffset);
}
spin_unlock_irq(&pDevice->lock);
MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
return IRQ_RETVAL(handled);
}
/* IEEE 802.3 CRC-32 generator polynomial (big-endian form). */
static unsigned const ethernet_polynomial = 0x04c11db7U;

/*
 * ether_crc - compute the big-endian Ethernet CRC-32 of a buffer.
 * @length: number of bytes to process
 * @data:   bytes to checksum (each octet consumed LSB-first)
 *
 * Used by device_set_multi() to derive the 6-bit multicast hash index
 * (crc >> 26).  The accumulator is kept unsigned: the original version
 * left-shifted a negative signed int, which is undefined behavior in C.
 * Results are bit-for-bit identical.
 */
static inline u32 ether_crc(int length, unsigned char *data)
{
	u32 crc = 0xffffffffU;	/* preset to all ones, per 802.3 */

	while (--length >= 0) {
		unsigned char current_octet = *data++;
		int bit;

		for (bit = 0; bit < 8; bit++, current_octet >>= 1) {
			/* Feed one message bit: XOR the polynomial in when the
			 * shifted-out MSB differs from the input bit. */
			crc = (crc << 1) ^
			      ((((crc >> 31) & 1) ^ (current_octet & 1)) ?
			       ethernet_polynomial : 0);
		}
	}
	return crc;
}
//2008-8-4 <add> by chester
//2008-8-4 <add> by chester
/*
 * Config_FileGetParameter - extract the value of "NAME=value" from a buffer.
 * @string: parameter name to look for (e.g. "ZONETYPE")
 * @dest:   receives the value text (up to end of line); must be sized by the
 *          caller for a whole config line
 * @source: NUL-terminated buffer holding the config file contents
 *
 * Returns true when the parameter was found and copied, false otherwise.
 *
 * The original version assumed "NAME=" sat at offset 0 of @source and then
 * copied everything behind it (strlen(source) - strlen("NAME=") bytes) into
 * @dest, overrunning small destination buffers.  This version searches the
 * whole buffer and copies only the value up to the end of its line.
 */
static int Config_FileGetParameter(unsigned char *string,
				   unsigned char *dest, unsigned char *source)
{
	unsigned char buf1[100];
	unsigned char *pos;
	size_t len;

	/* Build the "NAME=" prefix; reject names that cannot fit. */
	if (snprintf((char *)buf1, sizeof(buf1), "%s=", (char *)string)
	    >= (int)sizeof(buf1))
		return false;

	/* Look for the prefix anywhere in the buffer, not only at offset 0. */
	pos = (unsigned char *)strstr((char *)source, (char *)buf1);
	if (pos == NULL)
		return false;
	pos += strlen((char *)buf1);

	/* Copy the value only, stopping at the end of the line. */
	len = strcspn((char *)pos, "\r\n");
	memcpy(dest, pos, len);
	return true;
}
/*
 * Config_FileOperation - read CONFIG_PATH and return the configured zone.
 * @pDevice:   device instance (unused here, kept for the existing callers)
 * @fwrite:    write flag (unused; the function only reads)
 * @Parameter: unused
 *
 * Returns ZoneType_USA / ZoneType_Japan / ZoneType_Europe on success,
 * or -1 on any failure.
 *
 * Fixes over the original:
 *  - the read buffer is now NUL-terminated before string parsing (a full
 *    1024-byte file previously made strlen()/parsing run off the end),
 *  - tmpbuffer is zeroed before use (it was compared while uninitialized
 *    when the value was shorter than the compare length),
 *  - "EUROPE" is compared over all 6 bytes (was 5, matching only "EUROP").
 */
int Config_FileOperation(PSDevice pDevice, bool fwrite, unsigned char *Parameter) {
	unsigned char *config_path = CONFIG_PATH;
	unsigned char *buffer = NULL;
	unsigned char tmpbuffer[20];
	struct file *filp = NULL;
	mm_segment_t old_fs = get_fs();
	ssize_t len;
	int result = 0;

	/* Allow kernel-space buffers to be passed to the file read below. */
	set_fs(KERNEL_DS);

	/* open file */
	filp = filp_open(config_path, O_RDWR, 0);
	if (IS_ERR(filp)) {
		printk("Config_FileOperation:open file fail?\n");
		result = -1;
		goto error2;
	}

	if (!(filp->f_op) || !(filp->f_op->read) || !(filp->f_op->write)) {
		printk("file %s cann't readable or writable?\n", config_path);
		result = -1;
		goto error1;
	}

	/* One extra byte so the contents can always be NUL-terminated. */
	buffer = kmalloc(1025, GFP_KERNEL);
	if (buffer == NULL) {
		printk("allocate mem for file fail?\n");
		result = -1;
		goto error1;
	}

	len = filp->f_op->read(filp, buffer, 1024, &filp->f_pos);
	if (len < 0) {
		printk("read file error?\n");
		result = -1;
		goto error1;
	}
	buffer[len] = '\0';	/* string parsing below needs a terminator */

	memset(tmpbuffer, 0, sizeof(tmpbuffer));
	if (Config_FileGetParameter("ZONETYPE", tmpbuffer, buffer) != true) {
		printk("get parameter error?\n");
		result = -1;
		goto error1;
	}

	if (memcmp(tmpbuffer, "USA", 3) == 0) {
		result = ZoneType_USA;
	} else if (memcmp(tmpbuffer, "JAPAN", 5) == 0) {
		result = ZoneType_Japan;
	} else if (memcmp(tmpbuffer, "EUROPE", 6) == 0) {	/* was 5: only "EUROP" */
		result = ZoneType_Europe;
	} else {
		result = -1;
		printk("Unknown Zonetype[%s]?\n", tmpbuffer);
	}

error1:
	kfree(buffer);
	if (filp_close(filp, NULL))
		printk("Config_FileOperation:close file fail\n");

error2:
	set_fs(old_fs);
	return result;
}
/*
 * device_set_multi - program the RX filter from net_device flags and the
 * multicast list.
 *
 * Three regimes: promiscuous (accept everything), all-multicast (hash
 * registers forced to all ones when the list is too long or IFF_ALLMULTI
 * is set), or a 64-bit multicast hash built from the Ethernet CRC of each
 * group address.  The MAR hash registers live on MAC register page 1, so
 * every access is bracketed by MACvSelectPage1()/MACvSelectPage0().
 */
static void device_set_multi(struct net_device *dev) {
	PSDevice pDevice = (PSDevice) netdev_priv(dev);
	PSMgmtObject pMgmt = pDevice->pMgmt;
	u32 mc_filter[2];
	struct netdev_hw_addr *ha;

	/* Start from the RX mode currently programmed in the MAC. */
	VNSvInPortB(pDevice->PortOffset + MAC_REG_RCR, &(pDevice->byRxMode));

	if (dev->flags & IFF_PROMISC) {		/* Set promiscuous. */
		DBG_PRT(MSG_LEVEL_ERR, KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		/* Unconditionally log net taps. */
		pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST|RCR_UNICAST);
	} else if ((netdev_mc_count(dev) > pDevice->multicast_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Accept all multicast: set every hash bit. */
		MACvSelectPage1(pDevice->PortOffset);
		VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, 0xffffffff);
		VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0 + 4, 0xffffffff);
		MACvSelectPage0(pDevice->PortOffset);
		pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
	} else {
		/* Hash index is the top 6 bits of the Ethernet CRC. */
		memset(mc_filter, 0, sizeof(mc_filter));
		netdev_for_each_mc_addr(ha, dev) {
			int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
			/* NOTE(review): cpu_to_le32 on an in-memory bitmask looks
			 * suspect on big-endian hosts — confirm against the MAR
			 * register layout. */
			mc_filter[bit_nr >> 5] |= cpu_to_le32(1 << (bit_nr & 31));
		}
		MACvSelectPage1(pDevice->PortOffset);
		VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0, mc_filter[0]);
		VNSvOutPortD(pDevice->PortOffset + MAC_REG_MAR0 + 4, mc_filter[1]);
		MACvSelectPage0(pDevice->PortOffset);
		pDevice->byRxMode &= ~(RCR_UNICAST);
		pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
	}

	if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
		// If AP mode, don't enable RCR_UNICAST. Since hw only compare addr1 with local mac.
		pDevice->byRxMode |= (RCR_MULTICAST|RCR_BROADCAST);
		pDevice->byRxMode &= ~(RCR_UNICAST);
	}

	/* Commit the final mode to the RCR register. */
	VNSvOutPortB(pDevice->PortOffset + MAC_REG_RCR, pDevice->byRxMode);
	DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "pDevice->byRxMode = %x\n", pDevice->byRxMode);
}
static struct net_device_stats *device_get_stats(struct net_device *dev) {
PSDevice pDevice=(PSDevice) netdev_priv(dev);
return &pDevice->stats;
}
/*
 * device_ioctl - dispatch wireless-extensions and driver-private ioctls.
 * @dev: network interface the ioctl was issued on
 * @rq:  generic request; reinterpreted as struct iwreq for the SIOCxIW*
 *       commands and as PSCmdRequest for the driver-private IOCTL_CMD_*
 * @cmd: ioctl command number
 *
 * Returns 0 on success or a negative errno.  If a handler set
 * pDevice->bCommit, the new configuration is applied before returning.
 */
static int device_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) {
	PSDevice pDevice = (PSDevice)netdev_priv(dev);
	struct iwreq *wrq = (struct iwreq *) rq;
	int rc = 0;
	PSMgmtObject pMgmt = pDevice->pMgmt;
	PSCmdRequest pReq;

	if (pMgmt == NULL) {
		rc = -EFAULT;
		return rc;
	}

	switch (cmd) {
	case SIOCGIWNAME:
		rc = iwctl_giwname(dev, NULL, (char *)&(wrq->u.name), NULL);
		break;

	case SIOCGIWNWID:	//0x8b03  support
		rc = -EOPNOTSUPP;
		break;

	// Set frequency/channel
	case SIOCSIWFREQ:
		rc = iwctl_siwfreq(dev, NULL, &(wrq->u.freq), NULL);
		break;

	// Get frequency/channel
	case SIOCGIWFREQ:
		rc = iwctl_giwfreq(dev, NULL, &(wrq->u.freq), NULL);
		break;

	// Set desired network name (ESSID)
	case SIOCSIWESSID:
	{
		char essid[IW_ESSID_MAX_SIZE+1];

		if (wrq->u.essid.length > IW_ESSID_MAX_SIZE) {
			rc = -E2BIG;
			break;
		}
		if (copy_from_user(essid, wrq->u.essid.pointer,
				   wrq->u.essid.length)) {
			rc = -EFAULT;
			break;
		}
		rc = iwctl_siwessid(dev, NULL,
				    &(wrq->u.essid), essid);
	}
	break;

	// Get current network name (ESSID)
	case SIOCGIWESSID:
	{
		char essid[IW_ESSID_MAX_SIZE+1];

		if (wrq->u.essid.pointer)
			rc = iwctl_giwessid(dev, NULL,
					    &(wrq->u.essid), essid);
		if (copy_to_user(wrq->u.essid.pointer,
				 essid,
				 wrq->u.essid.length))
			rc = -EFAULT;
	}
	break;

	case SIOCSIWAP:
		rc = iwctl_siwap(dev, NULL, &(wrq->u.ap_addr), NULL);
		break;

	// Get current Access Point (BSSID)
	case SIOCGIWAP:
		rc = iwctl_giwap(dev, NULL, &(wrq->u.ap_addr), NULL);
		break;

	// Set desired station name
	case SIOCSIWNICKN:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWNICKN \n");
		rc = -EOPNOTSUPP;
		break;

	// Get current station name
	case SIOCGIWNICKN:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWNICKN \n");
		rc = -EOPNOTSUPP;
		break;

	// Set the desired bit-rate
	case SIOCSIWRATE:
		rc = iwctl_siwrate(dev, NULL, &(wrq->u.bitrate), NULL);
		break;

	// Get the current bit-rate
	case SIOCGIWRATE:
		rc = iwctl_giwrate(dev, NULL, &(wrq->u.bitrate), NULL);
		break;

	// Set the desired RTS threshold
	case SIOCSIWRTS:
		rc = iwctl_siwrts(dev, NULL, &(wrq->u.rts), NULL);
		break;

	// Get the current RTS threshold
	case SIOCGIWRTS:
		rc = iwctl_giwrts(dev, NULL, &(wrq->u.rts), NULL);
		break;

	// Set the desired fragmentation threshold
	case SIOCSIWFRAG:
		rc = iwctl_siwfrag(dev, NULL, &(wrq->u.frag), NULL);
		break;

	// Get the current fragmentation threshold
	case SIOCGIWFRAG:
		rc = iwctl_giwfrag(dev, NULL, &(wrq->u.frag), NULL);
		break;

	// Set mode of operation
	case SIOCSIWMODE:
		rc = iwctl_siwmode(dev, NULL, &(wrq->u.mode), NULL);
		break;

	// Get mode of operation
	case SIOCGIWMODE:
		rc = iwctl_giwmode(dev, NULL, &(wrq->u.mode), NULL);
		break;

	// Set WEP keys and mode
	case SIOCSIWENCODE:
	{
		char abyKey[WLAN_WEP232_KEYLEN];

		if (wrq->u.encoding.pointer) {
			if (wrq->u.encoding.length > WLAN_WEP232_KEYLEN) {
				rc = -E2BIG;
				break;
			}
			memset(abyKey, 0, WLAN_WEP232_KEYLEN);
			if (copy_from_user(abyKey,
					   wrq->u.encoding.pointer,
					   wrq->u.encoding.length)) {
				rc = -EFAULT;
				break;
			}
		} else if (wrq->u.encoding.length != 0) {
			rc = -EINVAL;
			break;
		}
		rc = iwctl_siwencode(dev, NULL, &(wrq->u.encoding), abyKey);
	}
	break;

	// Get the WEP keys and mode
	case SIOCGIWENCODE:
		// Reading keys back is restricted to privileged users.
		if (!capable(CAP_NET_ADMIN)) {
			rc = -EPERM;
			break;
		}
	{
		char abyKey[WLAN_WEP232_KEYLEN];

		rc = iwctl_giwencode(dev, NULL, &(wrq->u.encoding), abyKey);
		if (rc != 0) break;
		if (wrq->u.encoding.pointer) {
			if (copy_to_user(wrq->u.encoding.pointer,
					 abyKey,
					 wrq->u.encoding.length))
				rc = -EFAULT;
		}
	}
	break;

	// Get the current Tx-Power
	case SIOCGIWTXPOW:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWTXPOW \n");
		rc = -EOPNOTSUPP;
		break;

	case SIOCSIWTXPOW:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWTXPOW \n");
		rc = -EOPNOTSUPP;
		break;

	case SIOCSIWRETRY:
		rc = iwctl_siwretry(dev, NULL, &(wrq->u.retry), NULL);
		break;

	case SIOCGIWRETRY:
		rc = iwctl_giwretry(dev, NULL, &(wrq->u.retry), NULL);
		break;

	// Get range of parameters
	case SIOCGIWRANGE:
	{
		struct iw_range range;

		rc = iwctl_giwrange(dev, NULL, &(wrq->u.data), (char *) &range);
		if (copy_to_user(wrq->u.data.pointer, &range, sizeof(struct iw_range)))
			rc = -EFAULT;
	}
	break;

	case SIOCGIWPOWER:
		rc = iwctl_giwpower(dev, NULL, &(wrq->u.power), NULL);
		break;

	case SIOCSIWPOWER:
		rc = iwctl_siwpower(dev, NULL, &(wrq->u.power), NULL);
		break;

	case SIOCGIWSENS:
		rc = iwctl_giwsens(dev, NULL, &(wrq->u.sens), NULL);
		break;

	case SIOCSIWSENS:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSENS \n");
		rc = -EOPNOTSUPP;
		break;

	case SIOCGIWAPLIST:
	{
		// Scratch space for the AP list copied back to user space.
		char buffer[IW_MAX_AP * (sizeof(struct sockaddr) + sizeof(struct iw_quality))];

		if (wrq->u.data.pointer) {
			rc = iwctl_giwaplist(dev, NULL, &(wrq->u.data), buffer);
			if (rc == 0) {
				if (copy_to_user(wrq->u.data.pointer,
						 buffer,
						 (wrq->u.data.length * (sizeof(struct sockaddr) + sizeof(struct iw_quality)))
					))
					rc = -EFAULT;
			}
		}
	}
	break;

#ifdef WIRELESS_SPY
	// Set the spy list
	case SIOCSIWSPY:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWSPY \n");
		rc = -EOPNOTSUPP;
		break;

	// Get the spy list
	case SIOCGIWSPY:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWSPY \n");
		rc = -EOPNOTSUPP;
		break;
#endif // WIRELESS_SPY

	case SIOCGIWPRIV:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWPRIV \n");
		rc = -EOPNOTSUPP;
/*
		if(wrq->u.data.pointer) {
			wrq->u.data.length = sizeof(iwctl_private_args) / sizeof( iwctl_private_args[0]);

			if(copy_to_user(wrq->u.data.pointer,
					(u_char *) iwctl_private_args,
					sizeof(iwctl_private_args)))
				rc = -EFAULT;
		}
*/
		break;

//2008-0409-07, <Add> by Einsn Liu
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
	case SIOCSIWAUTH:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWAUTH \n");
		rc = iwctl_siwauth(dev, NULL, &(wrq->u.param), NULL);
		break;

	case SIOCGIWAUTH:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWAUTH \n");
		rc = iwctl_giwauth(dev, NULL, &(wrq->u.param), NULL);
		break;

	case SIOCSIWGENIE:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWGENIE \n");
		rc = iwctl_siwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
		break;

	case SIOCGIWGENIE:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWGENIE \n");
		rc = iwctl_giwgenie(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
		break;

	case SIOCSIWENCODEEXT:
	{
		char extra[sizeof(struct iw_encode_ext)+MAX_KEY_LEN+1];

		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWENCODEEXT \n");
		if (wrq->u.encoding.pointer) {
			memset(extra, 0, sizeof(struct iw_encode_ext)+MAX_KEY_LEN+1);
			if (wrq->u.encoding.length > (sizeof(struct iw_encode_ext)+ MAX_KEY_LEN)) {
				rc = -E2BIG;
				break;
			}
			if (copy_from_user(extra, wrq->u.encoding.pointer, wrq->u.encoding.length)) {
				rc = -EFAULT;
				break;
			}
		} else if (wrq->u.encoding.length != 0) {
			rc = -EINVAL;
			break;
		}
		rc = iwctl_siwencodeext(dev, NULL, &(wrq->u.encoding), extra);
	}
	break;

	case SIOCGIWENCODEEXT:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCGIWENCODEEXT \n");
		rc = iwctl_giwencodeext(dev, NULL, &(wrq->u.encoding), NULL);
		break;

	case SIOCSIWMLME:
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO " SIOCSIWMLME \n");
		rc = iwctl_siwmlme(dev, NULL, &(wrq->u.data), wrq->u.data.pointer);
		break;
#endif // #ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT

//End Add -- //2008-0409-07, <Add> by Einsn Liu

	case IOCTL_CMD_TEST:
		// Magic-code handshake used by the userland tools to detect the driver.
		if (!(pDevice->flags & DEVICE_FLAGS_OPENED)) {
			rc = -EFAULT;
			break;
		} else {
			rc = 0;
		}
		pReq = (PSCmdRequest)rq;
		pReq->wResult = MAGIC_CODE;
		break;

	case IOCTL_CMD_SET:
#ifdef SndEvt_ToAPI
		if ((((PSCmdRequest)rq)->wCmdCode != WLAN_CMD_SET_EVT) &&
		    !(pDevice->flags & DEVICE_FLAGS_OPENED))
#else
		if (!(pDevice->flags & DEVICE_FLAGS_OPENED) &&
		    (((PSCmdRequest)rq)->wCmdCode != WLAN_CMD_SET_WPA))
#endif
		{
			rc = -EFAULT;
			break;
		} else {
			rc = 0;
		}

		// Only one private command may be in flight at a time.
		if (test_and_set_bit(0, (void *)&(pMgmt->uCmdBusy))) {
			return -EBUSY;
		}
		rc = private_ioctl(pDevice, rq);
		clear_bit(0, (void *)&(pMgmt->uCmdBusy));
		break;

	case IOCTL_CMD_HOSTAPD:
		rc = vt6655_hostap_ioctl(pDevice, &wrq->u.data);
		break;

	case IOCTL_CMD_WPA:
		rc = wpa_ioctl(pDevice, &wrq->u.data);
		break;

	case SIOCETHTOOL:
		return ethtool_ioctl(dev, (void *) rq->ifr_data);

	// All other calls are currently unsupported
	default:
		rc = -EOPNOTSUPP;
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Ioctl command not support..%x\n", cmd);
	}

	// Apply any configuration change a handler above committed.
	if (pDevice->bCommit) {
		if (pMgmt->eConfigMode == WMAC_CONFIG_AP) {
			netif_stop_queue(pDevice->dev);
			spin_lock_irq(&pDevice->lock);
			bScheduleCommand((void *)pDevice, WLAN_CMD_RUN_AP, NULL);
			spin_unlock_irq(&pDevice->lock);
		} else {
			DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Commit the settings\n");
			spin_lock_irq(&pDevice->lock);
			// Drop the current association and rescan with the new settings.
			pDevice->bLinkPass = false;
			memset(pMgmt->abyCurrBSSID, 0, 6);
			pMgmt->eCurrState = WMAC_STATE_IDLE;
			netif_stop_queue(pDevice->dev);
#ifdef WPA_SUPPLICANT_DRIVER_WEXT_SUPPORT
			pMgmt->eScanType = WMAC_SCAN_ACTIVE;
			if (pDevice->bWPASuppWextEnabled != true)
#endif
				bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, pMgmt->abyDesireSSID);
			bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
			spin_unlock_irq(&pDevice->lock);
		}
		pDevice->bCommit = false;
	}

	return rc;
}
/*
 * ethtool_ioctl - minimal SIOCETHTOOL handler: only ETHTOOL_GDRVINFO
 * (driver name/version) is supported.
 * @dev:      network interface
 * @useraddr: user-space pointer to the ethtool command block
 *
 * Returns 0 on success, -EFAULT on copy failures, -EOPNOTSUPP otherwise.
 * Note: the first copy_from_user() argument was corrupted to "ðcmd"
 * (an HTML-entity mangling of "&ethcmd") in this tree; restored here.
 */
static int ethtool_ioctl(struct net_device *dev, void *useraddr)
{
	u32 ethcmd;

	if (copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO: {
		/* Designated first member sets .cmd; the rest is zeroed,
		 * so the strncpy()-filled strings stay NUL-terminated. */
		struct ethtool_drvinfo info = {ETHTOOL_GDRVINFO};

		strncpy(info.driver, DEVICE_NAME, sizeof(info.driver)-1);
		strncpy(info.version, DEVICE_VERSION, sizeof(info.version)-1);
		if (copy_to_user(useraddr, &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}
	}

	return -EOPNOTSUPP;
}
/*------------------------------------------------------------------*/
/* Export the PCI ID table so hotplug/modprobe can match VT6655 devices. */
MODULE_DEVICE_TABLE(pci, vt6655_pci_id_table);

/* PCI driver glue; suspend/resume hooks exist only with power management. */
static struct pci_driver device_driver = {
	.name = DEVICE_NAME,
	.id_table = vt6655_pci_id_table,
	.probe = vt6655_probe,
	.remove = vt6655_remove,
#ifdef CONFIG_PM
	.suspend = viawget_suspend,
	.resume = viawget_resume,
#endif
};
/*
 * vt6655_init_module - module entry point: register the PCI driver and,
 * when power management is built in, a reboot notifier so hardware can be
 * quiesced on shutdown.
 */
static int __init vt6655_init_module(void)
{
	int ret = pci_register_driver(&device_driver);

#ifdef CONFIG_PM
	if (ret >= 0)
		register_reboot_notifier(&device_notifier);
#endif

	return ret;
}
/*
 * vt6655_cleanup_module - module exit: undo vt6655_init_module() in
 * reverse order (notifier first, then the PCI driver).
 */
static void __exit vt6655_cleanup_module(void)
{
#ifdef CONFIG_PM
	unregister_reboot_notifier(&device_notifier);
#endif
	pci_unregister_driver(&device_driver);
}

module_init(vt6655_init_module);
module_exit(vt6655_cleanup_module);
#ifdef CONFIG_PM
/*
 * device_notify_reboot - reboot-notifier callback: on shutdown, halt, or
 * power-off, suspend every PCI device bound to this driver so the hardware
 * is left in a quiet state.
 */
static int
device_notify_reboot(struct notifier_block *nb, unsigned long event, void *p)
{
	struct pci_dev *pdev = NULL;

	if (event == SYS_DOWN || event == SYS_HALT || event == SYS_POWER_OFF) {
		for_each_pci_dev(pdev) {
			/* Only touch devices we own and that finished probing. */
			if (pci_dev_driver(pdev) == &device_driver &&
			    pci_get_drvdata(pdev))
				viawget_suspend(pdev, PMSG_HIBERNATE);
		}
	}

	return NOTIFY_DONE;
}
/*
 * viawget_suspend - PM suspend hook: stop traffic, cancel pending work,
 * save MAC context, and put the device into the requested low-power state.
 *
 * NOTE(review): pci_disable_device()/pci_set_power_state() are called while
 * holding spin_lock_irq(); these PCI calls may sleep — looks unsafe under a
 * spinlock, confirm before relying on this path.
 */
static int
viawget_suspend(struct pci_dev *pcid, pm_message_t state)
{
	int power_status;	// to silence the compiler
	PSDevice pDevice = pci_get_drvdata(pcid);
	PSMgmtObject pMgmt = pDevice->pMgmt;

	netif_stop_queue(pDevice->dev);
	spin_lock_irq(&pDevice->lock);
	pci_save_state(pcid);
	/* Stop deferred work and flush the command queue state. */
	del_timer(&pDevice->sTimerCommand);
	del_timer(&pMgmt->sTimerSecondCallback);
	pDevice->cbFreeCmdQueue = CMD_Q_SIZE;
	pDevice->uCmdDequeueIdx = 0;
	pDevice->uCmdEnqueueIdx = 0;
	pDevice->bCmdRunning = false;
	/* Quiesce the MAC and snapshot its registers for resume. */
	MACbShutdown(pDevice->PortOffset);
	MACvSaveContext(pDevice->PortOffset, pDevice->abyMacContext);
	/* Forget the current association; resume will rescan. */
	pDevice->bLinkPass = false;
	memset(pMgmt->abyCurrBSSID, 0, 6);
	pMgmt->eCurrState = WMAC_STATE_IDLE;
	pci_disable_device(pcid);
	power_status = pci_set_power_state(pcid, pci_choose_state(pcid, state));
	spin_unlock_irq(&pDevice->lock);
	return 0;
}
/*
 * viawget_resume - PM resume hook: power the device back up, restore the
 * MAC context saved by viawget_suspend(), and kick off a rescan/rejoin.
 * The hardware is only reinitialized when the interface was running.
 */
static int
viawget_resume(struct pci_dev *pcid)
{
	PSDevice pDevice = pci_get_drvdata(pcid);
	PSMgmtObject pMgmt = pDevice->pMgmt;
	int power_status;	// to silence the compiler

	power_status = pci_set_power_state(pcid, 0);	/* back to D0 */
	power_status = pci_enable_wake(pcid, 0, 0);	/* no wake events */
	pci_restore_state(pcid);
	if (netif_running(pDevice->dev)) {
		spin_lock_irq(&pDevice->lock);
		MACvRestoreContext(pDevice->PortOffset, pDevice->abyMacContext);
		device_init_registers(pDevice, DEVICE_INIT_DXPL);
		if (pMgmt->sNodeDBTable[0].bActive == true) { // Assoc with BSS
			// The old association is stale after a suspend cycle.
			pMgmt->sNodeDBTable[0].bActive = false;
			pDevice->bLinkPass = false;
			if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) {
				// In Adhoc, BSS state set back to started.
				pMgmt->eCurrState = WMAC_STATE_STARTED;
			} else {
				pMgmt->eCurrMode = WMAC_MODE_STANDBY;
				pMgmt->eCurrState = WMAC_STATE_IDLE;
			}
		}
		init_timer(&pMgmt->sTimerSecondCallback);
		init_timer(&pDevice->sTimerCommand);
		MACvIntEnable(pDevice->PortOffset, IMR_MASK_VALUE);
		BSSvClearBSSList((void *)pDevice, pDevice->bLinkPass);
		// Schedule a scan and re-join of the desired SSID.
		bScheduleCommand((void *) pDevice, WLAN_CMD_BSSID_SCAN, NULL);
		bScheduleCommand((void *) pDevice, WLAN_CMD_SSID, NULL);
		spin_unlock_irq(&pDevice->lock);
	}
	return 0;
}
#endif
| gpl-2.0 |
iskandar1023/Velvet-N4 | drivers/hwmon/sht21.c | 4940 | 7557 | /* Sensirion SHT21 humidity and temperature sensor driver
*
* Copyright (C) 2010 Urs Fleisch <urs.fleisch@sensirion.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA
*
* Data sheet available (5/2010) at
* http://www.sensirion.com/en/pdf/product_information/Datasheet-humidity-sensor-SHT21.pdf
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/device.h>
/* I2C command bytes */
#define SHT21_TRIG_T_MEASUREMENT_HM 0xe3
#define SHT21_TRIG_RH_MEASUREMENT_HM 0xe5
/**
* struct sht21 - SHT21 device specific data
* @hwmon_dev: device registered with hwmon
* @lock: mutex to protect measurement values
* @valid: only 0 before first measurement is taken
* @last_update: time of last update (jiffies)
* @temperature: cached temperature measurement value
* @humidity: cached humidity measurement value
*/
struct sht21 {
	struct device *hwmon_dev;	/* handle from hwmon_device_register() */
	struct mutex lock;		/* serializes sensor access and cache updates */
	char valid;			/* 0 until the first measurement completes */
	unsigned long last_update;	/* jiffies timestamp of the last refresh */
	int temperature;		/* cached value, milli-degrees Celsius */
	int humidity;			/* cached value, milli-percent RH */
};
/**
* sht21_temp_ticks_to_millicelsius() - convert raw temperature ticks to
* milli celsius
* @ticks: temperature ticks value received from sensor
*/
static inline int sht21_temp_ticks_to_millicelsius(int ticks)
{
	/* The low two bits of the raw word are status flags, not data. */
	int raw = ticks & ~0x0003;

	/*
	 * T = -46.85 + 175.72 * ST / 2^16 (data sheet 6.2), folded into
	 * integer fixed-point with three fractional digits:
	 * 175.72e3 / 2^16 == 21965 / 2^13.
	 */
	return ((21965 * raw) >> 13) - 46850;
}
/**
* sht21_rh_ticks_to_per_cent_mille() - convert raw humidity ticks to
* one-thousandths of a percent relative humidity
* @ticks: humidity ticks value received from sensor
*/
static inline int sht21_rh_ticks_to_per_cent_mille(int ticks)
{
	/* The low two bits of the raw word are status flags, not data. */
	int raw = ticks & ~0x0003;

	/*
	 * RH = -6 + 125 * SRH / 2^16 (data sheet 6.1), folded into integer
	 * fixed-point with three fractional digits:
	 * 125e3 / 2^16 == 15625 / 2^13.
	 */
	return ((15625 * raw) >> 13) - 6000;
}
/**
* sht21_update_measurements() - get updated measurements from device
* @client: I2C client device
*
* Returns 0 on success, else negative errno.
*/
static int sht21_update_measurements(struct i2c_client *client)
{
	int ret = 0;
	struct sht21 *sht21 = i2c_get_clientdata(client);

	/* Lock guards both the I2C transactions and the cached values. */
	mutex_lock(&sht21->lock);
	/*
	 * Data sheet 2.4:
	 * SHT2x should not be active for more than 10% of the time - e.g.
	 * maximum two measurements per second at 12bit accuracy shall be made.
	 */
	if (time_after(jiffies, sht21->last_update + HZ / 2) || !sht21->valid) {
		/* Hold-master-mode word reads: the sensor clock-stretches
		 * until the conversion is done. */
		ret = i2c_smbus_read_word_swapped(client,
						  SHT21_TRIG_T_MEASUREMENT_HM);
		if (ret < 0)
			goto out;
		sht21->temperature = sht21_temp_ticks_to_millicelsius(ret);
		ret = i2c_smbus_read_word_swapped(client,
						  SHT21_TRIG_RH_MEASUREMENT_HM);
		if (ret < 0)
			goto out;
		sht21->humidity = sht21_rh_ticks_to_per_cent_mille(ret);
		sht21->last_update = jiffies;
		sht21->valid = 1;
	}
out:
	mutex_unlock(&sht21->lock);

	/* ret holds the last raw reading (>= 0) on success. */
	return ret >= 0 ? 0 : ret;
}
/**
* sht21_show_temperature() - show temperature measurement value in sysfs
* @dev: device
* @attr: device attribute
* @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
*
* Will be called on read access to temp1_input sysfs attribute.
* Returns number of bytes written into buffer, negative errno on error.
*/
static ssize_t sht21_show_temperature(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct sht21 *sht21;
	int status;

	/* Refresh the cached readings (rate-limited internally). */
	status = sht21_update_measurements(client);
	if (status < 0)
		return status;

	sht21 = i2c_get_clientdata(client);
	return sprintf(buf, "%d\n", sht21->temperature);
}
/**
* sht21_show_humidity() - show humidity measurement value in sysfs
* @dev: device
* @attr: device attribute
* @buf: sysfs buffer (PAGE_SIZE) where measurement values are written to
*
* Will be called on read access to humidity1_input sysfs attribute.
* Returns number of bytes written into buffer, negative errno on error.
*/
static ssize_t sht21_show_humidity(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct sht21 *sht21;
	int status;

	/* Refresh the cached readings (rate-limited internally). */
	status = sht21_update_measurements(client);
	if (status < 0)
		return status;

	sht21 = i2c_get_clientdata(client);
	return sprintf(buf, "%d\n", sht21->humidity);
}
/* sysfs attributes */
/* temp1_input: milli-degrees Celsius; humidity1_input: milli-percent RH. */
static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, sht21_show_temperature,
			  NULL, 0);
static SENSOR_DEVICE_ATTR(humidity1_input, S_IRUGO, sht21_show_humidity,
			  NULL, 0);

/* NULL-terminated list of attributes, registered as a group in probe(). */
static struct attribute *sht21_attributes[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_humidity1_input.dev_attr.attr,
	NULL
};

static const struct attribute_group sht21_attr_group = {
	.attrs = sht21_attributes,
};
/**
* sht21_probe() - probe device
* @client: I2C client device
* @id: device ID
*
* Called by the I2C core when an entry in the ID table matches a
* device's name.
* Returns 0 on success.
*/
static int __devinit sht21_probe(struct i2c_client *client,
				 const struct i2c_device_id *id)
{
	struct sht21 *sht21;
	int err;

	/* Measurements use SMBus word reads; bail out early if the adapter
	 * cannot perform them. */
	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_SMBUS_WORD_DATA)) {
		dev_err(&client->dev,
			"adapter does not support SMBus word transactions\n");
		return -ENODEV;
	}

	/* Zeroed allocation leaves ->valid == 0, forcing the first sysfs
	 * read to take a real measurement. */
	sht21 = kzalloc(sizeof(*sht21), GFP_KERNEL);
	if (!sht21) {
		dev_dbg(&client->dev, "kzalloc failed\n");
		return -ENOMEM;
	}
	i2c_set_clientdata(client, sht21);

	mutex_init(&sht21->lock);

	/* Attributes must exist before the hwmon device is announced. */
	err = sysfs_create_group(&client->dev.kobj, &sht21_attr_group);
	if (err) {
		dev_dbg(&client->dev, "could not create sysfs files\n");
		goto fail_free;
	}
	sht21->hwmon_dev = hwmon_device_register(&client->dev);
	if (IS_ERR(sht21->hwmon_dev)) {
		dev_dbg(&client->dev, "unable to register hwmon device\n");
		err = PTR_ERR(sht21->hwmon_dev);
		goto fail_remove_sysfs;
	}

	dev_info(&client->dev, "initialized\n");

	return 0;

	/* Unwind in reverse order of acquisition. */
fail_remove_sysfs:
	sysfs_remove_group(&client->dev.kobj, &sht21_attr_group);
fail_free:
	kfree(sht21);

	return err;
}
/**
* sht21_remove() - remove device
* @client: I2C client device
*/
static int __devexit sht21_remove(struct i2c_client *client)
{
	struct sht21 *sht21 = i2c_get_clientdata(client);

	/* Tear down in reverse order of probe: hwmon device first, then the
	 * sysfs attributes, then the private data. */
	hwmon_device_unregister(sht21->hwmon_dev);
	sysfs_remove_group(&client->dev.kobj, &sht21_attr_group);
	kfree(sht21);

	return 0;
}
/* Device ID table */
/* Matched against the I2C device name "sht21". */
static const struct i2c_device_id sht21_id[] = {
	{ "sht21", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, sht21_id);

/* I2C driver glue tying probe/remove to the ID table above. */
static struct i2c_driver sht21_driver = {
	.driver.name = "sht21",
	.probe       = sht21_probe,
	.remove      = __devexit_p(sht21_remove),
	.id_table    = sht21_id,
};

/* Registers/unregisters the driver on module load/unload. */
module_i2c_driver(sht21_driver);

MODULE_AUTHOR("Urs Fleisch <urs.fleisch@sensirion.com>");
MODULE_DESCRIPTION("Sensirion SHT21 humidity and temperature sensor driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
binkybear/nexus10-5 | drivers/staging/rtl8187se/ieee80211/ieee80211_tx.c | 5452 | 18800 | /******************************************************************************
Copyright(c) 2003 - 2004 Intel Corporation. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the terms of version 2 of the GNU General Public License as
published by the Free Software Foundation.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59
Temple Place - Suite 330, Boston, MA 02111-1307, USA.
The full GNU General Public License is included in this distribution in the
file called LICENSE.
Contact Information:
James P. Ketrenos <ipw2100-admin@linux.intel.com>
Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
******************************************************************************
Few modifications for Realtek's Wi-Fi drivers by
Andrea Merello <andreamrl@tiscali.it>
A special thanks goes to Realtek for their support !
******************************************************************************/
#include <linux/compiler.h>
//#include <linux/config.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/wireless.h>
#include <linux/etherdevice.h>
#include <asm/uaccess.h>
#include <linux/if_vlan.h>
#include "ieee80211.h"
/*
802.11 Data Frame
802.11 frame_contorl for data frames - 2 bytes
,-----------------------------------------------------------------------------------------.
bits | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | a | b | c | d | e |
|----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
val | 0 | 0 | 0 | 1 | x | 0 | 0 | 0 | 1 | 0 | x | x | x | x | x |
|----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|-----|------|
desc | ^-ver-^ | ^type-^ | ^-----subtype-----^ | to |from |more |retry| pwr |more |wep |
| | | x=0 data,x=1 data+ack | DS | DS |frag | | mgm |data | |
'-----------------------------------------------------------------------------------------'
/\
|
802.11 Data Frame |
,--------- 'ctrl' expands to >-----------'
|
,--'---,-------------------------------------------------------------.
Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 |
|------|------|---------|---------|---------|------|---------|------|
Desc. | ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs |
| | tion | (BSSID) | | | ence | data | |
`--------------------------------------------------| |------'
Total: 28 non-data bytes `----.----'
|
.- 'Frame data' expands to <---------------------------'
|
V
,---------------------------------------------------.
Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 |
|------|------|---------|----------|------|---------|
Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP |
| DSAP | SSAP | | | | Packet |
| 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | |
`-----------------------------------------| |
Total: 8 non-data bytes `----.----'
|
.- 'IP Packet' expands, if WEP enabled, to <--'
|
V
,-----------------------.
Bytes | 4 | 0-2296 | 4 |
|-----|-----------|-----|
Desc. | IV | Encrypted | ICV |
| | IP Packet | |
`-----------------------'
Total: 8 non-data bytes
802.3 Ethernet Data Frame
,-----------------------------------------.
Bytes | 6 | 6 | 2 | Variable | 4 |
|-------|-------|------|-----------|------|
Desc. | Dest. | Source| Type | IP Packet | fcs |
| MAC | MAC | | | |
`-----------------------------------------'
Total: 18 non-data bytes
In the event that fragmentation is required, the incoming payload is split into
N parts of size ieee->fts. The first fragment contains the SNAP header and the
remaining packets are just data.
If encryption is enabled, each fragment payload size is reduced by enough space
to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP)
So if you have 1500 bytes of payload with ieee->fts set to 500 without
encryption it will take 3 frames. With WEP it will take 4 frames as the
payload of each frame is reduced to 492 bytes.
* SKB visualization
*
* ,- skb->data
* |
* | ETHERNET HEADER ,-<-- PAYLOAD
* | | 14 bytes from skb->data
* | 2 bytes for Type --> ,T. | (sizeof ethhdr)
* | | | |
* |,-Dest.--. ,--Src.---. | | |
* | 6 bytes| | 6 bytes | | | |
* v | | | | | |
* 0 | v 1 | v | v 2
* 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
* ^ | ^ | ^ |
* | | | | | |
* | | | | `T' <---- 2 bytes for Type
* | | | |
* | | '---SNAP--' <-------- 6 bytes for SNAP
* | |
* `-IV--' <-------------------- 4 bytes for IV (WEP)
*
* SNAP HEADER
*
*/
/* SNAP OUIs: 802.1H bridge-tunnel encapsulation for protocols that require
 * it (see the h_proto test in ieee80211_put_snap()), RFC 1042 encapsulation
 * (all-zero OUI) for everything else. */
static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 };
static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 };
/*
 * Write an 802.2 LLC/SNAP header plus the ethertype at @data.
 * IPX (0x8137) and AppleTalk AARP (0x80f3) use the 802.1H OUI; all other
 * protocols use the RFC 1042 OUI.  Returns the number of bytes written
 * (SNAP_SIZE + 2-byte ethertype).
 */
static inline int ieee80211_put_snap(u8 *data, u16 h_proto)
{
	struct ieee80211_snap_hdr *hdr = (struct ieee80211_snap_hdr *)data;
	const u8 *oui = (h_proto == 0x8137 || h_proto == 0x80f3) ?
			P802_1H_OUI : RFC1042_OUI;

	hdr->dsap = 0xaa;
	hdr->ssap = 0xaa;
	hdr->ctrl = 0x03;		/* unnumbered information */
	hdr->oui[0] = oui[0];
	hdr->oui[1] = oui[1];
	hdr->oui[2] = oui[2];

	/* Ethertype follows the SNAP header, in network byte order. */
	*(u16 *)(data + SNAP_SIZE) = htons(h_proto);

	return SNAP_SIZE + sizeof(u16);
}
/*
 * ieee80211_encrypt_fragment - encrypt one TX fragment in place
 * @ieee: ieee80211 state holding the active TX crypt context
 * @frag: fragment skb whose payload follows @hdr_len bytes of 802.11 header
 * @hdr_len: length of the 802.11 header that stays in the clear
 *
 * Runs the MSDU and/or MPDU encryption hooks of the currently selected TX
 * key.  Returns 0 on success, -1 when there is no usable crypt context,
 * when TKIP countermeasures are active, or when the cipher fails (the
 * frame is then counted in ieee_stats.tx_discards).
 */
int ieee80211_encrypt_fragment(
	struct ieee80211_device *ieee,
	struct sk_buff *frag,
	int hdr_len)
{
	struct ieee80211_crypt_data *crypt = ieee->crypt[ieee->tx_keyidx];
	int res;

	/* Bail out early when no crypt context is configured; this also
	 * avoids the hang previously seen on shared-key setup errors. */
	if (!crypt || !crypt->ops)
		return -1;

#ifdef CONFIG_IEEE80211_CRYPT_TKIP
	/* While TKIP countermeasures are active, no TKIP-protected frame may
	 * be transmitted.  crypt was already validated above, so the old
	 * redundant re-checks are dropped, and the header pointer is now
	 * declared inside the branch instead of after executable code. */
	if (ieee->tkip_countermeasures &&
	    strcmp(crypt->ops->name, "TKIP") == 0) {
		struct ieee80211_hdr_4addr *header =
			(struct ieee80211_hdr_4addr *)frag->data;

		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
			       "TX packet to %pM\n",
			       ieee->dev->name, header->addr1);
		}
		return -1;
	}
#endif
	/* To encrypt, frame format is:
	 * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */

	// PR: FIXME: Copied from hostap. Check fragmentation/MSDU/MPDU encryption.
	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here. */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(frag, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv);
	atomic_dec(&crypt->refcnt);

	if (res < 0) {
		printk(KERN_INFO "%s: Encryption failed: len=%d.\n",
		       ieee->dev->name, frag->len);
		ieee->ieee_stats.tx_discards++;
		return -1;
	}

	return 0;
}
/* Free a TXB and every fragment skb it still owns.  NULL is a no-op. */
void ieee80211_txb_free(struct ieee80211_txb *txb) {
	int frag;

	if (unlikely(!txb))
		return;

	for (frag = 0; frag < txb->nr_frags; frag++) {
		if (txb->fragments[frag])
			dev_kfree_skb_any(txb->fragments[frag]);
	}

	kfree(txb);
}
/*
 * Allocate a TXB with @nr_frags fragment skbs of @txb_size bytes each.
 * Returns NULL if the TXB or any fragment cannot be allocated (nothing
 * leaks on the failure path).
 */
struct ieee80211_txb *ieee80211_alloc_txb(int nr_frags, int txb_size,
					  int gfp_mask)
{
	struct ieee80211_txb *txb;
	int i;

	/* kzalloc also zeroes the trailing fragment pointer array; the old
	 * kmalloc + memset(sizeof(struct)) left those pointers
	 * uninitialized until the loop below filled them in. */
	txb = kzalloc(sizeof(struct ieee80211_txb) + (sizeof(u8 *) * nr_frags),
		      gfp_mask);
	if (!txb)
		return NULL;

	txb->nr_frags = nr_frags;
	txb->frag_size = txb_size;

	for (i = 0; i < nr_frags; i++) {
		txb->fragments[i] = dev_alloc_skb(txb_size);
		if (unlikely(!txb->fragments[i]))
			break;
	}

	/* Partial allocation: release the fragments obtained so far. */
	if (unlikely(i != nr_frags)) {
		while (--i >= 0)
			dev_kfree_skb_any(txb->fragments[i]);
		kfree(txb);
		return NULL;
	}

	return txb;
}
// Classify the to-be send data packet
// Need to acquire the sent queue index.
/*
 * Sets skb->priority to the WMM user priority (0-7) derived from the
 * frame: IPv4 TOS precedence bits, VLAN tag priority, or 7 for EAPOL.
 * Returns the chosen priority; 0 when QoS is disabled for the network.
 */
static int
ieee80211_classify(struct sk_buff *skb, struct ieee80211_network *network)
{
	struct ether_header *eh = (struct ether_header*)skb->data;
	unsigned int wme_UP = 0;

	/* Without QoS everything goes out at priority 0. */
	if(!network->QoS_Enable) {
		skb->priority = 0;
		return(wme_UP);
	}

	if(eh->ether_type == __constant_htons(ETHERTYPE_IP)) {
		/* IPv4: user priority comes from the TOS precedence bits. */
		const struct iphdr *ih = (struct iphdr*)(skb->data + \
				sizeof(struct ether_header));
		wme_UP = (ih->tos >> 5)&0x07;
	} else if (vlan_tx_tag_present(skb)) {//vtag packet
#ifndef VLAN_PRI_SHIFT
#define VLAN_PRI_SHIFT	13		/* Shift to find VLAN user priority */
#define VLAN_PRI_MASK	7		/* Mask for user priority bits in VLAN */
#endif
		u32 tag = vlan_tx_tag_get(skb);
		wme_UP = (tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK;
	} else if(ETH_P_PAE == ntohs(((struct ethhdr *)skb->data)->h_proto)) {
		//printk(KERN_WARNING "type = normal packet\n");
		/* EAPOL handshake frames get the highest priority. */
		wme_UP = 7;
	}

	skb->priority = wme_UP;
	return(wme_UP);
}
/* SKBs are added to the ieee->tx_queue. */
/*
 * ieee80211_rtl_xmit - 802.11 transmit entry point
 *
 * Takes an 802.3 frame from the network stack, classifies it for QoS,
 * builds the 802.11 header, fragments the payload if needed, optionally
 * encrypts each fragment, and hands the resulting TXB to the driver (or
 * to the softmac TX queue).  The original skb is always consumed on the
 * success paths.  Returns NETDEV_TX_OK, or NETDEV_TX_BUSY after stopping
 * the queue when a TXB cannot be allocated.
 */
int ieee80211_rtl_xmit(struct sk_buff *skb,
		       struct net_device *dev)
{
	struct ieee80211_device *ieee = netdev_priv(dev);
	struct ieee80211_txb *txb = NULL;
	struct ieee80211_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type, encrypt;
	int bytes, fc, qos_ctl, hdr_len;
	struct sk_buff *skb_frag;
	struct ieee80211_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	struct ieee80211_crypt_data* crypt;

	//printk(KERN_WARNING "upper layer packet!\n");
	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, dont' bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE))||
	   ((!ieee->softmac_data_hard_start_xmit && (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}

	/* Pick the WMM user priority for this frame (sets skb->priority). */
	ieee80211_classify(skb,&ieee->current_network);
	if(likely(ieee->raw_tx == 0)){

		/* Frame must at least hold a SNAP header + ethertype. */
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		crypt = ieee->crypt[ieee->tx_keyidx];

		/* EAPOL frames go out in the clear during 802.1X handshakes. */
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;

		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}

#ifdef CONFIG_IEEE80211_DEBUG
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE - sizeof(u16));
			IEEE80211_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}
#endif

		/* Save source and destination addresses */
		memcpy(&dest, skb->data, ETH_ALEN);
		memcpy(&src, skb->data+ETH_ALEN, ETH_ALEN);

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		/* Build the frame control field: data/QoS-data subtype plus
		 * the WEP (protected) flag when encrypting. */
		if(ieee->current_network.QoS_Enable) {
			if (encrypt)
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA |
					IEEE80211_FCTL_WEP;
			else
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_QOS_DATA;
		} else {
			if (encrypt)
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA |
					IEEE80211_FCTL_WEP;
			else
				fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
		}

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= IEEE80211_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			   Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid, ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			   Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid, ETH_ALEN);
		}
		//printk(KERN_WARNING "essid MAC address is %pM", &header.addr1);
		header.frame_ctl = cpu_to_le16(fc);
		//hdr_len = IEEE80211_3ADDR_LEN;

		/* Determine fragmentation size based on destination (multicast
		 * and broadcast are not fragmented) */
		//if (is_multicast_ether_addr(dest) ||
		//is_broadcast_ether_addr(dest)) {
		if (is_multicast_ether_addr(header.addr1) ||
		    is_broadcast_ether_addr(header.addr1)) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl = QOS_CTL_NOTCONTAIN_ACK;
		}
		else {
			//printk(KERN_WARNING "&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&frag_size = %d\n", frag_size);
			frag_size = ieee->fts;//default:392
			qos_ctl = 0;
		}

		if (ieee->current_network.QoS_Enable) {
			/* QoS data frames carry a 2-byte QoS control field. */
			hdr_len = IEEE80211_3ADDR_LEN + 2;
			/* skb->priority is set in the ieee80211_classify() */
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl);
		} else {
			hdr_len = IEEE80211_3ADDR_LEN;
		}

		/* Determine amount of payload per fragment. Regardless of if
		 * this stack is providing the full 802.11 header, one will
		 * eventually be affixed to this fragment -- so we must account for
		 * it when determining the amount of payload space. */
		//bytes_per_frag = frag_size - (IEEE80211_3ADDR_LEN + (ieee->current_network->QoS_Enable ? 2:0));
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
			bytes_per_frag -= IEEE80211_FCS_LEN;

		/* Each fragment may need to have room for encryption pre/postfix */
		if (encrypt)
			bytes_per_frag -= crypt->ops->extra_prefix_len +
				crypt->ops->extra_postfix_len;

		/* Number of fragments is the total bytes_per_frag /
		 * payload_per_fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the reserve
		 * and full fragment bytes (bytes_per_frag doesn't include prefix,
		 * postfix, header, FCS, etc.) */
		txb = ieee80211_alloc_txb(nr_frags, frag_size, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		/* Fill each fragment: header, SNAP (first frag only), data. */
		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			skb_frag->priority = UP2AC(skb->priority);
			if (encrypt)
				skb_reserve(skb_frag, crypt->ops->extra_prefix_len);

			frag_hdr = (struct ieee80211_hdr_3addrqos *)skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the MOREFRAGS
			 * bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | IEEE80211_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;
			} else {
				/* The last fragment takes the remaining length */
				bytes = bytes_last_frag;
			}

			if(ieee->current_network.QoS_Enable) {
				// add 1 only indicate to corresponding seq number control 2006/7/12
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[UP2AC(skb->priority)+1]<<4 | i);
				//printk(KERN_WARNING "skb->priority = %d,", skb->priority);
				//printk(KERN_WARNING "type:%d: seq = %d\n",UP2AC(skb->priority),ieee->seq_ctrl[UP2AC(skb->priority)+1]);
			} else {
				frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			//frag_hdr->seq_ctl = cpu_to_le16(ieee->seq_ctrl<<4 | i);
			//

			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				ieee80211_put_snap(
					skb_put(skb_frag, SNAP_SIZE + sizeof(u16)),
					ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in order
			 * to insert the IV between the header and the payload */
			if (encrypt)
				ieee80211_encrypt_fragment(ieee, skb_frag, hdr_len);
			if (ieee->config &
			    (CFG_IEEE80211_COMPUTE_FCS | CFG_IEEE80211_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}
		// Advance sequence number in data frame.
		//printk(KERN_WARNING "QoS Enalbed? %s\n", ieee->current_network.QoS_Enable?"Y":"N");
		/* 12-bit sequence counter, per-AC when QoS is on. */
		if (ieee->current_network.QoS_Enable) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
		//---
	}else{
		/* Raw TX: the caller supplies a complete 802.11 frame. */
		if (unlikely(skb->len < sizeof(struct ieee80211_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			       ieee->dev->name, skb->len);
			goto success;
		}

		txb = ieee80211_alloc_txb(1, skb->len, GFP_ATOMIC);
		if(!txb){
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			       ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0],skb->len), skb->data, skb->len);
	}

 success:
	spin_unlock_irqrestore(&ieee->lock, flags);
	/* The original 802.3 skb has been fully copied into the TXB. */
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE){
			ieee80211_softmac_xmit(txb, ieee);
		}else{
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return NETDEV_TX_OK;
			}
			ieee80211_txb_free(txb);
		}
	}

	return NETDEV_TX_OK;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return NETDEV_TX_BUSY;
}
| gpl-2.0 |
flar2/m8-Sense-4.4.4 | arch/mips/lasat/serial.c | 9548 | 3052 | /*
* Registration of Lasat UART platform device.
*
* Copyright (C) 2007 Brian Murphy <brian@murphy.dk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/serial_8250.h>
#include <asm/lasat/lasat.h>
#include <asm/lasat/serial.h>
/* Filled in at init time depending on whether this is a LASAT 100 or 200. */
static struct resource lasat_serial_res[2] __initdata;

static struct plat_serial8250_port lasat_serial8250_port[] = {
	{
		.iotype = UPIO_MEM,
		.flags = UPF_IOREMAP | UPF_BOOT_AUTOCONF |
			 UPF_SKIP_TEST,
	},
	{},	/* terminating empty entry */
};
/*
 * Register the board UART as a serial8250 platform device.
 * Returns 0 on success or a negative errno; on failure the half-built
 * platform device is released via platform_device_put().
 */
static __init int lasat_uart_add(void)
{
	struct platform_device *pdev;
	unsigned long regs_base;
	unsigned int regshift, baud_base, uart_irq;
	int retval;

	pdev = platform_device_alloc("serial8250", -1);
	if (!pdev)
		return -ENOMEM;

	/* Pick the board-specific UART parameters once, then derive the
	 * resources and the 8250 port description from them. */
	if (!IS_LASAT_200()) {
		regs_base = LASAT_UART_REGS_BASE_100;
		regshift  = LASAT_UART_REGS_SHIFT_100;
		baud_base = LASAT_BASE_BAUD_100;
		uart_irq  = LASATINT_UART_100;
	} else {
		regs_base = LASAT_UART_REGS_BASE_200;
		regshift  = LASAT_UART_REGS_SHIFT_200;
		baud_base = LASAT_BASE_BAUD_200;
		uart_irq  = LASATINT_UART_200;
	}

	lasat_serial_res[0].start = KSEG1ADDR(regs_base);
	lasat_serial_res[0].end   = lasat_serial_res[0].start + regshift * 8 - 1;
	lasat_serial_res[0].flags = IORESOURCE_MEM;
	lasat_serial_res[1].start = uart_irq;
	lasat_serial_res[1].end   = uart_irq;
	lasat_serial_res[1].flags = IORESOURCE_IRQ;

	lasat_serial8250_port[0].mapbase  = regs_base;
	lasat_serial8250_port[0].uartclk  = baud_base * 16;
	lasat_serial8250_port[0].regshift = regshift;
	lasat_serial8250_port[0].irq      = uart_irq;

	pdev->id = PLAT8250_DEV_PLATFORM;
	pdev->dev.platform_data = lasat_serial8250_port;

	retval = platform_device_add_resources(pdev, lasat_serial_res,
					       ARRAY_SIZE(lasat_serial_res));
	if (retval)
		goto err_free_device;

	retval = platform_device_add(pdev);
	if (retval)
		goto err_free_device;

	return 0;

err_free_device:
	platform_device_put(pdev);
	return retval;
}
/* Register the UART during the normal device initcall phase. */
device_initcall(lasat_uart_add);
| gpl-2.0 |
bananacakes/holiday-2.6.35-crc | drivers/net/irda/ep7211-sir.c | 10060 | 1957 | /*
* IR port driver for the Cirrus Logic EP7211 processor.
*
* Copyright 2001, Blue Mug Inc. All rights reserved.
* Copyright 2007, Samuel Ortiz <samuel@sortiz.org>
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/tty.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <net/irda/irda.h>
#include <net/irda/irda_device.h>
#include <asm/io.h>
#include <mach/hardware.h>
#include "sir-dev.h"
#define MIN_DELAY 25	/* 15 us, but wait a little more to be sure */
#define MAX_DELAY 10000	/* 1 ms */

/* Dongle driver callbacks, defined below. */
static int ep7211_open(struct sir_dev *dev);
static int ep7211_close(struct sir_dev *dev);
static int ep7211_change_speed(struct sir_dev *dev, unsigned speed);
static int ep7211_reset(struct sir_dev *dev);

/* Dongle descriptor registered with the IrDA SIR core. */
static struct dongle_driver ep7211 = {
	.owner		= THIS_MODULE,
	.driver_name	= "EP7211 IR driver",
	.type		= IRDA_EP7211_DONGLE,
	.open		= ep7211_open,
	.close		= ep7211_close,
	.reset		= ep7211_reset,
	.set_speed	= ep7211_change_speed,
};
/* Register the EP7211 dongle with the IrDA SIR core on module load. */
static int __init ep7211_sir_init(void)
{
	return irda_register_dongle(&ep7211);
}

/* Unregister the dongle on module unload. */
static void __exit ep7211_sir_cleanup(void)
{
	irda_unregister_dongle(&ep7211);
}
/* Enable the on-chip SIR encoder via the SIREN bit in SYSCON1. */
static int ep7211_open(struct sir_dev *dev)
{
	unsigned int syscon1;

	syscon1 = clps_readl(SYSCON1) | SYSCON1_SIREN;
	clps_writel(syscon1, SYSCON1);

	return 0;
}
/* Disable the on-chip SIR encoder by clearing SIREN in SYSCON1. */
static int ep7211_close(struct sir_dev *dev)
{
	unsigned int syscon1;

	syscon1 = clps_readl(SYSCON1) & ~SYSCON1_SIREN;
	clps_writel(syscon1, SYSCON1);

	return 0;
}
/* No dongle-specific action is needed on a speed change; always succeeds. */
static int ep7211_change_speed(struct sir_dev *dev, unsigned speed)
{
	return 0;
}

/* No dongle-specific reset sequence is needed; always succeeds. */
static int ep7211_reset(struct sir_dev *dev)
{
	return 0;
}

/* Module metadata and entry points. */
MODULE_AUTHOR("Samuel Ortiz <samuel@sortiz.org>");
MODULE_DESCRIPTION("EP7211 IR dongle driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("irda-dongle-13"); /* IRDA_EP7211_DONGLE */

module_init(ep7211_sir_init);
module_exit(ep7211_sir_cleanup);
| gpl-2.0 |
szezso/3.0-mid | arch/sh/drivers/heartbeat.c | 12876 | 4267 | /*
* Generic heartbeat driver for regular LED banks
*
* Copyright (C) 2007 - 2010 Paul Mundt
*
* Most SH reference boards include a number of individual LEDs that can
* be independently controlled (either via a pre-defined hardware
* function or via the LED class, if desired -- the hardware tends to
* encapsulate some of the same "triggers" that the LED class supports,
* so there's not too much value in it).
*
* Additionally, most of these boards also have a LED bank that we've
* traditionally used for strobing the load average. This use case is
* handled by this driver, rather than giving each LED bit position its
* own struct device.
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <asm/heartbeat.h>
#define DRV_NAME "heartbeat"
#define DRV_VERSION "0.1.2"

/* Default LED layout when the board supplies none: bits 0..7 in order. */
static unsigned char default_bit_pos[] = { 0, 1, 2, 3, 4, 5, 6, 7 };
/*
 * Light exactly one LED of the bank: bit @bit of hd->bit_pos[] is set
 * (or cleared, when @inverted), all other bits covered by hd->mask are
 * cleared, and bits outside the mask are read back and preserved.
 */
static inline void heartbeat_toggle_bit(struct heartbeat_data *hd,
					unsigned bit, unsigned int inverted)
{
	unsigned int val = 1 << hd->bit_pos[bit];

	if (inverted)
		val = ~val;
	val &= hd->mask;

	/* Read-modify-write at the register width configured in probe. */
	switch (hd->regsize) {
	case 32:
		iowrite32(val | (ioread32(hd->base) & ~hd->mask), hd->base);
		break;
	case 16:
		iowrite16(val | (ioread16(hd->base) & ~hd->mask), hd->base);
		break;
	default:
		iowrite8(val | (ioread8(hd->base) & ~hd->mask), hd->base);
		break;
	}
}
/*
 * Timer callback: sweep the lit LED back and forth across the bank and
 * re-arm the timer with a period that shrinks as the load average rises.
 */
static void heartbeat_timer(unsigned long data)
{
	struct heartbeat_data *hd = (struct heartbeat_data *)data;
	/* NOTE(review): this sweep state is function-static, i.e. shared by
	 * all heartbeat instances — fine for the usual single device, but
	 * multiple devices would fight over it; confirm if that matters.
	 * `up` is unsigned, so -up wraps, but `bit += up` still steps down
	 * correctly under modular arithmetic. */
	static unsigned bit = 0, up = 1;

	heartbeat_toggle_bit(hd, bit, hd->flags & HEARTBEAT_INVERTED);

	/* Reverse direction at either end of the bank. */
	bit += up;
	if ((bit == 0) || (bit == (hd->nr_bits)-1))
		up = -up;

	/* Interval derived from avenrun[0] (1-minute load average). */
	mod_timer(&hd->timer, jiffies + (110 - ((300 << FSHIFT) /
			((avenrun[0] / 5) + (3 << FSHIFT)))));
}
/*
 * Probe: map the LED bank register, apply board-supplied or default
 * configuration (bit layout, mask, register width), and start the
 * heartbeat timer.  Returns 0 on success or a negative errno.
 */
static int heartbeat_drv_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct heartbeat_data *hd;
	int i;

	if (unlikely(pdev->num_resources != 1)) {
		dev_err(&pdev->dev, "invalid number of resources\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (unlikely(res == NULL)) {
		dev_err(&pdev->dev, "invalid resource\n");
		return -EINVAL;
	}

	/* Board code may pass pre-configured data; otherwise allocate. */
	if (pdev->dev.platform_data) {
		hd = pdev->dev.platform_data;
	} else {
		hd = kzalloc(sizeof(struct heartbeat_data), GFP_KERNEL);
		if (unlikely(!hd))
			return -ENOMEM;
	}

	hd->base = ioremap_nocache(res->start, resource_size(res));
	if (unlikely(!hd->base)) {
		dev_err(&pdev->dev, "ioremap failed\n");

		if (!pdev->dev.platform_data)
			kfree(hd);

		return -ENXIO;
	}

	if (!hd->nr_bits) {
		hd->bit_pos = default_bit_pos;
		hd->nr_bits = ARRAY_SIZE(default_bit_pos);
	}

	hd->mask = 0;
	for (i = 0; i < hd->nr_bits; i++)
		hd->mask |= (1 << hd->bit_pos[i]);

	/* Infer register width from the resource type unless preset. */
	if (!hd->regsize) {
		switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
		case IORESOURCE_MEM_32BIT:
			hd->regsize = 32;
			break;
		case IORESOURCE_MEM_16BIT:
			hd->regsize = 16;
			break;
		case IORESOURCE_MEM_8BIT:
		default:
			hd->regsize = 8;
			break;
		}
	}

	setup_timer(&hd->timer, heartbeat_timer, (unsigned long)hd);
	platform_set_drvdata(pdev, hd);

	/*
	 * Fix: mod_timer() returns whether the timer was already pending
	 * (0/1), not an error code, so it must not be used as the probe
	 * return value.  Kick the timer and report success explicitly.
	 */
	mod_timer(&hd->timer, jiffies + 1);

	return 0;
}
/* Remove: stop the timer, unmap the registers, and free driver-owned data. */
static int heartbeat_drv_remove(struct platform_device *pdev)
{
	struct heartbeat_data *hd = platform_get_drvdata(pdev);

	del_timer_sync(&hd->timer);
	iounmap(hd->base);
	platform_set_drvdata(pdev, NULL);

	/* Only free data we allocated ourselves in probe. */
	if (!pdev->dev.platform_data)
		kfree(hd);

	return 0;
}
/* Binds to the "heartbeat" platform device registered by board code. */
static struct platform_driver heartbeat_driver = {
	.probe = heartbeat_drv_probe,
	.remove = heartbeat_drv_remove,
	.driver = {
		.name = DRV_NAME,
	},
};
/* Module init: announce the driver and register it with the platform bus. */
static int __init heartbeat_init(void)
{
	printk(KERN_NOTICE DRV_NAME ": version %s loaded\n", DRV_VERSION);
	return platform_driver_register(&heartbeat_driver);
}

/* Module exit: unregister from the platform bus. */
static void __exit heartbeat_exit(void)
{
	platform_driver_unregister(&heartbeat_driver);
}
module_init(heartbeat_init);
module_exit(heartbeat_exit);

MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("Paul Mundt");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
jyh0082007/sigTaint | drivers/acpi/acpi_memhotplug.c | 77 | 10114 | /*
* Copyright (C) 2004, 2013 Intel Corporation
* Author: Naveen B S <naveen.b.s@intel.com>
* Author: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or (at
* your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*
* ACPI based HotPlug driver that supports Memory Hotplug
* This driver fields notifications from firmware for memory add
* and remove operations and alerts the VM of the affected memory
* ranges.
*/
#include <linux/acpi.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include "internal.h"
#define ACPI_MEMORY_DEVICE_CLASS	"memory"
#define ACPI_MEMORY_DEVICE_HID		"PNP0C80"	/* standard memory device _HID */
#define ACPI_MEMORY_DEVICE_NAME		"Hotplug Mem Device"

#define _COMPONENT		ACPI_MEMORY_DEVICE_COMPONENT

#undef PREFIX
#define PREFIX		"ACPI:memory_hp:"

ACPI_MODULE_NAME("acpi_memhotplug");

/* Memory Device States */
#define MEMORY_INVALID_STATE	0
#define MEMORY_POWER_ON_STATE	1
#define MEMORY_POWER_OFF_STATE	2

static int acpi_memory_device_add(struct acpi_device *device,
				  const struct acpi_device_id *not_used);
static void acpi_memory_device_remove(struct acpi_device *device);

static const struct acpi_device_id memory_device_ids[] = {
	{ACPI_MEMORY_DEVICE_HID, 0},
	{"", 0},
};

/* Scan handler wiring attach/detach to memory hot-add/remove; hotplug
 * notifications are enabled by default. */
static struct acpi_scan_handler memory_device_handler = {
	.ids = memory_device_ids,
	.attach = acpi_memory_device_add,
	.detach = acpi_memory_device_remove,
	.hotplug = {
		.enabled = true,
	},
};
/* One contiguous memory range reported by the device's _CRS. */
struct acpi_memory_info {
	struct list_head list;
	u64 start_addr;		/* Memory Range start physical addr */
	u64 length;		/* Memory Range length */
	unsigned short caching;	/* memory cache attribute */
	unsigned short write_protect;	/* memory read/write attribute */
	unsigned int enabled:1;	/* set once the range is hot-added */
};

/* Per-acpi_device driver state: the owning device plus its ranges. */
struct acpi_memory_device {
	struct acpi_device * device;
	unsigned int state;	/* State of the memory device */
	struct list_head res_list;	/* list of acpi_memory_info */
};
/*
 * _CRS walk callback: record one ACPI memory range for the device.
 *
 * Adjacent ranges with identical caching and write-protect attributes are
 * coalesced into the preceding entry; otherwise a new acpi_memory_info is
 * appended to mem_device->res_list.  Non-memory resources are skipped.
 */
static acpi_status
acpi_memory_get_resource(struct acpi_resource *resource, void *context)
{
	struct acpi_memory_device *mem_device = context;
	struct acpi_resource_address64 address64;
	struct acpi_memory_info *info, *new;
	acpi_status status;

	status = acpi_resource_to_address64(resource, &address64);
	if (ACPI_FAILURE(status) ||
	    (address64.resource_type != ACPI_MEMORY_RANGE))
		return AE_OK;	/* not a memory range: keep walking */

	list_for_each_entry(info, &mem_device->res_list, list) {
		/* Can we combine the resource range information? */
		if ((info->caching == address64.info.mem.caching) &&
		    (info->write_protect == address64.info.mem.write_protect) &&
		    (info->start_addr + info->length == address64.minimum)) {
			info->length += address64.address_length;
			return AE_OK;
		}
	}

	new = kzalloc(sizeof(struct acpi_memory_info), GFP_KERNEL);
	if (!new)
		return AE_ERROR;

	INIT_LIST_HEAD(&new->list);
	new->caching = address64.info.mem.caching;
	new->write_protect = address64.info.mem.write_protect;
	new->start_addr = address64.minimum;
	new->length = address64.address_length;
	list_add_tail(&new->list, &mem_device->res_list);

	return AE_OK;
}
/* Drop every recorded memory range and reset the list to empty. */
static void
acpi_memory_free_device_resources(struct acpi_memory_device *mem_device)
{
	struct acpi_memory_info *info, *tmp;

	list_for_each_entry_safe(info, tmp, &mem_device->res_list, list)
		kfree(info);

	INIT_LIST_HEAD(&mem_device->res_list);
}
/*
 * Populate mem_device->res_list from the device's _CRS (no-op when it was
 * already populated).  Returns 0 on success, -EINVAL when the resource
 * walk fails, in which case any partially collected entries are freed.
 */
static int
acpi_memory_get_device_resources(struct acpi_memory_device *mem_device)
{
	acpi_status status;

	if (!list_empty(&mem_device->res_list))
		return 0;	/* already enumerated */

	status = acpi_walk_resources(mem_device->device->handle,
				     METHOD_NAME__CRS,
				     acpi_memory_get_resource, mem_device);
	if (ACPI_SUCCESS(status))
		return 0;

	acpi_memory_free_device_resources(mem_device);
	return -EINVAL;
}
/*
 * Evaluate _STA and verify the device is present, enabled and
 * functioning.  Returns 0 when usable, -ENODEV otherwise.
 */
static int acpi_memory_check_device(struct acpi_memory_device *mem_device)
{
	const unsigned long long required = ACPI_STA_DEVICE_PRESENT |
					    ACPI_STA_DEVICE_ENABLED |
					    ACPI_STA_DEVICE_FUNCTIONING;
	unsigned long long sta;

	/* Get device present/absent information from the _STA */
	if (ACPI_FAILURE(acpi_evaluate_integer(mem_device->device->handle,
					       METHOD_NAME__STA, NULL, &sta)))
		return -ENODEV;

	/* All three status bits must be set. */
	if ((sta & required) != required)
		return -ENODEV;

	return 0;
}
/* First page frame number of the range (start address rounded down). */
static unsigned long acpi_meminfo_start_pfn(struct acpi_memory_info *info)
{
	return PFN_DOWN(info->start_addr);
}

/* PFN bound covering the range's last byte (rounded up). */
static unsigned long acpi_meminfo_end_pfn(struct acpi_memory_info *info)
{
	return PFN_UP(info->start_addr + info->length-1);
}
/* walk_memory_range() callback: bind one memory block device to the ACPI
 * handle passed through @arg. */
static int acpi_bind_memblk(struct memory_block *mem, void *arg)
{
	return acpi_bind_one(&mem->dev, (acpi_handle)arg);
}

/* Bind every memory block device covering @info's range to @handle. */
static int acpi_bind_memory_blocks(struct acpi_memory_info *info,
				   acpi_handle handle)
{
	return walk_memory_range(acpi_meminfo_start_pfn(info),
				 acpi_meminfo_end_pfn(info), (void *)handle,
				 acpi_bind_memblk);
}

/* walk_memory_range() callback: undo acpi_bind_memblk(). */
static int acpi_unbind_memblk(struct memory_block *mem, void *arg)
{
	acpi_unbind_one(&mem->dev);
	return 0;
}

/* Unbind every memory block device covering @info's range. */
static void acpi_unbind_memory_blocks(struct acpi_memory_info *info,
				      acpi_handle handle)
{
	walk_memory_range(acpi_meminfo_start_pfn(info),
			  acpi_meminfo_end_pfn(info), NULL, acpi_unbind_memblk);
}
/*
 * Hot-add every recorded memory range to the VM and bind the resulting
 * memory block devices to this ACPI handle.  Succeeds when at least one
 * range was enabled (or was already known to the kernel via -EEXIST).
 */
static int acpi_memory_enable_device(struct acpi_memory_device *mem_device)
{
	acpi_handle handle = mem_device->device->handle;
	int result, num_enabled = 0;
	struct acpi_memory_info *info;
	int node;

	node = acpi_get_node(handle);
	/*
	 * Tell the VM there is more memory here...
	 * Note: Assume that this function returns zero on success
	 * We don't have memory-hot-add rollback function,now.
	 * (i.e. memory-hot-remove function)
	 */
	list_for_each_entry(info, &mem_device->res_list, list) {
		if (info->enabled) { /* just sanity check...*/
			num_enabled++;
			continue;
		}

		/*
		 * If the memory block size is zero, please ignore it.
		 * Don't try to do the following memory hotplug flowchart.
		 */
		if (!info->length)
			continue;

		/* Fall back to deriving the node from the physical address. */
		if (node < 0)
			node = memory_add_physaddr_to_nid(info->start_addr);

		result = add_memory(node, info->start_addr, info->length);

		/*
		 * If the memory block has been used by the kernel, add_memory()
		 * returns -EEXIST. If add_memory() returns the other error, it
		 * means that this memory block is not used by the kernel.
		 */
		if (result && result != -EEXIST)
			continue;

		result = acpi_bind_memory_blocks(info, handle);
		if (result) {
			acpi_unbind_memory_blocks(info, handle);
			return -ENODEV;
		}

		info->enabled = 1;

		/*
		 * Add num_enable even if add_memory() returns -EEXIST, so the
		 * device is bound to this driver.
		 */
		num_enabled++;
	}
	if (!num_enabled) {
		dev_err(&mem_device->device->dev, "add_memory failed\n");
		mem_device->state = MEMORY_INVALID_STATE;
		return -EINVAL;
	}
	/*
	 * Sometimes the memory device will contain several memory blocks.
	 * When one memory block is hot-added to the system memory, it will
	 * be regarded as a success.
	 * Otherwise if the last memory block can't be hot-added to the system
	 * memory, it will be failure and the memory device can't be bound with
	 * driver.
	 */
	return 0;
}
/*
 * Reverse of acpi_memory_enable_device(): unbind the memory block
 * devices, hot-remove each enabled range from the VM, and free the
 * bookkeeping entries.
 */
static void acpi_memory_remove_memory(struct acpi_memory_device *mem_device)
{
	acpi_handle handle = mem_device->device->handle;
	struct acpi_memory_info *info, *n;
	int nid = acpi_get_node(handle);

	list_for_each_entry_safe(info, n, &mem_device->res_list, list) {
		if (!info->enabled)
			continue;

		/* Fall back to deriving the node from the physical address. */
		if (nid == NUMA_NO_NODE)
			nid = memory_add_physaddr_to_nid(info->start_addr);

		acpi_unbind_memory_blocks(info, handle);
		remove_memory(nid, info->start_addr, info->length);
		list_del(&info->list);
		kfree(info);
	}
}
/*
 * Tear down a memory device's driver state: release its resource list,
 * detach it from the ACPI device, and free the container.  A NULL
 * argument is tolerated as a no-op.
 */
static void acpi_memory_device_free(struct acpi_memory_device *mem_device)
{
	if (mem_device == NULL)
		return;

	acpi_memory_free_device_resources(mem_device);
	mem_device->device->driver_data = NULL;
	kfree(mem_device);
}
/*
 * Scan-handler attach callback for an ACPI memory device.
 *
 * Allocates the per-device state, reads the memory ranges from _CRS,
 * checks device status and hot-adds the memory blocks.  Returns a
 * negative errno on hard failure, 0 when the device should simply not
 * be bound, and 1 once the device has been configured and claimed
 * (scan-handler convention: positive means claimed — TODO confirm
 * against acpi_scan_attach_handler()).
 */
static int acpi_memory_device_add(struct acpi_device *device,
				  const struct acpi_device_id *not_used)
{
	struct acpi_memory_device *mem_device;
	int result;

	if (!device)
		return -EINVAL;

	mem_device = kzalloc(sizeof(struct acpi_memory_device), GFP_KERNEL);
	if (!mem_device)
		return -ENOMEM;

	INIT_LIST_HEAD(&mem_device->res_list);
	mem_device->device = device;
	sprintf(acpi_device_name(device), "%s", ACPI_MEMORY_DEVICE_NAME);
	sprintf(acpi_device_class(device), "%s", ACPI_MEMORY_DEVICE_CLASS);
	device->driver_data = mem_device;

	/* Get the range from the _CRS */
	result = acpi_memory_get_device_resources(mem_device);
	if (result) {
		device->driver_data = NULL;
		kfree(mem_device);
		return result;
	}

	/* Set the device state */
	mem_device->state = MEMORY_POWER_ON_STATE;

	result = acpi_memory_check_device(mem_device);
	if (result) {
		/* Device not usable right now: free our state but report
		 * "do not bind" (0) rather than an error.
		 */
		acpi_memory_device_free(mem_device);
		return 0;
	}

	result = acpi_memory_enable_device(mem_device);
	if (result) {
		dev_err(&device->dev, "acpi_memory_enable_device() error\n");
		acpi_memory_device_free(mem_device);
		return result;
	}

	dev_dbg(&device->dev, "Memory device configured by ACPI\n");
	return 1;
}
static void acpi_memory_device_remove(struct acpi_device *device)
{
struct acpi_memory_device *mem_device;
if (!device || !acpi_driver_data(device))
return;
mem_device = acpi_driver_data(device);
acpi_memory_remove_memory(mem_device);
acpi_memory_device_free(mem_device);
}
/*
 * Register the memory scan handler with the ACPI core at boot time.
 * NOTE(review): the return value of acpi_scan_add_handler_with_hotplug()
 * is ignored here — confirm that registration cannot fail in practice.
 */
void __init acpi_memory_hotplug_init(void)
{
	acpi_scan_add_handler_with_hotplug(&memory_device_handler, "memory");
}
| gpl-2.0 |
andrew-pa/limbo-android | jni/qemu/roms/ipxe/src/arch/i386/interface/pxe/pxe_udp.c | 77 | 14246 | /** @file
*
* PXE UDP API
*
*/
#include <string.h>
#include <byteswap.h>
#include <ipxe/iobuf.h>
#include <ipxe/xfer.h>
#include <ipxe/udp.h>
#include <ipxe/uaccess.h>
#include <ipxe/process.h>
#include <pxe.h>
/*
* Copyright (C) 2004 Michael Brown <mbrown@fensystems.co.uk>.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
FILE_LICENCE ( GPL2_OR_LATER );
/** A PXE UDP connection
 *
 * Only one connection can exist at a time; it is represented by the
 * single static @c pxe_udp instance defined below.
 */
struct pxe_udp_connection {
	/** Data transfer interface to UDP stack */
	struct interface xfer;
	/** Local address */
	struct sockaddr_in local;
	/** Current PXENV_UDP_READ parameter block
	 *
	 * Non-NULL only while a pxenv_udp_read() call is waiting for a
	 * packet; cleared by pxe_udp_deliver() once a packet has been
	 * copied into it.
	 */
	struct s_PXENV_UDP_READ *pxenv_udp_read;
};
/**
* Receive PXE UDP data
*
* @v pxe_udp PXE UDP connection
* @v iobuf I/O buffer
* @v meta Data transfer metadata
* @ret rc Return status code
*
* Receives a packet as part of the current pxenv_udp_read()
* operation.
*/
/**
 * Receive PXE UDP data
 *
 * @v pxe_udp		PXE UDP connection
 * @v iobuf		I/O buffer
 * @v meta		Data transfer metadata
 * @ret rc		Return status code
 *
 * Receives a packet as part of the current pxenv_udp_read()
 * operation: the payload is copied into the caller's real-mode
 * buffer and the source/destination address fields of the parameter
 * block are filled in.  The I/O buffer is always consumed (freed),
 * whether or not a read was pending.
 */
static int pxe_udp_deliver ( struct pxe_udp_connection *pxe_udp,
			     struct io_buffer *iobuf,
			     struct xfer_metadata *meta ) {
	struct s_PXENV_UDP_READ *pxenv_udp_read = pxe_udp->pxenv_udp_read;
	struct sockaddr_in *sin_src;
	struct sockaddr_in *sin_dest;
	userptr_t buffer;
	size_t len;
	int rc = 0;

	/* No pxenv_udp_read() currently in progress: drop the packet */
	if ( ! pxenv_udp_read ) {
		DBG ( "PXE discarded UDP packet\n" );
		rc = -ENOBUFS;
		goto done;
	}

	/* Copy packet to buffer and record length; payload is silently
	 * truncated to the caller-supplied buffer size.
	 */
	buffer = real_to_user ( pxenv_udp_read->buffer.segment,
				pxenv_udp_read->buffer.offset );
	len = iob_len ( iobuf );
	if ( len > pxenv_udp_read->buffer_size )
		len = pxenv_udp_read->buffer_size;
	copy_to_user ( buffer, 0, iobuf->data, len );
	pxenv_udp_read->buffer_size = len;

	/* Fill in source/dest information (addresses/ports are already
	 * in network byte order, as the PXE API expects).
	 */
	assert ( meta );
	sin_src = ( struct sockaddr_in * ) meta->src;
	assert ( sin_src );
	assert ( sin_src->sin_family == AF_INET );
	pxenv_udp_read->src_ip = sin_src->sin_addr.s_addr;
	pxenv_udp_read->s_port = sin_src->sin_port;
	sin_dest = ( struct sockaddr_in * ) meta->dest;
	assert ( sin_dest );
	assert ( sin_dest->sin_family == AF_INET );
	pxenv_udp_read->dest_ip = sin_dest->sin_addr.s_addr;
	pxenv_udp_read->d_port = sin_dest->sin_port;

	/* Mark as received: a NULL pxenv_udp_read tells pxenv_udp_read()
	 * that its parameter block has been filled in.
	 */
	pxe_udp->pxenv_udp_read = NULL;

 done:
	free_iob ( iobuf );
	return rc;
}
/** PXE UDP data transfer interface operations */
static struct interface_operation pxe_udp_xfer_operations[] = {
	INTF_OP ( xfer_deliver, struct pxe_udp_connection *, pxe_udp_deliver ),
};

/** PXE UDP data transfer interface descriptor */
static struct interface_descriptor pxe_udp_xfer_desc =
	INTF_DESC ( struct pxe_udp_connection, xfer, pxe_udp_xfer_operations );

/** The PXE UDP connection
 *
 * Single static instance: the PXE API permits at most one open UDP
 * connection at any time.
 */
static struct pxe_udp_connection pxe_udp = {
	.xfer = INTF_INIT ( pxe_udp_xfer_desc ),
	.local = {
		.sin_family = AF_INET,
	},
};
/**
* UDP OPEN
*
* @v pxenv_udp_open Pointer to a struct s_PXENV_UDP_OPEN
* @v s_PXENV_UDP_OPEN::src_ip IP address of this station, or 0.0.0.0
* @ret #PXENV_EXIT_SUCCESS Always
* @ret s_PXENV_UDP_OPEN::Status PXE status code
* @err #PXENV_STATUS_UDP_OPEN UDP connection already open
* @err #PXENV_STATUS_OUT_OF_RESOURCES Could not open connection
*
* Prepares the PXE stack for communication using pxenv_udp_write()
* and pxenv_udp_read().
*
* The IP address supplied in s_PXENV_UDP_OPEN::src_ip will be
* recorded and used as the local station's IP address for all further
* communication, including communication by means other than
* pxenv_udp_write() and pxenv_udp_read(). (If
* s_PXENV_UDP_OPEN::src_ip is 0.0.0.0, the local station's IP address
* will remain unchanged.)
*
* You can only have one open UDP connection at a time. This is not a
* meaningful restriction, since pxenv_udp_write() and
* pxenv_udp_read() allow you to specify arbitrary local and remote
* ports and an arbitrary remote address for each packet. According
* to the PXE specifiation, you cannot have a UDP connection open at
* the same time as a TFTP connection; this restriction does not apply
* to Etherboot.
*
* On x86, you must set the s_PXE::StatusCallout field to a nonzero
* value before calling this function in protected mode. You cannot
* call this function with a 32-bit stack segment. (See the relevant
* @ref pxe_x86_pmode16 "implementation note" for more details.)
*
* @note The PXE specification does not make it clear whether the IP
* address supplied in s_PXENV_UDP_OPEN::src_ip should be used only
* for this UDP connection, or retained for all future communication.
* The latter seems more consistent with typical PXE stack behaviour.
*
* @note Etherboot currently ignores the s_PXENV_UDP_OPEN::src_ip
* parameter.
*
*/
PXENV_EXIT_t pxenv_udp_open ( struct s_PXENV_UDP_OPEN *pxenv_udp_open ) {
	int rc;

	DBG ( "PXENV_UDP_OPEN" );

	/* Latch the caller-supplied station address; it is used as the
	 * local address for all subsequent traffic.
	 */
	pxe_udp.local.sin_addr.s_addr = pxenv_udp_open->src_ip;
	DBG ( " %s\n", inet_ntoa ( pxe_udp.local.sin_addr ) );

	/* Drop any previous connection, then attach a fresh promiscuous
	 * UDP socket to our data transfer interface.
	 */
	intf_restart ( &pxe_udp.xfer, 0 );
	rc = udp_open_promisc ( &pxe_udp.xfer );
	if ( rc != 0 ) {
		DBG ( "PXENV_UDP_OPEN could not open promiscuous socket: %s\n",
		      strerror ( rc ) );
		pxenv_udp_open->Status = PXENV_STATUS ( rc );
		return PXENV_EXIT_FAILURE;
	}

	pxenv_udp_open->Status = PXENV_STATUS_SUCCESS;
	return PXENV_EXIT_SUCCESS;
}
/**
* UDP CLOSE
*
* @v pxenv_udp_close Pointer to a struct s_PXENV_UDP_CLOSE
* @ret #PXENV_EXIT_SUCCESS Always
* @ret s_PXENV_UDP_CLOSE::Status PXE status code
* @err None -
*
* Closes a UDP connection opened with pxenv_udp_open().
*
* You can only have one open UDP connection at a time. You cannot
* have a UDP connection open at the same time as a TFTP connection.
* You cannot use pxenv_udp_close() to close a TFTP connection; use
* pxenv_tftp_close() instead.
*
* On x86, you must set the s_PXE::StatusCallout field to a nonzero
* value before calling this function in protected mode. You cannot
* call this function with a 32-bit stack segment. (See the relevant
* @ref pxe_x86_pmode16 "implementation note" for more details.)
*
*/
PXENV_EXIT_t pxenv_udp_close ( struct s_PXENV_UDP_CLOSE *pxenv_udp_close ) {

	DBG ( "PXENV_UDP_CLOSE\n" );

	/* Close UDP connection: restarting the interface unplugs it from
	 * the UDP stack.  Safe to call even if no connection is open.
	 */
	intf_restart ( &pxe_udp.xfer, 0 );

	pxenv_udp_close->Status = PXENV_STATUS_SUCCESS;
	return PXENV_EXIT_SUCCESS;
}
/**
* UDP WRITE
*
* @v pxenv_udp_write Pointer to a struct s_PXENV_UDP_WRITE
* @v s_PXENV_UDP_WRITE::ip Destination IP address
* @v s_PXENV_UDP_WRITE::gw Relay agent IP address, or 0.0.0.0
* @v s_PXENV_UDP_WRITE::src_port Source UDP port, or 0
* @v s_PXENV_UDP_WRITE::dst_port Destination UDP port
* @v s_PXENV_UDP_WRITE::buffer_size Length of the UDP payload
* @v s_PXENV_UDP_WRITE::buffer Address of the UDP payload
* @ret #PXENV_EXIT_SUCCESS Packet was transmitted successfully
* @ret #PXENV_EXIT_FAILURE Packet could not be transmitted
* @ret s_PXENV_UDP_WRITE::Status PXE status code
* @err #PXENV_STATUS_UDP_CLOSED UDP connection is not open
* @err #PXENV_STATUS_UNDI_TRANSMIT_ERROR Could not transmit packet
*
* Transmits a single UDP packet. A valid IP and UDP header will be
* prepended to the payload in s_PXENV_UDP_WRITE::buffer; the buffer
* should not contain precomputed IP and UDP headers, nor should it
* contain space allocated for these headers. The first byte of the
* buffer will be transmitted as the first byte following the UDP
* header.
*
* If s_PXENV_UDP_WRITE::gw is 0.0.0.0, normal IP routing will take
* place. See the relevant @ref pxe_routing "implementation note" for
* more details.
*
* If s_PXENV_UDP_WRITE::src_port is 0, port 2069 will be used.
*
* You must have opened a UDP connection with pxenv_udp_open() before
* calling pxenv_udp_write().
*
* On x86, you must set the s_PXE::StatusCallout field to a nonzero
* value before calling this function in protected mode. You cannot
* call this function with a 32-bit stack segment. (See the relevant
* @ref pxe_x86_pmode16 "implementation note" for more details.)
*
* @note Etherboot currently ignores the s_PXENV_UDP_WRITE::gw
* parameter.
*
*/
PXENV_EXIT_t pxenv_udp_write ( struct s_PXENV_UDP_WRITE *pxenv_udp_write ) {
	struct sockaddr_in dest;
	struct xfer_metadata meta = {
		.src = ( struct sockaddr * ) &pxe_udp.local,
		.dest = ( struct sockaddr * ) &dest,
		.netdev = pxe_netdev,
	};
	size_t len;
	struct io_buffer *iobuf;
	userptr_t buffer;
	int rc;

	DBG ( "PXENV_UDP_WRITE" );

	/* Construct destination socket address (ip/dst_port are already
	 * in network byte order per the PXE API).
	 */
	memset ( &dest, 0, sizeof ( dest ) );
	dest.sin_family = AF_INET;
	dest.sin_addr.s_addr = pxenv_udp_write->ip;
	dest.sin_port = pxenv_udp_write->dst_port;

	/* Set local (source) port.  PXE spec says source port is 2069
	 * if not specified.  Really, this ought to be set at UDP open
	 * time but hey, we didn't design this API.
	 */
	pxe_udp.local.sin_port = pxenv_udp_write->src_port;
	if ( ! pxe_udp.local.sin_port )
		pxe_udp.local.sin_port = htons ( 2069 );

	/* FIXME: we ignore the gateway specified, since we're
	 * confident of being able to do our own routing.  We should
	 * probably allow for multiple gateways.
	 */

	/* Allocate an I/O buffer sized for the payload and copy the
	 * payload in from the caller's real-mode buffer.
	 */
	len = pxenv_udp_write->buffer_size;
	iobuf = xfer_alloc_iob ( &pxe_udp.xfer, len );
	if ( ! iobuf ) {
		DBG ( " out of memory\n" );
		pxenv_udp_write->Status = PXENV_STATUS_OUT_OF_RESOURCES;
		return PXENV_EXIT_FAILURE;
	}
	buffer = real_to_user ( pxenv_udp_write->buffer.segment,
				pxenv_udp_write->buffer.offset );
	copy_from_user ( iob_put ( iobuf, len ), buffer, 0, len );
	DBG ( " %04x:%04x+%x %d->%s:%d\n", pxenv_udp_write->buffer.segment,
	      pxenv_udp_write->buffer.offset, pxenv_udp_write->buffer_size,
	      ntohs ( pxenv_udp_write->src_port ),
	      inet_ntoa ( dest.sin_addr ),
	      ntohs ( pxenv_udp_write->dst_port ) );

	/* Transmit packet; ownership of iobuf passes to xfer_deliver() */
	if ( ( rc = xfer_deliver ( &pxe_udp.xfer, iobuf, &meta ) ) != 0 ) {
		DBG ( "PXENV_UDP_WRITE could not transmit: %s\n",
		      strerror ( rc ) );
		pxenv_udp_write->Status = PXENV_STATUS ( rc );
		return PXENV_EXIT_FAILURE;
	}

	pxenv_udp_write->Status = PXENV_STATUS_SUCCESS;
	return PXENV_EXIT_SUCCESS;
}
/**
* UDP READ
*
* @v pxenv_udp_read Pointer to a struct s_PXENV_UDP_READ
* @v s_PXENV_UDP_READ::dest_ip Destination IP address, or 0.0.0.0
* @v s_PXENV_UDP_READ::d_port Destination UDP port, or 0
* @v s_PXENV_UDP_READ::buffer_size Size of the UDP payload buffer
* @v s_PXENV_UDP_READ::buffer Address of the UDP payload buffer
* @ret #PXENV_EXIT_SUCCESS A packet has been received
* @ret #PXENV_EXIT_FAILURE No packet has been received
* @ret s_PXENV_UDP_READ::Status PXE status code
* @ret s_PXENV_UDP_READ::src_ip Source IP address
* @ret s_PXENV_UDP_READ::dest_ip Destination IP address
* @ret s_PXENV_UDP_READ::s_port Source UDP port
* @ret s_PXENV_UDP_READ::d_port Destination UDP port
* @ret s_PXENV_UDP_READ::buffer_size Length of UDP payload
* @err #PXENV_STATUS_UDP_CLOSED UDP connection is not open
* @err #PXENV_STATUS_FAILURE No packet was ready to read
*
* Receive a single UDP packet. This is a non-blocking call; if no
* packet is ready to read, the call will return instantly with
* s_PXENV_UDP_READ::Status==PXENV_STATUS_FAILURE.
*
* If s_PXENV_UDP_READ::dest_ip is 0.0.0.0, UDP packets addressed to
* any IP address will be accepted and may be returned to the caller.
*
* If s_PXENV_UDP_READ::d_port is 0, UDP packets addressed to any UDP
* port will be accepted and may be returned to the caller.
*
* You must have opened a UDP connection with pxenv_udp_open() before
* calling pxenv_udp_read().
*
* On x86, you must set the s_PXE::StatusCallout field to a nonzero
* value before calling this function in protected mode. You cannot
* call this function with a 32-bit stack segment. (See the relevant
* @ref pxe_x86_pmode16 "implementation note" for more details.)
*
* @note The PXE specification (version 2.1) does not state that we
* should fill in s_PXENV_UDP_READ::dest_ip and
* s_PXENV_UDP_READ::d_port, but Microsoft Windows' NTLDR program
* expects us to do so, and will fail if we don't.
*
*/
PXENV_EXIT_t pxenv_udp_read ( struct s_PXENV_UDP_READ *pxenv_udp_read ) {
	struct in_addr dest_ip_wanted = { .s_addr = pxenv_udp_read->dest_ip };
	struct in_addr dest_ip;
	uint16_t d_port_wanted = pxenv_udp_read->d_port;
	uint16_t d_port;

	/* Try receiving a packet: publish the parameter block, then run
	 * the network stack once.  pxe_udp_deliver() clears the pointer
	 * if it fills the block in.
	 */
	pxe_udp.pxenv_udp_read = pxenv_udp_read;
	step();
	if ( pxe_udp.pxenv_udp_read ) {
		/* No packet received */
		DBG2 ( "PXENV_UDP_READ\n" );
		pxe_udp.pxenv_udp_read = NULL;
		goto no_packet;
	}
	/* dest_ip/d_port now hold the actual addressing of the packet
	 * just delivered (pxe_udp_deliver() overwrote the fields).
	 */
	dest_ip.s_addr = pxenv_udp_read->dest_ip;
	d_port = pxenv_udp_read->d_port;
	DBG ( "PXENV_UDP_READ" );

	/* Filter on destination address and/or port.  Note: a packet
	 * rejected by these filters has already been copied out of the
	 * stack and is dropped, not requeued.
	 */
	if ( dest_ip_wanted.s_addr &&
	     ( dest_ip_wanted.s_addr != dest_ip.s_addr ) ) {
		DBG ( " wrong IP %s", inet_ntoa ( dest_ip ) );
		DBG ( " (wanted %s)\n", inet_ntoa ( dest_ip_wanted ) );
		goto no_packet;
	}
	if ( d_port_wanted && ( d_port_wanted != d_port ) ) {
		DBG ( " wrong port %d", htons ( d_port ) );
		DBG ( " (wanted %d)\n", htons ( d_port_wanted ) );
		goto no_packet;
	}

	DBG ( " %04x:%04x+%x %s:", pxenv_udp_read->buffer.segment,
	      pxenv_udp_read->buffer.offset, pxenv_udp_read->buffer_size,
	      inet_ntoa ( *( ( struct in_addr * ) &pxenv_udp_read->src_ip ) ));
	DBG ( "%d<-%s:%d\n",  ntohs ( pxenv_udp_read->s_port ),
	      inet_ntoa ( *( ( struct in_addr * ) &pxenv_udp_read->dest_ip ) ),
	      ntohs ( pxenv_udp_read->d_port ) );

	pxenv_udp_read->Status = PXENV_STATUS_SUCCESS;
	return PXENV_EXIT_SUCCESS;

 no_packet:
	pxenv_udp_read->Status = PXENV_STATUS_FAILURE;
	return PXENV_EXIT_FAILURE;
}
| gpl-2.0 |
paxchristos/Semc-ICS-kernel | drivers/hwmon/sht15.c | 333 | 18724 | /*
* sht15.c - support for the SHT15 Temperature and Humidity Sensor
*
* Copyright (c) 2009 Jonathan Cameron
*
* Copyright (c) 2007 Wouter Horre
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Currently ignoring checksum on readings.
* Default resolution only (14bit temp, 12bit humidity)
* Ignoring battery status.
* Heater not enabled.
* Timings are all conservative.
*
* Data sheet available (1/2009) at
* http://www.sensirion.ch/en/pdf/product_information/Datasheet-humidity-sensor-SHT1x.pdf
*
* Regulator supply name = vcc
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/mutex.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/err.h>
#include <linux/sht15.h>
#include <linux/regulator/consumer.h>
#include <asm/atomic.h>
/* Measurement command bytes clocked out to the device */
#define SHT15_MEASURE_TEMP	3
#define SHT15_MEASURE_RH	5

/* Values for sht15_data.flag: which reading (if any) is in flight */
#define SHT15_READING_NOTHING	0
#define SHT15_READING_TEMP	1
#define SHT15_READING_HUMID	2

/* Min timings in nsecs */
#define SHT15_TSCKL	100	/* clock low */
#define SHT15_TSCKH	100	/* clock high */
#define SHT15_TSU	150	/* data setup time */

/**
 * struct sht15_temppair - elements of voltage dependant temp calc
 * @vdd: supply voltage in microvolts
 * @d1: temperature offset for this supply voltage (see data sheet)
 */
struct sht15_temppair {
	int vdd; /* microvolts */
	int d1;
};

/* Table 9 from data sheet - relates temperature calculation
 * to supply voltage.  Entries are sorted by ascending vdd.
 */
static const struct sht15_temppair temppoints[] = {
	{ 2500000, -39400 },
	{ 3000000, -39600 },
	{ 3500000, -39700 },
	{ 4000000, -39800 },
	{ 5000000, -40100 },
};
/**
 * struct sht15_data - device instance specific data
 * @pdata:		platform data (gpio's etc)
 * @read_work:		bh of interrupt handler
 * @wait_queue:		wait queue for getting values from device
 * @val_temp:		last raw temperature value read from device
 * @val_humid:		last raw humidity value read from device
 * @flag:		status flag used to identify what the last request was
 * @valid:		are the current stored values valid (start condition)
 * @last_updat:		time (jiffies) of last update
 * @read_lock:		mutex to ensure only one read in progress
 *			at a time.
 * @dev:		associated device structure
 * @hwmon_dev:		device associated with hwmon subsystem
 * @reg:		associated regulator (if specified)
 * @nb:			notifier block to handle notifications of voltage
 *			changes
 * @supply_uV:		local copy of supply voltage (microvolts) used to
 *			allow use of regulator consumer if available
 * @supply_uV_valid:	indicates that an updated value has not yet
 *			been obtained from the regulator and so any
 *			calculations based upon it will be invalid.
 * @update_supply_work:	work struct that is used to update the supply_uV
 * @interrupt_handled:	flag used to indicate a handler has been scheduled
 */
struct sht15_data {
	struct sht15_platform_data *pdata;
	struct work_struct read_work;
	wait_queue_head_t wait_queue;
	uint16_t val_temp;
	uint16_t val_humid;
	u8 flag;
	u8 valid;
	unsigned long last_updat;
	struct mutex read_lock;
	struct device *dev;
	struct device *hwmon_dev;
	struct regulator *reg;
	struct notifier_block nb;
	int supply_uV;
	int supply_uV_valid;
	struct work_struct update_supply_work;
	atomic_t interrupt_handled;
};
/**
 * sht15_connection_reset() - reset the comms interface
 * @data: sht15 specific data
 *
 * This implements section 3.4 of the data sheet: with DATA held high,
 * toggle SCK at least nine times so the device's serial interface
 * returns to a known state regardless of what it was doing.
 */
static void sht15_connection_reset(struct sht15_data *data)
{
	int i;

	/* Drive DATA high and start with the clock low */
	gpio_direction_output(data->pdata->gpio_data, 1);
	ndelay(SHT15_TSCKL);
	gpio_set_value(data->pdata->gpio_sck, 0);
	ndelay(SHT15_TSCKL);

	/* Nine clock pulses while DATA stays high */
	for (i = 0; i < 9; ++i) {
		gpio_set_value(data->pdata->gpio_sck, 1);
		ndelay(SHT15_TSCKH);
		gpio_set_value(data->pdata->gpio_sck, 0);
		ndelay(SHT15_TSCKL);
	}
}
/**
 * sht15_send_bit() - send an individual bit to the device
 * @data: device state data
 * @val: value of bit to be sent
 *
 * Presents @val on the data line, then issues one full clock pulse,
 * respecting the minimum setup/high/low timings.
 **/
static inline void sht15_send_bit(struct sht15_data *data, int val)
{
	gpio_set_value(data->pdata->gpio_data, val);
	ndelay(SHT15_TSU);
	gpio_set_value(data->pdata->gpio_sck, 1);
	ndelay(SHT15_TSCKH);
	gpio_set_value(data->pdata->gpio_sck, 0);
	ndelay(SHT15_TSCKL);	/* clock low time */
}
/**
 * sht15_transmission_start() - specific sequence for new transmission
 * @data: device state data
 *
 * Timings for this are not documented on the data sheet, so very
 * conservative ones used in implementation.  This implements
 * figure 12 on the data sheet: DATA falls while SCK is high, then
 * DATA rises during the following SCK-high period — a pattern that
 * cannot occur during normal data transfer.
 **/
static void sht15_transmission_start(struct sht15_data *data)
{
	/* ensure data is high and output */
	gpio_direction_output(data->pdata->gpio_data, 1);
	ndelay(SHT15_TSU);
	gpio_set_value(data->pdata->gpio_sck, 0);
	ndelay(SHT15_TSCKL);
	/* first clock pulse: DATA pulled low while SCK is high */
	gpio_set_value(data->pdata->gpio_sck, 1);
	ndelay(SHT15_TSCKH);
	gpio_set_value(data->pdata->gpio_data, 0);
	ndelay(SHT15_TSU);
	gpio_set_value(data->pdata->gpio_sck, 0);
	ndelay(SHT15_TSCKL);
	/* second clock pulse: DATA released high while SCK is high */
	gpio_set_value(data->pdata->gpio_sck, 1);
	ndelay(SHT15_TSCKH);
	gpio_set_value(data->pdata->gpio_data, 1);
	ndelay(SHT15_TSU);
	gpio_set_value(data->pdata->gpio_sck, 0);
	ndelay(SHT15_TSCKL);
}
/**
 * sht15_send_byte() - clock one byte out to the device, MSB first
 * @data: device state
 * @byte: value to be sent
 **/
static void sht15_send_byte(struct sht15_data *data, u8 byte)
{
	u8 mask;

	/* Walk a single-bit mask from the MSB down to the LSB */
	for (mask = 0x80; mask != 0; mask >>= 1)
		sht15_send_bit(data, (byte & mask) ? 1 : 0);
}
/**
 * sht15_wait_for_response() - checks for ack from device
 * @data: device state
 *
 * After a command byte the device acknowledges by pulling DATA low
 * during the ninth clock pulse.  Returns 0 on ack, -EIO (after
 * resetting the connection) if the line stayed high.
 **/
static int sht15_wait_for_response(struct sht15_data *data)
{
	/* Release the data line so the device can drive it */
	gpio_direction_input(data->pdata->gpio_data);
	gpio_set_value(data->pdata->gpio_sck, 1);
	ndelay(SHT15_TSCKH);
	if (gpio_get_value(data->pdata->gpio_data)) {
		/* No ack: finish the clock pulse and reset comms */
		gpio_set_value(data->pdata->gpio_sck, 0);
		dev_err(data->dev, "Command not acknowledged\n");
		sht15_connection_reset(data);
		return -EIO;
	}
	gpio_set_value(data->pdata->gpio_sck, 0);
	ndelay(SHT15_TSCKL);
	return 0;
}
/**
 * sht15_send_cmd() - Sends a command to the device.
 * @data: device state
 * @cmd: command byte to be sent
 *
 * On entry, sck is output low, data is output pull high
 * and the interrupt disabled.  Issues the transmission-start
 * sequence, clocks the command out, and returns the result of
 * waiting for the device's acknowledge (0 or -EIO).
 **/
static int sht15_send_cmd(struct sht15_data *data, u8 cmd)
{
	sht15_transmission_start(data);
	sht15_send_byte(data, cmd);
	return sht15_wait_for_response(data);
}
/**
 * sht15_update_single_val() - get a new value from device
 * @data: device instance specific data
 * @command: command sent to request value
 * @timeout_msecs: timeout after which comms are assumed
 *    to have failed are reset.
 *
 * Sends the measurement command, then arms the falling-edge interrupt
 * on the data line (the device signals completion by pulling DATA low)
 * and sleeps until the bottom half has read the value or the timeout
 * expires.
 **/
static inline int sht15_update_single_val(struct sht15_data *data,
					  int command,
					  int timeout_msecs)
{
	int ret;
	ret = sht15_send_cmd(data, command);
	if (ret)
		return ret;

	gpio_direction_input(data->pdata->gpio_data);
	atomic_set(&data->interrupt_handled, 0);

	enable_irq(gpio_to_irq(data->pdata->gpio_data));
	if (gpio_get_value(data->pdata->gpio_data) == 0) {
		/* Race: the line may already have fallen before the irq
		 * was enabled.  Disarm it and kick the read work directly.
		 */
		disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
		/* Only relevant if the interrupt hasn't occured. */
		if (!atomic_read(&data->interrupt_handled))
			schedule_work(&data->read_work);
	}
	/* Sleep until sht15_bh_read_data() sets flag back to NOTHING */
	ret = wait_event_timeout(data->wait_queue,
				 (data->flag == SHT15_READING_NOTHING),
				 msecs_to_jiffies(timeout_msecs));
	if (ret == 0) {/* timeout occurred */
		disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
		sht15_connection_reset(data);
		return -ETIME;
	}
	return 0;
}
/**
 * sht15_update_vals() - get updated readings from device if too old
 * @data: device state
 *
 * Refreshes both humidity and temperature readings, but only when the
 * cached values are older than one second (timeout = HZ) or have never
 * been read.  Serialized by @read_lock.
 **/
static int sht15_update_vals(struct sht15_data *data)
{
	int ret = 0;
	int timeout = HZ;

	mutex_lock(&data->read_lock);
	if (time_after(jiffies, data->last_updat + timeout)
	    || !data->valid) {
		/* Humidity first, then temperature; per-measurement
		 * timeouts of 160ms and 400ms respectively.
		 */
		data->flag = SHT15_READING_HUMID;
		ret = sht15_update_single_val(data, SHT15_MEASURE_RH, 160);
		if (ret)
			goto error_ret;
		data->flag = SHT15_READING_TEMP;
		ret = sht15_update_single_val(data, SHT15_MEASURE_TEMP, 400);
		if (ret)
			goto error_ret;

		data->valid = 1;
		data->last_updat = jiffies;
	}
error_ret:
	mutex_unlock(&data->read_lock);

	return ret;
}
/**
 * sht15_calc_temp() - convert the raw reading to a temperature
 * @data: device state
 *
 * As per section 4.3 of the data sheet: T = d1 + d2 * SO_T, where d1
 * depends on the supply voltage (interpolated from Table 9).
 *
 * Fixes over the previous version:
 *  - the interpolation mixed units (supply_uV/1000 gave millivolts but
 *    the table vdd values are microvolts); all math is now in uV.
 *  - the ascending search matched the *first* table segment for any
 *    supply above 2.5V; searching from the top down finds the segment
 *    that actually brackets the supply voltage.
 *  - d1 defaulted to 0 when the supply was at or below 2.5V; it now
 *    falls back to the lowest table entry.
 **/
static inline int sht15_calc_temp(struct sht15_data *data)
{
	int d1 = temppoints[0].d1;
	int i;

	for (i = ARRAY_SIZE(temppoints) - 1; i > 0; i--)
		/* Find the pair of table entries bracketing supply_uV */
		if (data->supply_uV > temppoints[i - 1].vdd) {
			/* Linear interpolation of d1; everything in uV */
			d1 = (data->supply_uV - temppoints[i - 1].vdd)
				* (temppoints[i].d1 - temppoints[i - 1].d1)
				/ (temppoints[i].vdd - temppoints[i - 1].vdd)
				+ temppoints[i - 1].d1;
			break;
		}

	return data->val_temp * 10 + d1;
}
/**
 * sht15_calc_humid() - using last temperature convert raw to humid
 * @data: device state
 *
 * This is the temperature compensated version as per section 4.2 of
 * the data sheet: a quadratic linearisation of the raw reading
 * followed by a temperature correction term.  Result is in milli
 * percent RH (c1 is scaled by 1000 below) — TODO confirm scaling
 * against the data sheet coefficients.
 **/
static inline int sht15_calc_humid(struct sht15_data *data)
{
	int RHlinear; /* milli percent */
	int temp = sht15_calc_temp(data);

	/* Data sheet coefficients, pre-scaled for integer math */
	const int c1 = -4;
	const int c2 = 40500; /* x 10 ^ -6 */
	const int c3 = -2800; /* x10 ^ -9 */

	RHlinear = c1*1000
		+ c2 * data->val_humid/1000
		+ (data->val_humid * data->val_humid * c3)/1000000;
	/* Temperature compensation relative to 25C */
	return (temp - 25000) * (10000 + 80 * data->val_humid)
		/ 1000000 + RHlinear;
}
/*
 * sysfs show callback for temp1_input: refresh readings if stale and
 * print the converted temperature (units per sht15_calc_temp()).
 */
static ssize_t sht15_show_temp(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	int ret;
	struct sht15_data *data = dev_get_drvdata(dev);

	/* Technically no need to read humidity as well */
	ret = sht15_update_vals(data);
	return ret ? ret : sprintf(buf, "%d\n",
				   sht15_calc_temp(data));
}
/*
 * sysfs show callback for humidity1_input: refresh both readings
 * (humidity conversion is temperature compensated) and print the
 * converted relative humidity.
 *
 * Fix: dropped the stray ';' that previously followed the function's
 * closing brace (a spurious empty external declaration).
 */
static ssize_t sht15_show_humidity(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	int ret;
	struct sht15_data *data = dev_get_drvdata(dev);

	ret = sht15_update_vals(data);
	return ret ? ret : sprintf(buf, "%d\n", sht15_calc_humid(data));
}
static ssize_t show_name(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct platform_device *pdev = to_platform_device(dev);
return sprintf(buf, "%s\n", pdev->name);
}
/* Read-only sysfs attributes exposed via the hwmon class device */
static SENSOR_DEVICE_ATTR(temp1_input,
			  S_IRUGO, sht15_show_temp,
			  NULL, 0);
static SENSOR_DEVICE_ATTR(humidity1_input,
			  S_IRUGO, sht15_show_humidity,
			  NULL, 0);
static DEVICE_ATTR(name, S_IRUGO, show_name, NULL);

static struct attribute *sht15_attrs[] = {
	&sensor_dev_attr_temp1_input.dev_attr.attr,
	&sensor_dev_attr_humidity1_input.dev_attr.attr,
	&dev_attr_name.attr,
	NULL,
};

static const struct attribute_group sht15_attr_group = {
	.attrs = sht15_attrs,
};
/*
 * Falling-edge irq on the data line: the device has finished a
 * measurement.  Disarm the irq, record that it fired (so the waiter
 * doesn't double-schedule), and hand off to the read work struct —
 * the bit-banged readback is too slow for hard-irq context.
 */
static irqreturn_t sht15_interrupt_fired(int irq, void *d)
{
	struct sht15_data *data = d;

	/* First disable the interrupt */
	disable_irq_nosync(irq);
	atomic_inc(&data->interrupt_handled);
	/* Then schedule a reading work struct */
	if (data->flag != SHT15_READING_NOTHING)
		schedule_work(&data->read_work);
	return IRQ_HANDLED;
}
/* Each byte of data is acknowledged by pulling the data line
 * low for one clock pulse.
 *
 * NOTE(review): the clock high/low periods here use SHT15_TSU rather
 * than SHT15_TSCKH/SHT15_TSCKL — confirm this is intentional (all
 * three constants are of similar magnitude).
 */
static void sht15_ack(struct sht15_data *data)
{
	gpio_direction_output(data->pdata->gpio_data, 0);
	ndelay(SHT15_TSU);
	gpio_set_value(data->pdata->gpio_sck, 1);
	ndelay(SHT15_TSU);
	gpio_set_value(data->pdata->gpio_sck, 0);
	ndelay(SHT15_TSU);
	gpio_set_value(data->pdata->gpio_data, 1);

	/* Release the line again so the device can drive the next bits */
	gpio_direction_input(data->pdata->gpio_data);
}
/**
 * sht15_end_transmission() - notify device of end of transmission
 * @data: device state
 *
 * This is basically a NAK (single clock pulse, data high): it tells
 * the device we do not want the trailing CRC byte.
 **/
static void sht15_end_transmission(struct sht15_data *data)
{
	gpio_direction_output(data->pdata->gpio_data, 1);
	ndelay(SHT15_TSU);
	gpio_set_value(data->pdata->gpio_sck, 1);
	ndelay(SHT15_TSCKH);
	gpio_set_value(data->pdata->gpio_sck, 0);
	ndelay(SHT15_TSCKL);
}
/*
 * Work-struct bottom half: clock the 16-bit measurement result out of
 * the device, store it according to @flag, and wake the sleeping
 * reader in sht15_update_single_val().
 */
static void sht15_bh_read_data(struct work_struct *work_s)
{
	int i;
	uint16_t val = 0;
	struct sht15_data *data
		= container_of(work_s, struct sht15_data,
			       read_work);

	/* Firstly, verify the line is low (measurement complete) */
	if (gpio_get_value(data->pdata->gpio_data)) {
		/* If not, then start the interrupt again - care
		   here as could have gone low in meantime so verify
		   it hasn't!
		*/
		atomic_set(&data->interrupt_handled, 0);
		enable_irq(gpio_to_irq(data->pdata->gpio_data));
		/* If still not occured or another handler has been scheduled */
		if (gpio_get_value(data->pdata->gpio_data)
		    || atomic_read(&data->interrupt_handled))
			return;
	}

	/* Read the 16 data bits back, MSB first; ack after the first
	 * byte so the device sends the second one.
	 */
	for (i = 0; i < 16; ++i) {
		val <<= 1;
		gpio_set_value(data->pdata->gpio_sck, 1);
		ndelay(SHT15_TSCKH);
		val |= !!gpio_get_value(data->pdata->gpio_data);
		gpio_set_value(data->pdata->gpio_sck, 0);
		ndelay(SHT15_TSCKL);
		if (i == 7)
			sht15_ack(data);
	}

	/* Tell the device we are done (skips the CRC byte) */
	sht15_end_transmission(data);

	switch (data->flag) {
	case SHT15_READING_TEMP:
		data->val_temp = val;
		break;
	case SHT15_READING_HUMID:
		data->val_humid = val;
		break;
	}

	data->flag = SHT15_READING_NOTHING;
	wake_up(&data->wait_queue);
}
/*
 * Work-struct callback: re-read the supply voltage from the regulator.
 * Deferred to a workqueue because the regulator notifier (which
 * schedules this) runs with the regulator lock held.
 */
static void sht15_update_voltage(struct work_struct *work_s)
{
	struct sht15_data *data
		= container_of(work_s, struct sht15_data,
			       update_supply_work);
	data->supply_uV = regulator_get_voltage(data->reg);
}
/**
 * sht15_invalidate_voltage() - mark supply voltage invalid when notified by reg
 * @nb: associated notification structure
 * @event: voltage regulator state change event code
 * @ignored: function parameter - ignored here
 *
 * Note that as the notification code holds the regulator lock, we have
 * to schedule an update of the supply voltage rather than getting it
 * directly.  NOTE(review): the update work is scheduled for every
 * event, not only REGULATOR_EVENT_VOLTAGE_CHANGE — confirm whether
 * that is intentional.
 **/
static int sht15_invalidate_voltage(struct notifier_block *nb,
				    unsigned long event,
				    void *ignored)
{
	struct sht15_data *data = container_of(nb, struct sht15_data, nb);

	if (event == REGULATOR_EVENT_VOLTAGE_CHANGE)
		data->supply_uV_valid = false;
	schedule_work(&data->update_supply_work);

	return NOTIFY_OK;
}
/*
 * Probe: allocate driver state, take the two GPIOs, register sysfs
 * attributes, grab the data-line irq (left disabled until a
 * measurement is in flight), soft-reset the device and register with
 * hwmon.
 *
 * Fix: missing platform data previously jumped to err_free_data with
 * ret still 0, so probe reported success for an unusable device; it
 * now returns -EINVAL.
 */
static int __devinit sht15_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct sht15_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

	if (!data) {
		ret = -ENOMEM;
		dev_err(&pdev->dev, "kzalloc failed");
		goto error_ret;
	}

	INIT_WORK(&data->read_work, sht15_bh_read_data);
	INIT_WORK(&data->update_supply_work, sht15_update_voltage);
	platform_set_drvdata(pdev, data);
	mutex_init(&data->read_lock);
	data->dev = &pdev->dev;
	init_waitqueue_head(&data->wait_queue);

	if (pdev->dev.platform_data == NULL) {
		ret = -EINVAL;	/* was: fell through with ret == 0 */
		dev_err(&pdev->dev, "no platform data supplied");
		goto err_free_data;
	}
	data->pdata = pdev->dev.platform_data;
	data->supply_uV = data->pdata->supply_mv * 1000;

	/* If a regulator is available, query what the supply voltage
	 * actually is, and track future changes via a notifier.
	 */
	data->reg = regulator_get(data->dev, "vcc");
	if (!IS_ERR(data->reg)) {
		data->supply_uV = regulator_get_voltage(data->reg);
		regulator_enable(data->reg);
		/* Setup a notifier block to update this if another device
		 * causes the voltage to change
		 */
		data->nb.notifier_call = &sht15_invalidate_voltage;
		ret = regulator_register_notifier(data->reg, &data->nb);
	}

	/* Try requesting the GPIOs */
	ret = gpio_request(data->pdata->gpio_sck, "SHT15 sck");
	if (ret) {
		dev_err(&pdev->dev, "gpio request failed");
		goto err_free_data;
	}
	gpio_direction_output(data->pdata->gpio_sck, 0);

	ret = gpio_request(data->pdata->gpio_data, "SHT15 data");
	if (ret) {
		dev_err(&pdev->dev, "gpio request failed");
		goto err_release_gpio_sck;
	}

	ret = sysfs_create_group(&pdev->dev.kobj, &sht15_attr_group);
	if (ret) {
		dev_err(&pdev->dev, "sysfs create failed");
		goto err_release_gpio_data;
	}

	ret = request_irq(gpio_to_irq(data->pdata->gpio_data),
			  sht15_interrupt_fired,
			  IRQF_TRIGGER_FALLING,
			  "sht15 data",
			  data);
	if (ret) {
		dev_err(&pdev->dev, "failed to get irq for data line");
		goto err_release_gpio_data;
	}
	/* Irq stays disabled until a measurement is pending */
	disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));

	/* Reset comms and issue a soft reset command (0x1E) */
	sht15_connection_reset(data);
	sht15_send_cmd(data, 0x1E);

	data->hwmon_dev = hwmon_device_register(data->dev);
	if (IS_ERR(data->hwmon_dev)) {
		ret = PTR_ERR(data->hwmon_dev);
		goto err_release_irq;
	}

	return 0;

err_release_irq:
	free_irq(gpio_to_irq(data->pdata->gpio_data), data);
err_release_gpio_data:
	gpio_free(data->pdata->gpio_data);
err_release_gpio_sck:
	gpio_free(data->pdata->gpio_sck);
err_free_data:
	kfree(data);
error_ret:
	return ret;
}
/*
 * Remove: undo probe in reverse order.  The read mutex is held for the
 * whole teardown so no sysfs read can race with the resources going
 * away.
 */
static int __devexit sht15_remove(struct platform_device *pdev)
{
	struct sht15_data *data = platform_get_drvdata(pdev);

	/* Make sure any reads from the device are done and
	 * prevent new ones beginning
	 */
	mutex_lock(&data->read_lock);
	hwmon_device_unregister(data->hwmon_dev);
	sysfs_remove_group(&pdev->dev.kobj, &sht15_attr_group);
	if (!IS_ERR(data->reg)) {
		regulator_unregister_notifier(data->reg, &data->nb);
		regulator_disable(data->reg);
		regulator_put(data->reg);
	}

	free_irq(gpio_to_irq(data->pdata->gpio_data), data);
	gpio_free(data->pdata->gpio_data);
	gpio_free(data->pdata->gpio_sck);
	mutex_unlock(&data->read_lock);
	kfree(data);
	return 0;
}
/*
 * sht_drivers simultaneously refers to __devinit and __devexit function
 * which causes spurious section mismatch warning. So use __refdata to
 * get rid from this.
 *
 * All supported chip variants share the same probe/remove; only the
 * platform device name differs.
 */
static struct platform_driver __refdata sht_drivers[] = {
	{
		.driver = {
			.name = "sht10",
			.owner = THIS_MODULE,
		},
		.probe = sht15_probe,
		.remove = __devexit_p(sht15_remove),
	}, {
		.driver = {
			.name = "sht11",
			.owner = THIS_MODULE,
		},
		.probe = sht15_probe,
		.remove = __devexit_p(sht15_remove),
	}, {
		.driver = {
			.name = "sht15",
			.owner = THIS_MODULE,
		},
		.probe = sht15_probe,
		.remove = __devexit_p(sht15_remove),
	}, {
		.driver = {
			.name = "sht71",
			.owner = THIS_MODULE,
		},
		.probe = sht15_probe,
		.remove = __devexit_p(sht15_remove),
	}, {
		.driver = {
			.name = "sht75",
			.owner = THIS_MODULE,
		},
		.probe = sht15_probe,
		.remove = __devexit_p(sht15_remove),
	},
};
/*
 * sht15_init() - register all SHTxx platform drivers.
 *
 * Registers every entry of sht_drivers[]; on the first failure the
 * drivers registered so far are unregistered again (in reverse order)
 * and the error is returned.
 */
static int __init sht15_init(void)
{
	int i, ret = 0;

	for (i = 0; i < ARRAY_SIZE(sht_drivers); i++) {
		ret = platform_driver_register(&sht_drivers[i]);
		if (!ret)
			continue;
		/* Roll back everything registered before the failure. */
		while (i-- > 0)
			platform_driver_unregister(&sht_drivers[i]);
		break;
	}
	return ret;
}
module_init(sht15_init);
/*
 * sht15_exit() - unregister all SHTxx platform drivers, newest first.
 */
static void __exit sht15_exit(void)
{
	int i = ARRAY_SIZE(sht_drivers);

	/* Unregister in reverse order of registration. */
	while (i--)
		platform_driver_unregister(&sht_drivers[i]);
}
module_exit(sht15_exit);

MODULE_LICENSE("GPL");
| gpl-2.0 |
sqlfocus/linux | net/mac80211/ocb.c | 333 | 6943 | /*
* OCB mode implementation
*
* Copyright: (c) 2014 Czech Technical University in Prague
* (c) 2014 Volkswagen Group Research
* Author: Rostislav Lisovy <rostislav.lisovy@fel.cvut.cz>
* Funded by: Volkswagen Group Research
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/rtnetlink.h>
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#define IEEE80211_OCB_HOUSEKEEPING_INTERVAL (60 * HZ)
#define IEEE80211_OCB_PEER_INACTIVITY_LIMIT (240 * HZ)
#define IEEE80211_OCB_MAX_STA_ENTRIES 128
/**
* enum ocb_deferred_task_flags - mac80211 OCB deferred tasks
* @OCB_WORK_HOUSEKEEPING: run the periodic OCB housekeeping tasks
*
* These flags are used in @wrkq_flags field of &struct ieee80211_if_ocb
*/
enum ocb_deferred_task_flags {
OCB_WORK_HOUSEKEEPING,
};
/*
 * ieee80211_ocb_rx_no_sta - handle a frame from a yet-unknown OCB peer
 * @sdata: interface the frame arrived on
 * @bssid: BSSID from the frame (not used by this function)
 * @addr: transmitter address of the new peer
 * @supp_rates: rates advertised by the peer (not used; only the band's
 *	mandatory rates are installed below)
 *
 * Called from the RX path when a frame arrives from an address without a
 * sta_info entry.  Allocates a new entry (GFP_ATOMIC - this can run in
 * softirq context), gives it the band's mandatory rates, and queues it on
 * the "incomplete" list; ieee80211_ocb_work() finishes the insertion from
 * process context.
 */
void ieee80211_ocb_rx_no_sta(struct ieee80211_sub_if_data *sdata,
			     const u8 *bssid, const u8 *addr,
			     u32 supp_rates)
{
	struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_chanctx_conf *chanctx_conf;
	struct ieee80211_supported_band *sband;
	enum nl80211_bss_scan_width scan_width;
	struct sta_info *sta;
	int band;

	/* XXX: Consider removing the least recently used entry and
	 * allow new one to be added.
	 */
	if (local->num_sta >= IEEE80211_OCB_MAX_STA_ENTRIES) {
		net_info_ratelimited("%s: No room for a new OCB STA entry %pM\n",
				     sdata->name, addr);
		return;
	}

	ocb_dbg(sdata, "Adding new OCB station %pM\n", addr);

	/* Snapshot band and scan width under RCU; the channel context
	 * pointer is only valid inside this critical section. */
	rcu_read_lock();
	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
	if (WARN_ON_ONCE(!chanctx_conf)) {
		rcu_read_unlock();
		return;
	}
	band = chanctx_conf->def.chan->band;
	scan_width = cfg80211_chandef_to_scan_width(&chanctx_conf->def);
	rcu_read_unlock();

	sta = sta_info_alloc(sdata, addr, GFP_ATOMIC);
	if (!sta)
		return;

	/* Add only mandatory rates for now */
	sband = local->hw.wiphy->bands[band];
	sta->sta.supp_rates[band] =
		ieee80211_mandatory_rates(sband, scan_width);

	/* Hand the half-initialized entry to the work item for insertion. */
	spin_lock(&ifocb->incomplete_lock);
	list_add(&sta->list, &ifocb->incomplete_stations);
	spin_unlock(&ifocb->incomplete_lock);
	ieee80211_queue_work(&local->hw, &sdata->work);
}
/*
 * ieee80211_ocb_finish_sta - complete insertion of a queued OCB station
 *
 * Moves the entry through AUTH/ASSOC/AUTHORIZED (OCB has no real
 * authentication or association handshake), initializes rate control and
 * inserts it into the station table.  Returns the inserted entry or --
 * when insertion raced with another inserter -- the entry that won the
 * race.  Exits with the RCU read lock held (taken by
 * sta_info_insert_rcu()).
 */
static struct sta_info *ieee80211_ocb_finish_sta(struct sta_info *sta)
	__acquires(RCU)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	u8 addr[ETH_ALEN];

	memcpy(addr, sta->sta.addr, ETH_ALEN);

	/* Fixed: this debug message used to say "IBSS" - a leftover from
	 * the ibss.c code this was derived from. */
	ocb_dbg(sdata, "Adding new OCB station %pM (dev=%s)\n",
		addr, sdata->name);

	sta_info_move_state(sta, IEEE80211_STA_AUTH);
	sta_info_move_state(sta, IEEE80211_STA_ASSOC);
	sta_info_move_state(sta, IEEE80211_STA_AUTHORIZED);

	rate_control_rate_init(sta);

	/* If it fails, maybe we raced another insertion? */
	if (sta_info_insert_rcu(sta))
		return sta_info_get(sdata, addr);
	return sta;
}
/*
 * ieee80211_ocb_housekeeping - periodic OCB maintenance
 *
 * Expires stations inactive longer than
 * IEEE80211_OCB_PEER_INACTIVITY_LIMIT and re-arms the housekeeping
 * timer for the next interval.
 */
static void ieee80211_ocb_housekeeping(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;

	ocb_dbg(sdata, "Running ocb housekeeping\n");

	ieee80211_sta_expire(sdata, IEEE80211_OCB_PEER_INACTIVITY_LIMIT);

	mod_timer(&ifocb->housekeeping_timer,
		  round_jiffies(jiffies + IEEE80211_OCB_HOUSEKEEPING_INTERVAL));
}
/*
 * ieee80211_ocb_work - deferred OCB work (runs from sdata->work)
 *
 * Finishes inserting the stations queued by ieee80211_ocb_rx_no_sta()
 * and, when OCB_WORK_HOUSEKEEPING is set, runs the periodic
 * housekeeping.  Does nothing unless the interface has joined.
 */
void ieee80211_ocb_work(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
	struct sta_info *sta;

	if (ifocb->joined != true)
		return;

	sdata_lock(sdata);

	/* The lock is dropped around each insertion because
	 * ieee80211_ocb_finish_sta() may sleep/take other locks. */
	spin_lock_bh(&ifocb->incomplete_lock);
	while (!list_empty(&ifocb->incomplete_stations)) {
		sta = list_first_entry(&ifocb->incomplete_stations,
				       struct sta_info, list);
		list_del(&sta->list);
		spin_unlock_bh(&ifocb->incomplete_lock);

		/* finish_sta() returns with the RCU read lock held;
		 * drop it here. */
		ieee80211_ocb_finish_sta(sta);
		rcu_read_unlock();

		spin_lock_bh(&ifocb->incomplete_lock);
	}
	spin_unlock_bh(&ifocb->incomplete_lock);

	if (test_and_clear_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags))
		ieee80211_ocb_housekeeping(sdata);

	sdata_unlock(sdata);
}
/*
 * Timer callback: request housekeeping and kick the interface work item.
 * The actual work happens in ieee80211_ocb_work() in process context.
 */
static void ieee80211_ocb_housekeeping_timer(unsigned long data)
{
	struct ieee80211_sub_if_data *sdata = (void *)data;

	set_bit(OCB_WORK_HOUSEKEEPING, &sdata->u.ocb.wrkq_flags);
	ieee80211_queue_work(&sdata->local->hw, &sdata->work);
}
/*
 * ieee80211_ocb_setup_sdata - one-time init of the OCB interface state
 *
 * Initializes the incomplete-stations list, its lock, and the
 * housekeeping timer (not armed here; that happens on join).
 */
void ieee80211_ocb_setup_sdata(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;

	INIT_LIST_HEAD(&ifocb->incomplete_stations);
	spin_lock_init(&ifocb->incomplete_lock);
	setup_timer(&ifocb->housekeeping_timer,
		    ieee80211_ocb_housekeeping_timer,
		    (unsigned long)sdata);
}
/*
 * ieee80211_ocb_join - join the OCB "network" on the given channel
 *
 * Acquires a shared channel context for setup->chandef, announces the
 * BSS changes, marks the interface as joined and schedules the first
 * housekeeping run.  Returns -EINVAL if already joined, or the error
 * from channel acquisition.
 */
int ieee80211_ocb_join(struct ieee80211_sub_if_data *sdata,
		       struct ocb_setup *setup)
{
	struct ieee80211_local *local = sdata->local;
	struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
	u32 changed = BSS_CHANGED_OCB | BSS_CHANGED_BSSID;
	int err;

	if (ifocb->joined)
		return -EINVAL;

	sdata->flags |= IEEE80211_SDATA_OPERATING_GMODE;
	sdata->smps_mode = IEEE80211_SMPS_OFF;
	sdata->needed_rx_chains = sdata->local->rx_chains;

	mutex_lock(&sdata->local->mtx);
	err = ieee80211_vif_use_channel(sdata, &setup->chandef,
					IEEE80211_CHANCTX_SHARED);
	mutex_unlock(&sdata->local->mtx);
	if (err)
		return err;

	ieee80211_bss_info_change_notify(sdata, changed);

	ifocb->joined = true;

	/* Kick off the first housekeeping cycle. */
	set_bit(OCB_WORK_HOUSEKEEPING, &ifocb->wrkq_flags);
	ieee80211_queue_work(&local->hw, &sdata->work);

	netif_carrier_on(sdata->dev);
	return 0;
}
/*
 * ieee80211_ocb_leave - leave the OCB "network"
 *
 * Clears the joined flag first so concurrent work bails out, flushes all
 * stations (both inserted and still-incomplete ones), announces the BSS
 * change, releases the channel context and stops the housekeeping timer.
 */
int ieee80211_ocb_leave(struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_if_ocb *ifocb = &sdata->u.ocb;
	struct ieee80211_local *local = sdata->local;
	struct sta_info *sta;

	ifocb->joined = false;
	sta_info_flush(sdata);

	/* Free stations that were queued but never inserted; these are
	 * not covered by sta_info_flush() above. */
	spin_lock_bh(&ifocb->incomplete_lock);
	while (!list_empty(&ifocb->incomplete_stations)) {
		sta = list_first_entry(&ifocb->incomplete_stations,
				       struct sta_info, list);
		list_del(&sta->list);
		spin_unlock_bh(&ifocb->incomplete_lock);

		sta_info_free(local, sta);

		spin_lock_bh(&ifocb->incomplete_lock);
	}
	spin_unlock_bh(&ifocb->incomplete_lock);

	netif_carrier_off(sdata->dev);
	clear_bit(SDATA_STATE_OFFCHANNEL, &sdata->state);
	ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_OCB);

	mutex_lock(&sdata->local->mtx);
	ieee80211_vif_release_channel(sdata);
	mutex_unlock(&sdata->local->mtx);

	skb_queue_purge(&sdata->skb_queue);

	del_timer_sync(&sdata->u.ocb.housekeeping_timer);
	/* If the timer fired while we waited for it, it will have
	 * requeued the work. Now the work will be running again
	 * but will not rearm the timer again because it checks
	 * whether we are connected to the network or not -- at this
	 * point we shouldn't be anymore.
	 */

	return 0;
}
| gpl-2.0 |
sanidhya/kup-linux | drivers/input/misc/retu-pwrbutton.c | 845 | 2444 | /*
* Retu power button driver.
*
* Copyright (C) 2004-2010 Nokia Corporation
*
* Original code written by Ari Saastamoinen, Juha Yrjölä and Felipe Balbi.
* Rewritten by Aaro Koskinen.
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mfd/retu.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#define RETU_STATUS_PWRONX (1 << 5)
/*
 * Threaded IRQ handler: report the current power-button state.
 * The PWRONX status bit is active-low, hence the negation.
 */
static irqreturn_t retu_pwrbutton_irq(int irq, void *_pwr)
{
	struct input_dev *idev = _pwr;
	struct retu_dev *rdev = input_get_drvdata(idev);
	bool pressed;

	pressed = !(retu_read(rdev, RETU_REG_STATUS) & RETU_STATUS_PWRONX);
	input_report_key(idev, KEY_POWER, pressed);
	input_sync(idev);

	return IRQ_HANDLED;
}
/*
 * retu_pwrbutton_probe - set up the power-button input device
 *
 * All resources (input device, IRQ) are managed via devm_*, so no
 * explicit error unwinding or remove-time cleanup is needed.
 */
static int retu_pwrbutton_probe(struct platform_device *pdev)
{
	struct retu_dev *rdev = dev_get_drvdata(pdev->dev.parent);
	struct input_dev *idev;
	int irq;
	int ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	idev = devm_input_allocate_device(&pdev->dev);
	if (idev == NULL)
		return -ENOMEM;

	idev->name = "retu-pwrbutton";
	idev->dev.parent = &pdev->dev;
	input_set_drvdata(idev, rdev);
	input_set_capability(idev, EV_KEY, KEY_POWER);

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
					retu_pwrbutton_irq, IRQF_ONESHOT,
					"retu-pwrbutton", idev);
	if (ret)
		return ret;

	return input_register_device(idev);
}
/*
 * Nothing to do: the input device and IRQ were allocated with devm_*
 * helpers in probe and are released automatically on unbind.
 */
static int retu_pwrbutton_remove(struct platform_device *pdev)
{
	return 0;
}
/* Platform driver glue; binds by name to the "retu-pwrbutton" cell
 * created by the Retu MFD core driver. */
static struct platform_driver retu_pwrbutton_driver = {
	.probe		= retu_pwrbutton_probe,
	.remove		= retu_pwrbutton_remove,
	.driver		= {
		.name	= "retu-pwrbutton",
	},
};
module_platform_driver(retu_pwrbutton_driver);

MODULE_ALIAS("platform:retu-pwrbutton");
MODULE_DESCRIPTION("Retu Power Button");
MODULE_AUTHOR("Ari Saastamoinen");
MODULE_AUTHOR("Felipe Balbi");
MODULE_AUTHOR("Aaro Koskinen <aaro.koskinen@iki.fi>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
RenderBroken/OP3-kernel | drivers/video/fbdev/sunxvr1000.c | 845 | 4837 | /* sunxvr1000.c: Sun XVR-1000 driver for sparc64 systems
*
* Copyright (C) 2010 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <linux/of_device.h>
/* Per-device state for one XVR-1000 framebuffer. */
struct gfb_info {
	struct fb_info		*info;		/* fbdev core object (we live in ->par) */

	char __iomem		*fb_base;	/* mapped framebuffer memory */
	unsigned long		fb_base_phys;	/* physical address of the framebuffer */

	struct device_node	*of_node;	/* OF node the properties come from */

	unsigned int		width;		/* visible resolution, pixels */
	unsigned int		height;
	unsigned int		depth;		/* bits per pixel */
	unsigned int		fb_size;	/* mapped size in bytes */

	u32			pseudo_palette[16];	/* truecolor palette for fbcon */
};
/*
 * gfb_get_props - read resolution and depth from the OF device node.
 *
 * Width and height are mandatory; depth defaults to 32 bpp.
 * Returns 0 on success, -EINVAL if either dimension is missing.
 */
static int gfb_get_props(struct gfb_info *gp)
{
	struct device_node *dp = gp->of_node;

	gp->width = of_getintprop_default(dp, "width", 0);
	gp->height = of_getintprop_default(dp, "height", 0);
	gp->depth = of_getintprop_default(dp, "depth", 32);

	if (gp->width && gp->height)
		return 0;

	printk(KERN_ERR "gfb: Critical properties missing for %s\n",
	       dp->full_name);
	return -EINVAL;
}
/*
 * gfb_setcolreg - store one palette entry for truecolor emulation.
 *
 * Only the first 16 entries are kept (fbcon's pseudo palette); the
 * 16-bit components are reduced to 8 bits and packed as 0x00BBGGRR.
 * Out-of-range registers are silently ignored.
 */
static int gfb_setcolreg(unsigned regno,
			 unsigned red, unsigned green, unsigned blue,
			 unsigned transp, struct fb_info *info)
{
	if (regno >= 16)
		return 0;

	red >>= 8;
	green >>= 8;
	blue >>= 8;
	((u32 *)info->pseudo_palette)[regno] =
		(blue << 16) | (green << 8) | red;

	return 0;
}
/* Unaccelerated framebuffer ops: generic cfb_* drawing helpers plus
 * our palette hook. */
static struct fb_ops gfb_ops = {
	.owner			= THIS_MODULE,
	.fb_setcolreg		= gfb_setcolreg,
	.fb_fillrect		= cfb_fillrect,
	.fb_copyarea		= cfb_copyarea,
	.fb_imageblit		= cfb_imageblit,
};
/*
 * gfb_set_fbinfo - fill in the fb_info fix/var data from gfb_info.
 *
 * Returns 0 on success or -ENOMEM if the colormap cannot be allocated.
 */
static int gfb_set_fbinfo(struct gfb_info *gp)
{
	struct fb_info *info = gp->info;
	struct fb_var_screeninfo *var = &info->var;

	info->flags = FBINFO_DEFAULT;
	info->fbops = &gfb_ops;
	info->screen_base = gp->fb_base;
	info->screen_size = gp->fb_size;

	info->pseudo_palette = gp->pseudo_palette;

	/* Fill fix common fields */
	strlcpy(info->fix.id, "gfb", sizeof(info->fix.id));
	info->fix.smem_start = gp->fb_base_phys;
	info->fix.smem_len = gp->fb_size;
	info->fix.type = FB_TYPE_PACKED_PIXELS;
	if (gp->depth == 32 || gp->depth == 24)
		info->fix.visual = FB_VISUAL_TRUECOLOR;
	else
		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;

	var->xres = gp->width;
	var->yres = gp->height;
	var->xres_virtual = var->xres;
	var->yres_virtual = var->yres;
	var->bits_per_pixel = gp->depth;

	/* Component layout: red in the low byte, blue in bits 16-23
	 * (matches the 0x00BBGGRR packing in gfb_setcolreg()). */
	var->red.offset = 0;
	var->red.length = 8;
	var->green.offset = 8;
	var->green.length = 8;
	var->blue.offset = 16;
	var->blue.length = 8;
	var->transp.offset = 0;
	var->transp.length = 0;

	if (fb_alloc_cmap(&info->cmap, 256, 0)) {
		printk(KERN_ERR "gfb: Cannot allocate color map.\n");
		return -ENOMEM;
	}

	return 0;
}
/*
 * gfb_probe - bind to an XVR-1000 device and register the framebuffer.
 *
 * Allocates the fb_info (with gfb_info in ->par), reads the display
 * properties from the OF node, maps the framebuffer from resource 6,
 * and registers with the fbdev core.  Errors unwind via the usual
 * goto-cleanup chain.
 */
static int gfb_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	struct fb_info *info;
	struct gfb_info *gp;
	int err;

	info = framebuffer_alloc(sizeof(struct gfb_info), &op->dev);
	if (!info) {
		printk(KERN_ERR "gfb: Cannot allocate fb_info\n");
		err = -ENOMEM;
		goto err_out;
	}

	gp = info->par;
	gp->info = info;
	gp->of_node = dp;

	/* Resource 6 is the framebuffer aperture on this hardware. */
	gp->fb_base_phys = op->resource[6].start;

	err = gfb_get_props(gp);
	if (err)
		goto err_release_fb;

	/* Framebuffer length is the same regardless of resolution. */
	info->fix.line_length = 16384;
	gp->fb_size = info->fix.line_length * gp->height;

	gp->fb_base = of_ioremap(&op->resource[6], 0,
				 gp->fb_size, "gfb fb");
	if (!gp->fb_base) {
		err = -ENOMEM;
		goto err_release_fb;
	}

	err = gfb_set_fbinfo(gp);
	if (err)
		goto err_unmap_fb;

	printk("gfb: Found device at %s\n", dp->full_name);

	err = register_framebuffer(info);
	if (err < 0) {
		printk(KERN_ERR "gfb: Could not register framebuffer %s\n",
		       dp->full_name);
		goto err_unmap_fb;
	}

	dev_set_drvdata(&op->dev, info);

	return 0;

err_unmap_fb:
	of_iounmap(&op->resource[6], gp->fb_base, gp->fb_size);

err_release_fb:
	framebuffer_release(info);

err_out:
	return err;
}
/*
 * gfb_remove - undo gfb_probe() on device removal.
 *
 * The framebuffer mapping was created with of_ioremap(), so it is torn
 * down with the matching of_iounmap() only.  The previous extra bare
 * iounmap() on the same pointer was redundant (the probe error path
 * likewise uses of_iounmap() alone) and has been removed.
 */
static int gfb_remove(struct platform_device *op)
{
	struct fb_info *info = dev_get_drvdata(&op->dev);
	struct gfb_info *gp = info->par;

	unregister_framebuffer(info);

	of_iounmap(&op->resource[6], gp->fb_base, gp->fb_size);

	framebuffer_release(info);

	return 0;
}
/* OF match table: the device node is named "SUNW,gfb". */
static const struct of_device_id gfb_match[] = {
	{
		.name = "SUNW,gfb",
	},
	{},
};
/* Bug fix: this previously referenced "ffb_match" (a copy/paste from the
 * FFB driver), an identifier that does not exist in this file, breaking
 * module alias generation for modular builds. */
MODULE_DEVICE_TABLE(of, gfb_match);
/* Platform driver glue; matched against the OF table above. */
static struct platform_driver gfb_driver = {
	.probe		= gfb_probe,
	.remove		= gfb_remove,
	.driver = {
		.name		= "gfb",
		.owner		= THIS_MODULE,
		.of_match_table	= gfb_match,
	},
};
/*
 * Module init: honor "video=gfb:..." disable options on the kernel
 * command line, then register the platform driver.
 */
static int __init gfb_init(void)
{
	if (fb_get_options("gfb", NULL))
		return -ENODEV;

	return platform_driver_register(&gfb_driver);
}

static void __exit gfb_exit(void)
{
	platform_driver_unregister(&gfb_driver);
}

module_init(gfb_init);
module_exit(gfb_exit);

MODULE_DESCRIPTION("framebuffer driver for Sun XVR-1000 graphics");
MODULE_AUTHOR("David S. Miller <davem@davemloft.net>");
MODULE_VERSION("1.0");
MODULE_LICENSE("GPL");
| gpl-2.0 |
iamroot12CD/linux | arch/arm/mach-omap2/voltagedomains54xx_data.c | 1613 | 2210 | /*
* OMAP5 Voltage Management Routines
*
* Based on voltagedomains44xx_data.c
*
* Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/init.h>
#include "common.h"
#include "prm54xx.h"
#include "voltage.h"
#include "omap_opp_data.h"
#include "vc.h"
#include "vp.h"
/* Voltage FSM instances: one per scalable VDD, each pointing at the
 * PRM voltage-setup register for that domain. */
static const struct omap_vfsm_instance omap5_vdd_mpu_vfsm = {
	.voltsetup_reg = OMAP54XX_PRM_VOLTSETUP_MPU_RET_SLEEP_OFFSET,
};

static const struct omap_vfsm_instance omap5_vdd_mm_vfsm = {
	.voltsetup_reg = OMAP54XX_PRM_VOLTSETUP_MM_RET_SLEEP_OFFSET,
};

static const struct omap_vfsm_instance omap5_vdd_core_vfsm = {
	.voltsetup_reg = OMAP54XX_PRM_VOLTSETUP_CORE_RET_SLEEP_OFFSET,
};
/* OMAP5 voltage domains.  MPU/MM/CORE are scalable and reuse the OMAP4
 * VC/VP instances and PRM accessors; WKUP is a fixed-voltage domain. */
static struct voltagedomain omap5_voltdm_mpu = {
	.name = "mpu",
	.scalable = true,
	.read = omap4_prm_vcvp_read,
	.write = omap4_prm_vcvp_write,
	.rmw = omap4_prm_vcvp_rmw,
	.vc = &omap4_vc_mpu,
	.vfsm = &omap5_vdd_mpu_vfsm,
	.vp = &omap4_vp_mpu,
};

static struct voltagedomain omap5_voltdm_mm = {
	.name = "mm",
	.scalable = true,
	.read = omap4_prm_vcvp_read,
	.write = omap4_prm_vcvp_write,
	.rmw = omap4_prm_vcvp_rmw,
	.vc = &omap4_vc_iva,
	.vfsm = &omap5_vdd_mm_vfsm,
	.vp = &omap4_vp_iva,
};

static struct voltagedomain omap5_voltdm_core = {
	.name = "core",
	.scalable = true,
	.read = omap4_prm_vcvp_read,
	.write = omap4_prm_vcvp_write,
	.rmw = omap4_prm_vcvp_rmw,
	.vc = &omap4_vc_core,
	.vfsm = &omap5_vdd_core_vfsm,
	.vp = &omap4_vp_core,
};

static struct voltagedomain omap5_voltdm_wkup = {
	.name = "wkup",
};

/* NULL-terminated list consumed by voltdm_init(). */
static struct voltagedomain *voltagedomains_omap5[] __initdata = {
	&omap5_voltdm_mpu,
	&omap5_voltdm_mm,
	&omap5_voltdm_core,
	&omap5_voltdm_wkup,
	NULL,
};

static const char *sys_clk_name __initdata = "sys_clkin";
/*
 * omap54xx_voltagedomains_init - register the OMAP5 voltage domains.
 *
 * All domains are fed by the same system clock, so its name is patched
 * into every entry before registration.
 *
 * Cleanups: replaced the comma-operator loop condition with an explicit
 * NULL test and dropped the stray semicolon that followed the function
 * body (non-idiomatic; warned about with -pedantic).
 */
void __init omap54xx_voltagedomains_init(void)
{
	struct voltagedomain *voltdm;
	int i;

	for (i = 0; (voltdm = voltagedomains_omap5[i]) != NULL; i++)
		voltdm->sys_clk.name = sys_clk_name;

	voltdm_init(voltagedomains_omap5);
}
| gpl-2.0 |
olicmoon/linux | arch/mips/netlogic/xlp/ahci-init.c | 1613 | 6632 | /*
* Copyright (c) 2003-2014 Broadcom Corporation
* All Rights Reserved
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the Broadcom
* license below:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
*
* THIS SOFTWARE IS PROVIDED BY BROADCOM ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL BROADCOM OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/bitops.h>
#include <asm/cpu.h>
#include <asm/mipsregs.h>
#include <asm/netlogic/haldefs.h>
#include <asm/netlogic/xlp-hal/xlp.h>
#include <asm/netlogic/common.h>
#include <asm/netlogic/xlp-hal/iomap.h>
#include <asm/netlogic/mips-extns.h>
#define SATA_CTL 0x0
#define SATA_STATUS 0x1 /* Status Reg */
#define SATA_INT 0x2 /* Interrupt Reg */
#define SATA_INT_MASK 0x3 /* Interrupt Mask Reg */
#define SATA_CR_REG_TIMER 0x4 /* PHY Conrol Timer Reg */
#define SATA_CORE_ID 0x5 /* Core ID Reg */
#define SATA_AXI_SLAVE_OPT1 0x6 /* AXI Slave Options Reg */
#define SATA_PHY_LOS_LEV 0x7 /* PHY LOS Level Reg */
#define SATA_PHY_MULTI 0x8 /* PHY Multiplier Reg */
#define SATA_PHY_CLK_SEL 0x9 /* Clock Select Reg */
#define SATA_PHY_AMP1_GEN1 0xa /* PHY Transmit Amplitude Reg 1 */
#define SATA_PHY_AMP1_GEN2 0xb /* PHY Transmit Amplitude Reg 2 */
#define SATA_PHY_AMP1_GEN3 0xc /* PHY Transmit Amplitude Reg 3 */
#define SATA_PHY_PRE1 0xd /* PHY Transmit Preemphasis Reg 1 */
#define SATA_PHY_PRE2 0xe /* PHY Transmit Preemphasis Reg 2 */
#define SATA_PHY_PRE3 0xf /* PHY Transmit Preemphasis Reg 3 */
#define SATA_SPDMODE 0x10 /* Speed Mode Reg */
#define SATA_REFCLK 0x11 /* Reference Clock Control Reg */
#define SATA_BYTE_SWAP_DIS 0x12 /* byte swap disable */
/*SATA_CTL Bits */
#define SATA_RST_N BIT(0)
#define PHY0_RESET_N BIT(16)
#define PHY1_RESET_N BIT(17)
#define PHY2_RESET_N BIT(18)
#define PHY3_RESET_N BIT(19)
#define M_CSYSREQ BIT(2)
#define S_CSYSREQ BIT(3)
/*SATA_STATUS Bits */
#define P0_PHY_READY BIT(4)
#define P1_PHY_READY BIT(5)
#define P2_PHY_READY BIT(6)
#define P3_PHY_READY BIT(7)
#define nlm_read_sata_reg(b, r) nlm_read_reg(b, r)
#define nlm_write_sata_reg(b, r, v) nlm_write_reg(b, r, v)
#define nlm_get_sata_pcibase(node) \
nlm_pcicfg_base(XLP_IO_SATA_OFFSET(node))
/* SATA device specific configuration registers are starts at 0x900 offset */
#define nlm_get_sata_regbase(node) \
(nlm_get_sata_pcibase(node) + 0x900)
/* Read-modify-write helper: clear @bit in glue register @off. */
static void sata_clear_glue_reg(uint64_t regbase, uint32_t off, uint32_t bit)
{
	uint32_t val = nlm_read_sata_reg(regbase, off);

	nlm_write_sata_reg(regbase, off, val & ~bit);
}
/* Read-modify-write helper: set @bit in glue register @off. */
static void sata_set_glue_reg(uint64_t regbase, uint32_t off, uint32_t bit)
{
	uint32_t val = nlm_read_sata_reg(regbase, off);

	nlm_write_sata_reg(regbase, off, val | bit);
}
/*
 * nlm_sata_firmware_init - bring the XLP SATA block out of reset.
 *
 * Sequence: assert SATA core reset, assert all four PHY resets, then
 * release them in the same order, and busy-wait for the per-port
 * PHY-ready status bits.  The wait is a bounded spin (up to 10000
 * register reads, no delay between polls); ports that never come
 * ready are only reported, not treated as an error.
 */
static void nlm_sata_firmware_init(int node)
{
	uint32_t reg_val;
	uint64_t regbase;
	int i;

	pr_info("XLP AHCI Initialization started.\n");
	regbase = nlm_get_sata_regbase(node);

	/* Reset SATA */
	sata_clear_glue_reg(regbase, SATA_CTL, SATA_RST_N);
	/* Reset PHY */
	sata_clear_glue_reg(regbase, SATA_CTL,
			(PHY3_RESET_N | PHY2_RESET_N
			 | PHY1_RESET_N | PHY0_RESET_N));

	/* Set SATA */
	sata_set_glue_reg(regbase, SATA_CTL, SATA_RST_N);
	/* Set PHY */
	sata_set_glue_reg(regbase, SATA_CTL,
			(PHY3_RESET_N | PHY2_RESET_N
			 | PHY1_RESET_N | PHY0_RESET_N));

	pr_debug("Waiting for PHYs to come up.\n");
	i = 0;
	/* 0xF0 == all four Px_PHY_READY bits set */
	do {
		reg_val = nlm_read_sata_reg(regbase, SATA_STATUS);
		i++;
	} while (((reg_val & 0xF0) != 0xF0) && (i < 10000));

	for (i = 0; i < 4; i++) {
		if (reg_val & (P0_PHY_READY << i))
			pr_info("PHY%d is up.\n", i);
		else
			pr_info("PHY%d is down.\n", i);
	}

	pr_info("XLP AHCI init done.\n");
}
/*
 * Early init: the on-chip SATA block exists only on XLP3XX, and that
 * chip does not support multiple nodes, so node 0 is used.
 */
static int __init nlm_ahci_init(void)
{
	if ((read_c0_prid() & PRID_IMP_MASK) == PRID_IMP_NETLOGIC_XLP3XX)
		nlm_sata_firmware_init(0);

	return 0;
}
/*
 * Extra ack for the SATA PIC interrupt: read the pending bits and write
 * them back (presumably write-1-to-clear semantics -- the glue register
 * documentation is not in this file).
 */
static void nlm_sata_intr_ack(struct irq_data *data)
{
	uint64_t regbase = nlm_get_sata_regbase(nlm_nodeid());
	uint32_t pending;

	pending = nlm_read_sata_reg(regbase, SATA_INT);
	sata_set_glue_reg(regbase, SATA_INT, pending);
}
static void nlm_sata_fixup_bar(struct pci_dev *dev)
{
	/*
	 * The AHCI resource is in BAR 0, move it to
	 * BAR 5, where it is expected
	 */
	dev->resource[5] = dev->resource[0];
	memset(&dev->resource[0], 0, sizeof(dev->resource[0]));
}
/*
 * Final PCI fixup: acknowledge any stale interrupts, restrict the glue
 * interrupt mask to the core interrupt, and route the device to the
 * dedicated SATA PIC IRQ with the extra-ack hook installed.
 */
static void nlm_sata_fixup_final(struct pci_dev *dev)
{
	uint32_t val;
	uint64_t regbase;
	int node = 0; /* XLP3XX does not support multi-node */

	regbase = nlm_get_sata_regbase(node);

	/* clear pending interrupts and then enable them */
	val = nlm_read_sata_reg(regbase, SATA_INT);
	sata_set_glue_reg(regbase, SATA_INT, val);

	/* Mask the core interrupt. If all the interrupts
	 * are enabled there are spurious interrupt flow
	 * happening, to avoid only enable core interrupt
	 * mask.
	 */
	sata_set_glue_reg(regbase, SATA_INT_MASK, 0x1);

	dev->irq = PIC_SATA_IRQ;
	nlm_set_pic_extra_ack(node, PIC_SATA_IRQ, nlm_sata_intr_ack);
}

arch_initcall(nlm_ahci_init);

DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_SATA,
		nlm_sata_fixup_bar);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_NETLOGIC, PCI_DEVICE_ID_NLM_SATA,
		nlm_sata_fixup_final);
| gpl-2.0 |
dwander/linaro-base | drivers/s390/net/qeth_l3_sys.c | 2125 | 27439 | /*
* Copyright IBM Corp. 2007
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>,
* Frank Pavlic <fpavlic@de.ibm.com>,
* Thomas Spatzier <tspat@de.ibm.com>,
* Frank Blaschka <frank.blaschka@de.ibm.com>
*/
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "qeth_l3.h"
#define QETH_DEVICE_ATTR(_id, _name, _mode, _show, _store) \
struct device_attribute dev_attr_##_id = __ATTR(_name, _mode, _show, _store)
/*
 * Format a routing role for sysfs.  Connector/multicast-router roles
 * gain a '+' suffix when the card can do broadcast without echo.
 */
static ssize_t qeth_l3_dev_route_show(struct qeth_card *card,
			struct qeth_routing_info *route, char *buf)
{
	int plus = (card->info.broadcast_capable ==
		    QETH_BROADCAST_WITHOUT_ECHO);
	const char *str;

	switch (route->type) {
	case PRIMARY_ROUTER:
		str = "primary router";
		break;
	case SECONDARY_ROUTER:
		str = "secondary router";
		break;
	case MULTICAST_ROUTER:
		str = plus ? "multicast router+" : "multicast router";
		break;
	case PRIMARY_CONNECTOR:
		str = plus ? "primary connector+" : "primary connector";
		break;
	case SECONDARY_CONNECTOR:
		str = plus ? "secondary connector+" : "secondary connector";
		break;
	default:
		str = "no";
		break;
	}
	return sprintf(buf, "%s\n", str);
}
static ssize_t qeth_l3_dev_route4_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct qeth_card *card = dev_get_drvdata(dev);
if (!card)
return -EINVAL;
return qeth_l3_dev_route_show(card, &card->options.route4, buf);
}
/*
 * Parse and apply a routing role written via sysfs.
 *
 * If the card is already (soft)set up, the new role is pushed to the
 * hardware immediately; on failure the previous role is restored so the
 * stored state never diverges from the hardware.
 */
static ssize_t qeth_l3_dev_route_store(struct qeth_card *card,
		struct qeth_routing_info *route, enum qeth_prot_versions prot,
		const char *buf, size_t count)
{
	enum qeth_routing_types old_route_type = route->type;
	char *tmp;
	int rc = 0;

	/* Strip the trailing newline echo/sysfs appends. */
	tmp = strsep((char **) &buf, "\n");
	mutex_lock(&card->conf_mutex);
	if (!strcmp(tmp, "no_router")) {
		route->type = NO_ROUTER;
	} else if (!strcmp(tmp, "primary_connector")) {
		route->type = PRIMARY_CONNECTOR;
	} else if (!strcmp(tmp, "secondary_connector")) {
		route->type = SECONDARY_CONNECTOR;
	} else if (!strcmp(tmp, "primary_router")) {
		route->type = PRIMARY_ROUTER;
	} else if (!strcmp(tmp, "secondary_router")) {
		route->type = SECONDARY_ROUTER;
	} else if (!strcmp(tmp, "multicast_router")) {
		route->type = MULTICAST_ROUTER;
	} else {
		rc = -EINVAL;
		goto out;
	}
	/* Apply to hardware only when the card is active and the role
	 * actually changed. */
	if (((card->state == CARD_STATE_SOFTSETUP) ||
	     (card->state == CARD_STATE_UP)) &&
	    (old_route_type != route->type)) {
		if (prot == QETH_PROT_IPV4)
			rc = qeth_l3_setrouting_v4(card);
		else if (prot == QETH_PROT_IPV6)
			rc = qeth_l3_setrouting_v6(card);
	}
out:
	/* Roll back the stored role if parsing or hardware setup failed. */
	if (rc)
		route->type = old_route_type;
	mutex_unlock(&card->conf_mutex);
	return rc ? rc : count;
}
/* sysfs 'route4' store: delegate to the common parser for IPv4. */
static ssize_t qeth_l3_dev_route4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_route_store(card, &card->options.route4,
				QETH_PROT_IPV4, buf, count);
}

static DEVICE_ATTR(route4, 0644, qeth_l3_dev_route4_show,
			qeth_l3_dev_route4_store);
/* sysfs 'route6' show/store: IPv6 counterparts of the route4 handlers. */
static ssize_t qeth_l3_dev_route6_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_route_show(card, &card->options.route6, buf);
}

static ssize_t qeth_l3_dev_route6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_route_store(card, &card->options.route6,
				QETH_PROT_IPV6, buf, count);
}

static DEVICE_ATTR(route6, 0644, qeth_l3_dev_route6_show,
			qeth_l3_dev_route6_store);
/* sysfs 'fake_broadcast': boolean option, changeable only while the
 * card is offline (DOWN or RECOVER state). */
static ssize_t qeth_l3_dev_fake_broadcast_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return sprintf(buf, "%i\n", card->options.fake_broadcast? 1:0);
}

static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	char *tmp;
	int i, rc = 0;

	if (!card)
		return -EINVAL;

	mutex_lock(&card->conf_mutex);
	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER)) {
		rc = -EPERM;
		goto out;
	}

	/* NOTE(review): parsed with base 16, like the other qeth
	 * attributes, although only 0/1 are accepted. */
	i = simple_strtoul(buf, &tmp, 16);
	if ((i == 0) || (i == 1))
		card->options.fake_broadcast = i;
	else
		rc = -EINVAL;
out:
	mutex_unlock(&card->conf_mutex);
	return rc ? rc : count;
}

static DEVICE_ATTR(fake_broadcast, 0644, qeth_l3_dev_fake_broadcast_show,
		   qeth_l3_dev_fake_broadcast_store);
/* sysfs 'sniffer': traffic-analyzer mode.  Only valid on IQD cards,
 * mutually exclusive with completion queueing, and only changeable
 * while the card is offline. */
static ssize_t qeth_l3_dev_sniffer_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return sprintf(buf, "%i\n", card->options.sniffer ? 1 : 0);
}

static ssize_t qeth_l3_dev_sniffer_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	int rc = 0;
	unsigned long i;

	if (!card)
		return -EINVAL;

	if (card->info.type != QETH_CARD_TYPE_IQD)
		return -EPERM;
	if (card->options.cq == QETH_CQ_ENABLED)
		return -EPERM;

	mutex_lock(&card->conf_mutex);
	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER)) {
		rc = -EPERM;
		goto out;
	}

	rc = strict_strtoul(buf, 16, &i);
	if (rc) {
		rc = -EINVAL;
		goto out;
	}
	switch (i) {
	case 0:
		card->options.sniffer = i;
		break;
	case 1:
		/* Enabling requires the adapter to report sniffing
		 * support in its SSQD, and the maximum inbound buffer
		 * pool to be available. */
		qdio_get_ssqd_desc(CARD_DDEV(card), &card->ssqd);
		if (card->ssqd.qdioac2 & QETH_SNIFF_AVAIL) {
			card->options.sniffer = i;
			if (card->qdio.init_pool.buf_count !=
					QETH_IN_BUF_COUNT_MAX)
				qeth_realloc_buffer_pool(card,
					QETH_IN_BUF_COUNT_MAX);
		} else
			rc = -EPERM;
		break;
	default:
		rc = -EINVAL;
	}
out:
	mutex_unlock(&card->conf_mutex);
	return rc ? rc : count;
}

static DEVICE_ATTR(sniffer, 0644, qeth_l3_dev_sniffer_show,
		qeth_l3_dev_sniffer_store);
/* sysfs 'hsuid' show: the stored hsuid is kept in EBCDIC; convert a
 * copy to ASCII for display.  tmp_hsuid[8] stays '\0' because
 * options.hsuid is a NUL-terminated 8-character field. */
static ssize_t qeth_l3_dev_hsuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	char tmp_hsuid[9];

	if (!card)
		return -EINVAL;

	if (card->info.type != QETH_CARD_TYPE_IQD)
		return -EPERM;

	if (card->state == CARD_STATE_DOWN)
		return -EPERM;

	memcpy(tmp_hsuid, card->options.hsuid, sizeof(tmp_hsuid));
	EBCASC(tmp_hsuid, 8);
	return sprintf(buf, "%s\n", tmp_hsuid);
}
/*
 * sysfs 'hsuid' store.
 *
 * Sets (or, with an empty write, clears) the HiperSockets user id.
 * Any previously configured id has its derived link-local IPv6 address
 * removed first; a non-empty new id is stored space-padded in EBCDIC,
 * copied into the device's permanent address, and a matching
 * fe80::<hsuid> address is registered.  Requires an IQD card that is
 * offline, not sniffing, and with completion queueing available.
 */
static ssize_t qeth_l3_dev_hsuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	struct qeth_ipaddr *addr;
	char *tmp;
	int i;

	if (!card)
		return -EINVAL;

	if (card->info.type != QETH_CARD_TYPE_IQD)
		return -EPERM;
	if (card->state != CARD_STATE_DOWN &&
	    card->state != CARD_STATE_RECOVER)
		return -EPERM;
	if (card->options.sniffer)
		return -EPERM;
	if (card->options.cq == QETH_CQ_NOTAVAILABLE)
		return -EPERM;

	tmp = strsep((char **)&buf, "\n");
	if (strlen(tmp) > 8)
		return -EINVAL;

	if (card->options.hsuid[0]) {
		/* delete old ip address */
		addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
		if (addr != NULL) {
			/* Rebuild the fe80:: address derived from the
			 * old hsuid so it can be removed. */
			addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
			addr->u.a6.addr.s6_addr32[1] = 0x00000000;
			for (i = 8; i < 16; i++)
				addr->u.a6.addr.s6_addr[i] =
					card->options.hsuid[i - 8];
			addr->u.a6.pfxlen = 0;
			addr->type = QETH_IP_TYPE_NORMAL;
		} else
			return -ENOMEM;
		/* qeth_l3_delete_ip() takes ownership on success;
		 * free the buffer ourselves otherwise. */
		if (!qeth_l3_delete_ip(card, addr))
			kfree(addr);
		qeth_l3_set_ip_addr_list(card);
	}

	if (strlen(tmp) == 0) {
		/* delete ip address only */
		card->options.hsuid[0] = '\0';
		if (card->dev)
			memcpy(card->dev->perm_addr, card->options.hsuid, 9);
		qeth_configure_cq(card, QETH_CQ_DISABLED);
		return count;
	}

	if (qeth_configure_cq(card, QETH_CQ_ENABLED))
		return -EPERM;

	/* Store the new id space-padded to 8 chars, then in EBCDIC. */
	for (i = 0; i < 8; i++)
		card->options.hsuid[i] = ' ';
	card->options.hsuid[8] = '\0';
	strncpy(card->options.hsuid, tmp, strlen(tmp));
	ASCEBC(card->options.hsuid, 8);
	if (card->dev)
		memcpy(card->dev->perm_addr, card->options.hsuid, 9);

	/* Register the fe80:: address derived from the new hsuid. */
	addr = qeth_l3_get_addr_buffer(QETH_PROT_IPV6);
	if (addr != NULL) {
		addr->u.a6.addr.s6_addr32[0] = 0xfe800000;
		addr->u.a6.addr.s6_addr32[1] = 0x00000000;
		for (i = 8; i < 16; i++)
			addr->u.a6.addr.s6_addr[i] = card->options.hsuid[i - 8];
		addr->u.a6.pfxlen = 0;
		addr->type = QETH_IP_TYPE_NORMAL;
	} else
		return -ENOMEM;
	if (!qeth_l3_add_ip(card, addr))
		kfree(addr);
	qeth_l3_set_ip_addr_list(card);

	return count;
}

static DEVICE_ATTR(hsuid, 0644, qeth_l3_dev_hsuid_show,
		   qeth_l3_dev_hsuid_store);
/* Base set of qeth L3 sysfs attributes, exported as the unnamed default
 * attribute group of the device. */
static struct attribute *qeth_l3_device_attrs[] = {
	&dev_attr_route4.attr,
	&dev_attr_route6.attr,
	&dev_attr_fake_broadcast.attr,
	&dev_attr_sniffer.attr,
	&dev_attr_hsuid.attr,
	NULL,
};

static struct attribute_group qeth_l3_device_attr_group = {
	.attrs = qeth_l3_device_attrs,
};
/* Show whether IP-address takeover (IPAT) is enabled (1) or disabled (0). */
static ssize_t qeth_l3_dev_ipato_enable_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return sprintf(buf, "%i\n", card->ipato.enabled? 1:0);
}

/*
 * Accepts "toggle", "1" or "0"; only permitted while the card is in
 * DOWN/RECOVER state. When enabling, every pending NORMAL address covered
 * by an IPAT entry gets the takeover flag set; when disabling, the flag is
 * cleared again. Serialized by conf_mutex.
 */
static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	struct qeth_ipaddr *tmpipa, *t;
	char *tmp;
	int rc = 0;

	if (!card)
		return -EINVAL;

	mutex_lock(&card->conf_mutex);
	if ((card->state != CARD_STATE_DOWN) &&
	    (card->state != CARD_STATE_RECOVER)) {
		rc = -EPERM;
		goto out;
	}

	tmp = strsep((char **) &buf, "\n");
	if (!strcmp(tmp, "toggle")) {
		card->ipato.enabled = (card->ipato.enabled)? 0 : 1;
	} else if (!strcmp(tmp, "1")) {
		card->ipato.enabled = 1;
		list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
			if ((tmpipa->type == QETH_IP_TYPE_NORMAL) &&
				qeth_l3_is_addr_covered_by_ipato(card, tmpipa))
				tmpipa->set_flags |=
					QETH_IPA_SETIP_TAKEOVER_FLAG;
		}
	} else if (!strcmp(tmp, "0")) {
		card->ipato.enabled = 0;
		list_for_each_entry_safe(tmpipa, t, card->ip_tbd_list, entry) {
			if (tmpipa->set_flags &
				QETH_IPA_SETIP_TAKEOVER_FLAG)
				tmpipa->set_flags &=
					~QETH_IPA_SETIP_TAKEOVER_FLAG;
		}
	} else
		rc = -EINVAL;
out:
	mutex_unlock(&card->conf_mutex);
	return rc ? rc : count;
}

static QETH_DEVICE_ATTR(ipato_enable, enable, 0644,
			qeth_l3_dev_ipato_enable_show,
			qeth_l3_dev_ipato_enable_store);
/* Show whether IPv4 takeover-address matching is inverted (1) or not (0). */
static ssize_t qeth_l3_dev_ipato_invert4_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;
	return sprintf(buf, "%i\n", !!card->ipato.invert4);
}

/* Accept "toggle", "1" or "0" to control inversion of IPv4 takeover
 * matching; the update is serialized by conf_mutex. */
static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	char *token;
	int rc = 0;

	if (!card)
		return -EINVAL;

	mutex_lock(&card->conf_mutex);
	token = strsep((char **) &buf, "\n");
	if (strcmp(token, "toggle") == 0)
		card->ipato.invert4 = !card->ipato.invert4;
	else if (strcmp(token, "1") == 0)
		card->ipato.invert4 = 1;
	else if (strcmp(token, "0") == 0)
		card->ipato.invert4 = 0;
	else
		rc = -EINVAL;
	mutex_unlock(&card->conf_mutex);

	return rc ? rc : count;
}

static QETH_DEVICE_ATTR(ipato_invert4, invert4, 0644,
			qeth_l3_dev_ipato_invert4_show,
			qeth_l3_dev_ipato_invert4_store);
/* Format the list of IPAT entries of one protocol into buf, one
 * "addr/mask_bits" per line, stopping early so the output never exceeds
 * PAGE_SIZE. Returns the number of bytes written. */
static ssize_t qeth_l3_dev_ipato_add_show(char *buf, struct qeth_card *card,
			enum qeth_prot_versions proto)
{
	struct qeth_ipato_entry *ipatoe;
	unsigned long flags;
	char addr_str[40];
	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
	int i = 0;

	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
	/* add strlen for "/<mask>\n" */
	entry_len += (proto == QETH_PROT_IPV4)? 5 : 6;
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipatoe, &card->ipato.entries, entry) {
		if (ipatoe->proto != proto)
			continue;
		/* String must not be longer than PAGE_SIZE. So we check if
		 * string length gets near PAGE_SIZE. Then we can savely display
		 * the next IPv6 address (worst case, compared to IPv4) */
		if ((PAGE_SIZE - i) <= entry_len)
			break;
		qeth_l3_ipaddr_to_string(proto, ipatoe->addr, addr_str);
		i += snprintf(buf + i, PAGE_SIZE - i,
			      "%s/%i\n", addr_str, ipatoe->mask_bits);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	/* trailing blank line terminates the listing */
	i += snprintf(buf + i, PAGE_SIZE - i, "\n");

	return i;
}

/* sysfs front-end: list IPv4 takeover entries. */
static ssize_t qeth_l3_dev_ipato_add4_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV4);
}
/* Parse "<addr>/<mask_bits>" into a binary address and prefix length.
 * Returns 0 on success, -EINVAL on any syntax or range error. */
static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto,
		  u8 *addr, int *mask_bits)
{
	const char *start, *end;
	char *tmp;
	char buffer[40] = {0, };

	start = buf;
	/* get address string */
	end = strchr(start, '/');
	if (!end || (end - start >= 40)) {
		return -EINVAL;
	}
	/* buffer is pre-zeroed and at most 39 bytes are copied, so the
	 * result is always NUL-terminated */
	strncpy(buffer, start, end - start);
	if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) {
		return -EINVAL;
	}
	start = end + 1;
	*mask_bits = simple_strtoul(start, &tmp, 10);
	/* reject empty mask, non-numeric mask, or out-of-range prefix */
	if (!strlen(start) ||
	    (tmp == start) ||
	    (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) {
		return -EINVAL;
	}
	return 0;
}
/* Add one IPAT entry. The kzalloc'ed entry is handed to
 * qeth_l3_add_ipato_entry(); it is freed here only when that call fails
 * (success path retains the entry in the card's list). */
static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count,
			 struct qeth_card *card, enum qeth_prot_versions proto)
{
	struct qeth_ipato_entry *ipatoe;
	u8 addr[16];
	int mask_bits;
	int rc = 0;

	mutex_lock(&card->conf_mutex);
	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
	if (rc)
		goto out;

	ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL);
	if (!ipatoe) {
		rc = -ENOMEM;
		goto out;
	}
	ipatoe->proto = proto;
	memcpy(ipatoe->addr, addr, (proto == QETH_PROT_IPV4)? 4:16);
	ipatoe->mask_bits = mask_bits;

	rc = qeth_l3_add_ipato_entry(card, ipatoe);
	if (rc)
		kfree(ipatoe);
out:
	mutex_unlock(&card->conf_mutex);
	return rc ? rc : count;
}

/* sysfs front-end: add an IPv4 takeover entry. */
static ssize_t qeth_l3_dev_ipato_add4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_ipato_add_store(buf, count, card, QETH_PROT_IPV4);
}

static QETH_DEVICE_ATTR(ipato_add4, add4, 0644,
			qeth_l3_dev_ipato_add4_show,
			qeth_l3_dev_ipato_add4_store);
/* Remove one IPA-takeover entry described by "<addr>/<mask_bits>". */
static ssize_t qeth_l3_dev_ipato_del_store(const char *buf, size_t count,
			 struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16];
	int mask_bits;
	int rc;

	mutex_lock(&card->conf_mutex);
	rc = qeth_l3_parse_ipatoe(buf, proto, addr, &mask_bits);
	if (rc == 0)
		qeth_l3_del_ipato_entry(card, proto, addr, mask_bits);
	mutex_unlock(&card->conf_mutex);

	return rc ? rc : count;
}

/* sysfs front-end: delete an IPv4 takeover entry. */
static ssize_t qeth_l3_dev_ipato_del4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_ipato_del_store(buf, count, card,
						  QETH_PROT_IPV4)
		    : -EINVAL;
}

static QETH_DEVICE_ATTR(ipato_del4, del4, 0200, NULL,
			qeth_l3_dev_ipato_del4_store);
/* Show whether IPv6 takeover-address matching is inverted (1) or not (0). */
static ssize_t qeth_l3_dev_ipato_invert6_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return sprintf(buf, "%i\n", card->ipato.invert6? 1:0);
}

/* Accepts "toggle", "1" or "0" to control inversion of the IPv6
 * takeover-address matching; serialized by conf_mutex. */
static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);
	char *tmp;
	int rc = 0;

	if (!card)
		return -EINVAL;

	mutex_lock(&card->conf_mutex);
	tmp = strsep((char **) &buf, "\n");
	if (!strcmp(tmp, "toggle")) {
		card->ipato.invert6 = (card->ipato.invert6)? 0 : 1;
	} else if (!strcmp(tmp, "1")) {
		card->ipato.invert6 = 1;
	} else if (!strcmp(tmp, "0")) {
		card->ipato.invert6 = 0;
	} else
		rc = -EINVAL;
	mutex_unlock(&card->conf_mutex);
	return rc ? rc : count;
}

static QETH_DEVICE_ATTR(ipato_invert6, invert6, 0644,
			qeth_l3_dev_ipato_invert6_show,
			qeth_l3_dev_ipato_invert6_store);
/* List IPv6 IPA-takeover entries. */
static ssize_t qeth_l3_dev_ipato_add6_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_ipato_add_show(buf, card, QETH_PROT_IPV6)
		    : -EINVAL;
}

/* Add an IPv6 IPA-takeover entry. */
static ssize_t qeth_l3_dev_ipato_add6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_ipato_add_store(buf, count, card,
						  QETH_PROT_IPV6)
		    : -EINVAL;
}

static QETH_DEVICE_ATTR(ipato_add6, add6, 0644,
			qeth_l3_dev_ipato_add6_show,
			qeth_l3_dev_ipato_add6_store);

/* Delete an IPv6 IPA-takeover entry. */
static ssize_t qeth_l3_dev_ipato_del6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_ipato_del_store(buf, count, card,
						  QETH_PROT_IPV6)
		    : -EINVAL;
}

static QETH_DEVICE_ATTR(ipato_del6, del6, 0200, NULL,
			qeth_l3_dev_ipato_del6_store);
/* IPA-takeover attributes, exported under the "ipa_takeover" subdirectory. */
static struct attribute *qeth_ipato_device_attrs[] = {
	&dev_attr_ipato_enable.attr,
	&dev_attr_ipato_invert4.attr,
	&dev_attr_ipato_add4.attr,
	&dev_attr_ipato_del4.attr,
	&dev_attr_ipato_invert6.attr,
	&dev_attr_ipato_add6.attr,
	&dev_attr_ipato_del6.attr,
	NULL,
};

static struct attribute_group qeth_device_ipato_group = {
	.name = "ipa_takeover",
	.attrs = qeth_ipato_device_attrs,
};
/* Format all virtual IP addresses (VIPA) of one protocol into buf, one per
 * line, stopping early so the output never exceeds PAGE_SIZE. */
static ssize_t qeth_l3_dev_vipa_add_show(char *buf, struct qeth_card *card,
			enum qeth_prot_versions proto)
{
	struct qeth_ipaddr *ipaddr;
	char addr_str[40];
	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
	unsigned long flags;
	int i = 0;

	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
	entry_len += 2; /* \n + terminator */
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipaddr, &card->ip_list, entry) {
		if (ipaddr->proto != proto)
			continue;
		if (ipaddr->type != QETH_IP_TYPE_VIPA)
			continue;
		/* String must not be longer than PAGE_SIZE. So we check if
		 * string length gets near PAGE_SIZE. Then we can savely display
		 * the next IPv6 address (worst case, compared to IPv4) */
		if ((PAGE_SIZE - i) <= entry_len)
			break;
		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
			addr_str);
		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	/* trailing blank line terminates the listing */
	i += snprintf(buf + i, PAGE_SIZE - i, "\n");

	return i;
}

/* sysfs front-end: list IPv4 VIPAs. */
static ssize_t qeth_l3_dev_vipa_add4_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV4);
}
/* Validate and convert a VIPA address string; -EINVAL on bad input,
 * 0 on success with addr filled in. */
static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto,
	 u8 *addr)
{
	return qeth_l3_string_to_ipaddr(buf, proto, addr) ? -EINVAL : 0;
}
/* Register a virtual IP address (VIPA) of the given protocol. */
static ssize_t qeth_l3_dev_vipa_add_store(const char *buf, size_t count,
			 struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16] = {0, };
	int rc;

	mutex_lock(&card->conf_mutex);
	rc = qeth_l3_parse_vipae(buf, proto, addr);
	if (rc == 0)
		rc = qeth_l3_add_vipa(card, proto, addr);
	mutex_unlock(&card->conf_mutex);

	return rc ? rc : count;
}

/* sysfs front-end: add an IPv4 VIPA. */
static ssize_t qeth_l3_dev_vipa_add4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_vipa_add_store(buf, count, card,
						 QETH_PROT_IPV4)
		    : -EINVAL;
}

static QETH_DEVICE_ATTR(vipa_add4, add4, 0644,
			qeth_l3_dev_vipa_add4_show,
			qeth_l3_dev_vipa_add4_store);
/* Deregister a VIPA of the given protocol. */
static ssize_t qeth_l3_dev_vipa_del_store(const char *buf, size_t count,
			 struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16];
	int rc;

	mutex_lock(&card->conf_mutex);
	rc = qeth_l3_parse_vipae(buf, proto, addr);
	if (rc == 0)
		qeth_l3_del_vipa(card, proto, addr);
	mutex_unlock(&card->conf_mutex);

	return rc ? rc : count;
}

/* sysfs front-end: delete an IPv4 VIPA. */
static ssize_t qeth_l3_dev_vipa_del4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_vipa_del_store(buf, count, card,
						 QETH_PROT_IPV4)
		    : -EINVAL;
}

static QETH_DEVICE_ATTR(vipa_del4, del4, 0200, NULL,
			qeth_l3_dev_vipa_del4_store);
/* List IPv6 VIPAs. */
static ssize_t qeth_l3_dev_vipa_add6_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_vipa_add_show(buf, card, QETH_PROT_IPV6)
		    : -EINVAL;
}

/* Add an IPv6 VIPA. */
static ssize_t qeth_l3_dev_vipa_add6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_vipa_add_store(buf, count, card,
						 QETH_PROT_IPV6)
		    : -EINVAL;
}

static QETH_DEVICE_ATTR(vipa_add6, add6, 0644,
			qeth_l3_dev_vipa_add6_show,
			qeth_l3_dev_vipa_add6_store);

/* Delete an IPv6 VIPA. */
static ssize_t qeth_l3_dev_vipa_del6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_vipa_del_store(buf, count, card,
						 QETH_PROT_IPV6)
		    : -EINVAL;
}

static QETH_DEVICE_ATTR(vipa_del6, del6, 0200, NULL,
			qeth_l3_dev_vipa_del6_store);
/* VIPA attributes, exported under the "vipa" subdirectory. */
static struct attribute *qeth_vipa_device_attrs[] = {
	&dev_attr_vipa_add4.attr,
	&dev_attr_vipa_del4.attr,
	&dev_attr_vipa_add6.attr,
	&dev_attr_vipa_del6.attr,
	NULL,
};

static struct attribute_group qeth_device_vipa_group = {
	.name = "vipa",
	.attrs = qeth_vipa_device_attrs,
};
/* Format all rx-ip addresses of one protocol into buf, one per line,
 * stopping early so the output never exceeds PAGE_SIZE. */
static ssize_t qeth_l3_dev_rxip_add_show(char *buf, struct qeth_card *card,
		       enum qeth_prot_versions proto)
{
	struct qeth_ipaddr *ipaddr;
	char addr_str[40];
	int entry_len; /* length of 1 entry string, differs between v4 and v6 */
	unsigned long flags;
	int i = 0;

	entry_len = (proto == QETH_PROT_IPV4)? 12 : 40;
	entry_len += 2; /* \n + terminator */
	spin_lock_irqsave(&card->ip_lock, flags);
	list_for_each_entry(ipaddr, &card->ip_list, entry) {
		if (ipaddr->proto != proto)
			continue;
		if (ipaddr->type != QETH_IP_TYPE_RXIP)
			continue;
		/* String must not be longer than PAGE_SIZE. So we check if
		 * string length gets near PAGE_SIZE. Then we can savely display
		 * the next IPv6 address (worst case, compared to IPv4) */
		if ((PAGE_SIZE - i) <= entry_len)
			break;
		qeth_l3_ipaddr_to_string(proto, (const u8 *)&ipaddr->u,
			addr_str);
		i += snprintf(buf + i, PAGE_SIZE - i, "%s\n", addr_str);
	}
	spin_unlock_irqrestore(&card->ip_lock, flags);
	/* trailing blank line terminates the listing */
	i += snprintf(buf + i, PAGE_SIZE - i, "\n");

	return i;
}

/* sysfs front-end: list IPv4 rx-ip addresses. */
static ssize_t qeth_l3_dev_rxip_add4_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	if (!card)
		return -EINVAL;

	return qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV4);
}
/* Validate and convert an rx-ip address string; -EINVAL on bad input,
 * 0 on success with addr filled in. */
static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto,
	 u8 *addr)
{
	return qeth_l3_string_to_ipaddr(buf, proto, addr) ? -EINVAL : 0;
}
/* Register an rx-ip address of the given protocol. */
static ssize_t qeth_l3_dev_rxip_add_store(const char *buf, size_t count,
			struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16] = {0, };
	int rc;

	mutex_lock(&card->conf_mutex);
	rc = qeth_l3_parse_rxipe(buf, proto, addr);
	if (rc == 0)
		rc = qeth_l3_add_rxip(card, proto, addr);
	mutex_unlock(&card->conf_mutex);

	return rc ? rc : count;
}

/* sysfs front-end: add an IPv4 rx-ip address. */
static ssize_t qeth_l3_dev_rxip_add4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_rxip_add_store(buf, count, card,
						 QETH_PROT_IPV4)
		    : -EINVAL;
}

static QETH_DEVICE_ATTR(rxip_add4, add4, 0644,
			qeth_l3_dev_rxip_add4_show,
			qeth_l3_dev_rxip_add4_store);
/* Deregister an rx-ip address of the given protocol. */
static ssize_t qeth_l3_dev_rxip_del_store(const char *buf, size_t count,
			struct qeth_card *card, enum qeth_prot_versions proto)
{
	u8 addr[16];
	int rc;

	mutex_lock(&card->conf_mutex);
	rc = qeth_l3_parse_rxipe(buf, proto, addr);
	if (rc == 0)
		qeth_l3_del_rxip(card, proto, addr);
	mutex_unlock(&card->conf_mutex);

	return rc ? rc : count;
}

/* sysfs front-end: delete an IPv4 rx-ip address. */
static ssize_t qeth_l3_dev_rxip_del4_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_rxip_del_store(buf, count, card,
						 QETH_PROT_IPV4)
		    : -EINVAL;
}

static QETH_DEVICE_ATTR(rxip_del4, del4, 0200, NULL,
			qeth_l3_dev_rxip_del4_store);
/* List IPv6 rx-ip addresses. */
static ssize_t qeth_l3_dev_rxip_add6_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_rxip_add_show(buf, card, QETH_PROT_IPV6)
		    : -EINVAL;
}

/* Add an IPv6 rx-ip address. */
static ssize_t qeth_l3_dev_rxip_add6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_rxip_add_store(buf, count, card,
						 QETH_PROT_IPV6)
		    : -EINVAL;
}

static QETH_DEVICE_ATTR(rxip_add6, add6, 0644,
			qeth_l3_dev_rxip_add6_show,
			qeth_l3_dev_rxip_add6_store);

/* Delete an IPv6 rx-ip address. */
static ssize_t qeth_l3_dev_rxip_del6_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct qeth_card *card = dev_get_drvdata(dev);

	return card ? qeth_l3_dev_rxip_del_store(buf, count, card,
						 QETH_PROT_IPV6)
		    : -EINVAL;
}

static QETH_DEVICE_ATTR(rxip_del6, del6, 0200, NULL,
			qeth_l3_dev_rxip_del6_store);
/* rx-ip attributes, exported under the "rxip" subdirectory. */
static struct attribute *qeth_rxip_device_attrs[] = {
	&dev_attr_rxip_add4.attr,
	&dev_attr_rxip_del4.attr,
	&dev_attr_rxip_add6.attr,
	&dev_attr_rxip_del6.attr,
	NULL,
};

static struct attribute_group qeth_device_rxip_group = {
	.name = "rxip",
	.attrs = qeth_rxip_device_attrs,
};
/* Create all qeth L3 sysfs groups for the device. On any failure the
 * groups already created are removed again (goto-based unwind), and the
 * sysfs_create_group() error code is returned. */
int qeth_l3_create_device_attributes(struct device *dev)
{
	int ret;

	ret = sysfs_create_group(&dev->kobj, &qeth_l3_device_attr_group);
	if (ret)
		return ret;

	ret = sysfs_create_group(&dev->kobj, &qeth_device_ipato_group);
	if (ret)
		goto out_l3;

	ret = sysfs_create_group(&dev->kobj, &qeth_device_vipa_group);
	if (ret)
		goto out_ipato;

	ret = sysfs_create_group(&dev->kobj, &qeth_device_rxip_group);
	if (ret)
		goto out_vipa;

	return 0;

out_vipa:
	sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
out_ipato:
	sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
out_l3:
	sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
	return ret;
}
/* Tear down every sysfs group created by qeth_l3_create_device_attributes(). */
void qeth_l3_remove_device_attributes(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &qeth_l3_device_attr_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group);
	sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group);
}
| gpl-2.0 |
ultrasystem/kernel | drivers/media/video/pwc/pwc-ctrl.c | 2381 | 39974 | /* Driver for Philips webcam
Functions that send various control messages to the webcam, including
video modes.
(C) 1999-2003 Nemosoft Unv.
(C) 2004-2006 Luc Saillard (luc@saillard.org)
NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
driver and thus may have bugs that are not present in the original version.
Please send bug reports and support requests to <luc@saillard.org>.
NOTE: this version of pwc is an unofficial (modified) release of pwc & pcwx
driver and thus may have bugs that are not present in the original version.
Please send bug reports and support requests to <luc@saillard.org>.
The decompression routines have been implemented by reverse-engineering the
Nemosoft binary pwcx module. Caveat emptor.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
Changes
2001/08/03 Alvarado Added methods for changing white balance and
red/green gains
*/
/* Control functions for the cam; brightness, contrast, video mode, etc. */
#ifdef __KERNEL__
#include <asm/uaccess.h>
#endif
#include <asm/errno.h>
#include "pwc.h"
#include "pwc-uncompress.h"
#include "pwc-kiara.h"
#include "pwc-timon.h"
#include "pwc-dec1.h"
#include "pwc-dec23.h"
/* Request types: video */
#define SET_LUM_CTL 0x01
#define GET_LUM_CTL 0x02
#define SET_CHROM_CTL 0x03
#define GET_CHROM_CTL 0x04
#define SET_STATUS_CTL 0x05
#define GET_STATUS_CTL 0x06
#define SET_EP_STREAM_CTL 0x07
#define GET_EP_STREAM_CTL 0x08
#define GET_XX_CTL 0x09
#define SET_XX_CTL 0x0A
#define GET_XY_CTL 0x0B
#define SET_XY_CTL 0x0C
#define SET_MPT_CTL 0x0D
#define GET_MPT_CTL 0x0E
/* Selectors for the Luminance controls [GS]ET_LUM_CTL */
#define AGC_MODE_FORMATTER 0x2000
#define PRESET_AGC_FORMATTER 0x2100
#define SHUTTER_MODE_FORMATTER 0x2200
#define PRESET_SHUTTER_FORMATTER 0x2300
#define PRESET_CONTOUR_FORMATTER 0x2400
#define AUTO_CONTOUR_FORMATTER 0x2500
#define BACK_LIGHT_COMPENSATION_FORMATTER 0x2600
#define CONTRAST_FORMATTER 0x2700
#define DYNAMIC_NOISE_CONTROL_FORMATTER 0x2800
#define FLICKERLESS_MODE_FORMATTER 0x2900
#define AE_CONTROL_SPEED 0x2A00
#define BRIGHTNESS_FORMATTER 0x2B00
#define GAMMA_FORMATTER 0x2C00
/* Selectors for the Chrominance controls [GS]ET_CHROM_CTL */
#define WB_MODE_FORMATTER 0x1000
#define AWB_CONTROL_SPEED_FORMATTER 0x1100
#define AWB_CONTROL_DELAY_FORMATTER 0x1200
#define PRESET_MANUAL_RED_GAIN_FORMATTER 0x1300
#define PRESET_MANUAL_BLUE_GAIN_FORMATTER 0x1400
#define COLOUR_MODE_FORMATTER 0x1500
#define SATURATION_MODE_FORMATTER1 0x1600
#define SATURATION_MODE_FORMATTER2 0x1700
/* Selectors for the Status controls [GS]ET_STATUS_CTL */
#define SAVE_USER_DEFAULTS_FORMATTER 0x0200
#define RESTORE_USER_DEFAULTS_FORMATTER 0x0300
#define RESTORE_FACTORY_DEFAULTS_FORMATTER 0x0400
#define READ_AGC_FORMATTER 0x0500
#define READ_SHUTTER_FORMATTER 0x0600
#define READ_RED_GAIN_FORMATTER 0x0700
#define READ_BLUE_GAIN_FORMATTER 0x0800
#define GET_STATUS_B00 0x0B00
#define SENSOR_TYPE_FORMATTER1 0x0C00
#define GET_STATUS_3000 0x3000
#define READ_RAW_Y_MEAN_FORMATTER 0x3100
#define SET_POWER_SAVE_MODE_FORMATTER 0x3200
#define MIRROR_IMAGE_FORMATTER 0x3300
#define LED_FORMATTER 0x3400
#define LOWLIGHT 0x3500
#define GET_STATUS_3600 0x3600
#define SENSOR_TYPE_FORMATTER2 0x3700
#define GET_STATUS_3800 0x3800
#define GET_STATUS_4000 0x4000
#define GET_STATUS_4100 0x4100 /* Get */
#define CTL_STATUS_4200 0x4200 /* [GS] 1 */
/* Formatters for the Video Endpoint controls [GS]ET_EP_STREAM_CTL */
#define VIDEO_OUTPUT_CONTROL_FORMATTER 0x0100
/* Formatters for the motorized pan & tilt [GS]ET_MPT_CTL */
#define PT_RELATIVE_CONTROL_FORMATTER 0x01
#define PT_RESET_CONTROL_FORMATTER 0x02
#define PT_STATUS_FORMATTER 0x03
/* Human-readable names for the PSZ_* image-size indices (for messages). */
static const char *size2name[PSZ_MAX] =
{
	"subQCIF",
	"QSIF",
	"QCIF",
	"SIF",
	"CIF",
	"VGA",
};

/********/

/* Entries for the Nala (645/646) camera; the Nala doesn't have compression
   preferences, so you either get compressed or non-compressed streams.

   An alternate value of 0 means this mode is not available at all.
 */
#define PWC_FPS_MAX_NALA 8

struct Nala_table_entry {
	char alternate;			/* USB alternate setting */
	int compressed;			/* Compressed yes/no */
	unsigned char mode[3];		/* precomputed mode table */
};

/* Framerates the Nala can deliver; indexes match Nala_table's fps axis. */
static unsigned int Nala_fps_vector[PWC_FPS_MAX_NALA] = { 4, 5, 7, 10, 12, 15, 20, 24 };

static struct Nala_table_entry Nala_table[PSZ_MAX][PWC_FPS_MAX_NALA] =
{
#include "pwc-nala.h"
};

static void pwc_set_image_buffer_size(struct pwc_device *pdev);
/****************************************************************************/

/* Vendor-specific OUT control transfer. The payload is copied into a
 * kmalloc'ed bounce buffer first (USB core must not DMA from the stack).
 * Returns usb_control_msg()'s result: byte count or negative errno. */
static int _send_control_msg(struct pwc_device *pdev,
	u8 request, u16 value, int index, void *buf, int buflen, int timeout)
{
	int rc;
	void *kbuf = NULL;

	if (buflen) {
		kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */
		if (kbuf == NULL)
			return -ENOMEM;
		memcpy(kbuf, buf, buflen);
	}

	rc = usb_control_msg(pdev->udev, usb_sndctrlpipe(pdev->udev, 0),
		request,
		USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		value,
		index,
		kbuf, buflen, timeout);

	/* kfree(NULL) is a no-op, so the zero-length case is fine */
	kfree(kbuf);
	return rc;
}
/* Vendor-specific IN control transfer via a kmalloc'ed bounce buffer
 * (USB core must not DMA to the stack). The received data is copied into
 * the caller's buf only when the transfer succeeded — the original code
 * copied back unconditionally, handing the caller uninitialized heap
 * memory on error. Returns usb_control_msg()'s result. */
static int recv_control_msg(struct pwc_device *pdev,
	u8 request, u16 value, void *buf, int buflen)
{
	int rc;
	void *kbuf = kmalloc(buflen, GFP_KERNEL); /* not allowed on stack */

	if (kbuf == NULL)
		return -ENOMEM;

	rc = usb_control_msg(pdev->udev, usb_rcvctrlpipe(pdev->udev, 0),
		request,
		USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
		value,
		pdev->vcinterface,
		kbuf, buflen, 500);
	if (rc >= 0)
		memcpy(buf, kbuf, buflen);
	kfree(kbuf);
	return rc;
}
/* Send a video-mode command to the given endpoint (1 s timeout). */
static inline int send_video_command(struct pwc_device *pdev,
	int index, void *buf, int buflen)
{
	return _send_control_msg(pdev,
		SET_EP_STREAM_CTL,
		VIDEO_OUTPUT_CONTROL_FORMATTER,
		index,
		buf, buflen, 1000);
}

/* Send a control message to the video-control interface (500 ms timeout). */
static inline int send_control_msg(struct pwc_device *pdev,
	u8 request, u16 value, void *buf, int buflen)
{
	return _send_control_msg(pdev,
		request, value, pdev->vcinterface, buf, buflen, 500);
}
/* Configure a video mode on Nala-type (645/646) cams: the requested
 * framerate is first snapped to the closest supported rate, then the
 * matching precomputed 3-byte command is sent to the video endpoint and
 * the per-mode bookkeeping (frame size, band length, alternate) is set. */
static int set_video_mode_Nala(struct pwc_device *pdev, int size, int frames)
{
	unsigned char buf[3];
	int ret, fps;
	struct Nala_table_entry *pEntry;
	int frames2frames[31] =
	{ /* closest match of framerate */
	   0,  0,  0,  0,  4,	/*  0-4  */
	   5,  5,  7,  7, 10,	/*  5-9  */
	  10, 10, 12, 12, 15,	/* 10-14 */
	  15, 15, 15, 20, 20,	/* 15-19 */
	  20, 20, 20, 24, 24,	/* 20-24 */
	  24, 24, 24, 24, 24,	/* 25-29 */
	  24			/* 30 */
	};
	int frames2table[31] =
	{ 0, 0, 0, 0, 0,	/*  0-4  */
	  1, 1, 1, 2, 2,	/*  5-9  */
	  3, 3, 4, 4, 4,	/* 10-14 */
	  5, 5, 5, 5, 5,	/* 15-19 */
	  6, 6, 6, 6, 7,	/* 20-24 */
	  7, 7, 7, 7, 7,	/* 25-29 */
	  7			/* 30 */
	};

	if (size < 0 || size > PSZ_CIF || frames < 4 || frames > 25)
		return -EINVAL;
	frames = frames2frames[frames];
	fps = frames2table[frames];
	pEntry = &Nala_table[size][fps];
	if (pEntry->alternate == 0)
		return -EINVAL;

	memcpy(buf, pEntry->mode, 3);
	ret = send_video_command(pdev, pdev->vendpoint, buf, 3);
	if (ret < 0) {
		PWC_DEBUG_MODULE("Failed to send video command... %d\n", ret);
		return ret;
	}
	/* init the decompressor only when the stream is compressed and the
	 * user asked for decompressed YUV420 output */
	if (pEntry->compressed && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
		pwc_dec1_init(pdev->type, pdev->release, buf, pdev->decompress_data);

	/* remember the command so the stream can be restarted later */
	pdev->cmd_len = 3;
	memcpy(pdev->cmd_buf, buf, 3);

	/* Set various parameters */
	pdev->vframes = frames;
	pdev->vsize = size;
	pdev->valternate = pEntry->alternate;
	pdev->image = pwc_image_sizes[size];
	pdev->frame_size = (pdev->image.x * pdev->image.y * 3) / 2;
	if (pEntry->compressed) {
		if (pdev->release < 5) { /* 4 fold compression */
			pdev->vbandlength = 528;
			pdev->frame_size /= 4;
		}
		else {
			pdev->vbandlength = 704;
			pdev->frame_size /= 3;
		}
	}
	else
		pdev->vbandlength = 0;
	return 0;
}
/* Configure a video mode on Timon-type cams. Picks the table entry for
 * (size, fps, compression), falling back to higher compression ratios if
 * the preferred one is unavailable, then sends the 13-byte mode command. */
static int set_video_mode_Timon(struct pwc_device *pdev, int size, int frames, int compression, int snapshot)
{
	unsigned char buf[13];
	const struct Timon_table_entry *pChoose;
	int ret, fps;

	if (size >= PSZ_MAX || frames < 5 || frames > 30 || compression < 0 || compression > 3)
		return -EINVAL;
	if (size == PSZ_VGA && frames > 15)
		return -EINVAL;
	fps = (frames / 5) - 1;

	/* Find a supported framerate with progressively higher compression ratios
	   if the preferred ratio is not available.
	*/
	pChoose = NULL;
	while (compression <= 3) {
		pChoose = &Timon_table[size][fps][compression];
		if (pChoose->alternate != 0)
			break;
		compression++;
	}
	if (pChoose == NULL || pChoose->alternate == 0)
		return -ENOENT; /* Not supported. */

	memcpy(buf, pChoose->mode, 13);
	if (snapshot)
		buf[0] |= 0x80;	/* top bit of the command requests snapshot mode */
	ret = send_video_command(pdev, pdev->vendpoint, buf, 13);
	if (ret < 0)
		return ret;

	if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
		pwc_dec23_init(pdev, pdev->type, buf);

	/* remember the command so the stream can be restarted later */
	pdev->cmd_len = 13;
	memcpy(pdev->cmd_buf, buf, 13);

	/* Set various parameters */
	pdev->vframes = frames;
	pdev->vsize = size;
	pdev->vsnapshot = snapshot;
	pdev->valternate = pChoose->alternate;
	pdev->image = pwc_image_sizes[size];
	pdev->vbandlength = pChoose->bandlength;
	if (pChoose->bandlength > 0)
		pdev->frame_size = (pChoose->bandlength * pdev->image.y) / 4;
	else
		pdev->frame_size = (pdev->image.x * pdev->image.y * 12) / 8;
	return 0;
}
/* Configure a video mode on Kiara-type cams. Like the Timon variant, but
 * with a special raw-Bayer entry for VGA@5fps snapshots, and commands are
 * sent to endpoint 4 because of a firmware bug. */
static int set_video_mode_Kiara(struct pwc_device *pdev, int size, int frames, int compression, int snapshot)
{
	const struct Kiara_table_entry *pChoose = NULL;
	int fps, ret;
	unsigned char buf[12];
	struct Kiara_table_entry RawEntry = {6, 773, 1272, {0xAD, 0xF4, 0x10, 0x27, 0xB6, 0x24, 0x96, 0x02, 0x30, 0x05, 0x03, 0x80}};

	if (size >= PSZ_MAX || frames < 5 || frames > 30 || compression < 0 || compression > 3)
		return -EINVAL;
	if (size == PSZ_VGA && frames > 15)
		return -EINVAL;
	fps = (frames / 5) - 1;

	/* special case: VGA @ 5 fps and snapshot is raw bayer mode */
	if (size == PSZ_VGA && frames == 5 && snapshot && pdev->pixfmt != V4L2_PIX_FMT_YUV420)
	{
		/* Only available in case the raw palette is selected or
		   we have the decompressor available. This mode is
		   only available in compressed form
		*/
		PWC_DEBUG_SIZE("Choosing VGA/5 BAYER mode.\n");
		pChoose = &RawEntry;
	}
	else
	{
		/* Find a supported framerate with progressively higher compression ratios
		   if the preferred ratio is not available.
		   Skip this step when using RAW modes.
		*/
		snapshot = 0;
		while (compression <= 3) {
			pChoose = &Kiara_table[size][fps][compression];
			if (pChoose->alternate != 0)
				break;
			compression++;
		}
	}
	if (pChoose == NULL || pChoose->alternate == 0)
		return -ENOENT; /* Not supported. */

	PWC_TRACE("Using alternate setting %d.\n", pChoose->alternate);

	/* usb_control_msg won't take staticly allocated arrays as argument?? */
	memcpy(buf, pChoose->mode, 12);
	if (snapshot)
		buf[0] |= 0x80;	/* top bit of the command requests snapshot mode */

	/* Firmware bug: video endpoint is 5, but commands are sent to endpoint 4 */
	ret = send_video_command(pdev, 4 /* pdev->vendpoint */, buf, 12);
	if (ret < 0)
		return ret;

	if (pChoose->bandlength > 0 && pdev->pixfmt == V4L2_PIX_FMT_YUV420)
		pwc_dec23_init(pdev, pdev->type, buf);

	/* remember the command so the stream can be restarted later */
	pdev->cmd_len = 12;
	memcpy(pdev->cmd_buf, buf, 12);
	/* All set and go */
	pdev->vframes = frames;
	pdev->vsize = size;
	pdev->vsnapshot = snapshot;
	pdev->valternate = pChoose->alternate;
	pdev->image = pwc_image_sizes[size];
	pdev->vbandlength = pChoose->bandlength;
	if (pdev->vbandlength > 0)
		pdev->frame_size = (pdev->vbandlength * pdev->image.y) / 4;
	else
		pdev->frame_size = (pdev->image.x * pdev->image.y * 12) / 8;
	PWC_TRACE("frame_size=%d, vframes=%d, vsize=%d, vsnapshot=%d, vbandlength=%d\n",
		  pdev->frame_size, pdev->vframes, pdev->vsize, pdev->vsnapshot, pdev->vbandlength);
	return 0;
}
/**
 * Set a video mode on the camera.
 *
 * @pdev: device structure
 * @width: viewport width
 * @height: viewport height
 * @frames: framerate, in fps
 * @compression: preferred compression ratio
 * @snapshot: snapshot mode or streaming
 *
 * Maps the viewport to a supported image size, dispatches to the
 * codec-specific helper (Nala/Kiara/Timon), then records the viewport and
 * recomputes the image buffer layout.
 */
int pwc_set_video_mode(struct pwc_device *pdev, int width, int height, int frames, int compression, int snapshot)
{
	int ret, size;

	PWC_DEBUG_FLOW("set_video_mode(%dx%d @ %d, pixfmt %08x).\n", width, height, frames, pdev->pixfmt);
	size = pwc_decode_size(pdev, width, height);
	if (size < 0) {
		PWC_DEBUG_MODULE("Could not find suitable size.\n");
		return -ERANGE;
	}
	PWC_TRACE("decode_size = %d.\n", size);

	if (DEVICE_USE_CODEC1(pdev->type)) {
		ret = set_video_mode_Nala(pdev, size, frames);
	} else if (DEVICE_USE_CODEC3(pdev->type)) {
		ret = set_video_mode_Kiara(pdev, size, frames, compression, snapshot);
	} else {
		ret = set_video_mode_Timon(pdev, size, frames, compression, snapshot);
	}
	if (ret < 0) {
		PWC_ERROR("Failed to set video mode %s@%d fps; return code = %d\n", size2name[size], frames, ret);
		return ret;
	}
	pdev->view.x = width;
	pdev->view.y = height;
	pdev->frame_total_size = pdev->frame_size + pdev->frame_header_size + pdev->frame_trailer_size;
	pwc_set_image_buffer_size(pdev);
	PWC_DEBUG_SIZE("Set viewport to %dx%d, image size is %dx%d.\n", width, height, pwc_image_sizes[size].x, pwc_image_sizes[size].y);
	return 0;
}
/* Return the index-th available framerate for the given size on Nala cams,
 * or 0 when index exceeds the number of supported rates. */
static unsigned int pwc_get_fps_Nala(struct pwc_device *pdev, unsigned int index, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < PWC_FPS_MAX_NALA; i++) {
		if (!Nala_table[size][i].alternate)
			continue;
		if (index == 0)
			return Nala_fps_vector[i];
		index--;
	}
	return 0;
}

/* Same enumeration for Kiara cams (checked at maximum compression). */
static unsigned int pwc_get_fps_Kiara(struct pwc_device *pdev, unsigned int index, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < PWC_FPS_MAX_KIARA; i++) {
		if (!Kiara_table[size][i][3].alternate)
			continue;
		if (index == 0)
			return Kiara_fps_vector[i];
		index--;
	}
	return 0;
}

/* Same enumeration for Timon cams (checked at maximum compression). */
static unsigned int pwc_get_fps_Timon(struct pwc_device *pdev, unsigned int index, unsigned int size)
{
	unsigned int i;

	for (i = 0; i < PWC_FPS_MAX_TIMON; i++) {
		if (!Timon_table[size][i][3].alternate)
			continue;
		if (index == 0)
			return Timon_fps_vector[i];
		index--;
	}
	return 0;
}
/* Dispatch framerate enumeration to the codec-specific helper. */
unsigned int pwc_get_fps(struct pwc_device *pdev, unsigned int index, unsigned int size)
{
	if (DEVICE_USE_CODEC1(pdev->type))
		return pwc_get_fps_Nala(pdev, index, size);
	if (DEVICE_USE_CODEC3(pdev->type))
		return pwc_get_fps_Kiara(pdev, index, size);
	return pwc_get_fps_Timon(pdev, index, size);
}
#define BLACK_Y 0
#define BLACK_U 128
#define BLACK_V 128
/*
 * Recompute the image/view buffer byte sizes and the centering offsets
 * for the current pixel format, then paint every image buffer black.
 */
static void pwc_set_image_buffer_size(struct pwc_device *pdev)
{
int i, factor = 0;
/* for V4L2_PIX_FMT_YUV420 */
switch (pdev->pixfmt) {
case V4L2_PIX_FMT_YUV420:
factor = 6;
break;
case V4L2_PIX_FMT_PWC1:
case V4L2_PIX_FMT_PWC2:
factor = 6; /* can be uncompressed YUV420P */
break;
}
/* Set sizes in bytes */
/* YUV 4:2:0 stores 6 bytes per 4 pixels (4 Y + 1 U + 1 V): x*y*6/4 */
pdev->image.size = pdev->image.x * pdev->image.y * factor / 4;
pdev->view.size = pdev->view.x * pdev->view.y * factor / 4;
/* Align offset, or you'll get some very weird results in
YUV420 mode... x must be multiple of 4 (to get the Y's in
place), and y even (or you'll mixup U & V). This is less of a
problem for YUV420P.
*/
pdev->offset.x = ((pdev->view.x - pdev->image.x) / 2) & 0xFFFC;
pdev->offset.y = ((pdev->view.y - pdev->image.y) / 2) & 0xFFFE;
/* Fill buffers with black colors */
/* Planar layout: full-size Y plane, then quarter-size U and V planes.
   Black in YUV is Y=0, U=V=128 (BLACK_* defined above). */
for (i = 0; i < pwc_mbufs; i++) {
unsigned char *p = pdev->image_data + pdev->images[i].offset;
memset(p, BLACK_Y, pdev->view.x * pdev->view.y);
p += pdev->view.x * pdev->view.y;
memset(p, BLACK_U, pdev->view.x * pdev->view.y/4);
p += pdev->view.x * pdev->view.y/4;
memset(p, BLACK_V, pdev->view.x * pdev->view.y/4);
}
}
/* BRIGHTNESS */
/* Read the raw brightness register (one byte); returns the register
 * value, or a negative error from the control transfer. */
int pwc_get_brightness(struct pwc_device *pdev)
{
	char buf;
	int ret = recv_control_msg(pdev, GET_LUM_CTL, BRIGHTNESS_FORMATTER,
				   &buf, sizeof(buf));

	return ret < 0 ? ret : buf;
}
/* Set brightness; @value is clamped to 0..0xffff and scaled down to the
 * 7-bit device register. Returns the control-transfer result. */
int pwc_set_brightness(struct pwc_device *pdev, int value)
{
	char buf;

	if (value < 0)
		value = 0;
	else if (value > 0xffff)
		value = 0xffff;
	buf = (value >> 9) & 0x7f;	/* 16-bit API range -> 7-bit register */
	return send_control_msg(pdev, SET_LUM_CTL, BRIGHTNESS_FORMATTER,
				&buf, sizeof(buf));
}
/* CONTRAST */
/* Read the raw contrast register (one byte); returns the register
 * value, or a negative error from the control transfer. */
int pwc_get_contrast(struct pwc_device *pdev)
{
	char buf;
	int ret = recv_control_msg(pdev, GET_LUM_CTL, CONTRAST_FORMATTER,
				   &buf, sizeof(buf));

	return ret < 0 ? ret : buf;
}
/* Set contrast; @value is clamped to 0..0xffff and scaled down to the
 * 6-bit device register. Returns the control-transfer result. */
int pwc_set_contrast(struct pwc_device *pdev, int value)
{
	char buf;

	if (value < 0)
		value = 0;
	else if (value > 0xffff)
		value = 0xffff;
	buf = (value >> 10) & 0x3f;	/* 16-bit API range -> 6-bit register */
	return send_control_msg(pdev, SET_LUM_CTL, CONTRAST_FORMATTER,
				&buf, sizeof(buf));
}
/* GAMMA */
/* Read the raw gamma register (one byte); returns the register value,
 * or a negative error from the control transfer. */
int pwc_get_gamma(struct pwc_device *pdev)
{
	char buf;
	int ret = recv_control_msg(pdev, GET_LUM_CTL, GAMMA_FORMATTER,
				   &buf, sizeof(buf));

	return ret < 0 ? ret : buf;
}
/* Set gamma; @value is clamped to 0..0xffff and scaled down to the
 * 5-bit device register. Returns the control-transfer result. */
int pwc_set_gamma(struct pwc_device *pdev, int value)
{
	char buf;

	if (value < 0)
		value = 0;
	else if (value > 0xffff)
		value = 0xffff;
	buf = (value >> 11) & 0x1f;	/* 16-bit API range -> 5-bit register */
	return send_control_msg(pdev, SET_LUM_CTL, GAMMA_FORMATTER,
				&buf, sizeof(buf));
}
/* SATURATION */
/* return a value between [-100 , 100] */
/* Read the saturation register into *value (raw signed byte).
 * Only supported from type 675 onward; the register address differs
 * between the 675..72x and 730+ families. */
int pwc_get_saturation(struct pwc_device *pdev, int *value)
{
	char buf;
	int ret, saturation_register;

	if (pdev->type < 675)
		return -EINVAL;
	saturation_register = (pdev->type < 730) ?
		SATURATION_MODE_FORMATTER2 : SATURATION_MODE_FORMATTER1;
	ret = recv_control_msg(pdev, GET_CHROM_CTL, saturation_register,
			       &buf, sizeof(buf));
	if (ret < 0)
		return ret;
	*value = (signed)buf;
	return 0;
}
/* @param value saturation color between [-100 , 100] */
/* Set the colour saturation; @value is clamped to [-100, 100].
 * Only supported from type 675 onward; the register address differs
 * between the 675..72x and 730+ families.
 * Returns the control-transfer result or -EINVAL when unsupported. */
int pwc_set_saturation(struct pwc_device *pdev, int value)
{
	char buf;
	int saturation_register;

	if (pdev->type < 675)
		return -EINVAL;
	if (value < -100)
		value = -100;
	if (value > 100)
		value = 100;
	if (pdev->type < 730)
		saturation_register = SATURATION_MODE_FORMATTER2;
	else
		saturation_register = SATURATION_MODE_FORMATTER1;
	/* BUG FIX: the clamped value was never copied into the transfer
	 * buffer, so an uninitialized stack byte was sent to the camera. */
	buf = (char)value;
	return send_control_msg(pdev,
		SET_CHROM_CTL, saturation_register, &buf, sizeof(buf));
}
/* AGC */
/* Select automatic (mode != 0) or fixed gain control; in fixed mode the
 * preset gain @value (clamped to 0..0xffff) is programmed as well.
 * Returns 0 on success or a negative transfer error. */
int pwc_set_agc(struct pwc_device *pdev, int mode, int value)
{
	char buf = mode ? 0x0 : 0xff;	/* 0x00 = auto, 0xff = fixed */
	int ret;

	ret = send_control_msg(pdev, SET_LUM_CTL, AGC_MODE_FORMATTER,
			       &buf, sizeof(buf));
	if (!mode && ret >= 0) {
		if (value < 0)
			value = 0;
		else if (value > 0xffff)
			value = 0xffff;
		buf = (value >> 10) & 0x3F;	/* 16-bit range -> 6-bit register */
		ret = send_control_msg(pdev, SET_LUM_CTL, PRESET_AGC_FORMATTER,
				       &buf, sizeof(buf));
	}
	return ret < 0 ? ret : 0;
}
/*
 * Read the current gain.  In fixed-gain mode *value gets the preset
 * gain scaled up to 0..0xffff; in automatic mode *value gets the
 * momentary sensor gain as a NEGATIVE number on a similar scale, so
 * the sign tells the caller which mode is active.
 * Returns 0 or a negative transfer error.
 */
int pwc_get_agc(struct pwc_device *pdev, int *value)
{
unsigned char buf;
int ret;
ret = recv_control_msg(pdev,
GET_LUM_CTL, AGC_MODE_FORMATTER, &buf, sizeof(buf));
if (ret < 0)
return ret;
if (buf != 0) { /* fixed */
ret = recv_control_msg(pdev,
GET_LUM_CTL, PRESET_AGC_FORMATTER, &buf, sizeof(buf));
if (ret < 0)
return ret;
if (buf > 0x3F)
buf = 0x3F;
/* 6-bit register scaled up to the driver's 16-bit range */
*value = (buf << 10);
}
else { /* auto */
ret = recv_control_msg(pdev,
GET_STATUS_CTL, READ_AGC_FORMATTER, &buf, sizeof(buf));
if (ret < 0)
return ret;
/* Gah... this value ranges from 0x00 ... 0x9F */
if (buf > 0x9F)
buf = 0x9F;
/* maps 0..0x9F onto -48..-65087; negated to mark "auto" mode */
*value = -(48 + buf * 409);
}
return 0;
}
/* Select automatic (mode != 0) or fixed shutter speed; in fixed mode
 * @value (clamped to 0..0xffff) is converted to the chipset-specific
 * preset format and programmed as well.
 * Returns 0/positive on success or a negative transfer error. */
int pwc_set_shutter_speed(struct pwc_device *pdev, int mode, int value)
{
	/* BUG FIX: zero-initialize.  On devices that are neither CODEC2
	 * nor CODEC3 neither branch below fills buf[1], and the old code
	 * sent an uninitialized stack byte to the camera. */
	char buf[2] = { 0, 0 };
	int speed, ret;

	buf[0] = mode ? 0x0 : 0xff;	/* 0x00 = auto, 0xff = fixed */
	ret = send_control_msg(pdev,
		SET_LUM_CTL, SHUTTER_MODE_FORMATTER, &buf, 1);
	if (!mode && ret >= 0) {
		if (value < 0)
			value = 0;
		if (value > 0xffff)
			value = 0xffff;
		if (DEVICE_USE_CODEC2(pdev->type)) {
			/* speed ranges from 0x0 to 0x290 (656),
			 * little-endian 16-bit value */
			speed = value / 100;
			buf[1] = speed >> 8;
			buf[0] = speed & 0xff;
		} else if (DEVICE_USE_CODEC3(pdev->type)) {
			/* speed seems to range from 0x0 to 0xff */
			buf[1] = 0;
			buf[0] = value >> 8;
		}
		ret = send_control_msg(pdev,
			SET_LUM_CTL, PRESET_SHUTTER_FORMATTER,
			&buf, sizeof(buf));
	}
	return ret;
}
/* This function is not exported to v4l1, so output values between 0 -> 256 */
/* Read the momentary shutter speed into *value, normalized to 0..255.
 * Returns 0 or a negative transfer error. */
int pwc_get_shutter_speed(struct pwc_device *pdev, int *value)
{
	unsigned char buf[2];
	int ret;

	ret = recv_control_msg(pdev,
		GET_STATUS_CTL, READ_SHUTTER_FORMATTER, &buf, sizeof(buf));
	if (ret < 0)
		return ret;
	*value = buf[0] + (buf[1] << 8);	/* little-endian 16-bit */
	if (DEVICE_USE_CODEC2(pdev->type)) {
		/* speed ranges from 0x0 to 0x290 (656); rescale to 0..255.
		 * BUG FIX: the old code computed "*value *= 256/656" where
		 * the integer quotient 256/656 is 0, so the reported value
		 * was always 0.  Multiply first, then divide. */
		*value = *value * 256 / 656;
	} else if (DEVICE_USE_CODEC3(pdev->type)) {
		/* speed seems to range from 0x0 to 0xff; no rescale needed */
	}
	return 0;
}
/* POWER */
/* Switch the camera between active (@power != 0) and power-save mode.
 * Silently succeeds on models without this call (Nala, Timon < rel 6). */
int pwc_camera_power(struct pwc_device *pdev, int power)
{
	char buf;

	if (pdev->type < 675 || (pdev->type < 730 && pdev->release < 6))
		return 0;	/* Not supported by Nala or Timon < release 6 */
	buf = power ? 0x00 : 0xFF;	/* 0x00 = active, 0xFF = power save */
	return send_control_msg(pdev, SET_STATUS_CTL,
				SET_POWER_SAVE_MODE_FORMATTER,
				&buf, sizeof(buf));
}
/* private calls */
/* Reload the user settings stored in the camera's non-volatile memory. */
int pwc_restore_user(struct pwc_device *pdev)
{
return send_control_msg(pdev,
SET_STATUS_CTL, RESTORE_USER_DEFAULTS_FORMATTER, NULL, 0);
}
/* Store the current settings as user defaults in the camera. */
int pwc_save_user(struct pwc_device *pdev)
{
return send_control_msg(pdev,
SET_STATUS_CTL, SAVE_USER_DEFAULTS_FORMATTER, NULL, 0);
}
/* Reset the camera to its factory default settings. */
int pwc_restore_factory(struct pwc_device *pdev)
{
return send_control_msg(pdev,
SET_STATUS_CTL, RESTORE_FACTORY_DEFAULTS_FORMATTER, NULL, 0);
}
/* ************************************************* */
/* Patch by Alvarado: (not in the original version */
/*
* the camera recognizes modes from 0 to 4:
*
* 00: indoor (incandescant lighting)
* 01: outdoor (sunlight)
* 02: fluorescent lighting
* 03: manual
* 04: auto
*/
/* Set the white-balance mode (0..4, see the table above; clamped).
 * Returns 0 or a negative transfer error. */
int pwc_set_awb(struct pwc_device *pdev, int mode)
{
	char buf;
	int ret;

	if (mode < 0)
		mode = 0;
	else if (mode > 4)
		mode = 4;
	buf = mode & 0x07;	/* just the lowest three bits */
	ret = send_control_msg(pdev, SET_CHROM_CTL, WB_MODE_FORMATTER,
			       &buf, sizeof(buf));
	return ret < 0 ? ret : 0;
}
/* Read the current white-balance mode; returns the mode byte or a
 * negative transfer error. */
int pwc_get_awb(struct pwc_device *pdev)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_CHROM_CTL, WB_MODE_FORMATTER,
				   &buf, sizeof(buf));

	return ret < 0 ? ret : buf;
}
/* Program the manual red gain; @value is clamped to 0..0xffff and only
 * its most significant byte is sent. */
int pwc_set_red_gain(struct pwc_device *pdev, int value)
{
	unsigned char buf;

	if (value < 0)
		value = 0;
	else if (value > 0xffff)
		value = 0xffff;
	buf = value >> 8;	/* only the msb is considered */
	return send_control_msg(pdev, SET_CHROM_CTL,
				PRESET_MANUAL_RED_GAIN_FORMATTER,
				&buf, sizeof(buf));
}
/* Read the manual red gain preset into *value (byte scaled up to the
 * 16-bit API range). Returns 0 or a negative transfer error. */
int pwc_get_red_gain(struct pwc_device *pdev, int *value)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_CHROM_CTL,
				   PRESET_MANUAL_RED_GAIN_FORMATTER,
				   &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	*value = buf << 8;	/* inverse of the >>8 scaling used on set */
	return 0;
}
/* Program the manual blue gain; @value is clamped to 0..0xffff and only
 * its most significant byte is sent. */
int pwc_set_blue_gain(struct pwc_device *pdev, int value)
{
	unsigned char buf;

	if (value < 0)
		value = 0;
	else if (value > 0xffff)
		value = 0xffff;
	buf = value >> 8;	/* only the msb is considered */
	return send_control_msg(pdev, SET_CHROM_CTL,
				PRESET_MANUAL_BLUE_GAIN_FORMATTER,
				&buf, sizeof(buf));
}
/* Read the manual blue gain preset into *value (byte scaled up to the
 * 16-bit API range). Returns 0 or a negative transfer error. */
int pwc_get_blue_gain(struct pwc_device *pdev, int *value)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_CHROM_CTL,
				   PRESET_MANUAL_BLUE_GAIN_FORMATTER,
				   &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	*value = buf << 8;	/* inverse of the >>8 scaling used on set */
	return 0;
}
/* The following two functions are different, since they only read the
internal red/blue gains, which may be different from the manual
gains set or read above.
*/
/* Read the camera's momentary internal red gain (may differ from the
 * manual preset) into *value, scaled to the 16-bit API range. */
static int pwc_read_red_gain(struct pwc_device *pdev, int *value)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_STATUS_CTL,
				   READ_RED_GAIN_FORMATTER,
				   &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	*value = buf << 8;
	return 0;
}
/* Read the camera's momentary internal blue gain (may differ from the
 * manual preset) into *value, scaled to the 16-bit API range. */
static int pwc_read_blue_gain(struct pwc_device *pdev, int *value)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_STATUS_CTL,
				   READ_BLUE_GAIN_FORMATTER,
				   &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	*value = buf << 8;
	return 0;
}
/* Program the auto-white-balance control speed. */
static int pwc_set_wb_speed(struct pwc_device *pdev, int speed)
{
	unsigned char buf = speed / 0x7f0;	/* useful range is 0x01..0x20 */

	return send_control_msg(pdev, SET_CHROM_CTL,
				AWB_CONTROL_SPEED_FORMATTER,
				&buf, sizeof(buf));
}
/* Read the auto-white-balance control speed into *value.
 * Returns 0 or a negative transfer error. */
static int pwc_get_wb_speed(struct pwc_device *pdev, int *value)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_CHROM_CTL,
				   AWB_CONTROL_SPEED_FORMATTER,
				   &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	*value = buf * 0x7f0;	/* inverse of the /0x7f0 scaling used on set */
	return 0;
}
/* Program the auto-white-balance control delay. */
static int pwc_set_wb_delay(struct pwc_device *pdev, int delay)
{
	unsigned char buf = delay >> 10;	/* useful range is 0x01..0x3F */

	return send_control_msg(pdev, SET_CHROM_CTL,
				AWB_CONTROL_DELAY_FORMATTER,
				&buf, sizeof(buf));
}
/* Read the auto-white-balance control delay into *value.
 * Returns 0 or a negative transfer error. */
static int pwc_get_wb_delay(struct pwc_device *pdev, int *value)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_CHROM_CTL,
				   AWB_CONTROL_DELAY_FORMATTER,
				   &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	*value = buf << 10;	/* inverse of the >>10 scaling used on set */
	return 0;
}
/* Program the LED on/off blink times.  The API values are divided by
 * 100 and clamped to one byte each; models before 730 silently accept
 * (and ignore) the call.  Returns the control-transfer result. */
int pwc_set_leds(struct pwc_device *pdev, int on_value, int off_value)
{
	unsigned char buf[2];

	if (pdev->type < 730)
		return 0;	/* no LED control on older models */
	on_value /= 100;	/* device unit is 1/100 of the API unit */
	off_value /= 100;
	if (on_value < 0)
		on_value = 0;
	else if (on_value > 0xff)
		on_value = 0xff;
	if (off_value < 0)
		off_value = 0;
	else if (off_value > 0xff)
		off_value = 0xff;
	buf[0] = on_value;
	buf[1] = off_value;
	return send_control_msg(pdev, SET_STATUS_CTL, LED_FORMATTER,
				&buf, sizeof(buf));
}
/* Read the LED on/off blink times (scaled back up by 100); models
 * before 730 report -1/-1 meaning "not supported".
 * Returns 0 or a negative transfer error. */
static int pwc_get_leds(struct pwc_device *pdev, int *on_value, int *off_value)
{
	unsigned char buf[2];
	int ret;

	if (pdev->type < 730) {
		*on_value = -1;
		*off_value = -1;
		return 0;
	}
	ret = recv_control_msg(pdev, GET_STATUS_CTL, LED_FORMATTER,
			       &buf, sizeof(buf));
	if (ret < 0)
		return ret;
	*on_value = buf[0] * 100;
	*off_value = buf[1] * 100;
	return 0;
}
/* Set contour (sharpness): a negative @contour enables auto contour;
 * otherwise auto contour is disabled and @contour (clamped to 0..0xffff)
 * is scaled to the 6-bit preset register.
 * Returns 0 or a negative transfer error. */
int pwc_set_contour(struct pwc_device *pdev, int contour)
{
	unsigned char buf = (contour < 0) ? 0xff : 0x0; /* 0xff = auto on */
	int ret;

	ret = send_control_msg(pdev, SET_LUM_CTL, AUTO_CONTOUR_FORMATTER,
			       &buf, sizeof(buf));
	if (ret < 0)
		return ret;
	if (contour < 0)
		return 0;	/* auto mode: nothing more to program */
	if (contour > 0xffff)
		contour = 0xffff;
	buf = contour >> 10;	/* contour preset is [0..3f] */
	ret = send_control_msg(pdev, SET_LUM_CTL, PRESET_CONTOUR_FORMATTER,
			       &buf, sizeof(buf));
	return ret < 0 ? ret : 0;
}
/* Read the contour setting: *contour becomes -1 when auto contour is
 * active, otherwise the preset scaled up to the 16-bit API range.
 * Returns 0 or a negative transfer error. */
int pwc_get_contour(struct pwc_device *pdev, int *contour)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_LUM_CTL, AUTO_CONTOUR_FORMATTER,
				   &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	if (buf != 0) {
		*contour = -1;	/* auto contour is on */
		return 0;
	}
	/* auto mode off, query current preset value */
	ret = recv_control_msg(pdev, GET_LUM_CTL, PRESET_CONTOUR_FORMATTER,
			       &buf, sizeof(buf));
	if (ret < 0)
		return ret;
	*contour = buf << 10;
	return 0;
}
/* Enable (@backlight != 0) or disable backlight compensation. */
int pwc_set_backlight(struct pwc_device *pdev, int backlight)
{
	unsigned char buf = backlight ? 0xff : 0x0;

	return send_control_msg(pdev, SET_LUM_CTL,
				BACK_LIGHT_COMPENSATION_FORMATTER,
				&buf, sizeof(buf));
}
/* Read the backlight compensation flag into *backlight (0 or 1).
 * Returns 0 or a negative transfer error. */
int pwc_get_backlight(struct pwc_device *pdev, int *backlight)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_LUM_CTL,
				   BACK_LIGHT_COMPENSATION_FORMATTER,
				   &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	*backlight = !!buf;	/* normalize to boolean */
	return 0;
}
/* Select colour (@colour != 0) or black-and-white mode. */
int pwc_set_colour_mode(struct pwc_device *pdev, int colour)
{
	unsigned char buf = colour ? 0xff : 0x0;

	return send_control_msg(pdev, SET_CHROM_CTL, COLOUR_MODE_FORMATTER,
				&buf, sizeof(buf));
}
/* Read the colour-mode flag into *colour (0 or 1).
 * Returns 0 or a negative transfer error. */
int pwc_get_colour_mode(struct pwc_device *pdev, int *colour)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_CHROM_CTL,
				   COLOUR_MODE_FORMATTER, &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	*colour = !!buf;	/* normalize to boolean */
	return 0;
}
/* Enable (@flicker != 0) or disable the flickerless (anti mains-hum)
 * mode. */
int pwc_set_flicker(struct pwc_device *pdev, int flicker)
{
	unsigned char buf = flicker ? 0xff : 0x0;

	return send_control_msg(pdev, SET_LUM_CTL, FLICKERLESS_MODE_FORMATTER,
				&buf, sizeof(buf));
}
/* Read the flickerless-mode flag into *flicker (0 or 1).
 * Returns 0 or a negative transfer error. */
int pwc_get_flicker(struct pwc_device *pdev, int *flicker)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_LUM_CTL,
				   FLICKERLESS_MODE_FORMATTER,
				   &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	*flicker = !!buf;	/* normalize to boolean */
	return 0;
}
/* Set the dynamic noise reduction level; @noise is clamped to 0..3. */
int pwc_set_dynamic_noise(struct pwc_device *pdev, int noise)
{
	unsigned char buf;

	if (noise < 0)
		noise = 0;
	else if (noise > 3)
		noise = 3;
	buf = noise;
	return send_control_msg(pdev, SET_LUM_CTL,
				DYNAMIC_NOISE_CONTROL_FORMATTER,
				&buf, sizeof(buf));
}
/* Read the dynamic noise reduction level into *noise.
 * Returns 0 or a negative transfer error. */
int pwc_get_dynamic_noise(struct pwc_device *pdev, int *noise)
{
	unsigned char buf;
	int ret = recv_control_msg(pdev, GET_LUM_CTL,
				   DYNAMIC_NOISE_CONTROL_FORMATTER,
				   &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	*noise = buf;
	return 0;
}
/* Send a pan/tilt reset command; only the lower two bits of @flags are
 * currently used by the device. */
static int _pwc_mpt_reset(struct pwc_device *pdev, int flags)
{
	unsigned char buf = flags & 0x03;

	return send_control_msg(pdev, SET_MPT_CTL, PT_RESET_CONTROL_FORMATTER,
				&buf, sizeof(buf));
}
/* Reset the pan/tilt unit and, on success, zero the cached angles so
 * subsequent relative moves start from the origin. */
int pwc_mpt_reset(struct pwc_device *pdev, int flags)
{
	int ret = _pwc_mpt_reset(pdev, flags);

	if (ret >= 0) {
		pdev->pan_angle = 0;
		pdev->tilt_angle = 0;
	}
	return ret;
}
/*
 * Send a RELATIVE pan/tilt move.  @pan/@tilt are in degrees * 100; each
 * is converted to the device unit (* 64 / 100) and transmitted as two
 * little-endian 16-bit values, pan first.  Tilt is negated because the
 * hardware's positive tilt direction is down.
 */
static int _pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt)
{
unsigned char buf[4];
/* set new relative angle; angles are expressed in degrees * 100,
but cam as .5 degree resolution, hence divide by 200. Also
the angle must be multiplied by 64 before it's send to
the cam (??)
*/
pan = 64 * pan / 100;
tilt = -64 * tilt / 100; /* positive tilt is down, which is not what the user would expect */
buf[0] = pan & 0xFF;
buf[1] = (pan >> 8) & 0xFF;
buf[2] = tilt & 0xFF;
buf[3] = (tilt >> 8) & 0xFF;
return send_control_msg(pdev,
SET_MPT_CTL, PT_RELATIVE_CONTROL_FORMATTER, &buf, sizeof(buf));
}
/*
 * Move to an ABSOLUTE pan/tilt position (degrees * 100).  The hardware
 * only takes relative moves, so the target is validated against the
 * advertised absolute range, converted to a delta from the cached
 * position, range-checked again and sent.  On success the cached
 * pan/tilt angles are updated; a device stall (-EPIPE) is mapped to
 * -ERANGE since it indicates an out-of-range move.
 */
int pwc_mpt_set_angle(struct pwc_device *pdev, int pan, int tilt)
{
int ret;
/* check absolute ranges */
if (pan < pdev->angle_range.pan_min ||
pan > pdev->angle_range.pan_max ||
tilt < pdev->angle_range.tilt_min ||
tilt > pdev->angle_range.tilt_max)
return -ERANGE;
/* go to relative range, check again */
pan -= pdev->pan_angle;
tilt -= pdev->tilt_angle;
/* angles are specified in degrees * 100, thus the limit = 36000 */
if (pan < -36000 || pan > 36000 || tilt < -36000 || tilt > 36000)
return -ERANGE;
ret = _pwc_mpt_set_angle(pdev, pan, tilt);
if (ret >= 0) {
pdev->pan_angle += pan;
pdev->tilt_angle += tilt;
}
if (ret == -EPIPE) /* stall -> out of range */
ret = -ERANGE;
return ret;
}
/* Read the pan/tilt unit status: a 3-bit status code plus two
 * big-endian 16-bit time values.  Returns 0 or a negative error. */
static int pwc_mpt_get_status(struct pwc_device *pdev, struct pwc_mpt_status *status)
{
	unsigned char buf[5];
	int ret = recv_control_msg(pdev, GET_MPT_CTL, PT_STATUS_FORMATTER,
				   &buf, sizeof(buf));

	if (ret < 0)
		return ret;
	status->status = buf[0] & 0x7;	/* 3 bits are used for reporting */
	status->time_pan = (buf[1] << 8) | buf[2];
	status->time_tilt = (buf[3] << 8) | buf[4];
	return 0;
}
/* Query the CMOS sensor type; old (Nala) models use a different request
 * and get 0x100 OR-ed into the result to distinguish them.  The Vesta
 * series (675..729) lacks the call entirely.
 * Returns 0, -1 (unsupported), or a negative transfer error. */
int pwc_get_cmos_sensor(struct pwc_device *pdev, int *sensor)
{
	unsigned char buf;
	int request, ret;

	if (pdev->type < 675)
		request = SENSOR_TYPE_FORMATTER1;
	else if (pdev->type < 730)
		return -1;	/* The Vesta series doesn't have this call */
	else
		request = SENSOR_TYPE_FORMATTER2;
	ret = recv_control_msg(pdev, GET_STATUS_CTL, request,
			       &buf, sizeof(buf));
	if (ret < 0)
		return ret;
	*sensor = (pdev->type < 675) ? (buf | 0x100) : buf;
	return 0;
}
/* End of Add-Ons */
/* ************************************************* */
/* Linux 2.5.something and 2.6 pass direct pointers to arguments of
ioctl() calls. With 2.4, you have to do tedious copy_from_user()
and copy_to_user() calls. With these macros we circumvent this,
and let me maintain only one source file. The functionality is
exactly the same otherwise.
*/
/* define local variable for arg */
#define ARG_DEF(ARG_type, ARG_name)\
ARG_type *ARG_name = arg;
/* copy arg to local variable */
#define ARG_IN(ARG_name) /* nothing */
/* argument itself (referenced) */
#define ARGR(ARG_name) (*ARG_name)
/* argument address */
#define ARGA(ARG_name) ARG_name
/* copy local variable to arg */
#define ARG_OUT(ARG_name) /* nothing */
long pwc_ioctl(struct pwc_device *pdev, unsigned int cmd, void *arg)
{
long ret = 0;
switch(cmd) {
case VIDIOCPWCRUSER:
{
if (pwc_restore_user(pdev))
ret = -EINVAL;
break;
}
case VIDIOCPWCSUSER:
{
if (pwc_save_user(pdev))
ret = -EINVAL;
break;
}
case VIDIOCPWCFACTORY:
{
if (pwc_restore_factory(pdev))
ret = -EINVAL;
break;
}
case VIDIOCPWCSCQUAL:
{
ARG_DEF(int, qual)
if (pdev->iso_init) {
ret = -EBUSY;
break;
}
ARG_IN(qual)
if (ARGR(qual) < 0 || ARGR(qual) > 3)
ret = -EINVAL;
else
ret = pwc_set_video_mode(pdev, pdev->view.x, pdev->view.y, pdev->vframes, ARGR(qual), pdev->vsnapshot);
if (ret >= 0)
pdev->vcompression = ARGR(qual);
break;
}
case VIDIOCPWCGCQUAL:
{
ARG_DEF(int, qual)
ARGR(qual) = pdev->vcompression;
ARG_OUT(qual)
break;
}
case VIDIOCPWCPROBE:
{
ARG_DEF(struct pwc_probe, probe)
strcpy(ARGR(probe).name, pdev->vdev.name);
ARGR(probe).type = pdev->type;
ARG_OUT(probe)
break;
}
case VIDIOCPWCGSERIAL:
{
ARG_DEF(struct pwc_serial, serial)
strcpy(ARGR(serial).serial, pdev->serial);
ARG_OUT(serial)
break;
}
case VIDIOCPWCSAGC:
{
ARG_DEF(int, agc)
ARG_IN(agc)
if (pwc_set_agc(pdev, ARGR(agc) < 0 ? 1 : 0, ARGR(agc)))
ret = -EINVAL;
break;
}
case VIDIOCPWCGAGC:
{
ARG_DEF(int, agc)
if (pwc_get_agc(pdev, ARGA(agc)))
ret = -EINVAL;
ARG_OUT(agc)
break;
}
case VIDIOCPWCSSHUTTER:
{
ARG_DEF(int, shutter_speed)
ARG_IN(shutter_speed)
ret = pwc_set_shutter_speed(pdev, ARGR(shutter_speed) < 0 ? 1 : 0, ARGR(shutter_speed));
break;
}
case VIDIOCPWCSAWB:
{
ARG_DEF(struct pwc_whitebalance, wb)
ARG_IN(wb)
ret = pwc_set_awb(pdev, ARGR(wb).mode);
if (ret >= 0 && ARGR(wb).mode == PWC_WB_MANUAL) {
pwc_set_red_gain(pdev, ARGR(wb).manual_red);
pwc_set_blue_gain(pdev, ARGR(wb).manual_blue);
}
break;
}
case VIDIOCPWCGAWB:
{
ARG_DEF(struct pwc_whitebalance, wb)
memset(ARGA(wb), 0, sizeof(struct pwc_whitebalance));
ARGR(wb).mode = pwc_get_awb(pdev);
if (ARGR(wb).mode < 0)
ret = -EINVAL;
else {
if (ARGR(wb).mode == PWC_WB_MANUAL) {
ret = pwc_get_red_gain(pdev, &ARGR(wb).manual_red);
if (ret < 0)
break;
ret = pwc_get_blue_gain(pdev, &ARGR(wb).manual_blue);
if (ret < 0)
break;
}
if (ARGR(wb).mode == PWC_WB_AUTO) {
ret = pwc_read_red_gain(pdev, &ARGR(wb).read_red);
if (ret < 0)
break;
ret = pwc_read_blue_gain(pdev, &ARGR(wb).read_blue);
if (ret < 0)
break;
}
}
ARG_OUT(wb)
break;
}
case VIDIOCPWCSAWBSPEED:
{
ARG_DEF(struct pwc_wb_speed, wbs)
if (ARGR(wbs).control_speed > 0) {
ret = pwc_set_wb_speed(pdev, ARGR(wbs).control_speed);
}
if (ARGR(wbs).control_delay > 0) {
ret = pwc_set_wb_delay(pdev, ARGR(wbs).control_delay);
}
break;
}
case VIDIOCPWCGAWBSPEED:
{
ARG_DEF(struct pwc_wb_speed, wbs)
ret = pwc_get_wb_speed(pdev, &ARGR(wbs).control_speed);
if (ret < 0)
break;
ret = pwc_get_wb_delay(pdev, &ARGR(wbs).control_delay);
if (ret < 0)
break;
ARG_OUT(wbs)
break;
}
case VIDIOCPWCSLED:
{
ARG_DEF(struct pwc_leds, leds)
ARG_IN(leds)
ret = pwc_set_leds(pdev, ARGR(leds).led_on, ARGR(leds).led_off);
break;
}
case VIDIOCPWCGLED:
{
ARG_DEF(struct pwc_leds, leds)
ret = pwc_get_leds(pdev, &ARGR(leds).led_on, &ARGR(leds).led_off);
ARG_OUT(leds)
break;
}
case VIDIOCPWCSCONTOUR:
{
ARG_DEF(int, contour)
ARG_IN(contour)
ret = pwc_set_contour(pdev, ARGR(contour));
break;
}
case VIDIOCPWCGCONTOUR:
{
ARG_DEF(int, contour)
ret = pwc_get_contour(pdev, ARGA(contour));
ARG_OUT(contour)
break;
}
case VIDIOCPWCSBACKLIGHT:
{
ARG_DEF(int, backlight)
ARG_IN(backlight)
ret = pwc_set_backlight(pdev, ARGR(backlight));
break;
}
case VIDIOCPWCGBACKLIGHT:
{
ARG_DEF(int, backlight)
ret = pwc_get_backlight(pdev, ARGA(backlight));
ARG_OUT(backlight)
break;
}
case VIDIOCPWCSFLICKER:
{
ARG_DEF(int, flicker)
ARG_IN(flicker)
ret = pwc_set_flicker(pdev, ARGR(flicker));
break;
}
case VIDIOCPWCGFLICKER:
{
ARG_DEF(int, flicker)
ret = pwc_get_flicker(pdev, ARGA(flicker));
ARG_OUT(flicker)
break;
}
case VIDIOCPWCSDYNNOISE:
{
ARG_DEF(int, dynnoise)
ARG_IN(dynnoise)
ret = pwc_set_dynamic_noise(pdev, ARGR(dynnoise));
break;
}
case VIDIOCPWCGDYNNOISE:
{
ARG_DEF(int, dynnoise)
ret = pwc_get_dynamic_noise(pdev, ARGA(dynnoise));
ARG_OUT(dynnoise);
break;
}
case VIDIOCPWCGREALSIZE:
{
ARG_DEF(struct pwc_imagesize, size)
ARGR(size).width = pdev->image.x;
ARGR(size).height = pdev->image.y;
ARG_OUT(size)
break;
}
case VIDIOCPWCMPTRESET:
{
if (pdev->features & FEATURE_MOTOR_PANTILT)
{
ARG_DEF(int, flags)
ARG_IN(flags)
ret = pwc_mpt_reset(pdev, ARGR(flags));
}
else
{
ret = -ENXIO;
}
break;
}
case VIDIOCPWCMPTGRANGE:
{
if (pdev->features & FEATURE_MOTOR_PANTILT)
{
ARG_DEF(struct pwc_mpt_range, range)
ARGR(range) = pdev->angle_range;
ARG_OUT(range)
}
else
{
ret = -ENXIO;
}
break;
}
case VIDIOCPWCMPTSANGLE:
{
int new_pan, new_tilt;
if (pdev->features & FEATURE_MOTOR_PANTILT)
{
ARG_DEF(struct pwc_mpt_angles, angles)
ARG_IN(angles)
/* The camera can only set relative angles, so
do some calculations when getting an absolute angle .
*/
if (ARGR(angles).absolute)
{
new_pan = ARGR(angles).pan;
new_tilt = ARGR(angles).tilt;
}
else
{
new_pan = pdev->pan_angle + ARGR(angles).pan;
new_tilt = pdev->tilt_angle + ARGR(angles).tilt;
}
ret = pwc_mpt_set_angle(pdev, new_pan, new_tilt);
}
else
{
ret = -ENXIO;
}
break;
}
case VIDIOCPWCMPTGANGLE:
{
if (pdev->features & FEATURE_MOTOR_PANTILT)
{
ARG_DEF(struct pwc_mpt_angles, angles)
ARGR(angles).absolute = 1;
ARGR(angles).pan = pdev->pan_angle;
ARGR(angles).tilt = pdev->tilt_angle;
ARG_OUT(angles)
}
else
{
ret = -ENXIO;
}
break;
}
case VIDIOCPWCMPTSTATUS:
{
if (pdev->features & FEATURE_MOTOR_PANTILT)
{
ARG_DEF(struct pwc_mpt_status, status)
ret = pwc_mpt_get_status(pdev, ARGA(status));
ARG_OUT(status)
}
else
{
ret = -ENXIO;
}
break;
}
case VIDIOCPWCGVIDCMD:
{
ARG_DEF(struct pwc_video_command, vcmd);
ARGR(vcmd).type = pdev->type;
ARGR(vcmd).release = pdev->release;
ARGR(vcmd).command_len = pdev->cmd_len;
memcpy(&ARGR(vcmd).command_buf, pdev->cmd_buf, pdev->cmd_len);
ARGR(vcmd).bandlength = pdev->vbandlength;
ARGR(vcmd).frame_size = pdev->frame_size;
ARG_OUT(vcmd)
break;
}
/*
case VIDIOCPWCGVIDTABLE:
{
ARG_DEF(struct pwc_table_init_buffer, table);
ARGR(table).len = pdev->cmd_len;
memcpy(&ARGR(table).buffer, pdev->decompress_data, pdev->decompressor->table_size);
ARG_OUT(table)
break;
}
*/
default:
ret = -ENOIOCTLCMD;
break;
}
if (ret > 0)
return 0;
return ret;
}
/* vim: set cinoptions= formatoptions=croql cindent shiftwidth=8 tabstop=8: */
| gpl-2.0 |
SHAYDER/i9300 | fs/gfs2/dir.c | 2381 | 47977 | /*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
* Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License version 2.
*/
/*
* Implements Extendible Hashing as described in:
* "Extendible Hashing" by Fagin, et al in
* __ACM Trans. on Database Systems__, Sept 1979.
*
*
* Here's the layout of dirents which is essentially the same as that of ext2
* within a single block. The field de_name_len is the number of bytes
* actually required for the name (no null terminator). The field de_rec_len
* is the number of bytes allocated to the dirent. The offset of the next
* dirent in the block is (dirent + dirent->de_rec_len). When a dirent is
* deleted, the preceding dirent inherits its allocated space, ie
* prev->de_rec_len += deleted->de_rec_len. Since the next dirent is obtained
* by adding de_rec_len to the current dirent, this essentially causes the
* deleted dirent to get jumped over when iterating through all the dirents.
*
* When deleting the first dirent in a block, there is no previous dirent so
* the field de_ino is set to zero to designate it as deleted. When allocating
* a dirent, gfs2_dirent_alloc iterates through the dirents in a block. If the
* first dirent has (de_ino == 0) and de_rec_len is large enough, this first
* dirent is allocated. Otherwise it must go through all the 'used' dirents
* searching for one in which the amount of total space minus the amount of
* used space will provide enough space for the new dirent.
*
* There are two types of blocks in which dirents reside. In a stuffed dinode,
* the dirents begin at offset sizeof(struct gfs2_dinode) from the beginning of
* the block. In leaves, they begin at offset sizeof(struct gfs2_leaf) from the
* beginning of the leaf block. The dirents reside in leaves when
*
* dip->i_diskflags & GFS2_DIF_EXHASH is true
*
* Otherwise, the dirents are "linear", within a single stuffed dinode block.
*
* When the dirents are in leaves, the actual contents of the directory file are
* used as an array of 64-bit block pointers pointing to the leaf blocks. The
* dirents are NOT in the directory file itself. There can be more than one
* block pointer in the array that points to the same leaf. In fact, when a
* directory is first converted from linear to exhash, all of the pointers
* point to the same leaf.
*
* When a leaf is completely full, the size of the hash table can be
* doubled unless it is already at the maximum size which is hard coded into
* GFS2_DIR_MAX_DEPTH. After that, leaves are chained together in a linked list,
* but never before the maximum hash table size has been reached.
*/
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/buffer_head.h>
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
#include "gfs2.h"
#include "incore.h"
#include "dir.h"
#include "glock.h"
#include "inode.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "bmap.h"
#include "util.h"
#define IS_LEAF 1 /* Hashed (leaf) directory */
#define IS_DINODE 2 /* Linear (stuffed dinode block) directory */
#define gfs2_disk_hash2offset(h) (((u64)(h)) >> 1)
#define gfs2_dir_offset2hash(p) ((u32)(((u64)(p)) << 1))
struct qstr gfs2_qdot __read_mostly;
struct qstr gfs2_qdotdot __read_mostly;
typedef int (*gfs2_dscan_t)(const struct gfs2_dirent *dent,
const struct qstr *name, void *opaque);
/*
 * gfs2_dir_get_new_buffer - set up a brand-new journaled-data directory
 * block at @block: the buffer is added to the current transaction,
 * stamped with the JD metatype and cleared past the meta header.
 * Always returns 0; *bhp receives the buffer (caller must brelse it).
 */
int gfs2_dir_get_new_buffer(struct gfs2_inode *ip, u64 block,
struct buffer_head **bhp)
{
struct buffer_head *bh;
bh = gfs2_meta_new(ip->i_gl, block);
gfs2_trans_add_bh(ip->i_gl, bh, 1);
gfs2_metatype_set(bh, GFS2_METATYPE_JD, GFS2_FORMAT_JD);
gfs2_buffer_clear_tail(bh, sizeof(struct gfs2_meta_header));
*bhp = bh;
return 0;
}
/* Read an existing directory block at @block and verify it carries the
 * journaled-data metatype.  On success *bhp holds the buffer (caller
 * must brelse it); returns 0, a read error, or -EIO on a bad metatype. */
static int gfs2_dir_get_existing_buffer(struct gfs2_inode *ip, u64 block,
					struct buffer_head **bhp)
{
	struct buffer_head *bh;
	int error = gfs2_meta_read(ip->i_gl, block, DIO_WAIT, &bh);

	if (error)
		return error;
	if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) {
		brelse(bh);
		return -EIO;
	}
	*bhp = bh;
	return 0;
}
/*
 * Write @size bytes at @offset into a stuffed (inline) directory, i.e.
 * directly into the dinode block just past the dinode header.  Extends
 * i_size if needed and refreshes mtime/ctime.  Returns @size on success
 * or a negative error from reading the inode buffer.
 */
static int gfs2_dir_write_stuffed(struct gfs2_inode *ip, const char *buf,
unsigned int offset, unsigned int size)
{
struct buffer_head *dibh;
int error;
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
return error;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
memcpy(dibh->b_data + offset + sizeof(struct gfs2_dinode), buf, size);
if (ip->i_inode.i_size < offset + size)
i_size_write(&ip->i_inode, offset + size);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
return size;
}
/**
* gfs2_dir_write_data - Write directory information to the inode
* @ip: The GFS2 inode
* @buf: The buffer containing information to be written
* @offset: The file offset to start writing at
* @size: The amount of data to write
*
* Returns: The number of bytes correctly written or error code
*/
static int gfs2_dir_write_data(struct gfs2_inode *ip, const char *buf,
u64 offset, unsigned int size)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
struct buffer_head *dibh;
u64 lblock, dblock;
u32 extlen = 0;
unsigned int o;
int copied = 0;
int error = 0;
int new = 0;
if (!size)
return 0;
/* Fast path: the data still fits inline in the dinode block */
if (gfs2_is_stuffed(ip) &&
offset + size <= sdp->sd_sb.sb_bsize - sizeof(struct gfs2_dinode))
return gfs2_dir_write_stuffed(ip, buf, (unsigned int)offset,
size);
/* Directory data must be journaled */
if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
return -EINVAL;
if (gfs2_is_stuffed(ip)) {
error = gfs2_unstuff_dinode(ip, NULL);
if (error)
return error;
}
/* Split the byte offset into a journaled-block index (lblock) and
   an in-block offset (o) just past the meta header */
lblock = offset;
o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);
while (copied < size) {
unsigned int amount;
struct buffer_head *bh;
amount = size - copied;
if (amount > sdp->sd_sb.sb_bsize - o)
amount = sdp->sd_sb.sb_bsize - o;
/* Map (allocating on demand, new=1) the next physical extent */
if (!extlen) {
new = 1;
error = gfs2_extent_map(&ip->i_inode, lblock, &new,
&dblock, &extlen);
if (error)
goto fail;
error = -EIO;
if (gfs2_assert_withdraw(sdp, dblock))
goto fail;
}
/* A full-block or freshly-allocated write need not read the old
   block contents from disk first */
if (amount == sdp->sd_jbsize || new)
error = gfs2_dir_get_new_buffer(ip, dblock, &bh);
else
error = gfs2_dir_get_existing_buffer(ip, dblock, &bh);
if (error)
goto fail;
gfs2_trans_add_bh(ip->i_gl, bh, 1);
memcpy(bh->b_data + o, buf, amount);
brelse(bh);
buf += amount;
copied += amount;
lblock++;
dblock++;
extlen--;
/* every block after the first starts right after the meta header */
o = sizeof(struct gfs2_meta_header);
}
out:
/* Update i_size and timestamps to account for what was written */
error = gfs2_meta_inode_buffer(ip, &dibh);
if (error)
return error;
if (ip->i_inode.i_size < offset + copied)
i_size_write(&ip->i_inode, offset + copied);
ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
gfs2_trans_add_bh(ip->i_gl, dibh, 1);
gfs2_dinode_out(ip, dibh->b_data);
brelse(dibh);
return copied;
fail:
/* a partial write still updates the inode and reports bytes done */
if (copied)
goto out;
return error;
}
/* Read @size bytes at @offset from a stuffed (inline) directory, i.e.
 * straight out of the dinode block past the dinode header.  Returns
 * @size on success or a negative error. */
static int gfs2_dir_read_stuffed(struct gfs2_inode *ip, char *buf,
				 u64 offset, unsigned int size)
{
	struct buffer_head *dibh;
	int error = gfs2_meta_inode_buffer(ip, &dibh);

	if (error)
		return error;
	memcpy(buf, dibh->b_data + sizeof(struct gfs2_dinode) + offset, size);
	brelse(dibh);
	return size;
}
/**
* gfs2_dir_read_data - Read a data from a directory inode
* @ip: The GFS2 Inode
* @buf: The buffer to place result into
* @offset: File offset to begin jdata_readng from
* @size: Amount of data to transfer
*
* Returns: The amount of data actually copied or the error
*/
static int gfs2_dir_read_data(struct gfs2_inode *ip, char *buf, u64 offset,
unsigned int size, unsigned ra)
{
struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
u64 lblock, dblock;
u32 extlen = 0;
unsigned int o;
int copied = 0;
int error = 0;
u64 disksize = i_size_read(&ip->i_inode);
/* Clip the request to the current file size */
if (offset >= disksize)
return 0;
if (offset + size > disksize)
size = disksize - offset;
if (!size)
return 0;
if (gfs2_is_stuffed(ip))
return gfs2_dir_read_stuffed(ip, buf, offset, size);
/* Directory data must be journaled */
if (gfs2_assert_warn(sdp, gfs2_is_jdata(ip)))
return -EINVAL;
/* Split the byte offset into a journaled-block index (lblock) and
   an in-block offset (o) just past the meta header */
lblock = offset;
o = do_div(lblock, sdp->sd_jbsize) + sizeof(struct gfs2_meta_header);
while (copied < size) {
unsigned int amount;
struct buffer_head *bh;
int new;
amount = size - copied;
if (amount > sdp->sd_sb.sb_bsize - o)
amount = sdp->sd_sb.sb_bsize - o;
if (!extlen) {
/* Map the next extent; read-ahead (ra) the whole extent,
   otherwise fetch one block at a time */
new = 0;
error = gfs2_extent_map(&ip->i_inode, lblock, &new,
&dblock, &extlen);
if (error || !dblock)
goto fail;
BUG_ON(extlen < 1);
if (!ra)
extlen = 1;
bh = gfs2_meta_ra(ip->i_gl, dblock, extlen);
} else {
error = gfs2_meta_read(ip->i_gl, dblock, DIO_WAIT, &bh);
if (error)
goto fail;
}
error = gfs2_metatype_check(sdp, bh, GFS2_METATYPE_JD);
if (error) {
brelse(bh);
goto fail;
}
dblock++;
extlen--;
memcpy(buf, bh->b_data + o, amount);
brelse(bh);
buf += amount;
copied += amount;
lblock++;
/* every block after the first starts right after the meta header */
o = sizeof(struct gfs2_meta_header);
}
return copied;
fail:
/* report a partial read if any data was copied before the error */
return (copied) ? copied : error;
}
/*
 * A "sentinel" dirent is the placeholder left when the first entry of a
 * block is deleted: its space stays allocated but either inode number
 * field is zero.
 */
static inline int gfs2_dirent_sentinel(const struct gfs2_dirent *dent)
{
	return !dent->de_inum.no_addr || !dent->de_inum.no_formal_ino;
}
/*
 * Match helper shared by the find/prev scan callbacks.  Returns @ret when
 * @dent is a live entry whose hash, length and name all match @name,
 * otherwise 0.  Hash and length are compared first as cheap rejections.
 */
static inline int __gfs2_dirent_find(const struct gfs2_dirent *dent,
				     const struct qstr *name, int ret)
{
	if (gfs2_dirent_sentinel(dent))
		return 0;
	if (be32_to_cpu(dent->de_hash) != name->hash)
		return 0;
	if (be16_to_cpu(dent->de_name_len) != name->len)
		return 0;
	if (memcmp(dent + 1, name->name, name->len) != 0)
		return 0;
	return ret;
}
/* Scan callback: return 1 (stop, report this dirent) on an exact match. */
static int gfs2_dirent_find(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	return __gfs2_dirent_find(dent, name, 1);
}
/* Scan callback: return 2 on a match, so the scanner reports the
 * PREVIOUS dirent (needed by deletion to coalesce records). */
static int gfs2_dirent_prev(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	return __gfs2_dirent_find(dent, name, 2);
}
/*
* name->name holds ptr to start of block.
* name->len holds size of block.
*/
/*
 * Scan callback that fires on the final dirent of a block.
 * name->name holds a pointer to the start of the block and name->len its
 * size; the last entry is the one whose record ends exactly at the block
 * boundary.
 */
static int gfs2_dirent_last(const struct gfs2_dirent *dent,
			    const struct qstr *name,
			    void *opaque)
{
	const char *blk_start = name->name;
	const char *rec_end = (const char *)dent + be16_to_cpu(dent->de_rec_len);

	return name->len == (rec_end - blk_start) ? 1 : 0;
}
/*
 * Scan callback: does this dirent's record have enough slack to host a
 * new entry named @name?  A sentinel donates its entire record; a live
 * entry donates only the space beyond what its own name occupies.
 */
static int gfs2_dirent_find_space(const struct gfs2_dirent *dent,
				  const struct qstr *name,
				  void *opaque)
{
	unsigned need = GFS2_DIRENT_SIZE(name->len);
	unsigned used = 0;

	if (!gfs2_dirent_sentinel(dent))
		used = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));

	return be16_to_cpu(dent->de_rec_len) - used >= need ? 1 : 0;
}
/*
 * Accumulator for gfs2_dirent_gather(): collects pointers to every live
 * (non-sentinel) dirent in a block for later sorting/filldir.
 */
struct dirent_gather {
	const struct gfs2_dirent **pdent;	/* output array */
	unsigned offset;			/* next free slot / count gathered */
};
/* Scan callback: append every live dirent to the gather array.  Always
 * returns 0 so the scan visits the whole block. */
static int gfs2_dirent_gather(const struct gfs2_dirent *dent,
			      const struct qstr *name,
			      void *opaque)
{
	struct dirent_gather *gather = opaque;

	if (gfs2_dirent_sentinel(dent))
		return 0;

	gather->pdent[gather->offset++] = dent;
	return 0;
}
/*
* Other possible things to check:
* - Inode located within filesystem size (and on valid block)
* - Valid directory entry type
* Not sure how heavy-weight we want to make this... could also check
* hash is correct for example, but that would take a lot of extra time.
* For now the most important thing is to check that the various sizes
* are correct.
*/
/*
 * Validate a single dirent before it is trusted: minimum record size,
 * 8-byte alignment, containment within the block, no sentinel except as
 * the first entry, and a name that fits inside the record.
 *
 * Returns: 0 if the dirent is sane, -EIO (after logging why) otherwise.
 */
static int gfs2_check_dirent(struct gfs2_dirent *dent, unsigned int offset,
			     unsigned int size, unsigned int len, int first)
{
	const char *msg;

	if (unlikely(size < sizeof(struct gfs2_dirent))) {
		msg = "gfs2_dirent too small";
		goto error;
	}
	if (unlikely(offset & 0x7)) {
		msg = "gfs2_dirent misaligned";
		goto error;
	}
	if (unlikely(offset + size > len)) {
		msg = "gfs2_dirent points beyond end of block";
		goto error;
	}
	if (unlikely(!first && gfs2_dirent_sentinel(dent))) {
		msg = "zero inode number";
		goto error;
	}
	if (!gfs2_dirent_sentinel(dent) &&
	    unlikely(sizeof(struct gfs2_dirent)+be16_to_cpu(dent->de_name_len) >
		     size)) {
		msg = "name length is greater than space in dirent";
		goto error;
	}
	return 0;

error:
	printk(KERN_WARNING "gfs2_check_dirent: %s (%s)\n", msg,
	       first ? "first in block" : "not first in block");
	return -EIO;
}
static int gfs2_dirent_offset(const void *buf)
{
const struct gfs2_meta_header *h = buf;
int offset;
BUG_ON(buf == NULL);
switch(be32_to_cpu(h->mh_type)) {
case GFS2_METATYPE_LF:
offset = sizeof(struct gfs2_leaf);
break;
case GFS2_METATYPE_DI:
offset = sizeof(struct gfs2_dinode);
break;
default:
goto wrong_type;
}
return offset;
wrong_type:
printk(KERN_WARNING "gfs2_scan_dirent: wrong block type %u\n",
be32_to_cpu(h->mh_type));
return -1;
}
/*
 * gfs2_dirent_scan - walk every dirent in one leaf/dinode block
 * @inode: the directory (used only for consistency reporting)
 * @buf: the block contents (must begin with a gfs2_meta_header)
 * @len: block size in bytes
 * @scan: callback invoked per dirent; non-zero return stops the walk
 * @name: passed through to @scan
 * @opaque: passed through to @scan
 *
 * Each dirent is validated with gfs2_check_dirent() before @scan sees it.
 *
 * Returns: NULL if @scan never fired; the current dirent if it returned 1;
 * the previous dirent (or the first, when none precedes it) if it returned
 * 2; ERR_PTR(@scan's negative return); or ERR_PTR(-EIO) after marking the
 * inode inconsistent when a malformed dirent is found.
 */
static struct gfs2_dirent *gfs2_dirent_scan(struct inode *inode, void *buf,
					    unsigned int len, gfs2_dscan_t scan,
					    const struct qstr *name,
					    void *opaque)
{
	struct gfs2_dirent *dent, *prev;
	unsigned offset;
	unsigned size;
	int ret = 0;

	ret = gfs2_dirent_offset(buf);
	if (ret < 0)
		goto consist_inode;

	offset = ret;
	prev = NULL;
	dent = buf + offset;
	size = be16_to_cpu(dent->de_rec_len);
	/* First entry may legitimately be a sentinel (first flag = 1). */
	if (gfs2_check_dirent(dent, offset, size, len, 1))
		goto consist_inode;
	do {
		ret = scan(dent, name, opaque);
		if (ret)
			break;
		offset += size;
		if (offset == len)
			break;
		prev = dent;
		dent = buf + offset;
		size = be16_to_cpu(dent->de_rec_len);
		if (gfs2_check_dirent(dent, offset, size, len, 0))
			goto consist_inode;
	} while(1);

	switch(ret) {
	case 0:
		return NULL;
	case 1:
		return dent;
	case 2:
		return prev ? prev : dent;
	default:
		/* Positive returns other than 1/2 are a programming error. */
		BUG_ON(ret > 0);
		return ERR_PTR(ret);
	}

consist_inode:
	gfs2_consist_inode(GFS2_I(inode));
	return ERR_PTR(-EIO);
}
/*
 * Validate a dirent's record length against the end of its block.
 *
 * Returns: the record length if another entry follows, -ENOENT if this is
 * the last entry in the block, or -EIO (inode marked inconsistent) if the
 * record is too small or overruns the block.
 */
static int dirent_check_reclen(struct gfs2_inode *dip,
			       const struct gfs2_dirent *d, const void *end_p)
{
	u16 rec_len = be16_to_cpu(d->de_rec_len);
	const void *next = (const void *)d + rec_len;

	if (likely(rec_len >= sizeof(struct gfs2_dirent))) {
		if (next < end_p)
			return rec_len;
		if (next == end_p)
			return -ENOENT;
	}
	gfs2_consist_inode(dip);
	return -EIO;
}
/**
* dirent_next - Next dirent
* @dip: the directory
* @bh: The buffer
* @dent: Pointer to list of dirents
*
* Returns: 0 on success, error code otherwise
*/
static int dirent_next(struct gfs2_inode *dip, struct buffer_head *bh,
		       struct gfs2_dirent **dent)
{
	struct gfs2_dirent *cur = *dent, *tmp;
	char *bh_end = bh->b_data + bh->b_size;
	int ret;

	ret = dirent_check_reclen(dip, cur, bh_end);
	if (ret < 0)
		/* -ENOENT: cur was the last entry; -EIO: corrupt record. */
		return ret;

	tmp = (void *)cur + ret;
	/* Validate the successor too, but treat "last entry" as success. */
	ret = dirent_check_reclen(dip, tmp, bh_end);
	if (ret == -EIO)
		return ret;

        /* Only the first dent could ever have de_inum.no_addr == 0 */
	if (gfs2_dirent_sentinel(tmp)) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	*dent = tmp;
	return 0;
}
/**
* dirent_del - Delete a dirent
* @dip: The GFS2 inode
* @bh: The buffer
* @prev: The previous dirent
* @cur: The current dirent
*
*/
static void dirent_del(struct gfs2_inode *dip, struct buffer_head *bh,
		       struct gfs2_dirent *prev, struct gfs2_dirent *cur)
{
	u16 cur_rec_len, prev_rec_len;

	/* Deleting an already-dead (sentinel) entry means the caller is
	   confused; flag the inode inconsistent rather than corrupt it. */
	if (gfs2_dirent_sentinel(cur)) {
		gfs2_consist_inode(dip);
		return;
	}

	gfs2_trans_add_bh(dip->i_gl, bh, 1);

	/* If there is no prev entry, this is the first entry in the block.
	   The de_rec_len is already as big as it needs to be.  Just zero
	   out the inode number and return.  */
	if (!prev) {
		cur->de_inum.no_addr = 0;
		cur->de_inum.no_formal_ino = 0;
		return;
	}

	/* Combine this dentry with the previous one: prev absorbs cur's
	   record space so the block stays a contiguous chain of records. */
	prev_rec_len = be16_to_cpu(prev->de_rec_len);
	cur_rec_len = be16_to_cpu(cur->de_rec_len);

	/* Sanity: prev must be immediately adjacent, and cur in-bounds. */
	if ((char *)prev + prev_rec_len != (char *)cur)
		gfs2_consist_inode(dip);
	if ((char *)cur + cur_rec_len > bh->b_data + bh->b_size)
		gfs2_consist_inode(dip);

	prev_rec_len += cur_rec_len;
	prev->de_rec_len = cpu_to_be16(prev_rec_len);
}
/*
* Takes a dent from which to grab space as an argument. Returns the
* newly created dent.
*/
static struct gfs2_dirent *gfs2_init_dirent(struct inode *inode,
					    struct gfs2_dirent *dent,
					    const struct qstr *name,
					    struct buffer_head *bh)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_dirent *ndent;
	unsigned offset = 0, totlen;

	/* A sentinel donates its whole record (offset 0); a live entry
	   keeps the space its own name needs and donates only the tail. */
	if (!gfs2_dirent_sentinel(dent))
		offset = GFS2_DIRENT_SIZE(be16_to_cpu(dent->de_name_len));
	totlen = be16_to_cpu(dent->de_rec_len);
	/* Caller must have found this record via gfs2_dirent_find_space. */
	BUG_ON(offset + name->len > totlen);
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	ndent = (struct gfs2_dirent *)((char *)dent + offset);
	dent->de_rec_len = cpu_to_be16(offset);
	/* Initialise the new dirent over the donated tail space. */
	gfs2_qstr2dirent(name, totlen - offset, ndent);
	return ndent;
}
/*
 * Find a record in @bh with enough free space for @name and carve a new
 * dirent out of it.  Returns the new dirent, NULL if the block is full,
 * or an ERR_PTR if the block scan failed.
 */
static struct gfs2_dirent *gfs2_dirent_alloc(struct inode *inode,
					     struct buffer_head *bh,
					     const struct qstr *name)
{
	struct gfs2_dirent *space;

	space = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
				 gfs2_dirent_find_space, name, NULL);
	if (space == NULL || IS_ERR(space))
		return space;

	return gfs2_init_dirent(inode, space, name, bh);
}
/*
 * Read leaf block @leaf_no into *bhp and verify its metadata type.
 * Returns: 0 on success (caller owns *bhp), negative errno otherwise.
 */
static int get_leaf(struct gfs2_inode *dip, u64 leaf_no,
		    struct buffer_head **bhp)
{
	int error = gfs2_meta_read(dip->i_gl, leaf_no, DIO_WAIT, bhp);

	if (error)
		return error;

	if (gfs2_metatype_check(GFS2_SB(&dip->i_inode), *bhp, GFS2_METATYPE_LF))
		error = -EIO;

	return error;
}
/**
 * get_leaf_nr - Get a leaf number associated with the index
 * @dip: The GFS2 inode
 * @index: hash table index of the pointer to look up
 * @leaf_out: filled in with the leaf block number
 *
 * Returns: 0 on success, error code otherwise
 */
static int get_leaf_nr(struct gfs2_inode *dip, u32 index,
		       u64 *leaf_out)
{
	__be64 leaf_no;
	int error;

	/* The hash table is an array of big-endian leaf block numbers
	   stored in the directory's data; fetch one slot. */
	error = gfs2_dir_read_data(dip, (char *)&leaf_no,
				    index * sizeof(__be64),
				    sizeof(__be64), 0);
	if (error == sizeof(__be64)) {
		*leaf_out = be64_to_cpu(leaf_no);
		return 0;
	}

	/* Short read with no errno means on-disk corruption. */
	return (error < 0) ? error : -EIO;
}
/* Look up the hash-table slot for @index and read the leaf it points at. */
static int get_first_leaf(struct gfs2_inode *dip, u32 index,
			  struct buffer_head **bh_out)
{
	u64 leaf_no;
	int error = get_leaf_nr(dip, index, &leaf_no);

	if (error)
		return error;

	return get_leaf(dip, leaf_no, bh_out);
}
/*
 * gfs2_dirent_search - run a scan callback over the block(s) holding @name
 * @inode: the directory
 * @name: the name being looked up (hash selects the leaf chain)
 * @scan: per-dirent callback (find / prev / find_space)
 * @pbh: on a successful match, the buffer holding the dirent (caller must
 *       brelse); set to NULL otherwise
 *
 * For an exhash directory, walks the leaf chain for the name's hash
 * bucket; for a stuffed directory, scans the dinode block itself.
 *
 * Returns: the matching dirent, NULL if not found, or an ERR_PTR.
 */
static struct gfs2_dirent *gfs2_dirent_search(struct inode *inode,
					      const struct qstr *name,
					      gfs2_dscan_t scan,
					      struct buffer_head **pbh)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	struct gfs2_inode *ip = GFS2_I(inode);
	int error;

	if (ip->i_diskflags & GFS2_DIF_EXHASH) {
		struct gfs2_leaf *leaf;
		unsigned hsize = 1 << ip->i_depth;
		unsigned index;
		u64 ln;
		/* Hash table size must agree with the recorded depth. */
		if (hsize * sizeof(u64) != i_size_read(inode)) {
			gfs2_consist_inode(ip);
			return ERR_PTR(-EIO);
		}

		/* Top i_depth bits of the hash index the table. */
		index = name->hash >> (32 - ip->i_depth);
		error = get_first_leaf(ip, index, &bh);
		if (error)
			return ERR_PTR(error);
		do {
			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						scan, name, NULL);
			if (dent)
				goto got_dent;
			/* Not in this leaf; follow the overflow chain. */
			leaf = (struct gfs2_leaf *)bh->b_data;
			ln = be64_to_cpu(leaf->lf_next);
			brelse(bh);
			if (!ln)
				break;

			error = get_leaf(ip, ln, &bh);
		} while(!error);

		return error ? ERR_PTR(error) : NULL;
	}

	/* Stuffed directory: all dirents live in the dinode block. */
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		return ERR_PTR(error);
	dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size, scan, name, NULL);
got_dent:
	/* Only hand the buffer back on an actual match. */
	if (unlikely(dent == NULL || IS_ERR(dent))) {
		brelse(bh);
		bh = NULL;
	}
	*pbh = bh;
	return dent;
}
/*
 * new_leaf - allocate and initialise an empty directory leaf block
 * @inode: the directory
 * @pbh: set to the (referenced) buffer head of the new block on success
 * @depth: lf_depth value to stamp into the new leaf
 *
 * The leaf is seeded with one empty sentinel dirent spanning all the space
 * after the gfs2_leaf header, ready for gfs2_dirent_alloc().
 *
 * Returns: the mapped leaf, or NULL on allocation failure (callers report
 * this as -ENOSPC).
 */
static struct gfs2_leaf *new_leaf(struct inode *inode, struct buffer_head **pbh, u16 depth)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int n = 1;
	u64 bn;
	int error;
	struct buffer_head *bh;
	struct gfs2_leaf *leaf;
	struct gfs2_dirent *dent;
	struct qstr name = { .name = "", .len = 0, .hash = 0 };

	error = gfs2_alloc_block(ip, &bn, &n);
	if (error)
		return NULL;
	bh = gfs2_meta_new(ip->i_gl, bn);
	if (!bh)
		return NULL;

	gfs2_trans_add_unrevoke(GFS2_SB(inode), bn, 1);
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_metatype_set(bh, GFS2_METATYPE_LF, GFS2_FORMAT_LF);
	leaf = (struct gfs2_leaf *)bh->b_data;
	leaf->lf_depth = cpu_to_be16(depth);
	leaf->lf_entries = 0;
	leaf->lf_dirent_format = cpu_to_be32(GFS2_FORMAT_DE);
	leaf->lf_next = 0;
	memset(leaf->lf_reserved, 0, sizeof(leaf->lf_reserved));
	/* Seed with one zero-length sentinel covering the whole payload. */
	dent = (struct gfs2_dirent *)(leaf+1);
	gfs2_qstr2dirent(&name, bh->b_size - sizeof(struct gfs2_leaf), dent);
	*pbh = bh;
	return leaf;
}
/**
* dir_make_exhash - Convert a stuffed directory into an ExHash directory
* @dip: The GFS2 inode
*
* Returns: 0 on success, error code otherwise
*/
/*
 * dir_make_exhash - convert a stuffed directory into an ExHash directory
 * @inode: the directory
 *
 * Moves all dirents from the dinode into a freshly-allocated leaf block,
 * then replaces the dinode's data area with a hash table in which every
 * pointer refers to that one leaf.
 *
 * Fixes vs. the previous version: the dinode buffer head (dibh) was
 * leaked when new_leaf() failed, and a dead store to 'x' is removed.
 *
 * Returns: 0 on success, error code otherwise
 */
static int dir_make_exhash(struct inode *inode)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_dirent *dent;
	struct qstr args;
	struct buffer_head *bh, *dibh;
	struct gfs2_leaf *leaf;
	int y;
	u32 x;
	__be64 *lp;
	u64 bn;
	int error;

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		return error;

	/*  Turn over a new leaf  */

	leaf = new_leaf(inode, &bh, 0);
	if (!leaf) {
		brelse(dibh);	/* don't leak the dinode buffer on ENOSPC */
		return -ENOSPC;
	}
	bn = bh->b_blocknr;

	gfs2_assert(sdp, dip->i_entries < (1 << 16));
	leaf->lf_entries = cpu_to_be16(dip->i_entries);

	/*  Copy dirents  */

	gfs2_buffer_copy_tail(bh, sizeof(struct gfs2_leaf), dibh,
			      sizeof(struct gfs2_dinode));

	/*  Find last entry: name/len describe the copied region so
	    gfs2_dirent_last can recognise the final record  */

	args.len = bh->b_size - sizeof(struct gfs2_dinode) +
		   sizeof(struct gfs2_leaf);
	args.name = bh->b_data;
	dent = gfs2_dirent_scan(&dip->i_inode, bh->b_data, bh->b_size,
				gfs2_dirent_last, &args, NULL);
	if (!dent) {
		brelse(bh);
		brelse(dibh);
		return -EIO;
	}
	if (IS_ERR(dent)) {
		brelse(bh);
		brelse(dibh);
		return PTR_ERR(dent);
	}

	/*  Adjust the last dirent's record length
	    (Remember that dent still points to the last entry.)  */

	dent->de_rec_len = cpu_to_be16(be16_to_cpu(dent->de_rec_len) +
				       sizeof(struct gfs2_dinode) -
				       sizeof(struct gfs2_leaf));

	brelse(bh);

	/*  We're done with the new leaf block, now setup the new
	    hash table.  */

	gfs2_trans_add_bh(dip->i_gl, dibh, 1);
	gfs2_buffer_clear_tail(dibh, sizeof(struct gfs2_dinode));

	lp = (__be64 *)(dibh->b_data + sizeof(struct gfs2_dinode));

	/* Every hash-table slot initially points at the single leaf. */
	for (x = sdp->sd_hash_ptrs; x--; lp++)
		*lp = cpu_to_be64(bn);

	i_size_write(inode, sdp->sd_sb.sb_bsize / 2);
	gfs2_add_inode_blocks(&dip->i_inode, 1);
	dip->i_diskflags |= GFS2_DIF_EXHASH;

	/* i_depth = log2(sd_hash_ptrs) */
	for (x = sdp->sd_hash_ptrs, y = -1; x; x >>= 1, y++) ;
	dip->i_depth = y;

	gfs2_dinode_out(dip, dibh->b_data);

	brelse(dibh);

	return 0;
}
/**
* dir_split_leaf - Split a leaf block into two
* @dip: The GFS2 inode
* @index:
* @leaf_no:
*
* Returns: 0 on success, error code on failure
*/
static int dir_split_leaf(struct inode *inode, const struct qstr *name)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct buffer_head *nbh, *obh, *dibh;
	struct gfs2_leaf *nleaf, *oleaf;
	struct gfs2_dirent *dent = NULL, *prev = NULL, *next = NULL, *new;
	u32 start, len, half_len, divider;
	u64 bn, leaf_no;
	__be64 *lp;
	u32 index;
	int x, moved = 0;
	int error;

	index = name->hash >> (32 - dip->i_depth);
	error = get_leaf_nr(dip, index, &leaf_no);
	if (error)
		return error;

	/*  Get the old leaf block  */
	error = get_leaf(dip, leaf_no, &obh);
	if (error)
		return error;

	oleaf = (struct gfs2_leaf *)obh->b_data;
	/* A leaf at full depth has only one hash-table pointer left and
	   cannot be split further; caller must grow the table instead. */
	if (dip->i_depth == be16_to_cpu(oleaf->lf_depth)) {
		brelse(obh);
		return 1; /* can't split */
	}

	gfs2_trans_add_bh(dip->i_gl, obh, 1);

	nleaf = new_leaf(inode, &nbh, be16_to_cpu(oleaf->lf_depth) + 1);
	if (!nleaf) {
		brelse(obh);
		return -ENOSPC;
	}
	bn = nbh->b_blocknr;

	/*  Compute the start and len of leaf pointers in the hash table.  */
	len = 1 << (dip->i_depth - be16_to_cpu(oleaf->lf_depth));
	half_len = len >> 1;
	if (!half_len) {
		printk(KERN_WARNING "i_depth %u lf_depth %u index %u\n", dip->i_depth, be16_to_cpu(oleaf->lf_depth), index);
		gfs2_consist_inode(dip);
		error = -EIO;
		goto fail_brelse;
	}

	start = (index & ~(len - 1));

	/* Change the pointers.
	   Don't bother distinguishing stuffed from non-stuffed.
	   This code is complicated enough already. */
	lp = kmalloc(half_len * sizeof(__be64), GFP_NOFS);
	if (!lp) {
		error = -ENOMEM;
		goto fail_brelse;
	}

	/* Point the upper half of the old leaf's table range at the new
	   leaf; the lower half keeps pointing at the old one. */
	for (x = 0; x < half_len; x++)
		lp[x] = cpu_to_be64(bn);

	error = gfs2_dir_write_data(dip, (char *)lp, start * sizeof(u64),
				    half_len * sizeof(u64));
	if (error != half_len * sizeof(u64)) {
		if (error >= 0)
			error = -EIO;
		goto fail_lpfree;
	}

	kfree(lp);

	/* Hashes below the divider stay in the new (lower-range) leaf. */
	divider = (start + half_len) << (32 - dip->i_depth);

	/*  Copy the entries  */
	dent = (struct gfs2_dirent *)(obh->b_data + sizeof(struct gfs2_leaf));

	do {
		/* Find next BEFORE deleting dent, since dirent_del merges
		   the current record into its predecessor. */
		next = dent;
		if (dirent_next(dip, obh, &next))
			next = NULL;

		if (!gfs2_dirent_sentinel(dent) &&
		    be32_to_cpu(dent->de_hash) < divider) {
			struct qstr str;
			str.name = (char*)(dent+1);
			str.len = be16_to_cpu(dent->de_name_len);
			str.hash = be32_to_cpu(dent->de_hash);
			new = gfs2_dirent_alloc(inode, nbh, &str);
			if (IS_ERR(new)) {
				error = PTR_ERR(new);
				break;
			}

			new->de_inum = dent->de_inum; /* No endian worries */
			new->de_type = dent->de_type; /* No endian worries */
			be16_add_cpu(&nleaf->lf_entries, 1);

			dirent_del(dip, obh, prev, dent);

			if (!oleaf->lf_entries)
				gfs2_consist_inode(dip);
			be16_add_cpu(&oleaf->lf_entries, -1);

			/* First deletion turns dent into the sentinel, which
			   then acts as "prev" for later coalescing. */
			if (!prev)
				prev = dent;

			moved = 1;
		} else {
			prev = dent;
		}
		dent = next;
	} while (dent);

	oleaf->lf_depth = nleaf->lf_depth;

	/* Account the new leaf block in the dinode. */
	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (!gfs2_assert_withdraw(GFS2_SB(&dip->i_inode), !error)) {
		gfs2_trans_add_bh(dip->i_gl, dibh, 1);
		gfs2_add_inode_blocks(&dip->i_inode, 1);
		gfs2_dinode_out(dip, dibh->b_data);
		brelse(dibh);
	}

	brelse(obh);
	brelse(nbh);

	return error;

fail_lpfree:
	kfree(lp);

fail_brelse:
	brelse(obh);
	brelse(nbh);
	return error;
}
/**
* dir_double_exhash - Double size of ExHash table
* @dip: The GFS2 dinode
*
* Returns: 0 on success, error code on failure
*/
static int dir_double_exhash(struct gfs2_inode *dip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct buffer_head *dibh;
	u32 hsize;
	u64 *buf;
	u64 *from, *to;
	u64 block;
	u64 disksize = i_size_read(&dip->i_inode);
	int x;
	int error = 0;

	hsize = 1 << dip->i_depth;
	if (hsize * sizeof(u64) != disksize) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	/*  Allocate both the "from" and "to" buffers in one big chunk  */

	buf = kcalloc(3, sdp->sd_hash_bsize, GFP_NOFS);
	if (!buf)
		return -ENOMEM;

	/* Work back-to-front so each block's doubled output (two blocks)
	   never overwrites input that hasn't been processed yet. */
	for (block = disksize >> sdp->sd_hash_bsize_shift; block--;) {
		error = gfs2_dir_read_data(dip, (char *)buf,
					    block * sdp->sd_hash_bsize,
					    sdp->sd_hash_bsize, 1);
		if (error != sdp->sd_hash_bsize) {
			if (error >= 0)
				error = -EIO;
			goto fail;
		}

		from = buf;
		to = (u64 *)((char *)buf + sdp->sd_hash_bsize);

		/* Duplicate every pointer: each old slot becomes two. */
		for (x = sdp->sd_hash_ptrs; x--; from++) {
			*to++ = *from;	/* No endianness worries */
			*to++ = *from;
		}

		error = gfs2_dir_write_data(dip,
					     (char *)buf + sdp->sd_hash_bsize,
					     block * sdp->sd_sb.sb_bsize,
					     sdp->sd_sb.sb_bsize);
		if (error != sdp->sd_sb.sb_bsize) {
			if (error >= 0)
				error = -EIO;
			goto fail;
		}
	}

	kfree(buf);

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (!gfs2_assert_withdraw(sdp, !error)) {
		dip->i_depth++;
		gfs2_dinode_out(dip, dibh->b_data);
		brelse(dibh);
	}

	return error;

fail:
	kfree(buf);
	return error;
}
/**
* compare_dents - compare directory entries by hash value
* @a: first dent
* @b: second dent
*
* When comparing the hash entries of @a to @b:
* gt: returns 1
* lt: returns -1
* eq: returns 0
*/
/*
 * qsort-style comparator for an array of gfs2_dirent pointers.
 * Primary key: hash; tie-breakers: name length, then the name bytes.
 * Returns <0, 0 or >0 in the usual comparator convention.
 */
static int compare_dents(const void *a, const void *b)
{
	const struct gfs2_dirent *da = *(const struct gfs2_dirent **)a;
	const struct gfs2_dirent *db = *(const struct gfs2_dirent **)b;
	u32 hash_a = be32_to_cpu(da->de_hash);
	u32 hash_b = be32_to_cpu(db->de_hash);
	unsigned int len_a, len_b;

	if (hash_a > hash_b)
		return 1;
	if (hash_a < hash_b)
		return -1;

	len_a = be16_to_cpu(da->de_name_len);
	len_b = be16_to_cpu(db->de_name_len);
	if (len_a > len_b)
		return 1;
	if (len_a < len_b)
		return -1;

	/* Names follow the dirent header directly. */
	return memcmp(da + 1, db + 1, len_a);
}
/**
 * do_filldir_main - read out directory entries
 * @dip: The GFS2 inode
 * @offset: The offset in the file to read from
 * @opaque: opaque data to pass to filldir
 * @filldir: The function to pass entries to
 * @darr: an array of struct gfs2_dirent pointers to read
 * @entries: the number of entries in darr
 * @copied: pointer to int that's non-zero if an entry has been copied out
 *
 * Jump through some hoops to make sure that if there are hash collisions,
 * they are read out at the beginning of a buffer. We want to minimize
 * the possibility that they will fall into different readdir buffers or
 * that someone will want to seek to that location.
 *
 * Returns: errno, >0 on exception from filldir
 */
static int do_filldir_main(struct gfs2_inode *dip, u64 *offset,
			   void *opaque, filldir_t filldir,
			   const struct gfs2_dirent **darr, u32 entries,
			   int *copied)
{
	const struct gfs2_dirent *dent, *dent_next;
	u64 off, off_next;
	unsigned int x, y;
	int run = 0;	/* inside a run of equal-hash (colliding) entries */
	int error = 0;

	/* Emit entries in hash order so readdir offsets are stable. */
	sort(darr, entries, sizeof(struct gfs2_dirent *), compare_dents, NULL);

	dent_next = darr[0];
	off_next = be32_to_cpu(dent_next->de_hash);
	off_next = gfs2_disk_hash2offset(off_next);

	for (x = 0, y = 1; x < entries; x++, y++) {
		dent = dent_next;
		off = off_next;

		if (y < entries) {
			/* Peek at the next entry to detect hash collisions. */
			dent_next = darr[y];
			off_next = be32_to_cpu(dent_next->de_hash);
			off_next = gfs2_disk_hash2offset(off_next);

			if (off < *offset)
				continue;
			*offset = off;

			if (off_next == off) {
				/* Start of a collision run: stop here if we
				   already emitted something, so the whole run
				   lands at the start of the next buffer. */
				if (*copied && !run)
					return 1;
				run = 1;
			} else
				run = 0;
		} else {
			if (off < *offset)
				continue;
			*offset = off;
		}

		error = filldir(opaque, (const char *)(dent + 1),
				be16_to_cpu(dent->de_name_len),
				off, be64_to_cpu(dent->de_inum.no_addr),
				be16_to_cpu(dent->de_type));
		if (error)
			return 1;	/* user buffer full */

		*copied = 1;
	}

	/* Increment the *offset by one, so the next time we come into the
	   do_filldir fxn, we get the next entry instead of the last one in the
	   current leaf */

	(*offset)++;

	return 0;
}
/*
 * Allocate a sort buffer: try kmalloc first for speed, quietly falling
 * back to vmalloc for sizes kmalloc can't satisfy.  Freed with
 * gfs2_free_sort_buffer().
 */
static void *gfs2_alloc_sort_buffer(unsigned size)
{
	if (size < KMALLOC_MAX_SIZE) {
		void *ptr = kmalloc(size, GFP_NOFS | __GFP_NOWARN);
		if (ptr)
			return ptr;
	}
	return __vmalloc(size, GFP_NOFS, PAGE_KERNEL);
}
/* Release a buffer from gfs2_alloc_sort_buffer(), which may have come
 * from either kmalloc or vmalloc. */
static void gfs2_free_sort_buffer(void *ptr)
{
	if (!is_vmalloc_addr(ptr)) {
		kfree(ptr);
		return;
	}
	vfree(ptr);
}
/*
 * gfs2_dir_read_leaf - filldir all entries of one leaf chain
 * @inode: the directory
 * @offset: readdir position (updated)
 * @opaque/@filldir: forwarded to do_filldir_main()
 * @copied: set non-zero once anything is emitted
 * @depth: set to the chain's lf_depth (caller uses it to skip duplicate
 *         hash-table pointers to the same chain)
 * @leaf_no: first leaf block of the chain
 *
 * Pass 1 counts entries across the chain; pass 2 gathers dirent pointers
 * (keeping the leaf buffers referenced so the pointers stay valid) and
 * hands them to do_filldir_main() in hash order.
 */
static int gfs2_dir_read_leaf(struct inode *inode, u64 *offset, void *opaque,
			      filldir_t filldir, int *copied, unsigned *depth,
			      u64 leaf_no)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_leaf *lf;
	unsigned entries = 0, entries2 = 0;
	unsigned leaves = 0;
	const struct gfs2_dirent **darr, *dent;
	struct dirent_gather g;
	struct buffer_head **larr;
	int leaf = 0;
	int error, i;
	u64 lfn = leaf_no;

	/* Pass 1: count leaves and entries along the chain. */
	do {
		error = get_leaf(ip, lfn, &bh);
		if (error)
			goto out;
		lf = (struct gfs2_leaf *)bh->b_data;
		if (leaves == 0)
			*depth = be16_to_cpu(lf->lf_depth);
		entries += be16_to_cpu(lf->lf_entries);
		leaves++;
		lfn = be64_to_cpu(lf->lf_next);
		brelse(bh);
	} while(lfn);

	if (!entries)
		return 0;

	error = -ENOMEM;
	/*
	 * The extra 99 entries are not normally used, but are a buffer
	 * zone in case the number of entries in the leaf is corrupt.
	 * 99 is the maximum number of entries that can fit in a single
	 * leaf block.
	 */
	larr = gfs2_alloc_sort_buffer((leaves + entries + 99) * sizeof(void *));
	if (!larr)
		goto out;
	/* larr holds the bh pointers; darr (after them) the dirent ptrs. */
	darr = (const struct gfs2_dirent **)(larr + leaves);
	g.pdent = darr;
	g.offset = 0;
	lfn = leaf_no;

	/* Pass 2: gather dirent pointers, pinning each leaf's buffer. */
	do {
		error = get_leaf(ip, lfn, &bh);
		if (error)
			goto out_free;
		lf = (struct gfs2_leaf *)bh->b_data;
		lfn = be64_to_cpu(lf->lf_next);
		if (lf->lf_entries) {
			entries2 += be16_to_cpu(lf->lf_entries);
			dent = gfs2_dirent_scan(inode, bh->b_data, bh->b_size,
						gfs2_dirent_gather, NULL, &g);
			error = PTR_ERR(dent);
			if (IS_ERR(dent))
				goto out_free;
			if (entries2 != g.offset) {
				fs_warn(sdp, "Number of entries corrupt in dir "
						"leaf %llu, entries2 (%u) != "
						"g.offset (%u)\n",
					(unsigned long long)bh->b_blocknr,
					entries2, g.offset);
					
				error = -EIO;
				goto out_free;
			}
			error = 0;
			larr[leaf++] = bh;	/* keep referenced until done */
		} else {
			brelse(bh);
		}
	} while(lfn);

	BUG_ON(entries2 != entries);
	error = do_filldir_main(ip, offset, opaque, filldir, darr,
				entries, copied);
out_free:
	for(i = 0; i < leaf; i++)
		brelse(larr[i]);
	gfs2_free_sort_buffer(larr);
out:
	return error;
}
/**
* dir_e_read - Reads the entries from a directory into a filldir buffer
* @dip: dinode pointer
* @offset: the hash of the last entry read shifted to the right once
* @opaque: buffer for the filldir function to fill
* @filldir: points to the filldir function to use
*
* Returns: errno
*/
static int dir_e_read(struct inode *inode, u64 *offset, void *opaque,
		      filldir_t filldir)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	u32 hsize, len = 0;
	u32 ht_offset, lp_offset, ht_offset_cur = -1;	/* -1: no block cached */
	u32 hash, index;
	__be64 *lp;
	int copied = 0;
	int error = 0;
	unsigned depth = 0;

	hsize = 1 << dip->i_depth;
	if (hsize * sizeof(u64) != i_size_read(inode)) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	/* Resume at the hash-table slot for the caller's readdir offset. */
	hash = gfs2_dir_offset2hash(*offset);
	index = hash >> (32 - dip->i_depth);

	lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
	if (!lp)
		return -ENOMEM;

	while (index < hsize) {
		lp_offset = index & (sdp->sd_hash_ptrs - 1);
		ht_offset = index - lp_offset;

		/* Only (re)read a hash-table block when we cross into it. */
		if (ht_offset_cur != ht_offset) {
			error = gfs2_dir_read_data(dip, (char *)lp,
						ht_offset * sizeof(__be64),
						sdp->sd_hash_bsize, 1);
			if (error != sdp->sd_hash_bsize) {
				if (error >= 0)
					error = -EIO;
				goto out;
			}
			ht_offset_cur = ht_offset;
		}

		error = gfs2_dir_read_leaf(inode, offset, opaque, filldir,
					   &copied, &depth,
					   be64_to_cpu(lp[lp_offset]));
		if (error)
			break;

		/* Skip the remaining table slots that alias this leaf chain
		   (a chain at depth d is pointed at by 2^(i_depth-d) slots). */
		len = 1 << (dip->i_depth - depth);
		index = (index & ~(len - 1)) + len;
	}

out:
	kfree(lp);
	/* >0 from filldir means "buffer full", which is success here. */
	if (error > 0)
		error = 0;
	return error;
}
/*
 * gfs2_dir_read - readdir entry point for a GFS2 directory
 * @inode: the directory
 * @offset: readdir position (updated as entries are emitted)
 * @opaque/@filldir: VFS filldir callback and its cookie
 *
 * Dispatches to dir_e_read() for exhash directories; otherwise the
 * directory must be stuffed and is emitted straight from the dinode block.
 *
 * Returns: errno (filldir's "buffer full" indication is mapped to 0).
 */
int gfs2_dir_read(struct inode *inode, u64 *offset, void *opaque,
		  filldir_t filldir)
{
	struct gfs2_inode *dip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct dirent_gather g;
	const struct gfs2_dirent **darr, *dent;
	struct buffer_head *dibh;
	int copied = 0;
	int error;

	if (!dip->i_entries)
		return 0;

	if (dip->i_diskflags & GFS2_DIF_EXHASH)
		return dir_e_read(inode, offset, opaque, filldir);

	/* Non-exhash directories must be stuffed. */
	if (!gfs2_is_stuffed(dip)) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		return error;

	error = -ENOMEM;
	/* 96 is max number of dirents which can be stuffed into an inode */
	darr = kmalloc(96 * sizeof(struct gfs2_dirent *), GFP_NOFS);
	if (darr) {
		g.pdent = darr;
		g.offset = 0;
		dent = gfs2_dirent_scan(inode, dibh->b_data, dibh->b_size,
					gfs2_dirent_gather, NULL, &g);
		if (IS_ERR(dent)) {
			error = PTR_ERR(dent);
			goto out;
		}
		/* Cross-check the dinode's entry count against the scan. */
		if (dip->i_entries != g.offset) {
			fs_warn(sdp, "Number of entries corrupt in dir %llu, "
				"ip->i_entries (%u) != g.offset (%u)\n",
				(unsigned long long)dip->i_no_addr,
				dip->i_entries,
				g.offset);
			error = -EIO;
			goto out;
		}
		error = do_filldir_main(dip, offset, opaque, filldir, darr,
					dip->i_entries, &copied);
out:
		kfree(darr);
	}

	/* >0 from do_filldir_main means "buffer full": success. */
	if (error > 0)
		error = 0;

	brelse(dibh);

	return error;
}
/**
 * gfs2_dir_search - Search a directory
 * @dir: The directory to search
 * @name: The name of the entry to look up
 *
 * This routine searches a directory for a file or another directory.
 * Assumes a glock is held on @dir.
 *
 * Returns: the entry's inode on success, ERR_PTR(-ENOENT) if the name is
 * not present, or another ERR_PTR on error
 */
struct inode *gfs2_dir_search(struct inode *dir, const struct qstr *name)
{
struct buffer_head *bh;
struct gfs2_dirent *dent;
struct inode *inode;
dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
if (dent) {
if (IS_ERR(dent))
return ERR_CAST(dent);
inode = gfs2_inode_lookup(dir->i_sb,
be16_to_cpu(dent->de_type),
be64_to_cpu(dent->de_inum.no_addr),
be64_to_cpu(dent->de_inum.no_formal_ino), 0);
brelse(bh);
return inode;
}
return ERR_PTR(-ENOENT);
}
/*
 * gfs2_dir_check - verify that @name exists in @dir (and matches @ip)
 * @dir: the directory
 * @name: the entry name
 * @ip: if non-NULL, the inode the entry is expected to reference
 *
 * Returns: 0 if the entry exists (and matches @ip when given), -ENOENT if
 * absent or pointing at a different inode, -EIO if the entry's type
 * disagrees with @ip's mode (directory marked inconsistent), or another
 * errno from the search.
 */
int gfs2_dir_check(struct inode *dir, const struct qstr *name,
		   const struct gfs2_inode *ip)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	int ret = -ENOENT;

	dent = gfs2_dirent_search(dir, name, gfs2_dirent_find, &bh);
	if (dent) {
		if (IS_ERR(dent))
			return PTR_ERR(dent);
		if (ip) {
			if (be64_to_cpu(dent->de_inum.no_addr) != ip->i_no_addr)
				goto out;
			if (be64_to_cpu(dent->de_inum.no_formal_ino) !=
			    ip->i_no_formal_ino)
				goto out;
			/* Type mismatch means on-disk corruption. */
			if (unlikely(IF2DT(ip->i_inode.i_mode) !=
			    be16_to_cpu(dent->de_type))) {
				gfs2_consist_inode(GFS2_I(dir));
				ret = -EIO;
				goto out;
			}
		}
		ret = 0;
out:
		brelse(bh);
	}
	return ret;
}
/*
 * dir_new_leaf - append a fresh leaf to the chain for @name's hash bucket
 * @inode: the directory
 * @name: the name whose hash selects the leaf chain
 *
 * Walks to the end of the chain, allocates a new leaf at the same depth,
 * links it in via lf_next, and accounts the new block in the dinode.
 *
 * Returns: 0 on success, error code otherwise.
 */
static int dir_new_leaf(struct inode *inode, const struct qstr *name)
{
	struct buffer_head *bh, *obh;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_leaf *leaf, *oleaf;
	int error;
	u32 index;
	u64 bn;

	index = name->hash >> (32 - ip->i_depth);
	error = get_first_leaf(ip, index, &obh);
	if (error)
		return error;
	/* Follow lf_next to the last leaf of the chain. */
	do {
		oleaf = (struct gfs2_leaf *)obh->b_data;
		bn = be64_to_cpu(oleaf->lf_next);
		if (!bn)
			break;
		brelse(obh);
		error = get_leaf(ip, bn, &obh);
		if (error)
			return error;
	} while(1);

	gfs2_trans_add_bh(ip->i_gl, obh, 1);

	/* New leaf inherits the chain's depth. */
	leaf = new_leaf(inode, &bh, be16_to_cpu(oleaf->lf_depth));
	if (!leaf) {
		brelse(obh);
		return -ENOSPC;
	}
	oleaf->lf_next = cpu_to_be64(bh->b_blocknr);
	brelse(bh);
	brelse(obh);

	/* Account the extra block in the dinode. */
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		return error;
	gfs2_trans_add_bh(ip->i_gl, bh, 1);
	gfs2_add_inode_blocks(&ip->i_inode, 1);
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	return 0;
}
/**
 * gfs2_dir_add - Add new filename into directory
 * @inode: The directory inode
 * @name: The new name
 * @nip: The GFS2 inode of the entry being added
 *
 * Returns: 0 on success, error code on failure
 */
int gfs2_dir_add(struct inode *inode, const struct qstr *name,
		 const struct gfs2_inode *nip)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	struct gfs2_leaf *leaf;
	int error;

	/* Retry loop: each pass either inserts the entry or makes more
	   room (exhash conversion, leaf split, table doubling, new leaf)
	   and tries again. */
	while(1) {
		dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space,
					  &bh);
		if (dent) {
			if (IS_ERR(dent))
				return PTR_ERR(dent);
			/* Found space: write the new dirent. */
			dent = gfs2_init_dirent(inode, dent, name, bh);
			gfs2_inum_out(nip, dent);
			dent->de_type = cpu_to_be16(IF2DT(nip->i_inode.i_mode));
			if (ip->i_diskflags & GFS2_DIF_EXHASH) {
				leaf = (struct gfs2_leaf *)bh->b_data;
				be16_add_cpu(&leaf->lf_entries, 1);
			}
			brelse(bh);
			/* Update the dinode: entry count, times, nlink. */
			error = gfs2_meta_inode_buffer(ip, &bh);
			if (error)
				break;
			gfs2_trans_add_bh(ip->i_gl, bh, 1);
			ip->i_entries++;
			ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
			if (S_ISDIR(nip->i_inode.i_mode))
				inc_nlink(&ip->i_inode);
			gfs2_dinode_out(ip, bh->b_data);
			brelse(bh);
			error = 0;
			break;
		}
		/* No space: first escalation is stuffed -> exhash. */
		if (!(ip->i_diskflags & GFS2_DIF_EXHASH)) {
			error = dir_make_exhash(inode);
			if (error)
				break;
			continue;
		}
		/* Next: split the target leaf (returns 1 if at max depth). */
		error = dir_split_leaf(inode, name);
		if (error == 0)
			continue;
		if (error < 0)
			break;
		/* Leaf can't split: double the hash table, then split. */
		if (ip->i_depth < GFS2_DIR_MAX_DEPTH) {
			error = dir_double_exhash(ip);
			if (error)
				break;
			error = dir_split_leaf(inode, name);
			if (error < 0)
				break;
			if (error == 0)
				continue;
		}
		/* Last resort: chain another leaf onto this bucket. */
		error = dir_new_leaf(inode, name);
		if (!error)
			continue;
		error = -ENOSPC;
		break;
	}
	return error;
}
/**
* gfs2_dir_del - Delete a directory entry
* @dip: The GFS2 inode
* @filename: The filename
*
* Returns: 0 on success, error code on failure
*/
int gfs2_dir_del(struct gfs2_inode *dip, const struct dentry *dentry)
{
	const struct qstr *name = &dentry->d_name;
	struct gfs2_dirent *dent, *prev = NULL;
	struct buffer_head *bh;
	int error;

	/* Returns _either_ the entry (if its first in block) or the
	   previous entry otherwise */
	dent = gfs2_dirent_search(&dip->i_inode, name, gfs2_dirent_prev, &bh);
	if (!dent) {
		/* Caller believed the entry existed; its absence means the
		   directory is inconsistent. */
		gfs2_consist_inode(dip);
		return -EIO;
	}
	if (IS_ERR(dent)) {
		gfs2_consist_inode(dip);
		return PTR_ERR(dent);
	}
	/* If not first in block, adjust pointers accordingly: the search
	   gave us the PREVIOUS record; step to the victim. */
	if (gfs2_dirent_find(dent, name, NULL) == 0) {
		prev = dent;
		dent = (struct gfs2_dirent *)((char *)dent + be16_to_cpu(prev->de_rec_len));
	}

	dirent_del(dip, bh, prev, dent);
	if (dip->i_diskflags & GFS2_DIF_EXHASH) {
		struct gfs2_leaf *leaf = (struct gfs2_leaf *)bh->b_data;
		u16 entries = be16_to_cpu(leaf->lf_entries);
		if (!entries)
			gfs2_consist_inode(dip);
		leaf->lf_entries = cpu_to_be16(--entries);
	}
	brelse(bh);

	/* Update the dinode: entry count, times, nlink for rmdir. */
	error = gfs2_meta_inode_buffer(dip, &bh);
	if (error)
		return error;

	if (!dip->i_entries)
		gfs2_consist_inode(dip);
	gfs2_trans_add_bh(dip->i_gl, bh, 1);
	dip->i_entries--;
	dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
	if (S_ISDIR(dentry->d_inode->i_mode))
		drop_nlink(&dip->i_inode);
	gfs2_dinode_out(dip, bh->b_data);
	brelse(bh);
	mark_inode_dirty(&dip->i_inode);

	return error;
}
/**
* gfs2_dir_mvino - Change inode number of directory entry
* @dip: The GFS2 inode
* @filename:
* @new_inode:
*
* This routine changes the inode number of a directory entry. It's used
* by rename to change ".." when a directory is moved.
* Assumes a glock is held on dvp.
*
* Returns: errno
*/
int gfs2_dir_mvino(struct gfs2_inode *dip, const struct qstr *filename,
		   const struct gfs2_inode *nip, unsigned int new_type)
{
	struct buffer_head *bh;
	struct gfs2_dirent *dent;
	int error;

	dent = gfs2_dirent_search(&dip->i_inode, filename, gfs2_dirent_find, &bh);
	if (!dent) {
		/* Rename target must exist; absence means corruption. */
		gfs2_consist_inode(dip);
		return -EIO;
	}
	if (IS_ERR(dent))
		return PTR_ERR(dent);

	/* Rewrite the dirent in place with the new inode and type. */
	gfs2_trans_add_bh(dip->i_gl, bh, 1);
	gfs2_inum_out(nip, dent);
	dent->de_type = cpu_to_be16(new_type);

	/* For exhash, bh was a leaf block; fetch the dinode buffer so the
	   timestamp update below lands in the right block.  For stuffed
	   directories bh already IS the dinode buffer. */
	if (dip->i_diskflags & GFS2_DIF_EXHASH) {
		brelse(bh);
		error = gfs2_meta_inode_buffer(dip, &bh);
		if (error)
			return error;
		gfs2_trans_add_bh(dip->i_gl, bh, 1);
	}

	dip->i_inode.i_mtime = dip->i_inode.i_ctime = CURRENT_TIME;
	gfs2_dinode_out(dip, bh->b_data);
	brelse(bh);
	return 0;
}
/**
* leaf_dealloc - Deallocate a directory leaf
* @dip: the directory
* @index: the hash table offset in the directory
* @len: the number of pointers to this leaf
* @leaf_no: the leaf number
* @leaf_bh: buffer_head for the starting leaf
* last_dealloc: 1 if this is the final dealloc for the leaf, else 0
*
* Returns: errno
*/
static int leaf_dealloc(struct gfs2_inode *dip, u32 index, u32 len,
			u64 leaf_no, struct buffer_head *leaf_bh,
			int last_dealloc)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct gfs2_leaf *tmp_leaf;
	struct gfs2_rgrp_list rlist;
	struct buffer_head *bh, *dibh;
	u64 blk, nblk;
	unsigned int rg_blocks = 0, l_blocks = 0;
	char *ht;
	unsigned int x, size = len * sizeof(u64);
	int error;

	memset(&rlist, 0, sizeof(struct gfs2_rgrp_list));

	/* Zero-filled buffer later written over this leaf's hash-table
	   pointers, so they no longer reference the freed chain. */
	ht = kzalloc(size, GFP_NOFS);
	if (!ht)
		return -ENOMEM;

	if (!gfs2_alloc_get(dip)) {
		error = -ENOMEM;
		goto out;
	}

	error = gfs2_quota_hold(dip, NO_QUOTA_CHANGE, NO_QUOTA_CHANGE);
	if (error)
		goto out_put;

	error = gfs2_rindex_hold(sdp, &dip->i_alloc->al_ri_gh);
	if (error)
		goto out_qs;

	/* Count the number of leaves */
	/* First pass over the lf_next chain: collect every leaf block into
	   the resource-group list so their rgrp glocks can be acquired. */
	bh = leaf_bh;

	for (blk = leaf_no; blk; blk = nblk) {
		if (blk != leaf_no) {
			error = get_leaf(dip, blk, &bh);
			if (error)
				goto out_rlist;
		}
		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);
		/* Only release buffers we acquired here; leaf_bh belongs
		   to the caller. */
		if (blk != leaf_no)
			brelse(bh);

		gfs2_rlist_add(sdp, &rlist, blk);
		l_blocks++;
	}

	gfs2_rlist_alloc(&rlist, LM_ST_EXCLUSIVE);

	/* Sum rgrp header blocks to size the transaction reservation. */
	for (x = 0; x < rlist.rl_rgrps; x++) {
		struct gfs2_rgrpd *rgd;
		rgd = rlist.rl_ghs[x].gh_gl->gl_object;
		rg_blocks += rgd->rd_length;
	}

	error = gfs2_glock_nq_m(rlist.rl_rgrps, rlist.rl_ghs);
	if (error)
		goto out_rlist;

	error = gfs2_trans_begin(sdp,
			rg_blocks + (DIV_ROUND_UP(size, sdp->sd_jbsize) + 1) +
			RES_DINODE + RES_STATFS + RES_QUOTA, l_blocks);
	if (error)
		goto out_rg_gunlock;

	/* Second pass over the chain: actually free each leaf block. */
	bh = leaf_bh;

	for (blk = leaf_no; blk; blk = nblk) {
		if (blk != leaf_no) {
			error = get_leaf(dip, blk, &bh);
			if (error)
				goto out_end_trans;
		}
		tmp_leaf = (struct gfs2_leaf *)bh->b_data;
		nblk = be64_to_cpu(tmp_leaf->lf_next);
		if (blk != leaf_no)
			brelse(bh);

		gfs2_free_meta(dip, blk, 1);
		gfs2_add_inode_blocks(&dip->i_inode, -1);
	}

	/* Clear the hash-table pointers that referenced this chain. */
	error = gfs2_dir_write_data(dip, ht, index * sizeof(u64), size);
	if (error != size) {
		if (error >= 0)
			error = -EIO;
		goto out_end_trans;
	}

	error = gfs2_meta_inode_buffer(dip, &dibh);
	if (error)
		goto out_end_trans;

	gfs2_trans_add_bh(dip->i_gl, dibh, 1);
	/* On the last dealloc, make this a regular file in case we crash.
	   (We don't want to free these blocks a second time.) */
	if (last_dealloc)
		dip->i_inode.i_mode = S_IFREG;
	gfs2_dinode_out(dip, dibh->b_data);
	brelse(dibh);

	/* Cleanup ladder: unwind in reverse order of acquisition. */
out_end_trans:
	gfs2_trans_end(sdp);
out_rg_gunlock:
	gfs2_glock_dq_m(rlist.rl_rgrps, rlist.rl_ghs);
out_rlist:
	gfs2_rlist_free(&rlist);
	gfs2_glock_dq_uninit(&dip->i_alloc->al_ri_gh);
out_qs:
	gfs2_quota_unhold(dip);
out_put:
	gfs2_alloc_put(dip);
out:
	kfree(ht);
	return error;
}
/**
* gfs2_dir_exhash_dealloc - free all the leaf blocks in a directory
* @dip: the directory
*
* Dealloc all on-disk directory leaves to FREEMETA state
* Change on-disk inode type to "regular file"
*
* Returns: errno
*/
int gfs2_dir_exhash_dealloc(struct gfs2_inode *dip)
{
	struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
	struct buffer_head *bh;
	struct gfs2_leaf *leaf;
	u32 hsize, len;
	u32 ht_offset, lp_offset, ht_offset_cur = -1;	/* -1 forces first read */
	u32 index = 0, next_index;
	__be64 *lp;
	u64 leaf_no;
	int error = 0, last;

	/* The hash table must be exactly 2^depth pointers long. */
	hsize = 1 << dip->i_depth;
	if (hsize * sizeof(u64) != i_size_read(&dip->i_inode)) {
		gfs2_consist_inode(dip);
		return -EIO;
	}

	/* Scratch buffer holding one hash-table block of leaf pointers. */
	lp = kmalloc(sdp->sd_hash_bsize, GFP_NOFS);
	if (!lp)
		return -ENOMEM;

	/* Walk the hash table; each non-zero pointer names a leaf chain. */
	while (index < hsize) {
		lp_offset = index & (sdp->sd_hash_ptrs - 1);
		ht_offset = index - lp_offset;

		/* Refill the pointer cache when we cross a block boundary. */
		if (ht_offset_cur != ht_offset) {
			error = gfs2_dir_read_data(dip, (char *)lp,
						ht_offset * sizeof(__be64),
						sdp->sd_hash_bsize, 1);
			if (error != sdp->sd_hash_bsize) {
				if (error >= 0)
					error = -EIO;
				goto out;
			}
			ht_offset_cur = ht_offset;
		}

		leaf_no = be64_to_cpu(lp[lp_offset]);
		if (leaf_no) {
			error = get_leaf(dip, leaf_no, &bh);
			if (error)
				goto out;
			leaf = (struct gfs2_leaf *)bh->b_data;
			/* A leaf at depth d covers 2^(i_depth - d)
			   consecutive hash-table slots; skip them all. */
			len = 1 << (dip->i_depth - be16_to_cpu(leaf->lf_depth));
			next_index = (index & ~(len - 1)) + len;
			last = ((next_index >= hsize) ? 1 : 0);
			error = leaf_dealloc(dip, index, len, leaf_no, bh,
					     last);
			brelse(bh);
			if (error)
				goto out;
			index = next_index;
		} else
			index++;
	}

	/* The loop should land exactly on hsize; anything else means the
	   leaf depths were inconsistent. */
	if (index != hsize) {
		gfs2_consist_inode(dip);
		error = -EIO;
	}

out:
	kfree(lp);
	return error;
}
/**
* gfs2_diradd_alloc_required - find if adding entry will require an allocation
 * @inode: the directory being written to
 * @name: the filename that's going to be added
*
* Returns: 1 if alloc required, 0 if not, -ve on error
*/
int gfs2_diradd_alloc_required(struct inode *inode, const struct qstr *name)
{
struct gfs2_dirent *dent;
struct buffer_head *bh;
dent = gfs2_dirent_search(inode, name, gfs2_dirent_find_space, &bh);
if (!dent) {
return 1;
}
if (IS_ERR(dent))
return PTR_ERR(dent);
brelse(bh);
return 0;
}
| gpl-2.0 |
Arasthel/kernel_motorola_msm | arch/x86/kernel/apic/x2apic_phys.c | 2381 | 3896 | #include <linux/threads.h>
#include <linux/cpumask.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/dmar.h>
#include <asm/smp.h>
#include <asm/x2apic.h>
int x2apic_phys;
static struct apic apic_x2apic_phys;
/* early_param handler: "x2apic_phys" on the command line forces physical
 * destination mode regardless of the FADT setting. The argument value
 * itself is ignored. */
static int set_x2apic_phys_mode(char *arg)
{
	x2apic_phys = 1;
	return 0;
}
early_param("x2apic_phys", set_x2apic_phys_mode);
/* Report whether ACPI (FADT rev 2+ with the APIC_PHYSICAL flag) mandates
 * x2apic physical destination mode; logs when it does. */
static bool x2apic_fadt_phys(void)
{
	bool phys_required;

	phys_required = acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID &&
			(acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL);
	if (phys_required)
		printk(KERN_DEBUG "System requires x2apic physical mode\n");

	return phys_required;
}
/* MADT OEM check: select this driver when x2apic is enabled and physical
 * mode is forced either on the command line or by the FADT. */
static int x2apic_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
{
	if (!x2apic_enabled())
		return 0;

	return x2apic_phys || x2apic_fadt_phys();
}
/* Send @vector to each CPU in @mask, one ICR MSR write per CPU.
 * @apic_dest == APIC_DEST_ALLBUT excludes the sending CPU. */
static void
__x2apic_send_IPI_mask(const struct cpumask *mask, int vector, int apic_dest)
{
	unsigned long query_cpu;
	unsigned long this_cpu;
	unsigned long flags;

	/* Order prior memory stores ahead of the ICR MSR write. */
	x2apic_wrmsr_fence();

	/* IRQs off so smp_processor_id() stays valid across the loop. */
	local_irq_save(flags);

	this_cpu = smp_processor_id();
	for_each_cpu(query_cpu, mask) {
		if (apic_dest == APIC_DEST_ALLBUT && this_cpu == query_cpu)
			continue;
		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
				       vector, APIC_DEST_PHYSICAL);
	}
	local_irq_restore(flags);
}
/* Send @vector to every CPU in @mask, including the sender if present. */
static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLINC);
}
/* Send @vector to every CPU in @mask except the sending CPU. */
static void
 x2apic_send_IPI_mask_allbutself(const struct cpumask *mask, int vector)
{
	__x2apic_send_IPI_mask(mask, vector, APIC_DEST_ALLBUT);
}
/* Broadcast @vector to all online CPUs except the sender. */
static void x2apic_send_IPI_allbutself(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLBUT);
}
/* Broadcast @vector to all online CPUs, including the sender. */
static void x2apic_send_IPI_all(int vector)
{
	__x2apic_send_IPI_mask(cpu_online_mask, vector, APIC_DEST_ALLINC);
}
/* Nothing to do: the LDR is only meaningful in logical destination
 * mode, and this driver uses physical destinations. */
static void init_x2apic_ldr(void)
{
}
/* Probe hook: claim the APIC when x2apic is active and physical mode is
 * requested (command line or FADT), or when we were already selected. */
static int x2apic_phys_probe(void)
{
	if (x2apic_mode) {
		/* Note: x2apic_fadt_phys() is only consulted (and only
		 * logs) when x2apic_mode is set, as before. */
		if (x2apic_phys || x2apic_fadt_phys())
			return 1;
	}

	return apic == &apic_x2apic_phys;
}
/* APIC driver operations for x2apic physical destination mode. Hooks set
 * to NULL are not needed in this mode; register access goes through the
 * x2apic MSR interface (native_apic_msr_*). */
static struct apic apic_x2apic_phys = {

	.name				= "physical x2apic",
	.probe				= x2apic_phys_probe,
	.acpi_madt_oem_check		= x2apic_acpi_madt_oem_check,
	.apic_id_valid			= x2apic_apic_id_valid,
	.apic_id_registered		= x2apic_apic_id_registered,

	.irq_delivery_mode		= dest_Fixed,
	.irq_dest_mode			= 0, /* physical */

	.target_cpus			= online_target_cpus,
	.disable_esr			= 0,
	.dest_logical			= 0,
	.check_apicid_used		= NULL,
	.check_apicid_present		= NULL,

	.vector_allocation_domain	= default_vector_allocation_domain,
	.init_apic_ldr			= init_x2apic_ldr,

	.ioapic_phys_id_map		= NULL,
	.setup_apic_routing		= NULL,
	.multi_timer_check		= NULL,
	.cpu_present_to_apicid		= default_cpu_present_to_apicid,
	.apicid_to_cpu_present		= NULL,
	.setup_portio_remap		= NULL,
	.check_phys_apicid_present	= default_check_phys_apicid_present,
	.enable_apic_mode		= NULL,
	.phys_pkg_id			= x2apic_phys_pkg_id,
	.mps_oem_check			= NULL,

	.get_apic_id			= x2apic_get_apic_id,
	.set_apic_id			= x2apic_set_apic_id,
	.apic_id_mask			= 0xFFFFFFFFu,

	.cpu_mask_to_apicid_and		= default_cpu_mask_to_apicid_and,

	.send_IPI_mask			= x2apic_send_IPI_mask,
	.send_IPI_mask_allbutself	= x2apic_send_IPI_mask_allbutself,
	.send_IPI_allbutself		= x2apic_send_IPI_allbutself,
	.send_IPI_all			= x2apic_send_IPI_all,
	.send_IPI_self			= x2apic_send_IPI_self,

	.trampoline_phys_low		= DEFAULT_TRAMPOLINE_PHYS_LOW,
	.trampoline_phys_high		= DEFAULT_TRAMPOLINE_PHYS_HIGH,
	.wait_for_init_deassert		= NULL,
	.smp_callin_clear_local_apic	= NULL,
	.inquire_remote_apic		= NULL,

	.read				= native_apic_msr_read,
	.write				= native_apic_msr_write,
	.eoi_write			= native_apic_msr_eoi_write,
	.icr_read			= native_x2apic_icr_read,
	.icr_write			= native_x2apic_icr_write,
	.wait_icr_idle			= native_x2apic_wait_icr_idle,
	.safe_wait_icr_idle		= native_safe_x2apic_wait_icr_idle,
};
apic_driver(apic_x2apic_phys);
| gpl-2.0 |
TheTonon/CM10-JB | drivers/net/wireless/ath/ath5k/pci.c | 2381 | 9755 | /*
* Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/nl80211.h>
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/etherdevice.h>
#include "../ath.h"
#include "ath5k.h"
#include "debug.h"
#include "base.h"
#include "reg.h"
/* Known PCI ids */
/* PCI vendor/device IDs this driver binds to. */
static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
	{ PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
	{ PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
	{ PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
	{ PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
	{ PCI_VDEVICE(3COM_2,  0x0013) }, /* 3com 5212 */
	{ PCI_VDEVICE(3COM,    0x0013) }, /* 3com 3CRDAG675 5212 */
	{ PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
	{ PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */
	{ PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */
	{ PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */
	{ PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */
	{ PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */
	{ PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */
	{ PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
	{ PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
	{ PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
	{ PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
	{ 0 }
};
MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
/* return bus cachesize in 4B word units */
/* return bus cachesize in 4B word units */
static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
{
	struct ath5k_softc *sc = (struct ath5k_softc *) common->priv;
	u8 cache_line;

	pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, &cache_line);

	/*
	 * Some boot firmware leaves the PCI cache line size register
	 * uninitialized; fall back to a sane default so DMA setup works.
	 */
	if (cache_line == 0)
		cache_line = L1_CACHE_BYTES >> 2;

	*csz = (int)cache_line;
}
/*
* Read from eeprom
*/
/* Read one 16-bit word at @offset from the EEPROM into @data.
 * Returns true on success, false on a read error or timeout. */
static bool
ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
{
	struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
	u32 status, timeout;

	/*
	 * Initialize EEPROM access
	 */
	if (ah->ah_version == AR5K_AR5210) {
		/* 5210: enable EEPROM access and trigger the read with a
		 * dummy register read of the addressed word. */
		AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
		(void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
	} else {
		/* Later MACs: program the address, then issue a read
		 * command. */
		ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
		AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
				AR5K_EEPROM_CMD_READ);
	}

	/* Poll for completion, 15us per iteration, bounded by
	 * AR5K_TUNE_REGISTER_TIMEOUT iterations. */
	for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
		status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
		if (status & AR5K_EEPROM_STAT_RDDONE) {
			if (status & AR5K_EEPROM_STAT_RDERR)
				return false;
			*data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
					0xffff);
			return true;
		}
		udelay(15);
	}

	return false;
}
/* Cache the MAC silicon revision from the SREV register. Always 0. */
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
	ah->ah_mac_srev = ath5k_hw_reg_read(ah, AR5K_SREV);
	return 0;
}
/*
* Read the MAC address from eeprom or platform_data
*/
/* Read the MAC address out of EEPROM words 0x1d..0x1f into @mac.
 * Returns 0 on success, -EINVAL if the EEPROM holds no address.
 * NOTE(review): AR5K_EEPROM_READ appears to be a macro with hidden
 * control flow on read failure — confirm against its definition. */
static int ath5k_pci_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
	u8 mac_d[ETH_ALEN] = {};
	u32 total, offset;
	u16 data;
	int octet;

	AR5K_EEPROM_READ(0x20, data);

	/* Words are stored high-byte-first; walk 0x1f down to 0x1d and
	 * sum them so an empty EEPROM can be detected below. */
	for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
		AR5K_EEPROM_READ(offset, data);

		total += data;
		mac_d[octet + 1] = data & 0xff;
		mac_d[octet] = data >> 8;
		octet += 2;
	}

	/* All-zeros or all-ones means no address is programmed. */
	if (!total || total == 3 * 0xffff)
		return -EINVAL;

	memcpy(mac, mac_d, ETH_ALEN);

	return 0;
}
/* Common ath_bus_opts structure */
/* Bus accessors handed to the common ath layer for PCI devices. */
static const struct ath_bus_ops ath_pci_bus_ops = {
	.ath_bus_type = ATH_PCI,
	.read_cachesize = ath5k_pci_read_cachesize,
	.eeprom_read = ath5k_pci_eeprom_read,
	.eeprom_read_mac = ath5k_pci_eeprom_read_mac,
};
/********************\
* PCI Initialization *
\********************/
/* PCI probe: bring up the device, map BAR0, allocate the mac80211 hw
 * and hand off to the common ath5k init. Unwinds via the err_* ladder
 * in reverse order of acquisition on failure. */
static int __devinit
ath5k_pci_probe(struct pci_dev *pdev,
		const struct pci_device_id *id)
{
	void __iomem *mem;
	struct ath5k_softc *sc;
	struct ieee80211_hw *hw;
	int ret;
	u8 csz;

	/*
	 * L0s needs to be disabled on all ath5k cards.
	 *
	 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
	 * by default in the future in 2.6.36) this will also mean both L1 and
	 * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
	 * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
	 * though but cannot currently undo the effect of a blacklist, for
	 * details you can read pcie_aspm_sanity_check() and see how it adjusts
	 * the device link capability.
	 *
	 * It may be possible in the future to implement some PCI API to allow
	 * drivers to override blacklists for pre 1.1 PCIe but for now it is
	 * best to accept that both L0s and L1 will be disabled completely for
	 * distributions shipping with CONFIG_PCIEASPM rather than having this
	 * issue present. Motivation for adding this new API will be to help
	 * with power consumption for some of these devices.
	 */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(&pdev->dev, "can't enable device\n");
		goto err;
	}

	/* XXX 32-bit addressing only */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "32-bit DMA not available\n");
		goto err_dis;
	}

	/*
	 * Cache line size is used to size and align various
	 * structures used to communicate with the hardware.
	 */
	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
	if (csz == 0) {
		/*
		 * Linux 2.4.18 (at least) writes the cache line size
		 * register as a 16-bit wide register which is wrong.
		 * We must have this setup properly for rx buffer
		 * DMA to work so force a reasonable value here if it
		 * comes up zero.
		 */
		csz = L1_CACHE_BYTES >> 2;
		pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
	}
	/*
	 * The default setting of latency timer yields poor results,
	 * set it to the value used by other systems. It may be worth
	 * tweaking this setting more.
	 */
	pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);

	/* Enable bus mastering */
	pci_set_master(pdev);

	/*
	 * Disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, 0x41, 0);

	ret = pci_request_region(pdev, 0, "ath5k");
	if (ret) {
		dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
		goto err_dis;
	}

	mem = pci_iomap(pdev, 0, 0);
	if (!mem) {
		dev_err(&pdev->dev, "cannot remap PCI memory region\n");
		ret = -EIO;
		goto err_reg;
	}

	/*
	 * Allocate hw (mac80211 main struct)
	 * and hw->priv (driver private data)
	 */
	hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
	if (hw == NULL) {
		dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
		ret = -ENOMEM;
		goto err_map;
	}

	dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));

	sc = hw->priv;
	sc->hw = hw;
	sc->pdev = pdev;
	sc->dev = &pdev->dev;
	sc->irq = pdev->irq;
	sc->devid = id->device;
	sc->iobase = mem; /* So we can unmap it on detach */

	/* Initialize */
	ret = ath5k_init_softc(sc, &ath_pci_bus_ops);
	if (ret)
		goto err_free;

	/* Set private data */
	pci_set_drvdata(pdev, hw);

	return 0;
err_free:
	ieee80211_free_hw(hw);
err_map:
	pci_iounmap(pdev, mem);
err_reg:
	pci_release_region(pdev, 0);
err_dis:
	pci_disable_device(pdev);
err:
	return ret;
}
/* PCI remove: tear down in reverse order of ath5k_pci_probe(). */
static void __devexit
ath5k_pci_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct ath5k_softc *sc = hw->priv;

	ath5k_deinit_softc(sc);
	pci_iounmap(pdev, sc->iobase);
	pci_release_region(pdev, 0);
	pci_disable_device(pdev);
	ieee80211_free_hw(hw);
}
#ifdef CONFIG_PM_SLEEP
/* System suspend: only the LED needs to be turned off here; the PCI
 * core handles device power state. */
static int ath5k_pci_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct ath5k_softc *sc = hw->priv;

	ath5k_led_off(sc);
	return 0;
}
/* System resume: restore the config-space quirk and the LED state. */
static int ath5k_pci_resume(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct ieee80211_hw *hw = pci_get_drvdata(pdev);
	struct ath5k_softc *sc = hw->priv;

	/*
	 * Suspend/Resume resets the PCI configuration space, so we have to
	 * re-disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state
	 */
	pci_write_config_byte(pdev, 0x41, 0);

	ath5k_led_enable(sc);
	return 0;
}
static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
#define ATH5K_PM_OPS (&ath5k_pm_ops)
#else
#define ATH5K_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
/* PCI driver glue; PM ops are compiled out without CONFIG_PM_SLEEP. */
static struct pci_driver ath5k_pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= ath5k_pci_id_table,
	.probe		= ath5k_pci_probe,
	.remove		= __devexit_p(ath5k_pci_remove),
	.driver.pm	= ATH5K_PM_OPS,
};
/*
* Module init/exit functions
*/
/* Module init: register the PCI driver, logging on failure. */
static int __init
init_ath5k_pci(void)
{
	int ret = pci_register_driver(&ath5k_pci_driver);

	if (ret)
		printk(KERN_ERR "ath5k_pci: can't register pci driver\n");

	return ret;
}
/* Module exit: unregister the PCI driver. */
static void __exit
exit_ath5k_pci(void)
{
	pci_unregister_driver(&ath5k_pci_driver);
}
module_init(init_ath5k_pci);
module_exit(exit_ath5k_pci);
| gpl-2.0 |
go2ev-devteam/hi35xx-buildroot | linux/linux-3.0.y/drivers/usb/musb/am35x.c | 2381 | 16701 | /*
* Texas Instruments AM35x "glue layer"
*
* Copyright (c) 2010, by Texas Instruments
*
* Based on the DA8xx "glue layer" code.
* Copyright (c) 2008-2009, MontaVista Software, Inc. <source@mvista.com>
*
* This file is part of the Inventra Controller Driver for Linux.
*
* The Inventra Controller Driver for Linux is free software; you
* can redistribute it and/or modify it under the terms of the GNU
* General Public License version 2 as published by the Free Software
* Foundation.
*
* The Inventra Controller Driver for Linux is distributed in
* the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
* License for more details.
*
* You should have received a copy of the GNU General Public License
* along with The Inventra Controller Driver for Linux ; if not,
* write to the Free Software Foundation, Inc., 59 Temple Place,
* Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <linux/init.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <plat/usb.h>
#include "musb_core.h"
/*
* AM35x specific definitions
*/
/* USB 2.0 OTG module registers */
#define USB_REVISION_REG 0x00
#define USB_CTRL_REG 0x04
#define USB_STAT_REG 0x08
#define USB_EMULATION_REG 0x0c
/* 0x10 Reserved */
#define USB_AUTOREQ_REG 0x14
#define USB_SRP_FIX_TIME_REG 0x18
#define USB_TEARDOWN_REG 0x1c
#define EP_INTR_SRC_REG 0x20
#define EP_INTR_SRC_SET_REG 0x24
#define EP_INTR_SRC_CLEAR_REG 0x28
#define EP_INTR_MASK_REG 0x2c
#define EP_INTR_MASK_SET_REG 0x30
#define EP_INTR_MASK_CLEAR_REG 0x34
#define EP_INTR_SRC_MASKED_REG 0x38
#define CORE_INTR_SRC_REG 0x40
#define CORE_INTR_SRC_SET_REG 0x44
#define CORE_INTR_SRC_CLEAR_REG 0x48
#define CORE_INTR_MASK_REG 0x4c
#define CORE_INTR_MASK_SET_REG 0x50
#define CORE_INTR_MASK_CLEAR_REG 0x54
#define CORE_INTR_SRC_MASKED_REG 0x58
/* 0x5c Reserved */
#define USB_END_OF_INTR_REG 0x60
/* Control register bits */
#define AM35X_SOFT_RESET_MASK 1
/* USB interrupt register bits */
#define AM35X_INTR_USB_SHIFT 16
#define AM35X_INTR_USB_MASK (0x1ff << AM35X_INTR_USB_SHIFT)
#define AM35X_INTR_DRVVBUS 0x100
#define AM35X_INTR_RX_SHIFT 16
#define AM35X_INTR_TX_SHIFT 0
#define AM35X_TX_EP_MASK 0xffff /* EP0 + 15 Tx EPs */
#define AM35X_RX_EP_MASK 0xfffe /* 15 Rx EPs */
#define AM35X_TX_INTR_MASK (AM35X_TX_EP_MASK << AM35X_INTR_TX_SHIFT)
#define AM35X_RX_INTR_MASK (AM35X_RX_EP_MASK << AM35X_INTR_RX_SHIFT)
#define USB_MENTOR_CORE_OFFSET 0x400
struct am35x_glue {
struct device *dev;
struct platform_device *musb;
struct clk *phy_clk;
struct clk *clk;
};
#define glue_to_musb(g) platform_get_drvdata(g->musb)
/*
* am35x_musb_enable - enable interrupts
*/
static void am35x_musb_enable(struct musb *musb)
{
	void __iomem *reg_base = musb->ctrl_base;
	u32 epmask;

	/* Workaround: setup IRQs through both register sets. */
	/* Build one word with Tx EP bits in the low half and Rx EP bits
	 * in the high half, matching the wrapper's layout. */
	epmask = ((musb->epmask & AM35X_TX_EP_MASK) << AM35X_INTR_TX_SHIFT) |
	       ((musb->epmask & AM35X_RX_EP_MASK) << AM35X_INTR_RX_SHIFT);

	musb_writel(reg_base, EP_INTR_MASK_SET_REG, epmask);
	musb_writel(reg_base, CORE_INTR_MASK_SET_REG, AM35X_INTR_USB_MASK);

	/* Force the DRVVBUS IRQ so we can start polling for ID change. */
	if (is_otg_enabled(musb))
		musb_writel(reg_base, CORE_INTR_SRC_SET_REG,
			    AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT);
}
/*
* am35x_musb_disable - disable HDRC and flush interrupts
*/
static void am35x_musb_disable(struct musb *musb)
{
	void __iomem *reg_base = musb->ctrl_base;

	/* Mask all core and endpoint interrupts in the wrapper. */
	musb_writel(reg_base, CORE_INTR_MASK_CLEAR_REG, AM35X_INTR_USB_MASK);
	musb_writel(reg_base, EP_INTR_MASK_CLEAR_REG,
			 AM35X_TX_INTR_MASK | AM35X_RX_INTR_MASK);
	/* Drop any active session and re-arm the level interrupt line. */
	musb_writeb(musb->mregs, MUSB_DEVCTL, 0);
	musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
}
#ifdef CONFIG_USB_MUSB_HDRC_HCD
#define portstate(stmt) stmt
#else
#define portstate(stmt)
#endif
/* VBUS is controlled by the transceiver on AM35x; only sanity-check
 * that nobody tries to drive VBUS while in peripheral mode. */
static void am35x_musb_set_vbus(struct musb *musb, int is_on)
{
	WARN_ON(is_on && is_peripheral_active(musb));
}
#define POLL_SECONDS 2
static struct timer_list otg_workaround;
/* Periodic workaround timer advancing the OTG state machine, since the
 * AM35x hardware does not raise ID-change events itself. */
static void otg_timer(unsigned long _musb)
{
	struct musb		*musb = (void *)_musb;
	void __iomem		*mregs = musb->mregs;
	u8			devctl;
	unsigned long		flags;

	/*
	 * We poll because AM35x's won't expose several OTG-critical
	 * status change events (from the transceiver) otherwise.
	 */
	devctl = musb_readb(mregs, MUSB_DEVCTL);
	dev_dbg(musb->controller, "Poll devctl %02x (%s)\n", devctl,
		otg_state_string(musb->xceiv->state));

	spin_lock_irqsave(&musb->lock, flags);
	switch (musb->xceiv->state) {
	case OTG_STATE_A_WAIT_BCON:
		/* End the session, then re-read DEVCTL to sample the ID
		 * pin and pick the A- or B-side idle state. */
		devctl &= ~MUSB_DEVCTL_SESSION;
		musb_writeb(musb->mregs, MUSB_DEVCTL, devctl);

		devctl = musb_readb(musb->mregs, MUSB_DEVCTL);
		if (devctl & MUSB_DEVCTL_BDEVICE) {
			musb->xceiv->state = OTG_STATE_B_IDLE;
			MUSB_DEV_MODE(musb);
		} else {
			musb->xceiv->state = OTG_STATE_A_IDLE;
			MUSB_HST_MODE(musb);
		}
		break;
	case OTG_STATE_A_WAIT_VFALL:
		/* Re-inject a VBUS-error interrupt to restart the session. */
		musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
		musb_writel(musb->ctrl_base, CORE_INTR_SRC_SET_REG,
			    MUSB_INTR_VBUSERROR << AM35X_INTR_USB_SHIFT);
		break;
	case OTG_STATE_B_IDLE:
		if (!is_peripheral_enabled(musb))
			break;

		/* Keep polling while we remain the B-device. */
		devctl = musb_readb(mregs, MUSB_DEVCTL);
		if (devctl & MUSB_DEVCTL_BDEVICE)
			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
		else
			musb->xceiv->state = OTG_STATE_A_IDLE;
		break;
	default:
		break;
	}
	spin_unlock_irqrestore(&musb->lock, flags);
}
/* (Re)arm or cancel the OTG polling timer depending on activity.
 * @timeout == 0 means "soon" (3 ms from now). */
static void am35x_musb_try_idle(struct musb *musb, unsigned long timeout)
{
	static unsigned long last_timer;

	if (!is_otg_enabled(musb))
		return;

	if (timeout == 0)
		timeout = jiffies + msecs_to_jiffies(3);

	/* Never idle if active, or when VBUS timeout is not set as host */
	if (musb->is_active || (musb->a_wait_bcon == 0 &&
				musb->xceiv->state == OTG_STATE_A_WAIT_BCON)) {
		dev_dbg(musb->controller, "%s active, deleting timer\n",
			otg_state_string(musb->xceiv->state));
		del_timer(&otg_workaround);
		last_timer = jiffies;
		return;
	}

	/* Don't shorten an already-pending, longer timer. */
	if (time_after(last_timer, timeout) && timer_pending(&otg_workaround)) {
		dev_dbg(musb->controller, "Longer idle timer already pending, ignoring...\n");
		return;
	}
	last_timer = timeout;

	dev_dbg(musb->controller, "%s inactive, starting idle timer for %u ms\n",
		otg_state_string(musb->xceiv->state),
		jiffies_to_msecs(timeout - jiffies));
	mod_timer(&otg_workaround, timeout);
}
/* Top-level interrupt handler: decodes the AM35x wrapper's endpoint and
 * core interrupt sources, runs the DRVVBUS workaround for the missing
 * ID-change IRQ, then dispatches to the generic musb_interrupt(). */
static irqreturn_t am35x_musb_interrupt(int irq, void *hci)
{
	struct musb  *musb = hci;
	void __iomem *reg_base = musb->ctrl_base;
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;
	u32 epintr, usbintr;

	spin_lock_irqsave(&musb->lock, flags);

	/* Get endpoint interrupts */
	epintr = musb_readl(reg_base, EP_INTR_SRC_MASKED_REG);

	if (epintr) {
		/* Ack in the wrapper, then split into Rx/Tx bitmaps for
		 * the generic core. */
		musb_writel(reg_base, EP_INTR_SRC_CLEAR_REG, epintr);

		musb->int_rx =
			(epintr & AM35X_RX_INTR_MASK) >> AM35X_INTR_RX_SHIFT;
		musb->int_tx =
			(epintr & AM35X_TX_INTR_MASK) >> AM35X_INTR_TX_SHIFT;
	}

	/* Get usb core interrupts */
	usbintr = musb_readl(reg_base, CORE_INTR_SRC_MASKED_REG);
	if (!usbintr && !epintr)
		goto eoi;

	if (usbintr) {
		musb_writel(reg_base, CORE_INTR_SRC_CLEAR_REG, usbintr);

		musb->int_usb =
			(usbintr & AM35X_INTR_USB_MASK) >> AM35X_INTR_USB_SHIFT;
	}
	/*
	 * DRVVBUS IRQs are the only proxy we have (a very poor one!) for
	 * AM35x's missing ID change IRQ. We need an ID change IRQ to
	 * switch appropriately between halves of the OTG state machine.
	 * Managing DEVCTL.SESSION per Mentor docs requires that we know its
	 * value but DEVCTL.BDEVICE is invalid without DEVCTL.SESSION set.
	 * Also, DRVVBUS pulses for SRP (but not at 5V) ...
	 */
	if (usbintr & (AM35X_INTR_DRVVBUS << AM35X_INTR_USB_SHIFT)) {
		int drvvbus = musb_readl(reg_base, USB_STAT_REG);
		void __iomem *mregs = musb->mregs;
		u8 devctl = musb_readb(mregs, MUSB_DEVCTL);
		int err;

		err = is_host_enabled(musb) && (musb->int_usb &
						MUSB_INTR_VBUSERROR);
		if (err) {
			/*
			 * The Mentor core doesn't debounce VBUS as needed
			 * to cope with device connect current spikes. This
			 * means it's not uncommon for bus-powered devices
			 * to get VBUS errors during enumeration.
			 *
			 * This is a workaround, but newer RTL from Mentor
			 * seems to allow a better one: "re"-starting sessions
			 * without waiting for VBUS to stop registering in
			 * devctl.
			 */
			musb->int_usb &= ~MUSB_INTR_VBUSERROR;
			musb->xceiv->state = OTG_STATE_A_WAIT_VFALL;
			mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);
			WARNING("VBUS error workaround (delay coming)\n");
		} else if (is_host_enabled(musb) && drvvbus) {
			MUSB_HST_MODE(musb);
			musb->xceiv->default_a = 1;
			musb->xceiv->state = OTG_STATE_A_WAIT_VRISE;
			portstate(musb->port1_status |= USB_PORT_STAT_POWER);
			del_timer(&otg_workaround);
		} else {
			musb->is_active = 0;
			MUSB_DEV_MODE(musb);
			musb->xceiv->default_a = 0;
			musb->xceiv->state = OTG_STATE_B_IDLE;
			portstate(musb->port1_status &= ~USB_PORT_STAT_POWER);
		}

		/* NOTE: this must complete power-on within 100 ms. */
		dev_dbg(musb->controller, "VBUS %s (%s)%s, devctl %02x\n",
				drvvbus ? "on" : "off",
				otg_state_string(musb->xceiv->state),
				err ? " ERROR" : "",
				devctl);
		ret = IRQ_HANDLED;
	}

	/* Let the generic core service any pending EP/USB interrupts. */
	if (musb->int_tx || musb->int_rx || musb->int_usb)
		ret |= musb_interrupt(musb);

 eoi:
	/* EOI needs to be written for the IRQ to be re-asserted. */
	if (ret == IRQ_HANDLED || epintr || usbintr) {
		/* clear level interrupt */
		if (data->clear_irq)
			data->clear_irq();
		/* write EOI */
		musb_writel(reg_base, USB_END_OF_INTR_REG, 0);
	}

	/* Poll for ID change */
	if (is_otg_enabled(musb) && musb->xceiv->state == OTG_STATE_B_IDLE)
		mod_timer(&otg_workaround, jiffies + POLL_SECONDS * HZ);

	spin_unlock_irqrestore(&musb->lock, flags);

	return ret;
}
/* Delegate host/peripheral/OTG mode selection to the board code;
 * -EIO when the board provides no set_mode hook. */
static int am35x_musb_set_mode(struct musb *musb, u8 musb_mode)
{
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;

	if (!data->set_mode)
		return -EIO;

	data->set_mode(musb_mode);
	return 0;
}
/* Platform init hook called by the musb core: locate the Mentor core
 * inside the wrapper, grab a transceiver, reset the block and power up
 * the PHY. Returns -ENODEV when the block is unclocked or no
 * transceiver is available. */
static int am35x_musb_init(struct musb *musb)
{
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;
	void __iomem *reg_base = musb->ctrl_base;
	u32 rev;

	/* The Mentor registers sit behind the AM35x wrapper registers. */
	musb->mregs += USB_MENTOR_CORE_OFFSET;

	/* Returns zero if e.g. not clocked */
	rev = musb_readl(reg_base, USB_REVISION_REG);
	if (!rev)
		return -ENODEV;

	usb_nop_xceiv_register();
	musb->xceiv = otg_get_transceiver();
	if (!musb->xceiv)
		return -ENODEV;

	if (is_host_enabled(musb))
		setup_timer(&otg_workaround, otg_timer, (unsigned long) musb);

	/* Reset the musb */
	if (data->reset)
		data->reset();

	/* Reset the controller */
	musb_writel(reg_base, USB_CTRL_REG, AM35X_SOFT_RESET_MASK);

	/* Start the on-chip PHY and its PLL. */
	if (data->set_phy_power)
		data->set_phy_power(1);

	msleep(5);

	musb->isr = am35x_musb_interrupt;

	/* clear level interrupt */
	if (data->clear_irq)
		data->clear_irq();

	return 0;
}
/* Platform exit hook: stop the workaround timer, power down the PHY
 * and release the transceiver taken in am35x_musb_init(). */
static int am35x_musb_exit(struct musb *musb)
{
	struct device *dev = musb->controller;
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;

	if (is_host_enabled(musb))
		del_timer_sync(&otg_workaround);

	/* Shutdown the on-chip PHY and its PLL. */
	if (data->set_phy_power)
		data->set_phy_power(0);

	otg_put_transceiver(musb->xceiv);
	usb_nop_xceiv_unregister();

	return 0;
}
/* AM35x supports only 32bit read operation */
void musb_read_fifo(struct musb_hw_ep *hw_ep, u16 len, u8 *dst)
{
	void __iomem *fifo = hw_ep->fifo;
	u32 val;
	int i;

	/* Read for 32bit-aligned destination address */
	/* Fast path: bulk-read whole words, leaving len = 0..3 tail. */
	if (likely((0x03 & (unsigned long) dst) == 0) && len >= 4) {
		readsl(fifo, dst, len >> 2);
		dst += len & ~0x03;
		len &= 0x03;
	}
	/*
	 * Now read the remaining 1 to 3 byte or complete length if
	 * unaligned address.
	 */
	/* Unaligned destination: the fast path was skipped, so len may
	 * still be large; pull one word at a time. */
	if (len > 4) {
		for (i = 0; i < (len >> 2); i++) {
			*(u32 *) dst = musb_readl(fifo, 0);
			dst += 4;
		}
		len &= 0x03;
	}
	/* Final partial word (1..4 bytes): read a word, copy only what
	 * is needed. */
	if (len > 0) {
		val = musb_readl(fifo, 0);
		memcpy(dst, &val, len);
	}
}
/* Platform callbacks handed to the generic musb core for AM35x. */
static const struct musb_platform_ops am35x_ops = {
	.init		= am35x_musb_init,
	.exit		= am35x_musb_exit,

	.enable		= am35x_musb_enable,
	.disable	= am35x_musb_disable,

	.set_mode	= am35x_musb_set_mode,
	.try_idle	= am35x_musb_try_idle,

	.set_vbus	= am35x_musb_set_vbus,
};
static u64 am35x_dmamask = DMA_BIT_MASK(32);
/* Glue probe: allocate the child "musb-hdrc" device, acquire and enable
 * the "fck" (PHY) and "ick" clocks, copy resources/platform data over,
 * and register the child. Unwinds via the err* ladder on failure. */
static int __init am35x_probe(struct platform_device *pdev)
{
	struct musb_hdrc_platform_data	*pdata = pdev->dev.platform_data;
	struct platform_device		*musb;
	struct am35x_glue		*glue;

	struct clk			*phy_clk;
	struct clk			*clk;

	int				ret = -ENOMEM;

	glue = kzalloc(sizeof(*glue), GFP_KERNEL);
	if (!glue) {
		dev_err(&pdev->dev, "failed to allocate glue context\n");
		goto err0;
	}

	musb = platform_device_alloc("musb-hdrc", -1);
	if (!musb) {
		dev_err(&pdev->dev, "failed to allocate musb device\n");
		goto err1;
	}

	phy_clk = clk_get(&pdev->dev, "fck");
	if (IS_ERR(phy_clk)) {
		dev_err(&pdev->dev, "failed to get PHY clock\n");
		ret = PTR_ERR(phy_clk);
		goto err2;
	}

	clk = clk_get(&pdev->dev, "ick");
	if (IS_ERR(clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		ret = PTR_ERR(clk);
		goto err3;
	}

	ret = clk_enable(phy_clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable PHY clock\n");
		goto err4;
	}

	ret = clk_enable(clk);
	if (ret) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto err5;
	}

	/* The child inherits the parent's DMA configuration. */
	musb->dev.parent		= &pdev->dev;
	musb->dev.dma_mask		= &am35x_dmamask;
	musb->dev.coherent_dma_mask	= am35x_dmamask;

	glue->dev			= &pdev->dev;
	glue->musb			= musb;
	glue->phy_clk			= phy_clk;
	glue->clk			= clk;

	pdata->platform_ops		= &am35x_ops;

	platform_set_drvdata(pdev, glue);

	ret = platform_device_add_resources(musb, pdev->resource,
			pdev->num_resources);
	if (ret) {
		dev_err(&pdev->dev, "failed to add resources\n");
		goto err6;
	}

	ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
	if (ret) {
		dev_err(&pdev->dev, "failed to add platform_data\n");
		goto err6;
	}

	ret = platform_device_add(musb);
	if (ret) {
		dev_err(&pdev->dev, "failed to register musb device\n");
		goto err6;
	}

	return 0;

	/* Error unwinding, strictly reverse order of acquisition. */
err6:
	clk_disable(clk);

err5:
	clk_disable(phy_clk);

err4:
	clk_put(clk);

err3:
	clk_put(phy_clk);

err2:
	platform_device_put(musb);

err1:
	kfree(glue);

err0:
	return ret;
}
/*
 * am35x_remove - tear down the glue layer.
 *
 * Releases everything am35x_probe() acquired, in reverse order:
 * unregister + drop the child device, disable the clocks, release the
 * clock references, then free the glue context.  The order is critical;
 * do not reorder these calls.
 */
static int __exit am35x_remove(struct platform_device *pdev)
{
struct am35x_glue *glue = platform_get_drvdata(pdev);
platform_device_del(glue->musb);
platform_device_put(glue->musb);
clk_disable(glue->clk);
clk_disable(glue->phy_clk);
clk_put(glue->clk);
clk_put(glue->phy_clk);
kfree(glue);
return 0;
}
#ifdef CONFIG_PM
/*
 * am35x_suspend - system-suspend hook: power down the PHY and gate clocks.
 *
 * Mirror image of am35x_resume(): PHY power is cut first (when the board
 * provides a set_phy_power hook), then both clocks are disabled.
 */
static int am35x_suspend(struct device *dev)
{
struct am35x_glue *glue = dev_get_drvdata(dev);
struct musb_hdrc_platform_data *plat = dev->platform_data;
struct omap_musb_board_data *data = plat->board_data;
/* Shutdown the on-chip PHY and its PLL. */
if (data->set_phy_power)
data->set_phy_power(0);
clk_disable(glue->phy_clk);
clk_disable(glue->clk);
return 0;
}
/*
 * am35x_resume - system-resume hook: power up the PHY and ungate clocks.
 *
 * Fix over the previous version: if enabling the interface clock fails,
 * the already-enabled PHY clock is now disabled again (and the PHY powered
 * back down) instead of being leaked, keeping enable/disable counts
 * balanced with am35x_suspend().
 *
 * Returns 0 on success or the clk_enable() error code.
 */
static int am35x_resume(struct device *dev)
{
	struct am35x_glue *glue = dev_get_drvdata(dev);
	struct musb_hdrc_platform_data *plat = dev->platform_data;
	struct omap_musb_board_data *data = plat->board_data;
	int ret;

	/* Start the on-chip PHY and its PLL. */
	if (data->set_phy_power)
		data->set_phy_power(1);

	ret = clk_enable(glue->phy_clk);
	if (ret) {
		dev_err(dev, "failed to enable PHY clock\n");
		goto err_phy_off;
	}

	ret = clk_enable(glue->clk);
	if (ret) {
		dev_err(dev, "failed to enable clock\n");
		goto err_phy_clk;
	}

	return 0;

err_phy_clk:
	/* Do not leave the PHY clock running if the core clock failed */
	clk_disable(glue->phy_clk);
err_phy_off:
	if (data->set_phy_power)
		data->set_phy_power(0);
	return ret;
}
/* Only system suspend/resume are implemented; no runtime PM here. */
static struct dev_pm_ops am35x_pm_ops = {
.suspend = am35x_suspend,
.resume = am35x_resume,
};
#define DEV_PM_OPS &am35x_pm_ops
#else
#define DEV_PM_OPS NULL
#endif
/*
 * No .probe here: the driver is registered with platform_driver_probe()
 * (see am35x_init), which binds the __init probe function once at boot.
 */
static struct platform_driver am35x_driver = {
.remove = __exit_p(am35x_remove),
.driver = {
.name = "musb-am35x",
.pm = DEV_PM_OPS,
},
};
MODULE_DESCRIPTION("AM35x MUSB Glue Layer");
MODULE_AUTHOR("Ajay Kumar Gupta <ajay.gupta@ti.com>");
MODULE_LICENSE("GPL v2");
/*
 * platform_driver_probe() is used (rather than platform_driver_register)
 * because am35x_probe is __init and the device is known to exist at boot.
 * subsys_initcall ensures the glue registers before dependent drivers.
 */
static int __init am35x_init(void)
{
return platform_driver_probe(&am35x_driver, am35x_probe);
}
subsys_initcall(am35x_init);
/* Module unload: unregister the driver (per-device cleanup in am35x_remove). */
static void __exit am35x_exit(void)
{
platform_driver_unregister(&am35x_driver);
}
module_exit(am35x_exit);
| gpl-2.0 |
MoKee/android_kernel_samsung_piranha | fs/ubifs/orphan.c | 2381 | 25592 | /*
* This file is part of UBIFS.
*
* Copyright (C) 2006-2008 Nokia Corporation.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc., 51
* Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*
* Author: Adrian Hunter
*/
#include "ubifs.h"
/*
* An orphan is an inode number whose inode node has been committed to the index
* with a link count of zero. That happens when an open file is deleted
* (unlinked) and then a commit is run. In the normal course of events the inode
* would be deleted when the file is closed. However in the case of an unclean
* unmount, orphans need to be accounted for. After an unclean unmount, the
* orphans' inodes must be deleted which means either scanning the entire index
* looking for them, or keeping a list on flash somewhere. This unit implements
* the latter approach.
*
* The orphan area is a fixed number of LEBs situated between the LPT area and
* the main area. The number of orphan area LEBs is specified when the file
* system is created. The minimum number is 1. The size of the orphan area
* should be so that it can hold the maximum number of orphans that are expected
* to ever exist at one time.
*
* The number of orphans that can fit in a LEB is:
*
* (c->leb_size - UBIFS_ORPH_NODE_SZ) / sizeof(__le64)
*
* For example: a 15872 byte LEB can fit 1980 orphans so 1 LEB may be enough.
*
* Orphans are accumulated in a rb-tree. When an inode's link count drops to
* zero, the inode number is added to the rb-tree. It is removed from the tree
* when the inode is deleted. Any new orphans that are in the orphan tree when
* the commit is run, are written to the orphan area in 1 or more orphan nodes.
* If the orphan area is full, it is consolidated to make space. There is
* always enough space because validation prevents the user from creating more
* than the maximum number of orphans allowed.
*/
#ifdef CONFIG_UBIFS_FS_DEBUG
static int dbg_check_orphans(struct ubifs_info *c);
#else
#define dbg_check_orphans(c) 0
#endif
/**
* ubifs_add_orphan - add an orphan.
* @c: UBIFS file-system description object
* @inum: orphan inode number
*
* Add an orphan. This function is called when an inodes link count drops to
* zero.
*/
/**
 * ubifs_add_orphan - add an orphan.
 * @c: UBIFS file-system description object
 * @inum: orphan inode number
 *
 * Record @inum as an orphan when its inode link count drops to zero.  The
 * orphan is inserted into the rb-tree (keyed by inode number) and appended
 * to both the full orphan list and the new-orphan list, all under
 * @c->orphan_lock.  Returns %0 on success (including the benign
 * already-present case), %-ENOMEM on allocation failure, or %-ENFILE when
 * the orphan area is full.
 */
int ubifs_add_orphan(struct ubifs_info *c, ino_t inum)
{
	struct ubifs_orphan *orphan, *o;
	struct rb_node **link, *parent = NULL;

	orphan = kzalloc(sizeof(struct ubifs_orphan), GFP_NOFS);
	if (!orphan)
		return -ENOMEM;
	orphan->inum = inum;
	orphan->new = 1;

	spin_lock(&c->orphan_lock);
	if (c->tot_orphans >= c->max_orphans) {
		/* Orphan area full - validation should normally prevent this */
		spin_unlock(&c->orphan_lock);
		kfree(orphan);
		return -ENFILE;
	}
	/* Walk down to the rb-tree insertion point */
	for (link = &c->orph_tree.rb_node; *link; ) {
		parent = *link;
		o = rb_entry(parent, struct ubifs_orphan, rb);
		if (inum < o->inum)
			link = &parent->rb_left;
		else if (inum > o->inum)
			link = &parent->rb_right;
		else {
			dbg_err("orphaned twice");
			spin_unlock(&c->orphan_lock);
			kfree(orphan);
			return 0;
		}
	}
	c->tot_orphans += 1;
	c->new_orphans += 1;
	rb_link_node(&orphan->rb, parent, link);
	rb_insert_color(&orphan->rb, &c->orph_tree);
	list_add_tail(&orphan->list, &c->orph_list);
	list_add_tail(&orphan->new_list, &c->orph_new);
	spin_unlock(&c->orphan_lock);
	dbg_gen("ino %lu", (unsigned long)inum);
	return 0;
}
/**
* ubifs_delete_orphan - delete an orphan.
* @c: UBIFS file-system description object
* @inum: orphan inode number
*
* Delete an orphan. This function is called when an inode is deleted.
*/
/**
 * ubifs_delete_orphan - delete an orphan.
 * @c: UBIFS file-system description object
 * @inum: orphan inode number
 *
 * Delete an orphan. This function is called when an inode is deleted.
 *
 * If the orphan is currently being committed (it is on the cnext chain),
 * actual freeing is deferred by chaining it onto dnext; erase_deleted()
 * frees it at the end of the commit.
 */
void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum)
{
struct ubifs_orphan *o;
struct rb_node *p;
spin_lock(&c->orphan_lock);
/* Binary search of the rb-tree keyed by inode number */
p = c->orph_tree.rb_node;
while (p) {
o = rb_entry(p, struct ubifs_orphan, rb);
if (inum < o->inum)
p = p->rb_left;
else if (inum > o->inum)
p = p->rb_right;
else {
/* Already on the deletion list - nothing more to do */
if (o->dnext) {
spin_unlock(&c->orphan_lock);
dbg_gen("deleted twice ino %lu",
(unsigned long)inum);
return;
}
/* Being committed right now - defer the free until commit end */
if (o->cnext) {
o->dnext = c->orph_dnext;
c->orph_dnext = o;
spin_unlock(&c->orphan_lock);
dbg_gen("delete later ino %lu",
(unsigned long)inum);
return;
}
/* Not involved in a commit - remove and free immediately */
rb_erase(p, &c->orph_tree);
list_del(&o->list);
c->tot_orphans -= 1;
if (o->new) {
list_del(&o->new_list);
c->new_orphans -= 1;
}
spin_unlock(&c->orphan_lock);
kfree(o);
dbg_gen("inum %lu", (unsigned long)inum);
return;
}
}
spin_unlock(&c->orphan_lock);
dbg_err("missing orphan ino %lu", (unsigned long)inum);
dbg_dump_stack();
}
/**
* ubifs_orphan_start_commit - start commit of orphans.
* @c: UBIFS file-system description object
*
* Start commit of orphans.
*/
/**
 * ubifs_orphan_start_commit - start commit of orphans.
 * @c: UBIFS file-system description object
 *
 * Start commit of orphans: move all new orphans onto the singly-linked
 * cnext chain (c->orph_cnext) that write_orph_node() will consume, and
 * reset the new-orphan accounting.
 */
int ubifs_orphan_start_commit(struct ubifs_info *c)
{
struct ubifs_orphan *orphan, **last;
spin_lock(&c->orphan_lock);
last = &c->orph_cnext;
list_for_each_entry(orphan, &c->orph_new, new_list) {
ubifs_assert(orphan->new);
orphan->new = 0;
*last = orphan;
last = &orphan->cnext;
}
/*
 * NOTE(review): after list_for_each_entry() 'orphan' is the list head
 * cast to an orphan, not a valid object, so 'orphan->cnext' reads
 * arbitrary memory here.  Upstream fixed this (commit 2928f0d0c5eb,
 * "UBIFS: fix use of freed ubifs_orphan objects") by terminating with
 * '*last = NULL;' together with a new 'cmt' flag consulted by
 * ubifs_delete_orphan(); the full fix needs the struct change in
 * ubifs.h, so it cannot be applied safely here in isolation - backport
 * the whole upstream commit instead.
 */
*last = orphan->cnext;
c->cmt_orphans = c->new_orphans;
c->new_orphans = 0;
dbg_cmt("%d orphans to commit", c->cmt_orphans);
INIT_LIST_HEAD(&c->orph_new);
/* Remember whether any orphans exist, for the master-node flag */
if (c->tot_orphans == 0)
c->no_orphs = 1;
else
c->no_orphs = 0;
spin_unlock(&c->orphan_lock);
return 0;
}
/**
* avail_orphs - calculate available space.
* @c: UBIFS file-system description object
*
* This function returns the number of orphans that can be written in the
* available space.
*/
/**
 * avail_orphs - calculate available space.
 * @c: UBIFS file-system description object
 *
 * This function returns the number of orphans that can still be written:
 * the capacity of the whole LEBs after the orphan head plus whatever room
 * remains in the current head LEB.
 */
static int avail_orphs(struct ubifs_info *c)
{
	int unused_lebs, space, tail;

	/* Whole LEBs beyond the current orphan head */
	unused_lebs = c->orph_lebs - (c->ohead_lnum - c->orph_first) - 1;
	space = unused_lebs *
		((c->leb_size - UBIFS_ORPH_NODE_SZ) / sizeof(__le64));
	/* Room left in the head LEB itself, if a node header still fits */
	tail = c->leb_size - c->ohead_offs;
	if (tail >= UBIFS_ORPH_NODE_SZ + sizeof(__le64))
		space += (tail - UBIFS_ORPH_NODE_SZ) / sizeof(__le64);
	return space;
}
/**
* tot_avail_orphs - calculate total space.
* @c: UBIFS file-system description object
*
* This function returns the number of orphans that can be written in half
* the total space. That leaves half the space for adding new orphans.
*/
/**
 * tot_avail_orphs - calculate total space.
 * @c: UBIFS file-system description object
 *
 * This function returns the number of orphans that fit in half of the whole
 * orphan area; the other half is kept free so new orphans can always be
 * added while a commit is in progress.
 */
static int tot_avail_orphs(struct ubifs_info *c)
{
	int per_leb;

	/* Orphan inode numbers that fit into a single LEB */
	per_leb = (c->leb_size - UBIFS_ORPH_NODE_SZ) / sizeof(__le64);
	return (c->orph_lebs * per_leb) / 2;
}
/**
* do_write_orph_node - write a node to the orphan head.
* @c: UBIFS file-system description object
* @len: length of node
* @atomic: write atomically
*
* This function writes a node to the orphan head from the orphan buffer. If
* %atomic is not zero, then the write is done atomically. On success, %0 is
* returned, otherwise a negative error code is returned.
*/
/**
 * do_write_orph_node - write a node to the orphan head.
 * @c: UBIFS file-system description object
 * @len: length of node
 * @atomic: write atomically
 *
 * This function writes a node to the orphan head from the orphan buffer. If
 * %atomic is not zero, then the write is done atomically. On success, %0 is
 * returned, otherwise a negative error code is returned.
 */
static int do_write_orph_node(struct ubifs_info *c, int len, int atomic)
{
int err = 0;
if (atomic) {
/* Atomic path is only used when (re)writing a LEB from the start */
ubifs_assert(c->ohead_offs == 0);
ubifs_prepare_node(c, c->orph_buf, len, 1);
len = ALIGN(len, c->min_io_size);
err = ubifs_leb_change(c, c->ohead_lnum, c->orph_buf, len,
UBI_SHORTTERM);
} else {
if (c->ohead_offs == 0) {
/* Ensure LEB has been unmapped */
err = ubifs_leb_unmap(c, c->ohead_lnum);
if (err)
return err;
}
err = ubifs_write_node(c, c->orph_buf, len, c->ohead_lnum,
c->ohead_offs, UBI_SHORTTERM);
}
return err;
}
/**
* write_orph_node - write an orphan node.
* @c: UBIFS file-system description object
* @atomic: write atomically
*
* This function builds an orphan node from the cnext list and writes it to the
* orphan head. On success, %0 is returned, otherwise a negative error code
* is returned.
*/
/**
 * write_orph_node - write an orphan node.
 * @c: UBIFS file-system description object
 * @atomic: write atomically
 *
 * This function builds an orphan node from the cnext list and writes it to the
 * orphan head. On success, %0 is returned, otherwise a negative error code
 * is returned.
 */
static int write_orph_node(struct ubifs_info *c, int atomic)
{
struct ubifs_orphan *orphan, *cnext;
struct ubifs_orph_node *orph;
int gap, err, len, cnt, i;
ubifs_assert(c->cmt_orphans > 0);
gap = c->leb_size - c->ohead_offs;
/* Not even one inode number fits - move the head to the next LEB */
if (gap < UBIFS_ORPH_NODE_SZ + sizeof(__le64)) {
c->ohead_lnum += 1;
c->ohead_offs = 0;
gap = c->leb_size;
if (c->ohead_lnum > c->orph_last) {
/*
* We limit the number of orphans so that this should
* never happen.
*/
ubifs_err("out of space in orphan area");
return -EINVAL;
}
}
/* As many orphans as fit in the gap, capped by what is left to commit */
cnt = (gap - UBIFS_ORPH_NODE_SZ) / sizeof(__le64);
if (cnt > c->cmt_orphans)
cnt = c->cmt_orphans;
len = UBIFS_ORPH_NODE_SZ + cnt * sizeof(__le64);
ubifs_assert(c->orph_buf);
orph = c->orph_buf;
orph->ch.node_type = UBIFS_ORPH_NODE;
spin_lock(&c->orphan_lock);
/* Consume 'cnt' entries from the cnext chain built at start-commit */
cnext = c->orph_cnext;
for (i = 0; i < cnt; i++) {
orphan = cnext;
orph->inos[i] = cpu_to_le64(orphan->inum);
cnext = orphan->cnext;
orphan->cnext = NULL;
}
c->orph_cnext = cnext;
c->cmt_orphans -= cnt;
spin_unlock(&c->orphan_lock);
if (c->cmt_orphans)
orph->cmt_no = cpu_to_le64(c->cmt_no);
else
/* Mark the last node of the commit */
orph->cmt_no = cpu_to_le64((c->cmt_no) | (1ULL << 63));
ubifs_assert(c->ohead_offs + len <= c->leb_size);
ubifs_assert(c->ohead_lnum >= c->orph_first);
ubifs_assert(c->ohead_lnum <= c->orph_last);
err = do_write_orph_node(c, len, atomic);
/* Advance the head past the node, 8-byte aligned for the next one */
c->ohead_offs += ALIGN(len, c->min_io_size);
c->ohead_offs = ALIGN(c->ohead_offs, 8);
return err;
}
/**
* write_orph_nodes - write orphan nodes until there are no more to commit.
* @c: UBIFS file-system description object
* @atomic: write atomically
*
* This function writes orphan nodes for all the orphans to commit. On success,
* %0 is returned, otherwise a negative error code is returned.
*/
/**
 * write_orph_nodes - write orphan nodes until there are no more to commit.
 * @c: UBIFS file-system description object
 * @atomic: write atomically
 *
 * This function writes orphan nodes for all the orphans to commit. On success,
 * %0 is returned, otherwise a negative error code is returned.
 *
 * Fix over the previous version: the redundant 'lnum = c->ohead_lnum + 1;'
 * statement before the for-loop (a dead store immediately overwritten by the
 * loop initializer) has been removed.
 */
static int write_orph_nodes(struct ubifs_info *c, int atomic)
{
	int err;

	while (c->cmt_orphans > 0) {
		err = write_orph_node(c, atomic);
		if (err)
			return err;
	}
	if (atomic) {
		int lnum;

		/* Unmap any unused LEBs after consolidation */
		for (lnum = c->ohead_lnum + 1; lnum <= c->orph_last; lnum++) {
			err = ubifs_leb_unmap(c, lnum);
			if (err)
				return err;
		}
	}
	return 0;
}
/**
* consolidate - consolidate the orphan area.
* @c: UBIFS file-system description object
*
* This function enables consolidation by putting all the orphans into the list
* to commit. The list is in the order that the orphans were added, and the
* LEBs are written atomically in order, so at no time can orphans be lost by
* an unclean unmount.
*
* This function returns %0 on success and a negative error code on failure.
*/
/**
 * consolidate - consolidate the orphan area.
 * @c: UBIFS file-system description object
 *
 * This function enables consolidation by putting all the orphans into the list
 * to commit. The list is in the order that the orphans were added, and the
 * LEBs are written atomically in order, so at no time can orphans be lost by
 * an unclean unmount.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int consolidate(struct ubifs_info *c)
{
int tot_avail = tot_avail_orphs(c), err = 0;
spin_lock(&c->orphan_lock);
dbg_cmt("there is space for %d orphans and there are %d",
tot_avail, c->tot_orphans);
if (c->tot_orphans - c->new_orphans <= tot_avail) {
struct ubifs_orphan *orphan, **last;
int cnt = 0;
/* Change the cnext list to include all non-new orphans */
last = &c->orph_cnext;
list_for_each_entry(orphan, &c->orph_list, list) {
if (orphan->new)
continue;
*last = orphan;
last = &orphan->cnext;
cnt += 1;
}
/*
 * NOTE(review): same iterator-after-loop defect as in
 * ubifs_orphan_start_commit(): 'orphan' is invalid here, so
 * 'orphan->cnext' reads arbitrary memory.  Upstream replaced
 * this with '*last = NULL;' as part of commit 2928f0d0c5eb
 * ("UBIFS: fix use of freed ubifs_orphan objects"), which also
 * changes struct ubifs_orphan - backport the whole commit.
 */
*last = orphan->cnext;
ubifs_assert(cnt == c->tot_orphans - c->new_orphans);
c->cmt_orphans = cnt;
/* Rewind the head: the area is rewritten from the first LEB */
c->ohead_lnum = c->orph_first;
c->ohead_offs = 0;
} else {
/*
* We limit the number of orphans so that this should
* never happen.
*/
ubifs_err("out of space in orphan area");
err = -EINVAL;
}
spin_unlock(&c->orphan_lock);
return err;
}
/**
* commit_orphans - commit orphans.
* @c: UBIFS file-system description object
*
* This function commits orphans to flash. On success, %0 is returned,
* otherwise a negative error code is returned.
*/
/**
 * commit_orphans - commit orphans.
 * @c: UBIFS file-system description object
 *
 * This function commits orphans to flash. If the free space left in the
 * orphan area cannot hold the pending orphans, the area is consolidated
 * first and the write is then done atomically. On success, %0 is returned,
 * otherwise a negative error code is returned.
 */
static int commit_orphans(struct ubifs_info *c)
{
	int atomic = 0, err;

	ubifs_assert(c->cmt_orphans > 0);
	if (avail_orphs(c) < c->cmt_orphans) {
		/* Not enough space to write new orphans, so consolidate */
		err = consolidate(c);
		if (err)
			return err;
		atomic = 1;
	}
	return write_orph_nodes(c, atomic);
}
/**
* erase_deleted - erase the orphans marked for deletion.
* @c: UBIFS file-system description object
*
* During commit, the orphans being committed cannot be deleted, so they are
* marked for deletion and deleted by this function. Also, the recovery
* adds killed orphans to the deletion list, and therefore they are deleted
* here too.
*/
/**
 * erase_deleted - erase the orphans marked for deletion.
 * @c: UBIFS file-system description object
 *
 * During commit, the orphans being committed cannot be deleted, so they are
 * marked for deletion and chained onto the dnext list; this function walks
 * that list at the end of the commit and frees them. Recovery also puts
 * killed orphans on this list.
 */
static void erase_deleted(struct ubifs_info *c)
{
	struct ubifs_orphan *o, *next;

	spin_lock(&c->orphan_lock);
	for (o = c->orph_dnext; o; o = next) {
		next = o->dnext;
		ubifs_assert(!o->new);
		rb_erase(&o->rb, &c->orph_tree);
		list_del(&o->list);
		c->tot_orphans -= 1;
		dbg_gen("deleting orphan ino %lu", (unsigned long)o->inum);
		kfree(o);
	}
	c->orph_dnext = NULL;
	spin_unlock(&c->orphan_lock);
}
/**
* ubifs_orphan_end_commit - end commit of orphans.
* @c: UBIFS file-system description object
*
* End commit of orphans.
*/
/**
 * ubifs_orphan_end_commit - end commit of orphans.
 * @c: UBIFS file-system description object
 *
 * Flush any orphans still pending from start-commit, free the orphans that
 * were deleted while the commit ran, then run the debug consistency check.
 * Returns %0 on success or a negative error code.
 */
int ubifs_orphan_end_commit(struct ubifs_info *c)
{
	int err = 0;

	if (c->cmt_orphans != 0)
		err = commit_orphans(c);
	if (err)
		return err;
	erase_deleted(c);
	return dbg_check_orphans(c);
}
/**
* ubifs_clear_orphans - erase all LEBs used for orphans.
* @c: UBIFS file-system description object
*
* If recovery is not required, then the orphans from the previous session
* are not needed. This function locates the LEBs used to record
* orphans, and un-maps them.
*/
/**
 * ubifs_clear_orphans - erase all LEBs used for orphans.
 * @c: UBIFS file-system description object
 *
 * If recovery is not required, then the orphans from the previous session
 * are not needed. This function locates the LEBs used to record
 * orphans, and un-maps them, then resets the orphan head to the start of
 * the area.
 */
int ubifs_clear_orphans(struct ubifs_info *c)
{
int lnum, err;
for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) {
err = ubifs_leb_unmap(c, lnum);
if (err)
return err;
}
c->ohead_lnum = c->orph_first;
c->ohead_offs = 0;
return 0;
}
/**
* insert_dead_orphan - insert an orphan.
* @c: UBIFS file-system description object
* @inum: orphan inode number
*
* This function is a helper to the 'do_kill_orphans()' function. The orphan
* must be kept until the next commit, so it is added to the rb-tree and the
* deletion list.
*/
/**
 * insert_dead_orphan - insert an orphan.
 * @c: UBIFS file-system description object
 * @inum: orphan inode number
 *
 * This function is a helper to the 'do_kill_orphans()' function. The orphan
 * must be kept until the next commit, so it is added to the rb-tree and
 * chained onto the deletion (dnext) list so erase_deleted() frees it at the
 * end of the next commit. Returns %0 on success or %-ENOMEM.
 */
static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
{
	struct ubifs_orphan *orphan;
	struct rb_node **link, *parent = NULL;

	orphan = kzalloc(sizeof(struct ubifs_orphan), GFP_KERNEL);
	if (!orphan)
		return -ENOMEM;
	orphan->inum = inum;

	/* Walk down to the rb-tree insertion point */
	for (link = &c->orph_tree.rb_node; *link; ) {
		struct ubifs_orphan *o;

		parent = *link;
		o = rb_entry(parent, struct ubifs_orphan, rb);
		if (inum < o->inum)
			link = &parent->rb_left;
		else if (inum > o->inum)
			link = &parent->rb_right;
		else {
			/* Already added - no problem */
			kfree(orphan);
			return 0;
		}
	}
	c->tot_orphans += 1;
	rb_link_node(&orphan->rb, parent, link);
	rb_insert_color(&orphan->rb, &c->orph_tree);
	list_add_tail(&orphan->list, &c->orph_list);
	/* Defer the free until the next commit end */
	orphan->dnext = c->orph_dnext;
	c->orph_dnext = orphan;
	dbg_mnt("ino %lu, new %d, tot %d", (unsigned long)inum,
		c->new_orphans, c->tot_orphans);
	return 0;
}
/**
* do_kill_orphans - remove orphan inodes from the index.
* @c: UBIFS file-system description object
* @sleb: scanned LEB
* @last_cmt_no: cmt_no of last orphan node read is passed and returned here
* @outofdate: whether the LEB is out of date is returned here
* @last_flagged: whether the end orphan node is encountered
*
* This function is a helper to the 'kill_orphans()' function. It goes through
* every orphan node in a LEB and for every inode number recorded, removes
* all keys for that inode from the TNC.
*/
/**
 * do_kill_orphans - remove orphan inodes from the index.
 * @c: UBIFS file-system description object
 * @sleb: scanned LEB
 * @last_cmt_no: cmt_no of last orphan node read is passed and returned here
 * @outofdate: whether the LEB is out of date is returned here
 * @last_flagged: whether the end orphan node is encountered
 *
 * This function is a helper to the 'kill_orphans()' function. It goes through
 * every orphan node in a LEB and for every inode number recorded, removes
 * all keys for that inode from the TNC.
 */
static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
unsigned long long *last_cmt_no, int *outofdate,
int *last_flagged)
{
struct ubifs_scan_node *snod;
struct ubifs_orph_node *orph;
unsigned long long cmt_no;
ino_t inum;
int i, n, err, first = 1;
list_for_each_entry(snod, &sleb->nodes, list) {
if (snod->type != UBIFS_ORPH_NODE) {
ubifs_err("invalid node type %d in orphan area at "
"%d:%d", snod->type, sleb->lnum, snod->offs);
dbg_dump_node(c, snod->node);
return -EINVAL;
}
orph = snod->node;
/* Check commit number (mask off the top "last node" flag bit) */
cmt_no = le64_to_cpu(orph->cmt_no) & LLONG_MAX;
/*
* The commit number on the master node may be less, because
* of a failed commit. If there are several failed commits in a
* row, the commit number written on orphan nodes will continue
* to increase (because the commit number is adjusted here) even
* though the commit number on the master node stays the same
* because the master node has not been re-written.
*/
if (cmt_no > c->cmt_no)
c->cmt_no = cmt_no;
if (cmt_no < *last_cmt_no && *last_flagged) {
/*
* The last orphan node had a higher commit number and
* was flagged as the last written for that commit
* number. That makes this orphan node, out of date.
*/
if (!first) {
/* Out of date nodes may only appear at the start of a LEB */
ubifs_err("out of order commit number %llu in "
"orphan node at %d:%d",
cmt_no, sleb->lnum, snod->offs);
dbg_dump_node(c, snod->node);
return -EINVAL;
}
dbg_rcvry("out of date LEB %d", sleb->lnum);
*outofdate = 1;
return 0;
}
if (first)
first = 0;
/* Number of __le64 inode numbers stored after the node header */
n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
for (i = 0; i < n; i++) {
inum = le64_to_cpu(orph->inos[i]);
dbg_rcvry("deleting orphaned inode %lu",
(unsigned long)inum);
/* Drop every key of the inode from the TNC ... */
err = ubifs_tnc_remove_ino(c, inum);
if (err)
return err;
/* ... and remember it so the area is cleaned at next commit */
err = insert_dead_orphan(c, inum);
if (err)
return err;
}
*last_cmt_no = cmt_no;
/* Top bit set marks the last orphan node written for this commit */
if (le64_to_cpu(orph->cmt_no) & (1ULL << 63)) {
dbg_rcvry("last orph node for commit %llu at %d:%d",
cmt_no, sleb->lnum, snod->offs);
*last_flagged = 1;
} else
*last_flagged = 0;
}
return 0;
}
/**
* kill_orphans - remove all orphan inodes from the index.
* @c: UBIFS file-system description object
*
* If recovery is required, then orphan inodes recorded during the previous
* session (which ended with an unclean unmount) must be deleted from the index.
* This is done by updating the TNC, but since the index is not updated until
* the next commit, the LEBs where the orphan information is recorded are not
* erased until the next commit.
*/
/**
 * kill_orphans - remove all orphan inodes from the index.
 * @c: UBIFS file-system description object
 *
 * If recovery is required, then orphan inodes recorded during the previous
 * session (which ended with an unclean unmount) must be deleted from the index.
 * This is done by updating the TNC, but since the index is not updated until
 * the next commit, the LEBs where the orphan information is recorded are not
 * erased until the next commit.
 */
static int kill_orphans(struct ubifs_info *c)
{
unsigned long long last_cmt_no = 0;
int lnum, err = 0, outofdate = 0, last_flagged = 0;
c->ohead_lnum = c->orph_first;
c->ohead_offs = 0;
/* Check no-orphans flag and skip this if no orphans */
if (c->no_orphs) {
dbg_rcvry("no orphans");
return 0;
}
/*
* Orph nodes always start at c->orph_first and are written to each
* successive LEB in turn. Generally unused LEBs will have been unmapped
* but may contain out of date orphan nodes if the unmap didn't go
* through. In addition, the last orphan node written for each commit is
* marked (top bit of orph->cmt_no is set to 1). It is possible that
* there are orphan nodes from the next commit (i.e. the commit did not
* complete successfully). In that case, no orphans will have been lost
* due to the way that orphans are written, and any orphans added will
* be valid orphans anyway and so can be deleted.
*/
for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) {
struct ubifs_scan_leb *sleb;
dbg_rcvry("LEB %d", lnum);
sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);
if (IS_ERR(sleb)) {
/* A corrupt LEB may still be recoverable after unclean unmount */
if (PTR_ERR(sleb) == -EUCLEAN)
sleb = ubifs_recover_leb(c, lnum, 0,
c->sbuf, -1);
if (IS_ERR(sleb)) {
err = PTR_ERR(sleb);
break;
}
}
err = do_kill_orphans(c, sleb, &last_cmt_no, &outofdate,
&last_flagged);
if (err || outofdate) {
ubifs_scan_destroy(sleb);
break;
}
/* Track the end of valid data so new orphans append after it */
if (sleb->endpt) {
c->ohead_lnum = lnum;
c->ohead_offs = sleb->endpt;
}
ubifs_scan_destroy(sleb);
}
return err;
}
/**
* ubifs_mount_orphans - delete orphan inodes and erase LEBs that recorded them.
* @c: UBIFS file-system description object
* @unclean: indicates recovery from unclean unmount
* @read_only: indicates read only mount
*
* This function is called when mounting to erase orphans from the previous
* session. If UBIFS was not unmounted cleanly, then the inodes recorded as
* orphans are deleted.
*/
/**
 * ubifs_mount_orphans - delete orphan inodes and erase LEBs that recorded them.
 * @c: UBIFS file-system description object
 * @unclean: indicates recovery from unclean unmount
 * @read_only: indicates read only mount
 *
 * This function is called when mounting to erase orphans from the previous
 * session. If UBIFS was not unmounted cleanly, then the inodes recorded as
 * orphans are deleted. Returns %0 on success or a negative error code.
 */
int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only)
{
	c->max_orphans = tot_avail_orphs(c);

	/* A writable mount needs the commit buffer for orphan nodes */
	if (!read_only) {
		c->orph_buf = vmalloc(c->leb_size);
		if (!c->orph_buf)
			return -ENOMEM;
	}

	if (unclean)
		return kill_orphans(c);
	if (!read_only)
		return ubifs_clear_orphans(c);
	return 0;
}
#ifdef CONFIG_UBIFS_FS_DEBUG
/* One inode number found in the on-flash orphan area (debug check only). */
struct check_orphan {
struct rb_node rb;
ino_t inum;
};
/*
 * State carried through the orphan debug check:
 * @last_ino: last inode number seen while walking the index
 * @tot_inos: number of distinct inodes visited
 * @missing: inodes with nlink 0 that are recorded nowhere as orphans
 * @leaf_cnt: total index leaf nodes visited
 * @node: scratch buffer for reading inode nodes
 * @root: rb-tree of inode numbers read from the orphan area
 */
struct check_info {
unsigned long last_ino;
unsigned long tot_inos;
unsigned long missing;
unsigned long long leaf_cnt;
struct ubifs_ino_node *node;
struct rb_root root;
};
/*
 * dbg_find_orphan - check whether @inum is in the in-memory orphan rb-tree.
 * Returns 1 if found, 0 otherwise. Takes @c->orphan_lock for the lookup.
 */
static int dbg_find_orphan(struct ubifs_info *c, ino_t inum)
{
	struct rb_node *node;
	int found = 0;

	spin_lock(&c->orphan_lock);
	node = c->orph_tree.rb_node;
	while (node) {
		struct ubifs_orphan *o;

		o = rb_entry(node, struct ubifs_orphan, rb);
		if (inum < o->inum)
			node = node->rb_left;
		else if (inum > o->inum)
			node = node->rb_right;
		else {
			found = 1;
			break;
		}
	}
	spin_unlock(&c->orphan_lock);
	return found;
}
/*
 * dbg_ins_check_orphan - insert @inum into the check tree @root.
 * Duplicates are silently ignored. Returns %0 on success or %-ENOMEM.
 */
static int dbg_ins_check_orphan(struct rb_root *root, ino_t inum)
{
	struct check_orphan *orphan;
	struct rb_node **link, *parent = NULL;

	orphan = kzalloc(sizeof(struct check_orphan), GFP_NOFS);
	if (!orphan)
		return -ENOMEM;
	orphan->inum = inum;

	/* Walk down to the insertion point, keyed by inode number */
	for (link = &root->rb_node; *link; ) {
		struct check_orphan *o;

		parent = *link;
		o = rb_entry(parent, struct check_orphan, rb);
		if (inum < o->inum)
			link = &parent->rb_left;
		else if (inum > o->inum)
			link = &parent->rb_right;
		else {
			kfree(orphan);
			return 0;
		}
	}
	rb_link_node(&orphan->rb, parent, link);
	rb_insert_color(&orphan->rb, root);
	return 0;
}
/*
 * dbg_find_check_orphan - look up @inum in the check tree @root.
 * Returns 1 if present, 0 otherwise. No locking: the tree is private to
 * the debug check.
 */
static int dbg_find_check_orphan(struct rb_root *root, ino_t inum)
{
	struct rb_node *node;

	for (node = root->rb_node; node; ) {
		struct check_orphan *o;

		o = rb_entry(node, struct check_orphan, rb);
		if (inum < o->inum)
			node = node->rb_left;
		else if (inum > o->inum)
			node = node->rb_right;
		else
			return 1;
	}
	return 0;
}
/*
 * dbg_free_check_tree - free every node of the check tree.
 *
 * Iterative post-order teardown: descend to a leaf, free it, and detach it
 * from its parent so the parent's other subtree (if any) is visited next.
 * This avoids recursion and needs no extra memory.
 */
static void dbg_free_check_tree(struct rb_root *root)
{
struct rb_node *this = root->rb_node;
struct check_orphan *o;
while (this) {
/* Descend until a node with no children is reached */
if (this->rb_left) {
this = this->rb_left;
continue;
} else if (this->rb_right) {
this = this->rb_right;
continue;
}
o = rb_entry(this, struct check_orphan, rb);
this = rb_parent(this);
/* Unhook the freed child so the parent is not revisited through it */
if (this) {
if (this->rb_left == &o->rb)
this->rb_left = NULL;
else
this->rb_right = NULL;
}
kfree(o);
}
}
/*
 * dbg_orphan_check - dbg_walk_index() callback checking one index leaf.
 * @c: UBIFS file-system description object
 * @zbr: index branch being visited
 * @priv: the struct check_info accumulated state
 *
 * For the first key of every inode, reads the inode node and verifies that
 * any inode with a zero link count is recorded as an orphan either in
 * memory or in the orphan area; otherwise counts it as missing.
 */
static int dbg_orphan_check(struct ubifs_info *c, struct ubifs_zbranch *zbr,
void *priv)
{
struct check_info *ci = priv;
ino_t inum;
int err;
inum = key_inum(c, &zbr->key);
/* Only do the per-inode work once, at the inode's first key */
if (inum != ci->last_ino) {
/* Lowest node type is the inode node, so it comes first */
if (key_type(c, &zbr->key) != UBIFS_INO_KEY)
ubifs_err("found orphan node ino %lu, type %d",
(unsigned long)inum, key_type(c, &zbr->key));
ci->last_ino = inum;
ci->tot_inos += 1;
err = ubifs_tnc_read_node(c, zbr, ci->node);
if (err) {
ubifs_err("node read failed, error %d", err);
return err;
}
if (ci->node->nlink == 0)
/* Must be recorded as an orphan */
if (!dbg_find_check_orphan(&ci->root, inum) &&
!dbg_find_orphan(c, inum)) {
ubifs_err("missing orphan, ino %lu",
(unsigned long)inum);
ci->missing += 1;
}
}
ci->leaf_cnt += 1;
return 0;
}
/*
 * dbg_read_orphans - collect all inode numbers from one scanned orphan LEB.
 * Every inode number found in an orphan node is inserted into the check
 * tree. Returns %0 on success or a negative error code.
 */
static int dbg_read_orphans(struct check_info *ci, struct ubifs_scan_leb *sleb)
{
	struct ubifs_scan_node *snod;

	list_for_each_entry(snod, &sleb->nodes, list) {
		struct ubifs_orph_node *orph;
		int nr_inos, i, err;

		cond_resched();
		if (snod->type != UBIFS_ORPH_NODE)
			continue;
		orph = snod->node;
		/* Inode numbers stored after the common header, 8 bytes each */
		nr_inos = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
		for (i = 0; i < nr_inos; i++) {
			err = dbg_ins_check_orphan(&ci->root,
						   le64_to_cpu(orph->inos[i]));
			if (err)
				return err;
		}
	}
	return 0;
}
/*
 * dbg_scan_orphans - scan the whole orphan area into the check tree.
 * Scans every orphan LEB and records the inode numbers found. A scratch
 * buffer allocation failure is treated as "nothing to check" (returns %0),
 * matching the best-effort nature of the debug check.
 */
static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci)
{
	void *buf;
	int lnum, err = 0;

	/* Check no-orphans flag and skip this if no orphans */
	if (c->no_orphs)
		return 0;

	buf = __vmalloc(c->leb_size, GFP_NOFS, PAGE_KERNEL);
	if (!buf) {
		ubifs_err("cannot allocate memory to check orphans");
		return 0;
	}

	for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) {
		struct ubifs_scan_leb *sleb;

		sleb = ubifs_scan(c, lnum, 0, buf, 0);
		if (IS_ERR(sleb)) {
			err = PTR_ERR(sleb);
			break;
		}
		err = dbg_read_orphans(ci, sleb);
		ubifs_scan_destroy(sleb);
		if (err)
			break;
	}

	vfree(buf);
	return err;
}
/*
 * dbg_check_orphans - verify every deleted-but-open inode is recorded.
 *
 * Debug-only consistency check (gated by UBIFS_CHK_ORPH): reads the inode
 * numbers recorded in the orphan area, then walks the whole index and
 * reports any inode with a zero link count that is recorded neither on
 * flash nor in memory. Returns %0 if consistent, a negative error code
 * otherwise.
 */
static int dbg_check_orphans(struct ubifs_info *c)
{
struct check_info ci;
int err;
if (!(ubifs_chk_flags & UBIFS_CHK_ORPH))
return 0;
ci.last_ino = 0;
ci.tot_inos = 0;
ci.missing = 0;
ci.leaf_cnt = 0;
ci.root = RB_ROOT;
/* Scratch buffer big enough for any inode node */
ci.node = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
if (!ci.node) {
ubifs_err("out of memory");
return -ENOMEM;
}
/* Phase 1: collect on-flash orphan inode numbers */
err = dbg_scan_orphans(c, &ci);
if (err)
goto out;
/* Phase 2: walk the index looking for unrecorded nlink==0 inodes */
err = dbg_walk_index(c, &dbg_orphan_check, NULL, &ci);
if (err) {
ubifs_err("cannot scan TNC, error %d", err);
goto out;
}
if (ci.missing) {
ubifs_err("%lu missing orphan(s)", ci.missing);
err = -EINVAL;
goto out;
}
dbg_cmt("last inode number is %lu", ci.last_ino);
dbg_cmt("total number of inodes is %lu", ci.tot_inos);
dbg_cmt("total number of leaf nodes is %llu", ci.leaf_cnt);
out:
dbg_free_check_tree(&ci.root);
kfree(ci.node);
return err;
}
#endif /* CONFIG_UBIFS_FS_DEBUG */
| gpl-2.0 |
stratosk/samsung-kernel-aries | net/batman-adv/bat_debugfs.c | 2381 | 8479 | /*
* Copyright (C) 2010-2011 B.A.T.M.A.N. contributors:
*
* Marek Lindner
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of version 2 of the GNU General Public
* License as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
* 02110-1301, USA
*
*/
#include "main.h"
#include <linux/debugfs.h>
#include "bat_debugfs.h"
#include "translation-table.h"
#include "originator.h"
#include "hard-interface.h"
#include "gateway_common.h"
#include "gateway_client.h"
#include "soft-interface.h"
#include "vis.h"
#include "icmp_socket.h"
/* Root of the batman-adv debugfs directory; NULL when debugfs is unusable. */
static struct dentry *bat_debugfs;
#ifdef CONFIG_BATMAN_ADV_DEBUG
/* Ring-buffer index mask; log_buff_len must be a power of two for this. */
#define LOG_BUFF_MASK (log_buff_len-1)
#define LOG_BUFF(idx) (debug_log->log_buff[(idx) & LOG_BUFF_MASK])
static int log_buff_len = LOG_BUF_LEN;
/*
 * emit_log_char - append one character to the per-mesh debug ring buffer.
 * Caller must hold debug_log->lock. When the buffer wraps, log_start is
 * advanced so it always trails log_end by at most log_buff_len.
 */
static void emit_log_char(struct debug_log *debug_log, char c)
{
LOG_BUFF(debug_log->log_end) = c;
debug_log->log_end++;
if (debug_log->log_end - debug_log->log_start > log_buff_len)
debug_log->log_start = debug_log->log_end - log_buff_len;
}
/*
 * fdebug_log - format a message into a debug_log ring buffer and wake readers.
 *
 * NOTE(review): debug_log_buf is a single static buffer but the protecting
 * lock is per-debug_log; with more than one mesh interface two instances
 * could race on the shared buffer - confirm whether multiple soft
 * interfaces can log concurrently in this tree.
 */
static int fdebug_log(struct debug_log *debug_log, char *fmt, ...)
{
va_list args;
static char debug_log_buf[256];
char *p;
if (!debug_log)
return 0;
spin_lock_bh(&debug_log->lock);
va_start(args, fmt);
vscnprintf(debug_log_buf, sizeof(debug_log_buf), fmt, args);
va_end(args);
for (p = debug_log_buf; *p != 0; p++)
emit_log_char(debug_log, *p);
spin_unlock_bh(&debug_log->lock);
/* A reader may be blocked in log_read() - wake it */
wake_up(&debug_log->queue_wait);
return 0;
}
/*
 * debug_log - public logging entry point: prefix the message with an
 * uptime-in-seconds timestamp and hand it to the ring buffer.
 * Always returns 0.
 */
int debug_log(struct bat_priv *bat_priv, char *fmt, ...)
{
va_list args;
char tmp_log_buf[256];
va_start(args, fmt);
vscnprintf(tmp_log_buf, sizeof(tmp_log_buf), fmt, args);
fdebug_log(bat_priv->debug_log, "[%10u] %s",
(jiffies / HZ), tmp_log_buf);
va_end(args);
return 0;
}
/*
 * log_open - open the "log" debugfs file.
 * Stashes the bat_priv pointer (set as i_private at file creation) and
 * pins the module while the file is open.
 */
static int log_open(struct inode *inode, struct file *file)
{
nonseekable_open(inode, file);
file->private_data = inode->i_private;
inc_module_count();
return 0;
}
/* log_release - counterpart of log_open(): drop the module reference. */
static int log_release(struct inode *inode, struct file *file)
{
dec_module_count();
return 0;
}
/*
 * log_read - copy buffered log characters to userspace.
 *
 * Blocks (unless O_NONBLOCK) until data is available, then drains up to
 * @count characters. The spinlock is dropped around each __put_user()
 * because copying to userspace may fault/sleep; producers may therefore
 * interleave, which is acceptable for a debug log.
 */
static ssize_t log_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct bat_priv *bat_priv = file->private_data;
struct debug_log *debug_log = bat_priv->debug_log;
int error, i = 0;
char c;
if ((file->f_flags & O_NONBLOCK) &&
!(debug_log->log_end - debug_log->log_start))
return -EAGAIN;
/* count is size_t, so the < 0 test can never be true; kept as-is */
if ((!buf) || (count < 0))
return -EINVAL;
if (count == 0)
return 0;
if (!access_ok(VERIFY_WRITE, buf, count))
return -EFAULT;
/* Sleep until the producer puts something into the ring buffer */
error = wait_event_interruptible(debug_log->queue_wait,
(debug_log->log_start - debug_log->log_end));
if (error)
return error;
spin_lock_bh(&debug_log->lock);
while ((!error) && (i < count) &&
(debug_log->log_start != debug_log->log_end)) {
c = LOG_BUFF(debug_log->log_start);
debug_log->log_start++;
/* __put_user may fault - never call it with the lock held */
spin_unlock_bh(&debug_log->lock);
error = __put_user(c, buf);
spin_lock_bh(&debug_log->lock);
buf++;
i++;
}
spin_unlock_bh(&debug_log->lock);
if (!error)
return i;
return error;
}
/*
 * log_poll - poll/select support: readable whenever the ring buffer holds
 * unread characters.
 */
static unsigned int log_poll(struct file *file, poll_table *wait)
{
struct bat_priv *bat_priv = file->private_data;
struct debug_log *debug_log = bat_priv->debug_log;
poll_wait(file, &debug_log->queue_wait, wait);
if (debug_log->log_end - debug_log->log_start)
return POLLIN | POLLRDNORM;
return 0;
}
/* File operations for the per-mesh debugfs "log" file. */
static const struct file_operations log_fops = {
.open = log_open,
.release = log_release,
.read = log_read,
.poll = log_poll,
.llseek = no_llseek,
};
/*
 * debug_log_setup - allocate the debug log ring buffer and create the
 * per-mesh debugfs "log" file.
 *
 * Fix over the previous version: the result of debugfs_create_file() was
 * tested with 'if (d)', treating success (a valid dentry) as failure and a
 * NULL failure as success.  debugfs_create_file() returns a dentry pointer
 * on success and NULL on error, so the test must be 'if (!d)'.
 *
 * Returns 0 on success, 1 on failure (debug_log_cleanup() frees the
 * buffer on the teardown path either way).
 */
static int debug_log_setup(struct bat_priv *bat_priv)
{
	struct dentry *d;

	if (!bat_priv->debug_dir)
		goto err;

	bat_priv->debug_log = kzalloc(sizeof(struct debug_log), GFP_ATOMIC);
	if (!bat_priv->debug_log)
		goto err;

	spin_lock_init(&bat_priv->debug_log->lock);
	init_waitqueue_head(&bat_priv->debug_log->queue_wait);

	d = debugfs_create_file("log", S_IFREG | S_IRUSR,
				bat_priv->debug_dir, bat_priv, &log_fops);
	if (!d)
		goto err;

	return 0;

err:
	return 1;
}
/* debug_log_cleanup - free the ring buffer; kfree(NULL) is a safe no-op. */
static void debug_log_cleanup(struct bat_priv *bat_priv)
{
kfree(bat_priv->debug_log);
bat_priv->debug_log = NULL;
}
#else /* CONFIG_BATMAN_ADV_DEBUG */
/* Stubs used when CONFIG_BATMAN_ADV_DEBUG is disabled: no log is kept. */
static int debug_log_setup(struct bat_priv *bat_priv)
{
bat_priv->debug_log = NULL;
return 0;
}
static void debug_log_cleanup(struct bat_priv *bat_priv)
{
return;
}
#endif
/* seq_file open hook for the "originators" table of this mesh interface. */
static int originators_open(struct inode *inode, struct file *file)
{
	/* i_private holds the net_device this debugfs file belongs to */
	return single_open(file, orig_seq_print_text, inode->i_private);
}
/* seq_file open hook for the "gateways" table of this mesh interface. */
static int gateways_open(struct inode *inode, struct file *file)
{
	/* i_private holds the net_device this debugfs file belongs to */
	return single_open(file, gw_client_seq_print_text, inode->i_private);
}
/* seq_file open hook for the soft-interface neighbor list. */
static int softif_neigh_open(struct inode *inode, struct file *file)
{
	/* i_private holds the net_device this debugfs file belongs to */
	return single_open(file, softif_neigh_seq_print_text, inode->i_private);
}
/* seq_file open hook for the global translation table. */
static int transtable_global_open(struct inode *inode, struct file *file)
{
	/* i_private holds the net_device this debugfs file belongs to */
	return single_open(file, tt_global_seq_print_text, inode->i_private);
}
/* seq_file open hook for the local translation table. */
static int transtable_local_open(struct inode *inode, struct file *file)
{
	/* i_private holds the net_device this debugfs file belongs to */
	return single_open(file, tt_local_seq_print_text, inode->i_private);
}
/* seq_file open hook for the vis (network visualization) data. */
static int vis_data_open(struct inode *inode, struct file *file)
{
	/* i_private holds the net_device this debugfs file belongs to */
	return single_open(file, vis_seq_print_text, inode->i_private);
}
/* Pairs a debugfs file name/mode with its file operations (see BAT_DEBUGINFO). */
struct bat_debuginfo {
struct attribute attr;
const struct file_operations fops;
};
/*
 * BAT_DEBUGINFO - define a bat_debuginfo instance named
 * bat_debuginfo_<_name> whose seq_file is driven by the _open callback.
 */
#define BAT_DEBUGINFO(_name, _mode, _open) \
struct bat_debuginfo bat_debuginfo_##_name = { \
.attr = { .name = __stringify(_name), \
.mode = _mode, }, \
.fops = { .owner = THIS_MODULE, \
.open = _open, \
.read = seq_read, \
.llseek = seq_lseek, \
.release = single_release, \
} \
};
/* One read-only debugfs file per mesh for each table/state dump. */
static BAT_DEBUGINFO(originators, S_IRUGO, originators_open);
static BAT_DEBUGINFO(gateways, S_IRUGO, gateways_open);
static BAT_DEBUGINFO(softif_neigh, S_IRUGO, softif_neigh_open);
static BAT_DEBUGINFO(transtable_global, S_IRUGO, transtable_global_open);
static BAT_DEBUGINFO(transtable_local, S_IRUGO, transtable_local_open);
static BAT_DEBUGINFO(vis_data, S_IRUGO, vis_data_open);

/* NULL-terminated table walked by debugfs_add_meshif(). */
static struct bat_debuginfo *mesh_debuginfos[] = {
	&bat_debuginfo_originators,
	&bat_debuginfo_gateways,
	&bat_debuginfo_softif_neigh,
	&bat_debuginfo_transtable_global,
	&bat_debuginfo_transtable_local,
	&bat_debuginfo_vis_data,
	NULL,
};
/* Create the top-level batman-adv debugfs directory.  Without debugfs
 * support the kernel hands back ERR_PTR(-ENODEV); record that as NULL
 * so every later user can do a plain pointer test.
 */
void debugfs_init(void)
{
	struct dentry *dir;

	dir = debugfs_create_dir(DEBUGFS_BAT_SUBDIR, NULL);
	if (dir == ERR_PTR(-ENODEV))
		dir = NULL;

	bat_debugfs = dir;
}
/* Remove the whole batman-adv debugfs tree, if it was ever created. */
void debugfs_destroy(void)
{
	if (!bat_debugfs)
		return;

	debugfs_remove_recursive(bat_debugfs);
	bat_debugfs = NULL;
}
/* Create the per-mesh debugfs directory and populate it: socket node,
 * optional log file and one entry per mesh_debuginfos[] slot.
 * Returns 0 on success; with CONFIG_DEBUG_FS enabled any failure is
 * reported as -ENOMEM, otherwise the whole function is a successful
 * no-op.
 */
int debugfs_add_meshif(struct net_device *dev)
{
	struct bat_priv *bat_priv = netdev_priv(dev);
	struct bat_debuginfo **bat_debug;
	struct dentry *file;

	if (!bat_debugfs)
		goto out;

	bat_priv->debug_dir = debugfs_create_dir(dev->name, bat_debugfs);
	if (!bat_priv->debug_dir)
		goto out;

	bat_socket_setup(bat_priv);
	/* NOTE(review): the debug_log_setup() return value is ignored; a
	 * failed log allocation leaves this mesh without a "log" file but
	 * does not abort the rest of the setup.
	 */
	debug_log_setup(bat_priv);

	for (bat_debug = mesh_debuginfos; *bat_debug; ++bat_debug) {
		file = debugfs_create_file(((*bat_debug)->attr).name,
					   S_IFREG | ((*bat_debug)->attr).mode,
					   bat_priv->debug_dir,
					   dev, &(*bat_debug)->fops);
		if (!file) {
			bat_err(dev, "Can't add debugfs file: %s/%s\n",
				dev->name, ((*bat_debug)->attr).name);
			goto rem_attr;
		}
	}

	return 0;
rem_attr:
	/* Partial setup: drop everything created so far in one go. */
	debugfs_remove_recursive(bat_priv->debug_dir);
	bat_priv->debug_dir = NULL;
out:
#ifdef CONFIG_DEBUG_FS
	return -ENOMEM;
#else
	return 0;
#endif /* CONFIG_DEBUG_FS */
}
/* Per-mesh debugfs teardown: free the log buffer, then remove the
 * mesh's debugfs subtree (when debugfs was available at all).
 */
void debugfs_del_meshif(struct net_device *dev)
{
	struct bat_priv *bat_priv = netdev_priv(dev);

	debug_log_cleanup(bat_priv);

	if (!bat_debugfs)
		return;

	debugfs_remove_recursive(bat_priv->debug_dir);
	bat_priv->debug_dir = NULL;
}
| gpl-2.0 |
kadjarT/android_kernel_samsung_cs02 | drivers/net/tun.c | 2637 | 39428 | /*
* TUN - Universal TUN/TAP device driver.
* Copyright (C) 1999-2002 Maxim Krasnyansky <maxk@qualcomm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* $Id: tun.c,v 1.15 2002/03/01 02:44:24 maxk Exp $
*/
/*
* Changes:
*
* Mike Kershaw <dragorn@kismetwireless.net> 2005/08/14
* Add TUNSETLINK ioctl to set the link encapsulation
*
* Mark Smith <markzzzsmith@yahoo.com.au>
* Use random_ether_addr() for tap MAC address.
*
* Harald Roelle <harald.roelle@ifi.lmu.de> 2004/04/20
* Fixes in packet dropping, queue length setting and queue wakeup.
* Increased default tx queue length.
* Added ethtool API.
* Minor cleanups
*
* Daniel Podlejski <underley@underley.eu.org>
* Modifications for 2.3.99-pre5 kernel.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DRV_NAME "tun"
#define DRV_VERSION "1.6"
#define DRV_DESCRIPTION "Universal TUN/TAP device driver"
#define DRV_COPYRIGHT "(C) 1999-2004 Max Krasnyansky <maxk@qualcomm.com>"
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/miscdevice.h>
#include <linux/ethtool.h>
#include <linux/rtnetlink.h>
#include <linux/compat.h>
#include <linux/if.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/if_tun.h>
#include <linux/crc32.h>
#include <linux/nsproxy.h>
#include <linux/virtio_net.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/rtnetlink.h>
#include <net/sock.h>
#include <asm/uaccess.h>
/* Uncomment to enable debugging */
/* #define TUN_DEBUG 1 */
#ifdef TUN_DEBUG
static int debug;
#define tun_debug(level, tun, fmt, args...) \
do { \
if (tun->debug) \
netdev_printk(level, tun->dev, fmt, ##args); \
} while (0)
#define DBG1(level, fmt, args...) \
do { \
if (debug == 2) \
printk(level fmt, ##args); \
} while (0)
#else
#define tun_debug(level, tun, fmt, args...) \
do { \
if (0) \
netdev_printk(level, tun->dev, fmt, ##args); \
} while (0)
#define DBG1(level, fmt, args...) \
do { \
if (0) \
printk(level fmt, ##args); \
} while (0)
#endif
/* rx MAC filter programmed through TUNSETTXFILTER: up to
 * FLT_EXACT_COUNT exact matches plus a 64-bit hash covering any
 * further multicast addresses (see update_filter()/run_filter()).
 */
#define FLT_EXACT_COUNT 8
struct tap_filter {
	unsigned int count;	/* Number of addrs. Zero means disabled */
	u32 mask[2];		/* Mask of the hashed addrs */
	unsigned char addr[FLT_EXACT_COUNT][ETH_ALEN];	/* exact matches */
};
/* Per-open-fd state: tun is non-NULL while attached to a device;
 * count holds the attach references handed out by __tun_get().
 */
struct tun_file {
	atomic_t count;		/* attach refcount, see __tun_get()/tun_put() */
	struct tun_struct *tun;	/* device we are attached to, or NULL */
	struct net *net;	/* namespace captured when the fd was opened */
};
struct tun_sock;
/* Per-device driver state, stored as netdev_priv() of the tun/tap
 * net_device.  The embedded socket's receive queue is the packet
 * queue between the network stack and the char-device reader.
 */
struct tun_struct {
	struct tun_file *tfile;		/* attached fd, or NULL */
	unsigned int flags;		/* TUN_* mode bits */
	uid_t owner;			/* -1 = unrestricted */
	gid_t group;			/* -1 = unrestricted */
	struct net_device *dev;
	netdev_features_t set_features;	/* offloads chosen via TUNSETOFFLOAD */
#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
	NETIF_F_TSO6|NETIF_F_UFO)
	struct fasync_struct *fasync;	/* SIGIO delivery list */
	struct tap_filter txflt;	/* TAP rx MAC filter */
	struct socket socket;		/* packet queue + sendmsg/recvmsg ops */
	struct socket_wq wq;		/* wait queue for poll/read */
	int vnet_hdr_sz;		/* bytes of virtio_net_hdr on the wire */
#ifdef TUN_DEBUG
	int debug;
#endif
};
/* Socket allocated from tun_proto; the back pointer lets sk callbacks
 * (write_space, destruct) find the owning tun device.
 */
struct tun_sock {
	struct sock sk;		/* must be first: container_of in tun_sk() */
	struct tun_struct *tun;
};
/* Map a struct sock allocated from tun_proto back to its tun_sock. */
static inline struct tun_sock *tun_sk(struct sock *sk)
{
	return container_of(sk, struct tun_sock, sk);
}
/* Bind an open fd to a device.  Caller must hold the RTNL lock.
 * Returns -EINVAL if the fd is already attached somewhere, -EBUSY if
 * the device already has an fd (this driver is single-queue), else 0.
 */
static int tun_attach(struct tun_struct *tun, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	int err;

	ASSERT_RTNL();

	/* The tx lock keeps the xmit path from observing a half-set-up
	 * tun->tfile while we publish the cross links below.
	 */
	netif_tx_lock_bh(tun->dev);

	err = -EINVAL;
	if (tfile->tun)
		goto out;

	err = -EBUSY;
	if (tun->tfile)
		goto out;

	err = 0;
	tfile->tun = tun;
	tun->tfile = tfile;
	tun->socket.file = file;
	netif_carrier_on(tun->dev);
	dev_hold(tun->dev);		/* dropped in __tun_detach() */
	sock_hold(tun->socket.sk);
	atomic_inc(&tfile->count);

out:
	netif_tx_unlock_bh(tun->dev);
	return err;
}
/* Unbind the attached fd: clear the links under the tx lock so the
 * xmit path never sees a half torn-down device, flush any packets
 * still queued for the reader, and release the dev_hold() taken in
 * tun_attach().
 */
static void __tun_detach(struct tun_struct *tun)
{
	/* Detach from net device */
	netif_tx_lock_bh(tun->dev);
	netif_carrier_off(tun->dev);
	tun->tfile = NULL;
	tun->socket.file = NULL;
	netif_tx_unlock_bh(tun->dev);

	/* Drop read queue */
	skb_queue_purge(&tun->socket.sk->sk_receive_queue);

	/* Drop the extra count on the net device */
	dev_put(tun->dev);
}
/* __tun_detach() with the RTNL lock held around it. */
static void tun_detach(struct tun_struct *tun)
{
	rtnl_lock();
	__tun_detach(tun);
	rtnl_unlock();
}
/* Take an attach reference on the device bound to @tfile; returns NULL
 * when the fd is detached (count already zero).  Pair with tun_put().
 */
static struct tun_struct *__tun_get(struct tun_file *tfile)
{
	if (!atomic_inc_not_zero(&tfile->count))
		return NULL;

	return tfile->tun;
}
/* Convenience wrapper: __tun_get() on a file's private tun_file. */
static struct tun_struct *tun_get(struct file *file)
{
	struct tun_file *tfile = file->private_data;

	return __tun_get(tfile);
}
/* Release the reference taken by __tun_get(); the final put performs
 * the real detach of fd from device.
 */
static void tun_put(struct tun_struct *tun)
{
	struct tun_file *tfile = tun->tfile;

	if (atomic_dec_and_test(&tfile->count))
		tun_detach(tfile->tun);
}
/* TAP filtering */
/* Set the hash bit for @addr in the 64-bit multicast filter @mask.
 * The top six bits of the Ethernet CRC select one of 64 bit positions.
 */
static void addr_hash_set(u32 *mask, const u8 *addr)
{
	int bit = ether_crc(ETH_ALEN, addr) >> 26;

	mask[bit >> 5] |= (1 << (bit & 31));
}
/* Test @addr's hash bit in @mask; non-zero means "may be wanted".
 * Uses the same CRC-derived bit position as addr_hash_set().
 */
static unsigned int addr_hash_test(const u32 *mask, const u8 *addr)
{
	int bit = ether_crc(ETH_ALEN, addr) >> 26;

	return mask[bit >> 5] & (1 << (bit & 31));
}
/* Reprogram the TAP rx MAC filter from a userspace struct tun_filter
 * followed by uf.count six-byte addresses.  The first FLT_EXACT_COUNT
 * addresses become exact matches; the rest must be multicast and are
 * folded into the hash.  Returns the number of exact entries installed,
 * 0 if a non-multicast overflow address disabled filtering, or a
 * negative errno.
 */
static int update_filter(struct tap_filter *filter, void __user *arg)
{
	struct { u8 u[ETH_ALEN]; } *addr;
	struct tun_filter uf;
	int err, alen, n, nexact;

	if (copy_from_user(&uf, arg, sizeof(uf)))
		return -EFAULT;

	if (!uf.count) {
		/* Disabled */
		filter->count = 0;
		return 0;
	}

	/* uf.count comes straight from userspace.  Reject values that
	 * would overflow the ETH_ALEN * count multiplication below: a
	 * wrapped-around small alen would make kmalloc() succeed with an
	 * undersized buffer that the copy loops then index out of bounds.
	 */
	if (uf.count > INT_MAX / ETH_ALEN)
		return -EINVAL;

	alen = ETH_ALEN * uf.count;
	addr = kmalloc(alen, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (copy_from_user(addr, arg + sizeof(uf), alen)) {
		err = -EFAULT;
		goto done;
	}

	/* The filter is updated without holding any locks. Which is
	 * perfectly safe. We disable it first and in the worst
	 * case we'll accept a few undesired packets. */
	filter->count = 0;
	wmb();

	/* Use first set of addresses as an exact filter */
	for (n = 0; n < uf.count && n < FLT_EXACT_COUNT; n++)
		memcpy(filter->addr[n], addr[n].u, ETH_ALEN);

	nexact = n;

	/* Remaining multicast addresses are hashed,
	 * unicast will leave the filter disabled. */
	memset(filter->mask, 0, sizeof(filter->mask));
	for (; n < uf.count; n++) {
		if (!is_multicast_ether_addr(addr[n].u)) {
			err = 0; /* no filter */
			goto done;
		}
		addr_hash_set(filter->mask, addr[n].u);
	}

	/* For ALLMULTI just set the mask to all ones.
	 * This overrides the mask populated above. */
	if ((uf.flags & TUN_FLT_ALLMULTI))
		memset(filter->mask, ~0, sizeof(filter->mask));

	/* Now enable the filter */
	wmb();
	filter->count = nexact;

	/* Return the number of exact filters */
	err = nexact;

done:
	kfree(addr);
	return err;
}
/* Match the skb's destination MAC first against the exact list, then
 * (multicast only) against the hash.  Returns: 0 - drop, !=0 - accept.
 */
static int run_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	/* Cannot use eth_hdr(skb) here because skb_mac_hdr() is incorrect
	 * at this point. */
	struct ethhdr *eh = (struct ethhdr *) skb->data;
	int i;

	/* Exact match */
	for (i = 0; i < filter->count; i++)
		if (!compare_ether_addr(eh->h_dest, filter->addr[i]))
			return 1;

	/* Inexact match (multicast only) */
	if (is_multicast_ether_addr(eh->h_dest))
		return addr_hash_test(filter->mask, eh->h_dest);

	return 0;
}
/*
* Checks whether the packet is accepted or not.
* Returns: 0 - drop, !=0 - accept
*/
/* Accept everything while filtering is disabled (count == 0), otherwise
 * delegate to run_filter().  Returns: 0 - drop, !=0 - accept.
 */
static int check_filter(struct tap_filter *filter, const struct sk_buff *skb)
{
	if (filter->count)
		return run_filter(filter, skb);

	return 1;
}
/* Network device part of the driver */
static const struct ethtool_ops tun_ethtool_ops;
/* Net device detach from fd. */
/* Net device detach from fd: wake every sleeper so blocked readers see
 * the device going away, then detach if we held the last reference.
 */
static void tun_net_uninit(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);
	struct tun_file *tfile = tun->tfile;

	/* Inform the methods they need to stop using the dev.
	 */
	if (tfile) {
		wake_up_all(&tun->wq.wait);
		if (atomic_dec_and_test(&tfile->count))
			__tun_detach(tun);
	}
}
/* netdev destructor: release the kernel socket created in tun_set_iff().
 * Dropping the sock ends up in tun_sock_destruct(), which frees the
 * netdev itself.
 */
static void tun_free_netdev(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	sk_release_kernel(tun->socket.sk);
}
/* Net device open. */
/* Net device open: allow transmissions to be queued. */
static int tun_net_open(struct net_device *dev)
{
	netif_start_queue(dev);

	return 0;
}
/* Net device close. */
/* Net device close: stop accepting packets for transmission. */
static int tun_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);

	return 0;
}
/* Net device start xmit */
/* Net device start xmit: queue the skb on the embedded socket's receive
 * queue for the char-device reader and wake it.  The skb is always
 * consumed (queued or dropped); we always report NETDEV_TX_OK.
 */
static netdev_tx_t tun_net_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun_debug(KERN_INFO, tun, "tun_net_xmit %d\n", skb->len);

	/* Drop packet if interface is not attached */
	if (!tun->tfile)
		goto drop;

	/* Drop if the filter does not like it.
	 * This is a noop if the filter is disabled.
	 * Filter can be enabled only for the TAP devices. */
	if (!check_filter(&tun->txflt, skb))
		goto drop;

	/* A socket filter may have been attached via TUNATTACHFILTER. */
	if (tun->socket.sk->sk_filter &&
	    sk_filter(tun->socket.sk, skb))
		goto drop;

	if (skb_queue_len(&tun->socket.sk->sk_receive_queue) >= dev->tx_queue_len) {
		if (!(tun->flags & TUN_ONE_QUEUE)) {
			/* Normal queueing mode. */
			/* Packet scheduler handles dropping of further packets. */
			netif_stop_queue(dev);

			/* We won't see all dropped packets individually, so overrun
			 * error is more appropriate. */
			dev->stats.tx_fifo_errors++;
		} else {
			/* Single queue mode.
			 * Driver handles dropping of all packets itself. */
			goto drop;
		}
	}

	/* Orphan the skb - required as we might hang on to it
	 * for indefinite time. */
	skb_orphan(skb);

	/* Enqueue packet */
	skb_queue_tail(&tun->socket.sk->sk_receive_queue, skb);

	/* Notify and wake up reader process */
	if (tun->flags & TUN_FASYNC)
		kill_fasync(&tun->fasync, SIGIO, POLL_IN);
	wake_up_interruptible_poll(&tun->wq.wait, POLLIN |
				   POLLRDNORM | POLLRDBAND);
	return NETDEV_TX_OK;

drop:
	dev->stats.tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
static void tun_net_mclist(struct net_device *dev)
{
	/* Nothing to do: this hook reprograms an rx multicast filter, but
	 * on the rx path we hand userspace everything it gives us, so
	 * there is no hardware state to update.  It exists only so the
	 * core can call it unconditionally.
	 */
}
#define MIN_MTU 68
#define MAX_MTU 65535
/* Validate and apply a new MTU; MTU plus the link-level header must
 * stay within [MIN_MTU, MAX_MTU].
 */
static int
tun_net_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || MAX_MTU - dev->hard_header_len < new_mtu)
		return -EINVAL;

	dev->mtu = new_mtu;
	return 0;
}
/* Only offloads userspace enabled through TUNSETOFFLOAD may stay on;
 * bits outside TUN_USER_FEATURES pass through untouched.
 */
static netdev_features_t tun_net_fix_features(struct net_device *dev,
	netdev_features_t features)
{
	struct tun_struct *tun = netdev_priv(dev);
	netdev_features_t enabled = features & tun->set_features;
	netdev_features_t untouched = features & ~TUN_USER_FEATURES;

	return enabled | untouched;
}
#ifdef CONFIG_NET_POLL_CONTROLLER
static void tun_poll_controller(struct net_device *dev)
{
	/* A tun device only receives frames synchronously — from a
	 * write() on the char device or a sendmsg() on the socket — so
	 * there is never pending rx work for netpoll to flush.  The
	 * empty hook still matters: it advertises netpoll support,
	 * which lets bridged virt setups keep using netconsole.
	 */
}
#endif
/* Hooks for TUN (point-to-point, headerless) devices. */
static const struct net_device_ops tun_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_change_mtu		= tun_net_change_mtu,
	.ndo_fix_features	= tun_net_fix_features,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
};
/* Hooks for TAP (Ethernet) devices; adds MAC/multicast handling on top
 * of the TUN set.
 */
static const struct net_device_ops tap_netdev_ops = {
	.ndo_uninit		= tun_net_uninit,
	.ndo_open		= tun_net_open,
	.ndo_stop		= tun_net_close,
	.ndo_start_xmit		= tun_net_xmit,
	.ndo_change_mtu		= tun_net_change_mtu,
	.ndo_fix_features	= tun_net_fix_features,
	.ndo_set_rx_mode	= tun_net_mclist,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= tun_poll_controller,
#endif
};
/* Initialize net device. */
static void tun_net_init(struct net_device *dev)
{
struct tun_struct *tun = netdev_priv(dev);
switch (tun->flags & TUN_TYPE_MASK) {
case TUN_TUN_DEV:
dev->netdev_ops = &tun_netdev_ops;
/* Point-to-Point TUN Device */
dev->hard_header_len = 0;
dev->addr_len = 0;
dev->mtu = 1500;
/* Zero header length */
dev->type = ARPHRD_NONE;
dev->flags = IFF_POINTOPOINT | IFF_NOARP | IFF_MULTICAST;
dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
case TUN_TAP_DEV:
dev->netdev_ops = &tap_netdev_ops;
/* Ethernet TAP Device */
ether_setup(dev);
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
eth_hw_addr_random(dev);
dev->tx_queue_len = TUN_READQ_SIZE; /* We prefer our own queue length */
break;
}
}
/* Character device part */
/* Poll */
/* Poll: readable when queued packets exist, writable when the socket
 * has send buffer room; reports POLLERR for a detached fd or once the
 * netdev has been unregistered.
 */
static unsigned int tun_chr_poll(struct file *file, poll_table * wait)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	struct sock *sk;
	unsigned int mask = 0;

	if (!tun)
		return POLLERR;

	sk = tun->socket.sk;

	tun_debug(KERN_INFO, tun, "tun_chr_poll\n");

	poll_wait(file, &tun->wq.wait, wait);

	if (!skb_queue_empty(&sk->sk_receive_queue))
		mask |= POLLIN | POLLRDNORM;

	/* Arm the NOSPACE bit so tun_sock_write_space() knows to wake us,
	 * then re-test to close the race with a concurrent free-up.
	 */
	if (sock_writeable(sk) ||
	    (!test_and_set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags) &&
	     sock_writeable(sk)))
		mask |= POLLOUT | POLLWRNORM;

	if (tun->dev->reg_state != NETREG_REGISTERED)
		mask = POLLERR;

	tun_put(tun);
	return mask;
}
/* Allocate an skb for data arriving from userspace, honouring the
 * socket send buffer limit (may block unless noblock).
 * prepad is the amount to reserve at front. len is length after that.
 * linear is a hint as to how much to copy (usually headers); small
 * packets are made fully linear.  Returns an ERR_PTR on failure.
 */
static struct sk_buff *tun_alloc_skb(struct tun_struct *tun,
				     size_t prepad, size_t len,
				     size_t linear, int noblock)
{
	struct sock *sk = tun->socket.sk;
	struct sk_buff *skb;
	int err;

	sock_update_classid(sk);

	/* Under a page?  Don't bother with paged skb. */
	if (prepad + len < PAGE_SIZE || !linear)
		linear = len;

	skb = sock_alloc_send_pskb(sk, prepad + linear, len - linear, noblock,
				   &err);
	if (!skb)
		return ERR_PTR(err);

	skb_reserve(skb, prepad);
	skb_put(skb, linear);
	/* The remainder of len lives in page fragments. */
	skb->data_len = len - linear;
	skb->len += len - linear;

	return skb;
}
/* Get packet from user space buffer: parse the optional tun_pi and
 * virtio_net headers, copy the payload into a fresh skb, apply any
 * checksum/GSO metadata and inject the packet into the stack.
 * Returns the number of bytes consumed or a negative errno.
 */
static ssize_t tun_get_user(struct tun_struct *tun,
			    const struct iovec *iv, size_t count,
			    int noblock)
{
	struct tun_pi pi = { 0, cpu_to_be16(ETH_P_IP) };
	struct sk_buff *skb;
	size_t len = count, align = NET_SKB_PAD;
	struct virtio_net_hdr gso = { 0 };
	int offset = 0;

	if (!(tun->flags & TUN_NO_PI)) {
		/* len underflows (wraps above count) when the buffer is
		 * too small to even hold the packet-info header. */
		if ((len -= sizeof(pi)) > count)
			return -EINVAL;

		if (memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi)))
			return -EFAULT;
		offset += sizeof(pi);
	}

	if (tun->flags & TUN_VNET_HDR) {
		if ((len -= tun->vnet_hdr_sz) > count)
			return -EINVAL;

		if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso)))
			return -EFAULT;

		/* Ensure the advertised linear part covers the checksum
		 * fields the stack will touch. */
		if ((gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) &&
		    gso.csum_start + gso.csum_offset + 2 > gso.hdr_len)
			gso.hdr_len = gso.csum_start + gso.csum_offset + 2;

		if (gso.hdr_len > len)
			return -EINVAL;
		offset += tun->vnet_hdr_sz;
	}

	if ((tun->flags & TUN_TYPE_MASK) == TUN_TAP_DEV) {
		align += NET_IP_ALIGN;
		if (unlikely(len < ETH_HLEN ||
			     (gso.hdr_len && gso.hdr_len < ETH_HLEN)))
			return -EINVAL;
	}

	skb = tun_alloc_skb(tun, align, len, gso.hdr_len, noblock);
	if (IS_ERR(skb)) {
		if (PTR_ERR(skb) != -EAGAIN)
			tun->dev->stats.rx_dropped++;
		return PTR_ERR(skb);
	}

	if (skb_copy_datagram_from_iovec(skb, 0, iv, offset, len)) {
		tun->dev->stats.rx_dropped++;
		kfree_skb(skb);
		return -EFAULT;
	}

	if (gso.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		if (!skb_partial_csum_set(skb, gso.csum_start,
					  gso.csum_offset)) {
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}
	}

	switch (tun->flags & TUN_TYPE_MASK) {
	case TUN_TUN_DEV:
		if (tun->flags & TUN_NO_PI) {
			/* No packet info: infer the protocol from the IP
			 * version nibble. */
			switch (skb->data[0] & 0xf0) {
			case 0x40:
				pi.proto = htons(ETH_P_IP);
				break;
			case 0x60:
				pi.proto = htons(ETH_P_IPV6);
				break;
			default:
				tun->dev->stats.rx_dropped++;
				kfree_skb(skb);
				return -EINVAL;
			}
		}

		skb_reset_mac_header(skb);
		skb->protocol = pi.proto;
		skb->dev = tun->dev;
		break;
	case TUN_TAP_DEV:
		skb->protocol = eth_type_trans(skb, tun->dev);
		break;
	}

	if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		default:
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}

		if (gso.gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = gso.gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			tun->dev->stats.rx_frame_errors++;
			kfree_skb(skb);
			return -EINVAL;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_rx_ni(skb);

	tun->dev->stats.rx_packets++;
	tun->dev->stats.rx_bytes += len;

	return count;
}
/* Char device write path: inject one packet via tun_get_user().
 * count is the number of iovec segments, not bytes.
 */
static ssize_t tun_chr_aio_write(struct kiocb *iocb, const struct iovec *iv,
				 unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct tun_struct *tun = tun_get(file);
	ssize_t result;

	if (!tun)
		return -EBADFD;

	tun_debug(KERN_INFO, tun, "tun_chr_write %ld\n", count);

	result = tun_get_user(tun, iv, iov_length(iv, count),
			      file->f_flags & O_NONBLOCK);

	tun_put(tun);
	return result;
}
/* Put packet to the user space buffer: emit the optional tun_pi and
 * virtio_net headers followed by as much of the packet as fits in len.
 * Returns total header+payload bytes placed in the iovec, or -errno.
 */
static ssize_t tun_put_user(struct tun_struct *tun,
			    struct sk_buff *skb,
			    const struct iovec *iv, int len)
{
	struct tun_pi pi = { 0, skb->protocol };
	ssize_t total = 0;

	if (!(tun->flags & TUN_NO_PI)) {
		if ((len -= sizeof(pi)) < 0)
			return -EINVAL;

		if (len < skb->len) {
			/* Packet will be striped */
			pi.flags |= TUN_PKT_STRIP;
		}

		if (memcpy_toiovecend(iv, (void *) &pi, 0, sizeof(pi)))
			return -EFAULT;
		total += sizeof(pi);
	}

	if (tun->flags & TUN_VNET_HDR) {
		struct virtio_net_hdr gso = { 0 }; /* no info leak */
		if ((len -= tun->vnet_hdr_sz) < 0)
			return -EINVAL;

		if (skb_is_gso(skb)) {
			struct skb_shared_info *sinfo = skb_shinfo(skb);

			/* This is a hint as to how much should be linear. */
			gso.hdr_len = skb_headlen(skb);
			gso.gso_size = sinfo->gso_size;
			if (sinfo->gso_type & SKB_GSO_TCPV4)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
			else if (sinfo->gso_type & SKB_GSO_TCPV6)
				gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
			else if (sinfo->gso_type & SKB_GSO_UDP)
				gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
			else {
				pr_err("unexpected GSO type: "
				       "0x%x, gso_size %d, hdr_len %d\n",
				       sinfo->gso_type, gso.gso_size,
				       gso.hdr_len);
				print_hex_dump(KERN_ERR, "tun: ",
					       DUMP_PREFIX_NONE,
					       16, 1, skb->head,
					       min((int)gso.hdr_len, 64), true);
				WARN_ON_ONCE(1);
				return -EINVAL;
			}
			if (sinfo->gso_type & SKB_GSO_TCP_ECN)
				gso.gso_type |= VIRTIO_NET_HDR_GSO_ECN;
		} else
			gso.gso_type = VIRTIO_NET_HDR_GSO_NONE;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
			gso.csum_start = skb_checksum_start_offset(skb);
			gso.csum_offset = skb->csum_offset;
		} else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
			gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
		} /* else everything is zero */

		if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
					       sizeof(gso))))
			return -EFAULT;
		total += tun->vnet_hdr_sz;
	}

	len = min_t(int, skb->len, len);

	/* NOTE(review): the copy's return value is not checked here; a
	 * partial copy would still be accounted as fully transferred. */
	skb_copy_datagram_const_iovec(skb, 0, iv, total, len);
	total += skb->len;

	tun->dev->stats.tx_packets++;
	tun->dev->stats.tx_bytes += len;

	return total;
}
/* Dequeue one packet and copy it to userspace; blocks unless noblock.
 * Returns bytes written by tun_put_user() (which may exceed len to
 * signal truncation — see tun_recvmsg()/tun_chr_aio_read()), or -errno.
 */
static ssize_t tun_do_read(struct tun_struct *tun,
			   struct kiocb *iocb, const struct iovec *iv,
			   ssize_t len, int noblock)
{
	DECLARE_WAITQUEUE(wait, current);
	struct sk_buff *skb;
	ssize_t ret = 0;

	tun_debug(KERN_INFO, tun, "tun_chr_read\n");

	if (unlikely(!noblock))
		add_wait_queue(&tun->wq.wait, &wait);
	while (len) {
		/* Mark sleeping before testing the queue so a concurrent
		 * enqueue+wakeup cannot be missed. */
		current->state = TASK_INTERRUPTIBLE;

		/* Read frames from the queue */
		if (!(skb = skb_dequeue(&tun->socket.sk->sk_receive_queue))) {
			if (noblock) {
				ret = -EAGAIN;
				break;
			}
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}
			if (tun->dev->reg_state != NETREG_REGISTERED) {
				ret = -EIO;
				break;
			}

			/* Nothing to read, let's sleep */
			schedule();
			continue;
		}
		/* Queue drained below the limit: restart the stack's tx. */
		netif_wake_queue(tun->dev);

		ret = tun_put_user(tun, skb, iv, len);
		kfree_skb(skb);
		break;
	}

	current->state = TASK_RUNNING;
	if (unlikely(!noblock))
		remove_wait_queue(&tun->wq.wait, &wait);

	return ret;
}
/* Char device read path: one packet per call; the result is clamped to
 * the buffer length (silent truncation, unlike recvmsg's MSG_TRUNC).
 */
static ssize_t tun_chr_aio_read(struct kiocb *iocb, const struct iovec *iv,
				unsigned long count, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun = __tun_get(tfile);
	ssize_t len, ret;

	if (!tun)
		return -EBADFD;
	len = iov_length(iv, count);
	if (len < 0) {
		ret = -EINVAL;
		goto out;
	}

	ret = tun_do_read(tun, iocb, iv, len, file->f_flags & O_NONBLOCK);
	ret = min_t(ssize_t, ret, len);
out:
	tun_put(tun);
	return ret;
}
/* Common netdev setup for both creation paths (ioctl and rtnl):
 * unrestricted owner/group plus our ethtool and destructor hooks.
 */
static void tun_setup(struct net_device *dev)
{
	struct tun_struct *tun = netdev_priv(dev);

	tun->owner = -1;	/* -1 = any uid may attach */
	tun->group = -1;	/* -1 = any gid may attach */

	dev->ethtool_ops = &tun_ethtool_ops;
	dev->destructor = tun_free_netdev;
}
/* Trivial set of netlink ops to allow deleting tun or tap
 * device with netlink.
 */
/* Creation via rtnetlink is not supported; always refusing validation
 * leaves only deletion usable through the link ops.
 */
static int tun_validate(struct nlattr *tb[], struct nlattr *data[])
{
	return -EINVAL;
}
/* rtnl link ops: registered mainly so "ip link del" works on tun/tap
 * devices; tun_validate() blocks creation through this path.
 */
static struct rtnl_link_ops tun_link_ops __read_mostly = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct tun_struct),
	.setup		= tun_setup,
	.validate	= tun_validate,
};
/* sk->sk_write_space callback: when send buffer space frees up, wake
 * pollers that armed SOCK_ASYNC_NOSPACE (see tun_chr_poll()) and
 * deliver SIGIO to async subscribers.
 */
static void tun_sock_write_space(struct sock *sk)
{
	struct tun_struct *tun;
	wait_queue_head_t *wqueue;

	if (!sock_writeable(sk))
		return;

	/* Only wake if someone actually went to sleep on write space. */
	if (!test_and_clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags))
		return;

	wqueue = sk_sleep(sk);
	if (wqueue && waitqueue_active(wqueue))
		wake_up_interruptible_sync_poll(wqueue, POLLOUT |
						POLLWRNORM | POLLWRBAND);

	tun = tun_sk(sk)->tun;
	kill_fasync(&tun->fasync, SIGIO, POLL_OUT);
}
/* sk destructor: the sock's death releases the netdev it belongs to
 * (counterpart of tun_free_netdev()).
 */
static void tun_sock_destruct(struct sock *sk)
{
	free_netdev(tun_sk(sk)->tun->dev);
}
/* Socket sendmsg: inject one packet, honouring MSG_DONTWAIT. */
static int tun_sendmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len)
{
	struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
	return tun_get_user(tun, m->msg_iov, total_len,
			    m->msg_flags & MSG_DONTWAIT);
}
/* Socket recvmsg: read one packet.  tun_do_read() may return more than
 * total_len (headers + full skb length) to signal truncation; report
 * that via MSG_TRUNC, returning the full size only when the caller
 * asked for it with MSG_TRUNC.
 */
static int tun_recvmsg(struct kiocb *iocb, struct socket *sock,
		       struct msghdr *m, size_t total_len,
		       int flags)
{
	struct tun_struct *tun = container_of(sock, struct tun_struct, socket);
	int ret;
	if (flags & ~(MSG_DONTWAIT|MSG_TRUNC))
		return -EINVAL;
	ret = tun_do_read(tun, iocb, m->msg_iov, total_len,
			  flags & MSG_DONTWAIT);
	if (ret > total_len) {
		m->msg_flags |= MSG_TRUNC;
		ret = flags & MSG_TRUNC ? ret : total_len;
	}
	return ret;
}
/* Socket release: drop the reference held on the embedded sock. */
static int tun_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk)
		sock_put(sk);

	return 0;
}
/* Ops structure to mimic raw sockets with tun; only the message and
 * release paths are meaningful for this in-kernel socket.
 */
static const struct proto_ops tun_socket_ops = {
	.sendmsg = tun_sendmsg,
	.recvmsg = tun_recvmsg,
	.release = tun_release,
};
/* Minimal protocol definition; obj_size makes sk_alloc() hand back a
 * struct tun_sock so tun_sk() can recover the back pointer.
 */
static struct proto tun_proto = {
	.name		= "tun",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct tun_sock),
};
/* Translate the driver's internal TUN_* mode bits into the IFF_* bits
 * reported to userspace (TUNGETIFF, sysfs tun_flags).
 */
static int tun_flags(struct tun_struct *tun)
{
	int flags = (tun->flags & TUN_TUN_DEV) ? IFF_TUN : IFF_TAP;

	if (tun->flags & TUN_NO_PI)
		flags |= IFF_NO_PI;

	if (tun->flags & TUN_ONE_QUEUE)
		flags |= IFF_ONE_QUEUE;

	if (tun->flags & TUN_VNET_HDR)
		flags |= IFF_VNET_HDR;

	return flags;
}
/* sysfs: expose the device's IFF_* flags in hex. */
static ssize_t tun_show_flags(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "0x%x\n", tun_flags(tun));
}

/* sysfs: expose the owning uid (-1 = unrestricted). */
static ssize_t tun_show_owner(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "%d\n", tun->owner);
}

/* sysfs: expose the owning gid (-1 = unrestricted). */
static ssize_t tun_show_group(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct tun_struct *tun = netdev_priv(to_net_dev(dev));
	return sprintf(buf, "%d\n", tun->group);
}

/* Read-only attributes registered from tun_set_iff(). */
static DEVICE_ATTR(tun_flags, 0444, tun_show_flags, NULL);
static DEVICE_ATTR(owner, 0444, tun_show_owner, NULL);
static DEVICE_ATTR(group, 0444, tun_show_group, NULL);
/* TUNSETIFF backend: attach to an existing tun/tap device by name
 * (after permission checks) or create a new one, then update the
 * PI/queue/vnet-hdr mode flags from ifr_flags.  Caller holds RTNL.
 * On success the actual device name is copied back into ifr.
 */
static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
{
	struct sock *sk;
	struct tun_struct *tun;
	struct net_device *dev;
	int err;

	dev = __dev_get_by_name(net, ifr->ifr_name);
	if (dev) {
		const struct cred *cred = current_cred();

		if (ifr->ifr_flags & IFF_TUN_EXCL)
			return -EBUSY;

		/* The requested type must match what the device is;
		 * identified via its netdev ops table. */
		if ((ifr->ifr_flags & IFF_TUN) && dev->netdev_ops == &tun_netdev_ops)
			tun = netdev_priv(dev);
		else if ((ifr->ifr_flags & IFF_TAP) && dev->netdev_ops == &tap_netdev_ops)
			tun = netdev_priv(dev);
		else
			return -EINVAL;

		/* owner/group restrictions can be overridden by CAP_NET_ADMIN */
		if (((tun->owner != -1 && cred->euid != tun->owner) ||
		     (tun->group != -1 && !in_egroup_p(tun->group))) &&
		    !capable(CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_attach(tun->socket.sk);
		if (err < 0)
			return err;

		err = tun_attach(tun, file);
		if (err < 0)
			return err;
	}
	else {
		char *name;
		unsigned long flags = 0;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;
		err = security_tun_dev_create();
		if (err < 0)
			return err;

		/* Set dev type */
		if (ifr->ifr_flags & IFF_TUN) {
			/* TUN device */
			flags |= TUN_TUN_DEV;
			name = "tun%d";
		} else if (ifr->ifr_flags & IFF_TAP) {
			/* TAP device */
			flags |= TUN_TAP_DEV;
			name = "tap%d";
		} else
			return -EINVAL;

		if (*ifr->ifr_name)
			name = ifr->ifr_name;

		dev = alloc_netdev(sizeof(struct tun_struct), name,
				   tun_setup);
		if (!dev)
			return -ENOMEM;

		dev_net_set(dev, net);
		dev->rtnl_link_ops = &tun_link_ops;

		tun = netdev_priv(dev);
		tun->dev = dev;
		tun->flags = flags;
		tun->txflt.count = 0;
		tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);

		err = -ENOMEM;
		sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
		if (!sk)
			goto err_free_dev;

		/* Move the sock into the caller's namespace. */
		sk_change_net(sk, net);
		tun->socket.wq = &tun->wq;
		init_waitqueue_head(&tun->wq.wait);
		tun->socket.ops = &tun_socket_ops;
		sock_init_data(&tun->socket, sk);
		sk->sk_write_space = tun_sock_write_space;
		sk->sk_sndbuf = INT_MAX;

		tun_sk(sk)->tun = tun;

		security_tun_dev_post_create(sk);

		tun_net_init(dev);

		dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST |
			TUN_USER_FEATURES;
		dev->features = dev->hw_features;

		err = register_netdevice(tun->dev);
		if (err < 0)
			goto err_free_sk;

		/* sysfs attrs are informational only; failure is logged,
		 * not fatal. */
		if (device_create_file(&tun->dev->dev, &dev_attr_tun_flags) ||
		    device_create_file(&tun->dev->dev, &dev_attr_owner) ||
		    device_create_file(&tun->dev->dev, &dev_attr_group))
			pr_err("Failed to create tun sysfs files\n");

		sk->sk_destruct = tun_sock_destruct;

		err = tun_attach(tun, file);
		if (err < 0)
			goto failed;
	}

	tun_debug(KERN_INFO, tun, "tun_set_iff\n");

	if (ifr->ifr_flags & IFF_NO_PI)
		tun->flags |= TUN_NO_PI;
	else
		tun->flags &= ~TUN_NO_PI;

	if (ifr->ifr_flags & IFF_ONE_QUEUE)
		tun->flags |= TUN_ONE_QUEUE;
	else
		tun->flags &= ~TUN_ONE_QUEUE;

	if (ifr->ifr_flags & IFF_VNET_HDR)
		tun->flags |= TUN_VNET_HDR;
	else
		tun->flags &= ~TUN_VNET_HDR;

	/* Make sure persistent devices do not get stuck in
	 * xoff state.
	 */
	if (netif_running(tun->dev))
		netif_wake_queue(tun->dev);

	strcpy(ifr->ifr_name, tun->dev->name);
	return 0;

 err_free_sk:
	tun_free_netdev(dev);
 err_free_dev:
	free_netdev(dev);
 failed:
	return err;
}
/* TUNGETIFF backend: report the device name and its IFF_* flags. */
static int tun_get_iff(struct net *net, struct tun_struct *tun,
		       struct ifreq *ifr)
{
	tun_debug(KERN_INFO, tun, "tun_get_iff\n");

	strcpy(ifr->ifr_name, tun->dev->name);

	ifr->ifr_flags = tun_flags(tun);

	return 0;
}
/* This is like a cut-down ethtool ops, except done via tun fd so no
 * privs required. */
/* TUNSETOFFLOAD backend: translate TUN_F_* request bits into netdev
 * feature flags and apply them.  Any bit we do not recognise makes the
 * whole request fail, which gives userspace a probe mechanism.
 */
static int set_offload(struct tun_struct *tun, unsigned long arg)
{
	netdev_features_t features = 0;

	if (arg & TUN_F_CSUM) {
		features |= NETIF_F_HW_CSUM;
		arg &= ~TUN_F_CSUM;

		/* TSO/UFO only make sense with checksum offload on. */
		if (arg & (TUN_F_TSO4|TUN_F_TSO6)) {
			if (arg & TUN_F_TSO_ECN) {
				features |= NETIF_F_TSO_ECN;
				arg &= ~TUN_F_TSO_ECN;
			}
			if (arg & TUN_F_TSO4)
				features |= NETIF_F_TSO;
			if (arg & TUN_F_TSO6)
				features |= NETIF_F_TSO6;
			arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
		}

		if (arg & TUN_F_UFO) {
			features |= NETIF_F_UFO;
			arg &= ~TUN_F_UFO;
		}
	}

	/* This gives the user a way to test for new features in future by
	 * trying to set them. */
	if (arg)
		return -EINVAL;

	tun->set_features = features;
	netdev_update_features(tun->dev);

	return 0;
}
/* Common ioctl backend for the native and compat entry points;
 * ifreq_len is the (possibly compat-sized) struct ifreq size.  All
 * commands except TUNGETFEATURES run under the RTNL lock with a
 * reference on the attached device.
 */
static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
			    unsigned long arg, int ifreq_len)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;
	void __user* argp = (void __user*)arg;
	struct sock_fprog fprog;
	struct ifreq ifr;
	int sndbuf;
	int vnet_hdr_sz;
	int ret;

#ifdef CONFIG_ANDROID_PARANOID_NETWORK
	/* Android: only TUNGETIFF is allowed without CAP_NET_ADMIN. */
	if (cmd != TUNGETIFF && !capable(CAP_NET_ADMIN)) {
		return -EPERM;
	}
#endif

	/* 0x89xx covers the SIOC* socket ioctls which also carry an ifreq */
	if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
		if (copy_from_user(&ifr, argp, ifreq_len))
			return -EFAULT;

	if (cmd == TUNGETFEATURES) {
		/* Currently this just means: "what IFF flags are valid?".
		 * This is needed because we never checked for invalid flags on
		 * TUNSETIFF. */
		return put_user(IFF_TUN | IFF_TAP | IFF_NO_PI | IFF_ONE_QUEUE |
				IFF_VNET_HDR,
				(unsigned int __user*)argp);
	}

	rtnl_lock();

	tun = __tun_get(tfile);
	if (cmd == TUNSETIFF && !tun) {
		ifr.ifr_name[IFNAMSIZ-1] = '\0';

		ret = tun_set_iff(tfile->net, file, &ifr);

		if (ret)
			goto unlock;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		goto unlock;
	}

	/* Every remaining command requires an attached device. */
	ret = -EBADFD;
	if (!tun)
		goto unlock;

	tun_debug(KERN_INFO, tun, "tun_chr_ioctl cmd %d\n", cmd);

	ret = 0;
	switch (cmd) {
	case TUNGETIFF:
		ret = tun_get_iff(current->nsproxy->net_ns, tun, &ifr);
		if (ret)
			break;

		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case TUNSETNOCSUM:
		/* Disable/Enable checksum */

		/* [unimplemented] */
		tun_debug(KERN_INFO, tun, "ignored: set checksum %s\n",
			  arg ? "disabled" : "enabled");
		break;

	case TUNSETPERSIST:
		/* Disable/Enable persist mode */
		if (arg)
			tun->flags |= TUN_PERSIST;
		else
			tun->flags &= ~TUN_PERSIST;

		tun_debug(KERN_INFO, tun, "persist %s\n",
			  arg ? "enabled" : "disabled");
		break;

	case TUNSETOWNER:
		/* Set owner of the device */
		tun->owner = (uid_t) arg;

		tun_debug(KERN_INFO, tun, "owner set to %d\n", tun->owner);
		break;

	case TUNSETGROUP:
		/* Set group of the device */
		tun->group = (gid_t) arg;

		tun_debug(KERN_INFO, tun, "group set to %d\n", tun->group);
		break;

	case TUNSETLINK:
		/* Only allow setting the type when the interface is down */
		if (tun->dev->flags & IFF_UP) {
			tun_debug(KERN_INFO, tun,
				  "Linktype set failed because interface is up\n");
			ret = -EBUSY;
		} else {
			tun->dev->type = (int) arg;
			tun_debug(KERN_INFO, tun, "linktype set to %d\n",
				  tun->dev->type);
			ret = 0;
		}
		break;

#ifdef TUN_DEBUG
	case TUNSETDEBUG:
		tun->debug = arg;
		break;
#endif
	case TUNSETOFFLOAD:
		ret = set_offload(tun, arg);
		break;

	case TUNSETTXFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = update_filter(&tun->txflt, (void __user *)arg);
		break;

	case SIOCGIFHWADDR:
		/* Get hw address */
		memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
		ifr.ifr_hwaddr.sa_family = tun->dev->type;
		if (copy_to_user(argp, &ifr, ifreq_len))
			ret = -EFAULT;
		break;

	case SIOCSIFHWADDR:
		/* Set hw address */
		tun_debug(KERN_DEBUG, tun, "set hw address: %pM\n",
			  ifr.ifr_hwaddr.sa_data);

		ret = dev_set_mac_address(tun->dev, &ifr.ifr_hwaddr);
		break;

	case TUNGETSNDBUF:
		sndbuf = tun->socket.sk->sk_sndbuf;
		if (copy_to_user(argp, &sndbuf, sizeof(sndbuf)))
			ret = -EFAULT;
		break;

	case TUNSETSNDBUF:
		if (copy_from_user(&sndbuf, argp, sizeof(sndbuf))) {
			ret = -EFAULT;
			break;
		}

		tun->socket.sk->sk_sndbuf = sndbuf;
		break;

	case TUNGETVNETHDRSZ:
		vnet_hdr_sz = tun->vnet_hdr_sz;
		if (copy_to_user(argp, &vnet_hdr_sz, sizeof(vnet_hdr_sz)))
			ret = -EFAULT;
		break;

	case TUNSETVNETHDRSZ:
		if (copy_from_user(&vnet_hdr_sz, argp, sizeof(vnet_hdr_sz))) {
			ret = -EFAULT;
			break;
		}
		/* Never allow less than the basic virtio_net_hdr. */
		if (vnet_hdr_sz < (int)sizeof(struct virtio_net_hdr)) {
			ret = -EINVAL;
			break;
		}

		tun->vnet_hdr_sz = vnet_hdr_sz;
		break;

	case TUNATTACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = -EFAULT;
		if (copy_from_user(&fprog, argp, sizeof(fprog)))
			break;

		ret = sk_attach_filter(&fprog, tun->socket.sk);
		break;

	case TUNDETACHFILTER:
		/* Can be set only for TAPs */
		ret = -EINVAL;
		if ((tun->flags & TUN_TYPE_MASK) != TUN_TAP_DEV)
			break;
		ret = sk_detach_filter(tun->socket.sk);
		break;

	default:
		ret = -EINVAL;
		break;
	}

unlock:
	rtnl_unlock();
	if (tun)
		tun_put(tun);
	return ret;
}
/*
 * Native ioctl entry point: delegate to the common handler, telling it
 * to copy a full-sized struct ifreq to/from userspace.
 */
static long tun_chr_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct ifreq));
}
#ifdef CONFIG_COMPAT
/*
 * 32-bit-compat ioctl entry point.
 *
 * Commands whose argument is a user pointer must have that pointer
 * widened with compat_ptr(); every other command carries a plain
 * integer, which is truncated to the compat width.
 */
static long tun_chr_compat_ioctl(struct file *file,
				 unsigned int cmd, unsigned long arg)
{
	if (cmd == TUNSETIFF || cmd == TUNGETIFF || cmd == TUNSETTXFILTER ||
	    cmd == TUNGETSNDBUF || cmd == TUNSETSNDBUF ||
	    cmd == SIOCGIFHWADDR || cmd == SIOCSIFHWADDR)
		arg = (unsigned long)compat_ptr(arg);
	else
		arg = (compat_ulong_t)arg;

	/*
	 * compat_ifreq is shorter than ifreq, so we must not access beyond
	 * the end of that structure. All fields that are used in this
	 * driver are compatible though, we don't need to convert the
	 * contents.
	 */
	return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
}
#endif /* CONFIG_COMPAT */
/*
 * Enable or disable SIGIO-style async notification on the tun fd.
 * Returns 0 on success, -EBADFD if no device is attached, or the
 * error from fasync_helper()/__f_setown().
 */
static int tun_chr_fasync(int fd, struct file *file, int on)
{
	struct tun_struct *tun = tun_get(file);
	int ret;

	if (!tun)
		return -EBADFD;

	tun_debug(KERN_INFO, tun, "tun_chr_fasync %d\n", on);

	ret = fasync_helper(fd, file, on, &tun->fasync);
	if (ret < 0)
		goto out;

	if (!on) {
		tun->flags &= ~TUN_FASYNC;
		ret = 0;
		goto out;
	}

	/* Direct the signals at the current task. */
	ret = __f_setown(file, task_pid(current), PIDTYPE_PID, 0);
	if (!ret)
		tun->flags |= TUN_FASYNC;
out:
	tun_put(tun);
	return ret;
}
/*
 * open() on the misc device: allocate the per-fd tun_file state.
 * No device is attached yet; that happens later via TUNSETIFF.
 */
static int tun_chr_open(struct inode *inode, struct file *file)
{
	struct tun_file *tfile;

	DBG1(KERN_INFO, "tunX: tun_chr_open\n");

	tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
	if (tfile == NULL)
		return -ENOMEM;

	/* Not attached to any tun device yet. */
	tfile->tun = NULL;
	atomic_set(&tfile->count, 0);
	/* Pin the opener's network namespace for the fd's lifetime. */
	tfile->net = get_net(current->nsproxy->net_ns);

	file->private_data = tfile;
	return 0;
}
/*
 * Final close of the tun fd: detach from the device, tear down the
 * netdevice unless it is persistent, and free the per-fd state.
 */
static int tun_chr_close(struct inode *inode, struct file *file)
{
	struct tun_file *tfile = file->private_data;
	struct tun_struct *tun;

	tun = __tun_get(tfile);
	if (tun) {
		struct net_device *dev = tun->dev;

		tun_debug(KERN_INFO, tun, "tun_chr_close\n");

		__tun_detach(tun);

		/* If desirable, unregister the netdevice. */
		if (!(tun->flags & TUN_PERSIST)) {
			rtnl_lock();
			/* Guard against a device already torn down elsewhere. */
			if (dev->reg_state == NETREG_REGISTERED)
				unregister_netdevice(dev);
			rtnl_unlock();
		}
	}

	/*
	 * NOTE(review): tfile->tun is deliberately re-read here rather than
	 * reusing the __tun_get() result above; presumably __tun_detach()
	 * may clear it, and the sock_put() drops the socket reference taken
	 * at attach time only when one is still held — confirm against
	 * tun_attach()/__tun_detach(), which are outside this view.
	 */
	tun = tfile->tun;
	if (tun)
		sock_put(tun->socket.sk);

	put_net(tfile->net);
	kfree(tfile);

	return 0;
}
/*
 * Character-device file operations for the tun misc device.  Plain
 * read()/write() are routed through the aio handlers by
 * do_sync_read()/do_sync_write().
 */
static const struct file_operations tun_fops = {
	.owner	= THIS_MODULE,
	.llseek = no_llseek,
	.read  = do_sync_read,
	.aio_read  = tun_chr_aio_read,
	.write = do_sync_write,
	.aio_write = tun_chr_aio_write,
	.poll	= tun_chr_poll,
	.unlocked_ioctl	= tun_chr_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = tun_chr_compat_ioctl,
#endif
	.open	= tun_chr_open,
	.release = tun_chr_close,
	.fasync = tun_chr_fasync
};
/* Misc character device registered as /dev/net/tun (minor TUN_MINOR). */
static struct miscdevice tun_miscdev = {
	.minor = TUN_MINOR,
	.name = "tun",
	.nodename = "net/tun",
	.fops = &tun_fops,
};
/* ethtool interface */
/*
 * ethtool get_settings: tun has no real PHY, so report a fixed,
 * fictitious 10 Mb/s full-duplex TP link with autoneg disabled.
 */
static int tun_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	ethtool_cmd_speed_set(cmd, SPEED_10);

	cmd->supported		= 0;
	cmd->advertising	= 0;
	cmd->duplex		= DUPLEX_FULL;
	cmd->port		= PORT_TP;
	cmd->phy_address	= 0;
	cmd->transceiver	= XCVR_INTERNAL;
	cmd->autoneg		= AUTONEG_DISABLE;
	cmd->maxtxpkt		= 0;
	cmd->maxrxpkt		= 0;

	return 0;
}
/*
 * ethtool get_drvinfo: report driver name/version, and "tun" or "tap"
 * as the bus_info depending on the device mode.
 */
static void tun_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct tun_struct *tun = netdev_priv(dev);
	unsigned type = tun->flags & TUN_TYPE_MASK;

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));

	if (type == TUN_TUN_DEV)
		strlcpy(info->bus_info, "tun", sizeof(info->bus_info));
	else if (type == TUN_TAP_DEV)
		strlcpy(info->bus_info, "tap", sizeof(info->bus_info));
}
/*
 * ethtool get_msglevel: return the per-device debug mask when the
 * driver is built with TUN_DEBUG; otherwise -EOPNOTSUPP (note this is
 * the upstream behavior of returning a negative errno through a u32).
 */
static u32 tun_get_msglevel(struct net_device *dev)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	return tun->debug;
#else
	return -EOPNOTSUPP;
#endif
}
/*
 * ethtool set_msglevel: store the per-device debug mask.  A silent
 * no-op when the driver is built without TUN_DEBUG.
 */
static void tun_set_msglevel(struct net_device *dev, u32 value)
{
#ifdef TUN_DEBUG
	struct tun_struct *tun = netdev_priv(dev);
	tun->debug = value;
#endif
}
/* ethtool callbacks exposed by tun/tap netdevices. */
static const struct ethtool_ops tun_ethtool_ops = {
	.get_settings	= tun_get_settings,
	.get_drvinfo	= tun_get_drvinfo,
	.get_msglevel	= tun_get_msglevel,
	.set_msglevel	= tun_set_msglevel,
	.get_link	= ethtool_op_get_link,
};
/*
 * Module init: register the rtnl link ops, then the misc character
 * device.  On failure of the second step, unwind the first.
 */
static int __init tun_init(void)
{
	int ret;

	pr_info("%s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
	pr_info("%s\n", DRV_COPYRIGHT);

	ret = rtnl_link_register(&tun_link_ops);
	if (ret) {
		pr_err("Can't register link_ops\n");
		return ret;
	}

	ret = misc_register(&tun_miscdev);
	if (ret) {
		pr_err("Can't register misc device %d\n", TUN_MINOR);
		rtnl_link_unregister(&tun_link_ops);
		return ret;
	}

	return 0;
}
/*
 * Module exit: tear down in reverse order of tun_init() — misc device
 * first, then the rtnl link ops.
 */
static void tun_cleanup(void)
{
	misc_deregister(&tun_miscdev);
	rtnl_link_unregister(&tun_link_ops);
}
/* Get an underlying socket object from tun file.  Returns error unless file is
 * attached to a device.  The returned object works like a packet socket, it
 * can be used for sock_sendmsg/sock_recvmsg.  The caller is responsible for
 * holding a reference to the file for as long as the socket is in use. */
struct socket *tun_get_socket(struct file *file)
{
	struct tun_struct *tun;
	/* Reject fds that are not tun devices at all. */
	if (file->f_op != &tun_fops)
		return ERR_PTR(-EINVAL);
	tun = tun_get(file);
	if (!tun)
		return ERR_PTR(-EBADFD);
	/*
	 * The temporary reference taken above is dropped before returning;
	 * per the contract in the header comment, the caller keeps the
	 * socket alive by holding the file reference, not this one.
	 */
	tun_put(tun);
	return &tun->socket;
}
/* Exported for in-kernel users (e.g. vhost-net) to reach the tun socket. */
EXPORT_SYMBOL_GPL(tun_get_socket);

module_init(tun_init);
module_exit(tun_cleanup);
MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR(DRV_COPYRIGHT);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(TUN_MINOR);
/* Allows udev/devtmpfs to create /dev/net/tun on demand. */
MODULE_ALIAS("devname:net/tun");
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.