| repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
El-Nath/biji-find5-kernel | drivers/usb/host/whci/pzl.c | 13906 | 10580 | /*
* Wireless Host Controller (WHC) periodic schedule management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>
#include "../../wusbcore/wusbhc.h"
#include "whcd.h"
static void update_pzl_pointers(struct whc *whc, int period, u64 addr)
{
switch (period) {
case 0:
whc_qset_set_link_ptr(&whc->pz_list[0], addr);
whc_qset_set_link_ptr(&whc->pz_list[2], addr);
whc_qset_set_link_ptr(&whc->pz_list[4], addr);
whc_qset_set_link_ptr(&whc->pz_list[6], addr);
whc_qset_set_link_ptr(&whc->pz_list[8], addr);
whc_qset_set_link_ptr(&whc->pz_list[10], addr);
whc_qset_set_link_ptr(&whc->pz_list[12], addr);
whc_qset_set_link_ptr(&whc->pz_list[14], addr);
break;
case 1:
whc_qset_set_link_ptr(&whc->pz_list[1], addr);
whc_qset_set_link_ptr(&whc->pz_list[5], addr);
whc_qset_set_link_ptr(&whc->pz_list[9], addr);
whc_qset_set_link_ptr(&whc->pz_list[13], addr);
break;
case 2:
whc_qset_set_link_ptr(&whc->pz_list[3], addr);
whc_qset_set_link_ptr(&whc->pz_list[11], addr);
break;
case 3:
whc_qset_set_link_ptr(&whc->pz_list[7], addr);
break;
case 4:
whc_qset_set_link_ptr(&whc->pz_list[15], addr);
break;
}
}
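/*
* The unrolled switch above encodes the PZL's binary-tree layout: a
* period-p qset is linked from every pz_list entry i where
* i % (1 << (p + 1)) == (1 << p) - 1. A behaviour-equivalent,
* loop-based sketch of the same update (illustrative only):
*/
static void __maybe_unused update_pzl_pointers_generic(struct whc *whc,
int period, u64 addr)
{
int i;
for (i = (1 << period) - 1; i < 16; i += 1 << (period + 1))
whc_qset_set_link_ptr(&whc->pz_list[i], addr);
}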
/*
* Return the 'period' to use for this qset. The minimum interval for
* the endpoint is used so that, whatever URBs are submitted, the
* device is polled often enough.
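*
* For example, bInterval <= 6 maps to period 0, bInterval 8 to
* period 2, and bInterval >= 10 is clamped to period 4; in effect
* period = clamp(bInterval, 6, 10) - 6.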
*/
static int qset_get_period(struct whc *whc, struct whc_qset *qset)
{
uint8_t bInterval = qset->ep->desc.bInterval;
if (bInterval < 6)
bInterval = 6;
if (bInterval > 10)
bInterval = 10;
return bInterval - 6;
}
static void qset_insert_in_sw_list(struct whc *whc, struct whc_qset *qset)
{
int period;
period = qset_get_period(whc, qset);
qset_clear(whc, qset);
list_move(&qset->list_node, &whc->periodic_list[period]);
qset->in_sw_list = true;
}
static void pzl_qset_remove(struct whc *whc, struct whc_qset *qset)
{
list_move(&qset->list_node, &whc->periodic_removed_list);
qset->in_hw_list = false;
qset->in_sw_list = false;
}
/**
* pzl_process_qset - process any recently inactivated or halted qTDs
* in a qset.
*
* After inactive qTDs are removed, new qTDs can be added if the
* urb queue still contains URBs.
*
* Returns the schedule updates required.
*/
static enum whc_update pzl_process_qset(struct whc *whc, struct whc_qset *qset)
{
enum whc_update update = 0;
uint32_t status = 0;
while (qset->ntds) {
struct whc_qtd *td;
td = &qset->qtd[qset->td_start];
status = le32_to_cpu(td->status);
/*
* Nothing to do with a still active qTD.
*/
if (status & QTD_STS_ACTIVE)
break;
if (status & QTD_STS_HALTED) {
/* Ug, an error. */
process_halted_qtd(whc, qset, td);
/* A halted qTD always triggers an update
because the qset was either removed or
reactivated. */
update |= WHC_UPDATE_UPDATED;
goto done;
}
/* Mmm, a completed qTD. */
process_inactive_qtd(whc, qset, td);
}
if (!qset->remove)
update |= qset_add_qtds(whc, qset);
done:
/*
* If there are no qTDs in this qset, remove it from the PZL.
*/
if (qset->remove && qset->ntds == 0) {
pzl_qset_remove(whc, qset);
update |= WHC_UPDATE_REMOVED;
}
return update;
}
/**
* pzl_start - start the periodic schedule
* @whc: the WHCI host controller
*
* The PZL must be valid (e.g., all entries in the list should have
* the T bit set).
*/
void pzl_start(struct whc *whc)
{
le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, WUSBCMD_PERIODIC_EN);
whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
WUSBSTS_PERIODIC_SCHED, WUSBSTS_PERIODIC_SCHED,
1000, "start PZL");
}
/**
* pzl_stop - stop the periodic schedule
* @whc: the WHCI host controller
*/
void pzl_stop(struct whc *whc)
{
whc_write_wusbcmd(whc, WUSBCMD_PERIODIC_EN, 0);
whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
WUSBSTS_PERIODIC_SCHED, 0,
1000, "stop PZL");
}
/**
* pzl_update - request a PZL update and wait for the hardware to be synced
* @whc: the WHCI HC
* @wusbcmd: WUSBCMD value to start the update.
*
* If the WUSB HC is inactive (i.e., the PZL is stopped) then the
* update must be skipped as the hardware may not respond to update
* requests.
*/
void pzl_update(struct whc *whc, uint32_t wusbcmd)
{
struct wusbhc *wusbhc = &whc->wusbhc;
long t;
mutex_lock(&wusbhc->mutex);
if (wusbhc->active) {
whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
t = wait_event_timeout(
whc->periodic_list_wq,
(le_readl(whc->base + WUSBCMD) & WUSBCMD_PERIODIC_UPDATED) == 0,
msecs_to_jiffies(1000));
if (t == 0)
whc_hw_error(whc, "PZL update timeout");
}
mutex_unlock(&wusbhc->mutex);
}
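/*
* Rebuild the hardware's view of the PZL. tmp_qh carries the head of
* the chain built so far, so a period-p entry falls through into the
* chains already built for the shorter (more frequently polled)
* periods, matching the tree layout of update_pzl_pointers().
*/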
static void update_pzl_hw_view(struct whc *whc)
{
struct whc_qset *qset, *t;
int period;
u64 tmp_qh = 0;
for (period = 0; period < 5; period++) {
list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
whc_qset_set_link_ptr(&qset->qh.link, tmp_qh);
tmp_qh = qset->qset_dma;
qset->in_hw_list = true;
}
update_pzl_pointers(whc, period, tmp_qh);
}
}
/**
* scan_periodic_work - scan the PZL for qsets to process.
*
* Process each qset in the PZL in turn and then signal the WHC that
* the PZL has been updated.
*
* Then start, stop or update the periodic schedule as required.
*/
void scan_periodic_work(struct work_struct *work)
{
struct whc *whc = container_of(work, struct whc, periodic_work);
struct whc_qset *qset, *t;
enum whc_update update = 0;
int period;
spin_lock_irq(&whc->lock);
for (period = 4; period >= 0; period--) {
list_for_each_entry_safe(qset, t, &whc->periodic_list[period], list_node) {
if (!qset->in_hw_list)
update |= WHC_UPDATE_ADDED;
update |= pzl_process_qset(whc, qset);
}
}
if (update & (WHC_UPDATE_ADDED | WHC_UPDATE_REMOVED))
update_pzl_hw_view(whc);
spin_unlock_irq(&whc->lock);
if (update) {
uint32_t wusbcmd = WUSBCMD_PERIODIC_UPDATED | WUSBCMD_PERIODIC_SYNCED_DB;
if (update & WHC_UPDATE_REMOVED)
wusbcmd |= WUSBCMD_PERIODIC_QSET_RM;
pzl_update(whc, wusbcmd);
}
/*
* Now that the PZL is updated, complete the removal of any
* removed qsets.
*
* If the qset was to be reset, do so and reinsert it into the
* PZL if it has pending transfers.
*/
spin_lock_irq(&whc->lock);
list_for_each_entry_safe(qset, t, &whc->periodic_removed_list, list_node) {
qset_remove_complete(whc, qset);
if (qset->reset) {
qset_reset(whc, qset);
if (!list_empty(&qset->stds)) {
qset_insert_in_sw_list(whc, qset);
queue_work(whc->workqueue, &whc->periodic_work);
}
}
}
spin_unlock_irq(&whc->lock);
}
/**
* pzl_urb_enqueue - queue an URB onto the periodic list (PZL)
* @whc: the WHCI host controller
* @urb: the URB to enqueue
* @mem_flags: flags for any memory allocations
*
* The qset for the endpoint is obtained and the urb queued on to it.
*
* Work is scheduled to update the hardware's view of the PZL.
*/
int pzl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
{
struct whc_qset *qset;
int err;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
if (err < 0) {
spin_unlock_irqrestore(&whc->lock, flags);
return err;
}
qset = get_qset(whc, urb, GFP_ATOMIC);
if (qset == NULL)
err = -ENOMEM;
else
err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
if (!err) {
if (!qset->in_sw_list && !qset->remove)
qset_insert_in_sw_list(whc, qset);
} else
usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
spin_unlock_irqrestore(&whc->lock, flags);
if (!err)
queue_work(whc->workqueue, &whc->periodic_work);
return err;
}
/**
* pzl_urb_dequeue - remove an URB (qset) from the periodic list
* @whc: the WHCI host controller
* @urb: the URB to dequeue
* @status: the current status of the URB
*
* URBs that do not yet have qTDs can simply be removed from the software
* queue, otherwise the qset must be removed so the qTDs can be safely
* removed.
*/
int pzl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
{
struct whc_urb *wurb = urb->hcpriv;
struct whc_qset *qset = wurb->qset;
struct whc_std *std, *t;
bool has_qtd = false;
int ret;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
if (ret < 0)
goto out;
list_for_each_entry_safe(std, t, &qset->stds, list_node) {
if (std->urb == urb) {
if (std->qtd)
has_qtd = true;
qset_free_std(whc, std);
} else
std->qtd = NULL; /* so this std is re-added when the qset is */
}
if (has_qtd) {
pzl_qset_remove(whc, qset);
update_pzl_hw_view(whc);
wurb->status = status;
wurb->is_async = false;
queue_work(whc->workqueue, &wurb->dequeue_work);
} else
qset_remove_urb(whc, qset, urb, status);
out:
spin_unlock_irqrestore(&whc->lock, flags);
return ret;
}
/**
* pzl_qset_delete - delete a qset from the PZL
*/
void pzl_qset_delete(struct whc *whc, struct whc_qset *qset)
{
qset->remove = 1;
queue_work(whc->workqueue, &whc->periodic_work);
qset_delete(whc, qset);
}
/**
* pzl_init - initialize the periodic zone list
* @whc: the WHCI host controller
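*
* All 16 PZL entries are initialized with the T (terminate) bit set,
* leaving the schedule valid but empty, which is what pzl_start()
* requires.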
*/
int pzl_init(struct whc *whc)
{
int i;
whc->pz_list = dma_alloc_coherent(&whc->umc->dev, sizeof(u64) * 16,
&whc->pz_list_dma, GFP_KERNEL);
if (whc->pz_list == NULL)
return -ENOMEM;
/* Set T bit on all elements in PZL. */
for (i = 0; i < 16; i++)
whc->pz_list[i] = cpu_to_le64(QH_LINK_NTDS(8) | QH_LINK_T);
le_writeq(whc->pz_list_dma, whc->base + WUSBPERIODICLISTBASE);
return 0;
}
/**
* pzl_clean_up - free PZL resources
* @whc: the WHCI host controller
*
* The PZL is stopped and empty.
*/
void pzl_clean_up(struct whc *whc)
{
if (whc->pz_list)
dma_free_coherent(&whc->umc->dev, sizeof(u64) * 16, whc->pz_list,
whc->pz_list_dma);
}
| gpl-2.0 |
NooNameR/Sense4.0-kernel | drivers/usb/host/whci/asl.c | 13906 | 9704 | /*
* Wireless Host Controller (WHC) asynchronous schedule management.
*
* Copyright (C) 2007 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>
#include <linux/usb.h>
#include "../../wusbcore/wusbhc.h"
#include "whcd.h"
static void qset_get_next_prev(struct whc *whc, struct whc_qset *qset,
struct whc_qset **next, struct whc_qset **prev)
{
struct list_head *n, *p;
BUG_ON(list_empty(&whc->async_list));
n = qset->list_node.next;
if (n == &whc->async_list)
n = n->next;
p = qset->list_node.prev;
if (p == &whc->async_list)
p = p->prev;
*next = container_of(n, struct whc_qset, list_node);
*prev = container_of(p, struct whc_qset, list_node);
}
static void asl_qset_insert_begin(struct whc *whc, struct whc_qset *qset)
{
list_move(&qset->list_node, &whc->async_list);
qset->in_sw_list = true;
}
static void asl_qset_insert(struct whc *whc, struct whc_qset *qset)
{
struct whc_qset *next, *prev;
qset_clear(whc, qset);
/* Link into ASL. */
qset_get_next_prev(whc, qset, &next, &prev);
whc_qset_set_link_ptr(&qset->qh.link, next->qset_dma);
whc_qset_set_link_ptr(&prev->qh.link, qset->qset_dma);
qset->in_hw_list = true;
}
static void asl_qset_remove(struct whc *whc, struct whc_qset *qset)
{
struct whc_qset *prev, *next;
qset_get_next_prev(whc, qset, &next, &prev);
list_move(&qset->list_node, &whc->async_removed_list);
qset->in_sw_list = false;
/*
* No more qsets in the ASL? The caller must stop the ASL as
* it's no longer valid.
*/
if (list_empty(&whc->async_list))
return;
/* Remove from ASL. */
whc_qset_set_link_ptr(&prev->qh.link, next->qset_dma);
qset->in_hw_list = false;
}
/**
* process_qset - process any recently inactivated or halted qTDs in a
* qset.
*
* After inactive qTDs are removed, new qTDs can be added if the
* urb queue still contains URBs.
*
* Returns the schedule updates required.
*/
static uint32_t process_qset(struct whc *whc, struct whc_qset *qset)
{
enum whc_update update = 0;
uint32_t status = 0;
while (qset->ntds) {
struct whc_qtd *td;
td = &qset->qtd[qset->td_start];
status = le32_to_cpu(td->status);
/*
* Nothing to do with a still active qTD.
*/
if (status & QTD_STS_ACTIVE)
break;
if (status & QTD_STS_HALTED) {
/* Ug, an error. */
process_halted_qtd(whc, qset, td);
/* A halted qTD always triggers an update
because the qset was either removed or
reactivated. */
update |= WHC_UPDATE_UPDATED;
goto done;
}
/* Mmm, a completed qTD. */
process_inactive_qtd(whc, qset, td);
}
if (!qset->remove)
update |= qset_add_qtds(whc, qset);
done:
/*
* Remove this qset from the ASL if requested, but only if it has
* no qTDs.
*/
if (qset->remove && qset->ntds == 0) {
asl_qset_remove(whc, qset);
update |= WHC_UPDATE_REMOVED;
}
return update;
}
void asl_start(struct whc *whc)
{
struct whc_qset *qset;
qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
le_writeq(qset->qset_dma | QH_LINK_NTDS(8), whc->base + WUSBASYNCLISTADDR);
whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, WUSBCMD_ASYNC_EN);
whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
WUSBSTS_ASYNC_SCHED, WUSBSTS_ASYNC_SCHED,
1000, "start ASL");
}
void asl_stop(struct whc *whc)
{
whc_write_wusbcmd(whc, WUSBCMD_ASYNC_EN, 0);
whci_wait_for(&whc->umc->dev, whc->base + WUSBSTS,
WUSBSTS_ASYNC_SCHED, 0,
1000, "stop ASL");
}
/**
* asl_update - request an ASL update and wait for the hardware to be synced
* @whc: the WHCI HC
* @wusbcmd: WUSBCMD value to start the update.
*
* If the WUSB HC is inactive (i.e., the ASL is stopped) then the
* update must be skipped as the hardware may not respond to update
* requests.
*/
void asl_update(struct whc *whc, uint32_t wusbcmd)
{
struct wusbhc *wusbhc = &whc->wusbhc;
long t;
mutex_lock(&wusbhc->mutex);
if (wusbhc->active) {
whc_write_wusbcmd(whc, wusbcmd, wusbcmd);
t = wait_event_timeout(
whc->async_list_wq,
(le_readl(whc->base + WUSBCMD) & WUSBCMD_ASYNC_UPDATED) == 0,
msecs_to_jiffies(1000));
if (t == 0)
whc_hw_error(whc, "ASL update timeout");
}
mutex_unlock(&wusbhc->mutex);
}
/**
* scan_async_work - scan the ASL for qsets to process.
*
* Process each qset in the ASL in turn and then signal the WHC that
* the ASL has been updated.
*
* Then start, stop or update the asynchronous schedule as required.
*/
void scan_async_work(struct work_struct *work)
{
struct whc *whc = container_of(work, struct whc, async_work);
struct whc_qset *qset, *t;
enum whc_update update = 0;
spin_lock_irq(&whc->lock);
/*
* Traverse the software list backwards so new qsets can be
* safely inserted into the ASL without making it non-circular.
*/
list_for_each_entry_safe_reverse(qset, t, &whc->async_list, list_node) {
if (!qset->in_hw_list) {
asl_qset_insert(whc, qset);
update |= WHC_UPDATE_ADDED;
}
update |= process_qset(whc, qset);
}
spin_unlock_irq(&whc->lock);
if (update) {
uint32_t wusbcmd = WUSBCMD_ASYNC_UPDATED | WUSBCMD_ASYNC_SYNCED_DB;
if (update & WHC_UPDATE_REMOVED)
wusbcmd |= WUSBCMD_ASYNC_QSET_RM;
asl_update(whc, wusbcmd);
}
/*
* Now that the ASL is updated, complete the removal of any
* removed qsets.
*
* If the qset was to be reset, do so and reinsert it into the
* ASL if it has pending transfers.
*/
spin_lock_irq(&whc->lock);
list_for_each_entry_safe(qset, t, &whc->async_removed_list, list_node) {
qset_remove_complete(whc, qset);
if (qset->reset) {
qset_reset(whc, qset);
if (!list_empty(&qset->stds)) {
asl_qset_insert_begin(whc, qset);
queue_work(whc->workqueue, &whc->async_work);
}
}
}
spin_unlock_irq(&whc->lock);
}
/**
* asl_urb_enqueue - queue an URB onto the asynchronous list (ASL).
* @whc: the WHCI host controller
* @urb: the URB to enqueue
* @mem_flags: flags for any memory allocations
*
* The qset for the endpoint is obtained and the urb queued on to it.
*
* Work is scheduled to update the hardware's view of the ASL.
*/
int asl_urb_enqueue(struct whc *whc, struct urb *urb, gfp_t mem_flags)
{
struct whc_qset *qset;
int err;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
err = usb_hcd_link_urb_to_ep(&whc->wusbhc.usb_hcd, urb);
if (err < 0) {
spin_unlock_irqrestore(&whc->lock, flags);
return err;
}
qset = get_qset(whc, urb, GFP_ATOMIC);
if (qset == NULL)
err = -ENOMEM;
else
err = qset_add_urb(whc, qset, urb, GFP_ATOMIC);
if (!err) {
if (!qset->in_sw_list && !qset->remove)
asl_qset_insert_begin(whc, qset);
} else
usb_hcd_unlink_urb_from_ep(&whc->wusbhc.usb_hcd, urb);
spin_unlock_irqrestore(&whc->lock, flags);
if (!err)
queue_work(whc->workqueue, &whc->async_work);
return err;
}
/**
* asl_urb_dequeue - remove an URB (qset) from the async list.
* @whc: the WHCI host controller
* @urb: the URB to dequeue
* @status: the current status of the URB
*
* URBs that do not yet have qTDs can simply be removed from the software
* queue, otherwise the qset must be removed from the ASL so the qTDs
* can be removed.
*/
int asl_urb_dequeue(struct whc *whc, struct urb *urb, int status)
{
struct whc_urb *wurb = urb->hcpriv;
struct whc_qset *qset = wurb->qset;
struct whc_std *std, *t;
bool has_qtd = false;
int ret;
unsigned long flags;
spin_lock_irqsave(&whc->lock, flags);
ret = usb_hcd_check_unlink_urb(&whc->wusbhc.usb_hcd, urb, status);
if (ret < 0)
goto out;
list_for_each_entry_safe(std, t, &qset->stds, list_node) {
if (std->urb == urb) {
if (std->qtd)
has_qtd = true;
qset_free_std(whc, std);
} else
std->qtd = NULL; /* so this std is re-added when the qset is */
}
if (has_qtd) {
asl_qset_remove(whc, qset);
wurb->status = status;
wurb->is_async = true;
queue_work(whc->workqueue, &wurb->dequeue_work);
} else
qset_remove_urb(whc, qset, urb, status);
out:
spin_unlock_irqrestore(&whc->lock, flags);
return ret;
}
/**
* asl_qset_delete - delete a qset from the ASL
*/
void asl_qset_delete(struct whc *whc, struct whc_qset *qset)
{
qset->remove = 1;
queue_work(whc->workqueue, &whc->async_work);
qset_delete(whc, qset);
}
/**
* asl_init - initialize the asynchronous schedule list
*
* A dummy qset with no qTDs is added to the ASL to simplify removing
* qsets (no need to stop the ASL when the last qset is removed).
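*
* With only the dummy qset present, qset_get_next_prev() returns the
* dummy qset itself for both neighbours, so its link pointer ends up
* pointing back at itself: a valid one-element circular schedule.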
*/
int asl_init(struct whc *whc)
{
struct whc_qset *qset;
qset = qset_alloc(whc, GFP_KERNEL);
if (qset == NULL)
return -ENOMEM;
asl_qset_insert_begin(whc, qset);
asl_qset_insert(whc, qset);
return 0;
}
/**
* asl_clean_up - free ASL resources
*
* The ASL is stopped and empty except for the dummy qset.
*/
void asl_clean_up(struct whc *whc)
{
struct whc_qset *qset;
if (!list_empty(&whc->async_list)) {
qset = list_first_entry(&whc->async_list, struct whc_qset, list_node);
list_del(&qset->list_node);
qset_free(whc, qset);
}
}
| gpl-2.0 |
sbreen94/Zeus_d2spr | net/llc/llc_s_ac.c | 14930 | 6006 | /*
* llc_s_ac.c - actions performed during sap state transition.
*
* Description :
* Functions in this module implement the SAP component actions.
* Details of the actions can be found in the IEEE 802.2 standard.
* All functions take one SAP and one event as input arguments, and
* all of them return 0 on success and 1 otherwise.
*
* Copyright (c) 1997 by Procom Technology, Inc.
* 2001-2003 by Arnaldo Carvalho de Melo <acme@conectiva.com.br>
*
* This program can be redistributed or modified under the terms of the
* GNU General Public License as published by the Free Software Foundation.
* This program is distributed without any warranty or implied warranty
* of merchantability or fitness for a particular purpose.
*
* See the GNU General Public License for more details.
*/
#include <linux/netdevice.h>
#include <net/llc.h>
#include <net/llc_pdu.h>
#include <net/llc_s_ac.h>
#include <net/llc_s_ev.h>
#include <net/llc_sap.h>
/**
* llc_sap_action_unit_data_ind - forward UI PDU to network layer
* @sap: SAP
* @skb: the event to forward
*
* Received a UI PDU from MAC layer; forward to network layer as a
* UNITDATA INDICATION; verify our event is the kind we expect
*/
int llc_sap_action_unitdata_ind(struct llc_sap *sap, struct sk_buff *skb)
{
llc_sap_rtn_pdu(sap, skb);
return 0;
}
/**
* llc_sap_action_send_ui - sends UI PDU resp to UNITDATA REQ to MAC layer
* @sap: SAP
* @skb: the event to send
*
* Sends a UI PDU to the MAC layer in response to a UNITDATA REQUEST
* primitive from the network layer. Verifies event is a primitive type of
* event. Verify the primitive is a UNITDATA REQUEST.
*/
int llc_sap_action_send_ui(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
int rc;
llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_ui_cmd(skb);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
if (likely(!rc))
rc = dev_queue_xmit(skb);
return rc;
}
/**
* llc_sap_action_send_xid_c - send XID PDU as response to XID REQ
* @sap: SAP
* @skb: the event to send
*
* Send a XID command PDU to MAC layer in response to a XID REQUEST
* primitive from the network layer. Verify event is a primitive type
* event. Verify the primitive is a XID REQUEST.
*/
int llc_sap_action_send_xid_c(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
int rc;
llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_xid_cmd(skb, LLC_XID_NULL_CLASS_2, 0);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
if (likely(!rc))
rc = dev_queue_xmit(skb);
return rc;
}
/**
* llc_sap_action_send_xid_r - send XID PDU resp to MAC for received XID
* @sap: SAP
* @skb: the event to send
*
* Send XID response PDU to MAC in response to an earlier received XID
* command PDU. Verify event is a PDU type event
*/
int llc_sap_action_send_xid_r(struct llc_sap *sap, struct sk_buff *skb)
{
u8 mac_da[ETH_ALEN], mac_sa[ETH_ALEN], dsap;
int rc = 1;
struct sk_buff *nskb;
llc_pdu_decode_sa(skb, mac_da);
llc_pdu_decode_da(skb, mac_sa);
llc_pdu_decode_ssap(skb, &dsap);
nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U,
sizeof(struct llc_xid_info));
if (!nskb)
goto out;
llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap,
LLC_PDU_RSP);
llc_pdu_init_as_xid_rsp(nskb, LLC_XID_NULL_CLASS_2, 0);
rc = llc_mac_hdr_init(nskb, mac_sa, mac_da);
if (likely(!rc))
rc = dev_queue_xmit(nskb);
out:
return rc;
}
/**
* llc_sap_action_send_test_c - send TEST PDU to MAC in resp to TEST REQ
* @sap: SAP
* @skb: the event to send
*
* Send a TEST command PDU to the MAC layer in response to a TEST REQUEST
* primitive from the network layer. Verify event is a primitive type
* event; verify the primitive is a TEST REQUEST.
*/
int llc_sap_action_send_test_c(struct llc_sap *sap, struct sk_buff *skb)
{
struct llc_sap_state_ev *ev = llc_sap_ev(skb);
int rc;
llc_pdu_header_init(skb, LLC_PDU_TYPE_U, ev->saddr.lsap,
ev->daddr.lsap, LLC_PDU_CMD);
llc_pdu_init_as_test_cmd(skb);
rc = llc_mac_hdr_init(skb, ev->saddr.mac, ev->daddr.mac);
if (likely(!rc))
rc = dev_queue_xmit(skb);
return rc;
}
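/**
* llc_sap_action_send_test_r - send TEST PDU resp to MAC for received TEST
* @sap: SAP
* @skb: the event to send
*
* Send a TEST response PDU to the MAC layer in response to an earlier
* received TEST command PDU. The response carries the command's
* information field, so its size is recovered from the 802.3 length
* field minus the 3-byte U-format LLC header.
*/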
int llc_sap_action_send_test_r(struct llc_sap *sap, struct sk_buff *skb)
{
u8 mac_da[ETH_ALEN], mac_sa[ETH_ALEN], dsap;
struct sk_buff *nskb;
int rc = 1;
u32 data_size;
llc_pdu_decode_sa(skb, mac_da);
llc_pdu_decode_da(skb, mac_sa);
llc_pdu_decode_ssap(skb, &dsap);
/* The test request command is type U (llc_len = 3) */
data_size = ntohs(eth_hdr(skb)->h_proto) - 3;
nskb = llc_alloc_frame(NULL, skb->dev, LLC_PDU_TYPE_U, data_size);
if (!nskb)
goto out;
llc_pdu_header_init(nskb, LLC_PDU_TYPE_U, sap->laddr.lsap, dsap,
LLC_PDU_RSP);
llc_pdu_init_as_test_rsp(nskb, skb);
rc = llc_mac_hdr_init(nskb, mac_sa, mac_da);
if (likely(!rc))
rc = dev_queue_xmit(nskb);
out:
return rc;
}
/**
* llc_sap_action_report_status - report data link status to layer mgmt
* @sap: SAP
* @skb: the event to send
*
* Report data link status to layer management. Verify our event is the
* kind we expect.
*/
int llc_sap_action_report_status(struct llc_sap *sap, struct sk_buff *skb)
{
return 0;
}
/**
* llc_sap_action_xid_ind - send XID PDU resp to net layer via XID IND
* @sap: SAP
* @skb: the event to send
*
* Send a XID response PDU to the network layer via a XID INDICATION
* primitive.
*/
int llc_sap_action_xid_ind(struct llc_sap *sap, struct sk_buff *skb)
{
llc_sap_rtn_pdu(sap, skb);
return 0;
}
/**
* llc_sap_action_test_ind - send TEST PDU to net layer via TEST IND
* @sap: SAP
* @skb: the event to send
*
* Send a TEST response PDU to the network layer via a TEST INDICATION
* primitive. Verify our event is a PDU type event.
*/
int llc_sap_action_test_ind(struct llc_sap *sap, struct sk_buff *skb)
{
llc_sap_rtn_pdu(sap, skb);
return 0;
}
| gpl-2.0 |
iodak/mako-msm | arch/arm/mm/init.c | 83 | 26444 | /*
* linux/arch/arm/mm/init.c
*
* Copyright (C) 1995-2005 Russell King
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/of_fdt.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/dma-contiguous.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/cputype.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include "mm.h"
static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;
int msm_krait_need_wfe_fixup;
EXPORT_SYMBOL(msm_krait_need_wfe_fixup);
static int __init early_initrd(char *p)
{
unsigned long start, size;
char *endp;
start = memparse(p, &endp);
if (*endp == ',') {
size = memparse(endp + 1, NULL);
phys_initrd_start = start;
phys_initrd_size = size;
}
return 0;
}
early_param("initrd", early_initrd);
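/*
* Example command line (start address, then size; memparse() also
* accepts K/M/G suffixes), with an illustrative address:
*
* initrd=0x10800000,8M
*/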
static int __init parse_tag_initrd(const struct tag *tag)
{
printk(KERN_WARNING "ATAG_INITRD is deprecated; "
"please update your bootloader.\n");
phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
phys_initrd_size = tag->u.initrd.size;
return 0;
}
__tagtable(ATAG_INITRD, parse_tag_initrd);
static int __init parse_tag_initrd2(const struct tag *tag)
{
phys_initrd_start = tag->u.initrd.start;
phys_initrd_size = tag->u.initrd.size;
return 0;
}
__tagtable(ATAG_INITRD2, parse_tag_initrd2);
#ifdef CONFIG_OF_FLATTREE
void __init early_init_dt_setup_initrd_arch(unsigned long start, unsigned long end)
{
phys_initrd_start = start;
phys_initrd_size = end - start;
}
#endif /* CONFIG_OF_FLATTREE */
/*
* This keeps memory configuration data used by a couple of memory
* initialization functions, as well as show_mem() for the skipping
* of holes in the memory map. It is populated by arm_add_memory().
*/
struct meminfo meminfo;
void show_mem(unsigned int filter)
{
int free = 0, total = 0, reserved = 0;
int shared = 0, cached = 0, slab = 0, i;
struct meminfo * mi = &meminfo;
printk("Mem-info:\n");
show_free_areas(filter);
for_each_bank (i, mi) {
struct membank *bank = &mi->bank[i];
unsigned int pfn1, pfn2;
struct page *page, *end;
pfn1 = bank_pfn_start(bank);
pfn2 = bank_pfn_end(bank);
page = pfn_to_page(pfn1);
end = pfn_to_page(pfn2 - 1) + 1;
do {
total++;
if (PageReserved(page))
reserved++;
else if (PageSwapCache(page))
cached++;
else if (PageSlab(page))
slab++;
else if (!page_count(page))
free++;
else
shared += page_count(page) - 1;
page++;
#ifdef CONFIG_SPARSEMEM
pfn1++;
if (!(pfn1 % PAGES_PER_SECTION))
page = pfn_to_page(pfn1);
} while (pfn1 < pfn2);
#else
} while (page < end);
#endif
}
printk("%d pages of RAM\n", total);
printk("%d free pages\n", free);
printk("%d reserved pages\n", reserved);
printk("%d slab pages\n", slab);
printk("%d pages shared\n", shared);
printk("%d pages swap cached\n", cached);
}
static void __init find_limits(unsigned long *min, unsigned long *max_low,
unsigned long *max_high)
{
struct meminfo *mi = &meminfo;
int i;
/* This assumes the meminfo array is properly sorted */
*min = bank_pfn_start(&mi->bank[0]);
for_each_bank (i, mi)
if (mi->bank[i].highmem)
break;
*max_low = bank_pfn_end(&mi->bank[i - 1]);
*max_high = bank_pfn_end(&mi->bank[mi->nr_banks - 1]);
}
static void __init arm_bootmem_init(unsigned long start_pfn,
unsigned long end_pfn)
{
struct memblock_region *reg;
unsigned int boot_pages;
phys_addr_t bitmap;
pg_data_t *pgdat;
/*
* Allocate the bootmem bitmap page. This must be in a region
* of memory which has already been mapped.
*/
boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
__pfn_to_phys(end_pfn));
/*
* Initialise the bootmem allocator, handing the
* memory banks over to bootmem.
*/
node_set_online(0);
pgdat = NODE_DATA(0);
init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
/* Free the lowmem regions from memblock into bootmem. */
for_each_memblock(memory, reg) {
unsigned long start = memblock_region_memory_base_pfn(reg);
unsigned long end = memblock_region_memory_end_pfn(reg);
if (end >= end_pfn)
end = end_pfn;
if (start >= end)
break;
free_bootmem(__pfn_to_phys(start), (end - start) << PAGE_SHIFT);
}
/* Reserve the lowmem memblock reserved regions in bootmem. */
for_each_memblock(reserved, reg) {
unsigned long start = memblock_region_reserved_base_pfn(reg);
unsigned long end = memblock_region_reserved_end_pfn(reg);
if (end >= end_pfn)
end = end_pfn;
if (start >= end)
break;
reserve_bootmem(__pfn_to_phys(start),
(end - start) << PAGE_SHIFT, BOOTMEM_DEFAULT);
}
}
#ifdef CONFIG_ZONE_DMA
unsigned long arm_dma_zone_size __read_mostly;
EXPORT_SYMBOL(arm_dma_zone_size);
/*
* The DMA mask corresponding to the maximum bus address allocatable
* using GFP_DMA. The default here places no restriction on DMA
* allocations. This must be the smallest DMA mask in the system,
* so a successful GFP_DMA allocation will always satisfy this.
*/
phys_addr_t arm_dma_limit;
static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
unsigned long dma_size)
{
if (size[0] <= dma_size)
return;
size[ZONE_NORMAL] = size[0] - dma_size;
size[ZONE_DMA] = dma_size;
hole[ZONE_NORMAL] = hole[0];
hole[ZONE_DMA] = 0;
}
#endif
void __init setup_dma_zone(struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
if (mdesc->dma_zone_size) {
arm_dma_zone_size = mdesc->dma_zone_size;
arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
} else
arm_dma_limit = 0xffffffff;
#endif
}
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
static void __init arm_bootmem_free_hmnm(unsigned long max_low,
unsigned long max_high)
{
unsigned long max_zone_pfns[MAX_NR_ZONES];
struct memblock_region *reg;
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
max_zone_pfns[0] = max_low;
#ifdef CONFIG_HIGHMEM
max_zone_pfns[ZONE_HIGHMEM] = max_high;
#endif
for_each_memblock(memory, reg) {
unsigned long start = memblock_region_memory_base_pfn(reg);
unsigned long end = memblock_region_memory_end_pfn(reg);
memblock_set_node(PFN_PHYS(start), PFN_PHYS(end - start), 0);
}
free_area_init_nodes(max_zone_pfns);
}
#else
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
unsigned long max_high)
{
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
struct memblock_region *reg;
/*
* initialise the zones.
*/
memset(zone_size, 0, sizeof(zone_size));
/*
* The memory size has already been determined. If we need
* to do anything fancy with the allocation of this memory
* to the zones, now is the time to do it.
*/
zone_size[0] = max_low - min;
#ifdef CONFIG_HIGHMEM
zone_size[ZONE_HIGHMEM] = max_high - max_low;
#endif
/*
* Calculate the size of the holes.
* holes = node_size - sum(bank_sizes)
*/
memcpy(zhole_size, zone_size, sizeof(zhole_size));
for_each_memblock(memory, reg) {
unsigned long start = memblock_region_memory_base_pfn(reg);
unsigned long end = memblock_region_memory_end_pfn(reg);
if (start < max_low) {
unsigned long low_end = min(end, max_low);
zhole_size[0] -= low_end - start;
}
#ifdef CONFIG_HIGHMEM
if (end > max_low) {
unsigned long high_start = max(start, max_low);
zhole_size[ZONE_HIGHMEM] -= end - high_start;
}
#endif
}
#ifdef CONFIG_ZONE_DMA
/*
* Adjust the sizes according to any special requirements for
* this machine type.
*/
if (arm_dma_zone_size)
arm_adjust_dma_zone(zone_size, zhole_size,
arm_dma_zone_size >> PAGE_SHIFT);
#endif
free_area_init_node(0, zone_size, min, zhole_size);
}
#endif
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
return memblock_is_memory(__pfn_to_phys(pfn));
}
EXPORT_SYMBOL(pfn_valid);
#endif
#ifndef CONFIG_SPARSEMEM
static void __init arm_memory_present(void)
{
}
#else
static void __init arm_memory_present(void)
{
struct meminfo *mi = &meminfo;
int i;
for_each_bank(i, mi) {
memory_present(0, bank_pfn_start(&mi->bank[i]),
bank_pfn_end(&mi->bank[i]));
}
}
#endif
static bool arm_memblock_steal_permitted = true;
phys_addr_t __init arm_memblock_steal(phys_addr_t size, phys_addr_t align)
{
phys_addr_t phys;
BUG_ON(!arm_memblock_steal_permitted);
phys = memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE);
memblock_free(phys, size);
memblock_remove(phys, size);
return phys;
}
static int __init meminfo_cmp(const void *_a, const void *_b)
{
const struct membank *a = _a, *b = _b;
long cmp = bank_pfn_start(a) - bank_pfn_start(b);
return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}
phys_addr_t memory_hole_offset;
EXPORT_SYMBOL(memory_hole_offset);
phys_addr_t memory_hole_start;
EXPORT_SYMBOL(memory_hole_start);
phys_addr_t memory_hole_end;
EXPORT_SYMBOL(memory_hole_end);
unsigned long memory_hole_align;
EXPORT_SYMBOL(memory_hole_align);
unsigned long virtual_hole_start;
unsigned long virtual_hole_end;
#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
void find_memory_hole(void)
{
int i;
phys_addr_t hole_start;
phys_addr_t hole_size;
unsigned long hole_end_virt;
/*
* Find the start and end of the hole, using meminfo.
*/
for (i = 0; i < (meminfo.nr_banks - 1); i++) {
if ((meminfo.bank[i].start + meminfo.bank[i].size) !=
meminfo.bank[i+1].start) {
if (meminfo.bank[i].start + meminfo.bank[i].size
<= MAX_HOLE_ADDRESS) {
hole_start = meminfo.bank[i].start +
meminfo.bank[i].size;
hole_size = meminfo.bank[i+1].start -
hole_start;
if (memory_hole_start == 0 &&
memory_hole_end == 0) {
memory_hole_start = hole_start;
memory_hole_end = hole_start +
hole_size;
} else if ((memory_hole_end -
memory_hole_start) <= hole_size) {
memory_hole_start = hole_start;
memory_hole_end = hole_start +
hole_size;
}
}
}
}
memory_hole_offset = memory_hole_start - PHYS_OFFSET;
if (!IS_ALIGNED(memory_hole_start, SECTION_SIZE)) {
pr_err("memory_hole_start %pa is not aligned to %lx\n",
&memory_hole_start, SECTION_SIZE);
BUG();
}
if (!IS_ALIGNED(memory_hole_end, SECTION_SIZE)) {
pr_err("memory_hole_end %pa is not aligned to %lx\n",
&memory_hole_end, SECTION_SIZE);
BUG();
}
hole_end_virt = __phys_to_virt(memory_hole_end);
if ((!IS_ALIGNED(hole_end_virt, PMD_SIZE) &&
IS_ALIGNED(memory_hole_end, PMD_SIZE)) ||
(IS_ALIGNED(hole_end_virt, PMD_SIZE) &&
!IS_ALIGNED(memory_hole_end, PMD_SIZE))) {
memory_hole_align = !IS_ALIGNED(hole_end_virt, PMD_SIZE) ?
hole_end_virt & ~PMD_MASK :
memory_hole_end & ~PMD_MASK;
virtual_hole_start = hole_end_virt;
virtual_hole_end = hole_end_virt + memory_hole_align;
pr_info("Physical memory hole is not aligned. There will be a virtual memory hole from %lx to %lx\n",
virtual_hole_start, virtual_hole_end);
}
}
#endif
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
int i;
sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
/* Register the kernel text, kernel data and initrd with memblock. */
#ifdef CONFIG_XIP_KERNEL
memblock_reserve(__pa(_sdata), _end - _sdata);
#else
memblock_reserve(__pa(_stext), _end - _stext);
#endif
#ifdef CONFIG_BLK_DEV_INITRD
if (phys_initrd_size &&
!memblock_is_region_memory(phys_initrd_start, phys_initrd_size)) {
pr_err("INITRD: 0x%08lx+0x%08lx is not a memory region - disabling initrd\n",
phys_initrd_start, phys_initrd_size);
phys_initrd_start = phys_initrd_size = 0;
}
if (phys_initrd_size &&
memblock_is_region_reserved(phys_initrd_start, phys_initrd_size)) {
pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region - disabling initrd\n",
phys_initrd_start, phys_initrd_size);
phys_initrd_start = phys_initrd_size = 0;
}
if (phys_initrd_size) {
memblock_reserve(phys_initrd_start, phys_initrd_size);
/* Now convert initrd to virtual addresses */
initrd_start = __phys_to_virt(phys_initrd_start);
initrd_end = initrd_start + phys_initrd_size;
}
#endif
arm_mm_memblock_reserve();
arm_dt_memblock_reserve();
/* reserve any platform specific memblock areas */
if (mdesc->reserve)
mdesc->reserve();
/*
* reserve memory for DMA contiguous allocations,
* must come from DMA area inside low memory
*/
dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
arm_memblock_steal_permitted = false;
memblock_allow_resize();
memblock_dump_all();
}
#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
int _early_pfn_valid(unsigned long pfn)
{
struct meminfo *mi = &meminfo;
unsigned int left = 0, right = mi->nr_banks;
do {
unsigned int mid = (right + left) / 2;
struct membank *bank = &mi->bank[mid];
if (pfn < bank_pfn_start(bank))
right = mid;
else if (pfn >= bank_pfn_end(bank))
left = mid + 1;
else
return 1;
} while (left < right);
return 0;
}
EXPORT_SYMBOL(_early_pfn_valid);
#endif
void __init bootmem_init(void)
{
unsigned long min, max_low, max_high;
max_low = max_high = 0;
find_limits(&min, &max_low, &max_high);
arm_bootmem_init(min, max_low);
/*
* Sparsemem tries to allocate bootmem in memory_present(),
* so must be done after the fixed reservations
*/
arm_memory_present();
/*
* sparse_init() needs the bootmem allocator up and running.
*/
sparse_init();
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
arm_bootmem_free_hmnm(max_low, max_high);
#else
/*
* Now free the memory - free_area_init_node needs
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
arm_bootmem_free(min, max_low, max_high);
#endif
/*
* This doesn't seem to be used by the Linux memory manager any
* more, but is used by ll_rw_block. If we can get rid of it, we
* also get rid of some of the stuff above as well.
*
* Note: max_low_pfn and max_pfn reflect the number of _pages_ in
* the system, not the maximum PFN.
*/
max_low_pfn = max_low - PHYS_PFN_OFFSET;
max_pfn = max_high - PHYS_PFN_OFFSET;
}
static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);
for (; pfn < end; pfn++) {
struct page *page = pfn_to_page(pfn);
ClearPageReserved(page);
init_page_count(page);
__free_page(page);
pages++;
}
if (size && s)
printk(KERN_INFO "Freeing %s memory: %dK\n", s, size);
return pages;
}
/*
* Poison init memory with an undefined instruction (ARM) or a branch to an
* undefined instruction (Thumb).
*/
static inline void poison_init_mem(void *s, size_t count)
{
u32 *p = (u32 *)s;
for (; count != 0; count -= 4)
*p++ = 0xe7fddef0;
}
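/*
* Note: the loop above steps in 4-byte words and count is unsigned,
* so callers must pass a size that is a multiple of 4 or the loop
* would run past the end of the region.
*/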
static inline void
free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
struct page *start_pg, *end_pg;
unsigned long pg, pgend;
/*
* Convert start_pfn/end_pfn to a struct page pointer.
*/
start_pg = pfn_to_page(start_pfn - 1) + 1;
end_pg = pfn_to_page(end_pfn - 1) + 1;
/*
* Convert to physical addresses, and
* round start upwards and end downwards.
*/
pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;
/*
* If there are free pages between these,
* free the section of the memmap array.
*/
if (pg < pgend)
free_bootmem(pg, pgend - pg);
}
/*
* The mem_map array can get very big. Free as much of the unused portion of
* the mem_map that we are allowed to. The page migration code moves pages
* in blocks that are rounded per the MAX_ORDER_NR_PAGES definition, so we
* can't free mem_map entries that may be dereferenced in this manner.
*/
static void __init free_unused_memmap(struct meminfo *mi)
{
unsigned long bank_start, prev_bank_end = 0;
unsigned int i;
/*
* This relies on each bank being in address order.
* The banks are sorted previously in bootmem_init().
*/
for_each_bank(i, mi) {
struct membank *bank = &mi->bank[i];
bank_start = round_down(bank_pfn_start(bank),
MAX_ORDER_NR_PAGES);
#ifdef CONFIG_SPARSEMEM
/*
* Take care not to free memmap entries that don't exist
* due to SPARSEMEM sections which aren't present.
*/
bank_start = min(bank_start,
ALIGN(prev_bank_end, PAGES_PER_SECTION));
#else
/*
* Align down here since the VM subsystem insists that the
* memmap entries are valid from the bank start aligned to
* MAX_ORDER_NR_PAGES.
*/
bank_start = round_down(bank_start, MAX_ORDER_NR_PAGES);
#endif
/*
* If we had a previous bank, and there is a space
* between the current bank and the previous, free it.
*/
if (prev_bank_end && prev_bank_end < bank_start)
free_memmap(prev_bank_end, bank_start);
prev_bank_end = round_up(bank_pfn_end(bank),
MAX_ORDER_NR_PAGES);
}
#ifdef CONFIG_SPARSEMEM
if (!IS_ALIGNED(prev_bank_end, PAGES_PER_SECTION))
free_memmap(prev_bank_end,
ALIGN(prev_bank_end, PAGES_PER_SECTION));
#endif
}
static void __init free_highpages(void)
{
#ifdef CONFIG_HIGHMEM
unsigned long max_low = max_low_pfn + PHYS_PFN_OFFSET;
struct memblock_region *mem, *res;
/* set highmem page free */
for_each_memblock(memory, mem) {
unsigned long start = memblock_region_memory_base_pfn(mem);
unsigned long end = memblock_region_memory_end_pfn(mem);
/* Ignore complete lowmem entries */
if (end <= max_low)
continue;
/* Truncate partial highmem entries */
if (start < max_low)
start = max_low;
/* Find and exclude any reserved regions */
for_each_memblock(reserved, res) {
unsigned long res_start, res_end;
res_start = memblock_region_reserved_base_pfn(res);
res_end = memblock_region_reserved_end_pfn(res);
if (res_end < start)
continue;
if (res_start < start)
res_start = start;
if (res_start > end)
res_start = end;
if (res_end > end)
res_end = end;
if (res_start != start)
totalhigh_pages += free_area(start, res_start,
NULL);
start = res_end;
if (start == end)
break;
}
/* And now free anything which remains */
if (start < end)
totalhigh_pages += free_area(start, end, NULL);
}
totalram_pages += totalhigh_pages;
#endif
}
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
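/*
* MLK/MLM/MLK_ROUNDUP expand to a (base, top, size) argument triple
* for the printk format strings below, with the size scaled to kB or
* MB respectively.
*/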
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
static void print_vmalloc_lowmem_info(void)
{
int i;
void *va_start, *va_end;
printk(KERN_NOTICE
" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n",
MLM(VMALLOC_START, VMALLOC_END));
for (i = meminfo.nr_banks - 1; i >= 0; i--) {
if (!meminfo.bank[i].highmem) {
va_start = __va(meminfo.bank[i].start);
va_end = __va(meminfo.bank[i].start +
meminfo.bank[i].size);
printk(KERN_NOTICE
" lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
MLM((unsigned long)va_start, (unsigned long)va_end));
}
if (i && ((meminfo.bank[i-1].start + meminfo.bank[i-1].size) !=
meminfo.bank[i].start)) {
if (meminfo.bank[i-1].start + meminfo.bank[i-1].size
<= MAX_HOLE_ADDRESS) {
va_start = __va(meminfo.bank[i-1].start
+ meminfo.bank[i-1].size);
va_end = __va(meminfo.bank[i].start);
printk(KERN_NOTICE
" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n",
MLM((unsigned long)va_start,
(unsigned long)va_end));
}
}
}
}
#endif
/*
* mem_init() marks the free areas in the mem_map and tells us how much
* memory is free. This is done after various parts of the system have
* claimed their memory after the kernel image.
*/
void __init mem_init(void)
{
unsigned long reserved_pages, free_pages;
struct memblock_region *reg;
int i;
#ifdef CONFIG_HAVE_TCM
/* These pointers are filled in on TCM detection */
extern u32 dtcm_end;
extern u32 itcm_end;
#endif
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
/* this will put all unused low memory onto the freelists */
free_unused_memmap(&meminfo);
totalram_pages += free_all_bootmem();
#ifdef CONFIG_SA1111
/* now that our DMA memory is actually so designated, we can free it */
totalram_pages += free_area(PHYS_PFN_OFFSET,
__phys_to_pfn(__pa(swapper_pg_dir)), NULL);
#endif
free_highpages();
reserved_pages = free_pages = 0;
for_each_bank(i, &meminfo) {
struct membank *bank = &meminfo.bank[i];
unsigned int pfn1, pfn2;
struct page *page, *end;
pfn1 = bank_pfn_start(bank);
pfn2 = bank_pfn_end(bank);
page = pfn_to_page(pfn1);
end = pfn_to_page(pfn2 - 1) + 1;
do {
if (PageReserved(page))
reserved_pages++;
else if (!page_count(page))
free_pages++;
page++;
#ifdef CONFIG_SPARSEMEM
pfn1++;
if (!(pfn1 % PAGES_PER_SECTION))
page = pfn_to_page(pfn1);
} while (pfn1 < pfn2);
#else
} while (page < end);
#endif
}
/*
* Since our memory may not be contiguous, calculate the
* real number of pages we have in this system
*/
printk(KERN_INFO "Memory:");
num_physpages = 0;
for_each_memblock(memory, reg) {
unsigned long pages = memblock_region_memory_end_pfn(reg) -
memblock_region_memory_base_pfn(reg);
num_physpages += pages;
printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
}
printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));
printk(KERN_NOTICE "Memory: %luk/%luk available, %luk reserved, %luK highmem\n",
nr_free_pages() << (PAGE_SHIFT-10),
free_pages << (PAGE_SHIFT-10),
reserved_pages << (PAGE_SHIFT-10),
totalhigh_pages << (PAGE_SHIFT-10));
printk(KERN_NOTICE "Virtual kernel memory layout:\n"
" vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
#ifdef CONFIG_ARM_USE_USER_ACCESSIBLE_TIMERS
" timers : 0x%08lx - 0x%08lx (%4ld kB)\n"
#endif
#ifdef CONFIG_HAVE_TCM
" DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
" ITCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
#endif
" fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n",
MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
(PAGE_SIZE)),
#ifdef CONFIG_ARM_USE_USER_ACCESSIBLE_TIMERS
MLK(UL(CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE),
UL(CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE)
+ (PAGE_SIZE)),
#endif
#ifdef CONFIG_HAVE_TCM
MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
MLK(ITCM_OFFSET, (unsigned long) itcm_end),
#endif
MLK(FIXADDR_START, FIXADDR_TOP));
#ifdef CONFIG_ENABLE_VMALLOC_SAVING
print_vmalloc_lowmem_info();
#else
printk(KERN_NOTICE
" vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
" lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n",
MLM(VMALLOC_START, VMALLOC_END),
MLM(PAGE_OFFSET, (unsigned long)high_memory));
#endif
printk(KERN_NOTICE
#ifdef CONFIG_HIGHMEM
" pkmap : 0x%08lx - 0x%08lx (%4ld MB)\n"
#endif
#ifdef CONFIG_MODULES
" modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
#endif
" .text : 0x%p" " - 0x%p" " (%4d kB)\n"
" .init : 0x%p" " - 0x%p" " (%4d kB)\n"
" .data : 0x%p" " - 0x%p" " (%4d kB)\n"
" .bss : 0x%p" " - 0x%p" " (%4d kB)\n",
#ifdef CONFIG_HIGHMEM
MLM(PKMAP_BASE, (PKMAP_BASE) + (LAST_PKMAP) *
(PAGE_SIZE)),
#endif
#ifdef CONFIG_MODULES
MLM(MODULES_VADDR, MODULES_END),
#endif
MLK_ROUNDUP(_text, _etext),
MLK_ROUNDUP(__init_begin, __init_end),
MLK_ROUNDUP(_sdata, _edata),
MLK_ROUNDUP(__bss_start, __bss_stop));
/*
* Check boundaries twice: Some fundamental inconsistencies can
* be detected at build time already.
*/
#ifdef CONFIG_MMU
BUILD_BUG_ON(TASK_SIZE > MODULES_VADDR);
BUG_ON(TASK_SIZE > MODULES_VADDR);
#endif
#ifdef CONFIG_HIGHMEM
BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
BUG_ON(PKMAP_BASE + LAST_PKMAP * PAGE_SIZE > PAGE_OFFSET);
#endif
if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
extern int sysctl_overcommit_memory;
/*
* On a machine this small we won't get
* anywhere without overcommit, so turn
* it on by default.
*/
sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
}
}
#undef MLK
#undef MLM
#undef MLK_ROUNDUP
void free_initmem(void)
{
unsigned long reclaimed_initmem;
#ifdef CONFIG_HAVE_TCM
extern char __tcm_start, __tcm_end;
poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
totalram_pages += free_area(__phys_to_pfn(__pa(&__tcm_start)),
__phys_to_pfn(__pa(&__tcm_end)),
"TCM link");
#endif
#ifdef CONFIG_STRICT_MEMORY_RWX
poison_init_mem((char *)__arch_info_begin,
__init_end - (char *)__arch_info_begin);
reclaimed_initmem = free_area(__phys_to_pfn(__pa(__arch_info_begin)),
__phys_to_pfn(__pa(__init_end)),
"init");
totalram_pages += reclaimed_initmem;
#else
poison_init_mem(__init_begin, __init_end - __init_begin);
if (!machine_is_integrator() && !machine_is_cintegrator()) {
reclaimed_initmem = free_area(__phys_to_pfn(__pa(__init_begin)),
__phys_to_pfn(__pa(__init_end)),
"init");
totalram_pages += reclaimed_initmem;
}
#endif
}
#ifdef CONFIG_BLK_DEV_INITRD
static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
unsigned long reclaimed_initrd_mem;
if (!keep_initrd) {
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
reclaimed_initrd_mem = free_area(__phys_to_pfn(__pa(start)),
__phys_to_pfn(__pa(end)),
"initrd");
totalram_pages += reclaimed_initrd_mem;
}
}
static int __init keepinitrd_setup(char *__unused)
{
keep_initrd = 1;
return 1;
}
__setup("keepinitrd", keepinitrd_setup);
#endif
#ifdef CONFIG_MSM_KRAIT_WFE_FIXUP
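/*
* Krait CPUs (MIDR 0x511f04xx and 0x510f06xx) expose an
* implementation-defined register (p15, 7, c15, c0, 5) whose bit 16
* indicates whether the WFE fixup is needed.
*/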
static int __init msm_krait_wfe_init(void)
{
unsigned int val, midr;
midr = read_cpuid_id() & 0xffffff00;
if ((midr == 0x511f0400) || (midr == 0x510f0600)) {
asm volatile("mrc p15, 7, %0, c15, c0, 5" : "=r" (val));
msm_krait_need_wfe_fixup = (val & 0x10000) ? 1 : 0;
}
return 0;
}
pure_initcall(msm_krait_wfe_init);
#endif
| gpl-2.0 |
treejames/exynos4_uboot | board/w7o/fsboot.c | 339 | 2096 | /*
* (C) Copyright 2001
* Wave 7 Optics, Inc.
*
* See file CREDITS for list of people who contributed to this
* project.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston,
* MA 02111-1307 USA
*/
#include <common.h>
#include <config.h>
#include <command.h>
/*
* FIXME: Add code to test the image and its header.
*/
extern int valid_elf_image (unsigned long addr);
static int
image_check(ulong addr)
{
return valid_elf_image(addr);
}
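/*
* Boot flow: when "testaddr" holds a valid ELF image it is promoted
* to "alt_loadaddr" and saved, "testaddr" is cleared, and "loadaddr"
* is redirected for this boot only (deliberately not saved), so a
* failed test boot falls back to the original image on the next
* reset.
*/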
void
init_fsboot(void)
{
char *envp;
ulong loadaddr;
ulong testaddr;
ulong alt_loadaddr;
char buf[9];
/*
* Get test image address
*/
if ((envp = getenv("testaddr")) != NULL)
testaddr = simple_strtoul(envp, NULL, 16);
else
testaddr = -1;
/*
* Are we going to test-boot an image?
*/
if ((testaddr != -1) && image_check(testaddr)) {
/* Set alt_loadaddr */
alt_loadaddr = testaddr;
sprintf(buf, "%lX", alt_loadaddr);
setenv("alt_loadaddr", buf);
/* Clear test_addr */
setenv("testaddr", NULL);
/*
* Save current environment with alt_loadaddr,
* and cleared testaddr.
*/
saveenv();
/*
* Setup temporary loadaddr to alt_loadaddr
* XXX - DO NOT SAVE ENVIRONMENT!
*/
loadaddr = alt_loadaddr;
sprintf(buf, "%lX", loadaddr);
setenv("loadaddr", buf);
} else { /* Normal boot */
setenv("alt_loadaddr", NULL); /* Clear alt_loadaddr */
setenv("testaddr", NULL); /* Clear testaddr */
saveenv();
}
return;
}
| gpl-2.0 |
posthumanik/golden_cm10.2_kernel | drivers/gpu/mali/mali400ko/driver/src/devicedrv/mali/linux/mali_osk_profiling_gator.c | 339 | 7014 | /*
* Copyright (C) 2012 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/module.h>
#include "mali_kernel_common.h"
#include "mali_osk.h"
#include "mali_ukk.h"
#include "mali_uk_types.h"
#include "mali_osk_profiling.h"
#include "mali_linux_trace.h"
#include "mali_gp.h"
#include "mali_pp.h"
#include "mali_l2_cache.h"
#include "mali_user_settings_db.h"
_mali_osk_errcode_t _mali_osk_profiling_init(mali_bool auto_start)
{
if (MALI_TRUE == auto_start)
{
mali_set_user_setting(_MALI_UK_USER_SETTING_SW_EVENTS_ENABLE, MALI_TRUE);
}
return _MALI_OSK_ERR_OK;
}
void _mali_osk_profiling_term(void)
{
/* Nothing to do */
}
_mali_osk_errcode_t _mali_osk_profiling_start(u32 * limit)
{
/* Nothing to do */
return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_osk_profiling_stop(u32 *count)
{
/* Nothing to do */
return _MALI_OSK_ERR_OK;
}
u32 _mali_osk_profiling_get_count(void)
{
return 0;
}
_mali_osk_errcode_t _mali_osk_profiling_get_event(u32 index, u64* timestamp, u32* event_id, u32 data[5])
{
/* Nothing to do */
return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_osk_profiling_clear(void)
{
/* Nothing to do */
return _MALI_OSK_ERR_OK;
}
mali_bool _mali_osk_profiling_is_recording(void)
{
return MALI_FALSE;
}
mali_bool _mali_osk_profiling_have_recording(void)
{
return MALI_FALSE;
}
void _mali_osk_profiling_report_sw_counters(u32 *counters)
{
trace_mali_sw_counters(_mali_osk_get_pid(), _mali_osk_get_tid(), NULL, counters);
}
_mali_osk_errcode_t _mali_ukk_profiling_start(_mali_uk_profiling_start_s *args)
{
return _mali_osk_profiling_start(&args->limit);
}
_mali_osk_errcode_t _mali_ukk_profiling_add_event(_mali_uk_profiling_add_event_s *args)
{
/* Always add process and thread identificator in the first two data elements for events from user space */
_mali_osk_profiling_add_event(args->event_id, _mali_osk_get_pid(), _mali_osk_get_tid(), args->data[2], args->data[3], args->data[4]);
return _MALI_OSK_ERR_OK;
}
_mali_osk_errcode_t _mali_ukk_profiling_stop(_mali_uk_profiling_stop_s *args)
{
return _mali_osk_profiling_stop(&args->count);
}
_mali_osk_errcode_t _mali_ukk_profiling_get_event(_mali_uk_profiling_get_event_s *args)
{
return _mali_osk_profiling_get_event(args->index, &args->timestamp, &args->event_id, args->data);
}
_mali_osk_errcode_t _mali_ukk_profiling_clear(_mali_uk_profiling_clear_s *args)
{
return _mali_osk_profiling_clear();
}
_mali_osk_errcode_t _mali_ukk_sw_counters_report(_mali_uk_sw_counters_report_s *args)
{
_mali_osk_profiling_report_sw_counters(args->counters);
return _MALI_OSK_ERR_OK;
}
/**
* Called by gator.ko to set HW counters
*
* @param counter_id The counter ID.
* @param event_id Event ID that the counter should count (HW counter value from TRM).
*
* @return 1 on success, 0 on failure.
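*
* The fragment processor and L2 cache counter IDs are laid out as
* consecutive (core, source) pairs, which is why the code below uses
* "(id - base) >> 1" for the core and "(id - base) & 1" for the
* counter source.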
*/
int _mali_profiling_set_event(u32 counter_id, s32 event_id)
{
if (COUNTER_VP_C0 == counter_id)
{
struct mali_gp_core* gp_core = mali_gp_get_global_gp_core();
if (NULL != gp_core)
{
if (MALI_TRUE == mali_gp_core_set_counter_src0(gp_core, event_id))
{
return 1;
}
}
}
if (COUNTER_VP_C1 == counter_id)
{
struct mali_gp_core* gp_core = mali_gp_get_global_gp_core();
if (NULL != gp_core)
{
if (MALI_TRUE == mali_gp_core_set_counter_src1(gp_core, event_id))
{
return 1;
}
}
}
if (COUNTER_FP0_C0 <= counter_id && COUNTER_FP3_C1 >= counter_id)
{
u32 core_id = (counter_id - COUNTER_FP0_C0) >> 1;
struct mali_pp_core* pp_core = mali_pp_get_global_pp_core(core_id);
if (NULL != pp_core)
{
u32 counter_src = (counter_id - COUNTER_FP0_C0) & 1;
if (0 == counter_src)
{
if (MALI_TRUE == mali_pp_core_set_counter_src0(pp_core, event_id))
{
return 1;
}
}
else
{
if (MALI_TRUE == mali_pp_core_set_counter_src1(pp_core, event_id))
{
return 1;
}
}
}
}
if (COUNTER_L2_C0 <= counter_id && COUNTER_L2_C1 >= counter_id)
{
u32 core_id = (counter_id - COUNTER_L2_C0) >> 1;
struct mali_l2_cache_core* l2_cache_core = mali_l2_cache_core_get_glob_l2_core(core_id);
if (NULL != l2_cache_core)
{
u32 counter_src = (counter_id - COUNTER_L2_C0) & 1;
if (0 == counter_src)
{
if (MALI_TRUE == mali_l2_cache_core_set_counter_src0(l2_cache_core, event_id))
{
return 1;
}
}
else
{
if (MALI_TRUE == mali_l2_cache_core_set_counter_src1(l2_cache_core, event_id))
{
return 1;
}
}
}
}
return 0;
}
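/*
 * Illustrative sketch (not part of the driver): the FP and L2 counter
 * IDs above pack a core index and a source slot into consecutive
 * values, so both are recovered with a shift and a mask, assuming the
 * contiguous COUNTER_FP0_C0..COUNTER_FP3_C1 layout the range check
 * implies. The helper name below is hypothetical; it only restates
 * the arithmetic used by _mali_profiling_set_event().
 */
#if 0
static void example_decode_fp_counter_id(u32 counter_id)
{
/* Two counter slots per core: even offsets select src0, odd select src1. */
u32 core_id = (counter_id - COUNTER_FP0_C0) >> 1;
u32 counter_src = (counter_id - COUNTER_FP0_C0) & 1;
/* e.g. COUNTER_FP2_C1 == COUNTER_FP0_C0 + 5 -> core_id 2, counter_src 1 */
(void)core_id;
(void)counter_src;
}
#endif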
/**
* Called by gator.ko to retrieve the L2 cache counter values for the first L2 cache.
* The L2 cache counters are unique in that they are polled by gator, rather than being
* transmitted via the tracepoint mechanism.
*
* @param src0 First L2 cache counter ID.
* @param val0 First L2 cache counter value.
* @param src1 Second L2 cache counter ID.
* @param val1 Second L2 cache counter value.
*/
void _mali_profiling_get_counters(u32 *src0, u32 *val0, u32 *src1, u32 *val1)
{
struct mali_l2_cache_core *l2_cache = mali_l2_cache_core_get_glob_l2_core(0);
if (NULL != l2_cache)
{
if (MALI_TRUE == mali_l2_cache_lock_power_state(l2_cache))
{
/* It is now safe to access the L2 cache core in order to retrieve the counters */
mali_l2_cache_core_get_counter_values(l2_cache, src0, val0, src1, val1);
}
mali_l2_cache_unlock_power_state(l2_cache);
}
}
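/*
 * Hypothetical gator-side usage (a sketch, not gator.ko source): since
 * these counters are polled rather than traced, a sampling tick would
 * simply call the exported function. The function and variable names
 * in the sketch are assumptions for illustration only.
 */
#if 0
static void example_poll_l2_counters(void)
{
u32 src0, val0, src1, val1;
_mali_profiling_get_counters(&src0, &val0, &src1, &val1);
/* src0/src1 report which events the two counters are tracking;
val0/val1 hold the current counts for the first L2 cache. */
}
#endif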
/*
* List of possible actions to be controlled by Streamline.
* The following numbers are used by gator to control the frame buffer dumping and s/w counter reporting.
* We cannot use the enums in mali_uk_types.h because they are unknown inside gator.
*/
#define FBDUMP_CONTROL_ENABLE (1)
#define FBDUMP_CONTROL_RATE (2)
#define SW_COUNTER_ENABLE (3)
#define FBDUMP_CONTROL_RESIZE_FACTOR (4)
/**
* Called by gator to control the production of profiling information at runtime.
*/
void _mali_profiling_control(u32 action, u32 value)
{
switch(action)
{
case FBDUMP_CONTROL_ENABLE:
mali_set_user_setting(_MALI_UK_USER_SETTING_COLORBUFFER_CAPTURE_ENABLED, (value == 0 ? MALI_FALSE : MALI_TRUE));
break;
case FBDUMP_CONTROL_RATE:
mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_N_FRAMES, value);
break;
case SW_COUNTER_ENABLE:
mali_set_user_setting(_MALI_UK_USER_SETTING_SW_COUNTER_ENABLED, value);
break;
case FBDUMP_CONTROL_RESIZE_FACTOR:
mali_set_user_setting(_MALI_UK_USER_SETTING_BUFFER_CAPTURE_RESIZE_FACTOR, value);
break;
default:
break; /* Ignore unimplemented actions */
}
}
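/*
 * Hypothetical caller sketch: gator drives the actions defined above.
 * The values shown (capture on, every 16th frame, s/w counters on) are
 * illustrative only.
 */
#if 0
static void example_streamline_setup(void)
{
_mali_profiling_control(FBDUMP_CONTROL_ENABLE, 1); /* enable fb capture */
_mali_profiling_control(FBDUMP_CONTROL_RATE, 16); /* every 16th frame */
_mali_profiling_control(SW_COUNTER_ENABLE, 1); /* enable s/w counters */
}
#endif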
EXPORT_SYMBOL(_mali_profiling_set_event);
EXPORT_SYMBOL(_mali_profiling_get_counters);
EXPORT_SYMBOL(_mali_profiling_control);
| gpl-2.0 |
mmontuori/kernel-tegra-nvidia | arch/powerpc/platforms/cell/spufs/inode.c | 595 | 18537 |
/*
* SPU file system
*
* (C) Copyright IBM Deutschland Entwicklung GmbH 2005
*
* Author: Arnd Bergmann <arndb@de.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/fsnotify.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/module.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/parser.h>
#include <asm/prom.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/uaccess.h>
#include "spufs.h"
struct spufs_sb_info {
int debug;
};
static struct kmem_cache *spufs_inode_cache;
char *isolated_loader;
static int isolated_loader_size;
static struct spufs_sb_info *spufs_get_sb_info(struct super_block *sb)
{
return sb->s_fs_info;
}
static struct inode *
spufs_alloc_inode(struct super_block *sb)
{
struct spufs_inode_info *ei;
ei = kmem_cache_alloc(spufs_inode_cache, GFP_KERNEL);
if (!ei)
return NULL;
ei->i_gang = NULL;
ei->i_ctx = NULL;
ei->i_openers = 0;
return &ei->vfs_inode;
}
static void
spufs_destroy_inode(struct inode *inode)
{
kmem_cache_free(spufs_inode_cache, SPUFS_I(inode));
}
static void
spufs_init_once(void *p)
{
struct spufs_inode_info *ei = p;
inode_init_once(&ei->vfs_inode);
}
static struct inode *
spufs_new_inode(struct super_block *sb, int mode)
{
struct inode *inode;
inode = new_inode(sb);
if (!inode)
goto out;
inode->i_mode = mode;
inode->i_uid = current_fsuid();
inode->i_gid = current_fsgid();
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
out:
return inode;
}
static int
spufs_setattr(struct dentry *dentry, struct iattr *attr)
{
struct inode *inode = dentry->d_inode;
if ((attr->ia_valid & ATTR_SIZE) &&
(attr->ia_size != inode->i_size))
return -EINVAL;
return inode_setattr(inode, attr);
}
static int
spufs_new_file(struct super_block *sb, struct dentry *dentry,
const struct file_operations *fops, int mode,
size_t size, struct spu_context *ctx)
{
static const struct inode_operations spufs_file_iops = {
.setattr = spufs_setattr,
};
struct inode *inode;
int ret;
ret = -ENOSPC;
inode = spufs_new_inode(sb, S_IFREG | mode);
if (!inode)
goto out;
ret = 0;
inode->i_op = &spufs_file_iops;
inode->i_fop = fops;
inode->i_size = size;
inode->i_private = SPUFS_I(inode)->i_ctx = get_spu_context(ctx);
d_add(dentry, inode);
out:
return ret;
}
static void
spufs_delete_inode(struct inode *inode)
{
struct spufs_inode_info *ei = SPUFS_I(inode);
if (ei->i_ctx)
put_spu_context(ei->i_ctx);
if (ei->i_gang)
put_spu_gang(ei->i_gang);
clear_inode(inode);
}
static void spufs_prune_dir(struct dentry *dir)
{
struct dentry *dentry, *tmp;
mutex_lock(&dir->d_inode->i_mutex);
list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
spin_lock(&dcache_lock);
spin_lock(&dentry->d_lock);
if (!(d_unhashed(dentry)) && dentry->d_inode) {
dget_locked(dentry);
__d_drop(dentry);
spin_unlock(&dentry->d_lock);
simple_unlink(dir->d_inode, dentry);
spin_unlock(&dcache_lock);
dput(dentry);
} else {
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
}
}
shrink_dcache_parent(dir);
mutex_unlock(&dir->d_inode->i_mutex);
}
/* Caller must hold parent->i_mutex */
static int spufs_rmdir(struct inode *parent, struct dentry *dir)
{
/* remove all entries */
spufs_prune_dir(dir);
d_drop(dir);
return simple_rmdir(parent, dir);
}
static int spufs_fill_dir(struct dentry *dir,
const struct spufs_tree_descr *files, int mode,
struct spu_context *ctx)
{
struct dentry *dentry, *tmp;
int ret;
while (files->name && files->name[0]) {
ret = -ENOMEM;
dentry = d_alloc_name(dir, files->name);
if (!dentry)
goto out;
ret = spufs_new_file(dir->d_sb, dentry, files->ops,
files->mode & mode, files->size, ctx);
if (ret)
goto out;
files++;
}
return 0;
out:
/*
* remove all children from dir. dir->inode is not set, so don't
* simply use spufs_prune_dir() and panic afterwards :)
* dput() looks like it will do the right thing:
* - dec parent's ref counter
* - remove child from parent's child list
* - free child's inode if possible
* - free child
*/
list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) {
dput(dentry);
}
shrink_dcache_parent(dir);
return ret;
}
static int spufs_dir_close(struct inode *inode, struct file *file)
{
struct spu_context *ctx;
struct inode *parent;
struct dentry *dir;
int ret;
dir = file->f_path.dentry;
parent = dir->d_parent->d_inode;
ctx = SPUFS_I(dir->d_inode)->i_ctx;
mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
ret = spufs_rmdir(parent, dir);
mutex_unlock(&parent->i_mutex);
WARN_ON(ret);
/* We have to give up the mm_struct */
spu_forget(ctx);
return dcache_dir_close(inode, file);
}
const struct file_operations spufs_context_fops = {
.open = dcache_dir_open,
.release = spufs_dir_close,
.llseek = dcache_dir_lseek,
.read = generic_read_dir,
.readdir = dcache_readdir,
.fsync = simple_sync_file,
};
EXPORT_SYMBOL_GPL(spufs_context_fops);
static int
spufs_mkdir(struct inode *dir, struct dentry *dentry, unsigned int flags,
int mode)
{
int ret;
struct inode *inode;
struct spu_context *ctx;
ret = -ENOSPC;
inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
if (!inode)
goto out;
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
inode->i_mode &= S_ISGID;
}
ctx = alloc_spu_context(SPUFS_I(dir)->i_gang); /* XXX gang */
SPUFS_I(inode)->i_ctx = ctx;
if (!ctx)
goto out_iput;
ctx->flags = flags;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
if (flags & SPU_CREATE_NOSCHED)
ret = spufs_fill_dir(dentry, spufs_dir_nosched_contents,
mode, ctx);
else
ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx);
if (ret)
goto out_free_ctx;
if (spufs_get_sb_info(dir->i_sb)->debug)
ret = spufs_fill_dir(dentry, spufs_dir_debug_contents,
mode, ctx);
if (ret)
goto out_free_ctx;
d_instantiate(dentry, inode);
dget(dentry);
inc_nlink(dir);
inc_nlink(dentry->d_inode);
goto out;
out_free_ctx:
spu_forget(ctx);
put_spu_context(ctx);
out_iput:
iput(inode);
out:
return ret;
}
static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt)
{
int ret;
struct file *filp;
ret = get_unused_fd();
if (ret < 0) {
dput(dentry);
mntput(mnt);
goto out;
}
filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
if (IS_ERR(filp)) {
put_unused_fd(ret);
ret = PTR_ERR(filp);
goto out;
}
filp->f_op = &spufs_context_fops;
fd_install(ret, filp);
out:
return ret;
}
static struct spu_context *
spufs_assert_affinity(unsigned int flags, struct spu_gang *gang,
struct file *filp)
{
struct spu_context *tmp, *neighbor, *err;
int count, node;
int aff_supp;
aff_supp = !list_empty(&(list_entry(cbe_spu_info[0].spus.next,
struct spu, cbe_list))->aff_list);
if (!aff_supp)
return ERR_PTR(-EINVAL);
if (flags & SPU_CREATE_GANG)
return ERR_PTR(-EINVAL);
if (flags & SPU_CREATE_AFFINITY_MEM &&
gang->aff_ref_ctx &&
gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM)
return ERR_PTR(-EEXIST);
if (gang->aff_flags & AFF_MERGED)
return ERR_PTR(-EBUSY);
neighbor = NULL;
if (flags & SPU_CREATE_AFFINITY_SPU) {
if (!filp || filp->f_op != &spufs_context_fops)
return ERR_PTR(-EINVAL);
neighbor = get_spu_context(
SPUFS_I(filp->f_dentry->d_inode)->i_ctx);
if (!list_empty(&neighbor->aff_list) && !(neighbor->aff_head) &&
!list_is_last(&neighbor->aff_list, &gang->aff_list_head) &&
!list_entry(neighbor->aff_list.next, struct spu_context,
aff_list)->aff_head) {
err = ERR_PTR(-EEXIST);
goto out_put_neighbor;
}
if (gang != neighbor->gang) {
err = ERR_PTR(-EINVAL);
goto out_put_neighbor;
}
count = 1;
list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
count++;
if (list_empty(&neighbor->aff_list))
count++;
for (node = 0; node < MAX_NUMNODES; node++) {
if ((cbe_spu_info[node].n_spus - atomic_read(
&cbe_spu_info[node].reserved_spus)) >= count)
break;
}
if (node == MAX_NUMNODES) {
err = ERR_PTR(-EEXIST);
goto out_put_neighbor;
}
}
return neighbor;
out_put_neighbor:
put_spu_context(neighbor);
return err;
}
static void
spufs_set_affinity(unsigned int flags, struct spu_context *ctx,
struct spu_context *neighbor)
{
if (flags & SPU_CREATE_AFFINITY_MEM)
ctx->gang->aff_ref_ctx = ctx;
if (flags & SPU_CREATE_AFFINITY_SPU) {
if (list_empty(&neighbor->aff_list)) {
list_add_tail(&neighbor->aff_list,
&ctx->gang->aff_list_head);
neighbor->aff_head = 1;
}
if (list_is_last(&neighbor->aff_list, &ctx->gang->aff_list_head)
|| list_entry(neighbor->aff_list.next, struct spu_context,
aff_list)->aff_head) {
list_add(&ctx->aff_list, &neighbor->aff_list);
} else {
list_add_tail(&ctx->aff_list, &neighbor->aff_list);
if (neighbor->aff_head) {
neighbor->aff_head = 0;
ctx->aff_head = 1;
}
}
if (!ctx->gang->aff_ref_ctx)
ctx->gang->aff_ref_ctx = ctx;
}
}
static int
spufs_create_context(struct inode *inode, struct dentry *dentry,
struct vfsmount *mnt, int flags, int mode,
struct file *aff_filp)
{
int ret;
int affinity;
struct spu_gang *gang;
struct spu_context *neighbor;
ret = -EPERM;
if ((flags & SPU_CREATE_NOSCHED) &&
!capable(CAP_SYS_NICE))
goto out_unlock;
ret = -EINVAL;
if ((flags & (SPU_CREATE_NOSCHED | SPU_CREATE_ISOLATE))
== SPU_CREATE_ISOLATE)
goto out_unlock;
ret = -ENODEV;
if ((flags & SPU_CREATE_ISOLATE) && !isolated_loader)
goto out_unlock;
gang = NULL;
neighbor = NULL;
affinity = flags & (SPU_CREATE_AFFINITY_MEM | SPU_CREATE_AFFINITY_SPU);
if (affinity) {
gang = SPUFS_I(inode)->i_gang;
ret = -EINVAL;
if (!gang)
goto out_unlock;
mutex_lock(&gang->aff_mutex);
neighbor = spufs_assert_affinity(flags, gang, aff_filp);
if (IS_ERR(neighbor)) {
ret = PTR_ERR(neighbor);
goto out_aff_unlock;
}
}
ret = spufs_mkdir(inode, dentry, flags, mode & S_IRWXUGO);
if (ret)
goto out_aff_unlock;
if (affinity) {
spufs_set_affinity(flags, SPUFS_I(dentry->d_inode)->i_ctx,
neighbor);
if (neighbor)
put_spu_context(neighbor);
}
/*
* get references for dget and mntget, will be released
* in error path of *_open().
*/
ret = spufs_context_open(dget(dentry), mntget(mnt));
if (ret < 0) {
WARN_ON(spufs_rmdir(inode, dentry));
if (affinity)
mutex_unlock(&gang->aff_mutex);
mutex_unlock(&inode->i_mutex);
spu_forget(SPUFS_I(dentry->d_inode)->i_ctx);
goto out;
}
out_aff_unlock:
if (affinity)
mutex_unlock(&gang->aff_mutex);
out_unlock:
mutex_unlock(&inode->i_mutex);
out:
dput(dentry);
return ret;
}
static int
spufs_mkgang(struct inode *dir, struct dentry *dentry, int mode)
{
int ret;
struct inode *inode;
struct spu_gang *gang;
ret = -ENOSPC;
inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR);
if (!inode)
goto out;
ret = 0;
if (dir->i_mode & S_ISGID) {
inode->i_gid = dir->i_gid;
inode->i_mode &= S_ISGID;
}
gang = alloc_spu_gang();
SPUFS_I(inode)->i_ctx = NULL;
SPUFS_I(inode)->i_gang = gang;
if (!gang) {
ret = -ENOMEM;
goto out_iput;
}
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
d_instantiate(dentry, inode);
inc_nlink(dir);
inc_nlink(dentry->d_inode);
return ret;
out_iput:
iput(inode);
out:
return ret;
}
static int spufs_gang_open(struct dentry *dentry, struct vfsmount *mnt)
{
int ret;
struct file *filp;
ret = get_unused_fd();
if (ret < 0) {
dput(dentry);
mntput(mnt);
goto out;
}
filp = dentry_open(dentry, mnt, O_RDONLY, current_cred());
if (IS_ERR(filp)) {
put_unused_fd(ret);
ret = PTR_ERR(filp);
goto out;
}
filp->f_op = &simple_dir_operations;
fd_install(ret, filp);
out:
return ret;
}
static int spufs_create_gang(struct inode *inode,
struct dentry *dentry,
struct vfsmount *mnt, int mode)
{
int ret;
ret = spufs_mkgang(inode, dentry, mode & S_IRWXUGO);
if (ret)
goto out;
/*
* get references for dget and mntget, will be released
* in error path of *_open().
*/
ret = spufs_gang_open(dget(dentry), mntget(mnt));
if (ret < 0) {
int err = simple_rmdir(inode, dentry);
WARN_ON(err);
}
out:
mutex_unlock(&inode->i_mutex);
dput(dentry);
return ret;
}
static struct file_system_type spufs_type;
long spufs_create(struct nameidata *nd, unsigned int flags, mode_t mode,
struct file *filp)
{
struct dentry *dentry;
int ret;
ret = -EINVAL;
/* check if we are on spufs */
if (nd->path.dentry->d_sb->s_type != &spufs_type)
goto out;
/* don't accept undefined flags */
if (flags & (~SPU_CREATE_FLAG_ALL))
goto out;
/* only threads can be underneath a gang */
if (nd->path.dentry != nd->path.dentry->d_sb->s_root) {
if ((flags & SPU_CREATE_GANG) ||
!SPUFS_I(nd->path.dentry->d_inode)->i_gang)
goto out;
}
dentry = lookup_create(nd, 1);
ret = PTR_ERR(dentry);
if (IS_ERR(dentry))
goto out_dir;
mode &= ~current_umask();
if (flags & SPU_CREATE_GANG)
ret = spufs_create_gang(nd->path.dentry->d_inode,
dentry, nd->path.mnt, mode);
else
ret = spufs_create_context(nd->path.dentry->d_inode,
dentry, nd->path.mnt, flags, mode,
filp);
if (ret >= 0)
fsnotify_mkdir(nd->path.dentry->d_inode, dentry);
return ret;
out_dir:
mutex_unlock(&nd->path.dentry->d_inode->i_mutex);
out:
return ret;
}
/* File system initialization */
enum {
Opt_uid, Opt_gid, Opt_mode, Opt_debug, Opt_err,
};
static const match_table_t spufs_tokens = {
{ Opt_uid, "uid=%d" },
{ Opt_gid, "gid=%d" },
{ Opt_mode, "mode=%o" },
{ Opt_debug, "debug" },
{ Opt_err, NULL },
};
static int
spufs_parse_options(struct super_block *sb, char *options, struct inode *root)
{
char *p;
substring_t args[MAX_OPT_ARGS];
while ((p = strsep(&options, ",")) != NULL) {
int token, option;
if (!*p)
continue;
token = match_token(p, spufs_tokens, args);
switch (token) {
case Opt_uid:
if (match_int(&args[0], &option))
return 0;
root->i_uid = option;
break;
case Opt_gid:
if (match_int(&args[0], &option))
return 0;
root->i_gid = option;
break;
case Opt_mode:
if (match_octal(&args[0], &option))
return 0;
root->i_mode = option | S_IFDIR;
break;
case Opt_debug:
spufs_get_sb_info(sb)->debug = 1;
break;
default:
return 0;
}
}
return 1;
}
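/*
 * Example (illustrative only): given the token table above, a mount
 * such as
 *
 * mount -t spufs -o uid=1000,gid=1000,mode=0775,debug none /spu
 *
 * sets the root inode's owner, group and mode and enables the
 * per-superblock debug flag; any unrecognised option makes
 * spufs_parse_options() return 0 and the mount fail.
 */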
static void spufs_exit_isolated_loader(void)
{
free_pages((unsigned long) isolated_loader,
get_order(isolated_loader_size));
}
static void
spufs_init_isolated_loader(void)
{
struct device_node *dn;
const char *loader;
int size;
dn = of_find_node_by_path("/spu-isolation");
if (!dn)
return;
loader = of_get_property(dn, "loader", &size);
if (!loader)
return;
/* the loader must be aligned on a 16 byte boundary */
isolated_loader = (char *)__get_free_pages(GFP_KERNEL, get_order(size));
if (!isolated_loader)
return;
isolated_loader_size = size;
memcpy(isolated_loader, loader, size);
printk(KERN_INFO "spufs: SPU isolation mode enabled\n");
}
static int
spufs_create_root(struct super_block *sb, void *data)
{
struct inode *inode;
int ret;
ret = -ENODEV;
if (!spu_management_ops)
goto out;
ret = -ENOMEM;
inode = spufs_new_inode(sb, S_IFDIR | 0775);
if (!inode)
goto out;
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
SPUFS_I(inode)->i_ctx = NULL;
inc_nlink(inode);
ret = -EINVAL;
if (!spufs_parse_options(sb, data, inode))
goto out_iput;
ret = -ENOMEM;
sb->s_root = d_alloc_root(inode);
if (!sb->s_root)
goto out_iput;
return 0;
out_iput:
iput(inode);
out:
return ret;
}
static int
spufs_fill_super(struct super_block *sb, void *data, int silent)
{
struct spufs_sb_info *info;
static const struct super_operations s_ops = {
.alloc_inode = spufs_alloc_inode,
.destroy_inode = spufs_destroy_inode,
.statfs = simple_statfs,
.delete_inode = spufs_delete_inode,
.drop_inode = generic_delete_inode,
.show_options = generic_show_options,
};
save_mount_options(sb, data);
info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_CACHE_SIZE;
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = SPUFS_MAGIC;
sb->s_op = &s_ops;
sb->s_fs_info = info;
return spufs_create_root(sb, data);
}
static int
spufs_get_sb(struct file_system_type *fstype, int flags,
const char *name, void *data, struct vfsmount *mnt)
{
return get_sb_single(fstype, flags, data, spufs_fill_super, mnt);
}
static struct file_system_type spufs_type = {
.owner = THIS_MODULE,
.name = "spufs",
.get_sb = spufs_get_sb,
.kill_sb = kill_litter_super,
};
static int __init spufs_init(void)
{
int ret;
ret = -ENODEV;
if (!spu_management_ops)
goto out;
ret = -ENOMEM;
spufs_inode_cache = kmem_cache_create("spufs_inode_cache",
sizeof(struct spufs_inode_info), 0,
SLAB_HWCACHE_ALIGN, spufs_init_once);
if (!spufs_inode_cache)
goto out;
ret = spu_sched_init();
if (ret)
goto out_cache;
ret = register_filesystem(&spufs_type);
if (ret)
goto out_sched;
ret = register_spu_syscalls(&spufs_calls);
if (ret)
goto out_fs;
spufs_init_isolated_loader();
return 0;
out_fs:
unregister_filesystem(&spufs_type);
out_sched:
spu_sched_exit();
out_cache:
kmem_cache_destroy(spufs_inode_cache);
out:
return ret;
}
module_init(spufs_init);
static void __exit spufs_exit(void)
{
spu_sched_exit();
spufs_exit_isolated_loader();
unregister_spu_syscalls(&spufs_calls);
unregister_filesystem(&spufs_type);
kmem_cache_destroy(spufs_inode_cache);
}
module_exit(spufs_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
| gpl-2.0 |
codefarmer-cyk/linux | security/tomoyo/file.c | 851 | 29522 | /*
* security/tomoyo/file.c
*
* Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include "common.h"
#include <linux/slab.h>
/*
* Mapping table from "enum tomoyo_path_acl_index" to "enum tomoyo_mac_index".
*/
static const u8 tomoyo_p2mac[TOMOYO_MAX_PATH_OPERATION] = {
[TOMOYO_TYPE_EXECUTE] = TOMOYO_MAC_FILE_EXECUTE,
[TOMOYO_TYPE_READ] = TOMOYO_MAC_FILE_OPEN,
[TOMOYO_TYPE_WRITE] = TOMOYO_MAC_FILE_OPEN,
[TOMOYO_TYPE_APPEND] = TOMOYO_MAC_FILE_OPEN,
[TOMOYO_TYPE_UNLINK] = TOMOYO_MAC_FILE_UNLINK,
[TOMOYO_TYPE_GETATTR] = TOMOYO_MAC_FILE_GETATTR,
[TOMOYO_TYPE_RMDIR] = TOMOYO_MAC_FILE_RMDIR,
[TOMOYO_TYPE_TRUNCATE] = TOMOYO_MAC_FILE_TRUNCATE,
[TOMOYO_TYPE_SYMLINK] = TOMOYO_MAC_FILE_SYMLINK,
[TOMOYO_TYPE_CHROOT] = TOMOYO_MAC_FILE_CHROOT,
[TOMOYO_TYPE_UMOUNT] = TOMOYO_MAC_FILE_UMOUNT,
};
/*
* Mapping table from "enum tomoyo_mkdev_acl_index" to "enum tomoyo_mac_index".
*/
const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION] = {
[TOMOYO_TYPE_MKBLOCK] = TOMOYO_MAC_FILE_MKBLOCK,
[TOMOYO_TYPE_MKCHAR] = TOMOYO_MAC_FILE_MKCHAR,
};
/*
* Mapping table from "enum tomoyo_path2_acl_index" to "enum tomoyo_mac_index".
*/
const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION] = {
[TOMOYO_TYPE_LINK] = TOMOYO_MAC_FILE_LINK,
[TOMOYO_TYPE_RENAME] = TOMOYO_MAC_FILE_RENAME,
[TOMOYO_TYPE_PIVOT_ROOT] = TOMOYO_MAC_FILE_PIVOT_ROOT,
};
/*
* Mapping table from "enum tomoyo_path_number_acl_index" to
* "enum tomoyo_mac_index".
*/
const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = {
[TOMOYO_TYPE_CREATE] = TOMOYO_MAC_FILE_CREATE,
[TOMOYO_TYPE_MKDIR] = TOMOYO_MAC_FILE_MKDIR,
[TOMOYO_TYPE_MKFIFO] = TOMOYO_MAC_FILE_MKFIFO,
[TOMOYO_TYPE_MKSOCK] = TOMOYO_MAC_FILE_MKSOCK,
[TOMOYO_TYPE_IOCTL] = TOMOYO_MAC_FILE_IOCTL,
[TOMOYO_TYPE_CHMOD] = TOMOYO_MAC_FILE_CHMOD,
[TOMOYO_TYPE_CHOWN] = TOMOYO_MAC_FILE_CHOWN,
[TOMOYO_TYPE_CHGRP] = TOMOYO_MAC_FILE_CHGRP,
};
/**
* tomoyo_put_name_union - Drop reference on "struct tomoyo_name_union".
*
* @ptr: Pointer to "struct tomoyo_name_union".
*
* Returns nothing.
*/
void tomoyo_put_name_union(struct tomoyo_name_union *ptr)
{
tomoyo_put_group(ptr->group);
tomoyo_put_name(ptr->filename);
}
/**
* tomoyo_compare_name_union - Check whether a name matches "struct tomoyo_name_union" or not.
*
* @name: Pointer to "struct tomoyo_path_info".
* @ptr: Pointer to "struct tomoyo_name_union".
*
* Returns "struct tomoyo_path_info" if @name matches @ptr, NULL otherwise.
*/
const struct tomoyo_path_info *
tomoyo_compare_name_union(const struct tomoyo_path_info *name,
const struct tomoyo_name_union *ptr)
{
if (ptr->group)
return tomoyo_path_matches_group(name, ptr->group);
if (tomoyo_path_matches_pattern(name, ptr->filename))
return ptr->filename;
return NULL;
}
/**
* tomoyo_put_number_union - Drop reference on "struct tomoyo_number_union".
*
* @ptr: Pointer to "struct tomoyo_number_union".
*
* Returns nothing.
*/
void tomoyo_put_number_union(struct tomoyo_number_union *ptr)
{
tomoyo_put_group(ptr->group);
}
/**
* tomoyo_compare_number_union - Check whether a value matches "struct tomoyo_number_union" or not.
*
* @value: Number to check.
* @ptr: Pointer to "struct tomoyo_number_union".
*
* Returns true if @value matches @ptr, false otherwise.
*/
bool tomoyo_compare_number_union(const unsigned long value,
const struct tomoyo_number_union *ptr)
{
if (ptr->group)
return tomoyo_number_matches_group(value, value, ptr->group);
return value >= ptr->values[0] && value <= ptr->values[1];
}
/**
* tomoyo_add_slash - Add trailing '/' if needed.
*
* @buf: Pointer to "struct tomoyo_path_info".
*
* Returns nothing.
*
* @buf must be generated by tomoyo_encode() because this function does not
* allocate memory for adding '/'.
*/
static void tomoyo_add_slash(struct tomoyo_path_info *buf)
{
if (buf->is_dir)
return;
/*
* This is OK because tomoyo_encode() reserves space for appending "/".
*/
strcat((char *) buf->name, "/");
tomoyo_fill_path_info(buf);
}
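/*
 * Worked example (a sketch under the stated precondition that the
 * name came from tomoyo_encode(), which reserves the extra byte):
 */
#if 0
struct tomoyo_path_info buf;
buf.name = tomoyo_encode("/var/chroot"); /* no trailing '/': is_dir is false */
tomoyo_fill_path_info(&buf);
tomoyo_add_slash(&buf); /* buf.name is now "/var/chroot/", is_dir is true */
#endif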
/**
* tomoyo_get_realpath - Get realpath.
*
* @buf: Pointer to "struct tomoyo_path_info".
* @path: Pointer to "struct path".
*
* Returns true on success, false otherwise.
*/
static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, const struct path *path)
{
buf->name = tomoyo_realpath_from_path(path);
if (buf->name) {
tomoyo_fill_path_info(buf);
return true;
}
return false;
}
/**
* tomoyo_audit_path_log - Audit path request log.
*
* @r: Pointer to "struct tomoyo_request_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_audit_path_log(struct tomoyo_request_info *r)
{
return tomoyo_supervisor(r, "file %s %s\n", tomoyo_path_keyword
[r->param.path.operation],
r->param.path.filename->name);
}
/**
* tomoyo_audit_path2_log - Audit path/path request log.
*
* @r: Pointer to "struct tomoyo_request_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_audit_path2_log(struct tomoyo_request_info *r)
{
return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords
[tomoyo_pp2mac[r->param.path2.operation]],
r->param.path2.filename1->name,
r->param.path2.filename2->name);
}
/**
* tomoyo_audit_mkdev_log - Audit path/number/number/number request log.
*
* @r: Pointer to "struct tomoyo_request_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_audit_mkdev_log(struct tomoyo_request_info *r)
{
return tomoyo_supervisor(r, "file %s %s 0%o %u %u\n",
tomoyo_mac_keywords
[tomoyo_pnnn2mac[r->param.mkdev.operation]],
r->param.mkdev.filename->name,
r->param.mkdev.mode, r->param.mkdev.major,
r->param.mkdev.minor);
}
/**
* tomoyo_audit_path_number_log - Audit path/number request log.
*
* @r: Pointer to "struct tomoyo_request_info".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r)
{
const u8 type = r->param.path_number.operation;
u8 radix;
char buffer[64];
switch (type) {
case TOMOYO_TYPE_CREATE:
case TOMOYO_TYPE_MKDIR:
case TOMOYO_TYPE_MKFIFO:
case TOMOYO_TYPE_MKSOCK:
case TOMOYO_TYPE_CHMOD:
radix = TOMOYO_VALUE_TYPE_OCTAL;
break;
case TOMOYO_TYPE_IOCTL:
radix = TOMOYO_VALUE_TYPE_HEXADECIMAL;
break;
default:
radix = TOMOYO_VALUE_TYPE_DECIMAL;
break;
}
tomoyo_print_ulong(buffer, sizeof(buffer), r->param.path_number.number,
radix);
return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords
[tomoyo_pn2mac[type]],
r->param.path_number.filename->name, buffer);
}
/**
* tomoyo_check_path_acl - Check permission for path operation.
*
* @r: Pointer to "struct tomoyo_request_info".
* @ptr: Pointer to "struct tomoyo_acl_info".
*
* Returns true if granted, false otherwise.
*
* To be able to use wildcard for domain transition, this function sets
* matching entry on success. Since the caller holds tomoyo_read_lock(),
* it is safe to set matching entry.
*/
static bool tomoyo_check_path_acl(struct tomoyo_request_info *r,
const struct tomoyo_acl_info *ptr)
{
const struct tomoyo_path_acl *acl = container_of(ptr, typeof(*acl),
head);
if (acl->perm & (1 << r->param.path.operation)) {
r->param.path.matched_path =
tomoyo_compare_name_union(r->param.path.filename,
&acl->name);
return r->param.path.matched_path != NULL;
}
return false;
}
/**
* tomoyo_check_path_number_acl - Check permission for path/number operation.
*
* @r: Pointer to "struct tomoyo_request_info".
* @ptr: Pointer to "struct tomoyo_acl_info".
*
* Returns true if granted, false otherwise.
*/
static bool tomoyo_check_path_number_acl(struct tomoyo_request_info *r,
const struct tomoyo_acl_info *ptr)
{
const struct tomoyo_path_number_acl *acl =
container_of(ptr, typeof(*acl), head);
return (acl->perm & (1 << r->param.path_number.operation)) &&
tomoyo_compare_number_union(r->param.path_number.number,
&acl->number) &&
tomoyo_compare_name_union(r->param.path_number.filename,
&acl->name);
}
/**
* tomoyo_check_path2_acl - Check permission for path/path operation.
*
* @r: Pointer to "struct tomoyo_request_info".
* @ptr: Pointer to "struct tomoyo_acl_info".
*
* Returns true if granted, false otherwise.
*/
static bool tomoyo_check_path2_acl(struct tomoyo_request_info *r,
const struct tomoyo_acl_info *ptr)
{
const struct tomoyo_path2_acl *acl =
container_of(ptr, typeof(*acl), head);
return (acl->perm & (1 << r->param.path2.operation)) &&
tomoyo_compare_name_union(r->param.path2.filename1, &acl->name1)
&& tomoyo_compare_name_union(r->param.path2.filename2,
&acl->name2);
}
/**
* tomoyo_check_mkdev_acl - Check permission for path/number/number/number operation.
*
* @r: Pointer to "struct tomoyo_request_info".
* @ptr: Pointer to "struct tomoyo_acl_info".
*
* Returns true if granted, false otherwise.
*/
static bool tomoyo_check_mkdev_acl(struct tomoyo_request_info *r,
const struct tomoyo_acl_info *ptr)
{
const struct tomoyo_mkdev_acl *acl =
container_of(ptr, typeof(*acl), head);
return (acl->perm & (1 << r->param.mkdev.operation)) &&
tomoyo_compare_number_union(r->param.mkdev.mode,
&acl->mode) &&
tomoyo_compare_number_union(r->param.mkdev.major,
&acl->major) &&
tomoyo_compare_number_union(r->param.mkdev.minor,
&acl->minor) &&
tomoyo_compare_name_union(r->param.mkdev.filename,
&acl->name);
}
/**
* tomoyo_same_path_acl - Check for duplicated "struct tomoyo_path_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
*
* Returns true if @a == @b except permission bits, false otherwise.
*/
static bool tomoyo_same_path_acl(const struct tomoyo_acl_info *a,
const struct tomoyo_acl_info *b)
{
const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head);
return tomoyo_same_name_union(&p1->name, &p2->name);
}
/**
* tomoyo_merge_path_acl - Merge duplicated "struct tomoyo_path_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
* @is_delete: True for @a &= ~@b, false for @a |= @b.
*
* Returns true if @a is empty, false otherwise.
*/
static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a,
struct tomoyo_acl_info *b,
const bool is_delete)
{
u16 * const a_perm = &container_of(a, struct tomoyo_path_acl, head)
->perm;
u16 perm = *a_perm;
const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm;
if (is_delete)
perm &= ~b_perm;
else
perm |= b_perm;
*a_perm = perm;
return !perm;
}
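/*
 * Worked example of the merge rule above (illustrative): with
 * TOMOYO_TYPE_READ as bit 1 and TOMOYO_TYPE_WRITE as bit 2, an entry
 * holding *a_perm == 0x06 (read|write) merged with b_perm == 0x02
 * (read) keeps 0x06, while a delete of the same bits leaves 0x04
 * (write only). Only when the resulting mask reaches 0 does the
 * function return true, telling the caller the ACL entry is empty
 * and can be removed.
 */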
/**
* tomoyo_update_path_acl - Update "struct tomoyo_path_acl" list.
*
* @perm: Permission.
* @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_path_acl(const u16 perm,
struct tomoyo_acl_param *param)
{
struct tomoyo_path_acl e = {
.head.type = TOMOYO_TYPE_PATH_ACL,
.perm = perm
};
int error;
if (!tomoyo_parse_name_union(param, &e.name))
error = -EINVAL;
else
error = tomoyo_update_domain(&e.head, sizeof(e), param,
tomoyo_same_path_acl,
tomoyo_merge_path_acl);
tomoyo_put_name_union(&e.name);
return error;
}
/**
* tomoyo_same_mkdev_acl - Check for duplicated "struct tomoyo_mkdev_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
*
* Returns true if @a == @b except permission bits, false otherwise.
*/
static bool tomoyo_same_mkdev_acl(const struct tomoyo_acl_info *a,
const struct tomoyo_acl_info *b)
{
const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), head);
return tomoyo_same_name_union(&p1->name, &p2->name) &&
tomoyo_same_number_union(&p1->mode, &p2->mode) &&
tomoyo_same_number_union(&p1->major, &p2->major) &&
tomoyo_same_number_union(&p1->minor, &p2->minor);
}
/**
* tomoyo_merge_mkdev_acl - Merge duplicated "struct tomoyo_mkdev_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
* @is_delete: True for @a &= ~@b, false for @a |= @b.
*
* Returns true if @a is empty, false otherwise.
*/
static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a,
struct tomoyo_acl_info *b,
const bool is_delete)
{
u8 *const a_perm = &container_of(a, struct tomoyo_mkdev_acl,
head)->perm;
u8 perm = *a_perm;
const u8 b_perm = container_of(b, struct tomoyo_mkdev_acl, head)
->perm;
if (is_delete)
perm &= ~b_perm;
else
perm |= b_perm;
*a_perm = perm;
return !perm;
}
/**
* tomoyo_update_mkdev_acl - Update "struct tomoyo_mkdev_acl" list.
*
* @perm: Permission.
* @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_mkdev_acl(const u8 perm,
struct tomoyo_acl_param *param)
{
struct tomoyo_mkdev_acl e = {
.head.type = TOMOYO_TYPE_MKDEV_ACL,
.perm = perm
};
int error;
if (!tomoyo_parse_name_union(param, &e.name) ||
!tomoyo_parse_number_union(param, &e.mode) ||
!tomoyo_parse_number_union(param, &e.major) ||
!tomoyo_parse_number_union(param, &e.minor))
error = -EINVAL;
else
error = tomoyo_update_domain(&e.head, sizeof(e), param,
tomoyo_same_mkdev_acl,
tomoyo_merge_mkdev_acl);
tomoyo_put_name_union(&e.name);
tomoyo_put_number_union(&e.mode);
tomoyo_put_number_union(&e.major);
tomoyo_put_number_union(&e.minor);
return error;
}
/**
* tomoyo_same_path2_acl - Check for duplicated "struct tomoyo_path2_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
*
* Returns true if @a == @b except permission bits, false otherwise.
*/
static bool tomoyo_same_path2_acl(const struct tomoyo_acl_info *a,
const struct tomoyo_acl_info *b)
{
const struct tomoyo_path2_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_path2_acl *p2 = container_of(b, typeof(*p2), head);
return tomoyo_same_name_union(&p1->name1, &p2->name1) &&
tomoyo_same_name_union(&p1->name2, &p2->name2);
}
/**
* tomoyo_merge_path2_acl - Merge duplicated "struct tomoyo_path2_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
* @is_delete: True for @a &= ~@b, false for @a |= @b.
*
* Returns true if @a is empty, false otherwise.
*/
static bool tomoyo_merge_path2_acl(struct tomoyo_acl_info *a,
struct tomoyo_acl_info *b,
const bool is_delete)
{
u8 * const a_perm = &container_of(a, struct tomoyo_path2_acl, head)
->perm;
u8 perm = *a_perm;
const u8 b_perm = container_of(b, struct tomoyo_path2_acl, head)->perm;
if (is_delete)
perm &= ~b_perm;
else
perm |= b_perm;
*a_perm = perm;
return !perm;
}
/**
* tomoyo_update_path2_acl - Update "struct tomoyo_path2_acl" list.
*
* @perm: Permission.
* @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_path2_acl(const u8 perm,
struct tomoyo_acl_param *param)
{
struct tomoyo_path2_acl e = {
.head.type = TOMOYO_TYPE_PATH2_ACL,
.perm = perm
};
int error;
if (!tomoyo_parse_name_union(param, &e.name1) ||
!tomoyo_parse_name_union(param, &e.name2))
error = -EINVAL;
else
error = tomoyo_update_domain(&e.head, sizeof(e), param,
tomoyo_same_path2_acl,
tomoyo_merge_path2_acl);
tomoyo_put_name_union(&e.name1);
tomoyo_put_name_union(&e.name2);
return error;
}
/**
* tomoyo_path_permission - Check permission for single path operation.
*
* @r: Pointer to "struct tomoyo_request_info".
* @operation: Type of operation.
* @filename: Filename to check.
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
const struct tomoyo_path_info *filename)
{
int error;
r->type = tomoyo_p2mac[operation];
r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type);
if (r->mode == TOMOYO_CONFIG_DISABLED)
return 0;
r->param_type = TOMOYO_TYPE_PATH_ACL;
r->param.path.filename = filename;
r->param.path.operation = operation;
do {
tomoyo_check_acl(r, tomoyo_check_path_acl);
error = tomoyo_audit_path_log(r);
} while (error == TOMOYO_RETRY_REQUEST);
return error;
}
/**
* tomoyo_execute_permission - Check permission for execute operation.
*
* @r: Pointer to "struct tomoyo_request_info".
* @filename: Filename to check.
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
int tomoyo_execute_permission(struct tomoyo_request_info *r,
const struct tomoyo_path_info *filename)
{
/*
* Unlike other permission checks, this check is done regardless of
* profile mode settings in order to check for domain transition
* preference.
*/
r->type = TOMOYO_MAC_FILE_EXECUTE;
r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type);
r->param_type = TOMOYO_TYPE_PATH_ACL;
r->param.path.filename = filename;
r->param.path.operation = TOMOYO_TYPE_EXECUTE;
tomoyo_check_acl(r, tomoyo_check_path_acl);
r->ee->transition = r->matched_acl && r->matched_acl->cond ?
r->matched_acl->cond->transit : NULL;
if (r->mode != TOMOYO_CONFIG_DISABLED)
return tomoyo_audit_path_log(r);
return 0;
}
/**
* tomoyo_same_path_number_acl - Check for duplicated "struct tomoyo_path_number_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
*
* Returns true if @a == @b except permission bits, false otherwise.
*/
static bool tomoyo_same_path_number_acl(const struct tomoyo_acl_info *a,
const struct tomoyo_acl_info *b)
{
const struct tomoyo_path_number_acl *p1 = container_of(a, typeof(*p1),
head);
const struct tomoyo_path_number_acl *p2 = container_of(b, typeof(*p2),
head);
return tomoyo_same_name_union(&p1->name, &p2->name) &&
tomoyo_same_number_union(&p1->number, &p2->number);
}
/**
* tomoyo_merge_path_number_acl - Merge duplicated "struct tomoyo_path_number_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
* @is_delete: True for @a &= ~@b, false for @a |= @b.
*
* Returns true if @a is empty, false otherwise.
*/
static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a,
struct tomoyo_acl_info *b,
const bool is_delete)
{
u8 * const a_perm = &container_of(a, struct tomoyo_path_number_acl,
head)->perm;
u8 perm = *a_perm;
const u8 b_perm = container_of(b, struct tomoyo_path_number_acl, head)
->perm;
if (is_delete)
perm &= ~b_perm;
else
perm |= b_perm;
*a_perm = perm;
return !perm;
}
/**
* tomoyo_update_path_number_acl - Update ioctl/chmod/chown/chgrp ACL.
*
* @perm: Permission.
* @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
*/
static int tomoyo_update_path_number_acl(const u8 perm,
struct tomoyo_acl_param *param)
{
struct tomoyo_path_number_acl e = {
.head.type = TOMOYO_TYPE_PATH_NUMBER_ACL,
.perm = perm
};
int error;
if (!tomoyo_parse_name_union(param, &e.name) ||
!tomoyo_parse_number_union(param, &e.number))
error = -EINVAL;
else
error = tomoyo_update_domain(&e.head, sizeof(e), param,
tomoyo_same_path_number_acl,
tomoyo_merge_path_number_acl);
tomoyo_put_name_union(&e.name);
tomoyo_put_number_union(&e.number);
return error;
}
/**
* tomoyo_path_number_perm - Check permission for "create", "mkdir", "mkfifo", "mksock", "ioctl", "chmod", "chown", "chgrp".
*
* @type: Type of operation.
* @path: Pointer to "struct path".
* @number: Number.
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_path_number_perm(const u8 type, struct path *path,
unsigned long number)
{
struct tomoyo_request_info r;
struct tomoyo_obj_info obj = {
.path1 = *path,
};
int error = -ENOMEM;
struct tomoyo_path_info buf;
int idx;
if (tomoyo_init_request_info(&r, NULL, tomoyo_pn2mac[type])
== TOMOYO_CONFIG_DISABLED || !path->dentry)
return 0;
idx = tomoyo_read_lock();
if (!tomoyo_get_realpath(&buf, path))
goto out;
r.obj = &obj;
if (type == TOMOYO_TYPE_MKDIR)
tomoyo_add_slash(&buf);
r.param_type = TOMOYO_TYPE_PATH_NUMBER_ACL;
r.param.path_number.operation = type;
r.param.path_number.filename = &buf;
r.param.path_number.number = number;
do {
tomoyo_check_acl(&r, tomoyo_check_path_number_acl);
error = tomoyo_audit_path_number_log(&r);
} while (error == TOMOYO_RETRY_REQUEST);
kfree(buf.name);
out:
tomoyo_read_unlock(idx);
if (r.mode != TOMOYO_CONFIG_ENFORCING)
error = 0;
return error;
}
/**
* tomoyo_check_open_permission - Check permission for "read" and "write".
*
* @domain: Pointer to "struct tomoyo_domain_info".
* @path: Pointer to "struct path".
* @flag: Flags for open().
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
struct path *path, const int flag)
{
const u8 acc_mode = ACC_MODE(flag);
int error = 0;
struct tomoyo_path_info buf;
struct tomoyo_request_info r;
struct tomoyo_obj_info obj = {
.path1 = *path,
};
int idx;
buf.name = NULL;
r.mode = TOMOYO_CONFIG_DISABLED;
idx = tomoyo_read_lock();
if (acc_mode &&
tomoyo_init_request_info(&r, domain, TOMOYO_MAC_FILE_OPEN)
!= TOMOYO_CONFIG_DISABLED) {
if (!tomoyo_get_realpath(&buf, path)) {
error = -ENOMEM;
goto out;
}
r.obj = &obj;
if (acc_mode & MAY_READ)
error = tomoyo_path_permission(&r, TOMOYO_TYPE_READ,
&buf);
if (!error && (acc_mode & MAY_WRITE))
error = tomoyo_path_permission(&r, (flag & O_APPEND) ?
TOMOYO_TYPE_APPEND :
TOMOYO_TYPE_WRITE,
&buf);
}
out:
kfree(buf.name);
tomoyo_read_unlock(idx);
if (r.mode != TOMOYO_CONFIG_ENFORCING)
error = 0;
return error;
}
/**
* tomoyo_path_perm - Check permission for "unlink", "rmdir", "truncate", "symlink", "append", "chroot" and "unmount".
*
* @operation: Type of operation.
* @path: Pointer to "struct path".
* @target: Symlink's target if @operation is TOMOYO_TYPE_SYMLINK,
* NULL otherwise.
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_path_perm(const u8 operation, const struct path *path, const char *target)
{
struct tomoyo_request_info r;
struct tomoyo_obj_info obj = {
.path1 = *path,
};
int error;
struct tomoyo_path_info buf;
bool is_enforce;
struct tomoyo_path_info symlink_target;
int idx;
if (tomoyo_init_request_info(&r, NULL, tomoyo_p2mac[operation])
== TOMOYO_CONFIG_DISABLED)
return 0;
is_enforce = (r.mode == TOMOYO_CONFIG_ENFORCING);
error = -ENOMEM;
buf.name = NULL;
idx = tomoyo_read_lock();
if (!tomoyo_get_realpath(&buf, path))
goto out;
r.obj = &obj;
switch (operation) {
case TOMOYO_TYPE_RMDIR:
case TOMOYO_TYPE_CHROOT:
tomoyo_add_slash(&buf);
break;
case TOMOYO_TYPE_SYMLINK:
symlink_target.name = tomoyo_encode(target);
if (!symlink_target.name)
goto out;
tomoyo_fill_path_info(&symlink_target);
obj.symlink_target = &symlink_target;
break;
}
error = tomoyo_path_permission(&r, operation, &buf);
if (operation == TOMOYO_TYPE_SYMLINK)
kfree(symlink_target.name);
out:
kfree(buf.name);
tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
}
/**
* tomoyo_mkdev_perm - Check permission for "mkblock" and "mkchar".
*
* @operation: Type of operation. (TOMOYO_TYPE_MKCHAR or TOMOYO_TYPE_MKBLOCK)
* @path: Pointer to "struct path".
* @mode: Create mode.
* @dev: Device number.
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_mkdev_perm(const u8 operation, struct path *path,
const unsigned int mode, unsigned int dev)
{
struct tomoyo_request_info r;
struct tomoyo_obj_info obj = {
.path1 = *path,
};
int error = -ENOMEM;
struct tomoyo_path_info buf;
int idx;
if (tomoyo_init_request_info(&r, NULL, tomoyo_pnnn2mac[operation])
== TOMOYO_CONFIG_DISABLED)
return 0;
idx = tomoyo_read_lock();
error = -ENOMEM;
if (tomoyo_get_realpath(&buf, path)) {
r.obj = &obj;
dev = new_decode_dev(dev);
r.param_type = TOMOYO_TYPE_MKDEV_ACL;
r.param.mkdev.filename = &buf;
r.param.mkdev.operation = operation;
r.param.mkdev.mode = mode;
r.param.mkdev.major = MAJOR(dev);
r.param.mkdev.minor = MINOR(dev);
tomoyo_check_acl(&r, tomoyo_check_mkdev_acl);
error = tomoyo_audit_mkdev_log(&r);
kfree(buf.name);
}
tomoyo_read_unlock(idx);
if (r.mode != TOMOYO_CONFIG_ENFORCING)
error = 0;
return error;
}
/**
* tomoyo_path2_perm - Check permission for "rename", "link" and "pivot_root".
*
* @operation: Type of operation.
* @path1: Pointer to "struct path".
* @path2: Pointer to "struct path".
*
* Returns 0 on success, negative value otherwise.
*/
int tomoyo_path2_perm(const u8 operation, struct path *path1,
struct path *path2)
{
int error = -ENOMEM;
struct tomoyo_path_info buf1;
struct tomoyo_path_info buf2;
struct tomoyo_request_info r;
struct tomoyo_obj_info obj = {
.path1 = *path1,
.path2 = *path2,
};
int idx;
if (tomoyo_init_request_info(&r, NULL, tomoyo_pp2mac[operation])
== TOMOYO_CONFIG_DISABLED)
return 0;
buf1.name = NULL;
buf2.name = NULL;
idx = tomoyo_read_lock();
if (!tomoyo_get_realpath(&buf1, path1) ||
!tomoyo_get_realpath(&buf2, path2))
goto out;
switch (operation) {
case TOMOYO_TYPE_RENAME:
case TOMOYO_TYPE_LINK:
if (!d_is_dir(path1->dentry))
break;
/* fall through */
case TOMOYO_TYPE_PIVOT_ROOT:
tomoyo_add_slash(&buf1);
tomoyo_add_slash(&buf2);
break;
}
r.obj = &obj;
r.param_type = TOMOYO_TYPE_PATH2_ACL;
r.param.path2.operation = operation;
r.param.path2.filename1 = &buf1;
r.param.path2.filename2 = &buf2;
do {
tomoyo_check_acl(&r, tomoyo_check_path2_acl);
error = tomoyo_audit_path2_log(&r);
} while (error == TOMOYO_RETRY_REQUEST);
out:
kfree(buf1.name);
kfree(buf2.name);
tomoyo_read_unlock(idx);
if (r.mode != TOMOYO_CONFIG_ENFORCING)
error = 0;
return error;
}
/**
* tomoyo_same_mount_acl - Check for duplicated "struct tomoyo_mount_acl" entry.
*
* @a: Pointer to "struct tomoyo_acl_info".
* @b: Pointer to "struct tomoyo_acl_info".
*
* Returns true if @a == @b, false otherwise.
*/
static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a,
const struct tomoyo_acl_info *b)
{
const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head);
const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head);
return tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) &&
tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) &&
tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) &&
tomoyo_same_number_union(&p1->flags, &p2->flags);
}
/**
* tomoyo_update_mount_acl - Write "struct tomoyo_mount_acl" list.
*
* @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_mount_acl(struct tomoyo_acl_param *param)
{
struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL };
int error;
if (!tomoyo_parse_name_union(param, &e.dev_name) ||
!tomoyo_parse_name_union(param, &e.dir_name) ||
!tomoyo_parse_name_union(param, &e.fs_type) ||
!tomoyo_parse_number_union(param, &e.flags))
error = -EINVAL;
else
error = tomoyo_update_domain(&e.head, sizeof(e), param,
tomoyo_same_mount_acl, NULL);
tomoyo_put_name_union(&e.dev_name);
tomoyo_put_name_union(&e.dir_name);
tomoyo_put_name_union(&e.fs_type);
tomoyo_put_number_union(&e.flags);
return error;
}
/**
* tomoyo_write_file - Update file related list.
*
* @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
int tomoyo_write_file(struct tomoyo_acl_param *param)
{
u16 perm = 0;
u8 type;
const char *operation = tomoyo_read_token(param);
for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++)
if (tomoyo_permstr(operation, tomoyo_path_keyword[type]))
perm |= 1 << type;
if (perm)
return tomoyo_update_path_acl(perm, param);
for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++)
if (tomoyo_permstr(operation,
tomoyo_mac_keywords[tomoyo_pp2mac[type]]))
perm |= 1 << type;
if (perm)
return tomoyo_update_path2_acl(perm, param);
for (type = 0; type < TOMOYO_MAX_PATH_NUMBER_OPERATION; type++)
if (tomoyo_permstr(operation,
tomoyo_mac_keywords[tomoyo_pn2mac[type]]))
perm |= 1 << type;
if (perm)
return tomoyo_update_path_number_acl(perm, param);
for (type = 0; type < TOMOYO_MAX_MKDEV_OPERATION; type++)
if (tomoyo_permstr(operation,
tomoyo_mac_keywords[tomoyo_pnnn2mac[type]]))
perm |= 1 << type;
if (perm)
return tomoyo_update_mkdev_acl(perm, param);
if (tomoyo_permstr(operation,
tomoyo_mac_keywords[TOMOYO_MAC_FILE_MOUNT]))
return tomoyo_update_mount_acl(param);
return -EINVAL;
}
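/*
 * Example policy lines (illustrative) as dispatched by
 * tomoyo_write_file():
 *
 * file read/write /etc/fstab -> tomoyo_update_path_acl()
 * file create /tmp/file 0644 -> tomoyo_update_path_number_acl()
 * file mkblock /dev/sda 0660 8 0 -> tomoyo_update_mkdev_acl()
 * file mount /dev/sda1 /mnt/ ext3 0 -> tomoyo_update_mount_acl()
 *
 * tomoyo_permstr() matches '/'-separated keywords, which is why
 * several operations of the same class can share one line, as in
 * "read/write" above.
 */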
| gpl-2.0 |
CyanogenMod/android_kernel_samsung_smdk4210 | arch/arm/mach-omap2/clock2420_data.c | 2131 | 59427 | /*
* OMAP2420 clock data
*
* Copyright (C) 2005-2009 Texas Instruments, Inc.
* Copyright (C) 2004-2011 Nokia Corporation
*
* Contacts:
* Richard Woodruff <r-woodruff2@ti.com>
* Paul Walmsley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <plat/clkdev_omap.h>
#include "clock.h"
#include "clock2xxx.h"
#include "opp2xxx.h"
#include "cm2xxx_3xxx.h"
#include "prm2xxx_3xxx.h"
#include "prm-regbits-24xx.h"
#include "cm-regbits-24xx.h"
#include "sdrc.h"
#include "control.h"
#define OMAP_CM_REGADDR OMAP2420_CM_REGADDR
/*
* 2420 clock tree.
*
* NOTE: In many cases here we are assigning a 'default' parent. In
* many cases the parent is selectable. The set parent calls will
* also switch sources.
*
* Several sources are given initial rates which may be wrong; this will
* be fixed up in the init func.
*
* Things are broadly separated below by clock domains. It is
* noteworthy that most peripherals have dependencies on multiple clock
* domains. Many get their interface clocks from the L4 domain, but get
* functional clocks from fixed sources or other core domain derived
* clocks.
*/
/* Base external input clocks */
static struct clk func_32k_ck = {
.name = "func_32k_ck",
.ops = &clkops_null,
.rate = 32768,
.clkdm_name = "wkup_clkdm",
};
static struct clk secure_32k_ck = {
.name = "secure_32k_ck",
.ops = &clkops_null,
.rate = 32768,
.clkdm_name = "wkup_clkdm",
};
/* Typical 12/13MHz in standalone mode, will be 26MHz in chassis mode */
static struct clk osc_ck = { /* (*12, *13, 19.2, *26, 38.4)MHz */
.name = "osc_ck",
.ops = &clkops_oscck,
.clkdm_name = "wkup_clkdm",
.recalc = &omap2_osc_clk_recalc,
};
/* Without modem likely 12MHz, with modem likely 13MHz */
static struct clk sys_ck = { /* (*12, *13, 19.2, 26, 38.4)MHz */
.name = "sys_ck", /* ~ ref_clk also */
.ops = &clkops_null,
.parent = &osc_ck,
.clkdm_name = "wkup_clkdm",
.recalc = &omap2xxx_sys_clk_recalc,
};
static struct clk alt_ck = { /* Typical 54M or 48M, may not exist */
.name = "alt_ck",
.ops = &clkops_null,
.rate = 54000000,
.clkdm_name = "wkup_clkdm",
};
/* Optional external clock input for McBSP CLKS */
static struct clk mcbsp_clks = {
.name = "mcbsp_clks",
.ops = &clkops_null,
};
/*
* Analog domain root source clocks
*/
/* dpll_ck, is broken out in to special cases through clksel */
/* REVISIT: Rate changes on dpll_ck trigger a full set change. ...
* deal with this
*/
static struct dpll_data dpll_dd = {
.mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
.mult_mask = OMAP24XX_DPLL_MULT_MASK,
.div1_mask = OMAP24XX_DPLL_DIV_MASK,
.clk_bypass = &sys_ck,
.clk_ref = &sys_ck,
.control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
.enable_mask = OMAP24XX_EN_DPLL_MASK,
.max_multiplier = 1023,
.min_divider = 1,
.max_divider = 16,
};
/*
* XXX Cannot add round_rate here yet, as this is still a composite clock,
* not just a DPLL
*/
static struct clk dpll_ck = {
.name = "dpll_ck",
.ops = &clkops_omap2xxx_dpll_ops,
.parent = &sys_ck, /* Can be func_32k also */
.dpll_data = &dpll_dd,
.clkdm_name = "wkup_clkdm",
.recalc = &omap2_dpllcore_recalc,
.set_rate = &omap2_reprogram_dpllcore,
};
static struct clk apll96_ck = {
.name = "apll96_ck",
.ops = &clkops_apll96,
.parent = &sys_ck,
.rate = 96000000,
.flags = ENABLE_ON_INIT,
.clkdm_name = "wkup_clkdm",
.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
.enable_bit = OMAP24XX_EN_96M_PLL_SHIFT,
};
static struct clk apll54_ck = {
.name = "apll54_ck",
.ops = &clkops_apll54,
.parent = &sys_ck,
.rate = 54000000,
.flags = ENABLE_ON_INIT,
.clkdm_name = "wkup_clkdm",
.enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN),
.enable_bit = OMAP24XX_EN_54M_PLL_SHIFT,
};
/*
* PRCM digital base sources
*/
/* func_54m_ck */
static const struct clksel_rate func_54m_apll54_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 },
};
static const struct clksel_rate func_54m_alt_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 },
};
static const struct clksel func_54m_clksel[] = {
{ .parent = &apll54_ck, .rates = func_54m_apll54_rates, },
{ .parent = &alt_ck, .rates = func_54m_alt_rates, },
{ .parent = NULL },
};
static struct clk func_54m_ck = {
.name = "func_54m_ck",
.ops = &clkops_null,
.parent = &apll54_ck, /* can also be alt_clk */
.clkdm_name = "wkup_clkdm",
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
.clksel_mask = OMAP24XX_54M_SOURCE_MASK,
.clksel = func_54m_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk core_ck = {
.name = "core_ck",
.ops = &clkops_null,
.parent = &dpll_ck, /* can also be 32k */
.clkdm_name = "wkup_clkdm",
.recalc = &followparent_recalc,
};
static struct clk func_96m_ck = {
.name = "func_96m_ck",
.ops = &clkops_null,
.parent = &apll96_ck,
.clkdm_name = "wkup_clkdm",
.recalc = &followparent_recalc,
};
/* func_48m_ck */
static const struct clksel_rate func_48m_apll96_rates[] = {
{ .div = 2, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 },
};
static const struct clksel_rate func_48m_alt_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 },
};
static const struct clksel func_48m_clksel[] = {
{ .parent = &apll96_ck, .rates = func_48m_apll96_rates },
{ .parent = &alt_ck, .rates = func_48m_alt_rates },
{ .parent = NULL }
};
static struct clk func_48m_ck = {
.name = "func_48m_ck",
.ops = &clkops_null,
.parent = &apll96_ck, /* 96M or Alt */
.clkdm_name = "wkup_clkdm",
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1),
.clksel_mask = OMAP24XX_48M_SOURCE_MASK,
.clksel = func_48m_clksel,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
.set_rate = &omap2_clksel_set_rate
};
static struct clk func_12m_ck = {
.name = "func_12m_ck",
.ops = &clkops_null,
.parent = &func_48m_ck,
.fixed_div = 4,
.clkdm_name = "wkup_clkdm",
.recalc = &omap_fixed_divisor_recalc,
};
/* Secure timer, only available in secure mode */
static struct clk wdt1_osc_ck = {
.name = "ck_wdt1_osc",
.ops = &clkops_null, /* RMK: missing? */
.parent = &osc_ck,
.recalc = &followparent_recalc,
};
/*
* The common_clkout* clksel_rate structs are common to
* sys_clkout, sys_clkout_src, sys_clkout2, and sys_clkout2_src.
* sys_clkout2_* are 2420-only, so the
* clksel_rate flags fields are inaccurate for those clocks. This is
* harmless since access to those clocks is gated by the struct clk
* flags fields, which mark them as 2420-only.
*/
static const struct clksel_rate common_clkout_src_core_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_sys_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_96m_rates[] = {
{ .div = 1, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_clkout_src_54m_rates[] = {
{ .div = 1, .val = 3, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel common_clkout_src_clksel[] = {
{ .parent = &core_ck, .rates = common_clkout_src_core_rates },
{ .parent = &sys_ck, .rates = common_clkout_src_sys_rates },
{ .parent = &func_96m_ck, .rates = common_clkout_src_96m_rates },
{ .parent = &func_54m_ck, .rates = common_clkout_src_54m_rates },
{ .parent = NULL }
};
static struct clk sys_clkout_src = {
.name = "sys_clkout_src",
.ops = &clkops_omap2_dflt,
.parent = &func_54m_ck,
.clkdm_name = "wkup_clkdm",
.enable_reg = OMAP2420_PRCM_CLKOUT_CTRL,
.enable_bit = OMAP24XX_CLKOUT_EN_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP2420_PRCM_CLKOUT_CTRL,
.clksel_mask = OMAP24XX_CLKOUT_SOURCE_MASK,
.clksel = common_clkout_src_clksel,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
.set_rate = &omap2_clksel_set_rate
};
static const struct clksel_rate common_clkout_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 2, .val = 1, .flags = RATE_IN_24XX },
{ .div = 4, .val = 2, .flags = RATE_IN_24XX },
{ .div = 8, .val = 3, .flags = RATE_IN_24XX },
{ .div = 16, .val = 4, .flags = RATE_IN_24XX },
{ .div = 0 },
};
static const struct clksel sys_clkout_clksel[] = {
{ .parent = &sys_clkout_src, .rates = common_clkout_rates },
{ .parent = NULL }
};
static struct clk sys_clkout = {
.name = "sys_clkout",
.ops = &clkops_null,
.parent = &sys_clkout_src,
.clkdm_name = "wkup_clkdm",
.clksel_reg = OMAP2420_PRCM_CLKOUT_CTRL,
.clksel_mask = OMAP24XX_CLKOUT_DIV_MASK,
.clksel = sys_clkout_clksel,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
.set_rate = &omap2_clksel_set_rate
};
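/*
 * Worked example (illustrative): with sys_clkout_src parented to the
 * 54 MHz func_54m_ck and CLKOUT_DIV selecting .val = 3 (divide by 8)
 * from common_clkout_rates above, omap2_clksel_recalc() yields
 * 54 MHz / 8 = 6.75 MHz on sys_clkout.
 */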
/* In 2430, new in 2420 ES2 */
static struct clk sys_clkout2_src = {
.name = "sys_clkout2_src",
.ops = &clkops_omap2_dflt,
.parent = &func_54m_ck,
.clkdm_name = "wkup_clkdm",
.enable_reg = OMAP2420_PRCM_CLKOUT_CTRL,
.enable_bit = OMAP2420_CLKOUT2_EN_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP2420_PRCM_CLKOUT_CTRL,
.clksel_mask = OMAP2420_CLKOUT2_SOURCE_MASK,
.clksel = common_clkout_src_clksel,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
.set_rate = &omap2_clksel_set_rate
};
static const struct clksel sys_clkout2_clksel[] = {
{ .parent = &sys_clkout2_src, .rates = common_clkout_rates },
{ .parent = NULL }
};
/* In 2430, new in 2420 ES2 */
static struct clk sys_clkout2 = {
.name = "sys_clkout2",
.ops = &clkops_null,
.parent = &sys_clkout2_src,
.clkdm_name = "wkup_clkdm",
.clksel_reg = OMAP2420_PRCM_CLKOUT_CTRL,
.clksel_mask = OMAP2420_CLKOUT2_DIV_MASK,
.clksel = sys_clkout2_clksel,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
.set_rate = &omap2_clksel_set_rate
};
static struct clk emul_ck = {
.name = "emul_ck",
.ops = &clkops_omap2_dflt,
.parent = &func_54m_ck,
.clkdm_name = "wkup_clkdm",
.enable_reg = OMAP2420_PRCM_CLKEMUL_CTRL,
.enable_bit = OMAP24XX_EMULATION_EN_SHIFT,
.recalc = &followparent_recalc,
};
/*
* MPU clock domain
* Clocks:
* MPU_FCLK, MPU_ICLK
* INT_M_FCLK, INT_M_I_CLK
*
* - Individual clocks are hardware managed.
* - Base divider comes from: CM_CLKSEL_MPU
*
*/
static const struct clksel_rate mpu_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_242X },
{ .div = 6, .val = 6, .flags = RATE_IN_242X },
{ .div = 8, .val = 8, .flags = RATE_IN_242X },
{ .div = 0 },
};
static const struct clksel mpu_clksel[] = {
{ .parent = &core_ck, .rates = mpu_core_rates },
{ .parent = NULL }
};
static struct clk mpu_ck = { /* Control cpu */
.name = "mpu_ck",
.ops = &clkops_null,
.parent = &core_ck,
.clkdm_name = "mpu_clkdm",
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(MPU_MOD, CM_CLKSEL),
.clksel_mask = OMAP24XX_CLKSEL_MPU_MASK,
.clksel = mpu_clksel,
.recalc = &omap2_clksel_recalc,
};
/*
* DSP (2420-UMA+IVA1) clock domain
* Clocks:
* 2420: UMA_FCLK, UMA_ICLK, IVA_MPU, IVA_COP
*
* Won't be too specific here. The core clock comes into this block,
* where it is divided and then teed off. One branch goes directly to
* the enable controls; the other branch is further divided by 2 and
* then possibly routed through a synchronizer before emerging as the
* remaining clocks.
*/
static const struct clksel_rate dsp_fck_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 6, .val = 6, .flags = RATE_IN_242X },
{ .div = 8, .val = 8, .flags = RATE_IN_242X },
{ .div = 12, .val = 12, .flags = RATE_IN_242X },
{ .div = 0 },
};
static const struct clksel dsp_fck_clksel[] = {
{ .parent = &core_ck, .rates = dsp_fck_core_rates },
{ .parent = NULL }
};
static struct clk dsp_fck = {
.name = "dsp_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &core_ck,
.clkdm_name = "dsp_clkdm",
.enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
.enable_bit = OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT,
.clksel_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
.clksel_mask = OMAP24XX_CLKSEL_DSP_MASK,
.clksel = dsp_fck_clksel,
.recalc = &omap2_clksel_recalc,
};
static const struct clksel dsp_ick_clksel[] = {
{ .parent = &dsp_fck, .rates = dsp_ick_rates },
{ .parent = NULL }
};
static struct clk dsp_ick = {
.name = "dsp_ick", /* apparently ipi and isp */
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &dsp_fck,
.clkdm_name = "dsp_clkdm",
.enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_ICLKEN),
.enable_bit = OMAP2420_EN_DSP_IPI_SHIFT, /* for ipi */
.clksel_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
.clksel_mask = OMAP24XX_CLKSEL_DSP_IF_MASK,
.clksel = dsp_ick_clksel,
.recalc = &omap2_clksel_recalc,
};
/*
* The IVA1 is an ARM7 core on the 2420 that has nothing to do with
* the C54x, but which is contained in the DSP powerdomain. Does not
* exist on later OMAPs.
*/
static struct clk iva1_ifck = {
.name = "iva1_ifck",
.ops = &clkops_omap2_dflt_wait,
.parent = &core_ck,
.clkdm_name = "iva1_clkdm",
.enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
.enable_bit = OMAP2420_EN_IVA_COP_SHIFT,
.clksel_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL),
.clksel_mask = OMAP2420_CLKSEL_IVA_MASK,
.clksel = dsp_fck_clksel,
.recalc = &omap2_clksel_recalc,
};
/* IVA1 mpu/int/i/f clocks are /2 of parent */
static struct clk iva1_mpu_int_ifck = {
.name = "iva1_mpu_int_ifck",
.ops = &clkops_omap2_dflt_wait,
.parent = &iva1_ifck,
.clkdm_name = "iva1_clkdm",
.enable_reg = OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN),
.enable_bit = OMAP2420_EN_IVA_MPU_SHIFT,
.fixed_div = 2,
.recalc = &omap_fixed_divisor_recalc,
};
/*
* L3 clock domain
* L3 clocks are used for both interface and functional clocks to
* multiple entities. Some of these clocks are completely managed
* by hardware, and some others allow software control. The hardware-
* managed ones are generally based directly on CLK_REQ signals and
* various auto idle settings. The functional spec sets many of these
* as 'tie-high' for their enables.
*
* I-CLOCKS:
* L3-Interconnect, SMS, GPMC, SDRC, OCM_RAM, OCM_ROM, SDMA
* CAM, HS-USB.
* F-CLOCK
* SSI.
*
* GPMC memories and SDRC have timing and clock sensitive registers which
* may very well need notification when the clock changes. Currently for low
* operating points, these are taken care of in sleep.S.
*/
static const struct clksel_rate core_l3_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_242X },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 6, .val = 6, .flags = RATE_IN_24XX },
{ .div = 8, .val = 8, .flags = RATE_IN_242X },
{ .div = 12, .val = 12, .flags = RATE_IN_242X },
{ .div = 16, .val = 16, .flags = RATE_IN_242X },
{ .div = 0 }
};
static const struct clksel core_l3_clksel[] = {
{ .parent = &core_ck, .rates = core_l3_core_rates },
{ .parent = NULL }
};
static struct clk core_l3_ck = { /* Used for ick and fck, interconnect */
.name = "core_l3_ck",
.ops = &clkops_null,
.parent = &core_ck,
.clkdm_name = "core_l3_clkdm",
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
.clksel_mask = OMAP24XX_CLKSEL_L3_MASK,
.clksel = core_l3_clksel,
.recalc = &omap2_clksel_recalc,
};
/* usb_l4_ick */
static const struct clksel_rate usb_l4_ick_core_l3_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel usb_l4_ick_clksel[] = {
{ .parent = &core_l3_ck, .rates = usb_l4_ick_core_l3_rates },
{ .parent = NULL },
};
/* It is unclear from TRM whether usb_l4_ick is really in L3 or L4 clkdm */
static struct clk usb_l4_ick = { /* FS-USB interface clock */
.name = "usb_l4_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &core_l3_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
.enable_bit = OMAP24XX_EN_USB_SHIFT,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
.clksel_mask = OMAP24XX_CLKSEL_USB_MASK,
.clksel = usb_l4_ick_clksel,
.recalc = &omap2_clksel_recalc,
};
/*
* L4 clock management domain
*
* This domain contains many of the interface clocks from the L4
* interface, as well as some functional clocks. Fixed APLL functional
* source clocks are managed in
* this domain.
*/
static const struct clksel_rate l4_core_l3_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel l4_clksel[] = {
{ .parent = &core_l3_ck, .rates = l4_core_l3_rates },
{ .parent = NULL }
};
static struct clk l4_ck = { /* used both as an ick and fck */
.name = "l4_ck",
.ops = &clkops_null,
.parent = &core_l3_ck,
.clkdm_name = "core_l4_clkdm",
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
.clksel_mask = OMAP24XX_CLKSEL_L4_MASK,
.clksel = l4_clksel,
.recalc = &omap2_clksel_recalc,
};
/*
* SSI is in the L3 management domain, but its direct parent is core,
* not l3: many core power domain entities are grouped into the L3
* clock domain.
* SSI_SSR_FCLK, SSI_SST_FCLK, SSI_L4_ICLK
*
* ssr = core/1/2/3/4/5, sst = 1/2 ssr.
*/
static const struct clksel_rate ssi_ssr_sst_fck_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 6, .val = 6, .flags = RATE_IN_242X },
{ .div = 8, .val = 8, .flags = RATE_IN_242X },
{ .div = 0 }
};
static const struct clksel ssi_ssr_sst_fck_clksel[] = {
{ .parent = &core_ck, .rates = ssi_ssr_sst_fck_core_rates },
{ .parent = NULL }
};
static struct clk ssi_ssr_sst_fck = {
.name = "ssi_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &core_ck,
.clkdm_name = "core_l3_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
.enable_bit = OMAP24XX_EN_SSI_SHIFT,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
.clksel_mask = OMAP24XX_CLKSEL_SSI_MASK,
.clksel = ssi_ssr_sst_fck_clksel,
.recalc = &omap2_clksel_recalc,
};
/*
* Presumably this is the same as SSI_ICLK.
* TRM contradicts itself on what clockdomain SSI_ICLK is in
*/
static struct clk ssi_l4_ick = {
.name = "ssi_l4_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
.enable_bit = OMAP24XX_EN_SSI_SHIFT,
.recalc = &followparent_recalc,
};
/*
* GFX clock domain
* Clocks:
* GFX_FCLK, GFX_ICLK
* GFX_CG1(2d), GFX_CG2(3d)
*
* GFX_FCLK runs from L3, and is divided by (1,2,3,4).
* The 2d and 3d clocks run at a hardware-determined
* division of fclk.
*
*/
/* This clksel struct is shared between gfx_3d_fck and gfx_2d_fck */
static const struct clksel gfx_fck_clksel[] = {
{ .parent = &core_l3_ck, .rates = gfx_l3_rates },
{ .parent = NULL },
};
static struct clk gfx_3d_fck = {
.name = "gfx_3d_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &core_l3_ck,
.clkdm_name = "gfx_clkdm",
.enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
.enable_bit = OMAP24XX_EN_3D_SHIFT,
.clksel_reg = OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
.clksel_mask = OMAP_CLKSEL_GFX_MASK,
.clksel = gfx_fck_clksel,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
.set_rate = &omap2_clksel_set_rate
};
static struct clk gfx_2d_fck = {
.name = "gfx_2d_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &core_l3_ck,
.clkdm_name = "gfx_clkdm",
.enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN),
.enable_bit = OMAP24XX_EN_2D_SHIFT,
.clksel_reg = OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL),
.clksel_mask = OMAP_CLKSEL_GFX_MASK,
.clksel = gfx_fck_clksel,
.recalc = &omap2_clksel_recalc,
};
/* This interface clock does not have a CM_AUTOIDLE bit */
static struct clk gfx_ick = {
.name = "gfx_ick", /* From l3 */
.ops = &clkops_omap2_dflt_wait,
.parent = &core_l3_ck,
.clkdm_name = "gfx_clkdm",
.enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN),
.enable_bit = OMAP_EN_GFX_SHIFT,
.recalc = &followparent_recalc,
};
/*
* DSS clock domain
* CLOCKs:
* DSS_L4_ICLK, DSS_L3_ICLK,
* DSS_CLK1, DSS_CLK2, DSS_54MHz_CLK
*
* DSS is both initiator and target.
*/
/* XXX Add RATE_NOT_VALIDATED */
static const struct clksel_rate dss1_fck_sys_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate dss1_fck_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 2, .val = 2, .flags = RATE_IN_24XX },
{ .div = 3, .val = 3, .flags = RATE_IN_24XX },
{ .div = 4, .val = 4, .flags = RATE_IN_24XX },
{ .div = 5, .val = 5, .flags = RATE_IN_24XX },
{ .div = 6, .val = 6, .flags = RATE_IN_24XX },
{ .div = 8, .val = 8, .flags = RATE_IN_24XX },
{ .div = 9, .val = 9, .flags = RATE_IN_24XX },
{ .div = 12, .val = 12, .flags = RATE_IN_24XX },
{ .div = 16, .val = 16, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel dss1_fck_clksel[] = {
{ .parent = &sys_ck, .rates = dss1_fck_sys_rates },
{ .parent = &core_ck, .rates = dss1_fck_core_rates },
{ .parent = NULL },
};
static struct clk dss_ick = { /* Enables both L3,L4 ICLK's */
.name = "dss_ick",
.ops = &clkops_omap2_iclk_dflt,
.parent = &l4_ck, /* really both l3 and l4 */
.clkdm_name = "dss_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_DSS1_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk dss1_fck = {
.name = "dss1_fck",
.ops = &clkops_omap2_dflt,
.parent = &core_ck, /* Core or sys */
.clkdm_name = "dss_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_DSS1_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
.clksel_mask = OMAP24XX_CLKSEL_DSS1_MASK,
.clksel = dss1_fck_clksel,
.recalc = &omap2_clksel_recalc,
};
static const struct clksel_rate dss2_fck_sys_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate dss2_fck_48m_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel dss2_fck_clksel[] = {
{ .parent = &sys_ck, .rates = dss2_fck_sys_rates },
{ .parent = &func_48m_ck, .rates = dss2_fck_48m_rates },
{ .parent = NULL }
};
static struct clk dss2_fck = { /* Alt clk used in power management */
.name = "dss2_fck",
.ops = &clkops_omap2_dflt,
.parent = &sys_ck, /* fixed at sys_ck or 48MHz */
.clkdm_name = "dss_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_DSS2_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
.clksel_mask = OMAP24XX_CLKSEL_DSS2_MASK,
.clksel = dss2_fck_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk dss_54m_fck = { /* Alt clk used in power management */
.name = "dss_54m_fck", /* 54m tv clk */
.ops = &clkops_omap2_dflt_wait,
.parent = &func_54m_ck,
.clkdm_name = "dss_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_TV_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk wu_l4_ick = {
.name = "wu_l4_ick",
.ops = &clkops_null,
.parent = &sys_ck,
.clkdm_name = "wkup_clkdm",
.recalc = &followparent_recalc,
};
/*
* CORE power domain ICLK & FCLK defines.
* Many of these can have more than one possible parent. Entries
* here will likely have an L4 interface parent, and may have multiple
* functional clock parents.
*/
static const struct clksel_rate gpt_alt_rates[] = {
{ .div = 1, .val = 2, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel omap24xx_gpt_clksel[] = {
{ .parent = &func_32k_ck, .rates = gpt_32k_rates },
{ .parent = &sys_ck, .rates = gpt_sys_rates },
{ .parent = &alt_ck, .rates = gpt_alt_rates },
{ .parent = NULL },
};
static struct clk gpt1_ick = {
.name = "gpt1_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &wu_l4_ick,
.clkdm_name = "wkup_clkdm",
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_GPT1_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt1_fck = {
.name = "gpt1_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
.enable_bit = OMAP24XX_EN_GPT1_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL1),
.clksel_mask = OMAP24XX_CLKSEL_GPT1_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
.round_rate = &omap2_clksel_round_rate,
.set_rate = &omap2_clksel_set_rate
};
static struct clk gpt2_ick = {
.name = "gpt2_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT2_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt2_fck = {
.name = "gpt2_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_GPT2_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
.clksel_mask = OMAP24XX_CLKSEL_GPT2_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk gpt3_ick = {
.name = "gpt3_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT3_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt3_fck = {
.name = "gpt3_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_GPT3_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
.clksel_mask = OMAP24XX_CLKSEL_GPT3_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk gpt4_ick = {
.name = "gpt4_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT4_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt4_fck = {
.name = "gpt4_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_GPT4_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
.clksel_mask = OMAP24XX_CLKSEL_GPT4_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk gpt5_ick = {
.name = "gpt5_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT5_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt5_fck = {
.name = "gpt5_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_GPT5_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
.clksel_mask = OMAP24XX_CLKSEL_GPT5_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk gpt6_ick = {
.name = "gpt6_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT6_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt6_fck = {
.name = "gpt6_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_GPT6_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
.clksel_mask = OMAP24XX_CLKSEL_GPT6_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk gpt7_ick = {
.name = "gpt7_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT7_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt7_fck = {
.name = "gpt7_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_GPT7_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
.clksel_mask = OMAP24XX_CLKSEL_GPT7_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk gpt8_ick = {
.name = "gpt8_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT8_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt8_fck = {
.name = "gpt8_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_GPT8_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
.clksel_mask = OMAP24XX_CLKSEL_GPT8_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk gpt9_ick = {
.name = "gpt9_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT9_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt9_fck = {
.name = "gpt9_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_GPT9_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
.clksel_mask = OMAP24XX_CLKSEL_GPT9_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk gpt10_ick = {
.name = "gpt10_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT10_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt10_fck = {
.name = "gpt10_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_GPT10_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
.clksel_mask = OMAP24XX_CLKSEL_GPT10_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk gpt11_ick = {
.name = "gpt11_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT11_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt11_fck = {
.name = "gpt11_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_GPT11_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
.clksel_mask = OMAP24XX_CLKSEL_GPT11_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk gpt12_ick = {
.name = "gpt12_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_GPT12_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpt12_fck = {
.name = "gpt12_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &secure_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_GPT12_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2),
.clksel_mask = OMAP24XX_CLKSEL_GPT12_MASK,
.clksel = omap24xx_gpt_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk mcbsp1_ick = {
.name = "mcbsp1_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MCBSP1_SHIFT,
.recalc = &followparent_recalc,
};
static const struct clksel_rate common_mcbsp_96m_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel_rate common_mcbsp_mcbsp_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_24XX },
{ .div = 0 }
};
static const struct clksel mcbsp_fck_clksel[] = {
{ .parent = &func_96m_ck, .rates = common_mcbsp_96m_rates },
{ .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates },
{ .parent = NULL }
};
static struct clk mcbsp1_fck = {
.name = "mcbsp1_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
.init = &omap2_init_clksel_parent,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MCBSP1_SHIFT,
.clksel_reg = OMAP242X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
.clksel_mask = OMAP2_MCBSP1_CLKS_MASK,
.clksel = mcbsp_fck_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk mcbsp2_ick = {
.name = "mcbsp2_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MCBSP2_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk mcbsp2_fck = {
.name = "mcbsp2_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
.init = &omap2_init_clksel_parent,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MCBSP2_SHIFT,
.clksel_reg = OMAP242X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0),
.clksel_mask = OMAP2_MCBSP2_CLKS_MASK,
.clksel = mcbsp_fck_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk mcspi1_ick = {
.name = "mcspi1_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk mcspi1_fck = {
.name = "mcspi1_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_48m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MCSPI1_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk mcspi2_ick = {
.name = "mcspi2_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk mcspi2_fck = {
.name = "mcspi2_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_48m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MCSPI2_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk uart1_ick = {
.name = "uart1_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_UART1_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk uart1_fck = {
.name = "uart1_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_48m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_UART1_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk uart2_ick = {
.name = "uart2_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_UART2_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk uart2_fck = {
.name = "uart2_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_48m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_UART2_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk uart3_ick = {
.name = "uart3_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2),
.enable_bit = OMAP24XX_EN_UART3_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk uart3_fck = {
.name = "uart3_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_48m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
.enable_bit = OMAP24XX_EN_UART3_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpios_ick = {
.name = "gpios_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &wu_l4_ick,
.clkdm_name = "wkup_clkdm",
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk gpios_fck = {
.name = "gpios_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "wkup_clkdm",
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
.enable_bit = OMAP24XX_EN_GPIOS_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk mpu_wdt_ick = {
.name = "mpu_wdt_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &wu_l4_ick,
.clkdm_name = "wkup_clkdm",
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk mpu_wdt_fck = {
.name = "mpu_wdt_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "wkup_clkdm",
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN),
.enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk sync_32k_ick = {
.name = "sync_32k_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &wu_l4_ick,
.clkdm_name = "wkup_clkdm",
.flags = ENABLE_ON_INIT,
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_32KSYNC_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk wdt1_ick = {
.name = "wdt1_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &wu_l4_ick,
.clkdm_name = "wkup_clkdm",
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_WDT1_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk omapctrl_ick = {
.name = "omapctrl_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &wu_l4_ick,
.clkdm_name = "wkup_clkdm",
.flags = ENABLE_ON_INIT,
.enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN),
.enable_bit = OMAP24XX_EN_OMAPCTRL_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk cam_ick = {
.name = "cam_ick",
.ops = &clkops_omap2_iclk_dflt,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_CAM_SHIFT,
.recalc = &followparent_recalc,
};
/*
* cam_fck controls both CAM_MCLK and CAM_FCLK. It should probably be
* split into two separate clocks, since the parent clocks are different
* and the clockdomains are also different.
*/
static struct clk cam_fck = {
.name = "cam_fck",
.ops = &clkops_omap2_dflt,
.parent = &func_96m_ck,
.clkdm_name = "core_l3_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_CAM_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk mailboxes_ick = {
.name = "mailboxes_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MAILBOXES_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk wdt4_ick = {
.name = "wdt4_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_WDT4_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk wdt4_fck = {
.name = "wdt4_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_WDT4_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk wdt3_ick = {
.name = "wdt3_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_WDT3_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk wdt3_fck = {
.name = "wdt3_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_32k_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP2420_EN_WDT3_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk mspro_ick = {
.name = "mspro_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk mspro_fck = {
.name = "mspro_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_MSPRO_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk mmc_ick = {
.name = "mmc_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_MMC_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk mmc_fck = {
.name = "mmc_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP2420_EN_MMC_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk fac_ick = {
.name = "fac_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_FAC_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk fac_fck = {
.name = "fac_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_12m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_FAC_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk eac_ick = {
.name = "eac_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_EAC_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk eac_fck = {
.name = "eac_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP2420_EN_EAC_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk hdq_ick = {
.name = "hdq_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP24XX_EN_HDQ_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk hdq_fck = {
.name = "hdq_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_12m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP24XX_EN_HDQ_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk i2c2_ick = {
.name = "i2c2_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_I2C2_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk i2c2_fck = {
.name = "i2c2_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_12m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP2420_EN_I2C2_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk i2c1_ick = {
.name = "i2c1_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_I2C1_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk i2c1_fck = {
.name = "i2c1_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_12m_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP2420_EN_I2C1_SHIFT,
.recalc = &followparent_recalc,
};
/*
* The enable_reg/enable_bit in this clock is only used for CM_AUTOIDLE
* accesses derived from this data.
*/
static struct clk gpmc_fck = {
.name = "gpmc_fck",
.ops = &clkops_omap2_iclk_idle_only,
.parent = &core_l3_ck,
.flags = ENABLE_ON_INIT,
.clkdm_name = "core_l3_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
.enable_bit = OMAP24XX_AUTO_GPMC_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk sdma_fck = {
.name = "sdma_fck",
.ops = &clkops_null, /* RMK: missing? */
.parent = &core_l3_ck,
.clkdm_name = "core_l3_clkdm",
.recalc = &followparent_recalc,
};
/*
* The enable_reg/enable_bit in this clock is only used for CM_AUTOIDLE
* accesses derived from this data.
*/
static struct clk sdma_ick = {
.name = "sdma_ick",
.ops = &clkops_omap2_iclk_idle_only,
.parent = &core_l3_ck,
.clkdm_name = "core_l3_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
.enable_bit = OMAP24XX_AUTO_SDMA_SHIFT,
.recalc = &followparent_recalc,
};
/*
* The enable_reg/enable_bit in this clock is only used for CM_AUTOIDLE
* accesses derived from this data.
*/
static struct clk sdrc_ick = {
.name = "sdrc_ick",
.ops = &clkops_omap2_iclk_idle_only,
.parent = &core_l3_ck,
.flags = ENABLE_ON_INIT,
.clkdm_name = "core_l3_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3),
.enable_bit = OMAP24XX_AUTO_SDRC_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk vlynq_ick = {
.name = "vlynq_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &core_l3_ck,
.clkdm_name = "core_l3_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1),
.enable_bit = OMAP2420_EN_VLYNQ_SHIFT,
.recalc = &followparent_recalc,
};
static const struct clksel_rate vlynq_fck_96m_rates[] = {
{ .div = 1, .val = 0, .flags = RATE_IN_242X },
{ .div = 0 }
};
static const struct clksel_rate vlynq_fck_core_rates[] = {
{ .div = 1, .val = 1, .flags = RATE_IN_242X },
{ .div = 2, .val = 2, .flags = RATE_IN_242X },
{ .div = 3, .val = 3, .flags = RATE_IN_242X },
{ .div = 4, .val = 4, .flags = RATE_IN_242X },
{ .div = 6, .val = 6, .flags = RATE_IN_242X },
{ .div = 8, .val = 8, .flags = RATE_IN_242X },
{ .div = 9, .val = 9, .flags = RATE_IN_242X },
{ .div = 12, .val = 12, .flags = RATE_IN_242X },
{ .div = 16, .val = 16, .flags = RATE_IN_242X },
{ .div = 18, .val = 18, .flags = RATE_IN_242X },
{ .div = 0 }
};
static const struct clksel vlynq_fck_clksel[] = {
{ .parent = &func_96m_ck, .rates = vlynq_fck_96m_rates },
{ .parent = &core_ck, .rates = vlynq_fck_core_rates },
{ .parent = NULL }
};
static struct clk vlynq_fck = {
.name = "vlynq_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_96m_ck,
.clkdm_name = "core_l3_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1),
.enable_bit = OMAP2420_EN_VLYNQ_SHIFT,
.init = &omap2_init_clksel_parent,
.clksel_reg = OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1),
.clksel_mask = OMAP2420_CLKSEL_VLYNQ_MASK,
.clksel = vlynq_fck_clksel,
.recalc = &omap2_clksel_recalc,
};
static struct clk des_ick = {
.name = "des_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
.enable_bit = OMAP24XX_EN_DES_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk sha_ick = {
.name = "sha_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
.enable_bit = OMAP24XX_EN_SHA_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk rng_ick = {
.name = "rng_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
.enable_bit = OMAP24XX_EN_RNG_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk aes_ick = {
.name = "aes_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
.enable_bit = OMAP24XX_EN_AES_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk pka_ick = {
.name = "pka_ick",
.ops = &clkops_omap2_iclk_dflt_wait,
.parent = &l4_ck,
.clkdm_name = "core_l4_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4),
.enable_bit = OMAP24XX_EN_PKA_SHIFT,
.recalc = &followparent_recalc,
};
static struct clk usb_fck = {
.name = "usb_fck",
.ops = &clkops_omap2_dflt_wait,
.parent = &func_48m_ck,
.clkdm_name = "core_l3_clkdm",
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2),
.enable_bit = OMAP24XX_EN_USB_SHIFT,
.recalc = &followparent_recalc,
};
/*
* This is a composite clock which performs an entire set (operating
* point) change and then forces a rebalance. It keys on the MPU
* speed, but it really could be keyed on any speed that is part of a
* set in the rate table.
*
* To really change a set you need memory timing tables which get
* changed in SRAM, pre-notifiers and post-notifiers, and a change of
* the top set; without low level display recalcs it won't work. This
* is why DPM notifiers work: ISRs off, walking a list of clocks that
* are already _off_, and not messing with the bus.
*
* This clock should have no parent. It embodies the entire upper
* level active set. A parent would also mess up some of the init.
*/
static struct clk virt_prcm_set = {
.name = "virt_prcm_set",
.ops = &clkops_null,
.parent = &mpu_ck, /* Indexed by mpu speed, no parent */
.recalc = &omap2_table_mpu_recalc, /* sets are keyed on mpu rate */
.set_rate = &omap2_select_table_rate,
.round_rate = &omap2_round_to_table_rate,
};
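/*
* A minimal usage sketch (illustrative, not part of this file): PM or
* board code changes the whole operating point by setting the rate of
* this virtual clock, which walks omap2420_rate_table via
* omap2_select_table_rate():
*
*	struct clk *vclk = clk_get(NULL, "virt_prcm_set");
*	if (!IS_ERR(vclk))
*		clk_set_rate(vclk, clk_round_rate(vclk, target_mpu_rate));
*
* target_mpu_rate here is a hypothetical variable holding the desired
* MPU speed in Hz.
*/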
/*
* clkdev integration
*/
static struct omap_clk omap2420_clks[] = {
/* external root sources */
CLK(NULL, "func_32k_ck", &func_32k_ck, CK_242X),
CLK(NULL, "secure_32k_ck", &secure_32k_ck, CK_242X),
CLK(NULL, "osc_ck", &osc_ck, CK_242X),
CLK(NULL, "sys_ck", &sys_ck, CK_242X),
CLK(NULL, "alt_ck", &alt_ck, CK_242X),
CLK("omap-mcbsp.1", "pad_fck", &mcbsp_clks, CK_242X),
CLK("omap-mcbsp.2", "pad_fck", &mcbsp_clks, CK_242X),
CLK(NULL, "mcbsp_clks", &mcbsp_clks, CK_242X),
/* internal analog sources */
CLK(NULL, "dpll_ck", &dpll_ck, CK_242X),
CLK(NULL, "apll96_ck", &apll96_ck, CK_242X),
CLK(NULL, "apll54_ck", &apll54_ck, CK_242X),
/* internal prcm root sources */
CLK(NULL, "func_54m_ck", &func_54m_ck, CK_242X),
CLK(NULL, "core_ck", &core_ck, CK_242X),
CLK("omap-mcbsp.1", "prcm_fck", &func_96m_ck, CK_242X),
CLK("omap-mcbsp.2", "prcm_fck", &func_96m_ck, CK_242X),
CLK(NULL, "func_96m_ck", &func_96m_ck, CK_242X),
CLK(NULL, "func_48m_ck", &func_48m_ck, CK_242X),
CLK(NULL, "func_12m_ck", &func_12m_ck, CK_242X),
CLK(NULL, "ck_wdt1_osc", &wdt1_osc_ck, CK_242X),
CLK(NULL, "sys_clkout_src", &sys_clkout_src, CK_242X),
CLK(NULL, "sys_clkout", &sys_clkout, CK_242X),
CLK(NULL, "sys_clkout2_src", &sys_clkout2_src, CK_242X),
CLK(NULL, "sys_clkout2", &sys_clkout2, CK_242X),
CLK(NULL, "emul_ck", &emul_ck, CK_242X),
/* mpu domain clocks */
CLK(NULL, "mpu_ck", &mpu_ck, CK_242X),
/* dsp domain clocks */
CLK(NULL, "dsp_fck", &dsp_fck, CK_242X),
CLK(NULL, "dsp_ick", &dsp_ick, CK_242X),
CLK(NULL, "iva1_ifck", &iva1_ifck, CK_242X),
CLK(NULL, "iva1_mpu_int_ifck", &iva1_mpu_int_ifck, CK_242X),
/* GFX domain clocks */
CLK(NULL, "gfx_3d_fck", &gfx_3d_fck, CK_242X),
CLK(NULL, "gfx_2d_fck", &gfx_2d_fck, CK_242X),
CLK(NULL, "gfx_ick", &gfx_ick, CK_242X),
/* DSS domain clocks */
CLK("omapdss_dss", "ick", &dss_ick, CK_242X),
CLK("omapdss_dss", "fck", &dss1_fck, CK_242X),
CLK("omapdss_dss", "sys_clk", &dss2_fck, CK_242X),
CLK("omapdss_dss", "tv_clk", &dss_54m_fck, CK_242X),
/* L3 domain clocks */
CLK(NULL, "core_l3_ck", &core_l3_ck, CK_242X),
CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck, CK_242X),
CLK(NULL, "usb_l4_ick", &usb_l4_ick, CK_242X),
/* L4 domain clocks */
CLK(NULL, "l4_ck", &l4_ck, CK_242X),
CLK(NULL, "ssi_l4_ick", &ssi_l4_ick, CK_242X),
CLK(NULL, "wu_l4_ick", &wu_l4_ick, CK_242X),
/* virtual meta-group clock */
CLK(NULL, "virt_prcm_set", &virt_prcm_set, CK_242X),
/* general l4 interface ck, multi-parent functional clk */
CLK(NULL, "gpt1_ick", &gpt1_ick, CK_242X),
CLK(NULL, "gpt1_fck", &gpt1_fck, CK_242X),
CLK(NULL, "gpt2_ick", &gpt2_ick, CK_242X),
CLK(NULL, "gpt2_fck", &gpt2_fck, CK_242X),
CLK(NULL, "gpt3_ick", &gpt3_ick, CK_242X),
CLK(NULL, "gpt3_fck", &gpt3_fck, CK_242X),
CLK(NULL, "gpt4_ick", &gpt4_ick, CK_242X),
CLK(NULL, "gpt4_fck", &gpt4_fck, CK_242X),
CLK(NULL, "gpt5_ick", &gpt5_ick, CK_242X),
CLK(NULL, "gpt5_fck", &gpt5_fck, CK_242X),
CLK(NULL, "gpt6_ick", &gpt6_ick, CK_242X),
CLK(NULL, "gpt6_fck", &gpt6_fck, CK_242X),
CLK(NULL, "gpt7_ick", &gpt7_ick, CK_242X),
CLK(NULL, "gpt7_fck", &gpt7_fck, CK_242X),
CLK(NULL, "gpt8_ick", &gpt8_ick, CK_242X),
CLK(NULL, "gpt8_fck", &gpt8_fck, CK_242X),
CLK(NULL, "gpt9_ick", &gpt9_ick, CK_242X),
CLK(NULL, "gpt9_fck", &gpt9_fck, CK_242X),
CLK(NULL, "gpt10_ick", &gpt10_ick, CK_242X),
CLK(NULL, "gpt10_fck", &gpt10_fck, CK_242X),
CLK(NULL, "gpt11_ick", &gpt11_ick, CK_242X),
CLK(NULL, "gpt11_fck", &gpt11_fck, CK_242X),
CLK(NULL, "gpt12_ick", &gpt12_ick, CK_242X),
CLK(NULL, "gpt12_fck", &gpt12_fck, CK_242X),
CLK("omap-mcbsp.1", "ick", &mcbsp1_ick, CK_242X),
CLK("omap-mcbsp.1", "fck", &mcbsp1_fck, CK_242X),
CLK("omap-mcbsp.2", "ick", &mcbsp2_ick, CK_242X),
CLK("omap-mcbsp.2", "fck", &mcbsp2_fck, CK_242X),
CLK("omap2_mcspi.1", "ick", &mcspi1_ick, CK_242X),
CLK("omap2_mcspi.1", "fck", &mcspi1_fck, CK_242X),
CLK("omap2_mcspi.2", "ick", &mcspi2_ick, CK_242X),
CLK("omap2_mcspi.2", "fck", &mcspi2_fck, CK_242X),
CLK(NULL, "uart1_ick", &uart1_ick, CK_242X),
CLK(NULL, "uart1_fck", &uart1_fck, CK_242X),
CLK(NULL, "uart2_ick", &uart2_ick, CK_242X),
CLK(NULL, "uart2_fck", &uart2_fck, CK_242X),
CLK(NULL, "uart3_ick", &uart3_ick, CK_242X),
CLK(NULL, "uart3_fck", &uart3_fck, CK_242X),
CLK(NULL, "gpios_ick", &gpios_ick, CK_242X),
CLK(NULL, "gpios_fck", &gpios_fck, CK_242X),
CLK("omap_wdt", "ick", &mpu_wdt_ick, CK_242X),
CLK("omap_wdt", "fck", &mpu_wdt_fck, CK_242X),
CLK(NULL, "sync_32k_ick", &sync_32k_ick, CK_242X),
CLK(NULL, "wdt1_ick", &wdt1_ick, CK_242X),
CLK(NULL, "omapctrl_ick", &omapctrl_ick, CK_242X),
CLK("omap24xxcam", "fck", &cam_fck, CK_242X),
CLK("omap24xxcam", "ick", &cam_ick, CK_242X),
CLK(NULL, "mailboxes_ick", &mailboxes_ick, CK_242X),
CLK(NULL, "wdt4_ick", &wdt4_ick, CK_242X),
CLK(NULL, "wdt4_fck", &wdt4_fck, CK_242X),
CLK(NULL, "wdt3_ick", &wdt3_ick, CK_242X),
CLK(NULL, "wdt3_fck", &wdt3_fck, CK_242X),
CLK(NULL, "mspro_ick", &mspro_ick, CK_242X),
CLK(NULL, "mspro_fck", &mspro_fck, CK_242X),
CLK("mmci-omap.0", "ick", &mmc_ick, CK_242X),
CLK("mmci-omap.0", "fck", &mmc_fck, CK_242X),
CLK(NULL, "fac_ick", &fac_ick, CK_242X),
CLK(NULL, "fac_fck", &fac_fck, CK_242X),
CLK(NULL, "eac_ick", &eac_ick, CK_242X),
CLK(NULL, "eac_fck", &eac_fck, CK_242X),
CLK("omap_hdq.0", "ick", &hdq_ick, CK_242X),
CLK("omap_hdq.1", "fck", &hdq_fck, CK_242X),
CLK("omap_i2c.1", "ick", &i2c1_ick, CK_242X),
CLK("omap_i2c.1", "fck", &i2c1_fck, CK_242X),
CLK("omap_i2c.2", "ick", &i2c2_ick, CK_242X),
CLK("omap_i2c.2", "fck", &i2c2_fck, CK_242X),
CLK(NULL, "gpmc_fck", &gpmc_fck, CK_242X),
CLK(NULL, "sdma_fck", &sdma_fck, CK_242X),
CLK(NULL, "sdma_ick", &sdma_ick, CK_242X),
CLK(NULL, "sdrc_ick", &sdrc_ick, CK_242X),
CLK(NULL, "vlynq_ick", &vlynq_ick, CK_242X),
CLK(NULL, "vlynq_fck", &vlynq_fck, CK_242X),
CLK(NULL, "des_ick", &des_ick, CK_242X),
CLK("omap-sham", "ick", &sha_ick, CK_242X),
CLK("omap_rng", "ick", &rng_ick, CK_242X),
CLK("omap-aes", "ick", &aes_ick, CK_242X),
CLK(NULL, "pka_ick", &pka_ick, CK_242X),
CLK(NULL, "usb_fck", &usb_fck, CK_242X),
CLK("musb-hdrc", "fck", &osc_ck, CK_242X),
};
/*
* init code
*/
int __init omap2420_clk_init(void)
{
const struct prcm_config *prcm;
struct omap_clk *c;
u32 clkrate;
prcm_clksrc_ctrl = OMAP2420_PRCM_CLKSRC_CTRL;
cm_idlest_pll = OMAP_CM_REGADDR(PLL_MOD, CM_IDLEST);
cpu_mask = RATE_IN_242X;
rate_table = omap2420_rate_table;
clk_init(&omap2_clk_functions);
for (c = omap2420_clks; c < omap2420_clks + ARRAY_SIZE(omap2420_clks);
c++)
clk_preinit(c->lk.clk);
osc_ck.rate = omap2_osc_clk_recalc(&osc_ck);
propagate_rate(&osc_ck);
sys_ck.rate = omap2xxx_sys_clk_recalc(&sys_ck);
propagate_rate(&sys_ck);
for (c = omap2420_clks; c < omap2420_clks + ARRAY_SIZE(omap2420_clks);
c++) {
clkdev_add(&c->lk);
clk_register(c->lk.clk);
omap2_init_clk_clkdm(c->lk.clk);
}
/* Disable autoidle on all clocks; let the PM code enable it later */
omap_clk_disable_autoidle_all();
/* Check the MPU rate set by bootloader */
clkrate = omap2xxx_clk_get_core_rate(&dpll_ck);
for (prcm = rate_table; prcm->mpu_speed; prcm++) {
if (!(prcm->flags & cpu_mask))
continue;
if (prcm->xtal_speed != sys_ck.rate)
continue;
if (prcm->dpll_speed <= clkrate)
break;
}
curr_prcm_set = prcm;
recalculate_root_clocks();
pr_info("Clocking rate (Crystal/DPLL/MPU): %ld.%01ld/%ld/%ld MHz\n",
(sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
(dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000));
/*
* Only enable those clocks we will need, let the drivers
* enable other clocks as necessary
*/
clk_enable_init_clocks();
/* Avoid sleeping during omap2_clk_prepare_for_reboot() */
vclk = clk_get(NULL, "virt_prcm_set");
sclk = clk_get(NULL, "sys_ck");
dclk = clk_get(NULL, "dpll_ck");
return 0;
}
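/*
* A minimal consumer sketch (illustrative): once omap2420_clk_init()
* has run, a driver looks its clocks up through clkdev using the
* device name and connection id from omap2420_clks[] above, e.g. for
* "omap_i2c.1":
*
*	struct clk *ick = clk_get(dev, "ick");
*
*	if (!IS_ERR(ick)) {
*		clk_enable(ick);
*		... use the device ...
*		clk_disable(ick);
*		clk_put(ick);
*	}
*/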
| gpl-2.0 |
Snuzzo/funky_msm8960 | drivers/net/bna/bna_txrx.c | 2387 | 103208 | /*
* Linux network driver for Brocade Converged Network Adapter.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License (GPL) Version 2 as
* published by the Free Software Foundation
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
/*
* Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
* All rights reserved
* www.brocade.com
*/
#include "bna.h"
#include "bfa_sm.h"
#include "bfi.h"
/**
* IB
*/
#define bna_ib_find_free_ibidx(_mask, _pos)\
do {\
(_pos) = 0;\
while (((_pos) < (BFI_IBIDX_MAX_SEGSIZE)) &&\
((1 << (_pos)) & (_mask)))\
(_pos)++;\
} while (0)
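/*
* bna_ib_find_free_ibidx() scans for the first clear bit in _mask:
* e.g. a mask of 0x7 yields _pos == 3. A result equal to
* BFI_IBIDX_MAX_SEGSIZE means no index is free.
*/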
#define bna_ib_count_ibidx(_mask, _count)\
do {\
int pos = 0;\
(_count) = 0;\
while (pos < (BFI_IBIDX_MAX_SEGSIZE)) {\
if ((1 << pos) & (_mask))\
(_count) = pos + 1;\
pos++;\
} \
} while (0)
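/*
* bna_ib_count_ibidx() sets _count to one past the highest set bit in
* _mask, i.e. the number of index slots needed to span the mask.
*/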
#define bna_ib_select_segpool(_count, _q_idx)\
do {\
int i;\
(_q_idx) = -1;\
for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {\
if ((_count <= ibidx_pool[i].pool_entry_size)) {\
(_q_idx) = i;\
break;\
} \
} \
} while (0)
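/*
* bna_ib_select_segpool() picks the smallest pool whose entry size
* can hold _count indexes; _q_idx stays -1 if none fits.
*/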
struct bna_ibidx_pool {
int pool_size;
int pool_entry_size;
};
init_ibidx_pool(ibidx_pool);
static struct bna_intr *
bna_intr_get(struct bna_ib_mod *ib_mod, enum bna_intr_type intr_type,
int vector)
{
struct bna_intr *intr;
struct list_head *qe;
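/* Reuse an existing intr object matching this type and vector */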
list_for_each(qe, &ib_mod->intr_active_q) {
intr = (struct bna_intr *)qe;
if ((intr->intr_type == intr_type) &&
(intr->vector == vector)) {
intr->ref_count++;
return intr;
}
}
if (list_empty(&ib_mod->intr_free_q))
return NULL;
bfa_q_deq(&ib_mod->intr_free_q, &intr);
bfa_q_qe_init(&intr->qe);
intr->ref_count = 1;
intr->intr_type = intr_type;
intr->vector = vector;
list_add_tail(&intr->qe, &ib_mod->intr_active_q);
return intr;
}
static void
bna_intr_put(struct bna_ib_mod *ib_mod,
struct bna_intr *intr)
{
intr->ref_count--;
if (intr->ref_count == 0) {
intr->ib = NULL;
list_del(&intr->qe);
bfa_q_qe_init(&intr->qe);
list_add_tail(&intr->qe, &ib_mod->intr_free_q);
}
}
void
bna_ib_mod_init(struct bna_ib_mod *ib_mod, struct bna *bna,
struct bna_res_info *res_info)
{
int i;
int j;
int count;
u8 offset;
struct bna_doorbell_qset *qset;
unsigned long off;
ib_mod->bna = bna;
ib_mod->ib = (struct bna_ib *)
res_info[BNA_RES_MEM_T_IB_ARRAY].res_u.mem_info.mdl[0].kva;
ib_mod->intr = (struct bna_intr *)
res_info[BNA_RES_MEM_T_INTR_ARRAY].res_u.mem_info.mdl[0].kva;
ib_mod->idx_seg = (struct bna_ibidx_seg *)
res_info[BNA_RES_MEM_T_IDXSEG_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&ib_mod->ib_free_q);
INIT_LIST_HEAD(&ib_mod->intr_free_q);
INIT_LIST_HEAD(&ib_mod->intr_active_q);
for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++)
INIT_LIST_HEAD(&ib_mod->ibidx_seg_pool[i]);
for (i = 0; i < BFI_MAX_IB; i++) {
ib_mod->ib[i].ib_id = i;
ib_mod->ib[i].ib_seg_host_addr_kva =
res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].kva;
ib_mod->ib[i].ib_seg_host_addr.lsb =
res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.lsb;
ib_mod->ib[i].ib_seg_host_addr.msb =
res_info[BNA_RES_MEM_T_IBIDX].res_u.mem_info.mdl[i].dma.msb;
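/*
* offsetof-style computation: a NULL-based qset pointer gives the
* byte offset of this IB's doorbell within the doorbell page.
*/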
qset = (struct bna_doorbell_qset *)0;
off = (unsigned long)(&qset[i >> 1].ib0[(i & 0x1)
* (0x20 >> 2)]);
ib_mod->ib[i].door_bell.doorbell_addr = off +
BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
bfa_q_qe_init(&ib_mod->ib[i].qe);
list_add_tail(&ib_mod->ib[i].qe, &ib_mod->ib_free_q);
bfa_q_qe_init(&ib_mod->intr[i].qe);
list_add_tail(&ib_mod->intr[i].qe, &ib_mod->intr_free_q);
}
count = 0;
offset = 0;
for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
for (j = 0; j < ibidx_pool[i].pool_size; j++) {
bfa_q_qe_init(&ib_mod->idx_seg[count]);
ib_mod->idx_seg[count].ib_seg_size =
ibidx_pool[i].pool_entry_size;
ib_mod->idx_seg[count].ib_idx_tbl_offset = offset;
list_add_tail(&ib_mod->idx_seg[count].qe,
&ib_mod->ibidx_seg_pool[i]);
count++;
offset += ibidx_pool[i].pool_entry_size;
}
}
}
void
bna_ib_mod_uninit(struct bna_ib_mod *ib_mod)
{
int i;
int j;
struct list_head *qe;
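/*
* These loops only walk the free queues to count their entries; the
* counts are unused (apparently leftover sanity checks).
*/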
i = 0;
list_for_each(qe, &ib_mod->ib_free_q)
i++;
i = 0;
list_for_each(qe, &ib_mod->intr_free_q)
i++;
for (i = 0; i < BFI_IBIDX_TOTAL_POOLS; i++) {
j = 0;
list_for_each(qe, &ib_mod->ibidx_seg_pool[i])
j++;
}
ib_mod->bna = NULL;
}
static struct bna_ib *
bna_ib_get(struct bna_ib_mod *ib_mod,
enum bna_intr_type intr_type,
int vector)
{
struct bna_ib *ib;
struct bna_intr *intr;
if (intr_type == BNA_INTR_T_INTX)
vector = (1 << vector);
intr = bna_intr_get(ib_mod, intr_type, vector);
if (intr == NULL)
return NULL;
if (intr->ib) {
if (intr->ib->ref_count == BFI_IBIDX_MAX_SEGSIZE) {
bna_intr_put(ib_mod, intr);
return NULL;
}
intr->ib->ref_count++;
return intr->ib;
}
if (list_empty(&ib_mod->ib_free_q)) {
bna_intr_put(ib_mod, intr);
return NULL;
}
bfa_q_deq(&ib_mod->ib_free_q, &ib);
bfa_q_qe_init(&ib->qe);
ib->ref_count = 1;
ib->start_count = 0;
ib->idx_mask = 0;
ib->intr = intr;
ib->idx_seg = NULL;
intr->ib = ib;
ib->bna = ib_mod->bna;
return ib;
}
static void
bna_ib_put(struct bna_ib_mod *ib_mod, struct bna_ib *ib)
{
bna_intr_put(ib_mod, ib->intr);
ib->ref_count--;
if (ib->ref_count == 0) {
ib->intr = NULL;
ib->bna = NULL;
list_add_tail(&ib->qe, &ib_mod->ib_free_q);
}
}
/* Returns index offset - starting from 0 */
static int
bna_ib_reserve_idx(struct bna_ib *ib)
{
struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
struct bna_ibidx_seg *idx_seg;
int idx;
int num_idx;
int q_idx;
/* Find the first free index position */
bna_ib_find_free_ibidx(ib->idx_mask, idx);
if (idx == BFI_IBIDX_MAX_SEGSIZE)
return -1;
/*
* Calculate the total number of indexes held by this IB,
* including the index newly reserved above.
*/
bna_ib_count_ibidx((ib->idx_mask | (1 << idx)), num_idx);
/* See if there is a free space in the index segment held by this IB */
if (ib->idx_seg && (num_idx <= ib->idx_seg->ib_seg_size)) {
ib->idx_mask |= (1 << idx);
return idx;
}
if (ib->start_count)
return -1;
/* Allocate a new segment */
bna_ib_select_segpool(num_idx, q_idx);
while (1) {
if (q_idx == BFI_IBIDX_TOTAL_POOLS)
return -1;
if (!list_empty(&ib_mod->ibidx_seg_pool[q_idx]))
break;
q_idx++;
}
bfa_q_deq(&ib_mod->ibidx_seg_pool[q_idx], &idx_seg);
bfa_q_qe_init(&idx_seg->qe);
/* Free the old segment */
if (ib->idx_seg) {
bna_ib_select_segpool(ib->idx_seg->ib_seg_size, q_idx);
list_add_tail(&ib->idx_seg->qe, &ib_mod->ibidx_seg_pool[q_idx]);
}
ib->idx_seg = idx_seg;
ib->idx_mask |= (1 << idx);
return idx;
}
static void
bna_ib_release_idx(struct bna_ib *ib, int idx)
{
struct bna_ib_mod *ib_mod = &ib->bna->ib_mod;
struct bna_ibidx_seg *idx_seg;
int num_idx;
int cur_q_idx;
int new_q_idx;
ib->idx_mask &= ~(1 << idx);
if (ib->start_count)
return;
bna_ib_count_ibidx(ib->idx_mask, num_idx);
/*
* Free the segment, if there are no more indexes in the segment
* held by this IB
*/
if (!num_idx) {
bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
list_add_tail(&ib->idx_seg->qe,
&ib_mod->ibidx_seg_pool[cur_q_idx]);
ib->idx_seg = NULL;
return;
}
/* See if we can move to a smaller segment */
bna_ib_select_segpool(num_idx, new_q_idx);
bna_ib_select_segpool(ib->idx_seg->ib_seg_size, cur_q_idx);
while (new_q_idx < cur_q_idx) {
if (!list_empty(&ib_mod->ibidx_seg_pool[new_q_idx]))
break;
new_q_idx++;
}
if (new_q_idx < cur_q_idx) {
/* Select the new smaller segment */
bfa_q_deq(&ib_mod->ibidx_seg_pool[new_q_idx], &idx_seg);
bfa_q_qe_init(&idx_seg->qe);
/* Free the old segment */
list_add_tail(&ib->idx_seg->qe,
&ib_mod->ibidx_seg_pool[cur_q_idx]);
ib->idx_seg = idx_seg;
}
}
static int
bna_ib_config(struct bna_ib *ib, struct bna_ib_config *ib_config)
{
if (ib->start_count)
return -1;
ib->ib_config.coalescing_timeo = ib_config->coalescing_timeo;
ib->ib_config.interpkt_timeo = ib_config->interpkt_timeo;
ib->ib_config.interpkt_count = ib_config->interpkt_count;
ib->ib_config.ctrl_flags = ib_config->ctrl_flags;
ib->ib_config.ctrl_flags |= BFI_IB_CF_MASTER_ENABLE;
if (ib->intr->intr_type == BNA_INTR_T_MSIX)
ib->ib_config.ctrl_flags |= BFI_IB_CF_MSIX_MODE;
return 0;
}
static void
bna_ib_start(struct bna_ib *ib)
{
struct bna_ib_blk_mem ib_cfg;
struct bna_ib_blk_mem *ib_mem;
u32 pg_num;
u32 intx_mask;
int i;
void __iomem *base_addr;
unsigned long off;
ib->start_count++;
if (ib->start_count > 1)
return;
ib_cfg.host_addr_lo = (u32)(ib->ib_seg_host_addr.lsb);
ib_cfg.host_addr_hi = (u32)(ib->ib_seg_host_addr.msb);
ib_cfg.clsc_n_ctrl_n_msix = (((u32)
ib->ib_config.coalescing_timeo << 16) |
((u32)ib->ib_config.ctrl_flags << 8) |
(ib->intr->vector));
ib_cfg.ipkt_n_ent_n_idxof =
((u32)
(ib->ib_config.interpkt_timeo & 0xf) << 16) |
((u32)ib->idx_seg->ib_seg_size << 8) |
(ib->idx_seg->ib_idx_tbl_offset);
ib_cfg.ipkt_cnt_cfg_n_unacked = ((u32)
ib->ib_config.interpkt_count << 24);
pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
HQM_IB_RAM_BASE_OFFSET);
writel(pg_num, ib->bna->regs.page_addr);
base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
HQM_IB_RAM_BASE_OFFSET);
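/*
 * The null-pointer cast below is an offsetof-style idiom: with ib_mem
 * anchored at address 0, &ib_mem[ib->ib_id].<field> evaluates to the
 * byte offset of that field within the register block, which is then
 * added to base_addr for each writel().
 */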
ib_mem = (struct bna_ib_blk_mem *)0;
off = (unsigned long)&ib_mem[ib->ib_id].host_addr_lo;
writel(htonl(ib_cfg.host_addr_lo), base_addr + off);
off = (unsigned long)&ib_mem[ib->ib_id].host_addr_hi;
writel(htonl(ib_cfg.host_addr_hi), base_addr + off);
off = (unsigned long)&ib_mem[ib->ib_id].clsc_n_ctrl_n_msix;
writel(ib_cfg.clsc_n_ctrl_n_msix, base_addr + off);
off = (unsigned long)&ib_mem[ib->ib_id].ipkt_n_ent_n_idxof;
writel(ib_cfg.ipkt_n_ent_n_idxof, base_addr + off);
off = (unsigned long)&ib_mem[ib->ib_id].ipkt_cnt_cfg_n_unacked;
writel(ib_cfg.ipkt_cnt_cfg_n_unacked, base_addr + off);
ib->door_bell.doorbell_ack = BNA_DOORBELL_IB_INT_ACK(
(u32)ib->ib_config.coalescing_timeo, 0);
pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + ib->bna->port_num,
HQM_INDX_TBL_RAM_BASE_OFFSET);
writel(pg_num, ib->bna->regs.page_addr);
base_addr = BNA_GET_MEM_BASE_ADDR(ib->bna->pcidev.pci_bar_kva,
HQM_INDX_TBL_RAM_BASE_OFFSET);
for (i = 0; i < ib->idx_seg->ib_seg_size; i++) {
off = (unsigned long)
((ib->idx_seg->ib_idx_tbl_offset + i) * BFI_IBIDX_SIZE);
writel(0, base_addr + off);
}
if (ib->intr->intr_type == BNA_INTR_T_INTX) {
bna_intx_disable(ib->bna, intx_mask);
intx_mask &= ~(ib->intr->vector);
bna_intx_enable(ib->bna, intx_mask);
}
}
static void
bna_ib_stop(struct bna_ib *ib)
{
u32 intx_mask;
ib->start_count--;
if (ib->start_count == 0) {
writel(BNA_DOORBELL_IB_INT_DISABLE,
ib->door_bell.doorbell_addr);
if (ib->intr->intr_type == BNA_INTR_T_INTX) {
bna_intx_disable(ib->bna, intx_mask);
intx_mask |= (ib->intr->vector);
bna_intx_enable(ib->bna, intx_mask);
}
}
}
static void
bna_ib_fail(struct bna_ib *ib)
{
ib->start_count = 0;
}
/**
* RXF
*/
static void rxf_enable(struct bna_rxf *rxf);
static void rxf_disable(struct bna_rxf *rxf);
static void __rxf_config_set(struct bna_rxf *rxf);
static void __rxf_rit_set(struct bna_rxf *rxf);
static void __bna_rxf_stat_clr(struct bna_rxf *rxf);
static int rxf_process_packet_filter(struct bna_rxf *rxf);
static int rxf_clear_packet_filter(struct bna_rxf *rxf);
static void rxf_reset_packet_filter(struct bna_rxf *rxf);
static void rxf_cb_enabled(void *arg, int status);
static void rxf_cb_disabled(void *arg, int status);
static void bna_rxf_cb_stats_cleared(void *arg, int status);
static void __rxf_enable(struct bna_rxf *rxf);
static void __rxf_disable(struct bna_rxf *rxf);
bfa_fsm_state_decl(bna_rxf, stopped, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, start_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_mod_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, started, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, cam_fltr_clr_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stop_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, pause_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, resume_wait, struct bna_rxf,
enum bna_rxf_event);
bfa_fsm_state_decl(bna_rxf, stat_clr_wait, struct bna_rxf,
enum bna_rxf_event);
static struct bfa_sm_table rxf_sm_table[] = {
{BFA_SM(bna_rxf_sm_stopped), BNA_RXF_STOPPED},
{BFA_SM(bna_rxf_sm_start_wait), BNA_RXF_START_WAIT},
{BFA_SM(bna_rxf_sm_cam_fltr_mod_wait), BNA_RXF_CAM_FLTR_MOD_WAIT},
{BFA_SM(bna_rxf_sm_started), BNA_RXF_STARTED},
{BFA_SM(bna_rxf_sm_cam_fltr_clr_wait), BNA_RXF_CAM_FLTR_CLR_WAIT},
{BFA_SM(bna_rxf_sm_stop_wait), BNA_RXF_STOP_WAIT},
{BFA_SM(bna_rxf_sm_pause_wait), BNA_RXF_PAUSE_WAIT},
{BFA_SM(bna_rxf_sm_resume_wait), BNA_RXF_RESUME_WAIT},
{BFA_SM(bna_rxf_sm_stat_clr_wait), BNA_RXF_STAT_CLR_WAIT}
};
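/*
 * The table above maps each FSM state function to a BNA_RXF_* value,
 * so that bna_rxf_state_get() (defined later in this file) can report
 * the current state, e.g. (illustrative):
 *
 *	if (bna_rxf_state_get(rxf) == BNA_RXF_STARTED)
 *		...safe to issue filter updates...
 */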
static void
bna_rxf_sm_stopped_entry(struct bna_rxf *rxf)
{
call_rxf_stop_cbfn(rxf, BNA_CB_SUCCESS);
}
static void
bna_rxf_sm_stopped(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_START:
bfa_fsm_set_state(rxf, bna_rxf_sm_start_wait);
break;
case RXF_E_STOP:
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_FAIL:
/* No-op */
break;
case RXF_E_CAM_FLTR_MOD:
call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
break;
case RXF_E_STARTED:
case RXF_E_STOPPED:
case RXF_E_CAM_FLTR_RESP:
/**
* These events are received due to flushing of the mbox
* when the device fails
*/
/* No-op */
break;
case RXF_E_PAUSE:
rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
break;
case RXF_E_RESUME:
rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
break;
default:
bfa_sm_fault(rxf->rx->bna, event);
}
}
static void
bna_rxf_sm_start_wait_entry(struct bna_rxf *rxf)
{
__rxf_config_set(rxf);
__rxf_rit_set(rxf);
rxf_enable(rxf);
}
static void
bna_rxf_sm_start_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_STOP:
/**
* STOP originates from bnad. When this happens, the FSM
* cannot be waiting for a filter update
*/
call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
break;
case RXF_E_FAIL:
call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_CAM_FLTR_MOD:
/* No-op */
break;
case RXF_E_STARTED:
/**
* Force rxf_process_filter() to go through initial
* config
*/
if ((rxf->ucast_active_mac != NULL) &&
(rxf->ucast_pending_set == 0))
rxf->ucast_pending_set = 1;
if (rxf->rss_status == BNA_STATUS_T_ENABLED)
rxf->rxf_flags |= BNA_RXF_FL_RSS_CONFIG_PENDING;
rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
break;
case RXF_E_PAUSE:
case RXF_E_RESUME:
rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
break;
default:
bfa_sm_fault(rxf->rx->bna, event);
}
}
static void
bna_rxf_sm_cam_fltr_mod_wait_entry(struct bna_rxf *rxf)
{
if (!rxf_process_packet_filter(rxf)) {
/* No more pending CAM entries to update */
bfa_fsm_set_state(rxf, bna_rxf_sm_started);
}
}
static void
bna_rxf_sm_cam_fltr_mod_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_STOP:
/**
* STOP originates from bnad. When this happens, the FSM
* cannot be waiting for a filter update
*/
call_rxf_start_cbfn(rxf, BNA_CB_INTERRUPT);
bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
break;
case RXF_E_FAIL:
rxf_reset_packet_filter(rxf);
call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
call_rxf_start_cbfn(rxf, BNA_CB_FAIL);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_CAM_FLTR_MOD:
/* No-op */
break;
case RXF_E_CAM_FLTR_RESP:
if (!rxf_process_packet_filter(rxf)) {
/* No more pending CAM entries to update */
call_rxf_cam_fltr_cbfn(rxf, BNA_CB_SUCCESS);
bfa_fsm_set_state(rxf, bna_rxf_sm_started);
}
break;
case RXF_E_PAUSE:
case RXF_E_RESUME:
rxf->rxf_flags |= BNA_RXF_FL_OPERSTATE_CHANGED;
break;
default:
bfa_sm_fault(rxf->rx->bna, event);
}
}
static void
bna_rxf_sm_started_entry(struct bna_rxf *rxf)
{
call_rxf_start_cbfn(rxf, BNA_CB_SUCCESS);
if (rxf->rxf_flags & BNA_RXF_FL_OPERSTATE_CHANGED) {
if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
bfa_fsm_send_event(rxf, RXF_E_PAUSE);
else
bfa_fsm_send_event(rxf, RXF_E_RESUME);
}
}
static void
bna_rxf_sm_started(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_STOP:
bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_clr_wait);
/* Hack to get the FSM to start clearing CAM entries */
bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
break;
case RXF_E_FAIL:
rxf_reset_packet_filter(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_CAM_FLTR_MOD:
bfa_fsm_set_state(rxf, bna_rxf_sm_cam_fltr_mod_wait);
break;
case RXF_E_PAUSE:
bfa_fsm_set_state(rxf, bna_rxf_sm_pause_wait);
break;
case RXF_E_RESUME:
bfa_fsm_set_state(rxf, bna_rxf_sm_resume_wait);
break;
default:
bfa_sm_fault(rxf->rx->bna, event);
}
}
static void
bna_rxf_sm_cam_fltr_clr_wait_entry(struct bna_rxf *rxf)
{
/**
* Note: Do not add rxf_clear_packet_filter here.
* It will overstep mbox when this transition happens:
* cam_fltr_mod_wait -> cam_fltr_clr_wait on RXF_E_STOP event
*/
}
static void
bna_rxf_sm_cam_fltr_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_FAIL:
/**
* FSM was in the process of stopping, initiated by
* bnad. When this happens, no one can be waiting for
* start or filter update
*/
rxf_reset_packet_filter(rxf);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_CAM_FLTR_RESP:
if (!rxf_clear_packet_filter(rxf)) {
/* No more pending CAM entries to clear */
bfa_fsm_set_state(rxf, bna_rxf_sm_stop_wait);
rxf_disable(rxf);
}
break;
default:
bfa_sm_fault(rxf->rx->bna, event);
}
}
static void
bna_rxf_sm_stop_wait_entry(struct bna_rxf *rxf)
{
/**
* NOTE: Do not add rxf_disable here.
* It will overstep mbox when this transition happens:
* start_wait -> stop_wait on RXF_E_STOP event
*/
}
static void
bna_rxf_sm_stop_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_FAIL:
/**
* FSM was in the process of stopping, initiated by
* bnad. When this happens, no one can be waiting for
* start or filter update
*/
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_STARTED:
/**
* This event is received due to an abrupt transition from
* the bna_rxf_sm_start_wait state on receiving the
* RXF_E_STOP event
*/
rxf_disable(rxf);
break;
case RXF_E_STOPPED:
/**
* FSM was in the process of stopping, initiated by
* bnad. When this happens, no one can be waiting for
* start or filter update
*/
bfa_fsm_set_state(rxf, bna_rxf_sm_stat_clr_wait);
break;
case RXF_E_PAUSE:
rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
break;
case RXF_E_RESUME:
rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
break;
default:
bfa_sm_fault(rxf->rx->bna, event);
}
}
static void
bna_rxf_sm_pause_wait_entry(struct bna_rxf *rxf)
{
rxf->rxf_flags &=
~(BNA_RXF_FL_OPERSTATE_CHANGED | BNA_RXF_FL_RXF_ENABLED);
__rxf_disable(rxf);
}
static void
bna_rxf_sm_pause_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_FAIL:
/**
* FSM was in the process of disabling rxf, initiated by
* bnad.
*/
call_rxf_pause_cbfn(rxf, BNA_CB_FAIL);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_STOPPED:
rxf->rxf_oper_state = BNA_RXF_OPER_STATE_PAUSED;
call_rxf_pause_cbfn(rxf, BNA_CB_SUCCESS);
bfa_fsm_set_state(rxf, bna_rxf_sm_started);
break;
/*
* Since PAUSE/RESUME can only be sent by bnad, we don't expect
* any other event during these states
*/
default:
bfa_sm_fault(rxf->rx->bna, event);
}
}
static void
bna_rxf_sm_resume_wait_entry(struct bna_rxf *rxf)
{
rxf->rxf_flags &= ~(BNA_RXF_FL_OPERSTATE_CHANGED);
rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
__rxf_enable(rxf);
}
static void
bna_rxf_sm_resume_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_FAIL:
/**
* FSM was in the process of disabling rxf, initiated by
* bnad.
*/
call_rxf_resume_cbfn(rxf, BNA_CB_FAIL);
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
case RXF_E_STARTED:
rxf->rxf_oper_state = BNA_RXF_OPER_STATE_RUNNING;
call_rxf_resume_cbfn(rxf, BNA_CB_SUCCESS);
bfa_fsm_set_state(rxf, bna_rxf_sm_started);
break;
/*
* Since PAUSE/RESUME can only be sent by bnad, we don't expect
* any other event during these states
*/
default:
bfa_sm_fault(rxf->rx->bna, event);
}
}
static void
bna_rxf_sm_stat_clr_wait_entry(struct bna_rxf *rxf)
{
__bna_rxf_stat_clr(rxf);
}
static void
bna_rxf_sm_stat_clr_wait(struct bna_rxf *rxf, enum bna_rxf_event event)
{
switch (event) {
case RXF_E_FAIL:
case RXF_E_STAT_CLEARED:
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
break;
default:
bfa_sm_fault(rxf->rx->bna, event);
}
}
static void
__rxf_enable(struct bna_rxf *rxf)
{
struct bfi_ll_rxf_multi_req ll_req;
u32 bm[2] = {0, 0};
if (rxf->rxf_id < 32)
bm[0] = 1 << rxf->rxf_id;
else
bm[1] = 1 << (rxf->rxf_id - 32);
bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
ll_req.rxf_id_mask[0] = htonl(bm[0]);
ll_req.rxf_id_mask[1] = htonl(bm[1]);
ll_req.enable = 1;
bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
rxf_cb_enabled, rxf);
bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
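/*
 * Worked example: rxf_id 5 sets bit 5 of bm[0]; rxf_id 40 sets bit 8
 * of bm[1] (40 - 32 = 8). The two 32-bit words together address up to
 * 64 RxF functions in one mailbox request.
 */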
static void
__rxf_disable(struct bna_rxf *rxf)
{
struct bfi_ll_rxf_multi_req ll_req;
u32 bm[2] = {0, 0};
if (rxf->rxf_id < 32)
bm[0] = 1 << rxf->rxf_id;
else
bm[1] = 1 << (rxf->rxf_id - 32);
bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RX_REQ, 0);
ll_req.rxf_id_mask[0] = htonl(bm[0]);
ll_req.rxf_id_mask[1] = htonl(bm[1]);
ll_req.enable = 0;
bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
rxf_cb_disabled, rxf);
bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
static void
__rxf_config_set(struct bna_rxf *rxf)
{
u32 i;
struct bna_rss_mem *rss_mem;
struct bna_rx_fndb_ram *rx_fndb_ram;
struct bna *bna = rxf->rx->bna;
void __iomem *base_addr;
unsigned long off;
base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
RSS_TABLE_BASE_OFFSET);
rss_mem = (struct bna_rss_mem *)0;
/* Configure RSS if required */
if (rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE) {
/* configure RSS Table */
writel(BNA_GET_PAGE_NUM(RAD0_MEM_BLK_BASE_PG_NUM +
bna->port_num, RSS_TABLE_BASE_OFFSET),
bna->regs.page_addr);
/* temporarily disable RSS, while hash value is written */
off = (unsigned long)&rss_mem[0].type_n_hash;
writel(0, base_addr + off);
for (i = 0; i < BFI_RSS_HASH_KEY_LEN; i++) {
off = (unsigned long)
&rss_mem[0].hash_key[(BFI_RSS_HASH_KEY_LEN - 1) - i];
writel(htonl(rxf->rss_cfg.toeplitz_hash_key[i]),
base_addr + off);
}
off = (unsigned long)&rss_mem[0].type_n_hash;
writel(rxf->rss_cfg.hash_type | rxf->rss_cfg.hash_mask,
base_addr + off);
}
/* Configure RxF */
writel(BNA_GET_PAGE_NUM(
LUT0_MEM_BLK_BASE_PG_NUM + (bna->port_num * 2),
RX_FNDB_RAM_BASE_OFFSET),
bna->regs.page_addr);
base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
RX_FNDB_RAM_BASE_OFFSET);
rx_fndb_ram = (struct bna_rx_fndb_ram *)0;
/* We always use RSS table 0 */
off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rss_prop;
writel(rxf->ctrl_flags & BNA_RXF_CF_RSS_ENABLE,
base_addr + off);
/* small large buffer enable/disable */
off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].size_routing_props;
writel((rxf->ctrl_flags & BNA_RXF_CF_SM_LG_RXQ) | 0x80,
base_addr + off);
/* RIT offset, HDS forced offset, multicast RxQ Id */
off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].rit_hds_mcastq;
writel((rxf->rit_segment->rit_offset << 16) |
(rxf->forced_offset << 8) |
(rxf->hds_cfg.hdr_type & BNA_HDS_FORCED) | rxf->mcast_rxq_id,
base_addr + off);
/*
* default vlan tag, default function enable, strip vlan bytes,
* HDS type, header size
*/
off = (unsigned long)&rx_fndb_ram[rxf->rxf_id].control_flags;
writel(((u32)rxf->default_vlan_tag << 16) |
(rxf->ctrl_flags &
(BNA_RXF_CF_DEFAULT_VLAN |
BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE |
BNA_RXF_CF_VLAN_STRIP)) |
(rxf->hds_cfg.hdr_type & ~BNA_HDS_FORCED) |
rxf->hds_cfg.header_size,
base_addr + off);
}
void
__rxf_vlan_filter_set(struct bna_rxf *rxf, enum bna_status status)
{
struct bna *bna = rxf->rx->bna;
int i;
writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
(bna->port_num * 2), VLAN_RAM_BASE_OFFSET),
bna->regs.page_addr);
if (status == BNA_STATUS_T_ENABLED) {
/* enable VLAN filtering on this function */
for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
writel(rxf->vlan_filter_table[i],
BNA_GET_VLAN_MEM_ENTRY_ADDR
(bna->pcidev.pci_bar_kva, rxf->rxf_id,
i * 32));
}
} else {
/* disable VLAN filtering on this function */
for (i = 0; i <= BFI_MAX_VLAN / 32; i++) {
writel(0xffffffff,
BNA_GET_VLAN_MEM_ENTRY_ADDR
(bna->pcidev.pci_bar_kva, rxf->rxf_id,
i * 32));
}
}
}
static void
__rxf_rit_set(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
struct bna_rit_mem *rit_mem;
int i;
void __iomem *base_addr;
unsigned long off;
base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
FUNCTION_TO_RXQ_TRANSLATE);
rit_mem = (struct bna_rit_mem *)0;
writel(BNA_GET_PAGE_NUM(RXA0_MEM_BLK_BASE_PG_NUM + bna->port_num,
FUNCTION_TO_RXQ_TRANSLATE),
bna->regs.page_addr);
for (i = 0; i < rxf->rit_segment->rit_size; i++) {
off = (unsigned long)&rit_mem[i + rxf->rit_segment->rit_offset];
writel(rxf->rit_segment->rit[i].large_rxq_id << 6 |
rxf->rit_segment->rit[i].small_rxq_id,
base_addr + off);
}
}
static void
__bna_rxf_stat_clr(struct bna_rxf *rxf)
{
struct bfi_ll_stats_req ll_req;
u32 bm[2] = {0, 0};
if (rxf->rxf_id < 32)
bm[0] = 1 << rxf->rxf_id;
else
bm[1] = 1 << (rxf->rxf_id - 32);
bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
ll_req.stats_mask = 0;
ll_req.txf_id_mask[0] = 0;
ll_req.txf_id_mask[1] = 0;
ll_req.rxf_id_mask[0] = htonl(bm[0]);
ll_req.rxf_id_mask[1] = htonl(bm[1]);
bna_mbox_qe_fill(&rxf->mbox_qe, &ll_req, sizeof(ll_req),
bna_rxf_cb_stats_cleared, rxf);
bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
static void
rxf_enable(struct bna_rxf *rxf)
{
if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
bfa_fsm_send_event(rxf, RXF_E_STARTED);
else {
rxf->rxf_flags |= BNA_RXF_FL_RXF_ENABLED;
__rxf_enable(rxf);
}
}
static void
rxf_cb_enabled(void *arg, int status)
{
struct bna_rxf *rxf = (struct bna_rxf *)arg;
bfa_q_qe_init(&rxf->mbox_qe.qe);
bfa_fsm_send_event(rxf, RXF_E_STARTED);
}
static void
rxf_disable(struct bna_rxf *rxf)
{
if (rxf->rxf_oper_state == BNA_RXF_OPER_STATE_PAUSED)
bfa_fsm_send_event(rxf, RXF_E_STOPPED);
else
rxf->rxf_flags &= ~BNA_RXF_FL_RXF_ENABLED;
__rxf_disable(rxf);
}
static void
rxf_cb_disabled(void *arg, int status)
{
struct bna_rxf *rxf = (struct bna_rxf *)arg;
bfa_q_qe_init(&rxf->mbox_qe.qe);
bfa_fsm_send_event(rxf, RXF_E_STOPPED);
}
void
rxf_cb_cam_fltr_mbox_cmd(void *arg, int status)
{
struct bna_rxf *rxf = (struct bna_rxf *)arg;
bfa_q_qe_init(&rxf->mbox_qe.qe);
bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_RESP);
}
static void
bna_rxf_cb_stats_cleared(void *arg, int status)
{
struct bna_rxf *rxf = (struct bna_rxf *)arg;
bfa_q_qe_init(&rxf->mbox_qe.qe);
bfa_fsm_send_event(rxf, RXF_E_STAT_CLEARED);
}
void
rxf_cam_mbox_cmd(struct bna_rxf *rxf, u8 cmd,
const struct bna_mac *mac_addr)
{
struct bfi_ll_mac_addr_req req;
bfi_h2i_set(req.mh, BFI_MC_LL, cmd, 0);
req.rxf_id = rxf->rxf_id;
memcpy(&req.mac_addr, (void *)&mac_addr->addr, ETH_ALEN);
bna_mbox_qe_fill(&rxf->mbox_qe, &req, sizeof(req),
rxf_cb_cam_fltr_mbox_cmd, rxf);
bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
}
static int
rxf_process_packet_filter_mcast(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
struct list_head *qe;
/* Add multicast entries */
if (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_ADD_REQ, mac);
list_add_tail(&mac->qe, &rxf->mcast_active_q);
return 1;
}
/* Delete multicast entries previously added */
if (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
return 1;
}
return 0;
}
static int
rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
{
/* Apply the VLAN filter */
if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
__rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
}
/* Apply RSS configuration */
if (rxf->rxf_flags & BNA_RXF_FL_RSS_CONFIG_PENDING) {
rxf->rxf_flags &= ~BNA_RXF_FL_RSS_CONFIG_PENDING;
if (rxf->rss_status == BNA_STATUS_T_DISABLED) {
/* RSS is being disabled */
rxf->ctrl_flags &= ~BNA_RXF_CF_RSS_ENABLE;
__rxf_rit_set(rxf);
__rxf_config_set(rxf);
} else {
/* RSS is being enabled or reconfigured */
rxf->ctrl_flags |= BNA_RXF_CF_RSS_ENABLE;
__rxf_rit_set(rxf);
__rxf_config_set(rxf);
}
}
return 0;
}
/**
* Processes pending ucast, mcast entry addition/deletion and issues the
* mailbox command. Also processes pending filter configuration -
* promiscuous mode, default mode, allmulti mode - and issues a mailbox
* command or applies it directly to h/w
*/
static int
rxf_process_packet_filter(struct bna_rxf *rxf)
{
/* Set the default MAC first */
if (rxf->ucast_pending_set > 0) {
rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_UCAST_SET_REQ,
rxf->ucast_active_mac);
rxf->ucast_pending_set--;
return 1;
}
if (rxf_process_packet_filter_ucast(rxf))
return 1;
if (rxf_process_packet_filter_mcast(rxf))
return 1;
if (rxf_process_packet_filter_promisc(rxf))
return 1;
if (rxf_process_packet_filter_allmulti(rxf))
return 1;
if (rxf_process_packet_filter_vlan(rxf))
return 1;
return 0;
}
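/*
 * Note the fixed priority above: the default unicast MAC is set first,
 * then pending ucast/mcast CAM changes, then promiscuous and allmulti
 * mode, and VLAN/RSS last. Each ucast/mcast/promisc/allmulti step
 * issues at most one mailbox command and returns 1; VLAN and RSS
 * changes are applied directly to hardware.
 */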
static int
rxf_clear_packet_filter_mcast(struct bna_rxf *rxf)
{
struct bna_mac *mac = NULL;
struct list_head *qe;
/* 3. delete pending mcast entries */
if (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
return 1;
}
/* 4. clear active mcast entries; move them to pending_add_q */
if (!list_empty(&rxf->mcast_active_q)) {
bfa_q_deq(&rxf->mcast_active_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
rxf_cam_mbox_cmd(rxf, BFI_LL_H2I_MAC_MCAST_DEL_REQ, mac);
list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
return 1;
}
return 0;
}
/**
* In the rxf stop path, processes pending ucast/mcast delete queue and issues
* the mailbox command. Moves the active ucast/mcast entries to pending add q,
* so that they are added to CAM again in the rxf start path. Moves the current
* filter settings - promiscuous, default, allmulti - to the pending filter
* configuration
*/
static int
rxf_clear_packet_filter(struct bna_rxf *rxf)
{
if (rxf_clear_packet_filter_ucast(rxf))
return 1;
if (rxf_clear_packet_filter_mcast(rxf))
return 1;
/* 5. clear active default MAC in the CAM */
if (rxf->ucast_pending_set > 0)
rxf->ucast_pending_set = 0;
if (rxf_clear_packet_filter_promisc(rxf))
return 1;
if (rxf_clear_packet_filter_allmulti(rxf))
return 1;
return 0;
}
static void
rxf_reset_packet_filter_mcast(struct bna_rxf *rxf)
{
struct list_head *qe;
struct bna_mac *mac;
/* 3. Move active mcast entries to pending_add_q */
while (!list_empty(&rxf->mcast_active_q)) {
bfa_q_deq(&rxf->mcast_active_q, &qe);
bfa_q_qe_init(qe);
list_add_tail(qe, &rxf->mcast_pending_add_q);
}
/* 4. Throw away delete pending mcast entries */
while (!list_empty(&rxf->mcast_pending_del_q)) {
bfa_q_deq(&rxf->mcast_pending_del_q, &qe);
bfa_q_qe_init(qe);
mac = (struct bna_mac *)qe;
bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
}
}
/**
* In the rxf fail path, throws away the ucast/mcast entries pending for
* deletion, moves all active ucast/mcast entries to pending queue so that
* they are added back to CAM in the rxf start path. Also moves the current
* filter configuration to pending filter configuration.
*/
static void
rxf_reset_packet_filter(struct bna_rxf *rxf)
{
rxf_reset_packet_filter_ucast(rxf);
rxf_reset_packet_filter_mcast(rxf);
/* 5. Turn off ucast set flag */
rxf->ucast_pending_set = 0;
rxf_reset_packet_filter_promisc(rxf);
rxf_reset_packet_filter_allmulti(rxf);
}
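/*
 * In short: rxf_clear_packet_filter() (stop path) undoes CAM state
 * through mailbox commands, while rxf_reset_packet_filter() (fail
 * path) only fixes up driver-side queues and flags, since no mailbox
 * traffic is issued once the device has failed.
 */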
static void
bna_rxf_init(struct bna_rxf *rxf,
struct bna_rx *rx,
struct bna_rx_config *q_config)
{
struct list_head *qe;
struct bna_rxp *rxp;
/* rxf_id is initialized during rx_mod init */
rxf->rx = rx;
INIT_LIST_HEAD(&rxf->ucast_pending_add_q);
INIT_LIST_HEAD(&rxf->ucast_pending_del_q);
rxf->ucast_pending_set = 0;
INIT_LIST_HEAD(&rxf->ucast_active_q);
rxf->ucast_active_mac = NULL;
INIT_LIST_HEAD(&rxf->mcast_pending_add_q);
INIT_LIST_HEAD(&rxf->mcast_pending_del_q);
INIT_LIST_HEAD(&rxf->mcast_active_q);
bfa_q_qe_init(&rxf->mbox_qe.qe);
if (q_config->vlan_strip_status == BNA_STATUS_T_ENABLED)
rxf->ctrl_flags |= BNA_RXF_CF_VLAN_STRIP;
rxf->rxf_oper_state = (q_config->paused) ?
BNA_RXF_OPER_STATE_PAUSED : BNA_RXF_OPER_STATE_RUNNING;
bna_rxf_adv_init(rxf, rx, q_config);
rxf->rit_segment = bna_rit_mod_seg_get(&rxf->rx->bna->rit_mod,
q_config->num_paths);
list_for_each(qe, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe;
if (q_config->rxp_type == BNA_RXP_SINGLE)
rxf->mcast_rxq_id = rxp->rxq.single.only->rxq_id;
else
rxf->mcast_rxq_id = rxp->rxq.slr.large->rxq_id;
break;
}
rxf->vlan_filter_status = BNA_STATUS_T_DISABLED;
memset(rxf->vlan_filter_table, 0,
(sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
/* Set up VLAN 0 for pure priority tagged packets */
rxf->vlan_filter_table[0] |= 1;
bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
}
static void
bna_rxf_uninit(struct bna_rxf *rxf)
{
struct bna *bna = rxf->rx->bna;
struct bna_mac *mac;
bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
rxf->rit_segment = NULL;
rxf->ucast_pending_set = 0;
while (!list_empty(&rxf->ucast_pending_add_q)) {
bfa_q_deq(&rxf->ucast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod, mac);
}
if (rxf->ucast_active_mac) {
bfa_q_qe_init(&rxf->ucast_active_mac->qe);
bna_ucam_mod_mac_put(&rxf->rx->bna->ucam_mod,
rxf->ucast_active_mac);
rxf->ucast_active_mac = NULL;
}
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &mac);
bfa_q_qe_init(&mac->qe);
bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
}
/* Turn off pending promisc mode */
if (is_promisc_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
/* system promisc state should be pending */
BUG_ON(bna->rxf_promisc_id != rxf->rxf_id);
promisc_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
bna->rxf_promisc_id = BFI_MAX_RXF;
}
/* Promisc mode should not be active */
BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);
/* Turn off pending all-multi mode */
if (is_allmulti_enable(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask)) {
allmulti_inactive(rxf->rxmode_pending,
rxf->rxmode_pending_bitmask);
}
/* Allmulti mode should not be active */
BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);
rxf->rx = NULL;
}
static void
bna_rx_cb_rxf_started(struct bna_rx *rx, enum bna_cb_status status)
{
bfa_fsm_send_event(rx, RX_E_RXF_STARTED);
if (rx->rxf.rxf_id < 32)
rx->bna->rx_mod.rxf_bmap[0] |= ((u32)1 << rx->rxf.rxf_id);
else
rx->bna->rx_mod.rxf_bmap[1] |= ((u32)
1 << (rx->rxf.rxf_id - 32));
}
static void
bna_rxf_start(struct bna_rxf *rxf)
{
rxf->start_cbfn = bna_rx_cb_rxf_started;
rxf->start_cbarg = rxf->rx;
rxf->rxf_flags &= ~BNA_RXF_FL_FAILED;
bfa_fsm_send_event(rxf, RXF_E_START);
}
static void
bna_rx_cb_rxf_stopped(struct bna_rx *rx, enum bna_cb_status status)
{
bfa_fsm_send_event(rx, RX_E_RXF_STOPPED);
if (rx->rxf.rxf_id < 32)
rx->bna->rx_mod.rxf_bmap[0] &= ~(u32)1 << rx->rxf.rxf_id;
else
rx->bna->rx_mod.rxf_bmap[1] &= ~(u32)
1 << (rx->rxf.rxf_id - 32);
}
static void
bna_rxf_stop(struct bna_rxf *rxf)
{
rxf->stop_cbfn = bna_rx_cb_rxf_stopped;
rxf->stop_cbarg = rxf->rx;
bfa_fsm_send_event(rxf, RXF_E_STOP);
}
static void
bna_rxf_fail(struct bna_rxf *rxf)
{
rxf->rxf_flags |= BNA_RXF_FL_FAILED;
bfa_fsm_send_event(rxf, RXF_E_FAIL);
}
int
bna_rxf_state_get(struct bna_rxf *rxf)
{
return bfa_sm_to_state(rxf_sm_table, rxf->fsm);
}
enum bna_cb_status
bna_rx_ucast_set(struct bna_rx *rx, u8 *ucmac,
void (*cbfn)(struct bnad *, struct bna_rx *,
enum bna_cb_status))
{
struct bna_rxf *rxf = &rx->rxf;
if (rxf->ucast_active_mac == NULL) {
rxf->ucast_active_mac =
bna_ucam_mod_mac_get(&rxf->rx->bna->ucam_mod);
if (rxf->ucast_active_mac == NULL)
return BNA_CB_UCAST_CAM_FULL;
bfa_q_qe_init(&rxf->ucast_active_mac->qe);
}
memcpy(rxf->ucast_active_mac->addr, ucmac, ETH_ALEN);
rxf->ucast_pending_set++;
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_mcast_add(struct bna_rx *rx, u8 *addr,
void (*cbfn)(struct bnad *, struct bna_rx *,
enum bna_cb_status))
{
struct bna_rxf *rxf = &rx->rxf;
struct list_head *qe;
struct bna_mac *mac;
/* Check if already added */
list_for_each(qe, &rxf->mcast_active_q) {
mac = (struct bna_mac *)qe;
if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
if (cbfn)
(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
return BNA_CB_SUCCESS;
}
}
/* Check if pending addition */
list_for_each(qe, &rxf->mcast_pending_add_q) {
mac = (struct bna_mac *)qe;
if (BNA_MAC_IS_EQUAL(mac->addr, addr)) {
if (cbfn)
(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
return BNA_CB_SUCCESS;
}
}
mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
if (mac == NULL)
return BNA_CB_MCAST_LIST_FULL;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, addr, ETH_ALEN);
list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
return BNA_CB_SUCCESS;
}
enum bna_cb_status
bna_rx_mcast_listset(struct bna_rx *rx, int count, u8 *mclist,
void (*cbfn)(struct bnad *, struct bna_rx *,
enum bna_cb_status))
{
struct bna_rxf *rxf = &rx->rxf;
struct list_head list_head;
struct list_head *qe;
u8 *mcaddr;
struct bna_mac *mac;
struct bna_mac *mac1;
int skip;
int delete;
int need_hw_config = 0;
int i;
/* Allocate nodes */
INIT_LIST_HEAD(&list_head);
for (i = 0, mcaddr = mclist; i < count; i++) {
mac = bna_mcam_mod_mac_get(&rxf->rx->bna->mcam_mod);
if (mac == NULL)
goto err_return;
bfa_q_qe_init(&mac->qe);
memcpy(mac->addr, mcaddr, ETH_ALEN);
list_add_tail(&mac->qe, &list_head);
mcaddr += ETH_ALEN;
}
/* Schedule for addition */
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
skip = 0;
/* Skip if already added */
list_for_each(qe, &rxf->mcast_active_q) {
mac1 = (struct bna_mac *)qe;
if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
mac);
skip = 1;
break;
}
}
if (skip)
continue;
/* Skip if pending addition */
list_for_each(qe, &rxf->mcast_pending_add_q) {
mac1 = (struct bna_mac *)qe;
if (BNA_MAC_IS_EQUAL(mac1->addr, mac->addr)) {
bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod,
mac);
skip = 1;
break;
}
}
if (skip)
continue;
need_hw_config = 1;
list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
}
/**
* Delete the entries that are in the pending_add_q but not
* in the new list
*/
while (!list_empty(&rxf->mcast_pending_add_q)) {
bfa_q_deq(&rxf->mcast_pending_add_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
delete = 0;
break;
}
mcaddr += ETH_ALEN;
}
if (delete)
bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
else
list_add_tail(&mac->qe, &list_head);
}
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
list_add_tail(&mac->qe, &rxf->mcast_pending_add_q);
}
/**
* Schedule entries for deletion that are in the active_q but not
* in the new list
*/
while (!list_empty(&rxf->mcast_active_q)) {
bfa_q_deq(&rxf->mcast_active_q, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
for (i = 0, mcaddr = mclist, delete = 1; i < count; i++) {
if (BNA_MAC_IS_EQUAL(mcaddr, mac->addr)) {
delete = 0;
break;
}
mcaddr += ETH_ALEN;
}
if (delete) {
list_add_tail(&mac->qe, &rxf->mcast_pending_del_q);
need_hw_config = 1;
} else {
list_add_tail(&mac->qe, &list_head);
}
}
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
list_add_tail(&mac->qe, &rxf->mcast_active_q);
}
if (need_hw_config) {
rxf->cam_fltr_cbfn = cbfn;
rxf->cam_fltr_cbarg = rx->bna->bnad;
bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
} else if (cbfn)
(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
return BNA_CB_SUCCESS;
err_return:
while (!list_empty(&list_head)) {
bfa_q_deq(&list_head, &qe);
mac = (struct bna_mac *)qe;
bfa_q_qe_init(&mac->qe);
bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
}
return BNA_CB_MCAST_LIST_FULL;
}
void
bna_rx_vlan_add(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> 5);
int bit = (1 << (vlan_id & 0x1F));
rxf->vlan_filter_table[index] |= bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
}
}
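/*
 * Worked example: vlan_id 100 maps to index 3 (100 >> 5) and bit 4
 * (100 & 0x1F), i.e. bit 4 of vlan_filter_table[3]; each u32 word of
 * the table tracks 32 VLAN ids.
 */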
void
bna_rx_vlan_del(struct bna_rx *rx, int vlan_id)
{
struct bna_rxf *rxf = &rx->rxf;
int index = (vlan_id >> 5);
int bit = (1 << (vlan_id & 0x1F));
rxf->vlan_filter_table[index] &= ~bit;
if (rxf->vlan_filter_status == BNA_STATUS_T_ENABLED) {
rxf->rxf_flags |= BNA_RXF_FL_VLAN_CONFIG_PENDING;
bfa_fsm_send_event(rxf, RXF_E_CAM_FLTR_MOD);
}
}
/**
* RX
*/
#define RXQ_RCB_INIT(q, rxp, qdepth, bna, _id, unmapq_mem) do { \
struct bna_doorbell_qset *_qset; \
unsigned long off; \
(q)->rcb->producer_index = (q)->rcb->consumer_index = 0; \
(q)->rcb->q_depth = (qdepth); \
(q)->rcb->unmap_q = unmapq_mem; \
(q)->rcb->rxq = (q); \
(q)->rcb->cq = &(rxp)->cq; \
(q)->rcb->bnad = (bna)->bnad; \
_qset = (struct bna_doorbell_qset *)0; \
off = (unsigned long)&_qset[(q)->rxq_id].rxq[0]; \
(q)->rcb->q_dbell = off + \
BNA_GET_DOORBELL_BASE_ADDR((bna)->pcidev.pci_bar_kva); \
(q)->rcb->id = _id; \
} while (0)
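/*
 * The _qset null-pointer arithmetic in RXQ_RCB_INIT computes the byte
 * offset of this RXQ's doorbell within the doorbell page; adding it to
 * the doorbell base address derived from the PCI BAR yields the
 * register the driver rings for this queue.
 */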
#define BNA_GET_RXQS(qcfg) (((qcfg)->rxp_type == BNA_RXP_SINGLE) ? \
(qcfg)->num_paths : ((qcfg)->num_paths * 2))
#define SIZE_TO_PAGES(size) (((size) >> PAGE_SHIFT) + ((((size) &\
(PAGE_SIZE - 1)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT))
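/*
 * SIZE_TO_PAGES rounds a byte count up to whole pages, e.g. with a
 * 4096-byte PAGE_SIZE: 4096 -> 1 page, 4097 -> 2 pages, 8192 -> 2.
 */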
#define call_rx_stop_callback(rx, status) \
if ((rx)->stop_cbfn) { \
(*(rx)->stop_cbfn)((rx)->stop_cbarg, rx, (status)); \
(rx)->stop_cbfn = NULL; \
(rx)->stop_cbarg = NULL; \
}
/*
* Since rx_enable is a synchronous callback, there is no start_cbfn required.
* Instead, we'll call bnad_rx_post(rxp) so that bnad can post the buffers
* for each rxpath.
*/
#define call_rx_disable_cbfn(rx, status) \
if ((rx)->disable_cbfn) { \
(*(rx)->disable_cbfn)((rx)->disable_cbarg, \
status); \
(rx)->disable_cbfn = NULL; \
(rx)->disable_cbarg = NULL; \
}
#define rxqs_reqd(type, num_rxqs) \
(((type) == BNA_RXP_SINGLE) ? (num_rxqs) : ((num_rxqs) * 2))
#define rx_ib_fail(rx) \
do { \
struct bna_rxp *rxp; \
struct list_head *qe; \
list_for_each(qe, &(rx)->rxp_q) { \
rxp = (struct bna_rxp *)qe; \
bna_ib_fail(rxp->cq.ib); \
} \
} while (0)
static void __bna_multi_rxq_stop(struct bna_rxp *, u32 *);
static void __bna_rxq_start(struct bna_rxq *rxq);
static void __bna_cq_start(struct bna_cq *cq);
static void bna_rit_create(struct bna_rx *rx);
static void bna_rx_cb_multi_rxq_stopped(void *arg, int status);
static void bna_rx_cb_rxq_stopped_all(void *arg);
bfa_fsm_state_decl(bna_rx, stopped,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_start_wait,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, started,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxf_stop_wait,
struct bna_rx, enum bna_rx_event);
bfa_fsm_state_decl(bna_rx, rxq_stop_wait,
struct bna_rx, enum bna_rx_event);
static const struct bfa_sm_table rx_sm_table[] = {
{BFA_SM(bna_rx_sm_stopped), BNA_RX_STOPPED},
{BFA_SM(bna_rx_sm_rxf_start_wait), BNA_RX_RXF_START_WAIT},
{BFA_SM(bna_rx_sm_started), BNA_RX_STARTED},
{BFA_SM(bna_rx_sm_rxf_stop_wait), BNA_RX_RXF_STOP_WAIT},
{BFA_SM(bna_rx_sm_rxq_stop_wait), BNA_RX_RXQ_STOP_WAIT},
};
static void bna_rx_sm_stopped_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp;
struct list_head *qe_rxp;
list_for_each(qe_rxp, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe_rxp;
rx->rx_cleanup_cbfn(rx->bna->bnad, rxp->cq.ccb);
}
call_rx_stop_callback(rx, BNA_CB_SUCCESS);
}
static void bna_rx_sm_stopped(struct bna_rx *rx,
enum bna_rx_event event)
{
switch (event) {
case RX_E_START:
bfa_fsm_set_state(rx, bna_rx_sm_rxf_start_wait);
break;
case RX_E_STOP:
call_rx_stop_callback(rx, BNA_CB_SUCCESS);
break;
case RX_E_FAIL:
/* no-op */
break;
default:
bfa_sm_fault(rx->bna, event);
break;
}
}
static void bna_rx_sm_rxf_start_wait_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp;
struct list_head *qe_rxp;
struct bna_rxq *q0 = NULL, *q1 = NULL;
/* Setup the RIT */
bna_rit_create(rx);
list_for_each(qe_rxp, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe_rxp;
bna_ib_start(rxp->cq.ib);
GET_RXQS(rxp, q0, q1);
q0->buffer_size = bna_port_mtu_get(&rx->bna->port);
__bna_rxq_start(q0);
rx->rx_post_cbfn(rx->bna->bnad, q0->rcb);
if (q1) {
__bna_rxq_start(q1);
rx->rx_post_cbfn(rx->bna->bnad, q1->rcb);
}
__bna_cq_start(&rxp->cq);
}
bna_rxf_start(&rx->rxf);
}
static void bna_rx_sm_rxf_start_wait(struct bna_rx *rx,
enum bna_rx_event event)
{
switch (event) {
case RX_E_STOP:
bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
break;
case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
rx_ib_fail(rx);
bna_rxf_fail(&rx->rxf);
break;
case RX_E_RXF_STARTED:
bfa_fsm_set_state(rx, bna_rx_sm_started);
break;
default:
bfa_sm_fault(rx->bna, event);
break;
}
}
void
bna_rx_sm_started_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp;
struct list_head *qe_rxp;
/* Start IB */
list_for_each(qe_rxp, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe_rxp;
bna_ib_ack(&rxp->cq.ib->door_bell, 0);
}
bna_llport_rx_started(&rx->bna->port.llport);
}
void
bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
case RX_E_FAIL:
bna_llport_rx_stopped(&rx->bna->port.llport);
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
rx_ib_fail(rx);
bna_rxf_fail(&rx->rxf);
break;
case RX_E_STOP:
bna_llport_rx_stopped(&rx->bna->port.llport);
bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
break;
default:
bfa_sm_fault(rx->bna, event);
break;
}
}
void
bna_rx_sm_rxf_stop_wait_entry(struct bna_rx *rx)
{
bna_rxf_stop(&rx->rxf);
}
void
bna_rx_sm_rxf_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
switch (event) {
case RX_E_RXF_STOPPED:
bfa_fsm_set_state(rx, bna_rx_sm_rxq_stop_wait);
break;
case RX_E_RXF_STARTED:
/**
* RxF was in the process of starting up when
* RXF_E_STOP was issued. Ignore this event
*/
break;
case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
rx_ib_fail(rx);
bna_rxf_fail(&rx->rxf);
break;
default:
bfa_sm_fault(rx->bna, event);
break;
}
}
void
bna_rx_sm_rxq_stop_wait_entry(struct bna_rx *rx)
{
struct bna_rxp *rxp = NULL;
struct bna_rxq *q0 = NULL;
struct bna_rxq *q1 = NULL;
struct list_head *qe;
u32 rxq_mask[2] = {0, 0};
/* Only one call to multi-rxq-stop for all RXPs in this RX */
bfa_wc_up(&rx->rxq_stop_wc);
list_for_each(qe, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe;
GET_RXQS(rxp, q0, q1);
if (q0->rxq_id < 32)
rxq_mask[0] |= ((u32)1 << q0->rxq_id);
else
rxq_mask[1] |= ((u32)1 << (q0->rxq_id - 32));
if (q1) {
if (q1->rxq_id < 32)
rxq_mask[0] |= ((u32)1 << q1->rxq_id);
else
rxq_mask[1] |= ((u32)
1 << (q1->rxq_id - 32));
}
}
__bna_multi_rxq_stop(rxp, rxq_mask);
}
void
bna_rx_sm_rxq_stop_wait(struct bna_rx *rx, enum bna_rx_event event)
{
struct bna_rxp *rxp = NULL;
struct list_head *qe;
switch (event) {
case RX_E_RXQ_STOPPED:
list_for_each(qe, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe;
bna_ib_stop(rxp->cq.ib);
}
/* Fall through */
case RX_E_FAIL:
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
break;
default:
bfa_sm_fault(rx->bna, event);
break;
}
}
void
__bna_multi_rxq_stop(struct bna_rxp *rxp, u32 *rxq_id_mask)
{
struct bfi_ll_q_stop_req ll_req;
bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_RXQ_STOP_REQ, 0);
ll_req.q_id_mask[0] = htonl(rxq_id_mask[0]);
ll_req.q_id_mask[1] = htonl(rxq_id_mask[1]);
bna_mbox_qe_fill(&rxp->mbox_qe, &ll_req, sizeof(ll_req),
bna_rx_cb_multi_rxq_stopped, rxp);
bna_mbox_send(rxp->rx->bna, &rxp->mbox_qe);
}
void
__bna_rxq_start(struct bna_rxq *rxq)
{
struct bna_rxtx_q_mem *q_mem;
struct bna_rxq_mem rxq_cfg, *rxq_mem;
struct bna_dma_addr cur_q_addr;
/* struct bna_doorbell_qset *qset; */
struct bna_qpt *qpt;
u32 pg_num;
struct bna *bna = rxq->rx->bna;
void __iomem *base_addr;
unsigned long off;
qpt = &rxq->qpt;
cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
rxq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
rxq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
rxq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
rxq_cfg.cur_q_entry_hi = cur_q_addr.msb;
rxq_cfg.pg_cnt_n_prd_ptr = ((u32)qpt->page_count << 16) | 0x0;
rxq_cfg.entry_n_pg_size = ((u32)(BFI_RXQ_WI_SIZE >> 2) << 16) |
(qpt->page_size >> 2);
rxq_cfg.sg_n_cq_n_cns_ptr =
((u32)(rxq->rxp->cq.cq_id & 0xff) << 16) | 0x0;
rxq_cfg.buf_sz_n_q_state = ((u32)rxq->buffer_size << 16) |
BNA_Q_IDLE_STATE;
rxq_cfg.next_qid = 0x0 | (0x3 << 8);
/* Write the page number register */
pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
HQM_RXTX_Q_RAM_BASE_OFFSET);
writel(pg_num, bna->regs.page_addr);
/* Write to h/w */
base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
HQM_RXTX_Q_RAM_BASE_OFFSET);
q_mem = (struct bna_rxtx_q_mem *)0;
rxq_mem = &q_mem[rxq->rxq_id].rxq;
off = (unsigned long)&rxq_mem->pg_tbl_addr_lo;
writel(htonl(rxq_cfg.pg_tbl_addr_lo), base_addr + off);
off = (unsigned long)&rxq_mem->pg_tbl_addr_hi;
writel(htonl(rxq_cfg.pg_tbl_addr_hi), base_addr + off);
off = (unsigned long)&rxq_mem->cur_q_entry_lo;
writel(htonl(rxq_cfg.cur_q_entry_lo), base_addr + off);
off = (unsigned long)&rxq_mem->cur_q_entry_hi;
writel(htonl(rxq_cfg.cur_q_entry_hi), base_addr + off);
off = (unsigned long)&rxq_mem->pg_cnt_n_prd_ptr;
writel(rxq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
off = (unsigned long)&rxq_mem->entry_n_pg_size;
writel(rxq_cfg.entry_n_pg_size, base_addr + off);
off = (unsigned long)&rxq_mem->sg_n_cq_n_cns_ptr;
writel(rxq_cfg.sg_n_cq_n_cns_ptr, base_addr + off);
off = (unsigned long)&rxq_mem->buf_sz_n_q_state;
writel(rxq_cfg.buf_sz_n_q_state, base_addr + off);
off = (unsigned long)&rxq_mem->next_qid;
writel(rxq_cfg.next_qid, base_addr + off);
rxq->rcb->producer_index = 0;
rxq->rcb->consumer_index = 0;
}
void
__bna_cq_start(struct bna_cq *cq)
{
struct bna_cq_mem cq_cfg, *cq_mem;
const struct bna_qpt *qpt;
struct bna_dma_addr cur_q_addr;
u32 pg_num;
struct bna *bna = cq->rx->bna;
void __iomem *base_addr;
unsigned long off;
qpt = &cq->qpt;
cur_q_addr = *((struct bna_dma_addr *)(qpt->kv_qpt_ptr));
/*
* Fill out structure, to be subsequently written
* to hardware
*/
cq_cfg.pg_tbl_addr_lo = qpt->hw_qpt_ptr.lsb;
cq_cfg.pg_tbl_addr_hi = qpt->hw_qpt_ptr.msb;
cq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
cq_cfg.cur_q_entry_hi = cur_q_addr.msb;
cq_cfg.pg_cnt_n_prd_ptr = (qpt->page_count << 16) | 0x0;
cq_cfg.entry_n_pg_size =
((u32)(BFI_CQ_WI_SIZE >> 2) << 16) | (qpt->page_size >> 2);
cq_cfg.int_blk_n_cns_ptr = ((((u32)cq->ib_seg_offset) << 24) |
((u32)(cq->ib->ib_id & 0xff) << 16) | 0x0);
cq_cfg.q_state = BNA_Q_IDLE_STATE;
/* Write the page number register */
pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + bna->port_num,
HQM_CQ_RAM_BASE_OFFSET);
writel(pg_num, bna->regs.page_addr);
/* H/W write */
base_addr = BNA_GET_MEM_BASE_ADDR(bna->pcidev.pci_bar_kva,
HQM_CQ_RAM_BASE_OFFSET);
cq_mem = (struct bna_cq_mem *)0;
off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_lo;
writel(htonl(cq_cfg.pg_tbl_addr_lo), base_addr + off);
off = (unsigned long)&cq_mem[cq->cq_id].pg_tbl_addr_hi;
writel(htonl(cq_cfg.pg_tbl_addr_hi), base_addr + off);
off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_lo;
writel(htonl(cq_cfg.cur_q_entry_lo), base_addr + off);
off = (unsigned long)&cq_mem[cq->cq_id].cur_q_entry_hi;
writel(htonl(cq_cfg.cur_q_entry_hi), base_addr + off);
off = (unsigned long)&cq_mem[cq->cq_id].pg_cnt_n_prd_ptr;
writel(cq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
off = (unsigned long)&cq_mem[cq->cq_id].entry_n_pg_size;
writel(cq_cfg.entry_n_pg_size, base_addr + off);
off = (unsigned long)&cq_mem[cq->cq_id].int_blk_n_cns_ptr;
writel(cq_cfg.int_blk_n_cns_ptr, base_addr + off);
off = (unsigned long)&cq_mem[cq->cq_id].q_state;
writel(cq_cfg.q_state, base_addr + off);
cq->ccb->producer_index = 0;
*(cq->ccb->hw_producer_index) = 0;
}
void
bna_rit_create(struct bna_rx *rx)
{
struct list_head *qe_rxp;
struct bna_rxp *rxp;
struct bna_rxq *q0 = NULL;
struct bna_rxq *q1 = NULL;
int offset;
offset = 0;
list_for_each(qe_rxp, &rx->rxp_q) {
rxp = (struct bna_rxp *)qe_rxp;
GET_RXQS(rxp, q0, q1);
rx->rxf.rit_segment->rit[offset].large_rxq_id = q0->rxq_id;
rx->rxf.rit_segment->rit[offset].small_rxq_id =
(q1 ? q1->rxq_id : 0);
offset++;
}
}
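/*
 * One RIT entry is filled per rx-path: the large (or only) RXQ id and,
 * for two-queue paths, the small RXQ id. The table built here is
 * written to hardware later by __rxf_rit_set().
 */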
static int
_rx_can_satisfy(struct bna_rx_mod *rx_mod,
struct bna_rx_config *rx_cfg)
{
if ((rx_mod->rx_free_count == 0) ||
(rx_mod->rxp_free_count == 0) ||
(rx_mod->rxq_free_count == 0))
return 0;
if (rx_cfg->rxp_type == BNA_RXP_SINGLE) {
if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
(rx_mod->rxq_free_count < rx_cfg->num_paths))
return 0;
} else {
if ((rx_mod->rxp_free_count < rx_cfg->num_paths) ||
(rx_mod->rxq_free_count < (2 * rx_cfg->num_paths)))
return 0;
}
if (!bna_rit_mod_can_satisfy(&rx_mod->bna->rit_mod, rx_cfg->num_paths))
return 0;
return 1;
}
static struct bna_rxq *
_get_free_rxq(struct bna_rx_mod *rx_mod)
{
struct bna_rxq *rxq = NULL;
struct list_head *qe = NULL;
bfa_q_deq(&rx_mod->rxq_free_q, &qe);
if (qe) {
rx_mod->rxq_free_count--;
rxq = (struct bna_rxq *)qe;
}
return rxq;
}
static void
_put_free_rxq(struct bna_rx_mod *rx_mod, struct bna_rxq *rxq)
{
bfa_q_qe_init(&rxq->qe);
list_add_tail(&rxq->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
static struct bna_rxp *
_get_free_rxp(struct bna_rx_mod *rx_mod)
{
struct list_head *qe = NULL;
struct bna_rxp *rxp = NULL;
bfa_q_deq(&rx_mod->rxp_free_q, &qe);
if (qe) {
rx_mod->rxp_free_count--;
rxp = (struct bna_rxp *)qe;
}
return rxp;
}
static void
_put_free_rxp(struct bna_rx_mod *rx_mod, struct bna_rxp *rxp)
{
bfa_q_qe_init(&rxp->qe);
list_add_tail(&rxp->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
static struct bna_rx *
_get_free_rx(struct bna_rx_mod *rx_mod)
{
struct list_head *qe = NULL;
struct bna_rx *rx = NULL;
bfa_q_deq(&rx_mod->rx_free_q, &qe);
if (qe) {
rx_mod->rx_free_count--;
rx = (struct bna_rx *)qe;
bfa_q_qe_init(qe);
list_add_tail(&rx->qe, &rx_mod->rx_active_q);
}
return rx;
}
static void
_put_free_rx(struct bna_rx_mod *rx_mod, struct bna_rx *rx)
{
bfa_q_qe_init(&rx->qe);
list_add_tail(&rx->qe, &rx_mod->rx_free_q);
rx_mod->rx_free_count++;
}
static void
_rx_init(struct bna_rx *rx, struct bna *bna)
{
rx->bna = bna;
rx->rx_flags = 0;
INIT_LIST_HEAD(&rx->rxp_q);
rx->rxq_stop_wc.wc_resume = bna_rx_cb_rxq_stopped_all;
rx->rxq_stop_wc.wc_cbarg = rx;
rx->rxq_stop_wc.wc_count = 0;
rx->stop_cbfn = NULL;
rx->stop_cbarg = NULL;
}
static void
_rxp_add_rxqs(struct bna_rxp *rxp,
struct bna_rxq *q0,
struct bna_rxq *q1)
{
switch (rxp->type) {
case BNA_RXP_SINGLE:
rxp->rxq.single.only = q0;
rxp->rxq.single.reserved = NULL;
break;
case BNA_RXP_SLR:
rxp->rxq.slr.large = q0;
rxp->rxq.slr.small = q1;
break;
case BNA_RXP_HDS:
rxp->rxq.hds.data = q0;
rxp->rxq.hds.hdr = q1;
break;
default:
break;
}
}
static void
_rxq_qpt_init(struct bna_rxq *rxq,
struct bna_rxp *rxp,
u32 page_count,
u32 page_size,
struct bna_mem_descr *qpt_mem,
struct bna_mem_descr *swqpt_mem,
struct bna_mem_descr *page_mem)
{
int i;
rxq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
rxq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
rxq->qpt.kv_qpt_ptr = qpt_mem->kva;
rxq->qpt.page_count = page_count;
rxq->qpt.page_size = page_size;
rxq->rcb->sw_qpt = (void **) swqpt_mem->kva;
for (i = 0; i < rxq->qpt.page_count; i++) {
rxq->rcb->sw_qpt[i] = page_mem[i].kva;
((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].lsb =
page_mem[i].dma.lsb;
((struct bna_dma_addr *)rxq->qpt.kv_qpt_ptr)[i].msb =
page_mem[i].dma.msb;
}
}
static void
_rxp_cqpt_setup(struct bna_rxp *rxp,
u32 page_count,
u32 page_size,
struct bna_mem_descr *qpt_mem,
struct bna_mem_descr *swqpt_mem,
struct bna_mem_descr *page_mem)
{
int i;
rxp->cq.qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
rxp->cq.qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
rxp->cq.qpt.kv_qpt_ptr = qpt_mem->kva;
rxp->cq.qpt.page_count = page_count;
rxp->cq.qpt.page_size = page_size;
rxp->cq.ccb->sw_qpt = (void **) swqpt_mem->kva;
for (i = 0; i < rxp->cq.qpt.page_count; i++) {
rxp->cq.ccb->sw_qpt[i] = page_mem[i].kva;
((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].lsb =
page_mem[i].dma.lsb;
((struct bna_dma_addr *)rxp->cq.qpt.kv_qpt_ptr)[i].msb =
page_mem[i].dma.msb;
}
}
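/*
 * Both _rxq_qpt_init() and _rxp_cqpt_setup() build a two-level queue
 * page table: kv_qpt_ptr collects the DMA address of every queue page
 * for the hardware to walk, while sw_qpt shadows the kernel virtual
 * address of the same pages for the driver's own use.
 */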
static void
_rx_add_rxp(struct bna_rx *rx, struct bna_rxp *rxp)
{
list_add_tail(&rxp->qe, &rx->rxp_q);
}
static void
_init_rxmod_queues(struct bna_rx_mod *rx_mod)
{
INIT_LIST_HEAD(&rx_mod->rx_free_q);
INIT_LIST_HEAD(&rx_mod->rxq_free_q);
INIT_LIST_HEAD(&rx_mod->rxp_free_q);
INIT_LIST_HEAD(&rx_mod->rx_active_q);
rx_mod->rx_free_count = 0;
rx_mod->rxq_free_count = 0;
rx_mod->rxp_free_count = 0;
}
static void
_rx_ctor(struct bna_rx *rx, int id)
{
bfa_q_qe_init(&rx->qe);
INIT_LIST_HEAD(&rx->rxp_q);
rx->bna = NULL;
rx->rxf.rxf_id = id;
/* FIXME: mbox_qe ctor()?? */
bfa_q_qe_init(&rx->mbox_qe.qe);
rx->stop_cbfn = NULL;
rx->stop_cbarg = NULL;
}
void
bna_rx_cb_multi_rxq_stopped(void *arg, int status)
{
struct bna_rxp *rxp = (struct bna_rxp *)arg;
bfa_wc_down(&rxp->rx->rxq_stop_wc);
}
void
bna_rx_cb_rxq_stopped_all(void *arg)
{
struct bna_rx *rx = (struct bna_rx *)arg;
bfa_fsm_send_event(rx, RX_E_RXQ_STOPPED);
}
static void
bna_rx_mod_cb_rx_stopped(void *arg, struct bna_rx *rx,
enum bna_cb_status status)
{
struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
bfa_wc_down(&rx_mod->rx_stop_wc);
}
static void
bna_rx_mod_cb_rx_stopped_all(void *arg)
{
struct bna_rx_mod *rx_mod = (struct bna_rx_mod *)arg;
if (rx_mod->stop_cbfn)
rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
rx_mod->stop_cbfn = NULL;
}
static void
bna_rx_start(struct bna_rx *rx)
{
rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
if (rx->rx_flags & BNA_RX_F_ENABLE)
bfa_fsm_send_event(rx, RX_E_START);
}
static void
bna_rx_stop(struct bna_rx *rx)
{
rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
if (rx->fsm == (bfa_fsm_t) bna_rx_sm_stopped)
bna_rx_mod_cb_rx_stopped(&rx->bna->rx_mod, rx, BNA_CB_SUCCESS);
else {
rx->stop_cbfn = bna_rx_mod_cb_rx_stopped;
rx->stop_cbarg = &rx->bna->rx_mod;
bfa_fsm_send_event(rx, RX_E_STOP);
}
}
static void
bna_rx_fail(struct bna_rx *rx)
{
/* Indicate that the port is not enabled and has failed */
rx->rx_flags &= ~BNA_RX_F_PORT_ENABLED;
rx->rx_flags |= BNA_RX_F_PORT_FAILED;
bfa_fsm_send_event(rx, RX_E_FAIL);
}
void
bna_rx_mod_start(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
struct list_head *qe;
rx_mod->flags |= BNA_RX_MOD_F_PORT_STARTED;
if (type == BNA_RX_T_LOOPBACK)
rx_mod->flags |= BNA_RX_MOD_F_PORT_LOOPBACK;
list_for_each(qe, &rx_mod->rx_active_q) {
rx = (struct bna_rx *)qe;
if (rx->type == type)
bna_rx_start(rx);
}
}
void
bna_rx_mod_stop(struct bna_rx_mod *rx_mod, enum bna_rx_type type)
{
struct bna_rx *rx;
struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
rx_mod->stop_cbfn = bna_port_cb_rx_stopped;
/**
* Before calling bna_rx_stop(), increment rx_stop_wc as many times
* as we are going to call bna_rx_stop
*/
list_for_each(qe, &rx_mod->rx_active_q) {
rx = (struct bna_rx *)qe;
if (rx->type == type)
bfa_wc_up(&rx_mod->rx_stop_wc);
}
if (rx_mod->rx_stop_wc.wc_count == 0) {
rx_mod->stop_cbfn(&rx_mod->bna->port, BNA_CB_SUCCESS);
rx_mod->stop_cbfn = NULL;
return;
}
list_for_each(qe, &rx_mod->rx_active_q) {
rx = (struct bna_rx *)qe;
if (rx->type == type)
bna_rx_stop(rx);
}
}
void
bna_rx_mod_fail(struct bna_rx_mod *rx_mod)
{
struct bna_rx *rx;
struct list_head *qe;
rx_mod->flags &= ~BNA_RX_MOD_F_PORT_STARTED;
rx_mod->flags &= ~BNA_RX_MOD_F_PORT_LOOPBACK;
list_for_each(qe, &rx_mod->rx_active_q) {
rx = (struct bna_rx *)qe;
bna_rx_fail(rx);
}
}
void bna_rx_mod_init(struct bna_rx_mod *rx_mod, struct bna *bna,
struct bna_res_info *res_info)
{
int index;
struct bna_rx *rx_ptr;
struct bna_rxp *rxp_ptr;
struct bna_rxq *rxq_ptr;
rx_mod->bna = bna;
rx_mod->flags = 0;
rx_mod->rx = (struct bna_rx *)
res_info[BNA_RES_MEM_T_RX_ARRAY].res_u.mem_info.mdl[0].kva;
rx_mod->rxp = (struct bna_rxp *)
res_info[BNA_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mdl[0].kva;
rx_mod->rxq = (struct bna_rxq *)
res_info[BNA_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mdl[0].kva;
/* Initialize the queues */
_init_rxmod_queues(rx_mod);
/* Build RX queues */
for (index = 0; index < BFI_MAX_RXQ; index++) {
rx_ptr = &rx_mod->rx[index];
_rx_ctor(rx_ptr, index);
list_add_tail(&rx_ptr->qe, &rx_mod->rx_free_q);
rx_mod->rx_free_count++;
}
/* build RX-path queue */
for (index = 0; index < BFI_MAX_RXQ; index++) {
rxp_ptr = &rx_mod->rxp[index];
rxp_ptr->cq.cq_id = index;
bfa_q_qe_init(&rxp_ptr->qe);
list_add_tail(&rxp_ptr->qe, &rx_mod->rxp_free_q);
rx_mod->rxp_free_count++;
}
/* build RXQ queue */
for (index = 0; index < BFI_MAX_RXQ; index++) {
rxq_ptr = &rx_mod->rxq[index];
rxq_ptr->rxq_id = index;
bfa_q_qe_init(&rxq_ptr->qe);
list_add_tail(&rxq_ptr->qe, &rx_mod->rxq_free_q);
rx_mod->rxq_free_count++;
}
rx_mod->rx_stop_wc.wc_resume = bna_rx_mod_cb_rx_stopped_all;
rx_mod->rx_stop_wc.wc_cbarg = rx_mod;
rx_mod->rx_stop_wc.wc_count = 0;
}
void
bna_rx_mod_uninit(struct bna_rx_mod *rx_mod)
{
struct list_head *qe;
int i;
i = 0;
list_for_each(qe, &rx_mod->rx_free_q)
i++;
i = 0;
list_for_each(qe, &rx_mod->rxp_free_q)
i++;
i = 0;
list_for_each(qe, &rx_mod->rxq_free_q)
i++;
rx_mod->bna = NULL;
}
int
bna_rx_state_get(struct bna_rx *rx)
{
return bfa_sm_to_state(rx_sm_table, rx->fsm);
}
void
bna_rx_res_req(struct bna_rx_config *q_cfg, struct bna_res_info *res_info)
{
u32 cq_size, hq_size, dq_size;
u32 cpage_count, hpage_count, dpage_count;
struct bna_mem_info *mem_info;
u32 cq_depth;
u32 hq_depth;
u32 dq_depth;
dq_depth = q_cfg->q_depth;
hq_depth = ((q_cfg->rxp_type == BNA_RXP_SINGLE) ? 0 : q_cfg->q_depth);
cq_depth = dq_depth + hq_depth;
BNA_TO_POWER_OF_2_HIGH(cq_depth);
cq_size = cq_depth * BFI_CQ_WI_SIZE;
cq_size = ALIGN(cq_size, PAGE_SIZE);
cpage_count = SIZE_TO_PAGES(cq_size);
BNA_TO_POWER_OF_2_HIGH(dq_depth);
dq_size = dq_depth * BFI_RXQ_WI_SIZE;
dq_size = ALIGN(dq_size, PAGE_SIZE);
dpage_count = SIZE_TO_PAGES(dq_size);
if (BNA_RXP_SINGLE != q_cfg->rxp_type) {
BNA_TO_POWER_OF_2_HIGH(hq_depth);
hq_size = hq_depth * BFI_RXQ_WI_SIZE;
hq_size = ALIGN(hq_size, PAGE_SIZE);
hpage_count = SIZE_TO_PAGES(hq_size);
} else {
hpage_count = 0;
}
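/*
 * Worked example (illustrative): q_depth 1000 on an SLR rx-path gives
 * dq_depth = hq_depth = 1000 and cq_depth = 2000, which
 * BNA_TO_POWER_OF_2_HIGH rounds up to 2048 before the completion
 * queue is sized.
 */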
/* CCB structures */
res_info[BNA_RX_RES_MEM_T_CCB].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = sizeof(struct bna_ccb);
mem_info->num = q_cfg->num_paths;
/* RCB structures */
res_info[BNA_RX_RES_MEM_T_RCB].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = sizeof(struct bna_rcb);
mem_info->num = BNA_GET_RXQS(q_cfg);
/* Completion QPT */
res_info[BNA_RX_RES_MEM_T_CQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = cpage_count * sizeof(struct bna_dma_addr);
mem_info->num = q_cfg->num_paths;
/* Completion s/w QPT */
res_info[BNA_RX_RES_MEM_T_CSWQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = cpage_count * sizeof(void *);
mem_info->num = q_cfg->num_paths;
/* Completion QPT pages */
res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = PAGE_SIZE;
mem_info->num = cpage_count * q_cfg->num_paths;
/* Data QPTs */
res_info[BNA_RX_RES_MEM_T_DQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = dpage_count * sizeof(struct bna_dma_addr);
mem_info->num = q_cfg->num_paths;
/* Data s/w QPTs */
res_info[BNA_RX_RES_MEM_T_DSWQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = dpage_count * sizeof(void *);
mem_info->num = q_cfg->num_paths;
/* Data QPT pages */
res_info[BNA_RX_RES_MEM_T_DPAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = PAGE_SIZE;
mem_info->num = dpage_count * q_cfg->num_paths;
/* Hdr QPTs */
res_info[BNA_RX_RES_MEM_T_HQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = hpage_count * sizeof(struct bna_dma_addr);
mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
/* Hdr s/w QPTs */
res_info[BNA_RX_RES_MEM_T_HSWQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = hpage_count * sizeof(void *);
mem_info->num = (hpage_count ? q_cfg->num_paths : 0);
/* Hdr QPT pages */
res_info[BNA_RX_RES_MEM_T_HPAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = (hpage_count ? PAGE_SIZE : 0);
mem_info->num = (hpage_count ? (hpage_count * q_cfg->num_paths) : 0);
/* RX Interrupts */
res_info[BNA_RX_RES_T_INTR].res_type = BNA_RES_T_INTR;
res_info[BNA_RX_RES_T_INTR].res_u.intr_info.intr_type = BNA_INTR_T_MSIX;
res_info[BNA_RX_RES_T_INTR].res_u.intr_info.num = q_cfg->num_paths;
}
struct bna_rx *
bna_rx_create(struct bna *bna, struct bnad *bnad,
struct bna_rx_config *rx_cfg,
struct bna_rx_event_cbfn *rx_cbfn,
struct bna_res_info *res_info,
void *priv)
{
struct bna_rx_mod *rx_mod = &bna->rx_mod;
struct bna_rx *rx;
struct bna_rxp *rxp;
struct bna_rxq *q0;
struct bna_rxq *q1;
struct bna_intr_info *intr_info;
u32 page_count;
struct bna_mem_descr *ccb_mem;
struct bna_mem_descr *rcb_mem;
struct bna_mem_descr *unmapq_mem;
struct bna_mem_descr *cqpt_mem;
struct bna_mem_descr *cswqpt_mem;
struct bna_mem_descr *cpage_mem;
struct bna_mem_descr *hqpt_mem; /* Header/Small Q qpt */
struct bna_mem_descr *dqpt_mem; /* Data/Large Q qpt */
struct bna_mem_descr *hsqpt_mem; /* s/w qpt for hdr */
struct bna_mem_descr *dsqpt_mem; /* s/w qpt for data */
struct bna_mem_descr *hpage_mem; /* hdr page mem */
struct bna_mem_descr *dpage_mem; /* data page mem */
int i, cpage_idx = 0, dpage_idx = 0, hpage_idx = 0;
int dpage_count, hpage_count, rcb_idx;
struct bna_ib_config ibcfg;
/* Fail if we don't have enough RXPs, RXQs */
if (!_rx_can_satisfy(rx_mod, rx_cfg))
return NULL;
/* Initialize resource pointers */
intr_info = &res_info[BNA_RX_RES_T_INTR].res_u.intr_info;
ccb_mem = &res_info[BNA_RX_RES_MEM_T_CCB].res_u.mem_info.mdl[0];
rcb_mem = &res_info[BNA_RX_RES_MEM_T_RCB].res_u.mem_info.mdl[0];
unmapq_mem = &res_info[BNA_RX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[0];
cqpt_mem = &res_info[BNA_RX_RES_MEM_T_CQPT].res_u.mem_info.mdl[0];
cswqpt_mem = &res_info[BNA_RX_RES_MEM_T_CSWQPT].res_u.mem_info.mdl[0];
cpage_mem = &res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.mdl[0];
hqpt_mem = &res_info[BNA_RX_RES_MEM_T_HQPT].res_u.mem_info.mdl[0];
dqpt_mem = &res_info[BNA_RX_RES_MEM_T_DQPT].res_u.mem_info.mdl[0];
hsqpt_mem = &res_info[BNA_RX_RES_MEM_T_HSWQPT].res_u.mem_info.mdl[0];
dsqpt_mem = &res_info[BNA_RX_RES_MEM_T_DSWQPT].res_u.mem_info.mdl[0];
hpage_mem = &res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.mdl[0];
dpage_mem = &res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.mdl[0];
/* Compute q depth & page count */
page_count = res_info[BNA_RX_RES_MEM_T_CQPT_PAGE].res_u.mem_info.num /
rx_cfg->num_paths;
dpage_count = res_info[BNA_RX_RES_MEM_T_DPAGE].res_u.mem_info.num /
rx_cfg->num_paths;
hpage_count = res_info[BNA_RX_RES_MEM_T_HPAGE].res_u.mem_info.num /
rx_cfg->num_paths;
/* Get RX pointer */
rx = _get_free_rx(rx_mod);
_rx_init(rx, bna);
rx->priv = priv;
rx->type = rx_cfg->rx_type;
rx->rcb_setup_cbfn = rx_cbfn->rcb_setup_cbfn;
rx->rcb_destroy_cbfn = rx_cbfn->rcb_destroy_cbfn;
rx->ccb_setup_cbfn = rx_cbfn->ccb_setup_cbfn;
rx->ccb_destroy_cbfn = rx_cbfn->ccb_destroy_cbfn;
/* Following callbacks are mandatory */
rx->rx_cleanup_cbfn = rx_cbfn->rx_cleanup_cbfn;
rx->rx_post_cbfn = rx_cbfn->rx_post_cbfn;
if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_STARTED) {
switch (rx->type) {
case BNA_RX_T_REGULAR:
if (!(rx->bna->rx_mod.flags &
BNA_RX_MOD_F_PORT_LOOPBACK))
rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
break;
case BNA_RX_T_LOOPBACK:
if (rx->bna->rx_mod.flags & BNA_RX_MOD_F_PORT_LOOPBACK)
rx->rx_flags |= BNA_RX_F_PORT_ENABLED;
break;
}
}
for (i = 0, rcb_idx = 0; i < rx_cfg->num_paths; i++) {
rxp = _get_free_rxp(rx_mod);
rxp->type = rx_cfg->rxp_type;
rxp->rx = rx;
rxp->cq.rx = rx;
/* Get required RXQs, and queue them to rx-path */
q0 = _get_free_rxq(rx_mod);
if (BNA_RXP_SINGLE == rx_cfg->rxp_type)
q1 = NULL;
else
q1 = _get_free_rxq(rx_mod);
/* Initialize IB */
if (1 == intr_info->num) {
rxp->cq.ib = bna_ib_get(&bna->ib_mod,
intr_info->intr_type,
intr_info->idl[0].vector);
rxp->vector = intr_info->idl[0].vector;
} else {
rxp->cq.ib = bna_ib_get(&bna->ib_mod,
intr_info->intr_type,
intr_info->idl[i].vector);
/* Map the MSI-x vector used for this RXP */
rxp->vector = intr_info->idl[i].vector;
}
rxp->cq.ib_seg_offset = bna_ib_reserve_idx(rxp->cq.ib);
ibcfg.coalescing_timeo = BFI_RX_COALESCING_TIMEO;
ibcfg.interpkt_count = BFI_RX_INTERPKT_COUNT;
ibcfg.interpkt_timeo = BFI_RX_INTERPKT_TIMEO;
ibcfg.ctrl_flags = BFI_IB_CF_INT_ENABLE;
bna_ib_config(rxp->cq.ib, &ibcfg);
/* Link rxqs to rxp */
_rxp_add_rxqs(rxp, q0, q1);
/* Link rxp to rx */
_rx_add_rxp(rx, rxp);
q0->rx = rx;
q0->rxp = rxp;
/* Initialize RCB for the large / data q */
q0->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
RXQ_RCB_INIT(q0, rxp, rx_cfg->q_depth, bna, 0,
(void *)unmapq_mem[rcb_idx].kva);
rcb_idx++;
q0->rx_packets = q0->rx_bytes = 0;
q0->rx_packets_with_error = q0->rxbuf_alloc_failed = 0;
/* Initialize RXQs */
_rxq_qpt_init(q0, rxp, dpage_count, PAGE_SIZE,
&dqpt_mem[i], &dsqpt_mem[i], &dpage_mem[dpage_idx]);
q0->rcb->page_idx = dpage_idx;
q0->rcb->page_count = dpage_count;
dpage_idx += dpage_count;
/* Call bnad to complete rcb setup */
if (rx->rcb_setup_cbfn)
rx->rcb_setup_cbfn(bnad, q0->rcb);
if (q1) {
q1->rx = rx;
q1->rxp = rxp;
q1->rcb = (struct bna_rcb *) rcb_mem[rcb_idx].kva;
RXQ_RCB_INIT(q1, rxp, rx_cfg->q_depth, bna, 1,
(void *)unmapq_mem[rcb_idx].kva);
rcb_idx++;
q1->buffer_size = rx_cfg->small_buff_size;
q1->rx_packets = q1->rx_bytes = 0;
q1->rx_packets_with_error = q1->rxbuf_alloc_failed = 0;
_rxq_qpt_init(q1, rxp, hpage_count, PAGE_SIZE,
&hqpt_mem[i], &hsqpt_mem[i],
&hpage_mem[hpage_idx]);
q1->rcb->page_idx = hpage_idx;
q1->rcb->page_count = hpage_count;
hpage_idx += hpage_count;
/* Call bnad to complete rcb setup */
if (rx->rcb_setup_cbfn)
rx->rcb_setup_cbfn(bnad, q1->rcb);
}
/* Setup RXP::CQ */
rxp->cq.ccb = (struct bna_ccb *) ccb_mem[i].kva;
_rxp_cqpt_setup(rxp, page_count, PAGE_SIZE,
&cqpt_mem[i], &cswqpt_mem[i], &cpage_mem[cpage_idx]);
rxp->cq.ccb->page_idx = cpage_idx;
rxp->cq.ccb->page_count = page_count;
cpage_idx += page_count;
rxp->cq.ccb->pkt_rate.small_pkt_cnt = 0;
rxp->cq.ccb->pkt_rate.large_pkt_cnt = 0;
rxp->cq.ccb->producer_index = 0;
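/* The CQ receives completions from both RxQs when the RxP is not
 * single, so its depth is the sum of both queue depths. */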
rxp->cq.ccb->q_depth = rx_cfg->q_depth +
((rx_cfg->rxp_type == BNA_RXP_SINGLE) ?
0 : rx_cfg->q_depth);
rxp->cq.ccb->i_dbell = &rxp->cq.ib->door_bell;
rxp->cq.ccb->rcb[0] = q0->rcb;
if (q1)
rxp->cq.ccb->rcb[1] = q1->rcb;
rxp->cq.ccb->cq = &rxp->cq;
rxp->cq.ccb->bnad = bna->bnad;
rxp->cq.ccb->hw_producer_index =
((volatile u32 *)rxp->cq.ib->ib_seg_host_addr_kva +
(rxp->cq.ib_seg_offset * BFI_IBIDX_SIZE));
*(rxp->cq.ccb->hw_producer_index) = 0;
rxp->cq.ccb->intr_type = intr_info->intr_type;
rxp->cq.ccb->intr_vector = (intr_info->num == 1) ?
intr_info->idl[0].vector :
intr_info->idl[i].vector;
rxp->cq.ccb->rx_coalescing_timeo =
rxp->cq.ib->ib_config.coalescing_timeo;
rxp->cq.ccb->id = i;
/* Call bnad to complete CCB setup */
if (rx->ccb_setup_cbfn)
rx->ccb_setup_cbfn(bnad, rxp->cq.ccb);
} /* for each rx-path */
bna_rxf_init(&rx->rxf, rx, rx_cfg);
bfa_fsm_set_state(rx, bna_rx_sm_stopped);
return rx;
}
void
bna_rx_destroy(struct bna_rx *rx)
{
struct bna_rx_mod *rx_mod = &rx->bna->rx_mod;
struct bna_ib_mod *ib_mod = &rx->bna->ib_mod;
struct bna_rxq *q0 = NULL;
struct bna_rxq *q1 = NULL;
struct bna_rxp *rxp;
struct list_head *qe;
bna_rxf_uninit(&rx->rxf);
while (!list_empty(&rx->rxp_q)) {
bfa_q_deq(&rx->rxp_q, &rxp);
GET_RXQS(rxp, q0, q1);
/* Callback to bnad for destroying RCB */
if (rx->rcb_destroy_cbfn)
rx->rcb_destroy_cbfn(rx->bna->bnad, q0->rcb);
q0->rcb = NULL;
q0->rxp = NULL;
q0->rx = NULL;
_put_free_rxq(rx_mod, q0);
if (q1) {
/* Callback to bnad for destroying RCB */
if (rx->rcb_destroy_cbfn)
rx->rcb_destroy_cbfn(rx->bna->bnad, q1->rcb);
q1->rcb = NULL;
q1->rxp = NULL;
q1->rx = NULL;
_put_free_rxq(rx_mod, q1);
}
rxp->rxq.slr.large = NULL;
rxp->rxq.slr.small = NULL;
if (rxp->cq.ib) {
if (rxp->cq.ib_seg_offset != 0xff)
bna_ib_release_idx(rxp->cq.ib,
rxp->cq.ib_seg_offset);
bna_ib_put(ib_mod, rxp->cq.ib);
rxp->cq.ib = NULL;
}
/* Callback to bnad for destroying CCB */
if (rx->ccb_destroy_cbfn)
rx->ccb_destroy_cbfn(rx->bna->bnad, rxp->cq.ccb);
rxp->cq.ccb = NULL;
rxp->rx = NULL;
_put_free_rxp(rx_mod, rxp);
}
list_for_each(qe, &rx_mod->rx_active_q) {
if (qe == &rx->qe) {
list_del(&rx->qe);
bfa_q_qe_init(&rx->qe);
break;
}
}
rx->bna = NULL;
rx->priv = NULL;
_put_free_rx(rx_mod, rx);
}
void
bna_rx_enable(struct bna_rx *rx)
{
if (rx->fsm != (bfa_sm_t)bna_rx_sm_stopped)
return;
rx->rx_flags |= BNA_RX_F_ENABLE;
if (rx->rx_flags & BNA_RX_F_PORT_ENABLED)
bfa_fsm_send_event(rx, RX_E_START);
}
void
bna_rx_disable(struct bna_rx *rx, enum bna_cleanup_type type,
void (*cbfn)(void *, struct bna_rx *,
enum bna_cb_status))
{
if (type == BNA_SOFT_CLEANUP) {
/* h/w should not be accessed. Treat it as if we are stopped */
(*cbfn)(rx->bna->bnad, rx, BNA_CB_SUCCESS);
} else {
rx->stop_cbfn = cbfn;
rx->stop_cbarg = rx->bna->bnad;
rx->rx_flags &= ~BNA_RX_F_ENABLE;
bfa_fsm_send_event(rx, RX_E_STOP);
}
}
/**
* TX
*/
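/* Both callback macros below are one-shot: each clears the stored
 * function pointer after invoking it, so a repeated event cannot fire
 * the same callback twice. */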
#define call_tx_stop_cbfn(tx, status)\
do {\
if ((tx)->stop_cbfn)\
(tx)->stop_cbfn((tx)->stop_cbarg, (tx), status);\
(tx)->stop_cbfn = NULL;\
(tx)->stop_cbarg = NULL;\
} while (0)
#define call_tx_prio_change_cbfn(tx, status)\
do {\
if ((tx)->prio_change_cbfn)\
(tx)->prio_change_cbfn((tx)->bna->bnad, (tx), status);\
(tx)->prio_change_cbfn = NULL;\
} while (0)
static void bna_tx_mod_cb_tx_stopped(void *tx_mod, struct bna_tx *tx,
enum bna_cb_status status);
static void bna_tx_cb_txq_stopped(void *arg, int status);
static void bna_tx_cb_stats_cleared(void *arg, int status);
static void __bna_tx_stop(struct bna_tx *tx);
static void __bna_tx_start(struct bna_tx *tx);
static void __bna_txf_stat_clr(struct bna_tx *tx);
enum bna_tx_event {
TX_E_START = 1,
TX_E_STOP = 2,
TX_E_FAIL = 3,
TX_E_TXQ_STOPPED = 4,
TX_E_PRIO_CHANGE = 5,
TX_E_STAT_CLEARED = 6,
};
enum bna_tx_state {
BNA_TX_STOPPED = 1,
BNA_TX_STARTED = 2,
BNA_TX_TXQ_STOP_WAIT = 3,
BNA_TX_PRIO_STOP_WAIT = 4,
BNA_TX_STAT_CLR_WAIT = 5,
};
bfa_fsm_state_decl(bna_tx, stopped, struct bna_tx,
enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, started, struct bna_tx,
enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, txq_stop_wait, struct bna_tx,
enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, prio_stop_wait, struct bna_tx,
enum bna_tx_event);
bfa_fsm_state_decl(bna_tx, stat_clr_wait, struct bna_tx,
enum bna_tx_event);
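/* Maps each FSM handler back to its BNA_TX_* state value so that
 * bna_tx_state_get() can report a numeric state to callers. */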
static struct bfa_sm_table tx_sm_table[] = {
{BFA_SM(bna_tx_sm_stopped), BNA_TX_STOPPED},
{BFA_SM(bna_tx_sm_started), BNA_TX_STARTED},
{BFA_SM(bna_tx_sm_txq_stop_wait), BNA_TX_TXQ_STOP_WAIT},
{BFA_SM(bna_tx_sm_prio_stop_wait), BNA_TX_PRIO_STOP_WAIT},
{BFA_SM(bna_tx_sm_stat_clr_wait), BNA_TX_STAT_CLR_WAIT},
};
static void
bna_tx_sm_stopped_entry(struct bna_tx *tx)
{
struct bna_txq *txq;
struct list_head *qe;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
}
call_tx_stop_cbfn(tx, BNA_CB_SUCCESS);
}
static void
bna_tx_sm_stopped(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_START:
bfa_fsm_set_state(tx, bna_tx_sm_started);
break;
case TX_E_STOP:
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
case TX_E_FAIL:
/* No-op */
break;
case TX_E_PRIO_CHANGE:
call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
break;
case TX_E_TXQ_STOPPED:
/**
 * This event is received when the mbox is flushed after
 * a device failure.
 */
/* No-op */
break;
default:
bfa_sm_fault(tx->bna, event);
}
}
static void
bna_tx_sm_started_entry(struct bna_tx *tx)
{
struct bna_txq *txq;
struct list_head *qe;
__bna_tx_start(tx);
/* Start IB */
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
bna_ib_ack(&txq->ib->door_bell, 0);
}
}
static void
bna_tx_sm_started(struct bna_tx *tx, enum bna_tx_event event)
{
struct bna_txq *txq;
struct list_head *qe;
switch (event) {
case TX_E_STOP:
bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
__bna_tx_stop(tx);
break;
case TX_E_FAIL:
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
bna_ib_fail(txq->ib);
(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
}
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
case TX_E_PRIO_CHANGE:
bfa_fsm_set_state(tx, bna_tx_sm_prio_stop_wait);
break;
default:
bfa_sm_fault(tx->bna, event);
}
}
static void
bna_tx_sm_txq_stop_wait_entry(struct bna_tx *tx)
{
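/*
 * Intentionally empty: the started-state TX_E_STOP handler calls
 * __bna_tx_stop() right after this transition, so the stop work is
 * issued there and this state only waits for TX_E_TXQ_STOPPED.
 */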
}
static void
bna_tx_sm_txq_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
struct bna_txq *txq;
struct list_head *qe;
switch (event) {
case TX_E_FAIL:
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
case TX_E_TXQ_STOPPED:
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
bna_ib_stop(txq->ib);
}
bfa_fsm_set_state(tx, bna_tx_sm_stat_clr_wait);
break;
case TX_E_PRIO_CHANGE:
/* No-op */
break;
default:
bfa_sm_fault(tx->bna, event);
}
}
static void
bna_tx_sm_prio_stop_wait_entry(struct bna_tx *tx)
{
__bna_tx_stop(tx);
}
static void
bna_tx_sm_prio_stop_wait(struct bna_tx *tx, enum bna_tx_event event)
{
struct bna_txq *txq;
struct list_head *qe;
switch (event) {
case TX_E_STOP:
bfa_fsm_set_state(tx, bna_tx_sm_txq_stop_wait);
break;
case TX_E_FAIL:
call_tx_prio_change_cbfn(tx, BNA_CB_FAIL);
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
case TX_E_TXQ_STOPPED:
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
bna_ib_stop(txq->ib);
(tx->tx_cleanup_cbfn)(tx->bna->bnad, txq->tcb);
}
call_tx_prio_change_cbfn(tx, BNA_CB_SUCCESS);
bfa_fsm_set_state(tx, bna_tx_sm_started);
break;
case TX_E_PRIO_CHANGE:
/* No-op */
break;
default:
bfa_sm_fault(tx->bna, event);
}
}
static void
bna_tx_sm_stat_clr_wait_entry(struct bna_tx *tx)
{
__bna_txf_stat_clr(tx);
}
static void
bna_tx_sm_stat_clr_wait(struct bna_tx *tx, enum bna_tx_event event)
{
switch (event) {
case TX_E_FAIL:
case TX_E_STAT_CLEARED:
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
break;
default:
bfa_sm_fault(tx->bna, event);
}
}
static void
__bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
{
struct bna_rxtx_q_mem *q_mem;
struct bna_txq_mem txq_cfg;
struct bna_txq_mem *txq_mem;
struct bna_dma_addr cur_q_addr;
u32 pg_num;
void __iomem *base_addr;
unsigned long off;
/* Fill out structure, to be subsequently written to hardware */
txq_cfg.pg_tbl_addr_lo = txq->qpt.hw_qpt_ptr.lsb;
txq_cfg.pg_tbl_addr_hi = txq->qpt.hw_qpt_ptr.msb;
cur_q_addr = *((struct bna_dma_addr *)(txq->qpt.kv_qpt_ptr));
txq_cfg.cur_q_entry_lo = cur_q_addr.lsb;
txq_cfg.cur_q_entry_hi = cur_q_addr.msb;
txq_cfg.pg_cnt_n_prd_ptr = (txq->qpt.page_count << 16) | 0x0;
txq_cfg.entry_n_pg_size = ((u32)(BFI_TXQ_WI_SIZE >> 2) << 16) |
(txq->qpt.page_size >> 2);
txq_cfg.int_blk_n_cns_ptr = ((((u32)txq->ib_seg_offset) << 24) |
((u32)(txq->ib->ib_id & 0xff) << 16) | 0x0);
txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
(txq->priority & 0x7));
txq_cfg.wvc_n_cquota_n_rquota =
((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
(BFI_TX_MAX_WRR_QUOTA & 0xfff));
/* Setup the page and write to H/W */
pg_num = BNA_GET_PAGE_NUM(HQM0_BLK_PG_NUM + tx->bna->port_num,
HQM_RXTX_Q_RAM_BASE_OFFSET);
writel(pg_num, tx->bna->regs.page_addr);
base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
HQM_RXTX_Q_RAM_BASE_OFFSET);
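/* Treat a NULL pointer as the base of the queue RAM so that plain
 * member arithmetic yields register offsets (offsetof-style idiom). */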
q_mem = (struct bna_rxtx_q_mem *)0;
txq_mem = &q_mem[txq->txq_id].txq;
/*
 * The following four address writes are a hack because the H/W
 * needs to read these DMA addresses as little endian.
 */
off = (unsigned long)&txq_mem->pg_tbl_addr_lo;
writel(htonl(txq_cfg.pg_tbl_addr_lo), base_addr + off);
off = (unsigned long)&txq_mem->pg_tbl_addr_hi;
writel(htonl(txq_cfg.pg_tbl_addr_hi), base_addr + off);
off = (unsigned long)&txq_mem->cur_q_entry_lo;
writel(htonl(txq_cfg.cur_q_entry_lo), base_addr + off);
off = (unsigned long)&txq_mem->cur_q_entry_hi;
writel(htonl(txq_cfg.cur_q_entry_hi), base_addr + off);
off = (unsigned long)&txq_mem->pg_cnt_n_prd_ptr;
writel(txq_cfg.pg_cnt_n_prd_ptr, base_addr + off);
off = (unsigned long)&txq_mem->entry_n_pg_size;
writel(txq_cfg.entry_n_pg_size, base_addr + off);
off = (unsigned long)&txq_mem->int_blk_n_cns_ptr;
writel(txq_cfg.int_blk_n_cns_ptr, base_addr + off);
off = (unsigned long)&txq_mem->cns_ptr2_n_q_state;
writel(txq_cfg.cns_ptr2_n_q_state, base_addr + off);
off = (unsigned long)&txq_mem->nxt_qid_n_fid_n_pri;
writel(txq_cfg.nxt_qid_n_fid_n_pri, base_addr + off);
off = (unsigned long)&txq_mem->wvc_n_cquota_n_rquota;
writel(txq_cfg.wvc_n_cquota_n_rquota, base_addr + off);
txq->tcb->producer_index = 0;
txq->tcb->consumer_index = 0;
*(txq->tcb->hw_consumer_index) = 0;
}
static void
__bna_txq_stop(struct bna_tx *tx, struct bna_txq *txq)
{
struct bfi_ll_q_stop_req ll_req;
u32 bit_mask[2] = {0, 0};
if (txq->txq_id < 32)
bit_mask[0] = (u32)1 << txq->txq_id;
else
bit_mask[1] = (u32)1 << (txq->txq_id - 32);
memset(&ll_req, 0, sizeof(ll_req));
ll_req.mh.msg_class = BFI_MC_LL;
ll_req.mh.msg_id = BFI_LL_H2I_TXQ_STOP_REQ;
ll_req.mh.mtag.h2i.lpu_id = 0;
ll_req.q_id_mask[0] = htonl(bit_mask[0]);
ll_req.q_id_mask[1] = htonl(bit_mask[1]);
bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
bna_tx_cb_txq_stopped, tx);
bna_mbox_send(tx->bna, &tx->mbox_qe);
}
static void
__bna_txf_start(struct bna_tx *tx)
{
struct bna_tx_fndb_ram *tx_fndb;
struct bna_txf *txf = &tx->txf;
void __iomem *base_addr;
unsigned long off;
writel(BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
(tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET),
tx->bna->regs.page_addr);
base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
TX_FNDB_RAM_BASE_OFFSET);
tx_fndb = (struct bna_tx_fndb_ram *)0;
off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
writel(((u32)txf->vlan << 16) | txf->ctrl_flags,
base_addr + off);
if (tx->txf.txf_id < 32)
tx->bna->tx_mod.txf_bmap[0] |= ((u32)1 << tx->txf.txf_id);
else
tx->bna->tx_mod.txf_bmap[1] |= ((u32)
1 << (tx->txf.txf_id - 32));
}
static void
__bna_txf_stop(struct bna_tx *tx)
{
struct bna_tx_fndb_ram *tx_fndb;
u32 page_num;
u32 ctl_flags;
struct bna_txf *txf = &tx->txf;
void __iomem *base_addr;
unsigned long off;
/* retrieve the running txf_flags & turn off enable bit */
page_num = BNA_GET_PAGE_NUM(LUT0_MEM_BLK_BASE_PG_NUM +
(tx->bna->port_num * 2), TX_FNDB_RAM_BASE_OFFSET);
writel(page_num, tx->bna->regs.page_addr);
base_addr = BNA_GET_MEM_BASE_ADDR(tx->bna->pcidev.pci_bar_kva,
TX_FNDB_RAM_BASE_OFFSET);
tx_fndb = (struct bna_tx_fndb_ram *)0;
off = (unsigned long)&tx_fndb[txf->txf_id].vlan_n_ctrl_flags;
ctl_flags = readl(base_addr + off);
ctl_flags &= ~BFI_TXF_CF_ENABLE;
writel(ctl_flags, base_addr + off);
if (tx->txf.txf_id < 32)
tx->bna->tx_mod.txf_bmap[0] &= ~((u32)1 << tx->txf.txf_id);
else
tx->bna->tx_mod.txf_bmap[1] &= ~((u32)
1 << (tx->txf.txf_id - 32));
}
static void
__bna_txf_stat_clr(struct bna_tx *tx)
{
struct bfi_ll_stats_req ll_req;
u32 txf_bmap[2] = {0, 0};
if (tx->txf.txf_id < 32)
txf_bmap[0] = ((u32)1 << tx->txf.txf_id);
else
txf_bmap[1] = ((u32)1 << (tx->txf.txf_id - 32));
bfi_h2i_set(ll_req.mh, BFI_MC_LL, BFI_LL_H2I_STATS_CLEAR_REQ, 0);
ll_req.stats_mask = 0;
ll_req.rxf_id_mask[0] = 0;
ll_req.rxf_id_mask[1] = 0;
ll_req.txf_id_mask[0] = htonl(txf_bmap[0]);
ll_req.txf_id_mask[1] = htonl(txf_bmap[1]);
bna_mbox_qe_fill(&tx->mbox_qe, &ll_req, sizeof(ll_req),
bna_tx_cb_stats_cleared, tx);
bna_mbox_send(tx->bna, &tx->mbox_qe);
}
static void
__bna_tx_start(struct bna_tx *tx)
{
struct bna_txq *txq;
struct list_head *qe;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
bna_ib_start(txq->ib);
__bna_txq_start(tx, txq);
}
__bna_txf_start(tx);
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
txq->tcb->priority = txq->priority;
(tx->tx_resume_cbfn)(tx->bna->bnad, txq->tcb);
}
}
static void
__bna_tx_stop(struct bna_tx *tx)
{
struct bna_txq *txq;
struct list_head *qe;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
(tx->tx_stall_cbfn)(tx->bna->bnad, txq->tcb);
}
__bna_txf_stop(tx);
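/*
 * Take every worker-counter reference before posting any TxQ stop
 * request, so that an early stop completion cannot fire the
 * all-stopped callback while requests are still being issued.
 */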
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
bfa_wc_up(&tx->txq_stop_wc);
}
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
__bna_txq_stop(tx, txq);
}
}
static void
bna_txq_qpt_setup(struct bna_txq *txq, int page_count, int page_size,
struct bna_mem_descr *qpt_mem,
struct bna_mem_descr *swqpt_mem,
struct bna_mem_descr *page_mem)
{
int i;
txq->qpt.hw_qpt_ptr.lsb = qpt_mem->dma.lsb;
txq->qpt.hw_qpt_ptr.msb = qpt_mem->dma.msb;
txq->qpt.kv_qpt_ptr = qpt_mem->kva;
txq->qpt.page_count = page_count;
txq->qpt.page_size = page_size;
txq->tcb->sw_qpt = (void **) swqpt_mem->kva;
for (i = 0; i < page_count; i++) {
txq->tcb->sw_qpt[i] = page_mem[i].kva;
((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].lsb =
page_mem[i].dma.lsb;
((struct bna_dma_addr *)txq->qpt.kv_qpt_ptr)[i].msb =
page_mem[i].dma.msb;
}
}
static void
bna_tx_free(struct bna_tx *tx)
{
struct bna_tx_mod *tx_mod = &tx->bna->tx_mod;
struct bna_txq *txq;
struct bna_ib_mod *ib_mod = &tx->bna->ib_mod;
struct list_head *qe;
while (!list_empty(&tx->txq_q)) {
bfa_q_deq(&tx->txq_q, &txq);
bfa_q_qe_init(&txq->qe);
if (txq->ib) {
if (txq->ib_seg_offset != -1)
bna_ib_release_idx(txq->ib,
txq->ib_seg_offset);
bna_ib_put(ib_mod, txq->ib);
txq->ib = NULL;
}
txq->tcb = NULL;
txq->tx = NULL;
list_add_tail(&txq->qe, &tx_mod->txq_free_q);
}
list_for_each(qe, &tx_mod->tx_active_q) {
if (qe == &tx->qe) {
list_del(&tx->qe);
bfa_q_qe_init(&tx->qe);
break;
}
}
tx->bna = NULL;
tx->priv = NULL;
list_add_tail(&tx->qe, &tx_mod->tx_free_q);
}
static void
bna_tx_cb_txq_stopped(void *arg, int status)
{
struct bna_tx *tx = (struct bna_tx *)arg;
bfa_q_qe_init(&tx->mbox_qe.qe);
bfa_wc_down(&tx->txq_stop_wc);
}
static void
bna_tx_cb_txq_stopped_all(void *arg)
{
struct bna_tx *tx = (struct bna_tx *)arg;
bfa_fsm_send_event(tx, TX_E_TXQ_STOPPED);
}
static void
bna_tx_cb_stats_cleared(void *arg, int status)
{
struct bna_tx *tx = (struct bna_tx *)arg;
bfa_q_qe_init(&tx->mbox_qe.qe);
bfa_fsm_send_event(tx, TX_E_STAT_CLEARED);
}
static void
bna_tx_start(struct bna_tx *tx)
{
tx->flags |= BNA_TX_F_PORT_STARTED;
if (tx->flags & BNA_TX_F_ENABLED)
bfa_fsm_send_event(tx, TX_E_START);
}
static void
bna_tx_stop(struct bna_tx *tx)
{
tx->stop_cbfn = bna_tx_mod_cb_tx_stopped;
tx->stop_cbarg = &tx->bna->tx_mod;
tx->flags &= ~BNA_TX_F_PORT_STARTED;
bfa_fsm_send_event(tx, TX_E_STOP);
}
static void
bna_tx_fail(struct bna_tx *tx)
{
tx->flags &= ~BNA_TX_F_PORT_STARTED;
bfa_fsm_send_event(tx, TX_E_FAIL);
}
static void
bna_tx_prio_changed(struct bna_tx *tx, int prio)
{
struct bna_txq *txq;
struct list_head *qe;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
txq->priority = prio;
}
bfa_fsm_send_event(tx, TX_E_PRIO_CHANGE);
}
static void
bna_tx_cee_link_status(struct bna_tx *tx, int cee_link)
{
if (cee_link)
tx->flags |= BNA_TX_F_PRIO_LOCK;
else
tx->flags &= ~BNA_TX_F_PRIO_LOCK;
}
static void
bna_tx_mod_cb_tx_stopped(void *arg, struct bna_tx *tx,
enum bna_cb_status status)
{
struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
bfa_wc_down(&tx_mod->tx_stop_wc);
}
static void
bna_tx_mod_cb_tx_stopped_all(void *arg)
{
struct bna_tx_mod *tx_mod = (struct bna_tx_mod *)arg;
if (tx_mod->stop_cbfn)
tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
tx_mod->stop_cbfn = NULL;
}
void
bna_tx_res_req(int num_txq, int txq_depth, struct bna_res_info *res_info)
{
u32 q_size;
u32 page_count;
struct bna_mem_info *mem_info;
res_info[BNA_TX_RES_MEM_T_TCB].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = sizeof(struct bna_tcb);
mem_info->num = num_txq;
q_size = txq_depth * BFI_TXQ_WI_SIZE;
q_size = ALIGN(q_size, PAGE_SIZE);
page_count = q_size >> PAGE_SHIFT;
res_info[BNA_TX_RES_MEM_T_QPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = page_count * sizeof(struct bna_dma_addr);
mem_info->num = num_txq;
res_info[BNA_TX_RES_MEM_T_SWQPT].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_KVA;
mem_info->len = page_count * sizeof(void *);
mem_info->num = num_txq;
res_info[BNA_TX_RES_MEM_T_PAGE].res_type = BNA_RES_T_MEM;
mem_info = &res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info;
mem_info->mem_type = BNA_MEM_T_DMA;
mem_info->len = PAGE_SIZE;
mem_info->num = num_txq * page_count;
res_info[BNA_TX_RES_INTR_T_TXCMPL].res_type = BNA_RES_T_INTR;
res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.intr_type =
BNA_INTR_T_MSIX;
res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info.num = num_txq;
}
struct bna_tx *
bna_tx_create(struct bna *bna, struct bnad *bnad,
struct bna_tx_config *tx_cfg,
struct bna_tx_event_cbfn *tx_cbfn,
struct bna_res_info *res_info, void *priv)
{
struct bna_intr_info *intr_info;
struct bna_tx_mod *tx_mod = &bna->tx_mod;
struct bna_tx *tx;
struct bna_txq *txq;
struct list_head *qe;
struct bna_ib_mod *ib_mod = &bna->ib_mod;
struct bna_doorbell_qset *qset;
struct bna_ib_config ib_config;
int page_count;
int page_size;
int page_idx;
int i;
unsigned long off;
intr_info = &res_info[BNA_TX_RES_INTR_T_TXCMPL].res_u.intr_info;
page_count = (res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.num) /
tx_cfg->num_txq;
page_size = res_info[BNA_TX_RES_MEM_T_PAGE].res_u.mem_info.len;
/**
* Get resources
*/
if ((intr_info->num != 1) && (intr_info->num != tx_cfg->num_txq))
return NULL;
/* Tx */
if (list_empty(&tx_mod->tx_free_q))
return NULL;
bfa_q_deq(&tx_mod->tx_free_q, &tx);
bfa_q_qe_init(&tx->qe);
/* TxQs */
INIT_LIST_HEAD(&tx->txq_q);
for (i = 0; i < tx_cfg->num_txq; i++) {
if (list_empty(&tx_mod->txq_free_q))
goto err_return;
bfa_q_deq(&tx_mod->txq_free_q, &txq);
bfa_q_qe_init(&txq->qe);
list_add_tail(&txq->qe, &tx->txq_q);
txq->ib = NULL;
txq->ib_seg_offset = -1;
txq->tx = tx;
}
/* IBs */
i = 0;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
if (intr_info->num == 1)
txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
intr_info->idl[0].vector);
else
txq->ib = bna_ib_get(ib_mod, intr_info->intr_type,
intr_info->idl[i].vector);
if (txq->ib == NULL)
goto err_return;
txq->ib_seg_offset = bna_ib_reserve_idx(txq->ib);
if (txq->ib_seg_offset == -1)
goto err_return;
i++;
}
/*
* Initialize
*/
/* Tx */
tx->tcb_setup_cbfn = tx_cbfn->tcb_setup_cbfn;
tx->tcb_destroy_cbfn = tx_cbfn->tcb_destroy_cbfn;
/* Following callbacks are mandatory */
tx->tx_stall_cbfn = tx_cbfn->tx_stall_cbfn;
tx->tx_resume_cbfn = tx_cbfn->tx_resume_cbfn;
tx->tx_cleanup_cbfn = tx_cbfn->tx_cleanup_cbfn;
list_add_tail(&tx->qe, &tx_mod->tx_active_q);
tx->bna = bna;
tx->priv = priv;
tx->txq_stop_wc.wc_resume = bna_tx_cb_txq_stopped_all;
tx->txq_stop_wc.wc_cbarg = tx;
tx->txq_stop_wc.wc_count = 0;
tx->type = tx_cfg->tx_type;
tx->flags = 0;
if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_STARTED) {
switch (tx->type) {
case BNA_TX_T_REGULAR:
if (!(tx->bna->tx_mod.flags &
BNA_TX_MOD_F_PORT_LOOPBACK))
tx->flags |= BNA_TX_F_PORT_STARTED;
break;
case BNA_TX_T_LOOPBACK:
if (tx->bna->tx_mod.flags & BNA_TX_MOD_F_PORT_LOOPBACK)
tx->flags |= BNA_TX_F_PORT_STARTED;
break;
}
}
if (tx->bna->tx_mod.cee_link)
tx->flags |= BNA_TX_F_PRIO_LOCK;
/* TxQ */
i = 0;
page_idx = 0;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
txq->priority = tx_mod->priority;
txq->tcb = (struct bna_tcb *)
res_info[BNA_TX_RES_MEM_T_TCB].res_u.mem_info.mdl[i].kva;
txq->tx_packets = 0;
txq->tx_bytes = 0;
/* IB */
ib_config.coalescing_timeo = BFI_TX_COALESCING_TIMEO;
ib_config.interpkt_timeo = 0; /* Not used */
ib_config.interpkt_count = BFI_TX_INTERPKT_COUNT;
ib_config.ctrl_flags = (BFI_IB_CF_INTER_PKT_DMA |
BFI_IB_CF_INT_ENABLE |
BFI_IB_CF_COALESCING_MODE);
bna_ib_config(txq->ib, &ib_config);
/* TCB */
txq->tcb->producer_index = 0;
txq->tcb->consumer_index = 0;
txq->tcb->hw_consumer_index = (volatile u32 *)
((volatile u8 *)txq->ib->ib_seg_host_addr_kva +
(txq->ib_seg_offset * BFI_IBIDX_SIZE));
*(txq->tcb->hw_consumer_index) = 0;
txq->tcb->q_depth = tx_cfg->txq_depth;
txq->tcb->unmap_q = (void *)
res_info[BNA_TX_RES_MEM_T_UNMAPQ].res_u.mem_info.mdl[i].kva;
qset = (struct bna_doorbell_qset *)0;
off = (unsigned long)&qset[txq->txq_id].txq[0];
txq->tcb->q_dbell = off +
BNA_GET_DOORBELL_BASE_ADDR(bna->pcidev.pci_bar_kva);
txq->tcb->i_dbell = &txq->ib->door_bell;
txq->tcb->intr_type = intr_info->intr_type;
txq->tcb->intr_vector = (intr_info->num == 1) ?
intr_info->idl[0].vector :
intr_info->idl[i].vector;
txq->tcb->txq = txq;
txq->tcb->bnad = bnad;
txq->tcb->id = i;
/* QPT, SWQPT, Pages */
bna_txq_qpt_setup(txq, page_count, page_size,
&res_info[BNA_TX_RES_MEM_T_QPT].res_u.mem_info.mdl[i],
&res_info[BNA_TX_RES_MEM_T_SWQPT].res_u.mem_info.mdl[i],
&res_info[BNA_TX_RES_MEM_T_PAGE].
res_u.mem_info.mdl[page_idx]);
txq->tcb->page_idx = page_idx;
txq->tcb->page_count = page_count;
page_idx += page_count;
/* Callback to bnad for setting up TCB */
if (tx->tcb_setup_cbfn)
(tx->tcb_setup_cbfn)(bna->bnad, txq->tcb);
i++;
}
/* TxF */
tx->txf.ctrl_flags = BFI_TXF_CF_ENABLE | BFI_TXF_CF_VLAN_WI_BASED;
tx->txf.vlan = 0;
/* Mbox element */
bfa_q_qe_init(&tx->mbox_qe.qe);
bfa_fsm_set_state(tx, bna_tx_sm_stopped);
return tx;
err_return:
bna_tx_free(tx);
return NULL;
}
void
bna_tx_destroy(struct bna_tx *tx)
{
/* Callback to bnad for destroying TCB */
if (tx->tcb_destroy_cbfn) {
struct bna_txq *txq;
struct list_head *qe;
list_for_each(qe, &tx->txq_q) {
txq = (struct bna_txq *)qe;
(tx->tcb_destroy_cbfn)(tx->bna->bnad, txq->tcb);
}
}
bna_tx_free(tx);
}
void
bna_tx_enable(struct bna_tx *tx)
{
if (tx->fsm != (bfa_sm_t)bna_tx_sm_stopped)
return;
tx->flags |= BNA_TX_F_ENABLED;
if (tx->flags & BNA_TX_F_PORT_STARTED)
bfa_fsm_send_event(tx, TX_E_START);
}
void
bna_tx_disable(struct bna_tx *tx, enum bna_cleanup_type type,
void (*cbfn)(void *, struct bna_tx *, enum bna_cb_status))
{
if (type == BNA_SOFT_CLEANUP) {
(*cbfn)(tx->bna->bnad, tx, BNA_CB_SUCCESS);
return;
}
tx->stop_cbfn = cbfn;
tx->stop_cbarg = tx->bna->bnad;
tx->flags &= ~BNA_TX_F_ENABLED;
bfa_fsm_send_event(tx, TX_E_STOP);
}
int
bna_tx_state_get(struct bna_tx *tx)
{
return bfa_sm_to_state(tx_sm_table, tx->fsm);
}
void
bna_tx_mod_init(struct bna_tx_mod *tx_mod, struct bna *bna,
struct bna_res_info *res_info)
{
int i;
tx_mod->bna = bna;
tx_mod->flags = 0;
tx_mod->tx = (struct bna_tx *)
res_info[BNA_RES_MEM_T_TX_ARRAY].res_u.mem_info.mdl[0].kva;
tx_mod->txq = (struct bna_txq *)
res_info[BNA_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mdl[0].kva;
INIT_LIST_HEAD(&tx_mod->tx_free_q);
INIT_LIST_HEAD(&tx_mod->tx_active_q);
INIT_LIST_HEAD(&tx_mod->txq_free_q);
for (i = 0; i < BFI_MAX_TXQ; i++) {
tx_mod->tx[i].txf.txf_id = i;
bfa_q_qe_init(&tx_mod->tx[i].qe);
list_add_tail(&tx_mod->tx[i].qe, &tx_mod->tx_free_q);
tx_mod->txq[i].txq_id = i;
bfa_q_qe_init(&tx_mod->txq[i].qe);
list_add_tail(&tx_mod->txq[i].qe, &tx_mod->txq_free_q);
}
tx_mod->tx_stop_wc.wc_resume = bna_tx_mod_cb_tx_stopped_all;
tx_mod->tx_stop_wc.wc_cbarg = tx_mod;
tx_mod->tx_stop_wc.wc_count = 0;
}
void
bna_tx_mod_uninit(struct bna_tx_mod *tx_mod)
{
struct list_head *qe;
int i;
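/* These loops only walk the free lists; the counts are computed and
 * then discarded (apparently leftover debug/accounting code). */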
i = 0;
list_for_each(qe, &tx_mod->tx_free_q)
i++;
i = 0;
list_for_each(qe, &tx_mod->txq_free_q)
i++;
tx_mod->bna = NULL;
}
void
bna_tx_mod_start(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
struct list_head *qe;
tx_mod->flags |= BNA_TX_MOD_F_PORT_STARTED;
if (type == BNA_TX_T_LOOPBACK)
tx_mod->flags |= BNA_TX_MOD_F_PORT_LOOPBACK;
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
if (tx->type == type)
bna_tx_start(tx);
}
}
void
bna_tx_mod_stop(struct bna_tx_mod *tx_mod, enum bna_tx_type type)
{
struct bna_tx *tx;
struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
tx_mod->stop_cbfn = bna_port_cb_tx_stopped;
/**
* Before calling bna_tx_stop(), increment tx_stop_wc as many times
* as we are going to call bna_tx_stop
*/
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
if (tx->type == type)
bfa_wc_up(&tx_mod->tx_stop_wc);
}
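/* No matching Tx objects were active: report the stop as complete
 * synchronously. */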
if (tx_mod->tx_stop_wc.wc_count == 0) {
tx_mod->stop_cbfn(&tx_mod->bna->port, BNA_CB_SUCCESS);
tx_mod->stop_cbfn = NULL;
return;
}
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
if (tx->type == type)
bna_tx_stop(tx);
}
}
void
bna_tx_mod_fail(struct bna_tx_mod *tx_mod)
{
struct bna_tx *tx;
struct list_head *qe;
tx_mod->flags &= ~BNA_TX_MOD_F_PORT_STARTED;
tx_mod->flags &= ~BNA_TX_MOD_F_PORT_LOOPBACK;
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
bna_tx_fail(tx);
}
}
void
bna_tx_mod_prio_changed(struct bna_tx_mod *tx_mod, int prio)
{
struct bna_tx *tx;
struct list_head *qe;
if (prio != tx_mod->priority) {
tx_mod->priority = prio;
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
bna_tx_prio_changed(tx, prio);
}
}
}
void
bna_tx_mod_cee_link_status(struct bna_tx_mod *tx_mod, int cee_link)
{
struct bna_tx *tx;
struct list_head *qe;
tx_mod->cee_link = cee_link;
list_for_each(qe, &tx_mod->tx_active_q) {
tx = (struct bna_tx *)qe;
bna_tx_cee_link_status(tx, cee_link);
}
}
| gpl-2.0 |
Californication/lge-kernel-msm7x27-ics-3.0.8 | sound/soc/codecs/wm8580.c | 2899 | 26003 | /*
* wm8580.c -- WM8580 ALSA Soc Audio driver
*
* Copyright 2008, 2009 Wolfson Microelectronics PLC.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Notes:
* The WM8580 is a multichannel codec with S/PDIF support, featuring six
* DAC channels and two ADC channels.
*
* Currently only the primary audio interface is supported - S/PDIF and
* the secondary audio interfaces are not.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/tlv.h>
#include <sound/initval.h>
#include <asm/div64.h>
#include "wm8580.h"
/* WM8580 register space */
#define WM8580_PLLA1 0x00
#define WM8580_PLLA2 0x01
#define WM8580_PLLA3 0x02
#define WM8580_PLLA4 0x03
#define WM8580_PLLB1 0x04
#define WM8580_PLLB2 0x05
#define WM8580_PLLB3 0x06
#define WM8580_PLLB4 0x07
#define WM8580_CLKSEL 0x08
#define WM8580_PAIF1 0x09
#define WM8580_PAIF2 0x0A
#define WM8580_SAIF1 0x0B
#define WM8580_PAIF3 0x0C
#define WM8580_PAIF4 0x0D
#define WM8580_SAIF2 0x0E
#define WM8580_DAC_CONTROL1 0x0F
#define WM8580_DAC_CONTROL2 0x10
#define WM8580_DAC_CONTROL3 0x11
#define WM8580_DAC_CONTROL4 0x12
#define WM8580_DAC_CONTROL5 0x13
#define WM8580_DIGITAL_ATTENUATION_DACL1 0x14
#define WM8580_DIGITAL_ATTENUATION_DACR1 0x15
#define WM8580_DIGITAL_ATTENUATION_DACL2 0x16
#define WM8580_DIGITAL_ATTENUATION_DACR2 0x17
#define WM8580_DIGITAL_ATTENUATION_DACL3 0x18
#define WM8580_DIGITAL_ATTENUATION_DACR3 0x19
#define WM8580_MASTER_DIGITAL_ATTENUATION 0x1C
#define WM8580_ADC_CONTROL1 0x1D
#define WM8580_SPDTXCHAN0 0x1E
#define WM8580_SPDTXCHAN1 0x1F
#define WM8580_SPDTXCHAN2 0x20
#define WM8580_SPDTXCHAN3 0x21
#define WM8580_SPDTXCHAN4 0x22
#define WM8580_SPDTXCHAN5 0x23
#define WM8580_SPDMODE 0x24
#define WM8580_INTMASK 0x25
#define WM8580_GPO1 0x26
#define WM8580_GPO2 0x27
#define WM8580_GPO3 0x28
#define WM8580_GPO4 0x29
#define WM8580_GPO5 0x2A
#define WM8580_INTSTAT 0x2B
#define WM8580_SPDRXCHAN1 0x2C
#define WM8580_SPDRXCHAN2 0x2D
#define WM8580_SPDRXCHAN3 0x2E
#define WM8580_SPDRXCHAN4 0x2F
#define WM8580_SPDRXCHAN5 0x30
#define WM8580_SPDSTAT 0x31
#define WM8580_PWRDN1 0x32
#define WM8580_PWRDN2 0x33
#define WM8580_READBACK 0x34
#define WM8580_RESET 0x35
#define WM8580_MAX_REGISTER 0x35
#define WM8580_DACOSR 0x40
/* PLLB4 (register 7h) */
#define WM8580_PLLB4_MCLKOUTSRC_MASK 0x60
#define WM8580_PLLB4_MCLKOUTSRC_PLLA 0x20
#define WM8580_PLLB4_MCLKOUTSRC_PLLB 0x40
#define WM8580_PLLB4_MCLKOUTSRC_OSC 0x60
#define WM8580_PLLB4_CLKOUTSRC_MASK 0x180
#define WM8580_PLLB4_CLKOUTSRC_PLLACLK 0x080
#define WM8580_PLLB4_CLKOUTSRC_PLLBCLK 0x100
#define WM8580_PLLB4_CLKOUTSRC_OSCCLK 0x180
/* CLKSEL (register 8h) */
#define WM8580_CLKSEL_DAC_CLKSEL_MASK 0x03
#define WM8580_CLKSEL_DAC_CLKSEL_PLLA 0x01
#define WM8580_CLKSEL_DAC_CLKSEL_PLLB 0x02
/* AIF control 1 (registers 9h-bh) */
#define WM8580_AIF_RATE_MASK 0x7
#define WM8580_AIF_BCLKSEL_MASK 0x18
#define WM8580_AIF_MS 0x20
#define WM8580_AIF_CLKSRC_MASK 0xc0
#define WM8580_AIF_CLKSRC_PLLA 0x40
#define WM8580_AIF_CLKSRC_PLLB 0x40
#define WM8580_AIF_CLKSRC_MCLK 0xc0
/* AIF control 2 (registers ch-eh) */
#define WM8580_AIF_FMT_MASK 0x03
#define WM8580_AIF_FMT_RIGHTJ 0x00
#define WM8580_AIF_FMT_LEFTJ 0x01
#define WM8580_AIF_FMT_I2S 0x02
#define WM8580_AIF_FMT_DSP 0x03
#define WM8580_AIF_LENGTH_MASK 0x0c
#define WM8580_AIF_LENGTH_16 0x00
#define WM8580_AIF_LENGTH_20 0x04
#define WM8580_AIF_LENGTH_24 0x08
#define WM8580_AIF_LENGTH_32 0x0c
#define WM8580_AIF_LRP 0x10
#define WM8580_AIF_BCP 0x20
/* Powerdown Register 1 (register 32h) */
#define WM8580_PWRDN1_PWDN 0x001
#define WM8580_PWRDN1_ALLDACPD 0x040
/* Powerdown Register 2 (register 33h) */
#define WM8580_PWRDN2_OSSCPD 0x001
#define WM8580_PWRDN2_PLLAPD 0x002
#define WM8580_PWRDN2_PLLBPD 0x004
#define WM8580_PWRDN2_SPDIFPD 0x008
#define WM8580_PWRDN2_SPDIFTXD 0x010
#define WM8580_PWRDN2_SPDIFRXD 0x020
#define WM8580_DAC_CONTROL5_MUTEALL 0x10
/*
* wm8580 register cache
* We can't read the WM8580 register space when we
* are using 2 wire for device control, so we cache them instead.
*/
static const u16 wm8580_reg[] = {
0x0121, 0x017e, 0x007d, 0x0014, /*R3*/
0x0121, 0x017e, 0x007d, 0x0194, /*R7*/
0x0010, 0x0002, 0x0002, 0x00c2, /*R11*/
0x0182, 0x0082, 0x000a, 0x0024, /*R15*/
0x0009, 0x0000, 0x00ff, 0x0000, /*R19*/
0x00ff, 0x00ff, 0x00ff, 0x00ff, /*R23*/
0x00ff, 0x00ff, 0x00ff, 0x00ff, /*R27*/
0x01f0, 0x0040, 0x0000, 0x0000, /*R31(0x1F)*/
0x0000, 0x0000, 0x0031, 0x000b, /*R35*/
0x0039, 0x0000, 0x0010, 0x0032, /*R39*/
0x0054, 0x0076, 0x0098, 0x0000, /*R43(0x2B)*/
0x0000, 0x0000, 0x0000, 0x0000, /*R47*/
0x0000, 0x0000, 0x005e, 0x003e, /*R51(0x33)*/
0x0000, 0x0000 /*R53*/
};
struct pll_state {
unsigned int in;
unsigned int out;
};
#define WM8580_NUM_SUPPLIES 3
static const char *wm8580_supply_names[WM8580_NUM_SUPPLIES] = {
"AVDD",
"DVDD",
"PVDD",
};
/* codec private data */
struct wm8580_priv {
enum snd_soc_control_type control_type;
struct regulator_bulk_data supplies[WM8580_NUM_SUPPLIES];
struct pll_state a;
struct pll_state b;
int sysclk[2];
};
static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
static int wm8580_out_vu(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
u16 *reg_cache = codec->reg_cache;
unsigned int reg = mc->reg;
unsigned int reg2 = mc->rreg;
int ret;
/* Clear the register cache so we write without VU set */
reg_cache[reg] = 0;
reg_cache[reg2] = 0;
ret = snd_soc_put_volsw_2r(kcontrol, ucontrol);
if (ret < 0)
return ret;
/* Now write again with the volume update bit set */
snd_soc_update_bits(codec, reg, 0x100, 0x100);
snd_soc_update_bits(codec, reg2, 0x100, 0x100);
return 0;
}
#define SOC_WM8580_OUT_DOUBLE_R_TLV(xname, reg_left, reg_right, xshift, xmax, \
xinvert, tlv_array) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \
.access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\
SNDRV_CTL_ELEM_ACCESS_READWRITE, \
.tlv.p = (tlv_array), \
.info = snd_soc_info_volsw_2r, \
.get = snd_soc_get_volsw_2r, .put = wm8580_out_vu, \
.private_value = (unsigned long)&(struct soc_mixer_control) \
{.reg = reg_left, .rreg = reg_right, .shift = xshift, \
.max = xmax, .invert = xinvert} }
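/* Like SOC_DOUBLE_R_TLV, but writes go through wm8580_out_vu() so the
 * volume-update (VU) bit is asserted only after both channel registers
 * hold their new values. */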
static const struct snd_kcontrol_new wm8580_snd_controls[] = {
SOC_WM8580_OUT_DOUBLE_R_TLV("DAC1 Playback Volume",
WM8580_DIGITAL_ATTENUATION_DACL1,
WM8580_DIGITAL_ATTENUATION_DACR1,
0, 0xff, 0, dac_tlv),
SOC_WM8580_OUT_DOUBLE_R_TLV("DAC2 Playback Volume",
WM8580_DIGITAL_ATTENUATION_DACL2,
WM8580_DIGITAL_ATTENUATION_DACR2,
0, 0xff, 0, dac_tlv),
SOC_WM8580_OUT_DOUBLE_R_TLV("DAC3 Playback Volume",
WM8580_DIGITAL_ATTENUATION_DACL3,
WM8580_DIGITAL_ATTENUATION_DACR3,
0, 0xff, 0, dac_tlv),
SOC_SINGLE("DAC1 Deemphasis Switch", WM8580_DAC_CONTROL3, 0, 1, 0),
SOC_SINGLE("DAC2 Deemphasis Switch", WM8580_DAC_CONTROL3, 1, 1, 0),
SOC_SINGLE("DAC3 Deemphasis Switch", WM8580_DAC_CONTROL3, 2, 1, 0),
SOC_DOUBLE("DAC1 Invert Switch", WM8580_DAC_CONTROL4, 0, 1, 1, 0),
SOC_DOUBLE("DAC2 Invert Switch", WM8580_DAC_CONTROL4, 2, 3, 1, 0),
SOC_DOUBLE("DAC3 Invert Switch", WM8580_DAC_CONTROL4, 4, 5, 1, 0),
SOC_SINGLE("DAC ZC Switch", WM8580_DAC_CONTROL5, 5, 1, 0),
SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 1),
SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 1),
SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 1),
SOC_DOUBLE("Capture Switch", WM8580_ADC_CONTROL1, 0, 1, 1, 1),
SOC_SINGLE("Capture High-Pass Filter Switch", WM8580_ADC_CONTROL1, 4, 1, 0),
};
static const struct snd_soc_dapm_widget wm8580_dapm_widgets[] = {
SND_SOC_DAPM_DAC("DAC1", "Playback", WM8580_PWRDN1, 2, 1),
SND_SOC_DAPM_DAC("DAC2", "Playback", WM8580_PWRDN1, 3, 1),
SND_SOC_DAPM_DAC("DAC3", "Playback", WM8580_PWRDN1, 4, 1),
SND_SOC_DAPM_OUTPUT("VOUT1L"),
SND_SOC_DAPM_OUTPUT("VOUT1R"),
SND_SOC_DAPM_OUTPUT("VOUT2L"),
SND_SOC_DAPM_OUTPUT("VOUT2R"),
SND_SOC_DAPM_OUTPUT("VOUT3L"),
SND_SOC_DAPM_OUTPUT("VOUT3R"),
SND_SOC_DAPM_ADC("ADC", "Capture", WM8580_PWRDN1, 1, 1),
SND_SOC_DAPM_INPUT("AINL"),
SND_SOC_DAPM_INPUT("AINR"),
};
static const struct snd_soc_dapm_route audio_map[] = {
{ "VOUT1L", NULL, "DAC1" },
{ "VOUT1R", NULL, "DAC1" },
{ "VOUT2L", NULL, "DAC2" },
{ "VOUT2R", NULL, "DAC2" },
{ "VOUT3L", NULL, "DAC3" },
{ "VOUT3R", NULL, "DAC3" },
{ "ADC", NULL, "AINL" },
{ "ADC", NULL, "AINR" },
};
static int wm8580_add_widgets(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = &codec->dapm;
snd_soc_dapm_new_controls(dapm, wm8580_dapm_widgets,
ARRAY_SIZE(wm8580_dapm_widgets));
snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
return 0;
}
/* PLL divisors */
struct _pll_div {
u32 prescale:1;
u32 postscale:1;
u32 freqmode:2;
u32 n:4;
u32 k:24;
};
/* Fixed-point scale (2^22) for the fractional part of the PLL divider */
#define FIXED_PLL_SIZE (1 << 22)
/* PLL rate to output rate divisions */
static struct {
unsigned int div;
unsigned int freqmode;
unsigned int postscale;
} post_table[] = {
{ 2, 0, 0 },
{ 4, 0, 1 },
{ 4, 1, 0 },
{ 8, 1, 1 },
{ 8, 2, 0 },
{ 16, 2, 1 },
{ 12, 3, 0 },
{ 24, 3, 1 }
};
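/*
 * The divider math below follows (inferred from the code, not checked
 * against the WM8580 datasheet):
 *
 *   Fout * post_div = (Fref / 2^prescale) * (N + K / 2^22)
 *
 * e.g. a 12.288MHz reference and a 12.288MHz target are scaled (x8)
 * into the 90-100MHz band as 98.304MHz, giving N = 8 and K = 0.
 */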
static int pll_factors(struct _pll_div *pll_div, unsigned int target,
unsigned int source)
{
u64 Kpart;
unsigned int K, Ndiv, Nmod;
int i;
pr_debug("wm8580: PLL %uHz->%uHz\n", source, target);
/* Scale the output frequency up; the PLL should run in the
* region of 90-100MHz.
*/
for (i = 0; i < ARRAY_SIZE(post_table); i++) {
if (target * post_table[i].div >= 90000000 &&
target * post_table[i].div <= 100000000) {
pll_div->freqmode = post_table[i].freqmode;
pll_div->postscale = post_table[i].postscale;
target *= post_table[i].div;
break;
}
}
if (i == ARRAY_SIZE(post_table)) {
printk(KERN_ERR "wm8580: Unable to scale output frequency "
"%u\n", target);
return -EINVAL;
}
Ndiv = target / source;
if (Ndiv < 5) {
source /= 2;
pll_div->prescale = 1;
Ndiv = target / source;
} else
pll_div->prescale = 0;
if ((Ndiv < 5) || (Ndiv > 13)) {
printk(KERN_ERR
"WM8580 N=%u outside supported range\n", Ndiv);
return -EINVAL;
}
pll_div->n = Ndiv;
Nmod = target % source;
Kpart = FIXED_PLL_SIZE * (long long)Nmod;
do_div(Kpart, source);
K = Kpart & 0xFFFFFFFF;
pll_div->k = K;
pr_debug("PLL %x.%x prescale %d freqmode %d postscale %d\n",
pll_div->n, pll_div->k, pll_div->prescale, pll_div->freqmode,
pll_div->postscale);
return 0;
}
static int wm8580_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
int source, unsigned int freq_in, unsigned int freq_out)
{
int offset;
struct snd_soc_codec *codec = codec_dai->codec;
struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
struct pll_state *state;
struct _pll_div pll_div;
unsigned int reg;
unsigned int pwr_mask;
int ret;
/* GCC can't work out that the ifs below initialise pll_div before
 * use, so zero it here to suppress the warnings.
 */
memset(&pll_div, 0, sizeof(pll_div));
switch (pll_id) {
case WM8580_PLLA:
state = &wm8580->a;
offset = 0;
pwr_mask = WM8580_PWRDN2_PLLAPD;
break;
case WM8580_PLLB:
state = &wm8580->b;
offset = 4;
pwr_mask = WM8580_PWRDN2_PLLBPD;
break;
default:
return -ENODEV;
}
if (freq_in && freq_out) {
ret = pll_factors(&pll_div, freq_out, freq_in);
if (ret != 0)
return ret;
}
state->in = freq_in;
state->out = freq_out;
/* Always disable the PLL - it is not safe to leave it running
* while reprogramming it.
*/
reg = snd_soc_read(codec, WM8580_PWRDN2);
snd_soc_write(codec, WM8580_PWRDN2, reg | pwr_mask);
if (!freq_in || !freq_out)
return 0;
snd_soc_write(codec, WM8580_PLLA1 + offset, pll_div.k & 0x1ff);
snd_soc_write(codec, WM8580_PLLA2 + offset, (pll_div.k >> 9) & 0x1ff);
snd_soc_write(codec, WM8580_PLLA3 + offset,
(pll_div.k >> 18 & 0xf) | (pll_div.n << 4));
reg = snd_soc_read(codec, WM8580_PLLA4 + offset);
reg &= ~0x1b;
reg |= pll_div.prescale | pll_div.postscale << 1 |
pll_div.freqmode << 3;
snd_soc_write(codec, WM8580_PLLA4 + offset, reg);
/* All done, turn it on */
reg = snd_soc_read(codec, WM8580_PWRDN2);
snd_soc_write(codec, WM8580_PWRDN2, reg & ~pwr_mask);
return 0;
}
static const int wm8580_sysclk_ratios[] = {
128, 192, 256, 384, 512, 768, 1152,
};
/*
* Set PCM DAI bit size and sample rate.
*/
static int wm8580_paif_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_codec *codec = rtd->codec;
struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
u16 paifa = 0;
u16 paifb = 0;
int i, ratio, osr;
/* bit size */
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
paifa |= 0x8;
break;
case SNDRV_PCM_FORMAT_S20_3LE:
paifa |= 0x0;
paifb |= WM8580_AIF_LENGTH_20;
break;
case SNDRV_PCM_FORMAT_S24_LE:
paifa |= 0x0;
paifb |= WM8580_AIF_LENGTH_24;
break;
case SNDRV_PCM_FORMAT_S32_LE:
paifa |= 0x0;
paifb |= WM8580_AIF_LENGTH_32;
break;
default:
return -EINVAL;
}
/* Look up the SYSCLK ratio; accept only exact matches */
ratio = wm8580->sysclk[dai->driver->id] / params_rate(params);
for (i = 0; i < ARRAY_SIZE(wm8580_sysclk_ratios); i++)
if (ratio == wm8580_sysclk_ratios[i])
break;
if (i == ARRAY_SIZE(wm8580_sysclk_ratios)) {
dev_err(codec->dev, "Invalid clock ratio %d/%d\n",
wm8580->sysclk[dai->driver->id], params_rate(params));
return -EINVAL;
}
paifa |= i;
dev_dbg(codec->dev, "Running at %dfs with %dHz clock\n",
wm8580_sysclk_ratios[i], wm8580->sysclk[dai->driver->id]);
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
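/* Low SYSCLK/fs ratios correspond to the highest sample rates, where
 * only the 64x DAC oversampling rate can be sustained. */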
switch (ratio) {
case 128:
case 192:
osr = WM8580_DACOSR;
dev_dbg(codec->dev, "Selecting 64x OSR\n");
break;
default:
osr = 0;
dev_dbg(codec->dev, "Selecting 128x OSR\n");
break;
}
snd_soc_update_bits(codec, WM8580_PAIF3, WM8580_DACOSR, osr);
}
snd_soc_update_bits(codec, WM8580_PAIF1 + dai->driver->id,
WM8580_AIF_RATE_MASK | WM8580_AIF_BCLKSEL_MASK,
paifa);
snd_soc_update_bits(codec, WM8580_PAIF3 + dai->driver->id,
WM8580_AIF_LENGTH_MASK, paifb);
return 0;
}
static int wm8580_set_paif_dai_fmt(struct snd_soc_dai *codec_dai,
unsigned int fmt)
{
struct snd_soc_codec *codec = codec_dai->codec;
unsigned int aifa;
unsigned int aifb;
int can_invert_lrclk;
aifa = snd_soc_read(codec, WM8580_PAIF1 + codec_dai->driver->id);
aifb = snd_soc_read(codec, WM8580_PAIF3 + codec_dai->driver->id);
aifb &= ~(WM8580_AIF_FMT_MASK | WM8580_AIF_LRP | WM8580_AIF_BCP);
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
aifa &= ~WM8580_AIF_MS;
break;
case SND_SOC_DAIFMT_CBM_CFM:
aifa |= WM8580_AIF_MS;
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
case SND_SOC_DAIFMT_I2S:
can_invert_lrclk = 1;
aifb |= WM8580_AIF_FMT_I2S;
break;
case SND_SOC_DAIFMT_RIGHT_J:
can_invert_lrclk = 1;
aifb |= WM8580_AIF_FMT_RIGHTJ;
break;
case SND_SOC_DAIFMT_LEFT_J:
can_invert_lrclk = 1;
aifb |= WM8580_AIF_FMT_LEFTJ;
break;
case SND_SOC_DAIFMT_DSP_A:
can_invert_lrclk = 0;
aifb |= WM8580_AIF_FMT_DSP;
break;
case SND_SOC_DAIFMT_DSP_B:
can_invert_lrclk = 0;
aifb |= WM8580_AIF_FMT_DSP;
aifb |= WM8580_AIF_LRP;
break;
default:
return -EINVAL;
}
switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
case SND_SOC_DAIFMT_NB_NF:
break;
case SND_SOC_DAIFMT_IB_IF:
if (!can_invert_lrclk)
return -EINVAL;
aifb |= WM8580_AIF_BCP;
aifb |= WM8580_AIF_LRP;
break;
case SND_SOC_DAIFMT_IB_NF:
aifb |= WM8580_AIF_BCP;
break;
case SND_SOC_DAIFMT_NB_IF:
if (!can_invert_lrclk)
return -EINVAL;
aifb |= WM8580_AIF_LRP;
break;
default:
return -EINVAL;
}
snd_soc_write(codec, WM8580_PAIF1 + codec_dai->driver->id, aifa);
snd_soc_write(codec, WM8580_PAIF3 + codec_dai->driver->id, aifb);
return 0;
}
static int wm8580_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
int div_id, int div)
{
struct snd_soc_codec *codec = codec_dai->codec;
unsigned int reg;
switch (div_id) {
case WM8580_MCLK:
reg = snd_soc_read(codec, WM8580_PLLB4);
reg &= ~WM8580_PLLB4_MCLKOUTSRC_MASK;
switch (div) {
case WM8580_CLKSRC_MCLK:
/* Input */
break;
case WM8580_CLKSRC_PLLA:
reg |= WM8580_PLLB4_MCLKOUTSRC_PLLA;
break;
case WM8580_CLKSRC_PLLB:
reg |= WM8580_PLLB4_MCLKOUTSRC_PLLB;
break;
case WM8580_CLKSRC_OSC:
reg |= WM8580_PLLB4_MCLKOUTSRC_OSC;
break;
default:
return -EINVAL;
}
snd_soc_write(codec, WM8580_PLLB4, reg);
break;
case WM8580_CLKOUTSRC:
reg = snd_soc_read(codec, WM8580_PLLB4);
reg &= ~WM8580_PLLB4_CLKOUTSRC_MASK;
switch (div) {
case WM8580_CLKSRC_NONE:
break;
case WM8580_CLKSRC_PLLA:
reg |= WM8580_PLLB4_CLKOUTSRC_PLLACLK;
break;
case WM8580_CLKSRC_PLLB:
reg |= WM8580_PLLB4_CLKOUTSRC_PLLBCLK;
break;
case WM8580_CLKSRC_OSC:
reg |= WM8580_PLLB4_CLKOUTSRC_OSCCLK;
break;
default:
return -EINVAL;
}
snd_soc_write(codec, WM8580_PLLB4, reg);
break;
default:
return -EINVAL;
}
return 0;
}
static int wm8580_set_sysclk(struct snd_soc_dai *dai, int clk_id,
unsigned int freq, int dir)
{
struct snd_soc_codec *codec = dai->codec;
struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
int sel, sel_mask, sel_shift;
switch (dai->driver->id) {
case WM8580_DAI_PAIFRX:
sel_mask = 0x3;
sel_shift = 0;
break;
case WM8580_DAI_PAIFTX:
sel_mask = 0xc;
sel_shift = 2;
break;
default:
BUG_ON("Unknown DAI driver ID\n");
return -EINVAL;
}
switch (clk_id) {
case WM8580_CLKSRC_ADCMCLK:
if (dai->driver->id != WM8580_DAI_PAIFTX)
return -EINVAL;
sel = 0 << sel_shift;
break;
case WM8580_CLKSRC_PLLA:
sel = 1 << sel_shift;
break;
case WM8580_CLKSRC_PLLB:
sel = 2 << sel_shift;
break;
case WM8580_CLKSRC_MCLK:
sel = 3 << sel_shift;
break;
default:
dev_err(codec->dev, "Unknown clock %d\n", clk_id);
return -EINVAL;
}
/* We really should validate PLL settings but not yet */
wm8580->sysclk[dai->driver->id] = freq;
return snd_soc_update_bits(codec, WM8580_CLKSEL, sel_mask, sel);
}
static int wm8580_digital_mute(struct snd_soc_dai *codec_dai, int mute)
{
struct snd_soc_codec *codec = codec_dai->codec;
unsigned int reg;
reg = snd_soc_read(codec, WM8580_DAC_CONTROL5);
if (mute)
reg |= WM8580_DAC_CONTROL5_MUTEALL;
else
reg &= ~WM8580_DAC_CONTROL5_MUTEALL;
snd_soc_write(codec, WM8580_DAC_CONTROL5, reg);
return 0;
}
static int wm8580_set_bias_level(struct snd_soc_codec *codec,
enum snd_soc_bias_level level)
{
u16 reg;
switch (level) {
case SND_SOC_BIAS_ON:
case SND_SOC_BIAS_PREPARE:
break;
case SND_SOC_BIAS_STANDBY:
if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
/* Power up and get individual control of the DACs */
reg = snd_soc_read(codec, WM8580_PWRDN1);
reg &= ~(WM8580_PWRDN1_PWDN | WM8580_PWRDN1_ALLDACPD);
snd_soc_write(codec, WM8580_PWRDN1, reg);
/* Make VMID high impedance */
reg = snd_soc_read(codec, WM8580_ADC_CONTROL1);
reg &= ~0x100;
snd_soc_write(codec, WM8580_ADC_CONTROL1, reg);
}
break;
case SND_SOC_BIAS_OFF:
reg = snd_soc_read(codec, WM8580_PWRDN1);
snd_soc_write(codec, WM8580_PWRDN1, reg | WM8580_PWRDN1_PWDN);
break;
}
codec->dapm.bias_level = level;
return 0;
}
#define WM8580_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
static struct snd_soc_dai_ops wm8580_dai_ops_playback = {
.set_sysclk = wm8580_set_sysclk,
.hw_params = wm8580_paif_hw_params,
.set_fmt = wm8580_set_paif_dai_fmt,
.set_clkdiv = wm8580_set_dai_clkdiv,
.set_pll = wm8580_set_dai_pll,
.digital_mute = wm8580_digital_mute,
};
static struct snd_soc_dai_ops wm8580_dai_ops_capture = {
.set_sysclk = wm8580_set_sysclk,
.hw_params = wm8580_paif_hw_params,
.set_fmt = wm8580_set_paif_dai_fmt,
.set_clkdiv = wm8580_set_dai_clkdiv,
.set_pll = wm8580_set_dai_pll,
};
static struct snd_soc_dai_driver wm8580_dai[] = {
{
.name = "wm8580-hifi-playback",
.id = WM8580_DAI_PAIFRX,
.playback = {
.stream_name = "Playback",
.channels_min = 1,
.channels_max = 6,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = WM8580_FORMATS,
},
.ops = &wm8580_dai_ops_playback,
},
{
.name = "wm8580-hifi-capture",
.id = WM8580_DAI_PAIFTX,
.capture = {
.stream_name = "Capture",
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = WM8580_FORMATS,
},
.ops = &wm8580_dai_ops_capture,
},
};
static int wm8580_probe(struct snd_soc_codec *codec)
{
struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
int ret = 0, i;
ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8580->control_type);
if (ret < 0) {
dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
return ret;
}
for (i = 0; i < ARRAY_SIZE(wm8580->supplies); i++)
wm8580->supplies[i].supply = wm8580_supply_names[i];
ret = regulator_bulk_get(codec->dev, ARRAY_SIZE(wm8580->supplies),
wm8580->supplies);
if (ret != 0) {
dev_err(codec->dev, "Failed to request supplies: %d\n", ret);
return ret;
}
ret = regulator_bulk_enable(ARRAY_SIZE(wm8580->supplies),
wm8580->supplies);
if (ret != 0) {
dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
goto err_regulator_get;
}
/* Get the codec into a known state */
ret = snd_soc_write(codec, WM8580_RESET, 0);
if (ret != 0) {
dev_err(codec->dev, "Failed to reset codec: %d\n", ret);
goto err_regulator_enable;
}
wm8580_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
snd_soc_add_controls(codec, wm8580_snd_controls,
ARRAY_SIZE(wm8580_snd_controls));
wm8580_add_widgets(codec);
return 0;
err_regulator_enable:
regulator_bulk_disable(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
err_regulator_get:
regulator_bulk_free(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
return ret;
}
/* power down chip */
static int wm8580_remove(struct snd_soc_codec *codec)
{
struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
wm8580_set_bias_level(codec, SND_SOC_BIAS_OFF);
regulator_bulk_disable(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
regulator_bulk_free(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
return 0;
}
static struct snd_soc_codec_driver soc_codec_dev_wm8580 = {
.probe = wm8580_probe,
.remove = wm8580_remove,
.set_bias_level = wm8580_set_bias_level,
.reg_cache_size = ARRAY_SIZE(wm8580_reg),
.reg_word_size = sizeof(u16),
.reg_cache_default = wm8580_reg,
};
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
static int wm8580_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct wm8580_priv *wm8580;
int ret;
wm8580 = kzalloc(sizeof(struct wm8580_priv), GFP_KERNEL);
if (wm8580 == NULL)
return -ENOMEM;
i2c_set_clientdata(i2c, wm8580);
wm8580->control_type = SND_SOC_I2C;
ret = snd_soc_register_codec(&i2c->dev,
&soc_codec_dev_wm8580, wm8580_dai, ARRAY_SIZE(wm8580_dai));
if (ret < 0)
kfree(wm8580);
return ret;
}
static int wm8580_i2c_remove(struct i2c_client *client)
{
snd_soc_unregister_codec(&client->dev);
kfree(i2c_get_clientdata(client));
return 0;
}
static const struct i2c_device_id wm8580_i2c_id[] = {
{ "wm8580", 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, wm8580_i2c_id);
static struct i2c_driver wm8580_i2c_driver = {
.driver = {
.name = "wm8580-codec",
.owner = THIS_MODULE,
},
.probe = wm8580_i2c_probe,
.remove = wm8580_i2c_remove,
.id_table = wm8580_i2c_id,
};
#endif
static int __init wm8580_modinit(void)
{
int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
ret = i2c_add_driver(&wm8580_i2c_driver);
if (ret != 0) {
pr_err("Failed to register WM8580 I2C driver: %d\n", ret);
}
#endif
return ret;
}
module_init(wm8580_modinit);
static void __exit wm8580_exit(void)
{
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
i2c_del_driver(&wm8580_i2c_driver);
#endif
}
module_exit(wm8580_exit);
MODULE_DESCRIPTION("ASoC WM8580 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
andr00ib/3.0.94-victor-kernel | arch/alpha/kernel/sys_sx164.c | 4179 | 4574 | /*
* linux/arch/alpha/kernel/sys_sx164.c
*
* Copyright (C) 1995 David A Rusling
* Copyright (C) 1996 Jay A Estabrook
* Copyright (C) 1998, 1999, 2000 Richard Henderson
*
* Code supporting the SX164 (PCA56+PYXIS).
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <asm/ptrace.h>
#include <asm/system.h>
#include <asm/dma.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/core_cia.h>
#include <asm/hwrpb.h>
#include <asm/tlbflush.h>
#include "proto.h"
#include "irq_impl.h"
#include "pci_impl.h"
#include "machvec_impl.h"
static void __init
sx164_init_irq(void)
{
outb(0, DMA1_RESET_REG);
outb(0, DMA2_RESET_REG);
outb(DMA_MODE_CASCADE, DMA2_MODE_REG);
outb(0, DMA2_MASK_REG);
if (alpha_using_srm)
alpha_mv.device_interrupt = srm_device_interrupt;
init_i8259a_irqs();
/* Not interested in the bogus interrupts (0,3,4,5,40-47),
NMI (1), or HALT (2). */
if (alpha_using_srm)
init_srm_irqs(40, 0x3f0000);
else
init_pyxis_irqs(0xff00003f0000UL);
setup_irq(16+6, &timer_cascade_irqaction);
}
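/*
 * For reference, the PYXIS mask literal above decomposes as plain bit
 * arithmetic: 0x3f0000UL covers bits 16-21 and 0xff0000000000UL covers
 * bits 40-47, so 0xff00003f0000UL names both groups in one constant.
 */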
/*
* PCI Fixup configuration.
*
* Summary @ PYXIS_INT_REQ:
* Bit Meaning
* 0 RSVD
* 1 NMI
* 2 Halt/Reset switch
* 3 MBZ
* 4 RAZ
* 5 RAZ
* 6 Interval timer (RTC)
* 7 PCI-ISA Bridge
* 8 Interrupt Line A from slot 3
* 9 Interrupt Line A from slot 2
*10 Interrupt Line A from slot 1
*11 Interrupt Line A from slot 0
*12 Interrupt Line B from slot 3
*13 Interrupt Line B from slot 2
*14 Interrupt Line B from slot 1
*15 Interrupt line B from slot 0
*16 Interrupt Line C from slot 3
*17 Interrupt Line C from slot 2
*18 Interrupt Line C from slot 1
*19 Interrupt Line C from slot 0
*20 Interrupt Line D from slot 3
*21 Interrupt Line D from slot 2
*22 Interrupt Line D from slot 1
*23 Interrupt Line D from slot 0
*
* IdSel
* 5 32 bit PCI option slot 2
* 6 64 bit PCI option slot 0
* 7 64 bit PCI option slot 1
* 8 Cypress I/O
* 9 32 bit PCI option slot 3
*/
static int __init
sx164_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
static char irq_tab[5][5] __initdata = {
/*INT INTA INTB INTC INTD */
{ 16+ 9, 16+ 9, 16+13, 16+17, 16+21}, /* IdSel 5 slot 2 J17 */
{ 16+11, 16+11, 16+15, 16+19, 16+23}, /* IdSel 6 slot 0 J19 */
{ 16+10, 16+10, 16+14, 16+18, 16+22}, /* IdSel 7 slot 1 J18 */
{ -1, -1, -1, -1, -1}, /* IdSel 8 SIO */
{ 16+ 8, 16+ 8, 16+12, 16+16, 16+20} /* IdSel 9 slot 3 J15 */
};
const long min_idsel = 5, max_idsel = 9, irqs_per_slot = 5;
return COMMON_TABLE_LOOKUP;
}
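/*
 * A hedged sketch of what COMMON_TABLE_LOOKUP (from pci_impl.h) boils
 * down to for this table: with slot in [min_idsel, max_idsel], the IRQ
 * is irq_tab[slot - min_idsel][pin]. For example, a device at IdSel 7
 * (slot 1, J18) asserting INTC (pin 3) resolves to
 * irq_tab[2][3] == 16+18 == 34.
 */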
static void __init
sx164_init_pci(void)
{
cia_init_pci();
SMC669_Init(0);
}
static void __init
sx164_init_arch(void)
{
/*
* OSF palcode v1.23 forgets to enable PCA56 Motion Video
* Instructions. Let's enable it.
* We have to check palcode revision because CSERVE interface
* is subject to change without notice. For example, it
* has been changed completely since v1.16 (found in MILO
* distribution). -ink
*/
struct percpu_struct *cpu = (struct percpu_struct*)
((char*)hwrpb + hwrpb->processor_offset);
if (amask(AMASK_MAX) != 0
&& alpha_using_srm
&& (cpu->pal_revision & 0xffff) <= 0x117) {
__asm__ __volatile__(
"lda $16,8($31)\n"
"call_pal 9\n" /* Allow PALRES insns in kernel mode */
".long 0x64000118\n\n" /* hw_mfpr $0,icsr */
"ldah $16,(1<<(19-16))($31)\n"
"or $0,$16,$0\n" /* set MVE bit */
".long 0x74000118\n" /* hw_mtpr $0,icsr */
"lda $16,9($31)\n"
"call_pal 9" /* Disable PALRES insns */
: : : "$0", "$16");
printk("PCA56 MVI set enabled\n");
}
pyxis_init_arch();
}
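/*
 * In plain C terms, the PALRES sequence above amounts to a
 * read-modify-write of the ICSR register (sketch only; ICSR is not
 * reachable from C without PALcode support):
 *
 *	unsigned long icsr = hw_mfpr_icsr();
 *	icsr |= 1UL << 19;	// MVE: enable Motion Video Instructions
 *	hw_mtpr_icsr(icsr);
 *
 * "ldah $16,(1<<(19-16))($31)" loads (1<<3) into the high 16 bits of
 * $16, i.e. exactly the 1UL << 19 mask.
 */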
/*
* The System Vector
*/
struct alpha_machine_vector sx164_mv __initmv = {
.vector_name = "SX164",
DO_EV5_MMU,
DO_DEFAULT_RTC,
DO_PYXIS_IO,
.machine_check = cia_machine_check,
.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
.min_io_address = DEFAULT_IO_BASE,
.min_mem_address = DEFAULT_MEM_BASE,
.pci_dac_offset = PYXIS_DAC_OFFSET,
.nr_irqs = 48,
.device_interrupt = pyxis_device_interrupt,
.init_arch = sx164_init_arch,
.init_irq = sx164_init_irq,
.init_rtc = common_init_rtc,
.init_pci = sx164_init_pci,
.kill_arch = cia_kill_arch,
.pci_map_irq = sx164_map_irq,
.pci_swizzle = common_swizzle,
};
ALIAS_MV(sx164)
| gpl-2.0 |
SlimRoms/kernel_lge_geeb | drivers/staging/bcm/hostmibs.c | 4947 | 5968 | /*
* File Name: hostmibs.c
*
* Author: Beceem Communications Pvt. Ltd
*
* Abstract: This file contains the routines to copy the statistics used by
* the driver to the Host MIBS structure and giving the same to Application.
*/
#include "headers.h"
INT ProcessGetHostMibs(PMINI_ADAPTER Adapter, S_MIBS_HOST_STATS_MIBS *pstHostMibs)
{
S_SERVICEFLOW_ENTRY *pstServiceFlowEntry = NULL;
S_PHS_RULE *pstPhsRule = NULL;
S_CLASSIFIER_TABLE *pstClassifierTable = NULL;
S_CLASSIFIER_ENTRY *pstClassifierRule = NULL;
PPHS_DEVICE_EXTENSION pDeviceExtension = (PPHS_DEVICE_EXTENSION) &Adapter->stBCMPhsContext;
UINT nClassifierIndex = 0, nPhsTableIndex = 0, nSfIndex = 0, uiIndex = 0;
if (pDeviceExtension == NULL) {
BCM_DEBUG_PRINT(Adapter, DBG_TYPE_OTHERS, HOST_MIBS, DBG_LVL_ALL, "Invalid Device Extension\n");
return STATUS_FAILURE;
}
/* Copy the classifier Table */
for (nClassifierIndex = 0; nClassifierIndex < MAX_CLASSIFIERS; nClassifierIndex++) {
if (Adapter->astClassifierTable[nClassifierIndex].bUsed == TRUE)
memcpy((PVOID)&pstHostMibs->astClassifierTable[nClassifierIndex],
       (PVOID)&Adapter->astClassifierTable[nClassifierIndex],
       sizeof(S_MIBS_CLASSIFIER_RULE));
}
/* Copy the SF Table */
for (nSfIndex = 0; nSfIndex < NO_OF_QUEUES; nSfIndex++) {
if (Adapter->PackInfo[nSfIndex].bValid) {
memcpy((PVOID) & pstHostMibs->astSFtable[nSfIndex],
(PVOID) & Adapter->PackInfo[nSfIndex],
sizeof(S_MIBS_SERVICEFLOW_TABLE));
} else {
/* If index in not valid,
* don't process this for the PHS table.
* Go For the next entry.
*/
continue;
}
/* Retrieve the SFID Entry Index for requested Service Flow */
if (PHS_INVALID_TABLE_INDEX ==
    GetServiceFlowEntry(pDeviceExtension->pstServiceFlowPhsRulesTable,
			Adapter->PackInfo[nSfIndex].usVCID_Value,
			&pstServiceFlowEntry))
	continue;
pstClassifierTable = pstServiceFlowEntry->pstClassifierTable;
for (uiIndex = 0; uiIndex < MAX_PHSRULE_PER_SF; uiIndex++) {
pstClassifierRule = &pstClassifierTable->stActivePhsRulesList[uiIndex];
if (pstClassifierRule->bUsed) {
pstPhsRule = pstClassifierRule->pstPhsRule;
pstHostMibs->astPhsRulesTable[nPhsTableIndex].ulSFID =
	Adapter->PackInfo[nSfIndex].ulSFID;
memcpy(&pstHostMibs->astPhsRulesTable[nPhsTableIndex].u8PHSI,
       &pstPhsRule->u8PHSI, sizeof(S_PHS_RULE));
nPhsTableIndex++;
}
}
}
/* Copy other Host Statistics parameters */
pstHostMibs->stHostInfo.GoodTransmits = Adapter->dev->stats.tx_packets;
pstHostMibs->stHostInfo.GoodReceives = Adapter->dev->stats.rx_packets;
pstHostMibs->stHostInfo.CurrNumFreeDesc = atomic_read(&Adapter->CurrNumFreeTxDesc);
pstHostMibs->stHostInfo.BEBucketSize = Adapter->BEBucketSize;
pstHostMibs->stHostInfo.rtPSBucketSize = Adapter->rtPSBucketSize;
pstHostMibs->stHostInfo.TimerActive = Adapter->TimerActive;
pstHostMibs->stHostInfo.u32TotalDSD = Adapter->u32TotalDSD;
memcpy(pstHostMibs->stHostInfo.aTxPktSizeHist, Adapter->aTxPktSizeHist, sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
memcpy(pstHostMibs->stHostInfo.aRxPktSizeHist, Adapter->aRxPktSizeHist, sizeof(UINT32) * MIBS_MAX_HIST_ENTRIES);
return STATUS_SUCCESS;
}
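/*
 * A hedged usage sketch: an ioctl handler would typically allocate the
 * MIBS block, fill it via ProcessGetHostMibs(), and copy it to user
 * space. All names other than ProcessGetHostMibs() and the MIBS types
 * are hypothetical.
 */
#if 0
static int example_get_mibs(PMINI_ADAPTER Adapter, void __user *argp)
{
	S_MIBS_HOST_STATS_MIBS *mibs;
	int ret;
	mibs = kzalloc(sizeof(*mibs), GFP_KERNEL);
	if (!mibs)
		return -ENOMEM;
	ret = ProcessGetHostMibs(Adapter, mibs);
	if (ret == STATUS_SUCCESS &&
	    copy_to_user(argp, mibs, sizeof(*mibs)))
		ret = -EFAULT;
	kfree(mibs);
	return ret;
}
#endif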
VOID GetDroppedAppCntrlPktMibs(S_MIBS_HOST_STATS_MIBS *pstHostMibs, const PPER_TARANG_DATA pTarang)
{
memcpy(&(pstHostMibs->stDroppedAppCntrlMsgs),
&(pTarang->stDroppedAppCntrlMsgs),
sizeof(S_MIBS_DROPPED_APP_CNTRL_MESSAGES));
}
VOID CopyMIBSExtendedSFParameters(PMINI_ADAPTER Adapter, CServiceFlowParamSI *psfLocalSet, UINT uiSearchRuleIndex)
{
S_MIBS_EXTSERVICEFLOW_PARAMETERS *t = &Adapter->PackInfo[uiSearchRuleIndex].stMibsExtServiceFlowTable;
t->wmanIfSfid = psfLocalSet->u32SFID;
t->wmanIfCmnCpsMaxSustainedRate = psfLocalSet->u32MaxSustainedTrafficRate;
t->wmanIfCmnCpsMaxTrafficBurst = psfLocalSet->u32MaxTrafficBurst;
t->wmanIfCmnCpsMinReservedRate = psfLocalSet->u32MinReservedTrafficRate;
t->wmanIfCmnCpsToleratedJitter = psfLocalSet->u32ToleratedJitter;
t->wmanIfCmnCpsMaxLatency = psfLocalSet->u32MaximumLatency;
t->wmanIfCmnCpsFixedVsVariableSduInd = psfLocalSet->u8FixedLengthVSVariableLengthSDUIndicator;
t->wmanIfCmnCpsFixedVsVariableSduInd = ntohl(t->wmanIfCmnCpsFixedVsVariableSduInd);
t->wmanIfCmnCpsSduSize = psfLocalSet->u8SDUSize;
t->wmanIfCmnCpsSduSize = ntohl(t->wmanIfCmnCpsSduSize);
t->wmanIfCmnCpsSfSchedulingType = psfLocalSet->u8ServiceFlowSchedulingType;
t->wmanIfCmnCpsSfSchedulingType = ntohl(t->wmanIfCmnCpsSfSchedulingType);
t->wmanIfCmnCpsArqEnable = psfLocalSet->u8ARQEnable;
t->wmanIfCmnCpsArqEnable = ntohl(t->wmanIfCmnCpsArqEnable);
t->wmanIfCmnCpsArqWindowSize = ntohs(psfLocalSet->u16ARQWindowSize);
t->wmanIfCmnCpsArqWindowSize = ntohl(t->wmanIfCmnCpsArqWindowSize);
t->wmanIfCmnCpsArqBlockLifetime = ntohs(psfLocalSet->u16ARQBlockLifeTime);
t->wmanIfCmnCpsArqBlockLifetime = ntohl(t->wmanIfCmnCpsArqBlockLifetime);
t->wmanIfCmnCpsArqSyncLossTimeout = ntohs(psfLocalSet->u16ARQSyncLossTimeOut);
t->wmanIfCmnCpsArqSyncLossTimeout = ntohl(t->wmanIfCmnCpsArqSyncLossTimeout);
t->wmanIfCmnCpsArqDeliverInOrder = psfLocalSet->u8ARQDeliverInOrder;
t->wmanIfCmnCpsArqDeliverInOrder = ntohl(t->wmanIfCmnCpsArqDeliverInOrder);
t->wmanIfCmnCpsArqRxPurgeTimeout = ntohs(psfLocalSet->u16ARQRxPurgeTimeOut);
t->wmanIfCmnCpsArqRxPurgeTimeout = ntohl(t->wmanIfCmnCpsArqRxPurgeTimeout);
t->wmanIfCmnCpsArqBlockSize = ntohs(psfLocalSet->u16ARQBlockSize);
t->wmanIfCmnCpsArqBlockSize = ntohl(t->wmanIfCmnCpsArqBlockSize);
t->wmanIfCmnCpsReqTxPolicy = psfLocalSet->u8RequesttransmissionPolicy;
t->wmanIfCmnCpsReqTxPolicy = ntohl(t->wmanIfCmnCpsReqTxPolicy);
t->wmanIfCmnSfCsSpecification = psfLocalSet->u8CSSpecification;
t->wmanIfCmnSfCsSpecification = ntohl(t->wmanIfCmnSfCsSpecification);
t->wmanIfCmnCpsTargetSaid = ntohs(psfLocalSet->u16TargetSAID);
t->wmanIfCmnCpsTargetSaid = ntohl(t->wmanIfCmnCpsTargetSaid);
}
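/*
 * The assign-then-ntohl() pairs above could be collapsed with a small
 * helper macro; a sketch of the equivalent form (not applied here):
 */
#if 0
#define MIBS_NTOHL_FIELD(dst, src)	((dst) = ntohl((u32)(src)))
/* e.g.: MIBS_NTOHL_FIELD(t->wmanIfCmnCpsSduSize, psfLocalSet->u8SDUSize); */
#endif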
| gpl-2.0 |
l0rdg3x/AK-OnePlusOne-CAF | drivers/staging/line6/pod.c | 4947 | 37089 | /*
* Line6 Linux USB driver - 0.9.1beta
*
* Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*/
#include <linux/slab.h>
#include <linux/wait.h>
#include <sound/control.h>
#include "audio.h"
#include "capture.h"
#include "control.h"
#include "driver.h"
#include "playback.h"
#include "pod.h"
#define POD_SYSEX_CODE 3
#define POD_BYTES_PER_FRAME 6 /* 24bit audio (stereo) */
/* *INDENT-OFF* */
enum {
POD_SYSEX_CLIP = 0x0f,
POD_SYSEX_SAVE = 0x24,
POD_SYSEX_SYSTEM = 0x56,
POD_SYSEX_SYSTEMREQ = 0x57,
/* POD_SYSEX_UPDATE = 0x6c, */ /* software update! */
POD_SYSEX_STORE = 0x71,
POD_SYSEX_FINISH = 0x72,
POD_SYSEX_DUMPMEM = 0x73,
POD_SYSEX_DUMP = 0x74,
POD_SYSEX_DUMPREQ = 0x75
/* POD_SYSEX_DUMPMEM2 = 0x76 */ /* dumps entire internal memory of PODxt Pro */
};
enum {
POD_monitor_level = 0x04,
POD_routing = 0x05,
POD_tuner_mute = 0x13,
POD_tuner_freq = 0x15,
POD_tuner_note = 0x16,
POD_tuner_pitch = 0x17,
POD_system_invalid = 0x10000
};
/* *INDENT-ON* */
enum {
POD_DUMP_MEMORY = 2
};
enum {
POD_BUSY_READ,
POD_BUSY_WRITE,
POD_CHANNEL_DIRTY,
POD_SAVE_PRESSED,
POD_BUSY_MIDISEND
};
static struct snd_ratden pod_ratden = {
.num_min = 78125,
.num_max = 78125,
.num_step = 1,
.den = 2
};
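/*
 * 78125 / 2 = 39062.5 Hz, the fractional sample rate this driver
 * advertises. Since ALSA hardware params take integer rates, the
 * properties below bracket it with rate_min = 39062, rate_max = 39063
 * and SNDRV_PCM_RATE_KNOT, pointing at this rational rate list.
 */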
static struct line6_pcm_properties pod_pcm_properties = {
.snd_line6_playback_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
SNDRV_PCM_INFO_PAUSE |
#ifdef CONFIG_PM
SNDRV_PCM_INFO_RESUME |
#endif
SNDRV_PCM_INFO_SYNC_START),
.formats = SNDRV_PCM_FMTBIT_S24_3LE,
.rates = SNDRV_PCM_RATE_KNOT,
.rate_min = 39062,
.rate_max = 39063,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 60000,
.period_bytes_min = 64,
.period_bytes_max = 8192,
.periods_min = 1,
.periods_max = 1024},
.snd_line6_capture_hw = {
.info = (SNDRV_PCM_INFO_MMAP |
SNDRV_PCM_INFO_INTERLEAVED |
SNDRV_PCM_INFO_BLOCK_TRANSFER |
SNDRV_PCM_INFO_MMAP_VALID |
#ifdef CONFIG_PM
SNDRV_PCM_INFO_RESUME |
#endif
SNDRV_PCM_INFO_SYNC_START),
.formats = SNDRV_PCM_FMTBIT_S24_3LE,
.rates = SNDRV_PCM_RATE_KNOT,
.rate_min = 39062,
.rate_max = 39063,
.channels_min = 2,
.channels_max = 2,
.buffer_bytes_max = 60000,
.period_bytes_min = 64,
.period_bytes_max = 8192,
.periods_min = 1,
.periods_max = 1024},
.snd_line6_rates = {
.nrats = 1,
.rats = &pod_ratden},
.bytes_per_frame = POD_BYTES_PER_FRAME
};
static const char pod_request_channel[] = {
0xf0, 0x00, 0x01, 0x0c, 0x03, 0x75, 0xf7
};
static const char pod_version_header[] = {
0xf2, 0x7e, 0x7f, 0x06, 0x02
};
/* forward declarations: */
static void pod_startup2(unsigned long data);
static void pod_startup3(struct usb_line6_pod *pod);
static void pod_startup4(struct usb_line6_pod *pod);
/*
Mark all parameters as dirty and notify waiting processes.
*/
static void pod_mark_batch_all_dirty(struct usb_line6_pod *pod)
{
int i;
for (i = 0; i < POD_CONTROL_SIZE; i++)
set_bit(i, pod->param_dirty);
}
static char *pod_alloc_sysex_buffer(struct usb_line6_pod *pod, int code,
int size)
{
return line6_alloc_sysex_buffer(&pod->line6, POD_SYSEX_CODE, code,
size);
}
/*
Send channel dump data to the PODxt Pro.
*/
static void pod_dump(struct usb_line6_pod *pod, const unsigned char *data)
{
int size = 1 + sizeof(pod->prog_data);
char *sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_DUMP, size);
if (!sysex)
return;
/* Don't know what this is good for, but PODxt Pro transmits it, so we
* also do... */
sysex[SYSEX_DATA_OFS] = 5;
memcpy(sysex + SYSEX_DATA_OFS + 1, data, sizeof(pod->prog_data));
line6_send_sysex_message(&pod->line6, sysex, size);
memcpy(&pod->prog_data, data, sizeof(pod->prog_data));
pod_mark_batch_all_dirty(pod);
kfree(sysex);
}
/*
Store parameter value in driver memory and mark it as dirty.
*/
static void pod_store_parameter(struct usb_line6_pod *pod, int param, int value)
{
pod->prog_data.control[param] = value;
set_bit(param, pod->param_dirty);
pod->dirty = 1;
}
/*
Handle SAVE button.
*/
static void pod_save_button_pressed(struct usb_line6_pod *pod, int type,
int index)
{
pod->dirty = 0;
set_bit(POD_SAVE_PRESSED, &pod->atomic_flags);
}
/*
Process a completely received message.
*/
void line6_pod_process_message(struct usb_line6_pod *pod)
{
const unsigned char *buf = pod->line6.buffer_message;
/* filter messages by type */
switch (buf[0] & 0xf0) {
case LINE6_PARAM_CHANGE:
case LINE6_PROGRAM_CHANGE:
case LINE6_SYSEX_BEGIN:
break; /* handle these further down */
default:
return; /* ignore all others */
}
/* process all remaining messages */
switch (buf[0]) {
case LINE6_PARAM_CHANGE | LINE6_CHANNEL_DEVICE:
pod_store_parameter(pod, buf[1], buf[2]);
/* intentionally no break here! */
case LINE6_PARAM_CHANGE | LINE6_CHANNEL_HOST:
if ((buf[1] == POD_amp_model_setup) ||
(buf[1] == POD_effect_setup))
/* these also affect other settings */
line6_dump_request_async(&pod->dumpreq, &pod->line6, 0,
LINE6_DUMP_CURRENT);
break;
case LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_DEVICE:
case LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_HOST:
pod->channel_num = buf[1];
pod->dirty = 0;
set_bit(POD_CHANNEL_DIRTY, &pod->atomic_flags);
line6_dump_request_async(&pod->dumpreq, &pod->line6, 0,
LINE6_DUMP_CURRENT);
break;
case LINE6_SYSEX_BEGIN | LINE6_CHANNEL_DEVICE:
case LINE6_SYSEX_BEGIN | LINE6_CHANNEL_UNKNOWN:
if (memcmp(buf + 1, line6_midi_id, sizeof(line6_midi_id)) == 0) {
switch (buf[5]) {
case POD_SYSEX_DUMP:
	if (pod->line6.message_length ==
	    sizeof(pod->prog_data) + 7) {
		switch (pod->dumpreq.in_progress) {
		case LINE6_DUMP_CURRENT:
			memcpy(&pod->prog_data, buf + 7,
			       sizeof(pod->prog_data));
			pod_mark_batch_all_dirty(pod);
			break;
		case POD_DUMP_MEMORY:
			memcpy(&pod->prog_data_buf, buf + 7,
			       sizeof(pod->prog_data_buf));
			break;
		default:
			DEBUG_MESSAGES(dev_err(pod->line6.ifcdev,
					       "unknown dump code %02X\n",
					       pod->dumpreq.in_progress));
		}
		line6_dump_finished(&pod->dumpreq);
		pod_startup3(pod);
	} else
		DEBUG_MESSAGES(dev_err(pod->line6.ifcdev,
				       "wrong size of channel dump message (%d instead of %d)\n",
				       pod->line6.message_length,
				       (int)sizeof(pod->prog_data) + 7));
	break;
case POD_SYSEX_SYSTEM: {
	short value = ((int)buf[7] << 12) | ((int)buf[8] << 8) |
		      ((int)buf[9] << 4) | (int)buf[10];
#define PROCESS_SYSTEM_PARAM(x) \
	case POD_ ## x: \
		pod->x.value = value; \
		wake_up(&pod->x.wait); \
		break;
	switch (buf[6]) {
	PROCESS_SYSTEM_PARAM(monitor_level);
	PROCESS_SYSTEM_PARAM(routing);
	PROCESS_SYSTEM_PARAM(tuner_mute);
	PROCESS_SYSTEM_PARAM(tuner_freq);
	PROCESS_SYSTEM_PARAM(tuner_note);
	PROCESS_SYSTEM_PARAM(tuner_pitch);
#undef PROCESS_SYSTEM_PARAM
	default:
		DEBUG_MESSAGES(dev_err(pod->line6.ifcdev,
				       "unknown tuner/system response %02X\n",
				       buf[6]));
	}
	break;
}
case POD_SYSEX_FINISH:
/* do we need to respond to this? */
break;
case POD_SYSEX_SAVE:
pod_save_button_pressed(pod, buf[6], buf[7]);
break;
case POD_SYSEX_CLIP:
	DEBUG_MESSAGES(dev_err(pod->line6.ifcdev,
			       "audio clipped\n"));
	pod->clipping.value = 1;
	wake_up(&pod->clipping.wait);
	break;
case POD_SYSEX_STORE:
	DEBUG_MESSAGES(dev_err(pod->line6.ifcdev,
			       "message %02X not yet implemented\n",
			       buf[5]));
	break;
default:
	DEBUG_MESSAGES(dev_err(pod->line6.ifcdev,
			       "unknown sysex message %02X\n",
			       buf[5]));
}
} else if (memcmp(buf, pod_version_header,
		  sizeof(pod_version_header)) == 0) {
	pod->firmware_version =
		buf[13] * 100 + buf[14] * 10 + buf[15];
	pod->device_id = ((int)buf[8] << 16) |
			 ((int)buf[9] << 8) | (int)buf[10];
	pod_startup4(pod);
} else
	DEBUG_MESSAGES(dev_err(pod->line6.ifcdev,
			       "unknown sysex header\n"));
break;
case LINE6_SYSEX_END:
break;
default:
DEBUG_MESSAGES(dev_err(pod->line6.ifcdev,
		       "POD: unknown message %02X\n", buf[0]));
}
}
/*
Detect some cases that require a channel dump after sending a command to the
device. Important notes:
*) The actual dump request can not be sent here since we are not allowed to
wait for the completion of the first message in this context, and sending
the dump request before completion of the previous message leaves the POD
in an undefined state. The dump request will be sent when the echoed
commands are received.
*) This method fails if a param change message is "chopped" after the first
byte.
*/
void line6_pod_midi_postprocess(struct usb_line6_pod *pod, unsigned char *data,
int length)
{
int i;
if (!pod->midi_postprocess)
return;
for (i = 0; i < length; ++i) {
if (data[i] == (LINE6_PROGRAM_CHANGE | LINE6_CHANNEL_HOST)) {
line6_invalidate_current(&pod->dumpreq);
break;
} else
if ((data[i] == (LINE6_PARAM_CHANGE | LINE6_CHANNEL_HOST))
&& (i < length - 1))
if ((data[i + 1] == POD_amp_model_setup)
|| (data[i + 1] == POD_effect_setup)) {
line6_invalidate_current(&pod->dumpreq);
break;
}
}
}
/*
Send channel number (i.e., switch to a different sound).
*/
static void pod_send_channel(struct usb_line6_pod *pod, int value)
{
line6_invalidate_current(&pod->dumpreq);
if (line6_send_program(&pod->line6, value) == 0)
pod->channel_num = value;
else
line6_dump_finished(&pod->dumpreq);
}
/*
Transmit PODxt Pro control parameter.
*/
void line6_pod_transmit_parameter(struct usb_line6_pod *pod, int param,
int value)
{
if (line6_transmit_parameter(&pod->line6, param, value) == 0)
pod_store_parameter(pod, param, value);
if ((param == POD_amp_model_setup) || (param == POD_effect_setup)) /* these also affect other settings */
line6_invalidate_current(&pod->dumpreq);
}
/*
Resolve value to memory location.
*/
static int pod_resolve(const char *buf, short block0, short block1,
unsigned char *location)
{
unsigned long value;
short block;
int ret;
ret = strict_strtoul(buf, 10, &value);
if (ret)
return ret;
block = (value < 0x40) ? block0 : block1;
value &= 0x3f;
location[0] = block >> 7;
location[1] = value | (block & 0x7f);
return 0;
}
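/*
 * Worked example: pod_set_store_channel() calls this with block0 = 0x0000
 * and block1 = 0x00c0. For buf = "70": value = 70 >= 0x40, so block =
 * 0x00c0; value &= 0x3f leaves 6; then location[0] = 0xc0 >> 7 = 1 and
 * location[1] = 6 | (0xc0 & 0x7f) = 0x46.
 */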
/*
Send command to store channel/effects setup/amp setup to PODxt Pro.
*/
static ssize_t pod_send_store_command(struct device *dev, const char *buf,
size_t count, short block0, short block1)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
int ret;
int size = 3 + sizeof(pod->prog_data_buf);
char *sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_STORE, size);
if (!sysex)
return 0;
sysex[SYSEX_DATA_OFS] = 5; /* see pod_dump() */
ret = pod_resolve(buf, block0, block1, sysex + SYSEX_DATA_OFS + 1);
if (ret) {
kfree(sysex);
return ret;
}
memcpy(sysex + SYSEX_DATA_OFS + 3, &pod->prog_data_buf,
sizeof(pod->prog_data_buf));
line6_send_sysex_message(&pod->line6, sysex, size);
kfree(sysex);
/* needs some delay here on AMD64 platform */
return count;
}
/*
Send command to retrieve channel/effects setup/amp setup to PODxt Pro.
*/
static ssize_t pod_send_retrieve_command(struct device *dev, const char *buf,
size_t count, short block0,
short block1)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
int ret;
int size = 4;
char *sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_DUMPMEM, size);
if (!sysex)
return 0;
ret = pod_resolve(buf, block0, block1, sysex + SYSEX_DATA_OFS);
if (ret) {
kfree(sysex);
return ret;
}
sysex[SYSEX_DATA_OFS + 2] = 0;
sysex[SYSEX_DATA_OFS + 3] = 0;
line6_dump_started(&pod->dumpreq, POD_DUMP_MEMORY);
if (line6_send_sysex_message(&pod->line6, sysex, size) < size)
line6_dump_finished(&pod->dumpreq);
kfree(sysex);
/* needs some delay here on AMD64 platform */
return count;
}
/*
Generic get name function.
*/
static ssize_t get_name_generic(struct usb_line6_pod *pod, const char *str,
char *buf)
{
int length = 0;
const char *p1;
char *p2;
char *last_non_space = buf;
int retval = line6_dump_wait_interruptible(&pod->dumpreq);
if (retval < 0)
return retval;
for (p1 = str, p2 = buf; *p1; ++p1, ++p2) {
*p2 = *p1;
if (*p2 != ' ')
last_non_space = p2;
if (++length == POD_NAME_LENGTH)
break;
}
*(last_non_space + 1) = '\n';
return last_non_space - buf + 2;
}
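/*
 * Example: for str = "Sweet Sound     " this copies up to
 * POD_NAME_LENGTH characters, remembers the last non-space ('d'),
 * overwrites the byte after it with '\n', and returns the length
 * including that newline, so trailing padding never reaches user space.
 */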
/*
"read" request on "channel" special file.
*/
static ssize_t pod_get_channel(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
return sprintf(buf, "%d\n", pod->channel_num);
}
/*
"write" request on "channel" special file.
*/
static ssize_t pod_set_channel(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
unsigned long value;
int ret;
ret = strict_strtoul(buf, 10, &value);
if (ret)
return ret;
pod_send_channel(pod, value);
return count;
}
/*
"read" request on "name" special file.
*/
static ssize_t pod_get_name(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
return get_name_generic(pod, pod->prog_data.header + POD_NAME_OFFSET,
buf);
}
/*
"read" request on "name" special file.
*/
static ssize_t pod_get_name_buf(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
return get_name_generic(pod,
pod->prog_data_buf.header + POD_NAME_OFFSET,
buf);
}
/*
"read" request on "dump" special file.
*/
static ssize_t pod_get_dump(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
int retval = line6_dump_wait_interruptible(&pod->dumpreq);
if (retval < 0)
return retval;
memcpy(buf, &pod->prog_data, sizeof(pod->prog_data));
return sizeof(pod->prog_data);
}
/*
"write" request on "dump" special file.
*/
static ssize_t pod_set_dump(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
if (count != sizeof(pod->prog_data)) {
dev_err(pod->line6.ifcdev,
"data block must be exactly %d bytes\n",
(int)sizeof(pod->prog_data));
return -EINVAL;
}
pod_dump(pod, buf);
return sizeof(pod->prog_data);
}
/*
Identify system parameters related to the tuner.
*/
static bool pod_is_tuner(int code)
{
return
(code == POD_tuner_mute) ||
(code == POD_tuner_freq) ||
(code == POD_tuner_note) || (code == POD_tuner_pitch);
}
/*
Get system parameter (as integer).
@param tuner non-zero, if code refers to a tuner parameter
*/
static int pod_get_system_param_int(struct usb_line6_pod *pod, int *value,
int code, struct ValueWait *param, int sign)
{
char *sysex;
static const int size = 1;
int retval = 0;
if (((pod->prog_data.control[POD_tuner] & 0x40) == 0)
&& pod_is_tuner(code))
return -ENODEV;
/* send value request to device: */
param->value = POD_system_invalid;
sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_SYSTEMREQ, size);
if (!sysex)
return -ENOMEM;
sysex[SYSEX_DATA_OFS] = code;
line6_send_sysex_message(&pod->line6, sysex, size);
kfree(sysex);
/* wait for device to respond: */
retval = wait_event_interruptible(param->wait,
				  param->value != POD_system_invalid);
if (retval < 0)
	return retval;
*value = sign ? (int)(signed short)param->value
	      : (int)(unsigned short)param->value;
if (*value == POD_system_invalid)
*value = 0; /* don't report uninitialized values */
return 0;
}
/*
Get system parameter (as string).
@param tuner non-zero, if code refers to a tuner parameter
*/
static ssize_t pod_get_system_param_string(struct usb_line6_pod *pod, char *buf,
int code, struct ValueWait *param,
int sign)
{
int retval, value = 0;
retval = pod_get_system_param_int(pod, &value, code, param, sign);
if (retval < 0)
return retval;
return sprintf(buf, "%d\n", value);
}
/*
Send system parameter (from integer).
@param tuner non-zero, if code refers to a tuner parameter
*/
static int pod_set_system_param_int(struct usb_line6_pod *pod, int value,
int code)
{
char *sysex;
static const int size = 5;
if (((pod->prog_data.control[POD_tuner] & 0x40) == 0)
&& pod_is_tuner(code))
return -EINVAL;
/* send value to tuner: */
sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_SYSTEM, size);
if (!sysex)
return -ENOMEM;
sysex[SYSEX_DATA_OFS] = code;
sysex[SYSEX_DATA_OFS + 1] = (value >> 12) & 0x0f;
sysex[SYSEX_DATA_OFS + 2] = (value >> 8) & 0x0f;
sysex[SYSEX_DATA_OFS + 3] = (value >> 4) & 0x0f;
sysex[SYSEX_DATA_OFS + 4] = (value) & 0x0f;
line6_send_sysex_message(&pod->line6, sysex, size);
kfree(sysex);
return 0;
}
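/*
 * The 16-bit value is split into four 4-bit nibbles so that every sysex
 * data byte stays below 0x80, as MIDI requires. E.g. value 0x1234 is
 * sent as the byte sequence 0x01 0x02 0x03 0x04 after the parameter
 * code.
 */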
/*
Send system parameter (from string).
@param tuner non-zero, if code refers to a tuner parameter
*/
static ssize_t pod_set_system_param_string(struct usb_line6_pod *pod,
const char *buf, int count, int code,
unsigned short mask)
{
int retval;
unsigned short value = simple_strtoul(buf, NULL, 10) & mask;
retval = pod_set_system_param_int(pod, value, code);
return (retval < 0) ? retval : count;
}
/*
"read" request on "dump_buf" special file.
*/
static ssize_t pod_get_dump_buf(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
int retval = line6_dump_wait_interruptible(&pod->dumpreq);
if (retval < 0)
return retval;
memcpy(buf, &pod->prog_data_buf, sizeof(pod->prog_data_buf));
return sizeof(pod->prog_data_buf);
}
/*
"write" request on "dump_buf" special file.
*/
static ssize_t pod_set_dump_buf(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
if (count != sizeof(pod->prog_data)) {
dev_err(pod->line6.ifcdev,
"data block must be exactly %d bytes\n",
(int)sizeof(pod->prog_data));
return -EINVAL;
}
memcpy(&pod->prog_data_buf, buf, sizeof(pod->prog_data));
return sizeof(pod->prog_data);
}
/*
"write" request on "finish" special file.
*/
static ssize_t pod_set_finish(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
int size = 0;
char *sysex = pod_alloc_sysex_buffer(pod, POD_SYSEX_FINISH, size);
if (!sysex)
return 0;
line6_send_sysex_message(&pod->line6, sysex, size);
kfree(sysex);
return count;
}
/*
"write" request on "store_channel" special file.
*/
static ssize_t pod_set_store_channel(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return pod_send_store_command(dev, buf, count, 0x0000, 0x00c0);
}
/*
"write" request on "store_effects_setup" special file.
*/
static ssize_t pod_set_store_effects_setup(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return pod_send_store_command(dev, buf, count, 0x0080, 0x0080);
}
/*
"write" request on "store_amp_setup" special file.
*/
static ssize_t pod_set_store_amp_setup(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return pod_send_store_command(dev, buf, count, 0x0040, 0x0100);
}
/*
"write" request on "retrieve_channel" special file.
*/
static ssize_t pod_set_retrieve_channel(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return pod_send_retrieve_command(dev, buf, count, 0x0000, 0x00c0);
}
/*
"write" request on "retrieve_effects_setup" special file.
*/
static ssize_t pod_set_retrieve_effects_setup(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return pod_send_retrieve_command(dev, buf, count, 0x0080, 0x0080);
}
/*
"write" request on "retrieve_amp_setup" special file.
*/
static ssize_t pod_set_retrieve_amp_setup(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return pod_send_retrieve_command(dev, buf, count, 0x0040, 0x0100);
}
/*
"read" request on "dirty" special file.
*/
static ssize_t pod_get_dirty(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
buf[0] = pod->dirty ? '1' : '0';
buf[1] = '\n';
return 2;
}
/*
"read" request on "midi_postprocess" special file.
*/
static ssize_t pod_get_midi_postprocess(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
return sprintf(buf, "%d\n", pod->midi_postprocess);
}
/*
"write" request on "midi_postprocess" special file.
*/
static ssize_t pod_set_midi_postprocess(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
unsigned long value;
int ret;
ret = strict_strtoul(buf, 10, &value);
if (ret)
return ret;
pod->midi_postprocess = value ? 1 : 0;
return count;
}
/*
"read" request on "serial_number" special file.
*/
static ssize_t pod_get_serial_number(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
return sprintf(buf, "%d\n", pod->serial_number);
}
/*
"read" request on "firmware_version" special file.
*/
static ssize_t pod_get_firmware_version(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
return sprintf(buf, "%d.%02d\n", pod->firmware_version / 100,
pod->firmware_version % 100);
}
/*
"read" request on "device_id" special file.
*/
static ssize_t pod_get_device_id(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
return sprintf(buf, "%d\n", pod->device_id);
}
/*
"read" request on "clip" special file.
*/
static ssize_t pod_wait_for_clip(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct usb_interface *interface = to_usb_interface(dev);
struct usb_line6_pod *pod = usb_get_intfdata(interface);
return wait_event_interruptible(pod->clipping.wait,
pod->clipping.value != 0);
}
/*
POD startup procedure.
This is a sequence of functions with special requirements (e.g., must
not run immediately after initialization, must not run in interrupt
context). After the last one has finished, the device is ready to use.
*/
static void pod_startup1(struct usb_line6_pod *pod)
{
CHECK_STARTUP_PROGRESS(pod->startup_progress, POD_STARTUP_INIT);
/* delay startup procedure: */
line6_start_timer(&pod->startup_timer, POD_STARTUP_DELAY, pod_startup2,
(unsigned long)pod);
}
static void pod_startup2(unsigned long data)
{
struct usb_line6_pod *pod = (struct usb_line6_pod *)data;
/* schedule another startup procedure until startup is complete: */
if (pod->startup_progress >= POD_STARTUP_LAST)
return;
pod->startup_progress = POD_STARTUP_DUMPREQ;
line6_start_timer(&pod->startup_timer, POD_STARTUP_DELAY, pod_startup2,
(unsigned long)pod);
/* current channel dump: */
line6_dump_request_async(&pod->dumpreq, &pod->line6, 0,
LINE6_DUMP_CURRENT);
}
static void pod_startup3(struct usb_line6_pod *pod)
{
struct usb_line6 *line6 = &pod->line6;
CHECK_STARTUP_PROGRESS(pod->startup_progress, POD_STARTUP_VERSIONREQ);
/* request firmware version: */
line6_version_request_async(line6);
}
static void pod_startup4(struct usb_line6_pod *pod)
{
CHECK_STARTUP_PROGRESS(pod->startup_progress, POD_STARTUP_WORKQUEUE);
/* schedule work for global work queue: */
schedule_work(&pod->startup_work);
}
static void pod_startup5(struct work_struct *work)
{
struct usb_line6_pod *pod =
container_of(work, struct usb_line6_pod, startup_work);
struct usb_line6 *line6 = &pod->line6;
CHECK_STARTUP_PROGRESS(pod->startup_progress, POD_STARTUP_SETUP);
/* serial number: */
line6_read_serial_number(&pod->line6, &pod->serial_number);
/* ALSA audio interface: */
line6_register_audio(line6);
/* device files: */
line6_pod_create_files(pod->firmware_version,
line6->properties->device_bit, line6->ifcdev);
}
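/*
 * Startup flow, for reference: pod_startup1() arms a timer ->
 * pod_startup2() requests a channel dump -> the dump reply triggers
 * pod_startup3(), which requests the firmware version -> the version
 * reply triggers pod_startup4(), which schedules pod_startup5() in
 * process context to read the serial number, register the ALSA audio
 * interface and create the device files.
 */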
#define POD_GET_SYSTEM_PARAM(code, sign) \
static ssize_t pod_get_ ## code(struct device *dev, \
struct device_attribute *attr, char *buf) \
{ \
struct usb_interface *interface = to_usb_interface(dev); \
struct usb_line6_pod *pod = usb_get_intfdata(interface); \
return pod_get_system_param_string(pod, buf, POD_ ## code, \
&pod->code, sign); \
}
#define POD_GET_SET_SYSTEM_PARAM(code, mask, sign) \
POD_GET_SYSTEM_PARAM(code, sign) \
static ssize_t pod_set_ ## code(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct usb_interface *interface = to_usb_interface(dev); \
struct usb_line6_pod *pod = usb_get_intfdata(interface); \
return pod_set_system_param_string(pod, buf, count, POD_ ## code, mask); \
}
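/*
 * For example, POD_GET_SET_SYSTEM_PARAM(routing, 0x0003, 0) below
 * expands to pod_get_routing() and pod_set_routing(), wired to system
 * parameter code POD_routing with written values masked to the low two
 * bits.
 */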
POD_GET_SET_SYSTEM_PARAM(monitor_level, 0xffff, 0);
POD_GET_SET_SYSTEM_PARAM(routing, 0x0003, 0);
POD_GET_SET_SYSTEM_PARAM(tuner_mute, 0x0001, 0);
POD_GET_SET_SYSTEM_PARAM(tuner_freq, 0xffff, 0);
POD_GET_SYSTEM_PARAM(tuner_note, 1);
POD_GET_SYSTEM_PARAM(tuner_pitch, 1);
#undef POD_GET_SET_SYSTEM_PARAM
#undef POD_GET_SYSTEM_PARAM
/* POD special files: */
static DEVICE_ATTR(channel, S_IWUSR | S_IRUGO, pod_get_channel,
pod_set_channel);
static DEVICE_ATTR(clip, S_IRUGO, pod_wait_for_clip, line6_nop_write);
static DEVICE_ATTR(device_id, S_IRUGO, pod_get_device_id, line6_nop_write);
static DEVICE_ATTR(dirty, S_IRUGO, pod_get_dirty, line6_nop_write);
static DEVICE_ATTR(dump, S_IWUSR | S_IRUGO, pod_get_dump, pod_set_dump);
static DEVICE_ATTR(dump_buf, S_IWUSR | S_IRUGO, pod_get_dump_buf,
pod_set_dump_buf);
static DEVICE_ATTR(finish, S_IWUSR, line6_nop_read, pod_set_finish);
static DEVICE_ATTR(firmware_version, S_IRUGO, pod_get_firmware_version,
line6_nop_write);
static DEVICE_ATTR(midi_postprocess, S_IWUSR | S_IRUGO,
pod_get_midi_postprocess, pod_set_midi_postprocess);
static DEVICE_ATTR(monitor_level, S_IWUSR | S_IRUGO, pod_get_monitor_level,
pod_set_monitor_level);
static DEVICE_ATTR(name, S_IRUGO, pod_get_name, line6_nop_write);
static DEVICE_ATTR(name_buf, S_IRUGO, pod_get_name_buf, line6_nop_write);
static DEVICE_ATTR(retrieve_amp_setup, S_IWUSR, line6_nop_read,
pod_set_retrieve_amp_setup);
static DEVICE_ATTR(retrieve_channel, S_IWUSR, line6_nop_read,
pod_set_retrieve_channel);
static DEVICE_ATTR(retrieve_effects_setup, S_IWUSR, line6_nop_read,
pod_set_retrieve_effects_setup);
static DEVICE_ATTR(routing, S_IWUSR | S_IRUGO, pod_get_routing,
pod_set_routing);
static DEVICE_ATTR(serial_number, S_IRUGO, pod_get_serial_number,
line6_nop_write);
static DEVICE_ATTR(store_amp_setup, S_IWUSR, line6_nop_read,
pod_set_store_amp_setup);
static DEVICE_ATTR(store_channel, S_IWUSR, line6_nop_read,
pod_set_store_channel);
static DEVICE_ATTR(store_effects_setup, S_IWUSR, line6_nop_read,
pod_set_store_effects_setup);
static DEVICE_ATTR(tuner_freq, S_IWUSR | S_IRUGO, pod_get_tuner_freq,
pod_set_tuner_freq);
static DEVICE_ATTR(tuner_mute, S_IWUSR | S_IRUGO, pod_get_tuner_mute,
pod_set_tuner_mute);
static DEVICE_ATTR(tuner_note, S_IRUGO, pod_get_tuner_note, line6_nop_write);
static DEVICE_ATTR(tuner_pitch, S_IRUGO, pod_get_tuner_pitch, line6_nop_write);
#ifdef CONFIG_LINE6_USB_RAW
static DEVICE_ATTR(raw, S_IWUSR, line6_nop_read, line6_set_raw);
#endif
/* control info callback */
static int snd_pod_control_monitor_info(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 65535;
return 0;
}
/* control get callback */
static int snd_pod_control_monitor_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
struct usb_line6_pod *pod = (struct usb_line6_pod *)line6pcm->line6;
ucontrol->value.integer.value[0] = pod->monitor_level.value;
return 0;
}
/* control put callback */
static int snd_pod_control_monitor_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_line6_pcm *line6pcm = snd_kcontrol_chip(kcontrol);
struct usb_line6_pod *pod = (struct usb_line6_pod *)line6pcm->line6;
if (ucontrol->value.integer.value[0] == pod->monitor_level.value)
return 0;
pod->monitor_level.value = ucontrol->value.integer.value[0];
pod_set_system_param_int(pod, ucontrol->value.integer.value[0],
POD_monitor_level);
return 1;
}
/* control definition */
static struct snd_kcontrol_new pod_control_monitor = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Monitor Playback Volume",
.index = 0,
.access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
.info = snd_pod_control_monitor_info,
.get = snd_pod_control_monitor_get,
.put = snd_pod_control_monitor_put
};
/*
POD destructor.
*/
static void pod_destruct(struct usb_interface *interface)
{
struct usb_line6_pod *pod = usb_get_intfdata(interface);
if (pod == NULL)
return;
line6_cleanup_audio(&pod->line6);
del_timer(&pod->startup_timer);
cancel_work_sync(&pod->startup_work);
/* free dump request data: */
line6_dumpreq_destruct(&pod->dumpreq);
}
/*
Create sysfs entries.
*/
static int pod_create_files2(struct device *dev)
{
int err;
CHECK_RETURN(device_create_file(dev, &dev_attr_channel));
CHECK_RETURN(device_create_file(dev, &dev_attr_clip));
CHECK_RETURN(device_create_file(dev, &dev_attr_device_id));
CHECK_RETURN(device_create_file(dev, &dev_attr_dirty));
CHECK_RETURN(device_create_file(dev, &dev_attr_dump));
CHECK_RETURN(device_create_file(dev, &dev_attr_dump_buf));
CHECK_RETURN(device_create_file(dev, &dev_attr_finish));
CHECK_RETURN(device_create_file(dev, &dev_attr_firmware_version));
CHECK_RETURN(device_create_file(dev, &dev_attr_midi_postprocess));
CHECK_RETURN(device_create_file(dev, &dev_attr_monitor_level));
CHECK_RETURN(device_create_file(dev, &dev_attr_name));
CHECK_RETURN(device_create_file(dev, &dev_attr_name_buf));
CHECK_RETURN(device_create_file(dev, &dev_attr_retrieve_amp_setup));
CHECK_RETURN(device_create_file(dev, &dev_attr_retrieve_channel));
CHECK_RETURN(device_create_file(dev, &dev_attr_retrieve_effects_setup));
CHECK_RETURN(device_create_file(dev, &dev_attr_routing));
CHECK_RETURN(device_create_file(dev, &dev_attr_serial_number));
CHECK_RETURN(device_create_file(dev, &dev_attr_store_amp_setup));
CHECK_RETURN(device_create_file(dev, &dev_attr_store_channel));
CHECK_RETURN(device_create_file(dev, &dev_attr_store_effects_setup));
CHECK_RETURN(device_create_file(dev, &dev_attr_tuner_freq));
CHECK_RETURN(device_create_file(dev, &dev_attr_tuner_mute));
CHECK_RETURN(device_create_file(dev, &dev_attr_tuner_note));
CHECK_RETURN(device_create_file(dev, &dev_attr_tuner_pitch));
#ifdef CONFIG_LINE6_USB_RAW
CHECK_RETURN(device_create_file(dev, &dev_attr_raw));
#endif
return 0;
}
/*
Try to init POD device.
*/
static int pod_try_init(struct usb_interface *interface,
struct usb_line6_pod *pod)
{
int err;
struct usb_line6 *line6 = &pod->line6;
if ((interface == NULL) || (pod == NULL))
	return -ENODEV;
init_timer(&pod->startup_timer);
INIT_WORK(&pod->startup_work, pod_startup5);
pod->channel_num = 255;
/* initialize wait queues: */
init_waitqueue_head(&pod->monitor_level.wait);
init_waitqueue_head(&pod->routing.wait);
init_waitqueue_head(&pod->tuner_mute.wait);
init_waitqueue_head(&pod->tuner_freq.wait);
init_waitqueue_head(&pod->tuner_note.wait);
init_waitqueue_head(&pod->tuner_pitch.wait);
init_waitqueue_head(&pod->clipping.wait);
memset(pod->param_dirty, 0xff, sizeof(pod->param_dirty));
/* initialize USB buffers: */
err = line6_dumpreq_init(&pod->dumpreq, pod_request_channel,
sizeof(pod_request_channel));
if (err < 0) {
dev_err(&interface->dev, "Out of memory\n");
return -ENOMEM;
}
/* create sysfs entries: */
err = pod_create_files2(&interface->dev);
if (err < 0)
return err;
/* initialize audio system: */
err = line6_init_audio(line6);
if (err < 0)
return err;
/* initialize MIDI subsystem: */
err = line6_init_midi(line6);
if (err < 0)
return err;
/* initialize PCM subsystem: */
err = line6_init_pcm(line6, &pod_pcm_properties);
if (err < 0)
return err;
/* register monitor control: */
err = snd_ctl_add(line6->card,
snd_ctl_new1(&pod_control_monitor, line6->line6pcm));
if (err < 0)
return err;
/*
When the sound card is registered at this point, the PODxt Live
displays "Invalid Code Error 07", so we do it later in the event
handler.
*/
if (pod->line6.properties->capabilities & LINE6_BIT_CONTROL) {
pod->monitor_level.value = POD_system_invalid;
/* initiate startup procedure: */
pod_startup1(pod);
}
return 0;
}
/*
Init POD device (and clean up in case of failure).
*/
int line6_pod_init(struct usb_interface *interface, struct usb_line6_pod *pod)
{
int err = pod_try_init(interface, pod);
if (err < 0)
pod_destruct(interface);
return err;
}
/*
POD device disconnected.
*/
void line6_pod_disconnect(struct usb_interface *interface)
{
struct usb_line6_pod *pod;
if (interface == NULL)
return;
pod = usb_get_intfdata(interface);
if (pod != NULL) {
struct snd_line6_pcm *line6pcm = pod->line6.line6pcm;
struct device *dev = &interface->dev;
if (line6pcm != NULL)
line6_pcm_disconnect(line6pcm);
if (dev != NULL) {
/* remove sysfs entries: */
line6_pod_remove_files(pod->firmware_version,
		       pod->line6.properties->device_bit, dev);
device_remove_file(dev, &dev_attr_channel);
device_remove_file(dev, &dev_attr_clip);
device_remove_file(dev, &dev_attr_device_id);
device_remove_file(dev, &dev_attr_dirty);
device_remove_file(dev, &dev_attr_dump);
device_remove_file(dev, &dev_attr_dump_buf);
device_remove_file(dev, &dev_attr_finish);
device_remove_file(dev, &dev_attr_firmware_version);
device_remove_file(dev, &dev_attr_midi_postprocess);
device_remove_file(dev, &dev_attr_monitor_level);
device_remove_file(dev, &dev_attr_name);
device_remove_file(dev, &dev_attr_name_buf);
device_remove_file(dev, &dev_attr_retrieve_amp_setup);
device_remove_file(dev, &dev_attr_retrieve_channel);
device_remove_file(dev,
&dev_attr_retrieve_effects_setup);
device_remove_file(dev, &dev_attr_routing);
device_remove_file(dev, &dev_attr_serial_number);
device_remove_file(dev, &dev_attr_store_amp_setup);
device_remove_file(dev, &dev_attr_store_channel);
device_remove_file(dev, &dev_attr_store_effects_setup);
device_remove_file(dev, &dev_attr_tuner_freq);
device_remove_file(dev, &dev_attr_tuner_mute);
device_remove_file(dev, &dev_attr_tuner_note);
device_remove_file(dev, &dev_attr_tuner_pitch);
#ifdef CONFIG_LINE6_USB_RAW
device_remove_file(dev, &dev_attr_raw);
#endif
}
}
pod_destruct(interface);
}
| gpl-2.0 |
draekko/android_kernel_lg_hammerhead-neobuddy89 | drivers/input/touchscreen/cy8c_tmg_ts.c | 4947 | 11299 | /* drivers/input/touchscreen/cy8c_tmg_ts.c
*
* Copyright (C) 2007-2008 HTC Corporation.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/cy8c_tmg_ts.h>
#include <linux/delay.h>
#include <linux/earlysuspend.h>
#include <linux/hrtimer.h>
#include <linux/i2c.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#define CY8C_REG_START_NEW_SCAN 0x0F
#define CY8C_REG_INTR_STATUS 0x3C
#define CY8C_REG_VERSION 0x3E
struct cy8c_ts_data {
struct i2c_client *client;
struct input_dev *input_dev;
int use_irq;
struct hrtimer timer;
struct work_struct work;
uint16_t version;
int (*power) (int on);
struct early_suspend early_suspend;
};
struct workqueue_struct *cypress_touch_wq;
#ifdef CONFIG_HAS_EARLYSUSPEND
static void cy8c_ts_early_suspend(struct early_suspend *h);
static void cy8c_ts_late_resume(struct early_suspend *h);
#endif
uint16_t sample_count, X_mean, Y_mean, first_touch;
static s32 cy8c_read_word_data(struct i2c_client *client,
u8 command, uint16_t * data)
{
s32 ret = i2c_smbus_read_word_data(client, command);
/* i2c_smbus_read_word_data() returns a negative errno on failure;
 * on success the word arrives LSB first, so swap it to the panel's
 * MSB-first order */
if (ret >= 0)
	*data = (u16)((ret << 8) | (ret >> 8));
return ret;
}
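/*
 * Worked example: if the panel's version register holds 0x0102 (MSB
 * first on the wire), i2c_smbus_read_word_data() returns 0x0201 and the
 * swap above restores *data = 0x0102.
 */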
static int cy8c_init_panel(struct cy8c_ts_data *ts)
{
int ret;
sample_count = X_mean = Y_mean = first_touch = 0;
/* clean intr busy */
ret = i2c_smbus_write_byte_data(ts->client, CY8C_REG_INTR_STATUS,
0x00);
if (ret < 0) {
dev_err(&ts->client->dev,
"cy8c_init_panel failed for clean intr busy\n");
goto exit;
}
/* start new scan */
ret = i2c_smbus_write_byte_data(ts->client, CY8C_REG_START_NEW_SCAN,
0x01);
if (ret < 0) {
dev_err(&ts->client->dev,
"cy8c_init_panel failed for start new scan\n");
goto exit;
}
exit:
return ret;
}
static void cy8c_ts_reset(struct i2c_client *client)
{
struct cy8c_ts_data *ts = i2c_get_clientdata(client);
if (ts->power) {
ts->power(0);
msleep(10);
ts->power(1);
msleep(10);
}
cy8c_init_panel(ts);
}
static void cy8c_ts_work_func(struct work_struct *work)
{
struct cy8c_ts_data *ts = container_of(work, struct cy8c_ts_data, work);
uint16_t x1, y1, x2, y2;
uint8_t is_touch, start_reg, force, area, finger2_pressed;
uint8_t buf[11];
struct i2c_msg msg[2];
int ret = 0;
x2 = y2 = 0;
finger2_pressed = 0;
/*printk("%s: enter\n",__func__);*/
ret = i2c_smbus_read_byte_data(ts->client, 0x20);
is_touch = ret;
dev_dbg(&ts->client->dev, "fIsTouch %d,\n", is_touch);
/* the SMBus read returns a negative errno on failure; test the signed
 * return value, since is_touch (uint8_t) can never be negative */
if (ret < 0 || ret > 3) {
	pr_err("%s: invalid is_touch = %d\n", __func__, ret);
cy8c_ts_reset(ts->client);
msleep(10);
goto done;
}
msg[0].addr = ts->client->addr;
msg[0].flags = 0;
msg[0].len = 1;
start_reg = 0x16;
msg[0].buf = &start_reg;
msg[1].addr = ts->client->addr;
msg[1].flags = I2C_M_RD;
msg[1].len = sizeof(buf);
msg[1].buf = buf;
ret = i2c_transfer(ts->client->adapter, msg, 2);
if (ret < 0)
goto done;
/* parse data */
force = buf[0];
area = buf[1];
x1 = (buf[2] << 8) | buf[3];
y1 = (buf[6] << 8) | buf[7];
is_touch = buf[10];
if (is_touch == 2) {
x2 = (buf[4] << 8) | buf[5];
y2 = (buf[8] << 8) | buf[9];
finger2_pressed = 1;
}
dev_dbg(&ts->client->dev,
"bFingerForce %d, bFingerArea %d \n", force, area);
dev_dbg(&ts->client->dev, "x1: %d, y1: %d \n", x1, y1);
if (finger2_pressed)
dev_dbg(&ts->client->dev, "x2: %d, y2: %d \n", x2, y2);
/* drop the first event of a new touch */
if ((is_touch == 1) && (first_touch == 0)) {
first_touch = 1;
goto done;
}
if (!first_touch)
goto done;
if (is_touch == 2)
finger2_pressed = 1;
input_report_abs(ts->input_dev, ABS_X, x1);
input_report_abs(ts->input_dev, ABS_Y, y1);
input_report_abs(ts->input_dev, ABS_PRESSURE, force);
input_report_abs(ts->input_dev, ABS_TOOL_WIDTH, area);
input_report_key(ts->input_dev, BTN_TOUCH, is_touch);
input_report_key(ts->input_dev, BTN_2, finger2_pressed);
if (finger2_pressed) {
input_report_abs(ts->input_dev, ABS_HAT0X, x2);
input_report_abs(ts->input_dev, ABS_HAT0Y, y2);
}
input_sync(ts->input_dev);
done:
if (is_touch == 0)
first_touch = sample_count = 0;
/* prepare for next intr */
i2c_smbus_write_byte_data(ts->client, CY8C_REG_INTR_STATUS, 0x00);
if (!ts->use_irq)
hrtimer_start(&ts->timer, ktime_set(0, 12500000), HRTIMER_MODE_REL);
else
enable_irq(ts->client->irq);
}
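/*
 * Touch data layout as read above (inferred from the parsing code, not
 * from a datasheet): reg 0x16 = force, 0x17 = area, 0x18-0x19 = X1,
 * 0x1a-0x1b = X2, 0x1c-0x1d = Y1, 0x1e-0x1f = Y2 (all big-endian
 * 16-bit), and 0x20 = finger count (0-2).
 */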
static enum hrtimer_restart cy8c_ts_timer_func(struct hrtimer *timer)
{
struct cy8c_ts_data *ts;
ts = container_of(timer, struct cy8c_ts_data, timer);
queue_work(cypress_touch_wq, &ts->work);
return HRTIMER_NORESTART;
}
static irqreturn_t cy8c_ts_irq_handler(int irq, void *dev_id)
{
struct cy8c_ts_data *ts = dev_id;
disable_irq_nosync(ts->client->irq);
queue_work(cypress_touch_wq, &ts->work);
return IRQ_HANDLED;
}
static int cy8c_ts_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct cy8c_ts_data *ts;
struct cy8c_i2c_platform_data *pdata;
uint16_t panel_version;
int ret = 0;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_err(&client->dev, "need I2C_FUNC_I2C\n");
ret = -ENODEV;
goto err_check_functionality_failed;
}
ts = kzalloc(sizeof(struct cy8c_ts_data), GFP_KERNEL);
if (ts == NULL) {
dev_err(&client->dev, "allocate cy8c_ts_data failed\n");
ret = -ENOMEM;
goto err_alloc_data_failed;
}
INIT_WORK(&ts->work, cy8c_ts_work_func);
ts->client = client;
i2c_set_clientdata(client, ts);
pdata = client->dev.platform_data;
if (pdata) {
ts->version = pdata->version;
ts->power = pdata->power;
}
if (ts->power) {
ret = ts->power(1);
msleep(10);
if (ret < 0) {
dev_err(&client->dev, "power on failed\n");
goto err_power_failed;
}
}
ret = cy8c_read_word_data(ts->client, CY8C_REG_VERSION, &panel_version);
if (ret < 0) {
dev_err(&client->dev, "init panel failed\n");
goto err_detect_failed;
}
dev_info(&client->dev, "Panel Version %04X\n", panel_version);
if (pdata) {
while (pdata->version > panel_version) {
dev_info(&client->dev, "old tp detected, "
"panel version = %x\n", panel_version);
pdata++;
}
}
ret = cy8c_init_panel(ts);
if (ret < 0) {
dev_err(&client->dev, "init panel failed\n");
goto err_detect_failed;
}
ts->input_dev = input_allocate_device();
if (ts->input_dev == NULL) {
ret = -ENOMEM;
dev_err(&client->dev, "Failed to allocate input device\n");
goto err_input_dev_alloc_failed;
}
ts->input_dev->name = "cy8c-touchscreen";
set_bit(EV_SYN, ts->input_dev->evbit);
set_bit(EV_ABS, ts->input_dev->evbit);
set_bit(EV_KEY, ts->input_dev->evbit);
input_set_capability(ts->input_dev, EV_KEY, BTN_TOUCH);
input_set_capability(ts->input_dev, EV_KEY, BTN_2);
input_set_abs_params(ts->input_dev, ABS_X,
pdata->abs_x_min, pdata->abs_x_max, 5, 0);
input_set_abs_params(ts->input_dev, ABS_Y,
pdata->abs_y_min, pdata->abs_y_max, 5, 0);
input_set_abs_params(ts->input_dev, ABS_HAT0X,
pdata->abs_x_min, pdata->abs_x_max, 0, 0);
input_set_abs_params(ts->input_dev, ABS_HAT0Y,
pdata->abs_y_min, pdata->abs_y_max, 0, 0);
input_set_abs_params(ts->input_dev, ABS_PRESSURE,
pdata->abs_pressure_min, pdata->abs_pressure_max,
0, 0);
input_set_abs_params(ts->input_dev, ABS_TOOL_WIDTH,
pdata->abs_width_min, pdata->abs_width_max, 0, 0);
ret = input_register_device(ts->input_dev);
if (ret) {
dev_err(&client->dev,
"cy8c_ts_probe: Unable to register %s input device\n",
ts->input_dev->name);
goto err_input_register_device_failed;
}
if (client->irq) {
ret = request_irq(client->irq, cy8c_ts_irq_handler,
IRQF_TRIGGER_LOW, CYPRESS_TMG_NAME, ts);
if (ret == 0)
ts->use_irq = 1;
else
dev_err(&client->dev, "request_irq failed\n");
}
if (!ts->use_irq) {
hrtimer_init(&ts->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ts->timer.function = cy8c_ts_timer_func;
hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
}
#ifdef CONFIG_HAS_EARLYSUSPEND
ts->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 1;
ts->early_suspend.suspend = cy8c_ts_early_suspend;
ts->early_suspend.resume = cy8c_ts_late_resume;
register_early_suspend(&ts->early_suspend);
#endif
dev_info(&client->dev, "Start touchscreen %s in %s mode\n",
ts->input_dev->name, (ts->use_irq ? "interrupt" : "polling"));
return 0;
err_input_register_device_failed:
input_free_device(ts->input_dev);
err_input_dev_alloc_failed:
if (ts->power)
ts->power(0);
err_detect_failed:
err_power_failed:
kfree(ts);
err_alloc_data_failed:
err_check_functionality_failed:
return ret;
}
static int cy8c_ts_remove(struct i2c_client *client)
{
struct cy8c_ts_data *ts = i2c_get_clientdata(client);
unregister_early_suspend(&ts->early_suspend);
if (ts->use_irq)
free_irq(client->irq, ts);
else
hrtimer_cancel(&ts->timer);
input_unregister_device(ts->input_dev);
kfree(ts);
return 0;
}
static int cy8c_ts_suspend(struct i2c_client *client, pm_message_t mesg)
{
struct cy8c_ts_data *ts = i2c_get_clientdata(client);
int ret;
if (ts->use_irq)
disable_irq_nosync(client->irq);
else
hrtimer_cancel(&ts->timer);
ret = cancel_work_sync(&ts->work);
if (ret && ts->use_irq)
enable_irq(client->irq);
if (ts->power)
ts->power(0);
return 0;
}
static int cy8c_ts_resume(struct i2c_client *client)
{
int ret;
struct cy8c_ts_data *ts = i2c_get_clientdata(client);
if (ts->power) {
ret = ts->power(1);
if (ret < 0)
dev_err(&client->dev,
"cy8c_ts_resume power on failed\n");
msleep(10);
cy8c_init_panel(ts);
}
if (ts->use_irq)
enable_irq(client->irq);
else
hrtimer_start(&ts->timer, ktime_set(1, 0), HRTIMER_MODE_REL);
return 0;
}
#ifdef CONFIG_HAS_EARLYSUSPEND
static void cy8c_ts_early_suspend(struct early_suspend *h)
{
struct cy8c_ts_data *ts;
ts = container_of(h, struct cy8c_ts_data, early_suspend);
cy8c_ts_suspend(ts->client, PMSG_SUSPEND);
}
static void cy8c_ts_late_resume(struct early_suspend *h)
{
struct cy8c_ts_data *ts;
ts = container_of(h, struct cy8c_ts_data, early_suspend);
cy8c_ts_resume(ts->client);
}
#endif
static const struct i2c_device_id cy8c_ts_i2c_id[] = {
{CYPRESS_TMG_NAME, 0},
{}
};
static struct i2c_driver cy8c_ts_driver = {
.id_table = cy8c_ts_i2c_id,
.probe = cy8c_ts_probe,
.remove = cy8c_ts_remove,
#ifndef CONFIG_HAS_EARLYSUSPEND
.suspend = cy8c_ts_suspend,
.resume = cy8c_ts_resume,
#endif
.driver = {
.name = CYPRESS_TMG_NAME,
.owner = THIS_MODULE,
},
};
static int __devinit cy8c_ts_init(void)
{
cypress_touch_wq = create_singlethread_workqueue("cypress_touch_wq");
if (!cypress_touch_wq)
return -ENOMEM;
return i2c_add_driver(&cy8c_ts_driver);
}
static void __exit cy8c_ts_exit(void)
{
if (cypress_touch_wq)
destroy_workqueue(cypress_touch_wq);
i2c_del_driver(&cy8c_ts_driver);
}
module_init(cy8c_ts_init);
module_exit(cy8c_ts_exit);
MODULE_DESCRIPTION("Cypress TMG Touchscreen Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
hariimurti/kernel_armani | drivers/isdn/sc/event.c | 7507 | 1733 | /* $Id: event.c,v 1.4.8.1 2001/09/23 22:24:59 kai Exp $
*
* Copyright (C) 1996 SpellCaster Telecommunications Inc.
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* For more information, please contact gpl-info@spellcast.com or write:
*
* SpellCaster Telecommunications Inc.
* 5621 Finch Avenue East, Unit #3
* Scarborough, Ontario Canada
* M1B 2T9
* +1 (416) 297-8565
* +1 (416) 297-6433 Facsimile
*/
#include "includes.h"
#include "hardware.h"
#include "message.h"
#include "card.h"
#ifdef DEBUG
static char *events[] = { "ISDN_STAT_STAVAIL",
"ISDN_STAT_ICALL",
"ISDN_STAT_RUN",
"ISDN_STAT_STOP",
"ISDN_STAT_DCONN",
"ISDN_STAT_BCONN",
"ISDN_STAT_DHUP",
"ISDN_STAT_BHUP",
"ISDN_STAT_CINF",
"ISDN_STAT_LOAD",
"ISDN_STAT_UNLOAD",
"ISDN_STAT_BSENT",
"ISDN_STAT_NODCH",
"ISDN_STAT_ADDCH",
"ISDN_STAT_CAUSE" };
#endif
int indicate_status(int card, int event, ulong Channel, char *Data)
{
isdn_ctrl cmd;
#ifdef DEBUG
pr_debug("%s: Indicating event %s on Channel %d\n",
sc_adapter[card]->devicename, events[event - 256], Channel);
#endif
if (Data != NULL) {
pr_debug("%s: Event data: %s\n", sc_adapter[card]->devicename,
Data);
switch (event) {
case ISDN_STAT_BSENT:
memcpy(&cmd.parm.length, Data, sizeof(cmd.parm.length));
break;
case ISDN_STAT_ICALL:
memcpy(&cmd.parm.setup, Data, sizeof(cmd.parm.setup));
break;
default:
strcpy(cmd.parm.num, Data);
}
}
cmd.command = event;
cmd.driver = sc_adapter[card]->driverId;
cmd.arg = Channel;
return sc_adapter[card]->card->statcallb(&cmd);
}
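/*
 * Hedged usage sketch: card-level code might report a B-channel coming
 * up like this (channel numbering per the card's conventions;
 * illustrative only):
 *
 *	indicate_status(card, ISDN_STAT_BCONN, channel, NULL);
 *
 * Events that carry a payload pass it via Data, e.g. ISDN_STAT_ICALL
 * with a setup structure or ISDN_STAT_BSENT with a length.
 */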
| gpl-2.0 |
felipeizzo/sprat | drivers/net/wireless/b43/leds.c | 9555 | 9271 | /*
Broadcom B43 wireless driver
LED control
Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>,
Copyright (c) 2005 Stefano Brivio <stefano.brivio@polimi.it>
Copyright (c) 2005-2007 Michael Buesch <m@bues.ch>
Copyright (c) 2005 Danny van Dyk <kugelfang@gentoo.org>
Copyright (c) 2005 Andreas Jaggi <andreas.jaggi@waterwave.ch>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include "b43.h"
#include "leds.h"
#include "rfkill.h"
static void b43_led_turn_on(struct b43_wldev *dev, u8 led_index,
bool activelow)
{
u16 ctl;
ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
if (activelow)
ctl &= ~(1 << led_index);
else
ctl |= (1 << led_index);
b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
}
static void b43_led_turn_off(struct b43_wldev *dev, u8 led_index,
bool activelow)
{
u16 ctl;
ctl = b43_read16(dev, B43_MMIO_GPIO_CONTROL);
if (activelow)
ctl |= (1 << led_index);
else
ctl &= ~(1 << led_index);
b43_write16(dev, B43_MMIO_GPIO_CONTROL, ctl);
}
static void b43_led_update(struct b43_wldev *dev,
struct b43_led *led)
{
bool radio_enabled;
bool turn_on;
if (!led->wl)
return;
radio_enabled = (dev->phy.radio_on && dev->radio_hw_enable);
/* The led->state read is racy, but we don't care. In case we raced
* with the brightness_set handler, we will be called again soon
* to fixup our state. */
if (radio_enabled)
turn_on = atomic_read(&led->state) != LED_OFF;
else
turn_on = false;
if (turn_on == led->hw_state)
return;
led->hw_state = turn_on;
if (turn_on)
b43_led_turn_on(dev, led->index, led->activelow);
else
b43_led_turn_off(dev, led->index, led->activelow);
}
static void b43_leds_work(struct work_struct *work)
{
struct b43_leds *leds = container_of(work, struct b43_leds, work);
struct b43_wl *wl = container_of(leds, struct b43_wl, leds);
struct b43_wldev *dev;
mutex_lock(&wl->mutex);
dev = wl->current_dev;
if (unlikely(!dev || b43_status(dev) < B43_STAT_STARTED))
goto out_unlock;
b43_led_update(dev, &wl->leds.led_tx);
b43_led_update(dev, &wl->leds.led_rx);
b43_led_update(dev, &wl->leds.led_radio);
b43_led_update(dev, &wl->leds.led_assoc);
out_unlock:
mutex_unlock(&wl->mutex);
}
/* Callback from the LED subsystem. */
static void b43_led_brightness_set(struct led_classdev *led_dev,
enum led_brightness brightness)
{
struct b43_led *led = container_of(led_dev, struct b43_led, led_dev);
struct b43_wl *wl = led->wl;
if (likely(!wl->leds.stop)) {
atomic_set(&led->state, brightness);
ieee80211_queue_work(wl->hw, &wl->leds.work);
}
}
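/*
 * Why the deferral above (explanatory note): brightness_set may be called
 * in atomic context by the LED subsystem, while the actual GPIO update in
 * b43_leds_work() needs wl->mutex. The callback therefore only records the
 * requested state atomically and queues the work item; b43_led_update()
 * later reconciles led->state with led->hw_state.
 */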
static int b43_register_led(struct b43_wldev *dev, struct b43_led *led,
const char *name, const char *default_trigger,
u8 led_index, bool activelow)
{
int err;
if (led->wl)
return -EEXIST;
if (!default_trigger)
return -EINVAL;
led->wl = dev->wl;
led->index = led_index;
led->activelow = activelow;
	strlcpy(led->name, name, sizeof(led->name)); /* guarantees NUL termination */
atomic_set(&led->state, 0);
led->led_dev.name = led->name;
led->led_dev.default_trigger = default_trigger;
led->led_dev.brightness_set = b43_led_brightness_set;
err = led_classdev_register(dev->dev->dev, &led->led_dev);
if (err) {
b43warn(dev->wl, "LEDs: Failed to register %s\n", name);
led->wl = NULL;
return err;
}
return 0;
}
static void b43_unregister_led(struct b43_led *led)
{
if (!led->wl)
return;
led_classdev_unregister(&led->led_dev);
led->wl = NULL;
}
static void b43_map_led(struct b43_wldev *dev,
u8 led_index,
enum b43_led_behaviour behaviour,
bool activelow)
{
struct ieee80211_hw *hw = dev->wl->hw;
char name[B43_LED_MAX_NAME_LEN + 1];
/* Map the b43 specific LED behaviour value to the
* generic LED triggers. */
switch (behaviour) {
case B43_LED_INACTIVE:
case B43_LED_OFF:
case B43_LED_ON:
break;
case B43_LED_ACTIVITY:
case B43_LED_TRANSFER:
case B43_LED_APTRANSFER:
snprintf(name, sizeof(name),
"b43-%s::tx", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->wl->leds.led_tx, name,
ieee80211_get_tx_led_name(hw),
led_index, activelow);
snprintf(name, sizeof(name),
"b43-%s::rx", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->wl->leds.led_rx, name,
ieee80211_get_rx_led_name(hw),
led_index, activelow);
break;
case B43_LED_RADIO_ALL:
case B43_LED_RADIO_A:
case B43_LED_RADIO_B:
case B43_LED_MODE_BG:
snprintf(name, sizeof(name),
"b43-%s::radio", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->wl->leds.led_radio, name,
ieee80211_get_radio_led_name(hw),
led_index, activelow);
break;
case B43_LED_WEIRD:
case B43_LED_ASSOC:
snprintf(name, sizeof(name),
"b43-%s::assoc", wiphy_name(hw->wiphy));
b43_register_led(dev, &dev->wl->leds.led_assoc, name,
ieee80211_get_assoc_led_name(hw),
led_index, activelow);
break;
default:
b43warn(dev->wl, "LEDs: Unknown behaviour 0x%02X\n",
behaviour);
break;
}
}
static void b43_led_get_sprominfo(struct b43_wldev *dev,
unsigned int led_index,
enum b43_led_behaviour *behaviour,
bool *activelow)
{
u8 sprom[4];
sprom[0] = dev->dev->bus_sprom->gpio0;
sprom[1] = dev->dev->bus_sprom->gpio1;
sprom[2] = dev->dev->bus_sprom->gpio2;
sprom[3] = dev->dev->bus_sprom->gpio3;
if (sprom[led_index] == 0xFF) {
/* There is no LED information in the SPROM
* for this LED. Hardcode it here. */
*activelow = false;
switch (led_index) {
case 0:
*behaviour = B43_LED_ACTIVITY;
*activelow = true;
if (dev->dev->board_vendor == PCI_VENDOR_ID_COMPAQ)
*behaviour = B43_LED_RADIO_ALL;
break;
case 1:
*behaviour = B43_LED_RADIO_B;
if (dev->dev->board_vendor == PCI_VENDOR_ID_ASUSTEK)
*behaviour = B43_LED_ASSOC;
break;
case 2:
*behaviour = B43_LED_RADIO_A;
break;
case 3:
*behaviour = B43_LED_OFF;
break;
default:
*behaviour = B43_LED_OFF;
B43_WARN_ON(1);
return;
}
} else {
*behaviour = sprom[led_index] & B43_LED_BEHAVIOUR;
*activelow = !!(sprom[led_index] & B43_LED_ACTIVELOW);
}
}
void b43_leds_init(struct b43_wldev *dev)
{
struct b43_led *led;
unsigned int i;
enum b43_led_behaviour behaviour;
bool activelow;
/* Sync the RF-kill LED state (if we have one) with radio and switch states. */
led = &dev->wl->leds.led_radio;
if (led->wl) {
if (dev->phy.radio_on && b43_is_hw_radio_enabled(dev)) {
b43_led_turn_on(dev, led->index, led->activelow);
led->hw_state = true;
atomic_set(&led->state, 1);
} else {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = false;
atomic_set(&led->state, 0);
}
}
/* Initialize TX/RX/ASSOC leds */
led = &dev->wl->leds.led_tx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_rx;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = false;
atomic_set(&led->state, 0);
}
led = &dev->wl->leds.led_assoc;
if (led->wl) {
b43_led_turn_off(dev, led->index, led->activelow);
led->hw_state = false;
atomic_set(&led->state, 0);
}
/* Initialize other LED states. */
for (i = 0; i < B43_MAX_NR_LEDS; i++) {
b43_led_get_sprominfo(dev, i, &behaviour, &activelow);
switch (behaviour) {
case B43_LED_OFF:
b43_led_turn_off(dev, i, activelow);
break;
case B43_LED_ON:
b43_led_turn_on(dev, i, activelow);
break;
default:
/* Leave others as-is. */
break;
}
}
dev->wl->leds.stop = 0;
}
void b43_leds_exit(struct b43_wldev *dev)
{
struct b43_leds *leds = &dev->wl->leds;
b43_led_turn_off(dev, leds->led_tx.index, leds->led_tx.activelow);
b43_led_turn_off(dev, leds->led_rx.index, leds->led_rx.activelow);
b43_led_turn_off(dev, leds->led_assoc.index, leds->led_assoc.activelow);
b43_led_turn_off(dev, leds->led_radio.index, leds->led_radio.activelow);
}
void b43_leds_stop(struct b43_wldev *dev)
{
struct b43_leds *leds = &dev->wl->leds;
leds->stop = 1;
cancel_work_sync(&leds->work);
}
void b43_leds_register(struct b43_wldev *dev)
{
unsigned int i;
enum b43_led_behaviour behaviour;
bool activelow;
INIT_WORK(&dev->wl->leds.work, b43_leds_work);
/* Register the LEDs to the LED subsystem. */
for (i = 0; i < B43_MAX_NR_LEDS; i++) {
b43_led_get_sprominfo(dev, i, &behaviour, &activelow);
b43_map_led(dev, i, behaviour, activelow);
}
}
void b43_leds_unregister(struct b43_wl *wl)
{
struct b43_leds *leds = &wl->leds;
b43_unregister_led(&leds->led_tx);
b43_unregister_led(&leds->led_rx);
b43_unregister_led(&leds->led_assoc);
b43_unregister_led(&leds->led_radio);
}
| gpl-2.0 |
nikhiljan93/sony_yuga_kernel | sound/pci/ice1712/hoontech.c | 10067 | 11152 | /*
* ALSA driver for ICEnsemble ICE1712 (Envy24)
*
* Lowlevel functions for Hoontech STDSP24
*
* Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <sound/core.h>
#include "ice1712.h"
#include "hoontech.h"
/* Hoontech-specific setting */
struct hoontech_spec {
unsigned char boxbits[4];
unsigned int config;
unsigned short boxconfig[4];
};
static void __devinit snd_ice1712_stdsp24_gpio_write(struct snd_ice1712 *ice, unsigned char byte)
{
byte |= ICE1712_STDSP24_CLOCK_BIT;
udelay(100);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, byte);
byte &= ~ICE1712_STDSP24_CLOCK_BIT;
udelay(100);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, byte);
byte |= ICE1712_STDSP24_CLOCK_BIT;
udelay(100);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, byte);
}
static void __devinit snd_ice1712_stdsp24_darear(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
ICE1712_STDSP24_0_DAREAR(spec->boxbits, activate);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[0]);
mutex_unlock(&ice->gpio_mutex);
}
static void __devinit snd_ice1712_stdsp24_mute(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
ICE1712_STDSP24_3_MUTE(spec->boxbits, activate);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[3]);
mutex_unlock(&ice->gpio_mutex);
}
static void __devinit snd_ice1712_stdsp24_insel(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
ICE1712_STDSP24_3_INSEL(spec->boxbits, activate);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[3]);
mutex_unlock(&ice->gpio_mutex);
}
static void __devinit snd_ice1712_stdsp24_box_channel(struct snd_ice1712 *ice, int box, int chn, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
/* select box */
ICE1712_STDSP24_0_BOX(spec->boxbits, box);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[0]);
/* prepare for write */
if (chn == 3)
ICE1712_STDSP24_2_CHN4(spec->boxbits, 0);
ICE1712_STDSP24_2_MIDI1(spec->boxbits, activate);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[3]);
ICE1712_STDSP24_1_CHN1(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN2(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN3(spec->boxbits, 1);
ICE1712_STDSP24_2_CHN4(spec->boxbits, 1);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[1]);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
udelay(100);
if (chn == 3) {
ICE1712_STDSP24_2_CHN4(spec->boxbits, 0);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
} else {
switch (chn) {
case 0: ICE1712_STDSP24_1_CHN1(spec->boxbits, 0); break;
case 1: ICE1712_STDSP24_1_CHN2(spec->boxbits, 0); break;
case 2: ICE1712_STDSP24_1_CHN3(spec->boxbits, 0); break;
}
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[1]);
}
udelay(100);
ICE1712_STDSP24_1_CHN1(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN2(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN3(spec->boxbits, 1);
ICE1712_STDSP24_2_CHN4(spec->boxbits, 1);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[1]);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
udelay(100);
ICE1712_STDSP24_2_MIDI1(spec->boxbits, 0);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
mutex_unlock(&ice->gpio_mutex);
}
static void __devinit snd_ice1712_stdsp24_box_midi(struct snd_ice1712 *ice, int box, int master)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
/* select box */
ICE1712_STDSP24_0_BOX(spec->boxbits, box);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[0]);
ICE1712_STDSP24_2_MIDIIN(spec->boxbits, 1);
ICE1712_STDSP24_2_MIDI1(spec->boxbits, master);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[3]);
udelay(100);
ICE1712_STDSP24_2_MIDIIN(spec->boxbits, 0);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
mdelay(10);
ICE1712_STDSP24_2_MIDIIN(spec->boxbits, 1);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[2]);
mutex_unlock(&ice->gpio_mutex);
}
static void __devinit snd_ice1712_stdsp24_midi2(struct snd_ice1712 *ice, int activate)
{
struct hoontech_spec *spec = ice->spec;
mutex_lock(&ice->gpio_mutex);
ICE1712_STDSP24_3_MIDI2(spec->boxbits, activate);
snd_ice1712_stdsp24_gpio_write(ice, spec->boxbits[3]);
mutex_unlock(&ice->gpio_mutex);
}
static int __devinit snd_ice1712_hoontech_init(struct snd_ice1712 *ice)
{
struct hoontech_spec *spec;
int box, chn;
ice->num_total_dacs = 8;
ice->num_total_adcs = 8;
spec = kzalloc(sizeof(*spec), GFP_KERNEL);
if (!spec)
return -ENOMEM;
ice->spec = spec;
ICE1712_STDSP24_SET_ADDR(spec->boxbits, 0);
ICE1712_STDSP24_CLOCK(spec->boxbits, 0, 1);
ICE1712_STDSP24_0_BOX(spec->boxbits, 0);
ICE1712_STDSP24_0_DAREAR(spec->boxbits, 0);
ICE1712_STDSP24_SET_ADDR(spec->boxbits, 1);
ICE1712_STDSP24_CLOCK(spec->boxbits, 1, 1);
ICE1712_STDSP24_1_CHN1(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN2(spec->boxbits, 1);
ICE1712_STDSP24_1_CHN3(spec->boxbits, 1);
ICE1712_STDSP24_SET_ADDR(spec->boxbits, 2);
ICE1712_STDSP24_CLOCK(spec->boxbits, 2, 1);
ICE1712_STDSP24_2_CHN4(spec->boxbits, 1);
ICE1712_STDSP24_2_MIDIIN(spec->boxbits, 1);
ICE1712_STDSP24_2_MIDI1(spec->boxbits, 0);
ICE1712_STDSP24_SET_ADDR(spec->boxbits, 3);
ICE1712_STDSP24_CLOCK(spec->boxbits, 3, 1);
ICE1712_STDSP24_3_MIDI2(spec->boxbits, 0);
ICE1712_STDSP24_3_MUTE(spec->boxbits, 1);
ICE1712_STDSP24_3_INSEL(spec->boxbits, 0);
/* let's go - activate only functions in first box */
spec->config = 0;
/* ICE1712_STDSP24_MUTE |
ICE1712_STDSP24_INSEL |
ICE1712_STDSP24_DAREAR; */
/* These boxconfigs have caused problems in the past.
* The code is not optimal, but should now enable a working config to
* be achieved.
* ** MIDI IN can only be configured on one box **
* ICE1712_STDSP24_BOX_MIDI1 needs to be set for that box.
 * Tests on an ADAC2000 box suggest the box config flags do not
* work as would be expected, and the inputs are crossed.
* Setting ICE1712_STDSP24_BOX_MIDI1 and ICE1712_STDSP24_BOX_MIDI2
* on the same box connects MIDI-In to both 401 uarts; both outputs
* are then active on all boxes.
* The default config here sets up everything on the first box.
* Alan Horstmann 5.2.2008
*/
spec->boxconfig[0] = ICE1712_STDSP24_BOX_CHN1 |
ICE1712_STDSP24_BOX_CHN2 |
ICE1712_STDSP24_BOX_CHN3 |
ICE1712_STDSP24_BOX_CHN4 |
ICE1712_STDSP24_BOX_MIDI1 |
ICE1712_STDSP24_BOX_MIDI2;
spec->boxconfig[1] =
spec->boxconfig[2] =
spec->boxconfig[3] = 0;
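	/*
	 * Hypothetical alternative (sketch only, untested): spread the
	 * channels over two boxes while keeping MIDI on the first box only,
	 * as the note above requires:
	 *
	 *	spec->boxconfig[0] = ICE1712_STDSP24_BOX_CHN1 |
	 *			     ICE1712_STDSP24_BOX_CHN2 |
	 *			     ICE1712_STDSP24_BOX_MIDI1 |
	 *			     ICE1712_STDSP24_BOX_MIDI2;
	 *	spec->boxconfig[1] = ICE1712_STDSP24_BOX_CHN3 |
	 *			     ICE1712_STDSP24_BOX_CHN4;
	 */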
snd_ice1712_stdsp24_darear(ice,
(spec->config & ICE1712_STDSP24_DAREAR) ? 1 : 0);
snd_ice1712_stdsp24_mute(ice,
(spec->config & ICE1712_STDSP24_MUTE) ? 1 : 0);
snd_ice1712_stdsp24_insel(ice,
(spec->config & ICE1712_STDSP24_INSEL) ? 1 : 0);
for (box = 0; box < 4; box++) {
if (spec->boxconfig[box] & ICE1712_STDSP24_BOX_MIDI2)
snd_ice1712_stdsp24_midi2(ice, 1);
for (chn = 0; chn < 4; chn++)
snd_ice1712_stdsp24_box_channel(ice, box, chn,
(spec->boxconfig[box] & (1 << chn)) ? 1 : 0);
if (spec->boxconfig[box] & ICE1712_STDSP24_BOX_MIDI1)
snd_ice1712_stdsp24_box_midi(ice, box, 1);
}
return 0;
}
/*
* AK4524 access
*/
/* start callback for STDSP24 with modified hardware */
static void stdsp24_ak4524_lock(struct snd_akm4xxx *ak, int chip)
{
struct snd_ice1712 *ice = ak->private_data[0];
unsigned char tmp;
snd_ice1712_save_gpio_status(ice);
tmp = ICE1712_STDSP24_SERIAL_DATA |
ICE1712_STDSP24_SERIAL_CLOCK |
ICE1712_STDSP24_AK4524_CS;
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DIRECTION,
ice->gpio.direction | tmp);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK, ~tmp);
}
static int __devinit snd_ice1712_value_init(struct snd_ice1712 *ice)
{
/* Hoontech STDSP24 with modified hardware */
static struct snd_akm4xxx akm_stdsp24_mv __devinitdata = {
.num_adcs = 2,
.num_dacs = 2,
.type = SND_AK4524,
.ops = {
.lock = stdsp24_ak4524_lock
}
};
static struct snd_ak4xxx_private akm_stdsp24_mv_priv __devinitdata = {
.caddr = 2,
.cif = 1, /* CIF high */
.data_mask = ICE1712_STDSP24_SERIAL_DATA,
.clk_mask = ICE1712_STDSP24_SERIAL_CLOCK,
.cs_mask = ICE1712_STDSP24_AK4524_CS,
.cs_addr = ICE1712_STDSP24_AK4524_CS,
.cs_none = 0,
.add_flags = 0,
};
int err;
struct snd_akm4xxx *ak;
/* set the analog DACs */
ice->num_total_dacs = 2;
/* set the analog ADCs */
ice->num_total_adcs = 2;
/* analog section */
ak = ice->akm = kmalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL);
if (! ak)
return -ENOMEM;
ice->akm_codecs = 1;
err = snd_ice1712_akm4xxx_init(ak, &akm_stdsp24_mv, &akm_stdsp24_mv_priv, ice);
if (err < 0)
return err;
/* ak4524 controls */
err = snd_ice1712_akm4xxx_build_controls(ice);
if (err < 0)
return err;
return 0;
}
static int __devinit snd_ice1712_ez8_init(struct snd_ice1712 *ice)
{
ice->gpio.write_mask = ice->eeprom.gpiomask;
ice->gpio.direction = ice->eeprom.gpiodir;
snd_ice1712_write(ice, ICE1712_IREG_GPIO_WRITE_MASK, ice->eeprom.gpiomask);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DIRECTION, ice->eeprom.gpiodir);
snd_ice1712_write(ice, ICE1712_IREG_GPIO_DATA, ice->eeprom.gpiostate);
return 0;
}
/* entry point */
struct snd_ice1712_card_info snd_ice1712_hoontech_cards[] __devinitdata = {
{
.subvendor = ICE1712_SUBDEVICE_STDSP24,
.name = "Hoontech SoundTrack Audio DSP24",
.model = "dsp24",
.chip_init = snd_ice1712_hoontech_init,
.mpu401_1_name = "MIDI-1 Hoontech/STA DSP24",
.mpu401_2_name = "MIDI-2 Hoontech/STA DSP24",
},
{
.subvendor = ICE1712_SUBDEVICE_STDSP24_VALUE, /* a dummy id */
.name = "Hoontech SoundTrack Audio DSP24 Value",
.model = "dsp24_value",
.chip_init = snd_ice1712_value_init,
},
{
.subvendor = ICE1712_SUBDEVICE_STDSP24_MEDIA7_1,
.name = "Hoontech STA DSP24 Media 7.1",
.model = "dsp24_71",
.chip_init = snd_ice1712_hoontech_init,
},
{
.subvendor = ICE1712_SUBDEVICE_EVENT_EZ8, /* a dummy id */
.name = "Event Electronics EZ8",
.model = "ez8",
.chip_init = snd_ice1712_ez8_init,
},
{ } /* terminator */
};
| gpl-2.0 |
manashmndl/CHIP-linux | drivers/video/console/fbcon_rotate.c | 13907 | 2559 | /*
* linux/drivers/video/console/fbcon_rotate.c -- Software Rotation
*
 *	Copyright (C) 2005 Antonino Daplas <adaplas@pol.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <asm/types.h>
#include "fbcon.h"
#include "fbcon_rotate.h"
static int fbcon_rotate_font(struct fb_info *info, struct vc_data *vc)
{
struct fbcon_ops *ops = info->fbcon_par;
int len, err = 0;
int s_cellsize, d_cellsize, i;
const u8 *src;
u8 *dst;
if (vc->vc_font.data == ops->fontdata &&
ops->p->con_rotate == ops->cur_rotate)
goto finished;
src = ops->fontdata = vc->vc_font.data;
ops->cur_rotate = ops->p->con_rotate;
len = (!ops->p->userfont) ? 256 : FNTCHARCNT(src);
s_cellsize = ((vc->vc_font.width + 7)/8) *
vc->vc_font.height;
d_cellsize = s_cellsize;
if (ops->rotate == FB_ROTATE_CW ||
ops->rotate == FB_ROTATE_CCW)
d_cellsize = ((vc->vc_font.height + 7)/8) *
vc->vc_font.width;
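	/*
	 * Worked example (illustrative): for a 12x22 font,
	 * s_cellsize = ((12 + 7) / 8) * 22 = 2 * 22 = 44 bytes per glyph;
	 * rotated by 90 degrees the glyph becomes 22x12, so
	 * d_cellsize = ((22 + 7) / 8) * 12 = 3 * 12 = 36 bytes.
	 */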
if (info->fbops->fb_sync)
info->fbops->fb_sync(info);
if (ops->fd_size < d_cellsize * len) {
dst = kmalloc(d_cellsize * len, GFP_KERNEL);
if (dst == NULL) {
err = -ENOMEM;
goto finished;
}
ops->fd_size = d_cellsize * len;
kfree(ops->fontbuffer);
ops->fontbuffer = dst;
}
dst = ops->fontbuffer;
memset(dst, 0, ops->fd_size);
switch (ops->rotate) {
case FB_ROTATE_UD:
for (i = len; i--; ) {
rotate_ud(src, dst, vc->vc_font.width,
vc->vc_font.height);
src += s_cellsize;
dst += d_cellsize;
}
break;
case FB_ROTATE_CW:
for (i = len; i--; ) {
rotate_cw(src, dst, vc->vc_font.width,
vc->vc_font.height);
src += s_cellsize;
dst += d_cellsize;
}
break;
case FB_ROTATE_CCW:
for (i = len; i--; ) {
rotate_ccw(src, dst, vc->vc_font.width,
vc->vc_font.height);
src += s_cellsize;
dst += d_cellsize;
}
break;
}
finished:
return err;
}
void fbcon_set_rotate(struct fbcon_ops *ops)
{
ops->rotate_font = fbcon_rotate_font;
switch(ops->rotate) {
case FB_ROTATE_CW:
fbcon_rotate_cw(ops);
break;
case FB_ROTATE_UD:
fbcon_rotate_ud(ops);
break;
case FB_ROTATE_CCW:
fbcon_rotate_ccw(ops);
break;
}
}
EXPORT_SYMBOL(fbcon_set_rotate);
MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("Console Rotation Support");
MODULE_LICENSE("GPL");
| gpl-2.0 |
keyodi/ti-omap-encore-kernel3 | drivers/pci/remove.c | 2132 | 3564 | #include <linux/pci.h>
#include <linux/module.h>
#include <linux/pci-aspm.h>
#include "pci.h"
static void pci_free_resources(struct pci_dev *dev)
{
int i;
msi_remove_pci_irq_vectors(dev);
pci_cleanup_rom(dev);
for (i = 0; i < PCI_NUM_RESOURCES; i++) {
struct resource *res = dev->resource + i;
if (res->parent)
release_resource(res);
}
}
static void pci_stop_dev(struct pci_dev *dev)
{
if (dev->is_added) {
pci_proc_detach_device(dev);
pci_remove_sysfs_dev_files(dev);
device_unregister(&dev->dev);
dev->is_added = 0;
}
if (dev->bus->self)
pcie_aspm_exit_link_state(dev);
}
static void pci_destroy_dev(struct pci_dev *dev)
{
/* Remove the device from the device lists, and prevent any further
* list accesses from this device */
down_write(&pci_bus_sem);
list_del(&dev->bus_list);
dev->bus_list.next = dev->bus_list.prev = NULL;
up_write(&pci_bus_sem);
pci_free_resources(dev);
pci_dev_put(dev);
}
/**
* pci_remove_device_safe - remove an unused hotplug device
* @dev: the device to remove
*
* Delete the device structure from the device lists and
* notify userspace (/sbin/hotplug), but only if the device
* in question is not being used by a driver.
* Returns 0 on success.
*/
#if 0
int pci_remove_device_safe(struct pci_dev *dev)
{
if (pci_dev_driver(dev))
return -EBUSY;
pci_destroy_dev(dev);
return 0;
}
#endif /* 0 */
void pci_remove_bus(struct pci_bus *pci_bus)
{
pci_proc_detach_bus(pci_bus);
down_write(&pci_bus_sem);
list_del(&pci_bus->node);
up_write(&pci_bus_sem);
if (!pci_bus->is_added)
return;
pci_remove_legacy_files(pci_bus);
device_unregister(&pci_bus->dev);
}
EXPORT_SYMBOL(pci_remove_bus);
/**
* pci_remove_bus_device - remove a PCI device and any children
* @dev: the device to remove
*
* Remove a PCI device from the device lists, informing the drivers
* that the device has been removed. We also remove any subordinate
* buses and children in a depth-first manner.
*
* For each device we remove, delete the device structure from the
* device lists, remove the /proc entry, and notify userspace
* (/sbin/hotplug).
*/
void pci_remove_bus_device(struct pci_dev *dev)
{
pci_stop_bus_device(dev);
if (dev->subordinate) {
struct pci_bus *b = dev->subordinate;
pci_remove_behind_bridge(dev);
pci_remove_bus(b);
dev->subordinate = NULL;
}
pci_destroy_dev(dev);
}
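/*
 * Typical caller (sketch; slot_dev is an illustrative name, not an API):
 * a hotplug controller that has powered off a slot tears down everything
 * behind it with a single call:
 *
 *	pci_remove_bus_device(slot_dev);
 */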
/**
* pci_remove_behind_bridge - remove all devices behind a PCI bridge
* @dev: PCI bridge device
*
* Remove all devices on the bus, except for the parent bridge.
* This also removes any child buses, and any devices they may
* contain in a depth-first manner.
*/
void pci_remove_behind_bridge(struct pci_dev *dev)
{
struct list_head *l, *n;
if (dev->subordinate)
list_for_each_safe(l, n, &dev->subordinate->devices)
pci_remove_bus_device(pci_dev_b(l));
}
static void pci_stop_bus_devices(struct pci_bus *bus)
{
struct list_head *l, *n;
list_for_each_safe(l, n, &bus->devices) {
struct pci_dev *dev = pci_dev_b(l);
pci_stop_bus_device(dev);
}
}
/**
* pci_stop_bus_device - stop a PCI device and any children
* @dev: the device to stop
*
* Stop a PCI device (detach the driver, remove from the global list
 * and so on). This also stops any subordinate buses and children in a
* depth-first manner.
*/
void pci_stop_bus_device(struct pci_dev *dev)
{
if (dev->subordinate)
pci_stop_bus_devices(dev->subordinate);
pci_stop_dev(dev);
}
EXPORT_SYMBOL(pci_remove_bus_device);
EXPORT_SYMBOL(pci_remove_behind_bridge);
EXPORT_SYMBOL_GPL(pci_stop_bus_device);
| gpl-2.0 |
iAMr00t/android_kernel_huawei_msm8916 | drivers/staging/csr/csr_wifi_sme_sef.c | 2388 | 4123 | /*****************************************************************************
(c) Cambridge Silicon Radio Limited 2010
Confidential information of CSR
Refer to LICENSE.txt included with this source for details
on the license terms.
*****************************************************************************/
#include "csr_wifi_sme_sef.h"
const CsrWifiSmeStateHandlerType CsrWifiSmeUpstreamStateHandlers[CSR_WIFI_SME_PRIM_UPSTREAM_COUNT] =
{
/* 0x8000 */ CsrWifiSmeActivateCfmHandler,
/* 0x8001 */ CsrWifiSmeAdhocConfigGetCfmHandler,
/* 0x8002 */ CsrWifiSmeAdhocConfigSetCfmHandler,
/* 0x8003 */ CsrWifiSmeAssociationCompleteIndHandler,
/* 0x8004 */ CsrWifiSmeAssociationStartIndHandler,
/* 0x8005 */ CsrWifiSmeBlacklistCfmHandler,
/* 0x8006 */ CsrWifiSmeCalibrationDataGetCfmHandler,
/* 0x8007 */ CsrWifiSmeCalibrationDataSetCfmHandler,
/* 0x8008 */ CsrWifiSmeCcxConfigGetCfmHandler,
/* 0x8009 */ CsrWifiSmeCcxConfigSetCfmHandler,
/* 0x800A */ CsrWifiSmeCoexConfigGetCfmHandler,
/* 0x800B */ CsrWifiSmeCoexConfigSetCfmHandler,
/* 0x800C */ CsrWifiSmeCoexInfoGetCfmHandler,
/* 0x800D */ CsrWifiSmeConnectCfmHandler,
/* 0x800E */ CsrWifiSmeConnectionConfigGetCfmHandler,
/* 0x800F */ CsrWifiSmeConnectionInfoGetCfmHandler,
/* 0x8010 */ CsrWifiSmeConnectionQualityIndHandler,
/* 0x8011 */ CsrWifiSmeConnectionStatsGetCfmHandler,
/* 0x8012 */ CsrWifiSmeDeactivateCfmHandler,
/* 0x8013 */ CsrWifiSmeDisconnectCfmHandler,
/* 0x8014 */ CsrWifiSmeEventMaskSetCfmHandler,
/* 0x8015 */ CsrWifiSmeHostConfigGetCfmHandler,
/* 0x8016 */ CsrWifiSmeHostConfigSetCfmHandler,
/* 0x8017 */ CsrWifiSmeIbssStationIndHandler,
/* 0x8018 */ CsrWifiSmeKeyCfmHandler,
/* 0x8019 */ CsrWifiSmeLinkQualityGetCfmHandler,
/* 0x801A */ CsrWifiSmeMediaStatusIndHandler,
/* 0x801B */ CsrWifiSmeMibConfigGetCfmHandler,
/* 0x801C */ CsrWifiSmeMibConfigSetCfmHandler,
/* 0x801D */ CsrWifiSmeMibGetCfmHandler,
/* 0x801E */ CsrWifiSmeMibGetNextCfmHandler,
/* 0x801F */ CsrWifiSmeMibSetCfmHandler,
/* 0x8020 */ CsrWifiSmeMicFailureIndHandler,
/* 0x8021 */ CsrWifiSmeMulticastAddressCfmHandler,
/* 0x8022 */ CsrWifiSmePacketFilterSetCfmHandler,
/* 0x8023 */ CsrWifiSmePermanentMacAddressGetCfmHandler,
/* 0x8024 */ CsrWifiSmePmkidCandidateListIndHandler,
/* 0x8025 */ CsrWifiSmePmkidCfmHandler,
/* 0x8026 */ CsrWifiSmePowerConfigGetCfmHandler,
/* 0x8027 */ CsrWifiSmePowerConfigSetCfmHandler,
/* 0x8028 */ CsrWifiSmeRegulatoryDomainInfoGetCfmHandler,
/* 0x8029 */ CsrWifiSmeRoamCompleteIndHandler,
/* 0x802A */ CsrWifiSmeRoamStartIndHandler,
/* 0x802B */ CsrWifiSmeRoamingConfigGetCfmHandler,
/* 0x802C */ CsrWifiSmeRoamingConfigSetCfmHandler,
/* 0x802D */ CsrWifiSmeScanConfigGetCfmHandler,
/* 0x802E */ CsrWifiSmeScanConfigSetCfmHandler,
/* 0x802F */ CsrWifiSmeScanFullCfmHandler,
/* 0x8030 */ CsrWifiSmeScanResultIndHandler,
/* 0x8031 */ CsrWifiSmeScanResultsFlushCfmHandler,
/* 0x8032 */ CsrWifiSmeScanResultsGetCfmHandler,
/* 0x8033 */ CsrWifiSmeSmeStaConfigGetCfmHandler,
/* 0x8034 */ CsrWifiSmeSmeStaConfigSetCfmHandler,
/* 0x8035 */ CsrWifiSmeStationMacAddressGetCfmHandler,
/* 0x8036 */ CsrWifiSmeTspecIndHandler,
/* 0x8037 */ CsrWifiSmeTspecCfmHandler,
/* 0x8038 */ CsrWifiSmeVersionsGetCfmHandler,
/* 0x8039 */ CsrWifiSmeWifiFlightmodeCfmHandler,
/* 0x803A */ CsrWifiSmeWifiOffIndHandler,
/* 0x803B */ CsrWifiSmeWifiOffCfmHandler,
/* 0x803C */ CsrWifiSmeWifiOnCfmHandler,
/* 0x803D */ CsrWifiSmeCloakedSsidsSetCfmHandler,
/* 0x803E */ CsrWifiSmeCloakedSsidsGetCfmHandler,
/* 0x803F */ CsrWifiSmeWifiOnIndHandler,
/* 0x8040 */ CsrWifiSmeSmeCommonConfigGetCfmHandler,
/* 0x8041 */ CsrWifiSmeSmeCommonConfigSetCfmHandler,
/* 0x8042 */ CsrWifiSmeGetInterfaceCapabilityCfmHandler,
/* 0x8043 */ CsrWifiSmeErrorIndHandler,
/* 0x8044 */ CsrWifiSmeInfoIndHandler,
/* 0x8045 */ CsrWifiSmeCoreDumpIndHandler,
/* 0x8046 */ CsrWifiSmeAmpStatusChangeIndHandler,
};
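/*
 * Dispatch sketch (illustrative; the argument list is an assumption, not
 * part of this file): upstream primitive ids start at 0x8000, so an
 * incoming confirm/indication with id prim would be routed as
 *
 *	CsrWifiSmeUpstreamStateHandlers[prim - 0x8000](context, message);
 */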
| gpl-2.0 |
Maroc-OS/android_kernel_bn_encore | drivers/media/radio/si4713-i2c.c | 2900 | 51563 | /*
* drivers/media/radio/si4713-i2c.c
*
* Silicon Labs Si4713 FM Radio Transmitter I2C commands.
*
* Copyright (c) 2009 Nokia Corporation
* Contact: Eduardo Valentin <eduardo.valentin@nokia.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/gpio.h>
#include <linux/regulator/consumer.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-common.h>
#include "si4713-i2c.h"
/* module parameters */
static int debug;
module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Debug level (0 - 2)");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Eduardo Valentin <eduardo.valentin@nokia.com>");
MODULE_DESCRIPTION("I2C driver for Si4713 FM Radio Transmitter");
MODULE_VERSION("0.0.1");
static const char *si4713_supply_names[SI4713_NUM_SUPPLIES] = {
"vio",
"vdd",
};
#define DEFAULT_RDS_PI 0x00
#define DEFAULT_RDS_PTY 0x00
#define DEFAULT_RDS_PS_NAME ""
#define DEFAULT_RDS_RADIO_TEXT DEFAULT_RDS_PS_NAME
#define DEFAULT_RDS_DEVIATION 0x00C8
#define DEFAULT_RDS_PS_REPEAT_COUNT 0x0003
#define DEFAULT_LIMITER_RTIME 0x1392
#define DEFAULT_LIMITER_DEV 0x102CA
#define DEFAULT_PILOT_FREQUENCY 0x4A38
#define DEFAULT_PILOT_DEVIATION 0x1A5E
#define DEFAULT_ACOMP_ATIME 0x0000
#define DEFAULT_ACOMP_RTIME 0xF4240L
#define DEFAULT_ACOMP_GAIN 0x0F
#define DEFAULT_ACOMP_THRESHOLD (-0x28)
#define DEFAULT_MUTE 0x01
#define DEFAULT_POWER_LEVEL 88
#define DEFAULT_FREQUENCY 8800
#define DEFAULT_PREEMPHASIS FMPE_EU
#define DEFAULT_TUNE_RNL 0xFF
#define to_si4713_device(sd) container_of(sd, struct si4713_device, sd)
/* frequency domain transformation (using times 10 to avoid floats) */
#define FREQDEV_UNIT 100000
#define FREQV4L2_MULTI 625
#define si4713_to_v4l2(f) ((f * FREQDEV_UNIT) / FREQV4L2_MULTI)
#define v4l2_to_si4713(f) ((f * FREQV4L2_MULTI) / FREQDEV_UNIT)
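/*
 * Worked example (illustrative): the chip counts in 10 kHz units while
 * V4L2 counts in 62.5 Hz units, so 88.0 MHz is 8800 on the chip side and
 * si4713_to_v4l2(8800) = (8800 * 100000) / 625 = 1408000 on the V4L2 side.
 */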
#define FREQ_RANGE_LOW 7600
#define FREQ_RANGE_HIGH 10800
#define MAX_ARGS 7
#define RDS_BLOCK 8
#define RDS_BLOCK_CLEAR 0x03
#define RDS_BLOCK_LOAD 0x04
#define RDS_RADIOTEXT_2A 0x20
#define RDS_RADIOTEXT_BLK_SIZE 4
#define RDS_RADIOTEXT_INDEX_MAX 0x0F
#define RDS_CARRIAGE_RETURN 0x0D
#define rds_ps_nblocks(len) ((len / RDS_BLOCK) + (len % RDS_BLOCK ? 1 : 0))
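/* Ceiling division: e.g. rds_ps_nblocks(8) = 1, rds_ps_nblocks(9) = 2. */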
#define get_status_bit(p, b, m) (((p) & (m)) >> (b))
#define set_bits(p, v, b, m) (((p) & ~(m)) | ((v) << (b)))
#define ATTACK_TIME_UNIT 500
#define POWER_OFF 0x00
#define POWER_ON 0x01
#define msb(x) ((u8)((u16) x >> 8))
#define lsb(x) ((u8)((u16) x & 0x00FF))
#define compose_u16(msb, lsb) (((u16)msb << 8) | lsb)
#define check_command_failed(status) (!(status & SI4713_CTS) || \
(status & SI4713_ERR))
/* mute definition */
#define set_mute(p) ((p & 1) | ((p & 1) << 1))
#define get_mute(p) (p & 0x01)
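/*
 * Illustrative values (assuming bit 0 is the left and bit 1 the right
 * line-input mute): set_mute(1) = 0x03 mutes both inputs, set_mute(0)
 * = 0x00 unmutes them; get_mute() reports bit 0 only.
 */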
#ifdef DEBUG
#define DBG_BUFFER(device, message, buffer, size) \
{ \
int i; \
	char str[(size)*5 + 1]; \
for (i = 0; i < size; i++) \
sprintf(str + i * 5, " 0x%02x", buffer[i]); \
v4l2_dbg(2, debug, device, "%s:%s\n", message, str); \
}
#else
#define DBG_BUFFER(device, message, buffer, size)
#endif
/*
* Values for limiter release time (sorted by second column)
* device release
* value time (us)
*/
static long limiter_times[] = {
2000, 250,
1000, 500,
510, 1000,
255, 2000,
170, 3000,
127, 4020,
102, 5010,
85, 6020,
73, 7010,
64, 7990,
57, 8970,
51, 10030,
25, 20470,
17, 30110,
13, 39380,
10, 51190,
8, 63690,
7, 73140,
6, 85330,
5, 102390,
};
/*
* Values for audio compression release time (sorted by second column)
* device release
* value time (us)
*/
static unsigned long acomp_rtimes[] = {
0, 100000,
1, 200000,
2, 350000,
3, 525000,
4, 1000000,
};
/*
* Values for preemphasis (sorted by second column)
* device preemphasis
* value value (v4l2)
*/
static unsigned long preemphasis_values[] = {
FMPE_DISABLED, V4L2_PREEMPHASIS_DISABLED,
FMPE_EU, V4L2_PREEMPHASIS_50_uS,
FMPE_USA, V4L2_PREEMPHASIS_75_uS,
};
static int usecs_to_dev(unsigned long usecs, unsigned long const array[],
int size)
{
int i;
int rval = -EINVAL;
for (i = 0; i < size / 2; i++)
if (array[(i * 2) + 1] >= usecs) {
rval = array[i * 2];
break;
}
return rval;
}
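/*
 * For example, with the tables above, usecs_to_dev(1000, limiter_times,
 * ARRAY_SIZE(limiter_times)) picks the first entry whose release time
 * (second column) is at least 1000 us and returns the device value 510.
 */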
static unsigned long dev_to_usecs(int value, unsigned long const array[],
int size)
{
int i;
int rval = -EINVAL;
for (i = 0; i < size / 2; i++)
if (array[i * 2] == value) {
rval = array[(i * 2) + 1];
break;
}
return rval;
}
/* si4713_handler: IRQ handler, just complete work */
static irqreturn_t si4713_handler(int irq, void *dev)
{
struct si4713_device *sdev = dev;
v4l2_dbg(2, debug, &sdev->sd,
"%s: sending signal to completion work.\n", __func__);
complete(&sdev->work);
return IRQ_HANDLED;
}
/*
* si4713_send_command - sends a command to si4713 and waits its response
* @sdev: si4713_device structure for the device we are communicating
* @command: command id
* @args: command arguments we are sending (up to 7)
* @argn: actual size of @args
* @response: buffer to place the expected response from the device (up to 15)
* @respn: actual size of @response
* @usecs: amount of time to wait before reading the response (in usecs)
*/
static int si4713_send_command(struct si4713_device *sdev, const u8 command,
const u8 args[], const int argn,
u8 response[], const int respn, const int usecs)
{
struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
u8 data1[MAX_ARGS + 1];
int err;
if (!client->adapter)
return -ENODEV;
/* First send the command and its arguments */
data1[0] = command;
memcpy(data1 + 1, args, argn);
DBG_BUFFER(&sdev->sd, "Parameters", data1, argn + 1);
err = i2c_master_send(client, data1, argn + 1);
if (err != argn + 1) {
v4l2_err(&sdev->sd, "Error while sending command 0x%02x\n",
command);
return (err > 0) ? -EIO : err;
}
/* Wait response from interrupt */
if (!wait_for_completion_timeout(&sdev->work,
usecs_to_jiffies(usecs) + 1))
v4l2_warn(&sdev->sd,
"(%s) Device took too much time to answer.\n",
__func__);
/* Then get the response */
err = i2c_master_recv(client, response, respn);
if (err != respn) {
v4l2_err(&sdev->sd,
"Error while reading response for command 0x%02x\n",
command);
return (err > 0) ? -EIO : err;
}
DBG_BUFFER(&sdev->sd, "Response", response, respn);
if (check_command_failed(response[0]))
return -EBUSY;
return 0;
}
/*
* si4713_read_property - reads a si4713 property
* @sdev: si4713_device structure for the device we are communicating
* @prop: property identification number
* @pv: property value to be returned on success
*/
static int si4713_read_property(struct si4713_device *sdev, u16 prop, u32 *pv)
{
int err;
u8 val[SI4713_GET_PROP_NRESP];
/*
* .First byte = 0
* .Second byte = property's MSB
* .Third byte = property's LSB
*/
const u8 args[SI4713_GET_PROP_NARGS] = {
0x00,
msb(prop),
lsb(prop),
};
err = si4713_send_command(sdev, SI4713_CMD_GET_PROPERTY,
args, ARRAY_SIZE(args), val,
ARRAY_SIZE(val), DEFAULT_TIMEOUT);
if (err < 0)
return err;
*pv = compose_u16(val[2], val[3]);
v4l2_dbg(1, debug, &sdev->sd,
"%s: property=0x%02x value=0x%02x status=0x%02x\n",
__func__, prop, *pv, val[0]);
return err;
}
/*
* si4713_write_property - modifies a si4713 property
* @sdev: si4713_device structure for the device we are communicating
* @prop: property identification number
* @val: new value for that property
*/
static int si4713_write_property(struct si4713_device *sdev, u16 prop, u16 val)
{
int rval;
u8 resp[SI4713_SET_PROP_NRESP];
/*
* .First byte = 0
* .Second byte = property's MSB
* .Third byte = property's LSB
* .Fourth byte = value's MSB
* .Fifth byte = value's LSB
*/
const u8 args[SI4713_SET_PROP_NARGS] = {
0x00,
msb(prop),
lsb(prop),
msb(val),
lsb(val),
};
rval = si4713_send_command(sdev, SI4713_CMD_SET_PROPERTY,
args, ARRAY_SIZE(args),
resp, ARRAY_SIZE(resp),
DEFAULT_TIMEOUT);
if (rval < 0)
return rval;
v4l2_dbg(1, debug, &sdev->sd,
"%s: property=0x%02x value=0x%02x status=0x%02x\n",
__func__, prop, val, resp[0]);
/*
* As there is no command response for SET_PROPERTY,
* wait Tcomp time to finish before proceed, in order
* to have property properly set.
*/
msleep(TIMEOUT_SET_PROPERTY);
return rval;
}
/*
* si4713_powerup - Powers the device up
* @sdev: si4713_device structure for the device we are communicating
*/
static int si4713_powerup(struct si4713_device *sdev)
{
int err;
u8 resp[SI4713_PWUP_NRESP];
/*
* .First byte = Enabled interrupts and boot function
* .Second byte = Input operation mode
*/
const u8 args[SI4713_PWUP_NARGS] = {
SI4713_PWUP_CTSIEN | SI4713_PWUP_GPO2OEN | SI4713_PWUP_FUNC_TX,
SI4713_PWUP_OPMOD_ANALOG,
};
if (sdev->power_state)
return 0;
err = regulator_bulk_enable(ARRAY_SIZE(sdev->supplies),
sdev->supplies);
if (err) {
v4l2_err(&sdev->sd, "Failed to enable supplies: %d\n", err);
return err;
}
if (gpio_is_valid(sdev->gpio_reset)) {
udelay(50);
gpio_set_value(sdev->gpio_reset, 1);
}
err = si4713_send_command(sdev, SI4713_CMD_POWER_UP,
args, ARRAY_SIZE(args),
resp, ARRAY_SIZE(resp),
TIMEOUT_POWER_UP);
if (!err) {
v4l2_dbg(1, debug, &sdev->sd, "Powerup response: 0x%02x\n",
resp[0]);
v4l2_dbg(1, debug, &sdev->sd, "Device in power up mode\n");
sdev->power_state = POWER_ON;
err = si4713_write_property(sdev, SI4713_GPO_IEN,
SI4713_STC_INT | SI4713_CTS);
} else {
if (gpio_is_valid(sdev->gpio_reset))
gpio_set_value(sdev->gpio_reset, 0);
err = regulator_bulk_disable(ARRAY_SIZE(sdev->supplies),
sdev->supplies);
if (err)
v4l2_err(&sdev->sd,
"Failed to disable supplies: %d\n", err);
}
return err;
}
/*
* si4713_powerdown - Powers the device down
* @sdev: si4713_device structure for the device we are communicating
*/
static int si4713_powerdown(struct si4713_device *sdev)
{
int err;
u8 resp[SI4713_PWDN_NRESP];
if (!sdev->power_state)
return 0;
err = si4713_send_command(sdev, SI4713_CMD_POWER_DOWN,
NULL, 0,
resp, ARRAY_SIZE(resp),
DEFAULT_TIMEOUT);
if (!err) {
v4l2_dbg(1, debug, &sdev->sd, "Power down response: 0x%02x\n",
resp[0]);
v4l2_dbg(1, debug, &sdev->sd, "Device in reset mode\n");
if (gpio_is_valid(sdev->gpio_reset))
gpio_set_value(sdev->gpio_reset, 0);
err = regulator_bulk_disable(ARRAY_SIZE(sdev->supplies),
sdev->supplies);
if (err)
v4l2_err(&sdev->sd,
"Failed to disable supplies: %d\n", err);
sdev->power_state = POWER_OFF;
}
return err;
}
/*
 * si4713_checkrev - Checks that the device we are talking to has the correct revision.
* @sdev: si4713_device structure for the device we are communicating
*/
static int si4713_checkrev(struct si4713_device *sdev)
{
struct i2c_client *client = v4l2_get_subdevdata(&sdev->sd);
int rval;
u8 resp[SI4713_GETREV_NRESP];
mutex_lock(&sdev->mutex);
rval = si4713_send_command(sdev, SI4713_CMD_GET_REV,
NULL, 0,
resp, ARRAY_SIZE(resp),
DEFAULT_TIMEOUT);
if (rval < 0)
goto unlock;
if (resp[1] == SI4713_PRODUCT_NUMBER) {
v4l2_info(&sdev->sd, "chip found @ 0x%02x (%s)\n",
client->addr << 1, client->adapter->name);
} else {
v4l2_err(&sdev->sd, "Invalid product number\n");
rval = -EINVAL;
}
unlock:
mutex_unlock(&sdev->mutex);
return rval;
}
/*
* si4713_wait_stc - Waits STC interrupt and clears status bits. Useful
* for TX_TUNE_POWER, TX_TUNE_FREQ and TX_TUNE_MEAS
* @sdev: si4713_device structure for the device we are communicating
* @usecs: timeout to wait for STC interrupt signal
*/
static int si4713_wait_stc(struct si4713_device *sdev, const int usecs)
{
int err;
u8 resp[SI4713_GET_STATUS_NRESP];
/* Wait response from STC interrupt */
if (!wait_for_completion_timeout(&sdev->work,
usecs_to_jiffies(usecs) + 1))
v4l2_warn(&sdev->sd,
"%s: device took too much time to answer (%d usec).\n",
__func__, usecs);
/* Clear status bits */
err = si4713_send_command(sdev, SI4713_CMD_GET_INT_STATUS,
NULL, 0,
resp, ARRAY_SIZE(resp),
DEFAULT_TIMEOUT);
if (err < 0)
goto exit;
v4l2_dbg(1, debug, &sdev->sd,
"%s: status bits: 0x%02x\n", __func__, resp[0]);
if (!(resp[0] & SI4713_STC_INT))
err = -EIO;
exit:
return err;
}
/*
* si4713_tx_tune_freq - Sets the state of the RF carrier and sets the tuning
* frequency between 76 and 108 MHz in 10 kHz units and
* steps of 50 kHz.
* @sdev: si4713_device structure for the device we are communicating
* @frequency: desired frequency (76 - 108 MHz, unit 10 KHz, step 50 kHz)
*/
static int si4713_tx_tune_freq(struct si4713_device *sdev, u16 frequency)
{
int err;
u8 val[SI4713_TXFREQ_NRESP];
/*
* .First byte = 0
* .Second byte = frequency's MSB
* .Third byte = frequency's LSB
*/
const u8 args[SI4713_TXFREQ_NARGS] = {
0x00,
msb(frequency),
lsb(frequency),
};
err = si4713_send_command(sdev, SI4713_CMD_TX_TUNE_FREQ,
args, ARRAY_SIZE(args), val,
ARRAY_SIZE(val), DEFAULT_TIMEOUT);
if (err < 0)
return err;
v4l2_dbg(1, debug, &sdev->sd,
"%s: frequency=0x%02x status=0x%02x\n", __func__,
frequency, val[0]);
err = si4713_wait_stc(sdev, TIMEOUT_TX_TUNE);
if (err < 0)
return err;
return compose_u16(args[1], args[2]);
}
/*
* si4713_tx_tune_power - Sets the RF voltage level between 88 and 115 dBuV in
* 1 dB units. A value of 0x00 indicates off. The command
* also sets the antenna tuning capacitance. A value of 0
* indicates autotuning, and a value of 1 - 191 indicates
* a manual override, which results in a tuning
* capacitance of 0.25 pF x @antcap.
* @sdev: si4713_device structure for the device we are communicating
* @power: tuning power (88 - 115 dBuV, unit/step 1 dB)
* @antcap: value of antenna tuning capacitor (0 - 191)
*/
static int si4713_tx_tune_power(struct si4713_device *sdev, u8 power,
u8 antcap)
{
int err;
u8 val[SI4713_TXPWR_NRESP];
/*
* .First byte = 0
* .Second byte = 0
* .Third byte = power
* .Fourth byte = antcap
*/
const u8 args[SI4713_TXPWR_NARGS] = {
0x00,
0x00,
power,
antcap,
};
if (((power > 0) && (power < SI4713_MIN_POWER)) ||
power > SI4713_MAX_POWER || antcap > SI4713_MAX_ANTCAP)
return -EDOM;
err = si4713_send_command(sdev, SI4713_CMD_TX_TUNE_POWER,
args, ARRAY_SIZE(args), val,
ARRAY_SIZE(val), DEFAULT_TIMEOUT);
if (err < 0)
return err;
v4l2_dbg(1, debug, &sdev->sd,
"%s: power=0x%02x antcap=0x%02x status=0x%02x\n",
__func__, power, antcap, val[0]);
return si4713_wait_stc(sdev, TIMEOUT_TX_TUNE_POWER);
}
/*
* si4713_tx_tune_measure - Enters receive mode and measures the received noise
* level in units of dBuV on the selected frequency.
 *			  The frequency must be between 76 and 108 MHz in 10 kHz
* units and steps of 50 kHz. The command also sets the
* antenna tuning capacitance. A value of 0 means
* autotuning, and a value of 1 to 191 indicates manual
* override.
* @sdev: si4713_device structure for the device we are communicating
* @frequency: desired frequency (76 - 108 MHz, unit 10 KHz, step 50 kHz)
* @antcap: value of antenna tuning capacitor (0 - 191)
*/
static int si4713_tx_tune_measure(struct si4713_device *sdev, u16 frequency,
u8 antcap)
{
int err;
u8 val[SI4713_TXMEA_NRESP];
/*
* .First byte = 0
* .Second byte = frequency's MSB
* .Third byte = frequency's LSB
* .Fourth byte = antcap
*/
const u8 args[SI4713_TXMEA_NARGS] = {
0x00,
msb(frequency),
lsb(frequency),
antcap,
};
sdev->tune_rnl = DEFAULT_TUNE_RNL;
if (antcap > SI4713_MAX_ANTCAP)
return -EDOM;
err = si4713_send_command(sdev, SI4713_CMD_TX_TUNE_MEASURE,
args, ARRAY_SIZE(args), val,
ARRAY_SIZE(val), DEFAULT_TIMEOUT);
if (err < 0)
return err;
v4l2_dbg(1, debug, &sdev->sd,
"%s: frequency=0x%02x antcap=0x%02x status=0x%02x\n",
__func__, frequency, antcap, val[0]);
return si4713_wait_stc(sdev, TIMEOUT_TX_TUNE);
}
/*
 * si4713_tx_tune_status - Returns the status of the tx_tune_freq,
 *			  tx_tune_measure or tx_tune_power commands. It
 *			  returns the current frequency, the output voltage
 *			  in dBuV, the antenna tuning capacitance value and
 *			  the received noise level. The command also clears
 *			  the STCINT interrupt bit when the first bit of its
 *			  arguments is high.
* @sdev: si4713_device structure for the device we are communicating
* @intack: 0x01 to clear the seek/tune complete interrupt status indicator.
* @frequency: returned frequency
* @power: returned power
* @antcap: returned antenna capacitance
* @noise: returned noise level
*/
static int si4713_tx_tune_status(struct si4713_device *sdev, u8 intack,
u16 *frequency, u8 *power,
u8 *antcap, u8 *noise)
{
int err;
u8 val[SI4713_TXSTATUS_NRESP];
/*
* .First byte = intack bit
*/
const u8 args[SI4713_TXSTATUS_NARGS] = {
intack & SI4713_INTACK_MASK,
};
err = si4713_send_command(sdev, SI4713_CMD_TX_TUNE_STATUS,
args, ARRAY_SIZE(args), val,
ARRAY_SIZE(val), DEFAULT_TIMEOUT);
if (!err) {
v4l2_dbg(1, debug, &sdev->sd,
"%s: status=0x%02x\n", __func__, val[0]);
*frequency = compose_u16(val[2], val[3]);
sdev->frequency = *frequency;
*power = val[5];
*antcap = val[6];
*noise = val[7];
v4l2_dbg(1, debug, &sdev->sd, "%s: response: %d x 10 kHz "
"(power %d, antcap %d, rnl %d)\n", __func__,
*frequency, *power, *antcap, *noise);
}
return err;
}
/*
* si4713_tx_rds_buff - Loads the RDS group buffer FIFO or circular buffer.
* @sdev: si4713_device structure for the device we are communicating
* @mode: the buffer operation mode.
* @rdsb: RDS Block B
* @rdsc: RDS Block C
* @rdsd: RDS Block D
* @cbleft: returns the number of available circular buffer blocks minus the
* number of used circular buffer blocks.
*/
static int si4713_tx_rds_buff(struct si4713_device *sdev, u8 mode, u16 rdsb,
u16 rdsc, u16 rdsd, s8 *cbleft)
{
int err;
u8 val[SI4713_RDSBUFF_NRESP];
const u8 args[SI4713_RDSBUFF_NARGS] = {
mode & SI4713_RDSBUFF_MODE_MASK,
msb(rdsb),
lsb(rdsb),
msb(rdsc),
lsb(rdsc),
msb(rdsd),
lsb(rdsd),
};
err = si4713_send_command(sdev, SI4713_CMD_TX_RDS_BUFF,
args, ARRAY_SIZE(args), val,
ARRAY_SIZE(val), DEFAULT_TIMEOUT);
if (!err) {
v4l2_dbg(1, debug, &sdev->sd,
"%s: status=0x%02x\n", __func__, val[0]);
*cbleft = (s8)val[2] - val[3];
v4l2_dbg(1, debug, &sdev->sd, "%s: response: interrupts"
" 0x%02x cb avail: %d cb used %d fifo avail"
" %d fifo used %d\n", __func__, val[1],
val[2], val[3], val[4], val[5]);
}
return err;
}
/*
* si4713_tx_rds_ps - Loads the program service buffer.
* @sdev: si4713_device structure for the device we are communicating with
* @psid: program service id to be loaded.
* @pschar: a 4-character array to be loaded into the program service
*/
static int si4713_tx_rds_ps(struct si4713_device *sdev, u8 psid,
unsigned char *pschar)
{
int err;
u8 val[SI4713_RDSPS_NRESP];
const u8 args[SI4713_RDSPS_NARGS] = {
psid & SI4713_RDSPS_PSID_MASK,
pschar[0],
pschar[1],
pschar[2],
pschar[3],
};
err = si4713_send_command(sdev, SI4713_CMD_TX_RDS_PS,
args, ARRAY_SIZE(args), val,
ARRAY_SIZE(val), DEFAULT_TIMEOUT);
if (err < 0)
return err;
v4l2_dbg(1, debug, &sdev->sd, "%s: status=0x%02x\n", __func__, val[0]);
return err;
}
static int si4713_set_power_state(struct si4713_device *sdev, u8 value)
{
int rval;
mutex_lock(&sdev->mutex);
if (value)
rval = si4713_powerup(sdev);
else
rval = si4713_powerdown(sdev);
mutex_unlock(&sdev->mutex);
return rval;
}
static int si4713_set_mute(struct si4713_device *sdev, u16 mute)
{
int rval = 0;
mute = set_mute(mute);
mutex_lock(&sdev->mutex);
if (sdev->power_state)
rval = si4713_write_property(sdev,
SI4713_TX_LINE_INPUT_MUTE, mute);
if (rval >= 0)
sdev->mute = get_mute(mute);
mutex_unlock(&sdev->mutex);
return rval;
}
static int si4713_set_rds_ps_name(struct si4713_device *sdev, char *ps_name)
{
int rval = 0, i;
u8 len = 0;
/* We want to clear the whole thing */
if (!strlen(ps_name))
memset(ps_name, 0, MAX_RDS_PS_NAME + 1);
mutex_lock(&sdev->mutex);
if (sdev->power_state) {
/* Write the new ps name and clear the padding */
for (i = 0; i < MAX_RDS_PS_NAME; i += (RDS_BLOCK / 2)) {
rval = si4713_tx_rds_ps(sdev, (i / (RDS_BLOCK / 2)),
ps_name + i);
if (rval < 0)
goto unlock;
}
/* Setup the size to be sent */
if (strlen(ps_name))
len = strlen(ps_name) - 1;
else
len = 1;
rval = si4713_write_property(sdev,
SI4713_TX_RDS_PS_MESSAGE_COUNT,
rds_ps_nblocks(len));
if (rval < 0)
goto unlock;
rval = si4713_write_property(sdev,
SI4713_TX_RDS_PS_REPEAT_COUNT,
DEFAULT_RDS_PS_REPEAT_COUNT * 2);
if (rval < 0)
goto unlock;
}
strncpy(sdev->rds_info.ps_name, ps_name, MAX_RDS_PS_NAME);
unlock:
mutex_unlock(&sdev->mutex);
return rval;
}
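/*
 * Worked example (RDS_BLOCK == 8 is an inference from
 * si4713_tx_rds_ps() taking four characters): the loop above sends the
 * PS name in 4-character slices, one program service id per slice, so
 * for ps_name "Linux FM" it effectively performs
 *
 *	si4713_tx_rds_ps(sdev, 0, "Linu");
 *	si4713_tx_rds_ps(sdev, 1, "x FM");
 *
 * and the remaining slices up to MAX_RDS_PS_NAME clear the padding.
 */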
static int si4713_set_rds_radio_text(struct si4713_device *sdev, char *rt)
{
int rval = 0, i;
u16 t_index = 0;
u8 b_index = 0, cr_inserted = 0;
s8 left;
mutex_lock(&sdev->mutex);
if (!sdev->power_state)
goto copy;
rval = si4713_tx_rds_buff(sdev, RDS_BLOCK_CLEAR, 0, 0, 0, &left);
if (rval < 0)
goto unlock;
if (!strlen(rt))
goto copy;
do {
/* RDS spec says that if the last block isn't used,
* then apply a carriage return
*/
if (t_index < (RDS_RADIOTEXT_INDEX_MAX *
RDS_RADIOTEXT_BLK_SIZE)) {
for (i = 0; i < RDS_RADIOTEXT_BLK_SIZE; i++) {
if (!rt[t_index + i] || rt[t_index + i] ==
RDS_CARRIAGE_RETURN) {
rt[t_index + i] = RDS_CARRIAGE_RETURN;
cr_inserted = 1;
break;
}
}
}
rval = si4713_tx_rds_buff(sdev, RDS_BLOCK_LOAD,
compose_u16(RDS_RADIOTEXT_2A, b_index++),
compose_u16(rt[t_index], rt[t_index + 1]),
compose_u16(rt[t_index + 2], rt[t_index + 3]),
&left);
if (rval < 0)
goto unlock;
t_index += RDS_RADIOTEXT_BLK_SIZE;
if (cr_inserted)
break;
} while (left > 0);
copy:
strncpy(sdev->rds_info.radio_text, rt, MAX_RDS_RADIO_TEXT);
unlock:
mutex_unlock(&sdev->mutex);
return rval;
}
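/*
 * Worked trace (illustrative): for radio text "Hello!", the first
 * RDS_BLOCK_LOAD call above sends block B = (RDS_RADIOTEXT_2A, 0),
 * block C = ('H','e') and block D = ('l','l'). On the second call the
 * inner loop finds the terminating '\0' in the next 4-character
 * window, replaces it with RDS_CARRIAGE_RETURN as the RDS spec
 * requires, sends ('o','!') followed by the carriage return, and
 * cr_inserted then terminates the outer loop.
 */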
static int si4713_choose_econtrol_action(struct si4713_device *sdev, u32 id,
u32 **shadow, s32 *bit, s32 *mask, u16 *property, int *mul,
unsigned long **table, int *size)
{
s32 rval = 0;
switch (id) {
/* FM_TX class controls */
case V4L2_CID_RDS_TX_PI:
*property = SI4713_TX_RDS_PI;
*mul = 1;
*shadow = &sdev->rds_info.pi;
break;
case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
*property = SI4713_TX_ACOMP_THRESHOLD;
*mul = 1;
*shadow = &sdev->acomp_info.threshold;
break;
case V4L2_CID_AUDIO_COMPRESSION_GAIN:
*property = SI4713_TX_ACOMP_GAIN;
*mul = 1;
*shadow = &sdev->acomp_info.gain;
break;
case V4L2_CID_PILOT_TONE_FREQUENCY:
*property = SI4713_TX_PILOT_FREQUENCY;
*mul = 1;
*shadow = &sdev->pilot_info.frequency;
break;
case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
*property = SI4713_TX_ACOMP_ATTACK_TIME;
*mul = ATTACK_TIME_UNIT;
*shadow = &sdev->acomp_info.attack_time;
break;
case V4L2_CID_PILOT_TONE_DEVIATION:
*property = SI4713_TX_PILOT_DEVIATION;
*mul = 10;
*shadow = &sdev->pilot_info.deviation;
break;
case V4L2_CID_AUDIO_LIMITER_DEVIATION:
*property = SI4713_TX_AUDIO_DEVIATION;
*mul = 10;
*shadow = &sdev->limiter_info.deviation;
break;
case V4L2_CID_RDS_TX_DEVIATION:
*property = SI4713_TX_RDS_DEVIATION;
*mul = 1;
*shadow = &sdev->rds_info.deviation;
break;
case V4L2_CID_RDS_TX_PTY:
*property = SI4713_TX_RDS_PS_MISC;
*bit = 5;
*mask = 0x1F << 5;
*shadow = &sdev->rds_info.pty;
break;
case V4L2_CID_AUDIO_LIMITER_ENABLED:
*property = SI4713_TX_ACOMP_ENABLE;
*bit = 1;
*mask = 1 << 1;
*shadow = &sdev->limiter_info.enabled;
break;
case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
*property = SI4713_TX_ACOMP_ENABLE;
*bit = 0;
*mask = 1 << 0;
*shadow = &sdev->acomp_info.enabled;
break;
case V4L2_CID_PILOT_TONE_ENABLED:
*property = SI4713_TX_COMPONENT_ENABLE;
*bit = 0;
*mask = 1 << 0;
*shadow = &sdev->pilot_info.enabled;
break;
case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
*property = SI4713_TX_LIMITER_RELEASE_TIME;
*table = limiter_times;
*size = ARRAY_SIZE(limiter_times);
*shadow = &sdev->limiter_info.release_time;
break;
case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
*property = SI4713_TX_ACOMP_RELEASE_TIME;
*table = acomp_rtimes;
*size = ARRAY_SIZE(acomp_rtimes);
*shadow = &sdev->acomp_info.release_time;
break;
case V4L2_CID_TUNE_PREEMPHASIS:
*property = SI4713_TX_PREEMPHASIS;
*table = preemphasis_values;
*size = ARRAY_SIZE(preemphasis_values);
*shadow = &sdev->preemphasis;
break;
default:
rval = -EINVAL;
}
return rval;
}
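/*
 * Worked example for the bitfield path (assuming set_bits() clears the
 * mask and ORs in the value shifted by 'bit'): V4L2_CID_RDS_TX_PTY
 * selects SI4713_TX_RDS_PS_MISC with bit = 5 and mask = 0x1F << 5, so
 * writing pty = 10 turns a read-back property value 'old' into
 *
 *	(old & ~(0x1F << 5)) | (10 << 5)
 *
 * leaving the other bits of the property untouched.
 */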
static int si4713_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc);
/* write string property */
static int si4713_write_econtrol_string(struct si4713_device *sdev,
struct v4l2_ext_control *control)
{
struct v4l2_queryctrl vqc;
int len;
s32 rval = 0;
vqc.id = control->id;
rval = si4713_queryctrl(&sdev->sd, &vqc);
if (rval < 0)
goto exit;
switch (control->id) {
case V4L2_CID_RDS_TX_PS_NAME: {
char ps_name[MAX_RDS_PS_NAME + 1];
len = control->size - 1;
if (len < 0 || len > MAX_RDS_PS_NAME) {
rval = -ERANGE;
goto exit;
}
rval = copy_from_user(ps_name, control->string, len);
if (rval) {
rval = -EFAULT;
goto exit;
}
ps_name[len] = '\0';
if (strlen(ps_name) % vqc.step) {
rval = -ERANGE;
goto exit;
}
rval = si4713_set_rds_ps_name(sdev, ps_name);
}
break;
case V4L2_CID_RDS_TX_RADIO_TEXT: {
char radio_text[MAX_RDS_RADIO_TEXT + 1];
len = control->size - 1;
if (len < 0 || len > MAX_RDS_RADIO_TEXT) {
rval = -ERANGE;
goto exit;
}
rval = copy_from_user(radio_text, control->string, len);
if (rval) {
rval = -EFAULT;
goto exit;
}
radio_text[len] = '\0';
if (strlen(radio_text) % vqc.step) {
rval = -ERANGE;
goto exit;
}
rval = si4713_set_rds_radio_text(sdev, radio_text);
}
break;
default:
rval = -EINVAL;
break;
}
exit:
return rval;
}
static int validate_range(struct v4l2_subdev *sd,
struct v4l2_ext_control *control)
{
struct v4l2_queryctrl vqc;
int rval;
vqc.id = control->id;
rval = si4713_queryctrl(sd, &vqc);
if (rval < 0)
goto exit;
if (control->value < vqc.minimum || control->value > vqc.maximum)
rval = -ERANGE;
exit:
return rval;
}
/* properties which use tx_tune_power */
static int si4713_write_econtrol_tune(struct si4713_device *sdev,
struct v4l2_ext_control *control)
{
s32 rval = 0;
u8 power, antcap;
rval = validate_range(&sdev->sd, control);
if (rval < 0)
goto exit;
mutex_lock(&sdev->mutex);
switch (control->id) {
case V4L2_CID_TUNE_POWER_LEVEL:
power = control->value;
antcap = sdev->antenna_capacitor;
break;
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
power = sdev->power_level;
antcap = control->value;
break;
default:
rval = -EINVAL;
goto unlock;
}
if (sdev->power_state)
rval = si4713_tx_tune_power(sdev, power, antcap);
if (rval == 0) {
sdev->power_level = power;
sdev->antenna_capacitor = antcap;
}
unlock:
mutex_unlock(&sdev->mutex);
exit:
return rval;
}
static int si4713_write_econtrol_integers(struct si4713_device *sdev,
struct v4l2_ext_control *control)
{
s32 rval;
u32 *shadow = NULL, val = 0;
s32 bit = 0, mask = 0;
u16 property = 0;
int mul = 0;
unsigned long *table = NULL;
int size = 0;
rval = validate_range(&sdev->sd, control);
if (rval < 0)
goto exit;
rval = si4713_choose_econtrol_action(sdev, control->id, &shadow, &bit,
&mask, &property, &mul, &table, &size);
if (rval < 0)
goto exit;
val = control->value;
if (mul) {
val = control->value / mul;
} else if (table) {
rval = usecs_to_dev(control->value, table, size);
if (rval < 0)
goto exit;
val = rval;
rval = 0;
}
mutex_lock(&sdev->mutex);
if (sdev->power_state) {
if (mask) {
rval = si4713_read_property(sdev, property, &val);
if (rval < 0)
goto unlock;
val = set_bits(val, control->value, bit, mask);
}
rval = si4713_write_property(sdev, property, val);
if (rval < 0)
goto unlock;
if (mask)
val = control->value;
}
if (mul) {
*shadow = val * mul;
} else if (table) {
rval = dev_to_usecs(val, table, size);
if (rval < 0)
goto unlock;
*shadow = rval;
rval = 0;
} else {
*shadow = val;
}
unlock:
mutex_unlock(&sdev->mutex);
exit:
return rval;
}
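/*
 * Worked example for the scaled path (illustrative): a control with
 * mul = 10, such as V4L2_CID_PILOT_TONE_DEVIATION, maps a requested
 * value of 675 to 675 / 10 = 67 on the chip, and the shadow copy then
 * becomes 67 * 10 = 670 -- the value is rounded down to what the
 * hardware can represent. The table path behaves the same way using
 * usecs_to_dev()/dev_to_usecs() instead of a multiplier.
 */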
static int si4713_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f);
static int si4713_s_modulator(struct v4l2_subdev *sd, struct v4l2_modulator *);
/*
* si4713_setup - Sets the device up with current configuration.
* @sdev: si4713_device structure for the device we are communicating with
*/
static int si4713_setup(struct si4713_device *sdev)
{
struct v4l2_ext_control ctrl;
struct v4l2_frequency f;
struct v4l2_modulator vm;
struct si4713_device *tmp;
int rval = 0;
tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
if (!tmp)
return -ENOMEM;
/* Get a local copy to avoid race */
mutex_lock(&sdev->mutex);
memcpy(tmp, sdev, sizeof(*sdev));
mutex_unlock(&sdev->mutex);
ctrl.id = V4L2_CID_RDS_TX_PI;
ctrl.value = tmp->rds_info.pi;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_AUDIO_COMPRESSION_THRESHOLD;
ctrl.value = tmp->acomp_info.threshold;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_AUDIO_COMPRESSION_GAIN;
ctrl.value = tmp->acomp_info.gain;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_PILOT_TONE_FREQUENCY;
ctrl.value = tmp->pilot_info.frequency;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME;
ctrl.value = tmp->acomp_info.attack_time;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_PILOT_TONE_DEVIATION;
ctrl.value = tmp->pilot_info.deviation;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_AUDIO_LIMITER_DEVIATION;
ctrl.value = tmp->limiter_info.deviation;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_RDS_TX_DEVIATION;
ctrl.value = tmp->rds_info.deviation;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_RDS_TX_PTY;
ctrl.value = tmp->rds_info.pty;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_AUDIO_LIMITER_ENABLED;
ctrl.value = tmp->limiter_info.enabled;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_AUDIO_COMPRESSION_ENABLED;
ctrl.value = tmp->acomp_info.enabled;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_PILOT_TONE_ENABLED;
ctrl.value = tmp->pilot_info.enabled;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_AUDIO_LIMITER_RELEASE_TIME;
ctrl.value = tmp->limiter_info.release_time;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME;
ctrl.value = tmp->acomp_info.release_time;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_TUNE_PREEMPHASIS;
ctrl.value = tmp->preemphasis;
rval |= si4713_write_econtrol_integers(sdev, &ctrl);
ctrl.id = V4L2_CID_RDS_TX_PS_NAME;
rval |= si4713_set_rds_ps_name(sdev, tmp->rds_info.ps_name);
ctrl.id = V4L2_CID_RDS_TX_RADIO_TEXT;
rval |= si4713_set_rds_radio_text(sdev, tmp->rds_info.radio_text);
/* The device setup procedure requires the frequency to be set first */
f.frequency = tmp->frequency ? tmp->frequency : DEFAULT_FREQUENCY;
f.frequency = si4713_to_v4l2(f.frequency);
rval |= si4713_s_frequency(&sdev->sd, &f);
ctrl.id = V4L2_CID_TUNE_POWER_LEVEL;
ctrl.value = tmp->power_level;
rval |= si4713_write_econtrol_tune(sdev, &ctrl);
ctrl.id = V4L2_CID_TUNE_ANTENNA_CAPACITOR;
ctrl.value = tmp->antenna_capacitor;
rval |= si4713_write_econtrol_tune(sdev, &ctrl);
vm.index = 0;
if (tmp->stereo)
vm.txsubchans = V4L2_TUNER_SUB_STEREO;
else
vm.txsubchans = V4L2_TUNER_SUB_MONO;
if (tmp->rds_info.enabled)
vm.txsubchans |= V4L2_TUNER_SUB_RDS;
si4713_s_modulator(&sdev->sd, &vm);
kfree(tmp);
return rval;
}
/*
* si4713_initialize - Sets the device up with default configuration.
* @sdev: si4713_device structure for the device we are communicating with
*/
static int si4713_initialize(struct si4713_device *sdev)
{
int rval;
rval = si4713_set_power_state(sdev, POWER_ON);
if (rval < 0)
goto exit;
rval = si4713_checkrev(sdev);
if (rval < 0)
goto exit;
rval = si4713_set_power_state(sdev, POWER_OFF);
if (rval < 0)
goto exit;
mutex_lock(&sdev->mutex);
sdev->rds_info.pi = DEFAULT_RDS_PI;
sdev->rds_info.pty = DEFAULT_RDS_PTY;
sdev->rds_info.deviation = DEFAULT_RDS_DEVIATION;
strlcpy(sdev->rds_info.ps_name, DEFAULT_RDS_PS_NAME, MAX_RDS_PS_NAME);
strlcpy(sdev->rds_info.radio_text, DEFAULT_RDS_RADIO_TEXT,
MAX_RDS_RADIO_TEXT);
sdev->rds_info.enabled = 1;
sdev->limiter_info.release_time = DEFAULT_LIMITER_RTIME;
sdev->limiter_info.deviation = DEFAULT_LIMITER_DEV;
sdev->limiter_info.enabled = 1;
sdev->pilot_info.deviation = DEFAULT_PILOT_DEVIATION;
sdev->pilot_info.frequency = DEFAULT_PILOT_FREQUENCY;
sdev->pilot_info.enabled = 1;
sdev->acomp_info.release_time = DEFAULT_ACOMP_RTIME;
sdev->acomp_info.attack_time = DEFAULT_ACOMP_ATIME;
sdev->acomp_info.threshold = DEFAULT_ACOMP_THRESHOLD;
sdev->acomp_info.gain = DEFAULT_ACOMP_GAIN;
sdev->acomp_info.enabled = 1;
sdev->frequency = DEFAULT_FREQUENCY;
sdev->preemphasis = DEFAULT_PREEMPHASIS;
sdev->mute = DEFAULT_MUTE;
sdev->power_level = DEFAULT_POWER_LEVEL;
sdev->antenna_capacitor = 0;
sdev->stereo = 1;
sdev->tune_rnl = DEFAULT_TUNE_RNL;
mutex_unlock(&sdev->mutex);
exit:
return rval;
}
/* read string property */
static int si4713_read_econtrol_string(struct si4713_device *sdev,
struct v4l2_ext_control *control)
{
s32 rval = 0;
switch (control->id) {
case V4L2_CID_RDS_TX_PS_NAME:
if (strlen(sdev->rds_info.ps_name) + 1 > control->size) {
control->size = MAX_RDS_PS_NAME + 1;
rval = -ENOSPC;
goto exit;
}
rval = copy_to_user(control->string, sdev->rds_info.ps_name,
strlen(sdev->rds_info.ps_name) + 1);
if (rval)
rval = -EFAULT;
break;
case V4L2_CID_RDS_TX_RADIO_TEXT:
if (strlen(sdev->rds_info.radio_text) + 1 > control->size) {
control->size = MAX_RDS_RADIO_TEXT + 1;
rval = -ENOSPC;
goto exit;
}
rval = copy_to_user(control->string, sdev->rds_info.radio_text,
strlen(sdev->rds_info.radio_text) + 1);
if (rval)
rval = -EFAULT;
break;
default:
rval = -EINVAL;
break;
}
exit:
return rval;
}
/*
* si4713_update_tune_status - update properties from tx_tune_status
* command. Must be called with sdev->mutex held.
* @sdev: si4713_device structure for the device we are communicating with
*/
static int si4713_update_tune_status(struct si4713_device *sdev)
{
int rval;
u16 f = 0;
u8 p = 0, a = 0, n = 0;
rval = si4713_tx_tune_status(sdev, 0x00, &f, &p, &a, &n);
if (rval < 0)
goto exit;
sdev->power_level = p;
sdev->antenna_capacitor = a;
sdev->tune_rnl = n;
exit:
return rval;
}
/* properties which use tx_tune_status */
static int si4713_read_econtrol_tune(struct si4713_device *sdev,
struct v4l2_ext_control *control)
{
s32 rval = 0;
mutex_lock(&sdev->mutex);
if (sdev->power_state) {
rval = si4713_update_tune_status(sdev);
if (rval < 0)
goto unlock;
}
switch (control->id) {
case V4L2_CID_TUNE_POWER_LEVEL:
control->value = sdev->power_level;
break;
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
control->value = sdev->antenna_capacitor;
break;
default:
rval = -EINVAL;
}
unlock:
mutex_unlock(&sdev->mutex);
return rval;
}
static int si4713_read_econtrol_integers(struct si4713_device *sdev,
struct v4l2_ext_control *control)
{
s32 rval;
u32 *shadow = NULL, val = 0;
s32 bit = 0, mask = 0;
u16 property = 0;
int mul = 0;
unsigned long *table = NULL;
int size = 0;
rval = si4713_choose_econtrol_action(sdev, control->id, &shadow, &bit,
&mask, &property, &mul, &table, &size);
if (rval < 0)
goto exit;
mutex_lock(&sdev->mutex);
if (sdev->power_state) {
rval = si4713_read_property(sdev, property, &val);
if (rval < 0)
goto unlock;
/* Keep negative values for threshold */
if (control->id == V4L2_CID_AUDIO_COMPRESSION_THRESHOLD)
*shadow = (s16)val;
else if (mask)
*shadow = get_status_bit(val, bit, mask);
else if (mul)
*shadow = val * mul;
else
*shadow = dev_to_usecs(val, table, size);
}
control->value = *shadow;
unlock:
mutex_unlock(&sdev->mutex);
exit:
return rval;
}
/*
* Video4Linux Subdev Interface
*/
/* si4713_s_ext_ctrls - set extended controls value */
static int si4713_s_ext_ctrls(struct v4l2_subdev *sd,
struct v4l2_ext_controls *ctrls)
{
struct si4713_device *sdev = to_si4713_device(sd);
int i;
if (ctrls->ctrl_class != V4L2_CTRL_CLASS_FM_TX)
return -EINVAL;
for (i = 0; i < ctrls->count; i++) {
int err;
switch ((ctrls->controls + i)->id) {
case V4L2_CID_RDS_TX_PS_NAME:
case V4L2_CID_RDS_TX_RADIO_TEXT:
err = si4713_write_econtrol_string(sdev,
ctrls->controls + i);
break;
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
case V4L2_CID_TUNE_POWER_LEVEL:
err = si4713_write_econtrol_tune(sdev,
ctrls->controls + i);
break;
default:
err = si4713_write_econtrol_integers(sdev,
ctrls->controls + i);
}
if (err < 0) {
ctrls->error_idx = i;
return err;
}
}
return 0;
}
/* si4713_g_ext_ctrls - get extended controls value */
static int si4713_g_ext_ctrls(struct v4l2_subdev *sd,
struct v4l2_ext_controls *ctrls)
{
struct si4713_device *sdev = to_si4713_device(sd);
int i;
if (ctrls->ctrl_class != V4L2_CTRL_CLASS_FM_TX)
return -EINVAL;
for (i = 0; i < ctrls->count; i++) {
int err;
switch ((ctrls->controls + i)->id) {
case V4L2_CID_RDS_TX_PS_NAME:
case V4L2_CID_RDS_TX_RADIO_TEXT:
err = si4713_read_econtrol_string(sdev,
ctrls->controls + i);
break;
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
case V4L2_CID_TUNE_POWER_LEVEL:
err = si4713_read_econtrol_tune(sdev,
ctrls->controls + i);
break;
default:
err = si4713_read_econtrol_integers(sdev,
ctrls->controls + i);
}
if (err < 0) {
ctrls->error_idx = i;
return err;
}
}
return 0;
}
/* si4713_queryctrl - enumerate control items */
static int si4713_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc)
{
int rval = 0;
switch (qc->id) {
/* User class controls */
case V4L2_CID_AUDIO_MUTE:
rval = v4l2_ctrl_query_fill(qc, 0, 1, 1, DEFAULT_MUTE);
break;
/* FM_TX class controls */
case V4L2_CID_RDS_TX_PI:
rval = v4l2_ctrl_query_fill(qc, 0, 0xFFFF, 1, DEFAULT_RDS_PI);
break;
case V4L2_CID_RDS_TX_PTY:
rval = v4l2_ctrl_query_fill(qc, 0, 31, 1, DEFAULT_RDS_PTY);
break;
case V4L2_CID_RDS_TX_DEVIATION:
rval = v4l2_ctrl_query_fill(qc, 0, MAX_RDS_DEVIATION,
10, DEFAULT_RDS_DEVIATION);
break;
case V4L2_CID_RDS_TX_PS_NAME:
/*
* Report step as 8. Per the RDS spec, the PS name
* should be exactly 8 characters, but some receivers scroll
* strings sized as 8xN, so any multiple of 8 is accepted.
*/
rval = v4l2_ctrl_query_fill(qc, 0, MAX_RDS_PS_NAME, 8, 0);
break;
case V4L2_CID_RDS_TX_RADIO_TEXT:
/*
* Report step as 32 (one 2A block). Per the RDS spec,
* radio text should be 32 characters for a 2A block, but some
* receivers scroll strings sized as 32xN, so any multiple of
* 32 is accepted.
*/
rval = v4l2_ctrl_query_fill(qc, 0, MAX_RDS_RADIO_TEXT, 32, 0);
break;
case V4L2_CID_AUDIO_LIMITER_ENABLED:
rval = v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
break;
case V4L2_CID_AUDIO_LIMITER_RELEASE_TIME:
rval = v4l2_ctrl_query_fill(qc, 250, MAX_LIMITER_RELEASE_TIME,
50, DEFAULT_LIMITER_RTIME);
break;
case V4L2_CID_AUDIO_LIMITER_DEVIATION:
rval = v4l2_ctrl_query_fill(qc, 0, MAX_LIMITER_DEVIATION,
10, DEFAULT_LIMITER_DEV);
break;
case V4L2_CID_AUDIO_COMPRESSION_ENABLED:
rval = v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
break;
case V4L2_CID_AUDIO_COMPRESSION_GAIN:
rval = v4l2_ctrl_query_fill(qc, 0, MAX_ACOMP_GAIN, 1,
DEFAULT_ACOMP_GAIN);
break;
case V4L2_CID_AUDIO_COMPRESSION_THRESHOLD:
rval = v4l2_ctrl_query_fill(qc, MIN_ACOMP_THRESHOLD,
MAX_ACOMP_THRESHOLD, 1,
DEFAULT_ACOMP_THRESHOLD);
break;
case V4L2_CID_AUDIO_COMPRESSION_ATTACK_TIME:
rval = v4l2_ctrl_query_fill(qc, 0, MAX_ACOMP_ATTACK_TIME,
500, DEFAULT_ACOMP_ATIME);
break;
case V4L2_CID_AUDIO_COMPRESSION_RELEASE_TIME:
rval = v4l2_ctrl_query_fill(qc, 100000, MAX_ACOMP_RELEASE_TIME,
100000, DEFAULT_ACOMP_RTIME);
break;
case V4L2_CID_PILOT_TONE_ENABLED:
rval = v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
break;
case V4L2_CID_PILOT_TONE_DEVIATION:
rval = v4l2_ctrl_query_fill(qc, 0, MAX_PILOT_DEVIATION,
10, DEFAULT_PILOT_DEVIATION);
break;
case V4L2_CID_PILOT_TONE_FREQUENCY:
rval = v4l2_ctrl_query_fill(qc, 0, MAX_PILOT_FREQUENCY,
1, DEFAULT_PILOT_FREQUENCY);
break;
case V4L2_CID_TUNE_PREEMPHASIS:
rval = v4l2_ctrl_query_fill(qc, V4L2_PREEMPHASIS_DISABLED,
V4L2_PREEMPHASIS_75_uS, 1,
V4L2_PREEMPHASIS_50_uS);
break;
case V4L2_CID_TUNE_POWER_LEVEL:
rval = v4l2_ctrl_query_fill(qc, 0, 120, 1, DEFAULT_POWER_LEVEL);
break;
case V4L2_CID_TUNE_ANTENNA_CAPACITOR:
rval = v4l2_ctrl_query_fill(qc, 0, 191, 1, 0);
break;
default:
rval = -EINVAL;
break;
}
return rval;
}
/* si4713_g_ctrl - get the value of a control */
static int si4713_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct si4713_device *sdev = to_si4713_device(sd);
int rval = 0;
if (!sdev)
return -ENODEV;
mutex_lock(&sdev->mutex);
if (sdev->power_state) {
rval = si4713_read_property(sdev, SI4713_TX_LINE_INPUT_MUTE,
&sdev->mute);
if (rval < 0)
goto unlock;
}
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
ctrl->value = get_mute(sdev->mute);
break;
}
unlock:
mutex_unlock(&sdev->mutex);
return rval;
}
/* si4713_s_ctrl - set the value of a control */
static int si4713_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
struct si4713_device *sdev = to_si4713_device(sd);
int rval = 0;
if (!sdev)
return -ENODEV;
switch (ctrl->id) {
case V4L2_CID_AUDIO_MUTE:
if (ctrl->value) {
rval = si4713_set_mute(sdev, ctrl->value);
if (rval < 0)
goto exit;
rval = si4713_set_power_state(sdev, POWER_DOWN);
} else {
rval = si4713_set_power_state(sdev, POWER_UP);
if (rval < 0)
goto exit;
rval = si4713_setup(sdev);
if (rval < 0)
goto exit;
rval = si4713_set_mute(sdev, ctrl->value);
}
break;
}
exit:
return rval;
}
/* si4713_ioctl - deal with private ioctls (only rnl for now) */
long si4713_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
struct si4713_device *sdev = to_si4713_device(sd);
struct si4713_rnl *rnl = arg;
u16 frequency;
int rval = 0;
if (!arg)
return -EINVAL;
mutex_lock(&sdev->mutex);
switch (cmd) {
case SI4713_IOC_MEASURE_RNL:
frequency = v4l2_to_si4713(rnl->frequency);
if (sdev->power_state) {
/* Set desired measurement frequency */
rval = si4713_tx_tune_measure(sdev, frequency, 0);
if (rval < 0)
goto unlock;
/* get results from tune status */
rval = si4713_update_tune_status(sdev);
if (rval < 0)
goto unlock;
}
rnl->rnl = sdev->tune_rnl;
break;
default:
/* nothing */
rval = -ENOIOCTLCMD;
}
unlock:
mutex_unlock(&sdev->mutex);
return rval;
}
static const struct v4l2_subdev_core_ops si4713_subdev_core_ops = {
.queryctrl = si4713_queryctrl,
.g_ext_ctrls = si4713_g_ext_ctrls,
.s_ext_ctrls = si4713_s_ext_ctrls,
.g_ctrl = si4713_g_ctrl,
.s_ctrl = si4713_s_ctrl,
.ioctl = si4713_ioctl,
};
/* si4713_g_modulator - get modulator attributes */
static int si4713_g_modulator(struct v4l2_subdev *sd, struct v4l2_modulator *vm)
{
struct si4713_device *sdev = to_si4713_device(sd);
int rval = 0;
if (!sdev) {
rval = -ENODEV;
goto exit;
}
if (vm->index > 0) {
rval = -EINVAL;
goto exit;
}
strncpy(vm->name, "FM Modulator", 32);
vm->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LOW |
V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_CONTROLS;
/* Report current frequency range limits */
vm->rangelow = si4713_to_v4l2(FREQ_RANGE_LOW);
vm->rangehigh = si4713_to_v4l2(FREQ_RANGE_HIGH);
mutex_lock(&sdev->mutex);
if (sdev->power_state) {
u32 comp_en = 0;
rval = si4713_read_property(sdev, SI4713_TX_COMPONENT_ENABLE,
&comp_en);
if (rval < 0)
goto unlock;
sdev->stereo = get_status_bit(comp_en, 1, 1 << 1);
sdev->rds_info.enabled = get_status_bit(comp_en, 2, 1 << 2);
}
/* Report current audio mode: mono or stereo */
if (sdev->stereo)
vm->txsubchans = V4L2_TUNER_SUB_STEREO;
else
vm->txsubchans = V4L2_TUNER_SUB_MONO;
/* Report rds feature status */
if (sdev->rds_info.enabled)
vm->txsubchans |= V4L2_TUNER_SUB_RDS;
else
vm->txsubchans &= ~V4L2_TUNER_SUB_RDS;
unlock:
mutex_unlock(&sdev->mutex);
exit:
return rval;
}
/* si4713_s_modulator - set modulator attributes */
static int si4713_s_modulator(struct v4l2_subdev *sd, struct v4l2_modulator *vm)
{
struct si4713_device *sdev = to_si4713_device(sd);
int rval = 0;
u16 stereo, rds;
u32 p;
if (!sdev)
return -ENODEV;
if (vm->index > 0)
return -EINVAL;
/* Set audio mode: mono or stereo */
if (vm->txsubchans & V4L2_TUNER_SUB_STEREO)
stereo = 1;
else if (vm->txsubchans & V4L2_TUNER_SUB_MONO)
stereo = 0;
else
return -EINVAL;
rds = !!(vm->txsubchans & V4L2_TUNER_SUB_RDS);
mutex_lock(&sdev->mutex);
if (sdev->power_state) {
rval = si4713_read_property(sdev,
SI4713_TX_COMPONENT_ENABLE, &p);
if (rval < 0)
goto unlock;
p = set_bits(p, stereo, 1, 1 << 1);
p = set_bits(p, rds, 2, 1 << 2);
rval = si4713_write_property(sdev,
SI4713_TX_COMPONENT_ENABLE, p);
if (rval < 0)
goto unlock;
}
sdev->stereo = stereo;
sdev->rds_info.enabled = rds;
unlock:
mutex_unlock(&sdev->mutex);
return rval;
}
/* si4713_g_frequency - get tuner or modulator radio frequency */
static int si4713_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
struct si4713_device *sdev = to_si4713_device(sd);
int rval = 0;
f->type = V4L2_TUNER_RADIO;
mutex_lock(&sdev->mutex);
if (sdev->power_state) {
u16 freq;
u8 p, a, n;
rval = si4713_tx_tune_status(sdev, 0x00, &freq, &p, &a, &n);
if (rval < 0)
goto unlock;
sdev->frequency = freq;
}
f->frequency = si4713_to_v4l2(sdev->frequency);
unlock:
mutex_unlock(&sdev->mutex);
return rval;
}
/* si4713_s_frequency - set tuner or modulator radio frequency */
static int si4713_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f)
{
struct si4713_device *sdev = to_si4713_device(sd);
int rval = 0;
u16 frequency = v4l2_to_si4713(f->frequency);
/* Check frequency range */
if (frequency < FREQ_RANGE_LOW || frequency > FREQ_RANGE_HIGH)
return -EDOM;
mutex_lock(&sdev->mutex);
if (sdev->power_state) {
rval = si4713_tx_tune_freq(sdev, frequency);
if (rval < 0)
goto unlock;
frequency = rval;
rval = 0;
}
sdev->frequency = frequency;
f->frequency = si4713_to_v4l2(frequency);
unlock:
mutex_unlock(&sdev->mutex);
return rval;
}
static const struct v4l2_subdev_tuner_ops si4713_subdev_tuner_ops = {
.g_frequency = si4713_g_frequency,
.s_frequency = si4713_s_frequency,
.g_modulator = si4713_g_modulator,
.s_modulator = si4713_s_modulator,
};
static const struct v4l2_subdev_ops si4713_subdev_ops = {
.core = &si4713_subdev_core_ops,
.tuner = &si4713_subdev_tuner_ops,
};
/*
* I2C driver interface
*/
/* si4713_probe - probe for the device */
static int si4713_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct si4713_device *sdev;
struct si4713_platform_data *pdata = client->dev.platform_data;
int rval, i;
sdev = kzalloc(sizeof *sdev, GFP_KERNEL);
if (!sdev) {
dev_err(&client->dev, "Failed to alloc video device.\n");
rval = -ENOMEM;
goto exit;
}
sdev->gpio_reset = -1;
if (pdata && gpio_is_valid(pdata->gpio_reset)) {
rval = gpio_request(pdata->gpio_reset, "si4713 reset");
if (rval) {
dev_err(&client->dev,
"Failed to request gpio: %d\n", rval);
goto free_sdev;
}
sdev->gpio_reset = pdata->gpio_reset;
gpio_direction_output(sdev->gpio_reset, 0);
}
for (i = 0; i < ARRAY_SIZE(sdev->supplies); i++)
sdev->supplies[i].supply = si4713_supply_names[i];
rval = regulator_bulk_get(&client->dev, ARRAY_SIZE(sdev->supplies),
sdev->supplies);
if (rval) {
dev_err(&client->dev, "Cannot get regulators: %d\n", rval);
goto free_gpio;
}
v4l2_i2c_subdev_init(&sdev->sd, client, &si4713_subdev_ops);
mutex_init(&sdev->mutex);
init_completion(&sdev->work);
if (client->irq) {
rval = request_irq(client->irq,
si4713_handler, IRQF_TRIGGER_FALLING | IRQF_DISABLED,
client->name, sdev);
if (rval < 0) {
v4l2_err(&sdev->sd, "Could not request IRQ\n");
goto put_reg;
}
v4l2_dbg(1, debug, &sdev->sd, "IRQ requested.\n");
} else {
v4l2_warn(&sdev->sd, "IRQ not configured. Using timeouts.\n");
}
rval = si4713_initialize(sdev);
if (rval < 0) {
v4l2_err(&sdev->sd, "Failed to probe device information.\n");
goto free_irq;
}
return 0;
free_irq:
if (client->irq)
free_irq(client->irq, sdev);
put_reg:
regulator_bulk_free(ARRAY_SIZE(sdev->supplies), sdev->supplies);
free_gpio:
if (gpio_is_valid(sdev->gpio_reset))
gpio_free(sdev->gpio_reset);
free_sdev:
kfree(sdev);
exit:
return rval;
}
/* si4713_remove - remove the device */
static int si4713_remove(struct i2c_client *client)
{
struct v4l2_subdev *sd = i2c_get_clientdata(client);
struct si4713_device *sdev = to_si4713_device(sd);
if (sdev->power_state)
si4713_set_power_state(sdev, POWER_DOWN);
if (client->irq > 0)
free_irq(client->irq, sdev);
v4l2_device_unregister_subdev(sd);
regulator_bulk_free(ARRAY_SIZE(sdev->supplies), sdev->supplies);
if (gpio_is_valid(sdev->gpio_reset))
gpio_free(sdev->gpio_reset);
kfree(sdev);
return 0;
}
/* si4713_i2c_driver - i2c driver interface */
static const struct i2c_device_id si4713_id[] = {
{ "si4713" , 0 },
{ },
};
MODULE_DEVICE_TABLE(i2c, si4713_id);
static struct i2c_driver si4713_i2c_driver = {
.driver = {
.name = "si4713",
},
.probe = si4713_probe,
.remove = si4713_remove,
.id_table = si4713_id,
};
/* Module Interface */
static int __init si4713_module_init(void)
{
return i2c_add_driver(&si4713_i2c_driver);
}
static void __exit si4713_module_exit(void)
{
i2c_del_driver(&si4713_i2c_driver);
}
module_init(si4713_module_init);
module_exit(si4713_module_exit);
| gpl-2.0 |
wang701/nexus_9_flounder_kernel_src | drivers/scsi/fdomain.c | 3668 | 57128 | /* fdomain.c -- Future Domain TMC-16x0 SCSI driver
* Created: Sun May 3 18:53:19 1992 by faith@cs.unc.edu
* Revised: Mon Dec 28 21:59:02 1998 by faith@acm.org
* Author: Rickard E. Faith, faith@cs.unc.edu
* Copyright 1992-1996, 1998 Rickard E. Faith (faith@acm.org)
* Shared IRQ support added 7/7/2001 by Alan Cox <alan@lxorguk.ukuu.org.uk>
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
**************************************************************************
SUMMARY:
Future Domain BIOS versions supported for autodetect:
2.0, 3.0, 3.2, 3.4 (1.0), 3.5 (2.0), 3.6, 3.61
Chips supported:
TMC-1800, TMC-18C50, TMC-18C30, TMC-36C70
Boards supported:
Future Domain TMC-1650, TMC-1660, TMC-1670, TMC-1680, TMC-1610M/MER/MEX
Future Domain TMC-3260 (PCI)
Quantum ISA-200S, ISA-250MG
Adaptec AHA-2920A (PCI) [BUT *NOT* AHA-2920C -- use aic7xxx instead]
IBM ?
LILO/INSMOD command-line options:
fdomain=<PORT_BASE>,<IRQ>[,<ADAPTER_ID>]
NOTE:
The Adaptec AHA-2920C has an Adaptec AIC-7850 chip on it.
Use the aic7xxx driver for this board.
The Adaptec AHA-2920A has a Future Domain chip on it, so this is the right
driver for that card. Unfortunately, the boxes will probably just say
"2920", so you'll have to look on the card for a Future Domain logo, or a
letter after the 2920.
THANKS:
Thanks to Adaptec for providing PCI boards for testing. This finally
enabled me to test the PCI detection and correct it for PCI boards that do
not have a BIOS at a standard ISA location. For PCI boards, LILO/INSMOD
command-line options should no longer be needed. --RF 18Nov98
DESCRIPTION:
This is the Linux low-level SCSI driver for Future Domain TMC-1660/1680
TMC-1650/1670, and TMC-3260 SCSI host adapters. The 1650 and 1670 have a
25-pin external connector, whereas the 1660 and 1680 have a SCSI-2 50-pin
high-density external connector. The 1670 and 1680 have floppy disk
controllers built in. The TMC-3260 is a PCI bus card.
Future Domain's older boards are based on the TMC-1800 chip, and this
driver was originally written for a TMC-1680 board with the TMC-1800 chip.
More recently, boards are being produced with the TMC-18C50 and TMC-18C30
chips. The latest and greatest board may not work with this driver. If
you have to patch this driver so that it will recognize your board's BIOS
signature, then the driver may fail to function after the board is
detected.
Please note that the drive ordering that Future Domain implemented in BIOS
versions 3.4 and 3.5 is the opposite of the order (currently) used by the
rest of the SCSI industry. If you have BIOS version 3.4 or 3.5, and have
more than one drive, then the drive ordering will be the reverse of that
which you see under DOS. For example, under DOS SCSI ID 0 will be D: and
SCSI ID 1 will be C: (the boot device). Under Linux, SCSI ID 0 will be
/dev/sda and SCSI ID 1 will be /dev/sdb. The Linux ordering is consistent
with that provided by all the other SCSI drivers for Linux. If you want
this changed, you will probably have to patch the higher level SCSI code.
If you do so, please send me patches that are protected by #ifdefs.
If you have a TMC-8xx or TMC-9xx board, then this is not the driver for
your board. Please refer to the Seagate driver for more information and
possible support.
HISTORY:
Linux Driver Driver
Version Version Date Support/Notes
0.0 3 May 1992 V2.0 BIOS; 1800 chip
0.97 1.9 28 Jul 1992
0.98.6 3.1 27 Nov 1992
0.99 3.2 9 Dec 1992
0.99.3 3.3 10 Jan 1993 V3.0 BIOS
0.99.5 3.5 18 Feb 1993
0.99.10 3.6 15 May 1993 V3.2 BIOS; 18C50 chip
0.99.11 3.17 3 Jul 1993 (now under RCS)
0.99.12 3.18 13 Aug 1993
0.99.14 5.6 31 Oct 1993 (reselection code removed)
0.99.15 5.9 23 Jan 1994 V3.4 BIOS (preliminary)
1.0.8/1.1.1 5.15 1 Apr 1994 V3.4 BIOS; 18C30 chip (preliminary)
1.0.9/1.1.3 5.16 7 Apr 1994 V3.4 BIOS; 18C30 chip
1.1.38 5.18 30 Jul 1994 36C70 chip (PCI version of 18C30)
1.1.62 5.20 2 Nov 1994 V3.5 BIOS
1.1.73 5.22 7 Dec 1994 Quantum ISA-200S board; V2.0 BIOS
1.1.82 5.26 14 Jan 1995 V3.5 BIOS; TMC-1610M/MER/MEX board
1.2.10 5.28 5 Jun 1995 Quantum ISA-250MG board; V2.0, V2.01 BIOS
1.3.4 5.31 23 Jun 1995 PCI BIOS-32 detection (preliminary)
1.3.7 5.33 4 Jul 1995 PCI BIOS-32 detection
1.3.28 5.36 17 Sep 1995 V3.61 BIOS; LILO command-line support
1.3.34 5.39 12 Oct 1995 V3.60 BIOS; /proc
1.3.72 5.39 8 Feb 1996 Adaptec AHA-2920 board
1.3.85 5.41 4 Apr 1996
2.0.12 5.44 8 Aug 1996 Use ID 7 for all PCI cards
2.1.1 5.45 2 Oct 1996 Update ROM accesses for 2.1.x
2.1.97 5.46 23 Apr 1998 Rewritten PCI detection routines [mj]
2.1.11x 5.47 9 Aug 1998 Touched for 8 SCSI disk majors support
5.48 18 Nov 1998 BIOS no longer needed for PCI detection
2.2.0 5.50 28 Dec 1998 Support insmod parameters
REFERENCES USED:
"TMC-1800 SCSI Chip Specification (FDC-1800T)", Future Domain Corporation,
1990.
"Technical Reference Manual: 18C50 SCSI Host Adapter Chip", Future Domain
Corporation, January 1992.
"LXT SCSI Products: Specifications and OEM Technical Manual (Revision
B/September 1991)", Maxtor Corporation, 1991.
"7213S product Manual (Revision P3)", Maxtor Corporation, 1992.
"Draft Proposed American National Standard: Small Computer System
Interface - 2 (SCSI-2)", Global Engineering Documents. (X3T9.2/86-109,
revision 10h, October 17, 1991)
Private communications, Drew Eckhardt (drew@cs.colorado.edu) and Eric
Youngdale (ericy@cais.com), 1992.
Private communication, Tuong Le (Future Domain Engineering department),
1994. (Disk geometry computations for Future Domain BIOS version 3.4, and
TMC-18C30 detection.)
Hogan, Thom. The Programmer's PC Sourcebook. Microsoft Press, 1988. Page
60 (2.39: Disk Partition Table Layout).
"18C30 Technical Reference Manual", Future Domain Corporation, 1993, page
6-1.
NOTES ON REFERENCES:
The Maxtor manuals were free. Maxtor telephone technical support is
great!
The Future Domain manuals were $25 and $35. They document the chip, not
the TMC-16x0 boards, so some information I had to guess at. In 1992,
Future Domain sold DOS BIOS source for $250 and the UN*X driver source was
$750, but these required a non-disclosure agreement, so even if I could
have afforded them, they would *not* have been useful for writing this
publicly distributable driver. Future Domain technical support has
provided some information on the phone and have sent a few useful FAXs.
They have been much more helpful since they started to recognize that the
word "Linux" refers to an operating system :-).
ALPHA TESTERS:
There are many other alpha testers that come and go as the driver
develops. The people listed here were most helpful in times of greatest
need (mostly early on -- I've probably left out a few worthy people in
more recent times):
Todd Carrico (todd@wutc.wustl.edu), Dan Poirier (poirier@cs.unc.edu ), Ken
Corey (kenc@sol.acs.unt.edu), C. de Bruin (bruin@bruin@sterbbs.nl), Sakari
Aaltonen (sakaria@vipunen.hit.fi), John Rice (rice@xanth.cs.odu.edu), Brad
Yearwood (brad@optilink.com), and Ray Toy (toy@soho.crd.ge.com).
Special thanks to Tien-Wan Yang (twyang@cs.uh.edu), who graciously lent me
his 18C50-based card for debugging. He is the sole reason that this
driver works with the 18C50 chip.
Thanks to Dave Newman (dnewman@crl.com) for providing initial patches for
the version 3.4 BIOS.
Thanks to James T. McKinley (mckinley@msupa.pa.msu.edu) for providing
patches that support the TMC-3260, a PCI bus card with the 36C70 chip.
The 36C70 chip appears to be "completely compatible" with the 18C30 chip.
Thanks to Eric Kasten (tigger@petroglyph.cl.msu.edu) for providing the
patch for the version 3.5 BIOS.
Thanks to Stephen Henson (shenson@nyx10.cs.du.edu) for providing the
patch for the Quantum ISA-200S SCSI adapter.
Thanks to Adam Bowen for the signature to the 1610M/MER/MEX scsi cards, to
Martin Andrews (andrewm@ccfadm.eeg.ccf.org) for the signature to some
random TMC-1680 repackaged by IBM; and to Mintak Ng (mintak@panix.com) for
the version 3.61 BIOS signature.
Thanks to Mark Singer (elf@netcom.com) and Richard Simpson
(rsimpson@ewrcsdra.demon.co.uk) for more Quantum signatures and detective
work on the Quantum RAM layout.
Special thanks to James T. McKinley (mckinley@msupa.pa.msu.edu) for
providing patches for proper PCI BIOS32-mediated detection of the TMC-3260
card (a PCI bus card with the 36C70 chip). Please send James PCI-related
bug reports.
Thanks to Tom Cavin (tec@usa1.com) for preliminary command-line option
patches.
New PCI detection code written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>
Insmod parameter code based on patches from Daniel Graham
<graham@balance.uoregon.edu>.
All of the alpha testers deserve much thanks.
NOTES ON USER DEFINABLE OPTIONS:
DEBUG: This turns on the printing of various debug information.
ENABLE_PARITY: This turns on SCSI parity checking. With the current
driver, all attached devices must support SCSI parity. If none of your
devices support parity, then you can probably get the driver to work by
turning this option off. I have no way of testing this, however, and it
would appear that no one ever uses this option.
FIFO_COUNT: The host adapter has an 8K cache (host adapters based on the
18C30 chip have a 2k cache). When this many 512 byte blocks are filled by
the SCSI device, an interrupt will be raised. Therefore, this could be as
low as 0, or as high as 16. Note, however, that values which are too high
or too low seem to prevent any interrupts from occurring, and thereby lock
up the machine. I have found that 2 is a good number, but throughput may
be increased by changing this value to values which are close to 2.
Please let me know if you try any different values.
RESELECTION: This is no longer an option, since I gave up trying to
implement it in version 4.x of this driver. It did not improve
performance at all and made the driver unstable (because I never found one
of the two race conditions which were introduced by the multiple
outstanding command code). The instability seems a very high price to pay
just so that you don't have to wait for the tape to rewind. If you want
this feature implemented, send me patches. I'll be happy to send a copy
of my (broken) driver to anyone who would like to see a copy.
**************************************************************************/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/stat.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <scsi/scsicam.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include "fdomain.h"
#ifndef PCMCIA
MODULE_AUTHOR("Rickard E. Faith");
MODULE_DESCRIPTION("Future domain SCSI driver");
MODULE_LICENSE("GPL");
#endif
#define VERSION "$Revision: 5.51 $"
/* START OF USER DEFINABLE OPTIONS */
#define DEBUG 0 /* Enable debugging output */
#define ENABLE_PARITY 1 /* Enable SCSI Parity */
#define FIFO_COUNT 2 /* Number of 512 byte blocks before INTR */
/* END OF USER DEFINABLE OPTIONS */
#if DEBUG
#define EVERY_ACCESS 0 /* Write a line on every scsi access */
#define ERRORS_ONLY 1 /* Only write a line if there is an error */
#define DEBUG_DETECT 0 /* Debug fdomain_16x0_detect() */
#define DEBUG_MESSAGES 1 /* Debug MESSAGE IN phase */
#define DEBUG_ABORT 1 /* Debug abort() routine */
#define DEBUG_RESET 1 /* Debug reset() routine */
#define DEBUG_RACE 1 /* Debug interrupt-driven race condition */
#else
#define EVERY_ACCESS 0 /* LEAVE THESE ALONE--CHANGE THE ONES ABOVE */
#define ERRORS_ONLY 0
#define DEBUG_DETECT 0
#define DEBUG_MESSAGES 0
#define DEBUG_ABORT 0
#define DEBUG_RESET 0
#define DEBUG_RACE 0
#endif
/* Errors are reported on the line, so we don't need to report them again */
#if EVERY_ACCESS
#undef ERRORS_ONLY
#define ERRORS_ONLY 0
#endif
#if ENABLE_PARITY
#define PARITY_MASK 0x08
#else
#define PARITY_MASK 0x00
#endif
enum chip_type {
unknown = 0x00,
tmc1800 = 0x01,
tmc18c50 = 0x02,
tmc18c30 = 0x03,
};
enum {
in_arbitration = 0x02,
in_selection = 0x04,
in_other = 0x08,
disconnect = 0x10,
aborted = 0x20,
sent_ident = 0x40,
};
enum in_port_type {
Read_SCSI_Data = 0,
SCSI_Status = 1,
TMC_Status = 2,
FIFO_Status = 3, /* tmc18c50/tmc18c30 only */
Interrupt_Cond = 4, /* tmc18c50/tmc18c30 only */
LSB_ID_Code = 5,
MSB_ID_Code = 6,
Read_Loopback = 7,
SCSI_Data_NoACK = 8,
Interrupt_Status = 9,
Configuration1 = 10,
Configuration2 = 11, /* tmc18c50/tmc18c30 only */
Read_FIFO = 12,
FIFO_Data_Count = 14
};
enum out_port_type {
Write_SCSI_Data = 0,
SCSI_Cntl = 1,
Interrupt_Cntl = 2,
SCSI_Mode_Cntl = 3,
TMC_Cntl = 4,
Memory_Cntl = 5, /* tmc18c50/tmc18c30 only */
Write_Loopback = 7,
IO_Control = 11, /* tmc18c30 only */
Write_FIFO = 12
};
/* .bss will zero all the static variables below */
static int port_base;
static unsigned long bios_base;
static void __iomem * bios_mem;
static int bios_major;
static int bios_minor;
static int PCI_bus;
#ifdef CONFIG_PCI
static struct pci_dev *PCI_dev;
#endif
static int Quantum; /* Quantum board variant */
static int interrupt_level;
static volatile int in_command;
static struct scsi_cmnd *current_SC;
static enum chip_type chip = unknown;
static int adapter_mask;
static int this_id;
static int setup_called;
#if DEBUG_RACE
static volatile int in_interrupt_flag;
#endif
static int FIFO_Size = 0x2000; /* 8k FIFO for
pre-tmc18c30 chips */
static irqreturn_t do_fdomain_16x0_intr( int irq, void *dev_id );
/* Allow insmod parameters to be like LILO parameters. For example:
insmod fdomain fdomain=0x140,11 */
static char * fdomain = NULL;
module_param(fdomain, charp, 0);
#ifndef PCMCIA
static unsigned long addresses[] = {
0xc8000,
0xca000,
0xce000,
0xde000,
0xcc000, /* Extra addresses for PCI boards */
0xd0000,
0xe0000,
};
#define ADDRESS_COUNT ARRAY_SIZE(addresses)
static unsigned short ports[] = { 0x140, 0x150, 0x160, 0x170 };
#define PORT_COUNT ARRAY_SIZE(ports)
static unsigned short ints[] = { 3, 5, 10, 11, 12, 14, 15, 0 };
#endif /* !PCMCIA */
/*
READ THIS BEFORE YOU ADD A SIGNATURE!
READING THIS SHORT NOTE CAN SAVE YOU LOTS OF TIME!
READ EVERY WORD, ESPECIALLY THE WORD *NOT*
This driver works *ONLY* for Future Domain cards using the TMC-1800,
TMC-18C50, or TMC-18C30 chip. This includes models TMC-1650, 1660, 1670,
and 1680. These are all 16-bit cards.
The following BIOS signatures are for boards which do *NOT*
work with this driver (these TMC-8xx and TMC-9xx boards may work with the
Seagate driver):
FUTURE DOMAIN CORP. (C) 1986-1988 V4.0I 03/16/88
FUTURE DOMAIN CORP. (C) 1986-1989 V5.0C2/14/89
FUTURE DOMAIN CORP. (C) 1986-1989 V6.0A7/28/89
FUTURE DOMAIN CORP. (C) 1986-1990 V6.0105/31/90
FUTURE DOMAIN CORP. (C) 1986-1990 V6.0209/18/90
FUTURE DOMAIN CORP. (C) 1986-1990 V7.009/18/90
FUTURE DOMAIN CORP. (C) 1992 V8.00.004/02/92
(The cards which do *NOT* work are all 8-bit cards -- although some of
them have a 16-bit form-factor, the upper 8-bits are used only for IRQs
and are *NOT* used for data. You can tell the difference by following
the tracings on the circuit board -- if only the IRQ lines are involved,
you have an "8-bit" card and should *NOT* use this driver.)
*/
#ifndef PCMCIA
static struct signature {
const char *signature;
int sig_offset;
int sig_length;
int major_bios_version;
int minor_bios_version;
int flag; /* 1 == PCI_bus, 2 == ISA_200S, 3 == ISA_250MG, 4 == ISA_200S */
} signatures[] = {
/* 1 2 3 4 5 6 */
/* 123456789012345678901234567890123456789012345678901234567890 */
{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 5, 50, 2, 0, 0 },
{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V1.07/28/89", 5, 50, 2, 0, 0 },
{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.07/28/89", 72, 50, 2, 0, 2 },
{ "FUTURE DOMAIN CORP. (C) 1986-1990 1800-V2.0", 73, 43, 2, 0, 3 },
{ "FUTURE DOMAIN CORP. (C) 1991 1800-V2.0.", 72, 39, 2, 0, 4 },
{ "FUTURE DOMAIN CORP. (C) 1992 V3.00.004/02/92", 5, 44, 3, 0, 0 },
{ "FUTURE DOMAIN TMC-18XX (C) 1993 V3.203/12/93", 5, 44, 3, 2, 0 },
{ "IBM F1 P2 BIOS v1.0104/29/93", 5, 28, 3, -1, 0 },
{ "Future Domain Corp. V1.0008/18/93", 5, 33, 3, 4, 0 },
{ "Future Domain Corp. V1.0008/18/93", 26, 33, 3, 4, 1 },
{ "Adaptec AHA-2920 PCI-SCSI Card", 42, 31, 3, -1, 1 },
{ "IBM F1 P264/32", 5, 14, 3, -1, 1 },
/* This next signature may not be a 3.5 bios */
{ "Future Domain Corp. V2.0108/18/93", 5, 33, 3, 5, 0 },
{ "FUTURE DOMAIN CORP. V3.5008/18/93", 5, 34, 3, 5, 0 },
{ "FUTURE DOMAIN 18c30/18c50/1800 (C) 1994 V3.5", 5, 44, 3, 5, 0 },
{ "FUTURE DOMAIN CORP. V3.6008/18/93", 5, 34, 3, 6, 0 },
{ "FUTURE DOMAIN CORP. V3.6108/18/93", 5, 34, 3, 6, 0 },
{ "FUTURE DOMAIN TMC-18XX", 5, 22, -1, -1, 0 },
/* READ NOTICE ABOVE *BEFORE* YOU WASTE YOUR TIME ADDING A SIGNATURE
Also, fix the disk geometry code for your signature and send your
changes to faith@cs.unc.edu. Above all, do *NOT* change any old
signatures!
Note that the last line will match a "generic" 18XX bios. Because
Future Domain has changed the host SCSI ID and/or the location of the
geometry information in the on-board RAM area for each of the first
three BIOS's, it is still important to enter a fully qualified
signature in the table for any new BIOS's (after the host SCSI ID and
geometry location are verified). */
};
#define SIGNATURE_COUNT ARRAY_SIZE(signatures)
#endif /* !PCMCIA */
static void print_banner( struct Scsi_Host *shpnt )
{
if (!shpnt) return; /* This won't ever happen */
if (bios_major < 0 && bios_minor < 0) {
printk(KERN_INFO "scsi%d: <fdomain> No BIOS; using scsi id %d\n",
shpnt->host_no, shpnt->this_id);
} else {
printk(KERN_INFO "scsi%d: <fdomain> BIOS version ", shpnt->host_no);
if (bios_major >= 0) printk("%d.", bios_major);
else printk("?.");
if (bios_minor >= 0) printk("%d", bios_minor);
else printk("?.");
printk( " at 0x%lx using scsi id %d\n",
bios_base, shpnt->this_id );
}
/* If this driver works for later FD PCI
boards, we will have to modify banner
for additional PCI cards, but for now if
it's PCI it's a TMC-3260 - JTM */
printk(KERN_INFO "scsi%d: <fdomain> %s chip at 0x%x irq ",
shpnt->host_no,
chip == tmc1800 ? "TMC-1800" : (chip == tmc18c50 ? "TMC-18C50" : (chip == tmc18c30 ? (PCI_bus ? "TMC-36C70 (PCI bus)" : "TMC-18C30") : "Unknown")),
port_base);
if (interrupt_level)
printk("%d", interrupt_level);
else
printk("<none>");
printk( "\n" );
}
int fdomain_setup(char *str)
{
int ints[4];
(void)get_options(str, ARRAY_SIZE(ints), ints);
if (setup_called++ || ints[0] < 2 || ints[0] > 3) {
printk(KERN_INFO "scsi: <fdomain> Usage: fdomain=<PORT_BASE>,<IRQ>[,<ADAPTER_ID>]\n");
printk(KERN_ERR "scsi: <fdomain> Bad LILO/INSMOD parameters?\n");
return 0;
}
port_base = ints[0] >= 1 ? ints[1] : 0;
interrupt_level = ints[0] >= 2 ? ints[2] : 0;
this_id = ints[0] >= 3 ? ints[3] : 0;
bios_major = bios_minor = -1; /* Use geometry for BIOS version >= 3.4 */
++setup_called;
return 1;
}
__setup("fdomain=", fdomain_setup);
static void do_pause(unsigned amount) /* Pause for amount*10 milliseconds */
{
mdelay(10*amount);
}
static inline void fdomain_make_bus_idle( void )
{
outb(0, port_base + SCSI_Cntl);
outb(0, port_base + SCSI_Mode_Cntl);
if (chip == tmc18c50 || chip == tmc18c30)
outb(0x21 | PARITY_MASK, port_base + TMC_Cntl); /* Clear forced intr. */
else
outb(0x01 | PARITY_MASK, port_base + TMC_Cntl);
}
static int fdomain_is_valid_port( int port )
{
#if DEBUG_DETECT
printk( " (%x%x),",
inb( port + MSB_ID_Code ), inb( port + LSB_ID_Code ) );
#endif
/* The MCA ID is a unique id for each MCA compatible board. We
are using ISA boards, but Future Domain provides the MCA ID
anyway. We can use this ID to ensure that this is a Future
Domain TMC-1660/TMC-1680.
*/
if (inb( port + LSB_ID_Code ) != 0xe9) { /* test for 0x6127 id */
if (inb( port + LSB_ID_Code ) != 0x27) return 0;
if (inb( port + MSB_ID_Code ) != 0x61) return 0;
chip = tmc1800;
} else { /* test for 0xe960 id */
if (inb( port + MSB_ID_Code ) != 0x60) return 0;
chip = tmc18c50;
/* Try to toggle 32-bit mode. This only
works on an 18c30 chip. (User reports
say this works, so we should switch to
it in the near future.) */
outb( 0x80, port + IO_Control );
if ((inb( port + Configuration2 ) & 0x80) == 0x80) {
outb( 0x00, port + IO_Control );
if ((inb( port + Configuration2 ) & 0x80) == 0x00) {
chip = tmc18c30;
FIFO_Size = 0x800; /* 2k FIFO */
}
}
/* If that failed, we are an 18c50. */
}
return 1;
}
static int fdomain_test_loopback( void )
{
int i;
int result;
for (i = 0; i < 255; i++) {
outb( i, port_base + Write_Loopback );
result = inb( port_base + Read_Loopback );
if (i != result)
return 1;
}
return 0;
}
#ifndef PCMCIA
/* fdomain_get_irq assumes that we have a valid MCA ID for a
TMC-1660/TMC-1680 Future Domain board. Now, check to be sure the
bios_base matches these ports. If someone was unlucky enough to have
purchased more than one Future Domain board, then they will have to
modify this code, as we only detect one board here. [The one with the
lowest bios_base.]
Note that this routine is only used for systems without a PCI BIOS32
(e.g., ISA bus). For PCI bus systems, this routine will likely fail
unless one of the IRQs listed in the ints array is used by the board.
Sometimes it is possible to use the computer's BIOS setup screen to
configure a PCI system so that one of these IRQs will be used by the
Future Domain card. */
static int fdomain_get_irq( int base )
{
int options = inb(base + Configuration1);
#if DEBUG_DETECT
printk("scsi: <fdomain> Options = %x\n", options);
#endif
/* Check for board with lowest bios_base --
this isn't valid for the 18c30 or for
boards on the PCI bus, so just assume we
have the right board. */
if (chip != tmc18c30 && !PCI_bus && addresses[(options & 0xc0) >> 6 ] != bios_base)
return 0;
return ints[(options & 0x0e) >> 1];
}
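/*
 * Worked example (illustrative): if Configuration1 reads back as 0x4a,
 * then (0x4a & 0x0e) >> 1 == 5 selects ints[5] == 14 as the IRQ, and
 * (0x4a & 0xc0) >> 6 == 1 selects addresses[1] == 0xca000 as the BIOS
 * base the board claims.
 */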
static int fdomain_isa_detect( int *irq, int *iobase )
{
int i, j;
int base = 0xdeadbeef;
int flag = 0;
#if DEBUG_DETECT
printk( "scsi: <fdomain> fdomain_isa_detect:" );
#endif
for (i = 0; i < ADDRESS_COUNT; i++) {
void __iomem *p = ioremap(addresses[i], 0x2000);
if (!p)
continue;
#if DEBUG_DETECT
printk( " %lx(%lx),", addresses[i], bios_base );
#endif
for (j = 0; j < SIGNATURE_COUNT; j++) {
if (check_signature(p + signatures[j].sig_offset,
signatures[j].signature,
signatures[j].sig_length )) {
bios_major = signatures[j].major_bios_version;
bios_minor = signatures[j].minor_bios_version;
PCI_bus = (signatures[j].flag == 1);
Quantum = (signatures[j].flag > 1) ? signatures[j].flag : 0;
bios_base = addresses[i];
bios_mem = p;
goto found;
}
}
iounmap(p);
}
found:
if (bios_major == 2) {
/* The TMC-1660/TMC-1680 has a RAM area just after the BIOS ROM.
Assuming the ROM is enabled (otherwise we wouldn't have been
able to read the ROM signature :-), then the ROM sets up the
RAM area with some magic numbers, such as a list of port
base addresses and a list of the disk "geometry" reported to
DOS (this geometry has nothing to do with physical geometry).
*/
switch (Quantum) {
case 2: /* ISA_200S */
case 3: /* ISA_250MG */
base = readb(bios_mem + 0x1fa2) + (readb(bios_mem + 0x1fa3) << 8);
break;
case 4: /* ISA_200S (another one) */
base = readb(bios_mem + 0x1fa3) + (readb(bios_mem + 0x1fa4) << 8);
break;
default:
base = readb(bios_mem + 0x1fcc) + (readb(bios_mem + 0x1fcd) << 8);
break;
}
#if DEBUG_DETECT
printk( " %x,", base );
#endif
for (i = 0; i < PORT_COUNT; i++) {
if (base == ports[i]) {
if (!request_region(base, 0x10, "fdomain"))
break;
if (!fdomain_is_valid_port(base)) {
release_region(base, 0x10);
break;
}
*irq = fdomain_get_irq( base );
*iobase = base;
return 1;
}
}
/* This is a bad sign. It usually means that someone patched the
BIOS signature list (the signatures variable) to contain a BIOS
signature for a board *OTHER THAN* the TMC-1660/TMC-1680. */
#if DEBUG_DETECT
printk( " RAM FAILED, " );
#endif
}
/* Anyway, the alternative to finding the address in the RAM is to just
search through every possible port address for one that is attached
to the Future Domain card. Don't panic, though, about reading all
these random port addresses -- there are rumors that the Future
Domain BIOS does something very similar.
Do not, however, check ports which the kernel knows are being used by
another driver. */
for (i = 0; i < PORT_COUNT; i++) {
base = ports[i];
if (!request_region(base, 0x10, "fdomain")) {
#if DEBUG_DETECT
printk( " (%x inuse),", base );
#endif
continue;
}
#if DEBUG_DETECT
printk( " %x,", base );
#endif
flag = fdomain_is_valid_port(base);
if (flag)
break;
release_region(base, 0x10);
}
#if DEBUG_DETECT
if (flag) printk( " SUCCESS\n" );
else printk( " FAILURE\n" );
#endif
if (!flag) return 0; /* iobase not found */
*irq = fdomain_get_irq( base );
*iobase = base;
return 1; /* success */
}
#else /* PCMCIA */
static int fdomain_isa_detect( int *irq, int *iobase )
{
if (irq)
*irq = 0;
if (iobase)
*iobase = 0;
return 0;
}
#endif /* !PCMCIA */
/* PCI detection function: int fdomain_pci_bios_detect(int* irq, int*
iobase) This function gets the Interrupt Level and I/O base address from
the PCI configuration registers. */
#ifdef CONFIG_PCI
static int fdomain_pci_bios_detect( int *irq, int *iobase, struct pci_dev **ret_pdev )
{
unsigned int pci_irq; /* PCI interrupt line */
unsigned long pci_base; /* PCI I/O base address */
struct pci_dev *pdev = NULL;
#if DEBUG_DETECT
/* Tell how to print a list of the known PCI devices from bios32 and
list vendor and device IDs being used if in debug mode. */
printk( "scsi: <fdomain> INFO: use lspci -v to see list of PCI devices\n" );
printk( "scsi: <fdomain> TMC-3260 detect:"
" Using Vendor ID: 0x%x and Device ID: 0x%x\n",
PCI_VENDOR_ID_FD,
PCI_DEVICE_ID_FD_36C70 );
#endif
if ((pdev = pci_get_device(PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70, pdev)) == NULL)
return 0;
if (pci_enable_device(pdev))
goto fail;
#if DEBUG_DETECT
printk( "scsi: <fdomain> TMC-3260 detect:"
" PCI bus %u, device %u, function %u\n",
pdev->bus->number,
PCI_SLOT(pdev->devfn),
PCI_FUNC(pdev->devfn));
#endif
/* We now have the appropriate device function for the FD board so we
just read the PCI config info from the registers. */
pci_base = pci_resource_start(pdev, 0);
pci_irq = pdev->irq;
if (!request_region( pci_base, 0x10, "fdomain" ))
goto fail;
/* Now we have the I/O base address and interrupt from the PCI
configuration registers. */
*irq = pci_irq;
*iobase = pci_base;
*ret_pdev = pdev;
#if DEBUG_DETECT
printk( "scsi: <fdomain> TMC-3260 detect:"
" IRQ = %d, I/O base = 0x%x [0x%lx]\n", *irq, *iobase, pci_base );
#endif
if (!fdomain_is_valid_port(pci_base)) {
printk(KERN_ERR "scsi: <fdomain> PCI card detected, but driver not loaded (invalid port)\n" );
release_region(pci_base, 0x10);
goto fail;
}
/* Fill in a few global variables. Ugh. */
bios_major = bios_minor = -1;
PCI_bus = 1;
PCI_dev = pdev;
Quantum = 0;
bios_base = 0;
return 1;
fail:
pci_dev_put(pdev);
return 0;
}
#endif
struct Scsi_Host *__fdomain_16x0_detect(struct scsi_host_template *tpnt )
{
int retcode;
struct Scsi_Host *shpnt;
struct pci_dev *pdev = NULL;
if (setup_called) {
#if DEBUG_DETECT
printk( "scsi: <fdomain> No BIOS, using port_base = 0x%x, irq = %d\n",
port_base, interrupt_level );
#endif
if (!request_region(port_base, 0x10, "fdomain")) {
printk( "scsi: <fdomain> port 0x%x is busy\n", port_base );
printk( "scsi: <fdomain> Bad LILO/INSMOD parameters?\n" );
return NULL;
}
if (!fdomain_is_valid_port( port_base )) {
printk( "scsi: <fdomain> Cannot locate chip at port base 0x%x\n",
port_base );
printk( "scsi: <fdomain> Bad LILO/INSMOD parameters?\n" );
release_region(port_base, 0x10);
return NULL;
}
} else {
int flag = 0;
#ifdef CONFIG_PCI
/* Try PCI detection first */
flag = fdomain_pci_bios_detect( &interrupt_level, &port_base, &pdev );
#endif
if (!flag) {
/* Then try ISA bus detection */
flag = fdomain_isa_detect( &interrupt_level, &port_base );
if (!flag) {
printk( "scsi: <fdomain> Detection failed (no card)\n" );
return NULL;
}
}
}
fdomain_16x0_bus_reset(NULL);
if (fdomain_test_loopback()) {
printk(KERN_ERR "scsi: <fdomain> Detection failed (loopback test failed at port base 0x%x)\n", port_base);
if (setup_called) {
printk(KERN_ERR "scsi: <fdomain> Bad LILO/INSMOD parameters?\n");
}
goto fail;
}
if (this_id) {
tpnt->this_id = (this_id & 0x07);
adapter_mask = (1 << tpnt->this_id);
} else {
if (PCI_bus || (bios_major == 3 && bios_minor >= 2) || bios_major < 0) {
tpnt->this_id = 7;
adapter_mask = 0x80;
} else {
tpnt->this_id = 6;
adapter_mask = 0x40;
}
}
/* Print out a banner here in case we can't
get resources. */
shpnt = scsi_register( tpnt, 0 );
if (shpnt == NULL) {
release_region(port_base, 0x10);
return NULL;
}
shpnt->irq = interrupt_level;
shpnt->io_port = port_base;
shpnt->n_io_port = 0x10;
print_banner( shpnt );
/* Log IRQ with kernel */
if (!interrupt_level) {
printk(KERN_ERR "scsi: <fdomain> Card Detected, but driver not loaded (no IRQ)\n" );
goto fail;
} else {
/* Register the IRQ with the kernel */
retcode = request_irq( interrupt_level,
do_fdomain_16x0_intr, pdev?IRQF_SHARED:0, "fdomain", shpnt);
if (retcode < 0) {
if (retcode == -EINVAL) {
printk(KERN_ERR "scsi: <fdomain> IRQ %d is bad!\n", interrupt_level );
printk(KERN_ERR " This shouldn't happen!\n" );
printk(KERN_ERR " Send mail to faith@acm.org\n" );
} else if (retcode == -EBUSY) {
printk(KERN_ERR "scsi: <fdomain> IRQ %d is already in use!\n", interrupt_level );
printk(KERN_ERR " Please use another IRQ!\n" );
} else {
printk(KERN_ERR "scsi: <fdomain> Error getting IRQ %d\n", interrupt_level );
printk(KERN_ERR " This shouldn't happen!\n" );
printk(KERN_ERR " Send mail to faith@acm.org\n" );
}
printk(KERN_ERR "scsi: <fdomain> Detected, but driver not loaded (IRQ)\n" );
goto fail;
}
}
return shpnt;
fail:
pci_dev_put(pdev);
release_region(port_base, 0x10);
return NULL;
}
static int fdomain_16x0_detect(struct scsi_host_template *tpnt)
{
if (fdomain)
fdomain_setup(fdomain);
return (__fdomain_16x0_detect(tpnt) != NULL);
}
static const char *fdomain_16x0_info( struct Scsi_Host *ignore )
{
static char buffer[128];
char *pt;
strcpy( buffer, "Future Domain 16-bit SCSI Driver Version" );
if (strchr( VERSION, ':')) { /* Assume VERSION is an RCS Revision string */
strcat( buffer, strchr( VERSION, ':' ) + 1 );
pt = strrchr( buffer, '$' );
if (!pt) /* Stripped RCS Revision string? */
pt = buffer + strlen( buffer ) - 1;
else
--pt;
if (*pt != ' ')
++pt;
*pt = '\0';
} else { /* Assume VERSION is a number */
strcat( buffer, " " VERSION );
}
return buffer;
}
#if 0
static int fdomain_arbitrate( void )
{
int status = 0;
unsigned long timeout;
#if EVERY_ACCESS
printk( "fdomain_arbitrate()\n" );
#endif
outb(0x00, port_base + SCSI_Cntl); /* Disable data drivers */
outb(adapter_mask, port_base + SCSI_Data_NoACK); /* Set our id bit */
outb(0x04 | PARITY_MASK, port_base + TMC_Cntl); /* Start arbitration */
timeout = 500;
do {
status = inb(port_base + TMC_Status); /* Read adapter status */
if (status & 0x02) /* Arbitration complete */
return 0;
mdelay(1); /* Wait one millisecond */
} while (--timeout);
/* Make bus idle */
fdomain_make_bus_idle();
#if EVERY_ACCESS
printk( "Arbitration failed, status = %x\n", status );
#endif
#if ERRORS_ONLY
printk( "scsi: <fdomain> Arbitration failed, status = %x\n", status );
#endif
return 1;
}
#endif
static int fdomain_select( int target )
{
int status;
unsigned long timeout;
#if ERRORS_ONLY
static int flag = 0;
#endif
outb(0x82, port_base + SCSI_Cntl); /* Bus Enable + Select */
outb(adapter_mask | (1 << target), port_base + SCSI_Data_NoACK);
/* Stop arbitration and enable parity */
outb(PARITY_MASK, port_base + TMC_Cntl);
timeout = 350; /* 350 msec */
do {
status = inb(port_base + SCSI_Status); /* Read adapter status */
if (status & 1) { /* Busy asserted */
/* Enable SCSI Bus (on error, should make bus idle with 0) */
outb(0x80, port_base + SCSI_Cntl);
return 0;
}
mdelay(1); /* wait one msec */
} while (--timeout);
/* Make bus idle */
fdomain_make_bus_idle();
#if EVERY_ACCESS
if (!target) printk( "Selection failed\n" );
#endif
#if ERRORS_ONLY
if (!target) {
if (!flag) /* Skip first failure for all chips. */
++flag;
else
printk( "scsi: <fdomain> Selection failed\n" );
}
#endif
return 1;
}
static void my_done(int error)
{
if (in_command) {
in_command = 0;
outb(0x00, port_base + Interrupt_Cntl);
fdomain_make_bus_idle();
current_SC->result = error;
if (current_SC->scsi_done)
current_SC->scsi_done( current_SC );
else panic( "scsi: <fdomain> current_SC->scsi_done() == NULL" );
} else {
panic( "scsi: <fdomain> my_done() called outside of command\n" );
}
#if DEBUG_RACE
in_interrupt_flag = 0;
#endif
}
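/*
 * do_fdomain_16x0_intr() below is a small interrupt-driven state
 * machine: each IRQ advances the current command through
 * in_arbitration -> in_selection -> in_other (command, data, status
 * and message phases), and my_done() completes the command once the
 * target sends a COMMAND COMPLETE (0x00) message.
 */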
static irqreturn_t do_fdomain_16x0_intr(int irq, void *dev_id)
{
unsigned long flags;
int status;
int done = 0;
unsigned data_count;
/* This is the interrupt service routine proper. Older versions
re-enabled interrupts with sti() here; that call is long gone, and
we start by checking whether this board actually raised the IRQ. */
/* Check for other IRQ sources */
if ((inb(port_base + TMC_Status) & 0x01) == 0)
return IRQ_NONE;
/* It is our IRQ */
outb(0x00, port_base + Interrupt_Cntl);
/* We usually have one spurious interrupt after each command. Ignore it. */
if (!in_command || !current_SC) { /* Spurious interrupt */
#if EVERY_ACCESS
printk( "Spurious interrupt, in_command = %d, current_SC = %x\n",
in_command, current_SC );
#endif
return IRQ_NONE;
}
/* Abort calls my_done, so we do nothing here. */
if (current_SC->SCp.phase & aborted) {
#if DEBUG_ABORT
printk( "scsi: <fdomain> Interrupt after abort, ignoring\n" );
#endif
/* return IRQ_HANDLED; */
}
#if DEBUG_RACE
++in_interrupt_flag;
#endif
if (current_SC->SCp.phase & in_arbitration) {
status = inb(port_base + TMC_Status); /* Read adapter status */
if (!(status & 0x02)) {
#if EVERY_ACCESS
printk( " AFAIL " );
#endif
spin_lock_irqsave(current_SC->device->host->host_lock, flags);
my_done( DID_BUS_BUSY << 16 );
spin_unlock_irqrestore(current_SC->device->host->host_lock, flags);
return IRQ_HANDLED;
}
current_SC->SCp.phase = in_selection;
outb(0x40 | FIFO_COUNT, port_base + Interrupt_Cntl);
outb(0x82, port_base + SCSI_Cntl); /* Bus Enable + Select */
outb(adapter_mask | (1 << scmd_id(current_SC)), port_base + SCSI_Data_NoACK);
/* Stop arbitration and enable parity */
outb(0x10 | PARITY_MASK, port_base + TMC_Cntl);
#if DEBUG_RACE
in_interrupt_flag = 0;
#endif
return IRQ_HANDLED;
} else if (current_SC->SCp.phase & in_selection) {
status = inb(port_base + SCSI_Status);
if (!(status & 0x01)) {
/* Try again, for slow devices */
if (fdomain_select( scmd_id(current_SC) )) {
#if EVERY_ACCESS
printk( " SFAIL " );
#endif
spin_lock_irqsave(current_SC->device->host->host_lock, flags);
my_done( DID_NO_CONNECT << 16 );
spin_unlock_irqrestore(current_SC->device->host->host_lock, flags);
return IRQ_HANDLED;
} else {
#if EVERY_ACCESS
printk( " AltSel " );
#endif
/* Stop arbitration and enable parity */
outb(0x10 | PARITY_MASK, port_base + TMC_Cntl);
}
}
current_SC->SCp.phase = in_other;
outb(0x90 | FIFO_COUNT, port_base + Interrupt_Cntl);
outb(0x80, port_base + SCSI_Cntl);
#if DEBUG_RACE
in_interrupt_flag = 0;
#endif
return IRQ_HANDLED;
}
/* current_SC->SCp.phase == in_other: this is the body of the routine */
status = inb(port_base + SCSI_Status);
if (status & 0x10) { /* REQ */
switch (status & 0x0e) {
case 0x08: /* COMMAND OUT */
outb(current_SC->cmnd[current_SC->SCp.sent_command++],
port_base + Write_SCSI_Data);
#if EVERY_ACCESS
printk( "CMD = %x,",
current_SC->cmnd[ current_SC->SCp.sent_command - 1] );
#endif
break;
case 0x00: /* DATA OUT -- tmc18c50/tmc18c30 only */
if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
current_SC->SCp.have_data_in = -1;
outb(0xd0 | PARITY_MASK, port_base + TMC_Cntl);
}
break;
case 0x04: /* DATA IN -- tmc18c50/tmc18c30 only */
if (chip != tmc1800 && !current_SC->SCp.have_data_in) {
current_SC->SCp.have_data_in = 1;
outb(0x90 | PARITY_MASK, port_base + TMC_Cntl);
}
break;
case 0x0c: /* STATUS IN */
current_SC->SCp.Status = inb(port_base + Read_SCSI_Data);
#if EVERY_ACCESS
printk( "Status = %x, ", current_SC->SCp.Status );
#endif
#if ERRORS_ONLY
if (current_SC->SCp.Status
&& current_SC->SCp.Status != 2
&& current_SC->SCp.Status != 8) {
printk( "scsi: <fdomain> target = %d, command = %x, status = %x\n",
current_SC->device->id,
current_SC->cmnd[0],
current_SC->SCp.Status );
}
#endif
break;
case 0x0a: /* MESSAGE OUT */
outb(MESSAGE_REJECT, port_base + Write_SCSI_Data); /* Reject */
break;
case 0x0e: /* MESSAGE IN */
current_SC->SCp.Message = inb(port_base + Read_SCSI_Data);
#if EVERY_ACCESS
printk( "Message = %x, ", current_SC->SCp.Message );
#endif
if (!current_SC->SCp.Message) ++done;
#if DEBUG_MESSAGES || EVERY_ACCESS
if (current_SC->SCp.Message) {
printk( "scsi: <fdomain> message = %x\n",
current_SC->SCp.Message );
}
#endif
break;
}
}
if (chip == tmc1800 && !current_SC->SCp.have_data_in
&& (current_SC->SCp.sent_command >= current_SC->cmd_len)) {
if (current_SC->sc_data_direction == DMA_TO_DEVICE) {
current_SC->SCp.have_data_in = -1;
outb(0xd0 | PARITY_MASK, port_base + TMC_Cntl);
} else {
current_SC->SCp.have_data_in = 1;
outb(0x90 | PARITY_MASK, port_base + TMC_Cntl);
}
}
if (current_SC->SCp.have_data_in == -1) { /* DATA OUT */
while ((data_count = FIFO_Size - inw(port_base + FIFO_Data_Count)) > 512) {
#if EVERY_ACCESS
printk( "DC=%d, ", data_count ) ;
#endif
if (data_count > current_SC->SCp.this_residual)
data_count = current_SC->SCp.this_residual;
if (data_count > 0) {
#if EVERY_ACCESS
printk( "%d OUT, ", data_count );
#endif
if (data_count == 1) {
outb(*current_SC->SCp.ptr++, port_base + Write_FIFO);
--current_SC->SCp.this_residual;
} else {
data_count >>= 1; /* convert byte count to 16-bit words */
outsw(port_base + Write_FIFO, current_SC->SCp.ptr, data_count);
current_SC->SCp.ptr += 2 * data_count;
current_SC->SCp.this_residual -= 2 * data_count;
}
}
if (!current_SC->SCp.this_residual) {
if (current_SC->SCp.buffers_residual) {
--current_SC->SCp.buffers_residual;
++current_SC->SCp.buffer;
current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
} else
break;
}
}
}
if (current_SC->SCp.have_data_in == 1) { /* DATA IN */
while ((data_count = inw(port_base + FIFO_Data_Count)) > 0) {
#if EVERY_ACCESS
printk( "DC=%d, ", data_count );
#endif
if (data_count > current_SC->SCp.this_residual)
data_count = current_SC->SCp.this_residual;
if (data_count) {
#if EVERY_ACCESS
printk( "%d IN, ", data_count );
#endif
if (data_count == 1) {
*current_SC->SCp.ptr++ = inb(port_base + Read_FIFO);
--current_SC->SCp.this_residual;
} else {
data_count >>= 1; /* Number of words */
insw(port_base + Read_FIFO, current_SC->SCp.ptr, data_count);
current_SC->SCp.ptr += 2 * data_count;
current_SC->SCp.this_residual -= 2 * data_count;
}
}
if (!current_SC->SCp.this_residual
&& current_SC->SCp.buffers_residual) {
--current_SC->SCp.buffers_residual;
++current_SC->SCp.buffer;
current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
}
}
}
if (done) {
#if EVERY_ACCESS
printk( " ** IN DONE %d ** ", current_SC->SCp.have_data_in );
#endif
#if ERRORS_ONLY
if (current_SC->cmnd[0] == REQUEST_SENSE && !current_SC->SCp.Status) {
char *buf = scsi_sglist(current_SC);
if ((unsigned char)(*(buf + 2)) & 0x0f) {
unsigned char key;
unsigned char code;
unsigned char qualifier;
key = (unsigned char)(*(buf + 2)) & 0x0f;
code = (unsigned char)(*(buf + 12));
qualifier = (unsigned char)(*(buf + 13));
if (key != UNIT_ATTENTION
&& !(key == NOT_READY
&& code == 0x04
&& (!qualifier || qualifier == 0x02 || qualifier == 0x01))
&& !(key == ILLEGAL_REQUEST && (code == 0x25
|| code == 0x24
|| !code)))
printk( "scsi: <fdomain> REQUEST SENSE"
" Key = %x, Code = %x, Qualifier = %x\n",
key, code, qualifier );
}
}
#endif
#if EVERY_ACCESS
printk( "BEFORE MY_DONE. . ." );
#endif
spin_lock_irqsave(current_SC->device->host->host_lock, flags);
my_done( (current_SC->SCp.Status & 0xff)
| ((current_SC->SCp.Message & 0xff) << 8) | (DID_OK << 16) );
spin_unlock_irqrestore(current_SC->device->host->host_lock, flags);
#if EVERY_ACCESS
printk( "RETURNING.\n" );
#endif
} else {
if (current_SC->SCp.phase & disconnect) {
outb(0xd0 | FIFO_COUNT, port_base + Interrupt_Cntl);
outb(0x00, port_base + SCSI_Cntl);
} else {
outb(0x90 | FIFO_COUNT, port_base + Interrupt_Cntl);
}
}
#if DEBUG_RACE
in_interrupt_flag = 0;
#endif
return IRQ_HANDLED;
}
static int fdomain_16x0_queue_lck(struct scsi_cmnd *SCpnt,
void (*done)(struct scsi_cmnd *))
{
if (in_command) {
panic( "scsi: <fdomain> fdomain_16x0_queue() NOT REENTRANT!\n" );
}
#if EVERY_ACCESS
printk( "queue: target = %d cmnd = 0x%02x pieces = %d size = %u\n",
SCpnt->target,
*(unsigned char *)SCpnt->cmnd,
scsi_sg_count(SCpnt),
scsi_bufflen(SCpnt));
#endif
fdomain_make_bus_idle();
current_SC = SCpnt; /* Save this for the done function */
current_SC->scsi_done = done;
/* Initialize static data */
if (scsi_sg_count(current_SC)) {
current_SC->SCp.buffer = scsi_sglist(current_SC);
current_SC->SCp.ptr = sg_virt(current_SC->SCp.buffer);
current_SC->SCp.this_residual = current_SC->SCp.buffer->length;
current_SC->SCp.buffers_residual = scsi_sg_count(current_SC) - 1;
} else {
current_SC->SCp.ptr = NULL;
current_SC->SCp.this_residual = 0;
current_SC->SCp.buffer = NULL;
current_SC->SCp.buffers_residual = 0;
}
current_SC->SCp.Status = 0;
current_SC->SCp.Message = 0;
current_SC->SCp.have_data_in = 0;
current_SC->SCp.sent_command = 0;
current_SC->SCp.phase = in_arbitration;
/* Start arbitration */
outb(0x00, port_base + Interrupt_Cntl);
outb(0x00, port_base + SCSI_Cntl); /* Disable data drivers */
outb(adapter_mask, port_base + SCSI_Data_NoACK); /* Set our id bit */
++in_command;
outb(0x20, port_base + Interrupt_Cntl);
outb(0x14 | PARITY_MASK, port_base + TMC_Cntl); /* Start arbitration */
return 0;
}
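/*
 * DEF_SCSI_QCMD() generates fdomain_16x0_queue(), a wrapper that takes
 * the host lock with interrupts disabled before calling the _lck
 * variant above.
 */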
static DEF_SCSI_QCMD(fdomain_16x0_queue)
#if DEBUG_ABORT
static void print_info(struct scsi_cmnd *SCpnt)
{
unsigned int imr;
unsigned int irr;
unsigned int isr;
if (!SCpnt || !SCpnt->device || !SCpnt->device->host) {
printk(KERN_WARNING "scsi: <fdomain> Cannot provide detailed information\n");
return;
}
printk(KERN_INFO "%s\n", fdomain_16x0_info( SCpnt->device->host ) );
print_banner(SCpnt->device->host);
switch (SCpnt->SCp.phase) {
case in_arbitration: printk("arbitration"); break;
case in_selection: printk("selection"); break;
case in_other: printk("other"); break;
default: printk("unknown"); break;
}
printk( " (%d), target = %d cmnd = 0x%02x pieces = %d size = %u\n",
SCpnt->SCp.phase,
SCpnt->device->id,
*(unsigned char *)SCpnt->cmnd,
scsi_sg_count(SCpnt),
scsi_bufflen(SCpnt));
printk( "sent_command = %d, have_data_in = %d, timeout = %d\n",
SCpnt->SCp.sent_command,
SCpnt->SCp.have_data_in,
SCpnt->timeout );
#if DEBUG_RACE
printk( "in_interrupt_flag = %d\n", in_interrupt_flag );
#endif
imr = (inb( 0x0a1 ) << 8) + inb( 0x21 );
outb( 0x0a, 0xa0 );
irr = inb( 0xa0 ) << 8;
outb( 0x0a, 0x20 );
irr += inb( 0x20 );
outb( 0x0b, 0xa0 );
isr = inb( 0xa0 ) << 8;
outb( 0x0b, 0x20 );
isr += inb( 0x20 );
/* Print out interesting information */
printk( "IMR = 0x%04x", imr );
if (imr & (1 << interrupt_level))
printk( " (masked)" );
printk( ", IRR = 0x%04x, ISR = 0x%04x\n", irr, isr );
printk( "SCSI Status = 0x%02x\n", inb(port_base + SCSI_Status));
printk( "TMC Status = 0x%02x", inb(port_base + TMC_Status));
if (inb(port_base + TMC_Status) & 1)
printk( " (interrupt)" );
printk( "\n" );
printk("Interrupt Status = 0x%02x", inb(port_base + Interrupt_Status));
if (inb(port_base + Interrupt_Status) & 0x08)
printk( " (enabled)" );
printk( "\n" );
if (chip == tmc18c50 || chip == tmc18c30) {
printk("FIFO Status = 0x%02x\n", inb(port_base + FIFO_Status));
printk( "Int. Condition = 0x%02x\n",
inb( port_base + Interrupt_Cond ) );
}
printk( "Configuration 1 = 0x%02x\n", inb( port_base + Configuration1 ) );
if (chip == tmc18c50 || chip == tmc18c30)
printk( "Configuration 2 = 0x%02x\n",
inb( port_base + Configuration2 ) );
}
#endif
static int fdomain_16x0_abort(struct scsi_cmnd *SCpnt)
{
#if EVERY_ACCESS || ERRORS_ONLY || DEBUG_ABORT
printk( "scsi: <fdomain> abort " );
#endif
if (!in_command) {
#if EVERY_ACCESS || ERRORS_ONLY
printk( " (not in command)\n" );
#endif
return FAILED;
} else printk( "\n" );
#if DEBUG_ABORT
print_info( SCpnt );
#endif
fdomain_make_bus_idle();
current_SC->SCp.phase |= aborted;
current_SC->result = DID_ABORT << 16;
/* Aborts are not done well. . . */
my_done(DID_ABORT << 16);
return SUCCESS;
}
int fdomain_16x0_bus_reset(struct scsi_cmnd *SCpnt)
{
unsigned long flags;
local_irq_save(flags);
outb(1, port_base + SCSI_Cntl);
do_pause( 2 );
outb(0, port_base + SCSI_Cntl);
do_pause( 115 );
outb(0, port_base + SCSI_Mode_Cntl);
outb(PARITY_MASK, port_base + TMC_Cntl);
local_irq_restore(flags);
return SUCCESS;
}
static int fdomain_16x0_biosparam(struct scsi_device *sdev,
struct block_device *bdev,
sector_t capacity, int *info_array)
{
int drive;
int size = capacity;
unsigned long offset;
struct drive_info {
unsigned short cylinders;
unsigned char heads;
unsigned char sectors;
} i;
/* NOTES:
The RAM area starts at 0x1f00 from the bios_base address.
For BIOS Version 2.0:
The drive parameter table seems to start at 0x1f30.
The first byte's purpose is not known.
Next is the cylinder, head, and sector information.
The last 4 bytes appear to be the drive's size in sectors.
The other bytes in the drive parameter table are unknown.
If anyone figures them out, please send me mail, and I will
update these notes.
Tape drives do not get placed in this table.
There is another table at 0x1fea:
If the byte is 0x01, then the SCSI ID is not in use.
If the byte is 0x18 or 0x48, then the SCSI ID is in use,
although tapes don't seem to be in this table. I haven't
seen any other numbers (in a limited sample).
0x1f2d is a drive count (i.e., not including tapes)
The table at 0x1fcc are I/O ports addresses for the various
operations. I calculate these by hand in this driver code.
For the ISA-200S version of BIOS Version 2.0:
The drive parameter table starts at 0x1f33.
WARNING: Assume that the table entry is 25 bytes long. Someone needs
to check this for the Quantum ISA-200S card.
For BIOS Version 3.2:
The drive parameter table starts at 0x1f70. Each entry is
0x0a bytes long. Heads are one less than we need to report.
*/
if (MAJOR(bdev->bd_dev) != SCSI_DISK0_MAJOR) {
printk("scsi: <fdomain> fdomain_16x0_biosparam: too many disks");
return 0;
}
drive = MINOR(bdev->bd_dev) >> 4;
if (bios_major == 2) {
switch (Quantum) {
case 2: /* ISA_200S */
/* The value of 25 has never been verified.
It should probably be 15. */
offset = 0x1f33 + drive * 25;
break;
case 3: /* ISA_250MG */
offset = 0x1f36 + drive * 15;
break;
case 4: /* ISA_200S (another one) */
offset = 0x1f34 + drive * 15;
break;
default:
offset = 0x1f31 + drive * 25;
break;
}
memcpy_fromio( &i, bios_mem + offset, sizeof( struct drive_info ) );
info_array[0] = i.heads;
info_array[1] = i.sectors;
info_array[2] = i.cylinders;
} else if (bios_major == 3
&& bios_minor >= 0
&& bios_minor < 4) { /* 3.0 and 3.2 BIOS */
memcpy_fromio( &i, bios_mem + 0x1f71 + drive * 10,
sizeof( struct drive_info ) );
info_array[0] = i.heads + 1;
info_array[1] = i.sectors;
info_array[2] = i.cylinders;
} else { /* 3.4 BIOS (and up?) */
/* This algorithm was provided by Future Domain (much thanks!). */
unsigned char *p = scsi_bios_ptable(bdev);
if (p && p[65] == 0xaa && p[64] == 0x55 /* Partition table valid */
&& p[4]) { /* Partition type */
/* The partition table layout is as follows:
Start: 0x1be (offset of the partition table within the MBR)
Offset: 0 = partition status
1 = starting head
2 = starting sector and cylinder (word, encoded)
4 = partition type
5 = ending head
6 = ending sector and cylinder (word, encoded)
8 = starting absolute sector (double word)
c = number of sectors (double word)
Signature: 0x1fe = 0x55aa
So, this algorithm assumes:
1) the first partition table is in use,
2) the data in the first entry is correct, and
3) partitions never divide cylinders
Note that (1) may be FALSE for NetBSD (and other BSD flavors),
as well as for Linux. Note also, that Linux doesn't pay any
attention to the fields that are used by this algorithm -- it
only uses the absolute sector data. Recent versions of Linux's
fdisk(1) will fill this data in correctly, and forthcoming
versions will check for consistency.
Checking for a non-zero partition type is not part of the
Future Domain algorithm, but it seemed to be a reasonable thing
to do, especially in the Linux and BSD worlds. */
info_array[0] = p[5] + 1; /* heads */
info_array[1] = p[6] & 0x3f; /* sectors */
} else {
/* Note that this new method guarantees that there will always be
fewer than 1024 cylinders on a platter. This is good for drives
up to approximately 7.85GB (where 1GB = 1024 * 1024 kB); the
heuristic is sketched as a standalone helper after this function. */
if ((unsigned int)size >= 0x7e0000U) {
info_array[0] = 0xff; /* heads = 255 */
info_array[1] = 0x3f; /* sectors = 63 */
} else if ((unsigned int)size >= 0x200000U) {
info_array[0] = 0x80; /* heads = 128 */
info_array[1] = 0x3f; /* sectors = 63 */
} else {
info_array[0] = 0x40; /* heads = 64 */
info_array[1] = 0x20; /* sectors = 32 */
}
}
/* For both methods, compute the cylinders */
info_array[2] = (unsigned int)size / (info_array[0] * info_array[1] );
kfree(p);
}
return 0;
}
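/*
 * A minimal standalone sketch (not compiled into the driver, hence the
 * #if 0) of the fallback geometry heuristic used above: the thresholds
 * are exactly the points where the smaller mapping would reach 1024
 * cylinders (0x7e0000 = 1024 * 128 * 63, 0x200000 = 1024 * 64 * 32).
 * The helper name is hypothetical.
 */
#if 0
static void fdomain_example_chs(unsigned int size, int *heads,
int *sectors, int *cylinders)
{
if (size >= 0x7e0000U) { /* 128 heads would exceed 1024 cylinders */
*heads = 255;
*sectors = 63;
} else if (size >= 0x200000U) { /* 64 heads would exceed 1024 cylinders */
*heads = 128;
*sectors = 63;
} else {
*heads = 64;
*sectors = 32;
}
*cylinders = size / (*heads * *sectors);
}
#endif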
static int fdomain_16x0_release(struct Scsi_Host *shpnt)
{
if (shpnt->irq)
free_irq(shpnt->irq, shpnt);
if (shpnt->io_port && shpnt->n_io_port)
release_region(shpnt->io_port, shpnt->n_io_port);
if (PCI_bus)
pci_dev_put(PCI_dev);
return 0;
}
struct scsi_host_template fdomain_driver_template = {
.module = THIS_MODULE,
.name = "fdomain",
.proc_name = "fdomain",
.detect = fdomain_16x0_detect,
.info = fdomain_16x0_info,
.queuecommand = fdomain_16x0_queue,
.eh_abort_handler = fdomain_16x0_abort,
.eh_bus_reset_handler = fdomain_16x0_bus_reset,
.bios_param = fdomain_16x0_biosparam,
.release = fdomain_16x0_release,
.can_queue = 1,
.this_id = 6,
.sg_tablesize = 64,
.cmd_per_lun = 1,
.use_clustering = DISABLE_CLUSTERING,
};
#ifndef PCMCIA
#ifdef CONFIG_PCI
static struct pci_device_id fdomain_pci_tbl[] = {
{ PCI_VENDOR_ID_FD, PCI_DEVICE_ID_FD_36C70,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
{ }
};
MODULE_DEVICE_TABLE(pci, fdomain_pci_tbl);
#endif
#define driver_template fdomain_driver_template
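/* scsi_module.c supplies the module init/exit boilerplate that registers
driver_template with the legacy ->detect based SCSI core interface. */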
#include "scsi_module.c"
#endif
| gpl-2.0 |
TheEdge-/Leaping_kernel | drivers/media/video/s5p-fimc/mipi-csis.c | 4692 | 18852 | /*
* Samsung S5P/EXYNOS4 SoC series MIPI-CSI receiver driver
*
* Copyright (C) 2011 - 2012 Samsung Electronics Co., Ltd.
* Sylwester Nawrocki, <s.nawrocki@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/videodev2.h>
#include <media/v4l2-subdev.h>
#include <plat/mipi_csis.h>
#include "mipi-csis.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Debug level (0-1)");
/* Register map definition */
/* CSIS global control */
#define S5PCSIS_CTRL 0x00
#define S5PCSIS_CTRL_DPDN_DEFAULT (0 << 31)
#define S5PCSIS_CTRL_DPDN_SWAP (1 << 31)
#define S5PCSIS_CTRL_ALIGN_32BIT (1 << 20)
#define S5PCSIS_CTRL_UPDATE_SHADOW (1 << 16)
#define S5PCSIS_CTRL_WCLK_EXTCLK (1 << 8)
#define S5PCSIS_CTRL_RESET (1 << 4)
#define S5PCSIS_CTRL_ENABLE (1 << 0)
/* D-PHY control */
#define S5PCSIS_DPHYCTRL 0x04
#define S5PCSIS_DPHYCTRL_HSS_MASK (0x1f << 27)
#define S5PCSIS_DPHYCTRL_ENABLE (0x1f << 0)
#define S5PCSIS_CONFIG 0x08
#define S5PCSIS_CFG_FMT_YCBCR422_8BIT (0x1e << 2)
#define S5PCSIS_CFG_FMT_RAW8 (0x2a << 2)
#define S5PCSIS_CFG_FMT_RAW10 (0x2b << 2)
#define S5PCSIS_CFG_FMT_RAW12 (0x2c << 2)
/* User defined formats, x = 1...4 */
#define S5PCSIS_CFG_FMT_USER(x) ((0x30 + x - 1) << 2)
#define S5PCSIS_CFG_FMT_MASK (0x3f << 2)
#define S5PCSIS_CFG_NR_LANE_MASK 3
/* Interrupt mask. */
#define S5PCSIS_INTMSK 0x10
#define S5PCSIS_INTMSK_EN_ALL 0xf000003f
#define S5PCSIS_INTSRC 0x14
/* Pixel resolution */
#define S5PCSIS_RESOL 0x2c
#define CSIS_MAX_PIX_WIDTH 0xffff
#define CSIS_MAX_PIX_HEIGHT 0xffff
enum {
CSIS_CLK_MUX,
CSIS_CLK_GATE,
};
static char *csi_clock_name[] = {
[CSIS_CLK_MUX] = "sclk_csis",
[CSIS_CLK_GATE] = "csis",
};
#define NUM_CSIS_CLOCKS ARRAY_SIZE(csi_clock_name)
static const char * const csis_supply_name[] = {
"vdd11", /* 1.1V or 1.2V (s5pc100) MIPI CSI suppply */
"vdd18", /* VDD 1.8V and MIPI CSI PLL supply */
};
#define CSIS_NUM_SUPPLIES ARRAY_SIZE(csis_supply_name)
enum {
ST_POWERED = 1,
ST_STREAMING = 2,
ST_SUSPENDED = 4,
};
/**
* struct csis_state - the driver's internal state data structure
* @lock: mutex serializing the subdev and power management operations,
* protecting @format and @flags members
* @pads: CSIS pads array
* @sd: v4l2_subdev associated with CSIS device instance
* @pdev: CSIS platform device
* @regs: mmaped I/O registers memory
* @clock: CSIS clocks
* @irq: requested s5p-mipi-csis irq number
* @flags: the state variable for power and streaming control
* @csis_fmt: current CSIS pixel format
* @format: common media bus format for the source and sink pad
*/
struct csis_state {
struct mutex lock;
struct media_pad pads[CSIS_PADS_NUM];
struct v4l2_subdev sd;
struct platform_device *pdev;
void __iomem *regs;
struct regulator_bulk_data supplies[CSIS_NUM_SUPPLIES];
struct clk *clock[NUM_CSIS_CLOCKS];
int irq;
u32 flags;
const struct csis_pix_format *csis_fmt;
struct v4l2_mbus_framefmt format;
};
/**
* struct csis_pix_format - CSIS pixel format description
* @pix_width_alignment: horizontal pixel alignment, width will be
* multiple of 2^pix_width_alignment
* @code: corresponding media bus code
* @fmt_reg: S5PCSIS_CONFIG register value
*/
struct csis_pix_format {
unsigned int pix_width_alignment;
enum v4l2_mbus_pixelcode code;
u32 fmt_reg;
};
static const struct csis_pix_format s5pcsis_formats[] = {
{
.code = V4L2_MBUS_FMT_VYUY8_2X8,
.fmt_reg = S5PCSIS_CFG_FMT_YCBCR422_8BIT,
}, {
.code = V4L2_MBUS_FMT_JPEG_1X8,
.fmt_reg = S5PCSIS_CFG_FMT_USER(1),
},
};
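/* Raw MMIO register accessors; offsets are relative to the remapped
register window at state->regs. */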
#define s5pcsis_write(__csis, __r, __v) writel(__v, __csis->regs + __r)
#define s5pcsis_read(__csis, __r) readl(__csis->regs + __r)
static struct csis_state *sd_to_csis_state(struct v4l2_subdev *sdev)
{
return container_of(sdev, struct csis_state, sd);
}
static const struct csis_pix_format *find_csis_format(
struct v4l2_mbus_framefmt *mf)
{
int i;
for (i = 0; i < ARRAY_SIZE(s5pcsis_formats); i++)
if (mf->code == s5pcsis_formats[i].code)
return &s5pcsis_formats[i];
return NULL;
}
static void s5pcsis_enable_interrupts(struct csis_state *state, bool on)
{
u32 val = s5pcsis_read(state, S5PCSIS_INTMSK);
val = on ? val | S5PCSIS_INTMSK_EN_ALL :
val & ~S5PCSIS_INTMSK_EN_ALL;
s5pcsis_write(state, S5PCSIS_INTMSK, val);
}
static void s5pcsis_reset(struct csis_state *state)
{
u32 val = s5pcsis_read(state, S5PCSIS_CTRL);
s5pcsis_write(state, S5PCSIS_CTRL, val | S5PCSIS_CTRL_RESET);
udelay(10);
}
static void s5pcsis_system_enable(struct csis_state *state, int on)
{
u32 val;
val = s5pcsis_read(state, S5PCSIS_CTRL);
if (on)
val |= S5PCSIS_CTRL_ENABLE;
else
val &= ~S5PCSIS_CTRL_ENABLE;
s5pcsis_write(state, S5PCSIS_CTRL, val);
val = s5pcsis_read(state, S5PCSIS_DPHYCTRL);
if (on)
val |= S5PCSIS_DPHYCTRL_ENABLE;
else
val &= ~S5PCSIS_DPHYCTRL_ENABLE;
s5pcsis_write(state, S5PCSIS_DPHYCTRL, val);
}
/* Called with the state.lock mutex held */
static void __s5pcsis_set_format(struct csis_state *state)
{
struct v4l2_mbus_framefmt *mf = &state->format;
u32 val;
v4l2_dbg(1, debug, &state->sd, "fmt: %d, %d x %d\n",
mf->code, mf->width, mf->height);
/* Color format */
val = s5pcsis_read(state, S5PCSIS_CONFIG);
val = (val & ~S5PCSIS_CFG_FMT_MASK) | state->csis_fmt->fmt_reg;
s5pcsis_write(state, S5PCSIS_CONFIG, val);
/* Pixel resolution */
val = (mf->width << 16) | mf->height;
s5pcsis_write(state, S5PCSIS_RESOL, val);
}
static void s5pcsis_set_hsync_settle(struct csis_state *state, int settle)
{
u32 val = s5pcsis_read(state, S5PCSIS_DPHYCTRL);
val = (val & ~S5PCSIS_DPHYCTRL_HSS_MASK) | (settle << 27);
s5pcsis_write(state, S5PCSIS_DPHYCTRL, val);
}
static void s5pcsis_set_params(struct csis_state *state)
{
struct s5p_platform_mipi_csis *pdata = state->pdev->dev.platform_data;
u32 val;
val = s5pcsis_read(state, S5PCSIS_CONFIG);
val = (val & ~S5PCSIS_CFG_NR_LANE_MASK) | (pdata->lanes - 1);
s5pcsis_write(state, S5PCSIS_CONFIG, val);
__s5pcsis_set_format(state);
s5pcsis_set_hsync_settle(state, pdata->hs_settle);
val = s5pcsis_read(state, S5PCSIS_CTRL);
if (pdata->alignment == 32)
val |= S5PCSIS_CTRL_ALIGN_32BIT;
else /* 24-bits */
val &= ~S5PCSIS_CTRL_ALIGN_32BIT;
/* Not using external clock. */
val &= ~S5PCSIS_CTRL_WCLK_EXTCLK;
s5pcsis_write(state, S5PCSIS_CTRL, val);
/* Update the shadow register. */
val = s5pcsis_read(state, S5PCSIS_CTRL);
s5pcsis_write(state, S5PCSIS_CTRL, val | S5PCSIS_CTRL_UPDATE_SHADOW);
}
static void s5pcsis_clk_put(struct csis_state *state)
{
int i;
for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
if (IS_ERR_OR_NULL(state->clock[i]))
continue;
clk_unprepare(state->clock[i]);
clk_put(state->clock[i]);
state->clock[i] = NULL;
}
}
static int s5pcsis_clk_get(struct csis_state *state)
{
struct device *dev = &state->pdev->dev;
int i, ret;
for (i = 0; i < NUM_CSIS_CLOCKS; i++) {
state->clock[i] = clk_get(dev, csi_clock_name[i]);
if (IS_ERR(state->clock[i]))
goto err;
ret = clk_prepare(state->clock[i]);
if (ret < 0) {
clk_put(state->clock[i]);
state->clock[i] = NULL;
goto err;
}
}
return 0;
err:
s5pcsis_clk_put(state);
dev_err(dev, "failed to get clock: %s\n", csi_clock_name[i]);
return -ENXIO;
}
static int s5pcsis_s_power(struct v4l2_subdev *sd, int on)
{
struct csis_state *state = sd_to_csis_state(sd);
struct device *dev = &state->pdev->dev;
if (on)
return pm_runtime_get_sync(dev);
return pm_runtime_put_sync(dev);
}
static void s5pcsis_start_stream(struct csis_state *state)
{
s5pcsis_reset(state);
s5pcsis_set_params(state);
s5pcsis_system_enable(state, true);
s5pcsis_enable_interrupts(state, true);
}
static void s5pcsis_stop_stream(struct csis_state *state)
{
s5pcsis_enable_interrupts(state, false);
s5pcsis_system_enable(state, false);
}
/* v4l2_subdev operations */
static int s5pcsis_s_stream(struct v4l2_subdev *sd, int enable)
{
struct csis_state *state = sd_to_csis_state(sd);
int ret = 0;
v4l2_dbg(1, debug, sd, "%s: %d, state: 0x%x\n",
__func__, enable, state->flags);
if (enable) {
ret = pm_runtime_get_sync(&state->pdev->dev);
if (ret && ret != 1)
return ret;
}
mutex_lock(&state->lock);
if (enable) {
if (state->flags & ST_SUSPENDED) {
ret = -EBUSY;
goto unlock;
}
s5pcsis_start_stream(state);
state->flags |= ST_STREAMING;
} else {
s5pcsis_stop_stream(state);
state->flags &= ~ST_STREAMING;
}
unlock:
mutex_unlock(&state->lock);
if (!enable)
pm_runtime_put(&state->pdev->dev);
return ret == 1 ? 0 : ret;
}
static int s5pcsis_enum_mbus_code(struct v4l2_subdev *sd,
struct v4l2_subdev_fh *fh,
struct v4l2_subdev_mbus_code_enum *code)
{
if (code->index >= ARRAY_SIZE(s5pcsis_formats))
return -EINVAL;
code->code = s5pcsis_formats[code->index].code;
return 0;
}
static struct csis_pix_format const *s5pcsis_try_format(
struct v4l2_mbus_framefmt *mf)
{
struct csis_pix_format const *csis_fmt;
csis_fmt = find_csis_format(mf);
if (csis_fmt == NULL)
csis_fmt = &s5pcsis_formats[0];
mf->code = csis_fmt->code;
v4l_bound_align_image(&mf->width, 1, CSIS_MAX_PIX_WIDTH,
csis_fmt->pix_width_alignment,
&mf->height, 1, CSIS_MAX_PIX_HEIGHT, 1,
0);
return csis_fmt;
}
static struct v4l2_mbus_framefmt *__s5pcsis_get_format(
struct csis_state *state, struct v4l2_subdev_fh *fh,
u32 pad, enum v4l2_subdev_format_whence which)
{
if (which == V4L2_SUBDEV_FORMAT_TRY)
return fh ? v4l2_subdev_get_try_format(fh, pad) : NULL;
return &state->format;
}
static int s5pcsis_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt)
{
struct csis_state *state = sd_to_csis_state(sd);
struct csis_pix_format const *csis_fmt;
struct v4l2_mbus_framefmt *mf;
if (fmt->pad != CSIS_PAD_SOURCE && fmt->pad != CSIS_PAD_SINK)
return -EINVAL;
mf = __s5pcsis_get_format(state, fh, fmt->pad, fmt->which);
if (fmt->pad == CSIS_PAD_SOURCE) {
if (mf) {
mutex_lock(&state->lock);
fmt->format = *mf;
mutex_unlock(&state->lock);
}
return 0;
}
csis_fmt = s5pcsis_try_format(&fmt->format);
if (mf) {
mutex_lock(&state->lock);
*mf = fmt->format;
if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE)
state->csis_fmt = csis_fmt;
mutex_unlock(&state->lock);
}
return 0;
}
static int s5pcsis_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh,
struct v4l2_subdev_format *fmt)
{
struct csis_state *state = sd_to_csis_state(sd);
struct v4l2_mbus_framefmt *mf;
if (fmt->pad != CSIS_PAD_SOURCE && fmt->pad != CSIS_PAD_SINK)
return -EINVAL;
mf = __s5pcsis_get_format(state, fh, fmt->pad, fmt->which);
if (!mf)
return -EINVAL;
mutex_lock(&state->lock);
fmt->format = *mf;
mutex_unlock(&state->lock);
return 0;
}
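/* Called whenever the subdev device node is opened; seeds the TRY
format of the new file handle with sane defaults. */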
static int s5pcsis_open(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
{
struct v4l2_mbus_framefmt *format = v4l2_subdev_get_try_format(fh, 0);
format->colorspace = V4L2_COLORSPACE_JPEG;
format->code = s5pcsis_formats[0].code;
format->width = S5PCSIS_DEF_PIX_WIDTH;
format->height = S5PCSIS_DEF_PIX_HEIGHT;
format->field = V4L2_FIELD_NONE;
return 0;
}
static const struct v4l2_subdev_internal_ops s5pcsis_sd_internal_ops = {
.open = s5pcsis_open,
};
static struct v4l2_subdev_core_ops s5pcsis_core_ops = {
.s_power = s5pcsis_s_power,
};
static struct v4l2_subdev_pad_ops s5pcsis_pad_ops = {
.enum_mbus_code = s5pcsis_enum_mbus_code,
.get_fmt = s5pcsis_get_fmt,
.set_fmt = s5pcsis_set_fmt,
};
static struct v4l2_subdev_video_ops s5pcsis_video_ops = {
.s_stream = s5pcsis_s_stream,
};
static struct v4l2_subdev_ops s5pcsis_subdev_ops = {
.core = &s5pcsis_core_ops,
.pad = &s5pcsis_pad_ops,
.video = &s5pcsis_video_ops,
};
static irqreturn_t s5pcsis_irq_handler(int irq, void *dev_id)
{
struct csis_state *state = dev_id;
u32 val;
/* Just clear the interrupt pending bits. */
val = s5pcsis_read(state, S5PCSIS_INTSRC);
s5pcsis_write(state, S5PCSIS_INTSRC, val);
return IRQ_HANDLED;
}
static int __devinit s5pcsis_probe(struct platform_device *pdev)
{
struct s5p_platform_mipi_csis *pdata;
struct resource *mem_res;
struct csis_state *state;
int ret = -ENOMEM;
int i;
state = devm_kzalloc(&pdev->dev, sizeof(*state), GFP_KERNEL);
if (!state)
return -ENOMEM;
mutex_init(&state->lock);
state->pdev = pdev;
pdata = pdev->dev.platform_data;
if (pdata == NULL || pdata->phy_enable == NULL) {
dev_err(&pdev->dev, "Platform data not fully specified\n");
return -EINVAL;
}
if ((pdev->id == 1 && pdata->lanes > CSIS1_MAX_LANES) ||
pdata->lanes > CSIS0_MAX_LANES) {
dev_err(&pdev->dev, "Unsupported number of data lanes: %d\n",
pdata->lanes);
return -EINVAL;
}
mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
state->regs = devm_request_and_ioremap(&pdev->dev, mem_res);
if (state->regs == NULL) {
dev_err(&pdev->dev, "Failed to request and remap io memory\n");
return -ENXIO;
}
state->irq = platform_get_irq(pdev, 0);
if (state->irq < 0) {
dev_err(&pdev->dev, "Failed to get irq\n");
return state->irq;
}
for (i = 0; i < CSIS_NUM_SUPPLIES; i++)
state->supplies[i].supply = csis_supply_name[i];
ret = regulator_bulk_get(&pdev->dev, CSIS_NUM_SUPPLIES,
state->supplies);
if (ret)
return ret;
ret = s5pcsis_clk_get(state);
if (ret)
goto e_clkput;
clk_enable(state->clock[CSIS_CLK_MUX]);
if (pdata->clk_rate)
clk_set_rate(state->clock[CSIS_CLK_MUX], pdata->clk_rate);
else
dev_WARN(&pdev->dev, "No clock frequency specified!\n");
ret = devm_request_irq(&pdev->dev, state->irq, s5pcsis_irq_handler,
0, dev_name(&pdev->dev), state);
if (ret) {
dev_err(&pdev->dev, "Interrupt request failed\n");
goto e_regput;
}
v4l2_subdev_init(&state->sd, &s5pcsis_subdev_ops);
state->sd.owner = THIS_MODULE;
strlcpy(state->sd.name, dev_name(&pdev->dev), sizeof(state->sd.name));
state->sd.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
state->csis_fmt = &s5pcsis_formats[0];
state->format.code = s5pcsis_formats[0].code;
state->format.width = S5PCSIS_DEF_PIX_WIDTH;
state->format.height = S5PCSIS_DEF_PIX_HEIGHT;
state->pads[CSIS_PAD_SINK].flags = MEDIA_PAD_FL_SINK;
state->pads[CSIS_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE;
ret = media_entity_init(&state->sd.entity,
CSIS_PADS_NUM, state->pads, 0);
if (ret < 0)
goto e_clkput;
/* This allows the host driver to retrieve the platform device id. */
v4l2_set_subdevdata(&state->sd, pdev);
/* .. and a pointer to the subdev. */
platform_set_drvdata(pdev, &state->sd);
pm_runtime_enable(&pdev->dev);
return 0;
e_regput:
regulator_bulk_free(CSIS_NUM_SUPPLIES, state->supplies);
e_clkput:
clk_disable(state->clock[CSIS_CLK_MUX]);
s5pcsis_clk_put(state);
return ret;
}
static int s5pcsis_pm_suspend(struct device *dev, bool runtime)
{
struct s5p_platform_mipi_csis *pdata = dev->platform_data;
struct platform_device *pdev = to_platform_device(dev);
struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct csis_state *state = sd_to_csis_state(sd);
int ret = 0;
v4l2_dbg(1, debug, sd, "%s: flags: 0x%x\n",
__func__, state->flags);
mutex_lock(&state->lock);
if (state->flags & ST_POWERED) {
s5pcsis_stop_stream(state);
ret = pdata->phy_enable(state->pdev, false);
if (ret)
goto unlock;
ret = regulator_bulk_disable(CSIS_NUM_SUPPLIES,
state->supplies);
if (ret)
goto unlock;
clk_disable(state->clock[CSIS_CLK_GATE]);
state->flags &= ~ST_POWERED;
if (!runtime)
state->flags |= ST_SUSPENDED;
}
unlock:
mutex_unlock(&state->lock);
return ret ? -EAGAIN : 0;
}
static int s5pcsis_pm_resume(struct device *dev, bool runtime)
{
struct s5p_platform_mipi_csis *pdata = dev->platform_data;
struct platform_device *pdev = to_platform_device(dev);
struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct csis_state *state = sd_to_csis_state(sd);
int ret = 0;
v4l2_dbg(1, debug, sd, "%s: flags: 0x%x\n",
__func__, state->flags);
mutex_lock(&state->lock);
if (!runtime && !(state->flags & ST_SUSPENDED))
goto unlock;
if (!(state->flags & ST_POWERED)) {
ret = regulator_bulk_enable(CSIS_NUM_SUPPLIES,
state->supplies);
if (ret)
goto unlock;
ret = pdata->phy_enable(state->pdev, true);
if (!ret) {
state->flags |= ST_POWERED;
} else {
regulator_bulk_disable(CSIS_NUM_SUPPLIES,
state->supplies);
goto unlock;
}
clk_enable(state->clock[CSIS_CLK_GATE]);
}
if (state->flags & ST_STREAMING)
s5pcsis_start_stream(state);
state->flags &= ~ST_SUSPENDED;
unlock:
mutex_unlock(&state->lock);
return ret ? -EAGAIN : 0;
}
#ifdef CONFIG_PM_SLEEP
static int s5pcsis_suspend(struct device *dev)
{
return s5pcsis_pm_suspend(dev, false);
}
static int s5pcsis_resume(struct device *dev)
{
return s5pcsis_pm_resume(dev, false);
}
#endif
#ifdef CONFIG_PM_RUNTIME
static int s5pcsis_runtime_suspend(struct device *dev)
{
return s5pcsis_pm_suspend(dev, true);
}
static int s5pcsis_runtime_resume(struct device *dev)
{
return s5pcsis_pm_resume(dev, true);
}
#endif
static int __devexit s5pcsis_remove(struct platform_device *pdev)
{
struct v4l2_subdev *sd = platform_get_drvdata(pdev);
struct csis_state *state = sd_to_csis_state(sd);
pm_runtime_disable(&pdev->dev);
s5pcsis_pm_suspend(&pdev->dev, false);
clk_disable(state->clock[CSIS_CLK_MUX]);
pm_runtime_set_suspended(&pdev->dev);
s5pcsis_clk_put(state);
regulator_bulk_free(CSIS_NUM_SUPPLIES, state->supplies);
media_entity_cleanup(&state->sd.entity);
return 0;
}
static const struct dev_pm_ops s5pcsis_pm_ops = {
SET_RUNTIME_PM_OPS(s5pcsis_runtime_suspend, s5pcsis_runtime_resume,
NULL)
SET_SYSTEM_SLEEP_PM_OPS(s5pcsis_suspend, s5pcsis_resume)
};
static struct platform_driver s5pcsis_driver = {
.probe = s5pcsis_probe,
.remove = __devexit_p(s5pcsis_remove),
.driver = {
.name = CSIS_DRIVER_NAME,
.owner = THIS_MODULE,
.pm = &s5pcsis_pm_ops,
},
};
static int __init s5pcsis_init(void)
{
return platform_driver_probe(&s5pcsis_driver, s5pcsis_probe);
}
static void __exit s5pcsis_exit(void)
{
platform_driver_unregister(&s5pcsis_driver);
}
module_init(s5pcsis_init);
module_exit(s5pcsis_exit);
MODULE_AUTHOR("Sylwester Nawrocki <s.nawrocki@samsung.com>");
MODULE_DESCRIPTION("S5P/EXYNOS4 MIPI CSI receiver driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
fazerg/android_kernel_ZTE_X9180 | arch/mips/lantiq/prom.c | 4692 | 1412 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* Copyright (C) 2010 John Crispin <blogic@openwrt.org>
*/
#include <linux/export.h>
#include <linux/clk.h>
#include <asm/bootinfo.h>
#include <asm/time.h>
#include <lantiq.h>
#include "prom.h"
#include "clk.h"
static struct ltq_soc_info soc_info;
unsigned int ltq_get_cpu_ver(void)
{
return soc_info.rev;
}
EXPORT_SYMBOL(ltq_get_cpu_ver);
unsigned int ltq_get_soc_type(void)
{
return soc_info.type;
}
EXPORT_SYMBOL(ltq_get_soc_type);
const char *get_system_type(void)
{
return soc_info.sys_type;
}
void prom_free_prom_memory(void)
{
}
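/*
 * The bootloader hands its argument list over in the saved registers
 * fw_arg0 (argc) and fw_arg1 (argv); the pointers are run through
 * KSEG1ADDR() so the strings are read via the uncached segment.
 */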
static void __init prom_init_cmdline(void)
{
int argc = fw_arg0;
char **argv = (char **) KSEG1ADDR(fw_arg1);
int i;
for (i = 0; i < argc; i++) {
char *p = (char *) KSEG1ADDR(argv[i]);
if (p && *p) {
strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
}
}
}
void __init prom_init(void)
{
struct clk *clk;
ltq_soc_detect(&soc_info);
clk_init();
clk = clk_get(0, "cpu");
snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev1.%d",
soc_info.name, soc_info.rev);
clk_put(clk);
soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0';
pr_info("SoC: %s\n", soc_info.sys_type);
prom_init_cmdline();
}
| gpl-2.0 |
yodok/u8833_nethunter_kernel | drivers/media/dvb/frontends/stv0367.c | 4948 | 94618 | /*
* stv0367.c
*
* Driver for ST STV0367 DVB-T & DVB-C demodulator IC.
*
* Copyright (C) ST Microelectronics.
* Copyright (C) 2010,2011 NetUP Inc.
* Copyright (C) 2010,2011 Igor M. Liplianin <liplianin@netup.ru>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
*
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include "stv0367.h"
#include "stv0367_regs.h"
#include "stv0367_priv.h"
static int stvdebug;
module_param_named(debug, stvdebug, int, 0644);
static int i2cdebug;
module_param_named(i2c_debug, i2cdebug, int, 0644);
#define dprintk(args...) \
do { \
if (stvdebug) \
printk(KERN_DEBUG args); \
} while (0)
/* DVB-C */
struct stv0367cab_state {
enum stv0367_cab_signal_type state;
u32 mclk;
u32 adc_clk;
s32 search_range;
s32 derot_offset;
/* results */
int locked; /* channel found */
u32 freq_khz; /* found frequency (in kHz) */
u32 symbol_rate; /* found symbol rate (in Bds) */
enum stv0367cab_mod modulation; /* modulation */
fe_spectral_inversion_t spect_inv; /* Spectrum Inversion */
};
struct stv0367ter_state {
/* DVB-T */
enum stv0367_ter_signal_type state;
enum stv0367_ter_if_iq_mode if_iq_mode;
enum stv0367_ter_mode mode;/* mode 2K or 8K */
fe_guard_interval_t guard;
enum stv0367_ter_hierarchy hierarchy;
u32 frequency;
fe_spectral_inversion_t sense; /* current search spectrum */
u8 force; /* force mode/guard */
u8 bw; /* channel width 6, 7 or 8 in MHz */
u8 pBW; /* channel width used during previous lock */
u32 pBER;
u32 pPER;
u32 ucblocks;
s8 echo_pos; /* echo position */
u8 first_lock;
u8 unlock_counter;
u32 agc_val;
};
struct stv0367_state {
struct dvb_frontend fe;
struct i2c_adapter *i2c;
/* config settings */
const struct stv0367_config *config;
u8 chip_id;
/* DVB-C */
struct stv0367cab_state *cab_state;
/* DVB-T */
struct stv0367ter_state *ter_state;
};
struct st_register {
u16 addr;
u8 value;
};
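/* The address/value tables below hold the power-up register defaults
that the driver programs into the demodulator at init time. */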
/* values for STV4100 XTAL=30M int clk=53.125M*/
static struct st_register def0367ter[STV0367TER_NBREGS] = {
{R367TER_ID, 0x60},
{R367TER_I2CRPT, 0xa0},
/* {R367TER_I2CRPT, 0x22},*/
{R367TER_TOPCTRL, 0x00},/* for xc5000; was 0x02 */
{R367TER_IOCFG0, 0x40},
{R367TER_DAC0R, 0x00},
{R367TER_IOCFG1, 0x00},
{R367TER_DAC1R, 0x00},
{R367TER_IOCFG2, 0x62},
{R367TER_SDFR, 0x00},
{R367TER_STATUS, 0xf8},
{R367TER_AUX_CLK, 0x0a},
{R367TER_FREESYS1, 0x00},
{R367TER_FREESYS2, 0x00},
{R367TER_FREESYS3, 0x00},
{R367TER_GPIO_CFG, 0x55},
{R367TER_GPIO_CMD, 0x00},
{R367TER_AGC2MAX, 0xff},
{R367TER_AGC2MIN, 0x00},
{R367TER_AGC1MAX, 0xff},
{R367TER_AGC1MIN, 0x00},
{R367TER_AGCR, 0xbc},
{R367TER_AGC2TH, 0x00},
{R367TER_AGC12C, 0x00},
{R367TER_AGCCTRL1, 0x85},
{R367TER_AGCCTRL2, 0x1f},
{R367TER_AGC1VAL1, 0x00},
{R367TER_AGC1VAL2, 0x00},
{R367TER_AGC2VAL1, 0x6f},
{R367TER_AGC2VAL2, 0x05},
{R367TER_AGC2PGA, 0x00},
{R367TER_OVF_RATE1, 0x00},
{R367TER_OVF_RATE2, 0x00},
{R367TER_GAIN_SRC1, 0xaa},/* for xc5000; was 0x2b */
{R367TER_GAIN_SRC2, 0xd6},/* for xc5000; was 0x04 */
{R367TER_INC_DEROT1, 0x55},
{R367TER_INC_DEROT2, 0x55},
{R367TER_PPM_CPAMP_DIR, 0x2c},
{R367TER_PPM_CPAMP_INV, 0x00},
{R367TER_FREESTFE_1, 0x00},
{R367TER_FREESTFE_2, 0x1c},
{R367TER_DCOFFSET, 0x00},
{R367TER_EN_PROCESS, 0x05},
{R367TER_SDI_SMOOTHER, 0x80},
{R367TER_FE_LOOP_OPEN, 0x1c},
{R367TER_FREQOFF1, 0x00},
{R367TER_FREQOFF2, 0x00},
{R367TER_FREQOFF3, 0x00},
{R367TER_TIMOFF1, 0x00},
{R367TER_TIMOFF2, 0x00},
{R367TER_EPQ, 0x02},
{R367TER_EPQAUTO, 0x01},
{R367TER_SYR_UPDATE, 0xf5},
{R367TER_CHPFREE, 0x00},
{R367TER_PPM_STATE_MAC, 0x23},
{R367TER_INR_THRESHOLD, 0xff},
{R367TER_EPQ_TPS_ID_CELL, 0xf9},
{R367TER_EPQ_CFG, 0x00},
{R367TER_EPQ_STATUS, 0x01},
{R367TER_AUTORELOCK, 0x81},
{R367TER_BER_THR_VMSB, 0x00},
{R367TER_BER_THR_MSB, 0x00},
{R367TER_BER_THR_LSB, 0x00},
{R367TER_CCD, 0x83},
{R367TER_SPECTR_CFG, 0x00},
{R367TER_CHC_DUMMY, 0x18},
{R367TER_INC_CTL, 0x88},
{R367TER_INCTHRES_COR1, 0xb4},
{R367TER_INCTHRES_COR2, 0x96},
{R367TER_INCTHRES_DET1, 0x0e},
{R367TER_INCTHRES_DET2, 0x11},
{R367TER_IIR_CELLNB, 0x8d},
{R367TER_IIRCX_COEFF1_MSB, 0x00},
{R367TER_IIRCX_COEFF1_LSB, 0x00},
{R367TER_IIRCX_COEFF2_MSB, 0x09},
{R367TER_IIRCX_COEFF2_LSB, 0x18},
{R367TER_IIRCX_COEFF3_MSB, 0x14},
{R367TER_IIRCX_COEFF3_LSB, 0x9c},
{R367TER_IIRCX_COEFF4_MSB, 0x00},
{R367TER_IIRCX_COEFF4_LSB, 0x00},
{R367TER_IIRCX_COEFF5_MSB, 0x36},
{R367TER_IIRCX_COEFF5_LSB, 0x42},
{R367TER_FEPATH_CFG, 0x00},
{R367TER_PMC1_FUNC, 0x65},
{R367TER_PMC1_FOR, 0x00},
{R367TER_PMC2_FUNC, 0x00},
{R367TER_STATUS_ERR_DA, 0xe0},
{R367TER_DIG_AGC_R, 0xfe},
{R367TER_COMAGC_TARMSB, 0x0b},
{R367TER_COM_AGC_TAR_ENMODE, 0x41},
{R367TER_COM_AGC_CFG, 0x3e},
{R367TER_COM_AGC_GAIN1, 0x39},
{R367TER_AUT_AGC_TARGETMSB, 0x0b},
{R367TER_LOCK_DET_MSB, 0x01},
{R367TER_AGCTAR_LOCK_LSBS, 0x40},
{R367TER_AUT_GAIN_EN, 0xf4},
{R367TER_AUT_CFG, 0xf0},
{R367TER_LOCKN, 0x23},
{R367TER_INT_X_3, 0x00},
{R367TER_INT_X_2, 0x03},
{R367TER_INT_X_1, 0x8d},
{R367TER_INT_X_0, 0xa0},
{R367TER_MIN_ERRX_MSB, 0x00},
{R367TER_COR_CTL, 0x23},
{R367TER_COR_STAT, 0xf6},
{R367TER_COR_INTEN, 0x00},
{R367TER_COR_INTSTAT, 0x3f},
{R367TER_COR_MODEGUARD, 0x03},
{R367TER_AGC_CTL, 0x08},
{R367TER_AGC_MANUAL1, 0x00},
{R367TER_AGC_MANUAL2, 0x00},
{R367TER_AGC_TARG, 0x16},
{R367TER_AGC_GAIN1, 0x53},
{R367TER_AGC_GAIN2, 0x1d},
{R367TER_RESERVED_1, 0x00},
{R367TER_RESERVED_2, 0x00},
{R367TER_RESERVED_3, 0x00},
{R367TER_CAS_CTL, 0x44},
{R367TER_CAS_FREQ, 0xb3},
{R367TER_CAS_DAGCGAIN, 0x12},
{R367TER_SYR_CTL, 0x04},
{R367TER_SYR_STAT, 0x10},
{R367TER_SYR_NCO1, 0x00},
{R367TER_SYR_NCO2, 0x00},
{R367TER_SYR_OFFSET1, 0x00},
{R367TER_SYR_OFFSET2, 0x00},
{R367TER_FFT_CTL, 0x00},
{R367TER_SCR_CTL, 0x70},
{R367TER_PPM_CTL1, 0xf8},
{R367TER_TRL_CTL, 0x14},/* for xc5000; was 0xac */
{R367TER_TRL_NOMRATE1, 0xae},/* for xc5000; was 0x1e */
{R367TER_TRL_NOMRATE2, 0x56},/* for xc5000; was 0x58 */
{R367TER_TRL_TIME1, 0x1d},
{R367TER_TRL_TIME2, 0xfc},
{R367TER_CRL_CTL, 0x24},
{R367TER_CRL_FREQ1, 0xad},
{R367TER_CRL_FREQ2, 0x9d},
{R367TER_CRL_FREQ3, 0xff},
{R367TER_CHC_CTL, 0x01},
{R367TER_CHC_SNR, 0xf0},
{R367TER_BDI_CTL, 0x00},
{R367TER_DMP_CTL, 0x00},
{R367TER_TPS_RCVD1, 0x30},
{R367TER_TPS_RCVD2, 0x02},
{R367TER_TPS_RCVD3, 0x01},
{R367TER_TPS_RCVD4, 0x00},
{R367TER_TPS_ID_CELL1, 0x00},
{R367TER_TPS_ID_CELL2, 0x00},
{R367TER_TPS_RCVD5_SET1, 0x02},
{R367TER_TPS_SET2, 0x02},
{R367TER_TPS_SET3, 0x01},
{R367TER_TPS_CTL, 0x00},
{R367TER_CTL_FFTOSNUM, 0x34},
{R367TER_TESTSELECT, 0x09},
{R367TER_MSC_REV, 0x0a},
{R367TER_PIR_CTL, 0x00},
{R367TER_SNR_CARRIER1, 0xa1},
{R367TER_SNR_CARRIER2, 0x9a},
{R367TER_PPM_CPAMP, 0x2c},
{R367TER_TSM_AP0, 0x00},
{R367TER_TSM_AP1, 0x00},
{R367TER_TSM_AP2 , 0x00},
{R367TER_TSM_AP3, 0x00},
{R367TER_TSM_AP4, 0x00},
{R367TER_TSM_AP5, 0x00},
{R367TER_TSM_AP6, 0x00},
{R367TER_TSM_AP7, 0x00},
{R367TER_TSTRES, 0x00},
{R367TER_ANACTRL, 0x0D},/* PLL stopped, restart at init!!! */
{R367TER_TSTBUS, 0x00},
{R367TER_TSTRATE, 0x00},
{R367TER_CONSTMODE, 0x01},
{R367TER_CONSTCARR1, 0x00},
{R367TER_CONSTCARR2, 0x00},
{R367TER_ICONSTEL, 0x0a},
{R367TER_QCONSTEL, 0x15},
{R367TER_TSTBISTRES0, 0x00},
{R367TER_TSTBISTRES1, 0x00},
{R367TER_TSTBISTRES2, 0x28},
{R367TER_TSTBISTRES3, 0x00},
{R367TER_RF_AGC1, 0xff},
{R367TER_RF_AGC2, 0x83},
{R367TER_ANADIGCTRL, 0x19},
{R367TER_PLLMDIV, 0x01},/* for xc5000; was 0x0c */
{R367TER_PLLNDIV, 0x06},/* for xc5000; was 0x55 */
{R367TER_PLLSETUP, 0x18},
{R367TER_DUAL_AD12, 0x0C},/* for xc5000 AGC voltage 1.6V */
{R367TER_TSTBIST, 0x00},
{R367TER_PAD_COMP_CTRL, 0x00},
{R367TER_PAD_COMP_WR, 0x00},
{R367TER_PAD_COMP_RD, 0xe0},
{R367TER_SYR_TARGET_FFTADJT_MSB, 0x00},
{R367TER_SYR_TARGET_FFTADJT_LSB, 0x00},
{R367TER_SYR_TARGET_CHCADJT_MSB, 0x00},
{R367TER_SYR_TARGET_CHCADJT_LSB, 0x00},
{R367TER_SYR_FLAG, 0x00},
{R367TER_CRL_TARGET1, 0x00},
{R367TER_CRL_TARGET2, 0x00},
{R367TER_CRL_TARGET3, 0x00},
{R367TER_CRL_TARGET4, 0x00},
{R367TER_CRL_FLAG, 0x00},
{R367TER_TRL_TARGET1, 0x00},
{R367TER_TRL_TARGET2, 0x00},
{R367TER_TRL_CHC, 0x00},
{R367TER_CHC_SNR_TARG, 0x00},
{R367TER_TOP_TRACK, 0x00},
{R367TER_TRACKER_FREE1, 0x00},
{R367TER_ERROR_CRL1, 0x00},
{R367TER_ERROR_CRL2, 0x00},
{R367TER_ERROR_CRL3, 0x00},
{R367TER_ERROR_CRL4, 0x00},
{R367TER_DEC_NCO1, 0x2c},
{R367TER_DEC_NCO2, 0x0f},
{R367TER_DEC_NCO3, 0x20},
{R367TER_SNR, 0xf1},
{R367TER_SYR_FFTADJ1, 0x00},
{R367TER_SYR_FFTADJ2, 0x00},
{R367TER_SYR_CHCADJ1, 0x00},
{R367TER_SYR_CHCADJ2, 0x00},
{R367TER_SYR_OFF, 0x00},
{R367TER_PPM_OFFSET1, 0x00},
{R367TER_PPM_OFFSET2, 0x03},
{R367TER_TRACKER_FREE2, 0x00},
{R367TER_DEBG_LT10, 0x00},
{R367TER_DEBG_LT11, 0x00},
{R367TER_DEBG_LT12, 0x00},
{R367TER_DEBG_LT13, 0x00},
{R367TER_DEBG_LT14, 0x00},
{R367TER_DEBG_LT15, 0x00},
{R367TER_DEBG_LT16, 0x00},
{R367TER_DEBG_LT17, 0x00},
{R367TER_DEBG_LT18, 0x00},
{R367TER_DEBG_LT19, 0x00},
{R367TER_DEBG_LT1A, 0x00},
{R367TER_DEBG_LT1B, 0x00},
{R367TER_DEBG_LT1C, 0x00},
{R367TER_DEBG_LT1D, 0x00},
{R367TER_DEBG_LT1E, 0x00},
{R367TER_DEBG_LT1F, 0x00},
{R367TER_RCCFGH, 0x00},
{R367TER_RCCFGM, 0x00},
{R367TER_RCCFGL, 0x00},
{R367TER_RCINSDELH, 0x00},
{R367TER_RCINSDELM, 0x00},
{R367TER_RCINSDELL, 0x00},
{R367TER_RCSTATUS, 0x00},
{R367TER_RCSPEED, 0x6f},
{R367TER_RCDEBUGM, 0xe7},
{R367TER_RCDEBUGL, 0x9b},
{R367TER_RCOBSCFG, 0x00},
{R367TER_RCOBSM, 0x00},
{R367TER_RCOBSL, 0x00},
{R367TER_RCFECSPY, 0x00},
{R367TER_RCFSPYCFG, 0x00},
{R367TER_RCFSPYDATA, 0x00},
{R367TER_RCFSPYOUT, 0x00},
{R367TER_RCFSTATUS, 0x00},
{R367TER_RCFGOODPACK, 0x00},
{R367TER_RCFPACKCNT, 0x00},
{R367TER_RCFSPYMISC, 0x00},
{R367TER_RCFBERCPT4, 0x00},
{R367TER_RCFBERCPT3, 0x00},
{R367TER_RCFBERCPT2, 0x00},
{R367TER_RCFBERCPT1, 0x00},
{R367TER_RCFBERCPT0, 0x00},
{R367TER_RCFBERERR2, 0x00},
{R367TER_RCFBERERR1, 0x00},
{R367TER_RCFBERERR0, 0x00},
{R367TER_RCFSTATESM, 0x00},
{R367TER_RCFSTATESL, 0x00},
{R367TER_RCFSPYBER, 0x00},
{R367TER_RCFSPYDISTM, 0x00},
{R367TER_RCFSPYDISTL, 0x00},
{R367TER_RCFSPYOBS7, 0x00},
{R367TER_RCFSPYOBS6, 0x00},
{R367TER_RCFSPYOBS5, 0x00},
{R367TER_RCFSPYOBS4, 0x00},
{R367TER_RCFSPYOBS3, 0x00},
{R367TER_RCFSPYOBS2, 0x00},
{R367TER_RCFSPYOBS1, 0x00},
{R367TER_RCFSPYOBS0, 0x00},
{R367TER_TSGENERAL, 0x00},
{R367TER_RC1SPEED, 0x6f},
{R367TER_TSGSTATUS, 0x18},
{R367TER_FECM, 0x01},
{R367TER_VTH12, 0xff},
{R367TER_VTH23, 0xa1},
{R367TER_VTH34, 0x64},
{R367TER_VTH56, 0x40},
{R367TER_VTH67, 0x00},
{R367TER_VTH78, 0x2c},
{R367TER_VITCURPUN, 0x12},
{R367TER_VERROR, 0x01},
{R367TER_PRVIT, 0x3f},
{R367TER_VAVSRVIT, 0x00},
{R367TER_VSTATUSVIT, 0xbd},
{R367TER_VTHINUSE, 0xa1},
{R367TER_KDIV12, 0x20},
{R367TER_KDIV23, 0x40},
{R367TER_KDIV34, 0x20},
{R367TER_KDIV56, 0x30},
{R367TER_KDIV67, 0x00},
{R367TER_KDIV78, 0x30},
{R367TER_SIGPOWER, 0x54},
{R367TER_DEMAPVIT, 0x40},
{R367TER_VITSCALE, 0x00},
{R367TER_FFEC1PRG, 0x00},
{R367TER_FVITCURPUN, 0x12},
{R367TER_FVERROR, 0x01},
{R367TER_FVSTATUSVIT, 0xbd},
{R367TER_DEBUG_LT1, 0x00},
{R367TER_DEBUG_LT2, 0x00},
{R367TER_DEBUG_LT3, 0x00},
{R367TER_TSTSFMET, 0x00},
{R367TER_SELOUT, 0x00},
{R367TER_TSYNC, 0x00},
{R367TER_TSTERR, 0x00},
{R367TER_TSFSYNC, 0x00},
{R367TER_TSTSFERR, 0x00},
{R367TER_TSTTSSF1, 0x01},
{R367TER_TSTTSSF2, 0x1f},
{R367TER_TSTTSSF3, 0x00},
{R367TER_TSTTS1, 0x00},
{R367TER_TSTTS2, 0x1f},
{R367TER_TSTTS3, 0x01},
{R367TER_TSTTS4, 0x00},
{R367TER_TSTTSRC, 0x00},
{R367TER_TSTTSRS, 0x00},
{R367TER_TSSTATEM, 0xb0},
{R367TER_TSSTATEL, 0x40},
{R367TER_TSCFGH, 0xC0},
{R367TER_TSCFGM, 0xc0},/* for xc5000; was 0x00 */
{R367TER_TSCFGL, 0x20},
{R367TER_TSSYNC, 0x00},
{R367TER_TSINSDELH, 0x00},
{R367TER_TSINSDELM, 0x00},
{R367TER_TSINSDELL, 0x00},
{R367TER_TSDIVN, 0x03},
{R367TER_TSDIVPM, 0x00},
{R367TER_TSDIVPL, 0x00},
{R367TER_TSDIVQM, 0x00},
{R367TER_TSDIVQL, 0x00},
{R367TER_TSDILSTKM, 0x00},
{R367TER_TSDILSTKL, 0x00},
{R367TER_TSSPEED, 0x40},/* for xc5000; was 0x6f */
{R367TER_TSSTATUS, 0x81},
{R367TER_TSSTATUS2, 0x6a},
{R367TER_TSBITRATEM, 0x0f},
{R367TER_TSBITRATEL, 0xc6},
{R367TER_TSPACKLENM, 0x00},
{R367TER_TSPACKLENL, 0xfc},
{R367TER_TSBLOCLENM, 0x0a},
{R367TER_TSBLOCLENL, 0x80},
{R367TER_TSDLYH, 0x90},
{R367TER_TSDLYM, 0x68},
{R367TER_TSDLYL, 0x01},
{R367TER_TSNPDAV, 0x00},
{R367TER_TSBUFSTATH, 0x00},
{R367TER_TSBUFSTATM, 0x00},
{R367TER_TSBUFSTATL, 0x00},
{R367TER_TSDEBUGM, 0xcf},
{R367TER_TSDEBUGL, 0x1e},
{R367TER_TSDLYSETH, 0x00},
{R367TER_TSDLYSETM, 0x68},
{R367TER_TSDLYSETL, 0x00},
{R367TER_TSOBSCFG, 0x00},
{R367TER_TSOBSM, 0x47},
{R367TER_TSOBSL, 0x1f},
{R367TER_ERRCTRL1, 0x95},
{R367TER_ERRCNT1H, 0x80},
{R367TER_ERRCNT1M, 0x00},
{R367TER_ERRCNT1L, 0x00},
{R367TER_ERRCTRL2, 0x95},
{R367TER_ERRCNT2H, 0x00},
{R367TER_ERRCNT2M, 0x00},
{R367TER_ERRCNT2L, 0x00},
{R367TER_FECSPY, 0x88},
{R367TER_FSPYCFG, 0x2c},
{R367TER_FSPYDATA, 0x3a},
{R367TER_FSPYOUT, 0x06},
{R367TER_FSTATUS, 0x61},
{R367TER_FGOODPACK, 0xff},
{R367TER_FPACKCNT, 0xff},
{R367TER_FSPYMISC, 0x66},
{R367TER_FBERCPT4, 0x00},
{R367TER_FBERCPT3, 0x00},
{R367TER_FBERCPT2, 0x36},
{R367TER_FBERCPT1, 0x36},
{R367TER_FBERCPT0, 0x14},
{R367TER_FBERERR2, 0x00},
{R367TER_FBERERR1, 0x03},
{R367TER_FBERERR0, 0x28},
{R367TER_FSTATESM, 0x00},
{R367TER_FSTATESL, 0x02},
{R367TER_FSPYBER, 0x00},
{R367TER_FSPYDISTM, 0x01},
{R367TER_FSPYDISTL, 0x9f},
{R367TER_FSPYOBS7, 0xc9},
{R367TER_FSPYOBS6, 0x99},
{R367TER_FSPYOBS5, 0x08},
{R367TER_FSPYOBS4, 0xec},
{R367TER_FSPYOBS3, 0x01},
{R367TER_FSPYOBS2, 0x0f},
{R367TER_FSPYOBS1, 0xf5},
{R367TER_FSPYOBS0, 0x08},
{R367TER_SFDEMAP, 0x40},
{R367TER_SFERROR, 0x00},
{R367TER_SFAVSR, 0x30},
{R367TER_SFECSTATUS, 0xcc},
{R367TER_SFKDIV12, 0x20},
{R367TER_SFKDIV23, 0x40},
{R367TER_SFKDIV34, 0x20},
{R367TER_SFKDIV56, 0x20},
{R367TER_SFKDIV67, 0x00},
{R367TER_SFKDIV78, 0x20},
{R367TER_SFDILSTKM, 0x00},
{R367TER_SFDILSTKL, 0x00},
{R367TER_SFSTATUS, 0xb5},
{R367TER_SFDLYH, 0x90},
{R367TER_SFDLYM, 0x60},
{R367TER_SFDLYL, 0x01},
{R367TER_SFDLYSETH, 0xc0},
{R367TER_SFDLYSETM, 0x60},
{R367TER_SFDLYSETL, 0x00},
{R367TER_SFOBSCFG, 0x00},
{R367TER_SFOBSM, 0x47},
{R367TER_SFOBSL, 0x05},
{R367TER_SFECINFO, 0x40},
{R367TER_SFERRCTRL, 0x74},
{R367TER_SFERRCNTH, 0x80},
{R367TER_SFERRCNTM, 0x00},
{R367TER_SFERRCNTL, 0x00},
{R367TER_SYMBRATEM, 0x2f},
{R367TER_SYMBRATEL, 0x50},
{R367TER_SYMBSTATUS, 0x7f},
{R367TER_SYMBCFG, 0x00},
{R367TER_SYMBFIFOM, 0xf4},
{R367TER_SYMBFIFOL, 0x0d},
{R367TER_SYMBOFFSM, 0xf0},
{R367TER_SYMBOFFSL, 0x2d},
{R367TER_DEBUG_LT4, 0x00},
{R367TER_DEBUG_LT5, 0x00},
{R367TER_DEBUG_LT6, 0x00},
{R367TER_DEBUG_LT7, 0x00},
{R367TER_DEBUG_LT8, 0x00},
{R367TER_DEBUG_LT9, 0x00},
};
#define RF_LOOKUP_TABLE_SIZE 31
#define RF_LOOKUP_TABLE2_SIZE 16
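/*
 * Each lookup table pairs row 0 (a raw AGC register reading) with
 * row 1 (the matching RF input level, in dBm).
 */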
/* RF Level (for RF AGC->AGC1) Lookup Table, depends on the board and tuner.*/
static s32 stv0367cab_RF_LookUp1[2][RF_LOOKUP_TABLE_SIZE] = {
{/*AGC1*/
48, 50, 51, 53, 54, 56, 57, 58, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
76, 77, 78, 80, 83, 85, 88,
}, {/*RF(dbm)*/
22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33,
34, 35, 36, 37, 38, 39, 41, 42, 43, 44, 46, 47,
49, 50, 52, 53, 54, 55, 56,
}
};
/* RF Level (for IF AGC->AGC2) Lookup Table, depends on the board and tuner.*/
static s32 stv0367cab_RF_LookUp2[2][RF_LOOKUP_TABLE2_SIZE] = {
{/*AGC2*/
28, 29, 31, 32, 34, 35, 36, 37,
38, 39, 40, 41, 42, 43, 44, 45,
}, {/*RF(dbm)*/
57, 58, 59, 60, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 71, 72,
}
};
static struct st_register def0367cab[STV0367CAB_NBREGS] = {
{R367CAB_ID, 0x60},
{R367CAB_I2CRPT, 0xa0},
/*{R367CAB_I2CRPT, 0x22},*/
{R367CAB_TOPCTRL, 0x10},
{R367CAB_IOCFG0, 0x80},
{R367CAB_DAC0R, 0x00},
{R367CAB_IOCFG1, 0x00},
{R367CAB_DAC1R, 0x00},
{R367CAB_IOCFG2, 0x00},
{R367CAB_SDFR, 0x00},
{R367CAB_AUX_CLK, 0x00},
{R367CAB_FREESYS1, 0x00},
{R367CAB_FREESYS2, 0x00},
{R367CAB_FREESYS3, 0x00},
{R367CAB_GPIO_CFG, 0x55},
{R367CAB_GPIO_CMD, 0x01},
{R367CAB_TSTRES, 0x00},
{R367CAB_ANACTRL, 0x0d}, /* was 0x00, need to check - I.M.L. */
{R367CAB_TSTBUS, 0x00},
{R367CAB_RF_AGC1, 0xea},
{R367CAB_RF_AGC2, 0x82},
{R367CAB_ANADIGCTRL, 0x0b},
{R367CAB_PLLMDIV, 0x01},
{R367CAB_PLLNDIV, 0x08},
{R367CAB_PLLSETUP, 0x18},
{R367CAB_DUAL_AD12, 0x0C}, /* for xc5000 AGC voltage 1.6V */
{R367CAB_TSTBIST, 0x00},
{R367CAB_CTRL_1, 0x00},
{R367CAB_CTRL_2, 0x03},
{R367CAB_IT_STATUS1, 0x2b},
{R367CAB_IT_STATUS2, 0x08},
{R367CAB_IT_EN1, 0x00},
{R367CAB_IT_EN2, 0x00},
{R367CAB_CTRL_STATUS, 0x04},
{R367CAB_TEST_CTL, 0x00},
{R367CAB_AGC_CTL, 0x73},
{R367CAB_AGC_IF_CFG, 0x50},
{R367CAB_AGC_RF_CFG, 0x00},
{R367CAB_AGC_PWM_CFG, 0x03},
{R367CAB_AGC_PWR_REF_L, 0x5a},
{R367CAB_AGC_PWR_REF_H, 0x00},
{R367CAB_AGC_RF_TH_L, 0xff},
{R367CAB_AGC_RF_TH_H, 0x07},
{R367CAB_AGC_IF_LTH_L, 0x00},
{R367CAB_AGC_IF_LTH_H, 0x08},
{R367CAB_AGC_IF_HTH_L, 0xff},
{R367CAB_AGC_IF_HTH_H, 0x07},
{R367CAB_AGC_PWR_RD_L, 0xa0},
{R367CAB_AGC_PWR_RD_M, 0xe9},
{R367CAB_AGC_PWR_RD_H, 0x03},
{R367CAB_AGC_PWM_IFCMD_L, 0xe4},
{R367CAB_AGC_PWM_IFCMD_H, 0x00},
{R367CAB_AGC_PWM_RFCMD_L, 0xff},
{R367CAB_AGC_PWM_RFCMD_H, 0x07},
{R367CAB_IQDEM_CFG, 0x01},
{R367CAB_MIX_NCO_LL, 0x22},
{R367CAB_MIX_NCO_HL, 0x96},
{R367CAB_MIX_NCO_HH, 0x55},
{R367CAB_SRC_NCO_LL, 0xff},
{R367CAB_SRC_NCO_LH, 0x0c},
{R367CAB_SRC_NCO_HL, 0xf5},
{R367CAB_SRC_NCO_HH, 0x20},
{R367CAB_IQDEM_GAIN_SRC_L, 0x06},
{R367CAB_IQDEM_GAIN_SRC_H, 0x01},
{R367CAB_IQDEM_DCRM_CFG_LL, 0xfe},
{R367CAB_IQDEM_DCRM_CFG_LH, 0xff},
{R367CAB_IQDEM_DCRM_CFG_HL, 0x0f},
{R367CAB_IQDEM_DCRM_CFG_HH, 0x00},
{R367CAB_IQDEM_ADJ_COEFF0, 0x34},
{R367CAB_IQDEM_ADJ_COEFF1, 0xae},
{R367CAB_IQDEM_ADJ_COEFF2, 0x46},
{R367CAB_IQDEM_ADJ_COEFF3, 0x77},
{R367CAB_IQDEM_ADJ_COEFF4, 0x96},
{R367CAB_IQDEM_ADJ_COEFF5, 0x69},
{R367CAB_IQDEM_ADJ_COEFF6, 0xc7},
{R367CAB_IQDEM_ADJ_COEFF7, 0x01},
{R367CAB_IQDEM_ADJ_EN, 0x04},
{R367CAB_IQDEM_ADJ_AGC_REF, 0x94},
{R367CAB_ALLPASSFILT1, 0xc9},
{R367CAB_ALLPASSFILT2, 0x2d},
{R367CAB_ALLPASSFILT3, 0xa3},
{R367CAB_ALLPASSFILT4, 0xfb},
{R367CAB_ALLPASSFILT5, 0xf6},
{R367CAB_ALLPASSFILT6, 0x45},
{R367CAB_ALLPASSFILT7, 0x6f},
{R367CAB_ALLPASSFILT8, 0x7e},
{R367CAB_ALLPASSFILT9, 0x05},
{R367CAB_ALLPASSFILT10, 0x0a},
{R367CAB_ALLPASSFILT11, 0x51},
{R367CAB_TRL_AGC_CFG, 0x20},
{R367CAB_TRL_LPF_CFG, 0x28},
{R367CAB_TRL_LPF_ACQ_GAIN, 0x44},
{R367CAB_TRL_LPF_TRK_GAIN, 0x22},
{R367CAB_TRL_LPF_OUT_GAIN, 0x03},
{R367CAB_TRL_LOCKDET_LTH, 0x04},
{R367CAB_TRL_LOCKDET_HTH, 0x11},
{R367CAB_TRL_LOCKDET_TRGVAL, 0x20},
{R367CAB_IQ_QAM, 0x01},
{R367CAB_FSM_STATE, 0xa0},
{R367CAB_FSM_CTL, 0x08},
{R367CAB_FSM_STS, 0x0c},
{R367CAB_FSM_SNR0_HTH, 0x00},
{R367CAB_FSM_SNR1_HTH, 0x00},
{R367CAB_FSM_SNR2_HTH, 0x23}, /* 0x00 */
{R367CAB_FSM_SNR0_LTH, 0x00},
{R367CAB_FSM_SNR1_LTH, 0x00},
{R367CAB_FSM_EQA1_HTH, 0x00},
{R367CAB_FSM_TEMPO, 0x32},
{R367CAB_FSM_CONFIG, 0x03},
{R367CAB_EQU_I_TESTTAP_L, 0x11},
{R367CAB_EQU_I_TESTTAP_M, 0x00},
{R367CAB_EQU_I_TESTTAP_H, 0x00},
{R367CAB_EQU_TESTAP_CFG, 0x00},
{R367CAB_EQU_Q_TESTTAP_L, 0xff},
{R367CAB_EQU_Q_TESTTAP_M, 0x00},
{R367CAB_EQU_Q_TESTTAP_H, 0x00},
{R367CAB_EQU_TAP_CTRL, 0x00},
{R367CAB_EQU_CTR_CRL_CONTROL_L, 0x11},
{R367CAB_EQU_CTR_CRL_CONTROL_H, 0x05},
{R367CAB_EQU_CTR_HIPOW_L, 0x00},
{R367CAB_EQU_CTR_HIPOW_H, 0x00},
{R367CAB_EQU_I_EQU_LO, 0xef},
{R367CAB_EQU_I_EQU_HI, 0x00},
{R367CAB_EQU_Q_EQU_LO, 0xee},
{R367CAB_EQU_Q_EQU_HI, 0x00},
{R367CAB_EQU_MAPPER, 0xc5},
{R367CAB_EQU_SWEEP_RATE, 0x80},
{R367CAB_EQU_SNR_LO, 0x64},
{R367CAB_EQU_SNR_HI, 0x03},
{R367CAB_EQU_GAMMA_LO, 0x00},
{R367CAB_EQU_GAMMA_HI, 0x00},
{R367CAB_EQU_ERR_GAIN, 0x36},
{R367CAB_EQU_RADIUS, 0xaa},
{R367CAB_EQU_FFE_MAINTAP, 0x00},
{R367CAB_EQU_FFE_LEAKAGE, 0x63},
{R367CAB_EQU_FFE_MAINTAP_POS, 0xdf},
{R367CAB_EQU_GAIN_WIDE, 0x88},
{R367CAB_EQU_GAIN_NARROW, 0x41},
{R367CAB_EQU_CTR_LPF_GAIN, 0xd1},
{R367CAB_EQU_CRL_LPF_GAIN, 0xa7},
{R367CAB_EQU_GLOBAL_GAIN, 0x06},
{R367CAB_EQU_CRL_LD_SEN, 0x85},
{R367CAB_EQU_CRL_LD_VAL, 0xe2},
{R367CAB_EQU_CRL_TFR, 0x20},
{R367CAB_EQU_CRL_BISTH_LO, 0x00},
{R367CAB_EQU_CRL_BISTH_HI, 0x00},
{R367CAB_EQU_SWEEP_RANGE_LO, 0x00},
{R367CAB_EQU_SWEEP_RANGE_HI, 0x00},
{R367CAB_EQU_CRL_LIMITER, 0x40},
{R367CAB_EQU_MODULUS_MAP, 0x90},
{R367CAB_EQU_PNT_GAIN, 0xa7},
{R367CAB_FEC_AC_CTR_0, 0x16},
{R367CAB_FEC_AC_CTR_1, 0x0b},
{R367CAB_FEC_AC_CTR_2, 0x88},
{R367CAB_FEC_AC_CTR_3, 0x02},
{R367CAB_FEC_STATUS, 0x12},
{R367CAB_RS_COUNTER_0, 0x7d},
{R367CAB_RS_COUNTER_1, 0xd0},
{R367CAB_RS_COUNTER_2, 0x19},
{R367CAB_RS_COUNTER_3, 0x0b},
{R367CAB_RS_COUNTER_4, 0xa3},
{R367CAB_RS_COUNTER_5, 0x00},
{R367CAB_BERT_0, 0x01},
{R367CAB_BERT_1, 0x25},
{R367CAB_BERT_2, 0x41},
{R367CAB_BERT_3, 0x39},
{R367CAB_OUTFORMAT_0, 0xc2},
{R367CAB_OUTFORMAT_1, 0x22},
{R367CAB_SMOOTHER_2, 0x28},
{R367CAB_TSMF_CTRL_0, 0x01},
{R367CAB_TSMF_CTRL_1, 0xc6},
{R367CAB_TSMF_CTRL_3, 0x43},
{R367CAB_TS_ON_ID_0, 0x00},
{R367CAB_TS_ON_ID_1, 0x00},
{R367CAB_TS_ON_ID_2, 0x00},
{R367CAB_TS_ON_ID_3, 0x00},
{R367CAB_RE_STATUS_0, 0x00},
{R367CAB_RE_STATUS_1, 0x00},
{R367CAB_RE_STATUS_2, 0x00},
{R367CAB_RE_STATUS_3, 0x00},
{R367CAB_TS_STATUS_0, 0x00},
{R367CAB_TS_STATUS_1, 0x00},
{R367CAB_TS_STATUS_2, 0xa0},
{R367CAB_TS_STATUS_3, 0x00},
{R367CAB_T_O_ID_0, 0x00},
{R367CAB_T_O_ID_1, 0x00},
{R367CAB_T_O_ID_2, 0x00},
{R367CAB_T_O_ID_3, 0x00},
};
/* assumed upper bound on multi-byte writes issued by this driver;
avoids a variable-length array on the kernel stack */
#define MAX_XFER_SIZE 64
static
int stv0367_writeregs(struct stv0367_state *state, u16 reg, u8 *data, int len)
{
u8 buf[MAX_XFER_SIZE];
struct i2c_msg msg = {
.addr = state->config->demod_address,
.flags = 0,
.buf = buf,
.len = len + 2
};
int ret;
if (len + 2 > MAX_XFER_SIZE) {
printk(KERN_WARNING "%s: write reg=%04x: len=%d is too big!\n",
__func__, reg, len);
return -EINVAL;
}
buf[0] = MSB(reg);
buf[1] = LSB(reg);
memcpy(buf + 2, data, len);
if (i2cdebug)
printk(KERN_DEBUG "%s: %02x: %02x\n", __func__, reg, buf[2]);
ret = i2c_transfer(state->i2c, &msg, 1);
if (ret != 1)
printk(KERN_ERR "%s: i2c write error!\n", __func__);
return (ret != 1) ? -EREMOTEIO : 0;
}
static int stv0367_writereg(struct stv0367_state *state, u16 reg, u8 data)
{
return stv0367_writeregs(state, reg, &data, 1);
}
static u8 stv0367_readreg(struct stv0367_state *state, u16 reg)
{
u8 b0[] = { 0, 0 };
u8 b1[] = { 0 };
struct i2c_msg msg[] = {
{
.addr = state->config->demod_address,
.flags = 0,
.buf = b0,
.len = 2
}, {
.addr = state->config->demod_address,
.flags = I2C_M_RD,
.buf = b1,
.len = 1
}
};
int ret;
b0[0] = MSB(reg);
b0[1] = LSB(reg);
ret = i2c_transfer(state->i2c, msg, 2);
if (ret != 2)
printk(KERN_ERR "%s: i2c read error\n", __func__);
if (i2cdebug)
printk(KERN_DEBUG "%s: %02x: %02x\n", __func__, reg, b1[0]);
return b1[0];
}
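/*
 * Bit-field helpers: a field "label" packs the 16-bit register address
 * in bits 31:16 and the field mask in bits 7:0; the shift position is
 * derived from the lowest set bit of the mask.
 */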
static void extract_mask_pos(u32 label, u8 *mask, u8 *pos)
{
u8 position = 0, i = 0;
(*mask) = label & 0xff;
while ((position == 0) && (i < 8)) {
position = ((*mask) >> i) & 0x01;
i++;
}
(*pos) = (i - 1);
}
static void stv0367_writebits(struct stv0367_state *state, u32 label, u8 val)
{
u8 reg, mask, pos;
reg = stv0367_readreg(state, (label >> 16) & 0xffff);
extract_mask_pos(label, &mask, &pos);
val = mask & (val << pos);
reg = (reg & (~mask)) | val;
stv0367_writereg(state, (label >> 16) & 0xffff, reg);
}
static void stv0367_setbits(u8 *reg, u32 label, u8 val)
{
u8 mask, pos;
extract_mask_pos(label, &mask, &pos);
val = mask & (val << pos);
(*reg) = ((*reg) & (~mask)) | val;
}
static u8 stv0367_readbits(struct stv0367_state *state, u32 label)
{
u8 val = 0xff;
u8 mask, pos;
extract_mask_pos(label, &mask, &pos);
val = stv0367_readreg(state, label >> 16);
val = (val & mask) >> pos;
return val;
}
u8 stv0367_getbits(u8 reg, u32 label)
{
u8 mask, pos;
extract_mask_pos(label, &mask, &pos);
return (reg & mask) >> pos;
}
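/*
 * Gate the demod's I2C repeater towards the tuner: enabling turns the
 * repeater on (I2CT_ON) and clears STOP_ENABLE so tuner transactions
 * pass through the demodulator.
 */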
static int stv0367ter_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct stv0367_state *state = fe->demodulator_priv;
u8 tmp = stv0367_readreg(state, R367TER_I2CRPT);
dprintk("%s:\n", __func__);
if (enable) {
stv0367_setbits(&tmp, F367TER_STOP_ENABLE, 0);
stv0367_setbits(&tmp, F367TER_I2CT_ON, 1);
} else {
stv0367_setbits(&tmp, F367TER_STOP_ENABLE, 1);
stv0367_setbits(&tmp, F367TER_I2CT_ON, 0);
}
stv0367_writereg(state, R367TER_I2CRPT, tmp);
return 0;
}
static u32 stv0367_get_tuner_freq(struct dvb_frontend *fe)
{
struct dvb_frontend_ops *frontend_ops = &fe->ops;
struct dvb_tuner_ops *tuner_ops = &frontend_ops->tuner_ops;
u32 freq = 0;
int err = 0;
dprintk("%s:\n", __func__);
if (tuner_ops->get_frequency) {
err = tuner_ops->get_frequency(fe, &freq);
if (err < 0) {
printk(KERN_ERR "%s: Invalid parameter\n", __func__);
return err;
}
dprintk("%s: frequency=%d\n", __func__, freq);
} else
return -1;
return freq;
}
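/*
 * IIR cell coefficients, indexed as [clock][cell][coefficient], where
 * clock 0 corresponds to a 27 MHz xtal, 1 to 25 MHz and 2 to 30 MHz
 * (the 30 MHz entries are not filled in).
 */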
static u16 CellsCoeffs_8MHz_367cofdm[3][6][5] = {
{
{0x10EF, 0xE205, 0x10EF, 0xCE49, 0x6DA7}, /* CELL 1 COEFFS 27M*/
{0x2151, 0xc557, 0x2151, 0xc705, 0x6f93}, /* CELL 2 COEFFS */
{0x2503, 0xc000, 0x2503, 0xc375, 0x7194}, /* CELL 3 COEFFS */
{0x20E9, 0xca94, 0x20e9, 0xc153, 0x7194}, /* CELL 4 COEFFS */
{0x06EF, 0xF852, 0x06EF, 0xC057, 0x7207}, /* CELL 5 COEFFS */
{0x0000, 0x0ECC, 0x0ECC, 0x0000, 0x3647} /* CELL 6 COEFFS */
}, {
{0x10A0, 0xE2AF, 0x10A1, 0xCE76, 0x6D6D}, /* CELL 1 COEFFS 25M*/
{0x20DC, 0xC676, 0x20D9, 0xC80A, 0x6F29},
{0x2532, 0xC000, 0x251D, 0xC391, 0x706F},
{0x1F7A, 0xCD2B, 0x2032, 0xC15E, 0x711F},
{0x0698, 0xFA5E, 0x0568, 0xC059, 0x7193},
{0x0000, 0x0918, 0x149C, 0x0000, 0x3642} /* CELL 6 COEFFS */
}, {
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, /* 30M */
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000}
}
};
static u16 CellsCoeffs_7MHz_367cofdm[3][6][5] = {
{
{0x12CA, 0xDDAF, 0x12CA, 0xCCEB, 0x6FB1}, /* CELL 1 COEFFS 27M*/
{0x2329, 0xC000, 0x2329, 0xC6B0, 0x725F}, /* CELL 2 COEFFS */
{0x2394, 0xC000, 0x2394, 0xC2C7, 0x7410}, /* CELL 3 COEFFS */
{0x251C, 0xC000, 0x251C, 0xC103, 0x74D9}, /* CELL 4 COEFFS */
{0x0804, 0xF546, 0x0804, 0xC040, 0x7544}, /* CELL 5 COEFFS */
{0x0000, 0x0CD9, 0x0CD9, 0x0000, 0x370A} /* CELL 6 COEFFS */
}, {
{0x1285, 0xDE47, 0x1285, 0xCD17, 0x6F76}, /*25M*/
{0x234C, 0xC000, 0x2348, 0xC6DA, 0x7206},
{0x23B4, 0xC000, 0x23AC, 0xC2DB, 0x73B3},
{0x253D, 0xC000, 0x25B6, 0xC10B, 0x747F},
{0x0721, 0xF79C, 0x065F, 0xC041, 0x74EB},
{0x0000, 0x08FA, 0x1162, 0x0000, 0x36FF}
}, {
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, /* 30M */
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000}
}
};
static u16 CellsCoeffs_6MHz_367cofdm[3][6][5] = {
{
{0x1699, 0xD5B8, 0x1699, 0xCBC3, 0x713B}, /* CELL 1 COEFFS 27M*/
{0x2245, 0xC000, 0x2245, 0xC568, 0x74D5}, /* CELL 2 COEFFS */
{0x227F, 0xC000, 0x227F, 0xC1FC, 0x76C6}, /* CELL 3 COEFFS */
{0x235E, 0xC000, 0x235E, 0xC0A7, 0x778A}, /* CELL 4 COEFFS */
{0x0ECB, 0xEA0B, 0x0ECB, 0xC027, 0x77DD}, /* CELL 5 COEFFS */
{0x0000, 0x0B68, 0x0B68, 0x0000, 0xC89A}, /* CELL 6 COEFFS */
}, {
{0x1655, 0xD64E, 0x1658, 0xCBEF, 0x70FE}, /*25M*/
{0x225E, 0xC000, 0x2256, 0xC589, 0x7489},
{0x2293, 0xC000, 0x2295, 0xC209, 0x767E},
{0x2377, 0xC000, 0x23AA, 0xC0AB, 0x7746},
{0x0DC7, 0xEBC8, 0x0D07, 0xC027, 0x7799},
{0x0000, 0x0888, 0x0E9C, 0x0000, 0x3757}
}, {
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000}, /* 30M */
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000},
{0x0000, 0x0000, 0x0000, 0x0000, 0x0000}
}
};
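/*
 * Master clock derivation: mclk = (ExtClk / 2) * N / (M * 2^P) when
 * the PLL is active, otherwise the external clock is used directly.
 */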
static u32 stv0367ter_get_mclk(struct stv0367_state *state, u32 ExtClk_Hz)
{
u32 mclk_Hz = 0; /* master clock frequency (Hz) */
u32 m, n, p;
dprintk("%s:\n", __func__);
if (stv0367_readbits(state, F367TER_BYPASS_PLLXN) == 0) {
n = (u32)stv0367_readbits(state, F367TER_PLL_NDIV);
if (n == 0)
n = n + 1;
m = (u32)stv0367_readbits(state, F367TER_PLL_MDIV);
if (m == 0)
m = m + 1;
p = (u32)stv0367_readbits(state, F367TER_PLL_PDIV);
if (p > 5)
p = 5;
mclk_Hz = ((ExtClk_Hz / 2) * n) / (m * (1 << p));
dprintk("N=%d M=%d P=%d mclk_Hz=%d ExtClk_Hz=%d\n",
n, m, p, mclk_Hz, ExtClk_Hz);
} else
mclk_Hz = ExtClk_Hz;
dprintk("%s: mclk_Hz=%d\n", __func__, mclk_Hz);
return mclk_Hz;
}
static int stv0367ter_filt_coeff_init(struct stv0367_state *state,
u16 CellsCoeffs[3][6][5], u32 DemodXtal)
{
int i, j, k, freq;
dprintk("%s:\n", __func__);
freq = stv0367ter_get_mclk(state, DemodXtal);
if (freq == 53125000)
k = 1; /* equivalent to Xtal 25M on 362*/
else if (freq == 54000000)
k = 0; /* equivalent to Xtal 27M on 362*/
else if (freq == 52500000)
k = 2; /* equivalent to Xtal 30M on 362*/
else
return 0;
for (i = 1; i <= 6; i++) {
stv0367_writebits(state, F367TER_IIR_CELL_NB, i - 1);
for (j = 1; j <= 5; j++) {
stv0367_writereg(state,
(R367TER_IIRCX_COEFF1_MSB + 2 * (j - 1)),
MSB(CellsCoeffs[k][i-1][j-1]));
stv0367_writereg(state,
(R367TER_IIRCX_COEFF1_LSB + 2 * (j - 1)),
LSB(CellsCoeffs[k][i-1][j-1]));
}
}
return 1;
}
static void stv0367ter_agc_iir_lock_detect_set(struct stv0367_state *state)
{
dprintk("%s:\n", __func__);
stv0367_writebits(state, F367TER_LOCK_DETECT_LSB, 0x00);
/* Lock detect 1 */
stv0367_writebits(state, F367TER_LOCK_DETECT_CHOICE, 0x00);
stv0367_writebits(state, F367TER_LOCK_DETECT_MSB, 0x06);
stv0367_writebits(state, F367TER_AUT_AGC_TARGET_LSB, 0x04);
/* Lock detect 2 */
stv0367_writebits(state, F367TER_LOCK_DETECT_CHOICE, 0x01);
stv0367_writebits(state, F367TER_LOCK_DETECT_MSB, 0x06);
stv0367_writebits(state, F367TER_AUT_AGC_TARGET_LSB, 0x04);
/* Lock detect 3 */
stv0367_writebits(state, F367TER_LOCK_DETECT_CHOICE, 0x02);
stv0367_writebits(state, F367TER_LOCK_DETECT_MSB, 0x01);
stv0367_writebits(state, F367TER_AUT_AGC_TARGET_LSB, 0x00);
/* Lock detect 4 */
stv0367_writebits(state, F367TER_LOCK_DETECT_CHOICE, 0x03);
stv0367_writebits(state, F367TER_LOCK_DETECT_MSB, 0x01);
stv0367_writebits(state, F367TER_AUT_AGC_TARGET_LSB, 0x00);
}
static int stv0367_iir_filt_init(struct stv0367_state *state, u8 Bandwidth,
u32 DemodXtalValue)
{
dprintk("%s:\n", __func__);
stv0367_writebits(state, F367TER_NRST_IIR, 0);
switch (Bandwidth) {
case 6:
if (!stv0367ter_filt_coeff_init(state,
CellsCoeffs_6MHz_367cofdm,
DemodXtalValue))
return 0;
break;
case 7:
if (!stv0367ter_filt_coeff_init(state,
CellsCoeffs_7MHz_367cofdm,
DemodXtalValue))
return 0;
break;
case 8:
if (!stv0367ter_filt_coeff_init(state,
CellsCoeffs_8MHz_367cofdm,
DemodXtalValue))
return 0;
break;
default:
return 0;
}
stv0367_writebits(state, F367TER_NRST_IIR, 1);
return 1;
}
static void stv0367ter_agc_iir_rst(struct stv0367_state *state)
{
u8 com_n;
dprintk("%s:\n", __func__);
com_n = stv0367_readbits(state, F367TER_COM_N);
stv0367_writebits(state, F367TER_COM_N, 0x07);
stv0367_writebits(state, F367TER_COM_SOFT_RSTN, 0x00);
stv0367_writebits(state, F367TER_COM_AGC_ON, 0x00);
stv0367_writebits(state, F367TER_COM_SOFT_RSTN, 0x01);
stv0367_writebits(state, F367TER_COM_AGC_ON, 0x01);
stv0367_writebits(state, F367TER_COM_N, com_n);
}
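/*
 * Pick a delay for the given FFT mode: 0 = 2k, 1 = 8k, 2 = 4k (the
 * same encoding stv0367ter_check_cpamp() uses for its FFTmode input).
 */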
static int stv0367ter_duration(s32 mode, int tempo1, int tempo2, int tempo3)
{
int local_tempo = 0;
switch (mode) {
case 0:
local_tempo = tempo1;
break;
case 1:
local_tempo = tempo2;
break ;
case 2:
local_tempo = tempo3;
break;
default:
break;
}
/* msleep(local_tempo); */
return local_tempo;
}
static enum
stv0367_ter_signal_type stv0367ter_check_syr(struct stv0367_state *state)
{
int wd = 100;
unsigned short int SYR_var;
s32 SYRStatus;
dprintk("%s:\n", __func__);
SYR_var = stv0367_readbits(state, F367TER_SYR_LOCK);
while ((!SYR_var) && (wd > 0)) {
usleep_range(2000, 3000);
wd -= 2;
SYR_var = stv0367_readbits(state, F367TER_SYR_LOCK);
}
if (!SYR_var)
SYRStatus = FE_TER_NOSYMBOL;
else
SYRStatus = FE_TER_SYMBOLOK;
dprintk("stv0367ter_check_syr SYRStatus %s\n",
SYR_var == 0 ? "No Symbol" : "OK");
return SYRStatus;
}
static enum
stv0367_ter_signal_type stv0367ter_check_cpamp(struct stv0367_state *state,
s32 FFTmode)
{
s32 CPAMPvalue = 0, CPAMPStatus, CPAMPMin;
int wd = 0;
dprintk("%s:\n", __func__);
switch (FFTmode) {
case 0: /*2k mode*/
CPAMPMin = 20;
wd = 10;
break;
case 1: /*8k mode*/
CPAMPMin = 80;
wd = 55;
break;
case 2: /*4k mode*/
CPAMPMin = 40;
wd = 30;
break;
default:
CPAMPMin = 0xffff; /*drives to NOCPAMP */
break;
}
dprintk("%s: CPAMPMin=%d wd=%d\n", __func__, CPAMPMin, wd);
CPAMPvalue = stv0367_readbits(state, F367TER_PPM_CPAMP_DIRECT);
while ((CPAMPvalue < CPAMPMin) && (wd > 0)) {
usleep_range(1000, 2000);
wd -= 1;
CPAMPvalue = stv0367_readbits(state, F367TER_PPM_CPAMP_DIRECT);
/*dprintk("CPAMPvalue= %d at wd=%d\n",CPAMPvalue,wd); */
}
dprintk("******last CPAMPvalue= %d at wd=%d\n", CPAMPvalue, wd);
if (CPAMPvalue < CPAMPMin) {
CPAMPStatus = FE_TER_NOCPAMP;
printk(KERN_ERR "CPAMP failed\n");
} else {
printk(KERN_ERR "CPAMP OK !\n");
CPAMPStatus = FE_TER_CPAMPOK;
}
return CPAMPStatus;
}
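/*
 * Full DVB-T acquisition: start from a forced 2k 1/4 configuration,
 * retry until symbol and CPAMP lock (at most 10 tries), then poll the
 * demod, TPS and TS FIFO lock flags with mode-dependent timeouts.
 */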
enum
stv0367_ter_signal_type stv0367ter_lock_algo(struct stv0367_state *state)
{
enum stv0367_ter_signal_type ret_flag;
short int wd, tempo;
u8 try, u_var1 = 0, u_var2 = 0, u_var3 = 0, u_var4 = 0, mode, guard;
u8 tmp, tmp2;
dprintk("%s:\n", __func__);
if (state == NULL)
return FE_TER_SWNOK;
try = 0;
do {
ret_flag = FE_TER_LOCKOK;
stv0367_writebits(state, F367TER_CORE_ACTIVE, 0);
if (state->config->if_iq_mode != 0)
stv0367_writebits(state, F367TER_COM_N, 0x07);
stv0367_writebits(state, F367TER_GUARD, 3);/* suggest 2k 1/4 */
stv0367_writebits(state, F367TER_MODE, 0);
stv0367_writebits(state, F367TER_SYR_TR_DIS, 0);
usleep_range(5000, 10000);
stv0367_writebits(state, F367TER_CORE_ACTIVE, 1);
if (stv0367ter_check_syr(state) == FE_TER_NOSYMBOL)
return FE_TER_NOSYMBOL;
else {
/* if the chip locked on the wrong mode at the first try,
it must lock correctly at the second try */
mode = stv0367_readbits(state, F367TER_SYR_MODE);
if (stv0367ter_check_cpamp(state, mode) ==
FE_TER_NOCPAMP) {
if (try == 0)
ret_flag = FE_TER_NOCPAMP;
}
}
try++;
} while ((try < 10) && (ret_flag != FE_TER_LOCKOK));
tmp = stv0367_readreg(state, R367TER_SYR_STAT);
tmp2 = stv0367_readreg(state, R367TER_STATUS);
dprintk("state=%p\n", state);
dprintk("LOCK OK! mode=%d SYR_STAT=0x%x R367TER_STATUS=0x%x\n",
mode, tmp, tmp2);
tmp = stv0367_readreg(state, R367TER_PRVIT);
tmp2 = stv0367_readreg(state, R367TER_I2CRPT);
dprintk("PRVIT=0x%x I2CRPT=0x%x\n", tmp, tmp2);
tmp = stv0367_readreg(state, R367TER_GAIN_SRC1);
dprintk("GAIN_SRC1=0x%x\n", tmp);
if ((mode != 0) && (mode != 1) && (mode != 2))
return FE_TER_SWNOK;
/*guard=stv0367_readbits(state,F367TER_SYR_GUARD); */
/* suppress EPQ auto for SYR_GUARD 1/16 or 1/32
and set the channel predictor to automatic */
#if 0
switch (guard) {
case 0:
case 1:
stv0367_writebits(state, F367TER_AUTO_LE_EN, 0);
stv0367_writereg(state, R367TER_CHC_CTL, 0x01);
break;
case 2:
case 3:
stv0367_writebits(state, F367TER_AUTO_LE_EN, 1);
stv0367_writereg(state, R367TER_CHC_CTL, 0x11);
break;
default:
return FE_TER_SWNOK;
}
#endif
/* reset the FEC and Reed-Solomon decoders, for the 367 only */
stv0367_writebits(state, F367TER_RST_SFEC, 1);
stv0367_writebits(state, F367TER_RST_REEDSOLO, 1);
usleep_range(1000, 2000);
stv0367_writebits(state, F367TER_RST_SFEC, 0);
stv0367_writebits(state, F367TER_RST_REEDSOLO, 0);
u_var1 = stv0367_readbits(state, F367TER_LK);
u_var2 = stv0367_readbits(state, F367TER_PRF);
u_var3 = stv0367_readbits(state, F367TER_TPS_LOCK);
/* u_var4=stv0367_readbits(state,F367TER_TSFIFO_LINEOK); */
wd = stv0367ter_duration(mode, 125, 500, 250);
tempo = stv0367ter_duration(mode, 4, 16, 8);
/*while ( ((!u_var1)||(!u_var2)||(!u_var3)||(!u_var4)) && (wd>=0)) */
while (((!u_var1) || (!u_var2) || (!u_var3)) && (wd >= 0)) {
usleep_range(1000 * tempo, 1000 * (tempo + 1));
wd -= tempo;
u_var1 = stv0367_readbits(state, F367TER_LK);
u_var2 = stv0367_readbits(state, F367TER_PRF);
u_var3 = stv0367_readbits(state, F367TER_TPS_LOCK);
/*u_var4=stv0367_readbits(state, F367TER_TSFIFO_LINEOK); */
}
if (!u_var1)
return FE_TER_NOLOCK;
if (!u_var2)
return FE_TER_NOPRFOUND;
if (!u_var3)
return FE_TER_NOTPS;
guard = stv0367_readbits(state, F367TER_SYR_GUARD);
stv0367_writereg(state, R367TER_CHC_CTL, 0x11);
switch (guard) {
case 0:
case 1:
stv0367_writebits(state, F367TER_AUTO_LE_EN, 0);
/*stv0367_writereg(state,R367TER_CHC_CTL, 0x1);*/
stv0367_writebits(state, F367TER_SYR_FILTER, 0);
break;
case 2:
case 3:
stv0367_writebits(state, F367TER_AUTO_LE_EN, 1);
/*stv0367_writereg(state,R367TER_CHC_CTL, 0x11);*/
stv0367_writebits(state, F367TER_SYR_FILTER, 1);
break;
default:
return FE_TER_SWNOK;
}
/* apply Sfec workaround if 8K 64QAM CR!=1/2*/
if ((stv0367_readbits(state, F367TER_TPS_CONST) == 2) &&
(mode == 1) &&
(stv0367_readbits(state, F367TER_TPS_HPCODE) != 0)) {
stv0367_writereg(state, R367TER_SFDLYSETH, 0xc0);
stv0367_writereg(state, R367TER_SFDLYSETM, 0x60);
stv0367_writereg(state, R367TER_SFDLYSETL, 0x0);
} else
stv0367_writereg(state, R367TER_SFDLYSETH, 0x0);
wd = stv0367ter_duration(mode, 125, 500, 250);
u_var4 = stv0367_readbits(state, F367TER_TSFIFO_LINEOK);
while ((!u_var4) && (wd >= 0)) {
usleep_range(1000 * tempo, 1000 * (tempo + 1));
wd -= tempo;
u_var4 = stv0367_readbits(state, F367TER_TSFIFO_LINEOK);
}
if (!u_var4)
return FE_TER_NOLOCK;
/* for 367 leave COM_N at 0x7 for IQ_mode*/
/*if(ter_state->if_iq_mode!=FE_TER_NORMAL_IF_TUNER) {
tempo=0;
while ((stv0367_readbits(state,F367TER_COM_USEGAINTRK)!=1) &&
(stv0367_readbits(state,F367TER_COM_AGCLOCK)!=1)&&(tempo<100)) {
ChipWaitOrAbort(state,1);
tempo+=1;
}
stv0367_writebits(state,F367TER_COM_N,0x17);
} */
stv0367_writebits(state, F367TER_SYR_TR_DIS, 1);
dprintk("FE_TER_LOCKOK !!!\n");
return FE_TER_LOCKOK;
}
static void stv0367ter_set_ts_mode(struct stv0367_state *state,
enum stv0367_ts_mode PathTS)
{
dprintk("%s:\n", __func__);
if (state == NULL)
return;
stv0367_writebits(state, F367TER_TS_DIS, 0);
switch (PathTS) {
default:
/* to avoid a warning: by default assume parallel mode */
case STV0367_PARALLEL_PUNCT_CLOCK:
stv0367_writebits(state, F367TER_TSFIFO_SERIAL, 0);
stv0367_writebits(state, F367TER_TSFIFO_DVBCI, 0);
break;
case STV0367_SERIAL_PUNCT_CLOCK:
stv0367_writebits(state, F367TER_TSFIFO_SERIAL, 1);
stv0367_writebits(state, F367TER_TSFIFO_DVBCI, 1);
break;
}
}
static void stv0367ter_set_clk_pol(struct stv0367_state *state,
enum stv0367_clk_pol clock)
{
dprintk("%s:\n", __func__);
if (state == NULL)
return;
switch (clock) {
case STV0367_RISINGEDGE_CLOCK:
stv0367_writebits(state, F367TER_TS_BYTE_CLK_INV, 1);
break;
case STV0367_FALLINGEDGE_CLOCK:
stv0367_writebits(state, F367TER_TS_BYTE_CLK_INV, 0);
break;
/*case FE_TER_CLOCK_POLARITY_DEFAULT:*/
default:
stv0367_writebits(state, F367TER_TS_BYTE_CLK_INV, 0);
break;
}
}
#if 0
static void stv0367ter_core_sw(struct stv0367_state *state)
{
dprintk("%s:\n", __func__);
stv0367_writebits(state, F367TER_CORE_ACTIVE, 0);
stv0367_writebits(state, F367TER_CORE_ACTIVE, 1);
msleep(350);
}
#endif
static int stv0367ter_standby(struct dvb_frontend *fe, u8 standby_on)
{
struct stv0367_state *state = fe->demodulator_priv;
dprintk("%s:\n", __func__);
if (standby_on) {
stv0367_writebits(state, F367TER_STDBY, 1);
stv0367_writebits(state, F367TER_STDBY_FEC, 1);
stv0367_writebits(state, F367TER_STDBY_CORE, 1);
} else {
stv0367_writebits(state, F367TER_STDBY, 0);
stv0367_writebits(state, F367TER_STDBY_FEC, 0);
stv0367_writebits(state, F367TER_STDBY_CORE, 0);
}
return 0;
}
static int stv0367ter_sleep(struct dvb_frontend *fe)
{
return stv0367ter_standby(fe, 1);
}
int stv0367ter_init(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
int i;
dprintk("%s:\n", __func__);
ter_state->pBER = 0;
for (i = 0; i < STV0367TER_NBREGS; i++)
stv0367_writereg(state, def0367ter[i].addr,
def0367ter[i].value);
switch (state->config->xtal) {
/*set internal freq to 53.125MHz */
case 25000000:
stv0367_writereg(state, R367TER_PLLMDIV, 0xa);
stv0367_writereg(state, R367TER_PLLNDIV, 0x55);
stv0367_writereg(state, R367TER_PLLSETUP, 0x18);
break;
default:
case 27000000:
dprintk("FE_STV0367TER_SetCLKgen for 27Mhz\n");
stv0367_writereg(state, R367TER_PLLMDIV, 0x1);
stv0367_writereg(state, R367TER_PLLNDIV, 0x8);
stv0367_writereg(state, R367TER_PLLSETUP, 0x18);
break;
case 30000000:
stv0367_writereg(state, R367TER_PLLMDIV, 0xc);
stv0367_writereg(state, R367TER_PLLNDIV, 0x55);
stv0367_writereg(state, R367TER_PLLSETUP, 0x18);
break;
}
stv0367_writereg(state, R367TER_I2CRPT, 0xa0);
stv0367_writereg(state, R367TER_ANACTRL, 0x00);
/*Set TS1 and TS2 to serial or parallel mode */
stv0367ter_set_ts_mode(state, state->config->ts_mode);
stv0367ter_set_clk_pol(state, state->config->clk_pol);
state->chip_id = stv0367_readreg(state, R367TER_ID);
ter_state->first_lock = 0;
ter_state->unlock_counter = 2;
return 0;
}
static int stv0367ter_algo(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
int offset = 0, tempo = 0;
u8 u_var;
u8 /*constell,*/ counter, tps_rcvd[2];
s8 step;
s32 timing_offset = 0;
u32 trl_nomrate = 0, InternalFreq = 0, temp = 0;
dprintk("%s:\n", __func__);
ter_state->frequency = p->frequency;
ter_state->force = FE_TER_FORCENONE
+ stv0367_readbits(state, F367TER_FORCE) * 2;
ter_state->if_iq_mode = state->config->if_iq_mode;
switch (state->config->if_iq_mode) {
case FE_TER_NORMAL_IF_TUNER: /* Normal IF mode */
dprintk("ALGO: FE_TER_NORMAL_IF_TUNER selected\n");
stv0367_writebits(state, F367TER_TUNER_BB, 0);
stv0367_writebits(state, F367TER_LONGPATH_IF, 0);
stv0367_writebits(state, F367TER_DEMUX_SWAP, 0);
break;
case FE_TER_LONGPATH_IF_TUNER: /* Long IF mode */
dprintk("ALGO: FE_TER_LONGPATH_IF_TUNER selected\n");
stv0367_writebits(state, F367TER_TUNER_BB, 0);
stv0367_writebits(state, F367TER_LONGPATH_IF, 1);
stv0367_writebits(state, F367TER_DEMUX_SWAP, 1);
break;
case FE_TER_IQ_TUNER: /* IQ mode */
dprintk("ALGO: FE_TER_IQ_TUNER selected\n");
stv0367_writebits(state, F367TER_TUNER_BB, 1);
stv0367_writebits(state, F367TER_PPM_INVSEL, 0);
break;
default:
printk(KERN_ERR "ALGO: wrong TUNER type selected\n");
return -EINVAL;
}
usleep_range(5000, 7000);
switch (p->inversion) {
case INVERSION_AUTO:
default:
dprintk("%s: inversion AUTO\n", __func__);
if (ter_state->if_iq_mode == FE_TER_IQ_TUNER)
stv0367_writebits(state, F367TER_IQ_INVERT,
ter_state->sense);
else
stv0367_writebits(state, F367TER_INV_SPECTR,
ter_state->sense);
break;
case INVERSION_ON:
case INVERSION_OFF:
if (ter_state->if_iq_mode == FE_TER_IQ_TUNER)
stv0367_writebits(state, F367TER_IQ_INVERT,
p->inversion);
else
stv0367_writebits(state, F367TER_INV_SPECTR,
p->inversion);
break;
}
if ((ter_state->if_iq_mode != FE_TER_NORMAL_IF_TUNER) &&
(ter_state->pBW != ter_state->bw)) {
stv0367ter_agc_iir_lock_detect_set(state);
/*set fine agc target to 180 for LPIF or IQ mode*/
/* set Q_AGCTarget */
stv0367_writebits(state, F367TER_SEL_IQNTAR, 1);
stv0367_writebits(state, F367TER_AUT_AGC_TARGET_MSB, 0xB);
/*stv0367_writebits(state,AUT_AGC_TARGET_LSB,0x04); */
/* set Q_AGCTarget */
stv0367_writebits(state, F367TER_SEL_IQNTAR, 0);
stv0367_writebits(state, F367TER_AUT_AGC_TARGET_MSB, 0xB);
/*stv0367_writebits(state,AUT_AGC_TARGET_LSB,0x04); */
if (!stv0367_iir_filt_init(state, ter_state->bw,
state->config->xtal))
return -EINVAL;
/*set IIR filter once for 6,7 or 8MHz BW*/
ter_state->pBW = ter_state->bw;
stv0367ter_agc_iir_rst(state);
}
if (ter_state->hierarchy == FE_TER_HIER_LOW_PRIO)
stv0367_writebits(state, F367TER_BDI_LPSEL, 0x01);
else
stv0367_writebits(state, F367TER_BDI_LPSEL, 0x00);
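/*
 * TRL nominal rate = bw(MHz) * 2^21 * 1000 / (7 * mclk(kHz)),
 * written below as a 17-bit value split over the LSB/LO/HI fields.
 */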
InternalFreq = stv0367ter_get_mclk(state, state->config->xtal) / 1000;
temp = (int)
((((ter_state->bw * 64 * (1 << 15) * 100)
/ (InternalFreq)) * 10) / 7);
stv0367_writebits(state, F367TER_TRL_NOMRATE_LSB, temp % 2);
temp = temp / 2;
stv0367_writebits(state, F367TER_TRL_NOMRATE_HI, temp / 256);
stv0367_writebits(state, F367TER_TRL_NOMRATE_LO, temp % 256);
temp = stv0367_readbits(state, F367TER_TRL_NOMRATE_HI) * 512 +
stv0367_readbits(state, F367TER_TRL_NOMRATE_LO) * 2 +
stv0367_readbits(state, F367TER_TRL_NOMRATE_LSB);
temp = (int)(((1 << 17) * ter_state->bw * 1000) / (7 * (InternalFreq)));
stv0367_writebits(state, F367TER_GAIN_SRC_HI, temp / 256);
stv0367_writebits(state, F367TER_GAIN_SRC_LO, temp % 256);
temp = stv0367_readbits(state, F367TER_GAIN_SRC_HI) * 256 +
stv0367_readbits(state, F367TER_GAIN_SRC_LO);
temp = (int)
((InternalFreq - state->config->if_khz) * (1 << 16)
/ (InternalFreq));
dprintk("DEROT temp=0x%x\n", temp);
stv0367_writebits(state, F367TER_INC_DEROT_HI, temp / 256);
stv0367_writebits(state, F367TER_INC_DEROT_LO, temp % 256);
ter_state->echo_pos = 0;
ter_state->ucblocks = 0; /* liplianin */
ter_state->pBER = 0; /* liplianin */
stv0367_writebits(state, F367TER_LONG_ECHO, ter_state->echo_pos);
if (stv0367ter_lock_algo(state) != FE_TER_LOCKOK)
return 0;
ter_state->state = FE_TER_LOCKOK;
/* update results */
tps_rcvd[0] = stv0367_readreg(state, R367TER_TPS_RCVD2);
tps_rcvd[1] = stv0367_readreg(state, R367TER_TPS_RCVD3);
ter_state->mode = stv0367_readbits(state, F367TER_SYR_MODE);
ter_state->guard = stv0367_readbits(state, F367TER_SYR_GUARD);
ter_state->first_lock = 1; /* we know sense now :) */
ter_state->agc_val =
(stv0367_readbits(state, F367TER_AGC1_VAL_LO) << 16) +
(stv0367_readbits(state, F367TER_AGC1_VAL_HI) << 24) +
stv0367_readbits(state, F367TER_AGC2_VAL_LO) +
(stv0367_readbits(state, F367TER_AGC2_VAL_HI) << 8);
/* Carrier offset calculation */
stv0367_writebits(state, F367TER_FREEZE, 1);
offset = (stv0367_readbits(state, F367TER_CRL_FOFFSET_VHI) << 16);
offset += (stv0367_readbits(state, F367TER_CRL_FOFFSET_HI) << 8);
offset += (stv0367_readbits(state, F367TER_CRL_FOFFSET_LO));
stv0367_writebits(state, F367TER_FREEZE, 0);
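/* sign-extend the 24-bit carrier frequency offset readback */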
if (offset > 8388607)
offset -= 16777216;
offset = offset * 2 / 16384;
if (ter_state->mode == FE_TER_MODE_2K)
offset = (offset * 4464) / 1000;/*** 1 FFT BIN=4.464khz***/
else if (ter_state->mode == FE_TER_MODE_4K)
offset = (offset * 223) / 100;/*** 1 FFT BIN=2.23khz***/
else if (ter_state->mode == FE_TER_MODE_8K)
offset = (offset * 111) / 100;/*** 1 FFT BIN=1.1khz***/
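/* invert the offset sign when the programmed and detected
spectrum inversion flags agree */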
if (stv0367_readbits(state, F367TER_PPM_INVSEL) == 1) {
if ((stv0367_readbits(state, F367TER_INV_SPECTR) ==
(stv0367_readbits(state,
F367TER_STATUS_INV_SPECRUM) == 1)))
offset = offset * -1;
}
if (ter_state->bw == 6)
offset = (offset * 6) / 8;
else if (ter_state->bw == 7)
offset = (offset * 7) / 8;
ter_state->frequency += offset;
tempo = 10; /* exit even if timing_offset stays null */
while ((timing_offset == 0) && (tempo > 0)) {
usleep_range(10000, 20000); /*was 20ms */
/* fine tuning of timing offset if required */
timing_offset = stv0367_readbits(state, F367TER_TRL_TOFFSET_LO)
+ 256 * stv0367_readbits(state,
F367TER_TRL_TOFFSET_HI);
if (timing_offset >= 32768)
timing_offset -= 65536;
trl_nomrate = (512 * stv0367_readbits(state,
F367TER_TRL_NOMRATE_HI)
+ stv0367_readbits(state, F367TER_TRL_NOMRATE_LO) * 2
+ stv0367_readbits(state, F367TER_TRL_NOMRATE_LSB));
timing_offset = ((signed)(1000000 / trl_nomrate) *
timing_offset) / 2048;
tempo--;
}
if (timing_offset <= 0) {
timing_offset = (timing_offset - 11) / 22;
step = -1;
} else {
timing_offset = (timing_offset + 11) / 22;
step = 1;
}
for (counter = 0; counter < abs(timing_offset); counter++) {
trl_nomrate += step;
stv0367_writebits(state, F367TER_TRL_NOMRATE_LSB,
trl_nomrate % 2);
stv0367_writebits(state, F367TER_TRL_NOMRATE_LO,
trl_nomrate / 2);
usleep_range(1000, 2000);
}
usleep_range(5000, 6000);
/* unlocks could happen in case of trl centring big step,
then a core off/on restarts demod */
u_var = stv0367_readbits(state, F367TER_LK);
if (!u_var) {
stv0367_writebits(state, F367TER_CORE_ACTIVE, 0);
msleep(20);
stv0367_writebits(state, F367TER_CORE_ACTIVE, 1);
}
return 0;
}
static int stv0367ter_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
/*u8 trials[2]; */
s8 num_trials, index;
u8 SenseTrials[] = { INVERSION_ON, INVERSION_OFF };
stv0367ter_init(fe);
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
switch (p->transmission_mode) {
default:
case TRANSMISSION_MODE_AUTO:
case TRANSMISSION_MODE_2K:
ter_state->mode = FE_TER_MODE_2K;
break;
/* case TRANSMISSION_MODE_4K:
pLook.mode = FE_TER_MODE_4K;
break;*/
case TRANSMISSION_MODE_8K:
ter_state->mode = FE_TER_MODE_8K;
break;
}
switch (p->guard_interval) {
default:
case GUARD_INTERVAL_1_32:
case GUARD_INTERVAL_1_16:
case GUARD_INTERVAL_1_8:
case GUARD_INTERVAL_1_4:
ter_state->guard = p->guard_interval;
break;
case GUARD_INTERVAL_AUTO:
ter_state->guard = GUARD_INTERVAL_1_32;
break;
}
switch (p->bandwidth_hz) {
case 6000000:
ter_state->bw = FE_TER_CHAN_BW_6M;
break;
case 7000000:
ter_state->bw = FE_TER_CHAN_BW_7M;
break;
case 8000000:
default:
ter_state->bw = FE_TER_CHAN_BW_8M;
}
ter_state->hierarchy = FE_TER_HIER_NONE;
switch (p->inversion) {
case INVERSION_OFF:
case INVERSION_ON:
num_trials = 1;
break;
default:
num_trials = 2;
if (ter_state->first_lock)
num_trials = 1;
break;
}
ter_state->state = FE_TER_NOLOCK;
index = 0;
while (((index) < num_trials) && (ter_state->state != FE_TER_LOCKOK)) {
if (!ter_state->first_lock) {
if (p->inversion == INVERSION_AUTO)
ter_state->sense = SenseTrials[index];
}
stv0367ter_algo(fe);
if ((ter_state->state == FE_TER_LOCKOK) &&
(p->inversion == INVERSION_AUTO) &&
(index == 1)) {
/* invert spectrum sense */
SenseTrials[index] = SenseTrials[0];
SenseTrials[(index + 1) % 2] = (SenseTrials[1] + 1) % 2;
}
index++;
}
return 0;
}
static int stv0367ter_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
u32 errs = 0;
/*wait for counting completion*/
if (stv0367_readbits(state, F367TER_SFERRC_OLDVALUE) == 0) {
errs =
((u32)stv0367_readbits(state, F367TER_ERR_CNT1)
* (1 << 16))
+ ((u32)stv0367_readbits(state, F367TER_ERR_CNT1_HI)
* (1 << 8))
+ ((u32)stv0367_readbits(state, F367TER_ERR_CNT1_LO));
ter_state->ucblocks = errs;
}
(*ucblocks) = ter_state->ucblocks;
return 0;
}
static int stv0367ter_get_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
int error = 0;
enum stv0367_ter_mode mode;
int constell = 0, /* snr = 0, */ Data = 0;
p->frequency = stv0367_get_tuner_freq(fe);
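/* stv0367_get_tuner_freq() returns a negative errno cast to u32
on failure, hence the sign fixup below */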
if ((int)p->frequency < 0)
p->frequency = -p->frequency;
constell = stv0367_readbits(state, F367TER_TPS_CONST);
if (constell == 0)
p->modulation = QPSK;
else if (constell == 1)
p->modulation = QAM_16;
else
p->modulation = QAM_64;
p->inversion = stv0367_readbits(state, F367TER_INV_SPECTR);
/* Get the Hierarchical mode */
Data = stv0367_readbits(state, F367TER_TPS_HIERMODE);
switch (Data) {
case 0:
p->hierarchy = HIERARCHY_NONE;
break;
case 1:
p->hierarchy = HIERARCHY_1;
break;
case 2:
p->hierarchy = HIERARCHY_2;
break;
case 3:
p->hierarchy = HIERARCHY_4;
break;
default:
p->hierarchy = HIERARCHY_AUTO;
break; /* error */
}
/* Get the FEC Rate */
if (ter_state->hierarchy == FE_TER_HIER_LOW_PRIO)
Data = stv0367_readbits(state, F367TER_TPS_LPCODE);
else
Data = stv0367_readbits(state, F367TER_TPS_HPCODE);
switch (Data) {
case 0:
p->code_rate_HP = FEC_1_2;
break;
case 1:
p->code_rate_HP = FEC_2_3;
break;
case 2:
p->code_rate_HP = FEC_3_4;
break;
case 3:
p->code_rate_HP = FEC_5_6;
break;
case 4:
p->code_rate_HP = FEC_7_8;
break;
default:
p->code_rate_HP = FEC_AUTO;
break; /* error */
}
mode = stv0367_readbits(state, F367TER_SYR_MODE);
switch (mode) {
case FE_TER_MODE_2K:
p->transmission_mode = TRANSMISSION_MODE_2K;
break;
/* case FE_TER_MODE_4K:
p->transmission_mode = TRANSMISSION_MODE_4K;
break;*/
case FE_TER_MODE_8K:
p->transmission_mode = TRANSMISSION_MODE_8K;
break;
default:
p->transmission_mode = TRANSMISSION_MODE_AUTO;
}
p->guard_interval = stv0367_readbits(state, F367TER_SYR_GUARD);
return error;
}
static int stv0367ter_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct stv0367_state *state = fe->demodulator_priv;
u32 snru32 = 0;
int cpt = 0;
u8 cut = stv0367_readbits(state, F367TER_IDENTIFICATIONREG);
while (cpt < 10) {
usleep_range(2000, 3000);
if (cut == 0x50) /*cut 1.0 cut 1.1*/
snru32 += stv0367_readbits(state, F367TER_CHCSNR) / 4;
else /* cut 2.0 */
snru32 += 125 * stv0367_readbits(state, F367TER_CHCSNR);
cpt++;
}
snru32 /= 10;/*average on 10 values*/
*snr = snru32 / 1000;
return 0;
}
#if 0
static int stv0367ter_status(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
int locked = 0;
locked = (stv0367_readbits(state, F367TER_LK));
if (!locked)
ter_state->unlock_counter += 1;
else
ter_state->unlock_counter = 0;
if (ter_state->unlock_counter > 2) {
if (!stv0367_readbits(state, F367TER_TPS_LOCK) ||
(!stv0367_readbits(state, F367TER_LK))) {
stv0367_writebits(state, F367TER_CORE_ACTIVE, 0);
usleep_range(2000, 3000);
stv0367_writebits(state, F367TER_CORE_ACTIVE, 1);
msleep(350);
locked = (stv0367_readbits(state, F367TER_TPS_LOCK)) &&
(stv0367_readbits(state, F367TER_LK));
}
}
return locked;
}
#endif
static int stv0367ter_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct stv0367_state *state = fe->demodulator_priv;
dprintk("%s:\n", __func__);
*status = 0;
if (stv0367_readbits(state, F367TER_LK)) {
*status |= FE_HAS_LOCK;
dprintk("%s: stv0367 has locked\n", __func__);
}
return 0;
}
static int stv0367ter_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367ter_state *ter_state = state->ter_state;
u32 Errors = 0, tber = 0, temporary = 0;
int abc = 0, def = 0;
/*wait for counting completion*/
if (stv0367_readbits(state, F367TER_SFERRC_OLDVALUE) == 0)
Errors = ((u32)stv0367_readbits(state, F367TER_SFEC_ERR_CNT)
* (1 << 16))
+ ((u32)stv0367_readbits(state, F367TER_SFEC_ERR_CNT_HI)
* (1 << 8))
+ ((u32)stv0367_readbits(state,
F367TER_SFEC_ERR_CNT_LO));
else {
/* measurement not completed, return the previous value */
*ber = ter_state->pBER;
return 0;
}
abc = stv0367_readbits(state, F367TER_SFEC_ERR_SOURCE);
def = stv0367_readbits(state, F367TER_SFEC_NUM_EVENT);
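/*
 * BER ~= Errors * 10^9 / (8 * 2^(2 * def + 10)); each rung of the
 * ladder below trades a smaller multiplier for a larger post-scale
 * so the 32-bit intermediate cannot overflow.
 */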
if (Errors == 0) {
tber = 0;
} else if (abc == 0x7) {
if (Errors <= 4) {
temporary = (Errors * 1000000000) / (8 * (1 << 14));
} else if (Errors <= 42) {
temporary = (Errors * 100000000) / (8 * (1 << 14));
temporary = temporary * 10;
} else if (Errors <= 429) {
temporary = (Errors * 10000000) / (8 * (1 << 14));
temporary = temporary * 100;
} else if (Errors <= 4294) {
temporary = (Errors * 1000000) / (8 * (1 << 14));
temporary = temporary * 1000;
} else if (Errors <= 42949) {
temporary = (Errors * 100000) / (8 * (1 << 14));
temporary = temporary * 10000;
} else if (Errors <= 429496) {
temporary = (Errors * 10000) / (8 * (1 << 14));
temporary = temporary * 100000;
} else { /*if (Errors<4294967) 2^22 max error*/
temporary = (Errors * 1000) / (8 * (1 << 14));
temporary = temporary * 100000; /* still to *10 */
}
/* Byte error*/
if (def == 2)
/*tber=Errors/(8*(1 <<14));*/
tber = temporary;
else if (def == 3)
/*tber=Errors/(8*(1 <<16));*/
tber = temporary / 4;
else if (def == 4)
/*tber=Errors/(8*(1 <<18));*/
tber = temporary / 16;
else if (def == 5)
/*tber=Errors/(8*(1 <<20));*/
tber = temporary / 64;
else if (def == 6)
/*tber=Errors/(8*(1 <<22));*/
tber = temporary / 256;
else
/* should not pass here*/
tber = 0;
if ((Errors < 4294967) && (Errors > 429496))
tber *= 10;
}
/* save actual value */
ter_state->pBER = tber;
(*ber) = tber;
return 0;
}
#if 0
static u32 stv0367ter_get_per(struct stv0367_state *state)
{
struct stv0367ter_state *ter_state = state->ter_state;
u32 Errors = 0, Per = 0, temporary = 0;
int abc = 0, def = 0, cpt = 0;
while (((stv0367_readbits(state, F367TER_SFERRC_OLDVALUE) == 1) &&
(cpt < 400)) || ((Errors == 0) && (cpt < 400))) {
usleep_range(1000, 2000);
Errors = ((u32)stv0367_readbits(state, F367TER_ERR_CNT1)
* (1 << 16))
+ ((u32)stv0367_readbits(state, F367TER_ERR_CNT1_HI)
* (1 << 8))
+ ((u32)stv0367_readbits(state, F367TER_ERR_CNT1_LO));
cpt++;
}
abc = stv0367_readbits(state, F367TER_ERR_SRC1);
def = stv0367_readbits(state, F367TER_NUM_EVT1);
if (Errors == 0)
Per = 0;
else if (abc == 0x9) {
if (Errors <= 4) {
temporary = (Errors * 1000000000) / (8 * (1 << 8));
} else if (Errors <= 42) {
temporary = (Errors * 100000000) / (8 * (1 << 8));
temporary = temporary * 10;
} else if (Errors <= 429) {
temporary = (Errors * 10000000) / (8 * (1 << 8));
temporary = temporary * 100;
} else if (Errors <= 4294) {
temporary = (Errors * 1000000) / (8 * (1 << 8));
temporary = temporary * 1000;
} else if (Errors <= 42949) {
temporary = (Errors * 100000) / (8 * (1 << 8));
temporary = temporary * 10000;
} else { /*if(Errors<=429496) 2^16 errors max*/
temporary = (Errors * 10000) / (8 * (1 << 8));
temporary = temporary * 100000;
}
/* pkt error*/
if (def == 2)
/*Per=Errors/(1 << 8);*/
Per = temporary;
else if (def == 3)
/*Per=Errors/(1 << 10);*/
Per = temporary / 4;
else if (def == 4)
/*Per=Errors/(1 << 12);*/
Per = temporary / 16;
else if (def == 5)
/*Per=Errors/(1 << 14);*/
Per = temporary / 64;
else if (def == 6)
/*Per=Errors/(1 << 16);*/
Per = temporary / 256;
else
Per = 0;
}
/* save actual value */
ter_state->pPER = Per;
return Per;
}
#endif
static int stv0367_get_tune_settings(struct dvb_frontend *fe,
struct dvb_frontend_tune_settings
*fe_tune_settings)
{
fe_tune_settings->min_delay_ms = 1000;
fe_tune_settings->step_size = 0;
fe_tune_settings->max_drift = 0;
return 0;
}
static void stv0367_release(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
kfree(state->ter_state);
kfree(state->cab_state);
kfree(state);
}
static struct dvb_frontend_ops stv0367ter_ops = {
.delsys = { SYS_DVBT },
.info = {
.name = "ST STV0367 DVB-T",
.frequency_min = 47000000,
.frequency_max = 862000000,
.frequency_stepsize = 15625,
.frequency_tolerance = 0,
.caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 |
FE_CAN_FEC_AUTO |
FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 |
FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_QAM_AUTO |
FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_RECOVER |
FE_CAN_INVERSION_AUTO |
FE_CAN_MUTE_TS
},
.release = stv0367_release,
.init = stv0367ter_init,
.sleep = stv0367ter_sleep,
.i2c_gate_ctrl = stv0367ter_gate_ctrl,
.set_frontend = stv0367ter_set_frontend,
.get_frontend = stv0367ter_get_frontend,
.get_tune_settings = stv0367_get_tune_settings,
.read_status = stv0367ter_read_status,
.read_ber = stv0367ter_read_ber, /* too slow */
/* .read_signal_strength = stv0367_read_signal_strength,*/
.read_snr = stv0367ter_read_snr,
.read_ucblocks = stv0367ter_read_ucblocks,
};
struct dvb_frontend *stv0367ter_attach(const struct stv0367_config *config,
struct i2c_adapter *i2c)
{
struct stv0367_state *state = NULL;
struct stv0367ter_state *ter_state = NULL;
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct stv0367_state), GFP_KERNEL);
if (state == NULL)
goto error;
ter_state = kzalloc(sizeof(struct stv0367ter_state), GFP_KERNEL);
if (ter_state == NULL)
goto error;
/* setup the state */
state->i2c = i2c;
state->config = config;
state->ter_state = ter_state;
state->fe.ops = stv0367ter_ops;
state->fe.demodulator_priv = state;
state->chip_id = stv0367_readreg(state, 0xf000);
dprintk("%s: chip_id = 0x%x\n", __func__, state->chip_id);
/* check if the demod is there */
if ((state->chip_id != 0x50) && (state->chip_id != 0x60))
goto error;
return &state->fe;
error:
kfree(ter_state);
kfree(state);
return NULL;
}
EXPORT_SYMBOL(stv0367ter_attach);
static int stv0367cab_gate_ctrl(struct dvb_frontend *fe, int enable)
{
struct stv0367_state *state = fe->demodulator_priv;
dprintk("%s:\n", __func__);
stv0367_writebits(state, F367CAB_I2CT_ON, (enable > 0) ? 1 : 0);
return 0;
}
static u32 stv0367cab_get_mclk(struct dvb_frontend *fe, u32 ExtClk_Hz)
{
struct stv0367_state *state = fe->demodulator_priv;
u32 mclk_Hz = 0;/* master clock frequency (Hz) */
u32 M, N, P;
if (stv0367_readbits(state, F367CAB_BYPASS_PLLXN) == 0) {
N = (u32)stv0367_readbits(state, F367CAB_PLL_NDIV);
if (N == 0)
N = N + 1;
M = (u32)stv0367_readbits(state, F367CAB_PLL_MDIV);
if (M == 0)
M = M + 1;
P = (u32)stv0367_readbits(state, F367CAB_PLL_PDIV);
if (P > 5)
P = 5;
mclk_Hz = ((ExtClk_Hz / 2) * N) / (M * (1 << P));
dprintk("stv0367cab_get_mclk BYPASS_PLLXN mclk_Hz=%d\n",
mclk_Hz);
} else
mclk_Hz = ExtClk_Hz;
dprintk("stv0367cab_get_mclk final mclk_Hz=%d\n", mclk_Hz);
return mclk_Hz;
}
static u32 stv0367cab_get_adc_freq(struct dvb_frontend *fe, u32 ExtClk_Hz)
{
u32 ADCClk_Hz = ExtClk_Hz;
ADCClk_Hz = stv0367cab_get_mclk(fe, ExtClk_Hz);
return ADCClk_Hz;
}
enum stv0367cab_mod stv0367cab_SetQamSize(struct stv0367_state *state,
u32 SymbolRate,
enum stv0367cab_mod QAMSize)
{
/* Set QAM size */
stv0367_writebits(state, F367CAB_QAM_MODE, QAMSize);
/* Set Registers settings specific to the QAM size */
switch (QAMSize) {
case FE_CAB_MOD_QAM4:
stv0367_writereg(state, R367CAB_IQDEM_ADJ_AGC_REF, 0x00);
break;
case FE_CAB_MOD_QAM16:
stv0367_writereg(state, R367CAB_AGC_PWR_REF_L, 0x64);
stv0367_writereg(state, R367CAB_IQDEM_ADJ_AGC_REF, 0x00);
stv0367_writereg(state, R367CAB_FSM_STATE, 0x90);
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xc1);
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xa7);
stv0367_writereg(state, R367CAB_EQU_CRL_LD_SEN, 0x95);
stv0367_writereg(state, R367CAB_EQU_CRL_LIMITER, 0x40);
stv0367_writereg(state, R367CAB_EQU_PNT_GAIN, 0x8a);
break;
case FE_CAB_MOD_QAM32:
stv0367_writereg(state, R367CAB_IQDEM_ADJ_AGC_REF, 0x00);
stv0367_writereg(state, R367CAB_AGC_PWR_REF_L, 0x6e);
stv0367_writereg(state, R367CAB_FSM_STATE, 0xb0);
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xc1);
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xb7);
stv0367_writereg(state, R367CAB_EQU_CRL_LD_SEN, 0x9d);
stv0367_writereg(state, R367CAB_EQU_CRL_LIMITER, 0x7f);
stv0367_writereg(state, R367CAB_EQU_PNT_GAIN, 0xa7);
break;
case FE_CAB_MOD_QAM64:
stv0367_writereg(state, R367CAB_IQDEM_ADJ_AGC_REF, 0x82);
stv0367_writereg(state, R367CAB_AGC_PWR_REF_L, 0x5a);
if (SymbolRate > 45000000) {
stv0367_writereg(state, R367CAB_FSM_STATE, 0xb0);
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xc1);
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xa5);
} else if (SymbolRate > 25000000) {
stv0367_writereg(state, R367CAB_FSM_STATE, 0xa0);
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xc1);
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xa6);
} else {
stv0367_writereg(state, R367CAB_FSM_STATE, 0xa0);
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xd1);
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xa7);
}
stv0367_writereg(state, R367CAB_EQU_CRL_LD_SEN, 0x95);
stv0367_writereg(state, R367CAB_EQU_CRL_LIMITER, 0x40);
stv0367_writereg(state, R367CAB_EQU_PNT_GAIN, 0x99);
break;
case FE_CAB_MOD_QAM128:
stv0367_writereg(state, R367CAB_IQDEM_ADJ_AGC_REF, 0x00);
stv0367_writereg(state, R367CAB_AGC_PWR_REF_L, 0x76);
stv0367_writereg(state, R367CAB_FSM_STATE, 0x90);
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xb1);
if (SymbolRate > 45000000)
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xa7);
else if (SymbolRate > 25000000)
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xa6);
else
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0x97);
stv0367_writereg(state, R367CAB_EQU_CRL_LD_SEN, 0x8e);
stv0367_writereg(state, R367CAB_EQU_CRL_LIMITER, 0x7f);
stv0367_writereg(state, R367CAB_EQU_PNT_GAIN, 0xa7);
break;
case FE_CAB_MOD_QAM256:
stv0367_writereg(state, R367CAB_IQDEM_ADJ_AGC_REF, 0x94);
stv0367_writereg(state, R367CAB_AGC_PWR_REF_L, 0x5a);
stv0367_writereg(state, R367CAB_FSM_STATE, 0xa0);
if (SymbolRate > 45000000)
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xc1);
else if (SymbolRate > 25000000)
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xc1);
else
stv0367_writereg(state, R367CAB_EQU_CTR_LPF_GAIN, 0xd1);
stv0367_writereg(state, R367CAB_EQU_CRL_LPF_GAIN, 0xa7);
stv0367_writereg(state, R367CAB_EQU_CRL_LD_SEN, 0x85);
stv0367_writereg(state, R367CAB_EQU_CRL_LIMITER, 0x40);
stv0367_writereg(state, R367CAB_EQU_PNT_GAIN, 0xa7);
break;
case FE_CAB_MOD_QAM512:
stv0367_writereg(state, R367CAB_IQDEM_ADJ_AGC_REF, 0x00);
break;
case FE_CAB_MOD_QAM1024:
stv0367_writereg(state, R367CAB_IQDEM_ADJ_AGC_REF, 0x00);
break;
default:
break;
}
return QAMSize;
}
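/*
 * Program the mixer NCO: increment = derot_hz / adc_hz * 2^23,
 * computed in kHz to stay within 32 bits and clamped to 2^23 - 1.
 */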
static u32 stv0367cab_set_derot_freq(struct stv0367_state *state,
u32 adc_hz, s32 derot_hz)
{
u32 sampled_if = 0;
u32 adc_khz;
adc_khz = adc_hz / 1000;
dprintk("%s: adc_hz=%d derot_hz=%d\n", __func__, adc_hz, derot_hz);
if (adc_khz != 0) {
if (derot_hz < 1000000)
derot_hz = adc_hz / 4; /* ZIF operation */
if (derot_hz > adc_hz)
derot_hz = derot_hz - adc_hz;
sampled_if = (u32)derot_hz / 1000;
sampled_if *= 32768;
sampled_if /= adc_khz;
sampled_if *= 256;
}
if (sampled_if > 8388607)
sampled_if = 8388607;
dprintk("%s: sampled_if=0x%x\n", __func__, sampled_if);
stv0367_writereg(state, R367CAB_MIX_NCO_LL, sampled_if);
stv0367_writereg(state, R367CAB_MIX_NCO_HL, (sampled_if >> 8));
stv0367_writebits(state, F367CAB_MIX_NCO_INC_HH, (sampled_if >> 16));
return derot_hz;
}
static u32 stv0367cab_get_derot_freq(struct stv0367_state *state, u32 adc_hz)
{
u32 sampled_if;
sampled_if = stv0367_readbits(state, F367CAB_MIX_NCO_INC_LL) +
(stv0367_readbits(state, F367CAB_MIX_NCO_INC_HL) << 8) +
(stv0367_readbits(state, F367CAB_MIX_NCO_INC_HH) << 16);
sampled_if /= 256;
sampled_if *= (adc_hz / 1000);
sampled_if += 1;
sampled_if /= 32768;
return sampled_if;
}
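/*
 * Program the SRC NCO and gain from the requested symbol rate.  All
 * four branches compute the same SymbolRate / (mclk / 2) ratio scaled
 * into the 32-bit NCO word; they differ only in how the shifts are
 * split before and after the division so the intermediates never
 * overflow as the symbol rate grows.
 */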
static u32 stv0367cab_set_srate(struct stv0367_state *state, u32 adc_hz,
u32 mclk_hz, u32 SymbolRate,
enum stv0367cab_mod QAMSize)
{
u32 QamSizeCorr = 0;
u32 u32_tmp = 0, u32_tmp1 = 0;
u32 adp_khz;
dprintk("%s:\n", __func__);
/* Set Correction factor of SRC gain */
switch (QAMSize) {
case FE_CAB_MOD_QAM4:
QamSizeCorr = 1110;
break;
case FE_CAB_MOD_QAM16:
QamSizeCorr = 1032;
break;
case FE_CAB_MOD_QAM32:
QamSizeCorr = 954;
break;
case FE_CAB_MOD_QAM64:
QamSizeCorr = 983;
break;
case FE_CAB_MOD_QAM128:
QamSizeCorr = 957;
break;
case FE_CAB_MOD_QAM256:
QamSizeCorr = 948;
break;
case FE_CAB_MOD_QAM512:
QamSizeCorr = 0;
break;
case FE_CAB_MOD_QAM1024:
QamSizeCorr = 944;
break;
default:
break;
}
/* Transfer ratio calculation */
if (adc_hz != 0) {
u32_tmp = 256 * SymbolRate;
u32_tmp = u32_tmp / adc_hz;
}
stv0367_writereg(state, R367CAB_EQU_CRL_TFR, (u8)u32_tmp);
/* Symbol rate and SRC gain calculation */
adp_khz = (mclk_hz >> 1) / 1000;/* TRL works at half the system clock */
if (adp_khz != 0) {
u32_tmp = SymbolRate;
u32_tmp1 = SymbolRate;
if (u32_tmp < 2097152) { /* 2097152 = 2^21 */
/* Symbol rate calculation */
u32_tmp *= 2048; /* 2048 = 2^11 */
u32_tmp = u32_tmp / adp_khz;
u32_tmp = u32_tmp * 16384; /* 16384 = 2^14 */
u32_tmp /= 125 ; /* 125 = 1000/2^3 */
u32_tmp = u32_tmp * 8; /* 8 = 2^3 */
/* SRC Gain Calculation */
u32_tmp1 *= 2048; /* *2*2^10 */
u32_tmp1 /= 439; /* *2/878 */
u32_tmp1 *= 256; /* *2^8 */
u32_tmp1 = u32_tmp1 / adp_khz; /* /(AdpClk in kHz) */
u32_tmp1 *= QamSizeCorr * 9; /* *1000*corr factor */
u32_tmp1 = u32_tmp1 / 10000000;
} else if (u32_tmp < 4194304) { /* 4194304 = 2**22 */
/* Symbol rate calculation */
u32_tmp *= 1024 ; /* 1024 = 2**10 */
u32_tmp = u32_tmp / adp_khz;
u32_tmp = u32_tmp * 16384; /* 16384 = 2**14 */
u32_tmp /= 125 ; /* 125 = 1000/2**3 */
u32_tmp = u32_tmp * 16; /* 16 = 2**4 */
/* SRC Gain Calculation */
u32_tmp1 *= 1024; /* *2*2^9 */
u32_tmp1 /= 439; /* *2/878 */
u32_tmp1 *= 256; /* *2^8 */
u32_tmp1 = u32_tmp1 / adp_khz; /* /(AdpClk in kHz)*/
u32_tmp1 *= QamSizeCorr * 9; /* *1000*corr factor */
u32_tmp1 = u32_tmp1 / 5000000;
} else if (u32_tmp < 8388607) { /* 8388607 = 2**23 */
/* Symbol rate calculation */
u32_tmp *= 512 ; /* 512 = 2**9 */
u32_tmp = u32_tmp / adp_khz;
u32_tmp = u32_tmp * 16384; /* 16384 = 2**14 */
u32_tmp /= 125 ; /* 125 = 1000/2**3 */
u32_tmp = u32_tmp * 32; /* 32 = 2**5 */
/* SRC Gain Calculation */
u32_tmp1 *= 512; /* *2*2^8 */
u32_tmp1 /= 439; /* *2/878 */
u32_tmp1 *= 256; /* *2^8 */
u32_tmp1 = u32_tmp1 / adp_khz; /* /(AdpClk in kHz) */
u32_tmp1 *= QamSizeCorr * 9; /* *1000*corr factor */
u32_tmp1 = u32_tmp1 / 2500000;
} else {
/* Symbol rate calculation */
u32_tmp *= 256 ; /* 256 = 2**8 */
u32_tmp = u32_tmp / adp_khz;
u32_tmp = u32_tmp * 16384; /* 16384 = 2**14 */
u32_tmp /= 125 ; /* 125 = 1000/2**3 */
u32_tmp = u32_tmp * 64; /* 64 = 2**6 */
/* SRC Gain Calculation */
u32_tmp1 *= 256; /* 2*2^7 */
u32_tmp1 /= 439; /* *2/878 */
u32_tmp1 *= 256; /* *2^8 */
u32_tmp1 = u32_tmp1 / adp_khz; /* /(AdpClk in kHz) */
u32_tmp1 *= QamSizeCorr * 9; /* *1000*corr factor */
u32_tmp1 = u32_tmp1 / 1250000;
}
}
#if 0
/* Filters' coefficients are calculated and written
into registers only if the filters are enabled */
if (stv0367_readbits(state, F367CAB_ADJ_EN)) {
stv0367cab_SetIirAdjacentcoefficient(state, mclk_hz,
SymbolRate);
/* AllPass filter must be enabled
when the adjacents filter is used */
stv0367_writebits(state, F367CAB_ALLPASSFILT_EN, 1);
stv0367cab_SetAllPasscoefficient(state, mclk_hz, SymbolRate);
} else
/* AllPass filter must be disabled
when the adjacents filter is not used */
#endif
stv0367_writebits(state, F367CAB_ALLPASSFILT_EN, 0);
stv0367_writereg(state, R367CAB_SRC_NCO_LL, u32_tmp);
stv0367_writereg(state, R367CAB_SRC_NCO_LH, (u32_tmp >> 8));
stv0367_writereg(state, R367CAB_SRC_NCO_HL, (u32_tmp >> 16));
stv0367_writereg(state, R367CAB_SRC_NCO_HH, (u32_tmp >> 24));
stv0367_writereg(state, R367CAB_IQDEM_GAIN_SRC_L, u32_tmp1 & 0x00ff);
stv0367_writebits(state, F367CAB_GAIN_SRC_HI, (u32_tmp1 >> 8) & 0x00ff);
return SymbolRate;
}
static u32 stv0367cab_GetSymbolRate(struct stv0367_state *state, u32 mclk_hz)
{
u32 regsym;
u32 adp_khz;
regsym = stv0367_readreg(state, R367CAB_SRC_NCO_LL) +
(stv0367_readreg(state, R367CAB_SRC_NCO_LH) << 8) +
(stv0367_readreg(state, R367CAB_SRC_NCO_HL) << 16) +
(stv0367_readreg(state, R367CAB_SRC_NCO_HH) << 24);
adp_khz = (mclk_hz >> 1) / 1000; /* TRL works at half the system clock */
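/*
 * regsym is the 32-bit SRC NCO word programmed by
 * stv0367cab_set_srate(), i.e. SymbolRate * 2^31 / (adp_khz * 1000).
 * The branches below invert that relation, again splitting the
 * shifts so no intermediate product overflows 32 bits.
 */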
if (regsym < 134217728) { /* 134217728L = 2**27*/
regsym = regsym * 32; /* 32 = 2**5 */
regsym = regsym / 32768; /* 32768L = 2**15 */
regsym = adp_khz * regsym; /* AdpClk in kHz */
regsym = regsym / 128; /* 128 = 2**7 */
regsym *= 125; /* 125 = 1000/2**3 */
regsym /= 2048; /* 2048 = 2**11 */
} else if (regsym < 268435456) { /* 268435456L = 2**28 */
regsym = regsym * 16; /* 16 = 2**4 */
regsym = regsym / 32768; /* 32768L = 2**15 */
regsym = adp_khz * regsym; /* AdpClk in kHz */
regsym = regsym / 128; /* 128 = 2**7 */
regsym *= 125; /* 125 = 1000/2**3 */
regsym /= 1024; /* 1024 = 2**10 */
} else if (regsym < 536870912) { /* 536870912L = 2**29*/
regsym = regsym * 8; /* 8 = 2**3 */
regsym = regsym / 32768; /* 32768L = 2**15 */
regsym = adp_khz * regsym; /* AdpClk in kHz */
regsym = regsym / 128; /* 128 = 2**7 */
regsym *= 125; /* 125 = 1000/2**3 */
regsym /= 512; /* 512 = 2**9 */
} else {
regsym = regsym * 4; /* 4 = 2**2 */
regsym = regsym / 32768; /* 32768L = 2**15 */
regsym = adp_khz * regsym; /* AdpClk in kHz */
regsym = regsym / 128; /* 128 = 2**7 */
regsym *= 125; /* 125 = 1000/2**3 */
regsym /= 256; /* 256 = 2**8 */
}
return regsym;
}
static int stv0367cab_read_status(struct dvb_frontend *fe, fe_status_t *status)
{
struct stv0367_state *state = fe->demodulator_priv;
dprintk("%s:\n", __func__);
*status = 0;
if (stv0367_readbits(state, F367CAB_QAMFEC_LOCK)) {
*status |= FE_HAS_LOCK;
dprintk("%s: stv0367 has locked\n", __func__);
}
return 0;
}
static int stv0367cab_standby(struct dvb_frontend *fe, u8 standby_on)
{
struct stv0367_state *state = fe->demodulator_priv;
dprintk("%s:\n", __func__);
if (standby_on) {
stv0367_writebits(state, F367CAB_BYPASS_PLLXN, 0x03);
stv0367_writebits(state, F367CAB_STDBY_PLLXN, 0x01);
stv0367_writebits(state, F367CAB_STDBY, 1);
stv0367_writebits(state, F367CAB_STDBY_CORE, 1);
stv0367_writebits(state, F367CAB_EN_BUFFER_I, 0);
stv0367_writebits(state, F367CAB_EN_BUFFER_Q, 0);
stv0367_writebits(state, F367CAB_POFFQ, 1);
stv0367_writebits(state, F367CAB_POFFI, 1);
} else {
stv0367_writebits(state, F367CAB_STDBY_PLLXN, 0x00);
stv0367_writebits(state, F367CAB_BYPASS_PLLXN, 0x00);
stv0367_writebits(state, F367CAB_STDBY, 0);
stv0367_writebits(state, F367CAB_STDBY_CORE, 0);
stv0367_writebits(state, F367CAB_EN_BUFFER_I, 1);
stv0367_writebits(state, F367CAB_EN_BUFFER_Q, 1);
stv0367_writebits(state, F367CAB_POFFQ, 0);
stv0367_writebits(state, F367CAB_POFFI, 0);
}
return 0;
}
static int stv0367cab_sleep(struct dvb_frontend *fe)
{
return stv0367cab_standby(fe, 1);
}
int stv0367cab_init(struct dvb_frontend *fe)
{
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367cab_state *cab_state = state->cab_state;
int i;
dprintk("%s:\n", __func__);
for (i = 0; i < STV0367CAB_NBREGS; i++)
stv0367_writereg(state, def0367cab[i].addr,
def0367cab[i].value);
switch (state->config->ts_mode) {
case STV0367_DVBCI_CLOCK:
dprintk("Setting TSMode = STV0367_DVBCI_CLOCK\n");
stv0367_writebits(state, F367CAB_OUTFORMAT, 0x03);
break;
case STV0367_SERIAL_PUNCT_CLOCK:
case STV0367_SERIAL_CONT_CLOCK:
stv0367_writebits(state, F367CAB_OUTFORMAT, 0x01);
break;
case STV0367_PARALLEL_PUNCT_CLOCK:
case STV0367_OUTPUTMODE_DEFAULT:
stv0367_writebits(state, F367CAB_OUTFORMAT, 0x00);
break;
}
switch (state->config->clk_pol) {
case STV0367_RISINGEDGE_CLOCK:
stv0367_writebits(state, F367CAB_CLK_POLARITY, 0x00);
break;
case STV0367_FALLINGEDGE_CLOCK:
case STV0367_CLOCKPOLARITY_DEFAULT:
stv0367_writebits(state, F367CAB_CLK_POLARITY, 0x01);
break;
}
stv0367_writebits(state, F367CAB_SYNC_STRIP, 0x00);
stv0367_writebits(state, F367CAB_CT_NBST, 0x01);
stv0367_writebits(state, F367CAB_TS_SWAP, 0x01);
stv0367_writebits(state, F367CAB_FIFO_BYPASS, 0x00);
stv0367_writereg(state, R367CAB_ANACTRL, 0x00); /* PLL enabled and used */
cab_state->mclk = stv0367cab_get_mclk(fe, state->config->xtal);
cab_state->adc_clk = stv0367cab_get_adc_freq(fe, state->config->xtal);
return 0;
}
static
enum stv0367_cab_signal_type stv0367cab_algo(struct stv0367_state *state,
struct dtv_frontend_properties *p)
{
struct stv0367cab_state *cab_state = state->cab_state;
enum stv0367_cab_signal_type signalType = FE_CAB_NOAGC;
u32 QAMFEC_Lock, QAM_Lock, u32_tmp,
LockTime, TRLTimeOut, AGCTimeOut, CRLSymbols,
CRLTimeOut, EQLTimeOut, DemodTimeOut, FECTimeOut;
u8 TrackAGCAccum;
s32 tmp;
dprintk("%s:\n", __func__);
/* Timeouts calculation */
/* A max lock time of 25 ms is allowed for delayed AGC */
AGCTimeOut = 25;
/* 100000 symbols needed by the TRL as a maximum value */
TRLTimeOut = 100000000 / p->symbol_rate;
/* CRLSymbols is the needed number of symbols to achieve a lock
within [-4%, +4%] of the symbol rate.
CRL timeout is calculated
for a lock within [-search_range, +search_range].
EQL timeout can be changed depending on
the micro-reflections we want to handle.
A characterization must be performed
with these echoes to get new timeout values.
*/
switch (p->modulation) {
case QAM_16:
CRLSymbols = 150000;
EQLTimeOut = 100;
break;
case QAM_32:
CRLSymbols = 250000;
EQLTimeOut = 100;
break;
case QAM_64:
CRLSymbols = 200000;
EQLTimeOut = 100;
break;
case QAM_128:
CRLSymbols = 250000;
EQLTimeOut = 100;
break;
case QAM_256:
CRLSymbols = 250000;
EQLTimeOut = 100;
break;
default:
CRLSymbols = 200000;
EQLTimeOut = 100;
break;
}
#if 0
if (pIntParams->search_range < 0) {
CRLTimeOut = (25 * CRLSymbols *
(-pIntParams->search_range / 1000)) /
(pIntParams->symbol_rate / 1000);
} else
#endif
CRLTimeOut = (25 * CRLSymbols * (cab_state->search_range / 1000)) /
(p->symbol_rate / 1000);
CRLTimeOut = (1000 * CRLTimeOut) / p->symbol_rate;
/* Timeouts below 50ms are coerced */
if (CRLTimeOut < 50)
CRLTimeOut = 50;
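/*
 * Worked example (for illustration only): with the default
 * search_range of 280000, CRLSymbols = 200000 and a symbol rate of
 * 6.875 Msps, the first expression gives 25 * 200000 * 280 / 6875
 * = 203636, and scaling by 1000 / symbol_rate leaves about 29 ms,
 * which the test above coerces up to the 50 ms floor.
 */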
/* A maximum of 100 TS packets is needed to get FEC lock even in case
the spectrum inversion needs to be changed.
This is equal to 20 ms in case of the lowest symbol rate of 0.87Msps
*/
FECTimeOut = 20;
DemodTimeOut = AGCTimeOut + TRLTimeOut + CRLTimeOut + EQLTimeOut;
dprintk("%s: DemodTimeOut=%d\n", __func__, DemodTimeOut);
/* Reset the TRL to ensure nothing starts until the
AGC is stable which ensures a better lock time
*/
stv0367_writereg(state, R367CAB_CTRL_1, 0x04);
/* Set AGC accumulation time to minimum and lock threshold to maximum
in order to speed up the AGC lock */
TrackAGCAccum = stv0367_readbits(state, F367CAB_AGC_ACCUMRSTSEL);
stv0367_writebits(state, F367CAB_AGC_ACCUMRSTSEL, 0x0);
/* Modulus Mapper is disabled */
stv0367_writebits(state, F367CAB_MODULUSMAP_EN, 0);
/* Disable the sweep function */
stv0367_writebits(state, F367CAB_SWEEP_EN, 0);
/* The sweep function is never used, Sweep rate must be set to 0 */
/* Set the derotator frequency in Hz */
stv0367cab_set_derot_freq(state, cab_state->adc_clk,
(1000 * (s32)state->config->if_khz + cab_state->derot_offset));
/* Disable the Allpass Filter when the symbol rate is out of range */
if ((p->symbol_rate > 10800000) || (p->symbol_rate < 1800000)) {
stv0367_writebits(state, F367CAB_ADJ_EN, 0);
stv0367_writebits(state, F367CAB_ALLPASSFILT_EN, 0);
}
#if 0
/* Check if the tuner is locked */
tuner_lock = stv0367cab_tuner_get_status(fe);
if (tuner_lock == 0)
return FE_367CAB_NOTUNER;
#endif
/* Release the TRL to start demodulator acquisition */
/* Wait for QAM lock */
LockTime = 0;
stv0367_writereg(state, R367CAB_CTRL_1, 0x00);
do {
QAM_Lock = stv0367_readbits(state, F367CAB_FSM_STATUS);
if ((LockTime >= (DemodTimeOut - EQLTimeOut)) &&
(QAM_Lock == 0x04))
/*
* We don't wait longer, the frequency/phase offset
* must be too big
*/
LockTime = DemodTimeOut;
else if ((LockTime >= (AGCTimeOut + TRLTimeOut)) &&
(QAM_Lock == 0x02))
/*
* We don't wait longer, either there is no signal or
* it is not the right symbol rate or it is an analog
* carrier
*/
{
LockTime = DemodTimeOut;
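/*
 * Estimate the received power: fold the 18-bit two's-complement
 * AGC power word to its magnitude, scale it by the selected IF
 * bandwidth, and compare against the programmed AGC reference.
 * Power well below the reference means no carrier at all, which
 * is reported as QAM_Lock = 0x0f (no signal).
 */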
u32_tmp = stv0367_readbits(state,
F367CAB_AGC_PWR_WORD_LO) +
(stv0367_readbits(state,
F367CAB_AGC_PWR_WORD_ME) << 8) +
(stv0367_readbits(state,
F367CAB_AGC_PWR_WORD_HI) << 16);
if (u32_tmp >= 131072)
u32_tmp = 262144 - u32_tmp;
u32_tmp = u32_tmp / (1 << (11 - stv0367_readbits(state,
F367CAB_AGC_IF_BWSEL)));
if (u32_tmp < stv0367_readbits(state,
F367CAB_AGC_PWRREF_LO) +
256 * stv0367_readbits(state,
F367CAB_AGC_PWRREF_HI) - 10)
QAM_Lock = 0x0f;
} else {
usleep_range(10000, 20000);
LockTime += 10;
}
dprintk("QAM_Lock=0x%x LockTime=%d\n", QAM_Lock, LockTime);
tmp = stv0367_readreg(state, R367CAB_IT_STATUS1);
dprintk("R367CAB_IT_STATUS1=0x%x\n", tmp);
} while (((QAM_Lock != 0x0c) && (QAM_Lock != 0x0b)) &&
(LockTime < DemodTimeOut));
dprintk("QAM_Lock=0x%x\n", QAM_Lock);
tmp = stv0367_readreg(state, R367CAB_IT_STATUS1);
dprintk("R367CAB_IT_STATUS1=0x%x\n", tmp);
tmp = stv0367_readreg(state, R367CAB_IT_STATUS2);
dprintk("R367CAB_IT_STATUS2=0x%x\n", tmp);
tmp = stv0367cab_get_derot_freq(state, cab_state->adc_clk);
dprintk("stv0367cab_get_derot_freq=0x%x\n", tmp);
if ((QAM_Lock == 0x0c) || (QAM_Lock == 0x0b)) {
/* Wait for FEC lock */
LockTime = 0;
do {
usleep_range(5000, 7000);
LockTime += 5;
QAMFEC_Lock = stv0367_readbits(state,
F367CAB_QAMFEC_LOCK);
} while (!QAMFEC_Lock && (LockTime < FECTimeOut));
} else
QAMFEC_Lock = 0;
if (QAMFEC_Lock) {
signalType = FE_CAB_DATAOK;
cab_state->modulation = p->modulation;
cab_state->spect_inv = stv0367_readbits(state,
F367CAB_QUAD_INV);
#if 0
/* not clear to me */
if (state->config->if_khz != 0) {
if (state->config->if_khz > cab_state->adc_clk / 1000) {
cab_state->freq_khz =
FE_Cab_TunerGetFrequency(pIntParams->hTuner)
- stv0367cab_get_derot_freq(state, cab_state->adc_clk)
- cab_state->adc_clk / 1000 + state->config->if_khz;
} else {
cab_state->freq_khz =
FE_Cab_TunerGetFrequency(pIntParams->hTuner)
- stv0367cab_get_derot_freq(state, cab_state->adc_clk)
+ state->config->if_khz;
}
} else {
cab_state->freq_khz =
FE_Cab_TunerGetFrequency(pIntParams->hTuner) +
stv0367cab_get_derot_freq(state,
cab_state->adc_clk) -
cab_state->adc_clk / 4000;
}
#endif
cab_state->symbol_rate = stv0367cab_GetSymbolRate(state,
cab_state->mclk);
cab_state->locked = 1;
/* stv0367_setbits(state, F367CAB_AGC_ACCUMRSTSEL,7);*/
} else {
switch (QAM_Lock) {
case 1:
signalType = FE_CAB_NOAGC;
break;
case 2:
signalType = FE_CAB_NOTIMING;
break;
case 3:
signalType = FE_CAB_TIMINGOK;
break;
case 4:
signalType = FE_CAB_NOCARRIER;
break;
case 5:
signalType = FE_CAB_CARRIEROK;
break;
case 7:
signalType = FE_CAB_NOBLIND;
break;
case 8:
signalType = FE_CAB_BLINDOK;
break;
case 10:
signalType = FE_CAB_NODEMOD;
break;
case 11:
signalType = FE_CAB_DEMODOK;
break;
case 12:
signalType = FE_CAB_DEMODOK;
break;
case 13:
signalType = FE_CAB_NODEMOD;
break;
case 14:
signalType = FE_CAB_NOBLIND;
break;
case 15:
signalType = FE_CAB_NOSIGNAL;
break;
default:
break;
}
}
/* Set the AGC control values to tracking values */
stv0367_writebits(state, F367CAB_AGC_ACCUMRSTSEL, TrackAGCAccum);
return signalType;
}
static int stv0367cab_set_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367cab_state *cab_state = state->cab_state;
enum stv0367cab_mod QAMSize = 0;
dprintk("%s: freq = %d, srate = %d\n", __func__,
p->frequency, p->symbol_rate);
cab_state->derot_offset = 0;
switch (p->modulation) {
case QAM_16:
QAMSize = FE_CAB_MOD_QAM16;
break;
case QAM_32:
QAMSize = FE_CAB_MOD_QAM32;
break;
case QAM_64:
QAMSize = FE_CAB_MOD_QAM64;
break;
case QAM_128:
QAMSize = FE_CAB_MOD_QAM128;
break;
case QAM_256:
QAMSize = FE_CAB_MOD_QAM256;
break;
default:
break;
}
stv0367cab_init(fe);
/* Tuner Frequency Setting */
if (fe->ops.tuner_ops.set_params) {
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 1);
fe->ops.tuner_ops.set_params(fe);
if (fe->ops.i2c_gate_ctrl)
fe->ops.i2c_gate_ctrl(fe, 0);
}
stv0367cab_SetQamSize(
state,
p->symbol_rate,
QAMSize);
stv0367cab_set_srate(state,
cab_state->adc_clk,
cab_state->mclk,
p->symbol_rate,
QAMSize);
/* Search algorithm launch, [-1.1*RangeOffset, +1.1*RangeOffset] scan */
cab_state->state = stv0367cab_algo(state, p);
return 0;
}
static int stv0367cab_get_frontend(struct dvb_frontend *fe)
{
struct dtv_frontend_properties *p = &fe->dtv_property_cache;
struct stv0367_state *state = fe->demodulator_priv;
struct stv0367cab_state *cab_state = state->cab_state;
enum stv0367cab_mod QAMSize;
dprintk("%s:\n", __func__);
p->symbol_rate = stv0367cab_GetSymbolRate(state, cab_state->mclk);
QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE);
switch (QAMSize) {
case FE_CAB_MOD_QAM16:
p->modulation = QAM_16;
break;
case FE_CAB_MOD_QAM32:
p->modulation = QAM_32;
break;
case FE_CAB_MOD_QAM64:
p->modulation = QAM_64;
break;
case FE_CAB_MOD_QAM128:
p->modulation = QAM_128;
break;
case FE_CAB_MOD_QAM256:
p->modulation = QAM_256;
break;
default:
break;
}
p->frequency = stv0367_get_tuner_freq(fe);
dprintk("%s: tuner frequency = %d\n", __func__, p->frequency);
if (state->config->if_khz == 0) {
p->frequency +=
(stv0367cab_get_derot_freq(state, cab_state->adc_clk) -
cab_state->adc_clk / 4000);
return 0;
}
if (state->config->if_khz > cab_state->adc_clk / 1000)
p->frequency += (state->config->if_khz
- stv0367cab_get_derot_freq(state, cab_state->adc_clk)
- cab_state->adc_clk / 1000);
else
p->frequency += (state->config->if_khz
- stv0367cab_get_derot_freq(state, cab_state->adc_clk));
return 0;
}
#if 0
void stv0367cab_GetErrorCount(state, enum stv0367cab_mod QAMSize,
u32 symbol_rate, FE_367qam_Monitor *Monitor_results)
{
stv0367cab_OptimiseNByteAndGetBER(state, QAMSize, symbol_rate, Monitor_results);
stv0367cab_GetPacketsCount(state, Monitor_results);
return;
}
static int stv0367cab_read_ber(struct dvb_frontend *fe, u32 *ber)
{
struct stv0367_state *state = fe->demodulator_priv;
return 0;
}
#endif
static s32 stv0367cab_get_rf_lvl(struct stv0367_state *state)
{
s32 rfLevel = 0;
s32 RfAgcPwm = 0, IfAgcPwm = 0;
u8 i;
stv0367_writebits(state, F367CAB_STDBY_ADCGP, 0x0);
RfAgcPwm =
(stv0367_readbits(state, F367CAB_RF_AGC1_LEVEL_LO) & 0x03) +
(stv0367_readbits(state, F367CAB_RF_AGC1_LEVEL_HI) << 2);
RfAgcPwm = 100 * RfAgcPwm / 1023;
IfAgcPwm =
stv0367_readbits(state, F367CAB_AGC_IF_PWMCMD_LO) +
(stv0367_readbits(state, F367CAB_AGC_IF_PWMCMD_HI) << 8);
if (IfAgcPwm >= 2048)
IfAgcPwm -= 2048;
else
IfAgcPwm += 2048;
IfAgcPwm = 100 * IfAgcPwm / 4095;
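/*
 * The IF AGC PWM command word is effectively offset binary: flipping
 * the top bit (+/- 2048) recentres it to a monotonic 0..4095 ramp
 * before it is rescaled to a 0..100 percentage.
 */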
/* For DTT75467 on NIM */
if (RfAgcPwm < 90 && IfAgcPwm < 28) {
for (i = 0; i < RF_LOOKUP_TABLE_SIZE; i++) {
if (RfAgcPwm <= stv0367cab_RF_LookUp1[0][i]) {
rfLevel = (-1) * stv0367cab_RF_LookUp1[1][i];
break;
}
}
if (i == RF_LOOKUP_TABLE_SIZE)
rfLevel = -56;
} else { /*if IF AGC>10*/
for (i = 0; i < RF_LOOKUP_TABLE2_SIZE; i++) {
if (IfAgcPwm <= stv0367cab_RF_LookUp2[0][i]) {
rfLevel = (-1) * stv0367cab_RF_LookUp2[1][i];
break;
}
}
if (i == RF_LOOKUP_TABLE2_SIZE)
rfLevel = -72;
}
return rfLevel;
}
static int stv0367cab_read_strength(struct dvb_frontend *fe, u16 *strength)
{
struct stv0367_state *state = fe->demodulator_priv;
s32 signal = stv0367cab_get_rf_lvl(state);
dprintk("%s: signal=%d dBm\n", __func__, signal);
if (signal <= -72)
*strength = 65535;
else
*strength = (22 + signal) * (-1311);
dprintk("%s: strength=%d\n", __func__, (*strength));
return 0;
}
static int stv0367cab_read_snr(struct dvb_frontend *fe, u16 *snr)
{
struct stv0367_state *state = fe->demodulator_priv;
u32 noisepercentage;
enum stv0367cab_mod QAMSize;
u32 regval = 0, temp = 0;
int power, i;
QAMSize = stv0367_readbits(state, F367CAB_QAM_MODE);
switch (QAMSize) {
case FE_CAB_MOD_QAM4:
power = 21904;
break;
case FE_CAB_MOD_QAM16:
power = 20480;
break;
case FE_CAB_MOD_QAM32:
power = 23040;
break;
case FE_CAB_MOD_QAM64:
power = 21504;
break;
case FE_CAB_MOD_QAM128:
power = 23616;
break;
case FE_CAB_MOD_QAM256:
power = 21760;
break;
case FE_CAB_MOD_QAM512:
power = 1;
break;
case FE_CAB_MOD_QAM1024:
power = 21280;
break;
default:
power = 1;
break;
}
for (i = 0; i < 10; i++) {
regval += (stv0367_readbits(state, F367CAB_SNR_LO)
+ 256 * stv0367_readbits(state, F367CAB_SNR_HI));
}
regval /= 10; /*for average over 10 times in for loop above*/
if (regval != 0) {
temp = power
* (1 << (3 + stv0367_readbits(state, F367CAB_SNR_PER)));
temp /= regval;
}
/* Table values approximate 10^(C/N[dB]/10), so this ladder maps
roughly 21 dB to 37 dB of C/N onto a percentage without needing
to calculate logarithms at run time. */
if (temp >= 5012)
noisepercentage = 100;
else if (temp >= 3981)
noisepercentage = 93;
else if (temp >= 3162)
noisepercentage = 86;
else if (temp >= 2512)
noisepercentage = 79;
else if (temp >= 1995)
noisepercentage = 72;
else if (temp >= 1585)
noisepercentage = 65;
else if (temp >= 1259)
noisepercentage = 58;
else if (temp >= 1000)
noisepercentage = 50;
else if (temp >= 794)
noisepercentage = 43;
else if (temp >= 501)
noisepercentage = 36;
else if (temp >= 316)
noisepercentage = 29;
else if (temp >= 200)
noisepercentage = 22;
else if (temp >= 158)
noisepercentage = 14;
else if (temp >= 126)
noisepercentage = 7;
else
noisepercentage = 0;
dprintk("%s: noisepercentage=%d\n", __func__, noisepercentage);
*snr = (noisepercentage * 65535) / 100;
return 0;
}
static int stv0367cab_read_ucblcks(struct dvb_frontend *fe, u32 *ucblocks)
{
struct stv0367_state *state = fe->demodulator_priv;
int corrected, tscount;
*ucblocks = (stv0367_readreg(state, R367CAB_RS_COUNTER_5) << 8)
| stv0367_readreg(state, R367CAB_RS_COUNTER_4);
corrected = (stv0367_readreg(state, R367CAB_RS_COUNTER_3) << 8)
| stv0367_readreg(state, R367CAB_RS_COUNTER_2);
tscount = (stv0367_readreg(state, R367CAB_RS_COUNTER_2) << 8)
| stv0367_readreg(state, R367CAB_RS_COUNTER_1);
dprintk("%s: uncorrected blocks=%d corrected blocks=%d tscount=%d\n",
__func__, *ucblocks, corrected, tscount);
return 0;
}
static struct dvb_frontend_ops stv0367cab_ops = {
.delsys = { SYS_DVBC_ANNEX_A },
.info = {
.name = "ST STV0367 DVB-C",
.frequency_min = 47000000,
.frequency_max = 862000000,
.frequency_stepsize = 62500,
.symbol_rate_min = 870000,
.symbol_rate_max = 11700000,
.caps = 0x400 | /* FE_CAN_QAM_4 */
FE_CAN_QAM_16 | FE_CAN_QAM_32 |
FE_CAN_QAM_64 | FE_CAN_QAM_128 |
FE_CAN_QAM_256 | FE_CAN_FEC_AUTO
},
.release = stv0367_release,
.init = stv0367cab_init,
.sleep = stv0367cab_sleep,
.i2c_gate_ctrl = stv0367cab_gate_ctrl,
.set_frontend = stv0367cab_set_frontend,
.get_frontend = stv0367cab_get_frontend,
.read_status = stv0367cab_read_status,
/* .read_ber = stv0367cab_read_ber, */
.read_signal_strength = stv0367cab_read_strength,
.read_snr = stv0367cab_read_snr,
.read_ucblocks = stv0367cab_read_ucblcks,
.get_tune_settings = stv0367_get_tune_settings,
};
struct dvb_frontend *stv0367cab_attach(const struct stv0367_config *config,
struct i2c_adapter *i2c)
{
struct stv0367_state *state = NULL;
struct stv0367cab_state *cab_state = NULL;
/* allocate memory for the internal state */
state = kzalloc(sizeof(struct stv0367_state), GFP_KERNEL);
if (state == NULL)
goto error;
cab_state = kzalloc(sizeof(struct stv0367cab_state), GFP_KERNEL);
if (cab_state == NULL)
goto error;
/* setup the state */
state->i2c = i2c;
state->config = config;
cab_state->search_range = 280000;
state->cab_state = cab_state;
state->fe.ops = stv0367cab_ops;
state->fe.demodulator_priv = state;
state->chip_id = stv0367_readreg(state, 0xf000);
dprintk("%s: chip_id = 0x%x\n", __func__, state->chip_id);
/* check if the demod is there */
if ((state->chip_id != 0x50) && (state->chip_id != 0x60))
goto error;
return &state->fe;
error:
kfree(cab_state);
kfree(state);
return NULL;
}
EXPORT_SYMBOL(stv0367cab_attach);
MODULE_PARM_DESC(debug, "Set debug");
MODULE_PARM_DESC(i2c_debug, "Set i2c debug");
MODULE_AUTHOR("Igor M. Liplianin");
MODULE_DESCRIPTION("ST STV0367 DVB-C/T demodulator driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
vDorst/linux | drivers/leds/leds-renesas-tpu.c | 4948 | 9147 | /*
* LED control using Renesas TPU
*
* Copyright (C) 2011 Magnus Damm
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/printk.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/leds.h>
#include <linux/platform_data/leds-renesas-tpu.h>
#include <linux/gpio.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/workqueue.h>
enum r_tpu_pin { R_TPU_PIN_UNUSED, R_TPU_PIN_GPIO, R_TPU_PIN_GPIO_FN };
enum r_tpu_timer { R_TPU_TIMER_UNUSED, R_TPU_TIMER_ON };
struct r_tpu_priv {
struct led_classdev ldev;
void __iomem *mapbase;
struct clk *clk;
struct platform_device *pdev;
enum r_tpu_pin pin_state;
enum r_tpu_timer timer_state;
unsigned long min_rate;
unsigned int refresh_rate;
struct work_struct work;
enum led_brightness new_brightness;
};
static DEFINE_SPINLOCK(r_tpu_lock);
#define TSTR -1 /* Timer start register (shared register) */
#define TCR 0 /* Timer control register (+0x00) */
#define TMDR 1 /* Timer mode register (+0x04) */
#define TIOR 2 /* Timer I/O control register (+0x08) */
#define TIER 3 /* Timer interrupt enable register (+0x0c) */
#define TSR 4 /* Timer status register (+0x10) */
#define TCNT 5 /* Timer counter (+0x14) */
#define TGRA 6 /* Timer general register A (+0x18) */
#define TGRB 7 /* Timer general register B (+0x1c) */
#define TGRC 8 /* Timer general register C (+0x20) */
#define TGRD 9 /* Timer general register D (+0x24) */
static inline unsigned short r_tpu_read(struct r_tpu_priv *p, int reg_nr)
{
struct led_renesas_tpu_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs = reg_nr << 2;
if (reg_nr == TSTR)
return ioread16(base - cfg->channel_offset);
return ioread16(base + offs);
}
static inline void r_tpu_write(struct r_tpu_priv *p, int reg_nr,
unsigned short value)
{
struct led_renesas_tpu_config *cfg = p->pdev->dev.platform_data;
void __iomem *base = p->mapbase;
unsigned long offs = reg_nr << 2;
if (reg_nr == TSTR) {
iowrite16(value, base - cfg->channel_offset);
return;
}
iowrite16(value, base + offs);
}
static void r_tpu_start_stop_ch(struct r_tpu_priv *p, int start)
{
struct led_renesas_tpu_config *cfg = p->pdev->dev.platform_data;
unsigned long flags, value;
/* start stop register shared by multiple timer channels */
spin_lock_irqsave(&r_tpu_lock, flags);
value = r_tpu_read(p, TSTR);
if (start)
value |= 1 << cfg->timer_bit;
else
value &= ~(1 << cfg->timer_bit);
r_tpu_write(p, TSTR, value);
spin_unlock_irqrestore(&r_tpu_lock, flags);
}
static int r_tpu_enable(struct r_tpu_priv *p, enum led_brightness brightness)
{
struct led_renesas_tpu_config *cfg = p->pdev->dev.platform_data;
int prescaler[] = { 1, 4, 16, 64 };
int k, ret;
unsigned long rate, tmp;
if (p->timer_state == R_TPU_TIMER_ON)
return 0;
/* wake up device and enable clock */
pm_runtime_get_sync(&p->pdev->dev);
ret = clk_enable(p->clk);
if (ret) {
dev_err(&p->pdev->dev, "cannot enable clock\n");
return ret;
}
/* make sure channel is disabled */
r_tpu_start_stop_ch(p, 0);
/* get clock rate after enabling it */
rate = clk_get_rate(p->clk);
/* pick the lowest acceptable rate */
for (k = 0; k < ARRAY_SIZE(prescaler); k++)
if ((rate / prescaler[k]) < p->min_rate)
break;
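/*
 * k now indexes the first prescaler whose rate fell below min_rate
 * (max_brightness * refresh_rate, see probe), so prescaler[k - 1] is
 * the slowest clock that still provides at least one counter step per
 * brightness level.
 */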
if (!k) {
dev_err(&p->pdev->dev, "clock rate mismatch\n");
goto err0;
}
dev_dbg(&p->pdev->dev, "rate = %lu, prescaler %u\n",
rate, prescaler[k - 1]);
/* clear TCNT on TGRB match, count on rising edge, set prescaler */
r_tpu_write(p, TCR, 0x0040 | (k - 1));
/* output 0 until TGRA, output 1 until TGRB */
r_tpu_write(p, TIOR, 0x0002);
rate /= prescaler[k - 1] * p->refresh_rate;
r_tpu_write(p, TGRB, rate);
dev_dbg(&p->pdev->dev, "TRGB = 0x%04lx\n", rate);
tmp = (cfg->max_brightness - brightness) * rate;
r_tpu_write(p, TGRA, tmp / cfg->max_brightness);
dev_dbg(&p->pdev->dev, "TRGA = 0x%04lx\n", tmp / cfg->max_brightness);
/* PWM mode */
r_tpu_write(p, TMDR, 0x0002);
/* enable channel */
r_tpu_start_stop_ch(p, 1);
p->timer_state = R_TPU_TIMER_ON;
return 0;
err0:
clk_disable(p->clk);
pm_runtime_put_sync(&p->pdev->dev);
return -ENOTSUPP;
}
static void r_tpu_disable(struct r_tpu_priv *p)
{
if (p->timer_state == R_TPU_TIMER_UNUSED)
return;
/* disable channel */
r_tpu_start_stop_ch(p, 0);
/* stop clock and mark device as idle */
clk_disable(p->clk);
pm_runtime_put_sync(&p->pdev->dev);
p->timer_state = R_TPU_TIMER_UNUSED;
}
static void r_tpu_set_pin(struct r_tpu_priv *p, enum r_tpu_pin new_state,
enum led_brightness brightness)
{
struct led_renesas_tpu_config *cfg = p->pdev->dev.platform_data;
if (p->pin_state == new_state) {
if (p->pin_state == R_TPU_PIN_GPIO)
gpio_set_value(cfg->pin_gpio, brightness);
return;
}
if (p->pin_state == R_TPU_PIN_GPIO)
gpio_free(cfg->pin_gpio);
if (p->pin_state == R_TPU_PIN_GPIO_FN)
gpio_free(cfg->pin_gpio_fn);
if (new_state == R_TPU_PIN_GPIO) {
gpio_request(cfg->pin_gpio, cfg->name);
gpio_direction_output(cfg->pin_gpio, !!brightness);
}
if (new_state == R_TPU_PIN_GPIO_FN)
gpio_request(cfg->pin_gpio_fn, cfg->name);
p->pin_state = new_state;
}
static void r_tpu_work(struct work_struct *work)
{
struct r_tpu_priv *p = container_of(work, struct r_tpu_priv, work);
enum led_brightness brightness = p->new_brightness;
r_tpu_disable(p);
/* off and maximum are handled as GPIO pins, in between PWM */
if ((brightness == 0) || (brightness == p->ldev.max_brightness))
r_tpu_set_pin(p, R_TPU_PIN_GPIO, brightness);
else {
r_tpu_set_pin(p, R_TPU_PIN_GPIO_FN, 0);
r_tpu_enable(p, brightness);
}
}
static void r_tpu_set_brightness(struct led_classdev *ldev,
enum led_brightness brightness)
{
struct r_tpu_priv *p = container_of(ldev, struct r_tpu_priv, ldev);
p->new_brightness = brightness;
schedule_work(&p->work);
}
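/*
 * brightness_set can be invoked from atomic context, while
 * reconfiguring the timer may sleep (clk_enable, pm_runtime), hence
 * the deferral to the work item above.
 */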
static int __devinit r_tpu_probe(struct platform_device *pdev)
{
struct led_renesas_tpu_config *cfg = pdev->dev.platform_data;
struct r_tpu_priv *p;
struct resource *res;
int ret = -ENXIO;
if (!cfg) {
dev_err(&pdev->dev, "missing platform data\n");
goto err0;
}
p = kzalloc(sizeof(*p), GFP_KERNEL);
if (p == NULL) {
dev_err(&pdev->dev, "failed to allocate driver data\n");
ret = -ENOMEM;
goto err0;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
dev_err(&pdev->dev, "failed to get I/O memory\n");
goto err1;
}
/* map memory, let mapbase point to our channel */
p->mapbase = ioremap_nocache(res->start, resource_size(res));
if (p->mapbase == NULL) {
dev_err(&pdev->dev, "failed to remap I/O memory\n");
goto err1;
}
/* get hold of clock */
p->clk = clk_get(&pdev->dev, NULL);
if (IS_ERR(p->clk)) {
dev_err(&pdev->dev, "cannot get clock\n");
ret = PTR_ERR(p->clk);
goto err2;
}
p->pdev = pdev;
p->pin_state = R_TPU_PIN_UNUSED;
p->timer_state = R_TPU_TIMER_UNUSED;
p->refresh_rate = cfg->refresh_rate ? cfg->refresh_rate : 100;
r_tpu_set_pin(p, R_TPU_PIN_GPIO, LED_OFF);
platform_set_drvdata(pdev, p);
INIT_WORK(&p->work, r_tpu_work);
p->ldev.name = cfg->name;
p->ldev.brightness = LED_OFF;
p->ldev.max_brightness = cfg->max_brightness;
p->ldev.brightness_set = r_tpu_set_brightness;
p->ldev.flags |= LED_CORE_SUSPENDRESUME;
ret = led_classdev_register(&pdev->dev, &p->ldev);
if (ret < 0)
goto err3;
/* max_brightness may be updated by the LED core code */
p->min_rate = p->ldev.max_brightness * p->refresh_rate;
pm_runtime_enable(&pdev->dev);
return 0;
err3:
r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
clk_put(p->clk);
err2:
iounmap(p->mapbase);
err1:
kfree(p);
err0:
return ret;
}
static int __devexit r_tpu_remove(struct platform_device *pdev)
{
struct r_tpu_priv *p = platform_get_drvdata(pdev);
r_tpu_set_brightness(&p->ldev, LED_OFF);
led_classdev_unregister(&p->ldev);
cancel_work_sync(&p->work);
r_tpu_disable(p);
r_tpu_set_pin(p, R_TPU_PIN_UNUSED, LED_OFF);
pm_runtime_disable(&pdev->dev);
clk_put(p->clk);
iounmap(p->mapbase);
kfree(p);
return 0;
}
static struct platform_driver r_tpu_device_driver = {
.probe = r_tpu_probe,
.remove = __devexit_p(r_tpu_remove),
.driver = {
.name = "leds-renesas-tpu",
}
};
module_platform_driver(r_tpu_device_driver);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("Renesas TPU LED Driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
Sudokamikaze/XKernel-grouper | arch/blackfin/mm/sram-alloc.c | 7252 | 20498 | /*
* SRAM allocator for Blackfin on-chip memory
*
* Copyright 2004-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"
/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
void *paddr;
int size;
pid_t pid;
struct sram_piece *next;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);
#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif
#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif
#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif
#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif
#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif
static struct kmem_cache *sram_piece_cache;
/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
unsigned int cpu;
unsigned long reserve;
#ifdef CONFIG_SMP
reserve = 0;
#else
reserve = sizeof(struct l1_scratch_task_info);
#endif
for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
per_cpu(free_l1_ssram_head, cpu).next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
if (!per_cpu(free_l1_ssram_head, cpu).next) {
printk(KERN_INFO "Fail to initialize Scratchpad data SRAM.\n");
return;
}
per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve;
per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve;
per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
per_cpu(free_l1_ssram_head, cpu).next->next = NULL;
per_cpu(used_l1_ssram_head, cpu).next = NULL;
/* mutex initialize */
spin_lock_init(&per_cpu(l1sram_lock, cpu));
printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
L1_SCRATCH_LENGTH >> 10);
}
}
static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
per_cpu(free_l1_data_A_sram_head, cpu).next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
printk(KERN_INFO "Fail to initialize L1 Data A SRAM.\n");
return;
}
per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
per_cpu(free_l1_data_A_sram_head, cpu).next->size =
L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;
per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;
printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
L1_DATA_A_LENGTH >> 10,
per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
}
#endif
#if L1_DATA_B_LENGTH != 0
for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
per_cpu(free_l1_data_B_sram_head, cpu).next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
printk(KERN_INFO "Fail to initialize L1 Data B SRAM.\n");
return;
}
per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
per_cpu(free_l1_data_B_sram_head, cpu).next->size =
L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;
per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;
printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
L1_DATA_B_LENGTH >> 10,
per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
/* mutex initialize */
}
#endif
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}
static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
unsigned int cpu;
for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
per_cpu(free_l1_inst_sram_head, cpu).next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
return;
}
per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
per_cpu(free_l1_inst_sram_head, cpu).next->size =
L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;
per_cpu(used_l1_inst_sram_head, cpu).next = NULL;
printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
L1_CODE_LENGTH >> 10,
per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);
/* mutex initialize */
spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
}
#endif
}
static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
free_l2_sram_head.next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
if (!free_l2_sram_head.next) {
printk(KERN_INFO "Fail to initialize L2 SRAM.\n");
return;
}
free_l2_sram_head.next->paddr =
(void *)L2_START + (_ebss_l2 - _stext_l2);
free_l2_sram_head.next->size =
L2_LENGTH - (_ebss_l2 - _stext_l2);
free_l2_sram_head.next->pid = 0;
free_l2_sram_head.next->next = NULL;
used_l2_sram_head.next = NULL;
printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
L2_LENGTH >> 10,
free_l2_sram_head.next->size >> 10);
/* mutex initialize */
spin_lock_init(&l2_sram_lock);
#endif
}
static int __init bfin_sram_init(void)
{
sram_piece_cache = kmem_cache_create("sram_piece_cache",
sizeof(struct sram_piece),
0, SLAB_PANIC, NULL);
l1sram_init();
l1_data_sram_init();
l1_inst_sram_init();
l2_sram_init();
return 0;
}
pure_initcall(bfin_sram_init);
/* SRAM allocate function: first-fit over the address-ordered free
* list; the chosen piece is moved to the address-ordered used list
* and tagged with the caller's PID so /proc/sram can attribute it.
*/
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
struct sram_piece *pused_head)
{
struct sram_piece *pslot, *plast, *pavail;
if (size <= 0 || !pfree_head || !pused_head)
return NULL;
/* Align the size */
size = (size + 3) & ~3;
pslot = pfree_head->next;
plast = pfree_head;
/* search an available piece slot */
while (pslot != NULL && size > pslot->size) {
plast = pslot;
pslot = pslot->next;
}
if (!pslot)
return NULL;
if (pslot->size == size) {
plast->next = pslot->next;
pavail = pslot;
} else {
/* use atomic so our L1 allocator can be used atomically */
pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);
if (!pavail)
return NULL;
pavail->paddr = pslot->paddr;
pavail->size = size;
pslot->paddr += size;
pslot->size -= size;
}
pavail->pid = current->pid;
pslot = pused_head->next;
plast = pused_head;
/* insert new piece into used piece list !!! */
while (pslot != NULL && pavail->paddr < pslot->paddr) {
plast = pslot;
pslot = pslot->next;
}
pavail->next = pslot;
plast->next = pavail;
return pavail->paddr;
}
/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
struct sram_piece *pused_head,
unsigned long *psize)
{
struct sram_piece *pslot, *pmax;
if (!pfree_head || !pused_head)
return NULL;
pmax = pslot = pfree_head->next;
/* search an available piece slot */
while (pslot != NULL) {
if (pslot->size > pmax->size)
pmax = pslot;
pslot = pslot->next;
}
if (!pmax)
return NULL;
*psize = pmax->size;
return _sram_alloc(*psize, pfree_head, pused_head);
}
/* SRAM free function */
static int _sram_free(const void *addr,
struct sram_piece *pfree_head,
struct sram_piece *pused_head)
{
struct sram_piece *pslot, *plast, *pavail;
if (!pfree_head || !pused_head)
return -1;
/* search the relevant memory slot */
pslot = pused_head->next;
plast = pused_head;
/* search an available piece slot */
while (pslot != NULL && pslot->paddr != addr) {
plast = pslot;
pslot = pslot->next;
}
if (!pslot)
return -1;
plast->next = pslot->next;
pavail = pslot;
pavail->pid = 0;
/* insert free pieces back to the free list */
pslot = pfree_head->next;
plast = pfree_head;
while (pslot != NULL && addr > pslot->paddr) {
plast = pslot;
pslot = pslot->next;
}
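/* Merge with the preceding piece when physically adjacent, otherwise
* link the freed piece into the list; then try the same merge with
* the following piece.
*/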
if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
plast->size += pavail->size;
kmem_cache_free(sram_piece_cache, pavail);
} else {
pavail->next = plast->next;
plast->next = pavail;
plast = pavail;
}
if (pslot && plast->paddr + plast->size == pslot->paddr) {
plast->size += pslot->size;
plast->next = pslot->next;
kmem_cache_free(sram_piece_cache, pslot);
}
return 0;
}
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
if (addr >= (void *)get_l1_code_start()
&& addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
return l1_inst_sram_free(addr);
else
#endif
#if L1_DATA_A_LENGTH != 0
if (addr >= (void *)get_l1_data_a_start()
&& addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
return l1_data_A_sram_free(addr);
else
#endif
#if L1_DATA_B_LENGTH != 0
if (addr >= (void *)get_l1_data_b_start()
&& addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
return l1_data_B_sram_free(addr);
else
#endif
#if L2_LENGTH != 0
if (addr >= (void *)L2_START
&& addr < (void *)(L2_START + L2_LENGTH))
return l2_sram_free(addr);
else
#endif
return -1;
}
EXPORT_SYMBOL(sram_free);
void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
unsigned long flags;
void *addr;
unsigned int cpu;
cpu = smp_processor_id();
/* add mutex operation */
spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
&per_cpu(used_l1_data_A_sram_head, cpu));
/* add mutex operation */
spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
(long unsigned int)addr, size);
return addr;
#else
return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);
int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
unsigned long flags;
int ret;
unsigned int cpu;
cpu = smp_processor_id();
/* add mutex operation */
spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
&per_cpu(used_l1_data_A_sram_head, cpu));
/* add mutex operation */
spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
return ret;
#else
return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);
void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
unsigned long flags;
void *addr;
unsigned int cpu;
cpu = smp_processor_id();
/* add mutex operation */
spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
&per_cpu(used_l1_data_B_sram_head, cpu));
/* add mutex operation */
spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
(long unsigned int)addr, size);
return addr;
#else
return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);
int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
unsigned long flags;
int ret;
unsigned int cpu;
cpu = smp_processor_id();
/* add mutex operation */
spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
&per_cpu(used_l1_data_B_sram_head, cpu));
/* add mutex operation */
spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
return ret;
#else
return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);
void *l1_data_sram_alloc(size_t size)
{
void *addr = l1_data_A_sram_alloc(size);
if (!addr)
addr = l1_data_B_sram_alloc(size);
return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);
void *l1_data_sram_zalloc(size_t size)
{
void *addr = l1_data_sram_alloc(size);
if (addr)
memset(addr, 0x00, size);
return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);
int l1_data_sram_free(const void *addr)
{
int ret;
ret = l1_data_A_sram_free(addr);
if (ret == -1)
ret = l1_data_B_sram_free(addr);
return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
unsigned long flags;
void *addr;
unsigned int cpu;
cpu = smp_processor_id();
/* add mutex operation */
spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
&per_cpu(used_l1_inst_sram_head, cpu));
/* add mutex operation */
spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
(long unsigned int)addr, size);
return addr;
#else
return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);
int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
unsigned long flags;
int ret;
unsigned int cpu;
cpu = smp_processor_id();
/* add mutex operation */
spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
&per_cpu(used_l1_inst_sram_head, cpu));
/* add mutex operation */
spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
return ret;
#else
return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
unsigned long flags;
void *addr;
unsigned int cpu;
cpu = smp_processor_id();
/* add mutex operation */
spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
&per_cpu(used_l1_ssram_head, cpu));
/* add mutex operation */
spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
return addr;
}
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc_max(size_t *psize)
{
unsigned long flags;
void *addr;
unsigned int cpu;
cpu = smp_processor_id();
/* add mutex operation */
spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
&per_cpu(used_l1_ssram_head, cpu), psize);
/* add mutex operation */
spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
return addr;
}
/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
unsigned long flags;
int ret;
unsigned int cpu;
cpu = smp_processor_id();
/* add mutex operation */
spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
&per_cpu(used_l1_ssram_head, cpu));
/* add mutex operation */
spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
return ret;
}
void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
unsigned long flags;
void *addr;
/* add mutex operation */
spin_lock_irqsave(&l2_sram_lock, flags);
addr = _sram_alloc(size, &free_l2_sram_head,
&used_l2_sram_head);
/* add mutex operation */
spin_unlock_irqrestore(&l2_sram_lock, flags);
pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
(long unsigned int)addr, size);
return addr;
#else
return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);
void *l2_sram_zalloc(size_t size)
{
void *addr = l2_sram_alloc(size);
if (addr)
memset(addr, 0x00, size);
return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);
int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
unsigned long flags;
int ret;
/* add mutex operation */
spin_lock_irqsave(&l2_sram_lock, flags);
ret = _sram_free(addr, &free_l2_sram_head,
&used_l2_sram_head);
/* add mutex operation */
spin_unlock_irqrestore(&l2_sram_lock, flags);
return ret;
#else
return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);
int sram_free_with_lsl(const void *addr)
{
struct sram_list_struct *lsl, **tmp;
struct mm_struct *mm = current->mm;
int ret = -1;
for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
if ((*tmp)->addr == addr) {
lsl = *tmp;
ret = sram_free(addr);
*tmp = lsl->next;
kfree(lsl);
break;
}
return ret;
}
EXPORT_SYMBOL(sram_free_with_lsl);
/* Allocate memory and keep in L1 SRAM List (lsl) so that the resources are
* tracked. These are designed for userspace so that when a process exits,
* we can safely reap their resources.
*/
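/* Example (hypothetical caller, for illustration only): request 256
* bytes of L1 data A SRAM, falling back to L2:
*
* void *buf = sram_alloc_with_lsl(256, L1_DATA_A_SRAM | L2_SRAM);
* if (buf) { ... use buf ... }
* sram_free_with_lsl(buf);
*/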
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
void *addr = NULL;
struct sram_list_struct *lsl = NULL;
struct mm_struct *mm = current->mm;
lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
if (!lsl)
return NULL;
if (flags & L1_INST_SRAM)
addr = l1_inst_sram_alloc(size);
if (addr == NULL && (flags & L1_DATA_A_SRAM))
addr = l1_data_A_sram_alloc(size);
if (addr == NULL && (flags & L1_DATA_B_SRAM))
addr = l1_data_B_sram_alloc(size);
if (addr == NULL && (flags & L2_SRAM))
addr = l2_sram_alloc(size);
if (addr == NULL) {
kfree(lsl);
return NULL;
}
lsl->addr = addr;
lsl->length = size;
lsl->next = mm->context.sram_list;
mm->context.sram_list = lsl;
return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
* Until then, we need some sort of visibility into the L1 alloc.
*/
/* Need to keep line of output the same. Currently, that is 44 bytes
* (including newline).
*/
static int _sram_proc_show(struct seq_file *m, const char *desc,
struct sram_piece *pfree_head,
struct sram_piece *pused_head)
{
struct sram_piece *pslot;
if (!pfree_head || !pused_head)
return -1;
seq_printf(m, "--- SRAM %-14s Size PID State \n", desc);
/* search the relevant memory slot */
pslot = pused_head->next;
while (pslot != NULL) {
seq_printf(m, "%p-%p %10i %5i %-10s\n",
pslot->paddr, pslot->paddr + pslot->size,
pslot->size, pslot->pid, "ALLOCATED");
pslot = pslot->next;
}
pslot = pfree_head->next;
while (pslot != NULL) {
seq_printf(m, "%p-%p %10i %5i %-10s\n",
pslot->paddr, pslot->paddr + pslot->size,
pslot->size, pslot->pid, "FREE");
pslot = pslot->next;
}
return 0;
}
static int sram_proc_show(struct seq_file *m, void *v)
{
unsigned int cpu;
for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
if (_sram_proc_show(m, "Scratchpad",
&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
goto not_done;
#if L1_DATA_A_LENGTH != 0
if (_sram_proc_show(m, "L1 Data A",
&per_cpu(free_l1_data_A_sram_head, cpu),
&per_cpu(used_l1_data_A_sram_head, cpu)))
goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
if (_sram_proc_show(m, "L1 Data B",
&per_cpu(free_l1_data_B_sram_head, cpu),
&per_cpu(used_l1_data_B_sram_head, cpu)))
goto not_done;
#endif
#if L1_CODE_LENGTH != 0
if (_sram_proc_show(m, "L1 Instruction",
&per_cpu(free_l1_inst_sram_head, cpu),
&per_cpu(used_l1_inst_sram_head, cpu)))
goto not_done;
#endif
}
#if L2_LENGTH != 0
if (_sram_proc_show(m, "L2", &free_l2_sram_head, &used_l2_sram_head))
goto not_done;
#endif
not_done:
return 0;
}
static int sram_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, sram_proc_show, NULL);
}
static const struct file_operations sram_proc_ops = {
.open = sram_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init sram_proc_init(void)
{
struct proc_dir_entry *ptr;
ptr = proc_create("sram", S_IRUGO, NULL, &sram_proc_ops);
if (!ptr) {
printk(KERN_WARNING "unable to create /proc/sram\n");
return -1;
}
return 0;
}
late_initcall(sram_proc_init);
#endif
| gpl-2.0 |
javelinanddart/bricked-flo | arch/blackfin/mm/sram-alloc.c | 7252 | 20498 | /*
* SRAM allocator for Blackfin on-chip memory
*
* Copyright 2004-2009 Analog Devices Inc.
*
* Licensed under the GPL-2 or later.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"
/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
void *paddr;
int size;
pid_t pid;
struct sram_piece *next;
};
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);
#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif
#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif
#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif
#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif
#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif
static struct kmem_cache *sram_piece_cache;
/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
unsigned int cpu;
unsigned long reserve;
#ifdef CONFIG_SMP
reserve = 0;
#else
reserve = sizeof(struct l1_scratch_task_info);
#endif
for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
per_cpu(free_l1_ssram_head, cpu).next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
if (!per_cpu(free_l1_ssram_head, cpu).next) {
printk(KERN_INFO "Fail to initialize Scratchpad data SRAM.\n");
return;
}
per_cpu(free_l1_ssram_head, cpu).next->paddr = (void *)get_l1_scratch_start_cpu(cpu) + reserve;
per_cpu(free_l1_ssram_head, cpu).next->size = L1_SCRATCH_LENGTH - reserve;
per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
per_cpu(free_l1_ssram_head, cpu).next->next = NULL;
per_cpu(used_l1_ssram_head, cpu).next = NULL;
/* mutex initialize */
spin_lock_init(&per_cpu(l1sram_lock, cpu));
printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
L1_SCRATCH_LENGTH >> 10);
}
}
static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
per_cpu(free_l1_data_A_sram_head, cpu).next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
printk(KERN_INFO "Fail to initialize L1 Data A SRAM.\n");
return;
}
per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
per_cpu(free_l1_data_A_sram_head, cpu).next->size =
L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;
per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;
printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
L1_DATA_A_LENGTH >> 10,
per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
}
#endif
#if L1_DATA_B_LENGTH != 0
for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
per_cpu(free_l1_data_B_sram_head, cpu).next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
printk(KERN_INFO "Fail to initialize L1 Data B SRAM.\n");
return;
}
per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
per_cpu(free_l1_data_B_sram_head, cpu).next->size =
L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;
per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;
printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
L1_DATA_B_LENGTH >> 10,
per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
/* mutex initialize */
}
#endif
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}
static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
unsigned int cpu;
for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
per_cpu(free_l1_inst_sram_head, cpu).next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
printk(KERN_INFO "Failed to initialize L1 Instruction SRAM\n");
return;
}
per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
per_cpu(free_l1_inst_sram_head, cpu).next->size =
L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;
per_cpu(used_l1_inst_sram_head, cpu).next = NULL;
printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
L1_CODE_LENGTH >> 10,
per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);
/* mutex initialize */
spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
}
#endif
}
static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0
free_l2_sram_head.next =
kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
if (!free_l2_sram_head.next) {
printk(KERN_INFO "Fail to initialize L2 SRAM.\n");
return;
}
free_l2_sram_head.next->paddr =
(void *)L2_START + (_ebss_l2 - _stext_l2);
free_l2_sram_head.next->size =
L2_LENGTH - (_ebss_l2 - _stext_l2);
free_l2_sram_head.next->pid = 0;
free_l2_sram_head.next->next = NULL;
used_l2_sram_head.next = NULL;
printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
L2_LENGTH >> 10,
free_l2_sram_head.next->size >> 10);
/* mutex initialize */
spin_lock_init(&l2_sram_lock);
#endif
}
static int __init bfin_sram_init(void)
{
sram_piece_cache = kmem_cache_create("sram_piece_cache",
sizeof(struct sram_piece),
0, SLAB_PANIC, NULL);
l1sram_init();
l1_data_sram_init();
l1_inst_sram_init();
l2_sram_init();
return 0;
}
pure_initcall(bfin_sram_init);
/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
struct sram_piece *pused_head)
{
struct sram_piece *pslot, *plast, *pavail;
if (size <= 0 || !pfree_head || !pused_head)
return NULL;
/* Align the size */
size = (size + 3) & ~3;
pslot = pfree_head->next;
plast = pfree_head;
/* search an available piece slot */
while (pslot != NULL && size > pslot->size) {
plast = pslot;
pslot = pslot->next;
}
if (!pslot)
return NULL;
if (pslot->size == size) {
plast->next = pslot->next;
pavail = pslot;
} else {
/* use atomic so our L1 allocator can be used atomically */
pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);
if (!pavail)
return NULL;
pavail->paddr = pslot->paddr;
pavail->size = size;
pslot->paddr += size;
pslot->size -= size;
}
pavail->pid = current->pid;
pslot = pused_head->next;
plast = pused_head;
/* insert new piece into used piece list !!! */
while (pslot != NULL && pavail->paddr < pslot->paddr) {
plast = pslot;
pslot = pslot->next;
}
pavail->next = pslot;
plast->next = pavail;
return pavail->paddr;
}
/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
struct sram_piece *pused_head,
unsigned long *psize)
{
struct sram_piece *pslot, *pmax;
if (!pfree_head || !pused_head)
return NULL;
pmax = pslot = pfree_head->next;
/* search an available piece slot */
while (pslot != NULL) {
if (pslot->size > pmax->size)
pmax = pslot;
pslot = pslot->next;
}
if (!pmax)
return NULL;
*psize = pmax->size;
return _sram_alloc(*psize, pfree_head, pused_head);
}
/* SRAM free function */
static int _sram_free(const void *addr,
struct sram_piece *pfree_head,
struct sram_piece *pused_head)
{
struct sram_piece *pslot, *plast, *pavail;
if (!pfree_head || !pused_head)
return -1;
/* search the relevant memory slot */
pslot = pused_head->next;
plast = pused_head;
/* search an available piece slot */
while (pslot != NULL && pslot->paddr != addr) {
plast = pslot;
pslot = pslot->next;
}
if (!pslot)
return -1;
plast->next = pslot->next;
pavail = pslot;
pavail->pid = 0;
/* insert the piece back into the free list, merging neighbours below */
pslot = pfree_head->next;
plast = pfree_head;
while (pslot != NULL && addr > pslot->paddr) {
plast = pslot;
pslot = pslot->next;
}
if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
plast->size += pavail->size;
kmem_cache_free(sram_piece_cache, pavail);
} else {
pavail->next = plast->next;
plast->next = pavail;
plast = pavail;
}
if (pslot && plast->paddr + plast->size == pslot->paddr) {
plast->size += pslot->size;
plast->next = pslot->next;
kmem_cache_free(sram_piece_cache, pslot);
}
return 0;
}
int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
if (addr >= (void *)get_l1_code_start()
&& addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
return l1_inst_sram_free(addr);
else
#endif
#if L1_DATA_A_LENGTH != 0
if (addr >= (void *)get_l1_data_a_start()
&& addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
return l1_data_A_sram_free(addr);
else
#endif
#if L1_DATA_B_LENGTH != 0
if (addr >= (void *)get_l1_data_b_start()
&& addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
return l1_data_B_sram_free(addr);
else
#endif
#if L2_LENGTH != 0
if (addr >= (void *)L2_START
&& addr < (void *)(L2_START + L2_LENGTH))
return l2_sram_free(addr);
else
#endif
return -1;
}
EXPORT_SYMBOL(sram_free);
void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
unsigned long flags;
void *addr;
unsigned int cpu;
cpu = smp_processor_id();
/* serialize access to the per-CPU SRAM piece lists */
spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
&per_cpu(used_l1_data_A_sram_head, cpu));
spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%zx\n",
(unsigned long)addr, size);
return addr;
#else
return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);
int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
unsigned long flags;
int ret;
unsigned int cpu;
cpu = smp_processor_id();
/* serialize access to the per-CPU SRAM piece lists */
spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
&per_cpu(used_l1_data_A_sram_head, cpu));
spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
return ret;
#else
return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);
void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
unsigned long flags;
void *addr;
unsigned int cpu;
cpu = smp_processor_id();
/* serialize access to the per-CPU SRAM piece lists */
spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
&per_cpu(used_l1_data_B_sram_head, cpu));
spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%zx\n",
(unsigned long)addr, size);
return addr;
#else
return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);
int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
unsigned long flags;
int ret;
unsigned int cpu;
cpu = smp_processor_id();
/* serialize access to the per-CPU SRAM piece lists */
spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);
ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
&per_cpu(used_l1_data_B_sram_head, cpu));
spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);
return ret;
#else
return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);
void *l1_data_sram_alloc(size_t size)
{
void *addr = l1_data_A_sram_alloc(size);
if (!addr)
addr = l1_data_B_sram_alloc(size);
return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);
void *l1_data_sram_zalloc(size_t size)
{
void *addr = l1_data_sram_alloc(size);
if (addr)
memset(addr, 0x00, size);
return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);
int l1_data_sram_free(const void *addr)
{
int ret;
ret = l1_data_A_sram_free(addr);
if (ret == -1)
ret = l1_data_B_sram_free(addr);
return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
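/*
 * Usage sketch: a hypothetical driver staging a small descriptor table
 * in L1 data SRAM. The structure, names and sizes here are illustrative
 * only and not part of this allocator.
 */
#if 0
struct my_desc {
u32 addr;
u32 len;
};
static struct my_desc *my_descs;
static int my_init(void)
{
/* tries bank A first, then falls back to bank B; zeroed on success */
my_descs = l1_data_sram_zalloc(8 * sizeof(*my_descs));
if (!my_descs)
return -ENOMEM;
return 0;
}
static void my_exit(void)
{
/* l1_data_sram_free() tries bank A, then bank B, by address */
l1_data_sram_free(my_descs);
}
#endif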
void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
unsigned long flags;
void *addr;
unsigned int cpu;
cpu = smp_processor_id();
/* serialize access to the per-CPU SRAM piece lists */
spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
&per_cpu(used_l1_inst_sram_head, cpu));
spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%zx\n",
(unsigned long)addr, size);
return addr;
#else
return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);
int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
unsigned long flags;
int ret;
unsigned int cpu;
cpu = smp_processor_id();
/* serialize access to the per-CPU SRAM piece lists */
spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);
ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
&per_cpu(used_l1_inst_sram_head, cpu));
spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);
return ret;
#else
return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
unsigned long flags;
void *addr;
unsigned int cpu;
cpu = smp_processor_id();
/* serialize access to the per-CPU scratchpad piece lists */
spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
&per_cpu(used_l1_ssram_head, cpu));
spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
return addr;
}
/* L1 Scratchpad memory allocate function */
void *l1sram_alloc_max(size_t *psize)
{
unsigned long flags;
void *addr;
unsigned int cpu;
cpu = smp_processor_id();
/* serialize access to the per-CPU scratchpad piece lists */
spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
&per_cpu(used_l1_ssram_head, cpu), psize);
spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
return addr;
}
/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
unsigned long flags;
int ret;
unsigned int cpu;
cpu = smp_processor_id();
/* serialize access to the per-CPU scratchpad piece lists */
spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);
ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
&per_cpu(used_l1_ssram_head, cpu));
spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);
return ret;
}
void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
unsigned long flags;
void *addr;
/* serialize access to the L2 SRAM piece lists */
spin_lock_irqsave(&l2_sram_lock, flags);
addr = _sram_alloc(size, &free_l2_sram_head,
&used_l2_sram_head);
spin_unlock_irqrestore(&l2_sram_lock, flags);
pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%zx\n",
(unsigned long)addr, size);
return addr;
#else
return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);
void *l2_sram_zalloc(size_t size)
{
void *addr = l2_sram_alloc(size);
if (addr)
memset(addr, 0x00, size);
return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);
int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
unsigned long flags;
int ret;
/* serialize access to the L2 SRAM piece lists */
spin_lock_irqsave(&l2_sram_lock, flags);
ret = _sram_free(addr, &free_l2_sram_head,
&used_l2_sram_head);
spin_unlock_irqrestore(&l2_sram_lock, flags);
return ret;
#else
return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);
int sram_free_with_lsl(const void *addr)
{
struct sram_list_struct *lsl, **tmp;
struct mm_struct *mm = current->mm;
int ret = -1;
for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
if ((*tmp)->addr == addr) {
lsl = *tmp;
ret = sram_free(addr);
*tmp = lsl->next;
kfree(lsl);
break;
}
return ret;
}
EXPORT_SYMBOL(sram_free_with_lsl);
/* Allocate memory and record it in the L1 SRAM list (lsl) so that the
* resources are tracked. These are designed for userspace use, so that
* when a process exits we can safely reap its resources.
*/
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
void *addr = NULL;
struct sram_list_struct *lsl = NULL;
struct mm_struct *mm = current->mm;
lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
if (!lsl)
return NULL;
if (flags & L1_INST_SRAM)
addr = l1_inst_sram_alloc(size);
if (addr == NULL && (flags & L1_DATA_A_SRAM))
addr = l1_data_A_sram_alloc(size);
if (addr == NULL && (flags & L1_DATA_B_SRAM))
addr = l1_data_B_sram_alloc(size);
if (addr == NULL && (flags & L2_SRAM))
addr = l2_sram_alloc(size);
if (addr == NULL) {
kfree(lsl);
return NULL;
}
lsl->addr = addr;
lsl->length = size;
lsl->next = mm->context.sram_list;
mm->context.sram_list = lsl;
return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
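/*
 * Usage sketch (hypothetical handler name): a character-device ioctl
 * could hand L1 SRAM to a process and rely on the lsl list for cleanup
 * when the process exits.
 */
#if 0
static long my_ioctl_alloc(size_t len)
{
void *p = sram_alloc_with_lsl(len, L1_DATA_A_SRAM | L1_DATA_B_SRAM);
if (!p)
return -ENOMEM;
/* ... map p into the calling process here ... */
return 0;
}
#endif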
#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
* Until then, we need some sort of visibility into the L1 alloc.
*/
/* Each line of output must stay the same length. Currently, that is 44
* bytes (including the newline).
*/
static int _sram_proc_show(struct seq_file *m, const char *desc,
struct sram_piece *pfree_head,
struct sram_piece *pused_head)
{
struct sram_piece *pslot;
if (!pfree_head || !pused_head)
return -1;
seq_printf(m, "--- SRAM %-14s Size PID State \n", desc);
/* search the relevant memory slot */
pslot = pused_head->next;
while (pslot != NULL) {
seq_printf(m, "%p-%p %10i %5i %-10s\n",
pslot->paddr, pslot->paddr + pslot->size,
pslot->size, pslot->pid, "ALLOCATED");
pslot = pslot->next;
}
pslot = pfree_head->next;
while (pslot != NULL) {
seq_printf(m, "%p-%p %10i %5i %-10s\n",
pslot->paddr, pslot->paddr + pslot->size,
pslot->size, pslot->pid, "FREE");
pslot = pslot->next;
}
return 0;
}
static int sram_proc_show(struct seq_file *m, void *v)
{
unsigned int cpu;
for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
if (_sram_proc_show(m, "Scratchpad",
&per_cpu(free_l1_ssram_head, cpu), &per_cpu(used_l1_ssram_head, cpu)))
goto not_done;
#if L1_DATA_A_LENGTH != 0
if (_sram_proc_show(m, "L1 Data A",
&per_cpu(free_l1_data_A_sram_head, cpu),
&per_cpu(used_l1_data_A_sram_head, cpu)))
goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
if (_sram_proc_show(m, "L1 Data B",
&per_cpu(free_l1_data_B_sram_head, cpu),
&per_cpu(used_l1_data_B_sram_head, cpu)))
goto not_done;
#endif
#if L1_CODE_LENGTH != 0
if (_sram_proc_show(m, "L1 Instruction",
&per_cpu(free_l1_inst_sram_head, cpu),
&per_cpu(used_l1_inst_sram_head, cpu)))
goto not_done;
#endif
}
#if L2_LENGTH != 0
if (_sram_proc_show(m, "L2", &free_l2_sram_head, &used_l2_sram_head))
goto not_done;
#endif
not_done:
return 0;
}
static int sram_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, sram_proc_show, NULL);
}
static const struct file_operations sram_proc_ops = {
.open = sram_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init sram_proc_init(void)
{
struct proc_dir_entry *ptr;
ptr = proc_create("sram", S_IRUGO, NULL, &sram_proc_ops);
if (!ptr) {
printk(KERN_WARNING "unable to create /proc/sram\n");
return -1;
}
return 0;
}
late_initcall(sram_proc_init);
#endif
| gpl-2.0 |
android-armv7a-belalang-tempur/belalang-tempur | drivers/net/ethernet/ibm/emac/phy.c | 9300 | 12820 | /*
* drivers/net/ethernet/ibm/emac/phy.c
*
* Driver for PowerPC 4xx on-chip ethernet controller, PHY support.
* Borrowed from sungem_phy.c, though I only kept the generic MII
* driver for now.
*
* This file should be shared with other drivers or eventually
* merged as the "low level" part of miilib
*
* Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* Based on the arch/ppc version of the driver:
*
* (c) 2003, Benjamin Herrenschmidt (benh@kernel.crashing.org)
* (c) 2004-2005, Eugene Surovegin <ebs@ebshome.net>
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/delay.h>
#include "emac.h"
#include "phy.h"
#define phy_read _phy_read
#define phy_write _phy_write
static inline int _phy_read(struct mii_phy *phy, int reg)
{
return phy->mdio_read(phy->dev, phy->address, reg);
}
static inline void _phy_write(struct mii_phy *phy, int reg, int val)
{
phy->mdio_write(phy->dev, phy->address, reg, val);
}
static inline int gpcs_phy_read(struct mii_phy *phy, int reg)
{
return phy->mdio_read(phy->dev, phy->gpcs_address, reg);
}
static inline void gpcs_phy_write(struct mii_phy *phy, int reg, int val)
{
phy->mdio_write(phy->dev, phy->gpcs_address, reg, val);
}
int emac_mii_reset_phy(struct mii_phy *phy)
{
int val;
int limit = 10000;
val = phy_read(phy, MII_BMCR);
val &= ~(BMCR_ISOLATE | BMCR_ANENABLE);
val |= BMCR_RESET;
phy_write(phy, MII_BMCR, val);
udelay(300);
while (--limit) {
val = phy_read(phy, MII_BMCR);
if (val >= 0 && (val & BMCR_RESET) == 0)
break;
udelay(10);
}
if ((val & BMCR_ISOLATE) && limit > 0)
phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
return limit <= 0;
}
int emac_mii_reset_gpcs(struct mii_phy *phy)
{
int val;
int limit = 10000;
val = gpcs_phy_read(phy, MII_BMCR);
val &= ~(BMCR_ISOLATE | BMCR_ANENABLE);
val |= BMCR_RESET;
gpcs_phy_write(phy, MII_BMCR, val);
udelay(300);
while (--limit) {
val = gpcs_phy_read(phy, MII_BMCR);
if (val >= 0 && (val & BMCR_RESET) == 0)
break;
udelay(10);
}
if ((val & BMCR_ISOLATE) && limit > 0)
gpcs_phy_write(phy, MII_BMCR, val & ~BMCR_ISOLATE);
if (limit > 0 && phy->mode == PHY_MODE_SGMII) {
/* Configure GPCS interface to recommended setting for SGMII */
gpcs_phy_write(phy, 0x04, 0x8120); /* AsymPause, FDX */
gpcs_phy_write(phy, 0x07, 0x2801); /* msg_pg, toggle */
gpcs_phy_write(phy, 0x00, 0x0140); /* 1Gbps, FDX */
}
return limit <= 0;
}
static int genmii_setup_aneg(struct mii_phy *phy, u32 advertise)
{
int ctl, adv;
phy->autoneg = AUTONEG_ENABLE;
phy->speed = SPEED_10;
phy->duplex = DUPLEX_HALF;
phy->pause = phy->asym_pause = 0;
phy->advertising = advertise;
ctl = phy_read(phy, MII_BMCR);
if (ctl < 0)
return ctl;
ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
/* First clear the PHY */
phy_write(phy, MII_BMCR, ctl);
/* Setup standard advertise */
adv = phy_read(phy, MII_ADVERTISE);
if (adv < 0)
return adv;
adv &= ~(ADVERTISE_ALL | ADVERTISE_100BASE4 | ADVERTISE_PAUSE_CAP |
ADVERTISE_PAUSE_ASYM);
if (advertise & ADVERTISED_10baseT_Half)
adv |= ADVERTISE_10HALF;
if (advertise & ADVERTISED_10baseT_Full)
adv |= ADVERTISE_10FULL;
if (advertise & ADVERTISED_100baseT_Half)
adv |= ADVERTISE_100HALF;
if (advertise & ADVERTISED_100baseT_Full)
adv |= ADVERTISE_100FULL;
if (advertise & ADVERTISED_Pause)
adv |= ADVERTISE_PAUSE_CAP;
if (advertise & ADVERTISED_Asym_Pause)
adv |= ADVERTISE_PAUSE_ASYM;
phy_write(phy, MII_ADVERTISE, adv);
if (phy->features &
(SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
adv = phy_read(phy, MII_CTRL1000);
if (adv < 0)
return adv;
adv &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
if (advertise & ADVERTISED_1000baseT_Full)
adv |= ADVERTISE_1000FULL;
if (advertise & ADVERTISED_1000baseT_Half)
adv |= ADVERTISE_1000HALF;
phy_write(phy, MII_CTRL1000, adv);
}
/* Start/Restart aneg */
ctl = phy_read(phy, MII_BMCR);
ctl |= (BMCR_ANENABLE | BMCR_ANRESTART);
phy_write(phy, MII_BMCR, ctl);
return 0;
}
static int genmii_setup_forced(struct mii_phy *phy, int speed, int fd)
{
int ctl;
phy->autoneg = AUTONEG_DISABLE;
phy->speed = speed;
phy->duplex = fd;
phy->pause = phy->asym_pause = 0;
ctl = phy_read(phy, MII_BMCR);
if (ctl < 0)
return ctl;
ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_SPEED1000 | BMCR_ANENABLE);
/* First clear the PHY */
phy_write(phy, MII_BMCR, ctl | BMCR_RESET);
/* Select speed & duplex */
switch (speed) {
case SPEED_10:
break;
case SPEED_100:
ctl |= BMCR_SPEED100;
break;
case SPEED_1000:
ctl |= BMCR_SPEED1000;
break;
default:
return -EINVAL;
}
if (fd == DUPLEX_FULL)
ctl |= BMCR_FULLDPLX;
phy_write(phy, MII_BMCR, ctl);
return 0;
}
static int genmii_poll_link(struct mii_phy *phy)
{
int status;
/* Clear latched value with dummy read */
phy_read(phy, MII_BMSR);
status = phy_read(phy, MII_BMSR);
if (status < 0 || (status & BMSR_LSTATUS) == 0)
return 0;
if (phy->autoneg == AUTONEG_ENABLE && !(status & BMSR_ANEGCOMPLETE))
return 0;
return 1;
}
static int genmii_read_link(struct mii_phy *phy)
{
if (phy->autoneg == AUTONEG_ENABLE) {
int glpa = 0;
int lpa = phy_read(phy, MII_LPA) & phy_read(phy, MII_ADVERTISE);
if (lpa < 0)
return lpa;
if (phy->features &
(SUPPORTED_1000baseT_Full | SUPPORTED_1000baseT_Half)) {
int adv = phy_read(phy, MII_CTRL1000);
glpa = phy_read(phy, MII_STAT1000);
if (glpa < 0 || adv < 0)
return adv;
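/*
* The partner's 1000BASE-T ability bits in MII_STAT1000 sit two bits
* above the matching advertisement bits in MII_CTRL1000 (LPA_1000FULL
* = ADVERTISE_1000FULL << 2), so shifting our advertisement left by
* two masks the partner's bits correctly.
*/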
glpa &= adv << 2;
}
phy->speed = SPEED_10;
phy->duplex = DUPLEX_HALF;
phy->pause = phy->asym_pause = 0;
if (glpa & (LPA_1000FULL | LPA_1000HALF)) {
phy->speed = SPEED_1000;
if (glpa & LPA_1000FULL)
phy->duplex = DUPLEX_FULL;
} else if (lpa & (LPA_100FULL | LPA_100HALF)) {
phy->speed = SPEED_100;
if (lpa & LPA_100FULL)
phy->duplex = DUPLEX_FULL;
} else if (lpa & LPA_10FULL)
phy->duplex = DUPLEX_FULL;
if (phy->duplex == DUPLEX_FULL) {
phy->pause = lpa & LPA_PAUSE_CAP ? 1 : 0;
phy->asym_pause = lpa & LPA_PAUSE_ASYM ? 1 : 0;
}
} else {
int bmcr = phy_read(phy, MII_BMCR);
if (bmcr < 0)
return bmcr;
if (bmcr & BMCR_FULLDPLX)
phy->duplex = DUPLEX_FULL;
else
phy->duplex = DUPLEX_HALF;
if (bmcr & BMCR_SPEED1000)
phy->speed = SPEED_1000;
else if (bmcr & BMCR_SPEED100)
phy->speed = SPEED_100;
else
phy->speed = SPEED_10;
phy->pause = phy->asym_pause = 0;
}
return 0;
}
/* Generic implementation for most 10/100/1000 PHYs */
static struct mii_phy_ops generic_phy_ops = {
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static struct mii_phy_def genmii_phy_def = {
.phy_id = 0x00000000,
.phy_id_mask = 0x00000000,
.name = "Generic MII",
.ops = &generic_phy_ops
};
/* CIS8201 */
#define MII_CIS8201_10BTCSR 0x16
#define TENBTCSR_ECHO_DISABLE 0x2000
#define MII_CIS8201_EPCR 0x17
#define EPCR_MODE_MASK 0x3000
#define EPCR_GMII_MODE 0x0000
#define EPCR_RGMII_MODE 0x1000
#define EPCR_TBI_MODE 0x2000
#define EPCR_RTBI_MODE 0x3000
#define MII_CIS8201_ACSR 0x1c
#define ACSR_PIN_PRIO_SELECT 0x0004
static int cis8201_init(struct mii_phy *phy)
{
int epcr;
epcr = phy_read(phy, MII_CIS8201_EPCR);
if (epcr < 0)
return epcr;
epcr &= ~EPCR_MODE_MASK;
switch (phy->mode) {
case PHY_MODE_TBI:
epcr |= EPCR_TBI_MODE;
break;
case PHY_MODE_RTBI:
epcr |= EPCR_RTBI_MODE;
break;
case PHY_MODE_GMII:
epcr |= EPCR_GMII_MODE;
break;
case PHY_MODE_RGMII:
default:
epcr |= EPCR_RGMII_MODE;
}
phy_write(phy, MII_CIS8201_EPCR, epcr);
/* MII regs override strap pins */
phy_write(phy, MII_CIS8201_ACSR,
phy_read(phy, MII_CIS8201_ACSR) | ACSR_PIN_PRIO_SELECT);
/* Disable TX_EN -> CRS echo mode, otherwise 10/HDX doesn't work */
phy_write(phy, MII_CIS8201_10BTCSR,
phy_read(phy, MII_CIS8201_10BTCSR) | TENBTCSR_ECHO_DISABLE);
return 0;
}
static struct mii_phy_ops cis8201_phy_ops = {
.init = cis8201_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static struct mii_phy_def cis8201_phy_def = {
.phy_id = 0x000fc410,
.phy_id_mask = 0x000ffff0,
.name = "CIS8201 Gigabit Ethernet",
.ops = &cis8201_phy_ops
};
static struct mii_phy_def bcm5248_phy_def = {
.phy_id = 0x0143bc00,
.phy_id_mask = 0x0ffffff0,
.name = "BCM5248 10/100 SMII Ethernet",
.ops = &generic_phy_ops
};
static int m88e1111_init(struct mii_phy *phy)
{
pr_debug("%s: Marvell 88E1111 Ethernet\n", __func__);
phy_write(phy, 0x14, 0x0ce3);
phy_write(phy, 0x18, 0x4101);
phy_write(phy, 0x09, 0x0e00);
phy_write(phy, 0x04, 0x01e1);
phy_write(phy, 0x00, 0x9140);
phy_write(phy, 0x00, 0x1140);
return 0;
}
static int m88e1112_init(struct mii_phy *phy)
{
/*
* Marvell 88E1112 PHY needs to have the SGMII MAC
* interface (page 2) properly configured to
* communicate with the 460EX/GT GPCS interface.
*/
u16 reg_short;
pr_debug("%s: Marvell 88E1112 Ethernet\n", __func__);
/* Set access to Page 2 */
phy_write(phy, 0x16, 0x0002);
phy_write(phy, 0x00, 0x0040); /* 1Gbps */
reg_short = (u16)(phy_read(phy, 0x1a));
reg_short |= 0x8000; /* bypass Auto-Negotiation */
phy_write(phy, 0x1a, reg_short);
emac_mii_reset_phy(phy); /* reset MAC interface */
/* Reset access to Page 0 */
phy_write(phy, 0x16, 0x0000);
return 0;
}
static int et1011c_init(struct mii_phy *phy)
{
u16 reg_short;
reg_short = (u16)(phy_read(phy, 0x16));
reg_short &= ~(0x7);
reg_short |= 0x6; /* RGMII trace delay */
phy_write(phy, 0x16, reg_short);
reg_short = (u16)(phy_read(phy, 0x17));
reg_short &= ~(0x40);
phy_write(phy, 0x17, reg_short);
phy_write(phy, 0x1c, 0x74f0);
return 0;
}
static struct mii_phy_ops et1011c_phy_ops = {
.init = et1011c_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static struct mii_phy_def et1011c_phy_def = {
.phy_id = 0x0282f000,
.phy_id_mask = 0x0fffff00,
.name = "ET1011C Gigabit Ethernet",
.ops = &et1011c_phy_ops
};
static struct mii_phy_ops m88e1111_phy_ops = {
.init = m88e1111_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static struct mii_phy_def m88e1111_phy_def = {
.phy_id = 0x01410CC0,
.phy_id_mask = 0x0ffffff0,
.name = "Marvell 88E1111 Ethernet",
.ops = &m88e1111_phy_ops,
};
static struct mii_phy_ops m88e1112_phy_ops = {
.init = m88e1112_init,
.setup_aneg = genmii_setup_aneg,
.setup_forced = genmii_setup_forced,
.poll_link = genmii_poll_link,
.read_link = genmii_read_link
};
static struct mii_phy_def m88e1112_phy_def = {
.phy_id = 0x01410C90,
.phy_id_mask = 0x0ffffff0,
.name = "Marvell 88E1112 Ethernet",
.ops = &m88e1112_phy_ops,
};
static struct mii_phy_def *mii_phy_table[] = {
&et1011c_phy_def,
&cis8201_phy_def,
&bcm5248_phy_def,
&m88e1111_phy_def,
&m88e1112_phy_def,
&genmii_phy_def,
NULL
};
int emac_mii_phy_probe(struct mii_phy *phy, int address)
{
struct mii_phy_def *def;
int i;
u32 id;
phy->autoneg = AUTONEG_DISABLE;
phy->advertising = 0;
phy->address = address;
phy->speed = SPEED_10;
phy->duplex = DUPLEX_HALF;
phy->pause = phy->asym_pause = 0;
/* Take PHY out of isolate mode and reset it. */
if (emac_mii_reset_phy(phy))
return -ENODEV;
/* Read ID and find matching entry */
id = (phy_read(phy, MII_PHYSID1) << 16) | phy_read(phy, MII_PHYSID2);
for (i = 0; (def = mii_phy_table[i]) != NULL; i++)
if ((id & def->phy_id_mask) == def->phy_id)
break;
/* Should never be NULL (we have a generic entry), but... */
if (!def)
return -ENODEV;
phy->def = def;
/* Determine PHY features if needed */
phy->features = def->features;
if (!phy->features) {
u16 bmsr = phy_read(phy, MII_BMSR);
if (bmsr & BMSR_ANEGCAPABLE)
phy->features |= SUPPORTED_Autoneg;
if (bmsr & BMSR_10HALF)
phy->features |= SUPPORTED_10baseT_Half;
if (bmsr & BMSR_10FULL)
phy->features |= SUPPORTED_10baseT_Full;
if (bmsr & BMSR_100HALF)
phy->features |= SUPPORTED_100baseT_Half;
if (bmsr & BMSR_100FULL)
phy->features |= SUPPORTED_100baseT_Full;
if (bmsr & BMSR_ESTATEN) {
u16 esr = phy_read(phy, MII_ESTATUS);
if (esr & ESTATUS_1000_TFULL)
phy->features |= SUPPORTED_1000baseT_Full;
if (esr & ESTATUS_1000_THALF)
phy->features |= SUPPORTED_1000baseT_Half;
}
phy->features |= SUPPORTED_MII;
}
/* Setup default advertising */
phy->advertising = phy->features;
return 0;
}
MODULE_LICENSE("GPL");
| gpl-2.0 |
letama/android_kernel_nozomi | drivers/usb/storage/libusual.c | 9812 | 5800 | /*
* libusual
*
* The libusual contains the table of devices common for ub and usb-storage.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/usb_usual.h>
#include <linux/vmalloc.h>
#include <linux/kthread.h>
#include <linux/mutex.h>
/*
*/
#define USU_MOD_FL_THREAD 1 /* Thread is running */
#define USU_MOD_FL_PRESENT 2 /* The module is loaded */
struct mod_status {
unsigned long fls;
};
static struct mod_status stat[3];
static DEFINE_SPINLOCK(usu_lock);
/*
*/
#define USB_US_DEFAULT_BIAS USB_US_TYPE_STOR
static atomic_t usu_bias = ATOMIC_INIT(USB_US_DEFAULT_BIAS);
#define BIAS_NAME_SIZE (sizeof("usb-storage"))
static const char *bias_names[3] = { "none", "usb-storage", "ub" };
static DEFINE_MUTEX(usu_probe_mutex);
static DECLARE_COMPLETION(usu_end_notify);
static atomic_t total_threads = ATOMIC_INIT(0);
static int usu_probe_thread(void *arg);
/*
* usb_usual_set_present - mark a driver module type as loaded
* @type: the module type (USB_US_TYPE_STOR or USB_US_TYPE_UB)
*/
void usb_usual_set_present(int type)
{
struct mod_status *st;
unsigned long flags;
if (type <= 0 || type >= 3)
return;
st = &stat[type];
spin_lock_irqsave(&usu_lock, flags);
st->fls |= USU_MOD_FL_PRESENT;
spin_unlock_irqrestore(&usu_lock, flags);
}
EXPORT_SYMBOL_GPL(usb_usual_set_present);
void usb_usual_clear_present(int type)
{
struct mod_status *st;
unsigned long flags;
if (type <= 0 || type >= 3)
return;
st = &stat[type];
spin_lock_irqsave(&usu_lock, flags);
st->fls &= ~USU_MOD_FL_PRESENT;
spin_unlock_irqrestore(&usu_lock, flags);
}
EXPORT_SYMBOL_GPL(usb_usual_clear_present);
/*
* Match the calling driver type against the table.
* Returns: 0 if the device matches.
*/
int usb_usual_check_type(const struct usb_device_id *id, int caller_type)
{
int id_type = USB_US_TYPE(id->driver_info);
if (caller_type <= 0 || caller_type >= 3)
return -EINVAL;
/* Drivers grab fixed assignment devices */
if (id_type == caller_type)
return 0;
/* Drivers grab devices biased to them */
if (id_type == USB_US_TYPE_NONE && caller_type == atomic_read(&usu_bias))
return 0;
return -ENODEV;
}
EXPORT_SYMBOL_GPL(usb_usual_check_type);
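/*
 * Illustrative caller (the function name is hypothetical): a storage
 * driver's probe would decline devices biased away from it like this.
 */
#if 0
static int storage_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
if (usb_usual_check_type(id, USB_US_TYPE_STOR))
return -ENXIO;
/* ... continue with the normal probe ... */
return 0;
}
#endif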
/*
*/
static int usu_probe(struct usb_interface *intf,
const struct usb_device_id *id)
{
int rc;
unsigned long type;
struct task_struct* task;
unsigned long flags;
type = USB_US_TYPE(id->driver_info);
if (type == 0)
type = atomic_read(&usu_bias);
spin_lock_irqsave(&usu_lock, flags);
if ((stat[type].fls & (USU_MOD_FL_THREAD|USU_MOD_FL_PRESENT)) != 0) {
spin_unlock_irqrestore(&usu_lock, flags);
return -ENXIO;
}
stat[type].fls |= USU_MOD_FL_THREAD;
spin_unlock_irqrestore(&usu_lock, flags);
task = kthread_run(usu_probe_thread, (void*)type, "libusual_%ld", type);
if (IS_ERR(task)) {
rc = PTR_ERR(task);
printk(KERN_WARNING "libusual: "
"Unable to start the thread for %s: %d\n",
bias_names[type], rc);
spin_lock_irqsave(&usu_lock, flags);
stat[type].fls &= ~USU_MOD_FL_THREAD;
spin_unlock_irqrestore(&usu_lock, flags);
return rc; /* anything other than -ENXIO makes the USB core print a message */
}
atomic_inc(&total_threads);
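/*
* Always decline the interface: libusual never binds devices itself,
* it only kicks off loading of the real driver, which then claims
* the device on its own.
*/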
return -ENXIO;
}
static void usu_disconnect(struct usb_interface *intf)
{
; /* We should not be here. */
}
static struct usb_driver usu_driver = {
.name = "libusual",
.probe = usu_probe,
.disconnect = usu_disconnect,
.id_table = usb_storage_usb_ids,
};
/*
* A whole new thread for the purpose of request_module seems quite
* stupid; request_module itself forks again internally. However, if we
* attempt to load a storage module from our own modprobe thread, that
* module references our symbols, which cannot be resolved until our
* module is initialized. I wish there were a way to wait for the end
* of initialization, but the module notifier reports
* MODULE_STATE_COMING only. So, we wait until module->init ends as the
* next best thing.
*/
static int usu_probe_thread(void *arg)
{
int type = (unsigned long) arg;
struct mod_status *st = &stat[type];
int rc;
unsigned long flags;
mutex_lock(&usu_probe_mutex);
rc = request_module(bias_names[type]);
spin_lock_irqsave(&usu_lock, flags);
if (rc == 0 && (st->fls & USU_MOD_FL_PRESENT) == 0) {
/*
* This should not happen, but let us keep tabs on it.
*/
printk(KERN_NOTICE "libusual: "
"modprobe for %s succeeded, but module is not present\n",
bias_names[type]);
}
st->fls &= ~USU_MOD_FL_THREAD;
spin_unlock_irqrestore(&usu_lock, flags);
mutex_unlock(&usu_probe_mutex);
complete_and_exit(&usu_end_notify, 0);
}
/*
*/
static int __init usb_usual_init(void)
{
int rc;
mutex_lock(&usu_probe_mutex);
rc = usb_register(&usu_driver);
mutex_unlock(&usu_probe_mutex);
return rc;
}
static void __exit usb_usual_exit(void)
{
/*
* We do not check for any drivers present, because
* they keep us pinned with symbol references.
*/
usb_deregister(&usu_driver);
while (atomic_read(&total_threads) > 0) {
wait_for_completion(&usu_end_notify);
atomic_dec(&total_threads);
}
}
/*
* Validate and accept the bias parameter.
*/
static int usu_set_bias(const char *bias_s, struct kernel_param *kp)
{
int i;
int len;
int bias_n = 0;
len = strlen(bias_s);
if (len == 0)
return -EDOM;
if (bias_s[len-1] == '\n')
--len;
for (i = 1; i < 3; i++) {
if (strncmp(bias_s, bias_names[i], len) == 0) {
bias_n = i;
break;
}
}
if (bias_n == 0)
return -EINVAL;
atomic_set(&usu_bias, bias_n);
return 0;
}
static int usu_get_bias(char *buffer, struct kernel_param *kp)
{
return strlen(strcpy(buffer, bias_names[atomic_read(&usu_bias)]));
}
module_init(usb_usual_init);
module_exit(usb_usual_exit);
module_param_call(bias, usu_set_bias, usu_get_bias, NULL, S_IRUGO|S_IWUSR);
__MODULE_PARM_TYPE(bias, "string");
MODULE_PARM_DESC(bias, "Bias to usb-storage or ub");
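/*
* The bias is an ordinary module parameter, so it can be set at load
* time or, because of S_IWUSR, at runtime via the standard module
* parameter sysfs path (paths assumed), e.g.:
*
*   modprobe libusual bias=ub
*   echo usb-storage > /sys/module/libusual/parameters/bias
*/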
MODULE_LICENSE("GPL");
| gpl-2.0 |
pseudonymous-foss/clydefs | drivers/xen/xenfs/xenstored.c | 12884 | 1531 | #include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <xen/page.h>
#include "xenfs.h"
#include "../xenbus/xenbus_comms.h"
static ssize_t xsd_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
const char *str = (const char *)file->private_data;
return simple_read_from_buffer(buf, size, off, str, strlen(str));
}
static int xsd_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
static int xsd_kva_open(struct inode *inode, struct file *file)
{
file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
xen_store_interface);
if (!file->private_data)
return -ENOMEM;
return 0;
}
static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
{
size_t size = vma->vm_end - vma->vm_start;
if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
return -EINVAL;
if (remap_pfn_range(vma, vma->vm_start,
virt_to_pfn(xen_store_interface),
size, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
const struct file_operations xsd_kva_file_ops = {
.open = xsd_kva_open,
.mmap = xsd_kva_mmap,
.read = xsd_read,
.release = xsd_release,
};
static int xsd_port_open(struct inode *inode, struct file *file)
{
file->private_data = (void *)kasprintf(GFP_KERNEL, "%d",
xen_store_evtchn);
if (!file->private_data)
return -ENOMEM;
return 0;
}
const struct file_operations xsd_port_file_ops = {
.open = xsd_port_open,
.read = xsd_read,
.release = xsd_release,
};
| gpl-2.0 |
abusnooze/mint-v3.2-psp26 | drivers/xen/xenfs/xenstored.c | 12884 | 1531 | #include <linux/slab.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <xen/page.h>
#include "xenfs.h"
#include "../xenbus/xenbus_comms.h"
static ssize_t xsd_read(struct file *file, char __user *buf,
size_t size, loff_t *off)
{
const char *str = (const char *)file->private_data;
return simple_read_from_buffer(buf, size, off, str, strlen(str));
}
static int xsd_release(struct inode *inode, struct file *file)
{
kfree(file->private_data);
return 0;
}
static int xsd_kva_open(struct inode *inode, struct file *file)
{
file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
xen_store_interface);
if (!file->private_data)
return -ENOMEM;
return 0;
}
static int xsd_kva_mmap(struct file *file, struct vm_area_struct *vma)
{
size_t size = vma->vm_end - vma->vm_start;
if ((size > PAGE_SIZE) || (vma->vm_pgoff != 0))
return -EINVAL;
if (remap_pfn_range(vma, vma->vm_start,
virt_to_pfn(xen_store_interface),
size, vma->vm_page_prot))
return -EAGAIN;
return 0;
}
const struct file_operations xsd_kva_file_ops = {
.open = xsd_kva_open,
.mmap = xsd_kva_mmap,
.read = xsd_read,
.release = xsd_release,
};
static int xsd_port_open(struct inode *inode, struct file *file)
{
file->private_data = (void *)kasprintf(GFP_KERNEL, "%d",
xen_store_evtchn);
if (!file->private_data)
return -ENOMEM;
return 0;
}
const struct file_operations xsd_port_file_ops = {
.open = xsd_port_open,
.read = xsd_read,
.release = xsd_release,
};
| gpl-2.0 |
VegaDevTeam/android_kernel_pantech_msm8974 | arch/x86/math-emu/poly_l2.c | 14420 | 7242 | /*---------------------------------------------------------------------------+
| poly_l2.c |
| |
| Compute the base 2 log of a FPU_REG, using a polynomial approximation. |
| |
| Copyright (C) 1992,1993,1994,1997 |
| W. Metzenthen, 22 Parker St, Ormond, Vic 3163, Australia |
| E-mail billm@suburbia.net |
| |
| |
+---------------------------------------------------------------------------*/
#include "exception.h"
#include "reg_constant.h"
#include "fpu_emu.h"
#include "fpu_system.h"
#include "control_w.h"
#include "poly.h"
static void log2_kernel(FPU_REG const *arg, u_char argsign,
Xsig * accum_result, long int *expon);
/*--- poly_l2() -------------------------------------------------------------+
| Base 2 logarithm by a polynomial approximation. |
+---------------------------------------------------------------------------*/
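/*
* The decomposition used below: write st0 as 2^exponent * arg with
* sqrt(2)/2 < arg < sqrt(2), so that log2(st0) = exponent + log2(arg)
* with |log2(arg)| <= 1/2. log2_kernel() supplies log2(arg), and the
* sum is finally multiplied by st1 (poly_l2 computes st1 * log2(st0),
* the FYL2X operation).
*/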
void poly_l2(FPU_REG *st0_ptr, FPU_REG *st1_ptr, u_char st1_sign)
{
long int exponent, expon, expon_expon;
Xsig accumulator, expon_accum, yaccum;
u_char sign, argsign;
FPU_REG x;
int tag;
exponent = exponent16(st0_ptr);
/* From st0_ptr, make a number > sqrt(2)/2 and < sqrt(2) */
if (st0_ptr->sigh > (unsigned)0xb504f334) {
/* Treat as sqrt(2)/2 < st0_ptr < 1 */
significand(&x) = -significand(st0_ptr);
setexponent16(&x, -1);
exponent++;
argsign = SIGN_NEG;
} else {
/* Treat as 1 <= st0_ptr < sqrt(2) */
x.sigh = st0_ptr->sigh - 0x80000000;
x.sigl = st0_ptr->sigl;
setexponent16(&x, 0);
argsign = SIGN_POS;
}
tag = FPU_normalize_nuo(&x);
if (tag == TAG_Zero) {
expon = 0;
accumulator.msw = accumulator.midw = accumulator.lsw = 0;
} else {
log2_kernel(&x, argsign, &accumulator, &expon);
}
if (exponent < 0) {
sign = SIGN_NEG;
exponent = -exponent;
} else
sign = SIGN_POS;
expon_accum.msw = exponent;
expon_accum.midw = expon_accum.lsw = 0;
if (exponent) {
expon_expon = 31 + norm_Xsig(&expon_accum);
shr_Xsig(&accumulator, expon_expon - expon);
if (sign ^ argsign)
negate_Xsig(&accumulator);
add_Xsig_Xsig(&accumulator, &expon_accum);
} else {
expon_expon = expon;
sign = argsign;
}
yaccum.lsw = 0;
XSIG_LL(yaccum) = significand(st1_ptr);
mul_Xsig_Xsig(&accumulator, &yaccum);
expon_expon += round_Xsig(&accumulator);
if (accumulator.msw == 0) {
FPU_copy_to_reg1(&CONST_Z, TAG_Zero);
return;
}
significand(st1_ptr) = XSIG_LL(accumulator);
setexponent16(st1_ptr, expon_expon + exponent16(st1_ptr) + 1);
tag = FPU_round(st1_ptr, 1, 0, FULL_PRECISION, sign ^ st1_sign);
FPU_settagi(1, tag);
set_precision_flag_up(); /* 80486 appears to always do this */
return;
}
/*--- poly_l2p1() -----------------------------------------------------------+
| Base 2 logarithm by a polynomial approximation. |
| log2(x+1) |
+---------------------------------------------------------------------------*/
int poly_l2p1(u_char sign0, u_char sign1,
FPU_REG * st0_ptr, FPU_REG * st1_ptr, FPU_REG * dest)
{
u_char tag;
long int exponent;
Xsig accumulator, yaccum;
if (exponent16(st0_ptr) < 0) {
log2_kernel(st0_ptr, sign0, &accumulator, &exponent);
yaccum.lsw = 0;
XSIG_LL(yaccum) = significand(st1_ptr);
mul_Xsig_Xsig(&accumulator, &yaccum);
exponent += round_Xsig(&accumulator);
exponent += exponent16(st1_ptr) + 1;
if (exponent < EXP_WAY_UNDER)
exponent = EXP_WAY_UNDER;
significand(dest) = XSIG_LL(accumulator);
setexponent16(dest, exponent);
tag = FPU_round(dest, 1, 0, FULL_PRECISION, sign0 ^ sign1);
FPU_settagi(1, tag);
if (tag == TAG_Valid)
set_precision_flag_up(); /* 80486 appears to always do this */
} else {
/* The magnitude of st0_ptr is far too large. */
if (sign0 != SIGN_POS) {
/* Trying to get the log of a negative number. */
#ifdef PECULIAR_486 /* Stupid 80486 doesn't worry about log(negative). */
changesign(st1_ptr);
#else
if (arith_invalid(1) < 0)
return 1;
#endif /* PECULIAR_486 */
}
/* 80486 appears to do this */
if (sign0 == SIGN_NEG)
set_precision_flag_down();
else
set_precision_flag_up();
}
if (exponent(dest) <= EXP_UNDER)
EXCEPTION(EX_Underflow);
return 0;
}
#undef HIPOWER
#define HIPOWER 10
static const unsigned long long logterms[HIPOWER] = {
0x2a8eca5705fc2ef0LL,
0xf6384ee1d01febceLL,
0x093bb62877cdf642LL,
0x006985d8a9ec439bLL,
0x0005212c4f55a9c8LL,
0x00004326a16927f0LL,
0x0000038d1d80a0e7LL,
0x0000003141cc80c6LL,
0x00000002b1668c9fLL,
0x000000002c7a46aaLL
};
static const unsigned long leadterm = 0xb8000000;
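/*
* These constants appear (an inference from the code below, not from
* the original sources) to implement the usual atanh-based series:
* with t = x/(x+2), log2(1+x) = (2/ln 2)*(t + t^3/3 + t^5/5 + ...);
* 'leadterm' supplies the leading multiple of t and 'logterms' the
* higher odd powers, evaluated as a polynomial in t^2.
*/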
/*--- log2_kernel() ---------------------------------------------------------+
| Base 2 logarithm by a polynomial approximation. |
| log2(x+1) |
+---------------------------------------------------------------------------*/
static void log2_kernel(FPU_REG const *arg, u_char argsign, Xsig *accum_result,
long int *expon)
{
long int exponent, adj;
unsigned long long Xsq;
Xsig accumulator, Numer, Denom, argSignif, arg_signif;
exponent = exponent16(arg);
Numer.lsw = Denom.lsw = 0;
XSIG_LL(Numer) = XSIG_LL(Denom) = significand(arg);
if (argsign == SIGN_POS) {
shr_Xsig(&Denom, 2 - (1 + exponent));
Denom.msw |= 0x80000000;
div_Xsig(&Numer, &Denom, &argSignif);
} else {
shr_Xsig(&Denom, 1 - (1 + exponent));
negate_Xsig(&Denom);
if (Denom.msw & 0x80000000) {
div_Xsig(&Numer, &Denom, &argSignif);
exponent++;
} else {
/* Denom must be 1.0 */
argSignif.lsw = Numer.lsw;
argSignif.midw = Numer.midw;
argSignif.msw = Numer.msw;
}
}
#ifndef PECULIAR_486
/* Should check here that |local_arg| is within the valid range */
if (exponent >= -2) {
if ((exponent > -2) || (argSignif.msw > (unsigned)0xafb0ccc0)) {
/* The argument is too large */
}
}
#endif /* PECULIAR_486 */
arg_signif.lsw = argSignif.lsw;
XSIG_LL(arg_signif) = XSIG_LL(argSignif);
adj = norm_Xsig(&argSignif);
accumulator.lsw = argSignif.lsw;
XSIG_LL(accumulator) = XSIG_LL(argSignif);
mul_Xsig_Xsig(&accumulator, &accumulator);
shr_Xsig(&accumulator, 2 * (-1 - (1 + exponent + adj)));
Xsq = XSIG_LL(accumulator);
if (accumulator.lsw & 0x80000000)
Xsq++;
accumulator.msw = accumulator.midw = accumulator.lsw = 0;
/* Do the basic fixed point polynomial evaluation */
polynomial_Xsig(&accumulator, &Xsq, logterms, HIPOWER - 1);
mul_Xsig_Xsig(&accumulator, &argSignif);
shr_Xsig(&accumulator, 6 - adj);
mul32_Xsig(&arg_signif, leadterm);
add_two_Xsig(&accumulator, &arg_signif, &exponent);
*expon = exponent + 1;
accum_result->lsw = accumulator.lsw;
accum_result->midw = accumulator.midw;
accum_result->msw = accumulator.msw;
}
| gpl-2.0 |
bwrsandman/android_kernel_zte_roamer | drivers/s390/char/defkeymap.c | 14676 | 6243 |
/* Do not edit this file! It was automatically generated by */
/* loadkeys --mktable defkeymap.map > defkeymap.c */
#include <linux/types.h>
#include <linux/keyboard.h>
#include <linux/kd.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
u_short plain_map[NR_KEYS] = {
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf020, 0xf000, 0xf0e2, 0xf0e4, 0xf0e0, 0xf0e1, 0xf0e3, 0xf0e5,
0xf0e7, 0xf0f1, 0xf0a2, 0xf02e, 0xf03c, 0xf028, 0xf02b, 0xf07c,
0xf026, 0xf0e9, 0xf0e2, 0xf0eb, 0xf0e8, 0xf0ed, 0xf0ee, 0xf0ef,
0xf0ec, 0xf0df, 0xf021, 0xf024, 0xf02a, 0xf029, 0xf03b, 0xf0ac,
0xf02d, 0xf02f, 0xf0c2, 0xf0c4, 0xf0c0, 0xf0c1, 0xf0c3, 0xf0c5,
0xf0c7, 0xf0d1, 0xf0a6, 0xf02c, 0xf025, 0xf05f, 0xf03e, 0xf03f,
0xf0f8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0c8, 0xf0cd, 0xf0ce, 0xf0cf,
0xf0cc, 0xf060, 0xf03a, 0xf023, 0xf040, 0xf027, 0xf03d, 0xf022,
};
static u_short shift_map[NR_KEYS] = {
0xf0d8, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067,
0xf068, 0xf069, 0xf0ab, 0xf0bb, 0xf0f0, 0xf0fd, 0xf0fe, 0xf0b1,
0xf0b0, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f, 0xf070,
0xf071, 0xf072, 0xf000, 0xf000, 0xf0e6, 0xf0b8, 0xf0c6, 0xf0a4,
0xf0b5, 0xf07e, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077, 0xf078,
0xf079, 0xf07a, 0xf0a1, 0xf0bf, 0xf0d0, 0xf0dd, 0xf0de, 0xf0ae,
0xf402, 0xf0a3, 0xf0a5, 0xf0b7, 0xf0a9, 0xf0a7, 0xf0b6, 0xf0bc,
0xf0bd, 0xf0be, 0xf05b, 0xf05d, 0xf000, 0xf0a8, 0xf0b4, 0xf0d7,
0xf07b, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047,
0xf048, 0xf049, 0xf000, 0xf0f4, 0xf0f6, 0xf0f2, 0xf0f3, 0xf0f5,
0xf07d, 0xf04a, 0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f, 0xf050,
0xf051, 0xf052, 0xf0b9, 0xf0fb, 0xf0fc, 0xf0f9, 0xf0fa, 0xf0ff,
0xf05c, 0xf0f7, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057, 0xf058,
0xf059, 0xf05a, 0xf0b2, 0xf0d4, 0xf0d6, 0xf0d2, 0xf0d3, 0xf0d5,
0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
0xf038, 0xf039, 0xf0b3, 0xf0db, 0xf0dc, 0xf0d9, 0xf0da, 0xf000,
};
static u_short ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf11f, 0xf120, 0xf121, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf01a, 0xf003, 0xf212, 0xf004, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf109, 0xf10a, 0xf206, 0xf00a, 0xf200, 0xf200,
};
static u_short shift_ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf10c, 0xf10d, 0xf10e, 0xf10f, 0xf110, 0xf111, 0xf112,
0xf113, 0xf11e, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf100, 0xf101, 0xf211, 0xf103, 0xf104, 0xf105, 0xf20b,
0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
ushort *key_maps[MAX_NR_KEYMAPS] = {
plain_map, shift_map, NULL, NULL,
ctrl_map, shift_ctrl_map, NULL,
};
unsigned int keymap_count = 4;
/*
* Philosophy: most people do not define more strings, but those who do
* often want quite a lot of string space. So, we statically allocate
* the default and allocate dynamically in chunks of 512 bytes.
*/
char func_buf[] = {
'\033', '[', '[', 'A', 0,
'\033', '[', '[', 'B', 0,
'\033', '[', '[', 'C', 0,
'\033', '[', '[', 'D', 0,
'\033', '[', '[', 'E', 0,
'\033', '[', '1', '7', '~', 0,
'\033', '[', '1', '8', '~', 0,
'\033', '[', '1', '9', '~', 0,
'\033', '[', '2', '0', '~', 0,
'\033', '[', '2', '1', '~', 0,
'\033', '[', '2', '3', '~', 0,
'\033', '[', '2', '4', '~', 0,
'\033', '[', '2', '5', '~', 0,
'\033', '[', '2', '6', '~', 0,
'\033', '[', '2', '8', '~', 0,
'\033', '[', '2', '9', '~', 0,
'\033', '[', '3', '1', '~', 0,
'\033', '[', '3', '2', '~', 0,
'\033', '[', '3', '3', '~', 0,
'\033', '[', '3', '4', '~', 0,
};
char *funcbufptr = func_buf;
int funcbufsize = sizeof(func_buf);
int funcbufleft = 0; /* space left */
char *func_table[MAX_NR_FUNC] = {
func_buf + 0,
func_buf + 5,
func_buf + 10,
func_buf + 15,
func_buf + 20,
func_buf + 25,
func_buf + 31,
func_buf + 37,
func_buf + 43,
func_buf + 49,
func_buf + 55,
func_buf + 61,
func_buf + 67,
func_buf + 73,
func_buf + 79,
func_buf + 85,
func_buf + 91,
func_buf + 97,
func_buf + 103,
func_buf + 109,
NULL,
};
struct kbdiacruc accent_table[MAX_DIACR] = {
{'^', 'c', 0003}, {'^', 'd', 0004},
{'^', 'z', 0032}, {'^', 0012, 0000},
};
unsigned int accent_table_size = 4;
| gpl-2.0 |
InfinitiveOS-Devices/android_kernel_motorola_msm8226 | drivers/s390/char/defkeymap.c | 14676 | 6243 |
/* Do not edit this file! It was automatically generated by */
/* loadkeys --mktable defkeymap.map > defkeymap.c */
#include <linux/types.h>
#include <linux/keyboard.h>
#include <linux/kd.h>
#include <linux/kbd_kern.h>
#include <linux/kbd_diacr.h>
u_short plain_map[NR_KEYS] = {
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000,
0xf020, 0xf000, 0xf0e2, 0xf0e4, 0xf0e0, 0xf0e1, 0xf0e3, 0xf0e5,
0xf0e7, 0xf0f1, 0xf0a2, 0xf02e, 0xf03c, 0xf028, 0xf02b, 0xf07c,
0xf026, 0xf0e9, 0xf0e2, 0xf0eb, 0xf0e8, 0xf0ed, 0xf0ee, 0xf0ef,
0xf0ec, 0xf0df, 0xf021, 0xf024, 0xf02a, 0xf029, 0xf03b, 0xf0ac,
0xf02d, 0xf02f, 0xf0c2, 0xf0c4, 0xf0c0, 0xf0c1, 0xf0c3, 0xf0c5,
0xf0c7, 0xf0d1, 0xf0a6, 0xf02c, 0xf025, 0xf05f, 0xf03e, 0xf03f,
0xf0f8, 0xf0c9, 0xf0ca, 0xf0cb, 0xf0c8, 0xf0cd, 0xf0ce, 0xf0cf,
0xf0cc, 0xf060, 0xf03a, 0xf023, 0xf040, 0xf027, 0xf03d, 0xf022,
};
static u_short shift_map[NR_KEYS] = {
0xf0d8, 0xf061, 0xf062, 0xf063, 0xf064, 0xf065, 0xf066, 0xf067,
0xf068, 0xf069, 0xf0ab, 0xf0bb, 0xf0f0, 0xf0fd, 0xf0fe, 0xf0b1,
0xf0b0, 0xf06a, 0xf06b, 0xf06c, 0xf06d, 0xf06e, 0xf06f, 0xf070,
0xf071, 0xf072, 0xf000, 0xf000, 0xf0e6, 0xf0b8, 0xf0c6, 0xf0a4,
0xf0b5, 0xf07e, 0xf073, 0xf074, 0xf075, 0xf076, 0xf077, 0xf078,
0xf079, 0xf07a, 0xf0a1, 0xf0bf, 0xf0d0, 0xf0dd, 0xf0de, 0xf0ae,
0xf402, 0xf0a3, 0xf0a5, 0xf0b7, 0xf0a9, 0xf0a7, 0xf0b6, 0xf0bc,
0xf0bd, 0xf0be, 0xf05b, 0xf05d, 0xf000, 0xf0a8, 0xf0b4, 0xf0d7,
0xf07b, 0xf041, 0xf042, 0xf043, 0xf044, 0xf045, 0xf046, 0xf047,
0xf048, 0xf049, 0xf000, 0xf0f4, 0xf0f6, 0xf0f2, 0xf0f3, 0xf0f5,
0xf07d, 0xf04a, 0xf04b, 0xf04c, 0xf04d, 0xf04e, 0xf04f, 0xf050,
0xf051, 0xf052, 0xf0b9, 0xf0fb, 0xf0fc, 0xf0f9, 0xf0fa, 0xf0ff,
0xf05c, 0xf0f7, 0xf053, 0xf054, 0xf055, 0xf056, 0xf057, 0xf058,
0xf059, 0xf05a, 0xf0b2, 0xf0d4, 0xf0d6, 0xf0d2, 0xf0d3, 0xf0d5,
0xf030, 0xf031, 0xf032, 0xf033, 0xf034, 0xf035, 0xf036, 0xf037,
0xf038, 0xf039, 0xf0b3, 0xf0db, 0xf0dc, 0xf0d9, 0xf0da, 0xf000,
};
static u_short ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf11f, 0xf120, 0xf121, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf01a, 0xf003, 0xf212, 0xf004, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf109, 0xf10a, 0xf206, 0xf00a, 0xf200, 0xf200,
};
static u_short shift_ctrl_map[NR_KEYS] = {
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf10c, 0xf10d, 0xf10e, 0xf10f, 0xf110, 0xf111, 0xf112,
0xf113, 0xf11e, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
0xf200, 0xf100, 0xf101, 0xf211, 0xf103, 0xf104, 0xf105, 0xf20b,
0xf20a, 0xf108, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200, 0xf200,
};
ushort *key_maps[MAX_NR_KEYMAPS] = {
plain_map, shift_map, NULL, NULL,
ctrl_map, shift_ctrl_map, NULL,
};
unsigned int keymap_count = 4;
/*
* Philosophy: most people do not define more strings, but those who do
* often want quite a lot of string space. So, we statically allocate
* the default and allocate dynamically in chunks of 512 bytes.
*/
char func_buf[] = {
'\033', '[', '[', 'A', 0,
'\033', '[', '[', 'B', 0,
'\033', '[', '[', 'C', 0,
'\033', '[', '[', 'D', 0,
'\033', '[', '[', 'E', 0,
'\033', '[', '1', '7', '~', 0,
'\033', '[', '1', '8', '~', 0,
'\033', '[', '1', '9', '~', 0,
'\033', '[', '2', '0', '~', 0,
'\033', '[', '2', '1', '~', 0,
'\033', '[', '2', '3', '~', 0,
'\033', '[', '2', '4', '~', 0,
'\033', '[', '2', '5', '~', 0,
'\033', '[', '2', '6', '~', 0,
'\033', '[', '2', '8', '~', 0,
'\033', '[', '2', '9', '~', 0,
'\033', '[', '3', '1', '~', 0,
'\033', '[', '3', '2', '~', 0,
'\033', '[', '3', '3', '~', 0,
'\033', '[', '3', '4', '~', 0,
};
char *funcbufptr = func_buf;
int funcbufsize = sizeof(func_buf);
int funcbufleft = 0; /* space left */
char *func_table[MAX_NR_FUNC] = {
func_buf + 0,
func_buf + 5,
func_buf + 10,
func_buf + 15,
func_buf + 20,
func_buf + 25,
func_buf + 31,
func_buf + 37,
func_buf + 43,
func_buf + 49,
func_buf + 55,
func_buf + 61,
func_buf + 67,
func_buf + 73,
func_buf + 79,
func_buf + 85,
func_buf + 91,
func_buf + 97,
func_buf + 103,
func_buf + 109,
NULL,
};
struct kbdiacruc accent_table[MAX_DIACR] = {
{'^', 'c', 0003}, {'^', 'd', 0004},
{'^', 'z', 0032}, {'^', 0012, 0000},
};
unsigned int accent_table_size = 4;
| gpl-2.0 |
lorenzo-stoakes/linux-historical | arch/arm/mm/cache-l2x0.c | 85 | 49856 | /*
* arch/arm/mm/cache-l2x0.c - L210/L220/L310 cache controller support
*
* Copyright (C) 2007 ARM Limited
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/log2.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <asm/cacheflush.h>
#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"
struct l2c_init_data {
const char *type;
unsigned way_size_0;
unsigned num_lock;
void (*of_parse)(const struct device_node *, u32 *, u32 *);
void (*enable)(void __iomem *, unsigned);
void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
void (*save)(void __iomem *);
void (*configure)(void __iomem *);
void (*unlock)(void __iomem *, unsigned);
struct outer_cache_fns outer_cache;
};
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
static const struct l2c_init_data *l2x0_data;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask; /* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
struct l2x0_regs l2x0_saved_regs;
/*
* Common code for all cache controllers.
*/
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
/* wait for cache operation by line or way to complete */
while (readl_relaxed(reg) & mask)
cpu_relax();
}
/*
* By default, we write directly to secure registers. Platforms must
* override this if they are running non-secure.
*/
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
if (val == readl_relaxed(base + reg))
return;
if (outer_cache.write_sec)
outer_cache.write_sec(val, reg);
else
writel_relaxed(val, base + reg);
}
/*
* This should only be called when we have a requirement that the
* register be written due to a work-around, as platforms running
* in non-secure mode may not be able to access this register.
*/
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}
static void __l2c_op_way(void __iomem *reg)
{
writel_relaxed(l2x0_way_mask, reg);
l2c_wait_mask(reg, l2x0_way_mask);
}
static inline void l2c_unlock(void __iomem *base, unsigned num)
{
unsigned i;
for (i = 0; i < num; i++) {
writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
i * L2X0_LOCKDOWN_STRIDE);
writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
i * L2X0_LOCKDOWN_STRIDE);
}
}
static void l2c_configure(void __iomem *base)
{
l2c_write_sec(l2x0_saved_regs.aux_ctrl, base, L2X0_AUX_CTRL);
}
/*
* Enable the L2 cache controller. This function must only be
* called when the cache controller is known to be disabled.
*/
static void l2c_enable(void __iomem *base, unsigned num_lock)
{
unsigned long flags;
if (outer_cache.configure)
outer_cache.configure(&l2x0_saved_regs);
else
l2x0_data->configure(base);
l2x0_data->unlock(base, num_lock);
local_irq_save(flags);
__l2c_op_way(base + L2X0_INV_WAY);
writel_relaxed(0, base + sync_reg_offset);
l2c_wait_mask(base + sync_reg_offset, 1);
local_irq_restore(flags);
l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}
static void l2c_disable(void)
{
void __iomem *base = l2x0_base;
outer_cache.flush_all();
l2c_write_sec(0, base, L2X0_CTRL);
dsb(st);
}
static void l2c_save(void __iomem *base)
{
l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}
static void l2c_resume(void)
{
void __iomem *base = l2x0_base;
/* Do not touch the controller if already enabled. */
if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
l2c_enable(base, l2x0_data->num_lock);
}
/*
* L2C-210 specific code.
*
* The L2C-2x0 PA, set/way and sync operations are atomic, but we must
* ensure that no background operation is running. The way operations
* are all background tasks.
*
* While a background operation is in progress, any new operation is
* ignored (it is unspecified whether this causes an error). Thankfully,
* it is not used on SMP.
*
* Never has a different sync register other than L2X0_CACHE_SYNC, but
* we use sync_reg_offset here so we can share some of this with L2C-310.
*/
static void __l2c210_cache_sync(void __iomem *base)
{
writel_relaxed(0, base + sync_reg_offset);
}
static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
unsigned long end)
{
while (start < end) {
writel_relaxed(start, reg);
start += CACHE_LINE_SIZE;
}
}
static void l2c210_inv_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
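/*
* Partial cache lines at either end of the range are cleaned and
* invalidated rather than just invalidated, so that valid data
* sharing those lines is written back instead of being discarded.
*/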
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
}
__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
__l2c210_cache_sync(base);
}
static void l2c210_clean_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
start &= ~(CACHE_LINE_SIZE - 1);
__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
__l2c210_cache_sync(base);
}
static void l2c210_flush_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
start &= ~(CACHE_LINE_SIZE - 1);
__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
__l2c210_cache_sync(base);
}
static void l2c210_flush_all(void)
{
void __iomem *base = l2x0_base;
BUG_ON(!irqs_disabled());
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
__l2c210_cache_sync(base);
}
static void l2c210_sync(void)
{
__l2c210_cache_sync(l2x0_base);
}
static const struct l2c_init_data l2c210_data __initconst = {
.type = "L2C-210",
.way_size_0 = SZ_8K,
.num_lock = 1,
.enable = l2c_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
.flush_range = l2c210_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c_disable,
.sync = l2c210_sync,
.resume = l2c_resume,
},
};
/*
* L2C-220 specific code.
*
* All operations are background operations: they have to be waited for.
* Conflicting requests generate a slave error (which will cause an
* imprecise abort.) Never uses sync_reg_offset, so we hard-code the
* sync register here.
*
* However, we can re-use the l2c_resume call.
*/
static inline void __l2c220_cache_sync(void __iomem *base)
{
writel_relaxed(0, base + L2X0_CACHE_SYNC);
l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}
static void l2c220_op_way(void __iomem *base, unsigned reg)
{
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2c_op_way(base + reg);
__l2c220_cache_sync(base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
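/*
* Operate on a PA range in blocks of at most 4096 bytes, waiting for
* the controller to become idle before each line operation. The lock
* is briefly dropped between blocks to bound interrupt-off latency.
*/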
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
unsigned long end, unsigned long flags)
{
raw_spinlock_t *lock = &l2x0_lock;
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
while (start < blk_end) {
l2c_wait_mask(reg, 1);
writel_relaxed(start, reg);
start += CACHE_LINE_SIZE;
}
if (blk_end < end) {
raw_spin_unlock_irqrestore(lock, flags);
raw_spin_lock_irqsave(lock, flags);
}
}
return flags;
}
static void l2c220_inv_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
if ((start | end) & (CACHE_LINE_SIZE - 1)) {
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
}
}
flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
start, end, flags);
l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
__l2c220_cache_sync(base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_clean_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
start &= ~(CACHE_LINE_SIZE - 1);
if ((end - start) >= l2x0_size) {
l2c220_op_way(base, L2X0_CLEAN_WAY);
return;
}
raw_spin_lock_irqsave(&l2x0_lock, flags);
flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
start, end, flags);
l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
__l2c220_cache_sync(base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_flush_range(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
unsigned long flags;
start &= ~(CACHE_LINE_SIZE - 1);
if ((end - start) >= l2x0_size) {
l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
return;
}
raw_spin_lock_irqsave(&l2x0_lock, flags);
flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
start, end, flags);
l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
__l2c220_cache_sync(base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_flush_all(void)
{
l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}
static void l2c220_sync(void)
{
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2c220_cache_sync(l2x0_base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void l2c220_enable(void __iomem *base, unsigned num_lock)
{
/*
* Always enable non-secure access to the lockdown registers -
* we write to them as part of the L2C enable sequence so they
* need to be accessible.
*/
l2x0_saved_regs.aux_ctrl |= L220_AUX_CTRL_NS_LOCKDOWN;
l2c_enable(base, num_lock);
}
static void l2c220_unlock(void __iomem *base, unsigned num_lock)
{
if (readl_relaxed(base + L2X0_AUX_CTRL) & L220_AUX_CTRL_NS_LOCKDOWN)
l2c_unlock(base, num_lock);
}
static const struct l2c_init_data l2c220_data = {
.type = "L2C-220",
.way_size_0 = SZ_8K,
.num_lock = 1,
.enable = l2c220_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c220_unlock,
.outer_cache = {
.inv_range = l2c220_inv_range,
.clean_range = l2c220_clean_range,
.flush_range = l2c220_flush_range,
.flush_all = l2c220_flush_all,
.disable = l2c_disable,
.sync = l2c220_sync,
.resume = l2c_resume,
},
};
/*
* L2C-310 specific code.
*
* Very similar to L2C-210, the PA, set/way and sync operations are atomic,
* and the way operations are all background tasks. However, issuing an
* operation while a background operation is in progress results in a
* SLVERR response. We can reuse:
*
* __l2c210_cache_sync (using sync_reg_offset)
* l2c210_sync
* l2c210_inv_range (if 588369 is not applicable)
* l2c210_clean_range
* l2c210_flush_range (if 588369 is not applicable)
* l2c210_flush_all (if 727915 is not applicable)
*
* Errata:
* 588369: PL310 R0P0->R1P0, fixed R2P0.
* Affects: all clean+invalidate operations
* clean and invalidate skips the invalidate step, so we need to issue
* separate operations. We also require the above debug workaround
* enclosing this code fragment on affected parts. On unaffected parts,
* we must not use this workaround without the debug register writes
* to avoid exposing a problem similar to 727915.
*
* 727915: PL310 R2P0->R3P0, fixed R3P1.
* Affects: clean+invalidate by way
* clean and invalidate by way runs in the background, and a store can
* hit the line between the clean operation and invalidate operation,
* resulting in the store being lost.
*
* 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
* Affects: 8x64-bit (double fill) line fetches
* double fill line fetches can fail to cause dirty data to be evicted
* from the cache before the new data overwrites the second line.
*
* 753970: PL310 R3P0, fixed R3P1.
* Affects: sync
* prevents merging writes after the sync operation, until another L2C
* operation is performed (or a number of other conditions.)
*
* 769419: PL310 R0P0->R3P1, fixed R3P2.
* Affects: store buffer
* store buffer is not automatically drained.
*/
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
void __iomem *base = l2x0_base;
if ((start | end) & (CACHE_LINE_SIZE - 1)) {
unsigned long flags;
/* Erratum 588369 for both clean+invalidate operations */
raw_spin_lock_irqsave(&l2x0_lock, flags);
l2c_set_debug(base, 0x03);
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
writel_relaxed(start, base + L2X0_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
writel_relaxed(end, base + L2X0_INV_LINE_PA);
}
l2c_set_debug(base, 0x00);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
__l2c210_cache_sync(base);
}
static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
raw_spinlock_t *lock = &l2x0_lock;
unsigned long flags;
void __iomem *base = l2x0_base;
raw_spin_lock_irqsave(lock, flags);
while (start < end) {
unsigned long blk_end = start + min(end - start, 4096UL);
l2c_set_debug(base, 0x03);
while (start < blk_end) {
writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
writel_relaxed(start, base + L2X0_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}
l2c_set_debug(base, 0x00);
if (blk_end < end) {
raw_spin_unlock_irqrestore(lock, flags);
raw_spin_lock_irqsave(lock, flags);
}
}
raw_spin_unlock_irqrestore(lock, flags);
__l2c210_cache_sync(base);
}
static void l2c310_flush_all_erratum(void)
{
void __iomem *base = l2x0_base;
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
l2c_set_debug(base, 0x03);
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
l2c_set_debug(base, 0x00);
__l2c210_cache_sync(base);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void __init l2c310_save(void __iomem *base)
{
unsigned revision;
l2c_save(base);
l2x0_saved_regs.tag_latency = readl_relaxed(base +
L310_TAG_LATENCY_CTRL);
l2x0_saved_regs.data_latency = readl_relaxed(base +
L310_DATA_LATENCY_CTRL);
l2x0_saved_regs.filter_end = readl_relaxed(base +
L310_ADDR_FILTER_END);
l2x0_saved_regs.filter_start = readl_relaxed(base +
L310_ADDR_FILTER_START);
revision = readl_relaxed(base + L2X0_CACHE_ID) &
L2X0_CACHE_ID_RTL_MASK;
/* From r2p0, there is Prefetch offset/control register */
if (revision >= L310_CACHE_ID_RTL_R2P0)
l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
L310_PREFETCH_CTRL);
/* From r3p0, there is Power control register */
if (revision >= L310_CACHE_ID_RTL_R3P0)
l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
L310_POWER_CTRL);
}
static void l2c310_configure(void __iomem *base)
{
unsigned revision;
l2c_configure(base);
/* restore pl310 setup */
l2c_write_sec(l2x0_saved_regs.tag_latency, base,
L310_TAG_LATENCY_CTRL);
l2c_write_sec(l2x0_saved_regs.data_latency, base,
L310_DATA_LATENCY_CTRL);
l2c_write_sec(l2x0_saved_regs.filter_end, base,
L310_ADDR_FILTER_END);
l2c_write_sec(l2x0_saved_regs.filter_start, base,
L310_ADDR_FILTER_START);
revision = readl_relaxed(base + L2X0_CACHE_ID) &
L2X0_CACHE_ID_RTL_MASK;
if (revision >= L310_CACHE_ID_RTL_R2P0)
l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
L310_PREFETCH_CTRL);
if (revision >= L310_CACHE_ID_RTL_R3P0)
l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
L310_POWER_CTRL);
}
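/*
* CPU notifier: toggle the Cortex-A9 auxiliary control bits used
* together with the L2C-310 (full line of zeros plus the related
* prefetch bits) as CPUs are brought online or taken down.
*/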
static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, void *data)
{
switch (act & ~CPU_TASKS_FROZEN) {
case CPU_STARTING:
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
break;
case CPU_DYING:
set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
break;
}
return NOTIFY_OK;
}
static void __init l2c310_enable(void __iomem *base, unsigned num_lock)
{
unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
bool cortex_a9 = read_cpuid_part() == ARM_CPU_PART_CORTEX_A9;
u32 aux = l2x0_saved_regs.aux_ctrl;
if (rev >= L310_CACHE_ID_RTL_R2P0) {
if (cortex_a9) {
aux |= L310_AUX_CTRL_EARLY_BRESP;
pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
aux &= ~L310_AUX_CTRL_EARLY_BRESP;
}
}
if (cortex_a9) {
u32 aux_cur = readl_relaxed(base + L2X0_AUX_CTRL);
u32 acr = get_auxcr();
pr_debug("Cortex-A9 ACR=0x%08x\n", acr);
if (acr & BIT(3) && !(aux_cur & L310_AUX_CTRL_FULL_LINE_ZERO))
pr_err("L2C-310: full line of zeros enabled in Cortex-A9 but not L2C-310 - invalid\n");
if (aux & L310_AUX_CTRL_FULL_LINE_ZERO && !(acr & BIT(3)))
pr_err("L2C-310: enabling full line of zeros but not enabled in Cortex-A9\n");
if (!(aux & L310_AUX_CTRL_FULL_LINE_ZERO) && !outer_cache.write_sec) {
aux |= L310_AUX_CTRL_FULL_LINE_ZERO;
pr_info("L2C-310 full line of zeros enabled for Cortex-A9\n");
}
} else if (aux & (L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP)) {
pr_err("L2C-310: disabling Cortex-A9 specific feature bits\n");
aux &= ~(L310_AUX_CTRL_FULL_LINE_ZERO | L310_AUX_CTRL_EARLY_BRESP);
}
/* r3p0 or later has power control register */
if (rev >= L310_CACHE_ID_RTL_R3P0)
l2x0_saved_regs.pwr_ctrl = L310_DYNAMIC_CLK_GATING_EN |
L310_STNDBY_MODE_EN;
/*
* Always enable non-secure access to the lockdown registers -
* we write to them as part of the L2C enable sequence so they
* need to be accessible.
*/
l2x0_saved_regs.aux_ctrl = aux | L310_AUX_CTRL_NS_LOCKDOWN;
l2c_enable(base, num_lock);
/* Read back resulting AUX_CTRL value as it could have been altered. */
aux = readl_relaxed(base + L2X0_AUX_CTRL);
if (aux & (L310_AUX_CTRL_DATA_PREFETCH | L310_AUX_CTRL_INSTR_PREFETCH)) {
u32 prefetch = readl_relaxed(base + L310_PREFETCH_CTRL);
pr_info("L2C-310 %s%s prefetch enabled, offset %u lines\n",
aux & L310_AUX_CTRL_INSTR_PREFETCH ? "I" : "",
aux & L310_AUX_CTRL_DATA_PREFETCH ? "D" : "",
1 + (prefetch & L310_PREFETCH_CTRL_OFFSET_MASK));
}
/* r3p0 or later has power control register */
if (rev >= L310_CACHE_ID_RTL_R3P0) {
u32 power_ctrl;
power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
}
if (aux & L310_AUX_CTRL_FULL_LINE_ZERO) {
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
cpu_notifier(l2c310_cpu_enable_flz, 0);
}
}
static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
struct outer_cache_fns *fns)
{
unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
const char *errata[8];
unsigned n = 0;
if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
revision < L310_CACHE_ID_RTL_R2P0 &&
/* For bcm compatibility */
fns->inv_range == l2c210_inv_range) {
fns->inv_range = l2c310_inv_range_erratum;
fns->flush_range = l2c310_flush_range_erratum;
errata[n++] = "588369";
}
if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
revision >= L310_CACHE_ID_RTL_R2P0 &&
revision < L310_CACHE_ID_RTL_R3P1) {
fns->flush_all = l2c310_flush_all_erratum;
errata[n++] = "727915";
}
if (revision >= L310_CACHE_ID_RTL_R3P0 &&
revision < L310_CACHE_ID_RTL_R3P2) {
u32 val = l2x0_saved_regs.prefetch_ctrl;
/* Bit 23 may not strictly be required here, but the iMX6 code clears it too */
if (val & (BIT(30) | BIT(23))) {
val &= ~(BIT(30) | BIT(23));
l2x0_saved_regs.prefetch_ctrl = val;
errata[n++] = "752271";
}
}
if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
revision == L310_CACHE_ID_RTL_R3P0) {
sync_reg_offset = L2X0_DUMMY_REG;
errata[n++] = "753970";
}
if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
errata[n++] = "769419";
if (n) {
unsigned i;
pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
for (i = 0; i < n; i++)
pr_cont(" %s", errata[i]);
pr_cont(" enabled\n");
}
}
static void l2c310_disable(void)
{
/*
* If full-line-of-zeros is enabled, we must first disable it in the
* Cortex-A9 auxiliary control register before disabling the L2 cache.
*/
if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
set_auxcr(get_auxcr() & ~(BIT(3) | BIT(2) | BIT(1)));
l2c_disable();
}
static void l2c310_resume(void)
{
l2c_resume();
/* Re-enable full-line-of-zeros for Cortex-A9 */
if (l2x0_saved_regs.aux_ctrl & L310_AUX_CTRL_FULL_LINE_ZERO)
set_auxcr(get_auxcr() | BIT(3) | BIT(2) | BIT(1));
}
static void l2c310_unlock(void __iomem *base, unsigned num_lock)
{
if (readl_relaxed(base + L2X0_AUX_CTRL) & L310_AUX_CTRL_NS_LOCKDOWN)
l2c_unlock(base, num_lock);
}
static const struct l2c_init_data l2c310_init_fns __initconst = {
.type = "L2C-310",
.way_size_0 = SZ_8K,
.num_lock = 8,
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
.flush_range = l2c210_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c310_disable,
.sync = l2c210_sync,
.resume = l2c310_resume,
},
};
static int __init __l2c_init(const struct l2c_init_data *data,
u32 aux_val, u32 aux_mask, u32 cache_id, bool nosync)
{
struct outer_cache_fns fns;
unsigned way_size_bits, ways;
u32 aux, old_aux;
/*
* Save the pointer globally so that callbacks which do not receive
* context from callers can access the structure.
*/
l2x0_data = kmemdup(data, sizeof(*data), GFP_KERNEL);
if (!l2x0_data)
return -ENOMEM;
/*
* Sanity check the aux values. aux_mask is the bits we preserve
* from reading the hardware register, and aux_val is the bits we
* set.
*/
if (aux_val & aux_mask)
pr_alert("L2C: platform provided aux values permit register corruption.\n");
old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
if (old_aux != aux)
pr_warn("L2C: DT/platform modifies aux control register: 0x%08x -> 0x%08x\n",
old_aux, aux);
/* Determine the number of ways */
switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
case L2X0_CACHE_ID_PART_L310:
if ((aux_val | ~aux_mask) & (L2C_AUX_CTRL_WAY_SIZE_MASK | L310_AUX_CTRL_ASSOCIATIVITY_16))
pr_warn("L2C: DT/platform tries to modify or specify cache size\n");
if (aux & (1 << 16))
ways = 16;
else
ways = 8;
break;
case L2X0_CACHE_ID_PART_L210:
case L2X0_CACHE_ID_PART_L220:
ways = (aux >> 13) & 0xf;
break;
case AURORA_CACHE_ID:
ways = (aux >> 13) & 0xf;
ways = 2 << ((ways + 1) >> 2);
break;
default:
/* Assume unknown chips have 8 ways */
ways = 8;
break;
}
l2x0_way_mask = (1 << ways) - 1;
/*
* way_size_0 is the size that a way_size value of zero would be
* given the calculation: way_size = way_size_0 << way_size_bits.
* So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
* then way_size_0 would be 8k.
*
* L2 cache size = number of ways * way size.
*/
way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
L2C_AUX_CTRL_WAY_SIZE_SHIFT;
l2x0_size = ways * (data->way_size_0 << way_size_bits);
fns = data->outer_cache;
fns.write_sec = outer_cache.write_sec;
fns.configure = outer_cache.configure;
if (data->fixup)
data->fixup(l2x0_base, cache_id, &fns);
if (nosync) {
pr_info("L2C: disabling outer sync\n");
fns.sync = NULL;
}
/*
* Check if the l2x0 controller is already enabled. If we are booting
* in non-secure mode, accessing the registers below will fault.
*/
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
l2x0_saved_regs.aux_ctrl = aux;
data->enable(l2x0_base, data->num_lock);
}
outer_cache = fns;
/*
* It is strange to save the register state before initialisation,
* but hey, this is what the DT implementations decided to do.
*/
if (data->save)
data->save(l2x0_base);
/* Re-read it in case some bits are reserved. */
aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
pr_info("%s cache controller enabled, %d ways, %d kB\n",
data->type, ways, l2x0_size >> 10);
pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
data->type, cache_id, aux);
return 0;
}
void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
const struct l2c_init_data *data;
u32 cache_id;
l2x0_base = base;
cache_id = readl_relaxed(base + L2X0_CACHE_ID);
switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
default:
case L2X0_CACHE_ID_PART_L210:
data = &l2c210_data;
break;
case L2X0_CACHE_ID_PART_L220:
data = &l2c220_data;
break;
case L2X0_CACHE_ID_PART_L310:
data = &l2c310_init_fns;
break;
}
/* Read back current (default) hardware configuration */
if (data->save)
data->save(l2x0_base);
__l2c_init(data, aux_val, aux_mask, cache_id, false);
}
#ifdef CONFIG_OF
static int l2_wt_override;
/* Aurora doesn't have the cache ID register available, so we have to
* pass it through the device tree */
static u32 cache_id_part_number_from_dt;
/**
* l2x0_cache_size_of_parse() - read cache size parameters from DT
* @np: the device tree node for the l2 cache
* @aux_val: pointer to machine-supplied auxiliary register value, to
* be augmented by the call (bits to be set to 1)
* @aux_mask: pointer to machine-supplied auxiliary register mask, to
* be augmented by the call (bits to be set to 0)
* @associativity: variable to return the calculated associativity in
* @max_way_size: the maximum size in bytes for the cache ways
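*
* Example (hypothetical values): cache-size = <0x80000> (512 KiB),
* cache-sets = <2048> and cache-line-size = <32> give a way size of
* 2048 * 32 = 64 KiB and an associativity of 512 KiB / 64 KiB = 8.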
*/
static int __init l2x0_cache_size_of_parse(const struct device_node *np,
u32 *aux_val, u32 *aux_mask,
u32 *associativity,
u32 max_way_size)
{
u32 mask = 0, val = 0;
u32 cache_size = 0, sets = 0;
u32 way_size_bits = 1;
u32 way_size = 0;
u32 block_size = 0;
u32 line_size = 0;
of_property_read_u32(np, "cache-size", &cache_size);
of_property_read_u32(np, "cache-sets", &sets);
of_property_read_u32(np, "cache-block-size", &block_size);
of_property_read_u32(np, "cache-line-size", &line_size);
if (!cache_size || !sets)
return -ENODEV;
/* All these L2 caches actually have a line size equal to the block size */
if (!line_size) {
if (block_size) {
/* If linesize is not given, it is equal to blocksize */
line_size = block_size;
} else {
/* Fall back to known size */
pr_warn("L2C OF: no cache block/line size given: "
"falling back to default size %d bytes\n",
CACHE_LINE_SIZE);
line_size = CACHE_LINE_SIZE;
}
}
if (line_size != CACHE_LINE_SIZE)
pr_warn("L2C OF: DT supplied line size %d bytes does "
"not match hardware line size of %d bytes\n",
line_size,
CACHE_LINE_SIZE);
/*
* Since:
* set size = cache size / sets
* ways = cache size / (sets * line size)
* way size = cache size / (cache size / (sets * line size))
* way size = sets * line size
* associativity = ways = cache size / way size
*/
way_size = sets * line_size;
*associativity = cache_size / way_size;
if (way_size > max_way_size) {
pr_err("L2C OF: set size %dKB is too large\n", way_size);
return -EINVAL;
}
pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
cache_size, cache_size >> 10);
pr_info("L2C OF: override line size: %d bytes\n", line_size);
pr_info("L2C OF: override way size: %d bytes (%dKB)\n",
way_size, way_size >> 10);
pr_info("L2C OF: override associativity: %d\n", *associativity);
/*
* Calculates the bits 17:19 to set for way size:
* 512KB -> 6, 256KB -> 5, ... 16KB -> 1
*/
way_size_bits = ilog2(way_size >> 10) - 3;
if (way_size_bits < 1 || way_size_bits > 6) {
pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
way_size);
return -EINVAL;
}
mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
val |= (way_size_bits << L2C_AUX_CTRL_WAY_SIZE_SHIFT);
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
return 0;
}
static void __init l2x0_of_parse(const struct device_node *np,
u32 *aux_val, u32 *aux_mask)
{
u32 data[2] = { 0, 0 };
u32 tag = 0;
u32 dirty = 0;
u32 val = 0, mask = 0;
u32 assoc;
int ret;
of_property_read_u32(np, "arm,tag-latency", &tag);
if (tag) {
mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
}
of_property_read_u32_array(np, "arm,data-latency",
data, ARRAY_SIZE(data));
if (data[0] && data[1]) {
mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
}
of_property_read_u32(np, "arm,dirty-latency", &dirty);
if (dirty) {
mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
}
if (of_property_read_bool(np, "arm,parity-enable")) {
mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
val |= L2C_AUX_CTRL_PARITY_ENABLE;
} else if (of_property_read_bool(np, "arm,parity-disable")) {
mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
}
if (of_property_read_bool(np, "arm,shared-override")) {
mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
}
ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
if (ret)
return;
if (assoc > 8) {
pr_err("l2x0 of: cache setting yield too high associativity\n");
pr_err("l2x0 of: %d calculated, max 8\n", assoc);
} else {
mask |= L2X0_AUX_CTRL_ASSOC_MASK;
val |= (assoc << L2X0_AUX_CTRL_ASSOC_SHIFT);
}
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
}
static const struct l2c_init_data of_l2c210_data __initconst = {
.type = "L2C-210",
.way_size_0 = SZ_8K,
.num_lock = 1,
.of_parse = l2x0_of_parse,
.enable = l2c_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
.flush_range = l2c210_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c_disable,
.sync = l2c210_sync,
.resume = l2c_resume,
},
};
static const struct l2c_init_data of_l2c220_data __initconst = {
.type = "L2C-220",
.way_size_0 = SZ_8K,
.num_lock = 1,
.of_parse = l2x0_of_parse,
.enable = l2c220_enable,
.save = l2c_save,
.configure = l2c_configure,
.unlock = l2c220_unlock,
.outer_cache = {
.inv_range = l2c220_inv_range,
.clean_range = l2c220_clean_range,
.flush_range = l2c220_flush_range,
.flush_all = l2c220_flush_all,
.disable = l2c_disable,
.sync = l2c220_sync,
.resume = l2c_resume,
},
};
static void __init l2c310_of_parse(const struct device_node *np,
u32 *aux_val, u32 *aux_mask)
{
u32 data[3] = { 0, 0, 0 };
u32 tag[3] = { 0, 0, 0 };
u32 filter[2] = { 0, 0 };
u32 assoc;
u32 prefetch;
u32 val;
int ret;
of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
if (tag[0] && tag[1] && tag[2])
l2x0_saved_regs.tag_latency =
L310_LATENCY_CTRL_RD(tag[0] - 1) |
L310_LATENCY_CTRL_WR(tag[1] - 1) |
L310_LATENCY_CTRL_SETUP(tag[2] - 1);
of_property_read_u32_array(np, "arm,data-latency",
data, ARRAY_SIZE(data));
if (data[0] && data[1] && data[2])
l2x0_saved_regs.data_latency =
L310_LATENCY_CTRL_RD(data[0] - 1) |
L310_LATENCY_CTRL_WR(data[1] - 1) |
L310_LATENCY_CTRL_SETUP(data[2] - 1);
of_property_read_u32_array(np, "arm,filter-ranges",
filter, ARRAY_SIZE(filter));
if (filter[1]) {
l2x0_saved_regs.filter_end =
ALIGN(filter[0] + filter[1], SZ_1M);
l2x0_saved_regs.filter_start = (filter[0] & ~(SZ_1M - 1))
| L310_ADDR_FILTER_EN;
}
ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
if (!ret) {
switch (assoc) {
case 16:
*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
*aux_val |= L310_AUX_CTRL_ASSOCIATIVITY_16;
*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
break;
case 8:
*aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
*aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
break;
default:
pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
assoc);
break;
}
}
if (of_property_read_bool(np, "arm,shared-override")) {
*aux_val |= L2C_AUX_CTRL_SHARED_OVERRIDE;
*aux_mask &= ~L2C_AUX_CTRL_SHARED_OVERRIDE;
}
if (of_property_read_bool(np, "arm,parity-enable")) {
*aux_val |= L2C_AUX_CTRL_PARITY_ENABLE;
*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
} else if (of_property_read_bool(np, "arm,parity-disable")) {
*aux_val &= ~L2C_AUX_CTRL_PARITY_ENABLE;
*aux_mask &= ~L2C_AUX_CTRL_PARITY_ENABLE;
}
prefetch = l2x0_saved_regs.prefetch_ctrl;
ret = of_property_read_u32(np, "arm,double-linefill", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL;
else
prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,double-linefill property value is missing\n");
}
ret = of_property_read_u32(np, "arm,double-linefill-incr", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
else
prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_INCR;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,double-linefill-incr property value is missing\n");
}
ret = of_property_read_u32(np, "arm,double-linefill-wrap", &val);
if (ret == 0) {
if (!val)
prefetch |= L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
else
prefetch &= ~L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,double-linefill-wrap property value is missing\n");
}
ret = of_property_read_u32(np, "arm,prefetch-drop", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_PREFETCH_DROP;
else
prefetch &= ~L310_PREFETCH_CTRL_PREFETCH_DROP;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,prefetch-drop property value is missing\n");
}
ret = of_property_read_u32(np, "arm,prefetch-offset", &val);
if (ret == 0) {
prefetch &= ~L310_PREFETCH_CTRL_OFFSET_MASK;
prefetch |= val & L310_PREFETCH_CTRL_OFFSET_MASK;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF arm,prefetch-offset property value is missing\n");
}
ret = of_property_read_u32(np, "prefetch-data", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_DATA_PREFETCH;
else
prefetch &= ~L310_PREFETCH_CTRL_DATA_PREFETCH;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-data property value is missing\n");
}
ret = of_property_read_u32(np, "prefetch-instr", &val);
if (ret == 0) {
if (val)
prefetch |= L310_PREFETCH_CTRL_INSTR_PREFETCH;
else
prefetch &= ~L310_PREFETCH_CTRL_INSTR_PREFETCH;
} else if (ret != -EINVAL) {
pr_err("L2C-310 OF prefetch-instr property value is missing\n");
}
l2x0_saved_regs.prefetch_ctrl = prefetch;
}
static const struct l2c_init_data of_l2c310_data __initconst = {
.type = "L2C-310",
.way_size_0 = SZ_8K,
.num_lock = 8,
.of_parse = l2c310_of_parse,
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
.flush_range = l2c210_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c310_disable,
.sync = l2c210_sync,
.resume = l2c310_resume,
},
};
/*
* This is a variant of the of_l2c310_data with .sync set to
* NULL. Outer sync operations are not needed when the system is I/O
* coherent, and potentially harmful in certain situations (PCIe/PL310
* deadlock on Armada 375/38x due to hardware I/O coherency). The
* other operations are kept because they are infrequent (therefore do
* not cause the deadlock in practice) and needed for secondary CPU
* boot and other power management activities.
*/
static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
.type = "L2C-310 Coherent",
.way_size_0 = SZ_8K,
.num_lock = 8,
.of_parse = l2c310_of_parse,
.enable = l2c310_enable,
.fixup = l2c310_fixup,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = l2c210_inv_range,
.clean_range = l2c210_clean_range,
.flush_range = l2c210_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c310_disable,
.resume = l2c310_resume,
},
};
/*
* Note that the end addresses passed to Linux primitives are
* noninclusive, while the hardware cache range operations use
* inclusive start and end addresses.
*/
static unsigned long aurora_range_end(unsigned long start, unsigned long end)
{
/*
* Limit the number of cache lines processed at once,
* since cache range operations stall the CPU pipeline
* until completion.
*/
if (end > start + MAX_RANGE_SIZE)
end = start + MAX_RANGE_SIZE;
/*
* Cache range operations can't straddle a page boundary.
*/
if (end > PAGE_ALIGN(start+1))
end = PAGE_ALIGN(start+1);
return end;
}
static void aurora_pa_range(unsigned long start, unsigned long end,
unsigned long offset)
{
void __iomem *base = l2x0_base;
unsigned long range_end;
unsigned long flags;
/*
* round the start address down and the end address up to cache line size
*/
start &= ~(CACHE_LINE_SIZE - 1);
end = ALIGN(end, CACHE_LINE_SIZE);
/*
* perform operation on all full cache lines between 'start' and 'end'
*/
while (start < end) {
range_end = aurora_range_end(start, end);
raw_spin_lock_irqsave(&l2x0_lock, flags);
writel_relaxed(start, base + AURORA_RANGE_BASE_ADDR_REG);
writel_relaxed(range_end - CACHE_LINE_SIZE, base + offset);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
writel_relaxed(0, base + AURORA_SYNC_REG);
start = range_end;
}
}
static void aurora_inv_range(unsigned long start, unsigned long end)
{
aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
}
static void aurora_clean_range(unsigned long start, unsigned long end)
{
/*
* If L2 is forced to WT, the L2 will always be clean and we
* don't need to do anything here.
*/
if (!l2_wt_override)
aurora_pa_range(start, end, AURORA_CLEAN_RANGE_REG);
}
static void aurora_flush_range(unsigned long start, unsigned long end)
{
if (l2_wt_override)
aurora_pa_range(start, end, AURORA_INVAL_RANGE_REG);
else
aurora_pa_range(start, end, AURORA_FLUSH_RANGE_REG);
}
static void aurora_flush_all(void)
{
void __iomem *base = l2x0_base;
unsigned long flags;
/* clean all ways */
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
writel_relaxed(0, base + AURORA_SYNC_REG);
}
static void aurora_cache_sync(void)
{
writel_relaxed(0, l2x0_base + AURORA_SYNC_REG);
}
static void aurora_disable(void)
{
void __iomem *base = l2x0_base;
unsigned long flags;
raw_spin_lock_irqsave(&l2x0_lock, flags);
__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
writel_relaxed(0, base + AURORA_SYNC_REG);
l2c_write_sec(0, base, L2X0_CTRL);
dsb(st);
raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
static void aurora_save(void __iomem *base)
{
l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}
/*
* For an Aurora cache in no-outer mode, enable broadcasting of cache
* commands to L2 via the CP15 coprocessor.
*/
static void __init aurora_enable_no_outer(void __iomem *base,
unsigned num_lock)
{
u32 u;
asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
u |= AURORA_CTRL_FW; /* Set the FW bit */
asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));
isb();
l2c_enable(base, num_lock);
}
static void __init aurora_fixup(void __iomem *base, u32 cache_id,
struct outer_cache_fns *fns)
{
sync_reg_offset = AURORA_SYNC_REG;
}
static void __init aurora_of_parse(const struct device_node *np,
u32 *aux_val, u32 *aux_mask)
{
u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
u32 mask = AURORA_ACR_REPLACEMENT_MASK;
of_property_read_u32(np, "cache-id-part",
&cache_id_part_number_from_dt);
/* Determine and save the write policy */
l2_wt_override = of_property_read_bool(np, "wt-override");
if (l2_wt_override) {
val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
}
*aux_val &= ~mask;
*aux_val |= val;
*aux_mask &= ~mask;
}
static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
.type = "Aurora",
.way_size_0 = SZ_4K,
.num_lock = 4,
.of_parse = aurora_of_parse,
.enable = l2c_enable,
.fixup = aurora_fixup,
.save = aurora_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.inv_range = aurora_inv_range,
.clean_range = aurora_clean_range,
.flush_range = aurora_flush_range,
.flush_all = aurora_flush_all,
.disable = aurora_disable,
.sync = aurora_cache_sync,
.resume = l2c_resume,
},
};
static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
.type = "Aurora",
.way_size_0 = SZ_4K,
.num_lock = 4,
.of_parse = aurora_of_parse,
.enable = aurora_enable_no_outer,
.fixup = aurora_fixup,
.save = aurora_save,
.configure = l2c_configure,
.unlock = l2c_unlock,
.outer_cache = {
.resume = l2c_resume,
},
};
/*
* For certain Broadcom SoCs, depending on the address range, different offsets
* need to be added to the address before passing it to L2 for
* invalidation/clean/flush
*
* Section  Address Range                Offset        EMI
*   1      0x00000000 - 0x3FFFFFFF     0x80000000    VC
*   2      0x40000000 - 0xBFFFFFFF     0x40000000    SYS
*   3      0xC0000000 - 0xFFFFFFFF     0x80000000    VC
*
* When the start and end addresses fall in two different sections, we
* need to break the L2 operation into two, each within its own section.
* For example, if we need to invalidate a range that starts at 0xBFFF0000
* and ends at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
* and 2) 0xC0000000 - 0xC0001000.
*
* Note 1:
* By breaking a single L2 operation into two, we may potentially suffer some
* performance hit, but keep in mind the cross section case is very rare
*
* Note 2:
* We do not need to handle the case when the start address is in
* Section 1 and the end address is in Section 3, since it is not a valid use
* case
*
* Note 3:
* Section 1 in practical terms can no longer be used on rev A2. Because of
* that the code does not need to handle section 1 at all.
*
*/
#define BCM_SYS_EMI_START_ADDR 0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR 0xC0000000UL
#define BCM_SYS_EMI_OFFSET 0x40000000UL
#define BCM_VC_EMI_OFFSET 0x80000000UL
static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
return (addr >= BCM_SYS_EMI_START_ADDR) &&
(addr < BCM_VC_EMI_SEC3_START_ADDR);
}
static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
if (bcm_addr_is_sys_emi(addr))
return addr + BCM_SYS_EMI_OFFSET;
else
return addr + BCM_VC_EMI_OFFSET;
}
static void bcm_inv_range(unsigned long start, unsigned long end)
{
unsigned long new_start, new_end;
BUG_ON(start < BCM_SYS_EMI_START_ADDR);
if (unlikely(end <= start))
return;
new_start = bcm_l2_phys_addr(start);
new_end = bcm_l2_phys_addr(end);
/* normal case, no cross section between start and end */
if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
l2c210_inv_range(new_start, new_end);
return;
}
/* They cross sections, so it can only be a cross from section
* 2 to section 3
*/
l2c210_inv_range(new_start,
bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
new_end);
}
static void bcm_clean_range(unsigned long start, unsigned long end)
{
unsigned long new_start, new_end;
BUG_ON(start < BCM_SYS_EMI_START_ADDR);
if (unlikely(end <= start))
return;
new_start = bcm_l2_phys_addr(start);
new_end = bcm_l2_phys_addr(end);
/* normal case, no cross section between start and end */
if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
l2c210_clean_range(new_start, new_end);
return;
}
/* They cross sections, so it can only be a cross from section
* 2 to section 3
*/
l2c210_clean_range(new_start,
bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
new_end);
}
static void bcm_flush_range(unsigned long start, unsigned long end)
{
unsigned long new_start, new_end;
BUG_ON(start < BCM_SYS_EMI_START_ADDR);
if (unlikely(end <= start))
return;
if ((end - start) >= l2x0_size) {
outer_cache.flush_all();
return;
}
new_start = bcm_l2_phys_addr(start);
new_end = bcm_l2_phys_addr(end);
/* normal case, no cross section between start and end */
if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
l2c210_flush_range(new_start, new_end);
return;
}
/* They cross sections, so it can only be a cross from section
* 2 to section 3
*/
l2c210_flush_range(new_start,
bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
new_end);
}
/* Broadcom L2C-310 is based on ARM's R3P2 or later and requires no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
.type = "BCM-L2C-310",
.way_size_0 = SZ_8K,
.num_lock = 8,
.of_parse = l2c310_of_parse,
.enable = l2c310_enable,
.save = l2c310_save,
.configure = l2c310_configure,
.unlock = l2c310_unlock,
.outer_cache = {
.inv_range = bcm_inv_range,
.clean_range = bcm_clean_range,
.flush_range = bcm_flush_range,
.flush_all = l2c210_flush_all,
.disable = l2c310_disable,
.sync = l2c210_sync,
.resume = l2c310_resume,
},
};
static void __init tauros3_save(void __iomem *base)
{
l2c_save(base);
l2x0_saved_regs.aux2_ctrl =
readl_relaxed(base + TAUROS3_AUX2_CTRL);
l2x0_saved_regs.prefetch_ctrl =
readl_relaxed(base + L310_PREFETCH_CTRL);
}
static void tauros3_configure(void __iomem *base)
{
l2c_configure(base);
writel_relaxed(l2x0_saved_regs.aux2_ctrl,
base + TAUROS3_AUX2_CTRL);
writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
base + L310_PREFETCH_CTRL);
}
static const struct l2c_init_data of_tauros3_data __initconst = {
.type = "Tauros3",
.way_size_0 = SZ_8K,
.num_lock = 8,
.enable = l2c_enable,
.save = tauros3_save,
.configure = tauros3_configure,
.unlock = l2c_unlock,
/* Tauros3 broadcasts L1 cache operations to L2 */
.outer_cache = {
.resume = l2c_resume,
},
};
#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
L2C_ID("arm,l210-cache", of_l2c210_data),
L2C_ID("arm,l220-cache", of_l2c220_data),
L2C_ID("arm,pl310-cache", of_l2c310_data),
L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
L2C_ID("marvell,tauros3-cache", of_tauros3_data),
/* Deprecated IDs */
L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
{}
};
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
const struct l2c_init_data *data;
struct device_node *np;
struct resource res;
u32 cache_id, old_aux;
u32 cache_level = 2;
bool nosync = false;
np = of_find_matching_node(NULL, l2x0_ids);
if (!np)
return -ENODEV;
if (of_address_to_resource(np, 0, &res))
return -ENODEV;
l2x0_base = ioremap(res.start, resource_size(&res));
if (!l2x0_base)
return -ENOMEM;
l2x0_saved_regs.phy_base = res.start;
data = of_match_node(l2x0_ids, np)->data;
if (of_device_is_compatible(np, "arm,pl310-cache") &&
of_property_read_bool(np, "arm,io-coherent"))
data = &of_l2c310_coherent_data;
old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
if (old_aux != ((old_aux & aux_mask) | aux_val)) {
pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
old_aux, (old_aux & aux_mask) | aux_val);
} else if (aux_mask != ~0U && aux_val != 0) {
pr_alert("L2C: platform provided aux values match the hardware, so have no effect. Please remove them.\n");
}
/* All L2 caches are unified, so this property should be specified */
if (!of_property_read_bool(np, "cache-unified"))
pr_err("L2C: device tree omits to specify unified cache\n");
if (of_property_read_u32(np, "cache-level", &cache_level))
pr_err("L2C: device tree omits to specify cache-level\n");
if (cache_level != 2)
pr_err("L2C: device tree specifies invalid cache level\n");
nosync = of_property_read_bool(np, "arm,outer-sync-disable");
/* Read back current (default) hardware configuration */
if (data->save)
data->save(l2x0_base);
/* L2 configuration can only be changed if the cache is disabled */
if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
if (data->of_parse)
data->of_parse(np, &aux_val, &aux_mask);
if (cache_id_part_number_from_dt)
cache_id = cache_id_part_number_from_dt;
else
cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
return __l2c_init(data, aux_val, aux_mask, cache_id, nosync);
}
#endif
| gpl-2.0 |
narantech/linux-pc64 | drivers/staging/comedi/drivers/addi_apci_3501.c | 85 | 11572 | /*
* addi_apci_3501.c
* Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module.
* Project manager: Eric Stolz
*
* ADDI-DATA GmbH
* Dieselstrasse 3
* D-77833 Ottersweier
* Tel: +49(0)7223/9493-0
* Fax: +49(0)7223/9493-92
* http://www.addi-data.com
* info@addi-data.com
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/sched.h>
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
/*
* PCI bar 1 register I/O map
*/
#define APCI3501_AO_CTRL_STATUS_REG 0x00
#define APCI3501_AO_CTRL_BIPOLAR (1 << 0)
#define APCI3501_AO_STATUS_READY (1 << 8)
#define APCI3501_AO_DATA_REG 0x04
#define APCI3501_AO_DATA_CHAN(x) ((x) << 0)
#define APCI3501_AO_DATA_VAL(x) ((x) << 8)
#define APCI3501_AO_DATA_BIPOLAR (1 << 31)
#define APCI3501_AO_TRIG_SCS_REG 0x08
#define APCI3501_TIMER_SYNC_REG 0x20
#define APCI3501_TIMER_RELOAD_REG 0x24
#define APCI3501_TIMER_TIMEBASE_REG 0x28
#define APCI3501_TIMER_CTRL_REG 0x2c
#define APCI3501_TIMER_STATUS_REG 0x30
#define APCI3501_TIMER_IRQ_REG 0x34
#define APCI3501_TIMER_WARN_RELOAD_REG 0x38
#define APCI3501_TIMER_WARN_TIMEBASE_REG 0x3c
#define APCI3501_DO_REG 0x40
#define APCI3501_DI_REG 0x50
/*
* AMCC S5933 NVRAM
*/
#define NVRAM_USER_DATA_START 0x100
#define NVCMD_BEGIN_READ (0x7 << 5)
#define NVCMD_LOAD_LOW (0x4 << 5)
#define NVCMD_LOAD_HIGH (0x5 << 5)
/*
* Function types stored in the eeprom
*/
#define EEPROM_DIGITALINPUT 0
#define EEPROM_DIGITALOUTPUT 1
#define EEPROM_ANALOGINPUT 2
#define EEPROM_ANALOGOUTPUT 3
#define EEPROM_TIMER 4
#define EEPROM_WATCHDOG 5
#define EEPROM_TIMER_WATCHDOG_COUNTER 10
struct apci3501_private {
int i_IobaseAmcc;
struct task_struct *tsk_Current;
unsigned char b_TimerSelectMode;
};
static struct comedi_lrange apci3501_ao_range = {
2, {
BIP_RANGE(10),
UNI_RANGE(10)
}
};
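/* Busy-wait until the DAC reports it is ready to accept the next value. */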
static int apci3501_wait_for_dac(struct comedi_device *dev)
{
unsigned int status;
do {
status = inl(dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
} while (!(status & APCI3501_AO_STATUS_READY));
return 0;
}
static int apci3501_ao_insn_write(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int chan = CR_CHAN(insn->chanspec);
unsigned int range = CR_RANGE(insn->chanspec);
unsigned int val = 0;
int i;
int ret;
/*
* All analog output channels have the same output range.
* 14-bit bipolar: +/-10V
* 13-bit unipolar: 0-10V
* Changing the range of one channel changes all of them!
*/
if (range) {
outl(0, dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
} else {
val |= APCI3501_AO_DATA_BIPOLAR;
outl(APCI3501_AO_CTRL_BIPOLAR,
dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
}
val |= APCI3501_AO_DATA_CHAN(chan);
for (i = 0; i < insn->n; i++) {
if (range == 1) {
if (data[i] > 0x1fff) {
dev_err(dev->class_dev,
"Unipolar resolution is only 13-bits\n");
return -EINVAL;
}
}
ret = apci3501_wait_for_dac(dev);
if (ret)
return ret;
outl(val | APCI3501_AO_DATA_VAL(data[i]),
dev->iobase + APCI3501_AO_DATA_REG);
}
return insn->n;
}
#include "addi-data/hwdrv_apci3501.c"
static int apci3501_di_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
data[1] = inl(dev->iobase + APCI3501_DI_REG) & 0x3;
return insn->n;
}
static int apci3501_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
unsigned int mask = data[0];
unsigned int bits = data[1];
s->state = inl(dev->iobase + APCI3501_DO_REG);
if (mask) {
s->state &= ~mask;
s->state |= (bits & mask);
outl(s->state, dev->iobase + APCI3501_DO_REG);
}
data[1] = s->state;
return insn->n;
}
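/* Poll the NVRAM command register until the busy bit clears. */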
static void apci3501_eeprom_wait(unsigned long iobase)
{
unsigned char val;
do {
val = inb(iobase + AMCC_OP_REG_MCSR_NVCMD);
} while (val & 0x80);
}
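/*
* Read a 16-bit word from the NVRAM user data area, one byte at a
* time (low byte first).
*/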
static unsigned short apci3501_eeprom_readw(unsigned long iobase,
unsigned short addr)
{
unsigned short val = 0;
unsigned char tmp;
unsigned char i;
/* Add the offset to the start of the user data */
addr += NVRAM_USER_DATA_START;
for (i = 0; i < 2; i++) {
/* Load the low 8 bit address */
outb(NVCMD_LOAD_LOW, iobase + AMCC_OP_REG_MCSR_NVCMD);
apci3501_eeprom_wait(iobase);
outb((addr + i) & 0xff, iobase + AMCC_OP_REG_MCSR_NVDATA);
apci3501_eeprom_wait(iobase);
/* Load the high 8 bit address */
outb(NVCMD_LOAD_HIGH, iobase + AMCC_OP_REG_MCSR_NVCMD);
apci3501_eeprom_wait(iobase);
outb(((addr + i) >> 8) & 0xff,
iobase + AMCC_OP_REG_MCSR_NVDATA);
apci3501_eeprom_wait(iobase);
/* Read the eeprom data byte */
outb(NVCMD_BEGIN_READ, iobase + AMCC_OP_REG_MCSR_NVCMD);
apci3501_eeprom_wait(iobase);
tmp = inb(iobase + AMCC_OP_REG_MCSR_NVDATA);
apci3501_eeprom_wait(iobase);
if (i == 0)
val |= tmp;
else
val |= (tmp << 8);
}
return val;
}
static int apci3501_eeprom_get_ao_n_chan(struct comedi_device *dev)
{
struct apci3501_private *devpriv = dev->private;
unsigned long iobase = devpriv->i_IobaseAmcc;
unsigned char nfuncs;
int i;
nfuncs = apci3501_eeprom_readw(iobase, 10) & 0xff;
/* Read functionality details */
for (i = 0; i < nfuncs; i++) {
unsigned short offset = i * 4;
unsigned short addr;
unsigned char func;
unsigned short val;
func = apci3501_eeprom_readw(iobase, 12 + offset) & 0x3f;
addr = apci3501_eeprom_readw(iobase, 14 + offset);
if (func == EEPROM_ANALOGOUTPUT) {
val = apci3501_eeprom_readw(iobase, addr + 10);
return (val >> 4) & 0x3ff;
}
}
return 0;
}
static int apci3501_eeprom_insn_read(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
struct apci3501_private *devpriv = dev->private;
unsigned short addr = CR_CHAN(insn->chanspec);
data[0] = apci3501_eeprom_readw(devpriv->i_IobaseAmcc, 2 * addr);
return insn->n;
}
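/*
* Timer/watchdog interrupt handler: disable the interrupt, check that
* it really came from the timer, notify the user task via SIGIO and
* re-enable the interrupt.
*/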
static irqreturn_t apci3501_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
struct apci3501_private *devpriv = dev->private;
unsigned int ui_Timer_AOWatchdog;
unsigned long ul_Command1;
int i_temp;
/* Disable Interrupt */
ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
ul_Command1 = (ul_Command1 & 0xFFFFF9FDul);
outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
ui_Timer_AOWatchdog = inl(dev->iobase + APCI3501_TIMER_IRQ_REG) & 0x1;
if (!ui_Timer_AOWatchdog) {
comedi_error(dev, "IRQ from unknown source");
return IRQ_NONE;
}
/* Send a signal from kernel to user space, then re-enable the interrupt */
send_sig(SIGIO, devpriv->tsk_Current, 0);
ul_Command1 = inl(dev->iobase + APCI3501_TIMER_CTRL_REG);
ul_Command1 = ((ul_Command1 & 0xFFFFF9FDul) | 1 << 1);
outl(ul_Command1, dev->iobase + APCI3501_TIMER_CTRL_REG);
i_temp = inl(dev->iobase + APCI3501_TIMER_STATUS_REG) & 0x1;
return IRQ_HANDLED;
}
static int apci3501_reset(struct comedi_device *dev)
{
unsigned int val;
int chan;
int ret;
/* Reset all digital outputs to "0" */
outl(0x0, dev->iobase + APCI3501_DO_REG);
/* Default all analog outputs to 0V (bipolar) */
outl(APCI3501_AO_CTRL_BIPOLAR,
dev->iobase + APCI3501_AO_CTRL_STATUS_REG);
val = APCI3501_AO_DATA_BIPOLAR | APCI3501_AO_DATA_VAL(0);
/* Set all analog output channels */
for (chan = 0; chan < 8; chan++) {
ret = apci3501_wait_for_dac(dev);
if (ret) {
dev_warn(dev->class_dev,
"%s: DAC not-ready for channel %i\n",
__func__, chan);
} else {
outl(val | APCI3501_AO_DATA_CHAN(chan),
dev->iobase + APCI3501_AO_DATA_REG);
}
}
return 0;
}
static int apci3501_auto_attach(struct comedi_device *dev,
unsigned long context_unused)
{
struct pci_dev *pcidev = comedi_to_pci_dev(dev);
struct apci3501_private *devpriv;
struct comedi_subdevice *s;
int ao_n_chan;
int ret;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
return -ENOMEM;
ret = comedi_pci_enable(dev);
if (ret)
return ret;
dev->iobase = pci_resource_start(pcidev, 1);
devpriv->i_IobaseAmcc = pci_resource_start(pcidev, 0);
ao_n_chan = apci3501_eeprom_get_ao_n_chan(dev);
if (pcidev->irq > 0) {
ret = request_irq(pcidev->irq, apci3501_interrupt, IRQF_SHARED,
dev->board_name, dev);
if (ret == 0)
dev->irq = pcidev->irq;
}
ret = comedi_alloc_subdevices(dev, 5);
if (ret)
return ret;
/* Initialize the analog output subdevice */
s = &dev->subdevices[0];
if (ao_n_chan) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = ao_n_chan;
s->maxdata = 0x3fff;
s->range_table = &apci3501_ao_range;
s->insn_write = apci3501_ao_insn_write;
} else {
s->type = COMEDI_SUBD_UNUSED;
}
/* Initialize the digital input subdevice */
s = &dev->subdevices[1];
s->type = COMEDI_SUBD_DI;
s->subdev_flags = SDF_READABLE;
s->n_chan = 2;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = apci3501_di_insn_bits;
/* Initialize the digital output subdevice */
s = &dev->subdevices[2];
s->type = COMEDI_SUBD_DO;
s->subdev_flags = SDF_WRITEABLE;
s->n_chan = 2;
s->maxdata = 1;
s->range_table = &range_digital;
s->insn_bits = apci3501_do_insn_bits;
/* Initialize the timer/watchdog subdevice */
s = &dev->subdevices[3];
s->type = COMEDI_SUBD_TIMER;
s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON;
s->n_chan = 1;
s->maxdata = 0;
s->len_chanlist = 1;
s->range_table = &range_digital;
s->insn_write = i_APCI3501_StartStopWriteTimerCounterWatchdog;
s->insn_read = i_APCI3501_ReadTimerCounterWatchdog;
s->insn_config = i_APCI3501_ConfigTimerCounterWatchdog;
/* Initialize the eeprom subdevice */
s = &dev->subdevices[4];
s->type = COMEDI_SUBD_MEMORY;
s->subdev_flags = SDF_READABLE | SDF_INTERNAL;
s->n_chan = 256;
s->maxdata = 0xffff;
s->insn_read = apci3501_eeprom_insn_read;
apci3501_reset(dev);
return 0;
}
static void apci3501_detach(struct comedi_device *dev)
{
if (dev->iobase)
apci3501_reset(dev);
if (dev->irq)
free_irq(dev->irq, dev);
comedi_pci_disable(dev);
}
static struct comedi_driver apci3501_driver = {
.driver_name = "addi_apci_3501",
.module = THIS_MODULE,
.auto_attach = apci3501_auto_attach,
.detach = apci3501_detach,
};
static int apci3501_pci_probe(struct pci_dev *dev,
const struct pci_device_id *id)
{
return comedi_pci_auto_config(dev, &apci3501_driver, id->driver_data);
}
static DEFINE_PCI_DEVICE_TABLE(apci3501_pci_table) = {
{ PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3001) },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, apci3501_pci_table);
static struct pci_driver apci3501_pci_driver = {
.name = "addi_apci_3501",
.id_table = apci3501_pci_table,
.probe = apci3501_pci_probe,
.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci3501_driver, apci3501_pci_driver);
MODULE_DESCRIPTION("ADDI-DATA APCI-3501 Analog output board");
MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jevinskie/ps3-gcc | libgomp/testsuite/libgomp.oacc-c-c++-common/lib-81.c | 85 | 3952 | /* { dg-do run { target openacc_nvidia_accel_selected } } */
/* { dg-additional-options "-lcuda" } */
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <openacc.h>
#include <cuda.h>
#include "timer.h"
int
main (int argc, char **argv)
{
CUdevice dev;
CUfunction delay;
CUmodule module;
CUresult r;
int N;
int i;
CUstream *streams, stream;
unsigned long *a, *d_a, dticks;
int nbytes;
float atime, dtime;
void *kargs[2];
int clkrate;
int devnum, nprocs;
acc_init (acc_device_nvidia);
devnum = acc_get_device_num (acc_device_nvidia);
r = cuDeviceGet (&dev, devnum);
if (r != CUDA_SUCCESS)
{
fprintf (stderr, "cuDeviceGet failed: %d\n", r);
abort ();
}
r =
cuDeviceGetAttribute (&nprocs, CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT,
dev);
if (r != CUDA_SUCCESS)
{
fprintf (stderr, "cuDeviceGetAttribute failed: %d\n", r);
abort ();
}
r = cuDeviceGetAttribute (&clkrate, CU_DEVICE_ATTRIBUTE_CLOCK_RATE, dev);
if (r != CUDA_SUCCESS)
{
fprintf (stderr, "cuDeviceGetAttribute failed: %d\n", r);
abort ();
}
r = cuModuleLoad (&module, "subr.ptx");
if (r != CUDA_SUCCESS)
{
fprintf (stderr, "cuModuleLoad failed: %d\n", r);
abort ();
}
r = cuModuleGetFunction (&delay, module, "delay");
if (r != CUDA_SUCCESS)
{
fprintf (stderr, "cuModuleGetFunction failed: %d\n", r);
abort ();
}
nbytes = nprocs * sizeof (unsigned long);
dtime = 500.0;
dticks = (unsigned long) (dtime * clkrate);
N = nprocs;
a = (unsigned long *) malloc (nbytes);
d_a = (unsigned long *) acc_malloc (nbytes);
acc_map_data (a, d_a, nbytes);
streams = (CUstream *) malloc (N * sizeof (void *));
for (i = 0; i < N; i++)
{
streams[i] = (CUstream) acc_get_cuda_stream (i);
if (streams[i] != NULL)
abort ();
r = cuStreamCreate (&streams[i], CU_STREAM_DEFAULT);
if (r != CUDA_SUCCESS)
{
fprintf (stderr, "cuStreamCreate failed: %d\n", r);
abort ();
}
if (!acc_set_cuda_stream (i, streams[i]))
abort ();
}
init_timers (1);
kargs[0] = (void *) &d_a;
kargs[1] = (void *) &dticks;
stream = (CUstream) acc_get_cuda_stream (N);
if (stream != NULL)
abort ();
r = cuStreamCreate (&stream, CU_STREAM_DEFAULT);
if (r != CUDA_SUCCESS)
{
fprintf (stderr, "cuStreamCreate failed: %d\n", r);
abort ();
}
if (!acc_set_cuda_stream (N, stream))
abort ();
start_timer (0);
for (i = 0; i < N; i++)
{
r = cuLaunchKernel (delay, 1, 1, 1, 1, 1, 1, 0, streams[i], kargs, 0);
if (r != CUDA_SUCCESS)
{
fprintf (stderr, "cuLaunchKernel failed: %d\n", r);
abort ();
}
}
acc_wait_all_async (N);
for (i = 0; i <= N; i++)
{
if (acc_async_test (i) != 0)
abort ();
}
acc_wait (N);
for (i = 0; i <= N; i++)
{
if (acc_async_test (i) != 1)
abort ();
}
atime = stop_timer (0);
if (atime < dtime)
{
fprintf (stderr, "actual time < delay time\n");
abort ();
}
start_timer (0);
stream = (CUstream) acc_get_cuda_stream (N + 1);
if (stream != NULL)
abort ();
r = cuStreamCreate (&stream, CU_STREAM_DEFAULT);
if (r != CUDA_SUCCESS)
{
fprintf (stderr, "cuStreamCreate failed: %d\n", r);
abort ();
}
if (!acc_set_cuda_stream (N + 1, stream))
abort ();
acc_wait_all_async (N + 1);
acc_wait (N + 1);
atime = stop_timer (0);
if (0.10 < atime)
{
fprintf (stderr, "actual time too long\n");
abort ();
}
start_timer (0);
acc_wait_all_async (N);
acc_wait (N);
atime = stop_timer (0);
if (0.10 < atime)
{
fprintf (stderr, "actual time too long\n");
abort ();
}
acc_unmap_data (a);
fini_timers ();
free (streams);
free (a);
acc_free (d_a);
acc_shutdown (acc_device_nvidia);
exit (0);
}
/* { dg-output "" } */
| gpl-2.0 |
temasek/GCC_SaberMod | gcc/testsuite/gcc.dg/tree-ssa/ifc-4.c | 85 | 1308 | /* { dg-do compile } */
/* { dg-options "-c -O2 -ftree-vectorize -fdump-tree-ifcvt-stats" { target *-*-* } } */
struct ht
{
void * (*alloc_subobject) (int);
};
typedef struct cpp_reader cpp_reader;
typedef struct cpp_token cpp_token;
typedef struct cpp_macro cpp_macro;
enum cpp_ttype
{
CPP_PASTE,
};
struct cpp_token {
__extension__ enum cpp_ttype type : 8;
} cpp_comment_table;
struct cpp_macro {
union cpp_macro_u
{
cpp_token * tokens;
} exp;
unsigned int count;
};
struct cpp_reader
{
struct ht *hash_table;
};
create_iso_definition (cpp_reader *pfile, cpp_macro *macro)
{
unsigned int num_extra_tokens = 0;
{
cpp_token *tokns =
(cpp_token *) pfile->hash_table->alloc_subobject (sizeof (cpp_token)
* macro->count);
{
cpp_token *normal_dest = tokns;
cpp_token *extra_dest = tokns + macro->count - num_extra_tokens;
unsigned int i;
for (i = 0; i < macro->count; i++)
{
if (macro->exp.tokens[i].type == CPP_PASTE)
*extra_dest++ = macro->exp.tokens[i];
else
*normal_dest++ = macro->exp.tokens[i];
}
}
}
}
/* This cannot be if-converted because the stores are to aggregate types. */
/* { dg-final { scan-tree-dump-times "Applying if-conversion" 0 "ifcvt" } } */
/* { dg-final { cleanup-tree-dump "ifcvt" } } */
| gpl-2.0 |
mdr78/Linux-3.8.7-galileo | drivers/misc/mic/host/mic_debugfs.c | 853 | 13275 | /*
* Intel MIC Platform Software Stack (MPSS)
*
* Copyright(c) 2013 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution in
* the file called "COPYING".
*
* Intel MIC Host driver.
*
*/
#include <linux/debugfs.h>
#include <linux/pci.h>
#include <linux/seq_file.h>
#include <linux/mic_common.h>
#include "../common/mic_dev.h"
#include "mic_device.h"
#include "mic_smpt.h"
#include "mic_virtio.h"
/* Debugfs parent dir */
static struct dentry *mic_dbg;
/**
* mic_log_buf_show - Display MIC kernel log buffer.
*
* log_buf addr/len is read from System.map by user space
* and populated in sysfs entries.
*/
static int mic_log_buf_show(struct seq_file *s, void *unused)
{
void __iomem *log_buf_va;
int __iomem *log_buf_len_va;
struct mic_device *mdev = s->private;
void *kva;
int size;
unsigned long aper_offset;
if (!mdev || !mdev->log_buf_addr || !mdev->log_buf_len)
goto done;
/*
* Card kernel will never be relocated and any kernel text/data mapping
* can be translated to phys address by subtracting __START_KERNEL_map.
*/
aper_offset = (unsigned long)mdev->log_buf_len - __START_KERNEL_map;
log_buf_len_va = mdev->aper.va + aper_offset;
aper_offset = (unsigned long)mdev->log_buf_addr - __START_KERNEL_map;
log_buf_va = mdev->aper.va + aper_offset;
size = ioread32(log_buf_len_va);
kva = kmalloc(size, GFP_KERNEL);
if (!kva)
goto done;
mutex_lock(&mdev->mic_mutex);
memcpy_fromio(kva, log_buf_va, size);
switch (mdev->state) {
case MIC_ONLINE:
/* Fall through */
case MIC_SHUTTING_DOWN:
seq_write(s, kva, size);
break;
default:
break;
}
mutex_unlock(&mdev->mic_mutex);
kfree(kva);
done:
return 0;
}
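/*
 * Illustration only, not part of the driver: the card-virtual to
 * host-aperture translation done twice in mic_log_buf_show() above,
 * factored out.  Assumes, as the comment there states, that card
 * kernel addresses map linearly above __START_KERNEL_map.  The
 * helper name is hypothetical.
 */
static inline void __iomem *mic_kva_to_aper(struct mic_device *mdev,
					    unsigned long card_kva)
{
	return mdev->aper.va + (card_kva - __START_KERNEL_map);
}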
static int mic_log_buf_open(struct inode *inode, struct file *file)
{
return single_open(file, mic_log_buf_show, inode->i_private);
}
static int mic_log_buf_release(struct inode *inode, struct file *file)
{
return single_release(inode, file);
}
static const struct file_operations log_buf_ops = {
.owner = THIS_MODULE,
.open = mic_log_buf_open,
.read = seq_read,
.llseek = seq_lseek,
.release = mic_log_buf_release
};
static int mic_smpt_show(struct seq_file *s, void *pos)
{
int i;
struct mic_device *mdev = s->private;
unsigned long flags;
seq_printf(s, "MIC %-2d |%-10s| %-14s %-10s\n",
mdev->id, "SMPT entry", "SW DMA addr", "RefCount");
seq_puts(s, "====================================================\n");
if (mdev->smpt) {
struct mic_smpt_info *smpt_info = mdev->smpt;
spin_lock_irqsave(&smpt_info->smpt_lock, flags);
for (i = 0; i < smpt_info->info.num_reg; i++) {
seq_printf(s, "%9s|%-10d| %-#14llx %-10lld\n",
" ", i, smpt_info->entry[i].dma_addr,
smpt_info->entry[i].ref_count);
}
spin_unlock_irqrestore(&smpt_info->smpt_lock, flags);
}
seq_puts(s, "====================================================\n");
return 0;
}
static int mic_smpt_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, mic_smpt_show, inode->i_private);
}
static int mic_smpt_debug_release(struct inode *inode, struct file *file)
{
return single_release(inode, file);
}
static const struct file_operations smpt_file_ops = {
.owner = THIS_MODULE,
.open = mic_smpt_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = mic_smpt_debug_release
};
static int mic_soft_reset_show(struct seq_file *s, void *pos)
{
struct mic_device *mdev = s->private;
mic_stop(mdev, true);
return 0;
}
static int mic_soft_reset_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, mic_soft_reset_show, inode->i_private);
}
static int mic_soft_reset_debug_release(struct inode *inode, struct file *file)
{
return single_release(inode, file);
}
static const struct file_operations soft_reset_ops = {
.owner = THIS_MODULE,
.open = mic_soft_reset_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = mic_soft_reset_debug_release
};
static int mic_post_code_show(struct seq_file *s, void *pos)
{
struct mic_device *mdev = s->private;
u32 reg = mdev->ops->get_postcode(mdev);
seq_printf(s, "%c%c", reg & 0xff, (reg >> 8) & 0xff);
return 0;
}
static int mic_post_code_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, mic_post_code_show, inode->i_private);
}
static int mic_post_code_debug_release(struct inode *inode, struct file *file)
{
return single_release(inode, file);
}
static const struct file_operations post_code_ops = {
.owner = THIS_MODULE,
.open = mic_post_code_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = mic_post_code_debug_release
};
static int mic_dp_show(struct seq_file *s, void *pos)
{
struct mic_device *mdev = s->private;
struct mic_device_desc *d;
struct mic_device_ctrl *dc;
struct mic_vqconfig *vqconfig;
__u32 *features;
__u8 *config;
struct mic_bootparam *bootparam = mdev->dp;
int i, j;
seq_printf(s, "Bootparam: magic 0x%x\n",
bootparam->magic);
seq_printf(s, "Bootparam: h2c_shutdown_db %d\n",
bootparam->h2c_shutdown_db);
seq_printf(s, "Bootparam: h2c_config_db %d\n",
bootparam->h2c_config_db);
seq_printf(s, "Bootparam: c2h_shutdown_db %d\n",
bootparam->c2h_shutdown_db);
seq_printf(s, "Bootparam: shutdown_status %d\n",
bootparam->shutdown_status);
seq_printf(s, "Bootparam: shutdown_card %d\n",
bootparam->shutdown_card);
for (i = sizeof(*bootparam); i < MIC_DP_SIZE;
i += mic_total_desc_size(d)) {
d = mdev->dp + i;
dc = (void *)d + mic_aligned_desc_size(d);
/* end of list */
if (d->type == 0)
break;
if (d->type == -1)
continue;
seq_printf(s, "Type %d ", d->type);
seq_printf(s, "Num VQ %d ", d->num_vq);
seq_printf(s, "Feature Len %d\n", d->feature_len);
seq_printf(s, "Config Len %d ", d->config_len);
seq_printf(s, "Shutdown Status %d\n", d->status);
for (j = 0; j < d->num_vq; j++) {
vqconfig = mic_vq_config(d) + j;
seq_printf(s, "vqconfig[%d]: ", j);
seq_printf(s, "address 0x%llx ", vqconfig->address);
seq_printf(s, "num %d ", vqconfig->num);
seq_printf(s, "used address 0x%llx\n",
vqconfig->used_address);
}
features = (__u32 *)mic_vq_features(d);
seq_printf(s, "Features: Host 0x%x ", features[0]);
seq_printf(s, "Guest 0x%x\n", features[1]);
config = mic_vq_configspace(d);
for (j = 0; j < d->config_len; j++)
seq_printf(s, "config[%d]=%d\n", j, config[j]);
seq_puts(s, "Device control:\n");
seq_printf(s, "Config Change %d ", dc->config_change);
seq_printf(s, "Vdev reset %d\n", dc->vdev_reset);
seq_printf(s, "Guest Ack %d ", dc->guest_ack);
seq_printf(s, "Host ack %d\n", dc->host_ack);
seq_printf(s, "Used address updated %d ",
dc->used_address_updated);
seq_printf(s, "Vdev 0x%llx\n", dc->vdev);
seq_printf(s, "c2h doorbell %d ", dc->c2h_vdev_db);
seq_printf(s, "h2c doorbell %d\n", dc->h2c_vdev_db);
}
return 0;
}
static int mic_dp_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, mic_dp_show, inode->i_private);
}
static int mic_dp_debug_release(struct inode *inode, struct file *file)
{
return single_release(inode, file);
}
static const struct file_operations dp_ops = {
.owner = THIS_MODULE,
.open = mic_dp_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = mic_dp_debug_release
};
static int mic_vdev_info_show(struct seq_file *s, void *unused)
{
struct mic_device *mdev = s->private;
struct list_head *pos, *tmp;
struct mic_vdev *mvdev;
int i, j;
mutex_lock(&mdev->mic_mutex);
list_for_each_safe(pos, tmp, &mdev->vdev_list) {
mvdev = list_entry(pos, struct mic_vdev, list);
seq_printf(s, "VDEV type %d state %s in %ld out %ld\n",
mvdev->virtio_id,
mic_vdevup(mvdev) ? "UP" : "DOWN",
mvdev->in_bytes,
mvdev->out_bytes);
for (i = 0; i < MIC_MAX_VRINGS; i++) {
struct vring_desc *desc;
struct vring_avail *avail;
struct vring_used *used;
struct mic_vringh *mvr = &mvdev->mvr[i];
struct vringh *vrh = &mvr->vrh;
int num = vrh->vring.num;
if (!num)
continue;
desc = vrh->vring.desc;
seq_printf(s, "vring i %d avail_idx %d",
i, mvr->vring.info->avail_idx & (num - 1));
seq_printf(s, " vring i %d avail_idx %d\n",
i, mvr->vring.info->avail_idx);
seq_printf(s, "vrh i %d weak_barriers %d",
i, vrh->weak_barriers);
seq_printf(s, " last_avail_idx %d last_used_idx %d",
vrh->last_avail_idx, vrh->last_used_idx);
seq_printf(s, " completed %d\n", vrh->completed);
for (j = 0; j < num; j++) {
seq_printf(s, "desc[%d] addr 0x%llx len %d",
j, desc->addr, desc->len);
seq_printf(s, " flags 0x%x next %d\n",
desc->flags, desc->next);
desc++;
}
avail = vrh->vring.avail;
seq_printf(s, "avail flags 0x%x idx %d\n",
avail->flags, avail->idx & (num - 1));
seq_printf(s, "avail flags 0x%x idx %d\n",
avail->flags, avail->idx);
for (j = 0; j < num; j++)
seq_printf(s, "avail ring[%d] %d\n",
j, avail->ring[j]);
used = vrh->vring.used;
seq_printf(s, "used flags 0x%x idx %d\n",
used->flags, used->idx & (num - 1));
seq_printf(s, "used flags 0x%x idx %d\n",
used->flags, used->idx);
for (j = 0; j < num; j++)
seq_printf(s, "used ring[%d] id %d len %d\n",
j, used->ring[j].id,
used->ring[j].len);
}
}
mutex_unlock(&mdev->mic_mutex);
return 0;
}
static int mic_vdev_info_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, mic_vdev_info_show, inode->i_private);
}
static int mic_vdev_info_debug_release(struct inode *inode, struct file *file)
{
return single_release(inode, file);
}
static const struct file_operations vdev_info_ops = {
.owner = THIS_MODULE,
.open = mic_vdev_info_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = mic_vdev_info_debug_release
};
static int mic_msi_irq_info_show(struct seq_file *s, void *pos)
{
struct mic_device *mdev = s->private;
int reg;
int i, j;
u16 entry;
u16 vector;
struct pci_dev *pdev = container_of(mdev->sdev->parent,
struct pci_dev, dev);
if (pci_dev_msi_enabled(pdev)) {
for (i = 0; i < mdev->irq_info.num_vectors; i++) {
if (pdev->msix_enabled) {
entry = mdev->irq_info.msix_entries[i].entry;
vector = mdev->irq_info.msix_entries[i].vector;
} else {
entry = 0;
vector = pdev->irq;
}
reg = mdev->intr_ops->read_msi_to_src_map(mdev, entry);
seq_printf(s, "%s %-10d %s %-10d MXAR[%d]: %08X\n",
"IRQ:", vector, "Entry:", entry, i, reg);
seq_printf(s, "%-10s", "offset:");
for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--)
seq_printf(s, "%4d ", j);
seq_puts(s, "\n");
seq_printf(s, "%-10s", "count:");
for (j = (MIC_NUM_OFFSETS - 1); j >= 0; j--)
seq_printf(s, "%4d ",
(mdev->irq_info.mic_msi_map[i] &
BIT(j)) ? 1 : 0);
seq_puts(s, "\n\n");
}
} else {
seq_puts(s, "MSI/MSIx interrupts not enabled\n");
}
return 0;
}
static int mic_msi_irq_info_debug_open(struct inode *inode, struct file *file)
{
return single_open(file, mic_msi_irq_info_show, inode->i_private);
}
static int
mic_msi_irq_info_debug_release(struct inode *inode, struct file *file)
{
return single_release(inode, file);
}
static const struct file_operations msi_irq_info_ops = {
.owner = THIS_MODULE,
.open = mic_msi_irq_info_debug_open,
.read = seq_read,
.llseek = seq_lseek,
.release = mic_msi_irq_info_debug_release
};
/**
* mic_create_debug_dir - Initialize MIC debugfs entries.
*/
void mic_create_debug_dir(struct mic_device *mdev)
{
if (!mic_dbg)
return;
mdev->dbg_dir = debugfs_create_dir(dev_name(mdev->sdev), mic_dbg);
if (!mdev->dbg_dir)
return;
debugfs_create_file("log_buf", 0444, mdev->dbg_dir, mdev, &log_buf_ops);
debugfs_create_file("smpt", 0444, mdev->dbg_dir, mdev, &smpt_file_ops);
debugfs_create_file("soft_reset", 0444, mdev->dbg_dir, mdev,
&soft_reset_ops);
debugfs_create_file("post_code", 0444, mdev->dbg_dir, mdev,
&post_code_ops);
debugfs_create_file("dp", 0444, mdev->dbg_dir, mdev, &dp_ops);
debugfs_create_file("vdev_info", 0444, mdev->dbg_dir, mdev,
&vdev_info_ops);
debugfs_create_file("msi_irq_info", 0444, mdev->dbg_dir, mdev,
&msi_irq_info_ops);
}
/**
* mic_delete_debug_dir - Uninitialize MIC debugfs entries.
*/
void mic_delete_debug_dir(struct mic_device *mdev)
{
if (!mdev->dbg_dir)
return;
debugfs_remove_recursive(mdev->dbg_dir);
}
/**
* mic_init_debugfs - Initialize global debugfs entry.
*/
void __init mic_init_debugfs(void)
{
mic_dbg = debugfs_create_dir(KBUILD_MODNAME, NULL);
if (!mic_dbg)
pr_err("can't create debugfs dir\n");
}
/**
* mic_exit_debugfs - Uninitialize global debugfs entry
*/
void mic_exit_debugfs(void)
{
debugfs_remove(mic_dbg);
}
| gpl-2.0 |
namhyung/linux | drivers/net/ethernet/sun/sunqe.c | 853 | 25726 | /* sunqe.c: Sparc QuadEthernet 10baseT SBUS card driver.
* Once again I am out to prove that every ethernet
* controller out there can be most efficiently programmed
* if you make it look like a LANCE.
*
* Copyright (C) 1996, 1999, 2003, 2006, 2008 David S. Miller (davem@davemloft.net)
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/crc32.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/io.h>
#include <asm/dma.h>
#include <asm/byteorder.h>
#include <asm/idprom.h>
#include <asm/openprom.h>
#include <asm/oplib.h>
#include <asm/auxio.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include "sunqe.h"
#define DRV_NAME "sunqe"
#define DRV_VERSION "4.1"
#define DRV_RELDATE "August 27, 2008"
#define DRV_AUTHOR "David S. Miller (davem@davemloft.net)"
static char version[] =
DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n";
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR(DRV_AUTHOR);
MODULE_DESCRIPTION("Sun QuadEthernet 10baseT SBUS card driver");
MODULE_LICENSE("GPL");
static struct sunqec *root_qec_dev;
static void qe_set_multicast(struct net_device *dev);
#define QEC_RESET_TRIES 200
static inline int qec_global_reset(void __iomem *gregs)
{
int tries = QEC_RESET_TRIES;
sbus_writel(GLOB_CTRL_RESET, gregs + GLOB_CTRL);
while (--tries) {
u32 tmp = sbus_readl(gregs + GLOB_CTRL);
if (tmp & GLOB_CTRL_RESET) {
udelay(20);
continue;
}
break;
}
if (tries)
return 0;
printk(KERN_ERR "QuadEther: AIEEE cannot reset the QEC!\n");
return -1;
}
#define MACE_RESET_RETRIES 200
#define QE_RESET_RETRIES 200
static inline int qe_stop(struct sunqe *qep)
{
void __iomem *cregs = qep->qcregs;
void __iomem *mregs = qep->mregs;
int tries;
/* Reset the MACE, then the QEC channel. */
sbus_writeb(MREGS_BCONFIG_RESET, mregs + MREGS_BCONFIG);
tries = MACE_RESET_RETRIES;
while (--tries) {
u8 tmp = sbus_readb(mregs + MREGS_BCONFIG);
if (tmp & MREGS_BCONFIG_RESET) {
udelay(20);
continue;
}
break;
}
if (!tries) {
printk(KERN_ERR "QuadEther: AIEEE cannot reset the MACE!\n");
return -1;
}
sbus_writel(CREG_CTRL_RESET, cregs + CREG_CTRL);
tries = QE_RESET_RETRIES;
while (--tries) {
u32 tmp = sbus_readl(cregs + CREG_CTRL);
if (tmp & CREG_CTRL_RESET) {
udelay(20);
continue;
}
break;
}
if (!tries) {
printk(KERN_ERR "QuadEther: Cannot reset QE channel!\n");
return -1;
}
return 0;
}
static void qe_init_rings(struct sunqe *qep)
{
struct qe_init_block *qb = qep->qe_block;
struct sunqe_buffers *qbufs = qep->buffers;
__u32 qbufs_dvma = qep->buffers_dvma;
int i;
qep->rx_new = qep->rx_old = qep->tx_new = qep->tx_old = 0;
memset(qb, 0, sizeof(struct qe_init_block));
memset(qbufs, 0, sizeof(struct sunqe_buffers));
for (i = 0; i < RX_RING_SIZE; i++) {
qb->qe_rxd[i].rx_addr = qbufs_dvma + qebuf_offset(rx_buf, i);
qb->qe_rxd[i].rx_flags =
(RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
}
}
static int qe_init(struct sunqe *qep, int from_irq)
{
struct sunqec *qecp = qep->parent;
void __iomem *cregs = qep->qcregs;
void __iomem *mregs = qep->mregs;
void __iomem *gregs = qecp->gregs;
unsigned char *e = &qep->dev->dev_addr[0];
u32 tmp;
int i;
/* Shut it up. */
if (qe_stop(qep))
return -EAGAIN;
/* Setup initial rx/tx init block pointers. */
sbus_writel(qep->qblock_dvma + qib_offset(qe_rxd, 0), cregs + CREG_RXDS);
sbus_writel(qep->qblock_dvma + qib_offset(qe_txd, 0), cregs + CREG_TXDS);
/* Enable/mask the various irq's. */
sbus_writel(0, cregs + CREG_RIMASK);
sbus_writel(1, cregs + CREG_TIMASK);
sbus_writel(0, cregs + CREG_QMASK);
sbus_writel(CREG_MMASK_RXCOLL, cregs + CREG_MMASK);
/* Setup the FIFO pointers into QEC local memory. */
tmp = qep->channel * sbus_readl(gregs + GLOB_MSIZE);
sbus_writel(tmp, cregs + CREG_RXRBUFPTR);
sbus_writel(tmp, cregs + CREG_RXWBUFPTR);
tmp = sbus_readl(cregs + CREG_RXRBUFPTR) +
sbus_readl(gregs + GLOB_RSIZE);
sbus_writel(tmp, cregs + CREG_TXRBUFPTR);
sbus_writel(tmp, cregs + CREG_TXWBUFPTR);
/* Clear the channel collision counter. */
sbus_writel(0, cregs + CREG_CCNT);
/* For 10baseT, neither inter-frame spacing nor throttling seems to be necessary. */
sbus_writel(0, cregs + CREG_PIPG);
/* Now dork with the AMD MACE. */
sbus_writeb(MREGS_PHYCONFIG_AUTO, mregs + MREGS_PHYCONFIG);
sbus_writeb(MREGS_TXFCNTL_AUTOPAD, mregs + MREGS_TXFCNTL);
sbus_writeb(0, mregs + MREGS_RXFCNTL);
/* The QEC dma's the rx'd packets from local memory out to main memory,
* and therefore it interrupts when the packet reception is "complete".
* So don't listen for the MACE talking about it.
*/
sbus_writeb(MREGS_IMASK_COLL | MREGS_IMASK_RXIRQ, mregs + MREGS_IMASK);
sbus_writeb(MREGS_BCONFIG_BSWAP | MREGS_BCONFIG_64TS, mregs + MREGS_BCONFIG);
sbus_writeb((MREGS_FCONFIG_TXF16 | MREGS_FCONFIG_RXF32 |
MREGS_FCONFIG_RFWU | MREGS_FCONFIG_TFWU),
mregs + MREGS_FCONFIG);
/* Only usable interface on QuadEther is twisted pair. */
sbus_writeb(MREGS_PLSCONFIG_TP, mregs + MREGS_PLSCONFIG);
/* Tell MACE we are changing the ether address. */
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_PARESET,
mregs + MREGS_IACONFIG);
while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
barrier();
sbus_writeb(e[0], mregs + MREGS_ETHADDR);
sbus_writeb(e[1], mregs + MREGS_ETHADDR);
sbus_writeb(e[2], mregs + MREGS_ETHADDR);
sbus_writeb(e[3], mregs + MREGS_ETHADDR);
sbus_writeb(e[4], mregs + MREGS_ETHADDR);
sbus_writeb(e[5], mregs + MREGS_ETHADDR);
/* Clear out the address filter. */
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
mregs + MREGS_IACONFIG);
while ((sbus_readb(mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
barrier();
for (i = 0; i < 8; i++)
sbus_writeb(0, mregs + MREGS_FILTER);
/* Address changes are now complete. */
sbus_writeb(0, mregs + MREGS_IACONFIG);
qe_init_rings(qep);
/* Wait a little bit for the link to come up... */
mdelay(5);
if (!(sbus_readb(mregs + MREGS_PHYCONFIG) & MREGS_PHYCONFIG_LTESTDIS)) {
int tries = 50;
while (--tries) {
u8 tmp;
mdelay(5);
barrier();
tmp = sbus_readb(mregs + MREGS_PHYCONFIG);
if ((tmp & MREGS_PHYCONFIG_LSTAT) != 0)
break;
}
if (tries == 0)
printk(KERN_NOTICE "%s: Warning, link state is down.\n", qep->dev->name);
}
/* Missed packet counter is cleared on a read. */
sbus_readb(mregs + MREGS_MPCNT);
/* Reload multicast information, this will enable the receiver
* and transmitter.
*/
qe_set_multicast(qep->dev);
/* QEC should now start to show interrupts. */
return 0;
}
/* Grrr, certain error conditions completely lock up the AMD MACE,
* so when we get these we _must_ reset the chip.
*/
static int qe_is_bolixed(struct sunqe *qep, u32 qe_status)
{
struct net_device *dev = qep->dev;
int mace_hwbug_workaround = 0;
if (qe_status & CREG_STAT_EDEFER) {
printk(KERN_ERR "%s: Excessive transmit defers.\n", dev->name);
dev->stats.tx_errors++;
}
if (qe_status & CREG_STAT_CLOSS) {
printk(KERN_ERR "%s: Carrier lost, link down?\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_carrier_errors++;
}
if (qe_status & CREG_STAT_ERETRIES) {
printk(KERN_ERR "%s: Excessive transmit retries (more than 16).\n", dev->name);
dev->stats.tx_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_LCOLL) {
printk(KERN_ERR "%s: Late transmit collision.\n", dev->name);
dev->stats.tx_errors++;
dev->stats.collisions++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_FUFLOW) {
printk(KERN_ERR "%s: Transmit fifo underflow, driver bug.\n", dev->name);
dev->stats.tx_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_JERROR) {
printk(KERN_ERR "%s: Jabber error.\n", dev->name);
}
if (qe_status & CREG_STAT_BERROR) {
printk(KERN_ERR "%s: Babble error.\n", dev->name);
}
if (qe_status & CREG_STAT_CCOFLOW) {
dev->stats.tx_errors += 256;
dev->stats.collisions += 256;
}
if (qe_status & CREG_STAT_TXDERROR) {
printk(KERN_ERR "%s: Transmit descriptor is bogus, driver bug.\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_TXLERR) {
printk(KERN_ERR "%s: Transmit late error.\n", dev->name);
dev->stats.tx_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_TXPERR) {
printk(KERN_ERR "%s: Transmit DMA parity error.\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_TXSERR) {
printk(KERN_ERR "%s: Transmit DMA sbus error ack.\n", dev->name);
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_RCCOFLOW) {
dev->stats.rx_errors += 256;
dev->stats.collisions += 256;
}
if (qe_status & CREG_STAT_RUOFLOW) {
dev->stats.rx_errors += 256;
dev->stats.rx_over_errors += 256;
}
if (qe_status & CREG_STAT_MCOFLOW) {
dev->stats.rx_errors += 256;
dev->stats.rx_missed_errors += 256;
}
if (qe_status & CREG_STAT_RXFOFLOW) {
printk(KERN_ERR "%s: Receive fifo overflow.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_over_errors++;
}
if (qe_status & CREG_STAT_RLCOLL) {
printk(KERN_ERR "%s: Late receive collision.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.collisions++;
}
if (qe_status & CREG_STAT_FCOFLOW) {
dev->stats.rx_errors += 256;
dev->stats.rx_frame_errors += 256;
}
if (qe_status & CREG_STAT_CECOFLOW) {
dev->stats.rx_errors += 256;
dev->stats.rx_crc_errors += 256;
}
if (qe_status & CREG_STAT_RXDROP) {
printk(KERN_ERR "%s: Receive packet dropped.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_dropped++;
dev->stats.rx_missed_errors++;
}
if (qe_status & CREG_STAT_RXSMALL) {
printk(KERN_ERR "%s: Receive buffer too small, driver bug.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
}
if (qe_status & CREG_STAT_RXLERR) {
printk(KERN_ERR "%s: Receive late error.\n", dev->name);
dev->stats.rx_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_RXPERR) {
printk(KERN_ERR "%s: Receive DMA parity error.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_missed_errors++;
mace_hwbug_workaround = 1;
}
if (qe_status & CREG_STAT_RXSERR) {
printk(KERN_ERR "%s: Receive DMA sbus error ack.\n", dev->name);
dev->stats.rx_errors++;
dev->stats.rx_missed_errors++;
mace_hwbug_workaround = 1;
}
if (mace_hwbug_workaround)
qe_init(qep, 1);
return mace_hwbug_workaround;
}
/* Per-QE receive interrupt service routine. Just like on the Happy Meal
* we receive directly into skbs, with a small-packet copy watermark.
*/
static void qe_rx(struct sunqe *qep)
{
struct qe_rxd *rxbase = &qep->qe_block->qe_rxd[0];
struct net_device *dev = qep->dev;
struct qe_rxd *this;
struct sunqe_buffers *qbufs = qep->buffers;
__u32 qbufs_dvma = qep->buffers_dvma;
int elem = qep->rx_new;
u32 flags;
this = &rxbase[elem];
while (!((flags = this->rx_flags) & RXD_OWN)) {
struct sk_buff *skb;
unsigned char *this_qbuf =
&qbufs->rx_buf[elem & (RX_RING_SIZE - 1)][0];
__u32 this_qbuf_dvma = qbufs_dvma +
qebuf_offset(rx_buf, (elem & (RX_RING_SIZE - 1)));
struct qe_rxd *end_rxd =
&rxbase[(elem+RX_RING_SIZE)&(RX_RING_MAXSIZE-1)];
int len = (flags & RXD_LENGTH) - 4; /* QE adds ether FCS size to len */
/* Check for errors. */
if (len < ETH_ZLEN) {
dev->stats.rx_errors++;
dev->stats.rx_length_errors++;
dev->stats.rx_dropped++;
} else {
skb = netdev_alloc_skb(dev, len + 2);
if (skb == NULL) {
dev->stats.rx_dropped++;
} else {
skb_reserve(skb, 2);
skb_put(skb, len);
skb_copy_to_linear_data(skb, this_qbuf,
len);
skb->protocol = eth_type_trans(skb, qep->dev);
netif_rx(skb);
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
}
}
end_rxd->rx_addr = this_qbuf_dvma;
end_rxd->rx_flags = (RXD_OWN | ((RXD_PKT_SZ) & RXD_LENGTH));
elem = NEXT_RX(elem);
this = &rxbase[elem];
}
qep->rx_new = elem;
}
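/* Illustrative note: the chip walks a ring of RX_RING_MAXSIZE
 * descriptors while the driver recycles only RX_RING_SIZE buffers.
 * Re-arming the descriptor RX_RING_SIZE slots ahead (end_rxd above)
 * with the buffer just drained keeps every descriptor the chip will
 * reach owned and backed by a buffer that has already been emptied.
 */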
static void qe_tx_reclaim(struct sunqe *qep);
/* Interrupts for all QE's get filtered out via the QEC master controller,
* so we just run through each qe and check to see who is signaling
* and thus needs to be serviced.
*/
static irqreturn_t qec_interrupt(int irq, void *dev_id)
{
struct sunqec *qecp = dev_id;
u32 qec_status;
int channel = 0;
/* Latch the status now. */
qec_status = sbus_readl(qecp->gregs + GLOB_STAT);
while (channel < 4) {
if (qec_status & 0xf) {
struct sunqe *qep = qecp->qes[channel];
u32 qe_status;
qe_status = sbus_readl(qep->qcregs + CREG_STAT);
if (qe_status & CREG_STAT_ERRORS) {
if (qe_is_bolixed(qep, qe_status))
goto next;
}
if (qe_status & CREG_STAT_RXIRQ)
qe_rx(qep);
if (netif_queue_stopped(qep->dev) &&
(qe_status & CREG_STAT_TXIRQ)) {
spin_lock(&qep->lock);
qe_tx_reclaim(qep);
if (TX_BUFFS_AVAIL(qep) > 0) {
/* Wake net queue and return to
* lazy tx reclaim.
*/
netif_wake_queue(qep->dev);
sbus_writel(1, qep->qcregs + CREG_TIMASK);
}
spin_unlock(&qep->lock);
}
next:
;
}
qec_status >>= 4;
channel++;
}
return IRQ_HANDLED;
}
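/* Illustration only: GLOB_STAT packs one 4-bit status nibble per QE
 * channel, channel 0 in the least significant bits, which is why the
 * handler above tests the low nibble and then shifts right by four.
 * A hypothetical accessor for a single channel would be:
 */
static inline u32 qec_channel_status(u32 qec_status, int channel)
{
	return (qec_status >> (channel * 4)) & 0xf;
}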
static int qe_open(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
qep->mconfig = (MREGS_MCONFIG_TXENAB |
MREGS_MCONFIG_RXENAB |
MREGS_MCONFIG_MBAENAB);
return qe_init(qep, 0);
}
static int qe_close(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
qe_stop(qep);
return 0;
}
/* Reclaim TX'd frames from the ring. This must always run under
* the IRQ protected qep->lock.
*/
static void qe_tx_reclaim(struct sunqe *qep)
{
struct qe_txd *txbase = &qep->qe_block->qe_txd[0];
int elem = qep->tx_old;
while (elem != qep->tx_new) {
u32 flags = txbase[elem].tx_flags;
if (flags & TXD_OWN)
break;
elem = NEXT_TX(elem);
}
qep->tx_old = elem;
}
static void qe_tx_timeout(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
int tx_full;
spin_lock_irq(&qep->lock);
/* Try to reclaim, if that frees up some tx
* entries, we're fine.
*/
qe_tx_reclaim(qep);
tx_full = TX_BUFFS_AVAIL(qep) <= 0;
spin_unlock_irq(&qep->lock);
if (!tx_full)
goto out;
printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
qe_init(qep, 1);
out:
netif_wake_queue(dev);
}
/* Get a packet queued to go onto the wire. */
static int qe_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
struct sunqe_buffers *qbufs = qep->buffers;
__u32 txbuf_dvma, qbufs_dvma = qep->buffers_dvma;
unsigned char *txbuf;
int len, entry;
spin_lock_irq(&qep->lock);
qe_tx_reclaim(qep);
len = skb->len;
entry = qep->tx_new;
txbuf = &qbufs->tx_buf[entry & (TX_RING_SIZE - 1)][0];
txbuf_dvma = qbufs_dvma +
qebuf_offset(tx_buf, (entry & (TX_RING_SIZE - 1)));
/* Avoid a race... */
qep->qe_block->qe_txd[entry].tx_flags = TXD_UPDATE;
skb_copy_from_linear_data(skb, txbuf, len);
qep->qe_block->qe_txd[entry].tx_addr = txbuf_dvma;
qep->qe_block->qe_txd[entry].tx_flags =
(TXD_OWN | TXD_SOP | TXD_EOP | (len & TXD_LENGTH));
qep->tx_new = NEXT_TX(entry);
/* Get it going. */
sbus_writel(CREG_CTRL_TWAKEUP, qep->qcregs + CREG_CTRL);
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
if (TX_BUFFS_AVAIL(qep) <= 0) {
/* Halt the net queue and enable tx interrupts.
* When the tx queue empties the tx irq handler
* will wake up the queue and return us back to
* the lazy tx reclaim scheme.
*/
netif_stop_queue(dev);
sbus_writel(0, qep->qcregs + CREG_TIMASK);
}
spin_unlock_irq(&qep->lock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static void qe_set_multicast(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
struct netdev_hw_addr *ha;
u8 new_mconfig = qep->mconfig;
int i;
u32 crc;
/* Lock out others. */
netif_stop_queue(dev);
if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) {
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
qep->mregs + MREGS_IACONFIG);
while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
barrier();
for (i = 0; i < 8; i++)
sbus_writeb(0xff, qep->mregs + MREGS_FILTER);
sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
} else if (dev->flags & IFF_PROMISC) {
new_mconfig |= MREGS_MCONFIG_PROMISC;
} else {
u16 hash_table[4];
u8 *hbytes = (unsigned char *) &hash_table[0];
memset(hash_table, 0, sizeof(hash_table));
netdev_for_each_mc_addr(ha, dev) {
crc = ether_crc_le(6, ha->addr);
crc >>= 26;
hash_table[crc >> 4] |= 1 << (crc & 0xf);
}
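/* Illustrative note: the MACE logical address filter is 64 bits wide,
 * held here as four 16-bit words.  The top six bits of the
 * little-endian CRC (crc >> 26) select one of those 64 bits: bits
 * 5..4 pick the word, bits 3..0 pick the bit within it.
 */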
/* Program the qe with the new filter value. */
sbus_writeb(MREGS_IACONFIG_ACHNGE | MREGS_IACONFIG_LARESET,
qep->mregs + MREGS_IACONFIG);
while ((sbus_readb(qep->mregs + MREGS_IACONFIG) & MREGS_IACONFIG_ACHNGE) != 0)
barrier();
for (i = 0; i < 8; i++) {
u8 tmp = *hbytes++;
sbus_writeb(tmp, qep->mregs + MREGS_FILTER);
}
sbus_writeb(0, qep->mregs + MREGS_IACONFIG);
}
/* Any change of the logical address filter, the physical address,
* or enabling/disabling promiscuous mode causes the MACE to disable
* the receiver. So we must re-enable it here or else the MACE
* refuses to listen to anything on the network. Sheesh, took
* me a day or two to find this bug.
*/
qep->mconfig = new_mconfig;
sbus_writeb(qep->mconfig, qep->mregs + MREGS_MCONFIG);
/* Let us get going again. */
netif_wake_queue(dev);
}
/* Ethtool support... */
static void qe_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
const struct linux_prom_registers *regs;
struct sunqe *qep = netdev_priv(dev);
struct platform_device *op;
strlcpy(info->driver, "sunqe", sizeof(info->driver));
strlcpy(info->version, "3.0", sizeof(info->version));
op = qep->op;
regs = of_get_property(op->dev.of_node, "reg", NULL);
if (regs)
snprintf(info->bus_info, sizeof(info->bus_info), "SBUS:%d",
regs->which_io);
}
static u32 qe_get_link(struct net_device *dev)
{
struct sunqe *qep = netdev_priv(dev);
void __iomem *mregs = qep->mregs;
u8 phyconfig;
spin_lock_irq(&qep->lock);
phyconfig = sbus_readb(mregs + MREGS_PHYCONFIG);
spin_unlock_irq(&qep->lock);
return phyconfig & MREGS_PHYCONFIG_LSTAT;
}
static const struct ethtool_ops qe_ethtool_ops = {
.get_drvinfo = qe_get_drvinfo,
.get_link = qe_get_link,
};
/* This is only called once at boot time for each card probed. */
static void qec_init_once(struct sunqec *qecp, struct platform_device *op)
{
u8 bsizes = qecp->qec_bursts;
if (sbus_can_burst64() && (bsizes & DMA_BURST64)) {
sbus_writel(GLOB_CTRL_B64, qecp->gregs + GLOB_CTRL);
} else if (bsizes & DMA_BURST32) {
sbus_writel(GLOB_CTRL_B32, qecp->gregs + GLOB_CTRL);
} else {
sbus_writel(GLOB_CTRL_B16, qecp->gregs + GLOB_CTRL);
}
/* Packetsize is only used in 100baseT BigMAC configurations;
* set it to zero just to be on the safe side.
*/
sbus_writel(GLOB_PSIZE_2048, qecp->gregs + GLOB_PSIZE);
/* Set the local memsize register, divided up to one piece per QE channel. */
sbus_writel((resource_size(&op->resource[1]) >> 2),
qecp->gregs + GLOB_MSIZE);
/* Divide up the local QEC memory amongst the 4 QE receiver and
* transmitter FIFOs. Basically it is (total / 2 / num_channels).
*/
sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
qecp->gregs + GLOB_TSIZE);
sbus_writel((resource_size(&op->resource[1]) >> 2) >> 1,
qecp->gregs + GLOB_RSIZE);
}
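/* Worked example (illustrative): with 256K of local QEC memory,
 * GLOB_MSIZE is programmed with 256K / 4 = 64K per channel, and
 * GLOB_TSIZE / GLOB_RSIZE each get 64K / 2 = 32K, i.e. one 32K
 * transmit FIFO and one 32K receive FIFO per QE channel.
 */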
static u8 qec_get_burst(struct device_node *dp)
{
u8 bsizes, bsizes_more;
/* Find and set the burst sizes for the QEC, since it
* does the actual dma for all 4 channels.
*/
bsizes = of_getintprop_default(dp, "burst-sizes", 0xff);
bsizes &= 0xff;
bsizes_more = of_getintprop_default(dp->parent, "burst-sizes", 0xff);
if (bsizes_more != 0xff)
bsizes &= bsizes_more;
if (bsizes == 0xff || (bsizes & DMA_BURST16) == 0 ||
(bsizes & DMA_BURST32) == 0)
bsizes = (DMA_BURST32 - 1);
return bsizes;
}
static struct sunqec *get_qec(struct platform_device *child)
{
struct platform_device *op = to_platform_device(child->dev.parent);
struct sunqec *qecp;
qecp = platform_get_drvdata(op);
if (!qecp) {
qecp = kzalloc(sizeof(struct sunqec), GFP_KERNEL);
if (qecp) {
u32 ctrl;
qecp->op = op;
qecp->gregs = of_ioremap(&op->resource[0], 0,
GLOB_REG_SIZE,
"QEC Global Registers");
if (!qecp->gregs)
goto fail;
/* Make sure the QEC is in MACE mode. */
ctrl = sbus_readl(qecp->gregs + GLOB_CTRL);
ctrl &= 0xf0000000;
if (ctrl != GLOB_CTRL_MMODE) {
printk(KERN_ERR "qec: Not in MACE mode!\n");
goto fail;
}
if (qec_global_reset(qecp->gregs))
goto fail;
qecp->qec_bursts = qec_get_burst(op->dev.of_node);
qec_init_once(qecp, op);
if (request_irq(op->archdata.irqs[0], qec_interrupt,
IRQF_SHARED, "qec", (void *) qecp)) {
printk(KERN_ERR "qec: Can't register irq.\n");
goto fail;
}
platform_set_drvdata(op, qecp);
qecp->next_module = root_qec_dev;
root_qec_dev = qecp;
}
}
return qecp;
fail:
if (qecp->gregs)
of_iounmap(&op->resource[0], qecp->gregs, GLOB_REG_SIZE);
kfree(qecp);
return NULL;
}
static const struct net_device_ops qec_ops = {
.ndo_open = qe_open,
.ndo_stop = qe_close,
.ndo_start_xmit = qe_start_xmit,
.ndo_set_rx_mode = qe_set_multicast,
.ndo_tx_timeout = qe_tx_timeout,
.ndo_change_mtu = eth_change_mtu,
.ndo_set_mac_address = eth_mac_addr,
.ndo_validate_addr = eth_validate_addr,
};
static int qec_ether_init(struct platform_device *op)
{
static unsigned version_printed;
struct net_device *dev;
struct sunqec *qecp;
struct sunqe *qe;
int i, res;
if (version_printed++ == 0)
printk(KERN_INFO "%s", version);
dev = alloc_etherdev(sizeof(struct sunqe));
if (!dev)
return -ENOMEM;
memcpy(dev->dev_addr, idprom->id_ethaddr, ETH_ALEN);
qe = netdev_priv(dev);
res = -ENODEV;
i = of_getintprop_default(op->dev.of_node, "channel#", -1);
if (i == -1)
goto fail;
qe->channel = i;
spin_lock_init(&qe->lock);
qecp = get_qec(op);
if (!qecp)
goto fail;
qecp->qes[qe->channel] = qe;
qe->dev = dev;
qe->parent = qecp;
qe->op = op;
res = -ENOMEM;
qe->qcregs = of_ioremap(&op->resource[0], 0,
CREG_REG_SIZE, "QEC Channel Registers");
if (!qe->qcregs) {
printk(KERN_ERR "qe: Cannot map channel registers.\n");
goto fail;
}
qe->mregs = of_ioremap(&op->resource[1], 0,
MREGS_REG_SIZE, "QE MACE Registers");
if (!qe->mregs) {
printk(KERN_ERR "qe: Cannot map MACE registers.\n");
goto fail;
}
qe->qe_block = dma_alloc_coherent(&op->dev, PAGE_SIZE,
&qe->qblock_dvma, GFP_ATOMIC);
qe->buffers = dma_alloc_coherent(&op->dev, sizeof(struct sunqe_buffers),
&qe->buffers_dvma, GFP_ATOMIC);
if (qe->qe_block == NULL || qe->qblock_dvma == 0 ||
qe->buffers == NULL || qe->buffers_dvma == 0)
goto fail;
/* Stop this QE. */
qe_stop(qe);
SET_NETDEV_DEV(dev, &op->dev);
dev->watchdog_timeo = 5*HZ;
dev->irq = op->archdata.irqs[0];
dev->dma = 0;
dev->ethtool_ops = &qe_ethtool_ops;
dev->netdev_ops = &qec_ops;
res = register_netdev(dev);
if (res)
goto fail;
platform_set_drvdata(op, qe);
printk(KERN_INFO "%s: qe channel[%d] %pM\n", dev->name, qe->channel,
dev->dev_addr);
return 0;
fail:
if (qe->qcregs)
of_iounmap(&op->resource[0], qe->qcregs, CREG_REG_SIZE);
if (qe->mregs)
of_iounmap(&op->resource[1], qe->mregs, MREGS_REG_SIZE);
if (qe->qe_block)
dma_free_coherent(&op->dev, PAGE_SIZE,
qe->qe_block, qe->qblock_dvma);
if (qe->buffers)
dma_free_coherent(&op->dev,
sizeof(struct sunqe_buffers),
qe->buffers,
qe->buffers_dvma);
free_netdev(dev);
return res;
}
static int qec_sbus_probe(struct platform_device *op)
{
return qec_ether_init(op);
}
static int qec_sbus_remove(struct platform_device *op)
{
struct sunqe *qp = platform_get_drvdata(op);
struct net_device *net_dev = qp->dev;
unregister_netdev(net_dev);
of_iounmap(&op->resource[0], qp->qcregs, CREG_REG_SIZE);
of_iounmap(&op->resource[1], qp->mregs, MREGS_REG_SIZE);
dma_free_coherent(&op->dev, PAGE_SIZE,
qp->qe_block, qp->qblock_dvma);
dma_free_coherent(&op->dev, sizeof(struct sunqe_buffers),
qp->buffers, qp->buffers_dvma);
free_netdev(net_dev);
return 0;
}
static const struct of_device_id qec_sbus_match[] = {
{
.name = "qe",
},
{},
};
MODULE_DEVICE_TABLE(of, qec_sbus_match);
static struct platform_driver qec_sbus_driver = {
.driver = {
.name = "qec",
.owner = THIS_MODULE,
.of_match_table = qec_sbus_match,
},
.probe = qec_sbus_probe,
.remove = qec_sbus_remove,
};
static int __init qec_init(void)
{
return platform_driver_register(&qec_sbus_driver);
}
static void __exit qec_exit(void)
{
platform_driver_unregister(&qec_sbus_driver);
while (root_qec_dev) {
struct sunqec *next = root_qec_dev->next_module;
struct platform_device *op = root_qec_dev->op;
free_irq(op->archdata.irqs[0], (void *) root_qec_dev);
of_iounmap(&op->resource[0], root_qec_dev->gregs,
GLOB_REG_SIZE);
kfree(root_qec_dev);
root_qec_dev = next;
}
}
module_init(qec_init);
module_exit(qec_exit);
| gpl-2.0 |
TeamNostalgia/amlogic-3.0.8 | drivers/usb/misc/usbtest.c | 1621 | 66387 | #include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/mutex.h>
#include <linux/usb.h>
/*-------------------------------------------------------------------------*/
/* FIXME make these public somewhere; usbdevfs.h? */
struct usbtest_param {
/* inputs */
unsigned test_num; /* 0..(TEST_CASES-1) */
unsigned iterations;
unsigned length;
unsigned vary;
unsigned sglen;
/* outputs */
struct timeval duration;
};
#define USBTEST_REQUEST _IOWR('U', 100, struct usbtest_param)
/*-------------------------------------------------------------------------*/
#define GENERIC /* let probe() bind using module params */
/* Some devices that can be used for testing will have "real" drivers.
* Entries for those need to be enabled here by hand, after disabling
* that "real" driver.
*/
//#define IBOT2 /* grab iBOT2 webcams */
//#define KEYSPAN_19Qi /* grab un-renumerated serial adapter */
/*-------------------------------------------------------------------------*/
struct usbtest_info {
const char *name;
u8 ep_in; /* bulk/intr source */
u8 ep_out; /* bulk/intr sink */
unsigned autoconf:1;
unsigned ctrl_out:1;
unsigned iso:1; /* try iso in/out */
int alt;
};
/* this is accessed only through usbfs ioctl calls.
* one ioctl to issue a test ... one lock per device.
* tests create other threads if they need them.
* urbs and buffers are allocated dynamically,
* and data generated deterministically.
*/
struct usbtest_dev {
struct usb_interface *intf;
struct usbtest_info *info;
int in_pipe;
int out_pipe;
int in_iso_pipe;
int out_iso_pipe;
struct usb_endpoint_descriptor *iso_in, *iso_out;
struct mutex lock;
#define TBUF_SIZE 256
u8 *buf;
};
static struct usb_device *testdev_to_usbdev(struct usbtest_dev *test)
{
return interface_to_usbdev(test->intf);
}
/* set up all urbs so they can be used with either bulk or interrupt */
#define INTERRUPT_RATE 1 /* msec/transfer */
#define ERROR(tdev, fmt, args...) \
dev_err(&(tdev)->intf->dev , fmt , ## args)
#define WARNING(tdev, fmt, args...) \
dev_warn(&(tdev)->intf->dev , fmt , ## args)
#define GUARD_BYTE 0xA5
/*-------------------------------------------------------------------------*/
static int
get_endpoints(struct usbtest_dev *dev, struct usb_interface *intf)
{
int tmp;
struct usb_host_interface *alt;
struct usb_host_endpoint *in, *out;
struct usb_host_endpoint *iso_in, *iso_out;
struct usb_device *udev;
for (tmp = 0; tmp < intf->num_altsetting; tmp++) {
unsigned ep;
in = out = NULL;
iso_in = iso_out = NULL;
alt = intf->altsetting + tmp;
/* take the first altsetting with in-bulk + out-bulk;
* ignore other endpoints and altsettings.
*/
for (ep = 0; ep < alt->desc.bNumEndpoints; ep++) {
struct usb_host_endpoint *e;
e = alt->endpoint + ep;
switch (e->desc.bmAttributes) {
case USB_ENDPOINT_XFER_BULK:
break;
case USB_ENDPOINT_XFER_ISOC:
if (dev->info->iso)
goto try_iso;
/* FALLTHROUGH */
default:
continue;
}
if (usb_endpoint_dir_in(&e->desc)) {
if (!in)
in = e;
} else {
if (!out)
out = e;
}
continue;
try_iso:
if (usb_endpoint_dir_in(&e->desc)) {
if (!iso_in)
iso_in = e;
} else {
if (!iso_out)
iso_out = e;
}
}
if ((in && out) || iso_in || iso_out)
goto found;
}
return -EINVAL;
found:
udev = testdev_to_usbdev(dev);
if (alt->desc.bAlternateSetting != 0) {
tmp = usb_set_interface(udev,
alt->desc.bInterfaceNumber,
alt->desc.bAlternateSetting);
if (tmp < 0)
return tmp;
}
if (in) {
dev->in_pipe = usb_rcvbulkpipe(udev,
in->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
dev->out_pipe = usb_sndbulkpipe(udev,
out->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
}
if (iso_in) {
dev->iso_in = &iso_in->desc;
dev->in_iso_pipe = usb_rcvisocpipe(udev,
iso_in->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
if (iso_out) {
dev->iso_out = &iso_out->desc;
dev->out_iso_pipe = usb_sndisocpipe(udev,
iso_out->desc.bEndpointAddress
& USB_ENDPOINT_NUMBER_MASK);
}
return 0;
}
/*-------------------------------------------------------------------------*/
/* Support for testing basic non-queued I/O streams.
*
* These just package urbs as requests that can be easily canceled.
* Each urb's data buffer is dynamically allocated; callers can fill
* them with non-zero test data (or test for it) when appropriate.
*/
static void simple_callback(struct urb *urb)
{
complete(urb->context);
}
static struct urb *usbtest_alloc_urb(
struct usb_device *udev,
int pipe,
unsigned long bytes,
unsigned transfer_flags,
unsigned offset)
{
struct urb *urb;
urb = usb_alloc_urb(0, GFP_KERNEL);
if (!urb)
return urb;
usb_fill_bulk_urb(urb, udev, pipe, NULL, bytes, simple_callback, NULL);
urb->interval = (udev->speed == USB_SPEED_HIGH)
? (INTERRUPT_RATE << 3)
: INTERRUPT_RATE;
urb->transfer_flags = transfer_flags;
if (usb_pipein(pipe))
urb->transfer_flags |= URB_SHORT_NOT_OK;
if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
GFP_KERNEL, &urb->transfer_dma);
else
urb->transfer_buffer = kmalloc(bytes + offset, GFP_KERNEL);
if (!urb->transfer_buffer) {
usb_free_urb(urb);
return NULL;
}
/* To test unaligned transfers, add an offset and fill the
unused memory with a guard value. */
if (offset) {
memset(urb->transfer_buffer, GUARD_BYTE, offset);
urb->transfer_buffer += offset;
if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
urb->transfer_dma += offset;
}
/* For inbound transfers, use the guard byte so that the test fails
if the data was not correctly copied. */
memset(urb->transfer_buffer,
usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
bytes);
return urb;
}
static struct urb *simple_alloc_urb(
struct usb_device *udev,
int pipe,
unsigned long bytes)
{
return usbtest_alloc_urb(udev, pipe, bytes, URB_NO_TRANSFER_DMA_MAP, 0);
}
static unsigned pattern;
static unsigned mod_pattern;
module_param_named(pattern, mod_pattern, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mod_pattern, "i/o pattern (0 == zeroes)");
static inline void simple_fill_buf(struct urb *urb)
{
unsigned i;
u8 *buf = urb->transfer_buffer;
unsigned len = urb->transfer_buffer_length;
switch (pattern) {
default:
/* FALLTHROUGH */
case 0:
memset(buf, 0, len);
break;
case 1: /* mod63 */
for (i = 0; i < len; i++)
*buf++ = (u8) (i % 63);
break;
}
}
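/* Illustrative note: pattern 1 ("mod63") fills buffers with 0, 1, ...,
 * 62, 0, 1, ...  Since 63 shares no factor with the power-of-two USB
 * maxpacket sizes, a lost, repeated, or truncated packet shows up as a
 * pattern mismatch when simple_check_buf() verifies buf[i] == i % 63,
 * not merely as corrupt bytes.
 */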
static inline unsigned long buffer_offset(void *buf)
{
return (unsigned long)buf & (ARCH_KMALLOC_MINALIGN - 1);
}
static int check_guard_bytes(struct usbtest_dev *tdev, struct urb *urb)
{
u8 *buf = urb->transfer_buffer;
u8 *guard = buf - buffer_offset(buf);
unsigned i;
for (i = 0; guard < buf; i++, guard++) {
if (*guard != GUARD_BYTE) {
ERROR(tdev, "guard byte[%d] %d (not %d)\n",
i, *guard, GUARD_BYTE);
return -EINVAL;
}
}
return 0;
}
static int simple_check_buf(struct usbtest_dev *tdev, struct urb *urb)
{
unsigned i;
u8 expected;
u8 *buf = urb->transfer_buffer;
unsigned len = urb->actual_length;
int ret = check_guard_bytes(tdev, urb);
if (ret)
return ret;
for (i = 0; i < len; i++, buf++) {
switch (pattern) {
/* all-zeroes has no synchronization issues */
case 0:
expected = 0;
break;
/* mod63 stays in sync with short-terminated transfers,
* or otherwise when host and gadget agree on how large
* each usb transfer request should be. resync is done
* with set_interface or set_config.
*/
case 1: /* mod63 */
expected = i % 63;
break;
/* always fail unsupported patterns */
default:
expected = !*buf;
break;
}
if (*buf == expected)
continue;
ERROR(tdev, "buf[%d] = %d (not %d)\n", i, *buf, expected);
return -EINVAL;
}
return 0;
}
static void simple_free_urb(struct urb *urb)
{
unsigned long offset = buffer_offset(urb->transfer_buffer);
if (urb->transfer_flags & URB_NO_TRANSFER_DMA_MAP)
usb_free_coherent(
urb->dev,
urb->transfer_buffer_length + offset,
urb->transfer_buffer - offset,
urb->transfer_dma - offset);
else
kfree(urb->transfer_buffer - offset);
usb_free_urb(urb);
}
static int simple_io(
struct usbtest_dev *tdev,
struct urb *urb,
int iterations,
int vary,
int expected,
const char *label
)
{
struct usb_device *udev = urb->dev;
int max = urb->transfer_buffer_length;
struct completion completion;
int retval = 0;
urb->context = &completion;
while (retval == 0 && iterations-- > 0) {
init_completion(&completion);
if (usb_pipeout(urb->pipe))
simple_fill_buf(urb);
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval != 0)
break;
/* NOTE: no timeouts; can't be broken out of by interrupt */
wait_for_completion(&completion);
retval = urb->status;
urb->dev = udev;
if (retval == 0 && usb_pipein(urb->pipe))
retval = simple_check_buf(tdev, urb);
if (vary) {
int len = urb->transfer_buffer_length;
len += vary;
len %= max;
if (len == 0)
len = (vary < max) ? vary : max;
urb->transfer_buffer_length = len;
}
/* FIXME if endpoint halted, clear halt (and log) */
}
urb->transfer_buffer_length = max;
if (expected != retval)
dev_err(&udev->dev,
"%s failed, iterations left %d, status %d (not %d)\n",
label, iterations, retval, expected);
return retval;
}
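/* Worked example (illustrative): with max = 512 and vary = 100 the
 * successive transfer lengths are 512, 100, 200, 300, 400, 500, 88,
 * 188, ... so repeated submissions sweep across packet-size
 * boundaries instead of reusing one fixed length.
 */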
/*-------------------------------------------------------------------------*/
/* We use scatterlist primitives to test queued I/O.
* Yes, this also tests the scatterlist primitives.
*/
static void free_sglist(struct scatterlist *sg, int nents)
{
unsigned i;
if (!sg)
return;
for (i = 0; i < nents; i++) {
if (!sg_page(&sg[i]))
continue;
kfree(sg_virt(&sg[i]));
}
kfree(sg);
}
static struct scatterlist *
alloc_sglist(int nents, int max, int vary)
{
struct scatterlist *sg;
unsigned i;
unsigned size = max;
sg = kmalloc(nents * sizeof *sg, GFP_KERNEL);
if (!sg)
return NULL;
sg_init_table(sg, nents);
for (i = 0; i < nents; i++) {
char *buf;
unsigned j;
buf = kzalloc(size, GFP_KERNEL);
if (!buf) {
free_sglist(sg, i);
return NULL;
}
/* kmalloc pages are always physically contiguous! */
sg_set_buf(&sg[i], buf, size);
switch (pattern) {
case 0:
/* already zeroed */
break;
case 1:
for (j = 0; j < size; j++)
*buf++ = (u8) (j % 63);
break;
}
if (vary) {
size += vary;
size %= max;
if (size == 0)
size = (vary < max) ? vary : max;
}
}
return sg;
}
static int perform_sglist(
struct usbtest_dev *tdev,
unsigned iterations,
int pipe,
struct usb_sg_request *req,
struct scatterlist *sg,
int nents
)
{
struct usb_device *udev = testdev_to_usbdev(tdev);
int retval = 0;
while (retval == 0 && iterations-- > 0) {
retval = usb_sg_init(req, udev, pipe,
(udev->speed == USB_SPEED_HIGH)
? (INTERRUPT_RATE << 3)
: INTERRUPT_RATE,
sg, nents, 0, GFP_KERNEL);
if (retval)
break;
usb_sg_wait(req);
retval = req->status;
/* FIXME check resulting data pattern */
/* FIXME if endpoint halted, clear halt (and log) */
}
/* FIXME for unlink or fault handling tests, don't report
* failure if retval is as we expected ...
*/
if (retval)
ERROR(tdev, "perform_sglist failed, "
"iterations left %d, status %d\n",
iterations, retval);
return retval;
}
/*-------------------------------------------------------------------------*/
/* unqueued control message testing
*
* there's a nice set of device functional requirements in chapter 9 of the
* usb 2.0 spec, which we can apply to ANY device, even ones that don't use
* special test firmware.
*
* we know the device is configured (or suspended) by the time it's visible
* through usbfs. we can't change that, so we won't test enumeration (which
* worked 'well enough' to get here, this time), power management (ditto),
* or remote wakeup (which needs human interaction).
*/
static unsigned realworld = 1;
module_param(realworld, uint, 0);
MODULE_PARM_DESC(realworld, "clear to demand stricter spec compliance");
static int get_altsetting(struct usbtest_dev *dev)
{
struct usb_interface *iface = dev->intf;
struct usb_device *udev = interface_to_usbdev(iface);
int retval;
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_INTERFACE, USB_DIR_IN|USB_RECIP_INTERFACE,
0, iface->altsetting[0].desc.bInterfaceNumber,
dev->buf, 1, USB_CTRL_GET_TIMEOUT);
switch (retval) {
case 1:
return dev->buf[0];
case 0:
retval = -ERANGE;
/* FALLTHROUGH */
default:
return retval;
}
}
static int set_altsetting(struct usbtest_dev *dev, int alternate)
{
struct usb_interface *iface = dev->intf;
struct usb_device *udev;
if (alternate < 0 || alternate >= 256)
return -EINVAL;
udev = interface_to_usbdev(iface);
return usb_set_interface(udev,
iface->altsetting[0].desc.bInterfaceNumber,
alternate);
}
static int is_good_config(struct usbtest_dev *tdev, int len)
{
struct usb_config_descriptor *config;
if (len < sizeof *config)
return 0;
config = (struct usb_config_descriptor *) tdev->buf;
switch (config->bDescriptorType) {
case USB_DT_CONFIG:
case USB_DT_OTHER_SPEED_CONFIG:
if (config->bLength != 9) {
ERROR(tdev, "bogus config descriptor length\n");
return 0;
}
/* this bit 'must be 1' but often isn't */
if (!realworld && !(config->bmAttributes & 0x80)) {
ERROR(tdev, "high bit of config attributes not set\n");
return 0;
}
if (config->bmAttributes & 0x1f) { /* reserved == 0 */
ERROR(tdev, "reserved config bits set\n");
return 0;
}
break;
default:
return 0;
}
if (le16_to_cpu(config->wTotalLength) == len) /* read it all */
return 1;
if (le16_to_cpu(config->wTotalLength) >= TBUF_SIZE) /* max partial read */
return 1;
ERROR(tdev, "bogus config descriptor read size\n");
return 0;
}
/* sanity test for standard requests working with usb_control_mesg() and some
* of the utility functions which use it.
*
* this doesn't test how endpoint halts behave or data toggles get set, since
* we won't do I/O to bulk/interrupt endpoints here (which is how to change
* halt or toggle). toggle testing is impractical without support from hcds.
*
* this avoids failing devices linux would normally work with, by not testing
* config/altsetting operations for devices that only support their defaults.
* such devices rarely support those needless operations.
*
* NOTE that since this is a sanity test, it's not examining boundary cases
* to see if usbcore, hcd, and device all behave right. such testing would
* involve varied read sizes and other operation sequences.
*/
static int ch9_postconfig(struct usbtest_dev *dev)
{
struct usb_interface *iface = dev->intf;
struct usb_device *udev = interface_to_usbdev(iface);
int i, alt, retval;
/* [9.2.3] if there's more than one altsetting, we need to be able to
* set and get each one. mostly trusts the descriptors from usbcore.
*/
for (i = 0; i < iface->num_altsetting; i++) {
/* 9.2.3 constrains the range here */
alt = iface->altsetting[i].desc.bAlternateSetting;
if (alt < 0 || alt >= iface->num_altsetting) {
dev_err(&iface->dev,
"invalid alt [%d].bAltSetting = %d\n",
i, alt);
}
/* [real world] get/set unimplemented if there's only one */
if (realworld && iface->num_altsetting == 1)
continue;
/* [9.4.10] set_interface */
retval = set_altsetting(dev, alt);
if (retval) {
dev_err(&iface->dev, "can't set_interface = %d, %d\n",
alt, retval);
return retval;
}
/* [9.4.4] get_interface always works */
retval = get_altsetting(dev);
if (retval != alt) {
dev_err(&iface->dev, "get alt should be %d, was %d\n",
alt, retval);
return (retval < 0) ? retval : -EDOM;
}
}
/* [real world] get_config unimplemented if there's only one */
if (!realworld || udev->descriptor.bNumConfigurations != 1) {
int expected = udev->actconfig->desc.bConfigurationValue;
/* [9.4.2] get_configuration always works
* ... although some cheap devices (like one TI Hub I've got)
* won't return config descriptors except before set_config.
*/
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
USB_REQ_GET_CONFIGURATION,
USB_DIR_IN | USB_RECIP_DEVICE,
0, 0, dev->buf, 1, USB_CTRL_GET_TIMEOUT);
if (retval != 1 || dev->buf[0] != expected) {
dev_err(&iface->dev, "get config --> %d %d (1 %d)\n",
retval, dev->buf[0], expected);
return (retval < 0) ? retval : -EDOM;
}
}
/* there's always [9.4.3] a device descriptor [9.6.1] */
retval = usb_get_descriptor(udev, USB_DT_DEVICE, 0,
dev->buf, sizeof udev->descriptor);
if (retval != sizeof udev->descriptor) {
dev_err(&iface->dev, "dev descriptor --> %d\n", retval);
return (retval < 0) ? retval : -EDOM;
}
/* there's always [9.4.3] at least one config descriptor [9.6.3] */
for (i = 0; i < udev->descriptor.bNumConfigurations; i++) {
retval = usb_get_descriptor(udev, USB_DT_CONFIG, i,
dev->buf, TBUF_SIZE);
if (!is_good_config(dev, retval)) {
dev_err(&iface->dev,
"config [%d] descriptor --> %d\n",
i, retval);
return (retval < 0) ? retval : -EDOM;
}
/* FIXME cross-checking udev->config[i] to make sure usbcore
* parsed it right (etc) would be good testing paranoia
*/
}
/* and sometimes [9.2.6.6] speed dependent descriptors */
if (le16_to_cpu(udev->descriptor.bcdUSB) == 0x0200) {
struct usb_qualifier_descriptor *d = NULL;
/* device qualifier [9.6.2] */
retval = usb_get_descriptor(udev,
USB_DT_DEVICE_QUALIFIER, 0, dev->buf,
sizeof(struct usb_qualifier_descriptor));
if (retval == -EPIPE) {
if (udev->speed == USB_SPEED_HIGH) {
dev_err(&iface->dev,
"hs dev qualifier --> %d\n",
retval);
return (retval < 0) ? retval : -EDOM;
}
/* usb2.0 but not high-speed capable; fine */
} else if (retval != sizeof(struct usb_qualifier_descriptor)) {
dev_err(&iface->dev, "dev qualifier --> %d\n", retval);
return (retval < 0) ? retval : -EDOM;
} else
d = (struct usb_qualifier_descriptor *) dev->buf;
/* might not have [9.6.2] any other-speed configs [9.6.4] */
if (d) {
unsigned max = d->bNumConfigurations;
for (i = 0; i < max; i++) {
retval = usb_get_descriptor(udev,
USB_DT_OTHER_SPEED_CONFIG, i,
dev->buf, TBUF_SIZE);
if (!is_good_config(dev, retval)) {
dev_err(&iface->dev,
"other speed config --> %d\n",
retval);
return (retval < 0) ? retval : -EDOM;
}
}
}
}
/* FIXME fetch strings from at least the device descriptor */
/* [9.4.5] get_status always works */
retval = usb_get_status(udev, USB_RECIP_DEVICE, 0, dev->buf);
if (retval != 2) {
dev_err(&iface->dev, "get dev status --> %d\n", retval);
return (retval < 0) ? retval : -EDOM;
}
/* FIXME configuration.bmAttributes says if we could try to set/clear
* the device's remote wakeup feature ... if we can, test that here
*/
retval = usb_get_status(udev, USB_RECIP_INTERFACE,
iface->altsetting[0].desc.bInterfaceNumber, dev->buf);
if (retval != 2) {
dev_err(&iface->dev, "get interface status --> %d\n", retval);
return (retval < 0) ? retval : -EDOM;
}
/* FIXME get status for each endpoint in the interface */
return 0;
}
/*-------------------------------------------------------------------------*/
/* use ch9 requests to test whether:
* (a) queues work for control, keeping N subtests queued and
* active (auto-resubmit) for M loops through the queue.
* (b) protocol stalls (control-only) will autorecover.
* it's not like bulk/intr; no halt clearing.
* (c) short control reads are reported and handled.
* (d) queues are always processed in-order
*/
struct ctrl_ctx {
spinlock_t lock;
struct usbtest_dev *dev;
struct completion complete;
unsigned count;
unsigned pending;
int status;
struct urb **urb;
struct usbtest_param *param;
int last;
};
#define NUM_SUBCASES 15 /* how many test subcases here? */
struct subcase {
struct usb_ctrlrequest setup;
int number;
int expected;
};
static void ctrl_complete(struct urb *urb)
{
struct ctrl_ctx *ctx = urb->context;
struct usb_ctrlrequest *reqp;
struct subcase *subcase;
int status = urb->status;
reqp = (struct usb_ctrlrequest *)urb->setup_packet;
subcase = container_of(reqp, struct subcase, setup);
spin_lock(&ctx->lock);
ctx->count--;
ctx->pending--;
/* queue must transfer and complete in fifo order, unless
* usb_unlink_urb() is used to unlink something not at the
* physical queue head (not tested).
*/
if (subcase->number > 0) {
if ((subcase->number - ctx->last) != 1) {
ERROR(ctx->dev,
"subcase %d completed out of order, last %d\n",
subcase->number, ctx->last);
status = -EDOM;
ctx->last = subcase->number;
goto error;
}
}
ctx->last = subcase->number;
/* succeed or fault in only one way? */
if (status == subcase->expected)
status = 0;
/* async unlink for cleanup? */
else if (status != -ECONNRESET) {
/* some faults are allowed, not required */
if (subcase->expected > 0 && (
((status == -subcase->expected /* happened */
|| status == 0)))) /* didn't */
status = 0;
/* sometimes more than one fault is allowed */
else if (subcase->number == 12 && status == -EPIPE)
status = 0;
else
ERROR(ctx->dev, "subtest %d error, status %d\n",
subcase->number, status);
}
/* unexpected status codes mean errors; ideally, in hardware */
if (status) {
error:
if (ctx->status == 0) {
int i;
ctx->status = status;
ERROR(ctx->dev, "control queue %02x.%02x, err %d, "
"%d left, subcase %d, len %d/%d\n",
reqp->bRequestType, reqp->bRequest,
status, ctx->count, subcase->number,
urb->actual_length,
urb->transfer_buffer_length);
/* FIXME this "unlink everything" exit route should
* be a separate test case.
*/
/* unlink whatever's still pending */
for (i = 1; i < ctx->param->sglen; i++) {
struct urb *u = ctx->urb[
(i + subcase->number)
% ctx->param->sglen];
if (u == urb || !u->dev)
continue;
spin_unlock(&ctx->lock);
status = usb_unlink_urb(u);
spin_lock(&ctx->lock);
switch (status) {
case -EINPROGRESS:
case -EBUSY:
case -EIDRM:
continue;
default:
ERROR(ctx->dev, "urb unlink --> %d\n",
status);
}
}
status = ctx->status;
}
}
/* resubmit if we need to, else mark this as done */
if ((status == 0) && (ctx->pending < ctx->count)) {
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status != 0) {
ERROR(ctx->dev,
"can't resubmit ctrl %02x.%02x, err %d\n",
reqp->bRequestType, reqp->bRequest, status);
urb->dev = NULL;
} else
ctx->pending++;
} else
urb->dev = NULL;
/* signal completion when nothing's queued */
if (ctx->pending == 0)
complete(&ctx->complete);
spin_unlock(&ctx->lock);
}
static int
test_ctrl_queue(struct usbtest_dev *dev, struct usbtest_param *param)
{
struct usb_device *udev = testdev_to_usbdev(dev);
struct urb **urb;
struct ctrl_ctx context;
int i;
spin_lock_init(&context.lock);
context.dev = dev;
init_completion(&context.complete);
context.count = param->sglen * param->iterations;
context.pending = 0;
context.status = -ENOMEM;
context.param = param;
context.last = -1;
/* allocate and init the urbs we'll queue.
* as with bulk/intr sglists, sglen is the queue depth; it also
* controls which subtests run (more tests than sglen) or rerun.
*/
urb = kcalloc(param->sglen, sizeof(struct urb *), GFP_KERNEL);
if (!urb)
return -ENOMEM;
for (i = 0; i < param->sglen; i++) {
int pipe = usb_rcvctrlpipe(udev, 0);
unsigned len;
struct urb *u;
struct usb_ctrlrequest req;
struct subcase *reqp;
/* sign of this variable means:
* -: tested code must return this (negative) error code
* +: tested code may return this (negative too) error code
*/
int expected = 0;
/* requests here are mostly expected to succeed on any
* device, but some are chosen to trigger protocol stalls
* or short reads.
*/
memset(&req, 0, sizeof req);
req.bRequest = USB_REQ_GET_DESCRIPTOR;
req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
switch (i % NUM_SUBCASES) {
case 0: /* get device descriptor */
req.wValue = cpu_to_le16(USB_DT_DEVICE << 8);
len = sizeof(struct usb_device_descriptor);
break;
case 1: /* get first config descriptor (only) */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = sizeof(struct usb_config_descriptor);
break;
case 2: /* get altsetting (OFTEN STALLS) */
req.bRequest = USB_REQ_GET_INTERFACE;
req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
/* index = 0 means first interface */
len = 1;
expected = EPIPE;
break;
case 3: /* get interface status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_INTERFACE;
/* interface 0 */
len = 2;
break;
case 4: /* get device status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_DEVICE;
len = 2;
break;
case 5: /* get device qualifier (MAY STALL) */
req.wValue = cpu_to_le16(USB_DT_DEVICE_QUALIFIER << 8);
len = sizeof(struct usb_qualifier_descriptor);
if (udev->speed != USB_SPEED_HIGH)
expected = EPIPE;
break;
case 6: /* get first config descriptor, plus interface */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = sizeof(struct usb_config_descriptor);
len += sizeof(struct usb_interface_descriptor);
break;
case 7: /* get interface descriptor (ALWAYS STALLS) */
req.wValue = cpu_to_le16(USB_DT_INTERFACE << 8);
/* interface == 0 */
len = sizeof(struct usb_interface_descriptor);
expected = -EPIPE;
break;
/* NOTE: two consecutive stalls in the queue here.
* that tests fault recovery a bit more aggressively. */
case 8: /* clear endpoint halt (MAY STALL) */
req.bRequest = USB_REQ_CLEAR_FEATURE;
req.bRequestType = USB_RECIP_ENDPOINT;
/* wValue 0 == ep halt */
/* wIndex 0 == ep0 (shouldn't halt!) */
len = 0;
pipe = usb_sndctrlpipe(udev, 0);
expected = EPIPE;
break;
case 9: /* get endpoint status */
req.bRequest = USB_REQ_GET_STATUS;
req.bRequestType = USB_DIR_IN|USB_RECIP_ENDPOINT;
/* endpoint 0 */
len = 2;
break;
case 10: /* trigger short read (EREMOTEIO) */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
len = 1024;
expected = -EREMOTEIO;
break;
/* NOTE: two consecutive _different_ faults in the queue. */
case 11: /* get endpoint descriptor (ALWAYS STALLS) */
req.wValue = cpu_to_le16(USB_DT_ENDPOINT << 8);
/* endpoint == 0 */
len = sizeof(struct usb_interface_descriptor);
expected = EPIPE;
break;
/* NOTE: sometimes even a third fault in the queue! */
case 12: /* get string 0 descriptor (MAY STALL) */
req.wValue = cpu_to_le16(USB_DT_STRING << 8);
/* string == 0, for language IDs */
len = sizeof(struct usb_interface_descriptor);
/* may succeed when > 4 languages */
expected = EREMOTEIO; /* or EPIPE, if no strings */
break;
case 13: /* short read, resembling case 10 */
req.wValue = cpu_to_le16((USB_DT_CONFIG << 8) | 0);
/* last data packet "should" be DATA1, not DATA0 */
len = 1024 - udev->descriptor.bMaxPacketSize0;
expected = -EREMOTEIO;
break;
case 14: /* short read; try to fill the last packet */
req.wValue = cpu_to_le16((USB_DT_DEVICE << 8) | 0);
/* device descriptor size == 18 bytes */
len = udev->descriptor.bMaxPacketSize0;
if (udev->speed == USB_SPEED_SUPER)
len = 512;
switch (len) {
case 8:
len = 24;
break;
case 16:
len = 32;
break;
}
expected = -EREMOTEIO;
break;
default:
ERROR(dev, "bogus number of ctrl queue testcases!\n");
context.status = -EINVAL;
goto cleanup;
}
req.wLength = cpu_to_le16(len);
urb[i] = u = simple_alloc_urb(udev, pipe, len);
if (!u)
goto cleanup;
reqp = kmalloc(sizeof *reqp, GFP_KERNEL);
if (!reqp)
goto cleanup;
reqp->setup = req;
reqp->number = i % NUM_SUBCASES;
reqp->expected = expected;
u->setup_packet = (char *) &reqp->setup;
u->context = &context;
u->complete = ctrl_complete;
}
/* queue the urbs */
context.urb = urb;
spin_lock_irq(&context.lock);
for (i = 0; i < param->sglen; i++) {
context.status = usb_submit_urb(urb[i], GFP_ATOMIC);
if (context.status != 0) {
ERROR(dev, "can't submit urb[%d], status %d\n",
i, context.status);
context.count = context.pending;
break;
}
context.pending++;
}
spin_unlock_irq(&context.lock);
/* FIXME set timer and time out; provide a disconnect hook */
/* wait for the last one to complete */
if (context.pending > 0)
wait_for_completion(&context.complete);
cleanup:
for (i = 0; i < param->sglen; i++) {
if (!urb[i])
continue;
urb[i]->dev = udev;
kfree(urb[i]->setup_packet);
simple_free_urb(urb[i]);
}
kfree(urb);
return context.status;
}
#undef NUM_SUBCASES
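/* For illustration only (a sketch modeled loosely on the user-mode
* testusb tool, not part of this driver): test 10 would be driven
* from user space through usbfs roughly like this, where the device
* path and interface number are placeholders for whatever device
* usbtest actually bound to:
*
*	struct usbtest_param param = {
*		.test_num   = 10,
*		.iterations = 1000,
*		.sglen      = 32,		// queue depth
*	};
*	struct usbdevfs_ioctl wrapper = {
*		.ifno       = 0,		// interface under test
*		.ioctl_code = USBTEST_REQUEST,
*		.data       = &param,
*	};
*	int fd = open("/dev/bus/usb/BBB/DDD", O_RDWR);
*	int status = ioctl(fd, USBDEVFS_IOCTL, &wrapper);
*/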
/*-------------------------------------------------------------------------*/
static void unlink1_callback(struct urb *urb)
{
int status = urb->status;
/* we "know" -EPIPE (stall) never happens */
if (!status)
status = usb_submit_urb(urb, GFP_ATOMIC);
if (status) {
urb->status = status;
complete(urb->context);
}
}
static int unlink1(struct usbtest_dev *dev, int pipe, int size, int async)
{
struct urb *urb;
struct completion completion;
int retval = 0;
init_completion(&completion);
urb = simple_alloc_urb(testdev_to_usbdev(dev), pipe, size);
if (!urb)
return -ENOMEM;
urb->context = &completion;
urb->complete = unlink1_callback;
/* keep the endpoint busy. there are lots of hc/hcd-internal
* states, and testing should get to all of them over time.
*
* FIXME want additional tests for when endpoint is STALLing
* due to errors, or is just NAKing requests.
*/
retval = usb_submit_urb(urb, GFP_KERNEL);
if (retval != 0) {
dev_err(&dev->intf->dev, "submit fail %d\n", retval);
return retval;
}
/* unlinking that should always work. variable delay tests more
* hcd states and code paths, even with little other system load.
*/
msleep(jiffies % (2 * INTERRUPT_RATE));
if (async) {
while (!completion_done(&completion)) {
retval = usb_unlink_urb(urb);
switch (retval) {
case -EBUSY:
case -EIDRM:
/* we can't unlink urbs while they're completing
* or if they've completed, and we haven't
* resubmitted. "normal" drivers would prevent
* resubmission, but since we're testing unlink
* paths, we can't.
*/
ERROR(dev, "unlink retry\n");
continue;
case 0:
case -EINPROGRESS:
break;
default:
dev_err(&dev->intf->dev,
"unlink fail %d\n", retval);
return retval;
}
break;
}
} else
usb_kill_urb(urb);
wait_for_completion(&completion);
retval = urb->status;
simple_free_urb(urb);
if (async)
return (retval == -ECONNRESET) ? 0 : retval - 1000;
else
return (retval == -ENOENT || retval == -EPERM) ?
0 : retval - 2000;
}
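/* The -1000/-2000 offsets above keep failure reports unambiguous:
* e.g. a sync unlink whose urb finishes with -ETIMEDOUT (-110) is
* returned as -2110, so the caller can tell which unlink path failed
* as well as what the final urb status was.
*/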
static int unlink_simple(struct usbtest_dev *dev, int pipe, int len)
{
int retval = 0;
/* test sync and async paths */
retval = unlink1(dev, pipe, len, 1);
if (!retval)
retval = unlink1(dev, pipe, len, 0);
return retval;
}
/*-------------------------------------------------------------------------*/
struct queued_ctx {
struct completion complete;
atomic_t pending;
unsigned num;
int status;
struct urb **urbs;
};
static void unlink_queued_callback(struct urb *urb)
{
int status = urb->status;
struct queued_ctx *ctx = urb->context;
if (ctx->status)
goto done;
if (urb == ctx->urbs[ctx->num - 4] || urb == ctx->urbs[ctx->num - 2]) {
if (status == -ECONNRESET)
goto done;
/* What error should we report if the URB completed normally? */
}
if (status != 0)
ctx->status = status;
done:
if (atomic_dec_and_test(&ctx->pending))
complete(&ctx->complete);
}
static int unlink_queued(struct usbtest_dev *dev, int pipe, unsigned num,
unsigned size)
{
struct queued_ctx ctx;
struct usb_device *udev = testdev_to_usbdev(dev);
void *buf;
dma_addr_t buf_dma;
int i;
int retval = -ENOMEM;
init_completion(&ctx.complete);
atomic_set(&ctx.pending, 1); /* One more than the actual value */
ctx.num = num;
ctx.status = 0;
buf = usb_alloc_coherent(udev, size, GFP_KERNEL, &buf_dma);
if (!buf)
return retval;
memset(buf, 0, size);
/* Allocate and init the urbs we'll queue */
ctx.urbs = kcalloc(num, sizeof(struct urb *), GFP_KERNEL);
if (!ctx.urbs)
goto free_buf;
for (i = 0; i < num; i++) {
ctx.urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
if (!ctx.urbs[i])
goto free_urbs;
usb_fill_bulk_urb(ctx.urbs[i], udev, pipe, buf, size,
unlink_queued_callback, &ctx);
ctx.urbs[i]->transfer_dma = buf_dma;
ctx.urbs[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
}
/* Submit all the URBs and then unlink URBs num - 4 and num - 2. */
for (i = 0; i < num; i++) {
atomic_inc(&ctx.pending);
retval = usb_submit_urb(ctx.urbs[i], GFP_KERNEL);
if (retval != 0) {
dev_err(&dev->intf->dev, "submit urbs[%d] fail %d\n",
i, retval);
atomic_dec(&ctx.pending);
ctx.status = retval;
break;
}
}
if (i == num) {
usb_unlink_urb(ctx.urbs[num - 4]);
usb_unlink_urb(ctx.urbs[num - 2]);
} else {
while (--i >= 0)
usb_unlink_urb(ctx.urbs[i]);
}
if (atomic_dec_and_test(&ctx.pending)) /* The extra count */
complete(&ctx.complete);
wait_for_completion(&ctx.complete);
retval = ctx.status;
free_urbs:
for (i = 0; i < num; i++)
usb_free_urb(ctx.urbs[i]);
kfree(ctx.urbs);
free_buf:
usb_free_coherent(udev, size, buf, buf_dma);
return retval;
}
/*-------------------------------------------------------------------------*/
static int verify_not_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
int retval;
u16 status;
/* shouldn't look or act halted */
retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't get no-halt status, %d\n",
ep, retval);
return retval;
}
if (status != 0) {
ERROR(tdev, "ep %02x bogus status: %04x != 0\n", ep, status);
return -EINVAL;
}
retval = simple_io(tdev, urb, 1, 0, 0, __func__);
if (retval != 0)
return -EINVAL;
return 0;
}
static int verify_halted(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
int retval;
u16 status;
/* should look and act halted */
retval = usb_get_status(urb->dev, USB_RECIP_ENDPOINT, ep, &status);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't get halt status, %d\n",
ep, retval);
return retval;
}
le16_to_cpus(&status);
if (status != 1) {
ERROR(tdev, "ep %02x bogus status: %04x != 1\n", ep, status);
return -EINVAL;
}
retval = simple_io(tdev, urb, 1, 0, -EPIPE, __func__);
if (retval != -EPIPE)
return -EINVAL;
retval = simple_io(tdev, urb, 1, 0, -EPIPE, "verify_still_halted");
if (retval != -EPIPE)
return -EINVAL;
return 0;
}
static int test_halt(struct usbtest_dev *tdev, int ep, struct urb *urb)
{
int retval;
/* shouldn't look or act halted now */
retval = verify_not_halted(tdev, ep, urb);
if (retval < 0)
return retval;
/* set halt (protocol test only), verify it worked */
retval = usb_control_msg(urb->dev, usb_sndctrlpipe(urb->dev, 0),
USB_REQ_SET_FEATURE, USB_RECIP_ENDPOINT,
USB_ENDPOINT_HALT, ep,
NULL, 0, USB_CTRL_SET_TIMEOUT);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't set halt, %d\n", ep, retval);
return retval;
}
retval = verify_halted(tdev, ep, urb);
if (retval < 0)
return retval;
/* clear halt (tests API + protocol), verify it worked */
retval = usb_clear_halt(urb->dev, urb->pipe);
if (retval < 0) {
ERROR(tdev, "ep %02x couldn't clear halt, %d\n", ep, retval);
return retval;
}
retval = verify_not_halted(tdev, ep, urb);
if (retval < 0)
return retval;
/* NOTE: could also verify SET_INTERFACE clear halts ... */
return 0;
}
static int halt_simple(struct usbtest_dev *dev)
{
int ep;
int retval = 0;
struct urb *urb;
urb = simple_alloc_urb(testdev_to_usbdev(dev), 0, 512);
if (urb == NULL)
return -ENOMEM;
if (dev->in_pipe) {
ep = usb_pipeendpoint(dev->in_pipe) | USB_DIR_IN;
urb->pipe = dev->in_pipe;
retval = test_halt(dev, ep, urb);
if (retval < 0)
goto done;
}
if (dev->out_pipe) {
ep = usb_pipeendpoint(dev->out_pipe);
urb->pipe = dev->out_pipe;
retval = test_halt(dev, ep, urb);
}
done:
simple_free_urb(urb);
return retval;
}
/*-------------------------------------------------------------------------*/
/* Control OUT tests use the vendor control requests from Intel's
* USB 2.0 compliance test device: write a buffer, read it back.
*
* Intel's spec only _requires_ that it work for one packet, which
* is pretty weak. Some HCDs place limits here; most devices will
* need to be able to handle more than one OUT data packet. We'll
* try whatever we're told to try.
*/
static int ctrl_out(struct usbtest_dev *dev,
unsigned count, unsigned length, unsigned vary, unsigned offset)
{
unsigned i, j, len;
int retval;
u8 *buf;
char *what = "?";
struct usb_device *udev;
if (length < 1 || length > 0xffff || vary >= length)
return -EINVAL;
buf = kmalloc(length + offset, GFP_KERNEL);
if (!buf)
return -ENOMEM;
buf += offset;
udev = testdev_to_usbdev(dev);
len = length;
retval = 0;
/* NOTE: hardware might well act differently if we pushed it
* with lots back-to-back queued requests.
*/
for (i = 0; i < count; i++) {
/* write patterned data */
for (j = 0; j < len; j++)
buf[j] = i + j;
retval = usb_control_msg(udev, usb_sndctrlpipe(udev, 0),
0x5b, USB_DIR_OUT|USB_TYPE_VENDOR,
0, 0, buf, len, USB_CTRL_SET_TIMEOUT);
if (retval != len) {
what = "write";
if (retval >= 0) {
ERROR(dev, "ctrl_out, wlen %d (expected %d)\n",
retval, len);
retval = -EBADMSG;
}
break;
}
/* read it back -- assuming nothing intervened!! */
retval = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0),
0x5c, USB_DIR_IN|USB_TYPE_VENDOR,
0, 0, buf, len, USB_CTRL_GET_TIMEOUT);
if (retval != len) {
what = "read";
if (retval >= 0) {
ERROR(dev, "ctrl_out, rlen %d (expected %d)\n",
retval, len);
retval = -EBADMSG;
}
break;
}
/* fail if we can't verify */
for (j = 0; j < len; j++) {
if (buf[j] != (u8) (i + j)) {
ERROR(dev, "ctrl_out, byte %d is %d not %d\n",
j, buf[j], (u8) i + j);
retval = -EBADMSG;
break;
}
}
if (retval < 0) {
what = "verify";
break;
}
len += vary;
/* [real world] the "zero bytes IN" case isn't really used.
* hardware can easily trip up in this weird case, since its
* status stage is IN, not OUT like other ep0in transfers.
*/
if (len > length)
len = realworld ? 1 : 0;
}
if (retval < 0)
ERROR(dev, "ctrl_out %s failed, code %d, count %d\n",
what, retval, i);
kfree(buf - offset);
return retval;
}
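/* A worked example of the length progression above: with length 32
* and vary 5 the transfer sizes run 32, wrap to 1 (the realworld
* default), then step 6, 11, ..., 31 before wrapping again; with
* realworld clear the wrap target is the zero-length case instead.
*/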
/*-------------------------------------------------------------------------*/
/* ISO tests ... mimics common usage
* - buffer length is split into N packets (mostly maxpacket sized)
* - multi-buffers according to sglen
*/
struct iso_context {
unsigned count;
unsigned pending;
spinlock_t lock;
struct completion done;
int submit_error;
unsigned long errors;
unsigned long packet_count;
struct usbtest_dev *dev;
};
static void iso_callback(struct urb *urb)
{
struct iso_context *ctx = urb->context;
spin_lock(&ctx->lock);
ctx->count--;
ctx->packet_count += urb->number_of_packets;
if (urb->error_count > 0)
ctx->errors += urb->error_count;
else if (urb->status != 0)
ctx->errors += urb->number_of_packets;
else if (urb->actual_length != urb->transfer_buffer_length)
ctx->errors++;
else if (check_guard_bytes(ctx->dev, urb) != 0)
ctx->errors++;
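/* Resubmit only while the completions still owed (count) exceed
* what the other in-flight urbs (pending - 1) can deliver on their
* own; otherwise this urb simply retires below.
*/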
if (urb->status == 0 && ctx->count > (ctx->pending - 1)
&& !ctx->submit_error) {
int status = usb_submit_urb(urb, GFP_ATOMIC);
switch (status) {
case 0:
goto done;
default:
dev_err(&ctx->dev->intf->dev,
"iso resubmit err %d\n",
status);
/* FALLTHROUGH */
case -ENODEV: /* disconnected */
case -ESHUTDOWN: /* endpoint disabled */
ctx->submit_error = 1;
break;
}
}
ctx->pending--;
if (ctx->pending == 0) {
if (ctx->errors)
dev_err(&ctx->dev->intf->dev,
"iso test, %lu errors out of %lu\n",
ctx->errors, ctx->packet_count);
complete(&ctx->done);
}
done:
spin_unlock(&ctx->lock);
}
static struct urb *iso_alloc_urb(
struct usb_device *udev,
int pipe,
struct usb_endpoint_descriptor *desc,
long bytes,
unsigned offset
)
{
struct urb *urb;
unsigned i, maxp, packets;
if (bytes < 0 || !desc)
return NULL;
maxp = 0x7ff & le16_to_cpu(desc->wMaxPacketSize);
maxp *= 1 + (0x3 & (le16_to_cpu(desc->wMaxPacketSize) >> 11));
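/* Decoding example for the two lines above: a high bandwidth
* endpoint reporting wMaxPacketSize 0x0c00 yields 0x400 (1024)
* bytes from bits 10..0 and an extra-transaction count of 1 from
* bits 12..11, so maxp becomes 2048 bytes per microframe.
*/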
packets = DIV_ROUND_UP(bytes, maxp);
urb = usb_alloc_urb(packets, GFP_KERNEL);
if (!urb)
return urb;
urb->dev = udev;
urb->pipe = pipe;
urb->number_of_packets = packets;
urb->transfer_buffer_length = bytes;
urb->transfer_buffer = usb_alloc_coherent(udev, bytes + offset,
GFP_KERNEL,
&urb->transfer_dma);
if (!urb->transfer_buffer) {
usb_free_urb(urb);
return NULL;
}
if (offset) {
memset(urb->transfer_buffer, GUARD_BYTE, offset);
urb->transfer_buffer += offset;
urb->transfer_dma += offset;
}
/* For inbound transfers use guard byte so that test fails if
data not correctly copied */
memset(urb->transfer_buffer,
usb_pipein(urb->pipe) ? GUARD_BYTE : 0,
bytes);
for (i = 0; i < packets; i++) {
/* here, only the last packet will be short */
urb->iso_frame_desc[i].length = min((unsigned) bytes, maxp);
bytes -= urb->iso_frame_desc[i].length;
urb->iso_frame_desc[i].offset = maxp * i;
}
urb->complete = iso_callback;
/* urb->context = SET BY CALLER */
urb->interval = 1 << (desc->bInterval - 1);
urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
return urb;
}
static int
test_iso_queue(struct usbtest_dev *dev, struct usbtest_param *param,
int pipe, struct usb_endpoint_descriptor *desc, unsigned offset)
{
struct iso_context context;
struct usb_device *udev;
unsigned i;
unsigned long packets = 0;
int status = 0;
struct urb *urbs[10]; /* FIXME no limit */
if (param->sglen > 10)
return -EDOM;
memset(&context, 0, sizeof context);
context.count = param->iterations * param->sglen;
context.dev = dev;
init_completion(&context.done);
spin_lock_init(&context.lock);
memset(urbs, 0, sizeof urbs);
udev = testdev_to_usbdev(dev);
dev_info(&dev->intf->dev,
"... iso period %d %sframes, wMaxPacket %04x\n",
1 << (desc->bInterval - 1),
(udev->speed == USB_SPEED_HIGH) ? "micro" : "",
le16_to_cpu(desc->wMaxPacketSize));
for (i = 0; i < param->sglen; i++) {
urbs[i] = iso_alloc_urb(udev, pipe, desc,
param->length, offset);
if (!urbs[i]) {
status = -ENOMEM;
goto fail;
}
packets += urbs[i]->number_of_packets;
urbs[i]->context = &context;
}
packets *= param->iterations;
dev_info(&dev->intf->dev,
"... total %lu msec (%lu packets)\n",
(packets * (1 << (desc->bInterval - 1)))
/ ((udev->speed == USB_SPEED_HIGH) ? 8 : 1),
packets);
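/* Rough example of the estimate above: 8 urbs of 10 packets over
* 100 iterations is 8000 packets; at bInterval 1 on a high speed
* link that is one packet per microframe, i.e. 8000 / 8 = 1000 msec.
*/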
spin_lock_irq(&context.lock);
for (i = 0; i < param->sglen; i++) {
++context.pending;
status = usb_submit_urb(urbs[i], GFP_ATOMIC);
if (status < 0) {
ERROR(dev, "submit iso[%d], error %d\n", i, status);
if (i == 0) {
spin_unlock_irq(&context.lock);
goto fail;
}
simple_free_urb(urbs[i]);
urbs[i] = NULL;
context.pending--;
context.submit_error = 1;
break;
}
}
spin_unlock_irq(&context.lock);
wait_for_completion(&context.done);
for (i = 0; i < param->sglen; i++) {
if (urbs[i])
simple_free_urb(urbs[i]);
}
/*
* Isochronous transfers are expected to fail sometimes. As an
* arbitrary limit, we will report an error if any submissions
* fail or if the transfer failure rate is > 10%.
*/
if (status != 0)
;
else if (context.submit_error)
status = -EACCES;
else if (context.errors > context.packet_count / 10)
status = -EIO;
return status;
fail:
for (i = 0; i < param->sglen; i++) {
if (urbs[i])
simple_free_urb(urbs[i]);
}
return status;
}
static int test_unaligned_bulk(
struct usbtest_dev *tdev,
int pipe,
unsigned length,
int iterations,
unsigned transfer_flags,
const char *label)
{
int retval;
struct urb *urb = usbtest_alloc_urb(
testdev_to_usbdev(tdev), pipe, length, transfer_flags, 1);
if (!urb)
return -ENOMEM;
retval = simple_io(tdev, urb, iterations, 0, 0, label);
simple_free_urb(urb);
return retval;
}
/*-------------------------------------------------------------------------*/
/* We only have this one interface to user space, through usbfs.
* User mode code can scan usbfs to find N different devices (maybe on
* different busses) to use when testing, and allocate one thread per
* test. So discovery is simplified, and we have no device naming issues.
*
* Don't use these only as stress/load tests. Use them along with
* other USB bus activity: plugging, unplugging, mousing, mp3 playback,
* video capture, and so on. Run different tests at different times, in
* different sequences. Nothing here should interact with other devices,
* except indirectly by consuming USB bandwidth and CPU resources for test
* threads and request completion. But the only way to know that for sure
* is to test when HC queues are in use by many devices.
*
* WARNING: Because usbfs grabs udev->dev.sem before calling this ioctl(),
* it locks out usbcore in certain code paths. Notably, if you disconnect
* the device-under-test, khubd will block forever waiting for the
* ioctl to complete ... so that usb_disconnect() can abort the pending
* urbs and then call usbtest_disconnect(). To abort a test, you're best
* off just killing the userspace task and waiting for it to exit.
*/
/* No BKL needed */
static int
usbtest_ioctl(struct usb_interface *intf, unsigned int code, void *buf)
{
struct usbtest_dev *dev = usb_get_intfdata(intf);
struct usb_device *udev = testdev_to_usbdev(dev);
struct usbtest_param *param = buf;
int retval = -EOPNOTSUPP;
struct urb *urb;
struct scatterlist *sg;
struct usb_sg_request req;
struct timeval start;
unsigned i;
/* FIXME USBDEVFS_CONNECTINFO doesn't say how fast the device is. */
pattern = mod_pattern;
if (code != USBTEST_REQUEST)
return -EOPNOTSUPP;
if (param->iterations <= 0)
return -EINVAL;
if (mutex_lock_interruptible(&dev->lock))
return -ERESTARTSYS;
/* FIXME: What if a system sleep starts while a test is running? */
/* some devices, like ez-usb default devices, need a non-default
* altsetting to have any active endpoints. some tests change
* altsettings; force a default so most tests don't need to check.
*/
if (dev->info->alt >= 0) {
int res;
if (intf->altsetting->desc.bInterfaceNumber) {
mutex_unlock(&dev->lock);
return -ENODEV;
}
res = set_altsetting(dev, dev->info->alt);
if (res) {
dev_err(&intf->dev,
"set altsetting to %d failed, %d\n",
dev->info->alt, res);
mutex_unlock(&dev->lock);
return res;
}
}
/*
* Just a bunch of test cases that every HCD is expected to handle.
*
* Some may need specific firmware, though it'd be good to have
* one firmware image to handle all the test cases.
*
* FIXME add more tests! cancel requests, verify the data, control
* queueing, concurrent read+write threads, and so on.
*/
do_gettimeofday(&start);
switch (param->test_num) {
case 0:
dev_info(&intf->dev, "TEST 0: NOP\n");
retval = 0;
break;
/* Simple non-queued bulk I/O tests */
case 1:
if (dev->out_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 1: write %d bytes %u times\n",
param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = simple_io(dev, urb, param->iterations, 0, 0, "test1");
simple_free_urb(urb);
break;
case 2:
if (dev->in_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 2: read %d bytes %u times\n",
param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = simple_io(dev, urb, param->iterations, 0, 0, "test2");
simple_free_urb(urb);
break;
case 3:
if (dev->out_pipe == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 3: write/%d 0..%d bytes %u times\n",
param->vary, param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->out_pipe, param->length);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = simple_io(dev, urb, param->iterations, param->vary,
0, "test3");
simple_free_urb(urb);
break;
case 4:
if (dev->in_pipe == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 4: read/%d 0..%d bytes %u times\n",
param->vary, param->length, param->iterations);
urb = simple_alloc_urb(udev, dev->in_pipe, param->length);
if (!urb) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = simple_io(dev, urb, param->iterations, param->vary,
0, "test4");
simple_free_urb(urb);
break;
/* Queued bulk I/O tests */
case 5:
if (dev->out_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 5: write %d sglists %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length, 0);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = perform_sglist(dev, param->iterations, dev->out_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
case 6:
if (dev->in_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 6: read %d sglists %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length, 0);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = perform_sglist(dev, param->iterations, dev->in_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
case 7:
if (dev->out_pipe == 0 || param->sglen == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 7: write/%d %d sglists %d entries 0..%d bytes\n",
param->vary, param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length, param->vary);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk sink (maybe accepts short writes) */
retval = perform_sglist(dev, param->iterations, dev->out_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
case 8:
if (dev->in_pipe == 0 || param->sglen == 0 || param->vary == 0)
break;
dev_info(&intf->dev,
"TEST 8: read/%d %d sglists %d entries 0..%d bytes\n",
param->vary, param->iterations,
param->sglen, param->length);
sg = alloc_sglist(param->sglen, param->length, param->vary);
if (!sg) {
retval = -ENOMEM;
break;
}
/* FIRMWARE: bulk source (maybe generates short writes) */
retval = perform_sglist(dev, param->iterations, dev->in_pipe,
&req, sg, param->sglen);
free_sglist(sg, param->sglen);
break;
/* non-queued sanity tests for control (chapter 9 subset) */
case 9:
retval = 0;
dev_info(&intf->dev,
"TEST 9: ch9 (subset) control tests, %d times\n",
param->iterations);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = ch9_postconfig(dev);
if (retval)
dev_err(&intf->dev, "ch9 subset failed, "
"iterations left %d\n", i);
break;
/* queued control messaging */
case 10:
if (param->sglen == 0)
break;
retval = 0;
dev_info(&intf->dev,
"TEST 10: queue %d control calls, %d times\n",
param->sglen,
param->iterations);
retval = test_ctrl_queue(dev, param);
break;
/* simple non-queued unlinks (ring with one urb) */
case 11:
if (dev->in_pipe == 0 || !param->length)
break;
retval = 0;
dev_info(&intf->dev, "TEST 11: unlink %d reads of %d\n",
param->iterations, param->length);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = unlink_simple(dev, dev->in_pipe,
param->length);
if (retval)
dev_err(&intf->dev, "unlink reads failed %d, "
"iterations left %d\n", retval, i);
break;
case 12:
if (dev->out_pipe == 0 || !param->length)
break;
retval = 0;
dev_info(&intf->dev, "TEST 12: unlink %d writes of %d\n",
param->iterations, param->length);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = unlink_simple(dev, dev->out_pipe,
param->length);
if (retval)
dev_err(&intf->dev, "unlink writes failed %d, "
"iterations left %d\n", retval, i);
break;
/* ep halt tests */
case 13:
if (dev->out_pipe == 0 && dev->in_pipe == 0)
break;
retval = 0;
dev_info(&intf->dev, "TEST 13: set/clear %d halts\n",
param->iterations);
for (i = param->iterations; retval == 0 && i--; /* NOP */)
retval = halt_simple(dev);
if (retval)
ERROR(dev, "halts failed, iterations left %d\n", i);
break;
/* control write tests */
case 14:
if (!dev->info->ctrl_out)
break;
dev_info(&intf->dev, "TEST 14: %d ep0out, %d..%d vary %d\n",
param->iterations,
realworld ? 1 : 0, param->length,
param->vary);
retval = ctrl_out(dev, param->iterations,
param->length, param->vary, 0);
break;
/* iso write tests */
case 15:
if (dev->out_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 15: write %d iso, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
/* FIRMWARE: iso sink */
retval = test_iso_queue(dev, param,
dev->out_iso_pipe, dev->iso_out, 0);
break;
/* iso read tests */
case 16:
if (dev->in_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 16: read %d iso, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
/* FIRMWARE: iso source */
retval = test_iso_queue(dev, param,
dev->in_iso_pipe, dev->iso_in, 0);
break;
/* FIXME scatterlist cancel (needs helper thread) */
/* Tests for bulk I/O using DMA mapping by core and odd address */
case 17:
if (dev->out_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 17: write odd addr %d bytes %u times core map\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->out_pipe,
param->length, param->iterations,
0, "test17");
break;
case 18:
if (dev->in_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 18: read odd addr %d bytes %u times core map\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->in_pipe,
param->length, param->iterations,
0, "test18");
break;
/* Tests for bulk I/O using premapped coherent buffer and odd address */
case 19:
if (dev->out_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 19: write odd addr %d bytes %u times premapped\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->out_pipe,
param->length, param->iterations,
URB_NO_TRANSFER_DMA_MAP, "test19");
break;
case 20:
if (dev->in_pipe == 0)
break;
dev_info(&intf->dev,
"TEST 20: read odd addr %d bytes %u times premapped\n",
param->length, param->iterations);
retval = test_unaligned_bulk(
dev, dev->in_pipe,
param->length, param->iterations,
URB_NO_TRANSFER_DMA_MAP, "test20");
break;
/* control write tests with unaligned buffer */
case 21:
if (!dev->info->ctrl_out)
break;
dev_info(&intf->dev,
"TEST 21: %d ep0out odd addr, %d..%d vary %d\n",
param->iterations,
realworld ? 1 : 0, param->length,
param->vary);
retval = ctrl_out(dev, param->iterations,
param->length, param->vary, 1);
break;
/* unaligned iso tests */
case 22:
if (dev->out_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 22: write %d iso odd, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
retval = test_iso_queue(dev, param,
dev->out_iso_pipe, dev->iso_out, 1);
break;
case 23:
if (dev->in_iso_pipe == 0 || param->sglen == 0)
break;
dev_info(&intf->dev,
"TEST 23: read %d iso odd, %d entries of %d bytes\n",
param->iterations,
param->sglen, param->length);
retval = test_iso_queue(dev, param,
dev->in_iso_pipe, dev->iso_in, 1);
break;
/* unlink URBs from a bulk-OUT queue */
case 24:
if (dev->out_pipe == 0 || !param->length || param->sglen < 4)
break;
retval = 0;
dev_info(&intf->dev, "TEST 17: unlink from %d queues of "
"%d %d-byte writes\n",
param->iterations, param->sglen, param->length);
for (i = param->iterations; retval == 0 && i > 0; --i) {
retval = unlink_queued(dev, dev->out_pipe,
param->sglen, param->length);
if (retval) {
dev_err(&intf->dev,
"unlink queued writes failed %d, "
"iterations left %d\n", retval, i);
break;
}
}
break;
}
do_gettimeofday(¶m->duration);
param->duration.tv_sec -= start.tv_sec;
param->duration.tv_usec -= start.tv_usec;
if (param->duration.tv_usec < 0) {
param->duration.tv_usec += 1000 * 1000;
param->duration.tv_sec -= 1;
}
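/* e.g. start 10s/900000us, end 12s/100000us: the raw difference is
* 2s/-800000us, which the borrow above normalizes to 1s/200000us.
*/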
mutex_unlock(&dev->lock);
return retval;
}
/*-------------------------------------------------------------------------*/
static unsigned force_interrupt;
module_param(force_interrupt, uint, 0);
MODULE_PARM_DESC(force_interrupt, "0 = test default; else interrupt");
#ifdef GENERIC
static unsigned short vendor;
module_param(vendor, ushort, 0);
MODULE_PARM_DESC(vendor, "vendor code (from usb-if)");
static unsigned short product;
module_param(product, ushort, 0);
MODULE_PARM_DESC(product, "product code (from vendor)");
#endif
static int
usbtest_probe(struct usb_interface *intf, const struct usb_device_id *id)
{
struct usb_device *udev;
struct usbtest_dev *dev;
struct usbtest_info *info;
char *rtest, *wtest;
char *irtest, *iwtest;
udev = interface_to_usbdev(intf);
#ifdef GENERIC
/* specify devices by module parameters? */
if (id->match_flags == 0) {
/* vendor match required, product match optional */
if (!vendor || le16_to_cpu(udev->descriptor.idVendor) != (u16)vendor)
return -ENODEV;
if (product && le16_to_cpu(udev->descriptor.idProduct) != (u16)product)
return -ENODEV;
dev_info(&intf->dev, "matched module params, "
"vend=0x%04x prod=0x%04x\n",
le16_to_cpu(udev->descriptor.idVendor),
le16_to_cpu(udev->descriptor.idProduct));
}
#endif
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
return -ENOMEM;
info = (struct usbtest_info *) id->driver_info;
dev->info = info;
mutex_init(&dev->lock);
dev->intf = intf;
/* cacheline-aligned scratch for i/o */
dev->buf = kmalloc(TBUF_SIZE, GFP_KERNEL);
if (dev->buf == NULL) {
kfree(dev);
return -ENOMEM;
}
/* NOTE this doesn't yet test the handful of differences that are
* visible with high speed interrupts: bigger maxpacket (1K) and
* "high bandwidth" modes (up to 3 packets/uframe).
*/
rtest = wtest = "";
irtest = iwtest = "";
if (force_interrupt || udev->speed == USB_SPEED_LOW) {
if (info->ep_in) {
dev->in_pipe = usb_rcvintpipe(udev, info->ep_in);
rtest = " intr-in";
}
if (info->ep_out) {
dev->out_pipe = usb_sndintpipe(udev, info->ep_out);
wtest = " intr-out";
}
} else {
if (info->autoconf) {
int status;
status = get_endpoints(dev, intf);
if (status < 0) {
WARNING(dev, "couldn't get endpoints, %d\n",
status);
return status;
}
/* may find bulk or ISO pipes */
} else {
if (info->ep_in)
dev->in_pipe = usb_rcvbulkpipe(udev,
info->ep_in);
if (info->ep_out)
dev->out_pipe = usb_sndbulkpipe(udev,
info->ep_out);
}
if (dev->in_pipe)
rtest = " bulk-in";
if (dev->out_pipe)
wtest = " bulk-out";
if (dev->in_iso_pipe)
irtest = " iso-in";
if (dev->out_iso_pipe)
iwtest = " iso-out";
}
usb_set_intfdata(intf, dev);
dev_info(&intf->dev, "%s\n", info->name);
dev_info(&intf->dev, "%s speed {control%s%s%s%s%s} tests%s\n",
({ char *tmp;
switch (udev->speed) {
case USB_SPEED_LOW:
tmp = "low";
break;
case USB_SPEED_FULL:
tmp = "full";
break;
case USB_SPEED_HIGH:
tmp = "high";
break;
case USB_SPEED_SUPER:
tmp = "super";
break;
default:
tmp = "unknown";
break;
} tmp; }),
info->ctrl_out ? " in/out" : "",
rtest, wtest,
irtest, iwtest,
info->alt >= 0 ? " (+alt)" : "");
return 0;
}
static int usbtest_suspend(struct usb_interface *intf, pm_message_t message)
{
return 0;
}
static int usbtest_resume(struct usb_interface *intf)
{
return 0;
}
static void usbtest_disconnect(struct usb_interface *intf)
{
struct usbtest_dev *dev = usb_get_intfdata(intf);
usb_set_intfdata(intf, NULL);
dev_dbg(&intf->dev, "disconnect\n");
kfree(dev);
}
/* Basic testing only needs a device that can source or sink bulk traffic.
* Any device can test control transfers (default with GENERIC binding).
*
* Several entries work with the default EP0 implementation that's built
* into EZ-USB chips. There's a default vendor ID which can be overridden
* by (very) small config EEPROMS, but otherwise all these devices act
* identically until firmware is loaded: only EP0 works. It turns out
* to be easy to make other endpoints work, without modifying that EP0
* behavior. For now, we expect that kind of firmware.
*/
/* an21xx or fx versions of ez-usb */
static struct usbtest_info ez1_info = {
.name = "EZ-USB device",
.ep_in = 2,
.ep_out = 2,
.alt = 1,
};
/* fx2 version of ez-usb */
static struct usbtest_info ez2_info = {
.name = "FX2 device",
.ep_in = 6,
.ep_out = 2,
.alt = 1,
};
/* ezusb family device with dedicated usb test firmware */
static struct usbtest_info fw_info = {
.name = "usb test device",
.ep_in = 2,
.ep_out = 2,
.alt = 1,
.autoconf = 1, /* iso and ctrl_out need autoconf */
.ctrl_out = 1,
.iso = 1, /* iso_ep's are #8 in/out */
};
/* peripheral running Linux and 'zero.c' test firmware, or
* its user-mode cousin. different versions of this use
* different hardware with the same vendor/product codes.
* host side MUST rely on the endpoint descriptors.
*/
static struct usbtest_info gz_info = {
.name = "Linux gadget zero",
.autoconf = 1,
.ctrl_out = 1,
.alt = 0,
};
static struct usbtest_info um_info = {
.name = "Linux user mode test driver",
.autoconf = 1,
.alt = -1,
};
static struct usbtest_info um2_info = {
.name = "Linux user mode ISO test driver",
.autoconf = 1,
.iso = 1,
.alt = -1,
};
#ifdef IBOT2
/* this is a nice source of high speed bulk data;
* uses an FX2, with firmware provided in the device
*/
static struct usbtest_info ibot2_info = {
.name = "iBOT2 webcam",
.ep_in = 2,
.alt = -1,
};
#endif
#ifdef GENERIC
/* we can use any device to test control traffic */
static struct usbtest_info generic_info = {
.name = "Generic USB device",
.alt = -1,
};
#endif
static const struct usb_device_id id_table[] = {
/*-------------------------------------------------------------*/
/* EZ-USB devices which download firmware to replace (or in our
* case augment) the default device implementation.
*/
/* generic EZ-USB FX controller */
{ USB_DEVICE(0x0547, 0x2235),
.driver_info = (unsigned long) &ez1_info,
},
/* CY3671 development board with EZ-USB FX */
{ USB_DEVICE(0x0547, 0x0080),
.driver_info = (unsigned long) &ez1_info,
},
/* generic EZ-USB FX2 controller (or development board) */
{ USB_DEVICE(0x04b4, 0x8613),
.driver_info = (unsigned long) &ez2_info,
},
/* re-enumerated usb test device firmware */
{ USB_DEVICE(0xfff0, 0xfff0),
.driver_info = (unsigned long) &fw_info,
},
/* "Gadget Zero" firmware runs under Linux */
{ USB_DEVICE(0x0525, 0xa4a0),
.driver_info = (unsigned long) &gz_info,
},
/* so does a user-mode variant */
{ USB_DEVICE(0x0525, 0xa4a4),
.driver_info = (unsigned long) &um_info,
},
/* ... and a user-mode variant that talks iso */
{ USB_DEVICE(0x0525, 0xa4a3),
.driver_info = (unsigned long) &um2_info,
},
#ifdef KEYSPAN_19Qi
/* Keyspan 19qi uses an21xx (original EZ-USB) */
/* this does not coexist with the real Keyspan 19qi driver! */
{ USB_DEVICE(0x06cd, 0x010b),
.driver_info = (unsigned long) &ez1_info,
},
#endif
/*-------------------------------------------------------------*/
#ifdef IBOT2
/* iBOT2 makes a nice source of high speed bulk-in data */
/* this does not coexist with a real iBOT2 driver! */
{ USB_DEVICE(0x0b62, 0x0059),
.driver_info = (unsigned long) &ibot2_info,
},
#endif
/*-------------------------------------------------------------*/
#ifdef GENERIC
/* module params can specify devices to use for control tests */
{ .driver_info = (unsigned long) &generic_info, },
#endif
/*-------------------------------------------------------------*/
{ }
};
MODULE_DEVICE_TABLE(usb, id_table);
static struct usb_driver usbtest_driver = {
.name = "usbtest",
.id_table = id_table,
.probe = usbtest_probe,
.unlocked_ioctl = usbtest_ioctl,
.disconnect = usbtest_disconnect,
.suspend = usbtest_suspend,
.resume = usbtest_resume,
};
/*-------------------------------------------------------------------------*/
static int __init usbtest_init(void)
{
#ifdef GENERIC
if (vendor)
pr_debug("params: vend=0x%04x prod=0x%04x\n", vendor, product);
#endif
return usb_register(&usbtest_driver);
}
module_init(usbtest_init);
static void __exit usbtest_exit(void)
{
usb_deregister(&usbtest_driver);
}
module_exit(usbtest_exit);
MODULE_DESCRIPTION("USB Core/HCD Testing Driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
aloksinha2001/Linux3188 | net/irda/irlan/irlan_provider.c | 2389 | 11183 | /*********************************************************************
*
* Filename: irlan_provider.c
* Version: 0.9
* Description: IrDA LAN Access Protocol Implementation
* Status: Experimental.
* Author: Dag Brattli <dagb@cs.uit.no>
* Created at: Sun Aug 31 20:14:37 1997
* Modified at: Sat Oct 30 12:52:10 1999
* Modified by: Dag Brattli <dagb@cs.uit.no>
* Sources: skeleton.c by Donald Becker <becker@CESDIS.gsfc.nasa.gov>
* slip.c by Laurence Culhane, <loz@holmes.demon.co.uk>
* Fred N. van Kempen, <waltje@uwalt.nl.mugnet.org>
*
* Copyright (c) 1998-1999 Dag Brattli <dagb@cs.uit.no>,
* All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* Neither Dag Brattli nor University of Tromsø admit liability nor
* provide warranty for any of this software. This material is
* provided "AS-IS" and at no charge.
*
********************************************************************/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <net/irda/irda.h>
#include <net/irda/irttp.h>
#include <net/irda/irlmp.h>
#include <net/irda/irias_object.h>
#include <net/irda/iriap.h>
#include <net/irda/timer.h>
#include <net/irda/irlan_common.h>
#include <net/irda/irlan_eth.h>
#include <net/irda/irlan_event.h>
#include <net/irda/irlan_provider.h>
#include <net/irda/irlan_filter.h>
#include <net/irda/irlan_client.h>
static void irlan_provider_connect_indication(void *instance, void *sap,
struct qos_info *qos,
__u32 max_sdu_size,
__u8 max_header_size,
struct sk_buff *skb);
/*
* Function irlan_provider_control_data_indication (handle, skb)
*
* This function gets the data that is received on the control channel
*
*/
static int irlan_provider_data_indication(void *instance, void *sap,
struct sk_buff *skb)
{
struct irlan_cb *self;
__u8 code;
IRDA_DEBUG(4, "%s()\n", __func__ );
self = (struct irlan_cb *) instance;
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
IRDA_ASSERT(skb != NULL, return -1;);
code = skb->data[0];
switch(code) {
case CMD_GET_PROVIDER_INFO:
IRDA_DEBUG(4, "Got GET_PROVIDER_INFO command!\n");
irlan_do_provider_event(self, IRLAN_GET_INFO_CMD, skb);
break;
case CMD_GET_MEDIA_CHAR:
IRDA_DEBUG(4, "Got GET_MEDIA_CHAR command!\n");
irlan_do_provider_event(self, IRLAN_GET_MEDIA_CMD, skb);
break;
case CMD_OPEN_DATA_CHANNEL:
IRDA_DEBUG(4, "Got OPEN_DATA_CHANNEL command!\n");
irlan_do_provider_event(self, IRLAN_OPEN_DATA_CMD, skb);
break;
case CMD_FILTER_OPERATION:
IRDA_DEBUG(4, "Got FILTER_OPERATION command!\n");
irlan_do_provider_event(self, IRLAN_FILTER_CONFIG_CMD, skb);
break;
case CMD_RECONNECT_DATA_CHAN:
IRDA_DEBUG(2, "%s(), Got RECONNECT_DATA_CHAN command\n", __func__ );
IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ );
break;
case CMD_CLOSE_DATA_CHAN:
IRDA_DEBUG(2, "Got CLOSE_DATA_CHAN command!\n");
IRDA_DEBUG(2, "%s(), NOT IMPLEMENTED\n", __func__ );
break;
default:
IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ );
break;
}
return 0;
}
/*
* Function irlan_provider_connect_indication (handle, skb, priv)
*
* Got connection from peer IrLAN client
*
*/
static void irlan_provider_connect_indication(void *instance, void *sap,
struct qos_info *qos,
__u32 max_sdu_size,
__u8 max_header_size,
struct sk_buff *skb)
{
struct irlan_cb *self;
struct tsap_cb *tsap;
IRDA_DEBUG(0, "%s()\n", __func__ );
self = (struct irlan_cb *) instance;
tsap = (struct tsap_cb *) sap;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
IRDA_ASSERT(tsap == self->provider.tsap_ctrl,return;);
IRDA_ASSERT(self->provider.state == IRLAN_IDLE, return;);
self->provider.max_sdu_size = max_sdu_size;
self->provider.max_header_size = max_header_size;
irlan_do_provider_event(self, IRLAN_CONNECT_INDICATION, NULL);
/*
* If we are in peer mode, the client may not have got the discovery
* indication it needs to make progress. If the client is still in
* IDLE state, we must kick it.
*/
if ((self->provider.access_type == ACCESS_PEER) &&
(self->client.state == IRLAN_IDLE))
{
irlan_client_wakeup(self, self->saddr, self->daddr);
}
}
/*
* Function irlan_provider_connect_response (handle)
*
* Accept incoming connection
*
*/
void irlan_provider_connect_response(struct irlan_cb *self,
struct tsap_cb *tsap)
{
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
/* Just accept */
irttp_connect_response(tsap, IRLAN_MTU, NULL);
}
static void irlan_provider_disconnect_indication(void *instance, void *sap,
LM_REASON reason,
struct sk_buff *userdata)
{
struct irlan_cb *self;
struct tsap_cb *tsap;
IRDA_DEBUG(4, "%s(), reason=%d\n", __func__ , reason);
self = (struct irlan_cb *) instance;
tsap = (struct tsap_cb *) sap;
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
IRDA_ASSERT(tsap != NULL, return;);
IRDA_ASSERT(tsap->magic == TTP_TSAP_MAGIC, return;);
IRDA_ASSERT(tsap == self->provider.tsap_ctrl, return;);
irlan_do_provider_event(self, IRLAN_LMP_DISCONNECT, NULL);
}
/*
* Function irlan_parse_open_data_cmd (self, skb)
*
*
*
*/
int irlan_parse_open_data_cmd(struct irlan_cb *self, struct sk_buff *skb)
{
int ret;
ret = irlan_provider_parse_command(self, CMD_OPEN_DATA_CHANNEL, skb);
/* Open data channel */
irlan_open_data_tsap(self);
return ret;
}
/*
* Function parse_command (skb)
*
* Extract all parameters from received buffer, then feed them to
* check_params for parsing
*
*/
int irlan_provider_parse_command(struct irlan_cb *self, int cmd,
struct sk_buff *skb)
{
__u8 *frame;
__u8 *ptr;
int count;
__u16 val_len;
int i;
char *name;
char *value;
int ret = RSP_SUCCESS;
IRDA_ASSERT(skb != NULL, return -RSP_PROTOCOL_ERROR;);
IRDA_DEBUG(4, "%s(), skb->len=%d\n", __func__ , (int)skb->len);
IRDA_ASSERT(self != NULL, return -RSP_PROTOCOL_ERROR;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -RSP_PROTOCOL_ERROR;);
if (!skb)
return -RSP_PROTOCOL_ERROR;
frame = skb->data;
name = kmalloc(255, GFP_ATOMIC);
if (!name)
return -RSP_INSUFFICIENT_RESOURCES;
value = kmalloc(1016, GFP_ATOMIC);
if (!value) {
kfree(name);
return -RSP_INSUFFICIENT_RESOURCES;
}
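/* Control frames, as consumed by irlan_extract_param(), look roughly
* like this (sketch; byte offsets within skb->data):
*
*	[0]  command/response code
*	[1]  parameter count
*	[2+] parameters, each encoded as:
*	       1 byte   name length
*	       n bytes  name
*	       2 bytes  value length (little endian)
*	       m bytes  value
*/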
/* How many parameters? */
count = frame[1];
IRDA_DEBUG(4, "Got %d parameters\n", count);
ptr = frame+2;
/* For all parameters */
for (i = 0; i < count; i++) {
ret = irlan_extract_param(ptr, name, value, &val_len);
if (ret < 0) {
IRDA_DEBUG(2, "%s(), IrLAN, Error!\n", __func__ );
break;
}
ptr += ret;
ret = RSP_SUCCESS;
irlan_check_command_param(self, name, value);
}
/* Cleanup */
kfree(name);
kfree(value);
return ret;
}
/*
* Function irlan_provider_send_reply (self, info)
*
* Send reply to query to peer IrLAN layer
*
*/
void irlan_provider_send_reply(struct irlan_cb *self, int command,
int ret_code)
{
struct sk_buff *skb;
IRDA_DEBUG(4, "%s()\n", __func__ );
IRDA_ASSERT(self != NULL, return;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return;);
skb = alloc_skb(IRLAN_MAX_HEADER + IRLAN_CMD_HEADER +
/* Bigger param length comes from CMD_GET_MEDIA_CHAR */
IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "DIRECTED") +
IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "BORADCAST") +
IRLAN_STRING_PARAMETER_LEN("FILTER_TYPE", "MULTICAST") +
IRLAN_STRING_PARAMETER_LEN("ACCESS_TYPE", "HOSTED"),
GFP_ATOMIC);
if (!skb)
return;
/* Reserve space for TTP, LMP, and LAP header */
skb_reserve(skb, self->provider.max_header_size);
skb_put(skb, 2);
switch (command) {
case CMD_GET_PROVIDER_INFO:
skb->data[0] = 0x00; /* Success */
skb->data[1] = 0x02; /* 2 parameters */
switch (self->media) {
case MEDIA_802_3:
irlan_insert_string_param(skb, "MEDIA", "802.3");
break;
case MEDIA_802_5:
irlan_insert_string_param(skb, "MEDIA", "802.5");
break;
default:
IRDA_DEBUG(2, "%s(), unknown media type!\n", __func__ );
break;
}
irlan_insert_short_param(skb, "IRLAN_VER", 0x0101);
break;
case CMD_GET_MEDIA_CHAR:
skb->data[0] = 0x00; /* Success */
skb->data[1] = 0x05; /* 5 parameters */
irlan_insert_string_param(skb, "FILTER_TYPE", "DIRECTED");
irlan_insert_string_param(skb, "FILTER_TYPE", "BROADCAST");
irlan_insert_string_param(skb, "FILTER_TYPE", "MULTICAST");
switch (self->provider.access_type) {
case ACCESS_DIRECT:
irlan_insert_string_param(skb, "ACCESS_TYPE", "DIRECT");
break;
case ACCESS_PEER:
irlan_insert_string_param(skb, "ACCESS_TYPE", "PEER");
break;
case ACCESS_HOSTED:
irlan_insert_string_param(skb, "ACCESS_TYPE", "HOSTED");
break;
default:
IRDA_DEBUG(2, "%s(), Unknown access type\n", __func__ );
break;
}
irlan_insert_short_param(skb, "MAX_FRAME", 0x05ee);
break;
case CMD_OPEN_DATA_CHANNEL:
skb->data[0] = 0x00; /* Success */
if (self->provider.send_arb_val) {
skb->data[1] = 0x03; /* 3 parameters */
irlan_insert_short_param(skb, "CON_ARB",
self->provider.send_arb_val);
} else
skb->data[1] = 0x02; /* 2 parameters */
irlan_insert_byte_param(skb, "DATA_CHAN", self->stsap_sel_data);
irlan_insert_string_param(skb, "RECONNECT_KEY", "LINUX RULES!");
break;
case CMD_FILTER_OPERATION:
irlan_filter_request(self, skb);
break;
default:
IRDA_DEBUG(2, "%s(), Unknown command!\n", __func__ );
break;
}
irttp_data_request(self->provider.tsap_ctrl, skb);
}
/*
* Function irlan_provider_register(void)
*
* Register provider support so we can accept incoming connections.
*
*/
int irlan_provider_open_ctrl_tsap(struct irlan_cb *self)
{
struct tsap_cb *tsap;
notify_t notify;
IRDA_DEBUG(4, "%s()\n", __func__ );
IRDA_ASSERT(self != NULL, return -1;);
IRDA_ASSERT(self->magic == IRLAN_MAGIC, return -1;);
/* Check if already open */
if (self->provider.tsap_ctrl)
return -1;
/*
* First register well known control TSAP
*/
irda_notify_init(¬ify);
notify.data_indication = irlan_provider_data_indication;
notify.connect_indication = irlan_provider_connect_indication;
notify.disconnect_indication = irlan_provider_disconnect_indication;
notify.instance = self;
strlcpy(notify.name, "IrLAN ctrl (p)", sizeof(notify.name));
tsap = irttp_open_tsap(LSAP_ANY, 1, ¬ify);
if (!tsap) {
IRDA_DEBUG(2, "%s(), Got no tsap!\n", __func__ );
return -1;
}
self->provider.tsap_ctrl = tsap;
/* Register with LM-IAS */
irlan_ias_register(self, tsap->stsap_sel);
return 0;
}
| gpl-2.0 |
pjh/linux-stable | fs/compat_binfmt_elf.c | 2389 | 3807 | /*
* 32-bit compatibility support for ELF format executables and core dumps.
*
* Copyright (C) 2007 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
* of the GNU General Public License v.2.
*
* Red Hat Author: Roland McGrath.
*
* This file is used in a 64-bit kernel that wants to support 32-bit ELF.
* asm/elf.h is responsible for defining the compat_* and COMPAT_* macros
* used below, with definitions appropriate for 32-bit ABI compatibility.
*
* We use macros to rename the ABI types and machine-dependent
* functions used in binfmt_elf.c to compat versions.
*/
#include <linux/elfcore-compat.h>
#include <linux/time.h>
/*
* Rename the basic ELF layout types to refer to the 32-bit class of files.
*/
#undef ELF_CLASS
#define ELF_CLASS ELFCLASS32
#undef elfhdr
#undef elf_phdr
#undef elf_shdr
#undef elf_note
#undef elf_addr_t
#define elfhdr elf32_hdr
#define elf_phdr elf32_phdr
#define elf_shdr elf32_shdr
#define elf_note elf32_note
#define elf_addr_t Elf32_Addr
/*
* Some data types as stored in coredump.
*/
#define user_long_t compat_long_t
#define user_siginfo_t compat_siginfo_t
#define copy_siginfo_to_user copy_siginfo_to_user32
/*
* The machine-dependent core note format types are defined in elfcore-compat.h,
* which requires asm/elf.h to define compat_elf_gregset_t et al.
*/
#define elf_prstatus compat_elf_prstatus
#define elf_prpsinfo compat_elf_prpsinfo
/*
* Compat version of cputime_to_compat_timeval, perhaps this
* should be an inline in <linux/compat.h>.
*/
static void cputime_to_compat_timeval(const cputime_t cputime,
struct compat_timeval *value)
{
struct timeval tv;
cputime_to_timeval(cputime, &tv);
value->tv_sec = tv.tv_sec;
value->tv_usec = tv.tv_usec;
}
#undef cputime_to_timeval
#define cputime_to_timeval cputime_to_compat_timeval
/*
* To use this file, asm/elf.h must define compat_elf_check_arch.
* The other following macros can be defined if the compat versions
* differ from the native ones, or omitted when they match.
*/
#undef ELF_ARCH
#undef elf_check_arch
#define elf_check_arch compat_elf_check_arch
#ifdef COMPAT_ELF_PLATFORM
#undef ELF_PLATFORM
#define ELF_PLATFORM COMPAT_ELF_PLATFORM
#endif
#ifdef COMPAT_ELF_HWCAP
#undef ELF_HWCAP
#define ELF_HWCAP COMPAT_ELF_HWCAP
#endif
#ifdef COMPAT_ELF_HWCAP2
#undef ELF_HWCAP2
#define ELF_HWCAP2 COMPAT_ELF_HWCAP2
#endif
#ifdef COMPAT_ARCH_DLINFO
#undef ARCH_DLINFO
#define ARCH_DLINFO COMPAT_ARCH_DLINFO
#endif
#ifdef COMPAT_ELF_ET_DYN_BASE
#undef ELF_ET_DYN_BASE
#define ELF_ET_DYN_BASE COMPAT_ELF_ET_DYN_BASE
#endif
#ifdef COMPAT_ELF_EXEC_PAGESIZE
#undef ELF_EXEC_PAGESIZE
#define ELF_EXEC_PAGESIZE COMPAT_ELF_EXEC_PAGESIZE
#endif
#ifdef COMPAT_ELF_PLAT_INIT
#undef ELF_PLAT_INIT
#define ELF_PLAT_INIT COMPAT_ELF_PLAT_INIT
#endif
#ifdef COMPAT_SET_PERSONALITY
#undef SET_PERSONALITY
#define SET_PERSONALITY COMPAT_SET_PERSONALITY
#endif
#ifdef compat_start_thread
#undef start_thread
#define start_thread compat_start_thread
#endif
#ifdef compat_arch_setup_additional_pages
#undef ARCH_HAS_SETUP_ADDITIONAL_PAGES
#define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
#undef arch_setup_additional_pages
#define arch_setup_additional_pages compat_arch_setup_additional_pages
#endif
/*
* Rename a few of the symbols that binfmt_elf.c will define.
* These are all local so the names don't really matter, but it
* might make some debugging less confusing not to duplicate them.
*/
#define elf_format compat_elf_format
#define init_elf_binfmt init_compat_elf_binfmt
#define exit_elf_binfmt exit_compat_elf_binfmt
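/*
* For instance, binfmt_elf.c defines
*	static struct linux_binfmt elf_format = { ... };
* which, thanks to the rename above, compiles here as
* "compat_elf_format". As noted above the symbols are local either
* way, but the distinct names keep debugging output unambiguous.
*/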
/*
* We share all the actual code with the native (64-bit) version.
*/
#include "binfmt_elf.c"
| gpl-2.0 |
Docker-J/Sail_STOCK | arch/x86/kernel/mca_32.c | 4693 | 12764 | /*
* Written by Martin Kolinek, February 1996
*
* Changes:
*
* Chris Beauregard July 28th, 1996
* - Fixed up integrated SCSI detection
*
* Chris Beauregard August 3rd, 1996
* - Made mca_info local
* - Made integrated registers accessible through standard function calls
* - Added name field
* - More sanity checking
*
* Chris Beauregard August 9th, 1996
* - Rewrote /proc/mca
*
* Chris Beauregard January 7th, 1997
* - Added basic NMI-processing
* - Added more information to mca_info structure
*
* David Weinehall October 12th, 1998
* - Made a lot of cleaning up in the source
* - Added use of save_flags / restore_flags
* - Added the 'driver_loaded' flag in MCA_adapter
* - Added an alternative implementation of ZP Gu's mca_find_unused_adapter
*
* David Weinehall March 24th, 1999
* - Fixed the output of 'Driver Installed' in /proc/mca/pos
* - Made the Integrated Video & SCSI show up even if they have id 0000
*
* Alexander Viro November 9th, 1999
* - Switched to regular procfs methods
*
* Alfred Arnold & David Weinehall August 23rd, 2000
* - Added support for Planar POS-registers
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mca.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/proc_fs.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/ioport.h>
#include <asm/uaccess.h>
#include <linux/init.h>
static unsigned char which_scsi;
int MCA_bus;
EXPORT_SYMBOL(MCA_bus);
/*
* Motherboard register spinlock. Untested on SMP at the moment, but
* are there any MCA SMP boxes?
*
* Yes - Alan
*/
static DEFINE_SPINLOCK(mca_lock);
/* Build the status info for the adapter */
static void mca_configure_adapter_status(struct mca_device *mca_dev)
{
mca_dev->status = MCA_ADAPTER_NONE;
mca_dev->pos_id = mca_dev->pos[0]
+ (mca_dev->pos[1] << 8);
if (!mca_dev->pos_id && mca_dev->slot < MCA_MAX_SLOT_NR) {
/*
* id = 0x0000 usually indicates hardware failure,
* however, ZP Gu (zpg@castle.net) reports that his 9556
* has 0x0000 as id and everything still works. There
* also seems to be an adapter with id = 0x0000, the
* NCR Parallel Bus Memory Card. Until this is confirmed,
* however, this code will stay.
*/
mca_dev->status = MCA_ADAPTER_ERROR;
return;
} else if (mca_dev->pos_id != 0xffff) {
/*
* 0xffff usually indicates that there's no adapter,
* however, some integrated adapters may have 0xffff as
* their id and still be valid. Examples are on-board
* VGA of the 55sx, the integrated SCSI of the 56 & 57,
* and possibly also the 95 ULTIMEDIA.
*/
mca_dev->status = MCA_ADAPTER_NORMAL;
}
if ((mca_dev->pos_id == 0xffff ||
mca_dev->pos_id == 0x0000) && mca_dev->slot >= MCA_MAX_SLOT_NR) {
int j;
for (j = 2; j < 8; j++) {
if (mca_dev->pos[j] != 0xff) {
mca_dev->status = MCA_ADAPTER_NORMAL;
break;
}
}
}
if (!(mca_dev->pos[2] & MCA_ENABLED)) {
/* enabled bit is in POS 2 */
mca_dev->status = MCA_ADAPTER_DISABLED;
}
} /* mca_configure_adapter_status */
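/*
 * Standalone sketch (not driver code) restating the classification rules
 * above in simplified form: the adapter ID is the first two POS bytes,
 * 0x0000 in a real slot means a broken card, 0xffff usually means an empty
 * slot unless some other POS register is live, and the enable bit lives in
 * POS 2. The sample bytes and the MCA_ENABLED value (0x01) are assumptions.
 */
#if 0
#include <stdio.h>

enum status { ST_NONE, ST_NORMAL, ST_DISABLED, ST_ERROR };

static enum status classify(const unsigned char pos[8], int real_slot)
{
	unsigned int id = pos[0] + (pos[1] << 8);
	int j;

	if (id == 0x0000 && real_slot)
		return ST_ERROR;		/* usually a hardware failure */
	if (id == 0xffff) {
		for (j = 2; j < 8; j++)		/* integrated adapters may   */
			if (pos[j] != 0xff)	/* still have live registers */
				break;
		if (j == 8)
			return ST_NONE;		/* all 0xff: empty slot */
	}
	return (pos[2] & 0x01) ? ST_NORMAL : ST_DISABLED;
}

int main(void)
{
	unsigned char pos[8] = { 0x4f, 0x8e, 0x01, 0xff, 0xff, 0xff, 0xff, 0xff };

	printf("id=0x%04x status=%d\n", pos[0] + (pos[1] << 8), classify(pos, 1));
	return 0;
}
#endif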
/*--------------------------------------------------------------------*/
static struct resource mca_standard_resources[] = {
{ .start = 0x60, .end = 0x60, .name = "system control port B (MCA)" },
{ .start = 0x90, .end = 0x90, .name = "arbitration (MCA)" },
{ .start = 0x91, .end = 0x91, .name = "card Select Feedback (MCA)" },
{ .start = 0x92, .end = 0x92, .name = "system Control port A (MCA)" },
{ .start = 0x94, .end = 0x94, .name = "system board setup (MCA)" },
{ .start = 0x96, .end = 0x97, .name = "POS (MCA)" },
{ .start = 0x100, .end = 0x107, .name = "POS (MCA)" }
};
#define MCA_STANDARD_RESOURCES ARRAY_SIZE(mca_standard_resources)
/*
* mca_read_and_store_pos - read the POS registers into a memory buffer
* @pos: a char pointer to 8 bytes, contains the POS register value on
* successful return
*
* Returns 1 if a card actually exists (i.e. the pos isn't
* all 0xff) or 0 otherwise
*/
static int mca_read_and_store_pos(unsigned char *pos)
{
int j;
int found = 0;
for (j = 0; j < 8; j++) {
pos[j] = inb_p(MCA_POS_REG(j));
if (pos[j] != 0xff) {
/* 0xff all across means no device. 0x00 means
* something's broken, but a device is
* probably there. However, if you get 0x00
* from a motherboard register it won't matter
* what we find. For the record, on the
* 57SLC, the integrated SCSI adapter has
* 0xffff for the adapter ID, but nonzero for
* other registers. */
found = 1;
}
}
return found;
}
static unsigned char mca_pc_read_pos(struct mca_device *mca_dev, int reg)
{
unsigned char byte;
unsigned long flags;
if (reg < 0 || reg >= 8)
return 0;
spin_lock_irqsave(&mca_lock, flags);
if (mca_dev->pos_register) {
/* Disable adapter setup, enable motherboard setup */
outb_p(0, MCA_ADAPTER_SETUP_REG);
outb_p(mca_dev->pos_register, MCA_MOTHERBOARD_SETUP_REG);
byte = inb_p(MCA_POS_REG(reg));
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
} else {
/* Make sure motherboard setup is off */
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
/* Read the appropriate register */
outb_p(0x8|(mca_dev->slot & 0xf), MCA_ADAPTER_SETUP_REG);
byte = inb_p(MCA_POS_REG(reg));
outb_p(0, MCA_ADAPTER_SETUP_REG);
}
spin_unlock_irqrestore(&mca_lock, flags);
mca_dev->pos[reg] = byte;
return byte;
}
static void mca_pc_write_pos(struct mca_device *mca_dev, int reg,
unsigned char byte)
{
unsigned long flags;
if (reg < 0 || reg >= 8)
return;
spin_lock_irqsave(&mca_lock, flags);
/* Make sure motherboard setup is off */
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
/* Write the appropriate register */
outb_p(0x8|(mca_dev->slot&0xf), MCA_ADAPTER_SETUP_REG);
outb_p(byte, MCA_POS_REG(reg));
outb_p(0, MCA_ADAPTER_SETUP_REG);
spin_unlock_irqrestore(&mca_lock, flags);
/* Update the global register list, while we have the byte */
mca_dev->pos[reg] = byte;
}
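/*
 * Standalone simulation (invented ports and values) of the
 * select/access/deselect dance used by mca_pc_read_pos() and
 * mca_pc_write_pos() above: exactly one of motherboard setup or adapter
 * setup may be active while the POS registers are touched, which is why
 * the whole sequence runs under the spinlock.
 */
#if 0
#include <stdio.h>

static unsigned char adapter_setup, motherboard_setup;

static unsigned char fake_pos_read(int slot, int reg)
{
	unsigned char byte;

	motherboard_setup = 0xff;		/* motherboard setup off */
	adapter_setup = 0x8 | (slot & 0xf);	/* this slot into setup mode */
	byte = (unsigned char)(0x40 + slot + reg); /* stand-in for inb_p() */
	adapter_setup = 0;			/* leave setup mode again */
	return byte;
}

int main(void)
{
	printf("pos[2] of slot 3 = 0x%02x\n", fake_pos_read(3, 2));
	return 0;
}
#endif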
/* for the primary MCA bus, we have identity transforms */
static int mca_dummy_transform_irq(struct mca_device *mca_dev, int irq)
{
return irq;
}
static int mca_dummy_transform_ioport(struct mca_device *mca_dev, int port)
{
return port;
}
static void *mca_dummy_transform_memory(struct mca_device *mca_dev, void *mem)
{
return mem;
}
static int __init mca_init(void)
{
unsigned int i, j;
struct mca_device *mca_dev;
unsigned char pos[8];
short mca_builtin_scsi_ports[] = {0xf7, 0xfd, 0x00};
struct mca_bus *bus;
/*
* WARNING: Be careful when making changes here. Putting an adapter
* and the motherboard simultaneously into setup mode may result in
* damage to chips (according to The Indispensable PC Hardware Book
* by Hans-Peter Messmer). Also, we disable system interrupts (so
* that we are not disturbed in the middle of this).
*/
/* Make sure the MCA bus is present */
if (mca_system_init()) {
printk(KERN_ERR "MCA bus system initialisation failed\n");
return -ENODEV;
}
if (!MCA_bus)
return -ENODEV;
printk(KERN_INFO "Micro Channel bus detected.\n");
/* All MCA systems have at least a primary bus */
bus = mca_attach_bus(MCA_PRIMARY_BUS);
if (!bus)
goto out_nomem;
bus->default_dma_mask = 0xffffffffLL;
bus->f.mca_write_pos = mca_pc_write_pos;
bus->f.mca_read_pos = mca_pc_read_pos;
bus->f.mca_transform_irq = mca_dummy_transform_irq;
bus->f.mca_transform_ioport = mca_dummy_transform_ioport;
bus->f.mca_transform_memory = mca_dummy_transform_memory;
/* get the motherboard device */
mca_dev = kzalloc(sizeof(struct mca_device), GFP_KERNEL);
if (unlikely(!mca_dev))
goto out_nomem;
/*
* We do not expect many MCA interrupts during initialization,
* but let us be safe:
*/
spin_lock_irq(&mca_lock);
/* Make sure adapter setup is off */
outb_p(0, MCA_ADAPTER_SETUP_REG);
/* Read motherboard POS registers */
mca_dev->pos_register = 0x7f;
outb_p(mca_dev->pos_register, MCA_MOTHERBOARD_SETUP_REG);
mca_dev->name[0] = 0;
mca_read_and_store_pos(mca_dev->pos);
mca_configure_adapter_status(mca_dev);
/* fake POS and slot for a motherboard */
mca_dev->pos_id = MCA_MOTHERBOARD_POS;
mca_dev->slot = MCA_MOTHERBOARD;
mca_register_device(MCA_PRIMARY_BUS, mca_dev);
mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
if (unlikely(!mca_dev))
goto out_unlock_nomem;
/* Put motherboard into video setup mode, read integrated video
* POS registers, and turn motherboard setup off.
*/
mca_dev->pos_register = 0xdf;
outb_p(mca_dev->pos_register, MCA_MOTHERBOARD_SETUP_REG);
mca_dev->name[0] = 0;
mca_read_and_store_pos(mca_dev->pos);
mca_configure_adapter_status(mca_dev);
/* fake POS and slot for the integrated video */
mca_dev->pos_id = MCA_INTEGVIDEO_POS;
mca_dev->slot = MCA_INTEGVIDEO;
mca_register_device(MCA_PRIMARY_BUS, mca_dev);
/*
* Put motherboard into scsi setup mode, read integrated scsi
* POS registers, and turn motherboard setup off.
*
* It seems there are two possible SCSI registers. Martin says that
* for the 56,57, 0xf7 is the one, but fails on the 76.
* Alfredo (apena@vnet.ibm.com) says
* 0xfd works on his machine. We'll try both of them. I figure it's
* a good bet that only one could be valid at a time. This could
* screw up though if one is used for something else on the other
* machine.
*/
for (i = 0; (which_scsi = mca_builtin_scsi_ports[i]) != 0; i++) {
outb_p(which_scsi, MCA_MOTHERBOARD_SETUP_REG);
if (mca_read_and_store_pos(pos))
break;
}
if (which_scsi) {
/* found a scsi card */
mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
if (unlikely(!mca_dev))
goto out_unlock_nomem;
for (j = 0; j < 8; j++)
mca_dev->pos[j] = pos[j];
mca_configure_adapter_status(mca_dev);
/* fake POS and slot for integrated SCSI controller */
mca_dev->pos_id = MCA_INTEGSCSI_POS;
mca_dev->slot = MCA_INTEGSCSI;
mca_dev->pos_register = which_scsi;
mca_register_device(MCA_PRIMARY_BUS, mca_dev);
}
/* Turn off motherboard setup */
outb_p(0xff, MCA_MOTHERBOARD_SETUP_REG);
/*
* Now loop over MCA slots: put each adapter into setup mode, and
* read its POS registers. Then put adapter setup off.
*/
for (i = 0; i < MCA_MAX_SLOT_NR; i++) {
outb_p(0x8|(i&0xf), MCA_ADAPTER_SETUP_REG);
if (!mca_read_and_store_pos(pos))
continue;
mca_dev = kzalloc(sizeof(struct mca_device), GFP_ATOMIC);
if (unlikely(!mca_dev))
goto out_unlock_nomem;
for (j = 0; j < 8; j++)
mca_dev->pos[j] = pos[j];
mca_dev->driver_loaded = 0;
mca_dev->slot = i;
mca_dev->pos_register = 0;
mca_configure_adapter_status(mca_dev);
mca_register_device(MCA_PRIMARY_BUS, mca_dev);
}
outb_p(0, MCA_ADAPTER_SETUP_REG);
/* Re-enable interrupts */
spin_unlock_irq(&mca_lock);
for (i = 0; i < MCA_STANDARD_RESOURCES; i++)
request_resource(&ioport_resource, mca_standard_resources + i);
mca_do_proc_init();
return 0;
out_unlock_nomem:
spin_unlock_irq(&mca_lock);
out_nomem:
printk(KERN_EMERG "Failed memory allocation in MCA setup!\n");
return -ENOMEM;
}
subsys_initcall(mca_init);
/*--------------------------------------------------------------------*/
static __kprobes void
mca_handle_nmi_device(struct mca_device *mca_dev, int check_flag)
{
int slot = mca_dev->slot;
if (slot == MCA_INTEGSCSI) {
printk(KERN_CRIT "NMI: caused by MCA integrated SCSI adapter (%s)\n",
mca_dev->name);
} else if (slot == MCA_INTEGVIDEO) {
printk(KERN_CRIT "NMI: caused by MCA integrated video adapter (%s)\n",
mca_dev->name);
} else if (slot == MCA_MOTHERBOARD) {
printk(KERN_CRIT "NMI: caused by motherboard (%s)\n",
mca_dev->name);
}
/* More info available in POS 6 and 7? */
if (check_flag) {
unsigned char pos6, pos7;
pos6 = mca_device_read_pos(mca_dev, 6);
pos7 = mca_device_read_pos(mca_dev, 7);
printk(KERN_CRIT "NMI: POS 6 = 0x%x, POS 7 = 0x%x\n", pos6, pos7);
}
} /* mca_handle_nmi_slot */
/*--------------------------------------------------------------------*/
static int __kprobes mca_handle_nmi_callback(struct device *dev, void *data)
{
struct mca_device *mca_dev = to_mca_device(dev);
unsigned char pos5;
pos5 = mca_device_read_pos(mca_dev, 5);
if (!(pos5 & 0x80)) {
/*
* Bit 7 of POS 5 is reset when this adapter has a hardware
* error. Bit 6 is reset if there's error information
* available in POS 6 and 7.
*/
mca_handle_nmi_device(mca_dev, !(pos5 & 0x40));
return 1;
}
return 0;
}
void __kprobes mca_handle_nmi(void)
{
/*
* First try - scan the various adapters and see if a specific
* adapter was responsible for the error.
*/
bus_for_each_dev(&mca_bus_type, NULL, NULL, mca_handle_nmi_callback);
}
| gpl-2.0 |
soap-DEIM/l4android | arch/mips/dec/wbflush.c | 4693 | 2110 | /*
* Setup the right wbflush routine for the different DECstations.
*
* Created with information from:
* DECstation 3100 Desktop Workstation Functional Specification
* DECstation 5000/200 KN02 System Module Functional Specification
* mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1998 Harald Koerfgen
* Copyright (C) 2002 Maciej W. Rozycki
*/
#include <linux/init.h>
#include <asm/bootinfo.h>
#include <asm/wbflush.h>
#include <asm/barrier.h>
static void wbflush_kn01(void);
static void wbflush_kn210(void);
static void wbflush_mips(void);
void (*__wbflush) (void);
void __init wbflush_setup(void)
{
switch (mips_machtype) {
case MACH_DS23100:
case MACH_DS5000_200: /* DS5000 3max */
__wbflush = wbflush_kn01;
break;
case MACH_DS5100: /* DS5100 MIPSMATE */
__wbflush = wbflush_kn210;
break;
case MACH_DS5000_1XX: /* DS5000/100 3min */
case MACH_DS5000_XX: /* Personal DS5000/2x */
case MACH_DS5000_2X0: /* DS5000/240 3max+ */
case MACH_DS5900: /* DS5900 bigmax */
default:
__wbflush = wbflush_mips;
break;
}
}
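/*
 * Standalone sketch of the dispatch pattern used above: probe the machine
 * once at init time, then call the machine-specific flush routine through
 * a single function pointer so every later caller stays machine-agnostic.
 * The machine IDs and flush bodies below are invented for illustration.
 */
#if 0
#include <stdio.h>

static void flush_a(void) { puts("flush via CP0"); }
static void flush_b(void) { puts("flush via CP3"); }

static void (*flush)(void);

static void flush_setup(int machtype)
{
	switch (machtype) {
	case 1:
		flush = flush_a;
		break;
	default:
		flush = flush_b;
		break;
	}
}

int main(void)
{
	flush_setup(1);
	flush();
	return 0;
}
#endif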
/*
* For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions
* as part of Coprocessor 0.
*/
static void wbflush_kn01(void)
{
asm(".set\tpush\n\t"
".set\tnoreorder\n\t"
"1:\tbc0f\t1b\n\t"
"nop\n\t"
".set\tpop");
}
/*
* For the DS5100 the writeback buffer seems to be a part of Coprocessor 3.
* But CP3 has to be enabled first.
*/
static void wbflush_kn210(void)
{
asm(".set\tpush\n\t"
".set\tnoreorder\n\t"
"mfc0\t$2,$12\n\t"
"lui\t$3,0x8000\n\t"
"or\t$3,$2,$3\n\t"
"mtc0\t$3,$12\n\t"
"nop\n"
"1:\tbc3f\t1b\n\t"
"nop\n\t"
"mtc0\t$2,$12\n\t"
"nop\n\t"
".set\tpop"
: : : "$2", "$3");
}
/*
* I/O ASIC systems use a standard writeback buffer that gets flushed
* upon an uncached read.
*/
static void wbflush_mips(void)
{
__fast_iob();
}
#include <linux/module.h>
EXPORT_SYMBOL(__wbflush);
| gpl-2.0 |
forumi0721/android_kernel_lge_batman_lgu_kr | drivers/staging/omapdrm/omap_encoder.c | 4949 | 4581 | /*
* drivers/staging/omapdrm/omap_encoder.c
*
* Copyright (C) 2011 Texas Instruments
* Author: Rob Clark <rob@ti.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "omap_drv.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
/*
* encoder funcs
*/
#define to_omap_encoder(x) container_of(x, struct omap_encoder, base)
struct omap_encoder {
struct drm_encoder base;
struct omap_overlay_manager *mgr;
};
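/*
 * Standalone sketch of the container_of() idiom behind to_omap_encoder():
 * recover the wrapping struct from a pointer to its embedded member by
 * subtracting the member's offset. The types below are invented.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define my_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct base { int id; };
struct wrapper {
	int extra;
	struct base base;	/* embedded, like drm_encoder above */
};

int main(void)
{
	struct wrapper w = { .extra = 42, .base = { .id = 7 } };
	struct base *b = &w.base;	/* what a callback would receive */

	printf("extra=%d\n", my_container_of(b, struct wrapper, base)->extra);
	return 0;
}
#endif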
static void omap_encoder_destroy(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
DBG("%s", omap_encoder->mgr->name);
drm_encoder_cleanup(encoder);
kfree(omap_encoder);
}
static void omap_encoder_dpms(struct drm_encoder *encoder, int mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
DBG("%s: %d", omap_encoder->mgr->name, mode);
}
static bool omap_encoder_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
DBG("%s", omap_encoder->mgr->name);
return true;
}
static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct drm_device *dev = encoder->dev;
struct omap_drm_private *priv = dev->dev_private;
int i;
mode = adjusted_mode;
DBG("%s: set mode: %dx%d", omap_encoder->mgr->name,
mode->hdisplay, mode->vdisplay);
for (i = 0; i < priv->num_connectors; i++) {
struct drm_connector *connector = priv->connectors[i];
if (connector->encoder == encoder) {
omap_connector_mode_set(connector, mode);
}
}
}
static void omap_encoder_prepare(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct drm_encoder_helper_funcs *encoder_funcs =
encoder->helper_private;
DBG("%s", omap_encoder->mgr->name);
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
}
static void omap_encoder_commit(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
struct drm_encoder_helper_funcs *encoder_funcs =
encoder->helper_private;
DBG("%s", omap_encoder->mgr->name);
omap_encoder->mgr->apply(omap_encoder->mgr);
encoder_funcs->dpms(encoder, DRM_MODE_DPMS_ON);
}
static const struct drm_encoder_funcs omap_encoder_funcs = {
.destroy = omap_encoder_destroy,
};
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
.dpms = omap_encoder_dpms,
.mode_fixup = omap_encoder_mode_fixup,
.mode_set = omap_encoder_mode_set,
.prepare = omap_encoder_prepare,
.commit = omap_encoder_commit,
};
struct omap_overlay_manager *omap_encoder_get_manager(
struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
return omap_encoder->mgr;
}
/* initialize encoder */
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
struct omap_overlay_manager *mgr)
{
struct drm_encoder *encoder = NULL;
struct omap_encoder *omap_encoder;
struct omap_overlay_manager_info info;
int ret;
DBG("%s", mgr->name);
omap_encoder = kzalloc(sizeof(*omap_encoder), GFP_KERNEL);
if (!omap_encoder) {
dev_err(dev->dev, "could not allocate encoder\n");
goto fail;
}
omap_encoder->mgr = mgr;
encoder = &omap_encoder->base;
drm_encoder_init(dev, encoder, &omap_encoder_funcs,
DRM_MODE_ENCODER_TMDS);
drm_encoder_helper_add(encoder, &omap_encoder_helper_funcs);
mgr->get_manager_info(mgr, &info);
/* TODO: fix hard-coded setup.. */
info.default_color = 0x00000000;
info.trans_key = 0x00000000;
info.trans_key_type = OMAP_DSS_COLOR_KEY_GFX_DST;
info.trans_enabled = false;
ret = mgr->set_manager_info(mgr, &info);
if (ret) {
dev_err(dev->dev, "could not set manager info\n");
goto fail;
}
ret = mgr->apply(mgr);
if (ret) {
dev_err(dev->dev, "could not apply\n");
goto fail;
}
return encoder;
fail:
if (encoder) {
omap_encoder_destroy(encoder);
}
return NULL;
}
| gpl-2.0 |
ihadzic/linux-vcrtcm | drivers/isdn/hardware/eicon/s_4bri.c | 5205 | 15753 |
/*
*
Copyright (c) Eicon Networks, 2002.
*
This source file is supplied for the use with
Eicon Networks range of DIVA Server Adapters.
*
Eicon File Revision : 2.1
*
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
*
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY OF ANY KIND WHATSOEVER INCLUDING ANY
implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
See the GNU General Public License for more details.
*
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include "platform.h"
#include "di_defs.h"
#include "pc.h"
#include "pr_pc.h"
#include "di.h"
#include "mi_pc.h"
#include "pc_maint.h"
#include "divasync.h"
#include "pc_init.h"
#include "io.h"
#include "helpers.h"
#include "dsrv4bri.h"
#include "dsp_defs.h"
#include "sdp_hdr.h"
/*****************************************************************************/
#define MAX_XLOG_SIZE (64 * 1024)
/* --------------------------------------------------------------------------
Recovery XLOG from QBRI Card
-------------------------------------------------------------------------- */
static void qBri_cpu_trapped (PISDN_ADAPTER IoAdapter) {
byte __iomem *base ;
word *Xlog ;
dword regs[4], TrapID, offset, size ;
Xdesc xlogDesc ;
int factor = (IoAdapter->tasks == 1) ? 1 : 2;
/*
* check for trapped MIPS 46xx CPU, dump exception frame
*/
base = DIVA_OS_MEM_ATTACH_CONTROL(IoAdapter);
offset = IoAdapter->ControllerNumber * (IoAdapter->MemorySize >> factor) ;
TrapID = READ_DWORD(&base[0x80]) ;
if ( (TrapID == 0x99999999) || (TrapID == 0x99999901) )
{
dump_trap_frame (IoAdapter, &base[0x90]) ;
IoAdapter->trapped = 1 ;
}
regs[0] = READ_DWORD((base + offset) + 0x70);
regs[1] = READ_DWORD((base + offset) + 0x74);
regs[2] = READ_DWORD((base + offset) + 0x78);
regs[3] = READ_DWORD((base + offset) + 0x7c);
regs[0] &= IoAdapter->MemorySize - 1 ;
if ( (regs[0] >= offset)
&& (regs[0] < offset + (IoAdapter->MemorySize >> factor) - 1) )
{
if ( !(Xlog = (word *)diva_os_malloc (0, MAX_XLOG_SIZE)) ) {
DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base);
return ;
}
size = offset + (IoAdapter->MemorySize >> factor) - regs[0] ;
if ( size > MAX_XLOG_SIZE )
size = MAX_XLOG_SIZE ;
memcpy_fromio (Xlog, &base[regs[0]], size) ;
xlogDesc.buf = Xlog ;
xlogDesc.cnt = READ_WORD(&base[regs[1] & (IoAdapter->MemorySize - 1)]) ;
xlogDesc.out = READ_WORD(&base[regs[2] & (IoAdapter->MemorySize - 1)]) ;
dump_xlog_buffer (IoAdapter, &xlogDesc) ;
diva_os_free (0, Xlog) ;
IoAdapter->trapped = 2 ;
}
DIVA_OS_MEM_DETACH_CONTROL(IoAdapter, base);
}
/* --------------------------------------------------------------------------
Reset QBRI Hardware
-------------------------------------------------------------------------- */
static void reset_qBri_hardware (PISDN_ADAPTER IoAdapter) {
word volatile __iomem *qBriReset ;
byte volatile __iomem *qBriCntrl ;
byte volatile __iomem *p ;
qBriReset = (word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter);
WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_SOFT_RESET) ;
diva_os_wait (1) ;
WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_SOFT_RESET) ;
diva_os_wait (1);
WRITE_WORD(qBriReset, READ_WORD(qBriReset) | PLX9054_RELOAD_EEPROM) ;
diva_os_wait (1) ;
WRITE_WORD(qBriReset, READ_WORD(qBriReset) & ~PLX9054_RELOAD_EEPROM) ;
diva_os_wait (1);
DIVA_OS_MEM_DETACH_PROM(IoAdapter, qBriReset);
qBriCntrl = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
p = &qBriCntrl[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)];
WRITE_DWORD(p, 0) ;
DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, qBriCntrl);
DBG_TRC(("resetted board @ reset addr 0x%08lx", qBriReset))
DBG_TRC(("resetted board @ cntrl addr 0x%08lx", p))
}
/* --------------------------------------------------------------------------
Start Card CPU
-------------------------------------------------------------------------- */
void start_qBri_hardware (PISDN_ADAPTER IoAdapter) {
byte volatile __iomem *qBriReset ;
byte volatile __iomem *p ;
p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
qBriReset = &p[(DIVA_4BRI_REVISION(IoAdapter)) ? (MQ2_BREG_RISC) : (MQ_BREG_RISC)];
WRITE_DWORD(qBriReset, MQ_RISC_COLD_RESET_MASK) ;
diva_os_wait (2) ;
WRITE_DWORD(qBriReset, MQ_RISC_WARM_RESET_MASK | MQ_RISC_COLD_RESET_MASK) ;
diva_os_wait (10) ;
DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
DBG_TRC(("started processor @ addr 0x%08lx", qBriReset))
}
/* --------------------------------------------------------------------------
Stop Card CPU
-------------------------------------------------------------------------- */
static void stop_qBri_hardware (PISDN_ADAPTER IoAdapter) {
byte volatile __iomem *p ;
dword volatile __iomem *qBriReset ;
dword volatile __iomem *qBriIrq ;
dword volatile __iomem *qBriIsacDspReset ;
int rev2 = DIVA_4BRI_REVISION(IoAdapter);
int reset_offset = rev2 ? (MQ2_BREG_RISC) : (MQ_BREG_RISC);
int irq_offset = rev2 ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST);
int hw_offset = rev2 ? (MQ2_ISAC_DSP_RESET) : (MQ_ISAC_DSP_RESET);
if ( IoAdapter->ControllerNumber > 0 )
return ;
p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
qBriReset = (dword volatile __iomem *)&p[reset_offset];
qBriIsacDspReset = (dword volatile __iomem *)&p[hw_offset];
/*
* clear interrupt line (reset Local Interrupt Test Register)
*/
WRITE_DWORD(qBriReset, 0) ;
WRITE_DWORD(qBriIsacDspReset, 0) ;
DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
WRITE_BYTE(&p[PLX9054_INTCSR], 0x00); /* disable PCI interrupts */
DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
qBriIrq = (dword volatile __iomem *)&p[irq_offset];
WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF) ;
DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
DBG_TRC(("stopped processor @ addr 0x%08lx", qBriReset))
}
/* --------------------------------------------------------------------------
FPGA download
-------------------------------------------------------------------------- */
#define FPGA_NAME_OFFSET 0x10
static byte * qBri_check_FPGAsrc (PISDN_ADAPTER IoAdapter, char *FileName,
dword *Length, dword *code) {
byte *File ;
char *fpgaFile, *fpgaType, *fpgaDate, *fpgaTime ;
dword fpgaFlen, fpgaTlen, fpgaDlen, cnt, year, i ;
if (!(File = (byte *)xdiLoadFile (FileName, Length, 0))) {
return (NULL) ;
}
/*
* scan file until FF and put id string into buffer
*/
for ( i = 0 ; File[i] != 0xff ; )
{
if ( ++i >= *Length )
{
DBG_FTL(("FPGA download: start of data header not found"))
xdiFreeFile (File) ;
return (NULL) ;
}
}
*code = i++ ;
if ( (File[i] & 0xF0) != 0x20 )
{
DBG_FTL(("FPGA download: data header corrupted"))
xdiFreeFile (File) ;
return (NULL) ;
}
fpgaFlen = (dword) File[FPGA_NAME_OFFSET - 1] ;
if ( fpgaFlen == 0 )
fpgaFlen = 12 ;
fpgaFile = (char *)&File[FPGA_NAME_OFFSET] ;
fpgaTlen = (dword) fpgaFile[fpgaFlen + 2] ;
if ( fpgaTlen == 0 )
fpgaTlen = 10 ;
fpgaType = (char *)&fpgaFile[fpgaFlen + 3] ;
fpgaDlen = (dword) fpgaType[fpgaTlen + 2] ;
if ( fpgaDlen == 0 )
fpgaDlen = 11 ;
fpgaDate = (char *)&fpgaType[fpgaTlen + 3] ;
fpgaTime = (char *)&fpgaDate[fpgaDlen + 3] ;
cnt = (dword)(((File[ i ] & 0x0F) << 20) + (File[i + 1] << 12)
+ (File[i + 2] << 4) + (File[i + 3] >> 4)) ;
if ( (dword)(i + (cnt / 8)) > *Length )
{
DBG_FTL(("FPGA download: '%s' file too small (%ld < %ld)",
FileName, *Length, *code + ((cnt + 7) / 8)))
xdiFreeFile (File) ;
return (NULL) ;
}
i = 0 ;
do
{
while ( (fpgaDate[i] != '\0')
&& ((fpgaDate[i] < '0') || (fpgaDate[i] > '9')) )
{
i++;
}
year = 0 ;
while ( (fpgaDate[i] >= '0') && (fpgaDate[i] <= '9') )
year = year * 10 + (fpgaDate[i++] - '0') ;
} while ( (year < 2000) && (fpgaDate[i] != '\0') );
switch (IoAdapter->cardType) {
case CARDTYPE_DIVASRV_B_2F_PCI:
break;
default:
if ( year >= 2001 ) {
IoAdapter->fpga_features |= PCINIT_FPGA_PLX_ACCESS_SUPPORTED ;
}
}
DBG_LOG(("FPGA[%s] file %s (%s %s) len %d",
fpgaType, fpgaFile, fpgaDate, fpgaTime, cnt))
return (File) ;
}
/******************************************************************************/
#define FPGA_PROG 0x0001 /* PROG enable low */
#define FPGA_BUSY 0x0002 /* BUSY high, DONE low */
#define FPGA_CS 0x000C /* Enable I/O pins */
#define FPGA_CCLK 0x0100
#define FPGA_DOUT 0x0400
#define FPGA_DIN FPGA_DOUT /* bidirectional I/O */
int qBri_FPGA_download (PISDN_ADAPTER IoAdapter) {
int bit ;
byte *File ;
dword code, FileLength ;
word volatile __iomem *addr = (word volatile __iomem *)DIVA_OS_MEM_ATTACH_PROM(IoAdapter);
word val, baseval = FPGA_CS | FPGA_PROG ;
if (DIVA_4BRI_REVISION(IoAdapter))
{
char* name;
switch (IoAdapter->cardType) {
case CARDTYPE_DIVASRV_B_2F_PCI:
name = "dsbri2f.bit";
break;
case CARDTYPE_DIVASRV_B_2M_V2_PCI:
case CARDTYPE_DIVASRV_VOICE_B_2M_V2_PCI:
name = "dsbri2m.bit";
break;
default:
name = "ds4bri2.bit";
}
File = qBri_check_FPGAsrc (IoAdapter, name,
&FileLength, &code);
}
else
{
File = qBri_check_FPGAsrc (IoAdapter, "ds4bri.bit",
&FileLength, &code) ;
}
if ( !File ) {
DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
return (0) ;
}
/*
* prepare download, pulse PROGRAM pin down.
*/
WRITE_WORD(addr, baseval & ~FPGA_PROG) ; /* PROGRAM low pulse */
WRITE_WORD(addr, baseval) ; /* release */
diva_os_wait (50) ; /* wait until FPGA finished internal memory clear */
/*
* check done pin, must be low
*/
if ( READ_WORD(addr) & FPGA_BUSY )
{
DBG_FTL(("FPGA download: acknowledge for FPGA memory clear missing"))
xdiFreeFile (File) ;
DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
return (0) ;
}
/*
* put data onto the FPGA
*/
while ( code < FileLength )
{
val = ((word)File[code++]) << 3 ;
for ( bit = 8 ; bit-- > 0 ; val <<= 1 ) /* put byte onto FPGA */
{
baseval &= ~FPGA_DOUT ; /* clr data bit */
baseval |= (val & FPGA_DOUT) ; /* copy data bit */
WRITE_WORD(addr, baseval) ;
WRITE_WORD(addr, baseval | FPGA_CCLK) ; /* set CCLK hi */
WRITE_WORD(addr, baseval | FPGA_CCLK) ; /* set CCLK hi */
WRITE_WORD(addr, baseval) ; /* set CCLK lo */
}
}
xdiFreeFile (File) ;
diva_os_wait (100) ;
val = READ_WORD(addr) ;
DIVA_OS_MEM_DETACH_PROM(IoAdapter, addr);
if ( !(val & FPGA_BUSY) )
{
DBG_FTL(("FPGA download: chip remains in busy state (0x%04x)", val))
return (0) ;
}
return (1) ;
}
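/*
 * Standalone sketch of the MSB-first bit-banging in the download loop above:
 * shifting the byte left by 3 lines bit 7 up with the DOUT pin (0x80 << 3 ==
 * 0x400), each bit is placed on the data line, and a clock pulse latches it.
 * Simplified: the real loop also keeps the CS/PROG bits asserted. The
 * "register writes" here just print the waveform.
 */
#if 0
#include <stdio.h>

#define DOUT 0x0400		/* data line, as FPGA_DOUT above */
#define CCLK 0x0100		/* clock line, as FPGA_CCLK above */

static void wr(unsigned short v) { printf("%04x ", v); }

static void shift_byte(unsigned char b)
{
	unsigned short val = (unsigned short)b << 3;	/* bit 7 -> DOUT */
	int bit;

	for (bit = 8; bit-- > 0; val <<= 1) {
		unsigned short base = val & DOUT;	/* data bit only */

		wr(base);		/* set up data, clock low */
		wr(base | CCLK);	/* raise clock: FPGA samples DOUT */
		wr(base);		/* clock low again */
	}
	putchar('\n');
}

int main(void)
{
	shift_byte(0xA5);
	return 0;
}
#endif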
static int load_qBri_hardware (PISDN_ADAPTER IoAdapter) {
return (0);
}
/* --------------------------------------------------------------------------
Card ISR
-------------------------------------------------------------------------- */
static int qBri_ISR (struct _ISDN_ADAPTER* IoAdapter) {
dword volatile __iomem *qBriIrq ;
PADAPTER_LIST_ENTRY QuadroList = IoAdapter->QuadroList ;
word i ;
int serviced = 0 ;
byte __iomem *p;
p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
if ( !(READ_BYTE(&p[PLX9054_INTCSR]) & 0x80) ) {
DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
return (0) ;
}
DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
/*
* clear interrupt line (reset Local Interrupt Test Register)
*/
p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]);
WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF) ;
DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
for ( i = 0 ; i < IoAdapter->tasks; ++i )
{
IoAdapter = QuadroList->QuadroAdapter[i] ;
if ( IoAdapter && IoAdapter->Initialized
&& IoAdapter->tst_irq (&IoAdapter->a) )
{
IoAdapter->IrqCount++ ;
serviced = 1 ;
diva_os_schedule_soft_isr (&IoAdapter->isr_soft_isr);
}
}
return (serviced) ;
}
/* --------------------------------------------------------------------------
Does disable the interrupt on the card
-------------------------------------------------------------------------- */
static void disable_qBri_interrupt (PISDN_ADAPTER IoAdapter) {
dword volatile __iomem *qBriIrq ;
byte __iomem *p;
if ( IoAdapter->ControllerNumber > 0 )
return ;
/*
* clear interrupt line (reset Local Interrupt Test Register)
*/
p = DIVA_OS_MEM_ATTACH_RESET(IoAdapter);
WRITE_BYTE(&p[PLX9054_INTCSR], 0x00); /* disable PCI interrupts */
DIVA_OS_MEM_DETACH_RESET(IoAdapter, p);
p = DIVA_OS_MEM_ATTACH_CTLREG(IoAdapter);
qBriIrq = (dword volatile __iomem *)(&p[DIVA_4BRI_REVISION(IoAdapter) ? (MQ2_BREG_IRQ_TEST) : (MQ_BREG_IRQ_TEST)]);
WRITE_DWORD(qBriIrq, MQ_IRQ_REQ_OFF) ;
DIVA_OS_MEM_DETACH_CTLREG(IoAdapter, p);
}
/* --------------------------------------------------------------------------
Install Adapter Entry Points
-------------------------------------------------------------------------- */
static void set_common_qBri_functions (PISDN_ADAPTER IoAdapter) {
ADAPTER *a;
a = &IoAdapter->a ;
a->ram_in = mem_in ;
a->ram_inw = mem_inw ;
a->ram_in_buffer = mem_in_buffer ;
a->ram_look_ahead = mem_look_ahead ;
a->ram_out = mem_out ;
a->ram_outw = mem_outw ;
a->ram_out_buffer = mem_out_buffer ;
a->ram_inc = mem_inc ;
IoAdapter->out = pr_out ;
IoAdapter->dpc = pr_dpc ;
IoAdapter->tst_irq = scom_test_int ;
IoAdapter->clr_irq = scom_clear_int ;
IoAdapter->pcm = (struct pc_maint *)MIPS_MAINT_OFFS ;
IoAdapter->load = load_qBri_hardware ;
IoAdapter->disIrq = disable_qBri_interrupt ;
IoAdapter->rstFnc = reset_qBri_hardware ;
IoAdapter->stop = stop_qBri_hardware ;
IoAdapter->trapFnc = qBri_cpu_trapped ;
IoAdapter->diva_isr_handler = qBri_ISR;
IoAdapter->a.io = (void*)IoAdapter ;
}
static void set_qBri_functions (PISDN_ADAPTER IoAdapter) {
if (!IoAdapter->tasks) {
IoAdapter->tasks = MQ_INSTANCE_COUNT;
}
IoAdapter->MemorySize = MQ_MEMORY_SIZE ;
set_common_qBri_functions (IoAdapter) ;
diva_os_set_qBri_functions (IoAdapter) ;
}
static void set_qBri2_functions (PISDN_ADAPTER IoAdapter) {
if (!IoAdapter->tasks) {
IoAdapter->tasks = MQ_INSTANCE_COUNT;
}
IoAdapter->MemorySize = (IoAdapter->tasks == 1) ? BRI2_MEMORY_SIZE : MQ2_MEMORY_SIZE;
set_common_qBri_functions (IoAdapter) ;
diva_os_set_qBri2_functions (IoAdapter) ;
}
/******************************************************************************/
void prepare_qBri_functions (PISDN_ADAPTER IoAdapter) {
set_qBri_functions (IoAdapter->QuadroList->QuadroAdapter[0]) ;
set_qBri_functions (IoAdapter->QuadroList->QuadroAdapter[1]) ;
set_qBri_functions (IoAdapter->QuadroList->QuadroAdapter[2]) ;
set_qBri_functions (IoAdapter->QuadroList->QuadroAdapter[3]) ;
}
void prepare_qBri2_functions (PISDN_ADAPTER IoAdapter) {
if (!IoAdapter->tasks) {
IoAdapter->tasks = MQ_INSTANCE_COUNT;
}
set_qBri2_functions (IoAdapter->QuadroList->QuadroAdapter[0]) ;
if (IoAdapter->tasks > 1) {
set_qBri2_functions (IoAdapter->QuadroList->QuadroAdapter[1]) ;
set_qBri2_functions (IoAdapter->QuadroList->QuadroAdapter[2]) ;
set_qBri2_functions (IoAdapter->QuadroList->QuadroAdapter[3]) ;
}
}
/* -------------------------------------------------------------------------- */
| gpl-2.0 |
davros-/DEMENTED_kernel_jf | sound/core/seq/seq_device.c | 5205 | 13793 | /*
* ALSA sequencer device management
* Copyright (c) 1999 by Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*
*----------------------------------------------------------------
*
* This device handler separates the card driver module from sequencer
* stuff (sequencer core, synth drivers, etc), so that a user can avoid
* spending unnecessary resources, e.g. when all they need is to listen
* to MP3s.
*
* The card (or lowlevel) driver creates a sequencer device entry
* via snd_seq_device_new(). This entry is the handle used to communicate
* with the sequencer device "driver", which implements the part that
* actually talks to the sequencer core.
* Each sequencer device entry has an id string and the corresponding
* driver with the same id is loaded when required. For example,
* lowlevel code to access the emu8000 chip on an SBAWE card is included
* in the emu8000-synth module. To activate this module, the hardware
* resources like the i/o port are passed via the snd_seq_device argument.
*
*/
#include <linux/init.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/seq_device.h>
#include <sound/seq_kernel.h>
#include <sound/initval.h>
#include <linux/kmod.h>
#include <linux/slab.h>
#include <linux/mutex.h>
MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>");
MODULE_DESCRIPTION("ALSA sequencer device management");
MODULE_LICENSE("GPL");
/* driver state */
#define DRIVER_EMPTY 0
#define DRIVER_LOADED (1<<0)
#define DRIVER_REQUESTED (1<<1)
#define DRIVER_LOCKED (1<<2)
struct ops_list {
char id[ID_LEN]; /* driver id */
int driver; /* driver state */
int used; /* reference counter */
int argsize; /* argument size */
/* operators */
struct snd_seq_dev_ops ops;
/* registered devices */
struct list_head dev_list; /* list of devices */
int num_devices; /* number of associated devices */
int num_init_devices; /* number of initialized devices */
struct mutex reg_mutex;
struct list_head list; /* next driver */
};
static LIST_HEAD(opslist);
static int num_ops;
static DEFINE_MUTEX(ops_mutex);
#ifdef CONFIG_PROC_FS
static struct snd_info_entry *info_entry;
#endif
/*
* prototypes
*/
static int snd_seq_device_free(struct snd_seq_device *dev);
static int snd_seq_device_dev_free(struct snd_device *device);
static int snd_seq_device_dev_register(struct snd_device *device);
static int snd_seq_device_dev_disconnect(struct snd_device *device);
static int init_device(struct snd_seq_device *dev, struct ops_list *ops);
static int free_device(struct snd_seq_device *dev, struct ops_list *ops);
static struct ops_list *find_driver(char *id, int create_if_empty);
static struct ops_list *create_driver(char *id);
static void unlock_driver(struct ops_list *ops);
static void remove_drivers(void);
/*
* show all drivers and their status
*/
#ifdef CONFIG_PROC_FS
static void snd_seq_device_info(struct snd_info_entry *entry,
struct snd_info_buffer *buffer)
{
struct ops_list *ops;
mutex_lock(&ops_mutex);
list_for_each_entry(ops, &opslist, list) {
snd_iprintf(buffer, "snd-%s%s%s%s,%d\n",
ops->id,
ops->driver & DRIVER_LOADED ? ",loaded" : (ops->driver == DRIVER_EMPTY ? ",empty" : ""),
ops->driver & DRIVER_REQUESTED ? ",requested" : "",
ops->driver & DRIVER_LOCKED ? ",locked" : "",
ops->num_devices);
}
mutex_unlock(&ops_mutex);
}
#endif
/*
* load all registered drivers (called from seq_clientmgr.c)
*/
#ifdef CONFIG_MODULES
/* avoid auto-loading during module_init() */
static int snd_seq_in_init;
void snd_seq_autoload_lock(void)
{
snd_seq_in_init++;
}
void snd_seq_autoload_unlock(void)
{
snd_seq_in_init--;
}
#endif
void snd_seq_device_load_drivers(void)
{
#ifdef CONFIG_MODULES
struct ops_list *ops;
/* Calling request_module during module_init()
* may cause blocking.
*/
if (snd_seq_in_init)
return;
mutex_lock(&ops_mutex);
list_for_each_entry(ops, &opslist, list) {
if (! (ops->driver & DRIVER_LOADED) &&
! (ops->driver & DRIVER_REQUESTED)) {
ops->used++;
mutex_unlock(&ops_mutex);
ops->driver |= DRIVER_REQUESTED;
request_module("snd-%s", ops->id);
mutex_lock(&ops_mutex);
ops->used--;
}
}
mutex_unlock(&ops_mutex);
#endif
}
/*
* register a sequencer device
* card = card info (NULL allowed)
* device = device number (if any)
* id = id of driver
* result = return pointer (NULL allowed if unnecessary)
*/
int snd_seq_device_new(struct snd_card *card, int device, char *id, int argsize,
struct snd_seq_device **result)
{
struct snd_seq_device *dev;
struct ops_list *ops;
int err;
static struct snd_device_ops dops = {
.dev_free = snd_seq_device_dev_free,
.dev_register = snd_seq_device_dev_register,
.dev_disconnect = snd_seq_device_dev_disconnect,
};
if (result)
*result = NULL;
if (snd_BUG_ON(!id))
return -EINVAL;
ops = find_driver(id, 1);
if (ops == NULL)
return -ENOMEM;
dev = kzalloc(sizeof(*dev)*2 + argsize, GFP_KERNEL);
if (dev == NULL) {
unlock_driver(ops);
return -ENOMEM;
}
/* set up device info */
dev->card = card;
dev->device = device;
strlcpy(dev->id, id, sizeof(dev->id));
dev->argsize = argsize;
dev->status = SNDRV_SEQ_DEVICE_FREE;
/* add this device to the list */
mutex_lock(&ops->reg_mutex);
list_add_tail(&dev->list, &ops->dev_list);
ops->num_devices++;
mutex_unlock(&ops->reg_mutex);
unlock_driver(ops);
if ((err = snd_device_new(card, SNDRV_DEV_SEQUENCER, dev, &dops)) < 0) {
snd_seq_device_free(dev);
return err;
}
if (result)
*result = dev;
return 0;
}
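/*
 * Hedged usage sketch: how a card (lowlevel) driver might create a
 * sequencer device entry, per the header comment above. "my-synth",
 * struct my_args and the port value are invented; SNDRV_SEQ_DEVICE_ARGPTR()
 * is assumed to be the <sound/seq_device.h> helper that returns the
 * argsize area allocated after the entry.
 */
#if 0
struct my_args { unsigned long port; };

static int my_card_attach(struct snd_card *card)
{
	struct snd_seq_device *dev;
	struct my_args *args;
	int err;

	err = snd_seq_device_new(card, 0, "my-synth",
				 sizeof(struct my_args), &dev);
	if (err < 0)
		return err;
	args = SNDRV_SEQ_DEVICE_ARGPTR(dev);
	args->port = 0x620;	/* hardware resource handed to the driver */
	return 0;
}
#endif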
/*
* free the existing device
*/
static int snd_seq_device_free(struct snd_seq_device *dev)
{
struct ops_list *ops;
if (snd_BUG_ON(!dev))
return -EINVAL;
ops = find_driver(dev->id, 0);
if (ops == NULL)
return -ENXIO;
/* remove the device from the list */
mutex_lock(&ops->reg_mutex);
list_del(&dev->list);
ops->num_devices--;
mutex_unlock(&ops->reg_mutex);
free_device(dev, ops);
if (dev->private_free)
dev->private_free(dev);
kfree(dev);
unlock_driver(ops);
return 0;
}
static int snd_seq_device_dev_free(struct snd_device *device)
{
struct snd_seq_device *dev = device->device_data;
return snd_seq_device_free(dev);
}
/*
* register the device
*/
static int snd_seq_device_dev_register(struct snd_device *device)
{
struct snd_seq_device *dev = device->device_data;
struct ops_list *ops;
ops = find_driver(dev->id, 0);
if (ops == NULL)
return -ENOENT;
/* initialize this device if the corresponding driver was
* already loaded
*/
if (ops->driver & DRIVER_LOADED)
init_device(dev, ops);
unlock_driver(ops);
return 0;
}
/*
* disconnect the device
*/
static int snd_seq_device_dev_disconnect(struct snd_device *device)
{
struct snd_seq_device *dev = device->device_data;
struct ops_list *ops;
ops = find_driver(dev->id, 0);
if (ops == NULL)
return -ENOENT;
free_device(dev, ops);
unlock_driver(ops);
return 0;
}
/*
* register device driver
* id = driver id
* entry = driver operators - duplicated to each instance
*/
int snd_seq_device_register_driver(char *id, struct snd_seq_dev_ops *entry,
int argsize)
{
struct ops_list *ops;
struct snd_seq_device *dev;
if (id == NULL || entry == NULL ||
entry->init_device == NULL || entry->free_device == NULL)
return -EINVAL;
snd_seq_autoload_lock();
ops = find_driver(id, 1);
if (ops == NULL) {
snd_seq_autoload_unlock();
return -ENOMEM;
}
if (ops->driver & DRIVER_LOADED) {
snd_printk(KERN_WARNING "driver_register: driver '%s' already exists\n", id);
unlock_driver(ops);
snd_seq_autoload_unlock();
return -EBUSY;
}
mutex_lock(&ops->reg_mutex);
/* copy driver operators */
ops->ops = *entry;
ops->driver |= DRIVER_LOADED;
ops->argsize = argsize;
/* initialize existing devices if necessary */
list_for_each_entry(dev, &ops->dev_list, list) {
init_device(dev, ops);
}
mutex_unlock(&ops->reg_mutex);
unlock_driver(ops);
snd_seq_autoload_unlock();
return 0;
}
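/*
 * Hedged counterpart sketch: the matching "snd-my-synth" module would
 * provide the operators and register them under the same id, after which
 * any pending devices with that id get initialized. Only the
 * init_device/free_device fields are taken from the code above; the stub
 * bodies and all names are invented.
 */
#if 0
struct my_args { unsigned long port; };	/* must match the creator side */

static int my_init_device(struct snd_seq_device *dev)
{
	/* attach to the sequencer core using dev's argument area */
	return 0;
}

static int my_free_device(struct snd_seq_device *dev)
{
	return 0;
}

static struct snd_seq_dev_ops my_ops = {
	.init_device = my_init_device,
	.free_device = my_free_device,
};

static int __init my_module_init(void)
{
	return snd_seq_device_register_driver("my-synth", &my_ops,
					      sizeof(struct my_args));
}
#endif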
/*
* create driver record
*/
static struct ops_list * create_driver(char *id)
{
struct ops_list *ops;
ops = kzalloc(sizeof(*ops), GFP_KERNEL);
if (ops == NULL)
return ops;
/* set up driver entry */
strlcpy(ops->id, id, sizeof(ops->id));
mutex_init(&ops->reg_mutex);
/*
* The ->reg_mutex locking rules are per-driver, so we create
* separate per-driver lock classes:
*/
lockdep_set_class(&ops->reg_mutex, (struct lock_class_key *)id);
ops->driver = DRIVER_EMPTY;
INIT_LIST_HEAD(&ops->dev_list);
/* lock this instance */
ops->used = 1;
/* register driver entry */
mutex_lock(&ops_mutex);
list_add_tail(&ops->list, &opslist);
num_ops++;
mutex_unlock(&ops_mutex);
return ops;
}
/*
* unregister the specified driver
*/
int snd_seq_device_unregister_driver(char *id)
{
struct ops_list *ops;
struct snd_seq_device *dev;
ops = find_driver(id, 0);
if (ops == NULL)
return -ENXIO;
if (! (ops->driver & DRIVER_LOADED) ||
(ops->driver & DRIVER_LOCKED)) {
snd_printk(KERN_ERR "driver_unregister: cannot unload driver '%s': status=%x\n",
id, ops->driver);
unlock_driver(ops);
return -EBUSY;
}
/* close and release all devices associated with this driver */
mutex_lock(&ops->reg_mutex);
ops->driver |= DRIVER_LOCKED; /* do not remove this driver recursively */
list_for_each_entry(dev, &ops->dev_list, list) {
free_device(dev, ops);
}
ops->driver = 0;
if (ops->num_init_devices > 0)
snd_printk(KERN_ERR "free_driver: init_devices > 0!! (%d)\n",
ops->num_init_devices);
mutex_unlock(&ops->reg_mutex);
unlock_driver(ops);
/* remove empty driver entries */
remove_drivers();
return 0;
}
/*
* remove empty driver entries
*/
static void remove_drivers(void)
{
struct list_head *head;
mutex_lock(&ops_mutex);
head = opslist.next;
while (head != &opslist) {
struct ops_list *ops = list_entry(head, struct ops_list, list);
if (! (ops->driver & DRIVER_LOADED) &&
ops->used == 0 && ops->num_devices == 0) {
head = head->next;
list_del(&ops->list);
kfree(ops);
num_ops--;
} else
head = head->next;
}
mutex_unlock(&ops_mutex);
}
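/*
 * Standalone sketch of why remove_drivers() advances "head" by hand: when
 * the current node may be freed, the cursor must not be read again after
 * unlinking. The same requirement is shown here with the pointer-to-pointer
 * idiom on a minimal singly linked list, invented for illustration.
 */
#if 0
#include <stdio.h>
#include <stdlib.h>

struct node { int keep; struct node *next; };

static struct node *prune(struct node *head)
{
	struct node **pp = &head;

	while (*pp) {
		struct node *n = *pp;

		if (!n->keep) {
			*pp = n->next;	/* unlink first... */
			free(n);	/* ...then it is safe to free */
		} else {
			pp = &n->next;
		}
	}
	return head;
}

int main(void)
{
	struct node *h = NULL;
	int i;

	for (i = 0; i < 5; i++) {
		struct node *n = malloc(sizeof(*n));

		n->keep = i & 1;
		n->next = h;
		h = n;
	}
	for (h = prune(h); h; h = h->next)
		printf("%d ", h->keep);
	putchar('\n');
	return 0;
}
#endif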
/*
* initialize the device - call init_device operator
*/
static int init_device(struct snd_seq_device *dev, struct ops_list *ops)
{
if (! (ops->driver & DRIVER_LOADED))
return 0; /* driver is not loaded yet */
if (dev->status != SNDRV_SEQ_DEVICE_FREE)
return 0; /* already initialized */
if (ops->argsize != dev->argsize) {
snd_printk(KERN_ERR "incompatible device '%s' for plug-in '%s' (%d %d)\n",
dev->name, ops->id, ops->argsize, dev->argsize);
return -EINVAL;
}
if (ops->ops.init_device(dev) >= 0) {
dev->status = SNDRV_SEQ_DEVICE_REGISTERED;
ops->num_init_devices++;
} else {
snd_printk(KERN_ERR "init_device failed: %s: %s\n",
dev->name, dev->id);
}
return 0;
}
/*
* release the device - call free_device operator
*/
static int free_device(struct snd_seq_device *dev, struct ops_list *ops)
{
int result;
if (! (ops->driver & DRIVER_LOADED))
return 0; /* driver is not loaded yet */
if (dev->status != SNDRV_SEQ_DEVICE_REGISTERED)
return 0; /* not registered */
if (ops->argsize != dev->argsize) {
snd_printk(KERN_ERR "incompatible device '%s' for plug-in '%s' (%d %d)\n",
dev->name, ops->id, ops->argsize, dev->argsize);
return -EINVAL;
}
if ((result = ops->ops.free_device(dev)) >= 0 || result == -ENXIO) {
dev->status = SNDRV_SEQ_DEVICE_FREE;
dev->driver_data = NULL;
ops->num_init_devices--;
} else {
snd_printk(KERN_ERR "free_device failed: %s: %s\n",
dev->name, dev->id);
}
return 0;
}
/*
* find the matching driver with given id
*/
static struct ops_list * find_driver(char *id, int create_if_empty)
{
struct ops_list *ops;
mutex_lock(&ops_mutex);
list_for_each_entry(ops, &opslist, list) {
if (strcmp(ops->id, id) == 0) {
ops->used++;
mutex_unlock(&ops_mutex);
return ops;
}
}
mutex_unlock(&ops_mutex);
if (create_if_empty)
return create_driver(id);
return NULL;
}
static void unlock_driver(struct ops_list *ops)
{
mutex_lock(&ops_mutex);
ops->used--;
mutex_unlock(&ops_mutex);
}
/*
* module part
*/
static int __init alsa_seq_device_init(void)
{
#ifdef CONFIG_PROC_FS
info_entry = snd_info_create_module_entry(THIS_MODULE, "drivers",
snd_seq_root);
if (info_entry == NULL)
return -ENOMEM;
info_entry->content = SNDRV_INFO_CONTENT_TEXT;
info_entry->c.text.read = snd_seq_device_info;
if (snd_info_register(info_entry) < 0) {
snd_info_free_entry(info_entry);
return -ENOMEM;
}
#endif
return 0;
}
static void __exit alsa_seq_device_exit(void)
{
remove_drivers();
#ifdef CONFIG_PROC_FS
snd_info_free_entry(info_entry);
#endif
if (num_ops)
snd_printk(KERN_ERR "drivers not released (%d)\n", num_ops);
}
module_init(alsa_seq_device_init)
module_exit(alsa_seq_device_exit)
EXPORT_SYMBOL(snd_seq_device_load_drivers);
EXPORT_SYMBOL(snd_seq_device_new);
EXPORT_SYMBOL(snd_seq_device_register_driver);
EXPORT_SYMBOL(snd_seq_device_unregister_driver);
EXPORT_SYMBOL(snd_seq_autoload_lock);
EXPORT_SYMBOL(snd_seq_autoload_unlock);
| gpl-2.0 |
TEAM-Gummy/android_kernel_samsung_hlte | drivers/gpu/drm/nouveau/nv04_fifo.c | 8021 | 16051 | /*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_util.h"
#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
#define NV04_RAMFC__SIZE 32
#define NV04_RAMFC_DMA_PUT 0x00
#define NV04_RAMFC_DMA_GET 0x04
#define NV04_RAMFC_DMA_INSTANCE 0x08
#define NV04_RAMFC_DMA_STATE 0x0C
#define NV04_RAMFC_DMA_FETCH 0x10
#define NV04_RAMFC_ENGINE 0x14
#define NV04_RAMFC_PULL1_ENGINE 0x18
#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset)
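/*
 * Standalone sketch of the per-channel context addressing set up by the
 * NV04_RAMFC()/RAMFC_WR() macros above: each channel owns a fixed-size slot
 * at base + id * size, and named offsets index into that slot. The backing
 * "instance memory" is a plain array here; sizes are illustrative.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define SLOT_SIZE 32			/* like NV04_RAMFC__SIZE */
#define OFF_DMA_PUT 0x00
#define OFF_DMA_GET 0x04

static uint32_t inst_mem[16 * SLOT_SIZE / 4];	/* fake instance memory */

static void ramfc_wr(int chan, int off, uint32_t v)
{
	inst_mem[(chan * SLOT_SIZE + off) / 4] = v;
}

static uint32_t ramfc_rd(int chan, int off)
{
	return inst_mem[(chan * SLOT_SIZE + off) / 4];
}

int main(void)
{
	ramfc_wr(3, OFF_DMA_PUT, 0x1000);
	printf("ch3 PUT=0x%x GET=0x%x\n",
	       ramfc_rd(3, OFF_DMA_PUT), ramfc_rd(3, OFF_DMA_GET));
	return 0;
}
#endif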
void
nv04_fifo_disable(struct drm_device *dev)
{
uint32_t tmp;
tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
}
void
nv04_fifo_enable(struct drm_device *dev)
{
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
bool
nv04_fifo_reassign(struct drm_device *dev, bool enable)
{
uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
return (reassign == 1);
}
bool
nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
{
int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
if (!enable) {
/* In some cases the PFIFO puller may be left in an
* inconsistent state if you try to stop it when it's
* busy translating handles. Sometimes you get a
* PFIFO_CACHE_ERROR, sometimes it just fails silently
* sending incorrect instance offsets to PGRAPH after
* it's started up again. To avoid the latter we
* invalidate the most recently calculated instance.
*/
if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
nv_wr32(dev, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_CACHE_ERROR);
nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
}
return pull & 1;
}
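/*
 * Standalone sketch of the stop-then-sanitize pattern in
 * nv04_fifo_cache_pull() above: clear the enable bit, poll a busy flag with
 * a bounded wait, then invalidate any half-computed state. The "registers"
 * are plain variables and the bit positions are invented; the hardware
 * going idle is simulated inside the wait loop.
 */
#if 0
#include <stdio.h>

static unsigned pull0;		/* bit 0: enable, bit 4: "hash busy" */
static unsigned hash;

static int wait_idle(void)
{
	int tries = 1000;

	while ((pull0 & 0x10) && --tries)
		pull0 &= ~0x10;	/* stand-in for the hardware finishing */
	return tries != 0;
}

static void pull_disable(void)
{
	pull0 &= ~0x1;		/* stop the puller */
	if (!wait_idle())
		puts("timeout idling the puller");
	hash = 0;		/* drop the possibly-stale computed hash */
}

int main(void)
{
	pull0 = 0x11;		/* enabled and busy */
	pull_disable();
	printf("pull0=0x%x hash=%u\n", pull0, hash);
	return 0;
}
#endif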
int
nv04_fifo_channel_id(struct drm_device *dev)
{
return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
}
#ifdef __BIG_ENDIAN
#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN
#else
#define DMA_FETCH_ENDIANNESS 0
#endif
int
nv04_fifo_create_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned long flags;
int ret;
ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
NV04_RAMFC__SIZE,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
&chan->ramfc);
if (ret)
return ret;
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
if (!chan->user)
return -ENOMEM;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
/* Setup initial state */
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
RAMFC_WR(DMA_GET, chan->pushbuf_base);
RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
DMA_FETCH_ENDIANNESS));
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
void
nv04_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
pfifo->reassign(dev, false);
/* Unload the context if it's the currently active one */
if (pfifo->channel_id(dev) == chan->id) {
pfifo->disable(dev);
pfifo->unload_context(dev);
pfifo->enable(dev);
}
/* Keep it from being rescheduled */
nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
pfifo->reassign(dev, true);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the channel resources */
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
static void
nv04_fifo_do_load_context(struct drm_device *dev, int chid)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fc = NV04_RAMFC(chid), tmp;
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
tmp = nv_ri32(dev, fc + 8);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
int
nv04_fifo_load_context(struct nouveau_channel *chan)
{
uint32_t tmp;
nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
nv04_fifo_do_load_context(chan->dev, chan->id);
nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
return 0;
}
int
nv04_fifo_unload_context(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan = NULL;
uint32_t tmp;
int chid;
chid = pfifo->channel_id(dev);
if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
return 0;
chan = dev_priv->channels.ptr[chid];
if (!chan) {
NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
return -EINVAL;
}
RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
RAMFC_WR(DMA_INSTANCE, tmp);
RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
nv04_fifo_do_load_context(dev, pfifo->channels - 1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
return 0;
}
static void
nv04_fifo_init_reset(struct drm_device *dev)
{
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
nv_wr32(dev, 0x003224, 0x000f0078);
nv_wr32(dev, 0x002044, 0x0101ffff);
nv_wr32(dev, 0x002040, 0x000000ff);
nv_wr32(dev, 0x002500, 0x00000000);
nv_wr32(dev, 0x003000, 0x00000000);
nv_wr32(dev, 0x003050, 0x00000000);
nv_wr32(dev, 0x003200, 0x00000000);
nv_wr32(dev, 0x003250, 0x00000000);
nv_wr32(dev, 0x003220, 0x00000000);
nv_wr32(dev, 0x003250, 0x00000000);
nv_wr32(dev, 0x003270, 0x00000000);
nv_wr32(dev, 0x003210, 0x00000000);
}
static void
nv04_fifo_init_ramxx(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
(dev_priv->ramht->gpuobj->pinst >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
}
static void
nv04_fifo_init_intr(struct drm_device *dev)
{
nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
}
int
nv04_fifo_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int i;
nv04_fifo_init_reset(dev);
nv04_fifo_init_ramxx(dev);
nv04_fifo_do_load_context(dev, pfifo->channels - 1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
nv04_fifo_init_intr(dev);
pfifo->enable(dev);
pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
if (dev_priv->channels.ptr[i]) {
uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
}
}
return 0;
}
void
nv04_fifo_fini(struct drm_device *dev)
{
nv_wr32(dev, 0x2140, 0x00000000);
nouveau_irq_unregister(dev, 8);
}
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
struct nouveau_gpuobj *obj;
unsigned long flags;
const int subc = (addr >> 13) & 0x7;
const int mthd = addr & 0x1ffc;
bool handled = false;
u32 engine;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
chan = dev_priv->channels.ptr[chid];
if (unlikely(!chan))
goto out;
switch (mthd) {
case 0x0000: /* bind object to subchannel */
obj = nouveau_ramht_find(chan, data);
if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
break;
chan->sw_subchannel[subc] = obj->class;
engine = 0x0000000f << (subc * 4);
nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
handled = true;
break;
default:
engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
break;
if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
mthd, data))
handled = true;
break;
}
out:
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
return handled;
}
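/* Decode bits 31:29 of the DMA pusher state register into a
* human-readable string for the error messages below.
*/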
static const char *nv_dma_state_err(u32 state)
{
static const char * const desc[] = {
"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
};
return desc[(state >> 29) & 0x7];
}
void
nv04_fifo_isr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
uint32_t status, reassign;
int cnt = 0;
reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
uint32_t chid, get;
nv_wr32(dev, NV03_PFIFO_CACHES, 0);
chid = engine->fifo.channel_id(dev);
get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
uint32_t mthd, data;
int ptr;
/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
* wrapping on my G80 chips, but CACHE1 isn't big
* enough for this much data. Tests show that it
* wraps around to the start at GET=0x800. No clue
* as to why.
*/
ptr = (get & 0x7ff) >> 2;
if (dev_priv->card_type < NV_40) {
mthd = nv_rd32(dev,
NV04_PFIFO_CACHE1_METHOD(ptr));
data = nv_rd32(dev,
NV04_PFIFO_CACHE1_DATA(ptr));
} else {
mthd = nv_rd32(dev,
NV40_PFIFO_CACHE1_METHOD(ptr));
data = nv_rd32(dev,
NV40_PFIFO_CACHE1_DATA(ptr));
}
if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
"Mthd 0x%04x Data 0x%08x\n",
chid, (mthd >> 13) & 7, mthd & 0x1ffc,
data);
}
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
nv_wr32(dev, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_CACHE_ERROR);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
status &= ~NV_PFIFO_INTR_CACHE_ERROR;
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
u32 dma_get = nv_rd32(dev, 0x003244);
u32 dma_put = nv_rd32(dev, 0x003240);
u32 push = nv_rd32(dev, 0x003220);
u32 state = nv_rd32(dev, 0x003228);
if (dev_priv->card_type == NV_50) {
u32 ho_get = nv_rd32(dev, 0x003328);
u32 ho_put = nv_rd32(dev, 0x003320);
u32 ib_get = nv_rd32(dev, 0x003334);
u32 ib_put = nv_rd32(dev, 0x003330);
if (nouveau_ratelimit())
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
"State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
dma_put, ib_get, ib_put, state,
nv_dma_state_err(state),
push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
nv_wr32(dev, 0x003364, 0x00000000);
if (dma_get != dma_put || ho_get != ho_put) {
nv_wr32(dev, 0x003244, dma_put);
nv_wr32(dev, 0x003328, ho_put);
} else
if (ib_get != ib_put) {
nv_wr32(dev, 0x003334, ib_put);
}
} else {
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
"Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
chid, dma_get, dma_put, state,
nv_dma_state_err(state), push);
if (dma_get != dma_put)
nv_wr32(dev, 0x003244, dma_put);
}
nv_wr32(dev, 0x003228, 0x00000000);
nv_wr32(dev, 0x003220, 0x00000001);
nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
status &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
if (status & NV_PFIFO_INTR_SEMAPHORE) {
uint32_t sem;
status &= ~NV_PFIFO_INTR_SEMAPHORE;
nv_wr32(dev, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_SEMAPHORE);
sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
if (dev_priv->card_type == NV_50) {
if (status & 0x00000010) {
nv50_fb_vm_trap(dev, nouveau_ratelimit());
status &= ~0x00000010;
nv_wr32(dev, 0x002100, 0x00000010);
}
}
if (status) {
if (nouveau_ratelimit())
NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
status, chid);
nv_wr32(dev, NV03_PFIFO_INTR_0, status);
status = 0;
}
nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
}
if (status) {
NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
nv_wr32(dev, 0x2140, 0);
nv_wr32(dev, 0x140, 0);
}
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}
| gpl-2.0 |
cphelps76/elite_kernel_tuna | drivers/gpu/drm/nouveau/nv04_fifo.c | 8021 | 16051 | /*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_ramht.h"
#include "nouveau_util.h"
#define NV04_RAMFC(c) (dev_priv->ramfc->pinst + ((c) * NV04_RAMFC__SIZE))
#define NV04_RAMFC__SIZE 32
#define NV04_RAMFC_DMA_PUT 0x00
#define NV04_RAMFC_DMA_GET 0x04
#define NV04_RAMFC_DMA_INSTANCE 0x08
#define NV04_RAMFC_DMA_STATE 0x0C
#define NV04_RAMFC_DMA_FETCH 0x10
#define NV04_RAMFC_ENGINE 0x14
#define NV04_RAMFC_PULL1_ENGINE 0x18
#define RAMFC_WR(offset, val) nv_wo32(chan->ramfc, NV04_RAMFC_##offset, (val))
#define RAMFC_RD(offset) nv_ro32(chan->ramfc, NV04_RAMFC_##offset)
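/* Stop CACHE1 command processing: clear the DMA pusher's enable bit,
* then disable the pusher and puller themselves.
*/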
void
nv04_fifo_disable(struct drm_device *dev)
{
uint32_t tmp;
tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, tmp & ~1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
tmp = nv_rd32(dev, NV03_PFIFO_CACHE1_PULL1);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, tmp & ~1);
}
void
nv04_fifo_enable(struct drm_device *dev)
{
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
bool
nv04_fifo_reassign(struct drm_device *dev, bool enable)
{
uint32_t reassign = nv_rd32(dev, NV03_PFIFO_CACHES);
nv_wr32(dev, NV03_PFIFO_CACHES, enable ? 1 : 0);
return (reassign == 1);
}
bool
nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
{
int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
if (!enable) {
/* In some cases the PFIFO puller may be left in an
* inconsistent state if you try to stop it when it's
* busy translating handles. Sometimes you get a
* PFIFO_CACHE_ERROR, sometimes it just fails silently
* sending incorrect instance offsets to PGRAPH after
* it's started up again. To avoid the latter we
* invalidate the most recently calculated instance.
*/
if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
nv_wr32(dev, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_CACHE_ERROR);
nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
}
return pull & 1;
}
int
nv04_fifo_channel_id(struct drm_device *dev)
{
return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
NV03_PFIFO_CACHE1_PUSH1_CHID_MASK;
}
#ifdef __BIG_ENDIAN
#define DMA_FETCH_ENDIANNESS NV_PFIFO_CACHE1_BIG_ENDIAN
#else
#define DMA_FETCH_ENDIANNESS 0
#endif
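/* Allocate the channel's RAMFC entry and map its USER control page,
* then seed the RAMFC with the initial DMA state (put/get at the push
* buffer base) and switch the channel to DMA mode.
*/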
int
nv04_fifo_create_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
unsigned long flags;
int ret;
ret = nouveau_gpuobj_new_fake(dev, NV04_RAMFC(chan->id), ~0,
NV04_RAMFC__SIZE,
NVOBJ_FLAG_ZERO_ALLOC |
NVOBJ_FLAG_ZERO_FREE,
&chan->ramfc);
if (ret)
return ret;
chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
NV03_USER(chan->id), PAGE_SIZE);
if (!chan->user)
return -ENOMEM;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
/* Setup initial state */
RAMFC_WR(DMA_PUT, chan->pushbuf_base);
RAMFC_WR(DMA_GET, chan->pushbuf_base);
RAMFC_WR(DMA_INSTANCE, chan->pushbuf->pinst >> 4);
RAMFC_WR(DMA_FETCH, (NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8 |
DMA_FETCH_ENDIANNESS));
/* enable the fifo dma operation */
nv_wr32(dev, NV04_PFIFO_MODE,
nv_rd32(dev, NV04_PFIFO_MODE) | (1 << chan->id));
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
return 0;
}
void
nv04_fifo_destroy_context(struct nouveau_channel *chan)
{
struct drm_device *dev = chan->dev;
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
unsigned long flags;
spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
pfifo->reassign(dev, false);
/* Unload the context if it's the currently active one */
if (pfifo->channel_id(dev) == chan->id) {
pfifo->disable(dev);
pfifo->unload_context(dev);
pfifo->enable(dev);
}
/* Keep it from being rescheduled */
nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
pfifo->reassign(dev, true);
spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
/* Free the channel resources */
if (chan->user) {
iounmap(chan->user);
chan->user = NULL;
}
nouveau_gpuobj_ref(NULL, &chan->ramfc);
}
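/* Copy a channel's saved state from its RAMFC entry into the CACHE1
* DMA registers and reset the cache get/put pointers.
*/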
static void
nv04_fifo_do_load_context(struct drm_device *dev, int chid)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t fc = NV04_RAMFC(chid), tmp;
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUT, nv_ri32(dev, fc + 0));
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET, nv_ri32(dev, fc + 4));
tmp = nv_ri32(dev, fc + 8);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE, tmp & 0xFFFF);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT, tmp >> 16);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, nv_ri32(dev, fc + 12));
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_FETCH, nv_ri32(dev, fc + 16));
nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_ri32(dev, fc + 20));
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL1, nv_ri32(dev, fc + 24));
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
}
int
nv04_fifo_load_context(struct nouveau_channel *chan)
{
uint32_t tmp;
nv_wr32(chan->dev, NV03_PFIFO_CACHE1_PUSH1,
NV03_PFIFO_CACHE1_PUSH1_DMA | chan->id);
nv04_fifo_do_load_context(chan->dev, chan->id);
nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_PUSH, 1);
/* Reset NV04_PFIFO_CACHE1_DMA_CTL_AT_INFO to INVALID */
tmp = nv_rd32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL) & ~(1 << 31);
nv_wr32(chan->dev, NV04_PFIFO_CACHE1_DMA_CTL, tmp);
return 0;
}
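/* Save the active channel's CACHE1 DMA state back into its RAMFC
* entry, then switch to the dummy (highest numbered) channel.
*/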
int
nv04_fifo_unload_context(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
struct nouveau_channel *chan = NULL;
uint32_t tmp;
int chid;
chid = pfifo->channel_id(dev);
if (chid < 0 || chid >= dev_priv->engine.fifo.channels)
return 0;
chan = dev_priv->channels.ptr[chid];
if (!chan) {
NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
return -EINVAL;
}
RAMFC_WR(DMA_PUT, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
RAMFC_WR(DMA_GET, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET));
tmp = nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_DCOUNT) << 16;
tmp |= nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_INSTANCE);
RAMFC_WR(DMA_INSTANCE, tmp);
RAMFC_WR(DMA_STATE, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_STATE));
RAMFC_WR(DMA_FETCH, nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_FETCH));
RAMFC_WR(ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE));
RAMFC_WR(PULL1_ENGINE, nv_rd32(dev, NV04_PFIFO_CACHE1_PULL1));
nv04_fifo_do_load_context(dev, pfifo->channels - 1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
return 0;
}
static void
nv04_fifo_init_reset(struct drm_device *dev)
{
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) & ~NV_PMC_ENABLE_PFIFO);
nv_wr32(dev, NV03_PMC_ENABLE,
nv_rd32(dev, NV03_PMC_ENABLE) | NV_PMC_ENABLE_PFIFO);
nv_wr32(dev, 0x003224, 0x000f0078);
nv_wr32(dev, 0x002044, 0x0101ffff);
nv_wr32(dev, 0x002040, 0x000000ff);
nv_wr32(dev, 0x002500, 0x00000000);
nv_wr32(dev, 0x003000, 0x00000000);
nv_wr32(dev, 0x003050, 0x00000000);
nv_wr32(dev, 0x003200, 0x00000000);
nv_wr32(dev, 0x003250, 0x00000000);
nv_wr32(dev, 0x003220, 0x00000000);
nv_wr32(dev, 0x003250, 0x00000000);
nv_wr32(dev, 0x003270, 0x00000000);
nv_wr32(dev, 0x003210, 0x00000000);
}
static void
nv04_fifo_init_ramxx(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
((dev_priv->ramht->bits - 9) << 16) |
(dev_priv->ramht->gpuobj->pinst >> 8));
nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
}
static void
nv04_fifo_init_intr(struct drm_device *dev)
{
nouveau_irq_register(dev, 8, nv04_fifo_isr);
nv_wr32(dev, 0x002100, 0xffffffff);
nv_wr32(dev, 0x002140, 0xffffffff);
}
int
nv04_fifo_init(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
int i;
nv04_fifo_init_reset(dev);
nv04_fifo_init_ramxx(dev);
nv04_fifo_do_load_context(dev, pfifo->channels - 1);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, pfifo->channels - 1);
nv04_fifo_init_intr(dev);
pfifo->enable(dev);
pfifo->reassign(dev, true);
for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
if (dev_priv->channels.ptr[i]) {
uint32_t mode = nv_rd32(dev, NV04_PFIFO_MODE);
nv_wr32(dev, NV04_PFIFO_MODE, mode | (1 << i));
}
}
return 0;
}
void
nv04_fifo_fini(struct drm_device *dev)
{
nv_wr32(dev, 0x2140, 0x00000000);
nouveau_irq_unregister(dev, 8);
}
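/* Try to execute a software method: method 0x0000 binds a software
* object to a subchannel; anything else is forwarded to the bound
* object's method handler, provided the subchannel is owned by the
* software engine.  Returns true if the method was handled.
*/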
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_channel *chan = NULL;
struct nouveau_gpuobj *obj;
unsigned long flags;
const int subc = (addr >> 13) & 0x7;
const int mthd = addr & 0x1ffc;
bool handled = false;
u32 engine;
spin_lock_irqsave(&dev_priv->channels.lock, flags);
if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
chan = dev_priv->channels.ptr[chid];
if (unlikely(!chan))
goto out;
switch (mthd) {
case 0x0000: /* bind object to subchannel */
obj = nouveau_ramht_find(chan, data);
if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
break;
chan->sw_subchannel[subc] = obj->class;
engine = 0x0000000f << (subc * 4);
nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
handled = true;
break;
default:
engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
break;
if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
mthd, data))
handled = true;
break;
}
out:
spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
return handled;
}
static const char *nv_dma_state_err(u32 state)
{
static const char * const desc[] = {
"NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
"INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
};
return desc[(state >> 29) & 0x7];
}
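/* PFIFO interrupt handler.  Services CACHE_ERROR (trying software
* methods first), DMA_PUSHER and SEMAPHORE interrupts; if an
* interrupt refuses to clear after 100 iterations, PFIFO interrupts
* are masked off entirely.
*/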
void
nv04_fifo_isr(struct drm_device *dev)
{
struct drm_nouveau_private *dev_priv = dev->dev_private;
struct nouveau_engine *engine = &dev_priv->engine;
uint32_t status, reassign;
int cnt = 0;
reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
uint32_t chid, get;
nv_wr32(dev, NV03_PFIFO_CACHES, 0);
chid = engine->fifo.channel_id(dev);
get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
if (status & NV_PFIFO_INTR_CACHE_ERROR) {
uint32_t mthd, data;
int ptr;
/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
* wrapping on my G80 chips, but CACHE1 isn't big
* enough for this much data. Tests show that it
* wraps around to the start at GET=0x800. No clue
* as to why.
*/
ptr = (get & 0x7ff) >> 2;
if (dev_priv->card_type < NV_40) {
mthd = nv_rd32(dev,
NV04_PFIFO_CACHE1_METHOD(ptr));
data = nv_rd32(dev,
NV04_PFIFO_CACHE1_DATA(ptr));
} else {
mthd = nv_rd32(dev,
NV40_PFIFO_CACHE1_METHOD(ptr));
data = nv_rd32(dev,
NV40_PFIFO_CACHE1_DATA(ptr));
}
if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
"Mthd 0x%04x Data 0x%08x\n",
chid, (mthd >> 13) & 7, mthd & 0x1ffc,
data);
}
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
nv_wr32(dev, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_CACHE_ERROR);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
status &= ~NV_PFIFO_INTR_CACHE_ERROR;
}
if (status & NV_PFIFO_INTR_DMA_PUSHER) {
u32 dma_get = nv_rd32(dev, 0x003244);
u32 dma_put = nv_rd32(dev, 0x003240);
u32 push = nv_rd32(dev, 0x003220);
u32 state = nv_rd32(dev, 0x003228);
if (dev_priv->card_type == NV_50) {
u32 ho_get = nv_rd32(dev, 0x003328);
u32 ho_put = nv_rd32(dev, 0x003320);
u32 ib_get = nv_rd32(dev, 0x003334);
u32 ib_put = nv_rd32(dev, 0x003330);
if (nouveau_ratelimit())
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
"State 0x%08x (err: %s) Push 0x%08x\n",
chid, ho_get, dma_get, ho_put,
dma_put, ib_get, ib_put, state,
nv_dma_state_err(state),
push);
/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
nv_wr32(dev, 0x003364, 0x00000000);
if (dma_get != dma_put || ho_get != ho_put) {
nv_wr32(dev, 0x003244, dma_put);
nv_wr32(dev, 0x003328, ho_put);
} else
if (ib_get != ib_put) {
nv_wr32(dev, 0x003334, ib_put);
}
} else {
NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
"Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
chid, dma_get, dma_put, state,
nv_dma_state_err(state), push);
if (dma_get != dma_put)
nv_wr32(dev, 0x003244, dma_put);
}
nv_wr32(dev, 0x003228, 0x00000000);
nv_wr32(dev, 0x003220, 0x00000001);
nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
status &= ~NV_PFIFO_INTR_DMA_PUSHER;
}
if (status & NV_PFIFO_INTR_SEMAPHORE) {
uint32_t sem;
status &= ~NV_PFIFO_INTR_SEMAPHORE;
nv_wr32(dev, NV03_PFIFO_INTR_0,
NV_PFIFO_INTR_SEMAPHORE);
sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
}
if (dev_priv->card_type == NV_50) {
if (status & 0x00000010) {
nv50_fb_vm_trap(dev, nouveau_ratelimit());
status &= ~0x00000010;
nv_wr32(dev, 0x002100, 0x00000010);
}
}
if (status) {
if (nouveau_ratelimit())
NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
status, chid);
nv_wr32(dev, NV03_PFIFO_INTR_0, status);
status = 0;
}
nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
}
if (status) {
NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
nv_wr32(dev, 0x2140, 0);
nv_wr32(dev, 0x140, 0);
}
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}
| gpl-2.0 |
arighi/linux-gt-i9100 | arch/arm/plat-iop/setup.c | 9557 | 1124 | /*
* arch/arm/plat-iop/setup.c
*
* Author: Nicolas Pitre <nico@fluxnic.net>
* Copyright (C) 2001 MontaVista Software, Inc.
* Copyright (C) 2004 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/mm.h>
#include <linux/init.h>
#include <asm/mach/map.h>
#include <asm/hardware/iop3xx.h>
/*
* Standard IO mapping for all IOP3xx based systems. Note that
* the IOP3xx OCCDR must be mapped uncached and unbuffered.
*/
static struct map_desc iop3xx_std_desc[] __initdata = {
{ /* mem mapped registers */
.virtual = IOP3XX_PERIPHERAL_VIRT_BASE,
.pfn = __phys_to_pfn(IOP3XX_PERIPHERAL_PHYS_BASE),
.length = IOP3XX_PERIPHERAL_SIZE,
.type = MT_UNCACHED,
}, { /* PCI IO space */
.virtual = IOP3XX_PCI_LOWER_IO_VA,
.pfn = __phys_to_pfn(IOP3XX_PCI_LOWER_IO_PA),
.length = IOP3XX_PCI_IO_WINDOW_SIZE,
.type = MT_DEVICE,
},
};
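/* Install the static mappings described above during early boot. */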
void __init iop3xx_map_io(void)
{
iotable_init(iop3xx_std_desc, ARRAY_SIZE(iop3xx_std_desc));
}
| gpl-2.0 |
zarboz/android_kernel_htc_dlx | sound/ppc/beep.c | 9813 | 7937 | /*
* Beep using pcm
*
* Copyright (c) by Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/io.h>
#include <asm/irq.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <sound/control.h>
#include "pmac.h"
struct pmac_beep {
int running; /* boolean */
int volume; /* mixer volume: 0-100 */
int volume_play; /* currently playing volume */
int hz;
int nsamples;
short *buf; /* allocated wave buffer */
dma_addr_t addr; /* physical address of buffer */
struct input_dev *dev;
};
/*
* stop beep if running
*/
void snd_pmac_beep_stop(struct snd_pmac *chip)
{
struct pmac_beep *beep = chip->beep;
if (beep && beep->running) {
beep->running = 0;
snd_pmac_beep_dma_stop(chip);
}
}
/*
* Stuff for outputting a beep. The values range from -327 to +327
* so we can multiply by an amplitude in the range 0..100 to get a
* signed short value to put in the output buffer.
*/
static short beep_wform[256] = {
0, 40, 79, 117, 153, 187, 218, 245,
269, 288, 304, 316, 323, 327, 327, 324,
318, 310, 299, 288, 275, 262, 249, 236,
224, 213, 204, 196, 190, 186, 183, 182,
182, 183, 186, 189, 192, 196, 200, 203,
206, 208, 209, 209, 209, 207, 204, 201,
197, 193, 188, 183, 179, 174, 170, 166,
163, 161, 160, 159, 159, 160, 161, 162,
164, 166, 168, 169, 171, 171, 171, 170,
169, 167, 163, 159, 155, 150, 144, 139,
133, 128, 122, 117, 113, 110, 107, 105,
103, 103, 103, 103, 104, 104, 105, 105,
105, 103, 101, 97, 92, 86, 78, 68,
58, 45, 32, 18, 3, -11, -26, -41,
-55, -68, -79, -88, -95, -100, -102, -102,
-99, -93, -85, -75, -62, -48, -33, -16,
0, 16, 33, 48, 62, 75, 85, 93,
99, 102, 102, 100, 95, 88, 79, 68,
55, 41, 26, 11, -3, -18, -32, -45,
-58, -68, -78, -86, -92, -97, -101, -103,
-105, -105, -105, -104, -104, -103, -103, -103,
-103, -105, -107, -110, -113, -117, -122, -128,
-133, -139, -144, -150, -155, -159, -163, -167,
-169, -170, -171, -171, -171, -169, -168, -166,
-164, -162, -161, -160, -159, -159, -160, -161,
-163, -166, -170, -174, -179, -183, -188, -193,
-197, -201, -204, -207, -209, -209, -209, -208,
-206, -203, -200, -196, -192, -189, -186, -183,
-182, -182, -183, -186, -190, -196, -204, -213,
-224, -236, -249, -262, -275, -288, -299, -310,
-318, -324, -327, -327, -323, -316, -304, -288,
-269, -245, -218, -187, -153, -117, -79, -40,
};
#define BEEP_SRATE 22050 /* 22050 Hz sample rate */
#define BEEP_BUFLEN 512
#define BEEP_VOLUME 15 /* 0 - 100 */
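/*
* Input-layer event handler: synthesize a tone at the requested
* frequency by stepping through beep_wform with a 16-bit fixed-point
* phase accumulator, scale it by the mixer volume, and kick off DMA
* on the resulting stereo buffer.
*/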
static int snd_pmac_beep_event(struct input_dev *dev, unsigned int type,
unsigned int code, int hz)
{
struct snd_pmac *chip;
struct pmac_beep *beep;
unsigned long flags;
int beep_speed = 0;
int srate;
int period, ncycles, nsamples;
int i, j, f;
short *p;
if (type != EV_SND)
return -1;
switch (code) {
case SND_BELL: if (hz) hz = 1000;
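/* fall through */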
case SND_TONE: break;
default: return -1;
}
chip = input_get_drvdata(dev);
if (! chip || (beep = chip->beep) == NULL)
return -1;
if (! hz) {
spin_lock_irqsave(&chip->reg_lock, flags);
if (beep->running)
snd_pmac_beep_stop(chip);
spin_unlock_irqrestore(&chip->reg_lock, flags);
return 0;
}
beep_speed = snd_pmac_rate_index(chip, &chip->playback, BEEP_SRATE);
srate = chip->freq_table[beep_speed];
if (hz <= srate / BEEP_BUFLEN || hz > srate / 2)
hz = 1000;
spin_lock_irqsave(&chip->reg_lock, flags);
if (chip->playback.running || chip->capture.running || beep->running) {
spin_unlock_irqrestore(&chip->reg_lock, flags);
return 0;
}
beep->running = 1;
spin_unlock_irqrestore(&chip->reg_lock, flags);
if (hz == beep->hz && beep->volume == beep->volume_play) {
nsamples = beep->nsamples;
} else {
period = srate * 256 / hz; /* fixed point */
ncycles = BEEP_BUFLEN * 256 / period;
nsamples = (period * ncycles) >> 8;
f = ncycles * 65536 / nsamples;
j = 0;
p = beep->buf;
for (i = 0; i < nsamples; ++i, p += 2) {
p[0] = p[1] = beep_wform[j >> 8] * beep->volume;
j = (j + f) & 0xffff;
}
beep->hz = hz;
beep->volume_play = beep->volume;
beep->nsamples = nsamples;
}
spin_lock_irqsave(&chip->reg_lock, flags);
snd_pmac_beep_dma_start(chip, beep->nsamples * 4, beep->addr, beep_speed);
spin_unlock_irqrestore(&chip->reg_lock, flags);
return 0;
}
/*
* beep volume mixer
*/
static int snd_pmac_info_beep(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo)
{
uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
uinfo->count = 1;
uinfo->value.integer.min = 0;
uinfo->value.integer.max = 100;
return 0;
}
static int snd_pmac_get_beep(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
if (snd_BUG_ON(!chip->beep))
return -ENXIO;
ucontrol->value.integer.value[0] = chip->beep->volume;
return 0;
}
static int snd_pmac_put_beep(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_pmac *chip = snd_kcontrol_chip(kcontrol);
unsigned int oval, nval;
if (snd_BUG_ON(!chip->beep))
return -ENXIO;
oval = chip->beep->volume;
nval = ucontrol->value.integer.value[0];
if (nval > 100)
return -EINVAL;
chip->beep->volume = nval;
return oval != chip->beep->volume;
}
static struct snd_kcontrol_new snd_pmac_beep_mixer = {
.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
.name = "Beep Playback Volume",
.info = snd_pmac_info_beep,
.get = snd_pmac_get_beep,
.put = snd_pmac_put_beep,
};
/* Initialize beep stuff */
int __devinit snd_pmac_attach_beep(struct snd_pmac *chip)
{
struct pmac_beep *beep;
struct input_dev *input_dev;
struct snd_kcontrol *beep_ctl;
void *dmabuf;
int err = -ENOMEM;
beep = kzalloc(sizeof(*beep), GFP_KERNEL);
if (! beep)
return -ENOMEM;
dmabuf = dma_alloc_coherent(&chip->pdev->dev, BEEP_BUFLEN * 4,
&beep->addr, GFP_KERNEL);
input_dev = input_allocate_device();
if (! dmabuf || ! input_dev)
goto fail1;
/* FIXME: set better values */
input_dev->name = "PowerMac Beep";
input_dev->phys = "powermac/beep";
input_dev->id.bustype = BUS_ADB;
input_dev->id.vendor = 0x001f;
input_dev->id.product = 0x0001;
input_dev->id.version = 0x0100;
input_dev->evbit[0] = BIT_MASK(EV_SND);
input_dev->sndbit[0] = BIT_MASK(SND_BELL) | BIT_MASK(SND_TONE);
input_dev->event = snd_pmac_beep_event;
input_dev->dev.parent = &chip->pdev->dev;
input_set_drvdata(input_dev, chip);
beep->dev = input_dev;
beep->buf = dmabuf;
beep->volume = BEEP_VOLUME;
beep->running = 0;
beep_ctl = snd_ctl_new1(&snd_pmac_beep_mixer, chip);
err = snd_ctl_add(chip->card, beep_ctl);
if (err < 0)
goto fail1;
chip->beep = beep;
err = input_register_device(beep->dev);
if (err)
goto fail2;
return 0;
fail2: snd_ctl_remove(chip->card, beep_ctl);
fail1: input_free_device(input_dev);
if (dmabuf)
dma_free_coherent(&chip->pdev->dev, BEEP_BUFLEN * 4,
dmabuf, beep->addr);
kfree(beep);
return err;
}
void snd_pmac_detach_beep(struct snd_pmac *chip)
{
if (chip->beep) {
input_unregister_device(chip->beep->dev);
dma_free_coherent(&chip->pdev->dev, BEEP_BUFLEN * 4,
chip->beep->buf, chip->beep->addr);
kfree(chip->beep);
chip->beep = NULL;
}
}
| gpl-2.0 |
DerArtem/android-tegra-2.6.36-honeycomb-folio-nvidia | fs/udf/super.c | 86 | 62153 | /*
* super.c
*
* PURPOSE
* Super block routines for the OSTA-UDF(tm) filesystem.
*
* DESCRIPTION
* OSTA-UDF(tm) = Optical Storage Technology Association
* Universal Disk Format.
*
* This code is based on version 2.00 of the UDF specification,
* and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
* http://www.osta.org/
* http://www.ecma.ch/
* http://www.iso.org/
*
* COPYRIGHT
* This file is distributed under the terms of the GNU General Public
* License (GPL). Copies of the GPL can be obtained from:
* ftp://prep.ai.mit.edu/pub/gnu/GPL
* Each contributing author retains all rights to their own work.
*
* (C) 1998 Dave Boynton
* (C) 1998-2004 Ben Fennema
* (C) 2000 Stelias Computing Inc
*
* HISTORY
*
* 09/24/98 dgb changed to allow compiling outside of kernel, and
* added some debugging.
* 10/01/98 dgb updated to allow (some) possibility of compiling w/2.0.34
* 10/16/98 attempting some multi-session support
* 10/17/98 added freespace count for "df"
* 11/11/98 gr added novrs option
* 11/26/98 dgb added fileset,anchor mount options
* 12/06/98 blf really hosed things royally. vat/sparing support. sequenced
* vol descs. rewrote option handling based on isofs
* 12/20/98 find the free space bitmap (if it exists)
*/
#include "udfdecl.h"
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/stat.h>
#include <linux/cdrom.h>
#include <linux/nls.h>
#include <linux/smp_lock.h>
#include <linux/buffer_head.h>
#include <linux/vfs.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/bitmap.h>
#include <linux/crc-itu-t.h>
#include <asm/byteorder.h>
#include "udf_sb.h"
#include "udf_i.h"
#include <linux/init.h>
#include <asm/uaccess.h>
#define VDS_POS_PRIMARY_VOL_DESC 0
#define VDS_POS_UNALLOC_SPACE_DESC 1
#define VDS_POS_LOGICAL_VOL_DESC 2
#define VDS_POS_PARTITION_DESC 3
#define VDS_POS_IMP_USE_VOL_DESC 4
#define VDS_POS_VOL_DESC_PTR 5
#define VDS_POS_TERMINATING_DESC 6
#define VDS_POS_LENGTH 7
#define UDF_DEFAULT_BLOCKSIZE 2048
static char error_buf[1024];
/* These are the "meat" - everything else is stuffing */
static int udf_fill_super(struct super_block *, void *, int);
static void udf_put_super(struct super_block *);
static int udf_sync_fs(struct super_block *, int);
static int udf_remount_fs(struct super_block *, int *, char *);
static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
static int udf_find_fileset(struct super_block *, struct kernel_lb_addr *,
struct kernel_lb_addr *);
static void udf_load_fileset(struct super_block *, struct buffer_head *,
struct kernel_lb_addr *);
static void udf_open_lvid(struct super_block *);
static void udf_close_lvid(struct super_block *);
static unsigned int udf_count_free(struct super_block *);
static int udf_statfs(struct dentry *, struct kstatfs *);
static int udf_show_options(struct seq_file *, struct vfsmount *);
static void udf_error(struct super_block *sb, const char *function,
const char *fmt, ...);
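/* The implementation use area of the LVID follows the free space and
* size tables, which hold one uint32_t per partition each.
*/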
struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct udf_sb_info *sbi)
{
struct logicalVolIntegrityDesc *lvid =
(struct logicalVolIntegrityDesc *)sbi->s_lvid_bh->b_data;
__u32 number_of_partitions = le32_to_cpu(lvid->numOfPartitions);
__u32 offset = number_of_partitions * 2 *
sizeof(uint32_t)/sizeof(uint8_t);
return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
}
/* UDF filesystem type */
static int udf_get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
struct vfsmount *mnt)
{
return get_sb_bdev(fs_type, flags, dev_name, data, udf_fill_super, mnt);
}
static struct file_system_type udf_fstype = {
.owner = THIS_MODULE,
.name = "udf",
.get_sb = udf_get_sb,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV,
};
static struct kmem_cache *udf_inode_cachep;
static struct inode *udf_alloc_inode(struct super_block *sb)
{
struct udf_inode_info *ei;
ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
if (!ei)
return NULL;
ei->i_unique = 0;
ei->i_lenExtents = 0;
ei->i_next_alloc_block = 0;
ei->i_next_alloc_goal = 0;
ei->i_strat4096 = 0;
return &ei->vfs_inode;
}
static void udf_destroy_inode(struct inode *inode)
{
kmem_cache_free(udf_inode_cachep, UDF_I(inode));
}
static void init_once(void *foo)
{
struct udf_inode_info *ei = (struct udf_inode_info *)foo;
ei->i_ext.i_data = NULL;
inode_init_once(&ei->vfs_inode);
}
static int init_inodecache(void)
{
udf_inode_cachep = kmem_cache_create("udf_inode_cache",
sizeof(struct udf_inode_info),
0, (SLAB_RECLAIM_ACCOUNT |
SLAB_MEM_SPREAD),
init_once);
if (!udf_inode_cachep)
return -ENOMEM;
return 0;
}
static void destroy_inodecache(void)
{
kmem_cache_destroy(udf_inode_cachep);
}
/* Superblock operations */
static const struct super_operations udf_sb_ops = {
.alloc_inode = udf_alloc_inode,
.destroy_inode = udf_destroy_inode,
.write_inode = udf_write_inode,
.evict_inode = udf_evict_inode,
.put_super = udf_put_super,
.sync_fs = udf_sync_fs,
.statfs = udf_statfs,
.remount_fs = udf_remount_fs,
.show_options = udf_show_options,
};
struct udf_options {
unsigned char novrs;
unsigned int blocksize;
unsigned int session;
unsigned int lastblock;
unsigned int anchor;
unsigned int volume;
unsigned short partition;
unsigned int fileset;
unsigned int rootdir;
unsigned int flags;
mode_t umask;
gid_t gid;
uid_t uid;
mode_t fmode;
mode_t dmode;
struct nls_table *nls_map;
};
static int __init init_udf_fs(void)
{
int err;
err = init_inodecache();
if (err)
goto out1;
err = register_filesystem(&udf_fstype);
if (err)
goto out;
return 0;
out:
destroy_inodecache();
out1:
return err;
}
static void __exit exit_udf_fs(void)
{
unregister_filesystem(&udf_fstype);
destroy_inodecache();
}
module_init(init_udf_fs)
module_exit(exit_udf_fs)
static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
{
struct udf_sb_info *sbi = UDF_SB(sb);
sbi->s_partmaps = kcalloc(count, sizeof(struct udf_part_map),
GFP_KERNEL);
if (!sbi->s_partmaps) {
udf_error(sb, __func__,
"Unable to allocate space for %d partition maps",
count);
sbi->s_partitions = 0;
return -ENOMEM;
}
sbi->s_partitions = count;
return 0;
}
static int udf_show_options(struct seq_file *seq, struct vfsmount *mnt)
{
struct super_block *sb = mnt->mnt_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
seq_puts(seq, ",nostrict");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
seq_printf(seq, ",bs=%lu", sb->s_blocksize);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
seq_puts(seq, ",unhide");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
seq_puts(seq, ",undelete");
if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
seq_puts(seq, ",noadinicb");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
seq_puts(seq, ",shortad");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
seq_puts(seq, ",uid=forget");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_IGNORE))
seq_puts(seq, ",uid=ignore");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
seq_puts(seq, ",gid=forget");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_IGNORE))
seq_puts(seq, ",gid=ignore");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
seq_printf(seq, ",uid=%u", sbi->s_uid);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
seq_printf(seq, ",gid=%u", sbi->s_gid);
if (sbi->s_umask != 0)
seq_printf(seq, ",umask=%o", sbi->s_umask);
if (sbi->s_fmode != UDF_INVALID_MODE)
seq_printf(seq, ",mode=%o", sbi->s_fmode);
if (sbi->s_dmode != UDF_INVALID_MODE)
seq_printf(seq, ",dmode=%o", sbi->s_dmode);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
seq_printf(seq, ",session=%u", sbi->s_session);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
if (sbi->s_anchor != 0)
seq_printf(seq, ",anchor=%u", sbi->s_anchor);
/*
* volume, partition, fileset and rootdir seem to be ignored
* currently
*/
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
seq_puts(seq, ",utf8");
if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
return 0;
}
/*
* udf_parse_options
*
* PURPOSE
* Parse mount options.
*
* DESCRIPTION
* The following mount options are supported:
*
* gid= Set the default group.
* umask= Set the default umask.
* mode= Set the default file permissions.
* dmode= Set the default directory permissions.
* uid= Set the default user.
* bs= Set the block size.
* unhide Show otherwise hidden files.
* undelete Show deleted files in lists.
* adinicb Embed data in the inode (default)
* noadinicb Don't embed data in the inode
* shortad Use short ad's
* longad Use long ad's (default)
* nostrict Unset strict conformance
* iocharset= Set the NLS character set
*
* The remaining are for debugging and disaster recovery:
*
* novrs Skip volume sequence recognition
*
* The following expect an offset from 0.
*
* session= Set the CDROM session (default= last session)
* anchor= Override standard anchor location. (default= 256)
* volume= Override the VolumeDesc location. (unused)
* partition= Override the PartitionDesc location. (unused)
* lastblock= Set the last block of the filesystem.
*
* The following expect an offset from the partition root.
*
* fileset= Override the fileset block location. (unused)
* rootdir= Override the root directory location. (unused)
* WARNING: overriding the rootdir to a non-directory may
* yield highly unpredictable results.
*
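* For example, a typical invocation from userspace might be:
*
* mount -t udf -o utf8,umask=0022 /dev/sr0 /mnt
*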
* PRE-CONDITIONS
* options Pointer to mount options string.
* uopts Pointer to mount options variable.
*
* POST-CONDITIONS
* <return> 1 Mount options parsed okay.
* <return> 0 Error parsing mount options.
*
* HISTORY
* July 1, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
enum {
Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
Opt_rootdir, Opt_utf8, Opt_iocharset,
Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
Opt_fmode, Opt_dmode
};
static const match_table_t tokens = {
{Opt_novrs, "novrs"},
{Opt_nostrict, "nostrict"},
{Opt_bs, "bs=%u"},
{Opt_unhide, "unhide"},
{Opt_undelete, "undelete"},
{Opt_noadinicb, "noadinicb"},
{Opt_adinicb, "adinicb"},
{Opt_shortad, "shortad"},
{Opt_longad, "longad"},
{Opt_uforget, "uid=forget"},
{Opt_uignore, "uid=ignore"},
{Opt_gforget, "gid=forget"},
{Opt_gignore, "gid=ignore"},
{Opt_gid, "gid=%u"},
{Opt_uid, "uid=%u"},
{Opt_umask, "umask=%o"},
{Opt_session, "session=%u"},
{Opt_lastblock, "lastblock=%u"},
{Opt_anchor, "anchor=%u"},
{Opt_volume, "volume=%u"},
{Opt_partition, "partition=%u"},
{Opt_fileset, "fileset=%u"},
{Opt_rootdir, "rootdir=%u"},
{Opt_utf8, "utf8"},
{Opt_iocharset, "iocharset=%s"},
{Opt_fmode, "mode=%o"},
{Opt_dmode, "dmode=%o"},
{Opt_err, NULL}
};
static int udf_parse_options(char *options, struct udf_options *uopt,
bool remount)
{
char *p;
int option;
uopt->novrs = 0;
uopt->partition = 0xFFFF;
uopt->session = 0xFFFFFFFF;
uopt->lastblock = 0;
uopt->anchor = 0;
uopt->volume = 0xFFFFFFFF;
uopt->rootdir = 0xFFFFFFFF;
uopt->fileset = 0xFFFFFFFF;
uopt->nls_map = NULL;
if (!options)
return 1;
while ((p = strsep(&options, ",")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_novrs:
uopt->novrs = 1;
break;
case Opt_bs:
if (match_int(&args[0], &option))
return 0;
uopt->blocksize = option;
uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
break;
case Opt_unhide:
uopt->flags |= (1 << UDF_FLAG_UNHIDE);
break;
case Opt_undelete:
uopt->flags |= (1 << UDF_FLAG_UNDELETE);
break;
case Opt_noadinicb:
uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
break;
case Opt_adinicb:
uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
break;
case Opt_shortad:
uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
break;
case Opt_longad:
uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
break;
case Opt_gid:
if (match_int(args, &option))
return 0;
uopt->gid = option;
uopt->flags |= (1 << UDF_FLAG_GID_SET);
break;
case Opt_uid:
if (match_int(args, &option))
return 0;
uopt->uid = option;
uopt->flags |= (1 << UDF_FLAG_UID_SET);
break;
case Opt_umask:
if (match_octal(args, &option))
return 0;
uopt->umask = option;
break;
case Opt_nostrict:
uopt->flags &= ~(1 << UDF_FLAG_STRICT);
break;
case Opt_session:
if (match_int(args, &option))
return 0;
uopt->session = option;
if (!remount)
uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
break;
case Opt_lastblock:
if (match_int(args, &option))
return 0;
uopt->lastblock = option;
if (!remount)
uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
break;
case Opt_anchor:
if (match_int(args, &option))
return 0;
uopt->anchor = option;
break;
case Opt_volume:
if (match_int(args, &option))
return 0;
uopt->volume = option;
break;
case Opt_partition:
if (match_int(args, &option))
return 0;
uopt->partition = option;
break;
case Opt_fileset:
if (match_int(args, &option))
return 0;
uopt->fileset = option;
break;
case Opt_rootdir:
if (match_int(args, &option))
return 0;
uopt->rootdir = option;
break;
case Opt_utf8:
uopt->flags |= (1 << UDF_FLAG_UTF8);
break;
#ifdef CONFIG_UDF_NLS
case Opt_iocharset:
uopt->nls_map = load_nls(args[0].from);
uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
break;
#endif
case Opt_uignore:
uopt->flags |= (1 << UDF_FLAG_UID_IGNORE);
break;
case Opt_uforget:
uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
break;
case Opt_gignore:
uopt->flags |= (1 << UDF_FLAG_GID_IGNORE);
break;
case Opt_gforget:
uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
break;
case Opt_fmode:
if (match_octal(args, &option))
return 0;
uopt->fmode = option & 0777;
break;
case Opt_dmode:
if (match_octal(args, &option))
return 0;
uopt->dmode = option & 0777;
break;
default:
printk(KERN_ERR "udf: bad mount option \"%s\" "
"or missing value\n", p);
return 0;
}
}
return 1;
}
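/* Re-parse the mount options and open or close the Logical Volume
* Integrity Descriptor if the read-only state of the mount changes.
*/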
static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
{
struct udf_options uopt;
struct udf_sb_info *sbi = UDF_SB(sb);
int error = 0;
uopt.flags = sbi->s_flags;
uopt.uid = sbi->s_uid;
uopt.gid = sbi->s_gid;
uopt.umask = sbi->s_umask;
uopt.fmode = sbi->s_fmode;
uopt.dmode = sbi->s_dmode;
if (!udf_parse_options(options, &uopt, true))
return -EINVAL;
lock_kernel();
sbi->s_flags = uopt.flags;
sbi->s_uid = uopt.uid;
sbi->s_gid = uopt.gid;
sbi->s_umask = uopt.umask;
sbi->s_fmode = uopt.fmode;
sbi->s_dmode = uopt.dmode;
if (sbi->s_lvid_bh) {
int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
if (write_rev > UDF_MAX_WRITE_VERSION)
*flags |= MS_RDONLY;
}
if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
goto out_unlock;
if (*flags & MS_RDONLY)
udf_close_lvid(sb);
else
udf_open_lvid(sb);
out_unlock:
unlock_kernel();
return error;
}
/* Check Volume Structure Descriptors (ECMA 167 2/9.1) */
/* We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1) */
static loff_t udf_check_vsd(struct super_block *sb)
{
struct volStructDesc *vsd = NULL;
loff_t sector = 32768;
int sectorsize;
struct buffer_head *bh = NULL;
int nsr02 = 0;
int nsr03 = 0;
struct udf_sb_info *sbi;
sbi = UDF_SB(sb);
if (sb->s_blocksize < sizeof(struct volStructDesc))
sectorsize = sizeof(struct volStructDesc);
else
sectorsize = sb->s_blocksize;
sector += (sbi->s_session << sb->s_blocksize_bits);
udf_debug("Starting at sector %u (%ld byte sectors)\n",
(unsigned int)(sector >> sb->s_blocksize_bits),
sb->s_blocksize);
/* Process the sequence (if applicable) */
for (; !nsr02 && !nsr03; sector += sectorsize) {
/* Read a block */
bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
if (!bh)
break;
/* Look for ISO descriptors */
vsd = (struct volStructDesc *)(bh->b_data +
(sector & (sb->s_blocksize - 1)));
if (vsd->stdIdent[0] == 0) {
brelse(bh);
break;
} else if (!strncmp(vsd->stdIdent, VSD_STD_ID_CD001,
VSD_STD_ID_LEN)) {
switch (vsd->structType) {
case 0:
udf_debug("ISO9660 Boot Record found\n");
break;
case 1:
udf_debug("ISO9660 Primary Volume Descriptor "
"found\n");
break;
case 2:
udf_debug("ISO9660 Supplementary Volume "
"Descriptor found\n");
break;
case 3:
udf_debug("ISO9660 Volume Partition Descriptor "
"found\n");
break;
case 255:
udf_debug("ISO9660 Volume Descriptor Set "
"Terminator found\n");
break;
default:
udf_debug("ISO9660 VRS (%u) found\n",
vsd->structType);
break;
}
} else if (!strncmp(vsd->stdIdent, VSD_STD_ID_BEA01,
VSD_STD_ID_LEN))
; /* nothing */
else if (!strncmp(vsd->stdIdent, VSD_STD_ID_TEA01,
VSD_STD_ID_LEN)) {
brelse(bh);
break;
} else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR02,
VSD_STD_ID_LEN))
nsr02 = sector;
else if (!strncmp(vsd->stdIdent, VSD_STD_ID_NSR03,
VSD_STD_ID_LEN))
nsr03 = sector;
brelse(bh);
}
if (nsr03)
return nsr03;
else if (nsr02)
return nsr02;
else if (sector - (sbi->s_session << sb->s_blocksize_bits) == 32768)
return -1;
else
return 0;
}
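/* Locate and load the File Set Descriptor, filling in @root with the
* location of the root directory.  Returns 0 on success, 1 on failure.
*/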
static int udf_find_fileset(struct super_block *sb,
struct kernel_lb_addr *fileset,
struct kernel_lb_addr *root)
{
struct buffer_head *bh = NULL;
long lastblock;
uint16_t ident;
struct udf_sb_info *sbi;
if (fileset->logicalBlockNum != 0xFFFFFFFF ||
fileset->partitionReferenceNum != 0xFFFF) {
bh = udf_read_ptagged(sb, fileset, 0, &ident);
if (!bh) {
return 1;
} else if (ident != TAG_IDENT_FSD) {
brelse(bh);
return 1;
}
}
sbi = UDF_SB(sb);
if (!bh) {
/* Search backwards through the partitions */
struct kernel_lb_addr newfileset;
/* --> cvg: FIXME - is it reasonable? */
return 1;
for (newfileset.partitionReferenceNum = sbi->s_partitions - 1;
(newfileset.partitionReferenceNum != 0xFFFF &&
fileset->logicalBlockNum == 0xFFFFFFFF &&
fileset->partitionReferenceNum == 0xFFFF);
newfileset.partitionReferenceNum--) {
lastblock = sbi->s_partmaps
[newfileset.partitionReferenceNum]
.s_partition_len;
newfileset.logicalBlockNum = 0;
do {
bh = udf_read_ptagged(sb, &newfileset, 0,
&ident);
if (!bh) {
newfileset.logicalBlockNum++;
continue;
}
switch (ident) {
case TAG_IDENT_SBD:
{
struct spaceBitmapDesc *sp;
sp = (struct spaceBitmapDesc *)
bh->b_data;
newfileset.logicalBlockNum += 1 +
((le32_to_cpu(sp->numOfBytes) +
sizeof(struct spaceBitmapDesc)
- 1) >> sb->s_blocksize_bits);
brelse(bh);
break;
}
case TAG_IDENT_FSD:
*fileset = newfileset;
break;
default:
newfileset.logicalBlockNum++;
brelse(bh);
bh = NULL;
break;
}
} while (newfileset.logicalBlockNum < lastblock &&
fileset->logicalBlockNum == 0xFFFFFFFF &&
fileset->partitionReferenceNum == 0xFFFF);
}
}
if ((fileset->logicalBlockNum != 0xFFFFFFFF ||
fileset->partitionReferenceNum != 0xFFFF) && bh) {
udf_debug("Fileset at block=%d, partition=%d\n",
fileset->logicalBlockNum,
fileset->partitionReferenceNum);
sbi->s_partition = fileset->partitionReferenceNum;
udf_load_fileset(sb, bh, root);
brelse(bh);
return 0;
}
return 1;
}
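/* Read the Primary Volume Descriptor at @block and record the
* recording time and volume identifier in the superblock info.
*/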
static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
{
struct primaryVolDesc *pvoldesc;
struct ustr *instr, *outstr;
struct buffer_head *bh;
uint16_t ident;
int ret = 1;
instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!instr)
return 1;
outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
if (!outstr)
goto out1;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
goto out2;
BUG_ON(ident != TAG_IDENT_PVD);
pvoldesc = (struct primaryVolDesc *)bh->b_data;
if (udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
pvoldesc->recordingDateAndTime)) {
#ifdef UDFFS_DEBUG
struct timestamp *ts = &pvoldesc->recordingDateAndTime;
udf_debug("recording time %04u/%02u/%02u"
" %02u:%02u (%x)\n",
le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
ts->minute, le16_to_cpu(ts->typeAndTimezone));
#endif
}
if (!udf_build_ustr(instr, pvoldesc->volIdent, 32))
if (udf_CS0toUTF8(outstr, instr)) {
strncpy(UDF_SB(sb)->s_volume_ident, outstr->u_name,
outstr->u_len > 31 ? 31 : outstr->u_len);
udf_debug("volIdent[] = '%s'\n",
UDF_SB(sb)->s_volume_ident);
}
if (!udf_build_ustr(instr, pvoldesc->volSetIdent, 128))
if (udf_CS0toUTF8(outstr, instr))
udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
brelse(bh);
ret = 0;
out2:
kfree(outstr);
out1:
kfree(instr);
return ret;
}
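/* Load the metadata, mirror and (optional) bitmap file inodes of a
* metadata partition.  Either the metadata or the mirror inode may be
* missing, but not both; a missing bitmap inode is tolerated only on
* read-only mounts.
*/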
static int udf_load_metadata_files(struct super_block *sb, int partition)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map;
struct udf_meta_data *mdata;
struct kernel_lb_addr addr;
int fe_error = 0;
map = &sbi->s_partmaps[partition];
mdata = &map->s_type_specific.s_metadata;
/* metadata address */
addr.logicalBlockNum = mdata->s_meta_file_loc;
addr.partitionReferenceNum = map->s_partition_num;
udf_debug("Metadata file location: block = %d part = %d\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
mdata->s_metadata_fe = udf_iget(sb, &addr);
if (mdata->s_metadata_fe == NULL) {
udf_warning(sb, __func__, "metadata inode efe not found, "
"will try mirror inode.");
fe_error = 1;
} else if (UDF_I(mdata->s_metadata_fe)->i_alloc_type !=
ICBTAG_FLAG_AD_SHORT) {
udf_warning(sb, __func__, "metadata inode efe does not have "
"short allocation descriptors!");
fe_error = 1;
iput(mdata->s_metadata_fe);
mdata->s_metadata_fe = NULL;
}
/* mirror file entry */
addr.logicalBlockNum = mdata->s_mirror_file_loc;
addr.partitionReferenceNum = map->s_partition_num;
udf_debug("Mirror metadata file location: block = %d part = %d\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
mdata->s_mirror_fe = udf_iget(sb, &addr);
if (mdata->s_mirror_fe == NULL) {
if (fe_error) {
udf_error(sb, __func__, "mirror inode efe not found "
"and metadata inode is missing too, exiting...");
goto error_exit;
} else
udf_warning(sb, __func__, "mirror inode efe not found,"
" but metadata inode is OK");
} else if (UDF_I(mdata->s_mirror_fe)->i_alloc_type !=
ICBTAG_FLAG_AD_SHORT) {
udf_warning(sb, __func__, "mirror inode efe does not have "
"short allocation descriptors!");
iput(mdata->s_mirror_fe);
mdata->s_mirror_fe = NULL;
if (fe_error)
goto error_exit;
}
/*
* bitmap file entry
* Note:
* Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
*/
if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
addr.logicalBlockNum = mdata->s_bitmap_file_loc;
addr.partitionReferenceNum = map->s_partition_num;
udf_debug("Bitmap file location: block = %d part = %d\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
mdata->s_bitmap_fe = udf_iget(sb, &addr);
if (mdata->s_bitmap_fe == NULL) {
if (sb->s_flags & MS_RDONLY)
udf_warning(sb, __func__, "bitmap inode efe "
"not found but it's ok since the disc"
" is mounted read-only");
else {
udf_error(sb, __func__, "bitmap inode efe not "
"found and attempted read-write mount");
goto error_exit;
}
}
}
udf_debug("udf_load_metadata_files Ok\n");
return 0;
error_exit:
return 1;
}
static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
struct kernel_lb_addr *root)
{
struct fileSetDesc *fset;
fset = (struct fileSetDesc *)bh->b_data;
*root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
udf_debug("Rootdir at block=%d, partition=%d\n",
root->logicalBlockNum, root->partitionReferenceNum);
}
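/* Number of block groups needed by the space bitmap; the bitmap
* descriptor header itself occupies bitmap space, hence the extra
* (sizeof(struct spaceBitmapDesc) << 3) bits.
*/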
int udf_compute_nr_groups(struct super_block *sb, u32 partition)
{
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
return DIV_ROUND_UP(map->s_partition_len +
(sizeof(struct spaceBitmapDesc) << 3),
sb->s_blocksize * 8);
}
static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
{
struct udf_bitmap *bitmap;
int nr_groups;
int size;
nr_groups = udf_compute_nr_groups(sb, index);
size = sizeof(struct udf_bitmap) +
(sizeof(struct buffer_head *) * nr_groups);
if (size <= PAGE_SIZE)
bitmap = kmalloc(size, GFP_KERNEL);
else
bitmap = vmalloc(size); /* TODO: get rid of vmalloc */
if (bitmap == NULL) {
udf_error(sb, __func__,
"Unable to allocate space for bitmap "
"and %d buffer_head pointers", nr_groups);
return NULL;
}
memset(bitmap, 0x00, size);
bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
bitmap->s_nr_groups = nr_groups;
return bitmap;
}
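/* Fill in a partition map from a Partition Descriptor: length, root
* and access-type flags, plus (for NSR02/NSR03 contents) the
* unallocated and freed space tables and bitmaps.
*/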
static int udf_fill_partdesc_info(struct super_block *sb,
struct partitionDesc *p, int p_index)
{
struct udf_part_map *map;
struct udf_sb_info *sbi = UDF_SB(sb);
struct partitionHeaderDesc *phd;
map = &sbi->s_partmaps[p_index];
map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
udf_debug("Partition (%d type %x) starts at physical %d, "
"block length %d\n", p_index,
map->s_partition_type, map->s_partition_root,
map->s_partition_len);
if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
return 0;
phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
if (phd->unallocSpaceTable.extLength) {
struct kernel_lb_addr loc = {
.logicalBlockNum = le32_to_cpu(
phd->unallocSpaceTable.extPosition),
.partitionReferenceNum = p_index,
};
map->s_uspace.s_table = udf_iget(sb, &loc);
if (!map->s_uspace.s_table) {
udf_debug("cannot load unallocSpaceTable (part %d)\n",
p_index);
return 1;
}
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
udf_debug("unallocSpaceTable (part %d) @ %ld\n",
p_index, map->s_uspace.s_table->i_ino);
}
if (phd->unallocSpaceBitmap.extLength) {
struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
if (!bitmap)
return 1;
map->s_uspace.s_bitmap = bitmap;
bitmap->s_extLength = le32_to_cpu(
phd->unallocSpaceBitmap.extLength);
bitmap->s_extPosition = le32_to_cpu(
phd->unallocSpaceBitmap.extPosition);
map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
udf_debug("unallocSpaceBitmap (part %d) @ %d\n", p_index,
bitmap->s_extPosition);
}
if (phd->partitionIntegrityTable.extLength)
udf_debug("partitionIntegrityTable (part %d)\n", p_index);
if (phd->freedSpaceTable.extLength) {
struct kernel_lb_addr loc = {
.logicalBlockNum = le32_to_cpu(
phd->freedSpaceTable.extPosition),
.partitionReferenceNum = p_index,
};
map->s_fspace.s_table = udf_iget(sb, &loc);
if (!map->s_fspace.s_table) {
udf_debug("cannot load freedSpaceTable (part %d)\n",
p_index);
return 1;
}
map->s_partition_flags |= UDF_PART_FLAG_FREED_TABLE;
udf_debug("freedSpaceTable (part %d) @ %ld\n",
p_index, map->s_fspace.s_table->i_ino);
}
if (phd->freedSpaceBitmap.extLength) {
struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
if (!bitmap)
return 1;
map->s_fspace.s_bitmap = bitmap;
bitmap->s_extLength = le32_to_cpu(
phd->freedSpaceBitmap.extLength);
bitmap->s_extPosition = le32_to_cpu(
phd->freedSpaceBitmap.extPosition);
map->s_partition_flags |= UDF_PART_FLAG_FREED_BITMAP;
udf_debug("freedSpaceBitmap (part %d) @ %d\n", p_index,
bitmap->s_extPosition);
}
return 0;
}
static void udf_find_vat_block(struct super_block *sb, int p_index,
int type1_index, sector_t start_block)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map = &sbi->s_partmaps[p_index];
sector_t vat_block;
struct kernel_lb_addr ino;
/*
* VAT file entry is in the last recorded block. Some broken disks have
* it a few blocks earlier, so try a bit harder...
*/
ino.partitionReferenceNum = type1_index;
for (vat_block = start_block;
vat_block >= map->s_partition_root &&
vat_block >= start_block - 3 &&
!sbi->s_vat_inode; vat_block--) {
ino.logicalBlockNum = vat_block - map->s_partition_root;
sbi->s_vat_inode = udf_iget(sb, &ino);
}
}
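/* Load the Virtual Allocation Table inode (retrying at the very last
* block of the device) and work out the table offset and entry count
* for the VAT 1.50 and 2.00 layouts.
*/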
static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map = &sbi->s_partmaps[p_index];
struct buffer_head *bh = NULL;
struct udf_inode_info *vati;
uint32_t pos;
struct virtualAllocationTable20 *vat20;
sector_t blocks = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
if (!sbi->s_vat_inode &&
sbi->s_last_block != blocks - 1) {
printk(KERN_NOTICE "UDF-fs: Failed to read VAT inode from the"
" last recorded block (%lu), retrying with the last "
"block of the device (%lu).\n",
(unsigned long)sbi->s_last_block,
(unsigned long)blocks - 1);
udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
}
if (!sbi->s_vat_inode)
return 1;
if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
map->s_type_specific.s_virtual.s_start_offset = 0;
map->s_type_specific.s_virtual.s_num_entries =
(sbi->s_vat_inode->i_size - 36) >> 2;
} else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
vati = UDF_I(sbi->s_vat_inode);
if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
pos = udf_block_map(sbi->s_vat_inode, 0);
bh = sb_bread(sb, pos);
if (!bh)
return 1;
vat20 = (struct virtualAllocationTable20 *)bh->b_data;
} else {
vat20 = (struct virtualAllocationTable20 *)
vati->i_ext.i_data;
}
map->s_type_specific.s_virtual.s_start_offset =
le16_to_cpu(vat20->lengthHeader);
map->s_type_specific.s_virtual.s_num_entries =
(sbi->s_vat_inode->i_size -
map->s_type_specific.s_virtual.
s_start_offset) >> 2;
brelse(bh);
}
return 0;
}
static int udf_load_partdesc(struct super_block *sb, sector_t block)
{
struct buffer_head *bh;
struct partitionDesc *p;
struct udf_part_map *map;
struct udf_sb_info *sbi = UDF_SB(sb);
int i, type1_idx;
uint16_t partitionNumber;
uint16_t ident;
int ret = 0;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
return 1;
if (ident != TAG_IDENT_PD)
goto out_bh;
p = (struct partitionDesc *)bh->b_data;
partitionNumber = le16_to_cpu(p->partitionNumber);
/* First scan for TYPE1, SPARABLE and METADATA partitions */
for (i = 0; i < sbi->s_partitions; i++) {
map = &sbi->s_partmaps[i];
udf_debug("Searching map: (%d == %d)\n",
map->s_partition_num, partitionNumber);
if (map->s_partition_num == partitionNumber &&
(map->s_partition_type == UDF_TYPE1_MAP15 ||
map->s_partition_type == UDF_SPARABLE_MAP15))
break;
}
if (i >= sbi->s_partitions) {
udf_debug("Partition (%d) not found in partition map\n",
partitionNumber);
goto out_bh;
}
ret = udf_fill_partdesc_info(sb, p, i);
/*
* Now rescan for VIRTUAL or METADATA partitions when SPARABLE and
* PHYSICAL partitions are already set up
*/
type1_idx = i;
for (i = 0; i < sbi->s_partitions; i++) {
map = &sbi->s_partmaps[i];
if (map->s_partition_num == partitionNumber &&
(map->s_partition_type == UDF_VIRTUAL_MAP15 ||
map->s_partition_type == UDF_VIRTUAL_MAP20 ||
map->s_partition_type == UDF_METADATA_MAP25))
break;
}
if (i >= sbi->s_partitions)
goto out_bh;
ret = udf_fill_partdesc_info(sb, p, i);
if (ret)
goto out_bh;
if (map->s_partition_type == UDF_METADATA_MAP25) {
ret = udf_load_metadata_files(sb, i);
if (ret) {
printk(KERN_ERR "UDF-fs: error loading MetaData "
"partition map %d\n", i);
goto out_bh;
}
} else {
ret = udf_load_vat(sb, i, type1_idx);
if (ret)
goto out_bh;
/*
* Mark filesystem read-only if we have a partition with
* virtual map since we don't handle writing to it (we
* overwrite blocks instead of relocating them).
*/
sb->s_flags |= MS_RDONLY;
printk(KERN_NOTICE "UDF-fs: Filesystem marked read-only "
"because writing to pseudooverwrite partition is "
"not implemented.\n");
}
out_bh:
/* In case loading failed, we handle cleanup in udf_fill_super */
brelse(bh);
return ret;
}
static int udf_load_logicalvol(struct super_block *sb, sector_t block,
struct kernel_lb_addr *fileset)
{
struct logicalVolDesc *lvd;
int i, j, offset;
uint8_t type;
struct udf_sb_info *sbi = UDF_SB(sb);
struct genericPartitionMap *gpm;
uint16_t ident;
struct buffer_head *bh;
int ret = 0;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
return 1;
BUG_ON(ident != TAG_IDENT_LVD);
lvd = (struct logicalVolDesc *)bh->b_data;
i = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
if (i != 0) {
ret = i;
goto out_bh;
}
for (i = 0, offset = 0;
i < sbi->s_partitions && offset < le32_to_cpu(lvd->mapTableLength);
i++, offset += gpm->partitionMapLength) {
struct udf_part_map *map = &sbi->s_partmaps[i];
gpm = (struct genericPartitionMap *)
&(lvd->partitionMaps[offset]);
type = gpm->partitionMapType;
if (type == 1) {
struct genericPartitionMap1 *gpm1 =
(struct genericPartitionMap1 *)gpm;
map->s_partition_type = UDF_TYPE1_MAP15;
map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
map->s_partition_func = NULL;
} else if (type == 2) {
struct udfPartitionMap2 *upm2 =
(struct udfPartitionMap2 *)gpm;
if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
strlen(UDF_ID_VIRTUAL))) {
u16 suf =
le16_to_cpu(((__le16 *)upm2->partIdent.
identSuffix)[0]);
if (suf < 0x0200) {
map->s_partition_type =
UDF_VIRTUAL_MAP15;
map->s_partition_func =
udf_get_pblock_virt15;
} else {
map->s_partition_type =
UDF_VIRTUAL_MAP20;
map->s_partition_func =
udf_get_pblock_virt20;
}
} else if (!strncmp(upm2->partIdent.ident,
UDF_ID_SPARABLE,
strlen(UDF_ID_SPARABLE))) {
uint32_t loc;
struct sparingTable *st;
struct sparablePartitionMap *spm =
(struct sparablePartitionMap *)gpm;
map->s_partition_type = UDF_SPARABLE_MAP15;
map->s_type_specific.s_sparing.s_packet_len =
le16_to_cpu(spm->packetLength);
for (j = 0; j < spm->numSparingTables; j++) {
struct buffer_head *bh2;
loc = le32_to_cpu(
spm->locSparingTable[j]);
bh2 = udf_read_tagged(sb, loc, loc,
&ident);
map->s_type_specific.s_sparing.
s_spar_map[j] = bh2;
if (bh2 == NULL)
continue;
st = (struct sparingTable *)bh2->b_data;
if (ident != 0 || strncmp(
st->sparingIdent.ident,
UDF_ID_SPARING,
strlen(UDF_ID_SPARING))) {
brelse(bh2);
map->s_type_specific.s_sparing.
s_spar_map[j] = NULL;
}
}
map->s_partition_func = udf_get_pblock_spar15;
} else if (!strncmp(upm2->partIdent.ident,
UDF_ID_METADATA,
strlen(UDF_ID_METADATA))) {
struct udf_meta_data *mdata =
&map->s_type_specific.s_metadata;
struct metadataPartitionMap *mdm =
(struct metadataPartitionMap *)
&(lvd->partitionMaps[offset]);
udf_debug("Parsing Logical vol part %d "
"type %d id=%s\n", i, type,
UDF_ID_METADATA);
map->s_partition_type = UDF_METADATA_MAP25;
map->s_partition_func = udf_get_pblock_meta25;
mdata->s_meta_file_loc =
le32_to_cpu(mdm->metadataFileLoc);
mdata->s_mirror_file_loc =
le32_to_cpu(mdm->metadataMirrorFileLoc);
mdata->s_bitmap_file_loc =
le32_to_cpu(mdm->metadataBitmapFileLoc);
mdata->s_alloc_unit_size =
le32_to_cpu(mdm->allocUnitSize);
mdata->s_align_unit_size =
le16_to_cpu(mdm->alignUnitSize);
mdata->s_dup_md_flag =
mdm->flags & 0x01;
udf_debug("Metadata Ident suffix=0x%x\n",
(le16_to_cpu(
((__le16 *)
mdm->partIdent.identSuffix)[0])));
udf_debug("Metadata part num=%d\n",
le16_to_cpu(mdm->partitionNum));
udf_debug("Metadata part alloc unit size=%d\n",
le32_to_cpu(mdm->allocUnitSize));
udf_debug("Metadata file loc=%d\n",
le32_to_cpu(mdm->metadataFileLoc));
udf_debug("Mirror file loc=%d\n",
le32_to_cpu(mdm->metadataMirrorFileLoc));
udf_debug("Bitmap file loc=%d\n",
le32_to_cpu(mdm->metadataBitmapFileLoc));
udf_debug("Duplicate Flag: %d %d\n",
mdata->s_dup_md_flag, mdm->flags);
} else {
udf_debug("Unknown ident: %s\n",
upm2->partIdent.ident);
continue;
}
map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
map->s_partition_num = le16_to_cpu(upm2->partitionNum);
}
udf_debug("Partition (%d:%d) type %d on volume %d\n",
i, map->s_partition_num, type,
map->s_volumeseqnum);
}
if (fileset) {
struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
*fileset = lelb_to_cpu(la->extLocation);
udf_debug("FileSet found in LogicalVolDesc at block=%d, "
"partition=%d\n", fileset->logicalBlockNum,
fileset->partitionReferenceNum);
}
if (lvd->integritySeqExt.extLength)
udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
out_bh:
brelse(bh);
return ret;
}
/*
* udf_load_logicalvolint
*
* Load the Logical Volume Integrity Descriptor (LVID) sequence and keep
* a reference to the most recent LVID in sbi->s_lvid_bh.
*/
static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
{
struct buffer_head *bh = NULL;
uint16_t ident;
struct udf_sb_info *sbi = UDF_SB(sb);
struct logicalVolIntegrityDesc *lvid;
while (loc.extLength > 0 &&
(bh = udf_read_tagged(sb, loc.extLocation,
loc.extLocation, &ident)) &&
ident == TAG_IDENT_LVID) {
sbi->s_lvid_bh = bh;
lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
if (lvid->nextIntegrityExt.extLength)
udf_load_logicalvolint(sb,
leea_to_cpu(lvid->nextIntegrityExt));
if (sbi->s_lvid_bh != bh)
brelse(bh);
loc.extLength -= sb->s_blocksize;
loc.extLocation++;
}
if (sbi->s_lvid_bh != bh)
brelse(bh);
}
/*
* udf_process_sequence
*
* PURPOSE
* Process a main/reserve volume descriptor sequence.
*
* PRE-CONDITIONS
* sb Pointer to _locked_ superblock.
* block First block of first extent of the sequence.
* lastblock Last block of the first extent of the sequence.
*
* HISTORY
* July 1, 1997 - Andrew E. Mileski
* Written, tested, and released.
*/
static noinline int udf_process_sequence(struct super_block *sb, long block,
long lastblock, struct kernel_lb_addr *fileset)
{
struct buffer_head *bh = NULL;
struct udf_vds_record vds[VDS_POS_LENGTH];
struct udf_vds_record *curr;
struct generic_desc *gd;
struct volDescPtr *vdp;
int done = 0;
uint32_t vdsn;
uint16_t ident;
long next_s = 0, next_e = 0;
memset(vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
/*
* Read the main descriptor sequence and find which descriptors
* are in it.
*/
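/*
* For each descriptor type we keep only the copy with the highest
* volDescSeqNum, i.e. the prevailing descriptor (cf. ISO 13346 3/8.4).
*/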
for (; (!done && block <= lastblock); block++) {
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh) {
printk(KERN_ERR "udf: Block %Lu of volume descriptor "
"sequence is corrupted or we could not read "
"it.\n", (unsigned long long)block);
return 1;
}
/* Process each descriptor (ISO 13346 3/8.3-8.4) */
gd = (struct generic_desc *)bh->b_data;
vdsn = le32_to_cpu(gd->volDescSeqNum);
switch (ident) {
case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
curr = &vds[VDS_POS_PRIMARY_VOL_DESC];
if (vdsn >= curr->volDescSeqNum) {
curr->volDescSeqNum = vdsn;
curr->block = block;
}
break;
case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
curr = &vds[VDS_POS_VOL_DESC_PTR];
if (vdsn >= curr->volDescSeqNum) {
curr->volDescSeqNum = vdsn;
curr->block = block;
vdp = (struct volDescPtr *)bh->b_data;
next_s = le32_to_cpu(
vdp->nextVolDescSeqExt.extLocation);
next_e = le32_to_cpu(
vdp->nextVolDescSeqExt.extLength);
next_e = next_e >> sb->s_blocksize_bits;
next_e += next_s;
}
break;
case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
curr = &vds[VDS_POS_IMP_USE_VOL_DESC];
if (vdsn >= curr->volDescSeqNum) {
curr->volDescSeqNum = vdsn;
curr->block = block;
}
break;
case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
curr = &vds[VDS_POS_PARTITION_DESC];
if (!curr->block)
curr->block = block;
break;
case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
curr = &vds[VDS_POS_LOGICAL_VOL_DESC];
if (vdsn >= curr->volDescSeqNum) {
curr->volDescSeqNum = vdsn;
curr->block = block;
}
break;
case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
curr = &vds[VDS_POS_UNALLOC_SPACE_DESC];
if (vdsn >= curr->volDescSeqNum) {
curr->volDescSeqNum = vdsn;
curr->block = block;
}
break;
case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
vds[VDS_POS_TERMINATING_DESC].block = block;
if (next_e) {
block = next_s;
lastblock = next_e;
next_s = next_e = 0;
} else
done = 1;
break;
}
brelse(bh);
}
/*
* Now read interesting descriptors again and process them
* in a suitable order
*/
if (!vds[VDS_POS_PRIMARY_VOL_DESC].block) {
printk(KERN_ERR "udf: Primary Volume Descriptor not found!\n");
return 1;
}
if (udf_load_pvoldesc(sb, vds[VDS_POS_PRIMARY_VOL_DESC].block))
return 1;
if (vds[VDS_POS_LOGICAL_VOL_DESC].block && udf_load_logicalvol(sb,
vds[VDS_POS_LOGICAL_VOL_DESC].block, fileset))
return 1;
if (vds[VDS_POS_PARTITION_DESC].block) {
/*
* We rescan the whole descriptor sequence to find
* partition descriptor blocks and process them.
*/
for (block = vds[VDS_POS_PARTITION_DESC].block;
block < vds[VDS_POS_TERMINATING_DESC].block;
block++)
if (udf_load_partdesc(sb, block))
return 1;
}
return 0;
}
static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
struct kernel_lb_addr *fileset)
{
struct anchorVolDescPtr *anchor;
long main_s, main_e, reserve_s, reserve_e;
anchor = (struct anchorVolDescPtr *)bh->b_data;
/* Locate the main sequence */
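/* extLength is in bytes; shift by the blocksize bits for a block count */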
main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
main_e = main_e >> sb->s_blocksize_bits;
main_e += main_s;
/* Locate the reserve sequence */
reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
reserve_e = reserve_e >> sb->s_blocksize_bits;
reserve_e += reserve_s;
/* Process the main & reserve sequences */
/* responsible for finding the PartitionDesc(s) */
if (!udf_process_sequence(sb, main_s, main_e, fileset))
return 1;
return !udf_process_sequence(sb, reserve_s, reserve_e, fileset);
}
/*
* Check whether there is an anchor block in the given block and
* load Volume Descriptor Sequence if so.
*/
static int udf_check_anchor_block(struct super_block *sb, sector_t block,
struct kernel_lb_addr *fileset)
{
struct buffer_head *bh;
uint16_t ident;
int ret;
if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
udf_fixed_to_variable(block) >=
sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits)
return 0;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
return 0;
if (ident != TAG_IDENT_AVDP) {
brelse(bh);
return 0;
}
ret = udf_load_sequence(sb, bh, fileset);
brelse(bh);
return ret;
}
/* Search for an anchor volume descriptor pointer */
static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock,
struct kernel_lb_addr *fileset)
{
sector_t last[6];
int i;
struct udf_sb_info *sbi = UDF_SB(sb);
int last_count = 0;
/* First try user provided anchor */
if (sbi->s_anchor) {
if (udf_check_anchor_block(sb, sbi->s_anchor, fileset))
return lastblock;
}
/*
* According to the spec, the anchor is in one of:
* block 256
* lastblock - 256
* lastblock
* However, if the disc isn't closed, it may be at block 512 instead.
*/
if (udf_check_anchor_block(sb, sbi->s_session + 256, fileset))
return lastblock;
/*
* The trouble is determining which block really is the last one.
* Drives often misreport it, so we try several possibilities.
*/
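/*
* (Illustrative note: the 150/152-block offsets below are assumed to
* compensate for run-out/link blocks that some CD drives include in
* their reported last block.)
*/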
last[last_count++] = lastblock;
if (lastblock >= 1)
last[last_count++] = lastblock - 1;
last[last_count++] = lastblock + 1;
if (lastblock >= 2)
last[last_count++] = lastblock - 2;
if (lastblock >= 150)
last[last_count++] = lastblock - 150;
if (lastblock >= 152)
last[last_count++] = lastblock - 152;
for (i = 0; i < last_count; i++) {
if (last[i] >= sb->s_bdev->bd_inode->i_size >>
sb->s_blocksize_bits)
continue;
if (udf_check_anchor_block(sb, last[i], fileset))
return last[i];
if (last[i] < 256)
continue;
if (udf_check_anchor_block(sb, last[i] - 256, fileset))
return last[i];
}
/* Finally try block 512 in case media is open */
if (udf_check_anchor_block(sb, sbi->s_session + 512, fileset))
return last[0];
return 0;
}
/*
* Find an anchor volume descriptor and load Volume Descriptor Sequence from
* area specified by it. The function expects sbi->s_lastblock to be the last
* block on the media.
*
* Return 1 if ok, 0 if not found.
*
*/
static int udf_find_anchor(struct super_block *sb,
struct kernel_lb_addr *fileset)
{
sector_t lastblock;
struct udf_sb_info *sbi = UDF_SB(sb);
lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
if (lastblock)
goto out;
/* No anchor found? Try VARCONV conversion of block numbers */
UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
/* First, try without converting the last block number */
lastblock = udf_scan_anchors(sb,
udf_variable_to_fixed(sbi->s_last_block),
fileset);
if (lastblock)
goto out;
/* Second, try with the last block number converted */
lastblock = udf_scan_anchors(sb, sbi->s_last_block, fileset);
if (!lastblock) {
/* VARCONV didn't help. Clear it. */
UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
return 0;
}
out:
sbi->s_last_block = lastblock;
return 1;
}
/*
* Check Volume Structure Descriptor, find Anchor block and load Volume
* Descriptor Sequence
*/
static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
int silent, struct kernel_lb_addr *fileset)
{
struct udf_sb_info *sbi = UDF_SB(sb);
loff_t nsr_off;
if (!sb_set_blocksize(sb, uopt->blocksize)) {
if (!silent)
printk(KERN_WARNING "UDF-fs: Bad block size\n");
return 0;
}
sbi->s_last_block = uopt->lastblock;
if (!uopt->novrs) {
/* Check that it is NSR02 compliant */
nsr_off = udf_check_vsd(sb);
if (!nsr_off) {
if (!silent)
printk(KERN_WARNING "UDF-fs: No VRS found\n");
return 0;
}
if (nsr_off == -1)
udf_debug("Failed to read byte 32768. Assuming open "
"disc. Skipping validity check\n");
if (!sbi->s_last_block)
sbi->s_last_block = udf_get_last_block(sb);
} else {
udf_debug("Validity check skipped because of novrs option\n");
}
/* Look for anchor block and load Volume Descriptor Sequence */
sbi->s_anchor = uopt->anchor;
if (!udf_find_anchor(sb, fileset)) {
if (!silent)
printk(KERN_WARNING "UDF-fs: No anchor found\n");
return 0;
}
return 1;
}
static void udf_open_lvid(struct super_block *sb)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct buffer_head *bh = sbi->s_lvid_bh;
struct logicalVolIntegrityDesc *lvid;
struct logicalVolIntegrityDescImpUse *lvidiu;
if (!bh)
return;
lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
lvidiu = udf_sb_lvidiu(sbi);
lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
udf_time_to_disk_stamp(&lvid->recordingDateAndTime,
CURRENT_TIME);
lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
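/* regenerate the descriptor tag CRC and checksum after the update */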
lvid->descTag.descCRC = cpu_to_le16(
crc_itu_t(0, (char *)lvid + sizeof(struct tag),
le16_to_cpu(lvid->descTag.descCRCLength)));
lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
mark_buffer_dirty(bh);
sbi->s_lvid_dirty = 0;
}
static void udf_close_lvid(struct super_block *sb)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct buffer_head *bh = sbi->s_lvid_bh;
struct logicalVolIntegrityDesc *lvid;
struct logicalVolIntegrityDescImpUse *lvidiu;
if (!bh)
return;
lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
lvidiu = udf_sb_lvidiu(sbi);
lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
udf_time_to_disk_stamp(&lvid->recordingDateAndTime, CURRENT_TIME);
if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
lvid->descTag.descCRC = cpu_to_le16(
crc_itu_t(0, (char *)lvid + sizeof(struct tag),
le16_to_cpu(lvid->descTag.descCRCLength)));
lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
mark_buffer_dirty(bh);
sbi->s_lvid_dirty = 0;
}
static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
{
int i;
int nr_groups = bitmap->s_nr_groups;
int size = sizeof(struct udf_bitmap) + (sizeof(struct buffer_head *) *
nr_groups);
for (i = 0; i < nr_groups; i++)
if (bitmap->s_block_bitmap[i])
brelse(bitmap->s_block_bitmap[i]);
if (size <= PAGE_SIZE)
kfree(bitmap);
else
vfree(bitmap);
}
static void udf_free_partition(struct udf_part_map *map)
{
int i;
struct udf_meta_data *mdata;
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
iput(map->s_uspace.s_table);
if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
iput(map->s_fspace.s_table);
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
udf_sb_free_bitmap(map->s_uspace.s_bitmap);
if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
udf_sb_free_bitmap(map->s_fspace.s_bitmap);
if (map->s_partition_type == UDF_SPARABLE_MAP15)
for (i = 0; i < 4; i++)
brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
else if (map->s_partition_type == UDF_METADATA_MAP25) {
mdata = &map->s_type_specific.s_metadata;
iput(mdata->s_metadata_fe);
mdata->s_metadata_fe = NULL;
iput(mdata->s_mirror_fe);
mdata->s_mirror_fe = NULL;
iput(mdata->s_bitmap_fe);
mdata->s_bitmap_fe = NULL;
}
}
static int udf_fill_super(struct super_block *sb, void *options, int silent)
{
int i;
int ret;
struct inode *inode = NULL;
struct udf_options uopt;
struct kernel_lb_addr rootdir, fileset;
struct udf_sb_info *sbi;
uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
uopt.uid = -1;
uopt.gid = -1;
uopt.umask = 0;
uopt.fmode = UDF_INVALID_MODE;
uopt.dmode = UDF_INVALID_MODE;
sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
if (!sbi)
return -ENOMEM;
sb->s_fs_info = sbi;
mutex_init(&sbi->s_alloc_mutex);
if (!udf_parse_options((char *)options, &uopt, false))
goto error_out;
if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
udf_error(sb, "udf_read_super",
"utf8 cannot be combined with iocharset\n");
goto error_out;
}
#ifdef CONFIG_UDF_NLS
if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
uopt.nls_map = load_nls_default();
if (!uopt.nls_map)
uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
else
udf_debug("Using default NLS map\n");
}
#endif
if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
uopt.flags |= (1 << UDF_FLAG_UTF8);
fileset.logicalBlockNum = 0xFFFFFFFF;
fileset.partitionReferenceNum = 0xFFFF;
sbi->s_flags = uopt.flags;
sbi->s_uid = uopt.uid;
sbi->s_gid = uopt.gid;
sbi->s_umask = uopt.umask;
sbi->s_fmode = uopt.fmode;
sbi->s_dmode = uopt.dmode;
sbi->s_nls_map = uopt.nls_map;
if (uopt.session == 0xFFFFFFFF)
sbi->s_session = udf_get_last_session(sb);
else
sbi->s_session = uopt.session;
udf_debug("Multi-session=%d\n", sbi->s_session);
/* Fill in the rest of the superblock */
sb->s_op = &udf_sb_ops;
sb->s_export_op = &udf_export_ops;
sb->s_dirt = 0;
sb->s_magic = UDF_SUPER_MAGIC;
sb->s_time_gran = 1000;
if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
} else {
uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
if (!ret && uopt.blocksize != UDF_DEFAULT_BLOCKSIZE) {
if (!silent)
printk(KERN_NOTICE
"UDF-fs: Rescanning with blocksize "
"%d\n", UDF_DEFAULT_BLOCKSIZE);
uopt.blocksize = UDF_DEFAULT_BLOCKSIZE;
ret = udf_load_vrs(sb, &uopt, silent, &fileset);
}
}
if (!ret) {
printk(KERN_WARNING "UDF-fs: No partition found (1)\n");
goto error_out;
}
udf_debug("Lastblock=%d\n", sbi->s_last_block);
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDescImpUse *lvidiu =
udf_sb_lvidiu(sbi);
uint16_t minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
uint16_t minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
/* uint16_t maxUDFWriteRev =
le16_to_cpu(lvidiu->maxUDFWriteRev); */
if (minUDFReadRev > UDF_MAX_READ_VERSION) {
printk(KERN_ERR "UDF-fs: minUDFReadRev=%x "
"(max is %x)\n",
le16_to_cpu(lvidiu->minUDFReadRev),
UDF_MAX_READ_VERSION);
goto error_out;
} else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION)
sb->s_flags |= MS_RDONLY;
sbi->s_udfrev = minUDFWriteRev;
if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
}
if (!sbi->s_partitions) {
printk(KERN_WARNING "UDF-fs: No partition found (2)\n");
goto error_out;
}
if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
UDF_PART_FLAG_READ_ONLY) {
printk(KERN_NOTICE "UDF-fs: Partition marked readonly; "
"forcing readonly mount\n");
sb->s_flags |= MS_RDONLY;
}
if (udf_find_fileset(sb, &fileset, &rootdir)) {
printk(KERN_WARNING "UDF-fs: No fileset found\n");
goto error_out;
}
if (!silent) {
struct timestamp ts;
udf_time_to_disk_stamp(&ts, sbi->s_record_time);
udf_info("UDF: Mounting volume '%s', "
"timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
sbi->s_volume_ident, le16_to_cpu(ts.year), ts.month, ts.day,
ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
}
if (!(sb->s_flags & MS_RDONLY))
udf_open_lvid(sb);
/* Assign the root inode */
/* assign inodes by physical block number */
/* perhaps it's not extensible enough, but for now ... */
inode = udf_iget(sb, &rootdir);
if (!inode) {
printk(KERN_ERR "UDF-fs: Error in udf_iget, block=%d, "
"partition=%d\n",
rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
goto error_out;
}
/* Allocate a dentry for the root inode */
sb->s_root = d_alloc_root(inode);
if (!sb->s_root) {
printk(KERN_ERR "UDF-fs: Couldn't allocate root dentry\n");
iput(inode);
goto error_out;
}
sb->s_maxbytes = MAX_LFS_FILESIZE;
return 0;
error_out:
if (sbi->s_vat_inode)
iput(sbi->s_vat_inode);
if (sbi->s_partitions)
for (i = 0; i < sbi->s_partitions; i++)
udf_free_partition(&sbi->s_partmaps[i]);
#ifdef CONFIG_UDF_NLS
if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
unload_nls(sbi->s_nls_map);
#endif
if (!(sb->s_flags & MS_RDONLY))
udf_close_lvid(sb);
brelse(sbi->s_lvid_bh);
kfree(sbi->s_partmaps);
kfree(sbi);
sb->s_fs_info = NULL;
return -EINVAL;
}
static void udf_error(struct super_block *sb, const char *function,
const char *fmt, ...)
{
va_list args;
if (!(sb->s_flags & MS_RDONLY)) {
/* mark sb error */
sb->s_dirt = 1;
}
va_start(args, fmt);
vsnprintf(error_buf, sizeof(error_buf), fmt, args);
va_end(args);
printk(KERN_CRIT "UDF-fs error (device %s): %s: %s\n",
sb->s_id, function, error_buf);
}
void udf_warning(struct super_block *sb, const char *function,
const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
vsnprintf(error_buf, sizeof(error_buf), fmt, args);
va_end(args);
printk(KERN_WARNING "UDF-fs warning (device %s): %s: %s\n",
sb->s_id, function, error_buf);
}
static void udf_put_super(struct super_block *sb)
{
int i;
struct udf_sb_info *sbi;
sbi = UDF_SB(sb);
lock_kernel();
if (sbi->s_vat_inode)
iput(sbi->s_vat_inode);
if (sbi->s_partitions)
for (i = 0; i < sbi->s_partitions; i++)
udf_free_partition(&sbi->s_partmaps[i]);
#ifdef CONFIG_UDF_NLS
if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
unload_nls(sbi->s_nls_map);
#endif
if (!(sb->s_flags & MS_RDONLY))
udf_close_lvid(sb);
brelse(sbi->s_lvid_bh);
kfree(sbi->s_partmaps);
kfree(sb->s_fs_info);
sb->s_fs_info = NULL;
unlock_kernel();
}
static int udf_sync_fs(struct super_block *sb, int wait)
{
struct udf_sb_info *sbi = UDF_SB(sb);
mutex_lock(&sbi->s_alloc_mutex);
if (sbi->s_lvid_dirty) {
/*
* Blockdevice will be synced later so we don't have to submit
* the buffer for IO
*/
mark_buffer_dirty(sbi->s_lvid_bh);
sb->s_dirt = 0;
sbi->s_lvid_dirty = 0;
}
mutex_unlock(&sbi->s_alloc_mutex);
return 0;
}
static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
{
struct super_block *sb = dentry->d_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
struct logicalVolIntegrityDescImpUse *lvidiu;
u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
if (sbi->s_lvid_bh != NULL)
lvidiu = udf_sb_lvidiu(sbi);
else
lvidiu = NULL;
buf->f_type = UDF_SUPER_MAGIC;
buf->f_bsize = sb->s_blocksize;
buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
buf->f_bfree = udf_count_free(sb);
buf->f_bavail = buf->f_bfree;
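/*
* f_files is only an estimate: objects counted by the LVID (when
* present) plus one potential object per free block.
*/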
buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
le32_to_cpu(lvidiu->numDirs)) : 0)
+ buf->f_bfree;
buf->f_ffree = buf->f_bfree;
buf->f_namelen = UDF_NAME_LEN - 2;
buf->f_fsid.val[0] = (u32)id;
buf->f_fsid.val[1] = (u32)(id >> 32);
return 0;
}
static unsigned int udf_count_free_bitmap(struct super_block *sb,
struct udf_bitmap *bitmap)
{
struct buffer_head *bh = NULL;
unsigned int accum = 0;
int index;
int block = 0, newblock;
struct kernel_lb_addr loc;
uint32_t bytes;
uint8_t *ptr;
uint16_t ident;
struct spaceBitmapDesc *bm;
lock_kernel();
loc.logicalBlockNum = bitmap->s_extPosition;
loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
bh = udf_read_ptagged(sb, &loc, 0, &ident);
if (!bh) {
printk(KERN_ERR "udf: udf_count_free failed\n");
goto out;
} else if (ident != TAG_IDENT_SBD) {
brelse(bh);
printk(KERN_ERR "udf: udf_count_free failed\n");
goto out;
}
bm = (struct spaceBitmapDesc *)bh->b_data;
bytes = le32_to_cpu(bm->numOfBytes);
index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
ptr = (uint8_t *)bh->b_data;
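/*
* In a UDF space bitmap a set bit marks a free block, so summing the
* bit weight of each bitmap block yields the free-block count.
*/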
while (bytes > 0) {
u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
accum += bitmap_weight((const unsigned long *)(ptr + index),
cur_bytes * 8);
bytes -= cur_bytes;
if (bytes) {
brelse(bh);
newblock = udf_get_lb_pblock(sb, &loc, ++block);
bh = udf_tread(sb, newblock);
if (!bh) {
udf_debug("read failed\n");
goto out;
}
index = 0;
ptr = (uint8_t *)bh->b_data;
}
}
brelse(bh);
out:
unlock_kernel();
return accum;
}
static unsigned int udf_count_free_table(struct super_block *sb,
struct inode *table)
{
unsigned int accum = 0;
uint32_t elen;
struct kernel_lb_addr eloc;
int8_t etype;
struct extent_position epos;
lock_kernel();
epos.block = UDF_I(table)->i_location;
epos.offset = sizeof(struct unallocSpaceEntry);
epos.bh = NULL;
while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
accum += (elen >> table->i_sb->s_blocksize_bits);
brelse(epos.bh);
unlock_kernel();
return accum;
}
static unsigned int udf_count_free(struct super_block *sb)
{
unsigned int accum = 0;
struct udf_sb_info *sbi;
struct udf_part_map *map;
sbi = UDF_SB(sb);
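/*
* Prefer the cached free-space count from the LVID; 0xFFFFFFFF there
* means the count is not maintained, in which case fall back to the
* space bitmaps and, failing that, the unallocated/freed space tables.
*/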
if (sbi->s_lvid_bh) {
struct logicalVolIntegrityDesc *lvid =
(struct logicalVolIntegrityDesc *)
sbi->s_lvid_bh->b_data;
if (le32_to_cpu(lvid->numOfPartitions) > sbi->s_partition) {
accum = le32_to_cpu(
lvid->freeSpaceTable[sbi->s_partition]);
if (accum == 0xFFFFFFFF)
accum = 0;
}
}
if (accum)
return accum;
map = &sbi->s_partmaps[sbi->s_partition];
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
accum += udf_count_free_bitmap(sb,
map->s_uspace.s_bitmap);
}
if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
accum += udf_count_free_bitmap(sb,
map->s_fspace.s_bitmap);
}
if (accum)
return accum;
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
accum += udf_count_free_table(sb,
map->s_uspace.s_table);
}
if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
accum += udf_count_free_table(sb,
map->s_fspace.s_table);
}
return accum;
}
| gpl-2.0 |
c2h2/aria-imx6-kernel | sound/soc/pxa/magician.c | 86 | 14291 | /*
* SoC audio for HTC Magician
*
* Copyright (c) 2006 Philipp Zabel <philipp.zabel@gmail.com>
*
* based on spitz.c,
* Authors: Liam Girdwood <lrg@slimlogic.co.uk>
* Richard Purdie <richard@openedhand.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/uda1380.h>
#include <mach/magician.h>
#include <asm/mach-types.h>
#include "../codecs/uda1380.h"
#include "pxa2xx-i2s.h"
#include "pxa-ssp.h"
#define MAGICIAN_MIC 0
#define MAGICIAN_MIC_EXT 1
static int magician_hp_switch;
static int magician_spk_switch = 1;
static int magician_in_sel = MAGICIAN_MIC;
static void magician_ext_control(struct snd_soc_codec *codec)
{
struct snd_soc_dapm_context *dapm = &codec->dapm;
if (magician_spk_switch)
snd_soc_dapm_enable_pin(dapm, "Speaker");
else
snd_soc_dapm_disable_pin(dapm, "Speaker");
if (magician_hp_switch)
snd_soc_dapm_enable_pin(dapm, "Headphone Jack");
else
snd_soc_dapm_disable_pin(dapm, "Headphone Jack");
switch (magician_in_sel) {
case MAGICIAN_MIC:
snd_soc_dapm_disable_pin(dapm, "Headset Mic");
snd_soc_dapm_enable_pin(dapm, "Call Mic");
break;
case MAGICIAN_MIC_EXT:
snd_soc_dapm_disable_pin(dapm, "Call Mic");
snd_soc_dapm_enable_pin(dapm, "Headset Mic");
break;
}
snd_soc_dapm_sync(dapm);
}
static int magician_startup(struct snd_pcm_substream *substream)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_codec *codec = rtd->codec;
mutex_lock(&codec->mutex);
/* check the jack status at stream startup */
magician_ext_control(codec);
mutex_unlock(&codec->mutex);
return 0;
}
/*
* Magician uses SSP port for playback.
*/
static int magician_playback_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
unsigned int acps, acds, width;
unsigned int div4 = PXA_SSP_CLK_SCDB_4;
int ret = 0;
width = snd_pcm_format_physical_width(params_format(params));
/*
* rate = SSPSCLK / (2 * width(16 or 32))
* SSPSCLK = (ACPS / ACDS) / SSPSCLKDIV(div4 or div1)
*/
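/*
* Worked example (44100 Hz, 16-bit, figures from the table below):
* SSPSCLK = 1405500 Hz, so rate = 1405500 / (2 * 16) = 43921.875 Hz,
* i.e. ~44100 Hz (about -0.4%).
*/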
switch (params_rate(params)) {
case 8000:
/* off by a factor of 2: bug in the PXA27x audio clock? */
acps = 32842000;
switch (width) {
case 16:
/* 513156 Hz ~= _2_ * 8000 Hz * 32 (+0.23%) */
acds = PXA_SSP_CLK_AUDIO_DIV_16;
break;
default: /* 32 */
/* 1026312 Hz ~= _2_ * 8000 Hz * 64 (+0.23%) */
acds = PXA_SSP_CLK_AUDIO_DIV_8;
}
break;
case 11025:
acps = 5622000;
switch (width) {
case 16:
/* 351375 Hz ~= 11025 Hz * 32 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_4;
break;
default: /* 32 */
/* 702750 Hz ~= 11025 Hz * 64 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_2;
}
break;
case 22050:
acps = 5622000;
switch (width) {
case 16:
/* 702750 Hz ~= 22050 Hz * 32 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_2;
break;
default: /* 32 */
/* 1405500 Hz ~= 22050 Hz * 64 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_1;
}
break;
case 44100:
acps = 5622000;
switch (width) {
case 16:
/* 1405500 Hz ~= 44100 Hz * 32 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_2;
break;
default: /* 32 */
/* 2811000 Hz ~= 44100 Hz * 64 (-0.41%) */
acds = PXA_SSP_CLK_AUDIO_DIV_1;
}
break;
case 48000:
acps = 12235000;
switch (width) {
case 16:
/* 1529375 Hz ~= 48000 Hz * 32 (-0.44%) */
acds = PXA_SSP_CLK_AUDIO_DIV_2;
break;
default: /* 32 */
/* 3058750 Hz ~= 48000 Hz * 64 (-0.44%) */
acds = PXA_SSP_CLK_AUDIO_DIV_1;
}
break;
case 96000:
default:
acps = 12235000;
switch (width) {
case 16:
/* 3058750 Hz ~= 96000 Hz * 32 (-0.44%) */
acds = PXA_SSP_CLK_AUDIO_DIV_1;
break;
default: /* 32 */
/* 6117500 Hz ~= 96000 Hz * 64 (-0.44%) */
acds = PXA_SSP_CLK_AUDIO_DIV_2;
div4 = PXA_SSP_CLK_SCDB_1;
break;
}
break;
}
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_MSB |
SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_DSP_A |
SND_SOC_DAIFMT_NB_IF | SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
ret = snd_soc_dai_set_tdm_slot(cpu_dai, 1, 0, 1, width);
if (ret < 0)
return ret;
/* set audio clock as clock source */
ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_AUDIO, 0,
SND_SOC_CLOCK_OUT);
if (ret < 0)
return ret;
/* set the SSP audio system clock ACDS divider */
ret = snd_soc_dai_set_clkdiv(cpu_dai,
PXA_SSP_AUDIO_DIV_ACDS, acds);
if (ret < 0)
return ret;
/* set the SSP audio system clock SCDB divider4 */
ret = snd_soc_dai_set_clkdiv(cpu_dai,
PXA_SSP_AUDIO_DIV_SCDB, div4);
if (ret < 0)
return ret;
/* set SSP audio pll clock */
ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, acps);
if (ret < 0)
return ret;
return 0;
}
/*
* Magician uses I2S for capture.
*/
static int magician_capture_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
int ret = 0;
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai,
SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai,
SND_SOC_DAIFMT_MSB | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBS_CFS);
if (ret < 0)
return ret;
/* set the I2S system clock as output */
ret = snd_soc_dai_set_sysclk(cpu_dai, PXA2XX_I2S_SYSCLK, 0,
SND_SOC_CLOCK_OUT);
if (ret < 0)
return ret;
return 0;
}
static struct snd_soc_ops magician_capture_ops = {
.startup = magician_startup,
.hw_params = magician_capture_hw_params,
};
static struct snd_soc_ops magician_playback_ops = {
.startup = magician_startup,
.hw_params = magician_playback_hw_params,
};
static int magician_get_hp(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = magician_hp_switch;
return 0;
}
static int magician_set_hp(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
if (magician_hp_switch == ucontrol->value.integer.value[0])
return 0;
magician_hp_switch = ucontrol->value.integer.value[0];
magician_ext_control(codec);
return 1;
}
static int magician_get_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = magician_spk_switch;
return 0;
}
static int magician_set_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
if (magician_spk_switch == ucontrol->value.integer.value[0])
return 0;
magician_spk_switch = ucontrol->value.integer.value[0];
magician_ext_control(codec);
return 1;
}
static int magician_get_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
ucontrol->value.integer.value[0] = magician_in_sel;
return 0;
}
static int magician_set_input(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
if (magician_in_sel == ucontrol->value.integer.value[0])
return 0;
magician_in_sel = ucontrol->value.integer.value[0];
switch (magician_in_sel) {
case MAGICIAN_MIC:
gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 1);
break;
case MAGICIAN_MIC_EXT:
gpio_set_value(EGPIO_MAGICIAN_IN_SEL1, 0);
}
return 1;
}
static int magician_spk_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
static int magician_hp_power(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
gpio_set_value(EGPIO_MAGICIAN_EP_POWER, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
static int magician_mic_bias(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *k, int event)
{
gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, SND_SOC_DAPM_EVENT_ON(event));
return 0;
}
/* magician machine dapm widgets */
static const struct snd_soc_dapm_widget uda1380_dapm_widgets[] = {
SND_SOC_DAPM_HP("Headphone Jack", magician_hp_power),
SND_SOC_DAPM_SPK("Speaker", magician_spk_power),
SND_SOC_DAPM_MIC("Call Mic", magician_mic_bias),
SND_SOC_DAPM_MIC("Headset Mic", magician_mic_bias),
};
/* magician machine audio_map */
static const struct snd_soc_dapm_route audio_map[] = {
/* Headphone connected to VOUTL, VOUTR */
{"Headphone Jack", NULL, "VOUTL"},
{"Headphone Jack", NULL, "VOUTR"},
/* Speaker connected to VOUTL, VOUTR */
{"Speaker", NULL, "VOUTL"},
{"Speaker", NULL, "VOUTR"},
/* Mics are connected to VINM */
{"VINM", NULL, "Headset Mic"},
{"VINM", NULL, "Call Mic"},
};
static const char *input_select[] = {"Call Mic", "Headset Mic"};
static const struct soc_enum magician_in_sel_enum =
SOC_ENUM_SINGLE_EXT(2, input_select);
static const struct snd_kcontrol_new uda1380_magician_controls[] = {
SOC_SINGLE_BOOL_EXT("Headphone Switch",
(unsigned long)&magician_hp_switch,
magician_get_hp, magician_set_hp),
SOC_SINGLE_BOOL_EXT("Speaker Switch",
(unsigned long)&magician_spk_switch,
magician_get_spk, magician_set_spk),
SOC_ENUM_EXT("Input Select", magician_in_sel_enum,
magician_get_input, magician_set_input),
};
/*
* Logic for a uda1380 as connected on a HTC Magician
*/
static int magician_uda1380_init(struct snd_soc_pcm_runtime *rtd)
{
struct snd_soc_codec *codec = rtd->codec;
struct snd_soc_dapm_context *dapm = &codec->dapm;
int err;
/* NC codec pins */
snd_soc_dapm_nc_pin(dapm, "VOUTLHP");
snd_soc_dapm_nc_pin(dapm, "VOUTRHP");
/* FIXME: is anything connected here? */
snd_soc_dapm_nc_pin(dapm, "VINL");
snd_soc_dapm_nc_pin(dapm, "VINR");
/* Add magician specific controls */
err = snd_soc_add_controls(codec, uda1380_magician_controls,
ARRAY_SIZE(uda1380_magician_controls));
if (err < 0)
return err;
/* Add magician specific widgets */
snd_soc_dapm_new_controls(dapm, uda1380_dapm_widgets,
ARRAY_SIZE(uda1380_dapm_widgets));
/* Set up magician specific audio path interconnects */
snd_soc_dapm_add_routes(dapm, audio_map, ARRAY_SIZE(audio_map));
return 0;
}
/* magician digital audio interface glue - connects codec <--> CPU */
static struct snd_soc_dai_link magician_dai[] = {
{
.name = "uda1380",
.stream_name = "UDA1380 Playback",
.cpu_dai_name = "pxa-ssp-dai.0",
.codec_dai_name = "uda1380-hifi-playback",
.platform_name = "pxa-pcm-audio",
.codec_name = "uda1380-codec.0-0018",
.init = magician_uda1380_init,
.ops = &magician_playback_ops,
},
{
.name = "uda1380",
.stream_name = "UDA1380 Capture",
.cpu_dai_name = "pxa2xx-i2s",
.codec_dai_name = "uda1380-hifi-capture",
.platform_name = "pxa-pcm-audio",
.codec_name = "uda1380-codec.0-0018",
.ops = &magician_capture_ops,
}
};
/* magician audio machine driver */
static struct snd_soc_card snd_soc_card_magician = {
.name = "Magician",
.owner = THIS_MODULE,
.dai_link = magician_dai,
.num_links = ARRAY_SIZE(magician_dai),
};
static struct platform_device *magician_snd_device;
/*
* FIXME: move into magician board file once merged into the pxa tree
*/
static struct uda1380_platform_data uda1380_info = {
.gpio_power = EGPIO_MAGICIAN_CODEC_POWER,
.gpio_reset = EGPIO_MAGICIAN_CODEC_RESET,
.dac_clk = UDA1380_DAC_CLK_WSPLL,
};
static struct i2c_board_info i2c_board_info[] = {
{
I2C_BOARD_INFO("uda1380", 0x18),
.platform_data = &uda1380_info,
},
};
static int __init magician_init(void)
{
int ret;
struct i2c_adapter *adapter;
struct i2c_client *client;
if (!machine_is_magician())
return -ENODEV;
adapter = i2c_get_adapter(0);
if (!adapter)
return -ENODEV;
client = i2c_new_device(adapter, i2c_board_info);
i2c_put_adapter(adapter);
if (!client)
return -ENODEV;
ret = gpio_request(EGPIO_MAGICIAN_SPK_POWER, "SPK_POWER");
if (ret)
goto err_request_spk;
ret = gpio_request(EGPIO_MAGICIAN_EP_POWER, "EP_POWER");
if (ret)
goto err_request_ep;
ret = gpio_request(EGPIO_MAGICIAN_MIC_POWER, "MIC_POWER");
if (ret)
goto err_request_mic;
ret = gpio_request(EGPIO_MAGICIAN_IN_SEL0, "IN_SEL0");
if (ret)
goto err_request_in_sel0;
ret = gpio_request(EGPIO_MAGICIAN_IN_SEL1, "IN_SEL1");
if (ret)
goto err_request_in_sel1;
gpio_set_value(EGPIO_MAGICIAN_IN_SEL0, 0);
magician_snd_device = platform_device_alloc("soc-audio", -1);
if (!magician_snd_device) {
ret = -ENOMEM;
goto err_pdev;
}
platform_set_drvdata(magician_snd_device, &snd_soc_card_magician);
ret = platform_device_add(magician_snd_device);
if (ret) {
platform_device_put(magician_snd_device);
goto err_pdev;
}
return 0;
err_pdev:
gpio_free(EGPIO_MAGICIAN_IN_SEL1);
err_request_in_sel1:
gpio_free(EGPIO_MAGICIAN_IN_SEL0);
err_request_in_sel0:
gpio_free(EGPIO_MAGICIAN_MIC_POWER);
err_request_mic:
gpio_free(EGPIO_MAGICIAN_EP_POWER);
err_request_ep:
gpio_free(EGPIO_MAGICIAN_SPK_POWER);
err_request_spk:
return ret;
}
static void __exit magician_exit(void)
{
platform_device_unregister(magician_snd_device);
gpio_set_value(EGPIO_MAGICIAN_SPK_POWER, 0);
gpio_set_value(EGPIO_MAGICIAN_EP_POWER, 0);
gpio_set_value(EGPIO_MAGICIAN_MIC_POWER, 0);
gpio_free(EGPIO_MAGICIAN_IN_SEL1);
gpio_free(EGPIO_MAGICIAN_IN_SEL0);
gpio_free(EGPIO_MAGICIAN_MIC_POWER);
gpio_free(EGPIO_MAGICIAN_EP_POWER);
gpio_free(EGPIO_MAGICIAN_SPK_POWER);
}
module_init(magician_init);
module_exit(magician_exit);
MODULE_AUTHOR("Philipp Zabel");
MODULE_DESCRIPTION("ALSA SoC Magician");
MODULE_LICENSE("GPL");
| gpl-2.0 |
sour12/iamroot | fs/ceph/dir.c | 86 | 35949 | #include <linux/ceph/ceph_debug.h>
#include <linux/spinlock.h>
#include <linux/fs_struct.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "super.h"
#include "mds_client.h"
/*
* Directory operations: readdir, lookup, create, link, unlink,
* rename, etc.
*/
/*
* Ceph MDS operations are specified in terms of a base ino and
* relative path. Thus, the client can specify an operation on a
* specific inode (e.g., a getattr due to fstat(2)), or as a path
* relative to, say, the root directory.
*
* Normally, we limit ourselves to strict inode ops (no path component)
* or dentry operations (a single path component relative to an ino). The
* exception to this is open_root_dentry(), which will open the mount
* point by name.
*/
const struct inode_operations ceph_dir_iops;
const struct file_operations ceph_dir_fops;
const struct dentry_operations ceph_dentry_ops;
/*
* Initialize ceph dentry state.
*/
int ceph_init_dentry(struct dentry *dentry)
{
struct ceph_dentry_info *di;
if (dentry->d_fsdata)
return 0;
di = kmem_cache_alloc(ceph_dentry_cachep, GFP_NOFS | __GFP_ZERO);
if (!di)
return -ENOMEM; /* oh well */
spin_lock(&dentry->d_lock);
if (dentry->d_fsdata) {
/* lost a race */
kmem_cache_free(ceph_dentry_cachep, di);
goto out_unlock;
}
if (ceph_snap(dentry->d_parent->d_inode) == CEPH_NOSNAP)
d_set_d_op(dentry, &ceph_dentry_ops);
else if (ceph_snap(dentry->d_parent->d_inode) == CEPH_SNAPDIR)
d_set_d_op(dentry, &ceph_snapdir_dentry_ops);
else
d_set_d_op(dentry, &ceph_snap_dentry_ops);
di->dentry = dentry;
di->lease_session = NULL;
dentry->d_time = jiffies;
/* avoid reordering d_fsdata setup so that the check above is safe */
smp_mb();
dentry->d_fsdata = di;
ceph_dentry_lru_add(dentry);
out_unlock:
spin_unlock(&dentry->d_lock);
return 0;
}
struct inode *ceph_get_dentry_parent_inode(struct dentry *dentry)
{
struct inode *inode = NULL;
if (!dentry)
return NULL;
spin_lock(&dentry->d_lock);
if (!IS_ROOT(dentry)) {
inode = dentry->d_parent->d_inode;
ihold(inode);
}
spin_unlock(&dentry->d_lock);
return inode;
}
/*
* for readdir, we encode the directory frag and offset within that
* frag into f_pos.
*/
static unsigned fpos_frag(loff_t p)
{
return p >> 32;
}
static unsigned fpos_off(loff_t p)
{
return p & 0xffffffff;
}
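/*
* A minimal sketch of the inverse packing (ceph_make_fpos(), used in
* ceph_readdir() below, is assumed to do roughly this):
*
* static inline loff_t make_fpos(unsigned frag, unsigned off)
* {
* return ((loff_t)frag << 32) | (loff_t)off;
* }
*/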
/*
* When possible, we try to satisfy a readdir by peeking at the
* dcache. We make this work by carefully ordering dentries on
* d_u.d_child when we initially get results back from the MDS, and
* falling back to a "normal" sync readdir if any dentries in the dir
* are dropped.
*
* Complete dir indicates that we have all dentries in the dir. It is
* defined IFF we hold CEPH_CAP_FILE_SHARED (which will be revoked by
* the MDS if/when the directory is modified).
*/
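/*
* On any inconsistency below we bail out with -EAGAIN and the caller
* (ceph_readdir) falls back to a sync readdir against the MDS.
*/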
static int __dcache_readdir(struct file *file, struct dir_context *ctx)
{
struct ceph_file_info *fi = file->private_data;
struct dentry *parent = file->f_dentry;
struct inode *dir = parent->d_inode;
struct list_head *p;
struct dentry *dentry, *last;
struct ceph_dentry_info *di;
int err = 0;
/* claim ref on last dentry we returned */
last = fi->dentry;
fi->dentry = NULL;
dout("__dcache_readdir %p at %llu (last %p)\n", dir, ctx->pos,
last);
spin_lock(&parent->d_lock);
/* start at beginning? */
if (ctx->pos == 2 || last == NULL ||
ctx->pos < ceph_dentry(last)->offset) {
if (list_empty(&parent->d_subdirs))
goto out_unlock;
p = parent->d_subdirs.prev;
dout(" initial p %p/%p\n", p->prev, p->next);
} else {
p = last->d_u.d_child.prev;
}
more:
dentry = list_entry(p, struct dentry, d_u.d_child);
di = ceph_dentry(dentry);
while (1) {
dout(" p %p/%p %s d_subdirs %p/%p\n", p->prev, p->next,
d_unhashed(dentry) ? "!hashed" : "hashed",
parent->d_subdirs.prev, parent->d_subdirs.next);
if (p == &parent->d_subdirs) {
fi->flags |= CEPH_F_ATEND;
goto out_unlock;
}
spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED);
if (!d_unhashed(dentry) && dentry->d_inode &&
ceph_snap(dentry->d_inode) != CEPH_SNAPDIR &&
ceph_ino(dentry->d_inode) != CEPH_INO_CEPH &&
ctx->pos <= di->offset)
break;
dout(" skipping %p %.*s at %llu (%llu)%s%s\n", dentry,
dentry->d_name.len, dentry->d_name.name, di->offset,
ctx->pos, d_unhashed(dentry) ? " unhashed" : "",
!dentry->d_inode ? " null" : "");
spin_unlock(&dentry->d_lock);
p = p->prev;
dentry = list_entry(p, struct dentry, d_u.d_child);
di = ceph_dentry(dentry);
}
dget_dlock(dentry);
spin_unlock(&dentry->d_lock);
spin_unlock(&parent->d_lock);
dout(" %llu (%llu) dentry %p %.*s %p\n", di->offset, ctx->pos,
dentry, dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
ctx->pos = di->offset;
if (!dir_emit(ctx, dentry->d_name.name,
dentry->d_name.len,
ceph_translate_ino(dentry->d_sb, dentry->d_inode->i_ino),
dentry->d_inode->i_mode >> 12)) {
if (last) {
/* remember our position */
fi->dentry = last;
fi->next_offset = di->offset;
}
dput(dentry);
return 0;
}
if (last)
dput(last);
last = dentry;
ctx->pos++;
/* make sure a dentry wasn't dropped while we didn't have parent lock */
if (!ceph_dir_is_complete(dir)) {
dout(" lost dir complete on %p; falling back to mds\n", dir);
err = -EAGAIN;
goto out;
}
spin_lock(&parent->d_lock);
p = p->prev; /* advance to next dentry */
goto more;
out_unlock:
spin_unlock(&parent->d_lock);
out:
if (last)
dput(last);
return err;
}
/*
* make note of the last dentry we read, so we can
* continue at the same lexicographical point,
* regardless of what dir changes take place on the
* server.
*/
static int note_last_dentry(struct ceph_file_info *fi, const char *name,
int len)
{
kfree(fi->last_name);
fi->last_name = kmalloc(len+1, GFP_NOFS);
if (!fi->last_name)
return -ENOMEM;
memcpy(fi->last_name, name, len);
fi->last_name[len] = 0;
dout("note_last_dentry '%s'\n", fi->last_name);
return 0;
}
static int ceph_readdir(struct file *file, struct dir_context *ctx)
{
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_mds_client *mdsc = fsc->mdsc;
unsigned frag = fpos_frag(ctx->pos);
int off = fpos_off(ctx->pos);
int err;
u32 ftype;
struct ceph_mds_reply_info_parsed *rinfo;
const int max_entries = fsc->mount_options->max_readdir;
const int max_bytes = fsc->mount_options->max_readdir_bytes;
dout("readdir %p file %p frag %u off %u\n", inode, file, frag, off);
if (fi->flags & CEPH_F_ATEND)
return 0;
/* always start with . and .. */
if (ctx->pos == 0) {
/* note dir version at start of readdir so we can tell
* if any dentries get dropped */
fi->dir_release_count = atomic_read(&ci->i_release_count);
dout("readdir off 0 -> '.'\n");
if (!dir_emit(ctx, ".", 1,
ceph_translate_ino(inode->i_sb, inode->i_ino),
inode->i_mode >> 12))
return 0;
ctx->pos = 1;
off = 1;
}
if (ctx->pos == 1) {
ino_t ino = parent_ino(file->f_dentry);
dout("readdir off 1 -> '..'\n");
if (!dir_emit(ctx, "..", 2,
ceph_translate_ino(inode->i_sb, ino),
inode->i_mode >> 12))
return 0;
ctx->pos = 2;
off = 2;
}
/* can we use the dcache? */
spin_lock(&ci->i_ceph_lock);
if ((ctx->pos == 2 || fi->dentry) &&
!ceph_test_mount_opt(fsc, NOASYNCREADDIR) &&
ceph_snap(inode) != CEPH_SNAPDIR &&
__ceph_dir_is_complete(ci) &&
__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1)) {
spin_unlock(&ci->i_ceph_lock);
err = __dcache_readdir(file, ctx);
if (err != -EAGAIN)
return err;
} else {
spin_unlock(&ci->i_ceph_lock);
}
if (fi->dentry) {
err = note_last_dentry(fi, fi->dentry->d_name.name,
fi->dentry->d_name.len);
if (err)
return err;
dput(fi->dentry);
fi->dentry = NULL;
}
/* proceed with a normal readdir */
more:
/* do we have the correct frag content buffered? */
if (fi->frag != frag || fi->last_readdir == NULL) {
struct ceph_mds_request *req;
int op = ceph_snap(inode) == CEPH_SNAPDIR ?
CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
/* discard old result, if any */
if (fi->last_readdir) {
ceph_mdsc_put_request(fi->last_readdir);
fi->last_readdir = NULL;
}
/* requery frag tree, as the frag topology may have changed */
frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);
dout("readdir fetching %llx.%llx frag %x offset '%s'\n",
ceph_vinop(inode), frag, fi->last_name);
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
req->r_inode = inode;
ihold(inode);
req->r_dentry = dget(file->f_dentry);
/* hints to request -> mds selection code */
req->r_direct_mode = USE_AUTH_MDS;
req->r_direct_hash = ceph_frag_value(frag);
req->r_direct_is_hash = true;
req->r_path2 = kstrdup(fi->last_name, GFP_NOFS);
req->r_readdir_offset = fi->next_offset;
req->r_args.readdir.frag = cpu_to_le32(frag);
req->r_args.readdir.max_entries = cpu_to_le32(max_entries);
req->r_args.readdir.max_bytes = cpu_to_le32(max_bytes);
req->r_num_caps = max_entries + 1;
err = ceph_mdsc_do_request(mdsc, NULL, req);
if (err < 0) {
ceph_mdsc_put_request(req);
return err;
}
dout("readdir got and parsed readdir result=%d"
" on frag %x, end=%d, complete=%d\n", err, frag,
(int)req->r_reply_info.dir_end,
(int)req->r_reply_info.dir_complete);
if (!req->r_did_prepopulate) {
dout("readdir !did_prepopulate");
/* preclude from marking dir complete */
fi->dir_release_count--;
}
/* note next offset and last dentry name */
fi->offset = fi->next_offset;
fi->last_readdir = req;
if (req->r_reply_info.dir_end) {
kfree(fi->last_name);
fi->last_name = NULL;
if (ceph_frag_is_rightmost(frag))
fi->next_offset = 2;
else
fi->next_offset = 0;
} else {
rinfo = &req->r_reply_info;
err = note_last_dentry(fi,
rinfo->dir_dname[rinfo->dir_nr-1],
rinfo->dir_dname_len[rinfo->dir_nr-1]);
if (err)
return err;
fi->next_offset += rinfo->dir_nr;
}
}
rinfo = &fi->last_readdir->r_reply_info;
dout("readdir frag %x num %d off %d chunkoff %d\n", frag,
rinfo->dir_nr, off, fi->offset);
ctx->pos = ceph_make_fpos(frag, off);
while (off >= fi->offset && off - fi->offset < rinfo->dir_nr) {
struct ceph_mds_reply_inode *in =
rinfo->dir_in[off - fi->offset].in;
struct ceph_vino vino;
ino_t ino;
dout("readdir off %d (%d/%d) -> %lld '%.*s' %p\n",
off, off - fi->offset, rinfo->dir_nr, ctx->pos,
rinfo->dir_dname_len[off - fi->offset],
rinfo->dir_dname[off - fi->offset], in);
BUG_ON(!in);
ftype = le32_to_cpu(in->mode) >> 12;
vino.ino = le64_to_cpu(in->ino);
vino.snap = le64_to_cpu(in->snapid);
ino = ceph_vino_to_ino(vino);
if (!dir_emit(ctx,
rinfo->dir_dname[off - fi->offset],
rinfo->dir_dname_len[off - fi->offset],
ceph_translate_ino(inode->i_sb, ino), ftype)) {
dout("filldir stopping us...\n");
return 0;
}
off++;
ctx->pos++;
}
if (fi->last_name) {
ceph_mdsc_put_request(fi->last_readdir);
fi->last_readdir = NULL;
goto more;
}
/* more frags? */
if (!ceph_frag_is_rightmost(frag)) {
frag = ceph_frag_next(frag);
off = 0;
ctx->pos = ceph_make_fpos(frag, off);
dout("readdir next frag is %x\n", frag);
goto more;
}
fi->flags |= CEPH_F_ATEND;
/*
* if dir_release_count still matches the dir, no dentries
* were released during the whole readdir, and we should have
* the complete dir contents in our cache.
*/
spin_lock(&ci->i_ceph_lock);
if (atomic_read(&ci->i_release_count) == fi->dir_release_count) {
dout(" marking %p complete\n", inode);
__ceph_dir_set_complete(ci, fi->dir_release_count);
ci->i_max_offset = ctx->pos;
}
spin_unlock(&ci->i_ceph_lock);
dout("readdir %p file %p done.\n", inode, file);
return 0;
}
static void reset_readdir(struct ceph_file_info *fi)
{
if (fi->last_readdir) {
ceph_mdsc_put_request(fi->last_readdir);
fi->last_readdir = NULL;
}
kfree(fi->last_name);
fi->last_name = NULL;
fi->next_offset = 2; /* compensate for . and .. */
if (fi->dentry) {
dput(fi->dentry);
fi->dentry = NULL;
}
fi->flags &= ~CEPH_F_ATEND;
}
static loff_t ceph_dir_llseek(struct file *file, loff_t offset, int whence)
{
struct ceph_file_info *fi = file->private_data;
struct inode *inode = file->f_mapping->host;
loff_t old_offset = offset;
loff_t retval;
mutex_lock(&inode->i_mutex);
retval = -EINVAL;
switch (whence) {
case SEEK_END:
offset += inode->i_size + 2; /* FIXME */
break;
case SEEK_CUR:
offset += file->f_pos;
/* fall through */
case SEEK_SET:
break;
default:
goto out;
}
if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
if (offset != file->f_pos) {
file->f_pos = offset;
file->f_version = 0;
fi->flags &= ~CEPH_F_ATEND;
}
retval = offset;
/*
* discard buffered readdir content on seekdir(0), or
* seek to new frag, or seek prior to current chunk.
*/
if (offset == 0 ||
fpos_frag(offset) != fpos_frag(old_offset) ||
fpos_off(offset) < fi->offset) {
dout("dir_llseek dropping %p content\n", file);
reset_readdir(fi);
}
/* bump dir_release_count if we did a forward seek */
if (offset > old_offset)
fi->dir_release_count--;
}
out:
mutex_unlock(&inode->i_mutex);
return retval;
}
/*
* Handle lookups for the hidden .snap directory.
*/
int ceph_handle_snapdir(struct ceph_mds_request *req,
struct dentry *dentry, int err)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dentry->d_sb);
struct inode *parent = dentry->d_parent->d_inode; /* we hold i_mutex */
/* .snap dir? */
if (err == -ENOENT &&
ceph_snap(parent) == CEPH_NOSNAP &&
strcmp(dentry->d_name.name,
fsc->mount_options->snapdir_name) == 0) {
struct inode *inode = ceph_get_snapdir(parent);
dout("ENOENT on snapdir %p '%.*s', linking to snapdir %p\n",
dentry, dentry->d_name.len, dentry->d_name.name, inode);
BUG_ON(!d_unhashed(dentry));
d_add(dentry, inode);
err = 0;
}
return err;
}
/*
* Figure out final result of a lookup/open request.
*
* Mainly, make sure we return the final req->r_dentry (if it already
* existed) in place of the original VFS-provided dentry when they
* differ.
*
* Gracefully handle the case where the MDS replies with -ENOENT and
* no trace (which it may do, at its discretion, e.g., if it doesn't
* care to issue a lease on the negative dentry).
*/
struct dentry *ceph_finish_lookup(struct ceph_mds_request *req,
struct dentry *dentry, int err)
{
if (err == -ENOENT) {
/* no trace? */
err = 0;
if (!req->r_reply_info.head->is_dentry) {
dout("ENOENT and no trace, dentry %p inode %p\n",
dentry, dentry->d_inode);
if (dentry->d_inode) {
d_drop(dentry);
err = -ENOENT;
} else {
d_add(dentry, NULL);
}
}
}
if (err)
dentry = ERR_PTR(err);
else if (dentry != req->r_dentry)
dentry = dget(req->r_dentry); /* we got spliced */
else
dentry = NULL;
return dentry;
}
static int is_root_ceph_dentry(struct inode *inode, struct dentry *dentry)
{
return ceph_ino(inode) == CEPH_INO_ROOT &&
strncmp(dentry->d_name.name, ".ceph", 5) == 0;
}
/*
* Look up a single dir entry. If there is a lookup intent, inform
* the MDS so that it gets our 'caps wanted' value in a single op.
*/
static struct dentry *ceph_lookup(struct inode *dir, struct dentry *dentry,
unsigned int flags)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int op;
int err;
dout("lookup %p dentry %p '%.*s'\n",
dir, dentry, dentry->d_name.len, dentry->d_name.name);
if (dentry->d_name.len > NAME_MAX)
return ERR_PTR(-ENAMETOOLONG);
err = ceph_init_dentry(dentry);
if (err < 0)
return ERR_PTR(err);
/* can we conclude ENOENT locally? */
if (dentry->d_inode == NULL) {
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_dentry_info *di = ceph_dentry(dentry);
spin_lock(&ci->i_ceph_lock);
dout(" dir %p flags are %d\n", dir, ci->i_ceph_flags);
if (strncmp(dentry->d_name.name,
fsc->mount_options->snapdir_name,
dentry->d_name.len) &&
!is_root_ceph_dentry(dir, dentry) &&
__ceph_dir_is_complete(ci) &&
(__ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1))) {
spin_unlock(&ci->i_ceph_lock);
dout(" dir %p complete, -ENOENT\n", dir);
d_add(dentry, NULL);
di->lease_shared_gen = ci->i_shared_gen;
return NULL;
}
spin_unlock(&ci->i_ceph_lock);
}
op = ceph_snap(dir) == CEPH_SNAPDIR ?
CEPH_MDS_OP_LOOKUPSNAP : CEPH_MDS_OP_LOOKUP;
req = ceph_mdsc_create_request(mdsc, op, USE_ANY_MDS);
if (IS_ERR(req))
return ERR_CAST(req);
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
/* we only need inode linkage */
req->r_args.getattr.mask = cpu_to_le32(CEPH_STAT_CAP_INODE);
req->r_locked_dir = dir;
err = ceph_mdsc_do_request(mdsc, NULL, req);
err = ceph_handle_snapdir(req, dentry, err);
dentry = ceph_finish_lookup(req, dentry, err);
ceph_mdsc_put_request(req); /* will dput(dentry) */
dout("lookup result=%p\n", dentry);
return dentry;
}
/*
* If we do a create but get no trace back from the MDS, follow up with
* a lookup (the VFS expects us to link up the provided dentry).
*/
int ceph_handle_notrace_create(struct inode *dir, struct dentry *dentry)
{
struct dentry *result = ceph_lookup(dir, dentry, 0);
if (result && !IS_ERR(result)) {
/*
* We created the item, then did a lookup, and found
* it was already linked to another inode we already
* had in our cache (and thus got spliced). Link our
* dentry to that inode, but don't hash it, just in
* case the VFS wants to dereference it.
*/
BUG_ON(!result->d_inode);
d_instantiate(dentry, result->d_inode);
return 0;
}
return PTR_ERR(result);
}
static int ceph_mknod(struct inode *dir, struct dentry *dentry,
umode_t mode, dev_t rdev)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int err;
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
dout("mknod in dir %p dentry %p mode 0%ho rdev %d\n",
dir, dentry, mode, rdev);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_MKNOD, USE_AUTH_MDS);
if (IS_ERR(req)) {
d_drop(dentry);
return PTR_ERR(req);
}
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_locked_dir = dir;
req->r_args.mknod.mode = cpu_to_le32(mode);
req->r_args.mknod.rdev = cpu_to_le32(rdev);
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
err = ceph_mdsc_do_request(mdsc, dir, req);
if (!err && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
ceph_mdsc_put_request(req);
if (err)
d_drop(dentry);
return err;
}
static int ceph_create(struct inode *dir, struct dentry *dentry, umode_t mode,
bool excl)
{
return ceph_mknod(dir, dentry, mode, 0);
}
static int ceph_symlink(struct inode *dir, struct dentry *dentry,
const char *dest)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int err;
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
dout("symlink in dir %p dentry %p to '%s'\n", dir, dentry, dest);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SYMLINK, USE_AUTH_MDS);
if (IS_ERR(req)) {
d_drop(dentry);
return PTR_ERR(req);
}
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_path2 = kstrdup(dest, GFP_NOFS);
req->r_locked_dir = dir;
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
err = ceph_mdsc_do_request(mdsc, dir, req);
if (!err && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
ceph_mdsc_put_request(req);
if (err)
d_drop(dentry);
return err;
}
static int ceph_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int err = -EROFS;
int op;
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* mkdir .snap/foo is a MKSNAP */
op = CEPH_MDS_OP_MKSNAP;
dout("mksnap dir %p snap '%.*s' dn %p\n", dir,
dentry->d_name.len, dentry->d_name.name, dentry);
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
dout("mkdir dir %p dn %p mode 0%ho\n", dir, dentry, mode);
op = CEPH_MDS_OP_MKDIR;
} else {
goto out;
}
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out;
}
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_locked_dir = dir;
req->r_args.mkdir.mode = cpu_to_le32(mode);
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
err = ceph_mdsc_do_request(mdsc, dir, req);
if (!err && !req->r_reply_info.head->is_dentry)
err = ceph_handle_notrace_create(dir, dentry);
ceph_mdsc_put_request(req);
out:
if (err < 0)
d_drop(dentry);
return err;
}
static int ceph_link(struct dentry *old_dentry, struct inode *dir,
struct dentry *dentry)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int err;
if (ceph_snap(dir) != CEPH_NOSNAP)
return -EROFS;
dout("link in dir %p old_dentry %p dentry %p\n", dir,
old_dentry, dentry);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LINK, USE_AUTH_MDS);
if (IS_ERR(req)) {
d_drop(dentry);
return PTR_ERR(req);
}
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_old_dentry = dget(old_dentry); /* or inode? hrm. */
req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
req->r_locked_dir = dir;
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
/* release LINK_SHARED on source inode (mds will lock it) */
req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
err = ceph_mdsc_do_request(mdsc, dir, req);
if (err) {
d_drop(dentry);
} else if (!req->r_reply_info.head->is_dentry) {
ihold(old_dentry->d_inode);
d_instantiate(dentry, old_dentry->d_inode);
}
ceph_mdsc_put_request(req);
return err;
}
/*
* For a soon-to-be unlinked file, drop the LINK caps. If it
* looks like the link count will hit 0, drop any other caps (other
* than PIN) we don't specifically want (due to the file still being
* open).
*/
static int drop_caps_for_unlink(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int drop = CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL;
spin_lock(&ci->i_ceph_lock);
if (inode->i_nlink == 1) {
drop |= ~(__ceph_caps_wanted(ci) | CEPH_CAP_PIN);
ci->i_ceph_flags |= CEPH_I_NODELAY;
}
spin_unlock(&ci->i_ceph_lock);
return drop;
}
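/*
 * The mask arithmetic above reads: always drop the LINK caps, and if
 * the link count is about to hit zero, additionally drop every cap
 * that is neither still wanted nor PIN. A standalone sketch of the
 * bit manipulation (cap bit values here are made up, not ceph's):
 */
#if 0 /* illustrative sketch, not built with this file */
#include <stdio.h>

#define CAP_PIN         0x0001
#define CAP_LINK_SHARED 0x0002
#define CAP_LINK_EXCL   0x0004
#define CAP_FILE_SHARED 0x0008

int main(void)
{
	unsigned int wanted = CAP_FILE_SHARED;	/* caps still in use */
	unsigned int drop = CAP_LINK_SHARED | CAP_LINK_EXCL;

	/* last link going away: drop all caps not wanted, except PIN */
	drop |= ~(wanted | CAP_PIN);
	printf("drop mask: %#x\n", drop);
	return 0;
}
#endif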
/*
* rmdir and unlink differ only in the metadata op code
*/
static int ceph_unlink(struct inode *dir, struct dentry *dentry)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct inode *inode = dentry->d_inode;
struct ceph_mds_request *req;
int err = -EROFS;
int op;
if (ceph_snap(dir) == CEPH_SNAPDIR) {
/* rmdir .snap/foo is RMSNAP */
dout("rmsnap dir %p '%.*s' dn %p\n", dir, dentry->d_name.len,
dentry->d_name.name, dentry);
op = CEPH_MDS_OP_RMSNAP;
} else if (ceph_snap(dir) == CEPH_NOSNAP) {
dout("unlink/rmdir dir %p dn %p inode %p\n",
dir, dentry, inode);
op = S_ISDIR(dentry->d_inode->i_mode) ?
CEPH_MDS_OP_RMDIR : CEPH_MDS_OP_UNLINK;
} else
goto out;
req = ceph_mdsc_create_request(mdsc, op, USE_AUTH_MDS);
if (IS_ERR(req)) {
err = PTR_ERR(req);
goto out;
}
req->r_dentry = dget(dentry);
req->r_num_caps = 2;
req->r_locked_dir = dir;
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
req->r_inode_drop = drop_caps_for_unlink(inode);
err = ceph_mdsc_do_request(mdsc, dir, req);
if (!err && !req->r_reply_info.head->is_dentry)
d_delete(dentry);
ceph_mdsc_put_request(req);
out:
return err;
}
static int ceph_rename(struct inode *old_dir, struct dentry *old_dentry,
struct inode *new_dir, struct dentry *new_dentry)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(old_dir->i_sb);
struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_mds_request *req;
int err;
if (ceph_snap(old_dir) != ceph_snap(new_dir))
return -EXDEV;
if (ceph_snap(old_dir) != CEPH_NOSNAP ||
ceph_snap(new_dir) != CEPH_NOSNAP)
return -EROFS;
dout("rename dir %p dentry %p to dir %p dentry %p\n",
old_dir, old_dentry, new_dir, new_dentry);
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_RENAME, USE_AUTH_MDS);
if (IS_ERR(req))
return PTR_ERR(req);
req->r_dentry = dget(new_dentry);
req->r_num_caps = 2;
req->r_old_dentry = dget(old_dentry);
req->r_old_dentry_dir = ceph_get_dentry_parent_inode(old_dentry);
req->r_locked_dir = new_dir;
req->r_old_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_old_dentry_unless = CEPH_CAP_FILE_EXCL;
req->r_dentry_drop = CEPH_CAP_FILE_SHARED;
req->r_dentry_unless = CEPH_CAP_FILE_EXCL;
/* release LINK_SHARED on source inode (mds will lock it) */
req->r_old_inode_drop = CEPH_CAP_LINK_SHARED;
if (new_dentry->d_inode)
req->r_inode_drop = drop_caps_for_unlink(new_dentry->d_inode);
err = ceph_mdsc_do_request(mdsc, old_dir, req);
if (!err && !req->r_reply_info.head->is_dentry) {
/*
* Normally d_move() is done by fill_trace (called by
* do_request, above). If there is no trace, we need
* to do it here.
*/
/* d_move screws up d_subdirs order */
ceph_dir_clear_complete(new_dir);
d_move(old_dentry, new_dentry);
/* ensure target dentry is invalidated, despite
rehashing bug in vfs_rename_dir */
ceph_invalidate_dentry_lease(new_dentry);
}
ceph_mdsc_put_request(req);
return err;
}
/*
* Ensure a dentry lease will no longer revalidate.
*/
void ceph_invalidate_dentry_lease(struct dentry *dentry)
{
spin_lock(&dentry->d_lock);
dentry->d_time = jiffies;
ceph_dentry(dentry)->lease_shared_gen = 0;
spin_unlock(&dentry->d_lock);
}
/*
* Check if dentry lease is valid. If not, delete the lease. Try to
* renew if the lease is more than half up.
*/
static int dentry_lease_is_valid(struct dentry *dentry)
{
struct ceph_dentry_info *di;
struct ceph_mds_session *s;
int valid = 0;
u32 gen;
unsigned long ttl;
struct ceph_mds_session *session = NULL;
struct inode *dir = NULL;
u32 seq = 0;
spin_lock(&dentry->d_lock);
di = ceph_dentry(dentry);
if (di->lease_session) {
s = di->lease_session;
spin_lock(&s->s_gen_ttl_lock);
gen = s->s_cap_gen;
ttl = s->s_cap_ttl;
spin_unlock(&s->s_gen_ttl_lock);
if (di->lease_gen == gen &&
time_before(jiffies, dentry->d_time) &&
time_before(jiffies, ttl)) {
valid = 1;
if (di->lease_renew_after &&
time_after(jiffies, di->lease_renew_after)) {
/* we should renew */
dir = dentry->d_parent->d_inode;
session = ceph_get_mds_session(s);
seq = di->lease_seq;
di->lease_renew_after = 0;
di->lease_renew_from = jiffies;
}
}
}
spin_unlock(&dentry->d_lock);
if (session) {
ceph_mdsc_lease_send_msg(session, dir, dentry,
CEPH_MDS_LEASE_RENEW, seq);
ceph_put_mds_session(session);
}
dout("dentry_lease_is_valid - dentry %p = %d\n", dentry, valid);
return valid;
}
/*
* Check if directory-wide content lease/cap is valid.
*/
static int dir_lease_is_valid(struct inode *dir, struct dentry *dentry)
{
struct ceph_inode_info *ci = ceph_inode(dir);
struct ceph_dentry_info *di = ceph_dentry(dentry);
int valid = 0;
spin_lock(&ci->i_ceph_lock);
if (ci->i_shared_gen == di->lease_shared_gen)
valid = __ceph_caps_issued_mask(ci, CEPH_CAP_FILE_SHARED, 1);
spin_unlock(&ci->i_ceph_lock);
dout("dir_lease_is_valid dir %p v%u dentry %p v%u = %d\n",
dir, (unsigned)ci->i_shared_gen, dentry,
(unsigned)di->lease_shared_gen, valid);
return valid;
}
/*
* Check if cached dentry can be trusted.
*/
static int ceph_d_revalidate(struct dentry *dentry, unsigned int flags)
{
int valid = 0;
struct inode *dir;
if (flags & LOOKUP_RCU)
return -ECHILD;
dout("d_revalidate %p '%.*s' inode %p offset %lld\n", dentry,
dentry->d_name.len, dentry->d_name.name, dentry->d_inode,
ceph_dentry(dentry)->offset);
dir = ceph_get_dentry_parent_inode(dentry);
/* always trust cached snapped dentries, snapdir dentry */
if (ceph_snap(dir) != CEPH_NOSNAP) {
dout("d_revalidate %p '%.*s' inode %p is SNAPPED\n", dentry,
dentry->d_name.len, dentry->d_name.name, dentry->d_inode);
valid = 1;
} else if (dentry->d_inode &&
ceph_snap(dentry->d_inode) == CEPH_SNAPDIR) {
valid = 1;
} else if (dentry_lease_is_valid(dentry) ||
dir_lease_is_valid(dir, dentry)) {
valid = 1;
}
dout("d_revalidate %p %s\n", dentry, valid ? "valid" : "invalid");
if (valid)
ceph_dentry_lru_touch(dentry);
else
d_drop(dentry);
iput(dir);
return valid;
}
/*
* Release our ceph_dentry_info.
*/
static void ceph_d_release(struct dentry *dentry)
{
struct ceph_dentry_info *di = ceph_dentry(dentry);
dout("d_release %p\n", dentry);
ceph_dentry_lru_del(dentry);
if (di->lease_session)
ceph_put_mds_session(di->lease_session);
kmem_cache_free(ceph_dentry_cachep, di);
dentry->d_fsdata = NULL;
}
static int ceph_snapdir_d_revalidate(struct dentry *dentry,
unsigned int flags)
{
/*
* Eventually, we'll want to revalidate snapped metadata
* too... probably...
*/
return 1;
}
/*
* When the VFS prunes a dentry from the cache, we need to clear the
* complete flag on the parent directory.
*
* Called under dentry->d_lock.
*/
static void ceph_d_prune(struct dentry *dentry)
{
dout("ceph_d_prune %p\n", dentry);
/* do we have a valid parent? */
if (IS_ROOT(dentry))
return;
/* if we are not hashed, we don't affect dir's completeness */
if (d_unhashed(dentry))
return;
/*
* we hold d_lock, so d_parent is stable, and d_fsdata is never
* cleared until d_release
*/
ceph_dir_clear_complete(dentry->d_parent->d_inode);
}
/*
* read() on a dir. This weird interface hack only works if mounted
* with '-o dirstat'.
*/
static ssize_t ceph_read_dir(struct file *file, char __user *buf, size_t size,
loff_t *ppos)
{
struct ceph_file_info *cf = file->private_data;
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
int left;
const int bufsize = 1024;
if (!ceph_test_mount_opt(ceph_sb_to_client(inode->i_sb), DIRSTAT))
return -EISDIR;
if (!cf->dir_info) {
cf->dir_info = kmalloc(bufsize, GFP_NOFS);
if (!cf->dir_info)
return -ENOMEM;
cf->dir_info_len =
snprintf(cf->dir_info, bufsize,
"entries: %20lld\n"
" files: %20lld\n"
" subdirs: %20lld\n"
"rentries: %20lld\n"
" rfiles: %20lld\n"
" rsubdirs: %20lld\n"
"rbytes: %20lld\n"
"rctime: %10ld.%09ld\n",
ci->i_files + ci->i_subdirs,
ci->i_files,
ci->i_subdirs,
ci->i_rfiles + ci->i_rsubdirs,
ci->i_rfiles,
ci->i_rsubdirs,
ci->i_rbytes,
(long)ci->i_rctime.tv_sec,
(long)ci->i_rctime.tv_nsec);
}
if (*ppos >= cf->dir_info_len)
return 0;
size = min_t(unsigned, size, cf->dir_info_len-*ppos);
left = copy_to_user(buf, cf->dir_info + *ppos, size);
if (left == size)
return -EFAULT;
*ppos += (size - left);
return size - left;
}
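/*
 * Usage note: on a ceph mount with '-o dirstat', a plain read(2) on a
 * directory fd returns the stats text built above. Minimal userspace
 * sketch (the mount path is illustrative):
 */
#if 0 /* illustrative userspace sketch, not built with this file */
#define _GNU_SOURCE
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	char buf[1024];
	ssize_t n;
	int fd = open("/mnt/ceph/somedir", O_RDONLY | O_DIRECTORY);

	if (fd < 0)
		return 1;
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, n, stdout);
	close(fd);
	return 0;
}
#endif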
/*
* an fsync() on a dir will wait for any uncommitted directory
* operations to commit.
*/
static int ceph_dir_fsync(struct file *file, loff_t start, loff_t end,
int datasync)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct list_head *head = &ci->i_unsafe_dirops;
struct ceph_mds_request *req;
u64 last_tid;
int ret = 0;
dout("dir_fsync %p\n", inode);
ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
if (ret)
return ret;
mutex_lock(&inode->i_mutex);
spin_lock(&ci->i_unsafe_lock);
if (list_empty(head))
goto out;
req = list_entry(head->prev,
struct ceph_mds_request, r_unsafe_dir_item);
last_tid = req->r_tid;
do {
ceph_mdsc_get_request(req);
spin_unlock(&ci->i_unsafe_lock);
dout("dir_fsync %p wait on tid %llu (until %llu)\n",
inode, req->r_tid, last_tid);
if (req->r_timeout) {
ret = wait_for_completion_timeout(
&req->r_safe_completion, req->r_timeout);
if (ret > 0)
ret = 0;
else if (ret == 0)
ret = -EIO; /* timed out */
} else {
wait_for_completion(&req->r_safe_completion);
}
ceph_mdsc_put_request(req);
spin_lock(&ci->i_unsafe_lock);
if (ret || list_empty(head))
break;
req = list_entry(head->next,
struct ceph_mds_request, r_unsafe_dir_item);
} while (req->r_tid < last_tid);
out:
spin_unlock(&ci->i_unsafe_lock);
mutex_unlock(&inode->i_mutex);
return ret;
}
/*
* We maintain a private dentry LRU.
*
* FIXME: this needs to be changed to a per-mds lru to be useful.
*/
void ceph_dentry_lru_add(struct dentry *dn)
{
struct ceph_dentry_info *di = ceph_dentry(dn);
struct ceph_mds_client *mdsc;
dout("dentry_lru_add %p %p '%.*s'\n", di, dn,
dn->d_name.len, dn->d_name.name);
mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_lru_lock);
list_add_tail(&di->lru, &mdsc->dentry_lru);
mdsc->num_dentry++;
spin_unlock(&mdsc->dentry_lru_lock);
}
void ceph_dentry_lru_touch(struct dentry *dn)
{
struct ceph_dentry_info *di = ceph_dentry(dn);
struct ceph_mds_client *mdsc;
dout("dentry_lru_touch %p %p '%.*s' (offset %lld)\n", di, dn,
dn->d_name.len, dn->d_name.name, di->offset);
mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_lru_lock);
list_move_tail(&di->lru, &mdsc->dentry_lru);
spin_unlock(&mdsc->dentry_lru_lock);
}
void ceph_dentry_lru_del(struct dentry *dn)
{
struct ceph_dentry_info *di = ceph_dentry(dn);
struct ceph_mds_client *mdsc;
dout("dentry_lru_del %p %p '%.*s'\n", di, dn,
dn->d_name.len, dn->d_name.name);
mdsc = ceph_sb_to_client(dn->d_sb)->mdsc;
spin_lock(&mdsc->dentry_lru_lock);
list_del_init(&di->lru);
mdsc->num_dentry--;
spin_unlock(&mdsc->dentry_lru_lock);
}
/*
* Return name hash for a given dentry. This is dependent on
* the parent directory's hash function.
*/
unsigned ceph_dentry_hash(struct inode *dir, struct dentry *dn)
{
struct ceph_inode_info *dci = ceph_inode(dir);
switch (dci->i_dir_layout.dl_dir_hash) {
case 0: /* for backward compat */
case CEPH_STR_HASH_LINUX:
return dn->d_name.hash;
default:
return ceph_str_hash(dci->i_dir_layout.dl_dir_hash,
dn->d_name.name, dn->d_name.len);
}
}
const struct file_operations ceph_dir_fops = {
.read = ceph_read_dir,
.iterate = ceph_readdir,
.llseek = ceph_dir_llseek,
.open = ceph_open,
.release = ceph_release,
.unlocked_ioctl = ceph_ioctl,
.fsync = ceph_dir_fsync,
};
const struct inode_operations ceph_dir_iops = {
.lookup = ceph_lookup,
.permission = ceph_permission,
.getattr = ceph_getattr,
.setattr = ceph_setattr,
.setxattr = ceph_setxattr,
.getxattr = ceph_getxattr,
.listxattr = ceph_listxattr,
.removexattr = ceph_removexattr,
.mknod = ceph_mknod,
.symlink = ceph_symlink,
.mkdir = ceph_mkdir,
.link = ceph_link,
.unlink = ceph_unlink,
.rmdir = ceph_unlink,
.rename = ceph_rename,
.create = ceph_create,
.atomic_open = ceph_atomic_open,
};
const struct dentry_operations ceph_dentry_ops = {
.d_revalidate = ceph_d_revalidate,
.d_release = ceph_d_release,
.d_prune = ceph_d_prune,
};
const struct dentry_operations ceph_snapdir_dentry_ops = {
.d_revalidate = ceph_snapdir_d_revalidate,
.d_release = ceph_d_release,
};
const struct dentry_operations ceph_snap_dentry_ops = {
.d_release = ceph_d_release,
.d_prune = ceph_d_prune,
};
| gpl-2.0 |
jonas2295/m7_kernel | net/ipv6/raw.c | 342 | 29073 | /*
* RAW sockets for IPv6
* Linux INET6 implementation
*
* Authors:
* Pedro Roque <roque@di.fc.ul.pt>
*
* Adapted from linux/net/ipv4/raw.c
*
* Fixes:
* Hideaki YOSHIFUJI : sin6_scope_id support
* YOSHIFUJI,H.@USAGI : raw checksum (RFC2292(bis) compliance)
* Kazunori MIYAZAWA @USAGI: change process style to use ip6_append_data
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>
#include <linux/skbuff.h>
#include <linux/compat.h>
#include <asm/uaccess.h>
#include <asm/ioctls.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ipv6.h>
#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/ip6_checksum.h>
#include <net/addrconf.h>
#include <net/transp_v6.h>
#include <net/udp.h>
#include <net/inet_common.h>
#include <net/tcp_states.h>
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
#include <net/mip6.h>
#endif
#include <linux/mroute6.h>
#include <net/raw.h>
#include <net/rawv6.h>
#include <net/xfrm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/export.h>
static struct raw_hashinfo raw_v6_hashinfo = {
.lock = __RW_LOCK_UNLOCKED(raw_v6_hashinfo.lock),
};
static struct sock *__raw_v6_lookup(struct net *net, struct sock *sk,
unsigned short num, const struct in6_addr *loc_addr,
const struct in6_addr *rmt_addr, int dif)
{
struct hlist_node *node;
int is_multicast = ipv6_addr_is_multicast(loc_addr);
sk_for_each_from(sk, node)
if (inet_sk(sk)->inet_num == num) {
struct ipv6_pinfo *np = inet6_sk(sk);
if (!net_eq(sock_net(sk), net))
continue;
if (!ipv6_addr_any(&np->daddr) &&
!ipv6_addr_equal(&np->daddr, rmt_addr))
continue;
if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
continue;
if (!ipv6_addr_any(&np->rcv_saddr)) {
if (ipv6_addr_equal(&np->rcv_saddr, loc_addr))
goto found;
if (is_multicast &&
inet6_mc_check(sk, loc_addr, rmt_addr))
goto found;
continue;
}
goto found;
}
sk = NULL;
found:
return sk;
}
static __inline__ int icmpv6_filter(struct sock *sk, struct sk_buff *skb)
{
struct icmp6hdr *icmph;
struct raw6_sock *rp = raw6_sk(sk);
if (pskb_may_pull(skb, sizeof(struct icmp6hdr))) {
__u32 *data = &rp->filter.data[0];
int bit_nr;
icmph = (struct icmp6hdr *) skb->data;
bit_nr = icmph->icmp6_type;
return (data[bit_nr >> 5] & (1 << (bit_nr & 31))) != 0;
}
return 0;
}
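/*
 * Userspace view of the bitmap tested above: the ICMP6_FILTER socket
 * option (handled in rawv6_seticmpfilter() below) carries the same
 * 256-bit type mask, where a set bit blocks that icmp6_type. A minimal
 * sketch, assuming the RFC 3542 helpers from <netinet/icmp6.h>; raw
 * sockets need CAP_NET_RAW:
 */
#if 0 /* illustrative userspace sketch, not built with this file */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>

int main(void)
{
	struct icmp6_filter filter;
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);

	if (fd < 0)
		return 1;
	ICMP6_FILTER_SETPASSALL(&filter);
	/* block echo replies: sets the bit that icmpv6_filter() tests */
	ICMP6_FILTER_SETBLOCK(ICMP6_ECHO_REPLY, &filter);
	return setsockopt(fd, IPPROTO_ICMPV6, ICMP6_FILTER,
			  &filter, sizeof(filter)) ? 1 : 0;
}
#endif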
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
typedef int mh_filter_t(struct sock *sock, struct sk_buff *skb);
static mh_filter_t __rcu *mh_filter __read_mostly;
int rawv6_mh_filter_register(mh_filter_t filter)
{
rcu_assign_pointer(mh_filter, filter);
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_register);
int rawv6_mh_filter_unregister(mh_filter_t filter)
{
RCU_INIT_POINTER(mh_filter, NULL);
synchronize_rcu();
return 0;
}
EXPORT_SYMBOL(rawv6_mh_filter_unregister);
#endif
static int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
{
const struct in6_addr *saddr;
const struct in6_addr *daddr;
struct sock *sk;
int delivered = 0;
__u8 hash;
struct net *net;
saddr = &ipv6_hdr(skb)->saddr;
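/* daddr immediately follows saddr in struct ipv6hdr */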
daddr = saddr + 1;
hash = nexthdr & (MAX_INET_PROTOS - 1);
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
if (sk == NULL)
goto out;
net = dev_net(skb->dev);
sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);
while (sk) {
int filtered;
delivered = 1;
switch (nexthdr) {
case IPPROTO_ICMPV6:
filtered = icmpv6_filter(sk, skb);
break;
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
case IPPROTO_MH:
{
mh_filter_t *filter;
filter = rcu_dereference(mh_filter);
filtered = filter ? (*filter)(sk, skb) : 0;
break;
}
#endif
default:
filtered = 0;
break;
}
if (filtered < 0)
break;
if (filtered == 0) {
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
if (clone) {
nf_reset(clone);
rawv6_rcv(sk, clone);
}
}
sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
IP6CB(skb)->iif);
}
out:
read_unlock(&raw_v6_hashinfo.lock);
return delivered;
}
int raw6_local_deliver(struct sk_buff *skb, int nexthdr)
{
struct sock *raw_sk;
raw_sk = sk_head(&raw_v6_hashinfo.ht[nexthdr & (MAX_INET_PROTOS - 1)]);
if (raw_sk && !ipv6_raw_deliver(skb, nexthdr))
raw_sk = NULL;
return raw_sk != NULL;
}
static int rawv6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *addr = (struct sockaddr_in6 *) uaddr;
__be32 v4addr = 0;
int addr_type;
int err;
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
addr_type = ipv6_addr_type(&addr->sin6_addr);
if (addr_type == IPV6_ADDR_MAPPED)
return -EADDRNOTAVAIL;
lock_sock(sk);
err = -EINVAL;
if (sk->sk_state != TCP_CLOSE)
goto out;
rcu_read_lock();
if (addr_type != IPV6_ADDR_ANY) {
struct net_device *dev = NULL;
if (addr_type & IPV6_ADDR_LINKLOCAL) {
if (addr_len >= sizeof(struct sockaddr_in6) &&
addr->sin6_scope_id) {
sk->sk_bound_dev_if = addr->sin6_scope_id;
}
if (!sk->sk_bound_dev_if)
goto out_unlock;
err = -ENODEV;
dev = dev_get_by_index_rcu(sock_net(sk),
sk->sk_bound_dev_if);
if (!dev)
goto out_unlock;
}
v4addr = LOOPBACK4_IPV6;
if (!(addr_type & IPV6_ADDR_MULTICAST)) {
err = -EADDRNOTAVAIL;
if (!ipv6_chk_addr(sock_net(sk), &addr->sin6_addr,
dev, 0)) {
goto out_unlock;
}
}
}
inet->inet_rcv_saddr = inet->inet_saddr = v4addr;
np->rcv_saddr = addr->sin6_addr;
if (!(addr_type & IPV6_ADDR_MULTICAST))
np->saddr = addr->sin6_addr;
err = 0;
out_unlock:
rcu_read_unlock();
out:
release_sock(sk);
return err;
}
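/*
 * Userspace counterpart of the bind path above: binding a raw ICMPv6
 * socket to a link-local address needs a scope id (interface index),
 * which is what the sk_bound_dev_if checks enforce. Sketch with an
 * illustrative address and interface name; needs CAP_NET_RAW:
 */
#if 0 /* illustrative userspace sketch, not built with this file */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <net/if.h>

int main(void)
{
	struct sockaddr_in6 sa;
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);

	if (fd < 0)
		return 1;
	memset(&sa, 0, sizeof(sa));
	sa.sin6_family = AF_INET6;
	inet_pton(AF_INET6, "fe80::1", &sa.sin6_addr);
	/* without a scope id the kernel rejects the bind (-EINVAL) */
	sa.sin6_scope_id = if_nametoindex("eth0");
	return bind(fd, (struct sockaddr *)&sa, sizeof(sa)) ? 1 : 0;
}
#endif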
static void rawv6_err(struct sock *sk, struct sk_buff *skb,
struct inet6_skb_parm *opt,
u8 type, u8 code, int offset, __be32 info)
{
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
int err;
int harderr;
if (!np->recverr && sk->sk_state != TCP_ESTABLISHED)
return;
harderr = icmpv6_err_convert(type, code, &err);
if (type == ICMPV6_PKT_TOOBIG)
harderr = (np->pmtudisc == IPV6_PMTUDISC_DO);
if (np->recverr) {
u8 *payload = skb->data;
if (!inet->hdrincl)
payload += offset;
ipv6_icmp_error(sk, skb, err, 0, ntohl(info), payload);
}
if (np->recverr || harderr) {
sk->sk_err = err;
sk->sk_error_report(sk);
}
}
void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
u8 type, u8 code, int inner_offset, __be32 info)
{
struct sock *sk;
int hash;
const struct in6_addr *saddr, *daddr;
struct net *net;
hash = nexthdr & (RAW_HTABLE_SIZE - 1);
read_lock(&raw_v6_hashinfo.lock);
sk = sk_head(&raw_v6_hashinfo.ht[hash]);
if (sk != NULL) {
const struct ipv6hdr *ip6h = (const struct ipv6hdr *)skb->data;
saddr = &ip6h->saddr;
daddr = &ip6h->daddr;
net = dev_net(skb->dev);
while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
IP6CB(skb)->iif))) {
rawv6_err(sk, skb, NULL, type, code,
inner_offset, info);
sk = sk_next(sk);
}
}
read_unlock(&raw_v6_hashinfo.lock);
}
static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
skb_dst_drop(skb);
if (sock_queue_rcv_skb(sk, skb) < 0) {
kfree_skb(skb);
return NET_RX_DROP;
}
return 0;
}
int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
{
struct inet_sock *inet = inet_sk(sk);
struct raw6_sock *rp = raw6_sk(sk);
if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
if (!rp->checksum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
if (skb->ip_summed == CHECKSUM_COMPLETE) {
skb_postpull_rcsum(skb, skb_network_header(skb),
skb_network_header_len(skb));
if (!csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len, inet->inet_num, skb->csum))
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if (!skb_csum_unnecessary(skb))
skb->csum = ~csum_unfold(csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
&ipv6_hdr(skb)->daddr,
skb->len,
inet->inet_num, 0));
if (inet->hdrincl) {
if (skb_checksum_complete(skb)) {
atomic_inc(&sk->sk_drops);
kfree_skb(skb);
return NET_RX_DROP;
}
}
rawv6_rcv_skb(sk, skb);
return 0;
}
static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len,
int noblock, int flags, int *addr_len)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)msg->msg_name;
struct sk_buff *skb;
size_t copied;
int err;
if (flags & MSG_OOB)
return -EOPNOTSUPP;
if (addr_len)
*addr_len = sizeof(*sin6);
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len);
if (np->rxpmtu && np->rxopt.bits.rxpmtu)
return ipv6_recv_rxpmtu(sk, msg, len);
skb = skb_recv_datagram(sk, flags, noblock, &err);
if (!skb)
goto out;
copied = skb->len;
if (copied > len) {
copied = len;
msg->msg_flags |= MSG_TRUNC;
}
if (skb_csum_unnecessary(skb)) {
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else if (msg->msg_flags&MSG_TRUNC) {
if (__skb_checksum_complete(skb))
goto csum_copy_err;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else {
err = skb_copy_and_csum_datagram_iovec(skb, 0, msg->msg_iov);
if (err == -EINVAL)
goto csum_copy_err;
}
if (err)
goto out_free;
if (sin6) {
sin6->sin6_family = AF_INET6;
sin6->sin6_port = 0;
sin6->sin6_addr = ipv6_hdr(skb)->saddr;
sin6->sin6_flowinfo = 0;
sin6->sin6_scope_id = 0;
if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
sin6->sin6_scope_id = IP6CB(skb)->iif;
}
sock_recv_ts_and_drops(msg, sk, skb);
if (np->rxopt.all)
datagram_recv_ctl(sk, msg, skb);
err = copied;
if (flags & MSG_TRUNC)
err = skb->len;
out_free:
skb_free_datagram(sk, skb);
out:
return err;
csum_copy_err:
skb_kill_datagram(sk, skb, flags);
err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
goto out;
}
static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
struct raw6_sock *rp)
{
struct sk_buff *skb;
int err = 0;
int offset;
int len;
int total_len;
__wsum tmp_csum;
__sum16 csum;
if (!rp->checksum)
goto send;
if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
goto out;
offset = rp->offset;
total_len = inet_sk(sk)->cork.base.length;
if (offset >= total_len - 1) {
err = -EINVAL;
ip6_flush_pending_frames(sk);
goto out;
}
if (skb_queue_len(&sk->sk_write_queue) == 1) {
tmp_csum = skb->csum;
} else {
struct sk_buff *csum_skb = NULL;
tmp_csum = 0;
skb_queue_walk(&sk->sk_write_queue, skb) {
tmp_csum = csum_add(tmp_csum, skb->csum);
if (csum_skb)
continue;
len = skb->len - skb_transport_offset(skb);
if (offset >= len) {
offset -= len;
continue;
}
csum_skb = skb;
}
skb = csum_skb;
}
#ifdef CONFIG_HTC_NETWORK_MODIFY
/* don't dereference a NULL skb below: flush queued frames and bail */
if (IS_ERR(skb) || !skb) {
printk(KERN_ERR "[NET] skb is NULL in %s!\n", __func__);
err = -EINVAL;
ip6_flush_pending_frames(sk);
goto out;
}
#endif
offset += skb_transport_offset(skb);
if (skb_copy_bits(skb, offset, &csum, 2))
BUG();
if (unlikely(csum))
tmp_csum = csum_sub(tmp_csum, csum_unfold(csum));
csum = csum_ipv6_magic(&fl6->saddr, &fl6->daddr,
total_len, fl6->flowi6_proto, tmp_csum);
if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
csum = CSUM_MANGLED_0;
if (skb_store_bits(skb, offset, &csum, 2))
BUG();
send:
err = ip6_push_pending_frames(sk);
out:
return err;
}
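/*
 * The CSUM_MANGLED_0 substitution above follows RFC 768: a UDP
 * checksum that computes to zero is sent as all-ones, because zero on
 * the wire means "no checksum". A standalone sketch of 1's-complement
 * folding with that rule (the fold loop mirrors csum_fold()):
 */
#if 0 /* illustrative sketch, not built with this file */
#include <stdio.h>
#include <stdint.h>

static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint32_t sum = 0x1fffe;	/* example partial sum */
	uint16_t csum = csum_fold(sum);

	if (csum == 0)		/* the CSUM_MANGLED_0 case */
		csum = 0xffff;
	printf("checksum: %#06x\n", csum);
	return 0;
}
#endif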
static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
struct flowi6 *fl6, struct dst_entry **dstp,
unsigned int flags)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct ipv6hdr *iph;
struct sk_buff *skb;
int err;
struct rt6_info *rt = (struct rt6_info *)*dstp;
int hlen = LL_RESERVED_SPACE(rt->dst.dev);
int tlen = rt->dst.dev->needed_tailroom;
if (length > rt->dst.dev->mtu) {
ipv6_local_error(sk, EMSGSIZE, fl6, rt->dst.dev->mtu);
return -EMSGSIZE;
}
if (flags&MSG_PROBE)
goto out;
skb = sock_alloc_send_skb(sk,
length + hlen + tlen + 15,
flags & MSG_DONTWAIT, &err);
if (skb == NULL)
goto error;
skb_reserve(skb, hlen);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
skb_dst_set(skb, &rt->dst);
*dstp = NULL;
skb_put(skb, length);
skb_reset_network_header(skb);
iph = ipv6_hdr(skb);
skb->ip_summed = CHECKSUM_NONE;
skb->transport_header = skb->network_header;
err = memcpy_fromiovecend((void *)iph, from, 0, length);
if (err)
goto error_fault;
IP6_UPD_PO_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
if (err)
goto error;
out:
return 0;
error_fault:
err = -EFAULT;
kfree_skb(skb);
error:
IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
if (err == -ENOBUFS && !np->recverr)
err = 0;
return err;
}
static int rawv6_probe_proto_opt(struct flowi6 *fl6, struct msghdr *msg)
{
struct iovec *iov;
u8 __user *type = NULL;
u8 __user *code = NULL;
u8 len = 0;
int probed = 0;
int i;
if (!msg->msg_iov)
return 0;
for (i = 0; i < msg->msg_iovlen; i++) {
iov = &msg->msg_iov[i];
if (!iov)
continue;
switch (fl6->flowi6_proto) {
case IPPROTO_ICMPV6:
if (iov->iov_base && iov->iov_len < 1)
break;
if (!type) {
type = iov->iov_base;
if (iov->iov_len > 1)
code = type + 1;
} else if (!code)
code = iov->iov_base;
if (type && code) {
if (get_user(fl6->fl6_icmp_type, type) ||
get_user(fl6->fl6_icmp_code, code))
return -EFAULT;
probed = 1;
}
break;
case IPPROTO_MH:
if (iov->iov_base && iov->iov_len < 1)
break;
if (iov->iov_len > 2 - len) {
u8 __user *p = iov->iov_base;
if (get_user(fl6->fl6_mh_type, &p[2 - len]))
return -EFAULT;
probed = 1;
} else
len += iov->iov_len;
break;
default:
probed = 1;
break;
}
if (probed)
break;
}
return 0;
}
static int rawv6_sendmsg(struct kiocb *iocb, struct sock *sk,
struct msghdr *msg, size_t len)
{
struct ipv6_txoptions opt_space;
struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *) msg->msg_name;
struct in6_addr *daddr, *final_p, final;
struct inet_sock *inet = inet_sk(sk);
struct ipv6_pinfo *np = inet6_sk(sk);
struct raw6_sock *rp = raw6_sk(sk);
struct ipv6_txoptions *opt = NULL;
struct ip6_flowlabel *flowlabel = NULL;
struct dst_entry *dst = NULL;
struct flowi6 fl6;
int addr_len = msg->msg_namelen;
int hlimit = -1;
int tclass = -1;
int dontfrag = -1;
u16 proto;
int err;
if (len > INT_MAX)
return -EMSGSIZE;
if (msg->msg_flags & MSG_OOB)
return -EOPNOTSUPP;
memset(&fl6, 0, sizeof(fl6));
fl6.flowi6_mark = sk->sk_mark;
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
return -EINVAL;
if (sin6->sin6_family && sin6->sin6_family != AF_INET6)
return -EAFNOSUPPORT;
proto = ntohs(sin6->sin6_port);
if (!proto)
proto = inet->inet_num;
else if (proto != inet->inet_num)
return -EINVAL;
if (proto > 255)
return -EINVAL;
daddr = &sin6->sin6_addr;
if (np->sndflow) {
fl6.flowlabel = sin6->sin6_flowinfo&IPV6_FLOWINFO_MASK;
if (fl6.flowlabel&IPV6_FLOWLABEL_MASK) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
daddr = &flowlabel->dst;
}
}
if (sk->sk_state == TCP_ESTABLISHED &&
ipv6_addr_equal(daddr, &np->daddr))
daddr = &np->daddr;
if (addr_len >= sizeof(struct sockaddr_in6) &&
sin6->sin6_scope_id &&
ipv6_addr_type(daddr)&IPV6_ADDR_LINKLOCAL)
fl6.flowi6_oif = sin6->sin6_scope_id;
} else {
if (sk->sk_state != TCP_ESTABLISHED)
return -EDESTADDRREQ;
proto = inet->inet_num;
daddr = &np->daddr;
fl6.flowlabel = np->flow_label;
}
if (fl6.flowi6_oif == 0)
fl6.flowi6_oif = sk->sk_bound_dev_if;
if (msg->msg_controllen) {
opt = &opt_space;
memset(opt, 0, sizeof(struct ipv6_txoptions));
opt->tot_len = sizeof(struct ipv6_txoptions);
err = datagram_send_ctl(sock_net(sk), sk, msg, &fl6, opt,
&hlimit, &tclass, &dontfrag);
if (err < 0) {
fl6_sock_release(flowlabel);
return err;
}
if ((fl6.flowlabel&IPV6_FLOWLABEL_MASK) && !flowlabel) {
flowlabel = fl6_sock_lookup(sk, fl6.flowlabel);
if (flowlabel == NULL)
return -EINVAL;
}
if (!(opt->opt_nflen|opt->opt_flen))
opt = NULL;
}
if (opt == NULL)
opt = np->opt;
if (flowlabel)
opt = fl6_merge_options(&opt_space, flowlabel, opt);
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = proto;
err = rawv6_probe_proto_opt(&fl6, msg);
if (err)
goto out;
if (!ipv6_addr_any(daddr))
fl6.daddr = *daddr;
else
fl6.daddr.s6_addr[15] = 0x1;
if (ipv6_addr_any(&fl6.saddr) && !ipv6_addr_any(&np->saddr))
fl6.saddr = np->saddr;
final_p = fl6_update_dst(&fl6, opt, &final);
if (!fl6.flowi6_oif && ipv6_addr_is_multicast(&fl6.daddr))
fl6.flowi6_oif = np->mcast_oif;
else if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->ucast_oif;
security_sk_classify_flow(sk, flowi6_to_flowi(&fl6));
dst = ip6_dst_lookup_flow(sk, &fl6, final_p, true);
if (IS_ERR(dst)) {
err = PTR_ERR(dst);
goto out;
}
if (hlimit < 0) {
if (ipv6_addr_is_multicast(&fl6.daddr))
hlimit = np->mcast_hops;
else
hlimit = np->hop_limit;
#ifdef CONFIG_HTC_NETWORK_MODIFY
if (IS_ERR(dst) || (!dst))
printk(KERN_ERR "[NET] dst is NULL in %s!\n", __func__);
#endif
if (hlimit < 0)
hlimit = ip6_dst_hoplimit(dst);
}
if (tclass < 0)
tclass = np->tclass;
if (dontfrag < 0)
dontfrag = np->dontfrag;
if (msg->msg_flags&MSG_CONFIRM)
goto do_confirm;
back_from_confirm:
if (inet->hdrincl)
err = rawv6_send_hdrinc(sk, msg->msg_iov, len, &fl6, &dst, msg->msg_flags);
else {
lock_sock(sk);
err = ip6_append_data(sk, ip_generic_getfrag, msg->msg_iov,
len, 0, hlimit, tclass, opt, &fl6, (struct rt6_info*)dst,
msg->msg_flags, dontfrag);
if (err)
ip6_flush_pending_frames(sk);
else if (!(msg->msg_flags & MSG_MORE))
err = rawv6_push_pending_frames(sk, &fl6, rp);
release_sock(sk);
}
done:
dst_release(dst);
out:
fl6_sock_release(flowlabel);
return err < 0 ? err : len;
do_confirm:
dst_confirm(dst);
if (!(msg->msg_flags & MSG_PROBE) || len)
goto back_from_confirm;
err = 0;
goto done;
}
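/*
 * End-to-end userspace sketch of this send path: on an ICMPv6 raw
 * socket the kernel checksums at offset 2 by default (rawv6_init_sk()
 * below), so an echo request can be sent with icmp6_cksum left zero.
 * Destination address is illustrative; needs CAP_NET_RAW:
 */
#if 0 /* illustrative userspace sketch, not built with this file */
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/icmp6.h>
#include <arpa/inet.h>

int main(void)
{
	struct sockaddr_in6 dst;
	struct icmp6_hdr hdr;
	int fd = socket(AF_INET6, SOCK_RAW, IPPROTO_ICMPV6);

	if (fd < 0)
		return 1;
	memset(&dst, 0, sizeof(dst));
	dst.sin6_family = AF_INET6;
	inet_pton(AF_INET6, "2001:db8::1", &dst.sin6_addr);

	memset(&hdr, 0, sizeof(hdr));
	hdr.icmp6_type = ICMP6_ECHO_REQUEST;
	/* icmp6_cksum stays 0: filled by rawv6_push_pending_frames() */
	return sendto(fd, &hdr, sizeof(hdr), 0,
		      (struct sockaddr *)&dst, sizeof(dst)) < 0;
}
#endif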
static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
char __user *optval, int optlen)
{
switch (optname) {
case ICMPV6_FILTER:
if (optlen > sizeof(struct icmp6_filter))
optlen = sizeof(struct icmp6_filter);
if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
return -EFAULT;
return 0;
default:
return -ENOPROTOOPT;
}
return 0;
}
static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
int len;
switch (optname) {
case ICMPV6_FILTER:
if (get_user(len, optlen))
return -EFAULT;
if (len < 0)
return -EINVAL;
if (len > sizeof(struct icmp6_filter))
len = sizeof(struct icmp6_filter);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
return -EFAULT;
return 0;
default:
return -ENOPROTOOPT;
}
return 0;
}
static int do_rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct raw6_sock *rp = raw6_sk(sk);
int val;
if (get_user(val, (int __user *)optval))
return -EFAULT;
switch (optname) {
case IPV6_CHECKSUM:
if (inet_sk(sk)->inet_num == IPPROTO_ICMPV6 &&
level == IPPROTO_IPV6) {
return -EINVAL;
}
if (val > 0 && (val&1))
return -EINVAL;
if (val < 0) {
rp->checksum = 0;
} else {
rp->checksum = 1;
rp->offset = val;
}
return 0;
default:
return -ENOPROTOOPT;
}
}
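/*
 * Userspace view of IPV6_CHECKSUM as handled above: a non-negative
 * even value asks the kernel to fill/verify a checksum at that offset
 * into the payload; -1 turns it off. Sketch for a hypothetical
 * protocol number 222 with its checksum at offset 2, assuming glibc
 * exposes SOL_RAW and IPV6_CHECKSUM:
 */
#if 0 /* illustrative userspace sketch, not built with this file */
#include <sys/socket.h>
#include <netinet/in.h>

int main(void)
{
	int offset = 2;		/* must be even, see the check above */
	int fd = socket(AF_INET6, SOCK_RAW, 222);

	if (fd < 0)
		return 1;
	return setsockopt(fd, SOL_RAW, IPV6_CHECKSUM,
			  &offset, sizeof(offset)) ? 1 : 0;
}
#endif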
static int rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return ipv6_setsockopt(sk, level, optname, optval, optlen);
}
return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_seticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return compat_ipv6_setsockopt(sk, level, optname,
optval, optlen);
}
return do_rawv6_setsockopt(sk, level, optname, optval, optlen);
}
#endif
static int do_rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
struct raw6_sock *rp = raw6_sk(sk);
int val, len;
if (get_user(len, optlen))
return -EFAULT;
switch (optname) {
case IPV6_CHECKSUM:
if (rp->checksum == 0)
val = -1;
else
val = rp->offset;
break;
default:
return -ENOPROTOOPT;
}
len = min_t(unsigned int, sizeof(int), len);
if (put_user(len, optlen))
return -EFAULT;
if (copy_to_user(optval, &val, len))
return -EFAULT;
return 0;
}
static int rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return ipv6_getsockopt(sk, level, optname, optval, optlen);
}
return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen)
{
switch (level) {
case SOL_RAW:
break;
case SOL_ICMPV6:
if (inet_sk(sk)->inet_num != IPPROTO_ICMPV6)
return -EOPNOTSUPP;
return rawv6_geticmpfilter(sk, level, optname, optval, optlen);
case SOL_IPV6:
if (optname == IPV6_CHECKSUM)
break;
default:
return compat_ipv6_getsockopt(sk, level, optname,
optval, optlen);
}
return do_rawv6_getsockopt(sk, level, optname, optval, optlen);
}
#endif
static int rawv6_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ: {
int amount = sk_wmem_alloc_get(sk);
return put_user(amount, (int __user *)arg);
}
case SIOCINQ: {
struct sk_buff *skb;
int amount = 0;
spin_lock_bh(&sk->sk_receive_queue.lock);
skb = skb_peek(&sk->sk_receive_queue);
if (skb != NULL)
amount = skb->tail - skb->transport_header;
spin_unlock_bh(&sk->sk_receive_queue.lock);
return put_user(amount, (int __user *)arg);
}
default:
#ifdef CONFIG_IPV6_MROUTE
return ip6mr_ioctl(sk, cmd, (void __user *)arg);
#else
return -ENOIOCTLCMD;
#endif
}
}
#ifdef CONFIG_COMPAT
static int compat_rawv6_ioctl(struct sock *sk, unsigned int cmd, unsigned long arg)
{
switch (cmd) {
case SIOCOUTQ:
case SIOCINQ:
return -ENOIOCTLCMD;
default:
#ifdef CONFIG_IPV6_MROUTE
return ip6mr_compat_ioctl(sk, cmd, compat_ptr(arg));
#else
return -ENOIOCTLCMD;
#endif
}
}
#endif
static void rawv6_close(struct sock *sk, long timeout)
{
if (inet_sk(sk)->inet_num == IPPROTO_RAW)
ip6_ra_control(sk, -1);
ip6mr_sk_done(sk);
sk_common_release(sk);
}
static void raw6_destroy(struct sock *sk)
{
lock_sock(sk);
ip6_flush_pending_frames(sk);
release_sock(sk);
inet6_destroy_sock(sk);
}
static int rawv6_init_sk(struct sock *sk)
{
struct raw6_sock *rp = raw6_sk(sk);
switch (inet_sk(sk)->inet_num) {
case IPPROTO_ICMPV6:
rp->checksum = 1;
rp->offset = 2;
break;
case IPPROTO_MH:
rp->checksum = 1;
rp->offset = 4;
break;
default:
break;
}
return 0;
}
struct proto rawv6_prot = {
.name = "RAWv6",
.owner = THIS_MODULE,
.close = rawv6_close,
.destroy = raw6_destroy,
.connect = ip6_datagram_connect,
.disconnect = udp_disconnect,
.ioctl = rawv6_ioctl,
.init = rawv6_init_sk,
.setsockopt = rawv6_setsockopt,
.getsockopt = rawv6_getsockopt,
.sendmsg = rawv6_sendmsg,
.recvmsg = rawv6_recvmsg,
.bind = rawv6_bind,
.backlog_rcv = rawv6_rcv_skb,
.hash = raw_hash_sk,
.unhash = raw_unhash_sk,
.obj_size = sizeof(struct raw6_sock),
.h.raw_hash = &raw_v6_hashinfo,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_rawv6_setsockopt,
.compat_getsockopt = compat_rawv6_getsockopt,
.compat_ioctl = compat_rawv6_ioctl,
#endif
};
#ifdef CONFIG_PROC_FS
static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
{
struct ipv6_pinfo *np = inet6_sk(sp);
const struct in6_addr *dest, *src;
__u16 destp, srcp;
dest = &np->daddr;
src = &np->rcv_saddr;
destp = 0;
srcp = inet_sk(sp)->inet_num;
seq_printf(seq,
"%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
"%02X %08X:%08X %02X:%08lX %08X %5d %8d %lu %d %pK %d\n",
i,
src->s6_addr32[0], src->s6_addr32[1],
src->s6_addr32[2], src->s6_addr32[3], srcp,
dest->s6_addr32[0], dest->s6_addr32[1],
dest->s6_addr32[2], dest->s6_addr32[3], destp,
sp->sk_state,
sk_wmem_alloc_get(sp),
sk_rmem_alloc_get(sp),
0, 0L, 0,
sock_i_uid(sp), 0,
sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
}
static int raw6_seq_show(struct seq_file *seq, void *v)
{
if (v == SEQ_START_TOKEN)
seq_printf(seq,
" sl "
"local_address "
"remote_address "
"st tx_queue rx_queue tr tm->when retrnsmt"
" uid timeout inode ref pointer drops\n");
else
raw6_sock_seq_show(seq, v, raw_seq_private(seq)->bucket);
return 0;
}
static const struct seq_operations raw6_seq_ops = {
.start = raw_seq_start,
.next = raw_seq_next,
.stop = raw_seq_stop,
.show = raw6_seq_show,
};
static int raw6_seq_open(struct inode *inode, struct file *file)
{
return raw_seq_open(inode, file, &raw_v6_hashinfo, &raw6_seq_ops);
}
static const struct file_operations raw6_seq_fops = {
.owner = THIS_MODULE,
.open = raw6_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int __net_init raw6_init_net(struct net *net)
{
if (!proc_net_fops_create(net, "raw6", S_IRUGO, &raw6_seq_fops))
return -ENOMEM;
return 0;
}
static void __net_exit raw6_exit_net(struct net *net)
{
proc_net_remove(net, "raw6");
}
static struct pernet_operations raw6_net_ops = {
.init = raw6_init_net,
.exit = raw6_exit_net,
};
int __init raw6_proc_init(void)
{
return register_pernet_subsys(&raw6_net_ops);
}
void raw6_proc_exit(void)
{
unregister_pernet_subsys(&raw6_net_ops);
}
#endif
static const struct proto_ops inet6_sockraw_ops = {
.family = PF_INET6,
.owner = THIS_MODULE,
.release = inet6_release,
.bind = inet6_bind,
.connect = inet_dgram_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = inet6_getname,
.poll = datagram_poll,
.ioctl = inet6_ioctl,
.listen = sock_no_listen,
.shutdown = inet_shutdown,
.setsockopt = sock_common_setsockopt,
.getsockopt = sock_common_getsockopt,
.sendmsg = inet_sendmsg,
.recvmsg = sock_common_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
#ifdef CONFIG_COMPAT
.compat_setsockopt = compat_sock_common_setsockopt,
.compat_getsockopt = compat_sock_common_getsockopt,
#endif
};
static struct inet_protosw rawv6_protosw = {
.type = SOCK_RAW,
.protocol = IPPROTO_IP,
.prot = &rawv6_prot,
.ops = &inet6_sockraw_ops,
.no_check = UDP_CSUM_DEFAULT,
.flags = INET_PROTOSW_REUSE,
};
int __init rawv6_init(void)
{
int ret;
ret = inet6_register_protosw(&rawv6_protosw);
if (ret)
goto out;
out:
return ret;
}
void rawv6_exit(void)
{
inet6_unregister_protosw(&rawv6_protosw);
}
| gpl-2.0 |
faux123/private-pyramid | net/netfilter/nf_conntrack_standalone.c | 854 | 12428 | /* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/netfilter.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/percpu.h>
#include <linux/netdevice.h>
#include <net/net_namespace.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_zones.h>
MODULE_LICENSE("GPL");
#ifdef CONFIG_PROC_FS
int
print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto)
{
return l3proto->print_tuple(s, tuple) || l4proto->print_tuple(s, tuple);
}
EXPORT_SYMBOL_GPL(print_tuple);
struct ct_iter_state {
struct seq_net_private p;
unsigned int bucket;
};
static struct hlist_nulls_node *ct_get_first(struct seq_file *seq)
{
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
struct hlist_nulls_node *n;
for (st->bucket = 0;
st->bucket < net->ct.htable_size;
st->bucket++) {
n = rcu_dereference(net->ct.hash[st->bucket].first);
if (!is_a_nulls(n))
return n;
}
return NULL;
}
static struct hlist_nulls_node *ct_get_next(struct seq_file *seq,
struct hlist_nulls_node *head)
{
struct net *net = seq_file_net(seq);
struct ct_iter_state *st = seq->private;
head = rcu_dereference(head->next);
while (is_a_nulls(head)) {
if (likely(get_nulls_value(head) == st->bucket)) {
if (++st->bucket >= net->ct.htable_size)
return NULL;
}
head = rcu_dereference(net->ct.hash[st->bucket].first);
}
return head;
}
static struct hlist_nulls_node *ct_get_idx(struct seq_file *seq, loff_t pos)
{
struct hlist_nulls_node *head = ct_get_first(seq);
if (head)
while (pos && (head = ct_get_next(seq, head)))
pos--;
return pos ? NULL : head;
}
static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
__acquires(RCU)
{
rcu_read_lock();
return ct_get_idx(seq, *pos);
}
static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
(*pos)++;
return ct_get_next(s, v);
}
static void ct_seq_stop(struct seq_file *s, void *v)
__releases(RCU)
{
rcu_read_unlock();
}
/* return 0 on success, -ENOSPC if the seq_file buffer was too small */
static int ct_seq_show(struct seq_file *s, void *v)
{
struct nf_conntrack_tuple_hash *hash = v;
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(hash);
const struct nf_conntrack_l3proto *l3proto;
const struct nf_conntrack_l4proto *l4proto;
int ret = 0;
NF_CT_ASSERT(ct);
if (unlikely(!atomic_inc_not_zero(&ct->ct_general.use)))
return 0;
/* we only want to print DIR_ORIGINAL */
if (NF_CT_DIRECTION(hash))
goto release;
l3proto = __nf_ct_l3proto_find(nf_ct_l3num(ct));
NF_CT_ASSERT(l3proto);
l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
NF_CT_ASSERT(l4proto);
ret = -ENOSPC;
if (seq_printf(s, "%-8s %u %-8s %u %ld ",
l3proto->name, nf_ct_l3num(ct),
l4proto->name, nf_ct_protonum(ct),
timer_pending(&ct->timeout)
? (long)(ct->timeout.expires - jiffies)/HZ : 0) != 0)
goto release;
if (l4proto->print_conntrack && l4proto->print_conntrack(s, ct))
goto release;
if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
l3proto, l4proto))
goto release;
if (seq_print_acct(s, ct, IP_CT_DIR_ORIGINAL))
goto release;
if (!(test_bit(IPS_SEEN_REPLY_BIT, &ct->status)))
if (seq_printf(s, "[UNREPLIED] "))
goto release;
if (print_tuple(s, &ct->tuplehash[IP_CT_DIR_REPLY].tuple,
l3proto, l4proto))
goto release;
if (seq_print_acct(s, ct, IP_CT_DIR_REPLY))
goto release;
if (test_bit(IPS_ASSURED_BIT, &ct->status))
if (seq_printf(s, "[ASSURED] "))
goto release;
#if defined(CONFIG_NF_CONNTRACK_MARK)
if (seq_printf(s, "mark=%u ", ct->mark))
goto release;
#endif
#ifdef CONFIG_NF_CONNTRACK_SECMARK
if (seq_printf(s, "secmark=%u ", ct->secmark))
goto release;
#endif
#ifdef CONFIG_NF_CONNTRACK_ZONES
if (seq_printf(s, "zone=%u ", nf_ct_zone(ct)))
goto release;
#endif
if (seq_printf(s, "use=%u\n", atomic_read(&ct->ct_general.use)))
goto release;
ret = 0;
release:
nf_ct_put(ct);
return ret;
}
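/*
 * For reference, a /proc/net/nf_conntrack line assembled above looks
 * roughly like this (values illustrative; the tuple fields depend on
 * the l3/l4 protocol helpers):
 *
 *   ipv4     2 tcp      6 431999 ESTABLISHED src=10.0.0.1 dst=10.0.0.2
 *   sport=39522 dport=80 src=10.0.0.2 dst=10.0.0.1 sport=80
 *   dport=39522 [ASSURED] mark=0 use=1
 */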
static const struct seq_operations ct_seq_ops = {
.start = ct_seq_start,
.next = ct_seq_next,
.stop = ct_seq_stop,
.show = ct_seq_show
};
static int ct_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ct_seq_ops,
sizeof(struct ct_iter_state));
}
static const struct file_operations ct_file_ops = {
.owner = THIS_MODULE,
.open = ct_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
struct net *net = seq_file_net(seq);
int cpu;
if (*pos == 0)
return SEQ_START_TOKEN;
for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(net->ct.stat, cpu);
}
return NULL;
}
static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
struct net *net = seq_file_net(seq);
int cpu;
for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
if (!cpu_possible(cpu))
continue;
*pos = cpu + 1;
return per_cpu_ptr(net->ct.stat, cpu);
}
return NULL;
}
static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
{
}
static int ct_cpu_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq_file_net(seq);
unsigned int nr_conntracks = atomic_read(&net->ct.count);
const struct ip_conntrack_stat *st = v;
if (v == SEQ_START_TOKEN) {
seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete search_restart\n");
return 0;
}
seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
"%08x %08x %08x %08x %08x %08x %08x %08x %08x\n",
nr_conntracks,
st->searched,
st->found,
st->new,
st->invalid,
st->ignore,
st->delete,
st->delete_list,
st->insert,
st->insert_failed,
st->drop,
st->early_drop,
st->error,
st->expect_new,
st->expect_create,
st->expect_delete,
st->search_restart
);
return 0;
}
static const struct seq_operations ct_cpu_seq_ops = {
.start = ct_cpu_seq_start,
.next = ct_cpu_seq_next,
.stop = ct_cpu_seq_stop,
.show = ct_cpu_seq_show,
};
static int ct_cpu_seq_open(struct inode *inode, struct file *file)
{
return seq_open_net(inode, file, &ct_cpu_seq_ops,
sizeof(struct seq_net_private));
}
static const struct file_operations ct_cpu_seq_fops = {
.owner = THIS_MODULE,
.open = ct_cpu_seq_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release_net,
};
static int nf_conntrack_standalone_init_proc(struct net *net)
{
struct proc_dir_entry *pde;
pde = proc_net_fops_create(net, "nf_conntrack", 0440, &ct_file_ops);
if (!pde)
goto out_nf_conntrack;
pde = proc_create("nf_conntrack", S_IRUGO, net->proc_net_stat,
&ct_cpu_seq_fops);
if (!pde)
goto out_stat_nf_conntrack;
return 0;
out_stat_nf_conntrack:
proc_net_remove(net, "nf_conntrack");
out_nf_conntrack:
return -ENOMEM;
}
static void nf_conntrack_standalone_fini_proc(struct net *net)
{
remove_proc_entry("nf_conntrack", net->proc_net_stat);
proc_net_remove(net, "nf_conntrack");
}
#else
static int nf_conntrack_standalone_init_proc(struct net *net)
{
return 0;
}
static void nf_conntrack_standalone_fini_proc(struct net *net)
{
}
#endif /* CONFIG_PROC_FS */
/* Sysctl support */
#ifdef CONFIG_SYSCTL
/* Log invalid packets of a given protocol */
static int log_invalid_proto_min = 0;
static int log_invalid_proto_max = 255;
static struct ctl_table_header *nf_ct_netfilter_header;
static ctl_table nf_ct_sysctl_table[] = {
{
.procname = "nf_conntrack_max",
.data = &nf_conntrack_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_count",
.data = &init_net.ct.count,
.maxlen = sizeof(int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_buckets",
.data = &init_net.ct.htable_size,
.maxlen = sizeof(unsigned int),
.mode = 0444,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_checksum",
.data = &init_net.ct.sysctl_checksum,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{
.procname = "nf_conntrack_log_invalid",
.data = &init_net.ct.sysctl_log_invalid,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
.extra1 = &log_invalid_proto_min,
.extra2 = &log_invalid_proto_max,
},
{
.procname = "nf_conntrack_expect_max",
.data = &nf_ct_expect_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
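/*
 * These entries surface under /proc/sys/net/netfilter/ (plus the
 * legacy net.nf_conntrack_max table below). A minimal userspace
 * sketch reading one of them:
 */
#if 0 /* illustrative userspace sketch, not built with this file */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/netfilter/nf_conntrack_max", "r");
	int max;

	if (!f)
		return 1;
	if (fscanf(f, "%d", &max) == 1)
		printf("nf_conntrack_max = %d\n", max);
	fclose(f);
	return 0;
}
#endif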
#define NET_NF_CONNTRACK_MAX 2089
static ctl_table nf_ct_netfilter_table[] = {
{
.procname = "nf_conntrack_max",
.data = &nf_conntrack_max,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec,
},
{ }
};
static struct ctl_path nf_ct_path[] = {
{ .procname = "net", },
{ }
};
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
struct ctl_table *table;
if (net_eq(net, &init_net)) {
nf_ct_netfilter_header =
register_sysctl_paths(nf_ct_path, nf_ct_netfilter_table);
if (!nf_ct_netfilter_header)
goto out;
}
table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
GFP_KERNEL);
if (!table)
goto out_kmemdup;
table[1].data = &net->ct.count;
table[2].data = &net->ct.htable_size;
table[3].data = &net->ct.sysctl_checksum;
table[4].data = &net->ct.sysctl_log_invalid;
net->ct.sysctl_header = register_net_sysctl_table(net,
nf_net_netfilter_sysctl_path, table);
if (!net->ct.sysctl_header)
goto out_unregister_netfilter;
return 0;
out_unregister_netfilter:
kfree(table);
out_kmemdup:
if (net_eq(net, &init_net))
unregister_sysctl_table(nf_ct_netfilter_header);
out:
printk(KERN_ERR "nf_conntrack: can't register to sysctl.\n");
return -ENOMEM;
}
static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
struct ctl_table *table;
if (net_eq(net, &init_net))
unregister_sysctl_table(nf_ct_netfilter_header);
table = net->ct.sysctl_header->ctl_table_arg;
unregister_net_sysctl_table(net->ct.sysctl_header);
kfree(table);
}
#else
static int nf_conntrack_standalone_init_sysctl(struct net *net)
{
return 0;
}
static void nf_conntrack_standalone_fini_sysctl(struct net *net)
{
}
#endif /* CONFIG_SYSCTL */
static int nf_conntrack_net_init(struct net *net)
{
int ret;
ret = nf_conntrack_init(net);
if (ret < 0)
goto out_init;
ret = nf_conntrack_standalone_init_proc(net);
if (ret < 0)
goto out_proc;
net->ct.sysctl_checksum = 1;
net->ct.sysctl_log_invalid = 0;
ret = nf_conntrack_standalone_init_sysctl(net);
if (ret < 0)
goto out_sysctl;
return 0;
out_sysctl:
nf_conntrack_standalone_fini_proc(net);
out_proc:
nf_conntrack_cleanup(net);
out_init:
return ret;
}
static void nf_conntrack_net_exit(struct net *net)
{
nf_conntrack_standalone_fini_sysctl(net);
nf_conntrack_standalone_fini_proc(net);
nf_conntrack_cleanup(net);
}
static struct pernet_operations nf_conntrack_net_ops = {
.init = nf_conntrack_net_init,
.exit = nf_conntrack_net_exit,
};
static int __init nf_conntrack_standalone_init(void)
{
return register_pernet_subsys(&nf_conntrack_net_ops);
}
static void __exit nf_conntrack_standalone_fini(void)
{
unregister_pernet_subsys(&nf_conntrack_net_ops);
}
module_init(nf_conntrack_standalone_init);
module_exit(nf_conntrack_standalone_fini);
/* Some modules need us, but don't depend directly on any symbol.
They should call this. */
void need_conntrack(void)
{
}
EXPORT_SYMBOL_GPL(need_conntrack);
| gpl-2.0 |
mrabe89sigma/linux-curie | drivers/parisc/superio.c | 1110 | 14118 | /* National Semiconductor NS87560UBD Super I/O controller used in
* HP [BCJ]x000 workstations.
*
* This chip is a horrid piece of engineering, and National
* denies any knowledge of its existence. Thus no datasheet is
* available off www.national.com.
*
* (C) Copyright 2000 Linuxcare, Inc.
* (C) Copyright 2000 Linuxcare Canada, Inc.
* (C) Copyright 2000 Martin K. Petersen <mkp@linuxcare.com>
* (C) Copyright 2000 Alex deVries <alex@onefishtwo.ca>
* (C) Copyright 2001 John Marvin <jsm fc hp com>
* (C) Copyright 2003 Grant Grundler <grundler parisc-linux org>
* (C) Copyright 2005 Kyle McMartin <kyle@parisc-linux.org>
* (C) Copyright 2006 Helge Deller <deller@gmx.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of
* the License, or (at your option) any later version.
*
* The initial version of this is by Martin K. Petersen. Alex deVries
* has spent a bit of time trying to coax it into working.
*
* Major changes to get basic interrupt infrastructure working to
* hopefully be able to support all SuperIO devices. Currently
* works with serial. -- John Marvin <jsm@fc.hp.com>
*
* Converted superio_init() to be a PCI_FIXUP_FINAL callee.
* -- Kyle McMartin <kyle@parisc-linux.org>
*/
/* NOTES:
*
* Function 0 is an IDE controller. It is identical to a PC87415 IDE
* controller (and identifies itself as such).
*
* Function 1 is a "Legacy I/O" controller. Under this function is a
* whole mess of legacy I/O peripherals. Of course, HP hasn't enabled
* all the functionality in hardware, but the following is available:
*
* Two 16550A compatible serial controllers
* An IEEE 1284 compatible parallel port
* A floppy disk controller
*
* Function 2 is a USB controller.
*
* We must be incredibly careful during initialization. Since all
* interrupts are routed through function 1 (which is not allowed by
* the PCI spec), we need to program the PICs on the legacy I/O port
* *before* we attempt to set up IDE and USB. @#$!&
*
* According to HP, devices are only enabled by firmware if they have
* a physical device connected.
*
* Configuration register bits:
* 0x5A: FDC, SP1, IDE1, SP2, IDE2, PAR, Reserved, P92
* 0x5B: RTC, 8259, 8254, DMA1, DMA2, KBC, P61, APM
*
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/serial.h>
#include <linux/pci.h>
#include <linux/parport.h>
#include <linux/parport_pc.h>
#include <linux/termios.h>
#include <linux/tty.h>
#include <linux/serial_core.h>
#include <linux/serial_8250.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/hardware.h>
#include <asm/superio.h>
static struct superio_device sio_dev;
#undef DEBUG_SUPERIO_INIT
#ifdef DEBUG_SUPERIO_INIT
#define DBG_INIT(x...) printk(x)
#else
#define DBG_INIT(x...)
#endif
#define SUPERIO "SuperIO"
#define PFX SUPERIO ": "
static irqreturn_t
superio_interrupt(int parent_irq, void *devp)
{
u8 results;
u8 local_irq;
/* Poll the 8259 to see if there's an interrupt. */
outb (OCW3_POLL,IC_PIC1+0);
results = inb(IC_PIC1+0);
/*
* Bit 7: 1 = active Interrupt; 0 = no Interrupt pending
* Bits 6-3: zero
* Bits 2-0: highest priority, active requesting interrupt ID (0-7)
*/
if ((results & 0x80) == 0) {
/* I suspect "spurious" interrupts are from unmasking an IRQ.
* We don't know if an interrupt was/is pending and thus
* just call the handler for that IRQ as if it were pending.
*/
return IRQ_NONE;
}
/* Check to see which device is interrupting */
local_irq = results & 0x0f;
if (local_irq == 2 || local_irq > 7) {
printk(KERN_ERR PFX "slave interrupted!\n");
return IRQ_HANDLED;
}
if (local_irq == 7) {
/* Could be spurious. Check in service bits */
outb(OCW3_ISR,IC_PIC1+0);
results = inb(IC_PIC1+0);
if ((results & 0x80) == 0) { /* if ISR7 not set: spurious */
printk(KERN_WARNING PFX "spurious interrupt!\n");
return IRQ_HANDLED;
}
}
/* Call the appropriate device's interrupt */
generic_handle_irq(local_irq);
/* set EOI - forces a new interrupt if a lower priority device
* still needs service.
*/
outb((OCW2_SEOI|local_irq),IC_PIC1 + 0);
return IRQ_HANDLED;
}
/* Initialize Super I/O device */
static void
superio_init(struct pci_dev *pcidev)
{
struct superio_device *sio = &sio_dev;
struct pci_dev *pdev = sio->lio_pdev;
u16 word;
int ret;
if (sio->suckyio_irq_enabled)
return;
BUG_ON(!pdev);
BUG_ON(!sio->usb_pdev);
/* use the IRQ iosapic found for USB INT D... */
pdev->irq = sio->usb_pdev->irq;
/* ...then properly fixup the USB to point at suckyio PIC */
sio->usb_pdev->irq = superio_fixup_irq(sio->usb_pdev);
printk(KERN_INFO PFX "Found NS87560 Legacy I/O device at %s (IRQ %i)\n",
pci_name(pdev), pdev->irq);
pci_read_config_dword (pdev, SIO_SP1BAR, &sio->sp1_base);
sio->sp1_base &= ~1;
printk(KERN_INFO PFX "Serial port 1 at 0x%x\n", sio->sp1_base);
pci_read_config_dword (pdev, SIO_SP2BAR, &sio->sp2_base);
sio->sp2_base &= ~1;
printk(KERN_INFO PFX "Serial port 2 at 0x%x\n", sio->sp2_base);
pci_read_config_dword (pdev, SIO_PPBAR, &sio->pp_base);
sio->pp_base &= ~1;
printk(KERN_INFO PFX "Parallel port at 0x%x\n", sio->pp_base);
pci_read_config_dword (pdev, SIO_FDCBAR, &sio->fdc_base);
sio->fdc_base &= ~1;
printk(KERN_INFO PFX "Floppy controller at 0x%x\n", sio->fdc_base);
pci_read_config_dword (pdev, SIO_ACPIBAR, &sio->acpi_base);
sio->acpi_base &= ~1;
printk(KERN_INFO PFX "ACPI at 0x%x\n", sio->acpi_base);
request_region (IC_PIC1, 0x1f, "pic1");
request_region (IC_PIC2, 0x1f, "pic2");
request_region (sio->acpi_base, 0x1f, "acpi");
/* Enable the legacy I/O function */
pci_read_config_word (pdev, PCI_COMMAND, &word);
word |= PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_IO;
pci_write_config_word (pdev, PCI_COMMAND, word);
pci_set_master (pdev);
ret = pci_enable_device(pdev);
BUG_ON(ret < 0); /* not too much we can do about this... */
/*
* Next project is programming the onboard interrupt controllers.
* PDC hasn't done this for us, since it's using polled I/O.
*
* XXX Use dword writes to avoid bugs in Elroy or Suckyio Config
* space access. PCI is by nature a 32-bit bus and config
* space can be sensitive to that.
*/
/* 0x64 - 0x67 :
DMA Rtg 2
DMA Rtg 3
DMA Chan Ctl
TRIGGER_1 == 0x82 USB & IDE level triggered, rest to edge
*/
pci_write_config_dword (pdev, 0x64, 0x82000000U);
/* 0x68 - 0x6b :
TRIGGER_2 == 0x00 all edge triggered (not used)
CFG_IR_SER == 0x43 SerPort1 = IRQ3, SerPort2 = IRQ4
CFG_IR_PF == 0x65 ParPort = IRQ5, FloppyCtlr = IRQ6
CFG_IR_IDE == 0x07 IDE1 = IRQ7, reserved
*/
pci_write_config_dword (pdev, TRIGGER_2, 0x07654300U);
/* 0x6c - 0x6f :
CFG_IR_INTAB == 0x00
CFG_IR_INTCD == 0x10 USB = IRQ1
CFG_IR_PS2 == 0x00
CFG_IR_FXBUS == 0x00
*/
pci_write_config_dword (pdev, CFG_IR_INTAB, 0x00001000U);
/* 0x70 - 0x73 :
CFG_IR_USB == 0x00 not used. USB is connected to INTD.
CFG_IR_ACPI == 0x00 not used.
DMA Priority == 0x4c88 Power on default value. NFC.
*/
pci_write_config_dword (pdev, CFG_IR_USB, 0x4c880000U);
/* PIC1 Initialization Command Word register programming */
outb (0x11,IC_PIC1+0); /* ICW1: ICW4 write req | ICW1 */
outb (0x00,IC_PIC1+1); /* ICW2: interrupt vector table - not used */
outb (0x04,IC_PIC1+1); /* ICW3: Cascade */
outb (0x01,IC_PIC1+1); /* ICW4: x86 mode */
/* PIC1 Program Operational Control Words */
outb (0xff,IC_PIC1+1); /* OCW1: Mask all interrupts */
outb (0xc2,IC_PIC1+0); /* OCW2: priority (3-7,0-2) */
/* PIC2 Initialization Command Word register programming */
outb (0x11,IC_PIC2+0); /* ICW1: ICW4 write req | ICW1 */
outb (0x00,IC_PIC2+1); /* ICW2: N/A */
outb (0x02,IC_PIC2+1); /* ICW3: Slave ID code */
outb (0x01,IC_PIC2+1); /* ICW4: x86 mode */
/* Program Operational Control Words */
outb (0xff,IC_PIC1+1); /* OCW1: Mask all interrupts */
outb (0x68,IC_PIC1+0); /* OCW3: OCW3 select | ESMM | SMM */
/* Write master mask reg */
outb (0xff,IC_PIC1+1);
/* Setup USB power regulation */
outb(1, sio->acpi_base + USB_REG_CR);
if (inb(sio->acpi_base + USB_REG_CR) & 1)
printk(KERN_INFO PFX "USB regulator enabled\n");
else
printk(KERN_ERR PFX "USB regulator not initialized!\n");
if (request_irq(pdev->irq, superio_interrupt, 0,
SUPERIO, (void *)sio)) {
printk(KERN_ERR PFX "could not get irq\n");
BUG();
return;
}
sio->suckyio_irq_enabled = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO, superio_init);
static void superio_mask_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
u8 r8;
if ((irq < 1) || (irq == 2) || (irq > 7)) {
printk(KERN_ERR PFX "Illegal irq number.\n");
BUG();
return;
}
/* Mask interrupt */
r8 = inb(IC_PIC1+1);
r8 |= (1 << irq);
outb (r8,IC_PIC1+1);
}
static void superio_unmask_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
u8 r8;
if ((irq < 1) || (irq == 2) || (irq > 7)) {
printk(KERN_ERR PFX "Illegal irq number (%d).\n", irq);
BUG();
return;
}
/* Unmask interrupt */
r8 = inb(IC_PIC1+1);
r8 &= ~(1 << irq);
outb (r8,IC_PIC1+1);
}
static struct irq_chip superio_interrupt_type = {
.name = SUPERIO,
.irq_unmask = superio_unmask_irq,
.irq_mask = superio_mask_irq,
};
#ifdef DEBUG_SUPERIO_INIT
static unsigned short expected_device[3] = {
PCI_DEVICE_ID_NS_87415,
PCI_DEVICE_ID_NS_87560_LIO,
PCI_DEVICE_ID_NS_87560_USB
};
#endif
int superio_fixup_irq(struct pci_dev *pcidev)
{
int local_irq, i;
#ifdef DEBUG_SUPERIO_INIT
int fn;
fn = PCI_FUNC(pcidev->devfn);
/* Verify the function number matches the expected device id. */
if (expected_device[fn] != pcidev->device) {
BUG();
return -1;
}
printk(KERN_DEBUG "superio_fixup_irq(%s) ven 0x%x dev 0x%x from %ps\n",
pci_name(pcidev),
pcidev->vendor, pcidev->device,
__builtin_return_address(0));
#endif
for (i = 0; i < 16; i++) {
irq_set_chip_and_handler(i, &superio_interrupt_type,
handle_simple_irq);
}
/*
* We don't allocate a SuperIO irq for the legacy IO function,
* since it is a "bridge". Instead, we will allocate irq's for
* each legacy device as they are initialized.
*/
switch(pcidev->device) {
case PCI_DEVICE_ID_NS_87415: /* Function 0 */
local_irq = IDE_IRQ;
break;
case PCI_DEVICE_ID_NS_87560_LIO: /* Function 1 */
sio_dev.lio_pdev = pcidev; /* save for superio_init() */
return -1;
case PCI_DEVICE_ID_NS_87560_USB: /* Function 2 */
sio_dev.usb_pdev = pcidev; /* save for superio_init() */
local_irq = USB_IRQ;
break;
default:
local_irq = -1;
BUG();
break;
}
return local_irq;
}
static void __init superio_serial_init(void)
{
#ifdef CONFIG_SERIAL_8250
int retval;
struct uart_port serial_port;
memset(&serial_port, 0, sizeof(serial_port));
serial_port.iotype = UPIO_PORT;
serial_port.type = PORT_16550A;
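/* 16x oversampling: 115200 * 16 = 1.8432 MHz UART input clock */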
serial_port.uartclk = 115200*16;
serial_port.flags = UPF_FIXED_PORT | UPF_FIXED_TYPE |
UPF_BOOT_AUTOCONF;
/* serial port #1 */
serial_port.iobase = sio_dev.sp1_base;
serial_port.irq = SP1_IRQ;
serial_port.line = 0;
retval = early_serial_setup(&serial_port);
if (retval < 0) {
printk(KERN_WARNING PFX "Register Serial #0 failed.\n");
return;
}
/* serial port #2 */
serial_port.iobase = sio_dev.sp2_base;
serial_port.irq = SP2_IRQ;
serial_port.line = 1;
retval = early_serial_setup(&serial_port);
if (retval < 0)
printk(KERN_WARNING PFX "Register Serial #1 failed.\n");
#endif /* CONFIG_SERIAL_8250 */
}
static void __init superio_parport_init(void)
{
#ifdef CONFIG_PARPORT_PC
if (!parport_pc_probe_port(sio_dev.pp_base,
0 /*base_hi*/,
PAR_IRQ,
PARPORT_DMA_NONE /* dma */,
NULL /*struct pci_dev* */,
0 /* shared irq flags */))
printk(KERN_WARNING PFX "Probing parallel port failed.\n");
#endif /* CONFIG_PARPORT_PC */
}
static void superio_fixup_pci(struct pci_dev *pdev)
{
u8 prog;
pdev->class |= 0x5;
pci_write_config_byte(pdev, PCI_CLASS_PROG, pdev->class);
pci_read_config_byte(pdev, PCI_CLASS_PROG, &prog);
printk("PCI: Enabled native mode for NS87415 (pif=0x%x)\n", prog);
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415, superio_fixup_pci);
static int __init
superio_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
struct superio_device *sio = &sio_dev;
/*
** superio_probe(00:0e.0) ven 0x100b dev 0x2 sv 0x0 sd 0x0 class 0x1018a
** superio_probe(00:0e.1) ven 0x100b dev 0xe sv 0x0 sd 0x0 class 0x68000
** superio_probe(00:0e.2) ven 0x100b dev 0x12 sv 0x0 sd 0x0 class 0xc0310
*/
DBG_INIT("superio_probe(%s) ven 0x%x dev 0x%x sv 0x%x sd 0x%x class 0x%x\n",
pci_name(dev),
dev->vendor, dev->device,
dev->subsystem_vendor, dev->subsystem_device,
dev->class);
BUG_ON(!sio->suckyio_irq_enabled); /* Enabled by PCI_FIXUP_FINAL */
if (dev->device == PCI_DEVICE_ID_NS_87560_LIO) { /* Function 1 */
superio_parport_init();
superio_serial_init();
/* REVISIT XXX : superio_fdc_init() ? */
return 0;
} else if (dev->device == PCI_DEVICE_ID_NS_87415) { /* Function 0 */
DBG_INIT("superio_probe: ignoring IDE 87415\n");
} else if (dev->device == PCI_DEVICE_ID_NS_87560_USB) { /* Function 2 */
DBG_INIT("superio_probe: ignoring USB OHCI controller\n");
} else {
DBG_INIT("superio_probe: WTF? Fire Extinguisher?\n");
}
/* Let appropriate other driver claim this device. */
return -ENODEV;
}
static const struct pci_device_id superio_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_LIO) },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87560_USB) },
{ PCI_DEVICE(PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_87415) },
{ 0, }
};
static struct pci_driver superio_driver = {
.name = SUPERIO,
.id_table = superio_tbl,
.probe = superio_probe,
};
module_pci_driver(superio_driver);
| gpl-2.0 |
bigzz/linux-btrfs | drivers/net/wireless/ti/wl1251/cmd.c | 1878 | 11404 | #include "cmd.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include "wl1251.h"
#include "reg.h"
#include "io.h"
#include "ps.h"
#include "acx.h"
/**
* wl1251_cmd_send - send command to firmware
*
* @wl: wl struct
* @id: command id
* @buf: buffer containing the command, must work with dma
* @len: length of the buffer
*/
int wl1251_cmd_send(struct wl1251 *wl, u16 id, void *buf, size_t len)
{
struct wl1251_cmd_header *cmd;
unsigned long timeout;
u32 intr;
int ret = 0;
cmd = buf;
cmd->id = id;
cmd->status = 0;
WARN_ON(len % 4 != 0);
wl1251_mem_write(wl, wl->cmd_box_addr, buf, len);
wl1251_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
timeout = jiffies + msecs_to_jiffies(WL1251_COMMAND_TIMEOUT);
intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
while (!(intr & WL1251_ACX_INTR_CMD_COMPLETE)) {
if (time_after(jiffies, timeout)) {
wl1251_error("command complete timeout");
ret = -ETIMEDOUT;
goto out;
}
msleep(1);
intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
}
wl1251_reg_write32(wl, ACX_REG_INTERRUPT_ACK,
WL1251_ACX_INTR_CMD_COMPLETE);
out:
return ret;
}
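/*
* Mailbox flow used above: the command is copied into the command mailbox,
* the firmware is kicked with INTR_TRIG_CMD, then the driver polls (in
* msleep(1) steps, capped at WL1251_COMMAND_TIMEOUT) for
* WL1251_ACX_INTR_CMD_COMPLETE and acks it.
*/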
/**
* wl1251_cmd_test - send test command to firmware
*
* @wl: wl struct
* @buf: buffer containing the command, with all headers, must work with dma
* @buf_len: length of the buffer
* @answer: is answer needed
*/
int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer)
{
int ret;
wl1251_debug(DEBUG_CMD, "cmd test");
ret = wl1251_cmd_send(wl, CMD_TEST, buf, buf_len);
if (ret < 0) {
wl1251_warning("TEST command failed");
return ret;
}
if (answer) {
struct wl1251_command *cmd_answer;
/*
* The test command got in, we can read the answer.
* The answer would be a wl1251_command, where the
* parameter array contains the actual answer.
*/
wl1251_mem_read(wl, wl->cmd_box_addr, buf, buf_len);
cmd_answer = buf;
if (cmd_answer->header.status != CMD_STATUS_SUCCESS)
wl1251_error("TEST command answer error: %d",
cmd_answer->header.status);
}
return 0;
}
/**
* wl1251_cmd_interrogate - read acx from firmware
*
* @wl: wl struct
* @id: acx id
* @buf: buffer for the response, including all headers, must work with dma
* @len: length of buf
*/
int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len)
{
struct acx_header *acx = buf;
int ret;
wl1251_debug(DEBUG_CMD, "cmd interrogate");
acx->id = id;
/* payload length, does not include any headers */
acx->len = len - sizeof(*acx);
ret = wl1251_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx));
if (ret < 0) {
wl1251_error("INTERROGATE command failed");
goto out;
}
/* the interrogate command got in, we can read the answer */
wl1251_mem_read(wl, wl->cmd_box_addr, buf, len);
acx = buf;
if (acx->cmd.status != CMD_STATUS_SUCCESS)
wl1251_error("INTERROGATE command error: %d",
acx->cmd.status);
out:
return ret;
}
/**
* wl1251_cmd_configure - write acx value to firmware
*
* @wl: wl struct
* @id: acx id
* @buf: buffer containing acx, including all headers, must work with dma
* @len: length of buf
*/
int wl1251_cmd_configure(struct wl1251 *wl, u16 id, void *buf, size_t len)
{
struct acx_header *acx = buf;
int ret;
wl1251_debug(DEBUG_CMD, "cmd configure");
acx->id = id;
/* payload length, does not include any headers */
acx->len = len - sizeof(*acx);
ret = wl1251_cmd_send(wl, CMD_CONFIGURE, acx, len);
if (ret < 0) {
wl1251_warning("CONFIGURE command NOK");
return ret;
}
return 0;
}
int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity,
void *bitmap, u16 bitmap_len, u8 bitmap_control)
{
struct wl1251_cmd_vbm_update *vbm;
int ret;
wl1251_debug(DEBUG_CMD, "cmd vbm");
vbm = kzalloc(sizeof(*vbm), GFP_KERNEL);
if (!vbm) {
ret = -ENOMEM;
goto out;
}
/* Count and period will be filled by the target */
vbm->tim.bitmap_ctrl = bitmap_control;
if (bitmap_len > PARTIAL_VBM_MAX) {
wl1251_warning("cmd vbm len is %d B, truncating to %d",
bitmap_len, PARTIAL_VBM_MAX);
bitmap_len = PARTIAL_VBM_MAX;
}
memcpy(vbm->tim.pvb_field, bitmap, bitmap_len);
vbm->tim.identity = identity;
vbm->tim.length = bitmap_len + 3;
vbm->len = cpu_to_le16(bitmap_len + 5);
ret = wl1251_cmd_send(wl, CMD_VBM, vbm, sizeof(*vbm));
if (ret < 0) {
wl1251_error("VBM command failed");
goto out;
}
out:
kfree(vbm);
return ret;
}
int wl1251_cmd_data_path_rx(struct wl1251 *wl, u8 channel, bool enable)
{
struct cmd_enabledisable_path *cmd;
int ret;
u16 cmd_rx;
wl1251_debug(DEBUG_CMD, "cmd data path");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->channel = channel;
if (enable)
cmd_rx = CMD_ENABLE_RX;
else
cmd_rx = CMD_DISABLE_RX;
ret = wl1251_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd));
if (ret < 0) {
wl1251_error("rx %s cmd for channel %d failed",
enable ? "start" : "stop", channel);
goto out;
}
wl1251_debug(DEBUG_BOOT, "rx %s cmd channel %d",
enable ? "start" : "stop", channel);
out:
kfree(cmd);
return ret;
}
int wl1251_cmd_data_path_tx(struct wl1251 *wl, u8 channel, bool enable)
{
struct cmd_enabledisable_path *cmd;
int ret;
u16 cmd_tx;
wl1251_debug(DEBUG_CMD, "cmd data path");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->channel = channel;
if (enable)
cmd_tx = CMD_ENABLE_TX;
else
cmd_tx = CMD_DISABLE_TX;
ret = wl1251_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd));
if (ret < 0)
wl1251_error("tx %s cmd for channel %d failed",
enable ? "start" : "stop", channel);
else
wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
enable ? "start" : "stop", channel);
kfree(cmd);
return ret;
}
int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
u16 beacon_interval, u8 dtim_interval)
{
struct cmd_join *join;
int ret, i;
u8 *bssid;
join = kzalloc(sizeof(*join), GFP_KERNEL);
if (!join) {
ret = -ENOMEM;
goto out;
}
wl1251_debug(DEBUG_CMD, "cmd join%s ch %d %d/%d",
bss_type == BSS_TYPE_IBSS ? " ibss" : "",
channel, beacon_interval, dtim_interval);
/* Reverse order BSSID */
bssid = (u8 *) &join->bssid_lsb;
for (i = 0; i < ETH_ALEN; i++)
bssid[i] = wl->bssid[ETH_ALEN - i - 1];
join->rx_config_options = wl->rx_config;
join->rx_filter_options = wl->rx_filter;
join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS |
RATE_MASK_5_5MBPS | RATE_MASK_11MBPS;
join->beacon_interval = beacon_interval;
join->dtim_interval = dtim_interval;
join->bss_type = bss_type;
join->channel = channel;
join->ctrl = JOIN_CMD_CTRL_TX_FLUSH;
ret = wl1251_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join));
if (ret < 0) {
wl1251_error("failed to initiate cmd join");
goto out;
}
out:
kfree(join);
return ret;
}
int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode)
{
struct wl1251_cmd_ps_params *ps_params = NULL;
int ret = 0;
wl1251_debug(DEBUG_CMD, "cmd set ps mode");
ps_params = kzalloc(sizeof(*ps_params), GFP_KERNEL);
if (!ps_params) {
ret = -ENOMEM;
goto out;
}
ps_params->ps_mode = ps_mode;
ps_params->send_null_data = 1;
ps_params->retries = 5;
ps_params->hang_over_period = 128;
ps_params->null_data_rate = 1; /* 1 Mbps */
ret = wl1251_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
sizeof(*ps_params));
if (ret < 0) {
wl1251_error("cmd set_ps_mode failed");
goto out;
}
out:
kfree(ps_params);
return ret;
}
int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
size_t len)
{
struct cmd_read_write_memory *cmd;
int ret = 0;
wl1251_debug(DEBUG_CMD, "cmd read memory");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
WARN_ON(len > MAX_READ_SIZE);
len = min_t(size_t, len, MAX_READ_SIZE);
cmd->addr = addr;
cmd->size = len;
ret = wl1251_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd));
if (ret < 0) {
wl1251_error("read memory command failed: %d", ret);
goto out;
}
/* the read command got in, we can now read the answer */
wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
if (cmd->header.status != CMD_STATUS_SUCCESS)
wl1251_error("error in read command result: %d",
cmd->header.status);
memcpy(answer, cmd->value, len);
out:
kfree(cmd);
return ret;
}
int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
void *buf, size_t buf_len)
{
struct wl1251_cmd_packet_template *cmd;
size_t cmd_len;
int ret = 0;
wl1251_debug(DEBUG_CMD, "cmd template %d", cmd_id);
WARN_ON(buf_len > WL1251_MAX_TEMPLATE_SIZE);
buf_len = min_t(size_t, buf_len, WL1251_MAX_TEMPLATE_SIZE);
cmd_len = ALIGN(sizeof(*cmd) + buf_len, 4);
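/* round the total up to a 4-byte multiple; wl1251_cmd_send() WARNs on
* lengths that are not a multiple of four.
*/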
cmd = kzalloc(cmd_len, GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->size = cpu_to_le16(buf_len);
if (buf)
memcpy(cmd->data, buf, buf_len);
ret = wl1251_cmd_send(wl, cmd_id, cmd, cmd_len);
if (ret < 0) {
wl1251_warning("cmd set_template failed: %d", ret);
goto out;
}
out:
kfree(cmd);
return ret;
}
int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
struct ieee80211_channel *channels[],
unsigned int n_channels, unsigned int n_probes)
{
struct wl1251_cmd_scan *cmd;
int i, ret = 0;
wl1251_debug(DEBUG_CMD, "cmd scan channels %d", n_channels);
WARN_ON(n_channels > SCAN_MAX_NUM_OF_CHANNELS);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
cmd->params.rx_filter_options = cpu_to_le32(CFG_RX_PRSP_EN |
CFG_RX_MGMT_EN |
CFG_RX_BCN_EN);
cmd->params.scan_options = 0;
/*
* Use high priority scan when not associated to prevent fw issue
* causing never-ending scans (sometimes 20+ minutes).
* Note: This bug may be caused by the fw's DTIM handling.
*/
if (is_zero_ether_addr(wl->bssid))
cmd->params.scan_options |= cpu_to_le16(WL1251_SCAN_OPT_PRIORITY_HIGH);
cmd->params.num_channels = n_channels;
cmd->params.num_probe_requests = n_probes;
cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
cmd->params.tid_trigger = 0;
for (i = 0; i < n_channels; i++) {
cmd->channels[i].min_duration =
cpu_to_le32(WL1251_SCAN_MIN_DURATION);
cmd->channels[i].max_duration =
cpu_to_le32(WL1251_SCAN_MAX_DURATION);
memset(&cmd->channels[i].bssid_lsb, 0xff, 4);
memset(&cmd->channels[i].bssid_msb, 0xff, 2);
cmd->channels[i].early_termination = 0;
cmd->channels[i].tx_power_att = 0;
cmd->channels[i].channel = channels[i]->hw_value;
}
cmd->params.ssid_len = ssid_len;
if (ssid)
memcpy(cmd->params.ssid, ssid, ssid_len);
ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
if (ret < 0) {
wl1251_error("cmd scan failed: %d", ret);
goto out;
}
wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
if (cmd->header.status != CMD_STATUS_SUCCESS) {
wl1251_error("cmd scan status wasn't success: %d",
cmd->header.status);
ret = -EIO;
goto out;
}
out:
kfree(cmd);
return ret;
}
int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
{
struct wl1251_cmd_trigger_scan_to *cmd;
int ret;
wl1251_debug(DEBUG_CMD, "cmd trigger scan to");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->timeout = timeout;
ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
if (ret < 0) {
wl1251_error("cmd trigger scan to failed: %d", ret);
goto out;
}
out:
kfree(cmd);
return ret;
}
| gpl-2.0 |
ptmr3/GalaxyNote2_Kernel | net/econet/af_econet.c | 2390 | 25459 | /*
* An implementation of the Acorn Econet and AUN protocols.
* Philip Blundell <philb@gnu.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/route.h>
#include <linux/inet.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/wireless.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/if_ec.h>
#include <net/udp.h>
#include <net/ip.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/bitops.h>
#include <linux/mutex.h>
#include <asm/uaccess.h>
#include <asm/system.h>
static const struct proto_ops econet_ops;
static struct hlist_head econet_sklist;
static DEFINE_SPINLOCK(econet_lock);
static DEFINE_MUTEX(econet_mutex);
/* Since there are only 256 possible network numbers (or fewer, depending
on how you count) it makes sense to use a simple lookup table. */
static struct net_device *net2dev_map[256];
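/* Slot 0 doubles as the default interface: econet_sendmsg() falls back
* to net2dev_map[0] when the destination network has no direct mapping.
*/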
#define EC_PORT_IP 0xd2
#ifdef CONFIG_ECONET_AUNUDP
static DEFINE_SPINLOCK(aun_queue_lock);
static struct socket *udpsock;
#define AUN_PORT 0x8000
struct aunhdr
{
unsigned char code; /* AUN magic protocol byte */
unsigned char port;
unsigned char cb;
unsigned char pad;
unsigned long handle;
};
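/* AUN 'code' values: 2 = data frame, 3 = positive ack, 4 = negative ack
* (destination not listening); see aun_incoming() and aun_tx_ack() below.
*/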
static unsigned long aun_seq;
/* Queue of packets waiting to be transmitted. */
static struct sk_buff_head aun_queue;
static struct timer_list ab_cleanup_timer;
#endif /* CONFIG_ECONET_AUNUDP */
/* Per-packet information */
struct ec_cb
{
struct sockaddr_ec sec;
unsigned long cookie; /* Supplied by user. */
#ifdef CONFIG_ECONET_AUNUDP
int done;
unsigned long seq; /* Sequencing */
unsigned long timeout; /* Timeout */
unsigned long start; /* jiffies */
#endif
#ifdef CONFIG_ECONET_NATIVE
void (*sent)(struct sk_buff *, int result);
#endif
};
static void econet_remove_socket(struct hlist_head *list, struct sock *sk)
{
spin_lock_bh(&econet_lock);
sk_del_node_init(sk);
spin_unlock_bh(&econet_lock);
}
static void econet_insert_socket(struct hlist_head *list, struct sock *sk)
{
spin_lock_bh(&econet_lock);
sk_add_node(sk, list);
spin_unlock_bh(&econet_lock);
}
/*
* Pull a packet from our receive queue and hand it to the user.
* If necessary we block.
*/
static int econet_recvmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len, int flags)
{
struct sock *sk = sock->sk;
struct sk_buff *skb;
size_t copied;
int err;
msg->msg_namelen = sizeof(struct sockaddr_ec);
mutex_lock(&econet_mutex);
/*
* Call the generic datagram receiver. This handles all sorts
* of horrible races and re-entrancy so we can forget about it
* in the protocol layers.
*
* Now it will return ENETDOWN, if device have just gone down,
* but then it will block.
*/
skb=skb_recv_datagram(sk,flags,flags&MSG_DONTWAIT,&err);
/*
* If an error occurred, return it. skb_recv_datagram() handles
* the blocking for us, so we don't have to see or worry about
* blocking retries.
*/
if(skb==NULL)
goto out;
/*
* You lose any data beyond the buffer you gave. If it worries a
* user program they can ask the device for its MTU anyway.
*/
copied = skb->len;
if (copied > len)
{
copied=len;
msg->msg_flags|=MSG_TRUNC;
}
/* We can't use skb_copy_datagram here */
err = memcpy_toiovec(msg->msg_iov, skb->data, copied);
if (err)
goto out_free;
sk->sk_stamp = skb->tstamp;
if (msg->msg_name)
memcpy(msg->msg_name, skb->cb, msg->msg_namelen);
/*
* Free or return the buffer as appropriate. Again this
* hides all the races and re-entrancy issues from us.
*/
err = copied;
out_free:
skb_free_datagram(sk, skb);
out:
mutex_unlock(&econet_mutex);
return err;
}
/*
* Bind an Econet socket.
*/
static int econet_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
struct sock *sk;
struct econet_sock *eo;
/*
* Check legality
*/
if (addr_len < sizeof(struct sockaddr_ec) ||
sec->sec_family != AF_ECONET)
return -EINVAL;
mutex_lock(&econet_mutex);
sk = sock->sk;
eo = ec_sk(sk);
eo->cb = sec->cb;
eo->port = sec->port;
eo->station = sec->addr.station;
eo->net = sec->addr.net;
mutex_unlock(&econet_mutex);
return 0;
}
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
/*
* Queue a transmit result for the user to be told about.
*/
static void tx_result(struct sock *sk, unsigned long cookie, int result)
{
struct sk_buff *skb = alloc_skb(0, GFP_ATOMIC);
struct ec_cb *eb;
struct sockaddr_ec *sec;
if (skb == NULL)
{
printk(KERN_DEBUG "ec: memory squeeze, transmit result dropped.\n");
return;
}
eb = (struct ec_cb *)&skb->cb;
sec = (struct sockaddr_ec *)&eb->sec;
memset(sec, 0, sizeof(struct sockaddr_ec));
sec->cookie = cookie;
sec->type = ECTYPE_TRANSMIT_STATUS | result;
sec->sec_family = AF_ECONET;
if (sock_queue_rcv_skb(sk, skb) < 0)
kfree_skb(skb);
}
#endif
#ifdef CONFIG_ECONET_NATIVE
/*
* Called by the Econet hardware driver when a packet transmit
* has completed. Tell the user.
*/
static void ec_tx_done(struct sk_buff *skb, int result)
{
struct ec_cb *eb = (struct ec_cb *)&skb->cb;
tx_result(skb->sk, eb->cookie, result);
}
#endif
/*
* Send a packet. We have to work out which device it's going out on
* and hence whether to use real Econet or the UDP emulation.
*/
static int econet_sendmsg(struct kiocb *iocb, struct socket *sock,
struct msghdr *msg, size_t len)
{
struct sockaddr_ec *saddr=(struct sockaddr_ec *)msg->msg_name;
struct net_device *dev;
struct ec_addr addr;
int err;
unsigned char port, cb;
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
struct sock *sk = sock->sk;
struct sk_buff *skb;
struct ec_cb *eb;
#endif
#ifdef CONFIG_ECONET_AUNUDP
struct msghdr udpmsg;
struct iovec iov[2];
struct aunhdr ah;
struct sockaddr_in udpdest;
__kernel_size_t size;
mm_segment_t oldfs;
char *userbuf;
#endif
/*
* Check the flags.
*/
if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT))
return -EINVAL;
/*
* Get and verify the address.
*/
mutex_lock(&econet_mutex);
if (saddr == NULL || msg->msg_namelen < sizeof(struct sockaddr_ec)) {
mutex_unlock(&econet_mutex);
return -EINVAL;
}
addr.station = saddr->addr.station;
addr.net = saddr->addr.net;
port = saddr->port;
cb = saddr->cb;
/* Look for a device with the right network number. */
dev = net2dev_map[addr.net];
/* If not directly reachable, use some default */
if (dev == NULL) {
dev = net2dev_map[0];
/* No interfaces at all? */
if (dev == NULL) {
mutex_unlock(&econet_mutex);
return -ENETDOWN;
}
}
if (dev->type == ARPHRD_ECONET) {
/* Real hardware Econet. We're not worthy etc. */
#ifdef CONFIG_ECONET_NATIVE
unsigned short proto = 0;
int res;
if (len + 15 > dev->mtu) {
mutex_unlock(&econet_mutex);
return -EMSGSIZE;
}
dev_hold(dev);
skb = sock_alloc_send_skb(sk, len+LL_ALLOCATED_SPACE(dev),
msg->msg_flags & MSG_DONTWAIT, &err);
if (skb==NULL)
goto out_unlock;
skb_reserve(skb, LL_RESERVED_SPACE(dev));
skb_reset_network_header(skb);
eb = (struct ec_cb *)&skb->cb;
eb->cookie = saddr->cookie;
eb->sec = *saddr;
eb->sent = ec_tx_done;
err = -EINVAL;
res = dev_hard_header(skb, dev, ntohs(proto), &addr, NULL, len);
if (res < 0)
goto out_free;
if (res > 0) {
struct ec_framehdr *fh;
/* Poke in our control byte and
port number. Hack, hack. */
fh = (struct ec_framehdr *)(skb->data);
fh->cb = cb;
fh->port = port;
if (sock->type != SOCK_DGRAM) {
skb_reset_tail_pointer(skb);
skb->len = 0;
}
}
/* Copy the data. Returns -EFAULT on error */
err = memcpy_fromiovec(skb_put(skb,len), msg->msg_iov, len);
skb->protocol = proto;
skb->dev = dev;
skb->priority = sk->sk_priority;
if (err)
goto out_free;
err = -ENETDOWN;
if (!(dev->flags & IFF_UP))
goto out_free;
/*
* Now send it
*/
dev_queue_xmit(skb);
dev_put(dev);
mutex_unlock(&econet_mutex);
return len;
out_free:
kfree_skb(skb);
out_unlock:
if (dev)
dev_put(dev);
#else
err = -EPROTOTYPE;
#endif
mutex_unlock(&econet_mutex);
return err;
}
#ifdef CONFIG_ECONET_AUNUDP
/* AUN virtual Econet. */
if (udpsock == NULL) {
mutex_unlock(&econet_mutex);
return -ENETDOWN; /* No socket - can't send */
}
if (len > 32768) {
err = -E2BIG;
goto error;
}
/* Make up a UDP datagram and hand it off to some higher intellect. */
memset(&udpdest, 0, sizeof(udpdest));
udpdest.sin_family = AF_INET;
udpdest.sin_port = htons(AUN_PORT);
/* At the moment we use the stupid Acorn scheme of Econet address
y.x maps to IP a.b.c.x. This should be replaced with something
more flexible and more aware of subnet masks. */
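/* e.g. with a local interface address of 192.168.1.10, station y.45 is
sent to 192.168.1.45: network = local address & 0xffffff00, host = station. */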
{
struct in_device *idev;
unsigned long network = 0;
rcu_read_lock();
idev = __in_dev_get_rcu(dev);
if (idev) {
if (idev->ifa_list)
network = ntohl(idev->ifa_list->ifa_address) &
0xffffff00; /* !!! */
}
rcu_read_unlock();
udpdest.sin_addr.s_addr = htonl(network | addr.station);
}
memset(&ah, 0, sizeof(ah));
ah.port = port;
ah.cb = cb & 0x7f;
ah.code = 2; /* magic */
/* tack our header on the front of the iovec */
size = sizeof(struct aunhdr);
iov[0].iov_base = (void *)&ah;
iov[0].iov_len = size;
userbuf = vmalloc(len);
if (userbuf == NULL) {
err = -ENOMEM;
goto error;
}
iov[1].iov_base = userbuf;
iov[1].iov_len = len;
err = memcpy_fromiovec(userbuf, msg->msg_iov, len);
if (err)
goto error_free_buf;
/* Get a skbuff (no data, just holds our cb information) */
if ((skb = sock_alloc_send_skb(sk, 0,
msg->msg_flags & MSG_DONTWAIT,
&err)) == NULL)
goto error_free_buf;
eb = (struct ec_cb *)&skb->cb;
eb->cookie = saddr->cookie;
eb->timeout = (5*HZ);
eb->start = jiffies;
ah.handle = aun_seq;
eb->seq = (aun_seq++);
eb->sec = *saddr;
skb_queue_tail(&aun_queue, skb);
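/* The queued skb holds only our control-block state; aun_tx_ack()
* completes it when an ack arrives, and ab_cleanup() times it out
* after eb->timeout jiffies.
*/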
udpmsg.msg_name = (void *)&udpdest;
udpmsg.msg_namelen = sizeof(udpdest);
udpmsg.msg_iov = &iov[0];
udpmsg.msg_iovlen = 2;
udpmsg.msg_control = NULL;
udpmsg.msg_controllen = 0;
udpmsg.msg_flags=0;
oldfs = get_fs(); set_fs(KERNEL_DS); /* More privs :-) */
err = sock_sendmsg(udpsock, &udpmsg, size);
set_fs(oldfs);
error_free_buf:
vfree(userbuf);
error:
#else
err = -EPROTOTYPE;
#endif
mutex_unlock(&econet_mutex);
return err;
}
/*
* Look up the address of a socket.
*/
static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
int *uaddr_len, int peer)
{
struct sock *sk;
struct econet_sock *eo;
struct sockaddr_ec *sec = (struct sockaddr_ec *)uaddr;
if (peer)
return -EOPNOTSUPP;
memset(sec, 0, sizeof(*sec));
mutex_lock(&econet_mutex);
sk = sock->sk;
eo = ec_sk(sk);
sec->sec_family = AF_ECONET;
sec->port = eo->port;
sec->addr.station = eo->station;
sec->addr.net = eo->net;
mutex_unlock(&econet_mutex);
*uaddr_len = sizeof(*sec);
return 0;
}
static void econet_destroy_timer(unsigned long data)
{
struct sock *sk=(struct sock *)data;
if (!sk_has_allocations(sk)) {
sk_free(sk);
return;
}
sk->sk_timer.expires = jiffies + 10 * HZ;
add_timer(&sk->sk_timer);
printk(KERN_DEBUG "econet socket destroy delayed\n");
}
/*
* Close an econet socket.
*/
static int econet_release(struct socket *sock)
{
struct sock *sk;
mutex_lock(&econet_mutex);
sk = sock->sk;
if (!sk)
goto out_unlock;
econet_remove_socket(&econet_sklist, sk);
/*
* Now the socket is dead. No more input will appear.
*/
sk->sk_state_change(sk); /* It is useless. Just for sanity. */
sock_orphan(sk);
/* Purge queues */
skb_queue_purge(&sk->sk_receive_queue);
if (sk_has_allocations(sk)) {
sk->sk_timer.data = (unsigned long)sk;
sk->sk_timer.expires = jiffies + HZ;
sk->sk_timer.function = econet_destroy_timer;
add_timer(&sk->sk_timer);
goto out_unlock;
}
sk_free(sk);
out_unlock:
mutex_unlock(&econet_mutex);
return 0;
}
static struct proto econet_proto = {
.name = "ECONET",
.owner = THIS_MODULE,
.obj_size = sizeof(struct econet_sock),
};
/*
* Create an Econet socket
*/
static int econet_create(struct net *net, struct socket *sock, int protocol,
int kern)
{
struct sock *sk;
struct econet_sock *eo;
int err;
if (!net_eq(net, &init_net))
return -EAFNOSUPPORT;
/* Econet only provides datagram services. */
if (sock->type != SOCK_DGRAM)
return -ESOCKTNOSUPPORT;
sock->state = SS_UNCONNECTED;
err = -ENOBUFS;
sk = sk_alloc(net, PF_ECONET, GFP_KERNEL, &econet_proto);
if (sk == NULL)
goto out;
sk->sk_reuse = 1;
sock->ops = &econet_ops;
sock_init_data(sock, sk);
eo = ec_sk(sk);
sock_reset_flag(sk, SOCK_ZAPPED);
sk->sk_family = PF_ECONET;
eo->num = protocol;
econet_insert_socket(&econet_sklist, sk);
return 0;
out:
return err;
}
/*
* Handle Econet specific ioctls
*/
static int ec_dev_ioctl(struct socket *sock, unsigned int cmd, void __user *arg)
{
struct ifreq ifr;
struct ec_device *edev;
struct net_device *dev;
struct sockaddr_ec *sec;
int err;
/*
* Fetch the caller's info block into kernel space
*/
if (copy_from_user(&ifr, arg, sizeof(struct ifreq)))
return -EFAULT;
if ((dev = dev_get_by_name(&init_net, ifr.ifr_name)) == NULL)
return -ENODEV;
sec = (struct sockaddr_ec *)&ifr.ifr_addr;
mutex_lock(&econet_mutex);
err = 0;
switch (cmd) {
case SIOCSIFADDR:
if (!capable(CAP_NET_ADMIN)) {
err = -EPERM;
break;
}
edev = dev->ec_ptr;
if (edev == NULL) {
/* Magic up a new one. */
edev = kzalloc(sizeof(struct ec_device), GFP_KERNEL);
if (edev == NULL) {
err = -ENOMEM;
break;
}
dev->ec_ptr = edev;
} else
net2dev_map[edev->net] = NULL;
edev->station = sec->addr.station;
edev->net = sec->addr.net;
net2dev_map[sec->addr.net] = dev;
if (!net2dev_map[0])
net2dev_map[0] = dev;
break;
case SIOCGIFADDR:
edev = dev->ec_ptr;
if (edev == NULL) {
err = -ENODEV;
break;
}
memset(sec, 0, sizeof(struct sockaddr_ec));
sec->addr.station = edev->station;
sec->addr.net = edev->net;
sec->sec_family = AF_ECONET;
if (copy_to_user(arg, &ifr, sizeof(struct ifreq)))
err = -EFAULT;
break;
default:
err = -EINVAL;
break;
}
mutex_unlock(&econet_mutex);
dev_put(dev);
return err;
}
/*
* Handle generic ioctls
*/
static int econet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
struct sock *sk = sock->sk;
void __user *argp = (void __user *)arg;
switch(cmd) {
case SIOCGSTAMP:
return sock_get_timestamp(sk, argp);
case SIOCGSTAMPNS:
return sock_get_timestampns(sk, argp);
case SIOCSIFADDR:
case SIOCGIFADDR:
return ec_dev_ioctl(sock, cmd, argp);
break;
default:
return -ENOIOCTLCMD;
}
/*NOTREACHED*/
return 0;
}
static const struct net_proto_family econet_family_ops = {
.family = PF_ECONET,
.create = econet_create,
.owner = THIS_MODULE,
};
static const struct proto_ops econet_ops = {
.family = PF_ECONET,
.owner = THIS_MODULE,
.release = econet_release,
.bind = econet_bind,
.connect = sock_no_connect,
.socketpair = sock_no_socketpair,
.accept = sock_no_accept,
.getname = econet_getname,
.poll = datagram_poll,
.ioctl = econet_ioctl,
.listen = sock_no_listen,
.shutdown = sock_no_shutdown,
.setsockopt = sock_no_setsockopt,
.getsockopt = sock_no_getsockopt,
.sendmsg = econet_sendmsg,
.recvmsg = econet_recvmsg,
.mmap = sock_no_mmap,
.sendpage = sock_no_sendpage,
};
#if defined(CONFIG_ECONET_AUNUDP) || defined(CONFIG_ECONET_NATIVE)
/*
* Find the listening socket, if any, for the given data.
*/
static struct sock *ec_listening_socket(unsigned char port, unsigned char
station, unsigned char net)
{
struct sock *sk;
struct hlist_node *node;
spin_lock(&econet_lock);
sk_for_each(sk, node, &econet_sklist) {
struct econet_sock *opt = ec_sk(sk);
if ((opt->port == port || opt->port == 0) &&
(opt->station == station || opt->station == 0) &&
(opt->net == net || opt->net == 0)) {
sock_hold(sk);
goto found;
}
}
sk = NULL;
found:
spin_unlock(&econet_lock);
return sk;
}
/*
* Queue a received packet for a socket.
*/
static int ec_queue_packet(struct sock *sk, struct sk_buff *skb,
unsigned char stn, unsigned char net,
unsigned char cb, unsigned char port)
{
struct ec_cb *eb = (struct ec_cb *)&skb->cb;
struct sockaddr_ec *sec = (struct sockaddr_ec *)&eb->sec;
memset(sec, 0, sizeof(struct sockaddr_ec));
sec->sec_family = AF_ECONET;
sec->type = ECTYPE_PACKET_RECEIVED;
sec->port = port;
sec->cb = cb;
sec->addr.net = net;
sec->addr.station = stn;
return sock_queue_rcv_skb(sk, skb);
}
#endif
#ifdef CONFIG_ECONET_AUNUDP
/*
* Send an AUN protocol response.
*/
static void aun_send_response(__u32 addr, unsigned long seq, int code, int cb)
{
struct sockaddr_in sin = {
.sin_family = AF_INET,
.sin_port = htons(AUN_PORT),
.sin_addr = {.s_addr = addr}
};
struct aunhdr ah = {.code = code, .cb = cb, .handle = seq};
struct kvec iov = {.iov_base = (void *)&ah, .iov_len = sizeof(ah)};
struct msghdr udpmsg;
udpmsg.msg_name = (void *)&sin;
udpmsg.msg_namelen = sizeof(sin);
udpmsg.msg_control = NULL;
udpmsg.msg_controllen = 0;
udpmsg.msg_flags=0;
kernel_sendmsg(udpsock, &udpmsg, &iov, 1, sizeof(ah));
}
/*
* Handle incoming AUN packets. Work out if anybody wants them,
* and send positive or negative acknowledgements as appropriate.
*/
static void aun_incoming(struct sk_buff *skb, struct aunhdr *ah, size_t len)
{
struct iphdr *ip = ip_hdr(skb);
unsigned char stn = ntohl(ip->saddr) & 0xff;
struct dst_entry *dst = skb_dst(skb);
struct ec_device *edev = NULL;
struct sock *sk = NULL;
struct sk_buff *newskb;
if (dst)
edev = dst->dev->ec_ptr;
if (! edev)
goto bad;
if ((sk = ec_listening_socket(ah->port, stn, edev->net)) == NULL)
goto bad; /* Nobody wants it */
newskb = alloc_skb((len - sizeof(struct aunhdr) + 15) & ~15,
GFP_ATOMIC);
if (newskb == NULL)
{
printk(KERN_DEBUG "AUN: memory squeeze, dropping packet.\n");
/* Send nack and hope sender tries again */
goto bad;
}
memcpy(skb_put(newskb, len - sizeof(struct aunhdr)), (void *)(ah+1),
len - sizeof(struct aunhdr));
if (ec_queue_packet(sk, newskb, stn, edev->net, ah->cb, ah->port))
{
/* Socket is bankrupt. */
kfree_skb(newskb);
goto bad;
}
aun_send_response(ip->saddr, ah->handle, 3, 0);
sock_put(sk);
return;
bad:
aun_send_response(ip->saddr, ah->handle, 4, 0);
if (sk)
sock_put(sk);
}
/*
* Handle incoming AUN transmit acknowledgements. If the sequence
* number matches something in our backlog then kill it and tell
* the user. If the remote took too long to reply then we may have
* dropped the packet already.
*/
static void aun_tx_ack(unsigned long seq, int result)
{
struct sk_buff *skb;
unsigned long flags;
struct ec_cb *eb;
spin_lock_irqsave(&aun_queue_lock, flags);
skb_queue_walk(&aun_queue, skb) {
eb = (struct ec_cb *)&skb->cb;
if (eb->seq == seq)
goto foundit;
}
spin_unlock_irqrestore(&aun_queue_lock, flags);
printk(KERN_DEBUG "AUN: unknown sequence %ld\n", seq);
return;
foundit:
tx_result(skb->sk, eb->cookie, result);
skb_unlink(skb, &aun_queue);
spin_unlock_irqrestore(&aun_queue_lock, flags);
kfree_skb(skb);
}
/*
* Deal with received AUN frames - sort out what type of thing it is
* and hand it to the right function.
*/
static void aun_data_available(struct sock *sk, int slen)
{
int err;
struct sk_buff *skb;
unsigned char *data;
struct aunhdr *ah;
size_t len;
while ((skb = skb_recv_datagram(sk, 0, 1, &err)) == NULL) {
if (err == -EAGAIN) {
printk(KERN_ERR "AUN: no data available?!");
return;
}
printk(KERN_DEBUG "AUN: recvfrom() error %d\n", -err);
}
data = skb_transport_header(skb) + sizeof(struct udphdr);
ah = (struct aunhdr *)data;
len = skb->len - sizeof(struct udphdr);
switch (ah->code)
{
case 2:
aun_incoming(skb, ah, len);
break;
case 3:
aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_OK);
break;
case 4:
aun_tx_ack(ah->handle, ECTYPE_TRANSMIT_NOT_LISTENING);
break;
default:
printk(KERN_DEBUG "unknown AUN packet (type %d)\n", data[0]);
}
skb_free_datagram(sk, skb);
}
/*
* Called by the timer to manage the AUN transmit queue. If a packet
* was sent to a dead or nonexistent host then we will never get an
* acknowledgement back. After a few seconds we need to spot this and
* drop the packet.
*/
static void ab_cleanup(unsigned long h)
{
struct sk_buff *skb, *n;
unsigned long flags;
spin_lock_irqsave(&aun_queue_lock, flags);
skb_queue_walk_safe(&aun_queue, skb, n) {
struct ec_cb *eb = (struct ec_cb *)&skb->cb;
if ((jiffies - eb->start) > eb->timeout) {
tx_result(skb->sk, eb->cookie,
ECTYPE_TRANSMIT_NOT_PRESENT);
skb_unlink(skb, &aun_queue);
kfree_skb(skb);
}
}
spin_unlock_irqrestore(&aun_queue_lock, flags);
mod_timer(&ab_cleanup_timer, jiffies + (HZ*2));
}
static int __init aun_udp_initialise(void)
{
int error;
struct sockaddr_in sin;
skb_queue_head_init(&aun_queue);
setup_timer(&ab_cleanup_timer, ab_cleanup, 0);
ab_cleanup_timer.expires = jiffies + (HZ*2);
add_timer(&ab_cleanup_timer);
memset(&sin, 0, sizeof(sin));
sin.sin_port = htons(AUN_PORT);
/* We can count ourselves lucky Acorn machines are too dim to
speak IPv6. :-) */
if ((error = sock_create_kern(PF_INET, SOCK_DGRAM, 0, &udpsock)) < 0)
{
printk("AUN: socket error %d\n", -error);
return error;
}
udpsock->sk->sk_reuse = 1;
udpsock->sk->sk_allocation = GFP_ATOMIC; /* we're going to call it
from interrupts */
error = udpsock->ops->bind(udpsock, (struct sockaddr *)&sin,
sizeof(sin));
if (error < 0)
{
printk("AUN: bind error %d\n", -error);
goto release;
}
udpsock->sk->sk_data_ready = aun_data_available;
return 0;
release:
sock_release(udpsock);
udpsock = NULL;
return error;
}
#endif
#ifdef CONFIG_ECONET_NATIVE
/*
* Receive an Econet frame from a device.
*/
static int econet_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
struct ec_framehdr *hdr;
struct sock *sk = NULL;
struct ec_device *edev = dev->ec_ptr;
if (!net_eq(dev_net(dev), &init_net))
goto drop;
if (skb->pkt_type == PACKET_OTHERHOST)
goto drop;
if (!edev)
goto drop;
if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
return NET_RX_DROP;
if (!pskb_may_pull(skb, sizeof(struct ec_framehdr)))
goto drop;
hdr = (struct ec_framehdr *) skb->data;
/* First check for encapsulated IP */
if (hdr->port == EC_PORT_IP) {
skb->protocol = htons(ETH_P_IP);
skb_pull(skb, sizeof(struct ec_framehdr));
netif_rx(skb);
return NET_RX_SUCCESS;
}
sk = ec_listening_socket(hdr->port, hdr->src_stn, hdr->src_net);
if (!sk)
goto drop;
if (ec_queue_packet(sk, skb, hdr->src_stn, edev->net, hdr->cb,
hdr->port))
goto drop;
sock_put(sk);
return NET_RX_SUCCESS;
drop:
if (sk)
sock_put(sk);
kfree_skb(skb);
return NET_RX_DROP;
}
static struct packet_type econet_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_ECONET),
.func = econet_rcv,
};
static void econet_hw_initialise(void)
{
dev_add_pack(&econet_packet_type);
}
#endif
static int econet_notifier(struct notifier_block *this, unsigned long msg, void *data)
{
struct net_device *dev = (struct net_device *)data;
struct ec_device *edev;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
switch (msg) {
case NETDEV_UNREGISTER:
/* A device has gone down - kill any data we hold for it. */
edev = dev->ec_ptr;
if (edev)
{
if (net2dev_map[0] == dev)
net2dev_map[0] = NULL;
net2dev_map[edev->net] = NULL;
kfree(edev);
dev->ec_ptr = NULL;
}
break;
}
return NOTIFY_DONE;
}
static struct notifier_block econet_netdev_notifier = {
.notifier_call =econet_notifier,
};
static void __exit econet_proto_exit(void)
{
#ifdef CONFIG_ECONET_AUNUDP
del_timer(&ab_cleanup_timer);
if (udpsock)
sock_release(udpsock);
#endif
unregister_netdevice_notifier(&econet_netdev_notifier);
#ifdef CONFIG_ECONET_NATIVE
dev_remove_pack(&econet_packet_type);
#endif
sock_unregister(econet_family_ops.family);
proto_unregister(&econet_proto);
}
static int __init econet_proto_init(void)
{
int err = proto_register(&econet_proto, 0);
if (err != 0)
goto out;
sock_register(&econet_family_ops);
#ifdef CONFIG_ECONET_AUNUDP
aun_udp_initialise();
#endif
#ifdef CONFIG_ECONET_NATIVE
econet_hw_initialise();
#endif
register_netdevice_notifier(&econet_netdev_notifier);
out:
return err;
}
module_init(econet_proto_init);
module_exit(econet_proto_exit);
MODULE_LICENSE("GPL");
MODULE_ALIAS_NETPROTO(PF_ECONET);
| gpl-2.0 |
Kali-/htc-kernel-ace | drivers/w1/masters/matrox_w1.c | 4438 | 6028 | /*
* matrox_w1.c
*
* Copyright (c) 2004 Evgeniy Polyakov <johnpol@2ka.mipt.ru>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <asm/types.h>
#include <asm/atomic.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci_ids.h>
#include <linux/pci.h>
#include "../w1.h"
#include "../w1_int.h"
#include "../w1_log.h"
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Evgeniy Polyakov <johnpol@2ka.mipt.ru>");
MODULE_DESCRIPTION("Driver for transport(Dallas 1-wire prtocol) over VGA DDC(matrox gpio).");
static struct pci_device_id matrox_w1_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400) },
{ },
};
MODULE_DEVICE_TABLE(pci, matrox_w1_tbl);
static int __devinit matrox_w1_probe(struct pci_dev *, const struct pci_device_id *);
static void __devexit matrox_w1_remove(struct pci_dev *);
static struct pci_driver matrox_w1_pci_driver = {
.name = "matrox_w1",
.id_table = matrox_w1_tbl,
.probe = matrox_w1_probe,
.remove = __devexit_p(matrox_w1_remove),
};
/*
* Matrox G400 DDC registers.
*/
#define MATROX_G400_DDC_CLK (1<<4)
#define MATROX_G400_DDC_DATA (1<<1)
#define MATROX_BASE 0x3C00
#define MATROX_STATUS 0x1e14
#define MATROX_PORT_INDEX_OFFSET 0x00
#define MATROX_PORT_DATA_OFFSET 0x0A
#define MATROX_GET_CONTROL 0x2A
#define MATROX_GET_DATA 0x2B
#define MATROX_CURSOR_CTL 0x06
struct matrox_device
{
void __iomem *base_addr;
void __iomem *port_index;
void __iomem *port_data;
u8 data_mask;
unsigned long phys_addr;
void __iomem *virt_addr;
unsigned long found;
struct w1_bus_master *bus_master;
};
static u8 matrox_w1_read_ddc_bit(void *);
static void matrox_w1_write_ddc_bit(void *, u8);
/*
* These functions read and write DDC Data bit.
*
* We use tristate pins, since I can't find any open-drain pin on the
* whole motherboard. Unfortunately we can't connect to Intel's 82801xx
* I/O controller, since we don't know the motherboard schema, which may
* (or may not) have unused GPIOs.
*
* I've heard that the PIIX also has an open-drain pin.
*
* Port mapping.
*/
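/* Indexed register access: write the register number to port_index, then
* move the value through port_data, as the two helpers below do.
*/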
static __inline__ u8 matrox_w1_read_reg(struct matrox_device *dev, u8 reg)
{
u8 ret;
writeb(reg, dev->port_index);
ret = readb(dev->port_data);
barrier();
return ret;
}
static __inline__ void matrox_w1_write_reg(struct matrox_device *dev, u8 reg, u8 val)
{
writeb(reg, dev->port_index);
writeb(val, dev->port_data);
wmb();
}
static void matrox_w1_write_ddc_bit(void *data, u8 bit)
{
u8 ret;
struct matrox_device *dev = data;
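/* Emulate an open-drain output on the tristate pin: writing a 1 releases
* the line (output bit cleared), writing a 0 drives it low via data_mask.
*/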
if (bit)
bit = 0;
else
bit = dev->data_mask;
ret = matrox_w1_read_reg(dev, MATROX_GET_CONTROL);
matrox_w1_write_reg(dev, MATROX_GET_CONTROL, ((ret & ~dev->data_mask) | bit));
matrox_w1_write_reg(dev, MATROX_GET_DATA, 0x00);
}
static u8 matrox_w1_read_ddc_bit(void *data)
{
u8 ret;
struct matrox_device *dev = data;
ret = matrox_w1_read_reg(dev, MATROX_GET_DATA);
return ret;
}
static void matrox_w1_hw_init(struct matrox_device *dev)
{
matrox_w1_write_reg(dev, MATROX_GET_DATA, 0xFF);
matrox_w1_write_reg(dev, MATROX_GET_CONTROL, 0x00);
}
static int __devinit matrox_w1_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct matrox_device *dev;
int err;
assert(pdev != NULL);
assert(ent != NULL);
if (pdev->vendor != PCI_VENDOR_ID_MATROX || pdev->device != PCI_DEVICE_ID_MATROX_G400)
return -ENODEV;
dev = kzalloc(sizeof(struct matrox_device) +
sizeof(struct w1_bus_master), GFP_KERNEL);
if (!dev) {
dev_err(&pdev->dev,
"%s: Failed to create new matrox_device object.\n",
__func__);
return -ENOMEM;
}
dev->bus_master = (struct w1_bus_master *)(dev + 1);
/*
* True for G400, for some other we need resource 0, see drivers/video/matrox/matroxfb_base.c
*/
dev->phys_addr = pci_resource_start(pdev, 1);
dev->virt_addr = ioremap_nocache(dev->phys_addr, 16384);
if (!dev->virt_addr) {
dev_err(&pdev->dev, "%s: failed to ioremap(0x%lx, %d).\n",
__func__, dev->phys_addr, 16384);
err = -EIO;
goto err_out_free_device;
}
dev->base_addr = dev->virt_addr + MATROX_BASE;
dev->port_index = dev->base_addr + MATROX_PORT_INDEX_OFFSET;
dev->port_data = dev->base_addr + MATROX_PORT_DATA_OFFSET;
dev->data_mask = (MATROX_G400_DDC_DATA);
matrox_w1_hw_init(dev);
dev->bus_master->data = dev;
dev->bus_master->read_bit = &matrox_w1_read_ddc_bit;
dev->bus_master->write_bit = &matrox_w1_write_ddc_bit;
err = w1_add_master_device(dev->bus_master);
if (err)
goto err_out_free_device;
pci_set_drvdata(pdev, dev);
dev->found = 1;
dev_info(&pdev->dev, "Matrox G400 GPIO transport layer for 1-wire.\n");
return 0;
err_out_free_device:
if (dev->virt_addr)
iounmap(dev->virt_addr);
kfree(dev);
return err;
}
static void __devexit matrox_w1_remove(struct pci_dev *pdev)
{
struct matrox_device *dev = pci_get_drvdata(pdev);
assert(dev != NULL);
if (dev->found) {
w1_remove_master_device(dev->bus_master);
iounmap(dev->virt_addr);
}
kfree(dev);
}
static int __init matrox_w1_init(void)
{
return pci_register_driver(&matrox_w1_pci_driver);
}
static void __exit matrox_w1_fini(void)
{
pci_unregister_driver(&matrox_w1_pci_driver);
}
module_init(matrox_w1_init);
module_exit(matrox_w1_fini);
| gpl-2.0 |
CarbonROM/android_kernel_htc_msm8960 | arch/mips/powertv/asic/asic_int.c | 4694 | 2804 | /*
* Carsten Langgaard, carstenl@mips.com
* Copyright (C) 2000, 2001, 2004 MIPS Technologies, Inc.
* Copyright (C) 2001 Ralf Baechle
* Portions copyright (C) 2009 Cisco Systems, Inc.
*
* This program is free software; you can distribute it and/or modify it
* under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*
* Routines for generic manipulation of the interrupts found on the PowerTV
* platform.
*
* The interrupt controller is located in the South Bridge, a PIIX4 device
* with two internal 82C59 interrupt controllers.
*/
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/kernel.h>
#include <linux/random.h>
#include <asm/irq_cpu.h>
#include <linux/io.h>
#include <asm/irq_regs.h>
#include <asm/setup.h>
#include <asm/mips-boards/generic.h>
#include <asm/mach-powertv/asic_regs.h>
static DEFINE_RAW_SPINLOCK(asic_irq_lock);
static inline int get_int(void)
{
unsigned long flags;
int irq;
raw_spin_lock_irqsave(&asic_irq_lock, flags);
irq = (asic_read(int_int_scan) >> 4) - 1;
if (irq == 0 || irq >= NR_IRQS)
irq = -1;
raw_spin_unlock_irqrestore(&asic_irq_lock, flags);
return irq;
}
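/*
* Worked decode of int_int_scan following the arithmetic above (the
* register values are assumptions chosen for illustration):
*
* asic_read(int_int_scan) == 0x50 -> irq = (0x50 >> 4) - 1 = 4
* asic_read(int_int_scan) == 0x10 -> irq = (0x10 >> 4) - 1 = 0 -> -1
*
* i.e. a scan value of 0x10 decodes to IRQ 0, which get_int() rejects
* along with anything at or above NR_IRQS.
*/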
static void asic_irqdispatch(void)
{
int irq;
irq = get_int();
if (irq < 0)
return; /* interrupt has already been cleared */
do_IRQ(irq);
}
static inline int clz(unsigned long x)
{
__asm__(
" .set push \n"
" .set mips32 \n"
" clz %0, %1 \n"
" .set pop \n"
: "=r" (x)
: "r" (x));
return x;
}
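/*
* clz() counts leading zeros with the MIPS32 clz instruction, so for
* non-zero x it relates to the generic fls() used below by
* fls(x) == 32 - clz(x). Spot checks:
*
* clz(0x00000800) == 20, fls(0x00000800) == 12
* clz(0x80000000) == 0, fls(0x80000000) == 32
*/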
/*
* Version of ffs that only looks at bits 12..15.
*/
static inline unsigned int irq_ffs(unsigned int pending)
{
return fls(pending) - 1 + CAUSEB_IP;
}
/*
* TODO: check how it works under EIC mode.
*/
asmlinkage void plat_irq_dispatch(void)
{
unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
int irq;
irq = irq_ffs(pending);
if (irq == CAUSEF_IP3)
asic_irqdispatch();
else if (irq >= 0)
do_IRQ(irq);
else
spurious_interrupt();
}
void __init arch_init_irq(void)
{
int i;
asic_irq_init();
/*
* Initialize interrupt exception vectors.
*/
if (cpu_has_veic || cpu_has_vint) {
int nvec = cpu_has_veic ? 64 : 8;
for (i = 0; i < nvec; i++)
set_vi_handler(i, asic_irqdispatch);
}
}
| gpl-2.0 |
yemingxing/X9180_kernel | drivers/mtd/onenand/onenand_bbt.c | 7510 | 6918 | /*
* linux/drivers/mtd/onenand/onenand_bbt.c
*
* Bad Block Table support for the OneNAND driver
*
* Copyright(c) 2005 Samsung Electronics
* Kyungmin Park <kyungmin.park@samsung.com>
*
* Derived from nand_bbt.c
*
* TODO:
* Split BBT core and chip specific BBT.
*/
#include <linux/slab.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/onenand.h>
#include <linux/export.h>
/**
* check_short_pattern - [GENERIC] check if a pattern is in the buffer
* @param buf the buffer to search
* @param len the length of buffer to search
* @param paglen the pagelength
* @param td search pattern descriptor
*
* Check for a pattern at the given place. Used to search bad block
* tables and good / bad block identifiers. Same as check_pattern, but
* no optional empty check and the pattern is expected to start
* at offset 0.
*
*/
static int check_short_pattern(uint8_t *buf, int len, int paglen, struct nand_bbt_descr *td)
{
int i;
uint8_t *p = buf;
/* Compare the pattern */
for (i = 0; i < td->len; i++) {
if (p[i] != td->pattern[i])
return -1;
}
return 0;
}
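/*
* A minimal sketch of how check_short_pattern() is used together with
* the scan_ff_pattern descriptor defined later in this file: a good
* block carries 0xff markers in its OOB area, so any mismatch marks
* the block bad. The buffer contents below are made up for
* illustration.
*/
#if 0 /* illustrative sketch only */
static int oob_says_bad(void)
{
uint8_t oob[2] = { 0xff, 0x00 }; /* second marker byte is not 0xff */
/* largepage_memorybased: .offs = 0, .len = 2, scan_ff_pattern */
return check_short_pattern(oob, 2, 0, &largepage_memorybased) < 0;
}
#endif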
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
* @param mtd MTD device structure
* @param buf temporary buffer
* @param bd descriptor for the good/bad block search pattern
* @param chip create the table for a specific chip, -1 read all chips.
* Applies only if NAND_BBT_PERCHIP option is set
*
* Create a bad block table by scanning the device
* for the given good/bad block identify pattern
*/
static int create_bbt(struct mtd_info *mtd, uint8_t *buf, struct nand_bbt_descr *bd, int chip)
{
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm = this->bbm;
int i, j, numblocks, len, scanlen;
int startblock;
loff_t from;
size_t readlen, ooblen;
struct mtd_oob_ops ops;
int rgn;
printk(KERN_INFO "Scanning device for bad blocks\n");
len = 2;
/* We need only read few bytes from the OOB area */
scanlen = ooblen = 0;
readlen = bd->len;
/* chip == -1 case only */
/* Note that numblocks is 2 * (real numblocks) here;
* see i += 2 below as it makes shifting and masking less painful
*/
numblocks = this->chipsize >> (bbm->bbt_erase_shift - 1);
startblock = 0;
from = 0;
ops.mode = MTD_OPS_PLACE_OOB;
ops.ooblen = readlen;
ops.oobbuf = buf;
ops.len = ops.ooboffs = ops.retlen = ops.oobretlen = 0;
for (i = startblock; i < numblocks; ) {
int ret;
for (j = 0; j < len; j++) {
/* No need to read pages fully,
* just read required OOB bytes */
ret = onenand_bbt_read_oob(mtd,
from + j * this->writesize + bd->offs, &ops);
/* If it is an initial bad block, just ignore it */
if (ret == ONENAND_BBT_READ_FATAL_ERROR)
return -EIO;
if (ret || check_short_pattern(&buf[j * scanlen],
scanlen, this->writesize, bd)) {
bbm->bbt[i >> 3] |= 0x03 << (i & 0x6);
printk(KERN_INFO "OneNAND eraseblock %d is an "
"initial bad block\n", i >> 1);
mtd->ecc_stats.badblocks++;
break;
}
}
i += 2;
if (FLEXONENAND(this)) {
rgn = flexonenand_region(mtd, from);
from += mtd->eraseregions[rgn].erasesize;
} else
from += (1 << bbm->bbt_erase_shift);
}
return 0;
}
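/*
* The table packs two bits per block (four blocks per byte). With the
* doubled index i used above, a bad block i/2 is recorded by
* bbm->bbt[i >> 3] |= 0x03 << (i & 0x6); e.g. block 5 gives i = 10,
* so byte 1, shift 2, and bbt[1] |= 0x0c.
*/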
/**
* onenand_memory_bbt - [GENERIC] create a memory based bad block table
* @param mtd MTD device structure
* @param bd descriptor for the good/bad block search pattern
*
* The function creates a memory based bbt by scanning the device
* for manufacturer / software marked good / bad blocks
*/
static inline int onenand_memory_bbt (struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct onenand_chip *this = mtd->priv;
bd->options &= ~NAND_BBT_SCANEMPTY;
return create_bbt(mtd, this->page_buf, bd, -1);
}
/**
* onenand_isbad_bbt - [OneNAND Interface] Check if a block is bad
* @param mtd MTD device structure
* @param offs offset in the device
* @param allowbbt allow access to bad block table region
*/
static int onenand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt)
{
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm = this->bbm;
int block;
uint8_t res;
/* Get block number * 2 */
block = (int) (onenand_block(this, offs) << 1);
res = (bbm->bbt[block >> 3] >> (block & 0x06)) & 0x03;
pr_debug("onenand_isbad_bbt: bbt info for offs 0x%08x: (block %d) 0x%02x\n",
(unsigned int) offs, block >> 1, res);
switch ((int) res) {
case 0x00: return 0;
case 0x01: return 1;
case 0x02: return allowbbt ? 0 : 1;
}
return 1;
}
/**
* onenand_scan_bbt - [OneNAND Interface] scan, find, read and maybe create bad block table(s)
* @param mtd MTD device structure
* @param bd descriptor for the good/bad block search pattern
*
* The function checks, if a bad block table(s) is/are already
* available. If not it scans the device for manufacturer
* marked good / bad blocks and writes the bad block table(s) to
* the selected place.
*
* The bad block table memory is allocated here. It is freed
* by the onenand_release function.
*
*/
int onenand_scan_bbt(struct mtd_info *mtd, struct nand_bbt_descr *bd)
{
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm = this->bbm;
int len, ret = 0;
len = this->chipsize >> (this->erase_shift + 2);
/* Allocate memory (2bit per block) and clear the memory bad block table */
bbm->bbt = kzalloc(len, GFP_KERNEL);
if (!bbm->bbt)
return -ENOMEM;
/* Set the bad block position */
bbm->badblockpos = ONENAND_BADBLOCK_POS;
/* Set erase shift */
bbm->bbt_erase_shift = this->erase_shift;
if (!bbm->isbad_bbt)
bbm->isbad_bbt = onenand_isbad_bbt;
/* Scan the device to build a memory based bad block table */
if ((ret = onenand_memory_bbt(mtd, bd))) {
printk(KERN_ERR "onenand_scan_bbt: Can't scan flash and build the RAM-based BBT\n");
kfree(bbm->bbt);
bbm->bbt = NULL;
}
return ret;
}
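/*
* Size check for the allocation above: len = chipsize >> (erase_shift
* + 2) is exactly 2 bits per eraseblock. For example, a 128 MiB chip
* with 128 KiB blocks (erase_shift = 17) has 1024 blocks and needs
* 0x8000000 >> 19 = 256 bytes, i.e. 1024 blocks * 2 bits / 8.
*/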
/*
* Define some generic bad / good block scan pattern which are used
* while scanning a device for factory marked good / bad blocks.
*/
static uint8_t scan_ff_pattern[] = { 0xff, 0xff };
static struct nand_bbt_descr largepage_memorybased = {
.options = 0,
.offs = 0,
.len = 2,
.pattern = scan_ff_pattern,
};
/**
* onenand_default_bbt - [OneNAND Interface] Select a default bad block table for the device
* @param mtd MTD device structure
*
* This function selects the default bad block table
* support for the device and calls the onenand_scan_bbt function
*/
int onenand_default_bbt(struct mtd_info *mtd)
{
struct onenand_chip *this = mtd->priv;
struct bbm_info *bbm;
this->bbm = kzalloc(sizeof(struct bbm_info), GFP_KERNEL);
if (!this->bbm)
return -ENOMEM;
bbm = this->bbm;
/* 1KB page has same configuration as 2KB page */
if (!bbm->badblock_pattern)
bbm->badblock_pattern = &largepage_memorybased;
return onenand_scan_bbt(mtd, bbm->badblock_pattern);
}
EXPORT_SYMBOL(onenand_scan_bbt);
EXPORT_SYMBOL(onenand_default_bbt);
| gpl-2.0 |
eoghan2t9/android_kernel_oppo_find5 | arch/arm/plat-mxc/iomux-v3.c | 8022 | 2410 | /*
* Copyright 2004-2006 Freescale Semiconductor, Inc. All Rights Reserved.
* Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
* Copyright (C) 2009 by Jan Weitzel Phytec Messtechnik GmbH,
* <armlinux@phytec.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/gpio.h>
#include <mach/hardware.h>
#include <asm/mach/map.h>
#include <mach/iomux-v3.h>
static void __iomem *base;
/*
* configures a single pad in the iomuxer
*/
int mxc_iomux_v3_setup_pad(iomux_v3_cfg_t pad)
{
u32 mux_ctrl_ofs = (pad & MUX_CTRL_OFS_MASK) >> MUX_CTRL_OFS_SHIFT;
u32 mux_mode = (pad & MUX_MODE_MASK) >> MUX_MODE_SHIFT;
u32 sel_input_ofs = (pad & MUX_SEL_INPUT_OFS_MASK) >> MUX_SEL_INPUT_OFS_SHIFT;
u32 sel_input = (pad & MUX_SEL_INPUT_MASK) >> MUX_SEL_INPUT_SHIFT;
u32 pad_ctrl_ofs = (pad & MUX_PAD_CTRL_OFS_MASK) >> MUX_PAD_CTRL_OFS_SHIFT;
u32 pad_ctrl = (pad & MUX_PAD_CTRL_MASK) >> MUX_PAD_CTRL_SHIFT;
if (mux_ctrl_ofs)
__raw_writel(mux_mode, base + mux_ctrl_ofs);
if (sel_input_ofs)
__raw_writel(sel_input, base + sel_input_ofs);
if (!(pad_ctrl & NO_PAD_CTRL) && pad_ctrl_ofs)
__raw_writel(pad_ctrl, base + pad_ctrl_ofs);
return 0;
}
EXPORT_SYMBOL(mxc_iomux_v3_setup_pad);
int mxc_iomux_v3_setup_multiple_pads(iomux_v3_cfg_t *pad_list, unsigned count)
{
iomux_v3_cfg_t *p = pad_list;
int i;
int ret;
for (i = 0; i < count; i++) {
ret = mxc_iomux_v3_setup_pad(*p);
if (ret)
return ret;
p++;
}
return 0;
}
EXPORT_SYMBOL(mxc_iomux_v3_setup_multiple_pads);
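/*
* Typical board-code usage, sketched below. The pad table entry is a
* hypothetical placeholder, not a real pad definition; boards fill in
* values such as MX51_PAD_UART1_TXD__UART1_TXD from the SoC headers.
*/
#if 0 /* illustrative sketch only */
static iomux_v3_cfg_t board_pads[] = {
0, /* placeholder pad definition */
};
static void __init board_init_pads(void)
{
mxc_iomux_v3_setup_multiple_pads(board_pads, ARRAY_SIZE(board_pads));
}
#endif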
void mxc_iomux_v3_init(void __iomem *iomux_v3_base)
{
base = iomux_v3_base;
}
| gpl-2.0 |
montekki/linux-2.6-acedia | drivers/macintosh/windfarm_cpufreq_clamp.c | 9046 | 2417 | #include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/cpufreq.h>
#include <asm/prom.h>
#include "windfarm.h"
#define VERSION "0.3"
static int clamped;
static struct wf_control *clamp_control;
static int clamp_notifier_call(struct notifier_block *self,
unsigned long event, void *data)
{
struct cpufreq_policy *p = data;
unsigned long max_freq;
if (event != CPUFREQ_ADJUST)
return 0;
max_freq = clamped ? (p->cpuinfo.min_freq) : (p->cpuinfo.max_freq);
cpufreq_verify_within_limits(p, 0, max_freq);
return 0;
}
static struct notifier_block clamp_notifier = {
.notifier_call = clamp_notifier_call,
};
static int clamp_set(struct wf_control *ct, s32 value)
{
if (value)
printk(KERN_INFO "windfarm: Clamping CPU frequency to "
"minimum !\n");
else
printk(KERN_INFO "windfarm: CPU frequency unclamped !\n");
clamped = value;
cpufreq_update_policy(0);
return 0;
}
static int clamp_get(struct wf_control *ct, s32 *value)
{
*value = clamped;
return 0;
}
static s32 clamp_min(struct wf_control *ct)
{
return 0;
}
static s32 clamp_max(struct wf_control *ct)
{
return 1;
}
static struct wf_control_ops clamp_ops = {
.set_value = clamp_set,
.get_value = clamp_get,
.get_min = clamp_min,
.get_max = clamp_max,
.owner = THIS_MODULE,
};
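/*
* The windfarm core drives this control through the ops table. In
* sketch form (error handling elided, helper name hypothetical), a
* governor would clamp and unclamp like this:
*/
#if 0 /* illustrative sketch only */
static void force_clamp(struct wf_control *ct, s32 on)
{
s32 cur;
ct->ops->get_value(ct, &cur);
if (cur != on)
ct->ops->set_value(ct, on); /* 0 = unclamped, 1 = clamped */
}
#endif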
static int __init wf_cpufreq_clamp_init(void)
{
struct wf_control *clamp;
/* Don't register on old machines that use therm_pm72 for now */
if (of_machine_is_compatible("PowerMac7,2") ||
of_machine_is_compatible("PowerMac7,3") ||
of_machine_is_compatible("RackMac3,1"))
return -ENODEV;
clamp = kmalloc(sizeof(struct wf_control), GFP_KERNEL);
if (clamp == NULL)
return -ENOMEM;
cpufreq_register_notifier(&clamp_notifier, CPUFREQ_POLICY_NOTIFIER);
clamp->ops = &clamp_ops;
clamp->name = "cpufreq-clamp";
if (wf_register_control(clamp))
goto fail;
clamp_control = clamp;
return 0;
fail:
kfree(clamp);
return -ENODEV;
}
static void __exit wf_cpufreq_clamp_exit(void)
{
if (clamp_control)
wf_unregister_control(clamp_control);
}
module_init(wf_cpufreq_clamp_init);
module_exit(wf_cpufreq_clamp_exit);
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("CPU frequency clamp for PowerMacs thermal control");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Sohamlad7/kernel | drivers/ide/ide-legacy.c | 10070 | 1282 | #include <linux/kernel.h>
#include <linux/export.h>
#include <linux/ide.h>
static void ide_legacy_init_one(struct ide_hw **hws, struct ide_hw *hw,
u8 port_no, const struct ide_port_info *d,
unsigned long config)
{
unsigned long base, ctl;
int irq;
if (port_no == 0) {
base = 0x1f0;
ctl = 0x3f6;
irq = 14;
} else {
base = 0x170;
ctl = 0x376;
irq = 15;
}
if (!request_region(base, 8, d->name)) {
printk(KERN_ERR "%s: I/O resource 0x%lX-0x%lX not free.\n",
d->name, base, base + 7);
return;
}
if (!request_region(ctl, 1, d->name)) {
printk(KERN_ERR "%s: I/O resource 0x%lX not free.\n",
d->name, ctl);
release_region(base, 8);
return;
}
ide_std_init_ports(hw, base, ctl);
hw->irq = irq;
hw->config = config;
hws[port_no] = hw;
}
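/*
* Legacy ISA defaults wired in above:
*
* port 0: base 0x1f0, ctl 0x3f6, IRQ 14 (primary channel)
* port 1: base 0x170, ctl 0x376, IRQ 15 (secondary channel)
*/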
int ide_legacy_device_add(const struct ide_port_info *d, unsigned long config)
{
struct ide_hw hw[2], *hws[] = { NULL, NULL };
memset(&hw, 0, sizeof(hw));
if ((d->host_flags & IDE_HFLAG_QD_2ND_PORT) == 0)
ide_legacy_init_one(hws, &hw[0], 0, d, config);
ide_legacy_init_one(hws, &hw[1], 1, d, config);
if (hws[0] == NULL && hws[1] == NULL &&
(d->host_flags & IDE_HFLAG_SINGLE))
return -ENOENT;
return ide_host_add(d, hws, 2, NULL);
}
EXPORT_SYMBOL_GPL(ide_legacy_device_add);
| gpl-2.0 |
gl-sergei/percona-xtrabackup | storage/ndb/src/common/debugger/EventLogger.cpp | 87 | 56826 | /*
Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <ndb_global.h>
#include <EventLogger.hpp>
#include <TransporterCallback.hpp>
#include <NdbConfig.h>
#include <kernel/BlockNumbers.h>
#include <signaldata/ArbitSignalData.hpp>
#include <signaldata/FailRep.hpp>
#include <NodeState.hpp>
#include <version.h>
#include <ndb_version.h>
#include <ndbd_exit_codes.h>
#define make_uint64(a,b) (((Uint64)(a)) + (((Uint64)(b)) << 32))
//
// PUBLIC
//
EventLoggerBase::~EventLoggerBase()
{
}
#define QQQQ char *m_text, size_t m_text_len, const Uint32* theData, Uint32 len
void getTextConnected(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Node %u Connected",
theData[1]);
}
void getTextConnectedApiVersion(QQQQ) {
char tmp[100];
Uint32 mysql_version = theData[3];
if (theData[2] < NDBD_SPLIT_VERSION)
mysql_version = 0;
BaseString::snprintf(m_text, m_text_len,
"Node %u: API %s",
theData[1],
ndbGetVersionString(theData[2], mysql_version, 0,
tmp, sizeof(tmp)));
}
void getTextDisconnected(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Node %u Disconnected",
theData[1]);
}
void getTextCommunicationClosed(QQQQ) {
//-----------------------------------------------------------------------
// REPORT communication to node closed.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Communication to Node %u closed",
theData[1]);
}
void getTextCommunicationOpened(QQQQ) {
//-----------------------------------------------------------------------
// REPORT communication to node opened.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Communication to Node %u opened",
theData[1]);
}
void getTextNDBStartStarted(QQQQ) {
//-----------------------------------------------------------------------
// Start of NDB has been initiated.
//-----------------------------------------------------------------------
char tmp[100];
Uint32 mysql_version = theData[2];
if (theData[1] < NDBD_SPLIT_VERSION)
mysql_version = 0;
BaseString::snprintf(m_text, m_text_len,
"Start initiated (%s)",
ndbGetVersionString(theData[1], mysql_version, 0,
tmp, sizeof(tmp)));
}
void getTextNDBStopStarted(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"%s shutdown initiated",
(theData[1] == 1 ? "Cluster" : "Node"));
}
void getRestartAction(Uint32 action, BaseString &str)
{
if (action == 0)
return;
str.appfmt(", restarting");
if (action & 2)
str.appfmt(", no start");
if (action & 4)
str.appfmt(", initial");
}
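/*
* The action word is a bitmask rendered cumulatively, so e.g.
* action = 1 yields ", restarting" and action = 6 (no start | initial)
* yields ", restarting, no start, initial"; action = 0 adds nothing.
*/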
void getTextNDBStopCompleted(QQQQ) {
BaseString action_str("");
BaseString signum_str("");
getRestartAction(theData[1], action_str);
if (theData[2])
signum_str.appfmt(" Initiated by signal %d.", theData[2]);
BaseString::snprintf(m_text, m_text_len,
"Node shutdown completed%s.%s",
action_str.c_str(),
signum_str.c_str());
}
void getTextNDBStopForced(QQQQ) {
BaseString action_str("");
BaseString reason_str("");
BaseString sphase_str("");
int signum = theData[2];
int error = theData[3];
int sphase = theData[4];
int extra = theData[5];
if (signum)
getRestartAction(theData[1],action_str);
if (signum)
reason_str.appfmt(" Initiated by signal %d.", signum);
if (error)
{
ndbd_exit_classification cl;
ndbd_exit_status st;
const char *msg = ndbd_exit_message(error, &cl);
const char *cl_msg = ndbd_exit_classification_message(cl, &st);
const char *st_msg = ndbd_exit_status_message(st);
reason_str.appfmt(" Caused by error %d: \'%s(%s). %s\'.",
error, msg, cl_msg, st_msg);
if (extra != 0)
reason_str.appfmt(" (extra info %d)", extra);
}
if (sphase < 255)
sphase_str.appfmt(" Occurred during startphase %u.", sphase);
BaseString::snprintf(m_text, m_text_len,
"Forced node shutdown completed%s.%s%s",
action_str.c_str(), sphase_str.c_str(),
reason_str.c_str());
}
void getTextNDBStopAborted(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Node shutdown aborted");
}
void getTextNDBStartCompleted(QQQQ) {
//-----------------------------------------------------------------------
// Start of NDB has been completed.
//-----------------------------------------------------------------------
char tmp[100];
Uint32 mysql_version = theData[2];
if (theData[1] < NDBD_SPLIT_VERSION)
mysql_version = 0;
BaseString::snprintf(m_text, m_text_len,
"Started (%s)",
ndbGetVersionString(theData[1], mysql_version, 0,
tmp, sizeof(tmp)));
}
void getTextSTTORRYRecieved(QQQQ) {
//-----------------------------------------------------------------------
// STTORRY received after restart finished.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"STTORRY received after restart finished");
}
void getTextStartPhaseCompleted(QQQQ) {
//-----------------------------------------------------------------------
// REPORT Start phase completed.
//-----------------------------------------------------------------------
const char *type = "<Unknown>";
switch((NodeState::StartType)theData[2]){
case NodeState::ST_INITIAL_START:
type = "(initial start)";
break;
case NodeState::ST_SYSTEM_RESTART:
type = "(system restart)";
break;
case NodeState::ST_NODE_RESTART:
type = "(node restart)";
break;
case NodeState::ST_INITIAL_NODE_RESTART:
type = "(initial node restart)";
break;
case NodeState::ST_ILLEGAL_TYPE:
type = "";
break;
default:
BaseString::snprintf(m_text, m_text_len,
"Start phase %u completed (unknown = %d)",
theData[1],
theData[2]);
return;
}
BaseString::snprintf(m_text, m_text_len,
"Start phase %u completed %s",
theData[1],
type);
}
void getTextCM_REGCONF(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"CM_REGCONF president = %u, own Node = %u, our dynamic id = %u/%u",
theData[2],
theData[1],
(theData[3] >> 16), (theData[3] & 0xFFFF));
}
void getTextCM_REGREF(QQQQ) {
const char* line = "";
switch (theData[3]) {
case 0:
line = "Busy";
break;
case 1:
line = "Election with wait = false";
break;
case 2:
line = "Election with wait = false";
break;
case 3:
line = "Not president";
break;
case 4:
line = "Election without selecting new candidate";
break;
default:
line = "No such cause";
break;
}//switch
BaseString::snprintf(m_text, m_text_len,
"CM_REGREF from Node %u to our Node %u. Cause = %s",
theData[2],
theData[1],
line);
}
void getTextFIND_NEIGHBOURS(QQQQ) {
//-----------------------------------------------------------------------
// REPORT Node Restart copied a fragment.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"We are Node %u with dynamic ID %u, our left neighbour "
"is Node %u, our right is Node %u",
theData[1],
theData[4],
theData[2],
theData[3]);
}
void getTextNodeFailCompleted(QQQQ) {
//-----------------------------------------------------------------------
// REPORT Node failure phase completed.
//-----------------------------------------------------------------------
if (theData[1] == 0)
{
if (theData[3] != 0) {
BaseString::snprintf(m_text, m_text_len,
"Node %u completed failure of Node %u",
theData[3],
theData[2]);
} else {
BaseString::snprintf(m_text, m_text_len,
"All nodes completed failure of Node %u",
theData[2]);
}//if
} else {
const char* line = "";
if (theData[1] == DBTC){
line = "DBTC";
}else if (theData[1] == DBDICT){
line = "DBDICT";
}else if (theData[1] == DBDIH){
line = "DBDIH";
}else if (theData[1] == DBLQH){
line = "DBLQH";
}
BaseString::snprintf(m_text, m_text_len,
"Node failure of %u %s completed",
theData[2],
line);
}
}
void getTextNODE_FAILREP(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Node %u has failed. The Node state at failure "
"was %u",
theData[1],
theData[2]);
}
void getTextArbitState(QQQQ) {
//-----------------------------------------------------------------------
// REPORT arbitrator found or lost.
//-----------------------------------------------------------------------
{
const ArbitSignalData* sd = (ArbitSignalData*)theData;
char ticketText[ArbitTicket::TextLength + 1];
char errText[ArbitCode::ErrTextLength + 1];
const unsigned code = sd->code & 0xFFFF;
const unsigned state = sd->code >> 16;
switch (code) {
case ArbitCode::ThreadStart:
BaseString::snprintf(m_text, m_text_len,
"President restarts arbitration thread [state=%u]",
state);
break;
case ArbitCode::PrepPart2:
sd->ticket.getText(ticketText, sizeof(ticketText));
BaseString::snprintf(m_text, m_text_len,
"Prepare arbitrator node %u [ticket=%s]",
sd->node, ticketText);
break;
case ArbitCode::PrepAtrun:
sd->ticket.getText(ticketText, sizeof(ticketText));
BaseString::snprintf(m_text, m_text_len,
"Receive arbitrator node %u [ticket=%s]",
sd->node, ticketText);
break;
case ArbitCode::ApiStart:
sd->ticket.getText(ticketText, sizeof(ticketText));
BaseString::snprintf(m_text, m_text_len,
"Started arbitrator node %u [ticket=%s]",
sd->node, ticketText);
break;
case ArbitCode::ApiFail:
BaseString::snprintf(m_text, m_text_len,
"Lost arbitrator node %u - process failure [state=%u]",
sd->node, state);
break;
case ArbitCode::ApiExit:
BaseString::snprintf(m_text, m_text_len,
"Lost arbitrator node %u - process exit [state=%u]",
sd->node, state);
break;
default:
ArbitCode::getErrText(code, errText, sizeof(errText));
BaseString::snprintf(m_text, m_text_len,
"Lost arbitrator node %u - %s [state=%u]",
sd->node, errText, state);
break;
}
}
}
void getTextArbitResult(QQQQ) {
//-----------------------------------------------------------------------
// REPORT arbitration result (the failures may not reach us).
//-----------------------------------------------------------------------
{
const ArbitSignalData* sd = (ArbitSignalData*)theData;
char errText[ArbitCode::ErrTextLength + 1];
const unsigned code = sd->code & 0xFFFF;
const unsigned state = sd->code >> 16;
switch (code) {
case ArbitCode::LoseNodes:
BaseString::snprintf(m_text, m_text_len,
"Arbitration check lost - less than 1/2 nodes left");
break;
case ArbitCode::WinNodes:
BaseString::snprintf(m_text, m_text_len,
"Arbitration check won - all node groups and more than 1/2 nodes left");
break;
case ArbitCode::WinGroups:
BaseString::snprintf(m_text, m_text_len,
"Arbitration check won - node group majority");
break;
case ArbitCode::LoseGroups:
BaseString::snprintf(m_text, m_text_len,
"Arbitration check lost - missing node group");
break;
case ArbitCode::Partitioning:
BaseString::snprintf(m_text, m_text_len,
"Network partitioning - arbitration required");
break;
case ArbitCode::WinChoose:
BaseString::snprintf(m_text, m_text_len,
"Arbitration won - positive reply from node %u",
sd->node);
break;
case ArbitCode::LoseChoose:
BaseString::snprintf(m_text, m_text_len,
"Arbitration lost - negative reply from node %u",
sd->node);
break;
case ArbitCode::LoseNorun:
BaseString::snprintf(m_text, m_text_len,
"Network partitioning - no arbitrator available");
break;
case ArbitCode::LoseNocfg:
BaseString::snprintf(m_text, m_text_len,
"Network partitioning - no arbitrator configured");
break;
case ArbitCode::WinWaitExternal:{
char buf[8*4*2+1];
sd->mask.getText(buf);
BaseString::snprintf(m_text, m_text_len,
"Continuing after wait for external arbitration, "
"nodes: %s", buf);
break;
}
default:
ArbitCode::getErrText(code, errText, sizeof(errText));
BaseString::snprintf(m_text, m_text_len,
"Arbitration failure - %s [state=%u]",
errText, state);
break;
}
}
}
void getTextGlobalCheckpointStarted(QQQQ) {
//-----------------------------------------------------------------------
// This event reports that a global checkpoint has been started and this
// node is the master of this global checkpoint.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Global checkpoint %u started",
theData[1]);
}
void getTextGlobalCheckpointCompleted(QQQQ) {
//-----------------------------------------------------------------------
// This event reports that a global checkpoint has been completed on this
// node and the node is the master of this global checkpoint.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Global checkpoint %u completed",
theData[1]);
}
void getTextLocalCheckpointStarted(QQQQ) {
//-----------------------------------------------------------------------
// This event reports that a local checkpoint has been started and this
// node is the master of this local checkpoint.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Local checkpoint %u started. "
"Keep GCI = %u oldest restorable GCI = %u",
theData[1],
theData[2],
theData[3]);
}
void getTextLocalCheckpointCompleted(QQQQ) {
//-----------------------------------------------------------------------
// This event reports that a local checkpoint has been completed on this
// node and the node is the master of this local checkpoint.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Local checkpoint %u completed",
theData[1]);
}
void getTextTableCreated(QQQQ) {
//-----------------------------------------------------------------------
// This event reports that a table has been created.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Table with ID = %u created",
theData[1]);
}
/* STRANGE */
void getTextLCPStoppedInCalcKeepGci(QQQQ) {
if (theData[1] == 0)
BaseString::snprintf(m_text, m_text_len,
"Local Checkpoint stopped in CALCULATED_KEEP_GCI");
}
void getTextNR_CopyDict(QQQQ) {
//-----------------------------------------------------------------------
// REPORT Node Restart completed copy of dictionary information.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Node restart completed copy of dictionary information");
}
void getTextNR_CopyDistr(QQQQ) {
//-----------------------------------------------------------------------
// REPORT Node Restart completed copy of distribution information.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Node restart completed copy of distribution information");
}
void getTextNR_CopyFragsStarted(QQQQ) {
//-----------------------------------------------------------------------
// REPORT Node Restart is starting to copy the fragments.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Node restart starting to copy the fragments "
"to Node %u",
theData[1]);
}
void getTextNR_CopyFragDone(QQQQ) {
//-----------------------------------------------------------------------
// REPORT Node Restart copied a fragment.
//-----------------------------------------------------------------------
Uint64 rows = theData[4] + (Uint64(theData[5]) << 32);
Uint64 bytes = theData[6] + (Uint64(theData[7]) << 32);
BaseString::snprintf(m_text, m_text_len,
"Table ID = %u, fragment ID = %u have been synced "
"to Node %u rows: %llu bytes: %llu ",
theData[2],
theData[3],
theData[1],
rows, bytes);
}
void getTextNR_CopyFragsCompleted(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Node restart completed copying the fragments "
"to Node %u",
theData[1]);
}
void getTextLCPFragmentCompleted(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Table ID = %u, fragment ID = %u has completed LCP "
"on Node %u maxGciStarted: %d maxGciCompleted: %d",
theData[2],
theData[3],
theData[1],
theData[4],
theData[5]);
}
void getTextTransReportCounters(QQQQ) {
// -------------------------------------------------------------------
// Report information about transaction activity once per 10 seconds.
// -------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"Trans. Count = %u, Commit Count = %u, "
"Read Count = %u, Simple Read Count = %u, "
"Write Count = %u, AttrInfo Count = %u, "
"Concurrent Operations = %u, Abort Count = %u"
" Scans = %u Range scans = %u",
theData[1],
theData[2],
theData[3],
theData[4],
theData[5],
theData[6],
theData[7],
theData[8],
theData[9],
theData[10]);
}
void getTextOperationReportCounters(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Operations=%u",
theData[1]);
}
void getTextUndoLogBlocked(QQQQ) {
//-----------------------------------------------------------------------
// REPORT Undo Logging blocked due to buffer near to overflow.
//-----------------------------------------------------------------------
BaseString::snprintf(m_text, m_text_len,
"ACC Blocked %u and TUP Blocked %u times last second",
theData[1],
theData[2]);
}
void getTextTransporterError(QQQQ) {
struct myTransporterError{
Uint32 errorNum;
char errorString[256];
};
int i = 0;
int length = 0;
static const struct myTransporterError TransporterErrorString[]=
{
//TE_NO_ERROR = 0
{TE_NO_ERROR,"No error"},
//TE_ERROR_CLOSING_SOCKET = 0x1
{TE_ERROR_CLOSING_SOCKET,"Error found during closing of socket"},
//TE_ERROR_IN_SELECT_BEFORE_ACCEPT = 0x2
{TE_ERROR_IN_SELECT_BEFORE_ACCEPT,"Error found before accept. The transporter will retry"},
//TE_INVALID_MESSAGE_LENGTH = 0x3 | TE_DO_DISCONNECT
{TE_INVALID_MESSAGE_LENGTH,"Error found in message (invalid message length)"},
//TE_INVALID_CHECKSUM = 0x4 | TE_DO_DISCONNECT
{TE_INVALID_CHECKSUM,"Error found in message (checksum)"},
//TE_COULD_NOT_CREATE_SOCKET = 0x5
{TE_COULD_NOT_CREATE_SOCKET,"Error found while creating socket (can't create socket)"},
//TE_COULD_NOT_BIND_SOCKET = 0x6
{TE_COULD_NOT_BIND_SOCKET,"Error found while binding server socket"},
//TE_LISTEN_FAILED = 0x7
{TE_LISTEN_FAILED,"Error found while listening to server socket"},
//TE_ACCEPT_RETURN_ERROR = 0x8
{TE_ACCEPT_RETURN_ERROR,"Error found during accept (accept returned an error)"},
//TE_SHM_DISCONNECT = 0xb | TE_DO_DISCONNECT
{TE_SHM_DISCONNECT,"The remote node has disconnected"},
//TE_SHM_IPC_STAT = 0xc | TE_DO_DISCONNECT
{TE_SHM_IPC_STAT,"Unable to check shm segment"},
//TE_SHM_UNABLE_TO_CREATE_SEGMENT = 0xd
{TE_SHM_UNABLE_TO_CREATE_SEGMENT,"Unable to create shm segment"},
//TE_SHM_UNABLE_TO_ATTACH_SEGMENT = 0xe
{TE_SHM_UNABLE_TO_ATTACH_SEGMENT,"Unable to attach shm segment"},
//TE_SHM_UNABLE_TO_REMOVE_SEGMENT = 0xf
{TE_SHM_UNABLE_TO_REMOVE_SEGMENT,"Unable to remove shm segment"},
//TE_TOO_SMALL_SIGID = 0x10
{TE_TOO_SMALL_SIGID,"Sig ID too small"},
//TE_TOO_LARGE_SIGID = 0x11
{TE_TOO_LARGE_SIGID,"Sig ID too large"},
//TE_WAIT_STACK_FULL = 0x12 | TE_DO_DISCONNECT
{TE_WAIT_STACK_FULL,"Wait stack was full"},
//TE_RECEIVE_BUFFER_FULL = 0x13 | TE_DO_DISCONNECT
{TE_RECEIVE_BUFFER_FULL,"Receive buffer was full"},
//TE_SIGNAL_LOST_SEND_BUFFER_FULL = 0x14 | TE_DO_DISCONNECT
{TE_SIGNAL_LOST_SEND_BUFFER_FULL,"Send buffer was full, and trying to force send fails"},
//TE_SIGNAL_LOST = 0x15
{TE_SIGNAL_LOST,"Send failed for unknown reason(signal lost)"},
//TE_SEND_BUFFER_FULL = 0x16
{TE_SEND_BUFFER_FULL,"The send buffer was full, but sleeping for a while solved it"},
//TE_SCI_LINK_ERROR = 0x0017
{TE_SCI_LINK_ERROR,"There is no link from this node to the switch"},
//TE_SCI_UNABLE_TO_START_SEQUENCE = 0x18 | TE_DO_DISCONNECT
{TE_SCI_UNABLE_TO_START_SEQUENCE,"Could not start a sequence, because system resources are exhausted or no sequence has been created"},
//TE_SCI_UNABLE_TO_REMOVE_SEQUENCE = 0x19 | TE_DO_DISCONNECT
{TE_SCI_UNABLE_TO_REMOVE_SEQUENCE,"Could not remove a sequence"},
//TE_SCI_UNABLE_TO_CREATE_SEQUENCE = 0x1a | TE_DO_DISCONNECT
{TE_SCI_UNABLE_TO_CREATE_SEQUENCE,"Could not create a sequence, because system resources are exhausted. Must reboot"},
//TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR = 0x1b | TE_DO_DISCONNECT
{TE_SCI_UNRECOVERABLE_DATA_TFX_ERROR,"Tried to send data on redundant link but failed"},
//TE_SCI_CANNOT_INIT_LOCALSEGMENT = 0x1c | TE_DO_DISCONNECT
{TE_SCI_CANNOT_INIT_LOCALSEGMENT,"Cannot initialize local segment"},
//TE_SCI_CANNOT_MAP_REMOTESEGMENT = 0x1d | TE_DO_DISCONNEC
{TE_SCI_CANNOT_MAP_REMOTESEGMENT,"Cannot map remote segment"},
//TE_SCI_UNABLE_TO_UNMAP_SEGMENT = 0x1e | TE_DO_DISCONNECT
{TE_SCI_UNABLE_TO_UNMAP_SEGMENT,"Cannot free the resources used by this segment (step 1)"},
//TE_SCI_UNABLE_TO_REMOVE_SEGMENT = 0x1f | TE_DO_DISCONNEC
{TE_SCI_UNABLE_TO_REMOVE_SEGMENT,"Cannot free the resources used by this segment (step 2)"},
//TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT = 0x20 | TE_DO_DISCONNECT
{TE_SCI_UNABLE_TO_DISCONNECT_SEGMENT,"Cannot disconnect from a remote segment"},
//TE_SHM_IPC_PERMANENT = 0x21
{TE_SHM_IPC_PERMANENT,"Shm ipc Permanent error"},
//TE_SCI_UNABLE_TO_CLOSE_CHANNEL = 0x22
{TE_SCI_UNABLE_TO_CLOSE_CHANNEL,"Unable to close the sci channel and the resources allocated"}
};
length = sizeof(TransporterErrorString)/sizeof(struct myTransporterError);
for(i=0; i<length; i++)
{
if(theData[2] == (Uint32) TransporterErrorString[i].errorNum)
{
BaseString::snprintf(m_text, m_text_len,
"Transporter to node %d reported error 0x%x: %s",
theData[1],
theData[2],
TransporterErrorString[i].errorString);
break;
}
}
if(i == length)
BaseString::snprintf(m_text, m_text_len,
"Transporter to node %d reported error 0x%x: unknown error",
theData[1],
theData[2]);
}
void getTextTransporterWarning(QQQQ) {
getTextTransporterError(m_text, m_text_len, theData, len);
}
void getTextMissedHeartbeat(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Node %d missed heartbeat %d",
theData[1],
theData[2]);
}
void getTextDeadDueToHeartbeat(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Node %d declared dead due to missed heartbeat",
theData[1]);
}
void getTextJobStatistic(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Mean loop Counter in doJob last 8192 times = %u",
theData[1]);
}
void getTextThreadConfigLoop(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"8192 loops,tot %u usec,exec %u extra:loops = %u,time %u,const %u",
theData[1], theData[3], theData[4], theData[5],
theData[2]);
}
void getTextSendBytesStatistic(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Mean send size to Node = %d last 4096 sends = %u bytes",
theData[1],
theData[2]);
}
void getTextReceiveBytesStatistic(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Mean receive size to Node = %d last 4096 sends = %u bytes",
theData[1],
theData[2]);
}
void getTextSentHeartbeat(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Node Sent Heartbeat to node = %d",
theData[1]);
}
void getTextCreateLogBytes(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Log part %u, log file %u, MB %u",
theData[1],
theData[2],
theData[3]);
}
void getTextStartLog(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Log part %u, start MB %u, stop MB %u, last GCI, log exec %u",
theData[1],
theData[2],
theData[3],
theData[4]);
}
void getTextStartREDOLog(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Node: %d StartLog: [GCI Keep: %d LastCompleted: %d NewestRestorable: %d]",
theData[1],
theData[2],
theData[3],
theData[4]);
}
void getTextRedoStatus(QQQQ) {
Uint64 total = (Uint64(theData[6]) << 32) + theData[7];
Uint64 free = (Uint64(theData[8]) << 32) + theData[9];
BaseString::snprintf(m_text, m_text_len,
"Logpart: %u head=[ file: %u mbyte: %u ] tail=[ file: %u mbyte: %u ] total mb: %llu free mb: %llu free%%: %u",
theData[1],
theData[2],
theData[3],
theData[4],
theData[5],
total,
free,
Uint32((100 * free) / total));
}
void getTextUNDORecordsExecuted(QQQQ) {
const char* line = "";
if (theData[1] == DBTUP){
line = "DBTUP";
}else if (theData[1] == DBACC){
line = "DBACC";
}
BaseString::snprintf(m_text, m_text_len,
" UNDO %s %d [%d %d %d %d %d %d %d %d %d]",
line,
theData[2],
theData[3],
theData[4],
theData[5],
theData[6],
theData[7],
theData[8],
theData[9],
theData[10],
theData[11]);
}
void getTextInfoEvent(QQQQ) {
BaseString::snprintf(m_text, m_text_len, "%s", (char *)&theData[1]);
}
const char bytes_unit[]= "B";
const char kbytes_unit[]= "KB";
const char mbytes_unit[]= "MB";
static void convert_unit(unsigned &data, const char *&unit)
{
if (data < 16*1024)
{
unit= bytes_unit;
return;
}
if (data < 16*1024*1024)
{
data= (data+1023)/1024;
unit= kbytes_unit;
return;
}
data= (data+1024*1024-1)/(1024*1024);
unit= mbytes_unit;
}
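/*
* convert_unit() keeps a value in the smaller unit until it reaches
* 16K of that unit, then divides with round-up. Worked examples:
*
* 10000 -> stays 10000, unit "B"
* 20000 -> (20000 + 1023) / 1024 = 20, unit "KB"
* 33554432 -> (33554432 + 1048575) / 1048576 = 32, unit "MB"
*/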
void getTextEventBufferStatus(QQQQ) {
unsigned used= theData[1], alloc= theData[2], max_= theData[3];
const char *used_unit, *alloc_unit, *max_unit;
convert_unit(used, used_unit);
convert_unit(alloc, alloc_unit);
convert_unit(max_, max_unit);
BaseString::snprintf(m_text, m_text_len,
"Event buffer status: used=%d%s(%d%%) alloc=%d%s(%d%%) "
"max=%d%s apply_epoch=%u/%u latest_epoch=%u/%u",
used, used_unit,
theData[2] ? (Uint32)((((Uint64)theData[1])*100)/theData[2]) : 0,
alloc, alloc_unit,
theData[3] ? (Uint32)((((Uint64)theData[2])*100)/theData[3]) : 0,
max_, max_unit,
theData[5], theData[4],
theData[7], theData[6]);
}
void getTextWarningEvent(QQQQ) {
BaseString::snprintf(m_text, m_text_len, "%s", (char *)&theData[1]);
}
void getTextGCP_TakeoverStarted(QQQQ) {
BaseString::snprintf(m_text, m_text_len, "GCP Take over started");
}
void getTextGCP_TakeoverCompleted(QQQQ) {
BaseString::snprintf(m_text, m_text_len, "GCP Take over completed");
}
void getTextLCP_TakeoverStarted(QQQQ) {
BaseString::snprintf(m_text, m_text_len, "LCP Take over started");
}
void getTextLCP_TakeoverCompleted(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"LCP Take over completed (state = %d)",
theData[1]);
}
void getTextMemoryUsage(QQQQ) {
const int gth = theData[1];
const int size = theData[2];
const int used = theData[3];
const int total = theData[4];
const int block = theData[5];
const int percent = total ? (used*100)/total : 0;
BaseString::snprintf(m_text, m_text_len,
"%s usage %s %d%s(%d %dK pages of total %d)",
(block==DBACC ? "Index" : (block == DBTUP ?"Data":"<unknown>")),
(gth == 0 ? "is" : (gth > 0 ? "increased to" : "decreased to")),
percent, "%",
used, size/1024, total
);
}
void getTextBackupStarted(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Backup %u started from node %d",
theData[2], refToNode(theData[1]));
}
void getTextBackupFailedToStart(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Backup request from %d failed to start. Error: %d",
refToNode(theData[1]), theData[2]);
}
void getTextBackupCompleted(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Backup %u started from node %u completed."
" StartGCP: %u StopGCP: %u"
" #Records: %u #LogRecords: %u"
" Data: %u bytes Log: %u bytes",
theData[2], refToNode(theData[1]),
theData[3], theData[4], theData[6], theData[8],
theData[5], theData[7]);
}
void getTextBackupStatus(QQQQ) {
if (theData[1])
BaseString::snprintf(m_text, m_text_len,
"Local backup status: backup %u started from node %u\n"
" #Records: %llu #LogRecords: %llu\n"
" Data: %llu bytes Log: %llu bytes",
theData[2], refToNode(theData[1]),
make_uint64(theData[5], theData[6]),
make_uint64(theData[9], theData[10]),
make_uint64(theData[3], theData[4]),
make_uint64(theData[7], theData[8]));
else
BaseString::snprintf(m_text, m_text_len,
"Backup not started");
}
void getTextBackupAborted(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Backup %u started from %d has been aborted. Error: %d",
theData[2],
refToNode(theData[1]),
theData[3]);
}
void getTextRestoreStarted(QQQQ)
{
BaseString::snprintf(m_text, m_text_len,
"Restore started: backup %u from node %u",
theData[1], theData[2]);
}
void getTextRestoreMetaData(QQQQ)
{
BaseString::snprintf(m_text, m_text_len,
"Restore meta data: backup %u from node %u "
"#Tables: %u\n"
" #Tablespaces: %u #Logfilegroups: %u "
"#datafiles: %u #undofiles: %u",
theData[1], theData[2], theData[3],
theData[4], theData[5], theData[6], theData[7]);
}
void getTextRestoreData(QQQQ)
{
BaseString::snprintf(m_text, m_text_len,
"Restore data: backup %u from node %u "
"#Records: %llu Data: %llu bytes",
theData[1], theData[2],
make_uint64(theData[3], theData[4]),
make_uint64(theData[5], theData[6]));
}
void getTextRestoreLog(QQQQ)
{
BaseString::snprintf(m_text, m_text_len,
"Restore log: backup %u from node %u "
"#Records: %llu Data: %llu bytes",
theData[1], theData[2],
make_uint64(theData[3], theData[4]),
make_uint64(theData[5], theData[6]));
}
void getTextRestoreCompleted(QQQQ)
{
BaseString::snprintf(m_text, m_text_len,
"Restore completed: backup %u from node %u",
theData[1], theData[2]);
}
void getTextLogFileInitStatus(QQQQ) {
if (theData[2])
BaseString::snprintf(m_text, m_text_len,
"Local redo log file initialization status:\n"
"#Total files: %u, Completed: %u\n"
"#Total MBytes: %u, Completed: %u",
// refToNode(theData[1]),
theData[2], theData[3],
theData[4], theData[5]);
else
BaseString::snprintf(m_text, m_text_len,
"Node %u: Log file initializtion completed",
refToNode(theData[1]));
}
void getTextLogFileInitCompStatus(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Local redo log file initialization completed:\n"
"#Total files: %u, Completed: %u\n"
"#Total MBytes: %u, Completed: %u",
// refToNode(theData[1]),
theData[2], theData[3],
theData[4], theData[5]);
}
void getTextSingleUser(QQQQ) {
switch (theData[1])
{
case 0:
BaseString::snprintf(m_text, m_text_len, "Entering single user mode");
break;
case 1:
BaseString::snprintf(m_text, m_text_len,
"Entered single user mode "
"Node %d has exclusive access", theData[2]);
break;
case 2:
BaseString::snprintf(m_text, m_text_len,"Exiting single user mode");
break;
default:
BaseString::snprintf(m_text, m_text_len,
"Unknown single user report %d", theData[1]);
break;
}
}
void getTextStartReport(QQQQ) {
Uint32 time = theData[2];
Uint32 sz = theData[3];
BaseString
bstr0 = BaseString::getPrettyText(sz, theData + 4 + (0 * sz)),
bstr1 = BaseString::getPrettyText(sz, theData + 4 + (1 * sz)),
bstr2 = BaseString::getPrettyText(sz, theData + 4 + (2 * sz)),
bstr3 = BaseString::getPrettyText(sz, theData + 4 + (3 * sz)),
bstr4 = BaseString::getPrettyText(sz, theData + 4 + (4 * sz));
if (len < 4 + 5 * sz)
{
bstr4.assign("<unknown>");
}
switch(theData[1]){
case 1: // Wait initial
BaseString::snprintf
(m_text, m_text_len,
"Initial start, waiting for %s to connect, "
" nodes [ all: %s connected: %s no-wait: %s ]",
bstr3.c_str(), bstr0.c_str(), bstr1.c_str(), bstr2.c_str());
break;
case 2: // Wait partial
BaseString::snprintf
(m_text, m_text_len,
"Waiting until nodes: %s connects, "
"nodes [ all: %s connected: %s no-wait: %s ]",
bstr3.c_str(), bstr0.c_str(), bstr1.c_str(), bstr2.c_str());
break;
case 3: // Wait partial timeout
BaseString::snprintf
(m_text, m_text_len,
"Waiting %u sec for nodes %s to connect, "
"nodes [ all: %s connected: %s no-wait: %s ]",
time, bstr3.c_str(), bstr0.c_str(), bstr1.c_str(), bstr2.c_str());
break;
case 4: // Wait partitioned
BaseString::snprintf
(m_text, m_text_len,
"Waiting for non partitioned start, "
"nodes [ all: %s connected: %s missing: %s no-wait: %s ]",
bstr0.c_str(), bstr1.c_str(), bstr3.c_str(), bstr2.c_str());
break;
case 5:
BaseString::snprintf
(m_text, m_text_len,
"Waiting %u sec for non partitioned start, "
"nodes [ all: %s connected: %s missing: %s no-wait: %s ]",
time, bstr0.c_str(), bstr1.c_str(), bstr3.c_str(), bstr2.c_str());
break;
case 6:
BaseString::snprintf
(m_text, m_text_len,
"Initial start, waiting %u for %s to connect, "
"nodes [ all: %s connected: %s missing: %s no-wait: %s no-nodegroup: %s ]",
time, bstr4.c_str(),
bstr0.c_str(), bstr1.c_str(), bstr3.c_str(), bstr2.c_str(),
bstr4.c_str());
break;
case 7: // Wait no-nodes/partial timeout
BaseString::snprintf
(m_text, m_text_len,
"Waiting %u sec for nodes %s to connect, "
"nodes [ all: %s connected: %s no-wait: %s no-nodegroup: %s ]",
time, bstr3.c_str(), bstr0.c_str(), bstr1.c_str(), bstr2.c_str(),
bstr4.c_str());
break;
case 0x8000: // Do initial
BaseString::snprintf
(m_text, m_text_len,
"Initial start with nodes %s [ missing: %s no-wait: %s ]",
bstr1.c_str(), bstr3.c_str(), bstr2.c_str());
break;
case 0x8001: // Do start
BaseString::snprintf
(m_text, m_text_len,
"Start with all nodes %s",
bstr1.c_str());
break;
case 0x8002: // Do partial
BaseString::snprintf
(m_text, m_text_len,
"Start with nodes %s [ missing: %s no-wait: %s ]",
bstr1.c_str(), bstr3.c_str(), bstr2.c_str());
break;
case 0x8003: // Do partitioned
BaseString::snprintf
(m_text, m_text_len,
"Start potentially partitioned with nodes %s "
" [ missing: %s no-wait: %s ]",
bstr1.c_str(), bstr3.c_str(), bstr2.c_str());
break;
default:
BaseString::snprintf
(m_text, m_text_len,
"Unknown startreport: 0x%x [ %s %s %s %s ]",
theData[1],
bstr0.c_str(), bstr1.c_str(), bstr2.c_str(), bstr3.c_str());
}
}
void getTextMTSignalStatistics(QQQQ) {
BaseString::snprintf(m_text, m_text_len,
"Signals delivered from thread %u: "
"prio A %u (%u bytes) prio B %u (%u bytes)",
theData[1],
theData[2], theData[3], theData[4], theData[5]);
}
void getTextSubscriptionStatus(QQQQ)
{
switch(theData[1]) {
case(1): // SubscriptionStatus::DISCONNECTED
BaseString::snprintf(m_text, m_text_len,
"Disconnecting node %u because it has "
"exceeded MaxBufferedEpochs (%u > %u), epoch %u/%u",
theData[2],
theData[5],
theData[6],
theData[4], theData[3]);
break;
case(2): // SubscriptionStatus::INCONSISTENT
BaseString::snprintf(m_text, m_text_len,
"Nodefailure while out of event buffer: "
"informing subscribers of possibly missing event data"
", epoch %u/%u",
theData[4], theData[3]);
break;
}
}
void
getTextStartReadLCP(QQQQ)
{
BaseString::snprintf(m_text, m_text_len,
"Start reading LCP for table %u fragment: %u",
theData[1],
theData[2]);
}
void
getTextReadLCPComplete(QQQQ)
{
BaseString::snprintf(m_text, m_text_len,
"Restored LCP for table %u fragment: %u rows: %llu",
theData[1],
theData[2],
(Uint64(theData[3]) << 32) + Uint64(theData[4]));
}
void
getTextRunRedo(QQQQ)
{
const ndb_logevent_RunRedo * ev = (const ndb_logevent_RunRedo*)(theData+1);
if (ev->currgci == ev->startgci)
{
BaseString::snprintf(m_text, m_text_len,
"Log part: %u phase: %u run redo from "
" gci: %u (file: %u mb: %u) to "
" gci: %u (file: %u mb: %u)",
ev->logpart,
ev->phase,
ev->startgci,
ev->startfile,
ev->startmb,
ev->stopgci,
ev->stopfile,
ev->stopmb);
}
else if (ev->currgci == ev->stopgci)
{
BaseString::snprintf(m_text, m_text_len,
"Log part: %u phase: %u found stop "
" gci: %u (file: %u mb: %u)",
ev->logpart,
ev->phase,
ev->currgci,
ev->currfile,
ev->currmb);
}
else
{
BaseString::snprintf(m_text, m_text_len,
"Log part: %u phase: %u at "
" gci: %u (file: %u mb: %u)",
ev->logpart,
ev->phase,
ev->currgci,
ev->currfile,
ev->currmb);
}
}
void
getTextRebuildIndex(QQQQ)
{
BaseString::snprintf(m_text, m_text_len,
"instace: %u rebuild index: %u",
theData[1],
theData[2]);
}
const
char*
getObjectTypeName(Uint32 type)
{
return "object";
}
void
getTextCreateSchemaObject(QQQQ)
{
BaseString::snprintf(m_text, m_text_len,
"create %s id: %u version: %u (from %u)",
getObjectTypeName(theData[3]),
theData[1],
theData[2],
theData[4]);
}
void
getTextAlterSchemaObject(QQQQ)
{
BaseString::snprintf(m_text, m_text_len,
"alter %s id: %u version: %u (from %u)",
getObjectTypeName(theData[3]),
theData[1],
theData[2],
theData[4]);
}
void
getTextDropSchemaObject(QQQQ)
{
BaseString::snprintf(m_text, m_text_len,
"drop %s id: %u version: %u (from %u)",
getObjectTypeName(theData[3]),
theData[1],
theData[2],
theData[4]);
}
void getTextSavedEvent(QQQQ)
{
abort();
}
void getTextConnectCheckStarted(QQQQ)
{
/* EventReport format :
* 1 : other_node_count
* 2 : reason (FailRep causes or 0)
* 3 : causing_node (if from FailRep)
* 4 : bitmask wordsize
* 5 : bitmask[2]
*/
Uint32 other_node_count = theData[1];
Uint32 reason = theData[2];
Uint32 causing_node = theData[3];
Uint32 bitmaskSz = theData[4];
char otherNodeMask[100];
char suspectNodeMask[100];
BitmaskImpl::getText(bitmaskSz, theData + 5 + (0 * bitmaskSz), otherNodeMask);
BitmaskImpl::getText(bitmaskSz, theData + 5 + (1 * bitmaskSz), suspectNodeMask);
Uint32 suspectCount = BitmaskImpl::count(bitmaskSz, theData + 5 + (1 * bitmaskSz));
if (reason)
{
/* Connect check started for specific reason */
const char * reasonText = NULL;
switch (reason)
{
case FailRep::ZHEARTBEAT_FAILURE:
reasonText = "Heartbeat failure";
break;
case FailRep::ZCONNECT_CHECK_FAILURE:
reasonText = "Connectivity check request";
break;
default:
reasonText = "UNKNOWN";
break;
}
BaseString::snprintf(m_text, m_text_len,
"Connectivity Check of %u other nodes (%s) started due to %s from node %u.",
other_node_count,
otherNodeMask,
reasonText,
causing_node);
}
else
{
/* Connect check restarted due to suspect nodes */
BaseString::snprintf(m_text, m_text_len,
"Connectivity Check of %u nodes (%s) restarting due to %u suspect nodes (%s).",
other_node_count,
otherNodeMask,
suspectCount,
suspectNodeMask);
}
}
void getTextConnectCheckCompleted(QQQQ)
{
/* EventReport format
* 1 : Nodes checked
* 2 : Suspect nodes
* 3 : Failed nodes
*/
Uint32 nodes_checked = theData[1];
Uint32 suspect_nodes = theData[2];
Uint32 failed_nodes = theData[3];
if ((failed_nodes + suspect_nodes) == 0)
{
/* All connectivity ok */
BaseString::snprintf(m_text, m_text_len,
"Connectivity Check completed on %u nodes, connectivity ok",
nodes_checked);
}
else
{
if (failed_nodes > 0)
{
if (suspect_nodes > 0)
{
BaseString::snprintf(m_text, m_text_len,
"Connectivity Check completed on %u nodes. %u nodes failed. "
"%u nodes still suspect, repeating check.",
nodes_checked,
failed_nodes,
suspect_nodes);
}
else
{
BaseString::snprintf(m_text, m_text_len,
"Connectivity Check completed on %u nodes. %u nodes failed. "
"Connectivity now OK",
nodes_checked,
failed_nodes);
}
}
else
{
/* Just suspect nodes */
BaseString::snprintf(m_text, m_text_len,
"Connectivity Check completed on %u nodes. %u nodes still suspect, "
"repeating check.",
nodes_checked,
suspect_nodes);
}
}
}
void getTextNodeFailRejected(QQQQ)
{
Uint32 reason = theData[1];
Uint32 failed_node = theData[2];
Uint32 source_node = theData[3];
const char* reasonText = "Unknown";
switch (reason)
{
case FailRep::ZCONNECT_CHECK_FAILURE:
reasonText = "Connect Check Failure";
break;
case FailRep::ZLINK_FAILURE:
reasonText = "Link Failure";
break;
}
BaseString::snprintf(m_text, m_text_len,
"Received FAIL_REP (%s (%u)) for node %u sourced by suspect node %u. "
"Rejecting as failure of node %u.",
reasonText,
reason,
failed_node,
source_node,
source_node);
}
#if 0
BaseString::snprintf(m_text,
m_text_len,
"Unknown event: %d",
theData[0]);
#endif
/**
* This matrix defines which event should be printed when
*
* threshold - is in range [0-15]
* severity - DEBUG to ALERT (Type of log message)
*/
#define ROW(a,b,c,d) \
{ NDB_LE_ ## a, b, c, d, getText ## a}
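/*
* For reference, one expansion of the ROW() helper:
*
* ROW(Connected, LogLevel::llConnection, 8, Logger::LL_INFO)
* => { NDB_LE_Connected, LogLevel::llConnection, 8, Logger::LL_INFO,
* getTextConnected }
*
* i.e. event id, category, threshold, severity and text formatter for
* each matrix entry.
*/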
const EventLoggerBase::EventRepLogLevelMatrix EventLoggerBase::matrix[] = {
// CONNECTION
ROW(Connected, LogLevel::llConnection, 8, Logger::LL_INFO ),
ROW(Disconnected, LogLevel::llConnection, 8, Logger::LL_ALERT ),
ROW(CommunicationClosed, LogLevel::llConnection, 8, Logger::LL_INFO ),
ROW(CommunicationOpened, LogLevel::llConnection, 8, Logger::LL_INFO ),
ROW(ConnectedApiVersion, LogLevel::llConnection, 8, Logger::LL_INFO ),
// CHECKPOINT
ROW(GlobalCheckpointStarted, LogLevel::llCheckpoint, 9, Logger::LL_INFO ),
ROW(GlobalCheckpointCompleted,LogLevel::llCheckpoint,10, Logger::LL_INFO ),
ROW(LocalCheckpointStarted, LogLevel::llCheckpoint, 7, Logger::LL_INFO ),
ROW(LocalCheckpointCompleted,LogLevel::llCheckpoint, 7, Logger::LL_INFO ),
ROW(LCPStoppedInCalcKeepGci, LogLevel::llCheckpoint, 0, Logger::LL_ALERT ),
ROW(LCPFragmentCompleted, LogLevel::llCheckpoint, 11, Logger::LL_INFO ),
ROW(UndoLogBlocked, LogLevel::llCheckpoint, 7, Logger::LL_INFO ),
ROW(RedoStatus, LogLevel::llCheckpoint, 7, Logger::LL_INFO ),
// STARTUP
ROW(NDBStartStarted, LogLevel::llStartUp, 1, Logger::LL_INFO ),
ROW(NDBStartCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO ),
ROW(STTORRYRecieved, LogLevel::llStartUp, 15, Logger::LL_INFO ),
ROW(StartPhaseCompleted, LogLevel::llStartUp, 4, Logger::LL_INFO ),
ROW(CM_REGCONF, LogLevel::llStartUp, 3, Logger::LL_INFO ),
ROW(CM_REGREF, LogLevel::llStartUp, 8, Logger::LL_INFO ),
ROW(FIND_NEIGHBOURS, LogLevel::llStartUp, 8, Logger::LL_INFO ),
ROW(NDBStopStarted, LogLevel::llStartUp, 1, Logger::LL_INFO ),
ROW(NDBStopCompleted, LogLevel::llStartUp, 1, Logger::LL_INFO ),
ROW(NDBStopForced, LogLevel::llStartUp, 1, Logger::LL_ALERT ),
ROW(NDBStopAborted, LogLevel::llStartUp, 1, Logger::LL_INFO ),
ROW(StartREDOLog, LogLevel::llStartUp, 4, Logger::LL_INFO ),
ROW(StartLog, LogLevel::llStartUp, 10, Logger::LL_INFO ),
ROW(UNDORecordsExecuted, LogLevel::llStartUp, 15, Logger::LL_INFO ),
ROW(StartReport, LogLevel::llStartUp, 4, Logger::LL_INFO ),
ROW(LogFileInitStatus, LogLevel::llStartUp, 7, Logger::LL_INFO),
ROW(LogFileInitCompStatus, LogLevel::llStartUp, 7, Logger::LL_INFO),
ROW(StartReadLCP, LogLevel::llStartUp, 10, Logger::LL_INFO),
ROW(ReadLCPComplete, LogLevel::llStartUp, 10, Logger::LL_INFO),
ROW(RunRedo, LogLevel::llStartUp, 8, Logger::LL_INFO),
ROW(RebuildIndex, LogLevel::llStartUp, 10, Logger::LL_INFO),
// NODERESTART
ROW(NR_CopyDict, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
ROW(NR_CopyDistr, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
ROW(NR_CopyFragsStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
ROW(NR_CopyFragDone, LogLevel::llNodeRestart,10, Logger::LL_INFO ),
ROW(NR_CopyFragsCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
ROW(NodeFailCompleted, LogLevel::llNodeRestart, 8, Logger::LL_ALERT),
ROW(NODE_FAILREP, LogLevel::llNodeRestart, 8, Logger::LL_ALERT),
ROW(ArbitState, LogLevel::llNodeRestart, 6, Logger::LL_INFO ),
ROW(ArbitResult, LogLevel::llNodeRestart, 2, Logger::LL_ALERT),
ROW(GCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
ROW(GCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
ROW(LCP_TakeoverStarted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
ROW(LCP_TakeoverCompleted, LogLevel::llNodeRestart, 7, Logger::LL_INFO ),
ROW(ConnectCheckStarted, LogLevel::llNodeRestart, 6, Logger::LL_INFO ),
ROW(ConnectCheckCompleted, LogLevel::llNodeRestart, 6, Logger::LL_INFO ),
ROW(NodeFailRejected, LogLevel::llNodeRestart, 6, Logger::LL_ALERT ),
// STATISTIC
ROW(TransReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO ),
ROW(OperationReportCounters, LogLevel::llStatistic, 8, Logger::LL_INFO ),
ROW(TableCreated, LogLevel::llStatistic, 7, Logger::LL_INFO ),
ROW(JobStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ),
ROW(ThreadConfigLoop, LogLevel::llStatistic, 9, Logger::LL_INFO ),
ROW(SendBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ),
ROW(ReceiveBytesStatistic, LogLevel::llStatistic, 9, Logger::LL_INFO ),
ROW(MemoryUsage, LogLevel::llStatistic, 5, Logger::LL_INFO ),
ROW(MTSignalStatistics, LogLevel::llStatistic, 9, Logger::LL_INFO ),
// Schema
ROW(CreateSchemaObject, LogLevel::llSchema, 8, Logger::LL_INFO ),
ROW(AlterSchemaObject, LogLevel::llSchema, 8, Logger::LL_INFO ),
ROW(DropSchemaObject, LogLevel::llSchema, 8, Logger::LL_INFO ),
// ERROR
ROW(TransporterError, LogLevel::llError, 2, Logger::LL_ERROR ),
ROW(TransporterWarning, LogLevel::llError, 8, Logger::LL_WARNING ),
ROW(MissedHeartbeat, LogLevel::llError, 8, Logger::LL_WARNING ),
ROW(DeadDueToHeartbeat, LogLevel::llError, 8, Logger::LL_ALERT ),
ROW(WarningEvent, LogLevel::llError, 2, Logger::LL_WARNING ),
ROW(SubscriptionStatus, LogLevel::llError, 4, Logger::LL_WARNING ),
// INFO
ROW(SentHeartbeat, LogLevel::llInfo, 12, Logger::LL_INFO ),
ROW(CreateLogBytes, LogLevel::llInfo, 11, Logger::LL_INFO ),
ROW(InfoEvent, LogLevel::llInfo, 2, Logger::LL_INFO ),
ROW(EventBufferStatus, LogLevel::llInfo, 7, Logger::LL_INFO ),
//Single User
ROW(SingleUser, LogLevel::llInfo, 7, Logger::LL_INFO ),
// Backup
ROW(BackupStarted, LogLevel::llBackup, 7, Logger::LL_INFO ),
ROW(BackupStatus, LogLevel::llBackup, 7, Logger::LL_INFO ),
ROW(BackupCompleted, LogLevel::llBackup, 7, Logger::LL_INFO ),
ROW(BackupFailedToStart, LogLevel::llBackup, 7, Logger::LL_ALERT),
ROW(BackupAborted, LogLevel::llBackup, 7, Logger::LL_ALERT),
ROW(RestoreStarted, LogLevel::llBackup, 7, Logger::LL_INFO ),
ROW(RestoreMetaData, LogLevel::llBackup, 7, Logger::LL_INFO ),
ROW(RestoreData, LogLevel::llBackup, 7, Logger::LL_INFO ),
ROW(RestoreLog, LogLevel::llBackup, 7, Logger::LL_INFO ),
ROW(RestoreCompleted, LogLevel::llBackup, 7, Logger::LL_INFO ),
ROW(SavedEvent, LogLevel::llInfo, 7, Logger::LL_INFO)
};
const Uint32 EventLoggerBase::matrixSize=
sizeof(EventLoggerBase::matrix)/sizeof(EventRepLogLevelMatrix);
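/* Each ROW in the matrix above maps an event type to its log level
   category, the reporting threshold at which it is emitted, and the
   severity passed to the Logger. */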
EventLogger::EventLogger()
{
setCategory("EventLogger");
enable(Logger::LL_INFO, Logger::LL_ALERT);
}
EventLogger::~EventLogger()
{
}
void
EventLogger::close()
{
removeAllHandlers();
}
#ifdef NOT_USED
static NdbOut&
operator<<(NdbOut& out, const LogLevel & ll)
{
out << "[LogLevel: ";
for(size_t i = 0; i<LogLevel::LOGLEVEL_CATEGORIES; i++)
out << ll.getLogLevel((LogLevel::EventCategory)i) << " ";
out << "]";
return out;
}
#endif
int
EventLoggerBase::event_lookup(int eventType,
LogLevel::EventCategory &cat,
Uint32 &threshold,
Logger::LoggerLevel &severity,
EventTextFunction &textF)
{
for(unsigned i = 0; i<EventLoggerBase::matrixSize; i++){
if(EventLoggerBase::matrix[i].eventType == eventType){
cat = EventLoggerBase::matrix[i].eventCategory;
threshold = EventLoggerBase::matrix[i].threshold;
severity = EventLoggerBase::matrix[i].severity;
textF= EventLoggerBase::matrix[i].textF;
return 0;
}
}
return 1;
}
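/* Usage sketch (hypothetical caller, for illustration only):

     LogLevel::EventCategory cat;
     Uint32 threshold;
     Logger::LoggerLevel severity;
     EventTextFunction textF;
     if (EventLoggerBase::event_lookup(someEventType, cat, threshold,
                                       severity, textF) == 0)
     {
       // cat/threshold/severity/textF now describe the event
     }

   event_lookup() returns 0 on a hit and 1 when the event type is not in
   the matrix; someEventType stands for any of the event types named in
   the ROW entries above. */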
const char*
EventLogger::getText(char * dst, size_t dst_len,
EventTextFunction textF,
const Uint32* theData, Uint32 len, NodeId nodeId )
{
int pos= 0;
if (nodeId != 0)
{
BaseString::snprintf(dst, dst_len, "Node %u: ", nodeId);
pos= strlen(dst);
}
if (dst_len-pos > 0)
textF(dst+pos, dst_len-pos, theData, len);
return dst;
}
void
EventLogger::log(int eventType, const Uint32* theData, Uint32 len,
NodeId nodeId, const LogLevel* ll)
{
Uint32 threshold = 0;
Logger::LoggerLevel severity = Logger::LL_WARNING;
LogLevel::EventCategory cat= LogLevel::llInvalid;
EventTextFunction textF;
char log_text[MAX_TEXT_LENGTH];
DBUG_ENTER("EventLogger::log");
DBUG_PRINT("enter",("eventType=%d, nodeid=%d", eventType, nodeId));
if (EventLoggerBase::event_lookup(eventType,cat,threshold,severity,textF))
DBUG_VOID_RETURN;
Uint32 set = ll?ll->getLogLevel(cat) : m_logLevel.getLogLevel(cat);
DBUG_PRINT("info",("threshold=%d, set=%d", threshold, set));
if (ll)
DBUG_PRINT("info",("m_logLevel.getLogLevel=%d", m_logLevel.getLogLevel(cat)));
if (threshold <= set){
getText(log_text, sizeof(log_text), textF, theData, len, nodeId);
switch (severity){
case Logger::LL_ALERT:
alert("%s", log_text);
break;
case Logger::LL_CRITICAL:
critical("%s", log_text);
break;
case Logger::LL_WARNING:
warning("%s", log_text);
break;
case Logger::LL_ERROR:
error("%s", log_text);
break;
case Logger::LL_INFO:
info("%s", log_text);
break;
case Logger::LL_DEBUG:
debug("%s", log_text);
break;
default:
info("%s", log_text);
break;
}
} // if (..
DBUG_VOID_RETURN;
}
EventLogger*
create_event_logger()
{
return new EventLogger();
}
void
destroy_event_logger(class EventLogger ** g_eventLogger)
{
delete *g_eventLogger;
*g_eventLogger = 0;
}
| gpl-2.0 |
MoKee/android_kernel_zte_x9180 | drivers/staging/prima/CORE/HDD/src/wlan_hdd_wext.c | 87 | 321130 | /*
* Copyright (c) 2012-2014 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/** ------------------------------------------------------------------------ *
------------------------------------------------------------------------ *
\file wlan_hdd_wext.c
\brief Airgo Linux Wireless Extensions Common Control Plane Types and
interfaces.
$Id: wlan_hdd_wext.c,v 1.34 2007/04/14 01:49:23 jimz Exp jimz $
This file defines all of the types that are utilized by the CCP module
of the "Portable" HDD. This file also includes the underlying Linux
Wireless Extensions Data types referred to by CCP.
======================================================================== */
#include <linux/version.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/wireless.h>
#include <linux/ratelimit.h>
#include <macTrace.h>
#include <wlan_hdd_includes.h>
#include <wlan_btc_svc.h>
#include <wlan_nlink_common.h>
#ifdef WLAN_BTAMP_FEATURE
#include <bap_hdd_main.h>
#endif
#include <vos_api.h>
#include <net/arp.h>
#include "ccmApi.h"
#include "sirParams.h"
#include "csrApi.h"
#include "csrInsideApi.h"
#if defined WLAN_FEATURE_VOWIFI
#include "smeRrmInternal.h"
#endif
#include <aniGlobal.h>
#include "dot11f.h"
#include <wlan_hdd_wowl.h>
#include <wlan_hdd_cfg.h>
#include <wlan_hdd_wmm.h>
#include "utilsApi.h"
#include "wlan_hdd_p2p.h"
#ifdef FEATURE_WLAN_TDLS
#include "wlan_hdd_tdls.h"
#endif
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
#include "wlan_hdd_power.h"
#include "qwlan_version.h"
#include "wlan_hdd_host_offload.h"
#include "wlan_hdd_keep_alive.h"
#ifdef WLAN_FEATURE_PACKET_FILTERING
#include "wlan_hdd_packet_filtering.h"
#endif
#include <linux/wireless.h>
#include <net/cfg80211.h>
#include "wlan_qct_pal_trace.h"
#include "wlan_qct_tl.h"
#include "wlan_hdd_misc.h"
#include "bap_hdd_misc.h"
#include "wlan_hdd_dev_pwr.h"
#include "qc_sap_ioctl.h"
#include "sme_Api.h"
#include "vos_trace.h"
#include "wlan_hdd_assoc.h"
#ifdef DEBUG_ROAM_DELAY
#include "vos_utils.h"
#endif
#include "sapInternal.h"
#ifdef CONFIG_HAS_EARLYSUSPEND
extern void hdd_suspend_wlan(struct early_suspend *wlan_suspend);
extern void hdd_resume_wlan(struct early_suspend *wlan_suspend);
#endif
#ifdef FEATURE_OEM_DATA_SUPPORT
#define MAX_OEM_DATA_RSP_LEN 2047
#endif
#define HDD_FINISH_ULA_TIME_OUT 800
#define COUNTRY_CODE_LEN 2
// tdlsoffchan
#ifdef FEATURE_WLAN_TDLS
static int tdlsOffCh = 1;
static int tdlsOffChBwOffset = 0;
#endif
static int ioctl_debug;
module_param(ioctl_debug, int, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
/* To validate a channel against its frequency and vice versa */
static const hdd_freq_chan_map_t freq_chan_map[] = { {2412, 1}, {2417, 2},
{2422, 3}, {2427, 4}, {2432, 5}, {2437, 6}, {2442, 7}, {2447, 8},
{2452, 9}, {2457, 10}, {2462, 11}, {2467 ,12}, {2472, 13},
{2484, 14}, {4920, 240}, {4940, 244}, {4960, 248}, {4980, 252},
{5040, 208}, {5060, 212}, {5080, 216}, {5180, 36}, {5200, 40}, {5220, 44},
{5240, 48}, {5260, 52}, {5280, 56}, {5300, 60}, {5320, 64}, {5500, 100},
{5520, 104}, {5540, 108}, {5560, 112}, {5580, 116}, {5600, 120},
{5620, 124}, {5640, 128}, {5660, 132}, {5680, 136}, {5700, 140},
{5720, 144}, {5745, 149}, {5765, 153}, {5785, 157}, {5805, 161},
{5825, 165} };
#define FREQ_CHAN_MAP_TABLE_SIZE (sizeof(freq_chan_map)/sizeof(freq_chan_map[0]))
/* Private ioctls and their sub-ioctls */
#define WLAN_PRIV_SET_INT_GET_NONE (SIOCIWFIRSTPRIV + 0)
#define WE_SET_11D_STATE 1
#define WE_WOWL 2
#define WE_SET_POWER 3
#define WE_SET_MAX_ASSOC 4
#define WE_SET_SAP_AUTO_CHANNEL_SELECTION 5
#define WE_SET_DATA_INACTIVITY_TO 6
#define WE_SET_MAX_TX_POWER 7
#define WE_SET_HIGHER_DTIM_TRANSITION 8
#define WE_SET_TM_LEVEL 9
#define WE_ENABLE_STRICT_FCC_REG 10
#define WE_SET_MAX_TX_POWER_2_4 11
#define WE_SET_MAX_TX_POWER_5_0 12
/* Private IOCTL for debugging connection issues */
#define WE_SET_DEBUG_LOG 13
// tdlsoffchan
#ifdef FEATURE_WLAN_TDLS
#define WE_SET_TDLS_OFF_CHAN 14
#define WE_SET_TDLS_SEC_OFF_CHAN_OFFSET 15
#define WE_SET_TDLS_OFF_CHAN_MODE 16
#endif
#define WE_SET_SCAN_BAND_PREFERENCE 17
#define WE_SET_MIRACAST_VENDOR_CONFIG 18
/* Private ioctls and their sub-ioctls */
#define WLAN_PRIV_SET_NONE_GET_INT (SIOCIWFIRSTPRIV + 1)
#define WE_GET_11D_STATE 1
#define WE_IBSS_STATUS 2
#define WE_PMC_STATE 3
#define WE_GET_WLAN_DBG 4
#define WE_GET_MAX_ASSOC 6
#define WE_GET_WDI_DBG 7
#define WE_GET_SAP_AUTO_CHANNEL_SELECTION 8
#define WE_GET_CONCURRENCY_MODE 9
#define WE_GET_SCAN_BAND_PREFERENCE 10
/* Private ioctls and their sub-ioctls */
#define WLAN_PRIV_SET_INT_GET_INT (SIOCIWFIRSTPRIV + 2)
/* Private ioctls and their sub-ioctls */
#define WLAN_PRIV_SET_CHAR_GET_NONE (SIOCIWFIRSTPRIV + 3)
#define WE_WOWL_ADD_PTRN 1
#define WE_WOWL_DEL_PTRN 2
#if defined WLAN_FEATURE_VOWIFI
#define WE_NEIGHBOR_REPORT_REQUEST 3
#endif
#define WE_SET_AP_WPS_IE 4 //This is called in station mode to set probe rsp ie.
#define WE_SET_CONFIG 5
#define WE_SET_ENCRYPT_MSG 6
/* Private ioctls and their sub-ioctls */
#define WLAN_PRIV_SET_THREE_INT_GET_NONE (SIOCIWFIRSTPRIV + 4)
#define WE_SET_WLAN_DBG 1
#define WE_SET_WDI_DBG 2
#define WE_SET_SAP_CHANNELS 3
/* Private ioctls and their sub-ioctls */
#define WLAN_PRIV_GET_CHAR_SET_NONE (SIOCIWFIRSTPRIV + 5)
#define WE_WLAN_VERSION 1
#define WE_GET_STATS 2
#define WE_GET_CFG 3
#define WE_GET_WMM_STATUS 4
#define WE_GET_CHANNEL_LIST 5
#ifdef WLAN_FEATURE_11AC
#define WE_GET_RSSI 6
#endif
#define WE_GET_ROAM_RSSI 7
#ifdef FEATURE_WLAN_TDLS
#define WE_GET_TDLS_PEERS 8
#endif
#ifdef WLAN_FEATURE_11W
#define WE_GET_11W_INFO 9
#endif
#define WE_GET_STATES 10
#define WE_GET_SNR 11
/* Private ioctls and their sub-ioctls */
#define WLAN_PRIV_SET_NONE_GET_NONE (SIOCIWFIRSTPRIV + 6)
#define WE_CLEAR_STATS 1
#define WE_INIT_AP 2
#define WE_STOP_AP 3
#ifdef WLAN_BTAMP_FEATURE
#define WE_ENABLE_AMP 4
#define WE_DISABLE_AMP 5
#endif /* WLAN_BTAMP_FEATURE */
#define WE_ENABLE_DXE_STALL_DETECT 6
#define WE_DISPLAY_DXE_SNAP_SHOT 7
#define WE_SET_REASSOC_TRIGGER 8
#define WE_DISPLAY_DATAPATH_SNAP_SHOT 9
#define WE_STOP_OBSS_SCAN 11
#ifdef DEBUG_ROAM_DELAY
#define WE_DUMP_ROAM_TIMER_LOG 12
#define WE_RESET_ROAM_TIMER_LOG 13
#endif
/* Private ioctls and their sub-ioctls */
#define WLAN_PRIV_SET_VAR_INT_GET_NONE (SIOCIWFIRSTPRIV + 7)
#define WE_LOG_DUMP_CMD 1
#define WE_P2P_NOA_CMD 2
//IOCTL to configure MCC params
#define WE_MCC_CONFIG_CREDENTIAL 3
#define WE_MCC_CONFIG_PARAMS 4
#ifdef FEATURE_WLAN_TDLS
#define WE_TDLS_CONFIG_PARAMS 5
#endif
#define WE_MTRACE_DUMP_CMD 8
#define WE_MTRACE_SELECTIVE_MODULE_LOG_ENABLE_CMD 9
#ifdef FEATURE_WLAN_TDLS
#undef MAX_VAR_ARGS
#define MAX_VAR_ARGS 10
#else
#define MAX_VAR_ARGS 7
#endif
/* Private ioctls (with no sub-ioctls) */
/* note that they must be odd so that they have "get" semantics */
#define WLAN_PRIV_ADD_TSPEC (SIOCIWFIRSTPRIV + 9)
#define WLAN_PRIV_DEL_TSPEC (SIOCIWFIRSTPRIV + 11)
#define WLAN_PRIV_GET_TSPEC (SIOCIWFIRSTPRIV + 13)
/* (SIOCIWFIRSTPRIV + 8) is currently unused */
/* (SIOCIWFIRSTPRIV + 10) is currently unused */
/* (SIOCIWFIRSTPRIV + 12) is currently unused */
/* (SIOCIWFIRSTPRIV + 14) is currently unused */
/* (SIOCIWFIRSTPRIV + 15) is currently unused */
/* (SIOCIWFIRSTPRIV + 16) is currently unused */
#ifdef FEATURE_OEM_DATA_SUPPORT
/* Private ioctls for setting the measurement configuration */
#define WLAN_PRIV_SET_OEM_DATA_REQ (SIOCIWFIRSTPRIV + 17)
#define WLAN_PRIV_GET_OEM_DATA_RSP (SIOCIWFIRSTPRIV + 19)
#endif
#ifdef WLAN_FEATURE_VOWIFI_11R
#define WLAN_PRIV_SET_FTIES (SIOCIWFIRSTPRIV + 20)
#endif
/* Private ioctl for setting the host offload feature */
#define WLAN_PRIV_SET_HOST_OFFLOAD (SIOCIWFIRSTPRIV + 18)
/* Private ioctl to get the statistics */
#define WLAN_GET_WLAN_STATISTICS (SIOCIWFIRSTPRIV + 21)
/* Private ioctl to set the Keep Alive Params */
#define WLAN_SET_KEEPALIVE_PARAMS (SIOCIWFIRSTPRIV + 22)
#ifdef WLAN_FEATURE_PACKET_FILTERING
/* Private ioctl to set the Packet Filtering Params */
#define WLAN_SET_PACKET_FILTER_PARAMS (SIOCIWFIRSTPRIV + 23)
#endif
#ifdef FEATURE_WLAN_SCAN_PNO
/* Private ioctl to get the statistics */
#define WLAN_SET_PNO (SIOCIWFIRSTPRIV + 24)
#endif
#define WLAN_SET_BAND_CONFIG (SIOCIWFIRSTPRIV + 25) /*Don't change this number*/
#define WLAN_PRIV_SET_MCBC_FILTER (SIOCIWFIRSTPRIV + 26)
#define WLAN_PRIV_CLEAR_MCBC_FILTER (SIOCIWFIRSTPRIV + 27)
/* Private ioctl to trigger reassociation */
#define WLAN_SET_POWER_PARAMS (SIOCIWFIRSTPRIV + 29)
#define WLAN_GET_LINK_SPEED (SIOCIWFIRSTPRIV + 31)
#define WLAN_STATS_INVALID 0
#define WLAN_STATS_RETRY_CNT 1
#define WLAN_STATS_MUL_RETRY_CNT 2
#define WLAN_STATS_TX_FRM_CNT 3
#define WLAN_STATS_RX_FRM_CNT 4
#define WLAN_STATS_FRM_DUP_CNT 5
#define WLAN_STATS_FAIL_CNT 6
#define WLAN_STATS_RTS_FAIL_CNT 7
#define WLAN_STATS_ACK_FAIL_CNT 8
#define WLAN_STATS_RTS_SUC_CNT 9
#define WLAN_STATS_RX_DISCARD_CNT 10
#define WLAN_STATS_RX_ERROR_CNT 11
#define WLAN_STATS_TX_BYTE_CNT 12
#define WLAN_STATS_RX_BYTE_CNT 13
#define WLAN_STATS_RX_RATE 14
#define WLAN_STATS_TX_RATE 15
#define WLAN_STATS_RX_UC_BYTE_CNT 16
#define WLAN_STATS_RX_MC_BYTE_CNT 17
#define WLAN_STATS_RX_BC_BYTE_CNT 18
#define WLAN_STATS_TX_UC_BYTE_CNT 19
#define WLAN_STATS_TX_MC_BYTE_CNT 20
#define WLAN_STATS_TX_BC_BYTE_CNT 21
#define FILL_TLV(__p, __type, __size, __val, __tlen) do { \
if ((__tlen + __size + 2) < WE_MAX_STR_LEN) \
{ \
*__p++ = __type; \
*__p++ = __size; \
memcpy(__p, __val, __size); \
__p += __size; \
__tlen += __size + 2; \
} \
else \
{ \
hddLog(VOS_TRACE_LEVEL_ERROR, "FILL_TLV Failed!!!"); \
} \
} while(0) /* no trailing semicolon: callers supply it, keeping the macro safe in if/else */
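/* FILL_TLV appends one Type-Length-Value record and advances the write
 * pointer. For example, __type 0x01 with a 2-byte __val grows the buffer
 * by 4 bytes -- [0x01][0x02][val0][val1] -- and adds __size + 2 to __tlen.
 * A record that would overflow WE_MAX_STR_LEN is dropped with an error log. */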
#define VERSION_VALUE_MAX_LEN 32
#define TX_PER_TRACKING_DEFAULT_RATIO 5
#define TX_PER_TRACKING_MAX_RATIO 10
#define TX_PER_TRACKING_DEFAULT_WATERMARK 5
#define WLAN_ADAPTER 0
#define P2P_ADAPTER 1
#define HDD_IOCTL_RATELIMIT_INTERVAL 20*HZ
#define HDD_IOCTL_RATELIMIT_BURST 1
static DEFINE_RATELIMIT_STATE(hdd_ioctl_timeout_rs, \
HDD_IOCTL_RATELIMIT_INTERVAL, \
HDD_IOCTL_RATELIMIT_BURST);
/*
* When supplicant sends SETBAND ioctl it queries for channels from
* cfg80211 layer by sending itself EVENT_CHANNEL_LIST_CHANGED command.
* This is not required if the return type from ioctl is
* DO_NOT_SEND_CHANNEL_CHANGE_EVENT as wiphy will send channel change
* event as part of regulatory_hint.
*/
enum {
SEND_CHANNEL_CHANGE_EVENT = 0,
DO_NOT_SEND_CHANNEL_CHANGE_EVENT,
};
/*MCC Configuration parameters */
enum {
MCC_SCHEDULE_TIME_SLICE_CFG_PARAM = 1,
MCC_MAX_NULL_SEND_TIME_CFG_PARAM,
MCC_TX_EARLY_STOP_TIME_CFG_PARAM,
MCC_RX_DRAIN_TIME_CFG_PARAM,
MCC_CHANNEL_SWITCH_TIME_CFG_PARAM,
MCC_MIN_CHANNEL_TIME_CFG_PARAM,
MCC_PARK_BEFORE_TBTT_CFG_PARAM,
MCC_MIN_AFTER_DTIM_CFG_PARAM,
MCC_TOO_CLOSE_MARGIN_CFG_PARAM,
};
int hdd_validate_mcc_config(hdd_adapter_t *pAdapter, v_UINT_t staId,
v_UINT_t arg1, v_UINT_t arg2, v_UINT_t arg3);
#ifdef WLAN_FEATURE_PACKET_FILTERING
int wlan_hdd_set_filter(hdd_context_t *pHddCtx, tpPacketFilterCfg pRequest,
v_U8_t sessionId);
#endif
/**---------------------------------------------------------------------------
\brief mem_alloc_copy_from_user_helper -
Helper function to allocate a kernel buffer and copy user data into it.
\param - wrqu_data - Pointer to user-space IOCTL data.
len - Number of bytes to copy.
\return - On success, pointer to the buffer; on failure, NULL.
--------------------------------------------------------------------------*/
void *mem_alloc_copy_from_user_helper(const void *wrqu_data, size_t len)
{
u8 *ptr = NULL;
/* To protect the code, an extra byte is appended to the buffer and NUL
* termination is added. When allocating (len + 1) bytes we must ensure the
* addition cannot overflow an unsigned int. In theory checking len < UINT_MAX
* is sufficient, but a wlan private ioctl buffer is far smaller than that;
* the private command buffer is assumed to be no larger than 4K (4096 bytes),
* so 4096 is used as the upper bound for now.
*/
if (len > MAX_USER_COMMAND_SIZE)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"Invalid length");
return NULL;
}
ptr = kmalloc(len + 1, GFP_KERNEL);
if (NULL == ptr)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"unable to allocate memory");
return NULL;
}
if (copy_from_user(ptr, wrqu_data, len))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: failed to copy data to user buffer", __func__);
kfree(ptr);
return NULL;
}
ptr[len] = '\0';
return ptr;
}
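/* Usage sketch (hypothetical caller, for illustration only): the returned
 * buffer is len + 1 bytes, NUL terminated, and must be freed by the caller.
 *
 *   char *buf = mem_alloc_copy_from_user_helper(wrqu->data.pointer,
 *                                               wrqu->data.length);
 *   if (NULL == buf)
 *       return -ENOMEM;
 *   // ... parse the private command ...
 *   kfree(buf);
 */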
// Helper to extract a compat-safe struct iw_point from the iwreq_data passed to ioctl.
int hdd_priv_get_data(struct iw_point *p_priv_data,
union iwreq_data *wrqu)
{
if ((NULL == p_priv_data) || (NULL == wrqu))
{
return -EINVAL;
}
#ifdef CONFIG_COMPAT
if (is_compat_task())
{
struct compat_iw_point *p_compat_priv_data;
// Compat task: typecast to compat structure and copy the members.
p_compat_priv_data = (struct compat_iw_point *) &wrqu->data;
p_priv_data->pointer = compat_ptr(p_compat_priv_data->pointer);
p_priv_data->length = p_compat_priv_data->length;
p_priv_data->flags = p_compat_priv_data->flags;
}//if(is_compat_task())
else
{
#endif //#ifdef CONFIG_COMPAT
// Non compat task: directly copy the structure.
memcpy(p_priv_data, &wrqu->data, sizeof(struct iw_point));
#ifdef CONFIG_COMPAT
}//else of - if(is_compat_task())
#endif //#ifdef CONFIG_COMPAT
return 0;
}
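/* Note: on a 64-bit kernel serving a 32-bit task, the user-space iw_point
 * carries a 32-bit pointer; compat_ptr() above widens it to a full
 * kernel-side user pointer before the length and flags are copied over. */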
/**---------------------------------------------------------------------------
\brief hdd_wlan_get_version() -
This function is used to get the WLAN driver, firmware and hardware versions.
\param - pAdapter Pointer to the adapter.
wrqu - Pointer to IOCTL REQUEST Data.
extra - Pointer to char
\return - none
--------------------------------------------------------------------------*/
void hdd_wlan_get_version(hdd_adapter_t *pAdapter, union iwreq_data *wrqu,
char *extra)
{
VOS_STATUS status;
tSirVersionString wcnss_SW_version;
tSirVersionString wcnss_HW_version;
char *pSWversion;
char *pHWversion;
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
status = sme_GetWcnssSoftwareVersion(hHal, wcnss_SW_version,
sizeof(wcnss_SW_version));
if (VOS_IS_STATUS_SUCCESS(status))
{
pSWversion = wcnss_SW_version;
}
else
{
pSWversion = "Unknown";
}
status = sme_GetWcnssHardwareVersion(hHal, wcnss_HW_version,
sizeof(wcnss_HW_version));
if (VOS_IS_STATUS_SUCCESS(status))
{
pHWversion = wcnss_HW_version;
}
else
{
pHWversion = "Unknown";
}
wrqu->data.length = scnprintf(extra, WE_MAX_STR_LEN,
"Host SW:%s, FW:%s, HW:%s",
QWLAN_VERSIONSTR,
pSWversion,
pHWversion);
return;
}
int hdd_wlan_get_rts_threshold(hdd_adapter_t *pAdapter, union iwreq_data *wrqu)
{
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
v_U32_t threshold = 0,status = 0;
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
"%s:LOGP in Progress. Ignore!!!",__func__);
return status;
}
if ( eHAL_STATUS_SUCCESS !=
ccmCfgGetInt(hHal, WNI_CFG_RTS_THRESHOLD, &threshold) )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
FL("failed to get ini parameter, WNI_CFG_RTS_THRESHOLD"));
return -EIO;
}
wrqu->rts.value = threshold;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
("Rts-Threshold=%d!!"), wrqu->rts.value);
EXIT();
return 0;
}
int hdd_wlan_get_frag_threshold(hdd_adapter_t *pAdapter, union iwreq_data *wrqu)
{
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
v_U32_t threshold = 0,status = 0;
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s:LOGP in Progress. Ignore!!!",__func__);
return status;
}
if ( ccmCfgGetInt(hHal, WNI_CFG_FRAGMENTATION_THRESHOLD, &threshold)
!= eHAL_STATUS_SUCCESS )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
FL("failed to get ini parameter, WNI_CFG_FRAGMENTATION_THRESHOLD"));
return -EIO;
}
wrqu->frag.value = threshold;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
("Frag-Threshold=%d!!"), wrqu->frag.value);
EXIT();
return 0;
}
int hdd_wlan_get_freq(v_U32_t channel, v_U32_t *pfreq)
{
int i;
if (channel > 0)
{
for (i=0; i < FREQ_CHAN_MAP_TABLE_SIZE; i++)
{
if (channel == freq_chan_map[i].chan)
{
*pfreq = freq_chan_map[i].freq;
return 1;
}
}
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
("Invalid channel no=%d!!"), channel);
return -EINVAL;
}
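/* Example: hdd_wlan_get_freq(6, &freq) sets freq to 2437 (MHz, per
 * freq_chan_map above) and returns 1; an unmapped channel returns -EINVAL. */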
static v_BOOL_t
hdd_IsAuthTypeRSN( tHalHandle halHandle, eCsrAuthType authType)
{
v_BOOL_t rsnType = VOS_FALSE;
// is the authType supported?
switch (authType)
{
case eCSR_AUTH_TYPE_NONE: //never used
rsnType = eANI_BOOLEAN_FALSE;
break;
// MAC layer authentication types
case eCSR_AUTH_TYPE_OPEN_SYSTEM:
rsnType = eANI_BOOLEAN_FALSE;
break;
case eCSR_AUTH_TYPE_SHARED_KEY:
rsnType = eANI_BOOLEAN_FALSE;
break;
case eCSR_AUTH_TYPE_AUTOSWITCH:
rsnType = eANI_BOOLEAN_FALSE;
break;
// Upper layer authentication types
case eCSR_AUTH_TYPE_WPA:
rsnType = eANI_BOOLEAN_TRUE;
break;
case eCSR_AUTH_TYPE_WPA_PSK:
rsnType = eANI_BOOLEAN_TRUE;
break;
case eCSR_AUTH_TYPE_WPA_NONE:
rsnType = eANI_BOOLEAN_TRUE;
break;
#ifdef WLAN_FEATURE_VOWIFI_11R
case eCSR_AUTH_TYPE_FT_RSN:
#endif
case eCSR_AUTH_TYPE_RSN:
rsnType = eANI_BOOLEAN_TRUE;
break;
#ifdef WLAN_FEATURE_VOWIFI_11R
case eCSR_AUTH_TYPE_FT_RSN_PSK:
#endif
case eCSR_AUTH_TYPE_RSN_PSK:
#ifdef WLAN_FEATURE_11W
case eCSR_AUTH_TYPE_RSN_PSK_SHA256:
case eCSR_AUTH_TYPE_RSN_8021X_SHA256:
#endif
rsnType = eANI_BOOLEAN_TRUE;
break;
//case eCSR_AUTH_TYPE_FAILED:
case eCSR_AUTH_TYPE_UNKNOWN:
rsnType = eANI_BOOLEAN_FALSE;
break;
default:
hddLog(LOGE, FL("%s called with unknown authType - default to Open, None"),
__func__);
rsnType = eANI_BOOLEAN_FALSE;
break;
}
hddLog(LOGE, FL("%s called with authType: %d, returned: %d"),
__func__, authType, rsnType);
return rsnType;
}
static void hdd_GetRssiCB( v_S7_t rssi, tANI_U32 staId, void *pContext )
{
struct statsContext *pStatsContext;
hdd_adapter_t *pAdapter;
if (ioctl_debug)
{
pr_info("%s: rssi [%d] STA [%d] pContext [%p]\n",
__func__, (int)rssi, (int)staId, pContext);
}
if (NULL == pContext)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Bad param, pContext [%p]",
__func__, pContext);
return;
}
pStatsContext = pContext;
pAdapter = pStatsContext->pAdapter;
/* a race condition exists between this callback function and the
   caller, since the caller could time out either before or while
   this code is executing.  we use a spinlock to serialize these
   actions */
spin_lock(&hdd_context_lock);
if ((NULL == pAdapter) || (RSSI_CONTEXT_MAGIC != pStatsContext->magic))
{
/* the caller presumably timed out so there is nothing we can do */
spin_unlock(&hdd_context_lock);
hddLog(VOS_TRACE_LEVEL_WARN,
"%s: Invalid context, pAdapter [%p] magic [%08x]",
__func__, pAdapter, pStatsContext->magic);
if (ioctl_debug)
{
pr_info("%s: Invalid context, pAdapter [%p] magic [%08x]\n",
__func__, pAdapter, pStatsContext->magic);
}
return;
}
/* context is valid so caller is still waiting */
/* paranoia: invalidate the magic */
pStatsContext->magic = 0;
/* copy over the rssi */
pAdapter->rssi = rssi;
/* notify the caller */
complete(&pStatsContext->completion);
/* serialization is complete */
spin_unlock(&hdd_context_lock);
}
static void hdd_GetSnrCB(tANI_S8 snr, tANI_U32 staId, void *pContext)
{
struct statsContext *pStatsContext;
hdd_adapter_t *pAdapter;
if (ioctl_debug)
{
pr_info("%s: snr [%d] STA [%d] pContext [%p]\n",
__func__, (int)snr, (int)staId, pContext);
}
if (NULL == pContext)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Bad param, pContext [%p]",
__func__, pContext);
return;
}
pStatsContext = pContext;
pAdapter = pStatsContext->pAdapter;
/* a race condition exists between this callback function and the
   caller, since the caller could time out either before or while
   this code is executing.  we use a spinlock to serialize these
   actions */
spin_lock(&hdd_context_lock);
if ((NULL == pAdapter) || (SNR_CONTEXT_MAGIC != pStatsContext->magic))
{
/* the caller presumably timed out so there is nothing we can do */
spin_unlock(&hdd_context_lock);
hddLog(VOS_TRACE_LEVEL_WARN,
"%s: Invalid context, pAdapter [%p] magic [%08x]",
__func__, pAdapter, pStatsContext->magic);
if (ioctl_debug)
{
pr_info("%s: Invalid context, pAdapter [%p] magic [%08x]\n",
__func__, pAdapter, pStatsContext->magic);
}
return;
}
/* context is valid so caller is still waiting */
/* paranoia: invalidate the magic */
pStatsContext->magic = 0;
/* copy over the snr */
pAdapter->snr = snr;
/* notify the caller */
complete(&pStatsContext->completion);
/* serialization is complete */
spin_unlock(&hdd_context_lock);
}
VOS_STATUS wlan_hdd_get_rssi(hdd_adapter_t *pAdapter, v_S7_t *rssi_value)
{
struct statsContext context;
hdd_context_t *pHddCtx;
hdd_station_ctx_t *pHddStaCtx;
eHalStatus hstatus;
long lrc;
if (NULL == pAdapter)
{
hddLog(VOS_TRACE_LEVEL_WARN,
"%s: Invalid context, pAdapter", __func__);
return VOS_STATUS_E_FAULT;
}
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "%s:LOGP in Progress. Ignore!!!",__func__);
/* return a cached value */
*rssi_value = pAdapter->rssi;
return VOS_STATUS_SUCCESS;
}
pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
init_completion(&context.completion);
context.pAdapter = pAdapter;
context.magic = RSSI_CONTEXT_MAGIC;
hstatus = sme_GetRssi(pHddCtx->hHal, hdd_GetRssiCB,
pHddStaCtx->conn_info.staId[ 0 ],
pHddStaCtx->conn_info.bssId,
&context, pHddCtx->pvosContext);
if (eHAL_STATUS_SUCCESS != hstatus)
{
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: Unable to retrieve RSSI",
__func__);
/* we'll return a cached value below */
}
else
{
/* request was sent -- wait for the response */
lrc = wait_for_completion_interruptible_timeout(&context.completion,
msecs_to_jiffies(WLAN_WAIT_TIME_STATS));
if (lrc <= 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: SME %s while retrieving RSSI",
__func__, (0 == lrc) ? "timeout" : "interrupt");
/* we'll now return a cached value below */
}
}
/* either we never sent a request, we sent a request and received a
response or we sent a request and timed out. if we never sent a
request or if we sent a request and got a response, we want to
clear the magic out of paranoia. if we timed out there is a
race condition such that the callback function could be
executing at the same time we are. of primary concern is if the
callback function had already verified the "magic" but had not
yet set the completion variable when a timeout occurred. we
serialize these activities by invalidating the magic while
holding a shared spinlock which will cause us to block if the
callback is currently executing */
spin_lock(&hdd_context_lock);
context.magic = 0;
spin_unlock(&hdd_context_lock);
*rssi_value = pAdapter->rssi;
return VOS_STATUS_SUCCESS;
}
VOS_STATUS wlan_hdd_get_snr(hdd_adapter_t *pAdapter, v_S7_t *snr)
{
struct statsContext context;
hdd_context_t *pHddCtx;
hdd_station_ctx_t *pHddStaCtx;
eHalStatus hstatus;
long lrc;
int valid;
if (NULL == pAdapter)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Invalid context, pAdapter", __func__);
return VOS_STATUS_E_FAULT;
}
pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
valid = wlan_hdd_validate_context(pHddCtx);
if (0 != valid)
{
hddLog(VOS_TRACE_LEVEL_ERROR, FL("HDD context is not valid"));
return VOS_STATUS_E_FAULT;
}
pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
if (NULL == pHddStaCtx)
{
hddLog(VOS_TRACE_LEVEL_ERROR, FL("HDD STA context is not valid"));
return VOS_STATUS_E_FAULT;
}
init_completion(&context.completion);
context.pAdapter = pAdapter;
context.magic = SNR_CONTEXT_MAGIC;
hstatus = sme_GetSnr(pHddCtx->hHal, hdd_GetSnrCB,
pHddStaCtx->conn_info.staId[ 0 ],
pHddStaCtx->conn_info.bssId,
&context);
if (eHAL_STATUS_SUCCESS != hstatus)
{
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: Unable to retrieve RSSI",
__func__);
/* we'll return a cached value below */
}
else
{
/* request was sent -- wait for the response */
lrc = wait_for_completion_interruptible_timeout(&context.completion,
msecs_to_jiffies(WLAN_WAIT_TIME_STATS));
if (lrc <= 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: SME %s while retrieving SNR",
__func__, (0 == lrc) ? "timeout" : "interrupt");
/* we'll now return a cached value below */
}
}
/* either we never sent a request, we sent a request and received a
response or we sent a request and timed out. if we never sent a
request or if we sent a request and got a response, we want to
clear the magic out of paranoia. if we timed out there is a
race condition such that the callback function could be
executing at the same time we are. of primary concern is if the
callback function had already verified the "magic" but had not
yet set the completion variable when a timeout occurred. we
serialize these activities by invalidating the magic while
holding a shared spinlock which will cause us to block if the
callback is currently executing */
spin_lock(&hdd_context_lock);
context.magic = 0;
spin_unlock(&hdd_context_lock);
*snr = pAdapter->snr;
return VOS_STATUS_SUCCESS;
}
#if defined WLAN_FEATURE_VOWIFI_11R || defined FEATURE_WLAN_ESE || defined(FEATURE_WLAN_LFR)
static void hdd_GetRoamRssiCB( v_S7_t rssi, tANI_U32 staId, void *pContext )
{
struct statsContext *pStatsContext;
hdd_adapter_t *pAdapter;
if (ioctl_debug)
{
pr_info("%s: rssi [%d] STA [%d] pContext [%p]\n",
__func__, (int)rssi, (int)staId, pContext);
}
if (NULL == pContext)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Bad param, pContext [%p]",
__func__, pContext);
return;
}
pStatsContext = pContext;
pAdapter = pStatsContext->pAdapter;
/* a race condition exists between this callback function and the
   caller, since the caller could time out either before or while
   this code is executing.  we use a spinlock to serialize these
   actions */
spin_lock(&hdd_context_lock);
if ((NULL == pAdapter) || (RSSI_CONTEXT_MAGIC != pStatsContext->magic))
{
/* the caller presumably timed out so there is nothing we can do */
spin_unlock(&hdd_context_lock);
hddLog(VOS_TRACE_LEVEL_WARN,
"%s: Invalid context, pAdapter [%p] magic [%08x]",
__func__, pAdapter, pStatsContext->magic);
if (ioctl_debug)
{
pr_info("%s: Invalid context, pAdapter [%p] magic [%08x]\n",
__func__, pAdapter, pStatsContext->magic);
}
return;
}
/* context is valid so caller is still waiting */
/* paranoia: invalidate the magic */
pStatsContext->magic = 0;
/* copy over the rssi */
pAdapter->rssi = rssi;
/* notify the caller */
complete(&pStatsContext->completion);
/* serialization is complete */
spin_unlock(&hdd_context_lock);
}
VOS_STATUS wlan_hdd_get_roam_rssi(hdd_adapter_t *pAdapter, v_S7_t *rssi_value)
{
struct statsContext context;
hdd_context_t *pHddCtx = NULL;
hdd_station_ctx_t *pHddStaCtx = NULL;
eHalStatus hstatus;
long lrc;
if (NULL == pAdapter)
{
hddLog(VOS_TRACE_LEVEL_WARN,
"%s: Invalid context, pAdapter", __func__);
return VOS_STATUS_E_FAULT;
}
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "%s:LOGP in Progress. Ignore!!!",__func__);
/* return a cached value */
*rssi_value = pAdapter->rssi;
return VOS_STATUS_SUCCESS;
}
pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
if (eConnectionState_Associated != pHddStaCtx->conn_info.connState)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s:Not associated!",__func__);
/* return a cached value */
*rssi_value = 0;
return VOS_STATUS_SUCCESS;
}
if (VOS_TRUE == pHddStaCtx->hdd_ReassocScenario)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: Roaming in progress, hence return last cached RSSI", __func__);
*rssi_value = pAdapter->rssi;
return VOS_STATUS_SUCCESS;
}
init_completion(&context.completion);
context.pAdapter = pAdapter;
context.magic = RSSI_CONTEXT_MAGIC;
hstatus = sme_GetRoamRssi(pHddCtx->hHal, hdd_GetRoamRssiCB,
pHddStaCtx->conn_info.staId[ 0 ],
pHddStaCtx->conn_info.bssId,
&context, pHddCtx->pvosContext);
if (eHAL_STATUS_SUCCESS != hstatus)
{
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: Unable to retrieve RSSI",
__func__);
/* we'll return a cached value below */
}
else
{
/* request was sent -- wait for the response */
lrc = wait_for_completion_interruptible_timeout(&context.completion,
msecs_to_jiffies(WLAN_WAIT_TIME_STATS));
if (lrc <= 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: SME %s while retrieving RSSI",
__func__, (0 == lrc) ? "timeout" : "interrupt");
/* we'll now return a cached value below */
}
}
/* either we never sent a request, we sent a request and received a
response or we sent a request and timed out. if we never sent a
request or if we sent a request and got a response, we want to
clear the magic out of paranoia. if we timed out there is a
race condition such that the callback function could be
executing at the same time we are. of primary concern is if the
callback function had already verified the "magic" but had not
yet set the completion variable when a timeout occurred. we
serialize these activities by invalidating the magic while
holding a shared spinlock which will cause us to block if the
callback is currently executing */
spin_lock(&hdd_context_lock);
context.magic = 0;
spin_unlock(&hdd_context_lock);
*rssi_value = pAdapter->rssi;
return VOS_STATUS_SUCCESS;
}
#endif
void hdd_StatisticsCB( void *pStats, void *pContext )
{
hdd_adapter_t *pAdapter = (hdd_adapter_t *)pContext;
hdd_stats_t *pStatsCache = NULL;
hdd_wext_state_t *pWextState;
VOS_STATUS vos_status = VOS_STATUS_SUCCESS;
tCsrSummaryStatsInfo *pSummaryStats = NULL;
tCsrGlobalClassAStatsInfo *pClassAStats = NULL;
tCsrGlobalClassBStatsInfo *pClassBStats = NULL;
tCsrGlobalClassCStatsInfo *pClassCStats = NULL;
tCsrGlobalClassDStatsInfo *pClassDStats = NULL;
tCsrPerStaStatsInfo *pPerStaStats = NULL;
if (pAdapter!= NULL)
pStatsCache = &pAdapter->hdd_stats;
pSummaryStats = (tCsrSummaryStatsInfo *)pStats;
pClassAStats = (tCsrGlobalClassAStatsInfo *)( pSummaryStats + 1 );
pClassBStats = (tCsrGlobalClassBStatsInfo *)( pClassAStats + 1 );
pClassCStats = (tCsrGlobalClassCStatsInfo *)( pClassBStats + 1 );
pClassDStats = (tCsrGlobalClassDStatsInfo *)( pClassCStats + 1 );
pPerStaStats = (tCsrPerStaStatsInfo *)( pClassDStats + 1 );
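/* The casts above walk a single packed buffer handed over by SME: the
 * six structs are laid out back to back as [summary][classA][classB]
 * [classC][classD][perSta], so each pointer is the previous one
 * advanced by the size of the preceding struct. */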
if (pStatsCache!=NULL)
{
// and copy the stats into the cache we keep in the adapter instance structure
vos_mem_copy( &pStatsCache->summary_stat, pSummaryStats, sizeof( pStatsCache->summary_stat ) );
vos_mem_copy( &pStatsCache->ClassA_stat, pClassAStats, sizeof( pStatsCache->ClassA_stat ) );
vos_mem_copy( &pStatsCache->ClassB_stat, pClassBStats, sizeof( pStatsCache->ClassB_stat ) );
vos_mem_copy( &pStatsCache->ClassC_stat, pClassCStats, sizeof( pStatsCache->ClassC_stat ) );
vos_mem_copy( &pStatsCache->ClassD_stat, pClassDStats, sizeof( pStatsCache->ClassD_stat ) );
vos_mem_copy( &pStatsCache->perStaStats, pPerStaStats, sizeof( pStatsCache->perStaStats ) );
}
if(pAdapter)
{
pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
if(pWextState)
{
vos_status = vos_event_set(&pWextState->vosevent);
if (!VOS_IS_STATUS_SUCCESS(vos_status))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: vos_event_set failed", __func__);
return;
}
}
}
}
void ccmCfgSetCallback(tHalHandle halHandle, tANI_S32 result)
{
v_CONTEXT_t pVosContext;
hdd_context_t *pHddCtx;
VOS_STATUS hdd_reconnect_all_adapters( hdd_context_t *pHddCtx );
#if 0
hdd_wext_state_t *pWextState;
v_U32_t roamId;
#endif
ENTER();
pVosContext = vos_get_global_context(VOS_MODULE_ID_SYS,NULL);
pHddCtx = (hdd_context_t*) vos_get_context(VOS_MODULE_ID_HDD,pVosContext);
if (NULL == pHddCtx)
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: Invalid pHddCtx", __func__);
return;
}
#if 0
pWextState = pAdapter->pWextState;
#endif
if (WNI_CFG_NEED_RESTART == result || WNI_CFG_NEED_RELOAD == result)
{
//TODO Verify if this is really used. If yes, it needs to be fixed.
hdd_reconnect_all_adapters( pHddCtx );
#if 0
pAdapter->conn_info.connState = eConnectionState_NotConnected;
INIT_COMPLETION(pAdapter->disconnect_comp_var);
vosStatus = sme_RoamDisconnect(halHandle, pAdapter->sessionId, eCSR_DISCONNECT_REASON_UNSPECIFIED);
if(VOS_STATUS_SUCCESS == vosStatus)
wait_for_completion_interruptible_timeout(&pAdapter->disconnect_comp_var,
msecs_to_jiffies(WLAN_WAIT_TIME_DISCONNECT));
sme_RoamConnect(halHandle,
pAdapter->sessionId, &(pWextState->roamProfile),
&roamId);
#endif
}
EXIT();
}
void hdd_clearRoamProfileIe( hdd_adapter_t *pAdapter)
{
hdd_wext_state_t *pWextState= WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
/* clear WPA/RSN/WSC IE information in the profile */
pWextState->roamProfile.nWPAReqIELength = 0;
pWextState->roamProfile.pWPAReqIE = (tANI_U8 *)NULL;
pWextState->roamProfile.nRSNReqIELength = 0;
pWextState->roamProfile.pRSNReqIE = (tANI_U8 *)NULL;
#ifdef FEATURE_WLAN_WAPI
pWextState->roamProfile.nWAPIReqIELength = 0;
pWextState->roamProfile.pWAPIReqIE = (tANI_U8 *)NULL;
#endif
pWextState->roamProfile.bWPSAssociation = VOS_FALSE;
pWextState->roamProfile.bOSENAssociation = VOS_FALSE;
pWextState->roamProfile.nAddIEScanLength = 0;
memset(pWextState->roamProfile.addIEScan, 0 , SIR_MAC_MAX_IE_LENGTH+2);
pWextState->roamProfile.pAddIEAssoc = (tANI_U8 *)NULL;
pWextState->roamProfile.nAddIEAssocLength = 0;
pWextState->roamProfile.EncryptionType.numEntries = 1;
pWextState->roamProfile.EncryptionType.encryptionType[0]
= eCSR_ENCRYPT_TYPE_NONE;
pWextState->roamProfile.mcEncryptionType.numEntries = 1;
pWextState->roamProfile.mcEncryptionType.encryptionType[0]
= eCSR_ENCRYPT_TYPE_NONE;
pWextState->roamProfile.AuthType.numEntries = 1;
pWextState->roamProfile.AuthType.authType[0] = eCSR_AUTH_TYPE_OPEN_SYSTEM;
#ifdef WLAN_FEATURE_11W
pWextState->roamProfile.MFPEnabled = eANI_BOOLEAN_FALSE;
pWextState->roamProfile.MFPRequired = 0;
pWextState->roamProfile.MFPCapable = 0;
#endif
pWextState->authKeyMgmt = 0;
vos_mem_zero(&pWextState->roamProfile.Keys,
sizeof(pWextState->roamProfile.Keys));
#ifdef FEATURE_WLAN_WAPI
pAdapter->wapi_info.wapiAuthMode = WAPI_AUTH_MODE_OPEN;
pAdapter->wapi_info.nWapiMode = 0;
#endif
vos_mem_zero((void *)(pWextState->req_bssId), WNI_CFG_BSSID_LEN);
}
void wlan_hdd_ula_done_cb(v_VOID_t *callbackContext)
{
hdd_adapter_t *pAdapter = (hdd_adapter_t*)callbackContext;
if (WLAN_HDD_ADAPTER_MAGIC != pAdapter->magic)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid pAdapter magic", __func__);
}
else
{
complete(&pAdapter->ula_complete);
}
}
VOS_STATUS wlan_hdd_check_ula_done(hdd_adapter_t *pAdapter)
{
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
VOS_STATUS vos_status;
unsigned long rc;
if (VOS_FALSE == pHddStaCtx->conn_info.uIsAuthenticated)
{
INIT_COMPLETION(pAdapter->ula_complete);
/* To avoid a race condition between the set key and the last EAPOL
packet, notify TL to finish upper layer authentication in case the
last EAPOL packet is pending in the TL queue. */
vos_status = WLANTL_Finish_ULA(wlan_hdd_ula_done_cb, pAdapter);
if ( vos_status != VOS_STATUS_SUCCESS )
{
VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"[%4d] WLANTL_Finish_ULA returned ERROR status= %d",
__LINE__, vos_status );
return vos_status;
}
rc = wait_for_completion_timeout(&pAdapter->ula_complete,
msecs_to_jiffies(HDD_FINISH_ULA_TIME_OUT));
if (rc <= 0)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
FL("failure wait on ULA to complete %ld"), rc);
/* we'll still fall through and return success since the
* connection may still get established but is just taking
* too long for us to wait */
}
}
return VOS_STATUS_SUCCESS;
}
v_U8_t* wlan_hdd_get_vendor_oui_ie_ptr(v_U8_t *oui, v_U8_t oui_size, v_U8_t *ie, int ie_len)
{
int left = ie_len;
v_U8_t *ptr = ie;
v_U8_t elem_id,elem_len;
v_U8_t eid = 0xDD;
if ( NULL == ie || 0 == ie_len )
return NULL;
while(left >= 2)
{
elem_id = ptr[0];
elem_len = ptr[1];
left -= 2;
if(elem_len > left)
{
hddLog(VOS_TRACE_LEVEL_FATAL,
FL("****Invalid IEs eid = %d elem_len=%d left=%d*****"),
eid,elem_len,left);
return NULL;
}
if (elem_id == eid)
{
if(memcmp( &ptr[2], oui, oui_size)==0)
return ptr;
}
left -= elem_len;
ptr += (elem_len + 2);
}
return NULL;
}
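/* Usage sketch (for illustration only; ie_blob/ie_blob_len are
 * hypothetical): locate the WPS vendor IE (OUI 00:50:F2, type 0x04)
 * inside an IE blob:
 *
 *   v_U8_t wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 };
 *   v_U8_t *wps_ie = wlan_hdd_get_vendor_oui_ie_ptr(wps_oui,
 *                        sizeof(wps_oui), ie_blob, ie_blob_len);
 *
 * A non-NULL return points at the element ID byte of the matching IE. */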
static int iw_set_commit(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hddLog( LOG1, "In %s", __func__);
/* Do nothing for now */
return 0;
}
static int iw_get_name(struct net_device *dev,
struct iw_request_info *info,
char *wrqu, char *extra)
{
ENTER();
strlcpy(wrqu, "Qcom:802.11n", IFNAMSIZ);
EXIT();
return 0;
}
static int __iw_set_mode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_wext_state_t *pWextState;
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tCsrRoamProfile *pRoamProfile;
eCsrRoamBssType LastBSSType;
eMib_dot11DesiredBssType connectedBssType;
hdd_config_t *pConfig;
struct wireless_dev *wdev;
ENTER();
if (NULL == pAdapter)
{
hddLog(VOS_TRACE_LEVEL_WARN,
"%s: Invalid context, pAdapter", __func__);
return 0;
}
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return 0;
}
pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
if (pWextState == NULL)
{
hddLog(LOGE, "%s ERROR: Data Storage Corruption", __func__);
return -EINVAL;
}
wdev = dev->ieee80211_ptr;
pRoamProfile = &pWextState->roamProfile;
LastBSSType = pRoamProfile->BSSType;
hddLog(LOG1, "%s Old Bss type = %d", __func__, LastBSSType);
switch (wrqu->mode)
{
case IW_MODE_ADHOC:
hddLog(LOG1, "%s Setting AP Mode as IW_MODE_ADHOC", __func__);
pRoamProfile->BSSType = eCSR_BSS_TYPE_START_IBSS;
// Set the phymode correctly for IBSS.
pConfig = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini;
pWextState->roamProfile.phyMode = hdd_cfg_xlate_to_csr_phy_mode(pConfig->dot11Mode);
pAdapter->device_mode = WLAN_HDD_IBSS;
wdev->iftype = NL80211_IFTYPE_ADHOC;
break;
case IW_MODE_INFRA:
hddLog(LOG1, "%s Setting AP Mode as IW_MODE_INFRA", __func__);
pRoamProfile->BSSType = eCSR_BSS_TYPE_INFRASTRUCTURE;
wdev->iftype = NL80211_IFTYPE_STATION;
break;
case IW_MODE_AUTO:
hddLog(LOG1, "%s Setting AP Mode as IW_MODE_AUTO", __func__);
pRoamProfile->BSSType = eCSR_BSS_TYPE_ANY;
break;
default:
hddLog(LOGE, "%s Unknown AP Mode value %d ", __func__, wrqu->mode);
return -EOPNOTSUPP;
}
if ( LastBSSType != pRoamProfile->BSSType )
{
//the BSS mode changed
// We need to issue disconnect if connected or in IBSS disconnect state
if ( hdd_connGetConnectedBssType( WLAN_HDD_GET_STATION_CTX_PTR(pAdapter), &connectedBssType ) ||
( eCSR_BSS_TYPE_START_IBSS == LastBSSType ) )
{
VOS_STATUS vosStatus;
// need to issue a disconnect to CSR.
INIT_COMPLETION(pAdapter->disconnect_comp_var);
vosStatus = sme_RoamDisconnect( WLAN_HDD_GET_HAL_CTX(pAdapter),
pAdapter->sessionId,
eCSR_DISCONNECT_REASON_IBSS_LEAVE );
if(VOS_STATUS_SUCCESS == vosStatus)
{
long ret;
ret = wait_for_completion_interruptible_timeout(
&pAdapter->disconnect_comp_var,
msecs_to_jiffies(WLAN_WAIT_TIME_DISCONNECT));
if (ret <= 0)
hddLog(VOS_TRACE_LEVEL_ERROR,
FL("failed wait on disconnect_comp_var %ld"), ret);
}
}
}
EXIT();
return 0;
}
static int iw_set_mode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_mode(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_get_mode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
hdd_wext_state_t *pWextState;
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hddLog(LOG1, "In %s", __func__);
if (NULL == pAdapter)
{
hddLog(VOS_TRACE_LEVEL_WARN,
"%s: Invalid context, pAdapter", __func__);
return 0;
}
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return 0;
}
pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
if (pWextState == NULL)
{
hddLog(LOGE, "%s ERROR: Data Storage Corruption", __func__);
return -EINVAL;
}
switch (pWextState->roamProfile.BSSType)
{
case eCSR_BSS_TYPE_INFRASTRUCTURE:
hddLog(LOG1, "%s returns IW_MODE_INFRA", __func__);
wrqu->mode = IW_MODE_INFRA;
break;
case eCSR_BSS_TYPE_IBSS:
case eCSR_BSS_TYPE_START_IBSS:
hddLog(LOG1, "%s returns IW_MODE_ADHOC", __func__);
wrqu->mode = IW_MODE_ADHOC;
break;
case eCSR_BSS_TYPE_ANY:
hddLog(LOG1, "%s returns IW_MODE_AUTO", __func__);
wrqu->mode = IW_MODE_AUTO;
break;
default:
hddLog(LOG1, "%s returns APMODE_UNKNOWN", __func__);
break;
}
return 0;
}
static int iw_get_mode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_mode(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
v_U32_t numChans = 0;
v_U8_t validChan[WNI_CFG_VALID_CHANNEL_LIST_LEN];
v_U32_t indx = 0;
v_U32_t status = 0;
hdd_wext_state_t *pWextState;
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
tCsrRoamProfile * pRoamProfile;
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:LOGP in Progress. Ignore!!!",__func__);
return status;
}
pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
pRoamProfile = &pWextState->roamProfile;
hddLog(LOG1,"setCHANNEL ioctl");
/* If the link is up, return; the channel cannot be changed */
if(eConnectionState_IbssConnected == pHddStaCtx->conn_info.connState ||
eConnectionState_Associated == pHddStaCtx->conn_info.connState)
{
hddLog( LOGE, "IBSS Associated");
return -EOPNOTSUPP;
}
/* Settings by Frequency as input */
if((wrqu->freq.e == 1) && (wrqu->freq.m >= (tANI_U32)2.412e8) &&
(wrqu->freq.m <= (tANI_U32)5.825e8))
{
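/* With e == 1 the iw_freq value is m * 10 Hz, so e.g. 2412 MHz
 * arrives as m == 2.412e8; dividing by 100000 recovers the MHz
 * value used by freq_chan_map. */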
tANI_U32 freq = wrqu->freq.m / 100000;
while ((indx < FREQ_CHAN_MAP_TABLE_SIZE) && (freq != freq_chan_map[indx].freq))
indx++;
if (indx >= FREQ_CHAN_MAP_TABLE_SIZE)
{
return -EINVAL;
}
wrqu->freq.e = 0;
wrqu->freq.m = freq_chan_map[indx].chan;
}
if (wrqu->freq.e == 0)
{
if((wrqu->freq.m < WNI_CFG_CURRENT_CHANNEL_STAMIN) ||
(wrqu->freq.m > WNI_CFG_CURRENT_CHANNEL_STAMAX))
{
hddLog(LOG1,"%s: Channel [%d] is outside valid range from %d to %d",
__func__, wrqu->freq.m, WNI_CFG_CURRENT_CHANNEL_STAMIN,
WNI_CFG_CURRENT_CHANNEL_STAMAX);
return -EINVAL;
}
numChans = WNI_CFG_VALID_CHANNEL_LIST_LEN;
if (ccmCfgGetStr(hHal, WNI_CFG_VALID_CHANNEL_LIST,
validChan, &numChans) != eHAL_STATUS_SUCCESS){
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
FL("failed to get ini parameter, WNI_CFG_VALID_CHANNEL_LIST"));
return -EIO;
}
for (indx = 0; indx < numChans; indx++) {
if (wrqu->freq.m == validChan[indx]){
break;
}
}
}
else{
return -EINVAL;
}
if(indx >= numChans)
{
return -EINVAL;
}
/* Set the Operational Channel */
numChans = pRoamProfile->ChannelInfo.numOfChannels = 1;
pHddStaCtx->conn_info.operationChannel = wrqu->freq.m;
pRoamProfile->ChannelInfo.ChannelList = &pHddStaCtx->conn_info.operationChannel;
hddLog(LOG1,"pRoamProfile->operationChannel = %d", wrqu->freq.m);
EXIT();
return status;
}
static int iw_set_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_freq(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_get_freq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *fwrq, char *extra)
{
v_U32_t status = FALSE, channel = 0, freq = 0;
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal;
hdd_wext_state_t *pWextState;
tCsrRoamProfile * pRoamProfile;
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:LOGP in Progress. Ignore!!!",__func__);
return status;
}
pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
pRoamProfile = &pWextState->roamProfile;
if( pHddStaCtx->conn_info.connState== eConnectionState_Associated )
{
if (sme_GetOperationChannel(hHal, &channel, pAdapter->sessionId) != eHAL_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
FL("failed to get operating channel %u"), pAdapter->sessionId);
return -EIO;
}
else
{
status = hdd_wlan_get_freq(channel, &freq);
if( TRUE == status )
{
/* Set the exponent to 6 (MHz) in struct iw_freq so that the
 * iwlist & iwconfig commands show the frequency in the proper
 * format (2.412 GHz instead of 246.2 MHz) */
fwrq->m = freq;
fwrq->e = MHZ;
}
}
}
else
{
/* Set the exponent to 6 (MHz) in struct iw_freq so that the
 * iwlist & iwconfig commands show the frequency in the proper
 * format (2.412 GHz instead of 246.2 MHz) */
fwrq->m = 0;
fwrq->e = MHZ;
}
return 0;
}
static int iw_get_freq(struct net_device *dev,
struct iw_request_info *info,
struct iw_freq *fwrq, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_freq(dev, info, fwrq, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_get_tx_power(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
if (pHddCtx->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!",__func__);
return -EBUSY;
}
if(eConnectionState_Associated != pHddStaCtx->conn_info.connState)
{
wrqu->txpower.value = 0;
return 0;
}
wlan_hdd_get_classAstats(pAdapter);
wrqu->txpower.value = pAdapter->hdd_stats.ClassA_stat.max_pwr;
return 0;
}
static int iw_get_tx_power(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_tx_power(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_tx_power(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:LOGP in Progress. Ignore!!!",__func__);
return 0;
}
ENTER();
if ( ccmCfgSetInt(hHal, WNI_CFG_CURRENT_TX_POWER_LEVEL, wrqu->txpower.value, ccmCfgSetCallback, eANI_BOOLEAN_TRUE) != eHAL_STATUS_SUCCESS )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
FL("failed to set ini parameter, WNI_CFG_CURRENT_TX_POWER_LEVEL"));
return -EIO;
}
EXIT();
return 0;
}
static int iw_set_tx_power(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_tx_power(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_get_bitrate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
VOS_STATUS vos_status = VOS_STATUS_SUCCESS;
eHalStatus status = eHAL_STATUS_SUCCESS;
hdd_wext_state_t *pWextState;
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:LOGP in Progress. Ignore!!!",__func__);
return status;
}
if(eConnectionState_Associated != pHddStaCtx->conn_info.connState) {
wrqu->bitrate.value = 0;
}
else {
status = sme_GetStatistics( WLAN_HDD_GET_HAL_CTX(pAdapter), eCSR_HDD,
SME_SUMMARY_STATS |
SME_GLOBAL_CLASSA_STATS |
SME_GLOBAL_CLASSB_STATS |
SME_GLOBAL_CLASSC_STATS |
SME_GLOBAL_CLASSD_STATS |
SME_PER_STA_STATS,
hdd_StatisticsCB, 0, FALSE,
pHddStaCtx->conn_info.staId[0], pAdapter );
if(eHAL_STATUS_SUCCESS != status)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Unable to retrieve statistics",
__func__);
return status;
}
pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
vos_status = vos_wait_single_event(&pWextState->vosevent, WLAN_WAIT_TIME_STATS);
if (!VOS_IS_STATUS_SUCCESS(vos_status))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: SME timeout while retrieving statistics",
__func__);
return VOS_STATUS_E_FAILURE;
}
wrqu->bitrate.value = pAdapter->hdd_stats.ClassA_stat.tx_rate*500*1000;
}
EXIT();
return vos_status;
}
static int iw_get_bitrate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_bitrate(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_bitrate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_wext_state_t *pWextState;
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
v_U8_t supp_rates[WNI_CFG_SUPPORTED_RATES_11A_LEN];
v_U32_t a_len = WNI_CFG_SUPPORTED_RATES_11A_LEN;
v_U32_t b_len = WNI_CFG_SUPPORTED_RATES_11B_LEN;
v_U32_t i, rate;
v_U32_t valid_rate = FALSE, active_phy_mode = 0;
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:LOGP in Progress. Ignore!!!",__func__);
return 0;
}
pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
if (eConnectionState_Associated != pHddStaCtx->conn_info.connState)
{
return -ENXIO ;
}
rate = wrqu->bitrate.value;
if (rate == -1)
{
rate = WNI_CFG_FIXED_RATE_AUTO;
valid_rate = TRUE;
}
else if (ccmCfgGetInt(WLAN_HDD_GET_HAL_CTX(pAdapter),
WNI_CFG_DOT11_MODE, &active_phy_mode) == eHAL_STATUS_SUCCESS)
{
if (active_phy_mode == WNI_CFG_DOT11_MODE_11A || active_phy_mode == WNI_CFG_DOT11_MODE_11G
|| active_phy_mode == WNI_CFG_DOT11_MODE_11B)
{
if ((ccmCfgGetStr(WLAN_HDD_GET_HAL_CTX(pAdapter),
WNI_CFG_SUPPORTED_RATES_11A,
supp_rates, &a_len) == eHAL_STATUS_SUCCESS) &&
(ccmCfgGetStr(WLAN_HDD_GET_HAL_CTX(pAdapter),
WNI_CFG_SUPPORTED_RATES_11B,
supp_rates, &b_len) == eHAL_STATUS_SUCCESS))
{
for (i = 0; i < (b_len + a_len); ++i)
{
/* supported rates are returned at double the actual rate, so divide by 2 */
if ((supp_rates[i]&0x7F)/2 == rate)
{
valid_rate = TRUE;
rate = i + WNI_CFG_FIXED_RATE_1MBPS;
break;
}
}
}
}
}
if (valid_rate != TRUE)
{
return -EINVAL;
}
if (ccmCfgSetInt(WLAN_HDD_GET_HAL_CTX(pAdapter),
WNI_CFG_FIXED_RATE, rate,
ccmCfgSetCallback,eANI_BOOLEAN_FALSE) != eHAL_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
FL("failed to set ini parameter, WNI_CFG_FIXED_RATE"));
return -EIO;
}
return 0;
}
static int iw_set_bitrate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_bitrate(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_genie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_wext_state_t *pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
u_int8_t *genie = NULL;
u_int8_t *base_genie = NULL;
v_U16_t remLen;
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return 0;
}
if (!wrqu->data.length) {
hdd_clearRoamProfileIe(pAdapter);
EXIT();
return 0;
}
base_genie = mem_alloc_copy_from_user_helper(wrqu->data.pointer,
wrqu->data.length);
if (NULL == base_genie)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"mem_alloc_copy_from_user_helper fail");
return -ENOMEM;
}
genie = base_genie;
remLen = wrqu->data.length;
hddLog(LOG1,"iw_set_genie ioctl IE[0x%X], LEN[%d]", genie[0], genie[1]);
/* clear any previous genIE before this call */
memset( &pWextState->genIE, 0, sizeof(pWextState->genIE) );
while (remLen >= 2)
{
v_U16_t eLen = 0;
v_U8_t elementId;
elementId = *genie++;
eLen = *genie++;
remLen -= 2;
hddLog(VOS_TRACE_LEVEL_INFO, "%s: IE[0x%X], LEN[%d]",
__func__, elementId, eLen);
switch ( elementId )
{
case IE_EID_VENDOR:
if ((IE_LEN_SIZE+IE_EID_SIZE+IE_VENDOR_OUI_SIZE) > eLen) /* should have at least OUI */
{
kfree(base_genie);
return -EINVAL;
}
if (0 == memcmp(&genie[0], "\x00\x50\xf2\x04", 4))
{
v_U16_t curGenIELen = pWextState->genIE.length;
hddLog (VOS_TRACE_LEVEL_INFO, "%s Set WPS OUI(%02x %02x %02x %02x) IE(len %d)",
__func__, genie[0], genie[1], genie[2], genie[3], eLen + 2);
if( SIR_MAC_MAX_IE_LENGTH < (pWextState->genIE.length + eLen) )
{
hddLog(VOS_TRACE_LEVEL_FATAL, "Cannot accommodate genIE. "
"Need bigger buffer space");
VOS_ASSERT(0);
kfree(base_genie);
return -ENOMEM;
}
// accumulate into the additional IE buffer so the WPS IE and other vendor IEs are handled together
memcpy( pWextState->genIE.addIEdata + curGenIELen, genie - 2, eLen + 2);
pWextState->genIE.length += eLen + 2;
}
else if (0 == memcmp(&genie[0], "\x00\x50\xf2", 3))
{
hddLog (VOS_TRACE_LEVEL_INFO, "%s Set WPA IE (len %d)",__func__, eLen + 2);
memset( pWextState->WPARSNIE, 0, MAX_WPA_RSN_IE_LEN );
memcpy( pWextState->WPARSNIE, genie - 2, (eLen + 2));
pWextState->roamProfile.pWPAReqIE = pWextState->WPARSNIE;
pWextState->roamProfile.nWPAReqIELength = eLen + 2;
}
else /* any vendorId except WPA IE should be accumulated to genIE */
{
v_U16_t curGenIELen = pWextState->genIE.length;
hddLog (VOS_TRACE_LEVEL_INFO, "%s Set OUI(%02x %02x %02x %02x) IE(len %d)",
__func__, genie[0], genie[1], genie[2], genie[3], eLen + 2);
if( SIR_MAC_MAX_IE_LENGTH < (pWextState->genIE.length + eLen) )
{
hddLog(VOS_TRACE_LEVEL_FATAL, "Cannot accommodate genIE. "
"Need bigger buffer space");
VOS_ASSERT(0);
kfree(base_genie);
return -ENOMEM;
}
// accumulate into the additional IE buffer so the WPS IE and other vendor IEs are handled together
memcpy( pWextState->genIE.addIEdata + curGenIELen, genie - 2, eLen + 2);
pWextState->genIE.length += eLen + 2;
}
break;
case DOT11F_EID_RSN:
hddLog (LOG1, "%s Set RSN IE (len %d)",__func__, eLen+2);
memset( pWextState->WPARSNIE, 0, MAX_WPA_RSN_IE_LEN );
memcpy( pWextState->WPARSNIE, genie - 2, (eLen + 2));
pWextState->roamProfile.pRSNReqIE = pWextState->WPARSNIE;
pWextState->roamProfile.nRSNReqIELength = eLen + 2;
break;
default:
hddLog (LOGE, "%s Set UNKNOWN IE %X",__func__, elementId);
kfree(base_genie);
return 0;
}
genie += eLen;
remLen -= eLen;
}
EXIT();
kfree(base_genie);
return 0;
}
static int iw_set_genie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_genie(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_get_genie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
hdd_wext_state_t *pWextState;
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
eHalStatus status;
v_U32_t length = DOT11F_IE_RSN_MAX_LEN;
v_U8_t genIeBytes[DOT11F_IE_RSN_MAX_LEN];
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:LOGP in Progress. Ignore!!!",__func__);
return 0;
}
hddLog(LOG1,"getGEN_IE ioctl");
pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
if( pHddStaCtx->conn_info.connState == eConnectionState_NotConnected)
{
return -ENXIO;
}
// Return something ONLY if we are associated with an RSN or WPA network
if ( VOS_TRUE != hdd_IsAuthTypeRSN(WLAN_HDD_GET_HAL_CTX(pAdapter),
pWextState->roamProfile.negotiatedAuthType))
{
return -ENXIO;
}
// Actually retrieve the RSN IE from CSR. (We previously sent it down in the CSR Roam Profile.)
status = csrRoamGetWpaRsnReqIE(WLAN_HDD_GET_HAL_CTX(pAdapter),
pAdapter->sessionId,
&length,
genIeBytes);
length = VOS_MIN((u_int16_t) length, DOT11F_IE_RSN_MAX_LEN);
if (wrqu->data.length < length)
{
hddLog(LOG1, "%s: failed to copy data to user buffer", __func__);
return -EFAULT;
}
vos_mem_copy( extra, (v_VOID_t*)genIeBytes, length);
wrqu->data.length = length;
hddLog(LOG1,"%s: RSN IE of %d bytes returned", __func__, wrqu->data.length );
EXIT();
return 0;
}
static int iw_get_genie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_genie(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_get_encode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_wext_state_t *pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
tCsrRoamProfile *pRoamProfile = &(pWextState->roamProfile);
int keyId;
eCsrAuthType authType = eCSR_AUTH_TYPE_NONE;
int i;
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:LOGP in Progress. Ignore!!!",__func__);
return 0;
}
keyId = pRoamProfile->Keys.defaultIndex;
if(keyId < 0 || keyId >= MAX_WEP_KEYS)
{
hddLog(LOG1,"%s: Invalid keyId : %d",__func__,keyId);
return -EINVAL;
}
if(pRoamProfile->Keys.KeyLength[keyId] > 0)
{
dwrq->flags |= IW_ENCODE_ENABLED;
dwrq->length = pRoamProfile->Keys.KeyLength[keyId];
vos_mem_copy(extra,&(pRoamProfile->Keys.KeyMaterial[keyId][0]),pRoamProfile->Keys.KeyLength[keyId]);
dwrq->flags |= (keyId + 1);
}
else
{
dwrq->flags |= IW_ENCODE_DISABLED;
}
for(i=0; i < MAX_WEP_KEYS; i++)
{
if(pRoamProfile->Keys.KeyMaterial[i] == NULL)
{
continue;
}
else
{
break;
}
}
if(MAX_WEP_KEYS == i)
{
dwrq->flags |= IW_ENCODE_NOKEY;
}
authType = ((hdd_station_ctx_t*)WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.authType;
if(eCSR_AUTH_TYPE_OPEN_SYSTEM == authType)
{
dwrq->flags |= IW_ENCODE_OPEN;
}
else
{
dwrq->flags |= IW_ENCODE_RESTRICTED;
}
EXIT();
return 0;
}
static int iw_get_encode(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_encode(dev, info, dwrq, extra);
vos_ssr_unprotect(__func__);
return ret;
}
#define PAE_ROLE_AUTHENTICATOR 1 /* authenticator */
#define PAE_ROLE_SUPPLICANT 0 /* supplicant */
static int __iw_get_rts_threshold(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
v_U32_t status = 0;
status = hdd_wlan_get_rts_threshold(pAdapter,wrqu);
return status;
}
static int iw_get_rts_threshold(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_rts_threshold(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_rts_threshold(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EAGAIN;
}
if ( wrqu->rts.value < WNI_CFG_RTS_THRESHOLD_STAMIN || wrqu->rts.value > WNI_CFG_RTS_THRESHOLD_STAMAX )
{
return -EINVAL;
}
if ( ccmCfgSetInt(hHal, WNI_CFG_RTS_THRESHOLD, wrqu->rts.value, ccmCfgSetCallback, eANI_BOOLEAN_TRUE) != eHAL_STATUS_SUCCESS )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
FL("failed to set ini parameter, WNI_CFG_RTS_THRESHOLD"));
return -EIO;
}
EXIT();
return 0;
}
static int iw_set_rts_threshold(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_rts_threshold(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_get_frag_threshold(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
v_U32_t status = 0;
status = hdd_wlan_get_frag_threshold(pAdapter,wrqu);
return status;
}
static int iw_get_frag_threshold(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_frag_threshold(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_frag_threshold(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
if ( wrqu->frag.value < WNI_CFG_FRAGMENTATION_THRESHOLD_STAMIN || wrqu->frag.value > WNI_CFG_FRAGMENTATION_THRESHOLD_STAMAX )
{
return -EINVAL;
}
if ( ccmCfgSetInt(hHal, WNI_CFG_FRAGMENTATION_THRESHOLD, wrqu->frag.value, ccmCfgSetCallback, eANI_BOOLEAN_TRUE) != eHAL_STATUS_SUCCESS )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
FL("failed to set ini parameter, WNI_CFG_FRAGMENTATION_THRESHOLD"));
return -EIO;
}
EXIT();
return 0;
}
static int iw_set_frag_threshold(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_frag_threshold(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int iw_get_power_mode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
ENTER();
return -EOPNOTSUPP;
}
static int iw_set_power_mode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
ENTER();
return -EOPNOTSUPP;
}
static int __iw_get_range(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
struct iw_range *range = (struct iw_range *) extra;
v_U8_t channels[WNI_CFG_VALID_CHANNEL_LIST_LEN];
v_U32_t num_channels = sizeof(channels);
v_U8_t supp_rates[WNI_CFG_SUPPORTED_RATES_11A_LEN];
v_U32_t a_len;
v_U32_t b_len;
v_U32_t active_phy_mode = 0;
v_U8_t index = 0, i;
ENTER();
wrqu->data.length = sizeof(struct iw_range);
memset(range, 0, sizeof(struct iw_range));
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
/*Get the phy mode*/
if (ccmCfgGetInt(hHal,
WNI_CFG_DOT11_MODE, &active_phy_mode) == eHAL_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"active_phy_mode = %d", active_phy_mode);
if (active_phy_mode == WNI_CFG_DOT11_MODE_11A || active_phy_mode == WNI_CFG_DOT11_MODE_11G)
{
/*Get the supported rates for 11G band*/
a_len = WNI_CFG_SUPPORTED_RATES_11A_LEN;
if (ccmCfgGetStr(hHal,
WNI_CFG_SUPPORTED_RATES_11A,
supp_rates, &a_len) == eHAL_STATUS_SUCCESS)
{
if (a_len > WNI_CFG_SUPPORTED_RATES_11A_LEN)
{
a_len = WNI_CFG_SUPPORTED_RATES_11A_LEN;
}
for (i = 0; i < a_len; i++)
{
range->bitrate[i] = ((supp_rates[i] & 0x7F) / 2) * 1000000;
}
range->num_bitrates = a_len;
}
else
{
return -EIO;
}
}
else if (active_phy_mode == WNI_CFG_DOT11_MODE_11B)
{
/*Get the supported rates for 11B band*/
b_len = WNI_CFG_SUPPORTED_RATES_11B_LEN;
if (ccmCfgGetStr(hHal,
WNI_CFG_SUPPORTED_RATES_11B,
supp_rates, &b_len) == eHAL_STATUS_SUCCESS)
{
if (b_len > WNI_CFG_SUPPORTED_RATES_11B_LEN)
{
b_len = WNI_CFG_SUPPORTED_RATES_11B_LEN;
}
for (i = 0; i < b_len; i++)
{
range->bitrate[i] = ((supp_rates[i] & 0x7F) / 2) * 1000000;
}
range->num_bitrates = b_len;
}
else
{
return -EIO;
}
}
}
range->max_rts = WNI_CFG_RTS_THRESHOLD_STAMAX;
range->min_frag = WNI_CFG_FRAGMENTATION_THRESHOLD_STAMIN;
range->max_frag = WNI_CFG_FRAGMENTATION_THRESHOLD_STAMAX;
range->encoding_size[0] = 5;
range->encoding_size[1] = 13;
range->num_encoding_sizes = 2;
range->max_encoding_tokens = MAX_WEP_KEYS;
// we support up through Wireless Extensions 22
range->we_version_compiled = WIRELESS_EXT;
range->we_version_source = 22;
/*Supported Channels and Frequencies*/
if (ccmCfgGetStr((hHal), WNI_CFG_VALID_CHANNEL_LIST, channels, &num_channels) != eHAL_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
FL("failed to get ini parameter, WNI_CFG_VALID_CHANNEL_LIST"));
return -EIO;
}
if (num_channels > IW_MAX_FREQUENCIES)
{
num_channels = IW_MAX_FREQUENCIES;
}
range->num_channels = num_channels;
range->num_frequency = num_channels;
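/*
 * Wireless extensions report a frequency as m * 10^e Hz. The map table
 * stores frequencies in MHz, so m = freq * 100000 with e = 1 yields Hz;
 * e.g. channel 6 (2437 MHz) is reported as 243700000 * 10^1 = 2.437 GHz.
 */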
for (index=0; index < num_channels; index++)
{
v_U32_t frq_indx = 0;
range->freq[index].i = channels[index];
while (frq_indx < FREQ_CHAN_MAP_TABLE_SIZE)
{
if(channels[index] == freq_chan_map[frq_indx].chan)
{
range->freq[index].m = freq_chan_map[frq_indx].freq * 100000;
range->freq[index].e = 1;
break;
}
frq_indx++;
}
}
/* Event capability (kernel + driver) */
range->event_capa[0] = (IW_EVENT_CAPA_K_0 |
IW_EVENT_CAPA_MASK(SIOCGIWAP) |
IW_EVENT_CAPA_MASK(SIOCGIWSCAN));
range->event_capa[1] = IW_EVENT_CAPA_K_1;
/*Encryption capability*/
range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 |
IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP;
/* Txpower capability */
range->txpower_capa = IW_TXPOW_MWATT;
/*Scanning capability*/
#if WIRELESS_EXT >= 22
range->scan_capa = IW_SCAN_CAPA_ESSID | IW_SCAN_CAPA_TYPE | IW_SCAN_CAPA_CHANNEL;
#endif
EXIT();
return 0;
}
static int iw_get_range(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_range(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
/* Callback function registered with PMC to know status of PMC request */
static void iw_power_callback_fn (void *pContext, eHalStatus status)
{
struct statsContext *pStatsContext;
if (NULL == pContext)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Bad param, pContext [%p]",
__func__, pContext);
return;
}
pStatsContext = (struct statsContext *)pContext;
/* there is a race condition that exists between this callback
function and the caller since the caller could time out either
before or while this code is executing. we use a spinlock to
serialize these actions */
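/*
 * A sketch of the sequence being serialized (caller left, callback right):
 *
 *   caller: timeout expires          callback: about to check magic
 *   caller: spin_lock, magic = 0     callback: blocks on spin_lock
 *   caller: spin_unlock, returns     callback: sees magic == 0, bails out
 *
 * Without the lock, the callback could pass the magic check and then
 * complete() a context whose stack frame the caller has already left.
 */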
spin_lock(&hdd_context_lock);
if (POWER_CONTEXT_MAGIC != pStatsContext->magic)
{
/* the caller presumably timed out so there is nothing we can do */
spin_unlock(&hdd_context_lock);
hddLog(VOS_TRACE_LEVEL_WARN,
"%s: Invalid context, magic [%08x]",
__func__, pStatsContext->magic);
if (ioctl_debug)
{
pr_info("%s: Invalid context, magic [%08x]\n",
__func__, pStatsContext->magic);
}
return;
}
/* context is valid so caller is still waiting */
/* paranoia: invalidate the magic */
pStatsContext->magic = 0;
/* notify the caller */
complete(&pStatsContext->completion);
/* serialization is complete */
spin_unlock(&hdd_context_lock);
}
/* Callback function for tx per hit */
void hdd_tx_per_hit_cb (void *pCallbackContext)
{
hdd_adapter_t *pAdapter = (hdd_adapter_t *)pCallbackContext;
char tx_fail[16];
union iwreq_data wrqu;
if (NULL == pAdapter || (WLAN_HDD_ADAPTER_MAGIC != pAdapter->magic))
{
hddLog(LOGE, "hdd_tx_per_hit_cb: pAdapter is NULL or has a bad magic");
return;
}
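/*
 * Deliver the PER-threshold hit to user space as an IWEVCUSTOM wireless
 * event; listeners are assumed to match on the literal "TX_FAIL"
 * payload string.
 */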
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.length = strlcpy(tx_fail, "TX_FAIL", sizeof(tx_fail));
wireless_send_event(pAdapter->dev, IWEVCUSTOM, &wrqu, tx_fail);
}
void hdd_GetClassA_statisticsCB(void *pStats, void *pContext)
{
struct statsContext *pStatsContext;
tCsrGlobalClassAStatsInfo *pClassAStats;
hdd_adapter_t *pAdapter;
if (ioctl_debug)
{
pr_info("%s: pStats [%p] pContext [%p]\n",
__func__, pStats, pContext);
}
if ((NULL == pStats) || (NULL == pContext))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Bad param, pStats [%p] pContext [%p]",
__func__, pStats, pContext);
return;
}
pClassAStats = pStats;
pStatsContext = pContext;
pAdapter = pStatsContext->pAdapter;
/* there is a race condition that exists between this callback
function and the caller since the caller could time out either
before or while this code is executing. we use a spinlock to
serialize these actions */
spin_lock(&hdd_context_lock);
if ((NULL == pAdapter) || (STATS_CONTEXT_MAGIC != pStatsContext->magic))
{
/* the caller presumably timed out so there is nothing we can do */
spin_unlock(&hdd_context_lock);
hddLog(VOS_TRACE_LEVEL_WARN,
"%s: Invalid context, pAdapter [%p] magic [%08x]",
__func__, pAdapter, pStatsContext->magic);
if (ioctl_debug)
{
pr_info("%s: Invalid context, pAdapter [%p] magic [%08x]\n",
__func__, pAdapter, pStatsContext->magic);
}
return;
}
/* context is valid so caller is still waiting */
/* paranoia: invalidate the magic */
pStatsContext->magic = 0;
/* copy over the stats. do so as a struct copy */
pAdapter->hdd_stats.ClassA_stat = *pClassAStats;
/* notify the caller */
complete(&pStatsContext->completion);
/* serialization is complete */
spin_unlock(&hdd_context_lock);
}
VOS_STATUS wlan_hdd_get_classAstats(hdd_adapter_t *pAdapter)
{
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
eHalStatus hstatus;
long lrc;
struct statsContext context;
if (NULL == pAdapter)
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: Padapter is NULL", __func__);
return VOS_STATUS_E_FAULT;
}
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "%s:LOGP in Progress. Ignore!!!",__func__);
return VOS_STATUS_SUCCESS;
}
/* we are connected; prepare our callback context */
init_completion(&context.completion);
context.pAdapter = pAdapter;
context.magic = STATS_CONTEXT_MAGIC;
/* query only for Class A statistics (which include link speed) */
hstatus = sme_GetStatistics( WLAN_HDD_GET_HAL_CTX(pAdapter),
eCSR_HDD,
SME_GLOBAL_CLASSA_STATS,
hdd_GetClassA_statisticsCB,
0, // not periodic
FALSE, //non-cached results
pHddStaCtx->conn_info.staId[0],
&context);
if (eHAL_STATUS_SUCCESS != hstatus)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Unable to retrieve Class A statistics",
__func__);
/* we'll return a cached value below */
}
else
{
/* request was sent -- wait for the response */
lrc = wait_for_completion_interruptible_timeout(&context.completion,
msecs_to_jiffies(WLAN_WAIT_TIME_STATS));
if (lrc <= 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: SME %s while retrieving Class A statistics",
__func__, (0 == lrc) ? "timeout" : "interrupt");
}
}
/* either we never sent a request, we sent a request and received a
response or we sent a request and timed out. if we never sent a
request or if we sent a request and got a response, we want to
clear the magic out of paranoia. if we timed out there is a
race condition such that the callback function could be
executing at the same time we are. of primary concern is if the
callback function had already verified the "magic" but had not
yet set the completion variable when a timeout occurred. we
serialize these activities by invalidating the magic while
holding a shared spinlock which will cause us to block if the
callback is currently executing */
spin_lock(&hdd_context_lock);
context.magic = 0;
spin_unlock(&hdd_context_lock);
/* either callback updated pAdapter stats or it has cached data */
return VOS_STATUS_SUCCESS;
}
static void hdd_get_station_statisticsCB(void *pStats, void *pContext)
{
struct statsContext *pStatsContext;
tCsrSummaryStatsInfo *pSummaryStats;
tCsrGlobalClassAStatsInfo *pClassAStats;
hdd_adapter_t *pAdapter;
if (ioctl_debug)
{
pr_info("%s: pStats [%p] pContext [%p]\n",
__func__, pStats, pContext);
}
if ((NULL == pStats) || (NULL == pContext))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Bad param, pStats [%p] pContext [%p]",
__func__, pStats, pContext);
return;
}
/* there is a race condition that exists between this callback
function and the caller since the caller could time out either
before or while this code is executing. we use a spinlock to
serialize these actions */
spin_lock(&hdd_context_lock);
pSummaryStats = (tCsrSummaryStatsInfo *)pStats;
pClassAStats = (tCsrGlobalClassAStatsInfo *)( pSummaryStats + 1 );
pStatsContext = pContext;
pAdapter = pStatsContext->pAdapter;
if ((NULL == pAdapter) || (STATS_CONTEXT_MAGIC != pStatsContext->magic))
{
/* the caller presumably timed out so there is nothing we can do */
spin_unlock(&hdd_context_lock);
hddLog(VOS_TRACE_LEVEL_WARN,
"%s: Invalid context, pAdapter [%p] magic [%08x]",
__func__, pAdapter, pStatsContext->magic);
if (ioctl_debug)
{
pr_info("%s: Invalid context, pAdapter [%p] magic [%08x]\n",
__func__, pAdapter, pStatsContext->magic);
}
return;
}
/* context is valid so caller is still waiting */
/* paranoia: invalidate the magic */
pStatsContext->magic = 0;
/* copy over the stats. do so as a struct copy */
pAdapter->hdd_stats.summary_stat = *pSummaryStats;
pAdapter->hdd_stats.ClassA_stat = *pClassAStats;
/* notify the caller */
complete(&pStatsContext->completion);
/* serialization is complete */
spin_unlock(&hdd_context_lock);
}
VOS_STATUS wlan_hdd_get_station_stats(hdd_adapter_t *pAdapter)
{
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
eHalStatus hstatus;
long lrc;
struct statsContext context;
if (NULL == pAdapter)
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: Padapter is NULL", __func__);
return VOS_STATUS_SUCCESS;
}
/* we are connected; prepare our callback context */
init_completion(&context.completion);
context.pAdapter = pAdapter;
context.magic = STATS_CONTEXT_MAGIC;
/* query only for Summary & Class A statistics */
hstatus = sme_GetStatistics(WLAN_HDD_GET_HAL_CTX(pAdapter),
eCSR_HDD,
SME_SUMMARY_STATS |
SME_GLOBAL_CLASSA_STATS,
hdd_get_station_statisticsCB,
0, // not periodic
FALSE, //non-cached results
pHddStaCtx->conn_info.staId[0],
&context);
if (eHAL_STATUS_SUCCESS != hstatus)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Unable to retrieve statistics",
__func__);
/* we'll return with cached values */
}
else
{
/* request was sent -- wait for the response */
lrc = wait_for_completion_interruptible_timeout(&context.completion,
msecs_to_jiffies(WLAN_WAIT_TIME_STATS));
if (lrc <= 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: SME %s while retrieving statistics",
__func__, (0 == lrc) ? "timeout" : "interrupt");
}
}
/* either we never sent a request, we sent a request and received a
response or we sent a request and timed out. if we never sent a
request or if we sent a request and got a response, we want to
clear the magic out of paranoia. if we timed out there is a
race condition such that the callback function could be
executing at the same time we are. of primary concern is if the
callback function had already verified the "magic" but had not
yet set the completion variable when a timeout occurred. we
serialize these activities by invalidating the magic while
holding a shared spinlock which will cause us to block if the
callback is currently executing */
spin_lock(&hdd_context_lock);
context.magic = 0;
spin_unlock(&hdd_context_lock);
/* either callback updated pAdapter stats or it has cached data */
return VOS_STATUS_SUCCESS;
}
/*
* Support for the LINKSPEED private command
* Per the WiFi framework the response must be of the form
* "LinkSpeed xx"
*/
static int iw_get_linkspeed(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_context_t *pHddCtx;
char *pLinkSpeed = (char*)extra;
int len = sizeof(v_U32_t) + 1;
v_U32_t link_speed;
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
VOS_STATUS status;
int rc, valid;
pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
valid = wlan_hdd_validate_context(pHddCtx);
if (0 != valid)
{
hddLog(VOS_TRACE_LEVEL_ERROR, FL("HDD context is not valid"));
return valid;
}
if (eConnectionState_Associated != pHddStaCtx->conn_info.connState)
{
/* we are not connected so we don't have a classAstats */
link_speed = 0;
}
else
{
status = wlan_hdd_get_classAstats(pAdapter);
if (!VOS_IS_STATUS_SUCCESS(status ))
{
hddLog(VOS_TRACE_LEVEL_ERROR, FL("Unable to retrieve SME statistics"));
return -EINVAL;
}
/* The link capacity obtained from the TL API is in units of Mbps x 10 */
WLANTL_GetSTALinkCapacity(WLAN_HDD_GET_CTX(pAdapter)->pvosContext,
(WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0],
&link_speed);
link_speed = link_speed / 10;
if (0 == link_speed)
{
/* The link speed returned by HAL is in units of 500 kbps;
* convert it to Mbps. This fallback is required to support
* legacy firmware which does not report link capacity.
*/
link_speed = pAdapter->hdd_stats.ClassA_stat.tx_rate/2;
}
}
wrqu->data.length = len;
// return the linkspeed in the format required by the WiFi Framework
rc = snprintf(pLinkSpeed, len, "%u", link_speed);
if ((rc < 0) || (rc >= len))
{
// encoding or length error?
hddLog(VOS_TRACE_LEVEL_ERROR,FL("Unable to encode link speed"));
return -EIO;
}
/* a value is being successfully returned */
return rc;
}
/*
* Helper function to return the correct value for WLAN_GET_LINK_SPEED
*/
static int __iw_get_linkspeed_priv(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int rc;
rc = iw_get_linkspeed(dev, info, wrqu, extra);
if (rc < 0)
return rc;
/* a value is being successfully returned */
return 0;
}
static int iw_get_linkspeed_priv(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_linkspeed_priv(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
/*
* Support for the RSSI & RSSI-APPROX private commands
* Per the WiFi framework the response must be of the form
* "<ssid> rssi <xx>"
* unless we are not associated, in which case the response is
* "OK"
*/
static int iw_get_rssi(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
char *cmd = extra;
int len = wrqu->data.length;
v_S7_t s7Rssi = 0;
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
int ssidlen = pHddStaCtx->conn_info.SSID.SSID.length;
VOS_STATUS vosStatus;
int rc;
if ((eConnectionState_Associated != pHddStaCtx->conn_info.connState) ||
(0 == ssidlen) || (ssidlen >= len))
{
/* we are not connected or our SSID is too long
so we cannot report an rssi */
rc = scnprintf(cmd, len, "OK");
}
else
{
/* we are connected with a valid SSID
so we can write the SSID into the return buffer
(note that it is not NUL-terminated) */
memcpy(cmd, pHddStaCtx->conn_info.SSID.SSID.ssId, ssidlen );
vosStatus = wlan_hdd_get_rssi(pAdapter, &s7Rssi);
if (VOS_STATUS_SUCCESS == vosStatus)
{
/* append the rssi to the ssid in the format required by
the WiFi framework */
rc = scnprintf(&cmd[ssidlen], len - ssidlen, " rssi %d", s7Rssi);
rc += ssidlen;
}
else
{
rc = -1;
}
}
/* verify that we wrote a valid response */
if ((rc < 0) || (rc >= len))
{
// encoding or length error?
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Unable to encode RSSI, got [%s]",
__func__, cmd);
return -EIO;
}
/* a value is being successfully returned */
return rc;
}
/*
* Support for SoftAP channel range private command
*/
static int iw_softap_set_channel_range( struct net_device *dev,
int startChannel,
int endChannel,
int band)
{
VOS_STATUS status;
int ret = 0;
hdd_adapter_t *pHostapdAdapter = (netdev_priv(dev));
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pHostapdAdapter);
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pHostapdAdapter);
status = WLANSAP_SetChannelRange(hHal, startChannel, endChannel, band);
if (VOS_STATUS_SUCCESS != status)
{
ret = -EINVAL;
}
pHddCtx->is_dynamic_channel_range_set = 1;
return ret;
}
static uint8 chartohex(char c)
{
uint8 val = 0;
if (c >= '0' && c <= '9')
val = c - '0';
else if (c >= 'a' && c <= 'f')
val = c - 'a' + 10;
else if (c >= 'A' && c <= 'F')
val = c - 'A' + 10;
else
hddLog(VOS_TRACE_LEVEL_ERROR, "Not a valid hex char");
return val;
}
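/*
 * Reads two ASCII hex digits from *buf and advances the cursor past
 * them; e.g. with *buf -> "1aff", getByte() returns 0x1a and leaves
 * *buf -> "ff".
 */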
uint8 getByte(char **buf)
{
uint8 byte = 0;
char *temp = *buf;
byte = chartohex(*temp) * 16;
temp++;
byte += chartohex(*temp);
temp++;
*buf = temp;
return byte;
}
static void parse_Bufferforpkt(tSirpkt80211 *pkt, u8 *pBuffer, u16 len)
{
tSir80211Header *macHeader;
int i = 0, j = 0, length = 0;
uint8 byte = 0;
char *temp = (char *)pBuffer;
uint16 fragNum = 0;
macHeader = &pkt->macHeader;
pkt->encParams.keyParams.key[0].keyId = *temp - '0';
temp++;
hddLog(VOS_TRACE_LEVEL_ERROR, "Input Message to encrypt");
hddLog(VOS_TRACE_LEVEL_ERROR, "Key Id : %d",
pkt->encParams.keyParams.key[0].keyId);
for (i = 0; i< 16; i++) {
pkt->encParams.keyParams.key[0].key[i]
= getByte(&temp);
}
print_hex_dump(KERN_INFO, "Key : ", DUMP_PREFIX_NONE, 16, 1,
&pkt->encParams.keyParams.key[0].key[0], 16, 0);
for (i = 0; i< 6; i++) {
pkt->encParams.pn[i]
= getByte(&temp);
}
print_hex_dump(KERN_INFO, "PN : ", DUMP_PREFIX_NONE, 16, 1,
&pkt->encParams.pn[0], 6, 0);
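/*
 * The six PN bytes were parsed in string order; the swap below
 * reverses them (pn[0]<->pn[5], pn[1]<->pn[4], pn[2]<->pn[3]),
 * presumably because the firmware expects the packet number in the
 * opposite byte order; e.g. 01 02 03 04 05 06 becomes 06 05 04 03 02 01.
 */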
for (i = 0, j= 5; i< 3; i++, j--) {
byte = pkt->encParams.pn[i];
pkt->encParams.pn[i] = pkt->encParams.pn[j];
pkt->encParams.pn[j] = byte;
}
length = getByte(&temp);
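/*
 * The first frame-control octet packs, from bit 0 upward: protocol
 * version (2 bits), type (2 bits) and subtype (4 bits). The second
 * octet holds the toDS/fromDS/moreFrag/retry/powerMgmt/moreData/wep/
 * order flags, one bit each, which is what the shifts and masks below
 * unpack.
 */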
byte = getByte(&temp);
macHeader->frameCtrl.protVer = byte & 0x3;
macHeader->frameCtrl.type = (byte >> 2) & 0x3;
macHeader->frameCtrl.subType = (byte >> 4) & 0xF;
byte = getByte(&temp);
macHeader->frameCtrl.toDS = (byte) & 0x1;
macHeader->frameCtrl.fromDS = (byte >> 1) & 0x1;
macHeader->frameCtrl.moreFrag = (byte >> 2) & 0x1;
macHeader->frameCtrl.retry = (byte >> 3) & 0x1;
macHeader->frameCtrl.powerMgmt = (byte >> 4) & 0x1;
macHeader->frameCtrl.moreData = (byte >> 5) & 0x1;
macHeader->frameCtrl.wep = (byte >> 6) & 0x1;
macHeader->frameCtrl.order = (byte >> 7) & 0x1;
hddLog(VOS_TRACE_LEVEL_INFO, "macHeader->frameCtrl.protVer : %x "
"macHeader->frameCtrl.type : %x "
"macHeader->frameCtrl.subType : %x "
"macHeader->frameCtrl.toDS : %x "
"macHeader->frameCtrl.fromDS : %x "
"macHeader->frameCtrl.moreFrag : %x "
"macHeader->frameCtrl.retry : %x "
"macHeader->frameCtrl.powerMgmt : %x "
"macHeader->frameCtrl.MoreData : %x "
"macHeader->frameCtrl.wep : %x "
"macHeader->frameCtrl.order : %x "
, macHeader->frameCtrl.protVer
, macHeader->frameCtrl.type
, macHeader->frameCtrl.subType
, macHeader->frameCtrl.toDS
, macHeader->frameCtrl.fromDS
, macHeader->frameCtrl.moreFrag
, macHeader->frameCtrl.retry
, macHeader->frameCtrl.powerMgmt
, macHeader->frameCtrl.moreData
, macHeader->frameCtrl.wep
, macHeader->frameCtrl.order);
macHeader->usDurationId = getByte(&temp);
macHeader->usDurationId += getByte(&temp) << 8;
macHeader->vA1[0] = getByte(&temp);
macHeader->vA1[1] = getByte(&temp);
macHeader->vA1[2] = getByte(&temp);
macHeader->vA1[3] = getByte(&temp);
macHeader->vA1[4] = getByte(&temp);
macHeader->vA1[5] = getByte(&temp);
macHeader->vA2[0] = getByte(&temp);
macHeader->vA2[1] = getByte(&temp);
macHeader->vA2[2] = getByte(&temp);
macHeader->vA2[3] = getByte(&temp);
macHeader->vA2[4] = getByte(&temp);
macHeader->vA2[5] = getByte(&temp);
macHeader->vA3[0] = getByte(&temp);
macHeader->vA3[1] = getByte(&temp);
macHeader->vA3[2] = getByte(&temp);
macHeader->vA3[3] = getByte(&temp);
macHeader->vA3[4] = getByte(&temp);
macHeader->vA3[5] = getByte(&temp);
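/*
 * On the air the sequence-control field is little endian with the
 * fragment number in the low nibble: byte0 = (seq << 4) | frag,
 * byte1 = seq >> 4. The shuffle below rebuilds it with the sequence
 * number in bits 0-11 and the fragment number in bits 12-15; e.g.
 * bytes 0x23 0x01 (frag 3, seq 0x012) become sSeqCtrl = 0x3012.
 */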
macHeader->sSeqCtrl = getByte(&temp);
fragNum = macHeader->sSeqCtrl & 0xF;
macHeader->sSeqCtrl >>= 4;
macHeader->sSeqCtrl += getByte(&temp) << 4;
macHeader->sSeqCtrl |= fragNum << 12;
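/*
 * The header length parsed earlier selects the optional fields:
 * 24 bytes is the basic 3-address header, 26 adds the QoS control
 * field, 30 adds a fourth address (WDS), and 32 carries both.
 */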
if (length == 30 || length == 32) {
macHeader->optvA4[0] = getByte(&temp);
macHeader->optvA4[1] = getByte(&temp);
macHeader->optvA4[2] = getByte(&temp);
macHeader->optvA4[3] = getByte(&temp);
macHeader->optvA4[4] = getByte(&temp);
macHeader->optvA4[5] = getByte(&temp);
}
if (length == 26 || length == 32) {
macHeader->usQosCtrl = getByte(&temp);
macHeader->usQosCtrl += getByte(&temp) << 8;
}
print_hex_dump(KERN_INFO, "Header : ", DUMP_PREFIX_NONE, 16, 1,
(char *)&pkt->macHeader, sizeof(tSir80211Header), 0);
//parse payload
length = getByte(&temp);
length += getByte(&temp) << 8;
hddLog(VOS_TRACE_LEVEL_INFO,"Payload length : %d", length);
pkt->data.length = length;
for (i = 0; i< length; i++) {
pkt->data.data[i] = getByte(&temp);
}
print_hex_dump(KERN_INFO, "Data : ", DUMP_PREFIX_NONE, 16, 1,
&pkt->data.data[0], pkt->data.length, 0);
}
/**---------------------------------------------------------------------------
\brief hdd_encrypt_msg_cb() - Callback function for a DISA
encrypt message request
This is an asynchronous callback from SME, invoked when the encrypted
data is received
\pEncInfoRsp -> Encrypted data info
\return - None; the callback only logs the encrypted payload
--------------------------------------------------------------------------*/
static void
hdd_encrypt_msg_cb(v_VOID_t *pUserData, v_VOID_t *pEncInfoRsp)
{
tpSetEncryptedDataRspParams pEncryptedDataRsp;
pEncryptedDataRsp = (tpSetEncryptedDataRspParams)pEncInfoRsp;
hddLog(VOS_TRACE_LEVEL_ERROR, "Encrypted Message");
hddLog(VOS_TRACE_LEVEL_ERROR, "Length : %d",
pEncryptedDataRsp->encryptedPayload.length);
hddLog(VOS_TRACE_LEVEL_ERROR, " Encrypted Data: ");
print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1,
pEncryptedDataRsp->encryptedPayload.data,
pEncryptedDataRsp->encryptedPayload.length, 0);
}
VOS_STATUS wlan_hdd_enter_bmps(hdd_adapter_t *pAdapter, int mode)
{
struct statsContext context;
eHalStatus status;
hdd_context_t *pHddCtx;
if (NULL == pAdapter)
{
hddLog(VOS_TRACE_LEVEL_FATAL, "Adapter NULL");
return VOS_STATUS_E_FAULT;
}
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "power mode=%d", mode);
pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
if (pHddCtx->isLogpInProgress) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s:LOGP in Progress. Ignore!!!", __func__);
return VOS_STATUS_E_FAILURE;
}
init_completion(&context.completion);
context.pAdapter = pAdapter;
context.magic = POWER_CONTEXT_MAGIC;
if (DRIVER_POWER_MODE_ACTIVE == mode)
{
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "%s:Wlan driver Entering "
"Full Power", __func__);
status = sme_RequestFullPower(WLAN_HDD_GET_HAL_CTX(pAdapter),
iw_power_callback_fn, &context,
eSME_FULL_PWR_NEEDED_BY_HDD);
// Enter Full power command received from GUI this means we are disconnected
// Set PMC remainInPowerActiveTillDHCP flag to disable auto BMPS entry by PMC
sme_SetDHCPTillPowerActiveFlag(pHddCtx->hHal, TRUE);
if (eHAL_STATUS_PMC_PENDING == status)
{
/* request was sent -- wait for the response */
int lrc = wait_for_completion_interruptible_timeout(
&context.completion,
msecs_to_jiffies(WLAN_WAIT_TIME_POWER));
if (lrc <= 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: SME %s while requesting fullpower ",
__func__, (0 == lrc) ? "timeout" : "interrupt");
}
}
}
else if (DRIVER_POWER_MODE_AUTO == mode)
{
if (pHddCtx->cfg_ini->fIsBmpsEnabled)
{
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "%s:Wlan driver Entering Bmps ",
__func__);
// Enter BMPS command received from GUI this means DHCP is completed
// Clear PMC remainInPowerActiveTillDHCP flag to enable auto BMPS entry
sme_SetDHCPTillPowerActiveFlag(WLAN_HDD_GET_HAL_CTX(pAdapter),
FALSE);
status = sme_RequestBmps(WLAN_HDD_GET_HAL_CTX(pAdapter),
iw_power_callback_fn, &context);
if (eHAL_STATUS_PMC_PENDING == status)
{
/* request was sent -- wait for the response */
int lrc = wait_for_completion_interruptible_timeout(
&context.completion,
msecs_to_jiffies(WLAN_WAIT_TIME_POWER));
if (lrc <= 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: SME %s while requesting BMPS",
__func__, (0 == lrc) ? "timeout" : "interrupt");
}
}
}
else
{
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "BMPS is not "
"enabled in the cfg");
}
}
/* either we never sent a request, we sent a request and received a
response or we sent a request and timed out. if we never sent a
request or if we sent a request and got a response, we want to
clear the magic out of paranoia. if we timed out there is a
race condition such that the callback function could be
executing at the same time we are. of primary concern is if the
callback function had already verified the "magic" but had not
yet set the completion variable when a timeout occurred. we
serialize these activities by invalidating the magic while
holding a shared spinlock which will cause us to block if the
callback is currently executing */
spin_lock(&hdd_context_lock);
context.magic = 0;
spin_unlock(&hdd_context_lock);
return VOS_STATUS_SUCCESS;
}
VOS_STATUS wlan_hdd_exit_lowpower(hdd_context_t *pHddCtx,
hdd_adapter_t *pAdapter)
{
VOS_STATUS vos_Status;
if ((NULL == pAdapter) || (NULL == pHddCtx))
{
hddLog(VOS_TRACE_LEVEL_FATAL, "Invalid pointer");
return VOS_STATUS_E_FAULT;
}
/* Exit from deep sleep or standby if we get the driver
START cmd from the Android GUI */
if (WLAN_MAP_DRIVER_STOP_TO_STANDBY == pHddCtx->cfg_ini->nEnableDriverStop)
{
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "%s: WLAN being exit "
"from Stand by",__func__);
vos_Status = hdd_exit_standby(pHddCtx);
}
else if (eHDD_SUSPEND_DEEP_SLEEP == pHddCtx->hdd_ps_state)
{
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "%s: WLAN being exit "
"from deep sleep",__func__);
vos_Status = hdd_exit_deep_sleep(pHddCtx, pAdapter);
}
else
{
hddLog(VOS_TRACE_LEVEL_WARN, "%s: Not in standby or deep sleep. "
"Ignore start cmd %d", __func__, pHddCtx->hdd_ps_state);
vos_Status = VOS_STATUS_SUCCESS;
}
return vos_Status;
}
VOS_STATUS wlan_hdd_enter_lowpower(hdd_context_t *pHddCtx)
{
VOS_STATUS vos_Status = VOS_STATUS_E_FAILURE;
if (NULL == pHddCtx)
{
hddLog(VOS_TRACE_LEVEL_ERROR, "HDD context NULL");
return VOS_STATUS_E_FAULT;
}
if (WLAN_MAP_DRIVER_STOP_TO_STANDBY == pHddCtx->cfg_ini->nEnableDriverStop)
{
//Execute standby procedure.
//Executing standby procedure will cause the STA to
//disassociate first and then the chip will be put into standby.
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "Wlan driver entering Stand by mode");
vos_Status = hdd_enter_standby(pHddCtx);
}
else if (WLAN_MAP_DRIVER_STOP_TO_DEEP_SLEEP ==
pHddCtx->cfg_ini->nEnableDriverStop)
{
//Execute deep sleep procedure
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "Wlan driver entering "
"deep sleep mode");
//Deep sleep is not supported; fall back to standby
vos_Status = hdd_enter_standby(pHddCtx);
}
else
{
hddLog(VOS_TRACE_LEVEL_INFO_LOW, "%s: Driver stop is not enabled %d",
__func__, pHddCtx->cfg_ini->nEnableDriverStop);
vos_Status = VOS_STATUS_SUCCESS;
}
return vos_Status;
}
void* wlan_hdd_change_country_code_callback(void *pAdapter)
{
hdd_adapter_t *call_back_pAdapter = pAdapter;
complete(&call_back_pAdapter->change_country_code);
return NULL;
}
static int __iw_set_priv(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
char *cmd = NULL;
int cmd_len = wrqu->data.length;
int ret = 0;
int rc = 0;
VOS_STATUS vos_status = VOS_STATUS_SUCCESS;
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
ENTER();
cmd = mem_alloc_copy_from_user_helper(wrqu->data.pointer,
wrqu->data.length);
if (NULL == cmd)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"mem_alloc_copy_from_user_helper fail");
return -ENOMEM;
}
if (ioctl_debug)
{
pr_info("%s: req [%s] len [%d]\n", __func__, cmd, cmd_len);
}
hddLog(VOS_TRACE_LEVEL_INFO_MED,
"%s: ***Received %s cmd from Wi-Fi GUI***", __func__, cmd);
if (pHddCtx->isLogpInProgress) {
if (ioctl_debug)
{
pr_info("%s: RESTART in progress\n", __func__);
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!",__func__);
kfree(cmd);
return -EBUSY;
}
if (strncmp(cmd, "CSCAN", 5) == 0 )
{
if (eHAL_STATUS_SUCCESS != iw_set_cscan(dev, info, wrqu, cmd)) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: Error in iw_set_scan!", __func__);
rc = -EINVAL;
}
}
else if( strcasecmp(cmd, "start") == 0 ) {
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "Start command");
/*Exit from Deep sleep or standby if we get the driver START cmd from android GUI*/
vos_status = wlan_hdd_exit_lowpower(pHddCtx, pAdapter);
if (vos_status == VOS_STATUS_SUCCESS)
{
union iwreq_data wrqu;
char buf[10];
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.length = strlcpy(buf, "START", sizeof(buf));
wireless_send_event(pAdapter->dev, IWEVCUSTOM, &wrqu, buf);
}
else
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: START CMD Status %d", __func__, vos_status);
rc = -EIO;
}
goto done;
}
else if( strcasecmp(cmd, "stop") == 0 )
{
union iwreq_data wrqu;
char buf[10];
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "Stop command");
wlan_hdd_enter_lowpower(pHddCtx);
memset(&wrqu, 0, sizeof(wrqu));
wrqu.data.length = strlcpy(buf, "STOP", sizeof(buf));
wireless_send_event(pAdapter->dev, IWEVCUSTOM, &wrqu, buf);
goto done;
}
else if (strcasecmp(cmd, "macaddr") == 0)
{
ret = snprintf(cmd, cmd_len, "Macaddr = " MAC_ADDRESS_STR,
MAC_ADDR_ARRAY(pAdapter->macAddressCurrent.bytes));
}
else if (strcasecmp(cmd, "scan-active") == 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
FL("making default scan to active"));
pHddCtx->scan_info.scan_mode = eSIR_ACTIVE_SCAN;
ret = snprintf(cmd, cmd_len, "OK");
}
else if (strcasecmp(cmd, "scan-passive") == 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
FL("making default scan to passive"));
pHddCtx->scan_info.scan_mode = eSIR_PASSIVE_SCAN;
ret = snprintf(cmd, cmd_len, "OK");
}
else if( strcasecmp(cmd, "scan-mode") == 0 )
{
ret = snprintf(cmd, cmd_len, "ScanMode = %u", pHddCtx->scan_info.scan_mode);
}
else if( strcasecmp(cmd, "linkspeed") == 0 )
{
ret = iw_get_linkspeed(dev, info, wrqu, cmd);
}
else if( strncasecmp(cmd, "COUNTRY", 7) == 0 ) {
char *country_code;
long lrc;
eHalStatus eHal_status;
country_code = cmd + 8;
init_completion(&pAdapter->change_country_code);
eHal_status = sme_ChangeCountryCode(pHddCtx->hHal,
(void *)(tSmeChangeCountryCallback)wlan_hdd_change_country_code_callback,
country_code,
pAdapter,
pHddCtx->pvosContext,
eSIR_TRUE,
eSIR_TRUE);
/* Wait for completion */
lrc = wait_for_completion_interruptible_timeout(&pAdapter->change_country_code,
msecs_to_jiffies(WLAN_WAIT_TIME_STATS));
if (lrc <= 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: SME %s while setting country code ",
__func__, "Timed out");
}
if (eHAL_STATUS_SUCCESS != eHal_status)
{
VOS_TRACE( VOS_MODULE_ID_VOSS, VOS_TRACE_LEVEL_ERROR,
"%s: SME Change Country code fail", __func__);
kfree(cmd);
return -EIO;
}
}
else if( strncasecmp(cmd, "rssi", 4) == 0 )
{
ret = iw_get_rssi(dev, info, wrqu, cmd);
}
else if( strncasecmp(cmd, "powermode", 9) == 0 ) {
int mode;
char *ptr;
if (9 < cmd_len)
{
ptr = (char*)(cmd + 9);
}else{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"CMD LENGTH %d is not correct",cmd_len);
kfree(cmd);
return -EINVAL;
}
if (1 != sscanf(ptr,"%d",&mode))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"powermode input %s is not correct",ptr);
kfree(cmd);
return -EIO;
}
wlan_hdd_enter_bmps(pAdapter, mode);
/*TODO:Set the power mode*/
}
else if (strncasecmp(cmd, "getpower", 8) == 0 ) {
v_U32_t pmc_state;
v_U16_t value;
pmc_state = pmcGetPmcState(WLAN_HDD_GET_HAL_CTX(pAdapter));
if(pmc_state == BMPS) {
value = DRIVER_POWER_MODE_AUTO;
}
else {
value = DRIVER_POWER_MODE_ACTIVE;
}
ret = snprintf(cmd, cmd_len, "powermode = %u", value);
}
else if( strncasecmp(cmd, "btcoexmode", 10) == 0 ) {
hddLog( VOS_TRACE_LEVEL_INFO, "btcoexmode");
/*TODO: set the btcoexmode*/
}
else if( strcasecmp(cmd, "btcoexstat") == 0 ) {
hddLog(VOS_TRACE_LEVEL_INFO, "BtCoex Status");
/*TODO: Return the btcoex status*/
}
else if( strcasecmp(cmd, "rxfilter-start") == 0 ) {
hddLog(VOS_TRACE_LEVEL_INFO, "Rx Data Filter Start command");
/*TODO: Enable Rx data Filter*/
}
else if( strcasecmp(cmd, "rxfilter-stop") == 0 ) {
hddLog(VOS_TRACE_LEVEL_INFO, "Rx Data Filter Stop command");
/*TODO: Disable Rx data Filter*/
}
else if( strcasecmp(cmd, "rxfilter-statistics") == 0 ) {
hddLog( VOS_TRACE_LEVEL_INFO, "Rx Data Filter Statistics command");
/*TODO: rxfilter-statistics*/
}
else if( strncasecmp(cmd, "rxfilter-add", 12) == 0 ) {
hddLog( VOS_TRACE_LEVEL_INFO, "rxfilter-add");
/*TODO: rxfilter-add*/
}
else if( strncasecmp(cmd, "rxfilter-remove",15) == 0 ) {
hddLog( VOS_TRACE_LEVEL_INFO, "rxfilter-remove");
/*TODO: rxfilter-remove*/
}
#ifdef FEATURE_WLAN_SCAN_PNO
else if( strncasecmp(cmd, "pnosetup", 8) == 0 ) {
hddLog( VOS_TRACE_LEVEL_INFO, "pnosetup");
/*TODO: support pnosetup*/
}
else if( strncasecmp(cmd, "pnoforce", 8) == 0 ) {
hddLog( VOS_TRACE_LEVEL_INFO, "pnoforce");
/*TODO: support pnoforce*/
}
else if( strncasecmp(cmd, "pno",3) == 0 ) {
hddLog( VOS_TRACE_LEVEL_INFO, "pno");
vos_status = iw_set_pno(dev, info, wrqu, cmd, 3);
kfree(cmd);
return (vos_status == VOS_STATUS_SUCCESS) ? 0 : -EINVAL;
}
else if( strncasecmp(cmd, "rssifilter",10) == 0 ) {
hddLog( VOS_TRACE_LEVEL_INFO, "rssifilter");
vos_status = iw_set_rssi_filter(dev, info, wrqu, cmd, 10);
kfree(cmd);
return (vos_status == VOS_STATUS_SUCCESS) ? 0 : -EINVAL;
}
#endif /*FEATURE_WLAN_SCAN_PNO*/
else if( strncasecmp(cmd, "powerparams",11) == 0 ) {
hddLog( VOS_TRACE_LEVEL_INFO, "powerparams");
vos_status = iw_set_power_params(dev, info, wrqu, cmd, 11);
kfree(cmd);
return (vos_status == VOS_STATUS_SUCCESS) ? 0 : -EINVAL;
}
else if( 0 == strncasecmp(cmd, "CONFIG-TX-TRACKING", 18) ) {
tSirTxPerTrackingParam tTxPerTrackingParam;
char *ptr;
if (18 < cmd_len)
{
ptr = (char*)(cmd + 18);
}else{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"CMD LENGTH %d is not correct",cmd_len);
kfree(cmd);
return -EINVAL;
}
if (4 != sscanf(ptr,"%hhu %hhu %hhu %u",
&(tTxPerTrackingParam.ucTxPerTrackingEnable),
&(tTxPerTrackingParam.ucTxPerTrackingPeriod),
&(tTxPerTrackingParam.ucTxPerTrackingRatio),
&(tTxPerTrackingParam.uTxPerTrackingWatermark)))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"CONFIG-TX-TRACKING %s input is not correct",ptr);
kfree(cmd);
return -EIO;
}
// parameters checking
// period has to be larger than 0
if (0 == tTxPerTrackingParam.ucTxPerTrackingPeriod)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, "Period input is not correct");
kfree(cmd);
return -EIO;
}
// use the default value (5, in units of 10%) if the input is not reasonable
if ((tTxPerTrackingParam.ucTxPerTrackingRatio > TX_PER_TRACKING_MAX_RATIO) || (0 == tTxPerTrackingParam.ucTxPerTrackingRatio))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, "Ratio input is not good. use default 5");
tTxPerTrackingParam.ucTxPerTrackingRatio = TX_PER_TRACKING_DEFAULT_RATIO;
}
// default is 5
if (0 == tTxPerTrackingParam.uTxPerTrackingWatermark)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, "Tx Packet number input is not good. use default 5");
tTxPerTrackingParam.uTxPerTrackingWatermark = TX_PER_TRACKING_DEFAULT_WATERMARK;
}
if (eHAL_STATUS_SUCCESS !=
sme_SetTxPerTracking(pHddCtx->hHal,
hdd_tx_per_hit_cb,
(void*)pAdapter, &tTxPerTrackingParam)) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, "Set Tx PER Tracking Failed!");
rc = -EIO;
}
}
else {
hddLog( VOS_TRACE_LEVEL_WARN, "%s: Unsupported GUI command %s",
__func__, cmd);
}
done:
/* many of the commands write information back into the command
string using snprintf(). check the return value here in one
place */
if ((ret < 0) || (ret >= cmd_len))
{
/* there was an encoding error or overflow */
rc = -EINVAL;
}
else if (ret > 0)
{
if (copy_to_user(wrqu->data.pointer, cmd, ret))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: failed to copy data to user buffer", __func__);
kfree(cmd);
return -EFAULT;
}
wrqu->data.length = ret;
}
if (ioctl_debug)
{
pr_info("%s: rsp [%s] len [%d] status %d\n",
__func__, cmd, wrqu->data.length, rc);
}
kfree(cmd);
return rc;
}
static int iw_set_priv(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_priv(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int iw_set_nick(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
ENTER();
return 0;
}
static int iw_get_nick(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
ENTER();
return 0;
}
static struct iw_statistics *get_wireless_stats(struct net_device *dev)
{
ENTER();
return NULL;
}
static int __iw_set_encode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
hdd_wext_state_t *pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
struct iw_point *encoderq = &(wrqu->encoding);
v_U32_t keyId;
v_U8_t key_length;
eCsrEncryptionType encryptionType = eCSR_ENCRYPT_TYPE_NONE;
v_BOOL_t fKeyPresent = 0;
int i;
eHalStatus status = eHAL_STATUS_SUCCESS;
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!",__func__);
return 0;
}
keyId = encoderq->flags & IW_ENCODE_INDEX;
if(keyId)
{
if(keyId > MAX_WEP_KEYS)
{
return -EINVAL;
}
fKeyPresent = 1;
keyId--;
}
else
{
fKeyPresent = 0;
}
if(wrqu->data.flags & IW_ENCODE_DISABLED)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "****iwconfig wlan0 key off*****");
if(!fKeyPresent) {
for(i=0;i < CSR_MAX_NUM_KEY; i++) {
if(pWextState->roamProfile.Keys.KeyMaterial[i])
pWextState->roamProfile.Keys.KeyLength[i] = 0;
}
}
pHddStaCtx->conn_info.authType = eCSR_AUTH_TYPE_OPEN_SYSTEM;
pWextState->wpaVersion = IW_AUTH_WPA_VERSION_DISABLED;
pWextState->roamProfile.EncryptionType.encryptionType[0] = eCSR_ENCRYPT_TYPE_NONE;
pWextState->roamProfile.mcEncryptionType.encryptionType[0] = eCSR_ENCRYPT_TYPE_NONE;
pHddStaCtx->conn_info.ucEncryptionType = eCSR_ENCRYPT_TYPE_NONE;
pHddStaCtx->conn_info.mcEncryptionType = eCSR_ENCRYPT_TYPE_NONE;
if(eConnectionState_Associated == pHddStaCtx->conn_info.connState)
{
INIT_COMPLETION(pAdapter->disconnect_comp_var);
status = sme_RoamDisconnect( WLAN_HDD_GET_HAL_CTX(pAdapter), pAdapter->sessionId, eCSR_DISCONNECT_REASON_UNSPECIFIED );
if(eHAL_STATUS_SUCCESS == status)
{
long ret;
ret = wait_for_completion_interruptible_timeout(
&pAdapter->disconnect_comp_var,
msecs_to_jiffies(WLAN_WAIT_TIME_DISCONNECT));
if (ret <= 0)
hddLog(VOS_TRACE_LEVEL_ERROR,
FL("failed wait on disconnect_comp_var %ld"), ret);
}
}
return status;
}
if (wrqu->data.flags & (IW_ENCODE_OPEN | IW_ENCODE_RESTRICTED))
{
hddLog(VOS_TRACE_LEVEL_INFO, "iwconfig wlan0 key on");
pHddStaCtx->conn_info.authType = (encoderq->flags & IW_ENCODE_RESTRICTED) ? eCSR_AUTH_TYPE_SHARED_KEY : eCSR_AUTH_TYPE_OPEN_SYSTEM;
}
if(wrqu->data.length > 0)
{
hddLog(VOS_TRACE_LEVEL_INFO, "%s : wrqu->data.length : %d",__func__,wrqu->data.length);
key_length = wrqu->data.length;
/* IW_ENCODING_TOKEN_MAX is the value that is set for wrqu->data.length by iwconfig.c when 'iwconfig wlan0 key on' is issued.*/
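/*
 * Only the two WEP key sizes are accepted: a 5-byte key is WEP-40 and
 * a 13-byte key is WEP-104 (with the 24-bit IV these are the schemes
 * commonly marketed as 64-bit and 128-bit WEP).
 */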
if(5 == key_length)
{
hddLog(VOS_TRACE_LEVEL_INFO, "%s: Call with WEP40,key_len=%d",__func__,key_length);
if((IW_AUTH_KEY_MGMT_802_1X == pWextState->authKeyMgmt) && (eCSR_AUTH_TYPE_OPEN_SYSTEM == pHddStaCtx->conn_info.authType))
{
encryptionType = eCSR_ENCRYPT_TYPE_WEP40;
}
else
{
encryptionType = eCSR_ENCRYPT_TYPE_WEP40_STATICKEY;
}
}
else if(13 == key_length)
{
hddLog(VOS_TRACE_LEVEL_INFO, "%s:Call with WEP104,key_len:%d",__func__,key_length);
if((IW_AUTH_KEY_MGMT_802_1X == pWextState->authKeyMgmt) && (eCSR_AUTH_TYPE_OPEN_SYSTEM == pHddStaCtx->conn_info.authType))
{
encryptionType = eCSR_ENCRYPT_TYPE_WEP104;
}
else
{
encryptionType = eCSR_ENCRYPT_TYPE_WEP104_STATICKEY;
}
}
else
{
hddLog(VOS_TRACE_LEVEL_WARN, "%s: Invalid WEP key length :%d",
__func__, key_length);
return -EINVAL;
}
pHddStaCtx->conn_info.ucEncryptionType = encryptionType;
pHddStaCtx->conn_info.mcEncryptionType = encryptionType;
pWextState->roamProfile.EncryptionType.numEntries = 1;
pWextState->roamProfile.EncryptionType.encryptionType[0] = encryptionType;
pWextState->roamProfile.mcEncryptionType.numEntries = 1;
pWextState->roamProfile.mcEncryptionType.encryptionType[0] = encryptionType;
if((eConnectionState_NotConnected == pHddStaCtx->conn_info.connState) &&
((eCSR_AUTH_TYPE_OPEN_SYSTEM == pHddStaCtx->conn_info.authType) ||
(eCSR_AUTH_TYPE_SHARED_KEY == pHddStaCtx->conn_info.authType)))
{
vos_mem_copy(&pWextState->roamProfile.Keys.KeyMaterial[keyId][0],extra,key_length);
pWextState->roamProfile.Keys.KeyLength[keyId] = (v_U8_t)key_length;
pWextState->roamProfile.Keys.defaultIndex = (v_U8_t)keyId;
return status;
}
}
return 0;
}
static int iw_set_encode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_encode(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_get_encodeext(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq,
char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_wext_state_t *pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
tCsrRoamProfile *pRoamProfile = &(pWextState->roamProfile);
int keyId;
eCsrEncryptionType encryptionType = eCSR_ENCRYPT_TYPE_NONE;
eCsrAuthType authType = eCSR_AUTH_TYPE_NONE;
int i;
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
keyId = pRoamProfile->Keys.defaultIndex;
if(keyId < 0 || keyId >= MAX_WEP_KEYS)
{
hddLog(LOG1,"%s: Invalid keyId : %d",__func__,keyId);
return -EINVAL;
}
if(pRoamProfile->Keys.KeyLength[keyId] > 0)
{
dwrq->flags |= IW_ENCODE_ENABLED;
dwrq->length = pRoamProfile->Keys.KeyLength[keyId];
vos_mem_copy(extra, &(pRoamProfile->Keys.KeyMaterial[keyId][0]),
pRoamProfile->Keys.KeyLength[keyId]);
}
else
{
dwrq->flags |= IW_ENCODE_DISABLED;
}
for(i=0; i < MAX_WEP_KEYS; i++)
{
if(pRoamProfile->Keys.KeyMaterial[i] == NULL)
{
continue;
}
else
{
break;
}
}
if(MAX_WEP_KEYS == i)
{
dwrq->flags |= IW_ENCODE_NOKEY;
}
else
{
dwrq->flags |= IW_ENCODE_ENABLED;
}
encryptionType = pRoamProfile->EncryptionType.encryptionType[0];
if(eCSR_ENCRYPT_TYPE_NONE == encryptionType)
{
dwrq->flags |= IW_ENCODE_DISABLED;
}
authType = (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.authType;
if(IW_AUTH_ALG_OPEN_SYSTEM == authType)
{
dwrq->flags |= IW_ENCODE_OPEN;
}
else
{
dwrq->flags |= IW_ENCODE_RESTRICTED;
}
EXIT();
return 0;
}
static int iw_get_encodeext(struct net_device *dev,
struct iw_request_info *info,
struct iw_point *dwrq,
char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_encodeext(dev, info, dwrq, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_encodeext(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
hdd_wext_state_t *pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
eHalStatus halStatus= eHAL_STATUS_SUCCESS;
tCsrRoamProfile *pRoamProfile = &pWextState->roamProfile;
v_U32_t status = 0;
struct iw_encode_ext *ext = (struct iw_encode_ext*)extra;
v_U8_t groupmacaddr[WNI_CFG_BSSID_LEN] = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
int key_index;
struct iw_point *encoding = &wrqu->encoding;
tCsrRoamSetKey setKey;
v_U32_t roamId= 0xFF;
VOS_STATUS vos_status;
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!",__func__);
return 0;
}
key_index = encoding->flags & IW_ENCODE_INDEX;
if(key_index > 0) {
/*Convert from 1-based to 0-based keying*/
key_index--;
}
if(!ext->key_len) {
/*Set the encryption type to NONE*/
pRoamProfile->EncryptionType.encryptionType[0] = eCSR_ENCRYPT_TYPE_NONE;
return status;
}
if(eConnectionState_NotConnected == pHddStaCtx->conn_info.connState &&
(IW_ENCODE_ALG_WEP == ext->alg))
{
if(IW_AUTH_KEY_MGMT_802_1X == pWextState->authKeyMgmt) {
VOS_TRACE (VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
("Invalid Configuration:%s"),__func__);
return -EINVAL;
}
else {
/*Static wep, update the roam profile with the keys */
if(ext->key && (ext->key_len <= eCSR_SECURITY_WEP_KEYSIZE_MAX_BYTES) &&
key_index < CSR_MAX_NUM_KEY) {
vos_mem_copy(&pRoamProfile->Keys.KeyMaterial[key_index][0],ext->key,ext->key_len);
pRoamProfile->Keys.KeyLength[key_index] = (v_U8_t)ext->key_len;
if(ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY)
pRoamProfile->Keys.defaultIndex = (v_U8_t)key_index;
}
}
return status;
}
vos_mem_zero(&setKey,sizeof(tCsrRoamSetKey));
setKey.keyId = key_index;
setKey.keyLength = ext->key_len;
if(ext->key_len <= CSR_MAX_KEY_LEN) {
vos_mem_copy(&setKey.Key[0],ext->key,ext->key_len);
}
if(ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY) {
/*Key direction for group is RX only*/
setKey.keyDirection = eSIR_RX_ONLY;
vos_mem_copy(setKey.peerMac,groupmacaddr,WNI_CFG_BSSID_LEN);
}
else {
setKey.keyDirection = eSIR_TX_RX;
vos_mem_copy(setKey.peerMac,ext->addr.sa_data,WNI_CFG_BSSID_LEN);
}
/*For a supplicant the PAE role is zero (PAE_ROLE_SUPPLICANT)*/
setKey.paeRole = 0;
switch(ext->alg)
{
case IW_ENCODE_ALG_NONE:
setKey.encType = eCSR_ENCRYPT_TYPE_NONE;
break;
case IW_ENCODE_ALG_WEP:
setKey.encType = (ext->key_len== 5) ? eCSR_ENCRYPT_TYPE_WEP40:eCSR_ENCRYPT_TYPE_WEP104;
break;
case IW_ENCODE_ALG_TKIP:
{
v_U8_t *pKey = &setKey.Key[0];
setKey.encType = eCSR_ENCRYPT_TYPE_TKIP;
vos_mem_zero(pKey, CSR_MAX_KEY_LEN);
/* The supplicant sends the 32-byte key in this order:
|--------------|----------|----------|
|     Tk1      |  TX-MIC  |  RX-MIC  |
|--------------|----------|----------|
<---16 bytes--><--8 bytes-><--8 bytes->
*/
/* SME expects the 32-byte key in this order:
|--------------|----------|----------|
|     Tk1      |  RX-MIC  |  TX-MIC  |
|--------------|----------|----------|
<---16 bytes--><--8 bytes-><--8 bytes->
so the two MIC halves are swapped below.
*/
/* Copy the Temporal Key 1 (TK1) */
vos_mem_copy(pKey,ext->key,16);
/*Copy the rx mic first*/
vos_mem_copy(&pKey[16],&ext->key[24],8);
/*Copy the tx mic */
vos_mem_copy(&pKey[24],&ext->key[16],8);
}
break;
case IW_ENCODE_ALG_CCMP:
setKey.encType = eCSR_ENCRYPT_TYPE_AES;
break;
#ifdef FEATURE_WLAN_ESE
#define IW_ENCODE_ALG_KRK 6
case IW_ENCODE_ALG_KRK:
setKey.encType = eCSR_ENCRYPT_TYPE_KRK;
break;
#endif /* FEATURE_WLAN_ESE */
default:
setKey.encType = eCSR_ENCRYPT_TYPE_NONE;
break;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
("%s: cipher_alg:%d key_len:%d encType:%d"), __func__,
(int)ext->alg, (int)ext->key_len, setKey.encType);
#ifdef WLAN_FEATURE_VOWIFI_11R
/* The supplicant may attempt to set the PTK once pre-authentication
is done. Save the key in the UMAC and include it in the ADD
BSS request */
halStatus = sme_FTUpdateKey( WLAN_HDD_GET_HAL_CTX(pAdapter), &setKey);
if ( halStatus == eHAL_STATUS_FT_PREAUTH_KEY_SUCCESS )
{
hddLog(VOS_TRACE_LEVEL_INFO_MED,
"%s: Update PreAuth Key success", __func__);
return 0;
}
else if ( halStatus == eHAL_STATUS_FT_PREAUTH_KEY_FAILED )
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Update PreAuth Key failed", __func__);
return -EINVAL;
}
#endif /* WLAN_FEATURE_VOWIFI_11R */
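/* Not a pre-authentication key: push the key through the normal roam
 * path, first waiting for any pending upper-layer authentication (ULA)
 * to finish. */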
pHddStaCtx->roam_info.roamingState = HDD_ROAM_STATE_SETTING_KEY;
vos_status = wlan_hdd_check_ula_done(pAdapter);
if ( vos_status != VOS_STATUS_SUCCESS )
{
VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"[%4d] wlan_hdd_check_ula_done returned ERROR status= %d",
__LINE__, vos_status );
pHddStaCtx->roam_info.roamingState = HDD_ROAM_STATE_NONE;
}
halStatus = sme_RoamSetKey( WLAN_HDD_GET_HAL_CTX(pAdapter),pAdapter->sessionId, &setKey, &roamId );
if ( halStatus != eHAL_STATUS_SUCCESS )
{
VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"[%4d] sme_RoamSetKey returned ERROR status= %d",
__LINE__, halStatus );
pHddStaCtx->roam_info.roamingState = HDD_ROAM_STATE_NONE;
}
return halStatus;
}
static int iw_set_encodeext(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_encodeext(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
if(wrqu->retry.value < WNI_CFG_LONG_RETRY_LIMIT_STAMIN ||
wrqu->retry.value > WNI_CFG_LONG_RETRY_LIMIT_STAMAX) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, ("Invalid Retry-Limit=%d!!"),wrqu->retry.value);
return -EINVAL;
}
if(wrqu->retry.flags & IW_RETRY_LIMIT) {
if((wrqu->retry.flags & IW_RETRY_LONG))
{
if ( ccmCfgSetInt(hHal, WNI_CFG_LONG_RETRY_LIMIT, wrqu->retry.value, ccmCfgSetCallback, eANI_BOOLEAN_TRUE) != eHAL_STATUS_SUCCESS )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
FL("failed to set ini parameter, WNI_CFG_LONG_RETRY_LIMIT"));
return -EIO;
}
}
else if((wrqu->retry.flags & IW_RETRY_SHORT))
{
if ( ccmCfgSetInt(hHal, WNI_CFG_SHORT_RETRY_LIMIT, wrqu->retry.value, ccmCfgSetCallback, eANI_BOOLEAN_TRUE) != eHAL_STATUS_SUCCESS )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
FL("failed to set ini parameter, WNI_CFG_SHORT_RETRY_LIMIT"));
return -EIO;
}
}
}
else
{
return -EOPNOTSUPP;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, ("Set Retry-Limit=%d!!"),wrqu->retry.value);
EXIT();
return 0;
}
static int iw_set_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_retry(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_get_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
v_U32_t retry = 0;
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
if((wrqu->retry.flags & IW_RETRY_LONG))
{
wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
if ( ccmCfgGetInt(hHal, WNI_CFG_LONG_RETRY_LIMIT, &retry) != eHAL_STATUS_SUCCESS )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
FL("failed to get ini parameter, WNI_CFG_LONG_RETRY_LIMIT"));
return -EIO;
}
wrqu->retry.value = retry;
}
else if ((wrqu->retry.flags & IW_RETRY_SHORT))
{
wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_SHORT;
if ( ccmCfgGetInt(hHal, WNI_CFG_SHORT_RETRY_LIMIT, &retry) != eHAL_STATUS_SUCCESS )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
FL("failed to get ini parameter, WNI_CFG_SHORT_RETRY_LIMIT"));
return -EIO;
}
wrqu->retry.value = retry;
}
else {
return -EOPNOTSUPP;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, ("Retry-Limit=%d!!"),retry);
EXIT();
return 0;
}
static int iw_get_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_retry(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_mlme(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
struct iw_mlme *mlme = (struct iw_mlme *)extra;
eHalStatus status = eHAL_STATUS_SUCCESS;
ENTER();
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!",__func__);
return 0;
}
//reason_code is unused. By default it is set to eCSR_DISCONNECT_REASON_UNSPECIFIED
switch (mlme->cmd) {
case IW_MLME_DISASSOC:
case IW_MLME_DEAUTH:
if( pHddStaCtx->conn_info.connState == eConnectionState_Associated )
{
eCsrRoamDisconnectReason reason = eCSR_DISCONNECT_REASON_UNSPECIFIED;
if( mlme->reason_code == HDD_REASON_MICHAEL_MIC_FAILURE )
reason = eCSR_DISCONNECT_REASON_MIC_ERROR;
INIT_COMPLETION(pAdapter->disconnect_comp_var);
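/* Issue the disconnect, then block (bounded by WLAN_WAIT_TIME_DISCONNECT)
 * until CSR signals completion. */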
status = sme_RoamDisconnect( WLAN_HDD_GET_HAL_CTX(pAdapter), pAdapter->sessionId,reason);
if(eHAL_STATUS_SUCCESS == status)
{
long ret;
ret = wait_for_completion_interruptible_timeout(
&pAdapter->disconnect_comp_var,
msecs_to_jiffies(WLAN_WAIT_TIME_DISCONNECT));
if (ret <= 0)
hddLog(VOS_TRACE_LEVEL_ERROR,
FL("failed wait on disconnect_comp_var %ld"), ret);
}
else
hddLog(LOGE,"%s %d Command Disassociate/Deauthenticate : csrRoamDisconnect failure returned %d",
__func__, (int)mlme->cmd, (int)status );
/* Resetting authKeyMgmt */
(WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter))->authKeyMgmt = 0;
netif_tx_disable(dev);
netif_carrier_off(dev);
}
else
{
hddLog(LOGE,"%s %d Command Disassociate/Deauthenticate called but station is not in associated state", __func__, (int)mlme->cmd );
}
break;
default:
hddLog(LOGE,"%s %d Command should be Disassociate/Deauthenticate", __func__, (int)mlme->cmd );
return -EINVAL;
}//end of switch
EXIT();
return status;
}
static int iw_set_mlme(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_mlme(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
/* set param sub-ioctls */
static int __iw_setint_getnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
hdd_wext_state_t *pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
int *value = (int *)extra;
int sub_cmd = value[0];
int set_value = value[1];
int ret = 0; /* success */
int enable_pbm, enable_mp;
#ifdef CONFIG_HAS_EARLYSUSPEND
v_U8_t nEnableSuspendOld;
#endif
INIT_COMPLETION(pWextState->completion_var);
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
switch(sub_cmd)
{
case WE_SET_11D_STATE:
{
tSmeConfigParams smeConfig;
memset(&smeConfig, 0x00, sizeof(smeConfig));
if((ENABLE_11D == set_value) || (DISABLE_11D == set_value)) {
sme_GetConfigParam(hHal,&smeConfig);
smeConfig.csrConfig.Is11dSupportEnabled = (v_BOOL_t)set_value;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, ("11D state=%d!!"),smeConfig.csrConfig.Is11dSupportEnabled);
sme_UpdateConfig(hHal,&smeConfig);
}
else {
return -EINVAL;
}
break;
}
case WE_WOWL:
{
switch (set_value)
{
case 0x00:
hdd_exit_wowl(pAdapter, eWOWL_EXIT_USER);
break;
case 0x01:
case 0x02:
case 0x03:
enable_mp = (set_value & 0x01) ? 1 : 0;
enable_pbm = (set_value & 0x02) ? 1 : 0;
hddLog(LOGE, "magic packet ? = %s pattern byte matching ? = %s",
(enable_mp ? "YES":"NO"), (enable_pbm ? "YES":"NO"));
hdd_enter_wowl(pAdapter, enable_mp, enable_pbm);
break;
default:
hddLog(LOGE, "Invalid arg %d in WE_WOWL IOCTL", set_value);
ret = -EINVAL;
break;
}
break;
}
case WE_SET_POWER:
{
switch (set_value)
{
case 0: //Full Power
{
struct statsContext context;
eHalStatus status;
init_completion(&context.completion);
context.pAdapter = pAdapter;
context.magic = POWER_CONTEXT_MAGIC;
status = sme_RequestFullPower(WLAN_HDD_GET_HAL_CTX(pAdapter),
iw_power_callback_fn, &context,
eSME_FULL_PWR_NEEDED_BY_HDD);
if (eHAL_STATUS_PMC_PENDING == status)
{
int lrc = wait_for_completion_interruptible_timeout(
&context.completion,
msecs_to_jiffies(WLAN_WAIT_TIME_POWER));
if (lrc <= 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: SME %s while requesting fullpower",
__func__, (0 == lrc) ?
"timeout" : "interrupt");
}
}
/* either we have a response or we timed out. if we timed
out there is a race condition such that the callback
function could be executing at the same time we are. of
primary concern is if the callback function had already
verified the "magic" but had not yet set the completion
variable when a timeout occurred. we serialize these
activities by invalidating the magic while holding a
shared spinlock which will cause us to block if the
callback is currently executing */
spin_lock(&hdd_context_lock);
context.magic = 0;
spin_unlock(&hdd_context_lock);
hddLog(LOGE, "iwpriv Full Power completed");
break;
}
case 1: //Enable BMPS
sme_EnablePowerSave(hHal, ePMC_BEACON_MODE_POWER_SAVE);
break;
case 2: //Disable BMPS
sme_DisablePowerSave(hHal, ePMC_BEACON_MODE_POWER_SAVE);
break;
case 3: //Request Bmps
{
struct statsContext context;
eHalStatus status;
init_completion(&context.completion);
context.pAdapter = pAdapter;
context.magic = POWER_CONTEXT_MAGIC;
status = sme_RequestBmps(WLAN_HDD_GET_HAL_CTX(pAdapter),
iw_power_callback_fn, &context);
if (eHAL_STATUS_PMC_PENDING == status)
{
int lrc = wait_for_completion_interruptible_timeout(
&context.completion,
msecs_to_jiffies(WLAN_WAIT_TIME_POWER));
if (lrc <= 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: SME %s while requesting BMPS",
__func__, (0 == lrc) ? "timeout" :
"interrupt");
}
}
/* either we have a response or we timed out. if we
timed out there is a race condition such that the
callback function could be executing at the same
time we are. of primary concern is if the callback
function had already verified the "magic" but had
not yet set the completion variable when a timeout
occurred. we serialize these activities by
invalidating the magic while holding a shared
spinlock which will cause us to block if the
callback is currently executing */
spin_lock(&hdd_context_lock);
context.magic = 0;
spin_unlock(&hdd_context_lock);
hddLog(LOGE, "iwpriv Request BMPS completed");
break;
}
case 4: //Enable IMPS
sme_EnablePowerSave(hHal, ePMC_IDLE_MODE_POWER_SAVE);
break;
case 5: //Disable IMPS
sme_DisablePowerSave(hHal, ePMC_IDLE_MODE_POWER_SAVE);
break;
case 6: //Enable Standby
sme_EnablePowerSave(hHal, ePMC_STANDBY_MODE_POWER_SAVE);
break;
case 7: //Disable Standby
sme_DisablePowerSave(hHal, ePMC_STANDBY_MODE_POWER_SAVE);
break;
case 8: //Request Standby
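/* The standby request hook has been compiled out; this case is
 * currently a no-op. */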
#ifdef CONFIG_HAS_EARLYSUSPEND
#endif
break;
case 9: //Start Auto Bmps Timer
sme_StartAutoBmpsTimer(hHal);
break;
case 10://Stop Auto BMPS Timer
sme_StopAutoBmpsTimer(hHal);
break;
#ifdef CONFIG_HAS_EARLYSUSPEND
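/* Cases 11-13 used to drive the early-suspend entry points; those
 * calls have been compiled out, so nEnableSuspend is saved,
 * overwritten and immediately restored without an actual suspend. */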
case 11://suspend to standby
#ifdef CONFIG_HAS_EARLYSUSPEND
nEnableSuspendOld = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->nEnableSuspend;
(WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->nEnableSuspend = 1;
(WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->nEnableSuspend = nEnableSuspendOld;
#endif
break;
case 12://suspend to deep sleep
#ifdef CONFIG_HAS_EARLYSUSPEND
nEnableSuspendOld = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->nEnableSuspend;
(WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->nEnableSuspend = 2;
(WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->nEnableSuspend = nEnableSuspendOld;
#endif
break;
case 13://resume from suspend
#ifdef CONFIG_HAS_EARLYSUSPEND
#endif
break;
#endif
default:
hddLog(LOGE, "Invalid arg %d in WE_SET_POWER IOCTL", set_value);
ret = -EINVAL;
break;
}
break;
}
case WE_SET_MAX_ASSOC:
{
if ((WNI_CFG_ASSOC_STA_LIMIT_STAMIN > set_value) ||
(WNI_CFG_ASSOC_STA_LIMIT_STAMAX < set_value))
{
ret = -EINVAL;
}
else if ( ccmCfgSetInt(hHal, WNI_CFG_ASSOC_STA_LIMIT,
set_value, NULL, eANI_BOOLEAN_FALSE)
!= eHAL_STATUS_SUCCESS )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
FL("failed to set ini parameter, WNI_CFG_ASSOC_STA_LIMIT"));
ret = -EIO;
}
break;
}
case WE_SET_SAP_AUTO_CHANNEL_SELECTION:
{
if( 0 == set_value )
{
(WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->apAutoChannelSelection = 0;
}
else if ( 1 == set_value )
{
(WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->apAutoChannelSelection = 1;
}
else
{
hddLog(LOGE, "Invalid arg %d in WE_SET_SAP_AUTO_CHANNEL_SELECTION IOCTL", set_value);
ret = -EINVAL;
}
break;
}
case WE_SET_DATA_INACTIVITY_TO:
{
if ((set_value < CFG_DATA_INACTIVITY_TIMEOUT_MIN) ||
(set_value > CFG_DATA_INACTIVITY_TIMEOUT_MAX) ||
(ccmCfgSetInt((WLAN_HDD_GET_CTX(pAdapter))->hHal,
WNI_CFG_PS_DATA_INACTIVITY_TIMEOUT,
set_value,
NULL, eANI_BOOLEAN_FALSE)==eHAL_STATUS_FAILURE))
{
hddLog(LOGE,"Failure: Could not pass on "
"WNI_CFG_PS_DATA_INACTIVITY_TIMEOUT configuration info "
"to CCM");
ret = -EINVAL;
}
break;
}
case WE_SET_MAX_TX_POWER:
{
tSirMacAddr bssid = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
tSirMacAddr selfMac = {0xFF,0xFF,0xFF,0xFF,0xFF,0xFF};
hddLog(VOS_TRACE_LEVEL_INFO, "%s: Setting maximum tx power %d dBm",
__func__, set_value);
if( sme_SetMaxTxPower(hHal, bssid, selfMac, set_value) !=
eHAL_STATUS_SUCCESS )
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: Setting maximum tx power failed",
__func__);
return -EIO;
}
break;
}
case WE_SET_MAX_TX_POWER_2_4:
{
hddLog(VOS_TRACE_LEVEL_INFO,
"%s: Setting maximum tx power %d dBm for 2.4 GHz band",
__func__, set_value);
if (sme_SetMaxTxPowerPerBand(eCSR_BAND_24, set_value) !=
eHAL_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Setting maximum tx power failed for 2.4 GHz band",
__func__);
return -EIO;
}
break;
}
case WE_SET_MAX_TX_POWER_5_0:
{
hddLog(VOS_TRACE_LEVEL_INFO,
"%s: Setting maximum tx power %d dBm for 5.0 GHz band",
__func__, set_value);
if (sme_SetMaxTxPowerPerBand(eCSR_BAND_5G, set_value) !=
eHAL_STATUS_SUCCESS)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Setting maximum tx power failed for 5.0 GHz band",
__func__);
return -EIO;
}
break;
}
case WE_SET_HIGHER_DTIM_TRANSITION:
{
if(!((set_value == eANI_BOOLEAN_FALSE) ||
(set_value == eANI_BOOLEAN_TRUE)))
{
hddLog(LOGE, "Dynamic DTIM Incorrect data:%d", set_value);
ret = -EINVAL;
}
else
{
if(pAdapter->higherDtimTransition != set_value)
{
pAdapter->higherDtimTransition = set_value;
hddLog(LOG1, "%s: higherDtimTransition set to :%d", __func__, pAdapter->higherDtimTransition);
}
}
break;
}
case WE_SET_TM_LEVEL:
{
hdd_context_t *hddCtxt = WLAN_HDD_GET_CTX(pAdapter);
hddLog(VOS_TRACE_LEVEL_INFO, "Set Thermal Mitigation Level %d", (int)set_value);
hddDevTmLevelChangedHandler(hddCtxt->parent_dev, set_value);
break;
}
case WE_ENABLE_STRICT_FCC_REG:
{
hdd_context_t *hddCtxt = WLAN_HDD_GET_CTX(pAdapter);
struct wiphy *wiphy = NULL;
long lrc;
int status;
wiphy = hddCtxt->wiphy;
if(wiphy == NULL)
{
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: wiphy is NULL ", __func__);
break;
}
init_completion(&hddCtxt->wiphy_channel_update_event);
hddCtxt->nEnableStrictRegulatoryForFCC = set_value;
status = regulatory_hint(wiphy, "00");
if(status < 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: Failure in setting regulatory rule ", __func__);
break;
}
/* Wait for completion */
lrc = wait_for_completion_interruptible_timeout(&hddCtxt->wiphy_channel_update_event,
msecs_to_jiffies(WLAN_WAIT_TIME_CHANNEL_UPDATE));
if (lrc <= 0)
{
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: SME %s while setting strict FCC regulatory rule ",
__func__, (0 == lrc) ? "Timeout" : "Interrupt");
return (0 == lrc) ? -ETIMEDOUT : -EINTR;
}
hddLog(VOS_TRACE_LEVEL_INFO,"%s: SUCCESS in setting strict FCC regulatory rule", __func__);
break;
}
case WE_SET_DEBUG_LOG:
{
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
pHddCtx->cfg_ini->gEnableDebugLog = set_value;
sme_UpdateConnectDebug(pHddCtx->hHal, set_value);
break;
}
#ifdef FEATURE_WLAN_TDLS
case WE_SET_TDLS_OFF_CHAN:
{
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: Tdls offchannel num %d",
__func__, set_value);
ret = iw_set_tdlsoffchannel(pHddCtx, set_value);
break;
}
case WE_SET_TDLS_SEC_OFF_CHAN_OFFSET:
{
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: Tdls sec offchan offset %d",
__func__, set_value);
ret = iw_set_tdlssecoffchanneloffset(pHddCtx, set_value);
break;
}
case WE_SET_TDLS_OFF_CHAN_MODE:
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: Tdls offchannel mode %d",
__func__, set_value);
ret = iw_set_tdlsoffchannelmode(pAdapter, set_value);
break;
}
#endif
case WE_SET_SCAN_BAND_PREFERENCE:
{
tSmeConfigParams smeConfig;
memset(&smeConfig, 0x00, sizeof(smeConfig));
if(pAdapter->device_mode != WLAN_HDD_INFRA_STATION) {
ret = -EINVAL;
break;
}
hddLog(LOG1, "WE_SET_BAND_PREFERRENCE val %d ", set_value);
if (eCSR_BAND_ALL == set_value ||
eCSR_BAND_24 == set_value || eCSR_BAND_5G == set_value) {
sme_GetConfigParam(hHal, &smeConfig);
smeConfig.csrConfig.scanBandPreference = set_value;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"set band scan preference = %d\n",
smeConfig.csrConfig.scanBandPreference);
sme_UpdateConfig(hHal, &smeConfig);
}
else {
ret = -EINVAL;
}
break;
}
/* The WE_SET_MIRACAST_VENDOR_CONFIG IOCTL should be set before the
 * connection happens so that the params can take effect during
 * association. Also this should not be used in STA+P2P concurrency,
 * as the param will also affect the STA mode.
 */
case WE_SET_MIRACAST_VENDOR_CONFIG:
{
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
hddLog(LOG1, FL(
"Set Miracast vendor tuning %d"), set_value);
if (1 == set_value || 0 == set_value)
{
if (eHAL_STATUS_SUCCESS != sme_SetMiracastVendorConfig(pHddCtx->hHal,
pHddCtx->cfg_ini->numBuffAdvert, set_value))
{
hddLog( LOGE, FL("set vendor miracast config failed"));
ret = -EIO;
}
}
else
{
hddLog(LOGE,
FL("Invalid value %d in WE_SET_MIRACAST_VENDOR_CONFIG IOCTL"), set_value);
ret = -EINVAL;
}
break;
}
default:
{
hddLog(LOGE, "Invalid IOCTL setvalue command %d value %d",
sub_cmd, set_value);
break;
}
}
return ret;
}
static int iw_setint_getnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_setint_getnone(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
/* set param sub-ioctls */
static int __iw_setchar_getnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
VOS_STATUS vstatus;
int sub_cmd;
int ret = 0; /* success */
char *pBuffer = NULL;
hdd_adapter_t *pAdapter = (netdev_priv(dev));
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
#ifdef WLAN_FEATURE_VOWIFI
hdd_config_t *pConfig = pHddCtx->cfg_ini;
#endif /* WLAN_FEATURE_VOWIFI */
struct iw_point s_priv_data;
tSirpkt80211 *pkt;
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
/* helper function to get iwreq_data with compat handling. */
if (hdd_priv_get_data(&s_priv_data, wrqu))
{
return -EINVAL;
}
/* make sure all params are correctly passed to function */
if ((NULL == s_priv_data.pointer) || (0 == s_priv_data.length))
{
return -EINVAL;
}
sub_cmd = s_priv_data.flags;
/* ODD number is used for set, copy data using copy_from_user */
pBuffer = mem_alloc_copy_from_user_helper(s_priv_data.pointer,
s_priv_data.length);
if (NULL == pBuffer)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"mem_alloc_copy_from_user_helper fail");
return -ENOMEM;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: Received length %d", __func__, s_priv_data.length);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: Received data %s", __func__, pBuffer);
switch(sub_cmd)
{
case WE_WOWL_ADD_PTRN:
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "ADD_PTRN");
hdd_add_wowl_ptrn(pAdapter, pBuffer);
break;
case WE_WOWL_DEL_PTRN:
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "DEL_PTRN");
hdd_del_wowl_ptrn(pAdapter, pBuffer);
break;
#if defined WLAN_FEATURE_VOWIFI
case WE_NEIGHBOR_REPORT_REQUEST:
{
tRrmNeighborReq neighborReq;
tRrmNeighborRspCallbackInfo callbackInfo;
if (pConfig->fRrmEnable)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "Neighbor Request");
neighborReq.no_ssid = (s_priv_data.length - 1) ? false : true ;
if( !neighborReq.no_ssid )
{
neighborReq.ssid.length = (s_priv_data.length - 1) > 32 ? 32 : (s_priv_data.length - 1) ;
vos_mem_copy( neighborReq.ssid.ssId, pBuffer, neighborReq.ssid.length );
}
callbackInfo.neighborRspCallback = NULL;
callbackInfo.neighborRspCallbackContext = NULL;
callbackInfo.timeout = 5000; //5 seconds
sme_NeighborReportRequest( WLAN_HDD_GET_HAL_CTX(pAdapter), pAdapter->sessionId, &neighborReq, &callbackInfo );
}
else
{
hddLog(LOGE, "%s: Ignoring neighbor request as RRM is not enabled", __func__);
ret = -EINVAL;
}
}
break;
#endif
case WE_SET_AP_WPS_IE:
hddLog( LOGE, "Received WE_SET_AP_WPS_IE" );
sme_updateP2pIe( WLAN_HDD_GET_HAL_CTX(pAdapter), pBuffer, s_priv_data.length );
break;
case WE_SET_CONFIG:
vstatus = hdd_execute_config_command(pHddCtx, pBuffer);
if (VOS_STATUS_SUCCESS != vstatus)
{
ret = -EINVAL;
}
break;
case WE_SET_ENCRYPT_MSG:
pkt = vos_mem_malloc(sizeof(tSirpkt80211));
if (NULL == pkt)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: vos_mem_alloc failed", __func__);
return -ENOMEM;
}
memset(pkt, 0, sizeof(tSirpkt80211));
if (FALSE == sme_IsFeatureSupportedByFW(DISA)) {
hddLog(VOS_TRACE_LEVEL_ERROR,
FL("Firmware is not DISA capable"));
ret = -EINVAL;
vos_mem_free(pkt);
break;
}
parse_Bufferforpkt(pkt, pBuffer, wrqu->data.length);
ret = sme_Encryptmsgsend(pHddCtx->hHal, (u8 *)pkt,
sizeof(tSirpkt80211), hdd_encrypt_msg_cb);
if (eHAL_STATUS_SUCCESS != ret) {
hddLog(VOS_TRACE_LEVEL_ERROR,
FL("SENDEncryptMSG: fail to post WDA cmd"));
ret = -EINVAL;
}
vos_mem_free(pkt);
break;
default:
{
hddLog(LOGE, "%s: Invalid sub command %d",__func__, sub_cmd);
ret = -EINVAL;
break;
}
}
kfree(pBuffer);
return ret;
}
static int iw_setchar_getnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_setchar_getnone(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
/* get param sub-ioctls */
static int __iw_setnone_getint(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
int *value = (int *)extra;
int ret = 0; /* success */
tSmeConfigParams smeConfig;
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
if (__ratelimit(&hdd_ioctl_timeout_rs)) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
}
return -EBUSY;
}
switch (value[0])
{
case WE_GET_11D_STATE:
{
/* use the function-scope smeConfig; a shadowing local here was redundant */
sme_GetConfigParam(hHal, &smeConfig);
*value = smeConfig.csrConfig.Is11dSupportEnabled;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, ("11D state=%d!!"),*value);
break;
}
case WE_IBSS_STATUS:
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "****Return IBSS Status*****");
break;
case WE_PMC_STATE:
{
*value = pmcGetPmcState(hHal);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, ("PMC state=%d!!"),*value);
break;
}
case WE_GET_WLAN_DBG:
{
vos_trace_display();
*value = 0;
break;
}
case WE_GET_MAX_ASSOC:
{
if (ccmCfgGetInt(hHal, WNI_CFG_ASSOC_STA_LIMIT, (tANI_U32 *)value) != eHAL_STATUS_SUCCESS)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
FL("failed to get ini parameter, WNI_CFG_ASSOC_STA_LIMIT"));
ret = -EIO;
}
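/* Clamp the configured limit to the association slots actually left
 * after reserving self-STA entries for each active adapter. */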
#ifdef WLAN_SOFTAP_VSTA_FEATURE
if (pHddCtx->cfg_ini->fEnableVSTASupport)
{
if (*value > VSTA_NUM_ASSOC_STA)
{
*value = VSTA_NUM_ASSOC_STA;
}
if ((pHddCtx->hddAdapters.count > VSTA_NUM_RESV_SELFSTA) &&
(*value > (VSTA_NUM_ASSOC_STA -
(pHddCtx->hddAdapters.count - VSTA_NUM_RESV_SELFSTA))))
{
*value = (VSTA_NUM_ASSOC_STA -
(pHddCtx->hddAdapters.count - VSTA_NUM_RESV_SELFSTA));
}
}
else
#endif
{
if (*value > NUM_ASSOC_STA)
{
*value = NUM_ASSOC_STA;
}
if ((pHddCtx->hddAdapters.count > NUM_RESV_SELFSTA) &&
(*value > (NUM_ASSOC_STA -
(pHddCtx->hddAdapters.count - NUM_RESV_SELFSTA))))
{
*value = (NUM_ASSOC_STA -
(pHddCtx->hddAdapters.count - NUM_RESV_SELFSTA));
}
}
break;
}
case WE_GET_WDI_DBG:
{
wpalTraceDisplay();
*value = 0;
break;
}
case WE_GET_SAP_AUTO_CHANNEL_SELECTION:
{
*value = (WLAN_HDD_GET_CTX(pAdapter))->cfg_ini->apAutoChannelSelection;
break;
}
case WE_GET_CONCURRENCY_MODE:
{
*value = hdd_get_concurrency_mode ( );
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, ("concurrency mode=%d"),*value);
break;
}
case WE_GET_SCAN_BAND_PREFERENCE:
{
sme_GetConfigParam(hHal, &smeConfig);
*value = smeConfig.csrConfig.scanBandPreference;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"scanBandPreference = %d\n", *value);
break;
}
default:
{
hddLog(LOGE, "Invalid IOCTL get_value command %d ",value[0]);
break;
}
}
return ret;
}
static int iw_setnone_getint(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_setnone_getint(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
/* set param sub-ioctls */
int __iw_set_three_ints_getnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
int *value = (int *)extra;
int sub_cmd = value[0];
int ret = 0;
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
switch(sub_cmd)
{
case WE_SET_WLAN_DBG:
{
vos_trace_setValue( value[1], value[2], value[3]);
break;
}
case WE_SET_WDI_DBG:
{
wpalTraceSetLevel( value[1], value[2], value[3]);
break;
}
case WE_SET_SAP_CHANNELS:
{
ret = iw_softap_set_channel_range( dev, value[1], value[2], value[3]);
break;
}
default:
{
hddLog(LOGE, "%s: Invalid IOCTL command %d", __func__, sub_cmd );
break;
}
}
return ret;
}
int iw_set_three_ints_getnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_three_ints_getnone(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_get_char_setnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
int sub_cmd = wrqu->data.flags;
#ifdef WLAN_FEATURE_11W
hdd_wext_state_t *pWextState;
#endif
if (pAdapter == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: pAdapter is NULL!", __func__);
return -EINVAL;
}
#ifdef WLAN_FEATURE_11W
pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
#endif
if (NULL == WLAN_HDD_GET_CTX(pAdapter))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: HDD Context is NULL!", __func__);
return -EINVAL;
}
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
switch(sub_cmd)
{
case WE_WLAN_VERSION:
{
hdd_wlan_get_version(pAdapter, wrqu, extra);
break;
}
case WE_GET_STATS:
{
tHalHandle hHal = NULL;
tpAniSirGlobal pMac = NULL;
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
hdd_tx_rx_stats_t *pStats = &pAdapter->hdd_stats.hddTxRxStats;
hdd_chip_reset_stats_t *pResetStats = &pHddCtx->hddChipResetStats;
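/* Dump TX/RX and chip-reset counters into the caller's buffer; the
 * per-AC counters are printed in BK/BE/VI/VO order. */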
snprintf(extra, WE_MAX_STR_LEN,
"\nTransmit"
"\ncalled %u, dropped %u, backpressured %u, queued %u"
"\n dropped BK %u, BE %u, VI %u, VO %u"
"\n classified BK %u, BE %u, VI %u, VO %u"
"\nbackpressured BK %u, BE %u, VI %u, VO %u"
"\n queued BK %u, BE %u, VI %u, VO %u"
"\nfetched %u, empty %u, lowres %u, deqerr %u"
"\ndequeued %u, depressured %u, deque-depressured %u, completed %u, flushed %u"
"\n fetched BK %u, BE %u, VI %u, VO %u"
"\n dequeued BK %u, BE %u, VI %u, VO %u"
"\n depressured BK %u, BE %u, VI %u, VO %u"
"\nDeque depressured BK %u, BE %u, VI %u, VO %u"
"\n flushed BK %u, BE %u, VI %u, VO %u"
"\n\nReceive"
"\nchains %u, packets %u, dropped %u, delivered %u, refused %u"
"\n\nResetsStats"
"\n TotalLogp %u Cmd53 %u MutexRead %u MIF-Error %u FW-Heartbeat %u Others %u"
"\n",
pStats->txXmitCalled,
pStats->txXmitDropped,
pStats->txXmitBackPressured,
pStats->txXmitQueued,
pStats->txXmitDroppedAC[WLANTL_AC_BK],
pStats->txXmitDroppedAC[WLANTL_AC_BE],
pStats->txXmitDroppedAC[WLANTL_AC_VI],
pStats->txXmitDroppedAC[WLANTL_AC_VO],
pStats->txXmitClassifiedAC[WLANTL_AC_BK],
pStats->txXmitClassifiedAC[WLANTL_AC_BE],
pStats->txXmitClassifiedAC[WLANTL_AC_VI],
pStats->txXmitClassifiedAC[WLANTL_AC_VO],
pStats->txXmitBackPressuredAC[WLANTL_AC_BK],
pStats->txXmitBackPressuredAC[WLANTL_AC_BE],
pStats->txXmitBackPressuredAC[WLANTL_AC_VI],
pStats->txXmitBackPressuredAC[WLANTL_AC_VO],
pStats->txXmitQueuedAC[WLANTL_AC_BK],
pStats->txXmitQueuedAC[WLANTL_AC_BE],
pStats->txXmitQueuedAC[WLANTL_AC_VI],
pStats->txXmitQueuedAC[WLANTL_AC_VO],
pStats->txFetched,
pStats->txFetchEmpty,
pStats->txFetchLowResources,
pStats->txFetchDequeueError,
pStats->txFetchDequeued,
pStats->txFetchDePressured,
pStats->txDequeDePressured,
pStats->txCompleted,
pStats->txFlushed,
pStats->txFetchedAC[WLANTL_AC_BK],
pStats->txFetchedAC[WLANTL_AC_BE],
pStats->txFetchedAC[WLANTL_AC_VI],
pStats->txFetchedAC[WLANTL_AC_VO],
pStats->txFetchDequeuedAC[WLANTL_AC_BK],
pStats->txFetchDequeuedAC[WLANTL_AC_BE],
pStats->txFetchDequeuedAC[WLANTL_AC_VI],
pStats->txFetchDequeuedAC[WLANTL_AC_VO],
pStats->txFetchDePressuredAC[WLANTL_AC_BK],
pStats->txFetchDePressuredAC[WLANTL_AC_BE],
pStats->txFetchDePressuredAC[WLANTL_AC_VI],
pStats->txFetchDePressuredAC[WLANTL_AC_VO],
pStats->txDequeDePressuredAC[WLANTL_AC_BK],
pStats->txDequeDePressuredAC[WLANTL_AC_BE],
pStats->txDequeDePressuredAC[WLANTL_AC_VI],
pStats->txDequeDePressuredAC[WLANTL_AC_VO],
pStats->txFlushedAC[WLANTL_AC_BK],
pStats->txFlushedAC[WLANTL_AC_BE],
pStats->txFlushedAC[WLANTL_AC_VI],
pStats->txFlushedAC[WLANTL_AC_VO],
pStats->rxChains,
pStats->rxPackets,
pStats->rxDropped,
pStats->rxDelivered,
pStats->rxRefused,
pResetStats->totalLogpResets,
pResetStats->totalCMD53Failures,
pResetStats->totalMutexReadFailures,
pResetStats->totalMIFErrorFailures,
pResetStats->totalFWHearbeatFailures,
pResetStats->totalUnknownExceptions
);
wrqu->data.length = strlen(extra);
hHal = WLAN_HDD_GET_HAL_CTX( pAdapter );
if (hHal)
pMac = PMAC_STRUCT( hHal );
if (pMac && (wrqu->data.length < WE_MAX_STR_LEN)) {
__u32 pmmStatsLength = WE_MAX_STR_LEN - wrqu->data.length;
snprintf(extra+wrqu->data.length, pmmStatsLength,
"\n BMPS sleepcnt %lld, BMPS awakecnt %lld"
"\n BMPS sleepreqfailcnt %lld, BMPS wakeupreqfailcnt %lld"
"\n IMPS sleepcnt %lld, IMPS awakecnt %lld"
"\n IMPS sleepreqfailcnt %lld, IMPS wakeupreqfailcnt %lld, IMPS lasterr %lld"
"\n",
pMac->pmm.BmpscntSleep,
pMac->pmm.BmpscntAwake,
pMac->pmm.BmpsSleeReqFailCnt,
pMac->pmm.BmpsWakeupReqFailCnt,
pMac->pmm.ImpsCntSleep,
pMac->pmm.ImpsCntAwake,
pMac->pmm.ImpsSleepErrCnt,
pMac->pmm.ImpsWakeupErrCnt,
pMac->pmm.ImpsLastErr
);
}
wrqu->data.length = strlen(extra)+1;
break;
}
/* This case prints the current state of the HDD, SME, CSR, PE and TL;
 * it can be extended for the WDI global state as well.
 * Currently it only checks the P2P_CLIENT adapter;
 * P2P_DEVICE and P2P_GO have not been added as of now.
 */
case WE_GET_STATES:
{
int buf = 0, len = 0;
int adapter_num = 0;
int count = 0, check = 1;
tANI_U16 tlState;
tHalHandle hHal = NULL;
tpAniSirGlobal pMac = NULL;
hdd_station_ctx_t *pHddStaCtx = NULL;
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX( pAdapter );
hdd_adapter_t *useAdapter = NULL;
/* Print wlan0 or p2p0 states based on the adapter_num
*by using the correct adapter
*/
while ( adapter_num < 2 )
{
if ( WLAN_ADAPTER == adapter_num )
{
useAdapter = pAdapter;
buf = scnprintf(extra + len, WE_MAX_STR_LEN - len,
"\n\n wlan0 States:-");
len += buf;
}
else if ( P2P_ADAPTER == adapter_num )
{
buf = scnprintf(extra + len, WE_MAX_STR_LEN - len,
"\n\n p2p0 States:-");
len += buf;
if( !pHddCtx )
{
buf = scnprintf(extra + len, WE_MAX_STR_LEN - len,
"\n pHddCtx is NULL");
len += buf;
break;
}
/*Printing p2p0 states only in the case when the device is
configured as a p2p_client*/
useAdapter = hdd_get_adapter(pHddCtx, WLAN_HDD_P2P_CLIENT);
if ( !useAdapter )
{
buf = scnprintf(extra + len, WE_MAX_STR_LEN - len,
"\n Device not configured as P2P_CLIENT.");
len += buf;
break;
}
}
hHal = WLAN_HDD_GET_HAL_CTX( useAdapter );
if (!hHal) {
buf = scnprintf(extra + len, WE_MAX_STR_LEN - len,
"\n pMac is NULL");
len += buf;
break;
}
pMac = PMAC_STRUCT( hHal );
if (!pMac) {
buf = scnprintf(extra + len, WE_MAX_STR_LEN - len,
"\n pMac is NULL");
len += buf;
break;
}
pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR( useAdapter );
if( !pHddStaCtx )
{
buf = scnprintf(extra + len, WE_MAX_STR_LEN - len,
"\n pHddStaCtx is NULL");
len += buf;
break;
}
tlState = smeGetTLSTAState(hHal, pHddStaCtx->conn_info.staId[0]);
buf = scnprintf(extra + len, WE_MAX_STR_LEN - len,
"\n HDD Conn State - %s "
"\n \n SME State:"
"\n Neighbour Roam State - %s"
"\n CSR State - %s"
"\n CSR Substate - %s"
"\n \n TL STA %d State: %s",
macTraceGetHDDWlanConnState(
pHddStaCtx->conn_info.connState),
macTraceGetNeighbourRoamState(
pMac->roam.neighborRoamInfo.neighborRoamState),
macTraceGetcsrRoamState(
pMac->roam.curState[useAdapter->sessionId]),
macTraceGetcsrRoamSubState(
pMac->roam.curSubState[useAdapter->sessionId]),
pHddStaCtx->conn_info.staId[0],
macTraceGetTLState(tlState)
);
len += buf;
adapter_num++;
}
if (pMac) {
/* Printing Lim State starting with global lim states */
buf = scnprintf(extra + len, WE_MAX_STR_LEN - len,
"\n \n LIM STATES:-"
"\n Global Sme State - %s "\
"\n Global mlm State - %s "\
"\n",
macTraceGetLimSmeState(pMac->lim.gLimSmeState),
macTraceGetLimMlmState(pMac->lim.gLimMlmState)
);
len += buf;
/*printing the PE Sme and Mlm states for valid lim sessions*/
while ( check < 3 && count < 255)
{
if ( pMac->lim.gpSession[count].valid )
{
buf = scnprintf(extra + len, WE_MAX_STR_LEN - len,
"\n Lim Valid Session %d:-"
"\n PE Sme State - %s "
"\n PE Mlm State - %s "
"\n",
check,
macTraceGetLimSmeState(pMac->lim.gpSession[count].limSmeState),
macTraceGetLimMlmState(pMac->lim.gpSession[count].limMlmState)
);
len += buf;
check++;
}
count++;
}
}
wrqu->data.length = strlen(extra)+1;
break;
}
case WE_GET_CFG:
{
hdd_cfg_get_config(WLAN_HDD_GET_CTX(pAdapter), extra, WE_MAX_STR_LEN);
wrqu->data.length = strlen(extra)+1;
break;
}
#ifdef WLAN_FEATURE_11AC
case WE_GET_RSSI:
{
v_S7_t s7Rssi = 0;
wlan_hdd_get_rssi(pAdapter, &s7Rssi);
snprintf(extra, WE_MAX_STR_LEN, "rssi=%d",s7Rssi);
wrqu->data.length = strlen(extra)+1;
break;
}
#endif
#if defined WLAN_FEATURE_VOWIFI_11R || defined FEATURE_WLAN_ESE || defined(FEATURE_WLAN_LFR)
case WE_GET_ROAM_RSSI:
{
v_S7_t s7Rssi = 0;
wlan_hdd_get_roam_rssi(pAdapter, &s7Rssi);
snprintf(extra, WE_MAX_STR_LEN, "rssi=%d", s7Rssi);
wrqu->data.length = strlen(extra)+1;
break;
}
#endif
case WE_GET_WMM_STATUS:
{
snprintf(extra, WE_MAX_STR_LEN,
"\nDir: 0=up, 1=down, 3=both\n"
"|------------------------|\n"
"|AC | ACM |Admitted| Dir |\n"
"|------------------------|\n"
"|VO | %d | %3s | %d |\n"
"|VI | %d | %3s | %d |\n"
"|BE | %d | %3s | %d |\n"
"|BK | %d | %3s | %d |\n"
"|------------------------|\n",
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_VO].wmmAcAccessRequired,
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_VO].wmmAcAccessAllowed?"YES":"NO",
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_VO].wmmAcTspecInfo.ts_info.direction,
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_VI].wmmAcAccessRequired,
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_VI].wmmAcAccessAllowed?"YES":"NO",
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_VI].wmmAcTspecInfo.ts_info.direction,
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_BE].wmmAcAccessRequired,
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_BE].wmmAcAccessAllowed?"YES":"NO",
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_BE].wmmAcTspecInfo.ts_info.direction,
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_BK].wmmAcAccessRequired,
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_BK].wmmAcAccessAllowed?"YES":"NO",
pAdapter->hddWmmStatus.wmmAcStatus[WLANTL_AC_BK].wmmAcTspecInfo.ts_info.direction);
wrqu->data.length = strlen(extra)+1;
break;
}
case WE_GET_CHANNEL_LIST:
{
VOS_STATUS status;
v_U8_t i, len;
char* buf ;
tANI_U8 pBuf[COUNTRY_CODE_LEN];
tANI_U8 uBufLen = COUNTRY_CODE_LEN;
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX( pAdapter );
tChannelListInfo channel_list;
memset(&channel_list, 0, sizeof(channel_list));
status = iw_softap_get_channel_list(dev, info, wrqu, (char *)&channel_list);
if ( !VOS_IS_STATUS_SUCCESS( status ) )
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s GetChannelList Failed!!!",__func__);
return -EINVAL;
}
buf = extra;
/**
* Maximum channels = WNI_CFG_VALID_CHANNEL_LIST_LEN. Maximum buffer
* needed = 5 * number of channels. Check if a sufficient
* buffer is available and then proceed to fill the buffer.
*/
if(WE_MAX_STR_LEN < (5 * WNI_CFG_VALID_CHANNEL_LIST_LEN))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s Insufficient Buffer to populate channel list",
__func__);
return -EINVAL;
}
len = scnprintf(buf, WE_MAX_STR_LEN, "%u ",
channel_list.num_channels);
if( eHAL_STATUS_SUCCESS == sme_GetCountryCode(pHddCtx->hHal, pBuf, &uBufLen))
{
//Printing Country code in getChannelList
for(i= 0; i < COUNTRY_CODE_LEN; i++)
{
len += scnprintf(buf + len, WE_MAX_STR_LEN - len,
"%c ", pBuf[i]);
}
}
for(i = 0 ; i < channel_list.num_channels; i++)
{
len += scnprintf(buf + len, WE_MAX_STR_LEN - len,
"%u ", channel_list.channels[i]);
}
wrqu->data.length = strlen(extra)+1;
break;
}
#ifdef FEATURE_WLAN_TDLS
case WE_GET_TDLS_PEERS:
{
wrqu->data.length = wlan_hdd_tdls_get_all_peers(pAdapter, extra, WE_MAX_STR_LEN)+1;
break;
}
#endif
#ifdef WLAN_FEATURE_11W
case WE_GET_11W_INFO:
{
hddLog(LOGE, "WE_GET_11W_ENABLED = %d", pWextState->roamProfile.MFPEnabled );
snprintf(extra, WE_MAX_STR_LEN,
"\n BSSID %02X:%02X:%02X:%02X:%02X:%02X, Is PMF Assoc? %d"
"\n Number of Unprotected Disassocs %d"
"\n Number of Unprotected Deauths %d",
(*pWextState->roamProfile.BSSIDs.bssid)[0], (*pWextState->roamProfile.BSSIDs.bssid)[1],
(*pWextState->roamProfile.BSSIDs.bssid)[2], (*pWextState->roamProfile.BSSIDs.bssid)[3],
(*pWextState->roamProfile.BSSIDs.bssid)[4], (*pWextState->roamProfile.BSSIDs.bssid)[5],
pWextState->roamProfile.MFPEnabled, pAdapter->hdd_stats.hddPmfStats.numUnprotDisassocRx,
pAdapter->hdd_stats.hddPmfStats.numUnprotDeauthRx);
wrqu->data.length = strlen(extra)+1;
break;
}
#endif
case WE_GET_SNR:
{
v_S7_t s7snr = 0;
int status = 0;
hdd_context_t *pHddCtx;
hdd_station_ctx_t *pHddStaCtx;
pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
status = wlan_hdd_validate_context(pHddCtx);
if (0 != status)
{
hddLog(LOGE, "%s: getSNR: HDD context is not valid", __func__);
return status;
}
pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
if (0 == pHddCtx->cfg_ini->fEnableSNRMonitoring ||
eConnectionState_Associated != pHddStaCtx->conn_info.connState)
{
hddLog(LOGE, "%s: getSNR failed: Enable SNR Monitoring-%d,"
" ConnectionState-%d", __func__,
pHddCtx->cfg_ini->fEnableSNRMonitoring,
pHddStaCtx->conn_info.connState);
return -ENONET;
}
/*update the stats in TL*/
wlan_hdd_get_station_stats(pAdapter);
wlan_hdd_get_snr(pAdapter, &s7snr);
snprintf(extra, WE_MAX_STR_LEN, "snr=%d",s7snr);
wrqu->data.length = strlen(extra) + 1;
break;
}
default:
{
hddLog(LOGE, "%s: Invalid IOCTL command %d", __func__, sub_cmd );
break;
}
}
return 0;
}
static int iw_get_char_setnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_char_setnone(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
/* action sub-ioctls */
static int __iw_setnone_getnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
int sub_cmd;
int ret = 0; /* success */
struct iw_point s_priv_data;
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
/* helper function to get iwreq_data with compat handling. */
if (hdd_priv_get_data(&s_priv_data, wrqu))
{
return -EINVAL;
}
sub_cmd = s_priv_data.flags;
switch (sub_cmd)
{
case WE_CLEAR_STATS:
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,"%s: clearing", __func__);
memset(&pAdapter->stats, 0, sizeof(pAdapter->stats));
memset(&pAdapter->hdd_stats, 0, sizeof(pAdapter->hdd_stats));
break;
}
case WE_INIT_AP:
{
/* FIXME: needs to be revisited if multiple SAPs are to be supported */
/* As Soft AP mode might have been changed to STA already when
 * hostapd was killed, find the adapter by name rather than by
 * mode */
hdd_adapter_t* pAdapter_to_stop =
hdd_get_adapter_by_name(WLAN_HDD_GET_CTX(pAdapter), "softap.0");
if( pAdapter_to_stop )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"Adapter with name softap.0 already "
"exists, ignoring the request.\nRemove the "
"adapter and try again\n");
break;
}
pr_info("Init AP trigger\n");
hdd_open_adapter( WLAN_HDD_GET_CTX(pAdapter), WLAN_HDD_SOFTAP, "softap.%d",
wlan_hdd_get_intf_addr( WLAN_HDD_GET_CTX(pAdapter) ),TRUE);
break;
}
case WE_STOP_AP:
{
/* FIXME: needs to be revisited if multiple SAPs are to be supported */
/* As Soft AP mode has already been changed to STA with the killing of
 * hostapd, this is dead code; find the adapter by name rather than by mode */
hdd_adapter_t* pAdapter_to_stop =
hdd_get_adapter_by_name(WLAN_HDD_GET_CTX(pAdapter), "softap.0");
if( pAdapter_to_stop )
{
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
pr_info("Stopping AP mode\n");
if (TRUE == sme_IsPmcBmps(WLAN_HDD_GET_HAL_CTX(pAdapter)))
{
/* EXIT BMPS as fw cannot handle DEL_STA when its in BMPS */
wlan_hdd_enter_bmps(pAdapter, DRIVER_POWER_MODE_ACTIVE);
}
/*Make sure that pAdapter cleaned properly*/
hdd_stop_adapter( pHddCtx, pAdapter_to_stop, VOS_TRUE );
hdd_deinit_adapter( pHddCtx, pAdapter_to_stop );
memset(&pAdapter_to_stop->sessionCtx, 0, sizeof(pAdapter_to_stop->sessionCtx));
wlan_hdd_release_intf_addr(WLAN_HDD_GET_CTX(pAdapter),
pAdapter_to_stop->macAddressCurrent.bytes);
hdd_close_adapter(WLAN_HDD_GET_CTX(pAdapter), pAdapter_to_stop,
TRUE);
if (FALSE == sme_IsPmcBmps(WLAN_HDD_GET_HAL_CTX(pAdapter)))
{
/* put the device back into BMPS */
wlan_hdd_enter_bmps(pAdapter, DRIVER_POWER_MODE_AUTO);
}
}
else
{
printk(KERN_ERR"SAP adapter not found to stop it!\n");
}
break;
}
#ifdef WLAN_BTAMP_FEATURE
case WE_ENABLE_AMP:
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,"%s: enabling AMP", __func__);
WLANBAP_RegisterWithHCI(pAdapter);
break;
}
case WE_DISABLE_AMP:
{
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX( pAdapter );
VOS_STATUS status;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,"%s: disabling AMP", __func__);
pHddCtx = WLAN_HDD_GET_CTX( pAdapter );
status = WLANBAP_StopAmp();
if(VOS_STATUS_SUCCESS != status )
{
pHddCtx->isAmpAllowed = VOS_TRUE;
hddLog(VOS_TRACE_LEVEL_FATAL,
"%s: Failed to stop AMP", __func__);
}
else
{
//a state machine implementation in PAL is TBD to avoid this delay
msleep(500);
pHddCtx->isAmpAllowed = VOS_FALSE;
WLANBAP_DeregisterFromHCI();
}
break;
}
#endif
case WE_ENABLE_DXE_STALL_DETECT:
{
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
sme_transportDebug(hHal, VOS_FALSE, VOS_TRUE);
break;
}
case WE_DISPLAY_DXE_SNAP_SHOT:
{
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
sme_transportDebug(hHal, VOS_TRUE, VOS_FALSE);
break;
}
case WE_DISPLAY_DATAPATH_SNAP_SHOT:
{
hddLog(LOGE, "%s: called %d",__func__, sub_cmd);
hdd_wmm_tx_snapshot(pAdapter);
WLANTL_TLDebugMessage(WLANTL_DEBUG_TX_SNAPSHOT);
break;
}
case WE_SET_REASSOC_TRIGGER:
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tpAniSirGlobal pMac = WLAN_HDD_GET_HAL_CTX(pAdapter);
v_U32_t roamId = 0;
tCsrRoamModifyProfileFields modProfileFields;
sme_GetModifyProfileFields(pMac, pAdapter->sessionId, &modProfileFields);
sme_RoamReassoc(pMac, pAdapter->sessionId, NULL, modProfileFields, &roamId, 1);
return 0;
}
case WE_STOP_OBSS_SCAN:
{
/* 1. OBSS scan is mandatory while operating in 2.4GHz
2. OBSS scan is stopped by firmware during disassociation
3. The OBSS stop command is added for debugging purposes */
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tpAniSirGlobal pMac;
if (pAdapter == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
" pAdapter is NULL ");
return -EINVAL;
}
pMac = WLAN_HDD_GET_HAL_CTX(pAdapter);
if (pMac == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
" pMac is NULL ");
return -EINVAL;
}
sme_HT40StopOBSSScan(pMac, pAdapter->sessionId);
}
break;
#ifdef DEBUG_ROAM_DELAY
case WE_DUMP_ROAM_TIMER_LOG:
{
vos_dump_roam_time_log_service();
break;
}
case WE_RESET_ROAM_TIMER_LOG:
{
vos_reset_roam_timer_log();
break;
}
#endif
default:
{
hddLog(LOGE, "%s: unknown ioctl %d", __func__, sub_cmd);
break;
}
}
return ret;
}
static int iw_setnone_getnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_setnone_getnone(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
void hdd_wmm_tx_snapshot(hdd_adapter_t *pAdapter)
{
/*
 * Display HDD WMM information for the Tx queues.
 * Prints global as well as per-client queues, depending on
 * whether any clients are registered.
 */
int i = 0, j = 0;
v_CONTEXT_t pVosContext = ( WLAN_HDD_GET_CTX(pAdapter))->pvosContext;
ptSapContext pSapCtx = VOS_GET_SAP_CB(pVosContext);
for ( i=0; i< NUM_TX_QUEUES; i++)
{
spin_lock_bh(&pAdapter->wmm_tx_queue[i].lock);
hddLog(LOGE, "HDD WMM TxQueue Info For AC: %d Count: %d PrevAdress:%p, NextAddress:%p",
i, pAdapter->wmm_tx_queue[i].count,
pAdapter->wmm_tx_queue[i].anchor.prev, pAdapter->wmm_tx_queue[i].anchor.next);
spin_unlock_bh(&pAdapter->wmm_tx_queue[i].lock);
}
if(pSapCtx == NULL){
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
FL("psapCtx is NULL"));
return;
}
for(i =0; i<WLAN_MAX_STA_COUNT; i++)
{
if(pSapCtx->aStaInfo[i].isUsed)
{
hddLog(LOGE, "******STAIndex: %d*********", i);
for ( j=0; j< NUM_TX_QUEUES; j++)
{
spin_lock_bh(&pSapCtx->aStaInfo[i].wmm_tx_queue[j].lock);
hddLog(LOGE, "HDD TxQueue Info For AC: %d Count: %d PrevAdress:%p, NextAddress:%p",
j, pSapCtx->aStaInfo[i].wmm_tx_queue[j].count,
pSapCtx->aStaInfo[i].wmm_tx_queue[j].anchor.prev,
pSapCtx->aStaInfo[i].wmm_tx_queue[j].anchor.next);
spin_unlock_bh(&pSapCtx->aStaInfo[i].wmm_tx_queue[j].lock);
}
}
}
}
static int __iw_set_var_ints_getnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
int sub_cmd;
int *apps_args = (int *) extra;
hdd_station_ctx_t *pStaCtx = NULL ;
hdd_context_t *pHddCtx = NULL;
hdd_ap_ctx_t *pAPCtx = NULL;
int cmd = 0;
int staId = 0;
int ret = 0;
if (extra == NULL)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: NULL extra buffer pointer", __func__);
return -EINVAL;
}
sub_cmd = wrqu->data.flags;
hddLog(LOG1, "%s: Received length %d", __func__, wrqu->data.length);
pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
ret = wlan_hdd_validate_context(pHddCtx);
if (0 != ret)
{
VOS_TRACE( VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: HDD context is Null", __func__);
return ret;
}
if(( sub_cmd == WE_MCC_CONFIG_CREDENTIAL ) ||
(sub_cmd == WE_MCC_CONFIG_PARAMS ))
{
if(( pAdapter->device_mode == WLAN_HDD_INFRA_STATION )||
( pAdapter->device_mode == WLAN_HDD_P2P_CLIENT ))
{
pStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
staId = pStaCtx->conn_info.staId[0];
}
else if (( pAdapter->device_mode == WLAN_HDD_P2P_GO ) ||
( pAdapter->device_mode == WLAN_HDD_SOFTAP ))
{
pAPCtx = WLAN_HDD_GET_AP_CTX_PTR(pAdapter);
staId = pAPCtx->uBCStaId;
}
else
{
hddLog(LOGE, "%s: Device mode %d not recognised", __FUNCTION__, pAdapter->device_mode);
return 0;
}
}
switch (sub_cmd)
{
case WE_LOG_DUMP_CMD:
{
vos_ssr_protect(__func__);
hddLog(LOG1, "%s: LOG_DUMP %d arg1 %d arg2 %d arg3 %d arg4 %d",
__func__, apps_args[0], apps_args[1], apps_args[2],
apps_args[3], apps_args[4]);
logPrintf(hHal, apps_args[0], apps_args[1], apps_args[2],
apps_args[3], apps_args[4]);
vos_ssr_unprotect(__func__);
}
break;
case WE_P2P_NOA_CMD:
{
p2p_app_setP2pPs_t p2pNoA;
p2pNoA.opp_ps = apps_args[0];
p2pNoA.ctWindow = apps_args[1];
p2pNoA.duration = apps_args[2];
p2pNoA.interval = apps_args[3];
p2pNoA.count = apps_args[4];
p2pNoA.single_noa_duration = apps_args[5];
p2pNoA.psSelection = apps_args[6];
hddLog(LOG1, "%s: P2P_NOA_ATTR:oppPS %d ctWindow %d duration %d "
"interval %d count %d single noa duration %d PsSelection %x",
__func__, apps_args[0], apps_args[1], apps_args[2],
apps_args[3], apps_args[4], apps_args[5], apps_args[6]);
hdd_setP2pPs(dev, &p2pNoA);
}
break;
case WE_MTRACE_SELECTIVE_MODULE_LOG_ENABLE_CMD:
{
hddLog(LOG1, "%s: SELECTIVE_MODULE_LOG %d arg1 %d arg2",
__func__, apps_args[0], apps_args[1]);
vosTraceEnable(apps_args[0], apps_args[1]);
}
break;
case WE_MTRACE_DUMP_CMD:
{
hddLog(LOG1, "%s: MTRACE_DUMP code %d session %d count %d "
"bitmask_of_module %d ",
__func__, apps_args[0], apps_args[1], apps_args[2],
apps_args[3]);
vosTraceDumpAll((void*)hHal , apps_args[0], apps_args[1],
apps_args[2], apps_args[3]);
}
break;
case WE_MCC_CONFIG_CREDENTIAL :
{
cmd = 287; //Command should be updated if there is any change
// in the Riva dump command
if((apps_args[0] >= 40 ) && (apps_args[0] <= 160 ))
{
logPrintf(hHal, cmd, staId, apps_args[0], apps_args[1], apps_args[2]);
}
else
{
hddLog(LOGE, "%s : Enter valid MccCredential value between MIN :40 and MAX:160", __func__);
return 0;
}
}
break;
case WE_MCC_CONFIG_PARAMS :
{
cmd = 288; //command Should be updated if there is any change
// in the Riva dump command
hdd_validate_mcc_config(pAdapter, staId, apps_args[0], apps_args[1],apps_args[2]);
}
break;
#ifdef FEATURE_WLAN_TDLS
case WE_TDLS_CONFIG_PARAMS :
{
tdls_config_params_t tdlsParams;
tdlsParams.tdls = apps_args[0];
tdlsParams.tx_period_t = apps_args[1];
tdlsParams.tx_packet_n = apps_args[2];
tdlsParams.discovery_period_t = apps_args[3];
tdlsParams.discovery_tries_n = apps_args[4];
tdlsParams.idle_timeout_t = apps_args[5];
tdlsParams.idle_packet_n = apps_args[6];
tdlsParams.rssi_hysteresis = apps_args[7];
tdlsParams.rssi_trigger_threshold = apps_args[8];
tdlsParams.rssi_teardown_threshold = apps_args[9];
wlan_hdd_tdls_set_params(dev, &tdlsParams);
}
break;
#endif
default:
{
hddLog(LOGE, "%s: Invalid IOCTL command %d",
__func__, sub_cmd );
}
break;
}
return 0;
}
static int iw_hdd_set_var_ints_getnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
union iwreq_data u_priv_wrqu;
int apps_args[MAX_VAR_ARGS] = {0};
int num_args;
/* helper function to get iwreq_data with compat handling. */
if (hdd_priv_get_data(&u_priv_wrqu.data, wrqu))
{
return -EINVAL;
}
if (NULL == u_priv_wrqu.data.pointer)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: NULL data pointer", __func__);
return -EINVAL;
}
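/* Clamp the argument count so the copy_from_user() below cannot
 * overflow the fixed-size apps_args[] array. */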
num_args = u_priv_wrqu.data.length;
if (num_args > MAX_VAR_ARGS)
{
num_args = MAX_VAR_ARGS;
}
if (copy_from_user(apps_args, u_priv_wrqu.data.pointer,
(sizeof(int)) * num_args))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: failed to copy data from user buffer", __func__);
return -EFAULT;
}
vos_ssr_protect(__func__);
ret = __iw_set_var_ints_getnone(dev, info, &u_priv_wrqu,
(char *)&apps_args);
vos_ssr_unprotect(__func__);
return ret;
}
int iw_set_var_ints_getnone(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_var_ints_getnone(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_add_tspec(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
hdd_wlan_wmm_status_e *pStatus = (hdd_wlan_wmm_status_e *)extra;
int params[HDD_WLAN_WMM_PARAM_COUNT];
sme_QosWmmTspecInfo tSpec;
v_U32_t handle;
struct iw_point s_priv_data;
// make sure the application is sufficiently privileged
// note that the kernel will do this for "set" ioctls, but since
// this ioctl wants to return status to user space it must be
// defined as a "get" ioctl
if (!capable(CAP_NET_ADMIN))
{
return -EPERM;
}
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
// we must be associated in order to add a tspec
if (eConnectionState_Associated != pHddStaCtx->conn_info.connState)
{
*pStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM;
return 0;
}
// since we are defined to be a "get" ioctl, and since the number
// of params exceeds the number of params that wireless extensions
// will pass down in the iwreq_data, we must copy the "set" params.
// We must handle the compat for iwreq_data in a 32-bit userspace /
// 64-bit kernel environment.
// helper function to get iwreq_data with compat handling.
if (hdd_priv_get_data(&s_priv_data, wrqu))
{
*pStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM;
return 0;
}
// make sure all params are correctly passed to function
if ((NULL == s_priv_data.pointer) ||
(HDD_WLAN_WMM_PARAM_COUNT != s_priv_data.length))
{
*pStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM;
return 0;
}
// copy the params from user space ourselves
if (copy_from_user(&params, s_priv_data.pointer, sizeof(params)))
{
// could not copy the params from user space
return -EIO;
}
// clear the tspec
memset(&tSpec, 0, sizeof(tSpec));
// validate the handle
handle = params[HDD_WLAN_WMM_PARAM_HANDLE];
if (HDD_WMM_HANDLE_IMPLICIT == handle)
{
// that one is reserved
*pStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM;
return 0;
}
// validate the TID
if (params[HDD_WLAN_WMM_PARAM_TID] > 7)
{
// out of range
*pStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM;
return 0;
}
tSpec.ts_info.tid = params[HDD_WLAN_WMM_PARAM_TID];
// validate the direction
switch (params[HDD_WLAN_WMM_PARAM_DIRECTION])
{
case HDD_WLAN_WMM_DIRECTION_UPSTREAM:
tSpec.ts_info.direction = SME_QOS_WMM_TS_DIR_UPLINK;
break;
case HDD_WLAN_WMM_DIRECTION_DOWNSTREAM:
tSpec.ts_info.direction = SME_QOS_WMM_TS_DIR_DOWNLINK;
break;
case HDD_WLAN_WMM_DIRECTION_BIDIRECTIONAL:
tSpec.ts_info.direction = SME_QOS_WMM_TS_DIR_BOTH;
break;
default:
// unknown
*pStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM;
return 0;
}
tSpec.ts_info.psb = params[HDD_WLAN_WMM_PARAM_APSD];
// validate the user priority
if (params[HDD_WLAN_WMM_PARAM_USER_PRIORITY] >= SME_QOS_WMM_UP_MAX)
{
// out of range
*pStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM;
return 0;
}
tSpec.ts_info.up = params[HDD_WLAN_WMM_PARAM_USER_PRIORITY];
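/* Defensive re-check; the UP range was already validated above. */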
if(0 > tSpec.ts_info.up || SME_QOS_WMM_UP_MAX < tSpec.ts_info.up)
{
hddLog(VOS_TRACE_LEVEL_ERROR,"***ts_info.up out of bounds***");
return 0;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO_HIGH,
"%s:TS_INFO PSB %d UP %d !!!", __func__,
tSpec.ts_info.psb, tSpec.ts_info.up);
tSpec.nominal_msdu_size = params[HDD_WLAN_WMM_PARAM_NOMINAL_MSDU_SIZE];
tSpec.maximum_msdu_size = params[HDD_WLAN_WMM_PARAM_MAXIMUM_MSDU_SIZE];
tSpec.min_data_rate = params[HDD_WLAN_WMM_PARAM_MINIMUM_DATA_RATE];
tSpec.mean_data_rate = params[HDD_WLAN_WMM_PARAM_MEAN_DATA_RATE];
tSpec.peak_data_rate = params[HDD_WLAN_WMM_PARAM_PEAK_DATA_RATE];
tSpec.max_burst_size = params[HDD_WLAN_WMM_PARAM_MAX_BURST_SIZE];
tSpec.min_phy_rate = params[HDD_WLAN_WMM_PARAM_MINIMUM_PHY_RATE];
tSpec.surplus_bw_allowance = params[HDD_WLAN_WMM_PARAM_SURPLUS_BANDWIDTH_ALLOWANCE];
tSpec.min_service_interval = params[HDD_WLAN_WMM_PARAM_SERVICE_INTERVAL];
tSpec.max_service_interval = params[HDD_WLAN_WMM_PARAM_MAX_SERVICE_INTERVAL];
tSpec.suspension_interval = params[HDD_WLAN_WMM_PARAM_SUSPENSION_INTERVAL];
tSpec.inactivity_interval = params[HDD_WLAN_WMM_PARAM_INACTIVITY_INTERVAL];
tSpec.ts_info.burst_size_defn = params[HDD_WLAN_WMM_PARAM_BURST_SIZE_DEFN];
// Save the expected UAPSD settings by application, this will be needed
// when re-negotiating UAPSD settings during BT Coex cases.
tSpec.expec_psb_byapp = params[HDD_WLAN_WMM_PARAM_APSD];
// validate the ts info ack policy
switch (params[HDD_WLAN_WMM_PARAM_ACK_POLICY])
{
case HDD_WLAN_WMM_TS_INFO_ACK_POLICY_NORMAL_ACK:
tSpec.ts_info.ack_policy = SME_QOS_WMM_TS_ACK_POLICY_NORMAL_ACK;
break;
case HDD_WLAN_WMM_TS_INFO_ACK_POLICY_HT_IMMEDIATE_BLOCK_ACK:
tSpec.ts_info.ack_policy = SME_QOS_WMM_TS_ACK_POLICY_HT_IMMEDIATE_BLOCK_ACK;
break;
default:
// unknown
*pStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM;
return 0;
}
*pStatus = hdd_wmm_addts(pAdapter, handle, &tSpec);
return 0;
}
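/* each __iw_* handler in this block is exposed through a thin wrapper
* that brackets the call with vos_ssr_protect()/vos_ssr_unprotect(),
* so the ioctl cannot race with a subsystem restart (SSR) in progress */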
static int iw_add_tspec(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_add_tspec(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_del_tspec(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
int *params = (int *)extra;
hdd_wlan_wmm_status_e *pStatus = (hdd_wlan_wmm_status_e *)extra;
v_U32_t handle;
// make sure the application is sufficiently priviledged
// note that the kernel will do this for "set" ioctls, but since
// this ioctl wants to return status to user space it must be
// defined as a "get" ioctl
if (!capable(CAP_NET_ADMIN))
{
return -EPERM;
}
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
// although we are defined to be a "get" ioctl, the params we require
// will fit in the iwreq_data, therefore unlike iw_add_tspec() there
// is no need to copy the params from user space
// validate the handle
handle = params[HDD_WLAN_WMM_PARAM_HANDLE];
if (HDD_WMM_HANDLE_IMPLICIT == handle)
{
// that one is reserved
*pStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM;
return 0;
}
*pStatus = hdd_wmm_delts(pAdapter, handle);
return 0;
}
static int iw_del_tspec(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_del_tspec(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_get_tspec(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
int *params = (int *)extra;
hdd_wlan_wmm_status_e *pStatus = (hdd_wlan_wmm_status_e *)extra;
v_U32_t handle;
// although we are defined to be a "get" ioctl, the params we require
// will fit in the iwreq_data, therefore unlike iw_add_tspec() there
// is no need to copy the params from user space
// validate the handle
handle = params[HDD_WLAN_WMM_PARAM_HANDLE];
if (HDD_WMM_HANDLE_IMPLICIT == handle)
{
// that one is reserved
*pStatus = HDD_WLAN_WMM_STATUS_SETUP_FAILED_BAD_PARAM;
return 0;
}
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
*pStatus = hdd_wmm_checkts(pAdapter, handle);
return 0;
}
static int iw_get_tspec(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_tspec(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
#ifdef WLAN_FEATURE_VOWIFI_11R
//
// Each time the supplicant has the auth request or reassoc request
// IEs ready, they are pushed to the driver. The driver will in turn
// use them to send out the auth req and reassoc req for the 11r FT
// association.
//
static int __iw_set_fties(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
if (!wrqu->data.length)
{
hddLog(LOGE, FL("called with 0 length IEs"));
return -EINVAL;
}
if (wrqu->data.pointer == NULL)
{
hddLog(LOGE, FL("called with NULL IE"));
return -EINVAL;
}
// Added for debug on reception of Re-assoc Req.
if (eConnectionState_Associated != pHddStaCtx->conn_info.connState)
{
hddLog(LOGE, FL("Called with Ie of length = %d when not associated"),
wrqu->data.length);
hddLog(LOGE, FL("Should be Re-assoc Req IEs"));
}
#ifdef WLAN_FEATURE_VOWIFI_11R_DEBUG
hddLog(LOGE, FL("%s called with Ie of length = %d"), __func__, wrqu->data.length);
#endif
// Pass the received FT IEs to SME
sme_SetFTIEs( WLAN_HDD_GET_HAL_CTX(pAdapter), pAdapter->sessionId, extra,
wrqu->data.length);
return 0;
}
static int iw_set_fties(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_fties(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
#endif
static int __iw_set_dynamic_mcbc_filter(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tpRcvFltMcAddrList pRequest = (tpRcvFltMcAddrList)extra;
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
tpSirWlanSetRxpFilters wlanRxpFilterParam;
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
tpSirRcvFltMcAddrList mc_addr_list_ptr;
int idx;
eHalStatus ret_val;
if (pHddCtx->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
if (HDD_MULTICAST_FILTER_LIST == pRequest->mcastBcastFilterSetting)
{
#ifdef WLAN_FEATURE_PACKET_FILTERING
mc_addr_list_ptr = vos_mem_malloc(sizeof(tSirRcvFltMcAddrList));
if (NULL == mc_addr_list_ptr)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: vos_mem_alloc failed", __func__);
return -ENOMEM;
}
mc_addr_list_ptr->ulMulticastAddrCnt = pRequest->mcast_addr_cnt;
if (mc_addr_list_ptr->ulMulticastAddrCnt > HDD_MAX_NUM_MULTICAST_ADDRESS)
mc_addr_list_ptr->ulMulticastAddrCnt = HDD_MAX_NUM_MULTICAST_ADDRESS;
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "%s MC Addr List Cnt %d", __func__,
mc_addr_list_ptr->ulMulticastAddrCnt);
for (idx = 0; idx < mc_addr_list_ptr->ulMulticastAddrCnt; idx++)
{
memcpy(&mc_addr_list_ptr->multicastAddr[idx],
pRequest->multicastAddr[idx], HDD_WLAN_MAC_ADDR_LEN);
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "%s MC Addr for Idx %d ="MAC_ADDRESS_STR, __func__,
idx, MAC_ADDR_ARRAY(mc_addr_list_ptr->multicastAddr[idx]));
}
ret_val = sme_8023MulticastList(hHal, pAdapter->sessionId, mc_addr_list_ptr);
vos_mem_free(mc_addr_list_ptr);
if (eHAL_STATUS_SUCCESS != ret_val)
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: Failure to Set MC Address List",
__func__);
return -EINVAL;
}
#endif //WLAN_FEATURE_PACKET_FILTERING
}
else
{
hddLog(VOS_TRACE_LEVEL_INFO_HIGH,
"%s: Set MC BC Filter Config request: %d suspend %d",
__func__, pRequest->mcastBcastFilterSetting,
pHddCtx->hdd_wlan_suspended);
pHddCtx->configuredMcastBcastFilter = pRequest->mcastBcastFilterSetting;
if (pHddCtx->hdd_wlan_suspended)
{
wlanRxpFilterParam = vos_mem_malloc(sizeof(tSirWlanSetRxpFilters));
if (NULL == wlanRxpFilterParam)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: vos_mem_alloc failed", __func__);
return -EINVAL;
}
wlanRxpFilterParam->configuredMcstBcstFilterSetting =
pRequest->mcastBcastFilterSetting;
wlanRxpFilterParam->setMcstBcstFilter = TRUE;
hdd_conf_hostoffload(pAdapter, TRUE);
wlanRxpFilterParam->configuredMcstBcstFilterSetting =
pHddCtx->configuredMcastBcastFilter;
hddLog(VOS_TRACE_LEVEL_INFO, "%s:MC/BC changed Req %d Set %d En %d",
__func__,
pHddCtx->configuredMcastBcastFilter,
wlanRxpFilterParam->configuredMcstBcstFilterSetting,
wlanRxpFilterParam->setMcstBcstFilter);
if (eHAL_STATUS_SUCCESS !=
sme_ConfigureRxpFilter(WLAN_HDD_GET_HAL_CTX(pAdapter),
wlanRxpFilterParam))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Failure to execute set HW MC/BC Filter request",
__func__);
vos_mem_free(wlanRxpFilterParam);
return -EINVAL;
}
if (VOS_TRUE == pHddCtx->sus_res_mcastbcast_filter_valid)
{
pHddCtx->sus_res_mcastbcast_filter =
pRequest->mcastBcastFilterSetting;
}
}
}
return 0;
}
static int iw_set_dynamic_mcbc_filter(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_dynamic_mcbc_filter(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_clear_dynamic_mcbc_filter(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
tpSirWlanSetRxpFilters wlanRxpFilterParam;
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "%s: clear dynamic MC/BC filter", __func__);
//Reset the filter to INI value as we have to clear the dynamic filter
pHddCtx->configuredMcastBcastFilter = pHddCtx->cfg_ini->mcastBcastFilterSetting;
//Configure FW with new setting
if (pHddCtx->hdd_wlan_suspended)
{
wlanRxpFilterParam = vos_mem_malloc(sizeof(tSirWlanSetRxpFilters));
if (NULL == wlanRxpFilterParam)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: vos_mem_alloc failed", __func__);
return -EINVAL;
}
wlanRxpFilterParam->configuredMcstBcstFilterSetting =
pHddCtx->configuredMcastBcastFilter;
wlanRxpFilterParam->setMcstBcstFilter = TRUE;
hdd_conf_hostoffload(pAdapter, TRUE);
wlanRxpFilterParam->configuredMcstBcstFilterSetting =
pHddCtx->configuredMcastBcastFilter;
if (eHAL_STATUS_SUCCESS !=
sme_ConfigureRxpFilter(WLAN_HDD_GET_HAL_CTX(pAdapter),
wlanRxpFilterParam))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Failure to execute set HW MC/BC Filter request",
__func__);
vos_mem_free(wlanRxpFilterParam);
return -EINVAL;
}
if (VOS_TRUE == pHddCtx->sus_res_mcastbcast_filter_valid)
{
pHddCtx->sus_res_mcastbcast_filter =
pHddCtx->cfg_ini->mcastBcastFilterSetting;
}
}
return 0;
}
static int iw_clear_dynamic_mcbc_filter(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_clear_dynamic_mcbc_filter(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_host_offload(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tpHostOffloadRequest pRequest = (tpHostOffloadRequest) extra;
tSirHostOffloadReq offloadRequest;
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
/* Debug display of request components. */
switch (pRequest->offloadType)
{
case WLAN_IPV4_ARP_REPLY_OFFLOAD:
hddLog(VOS_TRACE_LEVEL_WARN, "%s: Host offload request: ARP reply", __func__);
switch (pRequest->enableOrDisable)
{
case WLAN_OFFLOAD_DISABLE:
hddLog(VOS_TRACE_LEVEL_WARN, " disable");
break;
case WLAN_OFFLOAD_ARP_AND_BC_FILTER_ENABLE:
hddLog(VOS_TRACE_LEVEL_WARN, " BC Filtering enable");
case WLAN_OFFLOAD_ENABLE:
hddLog(VOS_TRACE_LEVEL_WARN, " ARP offload enable");
hddLog(VOS_TRACE_LEVEL_WARN, " IP address: %d.%d.%d.%d",
pRequest->params.hostIpv4Addr[0], pRequest->params.hostIpv4Addr[1],
pRequest->params.hostIpv4Addr[2], pRequest->params.hostIpv4Addr[3]);
}
break;
case WLAN_IPV6_NEIGHBOR_DISCOVERY_OFFLOAD:
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "%s: Host offload request: neighbor discovery",
__func__);
switch (pRequest->enableOrDisable)
{
case WLAN_OFFLOAD_DISABLE:
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, " disable");
break;
case WLAN_OFFLOAD_ENABLE:
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, " enable");
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, " IP address: %x:%x:%x:%x:%x:%x:%x:%x",
*(v_U16_t *)(pRequest->params.hostIpv6Addr),
*(v_U16_t *)(pRequest->params.hostIpv6Addr + 2),
*(v_U16_t *)(pRequest->params.hostIpv6Addr + 4),
*(v_U16_t *)(pRequest->params.hostIpv6Addr + 6),
*(v_U16_t *)(pRequest->params.hostIpv6Addr + 8),
*(v_U16_t *)(pRequest->params.hostIpv6Addr + 10),
*(v_U16_t *)(pRequest->params.hostIpv6Addr + 12),
*(v_U16_t *)(pRequest->params.hostIpv6Addr + 14));
}
}
/* Execute offload request. The reason that we can copy the request information
from the ioctl structure to the SME structure is that they are laid out
exactly the same. Otherwise, each piece of information would have to be
copied individually. */
memcpy(&offloadRequest, pRequest, wrqu->data.length);
if (eHAL_STATUS_SUCCESS != sme_SetHostOffload(WLAN_HDD_GET_HAL_CTX(pAdapter),
pAdapter->sessionId, &offloadRequest))
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: Failure to execute host offload request",
__func__);
return -EINVAL;
}
return 0;
}
static int iw_set_host_offload(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_host_offload(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_keepalive_params(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tpKeepAliveRequest pRequest = (tpKeepAliveRequest) extra;
tSirKeepAliveReq keepaliveRequest;
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
/* Debug display of request components. */
hddLog(VOS_TRACE_LEVEL_INFO,
"%s: Set Keep Alive Request : TimePeriod %d size %zu",
__func__, pRequest->timePeriod, sizeof(tKeepAliveRequest));
switch (pRequest->packetType)
{
case WLAN_KEEP_ALIVE_NULL_PKT:
hddLog(VOS_TRACE_LEVEL_WARN, "%s: Keep Alive Request: Tx NULL", __func__);
break;
case WLAN_KEEP_ALIVE_UNSOLICIT_ARP_RSP:
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "%s: Keep Alive Request: Tx UnSolicited ARP RSP",
__func__);
hddLog(VOS_TRACE_LEVEL_WARN, " Host IP address: %d.%d.%d.%d",
pRequest->hostIpv4Addr[0], pRequest->hostIpv4Addr[1],
pRequest->hostIpv4Addr[2], pRequest->hostIpv4Addr[3]);
hddLog(VOS_TRACE_LEVEL_WARN, " Dest IP address: %d.%d.%d.%d",
pRequest->destIpv4Addr[0], pRequest->destIpv4Addr[1],
pRequest->destIpv4Addr[2], pRequest->destIpv4Addr[3]);
hddLog(VOS_TRACE_LEVEL_WARN, " Dest MAC address: %d:%d:%d:%d:%d:%d",
pRequest->destMacAddr[0], pRequest->destMacAddr[1],
pRequest->destMacAddr[2], pRequest->destMacAddr[3],
pRequest->destMacAddr[4], pRequest->destMacAddr[5]);
break;
}
/* Execute keep alive request. The reason that we can copy the request information
from the ioctl structure to the SME structure is that they are laid out
exactly the same. Otherwise, each piece of information would have to be
copied individually. */
memcpy(&keepaliveRequest, pRequest, wrqu->data.length);
hddLog(VOS_TRACE_LEVEL_ERROR, "set Keep: TP before SME %d", keepaliveRequest.timePeriod);
if (eHAL_STATUS_SUCCESS != sme_SetKeepAlive(WLAN_HDD_GET_HAL_CTX(pAdapter),
pAdapter->sessionId, &keepaliveRequest))
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: Failure to execute Keep Alive",
__func__);
return -EINVAL;
}
return 0;
}
static int iw_set_keepalive_params(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_keepalive_params(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
#ifdef WLAN_FEATURE_PACKET_FILTERING
int wlan_hdd_set_filter(hdd_context_t *pHddCtx, tpPacketFilterCfg pRequest,
tANI_U8 sessionId)
{
tSirRcvPktFilterCfgType packetFilterSetReq = {0};
tSirRcvFltPktClearParam packetFilterClrReq = {0};
int i=0;
if (pHddCtx->cfg_ini->disablePacketFilter)
{
hddLog(VOS_TRACE_LEVEL_FATAL, "%s: Packet Filtering Disabled. Returning ",
__func__ );
return 0;
}
if (pHddCtx->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
/* Debug display of request components. */
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: Packet Filter Request : FA %d params %d",
__func__, pRequest->filterAction, pRequest->numParams);
switch (pRequest->filterAction)
{
case HDD_RCV_FILTER_SET:
hddLog(VOS_TRACE_LEVEL_INFO, "%s: Set Packet Filter Request for Id: %d",
__func__, pRequest->filterId);
packetFilterSetReq.filterId = pRequest->filterId;
if ( pRequest->numParams >= HDD_MAX_CMP_PER_PACKET_FILTER)
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: Number of Params exceed Max limit %d",
__func__, pRequest->numParams);
return -EINVAL;
}
packetFilterSetReq.numFieldParams = pRequest->numParams;
packetFilterSetReq.coalesceTime = 0;
packetFilterSetReq.filterType = 1;
for (i=0; i < pRequest->numParams; i++)
{
packetFilterSetReq.paramsData[i].protocolLayer = pRequest->paramsData[i].protocolLayer;
packetFilterSetReq.paramsData[i].cmpFlag = pRequest->paramsData[i].cmpFlag;
packetFilterSetReq.paramsData[i].dataOffset = pRequest->paramsData[i].dataOffset;
packetFilterSetReq.paramsData[i].dataLength = pRequest->paramsData[i].dataLength;
packetFilterSetReq.paramsData[i].reserved = 0;
hddLog(VOS_TRACE_LEVEL_INFO, "Proto %d Comp Flag %d Filter Type %d",
pRequest->paramsData[i].protocolLayer, pRequest->paramsData[i].cmpFlag,
packetFilterSetReq.filterType);
hddLog(VOS_TRACE_LEVEL_INFO, "Data Offset %d Data Len %d",
pRequest->paramsData[i].dataOffset, pRequest->paramsData[i].dataLength);
memcpy(&packetFilterSetReq.paramsData[i].compareData,
pRequest->paramsData[i].compareData, pRequest->paramsData[i].dataLength);
memcpy(&packetFilterSetReq.paramsData[i].dataMask,
pRequest->paramsData[i].dataMask, pRequest->paramsData[i].dataLength);
hddLog(VOS_TRACE_LEVEL_INFO, "CData %d CData %d CData %d CData %d CData %d CData %d",
pRequest->paramsData[i].compareData[0], pRequest->paramsData[i].compareData[1],
pRequest->paramsData[i].compareData[2], pRequest->paramsData[i].compareData[3],
pRequest->paramsData[i].compareData[4], pRequest->paramsData[i].compareData[5]);
hddLog(VOS_TRACE_LEVEL_INFO, "MData %d MData %d MData %d MData %d MData %d MData %d",
pRequest->paramsData[i].dataMask[0], pRequest->paramsData[i].dataMask[1],
pRequest->paramsData[i].dataMask[2], pRequest->paramsData[i].dataMask[3],
pRequest->paramsData[i].dataMask[4], pRequest->paramsData[i].dataMask[5]);
}
if (eHAL_STATUS_SUCCESS != sme_ReceiveFilterSetFilter(pHddCtx->hHal, &packetFilterSetReq, sessionId))
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: Failure to execute Set Filter",
__func__);
return -EINVAL;
}
break;
case HDD_RCV_FILTER_CLEAR:
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "%s: Clear Packet Filter Request for Id: %d",
__func__, pRequest->filterId);
packetFilterClrReq.filterId = pRequest->filterId;
if (eHAL_STATUS_SUCCESS != sme_ReceiveFilterClearFilter(pHddCtx->hHal, &packetFilterClrReq, sessionId))
{
hddLog(VOS_TRACE_LEVEL_ERROR, "%s: Failure to execute Clear Filter",
__func__);
return -EINVAL;
}
break;
default :
hddLog(VOS_TRACE_LEVEL_INFO_HIGH, "%s: Packet Filter Request: Invalid %d",
__func__, pRequest->filterAction);
return -EINVAL;
}
return 0;
}
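/*
* Illustrative sketch only (not part of any call path): how a caller
* might populate a one-parameter receive filter using the constants
* seen above. The field values are hypothetical, and the base type
* tPacketFilterCfg is assumed from the tpPacketFilterCfg typedef.
*
*   tPacketFilterCfg req;
*   memset(&req, 0, sizeof(req));
*   req.filterAction = HDD_RCV_FILTER_SET;
*   req.filterId = 0;
*   req.numParams = 1;
*   req.paramsData[0].protocolLayer = HDD_FILTER_PROTO_TYPE_MAC;
*   req.paramsData[0].cmpFlag = HDD_FILTER_CMP_TYPE_NOT_EQUAL;
*   req.paramsData[0].dataOffset = WLAN_HDD_80211_FRM_DA_OFFSET;
*   req.paramsData[0].dataLength = 1;
*   req.paramsData[0].compareData[0] = HDD_IPV6_MC_CMP_DATA;
*   wlan_hdd_set_filter(pHddCtx, &req, sessionId);
*/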
int wlan_hdd_setIPv6Filter(hdd_context_t *pHddCtx, tANI_U8 filterType,
tANI_U8 sessionId)
{
tSirRcvPktFilterCfgType packetFilterSetReq = {0};
tSirRcvFltPktClearParam packetFilterClrReq = {0};
if (NULL == pHddCtx)
{
hddLog(VOS_TRACE_LEVEL_ERROR, FL(" NULL HDD Context Passed"));
return -EINVAL;
}
if (pHddCtx->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
if (pHddCtx->cfg_ini->disablePacketFilter)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Packet Filtering Disabled. Returning ",
__func__ );
return -EINVAL;
}
switch (filterType)
{
/* For setting the IPv6 MC and UC filters we need to configure
* 2 filters, one for MC and one for UC.
* The filter IDs must not be swapped, as that would render the
* UC filter ineffective. All of the values are hardcoded.
*
* The reason for a separate UC filter is that the driver needs
* to tell the FW that this specific filter is for unicast;
* otherwise the FW will not pass unicast frames through the
* filter by default. This is required to avoid any performance
* hit when no unicast filter is set and only MC/BC are set.
* The driver informs the FW by using the MAC protocol layer
* with the CMP flag set to MAX and the CMP data set to 1.
*/
case HDD_FILTER_IPV6_MC_UC:
/* Setting IPV6 MC Filter below
*/
packetFilterSetReq.filterType = HDD_RCV_FILTER_SET;
packetFilterSetReq.filterId = HDD_FILTER_ID_IPV6_MC;
packetFilterSetReq.numFieldParams = 2;
packetFilterSetReq.paramsData[0].protocolLayer =
HDD_FILTER_PROTO_TYPE_MAC;
packetFilterSetReq.paramsData[0].cmpFlag =
HDD_FILTER_CMP_TYPE_NOT_EQUAL;
packetFilterSetReq.paramsData[0].dataOffset =
WLAN_HDD_80211_FRM_DA_OFFSET;
packetFilterSetReq.paramsData[0].dataLength = 1;
packetFilterSetReq.paramsData[0].compareData[0] =
HDD_IPV6_MC_CMP_DATA;
packetFilterSetReq.paramsData[1].protocolLayer =
HDD_FILTER_PROTO_TYPE_ARP;
packetFilterSetReq.paramsData[1].cmpFlag =
HDD_FILTER_CMP_TYPE_NOT_EQUAL;
packetFilterSetReq.paramsData[1].dataOffset = ETH_ALEN;
packetFilterSetReq.paramsData[1].dataLength = 2;
packetFilterSetReq.paramsData[1].compareData[0] =
HDD_IPV6_CMP_DATA_0;
packetFilterSetReq.paramsData[1].compareData[1] =
HDD_IPV6_CMP_DATA_1;
if (eHAL_STATUS_SUCCESS != sme_ReceiveFilterSetFilter(pHddCtx->hHal,
&packetFilterSetReq, sessionId))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Failure to execute Set IPv6 Multicast Filter",
__func__);
return -EINVAL;
}
memset( &packetFilterSetReq, 0, sizeof(tSirRcvPktFilterCfgType));
/*
* Setting IPV6 UC Filter below
*/
packetFilterSetReq.filterType = HDD_RCV_FILTER_SET;
packetFilterSetReq.filterId = HDD_FILTER_ID_IPV6_UC;
packetFilterSetReq.numFieldParams = 2;
packetFilterSetReq.paramsData[0].protocolLayer =
HDD_FILTER_PROTO_TYPE_MAC;
packetFilterSetReq.paramsData[0].cmpFlag =
HDD_FILTER_CMP_TYPE_MAX;
packetFilterSetReq.paramsData[0].dataOffset = 0;
packetFilterSetReq.paramsData[0].dataLength = 1;
packetFilterSetReq.paramsData[0].compareData[0] =
HDD_IPV6_UC_CMP_DATA;
packetFilterSetReq.paramsData[1].protocolLayer =
HDD_FILTER_PROTO_TYPE_ARP;
packetFilterSetReq.paramsData[1].cmpFlag =
HDD_FILTER_CMP_TYPE_NOT_EQUAL;
packetFilterSetReq.paramsData[1].dataOffset = ETH_ALEN;
packetFilterSetReq.paramsData[1].dataLength = 2;
packetFilterSetReq.paramsData[1].compareData[0] =
HDD_IPV6_CMP_DATA_0;
packetFilterSetReq.paramsData[1].compareData[1] =
HDD_IPV6_CMP_DATA_1;
if (eHAL_STATUS_SUCCESS != sme_ReceiveFilterSetFilter(pHddCtx->hHal,
&packetFilterSetReq, sessionId))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Failure to execute Set IPv6 Unicast Filter",
__func__);
return -EINVAL;
}
break;
case HDD_FILTER_IPV6_MC:
/*
* IPV6 UC Filter might be already set,
* clear the UC Filter. As the Filter
* IDs are static, we can directly clear it.
*/
packetFilterSetReq.filterType = HDD_RCV_FILTER_SET;
packetFilterClrReq.filterId = HDD_FILTER_ID_IPV6_UC;
if (eHAL_STATUS_SUCCESS != sme_ReceiveFilterClearFilter(pHddCtx->hHal,
&packetFilterClrReq, sessionId))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Failure to execute Clear IPv6 Unicast Filter",
__func__);
return -EINVAL;
}
/*
* Setting IPV6 MC Filter below
*/
packetFilterSetReq.filterId = HDD_FILTER_ID_IPV6_MC;
packetFilterSetReq.numFieldParams = 2;
packetFilterSetReq.paramsData[0].protocolLayer =
HDD_FILTER_PROTO_TYPE_MAC;
packetFilterSetReq.paramsData[0].cmpFlag =
HDD_FILTER_CMP_TYPE_NOT_EQUAL;
packetFilterSetReq.paramsData[0].dataOffset =
WLAN_HDD_80211_FRM_DA_OFFSET;
packetFilterSetReq.paramsData[0].dataLength = 1;
packetFilterSetReq.paramsData[0].compareData[0] =
HDD_IPV6_MC_CMP_DATA;
packetFilterSetReq.paramsData[1].protocolLayer =
HDD_FILTER_PROTO_TYPE_ARP;
packetFilterSetReq.paramsData[1].cmpFlag =
HDD_FILTER_CMP_TYPE_NOT_EQUAL;
packetFilterSetReq.paramsData[1].dataOffset = ETH_ALEN;
packetFilterSetReq.paramsData[1].dataLength = 2;
packetFilterSetReq.paramsData[1].compareData[0] =
HDD_IPV6_CMP_DATA_0;
packetFilterSetReq.paramsData[1].compareData[1] =
HDD_IPV6_CMP_DATA_1;
if (eHAL_STATUS_SUCCESS != sme_ReceiveFilterSetFilter(pHddCtx->hHal,
&packetFilterSetReq, sessionId))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Failure to execute Set IPv6 Multicast Filter",
__func__);
return -EINVAL;
}
break;
default :
hddLog(VOS_TRACE_LEVEL_INFO_HIGH,
"%s: Packet Filter Request: Invalid",
__func__);
return -EINVAL;
}
return 0;
}
void wlan_hdd_set_mc_addr_list(hdd_adapter_t *pAdapter, v_U8_t set)
{
v_U8_t i;
tpSirRcvFltMcAddrList pMulticastAddrs = NULL;
tHalHandle hHal = NULL;
hdd_context_t* pHddCtx = (hdd_context_t*)pAdapter->pHddCtx;
if (NULL == pHddCtx)
{
hddLog(VOS_TRACE_LEVEL_ERROR, FL("HDD CTX is NULL"));
return;
}
hHal = pHddCtx->hHal;
if (NULL == hHal)
{
hddLog(VOS_TRACE_LEVEL_ERROR, FL("HAL Handle is NULL"));
return;
}
/* Only proceed if the MC address list feature is enabled in the INI;
* otherwise just log and return */
if (pHddCtx->cfg_ini->fEnableMCAddrList)
{
pMulticastAddrs = vos_mem_malloc(sizeof(tSirRcvFltMcAddrList));
if (NULL == pMulticastAddrs)
{
hddLog(VOS_TRACE_LEVEL_ERROR, FL("Could not allocate Memory"));
return;
}
if (set)
{
/* The following pre-conditions must be satisfied before we
* configure the MC address list.
*/
if (((pAdapter->device_mode == WLAN_HDD_INFRA_STATION) ||
(pAdapter->device_mode == WLAN_HDD_P2P_CLIENT))
&& pAdapter->mc_addr_list.mc_cnt
&& (eConnectionState_Associated ==
(WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.connState))
{
pMulticastAddrs->ulMulticastAddrCnt =
pAdapter->mc_addr_list.mc_cnt;
for (i = 0; i < pAdapter->mc_addr_list.mc_cnt; i++)
{
memcpy(pMulticastAddrs->multicastAddr[i],
pAdapter->mc_addr_list.addr[i],
sizeof(pAdapter->mc_addr_list.addr[i]));
hddLog(VOS_TRACE_LEVEL_INFO,
"%s: %s multicast filter: addr ="
MAC_ADDRESS_STR,
__func__, set ? "setting" : "clearing",
MAC_ADDR_ARRAY(pMulticastAddrs->multicastAddr[i]));
}
/* Set multicast filter */
sme_8023MulticastList(hHal, pAdapter->sessionId,
pMulticastAddrs);
}
}
else
{
/* Need to clear only if it was previously configured
*/
if (pAdapter->mc_addr_list.isFilterApplied)
{
pMulticastAddrs->ulMulticastAddrCnt = 0;
sme_8023MulticastList(hHal, pAdapter->sessionId,
pMulticastAddrs);
}
}
pAdapter->mc_addr_list.isFilterApplied = set ? TRUE : FALSE;
vos_mem_free(pMulticastAddrs);
}
else
{
hddLog(VOS_TRACE_LEVEL_INFO,
FL("gMCAddrListEnable is not enabled in INI"));
}
return;
}
static int __iw_set_packet_filter_params(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tpPacketFilterCfg pRequest = NULL;
int ret;
struct iw_point s_priv_data;
if (hdd_priv_get_data(&s_priv_data, wrqu))
{
return -EINVAL;
}
if ((NULL == s_priv_data.pointer) || (0 == s_priv_data.length))
{
return -EINVAL;
}
/* an odd ioctl number is used for "set", so we must copy the data from user space with copy_from_user */
pRequest = mem_alloc_copy_from_user_helper(s_priv_data.pointer,
s_priv_data.length);
if (NULL == pRequest)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"mem_alloc_copy_from_user_helper fail");
return -ENOMEM;
}
ret = wlan_hdd_set_filter(WLAN_HDD_GET_CTX(pAdapter), pRequest, pAdapter->sessionId);
kfree(pRequest);
return ret;
}
static int iw_set_packet_filter_params(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_packet_filter_params(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
#endif
static int __iw_get_statistics(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
VOS_STATUS vos_status = VOS_STATUS_SUCCESS;
eHalStatus status = eHAL_STATUS_SUCCESS;
hdd_wext_state_t *pWextState;
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
char *p = extra;
int tlen = 0;
tCsrSummaryStatsInfo *pStats = &(pAdapter->hdd_stats.summary_stat);
tCsrGlobalClassAStatsInfo *aStats = &(pAdapter->hdd_stats.ClassA_stat);
tCsrGlobalClassDStatsInfo *dStats = &(pAdapter->hdd_stats.ClassD_stat);
ENTER();
if (pHddCtx->isLogpInProgress) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL, "%s:LOGP in Progress. Ignore!!!",__func__);
return -EINVAL;
}
if (eConnectionState_Associated != (WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.connState) {
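/* not associated: nothing to gather; report a zero value (apparently
* reusing the union's txpower slot for the generic result) */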
wrqu->txpower.value = 0;
}
else {
status = sme_GetStatistics( pHddCtx->hHal, eCSR_HDD,
SME_SUMMARY_STATS |
SME_GLOBAL_CLASSA_STATS |
SME_GLOBAL_CLASSB_STATS |
SME_GLOBAL_CLASSC_STATS |
SME_GLOBAL_CLASSD_STATS |
SME_PER_STA_STATS,
hdd_StatisticsCB, 0, FALSE,
(WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], pAdapter );
if (eHAL_STATUS_SUCCESS != status)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: Unable to retrieve SME statistics",
__func__);
return -EINVAL;
}
pWextState = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
vos_status = vos_wait_single_event(&pWextState->vosevent, WLAN_WAIT_TIME_STATS);
if (!VOS_IS_STATUS_SUCCESS(vos_status))
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s: SME timeout while retrieving statistics",
__func__);
/*Remove the SME statistics list by passing NULL in callback argument*/
status = sme_GetStatistics( pHddCtx->hHal, eCSR_HDD,
SME_SUMMARY_STATS |
SME_GLOBAL_CLASSA_STATS |
SME_GLOBAL_CLASSB_STATS |
SME_GLOBAL_CLASSC_STATS |
SME_GLOBAL_CLASSD_STATS |
SME_PER_STA_STATS,
NULL, 0, FALSE,
(WLAN_HDD_GET_STATION_CTX_PTR(pAdapter))->conn_info.staId[0], pAdapter );
return -EINVAL;
}
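/* each FILL_TLV invocation below appends one (type, length, value)
* triple at p and advances the running length in tlen; tlen is then
* stored in wrqu->data.length so userspace knows how many bytes to
* parse */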
FILL_TLV(p, (tANI_U8)WLAN_STATS_RETRY_CNT,
(tANI_U8) sizeof (pStats->retry_cnt),
(char*) &(pStats->retry_cnt[0]),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_MUL_RETRY_CNT,
(tANI_U8) sizeof (pStats->multiple_retry_cnt),
(char*) &(pStats->multiple_retry_cnt[0]),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_TX_FRM_CNT,
(tANI_U8) sizeof (pStats->tx_frm_cnt),
(char*) &(pStats->tx_frm_cnt[0]),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_RX_FRM_CNT,
(tANI_U8) sizeof (pStats->rx_frm_cnt),
(char*) &(pStats->rx_frm_cnt),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_FRM_DUP_CNT,
(tANI_U8) sizeof (pStats->frm_dup_cnt),
(char*) &(pStats->frm_dup_cnt),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_FAIL_CNT,
(tANI_U8) sizeof (pStats->fail_cnt),
(char*) &(pStats->fail_cnt[0]),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_RTS_FAIL_CNT,
(tANI_U8) sizeof (pStats->rts_fail_cnt),
(char*) &(pStats->rts_fail_cnt),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_ACK_FAIL_CNT,
(tANI_U8) sizeof (pStats->ack_fail_cnt),
(char*) &(pStats->ack_fail_cnt),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_RTS_SUC_CNT,
(tANI_U8) sizeof (pStats->rts_succ_cnt),
(char*) &(pStats->rts_succ_cnt),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_RX_DISCARD_CNT,
(tANI_U8) sizeof (pStats->rx_discard_cnt),
(char*) &(pStats->rx_discard_cnt),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_RX_ERROR_CNT,
(tANI_U8) sizeof (pStats->rx_error_cnt),
(char*) &(pStats->rx_error_cnt),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_TX_BYTE_CNT,
(tANI_U8) sizeof (dStats->tx_uc_byte_cnt[0]),
(char*) &(dStats->tx_uc_byte_cnt[0]),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_RX_BYTE_CNT,
(tANI_U8) sizeof (dStats->rx_byte_cnt),
(char*) &(dStats->rx_byte_cnt),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_RX_RATE,
(tANI_U8) sizeof (dStats->rx_rate),
(char*) &(dStats->rx_rate),
tlen);
/* Transmit rate, in units of 500 kbit/sec */
FILL_TLV(p, (tANI_U8)WLAN_STATS_TX_RATE,
(tANI_U8) sizeof (aStats->tx_rate),
(char*) &(aStats->tx_rate),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_RX_UC_BYTE_CNT,
(tANI_U8) sizeof (dStats->rx_uc_byte_cnt[0]),
(char*) &(dStats->rx_uc_byte_cnt[0]),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_RX_MC_BYTE_CNT,
(tANI_U8) sizeof (dStats->rx_mc_byte_cnt),
(char*) &(dStats->rx_mc_byte_cnt),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_RX_BC_BYTE_CNT,
(tANI_U8) sizeof (dStats->rx_bc_byte_cnt),
(char*) &(dStats->rx_bc_byte_cnt),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_TX_UC_BYTE_CNT,
(tANI_U8) sizeof (dStats->tx_uc_byte_cnt[0]),
(char*) &(dStats->tx_uc_byte_cnt[0]),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_TX_MC_BYTE_CNT,
(tANI_U8) sizeof (dStats->tx_mc_byte_cnt),
(char*) &(dStats->tx_mc_byte_cnt),
tlen);
FILL_TLV(p, (tANI_U8)WLAN_STATS_TX_BC_BYTE_CNT,
(tANI_U8) sizeof (dStats->tx_bc_byte_cnt),
(char*) &(dStats->tx_bc_byte_cnt),
tlen);
wrqu->data.length = tlen;
}
EXIT();
return 0;
}
static int iw_get_statistics(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_get_statistics(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
#ifdef FEATURE_WLAN_SCAN_PNO
/*Max Len for PNO notification*/
#define MAX_PNO_NOTIFY_LEN 100
void found_pref_network_cb (void *callbackContext,
tSirPrefNetworkFoundInd *pPrefNetworkFoundInd)
{
hdd_adapter_t* pAdapter = (hdd_adapter_t*)callbackContext;
union iwreq_data wrqu;
char buf[MAX_PNO_NOTIFY_LEN+1];
hddLog(VOS_TRACE_LEVEL_WARN, "A preferred network was found: %s with rssi: -%d",
pPrefNetworkFoundInd->ssId.ssId, pPrefNetworkFoundInd->rssi);
// create the event
memset(&wrqu, 0, sizeof(wrqu));
memset(buf, 0, sizeof(buf));
snprintf(buf, MAX_PNO_NOTIFY_LEN, "QCOM: Found preferred network: %s with RSSI of -%u",
pPrefNetworkFoundInd->ssId.ssId,
(unsigned int)pPrefNetworkFoundInd->rssi);
wrqu.data.pointer = buf;
wrqu.data.length = strlen(buf);
// send the event
wireless_send_event(pAdapter->dev, IWEVCUSTOM, &wrqu, buf);
}
/*string based input*/
VOS_STATUS iw_set_pno(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra, int nOffset)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
/* pnoRequest is a large struct; its big members (aNetworks and the
probe templates) are heap-allocated below to avoid stack overflow.
This API is only invoked via ioctl, so it is serialized by the
kernel rtnl_lock and hence does not need to be reentrant */
tSirPNOScanReq pnoRequest = {0};
char *ptr;
v_U8_t i,j, ucParams, ucMode;
eHalStatus status = eHAL_STATUS_FAILURE;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"PNO data len %d data %s",
wrqu->data.length,
extra);
if (wrqu->data.length <= nOffset )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, "PNO input is not correct");
return VOS_STATUS_E_FAILURE;
}
pnoRequest.enable = 0;
pnoRequest.ucNetworksCount = 0;
/*-----------------------------------------------------------------------
Input is string based and expected to be like this:
<enabled> <netw_count>
for each network:
<ssid_len> <ssid> <authentication> <encryption>
<ch_num> <channel_list optional> <bcast_type> <rssi_threshold>
<scan_timers> <scan_time> <scan_repeat> <scan_time> <scan_repeat>
e.g:
1 2 4 test 0 0 3 1 6 11 2 40 5 test2 4 4 6 1 2 3 4 5 6 1 0 2 5 2 300 0
this translates into:
-----------------------------
enable PNO
look for 2 networks:
test - with authentication type 0 and encryption type 0,
that can be found on 3 channels: 1 6 and 11 ,
SSID bcast type is unknown (directed probe will be sent if AP not found)
and must meet -40dBm RSSI
test2 - with auth and encryption type 4/4
that can be found on 6 channels 1, 2, 3, 4, 5 and 6
bcast type is non-bcast (directed probe will be sent)
and must not meet any RSSI threshold
scan every 5 seconds 2 times, scan every 300 seconds until stopped
-----------------------------------------------------------------------*/
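/* the parser below walks the string with sscanf(), using %n to learn
* how many characters each field consumed so that ptr can be advanced */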
ptr = extra + nOffset;
if (1 != sscanf(ptr,"%hhu%n", &(pnoRequest.enable), &nOffset))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"PNO enable input is not valid %s",ptr);
return VOS_STATUS_E_FAILURE;
}
if ( 0 == pnoRequest.enable )
{
/*Disable PNO*/
memset(&pnoRequest, 0, sizeof(pnoRequest));
status = sme_SetPreferredNetworkList(WLAN_HDD_GET_HAL_CTX(pAdapter),
&pnoRequest,
pAdapter->sessionId,
found_pref_network_cb, pAdapter);
if (eHAL_STATUS_SUCCESS != status)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: failed to disable PNO", __func__);
return VOS_STATUS_E_FAILURE;
}
pHddCtx->isPnoEnable = FALSE;
return VOS_STATUS_SUCCESS;
}
if (TRUE == pHddCtx->isPnoEnable)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
FL("already PNO is enabled"));
return -EBUSY;
}
pHddCtx->isPnoEnable = TRUE;
ptr += nOffset;
if (1 != sscanf(ptr,"%hhu %n", &(pnoRequest.ucNetworksCount), &nOffset))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"PNO count input not valid %s",ptr);
goto error;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"PNO enable %d networks count %d offset %d",
pnoRequest.enable,
pnoRequest.ucNetworksCount,
nOffset);
/* Parameters checking:
ucNetworksCount has to be larger than 0*/
if (( 0 == pnoRequest.ucNetworksCount ) ||
( pnoRequest.ucNetworksCount > SIR_PNO_MAX_SUPP_NETWORKS ))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, "Network input is not correct");
goto error;
}
ptr += nOffset;
pnoRequest.aNetworks =
vos_mem_malloc(sizeof(tSirNetworkType)*pnoRequest.ucNetworksCount);
if (pnoRequest.aNetworks == NULL)
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
FL("failed to allocate memory aNetworks %u"),
(uint32)sizeof(tSirNetworkType)*pnoRequest.ucNetworksCount);
goto error;
}
vos_mem_zero(pnoRequest.aNetworks,
sizeof(tSirNetworkType)*pnoRequest.ucNetworksCount);
for ( i = 0; i < pnoRequest.ucNetworksCount; i++ )
{
pnoRequest.aNetworks[i].ssId.length = 0;
ucParams = sscanf(ptr,"%hhu %n",
&(pnoRequest.aNetworks[i].ssId.length),&nOffset);
if (1 != ucParams)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"PNO ssid length input is not valid %s",ptr);
goto error;
}
if (( 0 == pnoRequest.aNetworks[i].ssId.length ) ||
( pnoRequest.aNetworks[i].ssId.length > 32 ) )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"SSID Len %d is not correct for network %d",
pnoRequest.aNetworks[i].ssId.length, i);
goto error;
}
/*Advance to SSID*/
ptr += nOffset;
memcpy(pnoRequest.aNetworks[i].ssId.ssId, ptr,
pnoRequest.aNetworks[i].ssId.length);
ptr += pnoRequest.aNetworks[i].ssId.length;
ucParams = sscanf(ptr,"%u %u %hhu %n",
&(pnoRequest.aNetworks[i].authentication),
&(pnoRequest.aNetworks[i].encryption),
&(pnoRequest.aNetworks[i].ucChannelCount),
&nOffset);
if ( 3 != ucParams )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
"Incorrect cmd %s",ptr);
goto error;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"PNO len %d ssid 0x%08x%08x%08x%08x%08x%08x%08x%08x"
"auth %d encry %d channel count %d offset %d",
pnoRequest.aNetworks[i].ssId.length,
*((v_U32_t *) &pnoRequest.aNetworks[i].ssId.ssId[0]),
*((v_U32_t *) &pnoRequest.aNetworks[i].ssId.ssId[4]),
*((v_U32_t *) &pnoRequest.aNetworks[i].ssId.ssId[8]),
*((v_U32_t *) &pnoRequest.aNetworks[i].ssId.ssId[12]),
*((v_U32_t *) &pnoRequest.aNetworks[i].ssId.ssId[16]),
*((v_U32_t *) &pnoRequest.aNetworks[i].ssId.ssId[20]),
*((v_U32_t *) &pnoRequest.aNetworks[i].ssId.ssId[24]),
*((v_U32_t *) &pnoRequest.aNetworks[i].ssId.ssId[28]),
pnoRequest.aNetworks[i].authentication,
pnoRequest.aNetworks[i].encryption,
pnoRequest.aNetworks[i].ucChannelCount,
nOffset );
/*Advance to channel list*/
ptr += nOffset;
if (SIR_PNO_MAX_NETW_CHANNELS < pnoRequest.aNetworks[i].ucChannelCount)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
"Incorrect number of channels");
goto error;
}
if ( 0 != pnoRequest.aNetworks[i].ucChannelCount)
{
for ( j = 0; j < pnoRequest.aNetworks[i].ucChannelCount; j++)
{
if (1 != sscanf(ptr,"%hhu %n",
&(pnoRequest.aNetworks[i].aChannels[j]),
&nOffset))
{ VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"PNO network channel input is not valid %s",ptr);
return VOS_STATUS_E_FAILURE;
}
/*Advance to next channel number*/
ptr += nOffset;
}
}
if (1 != sscanf(ptr,"%u %n",
&(pnoRequest.aNetworks[i].bcastNetwType),
&nOffset))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"PNO broadcast network type input is not valid %s",ptr);
goto error;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"PNO bcastNetwType %d offset %d",
pnoRequest.aNetworks[i].bcastNetwType,
nOffset );
/*Advance to rssi Threshold*/
ptr += nOffset;
if (1 != sscanf(ptr,"%hhu %n",
&(pnoRequest.aNetworks[i].rssiThreshold),
&nOffset))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"PNO rssi threshold input is not valid %s",ptr);
goto error;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"PNO rssi %d offset %d",
pnoRequest.aNetworks[i].rssiThreshold,
nOffset );
/*Advance to next network*/
ptr += nOffset;
}/*For ucNetworkCount*/
ucParams = sscanf(ptr,"%hhu %n",
&(pnoRequest.scanTimers.ucScanTimersCount),
&nOffset);
/*Read the scan timers*/
if (( 1 == ucParams ) && ( pnoRequest.scanTimers.ucScanTimersCount > 0 ))
{
ptr += nOffset;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"Scan timer count %d offset %d",
pnoRequest.scanTimers.ucScanTimersCount,
nOffset );
if ( SIR_PNO_MAX_SCAN_TIMERS < pnoRequest.scanTimers.ucScanTimersCount )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"Incorrect cmd - too many scan timers");
goto error;
}
for ( i = 0; i < pnoRequest.scanTimers.ucScanTimersCount; i++ )
{
ucParams = sscanf(ptr,"%u %u %n",
&(pnoRequest.scanTimers.aTimerValues[i].uTimerValue),
&( pnoRequest.scanTimers.aTimerValues[i].uTimerRepeat),
&nOffset);
if (2 != ucParams)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"Incorrect cmd - diff params then expected %d", ucParams);
goto error;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"PNO Timer value %d Timer repeat %d offset %d",
pnoRequest.scanTimers.aTimerValues[i].uTimerValue,
pnoRequest.scanTimers.aTimerValues[i].uTimerRepeat,
nOffset );
ptr += nOffset;
}
}
else
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"No scan timers provided param count %d scan timers %d",
ucParams, pnoRequest.scanTimers.ucScanTimersCount );
/*No scan timers provided; default to a single scan timer with value 60 and repeat 0*/
pnoRequest.scanTimers.ucScanTimersCount = 1;
pnoRequest.scanTimers.aTimerValues[0].uTimerValue = 60;
pnoRequest.scanTimers.aTimerValues[0].uTimerRepeat = 0;
}
ucParams = sscanf(ptr,"%hhu %n",&(ucMode), &nOffset);
pnoRequest.modePNO = ucMode;
/*for LA we just expose suspend option*/
if (( 1 != ucParams )||( ucMode >= SIR_PNO_MODE_MAX ))
{
pnoRequest.modePNO = SIR_PNO_MODE_ON_SUSPEND;
}
pnoRequest.p24GProbeTemplate = vos_mem_malloc(SIR_PNO_MAX_PB_REQ_SIZE);
if (pnoRequest.p24GProbeTemplate == NULL){
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
FL("failed to allocate memory p24GProbeTemplate %u"),
SIR_PNO_MAX_PB_REQ_SIZE);
goto error;
}
pnoRequest.p5GProbeTemplate = vos_mem_malloc(SIR_PNO_MAX_PB_REQ_SIZE);
if (pnoRequest.p5GProbeTemplate == NULL){
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
FL("failed to allocate memory p5GProbeTemplate %u"),
SIR_PNO_MAX_PB_REQ_SIZE);
goto error;
}
vos_mem_zero(pnoRequest.p24GProbeTemplate, SIR_PNO_MAX_PB_REQ_SIZE);
vos_mem_zero(pnoRequest.p5GProbeTemplate, SIR_PNO_MAX_PB_REQ_SIZE);
status = sme_SetPreferredNetworkList(WLAN_HDD_GET_HAL_CTX(pAdapter), &pnoRequest,
pAdapter->sessionId,
found_pref_network_cb, pAdapter);
if (eHAL_STATUS_SUCCESS == status)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: PNO enabled", __func__);
return VOS_STATUS_SUCCESS;
}
error:
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: Failed to enable PNO", __func__);
pHddCtx->isPnoEnable = FALSE;
if (pnoRequest.aNetworks)
vos_mem_free(pnoRequest.aNetworks);
if (pnoRequest.p24GProbeTemplate)
vos_mem_free(pnoRequest.p24GProbeTemplate);
if (pnoRequest.p5GProbeTemplate)
vos_mem_free(pnoRequest.p5GProbeTemplate);
return VOS_STATUS_E_FAILURE;
}/*iw_set_pno*/
VOS_STATUS iw_set_rssi_filter(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra, int nOffset)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
v_U8_t rssiThreshold = 0;
v_U8_t nRead;
nRead = sscanf(extra + nOffset,"%hhu",
&rssiThreshold);
if ( 1 != nRead )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN,
"Incorrect format");
return VOS_STATUS_E_FAILURE;
}
sme_SetRSSIFilter(WLAN_HDD_GET_HAL_CTX(pAdapter), rssiThreshold);
return VOS_STATUS_SUCCESS;
}
static int __iw_set_pno_priv(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"Set PNO Private");
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
return iw_set_pno(dev,info,wrqu,extra,0);
}
static int iw_set_pno_priv(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_pno_priv(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
#endif /*FEATURE_WLAN_SCAN_PNO*/
//Common function to SetBand
int hdd_setBand(struct net_device *dev, u8 ui_band)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
hdd_scaninfo_t *pScanInfo = NULL;
eCsrBand band;
eCsrBand currBand = eCSR_BAND_MAX;
eCsrBand connectedBand;
v_U8_t ret = SEND_CHANNEL_CHANGE_EVENT;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
switch(ui_band)
{
case WLAN_HDD_UI_BAND_AUTO:
band = eCSR_BAND_ALL;
break;
case WLAN_HDD_UI_BAND_5_GHZ:
band = eCSR_BAND_5G;
break;
case WLAN_HDD_UI_BAND_2_4_GHZ:
band = eCSR_BAND_24;
break;
default:
band = eCSR_BAND_MAX;
}
connectedBand =
hdd_connGetConnectedBand(WLAN_HDD_GET_STATION_CTX_PTR(pAdapter));
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: change band to %u",
__func__, band);
if (band == eCSR_BAND_MAX)
{
/* Received change band request with invalid band value */
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid band value %u", __func__, ui_band);
return -EINVAL;
}
if ( (band == eCSR_BAND_24 && pHddCtx->cfg_ini->nBandCapability==2) ||
(band == eCSR_BAND_5G && pHddCtx->cfg_ini->nBandCapability==1) ||
(band == eCSR_BAND_ALL && pHddCtx->cfg_ini->nBandCapability!=0))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: band value %u violate INI settings %u", __func__,
band, pHddCtx->cfg_ini->nBandCapability);
return -EIO;
}
if (eHAL_STATUS_SUCCESS != sme_GetFreqBand(hHal, &currBand))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: Failed to get current band config",
__func__);
return -EIO;
}
if (currBand != band)
{
/* Return failure if current country code is world regulatory domain*/
if( (pMac->scan.countryCodeCurrent[0] == '0' &&
pMac->scan.countryCodeCurrent[1] == '0') )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: failed to set the band value to %u as country code is 00",
__func__, band);
return -EAGAIN;
}
/* Change band request received.
* Abort pending scan requests, flush the existing scan results,
* and change the band capability
*/
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: Current band value = %u, new setting %u ",
__func__, currBand, band);
/* We need to change the band and flush the scan results here itself
* as we may get timeout for disconnection in which we will return
* with out doing any of these
*/
if (eHAL_STATUS_SUCCESS != sme_SetFreqBand(hHal, (eCsrBand)band))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s: failed to set the band value to %u ",
__func__, band);
return -EINVAL;
}
if(currBand == eCSR_BAND_24 || currBand == eCSR_BAND_5G)
{
v_COUNTRYCODE_t curr_country;
curr_country[0]=pMac->scan.countryCodeCurrent[0];
curr_country[1]=pMac->scan.countryCodeCurrent[1];
/* As the current band is already set to 2.4GHz/5GHz we don't have all
* channel information available in NV, so to get the channel
* information from the kernel we need to send a regulatory hint for
* the current country. And to set the same country again we need to
* set a dummy country ("00") first and then the actual country.
*/
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
regulatory_hint_user("00", NL80211_USER_REG_HINT_USER);
#else
regulatory_hint_user("00");
#endif
pMac->roam.configParam.fEnforceCountryCode = eANI_BOOLEAN_TRUE;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0))
regulatory_hint_user(curr_country, NL80211_USER_REG_HINT_USER);
#else
regulatory_hint_user(curr_country);
#endif
ret = DO_NOT_SEND_CHANNEL_CHANGE_EVENT;
}
else
{
#ifdef CONFIG_ENABLE_LINUX_REG
vos_update_nv_table_from_wiphy_band((void *)pHddCtx,
(void *)pHddCtx->wiphy, (eCsrBand)band);
#else
wlan_hdd_cfg80211_update_band( pHddCtx->wiphy, (eCsrBand)band );
#endif
}
pScanInfo = &pHddCtx->scan_info;
if ((pScanInfo != NULL) && pHddCtx->scan_info.mScanPending)
{
hdd_abort_mac_scan(pHddCtx, pScanInfo->sessionId,
eCSR_SCAN_ABORT_DUE_TO_BAND_CHANGE);
}
sme_FilterScanResults(hHal, pAdapter->sessionId);
if (band != eCSR_BAND_ALL &&
hdd_connIsConnected(WLAN_HDD_GET_STATION_CTX_PTR(pAdapter)) &&
(connectedBand != band))
{
eHalStatus status = eHAL_STATUS_SUCCESS;
long lrc;
/* STA already connected on current band, So issue disconnect first,
* then change the band*/
hddLog(VOS_TRACE_LEVEL_INFO,
"%s STA connected in band %u, Changing band to %u, Issuing Disconnect."
, __func__, csrGetCurrentBand(hHal), band);
INIT_COMPLETION(pAdapter->disconnect_comp_var);
status = sme_RoamDisconnect( WLAN_HDD_GET_HAL_CTX(pAdapter),
pAdapter->sessionId, eCSR_DISCONNECT_REASON_UNSPECIFIED);
if ( eHAL_STATUS_SUCCESS != status)
{
hddLog(VOS_TRACE_LEVEL_ERROR,
"%s sme_RoamDisconnect failure, returned %d",
__func__, (int)status );
return -EINVAL;
}
lrc = wait_for_completion_interruptible_timeout(
&pAdapter->disconnect_comp_var,
msecs_to_jiffies(WLAN_WAIT_TIME_DISCONNECT));
if (lrc <= 0) {
hddLog(VOS_TRACE_LEVEL_ERROR,"%s: %s while waiting for csrRoamDisconnect ",
__func__, (0 == lrc) ? "Timeout" : "Interrupt");
return (0 == lrc) ? -ETIMEDOUT : -EINTR;
}
}
}
return ret;
}
int hdd_setBand_helper(struct net_device *dev, const char *command)
{
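/* the command string is assumed to look like "SETBAND <digit>", with
* the band digit at WLAN_HDD_UI_SET_BAND_VALUE_OFFSET */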
u8 band;
/*convert the band value from ascii to integer*/
band = command[WLAN_HDD_UI_SET_BAND_VALUE_OFFSET] - '0';
return hdd_setBand(dev, band);
}
static int __iw_set_band_config(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
int *value = (int *)extra;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: set band config", __func__);
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
return hdd_setBand(dev, value[0]);
}
static int iw_set_band_config(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_band_config(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
static int __iw_set_power_params_priv(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
char *ptr;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"Set power params Private");
/* an odd ioctl number is used for "set", so we must copy the data from user space with copy_from_user */
ptr = mem_alloc_copy_from_user_helper(wrqu->data.pointer,
wrqu->data.length);
if (NULL == ptr)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"mem_alloc_copy_from_user_helper fail");
return -ENOMEM;
}
ret = iw_set_power_params(dev, info, wrqu, ptr, 0);
kfree(ptr);
return ret;
}
static int iw_set_power_params_priv(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret;
vos_ssr_protect(__func__);
ret = __iw_set_power_params_priv(dev, info, wrqu, extra);
vos_ssr_unprotect(__func__);
return ret;
}
/*string based input*/
VOS_STATUS iw_set_power_params(struct net_device *dev, struct iw_request_info *info,
union iwreq_data *wrqu, char *extra, int nOffset)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
tSirSetPowerParamsReq powerRequest;
char *ptr;
v_U8_t ucType;
v_U32_t uTotalSize, uValue;
/*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"Power Params data len %d data %s",
wrqu->data.length,
extra);
if ((WLAN_HDD_GET_CTX(pAdapter))->isLogpInProgress)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s:LOGP in Progress. Ignore!!!", __func__);
return -EBUSY;
}
if (wrqu->data.length <= nOffset )
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_WARN, "set power param input is not correct");
return VOS_STATUS_E_FAILURE;
}
uTotalSize = wrqu->data.length - nOffset;
/*-----------------------------------------------------------------------
Input is string based and expected to be like this:
<param_type> <param_value> <param_type> <param_value> ...
e.g:
1 2 2 3 3 0 4 1 5 1
e.g. setting just a few:
1 2 4 1
parameter types:
-----------------------------
1 - Ignore DTIM
2 - Listen Interval
3 - Broadcast/Multicast Filter
4 - Beacon Early Termination
5 - Beacon Early Termination Interval
-----------------------------------------------------------------------*/
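/* the numeric types 1-5 above are assumed to correspond, in order, to
* the eSIR_IGNORE_DTIM .. eSIR_BET_INTERVAL cases handled in the
* switch below */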
powerRequest.uIgnoreDTIM = SIR_NOCHANGE_POWER_VALUE;
powerRequest.uListenInterval = SIR_NOCHANGE_POWER_VALUE;
powerRequest.uBcastMcastFilter = SIR_NOCHANGE_POWER_VALUE;
powerRequest.uEnableBET = SIR_NOCHANGE_POWER_VALUE;
powerRequest.uBETInterval = SIR_NOCHANGE_POWER_VALUE;
ptr = extra + nOffset;
while ( uTotalSize )
{
if (1 != sscanf(ptr,"%hhu %n", &(ucType), &nOffset))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"Invalid input parameter type %s",ptr);
return VOS_STATUS_E_FAILURE;
}
uTotalSize -= nOffset;
if (!uTotalSize)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"Invalid input parameter type : %d with no value at offset %d",
ucType, nOffset);
return VOS_STATUS_E_FAILURE;
}
ptr += nOffset;
if (1 != sscanf(ptr,"%u %n", &(uValue), &nOffset))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"Invalid input parameter value %s",ptr);
return VOS_STATUS_E_FAILURE;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"Power request parameter %d value %d offset %d",
ucType, uValue, nOffset);
switch (ucType)
{
case eSIR_IGNORE_DTIM:
powerRequest.uIgnoreDTIM = uValue;
break;
case eSIR_LISTEN_INTERVAL:
powerRequest.uListenInterval = uValue;
break;
case eSIR_MCAST_BCAST_FILTER:
powerRequest.uBcastMcastFilter = uValue;
break;
case eSIR_ENABLE_BET:
powerRequest.uEnableBET = uValue;
break;
case eSIR_BET_INTERVAL:
powerRequest.uBETInterval = uValue;
break;
default:
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"Invalid input parameter type : %d with value: %d at offset %d",
ucType, uValue, nOffset);
return VOS_STATUS_E_FAILURE;
}
uTotalSize -= nOffset;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"Power request remaining total size %d",
uTotalSize);
ptr += nOffset;
/* This is added for dynamic Tele LI enable (0xF1) /disable (0xF0)*/
if(!(uTotalSize - nOffset) &&
(powerRequest.uListenInterval != SIR_NOCHANGE_POWER_VALUE))
{
uTotalSize = 0;
}
}/*Go for as long as we have a valid string*/
/* put the device into full power*/
wlan_hdd_enter_bmps(pAdapter, DRIVER_POWER_MODE_ACTIVE);
/* Apply the power save params*/
sme_SetPowerParams( WLAN_HDD_GET_HAL_CTX(pAdapter), &powerRequest, FALSE);
/* put the device back to power save*/
wlan_hdd_enter_bmps(pAdapter, DRIVER_POWER_MODE_AUTO);
return VOS_STATUS_SUCCESS;
}/*iw_set_power_params*/
// tdlsoffchan
#ifdef FEATURE_WLAN_TDLS
int iw_set_tdlsoffchannel(hdd_context_t *pHddCtx, int offchannel)
{
if (offchannel < 0 || offchannel > 165)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "%s: Invalid tdls off channel %u",
__func__, offchannel);
return -1;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: change tdls off channel from %d to %d",
__func__, tdlsOffCh, offchannel);
tdlsOffCh = offchannel;
return 0;
}
int iw_set_tdlssecoffchanneloffset(hdd_context_t *pHddCtx, int offchanoffset)
{
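/* map the user-supplied secondary channel offset (presumably in MHz)
* to the internal encoding: 0 -> none, +40 -> 1 (secondary above),
* -40 -> 3 (secondary below), matching the 802.11 HT secondary
* channel offset convention (SCA/SCB) */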
if (offchanoffset == 0)
{
tdlsOffChBwOffset = 0;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: change tdls secondary off channel offset to %u",
__func__, tdlsOffChBwOffset);
return 0;
}
if ( offchanoffset == 40 )
{
tdlsOffChBwOffset = 1;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: change tdls secondary off channel offset to %u",
__func__, tdlsOffChBwOffset);
return 0;
}
if (offchanoffset == -40)
{
tdlsOffChBwOffset = 3;
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: change tdls secondary off channel offset to %u",
__func__, tdlsOffChBwOffset);
return 0;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, "%s: Invalid tdls secondary off channel offset %d",
__func__, offchanoffset);
return -1;
}
int iw_set_tdlsoffchannelmode(hdd_adapter_t *pAdapter, int offchanmode)
{
hddTdlsPeer_t *connPeer = NULL;
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
if (offchanmode < 0 || offchanmode > 4)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: Invalid tdls off channel mode %d",
__func__, offchanmode);
return -1;
}
if (eConnectionState_Associated != pHddStaCtx->conn_info.connState)
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: tdls off channel mode req in not associated state %d",
__func__, offchanmode);
return -1;
}
if (eTDLS_SUPPORT_ENABLED == pHddCtx->tdls_mode ||
eTDLS_SUPPORT_EXPLICIT_TRIGGER_ONLY == pHddCtx->tdls_mode)
{
/* Send TDLS Channel Switch Request to connected peer */
connPeer = wlan_hdd_tdls_get_first_connected_peer(pAdapter);
if (NULL == connPeer) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s: No TDLS Connected Peer", __func__);
return -1;
}
}
else
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_FATAL,
"%s: TDLS Connection not supported", __func__);
return -1;
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: TDLS Channel Switch in swmode=%d",
__func__, offchanmode);
switch (offchanmode)
{
case 1:/*Enable*/
case 2:/*Disable*/
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: change tdls off channel mode %d tdls_off_channel %d offchanoffset %d ",
__func__, offchanmode, tdlsOffCh, tdlsOffChBwOffset);
if (TRUE == pHddCtx->cfg_ini->fEnableTDLSOffChannel)
{
sme_SendTdlsChanSwitchReq(WLAN_HDD_GET_HAL_CTX(pAdapter),
pAdapter->sessionId, connPeer->peerMac,
tdlsOffCh, tdlsOffChBwOffset,
offchanmode);
}
else
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: TDLS Off Channel not supported", __func__);
return -1;
}
break;
}
case 3:
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: change tdls off channel mode %d REJREQ 3 tdls_off_channel %d offchanoffset %d ",
__func__, offchanmode, tdlsOffCh, tdlsOffChBwOffset);
break;
}
case 4:
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,
"%s: change tdls off channel mode %d UNSOLRESP 4 tdls_off_channel %d offchanoffset %d ",
__func__, offchanmode, tdlsOffCh, tdlsOffChBwOffset);
break;
}
default:
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR,
"%s: change tdls off channel mode %d Not SET 0 tdls_off_channel %d offchanoffset %d ",
__func__, offchanmode, tdlsOffCh, tdlsOffChBwOffset);
break;
}
}
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO, "%s: change tdls off channel mode to %u",
__func__, offchanmode);
return 0;
}
#endif
// Define the Wireless Extensions handlers for the Linux net_device structure.
// A number of these routines are NULL (meaning they are not implemented).
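// Each slot's index is (ioctl number - SIOCIWFIRST); the wireless extensions
// core dispatches standard SIOCxIW* requests through this table, so the
// ordering below must follow the SIOCSIWCOMMIT..SIOCSIWPMKSA numbering.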
static const iw_handler we_handler[] =
{
(iw_handler) iw_set_commit, /* SIOCSIWCOMMIT */
(iw_handler) iw_get_name, /* SIOCGIWNAME */
(iw_handler) NULL, /* SIOCSIWNWID */
(iw_handler) NULL, /* SIOCGIWNWID */
(iw_handler) iw_set_freq, /* SIOCSIWFREQ */
(iw_handler) iw_get_freq, /* SIOCGIWFREQ */
(iw_handler) iw_set_mode, /* SIOCSIWMODE */
(iw_handler) iw_get_mode, /* SIOCGIWMODE */
(iw_handler) NULL, /* SIOCSIWSENS */
(iw_handler) NULL, /* SIOCGIWSENS */
(iw_handler) NULL, /* SIOCSIWRANGE */
(iw_handler) iw_get_range, /* SIOCGIWRANGE */
(iw_handler) iw_set_priv, /* SIOCSIWPRIV */
(iw_handler) NULL, /* SIOCGIWPRIV */
(iw_handler) NULL, /* SIOCSIWSTATS */
(iw_handler) NULL, /* SIOCGIWSTATS */
iw_handler_set_spy, /* SIOCSIWSPY */
iw_handler_get_spy, /* SIOCGIWSPY */
iw_handler_set_thrspy, /* SIOCSIWTHRSPY */
iw_handler_get_thrspy, /* SIOCGIWTHRSPY */
(iw_handler) iw_set_ap_address, /* SIOCSIWAP */
(iw_handler) iw_get_ap_address, /* SIOCGIWAP */
(iw_handler) iw_set_mlme, /* SIOCSIWMLME */
(iw_handler) NULL, /* SIOCGIWAPLIST */
(iw_handler) iw_set_scan, /* SIOCSIWSCAN */
(iw_handler) iw_get_scan, /* SIOCGIWSCAN */
(iw_handler) iw_set_essid, /* SIOCSIWESSID */
(iw_handler) iw_get_essid, /* SIOCGIWESSID */
(iw_handler) iw_set_nick, /* SIOCSIWNICKN */
(iw_handler) iw_get_nick, /* SIOCGIWNICKN */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) iw_set_bitrate, /* SIOCSIWRATE */
(iw_handler) iw_get_bitrate, /* SIOCGIWRATE */
(iw_handler) iw_set_rts_threshold,/* SIOCSIWRTS */
(iw_handler) iw_get_rts_threshold,/* SIOCGIWRTS */
(iw_handler) iw_set_frag_threshold, /* SIOCSIWFRAG */
(iw_handler) iw_get_frag_threshold, /* SIOCGIWFRAG */
(iw_handler) iw_set_tx_power, /* SIOCSIWTXPOW */
(iw_handler) iw_get_tx_power, /* SIOCGIWTXPOW */
(iw_handler) iw_set_retry, /* SIOCSIWRETRY */
(iw_handler) iw_get_retry, /* SIOCGIWRETRY */
(iw_handler) iw_set_encode, /* SIOCSIWENCODE */
(iw_handler) iw_get_encode, /* SIOCGIWENCODE */
(iw_handler) iw_set_power_mode, /* SIOCSIWPOWER */
(iw_handler) iw_get_power_mode, /* SIOCGIWPOWER */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) NULL, /* -- hole -- */
(iw_handler) iw_set_genie, /* SIOCSIWGENIE */
(iw_handler) iw_get_genie, /* SIOCGIWGENIE */
(iw_handler) iw_set_auth, /* SIOCSIWAUTH */
(iw_handler) iw_get_auth, /* SIOCGIWAUTH */
(iw_handler) iw_set_encodeext, /* SIOCSIWENCODEEXT */
(iw_handler) iw_get_encodeext, /* SIOCGIWENCODEEXT */
(iw_handler) NULL, /* SIOCSIWPMKSA */
};
static const iw_handler we_private[] = {
[WLAN_PRIV_SET_INT_GET_NONE - SIOCIWFIRSTPRIV] = iw_setint_getnone, //set priv ioctl
[WLAN_PRIV_SET_NONE_GET_INT - SIOCIWFIRSTPRIV] = iw_setnone_getint, //get priv ioctl
[WLAN_PRIV_SET_CHAR_GET_NONE - SIOCIWFIRSTPRIV] = iw_setchar_getnone, //set priv ioctl
[WLAN_PRIV_SET_THREE_INT_GET_NONE - SIOCIWFIRSTPRIV] = iw_set_three_ints_getnone,
[WLAN_PRIV_GET_CHAR_SET_NONE - SIOCIWFIRSTPRIV] = iw_get_char_setnone,
[WLAN_PRIV_SET_NONE_GET_NONE - SIOCIWFIRSTPRIV] = iw_setnone_getnone, //action priv ioctl
[WLAN_PRIV_SET_VAR_INT_GET_NONE - SIOCIWFIRSTPRIV] = iw_hdd_set_var_ints_getnone,
[WLAN_PRIV_ADD_TSPEC - SIOCIWFIRSTPRIV] = iw_add_tspec,
[WLAN_PRIV_DEL_TSPEC - SIOCIWFIRSTPRIV] = iw_del_tspec,
[WLAN_PRIV_GET_TSPEC - SIOCIWFIRSTPRIV] = iw_get_tspec,
#ifdef FEATURE_OEM_DATA_SUPPORT
[WLAN_PRIV_SET_OEM_DATA_REQ - SIOCIWFIRSTPRIV] = iw_set_oem_data_req, //OEM data request specific
[WLAN_PRIV_GET_OEM_DATA_RSP - SIOCIWFIRSTPRIV] = iw_get_oem_data_rsp, //OEM data response specific
#endif
#ifdef WLAN_FEATURE_VOWIFI_11R
[WLAN_PRIV_SET_FTIES - SIOCIWFIRSTPRIV] = iw_set_fties,
#endif
[WLAN_PRIV_SET_HOST_OFFLOAD - SIOCIWFIRSTPRIV] = iw_set_host_offload,
[WLAN_GET_WLAN_STATISTICS - SIOCIWFIRSTPRIV] = iw_get_statistics,
[WLAN_SET_KEEPALIVE_PARAMS - SIOCIWFIRSTPRIV] = iw_set_keepalive_params
#ifdef WLAN_FEATURE_PACKET_FILTERING
,
[WLAN_SET_PACKET_FILTER_PARAMS - SIOCIWFIRSTPRIV] = iw_set_packet_filter_params
#endif
#ifdef FEATURE_WLAN_SCAN_PNO
,
[WLAN_SET_PNO - SIOCIWFIRSTPRIV] = iw_set_pno_priv
#endif
,
[WLAN_SET_BAND_CONFIG - SIOCIWFIRSTPRIV] = iw_set_band_config,
[WLAN_PRIV_SET_MCBC_FILTER - SIOCIWFIRSTPRIV] = iw_set_dynamic_mcbc_filter,
[WLAN_PRIV_CLEAR_MCBC_FILTER - SIOCIWFIRSTPRIV] = iw_clear_dynamic_mcbc_filter,
[WLAN_SET_POWER_PARAMS - SIOCIWFIRSTPRIV] = iw_set_power_params_priv,
[WLAN_GET_LINK_SPEED - SIOCIWFIRSTPRIV] = iw_get_linkspeed_priv,
};
/* Maximum command name length can only be 15 characters */
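/*
 * (The 15 character limit comes from struct iw_priv_args, whose name field
 * is IW_PRIV_NAME_SIZE == 16 bytes including the terminating NUL.) From user
 * space these surface as iwpriv commands, e.g. (illustrative only):
 *
 *   iwpriv wlan0 setMaxTxPower 15
 *   iwpriv wlan0 getChannelList
 */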
static const struct iw_priv_args we_private_args[] = {
/* handlers for main ioctl */
{ WLAN_PRIV_SET_INT_GET_NONE,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"" },
/* handlers for sub-ioctl */
{ WE_SET_11D_STATE,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"set11Dstate" },
{ WE_WOWL,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"wowl" },
{ WE_SET_POWER,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"setPower" },
{ WE_SET_MAX_ASSOC,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"setMaxAssoc" },
{ WE_SET_SAP_AUTO_CHANNEL_SELECTION,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"setAutoChannel" },
{ WE_SET_DATA_INACTIVITY_TO,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"inactivityTO" },
{ WE_SET_MAX_TX_POWER,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"setMaxTxPower" },
{ WE_SET_MAX_TX_POWER_2_4,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"setTxMaxPower2G" },
{ WE_SET_MAX_TX_POWER_5_0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"setTxMaxPower5G" },
/* SAP uses "TxMax" whereas STA uses "MaxTx"; add "TxMax" for STA
* as well so the syntax matches SAP's. From now on, STA
* supports both */
{ WE_SET_MAX_TX_POWER,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"setTxMaxPower" },
/* set Higher DTIM Transition (DTIM1 to DTIM3)
* 1 = enable and 0 = disable */
{
WE_SET_HIGHER_DTIM_TRANSITION,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"setHDtimTransn" },
{ WE_SET_TM_LEVEL,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"setTmLevel" },
{ WE_ENABLE_STRICT_FCC_REG,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"setStrictFCCreg" },
{ WE_SET_DEBUG_LOG,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0, "setDbgLvl" },
#ifdef FEATURE_WLAN_TDLS
{
WE_SET_TDLS_OFF_CHAN,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"tdlsoffchan" },
{
WE_SET_TDLS_SEC_OFF_CHAN_OFFSET,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"tdlsecchnoffst" },
{
WE_SET_TDLS_OFF_CHAN_MODE,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"tdlsoffchnmode" },
#endif
{ WE_SET_SCAN_BAND_PREFERENCE,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0, "set_scan_pref" },
{ WE_SET_MIRACAST_VENDOR_CONFIG,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0, "setMiracstConf" },
/* handlers for main ioctl */
{ WLAN_PRIV_SET_NONE_GET_INT,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"" },
/* handlers for sub-ioctl */
{ WE_GET_11D_STATE,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"get11Dstate" },
{ WE_IBSS_STATUS,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"getAdhocStatus" },
{ WE_PMC_STATE,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"pmcState" },
{ WE_GET_WLAN_DBG,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"getwlandbg" },
{ WE_GET_MAX_ASSOC,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"getMaxAssoc" },
{ WE_GET_WDI_DBG,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"getwdidbg" },
{ WE_GET_SAP_AUTO_CHANNEL_SELECTION,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"getAutoChannel" },
{ WE_GET_CONCURRENCY_MODE,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"getconcurrency" },
{ WE_GET_SCAN_BAND_PREFERENCE,
0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"get_scan_pref"},
/* handlers for main ioctl */
{ WLAN_PRIV_SET_CHAR_GET_NONE,
IW_PRIV_TYPE_CHAR| 512,
0,
"" },
/* handlers for sub-ioctl */
{ WE_WOWL_ADD_PTRN,
IW_PRIV_TYPE_CHAR| 512,
0,
"wowlAddPtrn" },
{ WE_WOWL_DEL_PTRN,
IW_PRIV_TYPE_CHAR| 512,
0,
"wowlDelPtrn" },
#if defined WLAN_FEATURE_VOWIFI
/* handlers for sub-ioctl */
{ WE_NEIGHBOR_REPORT_REQUEST,
IW_PRIV_TYPE_CHAR | 512,
0,
"neighbor" },
#endif
{ WE_SET_AP_WPS_IE,
IW_PRIV_TYPE_CHAR| 512,
0,
"set_ap_wps_ie" },
{ WE_SET_CONFIG,
IW_PRIV_TYPE_CHAR| 512,
0,
"setConfig" },
{ WE_SET_ENCRYPT_MSG,
IW_PRIV_TYPE_CHAR| 512,
0,
"encryptMsg" },
/* handlers for main ioctl */
{ WLAN_PRIV_SET_THREE_INT_GET_NONE,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3,
0,
"" },
/* handlers for sub-ioctl */
{ WE_SET_WLAN_DBG,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3,
0,
"setwlandbg" },
{ WE_SET_WDI_DBG,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3,
0,
"setwdidbg" },
{ WE_SET_SAP_CHANNELS,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 3,
0,
"setsapchannels" },
/* handlers for main ioctl */
{ WLAN_PRIV_GET_CHAR_SET_NONE,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"" },
/* handlers for sub-ioctl */
{ WE_WLAN_VERSION,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"version" },
{ WE_GET_STATS,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"getStats" },
{ WE_GET_STATES,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"getHostStates" },
{ WE_GET_CFG,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"getConfig" },
#ifdef WLAN_FEATURE_11AC
{ WE_GET_RSSI,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"getRSSI" },
#endif
#if defined WLAN_FEATURE_VOWIFI_11R || defined FEATURE_WLAN_ESE || defined(FEATURE_WLAN_LFR)
{ WE_GET_ROAM_RSSI,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"getRoamRSSI" },
#endif
{ WE_GET_WMM_STATUS,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"getWmmStatus" },
{
WE_GET_CHANNEL_LIST,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"getChannelList" },
#ifdef FEATURE_WLAN_TDLS
{
WE_GET_TDLS_PEERS,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"getTdlsPeers" },
#endif
#ifdef WLAN_FEATURE_11W
{
WE_GET_11W_INFO,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"getPMFInfo" },
#endif
{ WE_GET_SNR,
0,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
"getSNR" },
/* handlers for main ioctl */
{ WLAN_PRIV_SET_NONE_GET_NONE,
0,
0,
"" },
/* handlers for sub-ioctl */
{ WE_CLEAR_STATS,
0,
0,
"clearStats" },
{ WE_INIT_AP,
0,
0,
"initAP" },
{ WE_STOP_AP,
0,
0,
"exitAP" },
#ifdef WLAN_BTAMP_FEATURE
{ WE_ENABLE_AMP,
0,
0,
"enableAMP" },
{ WE_DISABLE_AMP,
0,
0,
"disableAMP" },
#endif /* WLAN_BTAMP_FEATURE */
{ WE_ENABLE_DXE_STALL_DETECT,
0,
0,
"dxeStallDetect" },
{ WE_DISPLAY_DXE_SNAP_SHOT,
0,
0,
"dxeSnapshot" },
{ WE_DISPLAY_DATAPATH_SNAP_SHOT,
0,
0,
"dataSnapshot"},
{
WE_SET_REASSOC_TRIGGER,
0,
0,
"reassoc" },
{
WE_STOP_OBSS_SCAN,
0,
0,
"stopOBSSScan" },
#ifdef DEBUG_ROAM_DELAY
{
WE_DUMP_ROAM_TIMER_LOG,
0,
0,
"dumpRoamDelay" },
{
WE_RESET_ROAM_TIMER_LOG,
0,
0,
"resetRoamDelay" },
#endif
/* handlers for main ioctl */
{ WLAN_PRIV_SET_VAR_INT_GET_NONE,
IW_PRIV_TYPE_INT | MAX_VAR_ARGS,
0,
"" },
/* handlers for sub-ioctl */
{ WE_LOG_DUMP_CMD,
IW_PRIV_TYPE_INT | MAX_VAR_ARGS,
0,
"dump" },
/* handlers for sub-ioctl */
{ WE_MTRACE_SELECTIVE_MODULE_LOG_ENABLE_CMD,
IW_PRIV_TYPE_INT | MAX_VAR_ARGS,
0,
"setdumplog" },
{ WE_MTRACE_DUMP_CMD,
IW_PRIV_TYPE_INT | MAX_VAR_ARGS,
0,
"dumplog" },
/* handlers for sub ioctl */
{
WE_MCC_CONFIG_CREDENTIAL,
IW_PRIV_TYPE_INT | MAX_VAR_ARGS,
0,
"setMccCrdnl" },
/* handlers for sub ioctl */
{
WE_MCC_CONFIG_PARAMS,
IW_PRIV_TYPE_INT | MAX_VAR_ARGS,
0,
"setMccConfig" },
#ifdef FEATURE_WLAN_TDLS
/* handlers for sub ioctl */
{
WE_TDLS_CONFIG_PARAMS,
IW_PRIV_TYPE_INT | MAX_VAR_ARGS,
0,
"setTdlsConfig" },
#endif
/* handlers for main ioctl */
{ WLAN_PRIV_ADD_TSPEC,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | HDD_WLAN_WMM_PARAM_COUNT,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"addTspec" },
/* handlers for main ioctl */
{ WLAN_PRIV_DEL_TSPEC,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"delTspec" },
/* handlers for main ioctl */
{ WLAN_PRIV_GET_TSPEC,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
"getTspec" },
#ifdef FEATURE_OEM_DATA_SUPPORT
/* handlers for main ioctl - OEM DATA */
{
WLAN_PRIV_SET_OEM_DATA_REQ,
IW_PRIV_TYPE_BYTE | sizeof(struct iw_oem_data_req) | IW_PRIV_SIZE_FIXED,
0,
"set_oem_data_req" },
/* handlers for main ioctl - OEM DATA */
{
WLAN_PRIV_GET_OEM_DATA_RSP,
0,
IW_PRIV_TYPE_BYTE | MAX_OEM_DATA_RSP_LEN,
"get_oem_data_rsp" },
#endif
/* handlers for main ioctl - host offload */
{
WLAN_PRIV_SET_HOST_OFFLOAD,
IW_PRIV_TYPE_BYTE | sizeof(tHostOffloadRequest),
0,
"setHostOffload" },
{
WLAN_GET_WLAN_STATISTICS,
0,
IW_PRIV_TYPE_BYTE | WE_MAX_STR_LEN,
"getWlanStats" },
{
WLAN_SET_KEEPALIVE_PARAMS,
IW_PRIV_TYPE_BYTE | sizeof(tKeepAliveRequest),
0,
"setKeepAlive" },
#ifdef WLAN_FEATURE_PACKET_FILTERING
{
WLAN_SET_PACKET_FILTER_PARAMS,
IW_PRIV_TYPE_BYTE | sizeof(tPacketFilterCfg),
0,
"setPktFilter" },
#endif
#ifdef FEATURE_WLAN_SCAN_PNO
{
WLAN_SET_PNO,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
0,
"setpno" },
#endif
{
WLAN_SET_BAND_CONFIG,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
0,
"SETBAND" },
/* handlers for dynamic MC BC ioctl */
{
WLAN_PRIV_SET_MCBC_FILTER,
IW_PRIV_TYPE_BYTE | sizeof(tRcvFltMcAddrList),
0,
"setMCBCFilter" },
{
WLAN_PRIV_CLEAR_MCBC_FILTER,
0,
0,
"clearMCBCFilter" },
{
WLAN_SET_POWER_PARAMS,
IW_PRIV_TYPE_CHAR| WE_MAX_STR_LEN,
0,
"setpowerparams" },
{
WLAN_GET_LINK_SPEED,
IW_PRIV_TYPE_CHAR | 18,
IW_PRIV_TYPE_CHAR | 5, "getLinkSpeed" },
};
const struct iw_handler_def we_handler_def = {
.num_standard = sizeof(we_handler) / sizeof(we_handler[0]),
.num_private = sizeof(we_private) / sizeof(we_private[0]),
.num_private_args = sizeof(we_private_args) / sizeof(we_private_args[0]),
.standard = (iw_handler *)we_handler,
.private = (iw_handler *)we_private,
.private_args = we_private_args,
.get_wireless_stats = get_wireless_stats,
};
int hdd_validate_mcc_config(hdd_adapter_t *pAdapter, v_UINT_t staId, v_UINT_t arg1, v_UINT_t arg2, v_UINT_t arg3)
{
v_U32_t cmd = 288; //Command to RIVA
hdd_context_t *pHddCtx = NULL;
tHalHandle hHal = WLAN_HDD_GET_HAL_CTX(pAdapter);
pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
/*
*configMccParam : specify the bit which needs to be modified
*allowed to update based on wlan_qcom_cfg.ini
* configuration
* Bit 0 : SCHEDULE_TIME_SLICE MIN : 5 MAX : 20
* Bit 1 : MAX_NULL_SEND_TIME MIN : 1 MAX : 10
* Bit 2 : TX_EARLY_STOP_TIME MIN : 1 MAX : 10
* Bit 3 : RX_DRAIN_TIME MIN : 1 MAX : 10
* Bit 4 : CHANNEL_SWITCH_TIME MIN : 1 MAX : 20
* Bit 5 : MIN_CHANNEL_TIME MIN : 5 MAX : 20
* Bit 6 : PARK_BEFORE_TBTT MIN : 1 MAX : 5
* Bit 7 : MIN_AFTER_DTIM MIN : 5 MAX : 15
* Bit 8 : TOO_CLOSE_MARGIN MIN : 1 MAX : 3
* Bit 9 : Reserved
*/
switch (arg1)
{
//Update MCC SCHEDULE_TIME_SLICE parameter
case MCC_SCHEDULE_TIME_SLICE_CFG_PARAM :
if( pHddCtx->cfg_ini->configMccParam & 0x0001)
{
if((arg2 >= 5) && (arg2 <= 20))
{
logPrintf(hHal, cmd, staId, arg1, arg2, arg3);
}
else
{
hddLog(LOGE, "%s : Enter a valid MCC configuration value",__FUNCTION__);
return 0;
}
}
break;
//Update MCC MAX_NULL_SEND_TIME parameter
case MCC_MAX_NULL_SEND_TIME_CFG_PARAM :
if( pHddCtx->cfg_ini->configMccParam & 0x0002)
{
if((arg2 >= 1) && (arg2 <= 10))
{
logPrintf(hHal, cmd, staId, arg1, arg2, arg3);
}
else
{
hddLog(LOGE, "%s : Enter a valid MCC configuration value",__FUNCTION__);
return 0;
}
}
break;
//Update MCC TX_EARLY_STOP_TIME parameter
case MCC_TX_EARLY_STOP_TIME_CFG_PARAM :
if( pHddCtx->cfg_ini->configMccParam & 0x0004)
{
if((arg2 >= 1) && (arg2 <= 10))
{
logPrintf(hHal, cmd, staId, arg1, arg2, arg3);
}
else
{
hddLog(LOGE, "%s : Enter a valid MCC configuration value",__FUNCTION__);
return 0;
}
}
break;
//Update MCC RX_DRAIN_TIME parameter
case MCC_RX_DRAIN_TIME_CFG_PARAM :
if( pHddCtx->cfg_ini->configMccParam & 0x0008)
{
if((arg2 >= 1) && (arg2 <= 10))
{
logPrintf(hHal, cmd, staId, arg1, arg2, arg3);
}
else
{
hddLog(LOGE, "%s : Enter a valid MCC configuration value",__FUNCTION__);
return 0;
}
}
break;
//Update MCC CHANNEL_SWITCH_TIME parameter
case MCC_CHANNEL_SWITCH_TIME_CFG_PARAM :
if( pHddCtx->cfg_ini->configMccParam & 0x0010)
{
if((arg2 >= 1) && (arg2 <= 20))
{
logPrintf(hHal, cmd, staId, arg1, arg2, arg3);
}
else
{
hddLog(LOGE, "%s : Enter a valid MCC configuration value",__FUNCTION__);
return 0;
}
}
break;
//Update MCC MIN_CHANNEL_TIME parameter
case MCC_MIN_CHANNEL_TIME_CFG_PARAM :
if( pHddCtx->cfg_ini->configMccParam & 0x0020)
{
if((arg2 >= 5) && (arg2 <= 20))
{
logPrintf(hHal, cmd, staId, arg1, arg2, arg3);
}
else
{
hddLog(LOGE, "%s : Enter a valid MCC configuration value",__FUNCTION__);
return 0;
}
}
break;
//Update MCC PARK_BEFORE_TBTT parameter
case MCC_PARK_BEFORE_TBTT_CFG_PARAM :
if( pHddCtx->cfg_ini->configMccParam & 0x0040)
{
if((arg2 >= 1) && (arg2 <= 5))
{
logPrintf(hHal, cmd, staId, arg1, arg2, arg3);
}
else
{
hddLog(LOGE, "%s : Enter a valid MCC configuration value",__FUNCTION__);
return 0;
}
}
break;
//Update MCC MIN_AFTER_DTIM parameter
case MCC_MIN_AFTER_DTIM_CFG_PARAM :
if( pHddCtx->cfg_ini->configMccParam & 0x0080)
{
if((arg2 >= 5) && (arg2 <= 15))
{
logPrintf(hHal, cmd, staId, arg1, arg2, arg3);
}
else
{
hddLog(LOGE, "%s : Enter a valid MCC configuration value",__FUNCTION__);
return 0;
}
}
break;
//Update MCC TOO_CLOSE_MARGIN parameter
case MCC_TOO_CLOSE_MARGIN_CFG_PARAM :
if( pHddCtx->cfg_ini->configMccParam & 0x0100)
{
if((arg2 >= 1) && (arg2 <= 3))
{
logPrintf(hHal, cmd, staId, arg1, arg2, arg3);
}
else
{
hddLog(LOGE, "%s : Enter a valid MCC configuration value",__FUNCTION__);
return 0;
}
}
break;
default :
hddLog(LOGE, "%s : Unknown / not allowed to configure parameter : %d",
__FUNCTION__,arg1);
break;
}
return 0;
}
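/*
 * A table-driven equivalent of the switch above is sketched below. It is
 * illustrative only (hence #if 0) and not part of the driver; the helper
 * name is hypothetical, and the masks and MIN/MAX ranges are copied from
 * the comment block at the top of hdd_validate_mcc_config and must be kept
 * in sync with it.
 */
#if 0
static int is_valid_mcc_param(hdd_context_t *pHddCtx, v_UINT_t arg1,
                              v_UINT_t arg2)
{
    static const struct {
        v_UINT_t param;   /* MCC_*_CFG_PARAM identifier */
        v_U32_t  mask;    /* bit in cfg_ini->configMccParam */
        v_UINT_t min;
        v_UINT_t max;
    } tbl[] = {
        { MCC_SCHEDULE_TIME_SLICE_CFG_PARAM, 0x0001, 5, 20 },
        { MCC_MAX_NULL_SEND_TIME_CFG_PARAM,  0x0002, 1, 10 },
        { MCC_TX_EARLY_STOP_TIME_CFG_PARAM,  0x0004, 1, 10 },
        { MCC_RX_DRAIN_TIME_CFG_PARAM,       0x0008, 1, 10 },
        { MCC_CHANNEL_SWITCH_TIME_CFG_PARAM, 0x0010, 1, 20 },
        { MCC_MIN_CHANNEL_TIME_CFG_PARAM,    0x0020, 5, 20 },
        { MCC_PARK_BEFORE_TBTT_CFG_PARAM,    0x0040, 1,  5 },
        { MCC_MIN_AFTER_DTIM_CFG_PARAM,      0x0080, 5, 15 },
        { MCC_TOO_CLOSE_MARGIN_CFG_PARAM,    0x0100, 1,  3 },
    };
    unsigned int i;
    for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
    {
        if (tbl[i].param == arg1)
            return (pHddCtx->cfg_ini->configMccParam & tbl[i].mask) &&
                   (arg2 >= tbl[i].min) && (arg2 <= tbl[i].max);
    }
    return 0;
}
#endif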
int hdd_set_wext(hdd_adapter_t *pAdapter)
{
hdd_wext_state_t *pwextBuf;
hdd_station_ctx_t *pHddStaCtx = WLAN_HDD_GET_STATION_CTX_PTR(pAdapter);
hdd_context_t *pHddCtx = WLAN_HDD_GET_CTX(pAdapter);
pwextBuf = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
// Now configure the roaming profile links. To SSID and bssid.
pwextBuf->roamProfile.SSIDs.numOfSSIDs = 0;
pwextBuf->roamProfile.SSIDs.SSIDList = &pHddStaCtx->conn_info.SSID;
pwextBuf->roamProfile.BSSIDs.numOfBSSIDs = 0;
pwextBuf->roamProfile.BSSIDs.bssid = &pHddStaCtx->conn_info.bssId;
/*Set the numOfChannels to zero to scan all the channels*/
pwextBuf->roamProfile.ChannelInfo.numOfChannels = 0;
pwextBuf->roamProfile.ChannelInfo.ChannelList = NULL;
/* Default is no encryption */
pwextBuf->roamProfile.EncryptionType.numEntries = 1;
pwextBuf->roamProfile.EncryptionType.encryptionType[0] = eCSR_ENCRYPT_TYPE_NONE;
pwextBuf->roamProfile.mcEncryptionType.numEntries = 1;
pwextBuf->roamProfile.mcEncryptionType.encryptionType[0] = eCSR_ENCRYPT_TYPE_NONE;
pwextBuf->roamProfile.BSSType = eCSR_BSS_TYPE_INFRASTRUCTURE;
/* Default is no authentication */
pwextBuf->roamProfile.AuthType.numEntries = 1;
pwextBuf->roamProfile.AuthType.authType[0] = eCSR_AUTH_TYPE_OPEN_SYSTEM;
pwextBuf->roamProfile.phyMode = eCSR_DOT11_MODE_TAURUS;
pwextBuf->wpaVersion = IW_AUTH_WPA_VERSION_DISABLED;
/*Set the default scan mode*/
pHddCtx->scan_info.scan_mode = eSIR_ACTIVE_SCAN;
hdd_clearRoamProfileIe(pAdapter);
return VOS_STATUS_SUCCESS;
}
int hdd_register_wext(struct net_device *dev)
{
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
hdd_wext_state_t *pwextBuf = WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter);
VOS_STATUS status;
ENTER();
// Zero the memory. This zeros the profile structure.
memset(pwextBuf, 0,sizeof(hdd_wext_state_t));
init_completion(&(WLAN_HDD_GET_WEXT_STATE_PTR(pAdapter))->completion_var);
status = hdd_set_wext(pAdapter);
if(!VOS_IS_STATUS_SUCCESS(status)) {
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, ("ERROR: hdd_set_wext failed!!"));
return eHAL_STATUS_FAILURE;
}
if (!VOS_IS_STATUS_SUCCESS(vos_event_init(&pwextBuf->vosevent)))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, ("ERROR: HDD vos event init failed!!"));
return eHAL_STATUS_FAILURE;
}
if (!VOS_IS_STATUS_SUCCESS(vos_event_init(&pwextBuf->scanevent)))
{
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_ERROR, ("ERROR: HDD scan event init failed!!"));
return eHAL_STATUS_FAILURE;
}
// Register as a wireless device
dev->wireless_handlers = (struct iw_handler_def *)&we_handler_def;
EXIT();
return 0;
}
int hdd_UnregisterWext(struct net_device *dev)
{
#if 0
hdd_wext_state_t *wextBuf;
hdd_adapter_t *pAdapter = WLAN_HDD_GET_PRIV_PTR(dev);
ENTER();
// Set up the pointer to the Wireless Extensions state structure
wextBuf = pAdapter->pWextState;
// De-allocate the Wireless Extensions state structure
kfree(wextBuf);
// Clear out the pointer to the Wireless Extensions state structure
pAdapter->pWextState = NULL;
EXIT();
#endif
VOS_TRACE(VOS_MODULE_ID_HDD, VOS_TRACE_LEVEL_INFO,"In %s", __func__);
rtnl_lock();
dev->wireless_handlers = NULL;
rtnl_unlock();
return 0;
}
| gpl-2.0 |
louishust/mysql5.6.14_tokudb | storage/ndb/src/kernel/blocks/ndbfs/Ndbfs.cpp | 87 | 44980 | /*
Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <ndb_global.h>
#include "Ndbfs.hpp"
#include "AsyncFile.hpp"
#ifdef NDB_WIN
#include "Win32AsyncFile.hpp"
#else
#include "PosixAsyncFile.hpp"
#endif
#include <signaldata/FsOpenReq.hpp>
#include <signaldata/FsCloseReq.hpp>
#include <signaldata/FsReadWriteReq.hpp>
#include <signaldata/FsAppendReq.hpp>
#include <signaldata/FsRemoveReq.hpp>
#include <signaldata/FsConf.hpp>
#include <signaldata/FsRef.hpp>
#include <signaldata/NdbfsContinueB.hpp>
#include <signaldata/DumpStateOrd.hpp>
#include <signaldata/AllocMem.hpp>
#include <RefConvert.hpp>
#include <portlib/NdbDir.hpp>
#include <NdbOut.hpp>
#include <Configuration.hpp>
#include <EventLogger.hpp>
extern EventLogger * g_eventLogger;
NdbMutex g_active_bound_threads_mutex;
inline
int pageSize( const NewVARIABLE* baseAddrRef )
{
int log_psize;
int log_qsize = baseAddrRef->bits.q;
int log_vsize = baseAddrRef->bits.v;
if (log_vsize < 3)
log_vsize = 3;
log_psize = log_qsize + log_vsize - 3;
return (1 << log_psize);
}
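/*
 * I.e. page size in bytes = (2^q entries * 2^v bits per entry) / 8 bits,
 * which is 2^(q + v - 3); v is clamped to at least 3 so every entry
 * occupies a whole byte. Example: q = 13, v = 5 gives 2^15 = 32768 bytes.
 * (This reading of the q/v bit fields is inferred from the arithmetic.)
 */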
Ndbfs::Ndbfs(Block_context& ctx) :
SimulatedBlock(NDBFS, ctx),
scanningInProgress(false),
theLastId(0),
theRequestPool(0),
m_maxOpenedFiles(0),
m_bound_threads_cnt(0),
m_unbounds_threads_cnt(0),
m_active_bound_threads_cnt(0)
{
BLOCK_CONSTRUCTOR(Ndbfs);
NdbMutex_Init(&g_active_bound_threads_mutex);
// Set received signals
addRecSignal(GSN_READ_CONFIG_REQ, &Ndbfs::execREAD_CONFIG_REQ);
addRecSignal(GSN_DUMP_STATE_ORD, &Ndbfs::execDUMP_STATE_ORD);
addRecSignal(GSN_STTOR, &Ndbfs::execSTTOR);
addRecSignal(GSN_FSOPENREQ, &Ndbfs::execFSOPENREQ);
addRecSignal(GSN_FSCLOSEREQ, &Ndbfs::execFSCLOSEREQ);
addRecSignal(GSN_FSWRITEREQ, &Ndbfs::execFSWRITEREQ);
addRecSignal(GSN_FSREADREQ, &Ndbfs::execFSREADREQ);
addRecSignal(GSN_FSSYNCREQ, &Ndbfs::execFSSYNCREQ);
addRecSignal(GSN_CONTINUEB, &Ndbfs::execCONTINUEB);
addRecSignal(GSN_FSAPPENDREQ, &Ndbfs::execFSAPPENDREQ);
addRecSignal(GSN_FSREMOVEREQ, &Ndbfs::execFSREMOVEREQ);
addRecSignal(GSN_ALLOC_MEM_REQ, &Ndbfs::execALLOC_MEM_REQ);
addRecSignal(GSN_SEND_PACKED, &Ndbfs::execSEND_PACKED, true);
addRecSignal(GSN_BUILD_INDX_IMPL_REQ, &Ndbfs::execBUILD_INDX_IMPL_REQ);
// Set send signals
addRecSignal(GSN_FSSUSPENDORD, &Ndbfs::execFSSUSPENDORD);
theRequestPool = new Pool<Request>;
}
Ndbfs::~Ndbfs()
{
/**
* Stop all unbound threads
*/
/**
* Post enough Request::end to saturate all unbound threads
*/
Request request;
request.action = Request::end;
for (unsigned i = 0; i < theThreads.size(); i++)
{
theToBoundThreads.writeChannel(&request);
theToUnboundThreads.writeChannel(&request);
}
for (unsigned i = 0; i < theThreads.size(); i++)
{
AsyncIoThread * thr = theThreads[i];
thr->shutdown();
}
/**
* delete all threads
*/
for (unsigned i = 0; i < theThreads.size(); i++)
{
AsyncIoThread * thr = theThreads[i];
delete thr;
theThreads[i] = 0;
}
theThreads.clear();
/**
* Delete all files
*/
for (unsigned i = 0; i < theFiles.size(); i++){
AsyncFile* file = theFiles[i];
delete file;
theFiles[i] = NULL;
}//for
theFiles.clear();
if (theRequestPool)
delete theRequestPool;
}
static
bool
do_mkdir(const char * path)
{
return NdbDir::create(path,
NdbDir::u_rwx() | NdbDir::g_r() | NdbDir::g_x(),
true /* ignore_existing */);
}
static
void
add_path(BaseString& dst, const char * add)
{
const char * tmp = dst.c_str();
unsigned len = dst.length();
unsigned dslen = (unsigned)strlen(DIR_SEPARATOR);
if (len > dslen && strcmp(tmp+(len - dslen), DIR_SEPARATOR) != 0)
dst.append(DIR_SEPARATOR);
dst.append(add);
}
static
bool
validate_path(BaseString & dst,
const char * path)
{
char buf2[PATH_MAX];
memset(buf2, 0,sizeof(buf2));
#ifdef NDB_WIN32
CreateDirectory(path, 0);
char* szFilePart;
if(!GetFullPathName(path, sizeof(buf2), buf2, &szFilePart) ||
(GetFileAttributes(buf2) & FILE_ATTRIBUTE_READONLY))
return false;
#else
if (::realpath(path, buf2) == NULL ||
::access(buf2, W_OK) != 0)
return false;
#endif
dst.assign(buf2);
add_path(dst, "");
return true;
}
const BaseString&
Ndbfs::get_base_path(Uint32 no) const
{
if (no < NDB_ARRAY_SIZE(m_base_path) &&
strlen(m_base_path[no].c_str()) > 0)
{
jam();
return m_base_path[no];
}
return m_base_path[FsOpenReq::BP_FS];
}
void
Ndbfs::execREAD_CONFIG_REQ(Signal* signal)
{
const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr();
Uint32 ref = req->senderRef;
Uint32 senderData = req->senderData;
const ndb_mgm_configuration_iterator * p =
m_ctx.m_config.getOwnConfigIterator();
ndbrequire(p != 0);
BaseString tmp;
tmp.assfmt("ndb_%u_fs%s", getOwnNodeId(), DIR_SEPARATOR);
m_base_path[FsOpenReq::BP_FS].assfmt("%s%s",
m_ctx.m_config.fileSystemPath(),
tmp.c_str());
m_base_path[FsOpenReq::BP_BACKUP].assign(m_ctx.m_config.backupFilePath());
const char * ddpath = 0;
ndb_mgm_get_string_parameter(p, CFG_DB_DD_FILESYSTEM_PATH, &ddpath);
{
const char * datapath = ddpath;
ndb_mgm_get_string_parameter(p, CFG_DB_DD_DATAFILE_PATH, &datapath);
if (datapath)
{
/**
* Only set BP_DD_DF if either FileSystemPathDataFiles or FileSystemPathDD
* is set...otherwise get_base_path(FsOpenReq::BP_DD_DF) will
* return BP_FS (see get_base_path)
*/
BaseString path;
add_path(path, datapath);
do_mkdir(path.c_str());
add_path(path, tmp.c_str());
do_mkdir(path.c_str());
if (!validate_path(m_base_path[FsOpenReq::BP_DD_DF], path.c_str()))
{
ERROR_SET(fatal, NDBD_EXIT_AFS_INVALIDPATH,
m_base_path[FsOpenReq::BP_DD_DF].c_str(),
"FileSystemPathDataFiles");
}
}
}
{
const char * undopath = ddpath;
ndb_mgm_get_string_parameter(p, CFG_DB_DD_UNDOFILE_PATH, &undopath);
if (undopath)
{
/**
* Only set BP_DD_DF if either FileSystemPathUndoFiles or FileSystemPathDD
* is set...otherwise get_base_path(FsOpenReq::BP_DD_UF) will
* return BP_FS (see get_base_path)
*/
BaseString path;
add_path(path, undopath);
do_mkdir(path.c_str());
add_path(path, tmp.c_str());
do_mkdir(path.c_str());
if (!validate_path(m_base_path[FsOpenReq::BP_DD_UF], path.c_str()))
{
ERROR_SET(fatal, NDBD_EXIT_AFS_INVALIDPATH,
m_base_path[FsOpenReq::BP_DD_UF].c_str(),
"FileSystemPathUndoFiles");
}
}
}
m_maxFiles = 0;
ndb_mgm_get_int_parameter(p, CFG_DB_MAX_OPEN_FILES, &m_maxFiles);
Uint32 noIdleFiles = 27;
ndb_mgm_get_int_parameter(p, CFG_DB_INITIAL_OPEN_FILES, &noIdleFiles);
// Make sure at least "noIdleFiles" files can be created
if (noIdleFiles > m_maxFiles && m_maxFiles != 0)
m_maxFiles = noIdleFiles;
// Create idle AsyncFiles
for (Uint32 i = 0; i < noIdleFiles; i++)
{
theIdleFiles.push_back(createAsyncFile());
AsyncIoThread * thr = createIoThread(/* bound */ true);
if (thr)
{
theThreads.push_back(thr);
}
}
Uint32 threadpool = 2;
ndb_mgm_get_int_parameter(p, CFG_DB_THREAD_POOL, &threadpool);
// Create IoThreads
for (Uint32 i = 0; i < threadpool; i++)
{
AsyncIoThread * thr = createIoThread(/* bound */ false);
if (thr)
{
jam();
theThreads.push_back(thr);
}
else
{
jam();
break;
}
}
setup_wakeup();
ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend();
conf->senderRef = reference();
conf->senderData = senderData;
sendSignal(ref, GSN_READ_CONFIG_CONF, signal,
ReadConfigConf::SignalLength, JBB);
// start scanning
signal->theData[0] = NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY;
sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 10, 1);
}
/* Received a restart signal.
* Answer it like any other block
* PR0 : StartCase
* DR0 : StartPhase
* DR1 : ?
* DR2 : ?
* DR3 : ?
* DR4 : ?
* DR5 : SignalKey
*/
void
Ndbfs::execSTTOR(Signal* signal)
{
jamEntry();
if(signal->theData[1] == 0){ // StartPhase 0
jam();
do_mkdir(m_base_path[FsOpenReq::BP_FS].c_str());
// close all open files
ndbrequire(theOpenFiles.size() == 0);
signal->theData[3] = 255;
sendSignal(NDBCNTR_REF, GSN_STTORRY, signal,4, JBB);
return;
}
ndbrequire(0);
}
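/*
 * Route a request to an IO thread: a file that is already bound goes
 * straight to its thread's queue; otherwise the request is posted on the
 * bound or unbound MemoryChannel according to request->m_do_bind, and any
 * idle thread of the matching kind picks it up.
 */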
int
Ndbfs::forward( AsyncFile * file, Request* request)
{
jam();
AsyncIoThread* thr = file->getThread();
if (thr) // bound
{
thr->dispatch(request);
}
else if (request->m_do_bind)
{
theToBoundThreads.writeChannel(request);
}
else
{
theToUnboundThreads.writeChannel(request);
}
return 1;
}
void
Ndbfs::execFSOPENREQ(Signal* signal)
{
jamEntry();
const FsOpenReq * const fsOpenReq = (FsOpenReq *)&signal->theData[0];
const BlockReference userRef = fsOpenReq->userReference;
bool bound = (fsOpenReq->fileFlags & FsOpenReq::OM_THREAD_POOL) == 0;
AsyncFile* file = getIdleFile(bound);
ndbrequire(file != NULL);
Uint32 userPointer = fsOpenReq->userPointer;
SectionHandle handle(this, signal);
SegmentedSectionPtr ptr; ptr.setNull();
if (handle.m_cnt)
{
jam();
handle.getSection(ptr, FsOpenReq::FILENAME);
}
file->theFileName.set(this, userRef, fsOpenReq->fileNumber, false, ptr);
releaseSections(handle);
if (fsOpenReq->fileFlags & FsOpenReq::OM_INIT)
{
jam();
Uint32 cnt = 16; // 512k
Ptr<GlobalPage> page_ptr;
m_ctx.m_mm.alloc_pages(RT_DBTUP_PAGE, &page_ptr.i, &cnt, 1);
if(cnt == 0)
{
file->m_page_ptr.setNull();
file->m_page_cnt = 0;
FsRef * const fsRef = (FsRef *)&signal->theData[0];
fsRef->userPointer = userPointer;
fsRef->setErrorCode(fsRef->errorCode, FsRef::fsErrOutOfMemory);
fsRef->osErrorCode = ~0; // Indicate local error
sendSignal(userRef, GSN_FSOPENREF, signal, 3, JBB);
return;
}
m_shared_page_pool.getPtr(page_ptr);
file->set_buffer(RT_DBTUP_PAGE, page_ptr, cnt);
}
else if (fsOpenReq->fileFlags & FsOpenReq::OM_WRITE_BUFFER)
{
jam();
Uint32 cnt = NDB_FILE_BUFFER_SIZE / GLOBAL_PAGE_SIZE; // 256k
Ptr<GlobalPage> page_ptr;
m_ctx.m_mm.alloc_pages(RT_FILE_BUFFER, &page_ptr.i, &cnt, 1);
if (cnt == 0)
{
jam();
file->m_page_ptr.setNull();
file->m_page_cnt = 0;
FsRef * const fsRef = (FsRef *)&signal->theData[0];
fsRef->userPointer = userPointer;
fsRef->setErrorCode(fsRef->errorCode, FsRef::fsErrOutOfMemory);
fsRef->osErrorCode = ~0; // Indicate local error
sendSignal(userRef, GSN_FSOPENREF, signal, 3, JBB);
return;
}
m_shared_page_pool.getPtr(page_ptr);
file->set_buffer(RT_FILE_BUFFER, page_ptr, cnt);
}
else
{
ndbassert(file->m_page_ptr.isNull());
file->m_page_ptr.setNull();
file->m_page_cnt = 0;
}
if (getenv("NDB_TRACE_OPEN"))
ndbout_c("open(%s) bound: %u", file->theFileName.c_str(), bound);
Request* request = theRequestPool->get();
request->action = Request::open;
request->error = 0;
request->set(userRef, userPointer, newId() );
request->file = file;
request->theTrace = signal->getTrace();
request->par.open.flags = fsOpenReq->fileFlags;
request->par.open.page_size = fsOpenReq->page_size;
request->par.open.file_size = fsOpenReq->file_size_hi;
request->par.open.file_size <<= 32;
request->par.open.file_size |= fsOpenReq->file_size_lo;
request->par.open.auto_sync_size = fsOpenReq->auto_sync_size;
request->m_do_bind = bound;
ndbrequire(forward(file, request));
}
void
Ndbfs::execFSREMOVEREQ(Signal* signal)
{
jamEntry();
const FsRemoveReq * const req = (FsRemoveReq *)signal->getDataPtr();
const BlockReference userRef = req->userReference;
bool bound = true;
AsyncFile* file = getIdleFile(bound);
ndbrequire(file != NULL);
SectionHandle handle(this, signal);
SegmentedSectionPtr ptr; ptr.setNull();
if(handle.m_cnt)
{
jam();
handle.getSection(ptr, FsOpenReq::FILENAME);
}
file->theFileName.set(this, userRef, req->fileNumber, req->directory, ptr);
releaseSections(handle);
Uint32 version = FsOpenReq::getVersion(req->fileNumber);
Uint32 bp = FsOpenReq::v5_getLcpNo(req->fileNumber);
Request* request = theRequestPool->get();
request->action = Request::rmrf;
request->par.rmrf.directory = req->directory;
request->par.rmrf.own_directory = req->ownDirectory;
request->error = 0;
request->set(userRef, req->userPointer, newId() );
request->file = file;
request->theTrace = signal->getTrace();
request->m_do_bind = bound;
if (version == 6)
{
ndbrequire(bp < NDB_ARRAY_SIZE(m_base_path));
if (strlen(m_base_path[bp].c_str()) == 0)
{
goto ignore;
}
}
ndbrequire(forward(file, request));
return;
ignore:
report(request, signal);
}
/*
* PR0: File Pointer DR0: User reference DR1: User Pointer
* DR2: Flag, bit 0 = 1 means remove the file on close
*/
void
Ndbfs::execFSCLOSEREQ(Signal * signal)
{
jamEntry();
const FsCloseReq * const fsCloseReq = (FsCloseReq *)&signal->theData[0];
const BlockReference userRef = fsCloseReq->userReference;
const Uint16 filePointer = (Uint16)fsCloseReq->filePointer;
const UintR userPointer = fsCloseReq->userPointer;
AsyncFile* openFile = theOpenFiles.find(filePointer);
if (openFile == NULL) {
// The file was not open, send error back to sender
jam();
// Initialise FsRef signal
FsRef * const fsRef = (FsRef *)&signal->theData[0];
fsRef->userPointer = userPointer;
fsRef->setErrorCode(fsRef->errorCode, FsRef::fsErrFileDoesNotExist);
fsRef->osErrorCode = ~0; // Indicate local error
sendSignal(userRef, GSN_FSCLOSEREF, signal, 3, JBB);
g_eventLogger->warning("Trying to close unknown file!! %u", userPointer);
g_eventLogger->warning("Dumping files");
signal->theData[0] = 405;
execDUMP_STATE_ORD(signal);
return;
}
if (getenv("NDB_TRACE_OPEN"))
ndbout_c("close(%s)", openFile->theFileName.c_str());
Request *request = theRequestPool->get();
if( fsCloseReq->getRemoveFileFlag(fsCloseReq->fileFlag) == true ) {
jam();
request->action = Request::closeRemove;
} else {
jam();
request->action = Request::close;
}
request->set(userRef, fsCloseReq->userPointer, filePointer);
request->file = openFile;
request->error = 0;
request->theTrace = signal->getTrace();
request->m_do_bind = false;
ndbrequire(forward(openFile, request));
}
void
Ndbfs::readWriteRequest(int action, Signal * signal)
{
Uint32 theData[25 + 2 * 32];
memcpy(theData, signal->theData, 4 * signal->getLength());
SectionHandle handle(this, signal);
if (handle.m_cnt > 0)
{
SegmentedSectionPtr secPtr;
ndbrequire(handle.getSection(secPtr, 0));
ndbrequire(signal->getLength() + secPtr.sz < NDB_ARRAY_SIZE(theData));
copy(theData + signal->getLength(), secPtr);
releaseSections(handle);
}
const FsReadWriteReq * const fsRWReq = (FsReadWriteReq *)theData;
Uint16 filePointer = (Uint16)fsRWReq->filePointer;
const UintR userPointer = fsRWReq->userPointer;
const BlockReference userRef = fsRWReq->userReference;
const BlockNumber blockNumber = refToMain(userRef);
const Uint32 instanceNumber = refToInstance(userRef);
AsyncFile* openFile = theOpenFiles.find(filePointer);
const NewVARIABLE *myBaseAddrRef =
&getBat(blockNumber, instanceNumber)[fsRWReq->varIndex];
UintPtr tPageSize;
UintPtr tClusterSize;
UintPtr tNRR;
UintPtr tPageOffset;
char* tWA;
FsRef::NdbfsErrorCodeType errorCode;
Request *request = theRequestPool->get();
request->error = 0;
request->set(userRef, userPointer, filePointer);
request->file = openFile;
request->action = (Request::Action) action;
request->theTrace = signal->getTrace();
request->m_do_bind = false;
Uint32 format = fsRWReq->getFormatFlag(fsRWReq->operationFlag);
if (fsRWReq->numberOfPages == 0) { //Zero pages not allowed
jam();
errorCode = FsRef::fsErrInvalidParameters;
goto error;
}
if(format != FsReadWriteReq::fsFormatGlobalPage &&
format != FsReadWriteReq::fsFormatSharedPage)
{
if (fsRWReq->varIndex >= getBatSize(blockNumber, instanceNumber)) {
jam();// Ensure that a valid variable is used
errorCode = FsRef::fsErrInvalidParameters;
goto error;
}
if (myBaseAddrRef == NULL) {
jam(); // Ensure that a valid variable is used
errorCode = FsRef::fsErrInvalidParameters;
goto error;
}
if (openFile == NULL) {
jam(); //file not open
errorCode = FsRef::fsErrFileDoesNotExist;
goto error;
}
tPageSize = pageSize(myBaseAddrRef);
tClusterSize = myBaseAddrRef->ClusterSize;
tNRR = myBaseAddrRef->nrr;
tWA = (char*)myBaseAddrRef->WA;
switch (format) {
// List of memory and file pages pairs
case FsReadWriteReq::fsFormatListOfPairs: {
jam();
for (unsigned int i = 0; i < fsRWReq->numberOfPages; i++) {
jam();
const UintPtr varIndex = fsRWReq->data.listOfPair[i].varIndex;
const UintPtr fileOffset = fsRWReq->data.listOfPair[i].fileOffset;
if (varIndex >= tNRR) {
jam();
errorCode = FsRef::fsErrInvalidParameters;
goto error;
}//if
request->par.readWrite.pages[i].buf = &tWA[varIndex * tClusterSize];
request->par.readWrite.pages[i].size = tPageSize;
request->par.readWrite.pages[i].offset = (off_t)(fileOffset*tPageSize);
}//for
request->par.readWrite.numberOfPages = fsRWReq->numberOfPages;
break;
}//case
// Range of memory page with one file page
case FsReadWriteReq::fsFormatArrayOfPages: {
if ((fsRWReq->numberOfPages + fsRWReq->data.arrayOfPages.varIndex) > tNRR) {
jam();
errorCode = FsRef::fsErrInvalidParameters;
goto error;
}//if
const UintPtr varIndex = fsRWReq->data.arrayOfPages.varIndex;
const UintPtr fileOffset = fsRWReq->data.arrayOfPages.fileOffset;
request->par.readWrite.pages[0].offset = (off_t)(fileOffset * tPageSize);
request->par.readWrite.pages[0].size = tPageSize * fsRWReq->numberOfPages;
request->par.readWrite.numberOfPages = 1;
request->par.readWrite.pages[0].buf = &tWA[varIndex * tPageSize];
break;
}//case
// List of memory pages followed by one file page
case FsReadWriteReq::fsFormatListOfMemPages: {
tPageOffset = fsRWReq->data.listOfMemPages.varIndex[fsRWReq->numberOfPages];
tPageOffset *= tPageSize;
for (unsigned int i = 0; i < fsRWReq->numberOfPages; i++) {
jam();
UintPtr varIndex = fsRWReq->data.listOfMemPages.varIndex[i];
if (varIndex >= tNRR) {
jam();
errorCode = FsRef::fsErrInvalidParameters;
goto error;
}//if
request->par.readWrite.pages[i].buf = &tWA[varIndex * tClusterSize];
request->par.readWrite.pages[i].size = tPageSize;
request->par.readWrite.pages[i].offset = (off_t)
(tPageOffset + (i*tPageSize));
}//for
request->par.readWrite.numberOfPages = fsRWReq->numberOfPages;
break;
// make it a writev or readv
}//case
default: {
jam();
errorCode = FsRef::fsErrInvalidParameters;
goto error;
}//default
}//switch
}
else if (format == FsReadWriteReq::fsFormatGlobalPage)
{
Ptr<GlobalPage> ptr;
m_global_page_pool.getPtr(ptr, fsRWReq->data.pageData[0]);
request->par.readWrite.pages[0].buf = (char*)ptr.p;
request->par.readWrite.pages[0].size = ((UintPtr)GLOBAL_PAGE_SIZE)*fsRWReq->numberOfPages;
request->par.readWrite.pages[0].offset= ((UintPtr)GLOBAL_PAGE_SIZE)*fsRWReq->varIndex;
request->par.readWrite.numberOfPages = 1;
}
else
{
ndbrequire(format == FsReadWriteReq::fsFormatSharedPage);
Ptr<GlobalPage> ptr;
m_shared_page_pool.getPtr(ptr, fsRWReq->data.pageData[0]);
request->par.readWrite.pages[0].buf = (char*)ptr.p;
request->par.readWrite.pages[0].size = ((UintPtr)GLOBAL_PAGE_SIZE)*fsRWReq->numberOfPages;
request->par.readWrite.pages[0].offset= ((UintPtr)GLOBAL_PAGE_SIZE)*fsRWReq->varIndex;
request->par.readWrite.numberOfPages = 1;
}
ndbrequire(forward(openFile, request));
return;
error:
theRequestPool->put(request);
FsRef * const fsRef = (FsRef *)&signal->theData[0];
fsRef->userPointer = userPointer;
fsRef->setErrorCode(fsRef->errorCode, errorCode);
fsRef->osErrorCode = ~0; // Indicate local error
switch (action) {
case Request:: write:
case Request:: writeSync: {
jam();
sendSignal(userRef, GSN_FSWRITEREF, signal, 3, JBB);
break;
}//case
case Request:: readPartial:
case Request:: read: {
jam();
sendSignal(userRef, GSN_FSREADREF, signal, 3, JBB);
break;
}//case
}//switch
return;
}
/*
PR0: File Pointer , theData[0]
DR0: User reference, theData[1]
DR1: User Pointer, etc.
DR2: Flag
DR3: Var number
DR4: amount of pages
DR5->: Memory Page id and File page id according to Flag
*/
void
Ndbfs::execFSWRITEREQ(Signal* signal)
{
jamEntry();
const FsReadWriteReq * const fsWriteReq = (FsReadWriteReq *)&signal->theData[0];
if (fsWriteReq->getSyncFlag(fsWriteReq->operationFlag) == true){
jam();
readWriteRequest( Request::writeSync, signal );
} else {
jam();
readWriteRequest( Request::write, signal );
}
}
/*
PR0: File Pointer
DR0: User reference
DR1: User Pointer
DR2: Flag
DR3: Var number
DR4: amount of pages
DR5->: Memory Page id and File page id according to Flag
*/
void
Ndbfs::execFSREADREQ(Signal* signal)
{
jamEntry();
FsReadWriteReq * req = (FsReadWriteReq *)signal->getDataPtr();
if (FsReadWriteReq::getPartialReadFlag(req->operationFlag))
readWriteRequest( Request::readPartial, signal );
else
readWriteRequest( Request::read, signal );
}
/*
* PR0: File Pointer DR0: User reference DR1: User Pointer
*/
void
Ndbfs::execFSSYNCREQ(Signal * signal)
{
jamEntry();
Uint16 filePointer = (Uint16)signal->theData[0];
BlockReference userRef = signal->theData[1];
const UintR userPointer = signal->theData[2];
AsyncFile* openFile = theOpenFiles.find(filePointer);
if (openFile == NULL) {
jam(); //file not open
FsRef * const fsRef = (FsRef *)&signal->theData[0];
fsRef->userPointer = userPointer;
fsRef->setErrorCode(fsRef->errorCode, FsRef::fsErrFileDoesNotExist);
fsRef->osErrorCode = ~0; // Indicate local error
sendSignal(userRef, GSN_FSSYNCREF, signal, 3, JBB);
return;
}
Request *request = theRequestPool->get();
request->error = 0;
request->action = Request::sync;
request->set(userRef, userPointer, filePointer);
request->file = openFile;
request->theTrace = signal->getTrace();
request->m_do_bind = false;
ndbrequire(forward(openFile,request));
}
/*
* PR0: File Pointer DR0: User reference DR1: User Pointer
*/
void
Ndbfs::execFSSUSPENDORD(Signal * signal)
{
jamEntry();
Uint16 filePointer = (Uint16)signal->theData[0];
Uint32 millis = signal->theData[1];
AsyncFile* openFile = theOpenFiles.find(filePointer);
if (openFile == NULL)
{
jam(); //file not open
return;
}
Request *request = theRequestPool->get();
request->error = 0;
request->action = Request::suspend;
request->set(0, 0, filePointer);
request->file = openFile;
request->theTrace = signal->getTrace();
request->par.suspend.milliseconds = millis;
request->m_do_bind = false;
ndbrequire(forward(openFile,request));
}
void
Ndbfs::execFSAPPENDREQ(Signal * signal)
{
const FsAppendReq * const fsReq = (FsAppendReq *)&signal->theData[0];
const Uint16 filePointer = (Uint16)fsReq->filePointer;
const UintR userPointer = fsReq->userPointer;
const BlockReference userRef = fsReq->userReference;
const BlockNumber blockNumber = refToMain(userRef);
const Uint32 instanceNumber = refToInstance(userRef);
FsRef::NdbfsErrorCodeType errorCode;
AsyncFile* openFile = theOpenFiles.find(filePointer);
const NewVARIABLE *myBaseAddrRef =
&getBat(blockNumber, instanceNumber)[fsReq->varIndex];
const Uint32* tWA = (const Uint32*)myBaseAddrRef->WA;
const Uint32 tSz = myBaseAddrRef->nrr;
const Uint32 offset = fsReq->offset;
const Uint32 size = fsReq->size;
const Uint32 synch_flag = fsReq->synch_flag;
Request *request = theRequestPool->get();
if (openFile == NULL) {
jam();
errorCode = FsRef::fsErrFileDoesNotExist;
goto error;
}
if (myBaseAddrRef == NULL) {
jam(); // Ensure that a valid variable is used
errorCode = FsRef::fsErrInvalidParameters;
goto error;
}
if (fsReq->varIndex >= getBatSize(blockNumber, instanceNumber)) {
jam();// Ensure that a valid variable is used
errorCode = FsRef::fsErrInvalidParameters;
goto error;
}
if(offset + size > tSz){
jam(); // Ensure that a valid variable is used
errorCode = FsRef::fsErrInvalidParameters;
goto error;
}
request->error = 0;
request->set(userRef, userPointer, filePointer);
request->file = openFile;
request->theTrace = signal->getTrace();
request->par.append.buf = (const char *)(tWA + offset);
request->par.append.size = size << 2;
if (!synch_flag)
request->action = Request::append;
else
request->action = Request::append_synch;
request->m_do_bind = false;
ndbrequire(forward(openFile, request));
return;
error:
jam();
theRequestPool->put(request);
FsRef * const fsRef = (FsRef *)&signal->theData[0];
fsRef->userPointer = userPointer;
fsRef->setErrorCode(fsRef->errorCode, errorCode);
fsRef->osErrorCode = ~0; // Indicate local error
jam();
sendSignal(userRef, GSN_FSAPPENDREF, signal, 3, JBB);
return;
}
void
Ndbfs::execALLOC_MEM_REQ(Signal* signal)
{
jamEntry();
AllocMemReq* req = (AllocMemReq*)signal->getDataPtr();
bool bound = true;
AsyncFile* file = getIdleFile(bound);
ndbrequire(file != NULL);
Request *request = theRequestPool->get();
request->error = 0;
request->set(req->senderRef, req->senderData, 0);
request->file = file;
request->theTrace = signal->getTrace();
request->par.alloc.ctx = &m_ctx;
request->par.alloc.requestInfo = req->requestInfo;
request->par.alloc.bytes = (Uint64(req->bytes_hi) << 32) + req->bytes_lo;
request->action = Request::allocmem;
request->m_do_bind = bound;
ndbrequire(forward(file, request));
}
#include <signaldata/BuildIndxImpl.hpp>
void
Ndbfs::execBUILD_INDX_IMPL_REQ(Signal* signal)
{
jamEntry();
mt_BuildIndxReq * req = (mt_BuildIndxReq*)signal->getDataPtr();
bool bound = true;
AsyncFile* file = getIdleFile(bound);
ndbrequire(file != NULL);
Request *request = theRequestPool->get();
request->error = 0;
request->set(req->senderRef, req->senderData, 0);
request->file = file;
request->theTrace = signal->getTrace();
Uint32 cnt = (req->buffer_size + 32768 - 1) / 32768;
Uint32 save = cnt;
Ptr<GlobalPage> page_ptr;
m_ctx.m_mm.alloc_pages(RT_DBTUP_PAGE, &page_ptr.i, &cnt, cnt);
if(cnt == 0)
{
file->m_page_ptr.setNull();
file->m_page_cnt = 0;
ndbrequire(false); // TODO
return;
}
ndbrequire(cnt == save);
m_shared_page_pool.getPtr(page_ptr);
file->set_buffer(RT_DBTUP_PAGE, page_ptr, cnt);
memcpy(&request->par.build.m_req, req, sizeof(* req));
request->action = Request::buildindx;
request->m_do_bind = bound;
ndbrequire(forward(file, request));
}
Uint16
Ndbfs::newId()
{
// finds a new key, eg a new filepointer
for (int i = 1; i < SHRT_MAX; i++)
{
if (theLastId == SHRT_MAX) {
jam();
theLastId = 1;
} else {
jam();
theLastId++;
}
if(theOpenFiles.find(theLastId) == NULL) {
jam();
return theLastId;
}
}
ndbrequire(1 == 0);
// The program will not reach this point
return 0;
}
AsyncFile*
Ndbfs::createAsyncFile()
{
// Check limit of open files
if (m_maxFiles !=0 && theFiles.size() == m_maxFiles)
{
// Print info about all open files
for (unsigned i = 0; i < theFiles.size(); i++){
AsyncFile* file = theFiles[i];
ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED");
}
ERROR_SET(fatal, NDBD_EXIT_AFS_MAXOPEN,""," Ndbfs::createAsyncFile");
}
#ifdef NDB_WIN
AsyncFile* file = new Win32AsyncFile(* this);
#else
AsyncFile* file = new PosixAsyncFile(* this);
#endif
if (file->init())
{
ERROR_SET(fatal, NDBD_EXIT_AFS_MAXOPEN,""," Ndbfs::createAsyncFile");
}
theFiles.push_back(file);
return file;
}
void
Ndbfs::pushIdleFile(AsyncFile* file)
{
assert(file->getThread() == 0);
theIdleFiles.push_back(file);
}
AsyncIoThread*
Ndbfs::createIoThread(bool bound)
{
AsyncIoThread* thr = new AsyncIoThread(*this, bound);
if (thr)
{
#ifdef VM_TRACE
ndbout_c("NDBFS: Created new file thread %d", theThreads.size());
#endif
struct NdbThread* thrptr = thr->doStart();
globalEmulatorData.theConfiguration->addThread(thrptr, NdbfsThread);
if (bound)
m_bound_threads_cnt++;
else
m_unbounds_threads_cnt++;
}
return thr;
}
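/*
 * getIdleFile() pops a file from the idle list, creating a fresh AsyncFile
 * when the list is empty. For bound requests it also grows the bound thread
 * pool whenever every bound thread is currently busy, so an open never has
 * to wait for a dedicated thread to become free.
 */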
AsyncFile*
Ndbfs::getIdleFile(bool bound)
{
AsyncFile* file = 0;
Uint32 sz = theIdleFiles.size();
if (sz)
{
file = theIdleFiles[sz - 1];
theIdleFiles.erase(sz - 1);
}
else
{
file = createAsyncFile();
}
if (bound)
{
/**
* Check if we should create thread
*/
if (m_active_bound_threads_cnt == m_bound_threads_cnt)
{
AsyncIoThread * thr = createIoThread(true);
if (thr)
{
theThreads.push_back(thr);
}
}
}
return file;
}
void
Ndbfs::cnt_active_bound(int val)
{
Guard g(&g_active_bound_threads_mutex);
if (val < 0)
{
val = -val;
assert(m_active_bound_threads_cnt >= (Uint32)val);
m_active_bound_threads_cnt -= val;
}
else
{
m_active_bound_threads_cnt += val;
}
}
void
Ndbfs::report(Request * request, Signal* signal)
{
const Uint32 orgTrace = signal->getTrace();
signal->setTrace(request->theTrace);
const BlockReference ref = request->theUserReference;
if (request->file->has_buffer())
{
if ((request->action == Request::open && request->error) ||
request->action == Request::close ||
request->action == Request::closeRemove ||
request->action == Request::buildindx)
{
Uint32 rg;
Uint32 cnt;
Ptr<GlobalPage> ptr;
request->file->clear_buffer(rg, ptr, cnt);
m_ctx.m_mm.release_pages(rg, ptr.i, cnt);
}
}
if (request->error) {
jam();
// Initialise FsRef signal
FsRef * const fsRef = (FsRef *)&signal->theData[0];
fsRef->userPointer = request->theUserPointer;
if(request->error & FsRef::FS_ERR_BIT)
{
fsRef->errorCode = request->error;
fsRef->osErrorCode = 0;
}
else
{
fsRef->setErrorCode(fsRef->errorCode, translateErrno(request->error));
fsRef->osErrorCode = request->error;
}
switch (request->action) {
case Request:: open: {
jam();
// Put the file back in idle files list
pushIdleFile(request->file);
sendSignal(ref, GSN_FSOPENREF, signal, FsRef::SignalLength, JBB);
break;
}
case Request:: closeRemove:
case Request:: close: {
jam();
sendSignal(ref, GSN_FSCLOSEREF, signal, FsRef::SignalLength, JBB);
g_eventLogger->warning("Error closing file: %s %u/%u",
request->file->theFileName.c_str(),
fsRef->errorCode,
fsRef->osErrorCode);
g_eventLogger->warning("Dumping files");
signal->theData[0] = 405;
execDUMP_STATE_ORD(signal);
break;
}
case Request:: writeSync:
case Request:: writevSync:
case Request:: write:
case Request:: writev: {
jam();
sendSignal(ref, GSN_FSWRITEREF, signal, FsRef::SignalLength, JBB);
break;
}
case Request:: read:
case Request:: readPartial:
case Request:: readv: {
jam();
sendSignal(ref, GSN_FSREADREF, signal, FsRef::SignalLength, JBB);
break;
}
case Request:: sync: {
jam();
sendSignal(ref, GSN_FSSYNCREF, signal, FsRef::SignalLength, JBB);
break;
}
case Request::append:
case Request::append_synch:
{
jam();
sendSignal(ref, GSN_FSAPPENDREF, signal, FsRef::SignalLength, JBB);
break;
}
case Request::rmrf: {
jam();
// Put the file back in idle files list
pushIdleFile(request->file);
sendSignal(ref, GSN_FSREMOVEREF, signal, FsRef::SignalLength, JBB);
break;
}
case Request:: end:
case Request:: suspend:
// Report nothing
break;
case Request::allocmem: {
jam();
AllocMemRef* rep = (AllocMemRef*)signal->getDataPtrSend();
rep->senderRef = reference();
rep->senderData = request->theUserPointer;
rep->errorCode = request->error;
sendSignal(ref, GSN_ALLOC_MEM_REF, signal,
AllocMemRef::SignalLength, JBB);
pushIdleFile(request->file);
break;
}
case Request::buildindx: {
jam();
BuildIndxImplRef* rep = (BuildIndxImplRef*)signal->getDataPtrSend();
rep->senderRef = reference();
rep->senderData = request->theUserPointer;
rep->errorCode = (BuildIndxImplRef::ErrorCode)request->error;
sendSignal(ref, GSN_BUILD_INDX_IMPL_REF, signal,
BuildIndxImplRef::SignalLength, JBB);
pushIdleFile(request->file);
break;
}
}//switch
} else {
jam();
FsConf * const fsConf = (FsConf *)&signal->theData[0];
fsConf->userPointer = request->theUserPointer;
switch (request->action) {
case Request:: open: {
jam();
theOpenFiles.insert(request->file, request->theFilePointer);
// Keep track on max number of opened files
if (theOpenFiles.size() > m_maxOpenedFiles)
m_maxOpenedFiles = theOpenFiles.size();
fsConf->filePointer = request->theFilePointer;
sendSignal(ref, GSN_FSOPENCONF, signal, 3, JBA);
break;
}
case Request:: closeRemove:
case Request:: close: {
jam();
// removes the file from OpenFiles list
theOpenFiles.erase(request->theFilePointer);
// Put the file in idle files list
pushIdleFile(request->file);
sendSignal(ref, GSN_FSCLOSECONF, signal, 1, JBA);
break;
}
case Request:: writeSync:
case Request:: writevSync:
case Request:: write:
case Request:: writev: {
jam();
sendSignal(ref, GSN_FSWRITECONF, signal, 1, JBA);
break;
}
case Request:: read:
case Request:: readv: {
jam();
sendSignal(ref, GSN_FSREADCONF, signal, 1, JBA);
break;
}
case Request:: readPartial: {
jam();
fsConf->bytes_read = Uint32(request->par.readWrite.pages[0].size);
sendSignal(ref, GSN_FSREADCONF, signal, 2, JBA);
break;
}
case Request:: sync: {
jam();
sendSignal(ref, GSN_FSSYNCCONF, signal, 1, JBA);
break;
}//case
case Request::append:
case Request::append_synch:
{
jam();
signal->theData[1] = Uint32(request->par.append.size);
sendSignal(ref, GSN_FSAPPENDCONF, signal, 2, JBA);
break;
}
case Request::rmrf: {
jam();
// Put the file in idle files list
pushIdleFile(request->file);
sendSignal(ref, GSN_FSREMOVECONF, signal, 1, JBA);
break;
}
case Request:: end:
case Request:: suspend:
// Report nothing
break;
case Request::allocmem: {
jam();
AllocMemConf* conf = (AllocMemConf*)signal->getDataPtrSend();
conf->senderRef = reference();
conf->senderData = request->theUserPointer;
conf->bytes_hi = Uint32(request->par.alloc.bytes >> 32);
conf->bytes_lo = Uint32(request->par.alloc.bytes);
sendSignal(ref, GSN_ALLOC_MEM_CONF, signal,
AllocMemConf::SignalLength, JBB);
pushIdleFile(request->file);
break;
}
case Request::buildindx: {
jam();
BuildIndxImplConf* rep = (BuildIndxImplConf*)signal->getDataPtrSend();
rep->senderRef = reference();
rep->senderData = request->theUserPointer;
sendSignal(ref, GSN_BUILD_INDX_IMPL_CONF, signal,
BuildIndxImplConf::SignalLength, JBB);
pushIdleFile(request->file);
break;
}
}
}//if
signal->setTrace(orgTrace);
}
bool
Ndbfs::scanIPC(Signal* signal)
{
Request* request = theFromThreads.tryReadChannel();
jam();
if (request) {
jam();
report(request, signal);
theRequestPool->put(request);
return true;
}
return false;
}
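/*
 * scanIPC() drains at most one completed request per call. execCONTINUEB and
 * execSEND_PACKED below keep re-invoking it via zero-delay CONTINUEB signals
 * while results keep arriving (scanningInProgress == true), and fall back to
 * the periodic 10 ms scan otherwise.
 */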
#if defined NDB_WIN32
Uint32 Ndbfs::translateErrno(int aErrno)
{
switch (aErrno)
{
//permission denied
case ERROR_ACCESS_DENIED:
return FsRef::fsErrPermissionDenied;
//temporary not accessible
case ERROR_PATH_BUSY:
case ERROR_NO_MORE_SEARCH_HANDLES:
return FsRef::fsErrTemporaryNotAccessible;
//no space left on device
case ERROR_HANDLE_DISK_FULL:
case ERROR_DISK_FULL:
return FsRef::fsErrNoSpaceLeftOnDevice;
//none valid parameters
case ERROR_INVALID_HANDLE:
case ERROR_INVALID_DRIVE:
case ERROR_INVALID_ACCESS:
case ERROR_HANDLE_EOF:
case ERROR_BUFFER_OVERFLOW:
return FsRef::fsErrInvalidParameters;
//environment error
case ERROR_CRC:
case ERROR_ARENA_TRASHED:
case ERROR_BAD_ENVIRONMENT:
case ERROR_INVALID_BLOCK:
case ERROR_WRITE_FAULT:
case ERROR_READ_FAULT:
case ERROR_OPEN_FAILED:
return FsRef::fsErrEnvironmentError;
//no more process resources
case ERROR_TOO_MANY_OPEN_FILES:
case ERROR_NOT_ENOUGH_MEMORY:
case ERROR_OUTOFMEMORY:
return FsRef::fsErrNoMoreResources;
//no file
case ERROR_FILE_NOT_FOUND:
return FsRef::fsErrFileDoesNotExist;
case ERR_ReadUnderflow:
return FsRef::fsErrReadUnderflow;
default:
return FsRef::fsErrUnknown;
}
}
#else
Uint32 Ndbfs::translateErrno(int aErrno)
{
switch (aErrno)
{
//permission denied
case EACCES:
case EROFS:
case ENXIO:
return FsRef::fsErrPermissionDenied;
//temporarily not accessible
case EAGAIN:
case ETIMEDOUT:
case ENOLCK:
case EINTR:
case EIO:
return FsRef::fsErrTemporaryNotAccessible;
//no space left on device
case ENFILE:
case EDQUOT:
#ifdef ENOSR
case ENOSR:
#endif
case ENOSPC:
case EFBIG:
return FsRef::fsErrNoSpaceLeftOnDevice;
//invalid parameters
case EINVAL:
case EBADF:
case ENAMETOOLONG:
case EFAULT:
case EISDIR:
case ENOTDIR:
case EEXIST:
case ETXTBSY:
return FsRef::fsErrInvalidParameters;
//environment error
case ELOOP:
#ifdef ENOLINK
case ENOLINK:
#endif
#ifdef EMULTIHOP
case EMULTIHOP:
#endif
#ifdef EOPNOTSUPP
case EOPNOTSUPP:
#endif
#ifdef ESPIPE
case ESPIPE:
#endif
case EPIPE:
return FsRef::fsErrEnvironmentError;
//no more process resources
case EMFILE:
case ENOMEM:
return FsRef::fsErrNoMoreResources;
//no file
case ENOENT:
return FsRef::fsErrFileDoesNotExist;
case ERR_ReadUnderflow:
return FsRef::fsErrReadUnderflow;
default:
return FsRef::fsErrUnknown;
}
}
#endif
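/* Standalone sketch (not part of Ndbfs; names are hypothetical): the
* shape of the errno -> NDB error-class mapping that translateErrno()
* above implements. The fsErr* values below are illustrative
* placeholders, not the real FsRef constants. */
#if 0
#include <errno.h>
#include <stdio.h>
enum { fsErrNoSpaceLeftOnDevice = 1, fsErrFileDoesNotExist, fsErrUnknown };
static unsigned translate(int e)
{
switch (e) {
case ENOSPC:
case EDQUOT:
return fsErrNoSpaceLeftOnDevice;
case ENOENT:
return fsErrFileDoesNotExist;
default:
return fsErrUnknown;
}
}
int main(void)
{
printf("ENOSPC -> %u, ENOENT -> %u\n", translate(ENOSPC), translate(ENOENT));
return 0;
}
#endif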
void
Ndbfs::execCONTINUEB(Signal* signal)
{
jamEntry();
if (signal->theData[0] == NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY) {
jam();
// Also send CONTINUEB to ourselves in order to scan for
// incoming answers from AsyncFile on MemoryChannel theFromThreads
signal->theData[0] = NdbfsContinueB::ZSCAN_MEMORYCHANNEL_10MS_DELAY;
sendSignalWithDelay(reference(), GSN_CONTINUEB, signal, 10, 1);
if (scanningInProgress == true) {
jam();
return;
}
}
if (scanIPC(signal)) {
jam();
scanningInProgress = true;
signal->theData[0] = NdbfsContinueB::ZSCAN_MEMORYCHANNEL_NO_DELAY;
sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
} else {
jam();
scanningInProgress = false;
}
return;
}
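/* Sketch (hypothetical, not NDB code): the two-speed polling scheme of
* execCONTINUEB()/execSEND_PACKED() reduced to its core. A slow periodic
* tick keeps scanning alive; while a scan finds work the loop re-arms
* itself with no delay, and it falls back to the tick once the channel
* drains. */
#if 0
#include <stdbool.h>
#include <stdio.h>
static int pending = 3; /* stand-in for theFromThreads */
static bool scanning; /* mirrors scanningInProgress */
static bool scan_ipc(void)
{
if (pending > 0) {
printf("processed one request, %d left\n", --pending);
return true;
}
return false;
}
static void continueb(void) /* one CONTINUEB delivery */
{
/* re-arm with no delay while there is work, else stop scanning */
scanning = scan_ipc();
}
int main(void)
{
do {
continueb();
} while (scanning);
return 0;
}
#endif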
void
Ndbfs::execSEND_PACKED(Signal* signal)
{
jamEntry();
if (scanningInProgress == false && scanIPC(signal))
{
jam();
scanningInProgress = true;
signal->theData[0] = NdbfsContinueB::ZSCAN_MEMORYCHANNEL_NO_DELAY;
sendSignal(reference(), GSN_CONTINUEB, signal, 1, JBB);
}
}
void
Ndbfs::execDUMP_STATE_ORD(Signal* signal)
{
if(signal->theData[0] == 19){
return;
}
if(signal->theData[0] == DumpStateOrd::NdbfsDumpFileStat){
infoEvent("NDBFS: Files: %d Open files: %d",
theFiles.size(),
theOpenFiles.size());
infoEvent(" Idle files: %u Max opened files: %d",
theIdleFiles.size(),
m_maxOpenedFiles);
infoEvent(" Bound Threads: %u (active %u) Unbound threads: %u",
m_bound_threads_cnt,
m_active_bound_threads_cnt,
m_unbounds_threads_cnt);
infoEvent(" Max files: %d",
m_maxFiles);
infoEvent(" Requests: %d",
theRequestPool->size());
return;
}
if(signal->theData[0] == DumpStateOrd::NdbfsDumpOpenFiles){
infoEvent("NDBFS: Dump open files: %d", theOpenFiles.size());
for (unsigned i = 0; i < theOpenFiles.size(); i++){
AsyncFile* file = theOpenFiles.getFile(i);
infoEvent("%2d (0x%lx): %s thr: %lx", i,
(long)file,
file->theFileName.c_str(),
(long)file->getThread());
}
return;
}
if(signal->theData[0] == DumpStateOrd::NdbfsDumpAllFiles){
infoEvent("NDBFS: Dump all files: %d", theFiles.size());
for (unsigned i = 0; i < theFiles.size(); i++){
AsyncFile* file = theFiles[i];
infoEvent("%2d (0x%lx): %s", i, (long)file, file->isOpen()?"OPEN":"CLOSED");
}
return;
}
if(signal->theData[0] == DumpStateOrd::NdbfsDumpIdleFiles){
infoEvent("NDBFS: Dump idle files: %u",
theIdleFiles.size());
for (unsigned i = 0; i < theIdleFiles.size(); i++){
AsyncFile* file = theIdleFiles[i];
infoEvent("%2d (0x%lx): %s", i, (long)file, file->isOpen()?"OPEN":"CLOSED");
}
return;
}
if(signal->theData[0] == 404)
{
#if 0
ndbrequire(signal->getLength() == 2);
Uint32 file= signal->theData[1];
AsyncFile* openFile = theOpenFiles.find(file);
ndbrequire(openFile != 0);
ndbout_c("File: %s %p", openFile->theFileName.c_str(), openFile);
Request* curr = openFile->m_current_request;
Request* last = openFile->m_last_request;
if(curr)
ndbout << "Current request: " << *curr << endl;
if(last)
ndbout << "Last request: " << *last << endl;
ndbout << "theReportTo " << *openFile->theReportTo << endl;
ndbout << "theMemoryChannelPtr" << *openFile->theMemoryChannelPtr << endl;
ndbout << "All files: " << endl;
for (unsigned i = 0; i < theFiles.size(); i++){
AsyncFile* file = theFiles[i];
ndbout_c("%2d (0x%lx): %s", i, (long) file, file->isOpen()?"OPEN":"CLOSED");
}
#endif
}
if(signal->theData[0] == 405)
{
for (unsigned i = 0; i < theFiles.size(); i++)
{
AsyncFile* file = theFiles[i];
if (file == 0)
continue;
ndbout_c("%u : %s %s", i,
file->theFileName.c_str() ? file->theFileName.c_str() : "",
file->isOpen() ? "OPEN" : "CLOSED");
}
}
}//Ndbfs::execDUMP_STATE_ORD()
const char*
Ndbfs::get_filename(Uint32 fd) const
{
jamEntry();
const AsyncFile* openFile = theOpenFiles.find(fd);
if(openFile)
return openFile->theFileName.get_base_name();
return "";
}
BLOCK_FUNCTIONS(Ndbfs)
template class Vector<AsyncFile*>;
template class Vector<AsyncIoThread*>;
template class Vector<OpenFiles::OpenFileItem>;
template class MemoryChannel<Request>;
template class Pool<Request>;
template NdbOut& operator<<(NdbOut&, const MemoryChannel<Request>&);
| gpl-2.0 |
codename13/android_kernel_samsung_kylessopen-CAF | drivers/staging/msm/mddi_toshiba_vga.c | 87 | 3069 | /* Copyright (c) 2009, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include "msm_fb.h"
#include "mddihost.h"
#include "mddihosti.h"
#include "mddi_toshiba.h"
static uint32 read_client_reg(uint32 addr)
{
uint32 val;
mddi_queue_register_read(addr, &val, TRUE, 0);
return val;
}
static uint32 toshiba_lcd_gpio_read(void)
{
uint32 val;
write_client_reg(GPIODIR, 0x0000000C, TRUE);
write_client_reg(GPIOSEL, 0x00000000, TRUE);
write_client_reg(GPIOSEL, 0x00000000, TRUE);
write_client_reg(GPIOPC, 0x03CF00C0, TRUE);
val = read_client_reg(GPIODATA) & 0x2C0;
return val;
}
static u32 mddi_toshiba_panel_detect(void)
{
mddi_host_type host_idx = MDDI_HOST_PRIM;
uint32 lcd_gpio;
u32 mddi_toshiba_lcd = LCD_TOSHIBA_2P4_VGA;
/* Toshiba display requires larger drive_lo value */
mddi_host_reg_out(DRIVE_LO, 0x0050);
lcd_gpio = toshiba_lcd_gpio_read();
switch (lcd_gpio) {
case 0x0080:
mddi_toshiba_lcd = LCD_SHARP_2P4_VGA;
break;
case 0x00C0:
default:
mddi_toshiba_lcd = LCD_TOSHIBA_2P4_VGA;
break;
}
return mddi_toshiba_lcd;
}
static int __init mddi_toshiba_vga_init(void)
{
int ret;
struct msm_panel_info pinfo;
u32 panel;
#ifdef CONFIG_FB_MSM_MDDI_AUTO_DETECT
u32 id;
ret = msm_fb_detect_client("mddi_toshiba_vga");
if (ret == -ENODEV)
return 0;
if (ret) {
id = mddi_get_client_id();
if ((id >> 16) != 0xD263)
return 0;
}
#endif
panel = mddi_toshiba_panel_detect();
pinfo.xres = 480;
pinfo.yres = 640;
pinfo.type = MDDI_PANEL;
pinfo.pdest = DISPLAY_1;
pinfo.mddi.vdopkt = MDDI_DEFAULT_PRIM_PIX_ATTR;
pinfo.wait_cycle = 0;
pinfo.bpp = 18;
pinfo.lcd.vsync_enable = TRUE;
pinfo.lcd.refx100 = 6118;
pinfo.lcd.v_back_porch = 6;
pinfo.lcd.v_front_porch = 0;
pinfo.lcd.v_pulse_width = 0;
pinfo.lcd.hw_vsync_mode = FALSE;
pinfo.lcd.vsync_notifier_period = (1 * HZ);
pinfo.bl_max = 99;
pinfo.bl_min = 1;
pinfo.clk_rate = 122880000;
pinfo.clk_min = 120000000;
pinfo.clk_max = 200000000;
pinfo.fb_num = 2;
ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_PRIM, panel);
if (ret) {
printk(KERN_ERR "%s: failed to register device!\n", __func__);
return ret;
}
pinfo.xres = 176;
pinfo.yres = 220;
pinfo.type = MDDI_PANEL;
pinfo.pdest = DISPLAY_2;
pinfo.mddi.vdopkt = 0x400;
pinfo.wait_cycle = 0;
pinfo.bpp = 18;
pinfo.clk_rate = 122880000;
pinfo.clk_min = 120000000;
pinfo.clk_max = 200000000;
pinfo.fb_num = 2;
ret = mddi_toshiba_device_register(&pinfo, TOSHIBA_VGA_SECD, panel);
if (ret)
printk(KERN_WARNING
"%s: failed to register device!\n", __func__);
return ret;
}
module_init(mddi_toshiba_vga_init);
| gpl-2.0 |
paulalesius/kernel-devel | net/ceph/pagelist.c | 1367 | 3713 | #include <linux/module.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/ceph/pagelist.h>
static void ceph_pagelist_unmap_tail(struct ceph_pagelist *pl)
{
if (pl->mapped_tail) {
struct page *page = list_entry(pl->head.prev, struct page, lru);
kunmap(page);
pl->mapped_tail = NULL;
}
}
void ceph_pagelist_release(struct ceph_pagelist *pl)
{
if (!atomic_dec_and_test(&pl->refcnt))
return;
ceph_pagelist_unmap_tail(pl);
while (!list_empty(&pl->head)) {
struct page *page = list_first_entry(&pl->head, struct page,
lru);
list_del(&page->lru);
__free_page(page);
}
ceph_pagelist_free_reserve(pl);
kfree(pl);
}
EXPORT_SYMBOL(ceph_pagelist_release);
static int ceph_pagelist_addpage(struct ceph_pagelist *pl)
{
struct page *page;
if (!pl->num_pages_free) {
page = __page_cache_alloc(GFP_NOFS);
} else {
page = list_first_entry(&pl->free_list, struct page, lru);
list_del(&page->lru);
--pl->num_pages_free;
}
if (!page)
return -ENOMEM;
pl->room += PAGE_SIZE;
ceph_pagelist_unmap_tail(pl);
list_add_tail(&page->lru, &pl->head);
pl->mapped_tail = kmap(page);
return 0;
}
int ceph_pagelist_append(struct ceph_pagelist *pl, const void *buf, size_t len)
{
while (pl->room < len) {
size_t bit = pl->room;
int ret;
memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK),
buf, bit);
pl->length += bit;
pl->room -= bit;
buf += bit;
len -= bit;
ret = ceph_pagelist_addpage(pl);
if (ret)
return ret;
}
memcpy(pl->mapped_tail + (pl->length & ~PAGE_CACHE_MASK), buf, len);
pl->length += len;
pl->room -= len;
return 0;
}
EXPORT_SYMBOL(ceph_pagelist_append);
/* Allocate enough pages for a pagelist to append the given amount
* of data without further allocation.
* Returns: 0 on success, -ENOMEM on error.
*/
int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space)
{
if (space <= pl->room)
return 0;
space -= pl->room;
space = (space + PAGE_SIZE - 1) >> PAGE_SHIFT; /* conv to num pages */
while (space > pl->num_pages_free) {
struct page *page = __page_cache_alloc(GFP_NOFS);
if (!page)
return -ENOMEM;
list_add_tail(&page->lru, &pl->free_list);
++pl->num_pages_free;
}
return 0;
}
EXPORT_SYMBOL(ceph_pagelist_reserve);
/* Free any pages that have been preallocated. */
int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
{
while (!list_empty(&pl->free_list)) {
struct page *page = list_first_entry(&pl->free_list,
struct page, lru);
list_del(&page->lru);
__free_page(page);
--pl->num_pages_free;
}
BUG_ON(pl->num_pages_free);
return 0;
}
EXPORT_SYMBOL(ceph_pagelist_free_reserve);
/* Create a truncation point. */
void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
struct ceph_pagelist_cursor *c)
{
c->pl = pl;
c->page_lru = pl->head.prev;
c->room = pl->room;
}
EXPORT_SYMBOL(ceph_pagelist_set_cursor);
/* Truncate a pagelist to the given point. Move extra pages to reserve.
* This won't sleep.
* Returns: 0 on success,
* -EINVAL if the pagelist doesn't match the truncation point's pagelist
*/
int ceph_pagelist_truncate(struct ceph_pagelist *pl,
struct ceph_pagelist_cursor *c)
{
struct page *page;
if (pl != c->pl)
return -EINVAL;
ceph_pagelist_unmap_tail(pl);
while (pl->head.prev != c->page_lru) {
page = list_entry(pl->head.prev, struct page, lru);
/* move from pagelist to reserve */
list_move_tail(&page->lru, &pl->free_list);
++pl->num_pages_free;
}
pl->room = c->room;
if (!list_empty(&pl->head)) {
page = list_entry(pl->head.prev, struct page, lru);
pl->mapped_tail = kmap(page);
}
return 0;
}
EXPORT_SYMBOL(ceph_pagelist_truncate);
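/* Hedged usage sketch (not part of this file): a typical caller builds a
* pagelist, reserves room up front so later appends cannot fail with
* -ENOMEM, and rolls a partial build back with a cursor. Assumes
* ceph_pagelist_init() from <linux/ceph/pagelist.h> sets up the lists,
* room and refcount. */
#if 0
static int build_payload(const void *hdr, size_t hdr_len,
const void *body, size_t body_len)
{
struct ceph_pagelist *pl;
struct ceph_pagelist_cursor c;
int ret;
pl = kmalloc(sizeof(*pl), GFP_NOFS);
if (!pl)
return -ENOMEM;
ceph_pagelist_init(pl);
ret = ceph_pagelist_reserve(pl, hdr_len + body_len);
if (ret)
goto out;
ceph_pagelist_set_cursor(pl, &c); /* truncation point */
ret = ceph_pagelist_append(pl, hdr, hdr_len);
if (!ret)
ret = ceph_pagelist_append(pl, body, body_len);
if (ret)
ceph_pagelist_truncate(pl, &c); /* drop the partial data */
out:
ceph_pagelist_release(pl); /* drops our reference, frees pl */
return ret;
}
#endif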
| gpl-2.0 |
embeddedarm/linux-vanilla-imx6 | arch/arm/mach-mvebu/kirkwood-pm.c | 1623 | 1849 | /*
* Power Management driver for Marvell Kirkwood SoCs
*
* Copyright (C) 2013 Ezequiel Garcia <ezequiel@free-electrons.com>
* Copyright (C) 2010 Simon Guinot <sguinot@lacie.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License,
* version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/suspend.h>
#include <linux/io.h>
#include "kirkwood.h"
static void __iomem *ddr_operation_base;
static void __iomem *memory_pm_ctrl;
static void kirkwood_low_power(void)
{
u32 mem_pm_ctrl;
mem_pm_ctrl = readl(memory_pm_ctrl);
/* Set peripherals to low-power mode */
writel_relaxed(~0, memory_pm_ctrl);
/* Set DDR in self-refresh */
writel_relaxed(0x7, ddr_operation_base);
/*
* Set CPU in wait-for-interrupt state.
* This disables the CPU core clocks,
* the array clocks, and also the L2 controller.
*/
cpu_do_idle();
writel_relaxed(mem_pm_ctrl, memory_pm_ctrl);
}
static int kirkwood_suspend_enter(suspend_state_t state)
{
switch (state) {
case PM_SUSPEND_STANDBY:
kirkwood_low_power();
break;
default:
return -EINVAL;
}
return 0;
}
static int kirkwood_pm_valid_standby(suspend_state_t state)
{
return state == PM_SUSPEND_STANDBY;
}
static const struct platform_suspend_ops kirkwood_suspend_ops = {
.enter = kirkwood_suspend_enter,
.valid = kirkwood_pm_valid_standby,
};
int __init kirkwood_pm_init(void)
{
ddr_operation_base = ioremap(DDR_OPERATION_BASE, 4);
memory_pm_ctrl = ioremap(MEMORY_PM_CTRL_PHYS, 4);
suspend_set_ops(&kirkwood_suspend_ops);
return 0;
}
| gpl-2.0 |
schmatzler/zte-kernel-smartchat | drivers/media/video/vino.c | 2647 | 101201 | /*
* Driver for the VINO (Video In No Out) system found in SGI Indys.
*
* This file is subject to the terms and conditions of the GNU General Public
* License version 2 as published by the Free Software Foundation.
*
* Copyright (C) 2004,2005 Mikael Nousiainen <tmnousia@cc.hut.fi>
*
* Based on the previous version of the driver for 2.4 kernels by:
* Copyright (C) 2003 Ladislav Michl <ladis@linux-mips.org>
*
* v4l2_device/v4l2_subdev conversion by:
* Copyright (C) 2009 Hans Verkuil <hverkuil@xs4all.nl>
*
* Note: this conversion is untested! Please contact the linux-media
* mailing list if you can test this, together with the test results.
*/
/*
* TODO:
* - remove "mark pages reserved" hacks from memory allocation code
* and implement fault()
* - check decimation, calculating and reporting image size when
* using decimation
* - implement read(), user mode buffers and overlay (?)
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/version.h>
#include <linux/kmod.h>
#include <linux/i2c.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/mutex.h>
#include <asm/paccess.h>
#include <asm/io.h>
#include <asm/sgi/ip22.h>
#include <asm/sgi/mc.h>
#include "vino.h"
#include "saa7191.h"
#include "indycam.h"
/* Uncomment the following line to get lots and lots of (mostly useless)
* debug info.
* Note that the debug output also slows down the driver significantly */
// #define VINO_DEBUG
// #define VINO_DEBUG_INT
#define VINO_MODULE_VERSION "0.0.6"
#define VINO_VERSION_CODE KERNEL_VERSION(0, 0, 6)
MODULE_DESCRIPTION("SGI VINO Video4Linux2 driver");
MODULE_VERSION(VINO_MODULE_VERSION);
MODULE_AUTHOR("Mikael Nousiainen <tmnousia@cc.hut.fi>");
MODULE_LICENSE("GPL");
#ifdef VINO_DEBUG
#define dprintk(x...) printk("VINO: " x);
#else
#define dprintk(x...)
#endif
#define VINO_NO_CHANNEL 0
#define VINO_CHANNEL_A 1
#define VINO_CHANNEL_B 2
#define VINO_PAL_WIDTH 768
#define VINO_PAL_HEIGHT 576
#define VINO_NTSC_WIDTH 640
#define VINO_NTSC_HEIGHT 480
#define VINO_MIN_WIDTH 32
#define VINO_MIN_HEIGHT 32
#define VINO_CLIPPING_START_ODD_D1 1
#define VINO_CLIPPING_START_ODD_PAL 15
#define VINO_CLIPPING_START_ODD_NTSC 12
#define VINO_CLIPPING_START_EVEN_D1 2
#define VINO_CLIPPING_START_EVEN_PAL 15
#define VINO_CLIPPING_START_EVEN_NTSC 12
#define VINO_INPUT_CHANNEL_COUNT 3
/* the number is the index for vino_inputs */
#define VINO_INPUT_NONE -1
#define VINO_INPUT_COMPOSITE 0
#define VINO_INPUT_SVIDEO 1
#define VINO_INPUT_D1 2
#define VINO_PAGE_RATIO (PAGE_SIZE / VINO_PAGE_SIZE)
#define VINO_FIFO_THRESHOLD_DEFAULT 16
#define VINO_FRAMEBUFFER_SIZE ((VINO_PAL_WIDTH \
* VINO_PAL_HEIGHT * 4 \
+ 3 * PAGE_SIZE) & ~(PAGE_SIZE - 1))
#define VINO_FRAMEBUFFER_COUNT_MAX 8
#define VINO_FRAMEBUFFER_UNUSED 0
#define VINO_FRAMEBUFFER_IN_USE 1
#define VINO_FRAMEBUFFER_READY 2
#define VINO_QUEUE_ERROR -1
#define VINO_QUEUE_MAGIC 0x20050125
#define VINO_MEMORY_NONE 0
#define VINO_MEMORY_MMAP 1
#define VINO_MEMORY_USERPTR 2
#define VINO_DUMMY_DESC_COUNT 4
#define VINO_DESC_FETCH_DELAY 5 /* microseconds */
#define VINO_MAX_FRAME_SKIP_COUNT 128
/* the number is the index for vino_data_formats */
#define VINO_DATA_FMT_NONE -1
#define VINO_DATA_FMT_GREY 0
#define VINO_DATA_FMT_RGB332 1
#define VINO_DATA_FMT_RGB32 2
#define VINO_DATA_FMT_YUV 3
#define VINO_DATA_FMT_COUNT 4
/* the number is the index for vino_data_norms */
#define VINO_DATA_NORM_NONE -1
#define VINO_DATA_NORM_NTSC 0
#define VINO_DATA_NORM_PAL 1
#define VINO_DATA_NORM_SECAM 2
#define VINO_DATA_NORM_D1 3
#define VINO_DATA_NORM_COUNT 4
/* I2C controller flags */
#define SGI_I2C_FORCE_IDLE (0 << 0)
#define SGI_I2C_NOT_IDLE (1 << 0)
#define SGI_I2C_WRITE (0 << 1)
#define SGI_I2C_READ (1 << 1)
#define SGI_I2C_RELEASE_BUS (0 << 2)
#define SGI_I2C_HOLD_BUS (1 << 2)
#define SGI_I2C_XFER_DONE (0 << 4)
#define SGI_I2C_XFER_BUSY (1 << 4)
#define SGI_I2C_ACK (0 << 5)
#define SGI_I2C_NACK (1 << 5)
#define SGI_I2C_BUS_OK (0 << 7)
#define SGI_I2C_BUS_ERR (1 << 7)
/* Internal data structure definitions */
struct vino_input {
char *name;
v4l2_std_id std;
};
struct vino_clipping {
unsigned int left, right, top, bottom;
};
struct vino_data_format {
/* the description */
char *description;
/* bytes per pixel */
unsigned int bpp;
/* V4L2 fourcc code */
__u32 pixelformat;
/* V4L2 colorspace (duh!) */
enum v4l2_colorspace colorspace;
};
struct vino_data_norm {
char *description;
unsigned int width, height;
struct vino_clipping odd;
struct vino_clipping even;
v4l2_std_id std;
unsigned int fps_min, fps_max;
__u32 framelines;
};
struct vino_descriptor_table {
/* the number of PAGE_SIZE sized pages in the buffer */
unsigned int page_count;
/* virtual (kmalloc'd) pointers to the actual data
* (in PAGE_SIZE chunks, used with mmap streaming) */
unsigned long *virtual;
/* cpu address for the VINO descriptor table
* (contains DMA addresses, VINO_PAGE_SIZE chunks) */
unsigned long *dma_cpu;
/* dma address for the VINO descriptor table
* (contains DMA addresses, VINO_PAGE_SIZE chunks) */
dma_addr_t dma;
};
struct vino_framebuffer {
/* identifier number */
unsigned int id;
/* the length of the whole buffer */
unsigned int size;
/* the length of actual data in buffer */
unsigned int data_size;
/* the data format */
unsigned int data_format;
/* the state of buffer data */
unsigned int state;
/* is the buffer mapped in user space? */
unsigned int map_count;
/* memory offset for mmap() */
unsigned int offset;
/* frame counter */
unsigned int frame_counter;
/* timestamp (written when image capture finishes) */
struct timeval timestamp;
struct vino_descriptor_table desc_table;
spinlock_t state_lock;
};
struct vino_framebuffer_fifo {
unsigned int length;
unsigned int used;
unsigned int head;
unsigned int tail;
unsigned int data[VINO_FRAMEBUFFER_COUNT_MAX];
};
struct vino_framebuffer_queue {
unsigned int magic;
/* VINO_MEMORY_NONE, VINO_MEMORY_MMAP or VINO_MEMORY_USERPTR */
unsigned int type;
unsigned int length;
/* data field of in and out contain index numbers for buffer */
struct vino_framebuffer_fifo in;
struct vino_framebuffer_fifo out;
struct vino_framebuffer *buffer[VINO_FRAMEBUFFER_COUNT_MAX];
spinlock_t queue_lock;
struct mutex queue_mutex;
wait_queue_head_t frame_wait_queue;
};
struct vino_interrupt_data {
struct timeval timestamp;
unsigned int frame_counter;
unsigned int skip_count;
unsigned int skip;
};
struct vino_channel_settings {
unsigned int channel;
int input;
unsigned int data_format;
unsigned int data_norm;
struct vino_clipping clipping;
unsigned int decimation;
unsigned int line_size;
unsigned int alpha;
unsigned int fps;
unsigned int framert_reg;
unsigned int fifo_threshold;
struct vino_framebuffer_queue fb_queue;
/* number of the current field */
unsigned int field;
/* read in progress */
int reading;
/* streaming is active */
int streaming;
/* the driver is currently processing the queue */
int capturing;
struct mutex mutex;
spinlock_t capture_lock;
unsigned int users;
struct vino_interrupt_data int_data;
/* V4L support */
struct video_device *vdev;
};
struct vino_settings {
struct v4l2_device v4l2_dev;
struct vino_channel_settings a;
struct vino_channel_settings b;
/* the channel which owns this client:
* VINO_NO_CHANNEL, VINO_CHANNEL_A or VINO_CHANNEL_B */
unsigned int decoder_owner;
struct v4l2_subdev *decoder;
unsigned int camera_owner;
struct v4l2_subdev *camera;
/* a lock for vino register access */
spinlock_t vino_lock;
/* a lock for channel input changes */
spinlock_t input_lock;
unsigned long dummy_page;
struct vino_descriptor_table dummy_desc_table;
};
/* Module parameters */
/*
* Using vino_pixel_conversion the ABGR32-format pixels supplied
* by the VINO chip can be converted to more common formats
* like RGBA32 (or probably RGB24 in the future). This way we
* can give out data that can be specified correctly with
* the V4L2-definitions.
*
* The pixel format is specified as RGBA32 when no conversion
* is used.
*
* Note that this only affects the 32-bit bit depth.
*
* Use non-zero value to enable conversion.
*/
static int vino_pixel_conversion;
module_param_named(pixelconv, vino_pixel_conversion, int, 0);
MODULE_PARM_DESC(pixelconv,
"enable pixel conversion (non-zero value enables)");
/* Internal data structures */
static struct sgi_vino *vino;
static struct vino_settings *vino_drvdata;
#define camera_call(o, f, args...) \
v4l2_subdev_call(vino_drvdata->camera, o, f, ##args)
#define decoder_call(o, f, args...) \
v4l2_subdev_call(vino_drvdata->decoder, o, f, ##args)
static const char *vino_driver_name = "vino";
static const char *vino_driver_description = "SGI VINO";
static const char *vino_bus_name = "GIO64 bus";
static const char *vino_vdev_name_a = "SGI VINO Channel A";
static const char *vino_vdev_name_b = "SGI VINO Channel B";
static void vino_capture_tasklet(unsigned long channel);
DECLARE_TASKLET(vino_tasklet_a, vino_capture_tasklet, VINO_CHANNEL_A);
DECLARE_TASKLET(vino_tasklet_b, vino_capture_tasklet, VINO_CHANNEL_B);
static const struct vino_input vino_inputs[] = {
{
.name = "Composite",
.std = V4L2_STD_NTSC | V4L2_STD_PAL
| V4L2_STD_SECAM,
}, {
.name = "S-Video",
.std = V4L2_STD_NTSC | V4L2_STD_PAL
| V4L2_STD_SECAM,
}, {
.name = "D1/IndyCam",
.std = V4L2_STD_NTSC,
}
};
static const struct vino_data_format vino_data_formats[] = {
{
.description = "8-bit greyscale",
.bpp = 1,
.pixelformat = V4L2_PIX_FMT_GREY,
.colorspace = V4L2_COLORSPACE_SMPTE170M,
}, {
.description = "8-bit dithered RGB 3-3-2",
.bpp = 1,
.pixelformat = V4L2_PIX_FMT_RGB332,
.colorspace = V4L2_COLORSPACE_SRGB,
}, {
.description = "32-bit RGB",
.bpp = 4,
.pixelformat = V4L2_PIX_FMT_RGB32,
.colorspace = V4L2_COLORSPACE_SRGB,
}, {
.description = "YUV 4:2:2",
.bpp = 2,
.pixelformat = V4L2_PIX_FMT_YUYV, // XXX: swapped?
.colorspace = V4L2_COLORSPACE_SMPTE170M,
}
};
static const struct vino_data_norm vino_data_norms[] = {
{
.description = "NTSC",
.std = V4L2_STD_NTSC,
.fps_min = 6,
.fps_max = 30,
.framelines = 525,
.width = VINO_NTSC_WIDTH,
.height = VINO_NTSC_HEIGHT,
.odd = {
.top = VINO_CLIPPING_START_ODD_NTSC,
.left = 0,
.bottom = VINO_CLIPPING_START_ODD_NTSC
+ VINO_NTSC_HEIGHT / 2 - 1,
.right = VINO_NTSC_WIDTH,
},
.even = {
.top = VINO_CLIPPING_START_EVEN_NTSC,
.left = 0,
.bottom = VINO_CLIPPING_START_EVEN_NTSC
+ VINO_NTSC_HEIGHT / 2 - 1,
.right = VINO_NTSC_WIDTH,
},
}, {
.description = "PAL",
.std = V4L2_STD_PAL,
.fps_min = 5,
.fps_max = 25,
.framelines = 625,
.width = VINO_PAL_WIDTH,
.height = VINO_PAL_HEIGHT,
.odd = {
.top = VINO_CLIPPING_START_ODD_PAL,
.left = 0,
.bottom = VINO_CLIPPING_START_ODD_PAL
+ VINO_PAL_HEIGHT / 2 - 1,
.right = VINO_PAL_WIDTH,
},
.even = {
.top = VINO_CLIPPING_START_EVEN_PAL,
.left = 0,
.bottom = VINO_CLIPPING_START_EVEN_PAL
+ VINO_PAL_HEIGHT / 2 - 1,
.right = VINO_PAL_WIDTH,
},
}, {
.description = "SECAM",
.std = V4L2_STD_SECAM,
.fps_min = 5,
.fps_max = 25,
.framelines = 625,
.width = VINO_PAL_WIDTH,
.height = VINO_PAL_HEIGHT,
.odd = {
.top = VINO_CLIPPING_START_ODD_PAL,
.left = 0,
.bottom = VINO_CLIPPING_START_ODD_PAL
+ VINO_PAL_HEIGHT / 2 - 1,
.right = VINO_PAL_WIDTH,
},
.even = {
.top = VINO_CLIPPING_START_EVEN_PAL,
.left = 0,
.bottom = VINO_CLIPPING_START_EVEN_PAL
+ VINO_PAL_HEIGHT / 2 - 1,
.right = VINO_PAL_WIDTH,
},
}, {
.description = "NTSC/D1",
.std = V4L2_STD_NTSC,
.fps_min = 6,
.fps_max = 30,
.framelines = 525,
.width = VINO_NTSC_WIDTH,
.height = VINO_NTSC_HEIGHT,
.odd = {
.top = VINO_CLIPPING_START_ODD_D1,
.left = 0,
.bottom = VINO_CLIPPING_START_ODD_D1
+ VINO_NTSC_HEIGHT / 2 - 1,
.right = VINO_NTSC_WIDTH,
},
.even = {
.top = VINO_CLIPPING_START_EVEN_D1,
.left = 0,
.bottom = VINO_CLIPPING_START_EVEN_D1
+ VINO_NTSC_HEIGHT / 2 - 1,
.right = VINO_NTSC_WIDTH,
},
}
};
#define VINO_INDYCAM_V4L2_CONTROL_COUNT 9
struct v4l2_queryctrl vino_indycam_v4l2_controls[] = {
{
.id = V4L2_CID_AUTOGAIN,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Automatic Gain Control",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = INDYCAM_AGC_DEFAULT,
}, {
.id = V4L2_CID_AUTO_WHITE_BALANCE,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Automatic White Balance",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = INDYCAM_AWB_DEFAULT,
}, {
.id = V4L2_CID_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gain",
.minimum = INDYCAM_GAIN_MIN,
.maximum = INDYCAM_GAIN_MAX,
.step = 1,
.default_value = INDYCAM_GAIN_DEFAULT,
}, {
.id = INDYCAM_CONTROL_RED_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Red Saturation",
.minimum = INDYCAM_RED_SATURATION_MIN,
.maximum = INDYCAM_RED_SATURATION_MAX,
.step = 1,
.default_value = INDYCAM_RED_SATURATION_DEFAULT,
}, {
.id = INDYCAM_CONTROL_BLUE_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Blue Saturation",
.minimum = INDYCAM_BLUE_SATURATION_MIN,
.maximum = INDYCAM_BLUE_SATURATION_MAX,
.step = 1,
.default_value = INDYCAM_BLUE_SATURATION_DEFAULT,
}, {
.id = V4L2_CID_RED_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Red Balance",
.minimum = INDYCAM_RED_BALANCE_MIN,
.maximum = INDYCAM_RED_BALANCE_MAX,
.step = 1,
.default_value = INDYCAM_RED_BALANCE_DEFAULT,
}, {
.id = V4L2_CID_BLUE_BALANCE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Blue Balance",
.minimum = INDYCAM_BLUE_BALANCE_MIN,
.maximum = INDYCAM_BLUE_BALANCE_MAX,
.step = 1,
.default_value = INDYCAM_BLUE_BALANCE_DEFAULT,
}, {
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Shutter Control",
.minimum = INDYCAM_SHUTTER_MIN,
.maximum = INDYCAM_SHUTTER_MAX,
.step = 1,
.default_value = INDYCAM_SHUTTER_DEFAULT,
}, {
.id = V4L2_CID_GAMMA,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Gamma",
.minimum = INDYCAM_GAMMA_MIN,
.maximum = INDYCAM_GAMMA_MAX,
.step = 1,
.default_value = INDYCAM_GAMMA_DEFAULT,
}
};
#define VINO_SAA7191_V4L2_CONTROL_COUNT 9
struct v4l2_queryctrl vino_saa7191_v4l2_controls[] = {
{
.id = V4L2_CID_HUE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Hue",
.minimum = SAA7191_HUE_MIN,
.maximum = SAA7191_HUE_MAX,
.step = 1,
.default_value = SAA7191_HUE_DEFAULT,
}, {
.id = SAA7191_CONTROL_BANDPASS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Luminance Bandpass",
.minimum = SAA7191_BANDPASS_MIN,
.maximum = SAA7191_BANDPASS_MAX,
.step = 1,
.default_value = SAA7191_BANDPASS_DEFAULT,
}, {
.id = SAA7191_CONTROL_BANDPASS_WEIGHT,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Luminance Bandpass Weight",
.minimum = SAA7191_BANDPASS_WEIGHT_MIN,
.maximum = SAA7191_BANDPASS_WEIGHT_MAX,
.step = 1,
.default_value = SAA7191_BANDPASS_WEIGHT_DEFAULT,
}, {
.id = SAA7191_CONTROL_CORING,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "HF Luminance Coring",
.minimum = SAA7191_CORING_MIN,
.maximum = SAA7191_CORING_MAX,
.step = 1,
.default_value = SAA7191_CORING_DEFAULT,
}, {
.id = SAA7191_CONTROL_FORCE_COLOUR,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Force Colour",
.minimum = SAA7191_FORCE_COLOUR_MIN,
.maximum = SAA7191_FORCE_COLOUR_MAX,
.step = 1,
.default_value = SAA7191_FORCE_COLOUR_DEFAULT,
}, {
.id = SAA7191_CONTROL_CHROMA_GAIN,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Chrominance Gain Control",
.minimum = SAA7191_CHROMA_GAIN_MIN,
.maximum = SAA7191_CHROMA_GAIN_MAX,
.step = 1,
.default_value = SAA7191_CHROMA_GAIN_DEFAULT,
}, {
.id = SAA7191_CONTROL_VTRC,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "VTR Time Constant",
.minimum = SAA7191_VTRC_MIN,
.maximum = SAA7191_VTRC_MAX,
.step = 1,
.default_value = SAA7191_VTRC_DEFAULT,
}, {
.id = SAA7191_CONTROL_LUMA_DELAY,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Luminance Delay Compensation",
.minimum = SAA7191_LUMA_DELAY_MIN,
.maximum = SAA7191_LUMA_DELAY_MAX,
.step = 1,
.default_value = SAA7191_LUMA_DELAY_DEFAULT,
}, {
.id = SAA7191_CONTROL_VNR,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Vertical Noise Reduction",
.minimum = SAA7191_VNR_MIN,
.maximum = SAA7191_VNR_MAX,
.step = 1,
.default_value = SAA7191_VNR_DEFAULT,
}
};
/* VINO framebuffer/DMA descriptor management */
static void vino_free_buffer_with_count(struct vino_framebuffer *fb,
unsigned int count)
{
unsigned int i;
dprintk("vino_free_buffer_with_count(): count = %d\n", count);
for (i = 0; i < count; i++) {
ClearPageReserved(virt_to_page((void *)fb->desc_table.virtual[i]));
dma_unmap_single(NULL,
fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
PAGE_SIZE, DMA_FROM_DEVICE);
free_page(fb->desc_table.virtual[i]);
}
dma_free_coherent(NULL,
VINO_PAGE_RATIO * (fb->desc_table.page_count + 4) *
sizeof(dma_addr_t), (void *)fb->desc_table.dma_cpu,
fb->desc_table.dma);
kfree(fb->desc_table.virtual);
memset(fb, 0, sizeof(struct vino_framebuffer));
}
static void vino_free_buffer(struct vino_framebuffer *fb)
{
vino_free_buffer_with_count(fb, fb->desc_table.page_count);
}
static int vino_allocate_buffer(struct vino_framebuffer *fb,
unsigned int size)
{
unsigned int count, i, j;
int ret = 0;
dprintk("vino_allocate_buffer():\n");
if (size < 1)
return -EINVAL;
memset(fb, 0, sizeof(struct vino_framebuffer));
count = ((size / PAGE_SIZE) + 4) & ~3;
dprintk("vino_allocate_buffer(): size = %d, count = %d\n",
size, count);
/* allocate memory for table with virtual (page) addresses */
fb->desc_table.virtual = (unsigned long *)
kmalloc(count * sizeof(unsigned long), GFP_KERNEL);
if (!fb->desc_table.virtual)
return -ENOMEM;
/* allocate memory for table with dma addresses
* (has space for four extra descriptors) */
fb->desc_table.dma_cpu =
dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) *
sizeof(dma_addr_t), &fb->desc_table.dma,
GFP_KERNEL | GFP_DMA);
if (!fb->desc_table.dma_cpu) {
ret = -ENOMEM;
goto out_free_virtual;
}
/* allocate pages for the buffer and acquire the according
* dma addresses */
for (i = 0; i < count; i++) {
dma_addr_t dma_data_addr;
fb->desc_table.virtual[i] =
get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!fb->desc_table.virtual[i]) {
ret = -ENOBUFS;
break;
}
dma_data_addr =
dma_map_single(NULL,
(void *)fb->desc_table.virtual[i],
PAGE_SIZE, DMA_FROM_DEVICE);
for (j = 0; j < VINO_PAGE_RATIO; j++) {
fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] =
dma_data_addr + VINO_PAGE_SIZE * j;
}
SetPageReserved(virt_to_page((void *)fb->desc_table.virtual[i]));
}
/* page_count needs to be set anyway, because the descriptor table has
* been allocated according to this number */
fb->desc_table.page_count = count;
if (ret) {
/* the descriptor with index i doesn't contain
* a valid address yet */
vino_free_buffer_with_count(fb, i);
return ret;
}
//fb->size = size;
fb->size = count * PAGE_SIZE;
fb->data_format = VINO_DATA_FMT_NONE;
/* set the dma stop-bit for the last (count+1)th descriptor */
fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP;
return 0;
out_free_virtual:
kfree(fb->desc_table.virtual);
return ret;
}
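/* Standalone illustration (hypothetical values) of the page-count
* rounding in vino_allocate_buffer(): count = ((size / PAGE_SIZE) + 4)
* & ~3 adds headroom and lands on a multiple of four pages, matching
* VINO's four-at-a-time descriptor fetches. */
#if 0
#include <stdio.h>
#define PAGE_SIZE 4096UL
int main(void)
{
unsigned long sizes[] = { 4096, 614400, 1769472 }; /* incl. 640x480x2, 768x576x4 */
unsigned int i;
for (i = 0; i < 3; i++) {
unsigned long count = ((sizes[i] / PAGE_SIZE) + 4) & ~3UL;
printf("size %8lu -> %3lu pages (%lu bytes)\n",
sizes[i], count, count * PAGE_SIZE);
}
return 0;
}
#endif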
#if 0
/* user buffers not fully implemented yet */
static int vino_prepare_user_buffer(struct vino_framebuffer *fb,
void *user,
unsigned int size)
{
unsigned int count, i, j;
int ret = 0;
dprintk("vino_prepare_user_buffer():\n");
if (size < 1)
return -EINVAL;
memset(fb, 0, sizeof(struct vino_framebuffer));
count = ((size / PAGE_SIZE)) & ~3;
dprintk("vino_prepare_user_buffer(): size = %d, count = %d\n",
size, count);
/* allocate memory for table with virtual (page) addresses */
fb->desc_table.virtual = (unsigned long *)
kmalloc(count * sizeof(unsigned long), GFP_KERNEL);
if (!fb->desc_table.virtual)
return -ENOMEM;
/* allocate memory for table with dma addresses
* (has space for four extra descriptors) */
fb->desc_table.dma_cpu =
dma_alloc_coherent(NULL, VINO_PAGE_RATIO * (count + 4) *
sizeof(dma_addr_t), &fb->desc_table.dma,
GFP_KERNEL | GFP_DMA);
if (!fb->desc_table.dma_cpu) {
ret = -ENOMEM;
goto out_free_virtual;
}
/* allocate pages for the buffer and acquire the according
* dma addresses */
for (i = 0; i < count; i++) {
dma_addr_t dma_data_addr;
fb->desc_table.virtual[i] =
get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!fb->desc_table.virtual[i]) {
ret = -ENOBUFS;
break;
}
dma_data_addr =
dma_map_single(NULL,
(void *)fb->desc_table.virtual[i],
PAGE_SIZE, DMA_FROM_DEVICE);
for (j = 0; j < VINO_PAGE_RATIO; j++) {
fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i + j] =
dma_data_addr + VINO_PAGE_SIZE * j;
}
SetPageReserved(virt_to_page((void *)fb->desc_table.virtual[i]));
}
/* page_count needs to be set anyway, because the descriptor table has
* been allocated according to this number */
fb->desc_table.page_count = count;
if (ret) {
/* the descriptor with index i doesn't contain
* a valid address yet */
vino_free_buffer_with_count(fb, i);
return ret;
}
//fb->size = size;
fb->size = count * PAGE_SIZE;
/* set the dma stop-bit for the last (count+1)th descriptor */
fb->desc_table.dma_cpu[VINO_PAGE_RATIO * count] = VINO_DESC_STOP;
return 0;
out_free_virtual:
kfree(fb->desc_table.virtual);
return ret;
}
#endif
static void vino_sync_buffer(struct vino_framebuffer *fb)
{
int i;
dprintk("vino_sync_buffer():\n");
for (i = 0; i < fb->desc_table.page_count; i++)
dma_sync_single_for_cpu(NULL,
fb->desc_table.dma_cpu[VINO_PAGE_RATIO * i],
PAGE_SIZE, DMA_FROM_DEVICE);
}
/* Framebuffer fifo functions (need to be locked externally) */
static inline void vino_fifo_init(struct vino_framebuffer_fifo *f,
unsigned int length)
{
f->length = 0;
f->used = 0;
f->head = 0;
f->tail = 0;
if (length > VINO_FRAMEBUFFER_COUNT_MAX)
length = VINO_FRAMEBUFFER_COUNT_MAX;
f->length = length;
}
/* returns true/false */
static inline int vino_fifo_has_id(struct vino_framebuffer_fifo *f,
unsigned int id)
{
unsigned int i, n;
/* walk the used entries of the ring buffer, starting at the head */
for (n = 0, i = f->head; n < f->used; n++, i = (i + 1) % f->length) {
if (f->data[i] == id)
return 1;
}
return 0;
}
#if 0
/* returns true/false */
static inline int vino_fifo_full(struct vino_framebuffer_fifo *f)
{
return (f->used == f->length);
}
#endif
static inline unsigned int vino_fifo_get_used(struct vino_framebuffer_fifo *f)
{
return f->used;
}
static int vino_fifo_enqueue(struct vino_framebuffer_fifo *f, unsigned int id)
{
if (id >= f->length) {
return VINO_QUEUE_ERROR;
}
if (vino_fifo_has_id(f, id)) {
return VINO_QUEUE_ERROR;
}
if (f->used < f->length) {
f->data[f->tail] = id;
f->tail = (f->tail + 1) % f->length;
f->used++;
} else {
return VINO_QUEUE_ERROR;
}
return 0;
}
static int vino_fifo_peek(struct vino_framebuffer_fifo *f, unsigned int *id)
{
if (f->used > 0) {
*id = f->data[f->head];
} else {
return VINO_QUEUE_ERROR;
}
return 0;
}
static int vino_fifo_dequeue(struct vino_framebuffer_fifo *f, unsigned int *id)
{
if (f->used > 0) {
*id = f->data[f->head];
f->head = (f->head + 1) % f->length;
f->used--;
} else {
return VINO_QUEUE_ERROR;
}
return 0;
}
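/* Userspace mimic (hypothetical, self-contained) of the fifo primitives
* above: a fixed-size ring where enqueue rejects duplicates and
* overflows, and dequeue pops from the head. */
#if 0
#include <stdio.h>
#define LEN 4
struct fifo { unsigned head, tail, used, data[LEN]; };
static int enqueue(struct fifo *f, unsigned id)
{
unsigned n, i;
for (n = 0, i = f->head; n < f->used; n++, i = (i + 1) % LEN)
if (f->data[i] == id)
return -1; /* duplicate id */
if (f->used == LEN)
return -1; /* full */
f->data[f->tail] = id;
f->tail = (f->tail + 1) % LEN;
f->used++;
return 0;
}
static int dequeue(struct fifo *f, unsigned *id)
{
if (!f->used)
return -1; /* empty */
*id = f->data[f->head];
f->head = (f->head + 1) % LEN;
f->used--;
return 0;
}
int main(void)
{
struct fifo f = { 0 };
unsigned id;
enqueue(&f, 2);
enqueue(&f, 2); /* rejected: duplicate */
enqueue(&f, 5);
while (!dequeue(&f, &id))
printf("dequeued %u\n", id); /* prints 2, then 5 */
return 0;
}
#endif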
/* Framebuffer queue functions */
/* execute with queue_lock locked */
static void vino_queue_free_with_count(struct vino_framebuffer_queue *q,
unsigned int length)
{
unsigned int i;
q->length = 0;
memset(&q->in, 0, sizeof(struct vino_framebuffer_fifo));
memset(&q->out, 0, sizeof(struct vino_framebuffer_fifo));
for (i = 0; i < length; i++) {
dprintk("vino_queue_free_with_count(): freeing buffer %d\n",
i);
vino_free_buffer(q->buffer[i]);
kfree(q->buffer[i]);
}
q->type = VINO_MEMORY_NONE;
q->magic = 0;
}
static void vino_queue_free(struct vino_framebuffer_queue *q)
{
dprintk("vino_queue_free():\n");
if (q->magic != VINO_QUEUE_MAGIC)
return;
if (q->type != VINO_MEMORY_MMAP)
return;
mutex_lock(&q->queue_mutex);
vino_queue_free_with_count(q, q->length);
mutex_unlock(&q->queue_mutex);
}
static int vino_queue_init(struct vino_framebuffer_queue *q,
unsigned int *length)
{
unsigned int i;
int ret = 0;
dprintk("vino_queue_init(): length = %d\n", *length);
if (q->magic == VINO_QUEUE_MAGIC) {
dprintk("vino_queue_init(): queue already initialized!\n");
return -EINVAL;
}
if (q->type != VINO_MEMORY_NONE) {
dprintk("vino_queue_init(): queue already initialized!\n");
return -EINVAL;
}
if (*length < 1)
return -EINVAL;
mutex_lock(&q->queue_mutex);
if (*length > VINO_FRAMEBUFFER_COUNT_MAX)
*length = VINO_FRAMEBUFFER_COUNT_MAX;
q->length = 0;
for (i = 0; i < *length; i++) {
dprintk("vino_queue_init(): allocating buffer %d\n", i);
q->buffer[i] = kmalloc(sizeof(struct vino_framebuffer),
GFP_KERNEL);
if (!q->buffer[i]) {
dprintk("vino_queue_init(): kmalloc() failed\n");
ret = -ENOMEM;
break;
}
ret = vino_allocate_buffer(q->buffer[i],
VINO_FRAMEBUFFER_SIZE);
if (ret) {
kfree(q->buffer[i]);
dprintk("vino_queue_init(): "
"vino_allocate_buffer() failed\n");
break;
}
q->buffer[i]->id = i;
if (i > 0) {
q->buffer[i]->offset = q->buffer[i - 1]->offset +
q->buffer[i - 1]->size;
} else {
q->buffer[i]->offset = 0;
}
spin_lock_init(&q->buffer[i]->state_lock);
dprintk("vino_queue_init(): buffer = %d, offset = %d, "
"size = %d\n", i, q->buffer[i]->offset,
q->buffer[i]->size);
}
if (ret) {
vino_queue_free_with_count(q, i);
*length = 0;
} else {
q->length = *length;
vino_fifo_init(&q->in, q->length);
vino_fifo_init(&q->out, q->length);
q->type = VINO_MEMORY_MMAP;
q->magic = VINO_QUEUE_MAGIC;
}
mutex_unlock(&q->queue_mutex);
return ret;
}
static struct vino_framebuffer *vino_queue_add(struct
vino_framebuffer_queue *q,
unsigned int id)
{
struct vino_framebuffer *ret = NULL;
unsigned int total;
unsigned long flags;
dprintk("vino_queue_add(): id = %d\n", id);
if (q->magic != VINO_QUEUE_MAGIC) {
return ret;
}
spin_lock_irqsave(&q->queue_lock, flags);
if (q->length == 0)
goto out;
if (id >= q->length)
goto out;
/* not needed?: if (vino_fifo_full(&q->out)) {
goto out;
}*/
/* check that outgoing queue isn't already full
* (or that it won't become full) */
total = vino_fifo_get_used(&q->in) +
vino_fifo_get_used(&q->out);
if (total >= q->length)
goto out;
if (vino_fifo_enqueue(&q->in, id))
goto out;
ret = q->buffer[id];
out:
spin_unlock_irqrestore(&q->queue_lock, flags);
return ret;
}
static struct vino_framebuffer *vino_queue_transfer(struct
vino_framebuffer_queue *q)
{
struct vino_framebuffer *ret = NULL;
struct vino_framebuffer *fb;
unsigned int id;
unsigned long flags;
dprintk("vino_queue_transfer():\n");
if (q->magic != VINO_QUEUE_MAGIC) {
return ret;
}
spin_lock_irqsave(&q->queue_lock, flags);
if (q->length == 0)
goto out;
// now this actually removes an entry from the incoming queue
if (vino_fifo_dequeue(&q->in, &id)) {
goto out;
}
dprintk("vino_queue_transfer(): id = %d\n", id);
fb = q->buffer[id];
// we have already checked that the outgoing queue is not full, but...
if (vino_fifo_enqueue(&q->out, id)) {
printk(KERN_ERR "vino_queue_transfer(): "
"outgoing queue is full, this shouldn't happen!\n");
goto out;
}
ret = fb;
out:
spin_unlock_irqrestore(&q->queue_lock, flags);
return ret;
}
/* returns true/false */
static int vino_queue_incoming_contains(struct vino_framebuffer_queue *q,
unsigned int id)
{
int ret = 0;
unsigned long flags;
if (q->magic != VINO_QUEUE_MAGIC) {
return ret;
}
spin_lock_irqsave(&q->queue_lock, flags);
if (q->length == 0)
goto out;
ret = vino_fifo_has_id(&q->in, id);
out:
spin_unlock_irqrestore(&q->queue_lock, flags);
return ret;
}
/* returns true/false */
static int vino_queue_outgoing_contains(struct vino_framebuffer_queue *q,
unsigned int id)
{
int ret = 0;
unsigned long flags;
if (q->magic != VINO_QUEUE_MAGIC) {
return ret;
}
spin_lock_irqsave(&q->queue_lock, flags);
if (q->length == 0)
goto out;
ret = vino_fifo_has_id(&q->out, id);
out:
spin_unlock_irqrestore(&q->queue_lock, flags);
return ret;
}
static int vino_queue_get_incoming(struct vino_framebuffer_queue *q,
unsigned int *used)
{
int ret = 0;
unsigned long flags;
if (q->magic != VINO_QUEUE_MAGIC) {
return VINO_QUEUE_ERROR;
}
spin_lock_irqsave(&q->queue_lock, flags);
if (q->length == 0) {
ret = VINO_QUEUE_ERROR;
goto out;
}
*used = vino_fifo_get_used(&q->in);
out:
spin_unlock_irqrestore(&q->queue_lock, flags);
return ret;
}
static int vino_queue_get_outgoing(struct vino_framebuffer_queue *q,
unsigned int *used)
{
int ret = 0;
unsigned long flags;
if (q->magic != VINO_QUEUE_MAGIC) {
return VINO_QUEUE_ERROR;
}
spin_lock_irqsave(&q->queue_lock, flags);
if (q->length == 0) {
ret = VINO_QUEUE_ERROR;
goto out;
}
*used = vino_fifo_get_used(&q->out);
out:
spin_unlock_irqrestore(&q->queue_lock, flags);
return ret;
}
#if 0
static int vino_queue_get_total(struct vino_framebuffer_queue *q,
unsigned int *total)
{
int ret = 0;
unsigned long flags;
if (q->magic != VINO_QUEUE_MAGIC) {
return VINO_QUEUE_ERROR;
}
spin_lock_irqsave(&q->queue_lock, flags);
if (q->length == 0) {
ret = VINO_QUEUE_ERROR;
goto out;
}
*total = vino_fifo_get_used(&q->in) +
vino_fifo_get_used(&q->out);
out:
spin_unlock_irqrestore(&q->queue_lock, flags);
return ret;
}
#endif
static struct vino_framebuffer *vino_queue_peek(struct
vino_framebuffer_queue *q,
unsigned int *id)
{
struct vino_framebuffer *ret = NULL;
unsigned long flags;
if (q->magic != VINO_QUEUE_MAGIC) {
return ret;
}
spin_lock_irqsave(&q->queue_lock, flags);
if (q->length == 0)
goto out;
if (vino_fifo_peek(&q->in, id)) {
goto out;
}
ret = q->buffer[*id];
out:
spin_unlock_irqrestore(&q->queue_lock, flags);
return ret;
}
static struct vino_framebuffer *vino_queue_remove(struct
vino_framebuffer_queue *q,
unsigned int *id)
{
struct vino_framebuffer *ret = NULL;
unsigned long flags;
dprintk("vino_queue_remove():\n");
if (q->magic != VINO_QUEUE_MAGIC) {
return ret;
}
spin_lock_irqsave(&q->queue_lock, flags);
if (q->length == 0)
goto out;
if (vino_fifo_dequeue(&q->out, id)) {
goto out;
}
dprintk("vino_queue_remove(): id = %d\n", *id);
ret = q->buffer[*id];
out:
spin_unlock_irqrestore(&q->queue_lock, flags);
return ret;
}
static struct
vino_framebuffer *vino_queue_get_buffer(struct vino_framebuffer_queue *q,
unsigned int id)
{
struct vino_framebuffer *ret = NULL;
unsigned long flags;
if (q->magic != VINO_QUEUE_MAGIC) {
return ret;
}
spin_lock_irqsave(&q->queue_lock, flags);
if (q->length == 0)
goto out;
if (id >= q->length)
goto out;
ret = q->buffer[id];
out:
spin_unlock_irqrestore(&q->queue_lock, flags);
return ret;
}
static unsigned int vino_queue_get_length(struct vino_framebuffer_queue *q)
{
unsigned int length = 0;
unsigned long flags;
if (q->magic != VINO_QUEUE_MAGIC) {
return length;
}
spin_lock_irqsave(&q->queue_lock, flags);
length = q->length;
spin_unlock_irqrestore(&q->queue_lock, flags);
return length;
}
static int vino_queue_has_mapped_buffers(struct vino_framebuffer_queue *q)
{
unsigned int i;
int ret = 0;
unsigned long flags;
if (q->magic != VINO_QUEUE_MAGIC) {
return ret;
}
spin_lock_irqsave(&q->queue_lock, flags);
for (i = 0; i < q->length; i++) {
if (q->buffer[i]->map_count > 0) {
ret = 1;
break;
}
}
spin_unlock_irqrestore(&q->queue_lock, flags);
return ret;
}
/* VINO functions */
/* execute with input_lock locked */
static void vino_update_line_size(struct vino_channel_settings *vcs)
{
unsigned int w = vcs->clipping.right - vcs->clipping.left;
unsigned int d = vcs->decimation;
unsigned int bpp = vino_data_formats[vcs->data_format].bpp;
unsigned int lsize;
dprintk("update_line_size(): before: w = %d, d = %d, "
"line_size = %d\n", w, d, vcs->line_size);
/* line size must be multiple of 8 bytes */
lsize = (bpp * (w / d)) & ~7;
w = (lsize / bpp) * d;
vcs->clipping.right = vcs->clipping.left + w;
vcs->line_size = lsize;
dprintk("update_line_size(): after: w = %d, d = %d, "
"line_size = %d\n", w, d, vcs->line_size);
}
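/* Standalone check (hypothetical values) of the alignment rule in
* vino_update_line_size(): the DMA line must be a multiple of 8 bytes,
* so the clipped width shrinks to whatever still fits. */
#if 0
#include <stdio.h>
int main(void)
{
unsigned w = 637, d = 2, bpp = 2; /* YUV 4:2:2, decimation 2 */
unsigned lsize = (bpp * (w / d)) & ~7u; /* 636 -> 632 bytes */
unsigned new_w = (lsize / bpp) * d; /* width that fits: 632 */
printf("line_size = %u bytes, width %u -> %u\n", lsize, w, new_w);
return 0;
}
#endif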
/* execute with input_lock locked */
static void vino_set_clipping(struct vino_channel_settings *vcs,
unsigned int x, unsigned int y,
unsigned int w, unsigned int h)
{
unsigned int maxwidth, maxheight;
unsigned int d;
maxwidth = vino_data_norms[vcs->data_norm].width;
maxheight = vino_data_norms[vcs->data_norm].height;
d = vcs->decimation;
y &= ~1; /* odd/even fields */
if (x > maxwidth) {
x = 0;
}
if (y > maxheight) {
y = 0;
}
if (((w / d) < VINO_MIN_WIDTH)
|| ((h / d) < VINO_MIN_HEIGHT)) {
w = VINO_MIN_WIDTH * d;
h = VINO_MIN_HEIGHT * d;
}
if ((x + w) > maxwidth) {
w = maxwidth - x;
if ((w / d) < VINO_MIN_WIDTH)
x = maxwidth - VINO_MIN_WIDTH * d;
}
if ((y + h) > maxheight) {
h = maxheight - y;
if ((h / d) < VINO_MIN_HEIGHT)
y = maxheight - VINO_MIN_HEIGHT * d;
}
vcs->clipping.left = x;
vcs->clipping.top = y;
vcs->clipping.right = x + w;
vcs->clipping.bottom = y + h;
vino_update_line_size(vcs);
dprintk("clipping %d, %d, %d, %d / %d - %d\n",
vcs->clipping.left, vcs->clipping.top, vcs->clipping.right,
vcs->clipping.bottom, vcs->decimation, vcs->line_size);
}
/* execute with input_lock locked */
static inline void vino_set_default_clipping(struct vino_channel_settings *vcs)
{
vino_set_clipping(vcs, 0, 0, vino_data_norms[vcs->data_norm].width,
vino_data_norms[vcs->data_norm].height);
}
/* execute with input_lock locked */
static void vino_set_scaling(struct vino_channel_settings *vcs,
unsigned int w, unsigned int h)
{
unsigned int x, y, curw, curh, d;
x = vcs->clipping.left;
y = vcs->clipping.top;
curw = vcs->clipping.right - vcs->clipping.left;
curh = vcs->clipping.bottom - vcs->clipping.top;
d = max(curw / w, curh / h);
dprintk("scaling w: %d, h: %d, curw: %d, curh: %d, d: %d\n",
w, h, curw, curh, d);
if (d < 1) {
d = 1;
} else if (d > 8) {
d = 8;
}
vcs->decimation = d;
vino_set_clipping(vcs, x, y, w * d, h * d);
dprintk("scaling %d, %d, %d, %d / %d - %d\n", vcs->clipping.left,
vcs->clipping.top, vcs->clipping.right, vcs->clipping.bottom,
vcs->decimation, vcs->line_size);
}
/* execute with input_lock locked */
static inline void vino_set_default_scaling(struct vino_channel_settings *vcs)
{
vino_set_scaling(vcs, vcs->clipping.right - vcs->clipping.left,
vcs->clipping.bottom - vcs->clipping.top);
}
/* execute with input_lock locked */
static void vino_set_framerate(struct vino_channel_settings *vcs,
unsigned int fps)
{
unsigned int mask;
switch (vcs->data_norm) {
case VINO_DATA_NORM_NTSC:
case VINO_DATA_NORM_D1:
fps = (unsigned int)(fps / 6) * 6; // FIXME: round!
if (fps < vino_data_norms[vcs->data_norm].fps_min)
fps = vino_data_norms[vcs->data_norm].fps_min;
if (fps > vino_data_norms[vcs->data_norm].fps_max)
fps = vino_data_norms[vcs->data_norm].fps_max;
switch (fps) {
case 6:
mask = 0x003;
break;
case 12:
mask = 0x0c3;
break;
case 18:
mask = 0x333;
break;
case 24:
mask = 0x3ff;
break;
case 30:
mask = 0xfff;
break;
default:
mask = VINO_FRAMERT_FULL;
}
vcs->framert_reg = VINO_FRAMERT_RT(mask);
break;
case VINO_DATA_NORM_PAL:
case VINO_DATA_NORM_SECAM:
fps = (unsigned int)(fps / 5) * 5; // FIXME: round!
if (fps < vino_data_norms[vcs->data_norm].fps_min)
fps = vino_data_norms[vcs->data_norm].fps_min;
if (fps > vino_data_norms[vcs->data_norm].fps_max)
fps = vino_data_norms[vcs->data_norm].fps_max;
switch (fps) {
case 5:
mask = 0x003;
break;
case 10:
mask = 0x0c3;
break;
case 15:
mask = 0x333;
break;
case 20:
mask = 0x0ff;
break;
case 25:
mask = 0x3ff;
break;
default:
mask = VINO_FRAMERT_FULL;
}
vcs->framert_reg = VINO_FRAMERT_RT(mask) | VINO_FRAMERT_PAL;
break;
}
vcs->fps = fps;
}
/* execute with input_lock locked */
static inline void vino_set_default_framerate(struct
vino_channel_settings *vcs)
{
vino_set_framerate(vcs, vino_data_norms[vcs->data_norm].fps_max);
}
/* VINO I2C bus functions */
struct i2c_algo_sgi_data {
void *data; /* private data for lowlevel routines */
unsigned (*getctrl)(void *data);
void (*setctrl)(void *data, unsigned val);
unsigned (*rdata)(void *data);
void (*wdata)(void *data, unsigned val);
int xfer_timeout;
int ack_timeout;
};
static int wait_xfer_done(struct i2c_algo_sgi_data *adap)
{
int i;
for (i = 0; i < adap->xfer_timeout; i++) {
if ((adap->getctrl(adap->data) & SGI_I2C_XFER_BUSY) == 0)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int wait_ack(struct i2c_algo_sgi_data *adap)
{
int i;
if (wait_xfer_done(adap))
return -ETIMEDOUT;
for (i = 0; i < adap->ack_timeout; i++) {
if ((adap->getctrl(adap->data) & SGI_I2C_NACK) == 0)
return 0;
udelay(1);
}
return -ETIMEDOUT;
}
static int force_idle(struct i2c_algo_sgi_data *adap)
{
int i;
adap->setctrl(adap->data, SGI_I2C_FORCE_IDLE);
for (i = 0; i < adap->xfer_timeout; i++) {
if ((adap->getctrl(adap->data) & SGI_I2C_NOT_IDLE) == 0)
goto out;
udelay(1);
}
return -ETIMEDOUT;
out:
if (adap->getctrl(adap->data) & SGI_I2C_BUS_ERR)
return -EIO;
return 0;
}
static int do_address(struct i2c_algo_sgi_data *adap, unsigned int addr,
int rd)
{
if (rd)
adap->setctrl(adap->data, SGI_I2C_NOT_IDLE);
/* Check if bus is idle, eventually force it to do so */
if (adap->getctrl(adap->data) & SGI_I2C_NOT_IDLE)
if (force_idle(adap))
return -EIO;
/* Write out the i2c chip address and specify operation */
adap->setctrl(adap->data,
SGI_I2C_HOLD_BUS | SGI_I2C_WRITE | SGI_I2C_NOT_IDLE);
if (rd)
addr |= 1;
adap->wdata(adap->data, addr);
if (wait_ack(adap))
return -EIO;
return 0;
}
static int i2c_read(struct i2c_algo_sgi_data *adap, unsigned char *buf,
unsigned int len)
{
int i;
adap->setctrl(adap->data,
SGI_I2C_HOLD_BUS | SGI_I2C_READ | SGI_I2C_NOT_IDLE);
for (i = 0; i < len; i++) {
if (wait_xfer_done(adap))
return -EIO;
buf[i] = adap->rdata(adap->data);
}
adap->setctrl(adap->data, SGI_I2C_RELEASE_BUS | SGI_I2C_FORCE_IDLE);
return 0;
}
static int i2c_write(struct i2c_algo_sgi_data *adap, unsigned char *buf,
unsigned int len)
{
int i;
/* We are already in write state */
for (i = 0; i < len; i++) {
adap->wdata(adap->data, buf[i]);
if (wait_ack(adap))
return -EIO;
}
return 0;
}
static int sgi_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
int num)
{
struct i2c_algo_sgi_data *adap = i2c_adap->algo_data;
struct i2c_msg *p;
int i, err = 0;
for (i = 0; !err && i < num; i++) {
p = &msgs[i];
err = do_address(adap, p->addr, p->flags & I2C_M_RD);
if (err || !p->len)
continue;
if (p->flags & I2C_M_RD)
err = i2c_read(adap, p->buf, p->len);
else
err = i2c_write(adap, p->buf, p->len);
}
return (err < 0) ? err : i;
}
static u32 sgi_func(struct i2c_adapter *adap)
{
return I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm sgi_algo = {
.master_xfer = sgi_xfer,
.functionality = sgi_func,
};
static unsigned i2c_vino_getctrl(void *data)
{
return vino->i2c_control;
}
static void i2c_vino_setctrl(void *data, unsigned val)
{
vino->i2c_control = val;
}
static unsigned i2c_vino_rdata(void *data)
{
return vino->i2c_data;
}
static void i2c_vino_wdata(void *data, unsigned val)
{
vino->i2c_data = val;
}
static struct i2c_algo_sgi_data i2c_sgi_vino_data = {
.getctrl = &i2c_vino_getctrl,
.setctrl = &i2c_vino_setctrl,
.rdata = &i2c_vino_rdata,
.wdata = &i2c_vino_wdata,
.xfer_timeout = 200,
.ack_timeout = 1000,
};
static struct i2c_adapter vino_i2c_adapter = {
.name = "VINO I2C bus",
.algo = &sgi_algo,
.algo_data = &i2c_sgi_vino_data,
.owner = THIS_MODULE,
};
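/* Hedged sketch (not from this driver): reading one register from a
* client on the VINO I2C bus through the standard i2c_transfer() API,
* i.e. a subaddress write followed by a one-byte read. The 0x45 client
* address and the helper name are made up. */
#if 0
static int vino_i2c_read_reg(u8 reg, u8 *val)
{
struct i2c_msg msgs[] = {
{ .addr = 0x45, .flags = 0, .len = 1, .buf = &reg },
{ .addr = 0x45, .flags = I2C_M_RD, .len = 1, .buf = val },
};
int ret = i2c_transfer(&vino_i2c_adapter, msgs, 2);
/* sgi_xfer() above returns the number of messages processed */
return ret == 2 ? 0 : (ret < 0 ? ret : -EIO);
}
#endif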
/*
* Prepare VINO for DMA transfer...
* (execute only with vino_lock and input_lock locked)
*/
static int vino_dma_setup(struct vino_channel_settings *vcs,
struct vino_framebuffer *fb)
{
u32 ctrl, intr;
struct sgi_vino_channel *ch;
const struct vino_data_norm *norm;
dprintk("vino_dma_setup():\n");
vcs->field = 0;
fb->frame_counter = 0;
ch = (vcs->channel == VINO_CHANNEL_A) ? &vino->a : &vino->b;
norm = &vino_data_norms[vcs->data_norm];
ch->page_index = 0;
ch->line_count = 0;
/* VINO line size register is set 8 bytes less than actual */
ch->line_size = vcs->line_size - 8;
/* let VINO know where to transfer data */
ch->start_desc_tbl = fb->desc_table.dma;
ch->next_4_desc = fb->desc_table.dma;
/* give vino time to fetch the first four descriptors, 5 usec
* should be more than enough time */
udelay(VINO_DESC_FETCH_DELAY);
dprintk("vino_dma_setup(): start desc = %08x, next 4 desc = %08x\n",
ch->start_desc_tbl, ch->next_4_desc);
/* set the alpha register */
ch->alpha = vcs->alpha;
/* set clipping registers */
ch->clip_start = VINO_CLIP_ODD(norm->odd.top + vcs->clipping.top / 2) |
VINO_CLIP_EVEN(norm->even.top +
vcs->clipping.top / 2) |
VINO_CLIP_X(vcs->clipping.left);
ch->clip_end = VINO_CLIP_ODD(norm->odd.top +
vcs->clipping.bottom / 2 - 1) |
VINO_CLIP_EVEN(norm->even.top +
vcs->clipping.bottom / 2 - 1) |
VINO_CLIP_X(vcs->clipping.right);
/* set the size of actual content in the buffer (DECIMATION !) */
fb->data_size = ((vcs->clipping.right - vcs->clipping.left) /
vcs->decimation) *
((vcs->clipping.bottom - vcs->clipping.top) /
vcs->decimation) *
vino_data_formats[vcs->data_format].bpp;
ch->frame_rate = vcs->framert_reg;
ctrl = vino->control;
intr = vino->intr_status;
if (vcs->channel == VINO_CHANNEL_A) {
/* All interrupt conditions for this channel have been cleared,
* so clear the interrupt status register and enable
* interrupts */
intr &= ~VINO_INTSTAT_A;
ctrl |= VINO_CTRL_A_INT;
/* enable synchronization */
ctrl |= VINO_CTRL_A_SYNC_ENBL;
/* enable frame assembly */
ctrl |= VINO_CTRL_A_INTERLEAVE_ENBL;
/* set decimation used */
if (vcs->decimation < 2)
ctrl &= ~VINO_CTRL_A_DEC_ENBL;
else {
ctrl |= VINO_CTRL_A_DEC_ENBL;
ctrl &= ~VINO_CTRL_A_DEC_SCALE_MASK;
ctrl |= (vcs->decimation - 1) <<
VINO_CTRL_A_DEC_SCALE_SHIFT;
}
/* select input interface */
if (vcs->input == VINO_INPUT_D1)
ctrl |= VINO_CTRL_A_SELECT;
else
ctrl &= ~VINO_CTRL_A_SELECT;
/* palette */
ctrl &= ~(VINO_CTRL_A_LUMA_ONLY | VINO_CTRL_A_RGB |
VINO_CTRL_A_DITHER);
} else {
intr &= ~VINO_INTSTAT_B;
ctrl |= VINO_CTRL_B_INT;
ctrl |= VINO_CTRL_B_SYNC_ENBL;
ctrl |= VINO_CTRL_B_INTERLEAVE_ENBL;
if (vcs->decimation < 2)
ctrl &= ~VINO_CTRL_B_DEC_ENBL;
else {
ctrl |= VINO_CTRL_B_DEC_ENBL;
ctrl &= ~VINO_CTRL_B_DEC_SCALE_MASK;
ctrl |= (vcs->decimation - 1) <<
VINO_CTRL_B_DEC_SCALE_SHIFT;
}
if (vcs->input == VINO_INPUT_D1)
ctrl |= VINO_CTRL_B_SELECT;
else
ctrl &= ~VINO_CTRL_B_SELECT;
ctrl &= ~(VINO_CTRL_B_LUMA_ONLY | VINO_CTRL_B_RGB |
VINO_CTRL_B_DITHER);
}
/* set palette */
fb->data_format = vcs->data_format;
switch (vcs->data_format) {
case VINO_DATA_FMT_GREY:
ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
VINO_CTRL_A_LUMA_ONLY : VINO_CTRL_B_LUMA_ONLY;
break;
case VINO_DATA_FMT_RGB32:
ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
VINO_CTRL_A_RGB : VINO_CTRL_B_RGB;
break;
case VINO_DATA_FMT_YUV:
/* nothing needs to be done */
break;
case VINO_DATA_FMT_RGB332:
ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
VINO_CTRL_A_RGB | VINO_CTRL_A_DITHER :
VINO_CTRL_B_RGB | VINO_CTRL_B_DITHER;
break;
}
vino->intr_status = intr;
vino->control = ctrl;
return 0;
}
/* (execute only with vino_lock locked) */
static inline void vino_dma_start(struct vino_channel_settings *vcs)
{
u32 ctrl = vino->control;
dprintk("vino_dma_start():\n");
ctrl |= (vcs->channel == VINO_CHANNEL_A) ?
VINO_CTRL_A_DMA_ENBL : VINO_CTRL_B_DMA_ENBL;
vino->control = ctrl;
}
/* (execute only with vino_lock locked) */
static inline void vino_dma_stop(struct vino_channel_settings *vcs)
{
u32 ctrl = vino->control;
ctrl &= (vcs->channel == VINO_CHANNEL_A) ?
~VINO_CTRL_A_DMA_ENBL : ~VINO_CTRL_B_DMA_ENBL;
ctrl &= (vcs->channel == VINO_CHANNEL_A) ?
~VINO_CTRL_A_INT : ~VINO_CTRL_B_INT;
vino->control = ctrl;
dprintk("vino_dma_stop():\n");
}
/*
* Load the dummy page into the descriptor registers. This prevents
* the generation of spurious interrupts. (execute only with vino_lock locked)
*/
static void vino_clear_interrupt(struct vino_channel_settings *vcs)
{
struct sgi_vino_channel *ch;
ch = (vcs->channel == VINO_CHANNEL_A) ? &vino->a : &vino->b;
ch->page_index = 0;
ch->line_count = 0;
ch->start_desc_tbl = vino_drvdata->dummy_desc_table.dma;
ch->next_4_desc = vino_drvdata->dummy_desc_table.dma;
udelay(VINO_DESC_FETCH_DELAY);
dprintk("channel %c clear interrupt condition\n",
(vcs->channel == VINO_CHANNEL_A) ? 'A':'B');
}
static int vino_capture(struct vino_channel_settings *vcs,
struct vino_framebuffer *fb)
{
int err = 0;
unsigned long flags, flags2;
spin_lock_irqsave(&fb->state_lock, flags);
if (fb->state == VINO_FRAMEBUFFER_IN_USE)
err = -EBUSY;
fb->state = VINO_FRAMEBUFFER_IN_USE;
spin_unlock_irqrestore(&fb->state_lock, flags);
if (err)
return err;
spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
spin_lock_irqsave(&vino_drvdata->input_lock, flags2);
vino_dma_setup(vcs, fb);
vino_dma_start(vcs);
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags2);
spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
return err;
}
static struct vino_framebuffer *
vino_capture_enqueue(struct vino_channel_settings *vcs,
unsigned int index)
{
struct vino_framebuffer *fb;
unsigned long flags;
dprintk("vino_capture_enqueue():\n");
spin_lock_irqsave(&vcs->capture_lock, flags);
fb = vino_queue_add(&vcs->fb_queue, index);
if (fb == NULL) {
dprintk("vino_capture_enqueue(): vino_queue_add() failed, "
"queue full?\n");
goto out;
}
out:
spin_unlock_irqrestore(&vcs->capture_lock, flags);
return fb;
}
static int vino_capture_next(struct vino_channel_settings *vcs, int start)
{
struct vino_framebuffer *fb;
unsigned int incoming, id;
int err = 0;
unsigned long flags;
dprintk("vino_capture_next():\n");
spin_lock_irqsave(&vcs->capture_lock, flags);
if (start) {
/* start capture only if capture isn't in progress already */
if (vcs->capturing) {
spin_unlock_irqrestore(&vcs->capture_lock, flags);
return 0;
}
} else {
/* capture next frame:
* stop capture if capturing is not set */
if (!vcs->capturing) {
spin_unlock_irqrestore(&vcs->capture_lock, flags);
return 0;
}
}
err = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
if (err) {
dprintk("vino_capture_next(): vino_queue_get_incoming() "
"failed\n");
err = -EINVAL;
goto out;
}
if (incoming == 0) {
dprintk("vino_capture_next(): no buffers available\n");
goto out;
}
fb = vino_queue_peek(&vcs->fb_queue, &id);
if (fb == NULL) {
dprintk("vino_capture_next(): vino_queue_peek() failed\n");
err = -EINVAL;
goto out;
}
if (start) {
vcs->capturing = 1;
}
spin_unlock_irqrestore(&vcs->capture_lock, flags);
err = vino_capture(vcs, fb);
return err;
out:
vcs->capturing = 0;
spin_unlock_irqrestore(&vcs->capture_lock, flags);
return err;
}
static inline int vino_is_capturing(struct vino_channel_settings *vcs)
{
int ret;
unsigned long flags;
spin_lock_irqsave(&vcs->capture_lock, flags);
ret = vcs->capturing;
spin_unlock_irqrestore(&vcs->capture_lock, flags);
return ret;
}
/* waits until a frame is captured */
static int vino_wait_for_frame(struct vino_channel_settings *vcs)
{
wait_queue_t wait;
int err = 0;
dprintk("vino_wait_for_frame():\n");
init_waitqueue_entry(&wait, current);
/* add ourselves into wait queue */
add_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
/* to ensure that schedule_timeout() returns immediately
* if a VINO interrupt was triggered in the meantime */
schedule_timeout_interruptible(msecs_to_jiffies(100));
if (signal_pending(current))
err = -EINTR;
remove_wait_queue(&vcs->fb_queue.frame_wait_queue, &wait);
dprintk("vino_wait_for_frame(): waiting for frame %s\n",
err ? "failed" : "ok");
return err;
}
/* the function assumes that PAGE_SIZE % 4 == 0 */
static void vino_convert_to_rgba(struct vino_framebuffer *fb)
{
unsigned char *pageptr;
unsigned int page, i;
unsigned char a;
for (page = 0; page < fb->desc_table.page_count; page++) {
pageptr = (unsigned char *)fb->desc_table.virtual[page];
for (i = 0; i < PAGE_SIZE; i += 4) {
/* reverse the byte order of each 32-bit pixel:
* swap bytes 0 and 3, then bytes 1 and 2 */
a = pageptr[0];
pageptr[0] = pageptr[3];
pageptr[3] = a;
a = pageptr[1];
pageptr[1] = pageptr[2];
pageptr[2] = a;
pageptr += 4;
}
}
}
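/*
* Worked example (compiled out): the conversion above reverses the
* byte order of every 32-bit pixel in place, e.g. hypothetical input
* bytes { 0x01, 0x02, 0x03, 0x04 } become { 0x04, 0x03, 0x02, 0x01 }.
* The exact component meaning depends on the VINO RGB32 byte order,
* which is assumed here to be the reverse of what userspace expects.
*/
#if 0
static void vino_pixel_swap_example(void)
{
unsigned char pixel[4] = { 0x01, 0x02, 0x03, 0x04 };
unsigned char a;
a = pixel[0]; pixel[0] = pixel[3]; pixel[3] = a;
a = pixel[1]; pixel[1] = pixel[2]; pixel[2] = a;
/* pixel now holds { 0x04, 0x03, 0x02, 0x01 } */
}
#endif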
/* checks if the buffer is in correct state and syncs data */
static int vino_check_buffer(struct vino_channel_settings *vcs,
struct vino_framebuffer *fb)
{
int err = 0;
unsigned long flags;
dprintk("vino_check_buffer():\n");
spin_lock_irqsave(&fb->state_lock, flags);
switch (fb->state) {
case VINO_FRAMEBUFFER_IN_USE:
err = -EIO;
break;
case VINO_FRAMEBUFFER_READY:
vino_sync_buffer(fb);
fb->state = VINO_FRAMEBUFFER_UNUSED;
break;
default:
err = -EINVAL;
}
spin_unlock_irqrestore(&fb->state_lock, flags);
if (!err) {
if (vino_pixel_conversion
&& (fb->data_format == VINO_DATA_FMT_RGB32)) {
vino_convert_to_rgba(fb);
}
} else if (err && (err != -EINVAL)) {
dprintk("vino_check_buffer(): buffer not ready\n");
spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
vino_dma_stop(vcs);
vino_clear_interrupt(vcs);
spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
}
return err;
}
/* forcefully terminates capture */
static void vino_capture_stop(struct vino_channel_settings *vcs)
{
unsigned int incoming = 0, outgoing = 0, id;
unsigned long flags, flags2;
dprintk("vino_capture_stop():\n");
spin_lock_irqsave(&vcs->capture_lock, flags);
/* unset capturing to stop queue processing */
vcs->capturing = 0;
spin_lock_irqsave(&vino_drvdata->vino_lock, flags2);
vino_dma_stop(vcs);
vino_clear_interrupt(vcs);
spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags2);
/* remove all items from the queue */
if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) {
dprintk("vino_capture_stop(): "
"vino_queue_get_incoming() failed\n");
goto out;
}
while (incoming > 0) {
vino_queue_transfer(&vcs->fb_queue);
if (vino_queue_get_incoming(&vcs->fb_queue, &incoming)) {
dprintk("vino_capture_stop(): "
"vino_queue_get_incoming() failed\n");
goto out;
}
}
if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
dprintk("vino_capture_stop(): "
"vino_queue_get_outgoing() failed\n");
goto out;
}
while (outgoing > 0) {
vino_queue_remove(&vcs->fb_queue, &id);
if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
dprintk("vino_capture_stop(): "
"vino_queue_get_outgoing() failed\n");
goto out;
}
}
out:
spin_unlock_irqrestore(&vcs->capture_lock, flags);
}
#if 0
static int vino_capture_failed(struct vino_channel_settings *vcs)
{
struct vino_framebuffer *fb;
unsigned long flags;
unsigned int i;
int ret;
dprintk("vino_capture_failed():\n");
spin_lock_irqsave(&vino_drvdata->vino_lock, flags);
vino_dma_stop(vcs);
vino_clear_interrupt(vcs);
spin_unlock_irqrestore(&vino_drvdata->vino_lock, flags);
ret = vino_queue_get_incoming(&vcs->fb_queue, &i);
if (ret == VINO_QUEUE_ERROR) {
dprintk("vino_queue_get_incoming() failed\n");
return -EINVAL;
}
if (i == 0) {
/* no buffers to process */
return 0;
}
fb = vino_queue_peek(&vcs->fb_queue, &i);
if (fb == NULL) {
dprintk("vino_queue_peek() failed\n");
return -EINVAL;
}
spin_lock_irqsave(&fb->state_lock, flags);
if (fb->state == VINO_FRAMEBUFFER_IN_USE) {
fb->state = VINO_FRAMEBUFFER_UNUSED;
vino_queue_transfer(&vcs->fb_queue);
vino_queue_remove(&vcs->fb_queue, &i);
/* we should actually discard the newest frame,
* but who cares ... */
}
spin_unlock_irqrestore(&fb->state_lock, flags);
return 0;
}
#endif
static void vino_skip_frame(struct vino_channel_settings *vcs)
{
struct vino_framebuffer *fb;
unsigned long flags;
unsigned int id;
spin_lock_irqsave(&vcs->capture_lock, flags);
fb = vino_queue_peek(&vcs->fb_queue, &id);
if (!fb) {
spin_unlock_irqrestore(&vcs->capture_lock, flags);
dprintk("vino_skip_frame(): vino_queue_peek() failed!\n");
return;
}
spin_unlock_irqrestore(&vcs->capture_lock, flags);
spin_lock_irqsave(&fb->state_lock, flags);
fb->state = VINO_FRAMEBUFFER_UNUSED;
spin_unlock_irqrestore(&fb->state_lock, flags);
vino_capture_next(vcs, 0);
}
static void vino_frame_done(struct vino_channel_settings *vcs)
{
struct vino_framebuffer *fb;
unsigned long flags;
spin_lock_irqsave(&vcs->capture_lock, flags);
fb = vino_queue_transfer(&vcs->fb_queue);
if (!fb) {
spin_unlock_irqrestore(&vcs->capture_lock, flags);
dprintk("vino_frame_done(): vino_queue_transfer() failed!\n");
return;
}
spin_unlock_irqrestore(&vcs->capture_lock, flags);
fb->frame_counter = vcs->int_data.frame_counter;
memcpy(&fb->timestamp, &vcs->int_data.timestamp,
sizeof(struct timeval));
spin_lock_irqsave(&fb->state_lock, flags);
if (fb->state == VINO_FRAMEBUFFER_IN_USE)
fb->state = VINO_FRAMEBUFFER_READY;
spin_unlock_irqrestore(&fb->state_lock, flags);
wake_up(&vcs->fb_queue.frame_wait_queue);
vino_capture_next(vcs, 0);
}
static void vino_capture_tasklet(unsigned long channel)
{
struct vino_channel_settings *vcs;
vcs = (channel == VINO_CHANNEL_A)
? &vino_drvdata->a : &vino_drvdata->b;
if (vcs->int_data.skip)
vcs->int_data.skip_count++;
if (vcs->int_data.skip && (vcs->int_data.skip_count
<= VINO_MAX_FRAME_SKIP_COUNT)) {
vino_skip_frame(vcs);
} else {
vcs->int_data.skip_count = 0;
vino_frame_done(vcs);
}
}
static irqreturn_t vino_interrupt(int irq, void *dev_id)
{
u32 ctrl, intr;
unsigned int fc_a, fc_b;
int handled_a = 0, skip_a = 0, done_a = 0;
int handled_b = 0, skip_b = 0, done_b = 0;
#ifdef VINO_DEBUG_INT
int loop = 0;
unsigned int line_count = vino->a.line_count,
page_index = vino->a.page_index,
field_counter = vino->a.field_counter,
start_desc_tbl = vino->a.start_desc_tbl,
next_4_desc = vino->a.next_4_desc;
unsigned int line_count_2,
page_index_2,
field_counter_2,
start_desc_tbl_2,
next_4_desc_2;
#endif
spin_lock(&vino_drvdata->vino_lock);
while ((intr = vino->intr_status)) {
fc_a = vino->a.field_counter >> 1;
fc_b = vino->b.field_counter >> 1;
/* TODO: handle error interrupts in some special way?
* For now they just cause frames to be skipped. */
if (intr & VINO_INTSTAT_A) {
if (intr & VINO_INTSTAT_A_EOF) {
vino_drvdata->a.field++;
if (vino_drvdata->a.field > 1) {
vino_dma_stop(&vino_drvdata->a);
vino_clear_interrupt(&vino_drvdata->a);
vino_drvdata->a.field = 0;
done_a = 1;
} else {
if (vino->a.page_index
!= vino_drvdata->a.line_size) {
vino->a.line_count = 0;
vino->a.page_index =
vino_drvdata->
a.line_size;
vino->a.next_4_desc =
vino->a.start_desc_tbl;
}
}
dprintk("channel A end-of-field "
"interrupt: %04x\n", intr);
} else {
vino_dma_stop(&vino_drvdata->a);
vino_clear_interrupt(&vino_drvdata->a);
vino_drvdata->a.field = 0;
skip_a = 1;
dprintk("channel A error interrupt: %04x\n",
intr);
}
#ifdef VINO_DEBUG_INT
line_count_2 = vino->a.line_count;
page_index_2 = vino->a.page_index;
field_counter_2 = vino->a.field_counter;
start_desc_tbl_2 = vino->a.start_desc_tbl;
next_4_desc_2 = vino->a.next_4_desc;
printk("intr = %04x, loop = %d, field = %d\n",
intr, loop, vino_drvdata->a.field);
printk("1- line count = %04d, page index = %04d, "
"start = %08x, next = %08x\n"
" fieldc = %d, framec = %d\n",
line_count, page_index, start_desc_tbl,
next_4_desc, field_counter, fc_a);
printk("12-line count = %04d, page index = %04d, "
" start = %08x, next = %08x\n",
line_count_2, page_index_2, start_desc_tbl_2,
next_4_desc_2);
if (done_a)
printk("\n");
#endif
}
if (intr & VINO_INTSTAT_B) {
if (intr & VINO_INTSTAT_B_EOF) {
vino_drvdata->b.field++;
if (vino_drvdata->b.field > 1) {
vino_dma_stop(&vino_drvdata->b);
vino_clear_interrupt(&vino_drvdata->b);
vino_drvdata->b.field = 0;
done_b = 1;
}
dprintk("channel B end-of-field "
"interrupt: %04x\n", intr);
} else {
vino_dma_stop(&vino_drvdata->b);
vino_clear_interrupt(&vino_drvdata->b);
vino_drvdata->b.field = 0;
skip_b = 1;
dprintk("channel B error interrupt: %04x\n",
intr);
}
}
/* Always remember to clear interrupt status.
* Disable VINO interrupts while we do this. */
ctrl = vino->control;
vino->control = ctrl & ~(VINO_CTRL_A_INT | VINO_CTRL_B_INT);
vino->intr_status = ~intr;
vino->control = ctrl;
spin_unlock(&vino_drvdata->vino_lock);
if ((!handled_a) && (done_a || skip_a)) {
if (!skip_a) {
do_gettimeofday(&vino_drvdata->
a.int_data.timestamp);
vino_drvdata->a.int_data.frame_counter = fc_a;
}
vino_drvdata->a.int_data.skip = skip_a;
dprintk("channel A %s, interrupt: %d\n",
skip_a ? "skipping frame" : "frame done",
intr);
tasklet_hi_schedule(&vino_tasklet_a);
handled_a = 1;
}
if ((!handled_b) && (done_b || skip_b)) {
if (!skip_b) {
do_gettimeofday(&vino_drvdata->
b.int_data.timestamp);
vino_drvdata->b.int_data.frame_counter = fc_b;
}
vino_drvdata->b.int_data.skip = skip_b;
dprintk("channel B %s, interrupt: %d\n",
skip_b ? "skipping frame" : "frame done",
intr);
tasklet_hi_schedule(&vino_tasklet_b);
handled_b = 1;
}
#ifdef VINO_DEBUG_INT
loop++;
#endif
spin_lock(&vino_drvdata->vino_lock);
}
spin_unlock(&vino_drvdata->vino_lock);
return IRQ_HANDLED;
}
/* VINO video input management */
static int vino_get_saa7191_input(int input)
{
switch (input) {
case VINO_INPUT_COMPOSITE:
return SAA7191_INPUT_COMPOSITE;
case VINO_INPUT_SVIDEO:
return SAA7191_INPUT_SVIDEO;
default:
printk(KERN_ERR "VINO: vino_get_saa7191_input(): "
"invalid input!\n");
return -1;
}
}
/* execute with input_lock locked */
static int vino_is_input_owner(struct vino_channel_settings *vcs)
{
switch (vcs->input) {
case VINO_INPUT_COMPOSITE:
case VINO_INPUT_SVIDEO:
return vino_drvdata->decoder_owner == vcs->channel;
case VINO_INPUT_D1:
return vino_drvdata->camera_owner == vcs->channel;
default:
return 0;
}
}
static int vino_acquire_input(struct vino_channel_settings *vcs)
{
unsigned long flags;
int ret = 0;
dprintk("vino_acquire_input():\n");
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
/* First try D1 and then SAA7191 */
if (vino_drvdata->camera
&& (vino_drvdata->camera_owner == VINO_NO_CHANNEL)) {
vino_drvdata->camera_owner = vcs->channel;
vcs->input = VINO_INPUT_D1;
vcs->data_norm = VINO_DATA_NORM_D1;
} else if (vino_drvdata->decoder
&& (vino_drvdata->decoder_owner == VINO_NO_CHANNEL)) {
int input;
int data_norm;
v4l2_std_id norm;
input = VINO_INPUT_COMPOSITE;
ret = decoder_call(video, s_routing,
vino_get_saa7191_input(input), 0, 0);
if (ret) {
ret = -EINVAL;
goto out;
}
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
/* Don't hold spinlocks while auto-detecting norm
* as it may take a while... */
ret = decoder_call(video, querystd, &norm);
if (!ret) {
for (data_norm = 0; data_norm < 3; data_norm++) {
if (vino_data_norms[data_norm].std & norm)
break;
}
if (data_norm == 3)
data_norm = VINO_DATA_NORM_PAL;
ret = decoder_call(core, s_std, norm);
}
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
if (ret) {
ret = -EINVAL;
goto out;
}
vino_drvdata->decoder_owner = vcs->channel;
vcs->input = input;
vcs->data_norm = data_norm;
} else {
vcs->input = (vcs->channel == VINO_CHANNEL_A) ?
vino_drvdata->b.input : vino_drvdata->a.input;
vcs->data_norm = (vcs->channel == VINO_CHANNEL_A) ?
vino_drvdata->b.data_norm : vino_drvdata->a.data_norm;
}
if (vcs->input == VINO_INPUT_NONE) {
ret = -ENODEV;
goto out;
}
vino_set_default_clipping(vcs);
vino_set_default_scaling(vcs);
vino_set_default_framerate(vcs);
dprintk("vino_acquire_input(): %s\n", vino_inputs[vcs->input].name);
out:
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return ret;
}
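/*
* Sketch (compiled out): the std-to-norm matching loop above is
* repeated in vino_set_input() below and could be factored out like
* this. The bound of 3 presumably covers the decoder norms (PAL,
* NTSC, SECAM); VINO_DATA_NORM_PAL is the fallback the driver uses.
*/
#if 0
static int vino_data_norm_from_std(v4l2_std_id norm)
{
int data_norm;
for (data_norm = 0; data_norm < 3; data_norm++)
if (vino_data_norms[data_norm].std & norm)
return data_norm;
return VINO_DATA_NORM_PAL;
}
#endif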
static int vino_set_input(struct vino_channel_settings *vcs, int input)
{
struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
&vino_drvdata->b : &vino_drvdata->a;
unsigned long flags;
int ret = 0;
dprintk("vino_set_input():\n");
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
if (vcs->input == input)
goto out;
switch (input) {
case VINO_INPUT_COMPOSITE:
case VINO_INPUT_SVIDEO:
if (!vino_drvdata->decoder) {
ret = -EINVAL;
goto out;
}
if (vino_drvdata->decoder_owner == VINO_NO_CHANNEL) {
vino_drvdata->decoder_owner = vcs->channel;
}
if (vino_drvdata->decoder_owner == vcs->channel) {
int data_norm;
v4l2_std_id norm;
ret = decoder_call(video, s_routing,
vino_get_saa7191_input(input), 0, 0);
if (ret) {
vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
ret = -EINVAL;
goto out;
}
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
/* Don't hold spinlocks while auto-detecting norm
* as it may take a while... */
ret = decoder_call(video, querystd, &norm);
if (!ret) {
for (data_norm = 0; data_norm < 3; data_norm++) {
if (vino_data_norms[data_norm].std & norm)
break;
}
if (data_norm == 3)
data_norm = VINO_DATA_NORM_PAL;
ret = decoder_call(core, s_std, norm);
}
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
if (ret) {
vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
ret = -EINVAL;
goto out;
}
vcs->input = input;
vcs->data_norm = data_norm;
} else {
if (input != vcs2->input) {
ret = -EBUSY;
goto out;
}
vcs->input = input;
vcs->data_norm = vcs2->data_norm;
}
if (vino_drvdata->camera_owner == vcs->channel) {
/* Transfer the ownership or release the input */
if (vcs2->input == VINO_INPUT_D1) {
vino_drvdata->camera_owner = vcs2->channel;
} else {
vino_drvdata->camera_owner = VINO_NO_CHANNEL;
}
}
break;
case VINO_INPUT_D1:
if (!vino_drvdata->camera) {
ret = -EINVAL;
goto out;
}
if (vino_drvdata->camera_owner == VINO_NO_CHANNEL)
vino_drvdata->camera_owner = vcs->channel;
if (vino_drvdata->decoder_owner == vcs->channel) {
/* Transfer the ownership or release the input */
if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
(vcs2->input == VINO_INPUT_SVIDEO)) {
vino_drvdata->decoder_owner = vcs2->channel;
} else {
vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
}
}
vcs->input = input;
vcs->data_norm = VINO_DATA_NORM_D1;
break;
default:
ret = -EINVAL;
goto out;
}
vino_set_default_clipping(vcs);
vino_set_default_scaling(vcs);
vino_set_default_framerate(vcs);
dprintk("vino_set_input(): %s\n", vino_inputs[vcs->input].name);
out:
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return ret;
}
static void vino_release_input(struct vino_channel_settings *vcs)
{
struct vino_channel_settings *vcs2 = (vcs->channel == VINO_CHANNEL_A) ?
&vino_drvdata->b : &vino_drvdata->a;
unsigned long flags;
dprintk("vino_release_input():\n");
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
/* Release ownership of the channel
* and if the other channel takes input from
* the same source, transfer the ownership */
if (vino_drvdata->camera_owner == vcs->channel) {
if (vcs2->input == VINO_INPUT_D1) {
vino_drvdata->camera_owner = vcs2->channel;
} else {
vino_drvdata->camera_owner = VINO_NO_CHANNEL;
}
} else if (vino_drvdata->decoder_owner == vcs->channel) {
if ((vcs2->input == VINO_INPUT_COMPOSITE) ||
(vcs2->input == VINO_INPUT_SVIDEO)) {
vino_drvdata->decoder_owner = vcs2->channel;
} else {
vino_drvdata->decoder_owner = VINO_NO_CHANNEL;
}
}
vcs->input = VINO_INPUT_NONE;
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
}
/* execute with input_lock locked */
static int vino_set_data_norm(struct vino_channel_settings *vcs,
unsigned int data_norm,
unsigned long *flags)
{
int err = 0;
if (data_norm == vcs->data_norm)
return 0;
switch (vcs->input) {
case VINO_INPUT_D1:
/* only one "norm" supported */
if (data_norm != VINO_DATA_NORM_D1)
return -EINVAL;
break;
case VINO_INPUT_COMPOSITE:
case VINO_INPUT_SVIDEO: {
v4l2_std_id norm;
if ((data_norm != VINO_DATA_NORM_PAL)
&& (data_norm != VINO_DATA_NORM_NTSC)
&& (data_norm != VINO_DATA_NORM_SECAM))
return -EINVAL;
spin_unlock_irqrestore(&vino_drvdata->input_lock, *flags);
/* Don't hold spinlocks while setting norm
* as it may take a while... */
norm = vino_data_norms[data_norm].std;
err = decoder_call(core, s_std, norm);
spin_lock_irqsave(&vino_drvdata->input_lock, *flags);
if (err)
goto out;
vcs->data_norm = data_norm;
vino_set_default_clipping(vcs);
vino_set_default_scaling(vcs);
vino_set_default_framerate(vcs);
break;
}
default:
return -EINVAL;
}
out:
return err;
}
/* V4L2 helper functions */
static int vino_find_data_format(__u32 pixelformat)
{
int i;
for (i = 0; i < VINO_DATA_FMT_COUNT; i++) {
if (vino_data_formats[i].pixelformat == pixelformat)
return i;
}
return VINO_DATA_FMT_NONE;
}
static int vino_int_enum_input(struct vino_channel_settings *vcs, __u32 index)
{
int input = VINO_INPUT_NONE;
unsigned long flags;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
if (vino_drvdata->decoder && vino_drvdata->camera) {
switch (index) {
case 0:
input = VINO_INPUT_COMPOSITE;
break;
case 1:
input = VINO_INPUT_SVIDEO;
break;
case 2:
input = VINO_INPUT_D1;
break;
}
} else if (vino_drvdata->decoder) {
switch (index) {
case 0:
input = VINO_INPUT_COMPOSITE;
break;
case 1:
input = VINO_INPUT_SVIDEO;
break;
}
} else if (vino_drvdata->camera) {
switch (index) {
case 0:
input = VINO_INPUT_D1;
break;
}
}
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return input;
}
/* execute with input_lock locked */
static __u32 vino_find_input_index(struct vino_channel_settings *vcs)
{
__u32 index = 0;
// FIXME: detect when no inputs available
if (vino_drvdata->decoder && vino_drvdata->camera) {
switch (vcs->input) {
case VINO_INPUT_COMPOSITE:
index = 0;
break;
case VINO_INPUT_SVIDEO:
index = 1;
break;
case VINO_INPUT_D1:
index = 2;
break;
}
} else if (vino_drvdata->decoder) {
switch (vcs->input) {
case VINO_INPUT_COMPOSITE:
index = 0;
break;
case VINO_INPUT_SVIDEO:
index = 1;
break;
}
} else if (vino_drvdata->camera) {
switch (vcs->input) {
case VINO_INPUT_D1:
index = 0;
break;
}
}
return index;
}
/* V4L2 ioctls */
static int vino_querycap(struct file *file, void *__fh,
struct v4l2_capability *cap)
{
memset(cap, 0, sizeof(struct v4l2_capability));
strcpy(cap->driver, vino_driver_name);
strcpy(cap->card, vino_driver_description);
strcpy(cap->bus_info, vino_bus_name);
cap->version = VINO_VERSION_CODE;
cap->capabilities =
V4L2_CAP_VIDEO_CAPTURE |
V4L2_CAP_STREAMING;
// V4L2_CAP_OVERLAY, V4L2_CAP_READWRITE
return 0;
}
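/*
* Illustrative userspace usage (compiled out, not kernel code): how
* an application would read the capabilities filled in above through
* the standard V4L2 ioctl. The device path is an assumption.
*/
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
struct v4l2_capability cap;
int fd = open("/dev/video0", O_RDWR);
if (fd < 0)
return 1;
memset(&cap, 0, sizeof(cap));
if (ioctl(fd, VIDIOC_QUERYCAP, &cap) == 0)
printf("driver: %s, card: %s\n", cap.driver, cap.card);
return 0;
}
#endif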
static int vino_enum_input(struct file *file, void *__fh,
struct v4l2_input *i)
{
struct vino_channel_settings *vcs = video_drvdata(file);
__u32 index = i->index;
int input;
dprintk("requested index = %d\n", index);
input = vino_int_enum_input(vcs, index);
if (input == VINO_INPUT_NONE)
return -EINVAL;
i->type = V4L2_INPUT_TYPE_CAMERA;
i->std = vino_inputs[input].std;
strcpy(i->name, vino_inputs[input].name);
if (input == VINO_INPUT_COMPOSITE || input == VINO_INPUT_SVIDEO)
decoder_call(video, g_input_status, &i->status);
return 0;
}
static int vino_g_input(struct file *file, void *__fh,
unsigned int *i)
{
struct vino_channel_settings *vcs = video_drvdata(file);
__u32 index;
int input;
unsigned long flags;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
input = vcs->input;
index = vino_find_input_index(vcs);
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
dprintk("input = %d\n", input);
if (input == VINO_INPUT_NONE) {
return -EINVAL;
}
*i = index;
return 0;
}
static int vino_s_input(struct file *file, void *__fh,
unsigned int i)
{
struct vino_channel_settings *vcs = video_drvdata(file);
int input;
dprintk("requested input = %d\n", i);
input = vino_int_enum_input(vcs, i);
if (input == VINO_INPUT_NONE)
return -EINVAL;
return vino_set_input(vcs, input);
}
static int vino_querystd(struct file *file, void *__fh,
v4l2_std_id *std)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long flags;
int err = 0;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
switch (vcs->input) {
case VINO_INPUT_D1:
*std = vino_inputs[vcs->input].std;
break;
case VINO_INPUT_COMPOSITE:
case VINO_INPUT_SVIDEO: {
decoder_call(video, querystd, std);
break;
}
default:
err = -EINVAL;
}
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return err;
}
static int vino_g_std(struct file *file, void *__fh,
v4l2_std_id *std)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long flags;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
*std = vino_data_norms[vcs->data_norm].std;
dprintk("current standard = %d\n", vcs->data_norm);
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return 0;
}
static int vino_s_std(struct file *file, void *__fh,
v4l2_std_id *std)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
if (!vino_is_input_owner(vcs)) {
ret = -EBUSY;
goto out;
}
/* check if the standard is valid for the current input */
if ((*std) & vino_inputs[vcs->input].std) {
dprintk("standard accepted\n");
/* change the video norm for SAA7191
* and accept NTSC for D1 (do nothing) */
if (vcs->input == VINO_INPUT_D1)
goto out;
if ((*std) & V4L2_STD_PAL) {
ret = vino_set_data_norm(vcs, VINO_DATA_NORM_PAL,
&flags);
} else if ((*std) & V4L2_STD_NTSC) {
ret = vino_set_data_norm(vcs, VINO_DATA_NORM_NTSC,
&flags);
} else if ((*std) & V4L2_STD_SECAM) {
ret = vino_set_data_norm(vcs, VINO_DATA_NORM_SECAM,
&flags);
} else {
ret = -EINVAL;
}
if (ret) {
ret = -EINVAL;
}
} else {
ret = -EINVAL;
}
out:
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return ret;
}
static int vino_enum_fmt_vid_cap(struct file *file, void *__fh,
struct v4l2_fmtdesc *fd)
{
dprintk("format index = %d\n", fd->index);
if (fd->index >= VINO_DATA_FMT_COUNT)
return -EINVAL;
dprintk("format name = %s\n", vino_data_formats[fd->index].description);
fd->pixelformat = vino_data_formats[fd->index].pixelformat;
strcpy(fd->description, vino_data_formats[fd->index].description);
return 0;
}
static int vino_try_fmt_vid_cap(struct file *file, void *__fh,
struct v4l2_format *f)
{
struct vino_channel_settings *vcs = video_drvdata(file);
struct vino_channel_settings tempvcs;
unsigned long flags;
struct v4l2_pix_format *pf = &f->fmt.pix;
dprintk("requested: w = %d, h = %d\n",
pf->width, pf->height);
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
memcpy(&tempvcs, vcs, sizeof(struct vino_channel_settings));
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
tempvcs.data_format = vino_find_data_format(pf->pixelformat);
if (tempvcs.data_format == VINO_DATA_FMT_NONE) {
tempvcs.data_format = VINO_DATA_FMT_GREY;
pf->pixelformat =
vino_data_formats[tempvcs.data_format].
pixelformat;
}
/* data format must be set before clipping/scaling */
vino_set_scaling(&tempvcs, pf->width, pf->height);
dprintk("data format = %s\n",
vino_data_formats[tempvcs.data_format].description);
pf->width = (tempvcs.clipping.right - tempvcs.clipping.left) /
tempvcs.decimation;
pf->height = (tempvcs.clipping.bottom - tempvcs.clipping.top) /
tempvcs.decimation;
pf->field = V4L2_FIELD_INTERLACED;
pf->bytesperline = tempvcs.line_size;
pf->sizeimage = tempvcs.line_size *
(tempvcs.clipping.bottom - tempvcs.clipping.top) /
tempvcs.decimation;
pf->colorspace =
vino_data_formats[tempvcs.data_format].colorspace;
pf->priv = 0;
return 0;
}
static int vino_g_fmt_vid_cap(struct file *file, void *__fh,
struct v4l2_format *f)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long flags;
struct v4l2_pix_format *pf = &f->fmt.pix;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
pf->width = (vcs->clipping.right - vcs->clipping.left) /
vcs->decimation;
pf->height = (vcs->clipping.bottom - vcs->clipping.top) /
vcs->decimation;
pf->pixelformat =
vino_data_formats[vcs->data_format].pixelformat;
pf->field = V4L2_FIELD_INTERLACED;
pf->bytesperline = vcs->line_size;
pf->sizeimage = vcs->line_size *
(vcs->clipping.bottom - vcs->clipping.top) /
vcs->decimation;
pf->colorspace =
vino_data_formats[vcs->data_format].colorspace;
pf->priv = 0;
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return 0;
}
static int vino_s_fmt_vid_cap(struct file *file, void *__fh,
struct v4l2_format *f)
{
struct vino_channel_settings *vcs = video_drvdata(file);
int data_format;
unsigned long flags;
struct v4l2_pix_format *pf = &f->fmt.pix;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
data_format = vino_find_data_format(pf->pixelformat);
if (data_format == VINO_DATA_FMT_NONE) {
vcs->data_format = VINO_DATA_FMT_GREY;
pf->pixelformat =
vino_data_formats[vcs->data_format].
pixelformat;
} else {
vcs->data_format = data_format;
}
/* data format must be set before clipping/scaling */
vino_set_scaling(vcs, pf->width, pf->height);
dprintk("data format = %s\n",
vino_data_formats[vcs->data_format].description);
/* report the size after decimation, as try_fmt and g_fmt do */
pf->width = (vcs->clipping.right - vcs->clipping.left) /
vcs->decimation;
pf->height = (vcs->clipping.bottom - vcs->clipping.top) /
vcs->decimation;
pf->field = V4L2_FIELD_INTERLACED;
pf->bytesperline = vcs->line_size;
pf->sizeimage = vcs->line_size *
(vcs->clipping.bottom - vcs->clipping.top) /
vcs->decimation;
pf->colorspace =
vino_data_formats[vcs->data_format].colorspace;
pf->priv = 0;
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return 0;
}
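/*
* Illustrative userspace sketch (compiled out, not kernel code):
* negotiating a capture format through the try/set handlers above.
* The requested size is a hypothetical value; the driver adjusts it
* to what clipping and decimation can actually produce and writes the
* result back into the structure.
*/
#if 0
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_grey_format(int fd, unsigned int w, unsigned int h)
{
struct v4l2_format f;
memset(&f, 0, sizeof(f));
f.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
f.fmt.pix.width = w;
f.fmt.pix.height = h;
f.fmt.pix.pixelformat = V4L2_PIX_FMT_GREY;
f.fmt.pix.field = V4L2_FIELD_INTERLACED;
return ioctl(fd, VIDIOC_S_FMT, &f);
}
#endif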
static int vino_cropcap(struct file *file, void *__fh,
struct v4l2_cropcap *ccap)
{
struct vino_channel_settings *vcs = video_drvdata(file);
const struct vino_data_norm *norm;
unsigned long flags;
switch (ccap->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
norm = &vino_data_norms[vcs->data_norm];
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
ccap->bounds.left = 0;
ccap->bounds.top = 0;
ccap->bounds.width = norm->width;
ccap->bounds.height = norm->height;
memcpy(&ccap->defrect, &ccap->bounds,
sizeof(struct v4l2_rect));
ccap->pixelaspect.numerator = 1;
ccap->pixelaspect.denominator = 1;
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
default:
return -EINVAL;
}
return 0;
}
static int vino_g_crop(struct file *file, void *__fh,
struct v4l2_crop *c)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long flags;
switch (c->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
c->c.left = vcs->clipping.left;
c->c.top = vcs->clipping.top;
c->c.width = vcs->clipping.right - vcs->clipping.left;
c->c.height = vcs->clipping.bottom - vcs->clipping.top;
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
default:
return -EINVAL;
}
return 0;
}
static int vino_s_crop(struct file *file, void *__fh,
struct v4l2_crop *c)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long flags;
switch (c->type) {
case V4L2_BUF_TYPE_VIDEO_CAPTURE:
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
vino_set_clipping(vcs, c->c.left, c->c.top,
c->c.width, c->c.height);
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
break;
case V4L2_BUF_TYPE_VIDEO_OVERLAY:
default:
return -EINVAL;
}
return 0;
}
static int vino_g_parm(struct file *file, void *__fh,
struct v4l2_streamparm *sp)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long flags;
struct v4l2_captureparm *cp = &sp->parm.capture;
cp->capability = V4L2_CAP_TIMEPERFRAME;
cp->timeperframe.numerator = 1;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
cp->timeperframe.denominator = vcs->fps;
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
/* TODO: cp->readbuffers = xxx; */
return 0;
}
static int vino_s_parm(struct file *file, void *__fh,
struct v4l2_streamparm *sp)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long flags;
struct v4l2_captureparm *cp = &sp->parm.capture;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
if ((cp->timeperframe.numerator == 0) ||
(cp->timeperframe.denominator == 0)) {
/* reset framerate */
vino_set_default_framerate(vcs);
} else {
vino_set_framerate(vcs, cp->timeperframe.denominator /
cp->timeperframe.numerator);
}
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return 0;
}
static int vino_reqbufs(struct file *file, void *__fh,
struct v4l2_requestbuffers *rb)
{
struct vino_channel_settings *vcs = video_drvdata(file);
if (vcs->reading)
return -EBUSY;
/* TODO: check queue type */
if (rb->memory != V4L2_MEMORY_MMAP) {
dprintk("type not mmap\n");
return -EINVAL;
}
dprintk("count = %d\n", rb->count);
if (rb->count > 0) {
if (vino_is_capturing(vcs)) {
dprintk("busy, capturing\n");
return -EBUSY;
}
if (vino_queue_has_mapped_buffers(&vcs->fb_queue)) {
dprintk("busy, buffers still mapped\n");
return -EBUSY;
} else {
vcs->streaming = 0;
vino_queue_free(&vcs->fb_queue);
vino_queue_init(&vcs->fb_queue, &rb->count);
}
} else {
vcs->streaming = 0;
vino_capture_stop(vcs);
vino_queue_free(&vcs->fb_queue);
}
return 0;
}
static void vino_v4l2_get_buffer_status(struct vino_channel_settings *vcs,
struct vino_framebuffer *fb,
struct v4l2_buffer *b)
{
if (vino_queue_outgoing_contains(&vcs->fb_queue,
fb->id)) {
b->flags &= ~V4L2_BUF_FLAG_QUEUED;
b->flags |= V4L2_BUF_FLAG_DONE;
} else if (vino_queue_incoming_contains(&vcs->fb_queue,
fb->id)) {
b->flags &= ~V4L2_BUF_FLAG_DONE;
b->flags |= V4L2_BUF_FLAG_QUEUED;
} else {
b->flags &= ~(V4L2_BUF_FLAG_DONE |
V4L2_BUF_FLAG_QUEUED);
}
b->flags &= ~(V4L2_BUF_FLAG_TIMECODE);
if (fb->map_count > 0)
b->flags |= V4L2_BUF_FLAG_MAPPED;
b->index = fb->id;
b->memory = (vcs->fb_queue.type == VINO_MEMORY_MMAP) ?
V4L2_MEMORY_MMAP : V4L2_MEMORY_USERPTR;
b->m.offset = fb->offset;
b->bytesused = fb->data_size;
b->length = fb->size;
b->field = V4L2_FIELD_INTERLACED;
b->sequence = fb->frame_counter;
memcpy(&b->timestamp, &fb->timestamp,
sizeof(struct timeval));
// b->input ?
dprintk("buffer %d: length = %d, bytesused = %d, offset = %d\n",
fb->id, fb->size, fb->data_size, fb->offset);
}
static int vino_querybuf(struct file *file, void *__fh,
struct v4l2_buffer *b)
{
struct vino_channel_settings *vcs = video_drvdata(file);
struct vino_framebuffer *fb;
if (vcs->reading)
return -EBUSY;
/* TODO: check queue type */
if (b->index >= vino_queue_get_length(&vcs->fb_queue)) {
dprintk("invalid index = %d\n",
b->index);
return -EINVAL;
}
fb = vino_queue_get_buffer(&vcs->fb_queue,
b->index);
if (fb == NULL) {
dprintk("vino_queue_get_buffer() failed");
return -EINVAL;
}
vino_v4l2_get_buffer_status(vcs, fb, b);
return 0;
}
static int vino_qbuf(struct file *file, void *__fh,
struct v4l2_buffer *b)
{
struct vino_channel_settings *vcs = video_drvdata(file);
struct vino_framebuffer *fb;
int ret;
if (vcs->reading)
return -EBUSY;
/* TODO: check queue type */
if (b->memory != V4L2_MEMORY_MMAP) {
dprintk("type not mmap\n");
return -EINVAL;
}
fb = vino_capture_enqueue(vcs, b->index);
if (fb == NULL)
return -EINVAL;
vino_v4l2_get_buffer_status(vcs, fb, b);
if (vcs->streaming) {
ret = vino_capture_next(vcs, 1);
if (ret)
return ret;
}
return 0;
}
static int vino_dqbuf(struct file *file, void *__fh,
struct v4l2_buffer *b)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned int nonblocking = file->f_flags & O_NONBLOCK;
struct vino_framebuffer *fb;
unsigned int incoming, outgoing;
int err;
if (vcs->reading)
return -EBUSY;
/* TODO: check queue type */
err = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
if (err) {
dprintk("vino_queue_get_incoming() failed\n");
return -EINVAL;
}
err = vino_queue_get_outgoing(&vcs->fb_queue, &outgoing);
if (err) {
dprintk("vino_queue_get_outgoing() failed\n");
return -EINVAL;
}
dprintk("incoming = %d, outgoing = %d\n", incoming, outgoing);
if (outgoing == 0) {
if (incoming == 0) {
dprintk("no incoming or outgoing buffers\n");
return -EINVAL;
}
if (nonblocking) {
dprintk("non-blocking I/O was selected and "
"there are no buffers to dequeue\n");
return -EAGAIN;
}
err = vino_wait_for_frame(vcs);
if (err) {
/* no luck yet - wait for one more timeout
* period before giving up */
err = vino_wait_for_frame(vcs);
if (err) {
/* interrupted or no frames captured because of
* frame skipping */
/* vino_capture_failed(vcs); */
return -EIO;
}
}
}
fb = vino_queue_remove(&vcs->fb_queue, &b->index);
if (fb == NULL) {
dprintk("vino_queue_remove() failed\n");
return -EINVAL;
}
err = vino_check_buffer(vcs, fb);
vino_v4l2_get_buffer_status(vcs, fb, b);
if (err)
return -EIO;
return 0;
}
static int vino_streamon(struct file *file, void *__fh,
enum v4l2_buf_type i)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned int incoming;
int ret;
if (vcs->reading)
return -EBUSY;
if (vcs->streaming)
return 0;
// TODO: check queue type
if (vino_queue_get_length(&vcs->fb_queue) < 1) {
dprintk("no buffers allocated\n");
return -EINVAL;
}
ret = vino_queue_get_incoming(&vcs->fb_queue, &incoming);
if (ret) {
dprintk("vino_queue_get_incoming() failed\n");
return -EINVAL;
}
vcs->streaming = 1;
if (incoming > 0) {
ret = vino_capture_next(vcs, 1);
if (ret) {
vcs->streaming = 0;
dprintk("couldn't start capture\n");
return -EINVAL;
}
}
return 0;
}
static int vino_streamoff(struct file *file, void *__fh,
enum v4l2_buf_type i)
{
struct vino_channel_settings *vcs = video_drvdata(file);
if (vcs->reading)
return -EBUSY;
if (!vcs->streaming)
return 0;
vcs->streaming = 0;
vino_capture_stop(vcs);
return 0;
}
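/*
* Illustrative userspace capture loop (compiled out, not kernel code):
* the canonical V4L2 mmap streaming sequence served by the reqbufs,
* querybuf, qbuf, streamon and dqbuf handlers above. Note that
* vino_mmap() below insists on PROT_WRITE and MAP_SHARED mappings.
* Buffer and frame counts are arbitrary; error handling is abbreviated.
*/
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

static int capture_some_frames(const char *dev)
{
struct v4l2_requestbuffers rb;
struct v4l2_buffer b;
enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
void *maps[4];
unsigned int i, frame;
int fd = open(dev, O_RDWR);
if (fd < 0)
return -1;
memset(&rb, 0, sizeof(rb));
rb.count = 4;
rb.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
rb.memory = V4L2_MEMORY_MMAP;
if (ioctl(fd, VIDIOC_REQBUFS, &rb) < 0)
return -1;
/* map and queue each buffer */
for (i = 0; i < rb.count && i < 4; i++) {
memset(&b, 0, sizeof(b));
b.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
b.memory = V4L2_MEMORY_MMAP;
b.index = i;
if (ioctl(fd, VIDIOC_QUERYBUF, &b) < 0)
return -1;
maps[i] = mmap(NULL, b.length, PROT_READ | PROT_WRITE,
MAP_SHARED, fd, b.m.offset);
if (maps[i] == MAP_FAILED)
return -1;
if (ioctl(fd, VIDIOC_QBUF, &b) < 0)
return -1;
}
if (ioctl(fd, VIDIOC_STREAMON, &type) < 0)
return -1;
/* dequeue, consume and requeue a few frames */
for (frame = 0; frame < 10; frame++) {
memset(&b, 0, sizeof(b));
b.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
b.memory = V4L2_MEMORY_MMAP;
if (ioctl(fd, VIDIOC_DQBUF, &b) < 0)
break;
/* frame data is at maps[b.index], b.bytesused bytes */
if (ioctl(fd, VIDIOC_QBUF, &b) < 0)
break;
}
ioctl(fd, VIDIOC_STREAMOFF, &type);
return 0;
}
#endif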
static int vino_queryctrl(struct file *file, void *__fh,
struct v4l2_queryctrl *queryctrl)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long flags;
int i;
int err = 0;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
switch (vcs->input) {
case VINO_INPUT_D1:
for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
if (vino_indycam_v4l2_controls[i].id ==
queryctrl->id) {
memcpy(queryctrl,
&vino_indycam_v4l2_controls[i],
sizeof(struct v4l2_queryctrl));
queryctrl->reserved[0] = 0;
goto found;
}
}
err = -EINVAL;
break;
case VINO_INPUT_COMPOSITE:
case VINO_INPUT_SVIDEO:
for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
if (vino_saa7191_v4l2_controls[i].id ==
queryctrl->id) {
memcpy(queryctrl,
&vino_saa7191_v4l2_controls[i],
sizeof(struct v4l2_queryctrl));
queryctrl->reserved[0] = 0;
goto found;
}
}
err = -EINVAL;
break;
default:
err = -EINVAL;
}
found:
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return err;
}
static int vino_g_ctrl(struct file *file, void *__fh,
struct v4l2_control *control)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long flags;
int i;
int err = 0;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
switch (vcs->input) {
case VINO_INPUT_D1: {
err = -EINVAL;
for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
if (vino_indycam_v4l2_controls[i].id == control->id) {
err = 0;
break;
}
}
if (err)
goto out;
err = camera_call(core, g_ctrl, control);
if (err)
err = -EINVAL;
break;
}
case VINO_INPUT_COMPOSITE:
case VINO_INPUT_SVIDEO: {
err = -EINVAL;
for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
if (vino_saa7191_v4l2_controls[i].id == control->id) {
err = 0;
break;
}
}
if (err)
goto out;
err = decoder_call(core, g_ctrl, control);
if (err)
err = -EINVAL;
break;
}
default:
err = -EINVAL;
}
out:
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return err;
}
static int vino_s_ctrl(struct file *file, void *__fh,
struct v4l2_control *control)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long flags;
int i;
int err = 0;
spin_lock_irqsave(&vino_drvdata->input_lock, flags);
if (!vino_is_input_owner(vcs)) {
err = -EBUSY;
goto out;
}
switch (vcs->input) {
case VINO_INPUT_D1: {
err = -EINVAL;
for (i = 0; i < VINO_INDYCAM_V4L2_CONTROL_COUNT; i++) {
if (vino_indycam_v4l2_controls[i].id == control->id) {
err = 0;
break;
}
}
if (err)
goto out;
if (control->value < vino_indycam_v4l2_controls[i].minimum ||
control->value > vino_indycam_v4l2_controls[i].maximum) {
err = -ERANGE;
goto out;
}
err = camera_call(core, s_ctrl, control);
if (err)
err = -EINVAL;
break;
}
case VINO_INPUT_COMPOSITE:
case VINO_INPUT_SVIDEO: {
err = -EINVAL;
for (i = 0; i < VINO_SAA7191_V4L2_CONTROL_COUNT; i++) {
if (vino_saa7191_v4l2_controls[i].id == control->id) {
err = 0;
break;
}
}
if (err)
goto out;
if (control->value < vino_saa7191_v4l2_controls[i].minimum ||
control->value > vino_saa7191_v4l2_controls[i].maximum) {
err = -ERANGE;
goto out;
}
err = decoder_call(core, s_ctrl, control);
if (err)
err = -EINVAL;
break;
}
default:
err = -EINVAL;
}
out:
spin_unlock_irqrestore(&vino_drvdata->input_lock, flags);
return err;
}
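/*
* Illustrative userspace sketch (compiled out): setting a control
* through the handler above. The control ID and the file descriptor
* are hypothetical; the driver range-checks the value and forwards it
* to the camera or the decoder subdevice as appropriate.
*/
#if 0
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int set_brightness(int fd, int value)
{
struct v4l2_control control = {
.id = V4L2_CID_BRIGHTNESS,
.value = value,
};
return ioctl(fd, VIDIOC_S_CTRL, &control);
}
#endif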
/* File operations */
static int vino_open(struct file *file)
{
struct vino_channel_settings *vcs = video_drvdata(file);
int ret = 0;
dprintk("open(): channel = %c\n",
(vcs->channel == VINO_CHANNEL_A) ? 'A' : 'B');
mutex_lock(&vcs->mutex);
if (vcs->users) {
dprintk("open(): driver busy\n");
ret = -EBUSY;
goto out;
}
ret = vino_acquire_input(vcs);
if (ret) {
dprintk("open(): vino_acquire_input() failed\n");
goto out;
}
vcs->users++;
out:
mutex_unlock(&vcs->mutex);
dprintk("open(): %s!\n", ret ? "failed" : "complete");
return ret;
}
static int vino_close(struct file *file)
{
struct vino_channel_settings *vcs = video_drvdata(file);
dprintk("close():\n");
mutex_lock(&vcs->mutex);
vcs->users--;
if (!vcs->users) {
vino_release_input(vcs);
/* stop DMA and free buffers */
vino_capture_stop(vcs);
vino_queue_free(&vcs->fb_queue);
}
mutex_unlock(&vcs->mutex);
return 0;
}
static void vino_vm_open(struct vm_area_struct *vma)
{
struct vino_framebuffer *fb = vma->vm_private_data;
fb->map_count++;
dprintk("vino_vm_open(): count = %d\n", fb->map_count);
}
static void vino_vm_close(struct vm_area_struct *vma)
{
struct vino_framebuffer *fb = vma->vm_private_data;
fb->map_count--;
dprintk("vino_vm_close(): count = %d\n", fb->map_count);
}
static const struct vm_operations_struct vino_vm_ops = {
.open = vino_vm_open,
.close = vino_vm_close,
};
static int vino_mmap(struct file *file, struct vm_area_struct *vma)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned long start = vma->vm_start;
unsigned long size = vma->vm_end - vma->vm_start;
unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
struct vino_framebuffer *fb = NULL;
unsigned int i, length;
int ret = 0;
dprintk("mmap():\n");
// TODO: reject mmap if already mapped
if (mutex_lock_interruptible(&vcs->mutex))
return -EINTR;
if (vcs->reading) {
ret = -EBUSY;
goto out;
}
// TODO: check queue type
if (!(vma->vm_flags & VM_WRITE)) {
dprintk("mmap(): app bug: PROT_WRITE please\n");
ret = -EINVAL;
goto out;
}
if (!(vma->vm_flags & VM_SHARED)) {
dprintk("mmap(): app bug: MAP_SHARED please\n");
ret = -EINVAL;
goto out;
}
/* find the correct buffer using offset */
length = vino_queue_get_length(&vcs->fb_queue);
if (length == 0) {
dprintk("mmap(): queue not initialized\n");
ret = -EINVAL;
goto out;
}
for (i = 0; i < length; i++) {
fb = vino_queue_get_buffer(&vcs->fb_queue, i);
if (fb == NULL) {
dprintk("mmap(): vino_queue_get_buffer() failed\n");
ret = -EINVAL;
goto out;
}
if (fb->offset == offset)
goto found;
}
dprintk("mmap(): invalid offset = %lu\n", offset);
ret = -EINVAL;
goto out;
found:
dprintk("mmap(): buffer = %d\n", i);
if (size > (fb->desc_table.page_count * PAGE_SIZE)) {
dprintk("mmap(): failed: size = %lu > %lu\n",
size, fb->desc_table.page_count * PAGE_SIZE);
ret = -EINVAL;
goto out;
}
for (i = 0; i < fb->desc_table.page_count; i++) {
unsigned long pfn =
virt_to_phys((void *)fb->desc_table.virtual[i]) >>
PAGE_SHIFT;
if (size < PAGE_SIZE)
break;
// protection was: PAGE_READONLY
if (remap_pfn_range(vma, start, pfn, PAGE_SIZE,
vma->vm_page_prot)) {
dprintk("mmap(): remap_pfn_range() failed\n");
ret = -EAGAIN;
goto out;
}
start += PAGE_SIZE;
size -= PAGE_SIZE;
}
fb->map_count = 1;
vma->vm_flags |= VM_DONTEXPAND | VM_RESERVED;
vma->vm_flags &= ~VM_IO;
vma->vm_private_data = fb;
vma->vm_file = file;
vma->vm_ops = &vino_vm_ops;
out:
mutex_unlock(&vcs->mutex);
return ret;
}
static unsigned int vino_poll(struct file *file, poll_table *pt)
{
struct vino_channel_settings *vcs = video_drvdata(file);
unsigned int outgoing;
unsigned int ret = 0;
// lock mutex (?)
// TODO: this has to be corrected for different read modes
dprintk("poll():\n");
if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
dprintk("poll(): vino_queue_get_outgoing() failed\n");
ret = POLLERR;
goto error;
}
if (outgoing > 0)
goto over;
poll_wait(file, &vcs->fb_queue.frame_wait_queue, pt);
if (vino_queue_get_outgoing(&vcs->fb_queue, &outgoing)) {
dprintk("poll(): vino_queue_get_outgoing() failed\n");
ret = POLLERR;
goto error;
}
over:
dprintk("poll(): data %savailable\n",
(outgoing > 0) ? "" : "not ");
if (outgoing > 0)
ret = POLLIN | POLLRDNORM;
error:
return ret;
}
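/*
* Illustrative userspace sketch (compiled out): waiting for a frame
* with poll(), which ends up in vino_poll() above. A captured frame
* makes the descriptor readable (POLLIN | POLLRDNORM).
*/
#if 0
#include <poll.h>

static int wait_for_frame_fd(int fd, int timeout_ms)
{
struct pollfd pfd = { .fd = fd, .events = POLLIN };
int ret = poll(&pfd, 1, timeout_ms);
return (ret > 0 && (pfd.revents & POLLIN)) ? 0 : -1;
}
#endif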
static long vino_ioctl(struct file *file,
unsigned int cmd, unsigned long arg)
{
struct vino_channel_settings *vcs = video_drvdata(file);
long ret;
if (mutex_lock_interruptible(&vcs->mutex))
return -EINTR;
ret = video_ioctl2(file, cmd, arg);
mutex_unlock(&vcs->mutex);
return ret;
}
/* Initialization and cleanup */
/* __initdata */
static int vino_init_stage;
const struct v4l2_ioctl_ops vino_ioctl_ops = {
.vidioc_enum_fmt_vid_cap = vino_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vino_g_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vino_s_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vino_try_fmt_vid_cap,
.vidioc_querycap = vino_querycap,
.vidioc_enum_input = vino_enum_input,
.vidioc_g_input = vino_g_input,
.vidioc_s_input = vino_s_input,
.vidioc_g_std = vino_g_std,
.vidioc_s_std = vino_s_std,
.vidioc_querystd = vino_querystd,
.vidioc_cropcap = vino_cropcap,
.vidioc_s_crop = vino_s_crop,
.vidioc_g_crop = vino_g_crop,
.vidioc_s_parm = vino_s_parm,
.vidioc_g_parm = vino_g_parm,
.vidioc_reqbufs = vino_reqbufs,
.vidioc_querybuf = vino_querybuf,
.vidioc_qbuf = vino_qbuf,
.vidioc_dqbuf = vino_dqbuf,
.vidioc_streamon = vino_streamon,
.vidioc_streamoff = vino_streamoff,
.vidioc_queryctrl = vino_queryctrl,
.vidioc_g_ctrl = vino_g_ctrl,
.vidioc_s_ctrl = vino_s_ctrl,
};
static const struct v4l2_file_operations vino_fops = {
.owner = THIS_MODULE,
.open = vino_open,
.release = vino_close,
.unlocked_ioctl = vino_ioctl,
.mmap = vino_mmap,
.poll = vino_poll,
};
static struct video_device vdev_template = {
.name = "NOT SET",
.fops = &vino_fops,
.ioctl_ops = &vino_ioctl_ops,
.tvnorms = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM,
};
static void vino_module_cleanup(int stage)
{
switch (stage) {
/* no breaks here: fall through from the highest completed
init stage down to stage 0, undoing one step per case */
case 11:
video_unregister_device(vino_drvdata->b.vdev);
vino_drvdata->b.vdev = NULL;
case 10:
video_unregister_device(vino_drvdata->a.vdev);
vino_drvdata->a.vdev = NULL;
case 9:
i2c_del_adapter(&vino_i2c_adapter);
case 8:
free_irq(SGI_VINO_IRQ, NULL);
case 7:
if (vino_drvdata->b.vdev) {
video_device_release(vino_drvdata->b.vdev);
vino_drvdata->b.vdev = NULL;
}
case 6:
if (vino_drvdata->a.vdev) {
video_device_release(vino_drvdata->a.vdev);
vino_drvdata->a.vdev = NULL;
}
case 5:
/* all entries in dma_cpu dummy table have the same address */
dma_unmap_single(NULL,
vino_drvdata->dummy_desc_table.dma_cpu[0],
PAGE_SIZE, DMA_FROM_DEVICE);
dma_free_coherent(NULL, VINO_DUMMY_DESC_COUNT
* sizeof(dma_addr_t),
(void *)vino_drvdata->
dummy_desc_table.dma_cpu,
vino_drvdata->dummy_desc_table.dma);
case 4:
free_page(vino_drvdata->dummy_page);
case 3:
v4l2_device_unregister(&vino_drvdata->v4l2_dev);
case 2:
kfree(vino_drvdata);
case 1:
iounmap(vino);
case 0:
break;
default:
dprintk("vino_module_cleanup(): invalid cleanup stage = %d\n",
stage);
}
}
static int vino_probe(void)
{
unsigned long rev_id;
if (ip22_is_fullhouse()) {
printk(KERN_ERR "VINO doesn't exist in IP22 Fullhouse\n");
return -ENODEV;
}
if (!(sgimc->systemid & SGIMC_SYSID_EPRESENT)) {
printk(KERN_ERR "VINO is not found (EISA BUS not present)\n");
return -ENODEV;
}
vino = (struct sgi_vino *)ioremap(VINO_BASE, sizeof(struct sgi_vino));
if (!vino) {
printk(KERN_ERR "VINO: ioremap() failed\n");
return -EIO;
}
vino_init_stage++;
if (get_dbe(rev_id, &(vino->rev_id))) {
printk(KERN_ERR "Failed to read VINO revision register\n");
vino_module_cleanup(vino_init_stage);
return -ENODEV;
}
if (VINO_ID_VALUE(rev_id) != VINO_CHIP_ID) {
printk(KERN_ERR "Unknown VINO chip ID (Rev/ID: 0x%02lx)\n",
rev_id);
vino_module_cleanup(vino_init_stage);
return -ENODEV;
}
printk(KERN_INFO "VINO revision %ld found\n", VINO_REV_NUM(rev_id));
return 0;
}
static int vino_init(void)
{
dma_addr_t dma_dummy_address;
int err;
int i;
vino_drvdata = kzalloc(sizeof(struct vino_settings), GFP_KERNEL);
if (!vino_drvdata) {
vino_module_cleanup(vino_init_stage);
return -ENOMEM;
}
vino_init_stage++;
strlcpy(vino_drvdata->v4l2_dev.name, "vino",
sizeof(vino_drvdata->v4l2_dev.name));
err = v4l2_device_register(NULL, &vino_drvdata->v4l2_dev);
if (err)
return err;
vino_init_stage++;
/* create a dummy dma descriptor */
vino_drvdata->dummy_page = get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!vino_drvdata->dummy_page) {
vino_module_cleanup(vino_init_stage);
return -ENOMEM;
}
vino_init_stage++;
// TODO: use page_count in dummy_desc_table
vino_drvdata->dummy_desc_table.dma_cpu =
dma_alloc_coherent(NULL,
VINO_DUMMY_DESC_COUNT * sizeof(dma_addr_t),
&vino_drvdata->dummy_desc_table.dma,
GFP_KERNEL | GFP_DMA);
if (!vino_drvdata->dummy_desc_table.dma_cpu) {
vino_module_cleanup(vino_init_stage);
return -ENOMEM;
}
vino_init_stage++;
dma_dummy_address = dma_map_single(NULL,
(void *)vino_drvdata->dummy_page,
PAGE_SIZE, DMA_FROM_DEVICE);
for (i = 0; i < VINO_DUMMY_DESC_COUNT; i++) {
vino_drvdata->dummy_desc_table.dma_cpu[i] = dma_dummy_address;
}
/* initialize VINO */
vino->control = 0;
vino->a.next_4_desc = vino_drvdata->dummy_desc_table.dma;
vino->b.next_4_desc = vino_drvdata->dummy_desc_table.dma;
udelay(VINO_DESC_FETCH_DELAY);
vino->intr_status = 0;
vino->a.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT;
vino->b.fifo_thres = VINO_FIFO_THRESHOLD_DEFAULT;
return 0;
}
static int vino_init_channel_settings(struct vino_channel_settings *vcs,
unsigned int channel, const char *name)
{
vcs->channel = channel;
vcs->input = VINO_INPUT_NONE;
vcs->alpha = 0;
vcs->users = 0;
vcs->data_format = VINO_DATA_FMT_GREY;
vcs->data_norm = VINO_DATA_NORM_NTSC;
vcs->decimation = 1;
vino_set_default_clipping(vcs);
vino_set_default_framerate(vcs);
vcs->capturing = 0;
mutex_init(&vcs->mutex);
spin_lock_init(&vcs->capture_lock);
mutex_init(&vcs->fb_queue.queue_mutex);
spin_lock_init(&vcs->fb_queue.queue_lock);
init_waitqueue_head(&vcs->fb_queue.frame_wait_queue);
vcs->vdev = video_device_alloc();
if (!vcs->vdev) {
vino_module_cleanup(vino_init_stage);
return -ENOMEM;
}
vino_init_stage++;
memcpy(vcs->vdev, &vdev_template,
sizeof(struct video_device));
strcpy(vcs->vdev->name, name);
vcs->vdev->release = video_device_release;
vcs->vdev->v4l2_dev = &vino_drvdata->v4l2_dev;
video_set_drvdata(vcs->vdev, vcs);
return 0;
}
static int __init vino_module_init(void)
{
int ret;
printk(KERN_INFO "SGI VINO driver version %s\n",
VINO_MODULE_VERSION);
ret = vino_probe();
if (ret)
return ret;
ret = vino_init();
if (ret)
return ret;
/* initialize data structures */
spin_lock_init(&vino_drvdata->vino_lock);
spin_lock_init(&vino_drvdata->input_lock);
ret = vino_init_channel_settings(&vino_drvdata->a, VINO_CHANNEL_A,
vino_vdev_name_a);
if (ret)
return ret;
ret = vino_init_channel_settings(&vino_drvdata->b, VINO_CHANNEL_B,
vino_vdev_name_b);
if (ret)
return ret;
/* initialize hardware and register V4L devices */
ret = request_irq(SGI_VINO_IRQ, vino_interrupt, 0,
vino_driver_description, NULL);
if (ret) {
printk(KERN_ERR "VINO: requesting IRQ %02d failed\n",
SGI_VINO_IRQ);
vino_module_cleanup(vino_init_stage);
return -EAGAIN;
}
vino_init_stage++;
ret = i2c_add_adapter(&vino_i2c_adapter);
if (ret) {
printk(KERN_ERR "VINO I2C bus registration failed\n");
vino_module_cleanup(vino_init_stage);
return ret;
}
i2c_set_adapdata(&vino_i2c_adapter, &vino_drvdata->v4l2_dev);
vino_init_stage++;
ret = video_register_device(vino_drvdata->a.vdev,
VFL_TYPE_GRABBER, -1);
if (ret < 0) {
printk(KERN_ERR "VINO channel A Video4Linux-device "
"registration failed\n");
vino_module_cleanup(vino_init_stage);
return -EINVAL;
}
vino_init_stage++;
ret = video_register_device(vino_drvdata->b.vdev,
VFL_TYPE_GRABBER, -1);
if (ret < 0) {
printk(KERN_ERR "VINO channel B Video4Linux-device "
"registration failed\n");
vino_module_cleanup(vino_init_stage);
return -EINVAL;
}
vino_init_stage++;
vino_drvdata->decoder =
v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
"saa7191", 0, I2C_ADDRS(0x45));
vino_drvdata->camera =
v4l2_i2c_new_subdev(&vino_drvdata->v4l2_dev, &vino_i2c_adapter,
"indycam", 0, I2C_ADDRS(0x2b));
dprintk("init complete!\n");
return 0;
}
static void __exit vino_module_exit(void)
{
dprintk("exiting, stage = %d ...\n", vino_init_stage);
vino_module_cleanup(vino_init_stage);
dprintk("cleanup complete, exit!\n");
}
module_init(vino_module_init);
module_exit(vino_module_exit);
| gpl-2.0 |
xInterlopeRx/android_kernel_samsung_lt02ltespr | arch/x86/platform/ce4100/ce4100.c | 4951 | 3944 | /*
* Intel CE4100 platform specific setup code
*
* (C) Copyright 2010 Intel Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; version 2
* of the License.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/serial_reg.h>
#include <linux/serial_8250.h>
#include <asm/ce4100.h>
#include <asm/prom.h>
#include <asm/setup.h>
#include <asm/i8259.h>
#include <asm/io.h>
#include <asm/io_apic.h>
static int ce4100_i8042_detect(void)
{
return 0;
}
#ifdef CONFIG_SERIAL_8250
static unsigned int mem_serial_in(struct uart_port *p, int offset)
{
offset = offset << p->regshift;
return readl(p->membase + offset);
}
/*
* The UART Tx interrupts are not set under some conditions and therefore serial
* transmission hangs. This is a silicon issue and has not been root caused. The
* workaround checks the UART_LSR_THRE and UART_LSR_TEMT bits of the LSR
* register in the interrupt handler; if at least one of these two bits is
* set, the transmit request is processed. Without this workaround, serial
* transmission may hang. This workaround is for errata number 9 in Errata - B step.
*/
static unsigned int ce4100_mem_serial_in(struct uart_port *p, int offset)
{
unsigned int ret, ier, lsr;
if (offset == UART_IIR) {
offset = offset << p->regshift;
ret = readl(p->membase + offset);
if (ret & UART_IIR_NO_INT) {
/* see if the TX interrupt should really have been set */
ier = mem_serial_in(p, UART_IER);
/* see if the UART's XMIT interrupt is enabled */
if (ier & UART_IER_THRI) {
lsr = mem_serial_in(p, UART_LSR);
/* now check to see if the UART should be
generating an interrupt (but isn't) */
if (lsr & (UART_LSR_THRE | UART_LSR_TEMT))
ret &= ~UART_IIR_NO_INT;
}
}
} else
ret = mem_serial_in(p, offset);
return ret;
}
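/*
* The workaround above, restated as a predicate (compiled out, for
* illustration only): a TX interrupt is considered pending when the
* THR-empty interrupt is enabled and the LSR reports the transmitter
* holding register or the transmitter itself as empty.
*/
#if 0
static bool ce4100_tx_int_pending(struct uart_port *p)
{
unsigned int ier = mem_serial_in(p, UART_IER);
unsigned int lsr;
if (!(ier & UART_IER_THRI))
return false;
lsr = mem_serial_in(p, UART_LSR);
return (lsr & (UART_LSR_THRE | UART_LSR_TEMT)) != 0;
}
#endif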
static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
{
offset = offset << p->regshift;
writel(value, p->membase + offset);
}
static void ce4100_serial_fixup(int port, struct uart_port *up,
unsigned short *capabilities)
{
#ifdef CONFIG_EARLY_PRINTK
/*
* Override the legacy port configuration that comes from
* asm/serial.h. Using the ioport driver and then switching to the
* PCI memory-mapped driver hangs the IOAPIC.
*/
if (up->iotype != UPIO_MEM32) {
up->uartclk = 14745600;
up->mapbase = 0xdffe0200;
set_fixmap_nocache(FIX_EARLYCON_MEM_BASE,
up->mapbase & PAGE_MASK);
up->membase =
(void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
up->membase += up->mapbase & ~PAGE_MASK;
up->iotype = UPIO_MEM32;
up->regshift = 2;
}
#endif
up->iobase = 0;
up->serial_in = ce4100_mem_serial_in;
up->serial_out = ce4100_mem_serial_out;
*capabilities |= (1 << 12);
}
static __init void sdv_serial_fixup(void)
{
serial8250_set_isa_configurator(ce4100_serial_fixup);
}
#else
static inline void sdv_serial_fixup(void) { }
#endif
static void __init sdv_arch_setup(void)
{
sdv_serial_fixup();
}
#ifdef CONFIG_X86_IO_APIC
static void __cpuinit sdv_pci_init(void)
{
x86_of_pci_init();
/* We can't set this earlier, because we need to calibrate the timer */
legacy_pic = &null_legacy_pic;
}
#endif
/*
* CE4100 specific x86_init function overrides and early setup
* calls.
*/
void __init x86_ce4100_early_setup(void)
{
x86_init.oem.arch_setup = sdv_arch_setup;
x86_platform.i8042_detect = ce4100_i8042_detect;
x86_init.resources.probe_roms = x86_init_noop;
x86_init.mpparse.get_smp_config = x86_init_uint_noop;
x86_init.mpparse.find_smp_config = x86_init_noop;
x86_init.pci.init = ce4100_pci_init;
#ifdef CONFIG_X86_IO_APIC
x86_init.pci.init_irq = sdv_pci_init;
x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck;
#endif
}
| gpl-2.0 |
bigzz/sc7715-kernel | fs/reiserfs/tail_conversion.c | 7767 | 9295 | /*
* Copyright 1999 Hans Reiser, see reiserfs/README for licensing and copyright details
*/
#include <linux/time.h>
#include <linux/pagemap.h>
#include <linux/buffer_head.h>
#include "reiserfs.h"
/* Access to the tail: anyone about to read the tail must make sure that no
conversion is running. direct2indirect and indirect2direct cannot run
concurrently. */
/* Converts direct items to an unformatted node. Panics if the file has no
tail. Returns -ENOSPC if there is no disk space for the conversion. */
/* path points to the first direct item of the file regardless of how many
of them there are */
int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
struct treepath *path, struct buffer_head *unbh,
loff_t tail_offset)
{
struct super_block *sb = inode->i_sb;
struct buffer_head *up_to_date_bh;
struct item_head *p_le_ih = PATH_PITEM_HEAD(path);
unsigned long total_tail = 0;
struct cpu_key end_key; /* Key to search for the last byte of the
converted item. */
struct item_head ind_ih; /* new indirect item to be inserted or
key of unfm pointer to be pasted */
int blk_size, retval; /* returned value for reiserfs_insert_item and clones */
unp_t unfm_ptr; /* Handle on an unformatted node
that will be inserted in the
tree. */
BUG_ON(!th->t_trans_id);
REISERFS_SB(sb)->s_direct2indirect++;
blk_size = sb->s_blocksize;
/* and key to search for append or insert pointer to the new
unformatted node. */
copy_item_head(&ind_ih, p_le_ih);
set_le_ih_k_offset(&ind_ih, tail_offset);
set_le_ih_k_type(&ind_ih, TYPE_INDIRECT);
/* Set the key to search for the place for new unfm pointer */
make_cpu_key(&end_key, inode, tail_offset, TYPE_INDIRECT, 4);
/* FIXME: we could avoid this */
if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) {
reiserfs_error(sb, "PAP-14030",
"pasted or inserted byte exists in "
"the tree %K. Use fsck to repair.", &end_key);
pathrelse(path);
return -EIO;
}
p_le_ih = PATH_PITEM_HEAD(path);
unfm_ptr = cpu_to_le32(unbh->b_blocknr);
if (is_statdata_le_ih(p_le_ih)) {
/* Insert new indirect item. */
set_ih_free_space(&ind_ih, 0); /* to be deleted in the near future */
put_ih_item_len(&ind_ih, UNFM_P_SIZE);
PATH_LAST_POSITION(path)++;
retval =
reiserfs_insert_item(th, path, &end_key, &ind_ih, inode,
(char *)&unfm_ptr);
} else {
/* Paste into last indirect item of an object. */
retval = reiserfs_paste_into_item(th, path, &end_key, inode,
(char *)&unfm_ptr,
UNFM_P_SIZE);
}
if (retval) {
return retval;
}
// note: from here there are two keys which have matching first
// three key components. They only differ by the fourth one.
/* Set the key to search for the direct items of the file */
make_cpu_key(&end_key, inode, max_reiserfs_offset(inode), TYPE_DIRECT,
4);
/* Move bytes from the direct items to the new unformatted node
and delete them. */
while (1) {
int tail_size;
/* end_key.k_offset is set so that we will always find the
last item of the file */
if (search_for_position_by_key(sb, &end_key, path) ==
POSITION_FOUND)
reiserfs_panic(sb, "PAP-14050",
"direct item (%K) not found", &end_key);
p_le_ih = PATH_PITEM_HEAD(path);
RFALSE(!is_direct_le_ih(p_le_ih),
"vs-14055: direct item expected(%K), found %h",
&end_key, p_le_ih);
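/* Annotation: key offsets are 1-based, so the expression below gives the
item's end position within its block -- i.e. how many tail bytes the
block holds up through this item; the loop stops once a delete removes
exactly that many bytes. */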
tail_size = (le_ih_k_offset(p_le_ih) & (blk_size - 1))
+ ih_item_len(p_le_ih) - 1;
/* we only send the unbh pointer if the buffer is not up to date.
** this avoids overwriting good data from writepage() with old data
** from the disk or buffer cache
** Special case: unbh->b_page will be NULL if we are coming through
** DIRECT_IO handler here.
*/
if (!unbh->b_page || buffer_uptodate(unbh)
|| PageUptodate(unbh->b_page)) {
up_to_date_bh = NULL;
} else {
up_to_date_bh = unbh;
}
retval = reiserfs_delete_item(th, path, &end_key, inode,
up_to_date_bh);
total_tail += retval;
if (tail_size == retval)
// done: file does not have direct items anymore
break;
}
/* if we've copied bytes from disk into the page, we need to zero
** out the unused part of the block (it was not up to date before)
*/
if (up_to_date_bh) {
unsigned pgoff =
(tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
char *kaddr = kmap_atomic(up_to_date_bh->b_page);
memset(kaddr + pgoff, 0, blk_size - total_tail);
kunmap_atomic(kaddr);
}
REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
return 0;
}
/* stolen from fs/buffer.c */
void reiserfs_unmap_buffer(struct buffer_head *bh)
{
lock_buffer(bh);
if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
BUG();
}
clear_buffer_dirty(bh);
/* Remove the buffer from whatever list it belongs to. We are mostly
interested in removing it from the per-sb j_dirty_buffers list, to avoid
a BUG() on an attempt to write an unmapped buffer */
if ((!list_empty(&bh->b_assoc_buffers) || bh->b_private) && bh->b_page) {
struct inode *inode = bh->b_page->mapping->host;
struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb);
spin_lock(&j->j_dirty_buffers_lock);
list_del_init(&bh->b_assoc_buffers);
reiserfs_free_jh(bh);
spin_unlock(&j->j_dirty_buffers_lock);
}
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
bh->b_bdev = NULL;
unlock_buffer(bh);
}
/* This first locks the inode (neither reads nor syncs are permitted),
reads the tail through the page cache, and inserts a direct item. When
the direct item has been inserted successfully, the inode is left
locked. The return value is always what we expect from it (the number
of cut bytes). But when the tail remains in the unformatted node, we
set mode to SKIP_BALANCING and unlock the inode */
int indirect2direct(struct reiserfs_transaction_handle *th,
struct inode *inode, struct page *page,
struct treepath *path, /* path to the indirect item. */
const struct cpu_key *item_key, /* Key to look for
* unformatted node
* pointer to be cut. */
loff_t n_new_file_size, /* New file size. */
char *mode)
{
struct super_block *sb = inode->i_sb;
struct item_head s_ih;
unsigned long block_size = sb->s_blocksize;
char *tail;
int tail_len, round_tail_len;
loff_t pos, pos1; /* position of first byte of the tail */
struct cpu_key key;
BUG_ON(!th->t_trans_id);
REISERFS_SB(sb)->s_indirect2direct++;
*mode = M_SKIP_BALANCING;
/* store item head path points to. */
copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
tail_len = (n_new_file_size & (block_size - 1));
if (get_inode_sd_version(inode) == STAT_DATA_V2)
round_tail_len = ROUND_UP(tail_len);
else
round_tail_len = tail_len;
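/* Annotation: pos is the 0-based byte offset of the first byte covered by
the last unformatted-node pointer of this indirect item: the item's
1-based key offset minus one, plus one block per pointer before the
last. */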
pos =
le_ih_k_offset(&s_ih) - 1 + (ih_item_len(&s_ih) / UNFM_P_SIZE -
1) * sb->s_blocksize;
pos1 = pos;
// we are protected by i_mutex: the tail cannot disappear, nor can
// an append be done either;
// we are in truncate or packing the tail in file_release
tail = (char *)kmap(page); /* this can schedule */
if (path_changed(&s_ih, path)) {
/* re-search indirect item */
if (search_for_position_by_key(sb, item_key, path)
== POSITION_NOT_FOUND)
reiserfs_panic(sb, "PAP-5520",
"item to be converted %K does not exist",
item_key);
copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
#ifdef CONFIG_REISERFS_CHECK
pos = le_ih_k_offset(&s_ih) - 1 +
(ih_item_len(&s_ih) / UNFM_P_SIZE -
1) * sb->s_blocksize;
if (pos != pos1)
reiserfs_panic(sb, "vs-5530", "tail position "
"changed while we were reading it");
#endif
}
/* Set direct item header to insert. */
make_le_item_head(&s_ih, NULL, get_inode_item_key_version(inode),
pos1 + 1, TYPE_DIRECT, round_tail_len,
0xffff /*ih_free_space */ );
/* we want a pointer to the first byte of the tail in the page.
** the page was locked and this part of the page was up to date when
** indirect2direct was called, so we know the bytes are still valid
*/
tail = tail + (pos & (PAGE_CACHE_SIZE - 1));
PATH_LAST_POSITION(path)++;
key = *item_key;
set_cpu_key_k_type(&key, TYPE_DIRECT);
key.key_length = 4;
/* Insert tail as new direct item in the tree */
if (reiserfs_insert_item(th, path, &key, &s_ih, inode,
tail ? tail : NULL) < 0) {
/* No disk space, so we cannot convert the last unformatted node
to a direct item. In this case we used to adjust the
indirect item's ih_free_space. Now ih_free_space is not
used, and it would be ideal to write zeros to the corresponding
unformatted node. For now i_size is considered as a guard against
going past the end of the file */
kunmap(page);
return block_size - round_tail_len;
}
kunmap(page);
/* make sure to get the i_blocks changes from reiserfs_insert_item */
reiserfs_update_sd(th, inode);
// note: we now have the same situation as in the direct2indirect
// conversion above: there are two keys which have matching first three
// key components. They only differ by the fourth one.
/* We have inserted new direct item and must remove last
unformatted node. */
*mode = M_CUT;
/* we store position of first direct item in the in-core inode */
/* mark_file_with_tail (inode, pos1 + 1); */
REISERFS_I(inode)->i_first_direct_byte = pos1 + 1;
return block_size - round_tail_len;
}
| gpl-2.0 |
Rover-Yu/ali_kernel | arch/ia64/xen/xencomm.c | 9303 | 2788 | /*
* Copyright (C) 2006 Hollis Blanchard <hollisb@us.ibm.com>, IBM Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/mm.h>
static unsigned long kernel_virtual_offset;
static int is_xencomm_initialized;
/* For Xen early printk. It uses the console I/O hypercall, which in turn
* uses xencomm. However, early printk may use it before xencomm is
* initialized.
*/
int
xencomm_is_initialized(void)
{
return is_xencomm_initialized;
}
void
xencomm_initialize(void)
{
kernel_virtual_offset = KERNEL_START - ia64_tpa(KERNEL_START);
is_xencomm_initialized = 1;
}
/* Translate virtual address to physical address. */
unsigned long
xencomm_vtop(unsigned long vaddr)
{
struct page *page;
struct vm_area_struct *vma;
if (vaddr == 0)
return 0UL;
if (REGION_NUMBER(vaddr) == 5) {
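/* Annotation: region 5 on ia64 holds virtually mapped kernel memory, so
the translation below walks the kernel page tables by hand. */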
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *ptep;
/* On ia64, TASK_SIZE refers to current. It is not initialized
during boot.
Furthermore the kernel is relocatable and __pa() doesn't
work on such addresses. */
if (vaddr >= KERNEL_START
&& vaddr < (KERNEL_START + KERNEL_TR_PAGE_SIZE))
return vaddr - kernel_virtual_offset;
/* In kernel area -- virtually mapped. */
pgd = pgd_offset_k(vaddr);
if (pgd_none(*pgd) || pgd_bad(*pgd))
return ~0UL;
pud = pud_offset(pgd, vaddr);
if (pud_none(*pud) || pud_bad(*pud))
return ~0UL;
pmd = pmd_offset(pud, vaddr);
if (pmd_none(*pmd) || pmd_bad(*pmd))
return ~0UL;
ptep = pte_offset_kernel(pmd, vaddr);
if (!ptep)
return ~0UL;
return (pte_val(*ptep) & _PFN_MASK) | (vaddr & ~PAGE_MASK);
}
if (vaddr > TASK_SIZE) {
/* percpu variables */
if (REGION_NUMBER(vaddr) == 7 &&
REGION_OFFSET(vaddr) >= (1ULL << IA64_MAX_PHYS_BITS))
/* the original statement discarded the result of ia64_tpa(),
making this branch a no-op; return the translation */
return ia64_tpa(vaddr);
/* kernel address */
return __pa(vaddr);
}
/* XXX double-check (lack of) locking */
vma = find_extend_vma(current->mm, vaddr);
if (!vma)
return ~0UL;
/* We assume the page is modified. */
page = follow_page(vma, vaddr, FOLL_WRITE | FOLL_TOUCH);
if (!page)
return ~0UL;
return (page_to_pfn(page) << PAGE_SHIFT) | (vaddr & ~PAGE_MASK);
}
| gpl-2.0 |
ztc1997/android_kernel_sony_msm8660 | arch/s390/mm/page-states.c | 10071 | 2319 | /*
* Copyright IBM Corp. 2008
*
* Guest page hinting for unused pages.
*
* Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/init.h>
#define ESSA_SET_STABLE 1
#define ESSA_SET_UNUSED 2
static int cmma_flag = 1;
static int __init cmma(char *str)
{
char *parm;
parm = strstrip(str);
if (strcmp(parm, "yes") == 0 || strcmp(parm, "on") == 0) {
cmma_flag = 1;
return 1;
}
cmma_flag = 0;
if (strcmp(parm, "no") == 0 || strcmp(parm, "off") == 0)
return 1;
return 0;
}
__setup("cmma=", cmma);
void __init cmma_init(void)
{
register unsigned long tmp asm("0") = 0;
register int rc asm("1") = -EOPNOTSUPP;
if (!cmma_flag)
return;
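/* Probe for the instruction (opcode 0xb9ab, believed to be ESSA --
Extract and Set Storage Attributes): if executing it faults, the
exception table entry skips the "la %0,0" and rc stays -EOPNOTSUPP,
which disables CMMA below. */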
asm volatile(
" .insn rrf,0xb9ab0000,%1,%1,0,0\n"
"0: la %0,0\n"
"1:\n"
EX_TABLE(0b,1b)
: "+&d" (rc), "+&d" (tmp));
if (rc)
cmma_flag = 0;
}
static inline void set_page_unstable(struct page *page, int order)
{
int i, rc;
for (i = 0; i < (1 << order); i++)
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
: "=&d" (rc)
: "a" (page_to_phys(page + i)),
"i" (ESSA_SET_UNUSED));
}
void arch_free_page(struct page *page, int order)
{
if (!cmma_flag)
return;
set_page_unstable(page, order);
}
static inline void set_page_stable(struct page *page, int order)
{
int i, rc;
for (i = 0; i < (1 << order); i++)
asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0"
: "=&d" (rc)
: "a" (page_to_phys(page + i)),
"i" (ESSA_SET_STABLE));
}
void arch_alloc_page(struct page *page, int order)
{
if (!cmma_flag)
return;
set_page_stable(page, order);
}
void arch_set_page_states(int make_stable)
{
unsigned long flags, order, t;
struct list_head *l;
struct page *page;
struct zone *zone;
if (!cmma_flag)
return;
if (make_stable)
drain_local_pages(NULL);
for_each_populated_zone(zone) {
spin_lock_irqsave(&zone->lock, flags);
for_each_migratetype_order(order, t) {
list_for_each(l, &zone->free_area[order].free_list[t]) {
page = list_entry(l, struct page, lru);
if (make_stable)
set_page_stable(page, order);
else
set_page_unstable(page, order);
}
}
spin_unlock_irqrestore(&zone->lock, flags);
}
}
| gpl-2.0 |
turtlekiosk/coms4118 | drivers/video/i810/i810_dvt.c | 14679 | 11794 | /*-*- linux-c -*-
* linux/drivers/video/i810_dvt.c -- Intel 810 Discrete Video Timings (Intel)
*
* Copyright (C) 2001 Antonino Daplas<adaplas@pol.net>
* All Rights Reserved
*
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/kernel.h>
#include "i810_regs.h"
#include "i810.h"
struct mode_registers std_modes[] = {
/* 640x480 @ 60Hz */
{ 25000, 0x0013, 0x0003, 0x40, 0x5F, 0x4F, 0x50, 0x82, 0x51, 0x9D,
0x0B, 0x10, 0x40, 0xE9, 0x0B, 0xDF, 0x50, 0xE7, 0x04, 0x02,
0x01, 0x01, 0x01, 0x00, 0x01, 0x22002000, 0x22004000, 0x22006000,
0x22002000, 0x22004000, 0x22006000, 0xC0 },
/* 640x480 @ 70Hz */
{ 28000, 0x0053, 0x0010, 0x40, 0x61, 0x4F, 0x4F, 0x85, 0x52, 0x9A,
0xF2, 0x10, 0x40, 0xE0, 0x03, 0xDF, 0x50, 0xDF, 0xF3, 0x01,
0x01, 0x01, 0x01, 0x00, 0x01, 0x22002000, 0x22004000, 0x22005000,
0x22002000, 0x22004000, 0x22005000, 0xC0 },
/* 640x480 @ 72Hz */
{ 31000, 0x0013, 0x0002, 0x40, 0x63, 0x4F, 0x4F, 0x87, 0x52, 0x97,
0x06, 0x0F, 0x40, 0xE8, 0x0B, 0xDF, 0x50, 0xDF, 0x07, 0x02,
0x01, 0x01, 0x01, 0x00, 0x01, 0x22003000, 0x22005000, 0x22007000,
0x22003000, 0x22005000, 0x22007000, 0xC0 },
/* 640x480 @ 75Hz */
{ 31000, 0x0013, 0x0002, 0x40, 0x64, 0x4F, 0x4F, 0x88, 0x51, 0x99,
0xF2, 0x10, 0x40, 0xE0, 0x03, 0xDF, 0x50, 0xDF, 0xF3, 0x01,
0x01, 0x01, 0x01, 0x00, 0x01, 0x22003000, 0x22005000, 0x22007000,
0x22003000, 0x22005000, 0x22007000, 0xC0 },
/* 640x480 @ 85Hz */
{ 36000, 0x0010, 0x0001, 0x40, 0x63, 0x4F, 0x4F, 0x87, 0x56, 0x9D,
0xFB, 0x10, 0x40, 0xE0, 0x03, 0xDF, 0x50, 0xDF, 0xFC, 0x01,
0x01, 0x01, 0x01, 0x00, 0x01, 0x22003000, 0x22005000, 0x22107000,
0x22003000, 0x22005000, 0x22107000, 0xC0 },
/* 800x600 @ 56Hz */
{ 36000, 0x0010, 0x0001, 0x40, 0x7B, 0x63, 0x63, 0x9F, 0x66, 0x8F,
0x6F, 0x10, 0x40, 0x58, 0x0A, 0x57, 0xC8, 0x57, 0x70, 0x02,
0x02, 0x02, 0x02, 0x00, 0x01, 0x22003000, 0x22005000, 0x22107000,
0x22003000, 0x22005000, 0x22107000, 0x00 },
/* 800x600 @ 60Hz */
{ 40000, 0x0008, 0x0001, 0x30, 0x7F, 0x63, 0x63, 0x83, 0x68, 0x18,
0x72, 0x10, 0x40, 0x58, 0x0C, 0x57, 0xC8, 0x57, 0x73, 0x02,
0x02, 0x02, 0x02, 0x00, 0x00, 0x22003000, 0x22006000, 0x22108000,
0x22003000, 0x22006000, 0x22108000, 0x00 },
/* 800x600 @ 70Hz */
{ 45000, 0x0054, 0x0015, 0x30, 0x7D, 0x63, 0x63, 0x81, 0x68, 0x12,
0x6f, 0x10, 0x40, 0x58, 0x0b, 0x57, 0x64, 0x57, 0x70, 0x02,
0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22007000, 0x2210A000,
0x22004000, 0x22007000, 0x2210A000, 0x00 },
/* 800x600 @ 72Hz */
{ 50000, 0x0017, 0x0004, 0x30, 0x7D, 0x63, 0x63, 0x81, 0x6A, 0x19,
0x98, 0x10, 0x40, 0x7C, 0x02, 0x57, 0xC8, 0x57, 0x99, 0x02,
0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22007000, 0x2210A000,
0x22004000, 0x22007000, 0x2210A000, 0x00 },
/* 800x600 @ 75Hz */
{ 49000, 0x001F, 0x0006, 0x30, 0x7F, 0x63, 0x63, 0x83, 0x65, 0x0F,
0x6F, 0x10, 0x40, 0x58, 0x0B, 0x57, 0xC8, 0x57, 0x70, 0x02,
0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22007000, 0x2210B000,
0x22004000, 0x22007000, 0x2210B000, 0x00 },
/* 800x600 @ 85Hz */
{ 56000, 0x0049, 0x000E, 0x30, 0x7E, 0x63, 0x63, 0x82, 0x67, 0x0F,
0x75, 0x10, 0x40, 0x58, 0x0B, 0x57, 0xC8, 0x57, 0x76, 0x02,
0x02, 0x02, 0x02, 0x00, 0x00, 0x22004000, 0x22108000, 0x2210b000,
0x22004000, 0x22108000, 0x2210b000, 0x00 },
/* 1024x768 @ 60Hz */
{ 65000, 0x003F, 0x000A, 0x30, 0xA3, 0x7F, 0x7F, 0x87, 0x83, 0x94,
0x24, 0x10, 0x40, 0x02, 0x08, 0xFF, 0x80, 0xFF, 0x25, 0x03,
0x02, 0x03, 0x02, 0x00, 0x00, 0x22005000, 0x22109000, 0x2220D000,
0x22005000, 0x22109000, 0x2220D000, 0xC0 },
/* 1024x768 @ 70Hz */
{ 75000, 0x0017, 0x0002, 0x30, 0xA1, 0x7F, 0x7F, 0x85, 0x82, 0x93,
0x24, 0x10, 0x40, 0x02, 0x08, 0xFF, 0x80, 0xFF, 0x25, 0x03,
0x02, 0x03, 0x02, 0x00, 0x00, 0x22005000, 0x2210A000, 0x2220F000,
0x22005000, 0x2210A000, 0x2220F000, 0xC0 },
/* 1024x768 @ 75Hz */
{ 78000, 0x0050, 0x0017, 0x20, 0x9F, 0x7F, 0x7F, 0x83, 0x81, 0x8D,
0x1E, 0x10, 0x40, 0x00, 0x03, 0xFF, 0x80, 0xFF, 0x1F, 0x03,
0x02, 0x03, 0x02, 0x00, 0x00, 0x22006000, 0x2210B000, 0x22210000,
0x22006000, 0x2210B000, 0x22210000, 0x00 },
/* 1024x768 @ 85Hz */
{ 94000, 0x003D, 0x000E, 0x20, 0xA7, 0x7F, 0x7F, 0x8B, 0x85, 0x91,
0x26, 0x10, 0x40, 0x00, 0x03, 0xFF, 0x80, 0xFF, 0x27, 0x03,
0x02, 0x03, 0x02, 0x00, 0x00, 0x22007000, 0x2220E000, 0x22212000,
0x22007000, 0x2220E000, 0x22212000, 0x00 },
/* 1152x864 @ 60Hz */
{ 80000, 0x0008, 0x0001, 0x20, 0xB3, 0x8F, 0x8F, 0x97, 0x93, 0x9f,
0x87, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5f, 0x88, 0x03,
0x03, 0x03, 0x03, 0x00, 0x00, 0x2220C000, 0x22210000, 0x22415000,
0x2220C000, 0x22210000, 0x22415000, 0x00 },
/* 1152x864 @ 70Hz */
{ 96000, 0x000a, 0x0001, 0x20, 0xbb, 0x8F, 0x8F, 0x9f, 0x98, 0x87,
0x82, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x83, 0x03,
0x03, 0x03, 0x03, 0x00, 0x00, 0x22107000, 0x22210000, 0x22415000,
0x22107000, 0x22210000, 0x22415000, 0x00 },
/* 1152x864 @ 72Hz */
{ 99000, 0x001f, 0x0006, 0x20, 0xbb, 0x8F, 0x8F, 0x9f, 0x98, 0x87,
0x83, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x84, 0x03,
0x03, 0x03, 0x03, 0x00, 0x00, 0x22107000, 0x22210000, 0x22415000,
0x22107000, 0x22210000, 0x22415000, 0x00 },
/* 1152x864 @ 75Hz */
{ 108000, 0x0010, 0x0002, 0x20, 0xC3, 0x8F, 0x8F, 0x87, 0x97, 0x07,
0x82, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x83, 0x03,
0x03, 0x03, 0x03, 0x00, 0x01, 0x22107000, 0x22210000, 0x22415000,
0x22107000, 0x22210000, 0x22415000, 0x00 },
/* 1152x864 @ 85Hz */
{ 121000, 0x006D, 0x0014, 0x20, 0xc0, 0x8F, 0x8F, 0x84, 0x97, 0x07,
0x93, 0x10, 0x40, 0x60, 0x03, 0x5F, 0x90, 0x5F, 0x94, 0x03,
0x03, 0x03, 0x03, 0x00, 0x01, 0x2220C000, 0x22210000, 0x22415000,
0x2220C000, 0x22210000, 0x22415000, 0x0 },
/* 1280x960 @ 60Hz */
{ 108000, 0x0010, 0x0002, 0x20, 0xDC, 0x9F, 0x9F, 0x80, 0xAB, 0x99,
0xE6, 0x10, 0x40, 0xC0, 0x03, 0xBF, 0xA0, 0xBF, 0xE7, 0x03,
0x03, 0x03, 0x03, 0x00, 0x01, 0x2210A000, 0x22210000, 0x22415000,
0x2210A000, 0x22210000, 0x22415000, 0x00 },
/* 1280x960 @ 75Hz */
{ 129000, 0x0029, 0x0006, 0x20, 0xD3, 0x9F, 0x9F, 0x97, 0xaa, 0x1b,
0xE8, 0x10, 0x40, 0xC0, 0x03, 0xBF, 0xA0, 0xBF, 0xE9, 0x03,
0x03, 0x03, 0x03, 0x00, 0x01, 0x2210A000, 0x22210000, 0x2241B000,
0x2210A000, 0x22210000, 0x2241B000, 0x00 },
/* 1280x960 @ 85Hz */
{ 148000, 0x0042, 0x0009, 0x20, 0xD3, 0x9F, 0x9F, 0x97, 0xA7, 0x1B,
0xF1, 0x10, 0x40, 0xC0, 0x03, 0xBF, 0xA0, 0xBF, 0xF2, 0x03,
0x03, 0x03, 0x03, 0x00, 0x01, 0x2210A000, 0x22220000, 0x2241D000,
0x2210A000, 0x22220000, 0x2241D000, 0x00 },
/* 1600x1200 @ 60Hz */
{ 162000, 0x0019, 0x0006, 0x10, 0x09, 0xC7, 0xC7, 0x8D, 0xcf, 0x07,
0xE0, 0x10, 0x40, 0xB0, 0x03, 0xAF, 0xC8, 0xAF, 0xE1, 0x04,
0x04, 0x04, 0x04, 0x01, 0x00, 0x2210b000, 0x22416000, 0x44419000,
0x2210b000, 0x22416000, 0x44419000, 0x00 },
/* 1600x1200 @ 65 Hz */
{ 175000, 0x005d, 0x0018, 0x10, 0x09, 0xC7, 0xC7, 0x8D, 0xcf, 0x07,
0xE0, 0x10, 0x40, 0xB0, 0x03, 0xAF, 0xC8, 0xAF, 0xE1, 0x04,
0x04, 0x04, 0x04, 0x01, 0x00, 0x2210c000, 0x22416000, 0x44419000,
0x2210c000, 0x22416000, 0x44419000, 0x00 },
/* 1600x1200 @ 70 Hz */
{ 189000, 0x003D, 0x000e, 0x10, 0x09, 0xC7, 0xC7, 0x8d, 0xcf, 0x07,
0xE0, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xE1, 0x04,
0x04, 0x04, 0x04, 0x01, 0x00, 0x2220e000, 0x22416000, 0x44419000,
0x2220e000, 0x22416000, 0x44419000, 0x00 },
/* 1600x1200 @ 72 Hz */
{ 195000, 0x003f, 0x000e, 0x10, 0x0b, 0xC7, 0xC7, 0x8f, 0xd5, 0x0b,
0xE1, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xe2, 0x04, 0x04,
0x04, 0x04, 0x01, 0x00, 0x2220e000, 0x22416000, 0x44419000,
0x2220e000, 0x22416000, 0x44419000, 0x00 },
/* 1600x1200 @ 75 Hz */
{ 202000, 0x0024, 0x0007, 0x10, 0x09, 0xC7, 0xC7, 0x8d, 0xcf, 0x07,
0xE0, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xE1, 0x04, 0x04,
0x04, 0x04, 0x01, 0x00, 0x2220e000, 0x22416000, 0x44419000,
0x2220e000, 0x22416000, 0x44419000, 0x00 },
/* 1600x1200 @ 85 Hz */
{ 229000, 0x0029, 0x0007, 0x10, 0x09, 0xC7, 0xC7, 0x8d, 0xcf, 0x07,
0xE0, 0x10, 0x40, 0xb0, 0x03, 0xAF, 0xC8, 0xaf, 0xE1, 0x04, 0x04,
0x04, 0x04, 0x01, 0x00, 0x22210000, 0x22416000, 0x0,
0x22210000, 0x22416000, 0x0, 0x00 },
};
void round_off_xres(u32 *xres)
{
if (*xres <= 640)
*xres = 640;
else if (*xres <= 800)
*xres = 800;
else if (*xres <= 1024)
*xres = 1024;
else if (*xres <= 1152)
*xres = 1152;
else if (*xres <= 1280)
*xres = 1280;
else
*xres = 1600;
}
inline void round_off_yres(u32 *xres, u32 *yres)
{
*yres = (*xres * 3) >> 2;
}
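/* Annotation: cr01 holds the horizontal display end, (xres / 8) - 1, so
 * the search below keys on it and then prefers the entry whose pixel
 * clock is closest to the requested clock without exceeding it. */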
static int i810fb_find_best_mode(u32 xres, u32 yres, u32 pixclock)
{
u32 diff = 0, diff_best = 0xFFFFFFFF, i = 0, i_best = 0;
u8 hfl = (u8) ((xres >> 3) - 1);
for (i = 0; i < ARRAY_SIZE(std_modes); i++) {
if (std_modes[i].cr01 == hfl) {
if (std_modes[i].pixclock <= pixclock)
diff = pixclock - std_modes[i].pixclock;
if (diff < diff_best) {
i_best = i;
diff_best = diff;
}
}
}
return i_best;
}
void i810fb_encode_registers(const struct fb_var_screeninfo *var,
struct i810fb_par *par, u32 xres, u32 yres)
{
u32 i_best = i810fb_find_best_mode(xres, yres, par->regs.pixclock);
par->regs = std_modes[i_best];
/* overlay */
par->ovract = ((xres + var->right_margin + var->hsync_len +
var->left_margin - 32) | ((xres - 32) << 16));
}
void i810fb_fill_var_timings(struct fb_var_screeninfo *var)
{
u32 total, xres, yres;
u32 mode, pixclock;
xres = var->xres;
yres = var->yres;
pixclock = 1000000000 / var->pixclock;
mode = i810fb_find_best_mode(xres, yres, pixclock);
total = (std_modes[mode].cr00 | (std_modes[mode].cr35 & 1) << 8) + 3;
total <<= 3;
var->pixclock = 1000000000 / std_modes[mode].pixclock;
var->right_margin = (std_modes[mode].cr04 << 3) - xres;
var->hsync_len = ((std_modes[mode].cr05 & 0x1F) -
(std_modes[mode].cr04 & 0x1F)) << 3;
var->left_margin = (total - (xres + var->right_margin +
var->hsync_len));
var->sync = FB_SYNC_ON_GREEN;
/* MSR bits 6/7 select sync polarity; logical negation is intended here,
since bitwise ~ of the masked value would always be non-zero */
if (!(std_modes[mode].msr & (1 << 6)))
var->sync |= FB_SYNC_HOR_HIGH_ACT;
if (!(std_modes[mode].msr & (1 << 7)))
var->sync |= FB_SYNC_VERT_HIGH_ACT;
total = (std_modes[mode].cr06 | (std_modes[mode].cr30 & 0xF) << 8) + 2;
var->lower_margin = (std_modes[mode].cr10 |
(std_modes[mode].cr32 & 0x0F) << 8) - yres;
var->vsync_len = (std_modes[mode].cr11 & 0x0F) -
(var->lower_margin & 0x0F);
var->upper_margin = total - (yres + var->lower_margin + var->vsync_len);
}
u32 i810_get_watermark(struct fb_var_screeninfo *var,
struct i810fb_par *par)
{
struct mode_registers *params = &par->regs;
u32 wmark = 0;
if (par->mem_freq == 100) {
switch (var->bits_per_pixel) {
case 8:
wmark = params->bpp8_100;
break;
case 16:
wmark = params->bpp16_100;
break;
case 24:
case 32:
wmark = params->bpp24_100;
}
} else {
switch (var->bits_per_pixel) {
case 8:
wmark = params->bpp8_133;
break;
case 16:
wmark = params->bpp16_133;
break;
case 24:
case 32:
wmark = params->bpp24_133;
}
}
return wmark;
}
| gpl-2.0 |
ntddk/pemu | plugins/glibc-2.13-new/soft-fp/extenddftf2.c | 88 | 1920 | /* Software floating-point emulation.
Return 'a' converted to IEEE quad
Copyright (C) 1997,1999,2006 Free Software Foundation, Inc.
This file is part of the GNU C Library.
Contributed by Richard Henderson (rth@cygnus.com) and
Jakub Jelinek (jj@ultra.linux.cz).
The GNU C Library is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
In addition to the permissions in the GNU Lesser General Public
License, the Free Software Foundation gives you unlimited
permission to link the compiled version of this file into
combinations with other programs, and to distribute those
combinations without any restriction coming from the use of this
file. (The Lesser General Public License restrictions do apply in
other respects; for example, they cover modification of the file,
and distribution when not linked into a combine executable.)
The GNU C Library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with the GNU C Library; if not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
#include "soft-fp.h"
#include "double.h"
#include "quad.h"
TFtype __extenddftf2(DFtype a)
{
FP_DECL_EX;
FP_DECL_D(A);
FP_DECL_Q(R);
TFtype r;
FP_INIT_ROUNDMODE;
FP_UNPACK_RAW_D(A, a);
#if (2 * _FP_W_TYPE_SIZE) < _FP_FRACBITS_Q
FP_EXTEND(Q,D,4,2,R,A);
#else
FP_EXTEND(Q,D,2,1,R,A);
#endif
FP_PACK_RAW_Q(r, R);
FP_HANDLE_EXCEPTIONS;
return r;
}
| gpl-2.0 |
faux123/Note_4_SM-N910T | fs/jbd2/commit.c | 344 | 35550 | /*
* linux/fs/jbd2/commit.c
*
* Written by Stephen C. Tweedie <sct@redhat.com>, 1998
*
* Copyright 1998 Red Hat corp --- All Rights Reserved
*
* This file is part of the Linux kernel and is made available under
* the terms of the GNU General Public License, version 2, or at your
* option, any later version, incorporated herein by reference.
*
* Journal commit routines for the generic filesystem journaling code;
* part of the ext2fs journaling system.
*/
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/jiffies.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/bitops.h>
#include <trace/events/jbd2.h>
/*
* IO end handler for temporary buffer_heads handling writes to the journal.
*/
static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
{
struct buffer_head *orig_bh = bh->b_private;
BUFFER_TRACE(bh, "");
if (uptodate)
set_buffer_uptodate(bh);
else
clear_buffer_uptodate(bh);
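/* Annotation: clearing BH_Shadow on the original buffer and waking the
 * bit-waiters below lets tasks blocked on the shadowed buffer (e.g. in
 * do_get_write_access()) proceed once the log copy has completed. */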
if (orig_bh) {
clear_bit_unlock(BH_Shadow, &orig_bh->b_state);
smp_mb__after_clear_bit();
wake_up_bit(&orig_bh->b_state, BH_Shadow);
}
unlock_buffer(bh);
}
/*
* When an ext4 file is truncated, it is possible that some pages are not
* successfully freed, because they are attached to a committing transaction.
* After the transaction commits, these pages are left on the LRU, with no
* ->mapping, and with attached buffers. These pages are trivially reclaimable
* by the VM, but their apparent absence upsets the VM accounting, and it makes
* the numbers in /proc/meminfo look odd.
*
* So here, we have a buffer which has just come off the forget list. Look to
* see if we can strip all buffers from the backing page.
*
* Called under lock_journal(), and possibly under journal_datalist_lock. The
* caller provided us with a ref against the buffer, and we drop that here.
*/
static void release_buffer_page(struct buffer_head *bh)
{
struct page *page;
if (buffer_dirty(bh))
goto nope;
if (atomic_read(&bh->b_count) != 1)
goto nope;
page = bh->b_page;
if (!page)
goto nope;
if (page->mapping)
goto nope;
/* OK, it's a truncated page */
if (!trylock_page(page))
goto nope;
page_cache_get(page);
__brelse(bh);
try_to_free_buffers(page);
unlock_page(page);
page_cache_release(page);
return;
nope:
__brelse(bh);
}
static void jbd2_commit_block_csum_set(journal_t *j, struct buffer_head *bh)
{
struct commit_header *h;
__u32 csum;
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return;
h = (struct commit_header *)(bh->b_data);
h->h_chksum_type = 0;
h->h_chksum_size = 0;
h->h_chksum[0] = 0;
csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
h->h_chksum[0] = cpu_to_be32(csum);
}
/*
* Done it all: now submit the commit record. We should have
* cleaned up our previous buffers by now, so if we are in abort
* mode we can now just skip the rest of the journal write
* entirely.
*
* Returns 1 if the journal needs to be aborted or 0 on success
*/
static int journal_submit_commit_record(journal_t *journal,
transaction_t *commit_transaction,
struct buffer_head **cbh,
__u32 crc32_sum)
{
struct commit_header *tmp;
struct buffer_head *bh;
int ret;
struct timespec now = current_kernel_time();
*cbh = NULL;
if (is_journal_aborted(journal))
return 0;
bh = jbd2_journal_get_descriptor_buffer(journal);
if (!bh)
return 1;
tmp = (struct commit_header *)bh->b_data;
tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
if (JBD2_HAS_COMPAT_FEATURE(journal,
JBD2_FEATURE_COMPAT_CHECKSUM)) {
tmp->h_chksum_type = JBD2_CRC32_CHKSUM;
tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
}
jbd2_commit_block_csum_set(journal, bh);
BUFFER_TRACE(bh, "submit commit block");
lock_buffer(bh);
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = journal_end_buffer_io_sync;
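/* Annotation: with barriers enabled and a synchronous commit record, the
 * WRITE_FLUSH_FUA below flushes the device cache first and forces the
 * commit block itself to stable media, so it cannot become durable
 * before the blocks it commits. */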
if (journal->j_flags & JBD2_BARRIER &&
!JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT))
ret = submit_bh(WRITE_SYNC | WRITE_FLUSH_FUA, bh);
else
ret = submit_bh(WRITE_SYNC, bh);
*cbh = bh;
return ret;
}
/*
* This function along with journal_submit_commit_record
* allows the commit record to be written asynchronously.
*/
static int journal_wait_on_commit_record(journal_t *journal,
struct buffer_head *bh)
{
int ret = 0;
clear_buffer_dirty(bh);
wait_on_buffer(bh);
if (unlikely(!buffer_uptodate(bh)))
ret = -EIO;
put_bh(bh); /* One for getblk() */
return ret;
}
/*
* write the filemap data using writepage() address_space_operations.
* We don't do block allocation here even for delalloc. We don't
* use writepages() because with delayed allocation we may be doing
* block allocation in writepages().
*/
static int journal_submit_inode_data_buffers(struct address_space *mapping)
{
int ret;
struct writeback_control wbc = {
.sync_mode = WB_SYNC_ALL,
.nr_to_write = mapping->nrpages * 2,
.range_start = 0,
.range_end = i_size_read(mapping->host),
};
ret = generic_writepages(mapping, &wbc);
return ret;
}
/*
* Submit all the data buffers of inode associated with the transaction to
* disk.
*
* We are in a committing transaction. Therefore no new inode can be added to
* our inode list. We use the JI_COMMIT_RUNNING flag to protect the inode we
* currently operate on from being released while we write out pages.
*/
static int journal_submit_data_buffers(journal_t *journal,
transaction_t *commit_transaction)
{
struct jbd2_inode *jinode;
int err, ret = 0;
struct address_space *mapping;
spin_lock(&journal->j_list_lock);
list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
mapping = jinode->i_vfs_inode->i_mapping;
set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
spin_unlock(&journal->j_list_lock);
/*
* Submit the inode data buffers. We use writepage
* instead of writepages because writepages can do
* block allocation with delalloc, and we need to write
* only already-allocated blocks here.
*/
trace_jbd2_submit_inode_data(jinode->i_vfs_inode);
err = journal_submit_inode_data_buffers(mapping);
if (!ret)
ret = err;
spin_lock(&journal->j_list_lock);
J_ASSERT(jinode->i_transaction == commit_transaction);
clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
smp_mb__after_clear_bit();
wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
}
spin_unlock(&journal->j_list_lock);
return ret;
}
/*
* Wait for data submitted for writeout, refile inodes to proper
* transaction if needed.
*
*/
static int journal_finish_inode_data_buffers(journal_t *journal,
transaction_t *commit_transaction)
{
struct jbd2_inode *jinode, *next_i;
int err, ret = 0;
/* For locking, see the comment in journal_submit_data_buffers() */
spin_lock(&journal->j_list_lock);
list_for_each_entry(jinode, &commit_transaction->t_inode_list, i_list) {
set_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
spin_unlock(&journal->j_list_lock);
err = filemap_fdatawait(jinode->i_vfs_inode->i_mapping);
if (err) {
/*
* Because AS_EIO is cleared by
* filemap_fdatawait_range(), set it again so
* that user process can get -EIO from fsync().
*/
set_bit(AS_EIO,
&jinode->i_vfs_inode->i_mapping->flags);
if (!ret)
ret = err;
}
spin_lock(&journal->j_list_lock);
clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags);
smp_mb__after_clear_bit();
wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING);
}
/* Now refile inode to proper lists */
list_for_each_entry_safe(jinode, next_i,
&commit_transaction->t_inode_list, i_list) {
list_del(&jinode->i_list);
if (jinode->i_next_transaction) {
jinode->i_transaction = jinode->i_next_transaction;
jinode->i_next_transaction = NULL;
list_add(&jinode->i_list,
&jinode->i_transaction->t_inode_list);
} else {
jinode->i_transaction = NULL;
}
}
spin_unlock(&journal->j_list_lock);
return ret;
}
static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
{
struct page *page = bh->b_page;
char *addr;
__u32 checksum;
addr = kmap_atomic(page);
checksum = crc32_be(crc32_sum,
(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
kunmap_atomic(addr);
return checksum;
}
static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
unsigned long long block)
{
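/* Annotation: the on-disk tag stores the 64-bit block number as two
 * 32-bit halves; the high half is written only for journals using tags
 * larger than JBD2_TAG_SIZE32. The ">> 31 >> 1" double shift equals
 * ">> 32" while staying well-defined even if block were only 32 bits
 * wide. */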
tag->t_blocknr = cpu_to_be32(block & (u32)~0);
if (tag_bytes > JBD2_TAG_SIZE32)
tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
}
static void jbd2_descr_block_csum_set(journal_t *j,
struct buffer_head *bh)
{
struct jbd2_journal_block_tail *tail;
__u32 csum;
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return;
tail = (struct jbd2_journal_block_tail *)(bh->b_data + j->j_blocksize -
sizeof(struct jbd2_journal_block_tail));
tail->t_checksum = 0;
csum = jbd2_chksum(j, j->j_csum_seed, bh->b_data, j->j_blocksize);
tail->t_checksum = cpu_to_be32(csum);
}
static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
struct buffer_head *bh, __u32 sequence)
{
struct page *page = bh->b_page;
__u8 *addr;
__u32 csum;
if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
return;
sequence = cpu_to_be32(sequence);
addr = kmap_atomic(page);
csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
sizeof(sequence));
csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data),
bh->b_size);
kunmap_atomic(addr);
tag->t_checksum = cpu_to_be32(csum);
}
/*
* jbd2_journal_commit_transaction
*
* The primary function for committing a transaction to the log. This
* function is called by the journal thread to begin a complete commit.
*/
void jbd2_journal_commit_transaction(journal_t *journal)
{
struct transaction_stats_s stats;
transaction_t *commit_transaction;
struct journal_head *jh;
struct buffer_head *descriptor;
struct buffer_head **wbuf = journal->j_wbuf;
int bufs;
int flags;
int err;
unsigned long long blocknr;
ktime_t start_time;
u64 commit_time;
char *tagp = NULL;
journal_header_t *header;
journal_block_tag_t *tag = NULL;
int space_left = 0;
int first_tag = 0;
int tag_flag;
int i;
int tag_bytes = journal_tag_bytes(journal);
struct buffer_head *cbh = NULL; /* For transactional checksums */
__u32 crc32_sum = ~0;
struct blk_plug plug;
/* Tail of the journal */
unsigned long first_block;
tid_t first_tid;
int update_tail;
int csum_size = 0;
LIST_HEAD(io_bufs);
LIST_HEAD(log_bufs);
if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
csum_size = sizeof(struct jbd2_journal_block_tail);
/*
* First job: lock down the current transaction and wait for
* all outstanding updates to complete.
*/
/* Do we need to erase the effects of a prior jbd2_journal_flush? */
if (journal->j_flags & JBD2_FLUSHED) {
jbd_debug(3, "super block updated\n");
mutex_lock(&journal->j_checkpoint_mutex);
/*
* We hold j_checkpoint_mutex so tail cannot change under us.
* We don't need any special data guarantees for writing sb
* since journal is empty and it is ok for write to be
* flushed only with transaction commit.
*/
jbd2_journal_update_sb_log_tail(journal,
journal->j_tail_sequence,
journal->j_tail,
WRITE_SYNC);
mutex_unlock(&journal->j_checkpoint_mutex);
} else {
jbd_debug(3, "superblock not updated\n");
}
J_ASSERT(journal->j_running_transaction != NULL);
J_ASSERT(journal->j_committing_transaction == NULL);
commit_transaction = journal->j_running_transaction;
J_ASSERT(commit_transaction->t_state == T_RUNNING);
trace_jbd2_start_commit(journal, commit_transaction);
jbd_debug(1, "JBD2: starting commit of transaction %d\n",
commit_transaction->t_tid);
write_lock(&journal->j_state_lock);
commit_transaction->t_state = T_LOCKED;
trace_jbd2_commit_locking(journal, commit_transaction);
stats.run.rs_wait = commit_transaction->t_max_wait;
stats.run.rs_request_delay = 0;
stats.run.rs_locked = jiffies;
if (commit_transaction->t_requested)
stats.run.rs_request_delay =
jbd2_time_diff(commit_transaction->t_requested,
stats.run.rs_locked);
stats.run.rs_running = jbd2_time_diff(commit_transaction->t_start,
stats.run.rs_locked);
spin_lock(&commit_transaction->t_handle_lock);
while (atomic_read(&commit_transaction->t_updates)) {
DEFINE_WAIT(wait);
prepare_to_wait(&journal->j_wait_updates, &wait,
TASK_UNINTERRUPTIBLE);
if (atomic_read(&commit_transaction->t_updates)) {
spin_unlock(&commit_transaction->t_handle_lock);
write_unlock(&journal->j_state_lock);
schedule();
write_lock(&journal->j_state_lock);
spin_lock(&commit_transaction->t_handle_lock);
}
finish_wait(&journal->j_wait_updates, &wait);
}
spin_unlock(&commit_transaction->t_handle_lock);
J_ASSERT (atomic_read(&commit_transaction->t_outstanding_credits) <=
journal->j_max_transaction_buffers);
/*
* First thing we are allowed to do is to discard any remaining
* BJ_Reserved buffers. Note, it is _not_ permissible to assume
* that there are no such buffers: if a large filesystem
* operation like a truncate needs to split itself over multiple
* transactions, then it may try to do a jbd2_journal_restart() while
* there are still BJ_Reserved buffers outstanding. These must
* be released cleanly from the current transaction.
*
* In this case, the filesystem must still reserve write access
* again before modifying the buffer in the new transaction, but
* we do not require it to remember exactly which old buffers it
* has reserved. This is consistent with the existing behaviour
* that multiple jbd2_journal_get_write_access() calls to the same
* buffer are perfectly permissible.
*/
while (commit_transaction->t_reserved_list) {
jh = commit_transaction->t_reserved_list;
JBUFFER_TRACE(jh, "reserved, unused: refile");
/*
* A jbd2_journal_get_undo_access()+jbd2_journal_release_buffer() may
* leave undo-committed data.
*/
if (jh->b_committed_data) {
struct buffer_head *bh = jh2bh(jh);
jbd_lock_bh_state(bh);
jbd2_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
jbd_unlock_bh_state(bh);
}
jbd2_journal_refile_buffer(journal, jh);
}
/*
* Now try to drop any written-back buffers from the journal's
* checkpoint lists. We do this *before* commit because it potentially
* frees some memory
*/
spin_lock(&journal->j_list_lock);
__jbd2_journal_clean_checkpoint_list(journal);
spin_unlock(&journal->j_list_lock);
jbd_debug(3, "JBD2: commit phase 1\n");
/*
* Clear revoked flag to reflect there is no revoked buffers
* in the next transaction which is going to be started.
*/
jbd2_clear_buffer_revoked_flags(journal);
/*
* Switch to a new revoke table.
*/
jbd2_journal_switch_revoke_table(journal);
trace_jbd2_commit_flushing(journal, commit_transaction);
stats.run.rs_flushing = jiffies;
stats.run.rs_locked = jbd2_time_diff(stats.run.rs_locked,
stats.run.rs_flushing);
commit_transaction->t_state = T_FLUSH;
journal->j_committing_transaction = commit_transaction;
journal->j_running_transaction = NULL;
start_time = ktime_get();
commit_transaction->t_log_start = journal->j_head;
wake_up(&journal->j_wait_transaction_locked);
write_unlock(&journal->j_state_lock);
jbd_debug(3, "JBD2: commit phase 2\n");
/*
* Now start flushing things to disk, in the order they appear
* on the transaction lists. Data blocks go first.
*/
err = journal_submit_data_buffers(journal, commit_transaction);
if (err)
jbd2_journal_abort(journal, err);
blk_start_plug(&plug);
jbd2_journal_write_revoke_records(journal, commit_transaction,
&log_bufs, WRITE_SYNC);
blk_finish_plug(&plug);
jbd_debug(3, "JBD2: commit phase 2\n");
/*
* Way to go: we have now written out all of the data for a
* transaction! Now comes the tricky part: we need to write out
* metadata. Loop over the transaction's entire buffer list:
*/
write_lock(&journal->j_state_lock);
commit_transaction->t_state = T_COMMIT;
write_unlock(&journal->j_state_lock);
trace_jbd2_commit_logging(journal, commit_transaction);
stats.run.rs_logging = jiffies;
stats.run.rs_flushing = jbd2_time_diff(stats.run.rs_flushing,
stats.run.rs_logging);
stats.run.rs_blocks =
atomic_read(&commit_transaction->t_outstanding_credits);
stats.run.rs_blocks_logged = 0;
J_ASSERT(commit_transaction->t_nr_buffers <=
atomic_read(&commit_transaction->t_outstanding_credits));
err = 0;
bufs = 0;
descriptor = NULL;
blk_start_plug(&plug);
while (commit_transaction->t_buffers) {
/* Find the next buffer to be journaled... */
jh = commit_transaction->t_buffers;
/* If we're in abort mode, we just un-journal the buffer and
release it. */
if (is_journal_aborted(journal)) {
clear_buffer_jbddirty(jh2bh(jh));
JBUFFER_TRACE(jh, "journal is aborting: refile");
jbd2_buffer_abort_trigger(jh,
jh->b_frozen_data ?
jh->b_frozen_triggers :
jh->b_triggers);
jbd2_journal_refile_buffer(journal, jh);
/* If that was the last one, we need to clean up
* any descriptor buffers which may have been
* already allocated, even if we are now
* aborting. */
if (!commit_transaction->t_buffers)
goto start_journal_io;
continue;
}
/* Make sure we have a descriptor block in which to
record the metadata buffer. */
if (!descriptor) {
J_ASSERT (bufs == 0);
jbd_debug(4, "JBD2: get descriptor\n");
descriptor = jbd2_journal_get_descriptor_buffer(journal);
if (!descriptor) {
jbd2_journal_abort(journal, -EIO);
continue;
}
jbd_debug(4, "JBD2: got buffer %llu (%p)\n",
(unsigned long long)descriptor->b_blocknr,
descriptor->b_data);
header = (journal_header_t *)descriptor->b_data;
header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
header->h_blocktype = cpu_to_be32(JBD2_DESCRIPTOR_BLOCK);
header->h_sequence = cpu_to_be32(commit_transaction->t_tid);
tagp = &descriptor->b_data[sizeof(journal_header_t)];
space_left = descriptor->b_size -
sizeof(journal_header_t);
first_tag = 1;
set_buffer_jwrite(descriptor);
set_buffer_dirty(descriptor);
wbuf[bufs++] = descriptor;
/* Record it so that we can wait for IO
completion later */
BUFFER_TRACE(descriptor, "ph3: file as descriptor");
jbd2_file_log_bh(&log_bufs, descriptor);
}
/* Where is the buffer to be written? */
err = jbd2_journal_next_log_block(journal, &blocknr);
/* If the block mapping failed, just abandon the buffer
and repeat this loop: we'll fall into the
refile-on-abort condition above. */
if (err) {
jbd2_journal_abort(journal, err);
continue;
}
/*
* start_this_handle() uses t_outstanding_credits to determine
* the free space in the log, but this counter is changed
* by jbd2_journal_next_log_block() also.
*/
atomic_dec(&commit_transaction->t_outstanding_credits);
/* Bump b_count to prevent truncate from stumbling over
the shadowed buffer! @@@ This can go if we ever get
rid of the shadow pairing of buffers. */
atomic_inc(&jh2bh(jh)->b_count);
/*
* Make a temporary IO buffer with which to write it out
* (this will requeue the metadata buffer to BJ_Shadow).
*/
set_bit(BH_JWrite, &jh2bh(jh)->b_state);
JBUFFER_TRACE(jh, "ph3: write metadata");
flags = jbd2_journal_write_metadata_buffer(commit_transaction,
jh, &wbuf[bufs], blocknr);
if (flags < 0) {
jbd2_journal_abort(journal, flags);
continue;
}
jbd2_file_log_bh(&io_bufs, wbuf[bufs]);
/* Record the new block's tag in the current descriptor
buffer */
tag_flag = 0;
if (flags & 1)
tag_flag |= JBD2_FLAG_ESCAPE;
if (!first_tag)
tag_flag |= JBD2_FLAG_SAME_UUID;
tag = (journal_block_tag_t *) tagp;
write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
tag->t_flags = cpu_to_be16(tag_flag);
jbd2_block_tag_csum_set(journal, tag, wbuf[bufs],
commit_transaction->t_tid);
tagp += tag_bytes;
space_left -= tag_bytes;
bufs++;
if (first_tag) {
memcpy (tagp, journal->j_uuid, 16);
tagp += 16;
space_left -= 16;
first_tag = 0;
}
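/* Annotation: only the first tag in a descriptor block is followed by
the 16-byte journal UUID; later tags carry JBD2_FLAG_SAME_UUID instead,
which is why the space check below reserves tag_bytes + 16 + csum_size. */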
/* If there's no more to do, or if the descriptor is full,
let the IO rip! */
if (bufs == journal->j_wbufsize ||
commit_transaction->t_buffers == NULL ||
space_left < tag_bytes + 16 + csum_size) {
jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
/* Write an end-of-descriptor marker before
submitting the IOs. "tag" still points to
the last tag we set up. */
tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
jbd2_descr_block_csum_set(journal, descriptor);
start_journal_io:
for (i = 0; i < bufs; i++) {
struct buffer_head *bh = wbuf[i];
/*
* Compute checksum.
*/
if (JBD2_HAS_COMPAT_FEATURE(journal,
JBD2_FEATURE_COMPAT_CHECKSUM)) {
crc32_sum =
jbd2_checksum_data(crc32_sum, bh);
}
lock_buffer(bh);
clear_buffer_dirty(bh);
set_buffer_uptodate(bh);
bh->b_end_io = journal_end_buffer_io_sync;
submit_bh(WRITE_SYNC, bh);
}
cond_resched();
stats.run.rs_blocks_logged += bufs;
/* Force a new descriptor to be generated next
time round the loop. */
descriptor = NULL;
bufs = 0;
}
}
err = journal_finish_inode_data_buffers(journal, commit_transaction);
if (err) {
printk(KERN_WARNING
"JBD2: Detected IO errors while flushing file data "
"on %s\n", journal->j_devname);
if (journal->j_flags & JBD2_ABORT_ON_SYNCDATA_ERR)
jbd2_journal_abort(journal, err);
err = 0;
}
/*
* Get current oldest transaction in the log before we issue flush
* to the filesystem device. After the flush we can be sure that
* blocks of all older transactions are checkpointed to persistent
* storage and we will be safe to update journal start in the
* superblock with the numbers we get here.
*/
update_tail =
jbd2_journal_get_log_tail(journal, &first_tid, &first_block);
write_lock(&journal->j_state_lock);
if (update_tail) {
long freed = first_block - journal->j_tail;
if (first_block < journal->j_tail)
freed += journal->j_last - journal->j_first;
/* Update tail only if we free significant amount of space */
if (freed < journal->j_maxlen / 4)
update_tail = 0;
}
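/* Annotation: advancing the journal tail means rewriting the journal
 * superblock, so the heuristic above skips the update unless at least a
 * quarter of the journal would be freed. */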
J_ASSERT(commit_transaction->t_state == T_COMMIT);
commit_transaction->t_state = T_COMMIT_DFLUSH;
write_unlock(&journal->j_state_lock);
/*
* If the journal is not located on the file system device,
* then we must flush the file system device before we issue
* the commit record
*/
if (commit_transaction->t_need_data_flush &&
(journal->j_fs_dev != journal->j_dev) &&
(journal->j_flags & JBD2_BARRIER))
blkdev_issue_flush(journal->j_fs_dev, GFP_NOFS, NULL);
/* Done it all: now write the commit record asynchronously. */
if (JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
err = journal_submit_commit_record(journal, commit_transaction,
&cbh, crc32_sum);
if (err)
__jbd2_journal_abort_hard(journal);
}
blk_finish_plug(&plug);
/* Lo and behold: we have just managed to send a transaction to
the log. Before we can commit it, wait for the IO so far to
complete. Control buffers being written are on the
transaction's t_log_list queue, and metadata buffers are on
the io_bufs list.
Wait for the buffers in reverse order. That way we are
less likely to be woken up until all IOs have completed, and
so we incur less scheduling load.
*/
jbd_debug(3, "JBD2: commit phase 3\n");
while (!list_empty(&io_bufs)) {
struct buffer_head *bh = list_entry(io_bufs.prev,
struct buffer_head,
b_assoc_buffers);
wait_on_buffer(bh);
cond_resched();
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
jbd2_unfile_log_bh(bh);
/*
* The list contains temporary buffer heads created by
* jbd2_journal_write_metadata_buffer().
*/
BUFFER_TRACE(bh, "dumping temporary bh");
__brelse(bh);
J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0);
free_buffer_head(bh);
/* We also have to refile the corresponding shadowed buffer */
jh = commit_transaction->t_shadow_list->b_tprev;
bh = jh2bh(jh);
clear_buffer_jwrite(bh);
J_ASSERT_BH(bh, buffer_jbddirty(bh));
J_ASSERT_BH(bh, !buffer_shadow(bh));
/* The metadata is now released for reuse, but we need
to remember it against this transaction so that when
we finally commit, we can do any checkpointing
required. */
JBUFFER_TRACE(jh, "file as BJ_Forget");
jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget);
JBUFFER_TRACE(jh, "brelse shadowed buffer");
__brelse(bh);
}
J_ASSERT (commit_transaction->t_shadow_list == NULL);
jbd_debug(3, "JBD2: commit phase 4\n");
/* Here we wait for the revoke record and descriptor record buffers */
while (!list_empty(&log_bufs)) {
struct buffer_head *bh;
bh = list_entry(log_bufs.prev, struct buffer_head, b_assoc_buffers);
wait_on_buffer(bh);
cond_resched();
if (unlikely(!buffer_uptodate(bh)))
err = -EIO;
BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile");
clear_buffer_jwrite(bh);
jbd2_unfile_log_bh(bh);
__brelse(bh); /* One for getblk */
/* AKPM: bforget here */
}
if (err)
jbd2_journal_abort(journal, err);
jbd_debug(3, "JBD2: commit phase 5\n");
write_lock(&journal->j_state_lock);
J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH);
commit_transaction->t_state = T_COMMIT_JFLUSH;
write_unlock(&journal->j_state_lock);
if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
err = journal_submit_commit_record(journal, commit_transaction,
&cbh, crc32_sum);
if (err)
__jbd2_journal_abort_hard(journal);
}
if (cbh)
err = journal_wait_on_commit_record(journal, cbh);
if (JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) &&
journal->j_flags & JBD2_BARRIER) {
blkdev_issue_flush(journal->j_dev, GFP_NOFS, NULL);
}
if (err)
jbd2_journal_abort(journal, err);
/*
* Now the disk caches for the filesystem device are flushed, so we are
* safe to erase checkpointed transactions from the log by updating the
* journal superblock.
*/
if (update_tail)
jbd2_update_log_tail(journal, first_tid, first_block);
/* End of a transaction! Finally, we can do checkpoint
processing: any buffers committed as a result of this
transaction can be removed from any checkpoint list it was on
before. */
jbd_debug(3, "JBD2: commit phase 6\n");
J_ASSERT(list_empty(&commit_transaction->t_inode_list));
J_ASSERT(commit_transaction->t_buffers == NULL);
J_ASSERT(commit_transaction->t_checkpoint_list == NULL);
J_ASSERT(commit_transaction->t_shadow_list == NULL);
restart_loop:
/*
* As there are other places (journal_unmap_buffer()) adding buffers
* to this list we have to be careful and hold the j_list_lock.
*/
spin_lock(&journal->j_list_lock);
while (commit_transaction->t_forget) {
transaction_t *cp_transaction;
struct buffer_head *bh;
int try_to_free = 0;
jh = commit_transaction->t_forget;
spin_unlock(&journal->j_list_lock);
bh = jh2bh(jh);
/*
* Get a reference so that bh cannot be freed before we are
* done with it.
*/
get_bh(bh);
jbd_lock_bh_state(bh);
J_ASSERT_JH(jh, jh->b_transaction == commit_transaction);
/*
* If there is undo-protected committed data against
* this buffer, then we can remove it now. If it is a
* buffer needing such protection, the old frozen_data
* field now points to a committed version of the
* buffer, so rotate that field to the new committed
* data.
*
* Otherwise, we can just throw away the frozen data now.
*
* We also know that the frozen data has already fired
* its triggers if they exist, so we can clear that too.
*/
if (jh->b_committed_data) {
jbd2_free(jh->b_committed_data, bh->b_size);
jh->b_committed_data = NULL;
if (jh->b_frozen_data) {
jh->b_committed_data = jh->b_frozen_data;
jh->b_frozen_data = NULL;
jh->b_frozen_triggers = NULL;
}
} else if (jh->b_frozen_data) {
jbd2_free(jh->b_frozen_data, bh->b_size);
jh->b_frozen_data = NULL;
jh->b_frozen_triggers = NULL;
}
spin_lock(&journal->j_list_lock);
cp_transaction = jh->b_cp_transaction;
if (cp_transaction) {
JBUFFER_TRACE(jh, "remove from old cp transaction");
cp_transaction->t_chp_stats.cs_dropped++;
__jbd2_journal_remove_checkpoint(jh);
}
/* Only re-checkpoint the buffer_head if it is marked
* dirty. If the buffer was added to the BJ_Forget list
* by jbd2_journal_forget, it may no longer be dirty and
* there's no point in keeping a checkpoint record for
* it. */
/*
* A buffer which has been freed while still being journaled by
* a previous transaction.
*/
if (buffer_freed(bh)) {
/*
* If the running transaction is the one containing
* "add to orphan" operation (b_next_transaction !=
* NULL), we have to wait for that transaction to
* commit before we can really get rid of the buffer.
* So just clear b_modified to not confuse transaction
* credit accounting and refile the buffer to
* BJ_Forget of the running transaction. If the just
* committed transaction contains "add to orphan"
* operation, we can completely invalidate the buffer
* now. We are rather thorough in that, since the
* buffer may still be accessible when blocksize <
* pagesize and it is attached to the last partial
* page.
*/
jh->b_modified = 0;
if (!jh->b_next_transaction) {
clear_buffer_freed(bh);
clear_buffer_jbddirty(bh);
clear_buffer_mapped(bh);
clear_buffer_new(bh);
clear_buffer_req(bh);
bh->b_bdev = NULL;
}
}
if (buffer_jbddirty(bh)) {
JBUFFER_TRACE(jh, "add to new checkpointing trans");
__jbd2_journal_insert_checkpoint(jh, commit_transaction);
if (is_journal_aborted(journal))
clear_buffer_jbddirty(bh);
} else {
J_ASSERT_BH(bh, !buffer_dirty(bh));
/*
* The buffer on BJ_Forget list and not jbddirty means
* it has been freed by this transaction and hence it
* could not have been reallocated until this
* transaction has committed. *BUT* it could be
* reallocated once we have written all the data to
* disk and before we process the buffer on BJ_Forget
* list.
*/
if (!jh->b_next_transaction)
try_to_free = 1;
}
JBUFFER_TRACE(jh, "refile or unfile buffer");
__jbd2_journal_refile_buffer(jh);
jbd_unlock_bh_state(bh);
if (try_to_free)
release_buffer_page(bh); /* Drops bh reference */
else
__brelse(bh);
cond_resched_lock(&journal->j_list_lock);
}
spin_unlock(&journal->j_list_lock);
/*
* This is a bit sleazy. We use j_list_lock to protect transition
* of a transaction into T_FINISHED state and calling
* __jbd2_journal_drop_transaction(). Otherwise we could race with
* other checkpointing code processing the transaction...
*/
write_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
/*
* Now recheck if some buffers did not get attached to the transaction
* while the lock was dropped...
*/
if (commit_transaction->t_forget) {
spin_unlock(&journal->j_list_lock);
write_unlock(&journal->j_state_lock);
goto restart_loop;
}
/* Done with this transaction! */
jbd_debug(3, "JBD2: commit phase 7\n");
J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH);
commit_transaction->t_start = jiffies;
stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging,
commit_transaction->t_start);
/*
* File the transaction statistics
*/
stats.ts_tid = commit_transaction->t_tid;
stats.run.rs_handle_count =
atomic_read(&commit_transaction->t_handle_count);
trace_jbd2_run_stats(journal->j_fs_dev->bd_dev,
commit_transaction->t_tid, &stats.run);
/*
* Calculate overall stats
*/
spin_lock(&journal->j_history_lock);
journal->j_stats.ts_tid++;
if (commit_transaction->t_requested)
journal->j_stats.ts_requested++;
journal->j_stats.run.rs_wait += stats.run.rs_wait;
journal->j_stats.run.rs_request_delay += stats.run.rs_request_delay;
journal->j_stats.run.rs_running += stats.run.rs_running;
journal->j_stats.run.rs_locked += stats.run.rs_locked;
journal->j_stats.run.rs_flushing += stats.run.rs_flushing;
journal->j_stats.run.rs_logging += stats.run.rs_logging;
journal->j_stats.run.rs_handle_count += stats.run.rs_handle_count;
journal->j_stats.run.rs_blocks += stats.run.rs_blocks;
journal->j_stats.run.rs_blocks_logged += stats.run.rs_blocks_logged;
spin_unlock(&journal->j_history_lock);
commit_transaction->t_state = T_COMMIT_CALLBACK;
J_ASSERT(commit_transaction == journal->j_committing_transaction);
journal->j_commit_sequence = commit_transaction->t_tid;
journal->j_committing_transaction = NULL;
commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
/*
* weight the commit time higher than the average time so we don't
* react too strongly to vast changes in the commit time
*/
if (likely(journal->j_average_commit_time))
journal->j_average_commit_time = (commit_time +
journal->j_average_commit_time*3) / 4;
else
journal->j_average_commit_time = commit_time;
write_unlock(&journal->j_state_lock);
if (journal->j_checkpoint_transactions == NULL) {
journal->j_checkpoint_transactions = commit_transaction;
commit_transaction->t_cpnext = commit_transaction;
commit_transaction->t_cpprev = commit_transaction;
} else {
commit_transaction->t_cpnext =
journal->j_checkpoint_transactions;
commit_transaction->t_cpprev =
commit_transaction->t_cpnext->t_cpprev;
commit_transaction->t_cpnext->t_cpprev =
commit_transaction;
commit_transaction->t_cpprev->t_cpnext =
commit_transaction;
}
spin_unlock(&journal->j_list_lock);
/* Drop all spin_locks because the commit_callback may block.
 * __journal_remove_checkpoint() cannot destroy the transaction
 * under us because it is not yet marked as T_FINISHED. */
if (journal->j_commit_callback)
journal->j_commit_callback(journal, commit_transaction);
trace_jbd2_end_commit(journal, commit_transaction);
jbd_debug(1, "JBD2: commit %d complete, head %d\n",
journal->j_commit_sequence, journal->j_tail_sequence);
write_lock(&journal->j_state_lock);
spin_lock(&journal->j_list_lock);
commit_transaction->t_state = T_FINISHED;
/* Recheck checkpoint lists after j_list_lock was dropped */
if (commit_transaction->t_checkpoint_list == NULL &&
commit_transaction->t_checkpoint_io_list == NULL) {
__jbd2_journal_drop_transaction(journal, commit_transaction);
jbd2_journal_free_transaction(commit_transaction);
}
spin_unlock(&journal->j_list_lock);
write_unlock(&journal->j_state_lock);
wake_up(&journal->j_wait_done_commit);
}
| gpl-2.0 |
BPI-SINOVOIP/BPI-Mainline-kernel | linux-5.4/net/openvswitch/vport-gre.c | 600 | 2201 | // SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (c) 2007-2014 Nicira, Inc.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <linux/if_tunnel.h>
#include <linux/if_vlan.h>
#include <linux/in.h>
#include <linux/in_route.h>
#include <linux/inetdevice.h>
#include <linux/jhash.h>
#include <linux/list.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/rculist.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/ip_tunnels.h>
#include <net/gre.h>
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <net/protocol.h>
#include "datapath.h"
#include "vport.h"
#include "vport-netdev.h"
static struct vport_ops ovs_gre_vport_ops;
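/*
 * Create the backing gretap net_device for a new GRE vport and bring it
 * up. On any failure the device and the vport allocation are torn down
 * before the error is propagated.
 */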
static struct vport *gre_tnl_create(const struct vport_parms *parms)
{
struct net *net = ovs_dp_get_net(parms->dp);
struct net_device *dev;
struct vport *vport;
int err;
vport = ovs_vport_alloc(0, &ovs_gre_vport_ops, parms);
if (IS_ERR(vport))
return vport;
rtnl_lock();
dev = gretap_fb_dev_create(net, parms->name, NET_NAME_USER);
if (IS_ERR(dev)) {
rtnl_unlock();
ovs_vport_free(vport);
return ERR_CAST(dev);
}
err = dev_change_flags(dev, dev->flags | IFF_UP, NULL);
if (err < 0) {
rtnl_delete_link(dev);
rtnl_unlock();
ovs_vport_free(vport);
return ERR_PTR(err);
}
rtnl_unlock();
return vport;
}
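/* Create the tunnel device, then link the vport to it by name. */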
static struct vport *gre_create(const struct vport_parms *parms)
{
struct vport *vport;
vport = gre_tnl_create(parms);
if (IS_ERR(vport))
return vport;
return ovs_netdev_link(vport, parms->name);
}
static struct vport_ops ovs_gre_vport_ops = {
.type = OVS_VPORT_TYPE_GRE,
.create = gre_create,
.send = dev_queue_xmit,
.destroy = ovs_netdev_tunnel_destroy,
};
static int __init ovs_gre_tnl_init(void)
{
return ovs_vport_ops_register(&ovs_gre_vport_ops);
}
static void __exit ovs_gre_tnl_exit(void)
{
ovs_vport_ops_unregister(&ovs_gre_vport_ops);
}
module_init(ovs_gre_tnl_init);
module_exit(ovs_gre_tnl_exit);
MODULE_DESCRIPTION("OVS: GRE switching port");
MODULE_LICENSE("GPL");
MODULE_ALIAS("vport-type-3");
| gpl-2.0 |
amatus/linux | drivers/acpi/acpica/uteval.c | 600 | 10356 | /******************************************************************************
*
* Module Name: uteval - Object evaluation
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_UTILITIES
ACPI_MODULE_NAME("uteval")
/*******************************************************************************
*
* FUNCTION: acpi_ut_evaluate_object
*
* PARAMETERS: prefix_node - Starting node
* path - Path to object from starting node
* expected_return_types - Bitmap of allowed return types
* return_desc - Where a return value is stored
*
* RETURN: Status
*
* DESCRIPTION: Evaluates a namespace object and verifies the type of the
* return object. Common code that simplifies accessing objects
* that have required return objects of fixed types.
*
* NOTE: Internal function, no parameter validation
*
******************************************************************************/
acpi_status
acpi_ut_evaluate_object(struct acpi_namespace_node *prefix_node,
char *path,
u32 expected_return_btypes,
union acpi_operand_object **return_desc)
{
struct acpi_evaluate_info *info;
acpi_status status;
u32 return_btype;
ACPI_FUNCTION_TRACE(ut_evaluate_object);
/* Allocate the evaluation information block */
info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
if (!info) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
info->prefix_node = prefix_node;
info->relative_pathname = path;
/* Evaluate the object/method */
status = acpi_ns_evaluate(info);
if (ACPI_FAILURE(status)) {
if (status == AE_NOT_FOUND) {
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"[%4.4s.%s] was not found\n",
acpi_ut_get_node_name(prefix_node),
path));
} else {
ACPI_ERROR_METHOD("Method execution failed",
prefix_node, path, status);
}
goto cleanup;
}
/* Did we get a return object? */
if (!info->return_object) {
if (expected_return_btypes) {
ACPI_ERROR_METHOD("No object was returned from",
prefix_node, path, AE_NOT_EXIST);
status = AE_NOT_EXIST;
}
goto cleanup;
}
/* Map the return object type to the bitmapped type */
switch ((info->return_object)->common.type) {
case ACPI_TYPE_INTEGER:
return_btype = ACPI_BTYPE_INTEGER;
break;
case ACPI_TYPE_BUFFER:
return_btype = ACPI_BTYPE_BUFFER;
break;
case ACPI_TYPE_STRING:
return_btype = ACPI_BTYPE_STRING;
break;
case ACPI_TYPE_PACKAGE:
return_btype = ACPI_BTYPE_PACKAGE;
break;
default:
return_btype = 0;
break;
}
if ((acpi_gbl_enable_interpreter_slack) && (!expected_return_btypes)) {
/*
* We received a return object, but one was not expected. This can
* happen frequently if the "implicit return" feature is enabled.
* Just delete the return object and return AE_OK.
*/
acpi_ut_remove_reference(info->return_object);
goto cleanup;
}
/* Is the return object one of the expected types? */
if (!(expected_return_btypes & return_btype)) {
ACPI_ERROR_METHOD("Return object type is incorrect",
prefix_node, path, AE_TYPE);
ACPI_ERROR((AE_INFO,
"Type returned from %s was incorrect: %s, expected Btypes: 0x%X",
path,
acpi_ut_get_object_type_name(info->return_object),
expected_return_btypes));
/* On error exit, we must delete the return object */
acpi_ut_remove_reference(info->return_object);
status = AE_TYPE;
goto cleanup;
}
/* Object type is OK, return it */
*return_desc = info->return_object;
cleanup:
ACPI_FREE(info);
return_ACPI_STATUS(status);
}
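/*
 * Usage sketch (hypothetical caller, for illustration only):
 *
 *     union acpi_operand_object *obj_desc;
 *
 *     status = acpi_ut_evaluate_object(device_node, METHOD_NAME__UID,
 *                                      ACPI_BTYPE_INTEGER | ACPI_BTYPE_STRING,
 *                                      &obj_desc);
 *     if (ACPI_SUCCESS(status)) {
 *         ... use obj_desc, then drop it with ...
 *         acpi_ut_remove_reference(obj_desc);
 *     }
 */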
/*******************************************************************************
*
* FUNCTION: acpi_ut_evaluate_numeric_object
*
* PARAMETERS: object_name - Object name to be evaluated
* device_node - Node for the device
* value - Where the value is returned
*
* RETURN: Status
*
* DESCRIPTION: Evaluates a numeric namespace object for a selected device
* and stores result in *Value.
*
* NOTE: Internal function, no parameter validation
*
******************************************************************************/
acpi_status
acpi_ut_evaluate_numeric_object(char *object_name,
struct acpi_namespace_node *device_node,
u64 *value)
{
union acpi_operand_object *obj_desc;
acpi_status status;
ACPI_FUNCTION_TRACE(ut_evaluate_numeric_object);
status = acpi_ut_evaluate_object(device_node, object_name,
ACPI_BTYPE_INTEGER, &obj_desc);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
/* Get the returned Integer */
*value = obj_desc->integer.value;
/* On exit, we must delete the return object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_STA
*
* PARAMETERS: device_node - Node for the device
* flags - Where the status flags are returned
*
* RETURN: Status
*
* DESCRIPTION: Executes _STA for selected device and stores results in
* *Flags. If _STA does not exist, then the device is assumed
* to be present/functional/enabled (as per the ACPI spec).
*
* NOTE: Internal function, no parameter validation
*
******************************************************************************/
acpi_status
acpi_ut_execute_STA(struct acpi_namespace_node *device_node, u32 * flags)
{
union acpi_operand_object *obj_desc;
acpi_status status;
ACPI_FUNCTION_TRACE(ut_execute_STA);
status = acpi_ut_evaluate_object(device_node, METHOD_NAME__STA,
ACPI_BTYPE_INTEGER, &obj_desc);
if (ACPI_FAILURE(status)) {
if (AE_NOT_FOUND == status) {
/*
* If _STA does not exist, then (as per the ACPI specification),
* the returned flags will indicate that the device is present,
* functional, and enabled.
*/
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"_STA on %4.4s was not found, assuming device is present\n",
acpi_ut_get_node_name(device_node)));
*flags = ACPI_UINT32_MAX;
status = AE_OK;
}
return_ACPI_STATUS(status);
}
/* Extract the status flags */
*flags = (u32) obj_desc->integer.value;
/* On exit, we must delete the return object */
acpi_ut_remove_reference(obj_desc);
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ut_execute_power_methods
*
* PARAMETERS: device_node - Node for the device
* method_names - Array of power method names
* method_count - Number of methods to execute
* out_values - Where the power method values are returned
*
* RETURN: Status, out_values
*
* DESCRIPTION: Executes the specified power methods for the device and returns
* the result(s).
*
* NOTE: Internal function, no parameter validation
*
******************************************************************************/
acpi_status
acpi_ut_execute_power_methods(struct acpi_namespace_node *device_node,
const char **method_names,
u8 method_count, u8 *out_values)
{
union acpi_operand_object *obj_desc;
acpi_status status;
acpi_status final_status = AE_NOT_FOUND;
u32 i;
ACPI_FUNCTION_TRACE(ut_execute_power_methods);
for (i = 0; i < method_count; i++) {
/*
* Execute the power method (_sx_d or _sx_w). The only allowable
* return type is an Integer.
*/
status = acpi_ut_evaluate_object(device_node,
ACPI_CAST_PTR(char,
method_names[i]),
ACPI_BTYPE_INTEGER, &obj_desc);
if (ACPI_SUCCESS(status)) {
out_values[i] = (u8)obj_desc->integer.value;
/* Delete the return object */
acpi_ut_remove_reference(obj_desc);
final_status = AE_OK; /* At least one value is valid */
continue;
}
out_values[i] = ACPI_UINT8_MAX;
if (status == AE_NOT_FOUND) {
continue; /* Ignore if not found */
}
ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
"Failed %s on Device %4.4s, %s\n",
ACPI_CAST_PTR(char, method_names[i]),
acpi_ut_get_node_name(device_node),
acpi_format_exception(status)));
}
return_ACPI_STATUS(final_status);
}
| gpl-2.0 |
mialwe/mnics2 | drivers/media/video/samsung/mali/common/mali_kernel_memory_engine.c | 600 | 13738 | /*
* Copyright (C) 2010 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include "mali_kernel_common.h"
#include "mali_kernel_core.h"
#include "mali_kernel_memory_engine.h"
#include "mali_osk.h"
#include "mali_osk_list.h"
typedef struct memory_engine
{
mali_kernel_mem_address_manager * mali_address;
mali_kernel_mem_address_manager * process_address;
} memory_engine;
mali_allocation_engine mali_allocation_engine_create(mali_kernel_mem_address_manager * mali_address_manager, mali_kernel_mem_address_manager * process_address_manager)
{
memory_engine * engine;
/* Mali Address Manager need not support unmap_physical */
MALI_DEBUG_ASSERT_POINTER(mali_address_manager);
MALI_DEBUG_ASSERT_POINTER(mali_address_manager->allocate);
MALI_DEBUG_ASSERT_POINTER(mali_address_manager->release);
MALI_DEBUG_ASSERT_POINTER(mali_address_manager->map_physical);
/* Process Address Manager must support unmap_physical for OS allocation
* error path handling */
MALI_DEBUG_ASSERT_POINTER(process_address_manager);
MALI_DEBUG_ASSERT_POINTER(process_address_manager->allocate);
MALI_DEBUG_ASSERT_POINTER(process_address_manager->release);
MALI_DEBUG_ASSERT_POINTER(process_address_manager->map_physical);
MALI_DEBUG_ASSERT_POINTER(process_address_manager->unmap_physical);
engine = (memory_engine*)_mali_osk_malloc(sizeof(memory_engine));
if (NULL == engine) return NULL;
engine->mali_address = mali_address_manager;
engine->process_address = process_address_manager;
return (mali_allocation_engine)engine;
}
void mali_allocation_engine_destroy(mali_allocation_engine engine)
{
MALI_DEBUG_ASSERT_POINTER(engine);
_mali_osk_free(engine);
}
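/*
 * Set up the Mali (and, if requested, process) address space mappings for
 * the descriptor, then walk the chain of physical allocators until one
 * reports MALI_MEM_ALLOC_FINISHED. Partial allocations continue down the
 * chain with a fresh allocation tracker; on failure every partial
 * allocation and the address space reservations are rolled back.
 */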
_mali_osk_errcode_t mali_allocation_engine_allocate_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, mali_physical_memory_allocator * physical_allocators, _mali_osk_list_t *tracking_list )
{
memory_engine * engine = (memory_engine*)mem_engine;
MALI_DEBUG_ASSERT_POINTER(engine);
MALI_DEBUG_ASSERT_POINTER(descriptor);
MALI_DEBUG_ASSERT_POINTER(physical_allocators);
/* ASSERT that the list member has been initialized, even if it won't be
* used for tracking. We need it to be initialized to see if we need to
* delete it from a list in the release function. */
MALI_DEBUG_ASSERT( NULL != descriptor->list.next && NULL != descriptor->list.prev );
if (_MALI_OSK_ERR_OK == engine->mali_address->allocate(descriptor))
{
_mali_osk_errcode_t res = _MALI_OSK_ERR_OK;
if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
{
res = engine->process_address->allocate(descriptor);
}
if ( _MALI_OSK_ERR_OK == res )
{
/* address space setup OK, commit physical memory to the allocation */
mali_physical_memory_allocator * active_allocator = physical_allocators;
struct mali_physical_memory_allocation * active_allocation_tracker = &descriptor->physical_allocation;
u32 offset = 0;
while ( NULL != active_allocator )
{
switch (active_allocator->allocate(active_allocator->ctx, mem_engine, descriptor, &offset, active_allocation_tracker))
{
case MALI_MEM_ALLOC_FINISHED:
if ( NULL != tracking_list )
{
/* Insert into the memory session list */
/* ASSERT that it is not already part of a list */
MALI_DEBUG_ASSERT( _mali_osk_list_empty( &descriptor->list ) );
_mali_osk_list_add( &descriptor->list, tracking_list );
}
MALI_SUCCESS; /* all done */
case MALI_MEM_ALLOC_NONE:
/* reuse current active_allocation_tracker */
MALI_DEBUG_PRINT( 4, ("Memory Engine Allocate: No allocation on %s, resorting to %s\n",
( active_allocator->name ) ? active_allocator->name : "UNNAMED",
( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
active_allocator = active_allocator->next;
break;
case MALI_MEM_ALLOC_PARTIAL:
if (NULL != active_allocator->next)
{
/* need a new allocation tracker */
active_allocation_tracker->next = _mali_osk_calloc(1, sizeof(mali_physical_memory_allocation));
if (NULL != active_allocation_tracker->next)
{
active_allocation_tracker = active_allocation_tracker->next;
MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate: Partial allocation on %s, resorting to %s\n",
( active_allocator->name ) ? active_allocator->name : "UNNAMED",
( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
active_allocator = active_allocator->next;
break;
}
}
/* FALL THROUGH */
case MALI_MEM_ALLOC_INTERNAL_FAILURE:
active_allocator = NULL; /* end the while loop */
break;
}
}
MALI_PRINT(("Memory allocate failed, could not allocate size %d kB.\n", descriptor->size/1024));
/* allocation failure, start cleanup */
/* loop over any potential partial allocations */
active_allocation_tracker = &descriptor->physical_allocation;
while (NULL != active_allocation_tracker)
{
/* handle blank trackers which will show up during failure */
if (NULL != active_allocation_tracker->release)
{
active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
}
active_allocation_tracker = active_allocation_tracker->next;
}
/* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
{
void * buf = active_allocation_tracker;
active_allocation_tracker = active_allocation_tracker->next;
_mali_osk_free(buf);
}
/* release the address spaces */
if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
{
engine->process_address->release(descriptor);
}
}
engine->mali_address->release(descriptor);
}
MALI_ERROR(_MALI_OSK_ERR_FAULT);
}
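/*
 * Undo everything mali_allocation_engine_allocate_memory() set up: drop the
 * descriptor from any tracking list, release the address space reservations
 * and give each physical allocation back to the allocator that provided it.
 */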
void mali_allocation_engine_release_memory(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor)
{
memory_engine * engine = (memory_engine*)mem_engine;
mali_physical_memory_allocation * active_allocation_tracker;
MALI_DEBUG_ASSERT_POINTER(engine);
MALI_DEBUG_ASSERT_POINTER(descriptor);
/* Determine whether we need to remove this from a tracking list */
if ( ! _mali_osk_list_empty( &descriptor->list ) )
{
_mali_osk_list_del( &descriptor->list );
/* Clear the list for debug mode, catch use-after-free */
MALI_DEBUG_CODE( descriptor->list.next = descriptor->list.prev = NULL; )
}
engine->mali_address->release(descriptor);
active_allocation_tracker = &descriptor->physical_allocation;
while (NULL != active_allocation_tracker)
{
MALI_DEBUG_ASSERT_POINTER(active_allocation_tracker->release);
active_allocation_tracker->release(active_allocation_tracker->ctx, active_allocation_tracker->handle);
active_allocation_tracker = active_allocation_tracker->next;
}
/* free the allocation tracker objects themselves, skipping the tracker stored inside the descriptor itself */
for ( active_allocation_tracker = descriptor->physical_allocation.next; active_allocation_tracker != NULL; )
{
void * buf = active_allocation_tracker;
active_allocation_tracker = active_allocation_tracker->next;
_mali_osk_free(buf);
}
if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
{
engine->process_address->release(descriptor);
}
}
_mali_osk_errcode_t mali_allocation_engine_map_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 phys, u32 cpu_usage_adjust, u32 size)
{
_mali_osk_errcode_t err;
memory_engine * engine = (memory_engine*)mem_engine;
_mali_osk_mem_mapregion_flags_t unmap_flags = (_mali_osk_mem_mapregion_flags_t)0;
MALI_DEBUG_ASSERT_POINTER(engine);
MALI_DEBUG_ASSERT_POINTER(descriptor);
MALI_DEBUG_PRINT(7, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X\n", phys, size, offset));
MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
MALI_DEBUG_ASSERT_POINTER(engine->mali_address->map_physical);
/* Handle the process address manager first, because we may need it to
 * allocate the physical page */
if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
{
/* Handle OS-allocated specially, since an adjustment may be required */
if ( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC == phys )
{
MALI_DEBUG_ASSERT( _MALI_OSK_CPU_PAGE_SIZE == size );
/* Set flags to use on error path */
unmap_flags |= _MALI_OSK_MEM_MAPREGION_FLAG_OS_ALLOCATED_PHYSADDR;
err = engine->process_address->map_physical(descriptor, offset, &phys, size);
/* Adjust for cpu physical address to mali physical address */
phys -= cpu_usage_adjust;
}
else
{
u32 cpu_phys;
/* Adjust mali physical address to cpu physical address */
cpu_phys = phys + cpu_usage_adjust;
err = engine->process_address->map_physical(descriptor, offset, &cpu_phys, size);
}
if ( _MALI_OSK_ERR_OK != err )
{
MALI_ERROR( err );
}
}
MALI_DEBUG_PRINT(4, ("Mapping phys 0x%08X length 0x%08X at offset 0x%08X to CPUVA 0x%08X\n", phys, size, offset, (u32)(descriptor->mapping) + offset));
/* Mali address manager must use the physical address - no point in asking
* it to allocate another one for us */
MALI_DEBUG_ASSERT( MALI_MEMORY_ALLOCATION_OS_ALLOCATED_PHYSADDR_MAGIC != phys );
err = engine->mali_address->map_physical(descriptor, offset, &phys, size);
if ( _MALI_OSK_ERR_OK != err )
{
if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
{
MALI_DEBUG_PRINT( 2, ("Process address manager succeeded, but Mali Address manager failed for phys=0x%08X size=0x%08X, offset=0x%08X. Will unmap.\n", phys, size, offset));
engine->process_address->unmap_physical(descriptor, offset, size, unmap_flags);
}
MALI_ERROR( err );
}
MALI_SUCCESS;
}
void mali_allocation_engine_unmap_physical(mali_allocation_engine mem_engine, mali_memory_allocation * descriptor, u32 offset, u32 size, _mali_osk_mem_mapregion_flags_t unmap_flags )
{
memory_engine * engine = (memory_engine*)mem_engine;
MALI_DEBUG_ASSERT_POINTER(engine);
MALI_DEBUG_ASSERT_POINTER(descriptor);
MALI_DEBUG_PRINT(7, ("UnMapping length 0x%08X at offset 0x%08X\n", size, offset));
MALI_DEBUG_ASSERT_POINTER(engine->mali_address);
MALI_DEBUG_ASSERT_POINTER(engine->process_address);
if ( descriptor->flags & MALI_MEMORY_ALLOCATION_FLAG_MAP_INTO_USERSPACE )
{
/* Mandatory for the process_address manager to have an unmap function */
engine->process_address->unmap_physical( descriptor, offset, size, unmap_flags );
}
/* Optional for the mali_address manager to have an unmap function */
if ( NULL != engine->mali_address->unmap_physical )
{
engine->mali_address->unmap_physical( descriptor, offset, size, unmap_flags );
}
}
_mali_osk_errcode_t mali_allocation_engine_allocate_page_tables(mali_allocation_engine engine, mali_page_table_block * descriptor, mali_physical_memory_allocator * physical_provider)
{
mali_physical_memory_allocator * active_allocator = physical_provider;
MALI_DEBUG_ASSERT_POINTER(descriptor);
MALI_DEBUG_ASSERT_POINTER(physical_provider);
while ( NULL != active_allocator )
{
switch (active_allocator->allocate_page_table_block(active_allocator->ctx, descriptor))
{
case MALI_MEM_ALLOC_FINISHED:
MALI_SUCCESS; /* all done */
case MALI_MEM_ALLOC_NONE:
/* try next */
MALI_DEBUG_PRINT( 2, ("Memory Engine Allocate PageTables: No allocation on %s, resorting to %s\n",
( active_allocator->name ) ? active_allocator->name : "UNNAMED",
( active_allocator->next ) ? (( active_allocator->next->name )? active_allocator->next->name : "UNNAMED") : "NONE") );
active_allocator = active_allocator->next;
break;
case MALI_MEM_ALLOC_PARTIAL:
MALI_DEBUG_PRINT(1, ("Invalid return value from allocate_page_table_block call: MALI_MEM_ALLOC_PARTIAL\n"));
/* FALL THROUGH */
case MALI_MEM_ALLOC_INTERNAL_FAILURE:
MALI_DEBUG_PRINT(1, ("Aborting due to allocation failure\n"));
active_allocator = NULL; /* end the while loop */
break;
}
}
MALI_ERROR(_MALI_OSK_ERR_FAULT);
}
void mali_allocation_engine_report_allocators( mali_physical_memory_allocator * physical_provider )
{
mali_physical_memory_allocator * active_allocator = physical_provider;
MALI_DEBUG_ASSERT_POINTER(physical_provider);
MALI_DEBUG_PRINT( 1, ("Mali memory allocators will be used in this order of preference (lowest numbered first) :\n"));
while ( NULL != active_allocator )
{
if ( NULL != active_allocator->name )
{
MALI_DEBUG_PRINT( 1, ("\t%d: %s\n", active_allocator->alloc_order, active_allocator->name) );
}
else
{
MALI_DEBUG_PRINT( 1, ("\t%d: (UNNAMED ALLOCATOR)\n", active_allocator->alloc_order) );
}
active_allocator = active_allocator->next;
}
}
u32 mali_allocation_engine_memory_usage(mali_physical_memory_allocator *allocator)
{
u32 sum = 0;
while(NULL != allocator)
{
/* Only count allocators that have set up a stat function. */
if(allocator->stat)
sum += allocator->stat(allocator);
allocator = allocator->next;
}
return sum;
}
| gpl-2.0 |
KanoComputing/raspberrypi-linux | drivers/acpi/acpica/exstoren.c | 600 | 9587 | /******************************************************************************
*
* Module Name: exstoren - AML Interpreter object store support,
* Store to Node (namespace object)
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2014, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acinterp.h"
#include "amlcode.h"
#define _COMPONENT ACPI_EXECUTER
ACPI_MODULE_NAME("exstoren")
/*******************************************************************************
*
* FUNCTION: acpi_ex_resolve_object
*
* PARAMETERS: source_desc_ptr - Pointer to the source object
* target_type - Current type of the target
* walk_state - Current walk state
*
* RETURN: Status, resolved object in source_desc_ptr.
*
* DESCRIPTION: Resolve an object. If the object is a reference, dereference
* it and return the actual object in the source_desc_ptr.
*
******************************************************************************/
acpi_status
acpi_ex_resolve_object(union acpi_operand_object **source_desc_ptr,
acpi_object_type target_type,
struct acpi_walk_state *walk_state)
{
union acpi_operand_object *source_desc = *source_desc_ptr;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE(ex_resolve_object);
/* Ensure we have a Target that can be stored to */
switch (target_type) {
case ACPI_TYPE_BUFFER_FIELD:
case ACPI_TYPE_LOCAL_REGION_FIELD:
case ACPI_TYPE_LOCAL_BANK_FIELD:
case ACPI_TYPE_LOCAL_INDEX_FIELD:
/*
* These cases all require only Integers or values that
* can be converted to Integers (Strings or Buffers)
*/
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
/*
* Stores into a Field/Region or into an Integer/Buffer/String
* are all essentially the same. This case handles the
* "interchangeable" types Integer, String, and Buffer.
*/
if (source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) {
/* Resolve a reference object first */
status =
acpi_ex_resolve_to_value(source_desc_ptr,
walk_state);
if (ACPI_FAILURE(status)) {
break;
}
}
/* For copy_object, no further validation necessary */
if (walk_state->opcode == AML_COPY_OP) {
break;
}
/* Must have an Integer, Buffer, or String */
if ((source_desc->common.type != ACPI_TYPE_INTEGER) &&
(source_desc->common.type != ACPI_TYPE_BUFFER) &&
(source_desc->common.type != ACPI_TYPE_STRING) &&
!((source_desc->common.type == ACPI_TYPE_LOCAL_REFERENCE) &&
(source_desc->reference.class == ACPI_REFCLASS_TABLE))) {
/* Conversion successful but still not a valid type */
ACPI_ERROR((AE_INFO,
"Cannot assign type %s to %s (must be type Int/Str/Buf)",
acpi_ut_get_object_type_name(source_desc),
acpi_ut_get_type_name(target_type)));
status = AE_AML_OPERAND_TYPE;
}
break;
case ACPI_TYPE_LOCAL_ALIAS:
case ACPI_TYPE_LOCAL_METHOD_ALIAS:
/*
* All aliases should have been resolved earlier, during the
* operand resolution phase.
*/
ACPI_ERROR((AE_INFO, "Store into an unresolved Alias object"));
status = AE_AML_INTERNAL;
break;
case ACPI_TYPE_PACKAGE:
default:
/*
* All other types than Alias and the various Fields come here,
* including the untyped case - ACPI_TYPE_ANY.
*/
break;
}
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ex_store_object_to_object
*
* PARAMETERS: source_desc - Object to store
* dest_desc - Object to receive a copy of the source
* new_desc - New object if dest_desc is obsoleted
* walk_state - Current walk state
*
* RETURN: Status
*
* DESCRIPTION: "Store" an object to another object. This may include
* converting the source type to the target type (implicit
* conversion), and a copy of the value of the source to
* the target.
*
* The Assignment of an object to another (not named) object
* is handled here.
* The Source passed in will replace the current value (if any)
* with the input value.
*
* When storing into an object the data is converted to the
* target object type then stored in the object. This means
* that the target object type (for an initialized target) will
* not be changed by a store operation.
*
* This module allows destination types of Number, String,
* Buffer, and Package.
*
* Assumes parameters are already validated. NOTE: source_desc
* resolution (from a reference object) must be performed by
* the caller if necessary.
*
******************************************************************************/
acpi_status
acpi_ex_store_object_to_object(union acpi_operand_object *source_desc,
union acpi_operand_object *dest_desc,
union acpi_operand_object **new_desc,
struct acpi_walk_state *walk_state)
{
union acpi_operand_object *actual_src_desc;
acpi_status status = AE_OK;
ACPI_FUNCTION_TRACE_PTR(ex_store_object_to_object, source_desc);
actual_src_desc = source_desc;
if (!dest_desc) {
/*
* There is no destination object (an uninitialized node or
* package element), so we can simply copy the source object,
* creating a new destination object
*/
status =
acpi_ut_copy_iobject_to_iobject(actual_src_desc, new_desc,
walk_state);
return_ACPI_STATUS(status);
}
if (source_desc->common.type != dest_desc->common.type) {
/*
* The source type does not match the type of the destination.
* Perform the "implicit conversion" of the source to the current type
* of the target as per the ACPI specification.
*
* If no conversion performed, actual_src_desc = source_desc.
* Otherwise, actual_src_desc is a temporary object to hold the
* converted object.
*/
status = acpi_ex_convert_to_target_type(dest_desc->common.type,
source_desc,
&actual_src_desc,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
if (source_desc == actual_src_desc) {
/*
* No conversion was performed. Return the source_desc as the
* new object.
*/
*new_desc = source_desc;
return_ACPI_STATUS(AE_OK);
}
}
/*
* We now have two objects of identical types, and we can perform a
* copy of the *value* of the source object.
*/
switch (dest_desc->common.type) {
case ACPI_TYPE_INTEGER:
dest_desc->integer.value = actual_src_desc->integer.value;
/* Truncate value if we are executing from a 32-bit ACPI table */
(void)acpi_ex_truncate_for32bit_table(dest_desc);
break;
case ACPI_TYPE_STRING:
status =
acpi_ex_store_string_to_string(actual_src_desc, dest_desc);
break;
case ACPI_TYPE_BUFFER:
status =
acpi_ex_store_buffer_to_buffer(actual_src_desc, dest_desc);
break;
case ACPI_TYPE_PACKAGE:
status =
acpi_ut_copy_iobject_to_iobject(actual_src_desc, &dest_desc,
walk_state);
break;
default:
/*
* All other types come here.
*/
ACPI_WARNING((AE_INFO, "Store into type %s not implemented",
acpi_ut_get_object_type_name(dest_desc)));
status = AE_NOT_IMPLEMENTED;
break;
}
if (actual_src_desc != source_desc) {
/* Delete the intermediate (temporary) source object */
acpi_ut_remove_reference(actual_src_desc);
}
*new_desc = dest_desc;
return_ACPI_STATUS(status);
}
| gpl-2.0 |
liuzhiping/Haier_W910 | drivers/net/wireless/ath/ath9k/hw.c | 856 | 79132 | /*
* Copyright (c) 2008-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/unaligned.h>
#include "hw.h"
#include "hw-ops.h"
#include "rc.h"
#include "ar9003_mac.h"
#include "ar9003_mci.h"
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
MODULE_AUTHOR("Atheros Communications");
MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
MODULE_LICENSE("Dual BSD/GPL");
static int __init ath9k_init(void)
{
return 0;
}
module_init(ath9k_init);
static void __exit ath9k_exit(void)
{
return;
}
module_exit(ath9k_exit);
/* Private hardware callbacks */
static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
{
ath9k_hw_private_ops(ah)->init_cal_settings(ah);
}
static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
{
ath9k_hw_private_ops(ah)->init_mode_regs(ah);
}
static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
struct ath9k_channel *chan)
{
return ath9k_hw_private_ops(ah)->compute_pll_control(ah, chan);
}
static void ath9k_hw_init_mode_gain_regs(struct ath_hw *ah)
{
if (!ath9k_hw_private_ops(ah)->init_mode_gain_regs)
return;
ath9k_hw_private_ops(ah)->init_mode_gain_regs(ah);
}
static void ath9k_hw_ani_cache_ini_regs(struct ath_hw *ah)
{
/* You will not have this callback if using the old ANI */
if (!ath9k_hw_private_ops(ah)->ani_cache_ini_regs)
return;
ath9k_hw_private_ops(ah)->ani_cache_ini_regs(ah);
}
/********************/
/* Helper Functions */
/********************/
static void ath9k_hw_set_clockrate(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
struct ath_common *common = ath9k_hw_common(ah);
unsigned int clockrate;
/* AR9287 v1.3+ uses async FIFO and runs the MAC at 117 MHz */
if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah))
clockrate = 117;
else if (!ah->curchan) /* should really check for CCK instead */
clockrate = ATH9K_CLOCK_RATE_CCK;
else if (conf->channel->band == IEEE80211_BAND_2GHZ)
clockrate = ATH9K_CLOCK_RATE_2GHZ_OFDM;
else if (ah->caps.hw_caps & ATH9K_HW_CAP_FASTCLOCK)
clockrate = ATH9K_CLOCK_FAST_RATE_5GHZ_OFDM;
else
clockrate = ATH9K_CLOCK_RATE_5GHZ_OFDM;
if (conf_is_ht40(conf))
clockrate *= 2;
if (ah->curchan) {
if (IS_CHAN_HALF_RATE(ah->curchan))
clockrate /= 2;
if (IS_CHAN_QUARTER_RATE(ah->curchan))
clockrate /= 4;
}
common->clockrate = clockrate;
}
static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
{
struct ath_common *common = ath9k_hw_common(ah);
return usecs * common->clockrate;
}
bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
{
int i;
BUG_ON(timeout < AH_TIME_QUANTUM);
for (i = 0; i < (timeout / AH_TIME_QUANTUM); i++) {
if ((REG_READ(ah, reg) & mask) == val)
return true;
udelay(AH_TIME_QUANTUM);
}
ath_dbg(ath9k_hw_common(ah), ANY,
"timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
timeout, reg, REG_READ(ah, reg), mask, val);
return false;
}
EXPORT_SYMBOL(ath9k_hw_wait);
void ath9k_hw_write_array(struct ath_hw *ah, struct ar5416IniArray *array,
int column, unsigned int *writecnt)
{
int r;
ENABLE_REGWRITE_BUFFER(ah);
for (r = 0; r < array->ia_rows; r++) {
REG_WRITE(ah, INI_RA(array, r, 0),
INI_RA(array, r, column));
DO_DELAY(*writecnt);
}
REGWRITE_BUFFER_FLUSH(ah);
}
u32 ath9k_hw_reverse_bits(u32 val, u32 n)
{
u32 retval;
int i;
for (i = 0, retval = 0; i < n; i++) {
retval = (retval << 1) | (val & 1);
val >>= 1;
}
return retval;
}
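/* Example: ath9k_hw_reverse_bits(0x3, 4) returns 0xC (0b0011 -> 0b1100). */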
u16 ath9k_hw_computetxtime(struct ath_hw *ah,
u8 phy, int kbps,
u32 frameLen, u16 rateix,
bool shortPreamble)
{
u32 bitsPerSymbol, numBits, numSymbols, phyTime, txTime;
if (kbps == 0)
return 0;
switch (phy) {
case WLAN_RC_PHY_CCK:
phyTime = CCK_PREAMBLE_BITS + CCK_PLCP_BITS;
if (shortPreamble)
phyTime >>= 1;
numBits = frameLen << 3;
txTime = CCK_SIFS_TIME + phyTime + ((numBits * 1000) / kbps);
break;
case WLAN_RC_PHY_OFDM:
if (ah->curchan && IS_CHAN_QUARTER_RATE(ah->curchan)) {
bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_QUARTER) / 1000;
numBits = OFDM_PLCP_BITS + (frameLen << 3);
numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
txTime = OFDM_SIFS_TIME_QUARTER
+ OFDM_PREAMBLE_TIME_QUARTER
+ (numSymbols * OFDM_SYMBOL_TIME_QUARTER);
} else if (ah->curchan &&
IS_CHAN_HALF_RATE(ah->curchan)) {
bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME_HALF) / 1000;
numBits = OFDM_PLCP_BITS + (frameLen << 3);
numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
txTime = OFDM_SIFS_TIME_HALF +
OFDM_PREAMBLE_TIME_HALF
+ (numSymbols * OFDM_SYMBOL_TIME_HALF);
} else {
bitsPerSymbol = (kbps * OFDM_SYMBOL_TIME) / 1000;
numBits = OFDM_PLCP_BITS + (frameLen << 3);
numSymbols = DIV_ROUND_UP(numBits, bitsPerSymbol);
txTime = OFDM_SIFS_TIME + OFDM_PREAMBLE_TIME
+ (numSymbols * OFDM_SYMBOL_TIME);
}
break;
default:
ath_err(ath9k_hw_common(ah),
"Unknown phy %u (rate ix %u)\n", phy, rateix);
txTime = 0;
break;
}
return txTime;
}
EXPORT_SYMBOL(ath9k_hw_computetxtime);
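/*
 * Worked example for the full-rate OFDM branch, assuming the usual values of
 * the driver's 20 MHz constants (OFDM_PLCP_BITS = 22, OFDM_SIFS_TIME = 16 us,
 * OFDM_PREAMBLE_TIME = 20 us, OFDM_SYMBOL_TIME = 4 us): at 6 Mbps
 * (kbps = 6000) a 100-byte frame gives bitsPerSymbol = 6000 * 4 / 1000 = 24,
 * numBits = 22 + 800 = 822, numSymbols = DIV_ROUND_UP(822, 24) = 35, so
 * txTime = 16 + 20 + 35 * 4 = 176 us.
 */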
void ath9k_hw_get_channel_centers(struct ath_hw *ah,
struct ath9k_channel *chan,
struct chan_centers *centers)
{
int8_t extoff;
if (!IS_CHAN_HT40(chan)) {
centers->ctl_center = centers->ext_center =
centers->synth_center = chan->channel;
return;
}
if ((chan->chanmode == CHANNEL_A_HT40PLUS) ||
(chan->chanmode == CHANNEL_G_HT40PLUS)) {
centers->synth_center =
chan->channel + HT40_CHANNEL_CENTER_SHIFT;
extoff = 1;
} else {
centers->synth_center =
chan->channel - HT40_CHANNEL_CENTER_SHIFT;
extoff = -1;
}
centers->ctl_center =
centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT);
/* 25 MHz spacing is supported by hw but not on upper layers */
centers->ext_center =
centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT);
}
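/*
 * Example: an HT40+ channel with its primary at 5180 MHz yields
 * synth_center = 5190, ctl_center = 5180 and ext_center = 5200 (the
 * synthesizer sits between the control and extension 20 MHz halves).
 */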
/******************/
/* Chip Revisions */
/******************/
static void ath9k_hw_read_revisions(struct ath_hw *ah)
{
u32 val;
switch (ah->hw_version.devid) {
case AR5416_AR9100_DEVID:
ah->hw_version.macVersion = AR_SREV_VERSION_9100;
break;
case AR9300_DEVID_AR9330:
ah->hw_version.macVersion = AR_SREV_VERSION_9330;
if (ah->get_mac_revision) {
ah->hw_version.macRev = ah->get_mac_revision();
} else {
val = REG_READ(ah, AR_SREV);
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
}
return;
case AR9300_DEVID_AR9340:
ah->hw_version.macVersion = AR_SREV_VERSION_9340;
val = REG_READ(ah, AR_SREV);
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
return;
}
val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
if (val == 0xFF) {
val = REG_READ(ah, AR_SREV);
ah->hw_version.macVersion =
(val & AR_SREV_VERSION2) >> AR_SREV_TYPE2_S;
ah->hw_version.macRev = MS(val, AR_SREV_REVISION2);
if (AR_SREV_9462(ah))
ah->is_pciexpress = true;
else
ah->is_pciexpress = (val &
AR_SREV_TYPE2_HOST_MODE) ? 0 : 1;
} else {
if (!AR_SREV_9100(ah))
ah->hw_version.macVersion = MS(val, AR_SREV_VERSION);
ah->hw_version.macRev = val & AR_SREV_REVISION;
if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCIE)
ah->is_pciexpress = true;
}
}
/************************************/
/* HW Attach, Detach, Init Routines */
/************************************/
static void ath9k_hw_disablepcie(struct ath_hw *ah)
{
if (!AR_SREV_5416(ah))
return;
REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
REG_WRITE(ah, AR_PCIE_SERDES, 0x57160824);
REG_WRITE(ah, AR_PCIE_SERDES, 0x25980579);
REG_WRITE(ah, AR_PCIE_SERDES, 0x00000000);
REG_WRITE(ah, AR_PCIE_SERDES, 0x1aaabe40);
REG_WRITE(ah, AR_PCIE_SERDES, 0xbe105554);
REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
}
static void ath9k_hw_aspm_init(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
if (common->bus_ops->aspm_init)
common->bus_ops->aspm_init(common);
}
/* This should work for all families including legacy */
static bool ath9k_hw_chip_test(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 regAddr[2] = { AR_STA_ID0 };
u32 regHold[2];
static const u32 patternData[4] = {
0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999
};
int i, j, loop_max;
if (!AR_SREV_9300_20_OR_LATER(ah)) {
loop_max = 2;
regAddr[1] = AR_PHY_BASE + (8 << 2);
} else
loop_max = 1;
for (i = 0; i < loop_max; i++) {
u32 addr = regAddr[i];
u32 wrData, rdData;
regHold[i] = REG_READ(ah, addr);
for (j = 0; j < 0x100; j++) {
wrData = (j << 16) | j;
REG_WRITE(ah, addr, wrData);
rdData = REG_READ(ah, addr);
if (rdData != wrData) {
ath_err(common,
"address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
addr, wrData, rdData);
return false;
}
}
for (j = 0; j < 4; j++) {
wrData = patternData[j];
REG_WRITE(ah, addr, wrData);
rdData = REG_READ(ah, addr);
if (wrData != rdData) {
ath_err(common,
"address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
addr, wrData, rdData);
return false;
}
}
REG_WRITE(ah, regAddr[i], regHold[i]);
}
udelay(100);
return true;
}
static void ath9k_hw_init_config(struct ath_hw *ah)
{
int i;
ah->config.dma_beacon_response_time = 2;
ah->config.sw_beacon_response_time = 10;
ah->config.additional_swba_backoff = 0;
ah->config.ack_6mb = 0x0;
ah->config.cwm_ignore_extcca = 0;
ah->config.pcie_clock_req = 0;
ah->config.pcie_waen = 0;
ah->config.analog_shiftreg = 1;
ah->config.enable_ani = true;
for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
ah->config.spurchans[i][0] = AR_NO_SPUR;
ah->config.spurchans[i][1] = AR_NO_SPUR;
}
/* PAPRD needs some more work to be enabled */
ah->config.paprd_disable = 1;
ah->config.rx_intr_mitigation = true;
ah->config.pcieSerDesWrite = true;
/*
* We need this for PCI devices only (Cardbus, PCI, miniPCI)
* _and_ if on non-uniprocessor systems (Multiprocessor/HT).
* This means we use it for all AR5416 devices, and the few
* minor PCI AR9280 devices out there.
*
* Serialization is required because these devices do not handle
* well the case of two concurrent reads/writes due to the latency
* involved. During one read/write another read/write can be issued
* on another CPU while the previous read/write may still be working
* on our hardware. If we hit this case, the hardware gets stuck in a loop.
* We prevent this by serializing reads and writes.
*
* This issue is not present on PCI-Express devices or pre-AR5416
* devices (legacy, 802.11abg).
*/
if (num_possible_cpus() > 1)
ah->config.serialize_regmode = SER_REG_MODE_AUTO;
}
static void ath9k_hw_init_defaults(struct ath_hw *ah)
{
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
regulatory->country_code = CTRY_DEFAULT;
regulatory->power_limit = MAX_RATE_POWER;
ah->hw_version.magic = AR5416_MAGIC;
ah->hw_version.subvendorid = 0;
ah->atim_window = 0;
ah->sta_id1_defaults =
AR_STA_ID1_CRPT_MIC_ENABLE |
AR_STA_ID1_MCAST_KSRCH;
if (AR_SREV_9100(ah))
ah->sta_id1_defaults |= AR_STA_ID1_AR9100_BA_FIX;
ah->enable_32kHz_clock = DONT_USE_32KHZ;
ah->slottime = ATH9K_SLOT_TIME_9;
ah->globaltxtimeout = (u32) -1;
ah->power_mode = ATH9K_PM_UNDEFINED;
ah->htc_reset_init = true;
}
static int ath9k_hw_init_macaddr(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 sum;
int i;
u16 eeval;
static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
sum = 0;
for (i = 0; i < 3; i++) {
eeval = ah->eep_ops->get_eeprom(ah, EEP_MAC[i]);
sum += eeval;
common->macaddr[2 * i] = eeval >> 8;
common->macaddr[2 * i + 1] = eeval & 0xff;
}
if (sum == 0 || sum == 0xffff * 3)
return -EADDRNOTAVAIL;
return 0;
}
static int ath9k_hw_post_init(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
int ecode;
if (common->bus_ops->ath_bus_type != ATH_USB) {
if (!ath9k_hw_chip_test(ah))
return -ENODEV;
}
if (!AR_SREV_9300_20_OR_LATER(ah)) {
ecode = ar9002_hw_rf_claim(ah);
if (ecode != 0)
return ecode;
}
ecode = ath9k_hw_eeprom_init(ah);
if (ecode != 0)
return ecode;
ath_dbg(ath9k_hw_common(ah), CONFIG, "Eeprom VER: %d, REV: %d\n",
ah->eep_ops->get_eeprom_ver(ah),
ah->eep_ops->get_eeprom_rev(ah));
ecode = ath9k_hw_rf_alloc_ext_banks(ah);
if (ecode) {
ath_err(ath9k_hw_common(ah),
"Failed allocating banks for external radio\n");
ath9k_hw_rf_free_ext_banks(ah);
return ecode;
}
if (ah->config.enable_ani) {
ath9k_hw_ani_setup(ah);
ath9k_hw_ani_init(ah);
}
return 0;
}
static void ath9k_hw_attach_ops(struct ath_hw *ah)
{
if (AR_SREV_9300_20_OR_LATER(ah))
ar9003_hw_attach_ops(ah);
else
ar9002_hw_attach_ops(ah);
}
/* Called for all hardware families */
static int __ath9k_hw_init(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
int r = 0;
ath9k_hw_read_revisions(ah);
/*
* Read back AR_WA into a permanent copy and set bits 14 and 17.
* We need to do this to avoid RMW of this register. We cannot
* read the reg when chip is asleep.
*/
ah->WARegVal = REG_READ(ah, AR_WA);
ah->WARegVal |= (AR_WA_D3_L1_DISABLE |
AR_WA_ASPM_TIMER_BASED_DISABLE);
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
ath_err(common, "Couldn't reset chip\n");
return -EIO;
}
if (AR_SREV_9462(ah))
ah->WARegVal &= ~AR_WA_D3_L1_DISABLE;
ath9k_hw_init_defaults(ah);
ath9k_hw_init_config(ah);
ath9k_hw_attach_ops(ah);
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
ath_err(common, "Couldn't wakeup chip\n");
return -EIO;
}
if (NR_CPUS > 1 && ah->config.serialize_regmode == SER_REG_MODE_AUTO) {
if (ah->hw_version.macVersion == AR_SREV_VERSION_5416_PCI ||
((AR_SREV_9160(ah) || AR_SREV_9280(ah)) &&
!ah->is_pciexpress)) {
ah->config.serialize_regmode =
SER_REG_MODE_ON;
} else {
ah->config.serialize_regmode =
SER_REG_MODE_OFF;
}
}
ath_dbg(common, RESET, "serialize_regmode is %d\n",
ah->config.serialize_regmode);
if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD >> 1;
else
ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
switch (ah->hw_version.macVersion) {
case AR_SREV_VERSION_5416_PCI:
case AR_SREV_VERSION_5416_PCIE:
case AR_SREV_VERSION_9160:
case AR_SREV_VERSION_9100:
case AR_SREV_VERSION_9280:
case AR_SREV_VERSION_9285:
case AR_SREV_VERSION_9287:
case AR_SREV_VERSION_9271:
case AR_SREV_VERSION_9300:
case AR_SREV_VERSION_9330:
case AR_SREV_VERSION_9485:
case AR_SREV_VERSION_9340:
case AR_SREV_VERSION_9462:
break;
default:
ath_err(common,
"Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
ah->hw_version.macVersion, ah->hw_version.macRev);
return -EOPNOTSUPP;
}
if (AR_SREV_9271(ah) || AR_SREV_9100(ah) || AR_SREV_9340(ah) ||
AR_SREV_9330(ah))
ah->is_pciexpress = false;
ah->hw_version.phyRev = REG_READ(ah, AR_PHY_CHIP_ID);
ath9k_hw_init_cal_settings(ah);
ah->ani_function = ATH9K_ANI_ALL;
if (AR_SREV_9280_20_OR_LATER(ah) && !AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
if (!AR_SREV_9300_20_OR_LATER(ah))
ah->ani_function &= ~ATH9K_ANI_MRC_CCK;
/* disable ANI for 9340 */
if (AR_SREV_9340(ah))
ah->config.enable_ani = false;
ath9k_hw_init_mode_regs(ah);
if (!ah->is_pciexpress)
ath9k_hw_disablepcie(ah);
r = ath9k_hw_post_init(ah);
if (r)
return r;
ath9k_hw_init_mode_gain_regs(ah);
r = ath9k_hw_fill_cap_info(ah);
if (r)
return r;
if (ah->is_pciexpress)
ath9k_hw_aspm_init(ah);
r = ath9k_hw_init_macaddr(ah);
if (r) {
ath_err(common, "Failed to initialize MAC address\n");
return r;
}
if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
ah->tx_trig_level = (AR_FTRIG_256B >> AR_FTRIG_S);
else
ah->tx_trig_level = (AR_FTRIG_512B >> AR_FTRIG_S);
if (AR_SREV_9330(ah))
ah->bb_watchdog_timeout_ms = 85;
else
ah->bb_watchdog_timeout_ms = 25;
common->state = ATH_HW_INITIALIZED;
return 0;
}
int ath9k_hw_init(struct ath_hw *ah)
{
int ret;
struct ath_common *common = ath9k_hw_common(ah);
/* These are all the AR5008/AR9001/AR9002 hardware family of chipsets */
switch (ah->hw_version.devid) {
case AR5416_DEVID_PCI:
case AR5416_DEVID_PCIE:
case AR5416_AR9100_DEVID:
case AR9160_DEVID_PCI:
case AR9280_DEVID_PCI:
case AR9280_DEVID_PCIE:
case AR9285_DEVID_PCIE:
case AR9287_DEVID_PCI:
case AR9287_DEVID_PCIE:
case AR2427_DEVID_PCIE:
case AR9300_DEVID_PCIE:
case AR9300_DEVID_AR9485_PCIE:
case AR9300_DEVID_AR9330:
case AR9300_DEVID_AR9340:
case AR9300_DEVID_AR9580:
case AR9300_DEVID_AR9462:
break;
default:
if (common->bus_ops->ath_bus_type == ATH_USB)
break;
ath_err(common, "Hardware device ID 0x%04x not supported\n",
ah->hw_version.devid);
return -EOPNOTSUPP;
}
ret = __ath9k_hw_init(ah);
if (ret) {
ath_err(common,
"Unable to initialize hardware; initialization status: %d\n",
ret);
return ret;
}
return 0;
}
EXPORT_SYMBOL(ath9k_hw_init);
static void ath9k_hw_init_qos(struct ath_hw *ah)
{
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_MIC_QOS_CONTROL, 0x100aa);
REG_WRITE(ah, AR_MIC_QOS_SELECT, 0x3210);
REG_WRITE(ah, AR_QOS_NO_ACK,
SM(2, AR_QOS_NO_ACK_TWO_BIT) |
SM(5, AR_QOS_NO_ACK_BIT_OFF) |
SM(0, AR_QOS_NO_ACK_BYTE_OFF));
REG_WRITE(ah, AR_TXOP_X, AR_TXOP_X_VAL);
REG_WRITE(ah, AR_TXOP_0_3, 0xFFFFFFFF);
REG_WRITE(ah, AR_TXOP_4_7, 0xFFFFFFFF);
REG_WRITE(ah, AR_TXOP_8_11, 0xFFFFFFFF);
REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
REGWRITE_BUFFER_FLUSH(ah);
}
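/*
* Take a PLL square-sum DVC measurement: clear and re-set the
* DO_MEAS bit to start a measurement, busy-wait for MEAS_DONE in
* PLL4, then extract the result field from PLL3.
*/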
u32 ar9003_get_pll_sqsum_dvc(struct ath_hw *ah)
{
REG_CLR_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
udelay(100);
REG_SET_BIT(ah, PLL3, PLL3_DO_MEAS_MASK);
while ((REG_READ(ah, PLL4) & PLL4_MEAS_DONE) == 0)
udelay(100);
return (REG_READ(ah, PLL3) & SQSUM_DVC_MASK) >> 3;
}
EXPORT_SYMBOL(ar9003_get_pll_sqsum_dvc);
static void ath9k_hw_init_pll(struct ath_hw *ah,
struct ath9k_channel *chan)
{
u32 pll;
if (AR_SREV_9485(ah)) {
/* program BB PLL ki and kd value, ki=0x4, kd=0x40 */
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
AR_CH0_BB_DPLL2_PLL_PWD, 0x1);
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
AR_CH0_DPLL2_KD, 0x40);
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
AR_CH0_DPLL2_KI, 0x4);
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
AR_CH0_BB_DPLL1_REFDIV, 0x5);
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
AR_CH0_BB_DPLL1_NINI, 0x58);
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL1,
AR_CH0_BB_DPLL1_NFRAC, 0x0);
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
AR_CH0_BB_DPLL2_OUTDIV, 0x1);
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
AR_CH0_BB_DPLL2_LOCAL_PLL, 0x1);
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
AR_CH0_BB_DPLL2_EN_NEGTRIG, 0x1);
/* program BB PLL phase_shift to 0x6 */
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x6);
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2,
AR_CH0_BB_DPLL2_PLL_PWD, 0x0);
udelay(1000);
} else if (AR_SREV_9330(ah)) {
u32 ddr_dpll2, pll_control2, kd;
if (ah->is_clk_25mhz) {
ddr_dpll2 = 0x18e82f01;
pll_control2 = 0xe04a3d;
kd = 0x1d;
} else {
ddr_dpll2 = 0x19e82f01;
pll_control2 = 0x886666;
kd = 0x3d;
}
/* program DDR PLL ki and kd value */
REG_WRITE(ah, AR_CH0_DDR_DPLL2, ddr_dpll2);
/* program DDR PLL phase_shift */
REG_RMW_FIELD(ah, AR_CH0_DDR_DPLL3,
AR_CH0_DPLL3_PHASE_SHIFT, 0x1);
REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
udelay(1000);
/* program refdiv, nint, frac to RTC register */
REG_WRITE(ah, AR_RTC_PLL_CONTROL2, pll_control2);
/* program BB PLL kd and ki value */
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KD, kd);
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL2, AR_CH0_DPLL2_KI, 0x06);
/* program BB PLL phase_shift */
REG_RMW_FIELD(ah, AR_CH0_BB_DPLL3,
AR_CH0_BB_DPLL3_PHASE_SHIFT, 0x1);
} else if (AR_SREV_9340(ah)) {
u32 regval, pll2_divint, pll2_divfrac, refdiv;
REG_WRITE(ah, AR_RTC_PLL_CONTROL, 0x1142c);
udelay(1000);
REG_SET_BIT(ah, AR_PHY_PLL_MODE, 0x1 << 16);
udelay(100);
if (ah->is_clk_25mhz) {
pll2_divint = 0x54;
pll2_divfrac = 0x1eb85;
refdiv = 3;
} else {
pll2_divint = 88;
pll2_divfrac = 0;
refdiv = 5;
}
regval = REG_READ(ah, AR_PHY_PLL_MODE);
regval |= (0x1 << 16);
REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
udelay(100);
REG_WRITE(ah, AR_PHY_PLL_CONTROL, (refdiv << 27) |
(pll2_divint << 18) | pll2_divfrac);
udelay(100);
regval = REG_READ(ah, AR_PHY_PLL_MODE);
regval = (regval & 0x80071fff) | (0x1 << 30) | (0x1 << 13) |
(0x4 << 26) | (0x18 << 19);
REG_WRITE(ah, AR_PHY_PLL_MODE, regval);
REG_WRITE(ah, AR_PHY_PLL_MODE,
REG_READ(ah, AR_PHY_PLL_MODE) & 0xfffeffff);
udelay(1000);
}
pll = ath9k_hw_compute_pll_control(ah, chan);
REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
if (AR_SREV_9485(ah) || AR_SREV_9340(ah) || AR_SREV_9330(ah))
udelay(1000);
/* Switch the core clock for ar9271 to 117Mhz */
if (AR_SREV_9271(ah)) {
udelay(500);
REG_WRITE(ah, 0x50040, 0x304);
}
udelay(RTC_PLL_SETTLE_DELAY);
REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
if (AR_SREV_9340(ah)) {
if (ah->is_clk_25mhz) {
REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
} else {
REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
}
udelay(100);
}
}
static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
enum nl80211_iftype opmode)
{
u32 sync_default = AR_INTR_SYNC_DEFAULT;
u32 imr_reg = AR_IMR_TXERR |
AR_IMR_TXURN |
AR_IMR_RXERR |
AR_IMR_RXORN |
AR_IMR_BCNMISC;
if (AR_SREV_9340(ah))
sync_default &= ~AR_INTR_SYNC_HOST1_FATAL;
if (AR_SREV_9300_20_OR_LATER(ah)) {
imr_reg |= AR_IMR_RXOK_HP;
if (ah->config.rx_intr_mitigation)
imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
else
imr_reg |= AR_IMR_RXOK_LP;
} else {
if (ah->config.rx_intr_mitigation)
imr_reg |= AR_IMR_RXINTM | AR_IMR_RXMINTR;
else
imr_reg |= AR_IMR_RXOK;
}
if (ah->config.tx_intr_mitigation)
imr_reg |= AR_IMR_TXINTM | AR_IMR_TXMINTR;
else
imr_reg |= AR_IMR_TXOK;
if (opmode == NL80211_IFTYPE_AP)
imr_reg |= AR_IMR_MIB;
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_IMR, imr_reg);
ah->imrs2_reg |= AR_IMR_S2_GTT;
REG_WRITE(ah, AR_IMR_S2, ah->imrs2_reg);
if (!AR_SREV_9100(ah)) {
REG_WRITE(ah, AR_INTR_SYNC_CAUSE, 0xFFFFFFFF);
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, sync_default);
REG_WRITE(ah, AR_INTR_SYNC_MASK, 0);
}
REGWRITE_BUFFER_FLUSH(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_INTR_PRIO_ASYNC_ENABLE, 0);
REG_WRITE(ah, AR_INTR_PRIO_ASYNC_MASK, 0);
REG_WRITE(ah, AR_INTR_PRIO_SYNC_ENABLE, 0);
REG_WRITE(ah, AR_INTR_PRIO_SYNC_MASK, 0);
}
}
static void ath9k_hw_set_sifs_time(struct ath_hw *ah, u32 us)
{
u32 val = ath9k_hw_mac_to_clks(ah, us - 2);
val = min(val, (u32) 0xFFFF);
REG_WRITE(ah, AR_D_GBL_IFS_SIFS, val);
}
static void ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
{
u32 val = ath9k_hw_mac_to_clks(ah, us);
val = min(val, (u32) 0xFFFF);
REG_WRITE(ah, AR_D_GBL_IFS_SLOT, val);
}
static void ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
{
u32 val = ath9k_hw_mac_to_clks(ah, us);
val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_ACK));
REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_ACK, val);
}
static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
{
u32 val = ath9k_hw_mac_to_clks(ah, us);
val = min(val, (u32) MS(0xFFFFFFFF, AR_TIME_OUT_CTS));
REG_RMW_FIELD(ah, AR_TIME_OUT, AR_TIME_OUT_CTS, val);
}
static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
{
if (tu > 0xFFFF) {
ath_dbg(ath9k_hw_common(ah), XMIT, "bad global tx timeout %u\n",
tu);
ah->globaltxtimeout = (u32) -1;
return false;
} else {
REG_RMW_FIELD(ah, AR_GTXTO, AR_GTXTO_TIMEOUT_LIMIT, tu);
ah->globaltxtimeout = tu;
return true;
}
}
void ath9k_hw_init_global_settings(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &common->hw->conf;
const struct ath9k_channel *chan = ah->curchan;
int acktimeout, ctstimeout;
int slottime;
int sifstime;
int rx_lat = 0, tx_lat = 0, eifs = 0;
u32 reg;
ath_dbg(ath9k_hw_common(ah), RESET, "ah->misc_mode 0x%x\n",
ah->misc_mode);
if (!chan)
return;
if (ah->misc_mode != 0)
REG_SET_BIT(ah, AR_PCU_MISC, ah->misc_mode);
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
rx_lat = 41;
else
rx_lat = 37;
tx_lat = 54;
if (IS_CHAN_HALF_RATE(chan)) {
eifs = 175;
rx_lat *= 2;
tx_lat *= 2;
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
tx_lat += 11;
slottime = 13;
sifstime = 32;
} else if (IS_CHAN_QUARTER_RATE(chan)) {
eifs = 340;
rx_lat = (rx_lat * 4) - 1;
tx_lat *= 4;
if (IS_CHAN_A_FAST_CLOCK(ah, chan))
tx_lat += 22;
slottime = 21;
sifstime = 64;
} else {
if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
eifs = AR_D_GBL_IFS_EIFS_ASYNC_FIFO;
reg = AR_USEC_ASYNC_FIFO;
} else {
eifs = REG_READ(ah, AR_D_GBL_IFS_EIFS)/
common->clockrate;
reg = REG_READ(ah, AR_USEC);
}
rx_lat = MS(reg, AR_USEC_RX_LAT);
tx_lat = MS(reg, AR_USEC_TX_LAT);
slottime = ah->slottime;
if (IS_CHAN_5GHZ(chan))
sifstime = 16;
else
sifstime = 10;
}
/* As defined by IEEE 802.11-2007 17.3.8.6 */
acktimeout = slottime + sifstime + 3 * ah->coverage_class;
ctstimeout = acktimeout;
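/* e.g. with the 5 GHz defaults above (slottime 9us, SIFS 16us) and
* coverage class 0 this comes to 25us, before the 2.4 GHz workaround
* below is applied. */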
/*
* Workaround for early ACK timeouts, add an offset to match the
* initval's 64us ack timeout value. Use 48us for the CTS timeout.
* This was initially only meant to work around an issue with delayed
* BA frames in some implementations, but it has been found to fix ACK
* timeout issues in other cases as well.
*/
if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) {
acktimeout += 64 - sifstime - ah->slottime;
ctstimeout += 48 - sifstime - ah->slottime;
}
ath9k_hw_set_sifs_time(ah, sifstime);
ath9k_hw_setslottime(ah, slottime);
ath9k_hw_set_ack_timeout(ah, acktimeout);
ath9k_hw_set_cts_timeout(ah, ctstimeout);
if (ah->globaltxtimeout != (u32) -1)
ath9k_hw_set_global_txtimeout(ah, ah->globaltxtimeout);
REG_WRITE(ah, AR_D_GBL_IFS_EIFS, ath9k_hw_mac_to_clks(ah, eifs));
REG_RMW(ah, AR_USEC,
(common->clockrate - 1) |
SM(rx_lat, AR_USEC_RX_LAT) |
SM(tx_lat, AR_USEC_TX_LAT),
AR_USEC_TX_LAT | AR_USEC_RX_LAT | AR_USEC_USEC);
}
EXPORT_SYMBOL(ath9k_hw_init_global_settings);
void ath9k_hw_deinit(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
if (common->state < ATH_HW_INITIALIZED)
goto free_hw;
ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
free_hw:
ath9k_hw_rf_free_ext_banks(ah);
}
EXPORT_SYMBOL(ath9k_hw_deinit);
/*******/
/* INI */
/*******/
u32 ath9k_regd_get_ctl(struct ath_regulatory *reg, struct ath9k_channel *chan)
{
u32 ctl = ath_regd_get_band_ctl(reg, chan->chan->band);
if (IS_CHAN_B(chan))
ctl |= CTL_11B;
else if (IS_CHAN_G(chan))
ctl |= CTL_11G;
else
ctl |= CTL_11A;
return ctl;
}
/****************************************/
/* Reset and Channel Switching Routines */
/****************************************/
static inline void ath9k_hw_set_dma(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
ENABLE_REGWRITE_BUFFER(ah);
/*
* set AHB_MODE not to do cacheline prefetches
*/
if (!AR_SREV_9300_20_OR_LATER(ah))
REG_SET_BIT(ah, AR_AHB_MODE, AR_AHB_PREFETCH_RD_EN);
/*
* let mac dma reads be in 128 byte chunks
*/
REG_RMW(ah, AR_TXCFG, AR_TXCFG_DMASZ_128B, AR_TXCFG_DMASZ_MASK);
REGWRITE_BUFFER_FLUSH(ah);
/*
* Restore TX Trigger Level to its pre-reset value.
* The initial value depends on whether aggregation is enabled, and is
* adjusted whenever underruns are detected.
*/
if (!AR_SREV_9300_20_OR_LATER(ah))
REG_RMW_FIELD(ah, AR_TXCFG, AR_FTRIG, ah->tx_trig_level);
ENABLE_REGWRITE_BUFFER(ah);
/*
* let mac dma writes be in 128 byte chunks
*/
REG_RMW(ah, AR_RXCFG, AR_RXCFG_DMASZ_128B, AR_RXCFG_DMASZ_MASK);
/*
* Setup receive FIFO threshold to hold off TX activities
*/
REG_WRITE(ah, AR_RXFIFO_CFG, 0x200);
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_HP, 0x1);
REG_RMW_FIELD(ah, AR_RXBP_THRESH, AR_RXBP_THRESH_LP, 0x1);
ath9k_hw_set_rx_bufsize(ah, common->rx_bufsize -
ah->caps.rx_status_len);
}
/*
* reduce the number of usable entries in PCU TXBUF to avoid
* wrap around issues.
*/
if (AR_SREV_9285(ah)) {
/* For AR9285 the number of FIFOs is reduced to half,
* so set the usable tx buf size to half as well to
* avoid data/delimiter underruns.
*/
REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
AR_9285_PCU_TXBUF_CTRL_USABLE_SIZE);
} else if (!AR_SREV_9271(ah)) {
REG_WRITE(ah, AR_PCU_TXBUF_CTRL,
AR_PCU_TXBUF_CTRL_USABLE_SIZE);
}
REGWRITE_BUFFER_FLUSH(ah);
if (AR_SREV_9300_20_OR_LATER(ah))
ath9k_hw_reset_txstatus_ring(ah);
}
static void ath9k_hw_set_operating_mode(struct ath_hw *ah, int opmode)
{
u32 mask = AR_STA_ID1_STA_AP | AR_STA_ID1_ADHOC;
u32 set = AR_STA_ID1_KSRCH_MODE;
switch (opmode) {
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
set |= AR_STA_ID1_ADHOC;
REG_SET_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
case NL80211_IFTYPE_AP:
set |= AR_STA_ID1_STA_AP;
/* fall through */
case NL80211_IFTYPE_STATION:
REG_CLR_BIT(ah, AR_CFG, AR_CFG_AP_ADHOC_INDICATION);
break;
default:
if (!ah->is_monitoring)
set = 0;
break;
}
REG_RMW(ah, AR_STA_ID1, set, mask);
}
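/*
* Split a scaled delta-slope coefficient into the mantissa/exponent
* pair the baseband registers expect: find the most significant set
* bit, derive the exponent from its position, then round the
* mantissa by adding half of the fraction that is about to be
* shifted out. E.g. (assuming the usual COEF_SCALE_S of 24) a
* coef_scaled of 1 << 24 yields coef_exp = 14, a mantissa of 0x4000
* and a stored exponent of -2.
*/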
void ath9k_hw_get_delta_slope_vals(struct ath_hw *ah, u32 coef_scaled,
u32 *coef_mantissa, u32 *coef_exponent)
{
u32 coef_exp, coef_man;
for (coef_exp = 31; coef_exp > 0; coef_exp--)
if ((coef_scaled >> coef_exp) & 0x1)
break;
coef_exp = 14 - (coef_exp - COEF_SCALE_S);
coef_man = coef_scaled + (1 << (COEF_SCALE_S - coef_exp - 1));
*coef_mantissa = coef_man >> (COEF_SCALE_S - coef_exp);
*coef_exponent = coef_exp - 16;
}
static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
{
u32 rst_flags;
u32 tmpReg;
if (AR_SREV_9100(ah)) {
REG_RMW_FIELD(ah, AR_RTC_DERIVED_CLK,
AR_RTC_DERIVED_CLK_PERIOD, 1);
(void)REG_READ(ah, AR_RTC_DERIVED_CLK);
}
ENABLE_REGWRITE_BUFFER(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_WA, ah->WARegVal);
udelay(10);
}
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
if (AR_SREV_9100(ah)) {
rst_flags = AR_RTC_RC_MAC_WARM | AR_RTC_RC_MAC_COLD |
AR_RTC_RC_COLD_RESET | AR_RTC_RC_WARM_RESET;
} else {
tmpReg = REG_READ(ah, AR_INTR_SYNC_CAUSE);
if (tmpReg &
(AR_INTR_SYNC_LOCAL_TIMEOUT |
AR_INTR_SYNC_RADM_CPL_TIMEOUT)) {
u32 val;
REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
val = AR_RC_HOSTIF;
if (!AR_SREV_9300_20_OR_LATER(ah))
val |= AR_RC_AHB;
REG_WRITE(ah, AR_RC, val);
} else if (!AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
rst_flags = AR_RTC_RC_MAC_WARM;
if (type == ATH9K_RESET_COLD)
rst_flags |= AR_RTC_RC_MAC_COLD;
}
if (AR_SREV_9330(ah)) {
int npend = 0;
int i;
/* AR9330 WAR:
* call external reset function to reset WMAC if:
* - doing a cold reset
* - we have pending frames in the TX queues
*/
for (i = 0; i < AR_NUM_QCU; i++) {
npend = ath9k_hw_numtxpending(ah, i);
if (npend)
break;
}
if (ah->external_reset &&
(npend || type == ATH9K_RESET_COLD)) {
int reset_err = 0;
ath_dbg(ath9k_hw_common(ah), RESET,
"reset MAC via external reset\n");
reset_err = ah->external_reset();
if (reset_err) {
ath_err(ath9k_hw_common(ah),
"External reset failed, err=%d\n",
reset_err);
return false;
}
REG_WRITE(ah, AR_RTC_RESET, 1);
}
}
REG_WRITE(ah, AR_RTC_RC, rst_flags);
REGWRITE_BUFFER_FLUSH(ah);
udelay(50);
REG_WRITE(ah, AR_RTC_RC, 0);
if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
ath_dbg(ath9k_hw_common(ah), RESET, "RTC stuck in MAC reset\n");
return false;
}
if (!AR_SREV_9100(ah))
REG_WRITE(ah, AR_RC, 0);
if (AR_SREV_9100(ah))
udelay(50);
return true;
}
static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
{
ENABLE_REGWRITE_BUFFER(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_WA, ah->WARegVal);
udelay(10);
}
REG_WRITE(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN |
AR_RTC_FORCE_WAKE_ON_INT);
if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB);
REG_WRITE(ah, AR_RTC_RESET, 0);
REGWRITE_BUFFER_FLUSH(ah);
if (!AR_SREV_9300_20_OR_LATER(ah))
udelay(2);
if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, 0);
REG_WRITE(ah, AR_RTC_RESET, 1);
if (!ath9k_hw_wait(ah,
AR_RTC_STATUS,
AR_RTC_STATUS_M,
AR_RTC_STATUS_ON,
AH_WAIT_TIMEOUT)) {
ath_dbg(ath9k_hw_common(ah), RESET, "RTC not waking up\n");
return false;
}
return ath9k_hw_set_reset(ah, ATH9K_RESET_WARM);
}
static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
{
bool ret = false;
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_WA, ah->WARegVal);
udelay(10);
}
REG_WRITE(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN | AR_RTC_FORCE_WAKE_ON_INT);
switch (type) {
case ATH9K_RESET_POWER_ON:
ret = ath9k_hw_set_reset_power_on(ah);
break;
case ATH9K_RESET_WARM:
case ATH9K_RESET_COLD:
ret = ath9k_hw_set_reset(ah, type);
break;
default:
break;
}
if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
return ret;
}
static bool ath9k_hw_chip_reset(struct ath_hw *ah,
struct ath9k_channel *chan)
{
int reset_type = ATH9K_RESET_WARM;
if (AR_SREV_9280(ah)) {
if (ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL))
reset_type = ATH9K_RESET_POWER_ON;
else
reset_type = ATH9K_RESET_COLD;
}
if (!ath9k_hw_set_reset_reg(ah, reset_type))
return false;
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return false;
ah->chip_fullsleep = false;
ath9k_hw_init_pll(ah, chan);
ath9k_hw_set_rfmode(ah, chan);
return true;
}
static bool ath9k_hw_channel_change(struct ath_hw *ah,
struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 qnum;
int r;
bool edma = !!(ah->caps.hw_caps & ATH9K_HW_CAP_EDMA);
bool band_switch, mode_diff;
u8 ini_reloaded;
band_switch = (chan->channelFlags & (CHANNEL_2GHZ | CHANNEL_5GHZ)) !=
(ah->curchan->channelFlags & (CHANNEL_2GHZ |
CHANNEL_5GHZ));
mode_diff = (chan->chanmode != ah->curchan->chanmode);
for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
if (ath9k_hw_numtxpending(ah, qnum)) {
ath_dbg(common, QUEUE,
"Transmit frames pending on queue %d\n", qnum);
return false;
}
}
if (!ath9k_hw_rfbus_req(ah)) {
ath_err(common, "Could not kill baseband RX\n");
return false;
}
if (edma && (band_switch || mode_diff)) {
ath9k_hw_mark_phy_inactive(ah);
udelay(5);
ath9k_hw_init_pll(ah, NULL);
if (ath9k_hw_fast_chan_change(ah, chan, &ini_reloaded)) {
ath_err(common, "Failed to do fast channel change\n");
return false;
}
}
ath9k_hw_set_channel_regs(ah, chan);
r = ath9k_hw_rf_set_freq(ah, chan);
if (r) {
ath_err(common, "Failed to set channel\n");
return false;
}
ath9k_hw_set_clockrate(ah);
ath9k_hw_apply_txpower(ah, chan, false);
ath9k_hw_rfbus_done(ah);
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
if (edma && (band_switch || mode_diff)) {
ah->ah_flags |= AH_FASTCC;
if (band_switch || ini_reloaded)
ah->eep_ops->set_board_values(ah, chan);
ath9k_hw_init_bb(ah, chan);
if (band_switch || ini_reloaded)
ath9k_hw_init_cal(ah, chan);
ah->ah_flags &= ~AH_FASTCC;
}
return true;
}
static void ath9k_hw_apply_gpio_override(struct ath_hw *ah)
{
u32 gpio_mask = ah->gpio_mask;
int i;
for (i = 0; gpio_mask; i++, gpio_mask >>= 1) {
if (!(gpio_mask & 1))
continue;
ath9k_hw_cfg_output(ah, i, AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
ath9k_hw_set_gpio(ah, i, !!(ah->gpio_val & BIT(i)));
}
}
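/*
* Poll the MAC observation bus (AR_OBS_BUS_1) to check whether the
* MAC is still making progress. The masked values below appear to be
* hardware-specific encodings of known stuck states; seeing only
* those for all 50 reads means the MAC is considered dead.
*/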
bool ath9k_hw_check_alive(struct ath_hw *ah)
{
int count = 50;
u32 reg;
if (AR_SREV_9285_12_OR_LATER(ah))
return true;
do {
reg = REG_READ(ah, AR_OBS_BUS_1);
if ((reg & 0x7E7FFFEF) == 0x00702400)
continue;
switch (reg & 0x7E000B00) {
case 0x1E000000:
case 0x52000B00:
case 0x18000B00:
continue;
default:
return true;
}
} while (count-- > 0);
return false;
}
EXPORT_SYMBOL(ath9k_hw_check_alive);
/*
* Fast channel change:
* (Change synthesizer based on channel freq without resetting chip)
*
* Don't do FCC when
* - Flag is not set
* - Chip is just coming out of full sleep
* - Channel to be set is same as current channel
* - Channel flags are different (e.g., moving from a 2 GHz to a 5 GHz channel)
*/
static int ath9k_hw_do_fastcc(struct ath_hw *ah, struct ath9k_channel *chan)
{
struct ath_common *common = ath9k_hw_common(ah);
int ret;
if (AR_SREV_9280(ah) && common->bus_ops->ath_bus_type == ATH_PCI)
goto fail;
if (ah->chip_fullsleep)
goto fail;
if (!ah->curchan)
goto fail;
if (chan->channel == ah->curchan->channel)
goto fail;
if ((chan->channelFlags & CHANNEL_ALL) !=
(ah->curchan->channelFlags & CHANNEL_ALL))
goto fail;
if (!ath9k_hw_check_alive(ah))
goto fail;
/*
* For AR9462, make sure the calibration data to be
* re-used is present.
*/
if (AR_SREV_9462(ah) && (!ah->caldata ||
!ah->caldata->done_txiqcal_once ||
!ah->caldata->done_txclcal_once ||
!ah->caldata->rtt_hist.num_readings))
goto fail;
ath_dbg(common, RESET, "FastChannelChange for %d -> %d\n",
ah->curchan->channel, chan->channel);
ret = ath9k_hw_channel_change(ah, chan);
if (!ret)
goto fail;
ath9k_hw_loadnf(ah, ah->curchan);
ath9k_hw_start_nfcal(ah, true);
if ((ah->caps.hw_caps & ATH9K_HW_CAP_MCI) && ar9003_mci_is_ready(ah))
ar9003_mci_2g5g_switch(ah, true);
if (AR_SREV_9271(ah))
ar9002_hw_load_ani_reg(ah, chan);
return 0;
fail:
return -EINVAL;
}
int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
struct ath9k_hw_cal_data *caldata, bool fastcc)
{
struct ath_common *common = ath9k_hw_common(ah);
u32 saveLedState;
u32 saveDefAntenna;
u32 macStaId1;
u64 tsf = 0;
int i, r;
bool start_mci_reset = false;
bool mci = !!(ah->caps.hw_caps & ATH9K_HW_CAP_MCI);
bool save_fullsleep = ah->chip_fullsleep;
if (mci) {
start_mci_reset = ar9003_mci_start_reset(ah, chan);
if (start_mci_reset)
return 0;
}
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return -EIO;
if (ah->curchan && !ah->chip_fullsleep)
ath9k_hw_getnf(ah, ah->curchan);
ah->caldata = caldata;
if (caldata &&
(chan->channel != caldata->channel ||
(chan->channelFlags & ~CHANNEL_CW_INT) !=
(caldata->channelFlags & ~CHANNEL_CW_INT))) {
/* Operating channel changed, reset channel calibration data */
memset(caldata, 0, sizeof(*caldata));
ath9k_init_nfcal_hist_buffer(ah, chan);
}
ah->noise = ath9k_hw_getchan_noise(ah, chan);
if (fastcc) {
r = ath9k_hw_do_fastcc(ah, chan);
if (!r)
return r;
}
if (mci)
ar9003_mci_stop_bt(ah, save_fullsleep);
saveDefAntenna = REG_READ(ah, AR_DEF_ANTENNA);
if (saveDefAntenna == 0)
saveDefAntenna = 1;
macStaId1 = REG_READ(ah, AR_STA_ID1) & AR_STA_ID1_BASE_RATE_11B;
/* For chips on which RTC reset is done, save TSF before it gets cleared */
if (AR_SREV_9100(ah) ||
(AR_SREV_9280(ah) && ah->eep_ops->get_eeprom(ah, EEP_OL_PWRCTRL)))
tsf = ath9k_hw_gettsf64(ah);
saveLedState = REG_READ(ah, AR_CFG_LED) &
(AR_CFG_LED_ASSOC_CTL | AR_CFG_LED_MODE_SEL |
AR_CFG_LED_BLINK_THRESH_SEL | AR_CFG_LED_BLINK_SLOW);
ath9k_hw_mark_phy_inactive(ah);
ah->paprd_table_write_done = false;
/* Only required on the first reset */
if (AR_SREV_9271(ah) && ah->htc_reset_init) {
REG_WRITE(ah,
AR9271_RESET_POWER_DOWN_CONTROL,
AR9271_RADIO_RF_RST);
udelay(50);
}
if (!ath9k_hw_chip_reset(ah, chan)) {
ath_err(common, "Chip reset failed\n");
return -EINVAL;
}
/* Only required on the first reset */
if (AR_SREV_9271(ah) && ah->htc_reset_init) {
ah->htc_reset_init = false;
REG_WRITE(ah,
AR9271_RESET_POWER_DOWN_CONTROL,
AR9271_GATE_MAC_CTL);
udelay(50);
}
/* Restore TSF */
if (tsf)
ath9k_hw_settsf64(ah, tsf);
if (AR_SREV_9280_20_OR_LATER(ah))
REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, AR_GPIO_JTAG_DISABLE);
if (!AR_SREV_9300_20_OR_LATER(ah))
ar9002_hw_enable_async_fifo(ah);
r = ath9k_hw_process_ini(ah, chan);
if (r)
return r;
if (mci)
ar9003_mci_reset(ah, false, IS_CHAN_2GHZ(chan), save_fullsleep);
/*
* Some AR91xx SoC devices frequently fail to accept TSF writes
* right after the chip reset. When that happens, write a new
* value after the initvals have been applied, with an offset
* based on the measured time difference.
*/
if (AR_SREV_9100(ah) && (ath9k_hw_gettsf64(ah) < tsf)) {
tsf += 1500;
ath9k_hw_settsf64(ah, tsf);
}
/* Setup MFP options for CCMP */
if (AR_SREV_9280_20_OR_LATER(ah)) {
/* Mask Retry(b11), PwrMgt(b12), MoreData(b13) to 0 in mgmt
* frames when constructing CCMP AAD. */
REG_RMW_FIELD(ah, AR_AES_MUTE_MASK1, AR_AES_MUTE_MASK1_FC_MGMT,
0xc7ff);
ah->sw_mgmt_crypto = false;
} else if (AR_SREV_9160_10_OR_LATER(ah)) {
/* Disable hardware crypto for management frames */
REG_CLR_BIT(ah, AR_PCU_MISC_MODE2,
AR_PCU_MISC_MODE2_MGMT_CRYPTO_ENABLE);
REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
AR_PCU_MISC_MODE2_NO_CRYPTO_FOR_NON_DATA_PKT);
ah->sw_mgmt_crypto = true;
} else
ah->sw_mgmt_crypto = true;
if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
ath9k_hw_set_delta_slope(ah, chan);
ath9k_hw_spur_mitigate_freq(ah, chan);
ah->eep_ops->set_board_values(ah, chan);
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
| macStaId1
| AR_STA_ID1_RTS_USE_DEF
| (ah->config.ack_6mb ? AR_STA_ID1_ACKCTS_6MB : 0)
| ah->sta_id1_defaults);
ath_hw_setbssidmask(common);
REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
ath9k_hw_write_associd(ah);
REG_WRITE(ah, AR_ISR, ~0);
REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
REGWRITE_BUFFER_FLUSH(ah);
ath9k_hw_set_operating_mode(ah, ah->opmode);
r = ath9k_hw_rf_set_freq(ah, chan);
if (r)
return r;
ath9k_hw_set_clockrate(ah);
ENABLE_REGWRITE_BUFFER(ah);
for (i = 0; i < AR_NUM_DCU; i++)
REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
REGWRITE_BUFFER_FLUSH(ah);
ah->intr_txqs = 0;
for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
ath9k_hw_resettxqueue(ah, i);
ath9k_hw_init_interrupt_masks(ah, ah->opmode);
ath9k_hw_ani_cache_ini_regs(ah);
ath9k_hw_init_qos(ah);
if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
ath9k_hw_init_global_settings(ah);
if (AR_SREV_9287(ah) && AR_SREV_9287_13_OR_LATER(ah)) {
REG_SET_BIT(ah, AR_MAC_PCU_LOGIC_ANALYZER,
AR_MAC_PCU_LOGIC_ANALYZER_DISBUG20768);
REG_RMW_FIELD(ah, AR_AHB_MODE, AR_AHB_CUSTOM_BURST_EN,
AR_AHB_CUSTOM_BURST_ASYNC_FIFO_VAL);
REG_SET_BIT(ah, AR_PCU_MISC_MODE2,
AR_PCU_MISC_MODE2_ENABLE_AGGWEP);
}
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PRESERVE_SEQNUM);
ath9k_hw_set_dma(ah);
REG_WRITE(ah, AR_OBS, 8);
if (ah->config.rx_intr_mitigation) {
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_LAST, 500);
REG_RMW_FIELD(ah, AR_RIMT, AR_RIMT_FIRST, 2000);
}
if (ah->config.tx_intr_mitigation) {
REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_LAST, 300);
REG_RMW_FIELD(ah, AR_TIMT, AR_TIMT_FIRST, 750);
}
ath9k_hw_init_bb(ah, chan);
if (caldata) {
caldata->done_txiqcal_once = false;
caldata->done_txclcal_once = false;
caldata->rtt_hist.num_readings = 0;
}
if (!ath9k_hw_init_cal(ah, chan))
return -EIO;
ath9k_hw_loadnf(ah, chan);
ath9k_hw_start_nfcal(ah, true);
if (mci && ar9003_mci_end_reset(ah, chan, caldata))
return -EIO;
ENABLE_REGWRITE_BUFFER(ah);
ath9k_hw_restore_chainmask(ah);
REG_WRITE(ah, AR_CFG_LED, saveLedState | AR_CFG_SCLK_32KHZ);
REGWRITE_BUFFER_FLUSH(ah);
/*
* For big endian systems turn on swapping for descriptors
*/
if (AR_SREV_9100(ah)) {
u32 mask;
mask = REG_READ(ah, AR_CFG);
if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
ath_dbg(common, RESET, "CFG Byte Swap Set 0x%x\n",
mask);
} else {
mask =
INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
REG_WRITE(ah, AR_CFG, mask);
ath_dbg(common, RESET, "Setting CFG 0x%x\n",
REG_READ(ah, AR_CFG));
}
} else {
if (common->bus_ops->ath_bus_type == ATH_USB) {
/* Configure AR9271 target WLAN */
if (AR_SREV_9271(ah))
REG_WRITE(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB);
else
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
}
#ifdef __BIG_ENDIAN
else if (AR_SREV_9330(ah) || AR_SREV_9340(ah))
REG_RMW(ah, AR_CFG, AR_CFG_SWRB | AR_CFG_SWTB, 0);
else
REG_WRITE(ah, AR_CFG, AR_CFG_SWTD | AR_CFG_SWRD);
#endif
}
if (ath9k_hw_btcoex_is_enabled(ah))
ath9k_hw_btcoex_enable(ah);
if (mci)
ar9003_mci_check_bt(ah);
if (AR_SREV_9300_20_OR_LATER(ah)) {
ar9003_hw_bb_watchdog_config(ah);
ar9003_hw_disable_phy_restart(ah);
}
ath9k_hw_apply_gpio_override(ah);
return 0;
}
EXPORT_SYMBOL(ath9k_hw_reset);
/******************************/
/* Power Management (Chipset) */
/******************************/
/*
* Notify that Power Mgt is disabled in self-generated frames.
* If requested, force the chip to sleep.
*/
static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
{
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
if (setChip) {
if (AR_SREV_9462(ah)) {
REG_WRITE(ah, AR_TIMER_MODE,
REG_READ(ah, AR_TIMER_MODE) & 0xFFFFFF00);
REG_WRITE(ah, AR_NDP2_TIMER_MODE, REG_READ(ah,
AR_NDP2_TIMER_MODE) & 0xFFFFFF00);
REG_WRITE(ah, AR_SLP32_INC,
REG_READ(ah, AR_SLP32_INC) & 0xFFF00000);
/* xxx Required for WLAN only case ? */
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, 0);
udelay(100);
}
/*
* Clear the RTC force wake bit to allow the
* mac to go to sleep.
*/
REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE, AR_RTC_FORCE_WAKE_EN);
if (AR_SREV_9462(ah))
udelay(100);
if (!AR_SREV_9100(ah) && !AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
/* Shutdown chip. Active low */
if (!AR_SREV_5416(ah) && !AR_SREV_9271(ah)) {
REG_CLR_BIT(ah, AR_RTC_RESET, AR_RTC_RESET_EN);
udelay(2);
}
}
/* Clear Bit 14 of AR_WA after putting chip into Full Sleep mode. */
if (AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
/*
* Notify that Power Management is enabled in self-generated
* frames. If requested, set the power mode of the chip to
* auto/normal. Duration in units of 128us (1/8 TU).
*/
static void ath9k_set_power_network_sleep(struct ath_hw *ah, int setChip)
{
u32 val;
REG_SET_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
if (setChip) {
struct ath9k_hw_capabilities *pCap = &ah->caps;
if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
/* Set WakeOnInterrupt bit; clear ForceWake bit */
REG_WRITE(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_ON_INT);
} else {
/* When the chip goes into network sleep, it could be woken
* up by an MCI_INT interrupt caused by BT's HW messages
* (LNA_xxx, CONT_xxx), which can arrive at a very fast
* rate (~100us). This causes the chip to leave and
* re-enter network sleep mode frequently, which in turn
* makes the WLAN MCI HW generate lots of SYS_WAKING and
* SYS_SLEEPING messages, keeping the BT CPU too busy to
* process them.
*/
if (AR_SREV_9462(ah)) {
val = REG_READ(ah, AR_MCI_INTERRUPT_RX_MSG_EN) &
~AR_MCI_INTERRUPT_RX_HW_MSG_MASK;
REG_WRITE(ah, AR_MCI_INTERRUPT_RX_MSG_EN, val);
}
/*
* Clear the RTC force wake bit to allow the
* mac to go to sleep.
*/
REG_CLR_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
if (AR_SREV_9462(ah))
udelay(30);
}
}
/* Clear Bit 14 of AR_WA after putting chip into Net Sleep mode. */
if (AR_SREV_9300_20_OR_LATER(ah))
REG_WRITE(ah, AR_WA, ah->WARegVal & ~AR_WA_D3_L1_DISABLE);
}
static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
{
u32 val;
int i;
/* Set Bits 14 and 17 of AR_WA before powering on the chip. */
if (AR_SREV_9300_20_OR_LATER(ah)) {
REG_WRITE(ah, AR_WA, ah->WARegVal);
udelay(10);
}
if (setChip) {
if ((REG_READ(ah, AR_RTC_STATUS) &
AR_RTC_STATUS_M) == AR_RTC_STATUS_SHUTDOWN) {
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
return false;
}
if (!AR_SREV_9300_20_OR_LATER(ah))
ath9k_hw_init_pll(ah, NULL);
}
if (AR_SREV_9100(ah))
REG_SET_BIT(ah, AR_RTC_RESET,
AR_RTC_RESET_EN);
REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
udelay(50);
for (i = POWER_UP_TIME / 50; i > 0; i--) {
val = REG_READ(ah, AR_RTC_STATUS) & AR_RTC_STATUS_M;
if (val == AR_RTC_STATUS_ON)
break;
udelay(50);
REG_SET_BIT(ah, AR_RTC_FORCE_WAKE,
AR_RTC_FORCE_WAKE_EN);
}
if (i == 0) {
ath_err(ath9k_hw_common(ah),
"Failed to wakeup in %uus\n",
POWER_UP_TIME / 20);
return false;
}
}
REG_CLR_BIT(ah, AR_STA_ID1, AR_STA_ID1_PWR_SAV);
return true;
}
bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
{
struct ath_common *common = ath9k_hw_common(ah);
int status = true, setChip = true;
static const char *modes[] = {
"AWAKE",
"FULL-SLEEP",
"NETWORK SLEEP",
"UNDEFINED"
};
if (ah->power_mode == mode)
return status;
ath_dbg(common, RESET, "%s -> %s\n",
modes[ah->power_mode], modes[mode]);
switch (mode) {
case ATH9K_PM_AWAKE:
status = ath9k_hw_set_power_awake(ah, setChip);
if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
break;
case ATH9K_PM_FULL_SLEEP:
if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
ar9003_mci_set_full_sleep(ah);
ath9k_set_power_sleep(ah, setChip);
ah->chip_fullsleep = true;
break;
case ATH9K_PM_NETWORK_SLEEP:
if (ah->caps.hw_caps & ATH9K_HW_CAP_MCI)
REG_WRITE(ah, AR_RTC_KEEP_AWAKE, 0x2);
ath9k_set_power_network_sleep(ah, setChip);
break;
default:
ath_err(common, "Unknown power mode %u\n", mode);
return false;
}
ah->power_mode = mode;
/*
* XXX: If this warning never comes up after a while then
* simply keep the ATH_DBG_WARN_ON_ONCE() but make
* ath9k_hw_setpower() return type void.
*/
if (!(ah->ah_flags & AH_UNPLUGGED))
ATH_DBG_WARN_ON_ONCE(!status);
return status;
}
EXPORT_SYMBOL(ath9k_hw_setpower);
/*******************/
/* Beacon Handling */
/*******************/
void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
{
int flags = 0;
ENABLE_REGWRITE_BUFFER(ah);
switch (ah->opmode) {
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
REG_SET_BIT(ah, AR_TXCFG,
AR_TXCFG_ADHOC_BEACON_ATIM_TX_POLICY);
REG_WRITE(ah, AR_NEXT_NDP_TIMER, next_beacon +
TU_TO_USEC(ah->atim_window ? ah->atim_window : 1));
flags |= AR_NDP_TIMER_EN;
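/* fall through - IBSS/mesh also program the TBTT/DBA/SWBA timers */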
case NL80211_IFTYPE_AP:
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, next_beacon);
REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, next_beacon -
TU_TO_USEC(ah->config.dma_beacon_response_time));
REG_WRITE(ah, AR_NEXT_SWBA, next_beacon -
TU_TO_USEC(ah->config.sw_beacon_response_time));
flags |=
AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
break;
default:
ath_dbg(ath9k_hw_common(ah), BEACON,
"%s: unsupported opmode: %d\n", __func__, ah->opmode);
return;
}
REG_WRITE(ah, AR_BEACON_PERIOD, beacon_period);
REG_WRITE(ah, AR_DMA_BEACON_PERIOD, beacon_period);
REG_WRITE(ah, AR_SWBA_PERIOD, beacon_period);
REG_WRITE(ah, AR_NDP_PERIOD, beacon_period);
REGWRITE_BUFFER_FLUSH(ah);
REG_SET_BIT(ah, AR_TIMER_MODE, flags);
}
EXPORT_SYMBOL(ath9k_hw_beaconinit);
void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
const struct ath9k_beacon_state *bs)
{
u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_common *common = ath9k_hw_common(ah);
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
REG_WRITE(ah, AR_BEACON_PERIOD,
TU_TO_USEC(bs->bs_intval));
REG_WRITE(ah, AR_DMA_BEACON_PERIOD,
TU_TO_USEC(bs->bs_intval));
REGWRITE_BUFFER_FLUSH(ah);
REG_RMW_FIELD(ah, AR_RSSI_THR,
AR_RSSI_THR_BM_THR, bs->bs_bmissthreshold);
beaconintval = bs->bs_intval;
if (bs->bs_sleepduration > beaconintval)
beaconintval = bs->bs_sleepduration;
dtimperiod = bs->bs_dtimperiod;
if (bs->bs_sleepduration > dtimperiod)
dtimperiod = bs->bs_sleepduration;
if (beaconintval == dtimperiod)
nextTbtt = bs->bs_nextdtim;
else
nextTbtt = bs->bs_nexttbtt;
ath_dbg(common, BEACON, "next DTIM %d\n", bs->bs_nextdtim);
ath_dbg(common, BEACON, "next beacon %d\n", nextTbtt);
ath_dbg(common, BEACON, "beacon period %d\n", beaconintval);
ath_dbg(common, BEACON, "DTIM period %d\n", dtimperiod);
ENABLE_REGWRITE_BUFFER(ah);
REG_WRITE(ah, AR_NEXT_DTIM,
TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
REG_WRITE(ah, AR_NEXT_TIM, TU_TO_USEC(nextTbtt - SLEEP_SLOP));
REG_WRITE(ah, AR_SLEEP1,
SM((CAB_TIMEOUT_VAL << 3), AR_SLEEP1_CAB_TIMEOUT)
| AR_SLEEP1_ASSUME_DTIM);
if (pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)
beacontimeout = (BEACON_TIMEOUT_VAL << 3);
else
beacontimeout = MIN_BEACON_TIMEOUT_VAL;
REG_WRITE(ah, AR_SLEEP2,
SM(beacontimeout, AR_SLEEP2_BEACON_TIMEOUT));
REG_WRITE(ah, AR_TIM_PERIOD, TU_TO_USEC(beaconintval));
REG_WRITE(ah, AR_DTIM_PERIOD, TU_TO_USEC(dtimperiod));
REGWRITE_BUFFER_FLUSH(ah);
REG_SET_BIT(ah, AR_TIMER_MODE,
AR_TBTT_TIMER_EN | AR_TIM_TIMER_EN |
AR_DTIM_TIMER_EN);
/* TSF Out of Range Threshold */
REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold);
}
EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);
/*******************/
/* HW Capabilities */
/*******************/
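/*
* Clamp the EEPROM chainmask to the chains the chip actually has;
* if the EEPROM value is bogus (no overlap with the chip mask),
* fall back to the full chip chainmask.
*/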
static u8 fixup_chainmask(u8 chip_chainmask, u8 eeprom_chainmask)
{
eeprom_chainmask &= chip_chainmask;
if (eeprom_chainmask)
return eeprom_chainmask;
else
return chip_chainmask;
}
/**
* ath9k_hw_dfs_tested - checks if DFS has been tested with used chipset
* @ah: the atheros hardware data structure
*
* We enable DFS support upstream on chipsets which have passed a series
* of tests. The testing requirements are going to be documented. Desired
* test requirements are documented at:
*
* http://wireless.kernel.org/en/users/Drivers/ath9k/dfs
*
* Once a new chipset gets properly tested an individual commit can be used
* to document the testing for DFS for that chipset.
*/
static bool ath9k_hw_dfs_tested(struct ath_hw *ah)
{
switch (ah->hw_version.macVersion) {
/* AR9580 will likely be our first target to get testing on */
case AR_SREV_VERSION_9580:
default:
return false;
}
}
int ath9k_hw_fill_cap_info(struct ath_hw *ah)
{
struct ath9k_hw_capabilities *pCap = &ah->caps;
struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
struct ath_common *common = ath9k_hw_common(ah);
unsigned int chip_chainmask;
u16 eeval;
u8 ant_div_ctl1, tx_chainmask, rx_chainmask;
eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
regulatory->current_rd = eeval;
if (ah->opmode != NL80211_IFTYPE_AP &&
ah->hw_version.subvendorid == AR_SUBVENDOR_ID_NEW_A) {
if (regulatory->current_rd == 0x64 ||
regulatory->current_rd == 0x65)
regulatory->current_rd += 5;
else if (regulatory->current_rd == 0x41)
regulatory->current_rd = 0x43;
ath_dbg(common, REGULATORY, "regdomain mapped to 0x%x\n",
regulatory->current_rd);
}
eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) {
ath_err(common,
"no band has been marked as supported in EEPROM\n");
return -EINVAL;
}
if (eeval & AR5416_OPFLAGS_11A)
pCap->hw_caps |= ATH9K_HW_CAP_5GHZ;
if (eeval & AR5416_OPFLAGS_11G)
pCap->hw_caps |= ATH9K_HW_CAP_2GHZ;
if (AR_SREV_9485(ah) || AR_SREV_9285(ah) || AR_SREV_9330(ah))
chip_chainmask = 1;
else if (AR_SREV_9462(ah))
chip_chainmask = 3;
else if (!AR_SREV_9280_20_OR_LATER(ah))
chip_chainmask = 7;
else if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9340(ah))
chip_chainmask = 3;
else
chip_chainmask = 7;
pCap->tx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_TX_MASK);
/*
* For AR9271 we will temporarily use the rx chainmask as read from
* the EEPROM.
*/
if ((ah->hw_version.devid == AR5416_DEVID_PCI) &&
!(eeval & AR5416_OPFLAGS_11A) &&
!(AR_SREV_9271(ah)))
/* CB71: GPIO 0 is pulled down to indicate 3 rx chains */
pCap->rx_chainmask = ath9k_hw_gpio_get(ah, 0) ? 0x5 : 0x7;
else if (AR_SREV_9100(ah))
pCap->rx_chainmask = 0x7;
else
/* Use rx_chainmask from EEPROM. */
pCap->rx_chainmask = ah->eep_ops->get_eeprom(ah, EEP_RX_MASK);
pCap->tx_chainmask = fixup_chainmask(chip_chainmask, pCap->tx_chainmask);
pCap->rx_chainmask = fixup_chainmask(chip_chainmask, pCap->rx_chainmask);
ah->txchainmask = pCap->tx_chainmask;
ah->rxchainmask = pCap->rx_chainmask;
ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
/* enable key search for every frame in an aggregate */
if (AR_SREV_9300_20_OR_LATER(ah))
ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;
common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
if (ah->hw_version.devid != AR2427_DEVID_PCIE)
pCap->hw_caps |= ATH9K_HW_CAP_HT;
else
pCap->hw_caps &= ~ATH9K_HW_CAP_HT;
if (AR_SREV_9271(ah))
pCap->num_gpio_pins = AR9271_NUM_GPIO;
else if (AR_DEVID_7010(ah))
pCap->num_gpio_pins = AR7010_NUM_GPIO;
else if (AR_SREV_9300_20_OR_LATER(ah))
pCap->num_gpio_pins = AR9300_NUM_GPIO;
else if (AR_SREV_9287_11_OR_LATER(ah))
pCap->num_gpio_pins = AR9287_NUM_GPIO;
else if (AR_SREV_9285_12_OR_LATER(ah))
pCap->num_gpio_pins = AR9285_NUM_GPIO;
else if (AR_SREV_9280_20_OR_LATER(ah))
pCap->num_gpio_pins = AR928X_NUM_GPIO;
else
pCap->num_gpio_pins = AR_NUM_GPIO;
if (AR_SREV_9160_10_OR_LATER(ah) || AR_SREV_9100(ah))
pCap->rts_aggr_limit = ATH_AMPDU_LIMIT_MAX;
else
pCap->rts_aggr_limit = (8 * 1024);
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
ah->rfsilent = ah->eep_ops->get_eeprom(ah, EEP_RF_SILENT);
if (ah->rfsilent & EEP_RFSILENT_ENABLED) {
ah->rfkill_gpio =
MS(ah->rfsilent, EEP_RFSILENT_GPIO_SEL);
ah->rfkill_polarity =
MS(ah->rfsilent, EEP_RFSILENT_POLARITY);
pCap->hw_caps |= ATH9K_HW_CAP_RFSILENT;
}
#endif
if (AR_SREV_9271(ah) || AR_SREV_9300_20_OR_LATER(ah))
pCap->hw_caps |= ATH9K_HW_CAP_AUTOSLEEP;
else
pCap->hw_caps &= ~ATH9K_HW_CAP_AUTOSLEEP;
if (AR_SREV_9280(ah) || AR_SREV_9285(ah))
pCap->hw_caps &= ~ATH9K_HW_CAP_4KB_SPLITTRANS;
else
pCap->hw_caps |= ATH9K_HW_CAP_4KB_SPLITTRANS;
if (AR_SREV_9300_20_OR_LATER(ah)) {
pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
if (!AR_SREV_9330(ah) && !AR_SREV_9485(ah))
pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
pCap->rx_status_len = sizeof(struct ar9003_rxs);
pCap->tx_desc_len = sizeof(struct ar9003_txc);
pCap->txs_len = sizeof(struct ar9003_txs);
if (!ah->config.paprd_disable &&
ah->eep_ops->get_eeprom(ah, EEP_PAPRD))
pCap->hw_caps |= ATH9K_HW_CAP_PAPRD;
} else {
pCap->tx_desc_len = sizeof(struct ath_desc);
if (AR_SREV_9280_20(ah))
pCap->hw_caps |= ATH9K_HW_CAP_FASTCLOCK;
}
if (AR_SREV_9300_20_OR_LATER(ah))
pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
if (AR_SREV_9300_20_OR_LATER(ah))
ah->ent_mode = REG_READ(ah, AR_ENT_OTP);
if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
if (AR_SREV_9285(ah))
if (ah->eep_ops->get_eeprom(ah, EEP_MODAL_VER) >= 3) {
ant_div_ctl1 =
ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
}
if (AR_SREV_9300_20_OR_LATER(ah)) {
if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
pCap->hw_caps |= ATH9K_HW_CAP_APM;
}
if (AR_SREV_9330(ah) || AR_SREV_9485(ah)) {
ant_div_ctl1 = ah->eep_ops->get_eeprom(ah, EEP_ANT_DIV_CTL1);
/*
* enable the diversity-combining algorithm only when
* both enable_lna_div and enable_fast_div are set
* Table for Diversity
* ant_div_alt_lnaconf bit 0-1
* ant_div_main_lnaconf bit 2-3
* ant_div_alt_gaintb bit 4
* ant_div_main_gaintb bit 5
* enable_ant_div_lnadiv bit 6
* enable_ant_fast_div bit 7
*/
if ((ant_div_ctl1 >> 0x6) == 0x3)
pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
}
if (AR_SREV_9485_10(ah)) {
pCap->pcie_lcr_extsync_en = true;
pCap->pcie_lcr_offset = 0x80;
}
if (ath9k_hw_dfs_tested(ah))
pCap->hw_caps |= ATH9K_HW_CAP_DFS;
tx_chainmask = pCap->tx_chainmask;
rx_chainmask = pCap->rx_chainmask;
while (tx_chainmask || rx_chainmask) {
if (tx_chainmask & BIT(0))
pCap->max_txchains++;
if (rx_chainmask & BIT(0))
pCap->max_rxchains++;
tx_chainmask >>= 1;
rx_chainmask >>= 1;
}
if (AR_SREV_9300_20_OR_LATER(ah)) {
ah->enabled_cals |= TX_IQ_CAL;
if (AR_SREV_9485_OR_LATER(ah))
ah->enabled_cals |= TX_IQ_ON_AGC_CAL;
}
if (AR_SREV_9462(ah)) {
if (!(ah->ent_mode & AR_ENT_OTP_49GHZ_DISABLE))
pCap->hw_caps |= ATH9K_HW_CAP_MCI;
if (AR_SREV_9462_20(ah))
pCap->hw_caps |= ATH9K_HW_CAP_RTT;
}
return 0;
}
/****************************/
/* GPIO / RFKILL / Antennae */
/****************************/
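/*
* Each GPIO mux field is 5 bits wide and six of them are packed per
* 32-bit mux register, so the register is selected in groups of six
* GPIOs and the shift is (gpio % 6) * 5. On pre-AR9280 parts MUX1
* has a hardware quirk, hence the read-modify-write that first
* shuffles the 0x1F0 bits up by one before updating the field.
*/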
static void ath9k_hw_gpio_cfg_output_mux(struct ath_hw *ah,
u32 gpio, u32 type)
{
int addr;
u32 gpio_shift, tmp;
if (gpio > 11)
addr = AR_GPIO_OUTPUT_MUX3;
else if (gpio > 5)
addr = AR_GPIO_OUTPUT_MUX2;
else
addr = AR_GPIO_OUTPUT_MUX1;
gpio_shift = (gpio % 6) * 5;
if (AR_SREV_9280_20_OR_LATER(ah)
|| (addr != AR_GPIO_OUTPUT_MUX1)) {
REG_RMW(ah, addr, (type << gpio_shift),
(0x1f << gpio_shift));
} else {
tmp = REG_READ(ah, addr);
tmp = ((tmp & 0x1F0) << 1) | (tmp & ~0x1F0);
tmp &= ~(0x1f << gpio_shift);
tmp |= (type << gpio_shift);
REG_WRITE(ah, addr, tmp);
}
}
void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
{
u32 gpio_shift;
BUG_ON(gpio >= ah->caps.num_gpio_pins);
if (AR_DEVID_7010(ah)) {
gpio_shift = gpio;
REG_RMW(ah, AR7010_GPIO_OE,
(AR7010_GPIO_OE_AS_INPUT << gpio_shift),
(AR7010_GPIO_OE_MASK << gpio_shift));
return;
}
gpio_shift = gpio << 1;
REG_RMW(ah,
AR_GPIO_OE_OUT,
(AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
(AR_GPIO_OE_OUT_DRV << gpio_shift));
}
EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);
u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
{
#define MS_REG_READ(x, y) \
(MS(REG_READ(ah, AR_GPIO_IN_OUT), x##_GPIO_IN_VAL) & (AR_GPIO_BIT(y)))
if (gpio >= ah->caps.num_gpio_pins)
return 0xffffffff;
if (AR_DEVID_7010(ah)) {
u32 val;
val = REG_READ(ah, AR7010_GPIO_IN);
return (MS(val, AR7010_GPIO_IN_VAL) & AR_GPIO_BIT(gpio)) == 0;
} else if (AR_SREV_9300_20_OR_LATER(ah))
return (MS(REG_READ(ah, AR_GPIO_IN), AR9300_GPIO_IN_VAL) &
AR_GPIO_BIT(gpio)) != 0;
else if (AR_SREV_9271(ah))
return MS_REG_READ(AR9271, gpio) != 0;
else if (AR_SREV_9287_11_OR_LATER(ah))
return MS_REG_READ(AR9287, gpio) != 0;
else if (AR_SREV_9285_12_OR_LATER(ah))
return MS_REG_READ(AR9285, gpio) != 0;
else if (AR_SREV_9280_20_OR_LATER(ah))
return MS_REG_READ(AR928X, gpio) != 0;
else
return MS_REG_READ(AR, gpio) != 0;
}
EXPORT_SYMBOL(ath9k_hw_gpio_get);
void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
u32 ah_signal_type)
{
u32 gpio_shift;
if (AR_DEVID_7010(ah)) {
gpio_shift = gpio;
REG_RMW(ah, AR7010_GPIO_OE,
(AR7010_GPIO_OE_AS_OUTPUT << gpio_shift),
(AR7010_GPIO_OE_MASK << gpio_shift));
return;
}
ath9k_hw_gpio_cfg_output_mux(ah, gpio, ah_signal_type);
gpio_shift = 2 * gpio;
REG_RMW(ah,
AR_GPIO_OE_OUT,
(AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
(AR_GPIO_OE_OUT_DRV << gpio_shift));
}
EXPORT_SYMBOL(ath9k_hw_cfg_output);
void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
{
if (AR_DEVID_7010(ah)) {
val = val ? 0 : 1;
REG_RMW(ah, AR7010_GPIO_OUT, ((val & 1) << gpio),
AR_GPIO_BIT(gpio));
return;
}
if (AR_SREV_9271(ah))
val = ~val;
REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
AR_GPIO_BIT(gpio));
}
EXPORT_SYMBOL(ath9k_hw_set_gpio);
void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
{
REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
}
EXPORT_SYMBOL(ath9k_hw_setantenna);
/*********************/
/* General Operation */
/*********************/
u32 ath9k_hw_getrxfilter(struct ath_hw *ah)
{
u32 bits = REG_READ(ah, AR_RX_FILTER);
u32 phybits = REG_READ(ah, AR_PHY_ERR);
if (phybits & AR_PHY_ERR_RADAR)
bits |= ATH9K_RX_FILTER_PHYRADAR;
if (phybits & (AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING))
bits |= ATH9K_RX_FILTER_PHYERR;
return bits;
}
EXPORT_SYMBOL(ath9k_hw_getrxfilter);
void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
{
u32 phybits;
ENABLE_REGWRITE_BUFFER(ah);
if (AR_SREV_9462(ah))
bits |= ATH9K_RX_FILTER_CONTROL_WRAPPER;
REG_WRITE(ah, AR_RX_FILTER, bits);
phybits = 0;
if (bits & ATH9K_RX_FILTER_PHYRADAR)
phybits |= AR_PHY_ERR_RADAR;
if (bits & ATH9K_RX_FILTER_PHYERR)
phybits |= AR_PHY_ERR_OFDM_TIMING | AR_PHY_ERR_CCK_TIMING;
REG_WRITE(ah, AR_PHY_ERR, phybits);
if (phybits)
REG_SET_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
else
REG_CLR_BIT(ah, AR_RXCFG, AR_RXCFG_ZLFDMA);
REGWRITE_BUFFER_FLUSH(ah);
}
EXPORT_SYMBOL(ath9k_hw_setrxfilter);
bool ath9k_hw_phy_disable(struct ath_hw *ah)
{
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
return false;
ath9k_hw_init_pll(ah, NULL);
ah->htc_reset_init = true;
return true;
}
EXPORT_SYMBOL(ath9k_hw_phy_disable);
bool ath9k_hw_disable(struct ath_hw *ah)
{
if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
return false;
if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD))
return false;
ath9k_hw_init_pll(ah, NULL);
return true;
}
EXPORT_SYMBOL(ath9k_hw_disable);
static int get_antenna_gain(struct ath_hw *ah, struct ath9k_channel *chan)
{
enum eeprom_param gain_param;
if (IS_CHAN_2GHZ(chan))
gain_param = EEP_ANTENNA_GAIN_2G;
else
gain_param = EEP_ANTENNA_GAIN_5G;
return ah->eep_ops->get_eeprom(ah, gain_param);
}
void ath9k_hw_apply_txpower(struct ath_hw *ah, struct ath9k_channel *chan,
bool test)
{
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
struct ieee80211_channel *channel;
int chan_pwr, new_pwr, max_gain;
int ant_gain, ant_reduction = 0;
if (!chan)
return;
channel = chan->chan;
chan_pwr = min_t(int, channel->max_power * 2, MAX_RATE_POWER);
new_pwr = min_t(int, chan_pwr, reg->power_limit);
max_gain = chan_pwr - new_pwr + channel->max_antenna_gain * 2;
ant_gain = get_antenna_gain(ah, chan);
if (ant_gain > max_gain)
ant_reduction = ant_gain - max_gain;
ah->eep_ops->set_txpower(ah, chan,
ath9k_regd_get_ctl(reg, chan),
ant_reduction, new_pwr, test);
}
void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
{
struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
struct ath9k_channel *chan = ah->curchan;
struct ieee80211_channel *channel = chan->chan;
reg->power_limit = min_t(u32, limit, MAX_RATE_POWER);
if (test)
channel->max_power = MAX_RATE_POWER / 2;
ath9k_hw_apply_txpower(ah, chan, test);
if (test)
channel->max_power = DIV_ROUND_UP(reg->max_power_level, 2);
}
EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
void ath9k_hw_setopmode(struct ath_hw *ah)
{
ath9k_hw_set_operating_mode(ah, ah->opmode);
}
EXPORT_SYMBOL(ath9k_hw_setopmode);
void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1)
{
REG_WRITE(ah, AR_MCAST_FIL0, filter0);
REG_WRITE(ah, AR_MCAST_FIL1, filter1);
}
EXPORT_SYMBOL(ath9k_hw_setmcastfilter);
void ath9k_hw_write_associd(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid));
REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) |
((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
}
EXPORT_SYMBOL(ath9k_hw_write_associd);
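/*
* Reading the 64-bit TSF takes two register reads, so the upper half
* is read before and after the lower half: if it changed in between,
* the lower word wrapped during the read and we must retry (bounded
* by ATH9K_MAX_TSF_READ attempts).
*/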
#define ATH9K_MAX_TSF_READ 10
u64 ath9k_hw_gettsf64(struct ath_hw *ah)
{
u32 tsf_lower, tsf_upper1, tsf_upper2;
int i;
tsf_upper1 = REG_READ(ah, AR_TSF_U32);
for (i = 0; i < ATH9K_MAX_TSF_READ; i++) {
tsf_lower = REG_READ(ah, AR_TSF_L32);
tsf_upper2 = REG_READ(ah, AR_TSF_U32);
if (tsf_upper2 == tsf_upper1)
break;
tsf_upper1 = tsf_upper2;
}
WARN_ON(i == ATH9K_MAX_TSF_READ);
return (((u64)tsf_upper1 << 32) | tsf_lower);
}
EXPORT_SYMBOL(ath9k_hw_gettsf64);
void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64)
{
REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff);
REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff);
}
EXPORT_SYMBOL(ath9k_hw_settsf64);
void ath9k_hw_reset_tsf(struct ath_hw *ah)
{
if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
AH_TSF_WRITE_TIMEOUT))
ath_dbg(ath9k_hw_common(ah), RESET,
"AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
}
EXPORT_SYMBOL(ath9k_hw_reset_tsf);
void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
{
if (setting)
ah->misc_mode |= AR_PCU_TX_ADD_TSF;
else
ah->misc_mode &= ~AR_PCU_TX_ADD_TSF;
}
EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
void ath9k_hw_set11nmac2040(struct ath_hw *ah)
{
struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
u32 macmode;
if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
macmode = AR_2040_JOINED_RX_CLEAR;
else
macmode = 0;
REG_WRITE(ah, AR_2040_MODE, macmode);
}
/* HW Generic timers configuration */
static const struct ath_gen_timer_configuration gen_tmr_configuration[] =
{
{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
{AR_NEXT_NDP_TIMER, AR_NDP_PERIOD, AR_TIMER_MODE, 0x0080},
{AR_NEXT_NDP2_TIMER, AR_NDP2_PERIOD, AR_NDP2_TIMER_MODE, 0x0001},
{AR_NEXT_NDP2_TIMER + 1*4, AR_NDP2_PERIOD + 1*4,
AR_NDP2_TIMER_MODE, 0x0002},
{AR_NEXT_NDP2_TIMER + 2*4, AR_NDP2_PERIOD + 2*4,
AR_NDP2_TIMER_MODE, 0x0004},
{AR_NEXT_NDP2_TIMER + 3*4, AR_NDP2_PERIOD + 3*4,
AR_NDP2_TIMER_MODE, 0x0008},
{AR_NEXT_NDP2_TIMER + 4*4, AR_NDP2_PERIOD + 4*4,
AR_NDP2_TIMER_MODE, 0x0010},
{AR_NEXT_NDP2_TIMER + 5*4, AR_NDP2_PERIOD + 5*4,
AR_NDP2_TIMER_MODE, 0x0020},
{AR_NEXT_NDP2_TIMER + 6*4, AR_NDP2_PERIOD + 6*4,
AR_NDP2_TIMER_MODE, 0x0040},
{AR_NEXT_NDP2_TIMER + 7*4, AR_NDP2_PERIOD + 7*4,
AR_NDP2_TIMER_MODE, 0x0080}
};
/* HW generic timer primitives */
/* compute and clear index of rightmost 1 */
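/*
* b & -b isolates the lowest set bit; multiplying by the de Bruijn
* constant and taking the top five bits turns that single bit into
* its index, which is then mapped through gen_timer_index[].
* (debruijn32 is expected to be the standard 0x077CB531 sequence.)
*/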
static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
{
u32 b;
b = *mask;
b &= (0-b);
*mask &= ~b;
b *= debruijn32;
b >>= 27;
return timer_table->gen_timer_index[b];
}
u32 ath9k_hw_gettsf32(struct ath_hw *ah)
{
return REG_READ(ah, AR_TSF_L32);
}
EXPORT_SYMBOL(ath9k_hw_gettsf32);
struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
void (*trigger)(void *),
void (*overflow)(void *),
void *arg,
u8 timer_index)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
struct ath_gen_timer *timer;
timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
if (timer == NULL) {
ath_err(ath9k_hw_common(ah),
"Failed to allocate memory for hw timer[%d]\n",
timer_index);
return NULL;
}
/* allocate a hardware generic timer slot */
timer_table->timers[timer_index] = timer;
timer->index = timer_index;
timer->trigger = trigger;
timer->overflow = overflow;
timer->arg = arg;
return timer;
}
EXPORT_SYMBOL(ath_gen_timer_alloc);
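/*
* Typical usage of the generic timer API (sketch; my_trigger,
* my_overflow and priv are caller-defined, timeouts are in TSF
* microseconds):
*
*   timer = ath_gen_timer_alloc(ah, my_trigger, my_overflow, priv,
*                               AR_FIRST_NDP_TIMER);
*   ath9k_hw_gen_timer_start(ah, timer, timeout_us, period_us);
*   ...
*   ath9k_hw_gen_timer_stop(ah, timer);
*   ath_gen_timer_free(ah, timer);
*/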
void ath9k_hw_gen_timer_start(struct ath_hw *ah,
struct ath_gen_timer *timer,
u32 trig_timeout,
u32 timer_period)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
u32 tsf, timer_next;
BUG_ON(!timer_period);
set_bit(timer->index, &timer_table->timer_mask.timer_bits);
tsf = ath9k_hw_gettsf32(ah);
timer_next = tsf + trig_timeout;
ath_dbg(ath9k_hw_common(ah), HWTIMER,
"current tsf %x period %x timer_next %x\n",
tsf, timer_period, timer_next);
/*
* Program generic timer registers
*/
REG_WRITE(ah, gen_tmr_configuration[timer->index].next_addr,
timer_next);
REG_WRITE(ah, gen_tmr_configuration[timer->index].period_addr,
timer_period);
REG_SET_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
gen_tmr_configuration[timer->index].mode_mask);
if (AR_SREV_9462(ah)) {
/*
* Starting from AR9462, each generic timer can select which tsf
* to use. But we still follow the old rule, 0 - 7 use tsf and
* 8 - 15 use tsf2.
*/
if ((timer->index < AR_GEN_TIMER_BANK_1_LEN))
REG_CLR_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
(1 << timer->index));
else
REG_SET_BIT(ah, AR_MAC_PCU_GEN_TIMER_TSF_SEL,
(1 << timer->index));
}
/* Enable both trigger and thresh interrupt masks */
REG_SET_BIT(ah, AR_IMR_S5,
(SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
}
EXPORT_SYMBOL(ath9k_hw_gen_timer_start);
void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
if ((timer->index < AR_FIRST_NDP_TIMER) ||
(timer->index >= ATH_MAX_GEN_TIMER)) {
return;
}
/* Clear generic timer enable bits. */
REG_CLR_BIT(ah, gen_tmr_configuration[timer->index].mode_addr,
gen_tmr_configuration[timer->index].mode_mask);
/* Disable both trigger and thresh interrupt masks */
REG_CLR_BIT(ah, AR_IMR_S5,
(SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
clear_bit(timer->index, &timer_table->timer_mask.timer_bits);
}
EXPORT_SYMBOL(ath9k_hw_gen_timer_stop);
void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
/* free the hardware generic timer slot */
timer_table->timers[timer->index] = NULL;
kfree(timer);
}
EXPORT_SYMBOL(ath_gen_timer_free);
/*
* Generic Timer Interrupts handling
*/
void ath_gen_timer_isr(struct ath_hw *ah)
{
struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
struct ath_gen_timer *timer;
struct ath_common *common = ath9k_hw_common(ah);
u32 trigger_mask, thresh_mask, index;
/* get hardware generic timer interrupt status */
trigger_mask = ah->intr_gen_timer_trigger;
thresh_mask = ah->intr_gen_timer_thresh;
trigger_mask &= timer_table->timer_mask.val;
thresh_mask &= timer_table->timer_mask.val;
trigger_mask &= ~thresh_mask;
while (thresh_mask) {
index = rightmost_index(timer_table, &thresh_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
ath_dbg(common, HWTIMER, "TSF overflow for Gen timer %d\n",
index);
timer->overflow(timer->arg);
}
while (trigger_mask) {
index = rightmost_index(timer_table, &trigger_mask);
timer = timer_table->timers[index];
BUG_ON(!timer);
ath_dbg(common, HWTIMER,
"Gen timer[%d] trigger\n", index);
timer->trigger(timer->arg);
}
}
EXPORT_SYMBOL(ath_gen_timer_isr);
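/*
 * Illustrative sketch, not driver code: the expected lifecycle of the
 * generic timer API above as a hypothetical client might drive it.
 * The callback bodies, the index choice and the TSF-unit values are
 * assumptions, not taken from this file.
 */
static void example_trigger(void *arg)
{
	/* threshold reached */
}

static void example_overflow(void *arg)
{
	/* TSF wrapped before the threshold */
}

static int example_timer_run(struct ath_hw *ah, void *priv)
{
	struct ath_gen_timer *timer;

	timer = ath_gen_timer_alloc(ah, example_trigger, example_overflow,
				    priv, AR_FIRST_NDP_TIMER);
	if (!timer)
		return -ENOMEM;

	/* first shot 5000 TSF units from now, then every 10000 */
	ath9k_hw_gen_timer_start(ah, timer, 5000, 10000);

	/* ... callbacks arrive via ath_gen_timer_isr() ... */

	ath9k_hw_gen_timer_stop(ah, timer);
	ath_gen_timer_free(ah, timer);
	return 0;
}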
/********/
/* HTC */
/********/
static struct {
u32 version;
const char *name;
} ath_mac_bb_names[] = {
/* Devices with external radios */
{ AR_SREV_VERSION_5416_PCI, "5416" },
{ AR_SREV_VERSION_5416_PCIE, "5418" },
{ AR_SREV_VERSION_9100, "9100" },
{ AR_SREV_VERSION_9160, "9160" },
/* Single-chip solutions */
{ AR_SREV_VERSION_9280, "9280" },
{ AR_SREV_VERSION_9285, "9285" },
{ AR_SREV_VERSION_9287, "9287" },
{ AR_SREV_VERSION_9271, "9271" },
{ AR_SREV_VERSION_9300, "9300" },
{ AR_SREV_VERSION_9330, "9330" },
{ AR_SREV_VERSION_9340, "9340" },
{ AR_SREV_VERSION_9485, "9485" },
{ AR_SREV_VERSION_9462, "9462" },
};
/* For devices with external radios */
static struct {
u16 version;
const char *name;
} ath_rf_names[] = {
{ 0, "5133" },
{ AR_RAD5133_SREV_MAJOR, "5133" },
{ AR_RAD5122_SREV_MAJOR, "5122" },
{ AR_RAD2133_SREV_MAJOR, "2133" },
{ AR_RAD2122_SREV_MAJOR, "2122" }
};
/*
* Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
*/
static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version)
{
int i;
for (i = 0; i < ARRAY_SIZE(ath_mac_bb_names); i++) {
if (ath_mac_bb_names[i].version == mac_bb_version) {
return ath_mac_bb_names[i].name;
}
}
return "????";
}
/*
* Return the RF name. "????" is returned if the RF is unknown.
* Used for devices with external radios.
*/
static const char *ath9k_hw_rf_name(u16 rf_version)
{
int i;
for (i = 0; i < ARRAY_SIZE(ath_rf_names); i++) {
if (ath_rf_names[i].version == rf_version) {
return ath_rf_names[i].name;
}
}
return "????";
}
void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
{
int used;
/* chipsets >= AR9280 are single-chip */
if (AR_SREV_9280_20_OR_LATER(ah)) {
used = scnprintf(hw_name, len,
"Atheros AR%s Rev:%x",
ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
ah->hw_version.macRev);
} else {
used = scnprintf(hw_name, len,
"Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
ah->hw_version.macRev,
ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
AR_RADIO_SREV_MAJOR)),
ah->hw_version.phyRev);
}
hw_name[used] = '\0';
}
EXPORT_SYMBOL(ath9k_hw_name);
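/*
 * Illustrative sketch, hypothetical caller: ath9k_hw_name() fills a
 * caller-provided buffer, e.g. for a probe banner.  The buffer size
 * and log prefix are assumptions.
 */
static void example_print_chip(struct ath_hw *ah)
{
	char hw_name[64];

	ath9k_hw_name(ah, hw_name, sizeof(hw_name));
	printk(KERN_INFO "ath: %s\n", hw_name);
}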
| gpl-2.0 |
silvesterlee/linux | arch/powerpc/platforms/85xx/ge_imp3a.c | 1624 | 5193 | /*
* GE IMP3A Board Setup
*
* Author Martyn Welch <martyn.welch@ge.com>
*
* Copyright 2010 GE Intelligent Platforms Embedded Systems, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Based on: mpc85xx_ds.c (MPC85xx DS Board Setup)
* Copyright 2007 Freescale Semiconductor Inc.
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kdev_t.h>
#include <linux/delay.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/of_platform.h>
#include <asm/time.h>
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <mm/mmu_decl.h>
#include <asm/prom.h>
#include <asm/udbg.h>
#include <asm/mpic.h>
#include <asm/swiotlb.h>
#include <asm/nvram.h>
#include <sysdev/fsl_soc.h>
#include <sysdev/fsl_pci.h>
#include "smp.h"
#include "mpc85xx.h"
#include <sysdev/ge/ge_pic.h>
void __iomem *imp3a_regs;
void __init ge_imp3a_pic_init(void)
{
struct mpic *mpic;
struct device_node *np;
struct device_node *cascade_node = NULL;
unsigned long root = of_get_flat_dt_root();
if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
mpic = mpic_alloc(NULL, 0,
MPIC_NO_RESET |
MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
} else {
mpic = mpic_alloc(NULL, 0,
MPIC_BIG_ENDIAN |
MPIC_SINGLE_DEST_CPU,
0, 256, " OpenPIC ");
}
BUG_ON(mpic == NULL);
mpic_init(mpic);
/*
* There is a simple interrupt handler in the main FPGA; it needs
* to be cascaded into the MPIC
*/
for_each_node_by_type(np, "interrupt-controller")
if (of_device_is_compatible(np, "gef,fpga-pic-1.00")) {
cascade_node = np;
break;
}
if (cascade_node == NULL) {
printk(KERN_WARNING "IMP3A: No FPGA PIC\n");
return;
}
gef_pic_init(cascade_node);
of_node_put(cascade_node);
}
static void ge_imp3a_pci_assign_primary(void)
{
#ifdef CONFIG_PCI
struct device_node *np;
struct resource rsrc;
for_each_node_by_type(np, "pci") {
if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
of_device_is_compatible(np, "fsl,mpc8548-pcie") ||
of_device_is_compatible(np, "fsl,p2020-pcie")) {
of_address_to_resource(np, 0, &rsrc);
if ((rsrc.start & 0xfffff) == 0x9000)
fsl_pci_primary = np;
}
}
#endif
}
/*
* Setup the architecture
*/
static void __init ge_imp3a_setup_arch(void)
{
struct device_node *regs;
if (ppc_md.progress)
ppc_md.progress("ge_imp3a_setup_arch()", 0);
mpc85xx_smp_init();
ge_imp3a_pci_assign_primary();
swiotlb_detect_4g();
/* Remap basic board registers */
regs = of_find_compatible_node(NULL, NULL, "ge,imp3a-fpga-regs");
if (regs) {
imp3a_regs = of_iomap(regs, 0);
if (imp3a_regs == NULL)
printk(KERN_WARNING "Unable to map board registers\n");
of_node_put(regs);
}
#if defined(CONFIG_MMIO_NVRAM)
mmio_nvram_init();
#endif
printk(KERN_INFO "GE Intelligent Platforms IMP3A 3U cPCI SBC\n");
}
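/*
 * FPGA register map as inferred from the accessors below; the offsets
 * and fields are read back from imp3a_regs, not taken from a datasheet:
 *
 *   0x0  bits 15:8  PCB revision
 *   0x2  bits 15:8  FPGA revision,  bits 7:0  board revision
 *   0x6  bit  12    cPCI syscon,    bits 11:8 geographic address
 */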
/* Return the PCB revision */
static unsigned int ge_imp3a_get_pcb_rev(void)
{
unsigned int reg;
reg = ioread16(imp3a_regs);
return (reg >> 8) & 0xff;
}
/* Return the board (software) revision */
static unsigned int ge_imp3a_get_board_rev(void)
{
unsigned int reg;
reg = ioread16(imp3a_regs + 0x2);
return reg & 0xff;
}
/* Return the FPGA revision */
static unsigned int ge_imp3a_get_fpga_rev(void)
{
unsigned int reg;
reg = ioread16(imp3a_regs + 0x2);
return (reg >> 8) & 0xff;
}
/* Return compactPCI Geographical Address */
static unsigned int ge_imp3a_get_cpci_geo_addr(void)
{
unsigned int reg;
reg = ioread16(imp3a_regs + 0x6);
return (reg & 0x0f00) >> 8;
}
/* Return compactPCI System Controller Status */
static unsigned int ge_imp3a_get_cpci_is_syscon(void)
{
unsigned int reg;
reg = ioread16(imp3a_regs + 0x6);
return reg & (1 << 12);
}
static void ge_imp3a_show_cpuinfo(struct seq_file *m)
{
seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");
seq_printf(m, "Revision\t: %u%c\n", ge_imp3a_get_pcb_rev(),
('A' + ge_imp3a_get_board_rev() - 1));
seq_printf(m, "FPGA Revision\t: %u\n", ge_imp3a_get_fpga_rev());
seq_printf(m, "cPCI geo. addr\t: %u\n", ge_imp3a_get_cpci_geo_addr());
seq_printf(m, "cPCI syscon\t: %s\n",
ge_imp3a_get_cpci_is_syscon() ? "yes" : "no");
}
/*
* Called very early, device-tree isn't unflattened
*/
static int __init ge_imp3a_probe(void)
{
unsigned long root = of_get_flat_dt_root();
return of_flat_dt_is_compatible(root, "ge,IMP3A");
}
machine_arch_initcall(ge_imp3a, mpc85xx_common_publish_devices);
machine_arch_initcall(ge_imp3a, swiotlb_setup_bus_notifier);
define_machine(ge_imp3a) {
.name = "GE_IMP3A",
.probe = ge_imp3a_probe,
.setup_arch = ge_imp3a_setup_arch,
.init_IRQ = ge_imp3a_pic_init,
.show_cpuinfo = ge_imp3a_show_cpuinfo,
#ifdef CONFIG_PCI
.pcibios_fixup_bus = fsl_pcibios_fixup_bus,
.pcibios_fixup_phb = fsl_pcibios_fixup_phb,
#endif
.get_irq = mpic_get_irq,
.restart = fsl_rstcr_restart,
.calibrate_decr = generic_calibrate_decr,
.progress = udbg_progress,
};
| gpl-2.0 |
maxnet/linux-amlogic | drivers/gpu/drm/nouveau/nouveau_connector.c | 2136 | 34008 | /*
* Copyright (C) 2008 Maarten Maathuis.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include <acpi/button.h>
#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <drm/drm_crtc_helper.h>
#include "nouveau_reg.h"
#include "nouveau_drm.h"
#include "dispnv04/hw.h"
#include "nouveau_acpi.h"
#include "nouveau_display.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include <subdev/i2c.h>
#include <subdev/gpio.h>
MODULE_PARM_DESC(tv_disable, "Disable TV-out detection");
static int nouveau_tv_disable = 0;
module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
MODULE_PARM_DESC(ignorelid, "Ignore ACPI lid status");
static int nouveau_ignorelid = 0;
module_param_named(ignorelid, nouveau_ignorelid, int, 0400);
MODULE_PARM_DESC(duallink, "Allow dual-link TMDS (default: enabled)");
static int nouveau_duallink = 1;
module_param_named(duallink, nouveau_duallink, int, 0400);
struct nouveau_encoder *
find_encoder(struct drm_connector *connector, int type)
{
struct drm_device *dev = connector->dev;
struct nouveau_encoder *nv_encoder;
struct drm_mode_object *obj;
int i, id;
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
id = connector->encoder_ids[i];
if (!id)
break;
obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
nv_encoder = nouveau_encoder(obj_to_encoder(obj));
if (type == DCB_OUTPUT_ANY || nv_encoder->dcb->type == type)
return nv_encoder;
}
return NULL;
}
struct nouveau_connector *
nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
{
struct drm_device *dev = to_drm_encoder(encoder)->dev;
struct drm_connector *drm_connector;
list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head) {
if (drm_connector->encoder == to_drm_encoder(encoder))
return nouveau_connector(drm_connector);
}
return NULL;
}
static void
nouveau_connector_destroy(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
kfree(nv_connector->edid);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
kfree(connector);
}
static struct nouveau_i2c_port *
nouveau_connector_ddc_detect(struct drm_connector *connector,
struct nouveau_encoder **pnv_encoder)
{
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct nouveau_i2c_port *port = NULL;
int i, panel = -ENODEV;
/* eDP panels need powering on by us (if the VBIOS doesn't default it
* to on) before doing any AUX channel transactions. LVDS panel power
* is handled by the SOR itself, and not required for LVDS DDC.
*/
if (nv_connector->type == DCB_CONNECTOR_eDP) {
panel = gpio->get(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff);
if (panel == 0) {
gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, 1);
msleep(300);
}
}
for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++) {
struct nouveau_encoder *nv_encoder;
struct drm_mode_object *obj;
int id;
id = connector->encoder_ids[i];
if (!id)
break;
obj = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
if (!obj)
continue;
nv_encoder = nouveau_encoder(obj_to_encoder(obj));
port = nv_encoder->i2c;
if (port && nv_probe_i2c(port, 0x50)) {
*pnv_encoder = nv_encoder;
break;
}
port = NULL;
}
/* eDP panel not detected, restore panel power GPIO to previous
* state to avoid confusing the SOR for other output types.
*/
if (!port && panel == 0)
gpio->set(gpio, 0, DCB_GPIO_PANEL_POWER, 0xff, panel);
return port;
}
static struct nouveau_encoder *
nouveau_connector_of_detect(struct drm_connector *connector)
{
#ifdef __powerpc__
struct drm_device *dev = connector->dev;
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder;
struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
if (!dn ||
!((nv_encoder = find_encoder(connector, DCB_OUTPUT_TMDS)) ||
(nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG))))
return NULL;
for_each_child_of_node(dn, cn) {
const char *name = of_get_property(cn, "name", NULL);
const void *edid = of_get_property(cn, "EDID", NULL);
int idx = name ? name[strlen(name) - 1] - 'A' : 0;
if (nv_encoder->dcb->i2c_index == idx && edid) {
nv_connector->edid =
kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
of_node_put(cn);
return nv_encoder;
}
}
#endif
return NULL;
}
static void
nouveau_connector_set_encoder(struct drm_connector *connector,
struct nouveau_encoder *nv_encoder)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct drm_device *dev = connector->dev;
if (nv_connector->detected_encoder == nv_encoder)
return;
nv_connector->detected_encoder = nv_encoder;
if (nv_device(drm->device)->card_type >= NV_50) {
connector->interlace_allowed = true;
connector->doublescan_allowed = true;
} else
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS ||
nv_encoder->dcb->type == DCB_OUTPUT_TMDS) {
connector->doublescan_allowed = false;
connector->interlace_allowed = false;
} else {
connector->doublescan_allowed = true;
if (nv_device(drm->device)->card_type == NV_20 ||
(nv_device(drm->device)->card_type == NV_10 &&
(dev->pci_device & 0x0ff0) != 0x0100 &&
(dev->pci_device & 0x0ff0) != 0x0150))
/* HW is broken */
connector->interlace_allowed = false;
else
connector->interlace_allowed = true;
}
if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
drm_object_property_set_value(&connector->base,
dev->mode_config.dvi_i_subconnector_property,
nv_encoder->dcb->type == DCB_OUTPUT_TMDS ?
DRM_MODE_SUBCONNECTOR_DVID :
DRM_MODE_SUBCONNECTOR_DVIA);
}
}
static enum drm_connector_status
nouveau_connector_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
struct nouveau_encoder *nv_partner;
struct nouveau_i2c_port *i2c;
int type;
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
drm_mode_connector_update_edid_property(connector, NULL);
kfree(nv_connector->edid);
nv_connector->edid = NULL;
}
i2c = nouveau_connector_ddc_detect(connector, &nv_encoder);
if (i2c) {
nv_connector->edid = drm_get_edid(connector, &i2c->adapter);
drm_mode_connector_update_edid_property(connector,
nv_connector->edid);
if (!nv_connector->edid) {
NV_ERROR(drm, "DDC responded, but no EDID for %s\n",
drm_get_connector_name(connector));
goto detect_analog;
}
if (nv_encoder->dcb->type == DCB_OUTPUT_DP &&
!nouveau_dp_detect(to_drm_encoder(nv_encoder))) {
NV_ERROR(drm, "Detected %s, but failed init\n",
drm_get_connector_name(connector));
return connector_status_disconnected;
}
/* Override encoder type for DVI-I based on whether EDID
* says the display is digital or analog, both use the
* same i2c channel so the value returned from ddc_detect
* isn't necessarily correct.
*/
nv_partner = NULL;
if (nv_encoder->dcb->type == DCB_OUTPUT_TMDS)
nv_partner = find_encoder(connector, DCB_OUTPUT_ANALOG);
if (nv_encoder->dcb->type == DCB_OUTPUT_ANALOG)
nv_partner = find_encoder(connector, DCB_OUTPUT_TMDS);
if (nv_partner && ((nv_encoder->dcb->type == DCB_OUTPUT_ANALOG &&
nv_partner->dcb->type == DCB_OUTPUT_TMDS) ||
(nv_encoder->dcb->type == DCB_OUTPUT_TMDS &&
nv_partner->dcb->type == DCB_OUTPUT_ANALOG))) {
if (nv_connector->edid->input & DRM_EDID_INPUT_DIGITAL)
type = DCB_OUTPUT_TMDS;
else
type = DCB_OUTPUT_ANALOG;
nv_encoder = find_encoder(connector, type);
}
nouveau_connector_set_encoder(connector, nv_encoder);
return connector_status_connected;
}
nv_encoder = nouveau_connector_of_detect(connector);
if (nv_encoder) {
nouveau_connector_set_encoder(connector, nv_encoder);
return connector_status_connected;
}
detect_analog:
nv_encoder = find_encoder(connector, DCB_OUTPUT_ANALOG);
if (!nv_encoder && !nouveau_tv_disable)
nv_encoder = find_encoder(connector, DCB_OUTPUT_TV);
if (nv_encoder && force) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_encoder_helper_funcs *helper =
encoder->helper_private;
if (helper->detect(encoder, connector) ==
connector_status_connected) {
nouveau_connector_set_encoder(connector, nv_encoder);
return connector_status_connected;
}
}
return connector_status_disconnected;
}
static enum drm_connector_status
nouveau_connector_detect_lvds(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = NULL;
enum drm_connector_status status = connector_status_disconnected;
/* Cleanup the previous EDID block. */
if (nv_connector->edid) {
drm_mode_connector_update_edid_property(connector, NULL);
kfree(nv_connector->edid);
nv_connector->edid = NULL;
}
nv_encoder = find_encoder(connector, DCB_OUTPUT_LVDS);
if (!nv_encoder)
return connector_status_disconnected;
/* Try retrieving EDID via DDC */
if (!drm->vbios.fp_no_ddc) {
status = nouveau_connector_detect(connector, force);
if (status == connector_status_connected)
goto out;
}
/* On some laptops (Sony, I'm looking at you) there appears to
* be no direct way of accessing the panel's EDID.  The only
* option available to us appears to be to ask ACPI for help..
*
* It's important this check runs before trying straps: one of
* the said manufacturer's laptops is configured in such a way
* that nouveau decides an entry in the VBIOS FP mode table is
* valid - it's not (rh#613284)
*/
if (nv_encoder->dcb->lvdsconf.use_acpi_for_edid) {
if ((nv_connector->edid = nouveau_acpi_edid(dev, connector))) {
status = connector_status_connected;
goto out;
}
}
/* If no EDID found above, and the VBIOS indicates a hardcoded
* modeline is available for the panel, set it as the panel's
* native mode and exit.
*/
if (nouveau_bios_fp_mode(dev, NULL) && (drm->vbios.fp_no_ddc ||
nv_encoder->dcb->lvdsconf.use_straps_for_mode)) {
status = connector_status_connected;
goto out;
}
/* Still nothing; some VBIOS images have a hardcoded EDID block
* for the panel stored in them.
*/
if (!drm->vbios.fp_no_ddc) {
struct edid *edid =
(struct edid *)nouveau_bios_embedded_edid(dev);
if (edid) {
nv_connector->edid =
kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
if (nv_connector->edid)
status = connector_status_connected;
}
}
out:
#if defined(CONFIG_ACPI_BUTTON) || \
(defined(CONFIG_ACPI_BUTTON_MODULE) && defined(MODULE))
if (status == connector_status_connected &&
!nouveau_ignorelid && !acpi_lid_open())
status = connector_status_unknown;
#endif
drm_mode_connector_update_edid_property(connector, nv_connector->edid);
nouveau_connector_set_encoder(connector, nv_encoder);
return status;
}
static void
nouveau_connector_force(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder;
int type;
if (nv_connector->type == DCB_CONNECTOR_DVI_I) {
if (connector->force == DRM_FORCE_ON_DIGITAL)
type = DCB_OUTPUT_TMDS;
else
type = DCB_OUTPUT_ANALOG;
} else
type = DCB_OUTPUT_ANY;
nv_encoder = find_encoder(connector, type);
if (!nv_encoder) {
NV_ERROR(drm, "can't find encoder to force %s on!\n",
drm_get_connector_name(connector));
connector->status = connector_status_disconnected;
return;
}
nouveau_connector_set_encoder(connector, nv_encoder);
}
static int
nouveau_connector_set_property(struct drm_connector *connector,
struct drm_property *property, uint64_t value)
{
struct nouveau_display *disp = nouveau_display(connector->dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
struct drm_device *dev = connector->dev;
struct nouveau_crtc *nv_crtc;
int ret;
nv_crtc = NULL;
if (connector->encoder && connector->encoder->crtc)
nv_crtc = nouveau_crtc(connector->encoder->crtc);
/* Scaling mode */
if (property == dev->mode_config.scaling_mode_property) {
bool modeset = false;
switch (value) {
case DRM_MODE_SCALE_NONE:
case DRM_MODE_SCALE_FULLSCREEN:
case DRM_MODE_SCALE_CENTER:
case DRM_MODE_SCALE_ASPECT:
break;
default:
return -EINVAL;
}
/* LVDS always needs gpu scaling */
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS &&
value == DRM_MODE_SCALE_NONE)
return -EINVAL;
/* Changing between GPU and panel scaling requires a full
* modeset
*/
if ((nv_connector->scaling_mode == DRM_MODE_SCALE_NONE) ||
(value == DRM_MODE_SCALE_NONE))
modeset = true;
nv_connector->scaling_mode = value;
if (!nv_crtc)
return 0;
if (modeset || !nv_crtc->set_scale) {
ret = drm_crtc_helper_set_mode(&nv_crtc->base,
&nv_crtc->base.mode,
nv_crtc->base.x,
nv_crtc->base.y, NULL);
if (!ret)
return -EINVAL;
} else {
ret = nv_crtc->set_scale(nv_crtc, true);
if (ret)
return ret;
}
return 0;
}
/* Underscan */
if (property == disp->underscan_property) {
if (nv_connector->underscan != value) {
nv_connector->underscan = value;
if (!nv_crtc || !nv_crtc->set_scale)
return 0;
return nv_crtc->set_scale(nv_crtc, true);
}
return 0;
}
if (property == disp->underscan_hborder_property) {
if (nv_connector->underscan_hborder != value) {
nv_connector->underscan_hborder = value;
if (!nv_crtc || !nv_crtc->set_scale)
return 0;
return nv_crtc->set_scale(nv_crtc, true);
}
return 0;
}
if (property == disp->underscan_vborder_property) {
if (nv_connector->underscan_vborder != value) {
nv_connector->underscan_vborder = value;
if (!nv_crtc || !nv_crtc->set_scale)
return 0;
return nv_crtc->set_scale(nv_crtc, true);
}
return 0;
}
/* Dithering */
if (property == disp->dithering_mode) {
nv_connector->dithering_mode = value;
if (!nv_crtc || !nv_crtc->set_dither)
return 0;
return nv_crtc->set_dither(nv_crtc, true);
}
if (property == disp->dithering_depth) {
nv_connector->dithering_depth = value;
if (!nv_crtc || !nv_crtc->set_dither)
return 0;
return nv_crtc->set_dither(nv_crtc, true);
}
if (nv_crtc && nv_crtc->set_color_vibrance) {
/* Hue */
if (property == disp->vibrant_hue_property) {
nv_crtc->vibrant_hue = value - 90;
return nv_crtc->set_color_vibrance(nv_crtc, true);
}
/* Saturation */
if (property == disp->color_vibrance_property) {
nv_crtc->color_vibrance = value - 100;
return nv_crtc->set_color_vibrance(nv_crtc, true);
}
}
if (nv_encoder && nv_encoder->dcb->type == DCB_OUTPUT_TV)
return get_slave_funcs(encoder)->set_property(
encoder, connector, property, value);
return -EINVAL;
}
static struct drm_display_mode *
nouveau_connector_native_mode(struct drm_connector *connector)
{
struct drm_connector_helper_funcs *helper = connector->helper_private;
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode, *largest = NULL;
int high_w = 0, high_h = 0, high_v = 0;
list_for_each_entry(mode, &nv_connector->base.probed_modes, head) {
mode->vrefresh = drm_mode_vrefresh(mode);
if (helper->mode_valid(connector, mode) != MODE_OK ||
(mode->flags & DRM_MODE_FLAG_INTERLACE))
continue;
/* Use preferred mode if there is one.. */
if (mode->type & DRM_MODE_TYPE_PREFERRED) {
NV_DEBUG(drm, "native mode from preferred\n");
return drm_mode_duplicate(dev, mode);
}
/* Otherwise, take the resolution with the largest width, then
* height, then vertical refresh
*/
if (mode->hdisplay < high_w)
continue;
if (mode->hdisplay == high_w && mode->vdisplay < high_h)
continue;
if (mode->hdisplay == high_w && mode->vdisplay == high_h &&
mode->vrefresh < high_v)
continue;
high_w = mode->hdisplay;
high_h = mode->vdisplay;
high_v = mode->vrefresh;
largest = mode;
}
NV_DEBUG(drm, "native mode from largest: %dx%d@%d\n",
high_w, high_h, high_v);
return largest ? drm_mode_duplicate(dev, largest) : NULL;
}
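/*
 * Illustrative sketch, not driver code: the selection loop above keeps
 * the maximum under a lexicographic (width, height, refresh) ordering,
 * with ties going to the mode seen last.  A hypothetical comparator
 * expressing the same ordering:
 */
static bool example_mode_larger(struct drm_display_mode *a,
				struct drm_display_mode *b)
{
	if (a->hdisplay != b->hdisplay)
		return a->hdisplay > b->hdisplay;
	if (a->vdisplay != b->vdisplay)
		return a->vdisplay > b->vdisplay;
	/* vrefresh was filled in by the loop before comparing */
	return a->vrefresh > b->vrefresh;
}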
struct moderec {
int hdisplay;
int vdisplay;
};
static struct moderec scaler_modes[] = {
{ 1920, 1200 },
{ 1920, 1080 },
{ 1680, 1050 },
{ 1600, 1200 },
{ 1400, 1050 },
{ 1280, 1024 },
{ 1280, 960 },
{ 1152, 864 },
{ 1024, 768 },
{ 800, 600 },
{ 720, 400 },
{ 640, 480 },
{ 640, 400 },
{ 640, 350 },
{}
};
static int
nouveau_connector_scaler_modes_add(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct drm_display_mode *native = nv_connector->native_mode, *m;
struct drm_device *dev = connector->dev;
struct moderec *mode = &scaler_modes[0];
int modes = 0;
if (!native)
return 0;
while (mode->hdisplay) {
if (mode->hdisplay <= native->hdisplay &&
mode->vdisplay <= native->vdisplay) {
m = drm_cvt_mode(dev, mode->hdisplay, mode->vdisplay,
drm_mode_vrefresh(native), false,
false, false);
if (!m)
continue;
m->type |= DRM_MODE_TYPE_DRIVER;
drm_mode_probed_add(connector, m);
modes++;
}
mode++;
}
return modes;
}
static void
nouveau_connector_detect_depth(struct drm_connector *connector)
{
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct nvbios *bios = &drm->vbios;
struct drm_display_mode *mode = nv_connector->native_mode;
bool duallink;
/* if the edid is feeling nice enough to provide this info, use it */
if (nv_connector->edid && connector->display_info.bpc)
return;
/* EDID 1.4 is *supposed* to be supported on eDP, but, Apple... */
if (nv_connector->type == DCB_CONNECTOR_eDP) {
connector->display_info.bpc = 6;
return;
}
/* we're out of options unless we're LVDS, default to 8bpc */
if (nv_encoder->dcb->type != DCB_OUTPUT_LVDS) {
connector->display_info.bpc = 8;
return;
}
connector->display_info.bpc = 6;
/* LVDS: panel straps */
if (bios->fp_no_ddc) {
if (bios->fp.if_is_24bit)
connector->display_info.bpc = 8;
return;
}
/* LVDS: DDC panel, need to first determine the number of links to
* know which if_is_24bit flag to check...
*/
if (nv_connector->edid &&
nv_connector->type == DCB_CONNECTOR_LVDS_SPWG)
duallink = ((u8 *)nv_connector->edid)[121] == 2;
else
duallink = mode->clock >= bios->fp.duallink_transition_clk;
if ((!duallink && (bios->fp.strapless_is_24bit & 1)) ||
( duallink && (bios->fp.strapless_is_24bit & 2)))
connector->display_info.bpc = 8;
}
static int
nouveau_connector_get_modes(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
int ret = 0;
/* destroy the native mode; the attached monitor could have changed. */
if (nv_connector->native_mode) {
drm_mode_destroy(dev, nv_connector->native_mode);
nv_connector->native_mode = NULL;
}
if (nv_connector->edid)
ret = drm_add_edid_modes(connector, nv_connector->edid);
else
if (nv_encoder->dcb->type == DCB_OUTPUT_LVDS &&
(nv_encoder->dcb->lvdsconf.use_straps_for_mode ||
drm->vbios.fp_no_ddc) && nouveau_bios_fp_mode(dev, NULL)) {
struct drm_display_mode mode;
nouveau_bios_fp_mode(dev, &mode);
nv_connector->native_mode = drm_mode_duplicate(dev, &mode);
}
/* Determine display colour depth for everything except LVDS now,
* DP requires this before mode_valid() is called.
*/
if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS)
nouveau_connector_detect_depth(connector);
/* Find the native mode if this is a digital panel; if we didn't
* find any modes through DDC previously, add the native mode to
* the list of modes.
*/
if (!nv_connector->native_mode)
nv_connector->native_mode =
nouveau_connector_native_mode(connector);
if (ret == 0 && nv_connector->native_mode) {
struct drm_display_mode *mode;
mode = drm_mode_duplicate(dev, nv_connector->native_mode);
drm_mode_probed_add(connector, mode);
ret = 1;
}
/* Determine LVDS colour depth, must happen after determining
* "native" mode as some VBIOS tables require us to use the
* pixel clock as part of the lookup...
*/
if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS)
nouveau_connector_detect_depth(connector);
if (nv_encoder->dcb->type == DCB_OUTPUT_TV)
ret = get_slave_funcs(encoder)->get_modes(encoder, connector);
if (nv_connector->type == DCB_CONNECTOR_LVDS ||
nv_connector->type == DCB_CONNECTOR_LVDS_SPWG ||
nv_connector->type == DCB_CONNECTOR_eDP)
ret += nouveau_connector_scaler_modes_add(connector);
return ret;
}
static unsigned
get_tmds_link_bandwidth(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_drm *drm = nouveau_drm(connector->dev);
struct dcb_output *dcb = nv_connector->detected_encoder->dcb;
if (dcb->location != DCB_LOC_ON_CHIP ||
nv_device(drm->device)->chipset >= 0x46)
return 165000;
else if (nv_device(drm->device)->chipset >= 0x40)
return 155000;
else if (nv_device(drm->device)->chipset >= 0x18)
return 135000;
else
return 112000;
}
static int
nouveau_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
struct nouveau_encoder *nv_encoder = nv_connector->detected_encoder;
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
unsigned min_clock = 25000, max_clock = min_clock;
unsigned clock = mode->clock;
switch (nv_encoder->dcb->type) {
case DCB_OUTPUT_LVDS:
if (nv_connector->native_mode &&
(mode->hdisplay > nv_connector->native_mode->hdisplay ||
mode->vdisplay > nv_connector->native_mode->vdisplay))
return MODE_PANEL;
min_clock = 0;
max_clock = 400000;
break;
case DCB_OUTPUT_TMDS:
max_clock = get_tmds_link_bandwidth(connector);
if (nouveau_duallink && nv_encoder->dcb->duallink_possible)
max_clock *= 2;
break;
case DCB_OUTPUT_ANALOG:
max_clock = nv_encoder->dcb->crtconf.maxfreq;
if (!max_clock)
max_clock = 350000;
break;
case DCB_OUTPUT_TV:
return get_slave_funcs(encoder)->mode_valid(encoder, mode);
case DCB_OUTPUT_DP:
max_clock = nv_encoder->dp.link_nr;
max_clock *= nv_encoder->dp.link_bw;
clock = clock * (connector->display_info.bpc * 3) / 10;
break;
default:
BUG_ON(1);
return MODE_BAD;
}
if (clock < min_clock)
return MODE_CLOCK_LOW;
if (clock > max_clock)
return MODE_CLOCK_HIGH;
return MODE_OK;
}
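/*
 * Worked example for the DCB_OUTPUT_DP branch above; the numbers are
 * illustrative, not from this file.  1920x1080@60 has a pixel clock of
 * roughly 148500 kHz; at 8 bpc the required figure is
 * 148500 * (8 * 3) / 10 = 356400.  A 4-lane link with dp.link_bw of
 * 270000 (a plausible HBR value in nouveau's units) offers
 * 4 * 270000 = 1080000, so the mode passes.  The divide by 10 folds in
 * 8b/10b channel-coding overhead given those units.
 */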
static struct drm_encoder *
nouveau_connector_best_encoder(struct drm_connector *connector)
{
struct nouveau_connector *nv_connector = nouveau_connector(connector);
if (nv_connector->detected_encoder)
return to_drm_encoder(nv_connector->detected_encoder);
return NULL;
}
static const struct drm_connector_helper_funcs
nouveau_connector_helper_funcs = {
.get_modes = nouveau_connector_get_modes,
.mode_valid = nouveau_connector_mode_valid,
.best_encoder = nouveau_connector_best_encoder,
};
static const struct drm_connector_funcs
nouveau_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.save = NULL,
.restore = NULL,
.detect = nouveau_connector_detect,
.destroy = nouveau_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
.force = nouveau_connector_force
};
static const struct drm_connector_funcs
nouveau_connector_funcs_lvds = {
.dpms = drm_helper_connector_dpms,
.save = NULL,
.restore = NULL,
.detect = nouveau_connector_detect_lvds,
.destroy = nouveau_connector_destroy,
.fill_modes = drm_helper_probe_single_connector_modes,
.set_property = nouveau_connector_set_property,
.force = nouveau_connector_force
};
static void
nouveau_connector_hotplug_work(struct work_struct *work)
{
struct nouveau_connector *nv_connector =
container_of(work, struct nouveau_connector, hpd_work);
struct drm_connector *connector = &nv_connector->base;
struct drm_device *dev = connector->dev;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
bool plugged = gpio->get(gpio, 0, nv_connector->hpd.func, 0xff);
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un",
drm_get_connector_name(connector));
if (plugged)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
else
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
drm_helper_hpd_irq_event(dev);
}
static int
nouveau_connector_hotplug(struct nouveau_eventh *event, int index)
{
struct nouveau_connector *nv_connector =
container_of(event, struct nouveau_connector, hpd_func);
schedule_work(&nv_connector->hpd_work);
return NVKM_EVENT_KEEP;
}
static int
drm_conntype_from_dcb(enum dcb_connector_type dcb)
{
switch (dcb) {
case DCB_CONNECTOR_VGA : return DRM_MODE_CONNECTOR_VGA;
case DCB_CONNECTOR_TV_0 :
case DCB_CONNECTOR_TV_1 :
case DCB_CONNECTOR_TV_3 : return DRM_MODE_CONNECTOR_TV;
case DCB_CONNECTOR_DMS59_0 :
case DCB_CONNECTOR_DMS59_1 :
case DCB_CONNECTOR_DVI_I : return DRM_MODE_CONNECTOR_DVII;
case DCB_CONNECTOR_DVI_D : return DRM_MODE_CONNECTOR_DVID;
case DCB_CONNECTOR_LVDS :
case DCB_CONNECTOR_LVDS_SPWG: return DRM_MODE_CONNECTOR_LVDS;
case DCB_CONNECTOR_DMS59_DP0:
case DCB_CONNECTOR_DMS59_DP1:
case DCB_CONNECTOR_DP : return DRM_MODE_CONNECTOR_DisplayPort;
case DCB_CONNECTOR_eDP : return DRM_MODE_CONNECTOR_eDP;
case DCB_CONNECTOR_HDMI_0 :
case DCB_CONNECTOR_HDMI_1 : return DRM_MODE_CONNECTOR_HDMIA;
default:
break;
}
return DRM_MODE_CONNECTOR_Unknown;
}
struct drm_connector *
nouveau_connector_create(struct drm_device *dev, int index)
{
const struct drm_connector_funcs *funcs = &nouveau_connector_funcs;
struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_gpio *gpio = nouveau_gpio(drm->device);
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
int type, ret = 0;
bool dummy;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
nv_connector = nouveau_connector(connector);
if (nv_connector->index == index)
return connector;
}
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
return ERR_PTR(-ENOMEM);
connector = &nv_connector->base;
INIT_WORK(&nv_connector->hpd_work, nouveau_connector_hotplug_work);
nv_connector->index = index;
/* attempt to parse vbios connector type and hotplug gpio */
nv_connector->dcb = olddcb_conn(dev, index);
if (nv_connector->dcb) {
static const u8 hpd[16] = {
0xff, 0x07, 0x08, 0xff, 0xff, 0x51, 0x52, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0x5e, 0x5f, 0x60,
};
u32 entry = ROM16(nv_connector->dcb[0]);
if (olddcb_conntab(dev)[3] >= 4)
entry |= (u32)ROM16(nv_connector->dcb[2]) << 16;
ret = gpio->find(gpio, 0, hpd[ffs((entry & 0x07033000) >> 12)],
DCB_GPIO_UNUSED, &nv_connector->hpd);
nv_connector->hpd_func.func = nouveau_connector_hotplug;
if (ret)
nv_connector->hpd.func = DCB_GPIO_UNUSED;
nv_connector->type = nv_connector->dcb[0];
if (drm_conntype_from_dcb(nv_connector->type) ==
DRM_MODE_CONNECTOR_Unknown) {
NV_WARN(drm, "unknown connector type %02x\n",
nv_connector->type);
nv_connector->type = DCB_CONNECTOR_NONE;
}
/* Gigabyte NX85T */
if (nv_match_device(dev, 0x0421, 0x1458, 0x344c)) {
if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
nv_connector->type = DCB_CONNECTOR_DVI_I;
}
/* Gigabyte GV-NX86T512H */
if (nv_match_device(dev, 0x0402, 0x1458, 0x3455)) {
if (nv_connector->type == DCB_CONNECTOR_HDMI_1)
nv_connector->type = DCB_CONNECTOR_DVI_I;
}
} else {
nv_connector->type = DCB_CONNECTOR_NONE;
nv_connector->hpd.func = DCB_GPIO_UNUSED;
}
/* no vbios data, or an unknown dcb connector type - attempt to
* figure out something suitable ourselves
*/
if (nv_connector->type == DCB_CONNECTOR_NONE) {
struct nouveau_drm *drm = nouveau_drm(dev);
struct dcb_table *dcbt = &drm->vbios.dcb;
u32 encoders = 0;
int i;
for (i = 0; i < dcbt->entries; i++) {
if (dcbt->entry[i].connector == nv_connector->index)
encoders |= (1 << dcbt->entry[i].type);
}
if (encoders & (1 << DCB_OUTPUT_DP)) {
if (encoders & (1 << DCB_OUTPUT_TMDS))
nv_connector->type = DCB_CONNECTOR_DP;
else
nv_connector->type = DCB_CONNECTOR_eDP;
} else
if (encoders & (1 << DCB_OUTPUT_TMDS)) {
if (encoders & (1 << DCB_OUTPUT_ANALOG))
nv_connector->type = DCB_CONNECTOR_DVI_I;
else
nv_connector->type = DCB_CONNECTOR_DVI_D;
} else
if (encoders & (1 << DCB_OUTPUT_ANALOG)) {
nv_connector->type = DCB_CONNECTOR_VGA;
} else
if (encoders & (1 << DCB_OUTPUT_LVDS)) {
nv_connector->type = DCB_CONNECTOR_LVDS;
} else
if (encoders & (1 << DCB_OUTPUT_TV)) {
nv_connector->type = DCB_CONNECTOR_TV_0;
}
}
type = drm_conntype_from_dcb(nv_connector->type);
if (type == DRM_MODE_CONNECTOR_LVDS) {
ret = nouveau_bios_parse_lvds_table(dev, 0, &dummy, &dummy);
if (ret) {
NV_ERROR(drm, "Error parsing LVDS table, disabling\n");
kfree(nv_connector);
return ERR_PTR(ret);
}
funcs = &nouveau_connector_funcs_lvds;
} else {
funcs = &nouveau_connector_funcs;
}
/* defaults, will get overridden in detect() */
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
drm_connector_init(dev, connector, funcs, type);
drm_connector_helper_add(connector, &nouveau_connector_helper_funcs);
/* Init DVI-I specific properties */
if (nv_connector->type == DCB_CONNECTOR_DVI_I)
drm_object_attach_property(&connector->base, dev->mode_config.dvi_i_subconnector_property, 0);
/* Add overscan compensation options to digital outputs */
if (disp->underscan_property &&
(type == DRM_MODE_CONNECTOR_DVID ||
type == DRM_MODE_CONNECTOR_DVII ||
type == DRM_MODE_CONNECTOR_HDMIA ||
type == DRM_MODE_CONNECTOR_DisplayPort)) {
drm_object_attach_property(&connector->base,
disp->underscan_property,
UNDERSCAN_OFF);
drm_object_attach_property(&connector->base,
disp->underscan_hborder_property,
0);
drm_object_attach_property(&connector->base,
disp->underscan_vborder_property,
0);
}
/* Add hue and saturation options */
if (disp->vibrant_hue_property)
drm_object_attach_property(&connector->base,
disp->vibrant_hue_property,
90);
if (disp->color_vibrance_property)
drm_object_attach_property(&connector->base,
disp->color_vibrance_property,
150);
switch (nv_connector->type) {
case DCB_CONNECTOR_VGA:
if (nv_device(drm->device)->card_type >= NV_50) {
drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
}
/* fall-through */
case DCB_CONNECTOR_TV_0:
case DCB_CONNECTOR_TV_1:
case DCB_CONNECTOR_TV_3:
nv_connector->scaling_mode = DRM_MODE_SCALE_NONE;
break;
default:
nv_connector->scaling_mode = DRM_MODE_SCALE_FULLSCREEN;
drm_object_attach_property(&connector->base,
dev->mode_config.scaling_mode_property,
nv_connector->scaling_mode);
if (disp->dithering_mode) {
nv_connector->dithering_mode = DITHERING_MODE_AUTO;
drm_object_attach_property(&connector->base,
disp->dithering_mode,
nv_connector->dithering_mode);
}
if (disp->dithering_depth) {
nv_connector->dithering_depth = DITHERING_DEPTH_AUTO;
drm_object_attach_property(&connector->base,
disp->dithering_depth,
nv_connector->dithering_depth);
}
break;
}
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
if (nv_connector->hpd.func != DCB_GPIO_UNUSED)
connector->polled = DRM_CONNECTOR_POLL_HPD;
drm_sysfs_connector_add(connector);
return connector;
}
| gpl-2.0 |
xwliu/Cubietruck_Plus-kernel-source | drivers/usb/gadget/u_uac1.c | 2392 | 7788 | /*
* u_uac1.c -- ALSA audio utilities for Gadget stack
*
* Copyright (C) 2008 Bryan Wu <cooloney@kernel.org>
* Copyright (C) 2008 Analog Devices, Inc
*
* Enter bugs at http://blackfin.uclinux.org/
*
* Licensed under the GPL-2 or later.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/random.h>
#include <linux/syscalls.h>
#include "u_uac1.h"
/*
* This component encapsulates the ALSA devices for the USB audio gadget
*/
#define FILE_PCM_PLAYBACK "/dev/snd/pcmC0D0p"
#define FILE_PCM_CAPTURE "/dev/snd/pcmC0D0c"
#define FILE_CONTROL "/dev/snd/controlC0"
static char *fn_play = FILE_PCM_PLAYBACK;
module_param(fn_play, charp, S_IRUGO);
MODULE_PARM_DESC(fn_play, "Playback PCM device file name");
static char *fn_cap = FILE_PCM_CAPTURE;
module_param(fn_cap, charp, S_IRUGO);
MODULE_PARM_DESC(fn_cap, "Capture PCM device file name");
static char *fn_cntl = FILE_CONTROL;
module_param(fn_cntl, charp, S_IRUGO);
MODULE_PARM_DESC(fn_cntl, "Control device file name");
/*-------------------------------------------------------------------------*/
/**
* Some ALSA internal helper functions
*/
static int snd_interval_refine_set(struct snd_interval *i, unsigned int val)
{
struct snd_interval t;
t.empty = 0;
t.min = t.max = val;
t.openmin = t.openmax = 0;
t.integer = 1;
return snd_interval_refine(i, &t);
}
static int _snd_pcm_hw_param_set(struct snd_pcm_hw_params *params,
snd_pcm_hw_param_t var, unsigned int val,
int dir)
{
int changed;
if (hw_is_mask(var)) {
struct snd_mask *m = hw_param_mask(params, var);
if (val == 0 && dir < 0) {
changed = -EINVAL;
snd_mask_none(m);
} else {
if (dir > 0)
val++;
else if (dir < 0)
val--;
changed = snd_mask_refine_set(
hw_param_mask(params, var), val);
}
} else if (hw_is_interval(var)) {
struct snd_interval *i = hw_param_interval(params, var);
if (val == 0 && dir < 0) {
changed = -EINVAL;
snd_interval_none(i);
} else if (dir == 0)
changed = snd_interval_refine_set(i, val);
else {
struct snd_interval t;
t.openmin = 1;
t.openmax = 1;
t.empty = 0;
t.integer = 0;
if (dir < 0) {
t.min = val - 1;
t.max = val;
} else {
t.min = val;
t.max = val+1;
}
changed = snd_interval_refine(i, &t);
}
} else
return -EINVAL;
if (changed) {
params->cmask |= 1 << var;
params->rmask |= 1 << var;
}
return changed;
}
/*-------------------------------------------------------------------------*/
/**
* Set default hardware params
*/
static int playback_default_hw_params(struct gaudio_snd_dev *snd)
{
struct snd_pcm_substream *substream = snd->substream;
struct snd_pcm_hw_params *params;
snd_pcm_sframes_t result;
/*
* SNDRV_PCM_ACCESS_RW_INTERLEAVED,
* SNDRV_PCM_FORMAT_S16_LE
* CHANNELS: 2
* RATE: 48000
*/
snd->access = SNDRV_PCM_ACCESS_RW_INTERLEAVED;
snd->format = SNDRV_PCM_FORMAT_S16_LE;
snd->channels = 2;
snd->rate = 48000;
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
_snd_pcm_hw_params_any(params);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_ACCESS,
snd->access, 0);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_FORMAT,
snd->format, 0);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_CHANNELS,
snd->channels, 0);
_snd_pcm_hw_param_set(params, SNDRV_PCM_HW_PARAM_RATE,
snd->rate, 0);
snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_DROP, NULL);
snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_HW_PARAMS, params);
result = snd_pcm_kernel_ioctl(substream, SNDRV_PCM_IOCTL_PREPARE, NULL);
if (result < 0) {
ERROR(snd->card,
"Preparing sound card failed: %d\n", (int)result);
kfree(params);
return result;
}
/* Store the hardware parameters */
snd->access = params_access(params);
snd->format = params_format(params);
snd->channels = params_channels(params);
snd->rate = params_rate(params);
kfree(params);
INFO(snd->card,
"Hardware params: access %x, format %x, channels %d, rate %d\n",
snd->access, snd->format, snd->channels, snd->rate);
return 0;
}
/**
* Playback audio buffer data by ALSA PCM device
*/
static size_t u_audio_playback(struct gaudio *card, void *buf, size_t count)
{
struct gaudio_snd_dev *snd = &card->playback;
struct snd_pcm_substream *substream = snd->substream;
struct snd_pcm_runtime *runtime = substream->runtime;
mm_segment_t old_fs;
ssize_t result;
snd_pcm_sframes_t frames;
try_again:
if (runtime->status->state == SNDRV_PCM_STATE_XRUN ||
runtime->status->state == SNDRV_PCM_STATE_SUSPENDED) {
result = snd_pcm_kernel_ioctl(substream,
SNDRV_PCM_IOCTL_PREPARE, NULL);
if (result < 0) {
ERROR(card, "Preparing sound card failed: %d\n",
(int)result);
return result;
}
}
frames = bytes_to_frames(runtime, count);
old_fs = get_fs();
set_fs(KERNEL_DS);
result = snd_pcm_lib_write(snd->substream, buf, frames);
if (result != frames) {
ERROR(card, "Playback error: %d\n", (int)result);
set_fs(old_fs);
goto try_again;
}
set_fs(old_fs);
return 0;
}
static int u_audio_get_playback_channels(struct gaudio *card)
{
return card->playback.channels;
}
static int u_audio_get_playback_rate(struct gaudio *card)
{
return card->playback.rate;
}
/**
* Open ALSA PCM and control device files
* Initialize the PCM and control devices
*/
static int gaudio_open_snd_dev(struct gaudio *card)
{
struct snd_pcm_file *pcm_file;
struct gaudio_snd_dev *snd;
if (!card)
return -ENODEV;
/* Open control device */
snd = &card->control;
snd->filp = filp_open(fn_cntl, O_RDWR, 0);
if (IS_ERR(snd->filp)) {
int ret = PTR_ERR(snd->filp);
ERROR(card, "unable to open sound control device file: %s\n",
fn_cntl);
snd->filp = NULL;
return ret;
}
snd->card = card;
/* Open PCM playback device and setup substream */
snd = &card->playback;
snd->filp = filp_open(fn_play, O_WRONLY, 0);
if (IS_ERR(snd->filp)) {
int ret = PTR_ERR(snd->filp);
ERROR(card, "No such PCM playback device: %s\n", fn_play);
snd->filp = NULL;
return ret;
}
pcm_file = snd->filp->private_data;
snd->substream = pcm_file->substream;
snd->card = card;
playback_default_hw_params(snd);
/* Open PCM capture device and setup substream */
snd = &card->capture;
snd->filp = filp_open(fn_cap, O_RDONLY, 0);
if (IS_ERR(snd->filp)) {
ERROR(card, "No such PCM capture device: %s\n", fn_cap);
snd->substream = NULL;
snd->card = NULL;
snd->filp = NULL;
} else {
pcm_file = snd->filp->private_data;
snd->substream = pcm_file->substream;
snd->card = card;
}
return 0;
}
/**
* Close ALSA PCM and control device files
*/
static int gaudio_close_snd_dev(struct gaudio *gau)
{
struct gaudio_snd_dev *snd;
/* Close control device */
snd = &gau->control;
if (snd->filp)
filp_close(snd->filp, current->files);
/* Close PCM playback device and setup substream */
snd = &gau->playback;
if (snd->filp)
filp_close(snd->filp, current->files);
/* Close PCM capture device and setup substream */
snd = &gau->capture;
if (snd->filp)
filp_close(snd->filp, current->files);
return 0;
}
static struct gaudio *the_card;
/**
* gaudio_setup - setup ALSA interface and preparing for USB transfer
*
* This sets up the PCM, mixer or MIDI ALSA devices for use by the USB gadget.
*
* Returns negative errno, or zero on success
*/
int __init gaudio_setup(struct gaudio *card)
{
int ret;
ret = gaudio_open_snd_dev(card);
if (ret)
ERROR(card, "we need at least one control device\n");
else if (!the_card)
the_card = card;
return ret;
}
/**
* gaudio_cleanup - remove ALSA device interface
*
* This is called to free all resources allocated by gaudio_setup().
*/
void gaudio_cleanup(void)
{
if (the_card) {
gaudio_close_snd_dev(the_card);
the_card = NULL;
}
}
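/*
 * Illustrative sketch, hypothetical caller: how a function driver is
 * expected to pair the two entry points above.  "example_audio" and
 * the bind/unbind names are assumptions, not names from this file.
 */
static struct gaudio example_audio;

static int __init example_bind(void)
{
	return gaudio_setup(&example_audio);
}

static void example_unbind(void)
{
	gaudio_cleanup();
}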
| gpl-2.0 |
dorimanx/SAMMY-ICS-SOURCE | tools/perf/builtin-stat.c | 2392 | 32767 | /*
* builtin-stat.c
*
* Builtin stat command: Give a precise performance counters summary
* overview about any workload, CPU or specific PID.
*
* Sample output:
$ perf stat ./hackbench 10
Time: 0.118
Performance counter stats for './hackbench 10':
1708.761321 task-clock # 11.037 CPUs utilized
41,190 context-switches # 0.024 M/sec
6,735 CPU-migrations # 0.004 M/sec
17,318 page-faults # 0.010 M/sec
5,205,202,243 cycles # 3.046 GHz
3,856,436,920 stalled-cycles-frontend # 74.09% frontend cycles idle
1,600,790,871 stalled-cycles-backend # 30.75% backend cycles idle
2,603,501,247 instructions # 0.50 insns per cycle
# 1.48 stalled cycles per insn
484,357,498 branches # 283.455 M/sec
6,388,934 branch-misses # 1.32% of all branches
0.154822978 seconds time elapsed
*
* Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
*
* Improvements and fixes by:
*
* Arjan van de Ven <arjan@linux.intel.com>
* Yanmin Zhang <yanmin.zhang@intel.com>
* Wu Fengguang <fengguang.wu@intel.com>
* Mike Galbraith <efault@gmx.de>
* Paul Mackerras <paulus@samba.org>
* Jaswinder Singh Rajput <jaswinder@kernel.org>
*
* Released under the GPL v2. (and only v2, not any later version)
*/
#include "perf.h"
#include "builtin.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include <sys/prctl.h>
#include <math.h>
#include <locale.h>
#define DEFAULT_SEPARATOR " "
static struct perf_event_attr default_attrs[] = {
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },
};
/*
* Detailed stats (-d), covering the L1 and last level data caches:
*/
static struct perf_event_attr detailed_attrs[] = {
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1D << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1D << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_LL << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_LL << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};
/*
* Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
*/
static struct perf_event_attr very_detailed_attrs[] = {
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1I << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1I << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_DTLB << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_DTLB << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_ITLB << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_ITLB << 0 |
(PERF_COUNT_HW_CACHE_OP_READ << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};
/*
* Very, very detailed stats (-d -d -d), adding prefetch events:
*/
static struct perf_event_attr very_very_detailed_attrs[] = {
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1D << 0 |
(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },
{ .type = PERF_TYPE_HW_CACHE,
.config =
PERF_COUNT_HW_CACHE_L1D << 0 |
(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};
struct perf_evlist *evsel_list;
static bool system_wide = false;
static int run_idx = 0;
static int run_count = 1;
static bool no_inherit = false;
static bool scale = true;
static bool no_aggr = false;
static pid_t target_pid = -1;
static pid_t target_tid = -1;
static pid_t child_pid = -1;
static bool null_run = false;
static int detailed_run = 0;
static bool sync_run = false;
static bool big_num = true;
static int big_num_opt = -1;
static const char *cpu_list;
static const char *csv_sep = NULL;
static bool csv_output = false;
static volatile int done = 0;
struct stats
{
double n, mean, M2;
};
struct perf_stat {
struct stats res_stats[3];
};
static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
evsel->priv = zalloc(sizeof(struct perf_stat));
return evsel->priv == NULL ? -ENOMEM : 0;
}
static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
free(evsel->priv);
evsel->priv = NULL;
}
static void update_stats(struct stats *stats, u64 val)
{
double delta;
stats->n++;
delta = val - stats->mean;
stats->mean += delta / stats->n;
stats->M2 += delta*(val - stats->mean);
}
static double avg_stats(struct stats *stats)
{
return stats->mean;
}
/*
* http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance
*
* (\Sum n_i^2) - ((\Sum n_i)^2)/n
* s^2 = -------------------------------
* n - 1
*
* http://en.wikipedia.org/wiki/Stddev
*
* The std dev of the mean is related to the std dev by:
*
* s
* s_mean = -------
* sqrt(n)
*
*/
static double stddev_stats(struct stats *stats)
{
double variance = stats->M2 / (stats->n - 1);
double variance_mean = variance / stats->n;
return sqrt(variance_mean);
}
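/*
 * Illustrative sketch: update_stats() above is Welford's online
 * algorithm, so one pass yields both the mean and the std dev of the
 * mean without storing samples.  A hypothetical self-contained use:
 */
static void example_stats_usage(void)
{
	static const u64 samples[] = { 10, 12, 9, 11 };
	struct stats s = { .n = 0 };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(samples); i++)
		update_stats(&s, samples[i]);

	fprintf(stderr, "mean %.2f +- %.2f\n",
		avg_stats(&s), stddev_stats(&s));
}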
struct stats runtime_nsecs_stats[MAX_NR_CPUS];
struct stats runtime_cycles_stats[MAX_NR_CPUS];
struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
struct stats runtime_branches_stats[MAX_NR_CPUS];
struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
struct stats walltime_nsecs_stats;
static int create_perf_stat_counter(struct perf_evsel *evsel)
{
struct perf_event_attr *attr = &evsel->attr;
if (scale)
attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
PERF_FORMAT_TOTAL_TIME_RUNNING;
attr->inherit = !no_inherit;
if (system_wide)
return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false);
if (target_pid == -1 && target_tid == -1) {
attr->disabled = 1;
attr->enable_on_exec = 1;
}
return perf_evsel__open_per_thread(evsel, evsel_list->threads, false);
}
/*
* Does the counter have nsecs as a unit?
*/
static inline int nsec_counter(struct perf_evsel *evsel)
{
if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
return 1;
return 0;
}
/*
* Update various tracking values we maintain to print
* more semantic information such as miss/hit ratios,
* instruction rates, etc:
*/
static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
{
if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
update_stats(&runtime_nsecs_stats[0], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
update_stats(&runtime_cycles_stats[0], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
update_stats(&runtime_stalled_cycles_front_stats[0], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
update_stats(&runtime_stalled_cycles_back_stats[0], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
update_stats(&runtime_branches_stats[0], count[0]);
else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
update_stats(&runtime_cacherefs_stats[0], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
update_stats(&runtime_l1_dcache_stats[0], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
update_stats(&runtime_l1_icache_stats[0], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
update_stats(&runtime_ll_cache_stats[0], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
update_stats(&runtime_dtlb_cache_stats[0], count[0]);
else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
update_stats(&runtime_itlb_cache_stats[0], count[0]);
}
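/*
 * Example (illustrative): when HW_INSTRUCTIONS and HW_CPU_CYCLES are
 * both counted, abs_printout() below divides the instruction count by
 * avg_stats(&runtime_cycles_stats[cpu]) to print "insns per cycle".
 */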
/*
* Read out the results of a single counter:
* aggregate counts across CPUs in system-wide mode
*/
static int read_counter_aggr(struct perf_evsel *counter)
{
struct perf_stat *ps = counter->priv;
u64 *count = counter->counts->aggr.values;
int i;
if (__perf_evsel__read(counter, evsel_list->cpus->nr,
evsel_list->threads->nr, scale) < 0)
return -1;
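/*
 * count[0] is the raw counter value; when scaling is enabled,
 * count[1] and count[2] carry the time-enabled and time-running
 * values requested via PERF_FORMAT_TOTAL_TIME_* in
 * create_perf_stat_counter() above.
 */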
for (i = 0; i < 3; i++)
update_stats(&ps->res_stats[i], count[i]);
if (verbose) {
fprintf(stderr, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
event_name(counter), count[0], count[1], count[2]);
}
/*
* Save the full runtime - to allow normalization during printout:
*/
update_shadow_stats(counter, count);
return 0;
}
/*
* Read out the results of a single counter:
* do not aggregate counts across CPUs in system-wide mode
*/
static int read_counter(struct perf_evsel *counter)
{
u64 *count;
int cpu;
for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
return -1;
count = counter->counts->cpu[cpu].values;
update_shadow_stats(counter, count);
}
return 0;
}
static int run_perf_stat(int argc __used, const char **argv)
{
unsigned long long t0, t1;
struct perf_evsel *counter;
int status = 0;
int child_ready_pipe[2], go_pipe[2];
const bool forks = (argc > 0);
char buf;
if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
perror("failed to create pipes");
exit(1);
}
if (forks) {
if ((child_pid = fork()) < 0)
perror("failed to fork");
if (!child_pid) {
close(child_ready_pipe[0]);
close(go_pipe[1]);
fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
/*
* Do a dummy execvp to get the PLT entry resolved,
* so we avoid the resolver overhead on the real
* execvp call.
*/
execvp("", (char **)argv);
/*
* Tell the parent we're ready to go
*/
close(child_ready_pipe[1]);
/*
* Wait until the parent tells us to go.
*/
if (read(go_pipe[0], &buf, 1) == -1)
perror("unable to read pipe");
execvp(argv[0], (char **)argv);
perror(argv[0]);
exit(-1);
}
if (target_tid == -1 && target_pid == -1 && !system_wide)
evsel_list->threads->map[0] = child_pid;
/*
* Wait for the child to be ready to exec.
*/
close(child_ready_pipe[1]);
close(go_pipe[0]);
if (read(child_ready_pipe[0], &buf, 1) == -1)
perror("unable to read pipe");
close(child_ready_pipe[0]);
}
list_for_each_entry(counter, &evsel_list->entries, node) {
if (create_perf_stat_counter(counter) < 0) {
if (errno == EINVAL || errno == ENOSYS || errno == ENOENT) {
if (verbose)
ui__warning("%s event is not supported by the kernel.\n",
event_name(counter));
continue;
}
if (errno == EPERM || errno == EACCES) {
error("You may not have permission to collect %sstats.\n"
"\t Consider tweaking"
" /proc/sys/kernel/perf_event_paranoid or running as root.",
system_wide ? "system-wide " : "");
} else {
error("open_counter returned with %d (%s). "
"/bin/dmesg may provide additional information.\n",
errno, strerror(errno));
}
if (child_pid != -1)
kill(child_pid, SIGTERM);
die("Not all events could be opened.\n");
return -1;
}
}
if (perf_evlist__set_filters(evsel_list)) {
error("failed to set filter with %d (%s)\n", errno,
strerror(errno));
return -1;
}
/*
* Enable counters and exec the command:
*/
t0 = rdclock();
if (forks) {
close(go_pipe[1]);
wait(&status);
} else {
while (!done)
sleep(1);
}
t1 = rdclock();
update_stats(&walltime_nsecs_stats, t1 - t0);
if (no_aggr) {
list_for_each_entry(counter, &evsel_list->entries, node) {
read_counter(counter);
perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1);
}
} else {
list_for_each_entry(counter, &evsel_list->entries, node) {
read_counter_aggr(counter);
perf_evsel__close_fd(counter, evsel_list->cpus->nr,
evsel_list->threads->nr);
}
}
return WEXITSTATUS(status);
}
static void print_noise_pct(double total, double avg)
{
double pct = 0.0;
if (avg)
pct = 100.0*total/avg;
fprintf(stderr, " ( +-%6.2f%% )", pct);
}
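/*
 * Worked example (illustrative): total = 5.0, avg = 100.0 prints
 * " ( +-  5.00% )".
 */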
static void print_noise(struct perf_evsel *evsel, double avg)
{
struct perf_stat *ps;
if (run_count == 1)
return;
ps = evsel->priv;
print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
}
static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
{
double msecs = avg / 1e6;
char cpustr[16] = { '\0', };
const char *fmt = csv_output ? "%s%.6f%s%s" : "%s%18.6f%s%-25s";
if (no_aggr)
sprintf(cpustr, "CPU%*d%s",
csv_output ? 0 : -4,
evsel_list->cpus->map[cpu], csv_sep);
fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(evsel));
if (evsel->cgrp)
fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name);
if (csv_output)
return;
if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
fprintf(stderr, " # %8.3f CPUs utilized ", avg / avg_stats(&walltime_nsecs_stats));
}
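/*
 * Worked example (illustrative): 4,000 ms of task-clock over a 1 s
 * wall time prints "#    4.000 CPUs utilized".
 */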
static void print_stalled_cycles_frontend(int cpu, struct perf_evsel *evsel __used, double avg)
{
double total, ratio = 0.0;
const char *color;
total = avg_stats(&runtime_cycles_stats[cpu]);
if (total)
ratio = avg / total * 100.0;
color = PERF_COLOR_NORMAL;
if (ratio > 50.0)
color = PERF_COLOR_RED;
else if (ratio > 30.0)
color = PERF_COLOR_MAGENTA;
else if (ratio > 10.0)
color = PERF_COLOR_YELLOW;
fprintf(stderr, " # ");
color_fprintf(stderr, color, "%6.2f%%", ratio);
fprintf(stderr, " frontend cycles idle ");
}
static void print_stalled_cycles_backend(int cpu, struct perf_evsel *evsel __used, double avg)
{
double total, ratio = 0.0;
const char *color;
total = avg_stats(&runtime_cycles_stats[cpu]);
if (total)
ratio = avg / total * 100.0;
color = PERF_COLOR_NORMAL;
if (ratio > 75.0)
color = PERF_COLOR_RED;
else if (ratio > 50.0)
color = PERF_COLOR_MAGENTA;
else if (ratio > 20.0)
color = PERF_COLOR_YELLOW;
fprintf(stderr, " # ");
color_fprintf(stderr, color, "%6.2f%%", ratio);
fprintf(stderr, " backend cycles idle ");
}
static void print_branch_misses(int cpu, struct perf_evsel *evsel __used, double avg)
{
double total, ratio = 0.0;
const char *color;
total = avg_stats(&runtime_branches_stats[cpu]);
if (total)
ratio = avg / total * 100.0;
color = PERF_COLOR_NORMAL;
if (ratio > 20.0)
color = PERF_COLOR_RED;
else if (ratio > 10.0)
color = PERF_COLOR_MAGENTA;
else if (ratio > 5.0)
color = PERF_COLOR_YELLOW;
fprintf(stderr, " # ");
color_fprintf(stderr, color, "%6.2f%%", ratio);
fprintf(stderr, " of all branches ");
}
static void print_l1_dcache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
{
double total, ratio = 0.0;
const char *color;
total = avg_stats(&runtime_l1_dcache_stats[cpu]);
if (total)
ratio = avg / total * 100.0;
color = PERF_COLOR_NORMAL;
if (ratio > 20.0)
color = PERF_COLOR_RED;
else if (ratio > 10.0)
color = PERF_COLOR_MAGENTA;
else if (ratio > 5.0)
color = PERF_COLOR_YELLOW;
fprintf(stderr, " # ");
color_fprintf(stderr, color, "%6.2f%%", ratio);
fprintf(stderr, " of all L1-dcache hits ");
}
static void print_l1_icache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
{
double total, ratio = 0.0;
const char *color;
total = avg_stats(&runtime_l1_icache_stats[cpu]);
if (total)
ratio = avg / total * 100.0;
color = PERF_COLOR_NORMAL;
if (ratio > 20.0)
color = PERF_COLOR_RED;
else if (ratio > 10.0)
color = PERF_COLOR_MAGENTA;
else if (ratio > 5.0)
color = PERF_COLOR_YELLOW;
fprintf(stderr, " # ");
color_fprintf(stderr, color, "%6.2f%%", ratio);
fprintf(stderr, " of all L1-icache hits ");
}
static void print_dtlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
{
double total, ratio = 0.0;
const char *color;
total = avg_stats(&runtime_dtlb_cache_stats[cpu]);
if (total)
ratio = avg / total * 100.0;
color = PERF_COLOR_NORMAL;
if (ratio > 20.0)
color = PERF_COLOR_RED;
else if (ratio > 10.0)
color = PERF_COLOR_MAGENTA;
else if (ratio > 5.0)
color = PERF_COLOR_YELLOW;
fprintf(stderr, " # ");
color_fprintf(stderr, color, "%6.2f%%", ratio);
fprintf(stderr, " of all dTLB cache hits ");
}
static void print_itlb_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
{
double total, ratio = 0.0;
const char *color;
total = avg_stats(&runtime_itlb_cache_stats[cpu]);
if (total)
ratio = avg / total * 100.0;
color = PERF_COLOR_NORMAL;
if (ratio > 20.0)
color = PERF_COLOR_RED;
else if (ratio > 10.0)
color = PERF_COLOR_MAGENTA;
else if (ratio > 5.0)
color = PERF_COLOR_YELLOW;
fprintf(stderr, " # ");
color_fprintf(stderr, color, "%6.2f%%", ratio);
fprintf(stderr, " of all iTLB cache hits ");
}
static void print_ll_cache_misses(int cpu, struct perf_evsel *evsel __used, double avg)
{
double total, ratio = 0.0;
const char *color;
total = avg_stats(&runtime_ll_cache_stats[cpu]);
if (total)
ratio = avg / total * 100.0;
color = PERF_COLOR_NORMAL;
if (ratio > 20.0)
color = PERF_COLOR_RED;
else if (ratio > 10.0)
color = PERF_COLOR_MAGENTA;
else if (ratio > 5.0)
color = PERF_COLOR_YELLOW;
fprintf(stderr, " # ");
color_fprintf(stderr, color, "%6.2f%%", ratio);
fprintf(stderr, " of all LL-cache hits ");
}
static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
{
double total, ratio = 0.0;
char cpustr[16] = { '\0', };
const char *fmt;
if (csv_output)
fmt = "%s%.0f%s%s";
else if (big_num)
fmt = "%s%'18.0f%s%-25s";
else
fmt = "%s%18.0f%s%-25s";
if (no_aggr)
sprintf(cpustr, "CPU%*d%s",
csv_output ? 0 : -4,
evsel_list->cpus->map[cpu], csv_sep);
else
cpu = 0;
fprintf(stderr, fmt, cpustr, avg, csv_sep, event_name(evsel));
if (evsel->cgrp)
fprintf(stderr, "%s%s", csv_sep, evsel->cgrp->name);
if (csv_output)
return;
if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
total = avg_stats(&runtime_cycles_stats[cpu]);
if (total)
ratio = avg / total;
fprintf(stderr, " # %5.2f insns per cycle ", ratio);
total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));
if (total && avg) {
ratio = total / avg;
fprintf(stderr, "\n # %5.2f stalled cycles per insn", ratio);
}
} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
runtime_branches_stats[cpu].n != 0) {
print_branch_misses(cpu, evsel, avg);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
runtime_l1_dcache_stats[cpu].n != 0) {
print_l1_dcache_misses(cpu, evsel, avg);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
runtime_l1_icache_stats[cpu].n != 0) {
print_l1_icache_misses(cpu, evsel, avg);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
runtime_dtlb_cache_stats[cpu].n != 0) {
print_dtlb_cache_misses(cpu, evsel, avg);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
runtime_itlb_cache_stats[cpu].n != 0) {
print_itlb_cache_misses(cpu, evsel, avg);
} else if (
evsel->attr.type == PERF_TYPE_HW_CACHE &&
evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
runtime_ll_cache_stats[cpu].n != 0) {
print_ll_cache_misses(cpu, evsel, avg);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
runtime_cacherefs_stats[cpu].n != 0) {
total = avg_stats(&runtime_cacherefs_stats[cpu]);
if (total)
ratio = avg * 100 / total;
fprintf(stderr, " # %8.3f %% of all cache refs ", ratio);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
print_stalled_cycles_frontend(cpu, evsel, avg);
} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
print_stalled_cycles_backend(cpu, evsel, avg);
} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
total = avg_stats(&runtime_nsecs_stats[cpu]);
if (total)
ratio = 1.0 * avg / total;
fprintf(stderr, " # %8.3f GHz ", ratio);
} else if (runtime_nsecs_stats[cpu].n != 0) {
total = avg_stats(&runtime_nsecs_stats[cpu]);
if (total)
ratio = 1000.0 * avg / total;
fprintf(stderr, " # %8.3f M/sec ", ratio);
} else {
fprintf(stderr, " ");
}
}
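/*
 * Note on the HW_CACHE tests above: attr.config packs the cache id in
 * bits 0-7, the op in bits 8-15 and the result in bits 16-23. With
 * the usual perf_event.h values (L1D = 0, OP_READ = 0,
 * RESULT_MISS = 1), the first test matches config 0x10000.
 */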
/*
* Print out the results of a single counter:
* aggregated counts in system-wide mode
*/
static void print_counter_aggr(struct perf_evsel *counter)
{
struct perf_stat *ps = counter->priv;
double avg = avg_stats(&ps->res_stats[0]);
int scaled = counter->counts->scaled;
if (scaled == -1) {
fprintf(stderr, "%*s%s%*s",
csv_output ? 0 : 18,
"<not counted>",
csv_sep,
csv_output ? 0 : -24,
event_name(counter));
if (counter->cgrp)
fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name);
fputc('\n', stderr);
return;
}
if (nsec_counter(counter))
nsec_printout(-1, counter, avg);
else
abs_printout(-1, counter, avg);
if (csv_output) {
fputc('\n', stderr);
return;
}
print_noise(counter, avg);
if (scaled) {
double avg_enabled, avg_running;
avg_enabled = avg_stats(&ps->res_stats[1]);
avg_running = avg_stats(&ps->res_stats[2]);
fprintf(stderr, " [%5.2f%%]", 100 * avg_running / avg_enabled);
}
fprintf(stderr, "\n");
}
/*
* Print out the results of a single counter:
* does not use aggregated counts in system-wide mode
*/
static void print_counter(struct perf_evsel *counter)
{
u64 ena, run, val;
int cpu;
for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
val = counter->counts->cpu[cpu].val;
ena = counter->counts->cpu[cpu].ena;
run = counter->counts->cpu[cpu].run;
if (run == 0 || ena == 0) {
fprintf(stderr, "CPU%*d%s%*s%s%*s",
csv_output ? 0 : -4,
evsel_list->cpus->map[cpu], csv_sep,
csv_output ? 0 : 18,
"<not counted>", csv_sep,
csv_output ? 0 : -24,
event_name(counter));
if (counter->cgrp)
fprintf(stderr, "%s%s", csv_sep, counter->cgrp->name);
fputc('\n', stderr);
continue;
}
if (nsec_counter(counter))
nsec_printout(cpu, counter, val);
else
abs_printout(cpu, counter, val);
if (!csv_output) {
print_noise(counter, 1.0);
if (run != ena)
fprintf(stderr, " (%.2f%%)", 100.0 * run / ena);
}
fputc('\n', stderr);
}
}
static void print_stat(int argc, const char **argv)
{
struct perf_evsel *counter;
int i;
fflush(stdout);
if (!csv_output) {
fprintf(stderr, "\n");
fprintf(stderr, " Performance counter stats for ");
if (target_pid == -1 && target_tid == -1) {
fprintf(stderr, "\'%s", argv[0]);
for (i = 1; i < argc; i++)
fprintf(stderr, " %s", argv[i]);
} else if (target_pid != -1)
fprintf(stderr, "process id \'%d", target_pid);
else
fprintf(stderr, "thread id \'%d", target_tid);
fprintf(stderr, "\'");
if (run_count > 1)
fprintf(stderr, " (%d runs)", run_count);
fprintf(stderr, ":\n\n");
}
if (no_aggr) {
list_for_each_entry(counter, &evsel_list->entries, node)
print_counter(counter);
} else {
list_for_each_entry(counter, &evsel_list->entries, node)
print_counter_aggr(counter);
}
if (!csv_output) {
if (!null_run)
fprintf(stderr, "\n");
fprintf(stderr, " %17.9f seconds time elapsed",
avg_stats(&walltime_nsecs_stats)/1e9);
if (run_count > 1) {
fprintf(stderr, " ");
print_noise_pct(stddev_stats(&walltime_nsecs_stats),
avg_stats(&walltime_nsecs_stats));
}
fprintf(stderr, "\n\n");
}
}
static volatile int signr = -1;
static void skip_signal(int signo)
{
if (child_pid == -1)
done = 1;
signr = signo;
}
static void sig_atexit(void)
{
if (child_pid != -1)
kill(child_pid, SIGTERM);
if (signr == -1)
return;
signal(signr, SIG_DFL);
kill(getpid(), signr);
}
static const char * const stat_usage[] = {
"perf stat [<options>] [<command>]",
NULL
};
static int stat__set_big_num(const struct option *opt __used,
const char *s __used, int unset)
{
big_num_opt = unset ? 0 : 1;
return 0;
}
static const struct option options[] = {
OPT_CALLBACK('e', "event", &evsel_list, "event",
"event selector. use 'perf list' to list available events",
parse_events),
OPT_CALLBACK(0, "filter", &evsel_list, "filter",
"event filter", parse_filter),
OPT_BOOLEAN('i', "no-inherit", &no_inherit,
"child tasks do not inherit counters"),
OPT_INTEGER('p', "pid", &target_pid,
"stat events on existing process id"),
OPT_INTEGER('t', "tid", &target_tid,
"stat events on existing thread id"),
OPT_BOOLEAN('a', "all-cpus", &system_wide,
"system-wide collection from all CPUs"),
OPT_BOOLEAN('c', "scale", &scale,
"scale/normalize counters"),
OPT_INCR('v', "verbose", &verbose,
"be more verbose (show counter open errors, etc)"),
OPT_INTEGER('r', "repeat", &run_count,
"repeat command and print average + stddev (max: 100)"),
OPT_BOOLEAN('n', "null", &null_run,
"null run - dont start any counters"),
OPT_INCR('d', "detailed", &detailed_run,
"detailed run - start a lot of events"),
OPT_BOOLEAN('S', "sync", &sync_run,
"call sync() before starting a run"),
OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
"print large numbers with thousands\' separators",
stat__set_big_num),
OPT_STRING('C', "cpu", &cpu_list, "cpu",
"list of cpus to monitor in system-wide"),
OPT_BOOLEAN('A', "no-aggr", &no_aggr,
"disable CPU count aggregation"),
OPT_STRING('x', "field-separator", &csv_sep, "separator",
"print counts with custom separator"),
OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
"monitor event in cgroup name only",
parse_cgroups),
OPT_END()
};
/*
* Add default attributes, if there were no attributes specified or
* if -d/--detailed, -d -d or -d -d -d is used:
*/
static int add_default_attributes(void)
{
struct perf_evsel *pos;
size_t attr_nr = 0;
size_t c;
/* Set attrs if no event is selected and !null_run: */
if (null_run)
return 0;
if (!evsel_list->nr_entries) {
for (c = 0; c < ARRAY_SIZE(default_attrs); c++) {
pos = perf_evsel__new(default_attrs + c, c + attr_nr);
if (pos == NULL)
return -1;
perf_evlist__add(evsel_list, pos);
}
attr_nr += c;
}
/* Detailed events get appended to the event list: */
if (detailed_run < 1)
return 0;
/* Append detailed run extra attributes: */
for (c = 0; c < ARRAY_SIZE(detailed_attrs); c++) {
pos = perf_evsel__new(detailed_attrs + c, c + attr_nr);
if (pos == NULL)
return -1;
perf_evlist__add(evsel_list, pos);
}
attr_nr += c;
if (detailed_run < 2)
return 0;
/* Append very detailed run extra attributes: */
for (c = 0; c < ARRAY_SIZE(very_detailed_attrs); c++) {
pos = perf_evsel__new(very_detailed_attrs + c, c + attr_nr);
if (pos == NULL)
return -1;
perf_evlist__add(evsel_list, pos);
}
if (detailed_run < 3)
return 0;
/* Append very, very detailed run extra attributes: */
for (c = 0; c < ARRAY_SIZE(very_very_detailed_attrs); c++) {
pos = perf_evsel__new(very_very_detailed_attrs + c, c + attr_nr);
if (pos == NULL)
return -1;
perf_evlist__add(evsel_list, pos);
}
return 0;
}
int cmd_stat(int argc, const char **argv, const char *prefix __used)
{
struct perf_evsel *pos;
int status = -ENOMEM;
setlocale(LC_ALL, "");
evsel_list = perf_evlist__new(NULL, NULL);
if (evsel_list == NULL)
return -ENOMEM;
argc = parse_options(argc, argv, options, stat_usage,
PARSE_OPT_STOP_AT_NON_OPTION);
if (csv_sep)
csv_output = true;
else
csv_sep = DEFAULT_SEPARATOR;
/*
* let the spreadsheet do the pretty-printing
*/
if (csv_output) {
/* User explicitly passed -B? */
if (big_num_opt == 1) {
fprintf(stderr, "-B option not supported with -x\n");
usage_with_options(stat_usage, options);
} else /* Nope, so disable big number formatting */
big_num = false;
} else if (big_num_opt == 0) /* User passed --no-big-num */
big_num = false;
if (!argc && target_pid == -1 && target_tid == -1)
usage_with_options(stat_usage, options);
if (run_count <= 0)
usage_with_options(stat_usage, options);
/* no_aggr, cgroup are for system-wide only */
if ((no_aggr || nr_cgroups) && !system_wide) {
fprintf(stderr, "both cgroup and no-aggregation "
"modes only available in system-wide mode\n");
usage_with_options(stat_usage, options);
}
if (add_default_attributes())
goto out;
if (target_pid != -1)
target_tid = target_pid;
evsel_list->threads = thread_map__new(target_pid, target_tid);
if (evsel_list->threads == NULL) {
pr_err("Problems finding threads of monitor\n");
usage_with_options(stat_usage, options);
}
if (system_wide)
evsel_list->cpus = cpu_map__new(cpu_list);
else
evsel_list->cpus = cpu_map__dummy_new();
if (evsel_list->cpus == NULL) {
perror("failed to parse CPUs map");
usage_with_options(stat_usage, options);
return -1;
}
list_for_each_entry(pos, &evsel_list->entries, node) {
if (perf_evsel__alloc_stat_priv(pos) < 0 ||
perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0 ||
perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, evsel_list->threads->nr) < 0)
goto out_free_fd;
}
/*
* We don't want to block the signals - that would cause
* child tasks to inherit that and Ctrl-C would not work.
* What we want is for Ctrl-C to work in the exec()-ed
* task, but being ignored by perf stat itself:
*/
atexit(sig_atexit);
signal(SIGINT, skip_signal);
signal(SIGALRM, skip_signal);
signal(SIGABRT, skip_signal);
status = 0;
for (run_idx = 0; run_idx < run_count; run_idx++) {
if (run_count != 1 && verbose)
fprintf(stderr, "[ perf stat: executing run #%d ... ]\n", run_idx + 1);
if (sync_run)
sync();
status = run_perf_stat(argc, argv);
}
if (status != -1)
print_stat(argc, argv);
out_free_fd:
list_for_each_entry(pos, &evsel_list->entries, node)
perf_evsel__free_stat_priv(pos);
perf_evlist__delete_maps(evsel_list);
out:
perf_evlist__delete(evsel_list);
return status;
}
| gpl-2.0 |
icebluechao/stuttgart_kernel | kernel/rtmutex-tester.c | 2904 | 8883 | /*
* RT-Mutex-tester: scriptable tester for rt mutexes
*
* started by Thomas Gleixner:
*
* Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
*/
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/sysdev.h>
#include <linux/timer.h>
#include <linux/freezer.h>
#include "rtmutex.h"
#define MAX_RT_TEST_THREADS 8
#define MAX_RT_TEST_MUTEXES 8
static spinlock_t rttest_lock;
static atomic_t rttest_event;
struct test_thread_data {
int opcode;
int opdata;
int mutexes[MAX_RT_TEST_MUTEXES];
int event;
struct sys_device sysdev;
};
static struct test_thread_data thread_data[MAX_RT_TEST_THREADS];
static struct task_struct *threads[MAX_RT_TEST_THREADS];
static struct rt_mutex mutexes[MAX_RT_TEST_MUTEXES];
enum test_opcodes {
RTTEST_NOP = 0,
RTTEST_SCHEDOT, /* 1 Sched other, data = nice */
RTTEST_SCHEDRT, /* 2 Sched fifo, data = prio */
RTTEST_LOCK, /* 3 Lock uninterruptible, data = lockindex */
RTTEST_LOCKNOWAIT, /* 4 Lock uninterruptible no wait in wakeup, data = lockindex */
RTTEST_LOCKINT, /* 5 Lock interruptible, data = lockindex */
RTTEST_LOCKINTNOWAIT, /* 6 Lock interruptible no wait in wakeup, data = lockindex */
RTTEST_LOCKCONT, /* 7 Continue locking after the wakeup delay */
RTTEST_UNLOCK, /* 8 Unlock, data = lockindex */
/* 9, 10 - reserved for BKL commemoration */
RTTEST_SIGNAL = 11, /* 11 Signal other test thread, data = thread id */
RTTEST_RESETEVENT = 98, /* 98 Reset event counter */
RTTEST_RESET = 99, /* 99 Reset all pending operations */
};
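/*
 * td->mutexes[] values used by the handlers below (inferred from the
 * code): 0 = unlocked, 1 = lock requested, 2 = blocked on the lock,
 * 3 = woken but still waiting, 4 = lock held.
 */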
static int handle_op(struct test_thread_data *td, int lockwakeup)
{
int i, id, ret = -EINVAL;
switch (td->opcode) {
case RTTEST_NOP:
return 0;
case RTTEST_LOCKCONT:
td->mutexes[td->opdata] = 1;
td->event = atomic_add_return(1, &rttest_event);
return 0;
case RTTEST_RESET:
for (i = 0; i < MAX_RT_TEST_MUTEXES; i++) {
if (td->mutexes[i] == 4) {
rt_mutex_unlock(&mutexes[i]);
td->mutexes[i] = 0;
}
}
return 0;
case RTTEST_RESETEVENT:
atomic_set(&rttest_event, 0);
return 0;
default:
if (lockwakeup)
return ret;
}
switch (td->opcode) {
case RTTEST_LOCK:
case RTTEST_LOCKNOWAIT:
id = td->opdata;
if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
return ret;
td->mutexes[id] = 1;
td->event = atomic_add_return(1, &rttest_event);
rt_mutex_lock(&mutexes[id]);
td->event = atomic_add_return(1, &rttest_event);
td->mutexes[id] = 4;
return 0;
case RTTEST_LOCKINT:
case RTTEST_LOCKINTNOWAIT:
id = td->opdata;
if (id < 0 || id >= MAX_RT_TEST_MUTEXES)
return ret;
td->mutexes[id] = 1;
td->event = atomic_add_return(1, &rttest_event);
ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
td->event = atomic_add_return(1, &rttest_event);
td->mutexes[id] = ret ? 0 : 4;
return ret ? -EINTR : 0;
case RTTEST_UNLOCK:
id = td->opdata;
if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
return ret;
td->event = atomic_add_return(1, &rttest_event);
rt_mutex_unlock(&mutexes[id]);
td->event = atomic_add_return(1, &rttest_event);
td->mutexes[id] = 0;
return 0;
default:
break;
}
return ret;
}
/*
* Schedule replacement for rtsem_down(). Only called for threads with
* PF_MUTEX_TESTER set.
*
* This allows us to have fine-grained control over the event flow.
*
*/
void schedule_rt_mutex_test(struct rt_mutex *mutex)
{
int tid, op, dat;
struct test_thread_data *td;
/* We have to look up the task */
for (tid = 0; tid < MAX_RT_TEST_THREADS; tid++) {
if (threads[tid] == current)
break;
}
BUG_ON(tid == MAX_RT_TEST_THREADS);
td = &thread_data[tid];
op = td->opcode;
dat = td->opdata;
switch (op) {
case RTTEST_LOCK:
case RTTEST_LOCKINT:
case RTTEST_LOCKNOWAIT:
case RTTEST_LOCKINTNOWAIT:
if (mutex != &mutexes[dat])
break;
if (td->mutexes[dat] != 1)
break;
td->mutexes[dat] = 2;
td->event = atomic_add_return(1, &rttest_event);
break;
default:
break;
}
schedule();
switch (op) {
case RTTEST_LOCK:
case RTTEST_LOCKINT:
if (mutex != &mutexes[dat])
return;
if (td->mutexes[dat] != 2)
return;
td->mutexes[dat] = 3;
td->event = atomic_add_return(1, &rttest_event);
break;
case RTTEST_LOCKNOWAIT:
case RTTEST_LOCKINTNOWAIT:
if (mutex != &mutexes[dat])
return;
if (td->mutexes[dat] != 2)
return;
td->mutexes[dat] = 1;
td->event = atomic_add_return(1, &rttest_event);
return;
default:
return;
}
td->opcode = 0;
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (td->opcode > 0) {
int ret;
set_current_state(TASK_RUNNING);
ret = handle_op(td, 1);
set_current_state(TASK_INTERRUPTIBLE);
if (td->opcode == RTTEST_LOCKCONT)
break;
td->opcode = ret;
}
/* Wait for the next command to be executed */
schedule();
}
/* Restore previous command and data */
td->opcode = op;
td->opdata = dat;
}
static int test_func(void *data)
{
struct test_thread_data *td = data;
int ret;
current->flags |= PF_MUTEX_TESTER;
set_freezable();
allow_signal(SIGHUP);
for (;;) {
set_current_state(TASK_INTERRUPTIBLE);
if (td->opcode > 0) {
set_current_state(TASK_RUNNING);
ret = handle_op(td, 0);
set_current_state(TASK_INTERRUPTIBLE);
td->opcode = ret;
}
/* Wait for the next command to be executed */
schedule();
try_to_freeze();
if (signal_pending(current))
flush_signals(current);
if (kthread_should_stop())
break;
}
return 0;
}
/**
* sysfs_test_command - interface for test commands
* @dev: thread reference
* @buf: command for actual step
* @count: length of buffer
*
* command syntax:
*
* opcode:data
*/
static ssize_t sysfs_test_command(struct sys_device *dev, struct sysdev_attribute *attr,
const char *buf, size_t count)
{
struct sched_param schedpar;
struct test_thread_data *td;
char cmdbuf[32];
int op, dat, tid, ret;
td = container_of(dev, struct test_thread_data, sysdev);
tid = td->sysdev.id;
/* strings from sysfs write are not 0 terminated! */
if (count >= sizeof(cmdbuf))
return -EINVAL;
/* strip off \n: */
if (buf[count-1] == '\n')
count--;
if (count < 1)
return -EINVAL;
memcpy(cmdbuf, buf, count);
cmdbuf[count] = 0;
if (sscanf(cmdbuf, "%d:%d", &op, &dat) != 2)
return -EINVAL;
switch (op) {
case RTTEST_SCHEDOT:
schedpar.sched_priority = 0;
ret = sched_setscheduler(threads[tid], SCHED_NORMAL, &schedpar);
if (ret)
return ret;
set_user_nice(current, 0);
break;
case RTTEST_SCHEDRT:
schedpar.sched_priority = dat;
ret = sched_setscheduler(threads[tid], SCHED_FIFO, &schedpar);
if (ret)
return ret;
break;
case RTTEST_SIGNAL:
send_sig(SIGHUP, threads[tid], 0);
break;
default:
if (td->opcode > 0)
return -EBUSY;
td->opdata = dat;
td->opcode = op;
wake_up_process(threads[tid]);
}
return count;
}
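/*
 * Illustrative usage (path assumed from the "rttest" sysdev class
 * registered below): echo "3:0" > /sys/devices/system/rttest/rttest0/command
 * asks thread 0 to take mutex 0 via RTTEST_LOCK; "8:0" releases it.
 */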
/**
* sysfs_test_status - sysfs interface for rt tester
* @dev: thread to query
* @buf: char buffer to be filled with thread status info
*/
static ssize_t sysfs_test_status(struct sys_device *dev, struct sysdev_attribute *attr,
char *buf)
{
struct test_thread_data *td;
struct task_struct *tsk;
char *curr = buf;
int i;
td = container_of(dev, struct test_thread_data, sysdev);
tsk = threads[td->sysdev.id];
spin_lock(&rttest_lock);
curr += sprintf(curr,
"O: %4d, E:%8d, S: 0x%08lx, P: %4d, N: %4d, B: %p, M:",
td->opcode, td->event, tsk->state,
(MAX_RT_PRIO - 1) - tsk->prio,
(MAX_RT_PRIO - 1) - tsk->normal_prio,
tsk->pi_blocked_on);
for (i = MAX_RT_TEST_MUTEXES - 1; i >= 0; i--)
curr += sprintf(curr, "%d", td->mutexes[i]);
spin_unlock(&rttest_lock);
curr += sprintf(curr, ", T: %p, R: %p\n", tsk,
mutexes[td->sysdev.id].owner);
return curr - buf;
}
static SYSDEV_ATTR(status, 0600, sysfs_test_status, NULL);
static SYSDEV_ATTR(command, 0600, NULL, sysfs_test_command);
static struct sysdev_class rttest_sysclass = {
.name = "rttest",
};
static int init_test_thread(int id)
{
thread_data[id].sysdev.cls = &rttest_sysclass;
thread_data[id].sysdev.id = id;
threads[id] = kthread_run(test_func, &thread_data[id], "rt-test-%d", id);
if (IS_ERR(threads[id]))
return PTR_ERR(threads[id]);
return sysdev_register(&thread_data[id].sysdev);
}
static int init_rttest(void)
{
int ret, i;
spin_lock_init(&rttest_lock);
for (i = 0; i < MAX_RT_TEST_MUTEXES; i++)
rt_mutex_init(&mutexes[i]);
ret = sysdev_class_register(&rttest_sysclass);
if (ret)
return ret;
for (i = 0; i < MAX_RT_TEST_THREADS; i++) {
ret = init_test_thread(i);
if (ret)
break;
ret = sysdev_create_file(&thread_data[i].sysdev, &attr_status);
if (ret)
break;
ret = sysdev_create_file(&thread_data[i].sysdev, &attr_command);
if (ret)
break;
}
printk("Initializing RT-Tester: %s\n", ret ? "Failed" : "OK" );
return ret;
}
device_initcall(init_rttest);
| gpl-2.0 |
TeamEOS/kernel_oppo_msm8974 | arch/arm/mach-msm/qdsp6v2/audio_mvs.c | 3416 | 27687 | /* Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/miscdevice.h>
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/mutex.h>
#include <linux/wakelock.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/msm_audio_mvs.h>
#include <linux/pm_qos.h>
#include <mach/qdsp6v2/q6voice.h>
#include <mach/cpuidle.h>
/* Each buffer is 20 ms, queue holds 200 ms of data. */
#define MVS_MAX_Q_LEN 10
/* Length of the DSP frame info header added to the voc packet. */
#define DSP_FRAME_HDR_LEN 1
enum audio_mvs_state_type {
AUDIO_MVS_CLOSED,
AUDIO_MVS_STARTED,
AUDIO_MVS_STOPPED
};
struct audio_mvs_buf_node {
struct list_head list;
struct q6_msm_audio_mvs_frame frame;
};
struct audio_mvs_info_type {
enum audio_mvs_state_type state;
uint32_t mvs_mode;
uint32_t rate_type;
uint32_t dtx_mode;
struct q_min_max_rate min_max_rate;
struct list_head in_queue;
struct list_head free_in_queue;
struct list_head out_queue;
struct list_head free_out_queue;
wait_queue_head_t in_wait;
wait_queue_head_t out_wait;
struct mutex lock;
struct mutex in_lock;
struct mutex out_lock;
spinlock_t dsp_lock;
struct wake_lock suspend_lock;
struct pm_qos_request pm_qos_req;
void *memory_chunk;
};
static struct audio_mvs_info_type audio_mvs_info;
static uint32_t audio_mvs_get_rate(uint32_t mvs_mode, uint32_t rate_type)
{
uint32_t cvs_rate;
if (mvs_mode == MVS_MODE_AMR_WB)
cvs_rate = rate_type - MVS_AMR_MODE_0660;
else
cvs_rate = rate_type;
pr_debug("%s: CVS rate is %d for MVS mode %d\n",
__func__, cvs_rate, mvs_mode);
return cvs_rate;
}
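/*
 * Example (illustrative): for MVS_MODE_AMR_WB the CVS rate is the
 * AMR-WB mode expressed relative to MVS_AMR_MODE_0660, so rate_type
 * MVS_AMR_MODE_0660 maps to CVS rate 0; all other modes pass
 * rate_type through unchanged.
 */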
static void audio_mvs_process_ul_pkt(uint8_t *voc_pkt,
uint32_t pkt_len,
void *private_data)
{
struct audio_mvs_buf_node *buf_node = NULL;
struct audio_mvs_info_type *audio = private_data;
unsigned long dsp_flags;
/* Copy up-link packet into out_queue. */
spin_lock_irqsave(&audio->dsp_lock, dsp_flags);
if (!list_empty(&audio->free_out_queue)) {
buf_node = list_first_entry(&audio->free_out_queue,
struct audio_mvs_buf_node,
list);
list_del(&buf_node->list);
switch (audio->mvs_mode) {
case MVS_MODE_AMR:
case MVS_MODE_AMR_WB: {
/* Remove the DSP frame info header. Header format:
* Bits 0-3: Frame rate
* Bits 4-7: Frame type
*/
buf_node->frame.header.frame_type =
((*voc_pkt) & 0xF0) >> 4;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;
memcpy(&buf_node->frame.voc_pkt[0],
voc_pkt,
buf_node->frame.len);
list_add_tail(&buf_node->list, &audio->out_queue);
break;
}
case MVS_MODE_IS127: {
buf_node->frame.header.packet_rate = (*voc_pkt) & 0x0F;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;
memcpy(&buf_node->frame.voc_pkt[0],
voc_pkt,
buf_node->frame.len);
list_add_tail(&buf_node->list, &audio->out_queue);
break;
}
case MVS_MODE_G729A: {
/* G729 frames are 10ms each, but the DSP works with
* 20ms frames and sends two 10ms frames per buffer.
* Extract the two frames and put them in separate
* buffers.
*/
/* Remove the first DSP frame info header.
* Header format:
* Bits 0-1: Frame type
*/
buf_node->frame.header.frame_type = (*voc_pkt) & 0x03;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
/* There are two frames in the buffer. Length of the
* first frame:
*/
buf_node->frame.len = (pkt_len -
2 * DSP_FRAME_HDR_LEN) / 2;
memcpy(&buf_node->frame.voc_pkt[0],
voc_pkt,
buf_node->frame.len);
voc_pkt = voc_pkt + buf_node->frame.len;
list_add_tail(&buf_node->list, &audio->out_queue);
/* Get another buffer from the free Q and fill in the
* second frame.
*/
if (!list_empty(&audio->free_out_queue)) {
buf_node =
list_first_entry(&audio->free_out_queue,
struct audio_mvs_buf_node,
list);
list_del(&buf_node->list);
/* Remove the second DSP frame info header.
* Header format:
* Bits 0-1: Frame type
*/
buf_node->frame.header.frame_type =
(*voc_pkt) & 0x03;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
/* There are two frames in the buffer. Length
* of the first frame:
*/
buf_node->frame.len = (pkt_len -
2 * DSP_FRAME_HDR_LEN) / 2;
memcpy(&buf_node->frame.voc_pkt[0],
voc_pkt,
buf_node->frame.len);
list_add_tail(&buf_node->list,
&audio->out_queue);
} else {
/* Drop the second frame. */
pr_err("%s: UL data dropped, read is slow\n",
__func__);
}
break;
}
case MVS_MODE_G711:
case MVS_MODE_G711A: {
/* G711 frames are 10ms each, but the DSP works with
* 20ms frames and sends two 10ms frames per buffer.
* Extract the two frames and put them in separate
* buffers.
*/
/* Remove the first DSP frame info header.
* Header format: G711A
* Bits 0-1: Frame type
* Bits 2-3: Frame rate
*
* Header format: G711
* Bits 2-3: Frame rate
*/
if (audio->mvs_mode == MVS_MODE_G711A)
buf_node->frame.header.frame_type =
(*voc_pkt) & 0x03;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
/* There are two frames in the buffer. Length of the
* first frame:
*/
buf_node->frame.len = (pkt_len -
2 * DSP_FRAME_HDR_LEN) / 2;
memcpy(&buf_node->frame.voc_pkt[0],
voc_pkt,
buf_node->frame.len);
voc_pkt = voc_pkt + buf_node->frame.len;
list_add_tail(&buf_node->list, &audio->out_queue);
/* Get another buffer from the free Q and fill in the
* second frame.
*/
if (!list_empty(&audio->free_out_queue)) {
buf_node =
list_first_entry(&audio->free_out_queue,
struct audio_mvs_buf_node,
list);
list_del(&buf_node->list);
/* Remove the second DSP frame info header.
* Header format:
* Bits 0-1: Frame type
* Bits 2-3: Frame rate
*/
if (audio->mvs_mode == MVS_MODE_G711A)
buf_node->frame.header.frame_type =
(*voc_pkt) & 0x03;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
/* There are two frames in the buffer. Length
* of the second frame:
*/
buf_node->frame.len = (pkt_len -
2 * DSP_FRAME_HDR_LEN) / 2;
memcpy(&buf_node->frame.voc_pkt[0],
voc_pkt,
buf_node->frame.len);
list_add_tail(&buf_node->list,
&audio->out_queue);
} else {
/* Drop the second frame. */
pr_err("%s: UL data dropped, read is slow\n",
__func__);
}
break;
}
case MVS_MODE_IS733:
case MVS_MODE_4GV_NB:
case MVS_MODE_4GV_WB: {
/* Remove the DSP frame info header.
* Header format:
* Bits 0-3: frame rate
*/
buf_node->frame.header.packet_rate = (*voc_pkt) & 0x0F;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;
memcpy(&buf_node->frame.voc_pkt[0],
voc_pkt,
buf_node->frame.len);
list_add_tail(&buf_node->list, &audio->out_queue);
break;
}
case MVS_MODE_EFR:
case MVS_MODE_FR:
case MVS_MODE_HR: {
/*
* Remove the DSP frame info header
* Header Format
* Bit 0: bfi unused for uplink
* Bit 1-2: sid applies to both uplink and downlink
* Bit 3: taf unused for uplink
* MVS_MODE_HR
* Bit 4: ufi unused for uplink
*/
buf_node->frame.header.gsm_frame_type.sid =
((*voc_pkt) & 0x06) >> 1;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
buf_node->frame.len = pkt_len - DSP_FRAME_HDR_LEN;
memcpy(&buf_node->frame.voc_pkt[0],
voc_pkt,
buf_node->frame.len);
list_add_tail(&buf_node->list, &audio->out_queue);
break;
}
default: {
buf_node->frame.header.frame_type = 0;
buf_node->frame.len = pkt_len;
memcpy(&buf_node->frame.voc_pkt[0],
voc_pkt,
buf_node->frame.len);
list_add_tail(&buf_node->list, &audio->out_queue);
}
}
} else {
pr_err("%s: UL data dropped, read is slow\n", __func__);
}
spin_unlock_irqrestore(&audio->dsp_lock, dsp_flags);
wake_up(&audio->out_wait);
}
static void audio_mvs_process_dl_pkt(uint8_t *voc_pkt,
uint32_t *pkt_len,
void *private_data)
{
struct audio_mvs_buf_node *buf_node = NULL;
struct audio_mvs_info_type *audio = private_data;
unsigned long dsp_flags;
spin_lock_irqsave(&audio->dsp_lock, dsp_flags);
if (!list_empty(&audio->in_queue)) {
uint32_t rate_type = audio_mvs_get_rate(audio->mvs_mode,
audio->rate_type);
buf_node = list_first_entry(&audio->in_queue,
struct audio_mvs_buf_node,
list);
list_del(&buf_node->list);
switch (audio->mvs_mode) {
case MVS_MODE_AMR:
case MVS_MODE_AMR_WB: {
/* Add the DSP frame info header. Header format:
* Bits 0-3: Frame rate
* Bits 4-7: Frame type
*/
*voc_pkt =
((buf_node->frame.header.frame_type & 0x0F) << 4) |
(rate_type & 0x0F);
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
*pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
memcpy(voc_pkt,
&buf_node->frame.voc_pkt[0],
buf_node->frame.len);
list_add_tail(&buf_node->list, &audio->free_in_queue);
break;
}
case MVS_MODE_IS127: {
/* Add the DSP frame info header. Header format:
* Bits 0-3: Frame rate
*/
*voc_pkt = buf_node->frame.header.packet_rate & 0x0F;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
*pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
memcpy(voc_pkt,
&buf_node->frame.voc_pkt[0],
buf_node->frame.len);
list_add_tail(&buf_node->list, &audio->free_in_queue);
break;
}
case MVS_MODE_G729A: {
/* G729 frames are 10ms each but the DSP expects 20ms
* worth of data, so send two 10ms frames per buffer.
*/
/* Add the first DSP frame info header. Header format:
* Bits 0-1: Frame type
*/
*voc_pkt = buf_node->frame.header.frame_type & 0x03;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
*pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
memcpy(voc_pkt,
&buf_node->frame.voc_pkt[0],
buf_node->frame.len);
voc_pkt = voc_pkt + buf_node->frame.len;
list_add_tail(&buf_node->list, &audio->free_in_queue);
if (!list_empty(&audio->in_queue)) {
/* Get the second buffer. */
buf_node = list_first_entry(&audio->in_queue,
struct audio_mvs_buf_node,
list);
list_del(&buf_node->list);
/* Add the second DSP frame info header.
* Header format:
* Bits 0-1: Frame type
*/
*voc_pkt = buf_node->frame.header.frame_type
& 0x03;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
*pkt_len = *pkt_len +
buf_node->frame.len + DSP_FRAME_HDR_LEN;
memcpy(voc_pkt,
&buf_node->frame.voc_pkt[0],
buf_node->frame.len);
list_add_tail(&buf_node->list,
&audio->free_in_queue);
} else {
/* Only 10ms worth of data is available, signal
* erasure frame.
*/
*voc_pkt = MVS_G729A_ERASURE & 0x03;
*pkt_len = *pkt_len + DSP_FRAME_HDR_LEN;
}
break;
}
case MVS_MODE_G711:
case MVS_MODE_G711A: {
/* G711 frames are 10ms each but the DSP expects 20ms
* worth of data, so send two 10ms frames per buffer.
*/
/* Add the first DSP frame info header. Header format:
* Bits 0-1: Frame type
* Bits 2-3: Frame rate
*/
*voc_pkt = ((rate_type & 0x0F) << 2) |
(buf_node->frame.header.frame_type & 0x03);
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
*pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
memcpy(voc_pkt,
&buf_node->frame.voc_pkt[0],
buf_node->frame.len);
voc_pkt = voc_pkt + buf_node->frame.len;
list_add_tail(&buf_node->list, &audio->free_in_queue);
if (!list_empty(&audio->in_queue)) {
/* Get the second buffer. */
buf_node = list_first_entry(&audio->in_queue,
struct audio_mvs_buf_node,
list);
list_del(&buf_node->list);
/* Add the second DSP frame info header.
* Header format:
* Bits 0-1: Frame type
* Bits 2-3: Frame rate
*/
*voc_pkt = ((rate_type & 0x0F) << 2) |
(buf_node->frame.header.frame_type & 0x03);
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
*pkt_len = *pkt_len +
buf_node->frame.len + DSP_FRAME_HDR_LEN;
memcpy(voc_pkt,
&buf_node->frame.voc_pkt[0],
buf_node->frame.len);
list_add_tail(&buf_node->list,
&audio->free_in_queue);
} else {
/* Only 10ms worth of data is available, signal
* erasure frame.
*/
*voc_pkt = ((rate_type & 0x0F) << 2) |
(MVS_G711A_ERASURE & 0x03);
*pkt_len = *pkt_len + DSP_FRAME_HDR_LEN;
}
break;
}
case MVS_MODE_IS733:
case MVS_MODE_4GV_NB:
case MVS_MODE_4GV_WB: {
/* Add the DSP frame info header. Header format:
* Bits 0-3 : Frame rate
*/
*voc_pkt = buf_node->frame.header.packet_rate & 0x0F;
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
*pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
memcpy(voc_pkt,
&buf_node->frame.voc_pkt[0],
buf_node->frame.len);
list_add_tail(&buf_node->list, &audio->free_in_queue);
break;
}
case MVS_MODE_EFR:
case MVS_MODE_FR:
case MVS_MODE_HR: {
/*
* Remove the DSP frame info header
* Header Format
* Bit 0: bfi applies only for downlink
* Bit 1-2: sid applies for downlink and uplink
* Bit 3: taf applies only for downlink
* MVS_MODE_HR
* Bit 4: ufi applies only for downlink
*/
*voc_pkt =
((buf_node->frame.header.gsm_frame_type.bfi
& 0x01) |
((buf_node->frame.header.gsm_frame_type.sid
& 0x03) << 1) |
((buf_node->frame.header.gsm_frame_type.taf
& 0x01) << 3));
if (audio->mvs_mode == MVS_MODE_HR) {
*voc_pkt = (*voc_pkt |
((buf_node->frame.header.gsm_frame_type.ufi
& 0x01) << 4) |
((0 & 0x07) << 5));
} else {
*voc_pkt = (*voc_pkt |
((0 & 0x0F) << 4));
}
voc_pkt = voc_pkt + DSP_FRAME_HDR_LEN;
*pkt_len = buf_node->frame.len + DSP_FRAME_HDR_LEN;
memcpy(voc_pkt,
&buf_node->frame.voc_pkt[0],
buf_node->frame.len);
list_add_tail(&buf_node->list, &audio->free_in_queue);
break;
}
default: {
*pkt_len = buf_node->frame.len;
memcpy(voc_pkt,
&buf_node->frame.voc_pkt[0],
buf_node->frame.len);
list_add_tail(&buf_node->list, &audio->free_in_queue);
}
}
} else {
*pkt_len = 0;
pr_info("%s: No DL data available to send to MVS\n", __func__);
}
spin_unlock_irqrestore(&audio->dsp_lock, dsp_flags);
wake_up(&audio->in_wait);
}
static uint32_t audio_mvs_get_media_type(uint32_t mvs_mode, uint32_t rate_type)
{
uint32_t media_type;
switch (mvs_mode) {
case MVS_MODE_IS733:
media_type = VSS_MEDIA_ID_13K_MODEM;
break;
case MVS_MODE_IS127:
media_type = VSS_MEDIA_ID_EVRC_MODEM;
break;
case MVS_MODE_4GV_NB:
media_type = VSS_MEDIA_ID_4GV_NB_MODEM;
break;
case MVS_MODE_4GV_WB:
media_type = VSS_MEDIA_ID_4GV_WB_MODEM;
break;
case MVS_MODE_AMR:
media_type = VSS_MEDIA_ID_AMR_NB_MODEM;
break;
case MVS_MODE_EFR:
media_type = VSS_MEDIA_ID_EFR_MODEM;
break;
case MVS_MODE_FR:
media_type = VSS_MEDIA_ID_FR_MODEM;
break;
case MVS_MODE_HR:
media_type = VSS_MEDIA_ID_HR_MODEM;
break;
case MVS_MODE_LINEAR_PCM:
media_type = VSS_MEDIA_ID_PCM_NB;
break;
case MVS_MODE_PCM:
media_type = VSS_MEDIA_ID_PCM_NB;
break;
case MVS_MODE_AMR_WB:
media_type = VSS_MEDIA_ID_AMR_WB_MODEM;
break;
case MVS_MODE_G729A:
media_type = VSS_MEDIA_ID_G729;
break;
case MVS_MODE_G711:
case MVS_MODE_G711A:
if (rate_type == MVS_G711A_MODE_MULAW)
media_type = VSS_MEDIA_ID_G711_MULAW;
else
media_type = VSS_MEDIA_ID_G711_ALAW;
break;
case MVS_MODE_PCM_WB:
media_type = VSS_MEDIA_ID_PCM_WB;
break;
default:
media_type = VSS_MEDIA_ID_PCM_NB;
}
pr_debug("%s: media_type is 0x%x\n", __func__, media_type);
return media_type;
}
static uint32_t audio_mvs_get_network_type(uint32_t mvs_mode)
{
uint32_t network_type;
switch (mvs_mode) {
case MVS_MODE_IS733:
case MVS_MODE_IS127:
case MVS_MODE_4GV_NB:
case MVS_MODE_AMR:
case MVS_MODE_EFR:
case MVS_MODE_FR:
case MVS_MODE_HR:
case MVS_MODE_LINEAR_PCM:
case MVS_MODE_G711:
case MVS_MODE_PCM:
case MVS_MODE_G729A:
case MVS_MODE_G711A:
network_type = VSS_NETWORK_ID_VOIP_NB;
break;
case MVS_MODE_4GV_WB:
case MVS_MODE_AMR_WB:
case MVS_MODE_PCM_WB:
network_type = VSS_NETWORK_ID_VOIP_WB;
break;
default:
network_type = VSS_NETWORK_ID_DEFAULT;
}
pr_debug("%s: network_type is 0x%x\n", __func__, network_type);
return network_type;
}
static int audio_mvs_start(struct audio_mvs_info_type *audio)
{
int rc = 0;
pr_info("%s\n", __func__);
/* Prevent sleep. */
wake_lock(&audio->suspend_lock);
pm_qos_update_request(&audio->pm_qos_req,
msm_cpuidle_get_deep_idle_latency());
rc = voice_set_voc_path_full(1);
if (rc == 0) {
voice_register_mvs_cb(audio_mvs_process_ul_pkt,
audio_mvs_process_dl_pkt,
audio);
voice_config_vocoder(
audio_mvs_get_media_type(audio->mvs_mode, audio->rate_type),
audio_mvs_get_rate(audio->mvs_mode, audio->rate_type),
audio_mvs_get_network_type(audio->mvs_mode),
audio->dtx_mode,
audio->min_max_rate);
audio->state = AUDIO_MVS_STARTED;
} else {
pr_err("%s: Error %d setting voc path to full\n", __func__, rc);
}
return rc;
}
static int audio_mvs_stop(struct audio_mvs_info_type *audio)
{
int rc = 0;
pr_info("%s\n", __func__);
voice_set_voc_path_full(0);
audio->state = AUDIO_MVS_STOPPED;
/* Allow sleep. */
pm_qos_update_request(&audio->pm_qos_req, PM_QOS_DEFAULT_VALUE);
wake_unlock(&audio->suspend_lock);
return rc;
}
static int audio_mvs_open(struct inode *inode, struct file *file)
{
int rc = 0;
int i;
int offset = 0;
struct audio_mvs_buf_node *buf_node = NULL;
pr_info("%s\n", __func__);
mutex_lock(&audio_mvs_info.lock);
/* Allocate input and output buffers. */
audio_mvs_info.memory_chunk = kmalloc(2 * MVS_MAX_Q_LEN *
sizeof(struct audio_mvs_buf_node),
GFP_KERNEL);
if (audio_mvs_info.memory_chunk != NULL) {
for (i = 0; i < MVS_MAX_Q_LEN; i++) {
buf_node = audio_mvs_info.memory_chunk + offset;
list_add_tail(&buf_node->list,
&audio_mvs_info.free_in_queue);
offset = offset + sizeof(struct audio_mvs_buf_node);
}
for (i = 0; i < MVS_MAX_Q_LEN; i++) {
buf_node = audio_mvs_info.memory_chunk + offset;
list_add_tail(&buf_node->list,
&audio_mvs_info.free_out_queue);
offset = offset + sizeof(struct audio_mvs_buf_node);
}
audio_mvs_info.state = AUDIO_MVS_STOPPED;
file->private_data = &audio_mvs_info;
} else {
pr_err("%s: No memory for IO buffers\n", __func__);
rc = -ENOMEM;
}
mutex_unlock(&audio_mvs_info.lock);
return rc;
}
static int audio_mvs_release(struct inode *inode, struct file *file)
{
struct list_head *ptr = NULL;
struct list_head *next = NULL;
struct audio_mvs_buf_node *buf_node = NULL;
struct audio_mvs_info_type *audio = file->private_data;
pr_info("%s\n", __func__);
mutex_lock(&audio->lock);
if (audio->state == AUDIO_MVS_STARTED)
audio_mvs_stop(audio);
/* Free input and output memory. */
mutex_lock(&audio->in_lock);
list_for_each_safe(ptr, next, &audio->in_queue) {
buf_node = list_entry(ptr, struct audio_mvs_buf_node, list);
list_del(&buf_node->list);
}
list_for_each_safe(ptr, next, &audio->free_in_queue) {
buf_node = list_entry(ptr, struct audio_mvs_buf_node, list);
list_del(&buf_node->list);
}
mutex_unlock(&audio->in_lock);
mutex_lock(&audio->out_lock);
list_for_each_safe(ptr, next, &audio->out_queue) {
buf_node = list_entry(ptr, struct audio_mvs_buf_node, list);
list_del(&buf_node->list);
}
list_for_each_safe(ptr, next, &audio->free_out_queue) {
buf_node = list_entry(ptr, struct audio_mvs_buf_node, list);
list_del(&buf_node->list);
}
mutex_unlock(&audio->out_lock);
kfree(audio->memory_chunk);
audio->memory_chunk = NULL;
audio->state = AUDIO_MVS_CLOSED;
mutex_unlock(&audio->lock);
return 0;
}
static ssize_t audio_mvs_read(struct file *file,
char __user *buf,
size_t count,
loff_t *pos)
{
int rc = 0;
struct audio_mvs_buf_node *buf_node = NULL;
struct audio_mvs_info_type *audio = file->private_data;
pr_debug("%s:\n", __func__);
rc = wait_event_interruptible_timeout(audio->out_wait,
(!list_empty(&audio->out_queue) ||
audio->state == AUDIO_MVS_STOPPED),
1 * HZ);
if (rc > 0) {
mutex_lock(&audio->out_lock);
if ((audio->state == AUDIO_MVS_STARTED) &&
(!list_empty(&audio->out_queue))) {
if (count >= sizeof(struct q6_msm_audio_mvs_frame)) {
buf_node = list_first_entry(&audio->out_queue,
struct audio_mvs_buf_node,
list);
list_del(&buf_node->list);
rc = copy_to_user(buf,
&buf_node->frame,
sizeof(struct q6_msm_audio_mvs_frame));
if (rc == 0) {
rc = buf_node->frame.len +
sizeof(buf_node->frame.header) +
sizeof(buf_node->frame.len);
} else {
pr_err("%s: Copy to user retuned %d",
__func__, rc);
rc = -EFAULT;
}
list_add_tail(&buf_node->list,
&audio->free_out_queue);
} else {
pr_err("%s: Read count %d < sizeof(frame) %d",
__func__, count,
sizeof(struct q6_msm_audio_mvs_frame));
rc = -ENOMEM;
}
} else {
pr_err("%s: Read performed in state %d\n",
__func__, audio->state);
rc = -EPERM;
}
mutex_unlock(&audio->out_lock);
} else if (rc == 0) {
pr_err("%s: No UL data available\n", __func__);
rc = -ETIMEDOUT;
} else {
pr_err("%s: Read was interrupted\n", __func__);
rc = -ERESTARTSYS;
}
return rc;
}
static ssize_t audio_mvs_write(struct file *file,
const char __user *buf,
size_t count,
loff_t *pos)
{
int rc = 0;
struct audio_mvs_buf_node *buf_node = NULL;
struct audio_mvs_info_type *audio = file->private_data;
pr_debug("%s:\n", __func__);
rc = wait_event_interruptible_timeout(audio->in_wait,
(!list_empty(&audio->free_in_queue) ||
audio->state == AUDIO_MVS_STOPPED), 1 * HZ);
if (rc > 0) {
mutex_lock(&audio->in_lock);
if (audio->state == AUDIO_MVS_STARTED) {
if (count <= sizeof(struct q6_msm_audio_mvs_frame)) {
if (!list_empty(&audio->free_in_queue)) {
buf_node =
list_first_entry(&audio->free_in_queue,
struct audio_mvs_buf_node, list);
list_del(&buf_node->list);
rc = copy_from_user(&buf_node->frame, buf, count);
if (rc) {
pr_err("%s: Copy from user failed\n", __func__);
list_add_tail(&buf_node->list, &audio->free_in_queue);
rc = -EFAULT;
} else {
list_add_tail(&buf_node->list, &audio->in_queue);
}
} else {
pr_err("%s: No free DL buffs\n",
__func__);
}
} else {
pr_err("%s: Write count %d < sizeof(frame) %d",
__func__, count,
sizeof(struct q6_msm_audio_mvs_frame));
rc = -ENOMEM;
}
} else {
pr_err("%s: Write performed in invalid state %d\n",
__func__, audio->state);
rc = -EPERM;
}
mutex_unlock(&audio->in_lock);
} else if (rc == 0) {
pr_err("%s: No free DL buffs\n", __func__);
rc = -ETIMEDOUT;
} else {
pr_err("%s: write was interrupted\n", __func__);
rc = -ERESTARTSYS;
}
return rc;
}
static long audio_mvs_ioctl(struct file *file,
unsigned int cmd,
unsigned long arg)
{
int rc = 0;
struct audio_mvs_info_type *audio = file->private_data;
pr_info("%s:\n", __func__);
switch (cmd) {
case AUDIO_GET_MVS_CONFIG: {
struct msm_audio_mvs_config config;
pr_info("%s: IOCTL GET_MVS_CONFIG\n", __func__);
mutex_lock(&audio->lock);
config.mvs_mode = audio->mvs_mode;
config.rate_type = audio->rate_type;
config.dtx_mode = audio->dtx_mode;
config.min_max_rate.min_rate = audio->min_max_rate.min_rate;
config.min_max_rate.max_rate = audio->min_max_rate.max_rate;
mutex_unlock(&audio->lock);
rc = copy_to_user((void *)arg, &config, sizeof(config));
if (rc == 0)
rc = sizeof(config);
else
pr_err("%s: Config copy failed %d\n", __func__, rc);
break;
}
case AUDIO_SET_MVS_CONFIG: {
struct msm_audio_mvs_config config;
pr_info("%s: IOCTL SET_MVS_CONFIG\n", __func__);
rc = copy_from_user(&config, (void *)arg, sizeof(config));
if (rc == 0) {
mutex_lock(&audio->lock);
if (audio->state == AUDIO_MVS_STOPPED) {
audio->mvs_mode = config.mvs_mode;
audio->rate_type = config.rate_type;
audio->dtx_mode = config.dtx_mode;
audio->min_max_rate.min_rate =
config.min_max_rate.min_rate;
audio->min_max_rate.max_rate =
config.min_max_rate.max_rate;
} else {
pr_err("%s: Set confg called in state %d\n",
__func__, audio->state);
rc = -EPERM;
}
mutex_unlock(&audio->lock);
} else {
pr_err("%s: Config copy failed %d\n", __func__, rc);
}
break;
}
case AUDIO_START: {
pr_info("%s: IOCTL START\n", __func__);
mutex_lock(&audio->lock);
if (audio->state == AUDIO_MVS_STOPPED) {
rc = audio_mvs_start(audio);
if (rc != 0)
audio_mvs_stop(audio);
} else {
pr_err("%s: Start called in invalid state %d\n",
__func__, audio->state);
rc = -EPERM;
}
mutex_unlock(&audio->lock);
break;
}
case AUDIO_STOP: {
pr_info("%s: IOCTL STOP\n", __func__);
mutex_lock(&audio->lock);
if (audio->state == AUDIO_MVS_STARTED) {
rc = audio_mvs_stop(audio);
} else {
pr_err("%s: Stop called in invalid state %d\n",
__func__, audio->state);
rc = -EPERM;
}
mutex_unlock(&audio->lock);
break;
}
default: {
pr_err("%s: Unknown IOCTL %d\n", __func__, cmd);
}
}
return rc;
}
static const struct file_operations audio_mvs_fops = {
.owner = THIS_MODULE,
.open = audio_mvs_open,
.release = audio_mvs_release,
.read = audio_mvs_read,
.write = audio_mvs_write,
.unlocked_ioctl = audio_mvs_ioctl
};
struct miscdevice audio_mvs_misc = {
.minor = MISC_DYNAMIC_MINOR,
.name = "msm_mvs",
.fops = &audio_mvs_fops
};
static int __init audio_mvs_init(void)
{
int rc = 0;
memset(&audio_mvs_info, 0, sizeof(audio_mvs_info));
init_waitqueue_head(&audio_mvs_info.in_wait);
init_waitqueue_head(&audio_mvs_info.out_wait);
mutex_init(&audio_mvs_info.lock);
mutex_init(&audio_mvs_info.in_lock);
mutex_init(&audio_mvs_info.out_lock);
spin_lock_init(&audio_mvs_info.dsp_lock);
INIT_LIST_HEAD(&audio_mvs_info.in_queue);
INIT_LIST_HEAD(&audio_mvs_info.free_in_queue);
INIT_LIST_HEAD(&audio_mvs_info.out_queue);
INIT_LIST_HEAD(&audio_mvs_info.free_out_queue);
wake_lock_init(&audio_mvs_info.suspend_lock,
WAKE_LOCK_SUSPEND,
"audio_mvs_suspend");
pm_qos_add_request(&audio_mvs_info.pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
rc = misc_register(&audio_mvs_misc);
return rc;
}
static void __exit audio_mvs_exit(void)
{
pr_info("%s:\n", __func__);
misc_deregister(&audio_mvs_misc);
}
module_init(audio_mvs_init);
module_exit(audio_mvs_exit);
MODULE_DESCRIPTION("MSM MVS driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
schqiushui/kernel_lollipop_sense_a52 | arch/arm/mach-imx/devices/platform-imx-fb.c | 3416 | 1752 | /*
* Copyright (C) 2010 Pengutronix
* Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify it under
* the terms of the GNU General Public License version 2 as published by the
* Free Software Foundation.
*/
#include <linux/dma-mapping.h>
#include "../hardware.h"
#include "devices-common.h"
#define imx_imx_fb_data_entry_single(soc, _devid, _size) \
{ \
.devid = _devid, \
.iobase = soc ## _LCDC_BASE_ADDR, \
.iosize = _size, \
.irq = soc ## _INT_LCDC, \
}
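/*
 * For example (illustrative expansion), the MX1 entry below becomes:
 * { .devid = "imx1-fb", .iobase = MX1_LCDC_BASE_ADDR,
 *   .iosize = SZ_4K, .irq = MX1_INT_LCDC }
 */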
#ifdef CONFIG_SOC_IMX1
const struct imx_imx_fb_data imx1_imx_fb_data __initconst =
imx_imx_fb_data_entry_single(MX1, "imx1-fb", SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX1 */
#ifdef CONFIG_SOC_IMX21
const struct imx_imx_fb_data imx21_imx_fb_data __initconst =
imx_imx_fb_data_entry_single(MX21, "imx21-fb", SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX21 */
#ifdef CONFIG_SOC_IMX25
const struct imx_imx_fb_data imx25_imx_fb_data __initconst =
imx_imx_fb_data_entry_single(MX25, "imx21-fb", SZ_16K);
#endif /* ifdef CONFIG_SOC_IMX25 */
#ifdef CONFIG_SOC_IMX27
const struct imx_imx_fb_data imx27_imx_fb_data __initconst =
imx_imx_fb_data_entry_single(MX27, "imx21-fb", SZ_4K);
#endif /* ifdef CONFIG_SOC_IMX27 */
struct platform_device *__init imx_add_imx_fb(
const struct imx_imx_fb_data *data,
const struct imx_fb_platform_data *pdata)
{
struct resource res[] = {
{
.start = data->iobase,
.end = data->iobase + data->iosize - 1,
.flags = IORESOURCE_MEM,
}, {
.start = data->irq,
.end = data->irq,
.flags = IORESOURCE_IRQ,
},
};
return imx_add_platform_device_dmamask(data->devid, 0,
res, ARRAY_SIZE(res),
pdata, sizeof(*pdata), DMA_BIT_MASK(32));
}
| gpl-2.0 |
ManishBadarkhe/linux-next | arch/alpha/kernel/binfmt_loader.c | 4440 | 1093 | #include <linux/init.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/mm_types.h>
#include <linux/binfmts.h>
#include <linux/a.out.h>
static int load_binary(struct linux_binprm *bprm)
{
struct exec *eh = (struct exec *)bprm->buf;
unsigned long loader;
struct file *file;
int retval;
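/*
 * 0x183 is believed to be the Alpha ECOFF object magic, and the
 * 0x3000 flag bits mark a dynamic executable; only such binaries are
 * handed off to /sbin/loader below.
 */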
if (eh->fh.f_magic != 0x183 || (eh->fh.f_flags & 0x3000) != 0x3000)
return -ENOEXEC;
if (bprm->loader)
return -ENOEXEC;
allow_write_access(bprm->file);
fput(bprm->file);
bprm->file = NULL;
loader = bprm->vma->vm_end - sizeof(void *);
file = open_exec("/sbin/loader");
retval = PTR_ERR(file);
if (IS_ERR(file))
return retval;
/* Remember if the application is TASO. */
bprm->taso = eh->ah.entry < 0x100000000UL;
bprm->file = file;
bprm->loader = loader;
retval = prepare_binprm(bprm);
if (retval < 0)
return retval;
return search_binary_handler(bprm);
}
static struct linux_binfmt loader_format = {
.load_binary = load_binary,
};
static int __init init_loader_binfmt(void)
{
insert_binfmt(&loader_format);
return 0;
}
arch_initcall(init_loader_binfmt);
| gpl-2.0 |
NormandyCM11/android_kernel_nokia_normandy | fs/configfs/item.c | 8024 | 5506 | /* -*- mode: c; c-basic-offset: 8; -*-
* vim: noexpandtab sw=8 ts=8 sts=0:
*
* item.c - library routines for handling generic config items
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*
* Based on kobject:
* kobject is Copyright (c) 2002-2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*
* Please see the file Documentation/filesystems/configfs/configfs.txt for
* critical information about using the config_item interface.
*/
#include <linux/string.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/slab.h>
#include <linux/configfs.h>
static inline struct config_item * to_item(struct list_head * entry)
{
return container_of(entry,struct config_item,ci_entry);
}
/* Evil kernel */
static void config_item_release(struct kref *kref);
/**
* config_item_init - initialize item.
* @item: item in question.
*/
void config_item_init(struct config_item * item)
{
kref_init(&item->ci_kref);
INIT_LIST_HEAD(&item->ci_entry);
}
/**
* config_item_set_name - Set the name of an item
* @item: item.
* @name: name.
*
* If strlen(name) >= CONFIGFS_ITEM_NAME_LEN, then use a
* dynamically allocated string that @item->ci_name points to.
* Otherwise, use the static @item->ci_namebuf array.
*/
int config_item_set_name(struct config_item * item, const char * fmt, ...)
{
int error = 0;
int limit = CONFIGFS_ITEM_NAME_LEN;
int need;
va_list args;
char * name;
/*
* First, try the static array
*/
va_start(args,fmt);
need = vsnprintf(item->ci_namebuf,limit,fmt,args);
va_end(args);
if (need < limit)
name = item->ci_namebuf;
else {
/*
* Need more space? Allocate it and try again
*/
limit = need + 1;
name = kmalloc(limit,GFP_KERNEL);
if (!name) {
error = -ENOMEM;
goto Done;
}
va_start(args,fmt);
need = vsnprintf(name,limit,fmt,args);
va_end(args);
/* Still? Give up. */
if (need >= limit) {
kfree(name);
error = -EFAULT;
goto Done;
}
}
/* Free the old name, if necessary. */
if (item->ci_name && item->ci_name != item->ci_namebuf)
kfree(item->ci_name);
/* Now, set the new name */
item->ci_name = name;
Done:
return error;
}
EXPORT_SYMBOL(config_item_set_name);
void config_item_init_type_name(struct config_item *item,
const char *name,
struct config_item_type *type)
{
config_item_set_name(item, name);
item->ci_type = type;
config_item_init(item);
}
EXPORT_SYMBOL(config_item_init_type_name);
void config_group_init_type_name(struct config_group *group, const char *name,
struct config_item_type *type)
{
config_item_set_name(&group->cg_item, name);
group->cg_item.ci_type = type;
config_group_init(group);
}
EXPORT_SYMBOL(config_group_init_type_name);
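/**
* config_item_get - increment refcount for item.
* @item: item.
*
* Takes a reference on @item if it is non-NULL, and returns @item.
*/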
struct config_item * config_item_get(struct config_item * item)
{
if (item)
kref_get(&item->ci_kref);
return item;
}
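/*
* Release the item's dynamically allocated name, invoke the type's
* release() hook, and drop the references held on the owning group
* and the parent item.
*/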
static void config_item_cleanup(struct config_item * item)
{
struct config_item_type * t = item->ci_type;
struct config_group * s = item->ci_group;
struct config_item * parent = item->ci_parent;
pr_debug("config_item %s: cleaning up\n",config_item_name(item));
if (item->ci_name != item->ci_namebuf)
kfree(item->ci_name);
item->ci_name = NULL;
if (t && t->ct_item_ops && t->ct_item_ops->release)
t->ct_item_ops->release(item);
if (s)
config_group_put(s);
if (parent)
config_item_put(parent);
}
static void config_item_release(struct kref *kref)
{
config_item_cleanup(container_of(kref, struct config_item, ci_kref));
}
/**
* config_item_put - decrement refcount for item.
* @item: item.
*
* Decrement the refcount, and if 0, call config_item_cleanup().
*/
void config_item_put(struct config_item * item)
{
if (item)
kref_put(&item->ci_kref, config_item_release);
}
/**
* config_group_init - initialize a group for use
* @group: group
*/
void config_group_init(struct config_group *group)
{
config_item_init(&group->cg_item);
INIT_LIST_HEAD(&group->cg_children);
}
/**
* config_group_find_item - search for item in group.
* @group: group we're looking in.
* @name: item's name.
*
* Iterate over @group->cg_children, looking for a matching config_item.
* If matching item is found take a reference and return the item.
* Caller must have locked group via @group->cg_subsys->su_mtx.
*/
struct config_item *config_group_find_item(struct config_group *group,
const char *name)
{
struct list_head * entry;
struct config_item * ret = NULL;
list_for_each(entry,&group->cg_children) {
struct config_item * item = to_item(entry);
if (config_item_name(item) &&
!strcmp(config_item_name(item), name)) {
ret = config_item_get(item);
break;
}
}
return ret;
}
EXPORT_SYMBOL(config_item_init);
EXPORT_SYMBOL(config_group_init);
EXPORT_SYMBOL(config_item_get);
EXPORT_SYMBOL(config_item_put);
EXPORT_SYMBOL(config_group_find_item);
| gpl-2.0 |
jcadduono/idleKernel-note3 | drivers/staging/ft1000/ft1000-pcmcia/ft1000_cs.c | 8024 | 4766 | /*---------------------------------------------------------------------------
FT1000 driver for Flarion Flash OFDM NIC Device
Copyright (C) 1999 David A. Hinds. All Rights Reserved.
Copyright (C) 2002 Flarion Technologies, All rights reserved.
Copyright (C) 2006 Patrik Ostrihon, All rights reserved.
Copyright (C) 2006 ProWeb Consulting, a.s, All rights reserved.
The initial developer of the original code is David A. Hinds
<dahinds@users.sourceforge.net>. Portions created by David A. Hinds.
This file was modified to support the Flarion Flash OFDM NIC Device
by Wai Chan (w.chan@flarion.com).
Port for kernel 2.6 created by Patrik Ostrihon (patrik.ostrihon@pwc.sk)
This program is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the Free
Software Foundation; either version 2 of the License, or (at your option) any
later version. This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details. You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place -
Suite 330, Boston, MA 02111-1307, USA.
-----------------------------------------------------------------------------*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/ds.h>
/*====================================================================*/
MODULE_AUTHOR("Wai Chan");
MODULE_DESCRIPTION("FT1000 PCMCIA driver");
MODULE_LICENSE("GPL");
/*====================================================================*/
static int ft1000_config(struct pcmcia_device *link);
static void ft1000_detach(struct pcmcia_device *link);
static int ft1000_attach(struct pcmcia_device *link);
#include "ft1000.h"
/*====================================================================*/
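/*
* ft1000_reset - reset the card by asking the PCMCIA core to reset
* the socket it occupies.
*/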
static void ft1000_reset(struct pcmcia_device *link)
{
pcmcia_reset_card(link->socket);
}
static int ft1000_attach(struct pcmcia_device *link)
{
link->priv = NULL;
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO;
return ft1000_config(link);
}
static void ft1000_detach(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
if (dev)
stop_ft1000_card(dev);
pcmcia_disable_device(link);
free_netdev(dev);
}
static int ft1000_confcheck(struct pcmcia_device *link, void *priv_data)
{
return pcmcia_request_io(link);
}
/*======================================================================
ft1000_config() is scheduled to run after a CARD_INSERTION event
is received, to configure the PCMCIA socket, and to make the
device available to the system.
======================================================================*/
static int ft1000_config(struct pcmcia_device *link)
{
int ret;
dev_dbg(&link->dev, "ft1000_cs: ft1000_config(0x%p)\n", link);
/* setup IO window */
ret = pcmcia_loop_config(link, ft1000_confcheck, NULL);
if (ret) {
printk(KERN_INFO "ft1000: Could not configure pcmcia\n");
return -ENODEV;
}
/* configure device */
ret = pcmcia_enable_device(link);
if (ret) {
printk(KERN_INFO "ft1000: could not enable pcmcia\n");
goto failed;
}
link->priv = init_ft1000_card(link, &ft1000_reset);
if (!link->priv) {
printk(KERN_INFO "ft1000: Could not register as network device\n");
goto failed;
}
/* Finally, report what we've done */
return 0;
failed:
pcmcia_disable_device(link);
return -ENODEV;
}
static int ft1000_suspend(struct pcmcia_device *link)
{
struct net_device *dev = link->priv;
if (link->open)
netif_device_detach(dev);
return 0;
}
static int ft1000_resume(struct pcmcia_device *link)
{
return 0;
}
/*====================================================================*/
static const struct pcmcia_device_id ft1000_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x0100),
PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x1000),
PCMCIA_DEVICE_MANF_CARD(0x02cc, 0x1300),
PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, ft1000_ids);
static struct pcmcia_driver ft1000_cs_driver = {
.owner = THIS_MODULE,
.name = "ft1000_cs",
.probe = ft1000_attach,
.remove = ft1000_detach,
.id_table = ft1000_ids,
.suspend = ft1000_suspend,
.resume = ft1000_resume,
};
static int __init init_ft1000_cs(void)
{
return pcmcia_register_driver(&ft1000_cs_driver);
}
static void __exit exit_ft1000_cs(void)
{
pcmcia_unregister_driver(&ft1000_cs_driver);
}
module_init(init_ft1000_cs);
module_exit(exit_ft1000_cs);
| gpl-2.0 |
nikez/android_kernel_htc_msm8660 | drivers/scsi/a100u2w.c | 8280 | 36929 | /*
* Initio A100 device driver for Linux.
*
* Copyright (c) 1994-1998 Initio Corporation
* Copyright (c) 2003-2004 Christoph Hellwig
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* Revision History:
* 07/02/98 hl - v.91n Initial drivers.
* 09/14/98 hl - v1.01 Support new Kernel.
* 09/22/98 hl - v1.01a Support reset.
* 09/24/98 hl - v1.01b Fixed reset.
* 10/05/98 hl - v1.02 split the source code and release.
* 12/19/98 bv - v1.02a Use spinlocks for 2.1.95 and up
* 01/31/99 bv - v1.02b Use mdelay instead of waitForPause
* 08/08/99 bv - v1.02c Use waitForPause again.
* 06/25/02 Doug Ledford <dledford@redhat.com> - v1.02d
* - Remove limit on number of controllers
* - Port to DMA mapping API
* - Clean up interrupt handler registration
* - Fix memory leaks
* - Fix allocation of scsi host structs and private data
* 11/18/03 Christoph Hellwig <hch@lst.de>
* - Port to new probing API
* - Fix some more leaks in init failure cases
* 9/28/04 Christoph Hellwig <hch@lst.de>
* - merge the two source files
* - remove internal queueing code
* 14/06/07 Alan Cox <alan@lxorguk.ukuu.org.uk>
* - Grand cleanup and Linuxisation
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/blkdev.h>
#include <linux/spinlock.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/dma-mapping.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include "a100u2w.h"
static struct orc_scb *__orc_alloc_scb(struct orc_host * host);
static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb);
static struct orc_nvram nvram, *nvramp = &nvram;
static u8 default_nvram[64] =
{
/*----------header -------------*/
0x01, /* 0x00: Sub System Vendor ID 0 */
0x11, /* 0x01: Sub System Vendor ID 1 */
0x60, /* 0x02: Sub System ID 0 */
0x10, /* 0x03: Sub System ID 1 */
0x00, /* 0x04: SubClass */
0x01, /* 0x05: Vendor ID 0 */
0x11, /* 0x06: Vendor ID 1 */
0x60, /* 0x07: Device ID 0 */
0x10, /* 0x08: Device ID 1 */
0x00, /* 0x09: Reserved */
0x00, /* 0x0A: Reserved */
0x01, /* 0x0B: Revision of Data Structure */
/* -- Host Adapter Structure --- */
0x01, /* 0x0C: Number Of SCSI Channel */
0x01, /* 0x0D: BIOS Configuration 1 */
0x00, /* 0x0E: BIOS Configuration 2 */
0x00, /* 0x0F: BIOS Configuration 3 */
/* --- SCSI Channel 0 Configuration --- */
0x07, /* 0x10: H/A ID */
0x83, /* 0x11: Channel Configuration */
0x20, /* 0x12: MAX TAG per target */
0x0A, /* 0x13: SCSI Reset Recovering time */
0x00, /* 0x14: Channel Configuration4 */
0x00, /* 0x15: Channel Configuration5 */
/* SCSI Channel 0 Target Configuration */
/* 0x16-0x25 */
0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
/* --- SCSI Channel 1 Configuration --- */
0x07, /* 0x26: H/A ID */
0x83, /* 0x27: Channel Configuration */
0x20, /* 0x28: MAX TAG per target */
0x0A, /* 0x29: SCSI Reset Recovering time */
0x00, /* 0x2A: Channel Configuration4 */
0x00, /* 0x2B: Channel Configuration5 */
/* SCSI Channel 1 Target Configuration */
/* 0x2C-0x3B */
0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8, 0xC8,
0x00, /* 0x3C: Reserved */
0x00, /* 0x3D: Reserved */
0x00, /* 0x3E: Reserved */
0x00 /* 0x3F: Checksum */
};
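/**
* wait_chip_ready - wait for the firmware processor to stop
* @host: host to wait for
*
* Poll the host control register for up to a second until HOSTSTOP
* is set. Returns 1 on success, 0 on timeout.
*/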
static u8 wait_chip_ready(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
if (inb(host->base + ORC_HCTRL) & HOSTSTOP) /* Wait HOSTSTOP set */
return 1;
mdelay(100);
}
return 0;
}
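/**
* wait_firmware_ready - wait for the firmware to come up
* @host: host to wait for
*
* Poll the host status register for up to a second until RREADY is
* set. Returns 1 on success, 0 on timeout.
*/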
static u8 wait_firmware_ready(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
if (inb(host->base + ORC_HSTUS) & RREADY) /* Wait READY set */
return 1;
mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
static u8 wait_scsi_reset_done(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
if (!(inb(host->base + ORC_HCTRL) & SCSIRST)) /* Wait SCSIRST done */
return 1;
mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
static u8 wait_HDO_off(struct orc_host * host)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
if (!(inb(host->base + ORC_HCTRL) & HDO)) /* Wait HDO off */
return 1;
mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
static u8 wait_hdi_set(struct orc_host * host, u8 * data)
{
int i;
for (i = 0; i < 10; i++) { /* Wait 1 second for report timeout */
if ((*data = inb(host->base + ORC_HSTUS)) & HDI)
return 1; /* Wait HDI set */
mdelay(100); /* wait 100ms before try again */
}
return 0;
}
/***************************************************************************/
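/**
* orc_read_fwrev - read the firmware revision
* @host: host to query
*
* Issue ORC_CMD_VERSION through the HDATA/HCTRL mailbox handshake and
* assemble the two reply bytes into a 16-bit revision, clearing HDI
* after each byte. Returns 0 if the handshake times out.
*/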
static unsigned short orc_read_fwrev(struct orc_host * host)
{
u16 version;
u8 data;
outb(ORC_CMD_VERSION, host->base + ORC_HDATA);
outb(HDO, host->base + ORC_HCTRL);
if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
version = inb(host->base + ORC_HDATA);
outb(data, host->base + ORC_HSTUS); /* Clear HDI */
if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
version |= inb(host->base + ORC_HDATA) << 8;
outb(data, host->base + ORC_HSTUS); /* Clear HDI */
return version;
}
/***************************************************************************/
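/**
* orc_nv_write - write one byte of the serial EEPROM image
* @host: host whose EEPROM is being written
* @address: byte offset within the NVRAM image
* @value: byte to store
*
* Returns 1 on success, 0 if the mailbox handshake times out.
*/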
static u8 orc_nv_write(struct orc_host * host, unsigned char address, unsigned char value)
{
outb(ORC_CMD_SET_NVM, host->base + ORC_HDATA); /* Write command */
outb(HDO, host->base + ORC_HCTRL);
if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
outb(address, host->base + ORC_HDATA); /* Write address */
outb(HDO, host->base + ORC_HCTRL);
if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
outb(value, host->base + ORC_HDATA); /* Write value */
outb(HDO, host->base + ORC_HCTRL);
if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
return 1;
}
/***************************************************************************/
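/**
* orc_nv_read - read one byte of the serial EEPROM image
* @host: host whose EEPROM is being read
* @address: byte offset within the NVRAM image
* @ptr: where to store the byte read
*
* Returns 1 on success, 0 if the mailbox handshake times out.
*/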
static u8 orc_nv_read(struct orc_host * host, u8 address, u8 *ptr)
{
unsigned char data;
outb(ORC_CMD_GET_NVM, host->base + ORC_HDATA); /* Write command */
outb(HDO, host->base + ORC_HCTRL);
if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
outb(address, host->base + ORC_HDATA); /* Write address */
outb(HDO, host->base + ORC_HCTRL);
if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
*ptr = inb(host->base + ORC_HDATA);
outb(data, host->base + ORC_HSTUS); /* Clear HDI */
return 1;
}
/**
* orc_exec_scb - Queue an SCB with the HA
* @host: host adapter the SCB belongs to
* @scb: SCB to queue for execution
*/
static void orc_exec_scb(struct orc_host * host, struct orc_scb * scb)
{
scb->status = ORCSCB_POST;
outb(scb->scbidx, host->base + ORC_PQUEUE);
}
/**
* se2_rd_all - read SCSI parameters from EEPROM
* @host: Host whose EEPROM is being loaded
*
* Read SCSI H/A configuration parameters from serial EEPROM
*/
static int se2_rd_all(struct orc_host * host)
{
int i;
u8 *np, chksum = 0;
np = (u8 *) nvramp;
for (i = 0; i < 64; i++, np++) { /* <01> */
if (orc_nv_read(host, (u8) i, np) == 0)
return -1;
}
/*------ Is checksum ok? ------*/
np = (u8 *) nvramp;
for (i = 0; i < 63; i++)
chksum += *np++;
if (nvramp->CheckSum != (u8) chksum)
return -1;
return 1;
}
/**
* se2_update_all - update the EEPROM
* @host: Host whose EEPROM is being updated
*
* Write the default NVRAM image back to the EEPROM, updating only
* the bytes that differ from the current image.
*/
static void se2_update_all(struct orc_host * host)
{ /* setup default pattern */
int i;
u8 *np, *np1, chksum = 0;
/* Calculate checksum first */
np = (u8 *) default_nvram;
for (i = 0; i < 63; i++)
chksum += *np++;
*np = chksum;
np = (u8 *) default_nvram;
np1 = (u8 *) nvramp;
for (i = 0; i < 64; i++, np++, np1++) {
if (*np != *np1)
orc_nv_write(host, (u8) i, *np);
}
}
/**
* read_eeprom - load EEPROM
* @host: Host EEPROM to read
*
* Read the EEPROM for a given host. If the image is invalid or the
* read fails, restore the defaults and use them.
*/
static void read_eeprom(struct orc_host * host)
{
if (se2_rd_all(host) != 1) {
se2_update_all(host); /* setup default pattern */
se2_rd_all(host); /* load again */
}
}
/**
* orc_load_firmware - initialise firmware
* @host: Host to set up
*
* Load the firmware from the EEPROM into controller SRAM. This
* is basically a 4K block copy and then a 4K block read to check
* correctness. The rest is convoluted by the indirect interfaces
* in the hardware.
*/
static u8 orc_load_firmware(struct orc_host * host)
{
u32 data32;
u16 bios_addr;
u16 i;
u8 *data32_ptr, data;
/* Set up the EEPROM for access */
data = inb(host->base + ORC_GCFG);
outb(data | EEPRG, host->base + ORC_GCFG); /* Enable EEPROM programming */
outb(0x00, host->base + ORC_EBIOSADR2);
outw(0x0000, host->base + ORC_EBIOSADR0);
if (inb(host->base + ORC_EBIOSDATA) != 0x55) {
outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
return 0;
}
outw(0x0001, host->base + ORC_EBIOSADR0);
if (inb(host->base + ORC_EBIOSDATA) != 0xAA) {
outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
return 0;
}
outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Enable SRAM programming */
data32_ptr = (u8 *) & data32;
data32 = cpu_to_le32(0); /* Initial FW address to 0 */
outw(0x0010, host->base + ORC_EBIOSADR0);
*data32_ptr = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
outw(0x0011, host->base + ORC_EBIOSADR0);
*(data32_ptr + 1) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
outw(0x0012, host->base + ORC_EBIOSADR0);
*(data32_ptr + 2) = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
outw(*(data32_ptr + 2), host->base + ORC_EBIOSADR2);
outl(le32_to_cpu(data32), host->base + ORC_FWBASEADR); /* Write FW address */
/* Copy the code from the BIOS to the SRAM */
udelay(500); /* Required on Sun Ultra 5 ... 350 -> failures */
bios_addr = (u16) le32_to_cpu(data32); /* FW code locate at BIOS address + ? */
for (i = 0, data32_ptr = (u8 *) & data32; /* Download the code */
i < 0x1000; /* Firmware code size = 4K */
i++, bios_addr++) {
outw(bios_addr, host->base + ORC_EBIOSADR0);
*data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
if ((i % 4) == 3) {
outl(le32_to_cpu(data32), host->base + ORC_RISCRAM); /* Write every 4 bytes */
data32_ptr = (u8 *) & data32;
}
}
/* Go back and check they match */
outb(PRGMRST | DOWNLOAD, host->base + ORC_RISCCTL); /* Reset program count 0 */
bios_addr -= 0x1000; /* Reset the BIOS address */
for (i = 0, data32_ptr = (u8 *) & data32; /* Check the code */
i < 0x1000; /* Firmware code size = 4K */
i++, bios_addr++) {
outw(bios_addr, host->base + ORC_EBIOSADR0);
*data32_ptr++ = inb(host->base + ORC_EBIOSDATA); /* Read from BIOS */
if ((i % 4) == 3) {
if (inl(host->base + ORC_RISCRAM) != le32_to_cpu(data32)) {
outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
outb(data, host->base + ORC_GCFG); /*Disable EEPROM programming */
return 0;
}
data32_ptr = (u8 *) & data32;
}
}
/* Success */
outb(PRGMRST, host->base + ORC_RISCCTL); /* Reset program to 0 */
outb(data, host->base + ORC_GCFG); /* Disable EEPROM programming */
return 1;
}
/***************************************************************************/
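/*
* setup_SCBs - program the SCB array base and count into the
* controller, then give each SCB its index and the bus address of its
* extended SCB, which holds the scatter-gather list and sense buffer.
*/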
static void setup_SCBs(struct orc_host * host)
{
struct orc_scb *scb;
int i;
struct orc_extended_scb *escb;
dma_addr_t escb_phys;
/* Setup SCB base and SCB Size registers */
outb(ORC_MAXQUEUE, host->base + ORC_SCBSIZE); /* Total number of SCBs */
/* SCB base address 0 */
outl(host->scb_phys, host->base + ORC_SCBBASE0);
/* SCB base address 1 */
outl(host->scb_phys, host->base + ORC_SCBBASE1);
/* setup scatter list address with one buffer */
scb = host->scb_virt;
escb = host->escb_virt;
for (i = 0; i < ORC_MAXQUEUE; i++) {
escb_phys = (host->escb_phys + (sizeof(struct orc_extended_scb) * i));
scb->sg_addr = cpu_to_le32((u32) escb_phys);
scb->sense_addr = cpu_to_le32((u32) escb_phys);
scb->escb = escb;
scb->scbidx = i;
scb++;
escb++;
}
}
/**
* init_alloc_map - initialise allocation map
* @host: host map to configure
*
* Initialise the allocation maps for this device. If the device
* is not quiescent the caller must hold the allocation lock
*/
static void init_alloc_map(struct orc_host * host)
{
u8 i, j;
for (i = 0; i < MAX_CHANNELS; i++) {
for (j = 0; j < 8; j++) {
host->allocation_map[i][j] = 0xffffffff;
}
}
}
/**
* init_orchid - initialise the host adapter
* @host: host adapter to initialise
*
* Initialise the controller and if necessary load the firmware.
*
* Returns -1 if the initialisation fails.
*/
static int init_orchid(struct orc_host * host)
{
u8 *ptr;
u16 revision;
u8 i;
init_alloc_map(host);
outb(0xFF, host->base + ORC_GIMSK); /* Disable all interrupts */
if (inb(host->base + ORC_HSTUS) & RREADY) { /* Orchid is ready */
revision = orc_read_fwrev(host);
if (revision == 0xFFFF) {
outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */
if (wait_chip_ready(host) == 0)
return -1;
orc_load_firmware(host); /* Download FW */
setup_SCBs(host); /* Setup SCB base and SCB Size registers */
outb(0x00, host->base + ORC_HCTRL); /* clear HOSTSTOP */
if (wait_firmware_ready(host) == 0)
return -1;
/* Wait for firmware ready */
} else {
setup_SCBs(host); /* Setup SCB base and SCB Size registers */
}
} else { /* Orchid is not Ready */
outb(DEVRST, host->base + ORC_HCTRL); /* Reset Host Adapter */
if (wait_chip_ready(host) == 0)
return -1;
orc_load_firmware(host); /* Download FW */
setup_SCBs(host); /* Setup SCB base and SCB Size registers */
outb(HDO, host->base + ORC_HCTRL); /* Do Hardware Reset & */
/* clear HOSTSTOP */
if (wait_firmware_ready(host) == 0) /* Wait for firmware ready */
return -1;
}
/* Load an EEProm copy into RAM */
/* Assumes single threaded at this point */
read_eeprom(host);
if (nvramp->revision != 1)
return -1;
host->scsi_id = nvramp->scsi_id;
host->BIOScfg = nvramp->BIOSConfig1;
host->max_targets = MAX_TARGETS;
ptr = (u8 *) & (nvramp->Target00Config);
for (i = 0; i < 16; ptr++, i++) {
host->target_flag[i] = *ptr;
host->max_tags[i] = ORC_MAXTAGS;
}
if (nvramp->SCSI0Config & NCC_BUSRESET)
host->flags |= HCF_SCSI_RESET;
outb(0xFB, host->base + ORC_GIMSK); /* enable RP FIFO interrupt */
return 0;
}
/**
* orc_reset_scsi_bus - perform bus reset
* @host: host being reset
*
* Perform a full bus reset on the adapter.
*/
static int orc_reset_scsi_bus(struct orc_host * host)
{ /* I need Host Control Block Information */
unsigned long flags;
spin_lock_irqsave(&host->allocation_lock, flags);
init_alloc_map(host);
/* reset scsi bus */
outb(SCSIRST, host->base + ORC_HCTRL);
/* FIXME: We can spend up to a second with the lock held and
interrupts off here */
if (wait_scsi_reset_done(host) == 0) {
spin_unlock_irqrestore(&host->allocation_lock, flags);
return FAILED;
} else {
spin_unlock_irqrestore(&host->allocation_lock, flags);
return SUCCESS;
}
}
/**
* orc_device_reset - device reset handler
* @host: host to reset
* @cmd: command causing the reset
* @target: target device
*
* Reset registers, reset a hanging bus and kill active and disconnected
* commands for target w/o soft reset
*/
static int orc_device_reset(struct orc_host * host, struct scsi_cmnd *cmd, unsigned int target)
{ /* I need Host Control Block Information */
struct orc_scb *scb;
struct orc_extended_scb *escb;
struct orc_scb *host_scb;
u8 i;
unsigned long flags;
spin_lock_irqsave(&(host->allocation_lock), flags);
scb = (struct orc_scb *) NULL;
escb = (struct orc_extended_scb *) NULL;
/* Start from the first SCB in the table */
host_scb = host->scb_virt;
/* FIXME: is this safe if we then fail to issue the reset or race
a completion ? */
init_alloc_map(host);
/* Find the scb corresponding to the command */
for (i = 0; i < ORC_MAXQUEUE; i++) {
escb = host_scb->escb;
if (host_scb->status && escb->srb == cmd)
break;
host_scb++;
}
if (i == ORC_MAXQUEUE) {
printk(KERN_ERR "Unable to Reset - No SCB Found\n");
spin_unlock_irqrestore(&(host->allocation_lock), flags);
return FAILED;
}
/* Allocate a new SCB for the reset command to the firmware */
if ((scb = __orc_alloc_scb(host)) == NULL) {
/* Can't happen.. */
spin_unlock_irqrestore(&(host->allocation_lock), flags);
return FAILED;
}
/* Device reset is handled by the firmware; we fill in an SCB and
fire it at the controller, which does the rest */
scb->opcode = ORC_BUSDEVRST;
scb->target = target;
scb->hastat = 0;
scb->tastat = 0;
scb->status = 0x0;
scb->link = 0xFF;
scb->reserved0 = 0;
scb->reserved1 = 0;
scb->xferlen = cpu_to_le32(0);
scb->sg_len = cpu_to_le32(0);
escb->srb = cmd;
orc_exec_scb(host, scb); /* Start execute SCB */
spin_unlock_irqrestore(&host->allocation_lock, flags);
return SUCCESS;
}
/**
* __orc_alloc_scb - allocate an SCB
* @host: host to allocate from
*
* Allocate an SCB and return a pointer to the SCB object. NULL
* is returned if no SCB is free. The caller must already hold
* the allocator lock at this point.
*/
static struct orc_scb *__orc_alloc_scb(struct orc_host * host)
{
u8 channel;
unsigned long idx;
u8 index;
u8 i;
channel = host->index;
for (i = 0; i < 8; i++) {
for (index = 0; index < 32; index++) {
if ((host->allocation_map[channel][i] >> index) & 0x01) {
host->allocation_map[channel][i] &= ~(1 << index);
idx = index + 32 * i;
/*
* Translate the index to a structure instance
*/
return host->scb_virt + idx;
}
}
}
return NULL;
}
/**
* orc_alloc_scb - allocate an SCB
* @host: host to allocate from
*
* Allocate an SCB and return a pointer to the SCB object. NULL
* is returned if no SCB is free.
*/
static struct orc_scb *orc_alloc_scb(struct orc_host * host)
{
struct orc_scb *scb;
unsigned long flags;
spin_lock_irqsave(&host->allocation_lock, flags);
scb = __orc_alloc_scb(host);
spin_unlock_irqrestore(&host->allocation_lock, flags);
return scb;
}
/**
* orc_release_scb - release an SCB
* @host: host owning the SCB
* @scb: SCB that is now free
*
* Called to return a completed SCB to the allocation pool. Before
* calling the SCB must be out of use on both the host and the HA.
*/
static void orc_release_scb(struct orc_host *host, struct orc_scb *scb)
{
unsigned long flags;
u8 index, i, channel;
spin_lock_irqsave(&(host->allocation_lock), flags);
channel = host->index; /* Channel */
index = scb->scbidx;
i = index / 32;
index %= 32;
host->allocation_map[channel][i] |= (1 << index);
spin_unlock_irqrestore(&(host->allocation_lock), flags);
}
/**
* orchid_abort_scb - abort a command
* @host: host owning the SCB
* @scb: SCB to abort
*
* Abort a queued command that has been passed to the firmware layer
* if possible. This is all handled by the firmware. We ask the firmware
* and it either aborts the command or fails.
*/
static int orchid_abort_scb(struct orc_host * host, struct orc_scb * scb)
{
unsigned char data, status;
outb(ORC_CMD_ABORT_SCB, host->base + ORC_HDATA); /* Write command */
outb(HDO, host->base + ORC_HCTRL);
if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
outb(scb->scbidx, host->base + ORC_HDATA); /* Write address */
outb(HDO, host->base + ORC_HCTRL);
if (wait_HDO_off(host) == 0) /* Wait HDO off */
return 0;
if (wait_hdi_set(host, &data) == 0) /* Wait HDI set */
return 0;
status = inb(host->base + ORC_HDATA);
outb(data, host->base + ORC_HSTUS); /* Clear HDI */
if (status == 1) /* 0 - Successfully */
return 0; /* 1 - Fail */
return 1;
}
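/*
* inia100_abort_cmd - locate the SCB belonging to a midlayer command
* and ask the firmware to abort it. Only tagged commands can be
* aborted; untagged ones fail the abort.
*/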
static int inia100_abort_cmd(struct orc_host * host, struct scsi_cmnd *cmd)
{
struct orc_extended_scb *escb;
struct orc_scb *scb;
u8 i;
unsigned long flags;
spin_lock_irqsave(&(host->allocation_lock), flags);
scb = host->scb_virt;
/* Walk the queue until we find the SCB that belongs to the command
block. This isn't a performance critical path so a walk in the park
here does no harm */
for (i = 0; i < ORC_MAXQUEUE; i++, scb++) {
escb = scb->escb;
if (scb->status && escb->srb == cmd) {
if (scb->tag_msg == 0) {
goto out;
} else {
/* Issue an ABORT to the firmware */
if (orchid_abort_scb(host, scb)) {
escb->srb = NULL;
spin_unlock_irqrestore(&host->allocation_lock, flags);
return SUCCESS;
} else
goto out;
}
}
}
out:
spin_unlock_irqrestore(&host->allocation_lock, flags);
return FAILED;
}
/**
* orc_interrupt - IRQ processing
* @host: Host causing the interrupt
*
* This function is called from the IRQ handler and protected
* by the host lock. While the controller reports that there are
* scb's for processing, we pull them off the controller, turn the
* index into a host address pointer to the scb and call the scb
* handler.
*
* Returns IRQ_HANDLED if any SCBs were processed, IRQ_NONE otherwise
*/
static irqreturn_t orc_interrupt(struct orc_host * host)
{
u8 scb_index;
struct orc_scb *scb;
/* Check if we have an SCB queued for servicing */
if (inb(host->base + ORC_RQUEUECNT) == 0)
return IRQ_NONE;
do {
/* Get the SCB index of the SCB to service */
scb_index = inb(host->base + ORC_RQUEUE);
/* Translate it back to a host pointer */
scb = (struct orc_scb *) ((unsigned long) host->scb_virt + (unsigned long) (sizeof(struct orc_scb) * scb_index));
scb->status = 0x0;
/* Process the SCB */
inia100_scb_handler(host, scb);
} while (inb(host->base + ORC_RQUEUECNT));
return IRQ_HANDLED;
} /* End of orc_interrupt() */
/**
* inia100_build_scb - build SCB
* @host: host owning the control block
* @scb: control block to use
* @cmd: Mid layer command
*
* Build a host adapter control block from the SCSI mid layer command
*/
static int inia100_build_scb(struct orc_host * host, struct orc_scb * scb, struct scsi_cmnd * cmd)
{ /* Create corresponding SCB */
struct scatterlist *sg;
struct orc_sgent *sgent; /* Pointer to SG list */
int i, count_sg;
struct orc_extended_scb *escb;
/* Links between the escb, scb and Linux scsi midlayer cmd */
escb = scb->escb;
escb->srb = cmd;
sgent = NULL;
/* Set up the SCB to do a SCSI command block */
scb->opcode = ORC_EXECSCSI;
scb->flags = SCF_NO_DCHK; /* Clear done bit */
scb->target = cmd->device->id;
scb->lun = cmd->device->lun;
scb->reserved0 = 0;
scb->reserved1 = 0;
scb->sg_len = cpu_to_le32(0);
scb->xferlen = cpu_to_le32((u32) scsi_bufflen(cmd));
sgent = (struct orc_sgent *) & escb->sglist[0];
count_sg = scsi_dma_map(cmd);
if (count_sg < 0)
return count_sg;
BUG_ON(count_sg > TOTAL_SG_ENTRY);
/* Build the scatter gather lists */
if (count_sg) {
scb->sg_len = cpu_to_le32((u32) (count_sg * 8));
scsi_for_each_sg(cmd, sg, count_sg, i) {
sgent->base = cpu_to_le32((u32) sg_dma_address(sg));
sgent->length = cpu_to_le32((u32) sg_dma_len(sg));
sgent++;
}
} else {
scb->sg_len = cpu_to_le32(0);
sgent->base = cpu_to_le32(0);
sgent->length = cpu_to_le32(0);
}
scb->sg_addr = (u32) scb->sense_addr; /* sense_addr is already little endian */
scb->hastat = 0;
scb->tastat = 0;
scb->link = 0xFF;
scb->sense_len = SENSE_SIZE;
scb->cdb_len = cmd->cmd_len;
if (scb->cdb_len >= IMAX_CDB) {
printk("max cdb length= %x\b", cmd->cmd_len);
scb->cdb_len = IMAX_CDB;
}
scb->ident = cmd->device->lun | DISC_ALLOW;
if (cmd->device->tagged_supported) { /* Tag Support */
scb->tag_msg = SIMPLE_QUEUE_TAG; /* Do simple tag only */
} else {
scb->tag_msg = 0; /* No tag support */
}
memcpy(scb->cdb, cmd->cmnd, scb->cdb_len);
return 0;
}
/**
* inia100_queue - queue command with host
* @cmd: Command block
* @done: Completion function
*
* Called by the mid layer to queue a command. Process the command
* block, build the host specific scb structures and if there is room
* queue the command down to the controller
*/
static int inia100_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
struct orc_scb *scb;
struct orc_host *host; /* Point to Host adapter control block */
host = (struct orc_host *) cmd->device->host->hostdata;
cmd->scsi_done = done;
/* Get free SCSI control block */
if ((scb = orc_alloc_scb(host)) == NULL)
return SCSI_MLQUEUE_HOST_BUSY;
if (inia100_build_scb(host, scb, cmd)) {
orc_release_scb(host, scb);
return SCSI_MLQUEUE_HOST_BUSY;
}
orc_exec_scb(host, scb); /* Start execute SCB */
return 0;
}
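/*
* DEF_SCSI_QCMD generates inia100_queue(), a wrapper that invokes
* inia100_queue_lck() with the SCSI host lock held.
*/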
static DEF_SCSI_QCMD(inia100_queue)
/*****************************************************************************
Function name : inia100_abort
Description : Abort a queued command.
(commands that are on the bus can't be aborted easily)
Input : cmd - Pointer to the SCSI command to abort
Output : None.
Return : SUCCESS or FAILED.
*****************************************************************************/
static int inia100_abort(struct scsi_cmnd * cmd)
{
struct orc_host *host;
host = (struct orc_host *) cmd->device->host->hostdata;
return inia100_abort_cmd(host, cmd);
}
/*****************************************************************************
Function name : inia100_reset
Description : Reset registers, reset a hanging bus and
kill active and disconnected commands for target w/o soft reset
Input : cmd - Pointer to the SCSI command that triggered the reset
Output : None.
Return : SUCCESS or FAILED.
*****************************************************************************/
static int inia100_bus_reset(struct scsi_cmnd * cmd)
{ /* I need Host Control Block Information */
struct orc_host *host;
host = (struct orc_host *) cmd->device->host->hostdata;
return orc_reset_scsi_bus(host);
}
/*****************************************************************************
Function name : inia100_device_reset
Description : Reset the device
Input : cmd - Pointer to the SCSI command that triggered the reset
Output : None.
Return : SUCCESS or FAILED.
*****************************************************************************/
static int inia100_device_reset(struct scsi_cmnd * cmd)
{ /* I need Host Control Block Information */
struct orc_host *host;
host = (struct orc_host *) cmd->device->host->hostdata;
return orc_device_reset(host, cmd, scmd_id(cmd));
}
/**
* inia100_scb_handler - interrupt callback
* @host: Host causing the interrupt
* @scb: SCB the controller returned as needing processing
*
* Perform completion processing on a control block. Do the conversions
* from host to SCSI midlayer error coding, save any sense data and
* then complete with the midlayer and recycle the scb.
*/
static void inia100_scb_handler(struct orc_host *host, struct orc_scb *scb)
{
struct scsi_cmnd *cmd; /* Pointer to SCSI request block */
struct orc_extended_scb *escb;
escb = scb->escb;
if ((cmd = (struct scsi_cmnd *) escb->srb) == NULL) {
printk(KERN_ERR "inia100_scb_handler: SRB pointer is empty\n");
orc_release_scb(host, scb); /* Release SCB for current channel */
return;
}
escb->srb = NULL;
switch (scb->hastat) {
case 0x0:
case 0xa: /* Linked command complete without error and linked normally */
case 0xb: /* Linked command complete without error interrupt generated */
scb->hastat = 0;
break;
case 0x11: /* Selection time out-The initiator selection or target
reselection was not complete within the SCSI Time out period */
scb->hastat = DID_TIME_OUT;
break;
case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus
phase sequence was requested by the target. The host adapter
will generate a SCSI Reset Condition, notifying the host with
a SCRD interrupt */
scb->hastat = DID_RESET;
break;
case 0x1a: /* SCB Aborted. 07/21/98 */
scb->hastat = DID_ABORT;
break;
case 0x12: /* Data overrun/underrun-The target attempted to transfer more data
than was allocated by the Data Length field or the sum of the
Scatter / Gather Data Length fields. */
case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. */
case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid. */
default:
printk(KERN_DEBUG "inia100: %x %x\n", scb->hastat, scb->tastat);
scb->hastat = DID_ERROR; /* Couldn't find any better */
break;
}
if (scb->tastat == 2) { /* Check condition */
memcpy((unsigned char *) &cmd->sense_buffer[0],
(unsigned char *) &escb->sglist[0], SENSE_SIZE);
}
cmd->result = scb->tastat | (scb->hastat << 16);
scsi_dma_unmap(cmd);
cmd->scsi_done(cmd); /* Notify system DONE */
orc_release_scb(host, scb); /* Release SCB for current channel */
}
/**
* inia100_intr - interrupt handler
* @irqno: Interrupt value
* @devid: Host adapter
*
* Entry point for IRQ handling. All the real work is performed
* by orc_interrupt.
*/
static irqreturn_t inia100_intr(int irqno, void *devid)
{
struct Scsi_Host *shost = (struct Scsi_Host *)devid;
struct orc_host *host = (struct orc_host *)shost->hostdata;
unsigned long flags;
irqreturn_t res;
spin_lock_irqsave(shost->host_lock, flags);
res = orc_interrupt(host);
spin_unlock_irqrestore(shost->host_lock, flags);
return res;
}
static struct scsi_host_template inia100_template = {
.proc_name = "inia100",
.name = inia100_REVID,
.queuecommand = inia100_queue,
.eh_abort_handler = inia100_abort,
.eh_bus_reset_handler = inia100_bus_reset,
.eh_device_reset_handler = inia100_device_reset,
.can_queue = 1,
.this_id = 1,
.sg_tablesize = SG_ALL,
.cmd_per_lun = 1,
.use_clustering = ENABLE_CLUSTERING,
};
static int __devinit inia100_probe_one(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct Scsi_Host *shost;
struct orc_host *host;
unsigned long port, bios;
int error = -ENODEV;
u32 sz;
unsigned long biosaddr;
char *bios_phys;
if (pci_enable_device(pdev))
goto out;
if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
printk(KERN_WARNING "Unable to set 32bit DMA "
"on inia100 adapter, ignoring.\n");
goto out_disable_device;
}
pci_set_master(pdev);
port = pci_resource_start(pdev, 0);
if (!request_region(port, 256, "inia100")) {
printk(KERN_WARNING "inia100: io port 0x%lx, is busy.\n", port);
goto out_disable_device;
}
/* <02> read from base address + 0x50 offset to get the bios value. */
bios = inw(port + 0x50);
shost = scsi_host_alloc(&inia100_template, sizeof(struct orc_host));
if (!shost)
goto out_release_region;
host = (struct orc_host *)shost->hostdata;
host->pdev = pdev;
host->base = port;
host->BIOScfg = bios;
spin_lock_init(&host->allocation_lock);
/* Get total memory needed for SCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_scb);
host->scb_virt = pci_alloc_consistent(pdev, sz,
&host->scb_phys);
if (!host->scb_virt) {
printk("inia100: SCB memory allocation error\n");
goto out_host_put;
}
memset(host->scb_virt, 0, sz);
/* Get total memory needed for ESCB */
sz = ORC_MAXQUEUE * sizeof(struct orc_extended_scb);
host->escb_virt = pci_alloc_consistent(pdev, sz,
&host->escb_phys);
if (!host->escb_virt) {
printk("inia100: ESCB memory allocation error\n");
goto out_free_scb_array;
}
memset(host->escb_virt, 0, sz);
biosaddr = host->BIOScfg;
biosaddr = (biosaddr << 4);
bios_phys = phys_to_virt(biosaddr);
if (init_orchid(host)) { /* Initialize orchid chip */
printk("inia100: initial orchid fail!!\n");
goto out_free_escb_array;
}
shost->io_port = host->base;
shost->n_io_port = 0xff;
shost->can_queue = ORC_MAXQUEUE;
shost->unique_id = shost->io_port;
shost->max_id = host->max_targets;
shost->max_lun = 16;
shost->irq = pdev->irq;
shost->this_id = host->scsi_id; /* Assign HCS index */
shost->sg_tablesize = TOTAL_SG_ENTRY;
/* Initial orc chip */
error = request_irq(pdev->irq, inia100_intr, IRQF_SHARED,
"inia100", shost);
if (error < 0) {
printk(KERN_WARNING "inia100: unable to get irq %d\n",
pdev->irq);
goto out_free_escb_array;
}
pci_set_drvdata(pdev, shost);
error = scsi_add_host(shost, &pdev->dev);
if (error)
goto out_free_irq;
scsi_scan_host(shost);
return 0;
out_free_irq:
free_irq(shost->irq, shost);
out_free_escb_array:
pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
host->escb_virt, host->escb_phys);
out_free_scb_array:
pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
host->scb_virt, host->scb_phys);
out_host_put:
scsi_host_put(shost);
out_release_region:
release_region(port, 256);
out_disable_device:
pci_disable_device(pdev);
out:
return error;
}
static void __devexit inia100_remove_one(struct pci_dev *pdev)
{
struct Scsi_Host *shost = pci_get_drvdata(pdev);
struct orc_host *host = (struct orc_host *)shost->hostdata;
scsi_remove_host(shost);
free_irq(shost->irq, shost);
pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_extended_scb),
host->escb_virt, host->escb_phys);
pci_free_consistent(pdev, ORC_MAXQUEUE * sizeof(struct orc_scb),
host->scb_virt, host->scb_phys);
release_region(shost->io_port, 256);
scsi_host_put(shost);
}
static struct pci_device_id inia100_pci_tbl[] = {
{PCI_VENDOR_ID_INIT, 0x1060, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0,}
};
MODULE_DEVICE_TABLE(pci, inia100_pci_tbl);
static struct pci_driver inia100_pci_driver = {
.name = "inia100",
.id_table = inia100_pci_tbl,
.probe = inia100_probe_one,
.remove = __devexit_p(inia100_remove_one),
};
static int __init inia100_init(void)
{
return pci_register_driver(&inia100_pci_driver);
}
static void __exit inia100_exit(void)
{
pci_unregister_driver(&inia100_pci_driver);
}
MODULE_DESCRIPTION("Initio A100U2W SCSI driver");
MODULE_AUTHOR("Initio Corporation");
MODULE_LICENSE("Dual BSD/GPL");
module_init(inia100_init);
module_exit(inia100_exit);
| gpl-2.0 |