repo_name
string
path
string
copies
string
size
string
content
string
license
string
Tommy-Geenexus/android_kernel_sony_apq8064_yuga_5.x
arch/arm/mach-msm/qdsp6v2/audio_acdb.c
525
29254
/* Copyright (c) 2010-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/fs.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include <linux/msm_ion.h> #include <linux/mm.h> #include <mach/qdsp6v2/audio_acdb.h> #include <linux/slab.h> #define MAX_NETWORKS 15 #define MAX_HW_DELAY_ENTRIES 25 struct sidetone_atomic_cal { atomic_t enable; atomic_t gain; }; struct acdb_data { struct mutex acdb_mutex; /* ANC Cal */ struct acdb_atomic_cal_block anc_cal; /* AudProc Cal */ atomic_t asm_topology; atomic_t adm_topology[MAX_AUDPROC_TYPES]; struct acdb_atomic_cal_block audproc_cal[MAX_AUDPROC_TYPES]; struct acdb_atomic_cal_block audstrm_cal[MAX_AUDPROC_TYPES]; struct acdb_atomic_cal_block audvol_cal[MAX_AUDPROC_TYPES]; /* VocProc Cal */ atomic_t voice_rx_topology; atomic_t voice_tx_topology; struct acdb_atomic_cal_block vocproc_cal[MAX_NETWORKS]; struct acdb_atomic_cal_block vocstrm_cal[MAX_NETWORKS]; struct acdb_atomic_cal_block vocvol_cal[MAX_NETWORKS]; /* size of cal block tables above*/ atomic_t vocproc_cal_size; atomic_t vocstrm_cal_size; atomic_t vocvol_cal_size; /* Total size of cal data for all networks */ atomic_t vocproc_total_cal_size; atomic_t vocstrm_total_cal_size; atomic_t vocvol_total_cal_size; /* AFE cal */ struct acdb_atomic_cal_block afe_cal[MAX_AUDPROC_TYPES]; /* Sidetone Cal */ struct sidetone_atomic_cal sidetone_cal; /* Allocation information */ struct ion_client *ion_client; struct ion_handle *ion_handle; atomic_t map_handle; atomic64_t 
paddr; atomic64_t kvaddr; atomic64_t mem_len; /* Av sync delay info */ struct hw_delay hw_delay_rx; struct hw_delay hw_delay_tx; }; static struct acdb_data acdb_data; static atomic_t usage_count; uint32_t get_voice_rx_topology(void) { return atomic_read(&acdb_data.voice_rx_topology); } void store_voice_rx_topology(uint32_t topology) { atomic_set(&acdb_data.voice_rx_topology, topology); } uint32_t get_voice_tx_topology(void) { return atomic_read(&acdb_data.voice_tx_topology); } void store_voice_tx_topology(uint32_t topology) { atomic_set(&acdb_data.voice_tx_topology, topology); } uint32_t get_adm_rx_topology(void) { return atomic_read(&acdb_data.adm_topology[RX_CAL]); } void store_adm_rx_topology(uint32_t topology) { atomic_set(&acdb_data.adm_topology[RX_CAL], topology); } uint32_t get_adm_tx_topology(void) { return atomic_read(&acdb_data.adm_topology[TX_CAL]); } void store_adm_tx_topology(uint32_t topology) { atomic_set(&acdb_data.adm_topology[TX_CAL], topology); } uint32_t get_asm_topology(void) { return atomic_read(&acdb_data.asm_topology); } void store_asm_topology(uint32_t topology) { atomic_set(&acdb_data.asm_topology, topology); } void get_all_voice_cal(struct acdb_cal_block *cal_block) { cal_block->cal_kvaddr = atomic_read(&acdb_data.vocproc_cal[0].cal_kvaddr); cal_block->cal_paddr = atomic_read(&acdb_data.vocproc_cal[0].cal_paddr); cal_block->cal_size = atomic_read(&acdb_data.vocproc_total_cal_size) + atomic_read(&acdb_data.vocstrm_total_cal_size) + atomic_read(&acdb_data.vocvol_total_cal_size); } void get_all_cvp_cal(struct acdb_cal_block *cal_block) { cal_block->cal_kvaddr = atomic_read(&acdb_data.vocproc_cal[0].cal_kvaddr); cal_block->cal_paddr = atomic_read(&acdb_data.vocproc_cal[0].cal_paddr); cal_block->cal_size = atomic_read(&acdb_data.vocproc_total_cal_size) + atomic_read(&acdb_data.vocvol_total_cal_size); } void get_all_vocproc_cal(struct acdb_cal_block *cal_block) { cal_block->cal_kvaddr = atomic_read(&acdb_data.vocproc_cal[0].cal_kvaddr); 
cal_block->cal_paddr = atomic_read(&acdb_data.vocproc_cal[0].cal_paddr); cal_block->cal_size = atomic_read(&acdb_data.vocproc_total_cal_size); } void get_all_vocstrm_cal(struct acdb_cal_block *cal_block) { cal_block->cal_kvaddr = atomic_read(&acdb_data.vocstrm_cal[0].cal_kvaddr); cal_block->cal_paddr = atomic_read(&acdb_data.vocstrm_cal[0].cal_paddr); cal_block->cal_size = atomic_read(&acdb_data.vocstrm_total_cal_size); } void get_all_vocvol_cal(struct acdb_cal_block *cal_block) { cal_block->cal_kvaddr = atomic_read(&acdb_data.vocvol_cal[0].cal_kvaddr); cal_block->cal_paddr = atomic_read(&acdb_data.vocvol_cal[0].cal_paddr); cal_block->cal_size = atomic_read(&acdb_data.vocvol_total_cal_size); } int get_hw_delay(int32_t path, struct hw_delay_entry *entry) { int i, result = 0; struct hw_delay *delay = NULL; struct hw_delay_entry *info = NULL; pr_debug("%s,\n", __func__); if (entry == NULL) { pr_err("ACDB=> NULL pointer sent to %s\n", __func__); result = -EINVAL; goto ret; } if ((path >= MAX_AUDPROC_TYPES) || (path < 0)) { pr_err("ACDB=> Bad path sent to %s, path: %d\n", __func__, path); result = -EINVAL; goto ret; } mutex_lock(&acdb_data.acdb_mutex); if (path == RX_CAL) delay = &acdb_data.hw_delay_rx; else if (path == TX_CAL) delay = &acdb_data.hw_delay_tx; else pr_err("ACDB=> %s Invalid path: %d\n",__func__,path); if ((delay == NULL) || ((delay != NULL) && delay->num_entries == 0)) { pr_err("ACDB=> %s Invalid delay/ delay entries\n", __func__); result = -EINVAL; goto done; } info = (struct hw_delay_entry *)(delay->delay_info); if (info == NULL) { pr_err("ACDB=> %s Delay entries info is NULL\n", __func__); result = -EINVAL; goto done; } for (i = 0; i < delay->num_entries; i++) { if (info[i].sample_rate == entry->sample_rate) { entry->delay_usec = info[i].delay_usec; break; } } if (i == delay->num_entries) { pr_err("ACDB=> %s: Unable to find delay for sample rate %d\n", __func__, entry->sample_rate); result = -EINVAL; } done: mutex_unlock(&acdb_data.acdb_mutex); ret: 
pr_err("ACDB=> %s: Path = %d samplerate = %u usec = %u status %d\n", __func__, path, entry->sample_rate, entry->delay_usec, result); return result; } int store_hw_delay(int32_t path, void *arg) { int result = 0; struct hw_delay delay; struct hw_delay *delay_dest = NULL; pr_debug("%s,\n", __func__); if ((path >= MAX_AUDPROC_TYPES) || (path < 0) || (arg == NULL)) { pr_err("ACDB=> Bad path/ pointer sent to %s, path: %d\n", __func__, path); result = -EINVAL; goto done; } result = copy_from_user((void *)&delay, (void *)arg, sizeof(struct hw_delay)); if (result) { pr_err("ACDB=> %s failed to copy hw delay: result=%d path=%d\n", __func__, result, path); result = -EFAULT; goto done; } if ((delay.num_entries <= 0) || (delay.num_entries > MAX_HW_DELAY_ENTRIES)) { pr_debug("ACDB=> %s incorrect no of hw delay entries: %d\n", __func__, delay.num_entries); result = -EINVAL; goto done; } if ((path >= MAX_AUDPROC_TYPES) || (path < 0)) { pr_err("ACDB=> Bad path sent to %s, path: %d\n", __func__, path); result = -EINVAL; goto done; } pr_debug("ACDB=> %s : Path = %d num_entries = %d\n", __func__, path, delay.num_entries); mutex_lock(&acdb_data.acdb_mutex); if (path == RX_CAL) delay_dest = &acdb_data.hw_delay_rx; else if (path == TX_CAL) delay_dest = &acdb_data.hw_delay_tx; delay_dest->num_entries = delay.num_entries; result = copy_from_user(delay_dest->delay_info, delay.delay_info, (sizeof(struct hw_delay_entry)* delay.num_entries)); if (result) { pr_err("ACDB=> %s failed to copy hw delay info res=%d path=%d", __func__, result, path); result = -EFAULT; } mutex_unlock(&acdb_data.acdb_mutex); done: return result; } void get_anc_cal(struct acdb_cal_block *cal_block) { pr_debug("%s\n", __func__); if (cal_block == NULL) { pr_err("ACDB=> NULL pointer sent to %s\n", __func__); goto done; } cal_block->cal_kvaddr = atomic_read(&acdb_data.anc_cal.cal_kvaddr); cal_block->cal_paddr = atomic_read(&acdb_data.anc_cal.cal_paddr); cal_block->cal_size = atomic_read(&acdb_data.anc_cal.cal_size); done: 
return; } void store_anc_cal(struct cal_block *cal_block) { pr_debug("%s,\n", __func__); if (cal_block->cal_offset > atomic64_read(&acdb_data.mem_len)) { pr_err("%s: offset %d is > mem_len %ld\n", __func__, cal_block->cal_offset, (long)atomic64_read(&acdb_data.mem_len)); goto done; } atomic_set(&acdb_data.anc_cal.cal_kvaddr, cal_block->cal_offset + atomic64_read(&acdb_data.kvaddr)); atomic_set(&acdb_data.anc_cal.cal_paddr, cal_block->cal_offset + atomic64_read(&acdb_data.paddr)); atomic_set(&acdb_data.anc_cal.cal_size, cal_block->cal_size); done: return; } void store_afe_cal(int32_t path, struct cal_block *cal_block) { pr_debug("%s, path = %d\n", __func__, path); if (cal_block->cal_offset > atomic64_read(&acdb_data.mem_len)) { pr_err("%s: offset %d is > mem_len %ld\n", __func__, cal_block->cal_offset, (long)atomic64_read(&acdb_data.mem_len)); goto done; } if ((path >= MAX_AUDPROC_TYPES) || (path < 0)) { pr_err("ACDB=> Bad path sent to %s, path: %d\n", __func__, path); goto done; } atomic_set(&acdb_data.afe_cal[path].cal_kvaddr, cal_block->cal_offset + atomic64_read(&acdb_data.kvaddr)); atomic_set(&acdb_data.afe_cal[path].cal_paddr, cal_block->cal_offset + atomic64_read(&acdb_data.paddr)); atomic_set(&acdb_data.afe_cal[path].cal_size, cal_block->cal_size); done: return; } void get_afe_cal(int32_t path, struct acdb_cal_block *cal_block) { pr_debug("%s, path = %d\n", __func__, path); if (cal_block == NULL) { pr_err("ACDB=> NULL pointer sent to %s\n", __func__); goto done; } if ((path >= MAX_AUDPROC_TYPES) || (path < 0)) { pr_err("ACDB=> Bad path sent to %s, path: %d\n", __func__, path); goto done; } cal_block->cal_kvaddr = atomic_read(&acdb_data.afe_cal[path].cal_kvaddr); cal_block->cal_paddr = atomic_read(&acdb_data.afe_cal[path].cal_paddr); cal_block->cal_size = atomic_read(&acdb_data.afe_cal[path].cal_size); done: return; } void store_audproc_cal(int32_t path, struct cal_block *cal_block) { pr_debug("%s, path = %d\n", __func__, path); if (cal_block->cal_offset > 
atomic64_read(&acdb_data.mem_len)) { pr_err("%s: offset %d is > mem_len %ld\n", __func__, cal_block->cal_offset, (long)atomic64_read(&acdb_data.mem_len)); goto done; } if (path >= MAX_AUDPROC_TYPES) { pr_err("ACDB=> Bad path sent to %s, path: %d\n", __func__, path); goto done; } atomic_set(&acdb_data.audproc_cal[path].cal_kvaddr, cal_block->cal_offset + atomic64_read(&acdb_data.kvaddr)); atomic_set(&acdb_data.audproc_cal[path].cal_paddr, cal_block->cal_offset + atomic64_read(&acdb_data.paddr)); atomic_set(&acdb_data.audproc_cal[path].cal_size, cal_block->cal_size); done: return; } void get_audproc_cal(int32_t path, struct acdb_cal_block *cal_block) { pr_debug("%s, path = %d\n", __func__, path); if (cal_block == NULL) { pr_err("ACDB=> NULL pointer sent to %s\n", __func__); goto done; } if (path >= MAX_AUDPROC_TYPES) { pr_err("ACDB=> Bad path sent to %s, path: %d\n", __func__, path); goto done; } cal_block->cal_kvaddr = atomic_read(&acdb_data.audproc_cal[path].cal_kvaddr); cal_block->cal_paddr = atomic_read(&acdb_data.audproc_cal[path].cal_paddr); cal_block->cal_size = atomic_read(&acdb_data.audproc_cal[path].cal_size); done: return; } void store_audstrm_cal(int32_t path, struct cal_block *cal_block) { pr_debug("%s, path = %d\n", __func__, path); if (cal_block->cal_offset > atomic64_read(&acdb_data.mem_len)) { pr_err("%s: offset %d is > mem_len %ld\n", __func__, cal_block->cal_offset, (long)atomic64_read(&acdb_data.mem_len)); goto done; } if (path >= MAX_AUDPROC_TYPES) { pr_err("ACDB=> Bad path sent to %s, path: %d\n", __func__, path); goto done; } atomic_set(&acdb_data.audstrm_cal[path].cal_kvaddr, cal_block->cal_offset + atomic64_read(&acdb_data.kvaddr)); atomic_set(&acdb_data.audstrm_cal[path].cal_paddr, cal_block->cal_offset + atomic64_read(&acdb_data.paddr)); atomic_set(&acdb_data.audstrm_cal[path].cal_size, cal_block->cal_size); done: return; } void get_audstrm_cal(int32_t path, struct acdb_cal_block *cal_block) { pr_debug("%s, path = %d\n", __func__, path); if 
(cal_block == NULL) { pr_err("ACDB=> NULL pointer sent to %s\n", __func__); goto done; } if (path >= MAX_AUDPROC_TYPES) { pr_err("ACDB=> Bad path sent to %s, path: %d\n", __func__, path); goto done; } cal_block->cal_kvaddr = atomic_read(&acdb_data.audstrm_cal[path].cal_kvaddr); cal_block->cal_paddr = atomic_read(&acdb_data.audstrm_cal[path].cal_paddr); cal_block->cal_size = atomic_read(&acdb_data.audstrm_cal[path].cal_size); done: return; } void store_audvol_cal(int32_t path, struct cal_block *cal_block) { pr_debug("%s, path = %d\n", __func__, path); if (cal_block->cal_offset > atomic64_read(&acdb_data.mem_len)) { pr_err("%s: offset %d is > mem_len %ld\n", __func__, cal_block->cal_offset, (long)atomic64_read(&acdb_data.mem_len)); goto done; } if (path >= MAX_AUDPROC_TYPES) { pr_err("ACDB=> Bad path sent to %s, path: %d\n", __func__, path); goto done; } atomic_set(&acdb_data.audvol_cal[path].cal_kvaddr, cal_block->cal_offset + atomic64_read(&acdb_data.kvaddr)); atomic_set(&acdb_data.audvol_cal[path].cal_paddr, cal_block->cal_offset + atomic64_read(&acdb_data.paddr)); atomic_set(&acdb_data.audvol_cal[path].cal_size, cal_block->cal_size); done: return; } void get_audvol_cal(int32_t path, struct acdb_cal_block *cal_block) { pr_debug("%s, path = %d\n", __func__, path); if (cal_block == NULL) { pr_err("ACDB=> NULL pointer sent to %s\n", __func__); goto done; } if (path >= MAX_AUDPROC_TYPES || path < 0) { pr_err("ACDB=> Bad path sent to %s, path: %d\n", __func__, path); goto done; } cal_block->cal_kvaddr = atomic_read(&acdb_data.audvol_cal[path].cal_kvaddr); cal_block->cal_paddr = atomic_read(&acdb_data.audvol_cal[path].cal_paddr); cal_block->cal_size = atomic_read(&acdb_data.audvol_cal[path].cal_size); done: return; } void store_vocproc_cal(int32_t len, struct cal_block *cal_blocks) { int i; pr_debug("%s\n", __func__); if (len > MAX_NETWORKS) { pr_err("%s: Calibration sent for %d networks, only %d are " "supported!\n", __func__, len, MAX_NETWORKS); goto done; } 
atomic_set(&acdb_data.vocproc_total_cal_size, 0); for (i = 0; i < len; i++) { if (cal_blocks[i].cal_offset > atomic64_read(&acdb_data.mem_len)) { pr_err("%s: offset %d is > mem_len %ld\n", __func__, cal_blocks[i].cal_offset, (long)atomic64_read(&acdb_data.mem_len)); atomic_set(&acdb_data.vocproc_cal[i].cal_size, 0); } else { atomic_add(cal_blocks[i].cal_size, &acdb_data.vocproc_total_cal_size); atomic_set(&acdb_data.vocproc_cal[i].cal_size, cal_blocks[i].cal_size); atomic_set(&acdb_data.vocproc_cal[i].cal_paddr, cal_blocks[i].cal_offset + atomic64_read(&acdb_data.paddr)); atomic_set(&acdb_data.vocproc_cal[i].cal_kvaddr, cal_blocks[i].cal_offset + atomic64_read(&acdb_data.kvaddr)); } } atomic_set(&acdb_data.vocproc_cal_size, len); done: return; } void get_vocproc_cal(struct acdb_cal_data *cal_data) { pr_debug("%s\n", __func__); if (cal_data == NULL) { pr_err("ACDB=> NULL pointer sent to %s\n", __func__); goto done; } cal_data->num_cal_blocks = atomic_read(&acdb_data.vocproc_cal_size); cal_data->cal_blocks = &acdb_data.vocproc_cal[0]; done: return; } void store_vocstrm_cal(int32_t len, struct cal_block *cal_blocks) { int i; pr_debug("%s\n", __func__); if (len > MAX_NETWORKS) { pr_err("%s: Calibration sent for %d networks, only %d are " "supported!\n", __func__, len, MAX_NETWORKS); goto done; } atomic_set(&acdb_data.vocstrm_total_cal_size, 0); for (i = 0; i < len; i++) { if (cal_blocks[i].cal_offset > atomic64_read(&acdb_data.mem_len)) { pr_err("%s: offset %d is > mem_len %ld\n", __func__, cal_blocks[i].cal_offset, (long)atomic64_read(&acdb_data.mem_len)); atomic_set(&acdb_data.vocstrm_cal[i].cal_size, 0); } else { atomic_add(cal_blocks[i].cal_size, &acdb_data.vocstrm_total_cal_size); atomic_set(&acdb_data.vocstrm_cal[i].cal_size, cal_blocks[i].cal_size); atomic_set(&acdb_data.vocstrm_cal[i].cal_paddr, cal_blocks[i].cal_offset + atomic64_read(&acdb_data.paddr)); atomic_set(&acdb_data.vocstrm_cal[i].cal_kvaddr, cal_blocks[i].cal_offset + 
atomic64_read(&acdb_data.kvaddr)); } } atomic_set(&acdb_data.vocstrm_cal_size, len); done: return; } void get_vocstrm_cal(struct acdb_cal_data *cal_data) { pr_debug("%s\n", __func__); if (cal_data == NULL) { pr_err("ACDB=> NULL pointer sent to %s\n", __func__); goto done; } cal_data->num_cal_blocks = atomic_read(&acdb_data.vocstrm_cal_size); cal_data->cal_blocks = &acdb_data.vocstrm_cal[0]; done: return; } void store_vocvol_cal(int32_t len, struct cal_block *cal_blocks) { int i; pr_debug("%s\n", __func__); if (len > MAX_NETWORKS) { pr_err("%s: Calibration sent for %d networks, only %d are " "supported!\n", __func__, len, MAX_NETWORKS); goto done; } atomic_set(&acdb_data.vocvol_total_cal_size, 0); for (i = 0; i < len; i++) { if (cal_blocks[i].cal_offset > atomic64_read(&acdb_data.mem_len)) { pr_err("%s: offset %d is > mem_len %ld\n", __func__, cal_blocks[i].cal_offset, (long)atomic64_read(&acdb_data.mem_len)); atomic_set(&acdb_data.vocvol_cal[i].cal_size, 0); } else { atomic_add(cal_blocks[i].cal_size, &acdb_data.vocvol_total_cal_size); atomic_set(&acdb_data.vocvol_cal[i].cal_size, cal_blocks[i].cal_size); atomic_set(&acdb_data.vocvol_cal[i].cal_paddr, cal_blocks[i].cal_offset + atomic64_read(&acdb_data.paddr)); atomic_set(&acdb_data.vocvol_cal[i].cal_kvaddr, cal_blocks[i].cal_offset + atomic64_read(&acdb_data.kvaddr)); } } atomic_set(&acdb_data.vocvol_cal_size, len); done: return; } void get_vocvol_cal(struct acdb_cal_data *cal_data) { pr_debug("%s\n", __func__); if (cal_data == NULL) { pr_err("ACDB=> NULL pointer sent to %s\n", __func__); goto done; } cal_data->num_cal_blocks = atomic_read(&acdb_data.vocvol_cal_size); cal_data->cal_blocks = &acdb_data.vocvol_cal[0]; done: return; } void store_sidetone_cal(struct sidetone_cal *cal_data) { pr_debug("%s\n", __func__); atomic_set(&acdb_data.sidetone_cal.enable, cal_data->enable); atomic_set(&acdb_data.sidetone_cal.gain, cal_data->gain); } void get_sidetone_cal(struct sidetone_cal *cal_data) { pr_debug("%s\n", 
__func__); if (cal_data == NULL) { pr_err("ACDB=> NULL pointer sent to %s\n", __func__); goto done; } cal_data->enable = atomic_read(&acdb_data.sidetone_cal.enable); cal_data->gain = atomic_read(&acdb_data.sidetone_cal.gain); done: return; } static int acdb_open(struct inode *inode, struct file *f) { s32 result = 0; pr_debug("%s\n", __func__); if (atomic64_read(&acdb_data.mem_len)) { pr_debug("%s: ACDB opened but memory allocated, " "using existing allocation!\n", __func__); } atomic_inc(&usage_count); return result; } static void allocate_hw_delay_entries(void) { /* Allocate memory for hw delay entries */ acdb_data.hw_delay_rx.num_entries = 0; acdb_data.hw_delay_tx.num_entries = 0; acdb_data.hw_delay_rx.delay_info = kmalloc(sizeof(struct hw_delay_entry)* MAX_HW_DELAY_ENTRIES, GFP_KERNEL); if (acdb_data.hw_delay_rx.delay_info == NULL) { pr_err("%s : Failed to allocate av sync delay entries rx\n", __func__); } acdb_data.hw_delay_tx.delay_info = kmalloc(sizeof(struct hw_delay_entry)* MAX_HW_DELAY_ENTRIES, GFP_KERNEL); if (acdb_data.hw_delay_tx.delay_info == NULL) { pr_err("%s : Failed to allocate av sync delay entries tx\n", __func__); } return; } static int deregister_memory(void) { mutex_lock(&acdb_data.acdb_mutex); kfree(acdb_data.hw_delay_tx.delay_info); kfree(acdb_data.hw_delay_rx.delay_info); mutex_unlock(&acdb_data.acdb_mutex); if (atomic64_read(&acdb_data.mem_len)) { mutex_lock(&acdb_data.acdb_mutex); atomic_set(&acdb_data.vocstrm_total_cal_size, 0); atomic_set(&acdb_data.vocproc_total_cal_size, 0); atomic_set(&acdb_data.vocvol_total_cal_size, 0); atomic64_set(&acdb_data.mem_len, 0); ion_unmap_kernel(acdb_data.ion_client, acdb_data.ion_handle); ion_free(acdb_data.ion_client, acdb_data.ion_handle); ion_client_destroy(acdb_data.ion_client); mutex_unlock(&acdb_data.acdb_mutex); } return 0; } static int register_memory(void) { int result; unsigned long paddr; void *kvptr; unsigned long kvaddr; unsigned long mem_len; mutex_lock(&acdb_data.acdb_mutex); 
allocate_hw_delay_entries(); acdb_data.ion_client = msm_ion_client_create(UINT_MAX, "audio_acdb_client"); if (IS_ERR_OR_NULL(acdb_data.ion_client)) { pr_err("%s: Could not register ION client!!!\n", __func__); result = PTR_ERR(acdb_data.ion_client); goto err; } acdb_data.ion_handle = ion_import_dma_buf(acdb_data.ion_client, atomic_read(&acdb_data.map_handle)); if (IS_ERR_OR_NULL(acdb_data.ion_handle)) { pr_err("%s: Could not import map handle!!!\n", __func__); result = PTR_ERR(acdb_data.ion_handle); goto err_ion_client; } result = ion_phys(acdb_data.ion_client, acdb_data.ion_handle, &paddr, (size_t *)&mem_len); if (result != 0) { pr_err("%s: Could not get phys addr!!!\n", __func__); goto err_ion_handle; } kvptr = ion_map_kernel(acdb_data.ion_client, acdb_data.ion_handle); if (IS_ERR_OR_NULL(kvptr)) { pr_err("%s: Could not get kernel virt addr!!!\n", __func__); result = PTR_ERR(kvptr); goto err_ion_handle; } kvaddr = (unsigned long)kvptr; atomic64_set(&acdb_data.paddr, paddr); atomic64_set(&acdb_data.kvaddr, kvaddr); atomic64_set(&acdb_data.mem_len, mem_len); mutex_unlock(&acdb_data.acdb_mutex); pr_debug("%s: done! 
paddr = 0x%lx, " "kvaddr = 0x%lx, len = x%lx\n", __func__, (long)atomic64_read(&acdb_data.paddr), (long)atomic64_read(&acdb_data.kvaddr), (long)atomic64_read(&acdb_data.mem_len)); return result; err_ion_handle: ion_free(acdb_data.ion_client, acdb_data.ion_handle); err_ion_client: ion_client_destroy(acdb_data.ion_client); err: atomic64_set(&acdb_data.mem_len, 0); mutex_unlock(&acdb_data.acdb_mutex); return result; } static long acdb_ioctl(struct file *f, unsigned int cmd, unsigned long arg) { int32_t result = 0; int32_t size; int32_t map_fd; uint32_t topology; struct cal_block data[MAX_NETWORKS]; pr_debug("%s\n", __func__); switch (cmd) { case AUDIO_REGISTER_PMEM: pr_debug("AUDIO_REGISTER_PMEM\n"); if (atomic_read(&acdb_data.mem_len)) { deregister_memory(); pr_debug("Remove the existing memory\n"); } if (copy_from_user(&map_fd, (void *)arg, sizeof(map_fd))) { pr_err("%s: fail to copy memory handle!\n", __func__); result = -EFAULT; } else { atomic_set(&acdb_data.map_handle, map_fd); result = register_memory(); } goto done; case AUDIO_DEREGISTER_PMEM: pr_debug("AUDIO_DEREGISTER_PMEM\n"); deregister_memory(); goto done; case AUDIO_SET_VOICE_RX_TOPOLOGY: if (copy_from_user(&topology, (void *)arg, sizeof(topology))) { pr_err("%s: fail to copy topology!\n", __func__); result = -EFAULT; } store_voice_rx_topology(topology); goto done; case AUDIO_SET_VOICE_TX_TOPOLOGY: if (copy_from_user(&topology, (void *)arg, sizeof(topology))) { pr_err("%s: fail to copy topology!\n", __func__); result = -EFAULT; } store_voice_tx_topology(topology); goto done; case AUDIO_SET_ADM_RX_TOPOLOGY: if (copy_from_user(&topology, (void *)arg, sizeof(topology))) { pr_err("%s: fail to copy topology!\n", __func__); result = -EFAULT; } store_adm_rx_topology(topology); goto done; case AUDIO_SET_ADM_TX_TOPOLOGY: if (copy_from_user(&topology, (void *)arg, sizeof(topology))) { pr_err("%s: fail to copy topology!\n", __func__); result = -EFAULT; } store_adm_tx_topology(topology); goto done; case 
AUDIO_SET_ASM_TOPOLOGY: if (copy_from_user(&topology, (void *)arg, sizeof(topology))) { pr_err("%s: fail to copy topology!\n", __func__); result = -EFAULT; } store_asm_topology(topology); goto done; case AUDIO_SET_HW_DELAY_RX: result = store_hw_delay(RX_CAL, (void *)arg); goto done; case AUDIO_SET_HW_DELAY_TX: result = store_hw_delay(TX_CAL, (void *)arg); goto done; } if (copy_from_user(&size, (void *) arg, sizeof(size))) { result = -EFAULT; goto done; } if ((size <= 0) || (size > sizeof(data))) { pr_err("%s: Invalid size sent to driver: %d\n", __func__, size); result = -EFAULT; goto done; } if (copy_from_user(data, (void *)(arg + sizeof(size)), size)) { pr_err("%s: fail to copy table size %d\n", __func__, size); result = -EFAULT; goto done; } if (data == NULL) { pr_err("%s: NULL pointer sent to driver!\n", __func__); result = -EFAULT; goto done; } switch (cmd) { case AUDIO_SET_AUDPROC_TX_CAL: if (size > sizeof(struct cal_block)) pr_err("%s: More Audproc Cal then expected, " "size received: %d\n", __func__, size); store_audproc_cal(TX_CAL, data); break; case AUDIO_SET_AUDPROC_RX_CAL: if (size > sizeof(struct cal_block)) pr_err("%s: More Audproc Cal then expected, " "size received: %d\n", __func__, size); store_audproc_cal(RX_CAL, data); break; case AUDIO_SET_AUDPROC_TX_STREAM_CAL: if (size > sizeof(struct cal_block)) pr_err("%s: More Audproc Cal then expected, " "size received: %d\n", __func__, size); store_audstrm_cal(TX_CAL, data); break; case AUDIO_SET_AUDPROC_RX_STREAM_CAL: if (size > sizeof(struct cal_block)) pr_err("%s: More Audproc Cal then expected, " "size received: %d\n", __func__, size); store_audstrm_cal(RX_CAL, data); break; case AUDIO_SET_AUDPROC_TX_VOL_CAL: if (size > sizeof(struct cal_block)) pr_err("%s: More Audproc Cal then expected, " "size received: %d\n", __func__, size); store_audvol_cal(TX_CAL, data); break; case AUDIO_SET_AUDPROC_RX_VOL_CAL: if (size > sizeof(struct cal_block)) pr_err("%s: More Audproc Cal then expected, " "size received: 
%d\n", __func__, size); store_audvol_cal(RX_CAL, data); break; case AUDIO_SET_AFE_TX_CAL: if (size > sizeof(struct cal_block)) pr_err("%s: More AFE Cal then expected, " "size received: %d\n", __func__, size); store_afe_cal(TX_CAL, data); break; case AUDIO_SET_AFE_RX_CAL: if (size > sizeof(struct cal_block)) pr_err("%s: More AFE Cal then expected, " "size received: %d\n", __func__, size); store_afe_cal(RX_CAL, data); break; case AUDIO_SET_VOCPROC_CAL: store_vocproc_cal(size / sizeof(struct cal_block), data); break; case AUDIO_SET_VOCPROC_STREAM_CAL: store_vocstrm_cal(size / sizeof(struct cal_block), data); break; case AUDIO_SET_VOCPROC_VOL_CAL: store_vocvol_cal(size / sizeof(struct cal_block), data); break; case AUDIO_SET_SIDETONE_CAL: if (size > sizeof(struct sidetone_cal)) pr_err("%s: More sidetone cal then expected, " "size received: %d\n", __func__, size); store_sidetone_cal((struct sidetone_cal *)data); break; case AUDIO_SET_ANC_CAL: store_anc_cal(data); break; default: pr_err("ACDB=> ACDB ioctl not found!\n"); } done: return result; } static int acdb_mmap(struct file *file, struct vm_area_struct *vma) { int result = 0; uint32_t size = vma->vm_end - vma->vm_start; pr_debug("%s\n", __func__); if (atomic64_read(&acdb_data.mem_len)) { if (size <= atomic64_read(&acdb_data.mem_len)) { vma->vm_page_prot = pgprot_noncached( vma->vm_page_prot); result = remap_pfn_range(vma, vma->vm_start, atomic64_read(&acdb_data.paddr) >> PAGE_SHIFT, size, vma->vm_page_prot); } else { pr_err("%s: Not enough memory!\n", __func__); result = -ENOMEM; } } else { pr_err("%s: memory is not allocated, yet!\n", __func__); result = -ENODEV; } return result; } static int acdb_release(struct inode *inode, struct file *f) { s32 result = 0; atomic_dec(&usage_count); atomic_read(&usage_count); pr_debug("%s: ref count %d!\n", __func__, atomic_read(&usage_count)); if (atomic_read(&usage_count) >= 1) result = -EBUSY; else result = deregister_memory(); return result; } static const struct 
file_operations acdb_fops = { .owner = THIS_MODULE, .open = acdb_open, .release = acdb_release, .unlocked_ioctl = acdb_ioctl, .mmap = acdb_mmap, }; struct miscdevice acdb_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_acdb", .fops = &acdb_fops, }; static int __init acdb_init(void) { memset(&acdb_data, 0, sizeof(acdb_data)); mutex_init(&acdb_data.acdb_mutex); atomic_set(&usage_count, 0); return misc_register(&acdb_misc); } static void __exit acdb_exit(void) { } module_init(acdb_init); module_exit(acdb_exit); MODULE_DESCRIPTION("MSM 8x60 Audio ACDB driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
dagnarf/sgh-i717-dagkernel
arch/x86/kernel/x8664_ksyms_64.c
1037
1412
/* Exports for assembly files. All C exports should go in the respective C files. */ #include <linux/module.h> #include <linux/smp.h> #include <net/checksum.h> #include <asm/processor.h> #include <asm/pgtable.h> #include <asm/uaccess.h> #include <asm/desc.h> #include <asm/ftrace.h> #ifdef CONFIG_FUNCTION_TRACER /* mcount is defined in assembly */ EXPORT_SYMBOL(mcount); #endif EXPORT_SYMBOL(__get_user_1); EXPORT_SYMBOL(__get_user_2); EXPORT_SYMBOL(__get_user_4); EXPORT_SYMBOL(__get_user_8); EXPORT_SYMBOL(__put_user_1); EXPORT_SYMBOL(__put_user_2); EXPORT_SYMBOL(__put_user_4); EXPORT_SYMBOL(__put_user_8); EXPORT_SYMBOL(copy_user_generic_string); EXPORT_SYMBOL(copy_user_generic_unrolled); EXPORT_SYMBOL(__copy_user_nocache); EXPORT_SYMBOL(_copy_from_user); EXPORT_SYMBOL(_copy_to_user); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(csum_partial); /* * Export string functions. We normally rely on gcc builtin for most of these, * but gcc sometimes decides not to inline them. */ #undef memcpy #undef memset #undef memmove extern void *memset(void *, int, __kernel_size_t); extern void *memcpy(void *, const void *, __kernel_size_t); extern void *__memcpy(void *, const void *, __kernel_size_t); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(__memcpy); EXPORT_SYMBOL(empty_zero_page); #ifndef CONFIG_PARAVIRT EXPORT_SYMBOL(native_load_gs_index); #endif
gpl-2.0
agat63/E4GT_FH13_kernel
drivers/media/video/saa6588.c
3341
13384
/* Driver for SAA6588 RDS decoder (c) 2005 Hans J. Koch This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/types.h> #include <linux/videodev2.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/wait.h> #include <asm/uaccess.h> #include <media/saa6588.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> /* insmod options */ static unsigned int debug; static unsigned int xtal; static unsigned int mmbs; static unsigned int plvl; static unsigned int bufblocks = 100; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "enable debug messages"); module_param(xtal, int, 0); MODULE_PARM_DESC(xtal, "select oscillator frequency (0..3), default 0"); module_param(mmbs, int, 0); MODULE_PARM_DESC(mmbs, "enable MMBS mode: 0=off (default), 1=on"); module_param(plvl, int, 0); MODULE_PARM_DESC(plvl, "select pause level (0..3), default 0"); module_param(bufblocks, int, 0); MODULE_PARM_DESC(bufblocks, "number of buffered blocks, default 100"); MODULE_DESCRIPTION("v4l2 driver module for SAA6588 RDS decoder"); MODULE_AUTHOR("Hans J. 
Koch <koch@hjk-az.de>"); MODULE_LICENSE("GPL"); /* ---------------------------------------------------------------------- */ #define UNSET (-1U) #define PREFIX "saa6588: " #define dprintk if (debug) printk struct saa6588 { struct v4l2_subdev sd; struct delayed_work work; spinlock_t lock; unsigned char *buffer; unsigned int buf_size; unsigned int rd_index; unsigned int wr_index; unsigned int block_count; unsigned char last_blocknum; wait_queue_head_t read_queue; int data_available_for_read; u8 sync; }; static inline struct saa6588 *to_saa6588(struct v4l2_subdev *sd) { return container_of(sd, struct saa6588, sd); } /* ---------------------------------------------------------------------- */ /* * SAA6588 defines */ /* Initialization and mode control byte (0w) */ /* bit 0+1 (DAC0/DAC1) */ #define cModeStandard 0x00 #define cModeFastPI 0x01 #define cModeReducedRequest 0x02 #define cModeInvalid 0x03 /* bit 2 (RBDS) */ #define cProcessingModeRDS 0x00 #define cProcessingModeRBDS 0x04 /* bit 3+4 (SYM0/SYM1) */ #define cErrCorrectionNone 0x00 #define cErrCorrection2Bits 0x08 #define cErrCorrection5Bits 0x10 #define cErrCorrectionNoneRBDS 0x18 /* bit 5 (NWSY) */ #define cSyncNormal 0x00 #define cSyncRestart 0x20 /* bit 6 (TSQD) */ #define cSigQualityDetectOFF 0x00 #define cSigQualityDetectON 0x40 /* bit 7 (SQCM) */ #define cSigQualityTriggered 0x00 #define cSigQualityContinous 0x80 /* Pause level and flywheel control byte (1w) */ /* bits 0..5 (FEB0..FEB5) */ #define cFlywheelMaxBlocksMask 0x3F #define cFlywheelDefault 0x20 /* bits 6+7 (PL0/PL1) */ #define cPauseLevel_11mV 0x00 #define cPauseLevel_17mV 0x40 #define cPauseLevel_27mV 0x80 #define cPauseLevel_43mV 0xC0 /* Pause time/oscillator frequency/quality detector control byte (1w) */ /* bits 0..4 (SQS0..SQS4) */ #define cQualityDetectSensMask 0x1F #define cQualityDetectDefault 0x0F /* bit 5 (SOSC) */ #define cSelectOscFreqOFF 0x00 #define cSelectOscFreqON 0x20 /* bit 6+7 (PTF0/PTF1) */ #define cOscFreq_4332kHz 0x00 #define 
cOscFreq_8664kHz 0x40 #define cOscFreq_12996kHz 0x80 #define cOscFreq_17328kHz 0xC0 /* ---------------------------------------------------------------------- */ static int block_to_user_buf(struct saa6588 *s, unsigned char __user *user_buf) { int i; if (s->rd_index == s->wr_index) { if (debug > 2) dprintk(PREFIX "Read: buffer empty.\n"); return 0; } if (debug > 2) { dprintk(PREFIX "Read: "); for (i = s->rd_index; i < s->rd_index + 3; i++) dprintk("0x%02x ", s->buffer[i]); } if (copy_to_user(user_buf, &s->buffer[s->rd_index], 3)) return -EFAULT; s->rd_index += 3; if (s->rd_index >= s->buf_size) s->rd_index = 0; s->block_count--; if (debug > 2) dprintk("%d blocks total.\n", s->block_count); return 1; } static void read_from_buf(struct saa6588 *s, struct saa6588_command *a) { unsigned long flags; unsigned char __user *buf_ptr = a->buffer; unsigned int i; unsigned int rd_blocks; a->result = 0; if (!a->buffer) return; while (!s->data_available_for_read) { int ret = wait_event_interruptible(s->read_queue, s->data_available_for_read); if (ret == -ERESTARTSYS) { a->result = -EINTR; return; } } spin_lock_irqsave(&s->lock, flags); rd_blocks = a->block_count; if (rd_blocks > s->block_count) rd_blocks = s->block_count; if (!rd_blocks) { spin_unlock_irqrestore(&s->lock, flags); return; } for (i = 0; i < rd_blocks; i++) { if (block_to_user_buf(s, buf_ptr)) { buf_ptr += 3; a->result++; } else break; } a->result *= 3; s->data_available_for_read = (s->block_count > 0); spin_unlock_irqrestore(&s->lock, flags); } static void block_to_buf(struct saa6588 *s, unsigned char *blockbuf) { unsigned int i; if (debug > 3) dprintk(PREFIX "New block: "); for (i = 0; i < 3; ++i) { if (debug > 3) dprintk("0x%02x ", blockbuf[i]); s->buffer[s->wr_index] = blockbuf[i]; s->wr_index++; } if (s->wr_index >= s->buf_size) s->wr_index = 0; if (s->wr_index == s->rd_index) { s->rd_index += 3; if (s->rd_index >= s->buf_size) s->rd_index = 0; } else s->block_count++; if (debug > 3) dprintk("%d blocks 
total.\n", s->block_count); } static void saa6588_i2c_poll(struct saa6588 *s) { struct i2c_client *client = v4l2_get_subdevdata(&s->sd); unsigned long flags; unsigned char tmpbuf[6]; unsigned char blocknum; unsigned char tmp; /* Although we only need 3 bytes, we have to read at least 6. SAA6588 returns garbage otherwise. */ if (6 != i2c_master_recv(client, &tmpbuf[0], 6)) { if (debug > 1) dprintk(PREFIX "read error!\n"); return; } s->sync = tmpbuf[0] & 0x10; if (!s->sync) return; blocknum = tmpbuf[0] >> 5; if (blocknum == s->last_blocknum) { if (debug > 3) dprintk("Saw block %d again.\n", blocknum); return; } s->last_blocknum = blocknum; /* Byte order according to v4l2 specification: Byte 0: Least Significant Byte of RDS Block Byte 1: Most Significant Byte of RDS Block Byte 2 Bit 7: Error bit. Indicates that an uncorrectable error occurred during reception of this block. Bit 6: Corrected bit. Indicates that an error was corrected for this data block. Bits 5-3: Same as bits 0-2. Bits 2-0: Block number. SAA6588 byte order is Status-MSB-LSB, so we have to swap the first and the last of the 3 bytes block. */ tmp = tmpbuf[2]; tmpbuf[2] = tmpbuf[0]; tmpbuf[0] = tmp; /* Map 'Invalid block E' to 'Invalid Block' */ if (blocknum == 6) blocknum = V4L2_RDS_BLOCK_INVALID; /* And if are not in mmbs mode, then 'Block E' is also mapped to 'Invalid Block'. As far as I can tell MMBS is discontinued, and if there is ever a need to support E blocks, then please contact the linux-media mailinglist. */ else if (!mmbs && blocknum == 5) blocknum = V4L2_RDS_BLOCK_INVALID; tmp = blocknum; tmp |= blocknum << 3; /* Received offset == Offset Name (OK ?) */ if ((tmpbuf[2] & 0x03) == 0x03) tmp |= V4L2_RDS_BLOCK_ERROR; /* uncorrectable error */ else if ((tmpbuf[2] & 0x03) != 0x00) tmp |= V4L2_RDS_BLOCK_CORRECTED; /* corrected error */ tmpbuf[2] = tmp; /* Is this enough ? Should we also check other bits ? 
*/ spin_lock_irqsave(&s->lock, flags); block_to_buf(s, tmpbuf); spin_unlock_irqrestore(&s->lock, flags); s->data_available_for_read = 1; wake_up_interruptible(&s->read_queue); } static void saa6588_work(struct work_struct *work) { struct saa6588 *s = container_of(work, struct saa6588, work.work); saa6588_i2c_poll(s); schedule_delayed_work(&s->work, msecs_to_jiffies(20)); } static void saa6588_configure(struct saa6588 *s) { struct i2c_client *client = v4l2_get_subdevdata(&s->sd); unsigned char buf[3]; int rc; buf[0] = cSyncRestart; if (mmbs) buf[0] |= cProcessingModeRBDS; buf[1] = cFlywheelDefault; switch (plvl) { case 0: buf[1] |= cPauseLevel_11mV; break; case 1: buf[1] |= cPauseLevel_17mV; break; case 2: buf[1] |= cPauseLevel_27mV; break; case 3: buf[1] |= cPauseLevel_43mV; break; default: /* nothing */ break; } buf[2] = cQualityDetectDefault | cSelectOscFreqON; switch (xtal) { case 0: buf[2] |= cOscFreq_4332kHz; break; case 1: buf[2] |= cOscFreq_8664kHz; break; case 2: buf[2] |= cOscFreq_12996kHz; break; case 3: buf[2] |= cOscFreq_17328kHz; break; default: /* nothing */ break; } dprintk(PREFIX "writing: 0w=0x%02x 1w=0x%02x 2w=0x%02x\n", buf[0], buf[1], buf[2]); rc = i2c_master_send(client, buf, 3); if (rc != 3) printk(PREFIX "i2c i/o error: rc == %d (should be 3)\n", rc); } /* ---------------------------------------------------------------------- */ static long saa6588_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg) { struct saa6588 *s = to_saa6588(sd); struct saa6588_command *a = arg; switch (cmd) { /* --- open() for /dev/radio --- */ case SAA6588_CMD_OPEN: a->result = 0; /* return error if chip doesn't work ??? 
*/ break; /* --- close() for /dev/radio --- */ case SAA6588_CMD_CLOSE: s->data_available_for_read = 1; wake_up_interruptible(&s->read_queue); a->result = 0; break; /* --- read() for /dev/radio --- */ case SAA6588_CMD_READ: read_from_buf(s, a); break; /* --- poll() for /dev/radio --- */ case SAA6588_CMD_POLL: a->result = 0; if (s->data_available_for_read) { a->result |= POLLIN | POLLRDNORM; } poll_wait(a->instance, &s->read_queue, a->event_list); break; default: /* nothing */ return -ENOIOCTLCMD; } return 0; } static int saa6588_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct saa6588 *s = to_saa6588(sd); vt->capability |= V4L2_TUNER_CAP_RDS | V4L2_TUNER_CAP_RDS_BLOCK_IO; if (s->sync) vt->rxsubchans |= V4L2_TUNER_SUB_RDS; return 0; } static int saa6588_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct saa6588 *s = to_saa6588(sd); saa6588_configure(s); return 0; } static int saa6588_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_SAA6588, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops saa6588_core_ops = { .g_chip_ident = saa6588_g_chip_ident, .ioctl = saa6588_ioctl, }; static const struct v4l2_subdev_tuner_ops saa6588_tuner_ops = { .g_tuner = saa6588_g_tuner, .s_tuner = saa6588_s_tuner, }; static const struct v4l2_subdev_ops saa6588_ops = { .core = &saa6588_core_ops, .tuner = &saa6588_tuner_ops, }; /* ---------------------------------------------------------------------- */ static int saa6588_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct saa6588 *s; struct v4l2_subdev *sd; v4l_info(client, "saa6588 found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); s = kzalloc(sizeof(*s), GFP_KERNEL); if (s == NULL) return -ENOMEM; s->buf_size = bufblocks * 3; s->buffer = kmalloc(s->buf_size, 
GFP_KERNEL); if (s->buffer == NULL) { kfree(s); return -ENOMEM; } sd = &s->sd; v4l2_i2c_subdev_init(sd, client, &saa6588_ops); spin_lock_init(&s->lock); s->block_count = 0; s->wr_index = 0; s->rd_index = 0; s->last_blocknum = 0xff; init_waitqueue_head(&s->read_queue); s->data_available_for_read = 0; saa6588_configure(s); /* start polling via eventd */ INIT_DELAYED_WORK(&s->work, saa6588_work); schedule_delayed_work(&s->work, 0); return 0; } static int saa6588_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct saa6588 *s = to_saa6588(sd); v4l2_device_unregister_subdev(sd); cancel_delayed_work_sync(&s->work); kfree(s->buffer); kfree(s); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id saa6588_id[] = { { "saa6588", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, saa6588_id); static struct i2c_driver saa6588_driver = { .driver = { .owner = THIS_MODULE, .name = "saa6588", }, .probe = saa6588_probe, .remove = saa6588_remove, .id_table = saa6588_id, }; static __init int init_saa6588(void) { return i2c_add_driver(&saa6588_driver); } static __exit void exit_saa6588(void) { i2c_del_driver(&saa6588_driver); } module_init(init_saa6588); module_exit(exit_saa6588);
gpl-2.0
NoelMacwan/Kernel-Honami-14.1.N.0.52
arch/arm/mach-imx/mach-scb9328.c
4877
3283
/* * linux/arch/arm/mach-mx1/mach-scb9328.c * * Copyright (c) 2004 Sascha Hauer <saschahauer@web.de> * Copyright (c) 2006-2008 Juergen Beisert <jbeisert@netscape.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/platform_device.h> #include <linux/mtd/physmap.h> #include <linux/interrupt.h> #include <linux/dm9000.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <mach/common.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <mach/iomux-mx1.h> #include "devices-imx1.h" /* * This scb9328 has a 32MiB flash */ static struct resource flash_resource = { .start = MX1_CS0_PHYS, .end = MX1_CS0_PHYS + (32 * 1024 * 1024) - 1, .flags = IORESOURCE_MEM, }; static struct physmap_flash_data scb_flash_data = { .width = 2, }; static struct platform_device scb_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &scb_flash_data, }, .resource = &flash_resource, .num_resources = 1, }; /* * scb9328 has a DM9000 network controller * connected to CS5, with 16 bit data path * and interrupt connected to GPIO 3 */ /* * internal datapath is fixed 16 bit */ static struct dm9000_plat_data dm9000_platdata = { .flags = DM9000_PLATF_16BITONLY, }; /* * the DM9000 drivers wants two defined address spaces * to gain access to address latch registers and the data path. 
*/ static struct resource dm9000x_resources[] = { { .name = "address area", .start = MX1_CS5_PHYS, .end = MX1_CS5_PHYS + 1, .flags = IORESOURCE_MEM, /* address access */ }, { .name = "data area", .start = MX1_CS5_PHYS + 4, .end = MX1_CS5_PHYS + 5, .flags = IORESOURCE_MEM, /* data access */ }, { .start = IRQ_GPIOC(3), .end = IRQ_GPIOC(3), .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, }; static struct platform_device dm9000x_device = { .name = "dm9000", .id = 0, .num_resources = ARRAY_SIZE(dm9000x_resources), .resource = dm9000x_resources, .dev = { .platform_data = &dm9000_platdata, } }; static const int mxc_uart1_pins[] = { PC9_PF_UART1_CTS, PC10_PF_UART1_RTS, PC11_PF_UART1_TXD, PC12_PF_UART1_RXD, }; static const struct imxuart_platform_data uart_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; static struct platform_device *devices[] __initdata = { &scb_flash_device, &dm9000x_device, }; /* * scb9328_init - Init the CPU card itself */ static void __init scb9328_init(void) { imx1_soc_init(); mxc_gpio_setup_multiple_pins(mxc_uart1_pins, ARRAY_SIZE(mxc_uart1_pins), "UART1"); imx1_add_imx_uart0(&uart_pdata); printk(KERN_INFO"Scb9328: Adding devices\n"); platform_add_devices(devices, ARRAY_SIZE(devices)); } static void __init scb9328_timer_init(void) { mx1_clocks_init(32000); } static struct sys_timer scb9328_timer = { .init = scb9328_timer_init, }; MACHINE_START(SCB9328, "Synertronixx scb9328") /* Sascha Hauer */ .atag_offset = 100, .map_io = mx1_map_io, .init_early = imx1_init_early, .init_irq = mx1_init_irq, .handle_irq = imx1_handle_irq, .timer = &scb9328_timer, .init_machine = scb9328_init, .restart = mxc_restart, MACHINE_END
gpl-2.0
MoKee/android_kernel_yu_msm8916
drivers/scsi/device_handler/scsi_dh_hp_sw.c
7693
10601
/* * Basic HP/COMPAQ MSA 1000 support. This is only needed if your HW cannot be * upgraded. * * Copyright (C) 2006 Red Hat, Inc. All rights reserved. * Copyright (C) 2006 Mike Christie * Copyright (C) 2008 Hannes Reinecke <hare@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/slab.h> #include <linux/module.h> #include <scsi/scsi.h> #include <scsi/scsi_dbg.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dh.h> #define HP_SW_NAME "hp_sw" #define HP_SW_TIMEOUT (60 * HZ) #define HP_SW_RETRIES 3 #define HP_SW_PATH_UNINITIALIZED -1 #define HP_SW_PATH_ACTIVE 0 #define HP_SW_PATH_PASSIVE 1 struct hp_sw_dh_data { unsigned char sense[SCSI_SENSE_BUFFERSIZE]; int path_state; int retries; int retry_cnt; struct scsi_device *sdev; activate_complete callback_fn; void *callback_data; }; static int hp_sw_start_stop(struct hp_sw_dh_data *); static inline struct hp_sw_dh_data *get_hp_sw_data(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; BUG_ON(scsi_dh_data == NULL); return ((struct hp_sw_dh_data *) scsi_dh_data->buf); } /* * tur_done - Handle TEST UNIT READY return status * @sdev: sdev the command has been sent to * @errors: blk error code * * Returns SCSI_DH_DEV_OFFLINED if the sdev is on the passive path */ static int tur_done(struct scsi_device *sdev, unsigned char *sense) { struct 
scsi_sense_hdr sshdr; int ret; ret = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr); if (!ret) { sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed, no sense available\n", HP_SW_NAME); ret = SCSI_DH_IO; goto done; } switch (sshdr.sense_key) { case UNIT_ATTENTION: ret = SCSI_DH_IMM_RETRY; break; case NOT_READY: if ((sshdr.asc == 0x04) && (sshdr.ascq == 2)) { /* * LUN not ready - Initialization command required * * This is the passive path */ ret = SCSI_DH_DEV_OFFLINED; break; } /* Fallthrough */ default: sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed, sense %x/%x/%x\n", HP_SW_NAME, sshdr.sense_key, sshdr.asc, sshdr.ascq); break; } done: return ret; } /* * hp_sw_tur - Send TEST UNIT READY * @sdev: sdev command should be sent to * * Use the TEST UNIT READY command to determine * the path state. */ static int hp_sw_tur(struct scsi_device *sdev, struct hp_sw_dh_data *h) { struct request *req; int ret; retry: req = blk_get_request(sdev->request_queue, WRITE, GFP_NOIO); if (!req) return SCSI_DH_RES_TEMP_UNAVAIL; req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; req->cmd_len = COMMAND_SIZE(TEST_UNIT_READY); req->cmd[0] = TEST_UNIT_READY; req->timeout = HP_SW_TIMEOUT; req->sense = h->sense; memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); req->sense_len = 0; ret = blk_execute_rq(req->q, NULL, req, 1); if (ret == -EIO) { if (req->sense_len > 0) { ret = tur_done(sdev, h->sense); } else { sdev_printk(KERN_WARNING, sdev, "%s: sending tur failed with %x\n", HP_SW_NAME, req->errors); ret = SCSI_DH_IO; } } else { h->path_state = HP_SW_PATH_ACTIVE; ret = SCSI_DH_OK; } if (ret == SCSI_DH_IMM_RETRY) { blk_put_request(req); goto retry; } if (ret == SCSI_DH_DEV_OFFLINED) { h->path_state = HP_SW_PATH_PASSIVE; ret = SCSI_DH_OK; } blk_put_request(req); return ret; } /* * start_done - Handle START STOP UNIT return status * @sdev: sdev the command has been sent to * @errors: blk error code */ static 
int start_done(struct scsi_device *sdev, unsigned char *sense) { struct scsi_sense_hdr sshdr; int rc; rc = scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr); if (!rc) { sdev_printk(KERN_WARNING, sdev, "%s: sending start_stop_unit failed, " "no sense available\n", HP_SW_NAME); return SCSI_DH_IO; } switch (sshdr.sense_key) { case NOT_READY: if ((sshdr.asc == 0x04) && (sshdr.ascq == 3)) { /* * LUN not ready - manual intervention required * * Switch-over in progress, retry. */ rc = SCSI_DH_RETRY; break; } /* fall through */ default: sdev_printk(KERN_WARNING, sdev, "%s: sending start_stop_unit failed, sense %x/%x/%x\n", HP_SW_NAME, sshdr.sense_key, sshdr.asc, sshdr.ascq); rc = SCSI_DH_IO; } return rc; } static void start_stop_endio(struct request *req, int error) { struct hp_sw_dh_data *h = req->end_io_data; unsigned err = SCSI_DH_OK; if (error || host_byte(req->errors) != DID_OK || msg_byte(req->errors) != COMMAND_COMPLETE) { sdev_printk(KERN_WARNING, h->sdev, "%s: sending start_stop_unit failed with %x\n", HP_SW_NAME, req->errors); err = SCSI_DH_IO; goto done; } if (req->sense_len > 0) { err = start_done(h->sdev, h->sense); if (err == SCSI_DH_RETRY) { err = SCSI_DH_IO; if (--h->retry_cnt) { blk_put_request(req); err = hp_sw_start_stop(h); if (err == SCSI_DH_OK) return; } } } done: req->end_io_data = NULL; __blk_put_request(req->q, req); if (h->callback_fn) { h->callback_fn(h->callback_data, err); h->callback_fn = h->callback_data = NULL; } return; } /* * hp_sw_start_stop - Send START STOP UNIT command * @sdev: sdev command should be sent to * * Sending START STOP UNIT activates the SP. 
*/ static int hp_sw_start_stop(struct hp_sw_dh_data *h) { struct request *req; req = blk_get_request(h->sdev->request_queue, WRITE, GFP_ATOMIC); if (!req) return SCSI_DH_RES_TEMP_UNAVAIL; req->cmd_type = REQ_TYPE_BLOCK_PC; req->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; req->cmd_len = COMMAND_SIZE(START_STOP); req->cmd[0] = START_STOP; req->cmd[4] = 1; /* Start spin cycle */ req->timeout = HP_SW_TIMEOUT; req->sense = h->sense; memset(req->sense, 0, SCSI_SENSE_BUFFERSIZE); req->sense_len = 0; req->end_io_data = h; blk_execute_rq_nowait(req->q, NULL, req, 1, start_stop_endio); return SCSI_DH_OK; } static int hp_sw_prep_fn(struct scsi_device *sdev, struct request *req) { struct hp_sw_dh_data *h = get_hp_sw_data(sdev); int ret = BLKPREP_OK; if (h->path_state != HP_SW_PATH_ACTIVE) { ret = BLKPREP_KILL; req->cmd_flags |= REQ_QUIET; } return ret; } /* * hp_sw_activate - Activate a path * @sdev: sdev on the path to be activated * * The HP Active/Passive firmware is pretty simple; * the passive path reports NOT READY with sense codes * 0x04/0x02; a START STOP UNIT command will then * activate the passive path (and deactivate the * previously active one). 
*/ static int hp_sw_activate(struct scsi_device *sdev, activate_complete fn, void *data) { int ret = SCSI_DH_OK; struct hp_sw_dh_data *h = get_hp_sw_data(sdev); ret = hp_sw_tur(sdev, h); if (ret == SCSI_DH_OK && h->path_state == HP_SW_PATH_PASSIVE) { h->retry_cnt = h->retries; h->callback_fn = fn; h->callback_data = data; ret = hp_sw_start_stop(h); if (ret == SCSI_DH_OK) return 0; h->callback_fn = h->callback_data = NULL; } if (fn) fn(data, ret); return 0; } static const struct scsi_dh_devlist hp_sw_dh_data_list[] = { {"COMPAQ", "MSA1000 VOLUME"}, {"COMPAQ", "HSV110"}, {"HP", "HSV100"}, {"DEC", "HSG80"}, {NULL, NULL}, }; static bool hp_sw_match(struct scsi_device *sdev) { int i; if (scsi_device_tpgs(sdev)) return false; for (i = 0; hp_sw_dh_data_list[i].vendor; i++) { if (!strncmp(sdev->vendor, hp_sw_dh_data_list[i].vendor, strlen(hp_sw_dh_data_list[i].vendor)) && !strncmp(sdev->model, hp_sw_dh_data_list[i].model, strlen(hp_sw_dh_data_list[i].model))) { return true; } } return false; } static int hp_sw_bus_attach(struct scsi_device *sdev); static void hp_sw_bus_detach(struct scsi_device *sdev); static struct scsi_device_handler hp_sw_dh = { .name = HP_SW_NAME, .module = THIS_MODULE, .devlist = hp_sw_dh_data_list, .attach = hp_sw_bus_attach, .detach = hp_sw_bus_detach, .activate = hp_sw_activate, .prep_fn = hp_sw_prep_fn, .match = hp_sw_match, }; static int hp_sw_bus_attach(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data; struct hp_sw_dh_data *h; unsigned long flags; int ret; scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach Failed\n", HP_SW_NAME); return 0; } scsi_dh_data->scsi_dh = &hp_sw_dh; h = (struct hp_sw_dh_data *) scsi_dh_data->buf; h->path_state = HP_SW_PATH_UNINITIALIZED; h->retries = HP_SW_RETRIES; h->sdev = sdev; ret = hp_sw_tur(sdev, h); if (ret != SCSI_DH_OK || h->path_state == HP_SW_PATH_UNINITIALIZED) goto failed; if (!try_module_get(THIS_MODULE)) goto 
failed; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); sdev->scsi_dh_data = scsi_dh_data; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); sdev_printk(KERN_INFO, sdev, "%s: attached to %s path\n", HP_SW_NAME, h->path_state == HP_SW_PATH_ACTIVE? "active":"passive"); return 0; failed: kfree(scsi_dh_data); sdev_printk(KERN_ERR, sdev, "%s: not attached\n", HP_SW_NAME); return -EINVAL; } static void hp_sw_bus_detach( struct scsi_device *sdev ) { struct scsi_dh_data *scsi_dh_data; unsigned long flags; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); scsi_dh_data = sdev->scsi_dh_data; sdev->scsi_dh_data = NULL; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); module_put(THIS_MODULE); sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", HP_SW_NAME); kfree(scsi_dh_data); } static int __init hp_sw_init(void) { return scsi_register_device_handler(&hp_sw_dh); } static void __exit hp_sw_exit(void) { scsi_unregister_device_handler(&hp_sw_dh); } module_init(hp_sw_init); module_exit(hp_sw_exit); MODULE_DESCRIPTION("HP Active/Passive driver"); MODULE_AUTHOR("Mike Christie <michaelc@cs.wisc.edu"); MODULE_LICENSE("GPL");
gpl-2.0
janarthananfit/Nightlies-4.4
kernel/test_kprobes.c
8461
8671
/* * test_kprobes.c - simple sanity test for *probes * * Copyright IBM Corp. 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/kprobes.h> #include <linux/random.h> #define div_factor 3 static u32 rand1, preh_val, posth_val, jph_val; static int errors, handler_errors, num_tests; static u32 (*target)(u32 value); static u32 (*target2)(u32 value); static noinline u32 kprobe_target(u32 value) { return (value / div_factor); } static int kp_pre_handler(struct kprobe *p, struct pt_regs *regs) { preh_val = (rand1 / div_factor); return 0; } static void kp_post_handler(struct kprobe *p, struct pt_regs *regs, unsigned long flags) { if (preh_val != (rand1 / div_factor)) { handler_errors++; printk(KERN_ERR "Kprobe smoke test failed: " "incorrect value in post_handler\n"); } posth_val = preh_val + div_factor; } static struct kprobe kp = { .symbol_name = "kprobe_target", .pre_handler = kp_pre_handler, .post_handler = kp_post_handler }; static int test_kprobe(void) { int ret; ret = register_kprobe(&kp); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " "register_kprobe returned %d\n", ret); return ret; } ret = target(rand1); unregister_kprobe(&kp); if (preh_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "kprobe pre_handler not called\n"); handler_errors++; } if (posth_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "kprobe post_handler not called\n"); handler_errors++; } return 0; } static noinline u32 kprobe_target2(u32 value) { return (value / div_factor) + 1; } 
static int kp_pre_handler2(struct kprobe *p, struct pt_regs *regs) { preh_val = (rand1 / div_factor) + 1; return 0; } static void kp_post_handler2(struct kprobe *p, struct pt_regs *regs, unsigned long flags) { if (preh_val != (rand1 / div_factor) + 1) { handler_errors++; printk(KERN_ERR "Kprobe smoke test failed: " "incorrect value in post_handler2\n"); } posth_val = preh_val + div_factor; } static struct kprobe kp2 = { .symbol_name = "kprobe_target2", .pre_handler = kp_pre_handler2, .post_handler = kp_post_handler2 }; static int test_kprobes(void) { int ret; struct kprobe *kps[2] = {&kp, &kp2}; /* addr and flags should be cleard for reusing kprobe. */ kp.addr = NULL; kp.flags = 0; ret = register_kprobes(kps, 2); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " "register_kprobes returned %d\n", ret); return ret; } preh_val = 0; posth_val = 0; ret = target(rand1); if (preh_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "kprobe pre_handler not called\n"); handler_errors++; } if (posth_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "kprobe post_handler not called\n"); handler_errors++; } preh_val = 0; posth_val = 0; ret = target2(rand1); if (preh_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "kprobe pre_handler2 not called\n"); handler_errors++; } if (posth_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "kprobe post_handler2 not called\n"); handler_errors++; } unregister_kprobes(kps, 2); return 0; } static u32 j_kprobe_target(u32 value) { if (value != rand1) { handler_errors++; printk(KERN_ERR "Kprobe smoke test failed: " "incorrect value in jprobe handler\n"); } jph_val = rand1; jprobe_return(); return 0; } static struct jprobe jp = { .entry = j_kprobe_target, .kp.symbol_name = "kprobe_target" }; static int test_jprobe(void) { int ret; ret = register_jprobe(&jp); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " "register_jprobe returned %d\n", ret); return ret; } ret = target(rand1); 
unregister_jprobe(&jp); if (jph_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "jprobe handler not called\n"); handler_errors++; } return 0; } static struct jprobe jp2 = { .entry = j_kprobe_target, .kp.symbol_name = "kprobe_target2" }; static int test_jprobes(void) { int ret; struct jprobe *jps[2] = {&jp, &jp2}; /* addr and flags should be cleard for reusing kprobe. */ jp.kp.addr = NULL; jp.kp.flags = 0; ret = register_jprobes(jps, 2); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " "register_jprobes returned %d\n", ret); return ret; } jph_val = 0; ret = target(rand1); if (jph_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "jprobe handler not called\n"); handler_errors++; } jph_val = 0; ret = target2(rand1); if (jph_val == 0) { printk(KERN_ERR "Kprobe smoke test failed: " "jprobe handler2 not called\n"); handler_errors++; } unregister_jprobes(jps, 2); return 0; } #ifdef CONFIG_KRETPROBES static u32 krph_val; static int entry_handler(struct kretprobe_instance *ri, struct pt_regs *regs) { krph_val = (rand1 / div_factor); return 0; } static int return_handler(struct kretprobe_instance *ri, struct pt_regs *regs) { unsigned long ret = regs_return_value(regs); if (ret != (rand1 / div_factor)) { handler_errors++; printk(KERN_ERR "Kprobe smoke test failed: " "incorrect value in kretprobe handler\n"); } if (krph_val == 0) { handler_errors++; printk(KERN_ERR "Kprobe smoke test failed: " "call to kretprobe entry handler failed\n"); } krph_val = rand1; return 0; } static struct kretprobe rp = { .handler = return_handler, .entry_handler = entry_handler, .kp.symbol_name = "kprobe_target" }; static int test_kretprobe(void) { int ret; ret = register_kretprobe(&rp); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " "register_kretprobe returned %d\n", ret); return ret; } ret = target(rand1); unregister_kretprobe(&rp); if (krph_val != rand1) { printk(KERN_ERR "Kprobe smoke test failed: " "kretprobe handler not called\n"); handler_errors++; } 
return 0; } static int return_handler2(struct kretprobe_instance *ri, struct pt_regs *regs) { unsigned long ret = regs_return_value(regs); if (ret != (rand1 / div_factor) + 1) { handler_errors++; printk(KERN_ERR "Kprobe smoke test failed: " "incorrect value in kretprobe handler2\n"); } if (krph_val == 0) { handler_errors++; printk(KERN_ERR "Kprobe smoke test failed: " "call to kretprobe entry handler failed\n"); } krph_val = rand1; return 0; } static struct kretprobe rp2 = { .handler = return_handler2, .entry_handler = entry_handler, .kp.symbol_name = "kprobe_target2" }; static int test_kretprobes(void) { int ret; struct kretprobe *rps[2] = {&rp, &rp2}; /* addr and flags should be cleard for reusing kprobe. */ rp.kp.addr = NULL; rp.kp.flags = 0; ret = register_kretprobes(rps, 2); if (ret < 0) { printk(KERN_ERR "Kprobe smoke test failed: " "register_kretprobe returned %d\n", ret); return ret; } krph_val = 0; ret = target(rand1); if (krph_val != rand1) { printk(KERN_ERR "Kprobe smoke test failed: " "kretprobe handler not called\n"); handler_errors++; } krph_val = 0; ret = target2(rand1); if (krph_val != rand1) { printk(KERN_ERR "Kprobe smoke test failed: " "kretprobe handler2 not called\n"); handler_errors++; } unregister_kretprobes(rps, 2); return 0; } #endif /* CONFIG_KRETPROBES */ int init_test_probes(void) { int ret; target = kprobe_target; target2 = kprobe_target2; do { rand1 = random32(); } while (rand1 <= div_factor); printk(KERN_INFO "Kprobe smoke test started\n"); num_tests++; ret = test_kprobe(); if (ret < 0) errors++; num_tests++; ret = test_kprobes(); if (ret < 0) errors++; num_tests++; ret = test_jprobe(); if (ret < 0) errors++; num_tests++; ret = test_jprobes(); if (ret < 0) errors++; #ifdef CONFIG_KRETPROBES num_tests++; ret = test_kretprobe(); if (ret < 0) errors++; num_tests++; ret = test_kretprobes(); if (ret < 0) errors++; #endif /* CONFIG_KRETPROBES */ if (errors) printk(KERN_ERR "BUG: Kprobe smoke test: %d out of " "%d tests failed\n", errors, 
num_tests); else if (handler_errors) printk(KERN_ERR "BUG: Kprobe smoke test: %d error(s) " "running handlers\n", handler_errors); else printk(KERN_INFO "Kprobe smoke test passed successfully\n"); return 0; }
gpl-2.0
Alonso1398/muZic_kernel_ivoryss
drivers/media/video/cx18/cx18-av-firmware.c
9229
7232
/* * cx18 ADEC firmware functions * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include "cx18-driver.h" #include "cx18-io.h" #include <linux/firmware.h> #define CX18_AUDIO_ENABLE 0xc72014 #define CX18_AI1_MUX_MASK 0x30 #define CX18_AI1_MUX_I2S1 0x00 #define CX18_AI1_MUX_I2S2 0x10 #define CX18_AI1_MUX_843_I2S 0x20 #define CX18_AI1_MUX_INVALID 0x30 #define FWFILE "v4l-cx23418-dig.fw" static int cx18_av_verifyfw(struct cx18 *cx, const struct firmware *fw) { struct v4l2_subdev *sd = &cx->av_state.sd; int ret = 0; const u8 *data; u32 size; int addr; u32 expected, dl_control; /* Ensure we put the 8051 in reset and enable firmware upload mode */ dl_control = cx18_av_read4(cx, CXADEC_DL_CTL); do { dl_control &= 0x00ffffff; dl_control |= 0x0f000000; cx18_av_write4_noretry(cx, CXADEC_DL_CTL, dl_control); dl_control = cx18_av_read4(cx, CXADEC_DL_CTL); } while ((dl_control & 0xff000000) != 0x0f000000); /* Read and auto increment until at address 0x0000 */ while (dl_control & 0x3fff) dl_control = cx18_av_read4(cx, CXADEC_DL_CTL); data = fw->data; size = fw->size; for (addr = 0; addr < size; addr++) { dl_control &= 0xffff3fff; /* ignore top 2 bits of address */ expected = 0x0f000000 | ((u32)data[addr] << 16) | 
addr; if (expected != dl_control) { CX18_ERR_DEV(sd, "verification of %s firmware load " "failed: expected %#010x got %#010x\n", FWFILE, expected, dl_control); ret = -EIO; break; } dl_control = cx18_av_read4(cx, CXADEC_DL_CTL); } if (ret == 0) CX18_INFO_DEV(sd, "verified load of %s firmware (%d bytes)\n", FWFILE, size); return ret; } int cx18_av_loadfw(struct cx18 *cx) { struct v4l2_subdev *sd = &cx->av_state.sd; const struct firmware *fw = NULL; u32 size; u32 u, v; const u8 *ptr; int i; int retries1 = 0; if (request_firmware(&fw, FWFILE, &cx->pci_dev->dev) != 0) { CX18_ERR_DEV(sd, "unable to open firmware %s\n", FWFILE); return -EINVAL; } /* The firmware load often has byte errors, so allow for several retries, both at byte level and at the firmware load level. */ while (retries1 < 5) { cx18_av_write4_expect(cx, CXADEC_CHIP_CTRL, 0x00010000, 0x00008430, 0xffffffff); /* cx25843 */ cx18_av_write_expect(cx, CXADEC_STD_DET_CTL, 0xf6, 0xf6, 0xff); /* Reset the Mako core, Register is alias of CXADEC_CHIP_CTRL */ cx18_av_write4_expect(cx, 0x8100, 0x00010000, 0x00008430, 0xffffffff); /* cx25843 */ /* Put the 8051 in reset and enable firmware upload */ cx18_av_write4_noretry(cx, CXADEC_DL_CTL, 0x0F000000); ptr = fw->data; size = fw->size; for (i = 0; i < size; i++) { u32 dl_control = 0x0F000000 | i | ((u32)ptr[i] << 16); u32 value = 0; int retries2; int unrec_err = 0; for (retries2 = 0; retries2 < CX18_MAX_MMIO_WR_RETRIES; retries2++) { cx18_av_write4_noretry(cx, CXADEC_DL_CTL, dl_control); udelay(10); value = cx18_av_read4(cx, CXADEC_DL_CTL); if (value == dl_control) break; /* Check if we can correct the byte by changing the address. We can only write the lower address byte of the address. 
*/ if ((value & 0x3F00) != (dl_control & 0x3F00)) { unrec_err = 1; break; } } if (unrec_err || retries2 >= CX18_MAX_MMIO_WR_RETRIES) break; } if (i == size) break; retries1++; } if (retries1 >= 5) { CX18_ERR_DEV(sd, "unable to load firmware %s\n", FWFILE); release_firmware(fw); return -EIO; } cx18_av_write4_expect(cx, CXADEC_DL_CTL, 0x03000000 | fw->size, 0x03000000, 0x13000000); CX18_INFO_DEV(sd, "loaded %s firmware (%d bytes)\n", FWFILE, size); if (cx18_av_verifyfw(cx, fw) == 0) cx18_av_write4_expect(cx, CXADEC_DL_CTL, 0x13000000 | fw->size, 0x13000000, 0x13000000); /* Output to the 416 */ cx18_av_and_or4(cx, CXADEC_PIN_CTRL1, ~0, 0x78000); /* Audio input control 1 set to Sony mode */ /* Audio output input 2 is 0 for slave operation input */ /* 0xC4000914[5]: 0 = left sample on WS=0, 1 = left sample on WS=1 */ /* 0xC4000914[7]: 0 = Philips mode, 1 = Sony mode (1st SCK rising edge after WS transition for first bit of audio word. */ cx18_av_write4(cx, CXADEC_I2S_IN_CTL, 0x000000A0); /* Audio output control 1 is set to Sony mode */ /* Audio output control 2 is set to 1 for master mode */ /* 0xC4000918[5]: 0 = left sample on WS=0, 1 = left sample on WS=1 */ /* 0xC4000918[7]: 0 = Philips mode, 1 = Sony mode (1st SCK rising edge after WS transition for first bit of audio word. */ /* 0xC4000918[8]: 0 = slave operation, 1 = master (SCK_OUT and WS_OUT are generated) */ cx18_av_write4(cx, CXADEC_I2S_OUT_CTL, 0x000001A0); /* set alt I2s master clock to /0x16 and enable alt divider i2s passthrough */ cx18_av_write4(cx, CXADEC_PIN_CFG3, 0x5600B687); cx18_av_write4_expect(cx, CXADEC_STD_DET_CTL, 0x000000F6, 0x000000F6, 0x3F00FFFF); /* CxDevWrReg(CXADEC_STD_DET_CTL, 0x000000FF); */ /* Set bit 0 in register 0x9CC to signify that this is MiniMe. */ /* Register 0x09CC is defined by the Merlin firmware, and doesn't have a name in the spec. 
*/ cx18_av_write4(cx, 0x09CC, 1); v = cx18_read_reg(cx, CX18_AUDIO_ENABLE); /* If bit 11 is 1, clear bit 10 */ if (v & 0x800) cx18_write_reg_expect(cx, v & 0xFFFFFBFF, CX18_AUDIO_ENABLE, 0, 0x400); /* Toggle the AI1 MUX */ v = cx18_read_reg(cx, CX18_AUDIO_ENABLE); u = v & CX18_AI1_MUX_MASK; v &= ~CX18_AI1_MUX_MASK; if (u == CX18_AI1_MUX_843_I2S || u == CX18_AI1_MUX_INVALID) { /* Switch to I2S1 */ v |= CX18_AI1_MUX_I2S1; cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE, v, CX18_AI1_MUX_MASK); /* Switch back to the A/V decoder core I2S output */ v = (v & ~CX18_AI1_MUX_MASK) | CX18_AI1_MUX_843_I2S; } else { /* Switch to the A/V decoder core I2S output */ v |= CX18_AI1_MUX_843_I2S; cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE, v, CX18_AI1_MUX_MASK); /* Switch back to I2S1 or I2S2 */ v = (v & ~CX18_AI1_MUX_MASK) | u; } cx18_write_reg_expect(cx, v | 0xb00, CX18_AUDIO_ENABLE, v, CX18_AI1_MUX_MASK); /* Enable WW auto audio standard detection */ v = cx18_av_read4(cx, CXADEC_STD_DET_CTL); v |= 0xFF; /* Auto by default */ v |= 0x400; /* Stereo by default */ v |= 0x14000000; cx18_av_write4_expect(cx, CXADEC_STD_DET_CTL, v, v, 0x3F00FFFF); release_firmware(fw); return 0; }
gpl-2.0
mayli/wrapfs-latest
drivers/media/pci/cx18/cx18-cards.c
12301
20522
/* * cx18 functions to query card hardware * * Derived from ivtv-cards.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA */ #include "cx18-driver.h" #include "cx18-cards.h" #include "cx18-av-core.h" #include "cx18-i2c.h" #include <media/cs5345.h> #define V4L2_STD_PAL_SECAM (V4L2_STD_PAL|V4L2_STD_SECAM) /********************** card configuration *******************************/ /* usual i2c tuner addresses to probe */ static struct cx18_card_tuner_i2c cx18_i2c_std = { .radio = { I2C_CLIENT_END }, .demod = { 0x43, I2C_CLIENT_END }, .tv = { 0x61, 0x60, I2C_CLIENT_END }, }; /* * usual i2c tuner addresses to probe with additional demod address for * an NXP TDA8295 at 0x42 (N.B. it can possibly be at 0x4b or 0x4c too). */ static struct cx18_card_tuner_i2c cx18_i2c_nxp = { .radio = { I2C_CLIENT_END }, .demod = { 0x42, 0x43, I2C_CLIENT_END }, .tv = { 0x61, 0x60, I2C_CLIENT_END }, }; /* Please add new PCI IDs to: http://pci-ids.ucw.cz/ This keeps the PCI ID database up to date. Note that the entries must be added under vendor 0x4444 (Conexant) as subsystem IDs. New vendor IDs should still be added to the vendor ID list. 
*/ /* Hauppauge HVR-1600 cards */ /* Note: for Hauppauge cards the tveeprom information is used instead of PCI IDs */ static const struct cx18_card cx18_card_hvr1600_esmt = { .type = CX18_CARD_HVR_1600_ESMT, .name = "Hauppauge HVR-1600", .comment = "Simultaneous Digital and Analog TV capture supported\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_CS5345, .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL | CX18_HW_Z8F0811_IR_HAUP, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 }, .ddr = { /* ESMT M13S128324A-5B memory */ .chip_config = 0x003, .refresh = 0x30c, .timing1 = 0x44220e82, .timing2 = 0x08, .tune_lane = 0, .initial_emrs = 0, }, .gpio_init.initial_value = 0x3001, .gpio_init.direction = 0x3001, .gpio_i2c_slave_reset = { .active_lo_mask = 0x3001, .msecs_asserted = 10, .msecs_recovery = 40, .ir_reset_mask = 0x0001, }, .i2c = &cx18_i2c_std, }; static const struct cx18_card cx18_card_hvr1600_s5h1411 = { .type = CX18_CARD_HVR_1600_S5H1411, .name = "Hauppauge HVR-1600", .comment = "Simultaneous Digital and Analog TV capture supported\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_CS5345, .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL | CX18_HW_Z8F0811_IR_HAUP, .video_inputs = { { 
CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 }, .ddr = { /* ESMT M13S128324A-5B memory */ .chip_config = 0x003, .refresh = 0x30c, .timing1 = 0x44220e82, .timing2 = 0x08, .tune_lane = 0, .initial_emrs = 0, }, .gpio_init.initial_value = 0x3801, .gpio_init.direction = 0x3801, .gpio_i2c_slave_reset = { .active_lo_mask = 0x3801, .msecs_asserted = 10, .msecs_recovery = 40, .ir_reset_mask = 0x0001, }, .i2c = &cx18_i2c_nxp, }; static const struct cx18_card cx18_card_hvr1600_samsung = { .type = CX18_CARD_HVR_1600_SAMSUNG, .name = "Hauppauge HVR-1600 (Preproduction)", .comment = "Simultaneous Digital and Analog TV capture supported\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_CS5345, .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_CS5345 | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL | CX18_HW_Z8F0811_IR_HAUP, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE7 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO1 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE3 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO2 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE4 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO8, CS5345_IN_1 | CS5345_MCLK_1_5 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, CS5345_IN_2 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL1, CS5345_IN_3 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, CS5345_IN_4 
}, .ddr = { /* Samsung K4D263238G-VC33 memory */ .chip_config = 0x003, .refresh = 0x30c, .timing1 = 0x23230b73, .timing2 = 0x08, .tune_lane = 0, .initial_emrs = 2, }, .gpio_init.initial_value = 0x3001, .gpio_init.direction = 0x3001, .gpio_i2c_slave_reset = { .active_lo_mask = 0x3001, .msecs_asserted = 10, .msecs_recovery = 40, .ir_reset_mask = 0x0001, }, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Compro VideoMate H900: note that this card is analog only! */ static const struct cx18_card_pci_info cx18_pci_h900[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_COMPRO, 0xe100 }, { 0, 0, 0 } }; static const struct cx18_card cx18_card_h900 = { .type = CX18_CARD_COMPRO_H900, .name = "Compro VideoMate H900", .comment = "Analog TV capture supported\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 0 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, 0 }, .tuners = { { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, .ddr = { /* EtronTech EM6A9160TS-5G memory */ .chip_config = 0x50003, .refresh = 0x753, .timing1 = 0x24330e84, .timing2 = 0x1f, .tune_lane = 0, .initial_emrs = 0, }, .xceive_pin = 15, .pci_list = cx18_pci_h900, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Yuan MPC718: not working at the moment! 
*/ static const struct cx18_card_pci_info cx18_pci_mpc718[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_YUAN, 0x0718 }, { 0, 0, 0 } }; static const struct cx18_card cx18_card_mpc718 = { .type = CX18_CARD_YUAN_MPC718, .name = "Yuan MPC718 MiniPCI DVB-T/Analog", .comment = "Experimenters needed for device to work well.\n" "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_GPIO_MUX, .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 }, }, .tuners = { /* XC3028 tuner */ { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, /* FIXME - the FM radio is just a guess and driver doesn't use SIF */ .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 }, .ddr = { /* Hynix HY5DU283222B DDR RAM */ .chip_config = 0x303, .refresh = 0x3bd, .timing1 = 0x36320966, .timing2 = 0x1f, .tune_lane = 0, .initial_emrs = 2, }, .gpio_init.initial_value = 0x1, .gpio_init.direction = 0x3, /* FIXME - these GPIO's are just guesses */ .gpio_audio_input = { .mask = 0x3, .tuner = 0x1, .linein = 0x3, .radio = 0x1 }, .xceive_pin = 0, .pci_list = cx18_pci_mpc718, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* GoTView PCI */ static const struct cx18_card_pci_info cx18_pci_gotview_dvd3[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_GOTVIEW, 0x3343 }, { 0, 0, 0 } }; 
static const struct cx18_card cx18_card_gotview_dvd3 = { .type = CX18_CARD_GOTVIEW_PCI_DVD3, .name = "GoTView PCI DVD3 Hybrid", .comment = "Experimenters needed for device to work well.\n" "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_GPIO_MUX, .hw_all = CX18_HW_TVEEPROM | CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 }, }, .tuners = { /* XC3028 tuner */ { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, /* FIXME - the FM radio is just a guess and driver doesn't use SIF */ .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 }, .ddr = { /* Hynix HY5DU283222B DDR RAM */ .chip_config = 0x303, .refresh = 0x3bd, .timing1 = 0x36320966, .timing2 = 0x1f, .tune_lane = 0, .initial_emrs = 2, }, .gpio_init.initial_value = 0x1, .gpio_init.direction = 0x3, .gpio_audio_input = { .mask = 0x3, .tuner = 0x1, .linein = 0x2, .radio = 0x1 }, .xceive_pin = 0, .pci_list = cx18_pci_gotview_dvd3, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Conexant Raptor PAL/SECAM: note that this card is analog only! 
*/ static const struct cx18_card_pci_info cx18_pci_cnxt_raptor_pal[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_CONEXANT, 0x0009 }, { 0, 0, 0 } }; static const struct cx18_card cx18_card_cnxt_raptor_pal = { .type = CX18_CARD_CNXT_RAPTOR_PAL, .name = "Conexant Raptor PAL/SECAM", .comment = "Analog TV capture supported\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_GPIO_MUX, .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 }, { CX18_CARD_INPUT_SVIDEO2, 2, CX18_AV_SVIDEO_LUMA7 | CX18_AV_SVIDEO_CHROMA8 }, { CX18_CARD_INPUT_COMPOSITE2, 2, CX18_AV_COMPOSITE6 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, { CX18_CARD_INPUT_LINE_IN2, CX18_AV_AUDIO_SERIAL2, 1 }, }, .tuners = { { .std = V4L2_STD_PAL_SECAM, .tuner = TUNER_PHILIPS_FM1216ME_MK3 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO_SERIAL1, 2 }, .ddr = { /* MT 46V16M16 memory */ .chip_config = 0x50306, .refresh = 0x753, .timing1 = 0x33220953, .timing2 = 0x09, .tune_lane = 0, .initial_emrs = 0, }, .gpio_init.initial_value = 0x1002, .gpio_init.direction = 0xf002, .gpio_audio_input = { .mask = 0xf002, .tuner = 0x1002, /* LED D1 Tuner AF */ .linein = 0x2000, /* LED D2 Line In 1 */ .radio = 0x4002 }, /* LED D3 Tuner AF */ .pci_list = cx18_pci_cnxt_raptor_pal, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Toshiba Qosmio laptop internal DVB-T/Analog Hybrid Tuner */ static const struct cx18_card_pci_info cx18_pci_toshiba_qosmio_dvbt[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_TOSHIBA, 0x0110 }, { 0, 0, 0 } }; static const struct cx18_card cx18_card_toshiba_qosmio_dvbt = { .type = CX18_CARD_TOSHIBA_QOSMIO_DVBT, 
.name = "Toshiba Qosmio DVB-T/Analog", .comment = "Experimenters and photos needed for device to work well.\n" "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE6 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE1 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, }, .tuners = { { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, .ddr = { .chip_config = 0x202, .refresh = 0x3bb, .timing1 = 0x33320a63, .timing2 = 0x0a, .tune_lane = 0, .initial_emrs = 0x42, }, .xceive_pin = 15, .pci_list = cx18_pci_toshiba_qosmio_dvbt, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Leadtek WinFast PVR2100 */ static const struct cx18_card_pci_info cx18_pci_leadtek_pvr2100[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6f27 }, /* PVR2100 */ { 0, 0, 0 } }; static const struct cx18_card cx18_card_leadtek_pvr2100 = { .type = CX18_CARD_LEADTEK_PVR2100, .name = "Leadtek WinFast PVR2100", .comment = "Experimenters and photos needed for device to work well.\n" "\tTo help, mail the ivtv-devel list (www.ivtvdriver.org).\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_GPIO_MUX, .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 }, { CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { 
CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, }, .tuners = { /* XC2028 tuner */ { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 }, .ddr = { /* Pointer to proper DDR config values provided by Terry Wu */ .chip_config = 0x303, .refresh = 0x3bb, .timing1 = 0x24220e83, .timing2 = 0x1f, .tune_lane = 0, .initial_emrs = 0x2, }, .gpio_init.initial_value = 0x6, .gpio_init.direction = 0x7, .gpio_audio_input = { .mask = 0x7, .tuner = 0x6, .linein = 0x2, .radio = 0x2 }, .xceive_pin = 1, .pci_list = cx18_pci_leadtek_pvr2100, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ /* Leadtek WinFast DVR3100 H */ static const struct cx18_card_pci_info cx18_pci_leadtek_dvr3100h[] = { { PCI_DEVICE_ID_CX23418, CX18_PCI_ID_LEADTEK, 0x6690 }, /* DVR3100 H */ { 0, 0, 0 } }; static const struct cx18_card cx18_card_leadtek_dvr3100h = { .type = CX18_CARD_LEADTEK_DVR3100H, .name = "Leadtek WinFast DVR3100 H", .comment = "Simultaneous DVB-T and Analog capture supported,\n" "\texcept when capturing Analog from the antenna input.\n", .v4l2_capabilities = CX18_CAP_ENCODER, .hw_audio_ctrl = CX18_HW_418_AV, .hw_muxer = CX18_HW_GPIO_MUX, .hw_all = CX18_HW_418_AV | CX18_HW_TUNER | CX18_HW_GPIO_MUX | CX18_HW_DVB | CX18_HW_GPIO_RESET_CTRL, .video_inputs = { { CX18_CARD_INPUT_VID_TUNER, 0, CX18_AV_COMPOSITE2 }, { CX18_CARD_INPUT_SVIDEO1, 1, CX18_AV_SVIDEO_LUMA3 | CX18_AV_SVIDEO_CHROMA4 }, { CX18_CARD_INPUT_COMPOSITE1, 1, CX18_AV_COMPOSITE7 }, { CX18_CARD_INPUT_COMPONENT1, 1, CX18_AV_COMPONENT1 }, }, .audio_inputs = { { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 0 }, { CX18_CARD_INPUT_LINE_IN1, CX18_AV_AUDIO_SERIAL1, 1 }, }, .tuners = { /* XC3028 tuner */ { .std = V4L2_STD_ALL, .tuner = TUNER_XC2028 }, }, .radio_input = { CX18_CARD_INPUT_AUD_TUNER, CX18_AV_AUDIO5, 2 }, .ddr = { /* Pointer to proper DDR config values provided by Terry Wu */ .chip_config = 0x303, .refresh = 0x3bb, 
.timing1 = 0x24220e83, .timing2 = 0x1f, .tune_lane = 0, .initial_emrs = 0x2, }, .gpio_init.initial_value = 0x6, .gpio_init.direction = 0x7, .gpio_audio_input = { .mask = 0x7, .tuner = 0x6, .linein = 0x2, .radio = 0x2 }, .xceive_pin = 1, .pci_list = cx18_pci_leadtek_dvr3100h, .i2c = &cx18_i2c_std, }; /* ------------------------------------------------------------------------- */ static const struct cx18_card *cx18_card_list[] = { &cx18_card_hvr1600_esmt, &cx18_card_hvr1600_samsung, &cx18_card_h900, &cx18_card_mpc718, &cx18_card_cnxt_raptor_pal, &cx18_card_toshiba_qosmio_dvbt, &cx18_card_leadtek_pvr2100, &cx18_card_leadtek_dvr3100h, &cx18_card_gotview_dvd3, &cx18_card_hvr1600_s5h1411 }; const struct cx18_card *cx18_get_card(u16 index) { if (index >= ARRAY_SIZE(cx18_card_list)) return NULL; return cx18_card_list[index]; } int cx18_get_input(struct cx18 *cx, u16 index, struct v4l2_input *input) { const struct cx18_card_video_input *card_input = cx->card->video_inputs + index; static const char * const input_strs[] = { "Tuner 1", "S-Video 1", "S-Video 2", "Composite 1", "Composite 2", "Component 1" }; if (index >= cx->nof_inputs) return -EINVAL; input->index = index; strlcpy(input->name, input_strs[card_input->video_type - 1], sizeof(input->name)); input->type = (card_input->video_type == CX18_CARD_INPUT_VID_TUNER ? V4L2_INPUT_TYPE_TUNER : V4L2_INPUT_TYPE_CAMERA); input->audioset = (1 << cx->nof_audio_inputs) - 1; input->std = (input->type == V4L2_INPUT_TYPE_TUNER) ? 
cx->tuner_std : V4L2_STD_ALL; return 0; } int cx18_get_audio_input(struct cx18 *cx, u16 index, struct v4l2_audio *audio) { const struct cx18_card_audio_input *aud_input = cx->card->audio_inputs + index; static const char * const input_strs[] = { "Tuner 1", "Line In 1", "Line In 2" }; memset(audio, 0, sizeof(*audio)); if (index >= cx->nof_audio_inputs) return -EINVAL; strlcpy(audio->name, input_strs[aud_input->audio_type - 1], sizeof(audio->name)); audio->index = index; audio->capability = V4L2_AUDCAP_STEREO; return 0; }
gpl-2.0
johnparker007/mame
src/mame/drivers/orbit.cpp
14
11801
// license:BSD-3-Clause // copyright-holders:Stefan Jokisch /*************************************************************************** Atari Orbit Driver game 0 = beginner slow game 1 = beginner medium game 2 = beginner fast game 3 = intermediate slow game 4 = intermediate fast game 5 = expert fast shells only game 6 = expert slow game 7 = expert medium game 8 = expert fast game 9 = super expert Flip screen DIP doesn't work because it's not supported by the game. ***************************************************************************/ #include "emu.h" #include "includes/orbit.h" #include "cpu/m6800/m6800.h" #include "machine/watchdog.h" #include "speaker.h" #define MASTER_CLOCK XTAL(12'096'000) /************************************* * * Interrupts and timing * *************************************/ TIMER_DEVICE_CALLBACK_MEMBER(orbit_state::nmi_32v) { int scanline = param; int nmistate = (scanline & 32) && m_latch->q2_r(); m_maincpu->set_input_line(INPUT_LINE_NMI, nmistate ? ASSERT_LINE : CLEAR_LINE); } TIMER_CALLBACK_MEMBER(orbit_state::irq_off) { m_maincpu->set_input_line(0, CLEAR_LINE); } INTERRUPT_GEN_MEMBER(orbit_state::interrupt) { m_maincpu->set_input_line(0, ASSERT_LINE); m_irq_off_timer->adjust(m_screen->time_until_vblank_end()); } /************************************* * * Bit flags * *************************************/ WRITE_LINE_MEMBER(orbit_state::coin_lockout_w) { machine().bookkeeping().coin_lockout_w(0, !state); machine().bookkeeping().coin_lockout_w(1, !state); } /************************************* * * Address maps * *************************************/ void orbit_state::main_map(address_map &map) { map.global_mask(0x7fff); map(0x0000, 0x00ff).mirror(0x0700).ram(); map(0x0800, 0x0800).mirror(0x07ff).portr("P1"); map(0x1000, 0x1000).mirror(0x07ff).portr("P2"); map(0x1800, 0x1800).mirror(0x07ff).portr("DSW1"); map(0x2000, 0x2000).mirror(0x07ff).portr("DSW2"); map(0x2800, 0x2800).mirror(0x07ff).portr("BUTTONS"); map(0x3000, 
0x33bf).mirror(0x0400).ram().w(FUNC(orbit_state::playfield_w)).share(m_playfield_ram); map(0x33c0, 0x33ff).mirror(0x0400).ram().share(m_sprite_ram); map(0x3800, 0x3800).mirror(0x00ff).w(FUNC(orbit_state::note_w)); map(0x3900, 0x3900).mirror(0x00ff).w(FUNC(orbit_state::noise_amp_w)); map(0x3a00, 0x3a00).mirror(0x00ff).w(FUNC(orbit_state::note_amp_w)); map(0x3c00, 0x3c0f).mirror(0x00f0).w(m_latch, FUNC(f9334_device::write_a0)); map(0x3e00, 0x3e00).mirror(0x00ff).w(FUNC(orbit_state::noise_rst_w)); map(0x3f00, 0x3f00).mirror(0x00ff).w("watchdog", FUNC(watchdog_timer_device::reset_w)); map(0x6000, 0x7fff).rom(); } /************************************* * * Port definitions * *************************************/ static INPUT_PORTS_START( orbit ) PORT_START("P1") /* 0800 */ PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_UNUSED ) PORT_BIT( 0x02, IP_ACTIVE_LOW, IPT_BUTTON2 ) PORT_PLAYER(1) PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_JOYSTICK_LEFT ) PORT_PLAYER(1) /* actually buttons */ PORT_BIT( 0x08, IP_ACTIVE_LOW, IPT_JOYSTICK_RIGHT ) PORT_PLAYER(1) PORT_BIT( 0x10, IP_ACTIVE_LOW, IPT_BUTTON3 ) PORT_PLAYER(1) PORT_BIT( 0x20, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_PLAYER(1) PORT_BIT( 0x40, IP_ACTIVE_LOW, IPT_TILT ) PORT_BIT( 0x80, IP_ACTIVE_LOW, IPT_COIN2 ) PORT_START("P2") /* 1000 */ PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_UNUSED ) PORT_BIT( 0x02, IP_ACTIVE_LOW, IPT_BUTTON2 ) PORT_PLAYER(2) PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_JOYSTICK_LEFT ) PORT_PLAYER(2) /* actually buttons */ PORT_BIT( 0x08, IP_ACTIVE_LOW, IPT_JOYSTICK_RIGHT ) PORT_PLAYER(2) PORT_BIT( 0x10, IP_ACTIVE_LOW, IPT_BUTTON3 ) PORT_PLAYER(2) PORT_BIT( 0x20, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_PLAYER(2) PORT_SERVICE( 0x40, IP_ACTIVE_LOW ) PORT_BIT( 0x80, IP_ACTIVE_LOW, IPT_COIN1 ) PORT_START("DSW1") /* 1800 */ PORT_DIPNAME( 0x07, 0x00, "Play Time Per Credit" ) PORT_DIPLOCATION("DSW1:1,2,3") PORT_DIPSETTING( 0x00, "0:30" ) PORT_DIPSETTING( 0x01, "1:00" ) PORT_DIPSETTING( 0x02, "1:30" ) PORT_DIPSETTING( 0x03, "2:00" ) PORT_DIPSETTING( 0x04, 
"2:30" ) PORT_DIPSETTING( 0x05, "3:00" ) PORT_DIPSETTING( 0x06, "3:30" ) PORT_DIPSETTING( 0x07, "4:00" ) PORT_DIPNAME( 0x18, 0x00, DEF_STR( Language ) ) PORT_DIPLOCATION("DSW1:4,5") PORT_DIPSETTING( 0x00, DEF_STR( English ) ) PORT_DIPSETTING( 0x08, DEF_STR( Spanish ) ) PORT_DIPSETTING( 0x10, DEF_STR( French ) ) PORT_DIPSETTING( 0x18, DEF_STR( German ) ) PORT_DIPNAME( 0x20, 0x00, DEF_STR( Free_Play )) PORT_DIPLOCATION("DSW1:6") PORT_DIPSETTING( 0x00, DEF_STR( Off )) PORT_DIPSETTING( 0x20, DEF_STR( On )) PORT_DIPUNUSED_DIPLOC( 0x40, 0x00, "DSW1:7" ) PORT_DIPUNUSED_DIPLOC( 0x80, 0x00, "DSW1:8" ) PORT_START("DSW2") /* 2000 */ PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Game Reset") PORT_CODE(KEYCODE_PLUS_PAD) PORT_BIT( 0x02, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Game 9") PORT_CODE(KEYCODE_9_PAD) PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Game 8") PORT_CODE(KEYCODE_8_PAD) PORT_DIPNAME( 0x08, 0x00, DEF_STR( Flip_Screen )) PORT_DIPSETTING( 0x00, DEF_STR( Off )) PORT_DIPSETTING( 0x08, DEF_STR( On )) PORT_BIT( 0x10, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Heat Reset") PORT_CODE(KEYCODE_ENTER_PAD) PORT_DIPNAME( 0x20, 0x20, "NEXT TEST" ) /* should be off */ PORT_DIPSETTING( 0x20, DEF_STR( Off )) PORT_DIPSETTING( 0x00, DEF_STR( On )) PORT_DIPNAME( 0x40, 0x40, "DIAG TEST" ) /* should be off */ PORT_DIPSETTING( 0x40, DEF_STR( Off )) PORT_DIPSETTING( 0x00, DEF_STR( On )) PORT_BIT( 0x80, IP_ACTIVE_HIGH, IPT_CUSTOM ) PORT_VBLANK("screen") PORT_START("BUTTONS") /* 2800 */ PORT_BIT( 0x01, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Game 7 / Strong Gravity") PORT_CODE(KEYCODE_7_PAD) PORT_BIT( 0x02, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Game 6 / Stars") PORT_CODE(KEYCODE_6_PAD) PORT_BIT( 0x04, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Game 5 / Unlimited Supplies") PORT_CODE(KEYCODE_5_PAD) PORT_BIT( 0x08, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Game 4 / Space Stations") PORT_CODE(KEYCODE_4_PAD) PORT_BIT( 0x10, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Game 3 / Black 
Hole") PORT_CODE(KEYCODE_3_PAD) PORT_BIT( 0x20, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Game 2 / Zero Gravity") PORT_CODE(KEYCODE_2_PAD) PORT_BIT( 0x40, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Game 1 / Negative Gravity") PORT_CODE(KEYCODE_1_PAD) PORT_BIT( 0x80, IP_ACTIVE_LOW, IPT_BUTTON1 ) PORT_NAME("Game 0 / Bounce Back") PORT_CODE(KEYCODE_0_PAD) INPUT_PORTS_END /************************************* * * Graphics layouts * *************************************/ static const gfx_layout orbit_full_sprite_layout = { 8, 32, RGN_FRAC(1,1), 1, { 0 }, { STEP8(0,1) }, { STEP32(0,8) }, 0x100 }; static const gfx_layout orbit_upper_sprite_layout = { 8, 16, RGN_FRAC(1,1), 1, { 0 }, { STEP8(0,1) }, { STEP16(0,8) }, 0x100 }; static const gfx_layout orbit_lower_sprite_layout = { 8, 16, RGN_FRAC(1,1), 1, { 0 }, { STEP8(0,1) }, { STEP16(0x80,8) }, 0x100 }; static const gfx_layout orbit_tile_layout = { 8, 8, RGN_FRAC(1,1), 1, { 0 }, { STEP8(0,1) }, { STEP8(0,8) }, 0x40 }; static GFXDECODE_START( gfx_orbit ) GFXDECODE_ENTRY( "gfx1", 0, orbit_full_sprite_layout, 0, 1 ) GFXDECODE_ENTRY( "gfx1", 0, orbit_upper_sprite_layout, 0, 1 ) GFXDECODE_ENTRY( "gfx1", 0, orbit_lower_sprite_layout, 0, 1 ) GFXDECODE_SCALE( "gfx2", 0, orbit_tile_layout, 0, 1, 2, 2 ) GFXDECODE_END /************************************* * * Machine setup * *************************************/ void orbit_state::machine_start() { m_irq_off_timer = machine().scheduler().timer_alloc(timer_expired_delegate(FUNC(orbit_state::irq_off), this)); save_item(NAME(m_flip_screen)); } void orbit_state::machine_reset() { m_flip_screen = 0; } /************************************* * * Machine drivers * *************************************/ void orbit_state::orbit(machine_config &config) { /* basic machine hardware */ M6800(config, m_maincpu, MASTER_CLOCK / 16); m_maincpu->set_addrmap(AS_PROGRAM, &orbit_state::main_map); m_maincpu->set_vblank_int("screen", FUNC(orbit_state::interrupt)); TIMER(config, 
"32v").configure_scanline(FUNC(orbit_state::nmi_32v), "screen", 0, 32); F9334(config, m_latch); // M6 /* BIT0 => UNUSED */ /* BIT1 => LOCKOUT */ /* BIT2 => NMI ENABLE */ /* BIT3 => HEAT RST LED */ /* BIT4 => PANEL BUS OC */ /* BIT5 => PANEL STROBE */ /* BIT6 => HYPER LED */ /* BIT7 => WARNING SND */ m_latch->q_out_cb<1>().set(FUNC(orbit_state::coin_lockout_w)); m_latch->q_out_cb<3>().set_output("led0"); m_latch->q_out_cb<6>().set_output("led1"); m_latch->q_out_cb<7>().set(m_discrete, FUNC(discrete_device::write_line<ORBIT_WARNING_EN>)); WATCHDOG_TIMER(config, "watchdog"); /* video hardware */ SCREEN(config, m_screen, SCREEN_TYPE_RASTER); m_screen->set_raw(MASTER_CLOCK*2, 384*2, 0, 256*2, 261*2, 0, 240*2); m_screen->set_screen_update(FUNC(orbit_state::screen_update)); m_screen->set_palette(m_palette); GFXDECODE(config, m_gfxdecode, m_palette, gfx_orbit); PALETTE(config, m_palette, palette_device::MONOCHROME); /* sound hardware */ SPEAKER(config, "lspeaker").front_left(); SPEAKER(config, "rspeaker").front_right(); DISCRETE(config, m_discrete, orbit_discrete); m_discrete->add_route(0, "lspeaker", 1.0); m_discrete->add_route(1, "rspeaker", 1.0); } /************************************* * * ROM definitions * *************************************/ ROM_START( orbit ) ROM_REGION( 0x10000, "maincpu", 0 ) ROM_LOAD_NIB_LOW ( "033701.h2", 0x6800, 0x400, CRC(6de43b85) SHA1(1643972f45d3a0dd6540158c575cd84cee2b0c9a) ) ROM_LOAD_NIB_HIGH( "033693.l2", 0x6800, 0x400, CRC(8878409e) SHA1(a14e0161705bbc230f0aec1837ebc41d62178368) ) ROM_LOAD_NIB_LOW ( "033702.h1", 0x6C00, 0x400, CRC(8166bdcb) SHA1(b7ae6cd46b4aff6e1e1ec9273cf068dec4a8cd46) ) ROM_LOAD_NIB_HIGH( "033694.l1", 0x6C00, 0x400, CRC(5337a8ee) SHA1(1606bfa652bb5253c387f11c96d77d7a84983344) ) ROM_LOAD_NIB_LOW ( "033699.f2", 0x7000, 0x400, CRC(b498b36f) SHA1(5d150af193196fccd7c20ba731a020a9ae75e516) ) ROM_LOAD_NIB_HIGH( "033691.m2", 0x7000, 0x400, CRC(6cbabb21) SHA1(fffb3f7be73c72b4775d8cdfe174c75ae4389cba) ) ROM_LOAD_NIB_LOW ( 
"033700.f1", 0x7400, 0x400, CRC(9807c922) SHA1(b6b62530b24d967104f632540ef98f2b4780c3ed) ) ROM_LOAD_NIB_HIGH( "033692.m1", 0x7400, 0x400, CRC(96167d1b) SHA1(6f272b2f1b30aa94f51ea5710f4114bfdea19f2c) ) ROM_LOAD_NIB_LOW ( "033697.e2", 0x7800, 0x400, CRC(19ccf0dc) SHA1(7d12c4985bd0a25ef518246faf2849e5a0cf600b) ) ROM_LOAD_NIB_HIGH( "033689.n2", 0x7800, 0x400, CRC(ea3b70c1) SHA1(5e985fed057f362deaeb5e4049c4e8c1d449d6e1) ) ROM_LOAD_NIB_LOW ( "033698.e1", 0x7C00, 0x400, CRC(356a7c32) SHA1(a3496c0f9d9f3e2e0b452cdc0e908dc93d179990) ) ROM_RELOAD( 0xFC00, 0x400 ) ROM_LOAD_NIB_HIGH( "033690.n1", 0x7C00, 0x400, CRC(f756ebd4) SHA1(4e473541b712078c6a81901714a6243de348e543) ) ROM_RELOAD( 0xFC00, 0x400 ) ROM_REGION( 0x1000, "gfx1", 0 ) /* sprites */ ROM_LOAD( "033712.b7", 0x0000, 0x800, CRC(cfd43bf2) SHA1(dbca0da6ed355aac921bae5adeef2f384f5fa2c3) ) ROM_LOAD( "033713.d7", 0x0800, 0x800, CRC(5ac89f4d) SHA1(747889b33cd83510a640e68fb4581a3e881c43a3) ) ROM_REGION( 0x200, "gfx2", 0 ) /* tiles */ ROM_LOAD( "033711.a7", 0x0000, 0x200, CRC(9987174a) SHA1(d2117b6e6d64c29aef8ad8c94256baea493bce5c) ) ROM_REGION( 0x100, "proms", 0 ) /* sync, unused */ ROM_LOAD( "033688.p6", 0x0000, 0x100, CRC(ee66ddba) SHA1(5b9ae4cbf019375c8d54528b69280413c641c4f2) ) ROM_END /************************************* * * Game drivers * *************************************/ GAME( 1978, orbit, 0, orbit, orbit, orbit_state, empty_init, 0, "Atari", "Orbit", MACHINE_SUPPORTS_SAVE )
gpl-2.0
bigsupersquid/android_kernel_lge_msm7x27-3.0.x
arch/arm/mach-msm/lge/board-alessi-snd.c
14
6365
/* linux/arch/arm/mach-msm/lge/board-alessi-snd.c * * Copyright (C) 2010 LGE. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <mach/board.h> #include "board-alessi.h" #define SND(desc, num) { .name = #desc, .id = num } static struct snd_endpoint snd_endpoints_list[] = { #if 0 SND(HANDSET, 0), SND(MONO_HEADSET, 2), SND(HEADSET, 3), SND(SPEAKER, 6), SND(TTY_HEADSET, 8), SND(TTY_VCO, 9), SND(TTY_HCO, 10), SND(BT, 12), SND(IN_S_SADC_OUT_HANDSET, 16), SND(IN_S_SADC_OUT_SPEAKER_PHONE, 25), SND(CURRENT, 27), #else /* LGE_CHANGE_S, [junyoub.an] , 2010-02-12, Define sound device*/ SND(HANDSET_LOOPBACK,5), SND(HANDSET, 6), SND(HEADSET_LOOPBACK, 1), SND(HEADSET, 2), SND(HEADSET_STEREO, 3), SND(SPEAKER, 0), SND(SPEAKER_IN_CALL, 7), SND(SPEAKER_RING, 8), SND(HEADSET_AND_SPEAKER, 8), SND(FM_HEADSET, 10), SND(FM_SPEAKER, 11), SND(BT, 13), SND(TTY_HEADSET, 15), SND(TTY_VCO, 16), SND(TTY_HCO, 17), SND(TTY_HCO_SPEAKER, 18), SND(HANDSET_VR, 20), SND(HEADSET_VR, 21), SND(BT_VR, 23), SND(CURRENT, 30), /* LGE_CHANGE_E, [junyoub.an] , 2010-02-12, Define sound device*/ #endif }; #undef SND static struct msm_snd_endpoints msm_device_snd_endpoints = { .endpoints = snd_endpoints_list, .num = sizeof(snd_endpoints_list) / sizeof(struct snd_endpoint) }; struct platform_device msm_device_snd = { .name = "msm_snd", .id = -1, .dev = { .platform_data = &msm_device_snd_endpoints }, }; #define DEC0_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ 
(1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC1_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC2_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC3_FORMAT ((1<<MSM_ADSP_CODEC_MP3)| \ (1<<MSM_ADSP_CODEC_AAC)|(1<<MSM_ADSP_CODEC_WMA)| \ (1<<MSM_ADSP_CODEC_WMAPRO)|(1<<MSM_ADSP_CODEC_AMRWB)| \ (1<<MSM_ADSP_CODEC_AMRNB)|(1<<MSM_ADSP_CODEC_WAV)| \ (1<<MSM_ADSP_CODEC_ADPCM)|(1<<MSM_ADSP_CODEC_YADPCM)| \ (1<<MSM_ADSP_CODEC_EVRC)|(1<<MSM_ADSP_CODEC_QCELP)) #define DEC4_FORMAT (1<<MSM_ADSP_CODEC_MIDI) static unsigned int dec_concurrency_table[] = { /* Audio LP */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DMA)), 0, 0, 0, 0, /* Concurrency 1 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 2 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 3 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), 
(DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 4 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 5 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_TUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), /* Concurrency 6 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), 0, 0, 0, 0, /* Concurrency 7 */ (DEC0_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC1_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC2_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC3_FORMAT|(1<<MSM_ADSP_MODE_NONTUNNEL)|(1<<MSM_ADSP_OP_DM)), (DEC4_FORMAT), }; #define DEC_INFO(name, queueid, decid, nr_codec) { .module_name = name, \ .module_queueid = queueid, .module_decid = decid, \ .nr_codec_support = nr_codec} static struct msm_adspdec_info dec_info_list[] = { DEC_INFO("AUDPLAY0TASK", 13, 0, 11), /* AudPlay0BitStreamCtrlQueue */ DEC_INFO("AUDPLAY1TASK", 14, 1, 11), /* AudPlay1BitStreamCtrlQueue */ DEC_INFO("AUDPLAY2TASK", 15, 2, 11), /* AudPlay2BitStreamCtrlQueue */ DEC_INFO("AUDPLAY3TASK", 16, 3, 11), /* AudPlay3BitStreamCtrlQueue */ DEC_INFO("AUDPLAY4TASK", 17, 4, 1), /* AudPlay4BitStreamCtrlQueue */ }; static struct msm_adspdec_database msm_device_adspdec_database = { .num_dec = ARRAY_SIZE(dec_info_list), .num_concurrency_support = (ARRAY_SIZE(dec_concurrency_table) / \ ARRAY_SIZE(dec_info_list)), .dec_concurrency_table = dec_concurrency_table, .dec_info_list = 
dec_info_list, }; struct platform_device msm_device_adspdec = { .name = "msm_adspdec", .id = -1, .dev = { .platform_data = &msm_device_adspdec_database }, };
gpl-2.0
wolf-feathers66/wolf.kernel
arch/m68k/mac/config.c
14
26081
/* * linux/arch/m68k/mac/config.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ /* * Miscellaneous linux stuff */ #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/tty.h> #include <linux/console.h> #include <linux/interrupt.h> /* keyb */ #include <linux/random.h> #include <linux/delay.h> /* keyb */ #include <linux/init.h> #include <linux/vt_kern.h> #include <linux/platform_device.h> #include <linux/adb.h> #include <linux/cuda.h> #define BOOTINFO_COMPAT_1_0 #include <asm/setup.h> #include <asm/bootinfo.h> #include <asm/system.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/rtc.h> #include <asm/machdep.h> #include <asm/macintosh.h> #include <asm/macints.h> #include <asm/machw.h> #include <asm/mac_iop.h> #include <asm/mac_via.h> #include <asm/mac_oss.h> #include <asm/mac_psc.h> /* Mac bootinfo struct */ struct mac_booter_data mac_bi_data; /* The phys. video addr. 
- might be bogus on some machines */ static unsigned long mac_orig_videoaddr; /* Mac specific timer functions */ extern unsigned long mac_gettimeoffset(void); extern int mac_hwclk(int, struct rtc_time *); extern int mac_set_clock_mmss(unsigned long); extern void iop_preinit(void); extern void iop_init(void); extern void via_init(void); extern void via_init_clock(irq_handler_t func); extern void via_flush_cache(void); extern void oss_init(void); extern void psc_init(void); extern void baboon_init(void); extern void mac_mksound(unsigned int, unsigned int); static void mac_get_model(char *str); static void mac_identify(void); static void mac_report_hardware(void); #ifdef CONFIG_EARLY_PRINTK asmlinkage void __init mac_early_print(const char *s, unsigned n); static void __init mac_early_cons_write(struct console *con, const char *s, unsigned n) { mac_early_print(s, n); } static struct console __initdata mac_early_cons = { .name = "early", .write = mac_early_cons_write, .flags = CON_PRINTBUFFER | CON_BOOT, .index = -1 }; int __init mac_unregister_early_cons(void) { /* mac_early_print can't be used after init sections are discarded */ return unregister_console(&mac_early_cons); } late_initcall(mac_unregister_early_cons); #endif static void __init mac_sched_init(irq_handler_t vector) { via_init_clock(vector); } /* * Parse a Macintosh-specific record in the bootinfo */ int __init mac_parse_bootinfo(const struct bi_record *record) { int unknown = 0; const u_long *data = record->data; switch (record->tag) { case BI_MAC_MODEL: mac_bi_data.id = *data; break; case BI_MAC_VADDR: mac_bi_data.videoaddr = *data; break; case BI_MAC_VDEPTH: mac_bi_data.videodepth = *data; break; case BI_MAC_VROW: mac_bi_data.videorow = *data; break; case BI_MAC_VDIM: mac_bi_data.dimensions = *data; break; case BI_MAC_VLOGICAL: mac_bi_data.videological = VIDEOMEMBASE + (*data & ~VIDEOMEMMASK); mac_orig_videoaddr = *data; break; case BI_MAC_SCCBASE: mac_bi_data.sccbase = *data; break; case BI_MAC_BTIME: 
mac_bi_data.boottime = *data; break; case BI_MAC_GMTBIAS: mac_bi_data.gmtbias = *data; break; case BI_MAC_MEMSIZE: mac_bi_data.memsize = *data; break; case BI_MAC_CPUID: mac_bi_data.cpuid = *data; break; case BI_MAC_ROMBASE: mac_bi_data.rombase = *data; break; default: unknown = 1; break; } return unknown; } /* * Flip into 24bit mode for an instant - flushes the L2 cache card. We * have to disable interrupts for this. Our IRQ handlers will crap * themselves if they take an IRQ in 24bit mode! */ static void mac_cache_card_flush(int writeback) { unsigned long flags; local_irq_save(flags); via_flush_cache(); local_irq_restore(flags); } void __init config_mac(void) { if (!MACH_IS_MAC) printk(KERN_ERR "ERROR: no Mac, but config_mac() called!!\n"); mach_sched_init = mac_sched_init; mach_init_IRQ = mac_init_IRQ; mach_get_model = mac_get_model; mach_gettimeoffset = mac_gettimeoffset; mach_hwclk = mac_hwclk; mach_set_clock_mmss = mac_set_clock_mmss; mach_reset = mac_reset; mach_halt = mac_poweroff; mach_power_off = mac_poweroff; mach_max_dma_address = 0xffffffff; #if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE) mach_beep = mac_mksound; #endif #ifdef CONFIG_EARLY_PRINTK register_console(&mac_early_cons); #endif /* * Determine hardware present */ mac_identify(); mac_report_hardware(); /* * AFAIK only the IIci takes a cache card. The IIfx has onboard * cache ... someone needs to figure out how to tell if it's on or * not. */ if (macintosh_config->ident == MAC_MODEL_IICI || macintosh_config->ident == MAC_MODEL_IIFX) mach_l2_flush = mac_cache_card_flush; } /* * Macintosh Table: hardcoded model configuration data. * * Much of this was defined by Alan, based on who knows what docs. * I've added a lot more, and some of that was pure guesswork based * on hardware pages present on the Mac web site. Possibly wildly * inaccurate, so look here if a new Mac model won't run. 
Example: if * a Mac crashes immediately after the VIA1 registers have been dumped * to the screen, it probably died attempting to read DirB on a RBV. * Meaning it should have MAC_VIA_IICI here :-) */ struct mac_model *macintosh_config; EXPORT_SYMBOL(macintosh_config); static struct mac_model mac_data_table[] = { /* * We'll pretend to be a Macintosh II, that's pretty safe. */ { .ident = MAC_MODEL_II, .name = "Unknown", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_IWM, }, /* * Original Mac II hardware */ { .ident = MAC_MODEL_II, .name = "II", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_IWM, }, { .ident = MAC_MODEL_IIX, .name = "IIx", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_IICX, .name = "IIcx", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_SE30, .name = "SE/30", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_II, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Weirdified Mac II hardware - all subtly different. Gee thanks * Apple. 
All these boxes seem to have VIA2 in a different place to * the Mac II (+1A000 rather than +4000) * CSA: see http://developer.apple.com/technotes/hw/hw_09.html */ { .ident = MAC_MODEL_IICI, .name = "IIci", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_IIFX, .name = "IIfx", .adb_type = MAC_ADB_IOP, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_IOP, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_IOP, }, { .ident = MAC_MODEL_IISI, .name = "IIsi", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_IIVI, .name = "IIvi", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_IIVX, .name = "IIvx", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Classic models (guessing: similar to SE/30? Nope, similar to LC...) */ { .ident = MAC_MODEL_CLII, .name = "Classic II", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_CCL, .name = "Color Classic", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_CCLII, .name = "Color Classic II", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Some Mac LC machines. 
Basically the same as the IIci, ADB like IIsi */ { .ident = MAC_MODEL_LC, .name = "LC", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_LCII, .name = "LC II", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_LCIII, .name = "LC III", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Quadra. Video is at 0xF9000000, via is like a MacII. We label it * differently as some of the stuff connected to VIA2 seems different. * Better SCSI chip and onboard ethernet using a NatSemi SONIC except * the 660AV and 840AV which use an AMD 79C940 (MACE). * The 700, 900 and 950 have some I/O chips in the wrong place to * confuse us. The 840AV has a SCSI location of its own (same as * the 660AV). 
*/ { .ident = MAC_MODEL_Q605, .name = "Quadra 605", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q605_ACC, .name = "Quadra 605", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q610, .name = "Quadra 610", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q630, .name = "Quadra 630", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .ide_type = MAC_IDE_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q650, .name = "Quadra 650", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, /* The Q700 does have a NS Sonic */ { .ident = MAC_MODEL_Q700, .name = "Quadra 700", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA2, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q800, .name = "Quadra 800", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_Q840, .name = "Quadra 840AV", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA3, .scc_type = MAC_SCC_PSC, .ether_type = MAC_ETHER_MACE, 
.nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_AV, }, { .ident = MAC_MODEL_Q900, .name = "Quadra 900", .adb_type = MAC_ADB_IOP, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA2, .scc_type = MAC_SCC_IOP, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_IOP, }, { .ident = MAC_MODEL_Q950, .name = "Quadra 950", .adb_type = MAC_ADB_IOP, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA2, .scc_type = MAC_SCC_IOP, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_IOP, }, /* * Performa - more LC type machines */ { .ident = MAC_MODEL_P460, .name = "Performa 460", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_P475, .name = "Performa 475", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_P475F, .name = "Performa 475", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_P520, .name = "Performa 520", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_P550, .name = "Performa 550", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* These have the comm slot, and therefore possibly SONIC ethernet */ { .ident = MAC_MODEL_P575, .name = "Performa 575", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_II, .ether_type = MAC_ETHER_SONIC, .nubus_type = 
MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_P588, .name = "Performa 588", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .ide_type = MAC_IDE_QUADRA, .scc_type = MAC_SCC_II, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_TV, .name = "TV", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_P600, .name = "Performa 600", .adb_type = MAC_ADB_IISI, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_II, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Centris - just guessing again; maybe like Quadra. * The C610 may or may not have SONIC. We probe to make sure. */ { .ident = MAC_MODEL_C610, .name = "Centris 610", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_C650, .name = "Centris 650", .adb_type = MAC_ADB_II, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR1, }, { .ident = MAC_MODEL_C660, .name = "Centris 660AV", .adb_type = MAC_ADB_CUDA, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_QUADRA3, .scc_type = MAC_SCC_PSC, .ether_type = MAC_ETHER_MACE, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_AV, }, /* * The PowerBooks all the same "Combo" custom IC for SCSI and SCC * and a PMU (in two variations?) for ADB. Most of them use the * Quadra-style VIAs. A few models also have IDE from hell. 
*/ { .ident = MAC_MODEL_PB140, .name = "PowerBook 140", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB145, .name = "PowerBook 145", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB150, .name = "PowerBook 150", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .ide_type = MAC_IDE_PB, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB160, .name = "PowerBook 160", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB165, .name = "PowerBook 165", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB165C, .name = "PowerBook 165c", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB170, .name = "PowerBook 170", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB180, .name = "PowerBook 180", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB180C, .name = "PowerBook 180c", .adb_type = MAC_ADB_PB1, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = 
MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB190, .name = "PowerBook 190", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .ide_type = MAC_IDE_BABOON, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB520, .name = "PowerBook 520", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_QUADRA, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .ether_type = MAC_ETHER_SONIC, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * PowerBook Duos are pretty much like normal PowerBooks * All of these probably have onboard SONIC in the Dock which * means we'll have to probe for it eventually. */ { .ident = MAC_MODEL_PB210, .name = "PowerBook Duo 210", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB230, .name = "PowerBook Duo 230", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB250, .name = "PowerBook Duo 250", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB270C, .name = "PowerBook Duo 270c", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB280, .name = "PowerBook Duo 280", .adb_type = MAC_ADB_PB2, .via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, { .ident = MAC_MODEL_PB280C, .name = "PowerBook Duo 280c", .adb_type = MAC_ADB_PB2, 
.via_type = MAC_VIA_IICI, .scsi_type = MAC_SCSI_OLD, .scc_type = MAC_SCC_QUADRA, .nubus_type = MAC_NUBUS, .floppy_type = MAC_FLOPPY_SWIM_ADDR2, }, /* * Other stuff? */ { .ident = -1 } }; static struct resource scc_a_rsrcs[] = { { .flags = IORESOURCE_MEM }, { .flags = IORESOURCE_IRQ }, }; static struct resource scc_b_rsrcs[] = { { .flags = IORESOURCE_MEM }, { .flags = IORESOURCE_IRQ }, }; struct platform_device scc_a_pdev = { .name = "scc", .id = 0, .num_resources = ARRAY_SIZE(scc_a_rsrcs), .resource = scc_a_rsrcs, }; EXPORT_SYMBOL(scc_a_pdev); struct platform_device scc_b_pdev = { .name = "scc", .id = 1, .num_resources = ARRAY_SIZE(scc_b_rsrcs), .resource = scc_b_rsrcs, }; EXPORT_SYMBOL(scc_b_pdev); static void __init mac_identify(void) { struct mac_model *m; /* Penguin data useful? */ int model = mac_bi_data.id; if (!model) { /* no bootinfo model id -> NetBSD booter was used! */ /* XXX FIXME: breaks for model > 31 */ model = (mac_bi_data.cpuid >> 2) & 63; printk(KERN_WARNING "No bootinfo model ID, using cpuid instead " "(obsolete bootloader?)\n"); } macintosh_config = mac_data_table; for (m = macintosh_config; m->ident != -1; m++) { if (m->ident == model) { macintosh_config = m; break; } } /* Set up serial port resources for the console initcall. */ scc_a_rsrcs[0].start = (resource_size_t) mac_bi_data.sccbase + 2; scc_a_rsrcs[0].end = scc_a_rsrcs[0].start; scc_b_rsrcs[0].start = (resource_size_t) mac_bi_data.sccbase; scc_b_rsrcs[0].end = scc_b_rsrcs[0].start; switch (macintosh_config->scc_type) { case MAC_SCC_PSC: scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_MAC_SCC_A; scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC_B; break; default: /* On non-PSC machines, the serial ports share an IRQ. 
*/ if (macintosh_config->ident == MAC_MODEL_IIFX) { scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_MAC_SCC; scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_MAC_SCC; } else { scc_a_rsrcs[1].start = scc_a_rsrcs[1].end = IRQ_AUTO_4; scc_b_rsrcs[1].start = scc_b_rsrcs[1].end = IRQ_AUTO_4; } break; } /* * We need to pre-init the IOPs, if any. Otherwise * the serial console won't work if the user had * the serial ports set to "Faster" mode in MacOS. */ iop_preinit(); printk(KERN_INFO "Detected Macintosh model: %d\n", model); /* * Report booter data: */ printk(KERN_DEBUG " Penguin bootinfo data:\n"); printk(KERN_DEBUG " Video: addr 0x%lx " "row 0x%lx depth %lx dimensions %ld x %ld\n", mac_bi_data.videoaddr, mac_bi_data.videorow, mac_bi_data.videodepth, mac_bi_data.dimensions & 0xFFFF, mac_bi_data.dimensions >> 16); printk(KERN_DEBUG " Videological 0x%lx phys. 0x%lx, SCC at 0x%lx\n", mac_bi_data.videological, mac_orig_videoaddr, mac_bi_data.sccbase); printk(KERN_DEBUG " Boottime: 0x%lx GMTBias: 0x%lx\n", mac_bi_data.boottime, mac_bi_data.gmtbias); printk(KERN_DEBUG " Machine ID: %ld CPUid: 0x%lx memory size: 0x%lx\n", mac_bi_data.id, mac_bi_data.cpuid, mac_bi_data.memsize); iop_init(); via_init(); oss_init(); psc_init(); baboon_init(); #ifdef CONFIG_ADB_CUDA find_via_cuda(); #endif } static void __init mac_report_hardware(void) { printk(KERN_INFO "Apple Macintosh %s\n", macintosh_config->name); } static void mac_get_model(char *str) { strcpy(str, "Macintosh "); strcat(str, macintosh_config->name); } static struct resource swim_rsrc = { .flags = IORESOURCE_MEM }; static struct platform_device swim_pdev = { .name = "swim", .id = -1, .num_resources = 1, .resource = &swim_rsrc, }; static struct platform_device esp_0_pdev = { .name = "mac_esp", .id = 0, }; static struct platform_device esp_1_pdev = { .name = "mac_esp", .id = 1, }; static struct platform_device sonic_pdev = { .name = "macsonic", .id = -1, }; static struct platform_device mace_pdev = { .name = "macmace", .id = 
-1, }; int __init mac_platform_init(void) { u8 *swim_base; if (!MACH_IS_MAC) return -ENODEV; /* * Serial devices */ platform_device_register(&scc_a_pdev); platform_device_register(&scc_b_pdev); /* * Floppy device */ switch (macintosh_config->floppy_type) { case MAC_FLOPPY_SWIM_ADDR1: swim_base = (u8 *)(VIA1_BASE + 0x1E000); break; case MAC_FLOPPY_SWIM_ADDR2: swim_base = (u8 *)(VIA1_BASE + 0x16000); break; default: swim_base = NULL; break; } if (swim_base) { swim_rsrc.start = (resource_size_t) swim_base, swim_rsrc.end = (resource_size_t) swim_base + 0x2000, platform_device_register(&swim_pdev); } /* * SCSI device(s) */ switch (macintosh_config->scsi_type) { case MAC_SCSI_QUADRA: case MAC_SCSI_QUADRA3: platform_device_register(&esp_0_pdev); break; case MAC_SCSI_QUADRA2: platform_device_register(&esp_0_pdev); if ((macintosh_config->ident == MAC_MODEL_Q900) || (macintosh_config->ident == MAC_MODEL_Q950)) platform_device_register(&esp_1_pdev); break; } /* * Ethernet device */ switch (macintosh_config->ether_type) { case MAC_ETHER_SONIC: platform_device_register(&sonic_pdev); break; case MAC_ETHER_MACE: platform_device_register(&mace_pdev); break; } return 0; } arch_initcall(mac_platform_init);
gpl-2.0
sh028044/gdb-7.6.2
gdb/xtensa-linux-nat.c
14
9612
/* Xtensa GNU/Linux native support.

   Copyright (C) 2007-2013 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "gdb_string.h"
#include "frame.h"
#include "inferior.h"
#include "gdbcore.h"
#include "regcache.h"
#include "gdb_assert.h"
#include "target.h"
#include "linux-nat.h"

#include <stdint.h>
#include <sys/types.h>
#include <sys/param.h>
#include <signal.h>
#include <sys/user.h>
#include <sys/ioctl.h>
#include "gdb_wait.h"
#include <fcntl.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>

#include "gregset.h"
#include "xtensa-tdep.h"

/* Extended register set depends on hardware configs.
   Keeping these definitions separately allows to introduce
   hardware-specific overlays.  */
#include "xtensa-xtregs.c"

/* Return the ptrace thread id for PTID: the LWP id if one is set,
   otherwise fall back to the process id.  */
static int
get_thread_id (ptid_t ptid)
{
  int tid = TIDGET (ptid);

  if (0 == tid)
    tid = PIDGET (ptid);
  return tid;
}
#define GET_THREAD_ID(PTID) get_thread_id (PTID)

/* Collect register REGNUM from REGCACHE into the ELF gregset
   GREGSETP, or all general registers when REGNUM is -1.  */
void
fill_gregset (const struct regcache *regcache,
	      gdb_gregset_t *gregsetp, int regnum)
{
  int i;
  xtensa_elf_gregset_t *regs = (xtensa_elf_gregset_t *) gregsetp;
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  if (regnum == gdbarch_pc_regnum (gdbarch) || regnum == -1)
    regcache_raw_collect (regcache, gdbarch_pc_regnum (gdbarch), &regs->pc);
  if (regnum == gdbarch_ps_regnum (gdbarch) || regnum == -1)
    regcache_raw_collect (regcache, gdbarch_ps_regnum (gdbarch), &regs->ps);

  if (regnum == gdbarch_tdep (gdbarch)->wb_regnum || regnum == -1)
    regcache_raw_collect (regcache, gdbarch_tdep (gdbarch)->wb_regnum,
			  &regs->windowbase);
  if (regnum == gdbarch_tdep (gdbarch)->ws_regnum || regnum == -1)
    regcache_raw_collect (regcache, gdbarch_tdep (gdbarch)->ws_regnum,
			  &regs->windowstart);
  if (regnum == gdbarch_tdep (gdbarch)->lbeg_regnum || regnum == -1)
    regcache_raw_collect (regcache, gdbarch_tdep (gdbarch)->lbeg_regnum,
			  &regs->lbeg);
  if (regnum == gdbarch_tdep (gdbarch)->lend_regnum || regnum == -1)
    regcache_raw_collect (regcache, gdbarch_tdep (gdbarch)->lend_regnum,
			  &regs->lend);
  if (regnum == gdbarch_tdep (gdbarch)->lcount_regnum || regnum == -1)
    regcache_raw_collect (regcache, gdbarch_tdep (gdbarch)->lcount_regnum,
			  &regs->lcount);
  if (regnum == gdbarch_tdep (gdbarch)->sar_regnum || regnum == -1)
    regcache_raw_collect (regcache, gdbarch_tdep (gdbarch)->sar_regnum,
			  &regs->sar);

  /* Address registers: a single AR when REGNUM falls inside the AR
     range, or the whole bank when REGNUM is -1.  */
  if (regnum >= gdbarch_tdep (gdbarch)->ar_base
      && regnum < gdbarch_tdep (gdbarch)->ar_base
		  + gdbarch_tdep (gdbarch)->num_aregs)
    regcache_raw_collect (regcache, regnum,
			  &regs->ar[regnum
				    - gdbarch_tdep (gdbarch)->ar_base]);
  else if (regnum == -1)
    {
      for (i = 0; i < gdbarch_tdep (gdbarch)->num_aregs; ++i)
	regcache_raw_collect (regcache,
			      gdbarch_tdep (gdbarch)->ar_base + i,
			      &regs->ar[i]);
    }
}

/* Supply register REGNUM (or all general registers when REGNUM is -1)
   from the ELF gregset GREGSETP into REGCACHE.  Mirror image of
   fill_gregset above.  */
void
supply_gregset_reg (struct regcache *regcache,
		    const gdb_gregset_t *gregsetp, int regnum)
{
  int i;
  xtensa_elf_gregset_t *regs = (xtensa_elf_gregset_t *) gregsetp;
  struct gdbarch *gdbarch = get_regcache_arch (regcache);

  if (regnum == gdbarch_pc_regnum (gdbarch) || regnum == -1)
    regcache_raw_supply (regcache, gdbarch_pc_regnum (gdbarch), &regs->pc);
  if (regnum == gdbarch_ps_regnum (gdbarch) || regnum == -1)
    regcache_raw_supply (regcache, gdbarch_ps_regnum (gdbarch), &regs->ps);

  if (regnum == gdbarch_tdep (gdbarch)->wb_regnum || regnum == -1)
    regcache_raw_supply (regcache, gdbarch_tdep (gdbarch)->wb_regnum,
			 &regs->windowbase);
  if (regnum == gdbarch_tdep (gdbarch)->ws_regnum || regnum == -1)
    regcache_raw_supply (regcache, gdbarch_tdep (gdbarch)->ws_regnum,
			 &regs->windowstart);
  if (regnum == gdbarch_tdep (gdbarch)->lbeg_regnum || regnum == -1)
    regcache_raw_supply (regcache, gdbarch_tdep (gdbarch)->lbeg_regnum,
			 &regs->lbeg);
  if (regnum == gdbarch_tdep (gdbarch)->lend_regnum || regnum == -1)
    regcache_raw_supply (regcache, gdbarch_tdep (gdbarch)->lend_regnum,
			 &regs->lend);
  if (regnum == gdbarch_tdep (gdbarch)->lcount_regnum || regnum == -1)
    regcache_raw_supply (regcache, gdbarch_tdep (gdbarch)->lcount_regnum,
			 &regs->lcount);
  if (regnum == gdbarch_tdep (gdbarch)->sar_regnum || regnum == -1)
    regcache_raw_supply (regcache, gdbarch_tdep (gdbarch)->sar_regnum,
			 &regs->sar);

  if (regnum >= gdbarch_tdep (gdbarch)->ar_base
      && regnum < gdbarch_tdep (gdbarch)->ar_base
		  + gdbarch_tdep (gdbarch)->num_aregs)
    regcache_raw_supply (regcache, regnum,
			 &regs->ar[regnum
				   - gdbarch_tdep (gdbarch)->ar_base]);
  else if (regnum == -1)
    {
      for (i = 0; i < gdbarch_tdep (gdbarch)->num_aregs; ++i)
	regcache_raw_supply (regcache,
			     gdbarch_tdep (gdbarch)->ar_base + i,
			     &regs->ar[i]);
    }
}

/* Supply all general registers from GREGSETP into REGCACHE.  */
void
supply_gregset (struct regcache *regcache, const gdb_gregset_t *gregsetp)
{
  supply_gregset_reg (regcache, gregsetp, -1);
}

/* FP registers are handled through the extended register set on
   Xtensa, so the fpregset hooks are intentionally no-ops.  */
void
fill_fpregset (const struct regcache *regcache,
	       gdb_fpregset_t *fpregsetp, int regnum)
{
  return;
}

void
supply_fpregset (struct regcache *regcache,
		 const gdb_fpregset_t *fpregsetp)
{
  return;
}

/* Fetch greg-register(s) from process/thread TID
   and store value(s) in GDB's register array.  */
static void
fetch_gregs (struct regcache *regcache, int regnum)
{
  int tid = GET_THREAD_ID (inferior_ptid);
  /* FIX: was "const gdb_gregset_t regs" — ptrace writes into this
     object, and modifying a const-defined object is undefined
     behavior.  Also dropped an unused local ("int areg").  */
  gdb_gregset_t regs;

  if (ptrace (PTRACE_GETREGS, tid, 0, (long) &regs) < 0)
    {
      perror_with_name (_("Couldn't get registers"));
      return;
    }

  supply_gregset_reg (regcache, &regs, regnum);
}

/* Store greg-register(s) in GDB's register
   array into the process/thread specified by TID.  */
static void
store_gregs (struct regcache *regcache, int regnum)
{
  int tid = GET_THREAD_ID (inferior_ptid);
  gdb_gregset_t regs;

  /* Read-modify-write: fetch the full set first so registers not
     selected by REGNUM are written back unchanged.  */
  if (ptrace (PTRACE_GETREGS, tid, 0, (long) &regs) < 0)
    {
      perror_with_name (_("Couldn't get registers"));
      return;
    }

  fill_gregset (regcache, &regs, regnum);

  if (ptrace (PTRACE_SETREGS, tid, 0, (long) &regs) < 0)
    {
      perror_with_name (_("Couldn't write registers"));
      return;
    }
}

/* GDB register-number range occupied by the extended (TIE)
   registers; computed once in _initialize_xtensa_linux_nat.  */
static int xtreg_lo;
static int xtreg_high;

/* Fetch/Store Xtensa TIE registers.  Xtensa GNU/Linux PTRACE
   interface provides special requests for this.  */
static void
fetch_xtregs (struct regcache *regcache, int regnum)
{
  int tid = GET_THREAD_ID (inferior_ptid);
  const xtensa_regtable_t *ptr;
  char xtregs [XTENSA_ELF_XTREG_SIZE];

  if (ptrace (PTRACE_GETXTREGS, tid, 0, (long) &xtregs) < 0)
    perror_with_name (_("Couldn't get extended registers"));

  for (ptr = xtensa_regmap_table; ptr->name; ptr++)
    if (regnum == ptr->gdb_regnum || regnum == -1)
      regcache_raw_supply (regcache, ptr->gdb_regnum,
			   xtregs + ptr->ptrace_offset);
}

static void
store_xtregs (struct regcache *regcache, int regnum)
{
  int tid = GET_THREAD_ID (inferior_ptid);
  const xtensa_regtable_t *ptr;
  char xtregs [XTENSA_ELF_XTREG_SIZE];

  /* Read-modify-write, same as store_gregs.  */
  if (ptrace (PTRACE_GETXTREGS, tid, 0, (long) &xtregs) < 0)
    perror_with_name (_("Couldn't get extended registers"));

  for (ptr = xtensa_regmap_table; ptr->name; ptr++)
    if (regnum == ptr->gdb_regnum || regnum == -1)
      regcache_raw_collect (regcache, ptr->gdb_regnum,
			    xtregs + ptr->ptrace_offset);

  if (ptrace (PTRACE_SETXTREGS, tid, 0, (long) &xtregs) < 0)
    perror_with_name (_("Couldn't write extended registers"));
}

/* Target hook: fetch REGNUM (or everything when -1), dispatching to
   the greg or TIE path based on the register-number range.  */
void
xtensa_linux_fetch_inferior_registers (struct target_ops *ops,
				       struct regcache *regcache, int regnum)
{
  if (regnum == -1)
    {
      fetch_gregs (regcache, regnum);
      fetch_xtregs (regcache, regnum);
    }
  else if ((regnum < xtreg_lo) || (regnum > xtreg_high))
    fetch_gregs (regcache, regnum);
  else
    fetch_xtregs (regcache, regnum);
}

/* Target hook: store REGNUM (or everything when -1); dispatch mirrors
   the fetch hook above.  */
void
xtensa_linux_store_inferior_registers (struct target_ops *ops,
				       struct regcache *regcache, int regnum)
{
  if (regnum == -1)
    {
      store_gregs (regcache, regnum);
      store_xtregs (regcache, regnum);
    }
  else if ((regnum < xtreg_lo) || (regnum > xtreg_high))
    store_gregs (regcache, regnum);
  else
    store_xtregs (regcache, regnum);
}

void _initialize_xtensa_linux_nat (void);

void
_initialize_xtensa_linux_nat (void)
{
  struct target_ops *t;
  const xtensa_regtable_t *ptr;

  /* Calculate the number range for extended registers.  */
  xtreg_lo = 1000000000;
  xtreg_high = -1;
  for (ptr = xtensa_regmap_table; ptr->name; ptr++)
    {
      if (ptr->gdb_regnum < xtreg_lo)
	xtreg_lo = ptr->gdb_regnum;
      if (ptr->gdb_regnum > xtreg_high)
	xtreg_high = ptr->gdb_regnum;
    }

  /* Fill in the generic GNU/Linux methods.  */
  t = linux_target ();

  /* Add our register access methods.  */
  t->to_fetch_registers = xtensa_linux_fetch_inferior_registers;
  t->to_store_registers = xtensa_linux_store_inferior_registers;

  /* Register the target.  */
  linux_nat_add_target (t);
}
gpl-2.0
robacklin/ts7800
arch/powerpc/kernel/ptrace.c
14
13113
/* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Derived from "arch/m68k/kernel/ptrace.c" * Copyright (C) 1994 by Hamish Macdonald * Taken from linux/kernel/ptrace.c and modified for M680x0. * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds * * Modified by Cort Dougan (cort@hq.fsmlabs.com) * and Paul Mackerras (paulus@samba.org). * * This file is subject to the terms and conditions of the GNU General * Public License. See the file README.legal in the main directory of * this archive for more details. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/smp_lock.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/security.h> #include <linux/signal.h> #include <linux/seccomp.h> #include <linux/audit.h> #ifdef CONFIG_PPC32 #include <linux/module.h> #endif #include <asm/uaccess.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/system.h> #ifdef CONFIG_PPC64 #include "ptrace-common.h" #endif #ifdef CONFIG_PPC32 /* * Set of msr bits that gdb can change on behalf of a process. */ #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) #define MSR_DEBUGCHANGE 0 #else #define MSR_DEBUGCHANGE (MSR_SE | MSR_BE) #endif #endif /* CONFIG_PPC32 */ /* * does not yet catch signals sent when the child dies. * in exit.c or in signal.c. */ #ifdef CONFIG_PPC32 /* * Get contents of register REGNO in task TASK. */ static inline unsigned long get_reg(struct task_struct *task, int regno) { if (regno < sizeof(struct pt_regs) / sizeof(unsigned long) && task->thread.regs != NULL) return ((unsigned long *)task->thread.regs)[regno]; return (0); } /* * Write contents of register REGNO in task TASK. 
*/ static inline int put_reg(struct task_struct *task, int regno, unsigned long data) { if (regno <= PT_MQ && task->thread.regs != NULL) { if (regno == PT_MSR) data = (data & MSR_DEBUGCHANGE) | (task->thread.regs->msr & ~MSR_DEBUGCHANGE); ((unsigned long *)task->thread.regs)[regno] = data; return 0; } return -EIO; } #ifdef CONFIG_ALTIVEC /* * Get contents of AltiVec register state in task TASK */ static inline int get_vrregs(unsigned long __user *data, struct task_struct *task) { int i, j; if (!access_ok(VERIFY_WRITE, data, 133 * sizeof(unsigned long))) return -EFAULT; /* copy AltiVec registers VR[0] .. VR[31] */ for (i = 0; i < 32; i++) for (j = 0; j < 4; j++, data++) if (__put_user(task->thread.vr[i].u[j], data)) return -EFAULT; /* copy VSCR */ for (i = 0; i < 4; i++, data++) if (__put_user(task->thread.vscr.u[i], data)) return -EFAULT; /* copy VRSAVE */ if (__put_user(task->thread.vrsave, data)) return -EFAULT; return 0; } /* * Write contents of AltiVec register state into task TASK. */ static inline int set_vrregs(struct task_struct *task, unsigned long __user *data) { int i, j; if (!access_ok(VERIFY_READ, data, 133 * sizeof(unsigned long))) return -EFAULT; /* copy AltiVec registers VR[0] .. VR[31] */ for (i = 0; i < 32; i++) for (j = 0; j < 4; j++, data++) if (__get_user(task->thread.vr[i].u[j], data)) return -EFAULT; /* copy VSCR */ for (i = 0; i < 4; i++, data++) if (__get_user(task->thread.vscr.u[i], data)) return -EFAULT; /* copy VRSAVE */ if (__get_user(task->thread.vrsave, data)) return -EFAULT; return 0; } #endif #ifdef CONFIG_SPE /* * For get_evrregs/set_evrregs functions 'data' has the following layout: * * struct { * u32 evr[32]; * u64 acc; * u32 spefscr; * } */ /* * Get contents of SPE register state in task TASK. 
*/ static inline int get_evrregs(unsigned long *data, struct task_struct *task) { int i; if (!access_ok(VERIFY_WRITE, data, 35 * sizeof(unsigned long))) return -EFAULT; /* copy SPEFSCR */ if (__put_user(task->thread.spefscr, &data[34])) return -EFAULT; /* copy SPE registers EVR[0] .. EVR[31] */ for (i = 0; i < 32; i++, data++) if (__put_user(task->thread.evr[i], data)) return -EFAULT; /* copy ACC */ if (__put_user64(task->thread.acc, (unsigned long long *)data)) return -EFAULT; return 0; } /* * Write contents of SPE register state into task TASK. */ static inline int set_evrregs(struct task_struct *task, unsigned long *data) { int i; if (!access_ok(VERIFY_READ, data, 35 * sizeof(unsigned long))) return -EFAULT; /* copy SPEFSCR */ if (__get_user(task->thread.spefscr, &data[34])) return -EFAULT; /* copy SPE registers EVR[0] .. EVR[31] */ for (i = 0; i < 32; i++, data++) if (__get_user(task->thread.evr[i], data)) return -EFAULT; /* copy ACC */ if (__get_user64(task->thread.acc, (unsigned long long*)data)) return -EFAULT; return 0; } #endif /* CONFIG_SPE */ static inline void set_single_step(struct task_struct *task) { struct pt_regs *regs = task->thread.regs; if (regs != NULL) { #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) task->thread.dbcr0 = DBCR0_IDM | DBCR0_IC; regs->msr |= MSR_DE; #else regs->msr |= MSR_SE; #endif } } static inline void clear_single_step(struct task_struct *task) { struct pt_regs *regs = task->thread.regs; if (regs != NULL) { #if defined(CONFIG_40x) || defined(CONFIG_BOOKE) task->thread.dbcr0 = 0; regs->msr &= ~MSR_DE; #else regs->msr &= ~MSR_SE; #endif } } #endif /* CONFIG_PPC32 */ /* * Called by kernel/ptrace.c when detaching.. * * Make sure single step bits etc are not set. */ void ptrace_disable(struct task_struct *child) { /* make sure the single step bit is not set. 
*/ clear_single_step(child); } long arch_ptrace(struct task_struct *child, long request, long addr, long data) { int ret = -EPERM; switch (request) { /* when I and D space are separate, these will need to be fixed. */ case PTRACE_PEEKTEXT: /* read word at location addr. */ case PTRACE_PEEKDATA: { unsigned long tmp; int copied; copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0); ret = -EIO; if (copied != sizeof(tmp)) break; ret = put_user(tmp,(unsigned long __user *) data); break; } /* read the word at location addr in the USER area. */ case PTRACE_PEEKUSR: { unsigned long index, tmp; ret = -EIO; /* convert to index and check */ #ifdef CONFIG_PPC32 index = (unsigned long) addr >> 2; if ((addr & 3) || (index > PT_FPSCR) || (child->thread.regs == NULL)) #else index = (unsigned long) addr >> 3; if ((addr & 7) || (index > PT_FPSCR)) #endif break; #ifdef CONFIG_PPC32 CHECK_FULL_REGS(child->thread.regs); #endif if (index < PT_FPR0) { tmp = get_reg(child, (int) index); } else { flush_fp_to_thread(child); tmp = ((unsigned long *)child->thread.fpr)[index - PT_FPR0]; } ret = put_user(tmp,(unsigned long __user *) data); break; } /* If I and D space are separate, this will have to be fixed. */ case PTRACE_POKETEXT: /* write the word at location addr. 
*/ case PTRACE_POKEDATA: ret = 0; if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data)) break; ret = -EIO; break; /* write the word at location addr in the USER area */ case PTRACE_POKEUSR: { unsigned long index; ret = -EIO; /* convert to index and check */ #ifdef CONFIG_PPC32 index = (unsigned long) addr >> 2; if ((addr & 3) || (index > PT_FPSCR) || (child->thread.regs == NULL)) #else index = (unsigned long) addr >> 3; if ((addr & 7) || (index > PT_FPSCR)) #endif break; #ifdef CONFIG_PPC32 CHECK_FULL_REGS(child->thread.regs); #endif if (index == PT_ORIG_R3) break; if (index < PT_FPR0) { ret = put_reg(child, index, data); } else { flush_fp_to_thread(child); ((unsigned long *)child->thread.fpr)[index - PT_FPR0] = data; ret = 0; } break; } case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */ case PTRACE_CONT: { /* restart after signal. */ ret = -EIO; if (!valid_signal(data)) break; if (request == PTRACE_SYSCALL) set_tsk_thread_flag(child, TIF_SYSCALL_TRACE); else clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); child->exit_code = data; /* make sure the single step bit is not set. */ clear_single_step(child); wake_up_process(child); ret = 0; break; } /* * make the child exit. Best I can do is send it a sigkill. * perhaps it should be put in the status that it wants to * exit. */ case PTRACE_KILL: { ret = 0; if (child->exit_state == EXIT_ZOMBIE) /* already dead */ break; child->exit_code = SIGKILL; /* make sure the single step bit is not set. */ clear_single_step(child); wake_up_process(child); break; } case PTRACE_SINGLESTEP: { /* set the trap flag. */ ret = -EIO; if (!valid_signal(data)) break; clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE); set_single_step(child); child->exit_code = data; /* give it a chance to run. 
*/ wake_up_process(child); ret = 0; break; } #ifdef CONFIG_PPC64 case PTRACE_GET_DEBUGREG: { ret = -EINVAL; /* We only support one DABR and no IABRS at the moment */ if (addr > 0) break; ret = put_user(child->thread.dabr, (unsigned long __user *)data); break; } case PTRACE_SET_DEBUGREG: ret = ptrace_set_debugreg(child, addr, data); break; #endif case PTRACE_DETACH: ret = ptrace_detach(child, data); break; case PPC_PTRACE_GETREGS: { /* Get GPRs 0 - 31. */ int i; unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; unsigned long __user *tmp = (unsigned long __user *)addr; for (i = 0; i < 32; i++) { ret = put_user(*reg, tmp); if (ret) break; reg++; tmp++; } break; } case PPC_PTRACE_SETREGS: { /* Set GPRs 0 - 31. */ int i; unsigned long *reg = &((unsigned long *)child->thread.regs)[0]; unsigned long __user *tmp = (unsigned long __user *)addr; for (i = 0; i < 32; i++) { ret = get_user(*reg, tmp); if (ret) break; reg++; tmp++; } break; } case PPC_PTRACE_GETFPREGS: { /* Get FPRs 0 - 31. */ int i; unsigned long *reg = &((unsigned long *)child->thread.fpr)[0]; unsigned long __user *tmp = (unsigned long __user *)addr; flush_fp_to_thread(child); for (i = 0; i < 32; i++) { ret = put_user(*reg, tmp); if (ret) break; reg++; tmp++; } break; } case PPC_PTRACE_SETFPREGS: { /* Get FPRs 0 - 31. */ int i; unsigned long *reg = &((unsigned long *)child->thread.fpr)[0]; unsigned long __user *tmp = (unsigned long __user *)addr; flush_fp_to_thread(child); for (i = 0; i < 32; i++) { ret = get_user(*reg, tmp); if (ret) break; reg++; tmp++; } break; } #ifdef CONFIG_ALTIVEC case PTRACE_GETVRREGS: /* Get the child altivec register state. */ flush_altivec_to_thread(child); ret = get_vrregs((unsigned long __user *)data, child); break; case PTRACE_SETVRREGS: /* Set the child altivec register state. */ flush_altivec_to_thread(child); ret = set_vrregs(child, (unsigned long __user *)data); break; #endif #ifdef CONFIG_SPE case PTRACE_GETEVRREGS: /* Get the child spe register state. 
*/ if (child->thread.regs->msr & MSR_SPE) giveup_spe(child); ret = get_evrregs((unsigned long __user *)data, child); break; case PTRACE_SETEVRREGS: /* Set the child spe register state. */ /* this is to clear the MSR_SPE bit to force a reload * of register state from memory */ if (child->thread.regs->msr & MSR_SPE) giveup_spe(child); ret = set_evrregs(child, (unsigned long __user *)data); break; #endif default: ret = ptrace_request(child, request, addr, data); break; } return ret; } static void do_syscall_trace(void) { /* the 0x80 provides a way for the tracing parent to distinguish between a syscall stop and SIGTRAP delivery */ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD) ? 0x80 : 0)); /* * this isn't the same as continuing with a signal, but it will do * for normal use. strace only continues with a signal if the * stopping signal is not SIGTRAP. -brl */ if (current->exit_code) { send_sig(current->exit_code, current, 1); current->exit_code = 0; } } void do_syscall_trace_enter(struct pt_regs *regs) { secure_computing(regs->gpr[0]); if (test_thread_flag(TIF_SYSCALL_TRACE) && (current->ptrace & PT_PTRACED)) do_syscall_trace(); if (unlikely(current->audit_context)) { #ifdef CONFIG_PPC64 if (!test_thread_flag(TIF_32BIT)) audit_syscall_entry(AUDIT_ARCH_PPC64, regs->gpr[0], regs->gpr[3], regs->gpr[4], regs->gpr[5], regs->gpr[6]); else #endif audit_syscall_entry(AUDIT_ARCH_PPC, regs->gpr[0], regs->gpr[3] & 0xffffffff, regs->gpr[4] & 0xffffffff, regs->gpr[5] & 0xffffffff, regs->gpr[6] & 0xffffffff); } } void do_syscall_trace_leave(struct pt_regs *regs) { if (unlikely(current->audit_context)) audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, regs->result); if ((test_thread_flag(TIF_SYSCALL_TRACE) || test_thread_flag(TIF_SINGLESTEP)) && (current->ptrace & PT_PTRACED)) do_syscall_trace(); }
gpl-2.0
clcarwin/src
graphics/skia/src/svg/SkSVGLine.cpp
14
1154
/* libs/graphics/svg/SkSVGLine.cpp
**
** Copyright 2006, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

#include "SkSVGLine.h"
#include "SkSVGParser.h"

// Attribute table for the SVG <line> element: the two endpoint
// coordinate pairs (x1,y1) and (x2,y2).  Consumed by the
// SVG_ATTRIBUTE/DEFINE_SVG_INFO machinery shared by the SkSVG
// element classes.
const SkSVGAttribute SkSVGLine::gAttributes[] = {
    SVG_ATTRIBUTE(x1),
    SVG_ATTRIBUTE(x2),
    SVG_ATTRIBUTE(y1),
    SVG_ATTRIBUTE(y2)
};

DEFINE_SVG_INFO(Line)

// Re-emit this <line> element through PARSER: open the element, let
// the base class translate the common/inherited attributes, then add
// the endpoint attributes.  Note the emission order is x1,y1,x2,y2
// (grouped per endpoint) while the table above declares x1,x2,y1,y2.
void SkSVGLine::translate(SkSVGParser& parser, bool defState) {
    parser._startElement("line");
    INHERITED::translate(parser, defState);
    SVG_ADD_ATTRIBUTE(x1);
    SVG_ADD_ATTRIBUTE(y1);
    SVG_ADD_ATTRIBUTE(x2);
    SVG_ADD_ATTRIBUTE(y2);
    parser._endElement();
}
gpl-2.0
evaautomation/ragel
ragel/cdfflat.cpp
14
8787
/*
 * Copyright 2004-2006 Adrian Thurston <thurston@complang.org>
 * 2004 Erich Ocean <eric.ocean@ampede.com>
 * 2005 Alan West <alan@alanz.com>
 */

/* This file is part of Ragel.
 *
 * Ragel is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * Ragel is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with Ragel; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "ragel.h"
#include "cdfflat.h"
#include "redfsm.h"
#include "gendata.h"

/* Emit the 1-based to-state action-list id for STATE (0 = no action). */
std::ostream &FFlatCodeGen::TO_STATE_ACTION( RedStateAp *state )
{
	int act = 0;
	if ( state->toStateAction != 0 )
		act = state->toStateAction->actListId+1;
	out << act;
	return out;
}

/* Emit the 1-based from-state action-list id for STATE (0 = no action). */
std::ostream &FFlatCodeGen::FROM_STATE_ACTION( RedStateAp *state )
{
	int act = 0;
	if ( state->fromStateAction != 0 )
		act = state->fromStateAction->actListId+1;
	out << act;
	return out;
}

/* Emit the 1-based EOF action-list id for STATE (0 = no action). */
std::ostream &FFlatCodeGen::EOF_ACTION( RedStateAp *state )
{
	int act = 0;
	if ( state->eofAction != 0 )
		act = state->eofAction->actListId+1;
	out << act;
	return out;
}

/* Write out the function for a transition. */
std::ostream &FFlatCodeGen::TRANS_ACTION( RedTransAp *trans )
{
	int action = 0;
	if ( trans->action != 0 )
		action = trans->action->actListId+1;
	out << action;
	return out;
}

/* Write out the function switch. This switch is keyed on the values
 * of the func index. */
std::ostream &FFlatCodeGen::TO_STATE_ACTION_SWITCH()
{
	/* Loop the actions. */
	for ( GenActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
		if ( redAct->numToStateRefs > 0 ) {
			/* Write the entry label. */
			out << "\tcase " << redAct->actListId+1 << ":\n";

			/* Write each action in the list of action items. */
			for ( GenActionTable::Iter item = redAct->key; item.lte(); item++ )
				ACTION( out, item->value, 0, false, false );

			out << "\tbreak;\n";
		}
	}

	genLineDirective( out );
	return out;
}

/* Write out the function switch. This switch is keyed on the values
 * of the func index. */
std::ostream &FFlatCodeGen::FROM_STATE_ACTION_SWITCH()
{
	/* Loop the actions. */
	for ( GenActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
		if ( redAct->numFromStateRefs > 0 ) {
			/* Write the entry label. */
			out << "\tcase " << redAct->actListId+1 << ":\n";

			/* Write each action in the list of action items. */
			for ( GenActionTable::Iter item = redAct->key; item.lte(); item++ )
				ACTION( out, item->value, 0, false, false );

			out << "\tbreak;\n";
		}
	}

	genLineDirective( out );
	return out;
}

/* Write the switch over EOF action lists.  Note the fourth ACTION
 * argument is true here: these actions run in the EOF context. */
std::ostream &FFlatCodeGen::EOF_ACTION_SWITCH()
{
	/* Loop the actions. */
	for ( GenActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
		if ( redAct->numEofRefs > 0 ) {
			/* Write the entry label. */
			out << "\tcase " << redAct->actListId+1 << ":\n";

			/* Write each action in the list of action items. */
			for ( GenActionTable::Iter item = redAct->key; item.lte(); item++ )
				ACTION( out, item->value, 0, true, false );

			out << "\tbreak;\n";
		}
	}

	genLineDirective( out );
	return out;
}

/* Write out the function switch. This switch is keyed on the values
 * of the func index. */
std::ostream &FFlatCodeGen::ACTION_SWITCH()
{
	/* Loop the actions. */
	for ( GenActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
		if ( redAct->numTransRefs > 0 ) {
			/* Write the entry label. */
			out << "\tcase " << redAct->actListId+1 << ":\n";

			/* Write each action in the list of action items. */
			for ( GenActionTable::Iter item = redAct->key; item.lte(); item++ )
				ACTION( out, item->value, 0, false, false );

			out << "\tbreak;\n";
		}
	}

	genLineDirective( out );
	return out;
}

/* Emit the static data tables for the flat, function-switch code style:
 * condition tables (only when conditions exist), key/span/index tables,
 * transition targets, and the optional per-state action-id tables. */
void FFlatCodeGen::writeData()
{
	if ( redFsm->anyConditions() ) {
		OPEN_ARRAY( WIDE_ALPH_TYPE(), CK() );
		COND_KEYS();
		CLOSE_ARRAY() <<
		"\n";

		OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondSpan), CSP() );
		COND_KEY_SPANS();
		CLOSE_ARRAY() <<
		"\n";

		OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCond), C() );
		CONDS();
		CLOSE_ARRAY() <<
		"\n";

		OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondIndexOffset), CO() );
		COND_INDEX_OFFSET();
		CLOSE_ARRAY() <<
		"\n";
	}

	OPEN_ARRAY( WIDE_ALPH_TYPE(), K() );
	KEYS();
	CLOSE_ARRAY() <<
	"\n";

	OPEN_ARRAY( ARRAY_TYPE(redFsm->maxSpan), SP() );
	KEY_SPANS();
	CLOSE_ARRAY() <<
	"\n";

	OPEN_ARRAY( ARRAY_TYPE(redFsm->maxFlatIndexOffset), IO() );
	FLAT_INDEX_OFFSET();
	CLOSE_ARRAY() <<
	"\n";

	OPEN_ARRAY( ARRAY_TYPE(redFsm->maxIndex), I() );
	INDICIES();
	CLOSE_ARRAY() <<
	"\n";

	OPEN_ARRAY( ARRAY_TYPE(redFsm->maxState), TT() );
	TRANS_TARGS();
	CLOSE_ARRAY() <<
	"\n";

	if ( redFsm->anyActions() ) {
		OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActListId), TA() );
		TRANS_ACTIONS();
		CLOSE_ARRAY() <<
		"\n";
	}

	if ( redFsm->anyToStateActions() ) {
		OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), TSA() );
		TO_STATE_ACTIONS();
		CLOSE_ARRAY() <<
		"\n";
	}

	if ( redFsm->anyFromStateActions() ) {
		OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), FSA() );
		FROM_STATE_ACTIONS();
		CLOSE_ARRAY() <<
		"\n";
	}

	if ( redFsm->anyEofActions() ) {
		OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActListId), EA() );
		EOF_ACTIONS();
		CLOSE_ARRAY() <<
		"\n";
	}

	if ( redFsm->anyEofTrans() ) {
		OPEN_ARRAY( ARRAY_TYPE(redFsm->maxIndexOffset+1), ET() );
		EOF_TRANS();
		CLOSE_ARRAY() <<
		"\n";
	}

	STATE_IDS();
}

/* Emit the execution loop of the generated machine: local declarations,
 * the _resume loop (from-state actions, transition location, state
 * assignment, transition + to-state actions), and the EOF epilogue. */
void FFlatCodeGen::writeExec()
{
	testEofUsed = false;
	outLabelUsed = false;

	/* Locals for the generated code. */
	out <<
		" {\n"
		" int _slen";

	if ( redFsm->anyRegCurStateRef() )
		out << ", _ps";

	out << ";\n";
	out << " int _trans";

	if ( redFsm->anyConditions() )
		out << ", _cond";

	out << ";\n";

	out <<
		" " << PTR_CONST() << WIDE_ALPH_TYPE() << PTR_CONST_END() << POINTER() << "_keys;\n"
		" " << PTR_CONST() << ARRAY_TYPE(redFsm->maxIndex) << PTR_CONST_END() << POINTER() << "_inds;\n";

	if ( redFsm->anyConditions() ) {
		out <<
			" " << PTR_CONST() << ARRAY_TYPE(redFsm->maxCond) << PTR_CONST_END() << POINTER() << "_conds;\n"
			" " << WIDE_ALPH_TYPE() << " _widec;\n";
	}

	/* Bail out immediately on an empty buffer (unless noEnd). */
	if ( !noEnd ) {
		testEofUsed = true;
		out <<
			" if ( " << P() << " == " << PE() << " )\n"
			" goto _test_eof;\n";
	}

	/* Bail out if already in the error state. */
	if ( redFsm->errState != 0 ) {
		outLabelUsed = true;
		out <<
			" if ( " << vCS() << " == " << redFsm->errState->id << " )\n"
			" goto _out;\n";
	}

	out << "_resume:\n";

	if ( redFsm->anyFromStateActions() ) {
		out <<
			" switch ( " << FSA() << "[" << vCS() << "] ) {\n";
			FROM_STATE_ACTION_SWITCH();
			SWITCH_DEFAULT() <<
			" }\n"
			"\n";
	}

	if ( redFsm->anyConditions() )
		COND_TRANSLATE();

	LOCATE_TRANS();

	if ( redFsm->anyEofTrans() )
		out << "_eof_trans:\n";

	if ( redFsm->anyRegCurStateRef() )
		out << " _ps = " << vCS() << ";\n";

	out << " " << vCS() << " = " << TT() << "[_trans];\n\n";

	if ( redFsm->anyRegActions() ) {
		/* Action-list id 0 means no transition action. */
		out <<
			" if ( " << TA() << "[_trans] == 0 )\n"
			" goto _again;\n"
			"\n"
			" switch ( " << TA() << "[_trans] ) {\n";
			ACTION_SWITCH();
			SWITCH_DEFAULT() <<
			" }\n"
			"\n";
	}

	if ( redFsm->anyRegActions() || redFsm->anyActionGotos() ||
			redFsm->anyActionCalls() || redFsm->anyActionRets() )
		out << "_again:\n";

	if ( redFsm->anyToStateActions() ) {
		out <<
			" switch ( " << TSA() << "[" << vCS() << "] ) {\n";
			TO_STATE_ACTION_SWITCH();
			SWITCH_DEFAULT() <<
			" }\n"
			"\n";
	}

	if ( redFsm->errState != 0 ) {
		outLabelUsed = true;
		out <<
			" if ( " << vCS() << " == " << redFsm->errState->id << " )\n"
			" goto _out;\n";
	}

	/* Advance the input pointer and loop. */
	if ( !noEnd ) {
		out <<
			" if ( ++" << P() << " != " << PE() << " )\n"
			" goto _resume;\n";
	}
	else {
		out <<
			" " << P() << " += 1;\n"
			" goto _resume;\n";
	}

	if ( testEofUsed )
		out << " _test_eof: {}\n";

	/* EOF handling: first try an EOF transition, then EOF actions. */
	if ( redFsm->anyEofTrans() || redFsm->anyEofActions() ) {
		out <<
			" if ( " << P() << " == " << vEOF() << " )\n"
			" {\n";

		if ( redFsm->anyEofTrans() ) {
			out <<
				" if ( " << ET() << "[" << vCS() << "] > 0 ) {\n"
				" _trans = " << ET() << "[" << vCS() << "] - 1;\n"
				" goto _eof_trans;\n"
				" }\n";
		}

		if ( redFsm->anyEofActions() ) {
			out <<
				" switch ( " << EA() << "[" << vCS() << "] ) {\n";
				EOF_ACTION_SWITCH();
				SWITCH_DEFAULT() <<
				" }\n";
		}

		out <<
			" }\n"
			"\n";
	}

	if ( outLabelUsed )
		out << " _out: {}\n";

	out << " }\n";
}
gpl-2.0
ghmajx/asuswrt-merlin
release/src/router/gdb/gdb/vax-tdep.c
14
15206
/* Target-dependent code for the VAX.

   Copyright (C) 1986, 1989, 1991, 1992, 1995, 1996, 1998, 1999, 2000,
   2002, 2003, 2004, 2005, 2007 Free Software Foundation, Inc.

   This file is part of GDB.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program.  If not, see <http://www.gnu.org/licenses/>.  */

#include "defs.h"
#include "arch-utils.h"
#include "dis-asm.h"
#include "floatformat.h"
#include "frame.h"
#include "frame-base.h"
#include "frame-unwind.h"
#include "gdbcore.h"
#include "gdbtypes.h"
#include "osabi.h"
#include "regcache.h"
#include "regset.h"
#include "trad-frame.h"
#include "value.h"

#include "gdb_string.h"

#include "vax-tdep.h"

/* Return the name of register REGNUM.  */

static const char *
vax_register_name (int regnum)
{
  static char *register_names[] =
  {
    "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
    "r8", "r9", "r10", "r11", "ap", "fp", "sp", "pc", "ps",
  };

  if (regnum >= 0 && regnum < ARRAY_SIZE (register_names))
    return register_names[regnum];

  return NULL;
}

/* Return the GDB type object for the "standard" data type of data in
   register REGNUM.  All VAX registers are treated as 32-bit ints.  */

static struct type *
vax_register_type (struct gdbarch *gdbarch, int regnum)
{
  return builtin_type_int;
}

/* Core file support.  */

/* Supply register REGNUM from the buffer specified by GREGS and LEN
   in the general-purpose register set REGSET to register cache
   REGCACHE.  If REGNUM is -1, do this for all registers in REGSET.  */

static void
vax_supply_gregset (const struct regset *regset, struct regcache *regcache,
		    int regnum, const void *gregs, size_t len)
{
  const gdb_byte *regs = gregs;
  int i;

  /* Each register occupies 4 consecutive bytes in the core section.  */
  for (i = 0; i < VAX_NUM_REGS; i++)
    {
      if (regnum == i || regnum == -1)
	regcache_raw_supply (regcache, i, regs + i * 4);
    }
}

/* VAX register set.  */

static struct regset vax_gregset =
{
  NULL,
  vax_supply_gregset
};

/* Return the appropriate register set for the core section identified
   by SECT_NAME and SECT_SIZE.  */

static const struct regset *
vax_regset_from_core_section (struct gdbarch *gdbarch,
			      const char *sect_name, size_t sect_size)
{
  if (strcmp (sect_name, ".reg") == 0 && sect_size >= VAX_NUM_REGS * 4)
    return &vax_gregset;

  return NULL;
}

/* The VAX UNIX calling convention uses R1 to pass a structure return
   value address instead of passing it as a first (hidden) argument as
   the VMS calling convention suggests.  */

/* Push NARGS arguments from ARGS onto the stack at SP, push the
   argument count, point AP at the list, and return the new SP.  */

static CORE_ADDR
vax_store_arguments (struct regcache *regcache, int nargs,
		     struct value **args, CORE_ADDR sp)
{
  gdb_byte buf[4];
  int count = 0;
  int i;

  /* We create an argument list on the stack, and make the argument
     pointer to it.  */

  /* Push arguments in reverse order.  */
  for (i = nargs - 1; i >= 0; i--)
    {
      int len = TYPE_LENGTH (value_enclosing_type (args[i]));

      /* Each argument slot is longword-aligned.  */
      sp -= (len + 3) & ~3;
      count += (len + 3) / 4;
      write_memory (sp, value_contents_all (args[i]), len);
    }

  /* Push argument count.  */
  sp -= 4;
  store_unsigned_integer (buf, 4, count);
  write_memory (sp, buf, 4);

  /* Update the argument pointer.  */
  store_unsigned_integer (buf, 4, sp);
  regcache_cooked_write (regcache, VAX_AP_REGNUM, buf);

  return sp;
}

/* Set up a dummy call frame: arguments, return address, fake frame
   pointer, and an empty register-save-mask/condition-handler header,
   mirroring the layout built by the CALLS instruction.  */

static CORE_ADDR
vax_push_dummy_call (struct gdbarch *gdbarch, struct value *function,
		     struct regcache *regcache, CORE_ADDR bp_addr, int nargs,
		     struct value **args, CORE_ADDR sp, int struct_return,
		     CORE_ADDR struct_addr)
{
  CORE_ADDR fp = sp;
  gdb_byte buf[4];

  /* Set up the function arguments.  */
  sp = vax_store_arguments (regcache, nargs, args, sp);

  /* Store return value address.  */
  if (struct_return)
    regcache_cooked_write_unsigned (regcache, VAX_R1_REGNUM, struct_addr);

  /* Store return address in the PC slot.  */
  sp -= 4;
  store_unsigned_integer (buf, 4, bp_addr);
  write_memory (sp, buf, 4);

  /* Store the (fake) frame pointer in the FP slot.  */
  sp -= 4;
  store_unsigned_integer (buf, 4, fp);
  write_memory (sp, buf, 4);

  /* Skip the AP slot.  */
  sp -= 4;

  /* Store register save mask and control bits.  */
  sp -= 4;
  store_unsigned_integer (buf, 4, 0);
  write_memory (sp, buf, 4);

  /* Store condition handler.  */
  sp -= 4;
  store_unsigned_integer (buf, 4, 0);
  write_memory (sp, buf, 4);

  /* Update the stack pointer and frame pointer.  */
  store_unsigned_integer (buf, 4, sp);
  regcache_cooked_write (regcache, VAX_SP_REGNUM, buf);
  regcache_cooked_write (regcache, VAX_FP_REGNUM, buf);

  /* Return the saved (fake) frame pointer.  */
  return fp;
}

/* Build the frame id for a dummy frame from the unwound FP and PC.  */

static struct frame_id
vax_unwind_dummy_id (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  CORE_ADDR fp;

  fp = frame_unwind_register_unsigned (next_frame, VAX_FP_REGNUM);
  return frame_id_build (fp, frame_pc_unwind (next_frame));
}

/* Read/write a function return value.  Aggregates are returned via an
   address in R0; scalars live in R0 (and R1 for values wider than 4
   bytes).  */

static enum return_value_convention
vax_return_value (struct gdbarch *gdbarch, struct type *type,
		  struct regcache *regcache, gdb_byte *readbuf,
		  const gdb_byte *writebuf)
{
  int len = TYPE_LENGTH (type);
  gdb_byte buf[8];

  if (TYPE_CODE (type) == TYPE_CODE_STRUCT
      || TYPE_CODE (type) == TYPE_CODE_UNION
      || TYPE_CODE (type) == TYPE_CODE_ARRAY)
    {
      /* The default on VAX is to return structures in static memory.
         Consequently a function must return the address where we can
         find the return value.  */

      if (readbuf)
	{
	  ULONGEST addr;

	  regcache_raw_read_unsigned (regcache, VAX_R0_REGNUM, &addr);
	  read_memory (addr, readbuf, len);
	}

      return RETURN_VALUE_ABI_RETURNS_ADDRESS;
    }

  if (readbuf)
    {
      /* Read the contents of R0 and (if necessary) R1.  */
      regcache_cooked_read (regcache, VAX_R0_REGNUM, buf);
      if (len > 4)
	regcache_cooked_read (regcache, VAX_R1_REGNUM, buf + 4);
      memcpy (readbuf, buf, len);
    }
  if (writebuf)
    {
      /* Read the contents to R0 and (if necessary) R1.  */
      memcpy (buf, writebuf, len);
      regcache_cooked_write (regcache, VAX_R0_REGNUM, buf);
      if (len > 4)
	regcache_cooked_write (regcache, VAX_R1_REGNUM, buf + 4);
    }

  return RETURN_VALUE_REGISTER_CONVENTION;
}

/* Use the program counter to determine the contents and size of a
   breakpoint instruction.  Return a pointer to a string of bytes that
   encode a breakpoint instruction, store the length of the string in
   *LEN and optionally adjust *PC to point to the correct memory
   location for inserting the breakpoint.  */

static const gdb_byte *
vax_breakpoint_from_pc (CORE_ADDR *pc, int *len)
{
  /* Opcode 3 is the VAX BPT instruction.  */
  static gdb_byte break_insn[] = { 3 };

  *len = sizeof (break_insn);
  return break_insn;
}

/* Advance PC across any function entry prologue instructions to reach
   some "real" code.  Recognizes the common brb/brw jumps and the
   subl2/movab stack-adjustment forms.  */

static CORE_ADDR
vax_skip_prologue (CORE_ADDR pc)
{
  gdb_byte op = read_memory_unsigned_integer (pc, 1);

  if (op == 0x11)
    pc += 2;			/* skip brb */
  if (op == 0x31)
    pc += 3;			/* skip brw */
  if (op == 0xC2
      && (read_memory_unsigned_integer (pc + 2, 1)) == 0x5E)
    pc += 3;			/* skip subl2 */
  if (op == 0x9E
      && (read_memory_unsigned_integer (pc + 1, 1)) == 0xAE
      && (read_memory_unsigned_integer (pc + 3, 1)) == 0x5E)
    pc += 4;			/* skip movab */
  if (op == 0x9E
      && (read_memory_unsigned_integer (pc + 1, 1)) == 0xCE
      && (read_memory_unsigned_integer (pc + 4, 1)) == 0x5E)
    pc += 5;			/* skip movab */
  if (op == 0x9E
      && (read_memory_unsigned_integer (pc + 1, 1)) == 0xEE
      && (read_memory_unsigned_integer (pc + 6, 1)) == 0x5E)
    pc += 7;			/* skip movab */

  return pc;
}

/* Unwinding the stack is relatively easy since the VAX has a
   dedicated frame pointer, and frames are set up automatically as the
   result of a function call.  Most of the relevant information can be
   inferred from the documentation of the Procedure Call Instructions
   in the VAX MACRO and Instruction Set Reference Manual.  */

struct vax_frame_cache
{
  /* Base address.  */
  CORE_ADDR base;

  /* Table of saved registers.  */
  struct trad_frame_saved_reg *saved_regs;
};

/* Build (or return the cached) unwind cache for NEXT_FRAME by decoding
   the CALLS/CALLG stack frame at FP.
   NOTE(review): this is the only internal function here not marked
   `static`, unlike all its siblings — consider marking it static.  */

struct vax_frame_cache *
vax_frame_cache (struct frame_info *next_frame, void **this_cache)
{
  struct vax_frame_cache *cache;
  CORE_ADDR addr;
  ULONGEST mask;
  int regnum;

  if (*this_cache)
    return *this_cache;

  /* Allocate a new cache.  */
  cache = FRAME_OBSTACK_ZALLOC (struct vax_frame_cache);
  cache->saved_regs = trad_frame_alloc_saved_regs (next_frame);

  /* The frame pointer is used as the base for the frame.  */
  cache->base = frame_unwind_register_unsigned (next_frame, VAX_FP_REGNUM);
  if (cache->base == 0)
    return cache;

  /* The register save mask and control bits determine the layout of
     the stack frame.  */
  mask = get_frame_memory_unsigned (next_frame, cache->base + 4, 4) >> 16;

  /* These are always saved.  */
  cache->saved_regs[VAX_PC_REGNUM].addr = cache->base + 16;
  cache->saved_regs[VAX_FP_REGNUM].addr = cache->base + 12;
  cache->saved_regs[VAX_AP_REGNUM].addr = cache->base + 8;
  cache->saved_regs[VAX_PS_REGNUM].addr = cache->base + 4;

  /* Scan the register save mask and record the location of the saved
     registers.  */
  addr = cache->base + 20;
  for (regnum = 0; regnum < VAX_AP_REGNUM; regnum++)
    {
      if (mask & (1 << regnum))
	{
	  cache->saved_regs[regnum].addr = addr;
	  addr += 4;
	}
    }

  /* The CALLS/CALLG flag determines whether this frame has a General
     Argument List or a Stack Argument List.  */
  if (mask & (1 << 13))
    {
      ULONGEST numarg;

      /* This is a procedure with Stack Argument List.  Adjust the
         stack address for the arguments that were pushed onto the
         stack.  The return instruction will automatically pop the
         arguments from the stack.  */
      numarg = get_frame_memory_unsigned (next_frame, addr, 1);
      addr += 4 + numarg * 4;
    }

  /* Bits 1:0 of the stack pointer were saved in the control bits.  */
  trad_frame_set_value (cache->saved_regs, VAX_SP_REGNUM, addr + (mask >> 14));

  return cache;
}

/* Compute the frame id for NEXT_FRAME's caller.  */

static void
vax_frame_this_id (struct frame_info *next_frame, void **this_cache,
		   struct frame_id *this_id)
{
  struct vax_frame_cache *cache = vax_frame_cache (next_frame, this_cache);

  /* This marks the outermost frame.  */
  if (cache->base == 0)
    return;

  (*this_id) = frame_id_build (cache->base,
			       frame_func_unwind (next_frame, NORMAL_FRAME));
}

/* Fetch register REGNUM of the frame previous to NEXT_FRAME out of the
   saved-register table.  */

static void
vax_frame_prev_register (struct frame_info *next_frame, void **this_cache,
			 int regnum, int *optimizedp,
			 enum lval_type *lvalp, CORE_ADDR *addrp,
			 int *realnump, gdb_byte *valuep)
{
  struct vax_frame_cache *cache = vax_frame_cache (next_frame, this_cache);

  trad_frame_get_prev_register (next_frame, cache->saved_regs, regnum,
				optimizedp, lvalp, addrp, realnump, valuep);
}

static const struct frame_unwind vax_frame_unwind =
{
  NORMAL_FRAME,
  vax_frame_this_id,
  vax_frame_prev_register
};

/* Unconditionally claim every frame for the VAX unwinder.  */

static const struct frame_unwind *
vax_frame_sniffer (struct frame_info *next_frame)
{
  return &vax_frame_unwind;
}

/* Return the frame base (the saved FP) for NEXT_FRAME.  */

static CORE_ADDR
vax_frame_base_address (struct frame_info *next_frame, void **this_cache)
{
  struct vax_frame_cache *cache = vax_frame_cache (next_frame, this_cache);

  return cache->base;
}

/* Return the argument-list address (AP) for NEXT_FRAME.  */

static CORE_ADDR
vax_frame_args_address (struct frame_info *next_frame, void **this_cache)
{
  return frame_unwind_register_unsigned (next_frame, VAX_AP_REGNUM);
}

static const struct frame_base vax_frame_base =
{
  &vax_frame_unwind,
  vax_frame_base_address,
  vax_frame_base_address,
  vax_frame_args_address
};

/* Return number of arguments for FRAME.  */

static int
vax_frame_num_args (struct frame_info *frame)
{
  CORE_ADDR args;

  /* Assume that the argument pointer for the outermost frame is
     hosed, as is the case on NetBSD/vax ELF.  */
  if (get_frame_base_address (frame) == 0)
    return 0;

  args = get_frame_register_unsigned (frame, VAX_AP_REGNUM);
  return get_frame_memory_unsigned (frame, args, 1);
}

/* Return the resume address (saved PC) of NEXT_FRAME's caller.  */

static CORE_ADDR
vax_unwind_pc (struct gdbarch *gdbarch, struct frame_info *next_frame)
{
  return frame_unwind_register_unsigned (next_frame, VAX_PC_REGNUM);
}

/* Initialize the current architecture based on INFO.  If possible, re-use an
   architecture from ARCHES, which is a list of architectures already created
   during this debugging session.

   Called e.g. at program startup, when reading a core file, and when reading
   a binary file.  */

static struct gdbarch *
vax_gdbarch_init (struct gdbarch_info info, struct gdbarch_list *arches)
{
  struct gdbarch *gdbarch;

  /* If there is already a candidate, use it.  */
  arches = gdbarch_list_lookup_by_info (arches, &info);
  if (arches != NULL)
    return arches->gdbarch;

  gdbarch = gdbarch_alloc (&info, NULL);

  /* VAX-native floating-point formats, not IEEE.  */
  set_gdbarch_float_format (gdbarch, floatformats_vax_f);
  set_gdbarch_double_format (gdbarch, floatformats_vax_d);
  set_gdbarch_long_double_format (gdbarch, floatformats_vax_d);
  set_gdbarch_long_double_bit (gdbarch, 64);

  /* Register info */
  set_gdbarch_num_regs (gdbarch, VAX_NUM_REGS);
  set_gdbarch_register_name (gdbarch, vax_register_name);
  set_gdbarch_register_type (gdbarch, vax_register_type);
  set_gdbarch_sp_regnum (gdbarch, VAX_SP_REGNUM);
  set_gdbarch_pc_regnum (gdbarch, VAX_PC_REGNUM);
  set_gdbarch_ps_regnum (gdbarch, VAX_PS_REGNUM);

  set_gdbarch_regset_from_core_section
    (gdbarch, vax_regset_from_core_section);

  /* Frame and stack info */
  set_gdbarch_skip_prologue (gdbarch, vax_skip_prologue);
  set_gdbarch_frame_num_args (gdbarch, vax_frame_num_args);
  set_gdbarch_frame_args_skip (gdbarch, 4);

  /* Stack grows downward.  */
  set_gdbarch_inner_than (gdbarch, core_addr_lessthan);

  /* Return value info */
  set_gdbarch_return_value (gdbarch, vax_return_value);

  /* Call dummy code.  */
  set_gdbarch_push_dummy_call (gdbarch, vax_push_dummy_call);
  set_gdbarch_unwind_dummy_id (gdbarch, vax_unwind_dummy_id);

  /* Breakpoint info */
  set_gdbarch_breakpoint_from_pc (gdbarch, vax_breakpoint_from_pc);

  /* Misc info */
  set_gdbarch_deprecated_function_start_offset (gdbarch, 2);
  set_gdbarch_believe_pcc_promotion (gdbarch, 1);

  set_gdbarch_print_insn (gdbarch, print_insn_vax);

  set_gdbarch_unwind_pc (gdbarch, vax_unwind_pc);

  frame_base_set_default (gdbarch, &vax_frame_base);

  /* Hook in ABI-specific overrides, if they have been registered.  */
  gdbarch_init_osabi (info, gdbarch);

  frame_unwind_append_sniffer (gdbarch, vax_frame_sniffer);

  return (gdbarch);
}

/* Provide a prototype to silence -Wmissing-prototypes.  */
void _initialize_vax_tdep (void);

void
_initialize_vax_tdep (void)
{
  gdbarch_register (bfd_arch_vax, vax_gdbarch_init, NULL);
}
gpl-2.0
Doxito/Ri-Core_Privado
src/server/scripts/EasternKingdoms/Scholomance/boss_kormok.cpp
14
3634
/*
 * Copyright (C) 2008-2012 TrinityCore <http://www.trinitycore.org/>
 * Copyright (C) 2006-2009 ScriptDev2 <https://scriptdev2.svn.sourceforge.net/>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program. If not, see <http://www.gnu.org/licenses/>.
 */

/* ScriptData
SDName: Boss_Kormok
SD%Complete: 100
SDComment:
SDCategory: Scholomance
EndScriptData */

#include "ScriptPCH.h"

// Spells cast by Kormok.
enum KormokSpells
{
    SPELL_SHADOWBOLTVOLLEY = 20741,
    SPELL_BONESHIELD       = 27688
};

// Creature entries summoned during the fight.
enum KormokSummons
{
    NPC_BONE_MINION = 16119,
    NPC_BONE_MAGE   = 16120
};

// Boss script for Kormok (Scholomance): periodic Shadow Bolt Volley and
// Bone Shield, waves of bone minions, and a one-time pair of bone mages
// at low health.
class boss_kormok : public CreatureScript
{
public:
    boss_kormok() : CreatureScript("boss_kormok") { }

    CreatureAI* GetAI(Creature* creature) const
    {
        return new boss_kormokAI(creature);
    }

    struct boss_kormokAI : public ScriptedAI
    {
        boss_kormokAI(Creature* c) : ScriptedAI(c) { }

        uint32 shadowVolleyTimer;   // ms until next Shadow Bolt Volley
        uint32 boneShieldTimer;     // ms until next Bone Shield
        uint32 minionTimer;         // ms until next minion wave
        uint32 mageTimer;           // unused, kept for layout parity
        bool summonedMages;         // true once the low-health mages spawned

        void Reset()
        {
            shadowVolleyTimer = 10000;
            boneShieldTimer = 2000;
            minionTimer = 15000;
            mageTimer = 0;
            summonedMages = false;
        }

        void EnterCombat(Unit* /*who*/) { }

        // Spawn one bone minion near the boss and send it at VICTIM.
        void SummonMinions(Unit* victim)
        {
            Creature* minion = DoSpawnCreature(NPC_BONE_MINION,
                float(irand(-7, 7)), float(irand(-7, 7)), 0, 0,
                TEMPSUMMON_TIMED_OR_CORPSE_DESPAWN, 120000);
            if (minion)
                minion->AI()->AttackStart(victim);
        }

        // Spawn one bone mage near the boss and send it at VICTIM.
        void SummonMages(Unit* victim)
        {
            Creature* mage = DoSpawnCreature(NPC_BONE_MAGE,
                float(irand(-9, 9)), float(irand(-9, 9)), 0, 0,
                TEMPSUMMON_TIMED_OR_CORPSE_DESPAWN, 120000);
            if (mage)
                mage->AI()->AttackStart(victim);
        }

        void UpdateAI(const uint32 diff)
        {
            if (!UpdateVictim())
                return;

            // Shadow Bolt Volley every 15 seconds.
            if (shadowVolleyTimer <= diff)
            {
                DoCast(me->getVictim(), SPELL_SHADOWBOLTVOLLEY);
                shadowVolleyTimer = 15000;
            }
            else
                shadowVolleyTimer -= diff;

            // Refresh Bone Shield every 45 seconds.
            if (boneShieldTimer <= diff)
            {
                DoCast(me->getVictim(), SPELL_BONESHIELD);
                boneShieldTimer = 45000;
            }
            else
                boneShieldTimer -= diff;

            // Wave of four bone minions every 12 seconds.
            if (minionTimer <= diff)
            {
                for (int i = 0; i < 4; ++i)
                    SummonMinions(me->getVictim());
                minionTimer = 12000;
            }
            else
                minionTimer -= diff;

            // Once, below 26% health, summon two bone mages.
            if (!summonedMages && HealthBelowPct(26))
            {
                SummonMages(me->getVictim());
                SummonMages(me->getVictim());
                summonedMages = true;
            }

            DoMeleeAttackIfReady();
        }
    };
};

void AddSC_boss_kormok()
{
    new boss_kormok();
}
gpl-2.0
geekydoc/android_kernel_lge_l70pds_l70pn
arch/arm/mach-msm/lge/board-8610-w6ds-global-sca.c
14
4419
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <linux/of_irq.h>
#include <linux/memory.h>
#include <linux/msm_tsens.h>
#include <asm/mach/map.h>
#include <asm/arch_timer.h>
#include <asm/hardware/gic.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
#include <mach/board.h>
#include <mach/gpiomux.h>
#include <mach/msm_iomap.h>
#include <mach/restart.h>
#ifdef CONFIG_ION_MSM
#include <mach/ion.h>
#endif
#include <linux/regulator/qpnp-regulator.h>
#include <mach/msm_memtypes.h>
#include <mach/socinfo.h>
#include <mach/board.h>
#include <mach/clk-provider.h>
#include <mach/msm_smd.h>
#include <mach/rpm-smd.h>
#include <mach/rpm-regulator-smd.h>
#include <mach/msm_smem.h>
#include <linux/msm_thermal.h>
#include "../board-dt.h"
#include "../clock.h"
#include "../platsmp.h"
#include "../spm.h"
#include "../pm.h"
#include "../modem_notifier.h"
#include <mach/board_lge.h>

/* Per-memtype reservation table; EBI banks require 1 MB alignment. */
static struct memtype_reserve msm8610_reserve_table[] __initdata = {
	[MEMTYPE_SMI] = {
	},
	[MEMTYPE_EBI0] = {
		.flags = MEMTYPE_FLAGS_1M_ALIGN,
	},
	[MEMTYPE_EBI1] = {
		.flags = MEMTYPE_FLAGS_1M_ALIGN,
	},
};

/* All physical addresses on this board map to EBI1. */
static int msm8610_paddr_to_memtype(unsigned int paddr)
{
	return MEMTYPE_EBI1;
}

/* Bind fixed device names to the SDCC/SDHCI controllers so existing
   userspace device naming keeps working with the device tree. */
static struct of_dev_auxdata msm8610_auxdata_lookup[] __initdata = {
	OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF9824000, \
			"msm_sdcc.1", NULL),
	OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98A4000, \
			"msm_sdcc.2", NULL),
	OF_DEV_AUXDATA("qcom,sdhci-msm", 0xF9824900, \
			"msm_sdcc.1", NULL),
	OF_DEV_AUXDATA("qcom,sdhci-msm", 0xF98A4900, \
			"msm_sdcc.2", NULL),
	{}
};

static struct reserve_info msm8610_reserve_info __initdata = {
	.memtype_reserve_table = msm8610_reserve_table,
	.paddr_to_memtype = msm8610_paddr_to_memtype,
};

/* Early pass: scan the flattened DT for memory holes before paging
   is fully up. */
static void __init msm8610_early_memory(void)
{
	reserve_info = &msm8610_reserve_info;
	of_scan_flat_dt(dt_scan_for_memory_hole, msm8610_reserve_table);
}

/* Reserve carveout memory described by the DT, plus LGE-specific
   regions (chosen node, ram console) when configured. */
static void __init msm8610_reserve(void)
{
	reserve_info = &msm8610_reserve_info;
	of_scan_flat_dt(dt_scan_for_memory_reserve, msm8610_reserve_table);
#ifdef CONFIG_MACH_LGE
	of_scan_flat_dt(lge_init_dt_scan_chosen, NULL);
#endif
	msm_reserve();
#if defined(CONFIG_ANDROID_RAM_CONSOLE)
	lge_reserve();
#endif
}

/* Register core SoC drivers.  NOTE(review): ordering matters here —
   SMEM/SMD/RPM come before their regulator and thermal consumers;
   keep additions in dependency order. */
void __init msm8610_add_drivers(void)
{
	msm_smem_init();
	msm_init_modem_notifier_list();
	msm_smd_init();
	msm_rpm_driver_init();
	msm_spm_device_init();
	msm_pm_sleep_status_init();
	rpm_regulator_smd_driver_init();
	qpnp_regulator_init();
	tsens_tm_init_driver();
	/* thermal mitigation driver */
	msm_thermal_device_init();

	/* RUMI emulation targets use a reduced clock tree. */
	if (of_board_is_rumi())
		msm_clock_init(&msm8610_rumi_clock_init_data);
	else
		msm_clock_init(&msm8610_clock_init_data);
#if defined(CONFIG_ANDROID_RAM_CONSOLE)
	lge_add_persistent_device();
#endif
#ifdef CONFIG_USB_G_LGE_ANDROID
	lge_android_usb_init();
#endif
#ifdef CONFIG_LGE_ENABLE_MMC_STRENGTH_CONTROL
	lge_add_mmc_strength_devices();
#endif
}

/* Machine init: socinfo, pinmux, DT device population, then drivers. */
void __init msm8610_init(void)
{
	struct of_dev_auxdata *adata = msm8610_auxdata_lookup;

	if (socinfo_init() < 0)
		pr_err("%s: socinfo_init() failed\n", __func__);

	msm8610_init_gpiomux();
	board_dt_populate(adata);
	msm8610_add_drivers();
}

static const char *msm8610_dt_match[] __initconst = {
	"qcom,msm8610",
	NULL
};

DT_MACHINE_START(MSM8610_DT, "Qualcomm MSM 8610 (Flattened Device Tree)")
	.map_io = msm_map_msm8610_io,
	.init_irq = msm_dt_init_irq,
	.init_machine = msm8610_init,
	.handle_irq = gic_handle_irq,
	.timer = &msm_dt_timer,
	.dt_compat = msm8610_dt_match,
	.restart = msm_restart,
	.reserve = msm8610_reserve,
	.init_very_early = msm8610_early_memory,
	.smp = &arm_smp_ops,
MACHINE_END
gpl-2.0
davidgraeff/linux
drivers/media/platform/s5p-mfc/s5p_mfc_opr_v5.c
526
54639
/* * drivers/media/platform/samsung/mfc5/s5p_mfc_opr_v5.c * * Samsung MFC (Multi Function Codec - FIMV) driver * This file contains hw related functions. * * Kamil Debski, Copyright (c) 2011 Samsung Electronics * http://www.samsung.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include "s5p_mfc_common.h" #include "s5p_mfc_cmd.h" #include "s5p_mfc_ctrl.h" #include "s5p_mfc_debug.h" #include "s5p_mfc_intr.h" #include "s5p_mfc_pm.h" #include "s5p_mfc_opr.h" #include "s5p_mfc_opr_v5.h" #include <asm/cacheflush.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/firmware.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/mm.h> #include <linux/sched.h> #define OFFSETA(x) (((x) - dev->bank1) >> MFC_OFFSET_SHIFT) #define OFFSETB(x) (((x) - dev->bank2) >> MFC_OFFSET_SHIFT) /* Allocate temporary buffers for decoding */ static int s5p_mfc_alloc_dec_temp_buffers_v5(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv; int ret; ctx->dsc.size = buf_size->dsc; ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->dsc); if (ret) { mfc_err("Failed to allocate temporary buffer\n"); return ret; } BUG_ON(ctx->dsc.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); memset(ctx->dsc.virt, 0, ctx->dsc.size); wmb(); return 0; } /* Release temporary buffers for decoding */ static void s5p_mfc_release_dec_desc_buffer_v5(struct s5p_mfc_ctx *ctx) { s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->dsc); } /* Allocate codec buffers */ static int s5p_mfc_alloc_codec_buffers_v5(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; unsigned int enc_ref_y_size = 0; unsigned int enc_ref_c_size = 0; unsigned int guard_width, guard_height; int ret; if (ctx->type == MFCINST_DECODER) { mfc_debug(2, "Luma size:%d Chroma 
size:%d MV size:%d\n", ctx->luma_size, ctx->chroma_size, ctx->mv_size); mfc_debug(2, "Totals bufs: %d\n", ctx->total_dpb_count); } else if (ctx->type == MFCINST_ENCODER) { enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN) * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN); enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN); if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) { enc_ref_c_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN) * ALIGN(ctx->img_height >> 1, S5P_FIMV_NV12MT_VALIGN); enc_ref_c_size = ALIGN(enc_ref_c_size, S5P_FIMV_NV12MT_SALIGN); } else { guard_width = ALIGN(ctx->img_width + 16, S5P_FIMV_NV12MT_HALIGN); guard_height = ALIGN((ctx->img_height >> 1) + 4, S5P_FIMV_NV12MT_VALIGN); enc_ref_c_size = ALIGN(guard_width * guard_height, S5P_FIMV_NV12MT_SALIGN); } mfc_debug(2, "recon luma size: %d chroma size: %d\n", enc_ref_y_size, enc_ref_c_size); } else { return -EINVAL; } /* Codecs have different memory requirements */ switch (ctx->codec_mode) { case S5P_MFC_CODEC_H264_DEC: ctx->bank1.size = ALIGN(S5P_FIMV_DEC_NB_IP_SIZE + S5P_FIMV_DEC_VERT_NB_MV_SIZE, S5P_FIMV_DEC_BUF_ALIGN); ctx->bank2.size = ctx->total_dpb_count * ctx->mv_size; break; case S5P_MFC_CODEC_MPEG4_DEC: ctx->bank1.size = ALIGN(S5P_FIMV_DEC_NB_DCAC_SIZE + S5P_FIMV_DEC_UPNB_MV_SIZE + S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE + S5P_FIMV_DEC_STX_PARSER_SIZE + S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE, S5P_FIMV_DEC_BUF_ALIGN); ctx->bank2.size = 0; break; case S5P_MFC_CODEC_VC1RCV_DEC: case S5P_MFC_CODEC_VC1_DEC: ctx->bank1.size = ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE + S5P_FIMV_DEC_UPNB_MV_SIZE + S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE + S5P_FIMV_DEC_NB_DCAC_SIZE + 3 * S5P_FIMV_DEC_VC1_BITPLANE_SIZE, S5P_FIMV_DEC_BUF_ALIGN); ctx->bank2.size = 0; break; case S5P_MFC_CODEC_MPEG2_DEC: ctx->bank1.size = 0; ctx->bank2.size = 0; break; case S5P_MFC_CODEC_H263_DEC: ctx->bank1.size = ALIGN(S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE + S5P_FIMV_DEC_UPNB_MV_SIZE + S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE + 
S5P_FIMV_DEC_NB_DCAC_SIZE, S5P_FIMV_DEC_BUF_ALIGN); ctx->bank2.size = 0; break; case S5P_MFC_CODEC_H264_ENC: ctx->bank1.size = (enc_ref_y_size * 2) + S5P_FIMV_ENC_UPMV_SIZE + S5P_FIMV_ENC_COLFLG_SIZE + S5P_FIMV_ENC_INTRAMD_SIZE + S5P_FIMV_ENC_NBORINFO_SIZE; ctx->bank2.size = (enc_ref_y_size * 2) + (enc_ref_c_size * 4) + S5P_FIMV_ENC_INTRAPRED_SIZE; break; case S5P_MFC_CODEC_MPEG4_ENC: ctx->bank1.size = (enc_ref_y_size * 2) + S5P_FIMV_ENC_UPMV_SIZE + S5P_FIMV_ENC_COLFLG_SIZE + S5P_FIMV_ENC_ACDCCOEF_SIZE; ctx->bank2.size = (enc_ref_y_size * 2) + (enc_ref_c_size * 4); break; case S5P_MFC_CODEC_H263_ENC: ctx->bank1.size = (enc_ref_y_size * 2) + S5P_FIMV_ENC_UPMV_SIZE + S5P_FIMV_ENC_ACDCCOEF_SIZE; ctx->bank2.size = (enc_ref_y_size * 2) + (enc_ref_c_size * 4); break; default: break; } /* Allocate only if memory from bank 1 is necessary */ if (ctx->bank1.size > 0) { ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->bank1); if (ret) { mfc_err("Failed to allocate Bank1 temporary buffer\n"); return ret; } BUG_ON(ctx->bank1.dma & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); } /* Allocate only if memory from bank 2 is necessary */ if (ctx->bank2.size > 0) { ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_r, &ctx->bank2); if (ret) { mfc_err("Failed to allocate Bank2 temporary buffer\n"); s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1); return ret; } BUG_ON(ctx->bank2.dma & ((1 << MFC_BANK2_ALIGN_ORDER) - 1)); } return 0; } /* Release buffers allocated for codec */ static void s5p_mfc_release_codec_buffers_v5(struct s5p_mfc_ctx *ctx) { s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->bank1); s5p_mfc_release_priv_buf(ctx->dev->mem_dev_r, &ctx->bank2); } /* Allocate memory for instance data buffer */ static int s5p_mfc_alloc_instance_buffer_v5(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv; int ret; if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC || ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) 
ctx->ctx.size = buf_size->h264_ctx; else ctx->ctx.size = buf_size->non_h264_ctx; ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->ctx); if (ret) { mfc_err("Failed to allocate instance buffer\n"); return ret; } ctx->ctx.ofs = OFFSETA(ctx->ctx.dma); /* Zero content of the allocated memory */ memset(ctx->ctx.virt, 0, ctx->ctx.size); wmb(); /* Initialize shared memory */ ctx->shm.size = buf_size->shm; ret = s5p_mfc_alloc_priv_buf(dev->mem_dev_l, &ctx->shm); if (ret) { mfc_err("Failed to allocate shared memory buffer\n"); return ret; } /* shared memory offset only keeps the offset from base (port a) */ ctx->shm.ofs = ctx->shm.dma - dev->bank1; BUG_ON(ctx->shm.ofs & ((1 << MFC_BANK1_ALIGN_ORDER) - 1)); memset(ctx->shm.virt, 0, buf_size->shm); wmb(); return 0; } /* Release instance buffer */ static void s5p_mfc_release_instance_buffer_v5(struct s5p_mfc_ctx *ctx) { s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->ctx); s5p_mfc_release_priv_buf(ctx->dev->mem_dev_l, &ctx->shm); } static int s5p_mfc_alloc_dev_context_buffer_v5(struct s5p_mfc_dev *dev) { /* NOP */ return 0; } static void s5p_mfc_release_dev_context_buffer_v5(struct s5p_mfc_dev *dev) { /* NOP */ } static void s5p_mfc_write_info_v5(struct s5p_mfc_ctx *ctx, unsigned int data, unsigned int ofs) { writel(data, (ctx->shm.virt + ofs)); wmb(); } static unsigned int s5p_mfc_read_info_v5(struct s5p_mfc_ctx *ctx, unsigned int ofs) { rmb(); return readl(ctx->shm.virt + ofs); } static void s5p_mfc_dec_calc_dpb_size_v5(struct s5p_mfc_ctx *ctx) { unsigned int guard_width, guard_height; ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN); ctx->buf_height = ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN); mfc_debug(2, "SEQ Done: Movie dimensions %dx%d, buffer dimensions: %dx%d\n", ctx->img_width, ctx->img_height, ctx->buf_width, ctx->buf_height); if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) { ctx->luma_size = ALIGN(ctx->buf_width * ctx->buf_height, S5P_FIMV_DEC_BUF_ALIGN); ctx->chroma_size = 
ALIGN(ctx->buf_width * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN), S5P_FIMV_DEC_BUF_ALIGN); ctx->mv_size = ALIGN(ctx->buf_width * ALIGN((ctx->buf_height >> 2), S5P_FIMV_NV12MT_VALIGN), S5P_FIMV_DEC_BUF_ALIGN); } else { guard_width = ALIGN(ctx->img_width + 24, S5P_FIMV_NV12MT_HALIGN); guard_height = ALIGN(ctx->img_height + 16, S5P_FIMV_NV12MT_VALIGN); ctx->luma_size = ALIGN(guard_width * guard_height, S5P_FIMV_DEC_BUF_ALIGN); guard_width = ALIGN(ctx->img_width + 16, S5P_FIMV_NV12MT_HALIGN); guard_height = ALIGN((ctx->img_height >> 1) + 4, S5P_FIMV_NV12MT_VALIGN); ctx->chroma_size = ALIGN(guard_width * guard_height, S5P_FIMV_DEC_BUF_ALIGN); ctx->mv_size = 0; } } static void s5p_mfc_enc_calc_src_size_v5(struct s5p_mfc_ctx *ctx) { if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M) { ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN); ctx->luma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN) * ALIGN(ctx->img_height, S5P_FIMV_NV12M_LVALIGN); ctx->chroma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12M_HALIGN) * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12M_CVALIGN); ctx->luma_size = ALIGN(ctx->luma_size, S5P_FIMV_NV12M_SALIGN); ctx->chroma_size = ALIGN(ctx->chroma_size, S5P_FIMV_NV12M_SALIGN); } else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT) { ctx->buf_width = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN); ctx->luma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN) * ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN); ctx->chroma_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN) * ALIGN((ctx->img_height >> 1), S5P_FIMV_NV12MT_VALIGN); ctx->luma_size = ALIGN(ctx->luma_size, S5P_FIMV_NV12MT_SALIGN); ctx->chroma_size = ALIGN(ctx->chroma_size, S5P_FIMV_NV12MT_SALIGN); } } /* Set registers for decoding temporary buffers */ static void s5p_mfc_set_dec_desc_buffer(struct s5p_mfc_ctx *ctx) { struct s5p_mfc_dev *dev = ctx->dev; struct s5p_mfc_buf_size_v5 *buf_size = dev->variant->buf_size->priv; mfc_write(dev, OFFSETA(ctx->dsc.dma), 
			S5P_FIMV_SI_CH0_DESC_ADR);
	mfc_write(dev, buf_size->dsc, S5P_FIMV_SI_CH0_DESC_SIZE);
}

/* Set registers for shared buffer */
static void s5p_mfc_set_shared_buffer(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	/* Tell the firmware where this instance's shared memory lives */
	mfc_write(dev, ctx->shm.ofs, S5P_FIMV_SI_CH0_HOST_WR_ADR);
}

/* Set registers for decoding stream buffer */
static int s5p_mfc_set_dec_stream_buffer_v5(struct s5p_mfc_ctx *ctx,
		int buf_addr, unsigned int start_num_byte,
		unsigned int buf_size)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_write(dev, OFFSETA(buf_addr), S5P_FIMV_SI_CH0_SB_ST_ADR);
	/* Whole CPB size vs. the size of this particular chunk */
	mfc_write(dev, ctx->dec_src_buf_size, S5P_FIMV_SI_CH0_CPB_SIZE);
	mfc_write(dev, buf_size, S5P_FIMV_SI_CH0_SB_FRM_SIZE);
	/* Offset of already-consumed stream bytes, via shared memory */
	s5p_mfc_write_info_v5(ctx, start_num_byte, START_BYTE_NUM);
	return 0;
}

/*
 * Set decoding frame buffer: carve per-codec scratch areas out of bank1/
 * bank2, program every DPB's luma/chroma (and H.264 MV) addresses and
 * issue the INIT_BUFS command. The write order follows the buffer layout
 * expected by the v5 firmware.
 */
static int s5p_mfc_set_dec_frame_buffer_v5(struct s5p_mfc_ctx *ctx)
{
	unsigned int frame_size, i;
	unsigned int frame_size_ch, frame_size_mv;
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned int dpb;
	size_t buf_addr1, buf_addr2;
	int buf_size1, buf_size2;

	buf_addr1 = ctx->bank1.dma;
	buf_size1 = ctx->bank1.size;
	buf_addr2 = ctx->bank2.dma;
	buf_size2 = ctx->bank2.size;
	/* Update only the DPB-count field of the config register */
	dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
		~S5P_FIMV_DPB_COUNT_MASK;
	mfc_write(dev, ctx->total_dpb_count | dpb,
			S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
	s5p_mfc_set_shared_buffer(ctx);
	switch (ctx->codec_mode) {
	case S5P_MFC_CODEC_H264_DEC:
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_H264_VERT_NB_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_VERT_NB_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_VERT_NB_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_NB_IP_ADR);
		buf_addr1 += S5P_FIMV_DEC_NB_IP_SIZE;
		buf_size1 -= S5P_FIMV_DEC_NB_IP_SIZE;
		break;
	case S5P_MFC_CODEC_MPEG4_DEC:
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_MPEG4_NB_DCAC_ADR);
		buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
		buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_MPEG4_UP_NB_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SA_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_SP_ADR);
		buf_addr1 += S5P_FIMV_DEC_STX_PARSER_SIZE;
		buf_size1 -= S5P_FIMV_DEC_STX_PARSER_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_MPEG4_OT_LINE_ADR);
		buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		break;
	case S5P_MFC_CODEC_H263_DEC:
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_OT_LINE_ADR);
		buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_H263_UP_NB_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_SA_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_H263_NB_DCAC_ADR);
		buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
		buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
		break;
	case S5P_MFC_CODEC_VC1_DEC:
	case S5P_MFC_CODEC_VC1RCV_DEC:
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_NB_DCAC_ADR);
		buf_addr1 += S5P_FIMV_DEC_NB_DCAC_SIZE;
		buf_size1 -= S5P_FIMV_DEC_NB_DCAC_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_OT_LINE_ADR);
		buf_addr1 += S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		buf_size1 -= S5P_FIMV_DEC_OVERLAP_TRANSFORM_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_VC1_UP_NB_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_UPNB_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_UPNB_MV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_VC1_SA_MV_ADR);
		buf_addr1 += S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		buf_size1 -= S5P_FIMV_DEC_SUB_ANCHOR_MV_SIZE;
		/* Bitplanes are programmed in 3..1 order */
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_VC1_BITPLANE3_ADR);
		buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_VC1_BITPLANE2_ADR);
		buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_VC1_BITPLANE1_ADR);
		buf_addr1 += S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		buf_size1 -= S5P_FIMV_DEC_VC1_BITPLANE_SIZE;
		break;
	case S5P_MFC_CODEC_MPEG2_DEC:
		/* No scratch buffers needed */
		break;
	default:
		mfc_err("Unknown codec for decoding (%x)\n",
				ctx->codec_mode);
		return -EINVAL;
	}
	frame_size = ctx->luma_size;
	frame_size_ch = ctx->chroma_size;
	frame_size_mv = ctx->mv_size;
	mfc_debug(2, "Frm size: %d ch: %d mv: %d\n", frame_size,
			frame_size_ch, frame_size_mv);
	for (i = 0; i < ctx->total_dpb_count; i++) {
		/* Bank2: luma planes come from the capture queue buffers */
		mfc_debug(2, "Luma %d: %x\n", i,
			ctx->dst_bufs[i].cookie.raw.luma);
		mfc_write(dev, OFFSETB(ctx->dst_bufs[i].cookie.raw.luma),
				S5P_FIMV_DEC_LUMA_ADR + i * 4);
		/* Chroma planes are addressed relative to port A */
		mfc_debug(2, "\tChroma %d: %x\n", i,
			ctx->dst_bufs[i].cookie.raw.chroma);
		mfc_write(dev, OFFSETA(ctx->dst_bufs[i].cookie.raw.chroma),
				S5P_FIMV_DEC_CHROMA_ADR + i * 4);
		if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC) {
			/* H.264: one MV buffer per DPB, carved from bank2 */
			mfc_debug(2, "\tBuf2: %x, size: %d\n",
					buf_addr2, buf_size2);
			mfc_write(dev, OFFSETB(buf_addr2),
					S5P_FIMV_H264_MV_ADR + i * 4);
			buf_addr2 += frame_size_mv;
			buf_size2 -= frame_size_mv;
		}
	}
	mfc_debug(2, "Buf1: %u, buf_size1: %d\n", buf_addr1, buf_size1);
	mfc_debug(2, "Buf 1/2 size after: %d/%d (frames %d)\n",
			buf_size1,  buf_size2, ctx->total_dpb_count);
	/* Running negative means the banks were sized too small */
	if (buf_size1 < 0 || buf_size2 < 0) {
		mfc_debug(2, "Not enough memory has been allocated\n");
		return -ENOMEM;
	}
	s5p_mfc_write_info_v5(ctx, frame_size, ALLOC_LUMA_DPB_SIZE);
	s5p_mfc_write_info_v5(ctx, frame_size_ch, ALLOC_CHROMA_DPB_SIZE);
	if (ctx->codec_mode == S5P_MFC_CODEC_H264_DEC)
		s5p_mfc_write_info_v5(ctx, frame_size_mv, ALLOC_MV_SIZE);
	/* Kick the INIT_BUFS command for this instance */
	mfc_write(dev, ((S5P_FIMV_CH_INIT_BUFS & S5P_FIMV_CH_MASK)
				<< S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
			S5P_FIMV_SI_CH0_INST_ID);
	return 0;
}

/* Set registers for encoding stream buffer */
static int s5p_mfc_set_enc_stream_buffer_v5(struct s5p_mfc_ctx *ctx,
		unsigned long addr, unsigned
		int size)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	mfc_write(dev, OFFSETA(addr), S5P_FIMV_ENC_SI_CH0_SB_ADR);
	mfc_write(dev, size, S5P_FIMV_ENC_SI_CH0_SB_SIZE);
	return 0;
}

/* Program the current raw source frame (luma/chroma) for encoding */
static void s5p_mfc_set_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
		unsigned long y_addr, unsigned long c_addr)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	/* Source frames live in bank2 (port B) address space */
	mfc_write(dev, OFFSETB(y_addr), S5P_FIMV_ENC_SI_CH0_CUR_Y_ADR);
	mfc_write(dev, OFFSETB(c_addr), S5P_FIMV_ENC_SI_CH0_CUR_C_ADR);
}

/* Read back which source frame the hardware just encoded */
static void s5p_mfc_get_enc_frame_buffer_v5(struct s5p_mfc_ctx *ctx,
		unsigned long *y_addr, unsigned long *c_addr)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	/* Registers hold port-B-relative offsets; convert back to bus addrs */
	*y_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_Y_ADDR)
							<< MFC_OFFSET_SHIFT);
	*c_addr = dev->bank2 + (mfc_read(dev, S5P_FIMV_ENCODED_C_ADDR)
							<< MFC_OFFSET_SHIFT);
}

/* Set encoding ref & codec buffer */
static int s5p_mfc_set_enc_ref_buffer_v5(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	size_t buf_addr1, buf_addr2;
	size_t buf_size1, buf_size2;
	unsigned int enc_ref_y_size, enc_ref_c_size;
	unsigned int guard_width, guard_height;
	int i;

	buf_addr1 = ctx->bank1.dma;
	buf_size1 = ctx->bank1.size;
	buf_addr2 = ctx->bank2.dma;
	buf_size2 = ctx->bank2.size;
	/*
	 * Recompute the reconstructed-frame plane sizes using the same
	 * formulas as the allocation path in alloc_codec_buffers.
	 */
	enc_ref_y_size = ALIGN(ctx->img_width, S5P_FIMV_NV12MT_HALIGN)
		* ALIGN(ctx->img_height, S5P_FIMV_NV12MT_VALIGN);
	enc_ref_y_size = ALIGN(enc_ref_y_size, S5P_FIMV_NV12MT_SALIGN);
	if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC) {
		enc_ref_c_size = ALIGN(ctx->img_width,
				S5P_FIMV_NV12MT_HALIGN)
			* ALIGN((ctx->img_height >> 1),
				S5P_FIMV_NV12MT_VALIGN);
		enc_ref_c_size = ALIGN(enc_ref_c_size,
				S5P_FIMV_NV12MT_SALIGN);
	} else {
		guard_width = ALIGN(ctx->img_width + 16,
				S5P_FIMV_NV12MT_HALIGN);
		guard_height = ALIGN((ctx->img_height >> 1) + 4,
				S5P_FIMV_NV12MT_VALIGN);
		enc_ref_c_size = ALIGN(guard_width * guard_height,
				S5P_FIMV_NV12MT_SALIGN);
	}
	mfc_debug(2, "buf_size1: %d, buf_size2: %d\n", buf_size1, buf_size2);
	switch (ctx->codec_mode) {
	case S5P_MFC_CODEC_H264_ENC:
		/* Ref luma 0/1 in bank1, ref luma 2/3 in bank2 */
		for (i = 0; i < 2; i++) {
			mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
			buf_addr1 += enc_ref_y_size;
			buf_size1 -= enc_ref_y_size;

			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
			buf_addr2 += enc_ref_y_size;
			buf_size2 -= enc_ref_y_size;
		}
		/* All four ref chroma planes in bank2 */
		for (i = 0; i < 4; i++) {
			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
			buf_addr2 += enc_ref_c_size;
			buf_size2 -= enc_ref_c_size;
		}
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H264_UP_MV_ADR);
		buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
		buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_H264_COZERO_FLAG_ADR);
		buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
		buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_H264_UP_INTRA_MD_ADR);
		buf_addr1 += S5P_FIMV_ENC_INTRAMD_SIZE;
		buf_size1 -= S5P_FIMV_ENC_INTRAMD_SIZE;
		mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_H264_UP_INTRA_PRED_ADR);
		buf_addr2 += S5P_FIMV_ENC_INTRAPRED_SIZE;
		buf_size2 -= S5P_FIMV_ENC_INTRAPRED_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_H264_NBOR_INFO_ADR);
		buf_addr1 += S5P_FIMV_ENC_NBORINFO_SIZE;
		buf_size1 -= S5P_FIMV_ENC_NBORINFO_SIZE;
		mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
			buf_size1, buf_size2);
		break;
	case S5P_MFC_CODEC_MPEG4_ENC:
		for (i = 0; i < 2; i++) {
			mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
			buf_addr1 += enc_ref_y_size;
			buf_size1 -= enc_ref_y_size;

			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
			buf_addr2 += enc_ref_y_size;
			buf_size2 -= enc_ref_y_size;
		}
		for (i = 0; i < 4; i++) {
			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
			buf_addr2 += enc_ref_c_size;
			buf_size2 -= enc_ref_c_size;
		}
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_MPEG4_UP_MV_ADR);
		buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
		buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_MPEG4_COZERO_FLAG_ADR);
		buf_addr1 += S5P_FIMV_ENC_COLFLG_SIZE;
		buf_size1 -= S5P_FIMV_ENC_COLFLG_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_MPEG4_ACDC_COEF_ADR);
		buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
		buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
		mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
			buf_size1, buf_size2);
		break;
	case S5P_MFC_CODEC_H263_ENC:
		for (i = 0; i < 2; i++) {
			mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_ENC_REF0_LUMA_ADR + (4 * i));
			buf_addr1 += enc_ref_y_size;
			buf_size1 -= enc_ref_y_size;

			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF2_LUMA_ADR + (4 * i));
			buf_addr2 += enc_ref_y_size;
			buf_size2 -= enc_ref_y_size;
		}
		for (i = 0; i < 4; i++) {
			mfc_write(dev, OFFSETB(buf_addr2),
				S5P_FIMV_ENC_REF0_CHROMA_ADR + (4 * i));
			buf_addr2 += enc_ref_c_size;
			buf_size2 -= enc_ref_c_size;
		}
		mfc_write(dev, OFFSETA(buf_addr1), S5P_FIMV_H263_UP_MV_ADR);
		buf_addr1 += S5P_FIMV_ENC_UPMV_SIZE;
		buf_size1 -= S5P_FIMV_ENC_UPMV_SIZE;
		mfc_write(dev, OFFSETA(buf_addr1),
				S5P_FIMV_H263_ACDC_COEF_ADR);
		buf_addr1 += S5P_FIMV_ENC_ACDCCOEF_SIZE;
		buf_size1 -= S5P_FIMV_ENC_ACDCCOEF_SIZE;
		mfc_debug(2, "buf_size1: %d, buf_size2: %d\n",
			buf_size1, buf_size2);
		break;
	default:
		mfc_err("Unknown codec set for encoding: %d\n",
			ctx->codec_mode);
		return -EINVAL;
	}
	return 0;
}

/*
 * Program the codec-independent encoder parameters (dimensions, GOP,
 * multi-slice, padding, frame-level rate control) into registers and the
 * shared-memory control words.
 */
static int s5p_mfc_set_enc_params(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_enc_params *p = &ctx->enc_params;
	unsigned int reg;
	unsigned int shm;

	/* width */
	mfc_write(dev, ctx->img_width, S5P_FIMV_ENC_HSIZE_PX);
	/* height */
	mfc_write(dev, ctx->img_height, S5P_FIMV_ENC_VSIZE_PX);
	/* pictype : enable, IDR period */
	reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
	reg |= (1 << 18);
	reg &= ~(0xFFFF);
	reg |= p->gop_size;
	mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
	mfc_write(dev, 0, S5P_FIMV_ENC_B_RECON_WRITE_ON);
	/* multi-slice control */
	/* multi-slice MB number or bit size */
	mfc_write(dev, p->slice_mode, S5P_FIMV_ENC_MSLICE_CTRL);
	if (p->slice_mode == V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_MB) {
		mfc_write(dev, p->slice_mb, S5P_FIMV_ENC_MSLICE_MB);
	} else if (p->slice_mode ==
			V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES) {
		mfc_write(dev, p->slice_bit, S5P_FIMV_ENC_MSLICE_BIT);
	} else {
		/* Multi-slice disabled: clear both limit registers */
		mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_MB);
		mfc_write(dev, 0, S5P_FIMV_ENC_MSLICE_BIT);
	}
	/* cyclic intra refresh */
	mfc_write(dev, p->intra_refresh_mb, S5P_FIMV_ENC_CIR_CTRL);
	/* memory structure cur. frame: 0 = linear NV12M, 3 = tiled NV12MT */
	if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
		mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
	else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
		mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
	/* padding control & value */
	reg = mfc_read(dev, S5P_FIMV_ENC_PADDING_CTRL);
	if (p->pad) {
		/** enable */
		reg |= (1 << 31);
		/** cr value */
		reg &= ~(0xFF << 16);
		reg |= (p->pad_cr << 16);
		/** cb value */
		reg &= ~(0xFF << 8);
		reg |= (p->pad_cb << 8);
		/** y value */
		reg &= ~(0xFF);
		reg |= (p->pad_luma);
	} else {
		/** disable & all value clear */
		reg = 0;
	}
	mfc_write(dev, reg, S5P_FIMV_ENC_PADDING_CTRL);
	/* rate control config. */
	reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
	/** frame-level rate control */
	reg &= ~(0x1 << 9);
	reg |= (p->rc_frame << 9);
	mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
	/* bit rate */
	if (p->rc_frame)
		mfc_write(dev, p->rc_bitrate, S5P_FIMV_ENC_RC_BIT_RATE);
	else
		mfc_write(dev, 0, S5P_FIMV_ENC_RC_BIT_RATE);
	/* reaction coefficient */
	if (p->rc_frame)
		mfc_write(dev, p->rc_reaction_coeff, S5P_FIMV_ENC_RC_RPARA);
	shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
	/* seq header ctrl */
	shm &= ~(0x1 << 3);
	shm |= (p->seq_hdr_mode << 3);
	/* frame skip mode */
	shm &= ~(0x3 << 1);
	shm |= (p->frame_skip_mode << 1);
	s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
	/* fixed target bit */
	s5p_mfc_write_info_v5(ctx, p->fixed_target_bit, RC_CONTROL_CONFIG);
	return 0;
}

/*
 * Program the H.264-specific encoder parameters on top of the common ones
 * set by s5p_mfc_set_enc_params().
 */
static int s5p_mfc_set_enc_params_h264(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_enc_params *p = &ctx->enc_params;
	struct s5p_mfc_h264_enc_params *p_264 = &p->codec.h264;
	unsigned int reg;
	unsigned int shm;

	s5p_mfc_set_enc_params(ctx);
	/* pictype : number of B */
	reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
	/* num_b_frame - 0 ~ 2 */
	reg &= ~(0x3 << 16);
	reg |= (p->num_b_frame << 16);
	mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
	/* profile & level */
	reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
	/* level */
	reg &= ~(0xFF << 8);
	reg |= (p_264->level << 8);
	/* profile - 0 ~ 2 */
	reg &= ~(0x3F);
	reg |= p_264->profile;
	mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
	/* interlace */
	mfc_write(dev, p_264->interlace, S5P_FIMV_ENC_PIC_STRUCT);
	/* height: interlaced streams are coded as half-height fields */
	if (p_264->interlace)
		mfc_write(dev, ctx->img_height >> 1, S5P_FIMV_ENC_VSIZE_PX);
	/* loopfilter ctrl */
	mfc_write(dev, p_264->loop_filter_mode, S5P_FIMV_ENC_LF_CTRL);
	/*
	 * loopfilter alpha offset: negative offsets are encoded with sign
	 * bit 0x10 plus the two's complement of the magnitude
	 */
	if (p_264->loop_filter_alpha < 0) {
		reg = 0x10;
		reg |= (0xFF - p_264->loop_filter_alpha) + 1;
	} else {
		reg = 0x00;
		reg |= (p_264->loop_filter_alpha & 0xF);
	}
	mfc_write(dev, reg, S5P_FIMV_ENC_ALPHA_OFF);
	/* loopfilter beta offset: same encoding as alpha above */
	if (p_264->loop_filter_beta < 0) {
		reg = 0x10;
		reg |= (0xFF - p_264->loop_filter_beta) + 1;
	} else {
		reg = 0x00;
		reg |= (p_264->loop_filter_beta & 0xF);
	}
	mfc_write(dev, reg, S5P_FIMV_ENC_BETA_OFF);
	/* entropy coding mode: 1 = CABAC, 0 = CAVLC */
	if (p_264->entropy_mode == V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC)
		mfc_write(dev, 1, S5P_FIMV_ENC_H264_ENTROPY_MODE);
	else
		mfc_write(dev, 0, S5P_FIMV_ENC_H264_ENTROPY_MODE);
	/* number of ref. picture */
	reg = mfc_read(dev, S5P_FIMV_ENC_H264_NUM_OF_REF);
	/* num of ref. pictures of P */
	reg &= ~(0x3 << 5);
	reg |= (p_264->num_ref_pic_4p << 5);
	/* max number of ref. pictures */
	reg &= ~(0x1F);
	reg |= p_264->max_ref_pic;
	mfc_write(dev, reg, S5P_FIMV_ENC_H264_NUM_OF_REF);
	/* 8x8 transform enable */
	mfc_write(dev, p_264->_8x8_transform, S5P_FIMV_ENC_H264_TRANS_FLAG);
	/* rate control config. */
	reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
	/* macroblock level rate control */
	reg &= ~(0x1 << 8);
	reg |= (p->rc_mb << 8);
	/* frame QP */
	reg &= ~(0x3F);
	reg |= p_264->rc_frame_qp;
	mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
	/* frame rate */
	if (p->rc_frame && p->rc_framerate_denom)
		mfc_write(dev, p->rc_framerate_num * 1000
			/ p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
	else
		mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
	/* max & min value of QP */
	reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
	/* max QP */
	reg &= ~(0x3F << 8);
	reg |= (p_264->rc_max_qp << 8);
	/* min QP */
	reg &= ~(0x3F);
	reg |= p_264->rc_min_qp;
	mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
	/* macroblock adaptive scaling features */
	if (p->rc_mb) {
		reg = mfc_read(dev, S5P_FIMV_ENC_RC_MB_CTRL);
		/* dark region */
		reg &= ~(0x1 << 3);
		reg |= (p_264->rc_mb_dark << 3);
		/* smooth region */
		reg &= ~(0x1 << 2);
		reg |= (p_264->rc_mb_smooth << 2);
		/* static region */
		reg &= ~(0x1 << 1);
		reg |= (p_264->rc_mb_static << 1);
		/* high activity region */
		reg &= ~(0x1);
		reg |= p_264->rc_mb_activity;
		mfc_write(dev, reg, S5P_FIMV_ENC_RC_MB_CTRL);
	}
	/* Fixed QPs are only honoured when rate control is fully off */
	if (!p->rc_frame && !p->rc_mb) {
		shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
		shm &= ~(0xFFF);
		shm |= ((p_264->rc_b_frame_qp & 0x3F) << 6);
		shm |= (p_264->rc_p_frame_qp & 0x3F);
		s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
	}
	/* extended encoder ctrl */
	shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
	/*
	 * AR VUI control.
	 * NOTE(review): the mask clears bit 15 but the value is shifted by
	 * only 1 — the shifts do not match and the enable bit is likely
	 * meant to be (p_264->vui_sar << 15). Confirm against the MFC v5
	 * firmware shared-memory layout before changing.
	 */
	shm &= ~(0x1 << 15);
	shm |= (p_264->vui_sar << 1);
	s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
	if (p_264->vui_sar) {
		/* aspect ration IDC */
		shm = s5p_mfc_read_info_v5(ctx, SAMPLE_ASPECT_RATIO_IDC);
		shm &= ~(0xFF);
		shm |= p_264->vui_sar_idc;
		s5p_mfc_write_info_v5(ctx, shm, SAMPLE_ASPECT_RATIO_IDC);
		if (p_264->vui_sar_idc == 0xFF) {
			/* IDC 0xFF = Extended_SAR: explicit width/height */
			shm = s5p_mfc_read_info_v5(ctx, EXTENDED_SAR);
			shm &= ~(0xFFFFFFFF);
			shm |= p_264->vui_ext_sar_width << 16;
			shm |= p_264->vui_ext_sar_height;
			s5p_mfc_write_info_v5(ctx, shm, EXTENDED_SAR);
		}
	}
	/* intra picture period for H.264 */
	shm = s5p_mfc_read_info_v5(ctx, H264_I_PERIOD);
	/* control */
	shm &= ~(0x1 << 16);
	shm |= (p_264->open_gop << 16);
	/* value */
	if (p_264->open_gop) {
		shm &= ~(0xFFFF);
		shm |= p_264->open_gop_size;
	}
	s5p_mfc_write_info_v5(ctx, shm, H264_I_PERIOD);
	/* extended encoder ctrl */
	shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
	/* vbv buffer size */
	if (p->frame_skip_mode ==
			V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
		shm &= ~(0xFFFF << 16);
		shm |= (p_264->cpb_size << 16);
	}
	s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
	return 0;
}

/*
 * Program the MPEG-4-specific encoder parameters on top of the common
 * ones set by s5p_mfc_set_enc_params().
 */
static int s5p_mfc_set_enc_params_mpeg4(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_enc_params *p = &ctx->enc_params;
	struct s5p_mfc_mpeg4_enc_params *p_mpeg4 = &p->codec.mpeg4;
	unsigned int reg;
	unsigned int shm;
	unsigned int framerate;

	s5p_mfc_set_enc_params(ctx);
	/* pictype : number of B */
	reg = mfc_read(dev, S5P_FIMV_ENC_PIC_TYPE_CTRL);
	/* num_b_frame - 0 ~ 2 */
	reg &= ~(0x3 << 16);
	reg |= (p->num_b_frame << 16);
	mfc_write(dev, reg, S5P_FIMV_ENC_PIC_TYPE_CTRL);
	/* profile & level */
	reg = mfc_read(dev, S5P_FIMV_ENC_PROFILE);
	/* level */
	reg &= ~(0xFF << 8);
	reg |= (p_mpeg4->level << 8);
	/* profile - 0 ~ 2 */
	reg &= ~(0x3F);
	reg |= p_mpeg4->profile;
	mfc_write(dev, reg, S5P_FIMV_ENC_PROFILE);
	/* quarter_pixel */
	mfc_write(dev, p_mpeg4->quarter_pixel, S5P_FIMV_ENC_MPEG4_QUART_PXL);
	/* qp: fixed QPs apply only with frame rate control off */
	if (!p->rc_frame) {
		shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
		shm &= ~(0xFFF);
		shm |= ((p_mpeg4->rc_b_frame_qp & 0x3F) << 6);
		shm |= (p_mpeg4->rc_p_frame_qp & 0x3F);
		s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
	}
	/* frame rate */
	if (p->rc_frame) {
		if (p->rc_framerate_denom > 0) {
			framerate = p->rc_framerate_num * 1000
					/ p->rc_framerate_denom;
			mfc_write(dev, framerate,
					S5P_FIMV_ENC_RC_FRAME_RATE);
			/* VOP timing: bit 31 enable, num/denom packed */
			shm = s5p_mfc_read_info_v5(ctx, RC_VOP_TIMING);
			shm &= ~(0xFFFFFFFF);
			shm |= (1 << 31);
			shm |= ((p->rc_framerate_num & 0x7FFF) << 16);
			shm |= (p->rc_framerate_denom &
					0xFFFF);
			s5p_mfc_write_info_v5(ctx, shm, RC_VOP_TIMING);
		}
	} else {
		mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
	}
	/* rate control config. */
	reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
	/* frame QP */
	reg &= ~(0x3F);
	reg |= p_mpeg4->rc_frame_qp;
	mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
	/* max & min value of QP */
	reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
	/* max QP */
	reg &= ~(0x3F << 8);
	reg |= (p_mpeg4->rc_max_qp << 8);
	/* min QP */
	reg &= ~(0x3F);
	reg |= p_mpeg4->rc_min_qp;
	mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
	/* extended encoder ctrl */
	shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
	/* vbv buffer size */
	if (p->frame_skip_mode ==
			V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
		shm &= ~(0xFFFF << 16);
		shm |= (p->vbv_size << 16);
	}
	s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
	return 0;
}

/*
 * Program the H.263-specific encoder parameters. H.263 shares the MPEG-4
 * parameter struct (p->codec.mpeg4) and supports no B frames.
 */
static int s5p_mfc_set_enc_params_h263(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_enc_params *p = &ctx->enc_params;
	struct s5p_mfc_mpeg4_enc_params *p_h263 = &p->codec.mpeg4;
	unsigned int reg;
	unsigned int shm;

	s5p_mfc_set_enc_params(ctx);
	/* qp: fixed P-frame QP applies only with rate control off */
	if (!p->rc_frame) {
		shm = s5p_mfc_read_info_v5(ctx, P_B_FRAME_QP);
		shm &= ~(0xFFF);
		shm |= (p_h263->rc_p_frame_qp & 0x3F);
		s5p_mfc_write_info_v5(ctx, shm, P_B_FRAME_QP);
	}
	/* frame rate */
	if (p->rc_frame && p->rc_framerate_denom)
		mfc_write(dev, p->rc_framerate_num * 1000
			/ p->rc_framerate_denom, S5P_FIMV_ENC_RC_FRAME_RATE);
	else
		mfc_write(dev, 0, S5P_FIMV_ENC_RC_FRAME_RATE);
	/* rate control config. */
	reg = mfc_read(dev, S5P_FIMV_ENC_RC_CONFIG);
	/* frame QP */
	reg &= ~(0x3F);
	reg |= p_h263->rc_frame_qp;
	mfc_write(dev, reg, S5P_FIMV_ENC_RC_CONFIG);
	/* max & min value of QP */
	reg = mfc_read(dev, S5P_FIMV_ENC_RC_QBOUND);
	/* max QP */
	reg &= ~(0x3F << 8);
	reg |= (p_h263->rc_max_qp << 8);
	/* min QP */
	reg &= ~(0x3F);
	reg |= p_h263->rc_min_qp;
	mfc_write(dev, reg, S5P_FIMV_ENC_RC_QBOUND);
	/* extended encoder ctrl */
	shm = s5p_mfc_read_info_v5(ctx, EXT_ENC_CONTROL);
	/* vbv buffer size */
	if (p->frame_skip_mode ==
			V4L2_MPEG_MFC51_VIDEO_FRAME_SKIP_MODE_BUF_LIMIT) {
		shm &= ~(0xFFFF << 16);
		shm |= (p->vbv_size << 16);
	}
	s5p_mfc_write_info_v5(ctx, shm, EXT_ENC_CONTROL);
	return 0;
}

/* Initialize decoding */
static int s5p_mfc_init_decode_v5(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	s5p_mfc_set_shared_buffer(ctx);
	/* Setup loop filter, for decoding this is only valid for MPEG4 */
	if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_DEC)
		mfc_write(dev, ctx->loop_filter_mpeg4, S5P_FIMV_ENC_LF_CTRL);
	else
		mfc_write(dev, 0, S5P_FIMV_ENC_LF_CTRL);
	/* Slice interface + display delay configuration */
	mfc_write(dev,
		((ctx->slice_interface & S5P_FIMV_SLICE_INT_MASK)
			<< S5P_FIMV_SLICE_INT_SHIFT) |
		(ctx->display_delay_enable << S5P_FIMV_DDELAY_ENA_SHIFT) |
		((ctx->display_delay & S5P_FIMV_DDELAY_VAL_MASK)
			<< S5P_FIMV_DDELAY_VAL_SHIFT),
		S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
	/* Kick header parsing (SEQ_HEADER command) for this instance */
	mfc_write(dev,
		((S5P_FIMV_CH_SEQ_HEADER & S5P_FIMV_CH_MASK)
			<< S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
		S5P_FIMV_SI_CH0_INST_ID);
	return 0;
}

/* Set or clear the DPB flush flag in the DPB config register */
static void s5p_mfc_set_flush(struct s5p_mfc_ctx *ctx, int flush)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned int dpb;

	if (flush)
		dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) |
			(S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
	else
		dpb = mfc_read(dev, S5P_FIMV_SI_CH0_DPB_CONF_CTRL) &
			~(S5P_FIMV_DPB_FLUSH_MASK << S5P_FIMV_DPB_FLUSH_SHIFT);
	mfc_write(dev, dpb, S5P_FIMV_SI_CH0_DPB_CONF_CTRL);
}

/* Decode a single frame */
static int s5p_mfc_decode_one_frame_v5(struct s5p_mfc_ctx *ctx,
					enum s5p_mfc_decode_arg
					last_frame)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	/* Hand back the buffers userspace has released since last run */
	mfc_write(dev, ctx->dec_dst_flag, S5P_FIMV_SI_CH0_RELEASE_BUF);
	s5p_mfc_set_shared_buffer(ctx);
	s5p_mfc_set_flush(ctx, ctx->dpb_flush_flag);
	/*
	 * Issue different commands to instance basing on whether it
	 * is the last frame or not.
	 */
	switch (last_frame) {
	case MFC_DEC_FRAME:
		mfc_write(dev, ((S5P_FIMV_CH_FRAME_START & S5P_FIMV_CH_MASK)
			<< S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
			S5P_FIMV_SI_CH0_INST_ID);
		break;
	case MFC_DEC_LAST_FRAME:
		mfc_write(dev, ((S5P_FIMV_CH_LAST_FRAME & S5P_FIMV_CH_MASK)
			<< S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
			S5P_FIMV_SI_CH0_INST_ID);
		break;
	case MFC_DEC_RES_CHANGE:
		/* Restart decoding after a mid-stream resolution change */
		mfc_write(dev,
			((S5P_FIMV_CH_FRAME_START_REALLOC & S5P_FIMV_CH_MASK)
				<< S5P_FIMV_CH_SHIFT) | (ctx->inst_no),
			S5P_FIMV_SI_CH0_INST_ID);
		break;
	}
	mfc_debug(2, "Decoding a usual frame\n");
	return 0;
}

/*
 * Initialize encoding: program the per-codec parameters and request the
 * stream (sequence) header from the firmware.
 */
static int s5p_mfc_init_encode_v5(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	if (ctx->codec_mode == S5P_MFC_CODEC_H264_ENC)
		s5p_mfc_set_enc_params_h264(ctx);
	else if (ctx->codec_mode == S5P_MFC_CODEC_MPEG4_ENC)
		s5p_mfc_set_enc_params_mpeg4(ctx);
	else if (ctx->codec_mode == S5P_MFC_CODEC_H263_ENC)
		s5p_mfc_set_enc_params_h263(ctx);
	else {
		mfc_err("Unknown codec for encoding (%x)\n",
			ctx->codec_mode);
		return -EINVAL;
	}
	s5p_mfc_set_shared_buffer(ctx);
	/*
	 * NOTE(review): this uses literal (cmd << 16) & 0x70000 instead of
	 * the S5P_FIMV_CH_MASK/S5P_FIMV_CH_SHIFT idiom used everywhere
	 * else in this file — verify the masks are equivalent.
	 */
	mfc_write(dev, ((S5P_FIMV_CH_SEQ_HEADER << 16) & 0x70000) |
		(ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);
	return 0;
}

/* Encode a single frame */
static int s5p_mfc_encode_one_frame_v5(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	int cmd;

	/* memory structure cur. frame: 0 = linear NV12M, 3 = tiled NV12MT */
	if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12M)
		mfc_write(dev, 0, S5P_FIMV_ENC_MAP_FOR_CUR);
	else if (ctx->src_fmt->fourcc == V4L2_PIX_FMT_NV12MT)
		mfc_write(dev, 3, S5P_FIMV_ENC_MAP_FOR_CUR);
	s5p_mfc_set_shared_buffer(ctx);

	if (ctx->state == MFCINST_FINISHING)
		cmd = S5P_FIMV_CH_LAST_FRAME;
	else
		cmd = S5P_FIMV_CH_FRAME_START;
	mfc_write(dev, ((cmd & S5P_FIMV_CH_MASK) << S5P_FIMV_CH_SHIFT)
			| (ctx->inst_no), S5P_FIMV_SI_CH0_INST_ID);

	return 0;
}

/*
 * Pick the next context with pending work, round-robin starting after
 * the current one. Returns the context number or -EAGAIN if none.
 */
static int s5p_mfc_get_new_ctx(struct s5p_mfc_dev *dev)
{
	unsigned long flags;
	int new_ctx;
	int cnt;

	spin_lock_irqsave(&dev->condlock, flags);
	new_ctx = (dev->curr_ctx + 1) % MFC_NUM_CONTEXTS;
	cnt = 0;
	while (!test_bit(new_ctx, &dev->ctx_work_bits)) {
		new_ctx = (new_ctx + 1) % MFC_NUM_CONTEXTS;
		if (++cnt > MFC_NUM_CONTEXTS) {
			/* No contexts to run */
			spin_unlock_irqrestore(&dev->condlock, flags);
			return -EAGAIN;
		}
	}
	spin_unlock_irqrestore(&dev->condlock, flags);
	return new_ctx;
}

/* Restart decoding with empty stream input after a resolution change */
static void s5p_mfc_run_res_change(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;

	s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0);
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	s5p_mfc_decode_one_frame_v5(ctx, MFC_DEC_RES_CHANGE);
}

/* Feed the next queued stream chunk to the decoder */
static int s5p_mfc_run_dec_frame(struct s5p_mfc_ctx *ctx, int last_frame)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *temp_vb;
	unsigned long flags;
	unsigned int index;

	if (ctx->state == MFCINST_FINISHING) {
		/* Flush: issue LAST_FRAME with an empty stream buffer */
		last_frame = MFC_DEC_LAST_FRAME;
		s5p_mfc_set_dec_stream_buffer_v5(ctx, 0, 0, 0);
		dev->curr_ctx = ctx->num;
		s5p_mfc_clean_ctx_int_flags(ctx);
		s5p_mfc_decode_one_frame_v5(ctx, last_frame);
		return 0;
	}

	spin_lock_irqsave(&dev->irqlock, flags);
	/* Frames are being decoded */
	if (list_empty(&ctx->src_queue)) {
		mfc_debug(2, "No src buffers\n");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return -EAGAIN;
	}
	/* Get the next source buffer */
	temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	temp_vb->flags |= MFC_BUF_FLAG_USED;
	s5p_mfc_set_dec_stream_buffer_v5(ctx,
		vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
		ctx->consumed_stream, temp_vb->b->v4l2_planes[0].bytesused);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	/* NOTE(review): 'index' is computed but never used below */
	index = temp_vb->b->v4l2_buf.index;
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	/* An empty payload signals end of stream */
	if (temp_vb->b->v4l2_planes[0].bytesused == 0) {
		last_frame = MFC_DEC_LAST_FRAME;
		mfc_debug(2, "Setting ctx->state to FINISHING\n");
		ctx->state = MFCINST_FINISHING;
	}
	s5p_mfc_decode_one_frame_v5(ctx, last_frame);
	return 0;
}

/* Feed the next queued raw frame to the encoder */
static int s5p_mfc_run_enc_frame(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	struct s5p_mfc_buf *dst_mb;
	struct s5p_mfc_buf *src_mb;
	unsigned long src_y_addr, src_c_addr, dst_addr;
	unsigned int dst_size;

	spin_lock_irqsave(&dev->irqlock, flags);
	/* When finishing, an empty source queue is allowed (null frame) */
	if (list_empty(&ctx->src_queue) && ctx->state != MFCINST_FINISHING) {
		mfc_debug(2, "no src buffers\n");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return -EAGAIN;
	}
	if (list_empty(&ctx->dst_queue)) {
		mfc_debug(2, "no dst buffers\n");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return -EAGAIN;
	}
	if (list_empty(&ctx->src_queue)) {
		/* send null frame */
		s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2, dev->bank2);
		src_mb = NULL;
	} else {
		src_mb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf,
									list);
		src_mb->flags |= MFC_BUF_FLAG_USED;
		if (src_mb->b->v4l2_planes[0].bytesused == 0) {
			/* send null frame */
			s5p_mfc_set_enc_frame_buffer_v5(ctx, dev->bank2,
								dev->bank2);
			ctx->state = MFCINST_FINISHING;
		} else {
			src_y_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
									0);
			src_c_addr = vb2_dma_contig_plane_dma_addr(src_mb->b,
									1);
			s5p_mfc_set_enc_frame_buffer_v5(ctx, src_y_addr,
								src_c_addr);
			if (src_mb->flags & MFC_BUF_FLAG_EOS)
				ctx->state = MFCINST_FINISHING;
		}
	}
	dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
	dst_mb->flags |= MFC_BUF_FLAG_USED;
	dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
	dst_size = vb2_plane_size(dst_mb->b, 0);
	s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	mfc_debug(2, "encoding buffer with index=%d state=%d\n",
			src_mb ? src_mb->b->v4l2_buf.index : -1, ctx->state);
	s5p_mfc_encode_one_frame_v5(ctx);
	return 0;
}

/* Start decoding initialization: send the stream header for parsing */
static void s5p_mfc_run_init_dec(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	struct s5p_mfc_buf *temp_vb;

	/* Initializing decoding - parsing header */
	spin_lock_irqsave(&dev->irqlock, flags);
	mfc_debug(2, "Preparing to init decoding\n");
	temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	s5p_mfc_set_dec_desc_buffer(ctx);
	mfc_debug(2, "Header size: %d\n",
			temp_vb->b->v4l2_planes[0].bytesused);
	s5p_mfc_set_dec_stream_buffer_v5(ctx,
			vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
			0, temp_vb->b->v4l2_planes[0].bytesused);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	s5p_mfc_init_decode_v5(ctx);
}

/* Start encoding initialization: produce the sequence header */
static void s5p_mfc_run_init_enc(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	struct s5p_mfc_buf *dst_mb;
	unsigned long dst_addr;
	unsigned int dst_size;

	s5p_mfc_set_enc_ref_buffer_v5(ctx);
	spin_lock_irqsave(&dev->irqlock, flags);
	dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
	dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
	dst_size = vb2_plane_size(dst_mb->b, 0);
	s5p_mfc_set_enc_stream_buffer_v5(ctx, dst_addr, dst_size);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	s5p_mfc_init_encode_v5(ctx);
}

/*
 * After header parsing succeeded, program the decoded-picture buffers and
 * re-submit the header chunk so decoding proper can start.
 */
static int s5p_mfc_run_init_dec_buffers(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	unsigned long flags;
	struct s5p_mfc_buf *temp_vb;
	int ret;

	/*
	 * Header was parsed now starting processing
	 * First set the output frame buffers
	 */
	if (ctx->capture_state != QUEUE_BUFS_MMAPED) {
		mfc_err("It seems that not all destionation buffers were "
			"mmaped\nMFC requires that all destination are mmaped "
			"before starting processing\n");
		return -EAGAIN;
	}
	spin_lock_irqsave(&dev->irqlock, flags);
	if (list_empty(&ctx->src_queue)) {
		mfc_err("Header has been deallocated in the middle of"
			" initialization\n");
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return -EIO;
	}
	temp_vb = list_entry(ctx->src_queue.next, struct s5p_mfc_buf, list);
	mfc_debug(2, "Header size: %d\n",
			temp_vb->b->v4l2_planes[0].bytesused);
	s5p_mfc_set_dec_stream_buffer_v5(ctx,
			vb2_dma_contig_plane_dma_addr(temp_vb->b, 0),
			0, temp_vb->b->v4l2_planes[0].bytesused);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	dev->curr_ctx = ctx->num;
	s5p_mfc_clean_ctx_int_flags(ctx);
	ret = s5p_mfc_set_dec_frame_buffer_v5(ctx);
	if (ret) {
		mfc_err("Failed to alloc frame mem\n");
		ctx->state = MFCINST_ERROR;
	}
	return ret;
}

/* Try running an operation on hardware */
static void s5p_mfc_try_run_v5(struct s5p_mfc_dev *dev)
{
	struct s5p_mfc_ctx *ctx;
	int new_ctx;
	unsigned int ret = 0;

	if (test_bit(0, &dev->enter_suspend)) {
		mfc_debug(1, "Entering suspend so do not schedule any jobs\n");
		return;
	}
	/* Check whether hardware is not running */
	if (test_and_set_bit(0, &dev->hw_lock) != 0) {
		/* This is perfectly ok, the scheduled ctx should wait */
		mfc_debug(1, "Couldn't lock HW\n");
		return;
	}
	/* Choose the context to run */
	new_ctx = s5p_mfc_get_new_ctx(dev);
	if (new_ctx < 0) {
		/* No contexts to run */
		if (test_and_clear_bit(0, &dev->hw_lock) == 0) {
			mfc_err("Failed to unlock hardware\n");
			return;
		}
		mfc_debug(1, "No ctx is scheduled to be run\n");
		return;
	}
	ctx = dev->ctx[new_ctx];
	/* Got context to run in ctx */
	/*
	 * Last frame has already been sent to MFC.
* Now obtaining frames from MFC buffer */ s5p_mfc_clock_on(); if (ctx->type == MFCINST_DECODER) { s5p_mfc_set_dec_desc_buffer(ctx); switch (ctx->state) { case MFCINST_FINISHING: s5p_mfc_run_dec_frame(ctx, MFC_DEC_LAST_FRAME); break; case MFCINST_RUNNING: ret = s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME); break; case MFCINST_INIT: s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd, ctx); break; case MFCINST_RETURN_INST: s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd, ctx); break; case MFCINST_GOT_INST: s5p_mfc_run_init_dec(ctx); break; case MFCINST_HEAD_PARSED: ret = s5p_mfc_run_init_dec_buffers(ctx); mfc_debug(1, "head parsed\n"); break; case MFCINST_RES_CHANGE_INIT: s5p_mfc_run_res_change(ctx); break; case MFCINST_RES_CHANGE_FLUSH: s5p_mfc_run_dec_frame(ctx, MFC_DEC_FRAME); break; case MFCINST_RES_CHANGE_END: mfc_debug(2, "Finished remaining frames after resolution change\n"); ctx->capture_state = QUEUE_FREE; mfc_debug(2, "Will re-init the codec\n"); s5p_mfc_run_init_dec(ctx); break; default: ret = -EAGAIN; } } else if (ctx->type == MFCINST_ENCODER) { switch (ctx->state) { case MFCINST_FINISHING: case MFCINST_RUNNING: ret = s5p_mfc_run_enc_frame(ctx); break; case MFCINST_INIT: s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_hw_call(dev->mfc_cmds, open_inst_cmd, ctx); break; case MFCINST_RETURN_INST: s5p_mfc_clean_ctx_int_flags(ctx); ret = s5p_mfc_hw_call(dev->mfc_cmds, close_inst_cmd, ctx); break; case MFCINST_GOT_INST: s5p_mfc_run_init_enc(ctx); break; default: ret = -EAGAIN; } } else { mfc_err("Invalid context type: %d\n", ctx->type); ret = -EAGAIN; } if (ret) { /* Free hardware lock */ if (test_and_clear_bit(0, &dev->hw_lock) == 0) mfc_err("Failed to unlock hardware\n"); /* This is in deed imporant, as no operation has been * scheduled, reduce the clock count as no one will * ever do this, because no interrupt related to this try_run * will ever come from hardware. 
*/ s5p_mfc_clock_off(); } } static void s5p_mfc_cleanup_queue_v5(struct list_head *lh, struct vb2_queue *vq) { struct s5p_mfc_buf *b; int i; while (!list_empty(lh)) { b = list_entry(lh->next, struct s5p_mfc_buf, list); for (i = 0; i < b->b->num_planes; i++) vb2_set_plane_payload(b->b, i, 0); vb2_buffer_done(b->b, VB2_BUF_STATE_ERROR); list_del(&b->list); } } static void s5p_mfc_clear_int_flags_v5(struct s5p_mfc_dev *dev) { mfc_write(dev, 0, S5P_FIMV_RISC_HOST_INT); mfc_write(dev, 0, S5P_FIMV_RISC2HOST_CMD); mfc_write(dev, 0xffff, S5P_FIMV_SI_RTN_CHID); } static int s5p_mfc_get_dspl_y_adr_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_SI_DISPLAY_Y_ADR) << MFC_OFFSET_SHIFT; } static int s5p_mfc_get_dec_y_adr_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_SI_DECODE_Y_ADR) << MFC_OFFSET_SHIFT; } static int s5p_mfc_get_dspl_status_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_SI_DISPLAY_STATUS); } static int s5p_mfc_get_dec_status_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_SI_DECODE_STATUS); } static int s5p_mfc_get_dec_frame_type_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_DECODE_FRAME_TYPE) & S5P_FIMV_DECODE_FRAME_MASK; } static int s5p_mfc_get_disp_frame_type_v5(struct s5p_mfc_ctx *ctx) { return (s5p_mfc_read_info_v5(ctx, DISP_PIC_FRAME_TYPE) >> S5P_FIMV_SHARED_DISP_FRAME_TYPE_SHIFT) & S5P_FIMV_DECODE_FRAME_MASK; } static int s5p_mfc_get_consumed_stream_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_SI_CONSUMED_BYTES); } static int s5p_mfc_get_int_reason_v5(struct s5p_mfc_dev *dev) { int reason; reason = mfc_read(dev, S5P_FIMV_RISC2HOST_CMD) & S5P_FIMV_RISC2HOST_CMD_MASK; switch (reason) { case S5P_FIMV_R2H_CMD_OPEN_INSTANCE_RET: reason = S5P_MFC_R2H_CMD_OPEN_INSTANCE_RET; break; case S5P_FIMV_R2H_CMD_CLOSE_INSTANCE_RET: reason = S5P_MFC_R2H_CMD_CLOSE_INSTANCE_RET; break; case S5P_FIMV_R2H_CMD_SEQ_DONE_RET: reason = S5P_MFC_R2H_CMD_SEQ_DONE_RET; break; case 
S5P_FIMV_R2H_CMD_FRAME_DONE_RET: reason = S5P_MFC_R2H_CMD_FRAME_DONE_RET; break; case S5P_FIMV_R2H_CMD_SLICE_DONE_RET: reason = S5P_MFC_R2H_CMD_SLICE_DONE_RET; break; case S5P_FIMV_R2H_CMD_SYS_INIT_RET: reason = S5P_MFC_R2H_CMD_SYS_INIT_RET; break; case S5P_FIMV_R2H_CMD_FW_STATUS_RET: reason = S5P_MFC_R2H_CMD_FW_STATUS_RET; break; case S5P_FIMV_R2H_CMD_SLEEP_RET: reason = S5P_MFC_R2H_CMD_SLEEP_RET; break; case S5P_FIMV_R2H_CMD_WAKEUP_RET: reason = S5P_MFC_R2H_CMD_WAKEUP_RET; break; case S5P_FIMV_R2H_CMD_INIT_BUFFERS_RET: reason = S5P_MFC_R2H_CMD_INIT_BUFFERS_RET; break; case S5P_FIMV_R2H_CMD_ENC_COMPLETE_RET: reason = S5P_MFC_R2H_CMD_COMPLETE_SEQ_RET; break; case S5P_FIMV_R2H_CMD_ERR_RET: reason = S5P_MFC_R2H_CMD_ERR_RET; break; default: reason = S5P_MFC_R2H_CMD_EMPTY; } return reason; } static int s5p_mfc_get_int_err_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_RISC2HOST_ARG2); } static int s5p_mfc_err_dec_v5(unsigned int err) { return (err & S5P_FIMV_ERR_DEC_MASK) >> S5P_FIMV_ERR_DEC_SHIFT; } static int s5p_mfc_err_dspl_v5(unsigned int err) { return (err & S5P_FIMV_ERR_DSPL_MASK) >> S5P_FIMV_ERR_DSPL_SHIFT; } static int s5p_mfc_get_img_width_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_SI_HRESOL); } static int s5p_mfc_get_img_height_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_SI_VRESOL); } static int s5p_mfc_get_dpb_count_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_SI_BUF_NUMBER); } static int s5p_mfc_get_mv_count_v5(struct s5p_mfc_dev *dev) { /* NOP */ return -1; } static int s5p_mfc_get_inst_no_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_RISC2HOST_ARG1); } static int s5p_mfc_get_enc_strm_size_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_ENC_SI_STRM_SIZE); } static int s5p_mfc_get_enc_slice_type_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_ENC_SI_SLICE_TYPE); } static int s5p_mfc_get_enc_dpb_count_v5(struct s5p_mfc_dev *dev) { return -1; } static int 
s5p_mfc_get_enc_pic_count_v5(struct s5p_mfc_dev *dev) { return mfc_read(dev, S5P_FIMV_ENC_SI_PIC_CNT); } static int s5p_mfc_get_sei_avail_status_v5(struct s5p_mfc_ctx *ctx) { return s5p_mfc_read_info_v5(ctx, FRAME_PACK_SEI_AVAIL); } static int s5p_mfc_get_mvc_num_views_v5(struct s5p_mfc_dev *dev) { return -1; } static int s5p_mfc_get_mvc_view_id_v5(struct s5p_mfc_dev *dev) { return -1; } static unsigned int s5p_mfc_get_pic_type_top_v5(struct s5p_mfc_ctx *ctx) { return s5p_mfc_read_info_v5(ctx, PIC_TIME_TOP); } static unsigned int s5p_mfc_get_pic_type_bot_v5(struct s5p_mfc_ctx *ctx) { return s5p_mfc_read_info_v5(ctx, PIC_TIME_BOT); } static unsigned int s5p_mfc_get_crop_info_h_v5(struct s5p_mfc_ctx *ctx) { return s5p_mfc_read_info_v5(ctx, CROP_INFO_H); } static unsigned int s5p_mfc_get_crop_info_v_v5(struct s5p_mfc_ctx *ctx) { return s5p_mfc_read_info_v5(ctx, CROP_INFO_V); } /* Initialize opr function pointers for MFC v5 */ static struct s5p_mfc_hw_ops s5p_mfc_ops_v5 = { .alloc_dec_temp_buffers = s5p_mfc_alloc_dec_temp_buffers_v5, .release_dec_desc_buffer = s5p_mfc_release_dec_desc_buffer_v5, .alloc_codec_buffers = s5p_mfc_alloc_codec_buffers_v5, .release_codec_buffers = s5p_mfc_release_codec_buffers_v5, .alloc_instance_buffer = s5p_mfc_alloc_instance_buffer_v5, .release_instance_buffer = s5p_mfc_release_instance_buffer_v5, .alloc_dev_context_buffer = s5p_mfc_alloc_dev_context_buffer_v5, .release_dev_context_buffer = s5p_mfc_release_dev_context_buffer_v5, .dec_calc_dpb_size = s5p_mfc_dec_calc_dpb_size_v5, .enc_calc_src_size = s5p_mfc_enc_calc_src_size_v5, .set_dec_stream_buffer = s5p_mfc_set_dec_stream_buffer_v5, .set_dec_frame_buffer = s5p_mfc_set_dec_frame_buffer_v5, .set_enc_stream_buffer = s5p_mfc_set_enc_stream_buffer_v5, .set_enc_frame_buffer = s5p_mfc_set_enc_frame_buffer_v5, .get_enc_frame_buffer = s5p_mfc_get_enc_frame_buffer_v5, .set_enc_ref_buffer = s5p_mfc_set_enc_ref_buffer_v5, .init_decode = s5p_mfc_init_decode_v5, .init_encode = 
s5p_mfc_init_encode_v5, .encode_one_frame = s5p_mfc_encode_one_frame_v5, .try_run = s5p_mfc_try_run_v5, .cleanup_queue = s5p_mfc_cleanup_queue_v5, .clear_int_flags = s5p_mfc_clear_int_flags_v5, .write_info = s5p_mfc_write_info_v5, .read_info = s5p_mfc_read_info_v5, .get_dspl_y_adr = s5p_mfc_get_dspl_y_adr_v5, .get_dec_y_adr = s5p_mfc_get_dec_y_adr_v5, .get_dspl_status = s5p_mfc_get_dspl_status_v5, .get_dec_status = s5p_mfc_get_dec_status_v5, .get_dec_frame_type = s5p_mfc_get_dec_frame_type_v5, .get_disp_frame_type = s5p_mfc_get_disp_frame_type_v5, .get_consumed_stream = s5p_mfc_get_consumed_stream_v5, .get_int_reason = s5p_mfc_get_int_reason_v5, .get_int_err = s5p_mfc_get_int_err_v5, .err_dec = s5p_mfc_err_dec_v5, .err_dspl = s5p_mfc_err_dspl_v5, .get_img_width = s5p_mfc_get_img_width_v5, .get_img_height = s5p_mfc_get_img_height_v5, .get_dpb_count = s5p_mfc_get_dpb_count_v5, .get_mv_count = s5p_mfc_get_mv_count_v5, .get_inst_no = s5p_mfc_get_inst_no_v5, .get_enc_strm_size = s5p_mfc_get_enc_strm_size_v5, .get_enc_slice_type = s5p_mfc_get_enc_slice_type_v5, .get_enc_dpb_count = s5p_mfc_get_enc_dpb_count_v5, .get_enc_pic_count = s5p_mfc_get_enc_pic_count_v5, .get_sei_avail_status = s5p_mfc_get_sei_avail_status_v5, .get_mvc_num_views = s5p_mfc_get_mvc_num_views_v5, .get_mvc_view_id = s5p_mfc_get_mvc_view_id_v5, .get_pic_type_top = s5p_mfc_get_pic_type_top_v5, .get_pic_type_bot = s5p_mfc_get_pic_type_bot_v5, .get_crop_info_h = s5p_mfc_get_crop_info_h_v5, .get_crop_info_v = s5p_mfc_get_crop_info_v_v5, }; struct s5p_mfc_hw_ops *s5p_mfc_init_hw_ops_v5(void) { return &s5p_mfc_ops_v5; }
gpl-2.0
CyanogenMod/android_kernel_motorola_titan
drivers/watchdog/hpwdt.c
1550
21923
/*
 *	HP WatchDog Driver
 *	based on
 *
 *	SoftDog	0.05:	A Software Watchdog Device
 *
 *	(c) Copyright 2007 Hewlett-Packard Development Company, L.P.
 *	Thomas Mingarelli <thomas.mingarelli@hp.com>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	version 2 as published by the Free Software Foundation
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/device.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/watchdog.h>
#ifdef CONFIG_HPWDT_NMI_DECODING
#include <linux/dmi.h>
#include <linux/spinlock.h>
#include <linux/nmi.h>
#include <linux/kdebug.h>
#include <linux/notifier.h>
#include <asm/cacheflush.h>
#endif /* CONFIG_HPWDT_NMI_DECODING */
#include <asm/nmi.h>

#define HPWDT_VERSION			"1.3.0"
/* The iLO timer ticks every 128 ms */
#define SECS_TO_TICKS(secs)		((secs) * 1000 / 128)
#define TICKS_TO_SECS(ticks)		((ticks) * 128 / 1000)
#define HPWDT_MAX_TIMER			TICKS_TO_SECS(65535)
#define DEFAULT_MARGIN			30

static unsigned int soft_margin = DEFAULT_MARGIN;	/* in seconds */
static unsigned int reload;			/* the computed soft_margin */
static bool nowayout = WATCHDOG_NOWAYOUT;
static char expect_release;	/* set to 42 when magic char 'V' was written */
static unsigned long hpwdt_is_open;	/* bit 0: /dev/watchdog open flag */

static void __iomem *pci_mem_addr;		/* the PCI-memory address */
static unsigned long __iomem *hpwdt_timer_reg;	/* 16-bit reload register */
static unsigned long __iomem *hpwdt_timer_con;	/* control register */

static DEFINE_PCI_DEVICE_TABLE(hpwdt_devices) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_COMPAQ, 0xB203) },	/* iLO2 */
	{ PCI_DEVICE(PCI_VENDOR_ID_HP, 0x3306) },	/* iLO3 */
	{0},			/* terminate list */
};
MODULE_DEVICE_TABLE(pci, hpwdt_devices);

#ifdef CONFIG_HPWDT_NMI_DECODING
#define PCI_BIOS32_SD_VALUE		0x5F32335F	/* "_32_" */
#define CRU_BIOS_SIGNATURE_VALUE	0x55524324
#define PCI_BIOS32_PARAGRAPH_LEN	16
#define PCI_ROM_BASE1			0x000F0000
#define ROM_SIZE			0x10000

/* BIOS32 Service Directory header (PCI BIOS spec) */
struct bios32_service_dir {
	u32 signature;
	u32 entry_point;
	u8 revision;
	u8 length;
	u8 checksum;
	u8 reserved[5];
};

/* type 212 */
struct smbios_cru64_info {
	u8 type;
	u8 byte_length;
	u16 handle;
	u32 signature;
	u64 physical_address;
	u32 double_length;
	u32 double_offset;
};
#define SMBIOS_CRU64_INFORMATION	212

/* type 219 */
struct smbios_proliant_info {
	u8 type;
	u8 byte_length;
	u16 handle;
	u32 power_features;
	u32 omega_features;
	u32 reserved;
	u32 misc_features;
};
#define SMBIOS_ICRU_INFORMATION	219

/* Register file passed to/from the BIOS $CRU service via asminline_call */
struct cmn_registers {
	union {
		struct {
			u8 ral;
			u8 rah;
			u16 rea2;
		};
		u32 reax;
	} u1;
	union {
		struct {
			u8 rbl;
			u8 rbh;
			u8 reb2l;
			u8 reb2h;
		};
		u32 rebx;
	} u2;
	union {
		struct {
			u8 rcl;
			u8 rch;
			u16 rec2;
		};
		u32 recx;
	} u3;
	union {
		struct {
			u8 rdl;
			u8 rdh;
			u16 red2;
		};
		u32 redx;
	} u4;
	u32 resi;
	u32 redi;
	u16 rds;
	u16 res;
	u32 reflags;
}  __attribute__((packed));

static unsigned int hpwdt_nmi_decoding;
static unsigned int allow_kdump;
static unsigned int priority;		/* hpwdt at end of die_notify list */
static unsigned int is_icru;
static DEFINE_SPINLOCK(rom_lock);
static void *cru_rom_addr;		/* mapped $CRU BIOS service, or NULL */
static struct cmn_registers cmn_regs;

/* Thunk (defined in the asm blobs below) that calls into the BIOS */
extern asmlinkage void asminline_call(struct cmn_registers *pi86Regs,
						unsigned long *pRomEntry);

#ifdef CONFIG_X86_32
/* --32 Bit Bios------------------------------------------------------------ */

#define HPWDT_ARCH	32

/* Marshal cmn_registers into CPU registers, far-call the BIOS entry
 * point, and store the resulting register state back. */
asm(".text \n\t"
    ".align 4 \n"
    "asminline_call: \n\t"
    "pushl %ebp \n\t"
    "movl %esp, %ebp \n\t"
    "pusha \n\t"
    "pushf \n\t"
    "push %es \n\t"
    "push %ds \n\t"
    "pop %es \n\t"
    "movl 8(%ebp),%eax \n\t"
    "movl 4(%eax),%ebx \n\t"
    "movl 8(%eax),%ecx \n\t"
    "movl 12(%eax),%edx \n\t"
    "movl 16(%eax),%esi \n\t"
    "movl 20(%eax),%edi \n\t"
    "movl (%eax),%eax \n\t"
    "push %cs \n\t"
    "call *12(%ebp) \n\t"
    "pushf \n\t"
    "pushl %eax \n\t"
    "movl 8(%ebp),%eax \n\t"
    "movl %ebx,4(%eax) \n\t"
    "movl %ecx,8(%eax) \n\t"
    "movl %edx,12(%eax) \n\t"
    "movl %esi,16(%eax) \n\t"
    "movl %edi,20(%eax) \n\t"
    "movw %ds,24(%eax) \n\t"
    "movw %es,26(%eax) \n\t"
    "popl %ebx \n\t"
    "movl %ebx,(%eax) \n\t"
    "popl %ebx \n\t"
    "movl %ebx,28(%eax) \n\t"
    "pop %es \n\t"
    "popf \n\t"
    "popa \n\t"
    "leave \n\t"
    "ret \n\t"
    ".previous");

/*
 *	cru_detect
 *
 *	Routine Description:
 *	This function uses the 32-bit BIOS Service Directory record to
 *	search for a $CRU record.
 *
 *	Return Value:
 *	0        :  SUCCESS
 *	<0       :  FAILURE
 */
static int __devinit cru_detect(unsigned long map_entry,
	unsigned long map_offset)
{
	void *bios32_map;
	unsigned long *bios32_entrypoint;
	unsigned long cru_physical_address;
	unsigned long cru_length;
	unsigned long physical_bios_base = 0;
	unsigned long physical_bios_offset = 0;
	int retval = -ENODEV;

	bios32_map = ioremap(map_entry, (2 * PAGE_SIZE));

	if (bios32_map == NULL)
		return -ENODEV;

	bios32_entrypoint = bios32_map + map_offset;

	cmn_regs.u1.reax = CRU_BIOS_SIGNATURE_VALUE;

	/* The service directory code is executed in place, so the mapped
	 * pages must be made executable first. */
	set_memory_x((unsigned long)bios32_map, 2);
	asminline_call(&cmn_regs, bios32_entrypoint);

	if (cmn_regs.u1.ral != 0) {
		pr_warn("Call succeeded but with an error: 0x%x\n",
			cmn_regs.u1.ral);
	} else {
		physical_bios_base = cmn_regs.u2.rebx;
		physical_bios_offset = cmn_regs.u4.redx;
		cru_length = cmn_regs.u3.recx;
		cru_physical_address =
			physical_bios_base + physical_bios_offset;

		/* If the values look OK, then map it in. */
		if ((physical_bios_base + physical_bios_offset)) {
			cru_rom_addr =
				ioremap(cru_physical_address, cru_length);
			if (cru_rom_addr) {
				set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
					(cru_length + PAGE_SIZE - 1) >> PAGE_SHIFT);
				retval = 0;
			}
		}

		pr_debug("CRU Base Address: 0x%lx\n", physical_bios_base);
		pr_debug("CRU Offset Address: 0x%lx\n", physical_bios_offset);
		pr_debug("CRU Length: 0x%lx\n", cru_length);
		pr_debug("CRU Mapped Address: %p\n", &cru_rom_addr);
	}
	iounmap(bios32_map);
	return retval;
}

/*
 *	bios_checksum
 *
 *	Returns non-zero when the len bytes at ptr sum (mod 256) to zero,
 *	i.e. the header checksum is valid.
 */
static int __devinit bios_checksum(const char __iomem *ptr, int len)
{
	char sum = 0;
	int i;

	/*
	 * calculate checksum of size bytes. This should add up
	 * to zero if we have a valid header.
	 */
	for (i = 0; i < len; i++)
		sum += ptr[i];

	return ((sum == 0) && (len > 0));
}

/*
 *	bios32_present
 *
 *	Routine Description:
 *	This function finds the 32-bit BIOS Service Directory
 *
 *	Return Value:
 *	0        :  SUCCESS
 *	<0       :  FAILURE
 */
static int __devinit bios32_present(const char __iomem *p)
{
	struct bios32_service_dir *bios_32_ptr;
	int length;
	unsigned long map_entry, map_offset;

	bios_32_ptr = (struct bios32_service_dir *) p;

	/*
	 * Search for signature by checking equal to the swizzled value
	 * instead of calling another routine to perform a strcmp.
	 */
	if (bios_32_ptr->signature == PCI_BIOS32_SD_VALUE) {
		length = bios_32_ptr->length * PCI_BIOS32_PARAGRAPH_LEN;
		if (bios_checksum(p, length)) {
			/*
			 * According to the spec, we're looking for the
			 * first 4KB-aligned address below the entrypoint
			 * listed in the header. The Service Directory code
			 * is guaranteed to occupy no more than 2 4KB pages.
			 */
			map_entry = bios_32_ptr->entry_point & ~(PAGE_SIZE - 1);
			map_offset = bios_32_ptr->entry_point - map_entry;

			return cru_detect(map_entry, map_offset);
		}
	}
	return -ENODEV;
}

/* Scan the legacy BIOS ROM region on 16-byte boundaries for the
 * BIOS32 Service Directory, then locate the $CRU service through it. */
static int __devinit detect_cru_service(void)
{
	char __iomem *p, *q;
	int rc = -1;

	/*
	 * Search from 0x0f0000 through 0x0fffff, inclusive.
	 */
	p = ioremap(PCI_ROM_BASE1, ROM_SIZE);
	if (p == NULL)
		return -ENOMEM;

	for (q = p; q < p + ROM_SIZE; q += 16) {
		rc = bios32_present(q);
		if (!rc)
			break;
	}
	iounmap(p);
	return rc;
}
/* ------------------------------------------------------------------------- */
#endif /* CONFIG_X86_32 */

#ifdef CONFIG_X86_64
/* --64 Bit Bios------------------------------------------------------------ */

#define HPWDT_ARCH	64

/* 64-bit variant of the BIOS call thunk; same register-file contract. */
asm(".text \n\t"
    ".align 4 \n"
    "asminline_call: \n\t"
    "pushq %rbp \n\t"
    "movq %rsp, %rbp \n\t"
    "pushq %rax \n\t"
    "pushq %rbx \n\t"
    "pushq %rdx \n\t"
    "pushq %r12 \n\t"
    "pushq %r9 \n\t"
    "movq %rsi, %r12 \n\t"
    "movq %rdi, %r9 \n\t"
    "movl 4(%r9),%ebx \n\t"
    "movl 8(%r9),%ecx \n\t"
    "movl 12(%r9),%edx \n\t"
    "movl 16(%r9),%esi \n\t"
    "movl 20(%r9),%edi \n\t"
    "movl (%r9),%eax \n\t"
    "call *%r12 \n\t"
    "pushfq \n\t"
    "popq %r12 \n\t"
    "movl %eax, (%r9) \n\t"
    "movl %ebx, 4(%r9) \n\t"
    "movl %ecx, 8(%r9) \n\t"
    "movl %edx, 12(%r9) \n\t"
    "movl %esi, 16(%r9) \n\t"
    "movl %edi, 20(%r9) \n\t"
    "movq %r12, %rax \n\t"
    "movl %eax, 28(%r9) \n\t"
    "popq %r9 \n\t"
    "popq %r12 \n\t"
    "popq %rdx \n\t"
    "popq %rbx \n\t"
    "popq %rax \n\t"
    "leave \n\t"
    "ret \n\t"
    ".previous");

/*
 *	dmi_find_cru
 *
 *	Routine Description:
 *	This function checks whether or not a SMBIOS/DMI record is
 *	the 64bit CRU info or not
 */
static void __devinit dmi_find_cru(const struct dmi_header *dm, void *dummy)
{
	struct smbios_cru64_info *smbios_cru64_ptr;
	unsigned long cru_physical_address;

	if (dm->type == SMBIOS_CRU64_INFORMATION) {
		smbios_cru64_ptr = (struct smbios_cru64_info *) dm;
		if (smbios_cru64_ptr->signature == CRU_BIOS_SIGNATURE_VALUE) {
			cru_physical_address =
				smbios_cru64_ptr->physical_address +
				smbios_cru64_ptr->double_offset;
			cru_rom_addr = ioremap(cru_physical_address,
				smbios_cru64_ptr->double_length);
			/* The CRU code is executed in place, so make the
			 * mapping executable. */
			set_memory_x((unsigned long)cru_rom_addr & PAGE_MASK,
				smbios_cru64_ptr->double_length >> PAGE_SHIFT);
		}
	}
}

/* 64-bit systems find the CRU service via a type-212 SMBIOS record. */
static int __devinit detect_cru_service(void)
{
	cru_rom_addr = NULL;

	dmi_walk(dmi_find_cru, NULL);

	/* if cru_rom_addr has been set then we found a CRU service */
	return ((cru_rom_addr != NULL) ? 0 : -ENODEV);
}
/* ------------------------------------------------------------------------- */
#endif /* CONFIG_X86_64 */
#endif /* CONFIG_HPWDT_NMI_DECODING */

/*
 *	Watchdog operations
 */

/* Arm the timer with the current reload value and enable it (0x85). */
static void hpwdt_start(void)
{
	reload = SECS_TO_TICKS(soft_margin);
	iowrite16(reload, hpwdt_timer_reg);
	iowrite8(0x85, hpwdt_timer_con);
}

/* Clear the enable bit (bit 0) of the control register. */
static void hpwdt_stop(void)
{
	unsigned long data;

	data = ioread8(hpwdt_timer_con);
	data &= 0xFE;
	iowrite8(data, hpwdt_timer_con);
}

/* Kick the watchdog by rewriting the reload value. */
static void hpwdt_ping(void)
{
	iowrite16(reload, hpwdt_timer_reg);
}

/* Validate and apply a new timeout in seconds; does not re-arm the
 * hardware itself (callers ping afterwards). Returns 0 or -EINVAL. */
static int hpwdt_change_timer(int new_margin)
{
	if (new_margin < 1 || new_margin > HPWDT_MAX_TIMER) {
		pr_warn("New value passed in is invalid: %d seconds\n",
			new_margin);
		return -EINVAL;
	}

	soft_margin = new_margin;
	pr_debug("New timer passed in is %d seconds\n", new_margin);
	reload = SECS_TO_TICKS(soft_margin);

	return 0;
}

/* Seconds remaining before the watchdog fires. */
static int hpwdt_time_left(void)
{
	return TICKS_TO_SECS(ioread16(hpwdt_timer_reg));
}

#ifdef CONFIG_HPWDT_NMI_DECODING
/*
 *	NMI Handler
 *
 *	Calls the BIOS $CRU service (once, on non-iCRU systems) to log the
 *	NMI source, then panics so the IML/kdump can capture the event.
 */
static int hpwdt_pretimeout(unsigned int ulReason, struct pt_regs *regs)
{
	unsigned long rom_pl;
	static int die_nmi_called;

	if (!hpwdt_nmi_decoding)
		goto out;

	spin_lock_irqsave(&rom_lock, rom_pl);
	if (!die_nmi_called && !is_icru)
		asminline_call(&cmn_regs, cru_rom_addr);
	die_nmi_called = 1;
	spin_unlock_irqrestore(&rom_lock, rom_pl);

	if (allow_kdump)
		hpwdt_stop();

	if (!is_icru) {
		if (cmn_regs.u1.ral == 0) {
			panic("An NMI occurred, "
				"but unable to determine source.\n");
		}
	}
	panic("An NMI occurred, please see the Integrated "
		"Management Log for details.\n");

out:
	return NMI_DONE;
}
#endif /* CONFIG_HPWDT_NMI_DECODING */

/*
 *	/dev/watchdog handling
 */

/* Single-open device; starts the watchdog on open. */
static int hpwdt_open(struct inode *inode, struct file *file)
{
	/* /dev/watchdog can only be opened once */
	if (test_and_set_bit(0, &hpwdt_is_open))
		return -EBUSY;

	/* Start the watchdog */
	hpwdt_start();
	hpwdt_ping();

	return nonseekable_open(inode, file);
}

/* Stop the watchdog only on a magic close ('V' written first). */
static int hpwdt_release(struct inode *inode, struct file *file)
{
	/* Stop the watchdog */
	if (expect_release == 42) {
		hpwdt_stop();
	} else {
		pr_crit("Unexpected close, not stopping watchdog!\n");
		hpwdt_ping();
	}

	expect_release = 0;

	/* /dev/watchdog is being closed, make sure it can be re-opened */
	clear_bit(0, &hpwdt_is_open);

	return 0;
}

/* Any write pings the watchdog; scan for 'V' to arm magic close. */
static ssize_t hpwdt_write(struct file *file, const char __user *data,
	size_t len, loff_t *ppos)
{
	/* See if we got the magic character 'V' and reload the timer */
	if (len) {
		if (!nowayout) {
			size_t i;

			/* note: just in case someone wrote the magic character
			 * five months ago... */
			expect_release = 0;

			/* scan to see whether or not we got the magic char. */
			for (i = 0; i != len; i++) {
				char c;
				if (get_user(c, data + i))
					return -EFAULT;
				if (c == 'V')
					expect_release = 42;
			}
		}

		/* someone wrote to us, we should reload the timer */
		hpwdt_ping();
	}

	return len;
}

static const struct watchdog_info ident = {
	.options = WDIOF_SETTIMEOUT |
		   WDIOF_KEEPALIVEPING |
		   WDIOF_MAGICCLOSE,
	.identity = "HP iLO2+ HW Watchdog Timer",
};

/* Standard watchdog ioctl interface; SETTIMEOUT falls through to
 * GETTIMEOUT so the (possibly clamped) value is returned. */
static long hpwdt_ioctl(struct file *file, unsigned int cmd,
	unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	int __user *p = argp;
	int new_margin;
	int ret = -ENOTTY;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		ret = 0;
		if (copy_to_user(argp, &ident, sizeof(ident)))
			ret = -EFAULT;
		break;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		ret = put_user(0, p);
		break;

	case WDIOC_KEEPALIVE:
		hpwdt_ping();
		ret = 0;
		break;

	case WDIOC_SETTIMEOUT:
		ret = get_user(new_margin, p);
		if (ret)
			break;

		ret = hpwdt_change_timer(new_margin);
		if (ret)
			break;

		hpwdt_ping();
		/* Fall */
	case WDIOC_GETTIMEOUT:
		ret = put_user(soft_margin, p);
		break;

	case WDIOC_GETTIMELEFT:
		ret = put_user(hpwdt_time_left(), p);
		break;
	}
	return ret;
}

/*
 *	Kernel interfaces
 */
static const struct file_operations hpwdt_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.write = hpwdt_write,
	.unlocked_ioctl = hpwdt_ioctl,
	.open = hpwdt_open,
	.release = hpwdt_release,
};

static struct miscdevice hpwdt_miscdev = {
	.minor = WATCHDOG_MINOR,
	.name = "watchdog",
	.fops = &hpwdt_fops,
};

/*
 *	Init & Exit
 */

#ifdef CONFIG_HPWDT_NMI_DECODING
#ifdef CONFIG_X86_LOCAL_APIC
static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev)
{
	/*
	 * If nmi_watchdog is turned off then we can turn on
	 * our nmi decoding capability.
	 */
	hpwdt_nmi_decoding = 1;
}
#else
static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev)
{
	dev_warn(&dev->dev, "NMI decoding is disabled. "
		"Your kernel does not support a NMI Watchdog.\n");
}
#endif /* CONFIG_X86_LOCAL_APIC */

/*
 *	dmi_find_icru
 *
 *	Routine Description:
 *	This function checks whether or not we are on an iCRU-based server.
 *	This check is independent of architecture and needs to be made for
 *	any ProLiant system.
 */
static void __devinit dmi_find_icru(const struct dmi_header *dm, void *dummy)
{
	struct smbios_proliant_info *smbios_proliant_ptr;

	if (dm->type == SMBIOS_ICRU_INFORMATION) {
		smbios_proliant_ptr = (struct smbios_proliant_info *) dm;
		if (smbios_proliant_ptr->misc_features & 0x01)
			is_icru = 1;
	}
}

/* Locate the CRU service (unless iCRU) and install the NMI handler. */
static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
{
	int retval;

	/*
	 * On typical CRU-based systems we need to map that service in
	 * the BIOS. For 32 bit Operating Systems we need to go through
	 * the 32 Bit BIOS Service Directory. For 64 bit Operating
	 * Systems we get that service through SMBIOS.
	 *
	 * On systems that support the new iCRU service all we need to
	 * do is call dmi_walk to get the supported flag value and skip
	 * the old cru detect code.
	 */
	dmi_walk(dmi_find_icru, NULL);
	if (!is_icru) {

		/*
		* We need to map the ROM to get the CRU service.
		* For 32 bit Operating Systems we need to go through the 32 Bit
		* BIOS Service Directory
		* For 64 bit Operating Systems we get that service through SMBIOS.
		*/
		retval = detect_cru_service();
		if (retval < 0) {
			dev_warn(&dev->dev,
				"Unable to detect the %d Bit CRU Service.\n",
				HPWDT_ARCH);
			return retval;
		}

		/*
		* We know this is the only CRU call we need to make so lets keep as
		* few instructions as possible once the NMI comes in.
		*/
		cmn_regs.u1.rah = 0x0D;
		cmn_regs.u1.ral = 0x02;
	}

	/*
	 * If the priority is set to 1, then we will be put first on the
	 * die notify list to handle a critical NMI. The default is to
	 * be last so other users of the NMI signal can function.
	 */
	retval = register_nmi_handler(NMI_UNKNOWN, hpwdt_pretimeout,
				(priority) ? NMI_FLAG_FIRST : 0,
				"hpwdt");
	if (retval != 0) {
		dev_warn(&dev->dev,
			"Unable to register a die notifier (err=%d).\n",
			retval);
		if (cru_rom_addr)
			iounmap(cru_rom_addr);
	}
	/* NOTE(review): on register_nmi_handler failure this still falls
	 * through and returns 0 (and cru_rom_addr is left dangling after
	 * iounmap) - verify whether an error return is intended here. */

	dev_info(&dev->dev,
			"HP Watchdog Timer Driver: NMI decoding initialized"
			", allow kernel dump: %s (default = 0/OFF)"
			", priority: %s (default = 0/LAST).\n",
			(allow_kdump == 0) ? "OFF" : "ON",
			(priority == 0) ? "LAST" : "FIRST");
	return 0;
}

/* Tear down the NMI handler and release the CRU mapping. */
static void hpwdt_exit_nmi_decoding(void)
{
	unregister_nmi_handler(NMI_UNKNOWN, "hpwdt");
	if (cru_rom_addr)
		iounmap(cru_rom_addr);
}
#else /* !CONFIG_HPWDT_NMI_DECODING */
static void __devinit hpwdt_check_nmi_decoding(struct pci_dev *dev)
{
}

static int __devinit hpwdt_init_nmi_decoding(struct pci_dev *dev)
{
	return 0;
}

static void hpwdt_exit_nmi_decoding(void)
{
}
#endif /* CONFIG_HPWDT_NMI_DECODING */

/* PCI probe: map the iLO2+ watchdog registers (BAR 1, offsets 0x70/0x72),
 * set up NMI decoding, and register the misc watchdog device. */
static int __devinit hpwdt_init_one(struct pci_dev *dev,
					const struct pci_device_id *ent)
{
	int retval;

	/*
	 * Check if we can do NMI decoding or not
	 */
	hpwdt_check_nmi_decoding(dev);

	/*
	 * First let's find out if we are on an iLO2+ server. We will
	 * not run on a legacy ASM box.
	 * So we only support the G5 ProLiant servers and higher.
	 */
	if (dev->subsystem_vendor != PCI_VENDOR_ID_HP) {
		dev_warn(&dev->dev,
			"This server does not have an iLO2+ ASIC.\n");
		return -ENODEV;
	}

	if (pci_enable_device(dev)) {
		dev_warn(&dev->dev,
			"Not possible to enable PCI Device: 0x%x:0x%x.\n",
			ent->vendor, ent->device);
		return -ENODEV;
	}

	pci_mem_addr = pci_iomap(dev, 1, 0x80);
	if (!pci_mem_addr) {
		dev_warn(&dev->dev,
			"Unable to detect the iLO2+ server memory.\n");
		retval = -ENOMEM;
		goto error_pci_iomap;
	}
	hpwdt_timer_reg = pci_mem_addr + 0x70;
	hpwdt_timer_con = pci_mem_addr + 0x72;

	/* Make sure that timer is disabled until /dev/watchdog is opened */
	hpwdt_stop();

	/* Make sure that we have a valid soft_margin */
	if (hpwdt_change_timer(soft_margin))
		hpwdt_change_timer(DEFAULT_MARGIN);

	/* Initialize NMI Decoding functionality */
	retval = hpwdt_init_nmi_decoding(dev);
	if (retval != 0)
		goto error_init_nmi_decoding;

	retval = misc_register(&hpwdt_miscdev);
	if (retval < 0) {
		dev_warn(&dev->dev,
			"Unable to register miscdev on minor=%d (err=%d).\n",
			WATCHDOG_MINOR, retval);
		goto error_misc_register;
	}

	dev_info(&dev->dev, "HP Watchdog Timer Driver: %s"
			", timer margin: %d seconds (nowayout=%d).\n",
			HPWDT_VERSION, soft_margin, nowayout);
	return 0;

error_misc_register:
	hpwdt_exit_nmi_decoding();
error_init_nmi_decoding:
	pci_iounmap(dev, pci_mem_addr);
error_pci_iomap:
	pci_disable_device(dev);
	return retval;
}

/* PCI remove: unwind everything done in hpwdt_init_one(). */
static void __devexit hpwdt_exit(struct pci_dev *dev)
{
	if (!nowayout)
		hpwdt_stop();

	misc_deregister(&hpwdt_miscdev);
	hpwdt_exit_nmi_decoding();
	pci_iounmap(dev, pci_mem_addr);
	pci_disable_device(dev);
}

static struct pci_driver hpwdt_driver = {
	.name = "hpwdt",
	.id_table = hpwdt_devices,
	.probe = hpwdt_init_one,
	.remove = __devexit_p(hpwdt_exit),
};

static void __exit hpwdt_cleanup(void)
{
	pci_unregister_driver(&hpwdt_driver);
}

static int __init hpwdt_init(void)
{
	return pci_register_driver(&hpwdt_driver);
}

MODULE_AUTHOR("Tom Mingarelli");
MODULE_DESCRIPTION("hp watchdog driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(HPWDT_VERSION);
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);

/* NOTE(review): soft_margin is declared unsigned int but exposed with
 * module_param(..., int, ...) - confirm the intended parameter type. */
module_param(soft_margin, int, 0);
MODULE_PARM_DESC(soft_margin, "Watchdog timeout in seconds");

module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
		__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

#ifdef CONFIG_HPWDT_NMI_DECODING
module_param(allow_kdump, int, 0);
MODULE_PARM_DESC(allow_kdump, "Start a kernel dump after NMI occurs");

module_param(priority, int, 0);
MODULE_PARM_DESC(priority, "The hpwdt driver handles NMIs first or last"
		" (default = 0/Last)\n");
#endif /* !CONFIG_HPWDT_NMI_DECODING */

module_init(hpwdt_init);
module_exit(hpwdt_cleanup);
gpl-2.0
Xmister/linux-sunxi
drivers/net/wireless/b43/phy_a.c
2318
16417
/* Broadcom B43 wireless driver IEEE 802.11a PHY driver Copyright (c) 2005 Martin Langer <martin-langer@gmx.de>, Copyright (c) 2005-2007 Stefano Brivio <stefano.brivio@polimi.it> Copyright (c) 2005-2008 Michael Buesch <mb@bu3sch.de> Copyright (c) 2005, 2006 Danny van Dyk <kugelfang@gentoo.org> Copyright (c) 2005, 2006 Andreas Jaggi <andreas.jaggi@waterwave.ch> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/slab.h> #include "b43.h" #include "phy_a.h" #include "phy_common.h" #include "wa.h" #include "tables.h" #include "main.h" /* Get the freq, as it has to be written to the device. 
*/ static inline u16 channel2freq_a(u8 channel) { B43_WARN_ON(channel > 200); return (5000 + 5 * channel); } static inline u16 freq_r3A_value(u16 frequency) { u16 value; if (frequency < 5091) value = 0x0040; else if (frequency < 5321) value = 0x0000; else if (frequency < 5806) value = 0x0080; else value = 0x0040; return value; } #if 0 /* This function converts a TSSI value to dBm in Q5.2 */ static s8 b43_aphy_estimate_power_out(struct b43_wldev *dev, s8 tssi) { struct b43_phy *phy = &dev->phy; struct b43_phy_a *aphy = phy->a; s8 dbm = 0; s32 tmp; tmp = (aphy->tgt_idle_tssi - aphy->cur_idle_tssi + tssi); tmp += 0x80; tmp = clamp_val(tmp, 0x00, 0xFF); dbm = aphy->tssi2dbm[tmp]; //TODO: There's a FIXME on the specs return dbm; } #endif static void b43_radio_set_tx_iq(struct b43_wldev *dev) { static const u8 data_high[5] = { 0x00, 0x40, 0x80, 0x90, 0xD0 }; static const u8 data_low[5] = { 0x00, 0x01, 0x05, 0x06, 0x0A }; u16 tmp = b43_radio_read16(dev, 0x001E); int i, j; for (i = 0; i < 5; i++) { for (j = 0; j < 5; j++) { if (tmp == (data_high[i] << 4 | data_low[j])) { b43_phy_write(dev, 0x0069, (i - j) << 8 | 0x00C0); return; } } } } static void aphy_channel_switch(struct b43_wldev *dev, unsigned int channel) { u16 freq, r8, tmp; freq = channel2freq_a(channel); r8 = b43_radio_read16(dev, 0x0008); b43_write16(dev, 0x03F0, freq); b43_radio_write16(dev, 0x0008, r8); //TODO: write max channel TX power? to Radio 0x2D tmp = b43_radio_read16(dev, 0x002E); tmp &= 0x0080; //TODO: OR tmp with the Power out estimation for this channel? 
b43_radio_write16(dev, 0x002E, tmp); if (freq >= 4920 && freq <= 5500) { /* * r8 = (((freq * 15 * 0xE1FC780F) >> 32) / 29) & 0x0F; * = (freq * 0.025862069 */ r8 = 3 * freq / 116; /* is equal to r8 = freq * 0.025862 */ } b43_radio_write16(dev, 0x0007, (r8 << 4) | r8); b43_radio_write16(dev, 0x0020, (r8 << 4) | r8); b43_radio_write16(dev, 0x0021, (r8 << 4) | r8); b43_radio_maskset(dev, 0x0022, 0x000F, (r8 << 4)); b43_radio_write16(dev, 0x002A, (r8 << 4)); b43_radio_write16(dev, 0x002B, (r8 << 4)); b43_radio_maskset(dev, 0x0008, 0x00F0, (r8 << 4)); b43_radio_maskset(dev, 0x0029, 0xFF0F, 0x00B0); b43_radio_write16(dev, 0x0035, 0x00AA); b43_radio_write16(dev, 0x0036, 0x0085); b43_radio_maskset(dev, 0x003A, 0xFF20, freq_r3A_value(freq)); b43_radio_mask(dev, 0x003D, 0x00FF); b43_radio_maskset(dev, 0x0081, 0xFF7F, 0x0080); b43_radio_mask(dev, 0x0035, 0xFFEF); b43_radio_maskset(dev, 0x0035, 0xFFEF, 0x0010); b43_radio_set_tx_iq(dev); //TODO: TSSI2dbm workaround //FIXME b43_phy_xmitpower(dev); } static void b43_radio_init2060(struct b43_wldev *dev) { b43_radio_write16(dev, 0x0004, 0x00C0); b43_radio_write16(dev, 0x0005, 0x0008); b43_radio_write16(dev, 0x0009, 0x0040); b43_radio_write16(dev, 0x0005, 0x00AA); b43_radio_write16(dev, 0x0032, 0x008F); b43_radio_write16(dev, 0x0006, 0x008F); b43_radio_write16(dev, 0x0034, 0x008F); b43_radio_write16(dev, 0x002C, 0x0007); b43_radio_write16(dev, 0x0082, 0x0080); b43_radio_write16(dev, 0x0080, 0x0000); b43_radio_write16(dev, 0x003F, 0x00DA); b43_radio_mask(dev, 0x0005, ~0x0008); b43_radio_mask(dev, 0x0081, ~0x0010); b43_radio_mask(dev, 0x0081, ~0x0020); b43_radio_mask(dev, 0x0081, ~0x0020); msleep(1); /* delay 400usec */ b43_radio_maskset(dev, 0x0081, ~0x0020, 0x0010); msleep(1); /* delay 400usec */ b43_radio_maskset(dev, 0x0005, ~0x0008, 0x0008); b43_radio_mask(dev, 0x0085, ~0x0010); b43_radio_mask(dev, 0x0005, ~0x0008); b43_radio_mask(dev, 0x0081, ~0x0040); b43_radio_maskset(dev, 0x0081, ~0x0040, 0x0040); b43_radio_write16(dev, 
0x0005, (b43_radio_read16(dev, 0x0081) & ~0x0008) | 0x0008); b43_phy_write(dev, 0x0063, 0xDDC6); b43_phy_write(dev, 0x0069, 0x07BE); b43_phy_write(dev, 0x006A, 0x0000); aphy_channel_switch(dev, dev->phy.ops->get_default_chan(dev)); msleep(1); } static void b43_phy_rssiagc(struct b43_wldev *dev, u8 enable) { int i; if (dev->phy.rev < 3) { if (enable) for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) { b43_ofdmtab_write16(dev, B43_OFDMTAB_LNAHPFGAIN1, i, 0xFFF8); b43_ofdmtab_write16(dev, B43_OFDMTAB_WRSSI, i, 0xFFF8); } else for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) { b43_ofdmtab_write16(dev, B43_OFDMTAB_LNAHPFGAIN1, i, b43_tab_rssiagc1[i]); b43_ofdmtab_write16(dev, B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc1[i]); } } else { if (enable) for (i = 0; i < B43_TAB_RSSIAGC1_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_WRSSI, i, 0x0820); else for (i = 0; i < B43_TAB_RSSIAGC2_SIZE; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_WRSSI, i, b43_tab_rssiagc2[i]); } } static void b43_phy_ww(struct b43_wldev *dev) { u16 b, curr_s, best_s = 0xFFFF; int i; b43_phy_mask(dev, B43_PHY_CRS0, ~B43_PHY_CRS0_EN); b43_phy_set(dev, B43_PHY_OFDM(0x1B), 0x1000); b43_phy_maskset(dev, B43_PHY_OFDM(0x82), 0xF0FF, 0x0300); b43_radio_set(dev, 0x0009, 0x0080); b43_radio_maskset(dev, 0x0012, 0xFFFC, 0x0002); b43_wa_initgains(dev); b43_phy_write(dev, B43_PHY_OFDM(0xBA), 0x3ED5); b = b43_phy_read(dev, B43_PHY_PWRDOWN); b43_phy_write(dev, B43_PHY_PWRDOWN, (b & 0xFFF8) | 0x0005); b43_radio_set(dev, 0x0004, 0x0004); for (i = 0x10; i <= 0x20; i++) { b43_radio_write16(dev, 0x0013, i); curr_s = b43_phy_read(dev, B43_PHY_OTABLEQ) & 0x00FF; if (!curr_s) { best_s = 0x0000; break; } else if (curr_s >= 0x0080) curr_s = 0x0100 - curr_s; if (curr_s < best_s) best_s = curr_s; } b43_phy_write(dev, B43_PHY_PWRDOWN, b); b43_radio_mask(dev, 0x0004, 0xFFFB); b43_radio_write16(dev, 0x0013, best_s); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1_R1, 0, 0xFFEC); b43_phy_write(dev, B43_PHY_OFDM(0xB7), 0x1E80); b43_phy_write(dev, 
B43_PHY_OFDM(0xB6), 0x1C00); b43_phy_write(dev, B43_PHY_OFDM(0xB5), 0x0EC0); b43_phy_write(dev, B43_PHY_OFDM(0xB2), 0x00C0); b43_phy_write(dev, B43_PHY_OFDM(0xB9), 0x1FFF); b43_phy_maskset(dev, B43_PHY_OFDM(0xBB), 0xF000, 0x0053); b43_phy_maskset(dev, B43_PHY_OFDM61, 0xFE1F, 0x0120); b43_phy_maskset(dev, B43_PHY_OFDM(0x13), 0x0FFF, 0x3000); b43_phy_maskset(dev, B43_PHY_OFDM(0x14), 0x0FFF, 0x3000); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 6, 0x0017); for (i = 0; i < 6; i++) b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, i, 0x000F); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0D, 0x000E); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0E, 0x0011); b43_ofdmtab_write16(dev, B43_OFDMTAB_AGC1, 0x0F, 0x0013); b43_phy_write(dev, B43_PHY_OFDM(0x33), 0x5030); b43_phy_set(dev, B43_PHY_CRS0, B43_PHY_CRS0_EN); } static void hardware_pctl_init_aphy(struct b43_wldev *dev) { //TODO } void b43_phy_inita(struct b43_wldev *dev) { struct ssb_bus *bus = dev->sdev->bus; struct b43_phy *phy = &dev->phy; /* This lowlevel A-PHY init is also called from G-PHY init. * So we must not access phy->a, if called from G-PHY code. 
*/ B43_WARN_ON((phy->type != B43_PHYTYPE_A) && (phy->type != B43_PHYTYPE_G)); might_sleep(); if (phy->rev >= 6) { if (phy->type == B43_PHYTYPE_A) b43_phy_mask(dev, B43_PHY_OFDM(0x1B), ~0x1000); if (b43_phy_read(dev, B43_PHY_ENCORE) & B43_PHY_ENCORE_EN) b43_phy_set(dev, B43_PHY_ENCORE, 0x0010); else b43_phy_mask(dev, B43_PHY_ENCORE, ~0x1010); } b43_wa_all(dev); if (phy->type == B43_PHYTYPE_A) { if (phy->gmode && (phy->rev < 3)) b43_phy_set(dev, 0x0034, 0x0001); b43_phy_rssiagc(dev, 0); b43_phy_set(dev, B43_PHY_CRS0, B43_PHY_CRS0_EN); b43_radio_init2060(dev); if ((bus->boardinfo.vendor == SSB_BOARDVENDOR_BCM) && ((bus->boardinfo.type == SSB_BOARD_BU4306) || (bus->boardinfo.type == SSB_BOARD_BU4309))) { ; //TODO: A PHY LO } if (phy->rev >= 3) b43_phy_ww(dev); hardware_pctl_init_aphy(dev); //TODO: radar detection } if ((phy->type == B43_PHYTYPE_G) && (dev->sdev->bus->sprom.boardflags_lo & B43_BFL_PACTRL)) { b43_phy_maskset(dev, B43_PHY_OFDM(0x6E), 0xE000, 0x3CF); } } /* Initialise the TSSI->dBm lookup table */ static int b43_aphy_init_tssi2dbm_table(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_a *aphy = phy->a; s16 pab0, pab1, pab2; pab0 = (s16) (dev->sdev->bus->sprom.pa1b0); pab1 = (s16) (dev->sdev->bus->sprom.pa1b1); pab2 = (s16) (dev->sdev->bus->sprom.pa1b2); if (pab0 != 0 && pab1 != 0 && pab2 != 0 && pab0 != -1 && pab1 != -1 && pab2 != -1) { /* The pabX values are set in SPROM. Use them. */ if ((s8) dev->sdev->bus->sprom.itssi_a != 0 && (s8) dev->sdev->bus->sprom.itssi_a != -1) aphy->tgt_idle_tssi = (s8) (dev->sdev->bus->sprom.itssi_a); else aphy->tgt_idle_tssi = 62; aphy->tssi2dbm = b43_generate_dyn_tssi2dbm_tab(dev, pab0, pab1, pab2); if (!aphy->tssi2dbm) return -ENOMEM; } else { /* pabX values not set in SPROM, * but APHY needs a generated table. 
*/ aphy->tssi2dbm = NULL; b43err(dev->wl, "Could not generate tssi2dBm " "table (wrong SPROM info)!\n"); return -ENODEV; } return 0; } static int b43_aphy_op_allocate(struct b43_wldev *dev) { struct b43_phy_a *aphy; int err; aphy = kzalloc(sizeof(*aphy), GFP_KERNEL); if (!aphy) return -ENOMEM; dev->phy.a = aphy; err = b43_aphy_init_tssi2dbm_table(dev); if (err) goto err_free_aphy; return 0; err_free_aphy: kfree(aphy); dev->phy.a = NULL; return err; } static void b43_aphy_op_prepare_structs(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_a *aphy = phy->a; const void *tssi2dbm; int tgt_idle_tssi; /* tssi2dbm table is constant, so it is initialized at alloc time. * Save a copy of the pointer. */ tssi2dbm = aphy->tssi2dbm; tgt_idle_tssi = aphy->tgt_idle_tssi; /* Zero out the whole PHY structure. */ memset(aphy, 0, sizeof(*aphy)); aphy->tssi2dbm = tssi2dbm; aphy->tgt_idle_tssi = tgt_idle_tssi; //TODO init struct b43_phy_a } static void b43_aphy_op_free(struct b43_wldev *dev) { struct b43_phy *phy = &dev->phy; struct b43_phy_a *aphy = phy->a; kfree(aphy->tssi2dbm); aphy->tssi2dbm = NULL; kfree(aphy); dev->phy.a = NULL; } static int b43_aphy_op_init(struct b43_wldev *dev) { b43_phy_inita(dev); return 0; } static inline u16 adjust_phyreg(struct b43_wldev *dev, u16 offset) { /* OFDM registers are base-registers for the A-PHY. 
*/ if ((offset & B43_PHYROUTE) == B43_PHYROUTE_OFDM_GPHY) { offset &= ~B43_PHYROUTE; offset |= B43_PHYROUTE_BASE; } #if B43_DEBUG if ((offset & B43_PHYROUTE) == B43_PHYROUTE_EXT_GPHY) { /* Ext-G registers are only available on G-PHYs */ b43err(dev->wl, "Invalid EXT-G PHY access at " "0x%04X on A-PHY\n", offset); dump_stack(); } if ((offset & B43_PHYROUTE) == B43_PHYROUTE_N_BMODE) { /* N-BMODE registers are only available on N-PHYs */ b43err(dev->wl, "Invalid N-BMODE PHY access at " "0x%04X on A-PHY\n", offset); dump_stack(); } #endif /* B43_DEBUG */ return offset; } static u16 b43_aphy_op_read(struct b43_wldev *dev, u16 reg) { reg = adjust_phyreg(dev, reg); b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); return b43_read16(dev, B43_MMIO_PHY_DATA); } static void b43_aphy_op_write(struct b43_wldev *dev, u16 reg, u16 value) { reg = adjust_phyreg(dev, reg); b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); b43_write16(dev, B43_MMIO_PHY_DATA, value); } static u16 b43_aphy_op_radio_read(struct b43_wldev *dev, u16 reg) { /* Register 1 is a 32-bit register. */ B43_WARN_ON(reg == 1); /* A-PHY needs 0x40 for read access */ reg |= 0x40; b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW); } static void b43_aphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value) { /* Register 1 is a 32-bit register. 
*/ B43_WARN_ON(reg == 1); b43_write16(dev, B43_MMIO_RADIO_CONTROL, reg); b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); } static bool b43_aphy_op_supports_hwpctl(struct b43_wldev *dev) { return (dev->phy.rev >= 5); } static void b43_aphy_op_software_rfkill(struct b43_wldev *dev, bool blocked) { struct b43_phy *phy = &dev->phy; if (!blocked) { if (phy->radio_on) return; b43_radio_write16(dev, 0x0004, 0x00C0); b43_radio_write16(dev, 0x0005, 0x0008); b43_phy_mask(dev, 0x0010, 0xFFF7); b43_phy_mask(dev, 0x0011, 0xFFF7); b43_radio_init2060(dev); } else { b43_radio_write16(dev, 0x0004, 0x00FF); b43_radio_write16(dev, 0x0005, 0x00FB); b43_phy_set(dev, 0x0010, 0x0008); b43_phy_set(dev, 0x0011, 0x0008); } } static int b43_aphy_op_switch_channel(struct b43_wldev *dev, unsigned int new_channel) { if (new_channel > 200) return -EINVAL; aphy_channel_switch(dev, new_channel); return 0; } static unsigned int b43_aphy_op_get_default_chan(struct b43_wldev *dev) { return 36; /* Default to channel 36 */ } static void b43_aphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna) {//TODO struct b43_phy *phy = &dev->phy; u16 tmp; int autodiv = 0; if (antenna == B43_ANTENNA_AUTO0 || antenna == B43_ANTENNA_AUTO1) autodiv = 1; b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP); b43_phy_maskset(dev, B43_PHY_BBANDCFG, ~B43_PHY_BBANDCFG_RXANT, (autodiv ? 
B43_ANTENNA_AUTO1 : antenna) << B43_PHY_BBANDCFG_RXANT_SHIFT); if (autodiv) { tmp = b43_phy_read(dev, B43_PHY_ANTDWELL); if (antenna == B43_ANTENNA_AUTO1) tmp &= ~B43_PHY_ANTDWELL_AUTODIV1; else tmp |= B43_PHY_ANTDWELL_AUTODIV1; b43_phy_write(dev, B43_PHY_ANTDWELL, tmp); } if (phy->rev < 3) b43_phy_maskset(dev, B43_PHY_ANTDWELL, 0xFF00, 0x24); else { b43_phy_set(dev, B43_PHY_OFDM61, 0x10); if (phy->rev == 3) { b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, 0x1D); b43_phy_write(dev, B43_PHY_ADIVRELATED, 8); } else { b43_phy_write(dev, B43_PHY_CLIPPWRDOWNT, 0x3A); b43_phy_maskset(dev, B43_PHY_ADIVRELATED, 0xFF00, 8); } } b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP); } static void b43_aphy_op_adjust_txpower(struct b43_wldev *dev) {//TODO } static enum b43_txpwr_result b43_aphy_op_recalc_txpower(struct b43_wldev *dev, bool ignore_tssi) {//TODO return B43_TXPWR_RES_DONE; } static void b43_aphy_op_pwork_15sec(struct b43_wldev *dev) {//TODO } static void b43_aphy_op_pwork_60sec(struct b43_wldev *dev) {//TODO } const struct b43_phy_operations b43_phyops_a = { .allocate = b43_aphy_op_allocate, .free = b43_aphy_op_free, .prepare_structs = b43_aphy_op_prepare_structs, .init = b43_aphy_op_init, .phy_read = b43_aphy_op_read, .phy_write = b43_aphy_op_write, .radio_read = b43_aphy_op_radio_read, .radio_write = b43_aphy_op_radio_write, .supports_hwpctl = b43_aphy_op_supports_hwpctl, .software_rfkill = b43_aphy_op_software_rfkill, .switch_analog = b43_phyop_switch_analog_generic, .switch_channel = b43_aphy_op_switch_channel, .get_default_chan = b43_aphy_op_get_default_chan, .set_rx_antenna = b43_aphy_op_set_rx_antenna, .recalc_txpower = b43_aphy_op_recalc_txpower, .adjust_txpower = b43_aphy_op_adjust_txpower, .pwork_15sec = b43_aphy_op_pwork_15sec, .pwork_60sec = b43_aphy_op_pwork_60sec, };
gpl-2.0
iconia-dev/android_kernel_acer_t20-common
arch/mips/kernel/vpe.c
2318
37406
/* * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ /* * VPE support module * * Provides support for loading a MIPS SP program on VPE1. * The SP environment is rather simple, no tlb's. It needs to be relocatable * (or partially linked). You should initialise your stack in the startup * code. This loader looks for the symbol __start and sets up * execution to resume from there. The MIPS SDE kit contains suitable examples. * * To load and run, simply cat a SP 'program file' to /dev/vpe1. * i.e cat spapp >/dev/vpe1. 
*/ #include <linux/kernel.h> #include <linux/device.h> #include <linux/module.h> #include <linux/fs.h> #include <linux/init.h> #include <asm/uaccess.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/vmalloc.h> #include <linux/elf.h> #include <linux/seq_file.h> #include <linux/syscalls.h> #include <linux/moduleloader.h> #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/bootmem.h> #include <asm/mipsregs.h> #include <asm/mipsmtregs.h> #include <asm/cacheflush.h> #include <asm/atomic.h> #include <asm/cpu.h> #include <asm/mips_mt.h> #include <asm/processor.h> #include <asm/system.h> #include <asm/vpe.h> #include <asm/kspd.h> typedef void *vpe_handle; #ifndef ARCH_SHF_SMALL #define ARCH_SHF_SMALL 0 #endif /* If this is set, the section belongs in the init part of the module */ #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) /* * The number of TCs and VPEs physically available on the core */ static int hw_tcs, hw_vpes; static char module_name[] = "vpe"; static int major; static const int minor = 1; /* fixed for now */ #ifdef CONFIG_MIPS_APSP_KSPD static struct kspd_notifications kspd_events; static int kspd_events_reqd; #endif /* grab the likely amount of memory we will need. 
*/ #ifdef CONFIG_MIPS_VPE_LOADER_TOM #define P_SIZE (2 * 1024 * 1024) #else /* add an overhead to the max kmalloc size for non-striped symbols/etc */ #define P_SIZE (256 * 1024) #endif extern unsigned long physical_memsize; #define MAX_VPES 16 #define VPE_PATH_MAX 256 enum vpe_state { VPE_STATE_UNUSED = 0, VPE_STATE_INUSE, VPE_STATE_RUNNING }; enum tc_state { TC_STATE_UNUSED = 0, TC_STATE_INUSE, TC_STATE_RUNNING, TC_STATE_DYNAMIC }; struct vpe { enum vpe_state state; /* (device) minor associated with this vpe */ int minor; /* elfloader stuff */ void *load_addr; unsigned long len; char *pbuffer; unsigned long plen; unsigned int uid, gid; char cwd[VPE_PATH_MAX]; unsigned long __start; /* tc's associated with this vpe */ struct list_head tc; /* The list of vpe's */ struct list_head list; /* shared symbol address */ void *shared_ptr; /* the list of who wants to know when something major happens */ struct list_head notify; unsigned int ntcs; }; struct tc { enum tc_state state; int index; struct vpe *pvpe; /* parent VPE */ struct list_head tc; /* The list of TC's with this VPE */ struct list_head list; /* The global list of tc's */ }; struct { spinlock_t vpe_list_lock; struct list_head vpe_list; /* Virtual processing elements */ spinlock_t tc_list_lock; struct list_head tc_list; /* Thread contexts */ } vpecontrol = { .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) }; static void release_progmem(void *ptr); /* get the vpe associated with this minor */ static struct vpe *get_vpe(int minor) { struct vpe *res, *v; if (!cpu_has_mipsmt) return NULL; res = NULL; spin_lock(&vpecontrol.vpe_list_lock); list_for_each_entry(v, &vpecontrol.vpe_list, list) { if (v->minor == minor) { res = v; break; } } spin_unlock(&vpecontrol.vpe_list_lock); return res; } /* get the vpe associated with this minor */ static struct tc *get_tc(int index) 
{ struct tc *res, *t; res = NULL; spin_lock(&vpecontrol.tc_list_lock); list_for_each_entry(t, &vpecontrol.tc_list, list) { if (t->index == index) { res = t; break; } } spin_unlock(&vpecontrol.tc_list_lock); return NULL; } /* allocate a vpe and associate it with this minor (or index) */ static struct vpe *alloc_vpe(int minor) { struct vpe *v; if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) return NULL; INIT_LIST_HEAD(&v->tc); spin_lock(&vpecontrol.vpe_list_lock); list_add_tail(&v->list, &vpecontrol.vpe_list); spin_unlock(&vpecontrol.vpe_list_lock); INIT_LIST_HEAD(&v->notify); v->minor = minor; return v; } /* allocate a tc. At startup only tc0 is running, all other can be halted. */ static struct tc *alloc_tc(int index) { struct tc *tc; if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) goto out; INIT_LIST_HEAD(&tc->tc); tc->index = index; spin_lock(&vpecontrol.tc_list_lock); list_add_tail(&tc->list, &vpecontrol.tc_list); spin_unlock(&vpecontrol.tc_list_lock); out: return tc; } /* clean up and free everything */ static void release_vpe(struct vpe *v) { list_del(&v->list); if (v->load_addr) release_progmem(v); kfree(v); } static void __maybe_unused dump_mtregs(void) { unsigned long val; val = read_c0_config3(); printk("config3 0x%lx MT %ld\n", val, (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT); val = read_c0_mvpcontrol(); printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val, (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT, (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT, (val & MVPCONTROL_EVP)); val = read_c0_mvpconf0(); printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val, (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT, val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT); } /* Find some VPE program space */ static void *alloc_progmem(unsigned long len) { void *addr; #ifdef CONFIG_MIPS_VPE_LOADER_TOM /* * This means you must tell Linux to use less memory than you * physically have, for example by passing a mem= boot argument. 
*/ addr = pfn_to_kaddr(max_low_pfn); memset(addr, 0, len); #else /* simple grab some mem for now */ addr = kzalloc(len, GFP_KERNEL); #endif return addr; } static void release_progmem(void *ptr) { #ifndef CONFIG_MIPS_VPE_LOADER_TOM kfree(ptr); #endif } /* Update size with this section: return offset. */ static long get_offset(unsigned long *size, Elf_Shdr * sechdr) { long ret; ret = ALIGN(*size, sechdr->sh_addralign ? : 1); *size = ret + sechdr->sh_size; return ret; } /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld might -- code, read-only data, read-write data, small data. Tally sizes, and place the offsets into sh_entsize fields: high bit means it belongs in init. */ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, Elf_Shdr * sechdrs, const char *secstrings) { static unsigned long const masks[][2] = { /* NOTE: all executable code must be the first section * in this array; otherwise modify the text_size * finder in the two loops below */ {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL}, {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL}, {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL}, {ARCH_SHF_SMALL | SHF_ALLOC, 0} }; unsigned int m, i; for (i = 0; i < hdr->e_shnum; i++) sechdrs[i].sh_entsize = ~0UL; for (m = 0; m < ARRAY_SIZE(masks); ++m) { for (i = 0; i < hdr->e_shnum; ++i) { Elf_Shdr *s = &sechdrs[i]; // || strncmp(secstrings + s->sh_name, ".init", 5) == 0) if ((s->sh_flags & masks[m][0]) != masks[m][0] || (s->sh_flags & masks[m][1]) || s->sh_entsize != ~0UL) continue; s->sh_entsize = get_offset((unsigned long *)&mod->core_size, s); } if (m == 0) mod->core_text_size = mod->core_size; } } /* from module-elf32.c, but subverted a little */ struct mips_hi16 { struct mips_hi16 *next; Elf32_Addr *addr; Elf32_Addr value; }; static struct mips_hi16 *mips_hi16_list; static unsigned int gp_offs, gp_addr; static int apply_r_mips_none(struct module *me, uint32_t *location, Elf32_Addr v) { return 0; } static int apply_r_mips_gprel16(struct module *me, 
uint32_t *location, Elf32_Addr v) { int rel; if( !(*location & 0xffff) ) { rel = (int)v - gp_addr; } else { /* .sbss + gp(relative) + offset */ /* kludge! */ rel = (int)(short)((int)v + gp_offs + (int)(short)(*location & 0xffff) - gp_addr); } if( (rel > 32768) || (rel < -32768) ) { printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: " "relative address 0x%x out of range of gp register\n", rel); return -ENOEXEC; } *location = (*location & 0xffff0000) | (rel & 0xffff); return 0; } static int apply_r_mips_pc16(struct module *me, uint32_t *location, Elf32_Addr v) { int rel; rel = (((unsigned int)v - (unsigned int)location)); rel >>= 2; // because the offset is in _instructions_ not bytes. rel -= 1; // and one instruction less due to the branch delay slot. if( (rel > 32768) || (rel < -32768) ) { printk(KERN_DEBUG "VPE loader: " "apply_r_mips_pc16: relative address out of range 0x%x\n", rel); return -ENOEXEC; } *location = (*location & 0xffff0000) | (rel & 0xffff); return 0; } static int apply_r_mips_32(struct module *me, uint32_t *location, Elf32_Addr v) { *location += v; return 0; } static int apply_r_mips_26(struct module *me, uint32_t *location, Elf32_Addr v) { if (v % 4) { printk(KERN_DEBUG "VPE loader: apply_r_mips_26 " " unaligned relocation\n"); return -ENOEXEC; } /* * Not desperately convinced this is a good check of an overflow condition * anyway. But it gets in the way of handling undefined weak symbols which * we want to set to zero. * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { * printk(KERN_ERR * "module %s: relocation overflow\n", * me->name); * return -ENOEXEC; * } */ *location = (*location & ~0x03ffffff) | ((*location + (v >> 2)) & 0x03ffffff); return 0; } static int apply_r_mips_hi16(struct module *me, uint32_t *location, Elf32_Addr v) { struct mips_hi16 *n; /* * We cannot relocate this one now because we don't know the value of * the carry we need to add. Save the information, and let LO16 do the * actual relocation. 
*/ n = kmalloc(sizeof *n, GFP_KERNEL); if (!n) return -ENOMEM; n->addr = location; n->value = v; n->next = mips_hi16_list; mips_hi16_list = n; return 0; } static int apply_r_mips_lo16(struct module *me, uint32_t *location, Elf32_Addr v) { unsigned long insnlo = *location; Elf32_Addr val, vallo; struct mips_hi16 *l, *next; /* Sign extend the addend we extract from the lo insn. */ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; if (mips_hi16_list != NULL) { l = mips_hi16_list; while (l != NULL) { unsigned long insn; /* * The value for the HI16 had best be the same. */ if (v != l->value) { printk(KERN_DEBUG "VPE loader: " "apply_r_mips_lo16/hi16: \t" "inconsistent value information\n"); goto out_free; } /* * Do the HI16 relocation. Note that we actually don't * need to know anything about the LO16 itself, except * where to find the low 16 bits of the addend needed * by the LO16. */ insn = *l->addr; val = ((insn & 0xffff) << 16) + vallo; val += v; /* * Account for the sign extension that will happen in * the low bits. */ val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff; insn = (insn & ~0xffff) | val; *l->addr = insn; next = l->next; kfree(l); l = next; } mips_hi16_list = NULL; } /* * Ok, we're done with the HI16 relocs. Now deal with the LO16. 
*/ val = v + vallo; insnlo = (insnlo & ~0xffff) | (val & 0xffff); *location = insnlo; return 0; out_free: while (l != NULL) { next = l->next; kfree(l); l = next; } mips_hi16_list = NULL; return -ENOEXEC; } static int (*reloc_handlers[]) (struct module *me, uint32_t *location, Elf32_Addr v) = { [R_MIPS_NONE] = apply_r_mips_none, [R_MIPS_32] = apply_r_mips_32, [R_MIPS_26] = apply_r_mips_26, [R_MIPS_HI16] = apply_r_mips_hi16, [R_MIPS_LO16] = apply_r_mips_lo16, [R_MIPS_GPREL16] = apply_r_mips_gprel16, [R_MIPS_PC16] = apply_r_mips_pc16 }; static char *rstrs[] = { [R_MIPS_NONE] = "MIPS_NONE", [R_MIPS_32] = "MIPS_32", [R_MIPS_26] = "MIPS_26", [R_MIPS_HI16] = "MIPS_HI16", [R_MIPS_LO16] = "MIPS_LO16", [R_MIPS_GPREL16] = "MIPS_GPREL16", [R_MIPS_PC16] = "MIPS_PC16" }; static int apply_relocations(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) { Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr; Elf32_Sym *sym; uint32_t *location; unsigned int i; Elf32_Addr v; int res; for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { Elf32_Word r_info = rel[i].r_info; /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; /* This is the symbol it is referring to */ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr + ELF32_R_SYM(r_info); if (!sym->st_value) { printk(KERN_DEBUG "%s: undefined weak symbol %s\n", me->name, strtab + sym->st_name); /* just print the warning, dont barf */ } v = sym->st_value; res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); if( res ) { char *r = rstrs[ELF32_R_TYPE(r_info)]; printk(KERN_WARNING "VPE loader: .text+0x%x " "relocation type %s for symbol \"%s\" failed\n", rel[i].r_offset, r ? 
r : "UNKNOWN", strtab + sym->st_name); return res; } } return 0; } static inline void save_gp_address(unsigned int secbase, unsigned int rel) { gp_addr = secbase + rel; gp_offs = gp_addr - (secbase & 0xffff0000); } /* end module-elf32.c */ /* Change all symbols so that sh_value encodes the pointer directly. */ static void simplify_symbols(Elf_Shdr * sechdrs, unsigned int symindex, const char *strtab, const char *secstrings, unsigned int nsecs, struct module *mod) { Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; unsigned long secbase, bssbase = 0; unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); int size; /* find the .bss section for COMMON symbols */ for (i = 0; i < nsecs; i++) { if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) { bssbase = sechdrs[i].sh_addr; break; } } for (i = 1; i < n; i++) { switch (sym[i].st_shndx) { case SHN_COMMON: /* Allocate space for the symbol in the .bss section. st_value is currently size. We want it to have the address of the symbol. 
*/ size = sym[i].st_value; sym[i].st_value = bssbase; bssbase += size; break; case SHN_ABS: /* Don't need to do anything */ break; case SHN_UNDEF: /* ret = -ENOENT; */ break; case SHN_MIPS_SCOMMON: printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON " "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name, sym[i].st_shndx); // .sbss section break; default: secbase = sechdrs[sym[i].st_shndx].sh_addr; if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) { save_gp_address(secbase, sym[i].st_value); } sym[i].st_value += secbase; break; } } } #ifdef DEBUG_ELFLOADER static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex, const char *strtab, struct module *mod) { Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n); for (i = 1; i < n; i++) { printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name, sym[i].st_value); } } #endif /* We are prepared so configure and start the VPE... 
*/ static int vpe_run(struct vpe * v) { unsigned long flags, val, dmt_flag; struct vpe_notifications *n; unsigned int vpeflags; struct tc *t; /* check we are the Master VPE */ local_irq_save(flags); val = read_c0_vpeconf0(); if (!(val & VPECONF0_MVP)) { printk(KERN_WARNING "VPE loader: only Master VPE's are allowed to configure MT\n"); local_irq_restore(flags); return -1; } dmt_flag = dmt(); vpeflags = dvpe(); if (!list_empty(&v->tc)) { if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { evpe(vpeflags); emt(dmt_flag); local_irq_restore(flags); printk(KERN_WARNING "VPE loader: TC %d is already in use.\n", t->index); return -ENOEXEC; } } else { evpe(vpeflags); emt(dmt_flag); local_irq_restore(flags); printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n", v->minor); return -ENOEXEC; } /* Put MVPE's into 'configuration state' */ set_c0_mvpcontrol(MVPCONTROL_VPC); settc(t->index); /* should check it is halted, and not activated */ if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { evpe(vpeflags); emt(dmt_flag); local_irq_restore(flags); printk(KERN_WARNING "VPE loader: TC %d is already active!\n", t->index); return -ENOEXEC; } /* Write the address we want it to start running from in the TCPC register. */ write_tc_c0_tcrestart((unsigned long)v->__start); write_tc_c0_tccontext((unsigned long)0); /* * Mark the TC as activated, not interrupt exempt and not dynamically * allocatable */ val = read_tc_c0_tcstatus(); val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A; write_tc_c0_tcstatus(val); write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); /* * The sde-kit passes 'memsize' to __start in $a3, so set something * here... 
Or set $a3 to zero and define DFLT_STACK_SIZE and * DFLT_HEAP_SIZE when you compile your program */ mttgpr(6, v->ntcs); mttgpr(7, physical_memsize); /* set up VPE1 */ /* * bind the TC to VPE 1 as late as possible so we only have the final * VPE registers to set up, and so an EJTAG probe can trigger on it */ write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1); write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA)); back_to_back_c0_hazard(); /* Set up the XTC bit in vpeconf0 to point at our tc */ write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) | (t->index << VPECONF0_XTC_SHIFT)); back_to_back_c0_hazard(); /* enable this VPE */ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); /* clear out any left overs from a previous program */ write_vpe_c0_status(0); write_vpe_c0_cause(0); /* take system out of configuration state */ clear_c0_mvpcontrol(MVPCONTROL_VPC); /* * SMTC/SMVP kernels manage VPE enable independently, * but uniprocessor kernels need to turn it on, even * if that wasn't the pre-dvpe() state. */ #ifdef CONFIG_SMP evpe(vpeflags); #else evpe(EVPE_ENABLE); #endif emt(dmt_flag); local_irq_restore(flags); list_for_each_entry(n, &v->notify, list) n->start(minor); return 0; } static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs, unsigned int symindex, const char *strtab, struct module *mod) { Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); for (i = 1; i < n; i++) { if (strcmp(strtab + sym[i].st_name, "__start") == 0) { v->__start = sym[i].st_value; } if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) { v->shared_ptr = (void *)sym[i].st_value; } } if ( (v->__start == 0) || (v->shared_ptr == NULL)) return -1; return 0; } /* * Allocates a VPE with some program code space(the load address), copies the * contents of the program (p)buffer performing relocatations/etc, free's it * when finished. 
*/ static int vpe_elfload(struct vpe * v) { Elf_Ehdr *hdr; Elf_Shdr *sechdrs; long err = 0; char *secstrings, *strtab = NULL; unsigned int len, i, symindex = 0, strindex = 0, relocate = 0; struct module mod; // so we can re-use the relocations code memset(&mod, 0, sizeof(struct module)); strcpy(mod.name, "VPE loader"); hdr = (Elf_Ehdr *) v->pbuffer; len = v->plen; /* Sanity checks against insmoding binaries or wrong arch, weird elf version */ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0 || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC) || !elf_check_arch(hdr) || hdr->e_shentsize != sizeof(*sechdrs)) { printk(KERN_WARNING "VPE loader: program wrong arch or weird elf version\n"); return -ENOEXEC; } if (hdr->e_type == ET_REL) relocate = 1; if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { printk(KERN_ERR "VPE loader: program length %u truncated\n", len); return -ENOEXEC; } /* Convenience variables */ sechdrs = (void *)hdr + hdr->e_shoff; secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; sechdrs[0].sh_addr = 0; /* And these should exist, but gcc whinges if we don't init them */ symindex = strindex = 0; if (relocate) { for (i = 1; i < hdr->e_shnum; i++) { if (sechdrs[i].sh_type != SHT_NOBITS && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) { printk(KERN_ERR "VPE program length %u truncated\n", len); return -ENOEXEC; } /* Mark all sections sh_addr with their address in the temporary image. */ sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; /* Internal symbols and strings. 
*/ if (sechdrs[i].sh_type == SHT_SYMTAB) { symindex = i; strindex = sechdrs[i].sh_link; strtab = (char *)hdr + sechdrs[strindex].sh_offset; } } layout_sections(&mod, hdr, sechdrs, secstrings); } v->load_addr = alloc_progmem(mod.core_size); if (!v->load_addr) return -ENOMEM; pr_info("VPE loader: loading to %p\n", v->load_addr); if (relocate) { for (i = 0; i < hdr->e_shnum; i++) { void *dest; if (!(sechdrs[i].sh_flags & SHF_ALLOC)) continue; dest = v->load_addr + sechdrs[i].sh_entsize; if (sechdrs[i].sh_type != SHT_NOBITS) memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size); /* Update sh_addr to point to copy in image. */ sechdrs[i].sh_addr = (unsigned long)dest; printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n", secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr); } /* Fix up syms, so that st_value is a pointer to location. */ simplify_symbols(sechdrs, symindex, strtab, secstrings, hdr->e_shnum, &mod); /* Now do relocations. */ for (i = 1; i < hdr->e_shnum; i++) { const char *strtab = (char *)sechdrs[strindex].sh_addr; unsigned int info = sechdrs[i].sh_info; /* Not a valid relocation section? */ if (info >= hdr->e_shnum) continue; /* Don't bother with non-allocated sections */ if (!(sechdrs[info].sh_flags & SHF_ALLOC)) continue; if (sechdrs[i].sh_type == SHT_REL) err = apply_relocations(sechdrs, strtab, symindex, i, &mod); else if (sechdrs[i].sh_type == SHT_RELA) err = apply_relocate_add(sechdrs, strtab, symindex, i, &mod); if (err < 0) return err; } } else { struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff); for (i = 0; i < hdr->e_phnum; i++) { if (phdr->p_type == PT_LOAD) { memcpy((void *)phdr->p_paddr, (char *)hdr + phdr->p_offset, phdr->p_filesz); memset((void *)phdr->p_paddr + phdr->p_filesz, 0, phdr->p_memsz - phdr->p_filesz); } phdr++; } for (i = 0; i < hdr->e_shnum; i++) { /* Internal symbols and strings. 
*/ if (sechdrs[i].sh_type == SHT_SYMTAB) { symindex = i; strindex = sechdrs[i].sh_link; strtab = (char *)hdr + sechdrs[strindex].sh_offset; /* mark the symtab's address for when we try to find the magic symbols */ sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; } } } /* make sure it's physically written out */ flush_icache_range((unsigned long)v->load_addr, (unsigned long)v->load_addr + v->len); if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) { if (v->__start == 0) { printk(KERN_WARNING "VPE loader: program does not contain " "a __start symbol\n"); return -ENOEXEC; } if (v->shared_ptr == NULL) printk(KERN_WARNING "VPE loader: " "program does not contain vpe_shared symbol.\n" " Unable to use AMVP (AP/SP) facilities.\n"); } printk(" elf loaded\n"); return 0; } static void cleanup_tc(struct tc *tc) { unsigned long flags; unsigned int mtflags, vpflags; int tmp; local_irq_save(flags); mtflags = dmt(); vpflags = dvpe(); /* Put MVPE's into 'configuration state' */ set_c0_mvpcontrol(MVPCONTROL_VPC); settc(tc->index); tmp = read_tc_c0_tcstatus(); /* mark not allocated and not dynamically allocatable */ tmp &= ~(TCSTATUS_A | TCSTATUS_DA); tmp |= TCSTATUS_IXMT; /* interrupt exempt */ write_tc_c0_tcstatus(tmp); write_tc_c0_tchalt(TCHALT_H); mips_ihb(); /* bind it to anything other than VPE1 */ // write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE clear_c0_mvpcontrol(MVPCONTROL_VPC); evpe(vpflags); emt(mtflags); local_irq_restore(flags); } static int getcwd(char *buff, int size) { mm_segment_t old_fs; int ret; old_fs = get_fs(); set_fs(KERNEL_DS); ret = sys_getcwd(buff, size); set_fs(old_fs); return ret; } /* checks VPE is unused and gets ready to load program */ static int vpe_open(struct inode *inode, struct file *filp) { enum vpe_state state; struct vpe_notifications *not; struct vpe *v; int ret; if (minor != iminor(inode)) { /* assume only 1 device at the moment. 
*/ pr_warning("VPE loader: only vpe1 is supported\n"); return -ENODEV; } if ((v = get_vpe(tclimit)) == NULL) { pr_warning("VPE loader: unable to get vpe\n"); return -ENODEV; } state = xchg(&v->state, VPE_STATE_INUSE); if (state != VPE_STATE_UNUSED) { printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n"); list_for_each_entry(not, &v->notify, list) { not->stop(tclimit); } release_progmem(v->load_addr); cleanup_tc(get_tc(tclimit)); } /* this of-course trashes what was there before... */ v->pbuffer = vmalloc(P_SIZE); if (!v->pbuffer) { pr_warning("VPE loader: unable to allocate memory\n"); return -ENOMEM; } v->plen = P_SIZE; v->load_addr = NULL; v->len = 0; v->uid = filp->f_cred->fsuid; v->gid = filp->f_cred->fsgid; #ifdef CONFIG_MIPS_APSP_KSPD /* get kspd to tell us when a syscall_exit happens */ if (!kspd_events_reqd) { kspd_notify(&kspd_events); kspd_events_reqd++; } #endif v->cwd[0] = 0; ret = getcwd(v->cwd, VPE_PATH_MAX); if (ret < 0) printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret); v->shared_ptr = NULL; v->__start = 0; return 0; } static int vpe_release(struct inode *inode, struct file *filp) { struct vpe *v; Elf_Ehdr *hdr; int ret = 0; v = get_vpe(tclimit); if (v == NULL) return -ENODEV; hdr = (Elf_Ehdr *) v->pbuffer; if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) { if (vpe_elfload(v) >= 0) { vpe_run(v); } else { printk(KERN_WARNING "VPE loader: ELF load failed.\n"); ret = -ENOEXEC; } } else { printk(KERN_WARNING "VPE loader: only elf files are supported\n"); ret = -ENOEXEC; } /* It's good to be able to run the SP and if it chokes have a look at the /dev/rt?. But if we reset the pointer to the shared struct we lose what has happened. So perhaps if garbage is sent to the vpe device, use it as a trigger for the reset. Hopefully a nice executable will be along shortly. 
*/ if (ret < 0) v->shared_ptr = NULL; vfree(v->pbuffer); v->plen = 0; return ret; } static ssize_t vpe_write(struct file *file, const char __user * buffer, size_t count, loff_t * ppos) { size_t ret = count; struct vpe *v; if (iminor(file->f_path.dentry->d_inode) != minor) return -ENODEV; v = get_vpe(tclimit); if (v == NULL) return -ENODEV; if ((count + v->len) > v->plen) { printk(KERN_WARNING "VPE loader: elf size too big. Perhaps strip uneeded symbols\n"); return -ENOMEM; } count -= copy_from_user(v->pbuffer + v->len, buffer, count); if (!count) return -EFAULT; v->len += count; return ret; } static const struct file_operations vpe_fops = { .owner = THIS_MODULE, .open = vpe_open, .release = vpe_release, .write = vpe_write, .llseek = noop_llseek, }; /* module wrapper entry points */ /* give me a vpe */ vpe_handle vpe_alloc(void) { int i; struct vpe *v; /* find a vpe */ for (i = 1; i < MAX_VPES; i++) { if ((v = get_vpe(i)) != NULL) { v->state = VPE_STATE_INUSE; return v; } } return NULL; } EXPORT_SYMBOL(vpe_alloc); /* start running from here */ int vpe_start(vpe_handle vpe, unsigned long start) { struct vpe *v = vpe; v->__start = start; return vpe_run(v); } EXPORT_SYMBOL(vpe_start); /* halt it for now */ int vpe_stop(vpe_handle vpe) { struct vpe *v = vpe; struct tc *t; unsigned int evpe_flags; evpe_flags = dvpe(); if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) { settc(t->index); write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); } evpe(evpe_flags); return 0; } EXPORT_SYMBOL(vpe_stop); /* I've done with it thank you */ int vpe_free(vpe_handle vpe) { struct vpe *v = vpe; struct tc *t; unsigned int evpe_flags; if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { return -ENOEXEC; } evpe_flags = dvpe(); /* Put MVPE's into 'configuration state' */ set_c0_mvpcontrol(MVPCONTROL_VPC); settc(t->index); write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); /* halt the TC */ write_tc_c0_tchalt(TCHALT_H); mips_ihb(); /* mark the TC unallocated 
*/ write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A); v->state = VPE_STATE_UNUSED; clear_c0_mvpcontrol(MVPCONTROL_VPC); evpe(evpe_flags); return 0; } EXPORT_SYMBOL(vpe_free); void *vpe_get_shared(int index) { struct vpe *v; if ((v = get_vpe(index)) == NULL) return NULL; return v->shared_ptr; } EXPORT_SYMBOL(vpe_get_shared); int vpe_getuid(int index) { struct vpe *v; if ((v = get_vpe(index)) == NULL) return -1; return v->uid; } EXPORT_SYMBOL(vpe_getuid); int vpe_getgid(int index) { struct vpe *v; if ((v = get_vpe(index)) == NULL) return -1; return v->gid; } EXPORT_SYMBOL(vpe_getgid); int vpe_notify(int index, struct vpe_notifications *notify) { struct vpe *v; if ((v = get_vpe(index)) == NULL) return -1; list_add(&notify->list, &v->notify); return 0; } EXPORT_SYMBOL(vpe_notify); char *vpe_getcwd(int index) { struct vpe *v; if ((v = get_vpe(index)) == NULL) return NULL; return v->cwd; } EXPORT_SYMBOL(vpe_getcwd); #ifdef CONFIG_MIPS_APSP_KSPD static void kspd_sp_exit( int sp_id) { cleanup_tc(get_tc(sp_id)); } #endif static ssize_t store_kill(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct vpe *vpe = get_vpe(tclimit); struct vpe_notifications *not; list_for_each_entry(not, &vpe->notify, list) { not->stop(tclimit); } release_progmem(vpe->load_addr); cleanup_tc(get_tc(tclimit)); vpe_stop(vpe); vpe_free(vpe); return len; } static ssize_t show_ntcs(struct device *cd, struct device_attribute *attr, char *buf) { struct vpe *vpe = get_vpe(tclimit); return sprintf(buf, "%d\n", vpe->ntcs); } static ssize_t store_ntcs(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct vpe *vpe = get_vpe(tclimit); unsigned long new; char *endp; new = simple_strtoul(buf, &endp, 0); if (endp == buf) goto out_einval; if (new == 0 || new > (hw_tcs - tclimit)) goto out_einval; vpe->ntcs = new; return len; out_einval: return -EINVAL; } static struct device_attribute vpe_class_attributes[] = { __ATTR(kill, S_IWUSR, 
NULL, store_kill), __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs), {} }; static void vpe_device_release(struct device *cd) { kfree(cd); } struct class vpe_class = { .name = "vpe", .owner = THIS_MODULE, .dev_release = vpe_device_release, .dev_attrs = vpe_class_attributes, }; struct device vpe_device; static int __init vpe_module_init(void) { unsigned int mtflags, vpflags; unsigned long flags, val; struct vpe *v = NULL; struct tc *t; int tc, err; if (!cpu_has_mipsmt) { printk("VPE loader: not a MIPS MT capable processor\n"); return -ENODEV; } if (vpelimit == 0) { printk(KERN_WARNING "No VPEs reserved for AP/SP, not " "initializing VPE loader.\nPass maxvpes=<n> argument as " "kernel argument\n"); return -ENODEV; } if (tclimit == 0) { printk(KERN_WARNING "No TCs reserved for AP/SP, not " "initializing VPE loader.\nPass maxtcs=<n> argument as " "kernel argument\n"); return -ENODEV; } major = register_chrdev(0, module_name, &vpe_fops); if (major < 0) { printk("VPE loader: unable to register character device\n"); return major; } err = class_register(&vpe_class); if (err) { printk(KERN_ERR "vpe_class registration failed\n"); goto out_chrdev; } device_initialize(&vpe_device); vpe_device.class = &vpe_class, vpe_device.parent = NULL, dev_set_name(&vpe_device, "vpe1"); vpe_device.devt = MKDEV(major, minor); err = device_add(&vpe_device); if (err) { printk(KERN_ERR "Adding vpe_device failed\n"); goto out_class; } local_irq_save(flags); mtflags = dmt(); vpflags = dvpe(); /* Put MVPE's into 'configuration state' */ set_c0_mvpcontrol(MVPCONTROL_VPC); /* dump_mtregs(); */ val = read_c0_mvpconf0(); hw_tcs = (val & MVPCONF0_PTC) + 1; hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; for (tc = tclimit; tc < hw_tcs; tc++) { /* * Must re-enable multithreading temporarily or in case we * reschedule send IPIs or similar we might hang. 
*/ clear_c0_mvpcontrol(MVPCONTROL_VPC); evpe(vpflags); emt(mtflags); local_irq_restore(flags); t = alloc_tc(tc); if (!t) { err = -ENOMEM; goto out; } local_irq_save(flags); mtflags = dmt(); vpflags = dvpe(); set_c0_mvpcontrol(MVPCONTROL_VPC); /* VPE's */ if (tc < hw_tcs) { settc(tc); if ((v = alloc_vpe(tc)) == NULL) { printk(KERN_WARNING "VPE: unable to allocate VPE\n"); goto out_reenable; } v->ntcs = hw_tcs - tclimit; /* add the tc to the list of this vpe's tc's. */ list_add(&t->tc, &v->tc); /* deactivate all but vpe0 */ if (tc >= tclimit) { unsigned long tmp = read_vpe_c0_vpeconf0(); tmp &= ~VPECONF0_VPA; /* master VPE */ tmp |= VPECONF0_MVP; write_vpe_c0_vpeconf0(tmp); } /* disable multi-threading with TC's */ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); if (tc >= vpelimit) { /* * Set config to be the same as vpe0, * particularly kseg0 coherency alg */ write_vpe_c0_config(read_c0_config()); } } /* TC's */ t->pvpe = v; /* set the parent vpe */ if (tc >= tclimit) { unsigned long tmp; settc(tc); /* Any TC that is bound to VPE0 gets left as is - in case we are running SMTC on VPE0. A TC that is bound to any other VPE gets bound to VPE0, ideally I'd like to make it homeless but it doesn't appear to let me bind a TC to a non-existent VPE. Which is perfectly reasonable. The (un)bound state is visible to an EJTAG probe so may notify GDB... 
*/ if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) { /* tc is bound >vpe0 */ write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE); t->pvpe = get_vpe(0); /* set the parent vpe */ } /* halt the TC */ write_tc_c0_tchalt(TCHALT_H); mips_ihb(); tmp = read_tc_c0_tcstatus(); /* mark not activated and not dynamically allocatable */ tmp &= ~(TCSTATUS_A | TCSTATUS_DA); tmp |= TCSTATUS_IXMT; /* interrupt exempt */ write_tc_c0_tcstatus(tmp); } } out_reenable: /* release config state */ clear_c0_mvpcontrol(MVPCONTROL_VPC); evpe(vpflags); emt(mtflags); local_irq_restore(flags); #ifdef CONFIG_MIPS_APSP_KSPD kspd_events.kspd_sp_exit = kspd_sp_exit; #endif return 0; out_class: class_unregister(&vpe_class); out_chrdev: unregister_chrdev(major, module_name); out: return err; } static void __exit vpe_module_exit(void) { struct vpe *v, *n; device_del(&vpe_device); unregister_chrdev(major, module_name); /* No locking needed here */ list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) { if (v->state != VPE_STATE_UNUSED) release_vpe(v); } } module_init(vpe_module_init); module_exit(vpe_module_exit); MODULE_DESCRIPTION("MIPS VPE Loader"); MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc."); MODULE_LICENSE("GPL");
gpl-2.0
PlayOSS-Dev/acer_picasso_kernel
fs/qnx4/namei.c
3342
3327
/*
 * QNX4 file system, Linux implementation.
 *
 * Version : 0.2.1
 *
 * Using parts of the xiafs filesystem.
 *
 * History :
 *
 * 01-06-1998 by Richard Frowijn : first release.
 * 21-06-1998 by Frank Denis : dcache support, fixed error codes.
 * 04-07-1998 by Frank Denis : first step for rmdir/unlink.
 */

#include <linux/buffer_head.h>
#include "qnx4.h"

/*
 * Compare the directory entry at *offset inside bh against the name
 * <name, len>.  *offset is advanced past the entry in every case.
 *
 * QNX writes each new file into the directory twice, first with all
 * options zeroed, so an entry only counts as a match when it carries
 * the USED or LINK status bit.  Returns 1 on a match, 0 otherwise.
 */
static int qnx4_match(int len, const char *name,
		      struct buffer_head *bh, unsigned long *offset)
{
	struct qnx4_inode_entry *entry;
	int limit;
	int flen;

	if (bh == NULL) {
		printk(KERN_WARNING "qnx4: matching unassigned buffer !\n");
		return 0;
	}

	entry = (struct qnx4_inode_entry *) (bh->b_data + *offset);
	*offset += QNX4_DIR_ENTRY_SIZE;

	/* Link entries carry the long name, regular ones the short name. */
	if ((entry->di_status & QNX4_FILE_LINK) != 0)
		limit = QNX4_NAME_MAX;
	else
		limit = QNX4_SHORT_NAME_MAX;

	/* "" means "." ---> so paths like "/usr/lib//libc.a" work */
	if (!len && entry->di_fname[0] == '.' && entry->di_fname[1] == '\0')
		return 1;

	flen = strlen(entry->di_fname);
	if (flen > limit)
		flen = limit;
	if (len != flen)
		return 0;

	if (strncmp(name, entry->di_fname, len) == 0 &&
	    (entry->di_status & (QNX4_FILE_USED | QNX4_FILE_LINK)) != 0)
		return 1;

	return 0;
}

/*
 * Walk the directory blocks of @dir looking for an entry named
 * <name, len>.  On success returns the buffer_head holding the entry
 * (caller must brelse it), sets *res_dir to the raw entry and *ino to
 * the computed inode number.  Returns NULL when nothing matches.
 */
static struct buffer_head *qnx4_find_entry(int len, struct inode *dir,
	   const char *name, struct qnx4_inode_entry **res_dir, int *ino)
{
	unsigned long map_block, pos, blk;
	struct buffer_head *dir_bh;

	*res_dir = NULL;
	if (!dir->i_sb) {
		printk(KERN_WARNING "qnx4: no superblock on dir.\n");
		return NULL;
	}

	dir_bh = NULL;
	map_block = pos = blk = 0;
	while (blk * QNX4_BLOCK_SIZE + pos < dir->i_size) {
		if (!dir_bh) {
			dir_bh = qnx4_bread(dir, blk, 0);
			if (!dir_bh) {
				/* Unreadable block: move on to the next one. */
				blk++;
				continue;
			}
		}
		*res_dir = (struct qnx4_inode_entry *) (dir_bh->b_data + pos);
		if (qnx4_match(len, name, dir_bh, &pos)) {
			map_block = qnx4_block_map(dir, blk);
			*ino = map_block * QNX4_INODES_PER_BLOCK +
			    (pos / QNX4_DIR_ENTRY_SIZE) - 1;
			return dir_bh;
		}
		if (pos < dir_bh->b_size)
			continue;
		/* End of this block: release it and start on the next. */
		brelse(dir_bh);
		dir_bh = NULL;
		pos = 0;
		blk++;
	}
	brelse(dir_bh);
	*res_dir = NULL;
	return NULL;
}

/*
 * Look up @dentry in directory @dir.  Resolves QNX4 link entries to the
 * real inode before iget.  Always d_adds the dentry (possibly with a
 * NULL inode for a negative entry) unless iget itself failed.
 */
struct dentry * qnx4_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
{
	int ino;
	struct qnx4_inode_entry *de;
	struct qnx4_link_info *link;
	struct buffer_head *bh;
	const char *name = dentry->d_name.name;
	int len = dentry->d_name.len;
	struct inode *inode = NULL;

	bh = qnx4_find_entry(len, dir, name, &de, &ino);
	if (bh == NULL)
		goto out;

	/* The entry is linked, let's get the real info */
	if ((de->di_status & QNX4_FILE_LINK) == QNX4_FILE_LINK) {
		link = (struct qnx4_link_info *) de;
		ino = (le32_to_cpu(link->dl_inode_blk) - 1) *
		    QNX4_INODES_PER_BLOCK +
		    link->dl_inode_ndx;
	}
	brelse(bh);

	inode = qnx4_iget(dir->i_sb, ino);
	if (IS_ERR(inode)) {
		QNX4DEBUG((KERN_ERR "qnx4: lookup->iget -> error %ld\n",
			   PTR_ERR(inode)));
		return ERR_CAST(inode);
	}
out:
	d_add(dentry, inode);
	return NULL;
}
gpl-2.0
TheWolfer22/android_kernel_lge_g3
drivers/staging/iio/dac/ad5686.c
4878
11514
/*
 * AD5686R, AD5685R, AD5684R Digital to analog converters driver
 *
 * Copyright 2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2.
 */

#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>

#include "../iio.h"
#include "../sysfs.h"
#include "dac.h"

#define AD5686_DAC_CHANNELS			4

/* 24-bit shift register layout: command [23:20], address [19:16], data below */
#define AD5686_ADDR(x)				((x) << 16)
#define AD5686_CMD(x)				((x) << 20)

#define AD5686_ADDR_DAC(chan)			(0x1 << (chan))
#define AD5686_ADDR_ALL_DAC			0xF

#define AD5686_CMD_NOOP				0x0
#define AD5686_CMD_WRITE_INPUT_N		0x1
#define AD5686_CMD_UPDATE_DAC_N			0x2
#define AD5686_CMD_WRITE_INPUT_N_UPDATE_N	0x3
#define AD5686_CMD_POWERDOWN_DAC		0x4
#define AD5686_CMD_LDAC_MASK			0x5
#define AD5686_CMD_RESET			0x6
#define AD5686_CMD_INTERNAL_REFER_SETUP		0x7
#define AD5686_CMD_DAISY_CHAIN_ENABLE		0x8
#define AD5686_CMD_READBACK_ENABLE		0x9

#define AD5686_LDAC_PWRDN_NONE			0x0
#define AD5686_LDAC_PWRDN_1K			0x1
#define AD5686_LDAC_PWRDN_100K			0x2
#define AD5686_LDAC_PWRDN_3STATE		0x3

/**
 * struct ad5686_chip_info - chip specific information
 * @int_vref_mv:	AD5620/40/60: the internal reference voltage
 * @channel:		channel specification
 */
struct ad5686_chip_info {
	u16				int_vref_mv;
	struct iio_chan_spec		channel[AD5686_DAC_CHANNELS];
};

/**
 * struct ad5686_state - driver instance specific data
 * @spi:		spi_device
 * @chip_info:		chip model specific constants, available modes etc
 * @reg:		supply regulator
 * @vref_mv:		actual reference voltage used
 * @pwr_down_mask:	power down mask
 * @pwr_down_mode:	current power down mode
 * @data:		spi transfer buffers
 */
struct ad5686_state {
	struct spi_device		*spi;
	const struct ad5686_chip_info	*chip_info;
	struct regulator		*reg;
	unsigned short			vref_mv;
	unsigned			pwr_down_mask;
	unsigned			pwr_down_mode;
	/*
	 * DMA (thus cache coherency maintenance) requires the
	 * transfer buffers to live in their own cache lines.
	 */
	union {
		u32 d32;
		u8 d8[4];
	} data[3] ____cacheline_aligned;
};

/**
 * ad5686_supported_device_ids:
 */
enum ad5686_supported_device_ids {
	ID_AD5684,
	ID_AD5685,
	ID_AD5686,
};

/* NOTE: renamed from AD5868_CHANNEL (typo) for consistency; internal-only. */
#define AD5686_CHANNEL(chan, bits, shift) {			\
	.type = IIO_VOLTAGE,					\
	.indexed = 1,						\
	.output = 1,						\
	.channel = chan,					\
	.info_mask = IIO_CHAN_INFO_SCALE_SHARED_BIT,		\
	.address = AD5686_ADDR_DAC(chan),			\
	.scan_type = IIO_ST('u', bits, 16, shift)		\
}

static const struct ad5686_chip_info ad5686_chip_info_tbl[] = {
	[ID_AD5684] = {
		.channel[0] = AD5686_CHANNEL(0, 12, 4),
		.channel[1] = AD5686_CHANNEL(1, 12, 4),
		.channel[2] = AD5686_CHANNEL(2, 12, 4),
		.channel[3] = AD5686_CHANNEL(3, 12, 4),
		.int_vref_mv = 2500,
	},
	[ID_AD5685] = {
		.channel[0] = AD5686_CHANNEL(0, 14, 2),
		.channel[1] = AD5686_CHANNEL(1, 14, 2),
		.channel[2] = AD5686_CHANNEL(2, 14, 2),
		.channel[3] = AD5686_CHANNEL(3, 14, 2),
		.int_vref_mv = 2500,
	},
	[ID_AD5686] = {
		.channel[0] = AD5686_CHANNEL(0, 16, 0),
		.channel[1] = AD5686_CHANNEL(1, 16, 0),
		.channel[2] = AD5686_CHANNEL(2, 16, 0),
		.channel[3] = AD5686_CHANNEL(3, 16, 0),
		.int_vref_mv = 2500,
	},
};

/*
 * Write one 24-bit command word (cmd | addr | val << shift) to the device.
 * Only the low three bytes of the big-endian buffer are clocked out.
 */
static int ad5686_spi_write(struct ad5686_state *st,
			    u8 cmd, u8 addr, u16 val, u8 shift)
{
	val <<= shift;

	st->data[0].d32 = cpu_to_be32(AD5686_CMD(cmd) |
			      AD5686_ADDR(addr) |
			      val);

	return spi_write(st->spi, &st->data[0].d8[1], 3);
}

/*
 * Read back the register at @addr: first transfer arms readback, second
 * transfer (a NOOP) clocks the data out.  Returns the raw (byte-swapped)
 * result or a negative error code.
 */
static int ad5686_spi_read(struct ad5686_state *st, u8 addr)
{
	struct spi_transfer t[] = {
		{
			.tx_buf = &st->data[0].d8[1],
			.len = 3,
			.cs_change = 1,
		}, {
			.tx_buf = &st->data[1].d8[1],
			.rx_buf = &st->data[2].d8[1],
			.len = 3,
		},
	};
	struct spi_message m;
	int ret;

	spi_message_init(&m);
	spi_message_add_tail(&t[0], &m);
	spi_message_add_tail(&t[1], &m);

	st->data[0].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_READBACK_ENABLE) |
			      AD5686_ADDR(addr));
	st->data[1].d32 = cpu_to_be32(AD5686_CMD(AD5686_CMD_NOOP));

	ret = spi_sync(st->spi, &m);
	if (ret < 0)
		return ret;

	return be32_to_cpu(st->data[2].d32);
}

/* sysfs: report the cached power-down mode (2 bits per channel). */
static ssize_t ad5686_read_powerdown_mode(struct device *dev,
					  struct device_attribute *attr,
					  char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5686_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	char mode[][15] = {"", "1kohm_to_gnd", "100kohm_to_gnd", "three_state"};

	return sprintf(buf, "%s\n", mode[(st->pwr_down_mode >>
					 (this_attr->address * 2)) & 0x3]);
}

/*
 * sysfs: store a new power-down mode for one channel.  Only the cached
 * value is updated; the hardware is written when powerdown is toggled.
 */
static ssize_t ad5686_write_powerdown_mode(struct device *dev,
					   struct device_attribute *attr,
					   const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5686_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	unsigned mode;

	if (sysfs_streq(buf, "1kohm_to_gnd"))
		mode = AD5686_LDAC_PWRDN_1K;
	else if (sysfs_streq(buf, "100kohm_to_gnd"))
		mode = AD5686_LDAC_PWRDN_100K;
	else if (sysfs_streq(buf, "three_state"))
		mode = AD5686_LDAC_PWRDN_3STATE;
	else
		return -EINVAL;

	st->pwr_down_mode &= ~(0x3 << (this_attr->address * 2));
	st->pwr_down_mode |= (mode << (this_attr->address * 2));

	return len;
}

/* sysfs: report whether a channel is currently powered down. */
static ssize_t ad5686_read_dac_powerdown(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5686_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	return sprintf(buf, "%d\n", !!(st->pwr_down_mask &
			(0x3 << (this_attr->address * 2))));
}

/* sysfs: power a channel up or down and push the new state to the chip. */
static ssize_t ad5686_write_dac_powerdown(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	bool readin;
	int ret;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad5686_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	ret = strtobool(buf, &readin);
	if (ret)
		return ret;

	if (readin == true)
		st->pwr_down_mask |= (0x3 << (this_attr->address * 2));
	else
		st->pwr_down_mask &= ~(0x3 << (this_attr->address * 2));

	ret = ad5686_spi_write(st, AD5686_CMD_POWERDOWN_DAC, 0,
			       st->pwr_down_mask & st->pwr_down_mode, 0);

	return ret ? ret : len;
}

static IIO_CONST_ATTR(out_voltage_powerdown_mode_available,
			"1kohm_to_gnd 100kohm_to_gnd three_state");

#define IIO_DEV_ATTR_DAC_POWERDOWN_MODE(_num)				\
	IIO_DEVICE_ATTR(out_voltage##_num##_powerdown_mode,		\
			S_IRUGO | S_IWUSR,				\
			ad5686_read_powerdown_mode,			\
			ad5686_write_powerdown_mode, _num)

static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(0);
static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(1);
static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(2);
static IIO_DEV_ATTR_DAC_POWERDOWN_MODE(3);

#define IIO_DEV_ATTR_DAC_POWERDOWN(_num)				\
	IIO_DEVICE_ATTR(out_voltage##_num##_powerdown,			\
			S_IRUGO | S_IWUSR,				\
			ad5686_read_dac_powerdown,			\
			ad5686_write_dac_powerdown, _num)

static IIO_DEV_ATTR_DAC_POWERDOWN(0);
static IIO_DEV_ATTR_DAC_POWERDOWN(1);
static IIO_DEV_ATTR_DAC_POWERDOWN(2);
static IIO_DEV_ATTR_DAC_POWERDOWN(3);

static struct attribute *ad5686_attributes[] = {
	&iio_dev_attr_out_voltage0_powerdown.dev_attr.attr,
	&iio_dev_attr_out_voltage1_powerdown.dev_attr.attr,
	&iio_dev_attr_out_voltage2_powerdown.dev_attr.attr,
	&iio_dev_attr_out_voltage3_powerdown.dev_attr.attr,
	&iio_dev_attr_out_voltage0_powerdown_mode.dev_attr.attr,
	&iio_dev_attr_out_voltage1_powerdown_mode.dev_attr.attr,
	&iio_dev_attr_out_voltage2_powerdown_mode.dev_attr.attr,
	&iio_dev_attr_out_voltage3_powerdown_mode.dev_attr.attr,
	&iio_const_attr_out_voltage_powerdown_mode_available.dev_attr.attr,
	NULL,
};

static const struct attribute_group ad5686_attribute_group = {
	.attrs = ad5686_attributes,
};

/*
 * IIO read_raw: m == 0 reads the raw DAC code back over SPI;
 * IIO_CHAN_INFO_SCALE derives volts-per-LSB from the reference voltage.
 */
static int ad5686_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val,
			   int *val2,
			   long m)
{
	struct ad5686_state *st = iio_priv(indio_dev);
	unsigned long scale_uv;
	int ret;

	switch (m) {
	case 0:
		mutex_lock(&indio_dev->mlock);
		ret = ad5686_spi_read(st, chan->address);
		mutex_unlock(&indio_dev->mlock);
		if (ret < 0)
			return ret;
		*val = ret;
		return IIO_VAL_INT;
	case IIO_CHAN_INFO_SCALE:
		scale_uv = (st->vref_mv * 100000) >> (chan->scan_type.realbits);
		*val =  scale_uv / 100000;
		*val2 = (scale_uv % 100000) * 10;
		return IIO_VAL_INT_PLUS_MICRO;
	}
	return -EINVAL;
}

/*
 * IIO write_raw: mask == 0 writes a raw DAC code.
 * Valid codes are 0 .. (2^realbits - 1); the previous check used '>' and
 * let val == 2^realbits through, which would shift a set bit out of the
 * data field into the address/command bits of the SPI word.
 */
static int ad5686_write_raw(struct iio_dev *indio_dev,
			    struct iio_chan_spec const *chan,
			    int val,
			    int val2,
			    long mask)
{
	struct ad5686_state *st = iio_priv(indio_dev);
	int ret;

	switch (mask) {
	case 0:
		if (val >= (1 << chan->scan_type.realbits) || val < 0)
			return -EINVAL;

		mutex_lock(&indio_dev->mlock);
		ret = ad5686_spi_write(st,
				 AD5686_CMD_WRITE_INPUT_N_UPDATE_N,
				 chan->address,
				 val,
				 chan->scan_type.shift);
		mutex_unlock(&indio_dev->mlock);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static const struct iio_info ad5686_info = {
	.read_raw = ad5686_read_raw,
	.write_raw = ad5686_write_raw,
	.attrs = &ad5686_attribute_group,
	.driver_module = THIS_MODULE,
};

/*
 * Probe: allocate IIO device state, optionally enable the "vcc" supply
 * (falling back to the internal reference when absent), configure the
 * internal reference accordingly and register the IIO device.
 */
static int __devinit ad5686_probe(struct spi_device *spi)
{
	struct ad5686_state *st;
	struct iio_dev *indio_dev;
	int ret, voltage_uv = 0;

	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL)
		return  -ENOMEM;

	st = iio_priv(indio_dev);
	spi_set_drvdata(spi, indio_dev);

	st->reg = regulator_get(&spi->dev, "vcc");
	if (!IS_ERR(st->reg)) {
		ret = regulator_enable(st->reg);
		if (ret)
			goto error_put_reg;

		voltage_uv = regulator_get_voltage(st->reg);
	}

	st->chip_info =
		&ad5686_chip_info_tbl[spi_get_device_id(spi)->driver_data];

	if (voltage_uv)
		st->vref_mv = voltage_uv / 1000;
	else
		st->vref_mv = st->chip_info->int_vref_mv;

	st->spi = spi;

	indio_dev->dev.parent = &spi->dev;
	indio_dev->name = spi_get_device_id(spi)->name;
	indio_dev->info = &ad5686_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = st->chip_info->channel;
	indio_dev->num_channels = AD5686_DAC_CHANNELS;

	/* Use the internal reference only when no external supply exists. */
	ret = ad5686_spi_write(st, AD5686_CMD_INTERNAL_REFER_SETUP, 0,
				!!voltage_uv, 0);
	if (ret)
		goto error_disable_reg;

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_disable_reg;

	return 0;

error_disable_reg:
	if (!IS_ERR(st->reg))
		regulator_disable(st->reg);
error_put_reg:
	if (!IS_ERR(st->reg))
		regulator_put(st->reg);

	iio_free_device(indio_dev);

	return ret;
}

/* Remove: unregister, release the regulator and free the IIO device. */
static int __devexit ad5686_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct ad5686_state *st = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);
	if (!IS_ERR(st->reg)) {
		regulator_disable(st->reg);
		regulator_put(st->reg);
	}
	iio_free_device(indio_dev);

	return 0;
}

static const struct spi_device_id ad5686_id[] = {
	{"ad5684", ID_AD5684},
	{"ad5685", ID_AD5685},
	{"ad5686", ID_AD5686},
	{}
};
MODULE_DEVICE_TABLE(spi, ad5686_id);

static struct spi_driver ad5686_driver = {
	.driver = {
		   .name = "ad5686",
		   .owner = THIS_MODULE,
		   },
	.probe = ad5686_probe,
	.remove = __devexit_p(ad5686_remove),
	.id_table = ad5686_id,
};
module_spi_driver(ad5686_driver);

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD5686/85/84 DAC");
MODULE_LICENSE("GPL v2");
gpl-2.0
GalaxyTab4/starlightknight_kernel_samsung_matissewifi
drivers/staging/iio/adc/ad7606_par.c
4878
3834
/* * AD7606 Parallel Interface ADC driver * * Copyright 2011 Analog Devices Inc. * * Licensed under the GPL-2. */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/err.h> #include <linux/io.h> #include "../iio.h" #include "ad7606.h" static int ad7606_par16_read_block(struct device *dev, int count, void *buf) { struct platform_device *pdev = to_platform_device(dev); struct iio_dev *indio_dev = platform_get_drvdata(pdev); struct ad7606_state *st = iio_priv(indio_dev); insw((unsigned long) st->base_address, buf, count); return 0; } static const struct ad7606_bus_ops ad7606_par16_bops = { .read_block = ad7606_par16_read_block, }; static int ad7606_par8_read_block(struct device *dev, int count, void *buf) { struct platform_device *pdev = to_platform_device(dev); struct iio_dev *indio_dev = platform_get_drvdata(pdev); struct ad7606_state *st = iio_priv(indio_dev); insb((unsigned long) st->base_address, buf, count * 2); return 0; } static const struct ad7606_bus_ops ad7606_par8_bops = { .read_block = ad7606_par8_read_block, }; static int __devinit ad7606_par_probe(struct platform_device *pdev) { struct resource *res; struct iio_dev *indio_dev; void __iomem *addr; resource_size_t remap_size; int ret, irq; irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no irq\n"); return -ENODEV; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; remap_size = resource_size(res); /* Request the regions */ if (!request_mem_region(res->start, remap_size, "iio-ad7606")) { ret = -EBUSY; goto out1; } addr = ioremap(res->start, remap_size); if (!addr) { ret = -ENOMEM; goto out1; } indio_dev = ad7606_probe(&pdev->dev, irq, addr, platform_get_device_id(pdev)->driver_data, remap_size > 1 ? 
&ad7606_par16_bops : &ad7606_par8_bops); if (IS_ERR(indio_dev)) { ret = PTR_ERR(indio_dev); goto out2; } platform_set_drvdata(pdev, indio_dev); return 0; out2: iounmap(addr); out1: release_mem_region(res->start, remap_size); return ret; } static int __devexit ad7606_par_remove(struct platform_device *pdev) { struct iio_dev *indio_dev = platform_get_drvdata(pdev); struct resource *res; struct ad7606_state *st = iio_priv(indio_dev); ad7606_remove(indio_dev, platform_get_irq(pdev, 0)); iounmap(st->base_address); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); platform_set_drvdata(pdev, NULL); return 0; } #ifdef CONFIG_PM static int ad7606_par_suspend(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); ad7606_suspend(indio_dev); return 0; } static int ad7606_par_resume(struct device *dev) { struct iio_dev *indio_dev = dev_get_drvdata(dev); ad7606_resume(indio_dev); return 0; } static const struct dev_pm_ops ad7606_pm_ops = { .suspend = ad7606_par_suspend, .resume = ad7606_par_resume, }; #define AD7606_PAR_PM_OPS (&ad7606_pm_ops) #else #define AD7606_PAR_PM_OPS NULL #endif /* CONFIG_PM */ static struct platform_device_id ad7606_driver_ids[] = { { .name = "ad7606-8", .driver_data = ID_AD7606_8, }, { .name = "ad7606-6", .driver_data = ID_AD7606_6, }, { .name = "ad7606-4", .driver_data = ID_AD7606_4, }, { } }; MODULE_DEVICE_TABLE(platform, ad7606_driver_ids); static struct platform_driver ad7606_driver = { .probe = ad7606_par_probe, .remove = __devexit_p(ad7606_par_remove), .id_table = ad7606_driver_ids, .driver = { .name = "ad7606", .owner = THIS_MODULE, .pm = AD7606_PAR_PM_OPS, }, }; module_platform_driver(ad7606_driver); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("Analog Devices AD7606 ADC"); MODULE_LICENSE("GPL v2");
gpl-2.0
CyanogenMod/android_kernel_amazon_hdx-common
kernel/rtmutex-debug.c
5134
4793
/* * RT-Mutexes: blocking mutual exclusion locks with PI support * * started by Ingo Molnar and Thomas Gleixner: * * Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com> * * This code is based on the rt.c implementation in the preempt-rt tree. * Portions of said code are * * Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey * Copyright (C) 2006 Esben Nielsen * Copyright (C) 2006 Kihon Technologies Inc., * Steven Rostedt <rostedt@goodmis.org> * * See rt.c in preempt-rt for proper credits and further information */ #include <linux/sched.h> #include <linux/delay.h> #include <linux/export.h> #include <linux/spinlock.h> #include <linux/kallsyms.h> #include <linux/syscalls.h> #include <linux/interrupt.h> #include <linux/plist.h> #include <linux/fs.h> #include <linux/debug_locks.h> #include "rtmutex_common.h" static void printk_task(struct task_struct *p) { if (p) printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio); else printk("<none>"); } static void printk_lock(struct rt_mutex *lock, int print_owner) { if (lock->name) printk(" [%p] {%s}\n", lock, lock->name); else printk(" [%p] {%s:%d}\n", lock, lock->file, lock->line); if (print_owner && rt_mutex_owner(lock)) { printk(".. ->owner: %p\n", lock->owner); printk(".. held by: "); printk_task(rt_mutex_owner(lock)); printk("\n"); } } void rt_mutex_debug_task_free(struct task_struct *task) { DEBUG_LOCKS_WARN_ON(!plist_head_empty(&task->pi_waiters)); DEBUG_LOCKS_WARN_ON(task->pi_blocked_on); } /* * We fill out the fields in the waiter to store the information about * the deadlock. We print when we return. act_waiter can be NULL in * case of a remove waiter operation. 
*/ void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter, struct rt_mutex *lock) { struct task_struct *task; if (!debug_locks || detect || !act_waiter) return; task = rt_mutex_owner(act_waiter->lock); if (task && task != current) { act_waiter->deadlock_task_pid = get_pid(task_pid(task)); act_waiter->deadlock_lock = lock; } } void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter) { struct task_struct *task; if (!waiter->deadlock_lock || !debug_locks) return; rcu_read_lock(); task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID); if (!task) { rcu_read_unlock(); return; } if (!debug_locks_off()) { rcu_read_unlock(); return; } printk("\n============================================\n"); printk( "[ BUG: circular locking deadlock detected! ]\n"); printk("%s\n", print_tainted()); printk( "--------------------------------------------\n"); printk("%s/%d is deadlocking current task %s/%d\n\n", task->comm, task_pid_nr(task), current->comm, task_pid_nr(current)); printk("\n1) %s/%d is trying to acquire this lock:\n", current->comm, task_pid_nr(current)); printk_lock(waiter->lock, 1); printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task_pid_nr(task)); printk_lock(waiter->deadlock_lock, 1); debug_show_held_locks(current); debug_show_held_locks(task); printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task_pid_nr(task)); show_stack(task, NULL); printk("\n%s/%d's [current] stackdump:\n\n", current->comm, task_pid_nr(current)); dump_stack(); debug_show_all_locks(); rcu_read_unlock(); printk("[ turning off deadlock detection." "Please report this trace. 
]\n\n"); } void debug_rt_mutex_lock(struct rt_mutex *lock) { } void debug_rt_mutex_unlock(struct rt_mutex *lock) { DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current); } void debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner) { } void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock) { DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock)); } void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter) { memset(waiter, 0x11, sizeof(*waiter)); plist_node_init(&waiter->list_entry, MAX_PRIO); plist_node_init(&waiter->pi_list_entry, MAX_PRIO); waiter->deadlock_task_pid = NULL; } void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) { put_pid(waiter->deadlock_task_pid); DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->list_entry)); DEBUG_LOCKS_WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); memset(waiter, 0x22, sizeof(*waiter)); } void debug_rt_mutex_init(struct rt_mutex *lock, const char *name) { /* * Make sure we are not reinitializing a held lock: */ debug_check_no_locks_freed((void *)lock, sizeof(*lock)); lock->name = name; } void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task) { } void rt_mutex_deadlock_account_unlock(struct task_struct *task) { }
gpl-2.0
robacklin/linux-2.6.39.4
net/bridge/netfilter/ebt_arp.c
13582
3671
/* * ebt_arp * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * Tim Gardner <timg@tpi.com> * * April, 2002 * */ #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/module.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter_bridge/ebtables.h> #include <linux/netfilter_bridge/ebt_arp.h> static bool ebt_arp_mt(const struct sk_buff *skb, struct xt_action_param *par) { const struct ebt_arp_info *info = par->matchinfo; const struct arphdr *ah; struct arphdr _arph; ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph); if (ah == NULL) return false; if (info->bitmask & EBT_ARP_OPCODE && FWINV(info->opcode != ah->ar_op, EBT_ARP_OPCODE)) return false; if (info->bitmask & EBT_ARP_HTYPE && FWINV(info->htype != ah->ar_hrd, EBT_ARP_HTYPE)) return false; if (info->bitmask & EBT_ARP_PTYPE && FWINV(info->ptype != ah->ar_pro, EBT_ARP_PTYPE)) return false; if (info->bitmask & (EBT_ARP_SRC_IP | EBT_ARP_DST_IP | EBT_ARP_GRAT)) { const __be32 *sap, *dap; __be32 saddr, daddr; if (ah->ar_pln != sizeof(__be32) || ah->ar_pro != htons(ETH_P_IP)) return false; sap = skb_header_pointer(skb, sizeof(struct arphdr) + ah->ar_hln, sizeof(saddr), &saddr); if (sap == NULL) return false; dap = skb_header_pointer(skb, sizeof(struct arphdr) + 2*ah->ar_hln+sizeof(saddr), sizeof(daddr), &daddr); if (dap == NULL) return false; if (info->bitmask & EBT_ARP_SRC_IP && FWINV(info->saddr != (*sap & info->smsk), EBT_ARP_SRC_IP)) return false; if (info->bitmask & EBT_ARP_DST_IP && FWINV(info->daddr != (*dap & info->dmsk), EBT_ARP_DST_IP)) return false; if (info->bitmask & EBT_ARP_GRAT && FWINV(*dap != *sap, EBT_ARP_GRAT)) return false; } if (info->bitmask & (EBT_ARP_SRC_MAC | EBT_ARP_DST_MAC)) { const unsigned char *mp; unsigned char _mac[ETH_ALEN]; uint8_t verdict, i; if (ah->ar_hln != ETH_ALEN || ah->ar_hrd != htons(ARPHRD_ETHER)) return false; if (info->bitmask & EBT_ARP_SRC_MAC) { mp = skb_header_pointer(skb, sizeof(struct arphdr), sizeof(_mac), &_mac); if (mp == NULL) 
return false; verdict = 0; for (i = 0; i < 6; i++) verdict |= (mp[i] ^ info->smaddr[i]) & info->smmsk[i]; if (FWINV(verdict != 0, EBT_ARP_SRC_MAC)) return false; } if (info->bitmask & EBT_ARP_DST_MAC) { mp = skb_header_pointer(skb, sizeof(struct arphdr) + ah->ar_hln + ah->ar_pln, sizeof(_mac), &_mac); if (mp == NULL) return false; verdict = 0; for (i = 0; i < 6; i++) verdict |= (mp[i] ^ info->dmaddr[i]) & info->dmmsk[i]; if (FWINV(verdict != 0, EBT_ARP_DST_MAC)) return false; } } return true; } static int ebt_arp_mt_check(const struct xt_mtchk_param *par) { const struct ebt_arp_info *info = par->matchinfo; const struct ebt_entry *e = par->entryinfo; if ((e->ethproto != htons(ETH_P_ARP) && e->ethproto != htons(ETH_P_RARP)) || e->invflags & EBT_IPROTO) return -EINVAL; if (info->bitmask & ~EBT_ARP_MASK || info->invflags & ~EBT_ARP_MASK) return -EINVAL; return 0; } static struct xt_match ebt_arp_mt_reg __read_mostly = { .name = "arp", .revision = 0, .family = NFPROTO_BRIDGE, .match = ebt_arp_mt, .checkentry = ebt_arp_mt_check, .matchsize = sizeof(struct ebt_arp_info), .me = THIS_MODULE, }; static int __init ebt_arp_init(void) { return xt_register_match(&ebt_arp_mt_reg); } static void __exit ebt_arp_fini(void) { xt_unregister_match(&ebt_arp_mt_reg); } module_init(ebt_arp_init); module_exit(ebt_arp_fini); MODULE_DESCRIPTION("Ebtables: ARP protocol packet match"); MODULE_LICENSE("GPL");
gpl-2.0
rex-xxx/mt6572_x201
external/webkit/Tools/DumpRenderTree/cg/ImageDiffCG.cpp
15
9926
/*
 * Copyright (C) 2005, 2007 Apple Inc.  All rights reserved.
 *           (C) 2005 Ben La Monica <ben.lamonica@gmail.com>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#define min min // FIXME: We need to be able to include these defines from a config.h somewhere.
#define JS_EXPORT_PRIVATE
#define WTF_EXPORT_PRIVATE

#include <stdio.h>
#include <wtf/Platform.h>
#include <wtf/RetainPtr.h>

#if PLATFORM(WIN)
#include <winsock2.h>
#include <windows.h>
#include <fcntl.h>
#include <io.h>
#include <wtf/MathExtras.h>
#endif

#include <CoreGraphics/CGBitmapContext.h>
#include <CoreGraphics/CGImage.h>
#include <ImageIO/CGImageDestination.h>

#if PLATFORM(MAC)
#include <LaunchServices/UTCoreTypes.h>
#endif

#ifndef CGFLOAT_DEFINED
#ifdef __LP64__
typedef double CGFloat;
#else
typedef float CGFloat;
#endif
#define CGFLOAT_DEFINED 1
#endif

using namespace std;

#if PLATFORM(WIN)
// Windows' CRT lacks strtof; approximate it via strtod.
static inline float strtof(const char *nptr, char **endptr)
{
    return strtod(nptr, endptr);
}
static const CFStringRef kUTTypePNG = CFSTR("public.png");
#endif

// Read exactly bytesRemaining bytes of PNG data from stdin and decode it
// into a CGImage. Returns a null RetainPtr if decoding fails.
static RetainPtr<CGImageRef> createImageFromStdin(int bytesRemaining)
{
    unsigned char buffer[2048];
    RetainPtr<CFMutableDataRef> data(AdoptCF, CFDataCreateMutable(0, bytesRemaining));

    while (bytesRemaining > 0) {
        size_t bytesToRead = min(bytesRemaining, 2048);
        size_t bytesRead = fread(buffer, 1, bytesToRead, stdin);
        CFDataAppendBytes(data.get(), buffer, static_cast<CFIndex>(bytesRead));
        bytesRemaining -= static_cast<int>(bytesRead);
    }
    RetainPtr<CGDataProviderRef> dataProvider(AdoptCF, CGDataProviderCreateWithCFData(data.get()));
    return RetainPtr<CGImageRef>(AdoptCF, CGImageCreateWithPNGDataProvider(dataProvider.get(), 0, false, kCGRenderingIntentDefault));
}

// CGDataProvider release callback: frees the malloc'd diff buffer once the
// provider (and hence the diff image) no longer needs it.
static void releaseMallocBuffer(void* info, const void* data, size_t size)
{
    free((void*)data);
}

// Rasterize both images into 32-bit bitmaps, compute a per-pixel normalized
// RGBA distance, fill `difference` (out-param, percent over the whole image)
// and return a grayscale diff image (null when the images are identical).
// Assumes both images have the same width/height -- caller checks this.
static RetainPtr<CGImageRef> createDifferenceImage(CGImageRef baseImage, CGImageRef testImage, float& difference)
{
    size_t width = CGImageGetWidth(baseImage);
    size_t height = CGImageGetHeight(baseImage);
    size_t rowBytes = width * 4;

    // Draw base image in bitmap context
    void* baseBuffer = calloc(height, rowBytes);
    RetainPtr<CGContextRef> baseContext(AdoptCF, CGBitmapContextCreate(baseBuffer, width, height, 8, rowBytes, CGImageGetColorSpace(baseImage), kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host));
    CGContextDrawImage(baseContext.get(), CGRectMake(0, 0, width, height), baseImage);

    // Draw test image in bitmap context
    void* buffer = calloc(height, rowBytes);
    RetainPtr<CGContextRef> context(AdoptCF, CGBitmapContextCreate(buffer, width, height, 8, rowBytes, CGImageGetColorSpace(testImage), kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host));
    CGContextDrawImage(context.get(), CGRectMake(0, 0, width, height), testImage);

    // Compare the content of the 2 bitmaps
    void* diffBuffer = malloc(width * height);
    float count = 0.0f;
    float sum = 0.0f;
    float maxDistance = 0.0f;
    unsigned char* basePixel = (unsigned char*)baseBuffer;
    unsigned char* pixel = (unsigned char*)buffer;
    unsigned char* diff = (unsigned char*)diffBuffer;
    for (size_t y = 0; y < height; ++y) {
        for (size_t x = 0; x < width; ++x) {
            // Each channel delta is normalized by the larger headroom on
            // either side of the base value, so distance is in [0, 1].
            float red = (pixel[0] - basePixel[0]) / max<float>(255 - basePixel[0], basePixel[0]);
            float green = (pixel[1] - basePixel[1]) / max<float>(255 - basePixel[1], basePixel[1]);
            float blue = (pixel[2] - basePixel[2]) / max<float>(255 - basePixel[2], basePixel[2]);
            float alpha = (pixel[3] - basePixel[3]) / max<float>(255 - basePixel[3], basePixel[3]);
            float distance = sqrtf(red * red + green * green + blue * blue + alpha * alpha) / 2.0f;

            *diff++ = (unsigned char)(distance * 255.0f);

            // Pixels differing by less than 1/255 count as identical.
            if (distance >= 1.0f / 255.0f) {
                count += 1.0f;
                sum += distance;
                if (distance > maxDistance)
                    maxDistance = distance;
            }
            basePixel += 4;
            pixel += 4;
        }
    }

    // Compute the difference as a percentage combining both the number of
    // different pixels and their difference amount i.e. the average distance
    // over the entire image
    if (count > 0.0f)
        difference = 100.0f * sum / (height * width);
    else
        difference = 0.0f;

    RetainPtr<CGImageRef> diffImage;
    // Generate a normalized diff image if there is any difference
    if (difference > 0.0f) {
        // Stretch intensities so the largest observed distance maps to 255.
        if (maxDistance < 1.0f) {
            diff = (unsigned char*)diffBuffer;
            for(size_t p = 0; p < height * width; ++p)
                diff[p] = diff[p] / maxDistance;
        }

        // diffBuffer ownership transfers to the provider; freed by
        // releaseMallocBuffer when the image is released.
        static CGColorSpaceRef diffColorspace = CGColorSpaceCreateDeviceGray();
        RetainPtr<CGDataProviderRef> provider(AdoptCF, CGDataProviderCreateWithData(0, diffBuffer, width * height, releaseMallocBuffer));
        diffImage.adoptCF(CGImageCreate(width, height, 8, 8, width, diffColorspace, 0, provider.get(), 0, false, kCGRenderingIntentDefault));
    } else
        free(diffBuffer);

    // Destroy drawing buffers
    if (buffer)
        free(buffer);
    if (baseBuffer)
        free(baseBuffer);

    return diffImage;
}

// True when the image stores an alpha channel, in any position/order.
static inline bool imageHasAlpha(CGImageRef image)
{
    CGImageAlphaInfo info = CGImageGetAlphaInfo(image);

    return (info >= kCGImageAlphaPremultipliedLast) && (info <= kCGImageAlphaFirst);
}

// Protocol: read "Content-Length: N" headers from stdin, each followed by N
// bytes of PNG data (actual image first, then baseline); emit a diff verdict
// (and optionally a diff PNG) on stdout for each pair. `-t`/`--tolerance`
// sets the pass threshold in percent.
int main(int argc, const char* argv[])
{
#if PLATFORM(WIN)
    _setmode(0, _O_BINARY);
    _setmode(1, _O_BINARY);
#endif

    float tolerance = 0.0f;

    for (int i = 1; i < argc; ++i) {
        if (!strcmp(argv[i], "-t") || !strcmp(argv[i], "--tolerance")) {
            if (i >= argc - 1)
                exit(1);
            tolerance = strtof(argv[i + 1], 0);
            ++i;
            continue;
        }
    }

    char buffer[2048];
    RetainPtr<CGImageRef> actualImage;
    RetainPtr<CGImageRef> baselineImage;

    while (fgets(buffer, sizeof(buffer), stdin)) {
        // remove the CR
        char* newLineCharacter = strchr(buffer, '\n');
        if (newLineCharacter)
            *newLineCharacter = '\0';

        if (!strncmp("Content-Length: ", buffer, 16)) {
            strtok(buffer, " ");
            int imageSize = strtol(strtok(0, " "), 0, 10);

            if (imageSize > 0 && !actualImage)
                actualImage = createImageFromStdin(imageSize);
            else if (imageSize > 0 && !baselineImage)
                baselineImage = createImageFromStdin(imageSize);
            else
                fputs("error, image size must be specified.\n", stdout);
        }

        if (actualImage && baselineImage) {
            RetainPtr<CGImageRef> diffImage;
            float difference = 100.0f;

            // Only diff images with matching dimensions and alpha presence.
            if ((CGImageGetWidth(actualImage.get()) == CGImageGetWidth(baselineImage.get()))
                && (CGImageGetHeight(actualImage.get()) == CGImageGetHeight(baselineImage.get()))
                && (imageHasAlpha(actualImage.get()) == imageHasAlpha(baselineImage.get()))) {
                diffImage = createDifferenceImage(actualImage.get(), baselineImage.get(), difference); // difference is passed by reference
                if (difference <= tolerance)
                    difference = 0.0f;
                else {
                    difference = roundf(difference * 100.0f) / 100.0f;
                    difference = max(difference, 0.01f); // round to 2 decimal places
                }
            } else
                fputs("error, test and reference image have different properties.\n", stderr);

            if (difference > 0.0f) {
                if (diffImage) {
                    RetainPtr<CFMutableDataRef> imageData(AdoptCF, CFDataCreateMutable(0, 0));
                    RetainPtr<CGImageDestinationRef> imageDest(AdoptCF, CGImageDestinationCreateWithData(imageData.get(), kUTTypePNG, 1, 0));
                    CGImageDestinationAddImage(imageDest.get(), diffImage.get(), 0);
                    CGImageDestinationFinalize(imageDest.get());
                    // NOTE(review): CFDataGetLength returns CFIndex (signed
                    // long); %lu expects unsigned long -- harmless here but
                    // technically mismatched; consider %ld.
                    printf("Content-Length: %lu\n", CFDataGetLength(imageData.get()));
                    fwrite(CFDataGetBytePtr(imageData.get()), 1, CFDataGetLength(imageData.get()), stdout);
                }

                fprintf(stdout, "diff: %01.2f%% failed\n", difference);
            } else
                fprintf(stdout, "diff: %01.2f%% passed\n", difference);

            actualImage = 0;
            baselineImage = 0;
        }

        fflush(stdout);
    }
    return 0;
}
gpl-2.0
MoKee/android_kernel_oppo_n1
drivers/usb/host/ehci-msm.c
15
5936
/* ehci-msm.c - HSUSB Host Controller Driver Implementation
 *
 * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
 *
 * Partly derived from ehci-fsl.c and ehci-hcd.c
 * Copyright (c) 2000-2004 by David Brownell
 * Copyright (c) 2005 MontaVista Software
 *
 * All source code in this file is licensed under the following license except
 * where indicated.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can find it at http://www.fsf.org
 */

#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/usb/otg.h>
#include <linux/usb/msm_hsusb_hw.h>

#define MSM_USB_BASE (hcd->regs)

static struct usb_phy *phy;

/*
 * HC reset: run the generic EHCI setup, then program the MSM-specific
 * AHB burst/transactor registers and force host mode.
 */
static int ehci_msm_reset(struct usb_hcd *hcd)
{
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	int retval;

	ehci->caps = USB_CAPLENGTH;
	hcd->has_tt = 1;
	ehci->log2_irq_thresh = 5;

	retval = ehci_setup(hcd);
	if (retval)
		return retval;

	/* bursts of unspecified length. */
	writel(0, USB_AHBBURST);
	/* Use the AHB transactor */
	writel_relaxed(0x08, USB_AHBMODE);
	/* Disable streaming mode and select host mode */
	writel(0x13, USB_USBMODE);

	ehci_port_power(ehci, 1);
	return 0;
}

static struct hc_driver msm_hc_driver = {
	.description		= hcd_name,
	.product_desc		= "Qualcomm On-Chip EHCI Host Controller",
	.hcd_priv_size		= sizeof(struct ehci_hcd),

	/*
	 * generic hardware linkage
	 */
	.irq			= ehci_irq,
	.flags			= HCD_USB2 | HCD_MEMORY,

	.reset			= ehci_msm_reset,
	.start			= ehci_run,

	.stop			= ehci_stop,
	.shutdown		= ehci_shutdown,

	/*
	 * managing i/o requests and associated device resources
	 */
	.urb_enqueue		= ehci_urb_enqueue,
	.urb_dequeue		= ehci_urb_dequeue,
	.endpoint_disable	= ehci_endpoint_disable,
	.endpoint_reset		= ehci_endpoint_reset,
	.clear_tt_buffer_complete = ehci_clear_tt_buffer_complete,

	/*
	 * scheduling support
	 */
	.get_frame_number	= ehci_get_frame,

	/*
	 * root hub support
	 */
	.hub_status_data	= ehci_hub_status_data,
	.hub_control		= ehci_hub_control,
	.relinquish_port	= ehci_relinquish_port,
	.port_handed_over	= ehci_port_handed_over,

	/*
	 * PM support
	 */
	.bus_suspend		= ehci_bus_suspend,
	.bus_resume		= ehci_bus_resume,
};

static u64 msm_ehci_dma_mask = DMA_BIT_MASK(64);

/*
 * Probe: create the HCD, map the controller registers and register with
 * the OTG transceiver, which owns PHY/clock/VBUS management.
 */
static int ehci_msm_probe(struct platform_device *pdev)
{
	struct usb_hcd *hcd;
	struct resource *res;
	int ret;

	/* Fix: message previously read "ehci_msm proble". */
	dev_dbg(&pdev->dev, "ehci_msm probe\n");

	if (!pdev->dev.dma_mask)
		pdev->dev.dma_mask = &msm_ehci_dma_mask;
	if (!pdev->dev.coherent_dma_mask)
		pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);

	hcd = usb_create_hcd(&msm_hc_driver, &pdev->dev, dev_name(&pdev->dev));
	if (!hcd) {
		dev_err(&pdev->dev, "Unable to create HCD\n");
		return -ENOMEM;
	}

	hcd_to_bus(hcd)->skip_resume = true;

	hcd->irq = platform_get_irq(pdev, 0);
	if (hcd->irq < 0) {
		dev_err(&pdev->dev, "Unable to get IRQ resource\n");
		ret = hcd->irq;
		goto put_hcd;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Unable to get memory resource\n");
		ret = -ENODEV;
		goto put_hcd;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = ioremap(hcd->rsrc_start, hcd->rsrc_len);
	if (!hcd->regs) {
		dev_err(&pdev->dev, "ioremap failed\n");
		ret = -ENOMEM;
		goto put_hcd;
	}

	/*
	 * OTG driver takes care of PHY initialization, clock management,
	 * powering up VBUS, mapping of registers address space and power
	 * management.
	 */
	phy = usb_get_transceiver();
	if (!phy) {
		dev_err(&pdev->dev, "unable to find transceiver\n");
		ret = -ENODEV;
		goto unmap;
	}

	ret = otg_set_host(phy->otg, &hcd->self);
	if (ret < 0) {
		dev_err(&pdev->dev, "unable to register with transceiver\n");
		goto put_transceiver;
	}

	hcd_to_ehci(hcd)->transceiver = phy;
	device_init_wakeup(&pdev->dev, 1);
	pm_runtime_enable(&pdev->dev);

	return 0;

put_transceiver:
	usb_put_transceiver(phy);
unmap:
	iounmap(hcd->regs);
put_hcd:
	usb_put_hcd(hcd);

	return ret;
}

/* Remove: undo probe in reverse order (OTG deregistration last owns PHY). */
static int __devexit ehci_msm_remove(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);

	device_init_wakeup(&pdev->dev, 0);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);

	hcd_to_ehci(hcd)->transceiver = NULL;
	otg_set_host(phy->otg, NULL);
	usb_put_transceiver(phy);

	usb_put_hcd(hcd);

	return 0;
}

#ifdef CONFIG_PM_RUNTIME
static int ehci_msm_runtime_idle(struct device *dev)
{
	dev_dbg(dev, "ehci runtime idle\n");
	return 0;
}

static int ehci_msm_runtime_suspend(struct device *dev)
{
	dev_dbg(dev, "ehci runtime suspend\n");
	/*
	 * Notify OTG about suspend.  It takes care of
	 * putting the hardware in LPM.
	 */
	return usb_phy_set_suspend(phy, 1);
}

static int ehci_msm_runtime_resume(struct device *dev)
{
	dev_dbg(dev, "ehci runtime resume\n");
	return usb_phy_set_suspend(phy, 0);
}
#endif

#ifdef CONFIG_PM_SLEEP
static int ehci_msm_pm_suspend(struct device *dev)
{
	return 0;
}

static int ehci_msm_pm_resume(struct device *dev)
{
	return 0;
}
#endif

static const struct dev_pm_ops ehci_msm_dev_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ehci_msm_pm_suspend, ehci_msm_pm_resume)
	SET_RUNTIME_PM_OPS(ehci_msm_runtime_suspend, ehci_msm_runtime_resume,
				ehci_msm_runtime_idle)
};

static struct platform_driver ehci_msm_driver = {
	.probe	= ehci_msm_probe,
	.remove	= __devexit_p(ehci_msm_remove),
	.driver = {
		   .name = "msm_hsusb_host",
		   .pm = &ehci_msm_dev_pm_ops,
	},
};
gpl-2.0
RasPlex/systemtap
runtime/linux/uprobes2/uprobes_x86.c
15
24541
/* * Userspace Probes (UProbes) * uprobes.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Copyright (C) IBM Corporation, 2006-2008 */ /* * In versions of uprobes built in the SystemTap runtime, this file * is #included at the end of uprobes.c. */ #include <asm/uaccess.h> #ifdef CONFIG_X86_32 #define is_32bit_app(tsk) 1 #else #define is_32bit_app(tsk) (test_tsk_thread_flag(tsk, TIF_IA32)) #endif /* Adapted from arch/x86_64/kprobes.c */ #undef W #define W(row,b0,b1,b2,b3,b4,b5,b6,b7,b8,b9,ba,bb,bc,bd,be,bf) \ (((b0##ULL<< 0x0)|(b1##ULL<< 0x1)|(b2##ULL<< 0x2)|(b3##ULL<< 0x3) | \ (b4##ULL<< 0x4)|(b5##ULL<< 0x5)|(b6##ULL<< 0x6)|(b7##ULL<< 0x7) | \ (b8##ULL<< 0x8)|(b9##ULL<< 0x9)|(ba##ULL<< 0xa)|(bb##ULL<< 0xb) | \ (bc##ULL<< 0xc)|(bd##ULL<< 0xd)|(be##ULL<< 0xe)|(bf##ULL<< 0xf)) \ << (row % 64)) static const volatile u64 good_insns_64[256 / 64] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ------------------------------- */ W(0x00, 1,1,1,1,1,1,0,0,1,1,1,1,1,1,0,0)| /* 00 */ W(0x10, 1,1,1,1,1,1,0,0,1,1,1,1,1,1,0,0)| /* 10 */ W(0x20, 1,1,1,1,1,1,0,0,1,1,1,1,1,1,0,0)| /* 20 */ W(0x30, 1,1,1,1,1,1,0,0,1,1,1,1,1,1,0,0), /* 30 */ W(0x40, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 40 */ W(0x50, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 50 */ W(0x60, 0,0,0,1,1,1,0,0,1,1,1,1,0,0,0,0)| /* 60 */ W(0x70, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 70 */ W(0x80, 1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 80 */ W(0x90, 
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 90 */ W(0xa0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* a0 */ W(0xb0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* b0 */ W(0xc0, 1,1,1,1,0,0,1,1,1,1,1,1,0,0,0,0)| /* c0 */ W(0xd0, 1,1,1,1,0,0,0,1,1,1,1,1,1,1,1,1)| /* d0 */ W(0xe0, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* e0 */ W(0xf0, 0,0,1,1,0,1,1,1,1,1,0,0,1,1,1,1) /* f0 */ /* ------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; /* Good-instruction tables for 32-bit apps -- copied from i386 uprobes */ static const volatile u64 good_insns_32[256 / 64] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ------------------------------- */ W(0x00, 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,0)| /* 00 */ W(0x10, 1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,0)| /* 10 */ W(0x20, 1,1,1,1,1,1,0,1,1,1,1,1,1,1,0,1)| /* 20 */ W(0x30, 1,1,1,1,1,1,0,1,1,1,1,1,1,1,0,1), /* 30 */ W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */ W(0x50, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 50 */ W(0x60, 1,1,1,0,1,1,0,0,1,1,1,1,0,0,0,0)| /* 60 */ W(0x70, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* 70 */ W(0x80, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 80 */ W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 90 */ W(0xa0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* a0 */ W(0xb0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1), /* b0 */ W(0xc0, 1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0)| /* c0 */ W(0xd0, 1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1)| /* d0 */ W(0xe0, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* e0 */ W(0xf0, 0,0,1,1,0,1,1,1,1,1,0,0,1,1,1,1) /* f0 */ /* ------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; /* Using this for both 64-bit and 32-bit apps */ static const volatile u64 good_2byte_insns[256 / 64] = { /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ /* ------------------------------- */ W(0x00, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1)| /* 00 */ W(0x10, 1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1)| /* 10 */ W(0x20, 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1)| /* 20 */ W(0x30, 0,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0), /* 30 */ W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 40 */ W(0x50, 
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 50 */ W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 60 */ W(0x70, 1,1,1,1,1,1,1,1,0,0,0,0,0,0,1,1), /* 70 */ W(0x80, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 80 */ W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 90 */ W(0xa0, 1,1,1,1,1,1,0,0,1,1,1,1,1,1,0,1)| /* a0 */ W(0xb0, 1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1), /* b0 */ W(0xc0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* c0 */ W(0xd0, 0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* d0 */ W(0xe0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* e0 */ W(0xf0, 0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0) /* f0 */ /* ------------------------------- */ /* 0 1 2 3 4 5 6 7 8 9 a b c d e f */ }; /* * opcodes we'll probably never support: * 6c-6d, e4-e5, ec-ed - in * 6e-6f, e6-e7, ee-ef - out * cc, cd - int3, int * cf - iret * d6 - illegal instruction * f1 - int1/icebp * f4 - hlt * fa, fb - cli, sti * 0f - lar, lsl, syscall, clts, sysret, sysenter, sysexit, invd, wbinvd, ud2 * * invalid opcodes in 64-bit mode: * 06, 0e, 16, 1e, 27, 2f, 37, 3f, 60-62, 82, c4-c5, d4-d5 * * 63 - we support this opcode in x86_64 but not in i386. * opcodes we may need to refine support for: * 0f - 2-byte instructions: For many of these instructions, the validity * depends on the prefix and/or the reg field. On such instructions, we * just consider the opcode combination valid if it corresponds to any * valid instruction. * 8f - Group 1 - only reg = 0 is OK * c6-c7 - Group 11 - only reg = 0 is OK * d9-df - fpu insns with some illegal encodings * f2, f3 - repnz, repz prefixes. These are also the first byte for * certain floating-point instructions, such as addsd. * fe - Group 4 - only reg = 0 or 1 is OK * ff - Group 5 - only reg = 0-6 is OK * * others -- Do we need to support these? * 0f - (floating-point?) 
prefetch instructions
 * 07, 17, 1f - pop es, pop ss, pop ds
 * 26, 2e, 36, 3e - es:, cs:, ss:, ds: segment prefixes --
 *	but 64 and 65 (fs: and gs:) seem to be used, so we support them
 * 67 - addr16 prefix
 * ce - into
 * f0 - lock prefix
 */

/*
 * TODO:
 * - Where necessary, examine the modrm byte and allow only valid instructions
 *   in the different Groups and fpu instructions.
 * - Note: If we go past the first byte, do we need to verify that
 *   subsequent bytes were actually there, rather than off the last page?
 * - Be clearer about which instructions we'll never probe.
 */

/*
 * Return 1 if this is a legacy instruction prefix we support, -1 if
 * it's one we don't support, or 0 if it's not a prefix at all.
 */
static inline int check_legacy_prefix(u8 byte)
{
	switch (byte) {
	case 0x26:
	case 0x2e:
	case 0x36:
	case 0x3e:
	case 0xf0:
		/* es:/cs:/ss:/ds: segment overrides and lock: unsupported */
		return -1;
	case 0x64:
	case 0x65:
	case 0x66:
	case 0x67:
	case 0xf2:
	case 0xf3:
		/* fs:/gs:, operand/addr size, repne/rep: supported */
		return 1;
	default:
		return 0;
	}
}

/* Log refusal to probe a 1-byte opcode we don't support in @mode-bit apps. */
static void report_bad_1byte_opcode(int mode, uprobe_opcode_t op)
{
	printk(KERN_ERR "In %d-bit apps, "
		"uprobes does not currently support probing "
		"instructions whose first byte is 0x%2.2x\n", mode, op);
}

/* Log refusal to probe an unsupported 0x0f-prefixed (2-byte) opcode. */
static void report_bad_2byte_opcode(uprobe_opcode_t op)
{
	printk(KERN_ERR "uprobes does not currently support probing "
		"instructions with the 2-byte opcode 0x0f 0x%2.2x\n", op);
}

/* Log refusal to probe @op when preceded by legacy prefix @prefix. */
static void report_bad_opcode_prefix(uprobe_opcode_t op,
						uprobe_opcode_t prefix)
{
	printk(KERN_ERR "uprobes does not currently support probing "
		"instructions whose first byte is 0x%2.2x "
		"with a prefix 0x%2.2x\n", op, prefix);
}

/*
 * Figure out how uprobe_post_ssout should perform ip fixup.
 * @insn points past any legacy/REX prefixes; ppt->insn is the start of
 * the copied instruction.  Returns 0, or -EPERM if the opcode cannot be
 * probed with a prefix in front of it.
 */
static int setup_uprobe_post_ssout(struct uprobe_probept *ppt,
						uprobe_opcode_t *insn)
{
	/*
	 * Some of these require special treatment, but we don't know what to
	 * do with arbitrary prefixes, so we refuse to probe them.
	 */
	int prefix_ok = 0;

	switch (*insn) {
	case 0xc3:		/* ret */
		if ((insn - ppt->insn == 1) &&
				(*ppt->insn == 0xf3 || *ppt->insn == 0xf2))
			/*
			 * "rep ret" is an AMD kludge that's used by GCC,
			 * so we need to treat it like a normal ret.
			 */
			prefix_ok = 1;
		/* fall through: same ip fixup as the other ret forms */
	case 0xcb:		/* more ret/lret */
	case 0xc2:
	case 0xca:
		/* rip is correct */
		ppt->arch_info.flags |= UPFIX_ABS_IP;
		break;
	case 0xe8:		/* call relative - Fix return addr */
		ppt->arch_info.flags |= UPFIX_RETURN;
		break;
	case 0x9a:		/* call absolute - Fix return addr */
		ppt->arch_info.flags |= UPFIX_RETURN | UPFIX_ABS_IP;
		break;
	case 0xff:
		if ((insn[1] & 0x30) == 0x10) {
			/* call absolute, indirect */
			/* Fix return addr; rip is correct. */
			ppt->arch_info.flags |= UPFIX_ABS_IP | UPFIX_RETURN;
		} else if ((insn[1] & 0x31) == 0x20 ||
				/* jmp near, absolute indirect */
				(insn[1] & 0x31) == 0x21) {
				/* jmp far, absolute indirect */
			/* rip is correct. */
			ppt->arch_info.flags |= UPFIX_ABS_IP;
		}
		break;
	case 0xea:		/* jmp absolute -- rip is correct */
		ppt->arch_info.flags |= UPFIX_ABS_IP;
		break;
	default:
		/* Assuming that normal ip-fixup is ok for other prefixed
		 * opcodes. */
		prefix_ok = 1;
		break;
	}
	if (!prefix_ok && insn != ppt->insn) {
		report_bad_opcode_prefix(*insn, *ppt->insn);
		return -EPERM;
	}
	return 0;
}

/*
 * Validate the instruction at ppt->insn for probing a 32-bit app.
 * Returns 0 if probeable, -EPERM otherwise.
 */
static int validate_insn_32bits(struct uprobe_probept *ppt)
{
	uprobe_opcode_t *insn = ppt->insn;
	int pfx, ret;

	/* Skip good instruction prefixes; reject "bad" ones.
	 */
	while ((pfx = check_legacy_prefix(insn[0])) == 1)
		insn++;
	if (pfx < 0) {
		report_bad_1byte_opcode(32, insn[0]);
		return -EPERM;
	}
	if ((ret = setup_uprobe_post_ssout(ppt, insn)) != 0)
		return ret;
	if (test_bit(insn[0], (unsigned long*)good_insns_32))
		return 0;
	if (insn[0] == 0x0f) {
		if (test_bit(insn[1], (unsigned long*)good_2byte_insns))
			return 0;
		report_bad_2byte_opcode(insn[1]);
	} else
		report_bad_1byte_opcode(32, insn[0]);
	return -EPERM;
}

/*
 * Validate the instruction at ppt->insn for probing a 64-bit app.
 * Like the 32-bit case, but also skips an optional REX prefix (0x40-0x4f).
 * Returns 0 if probeable, -EPERM otherwise.
 */
static int validate_insn_64bits(struct uprobe_probept *ppt)
{
	uprobe_opcode_t *insn = ppt->insn;
	int pfx, ret;

	/* Skip good instruction prefixes; reject "bad" ones. */
	while ((pfx = check_legacy_prefix(insn[0])) == 1)
		insn++;
	if (pfx < 0) {
		report_bad_1byte_opcode(64, insn[0]);
		return -EPERM;
	}
	/* Skip REX prefix. */
	if ((insn[0] & 0xf0) == 0x40)
		insn++;
	if ((ret = setup_uprobe_post_ssout(ppt, insn)) != 0)
		return ret;
	if (test_bit(insn[0], (unsigned long*)good_insns_64))
		return 0;
	if (insn[0] == 0x0f) {
		if (test_bit(insn[1], (unsigned long*)good_2byte_insns))
			return 0;
		report_bad_2byte_opcode(insn[1]);
	} else
		report_bad_1byte_opcode(64, insn[0]);
	return -EPERM;
}

#ifdef CONFIG_X86_64
static int handle_riprel_insn(struct uprobe_probept *ppt);
#endif

/*
 * Arch-specific hook: decide whether the instruction at ppt->insn may be
 * probed in task @tsk, and precompute the post-single-step fixup flags.
 * On 64-bit, also rewrites rip-relative instructions (see
 * handle_riprel_insn); its failure is deliberately ignored.
 */
static int arch_validate_probed_insn(struct uprobe_probept *ppt,
						struct task_struct *tsk)
{
	int ret;

	ppt->arch_info.flags = 0x0;
#ifdef CONFIG_X86_64
	ppt->arch_info.rip_target_address = 0x0;
#endif

	if (is_32bit_app(tsk))
		return validate_insn_32bits(ppt);
	if ((ret = validate_insn_64bits(ppt)) != 0)
		return ret;
#ifdef CONFIG_X86_64
	(void) handle_riprel_insn(ppt);
#endif
	return 0;
}

#ifdef CONFIG_X86_64
/*
 * Returns 0 if the indicated instruction has no immediate operand
 * and/or can't use rip-relative addressing.  Otherwise returns
 * the size of the immediate operand in the instruction.  (Note that
 * for instructions such as "movq $7,xxxx(%rip)" the immediate-operand
 * field is 4 bytes, even though 8 bytes are stored.)
*/ static int immediate_operand_size(u8 opcode1, u8 opcode2, u8 reg, int operand_size_prefix) { switch (opcode1) { case 0x6b: /* imul immed,mem,reg */ case 0x80: /* Group 1 */ case 0x83: /* Group 1 */ case 0xc0: /* Group 2 */ case 0xc1: /* Group 2 */ case 0xc6: /* Group 11 */ return 1; case 0x69: /* imul immed,mem,reg */ case 0x81: /* Group 1 */ case 0xc7: /* Group 11 */ return (operand_size_prefix ? 2 : 4); case 0xf6: /* Group 3, reg field == 0 or 1 */ return (reg > 1 ? 0 : 1); case 0xf7: /* Group 3, reg field == 0 or 1 */ if (reg > 1) return 0; return (operand_size_prefix ? 2 : 4); case 0x0f: /* 2-byte opcodes */ switch (opcode2) { /* * Note: 0x71-73 (Groups 12-14) have immediate operands, * but not memory operands. */ case 0x70: /* pshuf* immed,mem,reg */ case 0xa4: /* shld immed,reg,mem */ case 0xac: /* shrd immed,reg,mem */ case 0xc2: /* cmpps or cmppd */ case 0xc4: /* pinsrw */ case 0xc5: /* pextrw */ case 0xc6: /* shufps or shufpd */ case 0x0f: /* 3DNow extensions */ return 1; default: return 0; } } return 0; } /* * TODO: These tables are common for kprobes and uprobes and can be moved * to a common place. 
 */
static const volatile u64 onebyte_has_modrm[256 / 64] = {
	/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	/*      -------------------------------         */
	W(0x00, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 00 */
	W(0x10, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 10 */
	W(0x20, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0)| /* 20 */
	W(0x30, 1,1,1,1,0,0,0,0,1,1,1,1,0,0,0,0), /* 30 */
	W(0x40, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 40 */
	W(0x50, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 50 */
	W(0x60, 0,0,1,1,0,0,0,0,0,1,0,1,0,0,0,0)| /* 60 */
	W(0x70, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 70 */
	W(0x80, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 80 */
	W(0x90, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 90 */
	W(0xa0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* a0 */
	W(0xb0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* b0 */
	W(0xc0, 1,1,0,0,1,1,1,1,0,0,0,0,0,0,0,0)| /* c0 */
	W(0xd0, 1,1,1,1,0,0,0,0,1,1,1,1,1,1,1,1)| /* d0 */
	W(0xe0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* e0 */
	W(0xf0, 0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,1)  /* f0 */
	/*      -------------------------------         */
	/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
};

static const volatile u64 twobyte_has_modrm[256 / 64] = {
	/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
	/*      -------------------------------         */
	W(0x00, 1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,1)| /* 0f */
	W(0x10, 1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0)| /* 1f */
	W(0x20, 1,1,1,1,1,0,1,0,1,1,1,1,1,1,1,1)| /* 2f */
	W(0x30, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0), /* 3f */
	W(0x40, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 4f */
	W(0x50, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 5f */
	W(0x60, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 6f */
	W(0x70, 1,1,1,1,1,1,1,0,0,0,0,0,1,1,1,1), /* 7f */
	W(0x80, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)| /* 8f */
	W(0x90, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* 9f */
	W(0xa0, 0,0,0,1,1,1,1,1,0,0,0,1,1,1,1,1)| /* af */
	W(0xb0, 1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1), /* bf */
	W(0xc0, 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0)| /* cf */
	W(0xd0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* df */
	W(0xe0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1)| /* ef */
	W(0xf0, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0)  /* ff */
	/*      -------------------------------         */
	/*      0 1 2 3 4 5 6 7 8 9 a b c d e f         */
};

/*
 * If pp->insn doesn't use rip-relative addressing, return 0.  Otherwise,
 * rewrite the instruction so that it accesses its memory operand
 * indirectly through a scratch register.  Set flags and rip_target_address
 * in ppt->arch_info accordingly.  (The contents of the scratch register
 * will be saved before we single-step the modified instruction, and
 * restored afterward.)  Return 1.
 *
 * We do this because a rip-relative instruction can access only a
 * relatively small area (+/- 2 GB from the instruction), and the SSOL
 * area typically lies beyond that area.  At least for instructions
 * that store to memory, we can't single-step the original instruction
 * and "fix things up" later, because the misdirected store could be
 * disastrous.
 *
 * Some useful facts about rip-relative instructions:
 * - There's always a modrm byte.
 * - There's never a SIB byte.
 * - The offset is always 4 bytes.
 */
static int handle_riprel_insn(struct uprobe_probept *ppt)
{
	u8 *insn = (u8*) ppt->insn;
	u8 opcode1, opcode2, modrm, reg;
	int need_modrm;
	int operand_size_prefix = 0;
	int immed_size, instruction_size;

	/*
	 * Skip legacy instruction prefixes.  Some of these we don't
	 * support (yet), but here we pretend to support all of them.
	 * Skip the REX prefix, if any.
	 */
	while (check_legacy_prefix(*insn)) {
		if (*insn == 0x66)
			operand_size_prefix = 1;
		insn++;
	}
	if ((*insn & 0xf0) == 0x40)
		insn++;

	opcode1 = *insn;
	if (opcode1 == 0x0f) {	/* Two-byte opcode. */
		opcode2 = *++insn;
		need_modrm = test_bit(opcode2,
					(unsigned long*)twobyte_has_modrm);
	} else {		/* One-byte opcode. */
		opcode2 = 0x0;
		need_modrm = test_bit(opcode1,
					(unsigned long*)onebyte_has_modrm);
	}

	if (!need_modrm)
		return 0;

	modrm = *++insn;
	/*
	 * For rip-relative instructions, the mod field (top 2 bits)
	 * is zero and the r/m field (bottom 3 bits) is 0x5.
	 */
	if ((modrm & 0xc7) != 0x5)
		return 0;

	/*
	 * We have a rip-relative instruction.  insn points at the
	 * modrm byte.  The next 4 bytes are the offset.  Beyond the
	 * offset, for some instructions, is the immediate operand.
	 */
	reg = (modrm >> 3) & 0x7;
	immed_size = immediate_operand_size(opcode1, opcode2, reg,
						operand_size_prefix);
	instruction_size =
		(insn - (u8*) ppt->insn)	/* prefixes + opcodes */
		+ 1				/* modrm byte */
		+ 4				/* offset */
		+ immed_size;			/* immediate field */

#ifdef DEBUG_UPROBES_RIP
	{
		int i;
		BUG_ON(instruction_size > 15);
		printk(KERN_INFO "Munging rip-relative insn:");
		for (i = 0; i < instruction_size; i++)
			printk(" %2.2x", ppt->insn[i]);
		printk("\n");
	}
#endif

	/*
	 * Convert from rip-relative addressing to indirect addressing
	 * via a scratch register.  Change the r/m field from 0x5 (%rip)
	 * to 0x0 (%rax) or 0x1 (%rcx), and squeeze out the offset field.
	 */
	if (reg == 0) {
		/*
		 * The register operand (if any) is either the A register
		 * (%rax, %eax, etc.) or (if the 0x4 bit is set in the
		 * REX prefix) %r8.  In any case, we know the C register
		 * is NOT the register operand, so we use %rcx (register
		 * #1) for the scratch register.
		 */
		ppt->arch_info.flags |= UPFIX_RIP_RCX;
		/* Change modrm from 00 000 101 to 00 000 001. */
		*insn = 0x1;
	} else {
		/* Use %rax (register #0) for the scratch register. */
		ppt->arch_info.flags |= UPFIX_RIP_RAX;
		/* Change modrm from 00 xxx 101 to 00 xxx 000 */
		*insn = (reg << 3);
	}

	/* Target address = address of next instruction + (signed) offset */
	insn++;
	ppt->arch_info.rip_target_address = (long) ppt->vaddr
			+ instruction_size + *((s32*)insn);
	/* Squeeze out the 4-byte offset, pulling any immediate forward. */
	if (immed_size)
		memmove(insn, insn+4, immed_size);

#ifdef DEBUG_UPROBES_RIP
	{
		int i;
		printk(KERN_INFO "Munged rip-relative insn: ");
		for (i = 0; i < instruction_size-4; i++)
			printk(" %2.2x", ppt->insn[i]);
		printk("\n");
		printk(KERN_INFO "Target address = %#lx\n",
			ppt->arch_info.rip_target_address);
	}
#endif
	return 1;
}
#endif

/*
 * Get an instruction slot from the process's SSOL area, containing the
 * instruction at ppt's probepoint.  Point the rip at that slot, in
 * preparation for single-stepping out of line.
 *
 * If we're emulating a rip-relative instruction, save the contents
 * of the scratch register and store the target address in that register.
 */
static void uprobe_pre_ssout(struct uprobe_task *utask,
		struct uprobe_probept *ppt, struct pt_regs *regs)
{
	struct uprobe_ssol_slot *slot;

	slot = uprobe_get_insn_slot(ppt);
	if (!slot) {
		/* No slot available: the task cannot continue safely. */
		utask->doomed = 1;
		return;
	}
	regs->ip = (long)slot->insn;
	utask->singlestep_addr = regs->ip;
#ifdef CONFIG_X86_64
	if (ppt->arch_info.flags & UPFIX_RIP_RAX) {
		utask->arch_info.saved_scratch_register = regs->ax;
		regs->ax = ppt->arch_info.rip_target_address;
	} else if (ppt->arch_info.flags & UPFIX_RIP_RCX) {
		utask->arch_info.saved_scratch_register = regs->cx;
		regs->cx = ppt->arch_info.rip_target_address;
	}
#endif
}

/*
 * Called by uprobe_post_ssout() to adjust the return address
 * pushed by a call instruction executed out of line.
 * @rsp is the user stack pointer (the return address slot);
 * @correction is added to the stored return address.
 * On copy failure the task is marked doomed.
 */
static void adjust_ret_addr(unsigned long rsp, long correction,
		struct uprobe_task *utask)
{
	unsigned long nleft;

	if (is_32bit_app(current)) {
		/* 32-bit apps push a 4-byte return address. */
		s32 ra;
		nleft = copy_from_user(&ra, (const void __user *) rsp, 4);
		if (unlikely(nleft != 0))
			goto fail;
		ra += (s32) correction;
		nleft = copy_to_user((void __user *) rsp, &ra, 4);
		if (unlikely(nleft != 0))
			goto fail;
	} else {
		s64 ra;
		nleft = copy_from_user(&ra, (const void __user *) rsp, 8);
		if (unlikely(nleft != 0))
			goto fail;
		ra += correction;
		nleft = copy_to_user((void __user *) rsp, &ra, 8);
		if (unlikely(nleft != 0))
			goto fail;
	}
	return;

fail:
	printk(KERN_ERR
		"uprobes: Failed to adjust return address after"
		" single-stepping call instruction;"
		" pid=%d, rsp=%#lx\n", current->pid, rsp);
	utask->doomed = 1;
}

/*
 * Called after single-stepping.  ppt->vaddr is the address of the
 * instruction whose first byte has been replaced by the "int3"
 * instruction.  To avoid the SMP problems that can occur when we
 * temporarily put back the original opcode to single-step, we
 * single-stepped a copy of the instruction.  The address of this
 * copy is utask->singlestep_addr.
 *
 * This function prepares to return from the post-single-step
 * trap.  We have to fix things up as follows:
 *
 * 0) Typically, the new rip is relative to the copied instruction.  We
 * need to make it relative to the original instruction.  Exceptions are
 * return instructions and absolute or indirect jump or call instructions.
 *
 * 1) If the single-stepped instruction was a call, the return address
 * that is atop the stack is the address following the copied instruction.
 * We need to make it the address following the original instruction.
 *
 * 2) If the original instruction was a rip-relative instruction such as
 * "movl %edx,0xnnnn(%rip)", we have instead executed an equivalent
 * instruction using a scratch register -- e.g., "movl %edx,(%rax)".
 * We need to restore the contents of the scratch register and adjust
 * the rip, keeping in mind that the instruction we executed is 4 bytes
 * shorter than the original instruction (since we squeezed out the offset
 * field).
 */
static void uprobe_post_ssout(struct uprobe_task *utask,
		struct uprobe_probept *ppt, struct pt_regs *regs)
{
	unsigned long copy_ip = utask->singlestep_addr;
	unsigned long orig_ip = ppt->vaddr;
	long correction = (long) (orig_ip - copy_ip);
	unsigned long flags = ppt->arch_info.flags;

	up_read(&ppt->slot->rwsem);
#ifdef CONFIG_X86_64
	if (flags & (UPFIX_RIP_RAX | UPFIX_RIP_RCX)) {
		if (flags & UPFIX_RIP_RAX)
			regs->ax = utask->arch_info.saved_scratch_register;
		else
			regs->cx = utask->arch_info.saved_scratch_register;
		/*
		 * The original instruction includes a displacement, and so
		 * is 4 bytes longer than what we've just single-stepped.
		 * Fall through to handle stuff like "jmpq *...(%rip)" and
		 * "callq *...(%rip)".
		 */
		correction += 4;
	}
#endif
	if (flags & UPFIX_RETURN)
		adjust_ret_addr(regs->sp, correction, utask);
	if (!(flags & UPFIX_ABS_IP))
		regs->ip += correction;
}

/*
 * Replace the return address with the trampoline address.  Returns
 * the original return address.
 */
static unsigned long arch_hijack_uret_addr(unsigned long trampoline_address,
		struct pt_regs *regs, struct uprobe_task *utask)
{
	int nleft;
	unsigned long orig_ret_addr = 0;	/* clear high bits for 32-bit apps */
	size_t rasize;

	/* Return-address slot is 4 bytes for 32-bit apps, 8 for 64-bit. */
	if (is_32bit_app(current))
		rasize = 4;
	else
		rasize = 8;
	nleft = copy_from_user(&orig_ret_addr,
		       (const void __user *) regs->sp, rasize);
	if (unlikely(nleft != 0))
		return 0;
	if (orig_ret_addr == trampoline_address)
		/*
		 * There's another uretprobe on this function, and it was
		 * processed first, so the return address has already
		 * been hijacked.
		 */
		return orig_ret_addr;

	nleft = copy_to_user((void __user *) regs->sp, &trampoline_address,
		       rasize);
	if (unlikely(nleft != 0)) {
		if (nleft != rasize) {
			/*
			 * Partial write: the user stack now holds a
			 * half-overwritten return address, so the task
			 * cannot safely continue.
			 */
			printk(KERN_ERR "uretprobe_entry_handler: "
					"return address partially clobbered -- "
					"pid=%d, %%sp=%#lx, %%ip=%#lx\n",
					current->pid, regs->sp, regs->ip);
			utask->doomed = 1;
		} // else nothing written, so no harm
		return 0;
	}
	return orig_ret_addr;
}

/*
 * Check if instruction is nop and return true.
 * Returns 1 when the probed instruction (0x90) can be emulated by simply
 * letting execution continue; regs->ip already points past the int3.
 */
static int uprobe_emulate_insn(struct pt_regs *regs,
						struct uprobe_probept *ppt)
{
	uprobe_opcode_t *insn = ppt->insn;

	if (insn[0] == 0x90)
		/* regs->ip already points to the insn after the nop/int3. */
		return 1;
	/* TODO: add multibyte nop instructions */
	/* For multibyte nop instructions, we need to set ip accordingly. */

	return 0;
}
gpl-2.0
tarunkapadia93/gk_armani
drivers/gpu/msm/kgsl_events.c
271
8425
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/workqueue.h>

#include <kgsl_device.h>

#include "kgsl_trace.h"
#include "adreno.h"

/*
 * Define a kmem cache for the event structures since we allocate and free them
 * so frequently
 */
static struct kmem_cache *events_cache;

/*
 * Unlink the event from its group and queue its callback worker with the
 * given result code.  Caller must hold the owning group's spinlock.
 */
static inline void signal_event(struct kgsl_device *device,
		struct kgsl_event *event, int result)
{
	list_del(&event->node);
	event->result = result;
	queue_work(device->events_wq, &event->work);
}

/**
 * _kgsl_event_worker() - Work handler for processing GPU event callbacks
 * @work: Pointer to the work_struct for the event
 *
 * Each event callback has its own work struct and is run on an event
 * specific workqueue. This is the worker that queues up the event
 * callback function.
 */
static void _kgsl_event_worker(struct work_struct *work)
{
	struct kgsl_event *event = container_of(work, struct kgsl_event, work);
	int id = KGSL_CONTEXT_ID(event->context);

	trace_kgsl_fire_event(id, event->timestamp, event->result,
		jiffies - event->created, event->func);

	if (event->func)
		event->func(event->device, event->context, event->priv,
			event->result);

	/* Drop the context reference taken when the event was added. */
	kgsl_context_put(event->context);
	kmem_cache_free(events_cache, event);
}

/**
 * kgsl_process_event_group() - Handle all the retired events in a group
 * @device: Pointer to a KGSL device
 * @group: Pointer to a GPU events group to process
 */
void kgsl_process_event_group(struct kgsl_device *device,
		struct kgsl_event_group *group)
{
	struct kgsl_event *event, *tmp;
	unsigned int timestamp;
	struct kgsl_context *context;

	if (group == NULL)
		return;

	context = group->context;

	/* Hold the context across the (unlocked) readtimestamp/put below. */
	_kgsl_context_get(context);

	spin_lock(&group->lock);

	timestamp = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);

	/*
	 * If no timestamps have been retired since the last time we were here
	 * then we can avoid going through this loop
	 */
	if (timestamp_cmp(timestamp, group->processed) <= 0)
		goto out;

	list_for_each_entry_safe(event, tmp, &group->events, node) {
		if (timestamp_cmp(event->timestamp, timestamp) <= 0)
			signal_event(device, event, KGSL_EVENT_RETIRED);
	}

	group->processed = timestamp;

out:
	spin_unlock(&group->lock);
	kgsl_context_put(context);
}
EXPORT_SYMBOL(kgsl_process_event_group);

/**
 * kgsl_cancel_events_timestamp() - Cancel pending events for a given timestamp
 * @device: Pointer to a KGSL device
 * @group: Ponter to the GPU event group that owns the event
 * @timestamp: Registered expiry timestamp for the event
 */
void kgsl_cancel_events_timestamp(struct kgsl_device *device,
		struct kgsl_event_group *group, unsigned int timestamp)
{
	struct kgsl_event *event, *tmp;

	spin_lock(&group->lock);

	list_for_each_entry_safe(event, tmp, &group->events, node) {
		if (timestamp_cmp(timestamp, event->timestamp) == 0)
			signal_event(device, event, KGSL_EVENT_CANCELLED);
	}

	spin_unlock(&group->lock);
}
EXPORT_SYMBOL(kgsl_cancel_events_timestamp);

/**
 * kgsl_cancel_events() - Cancel all pending events in the group
 * @device: Pointer to a KGSL device
 * @group: Pointer to a kgsl_events_group
 */
void kgsl_cancel_events(struct kgsl_device *device,
		struct kgsl_event_group *group)
{
	struct kgsl_event *event, *tmp;

	spin_lock(&group->lock);

	list_for_each_entry_safe(event, tmp, &group->events, node)
		signal_event(device, event, KGSL_EVENT_CANCELLED);

	spin_unlock(&group->lock);
}
EXPORT_SYMBOL(kgsl_cancel_events);

/**
 * kgsl_cancel_event() - Cancel a specific event from a group
 * @device: Pointer to a KGSL device
 * @group: Pointer to the group that contains the events
 * @timestamp: Registered expiry timestamp for the event
 * @func: Registered callback for the function
 * @priv: Registered priv data for the function
 */
void kgsl_cancel_event(struct kgsl_device *device,
		struct kgsl_event_group *group, unsigned int timestamp,
		kgsl_event_func func, void *priv)
{
	struct kgsl_event *event, *tmp;

	spin_lock(&group->lock);

	list_for_each_entry_safe(event, tmp, &group->events, node) {
		/* Match on timestamp, callback, and private data together. */
		if (timestamp == event->timestamp && func == event->func &&
			event->priv == priv)
			signal_event(device, event, KGSL_EVENT_CANCELLED);
	}

	spin_unlock(&group->lock);
}
EXPORT_SYMBOL(kgsl_cancel_event);

/**
 * kgsl_add_event() - Add a new GPU event to a group
 * @device: Pointer to a KGSL device
 * @group: Pointer to the group to add the event to
 * @timestamp: Timestamp that the event will expire on
 * @func: Callback function for the event
 * @priv: Private data to send to the callback function
 */
int kgsl_add_event(struct kgsl_device *device, struct kgsl_event_group *group,
		unsigned int timestamp, kgsl_event_func func, void *priv)
{
	unsigned int queued, retired;
	struct kgsl_context *context = group->context;
	struct kgsl_event *event;

	if (!func)
		return -EINVAL;

	/*
	 * If the caller is creating their own timestamps, let them schedule
	 * events in the future.
	 * Otherwise only allow timestamps that have been
	 * queued.
	 */
	if (!context || !(context->flags & KGSL_CONTEXT_USER_GENERATED_TS)) {
		queued = kgsl_readtimestamp(device, context,
			KGSL_TIMESTAMP_QUEUED);

		if (timestamp_cmp(timestamp, queued) > 0)
			return -EINVAL;
	}

	event = kmem_cache_alloc(events_cache, GFP_KERNEL);
	if (event == NULL)
		return -ENOMEM;

	/* Get a reference to the context while the event is active */
	/* NOTE(review): _kgsl_context_get() return value is not checked here
	 * — confirm the context cannot already be detached at this point. */
	_kgsl_context_get(context);

	event->device = device;
	event->context = context;
	event->timestamp = timestamp;
	event->priv = priv;
	event->func = func;
	event->created = jiffies;

	INIT_WORK(&event->work, _kgsl_event_worker);

	trace_kgsl_register_event(KGSL_CONTEXT_ID(context), timestamp, func);

	spin_lock(&group->lock);

	/*
	 * Check to see if the requested timestamp has already retired.  If so,
	 * schedule the callback right away
	 */
	retired = kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED);

	if (timestamp_cmp(retired, timestamp) >= 0) {
		event->result = KGSL_EVENT_RETIRED;
		queue_work(device->events_wq, &event->work);
		spin_unlock(&group->lock);
		return 0;
	}

	/* Add the event to the group list */
	list_add_tail(&event->node, &group->events);

	spin_unlock(&group->lock);

	return 0;
}
EXPORT_SYMBOL(kgsl_add_event);

/* Protects group_list; readers iterate, writers add/remove groups. */
static DEFINE_RWLOCK(group_lock);
static LIST_HEAD(group_list);

/**
 * kgsl_process_events() - Work queue for processing new timestamp events
 * @work: Pointer to a work_struct
 */
void kgsl_process_events(struct work_struct *work)
{
	struct kgsl_event_group *group;
	struct kgsl_device *device = container_of(work, struct kgsl_device,
		event_work);

	read_lock(&group_lock);

	list_for_each_entry(group, &group_list, group)
		kgsl_process_event_group(device, group);

	read_unlock(&group_lock);
}
EXPORT_SYMBOL(kgsl_process_events);

/**
 * kgsl_del_event_group() - Remove a GPU event group
 * @group: GPU event group to remove
 */
void kgsl_del_event_group(struct kgsl_event_group *group)
{
	/* Make sure that all the events have been deleted from the list */
	BUG_ON(!list_empty(&group->events));

	write_lock(&group_lock);
	list_del(&group->group);
	write_unlock(&group_lock);
}
EXPORT_SYMBOL(kgsl_del_event_group);

/**
 * kgsl_add_event_group() - Add a new GPU event group
 * group: Pointer to the new group to add to the list
 */
void kgsl_add_event_group(struct kgsl_event_group *group,
		struct kgsl_context *context)
{
	spin_lock_init(&group->lock);
	INIT_LIST_HEAD(&group->events);

	group->context = context;

	write_lock(&group_lock);
	list_add_tail(&group->group, &group_list);
	write_unlock(&group_lock);
}
EXPORT_SYMBOL(kgsl_add_event_group);

/**
 * kgsl_events_exit() - Destroy the event kmem cache on module exit
 */
void kgsl_events_exit(void)
{
	if (events_cache)
		kmem_cache_destroy(events_cache);
}

/**
 * kgsl_events_init() - Create the event kmem cache on module start
 */
void __init kgsl_events_init(void)
{
	events_cache = KMEM_CACHE(kgsl_event, 0);
}
gpl-2.0
kannu1994/maguro_kernel
drivers/gpu/drm/radeon/radeon_atombios.c
271
102748
/* * Copyright 2007-8 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Dave Airlie * Alex Deucher */ #include "drmP.h" #include "radeon_drm.h" #include "radeon.h" #include "atom.h" #include "atom-bits.h" /* from radeon_encoder.c */ extern uint32_t radeon_get_encoder_enum(struct drm_device *dev, uint32_t supported_device, uint8_t dac); extern void radeon_link_encoder_connector(struct drm_device *dev); extern void radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device, u16 caps); /* from radeon_connector.c */ extern void radeon_add_atom_connector(struct drm_device *dev, uint32_t connector_id, uint32_t supported_device, int connector_type, struct radeon_i2c_bus_rec *i2c_bus, uint32_t igp_lane_info, uint16_t connector_object_id, struct radeon_hpd *hpd, struct radeon_router *router); /* from radeon_legacy_encoder.c */ extern void radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_enum, uint32_t supported_device); union atom_supported_devices { struct _ATOM_SUPPORTED_DEVICES_INFO info; struct _ATOM_SUPPORTED_DEVICES_INFO_2 info_2; struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1; }; static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev, uint8_t id) { struct atom_context *ctx = rdev->mode_info.atom_context; ATOM_GPIO_I2C_ASSIGMENT *gpio; struct radeon_i2c_bus_rec i2c; int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); struct _ATOM_GPIO_I2C_INFO *i2c_info; uint16_t data_offset, size; int i, num_indices; memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); i2c.valid = false; if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_I2C_ASSIGMENT); for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ if ((rdev->family == CHIP_R420) || (rdev->family == CHIP_R423) || 
(rdev->family == CHIP_RV410)) { if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { gpio->ucClkMaskShift = 0x19; gpio->ucDataMaskShift = 0x18; } } /* some evergreen boards have bad data for this entry */ if (ASIC_IS_DCE4(rdev)) { if ((i == 7) && (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && (gpio->sucI2cId.ucAccess == 0)) { gpio->sucI2cId.ucAccess = 0x97; gpio->ucDataMaskShift = 8; gpio->ucDataEnShift = 8; gpio->ucDataY_Shift = 8; gpio->ucDataA_Shift = 8; } } /* some DCE3 boards have bad data for this entry */ if (ASIC_IS_DCE3(rdev)) { if ((i == 4) && (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && (gpio->sucI2cId.ucAccess == 0x94)) gpio->sucI2cId.ucAccess = 0x14; } if (gpio->sucI2cId.ucAccess == id) { i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); i2c.en_clk_mask = (1 << gpio->ucClkEnShift); i2c.en_data_mask = (1 << gpio->ucDataEnShift); i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); i2c.y_data_mask = (1 << gpio->ucDataY_Shift); i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); i2c.a_data_mask = (1 << gpio->ucDataA_Shift); if (gpio->sucI2cId.sbfAccess.bfHW_Capable) i2c.hw_capable = true; else i2c.hw_capable = false; if (gpio->sucI2cId.ucAccess == 0xa0) i2c.mm_i2c = true; else i2c.mm_i2c = false; i2c.i2c_id = gpio->sucI2cId.ucAccess; if (i2c.mask_clk_reg) i2c.valid = true; break; } } 
} return i2c; } void radeon_atombios_i2c_init(struct radeon_device *rdev) { struct atom_context *ctx = rdev->mode_info.atom_context; ATOM_GPIO_I2C_ASSIGMENT *gpio; struct radeon_i2c_bus_rec i2c; int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info); struct _ATOM_GPIO_I2C_INFO *i2c_info; uint16_t data_offset, size; int i, num_indices; char stmp[32]; memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec)); if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset); num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_I2C_ASSIGMENT); for (i = 0; i < num_indices; i++) { gpio = &i2c_info->asGPIO_Info[i]; i2c.valid = false; /* r4xx mask is technically not used by the hw, so patch in the legacy mask bits */ if ((rdev->family == CHIP_R420) || (rdev->family == CHIP_R423) || (rdev->family == CHIP_RV410)) { if ((le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0018) || (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x0019) || (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x001a)) { gpio->ucClkMaskShift = 0x19; gpio->ucDataMaskShift = 0x18; } } /* some evergreen boards have bad data for this entry */ if (ASIC_IS_DCE4(rdev)) { if ((i == 7) && (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1936) && (gpio->sucI2cId.ucAccess == 0)) { gpio->sucI2cId.ucAccess = 0x97; gpio->ucDataMaskShift = 8; gpio->ucDataEnShift = 8; gpio->ucDataY_Shift = 8; gpio->ucDataA_Shift = 8; } } /* some DCE3 boards have bad data for this entry */ if (ASIC_IS_DCE3(rdev)) { if ((i == 4) && (le16_to_cpu(gpio->usClkMaskRegisterIndex) == 0x1fda) && (gpio->sucI2cId.ucAccess == 0x94)) gpio->sucI2cId.ucAccess = 0x14; } i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4; i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4; i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4; i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4; i2c.y_clk_reg = 
le16_to_cpu(gpio->usClkY_RegisterIndex) * 4; i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4; i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4; i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4; i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift); i2c.mask_data_mask = (1 << gpio->ucDataMaskShift); i2c.en_clk_mask = (1 << gpio->ucClkEnShift); i2c.en_data_mask = (1 << gpio->ucDataEnShift); i2c.y_clk_mask = (1 << gpio->ucClkY_Shift); i2c.y_data_mask = (1 << gpio->ucDataY_Shift); i2c.a_clk_mask = (1 << gpio->ucClkA_Shift); i2c.a_data_mask = (1 << gpio->ucDataA_Shift); if (gpio->sucI2cId.sbfAccess.bfHW_Capable) i2c.hw_capable = true; else i2c.hw_capable = false; if (gpio->sucI2cId.ucAccess == 0xa0) i2c.mm_i2c = true; else i2c.mm_i2c = false; i2c.i2c_id = gpio->sucI2cId.ucAccess; if (i2c.mask_clk_reg) { i2c.valid = true; sprintf(stmp, "0x%x", i2c.i2c_id); rdev->i2c_bus[i] = radeon_i2c_create(rdev->ddev, &i2c, stmp); } } } } static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev, u8 id) { struct atom_context *ctx = rdev->mode_info.atom_context; struct radeon_gpio_rec gpio; int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT); struct _ATOM_GPIO_PIN_LUT *gpio_info; ATOM_GPIO_PIN_ASSIGNMENT *pin; u16 data_offset, size; int i, num_indices; memset(&gpio, 0, sizeof(struct radeon_gpio_rec)); gpio.valid = false; if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) { gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset); num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT); for (i = 0; i < num_indices; i++) { pin = &gpio_info->asGPIO_Pin[i]; if (id == pin->ucGPIO_ID) { gpio.id = pin->ucGPIO_ID; gpio.reg = le16_to_cpu(pin->usGpioPin_AIndex) * 4; gpio.mask = (1 << pin->ucGpioPinBitShift); gpio.valid = true; break; } } } return gpio; } static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev, struct radeon_gpio_rec *gpio) 
{ struct radeon_hpd hpd; u32 reg; memset(&hpd, 0, sizeof(struct radeon_hpd)); if (ASIC_IS_DCE4(rdev)) reg = EVERGREEN_DC_GPIO_HPD_A; else reg = AVIVO_DC_GPIO_HPD_A; hpd.gpio = *gpio; if (gpio->reg == reg) { switch(gpio->mask) { case (1 << 0): hpd.hpd = RADEON_HPD_1; break; case (1 << 8): hpd.hpd = RADEON_HPD_2; break; case (1 << 16): hpd.hpd = RADEON_HPD_3; break; case (1 << 24): hpd.hpd = RADEON_HPD_4; break; case (1 << 26): hpd.hpd = RADEON_HPD_5; break; case (1 << 28): hpd.hpd = RADEON_HPD_6; break; default: hpd.hpd = RADEON_HPD_NONE; break; } } else hpd.hpd = RADEON_HPD_NONE; return hpd; } static bool radeon_atom_apply_quirks(struct drm_device *dev, uint32_t supported_device, int *connector_type, struct radeon_i2c_bus_rec *i2c_bus, uint16_t *line_mux, struct radeon_hpd *hpd) { /* Asus M2A-VM HDMI board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x791e) && (dev->pdev->subsystem_vendor == 0x1043) && (dev->pdev->subsystem_device == 0x826d)) { if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) *connector_type = DRM_MODE_CONNECTOR_DVID; } /* Asrock RS600 board lists the DVI port as HDMI */ if ((dev->pdev->device == 0x7941) && (dev->pdev->subsystem_vendor == 0x1849) && (dev->pdev->subsystem_device == 0x7941)) { if ((*connector_type == DRM_MODE_CONNECTOR_HDMIA) && (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) *connector_type = DRM_MODE_CONNECTOR_DVID; } /* MSI K9A2GM V2/V3 board has no HDMI or DVI */ if ((dev->pdev->device == 0x796e) && (dev->pdev->subsystem_vendor == 0x1462) && (dev->pdev->subsystem_device == 0x7302)) { if ((supported_device == ATOM_DEVICE_DFP2_SUPPORT) || (supported_device == ATOM_DEVICE_DFP3_SUPPORT)) return false; } /* a-bit f-i90hd - ciaranm on #radeonhd - this board has no DVI */ if ((dev->pdev->device == 0x7941) && (dev->pdev->subsystem_vendor == 0x147b) && (dev->pdev->subsystem_device == 0x2412)) { if (*connector_type == DRM_MODE_CONNECTOR_DVII) return false; } /* Falcon NW 
	   laptop lists vga ddc line for LVDS */
	if ((dev->pdev->device == 0x5653) &&
	    (dev->pdev->subsystem_vendor == 0x1462) &&
	    (dev->pdev->subsystem_device == 0x0291)) {
		if (*connector_type == DRM_MODE_CONNECTOR_LVDS) {
			i2c_bus->valid = false;
			*line_mux = 53;
		}
	}

	/* HIS X1300 is DVI+VGA, not DVI+DVI */
	if ((dev->pdev->device == 0x7146) &&
	    (dev->pdev->subsystem_vendor == 0x17af) &&
	    (dev->pdev->subsystem_device == 0x2058)) {
		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
			return false;
	}

	/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
	if ((dev->pdev->device == 0x7142) &&
	    (dev->pdev->subsystem_vendor == 0x1458) &&
	    (dev->pdev->subsystem_device == 0x2134)) {
		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
			return false;
	}

	/* Funky macbooks */
	if ((dev->pdev->device == 0x71C5) &&
	    (dev->pdev->subsystem_vendor == 0x106b) &&
	    (dev->pdev->subsystem_device == 0x0080)) {
		if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
		    (supported_device == ATOM_DEVICE_DFP2_SUPPORT))
			return false;
		if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
			*line_mux = 0x90;
	}

	/* mac rv630, rv730, others */
	if ((supported_device == ATOM_DEVICE_TV1_SUPPORT) &&
	    (*connector_type == DRM_MODE_CONNECTOR_DVII)) {
		*connector_type = DRM_MODE_CONNECTOR_9PinDIN;
		*line_mux = CONNECTOR_7PIN_DIN_ENUM_ID1;
	}

	/* ASUS HD 3600 XT board lists the DVI port as HDMI */
	if ((dev->pdev->device == 0x9598) &&
	    (dev->pdev->subsystem_vendor == 0x1043) &&
	    (dev->pdev->subsystem_device == 0x01da)) {
		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
			*connector_type = DRM_MODE_CONNECTOR_DVII;
		}
	}

	/* ASUS HD 3600 board lists the DVI port as HDMI */
	if ((dev->pdev->device == 0x9598) &&
	    (dev->pdev->subsystem_vendor == 0x1043) &&
	    (dev->pdev->subsystem_device == 0x01e4)) {
		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
			*connector_type = DRM_MODE_CONNECTOR_DVII;
		}
	}

	/* ASUS HD 3450 board lists the DVI port as HDMI */
	if ((dev->pdev->device == 0x95C5) &&
	    (dev->pdev->subsystem_vendor == 0x1043) &&
	    (dev->pdev->subsystem_device == 0x01e2)) {
		if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
			*connector_type = DRM_MODE_CONNECTOR_DVII;
		}
	}

	/* some BIOSes seem to report DAC on HDMI - usually this is a board with
	 * HDMI + VGA reporting as HDMI
	 */
	if (*connector_type == DRM_MODE_CONNECTOR_HDMIA) {
		if (supported_device & (ATOM_DEVICE_CRT_SUPPORT)) {
			*connector_type = DRM_MODE_CONNECTOR_VGA;
			*line_mux = 0;
		}
	}

	/* Acer laptop (Acer TravelMate 5730/5730G) has an HDMI port
	 * on the laptop and a DVI port on the docking station and
	 * both share the same encoder, hpd pin, and ddc line.
	 * So while the bios table is technically correct,
	 * we drop the DVI port here since xrandr has no concept of
	 * encoders and will try and drive both connectors
	 * with different crtcs which isn't possible on the hardware
	 * side and leaves no crtcs for LVDS or VGA.
	 */
	if (((dev->pdev->device == 0x95c4) || (dev->pdev->device == 0x9591)) &&
	    (dev->pdev->subsystem_vendor == 0x1025) &&
	    (dev->pdev->subsystem_device == 0x013c)) {
		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT)) {
			/* actually it's a DVI-D port not DVI-I */
			*connector_type = DRM_MODE_CONNECTOR_DVID;
			return false;
		}
	}

	/* XFX Pine Group device rv730 reports no VGA DDC lines
	 * even though they are wired up to record 0x93
	 */
	if ((dev->pdev->device == 0x9498) &&
	    (dev->pdev->subsystem_vendor == 0x1682) &&
	    (dev->pdev->subsystem_device == 0x2452)) {
		struct radeon_device *rdev = dev->dev_private;
		*i2c_bus = radeon_lookup_i2c_gpio(rdev, 0x93);
	}

	/* Fujitsu D3003-S2 board lists DVI-I as DVI-D and VGA */
	if ((dev->pdev->device == 0x9802) &&
	    (dev->pdev->subsystem_vendor == 0x1734) &&
	    (dev->pdev->subsystem_device == 0x11bd)) {
		if (*connector_type == DRM_MODE_CONNECTOR_VGA) {
			*connector_type = DRM_MODE_CONNECTOR_DVII;
			*line_mux = 0x3103;
		} else if (*connector_type == DRM_MODE_CONNECTOR_DVID) {
			*connector_type = DRM_MODE_CONNECTOR_DVII;
		}
	}

	return true;
}

/* BIOS supported-devices connector type -> DRM connector type */
const int supported_devices_connector_convert[] = {
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_VGA,
	DRM_MODE_CONNECTOR_DVII,
	DRM_MODE_CONNECTOR_DVID,
	DRM_MODE_CONNECTOR_DVIA,
	DRM_MODE_CONNECTOR_SVIDEO,
	DRM_MODE_CONNECTOR_Composite,
	DRM_MODE_CONNECTOR_LVDS,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_HDMIA,
	DRM_MODE_CONNECTOR_HDMIB,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_9PinDIN,
	DRM_MODE_CONNECTOR_DisplayPort
};

/* DRM connector type (index) -> BIOS connector object id */
const uint16_t supported_devices_connector_object_id_convert[] = {
	CONNECTOR_OBJECT_ID_NONE,
	CONNECTOR_OBJECT_ID_VGA,
	CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I, /* not all boards support DL */
	CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D, /* not all boards support DL */
	CONNECTOR_OBJECT_ID_VGA, /* technically DVI-A */
	CONNECTOR_OBJECT_ID_COMPOSITE,
	CONNECTOR_OBJECT_ID_SVIDEO,
	CONNECTOR_OBJECT_ID_LVDS,
	CONNECTOR_OBJECT_ID_9PIN_DIN,
	CONNECTOR_OBJECT_ID_9PIN_DIN,
	CONNECTOR_OBJECT_ID_DISPLAYPORT,
	CONNECTOR_OBJECT_ID_HDMI_TYPE_A,
	CONNECTOR_OBJECT_ID_HDMI_TYPE_B,
	CONNECTOR_OBJECT_ID_SVIDEO
};

/* BIOS object-table connector object id -> DRM connector type */
const int object_connector_convert[] = {
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_DVII,
	DRM_MODE_CONNECTOR_DVII,
	DRM_MODE_CONNECTOR_DVID,
	DRM_MODE_CONNECTOR_DVID,
	DRM_MODE_CONNECTOR_VGA,
	DRM_MODE_CONNECTOR_Composite,
	DRM_MODE_CONNECTOR_SVIDEO,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_9PinDIN,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_HDMIA,
	DRM_MODE_CONNECTOR_HDMIB,
	DRM_MODE_CONNECTOR_LVDS,
	DRM_MODE_CONNECTOR_9PinDIN,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_Unknown,
	DRM_MODE_CONNECTOR_DisplayPort,
	DRM_MODE_CONNECTOR_eDP,
	DRM_MODE_CONNECTOR_Unknown
};

/*
 * Build the DRM connector/encoder topology from the ATOM Object_Header
 * data table (the "object table", crev >= 2 only).  For each display
 * path that the board supports this walks the per-path graphics-object
 * list to register encoders and DDC routers, looks up the DDC i2c bus
 * and HPD gpio from the connector object's records, applies per-board
 * quirks, and finally registers the connector.  Returns false when the
 * table is missing or too old.
 */
bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct atom_context *ctx = mode_info->atom_context;
	int index = GetIndexIntoMasterTable(DATA, Object_Header);
	u16 size, data_offset;
	u8 frev, crev;
	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
	ATOM_ENCODER_OBJECT_TABLE *enc_obj;
	ATOM_OBJECT_TABLE *router_obj;
	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
	ATOM_OBJECT_HEADER *obj_header;
	int i, j, k, path_size, device_support;
	int connector_type;
	u16 igp_lane_info, conn_id, connector_object_id;
	struct radeon_i2c_bus_rec ddc_bus;
	struct radeon_router router;
	struct radeon_gpio_rec gpio;
	struct radeon_hpd hpd;

	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
		return false;

	if (crev < 2)
		return false;

	/* all sub-tables are located by 16-bit offsets from the header */
	obj_header = (ATOM_OBJECT_HEADER *) (ctx->bios + data_offset);
	path_obj = (ATOM_DISPLAY_OBJECT_PATH_TABLE *)
	    (ctx->bios + data_offset +
	     le16_to_cpu(obj_header->usDisplayPathTableOffset));
	con_obj = (ATOM_CONNECTOR_OBJECT_TABLE *)
	    (ctx->bios + data_offset +
	     le16_to_cpu(obj_header->usConnectorObjectTableOffset));
	enc_obj = (ATOM_ENCODER_OBJECT_TABLE *)
	    (ctx->bios + data_offset +
	     le16_to_cpu(obj_header->usEncoderObjectTableOffset));
	router_obj = (ATOM_OBJECT_TABLE *)
		(ctx->bios + data_offset +
		 le16_to_cpu(obj_header->usRouterObjectTableOffset));
	device_support = le16_to_cpu(obj_header->usDeviceSupport);

	path_size = 0;
	/* display paths are variable-sized; walk them by accumulated size */
	for (i = 0; i < path_obj->ucNumOfDispPath; i++) {
		uint8_t *addr = (uint8_t *) path_obj->asDispPath;
		ATOM_DISPLAY_OBJECT_PATH *path;
		addr += path_size;
		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
		path_size += le16_to_cpu(path->usSize);

		if (device_support & le16_to_cpu(path->usDeviceTag)) {
			uint8_t con_obj_id, con_obj_num, con_obj_type;

			con_obj_id =
			    (le16_to_cpu(path->usConnObjectId) & OBJECT_ID_MASK)
			    >> OBJECT_ID_SHIFT;
			con_obj_num =
			    (le16_to_cpu(path->usConnObjectId) & ENUM_ID_MASK)
			    >> ENUM_ID_SHIFT;
			con_obj_type =
			    (le16_to_cpu(path->usConnObjectId) &
			     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;

			/* TODO CV support */
			if (le16_to_cpu(path->usDeviceTag) ==
				ATOM_DEVICE_CV_SUPPORT)
				continue;

			/* IGP chips */
			if ((rdev->flags & RADEON_IS_IGP) &&
			    (con_obj_id ==
			     CONNECTOR_OBJECT_ID_PCIE_CONNECTOR)) {
				uint16_t igp_offset = 0;
				ATOM_INTEGRATED_SYSTEM_INFO_V2 *igp_obj;

				index =
				    GetIndexIntoMasterTable(DATA,
							    IntegratedSystemInfo);

				if (atom_parse_data_header(ctx, index, &size, &frev,
							   &crev, &igp_offset)) {

					if (crev >= 2) {
						igp_obj =
						    (ATOM_INTEGRATED_SYSTEM_INFO_V2
						     *) (ctx->bios + igp_offset);

						if (igp_obj) {
							uint32_t slot_config, ct;

							/* NOTE(review): slot_config is read without
							 * le32_to_cpu while every other table field
							 * here is byte-swapped - looks suspect on
							 * big-endian; confirm against newer upstream. */
							if (con_obj_num == 1)
								slot_config =
								    igp_obj->
								    ulDDISlot1Config;
							else
								slot_config =
								    igp_obj->
								    ulDDISlot2Config;

							ct = (slot_config >> 16) & 0xff;
							connector_type =
							    object_connector_convert
							    [ct];
							connector_object_id = ct;
							igp_lane_info =
							    slot_config & 0xffff;
						} else
							continue;
					} else
						continue;
				} else {
					igp_lane_info = 0;
					connector_type =
					    object_connector_convert[con_obj_id];
					connector_object_id = con_obj_id;
				}
			} else {
				igp_lane_info = 0;
				connector_type =
				    object_connector_convert[con_obj_id];
				connector_object_id = con_obj_id;
			}

			if (connector_type == DRM_MODE_CONNECTOR_Unknown)
				continue;

			router.ddc_valid = false;
			router.cd_valid = false;
			/* scan this path's graphics objects for encoders/routers */
			for (j = 0; j < ((le16_to_cpu(path->usSize) - 8) / 2); j++) {
				uint8_t grph_obj_id, grph_obj_num, grph_obj_type;

				grph_obj_id =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     OBJECT_ID_MASK) >> OBJECT_ID_SHIFT;
				grph_obj_num =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     ENUM_ID_MASK) >> ENUM_ID_SHIFT;
				grph_obj_type =
				    (le16_to_cpu(path->usGraphicObjIds[j]) &
				     OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;

				if (grph_obj_type == GRAPH_OBJECT_TYPE_ENCODER) {
					for (k = 0; k < enc_obj->ucNumberOfObjects; k++) {
						u16 encoder_obj = le16_to_cpu(enc_obj->asObjects[k].usObjectID);
						if (le16_to_cpu(path->usGraphicObjIds[j]) == encoder_obj) {
							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
								(ctx->bios + data_offset +
								 le16_to_cpu(enc_obj->asObjects[k].usRecordOffset));
							ATOM_ENCODER_CAP_RECORD *cap_record;
							u16 caps = 0;

							/* collect encoder capability records */
							while (record->ucRecordSize > 0 &&
							       record->ucRecordType > 0 &&
							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
								switch (record->ucRecordType) {
								case ATOM_ENCODER_CAP_RECORD_TYPE:
									cap_record = (ATOM_ENCODER_CAP_RECORD *)
										record;
									caps = le16_to_cpu(cap_record->usEncoderCap);
									break;
								}
								record = (ATOM_COMMON_RECORD_HEADER *)
									((char *)record + record->ucRecordSize);
							}
							radeon_add_atom_encoder(dev,
										encoder_obj,
										le16_to_cpu
										(path->
										 usDeviceTag),
										caps);
						}
					}
				} else if (grph_obj_type == GRAPH_OBJECT_TYPE_ROUTER) {
					for (k = 0; k < router_obj->ucNumberOfObjects; k++) {
						u16 router_obj_id = le16_to_cpu(router_obj->asObjects[k].usObjectID);
						if (le16_to_cpu(path->usGraphicObjIds[j]) == router_obj_id) {
							ATOM_COMMON_RECORD_HEADER *record = (ATOM_COMMON_RECORD_HEADER *)
								(ctx->bios + data_offset +
								 le16_to_cpu(router_obj->asObjects[k].usRecordOffset));
							ATOM_I2C_RECORD *i2c_record;
							ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
							ATOM_ROUTER_DDC_PATH_SELECT_RECORD *ddc_path;
							ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *cd_path;
							ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *router_src_dst_table =
								(ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *)
								(ctx->bios + data_offset +
								 le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset));
							int enum_id;

							router.router_id = router_obj_id;
							/* find this connector among the router's destinations */
							for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst;
							     enum_id++) {
								if (le16_to_cpu(path->usConnObjectId) ==
								    le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id]))
									break;
							}

							while (record->ucRecordSize > 0 &&
							       record->ucRecordType > 0 &&
							       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
								switch (record->ucRecordType) {
								case ATOM_I2C_RECORD_TYPE:
									i2c_record =
										(ATOM_I2C_RECORD *)
										record;
									i2c_config =
										(ATOM_I2C_ID_CONFIG_ACCESS *)
										&i2c_record->sucI2cId;
									router.i2c_info =
										radeon_lookup_i2c_gpio(rdev,
												       i2c_config->
												       ucAccess);
									/* BIOS stores 8-bit i2c address; DRM uses 7-bit */
									router.i2c_addr = i2c_record->ucI2CAddr >> 1;
									break;
								case ATOM_ROUTER_DDC_PATH_SELECT_RECORD_TYPE:
									ddc_path = (ATOM_ROUTER_DDC_PATH_SELECT_RECORD *)
										record;
									router.ddc_valid = true;
									router.ddc_mux_type = ddc_path->ucMuxType;
									router.ddc_mux_control_pin = ddc_path->ucMuxControlPin;
									router.ddc_mux_state = ddc_path->ucMuxState[enum_id];
									break;
								case ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD_TYPE:
									cd_path = (ATOM_ROUTER_DATA_CLOCK_PATH_SELECT_RECORD *)
										record;
									router.cd_valid = true;
									router.cd_mux_type = cd_path->ucMuxType;
									router.cd_mux_control_pin = cd_path->ucMuxControlPin;
									router.cd_mux_state = cd_path->ucMuxState[enum_id];
									break;
								}
								record = (ATOM_COMMON_RECORD_HEADER *)
									((char *)record + record->ucRecordSize);
							}
						}
					}
				}
			}

			/* look up gpio for ddc, hpd */
			ddc_bus.valid = false;
			hpd.hpd = RADEON_HPD_NONE;
			if ((le16_to_cpu(path->usDeviceTag) &
			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
					if (le16_to_cpu(path->usConnObjectId) ==
					    le16_to_cpu(con_obj->asObjects[j].
							usObjectID)) {
						ATOM_COMMON_RECORD_HEADER
						    *record =
						    (ATOM_COMMON_RECORD_HEADER
						     *)
						    (ctx->bios + data_offset +
						     le16_to_cpu(con_obj->
								 asObjects[j].
								 usRecordOffset));
						ATOM_I2C_RECORD *i2c_record;
						ATOM_HPD_INT_RECORD *hpd_record;
						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;

						while (record->ucRecordSize > 0 &&
						       record->ucRecordType > 0 &&
						       record->ucRecordType <= ATOM_MAX_OBJECT_RECORD_NUMBER) {
							switch (record->ucRecordType) {
							case ATOM_I2C_RECORD_TYPE:
								i2c_record =
								    (ATOM_I2C_RECORD *)
									record;
								i2c_config =
									(ATOM_I2C_ID_CONFIG_ACCESS *)
									&i2c_record->sucI2cId;
								ddc_bus = radeon_lookup_i2c_gpio(rdev,
												 i2c_config->
												 ucAccess);
								break;
							case ATOM_HPD_INT_RECORD_TYPE:
								hpd_record =
								    (ATOM_HPD_INT_RECORD *)
								    record;
								gpio = radeon_lookup_gpio(rdev,
											  hpd_record->ucHPDIntGPIOID);
								hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
								hpd.plugged_state = hpd_record->ucPlugged_PinState;
								break;
							}
							record =
							    (ATOM_COMMON_RECORD_HEADER
							     *) ((char *)record
								 +
								 record->
								 ucRecordSize);
						}
						break;
					}
				}
			}

			/* needed for aux chan transactions */
			ddc_bus.hpd = hpd.hpd;

			conn_id = le16_to_cpu(path->usConnObjectId);

			if (!radeon_atom_apply_quirks
			    (dev, le16_to_cpu(path->usDeviceTag), &connector_type,
			     &ddc_bus, &conn_id, &hpd))
				continue;

			radeon_add_atom_connector(dev,
						  conn_id,
						  le16_to_cpu(path->
							      usDeviceTag),
						  connector_type, &ddc_bus,
						  igp_lane_info,
						  connector_object_id,
						  &hpd,
						  &router);

		}
	}

	radeon_link_encoder_connector(dev);

	return true;
}

/*
 * Map a DRM connector type (+ device flags) to a BIOS connector object
 * id, consulting the XTMDS_Info table to decide single vs dual link DVI.
 */
static uint16_t
atombios_get_connector_object_id(struct drm_device *dev,
				 int connector_type,
				 uint16_t devices)
{
	struct radeon_device *rdev = dev->dev_private;

	if (rdev->flags & RADEON_IS_IGP) {
		return supported_devices_connector_object_id_convert
			[connector_type];
	} else if (((connector_type == DRM_MODE_CONNECTOR_DVII) ||
		    (connector_type == DRM_MODE_CONNECTOR_DVID)) &&
		   (devices & ATOM_DEVICE_DFP2_SUPPORT)) {
		struct radeon_mode_info *mode_info = &rdev->mode_info;
		struct atom_context *ctx = mode_info->atom_context;
		int index = GetIndexIntoMasterTable(DATA, XTMDS_Info);
		uint16_t size, data_offset;
		uint8_t frev, crev;
		ATOM_XTMDS_INFO *xtmds;

		/* DFP2 uses an external TMDS chip; check its dual-link caps */
		if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
			xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);

			if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
				if (connector_type == DRM_MODE_CONNECTOR_DVII)
					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
				else
					return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
			} else {
				if (connector_type == DRM_MODE_CONNECTOR_DVII)
					return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
				else
					return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
			}
		} else
			return supported_devices_connector_object_id_convert
				[connector_type];
	} else {
		return supported_devices_connector_object_id_convert
			[connector_type];
	}
}

/* scratch record for one connector while merging the supported-devices table */
struct bios_connector {
	bool valid;
	uint16_t line_mux;	/* ddc line / unique id for the connector */
	uint16_t devices;	/* ATOM_DEVICE_* bitmask */
	int connector_type;
	struct radeon_i2c_bus_rec ddc_bus;
	struct radeon_hpd hpd;
};

/*
 * Build connectors/encoders from the older ATOM SupportedDevicesInfo
 * table (used when no object table is present).  Connectors sharing a
 * ddc line are merged (e.g. DVI-I from separate CRT + DFP entries).
 * Returns false if the table cannot be parsed or memory is exhausted.
 */
bool radeon_get_atom_connector_info_from_supported_devices_table(struct
								 drm_device
								 *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct atom_context *ctx = mode_info->atom_context;
	int index = GetIndexIntoMasterTable(DATA, SupportedDevicesInfo);
	uint16_t size, data_offset;
	uint8_t frev, crev;
	uint16_t device_support;
	uint8_t dac;
	union atom_supported_devices *supported_devices;
	int i, j, max_device;
	struct bios_connector *bios_connectors;
	size_t bc_size = sizeof(*bios_connectors) * ATOM_MAX_SUPPORTED_DEVICE;
	struct radeon_router router;

	router.ddc_valid = false;
	router.cd_valid = false;

	bios_connectors = kzalloc(bc_size, GFP_KERNEL);
	if (!bios_connectors)
		return false;

	if (!atom_parse_data_header(ctx, index, &size, &frev, &crev,
				    &data_offset)) {
		kfree(bios_connectors);
		return false;
	}

	supported_devices =
	    (union atom_supported_devices *)(ctx->bios + data_offset);

	device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);

	/* newer table revisions carry more device slots */
	if (frev > 1)
		max_device = ATOM_MAX_SUPPORTED_DEVICE;
	else
		max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;

	for (i = 0; i < max_device; i++) {
		ATOM_CONNECTOR_INFO_I2C ci =
		    supported_devices->info.asConnInfo[i];

		bios_connectors[i].valid = false;

		if (!(device_support & (1 << i))) {
			continue;
		}

		if (i == ATOM_DEVICE_CV_INDEX) {
			DRM_DEBUG_KMS("Skipping Component Video\n");
			continue;
		}

		bios_connectors[i].connector_type =
		    supported_devices_connector_convert[ci.sucConnectorInfo.
							sbfAccess.
							bfConnectorType];

		if (bios_connectors[i].connector_type ==
		    DRM_MODE_CONNECTOR_Unknown)
			continue;

		dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;

		bios_connectors[i].line_mux =
			ci.sucI2cId.ucAccess;

		/* give tv unique connector ids */
		if (i == ATOM_DEVICE_TV1_INDEX) {
			bios_connectors[i].ddc_bus.valid = false;
			bios_connectors[i].line_mux = 50;
		} else if (i == ATOM_DEVICE_TV2_INDEX) {
			bios_connectors[i].ddc_bus.valid = false;
			bios_connectors[i].line_mux = 51;
		} else if (i == ATOM_DEVICE_CV_INDEX) {
			bios_connectors[i].ddc_bus.valid = false;
			bios_connectors[i].line_mux = 52;
		} else
			bios_connectors[i].ddc_bus =
			    radeon_lookup_i2c_gpio(rdev,
						   bios_connectors[i].line_mux);

		if ((crev > 1) && (frev > 1)) {
			u8 isb =
			    supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
			switch (isb) {
			case 0x4:
				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
				break;
			case 0xa:
				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
				break;
			default:
				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
				break;
			}
		} else {
			/* old tables: infer hpd from the device slot */
			if (i == ATOM_DEVICE_DFP1_INDEX)
				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
			else if (i == ATOM_DEVICE_DFP2_INDEX)
				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
			else
				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
		}

		/* Always set the connector type to VGA for CRT1/CRT2. if they are
		 * shared with a DVI port, we'll pick up the DVI connector when we
		 * merge the outputs.  Some bioses incorrectly list VGA ports as DVI.
		 */
		if (i == ATOM_DEVICE_CRT1_INDEX || i == ATOM_DEVICE_CRT2_INDEX)
			bios_connectors[i].connector_type =
			    DRM_MODE_CONNECTOR_VGA;

		if (!radeon_atom_apply_quirks
		    (dev, (1 << i), &bios_connectors[i].connector_type,
		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
		     &bios_connectors[i].hpd))
			continue;

		bios_connectors[i].valid = true;
		bios_connectors[i].devices = (1 << i);

		if (ASIC_IS_AVIVO(rdev) || radeon_r4xx_atom)
			radeon_add_atom_encoder(dev,
						radeon_get_encoder_enum(dev,
								      (1 << i),
								      dac),
						(1 << i),
						0);
		else
			radeon_add_legacy_encoder(dev,
						  radeon_get_encoder_enum(dev,
									(1 << i),
									dac),
						  (1 << i));
	}

	/* combine shared connectors */
	for (i = 0; i < max_device; i++) {
		if (bios_connectors[i].valid) {
			for (j = 0; j < max_device; j++) {
				if (bios_connectors[j].valid && (i != j)) {
					if (bios_connectors[i].line_mux ==
					    bios_connectors[j].line_mux) {
						/* make sure not to combine LVDS */
						if (bios_connectors[i].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
							bios_connectors[i].line_mux = 53;
							bios_connectors[i].ddc_bus.valid = false;
							continue;
						}
						if (bios_connectors[j].devices & (ATOM_DEVICE_LCD_SUPPORT)) {
							bios_connectors[j].line_mux = 53;
							bios_connectors[j].ddc_bus.valid = false;
							continue;
						}
						/* combine analog and digital for DVI-I */
						if (((bios_connectors[i].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
						     (bios_connectors[j].devices & (ATOM_DEVICE_CRT_SUPPORT))) ||
						    ((bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT)) &&
						     (bios_connectors[i].devices & (ATOM_DEVICE_CRT_SUPPORT)))) {
							bios_connectors[i].devices |=
								bios_connectors[j].devices;
							bios_connectors[i].connector_type =
								DRM_MODE_CONNECTOR_DVII;
							if (bios_connectors[j].devices & (ATOM_DEVICE_DFP_SUPPORT))
								bios_connectors[i].hpd =
									bios_connectors[j].hpd;
							bios_connectors[j].valid = false;
						}
					}
				}
			}
		}
	}

	/* add the connectors */
	for (i = 0; i < max_device; i++) {
		if (bios_connectors[i].valid) {
			uint16_t connector_object_id =
				atombios_get_connector_object_id(dev,
						      bios_connectors[i].connector_type,
						      bios_connectors[i].devices);
			radeon_add_atom_connector(dev,
						  bios_connectors[i].line_mux,
						  bios_connectors[i].devices,
						  bios_connectors[i].
						  connector_type,
						  &bios_connectors[i].ddc_bus,
						  0,
						  connector_object_id,
						  &bios_connectors[i].hpd,
						  &router);
		}
	}

	radeon_link_encoder_connector(dev);

	kfree(bios_connectors);
	return true;
}

/* FirmwareInfo table layouts by revision; all share a common prefix */
union firmware_info {
	ATOM_FIRMWARE_INFO info;
	ATOM_FIRMWARE_INFO_V1_2 info_12;
	ATOM_FIRMWARE_INFO_V1_3 info_13;
	ATOM_FIRMWARE_INFO_V1_4 info_14;
	ATOM_FIRMWARE_INFO_V2_1 info_21;
	ATOM_FIRMWARE_INFO_V2_2 info_22;
};

/*
 * Fill rdev->clock (PLL limits, reference clocks, default engine/memory
 * clocks, display clock) from the ATOM FirmwareInfo table.  Zero values
 * from the BIOS are replaced with per-ASIC fallbacks.  Returns false if
 * the table cannot be parsed.
 */
bool radeon_atom_get_clock_info(struct drm_device *dev)
{
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	union firmware_info *firmware_info;
	uint8_t frev, crev;
	struct radeon_pll *p1pll = &rdev->clock.p1pll;
	struct radeon_pll *p2pll = &rdev->clock.p2pll;
	struct radeon_pll *dcpll = &rdev->clock.dcpll;
	struct radeon_pll *spll = &rdev->clock.spll;
	struct radeon_pll *mpll = &rdev->clock.mpll;
	uint16_t data_offset;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);
		/* pixel clocks */
		p1pll->reference_freq =
		    le16_to_cpu(firmware_info->info.usReferenceClock);
		p1pll->reference_div = 0;

		/* the minimum-output field widened to 32 bits in crev 2 */
		if (crev < 2)
			p1pll->pll_out_min =
				le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Output);
		else
			p1pll->pll_out_min =
				le32_to_cpu(firmware_info->info_12.ulMinPixelClockPLL_Output);
		p1pll->pll_out_max =
		    le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);

		if (crev >= 4) {
			p1pll->lcd_pll_out_min =
				le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
			if (p1pll->lcd_pll_out_min == 0)
				p1pll->lcd_pll_out_min = p1pll->pll_out_min;
			p1pll->lcd_pll_out_max =
				le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
			if (p1pll->lcd_pll_out_max == 0)
				p1pll->lcd_pll_out_max = p1pll->pll_out_max;
		} else {
			p1pll->lcd_pll_out_min = p1pll->pll_out_min;
			p1pll->lcd_pll_out_max = p1pll->pll_out_max;
		}

		/* fall back to sane per-ASIC minimums when BIOS reports 0 */
		if (p1pll->pll_out_min == 0) {
			if (ASIC_IS_AVIVO(rdev))
				p1pll->pll_out_min = 64800;
			else
				p1pll->pll_out_min = 20000;
		}

		p1pll->pll_in_min =
		    le16_to_cpu(firmware_info->info.usMinPixelClockPLL_Input);
		p1pll->pll_in_max =
		    le16_to_cpu(firmware_info->info.usMaxPixelClockPLL_Input);

		/* both pixel PLLs share the same limits */
		*p2pll = *p1pll;

		/* system clock */
		if (ASIC_IS_DCE4(rdev))
			spll->reference_freq =
				le16_to_cpu(firmware_info->info_21.usCoreReferenceClock);
		else
			spll->reference_freq =
				le16_to_cpu(firmware_info->info.usReferenceClock);
		spll->reference_div = 0;

		spll->pll_out_min =
		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Output);
		spll->pll_out_max =
		    le32_to_cpu(firmware_info->info.ulMaxEngineClockPLL_Output);

		/* ??? */
		if (spll->pll_out_min == 0) {
			if (ASIC_IS_AVIVO(rdev))
				spll->pll_out_min = 64800;
			else
				spll->pll_out_min = 20000;
		}

		spll->pll_in_min =
		    le16_to_cpu(firmware_info->info.usMinEngineClockPLL_Input);
		spll->pll_in_max =
		    le16_to_cpu(firmware_info->info.usMaxEngineClockPLL_Input);

		/* memory clock */
		if (ASIC_IS_DCE4(rdev))
			mpll->reference_freq =
				le16_to_cpu(firmware_info->info_21.usMemoryReferenceClock);
		else
			mpll->reference_freq =
				le16_to_cpu(firmware_info->info.usReferenceClock);
		mpll->reference_div = 0;

		mpll->pll_out_min =
		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Output);
		mpll->pll_out_max =
		    le32_to_cpu(firmware_info->info.ulMaxMemoryClockPLL_Output);

		/* ??? */
		if (mpll->pll_out_min == 0) {
			if (ASIC_IS_AVIVO(rdev))
				mpll->pll_out_min = 64800;
			else
				mpll->pll_out_min = 20000;
		}

		mpll->pll_in_min =
		    le16_to_cpu(firmware_info->info.usMinMemoryClockPLL_Input);
		mpll->pll_in_max =
		    le16_to_cpu(firmware_info->info.usMaxMemoryClockPLL_Input);

		rdev->clock.default_sclk =
		    le32_to_cpu(firmware_info->info.ulDefaultEngineClock);
		rdev->clock.default_mclk =
		    le32_to_cpu(firmware_info->info.ulDefaultMemoryClock);

		if (ASIC_IS_DCE4(rdev)) {
			rdev->clock.default_dispclk =
				le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
			if (rdev->clock.default_dispclk == 0) {
				if (ASIC_IS_DCE5(rdev))
					rdev->clock.default_dispclk = 54000; /* 540 Mhz */
				else
					rdev->clock.default_dispclk = 60000; /* 600 Mhz */
			}
			rdev->clock.dp_extclk =
				le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
		}
		*dcpll = *p1pll;

		rdev->clock.max_pixel_clock = le16_to_cpu(firmware_info->info.usMaxPixelClock);
		if (rdev->clock.max_pixel_clock == 0)
			rdev->clock.max_pixel_clock = 40000;

		return true;
	}

	return false;
}

/* IntegratedSystemInfo table layouts by revision */
union igp_info {
	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
};

/*
 * Report whether an IGP board has sideport (local) memory, by checking
 * the boot-up sideport/memory clock in the IntegratedSystemInfo table.
 */
bool radeon_atombios_sideport_present(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	union igp_info *igp_info;
	u8 frev, crev;
	u16 data_offset;

	/* sideport is AMD only */
	if (rdev->family == CHIP_RS600)
		return false;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)(mode_info->atom_context->bios +
				      data_offset);
		switch (crev) {
		case 1:
			if (le32_to_cpu(igp_info->info.ulBootUpMemoryClock))
				return true;
			break;
		case 2:
			if (le32_to_cpu(igp_info->info_2.ulBootUpSidePortClock))
				return true;
			break;
		default:
			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
			break;
		}
	}
	return false;
}

/*
 * Read the internal TMDS PLL charge-pump/gain/duty-cycle/voltage-swing
 * settings per frequency step from the ATOM TMDS_Info table.
 */
bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
				   struct radeon_encoder_int_tmds
				   *tmds)
{
	struct drm_device *dev = encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, TMDS_Info);
	uint16_t data_offset;
	struct _ATOM_TMDS_INFO *tmds_info;
	uint8_t frev, crev;
	uint16_t maxfreq;
	int i;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		tmds_info =
			(struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
						   data_offset);

		maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
		for (i = 0; i < 4; i++) {
			tmds->tmds_pll[i].freq =
			    le16_to_cpu(tmds_info->asMiscInfo[i].usFrequency);
			/* pack the PLL fields: charge pump [5:0], VCO gain
			 * [11:6], duty cycle [15:12], voltage swing [19:16] */
			tmds->tmds_pll[i].value =
			    tmds_info->asMiscInfo[i].ucPLL_ChargePump & 0x3f;
			tmds->tmds_pll[i].value |=
			    (tmds_info->asMiscInfo[i].
			     ucPLL_VCO_Gain & 0x3f) << 6;
			tmds->tmds_pll[i].value |=
			    (tmds_info->asMiscInfo[i].
			     ucPLL_DutyCycle & 0xf) << 12;
			tmds->tmds_pll[i].value |=
			    (tmds_info->asMiscInfo[i].
			     ucPLL_VoltageSwing & 0xf) << 16;

			DRM_DEBUG_KMS("TMDS PLL From ATOMBIOS %u %x\n",
				  tmds->tmds_pll[i].freq,
				  tmds->tmds_pll[i].value);

			/* the last entry is flagged with an "infinite" ceiling */
			if (maxfreq == tmds->tmds_pll[i].freq) {
				tmds->tmds_pll[i].freq = 0xffffffff;
				break;
			}
		}
		return true;
	}
	return false;
}

/*
 * Fetch pixel-PLL spread-spectrum parameters matching @id from the
 * PPLL_SS_Info table.  @ss is zeroed first; returns false when no
 * matching entry exists.
 */
bool radeon_atombios_get_ppll_ss_info(struct radeon_device *rdev,
				      struct radeon_atom_ss *ss,
				      int id)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, PPLL_SS_Info);
	uint16_t data_offset, size;
	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
	uint8_t frev, crev;
	int i, num_indices;

	memset(ss, 0, sizeof(struct radeon_atom_ss));
	if (atom_parse_data_header(mode_info->atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		ss_info =
			(struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);

		/* entry count is derived from the table size */
		num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
			sizeof(ATOM_SPREAD_SPECTRUM_ASSIGNMENT);

		for (i = 0; i < num_indices; i++) {
			if (ss_info->asSS_Info[i].ucSS_Id == id) {
				ss->percentage =
					le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
				ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
				ss->step = ss_info->asSS_Info[i].ucSS_Step;
				ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
				ss->range = ss_info->asSS_Info[i].ucSS_Range;
				ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
				return true;
			}
		}
	}
	return false;
}

/*
 * Apply IGP-specific spread-spectrum percentage/rate overrides for
 * TMDS, HDMI or LVDS from the IntegratedSystemInfo V6 table.  Fields
 * left at 0 in the table keep the caller's values.
 */
static void radeon_atombios_get_igp_ss_overrides(struct radeon_device *rdev,
						 struct radeon_atom_ss *ss,
						 int id)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
	u16 data_offset, size;
	struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *igp_info;
	u8 frev, crev;
	u16 percentage = 0, rate = 0;

	/* get any igp specific overrides */
	if (atom_parse_data_header(mode_info->atom_context, index, &size,
				   &frev, &crev, &data_offset)) {
		igp_info = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 *)
			(mode_info->atom_context->bios + data_offset);
		switch (id) {
		case ASIC_INTERNAL_SS_ON_TMDS:
			percentage = le16_to_cpu(igp_info->usDVISSPercentage);
			rate = le16_to_cpu(igp_info->usDVISSpreadRateIn10Hz);
			break;
		case ASIC_INTERNAL_SS_ON_HDMI:
			percentage = le16_to_cpu(igp_info->usHDMISSPercentage);
			rate = le16_to_cpu(igp_info->usHDMISSpreadRateIn10Hz);
			break;
		case ASIC_INTERNAL_SS_ON_LVDS:
			percentage = le16_to_cpu(igp_info->usLvdsSSPercentage);
			rate = le16_to_cpu(igp_info->usLvdsSSpreadRateIn10Hz);
			break;
		}
		if (percentage)
			ss->percentage = percentage;
		if (rate)
			ss->rate = rate;
	}
}

/* ASIC_InternalSS_Info table layouts by revision */
union asic_ss_info {
	struct _ATOM_ASIC_INTERNAL_SS_INFO info;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V2 info_2;
	struct _ATOM_ASIC_INTERNAL_SS_INFO_V3 info_3;
};

/*
 * Look up internal spread-spectrum settings for clock type @id at the
 * given target @clock from the ASIC_InternalSS_Info table, handling the
 * three table revisions (entry layout differs per frev).  On frev 3
 * IGP parts, table values are further adjusted by the IGP overrides.
 */
bool radeon_atombios_get_asic_ss_info(struct radeon_device *rdev,
				      struct radeon_atom_ss *ss,
				      int id, u32 clock)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	uint16_t data_offset, size;
	union asic_ss_info *ss_info;
	uint8_t frev, crev;
	int i, num_indices;

	memset(ss, 0, sizeof(struct radeon_atom_ss));
	if (atom_parse_data_header(mode_info->atom_context, index, &size,
				   &frev, &crev, &data_offset)) {

		ss_info =
			(union asic_ss_info *)(mode_info->atom_context->bios + data_offset);

		switch (frev) {
		case 1:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT);

			for (i = 0; i < num_indices; i++) {
				if ((ss_info->info.asSpreadSpectrum[i].ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_info->info.asSpreadSpectrum[i].ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
					ss->type = ss_info->info.asSpreadSpectrum[i].ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_info->info.asSpreadSpectrum[i].usSpreadRateInKhz);
					return true;
				}
			}
			break;
		case 2:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V2);
			for (i = 0; i < num_indices; i++) {
				if ((ss_info->info_2.asSpreadSpectrum[i].ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_info->info_2.asSpreadSpectrum[i].ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
					ss->type = ss_info->info_2.asSpreadSpectrum[i].ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_info->info_2.asSpreadSpectrum[i].usSpreadRateIn10Hz);
					return true;
				}
			}
			break;
		case 3:
			num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
				sizeof(ATOM_ASIC_SS_ASSIGNMENT_V3);
			for (i = 0; i < num_indices; i++) {
				if ((ss_info->info_3.asSpreadSpectrum[i].ucClockIndication == id) &&
				    (clock <= le32_to_cpu(ss_info->info_3.asSpreadSpectrum[i].ulTargetClockRange))) {
					ss->percentage =
						le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadSpectrumPercentage);
					ss->type = ss_info->info_3.asSpreadSpectrum[i].ucSpreadSpectrumMode;
					ss->rate = le16_to_cpu(ss_info->info_3.asSpreadSpectrum[i].usSpreadRateIn10Hz);
					if (rdev->flags & RADEON_IS_IGP)
						radeon_atombios_get_igp_ss_overrides(rdev, ss, id);
					return true;
				}
			}
			break;
		default:
			DRM_ERROR("Unsupported ASIC_InternalSS_Info table: %d %d\n", frev, crev);
			break;
		}

	}
	return false;
}

/* LVDS_Info table layouts by revision */
union lvds_info {
	struct _ATOM_LVDS_INFO info;
	struct _ATOM_LVDS_INFO_V12 info_12;
};

/*
 * Read the panel's native mode, power-sequencing delay and misc flags
 * from the ATOM LVDS_Info table.  Allocates and returns the encoder
 * dig-block record; NULL when the table is absent or allocation fails.
 * Caller owns the returned memory.
 */
struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
							      radeon_encoder
							      *encoder)
{
	struct drm_device *dev = encoder->base.dev;
	struct radeon_device *rdev = dev->dev_private;
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
	uint16_t data_offset, misc;
	union lvds_info *lvds_info;
	uint8_t frev, crev;
	struct radeon_encoder_atom_dig *lvds = NULL;
	int encoder_enum = (encoder->encoder_enum & ENUM_ID_MASK) >> ENUM_ID_SHIFT;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		lvds_info =
			(union lvds_info *)(mode_info->atom_context->bios + data_offset);
		lvds =
		    kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);

		if (!lvds)
			return NULL;

		/* timings are stored as active + blank/offset/width deltas */
		lvds->native_mode.clock =
		    le16_to_cpu(lvds_info->info.sLCDTiming.usPixClk) * 10;
		lvds->native_mode.hdisplay =
		    le16_to_cpu(lvds_info->info.sLCDTiming.usHActive);
		lvds->native_mode.vdisplay =
		    le16_to_cpu(lvds_info->info.sLCDTiming.usVActive);
		lvds->native_mode.htotal = lvds->native_mode.hdisplay +
			le16_to_cpu(lvds_info->info.sLCDTiming.usHBlanking_Time);
		lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
			le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncOffset);
		lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
			le16_to_cpu(lvds_info->info.sLCDTiming.usHSyncWidth);
		lvds->native_mode.vtotal = lvds->native_mode.vdisplay +
			le16_to_cpu(lvds_info->info.sLCDTiming.usVBlanking_Time);
		lvds->native_mode.vsync_start = lvds->native_mode.vdisplay +
			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncOffset);
		lvds->native_mode.vsync_end = lvds->native_mode.vsync_start +
			le16_to_cpu(lvds_info->info.sLCDTiming.usVSyncWidth);
		lvds->panel_pwr_delay =
		    le16_to_cpu(lvds_info->info.usOffDelayInMs);
		lvds->lcd_misc = lvds_info->info.ucLVDS_Misc;

		misc =
le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess); if (misc & ATOM_VSYNC_POLARITY) lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC; if (misc & ATOM_HSYNC_POLARITY) lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC; if (misc & ATOM_COMPOSITESYNC) lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC; if (misc & ATOM_INTERLACE) lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE; if (misc & ATOM_DOUBLE_CLOCK_MODE) lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN; lvds->native_mode.width_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageHSize); lvds->native_mode.height_mm = le16_to_cpu(lvds_info->info.sLCDTiming.usImageVSize); /* set crtc values */ drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V); lvds->lcd_ss_id = lvds_info->info.ucSS_Id; encoder->native_mode = lvds->native_mode; if (encoder_enum == 2) lvds->linkb = true; else lvds->linkb = false; /* parse the lcd record table */ if (le16_to_cpu(lvds_info->info.usModePatchTableOffset)) { ATOM_FAKE_EDID_PATCH_RECORD *fake_edid_record; ATOM_PANEL_RESOLUTION_PATCH_RECORD *panel_res_record; bool bad_record = false; u8 *record; if ((frev == 1) && (crev < 2)) /* absolute */ record = (u8 *)(mode_info->atom_context->bios + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); else /* relative */ record = (u8 *)(mode_info->atom_context->bios + data_offset + le16_to_cpu(lvds_info->info.usModePatchTableOffset)); while (*record != ATOM_RECORD_END_TYPE) { switch (*record) { case LCD_MODE_PATCH_RECORD_MODE_TYPE: record += sizeof(ATOM_PATCH_RECORD_MODE); break; case LCD_RTS_RECORD_TYPE: record += sizeof(ATOM_LCD_RTS_RECORD); break; case LCD_CAP_RECORD_TYPE: record += sizeof(ATOM_LCD_MODE_CONTROL_CAP); break; case LCD_FAKE_EDID_PATCH_RECORD_TYPE: fake_edid_record = (ATOM_FAKE_EDID_PATCH_RECORD *)record; if (fake_edid_record->ucFakeEDIDLength) { struct edid *edid; int edid_size = max((int)EDID_LENGTH, (int)fake_edid_record->ucFakeEDIDLength); edid = kmalloc(edid_size, GFP_KERNEL); if (edid) { memcpy((u8 *)edid, 
(u8 *)&fake_edid_record->ucFakeEDIDString[0], fake_edid_record->ucFakeEDIDLength); if (drm_edid_is_valid(edid)) { rdev->mode_info.bios_hardcoded_edid = edid; rdev->mode_info.bios_hardcoded_edid_size = edid_size; } else kfree(edid); } } record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; lvds->native_mode.width_mm = panel_res_record->usHSize; lvds->native_mode.height_mm = panel_res_record->usVSize; record += sizeof(ATOM_PANEL_RESOLUTION_PATCH_RECORD); break; default: DRM_ERROR("Bad LCD record %d\n", *record); bad_record = true; break; } if (bad_record) break; } } } return lvds; } struct radeon_encoder_primary_dac * radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, CompassionateData); uint16_t data_offset; struct _COMPASSIONATE_DATA *dac_info; uint8_t frev, crev; uint8_t bg, dac; struct radeon_encoder_primary_dac *p_dac = NULL; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { dac_info = (struct _COMPASSIONATE_DATA *) (mode_info->atom_context->bios + data_offset); p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL); if (!p_dac) return NULL; bg = dac_info->ucDAC1_BG_Adjustment; dac = dac_info->ucDAC1_DAC_Adjustment; p_dac->ps2_pdac_adj = (bg << 8) | (dac); } return p_dac; } bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index, struct drm_display_mode *mode) { struct radeon_mode_info *mode_info = &rdev->mode_info; ATOM_ANALOG_TV_INFO *tv_info; ATOM_ANALOG_TV_INFO_V1_2 *tv_info_v1_2; ATOM_DTD_FORMAT *dtd_timings; int data_index = GetIndexIntoMasterTable(DATA, AnalogTV_Info); u8 frev, crev; u16 data_offset, misc; if (!atom_parse_data_header(mode_info->atom_context, data_index, 
NULL, &frev, &crev, &data_offset))
		return false;

	/* Table layout differs by content revision: crev 1 stores packed
	 * CRTC timing fields, crev 2 stores DTD (detailed timing) records.
	 */
	switch (crev) {
	case 1:
		tv_info = (ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
		if (index >= MAX_SUPPORTED_TV_TIMING)
			return false;

		/* Copy the pre-computed CRTC timing values straight across. */
		mode->crtc_htotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Total);
		mode->crtc_hdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_Disp);
		mode->crtc_hsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart);
		mode->crtc_hsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncStart) +
			le16_to_cpu(tv_info->aModeTimings[index].usCRTC_H_SyncWidth);

		mode->crtc_vtotal = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Total);
		mode->crtc_vdisplay = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_Disp);
		mode->crtc_vsync_start = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart);
		mode->crtc_vsync_end = le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncStart) +
			le16_to_cpu(tv_info->aModeTimings[index].usCRTC_V_SyncWidth);

		/* Map ATOM misc flags to DRM mode flags. */
		mode->flags = 0;
		misc = le16_to_cpu(tv_info->aModeTimings[index].susModeMiscInfo.usAccess);
		if (misc & ATOM_VSYNC_POLARITY)
			mode->flags |= DRM_MODE_FLAG_NVSYNC;
		if (misc & ATOM_HSYNC_POLARITY)
			mode->flags |= DRM_MODE_FLAG_NHSYNC;
		if (misc & ATOM_COMPOSITESYNC)
			mode->flags |= DRM_MODE_FLAG_CSYNC;
		if (misc & ATOM_INTERLACE)
			mode->flags |= DRM_MODE_FLAG_INTERLACE;
		if (misc & ATOM_DOUBLE_CLOCK_MODE)
			mode->flags |= DRM_MODE_FLAG_DBLSCAN;

		/* BIOS stores the pixel clock in 10 kHz units. */
		mode->clock = le16_to_cpu(tv_info->aModeTimings[index].usPixelClock) * 10;

		if (index == 1) {
			/* PAL timings appear to have wrong values for totals */
			mode->crtc_htotal -= 1;
			mode->crtc_vtotal -= 1;
		}
		break;
	case 2:
		tv_info_v1_2 = (ATOM_ANALOG_TV_INFO_V1_2 *)(mode_info->atom_context->bios + data_offset);
		if (index >= MAX_SUPPORTED_TV_TIMING_V1_2)
			return false;

		/* Reconstruct CRTC timings from the DTD active/blank/sync fields. */
		dtd_timings = &tv_info_v1_2->aModeTimings[index];
		mode->crtc_htotal = le16_to_cpu(dtd_timings->usHActive) +
			le16_to_cpu(dtd_timings->usHBlanking_Time);
		mode->crtc_hdisplay = le16_to_cpu(dtd_timings->usHActive);
		mode->crtc_hsync_start = le16_to_cpu(dtd_timings->usHActive) +
			le16_to_cpu(dtd_timings->usHSyncOffset);
		mode->crtc_hsync_end = mode->crtc_hsync_start +
			le16_to_cpu(dtd_timings->usHSyncWidth);
		mode->crtc_vtotal = le16_to_cpu(dtd_timings->usVActive) +
			le16_to_cpu(dtd_timings->usVBlanking_Time);
		mode->crtc_vdisplay = le16_to_cpu(dtd_timings->usVActive);
		mode->crtc_vsync_start = le16_to_cpu(dtd_timings->usVActive) +
			le16_to_cpu(dtd_timings->usVSyncOffset);
		mode->crtc_vsync_end = mode->crtc_vsync_start +
			le16_to_cpu(dtd_timings->usVSyncWidth);

		/* Map ATOM misc flags to DRM mode flags. */
		mode->flags = 0;
		misc = le16_to_cpu(dtd_timings->susModeMiscInfo.usAccess);
		if (misc & ATOM_VSYNC_POLARITY)
			mode->flags |= DRM_MODE_FLAG_NVSYNC;
		if (misc & ATOM_HSYNC_POLARITY)
			mode->flags |= DRM_MODE_FLAG_NHSYNC;
		if (misc & ATOM_COMPOSITESYNC)
			mode->flags |= DRM_MODE_FLAG_CSYNC;
		if (misc & ATOM_INTERLACE)
			mode->flags |= DRM_MODE_FLAG_INTERLACE;
		if (misc & ATOM_DOUBLE_CLOCK_MODE)
			mode->flags |= DRM_MODE_FLAG_DBLSCAN;

		/* pixel clock in 10 kHz units */
		mode->clock = le16_to_cpu(dtd_timings->usPixClk) * 10;
		break;
	}
	return true;
}

/*
 * radeon_atombios_get_tv_info - read the BIOS default analog TV standard.
 * Falls back to NTSC when the table is missing or the value is unknown.
 */
enum radeon_tv_std radeon_atombios_get_tv_info(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, AnalogTV_Info);
	uint16_t data_offset;
	uint8_t frev, crev;
	struct _ATOM_ANALOG_TV_INFO *tv_info;
	enum radeon_tv_std tv_std = TV_STD_NTSC;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		tv_info = (struct _ATOM_ANALOG_TV_INFO *)
			(mode_info->atom_context->bios + data_offset);

		switch (tv_info->ucTV_BootUpDefaultStandard) {
		case ATOM_TV_NTSC:
			tv_std = TV_STD_NTSC;
			DRM_DEBUG_KMS("Default TV standard: NTSC\n");
			break;
		case ATOM_TV_NTSCJ:
			tv_std = TV_STD_NTSC_J;
			DRM_DEBUG_KMS("Default TV standard: NTSC-J\n");
			break;
		case ATOM_TV_PAL:
			tv_std = TV_STD_PAL;
			DRM_DEBUG_KMS("Default TV standard: PAL\n");
			break;
		case ATOM_TV_PALM:
			tv_std = TV_STD_PAL_M;
			DRM_DEBUG_KMS("Default TV standard: PAL-M\n");
			break;
		case ATOM_TV_PALN:
			tv_std =
TV_STD_PAL_N; DRM_DEBUG_KMS("Default TV standard: PAL-N\n"); break; case ATOM_TV_PALCN: tv_std = TV_STD_PAL_CN; DRM_DEBUG_KMS("Default TV standard: PAL-CN\n"); break; case ATOM_TV_PAL60: tv_std = TV_STD_PAL_60; DRM_DEBUG_KMS("Default TV standard: PAL-60\n"); break; case ATOM_TV_SECAM: tv_std = TV_STD_SECAM; DRM_DEBUG_KMS("Default TV standard: SECAM\n"); break; default: tv_std = TV_STD_NTSC; DRM_DEBUG_KMS("Unknown TV standard; defaulting to NTSC\n"); break; } } return tv_std; } struct radeon_encoder_tv_dac * radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder) { struct drm_device *dev = encoder->base.dev; struct radeon_device *rdev = dev->dev_private; struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, CompassionateData); uint16_t data_offset; struct _COMPASSIONATE_DATA *dac_info; uint8_t frev, crev; uint8_t bg, dac; struct radeon_encoder_tv_dac *tv_dac = NULL; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { dac_info = (struct _COMPASSIONATE_DATA *) (mode_info->atom_context->bios + data_offset); tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL); if (!tv_dac) return NULL; bg = dac_info->ucDAC2_CRT2_BG_Adjustment; dac = dac_info->ucDAC2_CRT2_DAC_Adjustment; tv_dac->ps2_tvdac_adj = (bg << 16) | (dac << 20); bg = dac_info->ucDAC2_PAL_BG_Adjustment; dac = dac_info->ucDAC2_PAL_DAC_Adjustment; tv_dac->pal_tvdac_adj = (bg << 16) | (dac << 20); bg = dac_info->ucDAC2_NTSC_BG_Adjustment; dac = dac_info->ucDAC2_NTSC_DAC_Adjustment; tv_dac->ntsc_tvdac_adj = (bg << 16) | (dac << 20); tv_dac->tv_std = radeon_atombios_get_tv_info(rdev); } return tv_dac; } static const char *thermal_controller_names[] = { "NONE", "lm63", "adm1032", "adm1030", "max6649", "lm64", "f75375", "asc7xxx", }; static const char *pp_lib_thermal_controller_names[] = { "NONE", "lm63", "adm1032", "adm1030", "max6649", "lm64", "f75375", "RV6xx", "RV770", "adt7473", "NONE", "External GPIO", 
"Evergreen", "emc2103", "Sumo", "Northern Islands", }; union power_info { struct _ATOM_POWERPLAY_INFO info; struct _ATOM_POWERPLAY_INFO_V2 info_2; struct _ATOM_POWERPLAY_INFO_V3 info_3; struct _ATOM_PPLIB_POWERPLAYTABLE pplib; struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; }; union pplib_clock_info { struct _ATOM_PPLIB_R600_CLOCK_INFO r600; struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; }; union pplib_power_state { struct _ATOM_PPLIB_STATE v1; struct _ATOM_PPLIB_STATE_V2 v2; }; static void radeon_atombios_parse_misc_flags_1_3(struct radeon_device *rdev, int state_index, u32 misc, u32 misc2) { rdev->pm.power_state[state_index].misc = misc; rdev->pm.power_state[state_index].misc2 = misc2; /* order matters! */ if (misc & ATOM_PM_MISCINFO_POWER_SAVING_MODE) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_POWERSAVE; if (misc & ATOM_PM_MISCINFO_DEFAULT_DC_STATE_ENTRY_TRUE) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BATTERY; if (misc & ATOM_PM_MISCINFO_DEFAULT_LOW_DC_STATE_ENTRY_TRUE) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BATTERY; if (misc & ATOM_PM_MISCINFO_LOAD_BALANCE_EN) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED; if (misc & ATOM_PM_MISCINFO_3D_ACCELERATION_EN) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_PERFORMANCE; rdev->pm.power_state[state_index].flags &= ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; } if (misc2 & ATOM_PM_MISCINFO2_SYSTEM_AC_LITE_MODE) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED; if (misc & ATOM_PM_MISCINFO_DRIVER_DEFAULT_MODE) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_DEFAULT; rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; } else if (state_index == 0) { 
rdev->pm.power_state[state_index].clock_info[0].flags |=
			RADEON_PM_MODE_NO_DISPLAY;
	}
}

/*
 * radeon_atombios_parse_power_table_1_3 - build rdev->pm.power_state from
 * a frev 1/2/3 PowerPlayInfo table.  Also registers the external
 * thermal/fan i2c chip if the table advertises one.
 *
 * Returns the number of valid power states parsed (0 on missing table or
 * allocation failure).  Allocates rdev->pm.power_state (owned by rdev->pm).
 */
static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	u32 misc, misc2 = 0;
	int num_modes = 0, i;
	int state_index = 0;
	struct radeon_i2c_bus_rec i2c_bus;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return state_index;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* add the i2c bus for thermal/fan chip */
	if (power_info->info.ucOverdriveThermalController > 0) {
		DRM_INFO("Possible %s thermal controller at 0x%02x\n",
			 thermal_controller_names[power_info->info.ucOverdriveThermalController],
			 power_info->info.ucOverdriveControllerAddress >> 1);
		i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
		rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus);
		if (rdev->pm.i2c_bus) {
			struct i2c_board_info info = { };
			const char *name = thermal_controller_names[power_info->info.
								    ucOverdriveThermalController];
			/* i2c address byte includes the r/w bit; shift it off */
			info.addr = power_info->info.ucOverdriveControllerAddress >> 1;
			strlcpy(info.type, name, sizeof(info.type));
			i2c_new_device(&rdev->pm.i2c_bus->adapter, &info);
		}
	}
	num_modes = power_info->info.ucNumOfPowerModeEntries;
	if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
		num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL);
	if (!rdev->pm.power_state)
		return state_index;
	/* last mode is usually default, array is low to high */
	for (i = 0; i < num_modes; i++) {
		rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE;
		switch (frev) {
		case 1:
			/* frev 1: 16-bit clocks in the table */
			rdev->pm.power_state[state_index].num_clock_modes = 1;
			rdev->pm.power_state[state_index].clock_info[0].mclk =
				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usMemoryClock);
			rdev->pm.power_state[state_index].clock_info[0].sclk =
				le16_to_cpu(power_info->info.asPowerPlayInfo[i].usEngineClock);
			/* skip invalid modes */
			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
				continue;
			rdev->pm.power_state[state_index].pcie_lanes =
				power_info->info.asPowerPlayInfo[i].ucNumPciELanes;
			misc = le32_to_cpu(power_info->info.asPowerPlayInfo[i].ulMiscInfo);
			/* voltage is either GPIO-switched or a programmed VDDC id */
			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_GPIO;
				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
					radeon_lookup_gpio(rdev,
							   power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex);
				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						true;
				else
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						false;
			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_VDDC;
				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
					power_info->info.asPowerPlayInfo[i].ucVoltageDropIndex;
			}
			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, 0);
			state_index++;
			break;
		case 2:
			/* frev 2: 32-bit clocks plus a second misc word */
			rdev->pm.power_state[state_index].num_clock_modes = 1;
			rdev->pm.power_state[state_index].clock_info[0].mclk =
				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMemoryClock);
			rdev->pm.power_state[state_index].clock_info[0].sclk =
				le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulEngineClock);
			/* skip invalid modes */
			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
				continue;
			rdev->pm.power_state[state_index].pcie_lanes =
				power_info->info_2.asPowerPlayInfo[i].ucNumPciELanes;
			misc = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo);
			misc2 = le32_to_cpu(power_info->info_2.asPowerPlayInfo[i].ulMiscInfo2);
			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_GPIO;
				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
					radeon_lookup_gpio(rdev,
							   power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex);
				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						true;
				else
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						false;
			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_VDDC;
				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
					power_info->info_2.asPowerPlayInfo[i].ucVoltageDropIndex;
			}
			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
			state_index++;
			break;
		case 3:
			/* frev 3: like frev 2, plus optional dynamic VDDCI */
			rdev->pm.power_state[state_index].num_clock_modes = 1;
			rdev->pm.power_state[state_index].clock_info[0].mclk =
				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMemoryClock);
			rdev->pm.power_state[state_index].clock_info[0].sclk =
				le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulEngineClock);
			/* skip invalid modes */
			if ((rdev->pm.power_state[state_index].clock_info[0].mclk == 0) ||
			    (rdev->pm.power_state[state_index].clock_info[0].sclk == 0))
				continue;
			rdev->pm.power_state[state_index].pcie_lanes =
				power_info->info_3.asPowerPlayInfo[i].ucNumPciELanes;
			misc = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo);
			misc2 = le32_to_cpu(power_info->info_3.asPowerPlayInfo[i].ulMiscInfo2);
			if ((misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_SUPPORT) ||
			    (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_GPIO;
				rdev->pm.power_state[state_index].clock_info[0].voltage.gpio =
					radeon_lookup_gpio(rdev,
							   power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex);
				if (misc & ATOM_PM_MISCINFO_VOLTAGE_DROP_ACTIVE_HIGH)
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						true;
				else
					rdev->pm.power_state[state_index].clock_info[0].voltage.active_high =
						false;
			} else if (misc & ATOM_PM_MISCINFO_PROGRAM_VOLTAGE) {
				rdev->pm.power_state[state_index].clock_info[0].voltage.type =
					VOLTAGE_VDDC;
				rdev->pm.power_state[state_index].clock_info[0].voltage.vddc_id =
					power_info->info_3.asPowerPlayInfo[i].ucVoltageDropIndex;
				if (misc2 & ATOM_PM_MISCINFO2_VDDCI_DYNAMIC_VOLTAGE_EN) {
					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_enabled =
						true;
					rdev->pm.power_state[state_index].clock_info[0].voltage.vddci_id =
						power_info->info_3.asPowerPlayInfo[i].ucVDDCI_VoltageDropIndex;
				}
			}
			rdev->pm.power_state[state_index].flags = RADEON_PM_STATE_SINGLE_DISPLAY_ONLY;
			radeon_atombios_parse_misc_flags_1_3(rdev, state_index, misc, misc2);
			state_index++;
			break;
		}
	}
	/* last mode is usually default */
	if
(rdev->pm.default_power_state_index == -1) { rdev->pm.power_state[state_index - 1].type = POWER_STATE_TYPE_DEFAULT; rdev->pm.default_power_state_index = state_index - 1; rdev->pm.power_state[state_index - 1].default_clock_mode = &rdev->pm.power_state[state_index - 1].clock_info[0]; rdev->pm.power_state[state_index].flags &= ~RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; rdev->pm.power_state[state_index].misc = 0; rdev->pm.power_state[state_index].misc2 = 0; } return state_index; } static void radeon_atombios_add_pplib_thermal_controller(struct radeon_device *rdev, ATOM_PPLIB_THERMALCONTROLLER *controller) { struct radeon_i2c_bus_rec i2c_bus; /* add the i2c bus for thermal/fan chip */ if (controller->ucType > 0) { if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_RV770; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_SUMO; } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { DRM_INFO("Internal thermal controller %s fan control\n", (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); rdev->pm.int_thermal_type = THERMAL_TYPE_NI; } else if ((controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) || (controller->ucType == ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) || (controller->ucType == ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL)) { DRM_INFO("Special thermal controller config\n"); } else { DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", pp_lib_thermal_controller_names[controller->ucType], controller->ucI2cAddress >> 1, (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); i2c_bus = radeon_lookup_i2c_gpio(rdev, controller->ucI2cLine); rdev->pm.i2c_bus = radeon_i2c_lookup(rdev, &i2c_bus); if (rdev->pm.i2c_bus) { struct i2c_board_info info = { }; const char *name = pp_lib_thermal_controller_names[controller->ucType]; info.addr = controller->ucI2cAddress >> 1; strlcpy(info.type, name, sizeof(info.type)); i2c_new_device(&rdev->pm.i2c_bus->adapter, &info); } } } } static void radeon_atombios_get_default_voltages(struct radeon_device *rdev, u16 *vddc, u16 *vddci) { struct radeon_mode_info *mode_info = &rdev->mode_info; int index = GetIndexIntoMasterTable(DATA, FirmwareInfo); u8 frev, crev; u16 data_offset; union firmware_info *firmware_info; *vddc = 0; *vddci = 0; if (atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset)) { firmware_info = (union firmware_info *)(mode_info->atom_context->bios + data_offset); *vddc = le16_to_cpu(firmware_info->info_14.usBootUpVDDCVoltage); if ((frev == 2) && (crev >= 2)) *vddci = le16_to_cpu(firmware_info->info_22.usBootUpVDDCIVoltage); } } static void radeon_atombios_parse_pplib_non_clock_info(struct radeon_device *rdev, int state_index, int mode_index, struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info) { int j; u32 misc = le32_to_cpu(non_clock_info->ulCapsAndSettings); u32 misc2 = le16_to_cpu(non_clock_info->usClassification); u16 vddc, vddci; radeon_atombios_get_default_voltages(rdev, &vddc, 
&vddci); rdev->pm.power_state[state_index].misc = misc; rdev->pm.power_state[state_index].misc2 = misc2; rdev->pm.power_state[state_index].pcie_lanes = ((misc & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; switch (misc2 & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BATTERY; break; case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_BALANCED; break; case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_PERFORMANCE; break; case ATOM_PPLIB_CLASSIFICATION_UI_NONE: if (misc2 & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_PERFORMANCE; break; } rdev->pm.power_state[state_index].flags = 0; if (misc & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) rdev->pm.power_state[state_index].flags |= RADEON_PM_STATE_SINGLE_DISPLAY_ONLY; if (misc2 & ATOM_PPLIB_CLASSIFICATION_BOOT) { rdev->pm.power_state[state_index].type = POWER_STATE_TYPE_DEFAULT; rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[mode_index - 1]; if (ASIC_IS_DCE5(rdev)) { /* NI chips post without MC ucode, so default clocks are strobe mode only */ rdev->pm.default_sclk = rdev->pm.power_state[state_index].clock_info[0].sclk; rdev->pm.default_mclk = rdev->pm.power_state[state_index].clock_info[0].mclk; rdev->pm.default_vddc = rdev->pm.power_state[state_index].clock_info[0].voltage.voltage; rdev->pm.default_vddci = rdev->pm.power_state[state_index].clock_info[0].voltage.vddci; } else { /* patch the table values with the default slck/mclk from firmware info */ for (j = 0; j < mode_index; j++) { rdev->pm.power_state[state_index].clock_info[j].mclk = rdev->clock.default_mclk; rdev->pm.power_state[state_index].clock_info[j].sclk = rdev->clock.default_sclk; if (vddc) 
rdev->pm.power_state[state_index].clock_info[j].voltage.voltage =
						vddc;
			}
		}
	}
}

/*
 * radeon_atombios_parse_pplib_clock_info - decode one pplib clock-info
 * entry into power_state[state_index].clock_info[mode_index].  The entry
 * layout depends on the asic family (sumo/rs780/evergreen/r600).
 *
 * Returns false when the entry encodes an invalid (zero-clock) mode that
 * the caller should skip, true otherwise.
 */
static bool radeon_atombios_parse_pplib_clock_info(struct radeon_device *rdev,
						   int state_index, int mode_index,
						   union pplib_clock_info *clock_info)
{
	u32 sclk, mclk;

	if (rdev->flags & RADEON_IS_IGP) {
		if (rdev->family >= CHIP_PALM) {
			/* sumo: engine clock only (IGP memory is system RAM) */
			sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow);
			sclk |= clock_info->sumo.ucEngineClockHigh << 16;
			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		} else {
			sclk = le16_to_cpu(clock_info->rs780.usLowEngineClockLow);
			sclk |= clock_info->rs780.ucLowEngineClockHigh << 16;
			rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		}
	} else if (ASIC_IS_DCE4(rdev)) {
		/* evergreen: 24-bit clocks split low/high, SW voltage + vddci */
		sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
		sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
		mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
			VOLTAGE_SW;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
			le16_to_cpu(clock_info->evergreen.usVDDC);
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.vddci =
			le16_to_cpu(clock_info->evergreen.usVDDCI);
	} else {
		/* r600 family: SW voltage, no vddci */
		sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
		sclk |= clock_info->r600.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
		mclk |= clock_info->r600.ucMemoryClockHigh << 16;
		rdev->pm.power_state[state_index].clock_info[mode_index].mclk = mclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].sclk = sclk;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.type =
			VOLTAGE_SW;
		rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage =
			le16_to_cpu(clock_info->r600.usVDDC);
	}

	/* patch up vddc if necessary */
	if
		/* 0xff01 is a sentinel meaning "use the board's max VDDC" */
		(rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage == 0xff01) {
		u16 vddc;

		if (radeon_atom_get_max_vddc(rdev, &vddc) == 0)
			rdev->pm.power_state[state_index].clock_info[mode_index].voltage.voltage = vddc;
	}

	if (rdev->flags & RADEON_IS_IGP) {
		/* skip invalid modes */
		if (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0)
			return false;
	} else {
		/* skip invalid modes */
		if ((rdev->pm.power_state[state_index].clock_info[mode_index].mclk == 0) ||
		    (rdev->pm.power_state[state_index].clock_info[mode_index].sclk == 0))
			return false;
	}
	return true;
}

/*
 * radeon_atombios_parse_power_table_4_5 - build rdev->pm.power_state from
 * a frev 4/5 pplib PowerPlayInfo table.  Returns the number of valid
 * states parsed; allocates rdev->pm.power_state (owned by rdev->pm).
 */
static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j;
	int state_index = 0, mode_index = 0;
	union pplib_clock_info *clock_info;
	bool valid;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return state_index;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController);
	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
				       power_info->pplib.ucNumStates, GFP_KERNEL);
	if (!rdev->pm.power_state)
		return state_index;
	/* first mode is usually default, followed by low to high */
	for (i = 0; i < power_info->pplib.ucNumStates; i++) {
		mode_index = 0;
		/* state and non-clock entries are variable-size; index by the
		 * entry sizes the table header declares */
		power_state = (union pplib_power_state *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
			 i * power_info->pplib.ucStateEntrySize);
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
			 (power_state->v1.ucNonClockStateIndex *
power_info->pplib.ucNonClockSize));
		/* one clock-info entry per DPM level in this state */
		for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
			clock_info = (union pplib_clock_info *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
				 (power_state->v1.ucClockStateIndices[j] *
				  power_info->pplib.ucClockInfoSize));
			valid = radeon_atombios_parse_pplib_clock_info(rdev,
								       state_index, mode_index,
								       clock_info);
			if (valid)
				mode_index++;
		}
		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
		/* only keep states that yielded at least one valid clock mode */
		if (mode_index) {
			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index,
								   mode_index, non_clock_info);
			state_index++;
		}
	}
	/* if multiple clock modes, mark the lowest as no display */
	for (i = 0; i < state_index; i++) {
		if (rdev->pm.power_state[i].num_clock_modes > 1)
			rdev->pm.power_state[i].clock_info[0].flags |=
				RADEON_PM_MODE_NO_DISPLAY;
	}
	/* first mode is usually default */
	if (rdev->pm.default_power_state_index == -1) {
		rdev->pm.power_state[0].type =
			POWER_STATE_TYPE_DEFAULT;
		rdev->pm.default_power_state_index = 0;
		rdev->pm.power_state[0].default_clock_mode =
			&rdev->pm.power_state[0].clock_info[0];
	}
	return state_index;
}

/*
 * radeon_atombios_parse_power_table_6 - build rdev->pm.power_state from a
 * frev 6 pplib table (separate State/ClockInfo/NonClockInfo arrays).
 * Returns the number of valid states parsed; allocates
 * rdev->pm.power_state (owned by rdev->pm).
 */
static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, non_clock_array_index, clock_array_index;
	int state_index = 0, mode_index = 0;
	union pplib_clock_info *clock_info;
	struct StateArray *state_array;
	struct ClockInfoArray *clock_info_array;
	struct NonClockInfoArray *non_clock_info_array;
	bool valid;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return state_index;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	radeon_atombios_add_pplib_thermal_controller(rdev,
						     &power_info->pplib.sThermalController);
	state_array = (struct StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
	rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) *
				       state_array->ucNumEntries, GFP_KERNEL);
	if (!rdev->pm.power_state)
		return state_index;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		mode_index = 0;
		power_state = (union pplib_power_state *)&state_array->states[i];
		/* XXX this might be an inagua bug... */
		non_clock_array_index = i; /* power_state->v2.nonClockStateIndex */
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = power_state->v2.clockInfoIndex[j];
			/* XXX this might be an inagua bug...
			 */
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			clock_info = (union pplib_clock_info *)
				&clock_info_array->clockInfo[clock_array_index];
			valid = radeon_atombios_parse_pplib_clock_info(rdev,
								       state_index, mode_index,
								       clock_info);
			if (valid)
				mode_index++;
		}
		rdev->pm.power_state[state_index].num_clock_modes = mode_index;
		/* only keep states that yielded at least one valid clock mode */
		if (mode_index) {
			radeon_atombios_parse_pplib_non_clock_info(rdev, state_index,
								   mode_index, non_clock_info);
			state_index++;
		}
	}
	/* if multiple clock modes, mark the lowest as no display */
	for (i = 0; i < state_index; i++) {
		if (rdev->pm.power_state[i].num_clock_modes > 1)
			rdev->pm.power_state[i].clock_info[0].flags |=
				RADEON_PM_MODE_NO_DISPLAY;
	}
	/* first mode is usually default */
	if (rdev->pm.default_power_state_index == -1) {
		rdev->pm.power_state[0].type =
			POWER_STATE_TYPE_DEFAULT;
		rdev->pm.default_power_state_index = 0;
		rdev->pm.power_state[0].default_clock_mode =
			&rdev->pm.power_state[0].clock_info[0];
	}
	return state_index;
}

/*
 * radeon_atombios_get_power_modes - top-level power-state setup:
 * dispatches to the table parser matching the PowerPlayInfo format
 * revision, or synthesizes a single default state when no table exists.
 * Leaves rdev->pm.num_power_states and the current/default indices set.
 */
void radeon_atombios_get_power_modes(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int state_index = 0;

	rdev->pm.default_power_state_index = -1;

	if (atom_parse_data_header(mode_info->atom_context, index, NULL,
				   &frev, &crev, &data_offset)) {
		switch (frev) {
		case 1:
		case 2:
		case 3:
			state_index = radeon_atombios_parse_power_table_1_3(rdev);
			break;
		case 4:
		case 5:
			state_index = radeon_atombios_parse_power_table_4_5(rdev);
			break;
		case 6:
			state_index = radeon_atombios_parse_power_table_6(rdev);
			break;
		default:
			break;
		}
	} else {
		rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL);
		if (rdev->pm.power_state) {
			/* add the default mode */
			rdev->pm.power_state[state_index].type =
				POWER_STATE_TYPE_DEFAULT;
			rdev->pm.power_state[state_index].num_clock_modes = 1;
			rdev->pm.power_state[state_index].clock_info[0].mclk = rdev->clock.default_mclk;
rdev->pm.power_state[state_index].clock_info[0].sclk = rdev->clock.default_sclk; rdev->pm.power_state[state_index].default_clock_mode = &rdev->pm.power_state[state_index].clock_info[0]; rdev->pm.power_state[state_index].clock_info[0].voltage.type = VOLTAGE_NONE; rdev->pm.power_state[state_index].pcie_lanes = 16; rdev->pm.default_power_state_index = state_index; rdev->pm.power_state[state_index].flags = 0; state_index++; } } rdev->pm.num_power_states = state_index; rdev->pm.current_power_state_index = rdev->pm.default_power_state_index; rdev->pm.current_clock_mode_index = 0; if (rdev->pm.default_power_state_index >= 0) rdev->pm.current_vddc = rdev->pm.power_state[rdev->pm.default_power_state_index].clock_info[0].voltage.voltage; else rdev->pm.current_vddc = 0; } void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable) { DYNAMIC_CLOCK_GATING_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, DynamicClockGating); args.ucEnable = enable; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } uint32_t radeon_atom_get_engine_clock(struct radeon_device *rdev) { GET_ENGINE_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, GetEngineClock); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); return le32_to_cpu(args.ulReturnEngineClock); } uint32_t radeon_atom_get_memory_clock(struct radeon_device *rdev) { GET_MEMORY_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, GetMemoryClock); atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); return le32_to_cpu(args.ulReturnMemoryClock); } void radeon_atom_set_engine_clock(struct radeon_device *rdev, uint32_t eng_clock) { SET_ENGINE_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, SetEngineClock); args.ulTargetEngineClock = cpu_to_le32(eng_clock); /* 10 khz */ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } void 
radeon_atom_set_memory_clock(struct radeon_device *rdev, uint32_t mem_clock) { SET_MEMORY_CLOCK_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, SetMemoryClock); if (rdev->flags & RADEON_IS_IGP) return; args.ulTargetMemoryClock = cpu_to_le32(mem_clock); /* 10 khz */ atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } union set_voltage { struct _SET_VOLTAGE_PS_ALLOCATION alloc; struct _SET_VOLTAGE_PARAMETERS v1; struct _SET_VOLTAGE_PARAMETERS_V2 v2; }; void radeon_atom_set_voltage(struct radeon_device *rdev, u16 voltage_level, u8 voltage_type) { union set_voltage args; int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); u8 frev, crev, volt_index = voltage_level; if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return; /* 0xff01 is a flag rather then an actual voltage */ if (voltage_level == 0xff01) return; switch (crev) { case 1: args.v1.ucVoltageType = voltage_type; args.v1.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_ALL_SOURCE; args.v1.ucVoltageIndex = volt_index; break; case 2: args.v2.ucVoltageType = voltage_type; args.v2.ucVoltageMode = SET_ASIC_VOLTAGE_MODE_SET_VOLTAGE; args.v2.usVoltageLevel = cpu_to_le16(voltage_level); break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); return; } atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); } int radeon_atom_get_max_vddc(struct radeon_device *rdev, u16 *voltage) { union set_voltage args; int index = GetIndexIntoMasterTable(COMMAND, SetVoltage); u8 frev, crev; if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev)) return -EINVAL; switch (crev) { case 1: return -EINVAL; case 2: args.v2.ucVoltageType = SET_VOLTAGE_GET_MAX_VOLTAGE; args.v2.ucVoltageMode = 0; args.v2.usVoltageLevel = 0; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); *voltage = le16_to_cpu(args.v2.usVoltageLevel); break; default: DRM_ERROR("Unknown table version %d, %d\n", frev, crev); 
return -EINVAL; } return 0; } void radeon_atom_initialize_bios_scratch_regs(struct drm_device *dev) { struct radeon_device *rdev = dev->dev_private; uint32_t bios_2_scratch, bios_6_scratch; if (rdev->family >= CHIP_R600) { bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH); bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH); } else { bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH); bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); } /* let the bios control the backlight */ bios_2_scratch &= ~ATOM_S2_VRI_BRIGHT_ENABLE; /* tell the bios not to handle mode switching */ bios_6_scratch |= ATOM_S6_ACC_BLOCK_DISPLAY_SWITCH; if (rdev->family >= CHIP_R600) { WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); } else { WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch); WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch); } } void radeon_save_bios_scratch_regs(struct radeon_device *rdev) { uint32_t scratch_reg; int i; if (rdev->family >= CHIP_R600) scratch_reg = R600_BIOS_0_SCRATCH; else scratch_reg = RADEON_BIOS_0_SCRATCH; for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++) rdev->bios_scratch[i] = RREG32(scratch_reg + (i * 4)); } void radeon_restore_bios_scratch_regs(struct radeon_device *rdev) { uint32_t scratch_reg; int i; if (rdev->family >= CHIP_R600) scratch_reg = R600_BIOS_0_SCRATCH; else scratch_reg = RADEON_BIOS_0_SCRATCH; for (i = 0; i < RADEON_BIOS_NUM_SCRATCH; i++) WREG32(scratch_reg + (i * 4), rdev->bios_scratch[i]); } void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; uint32_t bios_6_scratch; if (rdev->family >= CHIP_R600) bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH); else bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); if (lock) { bios_6_scratch |= ATOM_S6_CRITICAL_STATE; bios_6_scratch &= ~ATOM_S6_ACC_MODE; } else { bios_6_scratch &= ~ATOM_S6_CRITICAL_STATE; bios_6_scratch |= ATOM_S6_ACC_MODE; } if (rdev->family >= CHIP_R600) 
WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); else WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch); } /* at some point we may want to break this out into individual functions */ void radeon_atombios_connected_scratch_regs(struct drm_connector *connector, struct drm_encoder *encoder, bool connected) { struct drm_device *dev = connector->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_connector *radeon_connector = to_radeon_connector(connector); struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t bios_0_scratch, bios_3_scratch, bios_6_scratch; if (rdev->family >= CHIP_R600) { bios_0_scratch = RREG32(R600_BIOS_0_SCRATCH); bios_3_scratch = RREG32(R600_BIOS_3_SCRATCH); bios_6_scratch = RREG32(R600_BIOS_6_SCRATCH); } else { bios_0_scratch = RREG32(RADEON_BIOS_0_SCRATCH); bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH); bios_6_scratch = RREG32(RADEON_BIOS_6_SCRATCH); } if ((radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_TV1_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("TV1 connected\n"); bios_3_scratch |= ATOM_S3_TV1_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_TV1; } else { DRM_DEBUG_KMS("TV1 disconnected\n"); bios_0_scratch &= ~ATOM_S0_TV1_MASK; bios_3_scratch &= ~ATOM_S3_TV1_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_TV1; } } if ((radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_CV_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("CV connected\n"); bios_3_scratch |= ATOM_S3_CV_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_CV; } else { DRM_DEBUG_KMS("CV disconnected\n"); bios_0_scratch &= ~ATOM_S0_CV_MASK; bios_3_scratch &= ~ATOM_S3_CV_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_CV; } } if ((radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_LCD1_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("LCD1 connected\n"); bios_0_scratch |= ATOM_S0_LCD1; bios_3_scratch |= ATOM_S3_LCD1_ACTIVE; bios_6_scratch |= 
ATOM_S6_ACC_REQ_LCD1; } else { DRM_DEBUG_KMS("LCD1 disconnected\n"); bios_0_scratch &= ~ATOM_S0_LCD1; bios_3_scratch &= ~ATOM_S3_LCD1_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_LCD1; } } if ((radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_CRT1_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("CRT1 connected\n"); bios_0_scratch |= ATOM_S0_CRT1_COLOR; bios_3_scratch |= ATOM_S3_CRT1_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_CRT1; } else { DRM_DEBUG_KMS("CRT1 disconnected\n"); bios_0_scratch &= ~ATOM_S0_CRT1_MASK; bios_3_scratch &= ~ATOM_S3_CRT1_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT1; } } if ((radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_CRT2_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("CRT2 connected\n"); bios_0_scratch |= ATOM_S0_CRT2_COLOR; bios_3_scratch |= ATOM_S3_CRT2_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_CRT2; } else { DRM_DEBUG_KMS("CRT2 disconnected\n"); bios_0_scratch &= ~ATOM_S0_CRT2_MASK; bios_3_scratch &= ~ATOM_S3_CRT2_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_CRT2; } } if ((radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP1_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("DFP1 connected\n"); bios_0_scratch |= ATOM_S0_DFP1; bios_3_scratch |= ATOM_S3_DFP1_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_DFP1; } else { DRM_DEBUG_KMS("DFP1 disconnected\n"); bios_0_scratch &= ~ATOM_S0_DFP1; bios_3_scratch &= ~ATOM_S3_DFP1_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP1; } } if ((radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP2_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("DFP2 connected\n"); bios_0_scratch |= ATOM_S0_DFP2; bios_3_scratch |= ATOM_S3_DFP2_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_DFP2; } else { DRM_DEBUG_KMS("DFP2 disconnected\n"); bios_0_scratch &= ~ATOM_S0_DFP2; bios_3_scratch &= ~ATOM_S3_DFP2_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP2; } } if 
((radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP3_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("DFP3 connected\n"); bios_0_scratch |= ATOM_S0_DFP3; bios_3_scratch |= ATOM_S3_DFP3_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_DFP3; } else { DRM_DEBUG_KMS("DFP3 disconnected\n"); bios_0_scratch &= ~ATOM_S0_DFP3; bios_3_scratch &= ~ATOM_S3_DFP3_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP3; } } if ((radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP4_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("DFP4 connected\n"); bios_0_scratch |= ATOM_S0_DFP4; bios_3_scratch |= ATOM_S3_DFP4_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_DFP4; } else { DRM_DEBUG_KMS("DFP4 disconnected\n"); bios_0_scratch &= ~ATOM_S0_DFP4; bios_3_scratch &= ~ATOM_S3_DFP4_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP4; } } if ((radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) && (radeon_connector->devices & ATOM_DEVICE_DFP5_SUPPORT)) { if (connected) { DRM_DEBUG_KMS("DFP5 connected\n"); bios_0_scratch |= ATOM_S0_DFP5; bios_3_scratch |= ATOM_S3_DFP5_ACTIVE; bios_6_scratch |= ATOM_S6_ACC_REQ_DFP5; } else { DRM_DEBUG_KMS("DFP5 disconnected\n"); bios_0_scratch &= ~ATOM_S0_DFP5; bios_3_scratch &= ~ATOM_S3_DFP5_ACTIVE; bios_6_scratch &= ~ATOM_S6_ACC_REQ_DFP5; } } if (rdev->family >= CHIP_R600) { WREG32(R600_BIOS_0_SCRATCH, bios_0_scratch); WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch); WREG32(R600_BIOS_6_SCRATCH, bios_6_scratch); } else { WREG32(RADEON_BIOS_0_SCRATCH, bios_0_scratch); WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch); WREG32(RADEON_BIOS_6_SCRATCH, bios_6_scratch); } } void radeon_atombios_encoder_crtc_scratch_regs(struct drm_encoder *encoder, int crtc) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t bios_3_scratch; if (rdev->family >= CHIP_R600) bios_3_scratch = 
RREG32(R600_BIOS_3_SCRATCH); else bios_3_scratch = RREG32(RADEON_BIOS_3_SCRATCH); if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) { bios_3_scratch &= ~ATOM_S3_TV1_CRTC_ACTIVE; bios_3_scratch |= (crtc << 18); } if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) { bios_3_scratch &= ~ATOM_S3_CV_CRTC_ACTIVE; bios_3_scratch |= (crtc << 24); } if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) { bios_3_scratch &= ~ATOM_S3_CRT1_CRTC_ACTIVE; bios_3_scratch |= (crtc << 16); } if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) { bios_3_scratch &= ~ATOM_S3_CRT2_CRTC_ACTIVE; bios_3_scratch |= (crtc << 20); } if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) { bios_3_scratch &= ~ATOM_S3_LCD1_CRTC_ACTIVE; bios_3_scratch |= (crtc << 17); } if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) { bios_3_scratch &= ~ATOM_S3_DFP1_CRTC_ACTIVE; bios_3_scratch |= (crtc << 19); } if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) { bios_3_scratch &= ~ATOM_S3_DFP2_CRTC_ACTIVE; bios_3_scratch |= (crtc << 23); } if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) { bios_3_scratch &= ~ATOM_S3_DFP3_CRTC_ACTIVE; bios_3_scratch |= (crtc << 25); } if (rdev->family >= CHIP_R600) WREG32(R600_BIOS_3_SCRATCH, bios_3_scratch); else WREG32(RADEON_BIOS_3_SCRATCH, bios_3_scratch); } void radeon_atombios_encoder_dpms_scratch_regs(struct drm_encoder *encoder, bool on) { struct drm_device *dev = encoder->dev; struct radeon_device *rdev = dev->dev_private; struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); uint32_t bios_2_scratch; if (rdev->family >= CHIP_R600) bios_2_scratch = RREG32(R600_BIOS_2_SCRATCH); else bios_2_scratch = RREG32(RADEON_BIOS_2_SCRATCH); if (radeon_encoder->devices & ATOM_DEVICE_TV1_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_TV1_DPMS_STATE; else bios_2_scratch |= ATOM_S2_TV1_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_CV_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_CV_DPMS_STATE; else bios_2_scratch |= 
ATOM_S2_CV_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_CRT1_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_CRT1_DPMS_STATE; else bios_2_scratch |= ATOM_S2_CRT1_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_CRT2_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_CRT2_DPMS_STATE; else bios_2_scratch |= ATOM_S2_CRT2_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_LCD1_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_LCD1_DPMS_STATE; else bios_2_scratch |= ATOM_S2_LCD1_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP1_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP1_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP1_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP2_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP2_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP3_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP3_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP3_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP4_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP4_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP4_DPMS_STATE; } if (radeon_encoder->devices & ATOM_DEVICE_DFP5_SUPPORT) { if (on) bios_2_scratch &= ~ATOM_S2_DFP5_DPMS_STATE; else bios_2_scratch |= ATOM_S2_DFP5_DPMS_STATE; } if (rdev->family >= CHIP_R600) WREG32(R600_BIOS_2_SCRATCH, bios_2_scratch); else WREG32(RADEON_BIOS_2_SCRATCH, bios_2_scratch); }
gpl-2.0
XiphosSystemsCorp/busybox
libbb/bb_strtonum.c
271
4075
/* vi: set sw=4 ts=4: */
/*
 * Utility routines.
 *
 * Copyright (C) 1999-2004 by Erik Andersen <andersen@codepoet.org>
 *
 * Licensed under GPLv2 or later, see file LICENSE in this source tree.
 */
#include "libbb.h"

/* Strict numeric-parsing wrappers around strto[u]l[l].
 *
 * On exit: errno = 0 only if there was non-empty, '\0' terminated value
 * errno = EINVAL if value was not '\0' terminated, but otherwise ok
 *    Return value is still valid, caller should just check whether end[0]
 *    is a valid terminating char for particular case. OTOH, if caller
 *    requires '\0' terminated input, [s]he can just check errno == 0.
 * errno = ERANGE if value had alphanumeric terminating char ("1234abcg").
 * errno = ERANGE if value is out of range, missing, etc.
 * errno = ERANGE if value had minus sign for strtouXX (even "-0" is not ok )
 *    return value is all-ones in this case.
 *
 * Test code:
 * char *endptr;
 * const char *minus = "-";
 * errno = 0;
 * bb_strtoi(minus, &endptr, 0); // must set ERANGE
 * printf("minus:%p endptr:%p errno:%d EINVAL:%d\n", minus, endptr, errno, EINVAL);
 * errno = 0;
 * bb_strtoi("-0-", &endptr, 0); // must set EINVAL and point to second '-'
 * printf("endptr[0]:%c errno:%d EINVAL:%d\n", endptr[0], errno, EINVAL);
 */

/* Signal "out of range / invalid" to the caller: sets errno = ERANGE and
 * returns the all-ones value (callers truncate it to their return width). */
static unsigned long long ret_ERANGE(void)
{
	errno = ERANGE; /* this ain't as small as it looks (on glibc) */
	return ULLONG_MAX;
}

/* Common post-conversion check: classify the terminating character at *endp
 * and set errno per the contract documented above. v is passed through
 * unchanged unless the input is rejected. */
static unsigned long long handle_errors(unsigned long long v, char **endp)
{
	char next_ch = **endp;

	/* errno is already set to ERANGE by strtoXXX if value overflowed */
	if (next_ch) {
		/* "1234abcg" or out-of-range? */
		if (isalnum(next_ch) || errno)
			return ret_ERANGE();
		/* good number, just suspicious terminator */
		errno = EINVAL;
	}
	return v;
}

/* Parse an unsigned long long. Rejects leading whitespace and leading '-'
 * up front (see comment below), unlike raw strtoull(). */
unsigned long long FAST_FUNC bb_strtoull(const char *arg, char **endp, int base)
{
	unsigned long long v;
	char *endptr;

	if (!endp) endp = &endptr;
	*endp = (char*) arg;

	/* strtoul(" -4200000000") returns 94967296, errno 0 (!) */
	/* I don't think that this is right. Preventing this... */
	if (!isalnum(arg[0])) return ret_ERANGE();

	/* not 100% correct for lib func, but convenient for the caller */
	errno = 0;
	v = strtoull(arg, endp, base);
	return handle_errors(v, endp);
}

/* Signed counterpart of bb_strtoull(); allows one leading '-' but rejects
 * a bare "-" (and leading whitespace) before calling strtoll(). */
long long FAST_FUNC bb_strtoll(const char *arg, char **endp, int base)
{
	unsigned long long v;
	char *endptr;
	char first;

	if (!endp) endp = &endptr;
	*endp = (char*) arg;

	/* Check for the weird "feature":
	 * a "-" string is apparently a valid "number" for strto[u]l[l]!
	 * It returns zero and errno is 0! :( */
	first = (arg[0] != '-' ? arg[0] : arg[1]);
	if (!isalnum(first)) return ret_ERANGE();

	errno = 0;
	v = strtoll(arg, endp, base);
	return handle_errors(v, endp);
}

/* The narrower-width variants below are only compiled when the type is
 * actually narrower than the one above, otherwise libbb aliases them
 * (presumably via macros in libbb.h -- not visible here). */
#if ULONG_MAX != ULLONG_MAX
/* Same contract as bb_strtoull(), for unsigned long. */
unsigned long FAST_FUNC bb_strtoul(const char *arg, char **endp, int base)
{
	unsigned long v;
	char *endptr;

	if (!endp) endp = &endptr;
	*endp = (char*) arg;
	if (!isalnum(arg[0])) return ret_ERANGE();

	errno = 0;
	v = strtoul(arg, endp, base);
	return handle_errors(v, endp);
}

/* Same contract as bb_strtoll(), for long. */
long FAST_FUNC bb_strtol(const char *arg, char **endp, int base)
{
	long v;
	char *endptr;
	char first;

	if (!endp) endp = &endptr;
	*endp = (char*) arg;
	first = (arg[0] != '-' ? arg[0] : arg[1]);
	if (!isalnum(first)) return ret_ERANGE();

	errno = 0;
	v = strtol(arg, endp, base);
	return handle_errors(v, endp);
}
#endif

#if UINT_MAX != ULONG_MAX
/* Same contract as bb_strtoull(), for unsigned int: converts as unsigned
 * long, then range-checks against UINT_MAX explicitly. */
unsigned FAST_FUNC bb_strtou(const char *arg, char **endp, int base)
{
	unsigned long v;
	char *endptr;

	if (!endp) endp = &endptr;
	*endp = (char*) arg;
	if (!isalnum(arg[0])) return ret_ERANGE();

	errno = 0;
	v = strtoul(arg, endp, base);
	if (v > UINT_MAX) return ret_ERANGE();
	return handle_errors(v, endp);
}

/* Same contract as bb_strtoll(), for int: converts as long, then
 * range-checks against INT_MIN/INT_MAX explicitly. */
int FAST_FUNC bb_strtoi(const char *arg, char **endp, int base)
{
	long v;
	char *endptr;
	char first;

	if (!endp) endp = &endptr;
	*endp = (char*) arg;
	first = (arg[0] != '-' ? arg[0] : arg[1]);
	if (!isalnum(first)) return ret_ERANGE();

	errno = 0;
	v = strtol(arg, endp, base);
	if (v > INT_MAX) return ret_ERANGE();
	if (v < INT_MIN) return ret_ERANGE();
	return handle_errors(v, endp);
}
#endif
gpl-2.0
blueboy/portalR2
dep/ACE_wrappers/ace/Proactor.cpp
271
29849
// $Id: Proactor.cpp 91368 2010-08-16 13:03:34Z mhengstmengel $ #include "ace/config-lite.h" #include "ace/Proactor.h" #if defined (ACE_HAS_WIN32_OVERLAPPED_IO) || defined (ACE_HAS_AIO_CALLS) // This only works on Win32 platforms and on Unix platforms with aio // calls. #include "ace/Auto_Ptr.h" #include "ace/Proactor_Impl.h" #include "ace/Object_Manager.h" #include "ace/Task_T.h" #if !defined (ACE_HAS_WINCE) && !defined (ACE_LACKS_ACE_SVCCONF) # include "ace/Service_Config.h" #endif /* !ACE_HAS_WINCE && !ACE_LACKS_ACE_SVCCONF */ #include "ace/Task_T.h" #include "ace/Log_Msg.h" #include "ace/Framework_Component.h" #if defined (ACE_HAS_AIO_CALLS) # include "ace/POSIX_Proactor.h" # include "ace/POSIX_CB_Proactor.h" #else /* !ACE_HAS_AIO_CALLS */ # include "ace/WIN32_Proactor.h" #endif /* ACE_HAS_AIO_CALLS */ #if !defined (__ACE_INLINE__) #include "ace/Proactor.inl" #endif /* __ACE_INLINE__ */ #include "ace/Auto_Event.h" ACE_BEGIN_VERSIONED_NAMESPACE_DECL /// Process-wide ACE_Proactor. ACE_Proactor *ACE_Proactor::proactor_ = 0; /// Controls whether the Proactor is deleted when we shut down (we can /// only delete it safely if we created it!) bool ACE_Proactor::delete_proactor_ = false; /** * @class ACE_Proactor_Timer_Handler * * @brief A Handler for timer. It helps in the management of timers * registered with the Proactor. * * This object has a thread that will wait on the earliest time * in a list of timers and an event. When a timer expires, the * thread will post a completion event on the port and go back * to waiting on the timer queue and event. If the event is * signaled, the thread will refresh the time it is currently * waiting on (in case the earliest time has changed). */ class ACE_Proactor_Timer_Handler : public ACE_Task<ACE_NULL_SYNCH> { /// Proactor has special privileges /// Access needed to: timer_event_ friend class ACE_Proactor; public: /// Constructor. ACE_Proactor_Timer_Handler (ACE_Proactor &proactor); /// Destructor. 
virtual ~ACE_Proactor_Timer_Handler (void); /// Proactor calls this to shut down the timer handler /// gracefully. Just calling the destructor alone doesnt do what /// <destroy> does. <destroy> make sure the thread exits properly. int destroy (void); protected: /// Run by a daemon thread to handle deferred processing. In other /// words, this method will do the waiting on the earliest timer and /// event. virtual int svc (void); /// Event to wait on. ACE_Auto_Event timer_event_; /// Proactor. ACE_Proactor &proactor_; /// Flag used to indicate when we are shutting down. int shutting_down_; }; ACE_Proactor_Timer_Handler::ACE_Proactor_Timer_Handler (ACE_Proactor &proactor) : ACE_Task <ACE_NULL_SYNCH> (&proactor.thr_mgr_), proactor_ (proactor), shutting_down_ (0) { } ACE_Proactor_Timer_Handler::~ACE_Proactor_Timer_Handler (void) { // Mark for closing down. this->shutting_down_ = 1; // Signal timer event. this->timer_event_.signal (); // Wait for the Timer Handler thread to exit. this->wait (); } int ACE_Proactor_Timer_Handler::svc (void) { ACE_Time_Value absolute_time; ACE_Time_Value relative_time; int result = 0; while (this->shutting_down_ == 0) { // Check whether the timer queue has any items in it. if (this->proactor_.timer_queue ()->is_empty () == 0) { // Get the earliest absolute time. absolute_time = this->proactor_.timer_queue ()->earliest_time (); // Get current time from timer queue since we don't know // which <gettimeofday> was used. ACE_Time_Value cur_time = this->proactor_.timer_queue ()->gettimeofday (); // Compare absolute time with curent time received from the // timer queue. if (absolute_time > cur_time) relative_time = absolute_time - cur_time; else relative_time = ACE_Time_Value::zero; // Block for relative time. result = this->timer_event_.wait (&relative_time, 0); } else // The timer queue has no entries, so wait indefinitely. result = this->timer_event_.wait (); // Check for timer expiries. 
if (result == -1) { switch (errno) { case ETIME: // timeout: expire timers this->proactor_.timer_queue ()->expire (); break; default: // Error. ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%N:%l:(%P | %t):%p\n"), ACE_TEXT ("ACE_Proactor_Timer_Handler::svc:wait failed")), -1); } } } return 0; } // ********************************************************************* ACE_Proactor_Handle_Timeout_Upcall::ACE_Proactor_Handle_Timeout_Upcall (void) : proactor_ (0) { } int ACE_Proactor_Handle_Timeout_Upcall::registration (TIMER_QUEUE &, ACE_Handler *, const void *) { return 0; } int ACE_Proactor_Handle_Timeout_Upcall::preinvoke (TIMER_QUEUE &, ACE_Handler *, const void *, int, const ACE_Time_Value &, const void *&) { return 0; } int ACE_Proactor_Handle_Timeout_Upcall::postinvoke (TIMER_QUEUE &, ACE_Handler *, const void *, int, const ACE_Time_Value &, const void *) { return 0; } int ACE_Proactor_Handle_Timeout_Upcall::timeout (TIMER_QUEUE &, ACE_Handler *handler, const void *act, int, const ACE_Time_Value &time) { if (this->proactor_ == 0) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("(%t) No Proactor set in ACE_Proactor_Handle_Timeout_Upcall,") ACE_TEXT (" no completion port to post timeout to?!@\n")), -1); // Create the Asynch_Timer. ACE_Asynch_Result_Impl *asynch_timer = this->proactor_->create_asynch_timer (handler->proxy (), act, time, ACE_INVALID_HANDLE, 0, -1); if (asynch_timer == 0) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("%N:%l:(%P | %t):%p\n"), ACE_TEXT ("ACE_Proactor_Handle_Timeout_Upcall::timeout:") ACE_TEXT ("create_asynch_timer failed")), -1); auto_ptr<ACE_Asynch_Result_Impl> safe_asynch_timer (asynch_timer); // Post a completion. if (-1 == safe_asynch_timer->post_completion (this->proactor_->implementation ())) ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("Failure in dealing with timers: ") ACE_TEXT ("PostQueuedCompletionStatus failed\n")), -1); // The completion has been posted. The proactor is now responsible // for managing the asynch_timer memory. 
(void) safe_asynch_timer.release (); return 0; } int ACE_Proactor_Handle_Timeout_Upcall::cancel_type (TIMER_QUEUE &, ACE_Handler *, int, int &) { // Do nothing return 0; } int ACE_Proactor_Handle_Timeout_Upcall::cancel_timer (TIMER_QUEUE &, ACE_Handler *, int, int) { // Do nothing return 0; } int ACE_Proactor_Handle_Timeout_Upcall::deletion (TIMER_QUEUE &, ACE_Handler *, const void *) { // Do nothing return 0; } int ACE_Proactor_Handle_Timeout_Upcall::proactor (ACE_Proactor &proactor) { if (this->proactor_ == 0) { this->proactor_ = &proactor; return 0; } else ACE_ERROR_RETURN ((LM_ERROR, ACE_TEXT ("ACE_Proactor_Handle_Timeout_Upcall is only suppose") ACE_TEXT (" to be used with ONE (and only one) Proactor\n")), -1); } // ********************************************************************* ACE_Proactor::ACE_Proactor (ACE_Proactor_Impl *implementation, bool delete_implementation, TIMER_QUEUE *tq) : implementation_ (0), delete_implementation_ (delete_implementation), timer_handler_ (0), timer_queue_ (0), delete_timer_queue_ (0), end_event_loop_ (0), event_loop_thread_count_ (0) { this->implementation (implementation); if (this->implementation () == 0) { #if defined (ACE_HAS_AIO_CALLS) // POSIX Proactor. # if defined (ACE_POSIX_AIOCB_PROACTOR) ACE_NEW (implementation, ACE_POSIX_AIOCB_Proactor); # elif defined (ACE_POSIX_SIG_PROACTOR) ACE_NEW (implementation, ACE_POSIX_SIG_Proactor); # else /* Default order: CB, SIG, AIOCB */ # if !defined(ACE_HAS_BROKEN_SIGEVENT_STRUCT) ACE_NEW (implementation, ACE_POSIX_CB_Proactor); # else # if defined(ACE_HAS_POSIX_REALTIME_SIGNALS) ACE_NEW (implementation, ACE_POSIX_SIG_Proactor); # else ACE_NEW (implementation, ACE_POSIX_AIOCB_Proactor); # endif /* ACE_HAS_POSIX_REALTIME_SIGNALS */ # endif /* !ACE_HAS_BROKEN_SIGEVENT_STRUCT */ # endif /* ACE_POSIX_AIOCB_PROACTOR */ #elif (defined (ACE_WIN32) && !defined (ACE_HAS_WINCE)) // WIN_Proactor. 
ACE_NEW (implementation, ACE_WIN32_Proactor); #endif /* ACE_HAS_AIO_CALLS */ this->implementation (implementation); this->delete_implementation_ = true; } // Set the timer queue. this->timer_queue (tq); // Create the timer handler ACE_NEW (this->timer_handler_, ACE_Proactor_Timer_Handler (*this)); // Activate <timer_handler>. if (this->timer_handler_->activate () == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT ("%N:%l:(%P | %t):%p\n"), ACE_TEXT ("Task::activate:could not create thread\n"))); } ACE_Proactor::~ACE_Proactor (void) { this->close (); } ACE_Proactor * ACE_Proactor::instance (size_t /* threads */) { ACE_TRACE ("ACE_Proactor::instance"); if (ACE_Proactor::proactor_ == 0) { // Perform Double-Checked Locking Optimization. ACE_MT (ACE_GUARD_RETURN (ACE_Recursive_Thread_Mutex, ace_mon, *ACE_Static_Object_Lock::instance (), 0)); if (ACE_Proactor::proactor_ == 0) { ACE_NEW_RETURN (ACE_Proactor::proactor_, ACE_Proactor, 0); ACE_Proactor::delete_proactor_ = true; ACE_REGISTER_FRAMEWORK_COMPONENT(ACE_Proactor, ACE_Proactor::proactor_); } } return ACE_Proactor::proactor_; } ACE_Proactor * ACE_Proactor::instance (ACE_Proactor * r, bool delete_proactor) { ACE_TRACE ("ACE_Proactor::instance"); ACE_MT (ACE_GUARD_RETURN (ACE_Recursive_Thread_Mutex, ace_mon, *ACE_Static_Object_Lock::instance (), 0)); ACE_Proactor *t = ACE_Proactor::proactor_; ACE_Proactor::delete_proactor_ = delete_proactor; ACE_Proactor::proactor_ = r; ACE_REGISTER_FRAMEWORK_COMPONENT(ACE_Proactor, ACE_Proactor::proactor_); return t; } void ACE_Proactor::close_singleton (void) { ACE_TRACE ("ACE_Proactor::close_singleton"); ACE_MT (ACE_GUARD (ACE_Recursive_Thread_Mutex, ace_mon, *ACE_Static_Object_Lock::instance ())); if (ACE_Proactor::delete_proactor_) { delete ACE_Proactor::proactor_; ACE_Proactor::proactor_ = 0; ACE_Proactor::delete_proactor_ = false; } } const ACE_TCHAR * ACE_Proactor::dll_name (void) { return ACE_TEXT ("ACE"); } const ACE_TCHAR * ACE_Proactor::name (void) { return ACE_TEXT ("ACE_Proactor"); } 
int ACE_Proactor::check_reconfiguration (ACE_Proactor *) { #if !defined (ACE_HAS_WINCE) && !defined (ACE_LACKS_ACE_SVCCONF) if (ACE_Service_Config::reconfig_occurred ()) { ACE_Service_Config::reconfigure (); return 1; } #endif /* ! ACE_HAS_WINCE || ! ACE_LACKS_ACE_SVCCONF */ return 0; } int ACE_Proactor::proactor_run_event_loop (PROACTOR_EVENT_HOOK eh) { ACE_TRACE ("ACE_Proactor::proactor_run_event_loop"); int result = 0; { ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, mutex_, -1)); // Early check. It is ok to do this without lock, since we care just // whether it is zero or non-zero. if (this->end_event_loop_ != 0) return 0; // First time you are in. Increment the thread count. this->event_loop_thread_count_ ++; } // Run the event loop. for (;;) { // Check the end loop flag. It is ok to do this without lock, // since we care just whether it is zero or non-zero. if (this->end_event_loop_ != 0) break; // <end_event_loop> is not set. Ready to do <handle_events>. result = this->handle_events (); if (eh != 0 && (*eh) (this)) continue; if (result == -1) break; } // Leaving the event loop. Decrement the thread count. { // Obtain the lock in the MT environments. ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, mutex_, -1)); // Decrement the thread count. this->event_loop_thread_count_ --; if (this->event_loop_thread_count_ > 0 && this->end_event_loop_ != 0) this->proactor_post_wakeup_completions (1); } return result; } // Handle events for -tv- time. handle_events updates -tv- to reflect // time elapsed, so do not return until -tv- == 0, or an error occurs. int ACE_Proactor::proactor_run_event_loop (ACE_Time_Value &tv, PROACTOR_EVENT_HOOK eh) { ACE_TRACE ("ACE_Proactor::proactor_run_event_loop"); int result = 0; { ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, mutex_, -1)); // Early check. It is ok to do this without lock, since we care just // whether it is zero or non-zero. 
if (this->end_event_loop_ != 0 || tv == ACE_Time_Value::zero) return 0; // First time you are in. Increment the thread count. this->event_loop_thread_count_ ++; } // Run the event loop. for (;;) { // Check the end loop flag. It is ok to do this without lock, // since we care just whether it is zero or non-zero. if (this->end_event_loop_ != 0) break; // <end_event_loop> is not set. Ready to do <handle_events>. result = this->handle_events (tv); if (eh != 0 && (*eh) (this)) continue; if (result == -1 || result == 0) break; } // Leaving the event loop. Decrement the thread count. { ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, mutex_, -1)); // Decrement the thread count. this->event_loop_thread_count_ --; if (this->event_loop_thread_count_ > 0 && this->end_event_loop_ != 0) this->proactor_post_wakeup_completions (1); } return result; } int ACE_Proactor::proactor_reset_event_loop(void) { ACE_TRACE ("ACE_Proactor::proactor_reset_event_loop"); // Obtain the lock in the MT environments. ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, mutex_, -1)); this->end_event_loop_ = 0; return 0; } int ACE_Proactor::proactor_end_event_loop (void) { ACE_TRACE ("ACE_Proactor::proactor_end_event_loop"); int how_many = 0; { // Obtain the lock, set the end flag and post the wakeup // completions. ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, mutex_, -1)); // Set the end flag. this->end_event_loop_ = 1; // Number of completions to post. how_many = this->event_loop_thread_count_; if (how_many == 0) return 0; } // Post completions to all the threads so that they will all wake // up. return this->proactor_post_wakeup_completions (how_many); } int ACE_Proactor::proactor_event_loop_done (void) { ACE_TRACE ("ACE_Proactor::proactor_event_loop_done"); ACE_MT (ACE_GUARD_RETURN (ACE_Thread_Mutex, ace_mon, mutex_, -1)); return this->end_event_loop_ != 0 ? 1 : 0 ; } int ACE_Proactor::close (void) { // Close the implementation. 
if (this->implementation ()->close () == -1) ACE_ERROR ((LM_ERROR, ACE_TEXT ("%N:%l:(%P | %t):%p\n"), ACE_TEXT ("ACE_Proactor::close: implementation close"))); // Delete the implementation. if (this->delete_implementation_) { delete this->implementation (); this->implementation_ = 0; } // Delete the timer handler. if (this->timer_handler_) { delete this->timer_handler_; this->timer_handler_ = 0; } // Delete the timer queue. if (this->delete_timer_queue_) { delete this->timer_queue_; this->timer_queue_ = 0; this->delete_timer_queue_ = 0; } return 0; } int ACE_Proactor::register_handle (ACE_HANDLE handle, const void *completion_key) { return this->implementation ()->register_handle (handle, completion_key); } long ACE_Proactor::schedule_timer (ACE_Handler &handler, const void *act, const ACE_Time_Value &time) { return this->schedule_timer (handler, act, time, ACE_Time_Value::zero); } long ACE_Proactor::schedule_repeating_timer (ACE_Handler &handler, const void *act, const ACE_Time_Value &interval) { return this->schedule_timer (handler, act, interval, interval); } long ACE_Proactor::schedule_timer (ACE_Handler &handler, const void *act, const ACE_Time_Value &time, const ACE_Time_Value &interval) { // absolute time. ACE_Time_Value absolute_time = this->timer_queue_->gettimeofday () + time; // Only one guy goes in here at a time ACE_MT (ACE_GUARD_RETURN (ACE_SYNCH_RECURSIVE_MUTEX, ace_mon, this->timer_queue_->mutex (), -1)); // Remember the old proactor. ACE_Proactor *old_proactor = handler.proactor (); // Assign *this* Proactor to the handler. 
handler.proactor (this); // Schedule the timer long result = this->timer_queue_->schedule (&handler, act, absolute_time, interval); if (result != -1) { // no failures: check to see if we are the earliest time if (this->timer_queue_->earliest_time () == absolute_time) // wake up the timer thread if (this->timer_handler_->timer_event_.signal () == -1) { // Cancel timer this->timer_queue_->cancel (result); result = -1; } } if (result == -1) { // Reset the old proactor in case of failures. handler.proactor (old_proactor); } return result; } int ACE_Proactor::cancel_timer (long timer_id, const void **arg, int dont_call_handle_close) { // No need to singal timer event here. Even if the cancel timer was // the earliest, we will have an extra wakeup. return this->timer_queue_->cancel (timer_id, arg, dont_call_handle_close); } int ACE_Proactor::cancel_timer (ACE_Handler &handler, int dont_call_handle_close) { // No need to signal timer event here. Even if the cancel timer was // the earliest, we will have an extra wakeup. return this->timer_queue_->cancel (&handler, dont_call_handle_close); } int ACE_Proactor::handle_events (ACE_Time_Value &wait_time) { return implementation ()->handle_events (wait_time); } int ACE_Proactor::handle_events (void) { return this->implementation ()->handle_events (); } int ACE_Proactor::wake_up_dispatch_threads (void) { return 0; } int ACE_Proactor::close_dispatch_threads (int) { return 0; } size_t ACE_Proactor::number_of_threads (void) const { return this->implementation ()->number_of_threads (); } void ACE_Proactor::number_of_threads (size_t threads) { this->implementation ()->number_of_threads (threads); } ACE_Proactor::TIMER_QUEUE * ACE_Proactor::timer_queue (void) const { return this->timer_queue_; } void ACE_Proactor::timer_queue (TIMER_QUEUE *tq) { // Cleanup old timer queue. if (this->delete_timer_queue_) { delete this->timer_queue_; this->delete_timer_queue_ = 0; } // New timer queue. 
if (tq == 0) { ACE_NEW (this->timer_queue_, TIMER_HEAP); this->delete_timer_queue_ = 1; } else { this->timer_queue_ = tq; this->delete_timer_queue_ = 0; } // Set the proactor in the timer queue's functor this->timer_queue_->upcall_functor ().proactor (*this); } ACE_HANDLE ACE_Proactor::get_handle (void) const { return this->implementation ()->get_handle (); } ACE_Proactor_Impl * ACE_Proactor::implementation (void) const { return this->implementation_; } ACE_Asynch_Read_Stream_Impl * ACE_Proactor::create_asynch_read_stream (void) { return this->implementation ()->create_asynch_read_stream (); } ACE_Asynch_Write_Stream_Impl * ACE_Proactor::create_asynch_write_stream (void) { return this->implementation ()->create_asynch_write_stream (); } ACE_Asynch_Read_Dgram_Impl * ACE_Proactor::create_asynch_read_dgram (void) { return this->implementation ()->create_asynch_read_dgram (); } ACE_Asynch_Write_Dgram_Impl * ACE_Proactor::create_asynch_write_dgram (void) { return this->implementation ()->create_asynch_write_dgram (); } ACE_Asynch_Read_File_Impl * ACE_Proactor::create_asynch_read_file (void) { return this->implementation ()->create_asynch_read_file (); } ACE_Asynch_Write_File_Impl * ACE_Proactor::create_asynch_write_file (void) { return this->implementation ()->create_asynch_write_file (); } ACE_Asynch_Accept_Impl * ACE_Proactor::create_asynch_accept (void) { return this->implementation ()->create_asynch_accept (); } ACE_Asynch_Connect_Impl * ACE_Proactor::create_asynch_connect (void) { return this->implementation ()->create_asynch_connect (); } ACE_Asynch_Transmit_File_Impl * ACE_Proactor::create_asynch_transmit_file (void) { return this->implementation ()->create_asynch_transmit_file (); } ACE_Asynch_Read_Stream_Result_Impl * ACE_Proactor::create_asynch_read_stream_result (ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block &message_block, u_long bytes_to_read, const void* act, ACE_HANDLE event, int priority, int signal_number) { return 
this->implementation ()->create_asynch_read_stream_result (handler_proxy, handle, message_block, bytes_to_read, act, event, priority, signal_number); } ACE_Asynch_Write_Stream_Result_Impl * ACE_Proactor::create_asynch_write_stream_result (ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block &message_block, u_long bytes_to_write, const void* act, ACE_HANDLE event, int priority, int signal_number) { return this->implementation ()->create_asynch_write_stream_result (handler_proxy, handle, message_block, bytes_to_write, act, event, priority, signal_number); } ACE_Asynch_Read_File_Result_Impl * ACE_Proactor::create_asynch_read_file_result (ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block &message_block, u_long bytes_to_read, const void* act, u_long offset, u_long offset_high, ACE_HANDLE event, int priority, int signal_number) { return this->implementation ()->create_asynch_read_file_result (handler_proxy, handle, message_block, bytes_to_read, act, offset, offset_high, event, priority, signal_number); } ACE_Asynch_Write_File_Result_Impl * ACE_Proactor::create_asynch_write_file_result (ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block &message_block, u_long bytes_to_write, const void* act, u_long offset, u_long offset_high, ACE_HANDLE event, int priority, int signal_number) { return this->implementation ()->create_asynch_write_file_result (handler_proxy, handle, message_block, bytes_to_write, act, offset, offset_high, event, priority, signal_number); } ACE_Asynch_Read_Dgram_Result_Impl * ACE_Proactor::create_asynch_read_dgram_result (ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block *message_block, size_t bytes_to_read, int flags, int protocol_family, const void* act, ACE_HANDLE event, int priority, int signal_number) { return this->implementation()->create_asynch_read_dgram_result (handler_proxy, handle, message_block, bytes_to_read, flags, protocol_family, act, event, 
priority, signal_number); } ACE_Asynch_Write_Dgram_Result_Impl * ACE_Proactor::create_asynch_write_dgram_result (ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE handle, ACE_Message_Block *message_block, size_t bytes_to_write, int flags, const void* act, ACE_HANDLE event, int priority, int signal_number) { return this->implementation()->create_asynch_write_dgram_result (handler_proxy, handle, message_block, bytes_to_write, flags, act, event, priority, signal_number); } ACE_Asynch_Accept_Result_Impl * ACE_Proactor::create_asynch_accept_result (ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE listen_handle, ACE_HANDLE accept_handle, ACE_Message_Block &message_block, u_long bytes_to_read, const void* act, ACE_HANDLE event, int priority, int signal_number) { return this->implementation ()->create_asynch_accept_result (handler_proxy, listen_handle, accept_handle, message_block, bytes_to_read, act, event, priority, signal_number); } ACE_Asynch_Connect_Result_Impl * ACE_Proactor::create_asynch_connect_result (ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE connect_handle, const void* act, ACE_HANDLE event, int priority, int signal_number) { return this->implementation ()->create_asynch_connect_result (handler_proxy, connect_handle, act, event, priority, signal_number); } ACE_Asynch_Transmit_File_Result_Impl * ACE_Proactor::create_asynch_transmit_file_result (ACE_Handler::Proxy_Ptr &handler_proxy, ACE_HANDLE socket, ACE_HANDLE file, ACE_Asynch_Transmit_File::Header_And_Trailer *header_and_trailer, u_long bytes_to_write, u_long offset, u_long offset_high, u_long bytes_per_send, u_long flags, const void *act, ACE_HANDLE event, int priority, int signal_number) { return this->implementation ()->create_asynch_transmit_file_result (handler_proxy, socket, file, header_and_trailer, bytes_to_write, offset, offset_high, bytes_per_send, flags, act, event, priority, signal_number); } ACE_Asynch_Result_Impl * ACE_Proactor::create_asynch_timer (ACE_Handler::Proxy_Ptr &handler_proxy, 
const void *act, const ACE_Time_Value &tv, ACE_HANDLE event, int priority, int signal_number) { return this->implementation ()->create_asynch_timer (handler_proxy, act, tv, event, priority, signal_number); } int ACE_Proactor::proactor_post_wakeup_completions (int how_many) { return this->implementation ()->post_wakeup_completions (how_many); } void ACE_Proactor::implementation (ACE_Proactor_Impl *implementation) { this->implementation_ = implementation; } ACE_END_VERSIONED_NAMESPACE_DECL #else /* !ACE_WIN32 || !ACE_HAS_AIO_CALLS */ ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_Proactor * ACE_Proactor::instance (size_t /* threads */) { return 0; } ACE_Proactor * ACE_Proactor::instance (ACE_Proactor *) { return 0; } void ACE_Proactor::close_singleton (void) { } int ACE_Proactor::run_event_loop (void) { // not implemented return -1; } int ACE_Proactor::run_event_loop (ACE_Time_Value &) { // not implemented return -1; } int ACE_Proactor::end_event_loop (void) { // not implemented return -1; } sig_atomic_t ACE_Proactor::event_loop_done (void) { return sig_atomic_t (1); } ACE_END_VERSIONED_NAMESPACE_DECL #endif /* ACE_HAS_WIN32_OVERLAPPED_IO || ACE_HAS_AIO_CALLS */
gpl-2.0
mericon/Xp_Kernel_LGH850
virt/drivers/mtd/maps/bfin-async-flash.c
527
5083
/* * drivers/mtd/maps/bfin-async-flash.c * * Handle the case where flash memory and ethernet mac/phy are * mapped onto the same async bank. The BF533-STAMP does this * for example. All board-specific configuration goes in your * board resources file. * * Copyright 2000 Nicolas Pitre <nico@fluxnic.net> * Copyright 2005-2008 Analog Devices Inc. * * Enter bugs at http://blackfin.uclinux.org/ * * Licensed under the GPL-2 or later. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/types.h> #include <asm/blackfin.h> #include <linux/gpio.h> #include <linux/io.h> #include <asm/unaligned.h> #define pr_devinit(fmt, args...) \ ({ static const char __fmt[] = fmt; printk(__fmt, ## args); }) #define DRIVER_NAME "bfin-async-flash" struct async_state { struct mtd_info *mtd; struct map_info map; int enet_flash_pin; uint32_t flash_ambctl0, flash_ambctl1; uint32_t save_ambctl0, save_ambctl1; unsigned long irq_flags; }; static void switch_to_flash(struct async_state *state) { local_irq_save(state->irq_flags); gpio_set_value(state->enet_flash_pin, 0); state->save_ambctl0 = bfin_read_EBIU_AMBCTL0(); state->save_ambctl1 = bfin_read_EBIU_AMBCTL1(); bfin_write_EBIU_AMBCTL0(state->flash_ambctl0); bfin_write_EBIU_AMBCTL1(state->flash_ambctl1); SSYNC(); } static void switch_back(struct async_state *state) { bfin_write_EBIU_AMBCTL0(state->save_ambctl0); bfin_write_EBIU_AMBCTL1(state->save_ambctl1); SSYNC(); gpio_set_value(state->enet_flash_pin, 1); local_irq_restore(state->irq_flags); } static map_word bfin_flash_read(struct map_info *map, unsigned long ofs) { struct async_state *state = (struct async_state *)map->map_priv_1; uint16_t word; map_word test; switch_to_flash(state); word = readw(map->virt + ofs); switch_back(state); test.x[0] = word; return test; } static void 
bfin_flash_copy_from(struct map_info *map, void *to, unsigned long from, ssize_t len) { struct async_state *state = (struct async_state *)map->map_priv_1; switch_to_flash(state); memcpy(to, map->virt + from, len); switch_back(state); } static void bfin_flash_write(struct map_info *map, map_word d1, unsigned long ofs) { struct async_state *state = (struct async_state *)map->map_priv_1; uint16_t d; d = d1.x[0]; switch_to_flash(state); writew(d, map->virt + ofs); SSYNC(); switch_back(state); } static void bfin_flash_copy_to(struct map_info *map, unsigned long to, const void *from, ssize_t len) { struct async_state *state = (struct async_state *)map->map_priv_1; switch_to_flash(state); memcpy(map->virt + to, from, len); SSYNC(); switch_back(state); } static const char * const part_probe_types[] = { "cmdlinepart", "RedBoot", NULL }; static int bfin_flash_probe(struct platform_device *pdev) { int ret; struct physmap_flash_data *pdata = dev_get_platdata(&pdev->dev); struct resource *memory = platform_get_resource(pdev, IORESOURCE_MEM, 0); struct resource *flash_ambctl = platform_get_resource(pdev, IORESOURCE_MEM, 1); struct async_state *state; state = kzalloc(sizeof(*state), GFP_KERNEL); if (!state) return -ENOMEM; state->map.name = DRIVER_NAME; state->map.read = bfin_flash_read; state->map.copy_from = bfin_flash_copy_from; state->map.write = bfin_flash_write; state->map.copy_to = bfin_flash_copy_to; state->map.bankwidth = pdata->width; state->map.size = resource_size(memory); state->map.virt = (void __iomem *)memory->start; state->map.phys = memory->start; state->map.map_priv_1 = (unsigned long)state; state->enet_flash_pin = platform_get_irq(pdev, 0); state->flash_ambctl0 = flash_ambctl->start; state->flash_ambctl1 = flash_ambctl->end; if (gpio_request(state->enet_flash_pin, DRIVER_NAME)) { pr_devinit(KERN_ERR DRIVER_NAME ": Failed to request gpio %d\n", state->enet_flash_pin); kfree(state); return -EBUSY; } gpio_direction_output(state->enet_flash_pin, 1); 
pr_devinit(KERN_NOTICE DRIVER_NAME ": probing %d-bit flash bus\n", state->map.bankwidth * 8); state->mtd = do_map_probe(memory->name, &state->map); if (!state->mtd) { gpio_free(state->enet_flash_pin); kfree(state); return -ENXIO; } mtd_device_parse_register(state->mtd, part_probe_types, NULL, pdata->parts, pdata->nr_parts); platform_set_drvdata(pdev, state); return 0; } static int bfin_flash_remove(struct platform_device *pdev) { struct async_state *state = platform_get_drvdata(pdev); gpio_free(state->enet_flash_pin); mtd_device_unregister(state->mtd); map_destroy(state->mtd); kfree(state); return 0; } static struct platform_driver bfin_flash_driver = { .probe = bfin_flash_probe, .remove = bfin_flash_remove, .driver = { .name = DRIVER_NAME, }, }; module_platform_driver(bfin_flash_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MTD map driver for Blackfins with flash/ethernet on same async bank");
gpl-2.0
Hive-Resurrection/kernel_htc_flounder
net/ipv6/ip6_input.c
1295
8683
/* * IPv6 input * Linux INET6 implementation * * Authors: * Pedro Roque <roque@di.fc.ul.pt> * Ian P. Morris <I.P.Morris@soton.ac.uk> * * Based in linux/net/ipv4/ip_input.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ /* Changes * * Mitsuru KANDA @USAGI and * YOSHIFUJI Hideaki @USAGI: Remove ipv6_parse_exthdrs(). */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/icmpv6.h> #include <linux/mroute6.h> #include <linux/slab.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <net/sock.h> #include <net/snmp.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/rawv6.h> #include <net/ndisc.h> #include <net/ip6_route.h> #include <net/addrconf.h> #include <net/xfrm.h> int ip6_rcv_finish(struct sk_buff *skb) { if (sysctl_ip_early_demux && !skb_dst(skb) && skb->sk == NULL) { const struct inet6_protocol *ipprot; ipprot = rcu_dereference(inet6_protos[ipv6_hdr(skb)->nexthdr]); if (ipprot && ipprot->early_demux) ipprot->early_demux(skb); } if (!skb_dst(skb)) ip6_route_input(skb); return dst_input(skb); } int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev) { const struct ipv6hdr *hdr; u32 pkt_len; struct inet6_dev *idev; struct net *net = dev_net(skb->dev); if (skb->pkt_type == PACKET_OTHERHOST) { kfree_skb(skb); return NET_RX_DROP; } rcu_read_lock(); idev = __in6_dev_get(skb->dev); IP6_UPD_PO_STATS_BH(net, idev, IPSTATS_MIB_IN, skb->len); if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL || !idev || unlikely(idev->cnf.disable_ipv6)) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS); goto drop; } 
memset(IP6CB(skb), 0, sizeof(struct inet6_skb_parm)); /* * Store incoming device index. When the packet will * be queued, we cannot refer to skb->dev anymore. * * BTW, when we send a packet for our own local address on a * non-loopback interface (e.g. ethX), it is being delivered * via the loopback interface (lo) here; skb->dev = loopback_dev. * It, however, should be considered as if it is being * arrived via the sending interface (ethX), because of the * nature of scoping architecture. --yoshfuji */ IP6CB(skb)->iif = skb_dst(skb) ? ip6_dst_idev(skb_dst(skb))->dev->ifindex : dev->ifindex; if (unlikely(!pskb_may_pull(skb, sizeof(*hdr)))) goto err; hdr = ipv6_hdr(skb); if (hdr->version != 6) goto err; /* * RFC4291 2.5.3 * A packet received on an interface with a destination address * of loopback must be dropped. */ if (!(dev->flags & IFF_LOOPBACK) && ipv6_addr_loopback(&hdr->daddr)) goto err; /* RFC4291 Errata ID: 3480 * Interface-Local scope spans only a single interface on a * node and is useful only for loopback transmission of * multicast. Packets with interface-local scope received * from another node must be discarded. */ if (!(skb->pkt_type == PACKET_LOOPBACK || dev->flags & IFF_LOOPBACK) && ipv6_addr_is_multicast(&hdr->daddr) && IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1) goto err; /* RFC4291 2.7 * Nodes must not originate a packet to a multicast address whose scope * field contains the reserved value 0; if such a packet is received, it * must be silently dropped. */ if (ipv6_addr_is_multicast(&hdr->daddr) && IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 0) goto err; /* * RFC4291 2.7 * Multicast addresses must not be used as source addresses in IPv6 * packets or appear in any Routing header. 
*/ if (ipv6_addr_is_multicast(&hdr->saddr)) goto err; skb->transport_header = skb->network_header + sizeof(*hdr); IP6CB(skb)->nhoff = offsetof(struct ipv6hdr, nexthdr); pkt_len = ntohs(hdr->payload_len); /* pkt_len may be zero if Jumbo payload option is present */ if (pkt_len || hdr->nexthdr != NEXTHDR_HOP) { if (pkt_len + sizeof(struct ipv6hdr) > skb->len) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INTRUNCATEDPKTS); goto drop; } if (pskb_trim_rcsum(skb, pkt_len + sizeof(struct ipv6hdr))) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); goto drop; } hdr = ipv6_hdr(skb); } if (hdr->nexthdr == NEXTHDR_HOP) { if (ipv6_parse_hopopts(skb) < 0) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); rcu_read_unlock(); return NET_RX_DROP; } } rcu_read_unlock(); /* Must drop socket now because of tproxy. */ skb_orphan(skb); return NF_HOOK(NFPROTO_IPV6, NF_INET_PRE_ROUTING, skb, dev, NULL, ip6_rcv_finish); err: IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INHDRERRORS); drop: rcu_read_unlock(); kfree_skb(skb); return NET_RX_DROP; } /* * Deliver the packet to the host */ static int ip6_input_finish(struct sk_buff *skb) { struct net *net = dev_net(skb_dst(skb)->dev); const struct inet6_protocol *ipprot; struct inet6_dev *idev; unsigned int nhoff; int nexthdr; bool raw; /* * Parse extension headers */ rcu_read_lock(); resubmit: idev = ip6_dst_idev(skb_dst(skb)); if (!pskb_pull(skb, skb_transport_offset(skb))) goto discard; nhoff = IP6CB(skb)->nhoff; nexthdr = skb_network_header(skb)[nhoff]; raw = raw6_local_deliver(skb, nexthdr); if ((ipprot = rcu_dereference(inet6_protos[nexthdr])) != NULL) { int ret; if (ipprot->flags & INET6_PROTO_FINAL) { const struct ipv6hdr *hdr; /* Free reference early: we don't need it any more, and it may hold ip_conntrack module loaded indefinitely. 
*/ nf_reset(skb); skb_postpull_rcsum(skb, skb_network_header(skb), skb_network_header_len(skb)); hdr = ipv6_hdr(skb); if (ipv6_addr_is_multicast(&hdr->daddr) && !ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, &hdr->saddr) && !ipv6_is_mld(skb, nexthdr, skb_network_header_len(skb))) goto discard; } if (!(ipprot->flags & INET6_PROTO_NOPOLICY) && !xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) goto discard; ret = ipprot->handler(skb); if (ret > 0) goto resubmit; else if (ret == 0) IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); } else { if (!raw) { if (xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INUNKNOWNPROTOS); icmpv6_send(skb, ICMPV6_PARAMPROB, ICMPV6_UNK_NEXTHDR, nhoff); } kfree_skb(skb); } else { IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDELIVERS); consume_skb(skb); } } rcu_read_unlock(); return 0; discard: IP6_INC_STATS_BH(net, idev, IPSTATS_MIB_INDISCARDS); rcu_read_unlock(); kfree_skb(skb); return 0; } int ip6_input(struct sk_buff *skb) { return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_IN, skb, skb->dev, NULL, ip6_input_finish); } int ip6_mc_input(struct sk_buff *skb) { const struct ipv6hdr *hdr; bool deliver; IP6_UPD_PO_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INMCAST, skb->len); hdr = ipv6_hdr(skb); deliver = ipv6_chk_mcast_addr(skb->dev, &hdr->daddr, NULL); #ifdef CONFIG_IPV6_MROUTE /* * IPv6 multicast router mode is now supported ;) */ if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && !(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) && likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) { /* * Okay, we try to forward - split and duplicate * packets. */ struct sk_buff *skb2; struct inet6_skb_parm *opt = IP6CB(skb); /* Check for MLD */ if (unlikely(opt->flags & IP6SKB_ROUTERALERT)) { /* Check if this is a mld message */ u8 nexthdr = hdr->nexthdr; __be16 frag_off; int offset; /* Check if the value of Router Alert * is for MLD (0x0000). 
*/ if (opt->ra == htons(IPV6_OPT_ROUTERALERT_MLD)) { deliver = false; if (!ipv6_ext_hdr(nexthdr)) { /* BUG */ goto out; } offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr, &frag_off); if (offset < 0) goto out; if (!ipv6_is_mld(skb, nexthdr, offset)) goto out; deliver = true; } /* unknown RA - process it normally */ } if (deliver) skb2 = skb_clone(skb, GFP_ATOMIC); else { skb2 = skb; skb = NULL; } if (skb2) { ip6_mr_input(skb2); } } out: #endif if (likely(deliver)) ip6_input(skb); else { /* discard */ kfree_skb(skb); } return 0; }
gpl-2.0
iamroot12C/linux
drivers/media/usb/as102/as102_fw.c
1295
5497
/* * Abilis Systems Single DVB-T Receiver * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com> * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/ctype.h> #include <linux/delay.h> #include <linux/firmware.h> #include "as102_drv.h" #include "as102_fw.h" static const char as102_st_fw1[] = "as102_data1_st.hex"; static const char as102_st_fw2[] = "as102_data2_st.hex"; static const char as102_dt_fw1[] = "as102_data1_dt.hex"; static const char as102_dt_fw2[] = "as102_data2_dt.hex"; static unsigned char atohx(unsigned char *dst, char *src) { unsigned char value = 0; char msb = tolower(*src) - '0'; char lsb = tolower(*(src + 1)) - '0'; if (msb > 9) msb -= 7; if (lsb > 9) lsb -= 7; *dst = value = ((msb & 0xF) << 4) | (lsb & 0xF); return value; } /* * Parse INTEL HEX firmware file to extract address and data. 
*/ static int parse_hex_line(unsigned char *fw_data, unsigned char *addr, unsigned char *data, int *dataLength, unsigned char *addr_has_changed) { int count = 0; unsigned char *src, dst; if (*fw_data++ != ':') { pr_err("invalid firmware file\n"); return -EFAULT; } /* locate end of line */ for (src = fw_data; *src != '\n'; src += 2) { atohx(&dst, src); /* parse line to split addr / data */ switch (count) { case 0: *dataLength = dst; break; case 1: addr[2] = dst; break; case 2: addr[3] = dst; break; case 3: /* check if data is an address */ if (dst == 0x04) *addr_has_changed = 1; else *addr_has_changed = 0; break; case 4: case 5: if (*addr_has_changed) addr[(count - 4)] = dst; else data[(count - 4)] = dst; break; default: data[(count - 4)] = dst; break; } count++; } /* return read value + ':' + '\n' */ return (count * 2) + 2; } static int as102_firmware_upload(struct as10x_bus_adapter_t *bus_adap, unsigned char *cmd, const struct firmware *firmware) { struct as10x_fw_pkt_t fw_pkt; int total_read_bytes = 0, errno = 0; unsigned char addr_has_changed = 0; for (total_read_bytes = 0; total_read_bytes < firmware->size; ) { int read_bytes = 0, data_len = 0; /* parse intel hex line */ read_bytes = parse_hex_line( (u8 *) (firmware->data + total_read_bytes), fw_pkt.raw.address, fw_pkt.raw.data, &data_len, &addr_has_changed); if (read_bytes <= 0) goto error; /* detect the end of file */ total_read_bytes += read_bytes; if (total_read_bytes == firmware->size) { fw_pkt.u.request[0] = 0x00; fw_pkt.u.request[1] = 0x03; /* send EOF command */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) &fw_pkt, 2, 0); if (errno < 0) goto error; } else { if (!addr_has_changed) { /* prepare command to send */ fw_pkt.u.request[0] = 0x00; fw_pkt.u.request[1] = 0x01; data_len += sizeof(fw_pkt.u.request); data_len += sizeof(fw_pkt.raw.address); /* send cmd to device */ errno = bus_adap->ops->upload_fw_pkt(bus_adap, (uint8_t *) &fw_pkt, data_len, 0); if (errno < 0) goto error; } } } error: 
return (errno == 0) ? total_read_bytes : errno; } int as102_fw_upload(struct as10x_bus_adapter_t *bus_adap) { int errno = -EFAULT; const struct firmware *firmware = NULL; unsigned char *cmd_buf = NULL; const char *fw1, *fw2; struct usb_device *dev = bus_adap->usb_dev; /* select fw file to upload */ if (dual_tuner) { fw1 = as102_dt_fw1; fw2 = as102_dt_fw2; } else { fw1 = as102_st_fw1; fw2 = as102_st_fw2; } /* allocate buffer to store firmware upload command and data */ cmd_buf = kzalloc(MAX_FW_PKT_SIZE, GFP_KERNEL); if (cmd_buf == NULL) { errno = -ENOMEM; goto error; } /* request kernel to locate firmware file: part1 */ errno = request_firmware(&firmware, fw1, &dev->dev); if (errno < 0) { pr_err("%s: unable to locate firmware file: %s\n", DRIVER_NAME, fw1); goto error; } /* initiate firmware upload */ errno = as102_firmware_upload(bus_adap, cmd_buf, firmware); if (errno < 0) { pr_err("%s: error during firmware upload part1\n", DRIVER_NAME); goto error; } pr_info("%s: firmware: %s loaded with success\n", DRIVER_NAME, fw1); release_firmware(firmware); /* wait for boot to complete */ mdelay(100); /* request kernel to locate firmware file: part2 */ errno = request_firmware(&firmware, fw2, &dev->dev); if (errno < 0) { pr_err("%s: unable to locate firmware file: %s\n", DRIVER_NAME, fw2); goto error; } /* initiate firmware upload */ errno = as102_firmware_upload(bus_adap, cmd_buf, firmware); if (errno < 0) { pr_err("%s: error during firmware upload part2\n", DRIVER_NAME); goto error; } pr_info("%s: firmware: %s loaded with success\n", DRIVER_NAME, fw2); error: kfree(cmd_buf); release_firmware(firmware); return errno; }
gpl-2.0
Tk-Glitch/Glitch_Flo_AOSP
net/mac80211/wpa.c
1551
17101
/* * Copyright 2002-2004, Instant802 Networks, Inc. * Copyright 2008, Jouni Malinen <j@w1.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/netdevice.h> #include <linux/types.h> #include <linux/skbuff.h> #include <linux/compiler.h> #include <linux/ieee80211.h> #include <linux/gfp.h> #include <asm/unaligned.h> #include <net/mac80211.h> #include <crypto/aes.h> #include "ieee80211_i.h" #include "michael.h" #include "tkip.h" #include "aes_ccm.h" #include "aes_cmac.h" #include "wpa.h" ieee80211_tx_result ieee80211_tx_h_michael_mic_add(struct ieee80211_tx_data *tx) { u8 *data, *key, *mic; size_t data_len; unsigned int hdrlen; struct ieee80211_hdr *hdr; struct sk_buff *skb = tx->skb; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int tail; hdr = (struct ieee80211_hdr *)skb->data; if (!tx->key || tx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || skb->len < 24 || !ieee80211_is_data_present(hdr->frame_control)) return TX_CONTINUE; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen) return TX_DROP; data = skb->data + hdrlen; data_len = skb->len - hdrlen; if (unlikely(info->flags & IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) { /* Need to use software crypto for the test */ info->control.hw_key = NULL; } if (info->control.hw_key && (info->flags & IEEE80211_TX_CTL_DONTFRAG || tx->local->ops->set_frag_threshold) && !(tx->key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { /* hwaccel - with no need for SW-generated MMIC */ return TX_CONTINUE; } tail = MICHAEL_MIC_LEN; if (!info->control.hw_key) tail += TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < TKIP_IV_LEN)) return TX_DROP; key = &tx->key->conf.key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]; mic = skb_put(skb, MICHAEL_MIC_LEN); michael_mic(key, hdr, data, data_len, mic); if (unlikely(info->flags & 
IEEE80211_TX_INTFL_TKIP_MIC_FAILURE)) mic[0]++; return TX_CONTINUE; } ieee80211_rx_result ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) { u8 *data, *key = NULL; size_t data_len; unsigned int hdrlen; u8 mic[MICHAEL_MIC_LEN]; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; /* * it makes no sense to check for MIC errors on anything other * than data frames. */ if (!ieee80211_is_data_present(hdr->frame_control)) return RX_CONTINUE; /* * No way to verify the MIC if the hardware stripped it or * the IV with the key index. In this case we have solely rely * on the driver to set RX_FLAG_MMIC_ERROR in the event of a * MIC failure report. */ if (status->flag & (RX_FLAG_MMIC_STRIPPED | RX_FLAG_IV_STRIPPED)) { if (status->flag & RX_FLAG_MMIC_ERROR) goto mic_fail; if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key && rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP) goto update_iv; return RX_CONTINUE; } /* * Some hardware seems to generate Michael MIC failure reports; even * though, the frame was not encrypted with TKIP and therefore has no * MIC. Ignore the flag them to avoid triggering countermeasures. */ if (!rx->key || rx->key->conf.cipher != WLAN_CIPHER_SUITE_TKIP || !(status->flag & RX_FLAG_DECRYPTED)) return RX_CONTINUE; if (rx->sdata->vif.type == NL80211_IFTYPE_AP && rx->key->conf.keyidx) { /* * APs with pairwise keys should never receive Michael MIC * errors for non-zero keyidx because these are reserved for * group keys and only the AP is sending real multicast * frames in the BSS. 
( */ return RX_DROP_UNUSABLE; } if (status->flag & RX_FLAG_MMIC_ERROR) goto mic_fail; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (skb->len < hdrlen + MICHAEL_MIC_LEN) return RX_DROP_UNUSABLE; if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; hdr = (void *)skb->data; data = skb->data + hdrlen; data_len = skb->len - hdrlen - MICHAEL_MIC_LEN; key = &rx->key->conf.key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]; michael_mic(key, hdr, data, data_len, mic); if (memcmp(mic, data + data_len, MICHAEL_MIC_LEN) != 0) goto mic_fail; /* remove Michael MIC from payload */ skb_trim(skb, skb->len - MICHAEL_MIC_LEN); update_iv: /* update IV in key information to be able to detect replays */ rx->key->u.tkip.rx[rx->security_idx].iv32 = rx->tkip_iv32; rx->key->u.tkip.rx[rx->security_idx].iv16 = rx->tkip_iv16; return RX_CONTINUE; mic_fail: /* * In some cases the key can be unset - e.g. a multicast packet, in * a driver that supports HW encryption. Send up the key idx only if * the key is set. */ mac80211_ev_michael_mic_failure(rx->sdata, rx->key ? 
rx->key->conf.keyidx : -1, (void *) skb->data, NULL, GFP_ATOMIC); return RX_DROP_UNUSABLE; } static int tkip_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); unsigned long flags; unsigned int hdrlen; int len, tail; u8 *pos; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) { /* hwaccel - with no need for software-generated IV */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = TKIP_ICV_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < TKIP_IV_LEN)) return -1; pos = skb_push(skb, TKIP_IV_LEN); memmove(pos, pos + TKIP_IV_LEN, hdrlen); pos += hdrlen; /* Increase IV for the frame */ spin_lock_irqsave(&key->u.tkip.txlock, flags); key->u.tkip.tx.iv16++; if (key->u.tkip.tx.iv16 == 0) key->u.tkip.tx.iv32++; pos = ieee80211_tkip_add_iv(pos, key); spin_unlock_irqrestore(&key->u.tkip.txlock, flags); /* hwaccel - with software IV */ if (info->control.hw_key) return 0; /* Add room for ICV */ skb_put(skb, TKIP_ICV_LEN); return ieee80211_tkip_encrypt_data(tx->local->wep_tx_tfm, key, skb, pos, len); } ieee80211_tx_result ieee80211_crypto_tkip_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; ieee80211_tx_set_protected(tx); skb_queue_walk(&tx->skbs, skb) { if (tkip_encrypt_skb(tx, skb) < 0) return TX_DROP; } return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_tkip_decrypt(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) rx->skb->data; int hdrlen, res, hwaccel = 0; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control)) return RX_CONTINUE; if (!rx->sta || 
skb->len - hdrlen < 12) return RX_DROP_UNUSABLE; /* it may be possible to optimize this a bit more */ if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; hdr = (void *)skb->data; /* * Let TKIP code verify IV, but skip decryption. * In the case where hardware checks the IV as well, * we don't even get here, see ieee80211_rx_h_decrypt() */ if (status->flag & RX_FLAG_DECRYPTED) hwaccel = 1; res = ieee80211_tkip_decrypt_data(rx->local->wep_rx_tfm, key, skb->data + hdrlen, skb->len - hdrlen, rx->sta->sta.addr, hdr->addr1, hwaccel, rx->security_idx, &rx->tkip_iv32, &rx->tkip_iv16); if (res != TKIP_DECRYPT_OK) return RX_DROP_UNUSABLE; /* Trim ICV */ skb_trim(skb, skb->len - TKIP_ICV_LEN); /* Remove IV */ memmove(skb->data + TKIP_IV_LEN, skb->data, hdrlen); skb_pull(skb, TKIP_IV_LEN); return RX_CONTINUE; } static void ccmp_special_blocks(struct sk_buff *skb, u8 *pn, u8 *scratch, int encrypted) { __le16 mask_fc; int a4_included, mgmt; u8 qos_tid; u8 *b_0, *aad; u16 data_len, len_a; unsigned int hdrlen; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; memset(scratch, 0, 6 * AES_BLOCK_SIZE); b_0 = scratch + 3 * AES_BLOCK_SIZE; aad = scratch + 4 * AES_BLOCK_SIZE; /* * Mask FC: zero subtype b4 b5 b6 (if not mgmt) * Retry, PwrMgt, MoreData; set Protected */ mgmt = ieee80211_is_mgmt(hdr->frame_control); mask_fc = hdr->frame_control; mask_fc &= ~cpu_to_le16(IEEE80211_FCTL_RETRY | IEEE80211_FCTL_PM | IEEE80211_FCTL_MOREDATA); if (!mgmt) mask_fc &= ~cpu_to_le16(0x0070); mask_fc |= cpu_to_le16(IEEE80211_FCTL_PROTECTED); hdrlen = ieee80211_hdrlen(hdr->frame_control); len_a = hdrlen - 2; a4_included = ieee80211_has_a4(hdr->frame_control); if (ieee80211_is_data_qos(hdr->frame_control)) qos_tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; else qos_tid = 0; data_len = skb->len - hdrlen - CCMP_HDR_LEN; if (encrypted) data_len -= CCMP_MIC_LEN; /* First block, b_0 */ b_0[0] = 0x59; /* flags: Adata: 1, M: 011, L: 001 */ /* Nonce: Nonce Flags | A2 | PN * Nonce 
Flags: Priority (b0..b3) | Management (b4) | Reserved (b5..b7) */ b_0[1] = qos_tid | (mgmt << 4); memcpy(&b_0[2], hdr->addr2, ETH_ALEN); memcpy(&b_0[8], pn, CCMP_PN_LEN); /* l(m) */ put_unaligned_be16(data_len, &b_0[14]); /* AAD (extra authenticate-only data) / masked 802.11 header * FC | A1 | A2 | A3 | SC | [A4] | [QC] */ put_unaligned_be16(len_a, &aad[0]); put_unaligned(mask_fc, (__le16 *)&aad[2]); memcpy(&aad[4], &hdr->addr1, 3 * ETH_ALEN); /* Mask Seq#, leave Frag# */ aad[22] = *((u8 *) &hdr->seq_ctrl) & 0x0f; aad[23] = 0; if (a4_included) { memcpy(&aad[24], hdr->addr4, ETH_ALEN); aad[30] = qos_tid; aad[31] = 0; } else { memset(&aad[24], 0, ETH_ALEN + IEEE80211_QOS_CTL_LEN); aad[24] = qos_tid; } } static inline void ccmp_pn2hdr(u8 *hdr, u8 *pn, int key_id) { hdr[0] = pn[5]; hdr[1] = pn[4]; hdr[2] = 0; hdr[3] = 0x20 | (key_id << 6); hdr[4] = pn[3]; hdr[5] = pn[2]; hdr[6] = pn[1]; hdr[7] = pn[0]; } static inline void ccmp_hdr2pn(u8 *pn, u8 *hdr) { pn[0] = hdr[7]; pn[1] = hdr[6]; pn[2] = hdr[5]; pn[3] = hdr[4]; pn[4] = hdr[1]; pn[5] = hdr[0]; } static int ccmp_encrypt_skb(struct ieee80211_tx_data *tx, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_key *key = tx->key; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int hdrlen, len, tail; u8 *pos; u8 pn[6]; u64 pn64; u8 scratch[6 * AES_BLOCK_SIZE]; if (info->control.hw_key && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV) && !(info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) { /* * hwaccel has no need for preallocated room for CCMP * header or MIC fields */ return 0; } hdrlen = ieee80211_hdrlen(hdr->frame_control); len = skb->len - hdrlen; if (info->control.hw_key) tail = 0; else tail = CCMP_MIC_LEN; if (WARN_ON(skb_tailroom(skb) < tail || skb_headroom(skb) < CCMP_HDR_LEN)) return -1; pos = skb_push(skb, CCMP_HDR_LEN); memmove(pos, pos + CCMP_HDR_LEN, hdrlen); /* the HW only needs room for the IV, but not the actual IV 
*/ if (info->control.hw_key && (info->control.hw_key->flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE)) return 0; hdr = (struct ieee80211_hdr *) pos; pos += hdrlen; pn64 = atomic64_inc_return(&key->u.ccmp.tx_pn); pn[5] = pn64; pn[4] = pn64 >> 8; pn[3] = pn64 >> 16; pn[2] = pn64 >> 24; pn[1] = pn64 >> 32; pn[0] = pn64 >> 40; ccmp_pn2hdr(pos, pn, key->conf.keyidx); /* hwaccel - with software CCMP header */ if (info->control.hw_key) return 0; pos += CCMP_HDR_LEN; ccmp_special_blocks(skb, pn, scratch, 0); ieee80211_aes_ccm_encrypt(key->u.ccmp.tfm, scratch, pos, len, pos, skb_put(skb, CCMP_MIC_LEN)); return 0; } ieee80211_tx_result ieee80211_crypto_ccmp_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; ieee80211_tx_set_protected(tx); skb_queue_walk(&tx->skbs, skb) { if (ccmp_encrypt_skb(tx, skb) < 0) return TX_DROP; } return TX_CONTINUE; } ieee80211_rx_result ieee80211_crypto_ccmp_decrypt(struct ieee80211_rx_data *rx) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; int hdrlen; struct ieee80211_key *key = rx->key; struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); u8 pn[CCMP_PN_LEN]; int data_len; int queue; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (!ieee80211_is_data(hdr->frame_control) && !ieee80211_is_robust_mgmt_frame(hdr)) return RX_CONTINUE; data_len = skb->len - hdrlen - CCMP_HDR_LEN - CCMP_MIC_LEN; if (!rx->sta || data_len < 0) return RX_DROP_UNUSABLE; if (status->flag & RX_FLAG_DECRYPTED) { if (!pskb_may_pull(rx->skb, hdrlen + CCMP_HDR_LEN)) return RX_DROP_UNUSABLE; } else { if (skb_linearize(rx->skb)) return RX_DROP_UNUSABLE; } ccmp_hdr2pn(pn, skb->data + hdrlen); queue = rx->security_idx; if (memcmp(pn, key->u.ccmp.rx_pn[queue], CCMP_PN_LEN) <= 0) { key->u.ccmp.replays++; return RX_DROP_UNUSABLE; } if (!(status->flag & RX_FLAG_DECRYPTED)) { u8 scratch[6 * AES_BLOCK_SIZE]; /* hardware didn't decrypt/verify MIC */ ccmp_special_blocks(skb, pn, scratch, 1); if (ieee80211_aes_ccm_decrypt( 
key->u.ccmp.tfm, scratch, skb->data + hdrlen + CCMP_HDR_LEN, data_len, skb->data + skb->len - CCMP_MIC_LEN, skb->data + hdrlen + CCMP_HDR_LEN)) return RX_DROP_UNUSABLE; } memcpy(key->u.ccmp.rx_pn[queue], pn, CCMP_PN_LEN); /* Remove CCMP header and MIC */ if (pskb_trim(skb, skb->len - CCMP_MIC_LEN)) return RX_DROP_UNUSABLE; memmove(skb->data + CCMP_HDR_LEN, skb->data, hdrlen); skb_pull(skb, CCMP_HDR_LEN); return RX_CONTINUE; } static void bip_aad(struct sk_buff *skb, u8 *aad) { /* BIP AAD: FC(masked) || A1 || A2 || A3 */ /* FC type/subtype */ aad[0] = skb->data[0]; /* Mask FC Retry, PwrMgt, MoreData flags to zero */ aad[1] = skb->data[1] & ~(BIT(4) | BIT(5) | BIT(6)); /* A1 || A2 || A3 */ memcpy(aad + 2, skb->data + 4, 3 * ETH_ALEN); } static inline void bip_ipn_set64(u8 *d, u64 pn) { *d++ = pn; *d++ = pn >> 8; *d++ = pn >> 16; *d++ = pn >> 24; *d++ = pn >> 32; *d = pn >> 40; } static inline void bip_ipn_swap(u8 *d, const u8 *s) { *d++ = s[5]; *d++ = s[4]; *d++ = s[3]; *d++ = s[2]; *d++ = s[1]; *d = s[0]; } ieee80211_tx_result ieee80211_crypto_aes_cmac_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_tx_info *info; struct ieee80211_key *key = tx->key; struct ieee80211_mmie *mmie; u8 aad[20]; u64 pn64; if (WARN_ON(skb_queue_len(&tx->skbs) != 1)) return TX_DROP; skb = skb_peek(&tx->skbs); info = IEEE80211_SKB_CB(skb); if (info->control.hw_key) return TX_CONTINUE; if (WARN_ON(skb_tailroom(skb) < sizeof(*mmie))) return TX_DROP; mmie = (struct ieee80211_mmie *) skb_put(skb, sizeof(*mmie)); mmie->element_id = WLAN_EID_MMIE; mmie->length = sizeof(*mmie) - 2; mmie->key_id = cpu_to_le16(key->conf.keyidx); /* PN = PN + 1 */ pn64 = atomic64_inc_return(&key->u.aes_cmac.tx_pn); bip_ipn_set64(mmie->sequence_number, pn64); bip_aad(skb, aad); /* * MIC = AES-128-CMAC(IGTK, AAD || Management Frame Body || MMIE, 64) */ ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mmie->mic); return TX_CONTINUE; } ieee80211_rx_result 
ieee80211_crypto_aes_cmac_decrypt(struct ieee80211_rx_data *rx) { struct sk_buff *skb = rx->skb; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct ieee80211_key *key = rx->key; struct ieee80211_mmie *mmie; u8 aad[20], mic[8], ipn[6]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; if (!ieee80211_is_mgmt(hdr->frame_control)) return RX_CONTINUE; /* management frames are already linear */ if (skb->len < 24 + sizeof(*mmie)) return RX_DROP_UNUSABLE; mmie = (struct ieee80211_mmie *) (skb->data + skb->len - sizeof(*mmie)); if (mmie->element_id != WLAN_EID_MMIE || mmie->length != sizeof(*mmie) - 2) return RX_DROP_UNUSABLE; /* Invalid MMIE */ bip_ipn_swap(ipn, mmie->sequence_number); if (memcmp(ipn, key->u.aes_cmac.rx_pn, 6) <= 0) { key->u.aes_cmac.replays++; return RX_DROP_UNUSABLE; } if (!(status->flag & RX_FLAG_DECRYPTED)) { /* hardware didn't decrypt/verify MIC */ bip_aad(skb, aad); ieee80211_aes_cmac(key->u.aes_cmac.tfm, aad, skb->data + 24, skb->len - 24, mic); if (memcmp(mic, mmie->mic, sizeof(mmie->mic)) != 0) { key->u.aes_cmac.icverrors++; return RX_DROP_UNUSABLE; } } memcpy(key->u.aes_cmac.rx_pn, ipn, 6); /* Remove MMIE */ skb_trim(skb, skb->len - sizeof(*mmie)); return RX_CONTINUE; } ieee80211_tx_result ieee80211_crypto_hw_encrypt(struct ieee80211_tx_data *tx) { struct sk_buff *skb; struct ieee80211_tx_info *info = NULL; skb_queue_walk(&tx->skbs, skb) { info = IEEE80211_SKB_CB(skb); /* handle hw-only algorithm */ if (!info->control.hw_key) return TX_DROP; } ieee80211_tx_set_protected(tx); return TX_CONTINUE; }
gpl-2.0
stariver/qt210-kernel
sound/soc/blackfin/bf5xx-tdm-pcm.c
1551
9355
/* * File: sound/soc/blackfin/bf5xx-tdm-pcm.c * Author: Barry Song <Barry.Song@analog.com> * * Created: Tue June 06 2009 * Description: DMA driver for tdm codec * * Modified: * Copyright 2009 Analog Devices Inc. * * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/gfp.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <asm/dma.h> #include "bf5xx-tdm-pcm.h" #include "bf5xx-tdm.h" #include "bf5xx-sport.h" #define PCM_BUFFER_MAX 0x8000 #define FRAGMENT_SIZE_MIN (4*1024) #define FRAGMENTS_MIN 2 #define FRAGMENTS_MAX 32 static void bf5xx_dma_irq(void *data) { struct snd_pcm_substream *pcm = data; snd_pcm_period_elapsed(pcm); } static const struct snd_pcm_hardware bf5xx_pcm_hardware = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_RESUME), .formats = SNDRV_PCM_FMTBIT_S32_LE, .rates = SNDRV_PCM_RATE_48000, .channels_min = 2, .channels_max = 8, .buffer_bytes_max = PCM_BUFFER_MAX, .period_bytes_min = FRAGMENT_SIZE_MIN, .period_bytes_max = PCM_BUFFER_MAX/2, .periods_min = FRAGMENTS_MIN, .periods_max = FRAGMENTS_MAX, }; static int 
bf5xx_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { size_t size = bf5xx_pcm_hardware.buffer_bytes_max; snd_pcm_lib_malloc_pages(substream, size * 4); return 0; } static int bf5xx_pcm_hw_free(struct snd_pcm_substream *substream) { snd_pcm_lib_free_pages(substream); return 0; } static int bf5xx_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct sport_device *sport = runtime->private_data; int fragsize_bytes = frames_to_bytes(runtime, runtime->period_size); fragsize_bytes /= runtime->channels; /* inflate the fragsize to match the dma width of SPORT */ fragsize_bytes *= 8; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { sport_set_tx_callback(sport, bf5xx_dma_irq, substream); sport_config_tx_dma(sport, runtime->dma_area, runtime->periods, fragsize_bytes); } else { sport_set_rx_callback(sport, bf5xx_dma_irq, substream); sport_config_rx_dma(sport, runtime->dma_area, runtime->periods, fragsize_bytes); } return 0; } static int bf5xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_pcm_runtime *runtime = substream->runtime; struct sport_device *sport = runtime->private_data; int ret = 0; switch (cmd) { case SNDRV_PCM_TRIGGER_START: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) sport_tx_start(sport); else sport_rx_start(sport); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) sport_tx_stop(sport); else sport_rx_stop(sport); break; default: ret = -EINVAL; } return ret; } static snd_pcm_uframes_t bf5xx_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct sport_device *sport = runtime->private_data; unsigned int diff; snd_pcm_uframes_t frames; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { diff = sport_curr_offset_tx(sport); frames = diff / (8*4); /* 32 bytes per frame */ } 
else { diff = sport_curr_offset_rx(sport); frames = diff / (8*4); } return frames; } static int bf5xx_pcm_open(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; struct sport_device *sport_handle = snd_soc_dai_get_drvdata(cpu_dai); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_dma_buffer *buf = &substream->dma_buffer; int ret = 0; snd_soc_set_runtime_hwparams(substream, &bf5xx_pcm_hardware); ret = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (ret < 0) goto out; if (sport_handle != NULL) { if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) sport_handle->tx_buf = buf->area; else sport_handle->rx_buf = buf->area; runtime->private_data = sport_handle; } else { pr_err("sport_handle is NULL\n"); ret = -ENODEV; } out: return ret; } static int bf5xx_pcm_copy(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t pos, void *buf, snd_pcm_uframes_t count) { struct snd_pcm_runtime *runtime = substream->runtime; struct sport_device *sport = runtime->private_data; struct bf5xx_tdm_port *tdm_port = sport->private_data; unsigned int *src; unsigned int *dst; int i; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { src = buf; dst = (unsigned int *)substream->runtime->dma_area; dst += pos * 8; while (count--) { for (i = 0; i < substream->runtime->channels; i++) *(dst + tdm_port->tx_map[i]) = *src++; dst += 8; } } else { src = (unsigned int *)substream->runtime->dma_area; dst = buf; src += pos * 8; while (count--) { for (i = 0; i < substream->runtime->channels; i++) *dst++ = *(src + tdm_port->rx_map[i]); src += 8; } } return 0; } static int bf5xx_pcm_silence(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t pos, snd_pcm_uframes_t count) { unsigned char *buf = substream->runtime->dma_area; buf += pos * 8 * 4; memset(buf, '\0', count * 8 * 4); return 0; } struct snd_pcm_ops bf5xx_pcm_tdm_ops = { .open = 
bf5xx_pcm_open, .ioctl = snd_pcm_lib_ioctl, .hw_params = bf5xx_pcm_hw_params, .hw_free = bf5xx_pcm_hw_free, .prepare = bf5xx_pcm_prepare, .trigger = bf5xx_pcm_trigger, .pointer = bf5xx_pcm_pointer, .copy = bf5xx_pcm_copy, .silence = bf5xx_pcm_silence, }; static int bf5xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream) { struct snd_pcm_substream *substream = pcm->streams[stream].substream; struct snd_dma_buffer *buf = &substream->dma_buffer; size_t size = bf5xx_pcm_hardware.buffer_bytes_max; buf->dev.type = SNDRV_DMA_TYPE_DEV; buf->dev.dev = pcm->card->dev; buf->private_data = NULL; buf->area = dma_alloc_coherent(pcm->card->dev, size * 4, &buf->addr, GFP_KERNEL); if (!buf->area) { pr_err("Failed to allocate dma memory - Please increase uncached DMA memory region\n"); return -ENOMEM; } buf->bytes = size; return 0; } static void bf5xx_pcm_free_dma_buffers(struct snd_pcm *pcm) { struct snd_pcm_substream *substream; struct snd_dma_buffer *buf; int stream; for (stream = 0; stream < 2; stream++) { substream = pcm->streams[stream].substream; if (!substream) continue; buf = &substream->dma_buffer; if (!buf->area) continue; dma_free_coherent(NULL, buf->bytes, buf->area, 0); buf->area = NULL; } } static u64 bf5xx_pcm_dmamask = DMA_BIT_MASK(32); static int bf5xx_pcm_tdm_new(struct snd_card *card, struct snd_soc_dai *dai, struct snd_pcm *pcm) { int ret = 0; if (!card->dev->dma_mask) card->dev->dma_mask = &bf5xx_pcm_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(32); if (dai->driver->playback.channels_min) { ret = bf5xx_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_PLAYBACK); if (ret) goto out; } if (dai->driver->capture.channels_min) { ret = bf5xx_pcm_preallocate_dma_buffer(pcm, SNDRV_PCM_STREAM_CAPTURE); if (ret) goto out; } out: return ret; } static struct snd_soc_platform_driver bf5xx_tdm_soc_platform = { .ops = &bf5xx_pcm_tdm_ops, .pcm_new = bf5xx_pcm_tdm_new, .pcm_free = bf5xx_pcm_free_dma_buffers, }; static int 
__devinit bf5xx_soc_platform_probe(struct platform_device *pdev) { return snd_soc_register_platform(&pdev->dev, &bf5xx_tdm_soc_platform); } static int __devexit bf5xx_soc_platform_remove(struct platform_device *pdev) { snd_soc_unregister_platform(&pdev->dev); return 0; } static struct platform_driver bfin_tdm_driver = { .driver = { .name = "bfin-tdm-pcm-audio", .owner = THIS_MODULE, }, .probe = bf5xx_soc_platform_probe, .remove = __devexit_p(bf5xx_soc_platform_remove), }; static int __init snd_bfin_tdm_init(void) { return platform_driver_register(&bfin_tdm_driver); } module_init(snd_bfin_tdm_init); static void __exit snd_bfin_tdm_exit(void) { platform_driver_unregister(&bfin_tdm_driver); } module_exit(snd_bfin_tdm_exit); MODULE_AUTHOR("Barry Song"); MODULE_DESCRIPTION("ADI Blackfin TDM PCM DMA module"); MODULE_LICENSE("GPL");
gpl-2.0
clemsyn/TF101-kernelOC
drivers/rtc/rtc-at91rm9200.c
1551
10461
/* * Real Time Clock interface for Linux on Atmel AT91RM9200 * * Copyright (C) 2002 Rick Bronson * * Converted to RTC class model by Andrew Victor * * Ported to Linux 2.6 by Steven Scholz * Based on s3c2410-rtc.c Simtec Electronics * * Based on sa1100-rtc.c by Nils Faerber * Based on rtc.c by Paul Gortmaker * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/time.h> #include <linux/rtc.h> #include <linux/bcd.h> #include <linux/interrupt.h> #include <linux/ioctl.h> #include <linux/completion.h> #include <asm/uaccess.h> #include <mach/at91_rtc.h> #define AT91_RTC_EPOCH 1900UL /* just like arch/arm/common/rtctime.c */ static DECLARE_COMPLETION(at91_rtc_updated); static unsigned int at91_alarm_year = AT91_RTC_EPOCH; /* * Decode time/date into rtc_time structure */ static void at91_rtc_decodetime(unsigned int timereg, unsigned int calreg, struct rtc_time *tm) { unsigned int time, date; /* must read twice in case it changes */ do { time = at91_sys_read(timereg); date = at91_sys_read(calreg); } while ((time != at91_sys_read(timereg)) || (date != at91_sys_read(calreg))); tm->tm_sec = bcd2bin((time & AT91_RTC_SEC) >> 0); tm->tm_min = bcd2bin((time & AT91_RTC_MIN) >> 8); tm->tm_hour = bcd2bin((time & AT91_RTC_HOUR) >> 16); /* * The Calendar Alarm register does not have a field for * the year - so these will return an invalid value. When an * alarm is set, at91_alarm_year wille store the current year. 
*/ tm->tm_year = bcd2bin(date & AT91_RTC_CENT) * 100; /* century */ tm->tm_year += bcd2bin((date & AT91_RTC_YEAR) >> 8); /* year */ tm->tm_wday = bcd2bin((date & AT91_RTC_DAY) >> 21) - 1; /* day of the week [0-6], Sunday=0 */ tm->tm_mon = bcd2bin((date & AT91_RTC_MONTH) >> 16) - 1; tm->tm_mday = bcd2bin((date & AT91_RTC_DATE) >> 24); } /* * Read current time and date in RTC */ static int at91_rtc_readtime(struct device *dev, struct rtc_time *tm) { at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, tm); tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); tm->tm_year = tm->tm_year - 1900; pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); return 0; } /* * Set current time and date in RTC */ static int at91_rtc_settime(struct device *dev, struct rtc_time *tm) { unsigned long cr; pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__, 1900 + tm->tm_year, tm->tm_mon, tm->tm_mday, tm->tm_hour, tm->tm_min, tm->tm_sec); /* Stop Time/Calendar from counting */ cr = at91_sys_read(AT91_RTC_CR); at91_sys_write(AT91_RTC_CR, cr | AT91_RTC_UPDCAL | AT91_RTC_UPDTIM); at91_sys_write(AT91_RTC_IER, AT91_RTC_ACKUPD); wait_for_completion(&at91_rtc_updated); /* wait for ACKUPD interrupt */ at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD); at91_sys_write(AT91_RTC_TIMR, bin2bcd(tm->tm_sec) << 0 | bin2bcd(tm->tm_min) << 8 | bin2bcd(tm->tm_hour) << 16); at91_sys_write(AT91_RTC_CALR, bin2bcd((tm->tm_year + 1900) / 100) /* century */ | bin2bcd(tm->tm_year % 100) << 8 /* year */ | bin2bcd(tm->tm_mon + 1) << 16 /* tm_mon starts at zero */ | bin2bcd(tm->tm_wday + 1) << 21 /* day of the week [0-6], Sunday=0 */ | bin2bcd(tm->tm_mday) << 24); /* Restart Time/Calendar */ cr = at91_sys_read(AT91_RTC_CR); at91_sys_write(AT91_RTC_CR, cr & ~(AT91_RTC_UPDCAL | AT91_RTC_UPDTIM)); return 0; } /* * Read alarm time and date in RTC */ static int at91_rtc_readalarm(struct device *dev, struct rtc_wkalrm 
*alrm)	/* NOTE(review): at91_rtc_readalarm() opens before this chunk */
{
	struct rtc_time *tm = &alrm->time;

	/* Decode the BCD alarm compare registers into broken-down time */
	at91_rtc_decodetime(AT91_RTC_TIMALR, AT91_RTC_CALALR, tm);
	tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year);
	/* The hardware alarm has no year field; use the cached value */
	tm->tm_year = at91_alarm_year - 1900;

	alrm->enabled = (at91_sys_read(AT91_RTC_IMR) & AT91_RTC_ALARM)
			? 1 : 0;

	pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
		1900 + tm->tm_year, tm->tm_mon, tm->tm_mday,
		tm->tm_hour, tm->tm_min, tm->tm_sec);

	return 0;
}

/*
 * Set alarm time and date in RTC
 */
static int at91_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct rtc_time tm;

	/*
	 * Day/month come from the current RTC date; only hour/min/sec are
	 * taken from the caller.  The year is cached for readalarm().
	 */
	at91_rtc_decodetime(AT91_RTC_TIMR, AT91_RTC_CALR, &tm);

	at91_alarm_year = tm.tm_year;

	tm.tm_hour = alrm->time.tm_hour;
	tm.tm_min = alrm->time.tm_min;
	tm.tm_sec = alrm->time.tm_sec;

	/* Mask the alarm IRQ while the compare registers are reprogrammed */
	at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
	at91_sys_write(AT91_RTC_TIMALR,
		  bin2bcd(tm.tm_sec) << 0
		| bin2bcd(tm.tm_min) << 8
		| bin2bcd(tm.tm_hour) << 16
		| AT91_RTC_HOUREN | AT91_RTC_MINEN | AT91_RTC_SECEN);
	at91_sys_write(AT91_RTC_CALALR,
		  bin2bcd(tm.tm_mon + 1) << 16		/* tm_mon starts at zero */
		| bin2bcd(tm.tm_mday) << 24
		| AT91_RTC_DATEEN | AT91_RTC_MTHEN);

	if (alrm->enabled) {
		/* scrub stale alarm status before re-enabling the IRQ */
		at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
		at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
	}

	pr_debug("%s(): %4d-%02d-%02d %02d:%02d:%02d\n", __func__,
		at91_alarm_year, tm.tm_mon, tm.tm_mday, tm.tm_hour,
		tm.tm_min, tm.tm_sec);

	return 0;
}

/*
 * Handle commands from user-space
 */
static int at91_rtc_ioctl(struct device *dev, unsigned int cmd,
			unsigned long arg)
{
	int ret = 0;

	pr_debug("%s(): cmd=%08x, arg=%08lx.\n", __func__, cmd, arg);

	/* important:  scrub old status before enabling IRQs */
	switch (cmd) {
	case RTC_AIE_OFF:	/* alarm off */
		at91_sys_write(AT91_RTC_IDR, AT91_RTC_ALARM);
		break;
	case RTC_AIE_ON:	/* alarm on */
		at91_sys_write(AT91_RTC_SCCR, AT91_RTC_ALARM);
		at91_sys_write(AT91_RTC_IER, AT91_RTC_ALARM);
		break;
	case RTC_UIE_OFF:	/* update off */
		at91_sys_write(AT91_RTC_IDR, AT91_RTC_SECEV);
		break;
	case RTC_UIE_ON:	/* update on */
		at91_sys_write(AT91_RTC_SCCR, AT91_RTC_SECEV);
		at91_sys_write(AT91_RTC_IER, AT91_RTC_SECEV);
		break;
	default:
		ret = -ENOIOCTLCMD;
		break;
	}

	return ret;
}

/*
 * Provide additional RTC information in /proc/driver/rtc
 */
static int at91_rtc_proc(struct device *dev, struct seq_file *seq)
{
	unsigned long imr = at91_sys_read(AT91_RTC_IMR);

	seq_printf(seq, "update_IRQ\t: %s\n",
			(imr & AT91_RTC_ACKUPD) ? "yes" : "no");
	seq_printf(seq, "periodic_IRQ\t: %s\n",
			(imr & AT91_RTC_SECEV) ? "yes" : "no");

	return 0;
}

/*
 * IRQ handler for the RTC
 */
static irqreturn_t at91_rtc_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct rtc_device *rtc = platform_get_drvdata(pdev);
	unsigned int rtsr;
	unsigned long events = 0;

	/* only look at status bits whose interrupt is actually enabled */
	rtsr = at91_sys_read(AT91_RTC_SR) & at91_sys_read(AT91_RTC_IMR);
	if (rtsr) {		/* this interrupt is shared!  Is it ours? */
		if (rtsr & AT91_RTC_ALARM)
			events |= (RTC_AF | RTC_IRQF);
		if (rtsr & AT91_RTC_SECEV)
			events |= (RTC_UF | RTC_IRQF);
		if (rtsr & AT91_RTC_ACKUPD)
			/* settime() waits on this completion */
			complete(&at91_rtc_updated);

		at91_sys_write(AT91_RTC_SCCR, rtsr);	/* clear status reg */

		rtc_update_irq(rtc, 1, events);

		pr_debug("%s(): num=%ld, events=0x%02lx\n", __func__,
			events >> 8, events & 0x000000FF);

		return IRQ_HANDLED;
	}
	return IRQ_NONE;		/* not handled */
}

static const struct rtc_class_ops at91_rtc_ops = {
	.ioctl		= at91_rtc_ioctl,
	.read_time	= at91_rtc_readtime,
	.set_time	= at91_rtc_settime,
	.read_alarm	= at91_rtc_readalarm,
	.set_alarm	= at91_rtc_setalarm,
	.proc		= at91_rtc_proc,
};

/*
 * Initialize and install RTC driver
 */
static int __init at91_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	int ret;

	at91_sys_write(AT91_RTC_CR, 0);
	at91_sys_write(AT91_RTC_MR, 0);		/* 24 hour mode */

	/* Disable all interrupts */
	at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
					AT91_RTC_SECEV | AT91_RTC_TIMEV |
					AT91_RTC_CALEV);

	/* system IRQ is shared with DBGU and other peripherals */
	ret = request_irq(AT91_ID_SYS, at91_rtc_interrupt,
				IRQF_SHARED,
				"at91_rtc", pdev);
	if (ret) {
		printk(KERN_ERR "at91_rtc: IRQ %d already in use.\n",
				AT91_ID_SYS);
		return ret;
	}

	/* cpu init code should really have flagged this device as
	 * being wake-capable; if it didn't, do that here.
	 */
	if (!device_can_wakeup(&pdev->dev))
		device_init_wakeup(&pdev->dev, 1);

	rtc = rtc_device_register(pdev->name, &pdev->dev,
				&at91_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc)) {
		free_irq(AT91_ID_SYS, pdev);
		return PTR_ERR(rtc);
	}
	platform_set_drvdata(pdev, rtc);

	printk(KERN_INFO "AT91 Real Time Clock driver.\n");
	return 0;
}

/*
 * Disable and remove the RTC driver
 */
static int __exit at91_rtc_remove(struct platform_device *pdev)
{
	struct rtc_device *rtc = platform_get_drvdata(pdev);

	/* Disable all interrupts */
	at91_sys_write(AT91_RTC_IDR, AT91_RTC_ACKUPD | AT91_RTC_ALARM |
					AT91_RTC_SECEV | AT91_RTC_TIMEV |
					AT91_RTC_CALEV);
	free_irq(AT91_ID_SYS, pdev);

	rtc_device_unregister(rtc);
	platform_set_drvdata(pdev, NULL);

	return 0;
}

#ifdef CONFIG_PM

/* AT91RM9200 RTC Power management control */

/* interrupt mask saved across suspend/resume */
static u32 at91_rtc_imr;

static int at91_rtc_suspend(struct device *dev)
{
	/* this IRQ is shared with DBGU and other hardware which isn't
	 * necessarily doing PM like we are...
	 */
	at91_rtc_imr = at91_sys_read(AT91_RTC_IMR)
			& (AT91_RTC_ALARM|AT91_RTC_SECEV);
	if (at91_rtc_imr) {
		if (device_may_wakeup(dev))
			enable_irq_wake(AT91_ID_SYS);
		else
			/* not a wakeup source: mask our IRQs for the duration */
			at91_sys_write(AT91_RTC_IDR, at91_rtc_imr);
	}
	return 0;
}

static int at91_rtc_resume(struct device *dev)
{
	if (at91_rtc_imr) {
		if (device_may_wakeup(dev))
			disable_irq_wake(AT91_ID_SYS);
		else
			/* restore the interrupts masked at suspend time */
			at91_sys_write(AT91_RTC_IER, at91_rtc_imr);
	}
	return 0;
}

static const struct dev_pm_ops at91_rtc_pm = {
	.suspend =	at91_rtc_suspend,
	.resume =	at91_rtc_resume,
};

#define at91_rtc_pm_ptr	&at91_rtc_pm

#else

#define at91_rtc_pm_ptr	NULL

#endif

/* no .probe: it is __init and bound via platform_driver_probe() */
static struct platform_driver at91_rtc_driver = {
	.remove		= __exit_p(at91_rtc_remove),
	.driver		= {
		.name	= "at91_rtc",
		.owner	= THIS_MODULE,
		.pm	= at91_rtc_pm_ptr,
	},
};

static int __init at91_rtc_init(void)
{
	return platform_driver_probe(&at91_rtc_driver, at91_rtc_probe);
}

static void __exit at91_rtc_exit(void)
{
	platform_driver_unregister(&at91_rtc_driver);
}

module_init(at91_rtc_init);
module_exit(at91_rtc_exit);

MODULE_AUTHOR("Rick Bronson");
MODULE_DESCRIPTION("RTC driver for Atmel AT91RM9200");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:at91_rtc");
gpl-2.0
idor/wl12xx
arch/mips/mipssim/sim_smtc.c
2319
2816
/*
 * Copyright (C) 2005 MIPS Technologies, Inc.  All rights reserved.
 *
 * This program is free software; you can distribute it and/or modify it
 * under the terms of the GNU General Public License (Version 2) as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
 *
 */

/*
 * Simulator Platform-specific hooks for SMTC operation
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>

#include <asm/atomic.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <asm/smtc.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
#include <asm/smtc_ipi.h>

/* VPE/SMP Prototype implements platform interfaces directly */

/*
 * Cause the specified action to be performed on a targeted "CPU"
 */

static void ssmtc_send_ipi_single(int cpu, unsigned int action)
{
	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
}

static inline void ssmtc_send_ipi_mask(const struct cpumask *mask,
				       unsigned int action)
{
	unsigned int i;

	/* fan the IPI out to every CPU in the mask, one at a time */
	for_each_cpu(i, mask)
		ssmtc_send_ipi_single(i, action);
}

/*
 * Post-config but pre-boot cleanup entry point
 */
static void __cpuinit ssmtc_init_secondary(void)
{
	smtc_init_secondary();
}

/*
 * SMP initialization finalization entry point
 */
static void __cpuinit ssmtc_smp_finish(void)
{
	smtc_smp_finish();
}

/*
 * Hook for after all CPUs are online
 */
static void ssmtc_cpus_done(void)
{
	/* nothing to do on the simulator */
}

/*
 * Platform "CPU" startup hook
 */
static void __cpuinit ssmtc_boot_secondary(int cpu, struct task_struct *idle)
{
	smtc_boot_secondary(cpu, idle);
}

static void __init ssmtc_smp_setup(void)
{
	/* Config3 bit 2 — presumably the MT ASE present flag; verify */
	if (read_c0_config3() & (1 << 2))
		mipsmt_build_cpu_map(0);
}

/*
 * Platform SMP pre-initialization
 */
static void ssmtc_prepare_cpus(unsigned int max_cpus)
{
	/*
	 * As noted above, we can assume a single CPU for now
	 * but it may be multithreaded.
	 */

	if (read_c0_config3() & (1 << 2)) {
		mipsmt_prepare_cpus();
	}
}

/* Dispatch table wired into the generic MIPS SMP code */
struct plat_smp_ops ssmtc_smp_ops = {
	.send_ipi_single	= ssmtc_send_ipi_single,
	.send_ipi_mask		= ssmtc_send_ipi_mask,
	.init_secondary		= ssmtc_init_secondary,
	.smp_finish		= ssmtc_smp_finish,
	.cpus_done		= ssmtc_cpus_done,
	.boot_secondary		= ssmtc_boot_secondary,
	.smp_setup		= ssmtc_smp_setup,
	.prepare_cpus		= ssmtc_prepare_cpus,
};
gpl-2.0
UniqueDroid/lge-kernel-x3-p880
arch/powerpc/sysdev/cpm2_pic.c
2831
7360
/*
 * Platform information definitions.
 *
 * Copied from arch/ppc/syslib/cpm2_pic.c with minor subsequent updates
 * to make in work in arch/powerpc/. Original (c) belongs to Dan Malek.
 *
 * Author:  Vitaly Bordug <vbordug@ru.mvista.com>
 *
 * 1999-2001 (c) Dan Malek <dan@embeddedalley.com>
 * 2006 (c) MontaVista Software, Inc.
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

/* The CPM2 internal interrupt controller.  It is usually
 * the only interrupt controller.
 * There are two 32-bit registers (high/low) for up to 64
 * possible interrupts.
 *
 * Now, the fun starts.....Interrupt Numbers DO NOT MAP
 * in a simple arithmetic fashion to mask or pending registers.
 * That is, interrupt 4 does not map to bit position 4.
 * We create two tables, indexed by vector number, to indicate
 * which register to use and which bit in the register to use.
 */

#include <linux/stddef.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/irq.h>

#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/fs_pd.h>

#include "cpm2_pic.h"

/* External IRQS */
#define CPM2_IRQ_EXT1		19
#define CPM2_IRQ_EXT7		25

/* Port C IRQS */
#define CPM2_IRQ_PORTC15	48
#define CPM2_IRQ_PORTC0		63

static intctl_cpm2_t __iomem *cpm2_intctl;

static struct irq_host *cpm2_pic_host;
#define NR_MASK_WORDS   ((NR_IRQS + 31) / 32)
/* software copy of the mask registers, so they never need reading back */
static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];

/* 1 = high (simrh/sipnrh) register, 0 = low register, per vector */
static const u_char irq_to_siureg[] = {
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	1, 1, 1, 1, 1, 1, 1, 1,
	1, 1, 1, 1, 1, 1, 1, 1,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0
};

/* bit numbers do not match the docs, these are precomputed so the bit for
 * a given irq is (1 << irq_to_siubit[irq]) */
static const u_char irq_to_siubit[] = {
	 0, 15, 14, 13, 12, 11, 10,  9,
	 8,  7,  6,  5,  4,  3,  2,  1,
	 2,  1,  0, 14, 13, 12, 11, 10,
	 9,  8,  7,  6,  5,  4,  3,  0,
	31, 30, 29, 28, 27, 26, 25, 24,
	23, 22, 21, 20, 19, 18, 17, 16,
	16, 17, 18, 19, 20, 21, 22, 23,
	24, 25, 26, 27, 28, 29, 30, 31,
};

static void cpm2_mask_irq(struct irq_data *d)
{
	int	bit, word;
	unsigned int irq_nr = irqd_to_hwirq(d);

	bit = irq_to_siubit[irq_nr];
	word = irq_to_siureg[irq_nr];

	ppc_cached_irq_mask[word] &= ~(1 << bit);
	/* pointer arithmetic: +word selects ic_simrh (0) or ic_simrl (1) */
	out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
}

static void cpm2_unmask_irq(struct irq_data *d)
{
	int	bit, word;
	unsigned int irq_nr = irqd_to_hwirq(d);

	bit = irq_to_siubit[irq_nr];
	word = irq_to_siureg[irq_nr];

	ppc_cached_irq_mask[word] |= 1 << bit;
	out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);
}

static void cpm2_ack(struct irq_data *d)
{
	int	bit, word;
	unsigned int irq_nr = irqd_to_hwirq(d);

	bit = irq_to_siubit[irq_nr];
	word = irq_to_siureg[irq_nr];

	/* write-one-to-clear the pending bit */
	out_be32(&cpm2_intctl->ic_sipnrh + word, 1 << bit);
}

static void cpm2_end_irq(struct irq_data *d)
{
	int	bit, word;
	unsigned int irq_nr = irqd_to_hwirq(d);

	bit = irq_to_siubit[irq_nr];
	word = irq_to_siureg[irq_nr];

	/* re-enable the source at end-of-interrupt */
	ppc_cached_irq_mask[word] |= 1 << bit;
	out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]);

	/*
	 * Work around large numbers of spurious IRQs on PowerPC 82xx
	 * systems.
	 */
	mb();
}

static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type)
{
	unsigned int src = irqd_to_hwirq(d);
	unsigned int vold, vnew, edibit;

	/* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or
	 * IRQ_TYPE_EDGE_BOTH (default).  All others are IRQ_TYPE_EDGE_FALLING
	 * or IRQ_TYPE_LEVEL_LOW (default)
	 */
	if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) {
		if (flow_type == IRQ_TYPE_NONE)
			flow_type = IRQ_TYPE_EDGE_BOTH;

		if (flow_type != IRQ_TYPE_EDGE_BOTH &&
		    flow_type != IRQ_TYPE_EDGE_FALLING)
			goto err_sense;
	} else {
		if (flow_type == IRQ_TYPE_NONE)
			flow_type = IRQ_TYPE_LEVEL_LOW;

		if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH))
			goto err_sense;
	}

	irqd_set_trigger_type(d, flow_type);
	if (flow_type & IRQ_TYPE_LEVEL_LOW)
		__irq_set_handler_locked(d->irq, handle_level_irq);
	else
		__irq_set_handler_locked(d->irq, handle_edge_irq);

	/* internal IRQ senses are LEVEL_LOW
	 * EXT IRQ and Port C IRQ senses are programmable
	 */
	if (src >= CPM2_IRQ_EXT1 && src <= CPM2_IRQ_EXT7)
		edibit = (14 - (src - CPM2_IRQ_EXT1));
	else
		if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0)
			edibit = (31 - (CPM2_IRQ_PORTC0 - src));
		else
			/* internal source: only LEVEL_LOW is acceptable */
			return (flow_type & IRQ_TYPE_LEVEL_LOW) ?
				IRQ_SET_MASK_OK_NOCOPY : -EINVAL;

	/* read-modify-write the edge/level select register */
	vold = in_be32(&cpm2_intctl->ic_siexr);

	if ((flow_type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_FALLING)
		vnew = vold | (1 << edibit);
	else
		vnew = vold & ~(1 << edibit);

	if (vold != vnew)
		out_be32(&cpm2_intctl->ic_siexr, vnew);
	return IRQ_SET_MASK_OK_NOCOPY;

err_sense:
	pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type);
	return -EINVAL;
}

static struct irq_chip cpm2_pic = {
	.name = "CPM2 SIU",
	.irq_mask = cpm2_mask_irq,
	.irq_unmask = cpm2_unmask_irq,
	.irq_ack = cpm2_ack,
	.irq_eoi = cpm2_end_irq,
	.irq_set_type = cpm2_set_irq_type,
	.flags = IRQCHIP_EOI_IF_HANDLED,
};

unsigned int cpm2_get_irq(void)
{
	int irq;
	unsigned long bits;

	/* For CPM2, read the SIVEC register and shift the bits down
	 * to get the irq number.
	 */
	bits = in_be32(&cpm2_intctl->ic_sivec);
	irq = bits >> 26;

	if (irq == 0)
		return (-1);	/* vector 0 means "no interrupt pending" */
	return irq_linear_revmap(cpm2_pic_host, irq);
}

static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq,
			  irq_hw_number_t hw)
{
	pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);

	/* default every vector to level handling; set_type may override */
	irq_set_status_flags(virq, IRQ_LEVEL);
	irq_set_chip_and_handler(virq, &cpm2_pic, handle_level_irq);
	return 0;
}

static int cpm2_pic_host_xlate(struct irq_host *h, struct device_node *ct,
			    const u32 *intspec, unsigned int intsize,
			    irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	/* interrupt specifier: cell 0 = hwirq, optional cell 1 = sense */
	*out_hwirq = intspec[0];
	if (intsize > 1)
		*out_flags = intspec[1];
	else
		*out_flags = IRQ_TYPE_NONE;
	return 0;
}

static struct irq_host_ops cpm2_pic_host_ops = {
	.map = cpm2_pic_host_map,
	.xlate = cpm2_pic_host_xlate,
};

void cpm2_pic_init(struct device_node *node)
{
	int i;

	cpm2_intctl = cpm2_map(im_intctl);

	/* Clear the CPM IRQ controller, in case it has any bits set
	 * from the bootloader
	 */

	/* Mask out everything */

	out_be32(&cpm2_intctl->ic_simrh, 0x00000000);
	out_be32(&cpm2_intctl->ic_simrl, 0x00000000);

	wmb();

	/* Ack everything */
	out_be32(&cpm2_intctl->ic_sipnrh, 0xffffffff);
	out_be32(&cpm2_intctl->ic_sipnrl, 0xffffffff);

	wmb();

	/* Dummy read of the vector */
	i = in_be32(&cpm2_intctl->ic_sivec);

	rmb();

	/* Initialize the default interrupt mapping priorities,
	 * in case the boot rom changed something on us.
	 */
	out_be16(&cpm2_intctl->ic_sicr, 0);
	out_be32(&cpm2_intctl->ic_scprrh, 0x05309770);
	out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);

	/* create a legacy host */
	cpm2_pic_host = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR,
				       64, &cpm2_pic_host_ops, 64);
	if (cpm2_pic_host == NULL) {
		printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
		return;
	}
}
gpl-2.0
coreos/linux-deprecated
drivers/hid/hid-speedlink.c
4367
2323
/*
 * HID driver for Speedlink Vicious and Divine Cezanne (USB mouse).
 * Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
 * the HID descriptor.
 *
 * Copyright (c) 2011, 2013 Stefan Kriwanek <dev@stefankriwanek.de>
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>

#include "hid-ids.h"

static const struct hid_device_id speedlink_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS, USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE)},
	{ }
};

/* Drop LED usages so the phantom keyboard LEDs never reach input */
static int speedlink_input_mapping(struct hid_device *hdev,
		struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	/*
	 * The Cezanne mouse has a second "keyboard" USB endpoint for it is
	 * able to map keyboard events to the button presses.
	 * It sends a standard keyboard report descriptor, though, whose
	 * LEDs we ignore.
	 */
	switch (usage->hid & HID_USAGE_PAGE) {
	case HID_UP_LED:
		return -1;	/* -1: ignore this usage entirely */
	}

	return 0;
}

/* Filter bogus X/Y relative events (see usage_table below) */
static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	/* No other conditions due to usage_table. */

	/* This fixes the "jumpy" cursor occuring due to invalid events sent
	 * by the device. Some devices only send them with value==+256, others
	 * don't. However, catching abs(value)>=256 is restrictive enough not
	 * to interfere with devices that were bug-free (has been tested).
	 */
	if (abs(value) >= 256)
		return 1;	/* 1: event consumed, do not pass to input */
	/* Drop useless distance 0 events (on button clicks etc.) as well */
	if (value == 0)
		return 1;

	return 0;
}

MODULE_DEVICE_TABLE(hid, speedlink_devices);

/* Only grab relative X/Y; everything else takes the default path */
static const struct hid_usage_id speedlink_grabbed_usages[] = {
	{ HID_GD_X, EV_REL, 0 },
	{ HID_GD_Y, EV_REL, 1 },
	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
};

static struct hid_driver speedlink_driver = {
	.name = "speedlink",
	.id_table = speedlink_devices,
	.usage_table = speedlink_grabbed_usages,
	.input_mapping = speedlink_input_mapping,
	.event = speedlink_event,
};
module_hid_driver(speedlink_driver);

MODULE_LICENSE("GPL");
gpl-2.0
moddingg33k/android_kernel_google
drivers/staging/iio/trigger/iio-trig-gpio.c
4879
4025
/* * Industrial I/O - gpio based trigger support * * Copyright (c) 2008 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * Currently this is more of a functioning proof of concept than a full * fledged trigger driver. * * TODO: * * Add board config elements to allow specification of startup settings. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/slab.h> #include "../iio.h" #include "../trigger.h" static LIST_HEAD(iio_gpio_trigger_list); static DEFINE_MUTEX(iio_gpio_trigger_list_lock); struct iio_gpio_trigger_info { struct mutex in_use; unsigned int irq; }; /* * Need to reference count these triggers and only enable gpio interrupts * as appropriate. */ /* So what functionality do we want in here?... */ /* set high / low as interrupt type? 
*/ static irqreturn_t iio_gpio_trigger_poll(int irq, void *private) { /* Timestamp not currently provided */ iio_trigger_poll(private, 0); return IRQ_HANDLED; } static const struct iio_trigger_ops iio_gpio_trigger_ops = { .owner = THIS_MODULE, }; static int iio_gpio_trigger_probe(struct platform_device *pdev) { struct iio_gpio_trigger_info *trig_info; struct iio_trigger *trig, *trig2; unsigned long irqflags; struct resource *irq_res; int irq, ret = 0, irq_res_cnt = 0; do { irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, irq_res_cnt); if (irq_res == NULL) { if (irq_res_cnt == 0) dev_err(&pdev->dev, "No GPIO IRQs specified"); break; } irqflags = (irq_res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED; for (irq = irq_res->start; irq <= irq_res->end; irq++) { trig = iio_allocate_trigger("irqtrig%d", irq); if (!trig) { ret = -ENOMEM; goto error_free_completed_registrations; } trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL); if (!trig_info) { ret = -ENOMEM; goto error_put_trigger; } trig->private_data = trig_info; trig_info->irq = irq; trig->ops = &iio_gpio_trigger_ops; ret = request_irq(irq, iio_gpio_trigger_poll, irqflags, trig->name, trig); if (ret) { dev_err(&pdev->dev, "request IRQ-%d failed", irq); goto error_free_trig_info; } ret = iio_trigger_register(trig); if (ret) goto error_release_irq; list_add_tail(&trig->alloc_list, &iio_gpio_trigger_list); } irq_res_cnt++; } while (irq_res != NULL); return 0; /* First clean up the partly allocated trigger */ error_release_irq: free_irq(irq, trig); error_free_trig_info: kfree(trig_info); error_put_trigger: iio_put_trigger(trig); error_free_completed_registrations: /* The rest should have been added to the iio_gpio_trigger_list */ list_for_each_entry_safe(trig, trig2, &iio_gpio_trigger_list, alloc_list) { trig_info = trig->private_data; free_irq(gpio_to_irq(trig_info->irq), trig); kfree(trig_info); iio_trigger_unregister(trig); } return ret; } static int iio_gpio_trigger_remove(struct platform_device *pdev) { struct 
iio_trigger *trig, *trig2; struct iio_gpio_trigger_info *trig_info; mutex_lock(&iio_gpio_trigger_list_lock); list_for_each_entry_safe(trig, trig2, &iio_gpio_trigger_list, alloc_list) { trig_info = trig->private_data; iio_trigger_unregister(trig); free_irq(trig_info->irq, trig); kfree(trig_info); iio_put_trigger(trig); } mutex_unlock(&iio_gpio_trigger_list_lock); return 0; } static struct platform_driver iio_gpio_trigger_driver = { .probe = iio_gpio_trigger_probe, .remove = iio_gpio_trigger_remove, .driver = { .name = "iio_gpio_trigger", .owner = THIS_MODULE, }, }; module_platform_driver(iio_gpio_trigger_driver); MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>"); MODULE_DESCRIPTION("Example gpio trigger for the iio subsystem"); MODULE_LICENSE("GPL v2");
gpl-2.0
stedman420/android_kernel_zte_warplte
drivers/staging/iio/dds/ad9834.c
4879
12011
/*
 * AD9833/AD9834/AD9837/AD9838 SPI DDS driver
 *
 * Copyright 2010-2011 Analog Devices Inc.
 *
 * Licensed under the GPL-2.
 */
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/spi/spi.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/module.h>
#include <asm/div64.h>

#include "../iio.h"
#include "../sysfs.h"
#include "dds.h"

#include "ad9834.h"

/* Convert an output frequency (Hz) into a raw frequency-register value:
 * freqreg = fout * 2^AD9834_FREQ_BITS / mclk
 */
static unsigned int ad9834_calc_freqreg(unsigned long mclk, unsigned long fout)
{
	unsigned long long freqreg = (u64) fout * (u64) (1 << AD9834_FREQ_BITS);
	do_div(freqreg, mclk);
	return freqreg;
}

/* Program one FREQ register; the value is shipped as two half-words */
static int ad9834_write_frequency(struct ad9834_state *st,
				  unsigned long addr, unsigned long fout)
{
	unsigned long regval;

	/* Nyquist limit: output cannot exceed half the master clock */
	if (fout > (st->mclk / 2))
		return -EINVAL;

	regval = ad9834_calc_freqreg(st->mclk, fout);

	/* LSBs first, then MSBs (B28 mode: two consecutive 14-bit writes) */
	st->freq_data[0] = cpu_to_be16(addr | (regval &
				       RES_MASK(AD9834_FREQ_BITS / 2)));
	st->freq_data[1] = cpu_to_be16(addr | ((regval >>
				       (AD9834_FREQ_BITS / 2)) &
				       RES_MASK(AD9834_FREQ_BITS / 2)));

	return spi_sync(st->spi, &st->freq_msg);
}

/* Program one PHASE register (12-bit phase value) */
static int ad9834_write_phase(struct ad9834_state *st,
			      unsigned long addr, unsigned long phase)
{
	if (phase > (1 << AD9834_PHASE_BITS))
		return -EINVAL;
	st->data = cpu_to_be16(addr | phase);

	return spi_sync(st->spi, &st->msg);
}

/*
 * sysfs store for all numeric attributes; this_attr->address selects the
 * register/control bit.  Returns len on success, negative errno otherwise.
 */
static ssize_t ad9834_write(struct device *dev,
		struct device_attribute *attr,
		const char *buf,
		size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad9834_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret;
	long val;

	ret = strict_strtoul(buf, 10, &val);
	if (ret)
		goto error_ret;

	/* st->control is shared state; serialize against other stores */
	mutex_lock(&indio_dev->mlock);
	switch ((u32) this_attr->address) {
	case AD9834_REG_FREQ0:
	case AD9834_REG_FREQ1:
		ret = ad9834_write_frequency(st, this_attr->address, val);
		break;
	case AD9834_REG_PHASE0:
	case AD9834_REG_PHASE1:
		ret = ad9834_write_phase(st, this_attr->address, val);
		break;
	case AD9834_OPBITEN:
		if (st->control & AD9834_MODE) {
			ret = -EINVAL;  /* AD9834 reserved mode */
			break;
		}

		if (val)
			st->control |= AD9834_OPBITEN;
		else
			st->control &= ~AD9834_OPBITEN;

		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	case AD9834_PIN_SW:
		if (val)
			st->control |= AD9834_PIN_SW;
		else
			st->control &= ~AD9834_PIN_SW;
		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	case AD9834_FSEL:
	case AD9834_PSEL:
		/* software select also forces pin control off */
		if (val == 0)
			st->control &= ~(this_attr->address | AD9834_PIN_SW);
		else if (val == 1) {
			st->control |= this_attr->address;
			st->control &= ~AD9834_PIN_SW;
		} else {
			ret = -EINVAL;
			break;
		}
		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	case AD9834_RESET:
		/* "out_enable" semantics: nonzero releases RESET */
		if (val)
			st->control &= ~AD9834_RESET;
		else
			st->control |= AD9834_RESET;

		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
		break;
	default:
		ret = -ENODEV;
	}
	mutex_unlock(&indio_dev->mlock);

error_ret:
	return ret ? ret : len;
}

/*
 * sysfs store for the out0/out1 wave type ("sine"/"triangle"/"square");
 * this_attr->address distinguishes output 0 from output 1.
 */
static ssize_t ad9834_store_wavetype(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad9834_state *st = iio_priv(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret = 0;
	/* AD9833/AD9837 can do square on out0; AD9834/AD9838 cannot */
	bool is_ad9833_7 = (st->devid == ID_AD9833) || (st->devid == ID_AD9837);

	mutex_lock(&indio_dev->mlock);
	switch ((u32) this_attr->address) {
	case 0:
		if (sysfs_streq(buf, "sine")) {
			st->control &= ~AD9834_MODE;
			if (is_ad9833_7)
				st->control &= ~AD9834_OPBITEN;
		} else if (sysfs_streq(buf, "triangle")) {
			if (is_ad9833_7) {
				st->control &= ~AD9834_OPBITEN;
				st->control |= AD9834_MODE;
			} else if (st->control & AD9834_OPBITEN) {
				ret = -EINVAL;  /* AD9834 reserved mode */
			} else {
				st->control |= AD9834_MODE;
			}
		} else if (is_ad9833_7 && sysfs_streq(buf, "square")) {
			st->control &= ~AD9834_MODE;
			st->control |= AD9834_OPBITEN;
		} else {
			ret = -EINVAL;
		}

		break;
	case 1:
		/* out1 only supports square, and only when MODE is clear */
		if (sysfs_streq(buf, "square") &&
			!(st->control & AD9834_MODE)) {
			st->control &= ~AD9834_MODE;
			st->control |= AD9834_OPBITEN;
		} else {
			ret = -EINVAL;
		}
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (!ret) {
		st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
		ret = spi_sync(st->spi, &st->msg);
	}
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

/* List the wave types out0 can take in the current device/control state */
static ssize_t ad9834_show_out0_wavetype_available(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad9834_state *st = iio_priv(indio_dev);
	char *str;

	if ((st->devid == ID_AD9833) || (st->devid == ID_AD9837))
		str = "sine triangle square";
	else if (st->control & AD9834_OPBITEN)
		str = "sine";
	else
		str = "sine triangle";

	return sprintf(buf, "%s\n", str);
}


static IIO_DEVICE_ATTR(dds0_out0_wavetype_available, S_IRUGO,
		       ad9834_show_out0_wavetype_available, NULL, 0);

/* out1 can only ever do square, and only while MODE (triangle) is off */
static ssize_t ad9834_show_out1_wavetype_available(struct device *dev,
						struct device_attribute *attr,
						char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad9834_state *st = iio_priv(indio_dev);
	char *str;

	if (st->control & AD9834_MODE)
		str = "";
	else
		str = "square";

	return sprintf(buf, "%s\n", str);
}

static IIO_DEVICE_ATTR(dds0_out1_wavetype_available, S_IRUGO,
		       ad9834_show_out1_wavetype_available, NULL, 0);

/**
 * see dds.h for further information
 */

static IIO_DEV_ATTR_FREQ(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ0);
static IIO_DEV_ATTR_FREQ(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_FREQ1);
static IIO_DEV_ATTR_FREQSYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_FSEL);
static IIO_CONST_ATTR_FREQ_SCALE(0, "1"); /* 1Hz */

static IIO_DEV_ATTR_PHASE(0, 0, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE0);
static IIO_DEV_ATTR_PHASE(0, 1, S_IWUSR, NULL, ad9834_write, AD9834_REG_PHASE1);
static IIO_DEV_ATTR_PHASESYMBOL(0, S_IWUSR, NULL, ad9834_write, AD9834_PSEL);
static IIO_CONST_ATTR_PHASE_SCALE(0, "0.0015339808"); /* 2PI/2^12 rad*/

static IIO_DEV_ATTR_PINCONTROL_EN(0, S_IWUSR, NULL,
	ad9834_write, AD9834_PIN_SW);
static IIO_DEV_ATTR_OUT_ENABLE(0, S_IWUSR, NULL, ad9834_write, AD9834_RESET);
static IIO_DEV_ATTR_OUTY_ENABLE(0, 1, S_IWUSR, NULL,
	ad9834_write, AD9834_OPBITEN);
static IIO_DEV_ATTR_OUT_WAVETYPE(0, 0, ad9834_store_wavetype, 0);
static IIO_DEV_ATTR_OUT_WAVETYPE(0, 1, ad9834_store_wavetype, 1);

/* Full attribute set: AD9834/AD9838 (pin control, out1, sign-bit out) */
static struct attribute *ad9834_attributes[] = {
	&iio_dev_attr_dds0_freq0.dev_attr.attr,
	&iio_dev_attr_dds0_freq1.dev_attr.attr,
	&iio_const_attr_dds0_freq_scale.dev_attr.attr,
	&iio_dev_attr_dds0_phase0.dev_attr.attr,
	&iio_dev_attr_dds0_phase1.dev_attr.attr,
	&iio_const_attr_dds0_phase_scale.dev_attr.attr,
	&iio_dev_attr_dds0_pincontrol_en.dev_attr.attr,
	&iio_dev_attr_dds0_freqsymbol.dev_attr.attr,
	&iio_dev_attr_dds0_phasesymbol.dev_attr.attr,
	&iio_dev_attr_dds0_out_enable.dev_attr.attr,
	&iio_dev_attr_dds0_out1_enable.dev_attr.attr,
	&iio_dev_attr_dds0_out0_wavetype.dev_attr.attr,
	&iio_dev_attr_dds0_out1_wavetype.dev_attr.attr,
	&iio_dev_attr_dds0_out0_wavetype_available.dev_attr.attr,
	&iio_dev_attr_dds0_out1_wavetype_available.dev_attr.attr,
	NULL,
};

/* Reduced attribute set: AD9833/AD9837 have no pin control or out1 */
static struct attribute *ad9833_attributes[] = {
	&iio_dev_attr_dds0_freq0.dev_attr.attr,
	&iio_dev_attr_dds0_freq1.dev_attr.attr,
	&iio_const_attr_dds0_freq_scale.dev_attr.attr,
	&iio_dev_attr_dds0_phase0.dev_attr.attr,
	&iio_dev_attr_dds0_phase1.dev_attr.attr,
	&iio_const_attr_dds0_phase_scale.dev_attr.attr,
	&iio_dev_attr_dds0_freqsymbol.dev_attr.attr,
	&iio_dev_attr_dds0_phasesymbol.dev_attr.attr,
	&iio_dev_attr_dds0_out_enable.dev_attr.attr,
	&iio_dev_attr_dds0_out0_wavetype.dev_attr.attr,
	&iio_dev_attr_dds0_out0_wavetype_available.dev_attr.attr,
	NULL,
};

static const struct attribute_group ad9834_attribute_group = {
	.attrs = ad9834_attributes,
};

static const struct attribute_group ad9833_attribute_group = {
	.attrs = ad9833_attributes,
};

static const struct iio_info ad9834_info = {
	.attrs = &ad9834_attribute_group,
	.driver_module = THIS_MODULE,
};

static const struct iio_info ad9833_info = {
	.attrs = &ad9833_attribute_group,
	.driver_module = THIS_MODULE,
};

/*
 * Probe: allocate the IIO device, set up the SPI messages, put the part
 * in reset (B28 mode), then program the initial frequencies and phases
 * from platform data.
 */
static int __devinit ad9834_probe(struct spi_device *spi)
{
	struct ad9834_platform_data *pdata = spi->dev.platform_data;
	struct ad9834_state *st;
	struct iio_dev *indio_dev;
	struct regulator *reg;
	int ret;

	if (!pdata) {
		dev_dbg(&spi->dev, "no platform data?\n");
		return -ENODEV;
	}

	/* supply regulator is optional; IS_ERR(reg) is checked throughout */
	reg = regulator_get(&spi->dev, "vcc");
	if (!IS_ERR(reg)) {
		ret = regulator_enable(reg);
		if (ret)
			goto error_put_reg;
	}

	indio_dev = iio_allocate_device(sizeof(*st));
	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_disable_reg;
	}
	spi_set_drvdata(spi, indio_dev);
	st = iio_priv(indio_dev);
	st->mclk = pdata->mclk;
	st->spi = spi;
	st->devid = spi_get_device_id(spi)->driver_data;
	st->reg = reg;
	indio_dev->dev.parent = &spi->dev;
	indio_dev->name = spi_get_device_id(spi)->name;
	switch (st->devid) {
	case ID_AD9833:
	case ID_AD9837:
		indio_dev->info = &ad9833_info;
		break;
	default:
		indio_dev->info = &ad9834_info;
		break;
	}
	indio_dev->modes = INDIO_DIRECT_MODE;

	/* Setup default messages */

	st->xfer.tx_buf = &st->data;
	st->xfer.len = 2;

	spi_message_init(&st->msg);
	spi_message_add_tail(&st->xfer, &st->msg);

	/* frequency writes need two transfers with CS toggled in between */
	st->freq_xfer[0].tx_buf = &st->freq_data[0];
	st->freq_xfer[0].len = 2;
	st->freq_xfer[0].cs_change = 1;
	st->freq_xfer[1].tx_buf = &st->freq_data[1];
	st->freq_xfer[1].len = 2;

	spi_message_init(&st->freq_msg);
	spi_message_add_tail(&st->freq_xfer[0], &st->freq_msg);
	spi_message_add_tail(&st->freq_xfer[1], &st->freq_msg);

	/* start in reset, 28-bit frequency writes enabled */
	st->control = AD9834_B28 | AD9834_RESET;

	if (!pdata->en_div2)
		st->control |= AD9834_DIV2;

	if (!pdata->en_signbit_msb_out && (st->devid == ID_AD9834))
		st->control |= AD9834_SIGN_PIB;

	st->data = cpu_to_be16(AD9834_REG_CMD | st->control);
	ret = spi_sync(st->spi, &st->msg);
	if (ret) {
		dev_err(&spi->dev, "device init failed\n");
		goto error_free_device;
	}

	ret = ad9834_write_frequency(st, AD9834_REG_FREQ0, pdata->freq0);
	if (ret)
		goto error_free_device;

	ret = ad9834_write_frequency(st, AD9834_REG_FREQ1, pdata->freq1);
	if (ret)
		goto error_free_device;

	ret = ad9834_write_phase(st, AD9834_REG_PHASE0, pdata->phase0);
	if (ret)
		goto error_free_device;

	ret = ad9834_write_phase(st, AD9834_REG_PHASE1, pdata->phase1);
	if (ret)
		goto error_free_device;

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_free_device;

	return 0;

error_free_device:
	iio_free_device(indio_dev);
error_disable_reg:
	if (!IS_ERR(reg))
		regulator_disable(reg);
error_put_reg:
	if (!IS_ERR(reg))
		regulator_put(reg);
	return ret;
}

/* Remove: unwind probe in reverse order */
static int __devexit ad9834_remove(struct spi_device *spi)
{
	struct iio_dev *indio_dev = spi_get_drvdata(spi);
	struct ad9834_state *st = iio_priv(indio_dev);

	iio_device_unregister(indio_dev);
	if (!IS_ERR(st->reg)) {
		regulator_disable(st->reg);
		regulator_put(st->reg);
	}
	iio_free_device(indio_dev);
	return 0;
}

static const struct spi_device_id ad9834_id[] = {
	{"ad9833", ID_AD9833},
	{"ad9834", ID_AD9834},
	{"ad9837", ID_AD9837},
	{"ad9838", ID_AD9838},
	{}
};
MODULE_DEVICE_TABLE(spi, ad9834_id);

static struct spi_driver ad9834_driver = {
	.driver = {
		.name	= "ad9834",
		.owner	= THIS_MODULE,
	},
	.probe		= ad9834_probe,
	.remove		= __devexit_p(ad9834_remove),
	.id_table	= ad9834_id,
};
module_spi_driver(ad9834_driver);

MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD9833/AD9834/AD9837/AD9838 DDS");
MODULE_LICENSE("GPL v2");
gpl-2.0
jjhiza/Monarch
drivers/staging/iio/accel/adis16201_ring.c
4879
3546
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>

#include "../iio.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"
#include "adis16201.h"

/**
 * adis16201_read_ring_data() read data registers which will be placed into ring
 * @indio_dev: the IIO device the data belongs to
 * @rx: somewhere to pass back the value read
 *
 * Builds one burst SPI message: a read command per output register plus one
 * trailing transfer to clock out the final response.  Returns 0 on success
 * or the negative errno from spi_sync().
 **/
static int adis16201_read_ring_data(struct iio_dev *indio_dev, u8 *rx)
{
	struct spi_message msg;
	struct adis16201_state *st = iio_priv(indio_dev);
	struct spi_transfer xfers[ADIS16201_OUTPUTS + 1];
	int ret;
	int i;

	mutex_lock(&st->buf_lock);

	spi_message_init(&msg);
	memset(xfers, 0, sizeof(xfers));
	for (i = 0; i <= ADIS16201_OUTPUTS; i++) {
		xfers[i].bits_per_word = 8;
		xfers[i].cs_change = 1;
		xfers[i].len = 2;
		xfers[i].delay_usecs = 20;
		/* All but the last transfer carry a register read command. */
		if (i < ADIS16201_OUTPUTS) {
			xfers[i].tx_buf = st->tx + 2 * i;
			st->tx[2 * i]
				= ADIS16201_READ_REG(ADIS16201_SUPPLY_OUT +
						     2 * i);
			st->tx[2 * i + 1] = 0;
		}
		/* Each response arrives one transfer after its command. */
		if (i >= 1)
			xfers[i].rx_buf = rx + 2 * (i - 1);
		spi_message_add_tail(&xfers[i], &msg);
	}

	ret = spi_sync(st->us, &msg);
	if (ret)
		dev_err(&st->us->dev, "problem when burst reading");

	mutex_unlock(&st->buf_lock);

	return ret;
}

/* Whilst this makes a lot of calls to iio_sw_ring functions - it is too device
 * specific to be rolled into the core.
 */
static irqreturn_t adis16201_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct adis16201_state *st = iio_priv(indio_dev);
	struct iio_buffer *ring = indio_dev->buffer;
	int i = 0;
	s16 *data;
	size_t datasize = ring->access->get_bytes_per_datum(ring);

	data = kmalloc(datasize, GFP_KERNEL);
	if (data == NULL) {
		dev_err(&st->us->dev, "memory alloc failed in ring bh");
		/*
		 * An IRQ handler must not return a negative errno; notify
		 * the trigger core anyway so the trigger is re-armed, and
		 * drop this sample (kfree(NULL) below is a no-op).
		 */
		goto done;
	}

	if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength) &&
	    adis16201_read_ring_data(indio_dev, st->rx) >= 0)
		for (; i < bitmap_weight(indio_dev->active_scan_mask,
					 indio_dev->masklength); i++)
			data[i] = be16_to_cpup((__be16 *)&(st->rx[i*2]));

	/* Guaranteed to be aligned with 8 byte boundary */
	if (ring->scan_timestamp)
		*((s64 *)(data + ((i + 3)/4)*4)) = pf->timestamp;

	ring->access->store_to(ring, (u8 *)data, pf->timestamp);

done:
	iio_trigger_notify_done(indio_dev->trig);
	kfree(data);

	return IRQ_HANDLED;
}

/* Release the pollfunc and software ring allocated by configure_ring(). */
void adis16201_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->buffer);
}

static const struct iio_buffer_setup_ops adis16201_ring_setup_ops = {
	.preenable = &iio_sw_buffer_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};

/*
 * Allocate the software ring buffer and poll function and wire them into
 * @indio_dev.  Returns 0 on success or -ENOMEM.
 */
int adis16201_configure_ring(struct iio_dev *indio_dev)
{
	int ret = 0;
	struct iio_buffer *ring;

	ring = iio_sw_rb_allocate(indio_dev);
	if (!ring) {
		ret = -ENOMEM;
		return ret;
	}
	indio_dev->buffer = ring;
	ring->scan_timestamp = true;
	indio_dev->setup_ops = &adis16201_ring_setup_ops;

	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &adis16201_trigger_handler,
						 IRQF_ONESHOT,
						 indio_dev,
						 "adis16201_consumer%d",
						 indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}

	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	iio_sw_rb_free(indio_dev->buffer);
	return ret;
}
gpl-2.0
jgcaap/NewKernel
drivers/staging/iio/adc/ad7476_ring.c
4879
3255
/*
 * Copyright 2010 Analog Devices Inc.
 * Copyright (C) 2008 Jonathan Cameron
 *
 * Licensed under the GPL-2 or later.
 *
 * ad7476_ring.c
 */

#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>

#include "../iio.h"
#include "../buffer.h"
#include "../ring_sw.h"
#include "../trigger_consumer.h"

#include "ad7476.h"

/**
 * ad7476_ring_preenable() setup the parameters of the ring before enabling
 *
 * The complex nature of the setting of the number of bytes per datum is due
 * to this driver currently ensuring that the timestamp is stored at an 8
 * byte boundary.
 **/
static int ad7476_ring_preenable(struct iio_dev *indio_dev)
{
	struct ad7476_state *st = iio_priv(indio_dev);
	struct iio_buffer *ring = indio_dev->buffer;

	st->d_size = bitmap_weight(indio_dev->active_scan_mask,
				   indio_dev->masklength) *
		st->chip_info->channel[0].scan_type.storagebits / 8;

	if (ring->scan_timestamp) {
		st->d_size += sizeof(s64);

		/* Pad so the s64 timestamp lands on an 8 byte boundary. */
		if (st->d_size % sizeof(s64))
			st->d_size += sizeof(s64) - (st->d_size % sizeof(s64));
	}

	if (indio_dev->buffer->access->set_bytes_per_datum)
		indio_dev->buffer->access->
			set_bytes_per_datum(indio_dev->buffer, st->d_size);

	return 0;
}

static irqreturn_t ad7476_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct ad7476_state *st = iio_priv(indio_dev);
	s64 time_ns;
	__u8 *rxbuf;
	int b_sent;

	rxbuf = kzalloc(st->d_size, GFP_KERNEL);
	if (rxbuf == NULL)
		/*
		 * An IRQ handler must not return a negative errno; fall
		 * through so the trigger core is still notified (kfree(NULL)
		 * is a no-op) and this sample is simply dropped.
		 */
		goto done;

	b_sent = spi_read(st->spi, rxbuf,
			  st->chip_info->channel[0].scan_type.storagebits / 8);
	if (b_sent < 0)
		goto done;

	time_ns = iio_get_time_ns();

	if (indio_dev->buffer->scan_timestamp)
		memcpy(rxbuf + st->d_size - sizeof(s64),
		       &time_ns, sizeof(time_ns));

	indio_dev->buffer->access->store_to(indio_dev->buffer, rxbuf, time_ns);
done:
	iio_trigger_notify_done(indio_dev->trig);
	kfree(rxbuf);

	return IRQ_HANDLED;
}

static const struct iio_buffer_setup_ops ad7476_ring_setup_ops = {
	.preenable = &ad7476_ring_preenable,
	.postenable = &iio_triggered_buffer_postenable,
	.predisable = &iio_triggered_buffer_predisable,
};

/*
 * Allocate the software ring buffer and poll function and attach them to
 * @indio_dev.  Returns 0 on success or -ENOMEM.
 */
int ad7476_register_ring_funcs_and_init(struct iio_dev *indio_dev)
{
	struct ad7476_state *st = iio_priv(indio_dev);
	int ret = 0;

	indio_dev->buffer = iio_sw_rb_allocate(indio_dev);
	if (!indio_dev->buffer) {
		ret = -ENOMEM;
		goto error_ret;
	}
	indio_dev->pollfunc
		= iio_alloc_pollfunc(NULL,
				     &ad7476_trigger_handler,
				     IRQF_ONESHOT,
				     indio_dev,
				     "%s_consumer%d",
				     spi_get_device_id(st->spi)->name,
				     indio_dev->id);
	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_deallocate_sw_rb;
	}

	/* Ring buffer functions - here trigger setup related */
	indio_dev->setup_ops = &ad7476_ring_setup_ops;
	indio_dev->buffer->scan_timestamp = true;

	/* Flag that polled ring buffering is possible */
	indio_dev->modes |= INDIO_BUFFER_TRIGGERED;
	return 0;

error_deallocate_sw_rb:
	iio_sw_rb_free(indio_dev->buffer);
error_ret:
	return ret;
}

void ad7476_ring_cleanup(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	iio_sw_rb_free(indio_dev->buffer);
}
gpl-2.0
CyanHacker-Lollipop/kernel_oneplus_msm8974
drivers/staging/iio/trigger/iio-trig-gpio.c
4879
4025
/* * Industrial I/O - gpio based trigger support * * Copyright (c) 2008 Jonathan Cameron * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * Currently this is more of a functioning proof of concept than a full * fledged trigger driver. * * TODO: * * Add board config elements to allow specification of startup settings. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/slab.h> #include "../iio.h" #include "../trigger.h" static LIST_HEAD(iio_gpio_trigger_list); static DEFINE_MUTEX(iio_gpio_trigger_list_lock); struct iio_gpio_trigger_info { struct mutex in_use; unsigned int irq; }; /* * Need to reference count these triggers and only enable gpio interrupts * as appropriate. */ /* So what functionality do we want in here?... */ /* set high / low as interrupt type? 
*/ static irqreturn_t iio_gpio_trigger_poll(int irq, void *private) { /* Timestamp not currently provided */ iio_trigger_poll(private, 0); return IRQ_HANDLED; } static const struct iio_trigger_ops iio_gpio_trigger_ops = { .owner = THIS_MODULE, }; static int iio_gpio_trigger_probe(struct platform_device *pdev) { struct iio_gpio_trigger_info *trig_info; struct iio_trigger *trig, *trig2; unsigned long irqflags; struct resource *irq_res; int irq, ret = 0, irq_res_cnt = 0; do { irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, irq_res_cnt); if (irq_res == NULL) { if (irq_res_cnt == 0) dev_err(&pdev->dev, "No GPIO IRQs specified"); break; } irqflags = (irq_res->flags & IRQF_TRIGGER_MASK) | IRQF_SHARED; for (irq = irq_res->start; irq <= irq_res->end; irq++) { trig = iio_allocate_trigger("irqtrig%d", irq); if (!trig) { ret = -ENOMEM; goto error_free_completed_registrations; } trig_info = kzalloc(sizeof(*trig_info), GFP_KERNEL); if (!trig_info) { ret = -ENOMEM; goto error_put_trigger; } trig->private_data = trig_info; trig_info->irq = irq; trig->ops = &iio_gpio_trigger_ops; ret = request_irq(irq, iio_gpio_trigger_poll, irqflags, trig->name, trig); if (ret) { dev_err(&pdev->dev, "request IRQ-%d failed", irq); goto error_free_trig_info; } ret = iio_trigger_register(trig); if (ret) goto error_release_irq; list_add_tail(&trig->alloc_list, &iio_gpio_trigger_list); } irq_res_cnt++; } while (irq_res != NULL); return 0; /* First clean up the partly allocated trigger */ error_release_irq: free_irq(irq, trig); error_free_trig_info: kfree(trig_info); error_put_trigger: iio_put_trigger(trig); error_free_completed_registrations: /* The rest should have been added to the iio_gpio_trigger_list */ list_for_each_entry_safe(trig, trig2, &iio_gpio_trigger_list, alloc_list) { trig_info = trig->private_data; free_irq(gpio_to_irq(trig_info->irq), trig); kfree(trig_info); iio_trigger_unregister(trig); } return ret; } static int iio_gpio_trigger_remove(struct platform_device *pdev) { struct 
iio_trigger *trig, *trig2; struct iio_gpio_trigger_info *trig_info; mutex_lock(&iio_gpio_trigger_list_lock); list_for_each_entry_safe(trig, trig2, &iio_gpio_trigger_list, alloc_list) { trig_info = trig->private_data; iio_trigger_unregister(trig); free_irq(trig_info->irq, trig); kfree(trig_info); iio_put_trigger(trig); } mutex_unlock(&iio_gpio_trigger_list_lock); return 0; } static struct platform_driver iio_gpio_trigger_driver = { .probe = iio_gpio_trigger_probe, .remove = iio_gpio_trigger_remove, .driver = { .name = "iio_gpio_trigger", .owner = THIS_MODULE, }, }; module_platform_driver(iio_gpio_trigger_driver); MODULE_AUTHOR("Jonathan Cameron <jic23@cam.ac.uk>"); MODULE_DESCRIPTION("Example gpio trigger for the iio subsystem"); MODULE_LICENSE("GPL v2");
gpl-2.0
evilwombat/gopro-linux
arch/mips/powertv/asic/asic-cronus.c
7695
4328
/*
 * Locations of devices in the Cronus ASIC
 *
 * Copyright (C) 2005-2009 Scientific-Atlanta, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 *
 * Author:       Ken Eppinett
 *               David Schleef <ds@schleef.org>
 *
 * Description:  Defines the platform resources for the SA settop.
 */

#include <linux/init.h>
#include <asm/mach-powertv/asic.h>

/* Translate a register offset into a physical address in Cronus I/O space. */
#define CRONUS_ADDR(x)	(CRONUS_IO_BASE + (x))

/*
 * Physical addresses of every register the platform code touches.
 * Only .phys is filled in here; the virtual mappings are set up later
 * by the asic core.  Do not change any offset without the Cronus
 * register map documentation at hand.
 */
const struct register_map cronus_register_map __initdata = {
	/* External interrupt controller / boot configuration */
	.eic_slow0_strt_add = {.phys = CRONUS_ADDR(0x000000)},
	.eic_cfg_bits = {.phys = CRONUS_ADDR(0x000038)},
	.eic_ready_status = {.phys = CRONUS_ADDR(0x00004C)},

	/* Chip version / revision identification registers */
	.chipver3 = {.phys = CRONUS_ADDR(0x2A0800)},
	.chipver2 = {.phys = CRONUS_ADDR(0x2A0804)},
	.chipver1 = {.phys = CRONUS_ADDR(0x2A0808)},
	.chipver0 = {.phys = CRONUS_ADDR(0x2A080C)},

	/* The registers of IRBlaster */
	.uart1_intstat = {.phys = CRONUS_ADDR(0x2A1800)},
	.uart1_inten = {.phys = CRONUS_ADDR(0x2A1804)},
	.uart1_config1 = {.phys = CRONUS_ADDR(0x2A1808)},
	.uart1_config2 = {.phys = CRONUS_ADDR(0x2A180C)},
	.uart1_divisorhi = {.phys = CRONUS_ADDR(0x2A1810)},
	.uart1_divisorlo = {.phys = CRONUS_ADDR(0x2A1814)},
	.uart1_data = {.phys = CRONUS_ADDR(0x2A1818)},
	.uart1_status = {.phys = CRONUS_ADDR(0x2A181C)},

	/* Interrupt controller: status, config and enable banks */
	.int_stat_3 = {.phys = CRONUS_ADDR(0x2A2800)},
	.int_stat_2 = {.phys = CRONUS_ADDR(0x2A2804)},
	.int_stat_1 = {.phys = CRONUS_ADDR(0x2A2808)},
	.int_stat_0 = {.phys = CRONUS_ADDR(0x2A280C)},
	.int_config = {.phys = CRONUS_ADDR(0x2A2810)},
	.int_int_scan = {.phys = CRONUS_ADDR(0x2A2818)},
	.ien_int_3 = {.phys = CRONUS_ADDR(0x2A2830)},
	.ien_int_2 = {.phys = CRONUS_ADDR(0x2A2834)},
	.ien_int_1 = {.phys = CRONUS_ADDR(0x2A2838)},
	.ien_int_0 = {.phys = CRONUS_ADDR(0x2A283C)},

	/* Interrupt level selection, banks 3..0, 2 bits per source */
	.int_level_3_3 = {.phys = CRONUS_ADDR(0x2A2880)},
	.int_level_3_2 = {.phys = CRONUS_ADDR(0x2A2884)},
	.int_level_3_1 = {.phys = CRONUS_ADDR(0x2A2888)},
	.int_level_3_0 = {.phys = CRONUS_ADDR(0x2A288C)},
	.int_level_2_3 = {.phys = CRONUS_ADDR(0x2A2890)},
	.int_level_2_2 = {.phys = CRONUS_ADDR(0x2A2894)},
	.int_level_2_1 = {.phys = CRONUS_ADDR(0x2A2898)},
	.int_level_2_0 = {.phys = CRONUS_ADDR(0x2A289C)},
	.int_level_1_3 = {.phys = CRONUS_ADDR(0x2A28A0)},
	.int_level_1_2 = {.phys = CRONUS_ADDR(0x2A28A4)},
	.int_level_1_1 = {.phys = CRONUS_ADDR(0x2A28A8)},
	.int_level_1_0 = {.phys = CRONUS_ADDR(0x2A28AC)},
	.int_level_0_3 = {.phys = CRONUS_ADDR(0x2A28B0)},
	.int_level_0_2 = {.phys = CRONUS_ADDR(0x2A28B4)},
	.int_level_0_1 = {.phys = CRONUS_ADDR(0x2A28B8)},
	.int_level_0_0 = {.phys = CRONUS_ADDR(0x2A28BC)},
	.int_docsis_en = {.phys = CRONUS_ADDR(0x2A28F4)},

	/* Clock / PLL and test registers */
	.mips_pll_setup = {.phys = CRONUS_ADDR(0x1C0000)},
	.fs432x4b4_usb_ctl = {.phys = CRONUS_ADDR(0x1C0028)},
	.test_bus = {.phys = CRONUS_ADDR(0x1C00CC)},
	.crt_spare = {.phys = CRONUS_ADDR(0x1c00d4)},

	/* USB (OHCI/EHCI) control and STBus interface registers */
	.usb2_ohci_int_mask = {.phys = CRONUS_ADDR(0x20000C)},
	.usb2_strap = {.phys = CRONUS_ADDR(0x200014)},
	.ehci_hcapbase = {.phys = CRONUS_ADDR(0x21FE00)},
	.ohci_hc_revision = {.phys = CRONUS_ADDR(0x21fc00)},
	.bcm1_bs_lmi_steer = {.phys = CRONUS_ADDR(0x2E0008)},
	.usb2_control = {.phys = CRONUS_ADDR(0x2E004C)},
	.usb2_stbus_obc = {.phys = CRONUS_ADDR(0x21FF00)},
	.usb2_stbus_mess_size = {.phys = CRONUS_ADDR(0x21FF04)},
	.usb2_stbus_chunk_size = {.phys = CRONUS_ADDR(0x21FF08)},

	.pcie_regs = {.phys = CRONUS_ADDR(0x220000)},

	/* Timers, GPIO, watchdog and front panel */
	.tim_ch = {.phys = CRONUS_ADDR(0x2A2C10)},
	.tim_cl = {.phys = CRONUS_ADDR(0x2A2C14)},
	.gpio_dout = {.phys = CRONUS_ADDR(0x2A2C20)},
	.gpio_din = {.phys = CRONUS_ADDR(0x2A2C24)},
	.gpio_dir = {.phys = CRONUS_ADDR(0x2A2C2C)},
	.watchdog = {.phys = CRONUS_ADDR(0x2A2C30)},
	.front_panel = {.phys = CRONUS_ADDR(0x2A3800)},
};
gpl-2.0
froggy666uk/Froggy_SensMod_CM10.1
arch/arm/mach-omap2/clkt2xxx_apll.c
7951
3286
/* * OMAP2xxx APLL clock control functions * * Copyright (C) 2005-2008 Texas Instruments, Inc. * Copyright (C) 2004-2010 Nokia Corporation * * Contacts: * Richard Woodruff <r-woodruff2@ti.com> * Paul Walmsley * * Based on earlier work by Tuukka Tikkanen, Tony Lindgren, * Gordon McNutt and RidgeRun, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #undef DEBUG #include <linux/kernel.h> #include <linux/clk.h> #include <linux/io.h> #include <plat/clock.h> #include <plat/prcm.h> #include "clock.h" #include "clock2xxx.h" #include "cm2xxx_3xxx.h" #include "cm-regbits-24xx.h" /* CM_CLKEN_PLL.EN_{54,96}M_PLL options (24XX) */ #define EN_APLL_STOPPED 0 #define EN_APLL_LOCKED 3 /* CM_CLKSEL1_PLL.APLLS_CLKIN options (24XX) */ #define APLLS_CLKIN_19_2MHZ 0 #define APLLS_CLKIN_13MHZ 2 #define APLLS_CLKIN_12MHZ 3 void __iomem *cm_idlest_pll; /* Private functions */ /* Enable an APLL if off */ static int omap2_clk_apll_enable(struct clk *clk, u32 status_mask) { u32 cval, apll_mask; apll_mask = EN_APLL_LOCKED << clk->enable_bit; cval = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN); if ((cval & apll_mask) == apll_mask) return 0; /* apll already enabled */ cval &= ~apll_mask; cval |= apll_mask; omap2_cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN); omap2_cm_wait_idlest(cm_idlest_pll, status_mask, OMAP24XX_CM_IDLEST_VAL, clk->name); /* * REVISIT: Should we return an error code if omap2_wait_clock_ready() * fails? 
*/ return 0; } static int omap2_clk_apll96_enable(struct clk *clk) { return omap2_clk_apll_enable(clk, OMAP24XX_ST_96M_APLL_MASK); } static int omap2_clk_apll54_enable(struct clk *clk) { return omap2_clk_apll_enable(clk, OMAP24XX_ST_54M_APLL_MASK); } static void _apll96_allow_idle(struct clk *clk) { omap2xxx_cm_set_apll96_auto_low_power_stop(); } static void _apll96_deny_idle(struct clk *clk) { omap2xxx_cm_set_apll96_disable_autoidle(); } static void _apll54_allow_idle(struct clk *clk) { omap2xxx_cm_set_apll54_auto_low_power_stop(); } static void _apll54_deny_idle(struct clk *clk) { omap2xxx_cm_set_apll54_disable_autoidle(); } /* Stop APLL */ static void omap2_clk_apll_disable(struct clk *clk) { u32 cval; cval = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKEN); cval &= ~(EN_APLL_LOCKED << clk->enable_bit); omap2_cm_write_mod_reg(cval, PLL_MOD, CM_CLKEN); } /* Public data */ const struct clkops clkops_apll96 = { .enable = omap2_clk_apll96_enable, .disable = omap2_clk_apll_disable, .allow_idle = _apll96_allow_idle, .deny_idle = _apll96_deny_idle, }; const struct clkops clkops_apll54 = { .enable = omap2_clk_apll54_enable, .disable = omap2_clk_apll_disable, .allow_idle = _apll54_allow_idle, .deny_idle = _apll54_deny_idle, }; /* Public functions */ u32 omap2xxx_get_apll_clkin(void) { u32 aplls, srate = 0; aplls = omap2_cm_read_mod_reg(PLL_MOD, CM_CLKSEL1); aplls &= OMAP24XX_APLLS_CLKIN_MASK; aplls >>= OMAP24XX_APLLS_CLKIN_SHIFT; if (aplls == APLLS_CLKIN_19_2MHZ) srate = 19200000; else if (aplls == APLLS_CLKIN_13MHZ) srate = 13000000; else if (aplls == APLLS_CLKIN_12MHZ) srate = 12000000; return srate; }
gpl-2.0
shark147/k2ul-kernel
drivers/ide/ide-io.c
11791
24214
/*
 *	IDE I/O functions
 *
 *	Basic PIO and command management functionality.
 *
 * This code was split off from ide.c. See it for history and original
 * copyrights.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * For the avoidance of doubt the "preferred form" of this code is one which
 * is in an open non patent encumbered format. Where cryptographic key signing
 * forms part of the process of creating an executable the information
 * including keys needed to generate an equivalently functional executable
 * are deemed to be part of the source code.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/errno.h>
#include <linux/genhd.h>
#include <linux/blkpg.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ide.h>
#include <linux/completion.h>
#include <linux/reboot.h>
#include <linux/cdrom.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/kmod.h>
#include <linux/scatterlist.h>
#include <linux/bitops.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <asm/io.h>

/* Complete @nr_bytes of @rq with @error; returns blk_end_request()'s result. */
int ide_end_rq(ide_drive_t *drive, struct request *rq, int error,
	       unsigned int nr_bytes)
{
	/*
	 * decide whether to reenable DMA -- 3 is a random magic for now,
	 * if we DMA timeout more than 3 times, just stay in PIO
	 */
	if ((drive->dev_flags & IDE_DFLAG_DMA_PIO_RETRY) &&
	    drive->retry_pio <= 3) {
		drive->dev_flags &= ~IDE_DFLAG_DMA_PIO_RETRY;
		ide_dma_on(drive);
	}

	return blk_end_request(rq, error, nr_bytes);
}
EXPORT_SYMBOL_GPL(ide_end_rq);

/*
 * Record command completion status/error, read back the taskfile, detect a
 * finished head-unload (park) command, and copy the result back into the
 * originating taskfile request if there is one.
 */
void ide_complete_cmd(ide_drive_t *drive, struct ide_cmd *cmd, u8 stat, u8 err)
{
	const struct ide_tp_ops *tp_ops = drive->hwif->tp_ops;
	struct ide_taskfile *tf = &cmd->tf;
	struct request *rq = cmd->rq;
	u8 tf_cmd = tf->command;

	tf->error = err;
	tf->status = stat;

	if (cmd->ftf_flags & IDE_FTFLAG_IN_DATA) {
		u8 data[2];

		tp_ops->input_data(drive, cmd, data, 2);

		cmd->tf.data = data[0];
		cmd->hob.data = data[1];
	}

	ide_tf_readback(drive, cmd);

	if ((cmd->tf_flags & IDE_TFLAG_CUSTOM_HANDLER) &&
	    tf_cmd == ATA_CMD_IDLEIMMEDIATE) {
		/* 0xc4 in LBA low signals a successful head unload */
		if (tf->lbal != 0xc4) {
			printk(KERN_ERR "%s: head unload failed!\n",
			       drive->name);
			ide_tf_dump(drive->name, cmd);
		} else
			drive->dev_flags |= IDE_DFLAG_PARKED;
	}

	if (rq && rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		struct ide_cmd *orig_cmd = rq->special;

		if (cmd->tf_flags & IDE_TFLAG_DYN)
			kfree(orig_cmd);
		else
			memcpy(orig_cmd, cmd, sizeof(*cmd));
	}
}

/* Complete the port's current request; clears hwif->rq when fully done. */
int ide_complete_rq(ide_drive_t *drive, int error, unsigned int nr_bytes)
{
	ide_hwif_t *hwif = drive->hwif;
	struct request *rq = hwif->rq;
	int rc;

	/*
	 * if failfast is set on a request, override number of sectors
	 * and complete the whole request right now
	 */
	if (blk_noretry_request(rq) && error <= 0)
		nr_bytes = blk_rq_sectors(rq) << 9;

	rc = ide_end_rq(drive, rq, error, nr_bytes);
	if (rc == 0)
		hwif->rq = NULL;

	return rc;
}
EXPORT_SYMBOL(ide_complete_rq);

/* Fail @rq outright, choosing an error code appropriate to the media type. */
void ide_kill_rq(ide_drive_t *drive, struct request *rq)
{
	u8 drv_req = (rq->cmd_type == REQ_TYPE_SPECIAL) && rq->rq_disk;
	u8 media = drive->media;

	drive->failed_pc = NULL;

	if ((media == ide_floppy || media == ide_tape) && drv_req) {
		rq->errors = 0;
	} else {
		if (media == ide_tape)
			rq->errors = IDE_DRV_ERROR_GENERAL;
		else if (rq->cmd_type != REQ_TYPE_FS && rq->errors == 0)
			rq->errors = -EIO;
	}

	ide_complete_rq(drive, -EIO, blk_rq_bytes(rq));
}

/* Fill @tf with an INIT_DEV_PARAMS command from the drive geometry. */
static void ide_tf_set_specify_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->lbal = drive->sect;
	tf->lbam = drive->cyl;
	tf->lbah = drive->cyl >> 8;
	tf->device = (drive->head - 1) | drive->select;
	tf->command = ATA_CMD_INIT_DEV_PARAMS;
}

/* Fill @tf with a RESTORE (recalibrate) command. */
static void ide_tf_set_restore_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->sect;
	tf->command = ATA_CMD_RESTORE;
}

/* Fill @tf with a SET_MULTI command using the requested multi-sector count. */
static void ide_tf_set_setmult_cmd(ide_drive_t *drive, struct ide_taskfile *tf)
{
	tf->nsect = drive->mult_req;
	tf->command = ATA_CMD_SET_MULTI;
}

/**
 *	do_special		-	issue some special commands
 *	@drive: drive the command is for
 *
 *	do_special() is used to issue ATA_CMD_INIT_DEV_PARAMS,
 *	ATA_CMD_RESTORE and ATA_CMD_SET_MULTI commands to a drive.
 */
static ide_startstop_t do_special(ide_drive_t *drive)
{
	struct ide_cmd cmd;

#ifdef DEBUG
	printk(KERN_DEBUG "%s: %s: 0x%02x\n", drive->name, __func__,
		drive->special_flags);
#endif
	if (drive->media != ide_disk) {
		drive->special_flags = 0;
		drive->mult_req = 0;
		return ide_stopped;
	}

	memset(&cmd, 0, sizeof(cmd));
	cmd.protocol = ATA_PROT_NODATA;

	/* One special flag is serviced per call; caller re-enters for more. */
	if (drive->special_flags & IDE_SFLAG_SET_GEOMETRY) {
		drive->special_flags &= ~IDE_SFLAG_SET_GEOMETRY;
		ide_tf_set_specify_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_RECALIBRATE) {
		drive->special_flags &= ~IDE_SFLAG_RECALIBRATE;
		ide_tf_set_restore_cmd(drive, &cmd.tf);
	} else if (drive->special_flags & IDE_SFLAG_SET_MULTMODE) {
		drive->special_flags &= ~IDE_SFLAG_SET_MULTMODE;
		ide_tf_set_setmult_cmd(drive, &cmd.tf);
	} else
		BUG();

	cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE;
	cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE;
	cmd.tf_flags = IDE_TFLAG_CUSTOM_HANDLER;

	do_rw_taskfile(drive, &cmd);

	return ide_started;
}

/* Map the request's bio segments into the hwif scatterlist. */
void ide_map_sg(ide_drive_t *drive, struct ide_cmd *cmd)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct request *rq = cmd->rq;

	cmd->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
}
EXPORT_SYMBOL_GPL(ide_map_sg);

/* Reset per-command scatter-gather bookkeeping for @nr_bytes of transfer. */
void ide_init_sg_cmd(struct ide_cmd *cmd, unsigned int nr_bytes)
{
	cmd->nbytes = cmd->nleft = nr_bytes;
	cmd->cursg_ofs = 0;
	cmd->cursg = NULL;
}
EXPORT_SYMBOL_GPL(ide_init_sg_cmd);

/**
 *	execute_drive_command	-	issue special drive command
 *	@drive: the drive to issue the command on
 *	@rq: the request structure holding the command
 *
 *	execute_drive_cmd() issues a special drive command, usually
 *	initiated by ioctl() from the external hdparm program. The
 *	command can be a drive command, drive task or taskfile
 *	operation. Weirdly you can call it with NULL to wait for
 *	all commands to finish. Don't do this as that is due to change
 */
static ide_startstop_t execute_drive_cmd (ide_drive_t *drive,
		struct request *rq)
{
	struct ide_cmd *cmd = rq->special;

	if (cmd) {
		if (cmd->protocol == ATA_PROT_PIO) {
			ide_init_sg_cmd(cmd, blk_rq_sectors(rq) << 9);
			ide_map_sg(drive, cmd);
		}

		return do_rw_taskfile(drive, cmd);
	}

	/*
	 * NULL is actually a valid way of waiting for
	 * all current requests to be flushed from the queue.
	 */
#ifdef DEBUG
	printk("%s: DRIVE_CMD (null)\n", drive->name);
#endif
	rq->errors = 0;
	ide_complete_rq(drive, 0, blk_rq_bytes(rq));

	return ide_stopped;
}

/* Dispatch an IDE-internal special request by its command byte. */
static ide_startstop_t ide_special_rq(ide_drive_t *drive, struct request *rq)
{
	u8 cmd = rq->cmd[0];

	switch (cmd) {
	case REQ_PARK_HEADS:
	case REQ_UNPARK_HEADS:
		return ide_do_park_unpark(drive, rq);
	case REQ_DEVSET_EXEC:
		return ide_do_devset(drive, rq);
	case REQ_DRIVE_RESET:
		return ide_do_reset(drive);
	default:
		BUG();
	}
}

/**
 *	start_request	-	start of I/O and command issuing for IDE
 *
 *	start_request() initiates handling of a new I/O request. It
 *	accepts commands and I/O (read/write) requests.
 *
 *	FIXME: this function needs a rename
 */
static ide_startstop_t start_request (ide_drive_t *drive, struct request *rq)
{
	ide_startstop_t startstop;

	BUG_ON(!(rq->cmd_flags & REQ_STARTED));

#ifdef DEBUG
	printk("%s: start_request: current=0x%08lx\n",
		drive->hwif->name, (unsigned long) rq);
#endif

	/* bail early if we've exceeded max_failures */
	if (drive->max_failures && (drive->failures > drive->max_failures)) {
		rq->cmd_flags |= REQ_FAILED;
		goto kill_rq;
	}

	if (blk_pm_request(rq))
		ide_check_pm_state(drive, rq);

	drive->hwif->tp_ops->dev_select(drive);
	if (ide_wait_stat(&startstop, drive, drive->ready_stat,
			  ATA_BUSY | ATA_DRQ, WAIT_READY)) {
		printk(KERN_ERR "%s: drive not ready for command\n",
		       drive->name);
		return startstop;
	}

	/* Pending special flags are serviced first via do_special(). */
	if (drive->special_flags == 0) {
		struct ide_driver *drv;

		/*
		 * We reset the drive so we need to issue a SETFEATURES.
		 * Do it _after_ do_special() restored device parameters.
		 */
		if (drive->current_speed == 0xff)
			ide_config_drive_speed(drive, drive->desired_speed);

		if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE)
			return execute_drive_cmd(drive, rq);
		else if (blk_pm_request(rq)) {
			struct request_pm_state *pm = rq->special;
#ifdef DEBUG_PM
			printk("%s: start_power_step(step: %d)\n",
				drive->name, pm->pm_step);
#endif
			startstop = ide_start_power_step(drive, rq);
			if (startstop == ide_stopped &&
			    pm->pm_step == IDE_PM_COMPLETED)
				ide_complete_pm_rq(drive, rq);
			return startstop;
		} else if (!rq->rq_disk && rq->cmd_type == REQ_TYPE_SPECIAL)
			/*
			 * TODO: Once all ULDs have been modified to
			 * check for specific op codes rather than
			 * blindly accepting any special request, the
			 * check for ->rq_disk above may be replaced
			 * by a more suitable mechanism or even
			 * dropped entirely.
			 */
			return ide_special_rq(drive, rq);

		drv = *(struct ide_driver **)rq->rq_disk->private_data;

		return drv->do_request(drive, rq, blk_rq_pos(rq));
	}
	return do_special(drive);
kill_rq:
	ide_kill_rq(drive, rq);
	return ide_stopped;
}

/**
 *	ide_stall_queue		-	pause an IDE device
 *	@drive: drive to stall
 *	@timeout: time to stall for (jiffies)
 *
 *	ide_stall_queue() can be used by a drive to give excess bandwidth back
 *	to the port by sleeping for timeout jiffies.
 */
void ide_stall_queue (ide_drive_t *drive, unsigned long timeout)
{
	if (timeout > WAIT_WORSTCASE)
		timeout = WAIT_WORSTCASE;
	drive->sleep = timeout + jiffies;
	drive->dev_flags |= IDE_DFLAG_SLEEPING;
}
EXPORT_SYMBOL(ide_stall_queue);

/*
 * Try to claim the port; returns 1 if already busy, 0 on success.
 * Callers hold hwif->lock, so no atomicity is needed here.
 */
static inline int ide_lock_port(ide_hwif_t *hwif)
{
	if (hwif->busy)
		return 1;

	hwif->busy = 1;

	return 0;
}

static inline void ide_unlock_port(ide_hwif_t *hwif)
{
	hwif->busy = 0;
}

/*
 * For serialized hosts, atomically claim the whole host and invoke the
 * board-specific get_lock hook.  Returns non-zero if the host was busy.
 */
static inline int ide_lock_host(struct ide_host *host, ide_hwif_t *hwif)
{
	int rc = 0;

	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		rc = test_and_set_bit_lock(IDE_HOST_BUSY, &host->host_busy);
		if (rc == 0) {
			if (host->get_lock)
				host->get_lock(ide_intr, hwif);
		}
	}
	return rc;
}

static inline void ide_unlock_host(struct ide_host *host)
{
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (host->release_lock)
			host->release_lock();
		clear_bit_unlock(IDE_HOST_BUSY, &host->host_busy);
	}
}

/* Requeue @rq (if any) and kick the queue shortly; caller holds queue_lock. */
static void __ide_requeue_and_plug(struct request_queue *q, struct request *rq)
{
	if (rq)
		blk_requeue_request(q, rq);
	if (rq || blk_peek_request(q)) {
		/* Use 3ms as that was the old plug delay */
		blk_delay_queue(q, 3);
	}
}

void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
{
	struct request_queue *q = drive->queue;
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__ide_requeue_and_plug(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/*
 * Issue a new request to a device.
*/ void do_ide_request(struct request_queue *q) { ide_drive_t *drive = q->queuedata; ide_hwif_t *hwif = drive->hwif; struct ide_host *host = hwif->host; struct request *rq = NULL; ide_startstop_t startstop; unsigned long queue_run_ms = 3; /* old plug delay */ spin_unlock_irq(q->queue_lock); /* HLD do_request() callback might sleep, make sure it's okay */ might_sleep(); if (ide_lock_host(host, hwif)) goto plug_device_2; spin_lock_irq(&hwif->lock); if (!ide_lock_port(hwif)) { ide_hwif_t *prev_port; WARN_ON_ONCE(hwif->rq); repeat: prev_port = hwif->host->cur_port; if (drive->dev_flags & IDE_DFLAG_SLEEPING && time_after(drive->sleep, jiffies)) { unsigned long left = jiffies - drive->sleep; queue_run_ms = jiffies_to_msecs(left + 1); ide_unlock_port(hwif); goto plug_device; } if ((hwif->host->host_flags & IDE_HFLAG_SERIALIZE) && hwif != prev_port) { ide_drive_t *cur_dev = prev_port ? prev_port->cur_dev : NULL; /* * set nIEN for previous port, drives in the * quirk list may not like intr setups/cleanups */ if (cur_dev && (cur_dev->dev_flags & IDE_DFLAG_NIEN_QUIRK) == 0) prev_port->tp_ops->write_devctl(prev_port, ATA_NIEN | ATA_DEVCTL_OBS); hwif->host->cur_port = hwif; } hwif->cur_dev = drive; drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED); spin_unlock_irq(&hwif->lock); spin_lock_irq(q->queue_lock); /* * we know that the queue isn't empty, but this can happen * if the q->prep_rq_fn() decides to kill a request */ if (!rq) rq = blk_fetch_request(drive->queue); spin_unlock_irq(q->queue_lock); spin_lock_irq(&hwif->lock); if (!rq) { ide_unlock_port(hwif); goto out; } /* * Sanity: don't accept a request that isn't a PM request * if we are currently power managed. This is very important as * blk_stop_queue() doesn't prevent the blk_fetch_request() * above to return us whatever is in the queue. Since we call * ide_do_request() ourselves, we end up taking requests while * the queue is blocked... * * We let requests forced at head of queue with ide-preempt * though. 
I hope that doesn't happen too much, hopefully not * unless the subdriver triggers such a thing in its own PM * state machine. */ if ((drive->dev_flags & IDE_DFLAG_BLOCKED) && blk_pm_request(rq) == 0 && (rq->cmd_flags & REQ_PREEMPT) == 0) { /* there should be no pending command at this point */ ide_unlock_port(hwif); goto plug_device; } hwif->rq = rq; spin_unlock_irq(&hwif->lock); startstop = start_request(drive, rq); spin_lock_irq(&hwif->lock); if (startstop == ide_stopped) { rq = hwif->rq; hwif->rq = NULL; goto repeat; } } else goto plug_device; out: spin_unlock_irq(&hwif->lock); if (rq == NULL) ide_unlock_host(host); spin_lock_irq(q->queue_lock); return; plug_device: spin_unlock_irq(&hwif->lock); ide_unlock_host(host); plug_device_2: spin_lock_irq(q->queue_lock); __ide_requeue_and_plug(q, rq); } static int drive_is_ready(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 stat = 0; if (drive->waiting_for_dma) return hwif->dma_ops->dma_test_irq(drive); if (hwif->io_ports.ctl_addr && (hwif->host_flags & IDE_HFLAG_BROKEN_ALTSTATUS) == 0) stat = hwif->tp_ops->read_altstatus(hwif); else /* Note: this may clear a pending IRQ!! */ stat = hwif->tp_ops->read_status(hwif); if (stat & ATA_BUSY) /* drive busy: definitely not interrupting */ return 0; /* drive ready: *might* be interrupting */ return 1; } /** * ide_timer_expiry - handle lack of an IDE interrupt * @data: timer callback magic (hwif) * * An IDE command has timed out before the expected drive return * occurred. At this point we attempt to clean up the current * mess. If the current handler includes an expiry handler then * we invoke the expiry handler, and providing it is happy the * work is done. If that fails we apply generic recovery rules * invoking the handler and checking the drive DMA status. We * have an excessively incestuous relationship with the DMA * logic that wants cleaning up. 
 */
void ide_timer_expiry (unsigned long data)
{
	ide_hwif_t	*hwif = (ide_hwif_t *)data;
	ide_drive_t	*uninitialized_var(drive);
	ide_handler_t	*handler;
	unsigned long	flags;
	int		wait = -1;
	int		plug_device = 0;
	struct request	*uninitialized_var(rq_in_flight);

	spin_lock_irqsave(&hwif->lock, flags);

	handler = hwif->handler;

	if (handler == NULL || hwif->req_gen != hwif->req_gen_timer) {
		/*
		 * Either a marginal timeout occurred
		 * (got the interrupt just as timer expired),
		 * or we were "sleeping" to give other devices a chance.
		 * Either way, we don't really want to complain about anything.
		 */
	} else {
		ide_expiry_t *expiry = hwif->expiry;
		ide_startstop_t startstop = ide_stopped;

		drive = hwif->cur_dev;

		if (expiry) {
			/* port hook may grant the command more time */
			wait = expiry(drive);
			if (wait > 0) { /* continue */
				/* reset timer */
				hwif->timer.expires = jiffies + wait;
				hwif->req_gen_timer = hwif->req_gen;
				add_timer(&hwif->timer);
				spin_unlock_irqrestore(&hwif->lock, flags);
				return;
			}
		}
		hwif->handler = NULL;
		hwif->expiry = NULL;
		/*
		 * We need to simulate a real interrupt when invoking
		 * the handler() function, which means we need to
		 * globally mask the specific IRQ:
		 */
		spin_unlock(&hwif->lock);
		/* disable_irq_nosync ??
		 */
		disable_irq(hwif->irq);
		/* local CPU only, as if we were handling an interrupt */
		local_irq_disable();
		if (hwif->polling) {
			/* polled mode: just run the handler again */
			startstop = handler(drive);
		} else if (drive_is_ready(drive)) {
			/* IRQ was lost somewhere — recover by hand */
			if (drive->waiting_for_dma)
				hwif->dma_ops->dma_lost_irq(drive);
			if (hwif->port_ops && hwif->port_ops->clear_irq)
				hwif->port_ops->clear_irq(drive);
			printk(KERN_WARNING "%s: lost interrupt\n",
				drive->name);
			startstop = handler(drive);
		} else {
			/* drive still busy: genuine timeout */
			if (drive->waiting_for_dma)
				startstop = ide_dma_timeout_retry(drive, wait);
			else
				startstop = ide_error(drive, "irq timeout",
					hwif->tp_ops->read_status(hwif));
		}
		spin_lock_irq(&hwif->lock);
		enable_irq(hwif->irq);
		if (startstop == ide_stopped && hwif->polling == 0) {
			/* command finished: release the port for replug */
			rq_in_flight = hwif->rq;
			hwif->rq = NULL;
			ide_unlock_port(hwif);
			plug_device = 1;
		}
	}
	spin_unlock_irqrestore(&hwif->lock, flags);

	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_requeue_and_plug(drive, rq_in_flight);
	}
}

/**
 *	unexpected_intr		-	handle an unexpected IDE interrupt
 *	@irq: interrupt line
 *	@hwif: port being processed
 *
 *	There's nothing really useful we can do with an unexpected interrupt,
 *	other than reading the status register (to clear it), and logging it.
 *	There should be no way that an irq can happen before we're ready for it,
 *	so we needn't worry much about losing an "important" interrupt here.
 *
 *	On laptops (and "green" PCs), an unexpected interrupt occurs whenever
 *	the drive enters "idle", "standby", or "sleep" mode, so if the status
 *	looks "good", we just ignore the interrupt completely.
 *
 *	This routine assumes __cli() is in effect when called.
 *
 *	If an unexpected interrupt happens on irq15 while we are handling irq14
 *	and if the two interfaces are "serialized" (CMD640), then it looks like
 *	we could screw up by interfering with a new request being set up for
 *	irq15.
 *
 *	In reality, this is a non-issue. The new command is not sent unless
 *	the drive is ready to accept one, in which case we know the drive is
 *	not trying to interrupt us.
And ide_set_handler() is always invoked * before completing the issuance of any new drive command, so we will not * be accidentally invoked as a result of any valid command completion * interrupt. */ static void unexpected_intr(int irq, ide_hwif_t *hwif) { u8 stat = hwif->tp_ops->read_status(hwif); if (!OK_STAT(stat, ATA_DRDY, BAD_STAT)) { /* Try to not flood the console with msgs */ static unsigned long last_msgtime, count; ++count; if (time_after(jiffies, last_msgtime + HZ)) { last_msgtime = jiffies; printk(KERN_ERR "%s: unexpected interrupt, " "status=0x%02x, count=%ld\n", hwif->name, stat, count); } } } /** * ide_intr - default IDE interrupt handler * @irq: interrupt number * @dev_id: hwif * @regs: unused weirdness from the kernel irq layer * * This is the default IRQ handler for the IDE layer. You should * not need to override it. If you do be aware it is subtle in * places * * hwif is the interface in the group currently performing * a command. hwif->cur_dev is the drive and hwif->handler is * the IRQ handler to call. As we issue a command the handlers * step through multiple states, reassigning the handler to the * next step in the process. Unlike a smart SCSI controller IDE * expects the main processor to sequence the various transfer * stages. We also manage a poll timer to catch up with most * timeout situations. There are still a few where the handlers * don't ever decide to give up. * * The handler eventually returns ide_stopped to indicate the * request completed. At this point we issue the next request * on the port and the process begins again. 
 */
irqreturn_t ide_intr (int irq, void *dev_id)
{
	ide_hwif_t *hwif = (ide_hwif_t *)dev_id;
	struct ide_host *host = hwif->host;
	ide_drive_t *uninitialized_var(drive);
	ide_handler_t *handler;
	unsigned long flags;
	ide_startstop_t startstop;
	irqreturn_t irq_ret = IRQ_NONE;
	int plug_device = 0;
	struct request *uninitialized_var(rq_in_flight);

	/* serialized hosts: only the active port may service the IRQ */
	if (host->host_flags & IDE_HFLAG_SERIALIZE) {
		if (hwif != host->cur_port)
			goto out_early;
	}

	spin_lock_irqsave(&hwif->lock, flags);

	/* port hook can veto: IRQ not actually ours */
	if (hwif->port_ops && hwif->port_ops->test_irq &&
	    hwif->port_ops->test_irq(hwif) == 0)
		goto out;

	handler = hwif->handler;

	if (handler == NULL || hwif->polling) {
		/*
		 * Not expecting an interrupt from this drive.
		 * That means this could be:
		 *	(1) an interrupt from another PCI device
		 *	sharing the same PCI INT# as us.
		 * or	(2) a drive just entered sleep or standby mode,
		 *	and is interrupting to let us know.
		 * or	(3) a spurious interrupt of unknown origin.
		 *
		 * For PCI, we cannot tell the difference,
		 * so in that case we just ignore it and hope it goes away.
		 */
		if ((host->irq_flags & IRQF_SHARED) == 0) {
			/*
			 * Probably not a shared PCI interrupt,
			 * so we can safely try to do something about it:
			 */
			unexpected_intr(irq, hwif);
		} else {
			/*
			 * Whack the status register, just in case
			 * we have a leftover pending IRQ.
			 */
			(void)hwif->tp_ops->read_status(hwif);
		}
		goto out;
	}

	drive = hwif->cur_dev;

	if (!drive_is_ready(drive))
		/*
		 * This happens regularly when we share a PCI IRQ with
		 * another device.  Unfortunately, it can also happen
		 * with some buggy drives that trigger the IRQ before
		 * their status register is up to date.  Hopefully we have
		 * enough advance overhead that the latter isn't a problem.
		 */
		goto out;

	/* claim the IRQ: disarm handler, expiry hook and watchdog timer */
	hwif->handler = NULL;
	hwif->expiry = NULL;
	hwif->req_gen++;
	del_timer(&hwif->timer);
	spin_unlock(&hwif->lock);

	if (hwif->port_ops && hwif->port_ops->clear_irq)
		hwif->port_ops->clear_irq(drive);

	if (drive->dev_flags & IDE_DFLAG_UNMASK)
		local_irq_enable_in_hardirq();

	/* service this interrupt, may set handler for next interrupt */
	startstop = handler(drive);

	spin_lock_irq(&hwif->lock);
	/*
	 * Note that handler() may have set things up for another
	 * interrupt to occur soon, but it cannot happen until
	 * we exit from this routine, because it will be the
	 * same irq as is currently being serviced here, and Linux
	 * won't allow another of the same (on any CPU) until we return.
	 */
	if (startstop == ide_stopped && hwif->polling == 0) {
		BUG_ON(hwif->handler);
		rq_in_flight = hwif->rq;
		hwif->rq = NULL;
		ide_unlock_port(hwif);
		plug_device = 1;
	}
	irq_ret = IRQ_HANDLED;
out:
	spin_unlock_irqrestore(&hwif->lock, flags);
out_early:
	if (plug_device) {
		ide_unlock_host(hwif->host);
		ide_requeue_and_plug(drive, rq_in_flight);
	}

	return irq_ret;
}
EXPORT_SYMBOL_GPL(ide_intr);

/*
 * Pad a PIO transfer with dummy data: transfer @len bytes of zeroes
 * (write) or discard @len bytes (read) in 4-byte chunks, clamping the
 * final chunk to the remaining length.
 */
void ide_pad_transfer(ide_drive_t *drive, int write, int len)
{
	ide_hwif_t *hwif = drive->hwif;
	u8 buf[4] = { 0 };

	while (len > 0) {
		if (write)
			hwif->tp_ops->output_data(drive, NULL, buf, min(4, len));
		else
			hwif->tp_ops->input_data(drive, NULL, buf, min(4, len));
		len -= 4;
	}
}
EXPORT_SYMBOL_GPL(ide_pad_transfer);
gpl-2.0
Validus-Kernel/android_kernel_motorola_msm8226
scripts/mod/mk_elfconfig.c
12047
1234
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <elf.h>

/*
 * Read an ELF identification block from stdin and emit #define lines
 * describing both the kernel image's ELF class/endianness and those of
 * the build host.  Exits non-zero on truncated/invalid input or on an
 * unrecognized class/data encoding.
 */
int
main(int argc, char **argv)
{
	unsigned char ident[EI_NIDENT];
	union {
		short s;
		char c[2];
	} probe;

	/* the whole e_ident block must be present */
	if (fread(ident, 1, EI_NIDENT, stdin) != EI_NIDENT) {
		fprintf(stderr, "Error: input truncated\n");
		return 1;
	}
	if (memcmp(ident, ELFMAG, SELFMAG) != 0) {
		fprintf(stderr, "Error: not ELF\n");
		return 1;
	}

	/* class of the object being inspected */
	if (ident[EI_CLASS] == ELFCLASS32)
		printf("#define KERNEL_ELFCLASS ELFCLASS32\n");
	else if (ident[EI_CLASS] == ELFCLASS64)
		printf("#define KERNEL_ELFCLASS ELFCLASS64\n");
	else
		exit(1);

	/* byte order of the object being inspected */
	if (ident[EI_DATA] == ELFDATA2LSB)
		printf("#define KERNEL_ELFDATA ELFDATA2LSB\n");
	else if (ident[EI_DATA] == ELFDATA2MSB)
		printf("#define KERNEL_ELFDATA ELFDATA2MSB\n");
	else
		exit(1);

	/* host word size (silently emits nothing for exotic sizes,
	 * matching the original behavior) */
	if (sizeof(unsigned long) == 4)
		printf("#define HOST_ELFCLASS ELFCLASS32\n");
	else if (sizeof(unsigned long) == 8)
		printf("#define HOST_ELFCLASS ELFCLASS64\n");

	/* host byte order, detected at run time via a 2-byte probe */
	probe.s = 0x0102;
	if (memcmp(probe.c, "\x01\x02", 2) == 0)
		printf("#define HOST_ELFDATA ELFDATA2MSB\n");
	else if (memcmp(probe.c, "\x02\x01", 2) == 0)
		printf("#define HOST_ELFDATA ELFDATA2LSB\n");
	else
		exit(1);

	return 0;
}
gpl-2.0
animania260/android_kernel_samsung_prevail2spr--Galaxy-Rush-
lib/bitrev.c
13071
2157
#include <linux/types.h> #include <linux/module.h> #include <linux/bitrev.h> MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>"); MODULE_DESCRIPTION("Bit ordering reversal functions"); MODULE_LICENSE("GPL"); const u8 byte_rev_table[256] = { 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, }; EXPORT_SYMBOL_GPL(byte_rev_table); u16 bitrev16(u16 x) { return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8); } EXPORT_SYMBOL(bitrev16); /** * bitrev32 - reverse the order of bits in a u32 value * @x: value to be 
bit-reversed */ u32 bitrev32(u32 x) { return (bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16); } EXPORT_SYMBOL(bitrev32);
gpl-2.0
amphorion/kernel_pyramid
lib/bitrev.c
13071
2157
#include <linux/types.h> #include <linux/module.h> #include <linux/bitrev.h> MODULE_AUTHOR("Akinobu Mita <akinobu.mita@gmail.com>"); MODULE_DESCRIPTION("Bit ordering reversal functions"); MODULE_LICENSE("GPL"); const u8 byte_rev_table[256] = { 0x00, 0x80, 0x40, 0xc0, 0x20, 0xa0, 0x60, 0xe0, 0x10, 0x90, 0x50, 0xd0, 0x30, 0xb0, 0x70, 0xf0, 0x08, 0x88, 0x48, 0xc8, 0x28, 0xa8, 0x68, 0xe8, 0x18, 0x98, 0x58, 0xd8, 0x38, 0xb8, 0x78, 0xf8, 0x04, 0x84, 0x44, 0xc4, 0x24, 0xa4, 0x64, 0xe4, 0x14, 0x94, 0x54, 0xd4, 0x34, 0xb4, 0x74, 0xf4, 0x0c, 0x8c, 0x4c, 0xcc, 0x2c, 0xac, 0x6c, 0xec, 0x1c, 0x9c, 0x5c, 0xdc, 0x3c, 0xbc, 0x7c, 0xfc, 0x02, 0x82, 0x42, 0xc2, 0x22, 0xa2, 0x62, 0xe2, 0x12, 0x92, 0x52, 0xd2, 0x32, 0xb2, 0x72, 0xf2, 0x0a, 0x8a, 0x4a, 0xca, 0x2a, 0xaa, 0x6a, 0xea, 0x1a, 0x9a, 0x5a, 0xda, 0x3a, 0xba, 0x7a, 0xfa, 0x06, 0x86, 0x46, 0xc6, 0x26, 0xa6, 0x66, 0xe6, 0x16, 0x96, 0x56, 0xd6, 0x36, 0xb6, 0x76, 0xf6, 0x0e, 0x8e, 0x4e, 0xce, 0x2e, 0xae, 0x6e, 0xee, 0x1e, 0x9e, 0x5e, 0xde, 0x3e, 0xbe, 0x7e, 0xfe, 0x01, 0x81, 0x41, 0xc1, 0x21, 0xa1, 0x61, 0xe1, 0x11, 0x91, 0x51, 0xd1, 0x31, 0xb1, 0x71, 0xf1, 0x09, 0x89, 0x49, 0xc9, 0x29, 0xa9, 0x69, 0xe9, 0x19, 0x99, 0x59, 0xd9, 0x39, 0xb9, 0x79, 0xf9, 0x05, 0x85, 0x45, 0xc5, 0x25, 0xa5, 0x65, 0xe5, 0x15, 0x95, 0x55, 0xd5, 0x35, 0xb5, 0x75, 0xf5, 0x0d, 0x8d, 0x4d, 0xcd, 0x2d, 0xad, 0x6d, 0xed, 0x1d, 0x9d, 0x5d, 0xdd, 0x3d, 0xbd, 0x7d, 0xfd, 0x03, 0x83, 0x43, 0xc3, 0x23, 0xa3, 0x63, 0xe3, 0x13, 0x93, 0x53, 0xd3, 0x33, 0xb3, 0x73, 0xf3, 0x0b, 0x8b, 0x4b, 0xcb, 0x2b, 0xab, 0x6b, 0xeb, 0x1b, 0x9b, 0x5b, 0xdb, 0x3b, 0xbb, 0x7b, 0xfb, 0x07, 0x87, 0x47, 0xc7, 0x27, 0xa7, 0x67, 0xe7, 0x17, 0x97, 0x57, 0xd7, 0x37, 0xb7, 0x77, 0xf7, 0x0f, 0x8f, 0x4f, 0xcf, 0x2f, 0xaf, 0x6f, 0xef, 0x1f, 0x9f, 0x5f, 0xdf, 0x3f, 0xbf, 0x7f, 0xff, }; EXPORT_SYMBOL_GPL(byte_rev_table); u16 bitrev16(u16 x) { return (bitrev8(x & 0xff) << 8) | bitrev8(x >> 8); } EXPORT_SYMBOL(bitrev16); /** * bitrev32 - reverse the order of bits in a u32 value * @x: value to be 
bit-reversed */ u32 bitrev32(u32 x) { return (bitrev16(x & 0xffff) << 16) | bitrev16(x >> 16); } EXPORT_SYMBOL(bitrev32);
gpl-2.0
wanam/Adam-Kernel-GS4-LTE
arch/powerpc/boot/cuboot-rainier.c
14095
1453
/* * Old U-boot compatibility for Rainier * * Valentine Barshak <vbarshak@ru.mvista.com> * Copyright 2007 MontaVista Software, Inc * * Based on Ebony code by David Gibson <david@gibson.dropbear.id.au> * Copyright IBM Corporation, 2007 * * Based on Bamboo code by Josh Boyer <jwboyer@linux.vnet.ibm.com> * Copyright IBM Corporation, 2007 * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 of the License */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "dcr.h" #include "4xx.h" #include "44x.h" #include "cuboot.h" #define TARGET_4xx #define TARGET_44x #include "ppcboot.h" static bd_t bd; static void rainier_fixups(void) { unsigned long sysclk = 33333333; ibm440ep_fixup_clocks(sysclk, 11059200, 50000000); ibm4xx_fixup_ebc_ranges("/plb/opb/ebc"); ibm4xx_denali_fixup_memsize(); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); platform_ops.fixups = rainier_fixups; platform_ops.exit = ibm44x_dbcr_reset; fdt_init(_dtb_start); serial_console_init(); }
gpl-2.0
CurtisMJ/g800f_custom_kernel
drivers/regulator/s2mpa01.c
16
21810
/* * s2mpa01.c * * Copyright (c) 2013 Samsung Electronics Co., Ltd * http://www.samsung.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/bug.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/mfd/samsung/core.h> #include <linux/mfd/samsung/s2mpa01.h> struct s2mpa01_info { struct device *dev; #ifdef CONFIG_SEC_PM struct device *sec_power; #endif struct sec_pmic_dev *iodev; int num_regulators; struct regulator_dev **rdev; struct sec_opmode_data *opmode_data; struct mutex lock; int ramp_delay24; int ramp_delay3; int ramp_delay5; int ramp_delay16; int ramp_delay7; int ramp_delay8910; bool buck1_ramp; bool buck2_ramp; bool buck3_ramp; bool buck4_ramp; bool dvs1_en; bool dvs2_en; bool dvs3_en; bool dvs4_en; bool dvs6_en; }; struct s2mpa01_voltage_desc { int max; int min; int step; }; static const struct s2mpa01_voltage_desc buck_voltage_val1 = { .max = 1500000, .min = 600000, .step = 6250, }; static const struct s2mpa01_voltage_desc buck_voltage_val2 = { .max = 3000000, .min = 800000, .step = 12500, }; static const struct s2mpa01_voltage_desc ldo_voltage_val1 = { .max = 3950000, .min = 800000, .step = 50000, }; static const struct s2mpa01_voltage_desc ldo_voltage_val2 = { .max = 2375000, .min = 800000, .step = 25000, }; static const struct s2mpa01_voltage_desc *reg_voltage_map[] = { [S2MPA01_LDO1] = &ldo_voltage_val2, [S2MPA01_LDO2] = &ldo_voltage_val1, [S2MPA01_LDO3] = &ldo_voltage_val1, [S2MPA01_LDO4] = &ldo_voltage_val1, [S2MPA01_LDO5] = &ldo_voltage_val2, [S2MPA01_LDO6] = &ldo_voltage_val2, [S2MPA01_LDO7] = &ldo_voltage_val1, [S2MPA01_LDO8] = 
&ldo_voltage_val1, [S2MPA01_LDO9] = &ldo_voltage_val1, [S2MPA01_LDO10] = &ldo_voltage_val1, [S2MPA01_LDO11] = &ldo_voltage_val1, [S2MPA01_LDO12] = &ldo_voltage_val1, [S2MPA01_LDO13] = &ldo_voltage_val1, [S2MPA01_LDO14] = &ldo_voltage_val1, [S2MPA01_LDO15] = &ldo_voltage_val1, [S2MPA01_LDO16] = &ldo_voltage_val1, [S2MPA01_LDO17] = &ldo_voltage_val1, [S2MPA01_LDO18] = &ldo_voltage_val1, [S2MPA01_LDO19] = &ldo_voltage_val1, [S2MPA01_LDO20] = &ldo_voltage_val1, [S2MPA01_LDO21] = &ldo_voltage_val1, [S2MPA01_LDO22] = &ldo_voltage_val1, [S2MPA01_LDO23] = &ldo_voltage_val1, [S2MPA01_LDO24] = &ldo_voltage_val1, [S2MPA01_LDO25] = &ldo_voltage_val1, [S2MPA01_LDO26] = &ldo_voltage_val2, [S2MPA01_BUCK1] = &buck_voltage_val1, [S2MPA01_BUCK2] = &buck_voltage_val1, [S2MPA01_BUCK3] = &buck_voltage_val1, [S2MPA01_BUCK4] = &buck_voltage_val1, [S2MPA01_BUCK5] = &buck_voltage_val1, [S2MPA01_BUCK6] = &buck_voltage_val1, [S2MPA01_BUCK7] = &buck_voltage_val1, [S2MPA01_BUCK8] = &buck_voltage_val2, [S2MPA01_BUCK9] = &buck_voltage_val2, [S2MPA01_BUCK10] = &buck_voltage_val2, }; static int s2mpa01_list_voltage(struct regulator_dev *rdev, unsigned int selector) { const struct s2mpa01_voltage_desc *desc; int reg_id = rdev_get_id(rdev); int val; if (reg_id >= ARRAY_SIZE(reg_voltage_map) || reg_id < 0) return -EINVAL; desc = reg_voltage_map[reg_id]; if (desc == NULL) return -EINVAL; val = desc->min + desc->step * selector; if (val > desc->max) return -EINVAL; return val; } unsigned int s2mpa01_opmode_reg[][3] = { {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, 
{0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, {0x3, 0x2, 0x1}, }; static int s2mpa01_get_register(struct regulator_dev *rdev, unsigned int *reg, int *pmic_en) { int reg_id = rdev_get_id(rdev); unsigned int mode; struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev); switch (reg_id) { case S2MPA01_LDO1 ... S2MPA01_LDO26: *reg = S2MPA01_REG_L1CTRL + (reg_id - S2MPA01_LDO1); break; case S2MPA01_BUCK1 ... S2MPA01_BUCK5: *reg = S2MPA01_REG_B1CTRL1 + (reg_id - S2MPA01_BUCK1) * 2; break; case S2MPA01_BUCK6 ... S2MPA01_BUCK10: *reg = S2MPA01_REG_B6CTRL1 + (reg_id - S2MPA01_BUCK6) * 2; break; case S2MPA01_AP_EN32KHZ ... S2MPA01_CP_EN32KHZ: *reg = S2MPA01_REG_RTC_BUF; *pmic_en = 0x01 << (reg_id - S2MPA01_AP_EN32KHZ); return 0; default: return -EINVAL; } mode = s2mpa01->opmode_data[reg_id].mode; *pmic_en = s2mpa01_opmode_reg[reg_id][mode] << S2MPA01_PMIC_EN_SHIFT; return 0; } static int s2mpa01_reg_is_enabled(struct regulator_dev *rdev) { struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev); int reg_id = rdev_get_id(rdev); unsigned int reg, val; int ret, mask = 0xc0, pmic_en; ret = s2mpa01_get_register(rdev, &reg, &pmic_en); if (ret == -EINVAL) return 1; else if (ret) return ret; ret = sec_reg_read(s2mpa01->iodev, reg, &val); if (ret) return ret; switch (reg_id) { case S2MPA01_LDO1 ... S2MPA01_BUCK10: mask = 0xc0; break; case S2MPA01_AP_EN32KHZ: mask = 0x01; break; case S2MPA01_CP_EN32KHZ: mask = 0x02; break; default: return -EINVAL; } return (val & mask) == pmic_en; } static int s2mpa01_reg_enable(struct regulator_dev *rdev) { struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev); int reg_id = rdev_get_id(rdev); unsigned int reg; int ret, mask, pmic_en; ret = s2mpa01_get_register(rdev, &reg, &pmic_en); if (ret) return ret; switch (reg_id) { case S2MPA01_LDO1 ... 
S2MPA01_BUCK10: mask = 0xc0; break; case S2MPA01_AP_EN32KHZ: mask = 0x01; break; case S2MPA01_CP_EN32KHZ: mask = 0x02; break; default: return -EINVAL; } return sec_reg_update(s2mpa01->iodev, reg, pmic_en, mask); } static int s2mpa01_reg_disable(struct regulator_dev *rdev) { struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev); int reg_id = rdev_get_id(rdev); unsigned int reg; int ret, mask, pmic_en; ret = s2mpa01_get_register(rdev, &reg, &pmic_en); if (ret) return ret; switch (reg_id) { case S2MPA01_LDO1 ... S2MPA01_BUCK10: mask = 0xc0; break; case S2MPA01_AP_EN32KHZ: mask = 0x01; break; case S2MPA01_CP_EN32KHZ: mask = 0x02; break; default: return -EINVAL; } return sec_reg_update(s2mpa01->iodev, reg, ~mask, mask); } static int s2mpa01_get_voltage_register(struct regulator_dev *rdev, unsigned int *_reg) { int reg_id = rdev_get_id(rdev); unsigned int reg; switch (reg_id) { case S2MPA01_LDO1 ... S2MPA01_LDO26: reg = S2MPA01_REG_L1CTRL + (reg_id - S2MPA01_LDO1); break; case S2MPA01_BUCK1 ... S2MPA01_BUCK4: case S2MPA01_BUCK6: reg = S2MPA01_REG_DVS_DATA; break; case S2MPA01_BUCK5: reg = S2MPA01_REG_B5CTRL3; break; case S2MPA01_BUCK7 ... S2MPA01_BUCK10: reg = S2MPA01_REG_B7CTRL2 + (reg_id - S2MPA01_BUCK7) * 2; break; default: return -EINVAL; } *_reg = reg; return 0; } static int s2mpa01_get_voltage_sel(struct regulator_dev *rdev) { struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev); int mask, ret; int reg_id = rdev_get_id(rdev); unsigned int reg, val, dvs_ptr; ret = s2mpa01_get_voltage_register(rdev, &reg); if (ret) return ret; switch (reg_id) { case S2MPA01_BUCK1 ... S2MPA01_BUCK10: mask = 0xff; break; case S2MPA01_LDO1 ... 
S2MPA01_LDO26: mask = 0x3f; break; default: return -EINVAL; } if (reg_id >= S2MPA01_BUCK1 && reg_id <= S2MPA01_BUCK4) dvs_ptr = 0x01 | ((reg_id - S2MPA01_BUCK1) << 3); else if (reg_id == S2MPA01_BUCK6) dvs_ptr = 0x01 | (0x04 << 3); else dvs_ptr = 0; mutex_lock(&s2mpa01->lock); if (dvs_ptr) { ret = sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_PTR, dvs_ptr); if (ret) goto err_exit; } ret = sec_reg_read(s2mpa01->iodev, reg, &val); if (ret) goto err_exit; ret = val & mask; err_exit: mutex_unlock(&s2mpa01->lock); return ret; } static inline int s2mpa01_convert_voltage_to_sel( const struct s2mpa01_voltage_desc *desc, int min_vol, int max_vol) { int selector = 0; if (desc == NULL) return -EINVAL; if (max_vol < desc->min || min_vol > desc->max) return -EINVAL; selector = (min_vol - desc->min) / desc->step; if (desc->min + desc->step * selector > max_vol) return -EINVAL; return selector; } static int s2mpa01_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector) { struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev); int min_vol = min_uV, max_vol = max_uV; const struct s2mpa01_voltage_desc *desc; int ret, reg_id = rdev_get_id(rdev); unsigned int reg, mask; int sel; mask = (reg_id < S2MPA01_BUCK1) ? 
0x3f : 0xff; desc = reg_voltage_map[reg_id]; sel = s2mpa01_convert_voltage_to_sel(desc, min_vol, max_vol); if (sel < 0) return sel; ret = s2mpa01_get_voltage_register(rdev, &reg); if (ret) return ret; mutex_lock(&s2mpa01->lock); ret = sec_reg_update(s2mpa01->iodev, reg, sel, mask); mutex_unlock(&s2mpa01->lock); *selector = sel; return ret; } static int s2mpa01_set_voltage_time_sel(struct regulator_dev *rdev, unsigned int old_sel, unsigned int new_sel) { struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev); const struct s2mpa01_voltage_desc *desc; int reg_id = rdev_get_id(rdev); int ramp_delay = 0; switch (reg_id) { case S2MPA01_BUCK1: case S2MPA01_BUCK6: ramp_delay = s2mpa01->ramp_delay16; break; case S2MPA01_BUCK2: case S2MPA01_BUCK4: ramp_delay = s2mpa01->ramp_delay24; break; case S2MPA01_BUCK3: ramp_delay = s2mpa01->ramp_delay3; break; case S2MPA01_BUCK5: ramp_delay = s2mpa01->ramp_delay5; break; case S2MPA01_BUCK7: ramp_delay = s2mpa01->ramp_delay7; break; case S2MPA01_BUCK8 ... S2MPA01_BUCK10: ramp_delay = s2mpa01->ramp_delay8910; break; default: return -EINVAL; } desc = reg_voltage_map[reg_id]; if (((old_sel < new_sel) && (reg_id >= S2MPA01_BUCK1)) && ramp_delay) { return DIV_ROUND_UP(desc->step * (new_sel - old_sel), ramp_delay * 1000); } return 0; } static int s2mpa01_set_voltage_sel(struct regulator_dev *rdev, unsigned selector) { struct s2mpa01_info *s2mpa01 = rdev_get_drvdata(rdev); int mask, ret; int reg_id = rdev_get_id(rdev); unsigned int reg; u8 data[2]; ret = s2mpa01_get_voltage_register(rdev, &reg); if (ret) return ret; switch (reg_id) { case S2MPA01_BUCK1 ... S2MPA01_BUCK10: mask = 0xff; break; case S2MPA01_LDO1 ... 
S2MPA01_LDO26: mask = 0x3f; break; default: return -EINVAL; } if ((reg_id >= S2MPA01_BUCK1 && reg_id <= S2MPA01_BUCK4)) { data[0] = 0x01 | ((reg_id - S2MPA01_BUCK1) << 3); data[1] = selector; } else if (reg_id == S2MPA01_BUCK6) { data[0] = 0x01 | (0x04 << 3); data[1] = selector; } else { data[0] = 0; } mutex_lock(&s2mpa01->lock); if (data[0]) ret = sec_bulk_write(s2mpa01->iodev, S2MPA01_REG_DVS_PTR, 2, data); else ret = sec_reg_update(s2mpa01->iodev, reg, selector, mask); mutex_unlock(&s2mpa01->lock); return ret; } static int get_ramp_delay(int ramp_delay) { unsigned char cnt = 0; ramp_delay /= 6; while (true) { ramp_delay = ramp_delay >> 1; if (ramp_delay == 0) break; cnt++; } return cnt; } static struct regulator_ops s2mpa01_ldo_ops = { .list_voltage = s2mpa01_list_voltage, .is_enabled = s2mpa01_reg_is_enabled, .enable = s2mpa01_reg_enable, .disable = s2mpa01_reg_disable, .get_voltage_sel = s2mpa01_get_voltage_sel, .set_voltage = s2mpa01_set_voltage, .set_voltage_time_sel = s2mpa01_set_voltage_time_sel, }; static struct regulator_ops s2mpa01_buck_ops = { .list_voltage = s2mpa01_list_voltage, .is_enabled = s2mpa01_reg_is_enabled, .enable = s2mpa01_reg_enable, .disable = s2mpa01_reg_disable, .get_voltage_sel = s2mpa01_get_voltage_sel, .set_voltage_sel = s2mpa01_set_voltage_sel, .set_voltage_time_sel = s2mpa01_set_voltage_time_sel, }; static struct regulator_ops s2mpa01_others_ops = { .is_enabled = s2mpa01_reg_is_enabled, .enable = s2mpa01_reg_enable, .disable = s2mpa01_reg_disable, }; #define regulator_desc_ldo(num) { \ .name = "LDO"#num, \ .id = S2MPA01_LDO##num, \ .ops = &s2mpa01_ldo_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ } #define regulator_desc_buck(num) { \ .name = "BUCK"#num, \ .id = S2MPA01_BUCK##num, \ .ops = &s2mpa01_buck_ops, \ .type = REGULATOR_VOLTAGE, \ .owner = THIS_MODULE, \ } static struct regulator_desc regulators[] = { regulator_desc_ldo(1), regulator_desc_ldo(2), regulator_desc_ldo(3), regulator_desc_ldo(4), 
regulator_desc_ldo(5), regulator_desc_ldo(6), regulator_desc_ldo(7), regulator_desc_ldo(8), regulator_desc_ldo(9), regulator_desc_ldo(10), regulator_desc_ldo(11), regulator_desc_ldo(12), regulator_desc_ldo(13), regulator_desc_ldo(14), regulator_desc_ldo(15), regulator_desc_ldo(16), regulator_desc_ldo(17), regulator_desc_ldo(18), regulator_desc_ldo(19), regulator_desc_ldo(20), regulator_desc_ldo(21), regulator_desc_ldo(22), regulator_desc_ldo(23), regulator_desc_ldo(24), regulator_desc_ldo(25), regulator_desc_ldo(26), regulator_desc_buck(1), regulator_desc_buck(2), regulator_desc_buck(3), regulator_desc_buck(4), regulator_desc_buck(5), regulator_desc_buck(6), regulator_desc_buck(7), regulator_desc_buck(8), regulator_desc_buck(9), regulator_desc_buck(10), { .name = "EN32KHz AP", .id = S2MPA01_AP_EN32KHZ, .ops = &s2mpa01_others_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "EN32KHz CP", .id = S2MPA01_CP_EN32KHZ, .ops = &s2mpa01_others_ops, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, }; #ifdef CONFIG_SEC_PM extern struct class *sec_class; #define MRSTB_EN_BIT BIT(3) static ssize_t switch_show_manual_reset(struct device *dev, struct device_attribute *attr, char *buf) { struct s2mpa01_info *s2mpa01 = dev_get_drvdata(dev); unsigned int val; sec_reg_read(s2mpa01->iodev, S2MPA01_REG_CTRL1, &val); val &= MRSTB_EN_BIT; return sprintf(buf, "%d", val ? 
/* NOTE(review): continuation of switch_show_manual_reset(); the function
 * begins before this chunk, so only its closing expression is visible here. */
			1 : 0);
}

/*
 * switch_store_manual_reset() - sysfs store for "enable_hw_reset".
 * Accepts "0" (disable) or "1" (enable manual-reset) and updates the
 * MRSTB enable bit in the PMIC CTRL1 register.  Any other input is
 * rejected with -EINVAL.  Returns @count on success or a negative errno.
 */
static ssize_t switch_store_manual_reset(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct s2mpa01_info *s2mpa01 = dev_get_drvdata(dev);
	unsigned int val, mask;
	int ret;

	/* Only the first character is inspected ("0"/"1"). */
	if (!strncmp(buf, "0", 1)) {
		val = 0;
	} else if (!strncmp(buf, "1", 1)) {
		val = MRSTB_EN_BIT;
	} else {
		pr_warn("%s: Wrong command:%s\n", __func__, buf);
		return -EINVAL;
	}

	mask = MRSTB_EN_BIT;
	ret = sec_reg_update(s2mpa01->iodev, S2MPA01_REG_CTRL1, val, mask);
	if (ret < 0)
		return ret;

	return count;
}

/* sysfs attribute: rw for owner/group, read-only for others (0664). */
static DEVICE_ATTR(enable_hw_reset, 0664, switch_show_manual_reset,
		switch_store_manual_reset);
#endif /* CONFIG_SEC_PM */

/*
 * s2mpa01_pmic_probe() - bind the S2MPA01 PMIC regulator driver.
 *
 * Allocates driver state, programs the buck DVS voltage table and ramp
 * delays from platform data, then registers every regulator described in
 * @pdata.  Returns 0 on success or a negative errno; on regulator
 * registration failure all previously registered regulators are unwound.
 */
static __devinit int s2mpa01_pmic_probe(struct platform_device *pdev)
{
	struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
	struct sec_pmic_platform_data *pdata = dev_get_platdata(iodev->dev);
	struct regulator_dev **rdev;
	struct s2mpa01_info *s2mpa01;
	int i, ret, size;
	unsigned int ramp_enable, ramp_reg = 0;
	unsigned int buck_init;

	if (!pdata) {
		dev_err(pdev->dev.parent, "Platform data not supplied\n");
		return -ENODEV;
	}

	/* devm_ allocations are released automatically on probe failure. */
	s2mpa01 = devm_kzalloc(&pdev->dev, sizeof(struct s2mpa01_info),
			GFP_KERNEL);
	if (!s2mpa01)
		return -ENOMEM;

	size = sizeof(struct regulator_dev *) * pdata->num_regulators;
	s2mpa01->rdev = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
	if (!s2mpa01->rdev)
		return -ENOMEM;

	/* Read the chip revision into the parent MFD device. */
	ret = sec_reg_read(iodev, S2MPA01_REG_ID, &iodev->rev_num);
	if (ret < 0)
		return ret;

	mutex_init(&s2mpa01->lock);

	rdev = s2mpa01->rdev;
	s2mpa01->dev = &pdev->dev;
	s2mpa01->iodev = iodev;
	s2mpa01->num_regulators = pdata->num_regulators;
	platform_set_drvdata(pdev, s2mpa01);

#ifdef CONFIG_SEC_PM
	/* Expose the enable_hw_reset knob under the "sec_power" class device.
	 * NOTE(review): neither the device nor the attribute file is torn
	 * down on later probe failure paths below — confirm intended. */
	s2mpa01->sec_power = device_create(sec_class, NULL, 0, NULL, "sec_power");
	if (IS_ERR(s2mpa01->sec_power)) {
		ret = PTR_ERR(s2mpa01->sec_power);
		dev_err(&pdev->dev, "Failed to create sec_power:%d\n", ret);
		return ret;
	}
	dev_set_drvdata(s2mpa01->sec_power, s2mpa01);

	ret = device_create_file(s2mpa01->sec_power, &dev_attr_enable_hw_reset);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to create enable_hw_reset:%d\n", ret);
		return ret;
	}
#endif

	/* Cache ramp configuration from platform data. */
	s2mpa01->ramp_delay24 = pdata->buck24_ramp_delay;
	s2mpa01->ramp_delay3 = pdata->buck3_ramp_delay;
	s2mpa01->ramp_delay5 = pdata->buck5_ramp_delay;
	s2mpa01->ramp_delay16 = pdata->buck16_ramp_delay;
	s2mpa01->ramp_delay7 = pdata->buck7_ramp_delay;
	s2mpa01->ramp_delay8910 = pdata->buck8910_ramp_delay;

	s2mpa01->buck1_ramp = pdata->buck1_ramp_enable;
	s2mpa01->buck2_ramp = pdata->buck2_ramp_enable;
	s2mpa01->buck3_ramp = pdata->buck3_ramp_enable;
	s2mpa01->buck4_ramp = pdata->buck4_ramp_enable;
	s2mpa01->opmode_data = pdata->opmode_data;

	sec_reg_update(s2mpa01->iodev, S2MPA01_REG_LEE_NO, 0x20, 0x20);

	/*
	 * Program the DVS table: for each buck, convert the platform-supplied
	 * initial voltage to a register selector (or fall back to the current
	 * BxCTRL2 value), point DVS_PTR at the buck's slot and write the data.
	 * The pointer values (0x01, 0x09, 0x11, 0x19, 0x21) select bucks
	 * 1, 2, 3, 4 and 6 respectively.
	 */
	if (pdata->buck1_init) {
		buck_init = s2mpa01_convert_voltage_to_sel(&buck_voltage_val1,
				pdata->buck1_init,
				pdata->buck1_init + buck_voltage_val1.step);
	} else {
		sec_reg_read(s2mpa01->iodev, S2MPA01_REG_B1CTRL2, &buck_init);
	}
	sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_PTR, 0x01);
	sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_DATA, buck_init);

	if (pdata->buck2_init) {
		buck_init = s2mpa01_convert_voltage_to_sel(&buck_voltage_val1,
				pdata->buck2_init,
				pdata->buck2_init + buck_voltage_val1.step);
	} else {
		sec_reg_read(s2mpa01->iodev, S2MPA01_REG_B2CTRL2, &buck_init);
	}
	sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_PTR, 0x09);
	sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_DATA, buck_init);

	if (pdata->buck3_init) {
		buck_init = s2mpa01_convert_voltage_to_sel(&buck_voltage_val1,
				pdata->buck3_init,
				pdata->buck3_init + buck_voltage_val1.step);
	} else {
		sec_reg_read(s2mpa01->iodev, S2MPA01_REG_B3CTRL2, &buck_init);
	}
	sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_PTR, 0x11);
	sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_DATA, buck_init);

	if (pdata->buck4_init) {
		buck_init = s2mpa01_convert_voltage_to_sel(&buck_voltage_val1,
				pdata->buck4_init,
				pdata->buck4_init + buck_voltage_val1.step);
	} else {
		sec_reg_read(s2mpa01->iodev, S2MPA01_REG_B4CTRL2, &buck_init);
	}
	sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_PTR, 0x19);
	sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_DATA, buck_init);

	if (pdata->buck6_init) {
		buck_init = s2mpa01_convert_voltage_to_sel(&buck_voltage_val1,
				pdata->buck6_init,
				pdata->buck6_init + buck_voltage_val1.step);
	} else {
		sec_reg_read(s2mpa01->iodev, S2MPA01_REG_B6CTRL2, &buck_init);
	}
	sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_PTR, 0x21);
	sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_DATA, buck_init);

	/* Activate the DVS selection just programmed. */
	sec_reg_write(s2mpa01->iodev, S2MPA01_REG_DVS_SEL, 0x01);

	/* Pack per-buck ramp-enable bits: buck1..4 in bits 3..0. */
	ramp_enable = (s2mpa01->buck1_ramp << 3) | (s2mpa01->buck2_ramp << 2) |
		(s2mpa01->buck3_ramp << 1) | s2mpa01->buck4_ramp ;

	if (ramp_enable) {
		/* Bucks 2 and 4 share one ramp-delay field (bits 7:6). */
		if (s2mpa01->buck2_ramp || s2mpa01->buck4_ramp)
			ramp_reg |= get_ramp_delay(s2mpa01->ramp_delay24) << 6;
		if (s2mpa01->buck3_ramp)
			ramp_reg |= get_ramp_delay(s2mpa01->ramp_delay3) << 4;
		sec_reg_update(s2mpa01->iodev, S2MPA01_REG_BUCK_RAMP1,
				ramp_reg | ramp_enable, 0xff);
	}

	/* Reuse ramp_reg (cleared) for the second ramp register. */
	ramp_reg &= 0x00;
	ramp_reg |= get_ramp_delay(s2mpa01->ramp_delay5) << 6;
	ramp_reg |= get_ramp_delay(s2mpa01->ramp_delay16) << 4;
	ramp_reg |= get_ramp_delay(s2mpa01->ramp_delay7) << 2;
	ramp_reg |= get_ramp_delay(s2mpa01->ramp_delay8910);
	sec_reg_update(s2mpa01->iodev, S2MPA01_REG_BUCK_RAMP2, ramp_reg, 0xff);

	/* Register every regulator requested by the platform data. */
	for (i = 0; i < pdata->num_regulators; i++) {
		const struct s2mpa01_voltage_desc *desc;
		int id = pdata->regulators[i].id;
		desc = reg_voltage_map[id];
		if (desc)
			/* Derive the selector count from the voltage range. */
			regulators[id].n_voltages =
				(desc->max - desc->min) / desc->step + 1;

		rdev[i] = regulator_register(&regulators[id], s2mpa01->dev,
				pdata->regulators[i].initdata, s2mpa01, NULL);
		if (IS_ERR(rdev[i])) {
			ret = PTR_ERR(rdev[i]);
			dev_err(s2mpa01->dev, "regulator init failed for %d\n",
				id);
			rdev[i] = NULL;
			goto err;
		}
	}

	return 0;
err:
	/* Unwind every regulator registered before the failure. */
	for (i = 0; i < s2mpa01->num_regulators; i++)
		if (rdev[i])
			regulator_unregister(rdev[i]);

	return ret;
}

/*
 * s2mpa01_pmic_remove() - unbind the driver, unregistering all regulators.
 * Driver state itself is devm-managed and freed by the core.
 */
static int __devexit s2mpa01_pmic_remove(struct platform_device *pdev)
{
	struct s2mpa01_info *s2mpa01 = platform_get_drvdata(pdev);
	struct regulator_dev **rdev = s2mpa01->rdev;
	int i;

	for (i = 0; i < s2mpa01->num_regulators; i++)
		if (rdev[i])
			regulator_unregister(rdev[i]);

	return 0;
}

static const struct platform_device_id s2mpa01_pmic_id[] = {
	{ "s2mpa01-pmic", 0},
	{ },
};
MODULE_DEVICE_TABLE(platform, s2mpa01_pmic_id);

static struct platform_driver s2mpa01_pmic_driver = {
	.driver = {
		.name = "s2mpa01-pmic",
		.owner = THIS_MODULE,
	},
	.probe = s2mpa01_pmic_probe,
	.remove = __devexit_p(s2mpa01_pmic_remove),
	.id_table = s2mpa01_pmic_id,
};

/* Registered at subsys_initcall time so regulators exist before consumers. */
static int __init s2mpa01_pmic_init(void)
{
	return platform_driver_register(&s2mpa01_pmic_driver);
}
subsys_initcall(s2mpa01_pmic_init);

static void __exit s2mpa01_pmic_exit(void)
{
	platform_driver_unregister(&s2mpa01_pmic_driver);
}
module_exit(s2mpa01_pmic_exit);

/* Module information */
MODULE_AUTHOR("Sangbeom Kim <sbkim73@samsung.com>");
MODULE_AUTHOR("Dongsu Ha <dsfine.ha@samsung.com>");
MODULE_DESCRIPTION("SAMSUNG S2MPA01 Regulator Driver");
MODULE_LICENSE("GPL");
gpl-2.0
MartynShaw/audacity
lib-src/lv2/lilv/src/query.c
16
4178
/* Copyright 2007-2014 David Robillard <http://drobilla.net> Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee is hereby granted, provided that the above copyright notice and this permission notice appear in all copies. THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <assert.h> #include <limits.h> #include <stdlib.h> #include <string.h> #include "lilv_internal.h" typedef enum { LILV_LANG_MATCH_NONE, ///< Language does not match at all LILV_LANG_MATCH_PARTIAL, ///< Partial (language, but not country) match LILV_LANG_MATCH_EXACT ///< Exact (language and country) match } LilvLangMatch; static LilvLangMatch lilv_lang_matches(const char* a, const char* b) { if (!strcmp(a, b)) { return LILV_LANG_MATCH_EXACT; } const char* a_dash = strchr(a, '-'); const size_t a_lang_len = a_dash ? (size_t)(a_dash - a) : strlen(a); const char* b_dash = strchr(b, '-'); const size_t b_lang_len = b_dash ? 
(size_t)(b_dash - b) : strlen(b); if (a_lang_len == b_lang_len && !strncmp(a, b, a_lang_len)) { return LILV_LANG_MATCH_PARTIAL; } return LILV_LANG_MATCH_NONE; } static LilvNodes* lilv_nodes_from_stream_objects_i18n(LilvWorld* world, SordIter* stream, SordQuadIndex field) { LilvNodes* values = lilv_nodes_new(); const SordNode* nolang = NULL; // Untranslated value const SordNode* partial = NULL; // Partial language match char* syslang = lilv_get_lang(); FOREACH_MATCH(stream) { const SordNode* value = sord_iter_get_node(stream, field); if (sord_node_get_type(value) == SORD_LITERAL) { const char* lang = sord_node_get_language(value); LilvLangMatch lm = LILV_LANG_MATCH_NONE; if (lang) { lm = (syslang) ? lilv_lang_matches(lang, syslang) : LILV_LANG_MATCH_PARTIAL; } else { nolang = value; if (!syslang) { lm = LILV_LANG_MATCH_EXACT; } } if (lm == LILV_LANG_MATCH_EXACT) { // Exact language match, add to results zix_tree_insert((ZixTree*)values, lilv_node_new_from_node(world, value), NULL); } else if (lm == LILV_LANG_MATCH_PARTIAL) { // Partial language match, save in case we find no exact partial = value; } } else { zix_tree_insert((ZixTree*)values, lilv_node_new_from_node(world, value), NULL); } } sord_iter_free(stream); free(syslang); if (lilv_nodes_size(values) > 0) { return values; } const SordNode* best = nolang; if (syslang && partial) { // Partial language match for system language best = partial; } else if (!best) { // No languages matches at all, and no untranslated value // Use any value, if possible best = partial; } if (best) { zix_tree_insert( (ZixTree*)values, lilv_node_new_from_node(world, best), NULL); } else { // No matches whatsoever lilv_nodes_free(values); values = NULL; } return values; } LilvNodes* lilv_nodes_from_stream_objects(LilvWorld* world, SordIter* stream, SordQuadIndex field) { if (sord_iter_end(stream)) { sord_iter_free(stream); return NULL; } else if (world->opt.filter_language) { return lilv_nodes_from_stream_objects_i18n(world, stream, 
field); } else { LilvNodes* values = lilv_nodes_new(); FOREACH_MATCH(stream) { const SordNode* value = sord_iter_get_node(stream, field); LilvNode* node = lilv_node_new_from_node(world, value); if (node) { zix_tree_insert((ZixTree*)values, node, NULL); } } sord_iter_free(stream); return values; } }
gpl-2.0
Zeken/audacity
lib-src/lv2/lilv/src/instance.c
16
3661
/*
  Copyright 2007-2014 David Robillard <http://drobilla.net>

  Permission to use, copy, modify, and/or distribute this software for any
  purpose with or without fee is hereby granted, provided that the above
  copyright notice and this permission notice appear in all copies.

  THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
  ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
  WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
  ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
  OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "lilv_internal.h"

/**
   Instantiate `plugin` at `sample_rate` with the given host `features`.

   Loads the plugin's shared library if necessary, scans its descriptors
   for one whose URI (resolved against the bundle URI) matches the plugin,
   and calls its instantiate() callback.  Returns a new LilvInstance that
   owns the library reference (released by lilv_instance_free()), or NULL
   on any failure.  A NULL `features` is replaced by an empty,
   NULL-terminated feature list, as required by the LV2 specification.
*/
LILV_API
LilvInstance*
lilv_plugin_instantiate(const LilvPlugin*        plugin,
                        double                   sample_rate,
                        const LV2_Feature*const* features)
{
	lilv_plugin_load_if_necessary(plugin);

	LilvInstance*         result     = NULL;
	const LilvNode* const lib_uri    = lilv_plugin_get_library_uri(plugin);
	const LilvNode* const bundle_uri = lilv_plugin_get_bundle_uri(plugin);

	const char* bundle_path = lilv_uri_to_path(
		lilv_node_as_uri(lilv_plugin_get_bundle_uri(plugin)));

	// Open (or re-reference) the plugin's shared library
	LilvLib* lib = lilv_lib_open(plugin->world, lib_uri, bundle_path, features);
	if (!lib) {
		return NULL;
	}

	// Parse bundle URI to use as base URI
	const char* bundle_uri_str = lilv_node_as_uri(bundle_uri);
	SerdURI     base_uri;
	if (serd_uri_parse((const uint8_t*)bundle_uri_str, &base_uri)) {
		lilv_lib_close(lib);
		return NULL;
	}

	// If the host passed no features, substitute an empty NULL-terminated
	// list (one pointer slot is sufficient; sizeof(LV2_Feature) over-covers)
	const LV2_Feature** local_features = NULL;
	if (features == NULL) {
		local_features = (const LV2_Feature**)malloc(sizeof(LV2_Feature));
		local_features[0] = NULL;
	}

	// Search for plugin by URI
	for (uint32_t i = 0; true; ++i) {
		const LV2_Descriptor* ld = lilv_lib_get_plugin(lib, i);
		if (!ld) {
			// Descriptor list exhausted without a URI match
			LILV_ERRORF("No plugin <%s> in <%s>\n",
			            lilv_node_as_uri(lilv_plugin_get_uri(plugin)),
			            lilv_node_as_uri(lib_uri));
			lilv_lib_close(lib);
			break;  // return NULL
		}

		// Resolve library plugin URI against base URI
		SerdURI  abs_uri;
		SerdNode abs_uri_node = serd_node_new_uri_from_string(
			(const uint8_t*)ld->URI, &base_uri, &abs_uri);
		if (!abs_uri_node.buf) {
			LILV_ERRORF("Failed to parse plugin URI `%s'\n", ld->URI);
			lilv_lib_close(lib);
			break;
		}

		if (!strcmp((const char*)abs_uri_node.buf,
		            lilv_node_as_uri(lilv_plugin_get_uri(plugin)))) {
			// Create LilvInstance to return
			// NOTE(review): malloc result is used unchecked — on OOM this
			// would dereference NULL; confirm against upstream policy
			result = (LilvInstance*)malloc(sizeof(LilvInstance));
			result->lv2_descriptor = ld;
			result->lv2_handle = ld->instantiate(
				ld, sample_rate, bundle_path,
				(features) ? features : local_features);
			// The instance takes ownership of the library reference
			result->pimpl = lib;
			serd_node_free(&abs_uri_node);
			break;
		} else {
			serd_node_free(&abs_uri_node);
		}
	}

	free(local_features);

	if (result) {
		// Failed to instantiate
		if (result->lv2_handle == NULL) {
			free(result);
			return NULL;
		}

		// "Connect" all ports to NULL (catches bugs)
		for (uint32_t i = 0; i < lilv_plugin_get_num_ports(plugin); ++i)
			result->lv2_descriptor->connect_port(result->lv2_handle, i, NULL);
	}

	return result;
}

/**
   Free `instance`, calling the plugin's cleanup() callback and releasing
   the library reference taken by lilv_plugin_instantiate().  NULL-safe.
*/
LILV_API
void
lilv_instance_free(LilvInstance* instance)
{
	if (!instance)
		return;

	instance->lv2_descriptor->cleanup(instance->lv2_handle);
	instance->lv2_descriptor = NULL;
	lilv_lib_close((LilvLib*)instance->pimpl);
	instance->pimpl = NULL;
	free(instance);
}
gpl-2.0
gh2o/rk3066-linux
drivers/video/rockchip/lcdc/rk3066b_lcdc.c
16
34214
/* * drivers/video/rockchip/chips/rk3066b_lcdc.c * * Copyright (C) 2012 ROCKCHIP, Inc. *Author:yzq<yzq@rock-chips.com> * yxj<yxj@rock-chips.com> *This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/earlysuspend.h> #include <asm/div64.h> #include <asm/uaccess.h> #include <mach/iomux.h> #include "rk3066b_lcdc.h" static int dbg_thresd = 0; module_param(dbg_thresd, int, S_IRUGO|S_IWUSR); #define DBG(level,x...) 
do { if(unlikely(dbg_thresd >= level)) printk(KERN_INFO x); } while (0) static int init_rk3066b_lcdc(struct rk_lcdc_device_driver *dev_drv) { struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); if(lcdc_dev->id == 0) //lcdc0 { lcdc_dev->pd = clk_get(NULL,"pd_lcdc0"); lcdc_dev->hclk = clk_get(NULL,"hclk_lcdc0"); lcdc_dev->aclk = clk_get(NULL,"aclk_lcdc0"); lcdc_dev->dclk = clk_get(NULL,"dclk_lcdc0"); } else if(lcdc_dev->id == 1) { lcdc_dev->pd = clk_get(NULL,"pd_lcdc1"); lcdc_dev->hclk = clk_get(NULL,"hclk_lcdc1"); lcdc_dev->aclk = clk_get(NULL,"aclk_lcdc1"); lcdc_dev->dclk = clk_get(NULL,"dclk_lcdc1"); } else { printk(KERN_ERR "invalid lcdc device!\n"); return -EINVAL; } if (IS_ERR(lcdc_dev->pd) || (IS_ERR(lcdc_dev->aclk)) ||(IS_ERR(lcdc_dev->dclk)) || (IS_ERR(lcdc_dev->hclk))) { printk(KERN_ERR "failed to get lcdc%d clk source\n",lcdc_dev->id); } clk_enable(lcdc_dev->pd); clk_enable(lcdc_dev->hclk); //enable aclk and hclk for register config clk_enable(lcdc_dev->aclk); lcdc_dev->clk_on = 1; if(lcdc_dev->id == 1) //iomux for lcdc1 { rk30_mux_api_set(GPIO2D0_LCDC1DCLK_SMCCSN0_NAME,GPIO2D_LCDC1DCLK); rk30_mux_api_set(GPIO2D1_LCDC1DEN_SMCWEN_NAME,GPIO2D_LCDC1DEN); rk30_mux_api_set(GPIO2D2_LCDC1HSYNC_SMCOEN_NAME,GPIO2D_LCDC1HSYNC); rk30_mux_api_set(GPIO2D3_LCDC1VSYNC_SMCADVN_NAME,GPIO2D_LCDC1VSYNC); rk30_mux_api_set(GPIO2A0_LCDC1DATA0_SMCDATA0_TRACEDATA0_NAME,GPIO2A_LCDC1DATA0); rk30_mux_api_set(GPIO2A1_LCDC1DATA1_SMCDATA1_TRACEDATA1_NAME,GPIO2A_LCDC1DATA1); rk30_mux_api_set(GPIO2A2_LCDC1DATA2_SMCDATA2_TRACEDATA2_NAME,GPIO2A_LCDC1DATA2); rk30_mux_api_set(GPIO2A3_LCDC1DATA3_SMCDATA3_TRACEDATA3_NAME,GPIO2A_LCDC1DATA3); rk30_mux_api_set(GPIO2A4_LCDC1DATA4_SMCDATA4_TRACEDATA4_NAME,GPIO2A_LCDC1DATA4); rk30_mux_api_set(GPIO2A5_LCDC1DATA5_SMCDATA5_TRACEDATA5_NAME,GPIO2A_LCDC1DATA5); rk30_mux_api_set(GPIO2A6_LCDC1DATA6_SMCDATA6_TRACEDATA6_NAME,GPIO2A_LCDC1DATA6); 
rk30_mux_api_set(GPIO2A7_LCDC1DATA7_SMCDATA7_TRACEDATA7_NAME,GPIO2A_LCDC1DATA7); rk30_mux_api_set(GPIO2B0_LCDC1DATA8_SMCDATA8_TRACEDATA8_NAME,GPIO2B_LCDC1DATA8); rk30_mux_api_set(GPIO2B1_LCDC1DATA9_SMCDATA9_TRACEDATA9_NAME,GPIO2B_LCDC1DATA9); rk30_mux_api_set(GPIO2B2_LCDC1DATA10_SMCDATA10_TRACEDATA10_NAME,GPIO2B_LCDC1DATA10); rk30_mux_api_set(GPIO2B3_LCDC1DATA11_SMCDATA11_TRACEDATA11_NAME,GPIO2B_LCDC1DATA11); rk30_mux_api_set(GPIO2B4_LCDC1DATA12_SMCDATA12_TRACEDATA12_NAME,GPIO2B_LCDC1DATA12); rk30_mux_api_set(GPIO2B5_LCDC1DATA13_SMCDATA13_TRACEDATA13_NAME,GPIO2B_LCDC1DATA13); rk30_mux_api_set(GPIO2B6_LCDC1DATA14_SMCDATA14_TRACEDATA14_NAME,GPIO2B_LCDC1DATA14); rk30_mux_api_set(GPIO2B7_LCDC1DATA15_SMCDATA15_TRACEDATA15_NAME,GPIO2B_LCDC1DATA15); rk30_mux_api_set(GPIO2C0_LCDC1DATA16_SMCADDR0_TRACECLK_NAME,GPIO2C_LCDC1DATA16); rk30_mux_api_set(GPIO2C1_LCDC1DATA17_SMCADDR1_TRACECTL_NAME,GPIO2C_LCDC1DATA17); rk30_mux_api_set(GPIO2C2_LCDC1DATA18_SMCADDR2_NAME,GPIO2C_LCDC1DATA18); rk30_mux_api_set(GPIO2C3_LCDC1DATA19_SMCADDR3_NAME,GPIO2C_LCDC1DATA19); rk30_mux_api_set(GPIO2C4_LCDC1DATA20_SMCADDR4_NAME,GPIO2C_LCDC1DATA20); rk30_mux_api_set(GPIO2C5_LCDC1DATA21_SMCADDR5_NAME,GPIO2C_LCDC1DATA21); rk30_mux_api_set(GPIO2C6_LCDC1DATA22_SMCADDR6_NAME,GPIO2C_LCDC1DATA22); rk30_mux_api_set(GPIO2C7_LCDC1DATA23_SMCADDR7_NAME,GPIO2C_LCDC1DATA23); } LcdMskReg(lcdc_dev,SYS_CFG, m_LCDC_AXICLK_AUTO_ENABLE | m_W0_AXI_OUTSTANDING2 | m_W1_AXI_OUTSTANDING2,v_LCDC_AXICLK_AUTO_ENABLE(1) | v_W0_AXI_OUTSTANDING2(1) | v_W1_AXI_OUTSTANDING2(1));//eanble axi-clk auto gating for low power LcdWrReg(lcdc_dev,AXI_MS_ID,v_HWC_CHANNEL_ID(5) | v_WIN2_CHANNEL_ID(4) | v_WIN1_YRGB_CHANNEL_ID(3) | v_WIN0_CBR_CHANNEL_ID(2) | v_WIN0_YRGB_CHANNEL_ID(1)); LcdMskReg(lcdc_dev, INT_STATUS,m_HOR_STARTMASK| m_FRM_STARTMASK | m_SCANNING_MASK, v_HOR_STARTMASK(1) | v_FRM_STARTMASK(1) | v_SCANNING_MASK(1)); //mask all interrupt in init LcdMskReg(lcdc_dev,FIFO_WATER_MARK,m_WIN1_FIFO_FULL_LEVEL,v_WIN1_FIFO_FULL_LEVEL(0x1e0)); 
//LCDC_REG_CFG_DONE(); // write any value to REG_CFG_DONE let config become effective return 0; } static int rk3066b_lcdc_deinit(struct rk3066b_lcdc_device *lcdc_dev) { spin_lock(&lcdc_dev->reg_lock); if(likely(lcdc_dev->clk_on)) { lcdc_dev->clk_on = 0; LcdMskReg(lcdc_dev, INT_STATUS, m_FRM_STARTCLEAR, v_FRM_STARTCLEAR(1)); LcdMskReg(lcdc_dev, INT_STATUS,m_HOR_STARTMASK| m_FRM_STARTMASK | m_SCANNING_MASK, v_HOR_STARTMASK(1) | v_FRM_STARTMASK(1) | v_SCANNING_MASK(1)); //mask all interrupt in init LcdSetBit(lcdc_dev,SYS_CFG,m_LCDC_STANDBY); LCDC_REG_CFG_DONE(); spin_unlock(&lcdc_dev->reg_lock); } else //clk already disabled { spin_unlock(&lcdc_dev->reg_lock); return 0; } mdelay(1); return 0; } static int rk3066b_load_screen(struct rk_lcdc_device_driver *dev_drv, bool initscreen) { int ret = -EINVAL; struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); rk_screen *screen = dev_drv->cur_screen; u64 ft; int fps; u16 face; u16 mcu_total, mcu_rwstart, mcu_csstart, mcu_rwend, mcu_csend; u16 right_margin = screen->right_margin; u16 lower_margin = screen->lower_margin; u16 x_res = screen->x_res, y_res = screen->y_res; // set the rgb or mcu spin_lock(&lcdc_dev->reg_lock); if(likely(lcdc_dev->clk_on)) { if(screen->type==SCREEN_MCU) { LcdMskReg(lcdc_dev, MCU_CTRL, m_MCU_OUTPUT_SELECT,v_MCU_OUTPUT_SELECT(1)); // set out format and mcu timing mcu_total = (screen->mcu_wrperiod*150*1000)/1000000; if(mcu_total>31) mcu_total = 31; if(mcu_total<3) mcu_total = 3; mcu_rwstart = (mcu_total+1)/4 - 1; mcu_rwend = ((mcu_total+1)*3)/4 - 1; mcu_csstart = (mcu_rwstart>2) ? (mcu_rwstart-3) : (0); mcu_csend = (mcu_rwend>15) ? 
(mcu_rwend-1) : (mcu_rwend); //DBG(1,">> mcu_total=%d, mcu_rwstart=%d, mcu_csstart=%d, mcu_rwend=%d, mcu_csend=%d \n", // mcu_total, mcu_rwstart, mcu_csstart, mcu_rwend, mcu_csend); // set horizontal & vertical out timing right_margin = x_res/6; screen->pixclock = 150000000; //mcu fix to 150 MHz LcdMskReg(lcdc_dev, MCU_CTRL,m_MCU_CS_ST | m_MCU_CS_END| m_MCU_RW_ST | m_MCU_RW_END | m_MCU_WRITE_PERIOD | m_MCU_HOLDMODE_SELECT | m_MCU_HOLDMODE_FRAME_ST, v_MCU_CS_ST(mcu_csstart) | v_MCU_CS_END(mcu_csend) | v_MCU_RW_ST(mcu_rwstart) | v_MCU_RW_END(mcu_rwend) | v_MCU_WRITE_PERIOD(mcu_total) | v_MCU_HOLDMODE_SELECT((SCREEN_MCU==screen->type)?(1):(0)) | v_MCU_HOLDMODE_FRAME_ST(0)); } switch (screen->face) { case OUT_P565: face = OUT_P565; LcdMskReg(lcdc_dev, DSP_CTRL0, m_DITHER_DOWN_EN | m_DITHER_DOWN_MODE, v_DITHER_DOWN_EN(1) | v_DITHER_DOWN_MODE(0)); break; case OUT_P666: face = OUT_P666; LcdMskReg(lcdc_dev, DSP_CTRL0, m_DITHER_DOWN_EN | m_DITHER_DOWN_MODE, v_DITHER_DOWN_EN(1) | v_DITHER_DOWN_MODE(1)); break; case OUT_D888_P565: face = OUT_P888; LcdMskReg(lcdc_dev, DSP_CTRL0, m_DITHER_DOWN_EN | m_DITHER_DOWN_MODE, v_DITHER_DOWN_EN(1) | v_DITHER_DOWN_MODE(0)); break; case OUT_D888_P666: face = OUT_P888; LcdMskReg(lcdc_dev, DSP_CTRL0, m_DITHER_DOWN_EN | m_DITHER_DOWN_MODE, v_DITHER_DOWN_EN(1) | v_DITHER_DOWN_MODE(1)); break; case OUT_P888: face = OUT_P888; LcdMskReg(lcdc_dev, DSP_CTRL0, m_DITHER_UP_EN, v_DITHER_UP_EN(1)); LcdMskReg(lcdc_dev, DSP_CTRL0, m_DITHER_DOWN_EN | m_DITHER_DOWN_MODE, v_DITHER_DOWN_EN(0) | v_DITHER_DOWN_MODE(0)); break; default: LcdMskReg(lcdc_dev, DSP_CTRL0, m_DITHER_UP_EN, v_DITHER_UP_EN(0)); LcdMskReg(lcdc_dev, DSP_CTRL0, m_DITHER_DOWN_EN | m_DITHER_DOWN_MODE, v_DITHER_DOWN_EN(0) | v_DITHER_DOWN_MODE(0)); face = screen->face; break; } //use default overlay,set vsyn hsync den dclk polarity LcdMskReg(lcdc_dev, DSP_CTRL0,m_DISPLAY_FORMAT | m_HSYNC_POLARITY | m_VSYNC_POLARITY | m_DEN_POLARITY |m_DCLK_POLARITY,v_DISPLAY_FORMAT(face) | 
v_HSYNC_POLARITY(screen->pin_hsync) | v_VSYNC_POLARITY(screen->pin_vsync) | v_DEN_POLARITY(screen->pin_den) | v_DCLK_POLARITY(screen->pin_dclk)); //set background color to black,set swap according to the screen panel,disable blank mode LcdMskReg(lcdc_dev, DSP_CTRL1, m_BG_COLOR | m_OUTPUT_RB_SWAP | m_OUTPUT_RG_SWAP | m_DELTA_SWAP | m_DUMMY_SWAP | m_BLANK_MODE,v_BG_COLOR(0x000000) | v_OUTPUT_RB_SWAP(screen->swap_rb) | v_OUTPUT_RG_SWAP(screen->swap_rg) | v_DELTA_SWAP(screen->swap_delta) | v_DUMMY_SWAP(screen->swap_dumy) | v_BLACK_MODE(0)); LcdWrReg(lcdc_dev, DSP_HTOTAL_HS_END,v_HSYNC(screen->hsync_len) | v_HORPRD(screen->hsync_len + screen->left_margin + x_res + right_margin)); LcdWrReg(lcdc_dev, DSP_HACT_ST_END, v_HAEP(screen->hsync_len + screen->left_margin + x_res) | v_HASP(screen->hsync_len + screen->left_margin)); LcdWrReg(lcdc_dev, DSP_VTOTAL_VS_END, v_VSYNC(screen->vsync_len) | v_VERPRD(screen->vsync_len + screen->upper_margin + y_res + lower_margin)); LcdWrReg(lcdc_dev, DSP_VACT_ST_END, v_VAEP(screen->vsync_len + screen->upper_margin+y_res)| v_VASP(screen->vsync_len + screen->upper_margin)); // let above to take effect //LCDC_REG_CFG_DONE(); } spin_unlock(&lcdc_dev->reg_lock); ret = clk_set_rate(lcdc_dev->dclk, screen->pixclock); if(ret) { printk(KERN_ERR ">>>>>> set lcdc%d dclk failed\n",lcdc_dev->id); } lcdc_dev->driver.pixclock = lcdc_dev->pixclock = div_u64(1000000000000llu, clk_get_rate(lcdc_dev->dclk)); clk_enable(lcdc_dev->dclk); ft = (u64)(screen->upper_margin + screen->lower_margin + screen->y_res +screen->vsync_len)* (screen->left_margin + screen->right_margin + screen->x_res + screen->hsync_len)* (dev_drv->pixclock); // one frame time ,(pico seconds) fps = div64_u64(1000000000000llu,ft); screen->ft = 1000/fps; printk("%s: dclk:%lu>>fps:%d ",lcdc_dev->driver.name,clk_get_rate(lcdc_dev->dclk),fps); if(screen->init) { screen->init(); } if(screen->sscreen_set) { screen->sscreen_set(screen,!initscreen); } printk("%s for lcdc%d 
ok!\n",__func__,lcdc_dev->id); return 0; } static int mcu_refresh(struct rk3066b_lcdc_device *lcdc_dev) { return 0; } //enable layer,open:1,enable;0 disable static int win0_open(struct rk3066b_lcdc_device *lcdc_dev,bool open) { spin_lock(&lcdc_dev->reg_lock); if(likely(lcdc_dev->clk_on)) { if(open) { if(!lcdc_dev->atv_layer_cnt) { LcdMskReg(lcdc_dev, SYS_CFG,m_LCDC_STANDBY,v_LCDC_STANDBY(0)); } lcdc_dev->atv_layer_cnt++; } else { lcdc_dev->atv_layer_cnt--; } lcdc_dev->driver.layer_par[0]->state = open; LcdMskReg(lcdc_dev, SYS_CFG, m_W0_EN, v_W0_EN(open)); if(!lcdc_dev->atv_layer_cnt) //if no layer used,disable lcdc { LcdMskReg(lcdc_dev, SYS_CFG,m_LCDC_STANDBY,v_LCDC_STANDBY(1)); } //LCDC_REG_CFG_DONE(); } spin_unlock(&lcdc_dev->reg_lock); printk(KERN_INFO "lcdc%d win0 %s\n",lcdc_dev->id,open?"open":"closed"); return 0; } static int win1_open(struct rk3066b_lcdc_device *lcdc_dev,bool open) { spin_lock(&lcdc_dev->reg_lock); if(likely(lcdc_dev->clk_on)) { if(open) { if(!lcdc_dev->atv_layer_cnt) { printk("lcdc%d wakeup from stanby\n",lcdc_dev->id); LcdMskReg(lcdc_dev, SYS_CFG,m_LCDC_STANDBY,v_LCDC_STANDBY(0)); } lcdc_dev->atv_layer_cnt++; } else { lcdc_dev->atv_layer_cnt--; } lcdc_dev->driver.layer_par[1]->state = open; LcdMskReg(lcdc_dev, SYS_CFG, m_W1_EN, v_W1_EN(open)); if(!lcdc_dev->atv_layer_cnt) //if no layer used,disable lcdc { printk(KERN_INFO "no layer of lcdc%d is used,go to standby!",lcdc_dev->id); LcdMskReg(lcdc_dev, SYS_CFG,m_LCDC_STANDBY,v_LCDC_STANDBY(1)); } LCDC_REG_CFG_DONE(); } spin_unlock(&lcdc_dev->reg_lock); printk(KERN_INFO "lcdc%d win1 %s\n",lcdc_dev->id,open?"open":"closed"); return 0; } static int rk3066b_lcdc_blank(struct rk_lcdc_device_driver*lcdc_drv,int layer_id,int blank_mode) { struct rk3066b_lcdc_device * lcdc_dev = container_of(lcdc_drv,struct rk3066b_lcdc_device ,driver); printk(KERN_INFO "%s>>>>>%d\n",__func__, blank_mode); spin_lock(&lcdc_dev->reg_lock); if(likely(lcdc_dev->clk_on)) { switch(blank_mode) { case FB_BLANK_UNBLANK: 
LcdMskReg(lcdc_dev,DSP_CTRL1,m_BLANK_MODE ,v_BLANK_MODE(0)); break; case FB_BLANK_NORMAL: LcdMskReg(lcdc_dev,DSP_CTRL1,m_BLANK_MODE ,v_BLANK_MODE(1)); break; default: LcdMskReg(lcdc_dev,DSP_CTRL1,m_BLANK_MODE ,v_BLANK_MODE(1)); break; } LCDC_REG_CFG_DONE(); } spin_unlock(&lcdc_dev->reg_lock); return 0; } static int win0_display(struct rk3066b_lcdc_device *lcdc_dev,struct layer_par *par ) { u32 y_addr; u32 uv_addr; y_addr = par->smem_start + par->y_offset; uv_addr = par->cbr_start + par->c_offset; DBG(2,KERN_INFO "lcdc%d>>%s:y_addr:0x%x>>uv_addr:0x%x\n",lcdc_dev->id,__func__,y_addr,uv_addr); spin_lock(&lcdc_dev->reg_lock); if(likely(lcdc_dev->clk_on)) { LcdWrReg(lcdc_dev, WIN0_YRGB_MST,y_addr); LcdWrReg(lcdc_dev, WIN0_CBR_MST,uv_addr); LCDC_REG_CFG_DONE(); } spin_unlock(&lcdc_dev->reg_lock); return 0; } static int win1_display(struct rk3066b_lcdc_device *lcdc_dev,struct layer_par *par ) { u32 y_addr; u32 uv_addr; y_addr = par->smem_start + par->y_offset; uv_addr = par->cbr_start + par->c_offset; DBG(2,KERN_INFO "lcdc%d>>%s>>y_addr:0x%x>>uv_addr:0x%x\n",lcdc_dev->id,__func__,y_addr,uv_addr); spin_lock(&lcdc_dev->reg_lock); if(likely(lcdc_dev->clk_on)) { LcdWrReg(lcdc_dev, WIN1_YRGB_MST, y_addr); LCDC_REG_CFG_DONE(); } spin_unlock(&lcdc_dev->reg_lock); return 0; } static int win0_set_par(struct rk3066b_lcdc_device *lcdc_dev,rk_screen *screen, struct layer_par *par ) { u32 xact, yact, xvir, yvir, xpos, ypos; u32 ScaleYrgbX = 0x1000; u32 ScaleYrgbY = 0x1000; u32 ScaleCbrX = 0x1000; u32 ScaleCbrY = 0x1000; u8 fmt_cfg =0 ; //data format register config value xact = par->xact; //active (origin) picture window width/height yact = par->yact; xvir = par->xvir; // virtual resolution yvir = par->yvir; xpos = par->xpos+screen->left_margin + screen->hsync_len; ypos = par->ypos+screen->upper_margin + screen->vsync_len; ScaleYrgbX = CalScale(xact, par->xsize); //both RGB and yuv need this two factor ScaleYrgbY = CalScale(yact, par->ysize); switch (par->format) { case ARGB888: 
fmt_cfg = 0; break; case RGB565: fmt_cfg = 1; break; case YUV422:// yuv422 fmt_cfg = 2; ScaleCbrX = CalScale((xact/2), par->xsize); ScaleCbrY = CalScale(yact, par->ysize); break; case YUV420: // yuv420 fmt_cfg = 3; ScaleCbrX = CalScale(xact/2, par->xsize); ScaleCbrY = CalScale(yact/2, par->ysize); break; case YUV444:// yuv444 fmt_cfg = 4; ScaleCbrX = CalScale(xact, par->xsize); ScaleCbrY = CalScale(yact, par->ysize); break; default: break; } DBG(1,"%s for lcdc%d>>format:%d>>>xact:%d>>yact:%d>>xsize:%d>>ysize:%d>>xvir:%d>>yvir:%d>>xpos:%d>>ypos:%d>>\n", __func__,lcdc_dev->id,par->format,xact,yact,par->xsize,par->ysize,xvir,yvir,xpos,ypos); spin_lock(&lcdc_dev->reg_lock); if(likely(lcdc_dev->clk_on)) { LcdWrReg(lcdc_dev, WIN0_SCL_FACTOR_YRGB, v_X_SCL_FACTOR(ScaleYrgbX) | v_Y_SCL_FACTOR(ScaleYrgbY)); LcdWrReg(lcdc_dev, WIN0_SCL_FACTOR_CBR,v_X_SCL_FACTOR(ScaleCbrX)| v_Y_SCL_FACTOR(ScaleCbrY)); LcdMskReg(lcdc_dev,SYS_CFG, m_W0_FORMAT, v_W0_FORMAT(fmt_cfg)); //(inf->video_mode==0) LcdWrReg(lcdc_dev, WIN0_ACT_INFO,v_ACT_WIDTH(xact) | v_ACT_HEIGHT(yact)); LcdWrReg(lcdc_dev, WIN0_DSP_ST, v_DSP_STX(xpos) | v_DSP_STY(ypos)); LcdWrReg(lcdc_dev, WIN0_DSP_INFO, v_DSP_WIDTH(par->xsize)| v_DSP_HEIGHT(par->ysize)); LcdMskReg(lcdc_dev, WIN0_COLOR_KEY_CTRL, m_COLORKEY_EN | m_KEYCOLOR, v_COLORKEY_EN(0) | v_KEYCOLOR(0)); LcdWrReg(lcdc_dev,WIN0_VIR,v_VIRWIDTH(xvir)); //LCDC_REG_CFG_DONE(); } spin_unlock(&lcdc_dev->reg_lock); return 0; } static int win1_set_par(struct rk3066b_lcdc_device *lcdc_dev,rk_screen *screen, struct layer_par *par ) { u32 xact, yact, xvir, yvir, xpos, ypos; u32 ScaleYrgbX = 0x1000; u32 ScaleYrgbY = 0x1000; u32 ScaleCbrX = 0x1000; u32 ScaleCbrY = 0x1000; u8 fmt_cfg; xact = par->xact; yact = par->yact; xvir = par->xvir; yvir = par->yvir; xpos = par->xpos+screen->left_margin + screen->hsync_len; ypos = par->ypos+screen->upper_margin + screen->vsync_len; ScaleYrgbX = CalScale(xact, par->xsize); ScaleYrgbY = CalScale(yact, par->ysize); DBG(1,"%s for 
lcdc%d>>format:%d>>>xact:%d>>yact:%d>>xsize:%d>>ysize:%d>>xvir:%d>>yvir:%d>>xpos:%d>>ypos:%d>>\n", __func__,lcdc_dev->id,par->format,xact,yact,par->xsize,par->ysize,xvir,yvir,xpos,ypos); spin_lock(&lcdc_dev->reg_lock); if(likely(lcdc_dev->clk_on)) { switch (par->format) { case ARGB888: fmt_cfg = 0; break; case RGB565: fmt_cfg = 1; break; default: break; } LcdMskReg(lcdc_dev,SYS_CFG, m_W1_FORMAT, v_W1_FORMAT(fmt_cfg)); LcdWrReg(lcdc_dev, WIN1_DSP_ST,v_DSP_STX(xpos) | v_DSP_STY(ypos)); LcdWrReg(lcdc_dev, WIN1_DSP_INFO,v_DSP_WIDTH(par->xsize) | v_DSP_HEIGHT(par->ysize)); // enable win1 color key and set the color to black(rgb=0) LcdMskReg(lcdc_dev,WIN1_COLOR_KEY_CTRL, m_COLORKEY_EN | m_KEYCOLOR,v_COLORKEY_EN(0) | v_KEYCOLOR(0)); LcdWrReg(lcdc_dev,WIN1_VIR,v_VIRWIDTH(xvir)); //LCDC_REG_CFG_DONE(); } spin_unlock(&lcdc_dev->reg_lock); return 0; } static int rk3066b_lcdc_open(struct rk_lcdc_device_driver *dev_drv,int layer_id,bool open) { struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); if(layer_id == 0) { win0_open(lcdc_dev,open); } else if(layer_id == 1) { win1_open(lcdc_dev,open); } return 0; } static int rk3066b_lcdc_set_par(struct rk_lcdc_device_driver *dev_drv,int layer_id) { struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); struct layer_par *par = NULL; rk_screen *screen = dev_drv->cur_screen; if(!screen) { printk(KERN_ERR "screen is null!\n"); return -ENOENT; } if(layer_id==0) { par = dev_drv->layer_par[0]; win0_set_par(lcdc_dev,screen,par); } else if(layer_id==1) { par = dev_drv->layer_par[1]; win1_set_par(lcdc_dev,screen,par); } return 0; } int rk3066b_lcdc_pan_display(struct rk_lcdc_device_driver * dev_drv,int layer_id) { struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); struct layer_par *par = NULL; rk_screen *screen = dev_drv->cur_screen; unsigned long flags; int timeout; if(!screen) { printk(KERN_ERR "screen is null!\n"); 
return -ENOENT; } if(layer_id==0) { par = dev_drv->layer_par[0]; win0_display(lcdc_dev,par); } else if(layer_id==1) { par = dev_drv->layer_par[1]; win1_display(lcdc_dev,par); } if((dev_drv->first_frame)) //this is the first frame of the system ,enable frame start interrupt { dev_drv->first_frame = 0; LcdMskReg(lcdc_dev,INT_STATUS,m_FRM_STARTCLEAR | m_FRM_STARTMASK , v_FRM_STARTCLEAR(1) | v_FRM_STARTMASK(0)); LCDC_REG_CFG_DONE(); // write any value to REG_CFG_DONE let config become effective } if(dev_drv->num_buf < 3) //3buffer ,no need to wait for sysn { spin_lock_irqsave(&dev_drv->cpl_lock,flags); init_completion(&dev_drv->frame_done); spin_unlock_irqrestore(&dev_drv->cpl_lock,flags); timeout = wait_for_completion_timeout(&dev_drv->frame_done,msecs_to_jiffies(dev_drv->cur_screen->ft+5)); if(!timeout&&(!dev_drv->frame_done.done)) { printk(KERN_ERR "wait for new frame start time out!\n"); return -ETIMEDOUT; } } return 0; } int rk3066b_lcdc_ioctl(struct rk_lcdc_device_driver * dev_drv,unsigned int cmd, unsigned long arg,int layer_id) { struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); u32 panel_size[2]; void __user *argp = (void __user *)arg; int ret = 0; switch(cmd) { case FBIOGET_PANEL_SIZE: //get panel size panel_size[0] = lcdc_dev->screen->x_res; panel_size[1] = lcdc_dev->screen->y_res; if(copy_to_user(argp, panel_size, 8)) return -EFAULT; break; default: break; } return ret; } static int rk3066b_lcdc_get_layer_state(struct rk_lcdc_device_driver *dev_drv,int layer_id) { struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); struct layer_par *par = dev_drv->layer_par[layer_id]; spin_lock(&lcdc_dev->reg_lock); if(lcdc_dev->clk_on) { if(layer_id == 0) { par->state = LcdReadBit(lcdc_dev,SYS_CFG,m_W0_EN); } else if( layer_id == 1) { par->state = LcdReadBit(lcdc_dev,SYS_CFG,m_W1_EN); } } spin_unlock(&lcdc_dev->reg_lock); return par->state; } /*********************************** 
overlay manager swap:1 win0 on the top of win1 0 win1 on the top of win0 set : 1 set overlay 0 get overlay state ************************************/ static int rk3066b_lcdc_ovl_mgr(struct rk_lcdc_device_driver *dev_drv,int swap,bool set) { struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); int ovl; spin_lock(&lcdc_dev->reg_lock); if(lcdc_dev->clk_on) { if(set) //set overlay { LcdMskReg(lcdc_dev,DSP_CTRL0,m_W0W1_POSITION_SWAP,v_W0W1_POSITION_SWAP(swap)); LcdWrReg(lcdc_dev, REG_CFG_DONE, 0x01); LCDC_REG_CFG_DONE(); ovl = swap; } else //get overlay { ovl = LcdReadBit(lcdc_dev,DSP_CTRL0,m_W0W1_POSITION_SWAP); } } else { ovl = -EPERM; } spin_unlock(&lcdc_dev->reg_lock); return ovl; } static int rk3066b_lcdc_get_disp_info(struct rk_lcdc_device_driver *dev_drv,int layer_id) { struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); return 0; } /******************************************* lcdc fps manager,set or get lcdc fps set:0 get 1 set ********************************************/ static int rk3066b_lcdc_fps_mgr(struct rk_lcdc_device_driver *dev_drv,int fps,bool set) { struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); rk_screen * screen = dev_drv->cur_screen; u64 ft = 0; u32 dotclk; int ret; if(set) { ft = div_u64(1000000000000llu,fps); dev_drv->pixclock = div_u64(ft,(screen->upper_margin + screen->lower_margin + screen->y_res +screen->vsync_len)* (screen->left_margin + screen->right_margin + screen->x_res + screen->hsync_len)); dotclk = div_u64(1000000000000llu,dev_drv->pixclock); ret = clk_set_rate(lcdc_dev->dclk, dotclk); if(ret) { printk(KERN_ERR ">>>>>> set lcdc%d dclk failed\n",lcdc_dev->id); } dev_drv->pixclock = lcdc_dev->pixclock = div_u64(1000000000000llu, clk_get_rate(lcdc_dev->dclk)); } ft = (u64)(screen->upper_margin + screen->lower_margin + screen->y_res +screen->vsync_len)* (screen->left_margin + screen->right_margin + 
screen->x_res + screen->hsync_len)* (dev_drv->pixclock); // one frame time ,(pico seconds) fps = div64_u64(1000000000000llu,ft); screen->ft = 1000/fps ; //one frame time in ms return fps; } static int rk3066b_fb_layer_remap(struct rk_lcdc_device_driver *dev_drv, enum fb_win_map_order order) { mutex_lock(&dev_drv->fb_win_id_mutex); if(order == FB_DEFAULT_ORDER) { order = FB0_WIN0_FB1_WIN1_FB2_WIN2; } dev_drv->fb2_win_id = order/100; dev_drv->fb1_win_id = (order/10)%10; dev_drv->fb0_win_id = order%10; mutex_unlock(&dev_drv->fb_win_id_mutex); printk("fb0:win%d\nfb1:win%d\nfb2:win%d\n",dev_drv->fb0_win_id,dev_drv->fb1_win_id, dev_drv->fb2_win_id); return 0; } static int rk3066b_fb_get_layer(struct rk_lcdc_device_driver *dev_drv,const char *id) { int layer_id = 0; mutex_lock(&dev_drv->fb_win_id_mutex); if(!strcmp(id,"fb0") || !strcmp(id,"fb2")) { layer_id = dev_drv->fb0_win_id; } else if(!strcmp(id,"fb1") || !strcmp(id,"fb3")) { layer_id = dev_drv->fb1_win_id; } else { printk(KERN_ERR "%s>>un supported %s\n",__func__,id); layer_id = -1; } mutex_unlock(&dev_drv->fb_win_id_mutex); return layer_id; } int rk3066b_lcdc_early_suspend(struct rk_lcdc_device_driver *dev_drv) { struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); spin_lock(&lcdc_dev->reg_lock); if(likely(lcdc_dev->clk_on)) { lcdc_dev->clk_on = 0; LcdMskReg(lcdc_dev, INT_STATUS, m_FRM_STARTCLEAR, v_FRM_STARTCLEAR(1)); LcdMskReg(lcdc_dev, SYS_CFG,m_LCDC_STANDBY,v_LCDC_STANDBY(1)); LCDC_REG_CFG_DONE(); spin_unlock(&lcdc_dev->reg_lock); } else //clk already disabled { spin_unlock(&lcdc_dev->reg_lock); return 0; } mdelay(1); clk_disable(lcdc_dev->dclk); clk_disable(lcdc_dev->hclk); clk_disable(lcdc_dev->aclk); clk_disable(lcdc_dev->pd); return 0; } int rk3066b_lcdc_early_resume(struct rk_lcdc_device_driver *dev_drv) { struct rk3066b_lcdc_device *lcdc_dev = container_of(dev_drv,struct rk3066b_lcdc_device,driver); if(!lcdc_dev->clk_on) { clk_enable(lcdc_dev->pd); 
clk_enable(lcdc_dev->hclk); clk_enable(lcdc_dev->dclk); clk_enable(lcdc_dev->aclk); } memcpy((u8*)lcdc_dev->preg, (u8*)&lcdc_dev->regbak, 0xc4); //resume reg spin_lock(&lcdc_dev->reg_lock); if(lcdc_dev->atv_layer_cnt) { LcdMskReg(lcdc_dev, SYS_CFG,m_LCDC_STANDBY,v_LCDC_STANDBY(0)); LCDC_REG_CFG_DONE(); } lcdc_dev->clk_on = 1; spin_unlock(&lcdc_dev->reg_lock); return 0; } static irqreturn_t rk3066b_lcdc_isr(int irq, void *dev_id) { struct rk3066b_lcdc_device *lcdc_dev = (struct rk3066b_lcdc_device *)dev_id; LcdMskReg(lcdc_dev, INT_STATUS, m_FRM_STARTCLEAR, v_FRM_STARTCLEAR(1)); LCDC_REG_CFG_DONE(); //LcdMskReg(lcdc_dev, INT_STATUS, m_LINE_FLAG_INT_CLEAR, v_LINE_FLAG_INT_CLEAR(1)); if(lcdc_dev->driver.num_buf < 3) //three buffer ,no need to wait for sync { spin_lock(&(lcdc_dev->driver.cpl_lock)); complete(&(lcdc_dev->driver.frame_done)); spin_unlock(&(lcdc_dev->driver.cpl_lock)); } return IRQ_HANDLED; } static struct layer_par lcdc_layer[] = { [0] = { .name = "win0", .id = 0, .support_3d = true, }, [1] = { .name = "win1", .id = 1, .support_3d = false, }, }; static struct rk_lcdc_device_driver lcdc_driver = { .name = "lcdc", .def_layer_par = lcdc_layer, .num_layer = ARRAY_SIZE(lcdc_layer), .open = rk3066b_lcdc_open, .init_lcdc = init_rk3066b_lcdc, .ioctl = rk3066b_lcdc_ioctl, .suspend = rk3066b_lcdc_early_suspend, .resume = rk3066b_lcdc_early_resume, .set_par = rk3066b_lcdc_set_par, .blank = rk3066b_lcdc_blank, .pan_display = rk3066b_lcdc_pan_display, .load_screen = rk3066b_load_screen, .get_layer_state = rk3066b_lcdc_get_layer_state, .ovl_mgr = rk3066b_lcdc_ovl_mgr, .get_disp_info = rk3066b_lcdc_get_disp_info, .fps_mgr = rk3066b_lcdc_fps_mgr, .fb_get_layer = rk3066b_fb_get_layer, .fb_layer_remap = rk3066b_fb_layer_remap, }; #ifdef CONFIG_PM static int rk3066b_lcdc_suspend(struct platform_device *pdev, pm_message_t state) { return 0; } static int rk3066b_lcdc_resume(struct platform_device *pdev) { return 0; } #else #define rk3066b_lcdc_suspend NULL #define 
rk3066b_lcdc_resume NULL #endif static int __devinit rk3066b_lcdc_probe (struct platform_device *pdev) { struct rk3066b_lcdc_device *lcdc_dev=NULL; rk_screen *screen; rk_screen *screen1; struct rk29fb_info *screen_ctr_info; struct resource *res = NULL; struct resource *mem; int ret = 0; /*************Malloc rk3066blcdc_inf and set it to pdev for drvdata**********/ lcdc_dev = kzalloc(sizeof(struct rk3066b_lcdc_device), GFP_KERNEL); if(!lcdc_dev) { dev_err(&pdev->dev, ">>rk3066b lcdc device kmalloc fail!"); return -ENOMEM; } platform_set_drvdata(pdev, lcdc_dev); lcdc_dev->id = pdev->id; screen_ctr_info = (struct rk29fb_info * )pdev->dev.platform_data; screen = kzalloc(sizeof(rk_screen), GFP_KERNEL); if(!screen) { dev_err(&pdev->dev, ">>rk3066b lcdc screen kmalloc fail!"); ret = -ENOMEM; goto err0; } else { lcdc_dev->screen = screen; } screen->lcdc_id = lcdc_dev->id; screen->screen_id = 0; #if defined(CONFIG_ONE_LCDC_DUAL_OUTPUT_INF)&& defined(CONFIG_RK610_LVDS) screen1 = kzalloc(sizeof(rk_screen), GFP_KERNEL); if(!screen1) { dev_err(&pdev->dev, ">>rk3066b lcdc screen1 kmalloc fail!"); ret = -ENOMEM; goto err0; } screen1->lcdc_id = 1; screen1->screen_id = 1; printk("use lcdc%d and rk610 implemention dual display!\n",lcdc_dev->id); #endif /****************get lcdc0 reg *************************/ res = platform_get_resource(pdev, IORESOURCE_MEM,0); if (res == NULL) { dev_err(&pdev->dev, "failed to get io resource for lcdc%d \n",lcdc_dev->id); ret = -ENOENT; goto err1; } lcdc_dev->reg_phy_base = res->start; lcdc_dev->len = resource_size(res); mem = request_mem_region(lcdc_dev->reg_phy_base, resource_size(res), pdev->name); if (mem == NULL) { dev_err(&pdev->dev, "failed to request mem region for lcdc%d\n",lcdc_dev->id); ret = -ENOENT; goto err1; } lcdc_dev->reg_vir_base = ioremap(lcdc_dev->reg_phy_base, resource_size(res)); if (lcdc_dev->reg_vir_base == NULL) { dev_err(&pdev->dev, "cannot map IO\n"); ret = -ENXIO; goto err2; } lcdc_dev->preg = 
(LCDC_REG*)lcdc_dev->reg_vir_base; printk("lcdc%d:reg_phy_base = 0x%08x,reg_vir_base:0x%p\n",pdev->id,lcdc_dev->reg_phy_base, lcdc_dev->preg); lcdc_dev->driver.dev=&pdev->dev; lcdc_dev->driver.screen0 = screen; #if defined(CONFIG_ONE_LCDC_DUAL_OUTPUT_INF)&& defined(CONFIG_RK610_LVDS) lcdc_dev->driver.screen1 = screen1; #endif lcdc_dev->driver.cur_screen = screen; lcdc_dev->driver.screen_ctr_info = screen_ctr_info; spin_lock_init(&lcdc_dev->reg_lock); lcdc_dev->irq = platform_get_irq(pdev, 0); if(lcdc_dev->irq < 0) { dev_err(&pdev->dev, "cannot find IRQ\n"); goto err3; } ret = request_irq(lcdc_dev->irq, rk3066b_lcdc_isr, IRQF_DISABLED,dev_name(&pdev->dev),lcdc_dev); if (ret) { dev_err(&pdev->dev, "cannot requeset irq %d - err %d\n", lcdc_dev->irq, ret); ret = -EBUSY; goto err3; } ret = rk_fb_register(&(lcdc_dev->driver),&lcdc_driver,lcdc_dev->id); if(ret < 0) { printk(KERN_ERR "register fb for lcdc%d failed!\n",lcdc_dev->id); goto err4; } printk("rk3066b lcdc%d probe ok!\n",lcdc_dev->id); return 0; err4: free_irq(lcdc_dev->irq,lcdc_dev); err3: iounmap(lcdc_dev->reg_vir_base); err2: release_mem_region(lcdc_dev->reg_phy_base,resource_size(res)); err1: kfree(screen); err0: platform_set_drvdata(pdev, NULL); kfree(lcdc_dev); return ret; } static int __devexit rk3066b_lcdc_remove(struct platform_device *pdev) { struct rk3066b_lcdc_device *lcdc_dev = platform_get_drvdata(pdev); rk_fb_unregister(&(lcdc_dev->driver)); rk3066b_lcdc_deinit(lcdc_dev); iounmap(lcdc_dev->reg_vir_base); release_mem_region(lcdc_dev->reg_phy_base,lcdc_dev->len); kfree(lcdc_dev->screen); kfree(lcdc_dev); return 0; } static void rk3066b_lcdc_shutdown(struct platform_device *pdev) { struct rk3066b_lcdc_device *lcdc_dev = platform_get_drvdata(pdev); if(lcdc_dev->driver.cur_screen->standby) //standby the screen if necessary lcdc_dev->driver.cur_screen->standby(1); if(lcdc_dev->driver.screen_ctr_info->io_disable) //power off the screen if necessary lcdc_dev->driver.screen_ctr_info->io_disable(); 
if(lcdc_dev->driver.cur_screen->sscreen_set) //turn off lvds if necessary lcdc_dev->driver.cur_screen->sscreen_set(lcdc_dev->driver.cur_screen , 0); rk_fb_unregister(&(lcdc_dev->driver)); rk3066b_lcdc_deinit(lcdc_dev); /*iounmap(lcdc_dev->reg_vir_base); release_mem_region(lcdc_dev->reg_phy_base,lcdc_dev->len); kfree(lcdc_dev->screen); kfree(lcdc_dev);*/ } static struct platform_driver rk3066b_lcdc_driver = { .probe = rk3066b_lcdc_probe, .remove = __devexit_p(rk3066b_lcdc_remove), .driver = { .name = "rk30-lcdc", .owner = THIS_MODULE, }, .suspend = rk3066b_lcdc_suspend, .resume = rk3066b_lcdc_resume, .shutdown = rk3066b_lcdc_shutdown, }; static int __init rk3066b_lcdc_init(void) { return platform_driver_register(&rk3066b_lcdc_driver); } static void __exit rk3066b_lcdc_exit(void) { platform_driver_unregister(&rk3066b_lcdc_driver); } fs_initcall(rk3066b_lcdc_init); module_exit(rk3066b_lcdc_exit);
gpl-2.0
CreativeCimmons/ORB-SLAM-Android-app
slam_ext/Thirdparty/g2o/g2o/core/parameter_container.cpp
16
4459
// g2o - General Graph Optimization // Copyright (C) 2011 R. Kuemmerle, G. Grisetti, W. Burgard // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS // IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED // TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A // PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR // PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF // LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS // SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
#include "parameter_container.h"
#include "factory.h"
#include "parameter.h"

#include "../stuff/macros.h"
#include "../stuff/color_macros.h"
#include "../stuff/string_tools.h"

#include <cassert>
#include <iostream>

namespace g2o {

  using namespace std;

  ParameterContainer::ParameterContainer(bool isMainStorage_) :
    _isMainStorage(isMainStorage_)
  {
  }

  /**
   * Delete all owned parameters and empty the container.
   * A non-main storage is only a view onto parameters owned elsewhere,
   * so it must neither delete nor remove them.
   */
  void ParameterContainer::clear() {
    if (!_isMainStorage)
      return;
    for (iterator it = begin(); it != end(); it++){
      delete it->second;
    }
    BaseClass::clear();
  }

  ParameterContainer::~ParameterContainer(){
    clear();
  }

  /**
   * Add a parameter to the container keyed by its id.
   * @return false if the id is negative or already taken; ownership is
   *         transferred to the container only on success.
   */
  bool ParameterContainer::addParameter(Parameter* p){
    if (p->id()<0)
      return false;
    iterator it=find(p->id());
    if (it!=end())
      return false;
    insert(make_pair(p->id(), p));
    return true;
  }

  /** Look up a parameter by id; returns 0 when it is not present. */
  Parameter* ParameterContainer::getParameter(int id) {
    iterator it=find(id);
    if (it==end())
      return 0;
    return it->second;
  }

  /**
   * Remove a parameter from the container without deleting it.
   * Ownership passes to the caller; returns 0 when the id is unknown.
   */
  Parameter* ParameterContainer::detachParameter(int id){
    iterator it=find(id);
    if (it==end())
      return 0;
    Parameter* p=it->second;
    erase(it);
    return p;
  }

  /** Serialize all parameters, one "<tag> <id> <data>" record per line. */
  bool ParameterContainer::write(std::ostream& os) const{
    Factory* factory = Factory::instance();
    for (const_iterator it=begin(); it!=end(); it++){
      os << factory->tag(it->second) << " ";
      os << it->second->id() << " ";
      it->second->write(os);
      os << endl;
    }
    return true;
  }

  /**
   * Parse parameters from a stream written by write().
   * Blank lines, comments ('#') and unknown tags are skipped; an optional
   * lookup table maps legacy type tags to their current names.  Parameters
   * that fail to parse or whose id is already present are discarded.
   */
  bool ParameterContainer::read(std::istream& is, const std::map<std::string, std::string>* _renamedTypesLookup){
    stringstream currentLine;
    string token;

    Factory* factory = Factory::instance();
    HyperGraph::GraphElemBitset elemBitset;
    elemBitset[HyperGraph::HGET_PARAMETER] = 1;

    while (1) {
      int bytesRead = readLine(is, currentLine);
      if (bytesRead == -1)
        break;
      currentLine >> token;
      if (bytesRead == 0 || token.size() == 0 || token[0] == '#')
        continue;
      // translate legacy tags to their current names, if requested
      if (_renamedTypesLookup && _renamedTypesLookup->size()>0){
        map<string, string>::const_iterator foundIt = _renamedTypesLookup->find(token);
        if (foundIt != _renamedTypesLookup->end()) {
          token = foundIt->second;
        }
      }

      HyperGraph::HyperGraphElement* element = factory->construct(token, elemBitset);
      if (! element) // not a parameter or otherwise unknown tag
        continue;
      assert(element->elementType() == HyperGraph::HGET_PARAMETER && "Should be a param");

      Parameter* p = static_cast<Parameter*>(element);
      int pid;
      currentLine >> pid;
      p->setId(pid);
      bool r = p->read(currentLine);
      if (! r) {
        cerr << __PRETTY_FUNCTION__ << ": Error reading data " << token << " for parameter " << pid << endl;
        delete p;
      } else {
        if (! addParameter(p) ){
          cerr << __PRETTY_FUNCTION__ << ": Parameter of type:" << token << " id:" << pid << " already defined" << endl;
          delete p; // fix: rejected duplicate was previously leaked
        }
      }
    } // while read line

    return true;
  }

} // end namespace
gpl-2.0
monokoo/lede-source
target/linux/ramips/files-4.9/drivers/net/ethernet/mtk/mtk_eth_soc.c
16
39459
/* This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * Copyright (C) 2009-2015 John Crispin <blogic@openwrt.org> * Copyright (C) 2009-2015 Felix Fietkau <nbd@nbd.name> * Copyright (C) 2013-2015 Michael Lee <igvtee@gmail.com> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/skbuff.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/platform_device.h> #include <linux/of_device.h> #include <linux/clk.h> #include <linux/of_net.h> #include <linux/of_mdio.h> #include <linux/if_vlan.h> #include <linux/reset.h> #include <linux/tcp.h> #include <linux/io.h> #include <linux/bug.h> #include <asm/mach-ralink/ralink_regs.h> #include "mtk_eth_soc.h" #include "mdio.h" #include "ethtool.h" #define MAX_RX_LENGTH 1536 #define FE_RX_ETH_HLEN (VLAN_ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) #define FE_RX_HLEN (NET_SKB_PAD + FE_RX_ETH_HLEN + NET_IP_ALIGN) #define DMA_DUMMY_DESC 0xffffffff #define FE_DEFAULT_MSG_ENABLE \ (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK | \ NETIF_MSG_TIMER | \ NETIF_MSG_IFDOWN | \ NETIF_MSG_IFUP | \ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) #define TX_DMA_DESP2_DEF (TX_DMA_LS0 | TX_DMA_DONE) #define TX_DMA_DESP4_DEF (TX_DMA_QN(3) | TX_DMA_PN(1)) #define NEXT_TX_DESP_IDX(X) (((X) + 1) & (ring->tx_ring_size - 1)) #define NEXT_RX_DESP_IDX(X) (((X) + 1) & (ring->rx_ring_size - 1)) #define SYSC_REG_RSTCTRL 0x34 static int fe_msg_level = -1; module_param_named(msg_level, fe_msg_level, int, 0); MODULE_PARM_DESC(msg_level, "Message level 
(-1=defaults,0=none,...,16=all)"); static const u16 fe_reg_table_default[FE_REG_COUNT] = { [FE_REG_PDMA_GLO_CFG] = FE_PDMA_GLO_CFG, [FE_REG_PDMA_RST_CFG] = FE_PDMA_RST_CFG, [FE_REG_DLY_INT_CFG] = FE_DLY_INT_CFG, [FE_REG_TX_BASE_PTR0] = FE_TX_BASE_PTR0, [FE_REG_TX_MAX_CNT0] = FE_TX_MAX_CNT0, [FE_REG_TX_CTX_IDX0] = FE_TX_CTX_IDX0, [FE_REG_TX_DTX_IDX0] = FE_TX_DTX_IDX0, [FE_REG_RX_BASE_PTR0] = FE_RX_BASE_PTR0, [FE_REG_RX_MAX_CNT0] = FE_RX_MAX_CNT0, [FE_REG_RX_CALC_IDX0] = FE_RX_CALC_IDX0, [FE_REG_RX_DRX_IDX0] = FE_RX_DRX_IDX0, [FE_REG_FE_INT_ENABLE] = FE_FE_INT_ENABLE, [FE_REG_FE_INT_STATUS] = FE_FE_INT_STATUS, [FE_REG_FE_DMA_VID_BASE] = FE_DMA_VID0, [FE_REG_FE_COUNTER_BASE] = FE_GDMA1_TX_GBCNT, [FE_REG_FE_RST_GL] = FE_FE_RST_GL, }; static const u16 *fe_reg_table = fe_reg_table_default; struct fe_work_t { int bitnr; void (*action)(struct fe_priv *); }; static void __iomem *fe_base; void fe_w32(u32 val, unsigned reg) { __raw_writel(val, fe_base + reg); } u32 fe_r32(unsigned reg) { return __raw_readl(fe_base + reg); } void fe_reg_w32(u32 val, enum fe_reg reg) { fe_w32(val, fe_reg_table[reg]); } u32 fe_reg_r32(enum fe_reg reg) { return fe_r32(fe_reg_table[reg]); } void fe_reset(u32 reset_bits) { u32 t; t = rt_sysc_r32(SYSC_REG_RSTCTRL); t |= reset_bits; rt_sysc_w32(t, SYSC_REG_RSTCTRL); usleep_range(10, 20); t &= ~reset_bits; rt_sysc_w32(t, SYSC_REG_RSTCTRL); usleep_range(10, 20); } static inline void fe_int_disable(u32 mask) { fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) & ~mask, FE_REG_FE_INT_ENABLE); /* flush write */ fe_reg_r32(FE_REG_FE_INT_ENABLE); } static inline void fe_int_enable(u32 mask) { fe_reg_w32(fe_reg_r32(FE_REG_FE_INT_ENABLE) | mask, FE_REG_FE_INT_ENABLE); /* flush write */ fe_reg_r32(FE_REG_FE_INT_ENABLE); } static inline void fe_hw_set_macaddr(struct fe_priv *priv, unsigned char *mac) { unsigned long flags; spin_lock_irqsave(&priv->page_lock, flags); fe_w32((mac[0] << 8) | mac[1], FE_GDMA1_MAC_ADRH); fe_w32((mac[2] << 24) | (mac[3] << 16) | (mac[4] << 
8) | mac[5], FE_GDMA1_MAC_ADRL); spin_unlock_irqrestore(&priv->page_lock, flags); } static int fe_set_mac_address(struct net_device *dev, void *p) { int ret = eth_mac_addr(dev, p); if (!ret) { struct fe_priv *priv = netdev_priv(dev); if (priv->soc->set_mac) priv->soc->set_mac(priv, dev->dev_addr); else fe_hw_set_macaddr(priv, p); } return ret; } static inline int fe_max_frag_size(int mtu) { /* make sure buf_size will be at least MAX_RX_LENGTH */ if (mtu + FE_RX_ETH_HLEN < MAX_RX_LENGTH) mtu = MAX_RX_LENGTH - FE_RX_ETH_HLEN; return SKB_DATA_ALIGN(FE_RX_HLEN + mtu) + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); } static inline int fe_max_buf_size(int frag_size) { int buf_size = frag_size - NET_SKB_PAD - NET_IP_ALIGN - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); BUG_ON(buf_size < MAX_RX_LENGTH); return buf_size; } static inline void fe_get_rxd(struct fe_rx_dma *rxd, struct fe_rx_dma *dma_rxd) { rxd->rxd1 = dma_rxd->rxd1; rxd->rxd2 = dma_rxd->rxd2; rxd->rxd3 = dma_rxd->rxd3; rxd->rxd4 = dma_rxd->rxd4; } static inline void fe_set_txd(struct fe_tx_dma *txd, struct fe_tx_dma *dma_txd) { dma_txd->txd1 = txd->txd1; dma_txd->txd3 = txd->txd3; dma_txd->txd4 = txd->txd4; /* clean dma done flag last */ dma_txd->txd2 = txd->txd2; } static void fe_clean_rx(struct fe_priv *priv) { int i; struct fe_rx_ring *ring = &priv->rx_ring; if (ring->rx_data) { for (i = 0; i < ring->rx_ring_size; i++) if (ring->rx_data[i]) { if (ring->rx_dma && ring->rx_dma[i].rxd1) dma_unmap_single(&priv->netdev->dev, ring->rx_dma[i].rxd1, ring->rx_buf_size, DMA_FROM_DEVICE); put_page(virt_to_head_page(ring->rx_data[i])); } kfree(ring->rx_data); ring->rx_data = NULL; } if (ring->rx_dma) { dma_free_coherent(&priv->netdev->dev, ring->rx_ring_size * sizeof(*ring->rx_dma), ring->rx_dma, ring->rx_phys); ring->rx_dma = NULL; } } static int fe_alloc_rx(struct fe_priv *priv) { struct net_device *netdev = priv->netdev; struct fe_rx_ring *ring = &priv->rx_ring; int i, pad; ring->rx_data = 
kcalloc(ring->rx_ring_size, sizeof(*ring->rx_data), GFP_KERNEL); if (!ring->rx_data) goto no_rx_mem; for (i = 0; i < ring->rx_ring_size; i++) { ring->rx_data[i] = netdev_alloc_frag(ring->frag_size); if (!ring->rx_data[i]) goto no_rx_mem; } ring->rx_dma = dma_alloc_coherent(&netdev->dev, ring->rx_ring_size * sizeof(*ring->rx_dma), &ring->rx_phys, GFP_ATOMIC | __GFP_ZERO); if (!ring->rx_dma) goto no_rx_mem; if (priv->flags & FE_FLAG_RX_2B_OFFSET) pad = 0; else pad = NET_IP_ALIGN; for (i = 0; i < ring->rx_ring_size; i++) { dma_addr_t dma_addr = dma_map_single(&netdev->dev, ring->rx_data[i] + NET_SKB_PAD + pad, ring->rx_buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) goto no_rx_mem; ring->rx_dma[i].rxd1 = (unsigned int)dma_addr; if (priv->flags & FE_FLAG_RX_SG_DMA) ring->rx_dma[i].rxd2 = RX_DMA_PLEN0(ring->rx_buf_size); else ring->rx_dma[i].rxd2 = RX_DMA_LSO; } ring->rx_calc_idx = ring->rx_ring_size - 1; /* make sure that all changes to the dma ring are flushed before we * continue */ wmb(); fe_reg_w32(ring->rx_phys, FE_REG_RX_BASE_PTR0); fe_reg_w32(ring->rx_ring_size, FE_REG_RX_MAX_CNT0); fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0); fe_reg_w32(FE_PST_DRX_IDX0, FE_REG_PDMA_RST_CFG); return 0; no_rx_mem: return -ENOMEM; } static void fe_txd_unmap(struct device *dev, struct fe_tx_buf *tx_buf) { if (tx_buf->flags & FE_TX_FLAGS_SINGLE0) { dma_unmap_single(dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); } else if (tx_buf->flags & FE_TX_FLAGS_PAGE0) { dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma_addr0), dma_unmap_len(tx_buf, dma_len0), DMA_TO_DEVICE); } if (tx_buf->flags & FE_TX_FLAGS_PAGE1) dma_unmap_page(dev, dma_unmap_addr(tx_buf, dma_addr1), dma_unmap_len(tx_buf, dma_len1), DMA_TO_DEVICE); tx_buf->flags = 0; if (tx_buf->skb && (tx_buf->skb != (struct sk_buff *)DMA_DUMMY_DESC)) dev_kfree_skb_any(tx_buf->skb); tx_buf->skb = NULL; } static void fe_clean_tx(struct fe_priv *priv) { int i; 
struct device *dev = &priv->netdev->dev; struct fe_tx_ring *ring = &priv->tx_ring; if (ring->tx_buf) { for (i = 0; i < ring->tx_ring_size; i++) fe_txd_unmap(dev, &ring->tx_buf[i]); kfree(ring->tx_buf); ring->tx_buf = NULL; } if (ring->tx_dma) { dma_free_coherent(dev, ring->tx_ring_size * sizeof(*ring->tx_dma), ring->tx_dma, ring->tx_phys); ring->tx_dma = NULL; } netdev_reset_queue(priv->netdev); } static int fe_alloc_tx(struct fe_priv *priv) { int i; struct fe_tx_ring *ring = &priv->tx_ring; ring->tx_free_idx = 0; ring->tx_next_idx = 0; ring->tx_thresh = max((unsigned long)ring->tx_ring_size >> 2, MAX_SKB_FRAGS); ring->tx_buf = kcalloc(ring->tx_ring_size, sizeof(*ring->tx_buf), GFP_KERNEL); if (!ring->tx_buf) goto no_tx_mem; ring->tx_dma = dma_alloc_coherent(&priv->netdev->dev, ring->tx_ring_size * sizeof(*ring->tx_dma), &ring->tx_phys, GFP_ATOMIC | __GFP_ZERO); if (!ring->tx_dma) goto no_tx_mem; for (i = 0; i < ring->tx_ring_size; i++) { if (priv->soc->tx_dma) priv->soc->tx_dma(&ring->tx_dma[i]); ring->tx_dma[i].txd2 = TX_DMA_DESP2_DEF; } /* make sure that all changes to the dma ring are flushed before we * continue */ wmb(); fe_reg_w32(ring->tx_phys, FE_REG_TX_BASE_PTR0); fe_reg_w32(ring->tx_ring_size, FE_REG_TX_MAX_CNT0); fe_reg_w32(0, FE_REG_TX_CTX_IDX0); fe_reg_w32(FE_PST_DTX_IDX0, FE_REG_PDMA_RST_CFG); return 0; no_tx_mem: return -ENOMEM; } static int fe_init_dma(struct fe_priv *priv) { int err; err = fe_alloc_tx(priv); if (err) return err; err = fe_alloc_rx(priv); if (err) return err; return 0; } static void fe_free_dma(struct fe_priv *priv) { fe_clean_tx(priv); fe_clean_rx(priv); } void fe_stats_update(struct fe_priv *priv) { struct fe_hw_stats *hwstats = priv->hw_stats; unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE]; u64 stats; u64_stats_update_begin(&hwstats->syncp); if (IS_ENABLED(CONFIG_SOC_MT7621)) { hwstats->rx_bytes += fe_r32(base); stats = fe_r32(base + 0x04); if (stats) hwstats->rx_bytes += (stats << 32); hwstats->rx_packets += 
fe_r32(base + 0x08); hwstats->rx_overflow += fe_r32(base + 0x10); hwstats->rx_fcs_errors += fe_r32(base + 0x14); hwstats->rx_short_errors += fe_r32(base + 0x18); hwstats->rx_long_errors += fe_r32(base + 0x1c); hwstats->rx_checksum_errors += fe_r32(base + 0x20); hwstats->rx_flow_control_packets += fe_r32(base + 0x24); hwstats->tx_skip += fe_r32(base + 0x28); hwstats->tx_collisions += fe_r32(base + 0x2c); hwstats->tx_bytes += fe_r32(base + 0x30); stats = fe_r32(base + 0x34); if (stats) hwstats->tx_bytes += (stats << 32); hwstats->tx_packets += fe_r32(base + 0x38); } else { hwstats->tx_bytes += fe_r32(base); hwstats->tx_packets += fe_r32(base + 0x04); hwstats->tx_skip += fe_r32(base + 0x08); hwstats->tx_collisions += fe_r32(base + 0x0c); hwstats->rx_bytes += fe_r32(base + 0x20); hwstats->rx_packets += fe_r32(base + 0x24); hwstats->rx_overflow += fe_r32(base + 0x28); hwstats->rx_fcs_errors += fe_r32(base + 0x2c); hwstats->rx_short_errors += fe_r32(base + 0x30); hwstats->rx_long_errors += fe_r32(base + 0x34); hwstats->rx_checksum_errors += fe_r32(base + 0x38); hwstats->rx_flow_control_packets += fe_r32(base + 0x3c); } u64_stats_update_end(&hwstats->syncp); } static struct rtnl_link_stats64 *fe_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage) { struct fe_priv *priv = netdev_priv(dev); struct fe_hw_stats *hwstats = priv->hw_stats; unsigned int base = fe_reg_table[FE_REG_FE_COUNTER_BASE]; unsigned int start; if (!base) { netdev_stats_to_stats64(storage, &dev->stats); return storage; } if (netif_running(dev) && netif_device_present(dev)) { if (spin_trylock(&hwstats->stats_lock)) { fe_stats_update(priv); spin_unlock(&hwstats->stats_lock); } } do { start = u64_stats_fetch_begin_irq(&hwstats->syncp); storage->rx_packets = hwstats->rx_packets; storage->tx_packets = hwstats->tx_packets; storage->rx_bytes = hwstats->rx_bytes; storage->tx_bytes = hwstats->tx_bytes; storage->collisions = hwstats->tx_collisions; storage->rx_length_errors = 
hwstats->rx_short_errors + hwstats->rx_long_errors; storage->rx_over_errors = hwstats->rx_overflow; storage->rx_crc_errors = hwstats->rx_fcs_errors; storage->rx_errors = hwstats->rx_checksum_errors; storage->tx_aborted_errors = hwstats->tx_skip; } while (u64_stats_fetch_retry_irq(&hwstats->syncp, start)); storage->tx_errors = priv->netdev->stats.tx_errors; storage->rx_dropped = priv->netdev->stats.rx_dropped; storage->tx_dropped = priv->netdev->stats.tx_dropped; return storage; } static int fe_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid) { struct fe_priv *priv = netdev_priv(dev); u32 idx = (vid & 0xf); u32 vlan_cfg; if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) && (dev->features & NETIF_F_HW_VLAN_CTAG_TX))) return 0; if (test_bit(idx, &priv->vlan_map)) { netdev_warn(dev, "disable tx vlan offload\n"); dev->wanted_features &= ~NETIF_F_HW_VLAN_CTAG_TX; netdev_update_features(dev); } else { vlan_cfg = fe_r32(fe_reg_table[FE_REG_FE_DMA_VID_BASE] + ((idx >> 1) << 2)); if (idx & 0x1) { vlan_cfg &= 0xffff; vlan_cfg |= (vid << 16); } else { vlan_cfg &= 0xffff0000; vlan_cfg |= vid; } fe_w32(vlan_cfg, fe_reg_table[FE_REG_FE_DMA_VID_BASE] + ((idx >> 1) << 2)); set_bit(idx, &priv->vlan_map); } return 0; } static int fe_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid) { struct fe_priv *priv = netdev_priv(dev); u32 idx = (vid & 0xf); if (!((fe_reg_table[FE_REG_FE_DMA_VID_BASE]) && (dev->features & NETIF_F_HW_VLAN_CTAG_TX))) return 0; clear_bit(idx, &priv->vlan_map); return 0; } static inline u32 fe_empty_txd(struct fe_tx_ring *ring) { barrier(); return (u32)(ring->tx_ring_size - ((ring->tx_next_idx - ring->tx_free_idx) & (ring->tx_ring_size - 1))); } static int fe_tx_map_dma(struct sk_buff *skb, struct net_device *dev, int tx_num, struct fe_tx_ring *ring) { struct fe_priv *priv = netdev_priv(dev); struct skb_frag_struct *frag; struct fe_tx_dma txd, *ptxd; struct fe_tx_buf *tx_buf; dma_addr_t mapped_addr; unsigned int nr_frags; u32 def_txd4; int i, 
j, k, frag_size, frag_map_size, offset; tx_buf = &ring->tx_buf[ring->tx_next_idx]; memset(tx_buf, 0, sizeof(*tx_buf)); memset(&txd, 0, sizeof(txd)); nr_frags = skb_shinfo(skb)->nr_frags; /* init tx descriptor */ if (priv->soc->tx_dma) priv->soc->tx_dma(&txd); else txd.txd4 = TX_DMA_DESP4_DEF; def_txd4 = txd.txd4; /* TX Checksum offload */ if (skb->ip_summed == CHECKSUM_PARTIAL) txd.txd4 |= TX_DMA_CHKSUM; /* VLAN header offload */ if (skb_vlan_tag_present(skb)) { u16 tag = skb_vlan_tag_get(skb); if (IS_ENABLED(CONFIG_SOC_MT7621)) txd.txd4 |= TX_DMA_INS_VLAN_MT7621 | tag; else txd.txd4 |= TX_DMA_INS_VLAN | ((tag >> VLAN_PRIO_SHIFT) << 4) | (tag & 0xF); } /* TSO: fill MSS info in tcp checksum field */ if (skb_is_gso(skb)) { if (skb_cow_head(skb, 0)) { netif_warn(priv, tx_err, dev, "GSO expand head fail.\n"); goto err_out; } if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) { txd.txd4 |= TX_DMA_TSO; tcp_hdr(skb)->check = htons(skb_shinfo(skb)->gso_size); } } mapped_addr = dma_map_single(&dev->dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) goto err_out; txd.txd1 = mapped_addr; txd.txd2 = TX_DMA_PLEN0(skb_headlen(skb)); tx_buf->flags |= FE_TX_FLAGS_SINGLE0; dma_unmap_addr_set(tx_buf, dma_addr0, mapped_addr); dma_unmap_len_set(tx_buf, dma_len0, skb_headlen(skb)); /* TX SG offload */ j = ring->tx_next_idx; k = 0; for (i = 0; i < nr_frags; i++) { offset = 0; frag = &skb_shinfo(skb)->frags[i]; frag_size = skb_frag_size(frag); while (frag_size > 0) { frag_map_size = min(frag_size, TX_DMA_BUF_LEN); mapped_addr = skb_frag_dma_map(&dev->dev, frag, offset, frag_map_size, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(&dev->dev, mapped_addr))) goto err_dma; if (k & 0x1) { j = NEXT_TX_DESP_IDX(j); txd.txd1 = mapped_addr; txd.txd2 = TX_DMA_PLEN0(frag_map_size); txd.txd4 = def_txd4; tx_buf = &ring->tx_buf[j]; memset(tx_buf, 0, sizeof(*tx_buf)); tx_buf->flags |= FE_TX_FLAGS_PAGE0; dma_unmap_addr_set(tx_buf, 
dma_addr0, mapped_addr); dma_unmap_len_set(tx_buf, dma_len0, frag_map_size); } else { txd.txd3 = mapped_addr; txd.txd2 |= TX_DMA_PLEN1(frag_map_size); tx_buf->skb = (struct sk_buff *)DMA_DUMMY_DESC; tx_buf->flags |= FE_TX_FLAGS_PAGE1; dma_unmap_addr_set(tx_buf, dma_addr1, mapped_addr); dma_unmap_len_set(tx_buf, dma_len1, frag_map_size); if (!((i == (nr_frags - 1)) && (frag_map_size == frag_size))) { fe_set_txd(&txd, &ring->tx_dma[j]); memset(&txd, 0, sizeof(txd)); } } frag_size -= frag_map_size; offset += frag_map_size; k++; } } /* set last segment */ if (k & 0x1) txd.txd2 |= TX_DMA_LS1; else txd.txd2 |= TX_DMA_LS0; fe_set_txd(&txd, &ring->tx_dma[j]); /* store skb to cleanup */ tx_buf->skb = skb; netdev_sent_queue(dev, skb->len); skb_tx_timestamp(skb); ring->tx_next_idx = NEXT_TX_DESP_IDX(j); /* make sure that all changes to the dma ring are flushed before we * continue */ wmb(); if (unlikely(fe_empty_txd(ring) <= ring->tx_thresh)) { netif_stop_queue(dev); smp_mb(); if (unlikely(fe_empty_txd(ring) > ring->tx_thresh)) netif_wake_queue(dev); } if (netif_xmit_stopped(netdev_get_tx_queue(dev, 0)) || !skb->xmit_more) fe_reg_w32(ring->tx_next_idx, FE_REG_TX_CTX_IDX0); return 0; err_dma: j = ring->tx_next_idx; for (i = 0; i < tx_num; i++) { ptxd = &ring->tx_dma[j]; tx_buf = &ring->tx_buf[j]; /* unmap dma */ fe_txd_unmap(&dev->dev, tx_buf); ptxd->txd2 = TX_DMA_DESP2_DEF; j = NEXT_TX_DESP_IDX(j); } /* make sure that all changes to the dma ring are flushed before we * continue */ wmb(); err_out: return -1; } static inline int fe_skb_padto(struct sk_buff *skb, struct fe_priv *priv) { unsigned int len; int ret; ret = 0; if (unlikely(skb->len < VLAN_ETH_ZLEN)) { if ((priv->flags & FE_FLAG_PADDING_64B) && !(priv->flags & FE_FLAG_PADDING_BUG)) return ret; if (skb_vlan_tag_present(skb)) len = ETH_ZLEN; else if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) len = VLAN_ETH_ZLEN; else if (!(priv->flags & FE_FLAG_PADDING_64B)) len = ETH_ZLEN; else return ret; if (skb->len < len) { ret = 
skb_pad(skb, len - skb->len); if (ret < 0) return ret; skb->len = len; skb_set_tail_pointer(skb, len); } } return ret; } static inline int fe_cal_txd_req(struct sk_buff *skb) { int i, nfrags; struct skb_frag_struct *frag; nfrags = 1; if (skb_is_gso(skb)) { for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { frag = &skb_shinfo(skb)->frags[i]; nfrags += DIV_ROUND_UP(frag->size, TX_DMA_BUF_LEN); } } else { nfrags += skb_shinfo(skb)->nr_frags; } return DIV_ROUND_UP(nfrags, 2); } static int fe_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct fe_priv *priv = netdev_priv(dev); struct fe_tx_ring *ring = &priv->tx_ring; struct net_device_stats *stats = &dev->stats; int tx_num; int len = skb->len; if (fe_skb_padto(skb, priv)) { netif_warn(priv, tx_err, dev, "tx padding failed!\n"); return NETDEV_TX_OK; } tx_num = fe_cal_txd_req(skb); if (unlikely(fe_empty_txd(ring) <= tx_num)) { netif_stop_queue(dev); netif_err(priv, tx_queued, dev, "Tx Ring full when queue awake!\n"); return NETDEV_TX_BUSY; } if (fe_tx_map_dma(skb, dev, tx_num, ring) < 0) { stats->tx_dropped++; } else { stats->tx_packets++; stats->tx_bytes += len; } return NETDEV_TX_OK; } static int fe_poll_rx(struct napi_struct *napi, int budget, struct fe_priv *priv, u32 rx_intr) { struct net_device *netdev = priv->netdev; struct net_device_stats *stats = &netdev->stats; struct fe_soc_data *soc = priv->soc; struct fe_rx_ring *ring = &priv->rx_ring; int idx = ring->rx_calc_idx; u32 checksum_bit; struct sk_buff *skb; u8 *data, *new_data; struct fe_rx_dma *rxd, trxd; int done = 0, pad; if (netdev->features & NETIF_F_RXCSUM) checksum_bit = soc->checksum_bit; else checksum_bit = 0; if (priv->flags & FE_FLAG_RX_2B_OFFSET) pad = 0; else pad = NET_IP_ALIGN; while (done < budget) { unsigned int pktlen; dma_addr_t dma_addr; idx = NEXT_RX_DESP_IDX(idx); rxd = &ring->rx_dma[idx]; data = ring->rx_data[idx]; fe_get_rxd(&trxd, rxd); if (!(trxd.rxd2 & RX_DMA_DONE)) break; /* alloc new buffer */ new_data = 
netdev_alloc_frag(ring->frag_size); if (unlikely(!new_data)) { stats->rx_dropped++; goto release_desc; } dma_addr = dma_map_single(&netdev->dev, new_data + NET_SKB_PAD + pad, ring->rx_buf_size, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(&netdev->dev, dma_addr))) { put_page(virt_to_head_page(new_data)); goto release_desc; } /* receive data */ skb = build_skb(data, ring->frag_size); if (unlikely(!skb)) { put_page(virt_to_head_page(new_data)); goto release_desc; } skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN); dma_unmap_single(&netdev->dev, trxd.rxd1, ring->rx_buf_size, DMA_FROM_DEVICE); pktlen = RX_DMA_GET_PLEN0(trxd.rxd2); skb->dev = netdev; skb_put(skb, pktlen); if (trxd.rxd4 & checksum_bit) skb->ip_summed = CHECKSUM_UNNECESSARY; else skb_checksum_none_assert(skb); skb->protocol = eth_type_trans(skb, netdev); stats->rx_packets++; stats->rx_bytes += pktlen; napi_gro_receive(napi, skb); ring->rx_data[idx] = new_data; rxd->rxd1 = (unsigned int)dma_addr; release_desc: if (priv->flags & FE_FLAG_RX_SG_DMA) rxd->rxd2 = RX_DMA_PLEN0(ring->rx_buf_size); else rxd->rxd2 = RX_DMA_LSO; ring->rx_calc_idx = idx; /* make sure that all changes to the dma ring are flushed before * we continue */ wmb(); fe_reg_w32(ring->rx_calc_idx, FE_REG_RX_CALC_IDX0); done++; } if (done < budget) fe_reg_w32(rx_intr, FE_REG_FE_INT_STATUS); return done; } static int fe_poll_tx(struct fe_priv *priv, int budget, u32 tx_intr, int *tx_again) { struct net_device *netdev = priv->netdev; struct device *dev = &netdev->dev; unsigned int bytes_compl = 0; struct sk_buff *skb; struct fe_tx_buf *tx_buf; int done = 0; u32 idx, hwidx; struct fe_tx_ring *ring = &priv->tx_ring; idx = ring->tx_free_idx; hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0); while ((idx != hwidx) && budget) { tx_buf = &ring->tx_buf[idx]; skb = tx_buf->skb; if (!skb) break; if (skb != (struct sk_buff *)DMA_DUMMY_DESC) { bytes_compl += skb->len; done++; budget--; } fe_txd_unmap(dev, tx_buf); idx = NEXT_TX_DESP_IDX(idx); } ring->tx_free_idx = idx; 
if (idx == hwidx) { /* read hw index again make sure no new tx packet */ hwidx = fe_reg_r32(FE_REG_TX_DTX_IDX0); if (idx == hwidx) fe_reg_w32(tx_intr, FE_REG_FE_INT_STATUS); else *tx_again = 1; } else { *tx_again = 1; } if (done) { netdev_completed_queue(netdev, done, bytes_compl); smp_mb(); if (unlikely(netif_queue_stopped(netdev) && (fe_empty_txd(ring) > ring->tx_thresh))) netif_wake_queue(netdev); } return done; } static int fe_poll(struct napi_struct *napi, int budget) { struct fe_priv *priv = container_of(napi, struct fe_priv, rx_napi); struct fe_hw_stats *hwstat = priv->hw_stats; int tx_done, rx_done, tx_again; u32 status, fe_status, status_reg, mask; u32 tx_intr, rx_intr, status_intr; status = fe_reg_r32(FE_REG_FE_INT_STATUS); fe_status = status; tx_intr = priv->soc->tx_int; rx_intr = priv->soc->rx_int; status_intr = priv->soc->status_int; tx_done = 0; rx_done = 0; tx_again = 0; if (fe_reg_table[FE_REG_FE_INT_STATUS2]) { fe_status = fe_reg_r32(FE_REG_FE_INT_STATUS2); status_reg = FE_REG_FE_INT_STATUS2; } else { status_reg = FE_REG_FE_INT_STATUS; } if (status & tx_intr) tx_done = fe_poll_tx(priv, budget, tx_intr, &tx_again); if (status & rx_intr) rx_done = fe_poll_rx(napi, budget, priv, rx_intr); if (unlikely(fe_status & status_intr)) { if (hwstat && spin_trylock(&hwstat->stats_lock)) { fe_stats_update(priv); spin_unlock(&hwstat->stats_lock); } fe_reg_w32(status_intr, status_reg); } if (unlikely(netif_msg_intr(priv))) { mask = fe_reg_r32(FE_REG_FE_INT_ENABLE); netdev_info(priv->netdev, "done tx %d, rx %d, intr 0x%08x/0x%x\n", tx_done, rx_done, status, mask); } if (!tx_again && (rx_done < budget)) { status = fe_reg_r32(FE_REG_FE_INT_STATUS); if (status & (tx_intr | rx_intr)) { /* let napi poll again */ rx_done = budget; goto poll_again; } napi_complete(napi); fe_int_enable(tx_intr | rx_intr); } else { rx_done = budget; } poll_again: return rx_done; } static void fe_tx_timeout(struct net_device *dev) { struct fe_priv *priv = netdev_priv(dev); struct fe_tx_ring 
*ring = &priv->tx_ring; priv->netdev->stats.tx_errors++; netif_err(priv, tx_err, dev, "transmit timed out\n"); netif_info(priv, drv, dev, "dma_cfg:%08x\n", fe_reg_r32(FE_REG_PDMA_GLO_CFG)); netif_info(priv, drv, dev, "tx_ring=%d, " "base=%08x, max=%u, ctx=%u, dtx=%u, fdx=%hu, next=%hu\n", 0, fe_reg_r32(FE_REG_TX_BASE_PTR0), fe_reg_r32(FE_REG_TX_MAX_CNT0), fe_reg_r32(FE_REG_TX_CTX_IDX0), fe_reg_r32(FE_REG_TX_DTX_IDX0), ring->tx_free_idx, ring->tx_next_idx); netif_info(priv, drv, dev, "rx_ring=%d, base=%08x, max=%u, calc=%u, drx=%u\n", 0, fe_reg_r32(FE_REG_RX_BASE_PTR0), fe_reg_r32(FE_REG_RX_MAX_CNT0), fe_reg_r32(FE_REG_RX_CALC_IDX0), fe_reg_r32(FE_REG_RX_DRX_IDX0)); if (!test_and_set_bit(FE_FLAG_RESET_PENDING, priv->pending_flags)) schedule_work(&priv->pending_work); } static irqreturn_t fe_handle_irq(int irq, void *dev) { struct fe_priv *priv = netdev_priv(dev); u32 status, int_mask; status = fe_reg_r32(FE_REG_FE_INT_STATUS); if (unlikely(!status)) return IRQ_NONE; int_mask = (priv->soc->rx_int | priv->soc->tx_int); if (likely(status & int_mask)) { if (likely(napi_schedule_prep(&priv->rx_napi))) { fe_int_disable(int_mask); __napi_schedule(&priv->rx_napi); } } else { fe_reg_w32(status, FE_REG_FE_INT_STATUS); } return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER static void fe_poll_controller(struct net_device *dev) { struct fe_priv *priv = netdev_priv(dev); u32 int_mask = priv->soc->tx_int | priv->soc->rx_int; fe_int_disable(int_mask); fe_handle_irq(dev->irq, dev); fe_int_enable(int_mask); } #endif int fe_set_clock_cycle(struct fe_priv *priv) { unsigned long sysclk = priv->sysclk; sysclk /= FE_US_CYC_CNT_DIVISOR; sysclk <<= FE_US_CYC_CNT_SHIFT; fe_w32((fe_r32(FE_FE_GLO_CFG) & ~(FE_US_CYC_CNT_MASK << FE_US_CYC_CNT_SHIFT)) | sysclk, FE_FE_GLO_CFG); return 0; } void fe_fwd_config(struct fe_priv *priv) { u32 fwd_cfg; fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG); /* disable jumbo frame */ if (priv->flags & FE_FLAG_JUMBO_FRAME) fwd_cfg &= ~FE_GDM1_JMB_EN; /* set 
unicast/multicast/broadcast frame to cpu */ fwd_cfg &= ~0xffff; fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG); } static void fe_rxcsum_config(bool enable) { if (enable) fe_w32(fe_r32(FE_GDMA1_FWD_CFG) | (FE_GDM1_ICS_EN | FE_GDM1_TCS_EN | FE_GDM1_UCS_EN), FE_GDMA1_FWD_CFG); else fe_w32(fe_r32(FE_GDMA1_FWD_CFG) & ~(FE_GDM1_ICS_EN | FE_GDM1_TCS_EN | FE_GDM1_UCS_EN), FE_GDMA1_FWD_CFG); } static void fe_txcsum_config(bool enable) { if (enable) fe_w32(fe_r32(FE_CDMA_CSG_CFG) | (FE_ICS_GEN_EN | FE_TCS_GEN_EN | FE_UCS_GEN_EN), FE_CDMA_CSG_CFG); else fe_w32(fe_r32(FE_CDMA_CSG_CFG) & ~(FE_ICS_GEN_EN | FE_TCS_GEN_EN | FE_UCS_GEN_EN), FE_CDMA_CSG_CFG); } void fe_csum_config(struct fe_priv *priv) { struct net_device *dev = priv_netdev(priv); fe_txcsum_config((dev->features & NETIF_F_IP_CSUM)); fe_rxcsum_config((dev->features & NETIF_F_RXCSUM)); } static int fe_hw_init(struct net_device *dev) { struct fe_priv *priv = netdev_priv(dev); int i, err; err = devm_request_irq(priv->device, dev->irq, fe_handle_irq, 0, dev_name(priv->device), dev); if (err) return err; if (priv->soc->set_mac) priv->soc->set_mac(priv, dev->dev_addr); else fe_hw_set_macaddr(priv, dev->dev_addr); /* disable delay interrupt */ fe_reg_w32(0, FE_REG_DLY_INT_CFG); fe_int_disable(priv->soc->tx_int | priv->soc->rx_int); /* frame engine will push VLAN tag regarding to VIDX feild in Tx desc */ if (fe_reg_table[FE_REG_FE_DMA_VID_BASE]) for (i = 0; i < 16; i += 2) fe_w32(((i + 1) << 16) + i, fe_reg_table[FE_REG_FE_DMA_VID_BASE] + (i * 2)); if (priv->soc->fwd_config(priv)) netdev_err(dev, "unable to get clock\n"); if (fe_reg_table[FE_REG_FE_RST_GL]) { fe_reg_w32(1, FE_REG_FE_RST_GL); fe_reg_w32(0, FE_REG_FE_RST_GL); } return 0; } static int fe_open(struct net_device *dev) { struct fe_priv *priv = netdev_priv(dev); unsigned long flags; u32 val; int err; err = fe_init_dma(priv); if (err) { fe_free_dma(priv); return err; } spin_lock_irqsave(&priv->page_lock, flags); val = FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN; if (priv->flags 
& FE_FLAG_RX_2B_OFFSET) val |= FE_RX_2B_OFFSET; val |= priv->soc->pdma_glo_cfg; fe_reg_w32(val, FE_REG_PDMA_GLO_CFG); spin_unlock_irqrestore(&priv->page_lock, flags); if (priv->phy) priv->phy->start(priv); if (priv->soc->has_carrier && priv->soc->has_carrier(priv)) netif_carrier_on(dev); napi_enable(&priv->rx_napi); fe_int_enable(priv->soc->tx_int | priv->soc->rx_int); netif_start_queue(dev); return 0; } static int fe_stop(struct net_device *dev) { struct fe_priv *priv = netdev_priv(dev); unsigned long flags; int i; netif_tx_disable(dev); fe_int_disable(priv->soc->tx_int | priv->soc->rx_int); napi_disable(&priv->rx_napi); if (priv->phy) priv->phy->stop(priv); spin_lock_irqsave(&priv->page_lock, flags); fe_reg_w32(fe_reg_r32(FE_REG_PDMA_GLO_CFG) & ~(FE_TX_WB_DDONE | FE_RX_DMA_EN | FE_TX_DMA_EN), FE_REG_PDMA_GLO_CFG); spin_unlock_irqrestore(&priv->page_lock, flags); /* wait dma stop */ for (i = 0; i < 10; i++) { if (fe_reg_r32(FE_REG_PDMA_GLO_CFG) & (FE_TX_DMA_BUSY | FE_RX_DMA_BUSY)) { msleep(20); continue; } break; } fe_free_dma(priv); return 0; } static int __init fe_init(struct net_device *dev) { struct fe_priv *priv = netdev_priv(dev); struct device_node *port; const char *mac_addr; int err; priv->soc->reset_fe(); if (priv->soc->switch_init) if (priv->soc->switch_init(priv)) { netdev_err(dev, "failed to initialize switch core\n"); return -ENODEV; } mac_addr = of_get_mac_address(priv->device->of_node); if (mac_addr) ether_addr_copy(dev->dev_addr, mac_addr); /* If the mac address is invalid, use random mac address */ if (!is_valid_ether_addr(dev->dev_addr)) { random_ether_addr(dev->dev_addr); dev_err(priv->device, "generated random MAC address %pM\n", dev->dev_addr); } err = fe_mdio_init(priv); if (err) return err; if (priv->soc->port_init) for_each_child_of_node(priv->device->of_node, port) if (of_device_is_compatible(port, "mediatek,eth-port") && of_device_is_available(port)) priv->soc->port_init(priv, port); if (priv->phy) { err = priv->phy->connect(priv); if 
(err) goto err_phy_disconnect; } err = fe_hw_init(dev); if (err) goto err_phy_disconnect; if ((priv->flags & FE_FLAG_HAS_SWITCH) && priv->soc->switch_config) priv->soc->switch_config(priv); return 0; err_phy_disconnect: if (priv->phy) priv->phy->disconnect(priv); fe_mdio_cleanup(priv); return err; } static void fe_uninit(struct net_device *dev) { struct fe_priv *priv = netdev_priv(dev); if (priv->phy) priv->phy->disconnect(priv); fe_mdio_cleanup(priv); fe_reg_w32(0, FE_REG_FE_INT_ENABLE); free_irq(dev->irq, dev); } static int fe_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct fe_priv *priv = netdev_priv(dev); if (!priv->phy_dev) return -ENODEV; switch (cmd) { case SIOCETHTOOL: return phy_ethtool_ioctl(priv->phy_dev, (void *) ifr->ifr_data); case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: return phy_mii_ioctl(priv->phy_dev, ifr, cmd); default: break; } return -EOPNOTSUPP; } static int fe_change_mtu(struct net_device *dev, int new_mtu) { struct fe_priv *priv = netdev_priv(dev); int frag_size, old_mtu; u32 fwd_cfg; if (!(priv->flags & FE_FLAG_JUMBO_FRAME)) return eth_change_mtu(dev, new_mtu); frag_size = fe_max_frag_size(new_mtu); if (new_mtu < 68 || frag_size > PAGE_SIZE) return -EINVAL; old_mtu = dev->mtu; dev->mtu = new_mtu; /* return early if the buffer sizes will not change */ if (old_mtu <= ETH_DATA_LEN && new_mtu <= ETH_DATA_LEN) return 0; if (old_mtu > ETH_DATA_LEN && new_mtu > ETH_DATA_LEN) return 0; if (new_mtu <= ETH_DATA_LEN) priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN); else priv->rx_ring.frag_size = PAGE_SIZE; priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size); if (!netif_running(dev)) return 0; fe_stop(dev); fwd_cfg = fe_r32(FE_GDMA1_FWD_CFG); if (new_mtu <= ETH_DATA_LEN) { fwd_cfg &= ~FE_GDM1_JMB_EN; } else { fwd_cfg &= ~(FE_GDM1_JMB_LEN_MASK << FE_GDM1_JMB_LEN_SHIFT); fwd_cfg |= (DIV_ROUND_UP(frag_size, 1024) << FE_GDM1_JMB_LEN_SHIFT) | FE_GDM1_JMB_EN; } fe_w32(fwd_cfg, FE_GDMA1_FWD_CFG); 
return fe_open(dev); } static const struct net_device_ops fe_netdev_ops = { .ndo_init = fe_init, .ndo_uninit = fe_uninit, .ndo_open = fe_open, .ndo_stop = fe_stop, .ndo_start_xmit = fe_start_xmit, .ndo_set_mac_address = fe_set_mac_address, .ndo_validate_addr = eth_validate_addr, .ndo_do_ioctl = fe_do_ioctl, .ndo_change_mtu = fe_change_mtu, .ndo_tx_timeout = fe_tx_timeout, .ndo_get_stats64 = fe_get_stats64, .ndo_vlan_rx_add_vid = fe_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = fe_vlan_rx_kill_vid, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = fe_poll_controller, #endif }; static void fe_reset_pending(struct fe_priv *priv) { struct net_device *dev = priv->netdev; int err; rtnl_lock(); fe_stop(dev); err = fe_open(dev); if (err) { netif_alert(priv, ifup, dev, "Driver up/down cycle failed, closing device.\n"); dev_close(dev); } rtnl_unlock(); } static const struct fe_work_t fe_work[] = { {FE_FLAG_RESET_PENDING, fe_reset_pending}, }; static void fe_pending_work(struct work_struct *work) { struct fe_priv *priv = container_of(work, struct fe_priv, pending_work); int i; bool pending; for (i = 0; i < ARRAY_SIZE(fe_work); i++) { pending = test_and_clear_bit(fe_work[i].bitnr, priv->pending_flags); if (pending) fe_work[i].action(priv); } } static int fe_probe(struct platform_device *pdev) { struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); const struct of_device_id *match; struct fe_soc_data *soc; struct net_device *netdev; struct fe_priv *priv; struct clk *sysclk; int err, napi_weight; device_reset(&pdev->dev); match = of_match_device(of_fe_match, &pdev->dev); soc = (struct fe_soc_data *)match->data; if (soc->reg_table) fe_reg_table = soc->reg_table; else soc->reg_table = fe_reg_table; fe_base = devm_ioremap_resource(&pdev->dev, res); if (!fe_base) { err = -EADDRNOTAVAIL; goto err_out; } netdev = alloc_etherdev(sizeof(*priv)); if (!netdev) { dev_err(&pdev->dev, "alloc_etherdev failed\n"); err = -ENOMEM; goto err_iounmap; } SET_NETDEV_DEV(netdev, 
&pdev->dev); netdev->netdev_ops = &fe_netdev_ops; netdev->base_addr = (unsigned long)fe_base; netdev->irq = platform_get_irq(pdev, 0); if (netdev->irq < 0) { dev_err(&pdev->dev, "no IRQ resource found\n"); err = -ENXIO; goto err_free_dev; } if (soc->init_data) soc->init_data(soc, netdev); netdev->vlan_features = netdev->hw_features & ~NETIF_F_HW_VLAN_CTAG_TX; netdev->features |= netdev->hw_features; /* fake rx vlan filter func. to support tx vlan offload func */ if (fe_reg_table[FE_REG_FE_DMA_VID_BASE]) netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; priv = netdev_priv(netdev); spin_lock_init(&priv->page_lock); if (fe_reg_table[FE_REG_FE_COUNTER_BASE]) { priv->hw_stats = kzalloc(sizeof(*priv->hw_stats), GFP_KERNEL); if (!priv->hw_stats) { err = -ENOMEM; goto err_free_dev; } spin_lock_init(&priv->hw_stats->stats_lock); } sysclk = devm_clk_get(&pdev->dev, NULL); if (!IS_ERR(sysclk)) { priv->sysclk = clk_get_rate(sysclk); } else if ((priv->flags & FE_FLAG_CALIBRATE_CLK)) { dev_err(&pdev->dev, "this soc needs a clk for calibration\n"); err = -ENXIO; goto err_free_dev; } priv->switch_np = of_parse_phandle(pdev->dev.of_node, "mediatek,switch", 0); if ((priv->flags & FE_FLAG_HAS_SWITCH) && !priv->switch_np) { dev_err(&pdev->dev, "failed to read switch phandle\n"); err = -ENODEV; goto err_free_dev; } priv->netdev = netdev; priv->device = &pdev->dev; priv->soc = soc; priv->msg_enable = netif_msg_init(fe_msg_level, FE_DEFAULT_MSG_ENABLE); priv->rx_ring.frag_size = fe_max_frag_size(ETH_DATA_LEN); priv->rx_ring.rx_buf_size = fe_max_buf_size(priv->rx_ring.frag_size); priv->tx_ring.tx_ring_size = NUM_DMA_DESC; priv->rx_ring.rx_ring_size = NUM_DMA_DESC; INIT_WORK(&priv->pending_work, fe_pending_work); napi_weight = 32; if (priv->flags & FE_FLAG_NAPI_WEIGHT) { napi_weight *= 4; priv->tx_ring.tx_ring_size *= 4; priv->rx_ring.rx_ring_size *= 4; } netif_napi_add(netdev, &priv->rx_napi, fe_poll, napi_weight); fe_set_ethtool_ops(netdev); err = register_netdev(netdev); if (err) { 
dev_err(&pdev->dev, "error bringing up device\n"); goto err_free_dev; } platform_set_drvdata(pdev, netdev); netif_info(priv, probe, netdev, "mediatek frame engine at 0x%08lx, irq %d\n", netdev->base_addr, netdev->irq); return 0; err_free_dev: free_netdev(netdev); err_iounmap: devm_iounmap(&pdev->dev, fe_base); err_out: return err; } static int fe_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct fe_priv *priv = netdev_priv(dev); netif_napi_del(&priv->rx_napi); kfree(priv->hw_stats); cancel_work_sync(&priv->pending_work); unregister_netdev(dev); free_netdev(dev); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver fe_driver = { .probe = fe_probe, .remove = fe_remove, .driver = { .name = "mtk_soc_eth", .owner = THIS_MODULE, .of_match_table = of_fe_match, }, }; module_platform_driver(fe_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); MODULE_DESCRIPTION("Ethernet driver for Ralink SoC"); MODULE_VERSION(MTK_FE_DRV_VERSION);
gpl-2.0
Spartonos/android_kernel_motorola_falcon_umts
kernel/module.c
16
93682
/* Copyright (C) 2002 Richard Henderson Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/export.h> #include <linux/moduleloader.h> #include <linux/ftrace_event.h> #include <linux/init.h> #include <linux/kallsyms.h> #include <linux/fs.h> #include <linux/sysfs.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/elf.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/syscalls.h> #include <linux/fcntl.h> #include <linux/rcupdate.h> #include <linux/capability.h> #include <linux/cpu.h> #include <linux/moduleparam.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/vermagic.h> #include <linux/notifier.h> #include <linux/sched.h> #include <linux/stop_machine.h> #include <linux/device.h> #include <linux/string.h> #include <linux/mutex.h> #include <linux/rculist.h> #include <asm/uaccess.h> #include <asm/cacheflush.h> #include <asm/mmu_context.h> #include <linux/license.h> #include <asm/sections.h> #include <linux/tracepoint.h> #include <linux/ftrace.h> #include <linux/async.h> #include <linux/percpu.h> #include <linux/kmemleak.h> #include <linux/jump_label.h> #include <linux/pfn.h> #include <linux/bsearch.h> #include "module-whitelist.h" #define CREATE_TRACE_POINTS #include <trace/events/module.h> 
#ifndef ARCH_SHF_SMALL #define ARCH_SHF_SMALL 0 #endif /* * Modules' sections will be aligned on page boundaries * to ensure complete separation of code and data, but * only when CONFIG_DEBUG_SET_MODULE_RONX=y */ #ifdef CONFIG_DEBUG_SET_MODULE_RONX # define debug_align(X) ALIGN(X, PAGE_SIZE) #else # define debug_align(X) (X) #endif /* * Given BASE and SIZE this macro calculates the number of pages the * memory regions occupies */ #define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ? \ (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) - \ PFN_DOWN((unsigned long)BASE) + 1) \ : (0UL)) /* If this is set, the section belongs in the init part of the module */ #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) /* * Mutex protects: * 1) List of modules (also safely readable with preempt_disable), * 2) module_use links, * 3) module_addr_min/module_addr_max. * (delete uses stop_machine/add uses RCU list operations). */ DEFINE_MUTEX(module_mutex); EXPORT_SYMBOL_GPL(module_mutex); static LIST_HEAD(modules); #ifdef CONFIG_KGDB_KDB struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */ #endif /* CONFIG_KGDB_KDB */ /* Block module loading/unloading? */ int modules_disabled = 0; core_param(nomodule, modules_disabled, bint, 0); /* Waiting for a module to finish initializing? */ static DECLARE_WAIT_QUEUE_HEAD(module_wq); static BLOCKING_NOTIFIER_HEAD(module_notify_list); /* Bounds of module allocation, for speeding __module_address. * Protected by module_mutex. 
*/ static unsigned long module_addr_min = -1UL, module_addr_max = 0; int register_module_notifier(struct notifier_block * nb) { return blocking_notifier_chain_register(&module_notify_list, nb); } EXPORT_SYMBOL(register_module_notifier); int unregister_module_notifier(struct notifier_block * nb) { return blocking_notifier_chain_unregister(&module_notify_list, nb); } EXPORT_SYMBOL(unregister_module_notifier); struct load_info { Elf_Ehdr *hdr; unsigned long len; Elf_Shdr *sechdrs; char *secstrings, *strtab; unsigned long symoffs, stroffs; struct _ddebug *debug; unsigned int num_debug; struct { unsigned int sym, str, mod, vers, info, pcpu; } index; }; /* We require a truly strong try_module_get(): 0 means failure due to ongoing or failed initialization etc. */ static inline int strong_try_module_get(struct module *mod) { if (mod && mod->state == MODULE_STATE_COMING) return -EBUSY; if (try_module_get(mod)) return 0; else return -ENOENT; } static inline void add_taint_module(struct module *mod, unsigned flag) { add_taint(flag); mod->taints |= (1U << flag); } /* * A thread that wants to hold a reference to a module only while it * is running can call this to safely exit. nfsd and lockd use this. */ void __module_put_and_exit(struct module *mod, long code) { module_put(mod); do_exit(code); } EXPORT_SYMBOL(__module_put_and_exit); /* Find a module section: 0 means not found. */ static unsigned int find_sec(const struct load_info *info, const char *name) { unsigned int i; for (i = 1; i < info->hdr->e_shnum; i++) { Elf_Shdr *shdr = &info->sechdrs[i]; /* Alloc bit cleared means "ignore it." */ if ((shdr->sh_flags & SHF_ALLOC) && strcmp(info->secstrings + shdr->sh_name, name) == 0) return i; } return 0; } /* Find a module section, or NULL. */ static void *section_addr(const struct load_info *info, const char *name) { /* Section 0 has sh_addr 0. */ return (void *)info->sechdrs[find_sec(info, name)].sh_addr; } /* Find a module section, or NULL. 
Fill in number of "objects" in section. */ static void *section_objs(const struct load_info *info, const char *name, size_t object_size, unsigned int *num) { unsigned int sec = find_sec(info, name); /* Section 0 has sh_addr 0 and sh_size 0. */ *num = info->sechdrs[sec].sh_size / object_size; return (void *)info->sechdrs[sec].sh_addr; } /* Provided by the linker */ extern const struct kernel_symbol __start___ksymtab[]; extern const struct kernel_symbol __stop___ksymtab[]; extern const struct kernel_symbol __start___ksymtab_gpl[]; extern const struct kernel_symbol __stop___ksymtab_gpl[]; extern const struct kernel_symbol __start___ksymtab_gpl_future[]; extern const struct kernel_symbol __stop___ksymtab_gpl_future[]; extern const unsigned long __start___kcrctab[]; extern const unsigned long __start___kcrctab_gpl[]; extern const unsigned long __start___kcrctab_gpl_future[]; #ifdef CONFIG_UNUSED_SYMBOLS extern const struct kernel_symbol __start___ksymtab_unused[]; extern const struct kernel_symbol __stop___ksymtab_unused[]; extern const struct kernel_symbol __start___ksymtab_unused_gpl[]; extern const struct kernel_symbol __stop___ksymtab_unused_gpl[]; extern const unsigned long __start___kcrctab_unused[]; extern const unsigned long __start___kcrctab_unused_gpl[]; #endif #ifndef CONFIG_MODVERSIONS #define symversion(base, idx) NULL #else #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL) #endif static bool each_symbol_in_section(const struct symsearch *arr, unsigned int arrsize, struct module *owner, bool (*fn)(const struct symsearch *syms, struct module *owner, void *data), void *data) { unsigned int j; for (j = 0; j < arrsize; j++) { if (fn(&arr[j], owner, data)) return true; } return false; } /* Returns true as soon as fn returns true, otherwise false. 
*/ bool each_symbol_section(bool (*fn)(const struct symsearch *arr, struct module *owner, void *data), void *data) { struct module *mod; static const struct symsearch arr[] = { { __start___ksymtab, __stop___ksymtab, __start___kcrctab, NOT_GPL_ONLY, false }, { __start___ksymtab_gpl, __stop___ksymtab_gpl, __start___kcrctab_gpl, GPL_ONLY, false }, { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future, __start___kcrctab_gpl_future, WILL_BE_GPL_ONLY, false }, #ifdef CONFIG_UNUSED_SYMBOLS { __start___ksymtab_unused, __stop___ksymtab_unused, __start___kcrctab_unused, NOT_GPL_ONLY, true }, { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl, __start___kcrctab_unused_gpl, GPL_ONLY, true }, #endif }; if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data)) return true; list_for_each_entry_rcu(mod, &modules, list) { struct symsearch arr[] = { { mod->syms, mod->syms + mod->num_syms, mod->crcs, NOT_GPL_ONLY, false }, { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, mod->gpl_crcs, GPL_ONLY, false }, { mod->gpl_future_syms, mod->gpl_future_syms + mod->num_gpl_future_syms, mod->gpl_future_crcs, WILL_BE_GPL_ONLY, false }, #ifdef CONFIG_UNUSED_SYMBOLS { mod->unused_syms, mod->unused_syms + mod->num_unused_syms, mod->unused_crcs, NOT_GPL_ONLY, true }, { mod->unused_gpl_syms, mod->unused_gpl_syms + mod->num_unused_gpl_syms, mod->unused_gpl_crcs, GPL_ONLY, true }, #endif }; if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) return true; } return false; } EXPORT_SYMBOL_GPL(each_symbol_section); struct find_symbol_arg { /* Input */ const char *name; bool gplok; bool warn; /* Output */ struct module *owner; const unsigned long *crc; const struct kernel_symbol *sym; }; static bool check_symbol(const struct symsearch *syms, struct module *owner, unsigned int symnum, void *data) { struct find_symbol_arg *fsa = data; if (!fsa->gplok) { if (syms->licence == GPL_ONLY) return false; if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) { printk(KERN_WARNING 
"Symbol %s is being used " "by a non-GPL module, which will not " "be allowed in the future\n", fsa->name); printk(KERN_WARNING "Please see the file " "Documentation/feature-removal-schedule.txt " "in the kernel source tree for more details.\n"); } } #ifdef CONFIG_UNUSED_SYMBOLS if (syms->unused && fsa->warn) { printk(KERN_WARNING "Symbol %s is marked as UNUSED, " "however this module is using it.\n", fsa->name); printk(KERN_WARNING "This symbol will go away in the future.\n"); printk(KERN_WARNING "Please evalute if this is the right api to use and if " "it really is, submit a report the linux kernel " "mailinglist together with submitting your code for " "inclusion.\n"); } #endif fsa->owner = owner; fsa->crc = symversion(syms->crcs, symnum); fsa->sym = &syms->start[symnum]; return true; } static int cmp_name(const void *va, const void *vb) { const char *a; const struct kernel_symbol *b; a = va; b = vb; return strcmp(a, b->name); } static bool find_symbol_in_section(const struct symsearch *syms, struct module *owner, void *data) { struct find_symbol_arg *fsa = data; struct kernel_symbol *sym; sym = bsearch(fsa->name, syms->start, syms->stop - syms->start, sizeof(struct kernel_symbol), cmp_name); if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data)) return true; return false; } /* Find a symbol and return it, along with, (optional) crc and * (optional) module which owns it. Needs preempt disabled or module_mutex. */ const struct kernel_symbol *find_symbol(const char *name, struct module **owner, const unsigned long **crc, bool gplok, bool warn) { struct find_symbol_arg fsa; fsa.name = name; fsa.gplok = gplok; fsa.warn = warn; if (each_symbol_section(find_symbol_in_section, &fsa)) { if (owner) *owner = fsa.owner; if (crc) *crc = fsa.crc; return fsa.sym; } pr_debug("Failed to find symbol %s\n", name); return NULL; } EXPORT_SYMBOL_GPL(find_symbol); /* Search for module by name: must hold module_mutex. 
*/ struct module *find_module(const char *name) { struct module *mod; list_for_each_entry(mod, &modules, list) { if (strcmp(mod->name, name) == 0) return mod; } return NULL; } EXPORT_SYMBOL_GPL(find_module); #ifdef CONFIG_SMP static inline void __percpu *mod_percpu(struct module *mod) { return mod->percpu; } static int percpu_modalloc(struct module *mod, unsigned long size, unsigned long align) { if (align > PAGE_SIZE) { printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", mod->name, align, PAGE_SIZE); align = PAGE_SIZE; } mod->percpu = __alloc_reserved_percpu(size, align); if (!mod->percpu) { printk(KERN_WARNING "%s: Could not allocate %lu bytes percpu data\n", mod->name, size); return -ENOMEM; } mod->percpu_size = size; return 0; } static void percpu_modfree(struct module *mod) { free_percpu(mod->percpu); } static unsigned int find_pcpusec(struct load_info *info) { return find_sec(info, ".data..percpu"); } static void percpu_modcopy(struct module *mod, const void *from, unsigned long size) { int cpu; for_each_possible_cpu(cpu) memcpy(per_cpu_ptr(mod->percpu, cpu), from, size); } /** * is_module_percpu_address - test whether address is from module static percpu * @addr: address to test * * Test whether @addr belongs to module static percpu area. * * RETURNS: * %true if @addr is from module static percpu area */ bool is_module_percpu_address(unsigned long addr) { struct module *mod; unsigned int cpu; preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { if (!mod->percpu_size) continue; for_each_possible_cpu(cpu) { void *start = per_cpu_ptr(mod->percpu, cpu); if ((void *)addr >= start && (void *)addr < start + mod->percpu_size) { preempt_enable(); return true; } } } preempt_enable(); return false; } #else /* ... 
!CONFIG_SMP */ static inline void __percpu *mod_percpu(struct module *mod) { return NULL; } static inline int percpu_modalloc(struct module *mod, unsigned long size, unsigned long align) { return -ENOMEM; } static inline void percpu_modfree(struct module *mod) { } static unsigned int find_pcpusec(struct load_info *info) { return 0; } static inline void percpu_modcopy(struct module *mod, const void *from, unsigned long size) { /* pcpusec should be 0, and size of that section should be 0. */ BUG_ON(size != 0); } bool is_module_percpu_address(unsigned long addr) { return false; } #endif /* CONFIG_SMP */ #define MODINFO_ATTR(field) \ static void setup_modinfo_##field(struct module *mod, const char *s) \ { \ mod->field = kstrdup(s, GFP_KERNEL); \ } \ static ssize_t show_modinfo_##field(struct module_attribute *mattr, \ struct module_kobject *mk, char *buffer) \ { \ return sprintf(buffer, "%s\n", mk->mod->field); \ } \ static int modinfo_##field##_exists(struct module *mod) \ { \ return mod->field != NULL; \ } \ static void free_modinfo_##field(struct module *mod) \ { \ kfree(mod->field); \ mod->field = NULL; \ } \ static struct module_attribute modinfo_##field = { \ .attr = { .name = __stringify(field), .mode = 0444 }, \ .show = show_modinfo_##field, \ .setup = setup_modinfo_##field, \ .test = modinfo_##field##_exists, \ .free = free_modinfo_##field, \ }; MODINFO_ATTR(version); MODINFO_ATTR(srcversion); static char last_unloaded_module[MODULE_NAME_LEN+1]; #ifdef CONFIG_MODULE_UNLOAD EXPORT_TRACEPOINT_SYMBOL(module_get); /* Init the unload section of the module. */ static int module_unload_init(struct module *mod) { mod->refptr = alloc_percpu(struct module_ref); if (!mod->refptr) return -ENOMEM; INIT_LIST_HEAD(&mod->source_list); INIT_LIST_HEAD(&mod->target_list); /* Hold reference count during initialization. */ __this_cpu_write(mod->refptr->incs, 1); /* Backwards compatibility macros put refcount during init. 
*/ mod->waiter = current; return 0; } /* Does a already use b? */ static int already_uses(struct module *a, struct module *b) { struct module_use *use; list_for_each_entry(use, &b->source_list, source_list) { if (use->source == a) { pr_debug("%s uses %s!\n", a->name, b->name); return 1; } } pr_debug("%s does not use %s!\n", a->name, b->name); return 0; } /* * Module a uses b * - we add 'a' as a "source", 'b' as a "target" of module use * - the module_use is added to the list of 'b' sources (so * 'b' can walk the list to see who sourced them), and of 'a' * targets (so 'a' can see what modules it targets). */ static int add_module_usage(struct module *a, struct module *b) { struct module_use *use; pr_debug("Allocating new usage for %s.\n", a->name); use = kmalloc(sizeof(*use), GFP_ATOMIC); if (!use) { printk(KERN_WARNING "%s: out of memory loading\n", a->name); return -ENOMEM; } use->source = a; use->target = b; list_add(&use->source_list, &b->source_list); list_add(&use->target_list, &a->target_list); return 0; } /* Module a uses b: caller needs module_mutex() */ int ref_module(struct module *a, struct module *b) { int err; if (b == NULL || already_uses(a, b)) return 0; /* If module isn't available, we fail. */ err = strong_try_module_get(b); if (err) return err; err = add_module_usage(a, b); if (err) { module_put(b); return err; } return 0; } EXPORT_SYMBOL_GPL(ref_module); /* Clear the unload stuff of the module. 
*/ static void module_unload_free(struct module *mod) { struct module_use *use, *tmp; mutex_lock(&module_mutex); list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) { struct module *i = use->target; pr_debug("%s unusing %s\n", mod->name, i->name); module_put(i); list_del(&use->source_list); list_del(&use->target_list); kfree(use); } mutex_unlock(&module_mutex); free_percpu(mod->refptr); } #ifdef CONFIG_MODULE_FORCE_UNLOAD static inline int try_force_unload(unsigned int flags) { int ret = (flags & O_TRUNC); if (ret) add_taint(TAINT_FORCED_RMMOD); return ret; } #else static inline int try_force_unload(unsigned int flags) { return 0; } #endif /* CONFIG_MODULE_FORCE_UNLOAD */ struct stopref { struct module *mod; int flags; int *forced; }; /* Whole machine is stopped with interrupts off when this runs. */ static int __try_stop_module(void *_sref) { struct stopref *sref = _sref; /* If it's not unused, quit unless we're forcing. */ if (module_refcount(sref->mod) != 0) { if (!(*sref->forced = try_force_unload(sref->flags))) return -EWOULDBLOCK; } /* Mark it as dying. */ sref->mod->state = MODULE_STATE_GOING; return 0; } static int try_stop_module(struct module *mod, int flags, int *forced) { if (flags & O_NONBLOCK) { struct stopref sref = { mod, flags, forced }; return stop_machine(__try_stop_module, &sref, NULL); } else { /* We don't need to stop the machine for this. */ mod->state = MODULE_STATE_GOING; synchronize_sched(); return 0; } } unsigned long module_refcount(struct module *mod) { unsigned long incs = 0, decs = 0; int cpu; for_each_possible_cpu(cpu) decs += per_cpu_ptr(mod->refptr, cpu)->decs; /* * ensure the incs are added up after the decs. * module_put ensures incs are visible before decs with smp_wmb. * * This 2-count scheme avoids the situation where the refcount * for CPU0 is read, then CPU0 increments the module refcount, * then CPU1 drops that refcount, then the refcount for CPU1 is * read. 
We would record a decrement but not its corresponding * increment so we would see a low count (disaster). * * Rare situation? But module_refcount can be preempted, and we * might be tallying up 4096+ CPUs. So it is not impossible. */ smp_rmb(); for_each_possible_cpu(cpu) incs += per_cpu_ptr(mod->refptr, cpu)->incs; return incs - decs; } EXPORT_SYMBOL(module_refcount); /* This exists whether we can unload or not */ static void free_module(struct module *mod); static void wait_for_zero_refcount(struct module *mod) { /* Since we might sleep for some time, release the mutex first */ mutex_unlock(&module_mutex); for (;;) { pr_debug("Looking at refcount...\n"); set_current_state(TASK_UNINTERRUPTIBLE); if (module_refcount(mod) == 0) break; schedule(); } current->state = TASK_RUNNING; mutex_lock(&module_mutex); } SYSCALL_DEFINE2(delete_module, const char __user *, name_user, unsigned int, flags) { struct module *mod; char name[MODULE_NAME_LEN]; int ret, forced = 0; if (!capable(CAP_SYS_MODULE) || modules_disabled) return -EPERM; if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0) return -EFAULT; name[MODULE_NAME_LEN-1] = '\0'; if (mutex_lock_interruptible(&module_mutex) != 0) return -EINTR; mod = find_module(name); if (!mod) { ret = -ENOENT; goto out; } if (!list_empty(&mod->source_list)) { /* Other modules depend on us: get rid of them first. */ ret = -EWOULDBLOCK; goto out; } /* Doing init or already dying? */ if (mod->state != MODULE_STATE_LIVE) { /* FIXME: if (force), slam module count and wake up waiter --RR */ pr_debug("%s already dying\n", mod->name); ret = -EBUSY; goto out; } /* If it has an init func, it must have an exit func to unload */ if (mod->init && !mod->exit) { forced = try_force_unload(flags); if (!forced) { /* This module can't be removed */ ret = -EBUSY; goto out; } } /* Set this up before setting mod->state */ mod->waiter = current; /* Stop the machine so refcounts can't move and disable module. 
*/ ret = try_stop_module(mod, flags, &forced); if (ret != 0) goto out; /* Never wait if forced. */ if (!forced && module_refcount(mod) != 0) wait_for_zero_refcount(mod); mutex_unlock(&module_mutex); /* Final destruction now no one is using it. */ if (mod->exit != NULL) mod->exit(); blocking_notifier_call_chain(&module_notify_list, MODULE_STATE_GOING, mod); async_synchronize_full(); /* Store the name of the last unloaded module for diagnostic purposes */ strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module)); free_module(mod); return 0; out: mutex_unlock(&module_mutex); return ret; } static inline void print_unload_info(struct seq_file *m, struct module *mod) { struct module_use *use; int printed_something = 0; seq_printf(m, " %lu ", module_refcount(mod)); /* Always include a trailing , so userspace can differentiate between this and the old multi-field proc format. */ list_for_each_entry(use, &mod->source_list, source_list) { printed_something = 1; seq_printf(m, "%s,", use->source->name); } if (mod->init != NULL && mod->exit == NULL) { printed_something = 1; seq_printf(m, "[permanent],"); } if (!printed_something) seq_printf(m, "-"); } void __symbol_put(const char *symbol) { struct module *owner; preempt_disable(); if (!find_symbol(symbol, &owner, NULL, true, false)) BUG(); module_put(owner); preempt_enable(); } EXPORT_SYMBOL(__symbol_put); /* Note this assumes addr is a function, which it currently always is. */ void symbol_put_addr(void *addr) { struct module *modaddr; unsigned long a = (unsigned long)dereference_function_descriptor(addr); if (core_kernel_text(a)) return; /* module_text_address is safe here: we're supposed to have reference * to module from symbol_get, so it can't go away. 
*/ modaddr = __module_text_address(a); BUG_ON(!modaddr); module_put(modaddr); } EXPORT_SYMBOL_GPL(symbol_put_addr); static ssize_t show_refcnt(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { return sprintf(buffer, "%lu\n", module_refcount(mk->mod)); } static struct module_attribute modinfo_refcnt = __ATTR(refcnt, 0444, show_refcnt, NULL); void __module_get(struct module *module) { if (module) { preempt_disable(); __this_cpu_inc(module->refptr->incs); trace_module_get(module, _RET_IP_); preempt_enable(); } } EXPORT_SYMBOL(__module_get); bool try_module_get(struct module *module) { bool ret = true; if (module) { preempt_disable(); if (likely(module_is_live(module))) { __this_cpu_inc(module->refptr->incs); trace_module_get(module, _RET_IP_); } else ret = false; preempt_enable(); } return ret; } EXPORT_SYMBOL(try_module_get); void module_put(struct module *module) { if (module) { preempt_disable(); smp_wmb(); /* see comment in module_refcount */ __this_cpu_inc(module->refptr->decs); trace_module_put(module, _RET_IP_); /* Maybe they're waiting for us to drop reference? */ if (unlikely(!module_is_live(module))) wake_up_process(module->waiter); preempt_enable(); } } EXPORT_SYMBOL(module_put); #else /* !CONFIG_MODULE_UNLOAD */ static inline void print_unload_info(struct seq_file *m, struct module *mod) { /* We don't know the usage count, or what modules are using. 
*/ seq_printf(m, " - -"); } static inline void module_unload_free(struct module *mod) { } int ref_module(struct module *a, struct module *b) { return strong_try_module_get(b); } EXPORT_SYMBOL_GPL(ref_module); static inline int module_unload_init(struct module *mod) { return 0; } #endif /* CONFIG_MODULE_UNLOAD */ static size_t module_flags_taint(struct module *mod, char *buf) { size_t l = 0; if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE)) buf[l++] = 'P'; if (mod->taints & (1 << TAINT_OOT_MODULE)) buf[l++] = 'O'; if (mod->taints & (1 << TAINT_FORCED_MODULE)) buf[l++] = 'F'; if (mod->taints & (1 << TAINT_CRAP)) buf[l++] = 'C'; /* * TAINT_FORCED_RMMOD: could be added. * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't * apply to modules. */ return l; } static ssize_t show_initstate(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { const char *state = "unknown"; switch (mk->mod->state) { case MODULE_STATE_LIVE: state = "live"; break; case MODULE_STATE_COMING: state = "coming"; break; case MODULE_STATE_GOING: state = "going"; break; } return sprintf(buffer, "%s\n", state); } static struct module_attribute modinfo_initstate = __ATTR(initstate, 0444, show_initstate, NULL); static ssize_t store_uevent(struct module_attribute *mattr, struct module_kobject *mk, const char *buffer, size_t count) { enum kobject_action action; if (kobject_action_type(buffer, count, &action) == 0) kobject_uevent(&mk->kobj, action); return count; } struct module_attribute module_uevent = __ATTR(uevent, 0200, NULL, store_uevent); static ssize_t show_coresize(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { return sprintf(buffer, "%u\n", mk->mod->core_size); } static struct module_attribute modinfo_coresize = __ATTR(coresize, 0444, show_coresize, NULL); static ssize_t show_initsize(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { return sprintf(buffer, "%u\n", mk->mod->init_size); } static struct 
module_attribute modinfo_initsize = __ATTR(initsize, 0444, show_initsize, NULL); static ssize_t show_taint(struct module_attribute *mattr, struct module_kobject *mk, char *buffer) { size_t l; l = module_flags_taint(mk->mod, buffer); buffer[l++] = '\n'; return l; } static struct module_attribute modinfo_taint = __ATTR(taint, 0444, show_taint, NULL); static struct module_attribute *modinfo_attrs[] = { &module_uevent, &modinfo_version, &modinfo_srcversion, &modinfo_initstate, &modinfo_coresize, &modinfo_initsize, &modinfo_taint, #ifdef CONFIG_MODULE_UNLOAD &modinfo_refcnt, #endif NULL, }; static const char vermagic[] = VERMAGIC_STRING; static int try_to_force_load(struct module *mod, const char *reason) { #ifdef CONFIG_MODULE_FORCE_LOAD if (!test_taint(TAINT_FORCED_MODULE)) printk(KERN_WARNING "%s: %s: kernel tainted.\n", mod->name, reason); add_taint_module(mod, TAINT_FORCED_MODULE); return 0; #else return -ENOEXEC; #endif } #ifdef CONFIG_MODVERSIONS /* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */ static unsigned long maybe_relocated(unsigned long crc, const struct module *crc_owner) { #ifdef ARCH_RELOCATES_KCRCTAB if (crc_owner == NULL) return crc - (unsigned long)reloc_start; #endif return crc; } static int check_version(Elf_Shdr *sechdrs, unsigned int versindex, const char *symname, struct module *mod, const unsigned long *crc, const struct module *crc_owner) { unsigned int i, num_versions; struct modversion_info *versions; /* Exporting module didn't supply crcs? OK, we're already tainted. */ if (!crc) return 1; /* No versions at all? modprobe --force does this. 
*/ if (versindex == 0) return try_to_force_load(mod, symname) == 0; versions = (void *) sechdrs[versindex].sh_addr; num_versions = sechdrs[versindex].sh_size / sizeof(struct modversion_info); for (i = 0; i < num_versions; i++) { if (strcmp(versions[i].name, symname) != 0) continue; if (versions[i].crc == maybe_relocated(*crc, crc_owner)) return 1; pr_debug("Found checksum %lX vs module %lX\n", maybe_relocated(*crc, crc_owner), versions[i].crc); goto bad_version; } printk(KERN_WARNING "%s: no symbol version for %s\n", mod->name, symname); return 0; bad_version: printk("%s: disagrees about version of symbol %s\n", mod->name, symname); return 0; } static inline int check_modstruct_version(Elf_Shdr *sechdrs, unsigned int versindex, struct module *mod) { const unsigned long *crc; /* Since this should be found in kernel (which can't be removed), * no locking is necessary. */ if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL, &crc, true, false)) BUG(); return check_version(sechdrs, versindex, "module_layout", mod, crc, NULL); } /* First part is kernel version, which we ignore if module has crcs. */ static inline int same_magic(const char *amagic, const char *bmagic, bool has_crcs) { if (has_crcs) { amagic += strcspn(amagic, " "); bmagic += strcspn(bmagic, " "); } return strcmp(amagic, bmagic) == 0; } #else static inline int check_version(Elf_Shdr *sechdrs, unsigned int versindex, const char *symname, struct module *mod, const unsigned long *crc, const struct module *crc_owner) { return 1; } static inline int check_modstruct_version(Elf_Shdr *sechdrs, unsigned int versindex, struct module *mod) { return 1; } static inline int same_magic(const char *amagic, const char *bmagic, bool has_crcs) { return strcmp(amagic, bmagic) == 0; } #endif /* CONFIG_MODVERSIONS */ /* Resolve a symbol for this module. I.e. if we find one, record usage. 
*/ static const struct kernel_symbol *resolve_symbol(struct module *mod, const struct load_info *info, const char *name, char ownername[]) { struct module *owner; const struct kernel_symbol *sym; const unsigned long *crc; int err; mutex_lock(&module_mutex); sym = find_symbol(name, &owner, &crc, !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true); if (!sym) goto unlock; if (!check_version(info->sechdrs, info->index.vers, name, mod, crc, owner)) { sym = ERR_PTR(-EINVAL); goto getname; } err = ref_module(mod, owner); if (err) { sym = ERR_PTR(err); goto getname; } getname: /* We must make copy under the lock if we failed to get ref. */ strncpy(ownername, module_name(owner), MODULE_NAME_LEN); unlock: mutex_unlock(&module_mutex); return sym; } static const struct kernel_symbol * resolve_symbol_wait(struct module *mod, const struct load_info *info, const char *name) { const struct kernel_symbol *ksym; char owner[MODULE_NAME_LEN]; if (wait_event_interruptible_timeout(module_wq, !IS_ERR(ksym = resolve_symbol(mod, info, name, owner)) || PTR_ERR(ksym) != -EBUSY, 30 * HZ) <= 0) { printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n", mod->name, owner); } return ksym; } /* * /sys/module/foo/sections stuff * J. 
Corbet <corbet@lwn.net> */ #ifdef CONFIG_SYSFS #ifdef CONFIG_KALLSYMS static inline bool sect_empty(const Elf_Shdr *sect) { return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0; } struct module_sect_attr { struct module_attribute mattr; char *name; unsigned long address; }; struct module_sect_attrs { struct attribute_group grp; unsigned int nsections; struct module_sect_attr attrs[0]; }; static ssize_t module_sect_show(struct module_attribute *mattr, struct module_kobject *mk, char *buf) { struct module_sect_attr *sattr = container_of(mattr, struct module_sect_attr, mattr); return sprintf(buf, "0x%pK\n", (void *)sattr->address); } static void free_sect_attrs(struct module_sect_attrs *sect_attrs) { unsigned int section; for (section = 0; section < sect_attrs->nsections; section++) kfree(sect_attrs->attrs[section].name); kfree(sect_attrs); } static void add_sect_attrs(struct module *mod, const struct load_info *info) { unsigned int nloaded = 0, i, size[2]; struct module_sect_attrs *sect_attrs; struct module_sect_attr *sattr; struct attribute **gattr; /* Count loaded sections and allocate structures */ for (i = 0; i < info->hdr->e_shnum; i++) if (!sect_empty(&info->sechdrs[i])) nloaded++; size[0] = ALIGN(sizeof(*sect_attrs) + nloaded * sizeof(sect_attrs->attrs[0]), sizeof(sect_attrs->grp.attrs[0])); size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]); sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL); if (sect_attrs == NULL) return; /* Setup section attributes. 
*/ sect_attrs->grp.name = "sections"; sect_attrs->grp.attrs = (void *)sect_attrs + size[0]; sect_attrs->nsections = 0; sattr = &sect_attrs->attrs[0]; gattr = &sect_attrs->grp.attrs[0]; for (i = 0; i < info->hdr->e_shnum; i++) { Elf_Shdr *sec = &info->sechdrs[i]; if (sect_empty(sec)) continue; sattr->address = sec->sh_addr; sattr->name = kstrdup(info->secstrings + sec->sh_name, GFP_KERNEL); if (sattr->name == NULL) goto out; sect_attrs->nsections++; sysfs_attr_init(&sattr->mattr.attr); sattr->mattr.show = module_sect_show; sattr->mattr.store = NULL; sattr->mattr.attr.name = sattr->name; sattr->mattr.attr.mode = S_IRUGO; *(gattr++) = &(sattr++)->mattr.attr; } *gattr = NULL; if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp)) goto out; mod->sect_attrs = sect_attrs; return; out: free_sect_attrs(sect_attrs); } static void remove_sect_attrs(struct module *mod) { if (mod->sect_attrs) { sysfs_remove_group(&mod->mkobj.kobj, &mod->sect_attrs->grp); /* We are positive that no one is using any sect attrs * at this point. Deallocate immediately. */ free_sect_attrs(mod->sect_attrs); mod->sect_attrs = NULL; } } /* * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections. */ struct module_notes_attrs { struct kobject *dir; unsigned int notes; struct bin_attribute attrs[0]; }; static ssize_t module_notes_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t pos, size_t count) { /* * The caller checked the pos and count against our size. 
*/ memcpy(buf, bin_attr->private + pos, count); return count; } static void free_notes_attrs(struct module_notes_attrs *notes_attrs, unsigned int i) { if (notes_attrs->dir) { while (i-- > 0) sysfs_remove_bin_file(notes_attrs->dir, &notes_attrs->attrs[i]); kobject_put(notes_attrs->dir); } kfree(notes_attrs); } static void add_notes_attrs(struct module *mod, const struct load_info *info) { unsigned int notes, loaded, i; struct module_notes_attrs *notes_attrs; struct bin_attribute *nattr; /* failed to create section attributes, so can't create notes */ if (!mod->sect_attrs) return; /* Count notes sections and allocate structures. */ notes = 0; for (i = 0; i < info->hdr->e_shnum; i++) if (!sect_empty(&info->sechdrs[i]) && (info->sechdrs[i].sh_type == SHT_NOTE)) ++notes; if (notes == 0) return; notes_attrs = kzalloc(sizeof(*notes_attrs) + notes * sizeof(notes_attrs->attrs[0]), GFP_KERNEL); if (notes_attrs == NULL) return; notes_attrs->notes = notes; nattr = &notes_attrs->attrs[0]; for (loaded = i = 0; i < info->hdr->e_shnum; ++i) { if (sect_empty(&info->sechdrs[i])) continue; if (info->sechdrs[i].sh_type == SHT_NOTE) { sysfs_bin_attr_init(nattr); nattr->attr.name = mod->sect_attrs->attrs[loaded].name; nattr->attr.mode = S_IRUGO; nattr->size = info->sechdrs[i].sh_size; nattr->private = (void *) info->sechdrs[i].sh_addr; nattr->read = module_notes_read; ++nattr; } ++loaded; } notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj); if (!notes_attrs->dir) goto out; for (i = 0; i < notes; ++i) if (sysfs_create_bin_file(notes_attrs->dir, &notes_attrs->attrs[i])) goto out; mod->notes_attrs = notes_attrs; return; out: free_notes_attrs(notes_attrs, i); } static void remove_notes_attrs(struct module *mod) { if (mod->notes_attrs) free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes); } #else static inline void add_sect_attrs(struct module *mod, const struct load_info *info) { } static inline void remove_sect_attrs(struct module *mod) { } static inline void 
add_notes_attrs(struct module *mod, const struct load_info *info) { } static inline void remove_notes_attrs(struct module *mod) { } #endif /* CONFIG_KALLSYMS */ static void add_usage_links(struct module *mod) { #ifdef CONFIG_MODULE_UNLOAD struct module_use *use; int nowarn; mutex_lock(&module_mutex); list_for_each_entry(use, &mod->target_list, target_list) { nowarn = sysfs_create_link(use->target->holders_dir, &mod->mkobj.kobj, mod->name); } mutex_unlock(&module_mutex); #endif } static void del_usage_links(struct module *mod) { #ifdef CONFIG_MODULE_UNLOAD struct module_use *use; mutex_lock(&module_mutex); list_for_each_entry(use, &mod->target_list, target_list) sysfs_remove_link(use->target->holders_dir, mod->name); mutex_unlock(&module_mutex); #endif } static int module_add_modinfo_attrs(struct module *mod) { struct module_attribute *attr; struct module_attribute *temp_attr; int error = 0; int i; mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) * (ARRAY_SIZE(modinfo_attrs) + 1)), GFP_KERNEL); if (!mod->modinfo_attrs) return -ENOMEM; temp_attr = mod->modinfo_attrs; for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) { if (!attr->test || (attr->test && attr->test(mod))) { memcpy(temp_attr, attr, sizeof(*temp_attr)); sysfs_attr_init(&temp_attr->attr); error = sysfs_create_file(&mod->mkobj.kobj,&temp_attr->attr); ++temp_attr; } } return error; } static void module_remove_modinfo_attrs(struct module *mod) { struct module_attribute *attr; int i; for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) { /* pick a field to test for end of list */ if (!attr->attr.name) break; sysfs_remove_file(&mod->mkobj.kobj,&attr->attr); if (attr->free) attr->free(mod); } kfree(mod->modinfo_attrs); } static int mod_sysfs_init(struct module *mod) { int err; struct kobject *kobj; if (!module_sysfs_initialized) { printk(KERN_ERR "%s: module sysfs not initialized\n", mod->name); err = -EINVAL; goto out; } kobj = kset_find_obj(module_kset, mod->name); if (kobj) { printk(KERN_ERR 
"%s: module is already loaded\n", mod->name); kobject_put(kobj); err = -EINVAL; goto out; } mod->mkobj.mod = mod; memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj)); mod->mkobj.kobj.kset = module_kset; err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL, "%s", mod->name); if (err) kobject_put(&mod->mkobj.kobj); /* delay uevent until full sysfs population */ out: return err; } static int mod_sysfs_setup(struct module *mod, const struct load_info *info, struct kernel_param *kparam, unsigned int num_params) { int err; err = mod_sysfs_init(mod); if (err) goto out; mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj); if (!mod->holders_dir) { err = -ENOMEM; goto out_unreg; } err = module_param_sysfs_setup(mod, kparam, num_params); if (err) goto out_unreg_holders; err = module_add_modinfo_attrs(mod); if (err) goto out_unreg_param; add_usage_links(mod); add_sect_attrs(mod, info); add_notes_attrs(mod, info); kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD); return 0; out_unreg_param: module_param_sysfs_remove(mod); out_unreg_holders: kobject_put(mod->holders_dir); out_unreg: kobject_put(&mod->mkobj.kobj); out: return err; } static void mod_sysfs_fini(struct module *mod) { remove_notes_attrs(mod); remove_sect_attrs(mod); kobject_put(&mod->mkobj.kobj); } #else /* !CONFIG_SYSFS */ static int mod_sysfs_setup(struct module *mod, const struct load_info *info, struct kernel_param *kparam, unsigned int num_params) { return 0; } static void mod_sysfs_fini(struct module *mod) { } static void module_remove_modinfo_attrs(struct module *mod) { } static void del_usage_links(struct module *mod) { } #endif /* CONFIG_SYSFS */ static void mod_sysfs_teardown(struct module *mod) { del_usage_links(mod); module_remove_modinfo_attrs(mod); module_param_sysfs_remove(mod); kobject_put(mod->mkobj.drivers_dir); kobject_put(mod->holders_dir); mod_sysfs_fini(mod); } /* * unlink the module with the whole machine is stopped with interrupts off * - this defends against kallsyms 
not taking locks */ static int __unlink_module(void *_mod) { struct module *mod = _mod; list_del(&mod->list); module_bug_cleanup(mod); return 0; } #ifdef CONFIG_DEBUG_SET_MODULE_RONX /* * LKM RO/NX protection: protect module's text/ro-data * from modification and any data from execution. */ void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages)) { unsigned long begin_pfn = PFN_DOWN((unsigned long)start); unsigned long end_pfn = PFN_DOWN((unsigned long)end); if (end_pfn > begin_pfn) set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn); } static void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { /* begin and end PFNs of the current subsection */ unsigned long begin_pfn; unsigned long end_pfn; /* * Set RO for module text and RO-data: * - Always protect first page. * - Do not protect last partial page. */ if (ro_size > 0) set_page_attributes(base, base + ro_size, set_memory_ro); /* * Set NX permissions for module data: * - Do not protect first partial page. * - Always protect last page. 
*/ if (total_size > text_size) { begin_pfn = PFN_UP((unsigned long)base + text_size); end_pfn = PFN_UP((unsigned long)base + total_size); if (end_pfn > begin_pfn) set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn); } } static void unset_module_core_ro_nx(struct module *mod) { set_page_attributes(mod->module_core + mod->core_text_size, mod->module_core + mod->core_size, set_memory_x); set_page_attributes(mod->module_core, mod->module_core + mod->core_ro_size, set_memory_rw); } static void unset_module_init_ro_nx(struct module *mod) { set_page_attributes(mod->module_init + mod->init_text_size, mod->module_init + mod->init_size, set_memory_x); set_page_attributes(mod->module_init, mod->module_init + mod->init_ro_size, set_memory_rw); } /* Iterate through all modules and set each module's text as RW */ void set_all_modules_text_rw(void) { struct module *mod; mutex_lock(&module_mutex); list_for_each_entry_rcu(mod, &modules, list) { if ((mod->module_core) && (mod->core_text_size)) { set_page_attributes(mod->module_core, mod->module_core + mod->core_text_size, set_memory_rw); } if ((mod->module_init) && (mod->init_text_size)) { set_page_attributes(mod->module_init, mod->module_init + mod->init_text_size, set_memory_rw); } } mutex_unlock(&module_mutex); } /* Iterate through all modules and set each module's text as RO */ void set_all_modules_text_ro(void) { struct module *mod; mutex_lock(&module_mutex); list_for_each_entry_rcu(mod, &modules, list) { if ((mod->module_core) && (mod->core_text_size)) { set_page_attributes(mod->module_core, mod->module_core + mod->core_text_size, set_memory_ro); } if ((mod->module_init) && (mod->init_text_size)) { set_page_attributes(mod->module_init, mod->module_init + mod->init_text_size, set_memory_ro); } } mutex_unlock(&module_mutex); } #else static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { } static void unset_module_core_ro_nx(struct module *mod) { } 
static void unset_module_init_ro_nx(struct module *mod) { } #endif void __weak module_free(struct module *mod, void *module_region) { vfree(module_region); } void __weak module_arch_cleanup(struct module *mod) { } /* Free a module, remove from lists, etc. */ static void free_module(struct module *mod) { trace_module_free(mod); /* Delete from various lists */ mutex_lock(&module_mutex); stop_machine(__unlink_module, mod, NULL); mutex_unlock(&module_mutex); mod_sysfs_teardown(mod); /* Remove dynamic debug info */ ddebug_remove_module(mod->name); /* Arch-specific cleanup. */ module_arch_cleanup(mod); /* Module unload stuff */ module_unload_free(mod); /* Free any allocated parameters. */ destroy_params(mod->kp, mod->num_kp); /* This may be NULL, but that's OK */ unset_module_init_ro_nx(mod); module_free(mod, mod->module_init); kfree(mod->args); percpu_modfree(mod); /* Free lock-classes: */ lockdep_free_key_range(mod->module_core, mod->core_size); /* Finally, free the core (containing the module structure) */ unset_module_core_ro_nx(mod); module_free(mod, mod->module_core); #ifdef CONFIG_MPU update_protections(current->mm); #endif } void *__symbol_get(const char *symbol) { struct module *owner; const struct kernel_symbol *sym; preempt_disable(); sym = find_symbol(symbol, &owner, NULL, true, true); if (sym && strong_try_module_get(owner)) sym = NULL; preempt_enable(); return sym ? (void *)sym->value : NULL; } EXPORT_SYMBOL_GPL(__symbol_get); /* * Ensure that an exported symbol [global namespace] does not already exist * in the kernel or in some other module's exported symbol table. * * You must hold the module_mutex. 
*/ static int verify_export_symbols(struct module *mod) { unsigned int i; struct module *owner; const struct kernel_symbol *s; struct { const struct kernel_symbol *sym; unsigned int num; } arr[] = { { mod->syms, mod->num_syms }, { mod->gpl_syms, mod->num_gpl_syms }, { mod->gpl_future_syms, mod->num_gpl_future_syms }, #ifdef CONFIG_UNUSED_SYMBOLS { mod->unused_syms, mod->num_unused_syms }, { mod->unused_gpl_syms, mod->num_unused_gpl_syms }, #endif }; for (i = 0; i < ARRAY_SIZE(arr); i++) { for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) { if (find_symbol(s->name, &owner, NULL, true, false)) { printk(KERN_ERR "%s: exports duplicate symbol %s" " (owned by %s)\n", mod->name, s->name, module_name(owner)); return -ENOEXEC; } } } return 0; } /* Change all symbols so that st_value encodes the pointer directly. */ static int simplify_symbols(struct module *mod, const struct load_info *info) { Elf_Shdr *symsec = &info->sechdrs[info->index.sym]; Elf_Sym *sym = (void *)symsec->sh_addr; unsigned long secbase; unsigned int i; int ret = 0; const struct kernel_symbol *ksym; for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) { const char *name = info->strtab + sym[i].st_name; switch (sym[i].st_shndx) { case SHN_COMMON: /* We compiled with -fno-common. These are not supposed to happen. */ pr_debug("Common symbol: %s\n", name); printk("%s: please compile with -fno-common\n", mod->name); ret = -ENOEXEC; break; case SHN_ABS: /* Don't need to do anything */ pr_debug("Absolute symbol: 0x%08lx\n", (long)sym[i].st_value); break; case SHN_UNDEF: ksym = resolve_symbol_wait(mod, info, name); /* Ok if resolved. */ if (ksym && !IS_ERR(ksym)) { sym[i].st_value = ksym->value; break; } /* Ok if weak. */ if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK) break; printk(KERN_WARNING "%s: Unknown symbol %s (err %li)\n", mod->name, name, PTR_ERR(ksym)); ret = PTR_ERR(ksym) ?: -ENOENT; break; default: /* Divert to percpu allocation if a percpu var. 
*/ if (sym[i].st_shndx == info->index.pcpu) secbase = (unsigned long)mod_percpu(mod); else secbase = info->sechdrs[sym[i].st_shndx].sh_addr; sym[i].st_value += secbase; break; } } return ret; } int __weak apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) { pr_err("module %s: REL relocation unsupported\n", me->name); return -ENOEXEC; } int __weak apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) { pr_err("module %s: RELA relocation unsupported\n", me->name); return -ENOEXEC; } static int apply_relocations(struct module *mod, const struct load_info *info) { unsigned int i; int err = 0; /* Now do relocations. */ for (i = 1; i < info->hdr->e_shnum; i++) { unsigned int infosec = info->sechdrs[i].sh_info; /* Not a valid relocation section? */ if (infosec >= info->hdr->e_shnum) continue; /* Don't bother with non-allocated sections */ if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC)) continue; if (info->sechdrs[i].sh_type == SHT_REL) err = apply_relocate(info->sechdrs, info->strtab, info->index.sym, i, mod); else if (info->sechdrs[i].sh_type == SHT_RELA) err = apply_relocate_add(info->sechdrs, info->strtab, info->index.sym, i, mod); if (err < 0) break; } return err; } /* Additional bytes needed by arch in front of individual sections */ unsigned int __weak arch_mod_section_prepend(struct module *mod, unsigned int section) { /* default implementation just returns zero */ return 0; } /* Update size with this section: return offset. */ static long get_offset(struct module *mod, unsigned int *size, Elf_Shdr *sechdr, unsigned int section) { long ret; *size += arch_mod_section_prepend(mod, section); ret = ALIGN(*size, sechdr->sh_addralign ?: 1); *size = ret + sechdr->sh_size; return ret; } /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld might -- code, read-only data, read-write data, small data. 
Tally sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
/*
 * layout_sections() - decide where each SHF_ALLOC section will live in the
 * final module image.  Sections are grouped by the flag masks below so that
 * code, rodata, writable data and "small" data end up contiguous; .init*
 * sections are laid out separately and their offsets are tagged with
 * INIT_OFFSET_MASK so that region can be freed after initialisation.
 */
static void layout_sections(struct module *mod, struct load_info *info)
{
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	unsigned int m, i;

	/* ~0UL marks a section whose offset has not been assigned yet. */
	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	pr_debug("Core section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || strstarts(sname, ".init"))
				continue;
			s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
			pr_debug("\t%s\n", sname);
		}
		switch (m) {
		case 0: /* executable */
			mod->core_size = debug_align(mod->core_size);
			mod->core_text_size = mod->core_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->core_size = debug_align(mod->core_size);
			mod->core_ro_size = mod->core_size;
			break;
		case 3: /* whole core */
			mod->core_size = debug_align(mod->core_size);
			break;
		}
	}

	pr_debug("Init section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || !strstarts(sname, ".init"))
				continue;
			s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
					 | INIT_OFFSET_MASK);
			pr_debug("\t%s\n", sname);
		}
		switch (m) {
		case 0: /* executable */
			mod->init_size = debug_align(mod->init_size);
			mod->init_text_size = mod->init_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->init_size = debug_align(mod->init_size);
			mod->init_ro_size = mod->init_size;
			break;
		case 3: /* whole init */
			mod->init_size = debug_align(mod->init_size);
			break;
		}
	}
}

/* Record the module's license string, tainting the kernel (and the module)
 * if it is not GPL-compatible.  A missing license is "unspecified". */
static void set_license(struct module *mod, const char *license)
{
	if (!license)
		license = "unspecified";

	if (!license_is_gpl_compatible(license)) {
		if (!test_taint(TAINT_PROPRIETARY_MODULE))
			printk(KERN_WARNING "%s: module license '%s' taints "
				"kernel.\n", mod->name, license);
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
	}
}

/* Parse tag=value strings from .modinfo section */
static char *next_string(char *string, unsigned long *secsize)
{
	/* Skip non-zero chars */
	while (string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}

	/* Skip any zero padding. */
	while (!string[0]) {
		string++;
		if ((*secsize)-- <= 1)
			return NULL;
	}
	return string;
}

/* Return the value part of the "tag=value" entry in .modinfo matching
 * @tag, or NULL if no such tag exists. */
static char *get_modinfo(struct load_info *info, const char *tag)
{
	char *p;
	unsigned int taglen = strlen(tag);
	Elf_Shdr *infosec = &info->sechdrs[info->index.info];
	unsigned long size = infosec->sh_size;

	for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
		if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
			return p + taglen + 1;
	}
	return NULL;
}

/* Invoke every modinfo attribute's setup hook with its .modinfo value. */
static void setup_modinfo(struct module *mod, struct load_info *info)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->setup)
			attr->setup(mod, get_modinfo(info, attr->attr.name));
	}
}

/* Release any state the modinfo attributes allocated in their setup hooks. */
static void free_modinfo(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = modinfo_attrs[i]); i++) {
		if (attr->free)
			attr->free(mod);
	}
}

#ifdef CONFIG_KALLSYMS

/* lookup symbol in given range of kernel_symbols */
static const struct kernel_symbol *lookup_symbol(const char *name,
	const struct kernel_symbol *start,
	const struct kernel_symbol *stop)
{
	return bsearch(name, start, stop - start,
			sizeof(struct kernel_symbol), cmp_name);
}

/* True if @name/@value matches an exported symbol of @mod (or of the
 * kernel proper when @mod is NULL). */
static int is_exported(const char *name, unsigned long value,
		       const struct module *mod)
{
	const struct kernel_symbol *ks;
	if (!mod)
		ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
	else
		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
	return ks != NULL && ks->value == value;
}

/* As per nm */
static char elf_type(const Elf_Sym *sym, const struct load_info *info)
{
	const Elf_Shdr *sechdrs = info->sechdrs;

	if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
		if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
			return 'v';
		else
			return 'w';
	}
	if (sym->st_shndx == SHN_UNDEF)
		return 'U';
	if (sym->st_shndx == SHN_ABS)
		return 'a';
	if (sym->st_shndx >= SHN_LORESERVE)
		return '?';
	if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
		return 't';
	if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
	    && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
		if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
			return 'r';
		else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 'g';
		else
			return 'd';
	}
	if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
		if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
			return 's';
		else
			return 'b';
	}
	if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
		      ".debug")) {
		return 'n';
	}
	return '?';
}

/* A "core" symbol survives module init: it must be named, belong to an
 * allocated section and (unless CONFIG_KALLSYMS_ALL) to executable text;
 * anything laid out in the init region is discarded. */
static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
			   unsigned int shnum)
{
	const Elf_Shdr *sec;

	if (src->st_shndx == SHN_UNDEF
	    || src->st_shndx >= shnum
	    || !src->st_name)
		return false;

	sec = sechdrs + src->st_shndx;
	if (!(sec->sh_flags & SHF_ALLOC)
#ifndef CONFIG_KALLSYMS_ALL
	    || !(sec->sh_flags & SHF_EXECINSTR)
#endif
	    || (sec->sh_entsize & INIT_OFFSET_MASK))
		return false;

	return true;
}

/*
 * We only allocate and copy the strings needed by the parts of symtab
 * we keep.  This is simple, but has the effect of making multiple
 * copies of duplicates.  We could be more sophisticated, see
 * linux-kernel thread starting with
 * <73defb5e4bca04a6431392cc341112b1@localhost>.
*/
/* layout_symtab() - reserve space for the symbol and string tables.
 * The full tables go at the end of the init region (freed after init);
 * room for the surviving "core" symbols and their strings is appended to
 * the core region, with offsets recorded in info->symoffs/stroffs for
 * add_kallsyms() to use later. */
static void layout_symtab(struct module *mod, struct load_info *info)
{
	Elf_Shdr *symsect = info->sechdrs + info->index.sym;
	Elf_Shdr *strsect = info->sechdrs + info->index.str;
	const Elf_Sym *src;
	unsigned int i, nsrc, ndst, strtab_size;

	/* Put symbol section at end of init part of module. */
	symsect->sh_flags |= SHF_ALLOC;
	symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
					 info->index.sym) | INIT_OFFSET_MASK;
	pr_debug("\t%s\n", info->secstrings + symsect->sh_name);

	src = (void *)info->hdr + symsect->sh_offset;
	nsrc = symsect->sh_size / sizeof(*src);

	/* Compute total space required for the core symbols' strtab.
	 * Counts start at 1: slot 0 is the mandatory null symbol/byte. */
	for (ndst = i = strtab_size = 1; i < nsrc; ++i, ++src)
		if (is_core_symbol(src, info->sechdrs, info->hdr->e_shnum)) {
			strtab_size += strlen(&info->strtab[src->st_name]) + 1;
			ndst++;
		}

	/* Append room for core symbols at end of core part. */
	info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
	info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
	mod->core_size += strtab_size;

	/* Put string table section at end of init part of module. */
	strsect->sh_flags |= SHF_ALLOC;
	strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
					 info->index.str) | INIT_OFFSET_MASK;
	pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
}

/* Point the module at its (init-resident) symbol/string tables, replace
 * each st_info with an nm-style type letter, and copy the surviving core
 * symbols + strings into the space layout_symtab() reserved. */
static void add_kallsyms(struct module *mod, const struct load_info *info)
{
	unsigned int i, ndst;
	const Elf_Sym *src;
	Elf_Sym *dst;
	char *s;
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];

	mod->symtab = (void *)symsec->sh_addr;
	mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
	/* Make sure we get permanent strtab: don't use info->strtab. */
	mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;

	/* Set types up while we still have access to sections. */
	for (i = 0; i < mod->num_symtab; i++)
		mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);

	mod->core_symtab = dst = mod->module_core + info->symoffs;
	mod->core_strtab = s = mod->module_core + info->stroffs;
	src = mod->symtab;
	/* Entry 0 is the null symbol; copy it and the leading NUL byte. */
	*dst = *src;
	*s++ = 0;
	for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
		if (!is_core_symbol(src, info->sechdrs, info->hdr->e_shnum))
			continue;
		dst[ndst] = *src;
		dst[ndst++].st_name = s - mod->core_strtab;
		s += strlcpy(s, &mod->strtab[src->st_name],
			     KSYM_NAME_LEN) + 1;
	}
	mod->core_num_syms = ndst;
}
#else
static inline void layout_symtab(struct module *mod, struct load_info *info)
{
}

static void add_kallsyms(struct module *mod, const struct load_info *info)
{
}
#endif /* CONFIG_KALLSYMS */

/* Register the module's __verbose descriptors with dynamic debug. */
static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
{
	if (!debug)
		return;
#ifdef CONFIG_DYNAMIC_DEBUG
	if (ddebug_add_module(debug, num, debug->modname))
		printk(KERN_ERR "dynamic debug error adding module: %s\n",
			debug->modname);
#endif
}

static void dynamic_debug_remove(struct _ddebug *debug)
{
	if (debug)
		ddebug_remove_module(debug->modname);
}

/* Default allocator for module images; arches may override. */
void * __weak module_alloc(unsigned long size)
{
	return size == 0 ? NULL : vmalloc_exec(size);
}

/* module_alloc() plus bookkeeping of the [module_addr_min, module_addr_max]
 * range used by the module-address predicates. */
static void *module_alloc_update_bounds(unsigned long size)
{
	void *ret = module_alloc(size);

	if (ret) {
		mutex_lock(&module_mutex);
		/* Update module bounds.
*/
		if ((unsigned long)ret < module_addr_min)
			module_addr_min = (unsigned long)ret;
		if ((unsigned long)ret + size > module_addr_max)
			module_addr_max = (unsigned long)ret + size;
		mutex_unlock(&module_mutex);
	}
	return ret;
}

#ifdef CONFIG_DEBUG_KMEMLEAK
/* Register the module's data/bss sections with kmemleak so that pointers
 * they hold are scanned. */
static void kmemleak_load_module(const struct module *mod,
				 const struct load_info *info)
{
	unsigned int i;

	/* only scan the sections containing data */
	kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);

	for (i = 1; i < info->hdr->e_shnum; i++) {
		const char *name = info->secstrings + info->sechdrs[i].sh_name;
		if (!(info->sechdrs[i].sh_flags & SHF_ALLOC))
			continue;
		if (!strstarts(name, ".data") && !strstarts(name, ".bss"))
			continue;

		kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
				   info->sechdrs[i].sh_size, GFP_KERNEL);
	}
}
#else
static inline void kmemleak_load_module(const struct module *mod,
					const struct load_info *info)
{
}
#endif

/* Sets info->hdr and info->len. */
static int copy_and_check(struct load_info *info,
			  const void __user *umod, unsigned long len,
			  const char __user *uargs)
{
	int err;
	Elf_Ehdr *hdr;

	if (len < sizeof(*hdr))
		return -ENOEXEC;

	/* Suck in entire file: we'll want most of it. */
	if ((hdr = vmalloc(len)) == NULL)
		return -ENOMEM;

	if (copy_from_user(hdr, umod, len) != 0) {
		err = -EFAULT;
		goto free_hdr;
	}

	/* Sanity checks against insmoding binaries or wrong arch,
	   weird elf version */
	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
	    || hdr->e_type != ET_REL
	    || !elf_check_arch(hdr)
	    || hdr->e_shentsize != sizeof(Elf_Shdr)) {
		err = -ENOEXEC;
		goto free_hdr;
	}

	/* Section table must lie entirely inside the copied image.
	 * NOTE(review): e_shoff + e_shnum * sizeof(Elf_Shdr) could wrap on
	 * 32-bit with a crafted header; later upstream kernels harden this
	 * check — worth confirming for this tree. */
	if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
		err = -ENOEXEC;
		goto free_hdr;
	}

	info->hdr = hdr;
	info->len = len;
	return 0;

free_hdr:
	vfree(hdr);
	return err;
}

static void free_copy(struct load_info *info)
{
	vfree(info->hdr);
}

/* Validate each section header against the copied image and rewrite
 * sh_addr to point into the temporary copy. */
static int rewrite_section_headers(struct load_info *info)
{
	unsigned int i;

	/* This should always be true, but let's be sure. */
	info->sechdrs[0].sh_addr = 0;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		if (shdr->sh_type != SHT_NOBITS
		    && info->len < shdr->sh_offset + shdr->sh_size) {
			printk(KERN_ERR "Module len %lu truncated\n",
			       info->len);
			return -ENOEXEC;
		}

		/* Mark all sections sh_addr with their address in the
		   temporary image. */
		shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;

#ifndef CONFIG_MODULE_UNLOAD
		/* Don't load .exit sections */
		if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
			shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
#endif
	}

	/* Track but don't keep modinfo and version sections. */
	info->index.vers = find_sec(info, "__versions");
	info->index.info = find_sec(info, ".modinfo");
	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
	return 0;
}

/*
 * Set up our basic convenience variables (pointers to section headers,
 * search for module section index etc), and do some basic section
 * verification.
 *
 * Return the temporary module pointer (we'll replace it with the final
 * one when we move the module sections around).
 */
static struct module *setup_load_info(struct load_info *info)
{
	unsigned int i;
	int err;
	struct module *mod;

	/* Set up the convenience variables */
	info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
	info->secstrings = (void *)info->hdr
		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset;

	err = rewrite_section_headers(info);
	if (err)
		return ERR_PTR(err);

	/* Find internal symbols and strings. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
			info->index.sym = i;
			info->index.str = info->sechdrs[i].sh_link;
			info->strtab = (char *)info->hdr
				+ info->sechdrs[info->index.str].sh_offset;
			break;
		}
	}

	info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
	if (!info->index.mod) {
		printk(KERN_WARNING "No module found in object\n");
		return ERR_PTR(-ENOEXEC);
	}
	/* This is temporary: point mod into copy of data. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;

	if (info->index.sym == 0) {
		printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
		       mod->name);
		return ERR_PTR(-ENOEXEC);
	}

	info->index.pcpu = find_pcpusec(info);

	/* Check module struct version now, before we try to use module. */
	if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
		return ERR_PTR(-ENOEXEC);

	return mod;
}

/* Validate vermagic/intree/staging/license entries found in .modinfo. */
static int check_modinfo(struct module *mod, struct load_info *info)
{
	const char *modmagic = get_modinfo(info, "vermagic");
	int err;

	/* This is allowed: modprobe --force will invalidate it.
*/
	if (!modmagic) {
		err = try_to_force_load(mod, "bad vermagic");
		if (err)
			return err;
	} else if (!same_magic(modmagic, vermagic, info->index.vers)) {
		printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
		       mod->name, modmagic, vermagic);
		return -ENOEXEC;
	}

	/* Modules built outside the kernel tree taint, but still load. */
	if (!get_modinfo(info, "intree"))
		add_taint_module(mod, TAINT_OOT_MODULE);

	if (get_modinfo(info, "staging")) {
		add_taint_module(mod, TAINT_CRAP);
		printk(KERN_WARNING "%s: module is from the staging directory,"
		       " the quality is unknown, you have been warned.\n",
		       mod->name);
	}

	/* Set up license info based on the info section */
	set_license(mod, get_modinfo(info, "license"));

	return 0;
}

/* Locate the optional, well-known sections of the module and cache their
 * pointers/counts in struct module.  A missing section leaves the pointer
 * NULL and the count zero (section 0 always has size 0). */
static void find_module_sections(struct module *mod, struct load_info *info)
{
	mod->kp = section_objs(info, "__param",
			       sizeof(*mod->kp), &mod->num_kp);
	mod->syms = section_objs(info, "__ksymtab",
				 sizeof(*mod->syms), &mod->num_syms);
	mod->crcs = section_addr(info, "__kcrctab");
	mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
				     sizeof(*mod->gpl_syms),
				     &mod->num_gpl_syms);
	mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
	mod->gpl_future_syms = section_objs(info, "__ksymtab_gpl_future",
					    sizeof(*mod->gpl_future_syms),
					    &mod->num_gpl_future_syms);
	mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");

#ifdef CONFIG_UNUSED_SYMBOLS
	mod->unused_syms = section_objs(info, "__ksymtab_unused",
					sizeof(*mod->unused_syms),
					&mod->num_unused_syms);
	mod->unused_crcs = section_addr(info, "__kcrctab_unused");
	mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
					    sizeof(*mod->unused_gpl_syms),
					    &mod->num_unused_gpl_syms);
	mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
#endif
#ifdef CONFIG_CONSTRUCTORS
	mod->ctors = section_objs(info, ".ctors",
				  sizeof(*mod->ctors), &mod->num_ctors);
#endif

#ifdef CONFIG_TRACEPOINTS
	mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
					     sizeof(*mod->tracepoints_ptrs),
					     &mod->num_tracepoints);
#endif
#ifdef HAVE_JUMP_LABEL
	mod->jump_entries = section_objs(info, "__jump_table",
					 sizeof(*mod->jump_entries),
					 &mod->num_jump_entries);
#endif
#ifdef CONFIG_EVENT_TRACING
	mod->trace_events = section_objs(info, "_ftrace_events",
					 sizeof(*mod->trace_events),
					 &mod->num_trace_events);
	/*
	 * This section contains pointers to allocated objects in the trace
	 * code and not scanning it leads to false positives.
	 */
	kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
			   mod->num_trace_events, GFP_KERNEL);
#endif
#ifdef CONFIG_TRACING
	mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
					 sizeof(*mod->trace_bprintk_fmt_start),
					 &mod->num_trace_bprintk_fmt);
	/*
	 * This section contains pointers to allocated objects in the trace
	 * code and not scanning it leads to false positives.
	 */
	kmemleak_scan_area(mod->trace_bprintk_fmt_start,
			   sizeof(*mod->trace_bprintk_fmt_start) *
			   mod->num_trace_bprintk_fmt, GFP_KERNEL);
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
	/* sechdrs[0].sh_size is always zero */
	mod->ftrace_callsites = section_objs(info, "__mcount_loc",
					     sizeof(*mod->ftrace_callsites),
					     &mod->num_ftrace_callsites);
#endif

	mod->extable = section_objs(info, "__ex_table",
				    sizeof(*mod->extable),
				    &mod->num_exentries);

	if (section_addr(info, "__obsparm"))
		printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
		       mod->name);

	info->debug = section_objs(info, "__verbose",
				   sizeof(*info->debug), &info->num_debug);
}

/* Allocate the final core and init memory regions and copy every
 * SHF_ALLOC section into its laid-out position, updating sh_addr to the
 * final location. */
static int move_module(struct module *mod, struct load_info *info)
{
	int i;
	void *ptr;

	/* Do the allocs. */
	ptr = module_alloc_update_bounds(mod->core_size);
	/*
	 * The pointer to this block is stored in the module structure
	 * which is inside the block. Just mark it as not being a
	 * leak.
	 */
	kmemleak_not_leak(ptr);
	if (!ptr)
		return -ENOMEM;

	memset(ptr, 0, mod->core_size);
	mod->module_core = ptr;

	ptr = module_alloc_update_bounds(mod->init_size);
	/*
	 * The pointer to this block is stored in the module structure
	 * which is inside the block. This block doesn't need to be
	 * scanned as it contains data and code that will be freed
	 * after the module is initialized.
	 */
	kmemleak_ignore(ptr);
	if (!ptr && mod->init_size) {
		module_free(mod, mod->module_core);
		return -ENOMEM;
	}
	/* NOTE(review): when init_size == 0, ptr may be NULL here and
	 * memset(NULL, 0, 0) is invoked; harmless in practice, but later
	 * upstream kernels guard this — worth confirming. */
	memset(ptr, 0, mod->init_size);
	mod->module_init = ptr;

	/* Transfer each section which specifies SHF_ALLOC */
	pr_debug("final section addresses:\n");
	for (i = 0; i < info->hdr->e_shnum; i++) {
		void *dest;
		Elf_Shdr *shdr = &info->sechdrs[i];

		if (!(shdr->sh_flags & SHF_ALLOC))
			continue;

		if (shdr->sh_entsize & INIT_OFFSET_MASK)
			dest = mod->module_init
				+ (shdr->sh_entsize & ~INIT_OFFSET_MASK);
		else
			dest = mod->module_core + shdr->sh_entsize;

		if (shdr->sh_type != SHT_NOBITS)
			memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
		/* Update sh_addr to point to copy in image. */
		shdr->sh_addr = (unsigned long)dest;
		pr_debug("\t0x%lx %s\n",
			 (long)shdr->sh_addr,
			 info->secstrings + shdr->sh_name);
	}

	return 0;
}

static int check_module_license_and_versions(struct module *mod)
{
	/*
	 * ndiswrapper is under GPL by itself, but loads proprietary modules.
	 * Don't use add_taint_module(), as it would prevent ndiswrapper from
	 * using GPL-only symbols it needs.
*/
	if (strcmp(mod->name, "ndiswrapper") == 0)
		add_taint(TAINT_PROPRIETARY_MODULE);

	/* driverloader was caught wrongly pretending to be under GPL */
	if (strcmp(mod->name, "driverloader") == 0)
		add_taint_module(mod, TAINT_PROPRIETARY_MODULE);

#ifdef CONFIG_MODVERSIONS
	/* Every export table must have a matching CRC table. */
	if ((mod->num_syms && !mod->crcs)
	    || (mod->num_gpl_syms && !mod->gpl_crcs)
	    || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
#ifdef CONFIG_UNUSED_SYMBOLS
	    || (mod->num_unused_syms && !mod->unused_crcs)
	    || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
#endif
		) {
		return try_to_force_load(mod,
					 "no versions for exported symbols");
	}
#endif
	return 0;
}

/* Flush the instruction cache for the freshly-written module text. */
static void flush_module_icache(const struct module *mod)
{
	mm_segment_t old_fs;

	/* flush the icache in correct context */
	old_fs = get_fs();
	set_fs(KERNEL_DS);

	/*
	 * Flush the instruction cache, since we've played with text.
	 * Do it before processing of module parameters, so the module
	 * can provide parameter accessor functions of its own.
	 */
	if (mod->module_init)
		flush_icache_range((unsigned long)mod->module_init,
				   (unsigned long)mod->module_init
				   + mod->init_size);
	flush_icache_range((unsigned long)mod->module_core,
			   (unsigned long)mod->module_core + mod->core_size);

	set_fs(old_fs);
}

/* Arch hook: adjust section contents/sizes before layout; default no-op. */
int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
				     Elf_Shdr *sechdrs,
				     char *secstrings,
				     struct module *mod)
{
	return 0;
}

/* Lay out the module image, allocate the final memory and copy sections
 * in.  On success returns the module pointer relocated into the final
 * image (the temporary one from setup_load_info() is dead). */
static struct module *layout_and_allocate(struct load_info *info)
{
	/* Module within temporary copy. */
	struct module *mod;
	Elf_Shdr *pcpusec;
	int err;

	mod = setup_load_info(info);
	if (IS_ERR(mod))
		return mod;

	err = check_modinfo(mod, info);
	if (err)
		return ERR_PTR(err);

	/* Allow arches to frob section contents and sizes.  */
	err = module_frob_arch_sections(info->hdr, info->sechdrs,
					info->secstrings, mod);
	if (err < 0)
		goto out;

	pcpusec = &info->sechdrs[info->index.pcpu];
	if (pcpusec->sh_size) {
		/* We have a special allocation for this section. */
		err = percpu_modalloc(mod,
				      pcpusec->sh_size,
				      pcpusec->sh_addralign);
		if (err)
			goto out;
		pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
	}

	/* Determine total sizes, and put offsets in sh_entsize.  For now
	   this is done generically; there doesn't appear to be any
	   special cases for the architectures. */
	layout_sections(mod, info);
	layout_symtab(mod, info);

	/* Allocate and move to the final place */
	err = move_module(mod, info);
	if (err)
		goto free_percpu;

	/* Module has been copied to its final place now: return it. */
	mod = (void *)info->sechdrs[info->index.mod].sh_addr;
	kmemleak_load_module(mod, info);
	return mod;

free_percpu:
	percpu_modfree(mod);
out:
	return ERR_PTR(err);
}

/* mod is no longer valid after this! */
static void module_deallocate(struct module *mod, struct load_info *info)
{
	percpu_modfree(mod);
	module_free(mod, mod->module_init);
	module_free(mod, mod->module_core);
}

/* Arch hook: finish arch-specific module setup; default no-op. */
int __weak module_finalize(const Elf_Ehdr *hdr,
			   const Elf_Shdr *sechdrs,
			   struct module *me)
{
	return 0;
}

static int post_relocation(struct module *mod, const struct load_info *info)
{
	/* Sort exception table now relocations are done. */
	sort_extable(mod->extable, mod->extable + mod->num_exentries);

	/* Copy relocated percpu area over. */
	percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
		       info->sechdrs[info->index.pcpu].sh_size);

	/* Setup kallsyms-specific fields. */
	add_kallsyms(mod, info);

	/* Arch-specific module finalizing. */
	return module_finalize(info->hdr, info->sechdrs, mod);
}

#ifdef CONFIG_MODULE_EXTRA_COPY
/* Make an extra copy of the module. */
static int make_extra_copy(Elf_Ehdr *elf_hdr, unsigned long elf_len,
			   void **extra_copy)
{
	void *dest = *extra_copy = vmalloc(elf_len);

	if (dest == NULL)
		return -ENOMEM;
	memcpy(dest, elf_hdr, elf_len);
	return 0;
}

/* Keep the linked copy as well as the raw copy, in case the
 * module wants to inspect both.  Returns non-zero so the caller knows
 * the raw copy must not be freed. */
static int keep_extra_copy_info(struct module *mod, void *extra_copy,
				Elf_Ehdr *elf_hdr, unsigned long elf_len)
{
	mod->raw_binary_ptr = extra_copy;
	mod->raw_binary_size = elf_len;
	mod->linked_binary_ptr = elf_hdr;
	mod->linked_binary_size = elf_len;
	return 1;
}

/* Release module extra copy information. */
static void cleanup_extra_copy_info(struct module *mod)
{
	vfree(mod->raw_binary_ptr);
	vfree(mod->linked_binary_ptr);
	mod->raw_binary_ptr = mod->linked_binary_ptr = NULL;
	mod->raw_binary_size = mod->linked_binary_size = 0;
}
#else /* !CONFIG_MODULE_EXTRA_COPY */
static inline int make_extra_copy(Elf_Ehdr *elf_hdr, unsigned long elf_len,
				  void **extra_copy)
{
	*extra_copy = NULL;
	return 0;
}

static inline int keep_extra_copy_info(struct module *mod, void *extra_copy,
				       Elf_Ehdr *elf_hdr,
				       unsigned long elf_len)
{
	return 0;
}

static inline void cleanup_extra_copy_info(struct module *mod)
{
}
#endif /* CONFIG_MODULE_EXTRA_COPY */

/* Allocate and load the module: note that size of section 0 is always
   zero, and we rely on this for optional sections. */
static struct module *load_module(void __user *umod,
				  unsigned long len,
				  const char __user *uargs)
{
	struct load_info info = { NULL, };
	struct module *mod;
	long err;
	void *extra_copy = NULL;

	pr_debug("load_module: umod=%p, len=%lu, uargs=%p\n",
		 umod, len, uargs);

	/* Copy in the blobs from userspace, check they are vaguely sane. */
	err = copy_and_check(&info, umod, len, uargs);
	if (err)
		return ERR_PTR(err);

	/* check module hash */
	err = check_module_hash(info.hdr, info.len);
	if (err)
		goto free_copy;

	/* Make extra copy of the module, if needed. */
	err = make_extra_copy(info.hdr, info.len, &extra_copy);
	if (err)
		goto free_copy;

	/* Figure out module layout, and allocate all the memory. */
	mod = layout_and_allocate(&info);
	if (IS_ERR(mod)) {
		err = PTR_ERR(mod);
		goto free_extra_copy;
	}

	/* Now module is in final location, initialize linked lists, etc.
*/
	err = module_unload_init(mod);
	if (err)
		goto free_module;

	/* Now we've got everything in the final locations, we can
	 * find optional sections. */
	find_module_sections(mod, &info);

	err = check_module_license_and_versions(mod);
	if (err)
		goto free_unload;

	/* Set up MODINFO_ATTR fields */
	setup_modinfo(mod, &info);

	/* Fix up syms, so that st_value is a pointer to location. */
	err = simplify_symbols(mod, &info);
	if (err < 0)
		goto free_modinfo;

	err = apply_relocations(mod, &info);
	if (err < 0)
		goto free_modinfo;

	err = post_relocation(mod, &info);
	if (err < 0)
		goto free_modinfo;

	flush_module_icache(mod);

	/* Now copy in args */
	mod->args = strndup_user(uargs, ~0UL >> 1);
	if (IS_ERR(mod->args)) {
		err = PTR_ERR(mod->args);
		goto free_arch_cleanup;
	}

	/* Mark state as coming so strong_try_module_get() ignores us. */
	mod->state = MODULE_STATE_COMING;

	/* Now sew it into the lists so we can get lockdep and oops
	 * info during argument parsing. No one should access us, since
	 * strong_try_module_get() will fail.
	 * lockdep/oops can run asynchronous, so use the RCU list insertion
	 * function to insert in a way safe to concurrent readers.
	 * The mutex protects against concurrent writers.
	 */
	mutex_lock(&module_mutex);
	if (find_module(mod->name)) {
		err = -EEXIST;
		goto unlock;
	}

	/* This has to be done once we're sure module name is unique. */
	dynamic_debug_setup(info.debug, info.num_debug);

	/* Find duplicate symbols */
	err = verify_export_symbols(mod);
	if (err < 0)
		goto ddebug;

	module_bug_finalize(info.hdr, info.sechdrs, mod);
	list_add_rcu(&mod->list, &modules);
	mutex_unlock(&module_mutex);

	/* Module is ready to execute: parsing args may do that. */
	err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
			 -32768, 32767, NULL);
	if (err < 0)
		goto unlink;

	/* Link in to sysfs. */
	err = mod_sysfs_setup(mod, &info, mod->kp, mod->num_kp);
	if (err < 0)
		goto unlink;

	/* Keep extra copy information, if needed. */
	if (!keep_extra_copy_info(mod, extra_copy, info.hdr, info.len)) {
		/* Get rid of temporary copy. */
		free_copy(&info);
	}

	/* Done! */
	trace_module_load(mod);
	return mod;

	/* Error unwinding: each label undoes the steps above it. */
unlink:
	mutex_lock(&module_mutex);
	/* Unlink carefully: kallsyms could be walking list. */
	list_del_rcu(&mod->list);
	module_bug_cleanup(mod);
ddebug:
	dynamic_debug_remove(info.debug);
unlock:
	mutex_unlock(&module_mutex);
	synchronize_sched();
	kfree(mod->args);
free_arch_cleanup:
	module_arch_cleanup(mod);
free_modinfo:
	free_modinfo(mod);
free_unload:
	module_unload_free(mod);
free_module:
	module_deallocate(mod, &info);
free_extra_copy:
	vfree(extra_copy);
free_copy:
	free_copy(&info);
	return ERR_PTR(err);
}

/* Call module constructors. */
static void do_mod_ctors(struct module *mod)
{
#ifdef CONFIG_CONSTRUCTORS
	unsigned long i;

	for (i = 0; i < mod->num_ctors; i++)
		mod->ctors[i]();
#endif
}

/* This is where the real work happens */
SYSCALL_DEFINE3(init_module, void __user *, umod,
		unsigned long, len, const char __user *, uargs)
{
	struct module *mod;
	int ret = 0;

	/* Must have permission */
	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	/* Do all the hard work */
	mod = load_module(umod, len, uargs);
	if (IS_ERR(mod))
		return PTR_ERR(mod);

	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_COMING, mod);

	/* Set RO and NX regions for core */
	set_section_ro_nx(mod->module_core,
			  mod->core_text_size,
			  mod->core_ro_size,
			  mod->core_size);

	/* Set RO and NX regions for init */
	set_section_ro_nx(mod->module_init,
			  mod->init_text_size,
			  mod->init_ro_size,
			  mod->init_size);

	do_mod_ctors(mod);
	/* Start the module */
	if (mod->init != NULL)
		ret = do_one_initcall(mod->init);

	cleanup_extra_copy_info(mod);
	if (ret < 0) {
		/* Init routine failed: abort.  Try to protect us from
		   buggy refcounters. */
		mod->state = MODULE_STATE_GOING;
		synchronize_sched();
		module_put(mod);
		blocking_notifier_call_chain(&module_notify_list,
					     MODULE_STATE_GOING, mod);
		free_module(mod);
		wake_up(&module_wq);
		return ret;
	}
	if (ret > 0) {
		printk(KERN_WARNING
"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
"%s: loading module anyway...\n",
		       __func__, mod->name, ret,
		       __func__);
		dump_stack();
	}

	/* Now it's a first class citizen!  Wake up anyone waiting for it. */
	mod->state = MODULE_STATE_LIVE;
	wake_up(&module_wq);
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_LIVE, mod);

	/* We need to finish all async code before the module init sequence
	   is done */
	async_synchronize_full();

	mutex_lock(&module_mutex);
	/* Drop initial reference. */
	module_put(mod);
	trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
	/* Switch to the trimmed "core" symbol table before init is freed. */
	mod->num_symtab = mod->core_num_syms;
	mod->symtab = mod->core_symtab;
	mod->strtab = mod->core_strtab;
#endif
	unset_module_init_ro_nx(mod);
	module_free(mod, mod->module_init);
	mod->module_init = NULL;
	mod->init_size = 0;
	mod->init_ro_size = 0;
	mod->init_text_size = 0;
	mutex_unlock(&module_mutex);

	return 0;
}

static inline int within(unsigned long addr, void *start, unsigned long size)
{
	return ((void *)addr >= start && (void *)addr < start + size);
}

#ifdef CONFIG_KALLSYMS
/*
 * This ignores the intensely annoying "mapping symbols" found
 * in ARM ELF files: $a, $t and $d.
 */
static inline int is_arm_mapping_symbol(const char *str)
{
	return str[0] == '$' && strchr("atd", str[1])
	       && (str[2] == '\0' || str[2] == '.');
}

/* Find the nearest symbol in @mod preceding @addr; optionally return its
 * size and the offset of @addr within it. */
static const char *get_ksymbol(struct module *mod,
			       unsigned long addr,
			       unsigned long *size,
			       unsigned long *offset)
{
	unsigned int i, best = 0;
	unsigned long nextval;

	/* At worse, next value is at end of module */
	if (within_module_init(addr, mod))
		nextval = (unsigned long)mod->module_init+mod->init_text_size;
	else
		nextval = (unsigned long)mod->module_core+mod->core_text_size;

	/* Scan for closest preceding symbol, and next symbol.
(ELF starts real symbols at 1). */
	for (i = 1; i < mod->num_symtab; i++) {
		if (mod->symtab[i].st_shndx == SHN_UNDEF)
			continue;

		/* We ignore unnamed symbols: they're uninformative
		 * and inserted at a whim. */
		if (mod->symtab[i].st_value <= addr
		    && mod->symtab[i].st_value > mod->symtab[best].st_value
		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
		    && !is_arm_mapping_symbol(mod->strtab
					      + mod->symtab[i].st_name))
			best = i;
		if (mod->symtab[i].st_value > addr
		    && mod->symtab[i].st_value < nextval
		    && *(mod->strtab + mod->symtab[i].st_name) != '\0'
		    && !is_arm_mapping_symbol(mod->strtab
					      + mod->symtab[i].st_name))
			nextval = mod->symtab[i].st_value;
	}

	if (!best)
		return NULL;

	if (size)
		*size = nextval - mod->symtab[best].st_value;
	if (offset)
		*offset = addr - mod->symtab[best].st_value;
	return mod->strtab + mod->symtab[best].st_name;
}

/* For kallsyms to ask for address resolution.  NULL means not found.  Careful
 * not to lock to avoid deadlock on oopses, simply disable preemption. */
const char *module_address_lookup(unsigned long addr,
				  unsigned long *size,
				  unsigned long *offset,
				  char **modname,
				  char *namebuf)
{
	struct module *mod;
	const char *ret = NULL;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module_init(addr, mod) ||
		    within_module_core(addr, mod)) {
			if (modname)
				*modname = mod->name;
			ret = get_ksymbol(mod, addr, size, offset);
			break;
		}
	}
	/* Make a copy in here where it's safe */
	if (ret) {
		/* NOTE(review): strncpy() does not NUL-terminate on
		 * truncation; callers appear to pass a zeroed
		 * KSYM_NAME_LEN buffer — verify. */
		strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
		ret = namebuf;
	}
	preempt_enable();
	return ret;
}

/* Resolve @addr to a symbol name only; 0 on success, -ERANGE if not in
 * any module. */
int lookup_module_symbol_name(unsigned long addr, char *symname)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module_init(addr, mod) ||
		    within_module_core(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, NULL, NULL);
			if (!sym)
				goto out;
			strlcpy(symname, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

/* As above, but also return size/offset/module name when requested. */
int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
			       unsigned long *offset, char *modname,
			       char *name)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (within_module_init(addr, mod) ||
		    within_module_core(addr, mod)) {
			const char *sym;

			sym = get_ksymbol(mod, addr, size, offset);
			if (!sym)
				goto out;
			if (modname)
				strlcpy(modname, mod->name, MODULE_NAME_LEN);
			if (name)
				strlcpy(name, sym, KSYM_NAME_LEN);
			preempt_enable();
			return 0;
		}
	}
out:
	preempt_enable();
	return -ERANGE;
}

/* Enumerate module symbols by global index (indices continue across
 * modules); -ERANGE when @symnum is past the last module symbol. */
int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
		       char *name, char *module_name, int *exported)
{
	struct module *mod;

	preempt_disable();
	list_for_each_entry_rcu(mod, &modules, list) {
		if (symnum < mod->num_symtab) {
			*value = mod->symtab[symnum].st_value;
			/* st_info holds an nm-style type letter, stored
			 * there by add_kallsyms(). */
			*type = mod->symtab[symnum].st_info;
			strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
				KSYM_NAME_LEN);
			strlcpy(module_name, mod->name, MODULE_NAME_LEN);
			*exported = is_exported(name, *value, mod);
			preempt_enable();
			return 0;
		}
		symnum -= mod->num_symtab;
	}
	preempt_enable();
	return -ERANGE;
}

/* Return the value of @name within @mod, skipping undefined ('U')
 * entries; 0 means not found. */
static unsigned long mod_find_symname(struct module *mod, const char *name)
{
	unsigned int i;

	for (i = 0; i < mod->num_symtab; i++)
		if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
		    mod->symtab[i].st_info != 'U')
			return mod->symtab[i].st_value;
	return 0;
}

/* Look for this name: can be of form module:name. */
unsigned long module_kallsyms_lookup_name(const char *name)
{
	struct module *mod;
	char *colon;
	unsigned long ret = 0;

	/* Don't lock: we're in enough trouble already. */
	preempt_disable();
	if ((colon = strchr(name, ':')) != NULL) {
		/* NOTE(review): temporarily writes a NUL into the caller's
		 * string, so @name must be writable storage. */
		*colon = '\0';
		if ((mod = find_module(name)) != NULL)
			ret = mod_find_symname(mod, colon+1);
		*colon = ':';
	} else {
		list_for_each_entry_rcu(mod, &modules, list)
			if ((ret = mod_find_symname(mod, name)) != 0)
				break;
	}
	preempt_enable();
	return ret;
}

/* Call @fn for every symbol of every module; stops early on non-zero
 * return.  Caller must hold module_mutex (plain list walk, no RCU). */
int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	list_for_each_entry(mod, &modules, list) {
		for (i = 0; i < mod->num_symtab; i++) {
			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
				 mod, mod->symtab[i].st_value);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
#endif /* CONFIG_KALLSYMS */

/* Build the "(taints[-+])" annotation shown in /proc/modules and oopses. */
static char *module_flags(struct module *mod, char *buf)
{
	int bx = 0;

	if (mod->taints ||
	    mod->state == MODULE_STATE_GOING ||
	    mod->state == MODULE_STATE_COMING) {
		buf[bx++] = '(';
		bx += module_flags_taint(mod, buf + bx);
		/* Show a - for module-is-being-unloaded */
		if (mod->state == MODULE_STATE_GOING)
			buf[bx++] = '-';
		/* Show a + for module-is-being-loaded */
		if (mod->state == MODULE_STATE_COMING)
			buf[bx++] = '+';
		buf[bx++] = ')';
	}
	buf[bx] = '\0';

	return buf;
}

#ifdef CONFIG_PROC_FS
/* Called by the /proc file system to return a list of modules. */
static void *m_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&module_mutex);
	return seq_list_start(&modules, *pos);
}

static void *m_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &modules, pos);
}

static void m_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&module_mutex);
}

/* Emit one /proc/modules line for the module at @p. */
static int m_show(struct seq_file *m, void *p)
{
	struct module *mod = list_entry(p, struct module, list);
	char buf[8];

	seq_printf(m, "%s %u",
		   mod->name, mod->init_size + mod->core_size);
	print_unload_info(m, mod);

	/* Informative for users. */
	seq_printf(m, " %s",
		   mod->state == MODULE_STATE_GOING ? "Unloading":
		   mod->state == MODULE_STATE_COMING ? "Loading":
		   "Live");
	/* Used by oprofile and other similar tools.
*/ seq_printf(m, " 0x%pK", mod->module_core); /* Taints info */ if (mod->taints) seq_printf(m, " %s", module_flags(mod, buf)); seq_printf(m, "\n"); return 0; } /* Format: modulename size refcount deps address Where refcount is a number or -, and deps is a comma-separated list of depends or -. */ static const struct seq_operations modules_op = { .start = m_start, .next = m_next, .stop = m_stop, .show = m_show }; static int modules_open(struct inode *inode, struct file *file) { return seq_open(file, &modules_op); } static const struct file_operations proc_modules_operations = { .open = modules_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init proc_modules_init(void) { proc_create("modules", 0, NULL, &proc_modules_operations); return 0; } module_init(proc_modules_init); #endif /* Given an address, look for it in the module exception tables. */ const struct exception_table_entry *search_module_extables(unsigned long addr) { const struct exception_table_entry *e = NULL; struct module *mod; preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) { if (mod->num_exentries == 0) continue; e = search_extable(mod->extable, mod->extable + mod->num_exentries - 1, addr); if (e) break; } preempt_enable(); /* Now, if we found one, we are running inside it now, hence we cannot unload the module, hence no refcnt needed. */ return e; } /* * is_module_address - is this address inside a module? * @addr: the address to check. * * See is_module_text_address() if you simply want to see if the address * is code (not data). */ bool is_module_address(unsigned long addr) { bool ret; preempt_disable(); ret = __module_address(addr) != NULL; preempt_enable(); return ret; } /* * __module_address - get the module which contains an address. * @addr: the address. * * Must be called with preempt disabled or module mutex held so that * module doesn't get freed during this. 
*/ struct module *__module_address(unsigned long addr) { struct module *mod; if (addr < module_addr_min || addr > module_addr_max) return NULL; list_for_each_entry_rcu(mod, &modules, list) if (within_module_core(addr, mod) || within_module_init(addr, mod)) return mod; return NULL; } EXPORT_SYMBOL_GPL(__module_address); /* * is_module_text_address - is this address inside module code? * @addr: the address to check. * * See is_module_address() if you simply want to see if the address is * anywhere in a module. See kernel_text_address() for testing if an * address corresponds to kernel or module code. */ bool is_module_text_address(unsigned long addr) { bool ret; preempt_disable(); ret = __module_text_address(addr) != NULL; preempt_enable(); return ret; } /* * __module_text_address - get the module whose code contains an address. * @addr: the address. * * Must be called with preempt disabled or module mutex held so that * module doesn't get freed during this. */ struct module *__module_text_address(unsigned long addr) { struct module *mod = __module_address(addr); if (mod) { /* Make sure it's within the text section. */ if (!within(addr, mod->module_init, mod->init_text_size) && !within(addr, mod->module_core, mod->core_text_size)) mod = NULL; } return mod; } EXPORT_SYMBOL_GPL(__module_text_address); /* Don't grab lock, we're oopsing. */ void print_modules(void) { struct module *mod; char buf[8]; printk(KERN_DEFAULT "Modules linked in:"); /* Most callers should already have preempt disabled, but make sure */ preempt_disable(); list_for_each_entry_rcu(mod, &modules, list) printk(" %s%s", mod->name, module_flags(mod, buf)); preempt_enable(); if (last_unloaded_module[0]) printk(" [last unloaded: %s]", last_unloaded_module); printk("\n"); } #ifdef CONFIG_MODVERSIONS /* Generate the signature for all relevant module structures here. * If these change, we don't want to try to parse the module. 
*/ void module_layout(struct module *mod, struct modversion_info *ver, struct kernel_param *kp, struct kernel_symbol *ks, struct tracepoint * const *tp) { } EXPORT_SYMBOL(module_layout); #endif
gpl-2.0
JPRasquin/Ubuntu14.04
drivers/net/dpa/NetCommSw/integrations/P3040_P4080_P5020/module_strings.c
16
3213
/* Copyright (c) 2008-2011 Freescale Semiconductor, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Module names for debug messages */ const char *moduleStrings[] = { "???" 
/* MODULE_UNKNOWN */ ,"MEM" /* MODULE_ */ ,"MM" /* MODULE_MM */ ,"CORE" /* MODULE_CORE */ ,"P4080" /* MODULE_P4080 */ ,"P4080-Platform" /* MODULE_P4080_PLTFRM */ ,"PM" /* MODULE_PM */ ,"MMU" /* MODULE_MMU */ ,"PIC" /* MODULE_PIC */ ,"L3 cache (CPC)" /* MODULE_CPC */ ,"DUART" /* MODULE_DUART */ ,"SerDes" /* MODULE_SERDES */ ,"PIO" /* MODULE_PIO */ ,"QM" /* MODULE_QM */ ,"BM" /* MODULE_BM */ ,"SEC" /* MODULE_SEC */ ,"LAW" /* MODULE_LAW */ ,"LBC" /* MODULE_LBC */ ,"PAMU" /* MODULE_PAMU */ ,"FM" /* MODULE_FM */ ,"FM-MURAM" /* MODULE_FM_MURAM */ ,"FM-PCD" /* MODULE_FM_PCD */ ,"FM-RTC" /* MODULE_FM_RTC */ ,"FM-MAC" /* MODULE_FM_MAC */ ,"FM-Port" /* MODULE_FM_PORT */ ,"DPA" /* MODULE_DPA */ };
gpl-2.0
HydraCompany/HydraKernel
drivers/net/can/xilinx_can.c
272
34378
/* Xilinx CAN device driver * * Copyright (C) 2012 - 2014 Xilinx, Inc. * Copyright (C) 2009 PetaLogix. All rights reserved. * * Description: * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clk.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/types.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/led.h> #define DRIVER_NAME "xilinx_can" /* CAN registers set */ enum xcan_reg { XCAN_SRR_OFFSET = 0x00, /* Software reset */ XCAN_MSR_OFFSET = 0x04, /* Mode select */ XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */ XCAN_BTR_OFFSET = 0x0C, /* Bit timing */ XCAN_ECR_OFFSET = 0x10, /* Error counter */ XCAN_ESR_OFFSET = 0x14, /* Error status */ XCAN_SR_OFFSET = 0x18, /* Status */ XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */ XCAN_IER_OFFSET = 0x20, /* Interrupt enable */ XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */ XCAN_TXFIFO_ID_OFFSET = 0x30,/* TX FIFO ID */ XCAN_TXFIFO_DLC_OFFSET = 0x34, /* TX FIFO DLC */ XCAN_TXFIFO_DW1_OFFSET = 0x38, /* TX FIFO Data Word 1 */ XCAN_TXFIFO_DW2_OFFSET = 0x3C, /* TX FIFO Data Word 2 */ XCAN_RXFIFO_ID_OFFSET = 0x50, /* RX FIFO ID */ XCAN_RXFIFO_DLC_OFFSET = 0x54, /* RX FIFO DLC */ XCAN_RXFIFO_DW1_OFFSET = 0x58, /* RX FIFO Data 
Word 1 */ XCAN_RXFIFO_DW2_OFFSET = 0x5C, /* RX FIFO Data Word 2 */ }; /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */ #define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */ #define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */ #define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */ #define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */ #define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */ #define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */ #define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */ #define XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */ #define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */ #define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */ #define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */ #define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */ #define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */ #define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */ #define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */ #define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */ #define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */ #define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */ #define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */ #define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */ #define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */ #define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */ #define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */ #define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */ #define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */ #define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */ #define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */ #define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */ #define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */ #define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */ #define XCAN_IXR_TXOK_MASK 0x00000002 /* TX 
successful intr */ #define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */ #define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */ #define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */ #define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */ #define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */ #define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */ #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */ #define XCAN_INTR_ALL (XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\ XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \ XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \ XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK) /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ #define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */ #define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */ #define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */ #define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */ #define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */ /* CAN frame length constants */ #define XCAN_FRAME_MAX_DATA_LEN 8 #define XCAN_TIMEOUT (1 * HZ) /** * struct xcan_priv - This definition define CAN driver instance * @can: CAN private data structure. 
* @tx_head: Tx CAN packets ready to send on the queue * @tx_tail: Tx CAN packets successfully sended on the queue * @tx_max: Maximum number packets the driver can send * @napi: NAPI structure * @read_reg: For reading data from CAN registers * @write_reg: For writing data to CAN registers * @dev: Network device data structure * @reg_base: Ioremapped address to registers * @irq_flags: For request_irq() * @bus_clk: Pointer to struct clk * @can_clk: Pointer to struct clk */ struct xcan_priv { struct can_priv can; unsigned int tx_head; unsigned int tx_tail; unsigned int tx_max; struct napi_struct napi; u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg); void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg, u32 val); struct net_device *dev; void __iomem *reg_base; unsigned long irq_flags; struct clk *bus_clk; struct clk *can_clk; }; /* CAN Bittiming constants as per Xilinx CAN specs */ static const struct can_bittiming_const xcan_bittiming_const = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; /** * xcan_write_reg_le - Write a value to the device register little endian * @priv: Driver private data structure * @reg: Register offset * @val: Value to write at the Register offset * * Write data to the paricular CAN register */ static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg, u32 val) { iowrite32(val, priv->reg_base + reg); } /** * xcan_read_reg_le - Read a value from the device register little endian * @priv: Driver private data structure * @reg: Register offset * * Read data from the particular CAN register * Return: value read from the CAN register */ static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg) { return ioread32(priv->reg_base + reg); } /** * xcan_write_reg_be - Write a value to the device register big endian * @priv: Driver private data structure * @reg: Register offset * @val: Value 
to write at the Register offset * * Write data to the paricular CAN register */ static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg, u32 val) { iowrite32be(val, priv->reg_base + reg); } /** * xcan_read_reg_be - Read a value from the device register big endian * @priv: Driver private data structure * @reg: Register offset * * Read data from the particular CAN register * Return: value read from the CAN register */ static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg) { return ioread32be(priv->reg_base + reg); } /** * set_reset_mode - Resets the CAN device mode * @ndev: Pointer to net_device structure * * This is the driver reset mode routine.The driver * enters into configuration mode. * * Return: 0 on success and failure value on error */ static int set_reset_mode(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); unsigned long timeout; priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); timeout = jiffies + XCAN_TIMEOUT; while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) { if (time_after(jiffies, timeout)) { netdev_warn(ndev, "timed out for config mode\n"); return -ETIMEDOUT; } usleep_range(500, 10000); } return 0; } /** * xcan_set_bittiming - CAN set bit timing routine * @ndev: Pointer to net_device structure * * This is the driver set bittiming routine. * Return: 0 on success and failure value on error */ static int xcan_set_bittiming(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); struct can_bittiming *bt = &priv->can.bittiming; u32 btr0, btr1; u32 is_config_mode; /* Check whether Xilinx CAN is in configuration mode. * It cannot set bit timing if Xilinx CAN is not in configuration mode. */ is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK; if (!is_config_mode) { netdev_alert(ndev, "BUG! 
Cannot set bittiming - CAN is not in config mode\n"); return -EPERM; } /* Setting Baud Rate prescalar value in BRPR Register */ btr0 = (bt->brp - 1); /* Setting Time Segment 1 in BTR Register */ btr1 = (bt->prop_seg + bt->phase_seg1 - 1); /* Setting Time Segment 2 in BTR Register */ btr1 |= (bt->phase_seg2 - 1) << XCAN_BTR_TS2_SHIFT; /* Setting Synchronous jump width in BTR Register */ btr1 |= (bt->sjw - 1) << XCAN_BTR_SJW_SHIFT; priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0); priv->write_reg(priv, XCAN_BTR_OFFSET, btr1); netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n", priv->read_reg(priv, XCAN_BRPR_OFFSET), priv->read_reg(priv, XCAN_BTR_OFFSET)); return 0; } /** * xcan_chip_start - This the drivers start routine * @ndev: Pointer to net_device structure * * This is the drivers start routine. * Based on the State of the CAN device it puts * the CAN device into a proper mode. * * Return: 0 on success and failure value on error */ static int xcan_chip_start(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); u32 reg_msr, reg_sr_mask; int err; unsigned long timeout; /* Check if it is in reset mode */ err = set_reset_mode(ndev); if (err < 0) return err; err = xcan_set_bittiming(ndev); if (err < 0) return err; /* Enable interrupts */ priv->write_reg(priv, XCAN_IER_OFFSET, XCAN_INTR_ALL); /* Check whether it is loopback mode or normal mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { reg_msr = XCAN_MSR_LBACK_MASK; reg_sr_mask = XCAN_SR_LBACK_MASK; } else { reg_msr = 0x0; reg_sr_mask = XCAN_SR_NORMAL_MASK; } priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr); priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK); timeout = jiffies + XCAN_TIMEOUT; while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & reg_sr_mask)) { if (time_after(jiffies, timeout)) { netdev_warn(ndev, "timed out for correct mode\n"); return -ETIMEDOUT; } } netdev_dbg(ndev, "status:#x%08x\n", priv->read_reg(priv, XCAN_SR_OFFSET)); priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; } /** 
* xcan_do_set_mode - This sets the mode of the driver * @ndev: Pointer to net_device structure * @mode: Tells the mode of the driver * * This check the drivers state and calls the * the corresponding modes to set. * * Return: 0 on success and failure value on error */ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode) { int ret; switch (mode) { case CAN_MODE_START: ret = xcan_chip_start(ndev); if (ret < 0) { netdev_err(ndev, "xcan_chip_start failed!\n"); return ret; } netif_wake_queue(ndev); break; default: ret = -EOPNOTSUPP; break; } return ret; } /** * xcan_start_xmit - Starts the transmission * @skb: sk_buff pointer that contains data to be Txed * @ndev: Pointer to net_device structure * * This function is invoked from upper layers to initiate transmission. This * function uses the next available free txbuff and populates their fields to * start the transmission. * * Return: 0 on success and failure value on error */ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct can_frame *cf = (struct can_frame *)skb->data; u32 id, dlc, data[2] = {0, 0}; if (can_dropped_invalid_skb(ndev, skb)) return NETDEV_TX_OK; /* Check if the TX buffer is full */ if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_TXFLL_MASK)) { netif_stop_queue(ndev); netdev_err(ndev, "BUG!, TX FIFO full when queue awake!\n"); return NETDEV_TX_BUSY; } /* Watch carefully on the bit sequence */ if (cf->can_id & CAN_EFF_FLAG) { /* Extended CAN ID format */ id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) & XCAN_IDR_ID2_MASK; id |= (((cf->can_id & CAN_EFF_MASK) >> (CAN_EFF_ID_BITS-CAN_SFF_ID_BITS)) << XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK; /* The substibute remote TX request bit should be "1" * for extended frames as in the Xilinx CAN datasheet */ id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK; if (cf->can_id & CAN_RTR_FLAG) /* Extended frames 
remote TX request */ id |= XCAN_IDR_RTR_MASK; } else { /* Standard CAN ID format */ id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK; if (cf->can_id & CAN_RTR_FLAG) /* Standard frames remote TX request */ id |= XCAN_IDR_SRR_MASK; } dlc = cf->can_dlc << XCAN_DLCR_DLC_SHIFT; if (cf->can_dlc > 0) data[0] = be32_to_cpup((__be32 *)(cf->data + 0)); if (cf->can_dlc > 4) data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max); priv->tx_head++; /* Write the Frame to Xilinx CAN TX FIFO */ priv->write_reg(priv, XCAN_TXFIFO_ID_OFFSET, id); /* If the CAN frame is RTR frame this write triggers tranmission */ priv->write_reg(priv, XCAN_TXFIFO_DLC_OFFSET, dlc); if (!(cf->can_id & CAN_RTR_FLAG)) { priv->write_reg(priv, XCAN_TXFIFO_DW1_OFFSET, data[0]); /* If the CAN frame is Standard/Extended frame this * write triggers tranmission */ priv->write_reg(priv, XCAN_TXFIFO_DW2_OFFSET, data[1]); stats->tx_bytes += cf->can_dlc; } /* Check if the TX buffer is full */ if ((priv->tx_head - priv->tx_tail) == priv->tx_max) netif_stop_queue(ndev); return NETDEV_TX_OK; } /** * xcan_rx - Is called from CAN isr to complete the received * frame processing * @ndev: Pointer to net_device structure * * This function is invoked from the CAN isr(poll) to process the Rx frames. It * does minimal processing and invokes "netif_receive_skb" to complete further * processing. * Return: 1 on success and 0 on failure. 
*/ static int xcan_rx(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; u32 id_xcan, dlc, data[2] = {0, 0}; skb = alloc_can_skb(ndev, &cf); if (unlikely(!skb)) { stats->rx_dropped++; return 0; } /* Read a frame from Xilinx zynq CANPS */ id_xcan = priv->read_reg(priv, XCAN_RXFIFO_ID_OFFSET); dlc = priv->read_reg(priv, XCAN_RXFIFO_DLC_OFFSET) >> XCAN_DLCR_DLC_SHIFT; /* Change Xilinx CAN data length format to socketCAN data format */ cf->can_dlc = get_can_dlc(dlc); /* Change Xilinx CAN ID format to socketCAN ID format */ if (id_xcan & XCAN_IDR_IDE_MASK) { /* The received frame is an Extended format frame */ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3; cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >> XCAN_IDR_ID2_SHIFT; cf->can_id |= CAN_EFF_FLAG; if (id_xcan & XCAN_IDR_RTR_MASK) cf->can_id |= CAN_RTR_FLAG; } else { /* The received frame is a standard format frame */ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> XCAN_IDR_ID1_SHIFT; if (id_xcan & XCAN_IDR_SRR_MASK) cf->can_id |= CAN_RTR_FLAG; } if (!(id_xcan & XCAN_IDR_SRR_MASK)) { data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET); data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET); /* Change Xilinx CAN data format to socketCAN data format */ if (cf->can_dlc > 0) *(__be32 *)(cf->data) = cpu_to_be32(data[0]); if (cf->can_dlc > 4) *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]); } stats->rx_bytes += cf->can_dlc; stats->rx_packets++; netif_receive_skb(skb); return 1; } /** * xcan_err_interrupt - error frame Isr * @ndev: net_device pointer * @isr: interrupt status register value * * This is the CAN error interrupt and it will * check the the type of error and forward the error * frame to upper layers. 
*/ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; u32 err_status, status, txerr = 0, rxerr = 0; skb = alloc_can_err_skb(ndev, &cf); err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK; rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT); status = priv->read_reg(priv, XCAN_SR_OFFSET); if (isr & XCAN_IXR_BSOFF_MASK) { priv->can.state = CAN_STATE_BUS_OFF; priv->can.can_stats.bus_off++; /* Leave device in Config Mode in bus-off state */ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); can_bus_off(ndev); if (skb) cf->can_id |= CAN_ERR_BUSOFF; } else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) { priv->can.state = CAN_STATE_ERROR_PASSIVE; priv->can.can_stats.error_passive++; if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (rxerr > 127) ? CAN_ERR_CRTL_RX_PASSIVE : CAN_ERR_CRTL_TX_PASSIVE; cf->data[6] = txerr; cf->data[7] = rxerr; } } else if (status & XCAN_SR_ERRWRN_MASK) { priv->can.state = CAN_STATE_ERROR_WARNING; priv->can.can_stats.error_warning++; if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= (txerr > rxerr) ? 
CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; cf->data[6] = txerr; cf->data[7] = rxerr; } } /* Check for Arbitration lost interrupt */ if (isr & XCAN_IXR_ARBLST_MASK) { priv->can.can_stats.arbitration_lost++; if (skb) { cf->can_id |= CAN_ERR_LOSTARB; cf->data[0] = CAN_ERR_LOSTARB_UNSPEC; } } /* Check for RX FIFO Overflow interrupt */ if (isr & XCAN_IXR_RXOFLW_MASK) { stats->rx_over_errors++; stats->rx_errors++; priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; } } /* Check for error interrupt */ if (isr & XCAN_IXR_ERROR_MASK) { if (skb) { cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; cf->data[2] |= CAN_ERR_PROT_UNSPEC; } /* Check for Ack error interrupt */ if (err_status & XCAN_ESR_ACKER_MASK) { stats->tx_errors++; if (skb) { cf->can_id |= CAN_ERR_ACK; cf->data[3] |= CAN_ERR_PROT_LOC_ACK; } } /* Check for Bit error interrupt */ if (err_status & XCAN_ESR_BERR_MASK) { stats->tx_errors++; if (skb) { cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_BIT; } } /* Check for Stuff error interrupt */ if (err_status & XCAN_ESR_STER_MASK) { stats->rx_errors++; if (skb) { cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_STUFF; } } /* Check for Form error interrupt */ if (err_status & XCAN_ESR_FMER_MASK) { stats->rx_errors++; if (skb) { cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_FORM; } } /* Check for CRC error interrupt */ if (err_status & XCAN_ESR_CRCER_MASK) { stats->rx_errors++; if (skb) { cf->can_id |= CAN_ERR_PROT; cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ | CAN_ERR_PROT_LOC_CRC_DEL; } } priv->can.can_stats.bus_error++; } if (skb) { stats->rx_packets++; stats->rx_bytes += cf->can_dlc; netif_rx(skb); } netdev_dbg(ndev, "%s: error status register:0x%x\n", __func__, priv->read_reg(priv, XCAN_ESR_OFFSET)); } /** * xcan_state_interrupt - It will check the state of the CAN device * @ndev: net_device pointer * @isr: interrupt status register value * * This will 
checks the state of the CAN device * and puts the device into appropriate state. */ static void xcan_state_interrupt(struct net_device *ndev, u32 isr) { struct xcan_priv *priv = netdev_priv(ndev); /* Check for Sleep interrupt if set put CAN device in sleep state */ if (isr & XCAN_IXR_SLP_MASK) priv->can.state = CAN_STATE_SLEEPING; /* Check for Wake up interrupt if set put CAN device in Active state */ if (isr & XCAN_IXR_WKUP_MASK) priv->can.state = CAN_STATE_ERROR_ACTIVE; } /** * xcan_rx_poll - Poll routine for rx packets (NAPI) * @napi: napi structure pointer * @quota: Max number of rx packets to be processed. * * This is the poll routine for rx part. * It will process the packets maximux quota value. * * Return: number of packets received */ static int xcan_rx_poll(struct napi_struct *napi, int quota) { struct net_device *ndev = napi->dev; struct xcan_priv *priv = netdev_priv(ndev); u32 isr, ier; int work_done = 0; isr = priv->read_reg(priv, XCAN_ISR_OFFSET); while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) { if (isr & XCAN_IXR_RXOK_MASK) { priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK); work_done += xcan_rx(ndev); } else { priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); break; } priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } if (work_done) can_led_event(ndev, CAN_LED_EVENT_RX); if (work_done < quota) { napi_complete(napi); ier = priv->read_reg(priv, XCAN_IER_OFFSET); ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK); priv->write_reg(priv, XCAN_IER_OFFSET, ier); } return work_done; } /** * xcan_tx_interrupt - Tx Done Isr * @ndev: net_device pointer * @isr: Interrupt status register value */ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; while ((priv->tx_head - priv->tx_tail > 0) && (isr & XCAN_IXR_TXOK_MASK)) { priv->write_reg(priv, XCAN_ICR_OFFSET, 
XCAN_IXR_TXOK_MASK); can_get_echo_skb(ndev, priv->tx_tail % priv->tx_max); priv->tx_tail++; stats->tx_packets++; isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } can_led_event(ndev, CAN_LED_EVENT_TX); netif_wake_queue(ndev); } /** * xcan_interrupt - CAN Isr * @irq: irq number * @dev_id: device id poniter * * This is the xilinx CAN Isr. It checks for the type of interrupt * and invokes the corresponding ISR. * * Return: * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise */ static irqreturn_t xcan_interrupt(int irq, void *dev_id) { struct net_device *ndev = (struct net_device *)dev_id; struct xcan_priv *priv = netdev_priv(ndev); u32 isr, ier; /* Get the interrupt status from Xilinx CAN */ isr = priv->read_reg(priv, XCAN_ISR_OFFSET); if (!isr) return IRQ_NONE; /* Check for the type of interrupt and Processing it */ if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) { priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)); xcan_state_interrupt(ndev, isr); } /* Check for Tx interrupt and Processing it */ if (isr & XCAN_IXR_TXOK_MASK) xcan_tx_interrupt(ndev, isr); /* Check for the type of error interrupt and Processing it */ if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) { priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)); xcan_err_interrupt(ndev, isr); } /* Check for the type of receive interrupt and Processing it */ if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) { ier = priv->read_reg(priv, XCAN_IER_OFFSET); ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK); priv->write_reg(priv, XCAN_IER_OFFSET, ier); napi_schedule(&priv->napi); } return IRQ_HANDLED; } /** * xcan_chip_stop - Driver stop routine * @ndev: Pointer to net_device structure * * This is the drivers stop routine. It will disable the * interrupts and put the device into configuration mode. 
*/ static void xcan_chip_stop(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); u32 ier; /* Disable interrupts and leave the can in configuration mode */ ier = priv->read_reg(priv, XCAN_IER_OFFSET); ier &= ~XCAN_INTR_ALL; priv->write_reg(priv, XCAN_IER_OFFSET, ier); priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); priv->can.state = CAN_STATE_STOPPED; } /** * xcan_open - Driver open routine * @ndev: Pointer to net_device structure * * This is the driver open routine. * Return: 0 on success and failure value on error */ static int xcan_open(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); int ret; ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags, ndev->name, ndev); if (ret < 0) { netdev_err(ndev, "irq allocation for CAN failed\n"); goto err; } ret = clk_prepare_enable(priv->can_clk); if (ret) { netdev_err(ndev, "unable to enable device clock\n"); goto err_irq; } ret = clk_prepare_enable(priv->bus_clk); if (ret) { netdev_err(ndev, "unable to enable bus clock\n"); goto err_can_clk; } /* Set chip into reset mode */ ret = set_reset_mode(ndev); if (ret < 0) { netdev_err(ndev, "mode resetting failed!\n"); goto err_bus_clk; } /* Common open */ ret = open_candev(ndev); if (ret) goto err_bus_clk; ret = xcan_chip_start(ndev); if (ret < 0) { netdev_err(ndev, "xcan_chip_start failed!\n"); goto err_candev; } can_led_event(ndev, CAN_LED_EVENT_OPEN); napi_enable(&priv->napi); netif_start_queue(ndev); return 0; err_candev: close_candev(ndev); err_bus_clk: clk_disable_unprepare(priv->bus_clk); err_can_clk: clk_disable_unprepare(priv->can_clk); err_irq: free_irq(ndev->irq, ndev); err: return ret; } /** * xcan_close - Driver close routine * @ndev: Pointer to net_device structure * * Return: 0 always */ static int xcan_close(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); napi_disable(&priv->napi); xcan_chip_stop(ndev); clk_disable_unprepare(priv->bus_clk); 
clk_disable_unprepare(priv->can_clk); free_irq(ndev->irq, ndev); close_candev(ndev); can_led_event(ndev, CAN_LED_EVENT_STOP); return 0; } /** * xcan_get_berr_counter - error counter routine * @ndev: Pointer to net_device structure * @bec: Pointer to can_berr_counter structure * * This is the driver error counter routine. * Return: 0 on success and failure value on error */ static int xcan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { struct xcan_priv *priv = netdev_priv(ndev); int ret; ret = clk_prepare_enable(priv->can_clk); if (ret) goto err; ret = clk_prepare_enable(priv->bus_clk); if (ret) goto err_clk; bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK; bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT); clk_disable_unprepare(priv->bus_clk); clk_disable_unprepare(priv->can_clk); return 0; err_clk: clk_disable_unprepare(priv->can_clk); err: return ret; } static const struct net_device_ops xcan_netdev_ops = { .ndo_open = xcan_open, .ndo_stop = xcan_close, .ndo_start_xmit = xcan_start_xmit, .ndo_change_mtu = can_change_mtu, }; /** * xcan_suspend - Suspend method for the driver * @dev: Address of the platform_device structure * * Put the driver into low power mode. * Return: 0 always */ static int __maybe_unused xcan_suspend(struct device *dev) { struct platform_device *pdev = dev_get_drvdata(dev); struct net_device *ndev = platform_get_drvdata(pdev); struct xcan_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) { netif_stop_queue(ndev); netif_device_detach(ndev); } priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK); priv->can.state = CAN_STATE_SLEEPING; clk_disable(priv->bus_clk); clk_disable(priv->can_clk); return 0; } /** * xcan_resume - Resume from suspend * @dev: Address of the platformdevice structure * * Resume operation after suspend. 
* Return: 0 on success and failure value on error */ static int __maybe_unused xcan_resume(struct device *dev) { struct platform_device *pdev = dev_get_drvdata(dev); struct net_device *ndev = platform_get_drvdata(pdev); struct xcan_priv *priv = netdev_priv(ndev); int ret; ret = clk_enable(priv->bus_clk); if (ret) { dev_err(dev, "Cannot enable clock.\n"); return ret; } ret = clk_enable(priv->can_clk); if (ret) { dev_err(dev, "Cannot enable clock.\n"); clk_disable_unprepare(priv->bus_clk); return ret; } priv->write_reg(priv, XCAN_MSR_OFFSET, 0); priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK); priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(ndev)) { netif_device_attach(ndev); netif_start_queue(ndev); } return 0; } static SIMPLE_DEV_PM_OPS(xcan_dev_pm_ops, xcan_suspend, xcan_resume); /** * xcan_probe - Platform registration call * @pdev: Handle to the platform device structure * * This function does all the memory allocation and registration for the CAN * device. * * Return: 0 on success and failure value on error */ static int xcan_probe(struct platform_device *pdev) { struct resource *res; /* IO mem resources */ struct net_device *ndev; struct xcan_priv *priv; void __iomem *addr; int ret, rx_max, tx_max; /* Get the virtual base address for the device */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); addr = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(addr)) { ret = PTR_ERR(addr); goto err; } ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max); if (ret < 0) goto err; ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &rx_max); if (ret < 0) goto err; /* Create a CAN device instance */ ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); if (!ndev) return -ENOMEM; priv = netdev_priv(ndev); priv->dev = ndev; priv->can.bittiming_const = &xcan_bittiming_const; priv->can.do_set_mode = xcan_do_set_mode; priv->can.do_get_berr_counter = xcan_get_berr_counter; priv->can.ctrlmode_supported = 
CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_BERR_REPORTING; priv->reg_base = addr; priv->tx_max = tx_max; /* Get IRQ for the device */ ndev->irq = platform_get_irq(pdev, 0); ndev->flags |= IFF_ECHO; /* We support local echo */ platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->netdev_ops = &xcan_netdev_ops; /* Getting the CAN can_clk info */ priv->can_clk = devm_clk_get(&pdev->dev, "can_clk"); if (IS_ERR(priv->can_clk)) { dev_err(&pdev->dev, "Device clock not found.\n"); ret = PTR_ERR(priv->can_clk); goto err_free; } /* Check for type of CAN device */ if (of_device_is_compatible(pdev->dev.of_node, "xlnx,zynq-can-1.0")) { priv->bus_clk = devm_clk_get(&pdev->dev, "pclk"); if (IS_ERR(priv->bus_clk)) { dev_err(&pdev->dev, "bus clock not found\n"); ret = PTR_ERR(priv->bus_clk); goto err_free; } } else { priv->bus_clk = devm_clk_get(&pdev->dev, "s_axi_aclk"); if (IS_ERR(priv->bus_clk)) { dev_err(&pdev->dev, "bus clock not found\n"); ret = PTR_ERR(priv->bus_clk); goto err_free; } } ret = clk_prepare_enable(priv->can_clk); if (ret) { dev_err(&pdev->dev, "unable to enable device clock\n"); goto err_free; } ret = clk_prepare_enable(priv->bus_clk); if (ret) { dev_err(&pdev->dev, "unable to enable bus clock\n"); goto err_unprepare_disable_dev; } priv->write_reg = xcan_write_reg_le; priv->read_reg = xcan_read_reg_le; if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) { priv->write_reg = xcan_write_reg_be; priv->read_reg = xcan_read_reg_be; } priv->can.clock.freq = clk_get_rate(priv->can_clk); netif_napi_add(ndev, &priv->napi, xcan_rx_poll, rx_max); ret = register_candev(ndev); if (ret) { dev_err(&pdev->dev, "fail to register failed (err=%d)\n", ret); goto err_unprepare_disable_busclk; } devm_can_led_init(ndev); clk_disable_unprepare(priv->bus_clk); clk_disable_unprepare(priv->can_clk); netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n", priv->reg_base, ndev->irq, priv->can.clock.freq, priv->tx_max); return 0; 
err_unprepare_disable_busclk: clk_disable_unprepare(priv->bus_clk); err_unprepare_disable_dev: clk_disable_unprepare(priv->can_clk); err_free: free_candev(ndev); err: return ret; } /** * xcan_remove - Unregister the device after releasing the resources * @pdev: Handle to the platform device structure * * This function frees all the resources allocated to the device. * Return: 0 always */ static int xcan_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct xcan_priv *priv = netdev_priv(ndev); if (set_reset_mode(ndev) < 0) netdev_err(ndev, "mode resetting failed!\n"); unregister_candev(ndev); netif_napi_del(&priv->napi); free_candev(ndev); return 0; } /* Match table for OF platform binding */ static struct of_device_id xcan_of_match[] = { { .compatible = "xlnx,zynq-can-1.0", }, { .compatible = "xlnx,axi-can-1.00.a", }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, xcan_of_match); static struct platform_driver xcan_driver = { .probe = xcan_probe, .remove = xcan_remove, .driver = { .owner = THIS_MODULE, .name = DRIVER_NAME, .pm = &xcan_dev_pm_ops, .of_match_table = xcan_of_match, }, }; module_platform_driver(xcan_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Xilinx Inc"); MODULE_DESCRIPTION("Xilinx CAN interface");
gpl-2.0
tprrt/linux-stable
fs/nfs/nfsroot.c
272
9947
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 1995, 1996 Gero Kuhlmann <gero@gkminix.han.de> * * Allow an NFS filesystem to be mounted as root. The way this works is: * (1) Use the IP autoconfig mechanism to set local IP addresses and routes. * (2) Construct the device string and the options string using DHCP * option 17 and/or kernel command line options. * (3) When mount_root() sets up the root file system, pass these strings * to the NFS client's regular mount interface via sys_mount(). * * * Changes: * * Alan Cox : Removed get_address name clash with FPU. * Alan Cox : Reformatted a bit. * Gero Kuhlmann : Code cleanup * Michael Rausch : Fixed recognition of an incoming RARP answer. * Martin Mares : (2.0) Auto-configuration via BOOTP supported. * Martin Mares : Manual selection of interface & BOOTP/RARP. * Martin Mares : Using network routes instead of host routes, * allowing the default configuration to be used * for normal operation of the host. * Martin Mares : Randomized timer with exponential backoff * installed to minimize network congestion. * Martin Mares : Code cleanup. * Martin Mares : (2.1) BOOTP and RARP made configuration options. * Martin Mares : Server hostname generation fixed. * Gerd Knorr : Fixed wired inode handling * Martin Mares : (2.2) "0.0.0.0" addresses from command line ignored. * Martin Mares : RARP replies not tested for server address. * Gero Kuhlmann : (2.3) Some bug fixes and code cleanup again (please * send me your new patches _before_ bothering * Linus so that I don' always have to cleanup * _afterwards_ - thanks) * Gero Kuhlmann : Last changes of Martin Mares undone. * Gero Kuhlmann : RARP replies are tested for specified server * again. However, it's now possible to have * different RARP and NFS servers. * Gero Kuhlmann : "0.0.0.0" addresses from command line are * now mapped to INADDR_NONE. 
* Gero Kuhlmann : Fixed a bug which prevented BOOTP path name * from being used (thanks to Leo Spiekman) * Andy Walker : Allow to specify the NFS server in nfs_root * without giving a path name * Swen Thümmler : Allow to specify the NFS options in nfs_root * without giving a path name. Fix BOOTP request * for domainname (domainname is NIS domain, not * DNS domain!). Skip dummy devices for BOOTP. * Jacek Zapala : Fixed a bug which prevented server-ip address * from nfsroot parameter from being used. * Olaf Kirch : Adapted to new NFS code. * Jakub Jelinek : Free used code segment. * Marko Kohtala : Fixed some bugs. * Martin Mares : Debug message cleanup * Martin Mares : Changed to use the new generic IP layer autoconfig * code. BOOTP and RARP moved there. * Martin Mares : Default path now contains host name instead of * host IP address (but host name defaults to IP * address anyway). * Martin Mares : Use root_server_addr appropriately during setup. * Martin Mares : Rewrote parameter parsing, now hopefully giving * correct overriding. * Trond Myklebust : Add in preliminary support for NFSv3 and TCP. * Fix bug in root_nfs_addr(). nfs_data.namlen * is NOT for the length of the hostname. * Hua Qin : Support for mounting root file system via * NFS over TCP. * Fabian Frederick: Option parser rebuilt (using parser lib) * Chuck Lever : Use super.c's text-based mount option parsing * Chuck Lever : Add "nfsrootdebug". */ #include <linux/types.h> #include <linux/string.h> #include <linux/init.h> #include <linux/nfs.h> #include <linux/nfs_fs.h> #include <linux/utsname.h> #include <linux/root_dev.h> #include <net/ipconfig.h> #include "internal.h" #define NFSDBG_FACILITY NFSDBG_ROOT /* Default path we try to mount. "%s" gets replaced by our IP address */ #define NFS_ROOT "/tftpboot/%s" /* Default NFSROOT mount options. 
*/ #if defined(CONFIG_NFS_V2) #define NFS_DEF_OPTIONS "vers=2,tcp,rsize=4096,wsize=4096" #elif defined(CONFIG_NFS_V3) #define NFS_DEF_OPTIONS "vers=3,tcp,rsize=4096,wsize=4096" #else #define NFS_DEF_OPTIONS "vers=4,tcp,rsize=4096,wsize=4096" #endif /* Parameters passed from the kernel command line */ static char nfs_root_parms[NFS_MAXPATHLEN + 1] __initdata = ""; /* Text-based mount options passed to super.c */ static char nfs_root_options[256] __initdata = NFS_DEF_OPTIONS; /* Address of NFS server */ static __be32 servaddr __initdata = htonl(INADDR_NONE); /* Name of directory to mount */ static char nfs_export_path[NFS_MAXPATHLEN + 1] __initdata = ""; /* server:export path string passed to super.c */ static char nfs_root_device[NFS_MAXPATHLEN + 1] __initdata = ""; #ifdef NFS_DEBUG /* * When the "nfsrootdebug" kernel command line option is specified, * enable debugging messages for NFSROOT. */ static int __init nfs_root_debug(char *__unused) { nfs_debug |= NFSDBG_ROOT | NFSDBG_MOUNT; return 1; } __setup("nfsrootdebug", nfs_root_debug); #endif /* * Parse NFS server and directory information passed on the kernel * command line. * * nfsroot=[<server-ip>:]<root-dir>[,<nfs-options>] * * If there is a "%s" token in the <root-dir> string, it is replaced * by the ASCII-representation of the client's IP address. */ static int __init nfs_root_setup(char *line) { ROOT_DEV = Root_NFS; if (line[0] == '/' || line[0] == ',' || (line[0] >= '0' && line[0] <= '9')) { strlcpy(nfs_root_parms, line, sizeof(nfs_root_parms)); } else { size_t n = strlen(line) + sizeof(NFS_ROOT) - 1; if (n >= sizeof(nfs_root_parms)) line[sizeof(nfs_root_parms) - sizeof(NFS_ROOT) - 2] = '\0'; sprintf(nfs_root_parms, NFS_ROOT, line); } /* * Extract the IP address of the NFS server containing our * root file system, if one was specified. * * Note: root_nfs_parse_addr() removes the server-ip from * nfs_root_parms, if it exists. 
*/ root_server_addr = root_nfs_parse_addr(nfs_root_parms); return 1; } __setup("nfsroot=", nfs_root_setup); static int __init root_nfs_copy(char *dest, const char *src, const size_t destlen) { if (strlcpy(dest, src, destlen) > destlen) return -1; return 0; } static int __init root_nfs_cat(char *dest, const char *src, const size_t destlen) { size_t len = strlen(dest); if (len && dest[len - 1] != ',') if (strlcat(dest, ",", destlen) > destlen) return -1; if (strlcat(dest, src, destlen) > destlen) return -1; return 0; } /* * Parse out root export path and mount options from * passed-in string @incoming. * * Copy the export path into @exppath. */ static int __init root_nfs_parse_options(char *incoming, char *exppath, const size_t exppathlen) { char *p; /* * Set the NFS remote path */ p = strsep(&incoming, ","); if (*p != '\0' && strcmp(p, "default") != 0) if (root_nfs_copy(exppath, p, exppathlen)) return -1; /* * @incoming now points to the rest of the string; if it * contains something, append it to our root options buffer */ if (incoming != NULL && *incoming != '\0') if (root_nfs_cat(nfs_root_options, incoming, sizeof(nfs_root_options))) return -1; return 0; } /* * Decode the export directory path name and NFS options from * the kernel command line. This has to be done late in order to * use a dynamically acquired client IP address for the remote * root directory path. * * Returns zero if successful; otherwise -1 is returned. 
*/ static int __init root_nfs_data(char *cmdline) { char mand_options[sizeof("nolock,addr=") + INET_ADDRSTRLEN + 1]; int len, retval = -1; char *tmp = NULL; const size_t tmplen = sizeof(nfs_export_path); tmp = kzalloc(tmplen, GFP_KERNEL); if (tmp == NULL) goto out_nomem; strcpy(tmp, NFS_ROOT); if (root_server_path[0] != '\0') { dprintk("Root-NFS: DHCPv4 option 17: %s\n", root_server_path); if (root_nfs_parse_options(root_server_path, tmp, tmplen)) goto out_optionstoolong; } if (cmdline[0] != '\0') { dprintk("Root-NFS: nfsroot=%s\n", cmdline); if (root_nfs_parse_options(cmdline, tmp, tmplen)) goto out_optionstoolong; } /* * Append mandatory options for nfsroot so they override * what has come before */ snprintf(mand_options, sizeof(mand_options), "nolock,addr=%pI4", &servaddr); if (root_nfs_cat(nfs_root_options, mand_options, sizeof(nfs_root_options))) goto out_optionstoolong; /* * Set up nfs_root_device. For NFS mounts, this looks like * * server:/path * * At this point, utsname()->nodename contains our local * IP address or hostname, set by ipconfig. If "%s" exists * in tmp, substitute the nodename, then shovel the whole * mess into nfs_root_device. 
*/ len = snprintf(nfs_export_path, sizeof(nfs_export_path), tmp, utsname()->nodename); if (len >= (int)sizeof(nfs_export_path)) goto out_devnametoolong; len = snprintf(nfs_root_device, sizeof(nfs_root_device), "%pI4:%s", &servaddr, nfs_export_path); if (len >= (int)sizeof(nfs_root_device)) goto out_devnametoolong; retval = 0; out: kfree(tmp); return retval; out_nomem: printk(KERN_ERR "Root-NFS: could not allocate memory\n"); goto out; out_optionstoolong: printk(KERN_ERR "Root-NFS: mount options string too long\n"); goto out; out_devnametoolong: printk(KERN_ERR "Root-NFS: root device name too long.\n"); goto out; } /** * nfs_root_data - Return prepared 'data' for NFSROOT mount * @root_device: OUT: address of string containing NFSROOT device * @root_data: OUT: address of string containing NFSROOT mount options * * Returns zero and sets @root_device and @root_data if successful, * otherwise -1 is returned. */ int __init nfs_root_data(char **root_device, char **root_data) { servaddr = root_server_addr; if (servaddr == htonl(INADDR_NONE)) { printk(KERN_ERR "Root-NFS: no NFS server address\n"); return -1; } if (root_nfs_data(nfs_root_parms) < 0) return -1; *root_device = nfs_root_device; *root_data = nfs_root_options; return 0; }
gpl-2.0
Galaxy-Tab-S2/android_kernel_samsung_gts210wifi
drivers/md/dm-delay.c
528
8617
/* * Copyright (C) 2005-2007 Red Hat GmbH * * A target that delays reads and/or writes and can send * them to different devices. * * This file is released under the GPL. */ #include <linux/module.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/bio.h> #include <linux/slab.h> #include <linux/device-mapper.h> #define DM_MSG_PREFIX "delay" struct delay_c { struct timer_list delay_timer; struct mutex timer_lock; struct work_struct flush_expired_bios; struct list_head delayed_bios; atomic_t may_delay; mempool_t *delayed_pool; struct dm_dev *dev_read; sector_t start_read; unsigned read_delay; unsigned reads; struct dm_dev *dev_write; sector_t start_write; unsigned write_delay; unsigned writes; }; struct dm_delay_info { struct delay_c *context; struct list_head list; struct bio *bio; unsigned long expires; }; static DEFINE_MUTEX(delayed_bios_lock); static struct workqueue_struct *kdelayd_wq; static struct kmem_cache *delayed_cache; static void handle_delayed_timer(unsigned long data) { struct delay_c *dc = (struct delay_c *)data; queue_work(kdelayd_wq, &dc->flush_expired_bios); } static void queue_timeout(struct delay_c *dc, unsigned long expires) { mutex_lock(&dc->timer_lock); if (!timer_pending(&dc->delay_timer) || expires < dc->delay_timer.expires) mod_timer(&dc->delay_timer, expires); mutex_unlock(&dc->timer_lock); } static void flush_bios(struct bio *bio) { struct bio *n; while (bio) { n = bio->bi_next; bio->bi_next = NULL; generic_make_request(bio); bio = n; } } static struct bio *flush_delayed_bios(struct delay_c *dc, int flush_all) { struct dm_delay_info *delayed, *next; unsigned long next_expires = 0; int start_timer = 0; struct bio_list flush_bios = { }; mutex_lock(&delayed_bios_lock); list_for_each_entry_safe(delayed, next, &dc->delayed_bios, list) { if (flush_all || time_after_eq(jiffies, delayed->expires)) { list_del(&delayed->list); bio_list_add(&flush_bios, delayed->bio); if ((bio_data_dir(delayed->bio) == WRITE)) 
delayed->context->writes--; else delayed->context->reads--; mempool_free(delayed, dc->delayed_pool); continue; } if (!start_timer) { start_timer = 1; next_expires = delayed->expires; } else next_expires = min(next_expires, delayed->expires); } mutex_unlock(&delayed_bios_lock); if (start_timer) queue_timeout(dc, next_expires); return bio_list_get(&flush_bios); } static void flush_expired_bios(struct work_struct *work) { struct delay_c *dc; dc = container_of(work, struct delay_c, flush_expired_bios); flush_bios(flush_delayed_bios(dc, 0)); } /* * Mapping parameters: * <device> <offset> <delay> [<write_device> <write_offset> <write_delay>] * * With separate write parameters, the first set is only used for reads. * Delays are specified in milliseconds. */ static int delay_ctr(struct dm_target *ti, unsigned int argc, char **argv) { struct delay_c *dc; unsigned long long tmpll; char dummy; if (argc != 3 && argc != 6) { ti->error = "requires exactly 3 or 6 arguments"; return -EINVAL; } dc = kmalloc(sizeof(*dc), GFP_KERNEL); if (!dc) { ti->error = "Cannot allocate context"; return -ENOMEM; } dc->reads = dc->writes = 0; if (sscanf(argv[1], "%llu%c", &tmpll, &dummy) != 1) { ti->error = "Invalid device sector"; goto bad; } dc->start_read = tmpll; if (sscanf(argv[2], "%u%c", &dc->read_delay, &dummy) != 1) { ti->error = "Invalid delay"; goto bad; } if (dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dc->dev_read)) { ti->error = "Device lookup failed"; goto bad; } dc->dev_write = NULL; if (argc == 3) goto out; if (sscanf(argv[4], "%llu%c", &tmpll, &dummy) != 1) { ti->error = "Invalid write device sector"; goto bad_dev_read; } dc->start_write = tmpll; if (sscanf(argv[5], "%u%c", &dc->write_delay, &dummy) != 1) { ti->error = "Invalid write delay"; goto bad_dev_read; } if (dm_get_device(ti, argv[3], dm_table_get_mode(ti->table), &dc->dev_write)) { ti->error = "Write device lookup failed"; goto bad_dev_read; } out: dc->delayed_pool = mempool_create_slab_pool(128, 
delayed_cache); if (!dc->delayed_pool) { DMERR("Couldn't create delayed bio pool."); goto bad_dev_write; } setup_timer(&dc->delay_timer, handle_delayed_timer, (unsigned long)dc); INIT_WORK(&dc->flush_expired_bios, flush_expired_bios); INIT_LIST_HEAD(&dc->delayed_bios); mutex_init(&dc->timer_lock); atomic_set(&dc->may_delay, 1); ti->num_flush_bios = 1; ti->num_discard_bios = 1; ti->private = dc; return 0; bad_dev_write: if (dc->dev_write) dm_put_device(ti, dc->dev_write); bad_dev_read: dm_put_device(ti, dc->dev_read); bad: kfree(dc); return -EINVAL; } static void delay_dtr(struct dm_target *ti) { struct delay_c *dc = ti->private; flush_workqueue(kdelayd_wq); dm_put_device(ti, dc->dev_read); if (dc->dev_write) dm_put_device(ti, dc->dev_write); mempool_destroy(dc->delayed_pool); kfree(dc); } static int delay_bio(struct delay_c *dc, int delay, struct bio *bio) { struct dm_delay_info *delayed; unsigned long expires = 0; if (!delay || !atomic_read(&dc->may_delay)) return 1; delayed = mempool_alloc(dc->delayed_pool, GFP_NOIO); delayed->context = dc; delayed->bio = bio; delayed->expires = expires = jiffies + (delay * HZ / 1000); mutex_lock(&delayed_bios_lock); if (bio_data_dir(bio) == WRITE) dc->writes++; else dc->reads++; list_add_tail(&delayed->list, &dc->delayed_bios); mutex_unlock(&delayed_bios_lock); queue_timeout(dc, expires); return 0; } static void delay_presuspend(struct dm_target *ti) { struct delay_c *dc = ti->private; atomic_set(&dc->may_delay, 0); del_timer_sync(&dc->delay_timer); flush_bios(flush_delayed_bios(dc, 1)); } static void delay_resume(struct dm_target *ti) { struct delay_c *dc = ti->private; atomic_set(&dc->may_delay, 1); } static int delay_map(struct dm_target *ti, struct bio *bio) { struct delay_c *dc = ti->private; if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { bio->bi_bdev = dc->dev_write->bdev; if (bio_sectors(bio)) bio->bi_sector = dc->start_write + dm_target_offset(ti, bio->bi_sector); return delay_bio(dc, dc->write_delay, bio); } 
bio->bi_bdev = dc->dev_read->bdev; bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector); return delay_bio(dc, dc->read_delay, bio); } static void delay_status(struct dm_target *ti, status_type_t type, unsigned status_flags, char *result, unsigned maxlen) { struct delay_c *dc = ti->private; int sz = 0; switch (type) { case STATUSTYPE_INFO: DMEMIT("%u %u", dc->reads, dc->writes); break; case STATUSTYPE_TABLE: DMEMIT("%s %llu %u", dc->dev_read->name, (unsigned long long) dc->start_read, dc->read_delay); if (dc->dev_write) DMEMIT(" %s %llu %u", dc->dev_write->name, (unsigned long long) dc->start_write, dc->write_delay); break; } } static int delay_iterate_devices(struct dm_target *ti, iterate_devices_callout_fn fn, void *data) { struct delay_c *dc = ti->private; int ret = 0; ret = fn(ti, dc->dev_read, dc->start_read, ti->len, data); if (ret) goto out; if (dc->dev_write) ret = fn(ti, dc->dev_write, dc->start_write, ti->len, data); out: return ret; } static struct target_type delay_target = { .name = "delay", .version = {1, 2, 1}, .module = THIS_MODULE, .ctr = delay_ctr, .dtr = delay_dtr, .map = delay_map, .presuspend = delay_presuspend, .resume = delay_resume, .status = delay_status, .iterate_devices = delay_iterate_devices, }; static int __init dm_delay_init(void) { int r = -ENOMEM; kdelayd_wq = alloc_workqueue("kdelayd", WQ_MEM_RECLAIM, 0); if (!kdelayd_wq) { DMERR("Couldn't start kdelayd"); goto bad_queue; } delayed_cache = KMEM_CACHE(dm_delay_info, 0); if (!delayed_cache) { DMERR("Couldn't create delayed bio cache."); goto bad_memcache; } r = dm_register_target(&delay_target); if (r < 0) { DMERR("register failed %d", r); goto bad_register; } return 0; bad_register: kmem_cache_destroy(delayed_cache); bad_memcache: destroy_workqueue(kdelayd_wq); bad_queue: return r; } static void __exit dm_delay_exit(void) { dm_unregister_target(&delay_target); kmem_cache_destroy(delayed_cache); destroy_workqueue(kdelayd_wq); } /* Module hooks */ 
module_init(dm_delay_init); module_exit(dm_delay_exit); MODULE_DESCRIPTION(DM_NAME " delay target"); MODULE_AUTHOR("Heinz Mauelshagen <mauelshagen@redhat.com>"); MODULE_LICENSE("GPL");
gpl-2.0
sudeepdutt/mic
security/inode.c
528
6698
/* * inode.c - securityfs * * Copyright (C) 2005 Greg Kroah-Hartman <gregkh@suse.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * Based on fs/debugfs/inode.c which had the following copyright notice: * Copyright (C) 2004 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2004 IBM Inc. */ /* #define DEBUG */ #include <linux/module.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/namei.h> #include <linux/security.h> #include <linux/magic.h> static struct vfsmount *mount; static int mount_count; static int fill_super(struct super_block *sb, void *data, int silent) { static struct tree_descr files[] = {{""}}; return simple_fill_super(sb, SECURITYFS_MAGIC, files); } static struct dentry *get_sb(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_single(fs_type, flags, data, fill_super); } static struct file_system_type fs_type = { .owner = THIS_MODULE, .name = "securityfs", .mount = get_sb, .kill_sb = kill_litter_super, }; /** * securityfs_create_file - create a file in the securityfs filesystem * * @name: a pointer to a string containing the name of the file to create. * @mode: the permission that the file should have * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is %NULL, then the * file will be created in the root of the securityfs filesystem. * @data: a pointer to something that the caller will want to get to later * on. The inode.i_private pointer will point to this value on * the open() call. * @fops: a pointer to a struct file_operations that should be used for * this file. * * This is the basic "create a file" function for securityfs. 
It allows for a * wide range of flexibility in creating a file, or a directory (if you * want to create a directory, the securityfs_create_dir() function is * recommended to be used instead). * * This function returns a pointer to a dentry if it succeeds. This * pointer must be passed to the securityfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here). If an error occurs, the function will return * the error value (via ERR_PTR). * * If securityfs is not enabled in the kernel, the value %-ENODEV is * returned. */ struct dentry *securityfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { struct dentry *dentry; int is_dir = S_ISDIR(mode); struct inode *dir, *inode; int error; if (!is_dir) { BUG_ON(!fops); mode = (mode & S_IALLUGO) | S_IFREG; } pr_debug("securityfs: creating file '%s'\n",name); error = simple_pin_fs(&fs_type, &mount, &mount_count); if (error) return ERR_PTR(error); if (!parent) parent = mount->mnt_root; dir = d_inode(parent); mutex_lock(&dir->i_mutex); dentry = lookup_one_len(name, parent, strlen(name)); if (IS_ERR(dentry)) goto out; if (d_really_is_positive(dentry)) { error = -EEXIST; goto out1; } inode = new_inode(dir->i_sb); if (!inode) { error = -ENOMEM; goto out1; } inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; inode->i_private = data; if (is_dir) { inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; inc_nlink(inode); inc_nlink(dir); } else { inode->i_fop = fops; } d_instantiate(dentry, inode); dget(dentry); mutex_unlock(&dir->i_mutex); return dentry; out1: dput(dentry); dentry = ERR_PTR(error); out: mutex_unlock(&dir->i_mutex); simple_release_fs(&mount, &mount_count); return dentry; } EXPORT_SYMBOL_GPL(securityfs_create_file); /** * securityfs_create_dir - create a directory in the securityfs 
filesystem * * @name: a pointer to a string containing the name of the directory to * create. * @parent: a pointer to the parent dentry for this file. This should be a * directory dentry if set. If this parameter is %NULL, then the * directory will be created in the root of the securityfs filesystem. * * This function creates a directory in securityfs with the given @name. * * This function returns a pointer to a dentry if it succeeds. This * pointer must be passed to the securityfs_remove() function when the file is * to be removed (no automatic cleanup happens if your module is unloaded, * you are responsible here). If an error occurs, %NULL will be returned. * * If securityfs is not enabled in the kernel, the value %-ENODEV is * returned. It is not wise to check for this value, but rather, check for * %NULL or !%NULL instead as to eliminate the need for #ifdef in the calling * code. */ struct dentry *securityfs_create_dir(const char *name, struct dentry *parent) { return securityfs_create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO, parent, NULL, NULL); } EXPORT_SYMBOL_GPL(securityfs_create_dir); /** * securityfs_remove - removes a file or directory from the securityfs filesystem * * @dentry: a pointer to a the dentry of the file or directory to be removed. * * This function removes a file or directory in securityfs that was previously * created with a call to another securityfs function (like * securityfs_create_file() or variants thereof.) * * This function is required to be called in order for the file to be * removed. No automatic cleanup of files will happen when a module is * removed; you are responsible here. 
*/ void securityfs_remove(struct dentry *dentry) { struct dentry *parent; if (!dentry || IS_ERR(dentry)) return; parent = dentry->d_parent; if (!parent || d_really_is_negative(parent)) return; mutex_lock(&d_inode(parent)->i_mutex); if (simple_positive(dentry)) { if (d_is_dir(dentry)) simple_rmdir(d_inode(parent), dentry); else simple_unlink(d_inode(parent), dentry); dput(dentry); } mutex_unlock(&d_inode(parent)->i_mutex); simple_release_fs(&mount, &mount_count); } EXPORT_SYMBOL_GPL(securityfs_remove); static int __init securityfs_init(void) { int retval; retval = sysfs_create_mount_point(kernel_kobj, "security"); if (retval) return retval; retval = register_filesystem(&fs_type); if (retval) sysfs_remove_mount_point(kernel_kobj, "security"); return retval; } core_initcall(securityfs_init); MODULE_LICENSE("GPL");
gpl-2.0
Dinjesk/android_kernel_oneplus_msm8996
drivers/media/usb/uvc/uvc_queue.c
528
10491
/* * uvc_queue.c -- USB Video Class driver - Buffers management * * Copyright (C) 2005-2010 * Laurent Pinchart (laurent.pinchart@ideasonboard.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/atomic.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/list.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/videodev2.h> #include <linux/vmalloc.h> #include <linux/wait.h> #include <media/videobuf2-vmalloc.h> #include "uvcvideo.h" /* ------------------------------------------------------------------------ * Video buffers queue management. * * Video queues is initialized by uvc_queue_init(). The function performs * basic initialization of the uvc_video_queue struct and never fails. * * Video buffers are managed by videobuf2. The driver uses a mutex to protect * the videobuf2 queue operations by serializing calls to videobuf2 and a * spinlock to protect the IRQ queue that holds the buffers to be processed by * the driver. */ /* ----------------------------------------------------------------------------- * videobuf2 queue operations */ static int uvc_queue_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *nbuffers, unsigned int *nplanes, unsigned int sizes[], void *alloc_ctxs[]) { struct uvc_video_queue *queue = vb2_get_drv_priv(vq); struct uvc_streaming *stream = container_of(queue, struct uvc_streaming, queue); /* Make sure the image size is large enough. */ if (fmt && fmt->fmt.pix.sizeimage < stream->ctrl.dwMaxVideoFrameSize) return -EINVAL; *nplanes = 1; sizes[0] = fmt ? 
fmt->fmt.pix.sizeimage : stream->ctrl.dwMaxVideoFrameSize; return 0; } static int uvc_buffer_prepare(struct vb2_buffer *vb) { struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf); if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_OUTPUT && vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) { uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n"); return -EINVAL; } if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) return -ENODEV; buf->state = UVC_BUF_STATE_QUEUED; buf->error = 0; buf->mem = vb2_plane_vaddr(vb, 0); buf->length = vb2_plane_size(vb, 0); if (vb->v4l2_buf.type == V4L2_BUF_TYPE_VIDEO_CAPTURE) buf->bytesused = 0; else buf->bytesused = vb2_get_plane_payload(vb, 0); return 0; } static void uvc_buffer_queue(struct vb2_buffer *vb) { struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf); unsigned long flags; spin_lock_irqsave(&queue->irqlock, flags); if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) { list_add_tail(&buf->queue, &queue->irqqueue); } else { /* If the device is disconnected return the buffer to userspace * directly. The next QBUF call will fail with -ENODEV. 
*/ buf->state = UVC_BUF_STATE_ERROR; vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR); } spin_unlock_irqrestore(&queue->irqlock, flags); } static void uvc_buffer_finish(struct vb2_buffer *vb) { struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); struct uvc_streaming *stream = container_of(queue, struct uvc_streaming, queue); struct uvc_buffer *buf = container_of(vb, struct uvc_buffer, buf); if (vb->state == VB2_BUF_STATE_DONE) uvc_video_clock_update(stream, &vb->v4l2_buf, buf); } static void uvc_wait_prepare(struct vb2_queue *vq) { struct uvc_video_queue *queue = vb2_get_drv_priv(vq); mutex_unlock(&queue->mutex); } static void uvc_wait_finish(struct vb2_queue *vq) { struct uvc_video_queue *queue = vb2_get_drv_priv(vq); mutex_lock(&queue->mutex); } static struct vb2_ops uvc_queue_qops = { .queue_setup = uvc_queue_setup, .buf_prepare = uvc_buffer_prepare, .buf_queue = uvc_buffer_queue, .buf_finish = uvc_buffer_finish, .wait_prepare = uvc_wait_prepare, .wait_finish = uvc_wait_finish, }; int uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type, int drop_corrupted) { int ret; queue->queue.type = type; queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF; queue->queue.drv_priv = queue; queue->queue.buf_struct_size = sizeof(struct uvc_buffer); queue->queue.ops = &uvc_queue_qops; queue->queue.mem_ops = &vb2_vmalloc_memops; queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC | V4L2_BUF_FLAG_TSTAMP_SRC_SOE; ret = vb2_queue_init(&queue->queue); if (ret) return ret; mutex_init(&queue->mutex); spin_lock_init(&queue->irqlock); INIT_LIST_HEAD(&queue->irqqueue); queue->flags = drop_corrupted ? 
UVC_QUEUE_DROP_CORRUPTED : 0; return 0; } /* ----------------------------------------------------------------------------- * V4L2 queue operations */ int uvc_alloc_buffers(struct uvc_video_queue *queue, struct v4l2_requestbuffers *rb) { int ret; mutex_lock(&queue->mutex); ret = vb2_reqbufs(&queue->queue, rb); mutex_unlock(&queue->mutex); return ret ? ret : rb->count; } void uvc_free_buffers(struct uvc_video_queue *queue) { mutex_lock(&queue->mutex); vb2_queue_release(&queue->queue); mutex_unlock(&queue->mutex); } int uvc_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) { int ret; mutex_lock(&queue->mutex); ret = vb2_querybuf(&queue->queue, buf); mutex_unlock(&queue->mutex); return ret; } int uvc_create_buffers(struct uvc_video_queue *queue, struct v4l2_create_buffers *cb) { int ret; mutex_lock(&queue->mutex); ret = vb2_create_bufs(&queue->queue, cb); mutex_unlock(&queue->mutex); return ret; } int uvc_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf) { int ret; mutex_lock(&queue->mutex); ret = vb2_qbuf(&queue->queue, buf); mutex_unlock(&queue->mutex); return ret; } int uvc_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf, int nonblocking) { int ret; mutex_lock(&queue->mutex); ret = vb2_dqbuf(&queue->queue, buf, nonblocking); mutex_unlock(&queue->mutex); return ret; } int uvc_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma) { int ret; mutex_lock(&queue->mutex); ret = vb2_mmap(&queue->queue, vma); mutex_unlock(&queue->mutex); return ret; } #ifndef CONFIG_MMU unsigned long uvc_queue_get_unmapped_area(struct uvc_video_queue *queue, unsigned long pgoff) { unsigned long ret; mutex_lock(&queue->mutex); ret = vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0); mutex_unlock(&queue->mutex); return ret; } #endif unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file, poll_table *wait) { unsigned int ret; mutex_lock(&queue->mutex); ret = vb2_poll(&queue->queue, file, wait); 
mutex_unlock(&queue->mutex); return ret; } /* ----------------------------------------------------------------------------- * */ /* * Check if buffers have been allocated. */ int uvc_queue_allocated(struct uvc_video_queue *queue) { int allocated; mutex_lock(&queue->mutex); allocated = vb2_is_busy(&queue->queue); mutex_unlock(&queue->mutex); return allocated; } /* * Enable or disable the video buffers queue. * * The queue must be enabled before starting video acquisition and must be * disabled after stopping it. This ensures that the video buffers queue * state can be properly initialized before buffers are accessed from the * interrupt handler. * * Enabling the video queue returns -EBUSY if the queue is already enabled. * * Disabling the video queue cancels the queue and removes all buffers from * the main queue. * * This function can't be called from interrupt context. Use * uvc_queue_cancel() instead. */ int uvc_queue_enable(struct uvc_video_queue *queue, int enable) { unsigned long flags; int ret; mutex_lock(&queue->mutex); if (enable) { ret = vb2_streamon(&queue->queue, queue->queue.type); if (ret < 0) goto done; queue->buf_used = 0; } else { ret = vb2_streamoff(&queue->queue, queue->queue.type); if (ret < 0) goto done; spin_lock_irqsave(&queue->irqlock, flags); INIT_LIST_HEAD(&queue->irqqueue); spin_unlock_irqrestore(&queue->irqlock, flags); } done: mutex_unlock(&queue->mutex); return ret; } /* * Cancel the video buffers queue. * * Cancelling the queue marks all buffers on the irq queue as erroneous, * wakes them up and removes them from the queue. * * If the disconnect parameter is set, further calls to uvc_queue_buffer will * fail with -ENODEV. * * This function acquires the irq spinlock and can be called from interrupt * context. 
*/ void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect) { struct uvc_buffer *buf; unsigned long flags; spin_lock_irqsave(&queue->irqlock, flags); while (!list_empty(&queue->irqqueue)) { buf = list_first_entry(&queue->irqqueue, struct uvc_buffer, queue); list_del(&buf->queue); buf->state = UVC_BUF_STATE_ERROR; vb2_buffer_done(&buf->buf, VB2_BUF_STATE_ERROR); } /* This must be protected by the irqlock spinlock to avoid race * conditions between uvc_buffer_queue and the disconnection event that * could result in an interruptible wait in uvc_dequeue_buffer. Do not * blindly replace this logic by checking for the UVC_QUEUE_DISCONNECTED * state outside the queue code. */ if (disconnect) queue->flags |= UVC_QUEUE_DISCONNECTED; spin_unlock_irqrestore(&queue->irqlock, flags); } struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue, struct uvc_buffer *buf) { struct uvc_buffer *nextbuf; unsigned long flags; if ((queue->flags & UVC_QUEUE_DROP_CORRUPTED) && buf->error) { buf->error = 0; buf->state = UVC_BUF_STATE_QUEUED; buf->bytesused = 0; vb2_set_plane_payload(&buf->buf, 0, 0); return buf; } spin_lock_irqsave(&queue->irqlock, flags); list_del(&buf->queue); if (!list_empty(&queue->irqqueue)) nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer, queue); else nextbuf = NULL; spin_unlock_irqrestore(&queue->irqlock, flags); buf->state = buf->error ? VB2_BUF_STATE_ERROR : UVC_BUF_STATE_DONE; vb2_set_plane_payload(&buf->buf, 0, buf->bytesused); vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE); return nextbuf; }
gpl-2.0
CalcProgrammer1/ubuntu-kernel-quincyatt
drivers/net/atlx/atl1.c
1552
101009
/* * Copyright(c) 2005 - 2006 Attansic Corporation. All rights reserved. * Copyright(c) 2006 - 2007 Chris Snook <csnook@redhat.com> * Copyright(c) 2006 - 2008 Jay Cliburn <jcliburn@gmail.com> * * Derived from Intel e1000 driver * Copyright(c) 1999 - 2005 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * The full GNU General Public License is included in this distribution in the * file called COPYING. * * Contact Information: * Xiong Huang <xiong.huang@atheros.com> * Jie Yang <jie.yang@atheros.com> * Chris Snook <csnook@redhat.com> * Jay Cliburn <jcliburn@gmail.com> * * This version is adapted from the Attansic reference driver. * * TODO: * Add more ethtool functions. 
* Fix abstruse irq enable/disable condition described here: * http://marc.theaimsgroup.com/?l=linux-netdev&m=116398508500553&w=2 * * NEEDS TESTING: * VLAN * multicast * promiscuous mode * interrupt coalescing * SMP torture testing */ #include <asm/atomic.h> #include <asm/byteorder.h> #include <linux/compiler.h> #include <linux/crc32.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/hardirq.h> #include <linux/if_ether.h> #include <linux/if_vlan.h> #include <linux/in.h> #include <linux/interrupt.h> #include <linux/ip.h> #include <linux/irqflags.h> #include <linux/irqreturn.h> #include <linux/jiffies.h> #include <linux/mii.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/pm.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/tcp.h> #include <linux/timer.h> #include <linux/types.h> #include <linux/workqueue.h> #include <net/checksum.h> #include "atl1.h" #define ATLX_DRIVER_VERSION "2.1.3" MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, " "Chris Snook <csnook@redhat.com>, " "Jay Cliburn <jcliburn@gmail.com>"); MODULE_LICENSE("GPL"); MODULE_VERSION(ATLX_DRIVER_VERSION); /* Temporary hack for merging atl1 and atl2 */ #include "atlx.c" static const struct ethtool_ops atl1_ethtool_ops; /* * This is the only thing that needs to be changed to adjust the * maximum number of ports that the driver can manage. */ #define ATL1_MAX_NIC 4 #define OPTION_UNSET -1 #define OPTION_DISABLED 0 #define OPTION_ENABLED 1 #define ATL1_PARAM_INIT { [0 ... 
ATL1_MAX_NIC] = OPTION_UNSET } /* * Interrupt Moderate Timer in units of 2 us * * Valid Range: 10-65535 * * Default Value: 100 (200us) */ static int __devinitdata int_mod_timer[ATL1_MAX_NIC+1] = ATL1_PARAM_INIT; static unsigned int num_int_mod_timer; module_param_array_named(int_mod_timer, int_mod_timer, int, &num_int_mod_timer, 0); MODULE_PARM_DESC(int_mod_timer, "Interrupt moderator timer"); #define DEFAULT_INT_MOD_CNT 100 /* 200us */ #define MAX_INT_MOD_CNT 65000 #define MIN_INT_MOD_CNT 50 struct atl1_option { enum { enable_option, range_option, list_option } type; char *name; char *err; int def; union { struct { /* range_option info */ int min; int max; } r; struct { /* list_option info */ int nr; struct atl1_opt_list { int i; char *str; } *p; } l; } arg; }; static int __devinit atl1_validate_option(int *value, struct atl1_option *opt, struct pci_dev *pdev) { if (*value == OPTION_UNSET) { *value = opt->def; return 0; } switch (opt->type) { case enable_option: switch (*value) { case OPTION_ENABLED: dev_info(&pdev->dev, "%s enabled\n", opt->name); return 0; case OPTION_DISABLED: dev_info(&pdev->dev, "%s disabled\n", opt->name); return 0; } break; case range_option: if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) { dev_info(&pdev->dev, "%s set to %i\n", opt->name, *value); return 0; } break; case list_option:{ int i; struct atl1_opt_list *ent; for (i = 0; i < opt->arg.l.nr; i++) { ent = &opt->arg.l.p[i]; if (*value == ent->i) { if (ent->str[0] != '\0') dev_info(&pdev->dev, "%s\n", ent->str); return 0; } } } break; default: break; } dev_info(&pdev->dev, "invalid %s specified (%i) %s\n", opt->name, *value, opt->err); *value = opt->def; return -1; } /* * atl1_check_options - Range Checking for Command Line Parameters * @adapter: board private structure * * This routine checks all command line parameters for valid user * input. If an invalid value is given, or if no user specified * value exists, a default value is used. 
The final value is stored * in a variable in the adapter structure. */ static void __devinit atl1_check_options(struct atl1_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; int bd = adapter->bd_number; if (bd >= ATL1_MAX_NIC) { dev_notice(&pdev->dev, "no configuration for board#%i\n", bd); dev_notice(&pdev->dev, "using defaults for all values\n"); } { /* Interrupt Moderate Timer */ struct atl1_option opt = { .type = range_option, .name = "Interrupt Moderator Timer", .err = "using default of " __MODULE_STRING(DEFAULT_INT_MOD_CNT), .def = DEFAULT_INT_MOD_CNT, .arg = {.r = {.min = MIN_INT_MOD_CNT, .max = MAX_INT_MOD_CNT} } }; int val; if (num_int_mod_timer > bd) { val = int_mod_timer[bd]; atl1_validate_option(&val, &opt, pdev); adapter->imt = (u16) val; } else adapter->imt = (u16) (opt.def); } } /* * atl1_pci_tbl - PCI Device ID Table */ static DEFINE_PCI_DEVICE_TABLE(atl1_pci_tbl) = { {PCI_DEVICE(PCI_VENDOR_ID_ATTANSIC, PCI_DEVICE_ID_ATTANSIC_L1)}, /* required last entry */ {0,} }; MODULE_DEVICE_TABLE(pci, atl1_pci_tbl); static const u32 atl1_default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; static int debug = -1; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Message level (0=none,...,16=all)"); /* * Reset the transmit and receive units; mask and clear all interrupts. * hw - Struct containing variables accessed by shared code * return : 0 or idle status (if error) */ static s32 atl1_reset_hw(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; u32 icr; int i; /* * Clear Interrupt mask to stop board from generating * interrupts & Clear any pending interrupt events */ /* * iowrite32(0, hw->hw_addr + REG_IMR); * iowrite32(0xffffffff, hw->hw_addr + REG_ISR); */ /* * Issue Soft Reset to the MAC. This will reset the chip's * transmit, receive, DMA. It will not effect * the current PCI configuration. 
The global reset bit is self- * clearing, and should clear within a microsecond. */ iowrite32(MASTER_CTRL_SOFT_RST, hw->hw_addr + REG_MASTER_CTRL); ioread32(hw->hw_addr + REG_MASTER_CTRL); iowrite16(1, hw->hw_addr + REG_PHY_ENABLE); ioread16(hw->hw_addr + REG_PHY_ENABLE); /* delay about 1ms */ msleep(1); /* Wait at least 10ms for All module to be Idle */ for (i = 0; i < 10; i++) { icr = ioread32(hw->hw_addr + REG_IDLE_STATUS); if (!icr) break; /* delay 1 ms */ msleep(1); /* FIXME: still the right way to do this? */ cpu_relax(); } if (icr) { if (netif_msg_hw(adapter)) dev_dbg(&pdev->dev, "ICR = 0x%x\n", icr); return icr; } return 0; } /* function about EEPROM * * check_eeprom_exist * return 0 if eeprom exist */ static int atl1_check_eeprom_exist(struct atl1_hw *hw) { u32 value; value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); if (value & SPI_FLASH_CTRL_EN_VPD) { value &= ~SPI_FLASH_CTRL_EN_VPD; iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); } value = ioread16(hw->hw_addr + REG_PCIE_CAP_LIST); return ((value & 0xFF00) == 0x6C00) ? 
0 : 1; } static bool atl1_read_eeprom(struct atl1_hw *hw, u32 offset, u32 *p_value) { int i; u32 control; if (offset & 3) /* address do not align */ return false; iowrite32(0, hw->hw_addr + REG_VPD_DATA); control = (offset & VPD_CAP_VPD_ADDR_MASK) << VPD_CAP_VPD_ADDR_SHIFT; iowrite32(control, hw->hw_addr + REG_VPD_CAP); ioread32(hw->hw_addr + REG_VPD_CAP); for (i = 0; i < 10; i++) { msleep(2); control = ioread32(hw->hw_addr + REG_VPD_CAP); if (control & VPD_CAP_VPD_FLAG) break; } if (control & VPD_CAP_VPD_FLAG) { *p_value = ioread32(hw->hw_addr + REG_VPD_DATA); return true; } /* timeout */ return false; } /* * Reads the value from a PHY register * hw - Struct containing variables accessed by shared code * reg_addr - address of the PHY register to read */ static s32 atl1_read_phy_reg(struct atl1_hw *hw, u16 reg_addr, u16 *phy_data) { u32 val; int i; val = ((u32) (reg_addr & MDIO_REG_ADDR_MASK)) << MDIO_REG_ADDR_SHIFT | MDIO_START | MDIO_SUP_PREAMBLE | MDIO_RW | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); ioread32(hw->hw_addr + REG_MDIO_CTRL); for (i = 0; i < MDIO_WAIT_TIMES; i++) { udelay(2); val = ioread32(hw->hw_addr + REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; } if (!(val & (MDIO_START | MDIO_BUSY))) { *phy_data = (u16) val; return 0; } return ATLX_ERR_PHY; } #define CUSTOM_SPI_CS_SETUP 2 #define CUSTOM_SPI_CLK_HI 2 #define CUSTOM_SPI_CLK_LO 2 #define CUSTOM_SPI_CS_HOLD 2 #define CUSTOM_SPI_CS_HI 3 static bool atl1_spi_read(struct atl1_hw *hw, u32 addr, u32 *buf) { int i; u32 value; iowrite32(0, hw->hw_addr + REG_SPI_DATA); iowrite32(addr, hw->hw_addr + REG_SPI_ADDR); value = SPI_FLASH_CTRL_WAIT_READY | (CUSTOM_SPI_CS_SETUP & SPI_FLASH_CTRL_CS_SETUP_MASK) << SPI_FLASH_CTRL_CS_SETUP_SHIFT | (CUSTOM_SPI_CLK_HI & SPI_FLASH_CTRL_CLK_HI_MASK) << SPI_FLASH_CTRL_CLK_HI_SHIFT | (CUSTOM_SPI_CLK_LO & SPI_FLASH_CTRL_CLK_LO_MASK) << SPI_FLASH_CTRL_CLK_LO_SHIFT | (CUSTOM_SPI_CS_HOLD & SPI_FLASH_CTRL_CS_HOLD_MASK) << 
SPI_FLASH_CTRL_CS_HOLD_SHIFT | (CUSTOM_SPI_CS_HI & SPI_FLASH_CTRL_CS_HI_MASK) << SPI_FLASH_CTRL_CS_HI_SHIFT | (1 & SPI_FLASH_CTRL_INS_MASK) << SPI_FLASH_CTRL_INS_SHIFT; iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); value |= SPI_FLASH_CTRL_START; iowrite32(value, hw->hw_addr + REG_SPI_FLASH_CTRL); ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); for (i = 0; i < 10; i++) { msleep(1); value = ioread32(hw->hw_addr + REG_SPI_FLASH_CTRL); if (!(value & SPI_FLASH_CTRL_START)) break; } if (value & SPI_FLASH_CTRL_START) return false; *buf = ioread32(hw->hw_addr + REG_SPI_DATA); return true; } /* * get_permanent_address * return 0 if get valid mac address, */ static int atl1_get_permanent_address(struct atl1_hw *hw) { u32 addr[2]; u32 i, control; u16 reg; u8 eth_addr[ETH_ALEN]; bool key_valid; if (is_valid_ether_addr(hw->perm_mac_addr)) return 0; /* init */ addr[0] = addr[1] = 0; if (!atl1_check_eeprom_exist(hw)) { reg = 0; key_valid = false; /* Read out all EEPROM content */ i = 0; while (1) { if (atl1_read_eeprom(hw, i + 0x100, &control)) { if (key_valid) { if (reg == REG_MAC_STA_ADDR) addr[0] = control; else if (reg == (REG_MAC_STA_ADDR + 4)) addr[1] = control; key_valid = false; } else if ((control & 0xff) == 0x5A) { key_valid = true; reg = (u16) (control >> 16); } else break; } else /* read error */ break; i += 4; } *(u32 *) &eth_addr[2] = swab32(addr[0]); *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); if (is_valid_ether_addr(eth_addr)) { memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); return 0; } } /* see if SPI FLAGS exist ? 
*/ addr[0] = addr[1] = 0; reg = 0; key_valid = false; i = 0; while (1) { if (atl1_spi_read(hw, i + 0x1f000, &control)) { if (key_valid) { if (reg == REG_MAC_STA_ADDR) addr[0] = control; else if (reg == (REG_MAC_STA_ADDR + 4)) addr[1] = control; key_valid = false; } else if ((control & 0xff) == 0x5A) { key_valid = true; reg = (u16) (control >> 16); } else /* data end */ break; } else /* read error */ break; i += 4; } *(u32 *) &eth_addr[2] = swab32(addr[0]); *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); if (is_valid_ether_addr(eth_addr)) { memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); return 0; } /* * On some motherboards, the MAC address is written by the * BIOS directly to the MAC register during POST, and is * not stored in eeprom. If all else thus far has failed * to fetch the permanent MAC address, try reading it directly. */ addr[0] = ioread32(hw->hw_addr + REG_MAC_STA_ADDR); addr[1] = ioread16(hw->hw_addr + (REG_MAC_STA_ADDR + 4)); *(u32 *) &eth_addr[2] = swab32(addr[0]); *(u16 *) &eth_addr[0] = swab16(*(u16 *) &addr[1]); if (is_valid_ether_addr(eth_addr)) { memcpy(hw->perm_mac_addr, eth_addr, ETH_ALEN); return 0; } return 1; } /* * Reads the adapter's MAC address from the EEPROM * hw - Struct containing variables accessed by shared code */ static s32 atl1_read_mac_addr(struct atl1_hw *hw) { u16 i; if (atl1_get_permanent_address(hw)) random_ether_addr(hw->perm_mac_addr); for (i = 0; i < ETH_ALEN; i++) hw->mac_addr[i] = hw->perm_mac_addr[i]; return 0; } /* * Hashes an address to determine its location in the multicast table * hw - Struct containing variables accessed by shared code * mc_addr - the multicast address to hash * * atl1_hash_mc_addr * purpose * set hash value for a multicast address * hash calcu processing : * 1. calcu 32bit CRC for multicast address * 2. 
reverse crc with MSB to LSB */ static u32 atl1_hash_mc_addr(struct atl1_hw *hw, u8 *mc_addr) { u32 crc32, value = 0; int i; crc32 = ether_crc_le(6, mc_addr); for (i = 0; i < 32; i++) value |= (((crc32 >> i) & 1) << (31 - i)); return value; } /* * Sets the bit in the multicast table corresponding to the hash value. * hw - Struct containing variables accessed by shared code * hash_value - Multicast address hash value */ static void atl1_hash_set(struct atl1_hw *hw, u32 hash_value) { u32 hash_bit, hash_reg; u32 mta; /* * The HASH Table is a register array of 2 32-bit registers. * It is treated like an array of 64 bits. We want to set * bit BitArray[hash_value]. So we figure out what register * the bit is in, read it, OR in the new bit, then write * back the new value. The register is determined by the * upper 7 bits of the hash value and the bit within that * register are determined by the lower 5 bits of the value. */ hash_reg = (hash_value >> 31) & 0x1; hash_bit = (hash_value >> 26) & 0x1F; mta = ioread32((hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); mta |= (1 << hash_bit); iowrite32(mta, (hw->hw_addr + REG_RX_HASH_TABLE) + (hash_reg << 2)); } /* * Writes a value to a PHY register * hw - Struct containing variables accessed by shared code * reg_addr - address of the PHY register to write * data - data to write to the PHY */ static s32 atl1_write_phy_reg(struct atl1_hw *hw, u32 reg_addr, u16 phy_data) { int i; u32 val; val = ((u32) (phy_data & MDIO_DATA_MASK)) << MDIO_DATA_SHIFT | (reg_addr & MDIO_REG_ADDR_MASK) << MDIO_REG_ADDR_SHIFT | MDIO_SUP_PREAMBLE | MDIO_START | MDIO_CLK_25_4 << MDIO_CLK_SEL_SHIFT; iowrite32(val, hw->hw_addr + REG_MDIO_CTRL); ioread32(hw->hw_addr + REG_MDIO_CTRL); for (i = 0; i < MDIO_WAIT_TIMES; i++) { udelay(2); val = ioread32(hw->hw_addr + REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; } if (!(val & (MDIO_START | MDIO_BUSY))) return 0; return ATLX_ERR_PHY; } /* * Make L001's PHY out of Power Saving State (bug) * hw - 
Struct containing variables accessed by shared code * when power on, L001's PHY always on Power saving State * (Gigabit Link forbidden) */ static s32 atl1_phy_leave_power_saving(struct atl1_hw *hw) { s32 ret; ret = atl1_write_phy_reg(hw, 29, 0x0029); if (ret) return ret; return atl1_write_phy_reg(hw, 30, 0); } /* * Resets the PHY and make all config validate * hw - Struct containing variables accessed by shared code * * Sets bit 15 and 12 of the MII Control regiser (for F001 bug) */ static s32 atl1_phy_reset(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; s32 ret_val; u16 phy_data; if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; else { switch (hw->media_type) { case MEDIA_TYPE_100M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_100M_HALF: phy_data = MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_10M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; break; default: /* MEDIA_TYPE_10M_HALF: */ phy_data = MII_CR_SPEED_10 | MII_CR_RESET; break; } } ret_val = atl1_write_phy_reg(hw, MII_BMCR, phy_data); if (ret_val) { u32 val; int i; /* pcie serdes link may be down! */ if (netif_msg_hw(adapter)) dev_dbg(&pdev->dev, "pcie phy link down\n"); for (i = 0; i < 25; i++) { msleep(1); val = ioread32(hw->hw_addr + REG_MDIO_CTRL); if (!(val & (MDIO_START | MDIO_BUSY))) break; } if ((val & (MDIO_START | MDIO_BUSY)) != 0) { if (netif_msg_hw(adapter)) dev_warn(&pdev->dev, "pcie link down at least 25ms\n"); return ret_val; } } return 0; } /* * Configures PHY autoneg and flow control advertisement settings * hw - Struct containing variables accessed by shared code */ static s32 atl1_phy_setup_autoneg_adv(struct atl1_hw *hw) { s32 ret_val; s16 mii_autoneg_adv_reg; s16 mii_1000t_ctrl_reg; /* Read the MII Auto-Neg Advertisement Register (Address 4). 
*/ mii_autoneg_adv_reg = MII_AR_DEFAULT_CAP_MASK; /* Read the MII 1000Base-T Control Register (Address 9). */ mii_1000t_ctrl_reg = MII_ATLX_CR_1000T_DEFAULT_CAP_MASK; /* * First we clear all the 10/100 mb speed bits in the Auto-Neg * Advertisement Register (Address 4) and the 1000 mb speed bits in * the 1000Base-T Control Register (Address 9). */ mii_autoneg_adv_reg &= ~MII_AR_SPEED_MASK; mii_1000t_ctrl_reg &= ~MII_ATLX_CR_1000T_SPEED_MASK; /* * Need to parse media_type and set up * the appropriate PHY registers. */ switch (hw->media_type) { case MEDIA_TYPE_AUTO_SENSOR: mii_autoneg_adv_reg |= (MII_AR_10T_HD_CAPS | MII_AR_10T_FD_CAPS | MII_AR_100TX_HD_CAPS | MII_AR_100TX_FD_CAPS); mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; break; case MEDIA_TYPE_1000M_FULL: mii_1000t_ctrl_reg |= MII_ATLX_CR_1000T_FD_CAPS; break; case MEDIA_TYPE_100M_FULL: mii_autoneg_adv_reg |= MII_AR_100TX_FD_CAPS; break; case MEDIA_TYPE_100M_HALF: mii_autoneg_adv_reg |= MII_AR_100TX_HD_CAPS; break; case MEDIA_TYPE_10M_FULL: mii_autoneg_adv_reg |= MII_AR_10T_FD_CAPS; break; default: mii_autoneg_adv_reg |= MII_AR_10T_HD_CAPS; break; } /* flow control fixed to enable all */ mii_autoneg_adv_reg |= (MII_AR_ASM_DIR | MII_AR_PAUSE); hw->mii_autoneg_adv_reg = mii_autoneg_adv_reg; hw->mii_1000t_ctrl_reg = mii_1000t_ctrl_reg; ret_val = atl1_write_phy_reg(hw, MII_ADVERTISE, mii_autoneg_adv_reg); if (ret_val) return ret_val; ret_val = atl1_write_phy_reg(hw, MII_ATLX_CR, mii_1000t_ctrl_reg); if (ret_val) return ret_val; return 0; } /* * Configures link settings. * hw - Struct containing variables accessed by shared code * Assumes the hardware has previously been reset and the * transmitter and receiver are not enabled. */ static s32 atl1_setup_link(struct atl1_hw *hw) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; s32 ret_val; /* * Options: * PHY will advertise value(s) parsed from * autoneg_advertised and fc * no matter what autoneg is , We will not wait link result. 
*/ ret_val = atl1_phy_setup_autoneg_adv(hw); if (ret_val) { if (netif_msg_link(adapter)) dev_dbg(&pdev->dev, "error setting up autonegotiation\n"); return ret_val; } /* SW.Reset , En-Auto-Neg if needed */ ret_val = atl1_phy_reset(hw); if (ret_val) { if (netif_msg_link(adapter)) dev_dbg(&pdev->dev, "error resetting phy\n"); return ret_val; } hw->phy_configured = true; return ret_val; } static void atl1_init_flash_opcode(struct atl1_hw *hw) { if (hw->flash_vendor >= ARRAY_SIZE(flash_table)) /* Atmel */ hw->flash_vendor = 0; /* Init OP table */ iowrite8(flash_table[hw->flash_vendor].cmd_program, hw->hw_addr + REG_SPI_FLASH_OP_PROGRAM); iowrite8(flash_table[hw->flash_vendor].cmd_sector_erase, hw->hw_addr + REG_SPI_FLASH_OP_SC_ERASE); iowrite8(flash_table[hw->flash_vendor].cmd_chip_erase, hw->hw_addr + REG_SPI_FLASH_OP_CHIP_ERASE); iowrite8(flash_table[hw->flash_vendor].cmd_rdid, hw->hw_addr + REG_SPI_FLASH_OP_RDID); iowrite8(flash_table[hw->flash_vendor].cmd_wren, hw->hw_addr + REG_SPI_FLASH_OP_WREN); iowrite8(flash_table[hw->flash_vendor].cmd_rdsr, hw->hw_addr + REG_SPI_FLASH_OP_RDSR); iowrite8(flash_table[hw->flash_vendor].cmd_wrsr, hw->hw_addr + REG_SPI_FLASH_OP_WRSR); iowrite8(flash_table[hw->flash_vendor].cmd_read, hw->hw_addr + REG_SPI_FLASH_OP_READ); } /* * Performs basic configuration of the adapter. * hw - Struct containing variables accessed by shared code * Assumes that the controller has previously been reset and is in a * post-reset uninitialized state. Initializes multicast table, * and Calls routines to setup link * Leaves the transmit and receive units disabled and uninitialized. 
*/ static s32 atl1_init_hw(struct atl1_hw *hw) { u32 ret_val = 0; /* Zero out the Multicast HASH table */ iowrite32(0, hw->hw_addr + REG_RX_HASH_TABLE); /* clear the old settings from the multicast hash table */ iowrite32(0, (hw->hw_addr + REG_RX_HASH_TABLE) + (1 << 2)); atl1_init_flash_opcode(hw); if (!hw->phy_configured) { /* enable GPHY LinkChange Interrrupt */ ret_val = atl1_write_phy_reg(hw, 18, 0xC00); if (ret_val) return ret_val; /* make PHY out of power-saving state */ ret_val = atl1_phy_leave_power_saving(hw); if (ret_val) return ret_val; /* Call a subroutine to configure the link */ ret_val = atl1_setup_link(hw); } return ret_val; } /* * Detects the current speed and duplex settings of the hardware. * hw - Struct containing variables accessed by shared code * speed - Speed of the connection * duplex - Duplex setting of the connection */ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex) { struct pci_dev *pdev = hw->back->pdev; struct atl1_adapter *adapter = hw->back; s32 ret_val; u16 phy_data; /* ; --- Read PHY Specific Status Register (17) */ ret_val = atl1_read_phy_reg(hw, MII_ATLX_PSSR, &phy_data); if (ret_val) return ret_val; if (!(phy_data & MII_ATLX_PSSR_SPD_DPLX_RESOLVED)) return ATLX_ERR_PHY_RES; switch (phy_data & MII_ATLX_PSSR_SPEED) { case MII_ATLX_PSSR_1000MBS: *speed = SPEED_1000; break; case MII_ATLX_PSSR_100MBS: *speed = SPEED_100; break; case MII_ATLX_PSSR_10MBS: *speed = SPEED_10; break; default: if (netif_msg_hw(adapter)) dev_dbg(&pdev->dev, "error getting speed\n"); return ATLX_ERR_PHY_SPEED; break; } if (phy_data & MII_ATLX_PSSR_DPLX) *duplex = FULL_DUPLEX; else *duplex = HALF_DUPLEX; return 0; } static void atl1_set_mac_addr(struct atl1_hw *hw) { u32 value; /* * 00-0B-6A-F6-00-DC * 0: 6AF600DC 1: 000B * low dword */ value = (((u32) hw->mac_addr[2]) << 24) | (((u32) hw->mac_addr[3]) << 16) | (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); /* 
high dword */ value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); iowrite32(value, (hw->hw_addr + REG_MAC_STA_ADDR) + (1 << 2)); } /* * atl1_sw_init - Initialize general software structures (struct atl1_adapter) * @adapter: board private structure to initialize * * atl1_sw_init initializes the Adapter private data structure. * Fields are initialized based on PCI device information and * OS network device settings (MTU size). */ static int __devinit atl1_sw_init(struct atl1_adapter *adapter) { struct atl1_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; hw->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN; hw->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; adapter->wol = 0; device_set_wakeup_enable(&adapter->pdev->dev, false); adapter->rx_buffer_len = (hw->max_frame_size + 7) & ~7; adapter->ict = 50000; /* 100ms */ adapter->link_speed = SPEED_0; /* hardware init */ adapter->link_duplex = FULL_DUPLEX; hw->phy_configured = false; hw->preamble_len = 7; hw->ipgt = 0x60; hw->min_ifg = 0x50; hw->ipgr1 = 0x40; hw->ipgr2 = 0x60; hw->max_retry = 0xf; hw->lcol = 0x37; hw->jam_ipg = 7; hw->rfd_burst = 8; hw->rrd_burst = 8; hw->rfd_fetch_gap = 1; hw->rx_jumbo_th = adapter->rx_buffer_len / 8; hw->rx_jumbo_lkah = 1; hw->rrd_ret_timer = 16; hw->tpd_burst = 4; hw->tpd_fetch_th = 16; hw->txf_burst = 0x100; hw->tx_jumbo_task_th = (hw->max_frame_size + 7) >> 3; hw->tpd_fetch_gap = 1; hw->rcb_value = atl1_rcb_64; hw->dma_ord = atl1_dma_ord_enh; hw->dmar_block = atl1_dma_req_256; hw->dmaw_block = atl1_dma_req_256; hw->cmb_rrd = 4; hw->cmb_tpd = 4; hw->cmb_rx_timer = 1; /* about 2us */ hw->cmb_tx_timer = 1; /* about 2us */ hw->smb_timer = 100000; /* about 200ms */ spin_lock_init(&adapter->lock); spin_lock_init(&adapter->mb_lock); return 0; } static int mdio_read(struct net_device *netdev, int phy_id, int reg_num) { struct atl1_adapter *adapter = netdev_priv(netdev); u16 result; atl1_read_phy_reg(&adapter->hw, reg_num & 0x1f, &result); return 
result; } static void mdio_write(struct net_device *netdev, int phy_id, int reg_num, int val) { struct atl1_adapter *adapter = netdev_priv(netdev); atl1_write_phy_reg(&adapter->hw, reg_num, val); } /* * atl1_mii_ioctl - * @netdev: * @ifreq: * @cmd: */ static int atl1_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct atl1_adapter *adapter = netdev_priv(netdev); unsigned long flags; int retval; if (!netif_running(netdev)) return -EINVAL; spin_lock_irqsave(&adapter->lock, flags); retval = generic_mii_ioctl(&adapter->mii, if_mii(ifr), cmd, NULL); spin_unlock_irqrestore(&adapter->lock, flags); return retval; } /* * atl1_setup_mem_resources - allocate Tx / RX descriptor resources * @adapter: board private structure * * Return 0 on success, negative on failure */ static s32 atl1_setup_ring_resources(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_ring_header *ring_header = &adapter->ring_header; struct pci_dev *pdev = adapter->pdev; int size; u8 offset = 0; size = sizeof(struct atl1_buffer) * (tpd_ring->count + rfd_ring->count); tpd_ring->buffer_info = kzalloc(size, GFP_KERNEL); if (unlikely(!tpd_ring->buffer_info)) { if (netif_msg_drv(adapter)) dev_err(&pdev->dev, "kzalloc failed , size = D%d\n", size); goto err_nomem; } rfd_ring->buffer_info = (struct atl1_buffer *)(tpd_ring->buffer_info + tpd_ring->count); /* * real ring DMA buffer * each ring/block may need up to 8 bytes for alignment, hence the * additional 40 bytes tacked onto the end. 
*/ ring_header->size = size = sizeof(struct tx_packet_desc) * tpd_ring->count + sizeof(struct rx_free_desc) * rfd_ring->count + sizeof(struct rx_return_desc) * rrd_ring->count + sizeof(struct coals_msg_block) + sizeof(struct stats_msg_block) + 40; ring_header->desc = pci_alloc_consistent(pdev, ring_header->size, &ring_header->dma); if (unlikely(!ring_header->desc)) { if (netif_msg_drv(adapter)) dev_err(&pdev->dev, "pci_alloc_consistent failed\n"); goto err_nomem; } memset(ring_header->desc, 0, ring_header->size); /* init TPD ring */ tpd_ring->dma = ring_header->dma; offset = (tpd_ring->dma & 0x7) ? (8 - (ring_header->dma & 0x7)) : 0; tpd_ring->dma += offset; tpd_ring->desc = (u8 *) ring_header->desc + offset; tpd_ring->size = sizeof(struct tx_packet_desc) * tpd_ring->count; /* init RFD ring */ rfd_ring->dma = tpd_ring->dma + tpd_ring->size; offset = (rfd_ring->dma & 0x7) ? (8 - (rfd_ring->dma & 0x7)) : 0; rfd_ring->dma += offset; rfd_ring->desc = (u8 *) tpd_ring->desc + (tpd_ring->size + offset); rfd_ring->size = sizeof(struct rx_free_desc) * rfd_ring->count; /* init RRD ring */ rrd_ring->dma = rfd_ring->dma + rfd_ring->size; offset = (rrd_ring->dma & 0x7) ? (8 - (rrd_ring->dma & 0x7)) : 0; rrd_ring->dma += offset; rrd_ring->desc = (u8 *) rfd_ring->desc + (rfd_ring->size + offset); rrd_ring->size = sizeof(struct rx_return_desc) * rrd_ring->count; /* init CMB */ adapter->cmb.dma = rrd_ring->dma + rrd_ring->size; offset = (adapter->cmb.dma & 0x7) ? (8 - (adapter->cmb.dma & 0x7)) : 0; adapter->cmb.dma += offset; adapter->cmb.cmb = (struct coals_msg_block *) ((u8 *) rrd_ring->desc + (rrd_ring->size + offset)); /* init SMB */ adapter->smb.dma = adapter->cmb.dma + sizeof(struct coals_msg_block); offset = (adapter->smb.dma & 0x7) ? 
(8 - (adapter->smb.dma & 0x7)) : 0; adapter->smb.dma += offset; adapter->smb.smb = (struct stats_msg_block *) ((u8 *) adapter->cmb.cmb + (sizeof(struct coals_msg_block) + offset)); return 0; err_nomem: kfree(tpd_ring->buffer_info); return -ENOMEM; } static void atl1_init_ring_ptrs(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; atomic_set(&tpd_ring->next_to_use, 0); atomic_set(&tpd_ring->next_to_clean, 0); rfd_ring->next_to_clean = 0; atomic_set(&rfd_ring->next_to_use, 0); rrd_ring->next_to_use = 0; atomic_set(&rrd_ring->next_to_clean, 0); } /* * atl1_clean_rx_ring - Free RFD Buffers * @adapter: board private structure */ static void atl1_clean_rx_ring(struct atl1_adapter *adapter) { struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; /* Free all the Rx ring sk_buffs */ for (i = 0; i < rfd_ring->count; i++) { buffer_info = &rfd_ring->buffer_info[i]; if (buffer_info->dma) { pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, PCI_DMA_FROMDEVICE); buffer_info->dma = 0; } if (buffer_info->skb) { dev_kfree_skb(buffer_info->skb); buffer_info->skb = NULL; } } size = sizeof(struct atl1_buffer) * rfd_ring->count; memset(rfd_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ memset(rfd_ring->desc, 0, rfd_ring->size); rfd_ring->next_to_clean = 0; atomic_set(&rfd_ring->next_to_use, 0); rrd_ring->next_to_use = 0; atomic_set(&rrd_ring->next_to_clean, 0); } /* * atl1_clean_tx_ring - Free Tx Buffers * @adapter: board private structure */ static void atl1_clean_tx_ring(struct atl1_adapter *adapter) { struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_buffer *buffer_info; struct pci_dev *pdev = adapter->pdev; unsigned long size; unsigned int i; /* 
Free all the Tx ring sk_buffs */ for (i = 0; i < tpd_ring->count; i++) { buffer_info = &tpd_ring->buffer_info[i]; if (buffer_info->dma) { pci_unmap_page(pdev, buffer_info->dma, buffer_info->length, PCI_DMA_TODEVICE); buffer_info->dma = 0; } } for (i = 0; i < tpd_ring->count; i++) { buffer_info = &tpd_ring->buffer_info[i]; if (buffer_info->skb) { dev_kfree_skb_any(buffer_info->skb); buffer_info->skb = NULL; } } size = sizeof(struct atl1_buffer) * tpd_ring->count; memset(tpd_ring->buffer_info, 0, size); /* Zero out the descriptor ring */ memset(tpd_ring->desc, 0, tpd_ring->size); atomic_set(&tpd_ring->next_to_use, 0); atomic_set(&tpd_ring->next_to_clean, 0); } /* * atl1_free_ring_resources - Free Tx / RX descriptor Resources * @adapter: board private structure * * Free all transmit software resources */ static void atl1_free_ring_resources(struct atl1_adapter *adapter) { struct pci_dev *pdev = adapter->pdev; struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_ring_header *ring_header = &adapter->ring_header; atl1_clean_tx_ring(adapter); atl1_clean_rx_ring(adapter); kfree(tpd_ring->buffer_info); pci_free_consistent(pdev, ring_header->size, ring_header->desc, ring_header->dma); tpd_ring->buffer_info = NULL; tpd_ring->desc = NULL; tpd_ring->dma = 0; rfd_ring->buffer_info = NULL; rfd_ring->desc = NULL; rfd_ring->dma = 0; rrd_ring->desc = NULL; rrd_ring->dma = 0; adapter->cmb.dma = 0; adapter->cmb.cmb = NULL; adapter->smb.dma = 0; adapter->smb.smb = NULL; } static void atl1_setup_mac_ctrl(struct atl1_adapter *adapter) { u32 value; struct atl1_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; /* Config MAC CTRL Register */ value = MAC_CTRL_TX_EN | MAC_CTRL_RX_EN; /* duplex */ if (FULL_DUPLEX == adapter->link_duplex) value |= MAC_CTRL_DUPLX; /* speed */ value |= ((u32) ((SPEED_1000 == adapter->link_speed) ? 
MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT); /* flow control */ value |= (MAC_CTRL_TX_FLOW | MAC_CTRL_RX_FLOW); /* PAD & CRC */ value |= (MAC_CTRL_ADD_CRC | MAC_CTRL_PAD); /* preamble length */ value |= (((u32) adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); /* vlan */ if (adapter->vlgrp) value |= MAC_CTRL_RMV_VLAN; /* rx checksum if (adapter->rx_csum) value |= MAC_CTRL_RX_CHKSUM_EN; */ /* filter mode */ value |= MAC_CTRL_BC_EN; if (netdev->flags & IFF_PROMISC) value |= MAC_CTRL_PROMIS_EN; else if (netdev->flags & IFF_ALLMULTI) value |= MAC_CTRL_MC_ALL_EN; /* value |= MAC_CTRL_LOOPBACK; */ iowrite32(value, hw->hw_addr + REG_MAC_CTRL); } static u32 atl1_check_link(struct atl1_adapter *adapter) { struct atl1_hw *hw = &adapter->hw; struct net_device *netdev = adapter->netdev; u32 ret_val; u16 speed, duplex, phy_data; int reconfig = 0; /* MII_BMSR must read twice */ atl1_read_phy_reg(hw, MII_BMSR, &phy_data); atl1_read_phy_reg(hw, MII_BMSR, &phy_data); if (!(phy_data & BMSR_LSTATUS)) { /* link down */ if (netif_carrier_ok(netdev)) { /* old link state: Up */ if (netif_msg_link(adapter)) dev_info(&adapter->pdev->dev, "link is down\n"); adapter->link_speed = SPEED_0; netif_carrier_off(netdev); } return 0; } /* Link Up */ ret_val = atl1_get_speed_and_duplex(hw, &speed, &duplex); if (ret_val) return ret_val; switch (hw->media_type) { case MEDIA_TYPE_1000M_FULL: if (speed != SPEED_1000 || duplex != FULL_DUPLEX) reconfig = 1; break; case MEDIA_TYPE_100M_FULL: if (speed != SPEED_100 || duplex != FULL_DUPLEX) reconfig = 1; break; case MEDIA_TYPE_100M_HALF: if (speed != SPEED_100 || duplex != HALF_DUPLEX) reconfig = 1; break; case MEDIA_TYPE_10M_FULL: if (speed != SPEED_10 || duplex != FULL_DUPLEX) reconfig = 1; break; case MEDIA_TYPE_10M_HALF: if (speed != SPEED_10 || duplex != HALF_DUPLEX) reconfig = 1; break; } /* link result is our setting */ if (!reconfig) { if (adapter->link_speed != speed || adapter->link_duplex != 
duplex) { adapter->link_speed = speed; adapter->link_duplex = duplex; atl1_setup_mac_ctrl(adapter); if (netif_msg_link(adapter)) dev_info(&adapter->pdev->dev, "%s link is up %d Mbps %s\n", netdev->name, adapter->link_speed, adapter->link_duplex == FULL_DUPLEX ? "full duplex" : "half duplex"); } if (!netif_carrier_ok(netdev)) { /* Link down -> Up */ netif_carrier_on(netdev); } return 0; } /* change original link status */ if (netif_carrier_ok(netdev)) { adapter->link_speed = SPEED_0; netif_carrier_off(netdev); netif_stop_queue(netdev); } if (hw->media_type != MEDIA_TYPE_AUTO_SENSOR && hw->media_type != MEDIA_TYPE_1000M_FULL) { switch (hw->media_type) { case MEDIA_TYPE_100M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_100M_HALF: phy_data = MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_10M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; break; default: /* MEDIA_TYPE_10M_HALF: */ phy_data = MII_CR_SPEED_10 | MII_CR_RESET; break; } atl1_write_phy_reg(hw, MII_BMCR, phy_data); return 0; } /* auto-neg, insert timer to re-config phy */ if (!adapter->phy_timer_pending) { adapter->phy_timer_pending = true; mod_timer(&adapter->phy_config_timer, round_jiffies(jiffies + 3 * HZ)); } return 0; } static void set_flow_ctrl_old(struct atl1_adapter *adapter) { u32 hi, lo, value; /* RFD Flow Control */ value = adapter->rfd_ring.count; hi = value / 16; if (hi < 2) hi = 2; lo = value * 7 / 8; value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RXF_PAUSE_THRESH); /* RRD Flow Control */ value = adapter->rrd_ring.count; lo = value / 16; hi = value * 7 / 8; if (lo < 2) lo = 2; value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); iowrite32(value, adapter->hw.hw_addr + REG_RXQ_RRD_PAUSE_THRESH); } 
static void set_flow_ctrl_new(struct atl1_hw *hw) { u32 hi, lo, value; /* RXF Flow Control */ value = ioread32(hw->hw_addr + REG_SRAM_RXF_LEN); lo = value / 16; if (lo < 192) lo = 192; hi = value * 7 / 8; if (hi < lo) hi = lo + 16; value = ((hi & RXQ_RXF_PAUSE_TH_HI_MASK) << RXQ_RXF_PAUSE_TH_HI_SHIFT) | ((lo & RXQ_RXF_PAUSE_TH_LO_MASK) << RXQ_RXF_PAUSE_TH_LO_SHIFT); iowrite32(value, hw->hw_addr + REG_RXQ_RXF_PAUSE_THRESH); /* RRD Flow Control */ value = ioread32(hw->hw_addr + REG_SRAM_RRD_LEN); lo = value / 8; hi = value * 7 / 8; if (lo < 2) lo = 2; if (hi < lo) hi = lo + 3; value = ((hi & RXQ_RRD_PAUSE_TH_HI_MASK) << RXQ_RRD_PAUSE_TH_HI_SHIFT) | ((lo & RXQ_RRD_PAUSE_TH_LO_MASK) << RXQ_RRD_PAUSE_TH_LO_SHIFT); iowrite32(value, hw->hw_addr + REG_RXQ_RRD_PAUSE_THRESH); } /* * atl1_configure - Configure Transmit&Receive Unit after Reset * @adapter: board private structure * * Configure the Tx /Rx unit of the MAC after a reset. */ static u32 atl1_configure(struct atl1_adapter *adapter) { struct atl1_hw *hw = &adapter->hw; u32 value; /* clear interrupt status */ iowrite32(0xffffffff, adapter->hw.hw_addr + REG_ISR); /* set MAC Address */ value = (((u32) hw->mac_addr[2]) << 24) | (((u32) hw->mac_addr[3]) << 16) | (((u32) hw->mac_addr[4]) << 8) | (((u32) hw->mac_addr[5])); iowrite32(value, hw->hw_addr + REG_MAC_STA_ADDR); value = (((u32) hw->mac_addr[0]) << 8) | (((u32) hw->mac_addr[1])); iowrite32(value, hw->hw_addr + (REG_MAC_STA_ADDR + 4)); /* tx / rx ring */ /* HI base address */ iowrite32((u32) ((adapter->tpd_ring.dma & 0xffffffff00000000ULL) >> 32), hw->hw_addr + REG_DESC_BASE_ADDR_HI); /* LO base address */ iowrite32((u32) (adapter->rfd_ring.dma & 0x00000000ffffffffULL), hw->hw_addr + REG_DESC_RFD_ADDR_LO); iowrite32((u32) (adapter->rrd_ring.dma & 0x00000000ffffffffULL), hw->hw_addr + REG_DESC_RRD_ADDR_LO); iowrite32((u32) (adapter->tpd_ring.dma & 0x00000000ffffffffULL), hw->hw_addr + REG_DESC_TPD_ADDR_LO); iowrite32((u32) (adapter->cmb.dma & 0x00000000ffffffffULL), 
hw->hw_addr + REG_DESC_CMB_ADDR_LO); iowrite32((u32) (adapter->smb.dma & 0x00000000ffffffffULL), hw->hw_addr + REG_DESC_SMB_ADDR_LO); /* element count */ value = adapter->rrd_ring.count; value <<= 16; value += adapter->rfd_ring.count; iowrite32(value, hw->hw_addr + REG_DESC_RFD_RRD_RING_SIZE); iowrite32(adapter->tpd_ring.count, hw->hw_addr + REG_DESC_TPD_RING_SIZE); /* Load Ptr */ iowrite32(1, hw->hw_addr + REG_LOAD_PTR); /* config Mailbox */ value = ((atomic_read(&adapter->tpd_ring.next_to_use) & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT) | ((atomic_read(&adapter->rrd_ring.next_to_clean) & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | ((atomic_read(&adapter->rfd_ring.next_to_use) & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT); iowrite32(value, hw->hw_addr + REG_MAILBOX); /* config IPG/IFG */ value = (((u32) hw->ipgt & MAC_IPG_IFG_IPGT_MASK) << MAC_IPG_IFG_IPGT_SHIFT) | (((u32) hw->min_ifg & MAC_IPG_IFG_MIFG_MASK) << MAC_IPG_IFG_MIFG_SHIFT) | (((u32) hw->ipgr1 & MAC_IPG_IFG_IPGR1_MASK) << MAC_IPG_IFG_IPGR1_SHIFT) | (((u32) hw->ipgr2 & MAC_IPG_IFG_IPGR2_MASK) << MAC_IPG_IFG_IPGR2_SHIFT); iowrite32(value, hw->hw_addr + REG_MAC_IPG_IFG); /* config Half-Duplex Control */ value = ((u32) hw->lcol & MAC_HALF_DUPLX_CTRL_LCOL_MASK) | (((u32) hw->max_retry & MAC_HALF_DUPLX_CTRL_RETRY_MASK) << MAC_HALF_DUPLX_CTRL_RETRY_SHIFT) | MAC_HALF_DUPLX_CTRL_EXC_DEF_EN | (0xa << MAC_HALF_DUPLX_CTRL_ABEBT_SHIFT) | (((u32) hw->jam_ipg & MAC_HALF_DUPLX_CTRL_JAMIPG_MASK) << MAC_HALF_DUPLX_CTRL_JAMIPG_SHIFT); iowrite32(value, hw->hw_addr + REG_MAC_HALF_DUPLX_CTRL); /* set Interrupt Moderator Timer */ iowrite16(adapter->imt, hw->hw_addr + REG_IRQ_MODU_TIMER_INIT); iowrite32(MASTER_CTRL_ITIMER_EN, hw->hw_addr + REG_MASTER_CTRL); /* set Interrupt Clear Timer */ iowrite16(adapter->ict, hw->hw_addr + REG_CMBDISDMA_TIMER); /* set max frame size hw will accept */ iowrite32(hw->max_frame_size, hw->hw_addr + REG_MTU); /* jumbo size & rrd retirement timer */ value = (((u32) 
hw->rx_jumbo_th & RXQ_JMBOSZ_TH_MASK) << RXQ_JMBOSZ_TH_SHIFT) | (((u32) hw->rx_jumbo_lkah & RXQ_JMBO_LKAH_MASK) << RXQ_JMBO_LKAH_SHIFT) | (((u32) hw->rrd_ret_timer & RXQ_RRD_TIMER_MASK) << RXQ_RRD_TIMER_SHIFT); iowrite32(value, hw->hw_addr + REG_RXQ_JMBOSZ_RRDTIM); /* Flow Control */ switch (hw->dev_rev) { case 0x8001: case 0x9001: case 0x9002: case 0x9003: set_flow_ctrl_old(adapter); break; default: set_flow_ctrl_new(hw); break; } /* config TXQ */ value = (((u32) hw->tpd_burst & TXQ_CTRL_TPD_BURST_NUM_MASK) << TXQ_CTRL_TPD_BURST_NUM_SHIFT) | (((u32) hw->txf_burst & TXQ_CTRL_TXF_BURST_NUM_MASK) << TXQ_CTRL_TXF_BURST_NUM_SHIFT) | (((u32) hw->tpd_fetch_th & TXQ_CTRL_TPD_FETCH_TH_MASK) << TXQ_CTRL_TPD_FETCH_TH_SHIFT) | TXQ_CTRL_ENH_MODE | TXQ_CTRL_EN; iowrite32(value, hw->hw_addr + REG_TXQ_CTRL); /* min tpd fetch gap & tx jumbo packet size threshold for taskoffload */ value = (((u32) hw->tx_jumbo_task_th & TX_JUMBO_TASK_TH_MASK) << TX_JUMBO_TASK_TH_SHIFT) | (((u32) hw->tpd_fetch_gap & TX_TPD_MIN_IPG_MASK) << TX_TPD_MIN_IPG_SHIFT); iowrite32(value, hw->hw_addr + REG_TX_JUMBO_TASK_TH_TPD_IPG); /* config RXQ */ value = (((u32) hw->rfd_burst & RXQ_CTRL_RFD_BURST_NUM_MASK) << RXQ_CTRL_RFD_BURST_NUM_SHIFT) | (((u32) hw->rrd_burst & RXQ_CTRL_RRD_BURST_THRESH_MASK) << RXQ_CTRL_RRD_BURST_THRESH_SHIFT) | (((u32) hw->rfd_fetch_gap & RXQ_CTRL_RFD_PREF_MIN_IPG_MASK) << RXQ_CTRL_RFD_PREF_MIN_IPG_SHIFT) | RXQ_CTRL_CUT_THRU_EN | RXQ_CTRL_EN; iowrite32(value, hw->hw_addr + REG_RXQ_CTRL); /* config DMA Engine */ value = ((((u32) hw->dmar_block) & DMA_CTRL_DMAR_BURST_LEN_MASK) << DMA_CTRL_DMAR_BURST_LEN_SHIFT) | ((((u32) hw->dmaw_block) & DMA_CTRL_DMAW_BURST_LEN_MASK) << DMA_CTRL_DMAW_BURST_LEN_SHIFT) | DMA_CTRL_DMAR_EN | DMA_CTRL_DMAW_EN; value |= (u32) hw->dma_ord; if (atl1_rcb_128 == hw->rcb_value) value |= DMA_CTRL_RCB_VALUE; iowrite32(value, hw->hw_addr + REG_DMA_CTRL); /* config CMB / SMB */ value = (hw->cmb_tpd > adapter->tpd_ring.count) ? 
hw->cmb_tpd : adapter->tpd_ring.count; value <<= 16; value |= hw->cmb_rrd; iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TH); value = hw->cmb_rx_timer | ((u32) hw->cmb_tx_timer << 16); iowrite32(value, hw->hw_addr + REG_CMB_WRITE_TIMER); iowrite32(hw->smb_timer, hw->hw_addr + REG_SMB_TIMER); /* --- enable CMB / SMB */ value = CSMB_CTRL_CMB_EN | CSMB_CTRL_SMB_EN; iowrite32(value, hw->hw_addr + REG_CSMB_CTRL); value = ioread32(adapter->hw.hw_addr + REG_ISR); if (unlikely((value & ISR_PHY_LINKDOWN) != 0)) value = 1; /* config failed */ else value = 0; /* clear all interrupt status */ iowrite32(0x3fffffff, adapter->hw.hw_addr + REG_ISR); iowrite32(0, adapter->hw.hw_addr + REG_ISR); return value; } /* * atl1_pcie_patch - Patch for PCIE module */ static void atl1_pcie_patch(struct atl1_adapter *adapter) { u32 value; /* much vendor magic here */ value = 0x6500; iowrite32(value, adapter->hw.hw_addr + 0x12FC); /* pcie flow control mode change */ value = ioread32(adapter->hw.hw_addr + 0x1008); value |= 0x8000; iowrite32(value, adapter->hw.hw_addr + 0x1008); } /* * When ACPI resume on some VIA MotherBoard, the Interrupt Disable bit/0x400 * on PCI Command register is disable. * The function enable this bit. 
* Brackett, 2006/03/15 */ static void atl1_via_workaround(struct atl1_adapter *adapter) { unsigned long value; value = ioread16(adapter->hw.hw_addr + PCI_COMMAND); if (value & PCI_COMMAND_INTX_DISABLE) value &= ~PCI_COMMAND_INTX_DISABLE; iowrite32(value, adapter->hw.hw_addr + PCI_COMMAND); } static void atl1_inc_smb(struct atl1_adapter *adapter) { struct net_device *netdev = adapter->netdev; struct stats_msg_block *smb = adapter->smb.smb; /* Fill out the OS statistics structure */ adapter->soft_stats.rx_packets += smb->rx_ok; adapter->soft_stats.tx_packets += smb->tx_ok; adapter->soft_stats.rx_bytes += smb->rx_byte_cnt; adapter->soft_stats.tx_bytes += smb->tx_byte_cnt; adapter->soft_stats.multicast += smb->rx_mcast; adapter->soft_stats.collisions += (smb->tx_1_col + smb->tx_2_col * 2 + smb->tx_late_col + smb->tx_abort_col * adapter->hw.max_retry); /* Rx Errors */ adapter->soft_stats.rx_errors += (smb->rx_frag + smb->rx_fcs_err + smb->rx_len_err + smb->rx_sz_ov + smb->rx_rxf_ov + smb->rx_rrd_ov + smb->rx_align_err); adapter->soft_stats.rx_fifo_errors += smb->rx_rxf_ov; adapter->soft_stats.rx_length_errors += smb->rx_len_err; adapter->soft_stats.rx_crc_errors += smb->rx_fcs_err; adapter->soft_stats.rx_frame_errors += smb->rx_align_err; adapter->soft_stats.rx_missed_errors += (smb->rx_rrd_ov + smb->rx_rxf_ov); adapter->soft_stats.rx_pause += smb->rx_pause; adapter->soft_stats.rx_rrd_ov += smb->rx_rrd_ov; adapter->soft_stats.rx_trunc += smb->rx_sz_ov; /* Tx Errors */ adapter->soft_stats.tx_errors += (smb->tx_late_col + smb->tx_abort_col + smb->tx_underrun + smb->tx_trunc); adapter->soft_stats.tx_fifo_errors += smb->tx_underrun; adapter->soft_stats.tx_aborted_errors += smb->tx_abort_col; adapter->soft_stats.tx_window_errors += smb->tx_late_col; adapter->soft_stats.excecol += smb->tx_abort_col; adapter->soft_stats.deffer += smb->tx_defer; adapter->soft_stats.scc += smb->tx_1_col; adapter->soft_stats.mcc += smb->tx_2_col; adapter->soft_stats.latecol += smb->tx_late_col; 
adapter->soft_stats.tx_underun += smb->tx_underrun; adapter->soft_stats.tx_trunc += smb->tx_trunc; adapter->soft_stats.tx_pause += smb->tx_pause; netdev->stats.rx_packets = adapter->soft_stats.rx_packets; netdev->stats.tx_packets = adapter->soft_stats.tx_packets; netdev->stats.rx_bytes = adapter->soft_stats.rx_bytes; netdev->stats.tx_bytes = adapter->soft_stats.tx_bytes; netdev->stats.multicast = adapter->soft_stats.multicast; netdev->stats.collisions = adapter->soft_stats.collisions; netdev->stats.rx_errors = adapter->soft_stats.rx_errors; netdev->stats.rx_over_errors = adapter->soft_stats.rx_missed_errors; netdev->stats.rx_length_errors = adapter->soft_stats.rx_length_errors; netdev->stats.rx_crc_errors = adapter->soft_stats.rx_crc_errors; netdev->stats.rx_frame_errors = adapter->soft_stats.rx_frame_errors; netdev->stats.rx_fifo_errors = adapter->soft_stats.rx_fifo_errors; netdev->stats.rx_missed_errors = adapter->soft_stats.rx_missed_errors; netdev->stats.tx_errors = adapter->soft_stats.tx_errors; netdev->stats.tx_fifo_errors = adapter->soft_stats.tx_fifo_errors; netdev->stats.tx_aborted_errors = adapter->soft_stats.tx_aborted_errors; netdev->stats.tx_window_errors = adapter->soft_stats.tx_window_errors; netdev->stats.tx_carrier_errors = adapter->soft_stats.tx_carrier_errors; } static void atl1_update_mailbox(struct atl1_adapter *adapter) { unsigned long flags; u32 tpd_next_to_use; u32 rfd_next_to_use; u32 rrd_next_to_clean; u32 value; spin_lock_irqsave(&adapter->mb_lock, flags); tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use); rfd_next_to_use = atomic_read(&adapter->rfd_ring.next_to_use); rrd_next_to_clean = atomic_read(&adapter->rrd_ring.next_to_clean); value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) << MB_RFD_PROD_INDX_SHIFT) | ((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) << MB_RRD_CONS_INDX_SHIFT) | ((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) << MB_TPD_PROD_INDX_SHIFT); iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX); 
spin_unlock_irqrestore(&adapter->mb_lock, flags); } static void atl1_clean_alloc_flag(struct atl1_adapter *adapter, struct rx_return_desc *rrd, u16 offset) { struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; while (rfd_ring->next_to_clean != (rrd->buf_indx + offset)) { rfd_ring->buffer_info[rfd_ring->next_to_clean].alloced = 0; if (++rfd_ring->next_to_clean == rfd_ring->count) { rfd_ring->next_to_clean = 0; } } } static void atl1_update_rfd_index(struct atl1_adapter *adapter, struct rx_return_desc *rrd) { u16 num_buf; num_buf = (rrd->xsz.xsum_sz.pkt_size + adapter->rx_buffer_len - 1) / adapter->rx_buffer_len; if (rrd->num_buf == num_buf) /* clean alloc flag for bad rrd */ atl1_clean_alloc_flag(adapter, rrd, num_buf); } static void atl1_rx_checksum(struct atl1_adapter *adapter, struct rx_return_desc *rrd, struct sk_buff *skb) { struct pci_dev *pdev = adapter->pdev; /* * The L1 hardware contains a bug that erroneously sets the * PACKET_FLAG_ERR and ERR_FLAG_L4_CHKSUM bits whenever a * fragmented IP packet is received, even though the packet * is perfectly valid and its checksum is correct. There's * no way to distinguish between one of these good packets * and a packet that actually contains a TCP/UDP checksum * error, so all we can do is allow it to be handed up to * the higher layers and let it be sorted out there. 
*/ skb_checksum_none_assert(skb); if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) { if (rrd->err_flg & (ERR_FLAG_CRC | ERR_FLAG_TRUNC | ERR_FLAG_CODE | ERR_FLAG_OV)) { adapter->hw_csum_err++; if (netif_msg_rx_err(adapter)) dev_printk(KERN_DEBUG, &pdev->dev, "rx checksum error\n"); return; } } /* not IPv4 */ if (!(rrd->pkt_flg & PACKET_FLAG_IPV4)) /* checksum is invalid, but it's not an IPv4 pkt, so ok */ return; /* IPv4 packet */ if (likely(!(rrd->err_flg & (ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM)))) { skb->ip_summed = CHECKSUM_UNNECESSARY; adapter->hw_csum_good++; return; } } /* * atl1_alloc_rx_buffers - Replace used receive buffers * @adapter: address of board private structure */ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter) { struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct pci_dev *pdev = adapter->pdev; struct page *page; unsigned long offset; struct atl1_buffer *buffer_info, *next_info; struct sk_buff *skb; u16 num_alloc = 0; u16 rfd_next_to_use, next_next; struct rx_free_desc *rfd_desc; next_next = rfd_next_to_use = atomic_read(&rfd_ring->next_to_use); if (++next_next == rfd_ring->count) next_next = 0; buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; next_info = &rfd_ring->buffer_info[next_next]; while (!buffer_info->alloced && !next_info->alloced) { if (buffer_info->skb) { buffer_info->alloced = 1; goto next; } rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); skb = netdev_alloc_skb_ip_align(adapter->netdev, adapter->rx_buffer_len); if (unlikely(!skb)) { /* Better luck next round */ adapter->netdev->stats.rx_dropped++; break; } buffer_info->alloced = 1; buffer_info->skb = skb; buffer_info->length = (u16) adapter->rx_buffer_len; page = virt_to_page(skb->data); offset = (unsigned long)skb->data & ~PAGE_MASK; buffer_info->dma = pci_map_page(pdev, page, offset, adapter->rx_buffer_len, PCI_DMA_FROMDEVICE); rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); rfd_desc->buf_len = cpu_to_le16(adapter->rx_buffer_len); 
rfd_desc->coalese = 0; next: rfd_next_to_use = next_next; if (unlikely(++next_next == rfd_ring->count)) next_next = 0; buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; next_info = &rfd_ring->buffer_info[next_next]; num_alloc++; } if (num_alloc) { /* * Force memory writes to complete before letting h/w * know there are new descriptors to fetch. (Only * applicable for weak-ordered memory model archs, * such as IA-64). */ wmb(); atomic_set(&rfd_ring->next_to_use, (int)rfd_next_to_use); } return num_alloc; } static void atl1_intr_rx(struct atl1_adapter *adapter) { int i, count; u16 length; u16 rrd_next_to_clean; u32 value; struct atl1_rfd_ring *rfd_ring = &adapter->rfd_ring; struct atl1_rrd_ring *rrd_ring = &adapter->rrd_ring; struct atl1_buffer *buffer_info; struct rx_return_desc *rrd; struct sk_buff *skb; count = 0; rrd_next_to_clean = atomic_read(&rrd_ring->next_to_clean); while (1) { rrd = ATL1_RRD_DESC(rrd_ring, rrd_next_to_clean); i = 1; if (likely(rrd->xsz.valid)) { /* packet valid */ chk_rrd: /* check rrd status */ if (likely(rrd->num_buf == 1)) goto rrd_ok; else if (netif_msg_rx_err(adapter)) { dev_printk(KERN_DEBUG, &adapter->pdev->dev, "unexpected RRD buffer count\n"); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "rx_buf_len = %d\n", adapter->rx_buffer_len); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "RRD num_buf = %d\n", rrd->num_buf); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "RRD pkt_len = %d\n", rrd->xsz.xsum_sz.pkt_size); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "RRD pkt_flg = 0x%08X\n", rrd->pkt_flg); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "RRD err_flg = 0x%08X\n", rrd->err_flg); dev_printk(KERN_DEBUG, &adapter->pdev->dev, "RRD vlan_tag = 0x%08X\n", rrd->vlan_tag); } /* rrd seems to be bad */ if (unlikely(i-- > 0)) { /* rrd may not be DMAed completely */ udelay(1); goto chk_rrd; } /* bad rrd */ if (netif_msg_rx_err(adapter)) dev_printk(KERN_DEBUG, &adapter->pdev->dev, "bad RRD\n"); /* see if update RFD index */ if (rrd->num_buf > 1) 
				atl1_update_rfd_index(adapter, rrd);

			/* update rrd: give it back to hardware and move on */
			rrd->xsz.valid = 0;
			if (++rrd_next_to_clean == rrd_ring->count)
				rrd_next_to_clean = 0;
			count++;
			continue;
		} else {	/* current rrd still not be updated */
			break;
		}
rrd_ok:
		/* clean alloc flag for bad rrd */
		atl1_clean_alloc_flag(adapter, rrd, 0);

		buffer_info = &rfd_ring->buffer_info[rrd->buf_indx];
		if (++rfd_ring->next_to_clean == rfd_ring->count)
			rfd_ring->next_to_clean = 0;

		/* update rrd next to clean */
		if (++rrd_next_to_clean == rrd_ring->count)
			rrd_next_to_clean = 0;
		count++;

		if (unlikely(rrd->pkt_flg & PACKET_FLAG_ERR)) {
			if (!(rrd->err_flg &
				(ERR_FLAG_IP_CHKSUM | ERR_FLAG_L4_CHKSUM
				| ERR_FLAG_LEN))) {
				/* packet error, don't need upstream */
				buffer_info->alloced = 0;
				rrd->xsz.valid = 0;
				continue;
			}
		}

		/* Good Receive */
		pci_unmap_page(adapter->pdev, buffer_info->dma,
			buffer_info->length, PCI_DMA_FROMDEVICE);
		buffer_info->dma = 0;
		skb = buffer_info->skb;
		length = le16_to_cpu(rrd->xsz.xsum_sz.pkt_size);

		/* hardware length includes the FCS; strip it */
		skb_put(skb, length - ETH_FCS_LEN);

		/* Receive Checksum Offload */
		atl1_rx_checksum(adapter, rrd, skb);
		skb->protocol = eth_type_trans(skb, adapter->netdev);

		if (adapter->vlgrp && (rrd->pkt_flg & PACKET_FLAG_VLAN_INS)) {
			/* un-swizzle the hardware's VLAN tag layout back
			 * into 802.1Q order (inverse of the swizzle done
			 * in atl1_xmit_frame) */
			u16 vlan_tag = (rrd->vlan_tag >> 4) |
					((rrd->vlan_tag & 7) << 13) |
					((rrd->vlan_tag & 8) << 9);
			vlan_hwaccel_rx(skb, adapter->vlgrp, vlan_tag);
		} else
			netif_rx(skb);

		/* let protocol layer free skb */
		buffer_info->skb = NULL;
		buffer_info->alloced = 0;
		rrd->xsz.valid = 0;
	}

	atomic_set(&rrd_ring->next_to_clean, rrd_next_to_clean);

	atl1_alloc_rx_buffers(adapter);

	/* update mailbox ? */
	if (count) {
		u32 tpd_next_to_use;
		u32 rfd_next_to_use;

		/* The mailbox register packs producer/consumer indices for
		 * all three rings, so publish them together under mb_lock. */
		spin_lock(&adapter->mb_lock);

		tpd_next_to_use = atomic_read(&adapter->tpd_ring.next_to_use);
		rfd_next_to_use =
			atomic_read(&adapter->rfd_ring.next_to_use);
		rrd_next_to_clean =
			atomic_read(&adapter->rrd_ring.next_to_clean);
		value = ((rfd_next_to_use & MB_RFD_PROD_INDX_MASK) <<
			MB_RFD_PROD_INDX_SHIFT) |
			((rrd_next_to_clean & MB_RRD_CONS_INDX_MASK) <<
			MB_RRD_CONS_INDX_SHIFT) |
			((tpd_next_to_use & MB_TPD_PROD_INDX_MASK) <<
			MB_TPD_PROD_INDX_SHIFT);
		iowrite32(value, adapter->hw.hw_addr + REG_MAILBOX);
		spin_unlock(&adapter->mb_lock);
	}
}

/*
 * atl1_intr_tx - reclaim completed transmit buffers
 * @adapter: board private structure
 *
 * Unmaps and frees every TPD buffer up to the consumer index the
 * hardware reported in the coalescing message block (CMB), then wakes
 * the queue if it was stopped and the carrier is still up.  Runs in
 * IRQ context (called from atl1_intr), hence dev_kfree_skb_irq().
 */
static void atl1_intr_tx(struct atl1_adapter *adapter)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 sw_tpd_next_to_clean;
	u16 cmb_tpd_next_to_clean;

	sw_tpd_next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	cmb_tpd_next_to_clean = le16_to_cpu(adapter->cmb.cmb->tpd_cons_idx);

	while (cmb_tpd_next_to_clean != sw_tpd_next_to_clean) {
		buffer_info = &tpd_ring->buffer_info[sw_tpd_next_to_clean];
		if (buffer_info->dma) {
			pci_unmap_page(adapter->pdev, buffer_info->dma,
				buffer_info->length, PCI_DMA_TODEVICE);
			buffer_info->dma = 0;
		}

		/* only the packet's last TPD carries the skb pointer */
		if (buffer_info->skb) {
			dev_kfree_skb_irq(buffer_info->skb);
			buffer_info->skb = NULL;
		}

		if (++sw_tpd_next_to_clean == tpd_ring->count)
			sw_tpd_next_to_clean = 0;
	}
	atomic_set(&tpd_ring->next_to_clean, sw_tpd_next_to_clean);

	if (netif_queue_stopped(adapter->netdev) &&
	    netif_carrier_ok(adapter->netdev))
		netif_wake_queue(adapter->netdev);
}

/*
 * atl1_tpd_avail - number of free transmit descriptors
 * @tpd_ring: the transmit packet descriptor ring
 *
 * One slot is always kept in reserve (the "- 1") so that a full ring
 * can be distinguished from an empty one.
 */
static u16 atl1_tpd_avail(struct atl1_tpd_ring *tpd_ring)
{
	u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
	u16 next_to_use = atomic_read(&tpd_ring->next_to_use);
	return (next_to_clean > next_to_use) ?
		next_to_clean - next_to_use - 1 :
		tpd_ring->count + next_to_clean - next_to_use - 1;
}

/*
 * atl1_tso - set up TCP segmentation offload in the TPD, if requested
 * @adapter: board private structure
 * @skb: the packet being transmitted
 * @ptpd: template transmit packet descriptor to fill in
 *
 * Return values (as used by atl1_xmit_frame):
 *   < 0  - error (skb will be dropped): -1 expand-head failure,
 *          -2 unsupported IP header offset
 *   0    - no GSO requested (or not IPv4); caller falls back to
 *          plain checksum offload
 *   > 0  - TSO (or pseudo-header-only) setup written into @ptpd
 */
static int atl1_tso(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	u8 hdr_len, ip_off;
	u32 real_len;
	int err;

	if (skb_shinfo(skb)->gso_size) {
		if (skb_header_cloned(skb)) {
			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
			if (unlikely(err))
				return -1;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			struct iphdr *iph = ip_hdr(skb);

			/* trim any padding beyond the IP total length */
			real_len = (((unsigned char *)iph - skb->data) +
				ntohs(iph->tot_len));
			if (real_len < skb->len)
				pskb_trim(skb, real_len);
			hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
			if (skb->len == hdr_len) {
				/* headers only: no payload to segment, just
				 * hand the hardware a checksum job */
				iph->check = 0;
				tcp_hdr(skb)->check =
					~csum_tcpudp_magic(iph->saddr,
					iph->daddr, tcp_hdrlen(skb),
					IPPROTO_TCP, 0);
				ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
					TPD_IPHL_SHIFT;
				ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
					TPD_TCPHDRLEN_MASK) <<
					TPD_TCPHDRLEN_SHIFT;
				ptpd->word3 |= 1 << TPD_IP_CSUM_SHIFT;
				ptpd->word3 |= 1 << TPD_TCP_CSUM_SHIFT;
				return 1;
			}

			/* seed the pseudo-header checksum; hardware fills
			 * in per-segment lengths during segmentation */
			iph->check = 0;
			tcp_hdr(skb)->check =
				~csum_tcpudp_magic(iph->saddr, iph->daddr,
				0, IPPROTO_TCP, 0);
			ip_off = (unsigned char *)iph -
				(unsigned char *) skb_network_header(skb);
			if (ip_off == 8) /* 802.3-SNAP frame */
				ptpd->word3 |= 1 << TPD_ETHTYPE_SHIFT;
			else if (ip_off != 0)
				return -2;

			ptpd->word3 |= (iph->ihl & TPD_IPHL_MASK) <<
				TPD_IPHL_SHIFT;
			ptpd->word3 |= ((tcp_hdrlen(skb) >> 2) &
				TPD_TCPHDRLEN_MASK) << TPD_TCPHDRLEN_SHIFT;
			ptpd->word3 |= (skb_shinfo(skb)->gso_size &
				TPD_MSS_MASK) << TPD_MSS_SHIFT;
			ptpd->word3 |= 1 << TPD_SEGMENT_EN_SHIFT;
			return 3;
		}
	}
	return false;
}

/*
 * atl1_tx_csum - set up hardware checksum offload in the TPD
 * @adapter: board private structure
 * @skb: the packet being transmitted
 * @ptpd: template transmit packet descriptor to fill in
 *
 * Returns < 0 on error (odd checksum-start offset, which the L1
 * hardware cannot handle), > 0 when offload was configured, 0 when
 * the skb needs no hardware checksumming.
 */
static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	u8 css, cso;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
		css = skb_checksum_start_offset(skb);
		cso = css + (u8) skb->csum_offset;
		if (unlikely(css & 0x1)) {
			/* L1 hardware requires an even number here */
			if (netif_msg_tx_err(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"payload offset not an even number\n");
			return -1;
		}
		ptpd->word3 |= (css & TPD_PLOADOFFSET_MASK) <<
			TPD_PLOADOFFSET_SHIFT;
		ptpd->word3 |= (cso & TPD_CCSUMOFFSET_MASK) <<
			TPD_CCSUMOFFSET_SHIFT;
		ptpd->word3 |= 1 << TPD_CUST_CSUM_EN_SHIFT;
		return true;
	}
	return 0;
}

/*
 * atl1_tx_map - DMA-map an skb's head and fragments onto TPD slots
 * @adapter: board private structure
 * @skb: the packet being transmitted
 * @ptpd: template TPD (only read for the TSO flag here)
 *
 * Fills tpd_ring->buffer_info[] starting at next_to_use; the skb
 * pointer is stored only in the LAST slot so atl1_intr_tx frees it
 * exactly once.  Does not advance next_to_use itself — that is done
 * by atl1_tx_queue.
 */
static void atl1_tx_map(struct atl1_adapter *adapter, struct sk_buff *skb,
	struct tx_packet_desc *ptpd)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	u16 buf_len = skb->len;
	struct page *page;
	unsigned long offset;
	unsigned int nr_frags;
	unsigned int f;
	int retval;
	u16 next_to_use;
	u16 data_len;
	u8 hdr_len;

	buf_len -= skb->data_len;
	nr_frags = skb_shinfo(skb)->nr_frags;
	next_to_use = atomic_read(&tpd_ring->next_to_use);
	buffer_info = &tpd_ring->buffer_info[next_to_use];
	BUG_ON(buffer_info->skb);
	/* put skb in last TPD */
	buffer_info->skb = NULL;

	retval = (ptpd->word3 >> TPD_SEGMENT_EN_SHIFT) & TPD_SEGMENT_EN_MASK;
	if (retval) {
		/* TSO: the header gets its own descriptor */
		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
		buffer_info->length = hdr_len;
		page = virt_to_page(skb->data);
		offset = (unsigned long)skb->data & ~PAGE_MASK;
		buffer_info->dma = pci_map_page(adapter->pdev, page,
						offset, hdr_len,
						PCI_DMA_TODEVICE);

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;

		if (buf_len > hdr_len) {
			int i, nseg;

			/* split the linear payload into
			 * ATL1_MAX_TX_BUF_LEN-sized descriptors */
			data_len = buf_len - hdr_len;
			nseg = (data_len + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
			for (i = 0; i < nseg; i++) {
				buffer_info =
					&tpd_ring->buffer_info[next_to_use];
				buffer_info->skb = NULL;
				buffer_info->length =
					(ATL1_MAX_TX_BUF_LEN >=
					data_len) ?
ATL1_MAX_TX_BUF_LEN : data_len; data_len -= buffer_info->length; page = virt_to_page(skb->data + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)); offset = (unsigned long)(skb->data + (hdr_len + i * ATL1_MAX_TX_BUF_LEN)) & ~PAGE_MASK; buffer_info->dma = pci_map_page(adapter->pdev, page, offset, buffer_info->length, PCI_DMA_TODEVICE); if (++next_to_use == tpd_ring->count) next_to_use = 0; } } } else { /* not TSO */ buffer_info->length = buf_len; page = virt_to_page(skb->data); offset = (unsigned long)skb->data & ~PAGE_MASK; buffer_info->dma = pci_map_page(adapter->pdev, page, offset, buf_len, PCI_DMA_TODEVICE); if (++next_to_use == tpd_ring->count) next_to_use = 0; } for (f = 0; f < nr_frags; f++) { struct skb_frag_struct *frag; u16 i, nseg; frag = &skb_shinfo(skb)->frags[f]; buf_len = frag->size; nseg = (buf_len + ATL1_MAX_TX_BUF_LEN - 1) / ATL1_MAX_TX_BUF_LEN; for (i = 0; i < nseg; i++) { buffer_info = &tpd_ring->buffer_info[next_to_use]; BUG_ON(buffer_info->skb); buffer_info->skb = NULL; buffer_info->length = (buf_len > ATL1_MAX_TX_BUF_LEN) ? 
				ATL1_MAX_TX_BUF_LEN : buf_len;
			buf_len -= buffer_info->length;
			buffer_info->dma = pci_map_page(adapter->pdev,
				frag->page,
				frag->page_offset + (i * ATL1_MAX_TX_BUF_LEN),
				buffer_info->length, PCI_DMA_TODEVICE);

			if (++next_to_use == tpd_ring->count)
				next_to_use = 0;
		}
	}

	/* last tpd's buffer-info */
	buffer_info->skb = skb;
}

/*
 * atl1_tx_queue - write @count TPDs into the ring and publish them
 * @adapter: board private structure
 * @count: number of descriptors atl1_tx_map consumed
 * @ptpd: template descriptor copied into each slot
 *
 * Copies the template into every slot, patches in the per-slot DMA
 * address/length from buffer_info[], then advances next_to_use behind
 * a write barrier.
 */
static void atl1_tx_queue(struct atl1_adapter *adapter, u16 count,
	struct tx_packet_desc *ptpd)
{
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	struct atl1_buffer *buffer_info;
	struct tx_packet_desc *tpd;
	u16 j;
	u32 val;
	u16 next_to_use = (u16) atomic_read(&tpd_ring->next_to_use);

	for (j = 0; j < count; j++) {
		buffer_info = &tpd_ring->buffer_info[next_to_use];
		tpd = ATL1_TPD_DESC(&adapter->tpd_ring, next_to_use);
		if (tpd != ptpd)
			memcpy(tpd, ptpd, sizeof(struct tx_packet_desc));
		tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
		tpd->word2 &= ~(TPD_BUFLEN_MASK << TPD_BUFLEN_SHIFT);
		tpd->word2 |= (cpu_to_le16(buffer_info->length) &
			TPD_BUFLEN_MASK) << TPD_BUFLEN_SHIFT;

		/*
		 * if this is the first packet in a TSO chain, set
		 * TPD_HDRFLAG, otherwise, clear it.
		 */
		val = (tpd->word3 >> TPD_SEGMENT_EN_SHIFT) &
			TPD_SEGMENT_EN_MASK;
		if (val) {
			if (!j)
				tpd->word3 |= 1 << TPD_HDRFLAG_SHIFT;
			else
				tpd->word3 &= ~(1 << TPD_HDRFLAG_SHIFT);
		}

		if (j == (count - 1))
			tpd->word3 |= 1 << TPD_EOP_SHIFT;

		if (++next_to_use == tpd_ring->count)
			next_to_use = 0;
	}
	/*
	 * Force memory writes to complete before letting h/w
	 * know there are new descriptors to fetch.  (Only
	 * applicable for weak-ordered memory model archs,
	 * such as IA-64).
	 */
	wmb();

	atomic_set(&tpd_ring->next_to_use, next_to_use);
}

/*
 * atl1_xmit_frame - ndo_start_xmit hook
 * @skb: packet to transmit
 * @netdev: network interface
 *
 * Counts the descriptors the packet needs, bails with NETDEV_TX_BUSY
 * when the ring is too full, then builds a template TPD (VLAN tag,
 * TSO or checksum offload), maps the skb and kicks the hardware via
 * the mailbox.
 */
static netdev_tx_t atl1_xmit_frame(struct sk_buff *skb,
	struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_tpd_ring *tpd_ring = &adapter->tpd_ring;
	int len;
	int tso;
	int count = 1;
	int ret_val;
	struct tx_packet_desc *ptpd;
	u16 frag_size;
	u16 vlan_tag;
	unsigned int nr_frags = 0;
	unsigned int mss = 0;
	unsigned int f;
	unsigned int proto_hdr_len;

	len = skb_headlen(skb);

	if (unlikely(skb->len <= 0)) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* one descriptor per ATL1_MAX_TX_BUF_LEN chunk of each fragment */
	nr_frags = skb_shinfo(skb)->nr_frags;
	for (f = 0; f < nr_frags; f++) {
		frag_size = skb_shinfo(skb)->frags[f].size;
		if (frag_size)
			count += (frag_size + ATL1_MAX_TX_BUF_LEN - 1) /
				ATL1_MAX_TX_BUF_LEN;
	}

	mss = skb_shinfo(skb)->gso_size;
	if (mss) {
		if (skb->protocol == htons(ETH_P_IP)) {
			proto_hdr_len = (skb_transport_offset(skb) +
					 tcp_hdrlen(skb));
			if (unlikely(proto_hdr_len > len)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
			/* need additional TPD ? */
			if (proto_hdr_len != len)
				count += (len - proto_hdr_len +
					ATL1_MAX_TX_BUF_LEN - 1) /
					ATL1_MAX_TX_BUF_LEN;
		}
	}

	if (atl1_tpd_avail(&adapter->tpd_ring) < count) {
		/* not enough descriptors */
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(adapter))
			dev_printk(KERN_DEBUG, &adapter->pdev->dev,
				"tx busy\n");
		return NETDEV_TX_BUSY;
	}

	ptpd = ATL1_TPD_DESC(tpd_ring,
		(u16) atomic_read(&tpd_ring->next_to_use));
	memset(ptpd, 0, sizeof(struct tx_packet_desc));

	if (vlan_tx_tag_present(skb)) {
		vlan_tag = vlan_tx_tag_get(skb);
		/* swizzle into the hardware's VLAN tag bit layout
		 * (undone on receive in atl1_intr_rx) */
		vlan_tag = (vlan_tag << 4) | (vlan_tag >> 13) |
			((vlan_tag >> 9) & 0x8);
		ptpd->word3 |= 1 << TPD_INS_VL_TAG_SHIFT;
		ptpd->word2 |= (vlan_tag & TPD_VLANTAG_MASK) <<
			TPD_VLANTAG_SHIFT;
	}

	tso = atl1_tso(adapter, skb, ptpd);
	if (tso < 0) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	if (!tso) {
		ret_val = atl1_tx_csum(adapter, skb, ptpd);
		if (ret_val < 0) {
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	atl1_tx_map(adapter, skb, ptpd);
	atl1_tx_queue(adapter, count, ptpd);
	atl1_update_mailbox(adapter);
	mmiowb();
	return NETDEV_TX_OK;
}

/*
 * atl1_intr - Interrupt Handler
 * @irq: interrupt number
 * @data: pointer to a network interface device structure
 * @pt_regs: CPU registers structure
 */
static irqreturn_t atl1_intr(int irq, void *data)
{
	struct atl1_adapter *adapter = netdev_priv(data);
	u32 status;
	int max_ints = 10;

	/* interrupt causes are mirrored into the CMB by the hardware */
	status = adapter->cmb.cmb->int_stats;
	if (!status)
		return IRQ_NONE;

	do {
		/* clear CMB interrupt status at once */
		adapter->cmb.cmb->int_stats = 0;

		if (status & ISR_GPHY)	/* clear phy status */
			atlx_clear_phy_int(adapter);

		/* clear ISR status, and Enable CMB DMA/Disable Interrupt */
		iowrite32(status | ISR_DIS_INT, adapter->hw.hw_addr + REG_ISR);

		/* check if SMB intr */
		if (status & ISR_SMB)
			atl1_inc_smb(adapter);

		/* check if PCIE PHY Link down */
		if (status & ISR_PHY_LINKDOWN) {
			if (netif_msg_intr(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"pcie phy link down %x\n", status);
			if (netif_running(adapter->netdev)) {
				/* reset MAC */
				iowrite32(0, adapter->hw.hw_addr + REG_IMR);
				schedule_work(&adapter->reset_dev_task);
				return IRQ_HANDLED;
			}
		}

		/* check if DMA read/write error ? */
		if (status & (ISR_DMAR_TO_RST | ISR_DMAW_TO_RST)) {
			if (netif_msg_intr(adapter))
				dev_printk(KERN_DEBUG, &adapter->pdev->dev,
					"pcie DMA r/w error (status = 0x%x)\n",
					status);
			/* mask all interrupts and reset from workqueue
			 * context; recovery cannot be done in the IRQ */
			iowrite32(0, adapter->hw.hw_addr + REG_IMR);
			schedule_work(&adapter->reset_dev_task);
			return IRQ_HANDLED;
		}

		/* link event */
		if (status & ISR_GPHY) {
			adapter->soft_stats.tx_carrier_errors++;
			atl1_check_for_link(adapter);
		}

		/* transmit event */
		if (status & ISR_CMB_TX)
			atl1_intr_tx(adapter);

		/* rx exception */
		if (unlikely(status & (ISR_RXF_OV | ISR_RFD_UNRUN |
			ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
			ISR_HOST_RRD_OV | ISR_CMB_RX))) {
			if (status & (ISR_RXF_OV | ISR_RFD_UNRUN |
				ISR_RRD_OV | ISR_HOST_RFD_UNRUN |
				ISR_HOST_RRD_OV))
				if (netif_msg_intr(adapter))
					dev_printk(KERN_DEBUG,
						&adapter->pdev->dev,
						"rx exception, ISR = 0x%x\n",
						status);
			atl1_intr_rx(adapter);
		}

		/* bound the amount of work done in one IRQ invocation */
		if (--max_ints < 0)
			break;

	} while ((status = adapter->cmb.cmb->int_stats));

	/* re-enable Interrupt */
	iowrite32(ISR_DIS_SMB | ISR_DIS_DMA, adapter->hw.hw_addr + REG_ISR);
	return IRQ_HANDLED;
}

/*
 * atl1_phy_config - Timer Call-back
 * @data: pointer to netdev cast into an unsigned long
 *
 * Re-programs PHY advertisement registers and restarts autonegotiation
 * under the adapter lock.
 */
static void atl1_phy_config(unsigned long data)
{
	struct atl1_adapter *adapter = (struct atl1_adapter *)data;
	struct atl1_hw *hw = &adapter->hw;
	unsigned long flags;

	spin_lock_irqsave(&adapter->lock, flags);
	adapter->phy_timer_pending = false;
	atl1_write_phy_reg(hw, MII_ADVERTISE, hw->mii_autoneg_adv_reg);
	atl1_write_phy_reg(hw, MII_ATLX_CR, hw->mii_1000t_ctrl_reg);
	atl1_write_phy_reg(hw, MII_BMCR, MII_CR_RESET | MII_CR_AUTO_NEG_EN);
	spin_unlock_irqrestore(&adapter->lock, flags);
}

/*
 * Orphaned vendor comment left intact here:
 * <vendor comment>
 * If TPD Buffer size equal to 0, PCIE DMAR_TO_INT
 * will assert. We do soft reset <0x1400=1> according
 * with the SPEC. BUT, it seemes that PCIE or DMA
 * state-machine will not be reset. DMAR_TO_INT will
 * assert again and again.
 * </vendor comment>
 */

/*
 * atl1_reset - soft-reset then re-initialize the hardware
 * @adapter: board private structure
 *
 * Returns 0 on success or the failing step's error code.
 */
static int atl1_reset(struct atl1_adapter *adapter)
{
	int ret;
	ret = atl1_reset_hw(&adapter->hw);
	if (ret)
		return ret;
	return atl1_init_hw(&adapter->hw);
}

/*
 * atl1_up - bring the interface up after a (re)configuration
 * @adapter: board private structure
 *
 * Assumes the hardware has already been reset; restores multicast
 * filters and VLAN state, refills the RX ring, configures the MAC,
 * requests the IRQ (MSI if available, shared legacy otherwise) and
 * starts the queue.
 */
static s32 atl1_up(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;
	int err;
	int irq_flags = 0;

	/* hardware has been reset, we need to reload some things */
	atlx_set_multi(netdev);
	atl1_init_ring_ptrs(adapter);
	atlx_restore_vlan(adapter);
	/* atl1_alloc_rx_buffers() returns the number of buffers filled,
	 * so zero means allocation failed entirely */
	err = atl1_alloc_rx_buffers(adapter);
	if (unlikely(!err))
		/* no RX BUFFER allocated */
		return -ENOMEM;

	if (unlikely(atl1_configure(adapter))) {
		err = -EIO;
		goto err_up;
	}

	err = pci_enable_msi(adapter->pdev);
	if (err) {
		if (netif_msg_ifup(adapter))
			dev_info(&adapter->pdev->dev,
				"Unable to enable MSI: %d\n", err);
		irq_flags |= IRQF_SHARED;
	}

	err = request_irq(adapter->pdev->irq, atl1_intr, irq_flags,
			netdev->name, netdev);
	if (unlikely(err))
		goto err_up;

	atlx_irq_enable(adapter);
	atl1_check_link(adapter);
	netif_start_queue(netdev);
	return 0;

err_up:
	pci_disable_msi(adapter->pdev);
	/* free rx_buffers */
	atl1_clean_rx_ring(adapter);
	return err;
}

/*
 * atl1_down - stop the interface and release its runtime resources
 * @adapter: board private structure
 *
 * Mirror of atl1_up(): stops the queue and PHY timer, releases the
 * IRQ/MSI, resets the hardware and drains both rings.
 */
static void atl1_down(struct atl1_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	netif_stop_queue(netdev);
	del_timer_sync(&adapter->phy_config_timer);
	adapter->phy_timer_pending = false;

	atlx_irq_disable(adapter);
	free_irq(adapter->pdev->irq, netdev);
	pci_disable_msi(adapter->pdev);
	atl1_reset_hw(&adapter->hw);
	adapter->cmb.cmb->int_stats = 0;

	adapter->link_speed = SPEED_0;
	adapter->link_duplex = -1;
	netif_carrier_off(netdev);

	atl1_clean_tx_ring(adapter);
	atl1_clean_rx_ring(adapter);
}

/*
 * atl1_reset_dev_task - workqueue handler used by atl1_intr for
 * fatal-error recovery: detach, full down/up cycle, reattach.
 */
static void atl1_reset_dev_task(struct work_struct *work)
{
	struct atl1_adapter *adapter =
		container_of(work, struct atl1_adapter, reset_dev_task);
	struct net_device *netdev = adapter->netdev;

	netif_device_detach(netdev);
	atl1_down(adapter);
	atl1_up(adapter);
	netif_device_attach(netdev);
}

/*
 * atl1_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 */
static int atl1_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int old_mtu = netdev->mtu;
	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;

	if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) ||
	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
		if (netif_msg_link(adapter))
			dev_warn(&adapter->pdev->dev, "invalid MTU setting\n");
		return -EINVAL;
	}

	adapter->hw.max_frame_size = max_frame;
	/* jumbo threshold is in 8-byte units, rounded up */
	adapter->hw.tx_jumbo_task_th = (max_frame + 7) >> 3;
	/* RX buffers are sized to the frame, rounded up to 8 bytes */
	adapter->rx_buffer_len = (max_frame + 7) & ~7;
	adapter->hw.rx_jumbo_th = adapter->rx_buffer_len / 8;

	netdev->mtu = new_mtu;
	if ((old_mtu != new_mtu) && netif_running(netdev)) {
		/* restart the interface so the new sizes take effect */
		atl1_down(adapter);
		atl1_up(adapter);
	}

	return 0;
}

/*
 * atl1_open - Called when a network interface is made active
 * @netdev: network interface device structure
 *
 * Returns 0 on success, negative value on failure
 *
 * The open entry point is called when a network interface is made
 * active by the system (IFF_UP).  At this point all resources needed
 * for transmit and receive operations are allocated, the interrupt
 * handler is registered with the OS, the watchdog timer is started,
 * and the stack is notified that the interface is ready.
 */
static int atl1_open(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int err;

	netif_carrier_off(netdev);

	/* allocate transmit descriptors */
	err = atl1_setup_ring_resources(adapter);
	if (err)
		return err;

	err = atl1_up(adapter);
	if (err)
		goto err_up;

	return 0;

err_up:
	atl1_reset(adapter);
	return err;
}

/*
 * atl1_close - Disables a network interface
 * @netdev: network interface device structure
 *
 * Returns 0, this is not allowed to fail
 *
 * The close entry point is called when an interface is de-activated
 * by the OS.  The hardware is still under the drivers control, but
 * needs to be disabled.  A global MAC reset is issued to stop the
 * hardware, and all transmit and receive resources are freed.
 */
static int atl1_close(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	atl1_down(adapter);
	atl1_free_ring_resources(adapter);
	return 0;
}

#ifdef CONFIG_PM
/*
 * atl1_suspend - dev_pm_ops suspend hook
 *
 * Brings the interface down, then programs wake-on-LAN: magic-packet
 * wake when the link is up, link-change wake otherwise.  WOL is
 * disabled entirely when no wake events are requested.
 */
static int atl1_suspend(struct device *dev)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	u32 ctrl = 0;
	u32 wufc = adapter->wol;
	u32 val;
	u16 speed;
	u16 duplex;

	netif_device_detach(netdev);
	if (netif_running(netdev))
		atl1_down(adapter);

	/* BMSR latches link-down; read twice for the current state */
	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
	atl1_read_phy_reg(hw, MII_BMSR, (u16 *) & ctrl);
	val = ctrl & BMSR_LSTATUS;
	if (val)
		wufc &= ~ATLX_WUFC_LNKC;
	if (!wufc)
		goto disable_wol;

	if (val) {
		val = atl1_get_speed_and_duplex(hw, &speed, &duplex);
		if (val) {
			if (netif_msg_ifdown(adapter))
				dev_printk(KERN_DEBUG, &pdev->dev,
					"error getting speed/duplex\n");
			goto disable_wol;
		}

		ctrl = 0;

		/* enable magic packet WOL */
		if (wufc & ATLX_WUFC_MAG)
			ctrl |= (WOL_MAGIC_EN | WOL_MAGIC_PME_EN);
		iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL);
		ioread32(hw->hw_addr + REG_WOL_CTRL);

		/* configure the mac */
		ctrl = MAC_CTRL_RX_EN;
		ctrl |= ((u32)((speed == SPEED_1000) ?
MAC_CTRL_SPEED_1000 : MAC_CTRL_SPEED_10_100) << MAC_CTRL_SPEED_SHIFT); if (duplex == FULL_DUPLEX) ctrl |= MAC_CTRL_DUPLX; ctrl |= (((u32)adapter->hw.preamble_len & MAC_CTRL_PRMLEN_MASK) << MAC_CTRL_PRMLEN_SHIFT); if (adapter->vlgrp) ctrl |= MAC_CTRL_RMV_VLAN; if (wufc & ATLX_WUFC_MAG) ctrl |= MAC_CTRL_BC_EN; iowrite32(ctrl, hw->hw_addr + REG_MAC_CTRL); ioread32(hw->hw_addr + REG_MAC_CTRL); /* poke the PHY */ ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); ioread32(hw->hw_addr + REG_PCIE_PHYMISC); } else { ctrl |= (WOL_LINK_CHG_EN | WOL_LINK_CHG_PME_EN); iowrite32(ctrl, hw->hw_addr + REG_WOL_CTRL); ioread32(hw->hw_addr + REG_WOL_CTRL); iowrite32(0, hw->hw_addr + REG_MAC_CTRL); ioread32(hw->hw_addr + REG_MAC_CTRL); hw->phy_configured = false; } return 0; disable_wol: iowrite32(0, hw->hw_addr + REG_WOL_CTRL); ioread32(hw->hw_addr + REG_WOL_CTRL); ctrl = ioread32(hw->hw_addr + REG_PCIE_PHYMISC); ctrl |= PCIE_PHYMISC_FORCE_RCV_DET; iowrite32(ctrl, hw->hw_addr + REG_PCIE_PHYMISC); ioread32(hw->hw_addr + REG_PCIE_PHYMISC); hw->phy_configured = false; return 0; } static int atl1_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct net_device *netdev = pci_get_drvdata(pdev); struct atl1_adapter *adapter = netdev_priv(netdev); iowrite32(0, adapter->hw.hw_addr + REG_WOL_CTRL); atl1_reset_hw(&adapter->hw); if (netif_running(netdev)) { adapter->cmb.cmb->int_stats = 0; atl1_up(adapter); } netif_device_attach(netdev); return 0; } static SIMPLE_DEV_PM_OPS(atl1_pm_ops, atl1_suspend, atl1_resume); #define ATL1_PM_OPS (&atl1_pm_ops) #else static int atl1_suspend(struct device *dev) { return 0; } #define ATL1_PM_OPS NULL #endif static void atl1_shutdown(struct pci_dev *pdev) { struct net_device *netdev = pci_get_drvdata(pdev); struct atl1_adapter *adapter = netdev_priv(netdev); atl1_suspend(&pdev->dev); pci_wake_from_d3(pdev, adapter->wol); pci_set_power_state(pdev, PCI_D3hot); 
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll hook: run the interrupt handler with the IRQ masked */
static void atl1_poll_controller(struct net_device *netdev)
{
	disable_irq(netdev->irq);
	atl1_intr(netdev->irq, netdev);
	enable_irq(netdev->irq);
}
#endif

/* wired into the netdev by atl1_probe() */
static const struct net_device_ops atl1_netdev_ops = {
	.ndo_open		= atl1_open,
	.ndo_stop		= atl1_close,
	.ndo_start_xmit		= atl1_xmit_frame,
	.ndo_set_multicast_list	= atlx_set_multi,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= atl1_set_mac,
	.ndo_change_mtu		= atl1_change_mtu,
	.ndo_do_ioctl		= atlx_ioctl,
	.ndo_tx_timeout		= atlx_tx_timeout,
	.ndo_vlan_rx_register	= atlx_vlan_rx_register,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= atl1_poll_controller,
#endif
};

/*
 * atl1_probe - Device Initialization Routine
 * @pdev: PCI device information struct
 * @ent: entry in atl1_pci_tbl
 *
 * Returns 0 on success, negative on failure
 *
 * atl1_probe initializes an adapter identified by a pci_dev structure.
 * The OS initialization, configuring of the adapter private structure,
 * and a hardware reset occur.
 */
static int __devinit atl1_probe(struct pci_dev *pdev,
	const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct atl1_adapter *adapter;
	static int cards_found = 0;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/*
	 * The atl1 chip can DMA to 64-bit addresses, but it uses a single
	 * shared register for the high 32 bits, so only a single, aligned,
	 * 4 GB physical address range can be used at a time.
	 *
	 * Supporting 64-bit DMA on this hardware is more trouble than it's
	 * worth.  It is far easier to limit to 32-bit DMA than update
	 * various kernel subsystems to support the mechanics required by a
	 * fixed-high-32-bit system.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "no usable DMA configuration\n");
		goto err_dma;
	}
	/*
	 * Mark all PCI regions associated with PCI device
	 * pdev as being reserved by owner atl1_driver_name
	 */
	err = pci_request_regions(pdev, ATLX_DRIVER_NAME);
	if (err)
		goto err_request_regions;

	/*
	 * Enables bus-mastering on the device and calls
	 * pcibios_set_master to do the needed arch specific settings
	 */
	pci_set_master(pdev);

	netdev = alloc_etherdev(sizeof(struct atl1_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto err_alloc_etherdev;
	}
	SET_NETDEV_DEV(netdev, &pdev->dev);

	pci_set_drvdata(pdev, netdev);
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = pdev;
	adapter->hw.back = adapter;
	adapter->msg_enable = netif_msg_init(debug, atl1_default_msg);

	adapter->hw.hw_addr = pci_iomap(pdev, 0, 0);
	if (!adapter->hw.hw_addr) {
		err = -EIO;
		goto err_pci_iomap;
	}
	/* get device revision number */
	adapter->hw.dev_rev = ioread16(adapter->hw.hw_addr +
		(REG_MASTER_CTRL + 2));
	if (netif_msg_probe(adapter))
		dev_info(&pdev->dev, "version %s\n", ATLX_DRIVER_VERSION);

	/* set default ring resource counts */
	adapter->rfd_ring.count = adapter->rrd_ring.count = ATL1_DEFAULT_RFD;
	adapter->tpd_ring.count = ATL1_DEFAULT_TPD;

	adapter->mii.dev = netdev;
	adapter->mii.mdio_read = mdio_read;
	adapter->mii.mdio_write = mdio_write;
	adapter->mii.phy_id_mask = 0x1f;
	adapter->mii.reg_num_mask = 0x1f;

	netdev->netdev_ops = &atl1_netdev_ops;
	netdev->watchdog_timeo = 5 * HZ;
	netdev->ethtool_ops = &atl1_ethtool_ops;
	adapter->bd_number = cards_found;

	/* setup the private structure */
	err = atl1_sw_init(adapter);
	if (err)
		goto err_common;

	netdev->features = NETIF_F_HW_CSUM;
	netdev->features |= NETIF_F_SG;
	netdev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

	netdev->hw_features = NETIF_F_HW_CSUM | NETIF_F_SG | NETIF_F_TSO;

	/* is this valid? see atl1_setup_mac_ctrl() */
	netdev->features |= NETIF_F_RXCSUM;

	/*
	 * patch for some L1 of old version,
	 * the final version of L1 may not need these
	 * patches
	 */
	/* atl1_pcie_patch(adapter); */

	/* really reset GPHY core */
	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);

	/*
	 * reset the controller to
	 * put the device in a known good starting state
	 */
	if (atl1_reset_hw(&adapter->hw)) {
		err = -EIO;
		goto err_common;
	}

	/* copy the MAC address out of the EEPROM */
	atl1_read_mac_addr(&adapter->hw);
	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);

	if (!is_valid_ether_addr(netdev->dev_addr)) {
		err = -EIO;
		goto err_common;
	}

	atl1_check_options(adapter);

	/* pre-init the MAC, and setup link */
	err = atl1_init_hw(&adapter->hw);
	if (err) {
		err = -EIO;
		goto err_common;
	}

	atl1_pcie_patch(adapter);
	/* assume we have no link for now */
	netif_carrier_off(netdev);

	setup_timer(&adapter->phy_config_timer, atl1_phy_config,
		    (unsigned long)adapter);
	adapter->phy_timer_pending = false;

	INIT_WORK(&adapter->reset_dev_task, atl1_reset_dev_task);

	INIT_WORK(&adapter->link_chg_task, atlx_link_chg_task);

	err = register_netdev(netdev);
	if (err)
		goto err_common;

	cards_found++;
	atl1_via_workaround(adapter);
	return 0;

err_common:
	pci_iounmap(pdev, adapter->hw.hw_addr);
err_pci_iomap:
	free_netdev(netdev);
err_alloc_etherdev:
	pci_release_regions(pdev);
err_dma:
err_request_regions:
	pci_disable_device(pdev);
	return err;
}

/*
 * atl1_remove - Device Removal Routine
 * @pdev: PCI device information struct
 *
 * atl1_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.  The could be caused by a
 * Hot-Plug event, or because the driver is going to be removed from
 * memory.
 */
static void __devexit atl1_remove(struct pci_dev *pdev)
{
	struct net_device *netdev = pci_get_drvdata(pdev);
	struct atl1_adapter *adapter;
	/* Device not available. Return. */
	if (!netdev)
		return;

	adapter = netdev_priv(netdev);

	/*
	 * Some atl1 boards lack persistent storage for their MAC, and get it
	 * from the BIOS during POST.  If we've been messing with the MAC
	 * address, we need to save the permanent one.
	 */
	if (memcmp(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
		ETH_ALEN)) {
		memcpy(adapter->hw.mac_addr, adapter->hw.perm_mac_addr,
			ETH_ALEN);
		atl1_set_mac_addr(&adapter->hw);
	}

	iowrite16(0, adapter->hw.hw_addr + REG_PHY_ENABLE);
	unregister_netdev(netdev);
	pci_iounmap(pdev, adapter->hw.hw_addr);
	pci_release_regions(pdev);
	free_netdev(netdev);
	pci_disable_device(pdev);
}

static struct pci_driver atl1_driver = {
	.name = ATLX_DRIVER_NAME,
	.id_table = atl1_pci_tbl,
	.probe = atl1_probe,
	.remove = __devexit_p(atl1_remove),
	.shutdown = atl1_shutdown,
	.driver.pm = ATL1_PM_OPS,
};

/*
 * atl1_exit_module - Driver Exit Cleanup Routine
 *
 * atl1_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit atl1_exit_module(void)
{
	pci_unregister_driver(&atl1_driver);
}

/*
 * atl1_init_module - Driver Registration Routine
 *
 * atl1_init_module is the first routine called when the driver is
 * loaded. All it does is register with the PCI subsystem.
 */
static int __init atl1_init_module(void)
{
	return pci_register_driver(&atl1_driver);
}

module_init(atl1_init_module);
module_exit(atl1_exit_module);

/* one entry of the ethtool statistics table below:
 * display name + size and offset of the backing adapter field */
struct atl1_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define ATL1_STAT(m) \
	sizeof(((struct atl1_adapter *)0)->m), offsetof(struct atl1_adapter, m)

static struct atl1_stats atl1_gstrings_stats[] = {
	{"rx_packets", ATL1_STAT(soft_stats.rx_packets)},
	{"tx_packets", ATL1_STAT(soft_stats.tx_packets)},
	{"rx_bytes", ATL1_STAT(soft_stats.rx_bytes)},
	{"tx_bytes", ATL1_STAT(soft_stats.tx_bytes)},
	{"rx_errors", ATL1_STAT(soft_stats.rx_errors)},
	{"tx_errors", ATL1_STAT(soft_stats.tx_errors)},
	{"multicast", ATL1_STAT(soft_stats.multicast)},
	{"collisions", ATL1_STAT(soft_stats.collisions)},
	{"rx_length_errors", ATL1_STAT(soft_stats.rx_length_errors)},
	{"rx_over_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
	{"rx_crc_errors", ATL1_STAT(soft_stats.rx_crc_errors)},
	{"rx_frame_errors", ATL1_STAT(soft_stats.rx_frame_errors)},
	{"rx_fifo_errors", ATL1_STAT(soft_stats.rx_fifo_errors)},
	{"rx_missed_errors", ATL1_STAT(soft_stats.rx_missed_errors)},
	{"tx_aborted_errors", ATL1_STAT(soft_stats.tx_aborted_errors)},
	{"tx_carrier_errors", ATL1_STAT(soft_stats.tx_carrier_errors)},
	{"tx_fifo_errors", ATL1_STAT(soft_stats.tx_fifo_errors)},
	{"tx_window_errors", ATL1_STAT(soft_stats.tx_window_errors)},
	{"tx_abort_exce_coll", ATL1_STAT(soft_stats.excecol)},
	{"tx_abort_late_coll", ATL1_STAT(soft_stats.latecol)},
	{"tx_deferred_ok", ATL1_STAT(soft_stats.deffer)},
	{"tx_single_coll_ok", ATL1_STAT(soft_stats.scc)},
	{"tx_multi_coll_ok", ATL1_STAT(soft_stats.mcc)},
	{"tx_underun", ATL1_STAT(soft_stats.tx_underun)},
	{"tx_trunc", ATL1_STAT(soft_stats.tx_trunc)},
	{"tx_pause", ATL1_STAT(soft_stats.tx_pause)},
	{"rx_pause", ATL1_STAT(soft_stats.rx_pause)},
	{"rx_rrd_ov", ATL1_STAT(soft_stats.rx_rrd_ov)},
	{"rx_trunc", ATL1_STAT(soft_stats.rx_trunc)}
};

static void atl1_get_ethtool_stats(struct net_device *netdev, struct
	ethtool_stats *stats, u64 *data)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	int i;
	char *p;

	/* copy each counter out of the adapter struct by offset, widening
	 * 32-bit counters to the u64 ethtool expects */
	for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) {
		p = (char *)adapter+atl1_gstrings_stats[i].stat_offset;
		data[i] = (atl1_gstrings_stats[i].sizeof_stat ==
			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
}

static int atl1_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(atl1_gstrings_stats);
	default:
		return -EOPNOTSUPP;
	}
}

/* ethtool get_settings: report supported/advertised modes and the
 * current link speed/duplex (or unknown when the carrier is down) */
static int atl1_get_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;

	ecmd->supported = (SUPPORTED_10baseT_Half |
			   SUPPORTED_10baseT_Full |
			   SUPPORTED_100baseT_Half |
			   SUPPORTED_100baseT_Full |
			   SUPPORTED_1000baseT_Full |
			   SUPPORTED_Autoneg | SUPPORTED_TP);
	ecmd->advertising = ADVERTISED_TP;
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL) {
		ecmd->advertising |= ADVERTISED_Autoneg;
		if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR) {
			ecmd->advertising |= ADVERTISED_Autoneg;
			ecmd->advertising |=
			    (ADVERTISED_10baseT_Half |
			     ADVERTISED_10baseT_Full |
			     ADVERTISED_100baseT_Half |
			     ADVERTISED_100baseT_Full |
			     ADVERTISED_1000baseT_Full);
		} else
			ecmd->advertising |= (ADVERTISED_1000baseT_Full);
	}
	ecmd->port = PORT_TP;
	ecmd->phy_address = 0;
	ecmd->transceiver = XCVR_INTERNAL;

	if (netif_carrier_ok(adapter->netdev)) {
		u16 link_speed, link_duplex;
		atl1_get_speed_and_duplex(hw, &link_speed, &link_duplex);
		ethtool_cmd_speed_set(ecmd, link_speed);
		if (link_duplex == FULL_DUPLEX)
			ecmd->duplex = DUPLEX_FULL;
		else
			ecmd->duplex = DUPLEX_HALF;
	} else {
		/* no link: speed/duplex unknown */
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		ecmd->autoneg = AUTONEG_ENABLE;
	else
		ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}

/* ethtool set_settings: translate the requested autoneg/speed/duplex
 * into a media type, reprogram PHY advertisement and BMCR, and restart
 * or reset the adapter.  Restores the old media type on failure. */
static int atl1_set_settings(struct net_device *netdev,
	struct ethtool_cmd *ecmd)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	struct atl1_hw *hw = &adapter->hw;
	u16 phy_data;
	int ret_val = 0;
	u16 old_media_type = hw->media_type;

	if (netif_running(adapter->netdev)) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool shutting down adapter\n");
		atl1_down(adapter);
	}

	if (ecmd->autoneg == AUTONEG_ENABLE)
		hw->media_type = MEDIA_TYPE_AUTO_SENSOR;
	else {
		u32 speed = ethtool_cmd_speed(ecmd);
		if (speed == SPEED_1000) {
			if (ecmd->duplex != DUPLEX_FULL) {
				if (netif_msg_link(adapter))
					dev_warn(&adapter->pdev->dev,
						"1000M half is invalid\n");
				ret_val = -EINVAL;
				goto exit_sset;
			}
			hw->media_type = MEDIA_TYPE_1000M_FULL;
		} else if (speed == SPEED_100) {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_100M_FULL;
			else
				hw->media_type = MEDIA_TYPE_100M_HALF;
		} else {
			if (ecmd->duplex == DUPLEX_FULL)
				hw->media_type = MEDIA_TYPE_10M_FULL;
			else
				hw->media_type = MEDIA_TYPE_10M_HALF;
		}
	}
	switch (hw->media_type) {
	case MEDIA_TYPE_AUTO_SENSOR:
		ecmd->advertising =
		    ADVERTISED_10baseT_Half |
		    ADVERTISED_10baseT_Full |
		    ADVERTISED_100baseT_Half |
		    ADVERTISED_100baseT_Full |
		    ADVERTISED_1000baseT_Full |
		    ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	case MEDIA_TYPE_1000M_FULL:
		ecmd->advertising =
		    ADVERTISED_1000baseT_Full |
		    ADVERTISED_Autoneg | ADVERTISED_TP;
		break;
	default:
		ecmd->advertising = 0;
		break;
	}
	if (atl1_phy_setup_autoneg_adv(hw)) {
		ret_val = -EINVAL;
		if (netif_msg_link(adapter))
			dev_warn(&adapter->pdev->dev,
				"invalid ethtool speed/duplex setting\n");
		goto exit_sset;
	}
	if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR ||
	    hw->media_type == MEDIA_TYPE_1000M_FULL)
		phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN;
	else {
		switch (hw->media_type) {
		case MEDIA_TYPE_100M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 |
			    MII_CR_RESET;
			break;
		case MEDIA_TYPE_100M_HALF:
			phy_data = MII_CR_SPEED_100 | MII_CR_RESET;
			break;
		case MEDIA_TYPE_10M_FULL:
			phy_data =
			    MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 |
			    MII_CR_RESET;
			break;
		default:
			/* MEDIA_TYPE_10M_HALF: */
			phy_data = MII_CR_SPEED_10 | MII_CR_RESET;
			break;
		}
	}
	atl1_write_phy_reg(hw, MII_BMCR, phy_data);
exit_sset:
	if (ret_val)
		hw->media_type = old_media_type;

	if (netif_running(adapter->netdev)) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool starting adapter\n");
		atl1_up(adapter);
	} else if (!ret_val) {
		if (netif_msg_link(adapter))
			dev_dbg(&adapter->pdev->dev,
				"ethtool resetting adapter\n");
		atl1_reset(adapter);
	}
	return ret_val;
}

static void atl1_get_drvinfo(struct net_device *netdev,
	struct ethtool_drvinfo *drvinfo)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ATLX_DRIVER_NAME, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ATLX_DRIVER_VERSION,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->eedump_len = ATL1_EEDUMP_LEN;
}

static void atl1_get_wol(struct net_device *netdev,
	struct ethtool_wolinfo *wol)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	/* only magic-packet wake is exposed through ethtool */
	wol->supported = WAKE_MAGIC;
	wol->wolopts = 0;
	if (adapter->wol & ATLX_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}

static int atl1_set_wol(struct net_device *netdev,
	struct ethtool_wolinfo *wol)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_UCAST | WAKE_MCAST | WAKE_BCAST |
		WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;
	adapter->wol = 0;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= ATLX_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}

static u32 atl1_get_msglevel(struct net_device *netdev)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void atl1_set_msglevel(struct net_device *netdev, u32 value)
{
	struct atl1_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = value;
}

static int atl1_get_regs_len(struct net_device *netdev)
{
	return ATL1_REG_COUNT * sizeof(u32);
}

static void atl1_get_regs(struct net_device *netdev,
struct ethtool_regs *regs, void *p) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; unsigned int i; u32 *regbuf = p; for (i = 0; i < ATL1_REG_COUNT; i++) { /* * This switch statement avoids reserved regions * of register space. */ switch (i) { case 6 ... 9: case 14: case 29 ... 31: case 34 ... 63: case 75 ... 127: case 136 ... 1023: case 1027 ... 1087: case 1091 ... 1151: case 1194 ... 1195: case 1200 ... 1201: case 1206 ... 1213: case 1216 ... 1279: case 1290 ... 1311: case 1323 ... 1343: case 1358 ... 1359: case 1368 ... 1375: case 1378 ... 1383: case 1388 ... 1391: case 1393 ... 1395: case 1402 ... 1403: case 1410 ... 1471: case 1522 ... 1535: /* reserved region; don't read it */ regbuf[i] = 0; break; default: /* unreserved region */ regbuf[i] = ioread32(hw->hw_addr + (i * sizeof(u32))); } } } static void atl1_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_tpd_ring *txdr = &adapter->tpd_ring; struct atl1_rfd_ring *rxdr = &adapter->rfd_ring; ring->rx_max_pending = ATL1_MAX_RFD; ring->tx_max_pending = ATL1_MAX_TPD; ring->rx_mini_max_pending = 0; ring->rx_jumbo_max_pending = 0; ring->rx_pending = rxdr->count; ring->tx_pending = txdr->count; ring->rx_mini_pending = 0; ring->rx_jumbo_pending = 0; } static int atl1_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_tpd_ring *tpdr = &adapter->tpd_ring; struct atl1_rrd_ring *rrdr = &adapter->rrd_ring; struct atl1_rfd_ring *rfdr = &adapter->rfd_ring; struct atl1_tpd_ring tpd_old, tpd_new; struct atl1_rfd_ring rfd_old, rfd_new; struct atl1_rrd_ring rrd_old, rrd_new; struct atl1_ring_header rhdr_old, rhdr_new; struct atl1_smb smb; struct atl1_cmb cmb; int err; tpd_old = adapter->tpd_ring; rfd_old = adapter->rfd_ring; rrd_old = adapter->rrd_ring; rhdr_old = adapter->ring_header; if 
(netif_running(adapter->netdev)) atl1_down(adapter); rfdr->count = (u16) max(ring->rx_pending, (u32) ATL1_MIN_RFD); rfdr->count = rfdr->count > ATL1_MAX_RFD ? ATL1_MAX_RFD : rfdr->count; rfdr->count = (rfdr->count + 3) & ~3; rrdr->count = rfdr->count; tpdr->count = (u16) max(ring->tx_pending, (u32) ATL1_MIN_TPD); tpdr->count = tpdr->count > ATL1_MAX_TPD ? ATL1_MAX_TPD : tpdr->count; tpdr->count = (tpdr->count + 3) & ~3; if (netif_running(adapter->netdev)) { /* try to get new resources before deleting old */ err = atl1_setup_ring_resources(adapter); if (err) goto err_setup_ring; /* * save the new, restore the old in order to free it, * then restore the new back again */ rfd_new = adapter->rfd_ring; rrd_new = adapter->rrd_ring; tpd_new = adapter->tpd_ring; rhdr_new = adapter->ring_header; adapter->rfd_ring = rfd_old; adapter->rrd_ring = rrd_old; adapter->tpd_ring = tpd_old; adapter->ring_header = rhdr_old; /* * Save SMB and CMB, since atl1_free_ring_resources * will clear them. */ smb = adapter->smb; cmb = adapter->cmb; atl1_free_ring_resources(adapter); adapter->rfd_ring = rfd_new; adapter->rrd_ring = rrd_new; adapter->tpd_ring = tpd_new; adapter->ring_header = rhdr_new; adapter->smb = smb; adapter->cmb = cmb; err = atl1_up(adapter); if (err) return err; } return 0; err_setup_ring: adapter->rfd_ring = rfd_old; adapter->rrd_ring = rrd_old; adapter->tpd_ring = tpd_old; adapter->ring_header = rhdr_old; atl1_up(adapter); return err; } static void atl1_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *epause) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) { epause->autoneg = AUTONEG_ENABLE; } else { epause->autoneg = AUTONEG_DISABLE; } epause->rx_pause = 1; epause->tx_pause = 1; } static int atl1_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *epause) { struct atl1_adapter *adapter = 
netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) { epause->autoneg = AUTONEG_ENABLE; } else { epause->autoneg = AUTONEG_DISABLE; } epause->rx_pause = 1; epause->tx_pause = 1; return 0; } static void atl1_get_strings(struct net_device *netdev, u32 stringset, u8 *data) { u8 *p = data; int i; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ARRAY_SIZE(atl1_gstrings_stats); i++) { memcpy(p, atl1_gstrings_stats[i].stat_string, ETH_GSTRING_LEN); p += ETH_GSTRING_LEN; } break; } } static int atl1_nway_reset(struct net_device *netdev) { struct atl1_adapter *adapter = netdev_priv(netdev); struct atl1_hw *hw = &adapter->hw; if (netif_running(netdev)) { u16 phy_data; atl1_down(adapter); if (hw->media_type == MEDIA_TYPE_AUTO_SENSOR || hw->media_type == MEDIA_TYPE_1000M_FULL) { phy_data = MII_CR_RESET | MII_CR_AUTO_NEG_EN; } else { switch (hw->media_type) { case MEDIA_TYPE_100M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_100M_HALF: phy_data = MII_CR_SPEED_100 | MII_CR_RESET; break; case MEDIA_TYPE_10M_FULL: phy_data = MII_CR_FULL_DUPLEX | MII_CR_SPEED_10 | MII_CR_RESET; break; default: /* MEDIA_TYPE_10M_HALF */ phy_data = MII_CR_SPEED_10 | MII_CR_RESET; } } atl1_write_phy_reg(hw, MII_BMCR, phy_data); atl1_up(adapter); } return 0; } static const struct ethtool_ops atl1_ethtool_ops = { .get_settings = atl1_get_settings, .set_settings = atl1_set_settings, .get_drvinfo = atl1_get_drvinfo, .get_wol = atl1_get_wol, .set_wol = atl1_set_wol, .get_msglevel = atl1_get_msglevel, .set_msglevel = atl1_set_msglevel, .get_regs_len = atl1_get_regs_len, .get_regs = atl1_get_regs, .get_ringparam = atl1_get_ringparam, .set_ringparam = atl1_set_ringparam, .get_pauseparam = atl1_get_pauseparam, .set_pauseparam = atl1_set_pauseparam, .get_link = ethtool_op_get_link, .get_strings = atl1_get_strings, .nway_reset = atl1_nway_reset, .get_ethtool_stats = 
atl1_get_ethtool_stats, .get_sset_count = atl1_get_sset_count, };
gpl-2.0
InfinitiveOS-Devices/android_kernel_xiaomi_ferrari
drivers/net/ethernet/amd/7990.c
2320
21625
/* * 7990.c -- LANCE ethernet IC generic routines. * This is an attempt to separate out the bits of various ethernet * drivers that are common because they all use the AMD 7990 LANCE * (Local Area Network Controller for Ethernet) chip. * * Copyright (C) 05/1998 Peter Maydell <pmaydell@chiark.greenend.org.uk> * * Most of this stuff was obtained by looking at other LANCE drivers, * in particular a2065.[ch]. The AMD C-LANCE datasheet was also helpful. * NB: this was made easy by the fact that Jes Sorensen had cleaned up * most of a2025 and sunlance with the aim of merging them, so the * common code was pretty obvious. */ #include <linux/crc32.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/route.h> #include <linux/string.h> #include <linux/skbuff.h> #include <asm/irq.h> /* Used for the temporal inet entries and routing */ #include <linux/socket.h> #include <linux/bitops.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/pgtable.h> #ifdef CONFIG_HP300 #include <asm/blinken.h> #endif #include "7990.h" #define WRITERAP(lp,x) out_be16(lp->base + LANCE_RAP, (x)) #define WRITERDP(lp,x) out_be16(lp->base + LANCE_RDP, (x)) #define READRDP(lp) in_be16(lp->base + LANCE_RDP) #if defined(CONFIG_HPLANCE) || defined(CONFIG_HPLANCE_MODULE) #include "hplance.h" #undef WRITERAP #undef WRITERDP #undef READRDP #if defined(CONFIG_MVME147_NET) || defined(CONFIG_MVME147_NET_MODULE) /* Lossage Factor Nine, Mr Sulu. 
*/ #define WRITERAP(lp,x) (lp->writerap(lp,x)) #define WRITERDP(lp,x) (lp->writerdp(lp,x)) #define READRDP(lp) (lp->readrdp(lp)) #else /* These inlines can be used if only CONFIG_HPLANCE is defined */ static inline void WRITERAP(struct lance_private *lp, __u16 value) { do { out_be16(lp->base + HPLANCE_REGOFF + LANCE_RAP, value); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); } static inline void WRITERDP(struct lance_private *lp, __u16 value) { do { out_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP, value); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); } static inline __u16 READRDP(struct lance_private *lp) { __u16 value; do { value = in_be16(lp->base + HPLANCE_REGOFF + LANCE_RDP); } while ((in_8(lp->base + HPLANCE_STATUS) & LE_ACK) == 0); return value; } #endif #endif /* CONFIG_HPLANCE || CONFIG_HPLANCE_MODULE */ /* debugging output macros, various flavours */ /* #define TEST_HITS */ #ifdef UNDEF #define PRINT_RINGS() \ do { \ int t; \ for (t=0; t < RX_RING_SIZE; t++) { \ printk("R%d: @(%02X %04X) len %04X, mblen %04X, bits %02X\n",\ t, ib->brx_ring[t].rmd1_hadr, ib->brx_ring[t].rmd0,\ ib->brx_ring[t].length,\ ib->brx_ring[t].mblength, ib->brx_ring[t].rmd1_bits);\ }\ for (t=0; t < TX_RING_SIZE; t++) { \ printk("T%d: @(%02X %04X) len %04X, misc %04X, bits %02X\n",\ t, ib->btx_ring[t].tmd1_hadr, ib->btx_ring[t].tmd0,\ ib->btx_ring[t].length,\ ib->btx_ring[t].misc, ib->btx_ring[t].tmd1_bits);\ }\ } while (0) #else #define PRINT_RINGS() #endif /* Load the CSR registers. The LANCE has to be STOPped when we do this! 
*/ static void load_csrs (struct lance_private *lp) { volatile struct lance_init_block *aib = lp->lance_init_block; int leptr; leptr = LANCE_ADDR (aib); WRITERAP(lp, LE_CSR1); /* load address of init block */ WRITERDP(lp, leptr & 0xFFFF); WRITERAP(lp, LE_CSR2); WRITERDP(lp, leptr >> 16); WRITERAP(lp, LE_CSR3); WRITERDP(lp, lp->busmaster_regval); /* set byteswap/ALEctrl/byte ctrl */ /* Point back to csr0 */ WRITERAP(lp, LE_CSR0); } /* #define to 0 or 1 appropriately */ #define DEBUG_IRING 0 /* Set up the Lance Rx and Tx rings and the init block */ static void lance_init_ring (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_init_block *aib; /* for LANCE_ADDR computations */ int leptr; int i; aib = lp->lance_init_block; lp->rx_new = lp->tx_new = 0; lp->rx_old = lp->tx_old = 0; ib->mode = LE_MO_PROM; /* normal, enable Tx & Rx */ /* Copy the ethernet address to the lance init block * Notice that we do a byteswap if we're big endian. * [I think this is the right criterion; at least, sunlance, * a2065 and atarilance do the byteswap and lance.c (PC) doesn't. * However, the datasheet says that the BSWAP bit doesn't affect * the init block, so surely it should be low byte first for * everybody? Um.] * We could define the ib->physaddr as three 16bit values and * use (addr[1] << 8) | addr[0] & co, but this is more efficient. 
*/ #ifdef __BIG_ENDIAN ib->phys_addr [0] = dev->dev_addr [1]; ib->phys_addr [1] = dev->dev_addr [0]; ib->phys_addr [2] = dev->dev_addr [3]; ib->phys_addr [3] = dev->dev_addr [2]; ib->phys_addr [4] = dev->dev_addr [5]; ib->phys_addr [5] = dev->dev_addr [4]; #else for (i=0; i<6; i++) ib->phys_addr[i] = dev->dev_addr[i]; #endif if (DEBUG_IRING) printk ("TX rings:\n"); lp->tx_full = 0; /* Setup the Tx ring entries */ for (i = 0; i < (1<<lp->lance_log_tx_bufs); i++) { leptr = LANCE_ADDR(&aib->tx_buf[i][0]); ib->btx_ring [i].tmd0 = leptr; ib->btx_ring [i].tmd1_hadr = leptr >> 16; ib->btx_ring [i].tmd1_bits = 0; ib->btx_ring [i].length = 0xf000; /* The ones required by tmd2 */ ib->btx_ring [i].misc = 0; if (DEBUG_IRING) printk ("%d: 0x%8.8x\n", i, leptr); } /* Setup the Rx ring entries */ if (DEBUG_IRING) printk ("RX rings:\n"); for (i = 0; i < (1<<lp->lance_log_rx_bufs); i++) { leptr = LANCE_ADDR(&aib->rx_buf[i][0]); ib->brx_ring [i].rmd0 = leptr; ib->brx_ring [i].rmd1_hadr = leptr >> 16; ib->brx_ring [i].rmd1_bits = LE_R1_OWN; /* 0xf000 == bits that must be one (reserved, presumably) */ ib->brx_ring [i].length = -RX_BUFF_SIZE | 0xf000; ib->brx_ring [i].mblength = 0; if (DEBUG_IRING) printk ("%d: 0x%8.8x\n", i, leptr); } /* Setup the initialization block */ /* Setup rx descriptor pointer */ leptr = LANCE_ADDR(&aib->brx_ring); ib->rx_len = (lp->lance_log_rx_bufs << 13) | (leptr >> 16); ib->rx_ptr = leptr; if (DEBUG_IRING) printk ("RX ptr: %8.8x\n", leptr); /* Setup tx descriptor pointer */ leptr = LANCE_ADDR(&aib->btx_ring); ib->tx_len = (lp->lance_log_tx_bufs << 13) | (leptr >> 16); ib->tx_ptr = leptr; if (DEBUG_IRING) printk ("TX ptr: %8.8x\n", leptr); /* Clear the multicast filter */ ib->filter [0] = 0; ib->filter [1] = 0; PRINT_RINGS(); } /* LANCE must be STOPped before we do this, too... 
*/ static int init_restart_lance (struct lance_private *lp) { int i; WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_INIT); /* Need a hook here for sunlance ledma stuff */ /* Wait for the lance to complete initialization */ for (i = 0; (i < 100) && !(READRDP(lp) & (LE_C0_ERR | LE_C0_IDON)); i++) barrier(); if ((i == 100) || (READRDP(lp) & LE_C0_ERR)) { printk ("LANCE unopened after %d ticks, csr0=%4.4x.\n", i, READRDP(lp)); return -1; } /* Clear IDON by writing a "1", enable interrupts and start lance */ WRITERDP(lp, LE_C0_IDON); WRITERDP(lp, LE_C0_INEA | LE_C0_STRT); return 0; } static int lance_reset (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int status; /* Stop the lance */ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); load_csrs (lp); lance_init_ring (dev); dev->trans_start = jiffies; /* prevent tx timeout */ status = init_restart_lance (lp); #ifdef DEBUG_DRIVER printk ("Lance restart=%d\n", status); #endif return status; } static int lance_rx (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_rx_desc *rd; unsigned char bits; #ifdef TEST_HITS int i; #endif #ifdef TEST_HITS printk ("["); for (i = 0; i < RX_RING_SIZE; i++) { if (i == lp->rx_new) printk ("%s", ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "_" : "X"); else printk ("%s", ib->brx_ring [i].rmd1_bits & LE_R1_OWN ? "." : "1"); } printk ("]"); #endif #ifdef CONFIG_HP300 blinken_leds(0x40, 0); #endif WRITERDP(lp, LE_C0_RINT | LE_C0_INEA); /* ack Rx int, reenable ints */ for (rd = &ib->brx_ring [lp->rx_new]; /* For each Rx ring we own... */ !((bits = rd->rmd1_bits) & LE_R1_OWN); rd = &ib->brx_ring [lp->rx_new]) { /* We got an incomplete frame? 
*/ if ((bits & LE_R1_POK) != LE_R1_POK) { dev->stats.rx_over_errors++; dev->stats.rx_errors++; continue; } else if (bits & LE_R1_ERR) { /* Count only the end frame as a rx error, * not the beginning */ if (bits & LE_R1_BUF) dev->stats.rx_fifo_errors++; if (bits & LE_R1_CRC) dev->stats.rx_crc_errors++; if (bits & LE_R1_OFL) dev->stats.rx_over_errors++; if (bits & LE_R1_FRA) dev->stats.rx_frame_errors++; if (bits & LE_R1_EOP) dev->stats.rx_errors++; } else { int len = (rd->mblength & 0xfff) - 4; struct sk_buff *skb = netdev_alloc_skb(dev, len + 2); if (!skb) { dev->stats.rx_dropped++; rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; return 0; } skb_reserve (skb, 2); /* 16 byte align */ skb_put (skb, len); /* make room */ skb_copy_to_linear_data(skb, (unsigned char *)&(ib->rx_buf [lp->rx_new][0]), len); skb->protocol = eth_type_trans (skb, dev); netif_rx (skb); dev->stats.rx_packets++; dev->stats.rx_bytes += len; } /* Return the packet to the pool */ rd->mblength = 0; rd->rmd1_bits = LE_R1_OWN; lp->rx_new = (lp->rx_new + 1) & lp->rx_ring_mod_mask; } return 0; } static int lance_tx (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile struct lance_tx_desc *td; int i, j; int status; #ifdef CONFIG_HP300 blinken_leds(0x80, 0); #endif /* csr0 is 2f3 */ WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); /* csr0 is 73 */ j = lp->tx_old; for (i = j; i != lp->tx_new; i = j) { td = &ib->btx_ring [i]; /* If we hit a packet not owned by us, stop */ if (td->tmd1_bits & LE_T1_OWN) break; if (td->tmd1_bits & LE_T1_ERR) { status = td->misc; dev->stats.tx_errors++; if (status & LE_T3_RTY) dev->stats.tx_aborted_errors++; if (status & LE_T3_LCOL) dev->stats.tx_window_errors++; if (status & LE_T3_CLOS) { dev->stats.tx_carrier_errors++; if (lp->auto_select) { lp->tpe = 1 - lp->tpe; printk("%s: Carrier Lost, trying %s\n", dev->name, lp->tpe?"TPE":"AUI"); /* Stop the lance 
*/ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); lance_init_ring (dev); load_csrs (lp); init_restart_lance (lp); return 0; } } /* buffer errors and underflows turn off the transmitter */ /* Restart the adapter */ if (status & (LE_T3_BUF|LE_T3_UFL)) { dev->stats.tx_fifo_errors++; printk ("%s: Tx: ERR_BUF|ERR_UFL, restarting\n", dev->name); /* Stop the lance */ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); lance_init_ring (dev); load_csrs (lp); init_restart_lance (lp); return 0; } } else if ((td->tmd1_bits & LE_T1_POK) == LE_T1_POK) { /* * So we don't count the packet more than once. */ td->tmd1_bits &= ~(LE_T1_POK); /* One collision before packet was sent. */ if (td->tmd1_bits & LE_T1_EONE) dev->stats.collisions++; /* More than one collision, be optimistic. */ if (td->tmd1_bits & LE_T1_EMORE) dev->stats.collisions += 2; dev->stats.tx_packets++; } j = (j + 1) & lp->tx_ring_mod_mask; } lp->tx_old = j; WRITERDP(lp, LE_C0_TINT | LE_C0_INEA); return 0; } static irqreturn_t lance_interrupt (int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct lance_private *lp = netdev_priv(dev); int csr0; spin_lock (&lp->devlock); WRITERAP(lp, LE_CSR0); /* LANCE Controller Status */ csr0 = READRDP(lp); PRINT_RINGS(); if (!(csr0 & LE_C0_INTR)) { /* Check if any interrupt has */ spin_unlock (&lp->devlock); return IRQ_NONE; /* been generated by the Lance. */ } /* Acknowledge all the interrupt sources ASAP */ WRITERDP(lp, csr0 & ~(LE_C0_INEA|LE_C0_TDMD|LE_C0_STOP|LE_C0_STRT|LE_C0_INIT)); if ((csr0 & LE_C0_ERR)) { /* Clear the error condition */ WRITERDP(lp, LE_C0_BABL|LE_C0_ERR|LE_C0_MISS|LE_C0_INEA); } if (csr0 & LE_C0_RINT) lance_rx (dev); if (csr0 & LE_C0_TINT) lance_tx (dev); /* Log misc errors. */ if (csr0 & LE_C0_BABL) dev->stats.tx_errors++; /* Tx babble. */ if (csr0 & LE_C0_MISS) dev->stats.rx_errors++; /* Missed a Rx frame. 
*/ if (csr0 & LE_C0_MERR) { printk("%s: Bus master arbitration failure, status %4.4x.\n", dev->name, csr0); /* Restart the chip. */ WRITERDP(lp, LE_C0_STRT); } if (lp->tx_full && netif_queue_stopped(dev) && (TX_BUFFS_AVAIL >= 0)) { lp->tx_full = 0; netif_wake_queue (dev); } WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_BABL|LE_C0_CERR|LE_C0_MISS|LE_C0_MERR|LE_C0_IDON|LE_C0_INEA); spin_unlock (&lp->devlock); return IRQ_HANDLED; } int lance_open (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); int res; /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */ if (request_irq(lp->irq, lance_interrupt, IRQF_SHARED, lp->name, dev)) return -EAGAIN; res = lance_reset(dev); spin_lock_init(&lp->devlock); netif_start_queue (dev); return res; } EXPORT_SYMBOL_GPL(lance_open); int lance_close (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); netif_stop_queue (dev); /* Stop the LANCE */ WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); free_irq(lp->irq, dev); return 0; } EXPORT_SYMBOL_GPL(lance_close); void lance_tx_timeout(struct net_device *dev) { printk("lance_tx_timeout\n"); lance_reset(dev); dev->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue (dev); } EXPORT_SYMBOL_GPL(lance_tx_timeout); int lance_start_xmit (struct sk_buff *skb, struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; int entry, skblen, len; static int outs; unsigned long flags; if (!TX_BUFFS_AVAIL) return NETDEV_TX_LOCKED; netif_stop_queue (dev); skblen = skb->len; #ifdef DEBUG_DRIVER /* dump the packet */ { int i; for (i = 0; i < 64; i++) { if ((i % 16) == 0) printk ("\n"); printk ("%2.2x ", skb->data [i]); } } #endif len = (skblen <= ETH_ZLEN) ? 
ETH_ZLEN : skblen; entry = lp->tx_new & lp->tx_ring_mod_mask; ib->btx_ring [entry].length = (-len) | 0xf000; ib->btx_ring [entry].misc = 0; if (skb->len < ETH_ZLEN) memset((void *)&ib->tx_buf[entry][0], 0, ETH_ZLEN); skb_copy_from_linear_data(skb, (void *)&ib->tx_buf[entry][0], skblen); /* Now, give the packet to the lance */ ib->btx_ring [entry].tmd1_bits = (LE_T1_POK|LE_T1_OWN); lp->tx_new = (lp->tx_new+1) & lp->tx_ring_mod_mask; outs++; /* Kick the lance: transmit now */ WRITERDP(lp, LE_C0_INEA | LE_C0_TDMD); dev_kfree_skb (skb); spin_lock_irqsave (&lp->devlock, flags); if (TX_BUFFS_AVAIL) netif_start_queue (dev); else lp->tx_full = 1; spin_unlock_irqrestore (&lp->devlock, flags); return NETDEV_TX_OK; } EXPORT_SYMBOL_GPL(lance_start_xmit); /* taken from the depca driver via a2065.c */ static void lance_load_multicast (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; volatile u16 *mcast_table = (u16 *)&ib->filter; struct netdev_hw_addr *ha; u32 crc; /* set all multicast bits */ if (dev->flags & IFF_ALLMULTI){ ib->filter [0] = 0xffffffff; ib->filter [1] = 0xffffffff; return; } /* clear the multicast filter */ ib->filter [0] = 0; ib->filter [1] = 0; /* Add addresses */ netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(6, ha->addr); crc = crc >> 26; mcast_table [crc >> 4] |= 1 << (crc & 0xf); } } void lance_set_multicast (struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); volatile struct lance_init_block *ib = lp->init_block; int stopped; stopped = netif_queue_stopped(dev); if (!stopped) netif_stop_queue (dev); while (lp->tx_old != lp->tx_new) schedule(); WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STOP); lance_init_ring (dev); if (dev->flags & IFF_PROMISC) { ib->mode |= LE_MO_PROM; } else { ib->mode &= ~LE_MO_PROM; lance_load_multicast (dev); } load_csrs (lp); init_restart_lance (lp); if (!stopped) netif_start_queue (dev); } EXPORT_SYMBOL_GPL(lance_set_multicast); 
#ifdef CONFIG_NET_POLL_CONTROLLER void lance_poll(struct net_device *dev) { struct lance_private *lp = netdev_priv(dev); spin_lock (&lp->devlock); WRITERAP(lp, LE_CSR0); WRITERDP(lp, LE_C0_STRT); spin_unlock (&lp->devlock); lance_interrupt(dev->irq, dev); } #endif MODULE_LICENSE("GPL");
gpl-2.0
jjhiza/Monarch
drivers/input/tablet/wacom_wac.c
2832
64274
/* * drivers/input/tablet/wacom_wac.c * * USB Wacom tablet support - Wacom specific code * */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include "wacom_wac.h" #include "wacom.h" #include <linux/input/mt.h> #include <linux/hid.h> /* resolution for penabled devices */ #define WACOM_PL_RES 20 #define WACOM_PENPRTN_RES 40 #define WACOM_VOLITO_RES 50 #define WACOM_GRAPHIRE_RES 80 #define WACOM_INTUOS_RES 100 #define WACOM_INTUOS3_RES 200 static int wacom_penpartner_irq(struct wacom_wac *wacom) { unsigned char *data = wacom->data; struct input_dev *input = wacom->input; switch (data[0]) { case 1: if (data[5] & 0x80) { wacom->tool[0] = (data[5] & 0x20) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; wacom->id[0] = (data[5] & 0x20) ? ERASER_DEVICE_ID : STYLUS_DEVICE_ID; input_report_key(input, wacom->tool[0], 1); input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */ input_report_abs(input, ABS_X, get_unaligned_le16(&data[1])); input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3])); input_report_abs(input, ABS_PRESSURE, (signed char)data[6] + 127); input_report_key(input, BTN_TOUCH, ((signed char)data[6] > -127)); input_report_key(input, BTN_STYLUS, (data[5] & 0x40)); } else { input_report_key(input, wacom->tool[0], 0); input_report_abs(input, ABS_MISC, 0); /* report tool id */ input_report_abs(input, ABS_PRESSURE, -1); input_report_key(input, BTN_TOUCH, 0); } break; case 2: input_report_key(input, BTN_TOOL_PEN, 1); input_report_abs(input, ABS_MISC, STYLUS_DEVICE_ID); /* report tool id */ input_report_abs(input, ABS_X, get_unaligned_le16(&data[1])); input_report_abs(input, ABS_Y, get_unaligned_le16(&data[3])); input_report_abs(input, ABS_PRESSURE, (signed char)data[6] + 127); input_report_key(input, BTN_TOUCH, ((signed char)data[6] > -80) && !(data[5] & 
0x20)); input_report_key(input, BTN_STYLUS, (data[5] & 0x40)); break; default: printk(KERN_INFO "wacom_penpartner_irq: received unknown report #%d\n", data[0]); return 0; } return 1; } static int wacom_pl_irq(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; struct input_dev *input = wacom->input; int prox, pressure; if (data[0] != WACOM_REPORT_PENABLED) { dbg("wacom_pl_irq: received unknown report #%d", data[0]); return 0; } prox = data[1] & 0x40; if (prox) { wacom->id[0] = ERASER_DEVICE_ID; pressure = (signed char)((data[7] << 1) | ((data[4] >> 2) & 1)); if (features->pressure_max > 255) pressure = (pressure << 1) | ((data[4] >> 6) & 1); pressure += (features->pressure_max + 1) / 2; /* * if going from out of proximity into proximity select between the eraser * and the pen based on the state of the stylus2 button, choose eraser if * pressed else choose pen. if not a proximity change from out to in, send * an out of proximity for previous tool then a in for new tool. */ if (!wacom->tool[0]) { /* Eraser bit set for DTF */ if (data[1] & 0x10) wacom->tool[1] = BTN_TOOL_RUBBER; else /* Going into proximity select tool */ wacom->tool[1] = (data[4] & 0x20) ? 
BTN_TOOL_RUBBER : BTN_TOOL_PEN; } else { /* was entered with stylus2 pressed */ if (wacom->tool[1] == BTN_TOOL_RUBBER && !(data[4] & 0x20)) { /* report out proximity for previous tool */ input_report_key(input, wacom->tool[1], 0); input_sync(input); wacom->tool[1] = BTN_TOOL_PEN; return 0; } } if (wacom->tool[1] != BTN_TOOL_RUBBER) { /* Unknown tool selected default to pen tool */ wacom->tool[1] = BTN_TOOL_PEN; wacom->id[0] = STYLUS_DEVICE_ID; } input_report_key(input, wacom->tool[1], prox); /* report in proximity for tool */ input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */ input_report_abs(input, ABS_X, data[3] | (data[2] << 7) | ((data[1] & 0x03) << 14)); input_report_abs(input, ABS_Y, data[6] | (data[5] << 7) | ((data[4] & 0x03) << 14)); input_report_abs(input, ABS_PRESSURE, pressure); input_report_key(input, BTN_TOUCH, data[4] & 0x08); input_report_key(input, BTN_STYLUS, data[4] & 0x10); /* Only allow the stylus2 button to be reported for the pen tool. */ input_report_key(input, BTN_STYLUS2, (wacom->tool[1] == BTN_TOOL_PEN) && (data[4] & 0x20)); } else { /* report proximity-out of a (valid) tool */ if (wacom->tool[1] != BTN_TOOL_RUBBER) { /* Unknown tool selected default to pen tool */ wacom->tool[1] = BTN_TOOL_PEN; } input_report_key(input, wacom->tool[1], prox); } wacom->tool[0] = prox; /* Save proximity state */ return 1; } static int wacom_ptu_irq(struct wacom_wac *wacom) { unsigned char *data = wacom->data; struct input_dev *input = wacom->input; if (data[0] != WACOM_REPORT_PENABLED) { printk(KERN_INFO "wacom_ptu_irq: received unknown report #%d\n", data[0]); return 0; } if (data[1] & 0x04) { input_report_key(input, BTN_TOOL_RUBBER, data[1] & 0x20); input_report_key(input, BTN_TOUCH, data[1] & 0x08); wacom->id[0] = ERASER_DEVICE_ID; } else { input_report_key(input, BTN_TOOL_PEN, data[1] & 0x20); input_report_key(input, BTN_TOUCH, data[1] & 0x01); wacom->id[0] = STYLUS_DEVICE_ID; } input_report_abs(input, ABS_MISC, wacom->id[0]); /* 
report tool id */ input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2])); input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4])); input_report_abs(input, ABS_PRESSURE, le16_to_cpup((__le16 *)&data[6])); input_report_key(input, BTN_STYLUS, data[1] & 0x02); input_report_key(input, BTN_STYLUS2, data[1] & 0x10); return 1; } static int wacom_dtu_irq(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; char *data = wacom->data; struct input_dev *input = wacom->input; int prox = data[1] & 0x20, pressure; dbg("wacom_dtu_irq: received report #%d", data[0]); if (prox) { /* Going into proximity select tool */ wacom->tool[0] = (data[1] & 0x0c) ? BTN_TOOL_RUBBER : BTN_TOOL_PEN; if (wacom->tool[0] == BTN_TOOL_PEN) wacom->id[0] = STYLUS_DEVICE_ID; else wacom->id[0] = ERASER_DEVICE_ID; } input_report_key(input, BTN_STYLUS, data[1] & 0x02); input_report_key(input, BTN_STYLUS2, data[1] & 0x10); input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2])); input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4])); pressure = ((data[7] & 0x01) << 8) | data[6]; if (pressure < 0) pressure = features->pressure_max + pressure + 1; input_report_abs(input, ABS_PRESSURE, pressure); input_report_key(input, BTN_TOUCH, data[1] & 0x05); if (!prox) /* out-prox */ wacom->id[0] = 0; input_report_key(input, wacom->tool[0], prox); input_report_abs(input, ABS_MISC, wacom->id[0]); return 1; } static int wacom_graphire_irq(struct wacom_wac *wacom) { struct wacom_features *features = &wacom->features; unsigned char *data = wacom->data; struct input_dev *input = wacom->input; int prox; int rw = 0; int retval = 0; if (data[0] != WACOM_REPORT_PENABLED) { dbg("wacom_graphire_irq: received unknown report #%d", data[0]); goto exit; } prox = data[1] & 0x80; if (prox || wacom->id[0]) { if (prox) { switch ((data[1] >> 5) & 3) { case 0: /* Pen */ wacom->tool[0] = BTN_TOOL_PEN; wacom->id[0] = STYLUS_DEVICE_ID; break; case 1: /* Rubber */ wacom->tool[0] = 
					BTN_TOOL_RUBBER;
				wacom->id[0] = ERASER_DEVICE_ID;
				break;

			case 2: /* Mouse with wheel */
				input_report_key(input, BTN_MIDDLE,
						 data[1] & 0x04);
				/* fall through */

			case 3: /* Mouse without wheel */
				wacom->tool[0] = BTN_TOOL_MOUSE;
				wacom->id[0] = CURSOR_DEVICE_ID;
				break;
			}
		}
		input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
		input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
		if (wacom->tool[0] != BTN_TOOL_MOUSE) {
			/* pen/eraser: 9-bit pressure plus stylus buttons */
			input_report_abs(input, ABS_PRESSURE,
					 data[6] | ((data[7] & 0x01) << 8));
			input_report_key(input, BTN_TOUCH, data[1] & 0x01);
			input_report_key(input, BTN_STYLUS, data[1] & 0x02);
			input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
		} else {
			/* mouse tool: buttons, hover distance and wheel */
			input_report_key(input, BTN_LEFT, data[1] & 0x01);
			input_report_key(input, BTN_RIGHT, data[1] & 0x02);
			if (features->type == WACOM_G4 ||
			    features->type == WACOM_MO) {
				input_report_abs(input, ABS_DISTANCE,
						 data[6] & 0x3f);
				rw = (data[7] & 0x04) - (data[7] & 0x03);
			} else {
				input_report_abs(input, ABS_DISTANCE,
						 data[7] & 0x3f);
				rw = -(signed char)data[6];
			}
			input_report_rel(input, REL_WHEEL, rw);
		}

		if (!prox)
			wacom->id[0] = 0;
		input_report_abs(input, ABS_MISC, wacom->id[0]); /* report tool id */
		input_report_key(input, wacom->tool[0], prox);
		input_event(input, EV_MSC, MSC_SERIAL, 1);
		input_sync(input); /* sync last event */
	}

	/* send pad data */
	switch (features->type) {
	case WACOM_G4:
		prox = data[7] & 0xf8;
		if (prox || wacom->id[1]) {
			wacom->id[1] = PAD_DEVICE_ID;
			input_report_key(input, BTN_BACK, (data[7] & 0x40));
			input_report_key(input, BTN_FORWARD, (data[7] & 0x80));
			rw = ((data[7] & 0x18) >> 3) - ((data[7] & 0x20) >> 3);
			input_report_rel(input, REL_WHEEL, rw);
			if (!prox)
				wacom->id[1] = 0;
			input_report_abs(input, ABS_MISC, wacom->id[1]);
			input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
			retval = 1;
		}
		break;

	case WACOM_MO:
		prox = (data[7] & 0xf8) || data[8];
		if (prox || wacom->id[1]) {
			wacom->id[1] = PAD_DEVICE_ID;
			input_report_key(input, BTN_BACK, (data[7] & 0x08));
			input_report_key(input,
					 BTN_LEFT, (data[7] & 0x20));
			input_report_key(input, BTN_FORWARD, (data[7] & 0x10));
			input_report_key(input, BTN_RIGHT, (data[7] & 0x40));
			input_report_abs(input, ABS_WHEEL, (data[8] & 0x7f));
			if (!prox)
				wacom->id[1] = 0;
			input_report_abs(input, ABS_MISC, wacom->id[1]);
			input_event(input, EV_MSC, MSC_SERIAL, 0xf0);
			retval = 1;
		}
		break;
	}
exit:
	return retval;
}

/*
 * Handle Intuos in-prox ("enter") and out-of-prox ("exit") reports.
 * On enter: latches the tool serial number and maps the hardware tool ID
 * to an input BTN_TOOL_* code.  On exit: zeroes all axes/buttons so the
 * next in-prox starts from a clean state.
 * Returns 1 (enter consumed), 2 (exit consumed) or 0 (not an in/out report).
 */
static int wacom_intuos_inout(struct wacom_wac *wacom)
{
	struct wacom_features *features = &wacom->features;
	unsigned char *data = wacom->data;
	struct input_dev *input = wacom->input;
	int idx = 0;

	/* tool number */
	if (features->type == INTUOS)
		idx = data[1] & 0x01;

	/* Enter report */
	if ((data[1] & 0xfc) == 0xc0) {
		/* serial number of the tool */
		wacom->serial[idx] = ((data[3] & 0x0f) << 28) +
			(data[4] << 20) + (data[5] << 12) +
			(data[6] << 4) + (data[7] >> 4);

		wacom->id[idx] = (data[2] << 4) | (data[3] >> 4) |
			((data[7] & 0x0f) << 20) | ((data[8] & 0xf0) << 12);

		/* map the hardware tool ID to a BTN_TOOL_* code */
		switch (wacom->id[idx] & 0xfffff) {
		case 0x812: /* Inking pen */
		case 0x801: /* Intuos3 Inking pen */
		case 0x20802: /* Intuos4 Inking Pen */
		case 0x012:
			wacom->tool[idx] = BTN_TOOL_PENCIL;
			break;

		case 0x822: /* Pen */
		case 0x842:
		case 0x852:
		case 0x823: /* Intuos3 Grip Pen */
		case 0x813: /* Intuos3 Classic Pen */
		case 0x885: /* Intuos3 Marker Pen */
		case 0x802: /* Intuos4 General Pen */
		case 0x804: /* Intuos4 Marker Pen */
		case 0x40802: /* Intuos4 Classic Pen */
		case 0x022:
			wacom->tool[idx] = BTN_TOOL_PEN;
			break;

		case 0x832: /* Stroke pen */
		case 0x032:
			wacom->tool[idx] = BTN_TOOL_BRUSH;
			break;

		case 0x007: /* Mouse 4D and 2D */
		case 0x09c:
		case 0x094:
		case 0x017: /* Intuos3 2D Mouse */
		case 0x806: /* Intuos4 Mouse */
			wacom->tool[idx] = BTN_TOOL_MOUSE;
			break;

		case 0x096: /* Lens cursor */
		case 0x097: /* Intuos3 Lens cursor */
		case 0x006: /* Intuos4 Lens cursor */
			wacom->tool[idx] = BTN_TOOL_LENS;
			break;

		case 0x82a: /* Eraser */
		case 0x85a:
		case 0x91a:
		case 0xd1a:
		case 0x0fa:
		case 0x82b: /* Intuos3 Grip Pen Eraser */
		case 0x81b: /* Intuos3 Classic Pen
			     Eraser */
		case 0x91b: /* Intuos3 Airbrush Eraser */
		case 0x80c: /* Intuos4 Marker Pen Eraser */
		case 0x80a: /* Intuos4 General Pen Eraser */
		case 0x4080a: /* Intuos4 Classic Pen Eraser */
		case 0x90a: /* Intuos4 Airbrush Eraser */
			wacom->tool[idx] = BTN_TOOL_RUBBER;
			break;

		case 0xd12:
		case 0x912:
		case 0x112:
		case 0x913: /* Intuos3 Airbrush */
		case 0x902: /* Intuos4 Airbrush */
			wacom->tool[idx] = BTN_TOOL_AIRBRUSH;
			break;

		default: /* Unknown tool */
			wacom->tool[idx] = BTN_TOOL_PEN;
			break;
		}
		return 1;
	}

	/* older I4 styli don't work with new Cintiqs */
	if (!((wacom->id[idx] >> 20) & 0x01) &&
	    (features->type == WACOM_21UX2))
		return 1;

	/* Exit report */
	if ((data[1] & 0xfe) == 0x80) {
		/*
		 * Reset all states otherwise we lose the initial states
		 * when in-prox next time
		 */
		input_report_abs(input, ABS_X, 0);
		input_report_abs(input, ABS_Y, 0);
		input_report_abs(input, ABS_DISTANCE, 0);
		input_report_abs(input, ABS_TILT_X, 0);
		input_report_abs(input, ABS_TILT_Y, 0);
		if (wacom->tool[idx] >= BTN_TOOL_MOUSE) {
			/* mouse/lens cursor: clear button and wheel state */
			input_report_key(input, BTN_LEFT, 0);
			input_report_key(input, BTN_MIDDLE, 0);
			input_report_key(input, BTN_RIGHT, 0);
			input_report_key(input, BTN_SIDE, 0);
			input_report_key(input, BTN_EXTRA, 0);
			input_report_abs(input, ABS_THROTTLE, 0);
			input_report_abs(input, ABS_RZ, 0);
		} else {
			/* pen-family tool: clear pressure and stylus state */
			input_report_abs(input, ABS_PRESSURE, 0);
			input_report_key(input, BTN_STYLUS, 0);
			input_report_key(input, BTN_STYLUS2, 0);
			input_report_key(input, BTN_TOUCH, 0);
			input_report_abs(input, ABS_WHEEL, 0);
			if (features->type >= INTUOS3S)
				input_report_abs(input, ABS_Z, 0);
		}
		input_report_key(input, wacom->tool[idx], 0);
		input_report_abs(input, ABS_MISC, 0); /* reset tool id */
		input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
		wacom->id[idx] = 0;
		return 2;
	}
	return 0;
}

/*
 * Decode the packet types shared by all Intuos pen-family tools:
 * the general pen packet (pressure/tilt/buttons) and the airbrush
 * second packet (wheel/tilt).
 */
static void wacom_intuos_general(struct wacom_wac *wacom)
{
	struct wacom_features *features = &wacom->features;
	unsigned char *data = wacom->data;
	struct input_dev *input = wacom->input;
	unsigned int t;

	/* general pen packet */
	if ((data[1] & 0xb8) == 0xa0) {
		t = (data[6] << 2) | ((data[7] >> 6) & 3);
		if ((features->type >= INTUOS4S && features->type <= INTUOS4L) ||
		    features->type == WACOM_21UX2 ||
		    features->type == WACOM_24HD) {
			/* these models add one extra pressure bit */
			t = (t << 1) | (data[1] & 1);
		}
		input_report_abs(input, ABS_PRESSURE, t);
		input_report_abs(input, ABS_TILT_X,
				 ((data[7] << 1) & 0x7e) | (data[8] >> 7));
		input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
		input_report_key(input, BTN_STYLUS, data[1] & 2);
		input_report_key(input, BTN_STYLUS2, data[1] & 4);
		input_report_key(input, BTN_TOUCH, t > 10);
	}

	/* airbrush second packet */
	if ((data[1] & 0xbc) == 0xb4) {
		input_report_abs(input, ABS_WHEEL,
				 (data[6] << 2) | ((data[7] >> 6) & 3));
		input_report_abs(input, ABS_TILT_X,
				 ((data[7] << 1) & 0x7e) | (data[8] >> 7));
		input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
	}
}

/*
 * Main Intuos/Cintiq report dispatcher: handles pad packets inline,
 * delegates in/out-prox to wacom_intuos_inout(), then decodes position,
 * pen packets and the various mouse/rotation/lens packet types.
 * Returns 1 when events were queued and the caller should sync.
 */
static int wacom_intuos_irq(struct wacom_wac *wacom)
{
	struct wacom_features *features = &wacom->features;
	unsigned char *data = wacom->data;
	struct input_dev *input = wacom->input;
	unsigned int t;
	int idx = 0, result;

	if (data[0] != WACOM_REPORT_PENABLED &&
	    data[0] != WACOM_REPORT_INTUOSREAD &&
	    data[0] != WACOM_REPORT_INTUOSWRITE &&
	    data[0] != WACOM_REPORT_INTUOSPAD) {
		dbg("wacom_intuos_irq: received unknown report #%d", data[0]);
		return 0;
	}

	/* tool number */
	if (features->type == INTUOS)
		idx = data[1] & 0x01;

	/* pad packets. Works as a second tool and is always in prox */
	if (data[0] == WACOM_REPORT_INTUOSPAD) {
		if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
			input_report_key(input, BTN_0, (data[2] & 0x01));
			input_report_key(input, BTN_1, (data[3] & 0x01));
			input_report_key(input, BTN_2, (data[3] & 0x02));
			input_report_key(input, BTN_3, (data[3] & 0x04));
			input_report_key(input, BTN_4, (data[3] & 0x08));
			input_report_key(input, BTN_5, (data[3] & 0x10));
			input_report_key(input, BTN_6, (data[3] & 0x20));
			if (data[1] & 0x80) {
				input_report_abs(input, ABS_WHEEL, (data[1] & 0x7f));
			} else {
				/* Out of proximity, clear wheel value.
				 */
				input_report_abs(input, ABS_WHEEL, 0);
			}
			if (features->type != INTUOS4S) {
				/* larger Intuos4 models have two extra buttons */
				input_report_key(input, BTN_7, (data[3] & 0x40));
				input_report_key(input, BTN_8, (data[3] & 0x80));
			}
			if (data[1] | (data[2] & 0x01) | data[3]) {
				/* something is active: pad is "in prox" */
				input_report_key(input, wacom->tool[1], 1);
				input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
			} else {
				input_report_key(input, wacom->tool[1], 0);
				input_report_abs(input, ABS_MISC, 0);
			}
		} else if (features->type == WACOM_24HD) {
			input_report_key(input, BTN_0, (data[6] & 0x01));
			input_report_key(input, BTN_1, (data[6] & 0x02));
			input_report_key(input, BTN_2, (data[6] & 0x04));
			input_report_key(input, BTN_3, (data[6] & 0x08));
			input_report_key(input, BTN_4, (data[6] & 0x10));
			input_report_key(input, BTN_5, (data[6] & 0x20));
			input_report_key(input, BTN_6, (data[6] & 0x40));
			input_report_key(input, BTN_7, (data[6] & 0x80));
			input_report_key(input, BTN_8, (data[8] & 0x01));
			input_report_key(input, BTN_9, (data[8] & 0x02));
			input_report_key(input, BTN_A, (data[8] & 0x04));
			input_report_key(input, BTN_B, (data[8] & 0x08));
			input_report_key(input, BTN_C, (data[8] & 0x10));
			input_report_key(input, BTN_X, (data[8] & 0x20));
			input_report_key(input, BTN_Y, (data[8] & 0x40));
			input_report_key(input, BTN_Z, (data[8] & 0x80));

			/*
			 * Three "buttons" are available on the 24HD which are
			 * physically implemented as a touchstrip. Each button
			 * is approximately 3 bits wide with a 2 bit spacing.
			 * The raw touchstrip bits are stored at:
			 *    ((data[3] & 0x1f) << 8) | data[4])
			 */
			input_report_key(input, KEY_PROG1, data[4] & 0x07);
			input_report_key(input, KEY_PROG2, data[4] & 0xE0);
			input_report_key(input, KEY_PROG3, data[3] & 0x1C);

			if (data[1] & 0x80) {
				input_report_abs(input, ABS_WHEEL, (data[1] & 0x7f));
			} else {
				/* Out of proximity, clear wheel value. */
				input_report_abs(input, ABS_WHEEL, 0);
			}

			if (data[2] & 0x80) {
				input_report_abs(input, ABS_THROTTLE, (data[2] & 0x7f));
			} else {
				/* Out of proximity, clear second wheel value.
				 */
				input_report_abs(input, ABS_THROTTLE, 0);
			}

			if (data[1] | data[2] | (data[3] & 0x1f) |
			    data[4] | data[6] | data[8]) {
				input_report_key(input, wacom->tool[1], 1);
				input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
			} else {
				input_report_key(input, wacom->tool[1], 0);
				input_report_abs(input, ABS_MISC, 0);
			}
		} else {
			if (features->type == WACOM_21UX2) {
				input_report_key(input, BTN_0, (data[5] & 0x01));
				input_report_key(input, BTN_1, (data[6] & 0x01));
				input_report_key(input, BTN_2, (data[6] & 0x02));
				input_report_key(input, BTN_3, (data[6] & 0x04));
				input_report_key(input, BTN_4, (data[6] & 0x08));
				input_report_key(input, BTN_5, (data[6] & 0x10));
				input_report_key(input, BTN_6, (data[6] & 0x20));
				input_report_key(input, BTN_7, (data[6] & 0x40));
				input_report_key(input, BTN_8, (data[6] & 0x80));
				input_report_key(input, BTN_9, (data[7] & 0x01));
				input_report_key(input, BTN_A, (data[8] & 0x01));
				input_report_key(input, BTN_B, (data[8] & 0x02));
				input_report_key(input, BTN_C, (data[8] & 0x04));
				input_report_key(input, BTN_X, (data[8] & 0x08));
				input_report_key(input, BTN_Y, (data[8] & 0x10));
				input_report_key(input, BTN_Z, (data[8] & 0x20));
				input_report_key(input, BTN_BASE, (data[8] & 0x40));
				input_report_key(input, BTN_BASE2, (data[8] & 0x80));
			} else {
				/* Intuos3 / Cintiq style pad layout */
				input_report_key(input, BTN_0, (data[5] & 0x01));
				input_report_key(input, BTN_1, (data[5] & 0x02));
				input_report_key(input, BTN_2, (data[5] & 0x04));
				input_report_key(input, BTN_3, (data[5] & 0x08));
				input_report_key(input, BTN_4, (data[6] & 0x01));
				input_report_key(input, BTN_5, (data[6] & 0x02));
				input_report_key(input, BTN_6, (data[6] & 0x04));
				input_report_key(input, BTN_7, (data[6] & 0x08));
				input_report_key(input, BTN_8, (data[5] & 0x10));
				input_report_key(input, BTN_9, (data[6] & 0x10));
			}
			/* touch strips */
			input_report_abs(input, ABS_RX, ((data[1] & 0x1f) << 8) | data[2]);
			input_report_abs(input, ABS_RY, ((data[3] & 0x1f) << 8) | data[4]);

			if ((data[5] & 0x1f) | data[6] | (data[1] & 0x1f) |
			    data[2] | (data[3] & 0x1f) | data[4] |
			    data[8] | (data[7] & 0x01)) {
				input_report_key(input, wacom->tool[1], 1);
				input_report_abs(input, ABS_MISC, PAD_DEVICE_ID);
			} else {
				input_report_key(input, wacom->tool[1], 0);
				input_report_abs(input, ABS_MISC, 0);
			}
		}
		input_event(input, EV_MSC, MSC_SERIAL, 0xffffffff);
		return 1;
	}

	/* process in/out prox events */
	result = wacom_intuos_inout(wacom);
	if (result)
		return result - 1;

	/* don't proceed if we don't know the ID */
	if (!wacom->id[idx])
		return 0;

	/* Only large Intuos support Lense Cursor */
	if (wacom->tool[idx] == BTN_TOOL_LENS &&
	    (features->type == INTUOS3 ||
	     features->type == INTUOS3S ||
	     features->type == INTUOS4 ||
	     features->type == INTUOS4S)) {
		return 0;
	}

	/* Cintiq doesn't send data when RDY bit isn't set */
	if (features->type == CINTIQ && !(data[1] & 0x40))
		return 0;

	if (features->type >= INTUOS3S) {
		/* Intuos3+: 16-bit coordinates plus one extra LSB in data[9] */
		input_report_abs(input, ABS_X,
				 (data[2] << 9) | (data[3] << 1) | ((data[9] >> 1) & 1));
		input_report_abs(input, ABS_Y,
				 (data[4] << 9) | (data[5] << 1) | (data[9] & 1));
		input_report_abs(input, ABS_DISTANCE, ((data[9] >> 2) & 0x3f));
	} else {
		/* older models: big-endian 16-bit coordinates */
		input_report_abs(input, ABS_X, be16_to_cpup((__be16 *)&data[2]));
		input_report_abs(input, ABS_Y, be16_to_cpup((__be16 *)&data[4]));
		input_report_abs(input, ABS_DISTANCE, ((data[9] >> 3) & 0x1f));
	}

	/* process general packets */
	wacom_intuos_general(wacom);

	/* 4D mouse, 2D mouse, marker pen rotation, tilt mouse, or Lens cursor packets */
	if ((data[1] & 0xbc) == 0xa8 ||
	    (data[1] & 0xbe) == 0xb0 ||
	    (data[1] & 0xbc) == 0xac) {

		if (data[1] & 0x02) {
			/* Rotation packet */
			if (features->type >= INTUOS3S) {
				/* I3 marker pen rotation */
				t = (data[6] << 3) | ((data[7] >> 5) & 7);
				t = (data[7] & 0x20) ?
					((t > 900) ? ((t-1) / 2 - 1350) :
					 ((t-1) / 2 + 450)) : (450 - t / 2) ;
				input_report_abs(input, ABS_Z, t);
			} else {
				/* 4D mouse rotation packet */
				t = (data[6] << 3) | ((data[7] >> 5) & 7);
				input_report_abs(input, ABS_RZ,
						 (data[7] & 0x20) ?
						 ((t - 1) / 2) : -t / 2);
			}

		} else if (!(data[1] & 0x10) && features->type < INTUOS3S) {
			/* 4D mouse packet */
			input_report_key(input, BTN_LEFT, data[8] & 0x01);
			input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
			input_report_key(input, BTN_RIGHT, data[8] & 0x04);

			input_report_key(input, BTN_SIDE, data[8] & 0x20);
			input_report_key(input, BTN_EXTRA, data[8] & 0x10);
			t = (data[6] << 2) | ((data[7] >> 6) & 3);
			input_report_abs(input, ABS_THROTTLE,
					 (data[8] & 0x08) ? -t : t);

		} else if (wacom->tool[idx] == BTN_TOOL_MOUSE) {
			/* I4 mouse */
			if (features->type >= INTUOS4S && features->type <= INTUOS4L) {
				input_report_key(input, BTN_LEFT, data[6] & 0x01);
				input_report_key(input, BTN_MIDDLE, data[6] & 0x02);
				input_report_key(input, BTN_RIGHT, data[6] & 0x04);
				input_report_rel(input, REL_WHEEL,
						 ((data[7] & 0x80) >> 7) -
						 ((data[7] & 0x40) >> 6));
				input_report_key(input, BTN_SIDE, data[6] & 0x08);
				input_report_key(input, BTN_EXTRA, data[6] & 0x10);

				input_report_abs(input, ABS_TILT_X,
						 ((data[7] << 1) & 0x7e) | (data[8] >> 7));
				input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
			} else {
				/* 2D mouse packet */
				input_report_key(input, BTN_LEFT, data[8] & 0x04);
				input_report_key(input, BTN_MIDDLE, data[8] & 0x08);
				input_report_key(input, BTN_RIGHT, data[8] & 0x10);
				input_report_rel(input, REL_WHEEL,
						 (data[8] & 0x01) -
						 ((data[8] & 0x02) >> 1));

				/* I3 2D mouse side buttons */
				if (features->type >= INTUOS3S && features->type <= INTUOS3L) {
					input_report_key(input, BTN_SIDE, data[8] & 0x40);
					input_report_key(input, BTN_EXTRA, data[8] & 0x20);
				}
			}
		} else if ((features->type < INTUOS3S || features->type == INTUOS3L ||
			    features->type == INTUOS4L) &&
			   wacom->tool[idx] == BTN_TOOL_LENS) {
			/* Lens cursor packets */
			input_report_key(input, BTN_LEFT, data[8] & 0x01);
			input_report_key(input, BTN_MIDDLE, data[8] & 0x02);
			input_report_key(input, BTN_RIGHT, data[8] & 0x04);
			input_report_key(input, BTN_SIDE, data[8] & 0x10);
			input_report_key(input, BTN_EXTRA, data[8] & 0x08);
		}
	}
	input_report_abs(input, ABS_MISC, wacom->id[idx]); /* report tool id */
	input_report_key(input, wacom->tool[idx], 1);
	input_event(input, EV_MSC, MSC_SERIAL, wacom->serial[idx]);
	return 1;
}

/*
 * Decode a two-finger multitouch report from a Tablet PC (2FG) device.
 * Touches are suppressed while the stylus is in proximity (palm rejection);
 * the resulting touch state is cached for the pen path.
 */
static int wacom_tpc_mt_touch(struct wacom_wac *wacom)
{
	struct input_dev *input = wacom->input;
	unsigned char *data = wacom->data;
	int contact_with_no_pen_down_count = 0;
	int i;

	for (i = 0; i < 2; i++) {
		int p = data[1] & (1 << i);
		bool touch = p && !wacom->shared->stylus_in_proximity;

		input_mt_slot(input, i);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
		if (touch) {
			int x = le16_to_cpup((__le16 *)&data[i * 2 + 2]) & 0x7fff;
			int y = le16_to_cpup((__le16 *)&data[i * 2 + 6]) & 0x7fff;

			input_report_abs(input, ABS_MT_POSITION_X, x);
			input_report_abs(input, ABS_MT_POSITION_Y, y);
			contact_with_no_pen_down_count++;
		}
	}

	/* keep touch state for pen event */
	wacom->shared->touch_down = (contact_with_no_pen_down_count > 0);

	input_mt_report_pointer_emulation(input, true);

	return 1;
}

/*
 * Decode a single-finger touch report from a Tablet PC device.  The
 * packet layout differs between the 1FG length and the capacitive
 * variant; touch is forced out while the pen is in proximity.
 */
static int wacom_tpc_single_touch(struct wacom_wac *wacom, size_t len)
{
	char *data = wacom->data;
	struct input_dev *input = wacom->input;
	bool prox;
	int x = 0, y = 0;

	if (!wacom->shared->stylus_in_proximity) {
		if (len == WACOM_PKGLEN_TPC1FG) {
			prox = data[0] & 0x01;
			x = get_unaligned_le16(&data[1]);
			y = get_unaligned_le16(&data[3]);
		} else { /* with capacity */
			prox = data[1] & 0x01;
			x = le16_to_cpup((__le16 *)&data[2]);
			y = le16_to_cpup((__le16 *)&data[4]);
		}
	} else
		/* force touch out when pen is in prox */
		prox = 0;

	if (prox) {
		input_report_abs(input, ABS_X, x);
		input_report_abs(input, ABS_Y, y);
	}
	input_report_key(input, BTN_TOUCH, prox);

	/* keep touch state for pen events */
	wacom->shared->touch_down = prox;

	return 1;
}

/*
 * Decode a pen report from a Tablet PC device.  Pen events are only
 * emitted while no touch is down (touch wins until it is forced out).
 * Returns 1 when events were queued, 0 otherwise.
 */
static int wacom_tpc_pen(struct wacom_wac *wacom)
{
	struct wacom_features *features = &wacom->features;
	char *data = wacom->data;
	struct input_dev *input = wacom->input;
	int pressure;
	bool prox = data[1] & 0x20;

	if (!wacom->shared->stylus_in_proximity) /* first in prox */
		/*
		 * Going into proximity select tool */
		wacom->tool[0] = (data[1] & 0x0c) ?
			BTN_TOOL_RUBBER : BTN_TOOL_PEN;

	/* keep pen state for touch events */
	wacom->shared->stylus_in_proximity = prox;

	/* send pen events only when touch is up or forced out */
	if (!wacom->shared->touch_down) {
		input_report_key(input, BTN_STYLUS, data[1] & 0x02);
		input_report_key(input, BTN_STYLUS2, data[1] & 0x10);
		input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
		input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
		/* data is signed char, so re-bias negative partials into the
		 * 9-bit pressure range */
		pressure = ((data[7] & 0x01) << 8) | data[6];
		if (pressure < 0)
			pressure = features->pressure_max + pressure + 1;
		input_report_abs(input, ABS_PRESSURE, pressure);
		input_report_key(input, BTN_TOUCH, data[1] & 0x05);
		input_report_key(input, wacom->tool[0], prox);
		return 1;
	}

	return 0;
}

/*
 * Dispatch a Tablet PC report by packet length first, then by report ID.
 */
static int wacom_tpc_irq(struct wacom_wac *wacom, size_t len)
{
	char *data = wacom->data;

	dbg("wacom_tpc_irq: received report #%d", data[0]);

	switch (len) {
	case WACOM_PKGLEN_TPC1FG:
		return wacom_tpc_single_touch(wacom, len);

	case WACOM_PKGLEN_TPC2FG:
		return wacom_tpc_mt_touch(wacom);

	default:
		switch (data[0]) {
		case WACOM_REPORT_TPC1FG:
		case WACOM_REPORT_TPCHID:
		case WACOM_REPORT_TPCST:
			return wacom_tpc_single_touch(wacom, len);

		case WACOM_REPORT_PENABLED:
			return wacom_tpc_pen(wacom);
		}
	}

	return 0;
}

/*
 * Decode a two-finger Bamboo touch report.  Syncs the device itself and
 * returns 0 so the caller does not sync again.
 */
static int wacom_bpt_touch(struct wacom_wac *wacom)
{
	struct wacom_features *features = &wacom->features;
	struct input_dev *input = wacom->input;
	unsigned char *data = wacom->data;
	int i;

	if (data[0] != 0x02)
	    return 0;

	for (i = 0; i < 2; i++) {
		int offset = (data[1] & 0x80) ? (8 * i) : (9 * i);
		bool touch = data[offset + 3] & 0x80;

		/*
		 * Touch events need to be disabled while stylus is
		 * in proximity because user's hand is resting on touchpad
		 * and sending unwanted events.  User expects tablet buttons
		 * to continue working though.
		 */
		touch = touch && !wacom->shared->stylus_in_proximity;

		input_mt_slot(input, i);
		input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
		if (touch) {
			int x = get_unaligned_be16(&data[offset + 3]) & 0x7ff;
			int y = get_unaligned_be16(&data[offset + 5]) & 0x7ff;
			if (features->quirks & WACOM_QUIRK_BBTOUCH_LOWRES) {
				/* low-res hardware: scale up to the quirked range */
				x <<= 5;
				y <<= 5;
			}
			input_report_abs(input, ABS_MT_POSITION_X, x);
			input_report_abs(input, ABS_MT_POSITION_Y, y);
		}
	}

	input_mt_report_pointer_emulation(input, true);

	input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
	input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
	input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
	input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);

	input_sync(input);

	return 0;
}

/*
 * Decode one 8-byte touch sub-message of a Bamboo gen3 report:
 * slot id, position and touch-major width for a single contact.
 */
static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
{
	struct input_dev *input = wacom->input;
	int slot_id = data[0] - 2;  /* data[0] is between 2 and 17 */
	bool touch = data[1] & 0x80;

	touch = touch && !wacom->shared->stylus_in_proximity;

	input_mt_slot(input, slot_id);
	input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);

	if (touch) {
		int x = (data[2] << 4) | (data[4] >> 4);
		int y = (data[3] << 4) | (data[4] & 0x0f);
		int w = data[6];

		input_report_abs(input, ABS_MT_POSITION_X, x);
		input_report_abs(input, ABS_MT_POSITION_Y, y);
		input_report_abs(input, ABS_MT_TOUCH_MAJOR, w);
	}
}

/*
 * Decode the button sub-message (msg_id 128) of a Bamboo gen3 report.
 */
static void wacom_bpt3_button_msg(struct wacom_wac *wacom, unsigned char *data)
{
	struct input_dev *input = wacom->input;

	input_report_key(input, BTN_LEFT, (data[1] & 0x08) != 0);
	input_report_key(input, BTN_FORWARD, (data[1] & 0x04) != 0);
	input_report_key(input, BTN_BACK, (data[1] & 0x02) != 0);
	input_report_key(input, BTN_RIGHT, (data[1] & 0x01) != 0);
}

/*
 * Decode a Bamboo gen3 touch report: a count byte followed by fixed-size
 * sub-messages dispatched by msg_id.  Syncs itself and returns 0.
 */
static int wacom_bpt3_touch(struct wacom_wac *wacom)
{
	struct input_dev *input = wacom->input;
	unsigned char *data = wacom->data;
	int count = data[1] & 0x07;
	int i;

	if (data[0] != 0x02)
	    return 0;

	/* data has up to 7 fixed sized 8-byte messages starting at data[2] */
	for (i = 0; i < count; i++) {
		int offset = (8 * i) + 2;
		int msg_id = data[offset];

		if (msg_id >= 2 && msg_id <= 17)
			wacom_bpt3_touch_msg(wacom, data + offset);
		else if (msg_id == 128)
			wacom_bpt3_button_msg(wacom, data + offset);

	}

	input_mt_report_pointer_emulation(input, true);

	input_sync(input);

	return 0;
}

/*
 * Decode a Bamboo pen report.  Returns 1 when events were queued so the
 * caller syncs.
 */
static int wacom_bpt_pen(struct wacom_wac *wacom)
{
	struct input_dev *input = wacom->input;
	unsigned char *data = wacom->data;
	int prox = 0, x = 0, y = 0, p = 0, d = 0, pen = 0, btn1 = 0, btn2 = 0;

	if (data[0] != 0x02)
	    return 0;

	prox = (data[1] & 0x20) == 0x20;

	/*
	 * All reports shared between PEN and RUBBER tool must be
	 * forced to a known starting value (zero) when transitioning to
	 * out-of-prox.
	 *
	 * If not reset then, to userspace, it will look like lost events
	 * if new tool comes in-prox with same values as previous tool sent.
	 *
	 * Hardware does report zero in most out-of-prox cases but not all.
	 */
	if (prox) {
		if (!wacom->shared->stylus_in_proximity) {
			if (data[1] & 0x08) {
				wacom->tool[0] = BTN_TOOL_RUBBER;
				wacom->id[0] = ERASER_DEVICE_ID;
			} else {
				wacom->tool[0] = BTN_TOOL_PEN;
				wacom->id[0] = STYLUS_DEVICE_ID;
			}
			wacom->shared->stylus_in_proximity = true;
		}
		x = le16_to_cpup((__le16 *)&data[2]);
		y = le16_to_cpup((__le16 *)&data[4]);
		p = le16_to_cpup((__le16 *)&data[6]);
		/*
		 * Convert distance from out prox to distance from tablet.
		 * distance will be greater than distance_max once
		 * touching and applying pressure; do not report negative
		 * distance.
		 */
		if (data[8] <= wacom->features.distance_max)
			d = wacom->features.distance_max - data[8];

		pen = data[1] & 0x01;
		btn1 = data[1] & 0x02;
		btn2 = data[1] & 0x04;
	}

	input_report_key(input, BTN_TOUCH, pen);
	input_report_key(input, BTN_STYLUS, btn1);
	input_report_key(input, BTN_STYLUS2, btn2);

	input_report_abs(input, ABS_X, x);
	input_report_abs(input, ABS_Y, y);
	input_report_abs(input, ABS_PRESSURE, p);
	input_report_abs(input, ABS_DISTANCE, d);

	if (!prox) {
		wacom->id[0] = 0;
		wacom->shared->stylus_in_proximity = false;
	}

	input_report_key(input, wacom->tool[0], prox); /* PEN or RUBBER */
	input_report_abs(input, ABS_MISC, wacom->id[0]); /* TOOL ID */

	return 1;
}

/*
 * Dispatch a Bamboo report by packet length to the touch, gen3-touch or
 * pen decoder.
 */
static int wacom_bpt_irq(struct wacom_wac *wacom, size_t len)
{
	if (len == WACOM_PKGLEN_BBTOUCH)
		return wacom_bpt_touch(wacom);
	else if (len == WACOM_PKGLEN_BBTOUCH3)
		return wacom_bpt3_touch(wacom);
	else if (len == WACOM_PKGLEN_BBFUN || len == WACOM_PKGLEN_BBPEN)
		return wacom_bpt_pen(wacom);

	return 0;
}

/*
 * Handle a report from the wireless receiver: track connect/disconnect
 * of the paired tablet (by PID) and its battery level, scheduling
 * deferred work when the attached device changes.
 */
static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len)
{
	unsigned char *data = wacom->data;
	int connected;

	if (len != WACOM_PKGLEN_WIRELESS || data[0] != 0x80)
		return 0;

	connected = data[1] & 0x01;
	if (connected) {
		int pid, battery;

		pid = get_unaligned_be16(&data[6]);
		battery = data[5] & 0x3f;
		if (wacom->pid != pid) {
			wacom->pid = pid;
			wacom_schedule_work(wacom);
		}
		wacom->battery_capacity = battery;
	} else if (wacom->pid != 0) {
		/* disconnected while previously connected */
		wacom->pid = 0;
		wacom_schedule_work(wacom);
		wacom->battery_capacity = 0;
	}

	return 0;
}

/*
 * Top-level report entry point: route the raw report to the decoder for
 * this device family and sync the input device when the decoder asks.
 */
void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
{
	bool sync;

	switch (wacom_wac->features.type) {
	case PENPARTNER:
		sync = wacom_penpartner_irq(wacom_wac);
		break;

	case PL:
		sync = wacom_pl_irq(wacom_wac);
		break;

	case WACOM_G4:
	case GRAPHIRE:
	case WACOM_MO:
		sync = wacom_graphire_irq(wacom_wac);
		break;

	case PTU:
		sync = wacom_ptu_irq(wacom_wac);
		break;

	case DTU:
		sync = wacom_dtu_irq(wacom_wac);
		break;

	case INTUOS:
	case INTUOS3S:
	case INTUOS3:
	case INTUOS3L:
	case INTUOS4S:
	case INTUOS4:
	case INTUOS4L:
	case CINTIQ:
	case WACOM_BEE:
	case WACOM_21UX2:
	case WACOM_24HD:
		sync = wacom_intuos_irq(wacom_wac);
		break;

	case TABLETPC:
	case TABLETPC2FG:
		sync = wacom_tpc_irq(wacom_wac, len);
		break;

	case BAMBOO_PT:
		sync = wacom_bpt_irq(wacom_wac, len);
		break;

	case WIRELESS:
		sync = wacom_wireless_irq(wacom_wac, len);
		break;

	default:
		sync = false;
		break;
	}

	if (sync)
		input_sync(wacom_wac->input);
}

/*
 * Declare the capabilities common to all Cintiq-class pen devices:
 * serial events, the pen-family tools, stylus buttons and the
 * distance/wheel/tilt axes.
 */
static void wacom_setup_cintiq(struct wacom_wac *wacom_wac)
{
	struct input_dev *input_dev = wacom_wac->input;

	input_set_capability(input_dev, EV_MSC, MSC_SERIAL);

	__set_bit(BTN_TOOL_RUBBER, input_dev->keybit);
	__set_bit(BTN_TOOL_PEN, input_dev->keybit);
	__set_bit(BTN_TOOL_BRUSH, input_dev->keybit);
	__set_bit(BTN_TOOL_PENCIL, input_dev->keybit);
	__set_bit(BTN_TOOL_AIRBRUSH, input_dev->keybit);
	__set_bit(BTN_STYLUS, input_dev->keybit);
	__set_bit(BTN_STYLUS2, input_dev->keybit);

	input_set_abs_params(input_dev, ABS_DISTANCE,
			     0, wacom_wac->features.distance_max, 0, 0);
	input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
	input_set_abs_params(input_dev, ABS_TILT_X, 0, 127, 0, 0);
	input_set_abs_params(input_dev, ABS_TILT_Y, 0, 127, 0, 0);
}

/*
 * Declare Intuos capabilities: everything a Cintiq has, plus the mouse
 * and lens-cursor tools with their buttons, wheel and rotation axes.
 */
static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
{
	struct input_dev *input_dev = wacom_wac->input;

	input_set_capability(input_dev, EV_REL, REL_WHEEL);

	wacom_setup_cintiq(wacom_wac);

	__set_bit(BTN_LEFT, input_dev->keybit);
	__set_bit(BTN_RIGHT, input_dev->keybit);
	__set_bit(BTN_MIDDLE, input_dev->keybit);
	__set_bit(BTN_SIDE, input_dev->keybit);
	__set_bit(BTN_EXTRA, input_dev->keybit);
	__set_bit(BTN_TOOL_MOUSE, input_dev->keybit);
	__set_bit(BTN_TOOL_LENS, input_dev->keybit);

	input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0);
	input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
}

/*
 * Apply per-model quirks to the feature table before input devices are
 * registered (default touch size, multi-input, low-res touch scaling,
 * wireless monitor interface detection).
 */
void wacom_setup_device_quirks(struct wacom_features *features)
{
	/* touch device found but size is not defined.
use default */ if (features->device_type == BTN_TOOL_FINGER && !features->x_max) { features->x_max = 1023; features->y_max = 1023; } /* these device have multiple inputs */ if (features->type == TABLETPC || features->type == TABLETPC2FG || features->type == BAMBOO_PT || features->type == WIRELESS) features->quirks |= WACOM_QUIRK_MULTI_INPUT; /* quirk for bamboo touch with 2 low res touches */ if (features->type == BAMBOO_PT && features->pktlen == WACOM_PKGLEN_BBTOUCH) { features->x_max <<= 5; features->y_max <<= 5; features->x_fuzz <<= 5; features->y_fuzz <<= 5; features->quirks |= WACOM_QUIRK_BBTOUCH_LOWRES; } if (features->type == WIRELESS) { /* monitor never has input and pen/touch have delayed create */ features->quirks |= WACOM_QUIRK_NO_INPUT; /* must be monitor interface if no device_type set */ if (!features->device_type) features->quirks |= WACOM_QUIRK_MONITOR; } } static unsigned int wacom_calculate_touch_res(unsigned int logical_max, unsigned int physical_max) { /* Touch physical dimensions are in 100th of mm */ return (logical_max * 100) / physical_max; } void wacom_setup_input_capabilities(struct input_dev *input_dev, struct wacom_wac *wacom_wac) { struct wacom_features *features = &wacom_wac->features; int i; input_dev->evbit[0] |= BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); __set_bit(BTN_TOUCH, input_dev->keybit); input_set_abs_params(input_dev, ABS_X, 0, features->x_max, features->x_fuzz, 0); input_set_abs_params(input_dev, ABS_Y, 0, features->y_max, features->y_fuzz, 0); if (features->device_type == BTN_TOOL_PEN) { input_set_abs_params(input_dev, ABS_PRESSURE, 0, features->pressure_max, features->pressure_fuzz, 0); /* penabled devices have fixed resolution for each model */ input_abs_set_res(input_dev, ABS_X, features->x_resolution); input_abs_set_res(input_dev, ABS_Y, features->y_resolution); } else { input_abs_set_res(input_dev, ABS_X, wacom_calculate_touch_res(features->x_max, features->x_phy)); input_abs_set_res(input_dev, ABS_Y, 
wacom_calculate_touch_res(features->y_max, features->y_phy)); } __set_bit(ABS_MISC, input_dev->absbit); switch (wacom_wac->features.type) { case WACOM_MO: input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0); /* fall through */ case WACOM_G4: input_set_capability(input_dev, EV_MSC, MSC_SERIAL); __set_bit(BTN_BACK, input_dev->keybit); __set_bit(BTN_FORWARD, input_dev->keybit); /* fall through */ case GRAPHIRE: input_set_capability(input_dev, EV_REL, REL_WHEEL); __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); __set_bit(BTN_MIDDLE, input_dev->keybit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_TOOL_MOUSE, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(BTN_STYLUS2, input_dev->keybit); __set_bit(INPUT_PROP_POINTER, input_dev->propbit); break; case WACOM_24HD: __set_bit(BTN_A, input_dev->keybit); __set_bit(BTN_B, input_dev->keybit); __set_bit(BTN_C, input_dev->keybit); __set_bit(BTN_X, input_dev->keybit); __set_bit(BTN_Y, input_dev->keybit); __set_bit(BTN_Z, input_dev->keybit); for (i = 0; i < 10; i++) __set_bit(BTN_0 + i, input_dev->keybit); __set_bit(KEY_PROG1, input_dev->keybit); __set_bit(KEY_PROG2, input_dev->keybit); __set_bit(KEY_PROG3, input_dev->keybit); input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0); wacom_setup_cintiq(wacom_wac); break; case WACOM_21UX2: __set_bit(BTN_A, input_dev->keybit); __set_bit(BTN_B, input_dev->keybit); __set_bit(BTN_C, input_dev->keybit); __set_bit(BTN_X, input_dev->keybit); __set_bit(BTN_Y, input_dev->keybit); __set_bit(BTN_Z, input_dev->keybit); __set_bit(BTN_BASE, input_dev->keybit); __set_bit(BTN_BASE2, input_dev->keybit); /* fall through */ case WACOM_BEE: __set_bit(BTN_8, input_dev->keybit); __set_bit(BTN_9, input_dev->keybit); /* fall through */ case CINTIQ: for (i = 0; i < 8; i++) __set_bit(BTN_0 + i, input_dev->keybit); 
input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); wacom_setup_cintiq(wacom_wac); break; case INTUOS3: case INTUOS3L: __set_bit(BTN_4, input_dev->keybit); __set_bit(BTN_5, input_dev->keybit); __set_bit(BTN_6, input_dev->keybit); __set_bit(BTN_7, input_dev->keybit); input_set_abs_params(input_dev, ABS_RY, 0, 4096, 0, 0); /* fall through */ case INTUOS3S: __set_bit(BTN_0, input_dev->keybit); __set_bit(BTN_1, input_dev->keybit); __set_bit(BTN_2, input_dev->keybit); __set_bit(BTN_3, input_dev->keybit); input_set_abs_params(input_dev, ABS_RX, 0, 4096, 0, 0); input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); /* fall through */ case INTUOS: __set_bit(INPUT_PROP_POINTER, input_dev->propbit); wacom_setup_intuos(wacom_wac); break; case INTUOS4: case INTUOS4L: __set_bit(BTN_7, input_dev->keybit); __set_bit(BTN_8, input_dev->keybit); /* fall through */ case INTUOS4S: for (i = 0; i < 7; i++) __set_bit(BTN_0 + i, input_dev->keybit); input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0); wacom_setup_intuos(wacom_wac); __set_bit(INPUT_PROP_POINTER, input_dev->propbit); break; case TABLETPC2FG: if (features->device_type == BTN_TOOL_FINGER) { input_mt_init_slots(input_dev, 2); input_set_abs_params(input_dev, ABS_MT_TOOL_TYPE, 0, MT_TOOL_MAX, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, features->x_max, 0, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, features->y_max, 0, 0); } /* fall through */ case TABLETPC: __clear_bit(ABS_MISC, input_dev->absbit); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); if (features->device_type != BTN_TOOL_PEN) break; /* no need to process stylus stuff */ /* fall through */ case PL: case DTU: __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(BTN_STYLUS2, 
input_dev->keybit); __set_bit(INPUT_PROP_DIRECT, input_dev->propbit); break; case PTU: __set_bit(BTN_STYLUS2, input_dev->keybit); /* fall through */ case PENPARTNER: __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(INPUT_PROP_POINTER, input_dev->propbit); break; case BAMBOO_PT: __clear_bit(ABS_MISC, input_dev->absbit); __set_bit(INPUT_PROP_POINTER, input_dev->propbit); if (features->device_type == BTN_TOOL_FINGER) { __set_bit(BTN_LEFT, input_dev->keybit); __set_bit(BTN_FORWARD, input_dev->keybit); __set_bit(BTN_BACK, input_dev->keybit); __set_bit(BTN_RIGHT, input_dev->keybit); __set_bit(BTN_TOOL_FINGER, input_dev->keybit); __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit); if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) { __set_bit(BTN_TOOL_TRIPLETAP, input_dev->keybit); __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit); input_mt_init_slots(input_dev, 16); input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 255, 0, 0); } else { input_mt_init_slots(input_dev, 2); } input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, features->x_max, features->x_fuzz, 0); input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, features->y_max, features->y_fuzz, 0); } else if (features->device_type == BTN_TOOL_PEN) { __set_bit(BTN_TOOL_RUBBER, input_dev->keybit); __set_bit(BTN_TOOL_PEN, input_dev->keybit); __set_bit(BTN_STYLUS, input_dev->keybit); __set_bit(BTN_STYLUS2, input_dev->keybit); input_set_abs_params(input_dev, ABS_DISTANCE, 0, features->distance_max, 0, 0); } break; } } static const struct wacom_features wacom_features_0x00 = { "Wacom Penpartner", WACOM_PKGLEN_PENPRTN, 5040, 3780, 255, 0, PENPARTNER, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES }; static const struct wacom_features wacom_features_0x10 = { "Wacom Graphire", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x11 = { "Wacom 
Graphire2 4x5", WACOM_PKGLEN_GRAPHIRE, 10206, 7422, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x12 = { "Wacom Graphire2 5x7", WACOM_PKGLEN_GRAPHIRE, 13918, 10206, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x13 = { "Wacom Graphire3", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x14 = { "Wacom Graphire3 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x15 = { "Wacom Graphire4 4x5", WACOM_PKGLEN_GRAPHIRE, 10208, 7424, 511, 63, WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x16 = { "Wacom Graphire4 6x8", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, WACOM_G4, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x17 = { "Wacom BambooFun 4x5", WACOM_PKGLEN_BBFUN, 14760, 9225, 511, 63, WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x18 = { "Wacom BambooFun 6x8", WACOM_PKGLEN_BBFUN, 21648, 13530, 511, 63, WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x19 = { "Wacom Bamboo1 Medium", WACOM_PKGLEN_GRAPHIRE, 16704, 12064, 511, 63, GRAPHIRE, WACOM_GRAPHIRE_RES, WACOM_GRAPHIRE_RES }; static const struct wacom_features wacom_features_0x60 = { "Wacom Volito", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x61 = { "Wacom PenStation2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 255, 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x62 = { "Wacom Volito2 4x5", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, 
GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x63 = { "Wacom Volito2 2x3", WACOM_PKGLEN_GRAPHIRE, 3248, 2320, 511, 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x64 = { "Wacom PenPartner2", WACOM_PKGLEN_GRAPHIRE, 3250, 2320, 511, 63, GRAPHIRE, WACOM_VOLITO_RES, WACOM_VOLITO_RES }; static const struct wacom_features wacom_features_0x65 = { "Wacom Bamboo", WACOM_PKGLEN_BBFUN, 14760, 9225, 511, 63, WACOM_MO, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x69 = { "Wacom Bamboo1", WACOM_PKGLEN_GRAPHIRE, 5104, 3712, 511, 63, GRAPHIRE, WACOM_PENPRTN_RES, WACOM_PENPRTN_RES }; static const struct wacom_features wacom_features_0x6A = { "Wacom Bamboo1 4x6", WACOM_PKGLEN_GRAPHIRE, 14760, 9225, 1023, 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x6B = { "Wacom Bamboo1 5x8", WACOM_PKGLEN_GRAPHIRE, 21648, 13530, 1023, 63, GRAPHIRE, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x20 = { "Wacom Intuos 4x5", WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x21 = { "Wacom Intuos 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x22 = { "Wacom Intuos 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x23 = { "Wacom Intuos 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x24 = { "Wacom Intuos 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x30 = { "Wacom 
PL400", WACOM_PKGLEN_GRAPHIRE, 5408, 4056, 255, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x31 = { "Wacom PL500", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 255, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x32 = { "Wacom PL600", WACOM_PKGLEN_GRAPHIRE, 6126, 4604, 255, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x33 = { "Wacom PL600SX", WACOM_PKGLEN_GRAPHIRE, 6260, 5016, 255, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x34 = { "Wacom PL550", WACOM_PKGLEN_GRAPHIRE, 6144, 4608, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x35 = { "Wacom PL800", WACOM_PKGLEN_GRAPHIRE, 7220, 5780, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x37 = { "Wacom PL700", WACOM_PKGLEN_GRAPHIRE, 6758, 5406, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x38 = { "Wacom PL510", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x39 = { "Wacom DTU710", WACOM_PKGLEN_GRAPHIRE, 34080, 27660, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0xC4 = { "Wacom DTF521", WACOM_PKGLEN_GRAPHIRE, 6282, 4762, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0xC0 = { "Wacom DTF720", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0xC2 = { "Wacom DTF720a", WACOM_PKGLEN_GRAPHIRE, 6858, 5506, 511, 0, PL, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x03 = { "Wacom Cintiq Partner", WACOM_PKGLEN_GRAPHIRE, 20480, 15360, 511, 0, PTU, WACOM_PL_RES, WACOM_PL_RES }; static const struct wacom_features wacom_features_0x41 = { "Wacom Intuos2 4x5", 
WACOM_PKGLEN_INTUOS, 12700, 10600, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x42 = { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x43 = { "Wacom Intuos2 9x12", WACOM_PKGLEN_INTUOS, 30480, 24060, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x44 = { "Wacom Intuos2 12x12", WACOM_PKGLEN_INTUOS, 30480, 31680, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x45 = { "Wacom Intuos2 12x18", WACOM_PKGLEN_INTUOS, 45720, 31680, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xB0 = { "Wacom Intuos3 4x5", WACOM_PKGLEN_INTUOS, 25400, 20320, 1023, 63, INTUOS3S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB1 = { "Wacom Intuos3 6x8", WACOM_PKGLEN_INTUOS, 40640, 30480, 1023, 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB2 = { "Wacom Intuos3 9x12", WACOM_PKGLEN_INTUOS, 60960, 45720, 1023, 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB3 = { "Wacom Intuos3 12x12", WACOM_PKGLEN_INTUOS, 60960, 60960, 1023, 63, INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB4 = { "Wacom Intuos3 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 1023, 63, INTUOS3L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB5 = { "Wacom Intuos3 6x11", WACOM_PKGLEN_INTUOS, 54204, 31750, 1023, 63, INTUOS3, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB7 = { "Wacom Intuos3 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 1023, 63, INTUOS3S, WACOM_INTUOS3_RES, 
WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB8 = { "Wacom Intuos4 4x6", WACOM_PKGLEN_INTUOS, 31496, 19685, 2047, 63, INTUOS4S, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xB9 = { "Wacom Intuos4 6x9", WACOM_PKGLEN_INTUOS, 44704, 27940, 2047, 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xBA = { "Wacom Intuos4 8x13", WACOM_PKGLEN_INTUOS, 65024, 40640, 2047, 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xBB = { "Wacom Intuos4 12x19", WACOM_PKGLEN_INTUOS, 97536, 60960, 2047, 63, INTUOS4L, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xBC = { "Wacom Intuos4 WL", WACOM_PKGLEN_INTUOS, 40840, 25400, 2047, 63, INTUOS4, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xF4 = { "Wacom Cintiq 24HD", WACOM_PKGLEN_INTUOS, 104480, 65600, 2047, 63, WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x3F = { "Wacom Cintiq 21UX", WACOM_PKGLEN_INTUOS, 87200, 65600, 1023, 63, CINTIQ, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xC5 = { "Wacom Cintiq 20WSX", WACOM_PKGLEN_INTUOS, 86680, 54180, 1023, 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xC6 = { "Wacom Cintiq 12WX", WACOM_PKGLEN_INTUOS, 53020, 33440, 1023, 63, WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0xC7 = { "Wacom DTU1931", WACOM_PKGLEN_GRAPHIRE, 37832, 30305, 511, 0, PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xCE = { "Wacom DTU2231", WACOM_PKGLEN_GRAPHIRE, 47864, 27011, 511, 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xF0 = { "Wacom 
DTU1631", WACOM_PKGLEN_GRAPHIRE, 34623, 19553, 511, 0, DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xCC = { "Wacom Cintiq 21UX2", WACOM_PKGLEN_INTUOS, 87200, 65600, 2047, 63, WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES }; static const struct wacom_features wacom_features_0x90 = { "Wacom ISDv4 90", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x93 = { "Wacom ISDv4 93", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x97 = { "Wacom ISDv4 97", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 511, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x9A = { "Wacom ISDv4 9A", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x9F = { "Wacom ISDv4 9F", WACOM_PKGLEN_GRAPHIRE, 26202, 16325, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xE2 = { "Wacom ISDv4 E2", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255, 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xE3 = { "Wacom ISDv4 E3", WACOM_PKGLEN_TPC2FG, 26202, 16325, 255, 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xE6 = { "Wacom ISDv4 E6", WACOM_PKGLEN_TPC2FG, 27760, 15694, 255, 0, TABLETPC2FG, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xEC = { "Wacom ISDv4 EC", WACOM_PKGLEN_GRAPHIRE, 25710, 14500, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x47 = { "Wacom Intuos2 6x8", WACOM_PKGLEN_INTUOS, 20320, 16240, 1023, 31, INTUOS, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const 
struct wacom_features wacom_features_0x84 = { "Wacom Wireless Receiver", WACOM_PKGLEN_WIRELESS, 0, 0, 0, 0, WIRELESS, 0, 0 }; static const struct wacom_features wacom_features_0xD0 = { "Wacom Bamboo 2FG", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD1 = { "Wacom Bamboo 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD2 = { "Wacom Bamboo Craft", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD3 = { "Wacom Bamboo 2FG 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD4 = { "Wacom Bamboo Pen", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD5 = { "Wacom Bamboo Pen 6x8", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD6 = { "Wacom BambooPT 2FG 4x5", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD7 = { "Wacom BambooPT 2FG Small", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xD8 = { "Wacom Bamboo Comic 2FG", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xDA = { "Wacom Bamboo 2FG 4x5 SE", WACOM_PKGLEN_BBFUN, 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static struct wacom_features wacom_features_0xDB = { "Wacom Bamboo 2FG 6x8 SE", WACOM_PKGLEN_BBFUN, 21648, 13700, 1023, 
31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xDD = { "Wacom Bamboo Connect", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xDE = { "Wacom Bamboo 16FG 4x5", WACOM_PKGLEN_BBPEN, 14720, 9200, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0xDF = { "Wacom Bamboo 16FG 6x8", WACOM_PKGLEN_BBPEN, 21648, 13700, 1023, 31, BAMBOO_PT, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; static const struct wacom_features wacom_features_0x6004 = { "ISD-V4", WACOM_PKGLEN_GRAPHIRE, 12800, 8000, 255, 0, TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES }; #define USB_DEVICE_WACOM(prod) \ USB_DEVICE(USB_VENDOR_ID_WACOM, prod), \ .driver_info = (kernel_ulong_t)&wacom_features_##prod #define USB_DEVICE_DETAILED(prod, class, sub, proto) \ USB_DEVICE_AND_INTERFACE_INFO(USB_VENDOR_ID_WACOM, prod, class, \ sub, proto), \ .driver_info = (kernel_ulong_t)&wacom_features_##prod #define USB_DEVICE_LENOVO(prod) \ USB_DEVICE(USB_VENDOR_ID_LENOVO, prod), \ .driver_info = (kernel_ulong_t)&wacom_features_##prod const struct usb_device_id wacom_ids[] = { { USB_DEVICE_WACOM(0x00) }, { USB_DEVICE_WACOM(0x10) }, { USB_DEVICE_WACOM(0x11) }, { USB_DEVICE_WACOM(0x12) }, { USB_DEVICE_WACOM(0x13) }, { USB_DEVICE_WACOM(0x14) }, { USB_DEVICE_WACOM(0x15) }, { USB_DEVICE_WACOM(0x16) }, { USB_DEVICE_WACOM(0x17) }, { USB_DEVICE_WACOM(0x18) }, { USB_DEVICE_WACOM(0x19) }, { USB_DEVICE_WACOM(0x60) }, { USB_DEVICE_WACOM(0x61) }, { USB_DEVICE_WACOM(0x62) }, { USB_DEVICE_WACOM(0x63) }, { USB_DEVICE_WACOM(0x64) }, { USB_DEVICE_WACOM(0x65) }, { USB_DEVICE_WACOM(0x69) }, { USB_DEVICE_WACOM(0x6A) }, { USB_DEVICE_WACOM(0x6B) }, { USB_DEVICE_WACOM(0x20) }, { USB_DEVICE_WACOM(0x21) }, { USB_DEVICE_WACOM(0x22) }, { USB_DEVICE_WACOM(0x23) }, { USB_DEVICE_WACOM(0x24) }, { USB_DEVICE_WACOM(0x30) }, { USB_DEVICE_WACOM(0x31) }, { 
USB_DEVICE_WACOM(0x32) }, { USB_DEVICE_WACOM(0x33) }, { USB_DEVICE_WACOM(0x34) }, { USB_DEVICE_WACOM(0x35) }, { USB_DEVICE_WACOM(0x37) }, { USB_DEVICE_WACOM(0x38) }, { USB_DEVICE_WACOM(0x39) }, { USB_DEVICE_WACOM(0xC4) }, { USB_DEVICE_WACOM(0xC0) }, { USB_DEVICE_WACOM(0xC2) }, { USB_DEVICE_WACOM(0x03) }, { USB_DEVICE_WACOM(0x41) }, { USB_DEVICE_WACOM(0x42) }, { USB_DEVICE_WACOM(0x43) }, { USB_DEVICE_WACOM(0x44) }, { USB_DEVICE_WACOM(0x45) }, { USB_DEVICE_WACOM(0xB0) }, { USB_DEVICE_WACOM(0xB1) }, { USB_DEVICE_WACOM(0xB2) }, { USB_DEVICE_WACOM(0xB3) }, { USB_DEVICE_WACOM(0xB4) }, { USB_DEVICE_WACOM(0xB5) }, { USB_DEVICE_WACOM(0xB7) }, { USB_DEVICE_WACOM(0xB8) }, { USB_DEVICE_WACOM(0xB9) }, { USB_DEVICE_WACOM(0xBA) }, { USB_DEVICE_WACOM(0xBB) }, { USB_DEVICE_WACOM(0xBC) }, { USB_DEVICE_WACOM(0x3F) }, { USB_DEVICE_WACOM(0xC5) }, { USB_DEVICE_WACOM(0xC6) }, { USB_DEVICE_WACOM(0xC7) }, /* * DTU-2231 has two interfaces on the same configuration, * only one is used. */ { USB_DEVICE_DETAILED(0xCE, USB_CLASS_HID, USB_INTERFACE_SUBCLASS_BOOT, USB_INTERFACE_PROTOCOL_MOUSE) }, { USB_DEVICE_WACOM(0x84) }, { USB_DEVICE_WACOM(0xD0) }, { USB_DEVICE_WACOM(0xD1) }, { USB_DEVICE_WACOM(0xD2) }, { USB_DEVICE_WACOM(0xD3) }, { USB_DEVICE_WACOM(0xD4) }, { USB_DEVICE_WACOM(0xD5) }, { USB_DEVICE_WACOM(0xD6) }, { USB_DEVICE_WACOM(0xD7) }, { USB_DEVICE_WACOM(0xD8) }, { USB_DEVICE_WACOM(0xDA) }, { USB_DEVICE_WACOM(0xDB) }, { USB_DEVICE_WACOM(0xDD) }, { USB_DEVICE_WACOM(0xDE) }, { USB_DEVICE_WACOM(0xDF) }, { USB_DEVICE_WACOM(0xF0) }, { USB_DEVICE_WACOM(0xCC) }, { USB_DEVICE_WACOM(0x90) }, { USB_DEVICE_WACOM(0x93) }, { USB_DEVICE_WACOM(0x97) }, { USB_DEVICE_WACOM(0x9A) }, { USB_DEVICE_WACOM(0x9F) }, { USB_DEVICE_WACOM(0xE2) }, { USB_DEVICE_WACOM(0xE3) }, { USB_DEVICE_WACOM(0xE6) }, { USB_DEVICE_WACOM(0xEC) }, { USB_DEVICE_WACOM(0x47) }, { USB_DEVICE_WACOM(0xF4) }, { USB_DEVICE_LENOVO(0x6004) }, { } }; MODULE_DEVICE_TABLE(usb, wacom_ids);
gpl-2.0
TV-LP51-Devices/kernel_asus_grouper
drivers/staging/comedi/drivers/me_daq.c
3344
24918
/* comedi/drivers/me_daq.c Hardware driver for Meilhaus data acquisition cards: ME-2000i, ME-2600i, ME-3000vm1 Copyright (C) 2002 Michael Hillmann <hillmann@syscongroup.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: me_daq Description: Meilhaus PCI data acquisition cards Author: Michael Hillmann <hillmann@syscongroup.de> Devices: [Meilhaus] ME-2600i (me_daq), ME-2000i Status: experimental Supports: Analog Output Configuration options: [0] - PCI bus number (optional) [1] - PCI slot number (optional) If bus/slot is not specified, the first available PCI device will be used. The 2600 requires a firmware upload, which can be accomplished using the -i or --init-data option of comedi_config. 
The firmware can be found in the comedi_nonfree_firmware tarball available from http://www.comedi.org */ #include <linux/interrupt.h> #include <linux/sched.h> #include "../comedidev.h" #include "comedi_pci.h" /*#include "me2600_fw.h" */ #define ME_DRIVER_NAME "me_daq" #define PCI_VENDOR_ID_MEILHAUS 0x1402 #define ME2000_DEVICE_ID 0x2000 #define ME2600_DEVICE_ID 0x2600 #define PLX_INTCSR 0x4C /* PLX interrupt status register */ #define XILINX_DOWNLOAD_RESET 0x42 /* Xilinx registers */ #define ME_CONTROL_1 0x0000 /* - | W */ #define INTERRUPT_ENABLE (1<<15) #define COUNTER_B_IRQ (1<<12) #define COUNTER_A_IRQ (1<<11) #define CHANLIST_READY_IRQ (1<<10) #define EXT_IRQ (1<<9) #define ADFIFO_HALFFULL_IRQ (1<<8) #define SCAN_COUNT_ENABLE (1<<5) #define SIMULTANEOUS_ENABLE (1<<4) #define TRIGGER_FALLING_EDGE (1<<3) #define CONTINUOUS_MODE (1<<2) #define DISABLE_ADC (0<<0) #define SOFTWARE_TRIGGERED_ADC (1<<0) #define SCAN_TRIGGERED_ADC (2<<0) #define EXT_TRIGGERED_ADC (3<<0) #define ME_ADC_START 0x0000 /* R | - */ #define ME_CONTROL_2 0x0002 /* - | W */ #define ENABLE_ADFIFO (1<<10) #define ENABLE_CHANLIST (1<<9) #define ENABLE_PORT_B (1<<7) #define ENABLE_PORT_A (1<<6) #define ENABLE_COUNTER_B (1<<4) #define ENABLE_COUNTER_A (1<<3) #define ENABLE_DAC (1<<1) #define BUFFERED_DAC (1<<0) #define ME_DAC_UPDATE 0x0002 /* R | - */ #define ME_STATUS 0x0004 /* R | - */ #define COUNTER_B_IRQ_PENDING (1<<12) #define COUNTER_A_IRQ_PENDING (1<<11) #define CHANLIST_READY_IRQ_PENDING (1<<10) #define EXT_IRQ_PENDING (1<<9) #define ADFIFO_HALFFULL_IRQ_PENDING (1<<8) #define ADFIFO_FULL (1<<4) #define ADFIFO_HALFFULL (1<<3) #define ADFIFO_EMPTY (1<<2) #define CHANLIST_FULL (1<<1) #define FST_ACTIVE (1<<0) #define ME_RESET_INTERRUPT 0x0004 /* - | W */ #define ME_DIO_PORT_A 0x0006 /* R | W */ #define ME_DIO_PORT_B 0x0008 /* R | W */ #define ME_TIMER_DATA_0 0x000A /* - | W */ #define ME_TIMER_DATA_1 0x000C /* - | W */ #define ME_TIMER_DATA_2 0x000E /* - | W */ #define ME_CHANNEL_LIST 0x0010 
/* - | W */ #define ADC_UNIPOLAR (1<<6) #define ADC_GAIN_0 (0<<4) #define ADC_GAIN_1 (1<<4) #define ADC_GAIN_2 (2<<4) #define ADC_GAIN_3 (3<<4) #define ME_READ_AD_FIFO 0x0010 /* R | - */ #define ME_DAC_CONTROL 0x0012 /* - | W */ #define DAC_UNIPOLAR_D (0<<4) #define DAC_BIPOLAR_D (1<<4) #define DAC_UNIPOLAR_C (0<<5) #define DAC_BIPOLAR_C (1<<5) #define DAC_UNIPOLAR_B (0<<6) #define DAC_BIPOLAR_B (1<<6) #define DAC_UNIPOLAR_A (0<<7) #define DAC_BIPOLAR_A (1<<7) #define DAC_GAIN_0_D (0<<8) #define DAC_GAIN_1_D (1<<8) #define DAC_GAIN_0_C (0<<9) #define DAC_GAIN_1_C (1<<9) #define DAC_GAIN_0_B (0<<10) #define DAC_GAIN_1_B (1<<10) #define DAC_GAIN_0_A (0<<11) #define DAC_GAIN_1_A (1<<11) #define ME_DAC_CONTROL_UPDATE 0x0012 /* R | - */ #define ME_DAC_DATA_A 0x0014 /* - | W */ #define ME_DAC_DATA_B 0x0016 /* - | W */ #define ME_DAC_DATA_C 0x0018 /* - | W */ #define ME_DAC_DATA_D 0x001A /* - | W */ #define ME_COUNTER_ENDDATA_A 0x001C /* - | W */ #define ME_COUNTER_ENDDATA_B 0x001E /* - | W */ #define ME_COUNTER_STARTDATA_A 0x0020 /* - | W */ #define ME_COUNTER_VALUE_A 0x0020 /* R | - */ #define ME_COUNTER_STARTDATA_B 0x0022 /* - | W */ #define ME_COUNTER_VALUE_B 0x0022 /* R | - */ /* Function prototypes */ static int me_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int me_detach(struct comedi_device *dev); static const struct comedi_lrange me2000_ai_range = { 8, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25) } }; static const struct comedi_lrange me2600_ai_range = { 8, { BIP_RANGE(10), BIP_RANGE(5), BIP_RANGE(2.5), BIP_RANGE(1.25), UNI_RANGE(10), UNI_RANGE(5), UNI_RANGE(2.5), UNI_RANGE(1.25) } }; static const struct comedi_lrange me2600_ao_range = { 3, { BIP_RANGE(10), BIP_RANGE(5), UNI_RANGE(10) } }; static DEFINE_PCI_DEVICE_TABLE(me_pci_table) = { { PCI_VENDOR_ID_MEILHAUS, ME2600_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { PCI_VENDOR_ID_MEILHAUS, ME2000_DEVICE_ID, 
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 0} }; MODULE_DEVICE_TABLE(pci, me_pci_table); /* Board specification structure */ struct me_board { const char *name; /* driver name */ int device_id; int ao_channel_nbr; /* DA config */ int ao_resolution; int ao_resolution_mask; const struct comedi_lrange *ao_range_list; int ai_channel_nbr; /* AD config */ int ai_resolution; int ai_resolution_mask; const struct comedi_lrange *ai_range_list; int dio_channel_nbr; /* DIO config */ }; static const struct me_board me_boards[] = { { /* -- ME-2600i -- */ .name = ME_DRIVER_NAME, .device_id = ME2600_DEVICE_ID, /* Analog Output */ .ao_channel_nbr = 4, .ao_resolution = 12, .ao_resolution_mask = 0x0fff, .ao_range_list = &me2600_ao_range, .ai_channel_nbr = 16, /* Analog Input */ .ai_resolution = 12, .ai_resolution_mask = 0x0fff, .ai_range_list = &me2600_ai_range, .dio_channel_nbr = 32, }, { /* -- ME-2000i -- */ .name = ME_DRIVER_NAME, .device_id = ME2000_DEVICE_ID, /* Analog Output */ .ao_channel_nbr = 0, .ao_resolution = 0, .ao_resolution_mask = 0, .ao_range_list = NULL, .ai_channel_nbr = 16, /* Analog Input */ .ai_resolution = 12, .ai_resolution_mask = 0x0fff, .ai_range_list = &me2000_ai_range, .dio_channel_nbr = 32, } }; #define me_board_nbr (sizeof(me_boards)/sizeof(struct me_board)) static struct comedi_driver me_driver = { .driver_name = ME_DRIVER_NAME, .module = THIS_MODULE, .attach = me_attach, .detach = me_detach, }; static int __devinit me_driver_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, me_driver.driver_name); } static void __devexit me_driver_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver me_driver_pci_driver = { .id_table = me_pci_table, .probe = &me_driver_pci_probe, .remove = __devexit_p(&me_driver_pci_remove) }; static int __init me_driver_init_module(void) { int retval; retval = comedi_driver_register(&me_driver); if (retval < 0) return retval; me_driver_pci_driver.name = 
(char *)me_driver.driver_name; return pci_register_driver(&me_driver_pci_driver); } static void __exit me_driver_cleanup_module(void) { pci_unregister_driver(&me_driver_pci_driver); comedi_driver_unregister(&me_driver); } module_init(me_driver_init_module); module_exit(me_driver_cleanup_module); /* Private data structure */ struct me_private_data { struct pci_dev *pci_device; void __iomem *plx_regbase; /* PLX configuration base address */ void __iomem *me_regbase; /* Base address of the Meilhaus card */ unsigned long plx_regbase_size; /* Size of PLX configuration space */ unsigned long me_regbase_size; /* Size of Meilhaus space */ unsigned short control_1; /* Mirror of CONTROL_1 register */ unsigned short control_2; /* Mirror of CONTROL_2 register */ unsigned short dac_control; /* Mirror of the DAC_CONTROL register */ int ao_readback[4]; /* Mirror of analog output data */ }; #define dev_private ((struct me_private_data *)dev->private) /* * ------------------------------------------------------------------ * * Helpful functions * * ------------------------------------------------------------------ */ static inline void sleep(unsigned sec) { current->state = TASK_INTERRUPTIBLE; schedule_timeout(sec * HZ); } /* * ------------------------------------------------------------------ * * DIGITAL INPUT/OUTPUT SECTION * * ------------------------------------------------------------------ */ static int me_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int bits; int mask = 1 << CR_CHAN(insn->chanspec); /* calculate port */ if (mask & 0x0000ffff) { /* Port A in use */ bits = 0x0000ffff; /* Enable Port A */ dev_private->control_2 |= ENABLE_PORT_A; writew(dev_private->control_2, dev_private->me_regbase + ME_CONTROL_2); } else { /* Port B in use */ bits = 0xffff0000; /* Enable Port B */ dev_private->control_2 |= ENABLE_PORT_B; writew(dev_private->control_2, dev_private->me_regbase + ME_CONTROL_2); } if 
(data[0]) { /* Config port as output */ s->io_bits |= bits; } else { /* Config port as input */ s->io_bits &= ~bits; } return 1; } /* Digital instant input/outputs */ static int me_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned int mask = data[0]; s->state &= ~mask; s->state |= (mask & data[1]); mask &= s->io_bits; if (mask & 0x0000ffff) { /* Port A */ writew((s->state & 0xffff), dev_private->me_regbase + ME_DIO_PORT_A); } else { data[1] &= ~0x0000ffff; data[1] |= readw(dev_private->me_regbase + ME_DIO_PORT_A); } if (mask & 0xffff0000) { /* Port B */ writew(((s->state >> 16) & 0xffff), dev_private->me_regbase + ME_DIO_PORT_B); } else { data[1] &= ~0xffff0000; data[1] |= readw(dev_private->me_regbase + ME_DIO_PORT_B) << 16; } return 2; } /* * ------------------------------------------------------------------ * * ANALOG INPUT SECTION * * ------------------------------------------------------------------ */ /* Analog instant input */ static int me_ai_insn_read(struct comedi_device *dev, struct comedi_subdevice *subdevice, struct comedi_insn *insn, unsigned int *data) { unsigned short value; int chan = CR_CHAN((&insn->chanspec)[0]); int rang = CR_RANGE((&insn->chanspec)[0]); int aref = CR_AREF((&insn->chanspec)[0]); int i; /* stop any running conversion */ dev_private->control_1 &= 0xFFFC; writew(dev_private->control_1, dev_private->me_regbase + ME_CONTROL_1); /* clear chanlist and ad fifo */ dev_private->control_2 &= ~(ENABLE_ADFIFO | ENABLE_CHANLIST); writew(dev_private->control_2, dev_private->me_regbase + ME_CONTROL_2); /* reset any pending interrupt */ writew(0x00, dev_private->me_regbase + ME_RESET_INTERRUPT); /* enable the chanlist and ADC fifo */ dev_private->control_2 |= (ENABLE_ADFIFO | ENABLE_CHANLIST); writew(dev_private->control_2, dev_private->me_regbase + ME_CONTROL_2); /* write to channel list fifo */ /* b3:b0 are the channel number */ value = chan & 0x0f; /* b5:b4 are the 
channel gain */ value |= (rang & 0x03) << 4; /* b6 channel polarity */ value |= (rang & 0x04) << 4; /* b7 single or differential */ value |= ((aref & AREF_DIFF) ? 0x80 : 0); writew(value & 0xff, dev_private->me_regbase + ME_CHANNEL_LIST); /* set ADC mode to software trigger */ dev_private->control_1 |= SOFTWARE_TRIGGERED_ADC; writew(dev_private->control_1, dev_private->me_regbase + ME_CONTROL_1); /* start conversion by reading from ADC_START */ readw(dev_private->me_regbase + ME_ADC_START); /* wait for ADC fifo not empty flag */ for (i = 100000; i > 0; i--) if (!(readw(dev_private->me_regbase + ME_STATUS) & 0x0004)) break; /* get value from ADC fifo */ if (i) { data[0] = (readw(dev_private->me_regbase + ME_READ_AD_FIFO) ^ 0x800) & 0x0FFF; } else { printk(KERN_ERR "comedi%d: Cannot get single value\n", dev->minor); return -EIO; } /* stop any running conversion */ dev_private->control_1 &= 0xFFFC; writew(dev_private->control_1, dev_private->me_regbase + ME_CONTROL_1); return 1; } /* * ------------------------------------------------------------------ * * HARDWARE TRIGGERED ANALOG INPUT SECTION * * ------------------------------------------------------------------ */ /* Cancel analog input autoscan */ static int me_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { /* disable interrupts */ /* stop any running conversion */ dev_private->control_1 &= 0xFFFC; writew(dev_private->control_1, dev_private->me_regbase + ME_CONTROL_1); return 0; } /* Test analog input command */ static int me_ai_do_cmd_test(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { return 0; } /* Analog input command */ static int me_ai_do_cmd(struct comedi_device *dev, struct comedi_subdevice *subdevice) { return 0; } /* * ------------------------------------------------------------------ * * ANALOG OUTPUT SECTION * * ------------------------------------------------------------------ */ /* Analog instant output */ static int me_ao_insn_write(struct 
comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int chan; int rang; int i; /* Enable all DAC */ dev_private->control_2 |= ENABLE_DAC; writew(dev_private->control_2, dev_private->me_regbase + ME_CONTROL_2); /* and set DAC to "buffered" mode */ dev_private->control_2 |= BUFFERED_DAC; writew(dev_private->control_2, dev_private->me_regbase + ME_CONTROL_2); /* Set dac-control register */ for (i = 0; i < insn->n; i++) { chan = CR_CHAN((&insn->chanspec)[i]); rang = CR_RANGE((&insn->chanspec)[i]); /* clear bits for this channel */ dev_private->dac_control &= ~(0x0880 >> chan); if (rang == 0) dev_private->dac_control |= ((DAC_BIPOLAR_A | DAC_GAIN_1_A) >> chan); else if (rang == 1) dev_private->dac_control |= ((DAC_BIPOLAR_A | DAC_GAIN_0_A) >> chan); } writew(dev_private->dac_control, dev_private->me_regbase + ME_DAC_CONTROL); /* Update dac-control register */ readw(dev_private->me_regbase + ME_DAC_CONTROL_UPDATE); /* Set data register */ for (i = 0; i < insn->n; i++) { chan = CR_CHAN((&insn->chanspec)[i]); writew((data[0] & s->maxdata), dev_private->me_regbase + ME_DAC_DATA_A + (chan << 1)); dev_private->ao_readback[chan] = (data[0] & s->maxdata); } /* Update dac with data registers */ readw(dev_private->me_regbase + ME_DAC_UPDATE); return i; } /* Analog output readback */ static int me_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; for (i = 0; i < insn->n; i++) { data[i] = dev_private->ao_readback[CR_CHAN((&insn->chanspec)[i])]; } return 1; } /* * ------------------------------------------------------------------ * * INITIALISATION SECTION * * ------------------------------------------------------------------ */ /* Xilinx firmware download for card: ME-2600i */ static int me2600_xilinx_download(struct comedi_device *dev, unsigned char *me2600_firmware, unsigned int length) { unsigned int value; unsigned int file_length; unsigned int i; /* disable 
irq's on PLX */ writel(0x00, dev_private->plx_regbase + PLX_INTCSR); /* First, make a dummy read to reset xilinx */ value = readw(dev_private->me_regbase + XILINX_DOWNLOAD_RESET); /* Wait until reset is over */ sleep(1); /* Write a dummy value to Xilinx */ writeb(0x00, dev_private->me_regbase + 0x0); sleep(1); /* * Format of the firmware * Build longs from the byte-wise coded header * Byte 1-3: length of the array * Byte 4-7: version * Byte 8-11: date * Byte 12-15: reserved */ if (length < 16) return -EINVAL; file_length = (((unsigned int)me2600_firmware[0] & 0xff) << 24) + (((unsigned int)me2600_firmware[1] & 0xff) << 16) + (((unsigned int)me2600_firmware[2] & 0xff) << 8) + ((unsigned int)me2600_firmware[3] & 0xff); /* * Loop for writing firmware byte by byte to xilinx * Firmware data start at offfset 16 */ for (i = 0; i < file_length; i++) writeb((me2600_firmware[16 + i] & 0xff), dev_private->me_regbase + 0x0); /* Write 5 dummy values to xilinx */ for (i = 0; i < 5; i++) writeb(0x00, dev_private->me_regbase + 0x0); /* Test if there was an error during download -> INTB was thrown */ value = readl(dev_private->plx_regbase + PLX_INTCSR); if (value & 0x20) { /* Disable interrupt */ writel(0x00, dev_private->plx_regbase + PLX_INTCSR); printk(KERN_ERR "comedi%d: Xilinx download failed\n", dev->minor); return -EIO; } /* Wait until the Xilinx is ready for real work */ sleep(1); /* Enable PLX-Interrupts */ writel(0x43, dev_private->plx_regbase + PLX_INTCSR); return 0; } /* Reset device */ static int me_reset(struct comedi_device *dev) { /* Reset board */ writew(0x00, dev_private->me_regbase + ME_CONTROL_1); writew(0x00, dev_private->me_regbase + ME_CONTROL_2); writew(0x00, dev_private->me_regbase + ME_RESET_INTERRUPT); writew(0x00, dev_private->me_regbase + ME_DAC_CONTROL); /* Save values in the board context */ dev_private->dac_control = 0; dev_private->control_1 = 0; dev_private->control_2 = 0; return 0; } /* * Attach * * - Register PCI device * - Declare device driver 
capability */ static int me_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct pci_dev *pci_device = NULL; struct comedi_subdevice *subdevice; struct me_board *board; resource_size_t plx_regbase_tmp; unsigned long plx_regbase_size_tmp; resource_size_t me_regbase_tmp; unsigned long me_regbase_size_tmp; resource_size_t swap_regbase_tmp; unsigned long swap_regbase_size_tmp; resource_size_t regbase_tmp; int result, error, i; /* Allocate private memory */ if (alloc_private(dev, sizeof(struct me_private_data)) < 0) return -ENOMEM; /* Probe the device to determine what device in the series it is. */ for_each_pci_dev(pci_device) { if (pci_device->vendor == PCI_VENDOR_ID_MEILHAUS) { for (i = 0; i < me_board_nbr; i++) { if (me_boards[i].device_id == pci_device->device) { /* * was a particular bus/slot requested? */ if ((it->options[0] != 0) || (it->options[1] != 0)) { /* * are we on the wrong bus/slot? */ if (pci_device->bus->number != it->options[0] || PCI_SLOT(pci_device->devfn) != it->options[1]) { continue; } } dev->board_ptr = me_boards + i; board = (struct me_board *)dev->board_ptr; dev_private->pci_device = pci_device; goto found; } } } } printk(KERN_ERR "comedi%d: no supported board found! (req. bus/slot : %d/%d)\n", dev->minor, it->options[0], it->options[1]); return -EIO; found: printk(KERN_INFO "comedi%d: found %s at PCI bus %d, slot %d\n", dev->minor, me_boards[i].name, pci_device->bus->number, PCI_SLOT(pci_device->devfn)); /* Enable PCI device and request PCI regions */ if (comedi_pci_enable(pci_device, ME_DRIVER_NAME) < 0) { printk(KERN_ERR "comedi%d: Failed to enable PCI device and " "request regions\n", dev->minor); return -EIO; } /* Set data in device structure */ dev->board_name = board->name; /* Read PLX register base address [PCI_BASE_ADDRESS #0]. 
*/ plx_regbase_tmp = pci_resource_start(pci_device, 0); plx_regbase_size_tmp = pci_resource_len(pci_device, 0); dev_private->plx_regbase = ioremap(plx_regbase_tmp, plx_regbase_size_tmp); dev_private->plx_regbase_size = plx_regbase_size_tmp; if (!dev_private->plx_regbase) { printk("comedi%d: Failed to remap I/O memory\n", dev->minor); return -ENOMEM; } /* Read Swap base address [PCI_BASE_ADDRESS #5]. */ swap_regbase_tmp = pci_resource_start(pci_device, 5); swap_regbase_size_tmp = pci_resource_len(pci_device, 5); if (!swap_regbase_tmp) printk(KERN_ERR "comedi%d: Swap not present\n", dev->minor); /*---------------------------------------------- Workaround start ---*/ if (plx_regbase_tmp & 0x0080) { printk(KERN_ERR "comedi%d: PLX-Bug detected\n", dev->minor); if (swap_regbase_tmp) { regbase_tmp = plx_regbase_tmp; plx_regbase_tmp = swap_regbase_tmp; swap_regbase_tmp = regbase_tmp; result = pci_write_config_dword(pci_device, PCI_BASE_ADDRESS_0, plx_regbase_tmp); if (result != PCIBIOS_SUCCESSFUL) return -EIO; result = pci_write_config_dword(pci_device, PCI_BASE_ADDRESS_5, swap_regbase_tmp); if (result != PCIBIOS_SUCCESSFUL) return -EIO; } else { plx_regbase_tmp -= 0x80; result = pci_write_config_dword(pci_device, PCI_BASE_ADDRESS_0, plx_regbase_tmp); if (result != PCIBIOS_SUCCESSFUL) return -EIO; } } /*--------------------------------------------- Workaround end -----*/ /* Read Meilhaus register base address [PCI_BASE_ADDRESS #2]. 
*/ me_regbase_tmp = pci_resource_start(pci_device, 2); me_regbase_size_tmp = pci_resource_len(pci_device, 2); dev_private->me_regbase_size = me_regbase_size_tmp; dev_private->me_regbase = ioremap(me_regbase_tmp, me_regbase_size_tmp); if (!dev_private->me_regbase) { printk(KERN_ERR "comedi%d: Failed to remap I/O memory\n", dev->minor); return -ENOMEM; } /* Download firmware and reset card */ if (board->device_id == ME2600_DEVICE_ID) { unsigned char *aux_data; int aux_len; aux_data = comedi_aux_data(it->options, 0); aux_len = it->options[COMEDI_DEVCONF_AUX_DATA_LENGTH]; if (!aux_data || aux_len < 1) { comedi_error(dev, "You must provide me2600 firmware " "using the --init-data option of " "comedi_config"); return -EINVAL; } me2600_xilinx_download(dev, aux_data, aux_len); } me_reset(dev); /* device driver capabilities */ error = alloc_subdevices(dev, 3); if (error < 0) return error; subdevice = dev->subdevices + 0; subdevice->type = COMEDI_SUBD_AI; subdevice->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_CMD_READ; subdevice->n_chan = board->ai_channel_nbr; subdevice->maxdata = board->ai_resolution_mask; subdevice->len_chanlist = board->ai_channel_nbr; subdevice->range_table = board->ai_range_list; subdevice->cancel = me_ai_cancel; subdevice->insn_read = me_ai_insn_read; subdevice->do_cmdtest = me_ai_do_cmd_test; subdevice->do_cmd = me_ai_do_cmd; subdevice = dev->subdevices + 1; subdevice->type = COMEDI_SUBD_AO; subdevice->subdev_flags = SDF_WRITEABLE | SDF_COMMON; subdevice->n_chan = board->ao_channel_nbr; subdevice->maxdata = board->ao_resolution_mask; subdevice->len_chanlist = board->ao_channel_nbr; subdevice->range_table = board->ao_range_list; subdevice->insn_read = me_ao_insn_read; subdevice->insn_write = me_ao_insn_write; subdevice = dev->subdevices + 2; subdevice->type = COMEDI_SUBD_DIO; subdevice->subdev_flags = SDF_READABLE | SDF_WRITEABLE; subdevice->n_chan = board->dio_channel_nbr; subdevice->maxdata = 1; subdevice->len_chanlist = board->dio_channel_nbr; 
subdevice->range_table = &range_digital; subdevice->insn_bits = me_dio_insn_bits; subdevice->insn_config = me_dio_insn_config; subdevice->io_bits = 0; printk(KERN_INFO "comedi%d: " ME_DRIVER_NAME " attached.\n", dev->minor); return 0; } /* Detach */ static int me_detach(struct comedi_device *dev) { if (dev_private) { if (dev_private->me_regbase) { me_reset(dev); iounmap(dev_private->me_regbase); } if (dev_private->plx_regbase) iounmap(dev_private->plx_regbase); if (dev_private->pci_device) { if (dev_private->plx_regbase_size) comedi_pci_disable(dev_private->pci_device); pci_dev_put(dev_private->pci_device); } } return 0; } MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
mdo-rom/platform_kernel_samsung_crespo
arch/mips/emma/common/prom.c
4368
1880
/*
 * Copyright (C) NEC Electronics Corporation 2004-2006
 *
 * This file is based on the arch/mips/ddb5xxx/common/prom.c
 *
 * Copyright 2001 MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/bootmem.h>

#include <asm/addrspace.h>
#include <asm/bootinfo.h>
#include <asm/emma/emma2rh.h>

/* Human-readable board name reported via /proc/cpuinfo and friends. */
const char *get_system_type(void)
{
#ifdef CONFIG_NEC_MARKEINS
	return "NEC EMMA2RH Mark-eins";
#else
#error Unknown NEC board
#endif
}

/*
 * Early firmware-argument parsing.
 *
 * [jsun@junsun.net] PMON passes arguments in C main() style: fw_arg0 is
 * argc and fw_arg1 is argv.  arg[0] is "g"; the remaining entries are
 * boot parameters, which are concatenated (space separated) into
 * arcs_cmdline.  Also registers the board's RAM region.
 */
void __init prom_init(void)
{
	int argc = fw_arg0;
	char **arg = (char **)fw_arg1;
	int i;

	/* if user passes kernel args, ignore the default one */
	if (argc > 1)
		arcs_cmdline[0] = '\0';

	/* arg[0] is "g", the rest is boot parameters */
	for (i = 1; i < argc; i++) {
		/*
		 * Bug fix: the guard used to read strlen(arg[i] + 1) —
		 * a misplaced parenthesis that under-counted the argument
		 * length and allowed the strcat pair below to overflow
		 * arcs_cmdline.  The +1 accounts for the separating space.
		 */
		if (strlen(arcs_cmdline) + strlen(arg[i]) + 1
		    >= sizeof(arcs_cmdline))
			break;
		strcat(arcs_cmdline, arg[i]);
		strcat(arcs_cmdline, " ");
	}

#ifdef CONFIG_NEC_MARKEINS
	/* Single contiguous RAM region starting at physical 0. */
	add_memory_region(0, EMMA2RH_RAM_SIZE, BOOT_MEM_RAM);
#else
#error Unknown NEC board
#endif
}

/* Nothing to reclaim: PMON leaves no firmware memory worth freeing. */
void __init prom_free_prom_memory(void)
{
}
gpl-2.0
KylinUI/android_kernel_samsung_d2
drivers/nubus/nubus.c
5136
27007
/* * Macintosh Nubus Interface Code * * Originally by Alan Cox * * Mostly rewritten by David Huggins-Daines, C. Scott Ananian, * and others. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nubus.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/slab.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/hwtest.h> #include <linux/proc_fs.h> #include <asm/mac_via.h> #include <asm/mac_oss.h> extern void via_nubus_init(void); extern void oss_nubus_init(void); /* Constants */ /* This is, of course, the size in bytelanes, rather than the size in actual bytes */ #define FORMAT_BLOCK_SIZE 20 #define ROM_DIR_OFFSET 0x24 #define NUBUS_TEST_PATTERN 0x5A932BC7 /* Define this if you like to live dangerously - it is known not to work on pretty much every machine except the Quadra 630 and the LC III. */ #undef I_WANT_TO_PROBE_SLOT_ZERO /* This sometimes helps combat failure to boot */ #undef TRY_TO_DODGE_WSOD /* Globals */ struct nubus_dev* nubus_devices; struct nubus_board* nubus_boards; /* Meaning of "bytelanes": The card ROM may appear on any or all bytes of each long word in NuBus memory. The low 4 bits of the "map" value found in the format block (at the top of the slot address space, as well as at the top of the MacOS ROM) tells us which bytelanes, i.e. which byte offsets within each longword, are valid. Thus: A map of 0x0f, as found in the MacOS ROM, means that all bytelanes are valid. A map of 0xf0 means that no bytelanes are valid (We pray that we will never encounter this, but stranger things have happened) A map of 0xe1 means that only the MSB of each long word is actually part of the card ROM. (We hope to never encounter NuBus on a little-endian machine. Again, stranger things have happened) A map of 0x78 means that only the LSB of each long word is valid. Etcetera, etcetera. 
Hopefully this clears up some confusion over what the following code actually does. */ static inline int not_useful(void *p, int map) { unsigned long pv=(unsigned long)p; pv &= 3; if(map & (1<<pv)) return 0; return 1; } static unsigned long nubus_get_rom(unsigned char **ptr, int len, int map) { /* This will hold the result */ unsigned long v = 0; unsigned char *p = *ptr; while(len) { v <<= 8; while(not_useful(p,map)) p++; v |= *p++; len--; } *ptr = p; return v; } static void nubus_rewind(unsigned char **ptr, int len, int map) { unsigned char *p=*ptr; /* Sanity check */ if(len > 65536) printk(KERN_ERR "rewind of 0x%08x!\n", len); while(len) { do { p--; } while(not_useful(p, map)); len--; } *ptr=p; } static void nubus_advance(unsigned char **ptr, int len, int map) { unsigned char *p = *ptr; if(len>65536) printk(KERN_ERR "advance of 0x%08x!\n", len); while(len) { while(not_useful(p,map)) p++; p++; len--; } *ptr = p; } static void nubus_move(unsigned char **ptr, int len, int map) { if(len > 0) nubus_advance(ptr, len, map); else if(len < 0) nubus_rewind(ptr, -len, map); } /* Now, functions to read the sResource tree */ /* Each sResource entry consists of a 1-byte ID and a 3-byte data field. If that data field contains an offset, then obviously we have to expand it from a 24-bit signed number to a 32-bit signed number. */ static inline long nubus_expand32(long foo) { if(foo & 0x00800000) /* 24bit negative */ foo |= 0xFF000000; return foo; } static inline void *nubus_rom_addr(int slot) { /* * Returns the first byte after the card. We then walk * backwards to get the lane register and the config */ return (void *)(0xF1000000+(slot<<24)); } static unsigned char *nubus_dirptr(const struct nubus_dirent *nd) { unsigned char *p = nd->base; /* Essentially, just step over the bytelanes using whatever offset we might have found */ nubus_move(&p, nubus_expand32(nd->data), nd->mask); /* And return the value */ return p; } /* These two are for pulling resource data blocks (i.e. 
stuff that's pointed to with offsets) out of the card ROM. */ void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent* dirent, int len) { unsigned char *t = (unsigned char *)dest; unsigned char *p = nubus_dirptr(dirent); while(len) { *t++ = nubus_get_rom(&p, 1, dirent->mask); len--; } } EXPORT_SYMBOL(nubus_get_rsrc_mem); void nubus_get_rsrc_str(void *dest, const struct nubus_dirent* dirent, int len) { unsigned char *t=(unsigned char *)dest; unsigned char *p = nubus_dirptr(dirent); while(len) { *t = nubus_get_rom(&p, 1, dirent->mask); if(!*t++) break; len--; } } EXPORT_SYMBOL(nubus_get_rsrc_str); int nubus_get_root_dir(const struct nubus_board* board, struct nubus_dir* dir) { dir->ptr = dir->base = board->directory; dir->done = 0; dir->mask = board->lanes; return 0; } EXPORT_SYMBOL(nubus_get_root_dir); /* This is a slyly renamed version of the above */ int nubus_get_func_dir(const struct nubus_dev* dev, struct nubus_dir* dir) { dir->ptr = dir->base = dev->directory; dir->done = 0; dir->mask = dev->board->lanes; return 0; } EXPORT_SYMBOL(nubus_get_func_dir); int nubus_get_board_dir(const struct nubus_board* board, struct nubus_dir* dir) { struct nubus_dirent ent; dir->ptr = dir->base = board->directory; dir->done = 0; dir->mask = board->lanes; /* Now dereference it (the first directory is always the board directory) */ if (nubus_readdir(dir, &ent) == -1) return -1; if (nubus_get_subdir(&ent, dir) == -1) return -1; return 0; } EXPORT_SYMBOL(nubus_get_board_dir); int nubus_get_subdir(const struct nubus_dirent *ent, struct nubus_dir *dir) { dir->ptr = dir->base = nubus_dirptr(ent); dir->done = 0; dir->mask = ent->mask; return 0; } EXPORT_SYMBOL(nubus_get_subdir); int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent) { u32 resid; if (nd->done) return -1; /* Do this first, otherwise nubus_rewind & co are off by 4 */ ent->base = nd->ptr; /* This moves nd->ptr forward */ resid = nubus_get_rom(&nd->ptr, 4, nd->mask); /* EOL marker, as per the Apple docs */ 
if((resid&0xff000000) == 0xff000000) { /* Mark it as done */ nd->done = 1; return -1; } /* First byte is the resource ID */ ent->type = resid >> 24; /* Low 3 bytes might contain data (or might not) */ ent->data = resid & 0xffffff; ent->mask = nd->mask; return 0; } EXPORT_SYMBOL(nubus_readdir); int nubus_rewinddir(struct nubus_dir* dir) { dir->ptr = dir->base; return 0; } EXPORT_SYMBOL(nubus_rewinddir); /* Driver interface functions, more or less like in pci.c */ struct nubus_dev* nubus_find_device(unsigned short category, unsigned short type, unsigned short dr_hw, unsigned short dr_sw, const struct nubus_dev* from) { struct nubus_dev* itor = from ? from->next : nubus_devices; while (itor) { if (itor->category == category && itor->type == type && itor->dr_hw == dr_hw && itor->dr_sw == dr_sw) return itor; itor = itor->next; } return NULL; } EXPORT_SYMBOL(nubus_find_device); struct nubus_dev* nubus_find_type(unsigned short category, unsigned short type, const struct nubus_dev* from) { struct nubus_dev* itor = from ? from->next : nubus_devices; while (itor) { if (itor->category == category && itor->type == type) return itor; itor = itor->next; } return NULL; } EXPORT_SYMBOL(nubus_find_type); struct nubus_dev* nubus_find_slot(unsigned int slot, const struct nubus_dev* from) { struct nubus_dev* itor = from ? from->next : nubus_devices; while (itor) { if (itor->board->slot == slot) return itor; itor = itor->next; } return NULL; } EXPORT_SYMBOL(nubus_find_slot); int nubus_find_rsrc(struct nubus_dir* dir, unsigned char rsrc_type, struct nubus_dirent* ent) { while (nubus_readdir(dir, ent) != -1) { if (ent->type == rsrc_type) return 0; } return -1; } EXPORT_SYMBOL(nubus_find_rsrc); /* Initialization functions - decide which slots contain stuff worth looking at, and print out lots and lots of information from the resource blocks. 
*/ /* FIXME: A lot of this stuff will eventually be useful after initialization, for intelligently probing Ethernet and video chips, among other things. The rest of it should go in the /proc code. For now, we just use it to give verbose boot logs. */ static int __init nubus_show_display_resource(struct nubus_dev* dev, const struct nubus_dirent* ent) { switch (ent->type) { case NUBUS_RESID_GAMMADIR: printk(KERN_INFO " gamma directory offset: 0x%06x\n", ent->data); break; case 0x0080 ... 0x0085: printk(KERN_INFO " mode %02X info offset: 0x%06x\n", ent->type, ent->data); break; default: printk(KERN_INFO " unknown resource %02X, data 0x%06x\n", ent->type, ent->data); } return 0; } static int __init nubus_show_network_resource(struct nubus_dev* dev, const struct nubus_dirent* ent) { switch (ent->type) { case NUBUS_RESID_MAC_ADDRESS: { char addr[6]; int i; nubus_get_rsrc_mem(addr, ent, 6); printk(KERN_INFO " MAC address: "); for (i = 0; i < 6; i++) printk("%02x%s", addr[i] & 0xff, i == 5 ? "" : ":"); printk("\n"); break; } default: printk(KERN_INFO " unknown resource %02X, data 0x%06x\n", ent->type, ent->data); } return 0; } static int __init nubus_show_cpu_resource(struct nubus_dev* dev, const struct nubus_dirent* ent) { switch (ent->type) { case NUBUS_RESID_MEMINFO: { unsigned long meminfo[2]; nubus_get_rsrc_mem(&meminfo, ent, 8); printk(KERN_INFO " memory: [ 0x%08lx 0x%08lx ]\n", meminfo[0], meminfo[1]); break; } case NUBUS_RESID_ROMINFO: { unsigned long rominfo[2]; nubus_get_rsrc_mem(&rominfo, ent, 8); printk(KERN_INFO " ROM: [ 0x%08lx 0x%08lx ]\n", rominfo[0], rominfo[1]); break; } default: printk(KERN_INFO " unknown resource %02X, data 0x%06x\n", ent->type, ent->data); } return 0; } static int __init nubus_show_private_resource(struct nubus_dev* dev, const struct nubus_dirent* ent) { switch (dev->category) { case NUBUS_CAT_DISPLAY: nubus_show_display_resource(dev, ent); break; case NUBUS_CAT_NETWORK: nubus_show_network_resource(dev, ent); break; case NUBUS_CAT_CPU: 
nubus_show_cpu_resource(dev, ent); break; default: printk(KERN_INFO " unknown resource %02X, data 0x%06x\n", ent->type, ent->data); } return 0; } static struct nubus_dev* __init nubus_get_functional_resource(struct nubus_board* board, int slot, const struct nubus_dirent* parent) { struct nubus_dir dir; struct nubus_dirent ent; struct nubus_dev* dev; printk(KERN_INFO " Function 0x%02x:\n", parent->type); nubus_get_subdir(parent, &dir); /* Apple seems to have botched the ROM on the IIx */ if (slot == 0 && (unsigned long)dir.base % 2) dir.base += 1; if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_functional_resource: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); /* Actually we should probably panic if this fails */ if ((dev = kzalloc(sizeof(*dev), GFP_ATOMIC)) == NULL) return NULL; dev->resid = parent->type; dev->directory = dir.base; dev->board = board; while (nubus_readdir(&dir, &ent) != -1) { switch(ent.type) { case NUBUS_RESID_TYPE: { unsigned short nbtdata[4]; nubus_get_rsrc_mem(nbtdata, &ent, 8); dev->category = nbtdata[0]; dev->type = nbtdata[1]; dev->dr_sw = nbtdata[2]; dev->dr_hw = nbtdata[3]; printk(KERN_INFO " type: [cat 0x%x type 0x%x hw 0x%x sw 0x%x]\n", nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]); break; } case NUBUS_RESID_NAME: { nubus_get_rsrc_str(dev->name, &ent, 64); printk(KERN_INFO " name: %s\n", dev->name); break; } case NUBUS_RESID_DRVRDIR: { /* MacOS driver. If we were NetBSD we might use this :-) */ struct nubus_dir drvr_dir; struct nubus_dirent drvr_ent; nubus_get_subdir(&ent, &drvr_dir); nubus_readdir(&drvr_dir, &drvr_ent); dev->driver = nubus_dirptr(&drvr_ent); printk(KERN_INFO " driver at: 0x%p\n", dev->driver); break; } case NUBUS_RESID_MINOR_BASEOS: /* We will need this in order to support multiple framebuffers. 
It might be handy for Ethernet as well */ nubus_get_rsrc_mem(&dev->iobase, &ent, 4); printk(KERN_INFO " memory offset: 0x%08lx\n", dev->iobase); break; case NUBUS_RESID_MINOR_LENGTH: /* Ditto */ nubus_get_rsrc_mem(&dev->iosize, &ent, 4); printk(KERN_INFO " memory length: 0x%08lx\n", dev->iosize); break; case NUBUS_RESID_FLAGS: dev->flags = ent.data; printk(KERN_INFO " flags: 0x%06x\n", dev->flags); break; case NUBUS_RESID_HWDEVID: dev->hwdevid = ent.data; printk(KERN_INFO " hwdevid: 0x%06x\n", dev->hwdevid); break; default: /* Local/Private resources have their own function */ nubus_show_private_resource(dev, &ent); } } return dev; } /* This is cool. */ static int __init nubus_get_vidnames(struct nubus_board* board, const struct nubus_dirent* parent) { struct nubus_dir dir; struct nubus_dirent ent; /* FIXME: obviously we want to put this in a header file soon */ struct vidmode { u32 size; /* Don't know what this is yet */ u16 id; /* Longest one I've seen so far is 26 characters */ char name[32]; }; printk(KERN_INFO " video modes supported:\n"); nubus_get_subdir(parent, &dir); if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_vidnames: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); while(nubus_readdir(&dir, &ent) != -1) { struct vidmode mode; u32 size; /* First get the length */ nubus_get_rsrc_mem(&size, &ent, 4); /* Now clobber the whole thing */ if (size > sizeof(mode) - 1) size = sizeof(mode) - 1; memset(&mode, 0, sizeof(mode)); nubus_get_rsrc_mem(&mode, &ent, size); printk (KERN_INFO " %02X: (%02X) %s\n", ent.type, mode.id, mode.name); } return 0; } /* This is *really* cool. */ static int __init nubus_get_icon(struct nubus_board* board, const struct nubus_dirent* ent) { /* Should be 32x32 if my memory serves me correctly */ unsigned char icon[128]; int x, y; nubus_get_rsrc_mem(&icon, ent, 128); printk(KERN_INFO " icon:\n"); /* We should actually plot these somewhere in the framebuffer init. 
This is just to demonstrate that they do, in fact, exist */ for (y = 0; y < 32; y++) { printk(KERN_INFO " "); for (x = 0; x < 32; x++) { if (icon[y*4 + x/8] & (0x80 >> (x%8))) printk("*"); else printk(" "); } printk("\n"); } return 0; } static int __init nubus_get_vendorinfo(struct nubus_board* board, const struct nubus_dirent* parent) { struct nubus_dir dir; struct nubus_dirent ent; static char* vendor_fields[6] = {"ID", "serial", "revision", "part", "date", "unknown field"}; printk(KERN_INFO " vendor info:\n"); nubus_get_subdir(parent, &dir); if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_vendorinfo: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); while(nubus_readdir(&dir, &ent) != -1) { char name[64]; /* These are all strings, we think */ nubus_get_rsrc_str(name, &ent, 64); if (ent.type > 5) ent.type = 5; printk(KERN_INFO " %s: %s\n", vendor_fields[ent.type-1], name); } return 0; } static int __init nubus_get_board_resource(struct nubus_board* board, int slot, const struct nubus_dirent* parent) { struct nubus_dir dir; struct nubus_dirent ent; nubus_get_subdir(parent, &dir); if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_board_resource: parent is 0x%p, dir is 0x%p\n", parent->base, dir.base); while(nubus_readdir(&dir, &ent) != -1) { switch (ent.type) { case NUBUS_RESID_TYPE: { unsigned short nbtdata[4]; /* This type is always the same, and is not useful except insofar as it tells us that we really are looking at a board resource. 
*/ nubus_get_rsrc_mem(nbtdata, &ent, 8); printk(KERN_INFO " type: [cat 0x%x type 0x%x hw 0x%x sw 0x%x]\n", nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]); if (nbtdata[0] != 1 || nbtdata[1] != 0 || nbtdata[2] != 0 || nbtdata[3] != 0) printk(KERN_ERR "this sResource is not a board resource!\n"); break; } case NUBUS_RESID_NAME: nubus_get_rsrc_str(board->name, &ent, 64); printk(KERN_INFO " name: %s\n", board->name); break; case NUBUS_RESID_ICON: nubus_get_icon(board, &ent); break; case NUBUS_RESID_BOARDID: printk(KERN_INFO " board id: 0x%x\n", ent.data); break; case NUBUS_RESID_PRIMARYINIT: printk(KERN_INFO " primary init offset: 0x%06x\n", ent.data); break; case NUBUS_RESID_VENDORINFO: nubus_get_vendorinfo(board, &ent); break; case NUBUS_RESID_FLAGS: printk(KERN_INFO " flags: 0x%06x\n", ent.data); break; case NUBUS_RESID_HWDEVID: printk(KERN_INFO " hwdevid: 0x%06x\n", ent.data); break; case NUBUS_RESID_SECONDINIT: printk(KERN_INFO " secondary init offset: 0x%06x\n", ent.data); break; /* WTF isn't this in the functional resources? 
*/ case NUBUS_RESID_VIDNAMES: nubus_get_vidnames(board, &ent); break; /* Same goes for this */ case NUBUS_RESID_VIDMODES: printk(KERN_INFO " video mode parameter directory offset: 0x%06x\n", ent.data); break; default: printk(KERN_INFO " unknown resource %02X, data 0x%06x\n", ent.type, ent.data); } } return 0; } /* Attempt to bypass the somewhat non-obvious arrangement of sResources in the motherboard ROM */ static void __init nubus_find_rom_dir(struct nubus_board* board) { unsigned char* rp; unsigned char* romdir; struct nubus_dir dir; struct nubus_dirent ent; /* Check for the extra directory just under the format block */ rp = board->fblock; nubus_rewind(&rp, 4, board->lanes); if (nubus_get_rom(&rp, 4, board->lanes) != NUBUS_TEST_PATTERN) { /* OK, the ROM was telling the truth */ board->directory = board->fblock; nubus_move(&board->directory, nubus_expand32(board->doffset), board->lanes); return; } /* On "slot zero", you have to walk down a few more directories to get to the equivalent of a real card's root directory. We don't know what they were smoking when they came up with this. */ romdir = nubus_rom_addr(board->slot); nubus_rewind(&romdir, ROM_DIR_OFFSET, board->lanes); dir.base = dir.ptr = romdir; dir.done = 0; dir.mask = board->lanes; /* This one points to an "Unknown Macintosh" directory */ if (nubus_readdir(&dir, &ent) == -1) goto badrom; if (console_loglevel >= 10) printk(KERN_INFO "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); /* This one takes us to where we want to go. */ if (nubus_readdir(&dir, &ent) == -1) goto badrom; if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); nubus_get_subdir(&ent, &dir); /* Resource ID 01, also an "Unknown Macintosh" */ if (nubus_readdir(&dir, &ent) == -1) goto badrom; if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); /* FIXME: the first one is *not* always the right one. 
We suspect this has something to do with the ROM revision. "The HORROR ROM" (LC-series) uses 0x7e, while "The HORROR Continues" (Q630) uses 0x7b. The DAFB Macs evidently use something else. Please run "Slots" on your Mac (see include/linux/nubus.h for where to get this program) and tell us where the 'SiDirPtr' for Slot 0 is. If you feel brave, you should also use MacsBug to walk down the ROM directories like this function does and try to find the path to that address... */ if (nubus_readdir(&dir, &ent) == -1) goto badrom; if (console_loglevel >= 10) printk(KERN_DEBUG "nubus_get_rom_dir: entry %02x %06x\n", ent.type, ent.data); /* Bwahahahaha... */ nubus_get_subdir(&ent, &dir); board->directory = dir.base; return; /* Even more evil laughter... */ badrom: board->directory = board->fblock; nubus_move(&board->directory, nubus_expand32(board->doffset), board->lanes); printk(KERN_ERR "nubus_get_rom_dir: ROM weirdness! Notify the developers...\n"); } /* Add a board (might be many devices) to the list */ static struct nubus_board* __init nubus_add_board(int slot, int bytelanes) { struct nubus_board* board; struct nubus_board** boardp; unsigned char *rp; unsigned long dpat; struct nubus_dir dir; struct nubus_dirent ent; /* Move to the start of the format block */ rp = nubus_rom_addr(slot); nubus_rewind(&rp, FORMAT_BLOCK_SIZE, bytelanes); /* Actually we should probably panic if this fails */ if ((board = kzalloc(sizeof(*board), GFP_ATOMIC)) == NULL) return NULL; board->fblock = rp; /* Dump the format block for debugging purposes */ if (console_loglevel >= 10) { int i; printk(KERN_DEBUG "Slot %X, format block at 0x%p\n", slot, rp); printk(KERN_DEBUG "Format block: "); for (i = 0; i < FORMAT_BLOCK_SIZE; i += 4) { unsigned short foo, bar; foo = nubus_get_rom(&rp, 2, bytelanes); bar = nubus_get_rom(&rp, 2, bytelanes); printk("%04x %04x ", foo, bar); } printk("\n"); rp = board->fblock; } board->slot = slot; board->slot_addr = (unsigned long) nubus_slot_addr(slot); board->doffset 
= nubus_get_rom(&rp, 4, bytelanes); /* rom_length is *supposed* to be the total length of the * ROM. In practice it is the "amount of ROM used to compute * the CRC." So some jokers decide to set it to zero and * set the crc to zero so they don't have to do any math. * See the Performa 460 ROM, for example. Those Apple "engineers". */ board->rom_length = nubus_get_rom(&rp, 4, bytelanes); board->crc = nubus_get_rom(&rp, 4, bytelanes); board->rev = nubus_get_rom(&rp, 1, bytelanes); board->format = nubus_get_rom(&rp,1, bytelanes); board->lanes = bytelanes; /* Directory offset should be small and negative... */ if(!(board->doffset & 0x00FF0000)) printk(KERN_WARNING "Dodgy doffset!\n"); dpat = nubus_get_rom(&rp, 4, bytelanes); if(dpat != NUBUS_TEST_PATTERN) printk(KERN_WARNING "Wrong test pattern %08lx!\n", dpat); /* * I wonder how the CRC is meant to work - * any takers ? * CSA: According to MAC docs, not all cards pass the CRC anyway, * since the initial Macintosh ROM releases skipped the check. */ /* Attempt to work around slot zero weirdness */ nubus_find_rom_dir(board); nubus_get_root_dir(board, &dir); /* We're ready to rock */ printk(KERN_INFO "Slot %X:\n", slot); /* Each slot should have one board resource and any number of functional resources. So we'll fill in some fields in the struct nubus_board from the board resource, then walk down the list of functional resources, spinning out a nubus_dev for each of them. */ if (nubus_readdir(&dir, &ent) == -1) { /* We can't have this! */ printk(KERN_ERR "Board resource not found!\n"); return NULL; } else { printk(KERN_INFO " Board resource:\n"); nubus_get_board_resource(board, slot, &ent); } /* Aaaarrrrgghh! The LC III motherboard has *two* board resources. I have no idea WTF to do about this. 
*/ while (nubus_readdir(&dir, &ent) != -1) { struct nubus_dev* dev; struct nubus_dev** devp; dev = nubus_get_functional_resource(board, slot, &ent); if (dev == NULL) continue; /* We zeroed this out above */ if (board->first_dev == NULL) board->first_dev = dev; /* Put it on the global NuBus device chain. Keep entries in order. */ for (devp=&nubus_devices; *devp!=NULL; devp=&((*devp)->next)) /* spin */; *devp = dev; dev->next = NULL; } /* Put it on the global NuBus board chain. Keep entries in order. */ for (boardp=&nubus_boards; *boardp!=NULL; boardp=&((*boardp)->next)) /* spin */; *boardp = board; board->next = NULL; return board; } void __init nubus_probe_slot(int slot) { unsigned char dp; unsigned char* rp; int i; rp = nubus_rom_addr(slot); for(i = 4; i; i--) { unsigned long flags; int card_present; rp--; local_irq_save(flags); card_present = hwreg_present(rp); local_irq_restore(flags); if (!card_present) continue; printk(KERN_DEBUG "Now probing slot %X at %p\n", slot, rp); dp = *rp; if(dp == 0) continue; /* The last byte of the format block consists of two nybbles which are "mirror images" of each other. These show us the valid bytelanes */ if ((((dp>>4) ^ dp) & 0x0F) != 0x0F) continue; /* Check that this value is actually *on* one of the bytelanes it claims are valid! */ if ((dp & 0x0F) >= (1<<i)) continue; /* Looks promising. Let's put it on the list. 
*/ nubus_add_board(slot, dp); return; } } #if defined(CONFIG_PROC_FS) /* /proc/nubus stuff */ static int sprint_nubus_board(struct nubus_board* board, char* ptr, int len) { if(len < 100) return -1; sprintf(ptr, "Slot %X: %s\n", board->slot, board->name); return strlen(ptr); } static int nubus_read_proc(char *page, char **start, off_t off, int count, int *eof, void *data) { int nprinted, len, begin = 0; int size = PAGE_SIZE; struct nubus_board* board; len = sprintf(page, "Nubus devices found:\n"); /* Walk the list of NuBus boards */ for (board = nubus_boards; board != NULL; board = board->next) { nprinted = sprint_nubus_board(board, page + len, size - len); if (nprinted < 0) break; len += nprinted; if (len+begin < off) { begin += len; len = 0; } if (len+begin >= off+count) break; } if (len+begin < off) *eof = 1; off -= begin; *start = page + off; len -= off; if (len>count) len = count; if (len<0) len = 0; return len; } #endif void __init nubus_scan_bus(void) { int slot; /* This might not work on your machine */ #ifdef I_WANT_TO_PROBE_SLOT_ZERO nubus_probe_slot(0); #endif for(slot = 9; slot < 15; slot++) { nubus_probe_slot(slot); } } static int __init nubus_init(void) { if (!MACH_IS_MAC) return 0; /* Initialize the NuBus interrupts */ if (oss_present) { oss_nubus_init(); } else { via_nubus_init(); } #ifdef TRY_TO_DODGE_WSOD /* Rogue Ethernet interrupts can kill the machine if we don't do this. Obviously this is bogus. Hopefully the local VIA gurus can fix the real cause of the problem. */ mdelay(1000); #endif /* And probe */ printk("NuBus: Scanning NuBus slots.\n"); nubus_devices = NULL; nubus_boards = NULL; nubus_scan_bus(); #ifdef CONFIG_PROC_FS create_proc_read_entry("nubus", 0, NULL, nubus_read_proc, NULL); nubus_proc_init(); #endif return 0; } subsys_initcall(nubus_init);
gpl-2.0
hakcenter/android_kernel_samsung_hlte
sound/pci/ice1712/juli.c
9232
20286
/*
 * ALSA driver for ICEnsemble VT1724 (Envy24HT)
 *
 * Lowlevel functions for ESI Juli@ cards
 *
 * Copyright (c) 2004 Jaroslav Kysela <perex@perex.cz>
 * 2008 Pavel Hofman <dustin@seznam.cz>
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <asm/io.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/tlv.h>

#include "ice1712.h"
#include "envy24ht.h"
#include "juli.h"

/* Per-card private data, hung off ice->spec. */
struct juli_spec {
	struct ak4114 *ak4114;	/* S/PDIF receiver instance */
	unsigned int analog:1;	/* analog daughter board present */
};

/*
 * chip addresses on I2C bus
 */
#define AK4114_ADDR	0x20	/* S/PDIF receiver */
#define AK4358_ADDR	0x22	/* DAC */

/*
 * Juli does not use the standard ICE1724 clock scheme. Juli's ice1724 chip is
 * supplied by external clock provided by Xilinx array and MK73-1 PLL frequency
 * multiplier. Actual frequency is set by ice1724 GPIOs hooked to the Xilinx.
 *
 * The clock circuitry is supplied by the two ice1724 crystals. This
 * arrangement allows to generate independent clock signal for AK4114's input
 * rate detection circuit. As a result, Juli, unlike most other
 * ice1724+ak4114-based cards, detects spdif input rate correctly.
 * This fact is applied in the driver, allowing to modify PCM stream rate
 * parameter according to the actual input rate.
 *
 * Juli uses the remaining three stereo-channels of its DAC to optionally
 * monitor analog input, digital input, and digital output. The corresponding
 * I2S signals are routed by Xilinx, controlled by GPIOs.
 *
 * The master mute is implemented using output muting transistors (GPIO) in
 * combination with smuting the DAC.
 *
 * The card itself has no HW master volume control, implemented using the
 * vmaster control.
 *
 * TODO:
 * researching and fixing the input monitors
 */

/*
 * GPIO pins
 */
#define GPIO_FREQ_MASK		(3<<0)
#define GPIO_FREQ_32KHZ		(0<<0)
#define GPIO_FREQ_44KHZ		(1<<0)
#define GPIO_FREQ_48KHZ		(2<<0)
#define GPIO_MULTI_MASK		(3<<2)
#define GPIO_MULTI_4X		(0<<2)
#define GPIO_MULTI_2X		(1<<2)
#define GPIO_MULTI_1X		(2<<2)		/* also external */
#define GPIO_MULTI_HALF		(3<<2)
#define GPIO_INTERNAL_CLOCK	(1<<4)		/* 0 = external, 1 = internal */
#define GPIO_CLOCK_MASK		(1<<4)
#define GPIO_ANALOG_PRESENT	(1<<5)		/* RO only: 0 = present */
#define GPIO_RXMCLK_SEL		(1<<7)		/* must be 0 */
#define GPIO_AK5385A_CKS0	(1<<8)
#define GPIO_AK5385A_DFS1	(1<<9)
#define GPIO_AK5385A_DFS0	(1<<10)
#define GPIO_DIGOUT_MONITOR	(1<<11)		/* 1 = active */
#define GPIO_DIGIN_MONITOR	(1<<12)		/* 1 = active */
#define GPIO_ANAIN_MONITOR	(1<<13)		/* 1 = active */
#define GPIO_AK5385A_CKS1	(1<<14)		/* must be 0 */
#define GPIO_MUTE_CONTROL	(1<<15)		/* output mute, 1 = muted */

#define GPIO_RATE_MASK		(GPIO_FREQ_MASK | GPIO_MULTI_MASK | \
		GPIO_CLOCK_MASK)
#define GPIO_AK5385A_MASK	(GPIO_AK5385A_CKS0 | GPIO_AK5385A_DFS0 | \
		GPIO_AK5385A_DFS1 | GPIO_AK5385A_CKS1)

#define JULI_PCM_RATE	(SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \
		SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \
		SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_64000 | \
		SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
		SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)

#define GPIO_RATE_16000		(GPIO_FREQ_32KHZ | GPIO_MULTI_HALF | \
		GPIO_INTERNAL_CLOCK)
#define GPIO_RATE_22050		(GPIO_FREQ_44KHZ | GPIO_MULTI_HALF | \
		GPIO_INTERNAL_CLOCK)
#define GPIO_RATE_24000		(GPIO_FREQ_48KHZ | GPIO_MULTI_HALF | \
		GPIO_INTERNAL_CLOCK)
#define GPIO_RATE_32000		(GPIO_FREQ_32KHZ | GPIO_MULTI_1X | \
		GPIO_INTERNAL_CLOCK)
#define GPIO_RATE_44100		(GPIO_FREQ_44KHZ | GPIO_MULTI_1X | \
		GPIO_INTERNAL_CLOCK)
#define GPIO_RATE_48000		(GPIO_FREQ_48KHZ | GPIO_MULTI_1X | \
		GPIO_INTERNAL_CLOCK)
#define GPIO_RATE_64000		(GPIO_FREQ_32KHZ | GPIO_MULTI_2X | \
		GPIO_INTERNAL_CLOCK)
#define GPIO_RATE_88200		(GPIO_FREQ_44KHZ | GPIO_MULTI_2X | \
		GPIO_INTERNAL_CLOCK)
#define GPIO_RATE_96000		(GPIO_FREQ_48KHZ | GPIO_MULTI_2X | \
		GPIO_INTERNAL_CLOCK)
#define GPIO_RATE_176400	(GPIO_FREQ_44KHZ | GPIO_MULTI_4X | \
		GPIO_INTERNAL_CLOCK)
#define GPIO_RATE_192000	(GPIO_FREQ_48KHZ | GPIO_MULTI_4X | \
		GPIO_INTERNAL_CLOCK)

/*
 * Initial setup of the conversion array GPIO <-> rate
 */
static unsigned int juli_rates[] = {
	16000, 22050, 24000, 32000,
	44100, 48000, 64000, 88200,
	96000, 176400, 192000,
};

/* gpio_vals[i] is the GPIO rate setting for juli_rates[i]. */
static unsigned int gpio_vals[] = {
	GPIO_RATE_16000, GPIO_RATE_22050, GPIO_RATE_24000, GPIO_RATE_32000,
	GPIO_RATE_44100, GPIO_RATE_48000, GPIO_RATE_64000, GPIO_RATE_88200,
	GPIO_RATE_96000, GPIO_RATE_176400, GPIO_RATE_192000,
};

static struct snd_pcm_hw_constraint_list juli_rates_info = {
	.count = ARRAY_SIZE(juli_rates),
	.list = juli_rates,
	.mask = 0,
};

/* Map a sample rate to its GPIO setting; 0 if the rate is unsupported. */
static int get_gpio_val(int rate)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(juli_rates); i++)
		if (juli_rates[i] == rate)
			return gpio_vals[i];
	return 0;
}

/* AK4114 register write via the VT1724 I2C bus. */
static void juli_ak4114_write(void *private_data, unsigned char reg,
				unsigned char val)
{
	snd_vt1724_write_i2c((struct snd_ice1712 *)private_data, AK4114_ADDR,
				reg, val);
}

/* AK4114 register read via the VT1724 I2C bus. */
static unsigned char juli_ak4114_read(void *private_data, unsigned char reg)
{
	return snd_vt1724_read_i2c((struct snd_ice1712 *)private_data,
					AK4114_ADDR, reg);
}

/*
 * If SPDIF capture and slaved to SPDIF-IN, setting runtime rate
 * to the external rate
 */
static void juli_spdif_in_open(struct snd_ice1712 *ice,
				struct snd_pcm_substream *substream)
{
	struct juli_spec *spec = ice->spec;
	struct snd_pcm_runtime *runtime = substream->runtime;
	int rate;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK ||
			!ice->is_spdif_master(ice))
		return;
	rate = snd_ak4114_external_rate(spec->ak4114);
	if (rate >= runtime->hw.rate_min && rate <= runtime->hw.rate_max) {
		/* pin the runtime to the detected external rate */
		runtime->hw.rate_min = rate;
		runtime->hw.rate_max = rate;
	}
}

/*
 * AK4358 section
 */
static void juli_akm_lock(struct snd_akm4xxx *ak, int chip)
{
}

static void juli_akm_unlock(struct snd_akm4xxx *ak, int chip)
{
}

/* AK4358 register write; only chip 0 exists on this card. */
static void juli_akm_write(struct snd_akm4xxx *ak, int chip,
			   unsigned char addr, unsigned char data)
{
	struct snd_ice1712 *ice = ak->private_data[0];

	if (snd_BUG_ON(chip))
		return;
	snd_vt1724_write_i2c(ice, AK4358_ADDR, addr, data);
}

/*
 * change the rate of envy24HT, AK4358, AK5385
 */
static void juli_akm_set_rate_val(struct snd_akm4xxx *ak, unsigned int rate)
{
	unsigned char old, tmp, ak4358_dfs;
	unsigned int ak5385_pins, old_gpio, new_gpio;
	struct snd_ice1712 *ice = ak->private_data[0];
	struct juli_spec *spec = ice->spec;

	if (rate == 0)  /* no hint - S/PDIF input is master or the new
			   spdif input rate undetected, simply return */
		return;

	/* adjust DFS on codecs */
	if (rate > 96000)  {
		ak4358_dfs = 2;
		ak5385_pins = GPIO_AK5385A_DFS1 | GPIO_AK5385A_CKS0;
	} else if (rate > 48000) {
		ak4358_dfs = 1;
		ak5385_pins = GPIO_AK5385A_DFS0;
	} else {
		ak4358_dfs = 0;
		ak5385_pins = 0;
	}
	/* AK5385 first, since it requires cold reset affecting both codecs */
	old_gpio = ice->gpio.get_data(ice);
	new_gpio =  (old_gpio & ~GPIO_AK5385A_MASK) | ak5385_pins;
	/* printk(KERN_DEBUG "JULI - ak5385 set_rate_val: new gpio 0x%x\n",
		new_gpio); */
	ice->gpio.set_data(ice, new_gpio);

	/* cold reset */
	old = inb(ICEMT1724(ice, AC97_CMD));
	outb(old | VT1724_AC97_COLD, ICEMT1724(ice, AC97_CMD));
	udelay(1);
	outb(old & ~VT1724_AC97_COLD, ICEMT1724(ice, AC97_CMD));

	/* AK4358 */
	/* set new value, reset DFS */
	tmp = snd_akm4xxx_get(ak, 0, 2);
	snd_akm4xxx_reset(ak, 1);
	tmp = snd_akm4xxx_get(ak, 0, 2);
	tmp &= ~(0x03 << 4);
	tmp |= ak4358_dfs << 4;
	snd_akm4xxx_set(ak, 0, 2, tmp);
	snd_akm4xxx_reset(ak, 0);

	/* reinit ak4114 */
	snd_ak4114_reinit(spec->ak4114);
}

#define AK_DAC(xname, xch)	{ .name = xname, .num_channels = xch }
#define PCM_VOLUME		"PCM Playback Volume"
#define MONITOR_AN_IN_VOLUME	"Monitor Analog In Volume"
#define MONITOR_DIG_IN_VOLUME	"Monitor Digital In Volume"
#define MONITOR_DIG_OUT_VOLUME	"Monitor Digital Out Volume"

static const struct snd_akm4xxx_dac_channel juli_dac[] = {
	AK_DAC(PCM_VOLUME, 2),
	AK_DAC(MONITOR_AN_IN_VOLUME, 2),
	AK_DAC(MONITOR_DIG_OUT_VOLUME, 2),
	AK_DAC(MONITOR_DIG_IN_VOLUME, 2),
};

static struct snd_akm4xxx akm_juli_dac __devinitdata = {
	.type = SND_AK4358,
	.num_dacs = 8,	/* DAC1 - analog out
			   DAC2 - analog in monitor
			   DAC3 - digital out monitor
			   DAC4 - digital in monitor */
	.ops = {
		.lock = juli_akm_lock,
		.unlock = juli_akm_unlock,
		.write = juli_akm_write,
		.set_rate_val = juli_akm_set_rate_val
	},
	.dac_info = juli_dac,
};

#define juli_mute_info		snd_ctl_boolean_mono_info

/* Read a GPIO-backed mute switch; GPIO_MUTE_CONTROL has inverted polarity. */
static int juli_mute_get(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int val;

	val = ice->gpio.get_data(ice) & (unsigned int) kcontrol->private_value;
	if (kcontrol->private_value == GPIO_MUTE_CONTROL)
		/* val 0 = signal on */
		ucontrol->value.integer.value[0] = (val) ? 0 : 1;
	else
		/* val 1 = signal on */
		ucontrol->value.integer.value[0] = (val) ? 1 : 0;
	return 0;
}

/* Write a GPIO-backed mute switch; master mute also smutes the DAC. */
static int juli_mute_put(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	struct snd_ice1712 *ice = snd_kcontrol_chip(kcontrol);
	unsigned int old_gpio, new_gpio;

	old_gpio = ice->gpio.get_data(ice);
	if (ucontrol->value.integer.value[0]) {
		/* unmute */
		if (kcontrol->private_value == GPIO_MUTE_CONTROL) {
			/* 0 = signal on */
			new_gpio = old_gpio & ~GPIO_MUTE_CONTROL;
			/* un-smuting DAC */
			snd_akm4xxx_write(ice->akm, 0, 0x01, 0x01);
		} else
			/* 1 = signal on */
			new_gpio =  old_gpio |
				(unsigned int) kcontrol->private_value;
	} else {
		/* mute */
		if (kcontrol->private_value == GPIO_MUTE_CONTROL) {
			/* 1 = signal off */
			new_gpio = old_gpio | GPIO_MUTE_CONTROL;
			/* smuting DAC */
			snd_akm4xxx_write(ice->akm, 0, 0x01, 0x03);
		} else
			/* 0 = signal off */
			new_gpio = old_gpio &
				~((unsigned int) kcontrol->private_value);
	}
	/* printk(KERN_DEBUG
		"JULI - mute/unmute: control_value: 0x%x, old_gpio: 0x%x, "
		"new_gpio 0x%x\n",
		(unsigned int)ucontrol->value.integer.value[0], old_gpio,
		new_gpio); */
	if (old_gpio != new_gpio) {
		ice->gpio.set_data(ice, new_gpio);
		return 1;
	}
	/* no change */
	return 0;
}

static struct snd_kcontrol_new juli_mute_controls[] __devinitdata = {
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Master Playback Switch",
		.info = juli_mute_info,
		.get = juli_mute_get,
		.put = juli_mute_put,
		.private_value = GPIO_MUTE_CONTROL,
	},
	/* Although the following functionality respects the succinct NDA'd
	 * documentation from the card manufacturer, and the same way of
	 * operation is coded in OSS Juli driver, only Digital Out monitor
	 * seems to work. Surprisingly, Analog input monitor outputs Digital
	 * output data. The two are independent, as enabling both doubles
	 * volume of the monitor sound.
	 *
	 * Checking traces on the board suggests the functionality described
	 * by the manufacturer is correct - I2S from ADC and AK4114
	 * go to ICE as well as to Xilinx, I2S inputs of DAC2,3,4 (the monitor
	 * inputs) are fed from Xilinx.
	 *
	 * I even checked traces on board and coded a support in driver for
	 * an alternative possibility - the unused I2S ICE output channels
	 * switched to HW-IN/SPDIF-IN and providing the monitoring signal to
	 * the DAC - to no avail. The I2S outputs seem to be unconnected.
	 *
	 * The windows driver supports the monitoring correctly.
	 */
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Monitor Analog In Switch",
		.info = juli_mute_info,
		.get = juli_mute_get,
		.put = juli_mute_put,
		.private_value = GPIO_ANAIN_MONITOR,
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Monitor Digital Out Switch",
		.info = juli_mute_info,
		.get = juli_mute_get,
		.put = juli_mute_put,
		.private_value = GPIO_DIGOUT_MONITOR,
	},
	{
		.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
		.name = "Monitor Digital In Switch",
		.info = juli_mute_info,
		.get = juli_mute_get,
		.put = juli_mute_put,
		.private_value = GPIO_DIGIN_MONITOR,
	},
};

/* Volume controls slaved to the virtual master volume. */
static char *slave_vols[] __devinitdata = {
	PCM_VOLUME,
	MONITOR_AN_IN_VOLUME,
	MONITOR_DIG_IN_VOLUME,
	MONITOR_DIG_OUT_VOLUME,
	NULL
};

static __devinitdata
DECLARE_TLV_DB_SCALE(juli_master_db_scale, -6350, 50, 1);

/* Look up a mixer control by name on the given card. */
static struct snd_kcontrol __devinit *ctl_find(struct snd_card *card,
					       const char *name)
{
	struct snd_ctl_elem_id sid;
	memset(&sid, 0, sizeof(sid));
	/* FIXME: strcpy is bad. */
	strcpy(sid.name, name);
	sid.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	return snd_ctl_find_id(card, &sid);
}

/* Attach each named control in list (if present) as a slave of master. */
static void __devinit add_slaves(struct snd_card *card,
				 struct snd_kcontrol *master, char **list)
{
	for (; *list; list++) {
		struct snd_kcontrol *slave = ctl_find(card, *list);
		/* printk(KERN_DEBUG "add_slaves - %s\n", *list); */
		if (slave) {
			/* printk(KERN_DEBUG "slave %s found\n", *list); */
			snd_ctl_add_slave(master, slave);
		}
	}
}

/* Build all Juli-specific mixer controls (AKM, mutes, vmaster, AK4114). */
static int __devinit juli_add_controls(struct snd_ice1712 *ice)
{
	struct juli_spec *spec = ice->spec;
	int err;
	unsigned int i;
	struct snd_kcontrol *vmaster;

	err = snd_ice1712_akm4xxx_build_controls(ice);
	if (err < 0)
		return err;

	for (i = 0; i < ARRAY_SIZE(juli_mute_controls); i++) {
		err = snd_ctl_add(ice->card,
				snd_ctl_new1(&juli_mute_controls[i], ice));
		if (err < 0)
			return err;
	}
	/* Create virtual master control */
	vmaster = snd_ctl_make_virtual_master("Master Playback Volume",
					      juli_master_db_scale);
	if (!vmaster)
		return -ENOMEM;
	add_slaves(ice->card, vmaster, slave_vols);
	err = snd_ctl_add(ice->card, vmaster);
	if (err < 0)
		return err;

	/* only capture SPDIF over AK4114 */
	err = snd_ak4114_build(spec->ak4114, NULL,
			ice->pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream);
	if (err < 0)
		return err;
	return 0;
}

/*
 * suspend/resume
 * */

#ifdef CONFIG_PM
static int juli_resume(struct snd_ice1712 *ice)
{
	struct snd_akm4xxx *ak = ice->akm;
	struct juli_spec *spec = ice->spec;
	/* akm4358 un-reset, un-mute */
	snd_akm4xxx_reset(ak, 0);
	/* reinit ak4114 */
	snd_ak4114_reinit(spec->ak4114);
	return 0;
}

static int juli_suspend(struct snd_ice1712 *ice)
{
	struct snd_akm4xxx *ak = ice->akm;
	/* akm4358 reset and soft-mute */
	snd_akm4xxx_reset(ak, 1);
	return 0;
}
#endif

/*
 * initialize the chip
 */

/* Clock is S/PDIF master when the internal-clock GPIO bit is clear. */
static inline int juli_is_spdif_master(struct snd_ice1712 *ice)
{
	return (ice->gpio.get_data(ice) & GPIO_INTERNAL_CLOCK) ? 0 : 1;
}

/* Translate the current GPIO rate bits back to a sample rate (0 if unknown). */
static unsigned int juli_get_rate(struct snd_ice1712 *ice)
{
	int i;
	unsigned char result;

	result =  ice->gpio.get_data(ice) & GPIO_RATE_MASK;
	for (i = 0; i < ARRAY_SIZE(gpio_vals); i++)
		if (gpio_vals[i] == result)
			return juli_rates[i];
	return 0;
}

/* setting new rate */
static void juli_set_rate(struct snd_ice1712 *ice, unsigned int rate)
{
	unsigned int old, new;
	unsigned char val;

	old = ice->gpio.get_data(ice);
	new =  (old & ~GPIO_RATE_MASK) | get_gpio_val(rate);
	/* printk(KERN_DEBUG "JULI - set_rate: old %x, new %x\n",
			old & GPIO_RATE_MASK,
			new & GPIO_RATE_MASK); */

	ice->gpio.set_data(ice, new);
	/* switching to external clock - supplied by external circuits */
	val = inb(ICEMT1724(ice, RATE));
	outb(val | VT1724_SPDIF_MASTER, ICEMT1724(ice, RATE));
}

static inline unsigned char juli_set_mclk(struct snd_ice1712 *ice,
					  unsigned int rate)
{
	/* no change in master clock */
	return 0;
}

/* setting clock to external - SPDIF */
static int juli_set_spdif_clock(struct snd_ice1712 *ice, int type)
{
	unsigned int old;
	old = ice->gpio.get_data(ice);
	/* external clock (= 0), multiply 1x, 48kHz */
	ice->gpio.set_data(ice, (old & ~GPIO_RATE_MASK) | GPIO_MULTI_1X |
			GPIO_FREQ_48KHZ);
	return 0;
}

/* Called when ak4114 detects change in the input SPDIF stream */
static void juli_ak4114_change(struct ak4114 *ak4114, unsigned char c0,
			       unsigned char c1)
{
	struct snd_ice1712 *ice = ak4114->change_callback_private;
	int rate;
	if (ice->is_spdif_master(ice) && c1) {
		/* only for SPDIF master mode, rate was changed */
		rate = snd_ak4114_external_rate(ak4114);
		/* printk(KERN_DEBUG "ak4114 - input rate changed to %d\n",
				rate); */
		juli_akm_set_rate_val(ice->akm, rate);
	}
}

/* Chip init: create AK4114, set up the AKM DAC and the clock callbacks. */
static int __devinit juli_init(struct snd_ice1712 *ice)
{
	static const unsigned char ak4114_init_vals[] = {
		/* AK4117_REG_PWRDN */	AK4114_RST | AK4114_PWN |
					AK4114_OCKS0 | AK4114_OCKS1,
		/* AK4114_REQ_FORMAT */	AK4114_DIF_I24I2S,
		/* AK4114_REG_IO0 */	AK4114_TX1E,
		/* AK4114_REG_IO1 */	AK4114_EFH_1024 | AK4114_DIT |
					AK4114_IPS(1),
		/* AK4114_REG_INT0_MASK */	0,
		/* AK4114_REG_INT1_MASK */	0
	};
	static const unsigned char ak4114_init_txcsb[] = {
		0x41, 0x02, 0x2c, 0x00, 0x00
	};
	int err;
	struct juli_spec *spec;
	struct snd_akm4xxx *ak;

	spec = kzalloc(sizeof(*spec), GFP_KERNEL);
	if (!spec)
		return -ENOMEM;
	ice->spec = spec;

	err = snd_ak4114_create(ice->card,
				juli_ak4114_read,
				juli_ak4114_write,
				ak4114_init_vals, ak4114_init_txcsb,
				ice, &spec->ak4114);
	if (err < 0)
		return err;
	/* callback for codecs rate setting */
	spec->ak4114->change_callback = juli_ak4114_change;
	spec->ak4114->change_callback_private = ice;
	/* AK4114 in Juli can detect external rate correctly */
	spec->ak4114->check_flags = 0;

#if 0
/*
 * it seems that the analog daughter board detection does not work reliably, so
 * force the analog flag; it should be very rare (if ever) to come at Juli@
 * used without the analog daughter board
 */
	spec->analog = (ice->gpio.get_data(ice) & GPIO_ANALOG_PRESENT) ? 0 : 1;
#else
	spec->analog = 1;
#endif

	if (spec->analog) {
		printk(KERN_INFO "juli@: analog I/O detected\n");
		ice->num_total_dacs = 2;
		ice->num_total_adcs = 2;

		ice->akm = kzalloc(sizeof(struct snd_akm4xxx), GFP_KERNEL);
		ak = ice->akm;
		if (!ak)
			return -ENOMEM;
		ice->akm_codecs = 1;
		err = snd_ice1712_akm4xxx_init(ak, &akm_juli_dac, NULL, ice);
		if (err < 0)
			return err;
	}

	/* juli is clocked by Xilinx array */
	ice->hw_rates = &juli_rates_info;
	ice->is_spdif_master = juli_is_spdif_master;
	ice->get_rate = juli_get_rate;
	ice->set_rate = juli_set_rate;
	ice->set_mclk = juli_set_mclk;
	ice->set_spdif_clock = juli_set_spdif_clock;

	ice->spdif.ops.open = juli_spdif_in_open;

#ifdef CONFIG_PM
	ice->pm_resume = juli_resume;
	ice->pm_suspend = juli_suspend;
	ice->pm_suspend_enabled = 1;
#endif

	return 0;
}

/*
 * Juli@ boards don't provide the EEPROM data except for the vendor IDs.
 * hence the driver needs to set it up properly.
 */

static unsigned char juli_eeprom[] __devinitdata = {
	[ICE_EEP2_SYSCONF]     = 0x2b,	/* clock 512, mpu401, 1xADC, 1xDACs,
					   SPDIF in */
	[ICE_EEP2_ACLINK]      = 0x80,	/* I2S */
	[ICE_EEP2_I2S]         = 0xf8,	/* vol, 96k, 24bit, 192k */
	[ICE_EEP2_SPDIF]       = 0xc3,	/* out-en, out-int, spdif-in */
	[ICE_EEP2_GPIO_DIR]    = 0x9f,	/* 5, 6:inputs; 7, 4-0 outputs*/
	[ICE_EEP2_GPIO_DIR1]   = 0xff,
	[ICE_EEP2_GPIO_DIR2]   = 0x7f,
	[ICE_EEP2_GPIO_MASK]   = 0x60,	/* 5, 6: locked; 7, 4-0 writable */
	[ICE_EEP2_GPIO_MASK1]  = 0x00,	/* 0-7 writable */
	[ICE_EEP2_GPIO_MASK2]  = 0x7f,
	[ICE_EEP2_GPIO_STATE]  = GPIO_FREQ_48KHZ | GPIO_MULTI_1X |
	       GPIO_INTERNAL_CLOCK,	/* internal clock, multiple 1x, 48kHz*/
	[ICE_EEP2_GPIO_STATE1] = 0x00,	/* unmuted */
	[ICE_EEP2_GPIO_STATE2] = 0x00,
};

/* entry point */
struct snd_ice1712_card_info snd_vt1724_juli_cards[] __devinitdata = {
	{
		.subvendor = VT1724_SUBDEVICE_JULI,
		.name = "ESI Juli@",
		.model = "juli",
		.chip_init = juli_init,
		.build_controls = juli_add_controls,
		.eeprom_size = sizeof(juli_eeprom),
		.eeprom_data = juli_eeprom,
	},
	{ } /* terminator */
};
gpl-2.0
syhost/A810S_KERNEL_3.4
drivers/uwb/i1480/dfu/phy.c
14608
6445
/*
 * Intel Wireless UWB Link 1480
 * PHY parameters upload
 *
 * Copyright (C) 2005-2006 Intel Corporation
 * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 *
 * Upload of PHY parameters to the PHY through the UWB Radio Control
 * interface: the data is pushed over the MPI interface with HWA-like
 * commands, then the PHY status register is read back to verify the
 * part came up correctly.
 */
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/usb/wusb.h>
#include "i1480-dfu.h"


/**
 * Write a value array to an address of the MPI interface
 *
 * @i1480: Device descriptor
 * @data:  Data array to write, organized as (ADDRESS, VALUE) pairs where
 *         ADDRESS is BE 16-bit unsigned and VALUE is 8-bit unsigned, so
 *         @size has to be a multiple of three.
 * @size:  Size of the data array in bytes (max 480, the MPI packet limit)
 * @returns: 0 if ok, < 0 errno code on error.
 *
 * Builds the command in i1480->cmd_buf and expects the confirmation
 * event in i1480->evt_buf.
 */
static
int i1480_mpi_write(struct i1480 *i1480, const void *data, size_t size)
{
	struct i1480_cmd_mpi_write *cmd = i1480->cmd_buf;
	struct i1480_evt_confirm *reply = i1480->evt_buf;
	int rc;

	BUG_ON(size > 480);
	cmd->rccb.bCommandType = i1480_CET_VS1;
	cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_WRITE);
	cmd->size = cpu_to_le16(size);
	memcpy(cmd->data, data, size);
	reply->rceb.bEventType = i1480_CET_VS1;
	reply->rceb.wEvent = i1480_CMD_MPI_WRITE;
	rc = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply));
	if (rc < 0)
		return rc;
	if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
		dev_err(i1480->dev, "MPI-WRITE: command execution failed: %d\n",
			reply->bResultCode);
		rc = -EIO;
	}
	return rc;
}


/**
 * Read a value array to from an address of the MPI interface
 *
 * @i1480:   Device descriptor
 * @data:    where to place the read array
 * @srcaddr: Where to read from
 * @size:    Size of the data read array
 * @returns: 0 if ok, < 0 errno code on error.
 *
 * The command data array is organized into pairs ADDR0 ADDR1..., and
 * the returned data in ADDR0 VALUE0 ADDR1 VALUE1...
 *
 * We generate the command array to be a sequential read and then
 * rearrange the result; each returned (page, offset) is cross-checked
 * against the address we asked for.
 *
 * Uses i1480->cmd_buf for the command and i1480->evt_buf for the reply.
 * As the reply has to fit in 512 bytes (i1480->evt_buffer), the max
 * amount of values we can read is (512 - sizeof(*reply)) / 3.
 */
static
int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size)
{
	struct i1480_cmd_mpi_read *cmd = i1480->cmd_buf;
	struct i1480_evt_mpi_read *reply = i1480->evt_buf;
	unsigned i;
	int rc;

	/* poison the buffers so stale data is easy to spot while debugging */
	memset(i1480->cmd_buf, 0x69, 512);
	memset(i1480->evt_buf, 0x69, 512);
	BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3);
	cmd->rccb.bCommandType = i1480_CET_VS1;
	cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_READ);
	cmd->size = cpu_to_le16(3*size);
	for (i = 0; i < size; i++) {
		cmd->data[i].page = (srcaddr + i) >> 8;
		cmd->data[i].offset = (srcaddr + i) & 0xff;
	}
	reply->rceb.bEventType = i1480_CET_VS1;
	reply->rceb.wEvent = i1480_CMD_MPI_READ;
	rc = i1480_cmd(i1480, "MPI-READ", sizeof(*cmd) + 2*size,
			sizeof(*reply) + 3*size);
	if (rc < 0)
		return rc;
	if (reply->bResultCode != UWB_RC_RES_SUCCESS) {
		dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n",
			reply->bResultCode);
		return -EIO;
	}
	for (i = 0; i < size; i++) {
		if (reply->data[i].page != (srcaddr + i) >> 8)
			dev_err(i1480->dev, "MPI-READ: page inconsistency at "
				"index %u: expected 0x%02x, got 0x%02x\n", i,
				(srcaddr + i) >> 8, reply->data[i].page);
		if (reply->data[i].offset != ((srcaddr + i) & 0x00ff))
			dev_err(i1480->dev, "MPI-READ: offset inconsistency at "
				"index %u: expected 0x%02x, got 0x%02x\n", i,
				(srcaddr + i) & 0x00ff,
				reply->data[i].offset);
		data[i] = reply->data[i].value;
	}
	return 0;
}


/**
 * Upload a PHY firmware, wait for it to start
 *
 * @i1480:   Device instance
 * @returns: 0 if ok, < 0 errno code on error.
 *
 * We assume the MAC fw is up and running. This means we can use the
 * MPI interface to write the PHY firmware. Once done, we issue an
 * MBOA Reset, which will force the MAC to reset and reinitialize the
 * PHY. If that works, we are ready to go.
 *
 * Max packet size for the MPI write is 512, so the max buffer is 480
 * (which gives us 160 byte triads of MSB, LSB and VAL for the data).
 */
int i1480_phy_fw_upload(struct i1480 *i1480)
{
	const size_t MAX_BLK_SIZE = 480;	/* 160 triads */
	const struct firmware *fw;
	const char *data_itr, *data_top;
	size_t data_size;
	u8 phy_stat;
	int result;

	result = request_firmware(&fw, i1480->phy_fw_name, i1480->dev);
	if (result < 0)
		goto out;
	/* Loop writing data in chunks as big as possible until done. */
	data_top = fw->data + fw->size;
	for (data_itr = fw->data; data_itr < data_top;
	     data_itr += MAX_BLK_SIZE) {
		data_size = min(MAX_BLK_SIZE, (size_t) (data_top - data_itr));
		result = i1480_mpi_write(i1480, data_itr, data_size);
		if (result < 0)
			goto error_mpi_write;
	}
	/* Read MPI page 0, offset 6; if 0, PHY was initialized correctly. */
	result = i1480_mpi_read(i1480, &phy_stat, 0x0006, 1);
	if (result < 0) {
		dev_err(i1480->dev, "PHY: can't get status: %d\n", result);
		goto error_mpi_status;
	}
	if (phy_stat != 0) {
		result = -ENODEV;
		dev_info(i1480->dev, "error, PHY not ready: %u\n", phy_stat);
		goto error_phy_status;
	}
	dev_info(i1480->dev, "PHY fw '%s': uploaded\n", i1480->phy_fw_name);
error_phy_status:
error_mpi_status:
error_mpi_write:
	release_firmware(fw);
	if (result < 0)
		dev_err(i1480->dev, "PHY fw '%s': failed to upload (%d), "
			"power cycle device\n", i1480->phy_fw_name, result);
out:
	return result;
}
gpl-2.0
sudosurootdev/caf_kernels
drivers/uwb/i1480/dfu/phy.c
14608
6445
/* * Intel Wireless UWB Link 1480 * PHY parameters upload * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * Code for uploading the PHY parameters to the PHY through the UWB * Radio Control interface. * * We just send the data through the MPI interface using HWA-like * commands and then reset the PHY to make sure it is ok. */ #include <linux/delay.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/usb/wusb.h> #include "i1480-dfu.h" /** * Write a value array to an address of the MPI interface * * @i1480: Device descriptor * @data: Data array to write * @size: Size of the data array * @returns: 0 if ok, < 0 errno code on error. * * The data array is organized into pairs: * * ADDRESS VALUE * * ADDRESS is BE 16 bit unsigned, VALUE 8 bit unsigned. Size thus has * to be a multiple of three. 
*/ static int i1480_mpi_write(struct i1480 *i1480, const void *data, size_t size) { int result; struct i1480_cmd_mpi_write *cmd = i1480->cmd_buf; struct i1480_evt_confirm *reply = i1480->evt_buf; BUG_ON(size > 480); result = -ENOMEM; cmd->rccb.bCommandType = i1480_CET_VS1; cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_WRITE); cmd->size = cpu_to_le16(size); memcpy(cmd->data, data, size); reply->rceb.bEventType = i1480_CET_VS1; reply->rceb.wEvent = i1480_CMD_MPI_WRITE; result = i1480_cmd(i1480, "MPI-WRITE", sizeof(*cmd) + size, sizeof(*reply)); if (result < 0) goto out; if (reply->bResultCode != UWB_RC_RES_SUCCESS) { dev_err(i1480->dev, "MPI-WRITE: command execution failed: %d\n", reply->bResultCode); result = -EIO; } out: return result; } /** * Read a value array to from an address of the MPI interface * * @i1480: Device descriptor * @data: where to place the read array * @srcaddr: Where to read from * @size: Size of the data read array * @returns: 0 if ok, < 0 errno code on error. * * The command data array is organized into pairs ADDR0 ADDR1..., and * the returned data in ADDR0 VALUE0 ADDR1 VALUE1... * * We generate the command array to be a sequential read and then * rearrange the result. * * We use the i1480->cmd_buf for the command, i1480->evt_buf for the reply. 
* * As the reply has to fit in 512 bytes (i1480->evt_buffer), the max amount * of values we can read is (512 - sizeof(*reply)) / 3 */ static int i1480_mpi_read(struct i1480 *i1480, u8 *data, u16 srcaddr, size_t size) { int result; struct i1480_cmd_mpi_read *cmd = i1480->cmd_buf; struct i1480_evt_mpi_read *reply = i1480->evt_buf; unsigned cnt; memset(i1480->cmd_buf, 0x69, 512); memset(i1480->evt_buf, 0x69, 512); BUG_ON(size > (i1480->buf_size - sizeof(*reply)) / 3); result = -ENOMEM; cmd->rccb.bCommandType = i1480_CET_VS1; cmd->rccb.wCommand = cpu_to_le16(i1480_CMD_MPI_READ); cmd->size = cpu_to_le16(3*size); for (cnt = 0; cnt < size; cnt++) { cmd->data[cnt].page = (srcaddr + cnt) >> 8; cmd->data[cnt].offset = (srcaddr + cnt) & 0xff; } reply->rceb.bEventType = i1480_CET_VS1; reply->rceb.wEvent = i1480_CMD_MPI_READ; result = i1480_cmd(i1480, "MPI-READ", sizeof(*cmd) + 2*size, sizeof(*reply) + 3*size); if (result < 0) goto out; if (reply->bResultCode != UWB_RC_RES_SUCCESS) { dev_err(i1480->dev, "MPI-READ: command execution failed: %d\n", reply->bResultCode); result = -EIO; } for (cnt = 0; cnt < size; cnt++) { if (reply->data[cnt].page != (srcaddr + cnt) >> 8) dev_err(i1480->dev, "MPI-READ: page inconsistency at " "index %u: expected 0x%02x, got 0x%02x\n", cnt, (srcaddr + cnt) >> 8, reply->data[cnt].page); if (reply->data[cnt].offset != ((srcaddr + cnt) & 0x00ff)) dev_err(i1480->dev, "MPI-READ: offset inconsistency at " "index %u: expected 0x%02x, got 0x%02x\n", cnt, (srcaddr + cnt) & 0x00ff, reply->data[cnt].offset); data[cnt] = reply->data[cnt].value; } result = 0; out: return result; } /** * Upload a PHY firmware, wait for it to start * * @i1480: Device instance * @fw_name: Name of the file that contains the firmware * * We assume the MAC fw is up and running. This means we can use the * MPI interface to write the PHY firmware. Once done, we issue an * MBOA Reset, which will force the MAC to reset and reinitialize the * PHY. If that works, we are ready to go. 
* * Max packet size for the MPI write is 512, so the max buffer is 480 * (which gives us 160 byte triads of MSB, LSB and VAL for the data). */ int i1480_phy_fw_upload(struct i1480 *i1480) { int result; const struct firmware *fw; const char *data_itr, *data_top; const size_t MAX_BLK_SIZE = 480; /* 160 triads */ size_t data_size; u8 phy_stat; result = request_firmware(&fw, i1480->phy_fw_name, i1480->dev); if (result < 0) goto out; /* Loop writing data in chunks as big as possible until done. */ for (data_itr = fw->data, data_top = data_itr + fw->size; data_itr < data_top; data_itr += MAX_BLK_SIZE) { data_size = min(MAX_BLK_SIZE, (size_t) (data_top - data_itr)); result = i1480_mpi_write(i1480, data_itr, data_size); if (result < 0) goto error_mpi_write; } /* Read MPI page 0, offset 6; if 0, PHY was initialized correctly. */ result = i1480_mpi_read(i1480, &phy_stat, 0x0006, 1); if (result < 0) { dev_err(i1480->dev, "PHY: can't get status: %d\n", result); goto error_mpi_status; } if (phy_stat != 0) { result = -ENODEV; dev_info(i1480->dev, "error, PHY not ready: %u\n", phy_stat); goto error_phy_status; } dev_info(i1480->dev, "PHY fw '%s': uploaded\n", i1480->phy_fw_name); error_phy_status: error_mpi_status: error_mpi_write: release_firmware(fw); if (result < 0) dev_err(i1480->dev, "PHY fw '%s': failed to upload (%d), " "power cycle device\n", i1480->phy_fw_name, result); out: return result; }
gpl-2.0
acheron1502/android_kernel_BLU_BLU_PURE_XL
drivers/misc/mediatek/aee/ipanic/ipanic_rom.c
17
18399
#include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> #include <asm/memory.h> #include <asm/cacheflush.h> #include <linux/kdebug.h> #include <linux/module.h> #include <linux/mrdump.h> #include <linux/mtk_ram_console.h> #include <linux/mrdump.h> #include <mach/wd_api.h> #include "ipanic.h" static u32 ipanic_iv = 0xaabbccdd; static spinlock_t ipanic_lock; struct ipanic_ops *ipanic_ops; typedef int (*fn_next) (void *data, unsigned char *buffer, size_t sz_buf); static bool ipanic_enable = 1; extern void mrdump_mini_per_cpu_regs(int cpu, struct pt_regs *regs); extern void mrdump_mini_ke_cpu_regs(struct pt_regs *regs); extern void mrdump_mini_add_misc(unsigned long addr, unsigned long size, unsigned long start, char *name); extern void mrdump_mini_ipanic_done(void); extern int mrdump_task_info(unsigned char *buffer, size_t sz_buf); extern void aee_rr_rec_exp_type(unsigned int type); extern unsigned int aee_rr_curr_exp_type(void); int __weak ipanic_atflog_buffer(void *data, unsigned char *buffer, size_t sz_buf) { return 0; } #if 1 void ipanic_block_scramble(u8 *buf, int buflen) { int i; u32 *p = (u32 *) buf; for (i = 0; i < buflen; i += 4, p++) { *p = *p ^ ipanic_iv; } } #else void ipanic_block_scramble(u8 *buf, int buflen) { } #endif static void ipanic_kick_wdt(void) { int res = 0; struct wd_api *wd_api = NULL; res = get_wd_api(&wd_api); if (res == 0) wd_api->wd_restart(WD_TYPE_NOLOCK); } void register_ipanic_ops(struct ipanic_ops *ops) { #ifndef IPANIC_USERSPACE_READ ipanic_ops = ops; #endif } struct aee_oops *ipanic_oops_copy(void) { if (ipanic_ops) { return ipanic_ops->oops_copy(); } else { return NULL; } } EXPORT_SYMBOL(ipanic_oops_copy); void ipanic_oops_free(struct aee_oops *oops, int erase) { if (ipanic_ops) { ipanic_ops->oops_free(oops, erase); } } EXPORT_SYMBOL(ipanic_oops_free); static int ipanic_alog_buffer(void *data, unsigned char *buffer, size_t sz_buf); static int ipanic_current_task_info(void *data, unsigned char *buffer, size_t sz_buf) 
{ return mrdump_task_info(buffer, sz_buf); } #ifdef CONFIG_MTK_MMPROFILE_SUPPORT extern unsigned int MMProfileGetDumpSize(void); extern void MMProfileGetDumpBuffer(unsigned int Start, unsigned int *pAddr, unsigned int *pSize); static int ipanic_mmprofile(void *data, unsigned char *buffer, size_t sz_buf) { int errno = 0; static unsigned int index; static unsigned int mmprofile_dump_size; unsigned long pbuf = 0; unsigned int bufsize = 0; if (mmprofile_dump_size == 0) { mmprofile_dump_size = MMProfileGetDumpSize(); if (mmprofile_dump_size == 0 || mmprofile_dump_size > IPANIC_MMPROFILE_LIMIT) { LOGE("%s: INVALID MMProfile size[%x]", __func__, mmprofile_dump_size); return -3; } } MMProfileGetDumpBuffer(index, (unsigned int*)&pbuf, &bufsize); if (bufsize == 0) { errno = 0; } else if (bufsize > sz_buf) { errno = -4; } else { memcpy(buffer, (void *)pbuf, bufsize); index += bufsize; errno = bufsize; } return errno; } #endif const ipanic_dt_op_t ipanic_dt_ops[] = { {"IPANIC_HEADER", 0, NULL}, {"SYS_KERNEL_LOG", __LOG_BUF_LEN, ipanic_klog_buffer}, {"SYS_WDT_LOG", WDT_LOG_LEN, ipanic_klog_buffer}, #ifdef CONFIG_MTK_WQ_DEBUG {"SYS_WQ_LOG", WQ_LOG_LEN, ipanic_klog_buffer}, #else {"SYS_WQ_LOG", 0, NULL}, #endif {"reserved", 0, NULL}, {"reserved", 0, NULL}, {"PROC_CUR_TSK", sizeof(struct aee_process_info), ipanic_current_task_info}, {"_exp_detail.txt", OOPS_LOG_LEN, ipanic_klog_buffer}, {"SYS_MINI_RDUMP", MRDUMP_MINI_BUF_SIZE, NULL}, /* 8 */ #ifdef CONFIG_MTK_MMPROFILE_SUPPORT {"SYS_MMPROFILE", IPANIC_MMPROFILE_LIMIT, ipanic_mmprofile}, #else {"SYS_MMPROFILE", 0, NULL}, #endif {"SYS_MAIN_LOG_RAW", __MAIN_BUF_SIZE, ipanic_alog_buffer}, {"SYS_SYSTEM_LOG_RAW", __SYSTEM_BUF_SIZE, ipanic_alog_buffer}, {"SYS_EVENTS_LOG_RAW", __EVENTS_BUF_SIZE, ipanic_alog_buffer}, {"SYS_RADIO_LOG_RAW", __RADIO_BUF_SIZE, ipanic_alog_buffer}, {"SYS_LAST_LOG", LAST_LOG_LEN, ipanic_klog_buffer}, {"SYS_ATF_LOG", ATF_LOG_SIZE, ipanic_atflog_buffer}, {"reserved", 0, NULL}, /* 16 */ {"reserved", 0, NULL}, 
{"reserved", 0, NULL}, {"reserved", 0, NULL}, {"reserved", 0, NULL}, {"reserved", 0, NULL}, {"reserved", 0, NULL}, {"reserved", 0, NULL}, {"reserved", 0, NULL}, /* 24 */ {"reserved", 0, NULL}, {"reserved", 0, NULL}, {"reserved", 0, NULL}, {"reserved", 0, NULL}, {"reserved", 0, NULL}, {"reserved", 0, NULL}, {"reserved", 0, NULL}, }; static const char IPANIC_DT_STR[][16] = { "Undefined", "kernel log", "main log", "system log", "radio log" }; static const char IPANIC_ERR_MSG[][16] = { "unaligned", "blk alignment" }; static struct ipanic_header ipanic_hdr, *iheader; /* data: indicate dump scope; buffer: dump to; sz_buf: buffer size; return: real size dumped */ static int ipanic_memory_buffer(void *data, unsigned char *buffer, size_t sz_buf) { unsigned long sz_real; struct ipanic_memory_block *mem = (struct ipanic_memory_block *)data; unsigned long start = mem->kstart; unsigned long end = mem->kend; unsigned long pos = mem->pos; if (pos > end || pos < start) { return -1; } sz_real = (end - pos) > sz_buf ? 
sz_buf : (end - pos); memcpy(buffer, (void *)pos, sz_real); mem->pos += sz_real; return sz_real; } static int ipanic_alog_buffer(void *data, unsigned char *buffer, size_t sz_buf) { int rc; rc = panic_dump_android_log(buffer, sz_buf, (unsigned long)data); if (rc < 0) rc = -1; return rc; } inline int ipanic_func_write(fn_next next, void *data, int off, int total, int encrypt) { int errno = 0; size_t size; int start = off; struct ipanic_header *iheader = ipanic_header(); unsigned char *ipanic_buffer = (unsigned char *)(unsigned long)iheader->buf; size_t sz_ipanic_buffer = iheader->bufsize; size_t blksize = iheader->blksize; int many = total > iheader->bufsize; LOGV("off[%x], encrypt[%d]\n", off, encrypt); if (off & (blksize - 1)) return -2; /*invalid offset, not block aligned */ do { errno = next(data, ipanic_buffer, sz_ipanic_buffer); if (IS_ERR(ERR_PTR(errno))) break; size = (size_t) errno; if (size == 0) return (off - start); if ((off - start + size) > total) { LOGE("%s: data oversize(%zx>%x@%x)\n", __func__, off - start + size, total, start); errno = -EFBIG; break; } if (encrypt) ipanic_block_scramble(ipanic_buffer, size); if (size != sz_ipanic_buffer) { memset(ipanic_buffer + size, 0, sz_ipanic_buffer - size); } LOGV("%x@%x\n", size, off); if (ipanic_enable) errno = ipanic_write_size(ipanic_buffer, off, ALIGN(size, blksize)); else errno = -10; if (IS_ERR(ERR_PTR(errno))) break; off += size; if (many == 0) return size; } while (many); return errno; } inline int ipanic_next_write(fn_next next, void *data, int off, int total, int encrypt) { return ipanic_func_write(next, data, off, total, encrypt); } inline int ipanic_mem_write(void *buf, int off, int len, int encrypt) { struct ipanic_memory_block mem_info = { .kstart = (unsigned long)buf, .kend = (unsigned long)buf + len, .pos = (unsigned long)buf, }; return ipanic_next_write(ipanic_memory_buffer, &mem_info, off, len, encrypt); } static int ipanic_header_to_sd(struct ipanic_data_header *header) { int errno = 0; int 
first_write = 0; struct ipanic_header *ipanic_hdr = ipanic_header(); if (!ipanic_hdr->datas) first_write = 1; if (header) { ipanic_hdr->datas |= (0x1 < header->type); header->valid = 1; } if (ipanic_hdr->dhblk == 0 || header == NULL || first_write == 1) errno = ipanic_mem_write(ipanic_hdr, 0, sizeof(struct ipanic_header), 0); if (ipanic_hdr->dhblk && header) errno = ipanic_mem_write(header, header->offset - ipanic_hdr->dhblk, sizeof(struct ipanic_data_header), 0); if (IS_ERR(ERR_PTR(errno))) { LOGW("%s: failed[%x-%d]\n", __func__, header ? header->type : 0, errno); } return errno; } static int ipanic_data_is_valid(int dt) { struct ipanic_header *ipanic_hdr = ipanic_header(); struct ipanic_data_header *dheader = &ipanic_hdr->data_hdr[dt]; return (dheader->valid == 1); } int ipanic_data_to_sd(int dt, void *data) { int errno = 0; int (*next) (void *data, unsigned char *buffer, size_t sz_buf); struct ipanic_header *ipanic_hdr = ipanic_header(); struct ipanic_data_header *dheader = &ipanic_hdr->data_hdr[dt]; if (!ipanic_dt_active(dt) || dheader->valid == 1) return -4; next = ipanic_dt_ops[dt].next; if (next == NULL) { errno = -3; } else { errno = ipanic_next_write(next, data, dheader->offset, dheader->total, dheader->encrypt); } if (IS_ERR(ERR_PTR(errno))) { LOGW("%s: dump %s failed[%d]\n", __func__, dheader->name, errno); if (errno == -EFBIG) dheader->used = dheader->total; else return errno; } else { dheader->used = (size_t) errno; } ipanic_header_to_sd(dheader); return errno; } void ipanic_mrdump_mini(AEE_REBOOT_MODE reboot_mode, const char *msg, ...) 
{ int ret; struct ipanic_header *ipanic_hdr; loff_t sd_offset; struct ipanic_data_header *dheader; va_list ap; /* write sd is unreliable, so gen mrdump header first */ if (ipanic_data_is_valid(IPANIC_DT_MINI_RDUMP)) return; va_start(ap, msg); ipanic_hdr = ipanic_header(); sd_offset = ipanic_hdr->data_hdr[IPANIC_DT_MINI_RDUMP].offset; dheader = &ipanic_hdr->data_hdr[IPANIC_DT_MINI_RDUMP]; ret = mrdump_mini_create_oops_dump(reboot_mode, ipanic_mem_write, sd_offset, msg, ap); va_end(ap); if (!IS_ERR(ERR_PTR(ret))) { dheader->used = ret; ipanic_header_to_sd(dheader); } } void *ipanic_data_from_sd(struct ipanic_data_header *dheader, int encrypt) { void *data; data = ipanic_read_size(dheader->offset, dheader->used); if (data != 0 && encrypt != 0) ipanic_block_scramble((unsigned char *)data, dheader->used); return data; } struct ipanic_header *ipanic_header_from_sd(unsigned int offset, unsigned int magic) { struct ipanic_data_header *dheader; int dt; char str[256]; size_t size = 0; struct ipanic_header *header; struct ipanic_data_header dheader_header = { .type = IPANIC_DT_HEADER, .offset = offset, .used = sizeof(struct ipanic_header), }; header = (struct ipanic_header *)ipanic_data_from_sd(&dheader_header, 0); if (IS_ERR_OR_NULL((void *)header)) { LOGD("read header failed[%ld]\n", PTR_ERR((void *)header)); header = NULL; } else if (header->magic != magic) { LOGD("no ipanic data[%x]\n", header->magic); kfree(header); header = NULL; ipanic_erase(); } else { for (dt = IPANIC_DT_HEADER + 1; dt < IPANIC_DT_RESERVED31; dt++) { dheader = &header->data_hdr[dt]; if (dheader->valid) { size += snprintf(str + size, 256 - size, "%s[%x@%x],", dheader->name, dheader->used, dheader->offset); } } LOGD("ipanic data available^v^%s^v^\n", str); } return header; } struct aee_oops *ipanic_oops_from_sd(void) { struct aee_oops *oops = NULL; struct ipanic_header *hdr = NULL; struct ipanic_data_header *dheader; char *data; int i; hdr = ipanic_header_from_sd(0, AEE_IPANIC_MAGIC); if (hdr == NULL) 
{ return NULL; } oops = aee_oops_create(AE_DEFECT_FATAL, AE_KE, IPANIC_MODULE_TAG); if (oops == NULL) { LOGE("%s: can not allocate buffer\n", __func__); return NULL; } for (i = IPANIC_DT_HEADER + 1; i < IPANIC_DT_RESERVED31; i++) { dheader = &hdr->data_hdr[i]; if (dheader->valid == 0) { continue; } data = ipanic_data_from_sd(dheader, 1); if (data) { switch (i) { case IPANIC_DT_KERNEL_LOG: oops->console = data; oops->console_len = dheader->used; break; case IPANIC_DT_MINI_RDUMP: oops->mini_rdump = data; oops->mini_rdump_len = dheader->used; break; case IPANIC_DT_MAIN_LOG: oops->android_main = data; oops->android_main_len = dheader->used; break; case IPANIC_DT_SYSTEM_LOG: oops->android_system = data; oops->android_system_len = dheader->used; break; case IPANIC_DT_EVENTS_LOG: /* Todo .. */ break; case IPANIC_DT_RADIO_LOG: oops->android_radio = data; oops->android_radio_len = dheader->used; break; case IPANIC_DT_CURRENT_TSK: memcpy(oops->process_path, data, sizeof(struct aee_process_info)); break; case IPANIC_DT_MMPROFILE: oops->mmprofile = data; oops->mmprofile_len = dheader->used; break; default: LOGI("%s: [%d] NOT USED.\n", __func__, i); } } else { LOGW("%s: read %s failed, %x@%x\n", __func__, dheader->name, dheader->used, dheader->offset); } } return oops; } int ipanic(struct notifier_block *this, unsigned long event, void *ptr) { struct ipanic_data_header *dheader; struct kmsg_dumper dumper; ipanic_atf_log_rec_t atf_log = {ATF_LOG_SIZE, 0, 0}; int dt; int errno; struct ipanic_header *ipanic_hdr; aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_IPANIC_START); aee_rr_rec_exp_type(2); bust_spinlocks(1); spin_lock_irq(&ipanic_lock); aee_disable_api(); mrdump_mini_ke_cpu_regs(NULL); ipanic_mrdump_mini(AEE_REBOOT_MODE_KERNEL_PANIC, "kernel PANIC"); if (!ipanic_data_is_valid(IPANIC_DT_KERNEL_LOG)) { ipanic_klog_region(&dumper); errno = ipanic_data_to_sd(IPANIC_DT_KERNEL_LOG, &dumper); if (errno == -1) aee_nested_printf("$"); } ipanic_klog_region(&dumper); errno = 
ipanic_data_to_sd(IPANIC_DT_OOPS_LOG, &dumper); if (errno == -1) aee_nested_printf("$"); ipanic_data_to_sd(IPANIC_DT_CURRENT_TSK, 0); /* kick wdt after save the most critical infos */ ipanic_kick_wdt(); ipanic_data_to_sd(IPANIC_DT_MAIN_LOG, (void *)1); ipanic_data_to_sd(IPANIC_DT_SYSTEM_LOG, (void *)4); ipanic_data_to_sd(IPANIC_DT_EVENTS_LOG, (void *)2); ipanic_data_to_sd(IPANIC_DT_RADIO_LOG, (void *)3); aee_wdt_dump_info(); ipanic_klog_region(&dumper); ipanic_data_to_sd(IPANIC_DT_WDT_LOG, &dumper); #ifdef CONFIG_MTK_WQ_DEBUG mt_dump_wq_debugger(); #endif ipanic_klog_region(&dumper); ipanic_data_to_sd(IPANIC_DT_WQ_LOG, &dumper); ipanic_data_to_sd(IPANIC_DT_MMPROFILE, 0); ipanic_data_to_sd(IPANIC_DT_ATF_LOG, &atf_log); errno = ipanic_header_to_sd(0); if (!IS_ERR(ERR_PTR(errno))) mrdump_mini_ipanic_done(); ipanic_klog_region(&dumper); ipanic_data_to_sd(IPANIC_DT_LAST_LOG, &dumper); LOGD("ipanic done^_^"); ipanic_hdr = ipanic_header(); for (dt = IPANIC_DT_HEADER + 1; dt < IPANIC_DT_RESERVED31; dt++) { dheader = &ipanic_hdr->data_hdr[dt]; if (dheader->valid) { LOGD("%s[%x@%x],", dheader->name, dheader->used, dheader->offset); } } LOGD("^_^\n"); aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_IPANIC_DONE); return NOTIFY_DONE; } void ipanic_recursive_ke(struct pt_regs *regs, struct pt_regs *excp_regs, int cpu) { int errno; struct kmsg_dumper dumper; aee_nested_printf("minidump\n"); aee_rr_rec_exp_type(3); bust_spinlocks(1); flush_cache_all(); #ifdef __aarch64__ cpu_cache_off(); #else cpu_proc_fin(); #endif mrdump_mini_ke_cpu_regs(excp_regs); mrdump_mini_per_cpu_regs(cpu, regs); flush_cache_all(); ipanic_mrdump_mini(AEE_REBOOT_MODE_NESTED_EXCEPTION, "Nested Panic"); ipanic_data_to_sd(IPANIC_DT_CURRENT_TSK, 0); ipanic_kick_wdt(); ipanic_klog_region(&dumper); ipanic_data_to_sd(IPANIC_DT_KERNEL_LOG, &dumper); errno = ipanic_header_to_sd(0); if (!IS_ERR(ERR_PTR(errno))) mrdump_mini_ipanic_done(); if (ipanic_dt_active(IPANIC_DT_RAM_DUMP)) { aee_nested_printf("RAMDUMP.\n"); 
__mrdump_create_oops_dump(AEE_REBOOT_MODE_NESTED_EXCEPTION, excp_regs, "Nested Panic"); } bust_spinlocks(0); } EXPORT_SYMBOL(ipanic_recursive_ke); struct ipanic_header *ipanic_header(void) { int i; struct ipanic_data_header *dheader; int next_offset; if (iheader) return iheader; iheader = &ipanic_hdr; iheader->magic = AEE_IPANIC_MAGIC; iheader->version = AEE_IPANIC_PHDR_VERSION; if (ipanic_msdc_info(iheader)) { LOGE("ipanic initialize msdc fail."); aee_nested_printf("$"); return NULL; } iheader->size = sizeof(struct ipanic_header); iheader->datas = 0; #if 1 iheader->dhblk = ALIGN(sizeof(struct ipanic_data_header), iheader->blksize); #else iheader->dhblk = 0; #endif next_offset = ALIGN(sizeof(struct ipanic_header), iheader->blksize); for (i = IPANIC_DT_HEADER + 1; i < IPANIC_DT_RESERVED31; i++) { dheader = &iheader->data_hdr[i]; dheader->type = i; dheader->valid = 0; dheader->used = 0; strncpy(dheader->name, ipanic_dt_ops[i].string, 32); if (ipanic_dt_active(i) && ipanic_dt_ops[i].size) { dheader->encrypt = ipanic_dt_encrypt(i); dheader->offset = next_offset + iheader->dhblk; dheader->total = ALIGN(ipanic_dt_ops[i].size, iheader->blksize); if (iheader->partsize < (dheader->offset + dheader->total)) { LOGW("skip %s[%x@%x>%x]\n", dheader->name, dheader->total, dheader->offset, iheader->partsize); dheader->offset = INT_MAX; dheader->total = 0; continue; } next_offset += dheader->total + iheader->dhblk; } else { dheader->offset = INT_MAX; dheader->total = 0; } } return iheader; } EXPORT_SYMBOL(ipanic_header); static void ipanic_oops_done(struct aee_oops *oops, int erase) { if (oops) aee_oops_free(oops); if (erase) ipanic_erase(); } static int ipanic_die(struct notifier_block *self, unsigned long cmd, void *ptr) { struct kmsg_dumper dumper; struct die_args *dargs = (struct die_args *)ptr; aee_disable_api(); aee_rr_rec_fiq_step(AEE_FIQ_STEP_KE_IPANIC_DIE); aee_rr_rec_exp_type(2); mrdump_mini_ke_cpu_regs(dargs->regs); flush_cache_all(); if (aee_rr_curr_exp_type() == 2) /* 
No return if mrdump is enable */ __mrdump_create_oops_dump(AEE_REBOOT_MODE_KERNEL_OOPS, dargs->regs, "Kernel Oops"); smp_send_stop(); ipanic_mrdump_mini(AEE_REBOOT_MODE_KERNEL_PANIC, "kernel Oops"); ipanic_klog_region(&dumper); ipanic_data_to_sd(IPANIC_DT_KERNEL_LOG, &dumper); ipanic_data_to_sd(IPANIC_DT_CURRENT_TSK, dargs->regs); return NOTIFY_DONE; } static struct notifier_block panic_blk = { .notifier_call = ipanic, }; static struct ipanic_ops ipanic_oops_ops = { .oops_copy = ipanic_oops_from_sd, .oops_free = ipanic_oops_done, }; static struct notifier_block die_blk = { .notifier_call = ipanic_die, }; int __init aee_ipanic_init(void) { spin_lock_init(&ipanic_lock); atomic_notifier_chain_register(&panic_notifier_list, &panic_blk); register_die_notifier(&die_blk); register_ipanic_ops(&ipanic_oops_ops); ipanic_log_temp_init(); ipanic_msdc_init(); LOGI("ipanic: startup, partition assgined %s\n", AEE_IPANIC_PLABEL); return 0; } module_init(aee_ipanic_init); module_param(ipanic_enable, bool, S_IRUGO | S_IWUSR);
gpl-2.0
boa19861105/android_444_KitKat_kernel_htc_B2_UHL
fs/fat/move_cluster.c
17
31681
#include <linux/fs.h> #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/module.h> #include <linux/version.h> #include <linux/io.h> #include <linux/kobject.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/netlink.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/dcache.h> #include <linux/mount.h> #include <linux/file.h> #include <linux/fs_struct.h> #include <linux/msdos_fs.h> #include <linux/dirent.h> #include <linux/stat.h> #include <linux/falloc.h> #include <linux/buffer_head.h> #include <linux/namei.h> #include <linux/fadvise.h> #include <net/sock.h> #include <asm/uaccess.h> #include <asm/mach/arch.h> #include <asm/uaccess.h> #include <asm/unistd.h> #include "fat.h" #define FALLOC_FLAG 0xAA #define FAT_READA_SIZE (128 * 1024) #define PKG_COMMAND_FALLOCATE_FILE 1 #define PKG_COMMAND_FALLOCATE_DIR 2 #define PKG_COMMAND_IFCONTIGUOUS 3 #define PKG_COMMAND_GET_SUPERBLOCK 4 #define PKG_COMMAND_GET_BOOT_INFO 5 #define PKG_COMMAND_GET_FILE_INFO 6 #define PKG_COMMAND_GET_FREE_CLUSTER_INFO 7 #define PKG_COMMAND_GET_FILE_REF_COUNT 8 #define PKG_COMMAND_SET_NEW_HEAD 9 #define PKG_COMMAND_GET_CLUSTER_DATA 10 #define PKG_COMMAND_FADVISE 11 #define PKG_COMMAND_FALLOCATE_FILE_OLD 12 #define PKG_COMMAND_GET_CLUSTERS 13 #define PKG_COMMAND_MOVE_CLUSTERS 14 struct bootInfo { u_int BytesPerSec; u_int SecPerClust; u_int ResSectors; u_int FATs; u_int RootDirEnts; u_int Media; u_int FATsmall; u_int SecPerTrack; u_int Heads; u_int32_t Sectors; u_int32_t HiddenSecs; u_int32_t HugeSectors; u_int FSInfo; u_int Backup; u_int32_t RootCl; u_int32_t FSFree; u_int32_t FSNext; u_int flags; int ValidFat; u_int32_t ClustMask; u_int32_t NumClusters; u_int32_t NumSectors; u_int32_t FATsecs; u_int32_t NumFatEntries; u_int ClusterOffset; u_int ClusterSize; u_int NumFiles; u_int NumFree; u_int NumBad; unsigned long BlockSize; }; struct fileInfo { u_int32_t lcn; 
u_int32_t realClusters;		/* cluster count derived from i_size (see fat_get_file_info) */
u_int32_t uncompressedClusters;	/* always written as 0 by fat_get_file_info */
};

/* Request/result for locating a run of free clusters (GET_FREE_CLUSTER_INFO). */
struct freeClusterInfo {
	u_int32_t MinimumLcn;	/* first logical cluster number to scan from */
	u_int32_t MinimumSize;	/* minimum run length required, in clusters */
	u_int32_t BeginLcn;	/* out: first cluster of the run found */
	u_int32_t EndLcn;	/* out: cluster just past the run found */
};

/* Arguments for the fallocate-style commands. */
struct fallocInfo {
	int mode;		/* only FALLOC_FL_KEEP_SIZE is tolerated */
	loff_t offset;
	loff_t len;
	u_int32_t newLcn;	/* 0 = find space automatically; else allocation hint */
};

/* Result for PKG_COMMAND_GET_FILE_REF_COUNT. */
struct refInfo {
	int f_count;
};

/* Argument for PKG_COMMAND_SET_NEW_HEAD: new first cluster of a file. */
struct setNewHeadInfo {
	u_int32_t newLcn;
};

/* Argument for PKG_COMMAND_GET_CLUSTER_DATA: copy raw cluster contents out. */
struct getClusterDataInfo {
	u_int32_t lcnHead;
	u_int32_t lcnNums;	/* expected number of clusters in the chain */
	unsigned char *data;	/* user-space destination buffer */
};

/* Argument for PKG_COMMAND_GET_CLUSTERS: dump a file's cluster chain. */
struct getClustersInfo {
	u_int32_t *lcnList;	/* user-space array receiving the chain */
	u_int32_t lcnNum;	/* expected chain length */
};

/* One cluster plus its neighbours in a chain, used for cluster swapping. */
struct moveCluster {
	u_int32_t lcn;
	u_int32_t lcnPrev;
	u_int32_t lcnNext;
};

/* Argument for PKG_COMMAND_MOVE_CLUSTERS (defrag move between two files). */
struct moveClustersInfo {
	__u32 donor_fd;
	__u32 orig_fd;
	__u64 orig_start;
	__u64 donor_start;
	__u64 len;
	__u64 moved_len;
	struct moveCluster lcnOrig;
	struct moveCluster lcnDonor;
};

/* Envelope copied from user space for every defrag ioctl sub-command. */
struct dataPkg {
	char checkFlag;		/* must equal FALLOC_FLAG */
	char fileName[500];
	char cmd;		/* one of the PKG_COMMAND_* values */
	ino_t inodeNumber;
	dev_t mainDevNumber;
	dev_t minorDevNumber;
	void *data;		/* user pointer to the per-command payload */
};

/*
 * FAT-variant dispatch table; mirrors the one in fs/fat/fatent.c so this
 * code can walk FAT entries through sbi->fatent_ops.
 */
struct fatent_operations {
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	void (*ent_set_ptr)(struct fat_entry *, int);
	int (*ent_bread)(struct super_block *, struct fat_entry *, int, sector_t);
	int (*ent_get)(struct fat_entry *);
	void (*ent_put)(struct fat_entry *, int);
	int (*ent_next)(struct fat_entry *);
};

/*
 * Re-point an already-read fat_entry at @offset within block @blocknr
 * without re-reading it.  Returns 1 when the cached buffer heads could be
 * reused, 0 when the caller must do a fresh read.  The FAT12 branch handles
 * entries that may straddle two blocks.
 */
static inline int fat_ent_update_ptr_ext(struct super_block *sb, struct fat_entry *fatent, int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;

	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (sbi->fat_bits == 12) {
		if ((offset + 1) < sb->s_blocksize) {
			/* entry fully inside the first block: drop a stale 2nd bh */
			if (fatent->nr_bhs == 2) {
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* entry straddles blocks: both cached bhs must match */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}

/* Local wrappers around the FAT table mutex (same lock as fs/fat core). */
static inline void lock_fat2(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}

static inline void unlock_fat2(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}

/*
 * Read the FAT block containing fatent->entry; releases any buffers the
 * entry previously held.  Returns 0 on success, negative errno otherwise.
 */
static inline
int fat_ent_read_block2(struct super_block *sb, struct fat_entry *fatent)
{
	struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t blocknr;
	int offset;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}

/*
 * Advance to the next entry in the currently-mapped FAT block.
 * Returns 1 if a valid next entry exists, 0 at block end or max_cluster.
 */
static inline int fat_ent_next2(struct msdos_sb_info *sbi, struct fat_entry *fatent)
{
	if (sbi->fatent_ops->ent_next(fatent)) {
		if (fatent->entry < sbi->max_cluster)
			return 1;
	}
	return 0;
}

/*
 * Scan the FAT for a contiguous run of @clusterSum free clusters.
 * On success sets *contiguous = 1, points sbi->prev_free at the cluster
 * just before the run (so the normal allocator will hand out the run next),
 * and returns that prev_free value.  Returns with start = 0 and
 * *contiguous = 0 when no run exists.  Holds fat_lock for the whole scan.
 */
unsigned long fat_find_free_clusters(struct super_block *sb, unsigned long clusterSum, int *contiguous)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	unsigned long reada_blocks, reada_mask, cur_block;
	int err = 0, free;
	unsigned long clusters = 0;
	unsigned long start = 0;

	*contiguous = 0;
	lock_fat2(sbi);
	/* fast reject when the cached free count already rules it out */
	if (sbi->free_clusters != -1 && sbi->free_clus_valid && sbi->free_clusters < clusterSum) {
		printk(KERN_INFO "fat_find_free_clusters failed, free_clusters:%d, free_clus_valid:%d\n", sbi->free_clusters, sbi->free_clus_valid);
		start = 0;
		goto out2;
	}
	/* read-ahead bookkeeping is computed but not otherwise used here */
	reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
	reada_mask = reada_blocks - 1;
	cur_block = 0;
	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	while (fatent.entry < sbi->max_cluster) {
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block2(sb, &fatent);
		if (err) {
			printk(KERN_INFO "fat_find_first_free_clusters fat_ent_read_block2 failed\n");
			start = 0;
			goto out1;
		}
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				free++;
				if (start == 0) {
					start = fatent.entry;
				}
				clusters++;
				if (clusters >= clusterSum) {
					/*
					 * Record the cluster BEFORE the run as
					 * prev_free; wrap to the last cluster
					 * when the run begins at FAT_START_ENT.
					 */
					start--;
					if (start < FAT_START_ENT) {
						start = sbi->max_cluster - 1;
					}
					sbi->prev_free = start;
					sb->s_dirt = 1;
					*contiguous = 1;
					goto out1;
				}
			} else {
				/* run broken: restart the count */
				start = 0;
				clusters = 0;
			}
			if (fatent.entry >= sbi->max_cluster) {
				start = 0;
				break;
			}
		} while (fat_ent_next2(sbi, &fatent));
	}
out1:
	fatent_brelse(&fatent);
out2:
	unlock_fat2(sbi);
	return start;
}

/*
 * Point the FAT allocator's prev_free hint at the cluster before @newLcn so
 * the next allocation starts there.  Returns 1 on success, 0 when the
 * volume reports no free clusters at all.
 */
int fat_set_prev_free_cluster(struct super_block *sb, u_int32_t newLcn)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int ret = 0;

	lock_fat2(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid && sbi->free_clusters < 1) {
		printk(KERN_INFO "fat_set_prev_free_cluster failed, free_clusters:%d, free_clus_valid:%d\n", sbi->free_clusters, sbi->free_clus_valid);
		goto out;
	}
	/* hint is the cluster before the target; wrap below FAT_START_ENT */
	newLcn--;
	if (newLcn < FAT_START_ENT) {
		newLcn = sbi->max_cluster - 1;
	}
	sbi->prev_free = newLcn;
	sb->s_dirt = 1;
	ret = 1;
out:
	unlock_fat2(sbi);
	return ret;
}

/*
 * Find the first free-cluster run of at least @MinimumSize clusters at or
 * after @MinimumLcn.  On success stores the run bounds in *BeginLcn /
 * *EndLcn and returns *BeginLcn; returns 0 when nothing suitable exists.
 */
int fat_find_first_free_clusters(struct super_block *sb, u_int32_t MinimumLcn, u_int32_t MinimumSize, u_int32_t *BeginLcn, u_int32_t *EndLcn)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	unsigned long reada_blocks, reada_mask, cur_block;
	int err = 0;
	u_int32_t start = 0;

	*BeginLcn = 0;
	*EndLcn = 0;
	if (MinimumLcn >= sbi->max_cluster) {
		printk(KERN_INFO "fat_find_first_free_clusters minimum lcn is out of side\n");
		return 0;
	}
	lock_fat2(sbi);
	if (sbi->free_clusters != -1 && sbi->free_clus_valid && sbi->free_clusters < MinimumSize) {
		printk(KERN_INFO "fat_find_first_free_clusters failed, free_clusters:%d, free_clus_valid:%d\n", sbi->free_clusters, sbi->free_clus_valid);
		goto out2;
	}
	reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
	reada_mask = reada_blocks - 1;
	cur_block = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, MinimumLcn);
	while (fatent.entry < sbi->max_cluster) {
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block2(sb, &fatent);
		if (err) {
			printk(KERN_INFO "fat_find_first_free_clusters fat_ent_read_block2 failed\n");
			goto out1;
		}
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				if (start == 0) {
					start = fatent.entry;
				}
				if (fatent.entry >= sbi->max_cluster) {
					if ((fatent.entry - start) >= MinimumSize) {
						*BeginLcn = start;
						*EndLcn = fatent.entry;
						goto out1;
					}
				}
			} else {
				/* run ended: check whether it was long enough */
				if (0 != start) {
					if ((fatent.entry - start) >= MinimumSize) {
						*BeginLcn = start;
						*EndLcn = fatent.entry;
						goto out1;
					}
				}
				start = 0;
			}
		} while (fat_ent_next2(sbi, &fatent));
		/* a run may extend to the very end of the FAT */
		if (fatent.entry >= sbi->max_cluster) {
			if (0 != start) {
				if ((fatent.entry - start) >= MinimumSize) {
					*BeginLcn = start;
					*EndLcn = fatent.entry;
				}
			}
		}
	}
out1:
	fatent_brelse(&fatent);
out2:
	unlock_fat2(sbi);
	return *BeginLcn;
}

/*
 * Walk @inode's FAT chain and return 1 if every cluster is immediately
 * followed by cluster+1 (file is physically contiguous), 0 otherwise.
 * NOTE(review): an out-of-range start cluster returns 1 ("contiguous"),
 * not an error — confirm callers rely on that.
 */
int isNodeContinuous(struct inode *inode)
{
	struct fat_entry fatent;
	int dclus = 0;
	int nr = 0;
	struct msdos_inode_info *msinode = NULL;
	struct super_block *sb = NULL;
	struct msdos_sb_info *sbi = NULL;
	struct msdos_dir_entry *uninitialized_var(de);

	if (NULL != inode) {
		msinode = MSDOS_I(inode);
		sb = inode->i_sb;
	} else {
		printk(KERN_INFO "inode is null\n");
		return 0;
	}
	if (NULL == msinode) {
		printk(KERN_INFO "msinode is null\n");
		return 0;
	}
	if (NULL != sb) {
		sbi = MSDOS_SB(sb);
	}
	if (NULL == sbi) {
		printk(KERN_INFO "sbi is null\n");
		return 0;
	}
	if (msinode->i_start < FAT_START_ENT || msinode->i_start > sbi->max_cluster) {
		printk(KERN_INFO "!!!start cluster is not correct\n");
		return 1;
	}
	lock_fat2(sbi);
	dclus = msinode->i_start;
	fatent_init(&fatent);
	while (dclus < FAT_ENT_EOF) {
		nr = fat_ent_read(inode, &fatent, dclus);
		/* any link that is neither EOF nor cluster+1 breaks contiguity */
		if (FAT_ENT_EOF != nr && nr != (dclus + 1)) {
			fatent_brelse(&fatent);
			unlock_fat2(sbi);
			printk(KERN_INFO "file is not contiguous\n");
			return 0;
		}
		dclus = nr;
	}
	fatent_brelse(&fatent);
	unlock_fat2(sbi);
	return 1;
}

/*
 * Check (and set up the prev_free hint for) a contiguous region big enough
 * for @len bytes.  Returns 1 if such a region exists, 0 otherwise.
 */
int fat_relocation(struct super_block *sb, loff_t len)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	unsigned long clusterNums = 0, start = 0;
	int contiguous = 0;

	clusterNums = (len + sbi->cluster_size - 1) >> sbi->cluster_bits;
	start = fat_find_free_clusters(sb, clusterNums, &contiguous);
	return contiguous;
}

/*
 * Grow @inode to @size by zero-filling (generic_cont_expand_simple) and,
 * for O_SYNC inodes, flushing data, metadata buffers and the inode itself.
 * Returns 0 on success or the first error encountered.
 */
static int fat_cont_expand(struct inode *inode, loff_t size)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t start = inode->i_size, count = size - inode->i_size;
	int err;

	err = generic_cont_expand_simple(inode, size);
	if (err)
		goto out;
	inode->i_ctime = inode->i_mtime =
CURRENT_TIME_SEC;
	mark_inode_dirty(inode);
	if (IS_SYNC(inode)) {
		int err2;

		/* flush data pages, fs-private buffers, then the inode */
		err = filemap_fdatawrite_range(mapping, start, start + count - 1);
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
		if (!err) {
			err = filemap_fdatawait_range(mapping, start, start + count - 1);
		}
	}
out:
	return err;
}

/*
 * fallocate-style preallocation used by the defrag ioctl.  When @newLcn is
 * 0 a contiguous free run is required (fails with -EINVAL otherwise);
 * otherwise the allocator hint is pointed at @newLcn.  Unless
 * FALLOC_FL_KEEP_SIZE is set, the file is expanded to offset+len.
 * NOTE(review): mutex_unlock(&inode->i_mutex) is executed even on the
 * FALLOC_FL_KEEP_SIZE path where mutex_lock() was never taken — this looks
 * like an unbalanced unlock; confirm intent.
 */
static long fat_fallocate(struct inode *inode, int mode, loff_t offset, loff_t len, u_int32_t newLcn)
{
	int err = 0;
	int contiguous = 0;
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	unsigned long clusterNums = 0, start = 0;

	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;
	if (0 == newLcn) {
		clusterNums = (len + sbi->cluster_size - 1) >> sbi->cluster_bits;
		start = fat_find_free_clusters(sb, clusterNums, &contiguous);
		if (!contiguous) {
			return -EINVAL;
		}
	} else {
		if (!fat_set_prev_free_cluster(sb, newLcn)) {
			printk(KERN_INFO "fat_set_prev_free_cluster failed\n");
			return -EINVAL;
		}
	}
	if ((offset + len) <= MSDOS_I(inode)->mmu_private) {
		fat_msg(sb, KERN_ERR, "fat_fallocate():Blocks already allocated");
		return -EINVAL;
	}
	if ((mode & FALLOC_FL_KEEP_SIZE)) {
		/* keep i_size: nothing to expand */
	} else {
		mutex_lock(&inode->i_mutex);
		err = fat_cont_expand(inode, (offset + len));
		if (err) {
			fat_msg(sb, KERN_ERR, "fat_fallocate():fat_cont_expand() error");
		}
	}
	mutex_unlock(&inode->i_mutex);
	return err;
}

/*
 * Legacy fallocate path: same as fat_fallocate() but without cluster
 * placement control.  Carries the same unbalanced-unlock pattern as
 * fat_fallocate() above (see NOTE there).
 */
static long fat_fallocate_old(struct inode *inode, int mode, loff_t offset, loff_t len)
{
	int err = 0;
	struct super_block *sb = inode->i_sb;

	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;
	if ((offset + len) <= MSDOS_I(inode)->mmu_private) {
		fat_msg(sb, KERN_ERR, "fat_fallocate():Blocks already allocated");
		return -EINVAL;
	}
	if ((mode & FALLOC_FL_KEEP_SIZE)) {
		/* keep i_size: nothing to expand */
	} else {
		mutex_lock(&inode->i_mutex);
		err = fat_cont_expand(inode, (offset + len));
		if (err) {
			fat_msg(sb, KERN_ERR, "fat_fallocate():fat_cont_expand() error");
		}
	}
	mutex_unlock(&inode->i_mutex);
	return err;
}

/*
 * Directory variant: only positions the allocator (no size change).
 * Returns 1 on success, 0 on failure.
 */
static int fat_fallocate_dir(struct super_block *sb, loff_t len, u_int32_t newLcn)
{
	if (0 == newLcn) {
		if (!fat_relocation(sb, len)) {
			printk(KERN_INFO "fat_relocation failed\n");
			return 0;
		}
	} else {
		if (!fat_set_prev_free_cluster(sb, newLcn)) {
			printk(KERN_INFO "fat_set_prev_free_cluster failed\n");
			return 0;
		}
	}
	return 1;
}

/*
 * Fill @boot with volume geometry (cluster size, sectors/cluster, cluster
 * count, block size).  Returns 1 on success, 0 if @boot is NULL.
 */
int fat_get_boot_info(struct inode *inode, struct bootInfo *boot)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	if (!boot) {
		printk(KERN_INFO "fat_get_boot_info failed\n");
		return 0;
	}
	memset(boot, 0, sizeof(struct bootInfo));
	boot->ClusterSize = sbi->cluster_size;
	boot->SecPerClust = sbi->sec_per_clus;
	boot->NumClusters = sbi->max_cluster;
	boot->BlockSize = sb->s_blocksize;
	return 1;
}

/*
 * Fill @info with the file's first cluster and its size in clusters.
 * Returns 1 on success, 0 on NULL arguments.
 * NOTE(review): inode is dereferenced (i_sb) before the !inode check.
 */
int fat_get_file_info(struct inode *inode, struct fileInfo *info)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct msdos_inode_info *msinode = MSDOS_I(inode);

	if (!info || !inode) {
		printk(KERN_INFO "fat_get_file_info failed\n");
		return 0;
	}
	info->lcn = msinode->i_start;
	info->realClusters = (inode->i_size + sbi->cluster_size - 1) >> sbi->cluster_bits;
	info->uncompressedClusters = 0;
	return 1;
}

/*
 * Copy the filesystem type name of the current task's cwd mount into
 * @filesystem_type.  Caller must supply a buffer large enough for the name;
 * strcpy() here is unbounded.
 */
long get_files_info(char *filesystem_type)
{
	struct fs_struct *fs;
	struct vfsmount *mnt;
	struct super_block *mnt_sb;
	struct file_system_type *s_type;

	spin_lock(&current->fs->lock);
	fs = current->fs;
	mnt = fs->pwd.mnt;
	mnt_sb = mnt->mnt_sb;
	s_type = mnt_sb->s_type;
	strcpy(filesystem_type, s_type->name);
	spin_unlock(&current->fs->lock);
	return 0;
}

/*
 * Minimal fadvise: only POSIX_FADV_DONTNEED is supported, which drops the
 * byte range [offset, offset+len) from the page cache.  Returns 0 on
 * success, -ESPIPE for FIFOs, -EINVAL otherwise.
 */
int fat_fadvise(struct inode *inode, loff_t offset, loff_t len, int advice)
{
	struct address_space *mapping = inode->i_mapping;
	struct backing_dev_info *bdi;
	loff_t endbyte;
	pgoff_t start_index;
	pgoff_t end_index;
	int ret = 0;

	if (S_ISFIFO(inode->i_mode)) {
		ret = -ESPIPE;
		goto out;
	}
	if (!mapping || len < 0) {
		ret = -EINVAL;
		goto out;
	}
	/* len == 0 (or overflow) means "to end of file" */
	endbyte = offset + len;
	if (!len || endbyte < len)
		endbyte = -1;
	else
		endbyte--;
	bdi = mapping->backing_dev_info;
	switch (advice) {
	case POSIX_FADV_DONTNEED:
		/* only whole pages inside the range are invalidated */
		start_index = (offset+(PAGE_CACHE_SIZE-1)) >> PAGE_CACHE_SHIFT;
		end_index = (endbyte >> PAGE_CACHE_SHIFT);
		if (end_index >= start_index)
			invalidate_mapping_pages(mapping, start_index, end_index);
		break;
	default:
		ret = -EINVAL;
	}
out:
	return ret;
}

/*
 * Rewrite the file's first-cluster fields (i_start / i_logstart) to
 * @newLcn and push the inode to disk (synchronously for DIRSYNC dirs).
 * Returns 0 on success or the fat_sync_inode() error.
 */
int fat_set_file_first_cluster(struct inode *inode, u_int32_t newLcn)
{
	struct msdos_inode_info *msInode = MSDOS_I(inode);
	int ret = 0;

	msInode->i_start = newLcn;
	msInode->i_logstart = newLcn;
	if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) {
		ret = fat_sync_inode(inode);
		if (ret)
			return ret;
	} else {
		mark_inode_dirty(inode);
	}
	return ret;
}

/*
 * Copy the raw on-disk contents of @inode's whole cluster chain into the
 * user buffer info->data.  Returns 1 on success / bad start cluster, 0 on
 * read or copy failure.
 * NOTE(review): copies a hard-coded 512 bytes per sector — assumes
 * 512-byte logical sectors; confirm against sb->s_blocksize.
 */
int fat_get_cluster_data(struct inode *inode, struct getClusterDataInfo *info)
{
	struct msdos_inode_info *msinode = MSDOS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct msdos_dir_entry *uninitialized_var(de);
	struct fat_entry fatent;
	struct buffer_head *bh = NULL;
	int dclus = 0, ret = 0;
	int i = 0, count = 0;
	unsigned char *buf = info->data;
	unsigned long sectorAddress = 0;

	if (msinode->i_start < FAT_START_ENT || msinode->i_start > sbi->max_cluster) {
		printk(KERN_INFO "!!!getClusterData:start cluster is not correct\n");
		return 1;
	}
	lock_fat2(sbi);
	dclus = msinode->i_start;
	fatent_init(&fatent);
	while (dclus < FAT_ENT_EOF) {
		/* first data sector of cluster dclus (clusters start at 2) */
		sectorAddress = sbi->data_start + sbi->sec_per_clus * (dclus - 2);
		for (i = 0; i < sbi->sec_per_clus; i++) {
			bh = sb_bread(sb, sectorAddress);
			if (bh == NULL) {
				printk(KERN_ERR "!!!getClusterData: Directory bread failed\n");
				goto out;
			}
			if (copy_to_user(buf, bh->b_data, 512) != 0) {
				printk(KERN_ERR "!!!getClusterData: copy to user failed\n");
				goto out;
			}
			buf += 512;
			sectorAddress++;
		}
		count++;
		dclus = fat_ent_read(inode, &fatent, dclus);
		/* chain length must match what the caller expected */
		if (FAT_ENT_EOF == dclus && count != info->lcnNums) {
			printk(KERN_INFO "!!!getClusterData: cluster nums not correct\n");
			goto out;
		}
	}
	ret = 1;
out:
	fatent_brelse(&fatent);
	unlock_fat2(sbi);
	return ret;
}

/*
 * Copy @inode's cluster chain (as u_int32_t LCNs) to the user array
 * info->lcnList.  Returns 1 only when exactly info->lcnNum entries were
 * written, 0 otherwise.
 */
int fat_get_clusters(struct inode *inode, struct getClustersInfo *info)
{
	struct fat_entry fatent;
	int dclus = 0;
	int index = 0;
	struct msdos_inode_info *msinode = MSDOS_I(inode);
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct msdos_dir_entry *uninitialized_var(de);

	if (msinode->i_start < FAT_START_ENT || msinode->i_start > sbi->max_cluster) {
		printk(KERN_INFO "!!!start cluster is not correct\n");
		return 0;
	}
	lock_fat2(sbi);
	dclus = msinode->i_start;
	fatent_init(&fatent);
	while (dclus < FAT_ENT_EOF) {
		if (copy_to_user(info->lcnList + index++, &dclus, sizeof(u_int32_t)) != 0) {
			printk(KERN_ERR "!!!getClusters: copy to user failed\n");
			goto out;
		}
		dclus = fat_ent_read(inode, &fatent, dclus);
	}
out:
	fatent_brelse(&fatent);
	unlock_fat2(sbi);
	return (index == info->lcnNum ? 1 : 0);
}

/*
 * Take both inodes' truncate_lock write-locks in ascending-inode-number
 * order to avoid ABBA deadlocks between concurrent moves.
 */
void double_down_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
{
	struct inode *first = orig_inode, *second = donor_inode;

	if (donor_inode->i_ino < orig_inode->i_ino) {
		first = donor_inode;
		second = orig_inode;
	}
	down_write(&MSDOS_I(first)->truncate_lock);
	down_write_nested(&MSDOS_I(second)->truncate_lock, SINGLE_DEPTH_NESTING);
}

/* Release the locks taken by double_down_write_data_sem(). */
void double_up_write_data_sem(struct inode *orig_inode, struct inode *donor_inode)
{
	up_write(&MSDOS_I(orig_inode)->truncate_lock);
	up_write(&MSDOS_I(donor_inode)->truncate_lock);
}

/* Return -EIO if either inode is NULL, 0 otherwise (ext4 mext-style check). */
static int mext_check_null_inode(struct inode *inode1, struct inode *inode2, const char *function, unsigned int line)
{
	int ret = 0;

	if (inode1 == NULL) {
		ret = -EIO;
	} else if (inode2 == NULL) {
		ret = -EIO;
	}
	return ret;
}

/*
 * Lock two inodes' i_mutex in ascending-inode-number order (single lock
 * when both are the same inode).  Returns 0 or -EIO for a NULL inode.
 */
static int mext_inode_double_lock(struct inode *inode1, struct inode *inode2)
{
	int ret = 0;

	BUG_ON(inode1 == NULL && inode2 == NULL);
	ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
	if (ret < 0)
		goto out;
	if (inode1 == inode2) {
		mutex_lock(&inode1->i_mutex);
		goto out;
	}
	if (inode1->i_ino < inode2->i_ino) {
		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
	} else {
		mutex_lock_nested(&inode2->i_mutex,
I_MUTEX_PARENT);
		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
	}
out:
	return ret;
}

/* Unlock the pair taken by mext_inode_double_lock(); tolerates NULLs. */
static int mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
{
	int ret = 0;

	BUG_ON(inode1 == NULL && inode2 == NULL);
	ret = mext_check_null_inode(inode1, inode2, __func__, __LINE__);
	if (ret < 0)
		goto out;
	if (inode1)
		mutex_unlock(&inode1->i_mutex);
	if (inode2 && inode2 != inode1)
		mutex_unlock(&inode2->i_mutex);
out:
	return ret;
}

/* Provided by the core FAT code (fs/fat). */
int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs, int nr_bhs);
int fat_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_result, int create);

/*
 * Splice the donor cluster into @inode_orig's chain in place of
 * @clst_orig: either rewrite i_start (when the original cluster is the
 * head) or repoint the FAT entry of clst_orig.lcnPrev, then make the donor
 * cluster link to clst_orig.lcnNext.  Syncs the dirtied FAT block for
 * sync inodes and mirrors it to the secondary FAT.  Always returns 1.
 */
int fat_replace_one_cluster(struct inode *inode_orig, struct inode *inode_donor, struct moveCluster clst_orig, struct moveCluster clst_donor)
{
	struct fat_entry fatent;
	struct buffer_head *bhs[1];
	int dclus = 0;
	int err = 0;
	struct msdos_inode_info *msinode_orig = NULL;
	struct msdos_inode_info *msinode_donor = NULL;
	struct super_block *sb = inode_orig->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct msdos_dir_entry *uninitialized_var(de);
	struct fatent_operations *ops = sbi->fatent_ops;

	msinode_orig = MSDOS_I(inode_orig);
	msinode_donor = MSDOS_I(inode_donor);
	if (msinode_orig->i_start < FAT_START_ENT || msinode_orig->i_start > sbi->max_cluster) {
		printk(KERN_INFO "!!!start cluster is not correct\n");
		return 0;
	}
	lock_fat2(sbi);
	printk(KERN_INFO "!!!start fat_replace_one_cluster\n");
	if (msinode_orig->i_start == clst_orig.lcn) {
		/* replacing the head cluster: update the inode, not the FAT */
		printk(KERN_INFO "!!!fat_replace_one_cluster i_start\n");
		msinode_orig->i_start = clst_donor.lcn;
		msinode_orig->i_logstart = clst_donor.lcn;
		mark_inode_dirty(inode_orig);
	} else {
		/* repoint the previous link to the donor cluster */
		printk(KERN_INFO "!!!fat_replace_one_cluster not i_start\n");
		dclus = clst_orig.lcnPrev;
		fatent_init(&fatent);
		dclus = fat_ent_read(inode_orig, &fatent, dclus);
		ops->ent_put(&fatent, clst_donor.lcn);
		sb->s_dirt = 1;
	}
	/* donor cluster now continues the original chain */
	dclus = clst_donor.lcn;
	fatent_init(&fatent);
	dclus = fat_ent_read(inode_orig, &fatent, dclus);
	ops->ent_put(&fatent, clst_orig.lcnNext);
	get_bh(fatent.bhs[0]);
	bhs[0] = fatent.bhs[0];
	sb->s_dirt = 1;
	mark_inode_dirty(inode_orig);
	unlock_fat2(sbi);
	fatent_brelse(&fatent);
	if (inode_needs_sync(inode_orig)) {
		write_dirty_buffer(bhs[0], WRITE);
		wait_on_buffer(bhs[0]);
		if (!err && !buffer_uptodate(bhs[0]))
			err = -EIO;
	}
	if (!err)
		err = fat_mirror_bhs(sb, bhs, 1);
	brelse(bhs[0]);
	return 1;
}

/*
 * Swap the clusters described in @info between the file owning
 * @inode_orig and the donor file (both passed as fds in @info), rewriting
 * the page cache through write_begin/write_end so data follows the move.
 * Always returns 1 once past the argument checks.
 * NOTE(review): the early "return 0" after a failed fget(donor_fd) leaks
 * the filp_orig reference, and the "return ret1" path skips
 * mnt_drop_write_file()/fput() — verify these paths.
 */
int fat_move_clusters(struct inode *inode_orig, struct moveClustersInfo *info)
{
	struct file *filp_orig = NULL;
	struct file *filp_donor = NULL;
	struct inode *inode_donor = NULL;
	struct fat_entry fatent;
	struct msdos_inode_info *msinode_orig = NULL;
	struct msdos_inode_info *msinode_donor = NULL;
	struct super_block *sb = inode_orig->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct msdos_dir_entry *uninitialized_var(de);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct address_space *mapping = inode_orig->i_mapping;
	struct buffer_head *bh;
	struct page *page = NULL;
	const struct address_space_operations *a_ops = mapping->a_ops;
	__u32 orig_blk_offset = 0;
	int dclus = 0;
	int err = 0, i = 0;
	int err2 = 0;
	int ret1, ret2;
	int data_offset_in_page = 0;
	int block_len_in_page = PAGE_CACHE_SIZE >> inode_orig->i_blkbits;
	long long offs = 0;
	unsigned int sum_size = 0, finished_size = 0, data_size = 0, replaced_size = 0;
	unsigned int w_flags = 0;
	void *fsdata = NULL;

	/* self-assignments silence unused-variable warnings */
	bh = NULL;
	ops = ops;
	dclus = dclus;
	fatent.entry = 0;
	printk(KERN_INFO "!!!start fat_move_clusters, orig_start:%lld, donor_start:%lld, len:%lld, page size:%ld\n", info->orig_start, info->donor_start, info->len, PAGE_CACHE_SIZE);
	printk(KERN_INFO "!!!orig.lcn:%d, orig:prev:%d, orig.next:%d\n", info->lcnOrig.lcn, info->lcnOrig.lcnPrev, info->lcnOrig.lcnNext);
	printk(KERN_INFO "!!!donor.lcn:%d, donor:prev:%d, donor.next:%d\n", info->lcnDonor.lcn, info->lcnDonor.lcnPrev, info->lcnDonor.lcnNext);
	filp_orig = fget(info->orig_fd);
	if (!filp_orig)
		return 0;
	filp_donor = fget(info->donor_fd);
	if (!filp_donor)
		return 0;
	inode_donor =
filp_donor->f_dentry->d_inode;
	msinode_orig = MSDOS_I(inode_orig);
	msinode_donor = MSDOS_I(inode_donor);
	if (msinode_orig->i_start < FAT_START_ENT || msinode_orig->i_start > sbi->max_cluster) {
		printk(KERN_INFO "!!! fat_move_clusters: start cluster is not correct\n");
		return 0;
	}
	err = mnt_want_write_file(filp_orig);
	if (err)
		goto out2;
	ret1 = mext_inode_double_lock(inode_orig, inode_donor);
	if (ret1 < 0)
		return ret1;
	/* total bytes to migrate, one page per loop iteration */
	sum_size = info->len << inode_orig->i_blkbits;
	finished_size = 0;
	while (sum_size) {
		printk(KERN_INFO "!!! fat_move_clusters sum_size:%d\n", sum_size);
		if (segment_eq(get_fs(), KERNEL_DS))
			w_flags |= AOP_FLAG_UNINTERRUPTIBLE;
		orig_blk_offset = info->orig_start + (finished_size >> inode_orig->i_blkbits) + data_offset_in_page;
		offs = (long long)(info->orig_start << inode_orig->i_blkbits) + finished_size;
		data_size = PAGE_CACHE_SIZE;
		replaced_size = data_size;
		printk(KERN_INFO "\n!!! write begin offs:%lld, data_size:%d, w_flags:%d, orig_blk_offset:%d\n", offs, data_size, w_flags, orig_blk_offset);
		/* pin the page and make it up to date before touching the FAT */
		err = a_ops->write_begin(filp_orig, mapping, offs, data_size, w_flags, &page, &fsdata);
		printk(KERN_INFO "!!! err:%d\n", err);
		if (unlikely(err < 0))
			goto out;
		if (!PageUptodate(page)) {
			printk(KERN_INFO "!!! up to date\n");
			mapping->a_ops->readpage(filp_orig, page);
			lock_page(page);
		}
		wait_on_page_writeback(page);
		/* drop stale buffer mappings so fat_get_block remaps below */
		try_to_release_page(page, 0);
		/* swap the clusters in both files' FAT chains */
		fat_replace_one_cluster(inode_orig, inode_donor, info->lcnOrig, info->lcnDonor);
		fat_replace_one_cluster(inode_donor, inode_orig, info->lcnDonor, info->lcnOrig);
		if (!page_has_buffers(page)) {
			printk(KERN_INFO "!!! create buffer\n");
			create_empty_buffers(page, 1 << inode_orig->i_blkbits, 0);
		}
		bh = page_buffers(page);
		printk(KERN_INFO "!!! data_offset_in_page:%d\n", data_offset_in_page);
		for (i = 0; i < data_offset_in_page; i++)
			bh = bh->b_this_page;
		/* remap each block of the page to its new (donor) location */
		for (i = 0; i < block_len_in_page; i++) {
			err = fat_get_block(inode_orig, (sector_t)(orig_blk_offset + i), bh, 0);
			printk(KERN_INFO "!!! fat get block err:%d, orig_blk_offset:%d\n", err, orig_blk_offset);
			if (err < 0)
				goto out;
			if (bh->b_this_page != NULL)
				bh = bh->b_this_page;
		}
		/* dirty the page so data is written to the new clusters */
		err = a_ops->write_end(filp_orig, mapping, offs, data_size, replaced_size, page, fsdata);
		printk(KERN_INFO "!!! write end err:%d\n", err);
		page = NULL;
out:
		/* note: label is inside the loop; error paths fall through here */
		if (unlikely(page)) {
			printk(KERN_INFO "!!! release page\n");
			if (PageLocked(page))
				unlock_page(page);
			page_cache_release(page);
		}
		sum_size -= replaced_size;
		finished_size += replaced_size;
	}
	if (err2)
		err = err2;
	printk(KERN_INFO "!!! out\n");
	ret2 = mext_inode_double_unlock(inode_orig, inode_donor);
	mnt_drop_write_file(filp_orig);
out2:
	printk(KERN_INFO "!!! out2\n");
	fput(filp_orig);
	fput(filp_donor);
	return 1;
}

/*
 * ioctl entry point for the defrag interface.  Copies a struct dataPkg
 * from user space, validates its checkFlag, then dispatches on pkg.cmd.
 * Returns 1 on success and 0 on failure for most sub-commands.
 */
int fat_ioctl_move_cluster(struct file *filp, u32 __user *user_arg)
{
	struct dataPkg pkg;
	struct inode *inode = NULL;
	struct super_block *sb = filp->f_dentry->d_inode->i_sb;
	int ret = 0;

	if (copy_from_user(&pkg, (struct dataPkg __user *)user_arg, sizeof(pkg))) {
		printk(KERN_INFO "!!!fat_ioctl_move_cluster copy pkg failed\n");
		return 0;
	}
	if (FALLOC_FLAG != pkg.checkFlag) {
		printk(KERN_INFO "!!!fallocate checkflag is not correct\n");
		return 0;
	}
	switch (pkg.cmd) {
	case PKG_COMMAND_FALLOCATE_FILE: {
		/* preallocate contiguous space, then verify contiguity */
		struct fallocInfo info;

		ret = 0;
		if (NULL != pkg.data) {
			if (copy_from_user(&info, pkg.data, sizeof(struct fallocInfo)) == 0) {
				inode = ilookup(sb, pkg.inodeNumber);
				if (0 != fat_fallocate(inode, info.mode, info.offset, info.len, info.newLcn)) {
					ret = 0;
				} else {
					ret = isNodeContinuous(inode);
				}
				iput(inode);
			}
		}
		break;
	}
	case PKG_COMMAND_FALLOCATE_DIR: {
		struct fallocInfo info;

		ret = 0;
		if (NULL != pkg.data) {
			if (copy_from_user(&info, pkg.data, sizeof(struct fallocInfo)) == 0) {
				ret = fat_fallocate_dir(sb, info.len, info.newLcn);
			}
		}
		break;
	}
	case PKG_COMMAND_IFCONTIGUOUS:
		inode = ilookup(sb, pkg.inodeNumber);
		ret = isNodeContinuous(inode);
		iput(inode);
		break;
	case PKG_COMMAND_GET_BOOT_INFO: {
		struct bootInfo boot;

		inode = ilookup(sb, pkg.inodeNumber);
		if
 (fat_get_boot_info(inode, &boot)) {
			ret = (copy_to_user(pkg.data, &boot, sizeof(struct bootInfo)) == 0 ? 1 : 0);
		} else {
			ret = 0;
		}
		iput(inode);
		break;
	}
	case PKG_COMMAND_GET_FILE_INFO: {
		struct fileInfo info;

		inode = ilookup(sb, pkg.inodeNumber);
		if (fat_get_file_info(inode, &info)) {
			ret = (copy_to_user(pkg.data, &info, sizeof(struct fileInfo)) == 0 ? 1 : 0);
		} else {
			ret = 0;
		}
		iput(inode);
		break;
	}
	case PKG_COMMAND_GET_FREE_CLUSTER_INFO: {
		/* round-trips freeClusterInfo: reads the query, writes the result */
		struct freeClusterInfo info;

		ret = 0;
		if (NULL != pkg.data) {
			if (copy_from_user(&info, pkg.data, sizeof(struct freeClusterInfo)) == 0) {
				if (fat_find_first_free_clusters(sb, info.MinimumLcn, info.MinimumSize, &info.BeginLcn, &info.EndLcn)) {
					ret = (copy_to_user(pkg.data, &info, sizeof(struct freeClusterInfo)) == 0 ? 1 : 0);
				} else {
					printk(KERN_INFO "fat_find_first_free_clusters failed\n");
				}
			}
		}
		break;
	}
	case PKG_COMMAND_GET_FILE_REF_COUNT: {
		/*
		 * Report the dentry reference count of the named file, minus
		 * the reference this open itself added.
		 * NOTE(review): filp_open() reports failure via ERR_PTR(),
		 * so the NULL test may not catch errors — verify.
		 */
		struct refInfo info;
		struct file *fp = NULL;

		fp = filp_open(pkg.fileName, O_RDWR, 0);
		if (NULL != fp) {
			info.f_count = 0;
			if (fp->f_dentry) {
				spin_lock(&fp->f_dentry->d_lock);
				info.f_count = fp->f_dentry->d_count;
				spin_unlock(&fp->f_dentry->d_lock);
			}
			if (info.f_count > 0)
				info.f_count--;
			filp_close(fp, 0);
			ret = (copy_to_user(pkg.data, &info, sizeof(struct refInfo)) == 0 ? 1 : 0);
		} else {
			printk(KERN_INFO "!!!get file:%s ref count failed\n", pkg.fileName);
			ret = 0;
		}
		break;
	}
	case PKG_COMMAND_SET_NEW_HEAD: {
		struct setNewHeadInfo info;

		ret = 0;
		if (NULL != pkg.data && copy_from_user(&info, pkg.data, sizeof(struct setNewHeadInfo)) == 0) {
			inode = ilookup(sb, pkg.inodeNumber);
			ret = fat_set_file_first_cluster(inode, info.newLcn);
			iput(inode);
		}
		break;
	}
	case PKG_COMMAND_GET_CLUSTER_DATA: {
		struct getClusterDataInfo info;

		ret = 0;
		if (copy_from_user(&info, pkg.data, sizeof(struct getClusterDataInfo)) == 0) {
			inode = ilookup(sb, pkg.inodeNumber);
			ret = fat_get_cluster_data(inode, &info);
			iput(inode);
		}
		break;
	}
	case PKG_COMMAND_FADVISE: {
		/* drop the whole file from the page cache */
		inode = ilookup(sb, pkg.inodeNumber);
		ret = fat_fadvise(inode, 0, inode->i_size, POSIX_FADV_DONTNEED);
		iput(inode);
		break;
	}
	case PKG_COMMAND_FALLOCATE_FILE_OLD: {
		/* legacy path: note inverted return convention (0 err -> ret 1) */
		struct fallocInfo info;

		ret = 0;
		if (NULL != pkg.data) {
			if (copy_from_user(&info, pkg.data, sizeof(struct fallocInfo)) == 0) {
				inode = ilookup(sb, pkg.inodeNumber);
				ret = (0 != fat_fallocate_old(inode, info.mode, info.offset, info.len) ? 0 : 1);
				iput(inode);
			}
		}
		break;
	}
	case PKG_COMMAND_GET_CLUSTERS: {
		struct getClustersInfo info;

		ret = 0;
		if (NULL != pkg.data) {
			if (copy_from_user(&info, pkg.data, sizeof(struct getClustersInfo)) == 0) {
				inode = ilookup(sb, pkg.inodeNumber);
				ret = fat_get_clusters(inode, &info);
				iput(inode);
			}
		}
		break;
	}
	case PKG_COMMAND_MOVE_CLUSTERS: {
		/* payload is validated but the move itself is not invoked here */
		struct moveClustersInfo info;

		ret = 0;
		if (NULL != pkg.data) {
			if (copy_from_user(&info, pkg.data, sizeof(struct moveClustersInfo)) == 0) {
			}
		}
		break;
	}
	default:
		ret = 0;
		break;
	}
	return ret;
}
gpl-2.0
computersforpeace/UBIFS-backports
sound/atmel/abdac.c
17
15767
/*
 * Driver for the Atmel on-chip Audio Bitstream DAC (ABDAC)
 *
 * Copyright (C) 2006-2009 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/bitmap.h>
#include <linux/dw_dmac.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/io.h>

#include <sound/core.h>
#include <sound/initval.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/atmel-abdac.h>

/* DAC register offsets */
#define DAC_DATA	0x0000
#define DAC_CTRL	0x0008
#define DAC_INT_MASK	0x000c
#define DAC_INT_EN	0x0010
#define DAC_INT_DIS	0x0014
#define DAC_INT_CLR	0x0018
#define DAC_INT_STATUS	0x001c

/* Bitfields in CTRL */
#define DAC_SWAP_OFFSET	30
#define DAC_SWAP_SIZE	1
#define DAC_EN_OFFSET	31
#define DAC_EN_SIZE	1

/* Bitfields in INT_MASK/INT_EN/INT_DIS/INT_STATUS/INT_CLR */
#define DAC_UNDERRUN_OFFSET	28
#define DAC_UNDERRUN_SIZE	1
#define DAC_TX_READY_OFFSET	29
#define DAC_TX_READY_SIZE	1

/* Bit manipulation macros */
#define DAC_BIT(name) \
	(1 << DAC_##name##_OFFSET)
#define DAC_BF(name, value) \
	(((value) & ((1 << DAC_##name##_SIZE) - 1)) \
	 << DAC_##name##_OFFSET)
#define DAC_BFEXT(name, value) \
	(((value) >> DAC_##name##_OFFSET) \
	 & ((1 << DAC_##name##_SIZE) - 1))
#define DAC_BFINS(name, value, old) \
	(((old) & ~(((1 << DAC_##name##_SIZE) - 1) \
		    << DAC_##name##_OFFSET)) \
	 | DAC_BF(name, value))

/* Register access macros */
#define dac_readl(port, reg) \
	__raw_readl((port)->regs + DAC_##reg)
#define dac_writel(port, reg, value) \
	__raw_writel((value), (port)->regs + DAC_##reg)

/*
 * ABDAC supports a maximum of 6 different rates from a generic clock. The
 * generic clock has a power of two divider, which gives 6 steps from 192 kHz
 * to 5112 Hz.
 */
#define MAX_NUM_RATES	6
/* ALSA seems to use rates between 192000 Hz and 5112 Hz. */
#define RATE_MAX	192000
#define RATE_MIN	5112

/* bit index into atmel_abdac.flags */
enum {
	DMA_READY = 0,
};

/* Cyclic-DMA state for the playback stream. */
struct atmel_abdac_dma {
	struct dma_chan		*chan;
	struct dw_cyclic_desc	*cdesc;
};

/* Per-device driver state, stored in the card's private_data. */
struct atmel_abdac {
	struct clk				*pclk;		/* peripheral bus clock */
	struct clk				*sample_clk;	/* generic sample clock */
	struct platform_device			*pdev;
	struct atmel_abdac_dma			dma;
	struct snd_pcm_hw_constraint_list	constraints_rates;
	struct snd_pcm_substream		*substream;
	struct snd_card				*card;
	struct snd_pcm				*pcm;
	void __iomem				*regs;
	unsigned long				flags;		/* see DMA_READY */
	unsigned int				rates[MAX_NUM_RATES];	/* ascending */
	unsigned int				rates_num;
	int					irq;
};

#define get_dac(card) ((struct atmel_abdac *)(card)->private_data)

/* This function is called by the DMA driver. */
static void atmel_abdac_dma_period_done(void *arg)
{
	struct atmel_abdac *dac = arg;

	snd_pcm_period_elapsed(dac->substream);
}

/*
 * Set up a cyclic DMA transfer covering the whole runtime buffer, with a
 * period callback per ALSA period.  Sets DMA_READY on success; returns
 * -EINVAL for buffers the DW DMA engine cannot handle.
 */
static int atmel_abdac_prepare_dma(struct atmel_abdac *dac, struct snd_pcm_substream *substream, enum dma_data_direction direction)
{
	struct dma_chan *chan = dac->dma.chan;
	struct dw_cyclic_desc *cdesc;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned long buffer_len, period_len;

	/*
	 * We don't do DMA on "complex" transfers, i.e. with
	 * non-halfword-aligned buffers or lengths.
	 */
	if (runtime->dma_addr & 1 || runtime->buffer_size & 1) {
		dev_dbg(&dac->pdev->dev, "too complex transfer\n");
		return -EINVAL;
	}

	buffer_len = frames_to_bytes(runtime, runtime->buffer_size);
	period_len = frames_to_bytes(runtime, runtime->period_size);

	cdesc = dw_dma_cyclic_prep(chan, runtime->dma_addr, buffer_len, period_len, DMA_MEM_TO_DEV);
	if (IS_ERR(cdesc)) {
		dev_dbg(&dac->pdev->dev, "could not prepare cyclic DMA\n");
		return PTR_ERR(cdesc);
	}

	cdesc->period_callback = atmel_abdac_dma_period_done;
	cdesc->period_callback_param = dac;

	dac->dma.cdesc = cdesc;

	set_bit(DMA_READY, &dac->flags);

	return 0;
}

/* Hardware capabilities template; rate_min/max are patched in open(). */
static struct snd_pcm_hardware atmel_abdac_hw = {
	.info			= (SNDRV_PCM_INFO_MMAP
				  | SNDRV_PCM_INFO_MMAP_VALID
				  | SNDRV_PCM_INFO_INTERLEAVED
				  | SNDRV_PCM_INFO_BLOCK_TRANSFER
				  | SNDRV_PCM_INFO_RESUME
				  | SNDRV_PCM_INFO_PAUSE),
	.formats		= (SNDRV_PCM_FMTBIT_S16_BE),
	.rates			= (SNDRV_PCM_RATE_KNOT),
	.rate_min		= RATE_MIN,
	.rate_max		= RATE_MAX,
	.channels_min		= 2,
	.channels_max		= 2,
	.buffer_bytes_max	= 64 * 4096,
	.period_bytes_min	= 4096,
	.period_bytes_max	= 4096,
	.periods_min		= 6,
	.periods_max		= 64,
};

/*
 * PCM open: bind the substream, narrow the advertised rate range to what
 * set_sample_rates() discovered, and install the discrete-rate constraint.
 */
static int atmel_abdac_open(struct snd_pcm_substream *substream)
{
	struct atmel_abdac *dac = snd_pcm_substream_chip(substream);

	dac->substream = substream;
	/* rates[] is filled in ascending order; last entry is the max */
	atmel_abdac_hw.rate_max = dac->rates[dac->rates_num - 1];
	atmel_abdac_hw.rate_min = dac->rates[0];
	substream->runtime->hw = atmel_abdac_hw;

	return snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &dac->constraints_rates);
}

static int atmel_abdac_close(struct snd_pcm_substream *substream)
{
	struct atmel_abdac *dac = snd_pcm_substream_chip(substream);

	dac->substream = NULL;

	return 0;
}

/*
 * hw_params: allocate the DMA buffer; if the buffer changed, the old
 * cyclic descriptor is stale and must be freed.
 */
static int atmel_abdac_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params)
{
	struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
	int retval;

	retval = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
	if (retval < 0)
		return retval;
	/* snd_pcm_lib_malloc_pages returns 1 if buffer is changed. */
	if (retval == 1)
		if (test_and_clear_bit(DMA_READY, &dac->flags))
			dw_dma_cyclic_free(dac->dma.chan);

	return retval;
}

static int atmel_abdac_hw_free(struct snd_pcm_substream *substream)
{
	struct atmel_abdac *dac = snd_pcm_substream_chip(substream);

	if (test_and_clear_bit(DMA_READY, &dac->flags))
		dw_dma_cyclic_free(dac->dma.chan);

	return snd_pcm_lib_free_pages(substream);
}

/*
 * prepare: program the sample clock (256 * Fs) and build the cyclic DMA
 * descriptor if it is not already in place.
 */
static int atmel_abdac_prepare(struct snd_pcm_substream *substream)
{
	struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
	int retval;

	retval = clk_set_rate(dac->sample_clk, 256 * substream->runtime->rate);
	if (retval)
		return retval;

	if (!test_bit(DMA_READY, &dac->flags))
		retval = atmel_abdac_prepare_dma(dac, substream, DMA_TO_DEVICE);

	return retval;
}

/*
 * trigger: start/stop the sample clock, the cyclic DMA and the DAC enable
 * bit together.  Runs in atomic context per ALSA trigger rules.
 */
static int atmel_abdac_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
	int retval = 0;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: /* fall through */
	case SNDRV_PCM_TRIGGER_RESUME: /* fall through */
	case SNDRV_PCM_TRIGGER_START:
		clk_enable(dac->sample_clk);
		retval = dw_dma_cyclic_start(dac->dma.chan);
		if (retval)
			goto out;
		dac_writel(dac, CTRL, DAC_BIT(EN));
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH: /* fall through */
	case SNDRV_PCM_TRIGGER_SUSPEND: /* fall through */
	case SNDRV_PCM_TRIGGER_STOP:
		dw_dma_cyclic_stop(dac->dma.chan);
		dac_writel(dac, DATA, 0);
		dac_writel(dac, CTRL, 0);
		clk_disable(dac->sample_clk);
		break;
	default:
		retval = -EINVAL;
		break;
	}
out:
	return retval;
}

/*
 * pointer: derive the current position inside the ring buffer from the
 * DMA engine's source address.
 */
static snd_pcm_uframes_t atmel_abdac_pointer(struct snd_pcm_substream *substream)
{
	struct atmel_abdac *dac = snd_pcm_substream_chip(substream);
	struct snd_pcm_runtime *runtime = substream->runtime;
	snd_pcm_uframes_t frames;
	unsigned long bytes;

	bytes = dw_dma_get_src_addr(dac->dma.chan);
	bytes -= runtime->dma_addr;

	frames = bytes_to_frames(runtime, bytes);
	if (frames >= runtime->buffer_size)
		frames -= runtime->buffer_size;

	return frames;
}

/* IRQ handler: only the underrun condition is expected; clear and log. */
static irqreturn_t abdac_interrupt(int irq, void *dev_id)
{
	struct atmel_abdac *dac = dev_id;
	u32 status;

	status = dac_readl(dac, INT_STATUS);
	if (status & DAC_BIT(UNDERRUN)) {
		dev_err(&dac->pdev->dev, "underrun detected\n");
		dac_writel(dac, INT_CLR, DAC_BIT(UNDERRUN));
	} else {
		dev_err(&dac->pdev->dev, "spurious interrupt (status=0x%x)\n", status);
		dac_writel(dac, INT_CLR, status);
	}

	return IRQ_HANDLED;
}

static struct snd_pcm_ops atmel_abdac_ops = {
	.open		= atmel_abdac_open,
	.close		= atmel_abdac_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= atmel_abdac_hw_params,
	.hw_free	= atmel_abdac_hw_free,
	.prepare	= atmel_abdac_prepare,
	.trigger	= atmel_abdac_trigger,
	.pointer	= atmel_abdac_pointer,
};

/*
 * Create the playback-only PCM device and preallocate DMA-capable buffer
 * pages for it.  Returns 0 on success or a negative errno.
 */
static int __devinit atmel_abdac_pcm_new(struct atmel_abdac *dac)
{
	struct snd_pcm_hardware hw = atmel_abdac_hw;
	struct snd_pcm *pcm;
	int retval;

	retval = snd_pcm_new(dac->card, dac->card->shortname, dac->pdev->id, 1, 0, &pcm);
	if (retval)
		return retval;

	strcpy(pcm->name, dac->card->shortname);
	pcm->private_data = dac;
	pcm->info_flags = 0;
	dac->pcm = pcm;

	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &atmel_abdac_ops);

	retval = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, &dac->pdev->dev, hw.periods_min * hw.period_bytes_min, hw.buffer_bytes_max);

	return retval;
}

/* DMA channel filter: accept only channels from our slave's DMA device. */
static bool filter(struct dma_chan *chan, void *slave)
{
	struct dw_dma_slave *dws = slave;

	if (dws->dma_dev == chan->device->dev) {
		chan->private = dws;
		return true;
	} else
		return false;
}

/*
 * Probe the achievable sample rates by walking the generic clock's
 * power-of-two divider down from RATE_MAX, keeping rates at or below the
 * peripheral clock.  (Function continues beyond this chunk.)
 */
static int set_sample_rates(struct atmel_abdac *dac)
{
	long new_rate = RATE_MAX;
	int retval = -EINVAL;
	int index = 0;

	/* we start at 192 kHz and work our way down to 5112 Hz */
	while (new_rate >= RATE_MIN && index < (MAX_NUM_RATES + 1)) {
		new_rate = clk_round_rate(dac->sample_clk, 256 * new_rate);
		if (new_rate < 0)
			break;
		/* make sure we are below the ABDAC clock */
		if (new_rate <= clk_get_rate(dac->pclk)) {
			dac->rates[index] = new_rate / 256;
			index++;
		}
		/* divide by 256 and then by two to get next rate */
		new_rate /= 256 * 2;
} if (index) { int i; /* reverse array, smallest go first */ for (i = 0; i < (index / 2); i++) { unsigned int tmp = dac->rates[index - 1 - i]; dac->rates[index - 1 - i] = dac->rates[i]; dac->rates[i] = tmp; } dac->constraints_rates.count = index; dac->constraints_rates.list = dac->rates; dac->constraints_rates.mask = 0; dac->rates_num = index; retval = 0; } return retval; } static int __devinit atmel_abdac_probe(struct platform_device *pdev) { struct snd_card *card; struct atmel_abdac *dac; struct resource *regs; struct atmel_abdac_pdata *pdata; struct clk *pclk; struct clk *sample_clk; int retval; int irq; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_dbg(&pdev->dev, "no memory resource\n"); return -ENXIO; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_dbg(&pdev->dev, "could not get IRQ number\n"); return irq; } pdata = pdev->dev.platform_data; if (!pdata) { dev_dbg(&pdev->dev, "no platform data\n"); return -ENXIO; } pclk = clk_get(&pdev->dev, "pclk"); if (IS_ERR(pclk)) { dev_dbg(&pdev->dev, "no peripheral clock\n"); return PTR_ERR(pclk); } sample_clk = clk_get(&pdev->dev, "sample_clk"); if (IS_ERR(sample_clk)) { dev_dbg(&pdev->dev, "no sample clock\n"); retval = PTR_ERR(sample_clk); goto out_put_pclk; } clk_enable(pclk); retval = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, THIS_MODULE, sizeof(struct atmel_abdac), &card); if (retval) { dev_dbg(&pdev->dev, "could not create sound card device\n"); goto out_put_sample_clk; } dac = get_dac(card); dac->irq = irq; dac->card = card; dac->pclk = pclk; dac->sample_clk = sample_clk; dac->pdev = pdev; retval = set_sample_rates(dac); if (retval < 0) { dev_dbg(&pdev->dev, "could not set supported rates\n"); goto out_free_card; } dac->regs = ioremap(regs->start, resource_size(regs)); if (!dac->regs) { dev_dbg(&pdev->dev, "could not remap register memory\n"); goto out_free_card; } /* make sure the DAC is silent and disabled */ dac_writel(dac, DATA, 0); dac_writel(dac, CTRL, 0); retval 
= request_irq(irq, abdac_interrupt, 0, "abdac", dac); if (retval) { dev_dbg(&pdev->dev, "could not request irq\n"); goto out_unmap_regs; } snd_card_set_dev(card, &pdev->dev); if (pdata->dws.dma_dev) { dma_cap_mask_t mask; dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); dac->dma.chan = dma_request_channel(mask, filter, &pdata->dws); if (dac->dma.chan) { struct dma_slave_config dma_conf = { .dst_addr = regs->start + DAC_DATA, .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES, .src_maxburst = 1, .dst_maxburst = 1, .direction = DMA_MEM_TO_DEV, .device_fc = false, }; dmaengine_slave_config(dac->dma.chan, &dma_conf); } } if (!pdata->dws.dma_dev || !dac->dma.chan) { dev_dbg(&pdev->dev, "DMA not available\n"); retval = -ENODEV; goto out_unset_card_dev; } strcpy(card->driver, "Atmel ABDAC"); strcpy(card->shortname, "Atmel ABDAC"); sprintf(card->longname, "Atmel Audio Bitstream DAC"); retval = atmel_abdac_pcm_new(dac); if (retval) { dev_dbg(&pdev->dev, "could not register ABDAC pcm device\n"); goto out_release_dma; } retval = snd_card_register(card); if (retval) { dev_dbg(&pdev->dev, "could not register sound card\n"); goto out_release_dma; } platform_set_drvdata(pdev, card); dev_info(&pdev->dev, "Atmel ABDAC at 0x%p using %s\n", dac->regs, dev_name(&dac->dma.chan->dev->device)); return retval; out_release_dma: dma_release_channel(dac->dma.chan); dac->dma.chan = NULL; out_unset_card_dev: snd_card_set_dev(card, NULL); free_irq(irq, dac); out_unmap_regs: iounmap(dac->regs); out_free_card: snd_card_free(card); out_put_sample_clk: clk_put(sample_clk); clk_disable(pclk); out_put_pclk: clk_put(pclk); return retval; } #ifdef CONFIG_PM static int atmel_abdac_suspend(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); struct atmel_abdac *dac = card->private_data; dw_dma_cyclic_stop(dac->dma.chan); clk_disable(dac->sample_clk); clk_disable(dac->pclk); return 0; } static int atmel_abdac_resume(struct device *pdev) { struct snd_card *card = dev_get_drvdata(pdev); struct 
atmel_abdac *dac = card->private_data; clk_enable(dac->pclk); clk_enable(dac->sample_clk); if (test_bit(DMA_READY, &dac->flags)) dw_dma_cyclic_start(dac->dma.chan); return 0; } static SIMPLE_DEV_PM_OPS(atmel_abdac_pm, atmel_abdac_suspend, atmel_abdac_resume); #define ATMEL_ABDAC_PM_OPS &atmel_abdac_pm #else #define ATMEL_ABDAC_PM_OPS NULL #endif static int __devexit atmel_abdac_remove(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); struct atmel_abdac *dac = get_dac(card); clk_put(dac->sample_clk); clk_disable(dac->pclk); clk_put(dac->pclk); dma_release_channel(dac->dma.chan); dac->dma.chan = NULL; snd_card_set_dev(card, NULL); iounmap(dac->regs); free_irq(dac->irq, dac); snd_card_free(card); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver atmel_abdac_driver = { .remove = __devexit_p(atmel_abdac_remove), .driver = { .name = "atmel_abdac", .owner = THIS_MODULE, .pm = ATMEL_ABDAC_PM_OPS, }, }; static int __init atmel_abdac_init(void) { return platform_driver_probe(&atmel_abdac_driver, atmel_abdac_probe); } module_init(atmel_abdac_init); static void __exit atmel_abdac_exit(void) { platform_driver_unregister(&atmel_abdac_driver); } module_exit(atmel_abdac_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Driver for Atmel Audio Bitstream DAC (ABDAC)"); MODULE_AUTHOR("Hans-Christian Egtvedt <egtvedt@samfundet.no>");
gpl-2.0
raghav3276/QEMU-Device-Emulation
hw/scsi/esp-pci.c
17
15025
/* * QEMU ESP/NCR53C9x emulation * * Copyright (c) 2005-2006 Fabrice Bellard * Copyright (c) 2012 Herve Poussineau * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include "hw/pci/pci.h" #include "hw/nvram/eeprom93xx.h" #include "hw/scsi/esp.h" #include "trace.h" #include "qemu/log.h" #define TYPE_AM53C974_DEVICE "am53c974" #define PCI_ESP(obj) \ OBJECT_CHECK(PCIESPState, (obj), TYPE_AM53C974_DEVICE) #define DMA_CMD 0x0 #define DMA_STC 0x1 #define DMA_SPA 0x2 #define DMA_WBC 0x3 #define DMA_WAC 0x4 #define DMA_STAT 0x5 #define DMA_SMDLA 0x6 #define DMA_WMAC 0x7 #define DMA_CMD_MASK 0x03 #define DMA_CMD_DIAG 0x04 #define DMA_CMD_MDL 0x10 #define DMA_CMD_INTE_P 0x20 #define DMA_CMD_INTE_D 0x40 #define DMA_CMD_DIR 0x80 #define DMA_STAT_PWDN 0x01 #define DMA_STAT_ERROR 0x02 #define DMA_STAT_ABORT 0x04 #define DMA_STAT_DONE 0x08 #define DMA_STAT_SCSIINT 0x10 #define DMA_STAT_BCMBLT 0x20 #define SBAC_STATUS 0x1000 typedef struct PCIESPState { /*< private >*/ PCIDevice parent_obj; /*< public >*/ MemoryRegion io; uint32_t dma_regs[8]; uint32_t sbac; ESPState esp; } PCIESPState; static void esp_pci_handle_idle(PCIESPState *pci, uint32_t val) { trace_esp_pci_dma_idle(val); esp_dma_enable(&pci->esp, 0, 0); } static void esp_pci_handle_blast(PCIESPState *pci, uint32_t val) { trace_esp_pci_dma_blast(val); qemu_log_mask(LOG_UNIMP, "am53c974: cmd BLAST not implemented\n"); } static void esp_pci_handle_abort(PCIESPState *pci, uint32_t val) { trace_esp_pci_dma_abort(val); if (pci->esp.current_req) { scsi_req_cancel(pci->esp.current_req); } } static void esp_pci_handle_start(PCIESPState *pci, uint32_t val) { trace_esp_pci_dma_start(val); pci->dma_regs[DMA_WBC] = pci->dma_regs[DMA_STC]; pci->dma_regs[DMA_WAC] = pci->dma_regs[DMA_SPA]; pci->dma_regs[DMA_WMAC] = pci->dma_regs[DMA_SMDLA]; pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT | DMA_STAT_DONE | DMA_STAT_ABORT | DMA_STAT_ERROR | DMA_STAT_PWDN); esp_dma_enable(&pci->esp, 0, 1); } static void esp_pci_dma_write(PCIESPState *pci, uint32_t saddr, uint32_t val) { trace_esp_pci_dma_write(saddr, pci->dma_regs[saddr], val); switch (saddr) { case DMA_CMD: pci->dma_regs[saddr] = 
val; switch (val & DMA_CMD_MASK) { case 0x0: /* IDLE */ esp_pci_handle_idle(pci, val); break; case 0x1: /* BLAST */ esp_pci_handle_blast(pci, val); break; case 0x2: /* ABORT */ esp_pci_handle_abort(pci, val); break; case 0x3: /* START */ esp_pci_handle_start(pci, val); break; default: /* can't happen */ abort(); } break; case DMA_STC: case DMA_SPA: case DMA_SMDLA: pci->dma_regs[saddr] = val; break; case DMA_STAT: if (!(pci->sbac & SBAC_STATUS)) { /* clear some bits on write */ uint32_t mask = DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE; pci->dma_regs[DMA_STAT] &= ~(val & mask); } break; default: trace_esp_pci_error_invalid_write_dma(val, saddr); return; } } static uint32_t esp_pci_dma_read(PCIESPState *pci, uint32_t saddr) { uint32_t val; val = pci->dma_regs[saddr]; if (saddr == DMA_STAT) { if (pci->esp.rregs[ESP_RSTAT] & STAT_INT) { val |= DMA_STAT_SCSIINT; } if (pci->sbac & SBAC_STATUS) { pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_ERROR | DMA_STAT_ABORT | DMA_STAT_DONE); } } trace_esp_pci_dma_read(saddr, val); return val; } static void esp_pci_io_write(void *opaque, hwaddr addr, uint64_t val, unsigned int size) { PCIESPState *pci = opaque; if (size < 4 || addr & 3) { /* need to upgrade request: we only support 4-bytes accesses */ uint32_t current = 0, mask; int shift; if (addr < 0x40) { current = pci->esp.wregs[addr >> 2]; } else if (addr < 0x60) { current = pci->dma_regs[(addr - 0x40) >> 2]; } else if (addr < 0x74) { current = pci->sbac; } shift = (4 - size) * 8; mask = (~(uint32_t)0 << shift) >> shift; shift = ((4 - (addr & 3)) & 3) * 8; val <<= shift; val |= current & ~(mask << shift); addr &= ~3; size = 4; } if (addr < 0x40) { /* SCSI core reg */ esp_reg_write(&pci->esp, addr >> 2, val); } else if (addr < 0x60) { /* PCI DMA CCB */ esp_pci_dma_write(pci, (addr - 0x40) >> 2, val); } else if (addr == 0x70) { /* DMA SCSI Bus and control */ trace_esp_pci_sbac_write(pci->sbac, val); pci->sbac = val; } else { trace_esp_pci_error_invalid_write((int)addr); } } static 
uint64_t esp_pci_io_read(void *opaque, hwaddr addr, unsigned int size) { PCIESPState *pci = opaque; uint32_t ret; if (addr < 0x40) { /* SCSI core reg */ ret = esp_reg_read(&pci->esp, addr >> 2); } else if (addr < 0x60) { /* PCI DMA CCB */ ret = esp_pci_dma_read(pci, (addr - 0x40) >> 2); } else if (addr == 0x70) { /* DMA SCSI Bus and control */ trace_esp_pci_sbac_read(pci->sbac); ret = pci->sbac; } else { /* Invalid region */ trace_esp_pci_error_invalid_read((int)addr); ret = 0; } /* give only requested data */ ret >>= (addr & 3) * 8; ret &= ~(~(uint64_t)0 << (8 * size)); return ret; } static void esp_pci_dma_memory_rw(PCIESPState *pci, uint8_t *buf, int len, DMADirection dir) { dma_addr_t addr; DMADirection expected_dir; if (pci->dma_regs[DMA_CMD] & DMA_CMD_DIR) { expected_dir = DMA_DIRECTION_FROM_DEVICE; } else { expected_dir = DMA_DIRECTION_TO_DEVICE; } if (dir != expected_dir) { trace_esp_pci_error_invalid_dma_direction(); return; } if (pci->dma_regs[DMA_STAT] & DMA_CMD_MDL) { qemu_log_mask(LOG_UNIMP, "am53c974: MDL transfer not implemented\n"); } addr = pci->dma_regs[DMA_SPA]; if (pci->dma_regs[DMA_WBC] < len) { len = pci->dma_regs[DMA_WBC]; } pci_dma_rw(PCI_DEVICE(pci), addr, buf, len, dir); /* update status registers */ pci->dma_regs[DMA_WBC] -= len; pci->dma_regs[DMA_WAC] += len; } static void esp_pci_dma_memory_read(void *opaque, uint8_t *buf, int len) { PCIESPState *pci = opaque; esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_TO_DEVICE); } static void esp_pci_dma_memory_write(void *opaque, uint8_t *buf, int len) { PCIESPState *pci = opaque; esp_pci_dma_memory_rw(pci, buf, len, DMA_DIRECTION_FROM_DEVICE); } static const MemoryRegionOps esp_pci_io_ops = { .read = esp_pci_io_read, .write = esp_pci_io_write, .endianness = DEVICE_LITTLE_ENDIAN, .impl = { .min_access_size = 1, .max_access_size = 4, }, }; static void esp_pci_hard_reset(DeviceState *dev) { PCIESPState *pci = PCI_ESP(dev); esp_hard_reset(&pci->esp); pci->dma_regs[DMA_CMD] &= ~(DMA_CMD_DIR | 
DMA_CMD_INTE_D | DMA_CMD_INTE_P | DMA_CMD_MDL | DMA_CMD_DIAG | DMA_CMD_MASK); pci->dma_regs[DMA_WBC] &= ~0xffff; pci->dma_regs[DMA_WAC] = 0xffffffff; pci->dma_regs[DMA_STAT] &= ~(DMA_STAT_BCMBLT | DMA_STAT_SCSIINT | DMA_STAT_DONE | DMA_STAT_ABORT | DMA_STAT_ERROR); pci->dma_regs[DMA_WMAC] = 0xfffffffd; } static const VMStateDescription vmstate_esp_pci_scsi = { .name = "pciespscsi", .version_id = 0, .minimum_version_id = 0, .minimum_version_id_old = 0, .fields = (VMStateField[]) { VMSTATE_PCI_DEVICE(parent_obj, PCIESPState), VMSTATE_BUFFER_UNSAFE(dma_regs, PCIESPState, 0, 8 * sizeof(uint32_t)), VMSTATE_STRUCT(esp, PCIESPState, 0, vmstate_esp, ESPState), VMSTATE_END_OF_LIST() } }; static void esp_pci_command_complete(SCSIRequest *req, uint32_t status, size_t resid) { ESPState *s = req->hba_private; PCIESPState *pci = container_of(s, PCIESPState, esp); esp_command_complete(req, status, resid); pci->dma_regs[DMA_WBC] = 0; pci->dma_regs[DMA_STAT] |= DMA_STAT_DONE; } static const struct SCSIBusInfo esp_pci_scsi_info = { .tcq = false, .max_target = ESP_MAX_DEVS, .max_lun = 7, .transfer_data = esp_transfer_data, .complete = esp_pci_command_complete, .cancel = esp_request_cancelled, }; static int esp_pci_scsi_init(PCIDevice *dev) { PCIESPState *pci = PCI_ESP(dev); DeviceState *d = DEVICE(dev); ESPState *s = &pci->esp; uint8_t *pci_conf; Error *err = NULL; pci_conf = dev->config; /* Interrupt pin A */ pci_conf[PCI_INTERRUPT_PIN] = 0x01; s->dma_memory_read = esp_pci_dma_memory_read; s->dma_memory_write = esp_pci_dma_memory_write; s->dma_opaque = pci; s->chip_id = TCHI_AM53C974; memory_region_init_io(&pci->io, OBJECT(pci), &esp_pci_io_ops, pci, "esp-io", 0x80); pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &pci->io); s->irq = dev->irq[0]; scsi_bus_new(&s->bus, d, &esp_pci_scsi_info, NULL); if (!d->hotplugged) { scsi_bus_legacy_handle_cmdline(&s->bus, &err); if (err != NULL) { error_free(err); return -1; } } return 0; } static void esp_pci_scsi_uninit(PCIDevice *d) { 
PCIESPState *pci = PCI_ESP(d); memory_region_destroy(&pci->io); } static void esp_pci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->init = esp_pci_scsi_init; k->exit = esp_pci_scsi_uninit; k->vendor_id = PCI_VENDOR_ID_AMD; k->device_id = PCI_DEVICE_ID_AMD_SCSI; k->revision = 0x10; k->class_id = PCI_CLASS_STORAGE_SCSI; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); dc->desc = "AMD Am53c974 PCscsi-PCI SCSI adapter"; dc->reset = esp_pci_hard_reset; dc->vmsd = &vmstate_esp_pci_scsi; } static const TypeInfo esp_pci_info = { .name = TYPE_AM53C974_DEVICE, .parent = TYPE_PCI_DEVICE, .instance_size = sizeof(PCIESPState), .class_init = esp_pci_class_init, }; typedef struct { PCIESPState pci; eeprom_t *eeprom; } DC390State; #define TYPE_DC390_DEVICE "dc390" #define DC390(obj) \ OBJECT_CHECK(DC390State, obj, TYPE_DC390_DEVICE) #define EE_ADAPT_SCSI_ID 64 #define EE_MODE2 65 #define EE_DELAY 66 #define EE_TAG_CMD_NUM 67 #define EE_ADAPT_OPTIONS 68 #define EE_BOOT_SCSI_ID 69 #define EE_BOOT_SCSI_LUN 70 #define EE_CHKSUM1 126 #define EE_CHKSUM2 127 #define EE_ADAPT_OPTION_F6_F8_AT_BOOT 0x01 #define EE_ADAPT_OPTION_BOOT_FROM_CDROM 0x02 #define EE_ADAPT_OPTION_INT13 0x04 #define EE_ADAPT_OPTION_SCAM_SUPPORT 0x08 static uint32_t dc390_read_config(PCIDevice *dev, uint32_t addr, int l) { DC390State *pci = DC390(dev); uint32_t val; val = pci_default_read_config(dev, addr, l); if (addr == 0x00 && l == 1) { /* First byte of address space is AND-ed with EEPROM DO line */ if (!eeprom93xx_read(pci->eeprom)) { val &= ~0xff; } } return val; } static void dc390_write_config(PCIDevice *dev, uint32_t addr, uint32_t val, int l) { DC390State *pci = DC390(dev); if (addr == 0x80) { /* EEPROM write */ int eesk = val & 0x80 ? 1 : 0; int eedi = val & 0x40 ? 
1 : 0; eeprom93xx_write(pci->eeprom, 1, eesk, eedi); } else if (addr == 0xc0) { /* EEPROM CS low */ eeprom93xx_write(pci->eeprom, 0, 0, 0); } else { pci_default_write_config(dev, addr, val, l); } } static int dc390_scsi_init(PCIDevice *dev) { DC390State *pci = DC390(dev); uint8_t *contents; uint16_t chksum = 0; int i, ret; /* init base class */ ret = esp_pci_scsi_init(dev); if (ret < 0) { return ret; } /* EEPROM */ pci->eeprom = eeprom93xx_new(DEVICE(dev), 64); /* set default eeprom values */ contents = (uint8_t *)eeprom93xx_data(pci->eeprom); for (i = 0; i < 16; i++) { contents[i * 2] = 0x57; contents[i * 2 + 1] = 0x00; } contents[EE_ADAPT_SCSI_ID] = 7; contents[EE_MODE2] = 0x0f; contents[EE_TAG_CMD_NUM] = 0x04; contents[EE_ADAPT_OPTIONS] = EE_ADAPT_OPTION_F6_F8_AT_BOOT | EE_ADAPT_OPTION_BOOT_FROM_CDROM | EE_ADAPT_OPTION_INT13; /* update eeprom checksum */ for (i = 0; i < EE_CHKSUM1; i += 2) { chksum += contents[i] + (((uint16_t)contents[i + 1]) << 8); } chksum = 0x1234 - chksum; contents[EE_CHKSUM1] = chksum & 0xff; contents[EE_CHKSUM2] = chksum >> 8; return 0; } static void dc390_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); k->init = dc390_scsi_init; k->config_read = dc390_read_config; k->config_write = dc390_write_config; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); dc->desc = "Tekram DC-390 SCSI adapter"; } static const TypeInfo dc390_info = { .name = "dc390", .parent = TYPE_AM53C974_DEVICE, .instance_size = sizeof(DC390State), .class_init = dc390_class_init, }; static void esp_pci_register_types(void) { type_register_static(&esp_pci_info); type_register_static(&dc390_info); } type_init(esp_pci_register_types)
gpl-2.0
caglar10ur/linux-2.6.27.y
drivers/s390/char/sclp_con.c
17
6514
/* * drivers/s390/char/sclp_con.c * SCLP line mode console driver * * S390 version * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Martin Peschke <mpeschke@de.ibm.com> * Martin Schwidefsky <schwidefsky@de.ibm.com> */ #include <linux/kmod.h> #include <linux/console.h> #include <linux/init.h> #include <linux/timer.h> #include <linux/jiffies.h> #include <linux/bootmem.h> #include <linux/termios.h> #include <linux/err.h> #include "sclp.h" #include "sclp_rw.h" #include "sclp_tty.h" #define sclp_console_major 4 /* TTYAUX_MAJOR */ #define sclp_console_minor 64 #define sclp_console_name "ttyS" /* Lock to guard over changes to global variables */ static spinlock_t sclp_con_lock; /* List of free pages that can be used for console output buffering */ static struct list_head sclp_con_pages; /* List of full struct sclp_buffer structures ready for output */ static struct list_head sclp_con_outqueue; /* Counter how many buffers are emitted (max 1) and how many */ /* are on the output queue. */ static int sclp_con_buffer_count; /* Pointer to current console buffer */ static struct sclp_buffer *sclp_conbuf; /* Timer for delayed output of console messages */ static struct timer_list sclp_con_timer; /* Output format for console messages */ static unsigned short sclp_con_columns; static unsigned short sclp_con_width_htab; static void sclp_conbuf_callback(struct sclp_buffer *buffer, int rc) { unsigned long flags; void *page; do { page = sclp_unmake_buffer(buffer); spin_lock_irqsave(&sclp_con_lock, flags); /* Remove buffer from outqueue */ list_del(&buffer->list); sclp_con_buffer_count--; list_add_tail((struct list_head *) page, &sclp_con_pages); /* Check if there is a pending buffer on the out queue. 
*/ buffer = NULL; if (!list_empty(&sclp_con_outqueue)) buffer = list_entry(sclp_con_outqueue.next, struct sclp_buffer, list); spin_unlock_irqrestore(&sclp_con_lock, flags); } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); } static void sclp_conbuf_emit(void) { struct sclp_buffer* buffer; unsigned long flags; int count; int rc; spin_lock_irqsave(&sclp_con_lock, flags); buffer = sclp_conbuf; sclp_conbuf = NULL; if (buffer == NULL) { spin_unlock_irqrestore(&sclp_con_lock, flags); return; } list_add_tail(&buffer->list, &sclp_con_outqueue); count = sclp_con_buffer_count++; spin_unlock_irqrestore(&sclp_con_lock, flags); if (count) return; rc = sclp_emit_buffer(buffer, sclp_conbuf_callback); if (rc) sclp_conbuf_callback(buffer, rc); } /* * When this routine is called from the timer then we flush the * temporary write buffer without further waiting on a final new line. */ static void sclp_console_timeout(unsigned long data) { sclp_conbuf_emit(); } /* * Writes the given message to S390 system console */ static void sclp_console_write(struct console *console, const char *message, unsigned int count) { unsigned long flags; void *page; int written; if (count == 0) return; spin_lock_irqsave(&sclp_con_lock, flags); /* * process escape characters, write message into buffer, * send buffer to SCLP */ do { /* make sure we have a console output buffer */ if (sclp_conbuf == NULL) { while (list_empty(&sclp_con_pages)) { spin_unlock_irqrestore(&sclp_con_lock, flags); sclp_sync_wait(); spin_lock_irqsave(&sclp_con_lock, flags); } page = sclp_con_pages.next; list_del((struct list_head *) page); sclp_conbuf = sclp_make_buffer(page, sclp_con_columns, sclp_con_width_htab); } /* try to write the string to the current output buffer */ written = sclp_write(sclp_conbuf, (const unsigned char *) message, count); if (written == count) break; /* * Not all characters could be written to the current * output buffer. 
Emit the buffer, create a new buffer * and then output the rest of the string. */ spin_unlock_irqrestore(&sclp_con_lock, flags); sclp_conbuf_emit(); spin_lock_irqsave(&sclp_con_lock, flags); message += written; count -= written; } while (count > 0); /* Setup timer to output current console buffer after 1/10 second */ if (sclp_conbuf != NULL && sclp_chars_in_buffer(sclp_conbuf) != 0 && !timer_pending(&sclp_con_timer)) { init_timer(&sclp_con_timer); sclp_con_timer.function = sclp_console_timeout; sclp_con_timer.data = 0UL; sclp_con_timer.expires = jiffies + HZ/10; add_timer(&sclp_con_timer); } spin_unlock_irqrestore(&sclp_con_lock, flags); } static struct tty_driver * sclp_console_device(struct console *c, int *index) { *index = c->index; return sclp_tty_driver; } /* * This routine is called from panic when the kernel * is going to give up. We have to make sure that all buffers * will be flushed to the SCLP. */ static void sclp_console_unblank(void) { unsigned long flags; sclp_conbuf_emit(); spin_lock_irqsave(&sclp_con_lock, flags); if (timer_pending(&sclp_con_timer)) del_timer(&sclp_con_timer); while (sclp_con_buffer_count > 0) { spin_unlock_irqrestore(&sclp_con_lock, flags); sclp_sync_wait(); spin_lock_irqsave(&sclp_con_lock, flags); } spin_unlock_irqrestore(&sclp_con_lock, flags); } /* * used to register the SCLP console to the kernel and to * give printk necessary information */ static struct console sclp_console = { .name = sclp_console_name, .write = sclp_console_write, .device = sclp_console_device, .unblank = sclp_console_unblank, .flags = CON_PRINTBUFFER, .index = 0 /* ttyS0 */ }; /* * called by console_init() in drivers/char/tty_io.c at boot-time. 
*/ static int __init sclp_console_init(void) { void *page; int i; int rc; if (!CONSOLE_IS_SCLP) return 0; rc = sclp_rw_init(); if (rc) return rc; /* Allocate pages for output buffering */ INIT_LIST_HEAD(&sclp_con_pages); for (i = 0; i < MAX_CONSOLE_PAGES; i++) { page = alloc_bootmem_low_pages(PAGE_SIZE); list_add_tail((struct list_head *) page, &sclp_con_pages); } INIT_LIST_HEAD(&sclp_con_outqueue); spin_lock_init(&sclp_con_lock); sclp_con_buffer_count = 0; sclp_conbuf = NULL; init_timer(&sclp_con_timer); /* Set output format */ if (MACHINE_IS_VM) /* * save 4 characters for the CPU number * written at start of each line by VM/CP */ sclp_con_columns = 76; else sclp_con_columns = 80; sclp_con_width_htab = 8; /* enable printk-access to this driver */ register_console(&sclp_console); return 0; } console_initcall(sclp_console_init);
gpl-2.0
openwrt/bcm63xx-next
arch/arm/kvm/coproc.c
273
33380
/* * Copyright (C) 2012 - Virtual Open Systems and Columbia University * Authors: Rusty Russell <rusty@rustcorp.com.au> * Christoffer Dall <c.dall@virtualopensystems.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. */ #include <linux/mm.h> #include <linux/kvm_host.h> #include <linux/uaccess.h> #include <asm/kvm_arm.h> #include <asm/kvm_host.h> #include <asm/kvm_emulate.h> #include <asm/kvm_coproc.h> #include <asm/cacheflush.h> #include <asm/cputype.h> #include <trace/events/kvm.h> #include <asm/vfp.h> #include "../vfp/vfpinstr.h" #include "trace.h" #include "coproc.h" /****************************************************************************** * Co-processor emulation *****************************************************************************/ /* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */ static u32 cache_levels; /* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */ #define CSSELR_MAX 12 int kvm_handle_cp10_id(struct kvm_vcpu *vcpu, struct kvm_run *run) { kvm_inject_undefined(vcpu); return 1; } int kvm_handle_cp_0_13_access(struct kvm_vcpu *vcpu, struct kvm_run *run) { /* * We can get here, if the host has been built without VFPv3 support, * but the guest attempted a floating point operation. 
*/ kvm_inject_undefined(vcpu); return 1; } int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run) { kvm_inject_undefined(vcpu); return 1; } int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run) { kvm_inject_undefined(vcpu); return 1; } static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { /* * Compute guest MPIDR. We build a virtual cluster out of the * vcpu_id, but we read the 'U' bit from the underlying * hardware directly. */ vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) | ((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) | (vcpu->vcpu_id & 3)); } /* TRM entries A7:4.3.31 A15:4.3.28 - RO WI */ static bool access_actlr(struct kvm_vcpu *vcpu, const struct coproc_params *p, const struct coproc_reg *r) { if (p->is_write) return ignore_write(vcpu, p); *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR]; return true; } /* TRM entries A7:4.3.56, A15:4.3.60 - R/O. */ static bool access_cbar(struct kvm_vcpu *vcpu, const struct coproc_params *p, const struct coproc_reg *r) { if (p->is_write) return write_to_read_only(vcpu, p); return read_zero(vcpu, p); } /* TRM entries A7:4.3.49, A15:4.3.48 - R/O WI */ static bool access_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_params *p, const struct coproc_reg *r) { if (p->is_write) return ignore_write(vcpu, p); *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR]; return true; } static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { u32 l2ctlr, ncores; asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr)); l2ctlr &= ~(3 << 24); ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1; /* How many cores in the current cluster and the next ones */ ncores -= (vcpu->vcpu_id & ~3); /* Cap it to the maximum number of cores in a single cluster */ ncores = min(ncores, 3U); l2ctlr |= (ncores & 3) << 24; vcpu->arch.cp15[c9_L2CTLR] = l2ctlr; } static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r) { u32 actlr; /* ACTLR 
contains SMP bit: make sure you create all cpus first! */ asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr)); /* Make the SMP bit consistent with the guest configuration */ if (atomic_read(&vcpu->kvm->online_vcpus) > 1) actlr |= 1U << 6; else actlr &= ~(1U << 6); vcpu->arch.cp15[c1_ACTLR] = actlr; } /* * TRM entries: A7:4.3.50, A15:4.3.49 * R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */ static bool access_l2ectlr(struct kvm_vcpu *vcpu, const struct coproc_params *p, const struct coproc_reg *r) { if (p->is_write) return ignore_write(vcpu, p); *vcpu_reg(vcpu, p->Rt1) = 0; return true; } /* See note at ARM ARM B1.14.4 */ static bool access_dcsw(struct kvm_vcpu *vcpu, const struct coproc_params *p, const struct coproc_reg *r) { unsigned long val; int cpu; if (!p->is_write) return read_from_write_only(vcpu, p); cpu = get_cpu(); cpumask_setall(&vcpu->arch.require_dcache_flush); cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush); /* If we were already preempted, take the long way around */ if (cpu != vcpu->arch.last_pcpu) { flush_cache_all(); goto done; } val = *vcpu_reg(vcpu, p->Rt1); switch (p->CRm) { case 6: /* Upgrade DCISW to DCCISW, as per HCR.SWIO */ case 14: /* DCCISW */ asm volatile("mcr p15, 0, %0, c7, c14, 2" : : "r" (val)); break; case 10: /* DCCSW */ asm volatile("mcr p15, 0, %0, c7, c10, 2" : : "r" (val)); break; } done: put_cpu(); return true; } /* * We could trap ID_DFR0 and tell the guest we don't support performance * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was * NAKed, so it will read the PMCR anyway. * * Therefore we tell the guest we have 0 counters. Unfortunately, we * must always support PMCCNTR (the cycle counter): we just RAZ/WI for * all PM registers, which doesn't crash the guest kernel at least. 
*/ static bool pm_fake(struct kvm_vcpu *vcpu, const struct coproc_params *p, const struct coproc_reg *r) { if (p->is_write) return ignore_write(vcpu, p); else return read_zero(vcpu, p); } #define access_pmcr pm_fake #define access_pmcntenset pm_fake #define access_pmcntenclr pm_fake #define access_pmovsr pm_fake #define access_pmselr pm_fake #define access_pmceid0 pm_fake #define access_pmceid1 pm_fake #define access_pmccntr pm_fake #define access_pmxevtyper pm_fake #define access_pmxevcntr pm_fake #define access_pmuserenr pm_fake #define access_pmintenset pm_fake #define access_pmintenclr pm_fake /* Architected CP15 registers. * CRn denotes the primary register number, but is copied to the CRm in the * user space API for 64-bit register access in line with the terminology used * in the ARM ARM. * Important: Must be sorted ascending by CRn, CRM, Op1, Op2 and with 64-bit * registers preceding 32-bit ones. */ static const struct coproc_reg cp15_regs[] = { /* MPIDR: we use VMPIDR for guest access. */ { CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32, NULL, reset_mpidr, c0_MPIDR }, /* CSSELR: swapped by interrupt.S. */ { CRn( 0), CRm( 0), Op1( 2), Op2( 0), is32, NULL, reset_unknown, c0_CSSELR }, /* ACTLR: trapped by HCR.TAC bit. */ { CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32, access_actlr, reset_actlr, c1_ACTLR }, /* CPACR: swapped by interrupt.S. */ { CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32, NULL, reset_val, c1_CPACR, 0x00000000 }, /* TTBR0/TTBR1: swapped by interrupt.S. */ { CRm64( 2), Op1( 0), is64, NULL, reset_unknown64, c2_TTBR0 }, { CRm64( 2), Op1( 1), is64, NULL, reset_unknown64, c2_TTBR1 }, /* TTBCR: swapped by interrupt.S. */ { CRn( 2), CRm( 0), Op1( 0), Op2( 2), is32, NULL, reset_val, c2_TTBCR, 0x00000000 }, /* DACR: swapped by interrupt.S. */ { CRn( 3), CRm( 0), Op1( 0), Op2( 0), is32, NULL, reset_unknown, c3_DACR }, /* DFSR/IFSR/ADFSR/AIFSR: swapped by interrupt.S. 
*/ { CRn( 5), CRm( 0), Op1( 0), Op2( 0), is32, NULL, reset_unknown, c5_DFSR }, { CRn( 5), CRm( 0), Op1( 0), Op2( 1), is32, NULL, reset_unknown, c5_IFSR }, { CRn( 5), CRm( 1), Op1( 0), Op2( 0), is32, NULL, reset_unknown, c5_ADFSR }, { CRn( 5), CRm( 1), Op1( 0), Op2( 1), is32, NULL, reset_unknown, c5_AIFSR }, /* DFAR/IFAR: swapped by interrupt.S. */ { CRn( 6), CRm( 0), Op1( 0), Op2( 0), is32, NULL, reset_unknown, c6_DFAR }, { CRn( 6), CRm( 0), Op1( 0), Op2( 2), is32, NULL, reset_unknown, c6_IFAR }, /* PAR swapped by interrupt.S */ { CRm64( 7), Op1( 0), is64, NULL, reset_unknown64, c7_PAR }, /* * DC{C,I,CI}SW operations: */ { CRn( 7), CRm( 6), Op1( 0), Op2( 2), is32, access_dcsw}, { CRn( 7), CRm(10), Op1( 0), Op2( 2), is32, access_dcsw}, { CRn( 7), CRm(14), Op1( 0), Op2( 2), is32, access_dcsw}, /* * L2CTLR access (guest wants to know #CPUs). */ { CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32, access_l2ctlr, reset_l2ctlr, c9_L2CTLR }, { CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr}, /* * Dummy performance monitor implementation. */ { CRn( 9), CRm(12), Op1( 0), Op2( 0), is32, access_pmcr}, { CRn( 9), CRm(12), Op1( 0), Op2( 1), is32, access_pmcntenset}, { CRn( 9), CRm(12), Op1( 0), Op2( 2), is32, access_pmcntenclr}, { CRn( 9), CRm(12), Op1( 0), Op2( 3), is32, access_pmovsr}, { CRn( 9), CRm(12), Op1( 0), Op2( 5), is32, access_pmselr}, { CRn( 9), CRm(12), Op1( 0), Op2( 6), is32, access_pmceid0}, { CRn( 9), CRm(12), Op1( 0), Op2( 7), is32, access_pmceid1}, { CRn( 9), CRm(13), Op1( 0), Op2( 0), is32, access_pmccntr}, { CRn( 9), CRm(13), Op1( 0), Op2( 1), is32, access_pmxevtyper}, { CRn( 9), CRm(13), Op1( 0), Op2( 2), is32, access_pmxevcntr}, { CRn( 9), CRm(14), Op1( 0), Op2( 0), is32, access_pmuserenr}, { CRn( 9), CRm(14), Op1( 0), Op2( 1), is32, access_pmintenset}, { CRn( 9), CRm(14), Op1( 0), Op2( 2), is32, access_pmintenclr}, /* PRRR/NMRR (aka MAIR0/MAIR1): swapped by interrupt.S. 
*/ { CRn(10), CRm( 2), Op1( 0), Op2( 0), is32, NULL, reset_unknown, c10_PRRR}, { CRn(10), CRm( 2), Op1( 0), Op2( 1), is32, NULL, reset_unknown, c10_NMRR}, /* VBAR: swapped by interrupt.S. */ { CRn(12), CRm( 0), Op1( 0), Op2( 0), is32, NULL, reset_val, c12_VBAR, 0x00000000 }, /* CONTEXTIDR/TPIDRURW/TPIDRURO/TPIDRPRW: swapped by interrupt.S. */ { CRn(13), CRm( 0), Op1( 0), Op2( 1), is32, NULL, reset_val, c13_CID, 0x00000000 }, { CRn(13), CRm( 0), Op1( 0), Op2( 2), is32, NULL, reset_unknown, c13_TID_URW }, { CRn(13), CRm( 0), Op1( 0), Op2( 3), is32, NULL, reset_unknown, c13_TID_URO }, { CRn(13), CRm( 0), Op1( 0), Op2( 4), is32, NULL, reset_unknown, c13_TID_PRIV }, /* CNTKCTL: swapped by interrupt.S. */ { CRn(14), CRm( 1), Op1( 0), Op2( 0), is32, NULL, reset_val, c14_CNTKCTL, 0x00000000 }, /* The Configuration Base Address Register. */ { CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar}, }; /* Target specific emulation tables */ static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS]; void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table) { unsigned int i; for (i = 1; i < table->num; i++) BUG_ON(cmp_reg(&table->table[i-1], &table->table[i]) >= 0); target_tables[table->target] = table; } /* Get specific register table for this target. 
*/ static const struct coproc_reg *get_target_table(unsigned target, size_t *num) { struct kvm_coproc_target_table *table; table = target_tables[target]; *num = table->num; return table->table; } static const struct coproc_reg *find_reg(const struct coproc_params *params, const struct coproc_reg table[], unsigned int num) { unsigned int i; for (i = 0; i < num; i++) { const struct coproc_reg *r = &table[i]; if (params->is_64bit != r->is_64) continue; if (params->CRn != r->CRn) continue; if (params->CRm != r->CRm) continue; if (params->Op1 != r->Op1) continue; if (params->Op2 != r->Op2) continue; return r; } return NULL; } static int emulate_cp15(struct kvm_vcpu *vcpu, const struct coproc_params *params) { size_t num; const struct coproc_reg *table, *r; trace_kvm_emulate_cp15_imp(params->Op1, params->Rt1, params->CRn, params->CRm, params->Op2, params->is_write); table = get_target_table(vcpu->arch.target, &num); /* Search target-specific then generic table. */ r = find_reg(params, table, num); if (!r) r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs)); if (likely(r)) { /* If we don't have an accessor, we should never get here! */ BUG_ON(!r->access); if (likely(r->access(vcpu, params, r))) { /* Skip instruction, since it was emulated */ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu)); return 1; } /* If access function fails, it should complain. 
*/ } else { kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu)); print_cp_instr(params); } kvm_inject_undefined(vcpu); return 1; } /** * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run) { struct coproc_params params; params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); params.is_64bit = true; params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 16) & 0xf; params.Op2 = 0; params.Rt2 = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; params.CRn = 0; return emulate_cp15(vcpu, &params); } static void reset_coproc_regs(struct kvm_vcpu *vcpu, const struct coproc_reg *table, size_t num) { unsigned long i; for (i = 0; i < num; i++) if (table[i].reset) table[i].reset(vcpu, &table[i]); } /** * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access * @vcpu: The VCPU pointer * @run: The kvm_run struct */ int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run) { struct coproc_params params; params.CRm = (kvm_vcpu_get_hsr(vcpu) >> 1) & 0xf; params.Rt1 = (kvm_vcpu_get_hsr(vcpu) >> 5) & 0xf; params.is_write = ((kvm_vcpu_get_hsr(vcpu) & 1) == 0); params.is_64bit = false; params.CRn = (kvm_vcpu_get_hsr(vcpu) >> 10) & 0xf; params.Op1 = (kvm_vcpu_get_hsr(vcpu) >> 14) & 0x7; params.Op2 = (kvm_vcpu_get_hsr(vcpu) >> 17) & 0x7; params.Rt2 = 0; return emulate_cp15(vcpu, &params); } /****************************************************************************** * Userspace API *****************************************************************************/ static bool index_to_params(u64 id, struct coproc_params *params) { switch (id & KVM_REG_SIZE_MASK) { case KVM_REG_SIZE_U32: /* Any unused index bits means it's not valid. 
*/ if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK | KVM_REG_ARM_32_CRN_MASK | KVM_REG_ARM_CRM_MASK | KVM_REG_ARM_OPC1_MASK | KVM_REG_ARM_32_OPC2_MASK)) return false; params->is_64bit = false; params->CRn = ((id & KVM_REG_ARM_32_CRN_MASK) >> KVM_REG_ARM_32_CRN_SHIFT); params->CRm = ((id & KVM_REG_ARM_CRM_MASK) >> KVM_REG_ARM_CRM_SHIFT); params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) >> KVM_REG_ARM_OPC1_SHIFT); params->Op2 = ((id & KVM_REG_ARM_32_OPC2_MASK) >> KVM_REG_ARM_32_OPC2_SHIFT); return true; case KVM_REG_SIZE_U64: /* Any unused index bits means it's not valid. */ if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_COPROC_MASK | KVM_REG_ARM_CRM_MASK | KVM_REG_ARM_OPC1_MASK)) return false; params->is_64bit = true; /* CRm to CRn: see cp15_to_index for details */ params->CRn = ((id & KVM_REG_ARM_CRM_MASK) >> KVM_REG_ARM_CRM_SHIFT); params->Op1 = ((id & KVM_REG_ARM_OPC1_MASK) >> KVM_REG_ARM_OPC1_SHIFT); params->Op2 = 0; params->CRm = 0; return true; default: return false; } } /* Decode an index value, and find the cp15 coproc_reg entry. */ static const struct coproc_reg *index_to_coproc_reg(struct kvm_vcpu *vcpu, u64 id) { size_t num; const struct coproc_reg *table, *r; struct coproc_params params; /* We only do cp15 for now. */ if ((id & KVM_REG_ARM_COPROC_MASK) >> KVM_REG_ARM_COPROC_SHIFT != 15) return NULL; if (!index_to_params(id, &params)) return NULL; table = get_target_table(vcpu->arch.target, &num); r = find_reg(&params, table, num); if (!r) r = find_reg(&params, cp15_regs, ARRAY_SIZE(cp15_regs)); /* Not saved in the cp15 array? */ if (r && !r->reg) r = NULL; return r; } /* * These are the invariant cp15 registers: we let the guest see the host * versions of these, so they're part of the guest state. * * A future CPU may provide a mechanism to present different values to * the guest, or a future kvm may trap them. */ /* Unfortunately, there's no register-argument for mrc, so generate. 
*/ #define FUNCTION_FOR32(crn, crm, op1, op2, name) \ static void get_##name(struct kvm_vcpu *v, \ const struct coproc_reg *r) \ { \ u32 val; \ \ asm volatile("mrc p15, " __stringify(op1) \ ", %0, c" __stringify(crn) \ ", c" __stringify(crm) \ ", " __stringify(op2) "\n" : "=r" (val)); \ ((struct coproc_reg *)r)->val = val; \ } FUNCTION_FOR32(0, 0, 0, 0, MIDR) FUNCTION_FOR32(0, 0, 0, 1, CTR) FUNCTION_FOR32(0, 0, 0, 2, TCMTR) FUNCTION_FOR32(0, 0, 0, 3, TLBTR) FUNCTION_FOR32(0, 0, 0, 6, REVIDR) FUNCTION_FOR32(0, 1, 0, 0, ID_PFR0) FUNCTION_FOR32(0, 1, 0, 1, ID_PFR1) FUNCTION_FOR32(0, 1, 0, 2, ID_DFR0) FUNCTION_FOR32(0, 1, 0, 3, ID_AFR0) FUNCTION_FOR32(0, 1, 0, 4, ID_MMFR0) FUNCTION_FOR32(0, 1, 0, 5, ID_MMFR1) FUNCTION_FOR32(0, 1, 0, 6, ID_MMFR2) FUNCTION_FOR32(0, 1, 0, 7, ID_MMFR3) FUNCTION_FOR32(0, 2, 0, 0, ID_ISAR0) FUNCTION_FOR32(0, 2, 0, 1, ID_ISAR1) FUNCTION_FOR32(0, 2, 0, 2, ID_ISAR2) FUNCTION_FOR32(0, 2, 0, 3, ID_ISAR3) FUNCTION_FOR32(0, 2, 0, 4, ID_ISAR4) FUNCTION_FOR32(0, 2, 0, 5, ID_ISAR5) FUNCTION_FOR32(0, 0, 1, 1, CLIDR) FUNCTION_FOR32(0, 0, 1, 7, AIDR) /* ->val is filled in by kvm_invariant_coproc_table_init() */ static struct coproc_reg invariant_cp15[] = { { CRn( 0), CRm( 0), Op1( 0), Op2( 0), is32, NULL, get_MIDR }, { CRn( 0), CRm( 0), Op1( 0), Op2( 1), is32, NULL, get_CTR }, { CRn( 0), CRm( 0), Op1( 0), Op2( 2), is32, NULL, get_TCMTR }, { CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR }, { CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR }, { CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 }, { CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 }, { CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 }, { CRn( 0), CRm( 1), Op1( 0), Op2( 3), is32, NULL, get_ID_AFR0 }, { CRn( 0), CRm( 1), Op1( 0), Op2( 4), is32, NULL, get_ID_MMFR0 }, { CRn( 0), CRm( 1), Op1( 0), Op2( 5), is32, NULL, get_ID_MMFR1 }, { CRn( 0), CRm( 1), Op1( 0), Op2( 6), is32, NULL, get_ID_MMFR2 }, { CRn( 0), CRm( 1), Op1( 0), Op2( 7), 
is32, NULL, get_ID_MMFR3 }, { CRn( 0), CRm( 2), Op1( 0), Op2( 0), is32, NULL, get_ID_ISAR0 }, { CRn( 0), CRm( 2), Op1( 0), Op2( 1), is32, NULL, get_ID_ISAR1 }, { CRn( 0), CRm( 2), Op1( 0), Op2( 2), is32, NULL, get_ID_ISAR2 }, { CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 }, { CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 }, { CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 }, { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR }, { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR }, }; static int reg_from_user(void *val, const void __user *uaddr, u64 id) { /* This Just Works because we are little endian. */ if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0) return -EFAULT; return 0; } static int reg_to_user(void __user *uaddr, const void *val, u64 id) { /* This Just Works because we are little endian. */ if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0) return -EFAULT; return 0; } static int get_invariant_cp15(u64 id, void __user *uaddr) { struct coproc_params params; const struct coproc_reg *r; if (!index_to_params(id, &params)) return -ENOENT; r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15)); if (!r) return -ENOENT; return reg_to_user(uaddr, &r->val, id); } static int set_invariant_cp15(u64 id, void __user *uaddr) { struct coproc_params params; const struct coproc_reg *r; int err; u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */ if (!index_to_params(id, &params)) return -ENOENT; r = find_reg(&params, invariant_cp15, ARRAY_SIZE(invariant_cp15)); if (!r) return -ENOENT; err = reg_from_user(&val, uaddr, id); if (err) return err; /* This is what we mean by invariant: you can't change it. */ if (r->val != val) return -EINVAL; return 0; } static bool is_valid_cache(u32 val) { u32 level, ctype; if (val >= CSSELR_MAX) return -ENOENT; /* Bottom bit is Instruction or Data bit. Next 3 bits are level. 
*/ level = (val >> 1); ctype = (cache_levels >> (level * 3)) & 7; switch (ctype) { case 0: /* No cache */ return false; case 1: /* Instruction cache only */ return (val & 1); case 2: /* Data cache only */ case 4: /* Unified cache */ return !(val & 1); case 3: /* Separate instruction and data caches */ return true; default: /* Reserved: we can't know instruction or data. */ return false; } } /* Which cache CCSIDR represents depends on CSSELR value. */ static u32 get_ccsidr(u32 csselr) { u32 ccsidr; /* Make sure noone else changes CSSELR during this! */ local_irq_disable(); /* Put value into CSSELR */ asm volatile("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr)); isb(); /* Read result out of CCSIDR */ asm volatile("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr)); local_irq_enable(); return ccsidr; } static int demux_c15_get(u64 id, void __user *uaddr) { u32 val; u32 __user *uval = uaddr; /* Fail if we have unknown bits set. */ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) return -ENOENT; switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { case KVM_REG_ARM_DEMUX_ID_CCSIDR: if (KVM_REG_SIZE(id) != 4) return -ENOENT; val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) >> KVM_REG_ARM_DEMUX_VAL_SHIFT; if (!is_valid_cache(val)) return -ENOENT; return put_user(get_ccsidr(val), uval); default: return -ENOENT; } } static int demux_c15_set(u64 id, void __user *uaddr) { u32 val, newval; u32 __user *uval = uaddr; /* Fail if we have unknown bits set. */ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) return -ENOENT; switch (id & KVM_REG_ARM_DEMUX_ID_MASK) { case KVM_REG_ARM_DEMUX_ID_CCSIDR: if (KVM_REG_SIZE(id) != 4) return -ENOENT; val = (id & KVM_REG_ARM_DEMUX_VAL_MASK) >> KVM_REG_ARM_DEMUX_VAL_SHIFT; if (!is_valid_cache(val)) return -ENOENT; if (get_user(newval, uval)) return -EFAULT; /* This is also invariant: you can't change it. 
*/ if (newval != get_ccsidr(val)) return -EINVAL; return 0; default: return -ENOENT; } } #ifdef CONFIG_VFPv3 static const int vfp_sysregs[] = { KVM_REG_ARM_VFP_FPEXC, KVM_REG_ARM_VFP_FPSCR, KVM_REG_ARM_VFP_FPINST, KVM_REG_ARM_VFP_FPINST2, KVM_REG_ARM_VFP_MVFR0, KVM_REG_ARM_VFP_MVFR1, KVM_REG_ARM_VFP_FPSID }; static unsigned int num_fp_regs(void) { if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) >> MVFR0_A_SIMD_BIT) == 2) return 32; else return 16; } static unsigned int num_vfp_regs(void) { /* Normal FP regs + control regs. */ return num_fp_regs() + ARRAY_SIZE(vfp_sysregs); } static int copy_vfp_regids(u64 __user *uindices) { unsigned int i; const u64 u32reg = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_VFP; const u64 u64reg = KVM_REG_ARM | KVM_REG_SIZE_U64 | KVM_REG_ARM_VFP; for (i = 0; i < num_fp_regs(); i++) { if (put_user((u64reg | KVM_REG_ARM_VFP_BASE_REG) + i, uindices)) return -EFAULT; uindices++; } for (i = 0; i < ARRAY_SIZE(vfp_sysregs); i++) { if (put_user(u32reg | vfp_sysregs[i], uindices)) return -EFAULT; uindices++; } return num_vfp_regs(); } static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) { u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); u32 val; /* Fail if we have unknown bits set. */ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) return -ENOENT; if (vfpid < num_fp_regs()) { if (KVM_REG_SIZE(id) != 8) return -ENOENT; return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid], id); } /* FP control registers are all 32 bit. 
*/ if (KVM_REG_SIZE(id) != 4) return -ENOENT; switch (vfpid) { case KVM_REG_ARM_VFP_FPEXC: return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id); case KVM_REG_ARM_VFP_FPSCR: return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id); case KVM_REG_ARM_VFP_FPINST: return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id); case KVM_REG_ARM_VFP_FPINST2: return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id); case KVM_REG_ARM_VFP_MVFR0: val = fmrx(MVFR0); return reg_to_user(uaddr, &val, id); case KVM_REG_ARM_VFP_MVFR1: val = fmrx(MVFR1); return reg_to_user(uaddr, &val, id); case KVM_REG_ARM_VFP_FPSID: val = fmrx(FPSID); return reg_to_user(uaddr, &val, id); default: return -ENOENT; } } static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) { u32 vfpid = (id & KVM_REG_ARM_VFP_MASK); u32 val; /* Fail if we have unknown bits set. */ if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1))) return -ENOENT; if (vfpid < num_fp_regs()) { if (KVM_REG_SIZE(id) != 8) return -ENOENT; return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid], uaddr, id); } /* FP control registers are all 32 bit. */ if (KVM_REG_SIZE(id) != 4) return -ENOENT; switch (vfpid) { case KVM_REG_ARM_VFP_FPEXC: return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id); case KVM_REG_ARM_VFP_FPSCR: return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id); case KVM_REG_ARM_VFP_FPINST: return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id); case KVM_REG_ARM_VFP_FPINST2: return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id); /* These are invariant. 
*/ case KVM_REG_ARM_VFP_MVFR0: if (reg_from_user(&val, uaddr, id)) return -EFAULT; if (val != fmrx(MVFR0)) return -EINVAL; return 0; case KVM_REG_ARM_VFP_MVFR1: if (reg_from_user(&val, uaddr, id)) return -EFAULT; if (val != fmrx(MVFR1)) return -EINVAL; return 0; case KVM_REG_ARM_VFP_FPSID: if (reg_from_user(&val, uaddr, id)) return -EFAULT; if (val != fmrx(FPSID)) return -EINVAL; return 0; default: return -ENOENT; } } #else /* !CONFIG_VFPv3 */ static unsigned int num_vfp_regs(void) { return 0; } static int copy_vfp_regids(u64 __user *uindices) { return 0; } static int vfp_get_reg(const struct kvm_vcpu *vcpu, u64 id, void __user *uaddr) { return -ENOENT; } static int vfp_set_reg(struct kvm_vcpu *vcpu, u64 id, const void __user *uaddr) { return -ENOENT; } #endif /* !CONFIG_VFPv3 */ int kvm_arm_coproc_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { const struct coproc_reg *r; void __user *uaddr = (void __user *)(long)reg->addr; if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) return demux_c15_get(reg->id, uaddr); if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) return vfp_get_reg(vcpu, reg->id, uaddr); r = index_to_coproc_reg(vcpu, reg->id); if (!r) return get_invariant_cp15(reg->id, uaddr); /* Note: copies two regs if size is 64 bit. 
*/ return reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id); } int kvm_arm_coproc_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg) { const struct coproc_reg *r; void __user *uaddr = (void __user *)(long)reg->addr; if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX) return demux_c15_set(reg->id, uaddr); if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_VFP) return vfp_set_reg(vcpu, reg->id, uaddr); r = index_to_coproc_reg(vcpu, reg->id); if (!r) return set_invariant_cp15(reg->id, uaddr); /* Note: copies two regs if size is 64 bit */ return reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id); } static unsigned int num_demux_regs(void) { unsigned int i, count = 0; for (i = 0; i < CSSELR_MAX; i++) if (is_valid_cache(i)) count++; return count; } static int write_demux_regids(u64 __user *uindices) { u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX; unsigned int i; val |= KVM_REG_ARM_DEMUX_ID_CCSIDR; for (i = 0; i < CSSELR_MAX; i++) { if (!is_valid_cache(i)) continue; if (put_user(val | i, uindices)) return -EFAULT; uindices++; } return 0; } static u64 cp15_to_index(const struct coproc_reg *reg) { u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT); if (reg->is_64) { val |= KVM_REG_SIZE_U64; val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); /* * CRn always denotes the primary coproc. reg. nr. for the * in-kernel representation, but the user space API uses the * CRm for the encoding, because it is modelled after the * MRRC/MCRR instructions: see the ARM ARM rev. 
c page * B3-1445 */ val |= (reg->CRn << KVM_REG_ARM_CRM_SHIFT); } else { val |= KVM_REG_SIZE_U32; val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT); val |= (reg->Op2 << KVM_REG_ARM_32_OPC2_SHIFT); val |= (reg->CRm << KVM_REG_ARM_CRM_SHIFT); val |= (reg->CRn << KVM_REG_ARM_32_CRN_SHIFT); } return val; } static bool copy_reg_to_user(const struct coproc_reg *reg, u64 __user **uind) { if (!*uind) return true; if (put_user(cp15_to_index(reg), *uind)) return false; (*uind)++; return true; } /* Assumed ordered tables, see kvm_coproc_table_init. */ static int walk_cp15(struct kvm_vcpu *vcpu, u64 __user *uind) { const struct coproc_reg *i1, *i2, *end1, *end2; unsigned int total = 0; size_t num; /* We check for duplicates here, to allow arch-specific overrides. */ i1 = get_target_table(vcpu->arch.target, &num); end1 = i1 + num; i2 = cp15_regs; end2 = cp15_regs + ARRAY_SIZE(cp15_regs); BUG_ON(i1 == end1 || i2 == end2); /* Walk carefully, as both tables may refer to the same register. */ while (i1 || i2) { int cmp = cmp_reg(i1, i2); /* target-specific overrides generic entry. */ if (cmp <= 0) { /* Ignore registers we trap but don't save. */ if (i1->reg) { if (!copy_reg_to_user(i1, &uind)) return -EFAULT; total++; } } else { /* Ignore registers we trap but don't save. */ if (i2->reg) { if (!copy_reg_to_user(i2, &uind)) return -EFAULT; total++; } } if (cmp <= 0 && ++i1 == end1) i1 = NULL; if (cmp >= 0 && ++i2 == end2) i2 = NULL; } return total; } unsigned long kvm_arm_num_coproc_regs(struct kvm_vcpu *vcpu) { return ARRAY_SIZE(invariant_cp15) + num_demux_regs() + num_vfp_regs() + walk_cp15(vcpu, (u64 __user *)NULL); } int kvm_arm_copy_coproc_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) { unsigned int i; int err; /* Then give them all the invariant registers' indices. 
*/ for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) { if (put_user(cp15_to_index(&invariant_cp15[i]), uindices)) return -EFAULT; uindices++; } err = walk_cp15(vcpu, uindices); if (err < 0) return err; uindices += err; err = copy_vfp_regids(uindices); if (err < 0) return err; uindices += err; return write_demux_regids(uindices); } void kvm_coproc_table_init(void) { unsigned int i; /* Make sure tables are unique and in order. */ for (i = 1; i < ARRAY_SIZE(cp15_regs); i++) BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0); /* We abuse the reset function to overwrite the table itself. */ for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++) invariant_cp15[i].reset(NULL, &invariant_cp15[i]); /* * CLIDR format is awkward, so clean it up. See ARM B4.1.20: * * If software reads the Cache Type fields from Ctype1 * upwards, once it has seen a value of 0b000, no caches * exist at further-out levels of the hierarchy. So, for * example, if Ctype3 is the first Cache Type field with a * value of 0b000, the values of Ctype4 to Ctype7 must be * ignored. */ asm volatile("mrc p15, 1, %0, c0, c0, 1" : "=r" (cache_levels)); for (i = 0; i < 7; i++) if (((cache_levels >> (i*3)) & 7) == 0) break; /* Clear all higher bits. */ cache_levels &= (1 << (i*3))-1; } /** * kvm_reset_coprocs - sets cp15 registers to reset value * @vcpu: The VCPU pointer * * This function finds the right table above and sets the registers on the * virtual CPU struct to their architecturally defined reset values. */ void kvm_reset_coprocs(struct kvm_vcpu *vcpu) { size_t num; const struct coproc_reg *table; /* Catch someone adding a register without putting in reset entry. */ memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15)); /* Generic chip reset first (so target could override). 
*/ reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs)); table = get_target_table(vcpu->arch.target, &num); reset_coproc_regs(vcpu, table, num); for (num = 1; num < NR_CP15_REGS; num++) if (vcpu->arch.cp15[num] == 0x42424242) panic("Didn't reset vcpu->arch.cp15[%zi]", num); }
gpl-2.0
PRJosh/kernel_msm-3.10
arch/arm/mach-msm/board-samarium.c
273
3641
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/err.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/memory.h> #include <linux/msm_tsens.h> #include <linux/msm_thermal.h> #include <linux/clk/msm-clk-provider.h> #include <linux/regulator/rpm-smd-regulator.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> #include <mach/board.h> #include <mach/gpiomux.h> #include <mach/msm_iomap.h> #include <mach/msm_memtypes.h> #include <mach/restart.h> #include <soc/qcom/socinfo.h> #include <soc/qcom/smem.h> #include <soc/qcom/spm.h> #include <soc/qcom/pm.h> #include <soc/qcom/rpm-smd.h> #include <mach/msm_smd.h> #include "board-dt.h" #include "clock.h" #include "platsmp.h" static struct of_dev_auxdata msmsamarium_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF9824000, \ "msm_sdcc.1", NULL), OF_DEV_AUXDATA("qcom,sdhci-msm", 0xF9824900, "msm_sdcc.1", NULL), OF_DEV_AUXDATA("qcom,msm-sdcc", 0xF98A4000, \ "msm_sdcc.2", NULL), OF_DEV_AUXDATA("qcom,sdhci-msm", 0xF98A4900, "msm_sdcc.2", NULL), OF_DEV_AUXDATA("qcom,hsusb-otg", 0xF9A55000, "msm_otg", NULL), OF_DEV_AUXDATA("qcom,spi-qup-v2", 0xF9923000, \ "spi_qsd.1", NULL), {}, }; void __init msmsamarium_reserve(void) { of_scan_flat_dt(dt_scan_for_memory_reserve, NULL); } static void __init msmsamarium_early_memory(void) { of_scan_flat_dt(dt_scan_for_memory_hole, NULL); } /* * Used to satisfy dependencies for devices that need to be * run early or in a 
particular order. Most likely your device doesn't fall * into this category, and thus the driver should not be added here. The * EPROBE_DEFER can satisfy most dependency problems. */ void __init msmsamarium_add_drivers(void) { msm_smd_init(); msm_rpm_driver_init(); msm_pm_sleep_status_init(); rpm_smd_regulator_driver_init(); msm_spm_device_init(); if (of_board_is_rumi()) msm_clock_init(&msmsamarium_rumi_clock_init_data); else msm_clock_init(&msmsamarium_clock_init_data); tsens_tm_init_driver(); msm_thermal_device_init(); } static void __init msmsamarium_map_io(void) { msm_map_msmsamarium_io(); } void __init msmsamarium_init(void) { struct of_dev_auxdata *adata = msmsamarium_auxdata_lookup; /* * populate devices from DT first so smem probe will get called as part * of msm_smem_init. socinfo_init needs smem support so call * msm_smem_init before it. */ board_dt_populate(adata); msm_smem_init(); if (socinfo_init() < 0) pr_err("%s: socinfo_init() failed\n", __func__); msmsamarium_init_gpiomux(); msmsamarium_add_drivers(); } void __init msmsamarium_init_very_early(void) { msmsamarium_early_memory(); } static const char *msmsamarium_dt_match[] __initconst = { "qcom,msmsamarium", "qcom,apqsamarium", NULL }; DT_MACHINE_START(MSMSAMARIUM_DT, "Qualcomm MSM Samarium(Flattened Device Tree)") .map_io = msmsamarium_map_io, .init_machine = msmsamarium_init, .dt_compat = msmsamarium_dt_match, .reserve = msmsamarium_reserve, .init_very_early = msmsamarium_init_very_early, .restart = msm_restart, .smp = &msm8962_smp_ops, MACHINE_END
gpl-2.0
invisiblek/android_kernel_samsung_jaspervzw
drivers/hwmon/msm_adc.c
273
38786
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/err.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/uaccess.h> #include <linux/msm_adc.h> #include <linux/pmic8058-xoadc.h> #include <linux/slab.h> #include <linux/semaphore.h> #include <mach/dal.h> #define MSM_ADC_DRIVER_NAME "msm_adc" #define MSM_ADC_MAX_FNAME 15 #define MSM_ADC_DALRPC_DEVICEID 0x02000067 #define MSM_ADC_DALRPC_PORT_NAME "DAL00" #define MSM_ADC_DALRPC_CPU SMD_APPS_MODEM #define MSM_ADC_DALRPC_CMD_REQ_CONV 9 #define MSM_ADC_DALRPC_CMD_INPUT_PROP 11 #define MSM_ADC_DALRC_CONV_TIMEOUT (5 * HZ) /* 5 seconds */ enum dal_error { DAL_ERROR_INVALID_DEVICE_IDX = 1, DAL_ERROR_INVALID_CHANNEL_IDX, DAL_ERROR_NULL_POINTER, DAL_ERROR_DEVICE_QUEUE_FULL, DAL_ERROR_INVALID_PROPERTY_LENGTH, DAL_ERROR_REMOTE_EVENT_POOL_FULL }; enum dal_result_status { DAL_RESULT_STATUS_INVALID, DAL_RESULT_STATUS_VALID }; struct dal_conv_state { struct dal_conv_slot context[MSM_ADC_DEV_MAX_INFLIGHT]; struct list_head slots; struct mutex list_lock; struct semaphore slot_count; }; struct adc_dev { char *name; uint32_t nchans; struct dal_conv_state conv; struct dal_translation transl; struct sensor_device_attribute *sens_attr; char **fnames; }; struct msm_adc_drv { /* Common to both XOADC and EPM */ struct platform_device 
*pdev; struct device *hwmon; struct miscdevice misc; /* XOADC variables */ struct sensor_device_attribute *sens_attr; struct workqueue_struct *wq; atomic_t online; atomic_t total_outst; wait_queue_head_t total_outst_wait; /* EPM variables */ void *dev_h; struct adc_dev *devs[MSM_ADC_MAX_NUM_DEVS]; struct mutex prop_lock; atomic_t rpc_online; atomic_t rpc_total_outst; wait_queue_head_t rpc_total_outst_wait; }; static bool epm_init; static bool epm_fluid_enabled; /* Needed to support file_op interfaces */ static struct msm_adc_drv *msm_adc_drv; static bool conv_first_request; static ssize_t msm_adc_show_curr(struct device *dev, struct device_attribute *devattr, char *buf); static int msm_rpc_adc_blocking_conversion(struct msm_adc_drv *msm_adc, uint32_t chan, struct adc_chan_result *result); static int msm_adc_blocking_conversion(struct msm_adc_drv *msm_adc, uint32_t chan, struct adc_chan_result *result); static int msm_adc_open(struct inode *inode, struct file *file) { struct msm_client_data *client; struct msm_adc_drv *msm_adc = msm_adc_drv; struct platform_device *pdev = msm_adc->pdev; client = kzalloc(sizeof(struct msm_client_data), GFP_KERNEL); if (!client) { dev_err(&pdev->dev, "Unable to allocate memory\n"); return -ENOMEM; } if (!try_module_get(THIS_MODULE)) { kfree(client); return -EACCES; } mutex_init(&client->lock); INIT_LIST_HEAD(&client->complete_list); init_waitqueue_head(&client->data_wait); init_waitqueue_head(&client->outst_wait); client->online = 1; file->private_data = client; return nonseekable_open(inode, file); } static inline void msm_adc_restore_slot(struct dal_conv_state *conv_s, struct dal_conv_slot *slot) { mutex_lock(&conv_s->list_lock); list_add(&slot->list, &conv_s->slots); mutex_unlock(&conv_s->list_lock); up(&conv_s->slot_count); } static int no_pending_client_requests(struct msm_client_data *client) { mutex_lock(&client->lock); if (client->num_outstanding == 0) { mutex_unlock(&client->lock); return 1; } mutex_unlock(&client->lock); 
return 0; } static int data_avail(struct msm_client_data *client, uint32_t *pending) { uint32_t completed; mutex_lock(&client->lock); completed = client->num_complete; mutex_unlock(&client->lock); if (completed > 0) { if (pending != NULL) *pending = completed; return 1; } return 0; } static int msm_adc_release(struct inode *inode, struct file *file) { struct msm_client_data *client = file->private_data; struct adc_conv_slot *slot, *tmp; int rc; struct msm_adc_platform_data *pdata = msm_adc_drv->pdev->dev.platform_data; struct msm_adc_channels *channel = pdata->channel; module_put(THIS_MODULE); mutex_lock(&client->lock); /* prevent any further requests while we teardown the client */ client->online = 0; mutex_unlock(&client->lock); /* * We may still have outstanding transactions in flight from this * client that have not completed. Make sure they're completed * before removing the client. */ rc = wait_event_interruptible(client->outst_wait, no_pending_client_requests(client)); if (rc) { pr_err("%s: wait_event_interruptible failed rc = %d\n", __func__, rc); return rc; } /* * All transactions have completed. Add slot resources back to the * appropriate devices. 
*/ list_for_each_entry_safe(slot, tmp, &client->complete_list, list) { slot->client = NULL; list_del(&slot->list); channel[slot->conv.result.chan].adc_access_fn->adc_restore_slot( channel[slot->conv.result.chan].adc_dev_instance, slot); } kfree(client); return 0; } static int msm_adc_translate_dal_to_hwmon(struct msm_adc_drv *msm_adc, uint32_t chan, struct adc_dev_spec *dest) { struct dal_translation *transl; struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data; int i; for (i = 0; i < pdata->num_adc; i++) { transl = &msm_adc->devs[i]->transl; if (chan >= transl->hwmon_start && chan <= transl->hwmon_end) { dest->dal.dev_idx = transl->dal_dev_idx; dest->hwmon_dev_idx = transl->hwmon_dev_idx; dest->dal.chan_idx = chan - transl->hwmon_start; return 0; } } return -EINVAL; } static int msm_adc_translate_hwmon_to_dal(struct msm_adc_drv *msm_adc, struct adc_dev_spec *source, uint32_t *chan) { struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data; struct dal_translation *transl; int i; for (i = 0; i < pdata->num_adc; i++) { transl = &msm_adc->devs[i]->transl; if (source->dal.dev_idx != transl->dal_dev_idx) continue; *chan = transl->hwmon_start + source->dal.chan_idx; return 0; } return -EINVAL; } static int msm_adc_getinputproperties(struct msm_adc_drv *msm_adc, const char *lookup_name, struct adc_dev_spec *result) { struct device *dev = &msm_adc->pdev->dev; int rc; mutex_lock(&msm_adc->prop_lock); rc = dalrpc_fcn_8(MSM_ADC_DALRPC_CMD_INPUT_PROP, msm_adc->dev_h, lookup_name, strlen(lookup_name) + 1, &result->dal, sizeof(struct dal_dev_spec)); if (rc) { dev_err(dev, "DAL getprop request failed: rc = %d\n", rc); mutex_unlock(&msm_adc->prop_lock); return -EIO; } mutex_unlock(&msm_adc->prop_lock); return rc; } static int msm_adc_lookup(struct msm_adc_drv *msm_adc, struct msm_adc_lookup *lookup) { struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data; struct adc_dev_spec target; int rc = 0, i = 0; uint32_t len = 0; len = 
strnlen(lookup->name, MSM_ADC_MAX_CHAN_STR); while (i < pdata->num_chan_supported) { if (strncmp(lookup->name, pdata->channel[i].name, len)) i++; else break; } if (pdata->num_chan_supported > 0 && i < pdata->num_chan_supported) { lookup->chan_idx = i; } else if (msm_adc->dev_h) { rc = msm_adc_getinputproperties(msm_adc, lookup->name, &target); if (rc) { pr_err("%s: Lookup failed for %s\n", __func__, lookup->name); return rc; } rc = msm_adc_translate_hwmon_to_dal(msm_adc, &target, &lookup->chan_idx); if (rc) pr_err("%s: Translation failed for %s\n", __func__, lookup->name); } else { pr_err("%s: Lookup failed for %s\n", __func__, lookup->name); rc = -EINVAL; } return rc; } static int msm_adc_aio_conversion(struct msm_adc_drv *msm_adc, struct adc_chan_result *request, struct msm_client_data *client) { struct msm_adc_platform_data *pdata = msm_adc_drv->pdev->dev.platform_data; struct msm_adc_channels *channel = &pdata->channel[request->chan]; struct adc_conv_slot *slot; /* we could block here, but only for a bounded time */ channel->adc_access_fn->adc_slot_request(channel->adc_dev_instance, &slot); if (slot) { atomic_inc(&msm_adc->total_outst); mutex_lock(&client->lock); client->num_outstanding++; mutex_unlock(&client->lock); /* indicates non blocking request to callback handler */ slot->blocking = 0; slot->compk = NULL;/*For kernel space usage; n/a for usr space*/ slot->conv.result.chan = client->adc_chan = request->chan; slot->client = client; slot->adc_request = START_OF_CONV; slot->chan_path = channel->chan_path_type; slot->chan_adc_config = channel->adc_config_type; slot->chan_adc_calib = channel->adc_calib_type; queue_work(msm_adc->wq, &slot->work); return 0; } return -EBUSY; } static int msm_adc_fluid_hw_deinit(struct msm_adc_drv *msm_adc) { struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data; if (!epm_init) return -EINVAL; if (pdata->gpio_config == APROC_CONFIG && epm_fluid_enabled && pdata->adc_fluid_disable != NULL) { 
pdata->adc_fluid_disable(); epm_fluid_enabled = false; } return 0; } static int msm_adc_fluid_hw_init(struct msm_adc_drv *msm_adc) { struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data; if (!epm_init) return -EINVAL; if (!pdata->adc_fluid_enable) return -ENODEV; printk(KERN_DEBUG "msm_adc_fluid_hw_init: Calling adc_fluid_enable.\n"); if (pdata->gpio_config == APROC_CONFIG && !epm_fluid_enabled) { pdata->adc_fluid_enable(); epm_fluid_enabled = true; } /* return success for now but check for errors from hw init configuration */ return 0; } static int msm_adc_poll_complete(struct msm_adc_drv *msm_adc, struct msm_client_data *client, uint32_t *pending) { int rc; /* * Don't proceed if there there's nothing queued on this client. * We could deadlock otherwise in a single threaded scenario. */ if (no_pending_client_requests(client) && !data_avail(client, pending)) return -EDEADLK; rc = wait_event_interruptible(client->data_wait, data_avail(client, pending)); if (rc) return rc; return 0; } static int msm_adc_read_result(struct msm_adc_drv *msm_adc, struct msm_client_data *client, struct adc_chan_result *result) { struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data; struct msm_adc_channels *channel = pdata->channel; struct adc_conv_slot *slot; int rc = 0; mutex_lock(&client->lock); slot = list_first_entry(&client->complete_list, struct adc_conv_slot, list); if (!slot) { mutex_unlock(&client->lock); return -ENOMSG; } slot->client = NULL; list_del(&slot->list); client->num_complete--; mutex_unlock(&client->lock); *result = slot->conv.result; /* restore this slot to reserve */ channel[slot->conv.result.chan].adc_access_fn->adc_restore_slot( channel[slot->conv.result.chan].adc_dev_instance, slot); return rc; } static long msm_adc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct msm_client_data *client = file->private_data; struct msm_adc_drv *msm_adc = msm_adc_drv; struct platform_device *pdev = msm_adc->pdev; struct 
msm_adc_platform_data *pdata = pdev->dev.platform_data; uint32_t block_res = 0; int rc; switch (cmd) { case MSM_ADC_REQUEST: { struct adc_chan_result conv; if (copy_from_user(&conv, (void __user *)arg, sizeof(struct adc_chan_result))) return -EFAULT; if (conv.chan < pdata->num_chan_supported) { rc = msm_adc_blocking_conversion(msm_adc, conv.chan, &conv); } else { if (!msm_adc->dev_h) return -EAGAIN; rc = msm_rpc_adc_blocking_conversion(msm_adc, conv.chan, &conv); } if (rc) { dev_dbg(&pdev->dev, "BLK conversion failed\n"); return rc; } if (copy_to_user((void __user *)arg, &conv, sizeof(struct adc_chan_result))) return -EFAULT; break; } case MSM_ADC_AIO_REQUEST_BLOCK_RES: block_res = 1; case MSM_ADC_AIO_REQUEST: { struct adc_chan_result conv; if (copy_from_user(&conv, (void __user *)arg, sizeof(struct adc_chan_result))) return -EFAULT; if (conv.chan >= pdata->num_chan_supported) return -EINVAL; rc = msm_adc_aio_conversion(msm_adc, &conv, client); if (rc) { dev_dbg(&pdev->dev, "AIO conversion failed\n"); return rc; } if (copy_to_user((void __user *)arg, &conv, sizeof(struct adc_chan_result))) return -EFAULT; break; } case MSM_ADC_AIO_POLL: { uint32_t completed; rc = msm_adc_poll_complete(msm_adc, client, &completed); if (rc) { dev_dbg(&pdev->dev, "poll request failed\n"); return rc; } if (copy_to_user((void __user *)arg, &completed, sizeof(uint32_t))) return -EFAULT; break; } case MSM_ADC_AIO_READ: { struct adc_chan_result result; rc = msm_adc_read_result(msm_adc, client, &result); if (rc) { dev_dbg(&pdev->dev, "read result failed\n"); return rc; } if (copy_to_user((void __user *)arg, &result, sizeof(struct adc_chan_result))) return -EFAULT; break; } case MSM_ADC_LOOKUP: { struct msm_adc_lookup lookup; if (copy_from_user(&lookup, (void __user *)arg, sizeof(struct msm_adc_lookup))) return -EFAULT; rc = msm_adc_lookup(msm_adc, &lookup); if (rc) { dev_dbg(&pdev->dev, "No such channel: %s\n", lookup.name); return rc; } if (copy_to_user((void __user *)arg, &lookup, 
sizeof(struct msm_adc_lookup))) return -EFAULT; break; } case MSM_ADC_FLUID_INIT: { uint32_t result; result = msm_adc_fluid_hw_init(msm_adc); if (copy_to_user((void __user *)arg, &result, sizeof(uint32_t))) { printk(KERN_ERR "MSM_ADC_FLUID_INIT: " "copy_to_user returned an error.\n"); return -EFAULT; } printk(KERN_DEBUG "MSM_ADC_FLUID_INIT: Success.\n"); break; } case MSM_ADC_FLUID_DEINIT: { uint32_t result; result = msm_adc_fluid_hw_deinit(msm_adc); if (copy_to_user((void __user *)arg, &result, sizeof(uint32_t))) return -EFAULT; break; } default: return -EINVAL; } return 0; } const struct file_operations msm_adc_fops = { .open = msm_adc_open, .release = msm_adc_release, .unlocked_ioctl = msm_adc_ioctl, }; static ssize_t msm_adc_show_curr(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct msm_adc_drv *msm_adc = dev_get_drvdata(dev); struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data; struct adc_chan_result result; int rc; #ifdef CONFIG_PMIC8058_XOADC rc = pm8058_xoadc_registered(); if (rc <= 0) return -ENODEV; #endif if (attr->index < pdata->num_chan_supported) { rc = msm_adc_blocking_conversion(msm_adc, attr->index, &result); } else { if (pdata->gpio_config == APROC_CONFIG && !epm_fluid_enabled && pdata->adc_fluid_enable != NULL) { printk(KERN_DEBUG "This is to read ADC value for " "Fluid EPM and init. 
Do it only once.\n"); pdata->adc_fluid_enable(); epm_fluid_enabled = true; } rc = msm_rpc_adc_blocking_conversion(msm_adc, attr->index, &result); } if (rc) return 0; return sprintf(buf, "Result: %lld Raw: %d\n", result.physical, result.adc_code); } static int msm_rpc_adc_blocking_conversion(struct msm_adc_drv *msm_adc, uint32_t hwmon_chan, struct adc_chan_result *result) { struct msm_adc_platform_data *pdata = msm_adc->pdev->dev.platform_data; struct dal_conv_request params; struct device *dev = &msm_adc->pdev->dev; struct adc_dev *adc_dev; struct dal_conv_state *conv_s; struct dal_conv_slot *slot; struct adc_dev_spec dest; int timeout, rc = 0; if (pdata->gpio_config == APROC_CONFIG && pdata->adc_gpio_enable != NULL) pdata->adc_gpio_enable(hwmon_chan-pdata->num_chan_supported); rc = msm_adc_translate_dal_to_hwmon(msm_adc, hwmon_chan, &dest); if (rc) { dev_err(dev, "%s: translation from chan %u failed\n", __func__, hwmon_chan); if (pdata->gpio_config == APROC_CONFIG && pdata->adc_gpio_disable != NULL) pdata->adc_gpio_disable(hwmon_chan -pdata->num_chan_supported); return -EINVAL; } adc_dev = msm_adc->devs[dest.hwmon_dev_idx]; conv_s = &adc_dev->conv; down(&conv_s->slot_count); mutex_lock(&conv_s->list_lock); slot = list_first_entry(&conv_s->slots, struct dal_conv_slot, list); list_del(&slot->list); BUG_ON(!slot); mutex_unlock(&conv_s->list_lock); /* indicates blocking request to callback handler */ slot->blocking = 1; params.target.dev_idx = dest.dal.dev_idx; params.target.chan_idx = dest.dal.chan_idx; params.cb_h = slot->cb_h; rc = dalrpc_fcn_8(MSM_ADC_DALRPC_CMD_REQ_CONV, msm_adc->dev_h, &params, sizeof(params), NULL, 0); if (rc) { dev_err(dev, "%s: Conversion for device = %u channel = %u" " failed\n", __func__, params.target.dev_idx, params.target.chan_idx); rc = -EIO; goto blk_conv_err; } timeout = wait_for_completion_interruptible_timeout(&slot->comp, MSM_ADC_DALRC_CONV_TIMEOUT); if (timeout == 0) { dev_err(dev, "read for device = %u channel = %u timed out\n", 
params.target.dev_idx, params.target.chan_idx); rc = -ETIMEDOUT; goto blk_conv_err; } else if (timeout < 0) { rc = -EINTR; goto blk_conv_err; } result->physical = (int64_t)slot->result.physical; if (slot->result.status == DAL_RESULT_STATUS_INVALID) rc = -ENODATA; blk_conv_err: if (pdata->gpio_config == APROC_CONFIG && pdata->adc_gpio_disable != NULL) pdata->adc_gpio_disable(hwmon_chan-pdata->num_chan_supported); msm_adc_restore_slot(conv_s, slot); return rc; } static int msm_adc_blocking_conversion(struct msm_adc_drv *msm_adc, uint32_t hwmon_chan, struct adc_chan_result *result) { struct adc_conv_slot *slot; struct msm_adc_platform_data *pdata = msm_adc_drv->pdev->dev.platform_data; struct msm_adc_channels *channel = &pdata->channel[hwmon_chan]; int ret = 0; if (conv_first_request) { ret = pm8058_xoadc_calib_device(channel->adc_dev_instance); if (ret) { pr_err("pmic8058 xoadc calibration failed, retry\n"); return ret; } conv_first_request = false; } channel->adc_access_fn->adc_slot_request(channel->adc_dev_instance, &slot); if (slot) { slot->conv.result.chan = hwmon_chan; /* indicates blocking request to callback handler */ slot->blocking = 1; slot->adc_request = START_OF_CONV; slot->chan_path = channel->chan_path_type; slot->chan_adc_config = channel->adc_config_type; slot->chan_adc_calib = channel->adc_calib_type; queue_work(msm_adc_drv->wq, &slot->work); wait_for_completion_interruptible(&slot->comp); *result = slot->conv.result; channel->adc_access_fn->adc_restore_slot( channel->adc_dev_instance, slot); return 0; } return -EBUSY; } int32_t adc_channel_open(uint32_t channel, void **h) { struct msm_client_data *client; struct msm_adc_drv *msm_adc = msm_adc_drv; struct msm_adc_platform_data *pdata; struct platform_device *pdev; int i = 0; if (!msm_adc_drv) return -EFAULT; #ifdef CONFIG_PMIC8058_XOADC if (pm8058_xoadc_registered() <= 0) return -ENODEV; #endif pdata = msm_adc->pdev->dev.platform_data; pdev = msm_adc->pdev; while (i < pdata->num_chan_supported) { if 
(channel == pdata->channel[i].channel_name) break; else i++; } if (i == pdata->num_chan_supported) return -EBADF; /* unknown channel */ client = kzalloc(sizeof(struct msm_client_data), GFP_KERNEL); if (!client) { dev_err(&pdev->dev, "Unable to allocate memory\n"); return -ENOMEM; } if (!try_module_get(THIS_MODULE)) { kfree(client); return -EACCES; } mutex_init(&client->lock); INIT_LIST_HEAD(&client->complete_list); init_waitqueue_head(&client->data_wait); init_waitqueue_head(&client->outst_wait); client->online = 1; client->adc_chan = i; *h = (void *)client; return 0; } int32_t adc_channel_close(void *h) { struct msm_client_data *client = (struct msm_client_data *)h; kfree(client); return 0; } int32_t adc_channel_request_conv(void *h, struct completion *conv_complete_evt) { struct msm_client_data *client = (struct msm_client_data *)h; struct msm_adc_platform_data *pdata = msm_adc_drv->pdev->dev.platform_data; struct msm_adc_channels *channel = &pdata->channel[client->adc_chan]; struct adc_conv_slot *slot; int ret; if (conv_first_request) { ret = pm8058_xoadc_calib_device(channel->adc_dev_instance); if (ret) { pr_err("pmic8058 xoadc calibration failed, retry\n"); return ret; } conv_first_request = false; } channel->adc_access_fn->adc_slot_request(channel->adc_dev_instance, &slot); if (slot) { atomic_inc(&msm_adc_drv->total_outst); mutex_lock(&client->lock); client->num_outstanding++; mutex_unlock(&client->lock); slot->conv.result.chan = client->adc_chan; slot->blocking = 0; slot->compk = conv_complete_evt; slot->client = client; slot->adc_request = START_OF_CONV; slot->chan_path = channel->chan_path_type; slot->chan_adc_config = channel->adc_config_type; slot->chan_adc_calib = channel->adc_calib_type; queue_work(msm_adc_drv->wq, &slot->work); return 0; } return -EBUSY; } int32_t adc_channel_read_result(void *h, struct adc_chan_result *chan_result) { struct msm_client_data *client = (struct msm_client_data *)h; struct msm_adc_platform_data *pdata = 
msm_adc_drv->pdev->dev.platform_data; struct msm_adc_channels *channel = pdata->channel; struct adc_conv_slot *slot; int rc = 0; mutex_lock(&client->lock); slot = list_first_entry(&client->complete_list, struct adc_conv_slot, list); if (!slot) { mutex_unlock(&client->lock); return -ENOMSG; } slot->client = NULL; list_del(&slot->list); client->num_complete--; mutex_unlock(&client->lock); *chan_result = slot->conv.result; /* restore this slot to reserve */ channel[slot->conv.result.chan].adc_access_fn->adc_restore_slot( channel[slot->conv.result.chan].adc_dev_instance, slot); return rc; } static void msm_rpc_adc_conv_cb(void *context, u32 param, void *evt_buf, u32 len) { struct dal_adc_result *result = evt_buf; struct dal_conv_slot *slot = context; struct msm_adc_drv *msm_adc = msm_adc_drv; memcpy(&slot->result, result, sizeof(slot->result)); /* for blocking requests, signal complete */ if (slot->blocking) complete(&slot->comp); /* for non-blocking requests, add slot to the client completed list */ else { struct msm_client_data *client = slot->client; mutex_lock(&client->lock); list_add(&slot->list, &client->complete_list); client->num_complete++; client->num_outstanding--; /* * if the client release has been invoked and this is call * corresponds to the last request, then signal release * to complete. 
*/ if (slot->client->online == 0 && client->num_outstanding == 0) wake_up_interruptible_all(&client->outst_wait); mutex_unlock(&client->lock); wake_up_interruptible_all(&client->data_wait); atomic_dec(&msm_adc->total_outst); /* verify driver remove has not been invoked */ if (atomic_read(&msm_adc->online) == 0 && atomic_read(&msm_adc->total_outst) == 0) wake_up_interruptible_all(&msm_adc->total_outst_wait); } } void msm_adc_conv_cb(void *context, u32 param, void *evt_buf, u32 len) { struct adc_conv_slot *slot = context; struct msm_adc_drv *msm_adc = msm_adc_drv; switch (slot->adc_request) { case START_OF_CONV: slot->adc_request = END_OF_CONV; break; case START_OF_CALIBRATION: slot->adc_request = END_OF_CALIBRATION; break; case END_OF_CALIBRATION: case END_OF_CONV: break; } queue_work(msm_adc->wq, &slot->work); } static void msm_adc_teardown_device_conv(struct platform_device *pdev, struct adc_dev *adc_dev) { struct dal_conv_state *conv_s = &adc_dev->conv; struct msm_adc_drv *msm_adc = platform_get_drvdata(pdev); struct dal_conv_slot *slot; int i; for (i = 0; i < MSM_ADC_DEV_MAX_INFLIGHT; i++) { slot = &conv_s->context[i]; if (slot->cb_h) { dalrpc_dealloc_cb(msm_adc->dev_h, slot->cb_h); slot->cb_h = NULL; } } } static void msm_rpc_adc_teardown_device(struct platform_device *pdev, struct adc_dev *adc_dev) { struct dal_translation *transl = &adc_dev->transl; int i, num_chans = transl->hwmon_end - transl->hwmon_start + 1; if (adc_dev->sens_attr) for (i = 0; i < num_chans; i++) device_remove_file(&pdev->dev, &adc_dev->sens_attr[i].dev_attr); msm_adc_teardown_device_conv(pdev, adc_dev); kfree(adc_dev->fnames); kfree(adc_dev->sens_attr); kfree(adc_dev); } static void msm_rpc_adc_teardown_devices(struct platform_device *pdev) { struct msm_adc_platform_data *pdata = pdev->dev.platform_data; struct msm_adc_drv *msm_adc = platform_get_drvdata(pdev); int i, rc = 0; for (i = 0; i < pdata->num_adc; i++) { if (msm_adc->devs[i]) { msm_rpc_adc_teardown_device(pdev, 
msm_adc->devs[i]); msm_adc->devs[i] = NULL; } else break; } if (msm_adc->dev_h) { rc = daldevice_detach(msm_adc->dev_h); if (rc) dev_err(&pdev->dev, "Cannot detach from dal device\n"); msm_adc->dev_h = NULL; } } static void msm_adc_teardown_device(struct platform_device *pdev, struct msm_adc_drv *msm_adc) { struct msm_adc_platform_data *pdata = pdev->dev.platform_data; int i, num_chans = pdata->num_chan_supported; if (pdata->num_chan_supported > 0) { if (msm_adc->sens_attr) for (i = 0; i < num_chans; i++) device_remove_file(&pdev->dev, &msm_adc->sens_attr[i].dev_attr); kfree(msm_adc->sens_attr); } } static void msm_adc_teardown(struct platform_device *pdev) { struct msm_adc_drv *msm_adc = platform_get_drvdata(pdev); if (!msm_adc) return; misc_deregister(&msm_adc->misc); if (msm_adc->hwmon) hwmon_device_unregister(msm_adc->hwmon); msm_rpc_adc_teardown_devices(pdev); msm_adc_teardown_device(pdev, msm_adc); kfree(msm_adc); platform_set_drvdata(pdev, NULL); } static int __devinit msm_adc_device_conv_init(struct msm_adc_drv *msm_adc, struct adc_dev *adc_dev) { struct platform_device *pdev = msm_adc->pdev; struct dal_conv_state *conv_s = &adc_dev->conv; struct dal_conv_slot *slot = conv_s->context; int rc, i; sema_init(&conv_s->slot_count, MSM_ADC_DEV_MAX_INFLIGHT); mutex_init(&conv_s->list_lock); INIT_LIST_HEAD(&conv_s->slots); for (i = 0; i < MSM_ADC_DEV_MAX_INFLIGHT; i++) { list_add(&slot->list, &conv_s->slots); slot->cb_h = dalrpc_alloc_cb(msm_adc->dev_h, msm_rpc_adc_conv_cb, slot); if (!slot->cb_h) { dev_err(&pdev->dev, "Unable to allocate DAL callback" " for slot %d\n", i); rc = -ENOMEM; goto dal_err_cb; } init_completion(&slot->comp); slot->idx = i; slot++; } return 0; dal_err_cb: msm_adc_teardown_device_conv(pdev, adc_dev); return rc; } static struct sensor_device_attribute msm_rpc_adc_curr_in_attr = SENSOR_ATTR(NULL, S_IRUGO, msm_adc_show_curr, NULL, 0); static int __devinit msm_rpc_adc_device_init_hwmon(struct platform_device *pdev, struct adc_dev *adc_dev) { 
struct dal_translation *transl = &adc_dev->transl; int i, rc, num_chans = transl->hwmon_end - transl->hwmon_start + 1; const char prefix[] = "curr", postfix[] = "_input"; char tmpbuf[5]; adc_dev->fnames = kzalloc(num_chans * MSM_ADC_MAX_FNAME + num_chans * sizeof(char *), GFP_KERNEL); if (!adc_dev->fnames) { dev_err(&pdev->dev, "Unable to allocate memory\n"); return -ENOMEM; } adc_dev->sens_attr = kzalloc(num_chans * sizeof(struct sensor_device_attribute), GFP_KERNEL); if (!adc_dev->sens_attr) { dev_err(&pdev->dev, "Unable to allocate memory\n"); rc = -ENOMEM; goto hwmon_err_fnames; } for (i = 0; i < num_chans; i++) { adc_dev->fnames[i] = (char *)adc_dev->fnames + i * MSM_ADC_MAX_FNAME + num_chans * sizeof(char *); strcpy(adc_dev->fnames[i], prefix); sprintf(tmpbuf, "%d", transl->hwmon_start + i); strcat(adc_dev->fnames[i], tmpbuf); strcat(adc_dev->fnames[i], postfix); msm_rpc_adc_curr_in_attr.index = transl->hwmon_start + i; msm_rpc_adc_curr_in_attr.dev_attr.attr.name = adc_dev->fnames[i]; memcpy(&adc_dev->sens_attr[i], &msm_rpc_adc_curr_in_attr, sizeof(msm_rpc_adc_curr_in_attr)); rc = device_create_file(&pdev->dev, &adc_dev->sens_attr[i].dev_attr); if (rc) { dev_err(&pdev->dev, "device_create_file failed for " "dal dev %u chan %d\n", adc_dev->transl.dal_dev_idx, i); goto hwmon_err_sens; } } return 0; hwmon_err_sens: kfree(adc_dev->sens_attr); hwmon_err_fnames: kfree(adc_dev->fnames); return rc; } static int __devinit msm_rpc_adc_device_init(struct platform_device *pdev) { struct msm_adc_platform_data *pdata = pdev->dev.platform_data; struct msm_adc_drv *msm_adc = platform_get_drvdata(pdev); struct adc_dev *adc_dev; struct adc_dev_spec target; int i, rc; int hwmon_cntr = pdata->num_chan_supported; for (i = 0; i < pdata->num_adc; i++) { adc_dev = kzalloc(sizeof(struct adc_dev), GFP_KERNEL); if (!adc_dev) { dev_err(&pdev->dev, "Unable to allocate memory\n"); rc = -ENOMEM; goto dev_init_err; } msm_adc->devs[i] = adc_dev; adc_dev->name = pdata->dev_names[i]; rc = 
msm_adc_device_conv_init(msm_adc, adc_dev); if (rc) { dev_err(&pdev->dev, "DAL device[%s] failed conv init\n", adc_dev->name); goto dev_init_err; } /* DAL device lookup */ rc = msm_adc_getinputproperties(msm_adc, adc_dev->name, &target); if (rc) { dev_err(&pdev->dev, "No such DAL device[%s]\n", adc_dev->name); goto dev_init_err; } adc_dev->transl.dal_dev_idx = target.dal.dev_idx; adc_dev->transl.hwmon_dev_idx = i; adc_dev->nchans = target.dal.chan_idx; adc_dev->transl.hwmon_start = hwmon_cntr; adc_dev->transl.hwmon_end = hwmon_cntr + adc_dev->nchans - 1; hwmon_cntr += adc_dev->nchans; rc = msm_rpc_adc_device_init_hwmon(pdev, adc_dev); if (rc) goto dev_init_err; } return 0; dev_init_err: msm_rpc_adc_teardown_devices(pdev); return rc; } static int __devinit msm_rpc_adc_init(struct platform_device *pdev1) { struct msm_adc_drv *msm_adc = msm_adc_drv; struct platform_device *pdev = msm_adc->pdev; struct msm_adc_platform_data *pdata = pdev->dev.platform_data; int rc = 0; dev_dbg(&pdev->dev, "msm_rpc_adc_init called\n"); if (!pdata) { dev_err(&pdev->dev, "no platform data?\n"); return -EINVAL; } mutex_init(&msm_adc->prop_lock); rc = daldevice_attach(MSM_ADC_DALRPC_DEVICEID, MSM_ADC_DALRPC_PORT_NAME, MSM_ADC_DALRPC_CPU, &msm_adc->dev_h); if (rc) { dev_err(&pdev->dev, "Cannot attach to dal device\n"); return rc; } dev_dbg(&pdev->dev, "Attach to dal device Succeeded\n"); rc = msm_rpc_adc_device_init(pdev); if (rc) { dev_err(&pdev->dev, "msm_adc_dev_init failed\n"); goto err_cleanup; } init_waitqueue_head(&msm_adc->rpc_total_outst_wait); atomic_set(&msm_adc->rpc_online, 1); atomic_set(&msm_adc->rpc_total_outst, 0); epm_init = true; pr_info("msm_adc successfully registered\n"); return 0; err_cleanup: msm_rpc_adc_teardown_devices(pdev); return rc; } /* * Process the deferred job */ void msm_adc_wq_work(struct work_struct *work) { struct adc_properties *adc_properties; struct adc_conv_slot *slot = container_of(work, struct adc_conv_slot, work); uint32_t idx = 
slot->conv.result.chan; struct msm_adc_platform_data *pdata = msm_adc_drv->pdev->dev.platform_data; struct msm_adc_channels *channel = &pdata->channel[idx]; int32_t adc_code; switch (slot->adc_request) { case START_OF_CONV: channel->adc_access_fn->adc_select_chan_and_start_conv( channel->adc_dev_instance, slot); break; case END_OF_CONV: adc_properties = channel->adc_access_fn->adc_get_properties( channel->adc_dev_instance); if (channel->adc_access_fn->adc_read_adc_code) channel->adc_access_fn->adc_read_adc_code( channel->adc_dev_instance, &adc_code); if (channel->chan_processor) channel->chan_processor(adc_code, adc_properties, &slot->chan_properties, &slot->conv.result); /* Intentionally a fall thru here. Calibraton does not need to perform channel processing, etc. However, both end of conversion and end of calibration requires the below fall thru code to be executed. */ case END_OF_CALIBRATION: /* for blocking requests, signal complete */ if (slot->blocking) complete(&slot->comp); else { struct msm_client_data *client = slot->client; mutex_lock(&client->lock); if (slot->adc_request == END_OF_CONV) { list_add(&slot->list, &client->complete_list); client->num_complete++; } client->num_outstanding--; /* * if the client release has been invoked and this is call * corresponds to the last request, then signal release * to complete. 
*/ if (slot->client->online == 0 && client->num_outstanding == 0) wake_up_interruptible_all(&client->outst_wait); mutex_unlock(&client->lock); wake_up_interruptible_all(&client->data_wait); atomic_dec(&msm_adc_drv->total_outst); /* verify driver remove has not been invoked */ if (atomic_read(&msm_adc_drv->online) == 0 && atomic_read(&msm_adc_drv->total_outst) == 0) wake_up_interruptible_all( &msm_adc_drv->total_outst_wait); if (slot->compk) /* Kernel space request */ complete(slot->compk); if (slot->adc_request == END_OF_CALIBRATION) channel->adc_access_fn->adc_restore_slot( channel->adc_dev_instance, slot); } break; case START_OF_CALIBRATION: /* code here to please code reviewers to satisfy silly compiler warnings */ break; } } static struct sensor_device_attribute msm_adc_curr_in_attr = SENSOR_ATTR(NULL, S_IRUGO, msm_adc_show_curr, NULL, 0); static int __devinit msm_adc_init_hwmon(struct platform_device *pdev, struct msm_adc_drv *msm_adc) { struct msm_adc_platform_data *pdata = pdev->dev.platform_data; struct msm_adc_channels *channel = pdata->channel; int i, rc, num_chans = pdata->num_chan_supported; if (!channel) return -EINVAL; msm_adc->sens_attr = kzalloc(num_chans * sizeof(struct sensor_device_attribute), GFP_KERNEL); if (!msm_adc->sens_attr) { dev_err(&pdev->dev, "Unable to allocate memory\n"); rc = -ENOMEM; goto hwmon_err_sens; } for (i = 0; i < num_chans; i++) { msm_adc_curr_in_attr.index = i; msm_adc_curr_in_attr.dev_attr.attr.name = channel[i].name; memcpy(&msm_adc->sens_attr[i], &msm_adc_curr_in_attr, sizeof(msm_adc_curr_in_attr)); rc = device_create_file(&pdev->dev, &msm_adc->sens_attr[i].dev_attr); if (rc) { dev_err(&pdev->dev, "device_create_file failed for " "dal dev %s\n", channel[i].name); goto hwmon_err_sens; } } return 0; hwmon_err_sens: kfree(msm_adc->sens_attr); return rc; } static struct platform_driver msm_adc_rpcrouter_remote_driver = { .probe = msm_rpc_adc_init, .driver = { .name = MSM_ADC_DALRPC_PORT_NAME, .owner = THIS_MODULE, }, }; 
static int msm_adc_probe(struct platform_device *pdev) { struct msm_adc_platform_data *pdata = pdev->dev.platform_data; struct msm_adc_drv *msm_adc; int rc = 0; if (!pdata) { dev_err(&pdev->dev, "no platform data?\n"); return -EINVAL; } msm_adc = kzalloc(sizeof(struct msm_adc_drv), GFP_KERNEL); if (!msm_adc) { dev_err(&pdev->dev, "Unable to allocate memory\n"); return -ENOMEM; } platform_set_drvdata(pdev, msm_adc); msm_adc_drv = msm_adc; msm_adc->pdev = pdev; if (pdata->target_hw == MSM_8x60 || pdata->target_hw == FSM_9xxx) { rc = msm_adc_init_hwmon(pdev, msm_adc); if (rc) { dev_err(&pdev->dev, "msm_adc_dev_init failed\n"); goto err_cleanup; } } msm_adc->hwmon = hwmon_device_register(&pdev->dev); if (IS_ERR(msm_adc->hwmon)) { dev_err(&pdev->dev, "hwmon_device_register failed\n"); rc = PTR_ERR(msm_adc->hwmon); goto err_cleanup; } msm_adc->misc.name = MSM_ADC_DRIVER_NAME; msm_adc->misc.minor = MISC_DYNAMIC_MINOR; msm_adc->misc.fops = &msm_adc_fops; if (misc_register(&msm_adc->misc)) { dev_err(&pdev->dev, "Unable to register misc device!\n"); goto err_cleanup; } init_waitqueue_head(&msm_adc->total_outst_wait); atomic_set(&msm_adc->online, 1); atomic_set(&msm_adc->total_outst, 0); msm_adc->wq = create_singlethread_workqueue("msm_adc"); if (!msm_adc->wq) goto err_cleanup; if (pdata->num_adc > 0) { if (pdata->target_hw == MSM_8x60) platform_driver_register( &msm_adc_rpcrouter_remote_driver); else msm_rpc_adc_init(pdev); } conv_first_request = true; pr_info("msm_adc successfully registered\n"); return 0; err_cleanup: msm_adc_teardown(pdev); return rc; } static int __devexit msm_adc_remove(struct platform_device *pdev) { int rc; struct msm_adc_drv *msm_adc = platform_get_drvdata(pdev); atomic_set(&msm_adc->online, 0); atomic_set(&msm_adc->rpc_online, 0); misc_deregister(&msm_adc->misc); hwmon_device_unregister(msm_adc->hwmon); msm_adc->hwmon = NULL; /* * We may still have outstanding transactions in flight that have not * completed. 
Make sure they're completed before tearing down. */ rc = wait_event_interruptible(msm_adc->total_outst_wait, atomic_read(&msm_adc->total_outst) == 0); if (rc) { pr_err("%s: wait_event_interruptible failed rc = %d\n", __func__, rc); return rc; } rc = wait_event_interruptible(msm_adc->rpc_total_outst_wait, atomic_read(&msm_adc->rpc_total_outst) == 0); if (rc) { pr_err("%s: wait_event_interruptible failed rc = %d\n", __func__, rc); return rc; } msm_adc_teardown(pdev); pr_info("msm_adc unregistered\n"); return 0; } static struct platform_driver msm_adc_driver = { .probe = msm_adc_probe, .remove = __devexit_p(msm_adc_remove), .driver = { .name = MSM_ADC_DRIVER_NAME, .owner = THIS_MODULE, }, }; static int __init msm_adc_init(void) { return platform_driver_register(&msm_adc_driver); } module_init(msm_adc_init); static void __exit msm_adc_exit(void) { platform_driver_unregister(&msm_adc_driver); } module_exit(msm_adc_exit); MODULE_DESCRIPTION("MSM ADC Driver"); MODULE_ALIAS("platform:msm_adc"); MODULE_LICENSE("GPL v2"); MODULE_VERSION("0.1");
gpl-2.0
garwedgess/LuPuS-STOCK-ICS-Xperia2011
drivers/net/phy/mdio_bus.c
529
8007
/* * drivers/net/phy/mdio_bus.c * * MDIO Bus interface * * Author: Andy Fleming * * Copyright (c) 2004 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/mii.h> #include <linux/ethtool.h> #include <linux/phy.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/uaccess.h> /** * mdiobus_alloc - allocate a mii_bus structure * * Description: called by a bus driver to allocate an mii_bus * structure to fill in. */ struct mii_bus *mdiobus_alloc(void) { struct mii_bus *bus; bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (bus != NULL) bus->state = MDIOBUS_ALLOCATED; return bus; } EXPORT_SYMBOL(mdiobus_alloc); /** * mdiobus_release - mii_bus device release callback * @d: the target struct device that contains the mii_bus * * Description: called when the last reference to an mii_bus is * dropped, to free the underlying memory. 
*/ static void mdiobus_release(struct device *d) { struct mii_bus *bus = to_mii_bus(d); BUG_ON(bus->state != MDIOBUS_RELEASED && /* for compatibility with error handling in drivers */ bus->state != MDIOBUS_ALLOCATED); kfree(bus); } static struct class mdio_bus_class = { .name = "mdio_bus", .dev_release = mdiobus_release, }; /** * mdiobus_register - bring up all the PHYs on a given bus and attach them to bus * @bus: target mii_bus * * Description: Called by a bus driver to bring up all the PHYs * on a given bus, and attach them to the bus. * * Returns 0 on success or < 0 on error. */ int mdiobus_register(struct mii_bus *bus) { int i, err; if (NULL == bus || NULL == bus->name || NULL == bus->read || NULL == bus->write) return -EINVAL; BUG_ON(bus->state != MDIOBUS_ALLOCATED && bus->state != MDIOBUS_UNREGISTERED); bus->dev.parent = bus->parent; bus->dev.class = &mdio_bus_class; bus->dev.groups = NULL; dev_set_name(&bus->dev, "%s", bus->id); err = device_register(&bus->dev); if (err) { printk(KERN_ERR "mii_bus %s failed to register\n", bus->id); return -EINVAL; } mutex_init(&bus->mdio_lock); if (bus->reset) bus->reset(bus); for (i = 0; i < PHY_MAX_ADDR; i++) { if ((bus->phy_mask & (1 << i)) == 0) { struct phy_device *phydev; phydev = mdiobus_scan(bus, i); if (IS_ERR(phydev)) { err = PTR_ERR(phydev); goto error; } } } bus->state = MDIOBUS_REGISTERED; pr_info("%s: probed\n", bus->name); return 0; error: while (--i >= 0) { if (bus->phy_map[i]) device_unregister(&bus->phy_map[i]->dev); } device_del(&bus->dev); return err; } EXPORT_SYMBOL(mdiobus_register); void mdiobus_unregister(struct mii_bus *bus) { int i; BUG_ON(bus->state != MDIOBUS_REGISTERED); bus->state = MDIOBUS_UNREGISTERED; device_del(&bus->dev); for (i = 0; i < PHY_MAX_ADDR; i++) { if (bus->phy_map[i]) device_unregister(&bus->phy_map[i]->dev); bus->phy_map[i] = NULL; } } EXPORT_SYMBOL(mdiobus_unregister); /** * mdiobus_free - free a struct mii_bus * @bus: mii_bus to free * * This function releases the reference 
to the underlying device * object in the mii_bus. If this is the last reference, the mii_bus * will be freed. */ void mdiobus_free(struct mii_bus *bus) { /* * For compatibility with error handling in drivers. */ if (bus->state == MDIOBUS_ALLOCATED) { kfree(bus); return; } BUG_ON(bus->state != MDIOBUS_UNREGISTERED); bus->state = MDIOBUS_RELEASED; put_device(&bus->dev); } EXPORT_SYMBOL(mdiobus_free); struct phy_device *mdiobus_scan(struct mii_bus *bus, int addr) { struct phy_device *phydev; int err; phydev = get_phy_device(bus, addr); if (IS_ERR(phydev) || phydev == NULL) return phydev; err = phy_device_register(phydev); if (err) { phy_device_free(phydev); return NULL; } return phydev; } EXPORT_SYMBOL(mdiobus_scan); /** * mdiobus_read - Convenience function for reading a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to read * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. */ int mdiobus_read(struct mii_bus *bus, int addr, u16 regnum) { int retval; BUG_ON(in_interrupt()); mutex_lock(&bus->mdio_lock); retval = bus->read(bus, addr, regnum); mutex_unlock(&bus->mdio_lock); return retval; } EXPORT_SYMBOL(mdiobus_read); /** * mdiobus_write - Convenience function for writing a given MII mgmt register * @bus: the mii_bus struct * @addr: the phy address * @regnum: register number to write * @val: value to write to @regnum * * NOTE: MUST NOT be called from interrupt context, * because the bus read/write functions may wait for an interrupt * to conclude the operation. 
*/ int mdiobus_write(struct mii_bus *bus, int addr, u16 regnum, u16 val) { int err; BUG_ON(in_interrupt()); mutex_lock(&bus->mdio_lock); err = bus->write(bus, addr, regnum, val); mutex_unlock(&bus->mdio_lock); return err; } EXPORT_SYMBOL(mdiobus_write); /** * mdio_bus_match - determine if given PHY driver supports the given PHY device * @dev: target PHY device * @drv: given PHY driver * * Description: Given a PHY device, and a PHY driver, return 1 if * the driver supports the device. Otherwise, return 0. */ static int mdio_bus_match(struct device *dev, struct device_driver *drv) { struct phy_device *phydev = to_phy_device(dev); struct phy_driver *phydrv = to_phy_driver(drv); return ((phydrv->phy_id & phydrv->phy_id_mask) == (phydev->phy_id & phydrv->phy_id_mask)); } static bool mdio_bus_phy_may_suspend(struct phy_device *phydev) { struct device_driver *drv = phydev->dev.driver; struct phy_driver *phydrv = to_phy_driver(drv); struct net_device *netdev = phydev->attached_dev; if (!drv || !phydrv->suspend) return false; /* PHY not attached? May suspend. */ if (!netdev) return true; /* * Don't suspend PHY if the attched netdev parent may wakeup. * The parent may point to a PCI device, as in tg3 driver. */ if (netdev->dev.parent && device_may_wakeup(netdev->dev.parent)) return false; /* * Also don't suspend PHY if the netdev itself may wakeup. This * is the case for devices w/o underlaying pwr. mgmt. aware bus, * e.g. SoC devices. */ if (device_may_wakeup(&netdev->dev)) return false; return true; } /* Suspend and resume. 
Copied from platform_suspend and * platform_resume */ static int mdio_bus_suspend(struct device * dev, pm_message_t state) { struct phy_driver *phydrv = to_phy_driver(dev->driver); struct phy_device *phydev = to_phy_device(dev); if (!mdio_bus_phy_may_suspend(phydev)) return 0; return phydrv->suspend(phydev); } static int mdio_bus_resume(struct device * dev) { struct phy_driver *phydrv = to_phy_driver(dev->driver); struct phy_device *phydev = to_phy_device(dev); if (!mdio_bus_phy_may_suspend(phydev)) return 0; return phydrv->resume(phydev); } struct bus_type mdio_bus_type = { .name = "mdio_bus", .match = mdio_bus_match, .suspend = mdio_bus_suspend, .resume = mdio_bus_resume, }; EXPORT_SYMBOL(mdio_bus_type); int __init mdio_bus_init(void) { int ret; ret = class_register(&mdio_bus_class); if (!ret) { ret = bus_register(&mdio_bus_type); if (ret) class_unregister(&mdio_bus_class); } return ret; } void mdio_bus_exit(void) { class_unregister(&mdio_bus_class); bus_unregister(&mdio_bus_type); }
gpl-2.0
ericli1989/ali_kernel
drivers/video/pxa168fb.c
529
20124
/* * linux/drivers/video/pxa168fb.c -- Marvell PXA168 LCD Controller * * Copyright (C) 2008 Marvell International Ltd. * All rights reserved. * * 2009-02-16 adapted from original version for PXA168/910 * Jun Nie <njun@marvell.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/fb.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/uaccess.h> #include <video/pxa168fb.h> #include "pxa168fb.h" #define DEFAULT_REFRESH 60 /* Hz */ static int determine_best_pix_fmt(struct fb_var_screeninfo *var) { /* * Pseudocolor mode? */ if (var->bits_per_pixel == 8) return PIX_FMT_PSEUDOCOLOR; /* * Check for 565/1555. */ if (var->bits_per_pixel == 16 && var->red.length <= 5 && var->green.length <= 6 && var->blue.length <= 5) { if (var->transp.length == 0) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB565; else return PIX_FMT_BGR565; } if (var->transp.length == 1 && var->green.length <= 5) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB1555; else return PIX_FMT_BGR1555; } /* fall through */ } /* * Check for 888/A888. 
*/ if (var->bits_per_pixel <= 32 && var->red.length <= 8 && var->green.length <= 8 && var->blue.length <= 8) { if (var->bits_per_pixel == 24 && var->transp.length == 0) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB888PACK; else return PIX_FMT_BGR888PACK; } if (var->bits_per_pixel == 32 && var->transp.length == 8) { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGBA888; else return PIX_FMT_BGRA888; } else { if (var->red.offset >= var->blue.offset) return PIX_FMT_RGB888UNPACK; else return PIX_FMT_BGR888UNPACK; } /* fall through */ } return -EINVAL; } static void set_pix_fmt(struct fb_var_screeninfo *var, int pix_fmt) { switch (pix_fmt) { case PIX_FMT_RGB565: var->bits_per_pixel = 16; var->red.offset = 11; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case PIX_FMT_BGR565: var->bits_per_pixel = 16; var->red.offset = 0; var->red.length = 5; var->green.offset = 5; var->green.length = 6; var->blue.offset = 11; var->blue.length = 5; var->transp.offset = 0; var->transp.length = 0; break; case PIX_FMT_RGB1555: var->bits_per_pixel = 16; var->red.offset = 10; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 0; var->blue.length = 5; var->transp.offset = 15; var->transp.length = 1; break; case PIX_FMT_BGR1555: var->bits_per_pixel = 16; var->red.offset = 0; var->red.length = 5; var->green.offset = 5; var->green.length = 5; var->blue.offset = 10; var->blue.length = 5; var->transp.offset = 15; var->transp.length = 1; break; case PIX_FMT_RGB888PACK: var->bits_per_pixel = 24; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case PIX_FMT_BGR888PACK: var->bits_per_pixel = 24; var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; 
var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; case PIX_FMT_RGBA888: var->bits_per_pixel = 32; var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; case PIX_FMT_BGRA888: var->bits_per_pixel = 32; var->red.offset = 0; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 16; var->blue.length = 8; var->transp.offset = 24; var->transp.length = 8; break; case PIX_FMT_PSEUDOCOLOR: var->bits_per_pixel = 8; var->red.offset = 0; var->red.length = 8; var->green.offset = 0; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.offset = 0; var->transp.length = 0; break; } } static void set_mode(struct pxa168fb_info *fbi, struct fb_var_screeninfo *var, struct fb_videomode *mode, int pix_fmt, int ystretch) { struct fb_info *info = fbi->info; set_pix_fmt(var, pix_fmt); var->xres = mode->xres; var->yres = mode->yres; var->xres_virtual = max(var->xres, var->xres_virtual); if (ystretch) var->yres_virtual = info->fix.smem_len / (var->xres_virtual * (var->bits_per_pixel >> 3)); else var->yres_virtual = max(var->yres, var->yres_virtual); var->grayscale = 0; var->accel_flags = FB_ACCEL_NONE; var->pixclock = mode->pixclock; var->left_margin = mode->left_margin; var->right_margin = mode->right_margin; var->upper_margin = mode->upper_margin; var->lower_margin = mode->lower_margin; var->hsync_len = mode->hsync_len; var->vsync_len = mode->vsync_len; var->sync = mode->sync; var->vmode = FB_VMODE_NONINTERLACED; var->rotate = FB_ROTATE_UR; } static int pxa168fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct pxa168fb_info *fbi = info->par; int pix_fmt; /* * Determine which pixel format we're going to use. 
*/ pix_fmt = determine_best_pix_fmt(var); if (pix_fmt < 0) return pix_fmt; set_pix_fmt(var, pix_fmt); fbi->pix_fmt = pix_fmt; /* * Basic geometry sanity checks. */ if (var->xoffset + var->xres > var->xres_virtual) return -EINVAL; if (var->yoffset + var->yres > var->yres_virtual) return -EINVAL; if (var->xres + var->right_margin + var->hsync_len + var->left_margin > 2048) return -EINVAL; if (var->yres + var->lower_margin + var->vsync_len + var->upper_margin > 2048) return -EINVAL; /* * Check size of framebuffer. */ if (var->xres_virtual * var->yres_virtual * (var->bits_per_pixel >> 3) > info->fix.smem_len) return -EINVAL; return 0; } /* * The hardware clock divider has an integer and a fractional * stage: * * clk2 = clk_in / integer_divider * clk_out = clk2 * (1 - (fractional_divider >> 12)) * * Calculate integer and fractional divider for given clk_in * and clk_out. */ static void set_clock_divider(struct pxa168fb_info *fbi, const struct fb_videomode *m) { int divider_int; int needed_pixclk; u64 div_result; u32 x = 0; /* * Notice: The field pixclock is used by linux fb * is in pixel second. E.g. struct fb_videomode & * struct fb_var_screeninfo */ /* * Check input values. */ if (!m || !m->pixclock || !m->refresh) { dev_err(fbi->dev, "Input refresh or pixclock is wrong.\n"); return; } /* * Using PLL/AXI clock. */ x = 0x80000000; /* * Calc divider according to refresh rate. */ div_result = 1000000000000ll; do_div(div_result, m->pixclock); needed_pixclk = (u32)div_result; divider_int = clk_get_rate(fbi->clk) / needed_pixclk; /* check whether divisor is too small. */ if (divider_int < 2) { dev_warn(fbi->dev, "Warning: clock source is too slow." "Try smaller resolution\n"); divider_int = 2; } /* * Set setting to reg. */ x |= divider_int; writel(x, fbi->reg_base + LCD_CFG_SCLK_DIV); } static void set_dma_control0(struct pxa168fb_info *fbi) { u32 x; /* * Set bit to enable graphics DMA. */ x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL0); x |= fbi->active ? 
0x00000100 : 0; fbi->active = 0; /* * If we are in a pseudo-color mode, we need to enable * palette lookup. */ if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR) x |= 0x10000000; /* * Configure hardware pixel format. */ x &= ~(0xF << 16); x |= (fbi->pix_fmt >> 1) << 16; /* * Check red and blue pixel swap. * 1. source data swap * 2. panel output data swap */ x &= ~(1 << 12); x |= ((fbi->pix_fmt & 1) ^ (fbi->panel_rbswap)) << 12; writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL0); } static void set_dma_control1(struct pxa168fb_info *fbi, int sync) { u32 x; /* * Configure default bits: vsync triggers DMA, gated clock * enable, power save enable, configure alpha registers to * display 100% graphics, and set pixel command. */ x = readl(fbi->reg_base + LCD_SPU_DMA_CTRL1); x |= 0x2032ff81; /* * We trigger DMA on the falling edge of vsync if vsync is * active low, or on the rising edge if vsync is active high. */ if (!(sync & FB_SYNC_VERT_HIGH_ACT)) x |= 0x08000000; writel(x, fbi->reg_base + LCD_SPU_DMA_CTRL1); } static void set_graphics_start(struct fb_info *info, int xoffset, int yoffset) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; int pixel_offset; unsigned long addr; pixel_offset = (yoffset * var->xres_virtual) + xoffset; addr = fbi->fb_start_dma + (pixel_offset * (var->bits_per_pixel >> 3)); writel(addr, fbi->reg_base + LCD_CFG_GRA_START_ADDR0); } static void set_dumb_panel_control(struct fb_info *info) { struct pxa168fb_info *fbi = info->par; struct pxa168fb_mach_info *mi = fbi->dev->platform_data; u32 x; /* * Preserve enable flag. */ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL) & 0x00000001; x |= (fbi->is_blanked ? 0x7 : mi->dumb_mode) << 28; x |= mi->gpio_output_data << 20; x |= mi->gpio_output_mask << 12; x |= mi->panel_rgb_reverse_lanes ? 0x00000080 : 0; x |= mi->invert_composite_blank ? 0x00000040 : 0; x |= (info->var.sync & FB_SYNC_COMP_HIGH_ACT) ? 0x00000020 : 0; x |= mi->invert_pix_val_ena ? 
0x00000010 : 0; x |= (info->var.sync & FB_SYNC_VERT_HIGH_ACT) ? 0 : 0x00000008; x |= (info->var.sync & FB_SYNC_HOR_HIGH_ACT) ? 0 : 0x00000004; x |= mi->invert_pixclock ? 0x00000002 : 0; writel(x, fbi->reg_base + LCD_SPU_DUMB_CTRL); } static void set_dumb_screen_dimensions(struct fb_info *info) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *v = &info->var; int x; int y; x = v->xres + v->right_margin + v->hsync_len + v->left_margin; y = v->yres + v->lower_margin + v->vsync_len + v->upper_margin; writel((y << 16) | x, fbi->reg_base + LCD_SPUT_V_H_TOTAL); } static int pxa168fb_set_par(struct fb_info *info) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; struct fb_videomode mode; u32 x; struct pxa168fb_mach_info *mi; mi = fbi->dev->platform_data; /* * Set additional mode info. */ if (fbi->pix_fmt == PIX_FMT_PSEUDOCOLOR) info->fix.visual = FB_VISUAL_PSEUDOCOLOR; else info->fix.visual = FB_VISUAL_TRUECOLOR; info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; info->fix.ypanstep = var->yres; /* * Disable panel output while we setup the display. */ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL); writel(x & ~1, fbi->reg_base + LCD_SPU_DUMB_CTRL); /* * Configure global panel parameters. */ writel((var->yres << 16) | var->xres, fbi->reg_base + LCD_SPU_V_H_ACTIVE); /* * convet var to video mode */ fb_var_to_videomode(&mode, &info->var); /* Calculate clock divisor. */ set_clock_divider(fbi, &mode); /* Configure dma ctrl regs. */ set_dma_control0(fbi); set_dma_control1(fbi, info->var.sync); /* * Configure graphics DMA parameters. */ x = readl(fbi->reg_base + LCD_CFG_GRA_PITCH); x = (x & ~0xFFFF) | ((var->xres_virtual * var->bits_per_pixel) >> 3); writel(x, fbi->reg_base + LCD_CFG_GRA_PITCH); writel((var->yres << 16) | var->xres, fbi->reg_base + LCD_SPU_GRA_HPXL_VLN); writel((var->yres << 16) | var->xres, fbi->reg_base + LCD_SPU_GZM_HPXL_VLN); /* * Configure dumb panel ctrl regs & timings. 
*/ set_dumb_panel_control(info); set_dumb_screen_dimensions(info); writel((var->left_margin << 16) | var->right_margin, fbi->reg_base + LCD_SPU_H_PORCH); writel((var->upper_margin << 16) | var->lower_margin, fbi->reg_base + LCD_SPU_V_PORCH); /* * Re-enable panel output. */ x = readl(fbi->reg_base + LCD_SPU_DUMB_CTRL); writel(x | 1, fbi->reg_base + LCD_SPU_DUMB_CTRL); return 0; } static unsigned int chan_to_field(unsigned int chan, struct fb_bitfield *bf) { return ((chan & 0xffff) >> (16 - bf->length)) << bf->offset; } static u32 to_rgb(u16 red, u16 green, u16 blue) { red >>= 8; green >>= 8; blue >>= 8; return (red << 16) | (green << 8) | blue; } static int pxa168fb_setcolreg(unsigned int regno, unsigned int red, unsigned int green, unsigned int blue, unsigned int trans, struct fb_info *info) { struct pxa168fb_info *fbi = info->par; u32 val; if (info->var.grayscale) red = green = blue = (19595 * red + 38470 * green + 7471 * blue) >> 16; if (info->fix.visual == FB_VISUAL_TRUECOLOR && regno < 16) { val = chan_to_field(red, &info->var.red); val |= chan_to_field(green, &info->var.green); val |= chan_to_field(blue , &info->var.blue); fbi->pseudo_palette[regno] = val; } if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR && regno < 256) { val = to_rgb(red, green, blue); writel(val, fbi->reg_base + LCD_SPU_SRAM_WRDAT); writel(0x8300 | regno, fbi->reg_base + LCD_SPU_SRAM_CTRL); } return 0; } static int pxa168fb_blank(int blank, struct fb_info *info) { struct pxa168fb_info *fbi = info->par; fbi->is_blanked = (blank == FB_BLANK_UNBLANK) ? 
0 : 1; set_dumb_panel_control(info); return 0; } static int pxa168fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { set_graphics_start(info, var->xoffset, var->yoffset); return 0; } static irqreturn_t pxa168fb_handle_irq(int irq, void *dev_id) { struct pxa168fb_info *fbi = dev_id; u32 isr = readl(fbi->reg_base + SPU_IRQ_ISR); if ((isr & GRA_FRAME_IRQ0_ENA_MASK)) { writel(isr & (~GRA_FRAME_IRQ0_ENA_MASK), fbi->reg_base + SPU_IRQ_ISR); return IRQ_HANDLED; } return IRQ_NONE; } static struct fb_ops pxa168fb_ops = { .owner = THIS_MODULE, .fb_check_var = pxa168fb_check_var, .fb_set_par = pxa168fb_set_par, .fb_setcolreg = pxa168fb_setcolreg, .fb_blank = pxa168fb_blank, .fb_pan_display = pxa168fb_pan_display, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, }; static int __init pxa168fb_init_mode(struct fb_info *info, struct pxa168fb_mach_info *mi) { struct pxa168fb_info *fbi = info->par; struct fb_var_screeninfo *var = &info->var; int ret = 0; u32 total_w, total_h, refresh; u64 div_result; const struct fb_videomode *m; /* * Set default value */ refresh = DEFAULT_REFRESH; /* try to find best video mode. */ m = fb_find_best_mode(&info->var, &info->modelist); if (m) fb_videomode_to_var(&info->var, m); /* Init settings. */ var->xres_virtual = var->xres; var->yres_virtual = info->fix.smem_len / (var->xres_virtual * (var->bits_per_pixel >> 3)); dev_dbg(fbi->dev, "pxa168fb: find best mode: res = %dx%d\n", var->xres, var->yres); /* correct pixclock. 
*/ total_w = var->xres + var->left_margin + var->right_margin + var->hsync_len; total_h = var->yres + var->upper_margin + var->lower_margin + var->vsync_len; div_result = 1000000000000ll; do_div(div_result, total_w * total_h * refresh); var->pixclock = (u32)div_result; return ret; } static int __init pxa168fb_probe(struct platform_device *pdev) { struct pxa168fb_mach_info *mi; struct fb_info *info = 0; struct pxa168fb_info *fbi = 0; struct resource *res; struct clk *clk; int irq, ret; mi = pdev->dev.platform_data; if (mi == NULL) { dev_err(&pdev->dev, "no platform data defined\n"); return -EINVAL; } clk = clk_get(&pdev->dev, "LCDCLK"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "unable to get LCDCLK"); return PTR_ERR(clk); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "no IO memory defined\n"); return -ENOENT; } irq = platform_get_irq(pdev, 0); if (irq < 0) { dev_err(&pdev->dev, "no IRQ defined\n"); return -ENOENT; } info = framebuffer_alloc(sizeof(struct pxa168fb_info), &pdev->dev); if (info == NULL) { clk_put(clk); return -ENOMEM; } /* Initialize private data */ fbi = info->par; fbi->info = info; fbi->clk = clk; fbi->dev = info->dev = &pdev->dev; fbi->panel_rbswap = mi->panel_rbswap; fbi->is_blanked = 0; fbi->active = mi->active; /* * Initialise static fb parameters. */ info->flags = FBINFO_DEFAULT | FBINFO_PARTIAL_PAN_OK | FBINFO_HWACCEL_XPAN | FBINFO_HWACCEL_YPAN; info->node = -1; strlcpy(info->fix.id, mi->id, 16); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.type_aux = 0; info->fix.xpanstep = 0; info->fix.ypanstep = 0; info->fix.ywrapstep = 0; info->fix.mmio_start = res->start; info->fix.mmio_len = res->end - res->start + 1; info->fix.accel = FB_ACCEL_NONE; info->fbops = &pxa168fb_ops; info->pseudo_palette = fbi->pseudo_palette; /* * Map LCD controller registers. 
*/ fbi->reg_base = ioremap_nocache(res->start, res->end - res->start); if (fbi->reg_base == NULL) { ret = -ENOMEM; goto failed; } /* * Allocate framebuffer memory. */ info->fix.smem_len = PAGE_ALIGN(DEFAULT_FB_SIZE); info->screen_base = dma_alloc_writecombine(fbi->dev, info->fix.smem_len, &fbi->fb_start_dma, GFP_KERNEL); if (info->screen_base == NULL) { ret = -ENOMEM; goto failed; } info->fix.smem_start = (unsigned long)fbi->fb_start_dma; /* * Set video mode according to platform data. */ set_mode(fbi, &info->var, mi->modes, mi->pix_fmt, 1); fb_videomode_to_modelist(mi->modes, mi->num_modes, &info->modelist); /* * init video mode data. */ pxa168fb_init_mode(info, mi); ret = pxa168fb_check_var(&info->var, info); if (ret) goto failed_free_fbmem; /* * Fill in sane defaults. */ ret = pxa168fb_check_var(&info->var, info); if (ret) goto failed; /* * enable controller clock */ clk_enable(fbi->clk); pxa168fb_set_par(info); /* * Configure default register values. */ writel(0, fbi->reg_base + LCD_SPU_BLANKCOLOR); writel(mi->io_pin_allocation_mode, fbi->reg_base + SPU_IOPAD_CONTROL); writel(0, fbi->reg_base + LCD_CFG_GRA_START_ADDR1); writel(0, fbi->reg_base + LCD_SPU_GRA_OVSA_HPXL_VLN); writel(0, fbi->reg_base + LCD_SPU_SRAM_PARA0); writel(CFG_CSB_256x32(0x1)|CFG_CSB_256x24(0x1)|CFG_CSB_256x8(0x1), fbi->reg_base + LCD_SPU_SRAM_PARA1); /* * Allocate color map. */ if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) { ret = -ENOMEM; goto failed_free_clk; } /* * Register irq handler. */ ret = request_irq(irq, pxa168fb_handle_irq, IRQF_SHARED, info->fix.id, fbi); if (ret < 0) { dev_err(&pdev->dev, "unable to request IRQ\n"); ret = -ENXIO; goto failed_free_cmap; } /* * Enable GFX interrupt */ writel(GRA_FRAME_IRQ0_ENA(0x1), fbi->reg_base + SPU_IRQ_ENA); /* * Register framebuffer. 
*/ ret = register_framebuffer(info); if (ret < 0) { dev_err(&pdev->dev, "Failed to register pxa168-fb: %d\n", ret); ret = -ENXIO; goto failed_free_irq; } platform_set_drvdata(pdev, fbi); return 0; failed_free_irq: free_irq(irq, fbi); failed_free_cmap: fb_dealloc_cmap(&info->cmap); failed_free_clk: clk_disable(fbi->clk); failed_free_fbmem: dma_free_coherent(fbi->dev, info->fix.smem_len, info->screen_base, fbi->fb_start_dma); failed: kfree(info); clk_put(clk); dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret); return ret; } static struct platform_driver pxa168fb_driver = { .driver = { .name = "pxa168-fb", .owner = THIS_MODULE, }, .probe = pxa168fb_probe, }; static int __devinit pxa168fb_init(void) { return platform_driver_register(&pxa168fb_driver); } module_init(pxa168fb_init); MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com> " "Green Wan <gwan@marvell.com>"); MODULE_DESCRIPTION("Framebuffer driver for PXA168/910"); MODULE_LICENSE("GPL");
gpl-2.0
mmukadam/linuxv3.12
drivers/regulator/gpio-regulator.c
529
10511
/*
 * gpio-regulator.c
 *
 * Copyright 2011 Heiko Stuebner <heiko@sntech.de>
 *
 * based on fixed.c
 *
 * Copyright 2008 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * Copyright (c) 2009 Nokia Corporation
 * Roger Quadros <ext-roger.quadros@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This is useful for systems with mixed controllable and
 * non-controllable regulators, as well as for allowing testing on
 * systems with no controllable regulators.
 */

#include <linux/err.h>
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/regulator/gpio-regulator.h>
#include <linux/gpio.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_gpio.h>

/*
 * Per-regulator driver state.
 *
 * The regulator output is selected by driving @nr_gpios GPIO lines.
 * @state is the bitmask of levels currently applied to those lines
 * (bit n == level of gpios[n]); each entry in @states maps one such
 * bitmask to the output value (uV or uA) it produces.
 */
struct gpio_regulator_data {
	struct regulator_desc desc;
	struct regulator_dev *dev;

	struct gpio *gpios;
	int nr_gpios;

	struct gpio_regulator_state *states;
	int nr_states;

	int state;
};

/*
 * Return the output value (uV or uA) matching the GPIO bitmask that is
 * currently applied, or -EINVAL if @state matches no table entry.
 * Shared by the voltage and current ops.
 */
static int gpio_regulator_get_value(struct regulator_dev *dev)
{
	struct gpio_regulator_data *data = rdev_get_drvdata(dev);
	int ptr;

	for (ptr = 0; ptr < data->nr_states; ptr++)
		if (data->states[ptr].gpios == data->state)
			return data->states[ptr].value;

	return -EINVAL;
}

/*
 * Select the lowest table voltage inside [min_uV, max_uV] and drive the
 * GPIO lines to the matching bitmask.  Returns -EINVAL when no table
 * entry falls inside the requested window.
 */
static int gpio_regulator_set_voltage(struct regulator_dev *dev,
					int min_uV, int max_uV,
					unsigned *selector)
{
	struct gpio_regulator_data *data = rdev_get_drvdata(dev);
	int ptr, target = 0, state, best_val = INT_MAX;

	/* pick the smallest voltage satisfying the constraints */
	for (ptr = 0; ptr < data->nr_states; ptr++)
		if (data->states[ptr].value < best_val &&
		    data->states[ptr].value >= min_uV &&
		    data->states[ptr].value <= max_uV) {
			target = data->states[ptr].gpios;
			best_val = data->states[ptr].value;
			if (selector)
				*selector = ptr;
		}

	if (best_val == INT_MAX)
		return -EINVAL;

	/* apply the bitmask, one GPIO line per bit */
	for (ptr = 0; ptr < data->nr_gpios; ptr++) {
		state = (target & (1 << ptr)) >> ptr;
		gpio_set_value_cansleep(data->gpios[ptr].gpio, state);
	}
	data->state = target;

	return 0;
}

/* Report the voltage produced by table entry @selector. */
static int gpio_regulator_list_voltage(struct regulator_dev *dev,
				      unsigned selector)
{
	struct gpio_regulator_data *data = rdev_get_drvdata(dev);

	if (selector >= data->nr_states)
		return -EINVAL;

	return data->states[selector].value;
}

/*
 * Select the highest table current inside [min_uA, max_uA] and drive the
 * GPIO lines accordingly.  Note the search direction is opposite to
 * set_voltage: current limits pick the largest acceptable value.
 */
static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
					int min_uA, int max_uA)
{
	struct gpio_regulator_data *data = rdev_get_drvdata(dev);
	int ptr, target = 0, state, best_val = 0;

	for (ptr = 0; ptr < data->nr_states; ptr++)
		if (data->states[ptr].value > best_val &&
		    data->states[ptr].value >= min_uA &&
		    data->states[ptr].value <= max_uA) {
			target = data->states[ptr].gpios;
			best_val = data->states[ptr].value;
		}

	if (best_val == 0)
		return -EINVAL;

	for (ptr = 0; ptr < data->nr_gpios; ptr++) {
		state = (target & (1 << ptr)) >> ptr;
		gpio_set_value_cansleep(data->gpios[ptr].gpio, state);
	}
	data->state = target;

	return 0;
}

static struct regulator_ops gpio_regulator_voltage_ops = {
	.get_voltage = gpio_regulator_get_value,
	.set_voltage = gpio_regulator_set_voltage,
	.list_voltage = gpio_regulator_list_voltage,
};

/*
 * Build a gpio_regulator_config from a devicetree node.  All memory is
 * devm-managed, so error paths simply return ERR_PTR() without cleanup.
 */
static struct gpio_regulator_config *
of_get_gpio_regulator_config(struct device *dev, struct device_node *np)
{
	struct gpio_regulator_config *config;
	const char *regtype;
	int proplen, gpio, i;
	int ret;

	config = devm_kzalloc(dev,
			sizeof(struct gpio_regulator_config),
			GFP_KERNEL);
	if (!config)
		return ERR_PTR(-ENOMEM);

	config->init_data = of_get_regulator_init_data(dev, np);
	if (!config->init_data)
		return ERR_PTR(-EINVAL);

	config->supply_name = config->init_data->constraints.name;

	if (of_property_read_bool(np, "enable-active-high"))
		config->enable_high = true;

	if (of_property_read_bool(np, "enable-at-boot"))
		config->enabled_at_boot = true;

	of_property_read_u32(np, "startup-delay-us", &config->startup_delay);

	config->enable_gpio = of_get_named_gpio(np, "enable-gpio", 0);

	/* Fetch GPIOs. */
	config->nr_gpios = of_gpio_count(np);

	config->gpios = devm_kzalloc(dev,
				sizeof(struct gpio) * config->nr_gpios,
				GFP_KERNEL);
	if (!config->gpios)
		return ERR_PTR(-ENOMEM);

	proplen = of_property_count_u32_elems(np, "gpios-states");
	/* optional property */
	if (proplen < 0)
		proplen = 0;

	if (proplen > 0 && proplen != config->nr_gpios) {
		dev_warn(dev, "gpios <-> gpios-states mismatch\n");
		proplen = 0;
	}

	for (i = 0; i < config->nr_gpios; i++) {
		gpio = of_get_named_gpio(np, "gpios", i);
		if (gpio < 0)
			break;
		config->gpios[i].gpio = gpio;
		if (proplen > 0) {
			/* ret doubles as scratch for the u32 cell value here */
			of_property_read_u32_index(np, "gpios-states", i, &ret);
			if (ret)
				config->gpios[i].flags = GPIOF_OUT_INIT_HIGH;
		}
	}

	/* Fetch states. */
	proplen = of_property_count_u32_elems(np, "states");
	if (proplen < 0) {
		dev_err(dev, "No 'states' property found\n");
		return ERR_PTR(-EINVAL);
	}

	config->states = devm_kzalloc(dev,
				sizeof(struct gpio_regulator_state)
				* (proplen / 2),
				GFP_KERNEL);
	if (!config->states)
		return ERR_PTR(-ENOMEM);

	/* states = list of <value gpio-bitmask> pairs */
	for (i = 0; i < proplen / 2; i++) {
		of_property_read_u32_index(np, "states", i * 2,
					   &config->states[i].value);
		of_property_read_u32_index(np, "states", i * 2 + 1,
					   &config->states[i].gpios);
	}
	config->nr_states = i;

	config->type = REGULATOR_VOLTAGE;
	ret = of_property_read_string(np, "regulator-type", &regtype);
	if (ret >= 0) {
		if (!strncmp("voltage", regtype, 7))
			config->type = REGULATOR_VOLTAGE;
		else if (!strncmp("current", regtype, 7))
			config->type = REGULATOR_CURRENT;
		else
			dev_warn(dev, "Unknown regulator-type '%s'\n",
				 regtype);
	}

	return config;
}

static struct regulator_ops gpio_regulator_current_ops = {
	.get_current_limit = gpio_regulator_get_value,
	.set_current_limit = gpio_regulator_set_current_limit,
};

/*
 * Probe: copy the (possibly DT-derived) config into driver-owned
 * allocations, claim the selection GPIOs, derive the initial state from
 * their init flags and register with the regulator core.  Error unwind
 * runs strictly backwards through the goto chain below.
 */
static int gpio_regulator_probe(struct platform_device *pdev)
{
	struct gpio_regulator_config *config = dev_get_platdata(&pdev->dev);
	struct device_node *np = pdev->dev.of_node;
	struct gpio_regulator_data *drvdata;
	struct regulator_config cfg = { };
	int ptr, ret, state;

	if (np) {
		config = of_get_gpio_regulator_config(&pdev->dev, np);
		if (IS_ERR(config))
			return PTR_ERR(config);
	}

	drvdata = devm_kzalloc(&pdev->dev, sizeof(struct gpio_regulator_data),
			       GFP_KERNEL);
	if (drvdata == NULL)
		return -ENOMEM;

	drvdata->desc.name = kstrdup(config->supply_name, GFP_KERNEL);
	if (drvdata->desc.name == NULL) {
		dev_err(&pdev->dev, "Failed to allocate supply name\n");
		ret = -ENOMEM;
		goto err;
	}

	drvdata->gpios = kmemdup(config->gpios,
				 config->nr_gpios * sizeof(struct gpio),
				 GFP_KERNEL);
	if (drvdata->gpios == NULL) {
		dev_err(&pdev->dev, "Failed to allocate gpio data\n");
		ret = -ENOMEM;
		goto err_name;
	}

	drvdata->states = kmemdup(config->states,
				  config->nr_states *
					 sizeof(struct gpio_regulator_state),
				  GFP_KERNEL);
	if (drvdata->states == NULL) {
		dev_err(&pdev->dev, "Failed to allocate state data\n");
		ret = -ENOMEM;
		goto err_memgpio;
	}
	drvdata->nr_states = config->nr_states;

	drvdata->desc.owner = THIS_MODULE;
	drvdata->desc.enable_time = config->startup_delay;

	/* handle regulator type*/
	switch (config->type) {
	case REGULATOR_VOLTAGE:
		drvdata->desc.type = REGULATOR_VOLTAGE;
		drvdata->desc.ops = &gpio_regulator_voltage_ops;
		drvdata->desc.n_voltages = config->nr_states;
		break;
	case REGULATOR_CURRENT:
		drvdata->desc.type = REGULATOR_CURRENT;
		drvdata->desc.ops = &gpio_regulator_current_ops;
		break;
	default:
		dev_err(&pdev->dev, "No regulator type set\n");
		ret = -EINVAL;
		/*
		 * FIX: drvdata->states was already allocated above, so we
		 * must unwind through err_memstate; the previous jump to
		 * err_memgpio leaked the states buffer.
		 */
		goto err_memstate;
	}

	drvdata->nr_gpios = config->nr_gpios;
	ret = gpio_request_array(drvdata->gpios, drvdata->nr_gpios);
	if (ret) {
		dev_err(&pdev->dev,
		   "Could not obtain regulator setting GPIOs: %d\n", ret);
		goto err_memstate;
	}

	/* build initial state from gpio init data. */
	state = 0;
	for (ptr = 0; ptr < drvdata->nr_gpios; ptr++) {
		if (config->gpios[ptr].flags & GPIOF_OUT_INIT_HIGH)
			state |= (1 << ptr);
	}
	drvdata->state = state;

	cfg.dev = &pdev->dev;
	cfg.init_data = config->init_data;
	cfg.driver_data = drvdata;
	cfg.of_node = np;

	if (config->enable_gpio >= 0)
		cfg.ena_gpio = config->enable_gpio;
	cfg.ena_gpio_invert = !config->enable_high;
	/* translate enable polarity + boot state into initial GPIO flags */
	if (config->enabled_at_boot) {
		if (config->enable_high)
			cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
		else
			cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
	} else {
		if (config->enable_high)
			cfg.ena_gpio_flags |= GPIOF_OUT_INIT_LOW;
		else
			cfg.ena_gpio_flags |= GPIOF_OUT_INIT_HIGH;
	}

	drvdata->dev = regulator_register(&drvdata->desc, &cfg);
	if (IS_ERR(drvdata->dev)) {
		ret = PTR_ERR(drvdata->dev);
		dev_err(&pdev->dev, "Failed to register regulator: %d\n", ret);
		goto err_stategpio;
	}

	platform_set_drvdata(pdev, drvdata);

	return 0;

err_stategpio:
	gpio_free_array(drvdata->gpios, drvdata->nr_gpios);
err_memstate:
	kfree(drvdata->states);
err_memgpio:
	kfree(drvdata->gpios);
err_name:
	kfree(drvdata->desc.name);
err:
	return ret;
}

/* Tear down in exact reverse order of probe. */
static int gpio_regulator_remove(struct platform_device *pdev)
{
	struct gpio_regulator_data *drvdata = platform_get_drvdata(pdev);

	regulator_unregister(drvdata->dev);

	gpio_free_array(drvdata->gpios, drvdata->nr_gpios);

	kfree(drvdata->states);
	kfree(drvdata->gpios);

	kfree(drvdata->desc.name);

	return 0;
}

#if defined(CONFIG_OF)
static const struct of_device_id regulator_gpio_of_match[] = {
	{ .compatible = "regulator-gpio", },
	{},
};
#endif

static struct platform_driver gpio_regulator_driver = {
	.probe		= gpio_regulator_probe,
	.remove		= gpio_regulator_remove,
	.driver		= {
		.name		= "gpio-regulator",
		.owner		= THIS_MODULE,
		.of_match_table = of_match_ptr(regulator_gpio_of_match),
	},
};

static int __init gpio_regulator_init(void)
{
	return platform_driver_register(&gpio_regulator_driver);
}
/* register early so dependent consumers can find us at boot */
subsys_initcall(gpio_regulator_init);

static void __exit gpio_regulator_exit(void)
{
	platform_driver_unregister(&gpio_regulator_driver);
}
module_exit(gpio_regulator_exit);

MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
MODULE_DESCRIPTION("gpio voltage regulator");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:gpio-regulator");
gpl-2.0
jdlfg/Mecha-kernel
drivers/isdn/hisax/telespci.c
785
9141
/* $Id: telespci.c,v 2.23.2.3 2004/01/13 14:31:26 keil Exp $
 *
 * low level stuff for Teles PCI isdn cards
 *
 * Author       Ton van Rosmalen
 *              Karsten Keil
 * Copyright    by Ton van Rosmalen
 *              by Karsten Keil      <keil@isdn4linux.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

#include <linux/init.h>
#include "hisax.h"
#include "isac.h"
#include "hscx.h"
#include "isdnl1.h"
#include <linux/pci.h>

static const char *telespci_revision = "$Revision: 2.23.2.3 $";

/*
 * The ISAC/HSCX chips sit behind a Zoran ZR36120 PCI bridge.  All chip
 * accesses go through the bridge's post-office register at offset 0x200:
 * GID selects the chip (0 = ISAC, 1 = HSCX), GREG selects address vs.
 * data phase, and RQ_PEN signals a transaction still in flight.
 */
#define ZORAN_PO_RQ_PEN	0x02000000
#define ZORAN_PO_WR	0x00800000
#define ZORAN_PO_GID0	0x00000000
#define ZORAN_PO_GID1	0x00100000
#define ZORAN_PO_GREG0	0x00000000
#define ZORAN_PO_GREG1	0x00010000
#define ZORAN_PO_DMASK	0xFF

#define WRITE_ADDR_ISAC	(ZORAN_PO_WR | ZORAN_PO_GID0 | ZORAN_PO_GREG0)
#define READ_DATA_ISAC	(ZORAN_PO_GID0 | ZORAN_PO_GREG1)
#define WRITE_DATA_ISAC	(ZORAN_PO_WR | ZORAN_PO_GID0 | ZORAN_PO_GREG1)
#define WRITE_ADDR_HSCX	(ZORAN_PO_WR | ZORAN_PO_GID1 | ZORAN_PO_GREG0)
#define READ_DATA_HSCX	(ZORAN_PO_GID1 | ZORAN_PO_GREG1)
#define WRITE_DATA_HSCX	(ZORAN_PO_WR | ZORAN_PO_GID1 | ZORAN_PO_GREG1)

/*
 * Spin until the bridge is idle.  NOTE: this macro deliberately leaves
 * the last post-office readout in the caller's local 'portdata' - the
 * read accessors below rely on that to fetch the data byte.
 */
#define ZORAN_WAIT_NOBUSY	do { \
		portdata = readl(adr + 0x200); \
	} while (portdata & ZORAN_PO_RQ_PEN)

/* Read one ISAC register: address phase, data phase, then harvest the
 * byte left in portdata by the final busy-wait. */
static inline u_char
readisac(void __iomem *adr, u_char off)
{
	register unsigned int portdata;

	ZORAN_WAIT_NOBUSY;
	/* set address for ISAC */
	writel(WRITE_ADDR_ISAC | off, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	/* read data from ISAC */
	writel(READ_DATA_ISAC, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	return((u_char)(portdata & ZORAN_PO_DMASK));
}

/* Write one ISAC register (address phase, then data phase). */
static inline void
writeisac(void __iomem *adr, u_char off, u_char data)
{
	register unsigned int portdata;

	ZORAN_WAIT_NOBUSY;
	/* set address for ISAC */
	writel(WRITE_ADDR_ISAC | off, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	/* write data to ISAC */
	writel(WRITE_DATA_ISAC | data, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
}

/* Read one HSCX register; hscx selects channel B (register bank +0x40). */
static inline u_char
readhscx(void __iomem *adr, int hscx, u_char off)
{
	register unsigned int portdata;

	ZORAN_WAIT_NOBUSY;
	/* set address for HSCX */
	writel(WRITE_ADDR_HSCX | ((hscx ? 0x40:0) + off), adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	/* read data from HSCX */
	writel(READ_DATA_HSCX, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	return ((u_char)(portdata & ZORAN_PO_DMASK));
}

/* Write one HSCX register for the selected channel. */
static inline void
writehscx(void __iomem *adr, int hscx, u_char off, u_char data)
{
	register unsigned int portdata;

	ZORAN_WAIT_NOBUSY;
	/* set address for HSCX */
	writel(WRITE_ADDR_HSCX | ((hscx ? 0x40:0) + off), adr + 0x200);
	ZORAN_WAIT_NOBUSY;
	/* write data to HSCX */
	writel(WRITE_DATA_HSCX | data, adr + 0x200);
	ZORAN_WAIT_NOBUSY;
}

/* Drain 'size' bytes from the ISAC receive FIFO (register 0x1E). */
static inline void
read_fifo_isac(void __iomem *adr, u_char * data, int size)
{
	register unsigned int portdata;
	register int i;

	ZORAN_WAIT_NOBUSY;
	/* read data from ISAC */
	for (i = 0; i < size; i++) {
		/* set address for ISAC fifo */
		writel(WRITE_ADDR_ISAC | 0x1E, adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		writel(READ_DATA_ISAC, adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		data[i] = (u_char)(portdata & ZORAN_PO_DMASK);
	}
}

/* Push 'size' bytes into the ISAC transmit FIFO. */
static void
write_fifo_isac(void __iomem *adr, u_char * data, int size)
{
	register unsigned int portdata;
	register int i;

	ZORAN_WAIT_NOBUSY;
	/* write data to ISAC */
	for (i = 0; i < size; i++) {
		/* set address for ISAC fifo */
		writel(WRITE_ADDR_ISAC | 0x1E, adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		writel(WRITE_DATA_ISAC | data[i], adr + 0x200);
		ZORAN_WAIT_NOBUSY;
	}
}

/* Drain the HSCX FIFO (0x1F for channel A, 0x5F for channel B). */
static inline void
read_fifo_hscx(void __iomem *adr, int hscx, u_char * data, int size)
{
	register unsigned int portdata;
	register int i;

	ZORAN_WAIT_NOBUSY;
	/* read data from HSCX */
	for (i = 0; i < size; i++) {
		/* set address for HSCX fifo */
		writel(WRITE_ADDR_HSCX |(hscx ? 0x5F:0x1F), adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		writel(READ_DATA_HSCX, adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		data[i] = (u_char) (portdata & ZORAN_PO_DMASK);
	}
}

/* Fill the HSCX transmit FIFO; udelay(10) paces successive writes
 * (presumably a chip timing requirement - predates exact documentation). */
static inline void
write_fifo_hscx(void __iomem *adr, int hscx, u_char * data, int size)
{
	unsigned int portdata;
	register int i;

	ZORAN_WAIT_NOBUSY;
	/* write data to HSCX */
	for (i = 0; i < size; i++) {
		/* set address for HSCX fifo */
		writel(WRITE_ADDR_HSCX |(hscx ? 0x5F:0x1F), adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		writel(WRITE_DATA_HSCX | data[i], adr + 0x200);
		ZORAN_WAIT_NOBUSY;
		udelay(10);
	}
}

/* Interface functions: hisax-core callbacks, thin wrappers over the
 * accessors above using the mapped membase from the card state. */

static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
	return (readisac(cs->hw.teles0.membase, offset));
}

static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writeisac(cs->hw.teles0.membase, offset, value);
}

static void
ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	read_fifo_isac(cs->hw.teles0.membase, data, size);
}

static void
WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	write_fifo_isac(cs->hw.teles0.membase, data, size);
}

static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
	return (readhscx(cs->hw.teles0.membase, hscx, offset));
}

static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
	writehscx(cs->hw.teles0.membase, hscx, offset, value);
}

/*
 * fast interrupt HSCX stuff goes here
 */

#define READHSCX(cs, nr, reg) readhscx(cs->hw.teles0.membase, nr, reg)
#define WRITEHSCX(cs, nr, reg, data) writehscx(cs->hw.teles0.membase, nr, reg, data)
#define READHSCXFIFO(cs, nr, ptr, cnt) read_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) write_fifo_hscx(cs->hw.teles0.membase, nr, ptr, cnt)

#include "hscx_irq.c"

/*
 * Shared IRQ handler: service HSCX then ISAC interrupt status, ack the
 * Zoran bridge, and pulse the chip interrupt masks to re-arm them.
 * Returns IRQ_NONE when neither chip raised the line (shared IRQ).
 */
static irqreturn_t
telespci_interrupt(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_char hval, ival;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	hval = readhscx(cs->hw.teles0.membase, 1, HSCX_ISTA);
	if (hval)
		hscx_int_main(cs, hval);
	ival = readisac(cs->hw.teles0.membase, ISAC_ISTA);
	if ((hval | ival) == 0) {
		spin_unlock_irqrestore(&cs->lock, flags);
		return IRQ_NONE;
	}
	if (ival)
		isac_interrupt(cs, ival);
	/* Clear interrupt register for Zoran PCI controller */
	writel(0x70000000, cs->hw.teles0.membase + 0x3C);

	/* mask then unmask: re-arms the chips' interrupt outputs */
	writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0xFF);
	writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0xFF);
	writeisac(cs->hw.teles0.membase, ISAC_MASK, 0xFF);
	writeisac(cs->hw.teles0.membase, ISAC_MASK, 0x0);
	writehscx(cs->hw.teles0.membase, 0, HSCX_MASK, 0x0);
	writehscx(cs->hw.teles0.membase, 1, HSCX_MASK, 0x0);
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}

/* Undo the ioremap done in setup_telespci(). */
static void
release_io_telespci(struct IsdnCardState *cs)
{
	iounmap(cs->hw.teles0.membase);
}

/* hisax card-control callback; only RELEASE and INIT do real work. */
static int
TelesPCI_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	switch (mt) {
	case CARD_RESET:
		return(0);
	case CARD_RELEASE:
		release_io_telespci(cs);
		return(0);
	case CARD_INIT:
		spin_lock_irqsave(&cs->lock, flags);
		inithscxisac(cs, 3);
		spin_unlock_irqrestore(&cs->lock, flags);
		return(0);
	case CARD_TEST:
		return(0);
	}
	return(0);
}

/* Scan cursor: persists across calls so multiple cards can be found. */
static struct pci_dev *dev_tel __devinitdata = NULL;

/*
 * Probe for a Zoran-bridged Teles PCI card, map its registers,
 * initialize the bridge and wire up the hisax callbacks.
 * Returns 1 on success, 0 on any failure (hisax convention).
 */
int __devinit
setup_telespci(struct IsdnCard *card)
{
	struct IsdnCardState *cs = card->cs;
	char tmp[64];

#ifdef __BIG_ENDIAN
#error "not running on big endian machines now"
#endif
	strcpy(tmp, telespci_revision);
	printk(KERN_INFO "HiSax: Teles/PCI driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ != ISDN_CTYPE_TELESPCI)
		return (0);

	if ((dev_tel = pci_find_device (PCI_VENDOR_ID_ZORAN, PCI_DEVICE_ID_ZORAN_36120, dev_tel))) {
		if (pci_enable_device(dev_tel))
			return(0);
		cs->irq = dev_tel->irq;
		if (!cs->irq) {
			printk(KERN_WARNING "Teles: No IRQ for PCI card found\n");
			return(0);
		}
		/* NOTE(review): ioremap result is not NULL-checked here */
		cs->hw.teles0.membase = ioremap(pci_resource_start(dev_tel, 0),
			PAGE_SIZE);
		printk(KERN_INFO "Found: Zoran, base-address: 0x%llx, irq: 0x%x\n",
			(unsigned long long)pci_resource_start(dev_tel, 0),
			dev_tel->irq);
	} else {
		printk(KERN_WARNING "TelesPCI: No PCI card found\n");
		return(0);
	}

	/* Initialize Zoran PCI controller */
	writel(0x00000000, cs->hw.teles0.membase + 0x28);
	writel(0x01000000, cs->hw.teles0.membase + 0x28);
	writel(0x01000000, cs->hw.teles0.membase + 0x28);
	writel(0x7BFFFFFF, cs->hw.teles0.membase + 0x2C);
	writel(0x70000000, cs->hw.teles0.membase + 0x3C);
	writel(0x61000000, cs->hw.teles0.membase + 0x40);
	/* writel(0x00800000, cs->hw.teles0.membase + 0x200); */

	printk(KERN_INFO
	       "HiSax: Teles PCI config irq:%d mem:%p\n",
	       cs->irq, cs->hw.teles0.membase);

	setup_isac(cs);
	cs->readisac = &ReadISAC;
	cs->writeisac = &WriteISAC;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;
	cs->BC_Read_Reg = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &TelesPCI_card_msg;
	cs->irq_func = &telespci_interrupt;
	cs->irq_flags |= IRQF_SHARED;
	ISACVersion(cs, "TelesPCI:");
	if (HscxVersion(cs, "TelesPCI:")) {
		printk(KERN_WARNING
		 "TelesPCI: wrong HSCX versions check IO/MEM addresses\n");
		release_io_telespci(cs);
		return (0);
	}
	return (1);
}
gpl-2.0
supersonicninja/NinjaKernelHW01E
drivers/net/veth.c
785
9527
/*
 *  drivers/net/veth.c
 *
 *  Copyright (C) 2007 OpenVZ http://openvz.org, SWsoft Inc
 *
 * Author: Pavel Emelianov <xemul@openvz.org>
 * Ethtool interface from: Eric W. Biederman <ebiederm@xmission.com>
 *
 */

#include <linux/netdevice.h>
#include <linux/slab.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>

#include <net/dst.h>
#include <net/xfrm.h>
#include <linux/veth.h>

#define DRV_NAME	"veth"
#define DRV_VERSION	"1.0"

#define MIN_MTU 68		/* Min L3 MTU */
#define MAX_MTU 65535		/* Max L3 MTU (arbitrary) */

/* Per-cpu traffic counters; summed on demand in veth_get_stats(). */
struct veth_net_stats {
	unsigned long	rx_packets;
	unsigned long	tx_packets;
	unsigned long	rx_bytes;
	unsigned long	tx_bytes;
	unsigned long	tx_dropped;
	unsigned long	rx_dropped;
};

/* Private data: the paired device and per-cpu stats. */
struct veth_priv {
	struct net_device *peer;
	struct veth_net_stats __percpu *stats;
};

/*
 * ethtool interface
 */

static struct {
	const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
	{ "peer_ifindex" },
};

/* Report fixed link parameters: virtual link, always 10Gb full duplex. */
static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	cmd->supported		= 0;
	cmd->advertising	= 0;
	ethtool_cmd_speed_set(cmd, SPEED_10000);
	cmd->duplex		= DUPLEX_FULL;
	cmd->port		= PORT_TP;
	cmd->phy_address	= 0;
	cmd->transceiver	= XCVR_INTERNAL;
	cmd->autoneg		= AUTONEG_DISABLE;
	cmd->maxtxpkt		= 0;
	cmd->maxrxpkt		= 0;
	return 0;
}

static void veth_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strcpy(info->driver, DRV_NAME);
	strcpy(info->version, DRV_VERSION);
	strcpy(info->fw_version, "N/A");
}

static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	switch(stringset) {
	case ETH_SS_STATS:
		memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
		break;
	}
}

static int veth_get_sset_count(struct net_device *dev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return ARRAY_SIZE(ethtool_stats_keys);
	default:
		return -EOPNOTSUPP;
	}
}

/* Only one ethtool stat is exported: the peer's ifindex. */
static void veth_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *data)
{
	struct veth_priv *priv;

	priv = netdev_priv(dev);
	data[0] = priv->peer->ifindex;
}

static const struct ethtool_ops veth_ethtool_ops = {
	.get_settings		= veth_get_settings,
	.get_drvinfo		= veth_get_drvinfo,
	.get_link		= ethtool_op_get_link,
	.get_strings		= veth_get_strings,
	.get_sset_count		= veth_get_sset_count,
	.get_ethtool_stats	= veth_get_ethtool_stats,
};

/*
 * xmit
 */

/*
 * Transmit = hand the skb straight to the peer's receive path via
 * dev_forward_skb().  Never reports an error to the stack: every exit
 * returns NETDEV_TX_OK, with drops accounted in the per-cpu counters.
 * NOTE: on the rx_drop path the skb must NOT be freed here -
 * dev_forward_skb() already consumed it on failure.
 */
static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct net_device *rcv = NULL;
	struct veth_priv *priv, *rcv_priv;
	struct veth_net_stats *stats, *rcv_stats;
	int length;

	priv = netdev_priv(dev);
	rcv = priv->peer;
	rcv_priv = netdev_priv(rcv);

	stats = this_cpu_ptr(priv->stats);
	rcv_stats = this_cpu_ptr(rcv_priv->stats);

	if (!(rcv->flags & IFF_UP))
		goto tx_drop;

	/* don't change ip_summed == CHECKSUM_PARTIAL, as that
	   will cause bad checksum on forwarded packets */
	if (skb->ip_summed == CHECKSUM_NONE &&
	    rcv->features & NETIF_F_RXCSUM)
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	/* cache the length: dev_forward_skb() may free the skb */
	length = skb->len;
	if (dev_forward_skb(rcv, skb) != NET_RX_SUCCESS)
		goto rx_drop;

	stats->tx_bytes += length;
	stats->tx_packets++;

	rcv_stats->rx_bytes += length;
	rcv_stats->rx_packets++;

	return NETDEV_TX_OK;

tx_drop:
	kfree_skb(skb);
	stats->tx_dropped++;
	return NETDEV_TX_OK;

rx_drop:
	rcv_stats->rx_dropped++;
	return NETDEV_TX_OK;
}

/*
 * general routines
 */

/* Sum the per-cpu counters into dev->stats and return it. */
static struct net_device_stats *veth_get_stats(struct net_device *dev)
{
	struct veth_priv *priv;
	int cpu;
	struct veth_net_stats *stats, total = {0};

	priv = netdev_priv(dev);

	for_each_possible_cpu(cpu) {
		stats = per_cpu_ptr(priv->stats, cpu);

		total.rx_packets += stats->rx_packets;
		total.tx_packets += stats->tx_packets;
		total.rx_bytes   += stats->rx_bytes;
		total.tx_bytes   += stats->tx_bytes;
		total.tx_dropped += stats->tx_dropped;
		total.rx_dropped += stats->rx_dropped;
	}
	dev->stats.rx_packets = total.rx_packets;
	dev->stats.tx_packets = total.tx_packets;
	dev->stats.rx_bytes   = total.rx_bytes;
	dev->stats.tx_bytes   = total.tx_bytes;
	dev->stats.tx_dropped = total.tx_dropped;
	dev->stats.rx_dropped = total.rx_dropped;

	return &dev->stats;
}

/* Carrier is up only while both ends of the pair are up. */
static int veth_open(struct net_device *dev)
{
	struct veth_priv *priv;

	priv = netdev_priv(dev);
	if (priv->peer == NULL)
		return -ENOTCONN;

	if (priv->peer->flags & IFF_UP) {
		netif_carrier_on(dev);
		netif_carrier_on(priv->peer);
	}
	return 0;
}

static int veth_close(struct net_device *dev)
{
	struct veth_priv *priv = netdev_priv(dev);

	netif_carrier_off(dev);
	netif_carrier_off(priv->peer);

	return 0;
}

static int is_valid_veth_mtu(int new_mtu)
{
	return new_mtu >= MIN_MTU && new_mtu <= MAX_MTU;
}

static int veth_change_mtu(struct net_device *dev, int new_mtu)
{
	if (!is_valid_veth_mtu(new_mtu))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

/* ndo_init: allocate the per-cpu stats block. */
static int veth_dev_init(struct net_device *dev)
{
	struct veth_net_stats __percpu *stats;
	struct veth_priv *priv;

	stats = alloc_percpu(struct veth_net_stats);
	if (stats == NULL)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->stats = stats;
	return 0;
}

/* destructor: release per-cpu stats, then the netdev itself. */
static void veth_dev_free(struct net_device *dev)
{
	struct veth_priv *priv;

	priv = netdev_priv(dev);
	free_percpu(priv->stats);
	free_netdev(dev);
}

static const struct net_device_ops veth_netdev_ops = {
	.ndo_init            = veth_dev_init,
	.ndo_open            = veth_open,
	.ndo_stop            = veth_close,
	.ndo_start_xmit      = veth_xmit,
	.ndo_change_mtu      = veth_change_mtu,
	.ndo_get_stats       = veth_get_stats,
	.ndo_set_mac_address = eth_mac_addr,
};

/* rtnl setup callback: generic ethernet setup plus veth specifics. */
static void veth_setup(struct net_device *dev)
{
	ether_setup(dev);

	dev->priv_flags &= ~IFF_TX_SKB_SHARING;

	dev->netdev_ops = &veth_netdev_ops;
	dev->ethtool_ops = &veth_ethtool_ops;
	dev->features |= NETIF_F_LLTX;
	dev->destructor = veth_dev_free;

	dev->hw_features = NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_RXCSUM;
}

/*
 * netlink interface
 */

/* Validate MAC and MTU attributes from a netlink request. */
static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
{
	if (tb[IFLA_ADDRESS]) {
		if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
			return -EINVAL;
		if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
			return -EADDRNOTAVAIL;
	}
	if (tb[IFLA_MTU]) {
		if (!is_valid_veth_mtu(nla_get_u32(tb[IFLA_MTU])))
			return -EINVAL;
	}
	return 0;
}

static struct rtnl_link_ops veth_link_ops;

/*
 * Create a veth pair: the peer device is created and registered first
 * (possibly in another netns via VETH_INFO_PEER), then @dev, and only
 * then are the two priv->peer pointers tied together.  Error unwind is
 * label-ordered: once the peer is registered it must be unregistered,
 * not freed directly.
 */
static int veth_newlink(struct net *src_net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[])
{
	int err;
	struct net_device *peer;
	struct veth_priv *priv;
	char ifname[IFNAMSIZ];
	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
	struct ifinfomsg *ifmp;
	struct net *net;

	/*
	 * create and register peer first
	 */
	if (data != NULL && data[VETH_INFO_PEER] != NULL) {
		struct nlattr *nla_peer;

		nla_peer = data[VETH_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = nla_parse(peer_tb, IFLA_MAX,
				nla_data(nla_peer) + sizeof(struct ifinfomsg),
				nla_len(nla_peer) - sizeof(struct ifinfomsg),
				ifla_policy);
		if (err < 0)
			return err;

		err = veth_validate(peer_tb, NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	} else {
		ifmp = NULL;
		tbp = tb;
	}

	if (tbp[IFLA_IFNAME])
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
	else
		/* "%%d" leaves a literal %d for dev_alloc_name() below */
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");

	net = rtnl_link_get_net(src_net, tbp);
	if (IS_ERR(net))
		return PTR_ERR(net);

	peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp);
	if (IS_ERR(peer)) {
		put_net(net);
		return PTR_ERR(peer);
	}

	if (tbp[IFLA_ADDRESS] == NULL)
		random_ether_addr(peer->dev_addr);

	err = register_netdevice(peer);
	put_net(net);
	net = NULL;
	if (err < 0)
		goto err_register_peer;

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto err_configure_peer;

	/*
	 * register dev last
	 *
	 * note, that since we've registered new device the dev's name
	 * should be re-allocated
	 */

	if (tb[IFLA_ADDRESS] == NULL)
		random_ether_addr(dev->dev_addr);

	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	if (strchr(dev->name, '%')) {
		err = dev_alloc_name(dev, dev->name);
		if (err < 0)
			goto err_alloc_name;
	}

	err = register_netdevice(dev);
	if (err < 0)
		goto err_register_dev;

	netif_carrier_off(dev);

	/*
	 * tie the deviced together
	 */

	priv = netdev_priv(dev);
	priv->peer = peer;

	priv = netdev_priv(peer);
	priv->peer = dev;
	return 0;

err_register_dev:
	/* nothing to do */
err_alloc_name:
err_configure_peer:
	unregister_netdevice(peer);
	return err;

err_register_peer:
	free_netdev(peer);
	return err;
}

/* Tear down both ends of the pair in one batched unregister. */
static void veth_dellink(struct net_device *dev, struct list_head *head)
{
	struct veth_priv *priv;
	struct net_device *peer;

	priv = netdev_priv(dev);
	peer = priv->peer;

	unregister_netdevice_queue(dev, head);
	unregister_netdevice_queue(peer, head);
}

/* Tentative definition: zero-filled policy, i.e. no typed attributes. */
static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];

static struct rtnl_link_ops veth_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct veth_priv),
	.setup		= veth_setup,
	.validate	= veth_validate,
	.newlink	= veth_newlink,
	.dellink	= veth_dellink,
	.policy		= veth_policy,
	.maxtype	= VETH_INFO_MAX,
};

/*
 * init/fini
 */

static __init int veth_init(void)
{
	return rtnl_link_register(&veth_link_ops);
}

static __exit void veth_exit(void)
{
	rtnl_link_unregister(&veth_link_ops);
}

module_init(veth_init);
module_exit(veth_exit);

MODULE_DESCRIPTION("Virtual Ethernet Tunnel");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);
gpl-2.0
AOKP/kernel_oppo_n1
drivers/usb/gadget/f_qc_ecm.c
1041
24598
/* * f_qc_ecm.c -- USB CDC Ethernet (ECM) link function driver * * Copyright (C) 2003-2005,2008 David Brownell * Copyright (C) 2008 Nokia Corporation * Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define VERBOSE_DEBUG */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/etherdevice.h> #include "u_ether.h" #include "u_qc_ether.h" /* * This function is a "CDC Ethernet Networking Control Model" (CDC ECM) * Ethernet link. The data transfer model is simple (packets sent and * received over bulk endpoints using normal short packet termination), * and the control model exposes various data and optional notifications. * * ECM is well standardized and (except for Microsoft) supported by most * operating systems with USB host support. It's the preferred interop * solution for Ethernet over USB, at least for firmware based solutions. * (Hardware solutions tend to be more minimalist.) A newer and simpler * "Ethernet Emulation Model" (CDC EEM) hasn't yet caught on. * * Note that ECM requires the use of "alternate settings" for its data * interface. This means that the set_alt() method has real work to do, * and also means that a get_alt() method is required. * * This function is based on USB CDC Ethernet link function driver and * contains MSM specific implementation. 
*/ enum ecm_qc_notify_state { ECM_QC_NOTIFY_NONE, /* don't notify */ ECM_QC_NOTIFY_CONNECT, /* issue CONNECT next */ ECM_QC_NOTIFY_SPEED, /* issue SPEED_CHANGE next */ }; struct f_ecm_qc { struct qc_gether port; u8 ctrl_id, data_id; char ethaddr[14]; struct usb_ep *notify; struct usb_request *notify_req; u8 notify_state; bool is_open; }; static inline struct f_ecm_qc *func_to_ecm_qc(struct usb_function *f) { return container_of(f, struct f_ecm_qc, port.func); } /* peak (theoretical) bulk transfer rate in bits-per-second */ static inline unsigned ecm_qc_bitrate(struct usb_gadget *g) { if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return 13 * 512 * 8 * 1000 * 8; else return 19 * 64 * 1 * 1000 * 8; } /*-------------------------------------------------------------------------*/ /* * Include the status endpoint if we can, even though it's optional. * * Use wMaxPacketSize big enough to fit CDC_NOTIFY_SPEED_CHANGE in one * packet, to simplify cancellation; and a big transfer interval, to * waste less bandwidth. * * Some drivers (like Linux 2.4 cdc-ether!) "need" it to exist even * if they ignore the connect/disconnect notifications that real aether * can provide. More advanced cdc configurations might want to support * encapsulated commands (vendor-specific, using control-OUT). 
*/ #define ECM_QC_LOG2_STATUS_INTERVAL_MSEC 5 /* 1 << 5 == 32 msec */ #define ECM_QC_STATUS_BYTECOUNT 16 /* 8 byte header + data */ /* currently only one std ecm instance is supported */ #define ECM_QC_NO_PORTS 1 /* interface descriptor: */ static struct usb_interface_descriptor ecm_qc_control_intf = { .bLength = sizeof ecm_qc_control_intf, .bDescriptorType = USB_DT_INTERFACE, /* .bInterfaceNumber = DYNAMIC */ /* status endpoint is optional; this could be patched later */ .bNumEndpoints = 1, .bInterfaceClass = USB_CLASS_COMM, .bInterfaceSubClass = USB_CDC_SUBCLASS_ETHERNET, .bInterfaceProtocol = USB_CDC_PROTO_NONE, /* .iInterface = DYNAMIC */ }; static struct usb_cdc_header_desc ecm_qc_header_desc = { .bLength = sizeof ecm_qc_header_desc, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_HEADER_TYPE, .bcdCDC = cpu_to_le16(0x0110), }; static struct usb_cdc_union_desc ecm_qc_union_desc = { .bLength = sizeof(ecm_qc_union_desc), .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_UNION_TYPE, /* .bMasterInterface0 = DYNAMIC */ /* .bSlaveInterface0 = DYNAMIC */ }; static struct usb_cdc_ether_desc ecm_qc_desc = { .bLength = sizeof ecm_qc_desc, .bDescriptorType = USB_DT_CS_INTERFACE, .bDescriptorSubType = USB_CDC_ETHERNET_TYPE, /* this descriptor actually adds value, surprise! */ /* .iMACAddress = DYNAMIC */ .bmEthernetStatistics = cpu_to_le32(0), /* no statistics */ .wMaxSegmentSize = cpu_to_le16(ETH_FRAME_LEN), .wNumberMCFilters = cpu_to_le16(0), .bNumberPowerFilters = 0, }; /* the default data interface has no endpoints ... */ static struct usb_interface_descriptor ecm_qc_data_nop_intf = { .bLength = sizeof ecm_qc_data_nop_intf, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = 1, .bAlternateSetting = 0, .bNumEndpoints = 0, .bInterfaceClass = USB_CLASS_CDC_DATA, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0, /* .iInterface = DYNAMIC */ }; /* ... 
but the "real" data interface has two bulk endpoints */ static struct usb_interface_descriptor ecm_qc_data_intf = { .bLength = sizeof ecm_qc_data_intf, .bDescriptorType = USB_DT_INTERFACE, .bInterfaceNumber = 1, .bAlternateSetting = 1, .bNumEndpoints = 2, .bInterfaceClass = USB_CLASS_CDC_DATA, .bInterfaceSubClass = 0, .bInterfaceProtocol = 0, /* .iInterface = DYNAMIC */ }; /* full speed support: */ static struct usb_endpoint_descriptor ecm_qc_fs_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT), .bInterval = 1 << ECM_QC_LOG2_STATUS_INTERVAL_MSEC, }; static struct usb_endpoint_descriptor ecm_qc_fs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_endpoint_descriptor ecm_qc_fs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, }; static struct usb_descriptor_header *ecm_qc_fs_function[] = { /* CDC ECM control descriptors */ (struct usb_descriptor_header *) &ecm_qc_control_intf, (struct usb_descriptor_header *) &ecm_qc_header_desc, (struct usb_descriptor_header *) &ecm_qc_union_desc, (struct usb_descriptor_header *) &ecm_qc_desc, /* NOTE: status endpoint might need to be removed */ (struct usb_descriptor_header *) &ecm_qc_fs_notify_desc, /* data interface, altsettings 0 and 1 */ (struct usb_descriptor_header *) &ecm_qc_data_nop_intf, (struct usb_descriptor_header *) &ecm_qc_data_intf, (struct usb_descriptor_header *) &ecm_qc_fs_in_desc, (struct usb_descriptor_header *) &ecm_qc_fs_out_desc, NULL, }; /* high speed support: */ static struct usb_endpoint_descriptor ecm_qc_hs_notify_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, 
.bmAttributes = USB_ENDPOINT_XFER_INT, .wMaxPacketSize = cpu_to_le16(ECM_QC_STATUS_BYTECOUNT), .bInterval = ECM_QC_LOG2_STATUS_INTERVAL_MSEC + 4, }; static struct usb_endpoint_descriptor ecm_qc_hs_in_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_IN, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_endpoint_descriptor ecm_qc_hs_out_desc = { .bLength = USB_DT_ENDPOINT_SIZE, .bDescriptorType = USB_DT_ENDPOINT, .bEndpointAddress = USB_DIR_OUT, .bmAttributes = USB_ENDPOINT_XFER_BULK, .wMaxPacketSize = cpu_to_le16(512), }; static struct usb_descriptor_header *ecm_qc_hs_function[] = { /* CDC ECM control descriptors */ (struct usb_descriptor_header *) &ecm_qc_control_intf, (struct usb_descriptor_header *) &ecm_qc_header_desc, (struct usb_descriptor_header *) &ecm_qc_union_desc, (struct usb_descriptor_header *) &ecm_qc_desc, /* NOTE: status endpoint might need to be removed */ (struct usb_descriptor_header *) &ecm_qc_hs_notify_desc, /* data interface, altsettings 0 and 1 */ (struct usb_descriptor_header *) &ecm_qc_data_nop_intf, (struct usb_descriptor_header *) &ecm_qc_data_intf, (struct usb_descriptor_header *) &ecm_qc_hs_in_desc, (struct usb_descriptor_header *) &ecm_qc_hs_out_desc, NULL, }; /* string descriptors: */ static struct usb_string ecm_qc_string_defs[] = { [0].s = "CDC Ethernet Control Model (ECM)", [1].s = NULL /* DYNAMIC */, [2].s = "CDC Ethernet Data", { } /* end of list */ }; static struct usb_gadget_strings ecm_qc_string_table = { .language = 0x0409, /* en-us */ .strings = ecm_qc_string_defs, }; static struct usb_gadget_strings *ecm_qc_strings[] = { &ecm_qc_string_table, NULL, }; static struct data_port ecm_qc_bam_port; static int ecm_qc_bam_setup(void) { int ret; ret = bam_data_setup(ECM_QC_NO_PORTS); if (ret) { pr_err("bam_data_setup failed err: %d\n", ret); return ret; } return 0; } static int ecm_qc_bam_connect(struct f_ecm_qc *dev) { int ret; 
ecm_qc_bam_port.func = dev->port.func; ecm_qc_bam_port.in = dev->port.in_ep; ecm_qc_bam_port.out = dev->port.out_ep; /* currently we use the first connection */ ret = bam_data_connect(&ecm_qc_bam_port, 0, 0); if (ret) { pr_err("bam_data_connect failed: err:%d\n", ret); return ret; } else { pr_info("ecm bam connected\n"); } return 0; } static int ecm_qc_bam_disconnect(struct f_ecm_qc *dev) { pr_debug("dev:%p. %s Do nothing.\n", dev, __func__); return 0; } /*-------------------------------------------------------------------------*/ static void ecm_qc_do_notify(struct f_ecm_qc *ecm) { struct usb_request *req = ecm->notify_req; struct usb_cdc_notification *event; struct usb_composite_dev *cdev = ecm->port.func.config->cdev; __le32 *data; int status; /* notification already in flight? */ if (!req) return; event = req->buf; switch (ecm->notify_state) { case ECM_QC_NOTIFY_NONE: return; case ECM_QC_NOTIFY_CONNECT: event->bNotificationType = USB_CDC_NOTIFY_NETWORK_CONNECTION; if (ecm->is_open) event->wValue = cpu_to_le16(1); else event->wValue = cpu_to_le16(0); event->wLength = 0; req->length = sizeof *event; DBG(cdev, "notify connect %s\n", ecm->is_open ? 
"true" : "false"); ecm->notify_state = ECM_QC_NOTIFY_SPEED; break; case ECM_QC_NOTIFY_SPEED: event->bNotificationType = USB_CDC_NOTIFY_SPEED_CHANGE; event->wValue = cpu_to_le16(0); event->wLength = cpu_to_le16(8); req->length = ECM_QC_STATUS_BYTECOUNT; /* SPEED_CHANGE data is up/down speeds in bits/sec */ data = req->buf + sizeof *event; data[0] = cpu_to_le32(ecm_qc_bitrate(cdev->gadget)); data[1] = data[0]; DBG(cdev, "notify speed %d\n", ecm_qc_bitrate(cdev->gadget)); ecm->notify_state = ECM_QC_NOTIFY_NONE; break; } event->bmRequestType = 0xA1; event->wIndex = cpu_to_le16(ecm->ctrl_id); ecm->notify_req = NULL; status = usb_ep_queue(ecm->notify, req, GFP_ATOMIC); if (status < 0) { ecm->notify_req = req; DBG(cdev, "notify --> %d\n", status); } } static void ecm_qc_notify(struct f_ecm_qc *ecm) { /* NOTE on most versions of Linux, host side cdc-ethernet * won't listen for notifications until its netdevice opens. * The first notification then sits in the FIFO for a long * time, and the second one is queued. */ ecm->notify_state = ECM_QC_NOTIFY_CONNECT; ecm_qc_do_notify(ecm); } static void ecm_qc_notify_complete(struct usb_ep *ep, struct usb_request *req) { struct f_ecm_qc *ecm = req->context; switch (req->status) { case 0: /* no fault */ break; case -ECONNRESET: case -ESHUTDOWN: ecm->notify_state = ECM_QC_NOTIFY_NONE; break; default: DBG(cdev, "event %02x --> %d\n", event->bNotificationType, req->status); break; } ecm->notify_req = req; ecm_qc_do_notify(ecm); } static int ecm_qc_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl) { struct f_ecm_qc *ecm = func_to_ecm_qc(f); struct usb_composite_dev *cdev = f->config->cdev; struct usb_request *req = cdev->req; int value = -EOPNOTSUPP; u16 w_index = le16_to_cpu(ctrl->wIndex); u16 w_value = le16_to_cpu(ctrl->wValue); u16 w_length = le16_to_cpu(ctrl->wLength); /* composite driver infrastructure handles everything except * CDC class messages; interface activation uses set_alt(). 
*/ switch ((ctrl->bRequestType << 8) | ctrl->bRequest) { case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8) | USB_CDC_SET_ETHERNET_PACKET_FILTER: /* see 6.2.30: no data, wIndex = interface, * wValue = packet filter bitmap */ if (w_length != 0 || w_index != ecm->ctrl_id) goto invalid; DBG(cdev, "packet filter %02x\n", w_value); /* REVISIT locking of cdc_filter. This assumes the UDC * driver won't have a concurrent packet TX irq running on * another CPU; or that if it does, this write is atomic... */ ecm->port.cdc_filter = w_value; value = 0; break; /* and optionally: * case USB_CDC_SEND_ENCAPSULATED_COMMAND: * case USB_CDC_GET_ENCAPSULATED_RESPONSE: * case USB_CDC_SET_ETHERNET_MULTICAST_FILTERS: * case USB_CDC_SET_ETHERNET_PM_PATTERN_FILTER: * case USB_CDC_GET_ETHERNET_PM_PATTERN_FILTER: * case USB_CDC_GET_ETHERNET_STATISTIC: */ default: invalid: DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); } /* respond with data transfer or status phase? 
*/ if (value >= 0) { DBG(cdev, "ecm req%02x.%02x v%04x i%04x l%d\n", ctrl->bRequestType, ctrl->bRequest, w_value, w_index, w_length); req->zero = 0; req->length = value; value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC); if (value < 0) pr_err("ecm req %02x.%02x response err %d\n", ctrl->bRequestType, ctrl->bRequest, value); } /* device either stalls (value < 0) or reports success */ return value; } static int ecm_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt) { struct f_ecm_qc *ecm = func_to_ecm_qc(f); struct usb_composite_dev *cdev = f->config->cdev; /* Control interface has only altsetting 0 */ if (intf == ecm->ctrl_id) { if (alt != 0) goto fail; if (ecm->notify->driver_data) { VDBG(cdev, "reset ecm control %d\n", intf); usb_ep_disable(ecm->notify); } if (!(ecm->notify->desc)) { VDBG(cdev, "init ecm ctrl %d\n", intf); if (config_ep_by_speed(cdev->gadget, f, ecm->notify)) goto fail; } usb_ep_enable(ecm->notify); ecm->notify->driver_data = ecm; /* Data interface has two altsettings, 0 and 1 */ } else if (intf == ecm->data_id) { if (alt > 1) goto fail; if (ecm->port.in_ep->driver_data) { DBG(cdev, "reset ecm\n"); gether_qc_disconnect(&ecm->port); ecm_qc_bam_disconnect(ecm); } if (!ecm->port.in_ep->desc || !ecm->port.out_ep->desc) { DBG(cdev, "init ecm\n"); if (config_ep_by_speed(cdev->gadget, f, ecm->port.in_ep) || config_ep_by_speed(cdev->gadget, f, ecm->port.out_ep)) { ecm->port.in_ep->desc = NULL; ecm->port.out_ep->desc = NULL; goto fail; } } /* CDC Ethernet only sends data in non-default altsettings. * Changing altsettings resets filters, statistics, etc. */ if (alt == 1) { struct net_device *net; /* Enable zlps by default for ECM conformance; * override for musb_hdrc (avoids txdma ovhead). 
*/ ecm->port.is_zlp_ok = !(gadget_is_musbhdrc(cdev->gadget) ); ecm->port.cdc_filter = DEFAULT_FILTER; DBG(cdev, "activate ecm\n"); net = gether_qc_connect(&ecm->port); if (IS_ERR(net)) return PTR_ERR(net); if (ecm_qc_bam_connect(ecm)) goto fail; } /* NOTE this can be a minor disagreement with the ECM spec, * which says speed notifications will "always" follow * connection notifications. But we allow one connect to * follow another (if the first is in flight), and instead * just guarantee that a speed notification is always sent. */ ecm_qc_notify(ecm); } else goto fail; return 0; fail: return -EINVAL; } /* Because the data interface supports multiple altsettings, * this ECM function *MUST* implement a get_alt() method. */ static int ecm_qc_get_alt(struct usb_function *f, unsigned intf) { struct f_ecm_qc *ecm = func_to_ecm_qc(f); if (intf == ecm->ctrl_id) return 0; return ecm->port.in_ep->driver_data ? 1 : 0; } static void ecm_qc_disable(struct usb_function *f) { struct f_ecm_qc *ecm = func_to_ecm_qc(f); DBG(cdev, "ecm deactivated\n"); if (ecm->port.in_ep->driver_data) { gether_qc_disconnect(&ecm->port); ecm_qc_bam_disconnect(ecm); } if (ecm->notify->driver_data) { usb_ep_disable(ecm->notify); ecm->notify->driver_data = NULL; ecm->notify->desc = NULL; } } /*-------------------------------------------------------------------------*/ /* * Callbacks let us notify the host about connect/disconnect when the * net device is opened or closed. * * For testing, note that link states on this side include both opened * and closed variants of: * * - disconnected/unconfigured * - configured but inactive (data alt 0) * - configured and active (data alt 1) * * Each needs to be tested with unplug, rmmod, SET_CONFIGURATION, and * SET_INTERFACE (altsetting). Remember also that "configured" doesn't * imply the host is actually polling the notification endpoint, and * likewise that "active" doesn't imply it's actually using the data * endpoints for traffic. 
*/ static void ecm_qc_open(struct qc_gether *geth) { struct f_ecm_qc *ecm = func_to_ecm_qc(&geth->func); DBG(ecm->port.func.config->cdev, "%s\n", __func__); ecm->is_open = true; ecm_qc_notify(ecm); } static void ecm_qc_close(struct qc_gether *geth) { struct f_ecm_qc *ecm = func_to_ecm_qc(&geth->func); DBG(ecm->port.func.config->cdev, "%s\n", __func__); ecm->is_open = false; ecm_qc_notify(ecm); } /*-------------------------------------------------------------------------*/ /* ethernet function driver setup/binding */ static int ecm_qc_bind(struct usb_configuration *c, struct usb_function *f) { struct usb_composite_dev *cdev = c->cdev; struct f_ecm_qc *ecm = func_to_ecm_qc(f); int status; struct usb_ep *ep; /* allocate instance-specific interface IDs */ status = usb_interface_id(c, f); if (status < 0) goto fail; ecm->ctrl_id = status; ecm_qc_control_intf.bInterfaceNumber = status; ecm_qc_union_desc.bMasterInterface0 = status; status = usb_interface_id(c, f); if (status < 0) goto fail; ecm->data_id = status; ecm_qc_data_nop_intf.bInterfaceNumber = status; ecm_qc_data_intf.bInterfaceNumber = status; ecm_qc_union_desc.bSlaveInterface0 = status; status = -ENODEV; /* allocate instance-specific endpoints */ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_in_desc); if (!ep) goto fail; ecm->port.in_ep = ep; ep->driver_data = cdev; /* claim */ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_out_desc); if (!ep) goto fail; ecm->port.out_ep = ep; ep->driver_data = cdev; /* claim */ /* NOTE: a status/notification endpoint is *OPTIONAL* but we * don't treat it that way. It's simpler, and some newer CDC * profiles (wireless handsets) no longer treat it as optional. 
*/ ep = usb_ep_autoconfig(cdev->gadget, &ecm_qc_fs_notify_desc); if (!ep) goto fail; ecm->notify = ep; ep->driver_data = cdev; /* claim */ status = -ENOMEM; /* allocate notification request and buffer */ ecm->notify_req = usb_ep_alloc_request(ep, GFP_KERNEL); if (!ecm->notify_req) goto fail; ecm->notify_req->buf = kmalloc(ECM_QC_STATUS_BYTECOUNT, GFP_KERNEL); if (!ecm->notify_req->buf) goto fail; ecm->notify_req->context = ecm; ecm->notify_req->complete = ecm_qc_notify_complete; /* copy descriptors, and track endpoint copies */ f->descriptors = usb_copy_descriptors(ecm_qc_fs_function); if (!f->descriptors) goto fail; /* support all relevant hardware speeds... we expect that when * hardware is dual speed, all bulk-capable endpoints work at * both speeds */ if (gadget_is_dualspeed(c->cdev->gadget)) { ecm_qc_hs_in_desc.bEndpointAddress = ecm_qc_fs_in_desc.bEndpointAddress; ecm_qc_hs_out_desc.bEndpointAddress = ecm_qc_fs_out_desc.bEndpointAddress; ecm_qc_hs_notify_desc.bEndpointAddress = ecm_qc_fs_notify_desc.bEndpointAddress; /* copy descriptors, and track endpoint copies */ f->hs_descriptors = usb_copy_descriptors(ecm_qc_hs_function); if (!f->hs_descriptors) goto fail; } /* NOTE: all that is done without knowing or caring about * the network link ... which is unavailable to this code * until we're activated via set_alt(). */ ecm->port.open = ecm_qc_open; ecm->port.close = ecm_qc_close; DBG(cdev, "CDC Ethernet: %s speed IN/%s OUT/%s NOTIFY/%s\n", gadget_is_dualspeed(c->cdev->gadget) ? 
"dual" : "full", ecm->port.in_ep->name, ecm->port.out_ep->name, ecm->notify->name); return 0; fail: if (f->descriptors) usb_free_descriptors(f->descriptors); if (ecm->notify_req) { kfree(ecm->notify_req->buf); usb_ep_free_request(ecm->notify, ecm->notify_req); } /* we might as well release our claims on endpoints */ if (ecm->notify) ecm->notify->driver_data = NULL; if (ecm->port.out_ep->desc) ecm->port.out_ep->driver_data = NULL; if (ecm->port.in_ep->desc) ecm->port.in_ep->driver_data = NULL; pr_err("%s: can't bind, err %d\n", f->name, status); return status; } static void ecm_qc_unbind(struct usb_configuration *c, struct usb_function *f) { struct f_ecm_qc *ecm = func_to_ecm_qc(f); DBG(c->cdev, "ecm unbind\n"); if (gadget_is_dualspeed(c->cdev->gadget)) usb_free_descriptors(f->hs_descriptors); usb_free_descriptors(f->descriptors); kfree(ecm->notify_req->buf); usb_ep_free_request(ecm->notify, ecm->notify_req); ecm_qc_string_defs[1].s = NULL; kfree(ecm); } /** * ecm_qc_bind_config - add CDC Ethernet network link to a configuration * @c: the configuration to support the network link * @ethaddr: a buffer in which the ethernet address of the host side * side of the link was recorded * Context: single threaded during gadget setup * * Returns zero on success, else negative errno. * * Caller must have called @gether_qc_setup(). Caller is also responsible * for calling @gether_cleanup() before module unload. 
*/ int ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN]) { struct f_ecm_qc *ecm; int status; if (!can_support_ecm(c->cdev->gadget) || !ethaddr) return -EINVAL; status = ecm_qc_bam_setup(); if (status) { pr_err("bam setup failed"); return status; } /* maybe allocate device-global string IDs */ if (ecm_qc_string_defs[0].id == 0) { /* control interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; ecm_qc_string_defs[0].id = status; ecm_qc_control_intf.iInterface = status; /* data interface label */ status = usb_string_id(c->cdev); if (status < 0) return status; ecm_qc_string_defs[2].id = status; ecm_qc_data_intf.iInterface = status; /* MAC address */ status = usb_string_id(c->cdev); if (status < 0) return status; ecm_qc_string_defs[1].id = status; ecm_qc_desc.iMACAddress = status; } /* allocate and initialize one new instance */ ecm = kzalloc(sizeof *ecm, GFP_KERNEL); if (!ecm) return -ENOMEM; /* export host's Ethernet address in CDC format */ snprintf(ecm->ethaddr, sizeof ecm->ethaddr, "%02X%02X%02X%02X%02X%02X", ethaddr[0], ethaddr[1], ethaddr[2], ethaddr[3], ethaddr[4], ethaddr[5]); ecm_qc_string_defs[1].s = ecm->ethaddr; ecm->port.cdc_filter = DEFAULT_FILTER; ecm->port.func.name = "cdc_ethernet"; ecm->port.func.strings = ecm_qc_strings; /* descriptors are per-instance copies */ ecm->port.func.bind = ecm_qc_bind; ecm->port.func.unbind = ecm_qc_unbind; ecm->port.func.set_alt = ecm_qc_set_alt; ecm->port.func.get_alt = ecm_qc_get_alt; ecm->port.func.setup = ecm_qc_setup; ecm->port.func.disable = ecm_qc_disable; status = usb_add_function(c, &ecm->port.func); if (status) { ecm_qc_string_defs[1].s = NULL; kfree(ecm); } return status; }
gpl-2.0
vic-nation/kernel_goghvmu
fs/xfs/xfs_iget.c
1553
20173
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_types.h" #include "xfs_acl.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_alloc_btree.h" #include "xfs_ialloc_btree.h" #include "xfs_dinode.h" #include "xfs_inode.h" #include "xfs_btree.h" #include "xfs_ialloc.h" #include "xfs_quota.h" #include "xfs_utils.h" #include "xfs_trans_priv.h" #include "xfs_inode_item.h" #include "xfs_bmap.h" #include "xfs_btree_trace.h" #include "xfs_trace.h" /* * Define xfs inode iolock lockdep classes. We need to ensure that all active * inodes are considered the same for lockdep purposes, including inodes that * are recycled through the XFS_IRECLAIMABLE state. This is the the only way to * guarantee the locks are considered the same when there are multiple lock * initialisation siteѕ. Also, define a reclaimable inode class so it is * obvious in lockdep reports which class the report is against. */ static struct lock_class_key xfs_iolock_active; struct lock_class_key xfs_iolock_reclaimable; /* * Allocate and initialise an xfs_inode. 
*/ STATIC struct xfs_inode * xfs_inode_alloc( struct xfs_mount *mp, xfs_ino_t ino) { struct xfs_inode *ip; /* * if this didn't occur in transactions, we could use * KM_MAYFAIL and return NULL here on ENOMEM. Set the * code up to do this anyway. */ ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP); if (!ip) return NULL; if (inode_init_always(mp->m_super, VFS_I(ip))) { kmem_zone_free(xfs_inode_zone, ip); return NULL; } ASSERT(atomic_read(&ip->i_iocount) == 0); ASSERT(atomic_read(&ip->i_pincount) == 0); ASSERT(!spin_is_locked(&ip->i_flags_lock)); ASSERT(completion_done(&ip->i_flush)); ASSERT(ip->i_ino == 0); mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); lockdep_set_class_and_name(&ip->i_iolock.mr_lock, &xfs_iolock_active, "xfs_iolock_active"); /* initialise the xfs inode */ ip->i_ino = ino; ip->i_mount = mp; memset(&ip->i_imap, 0, sizeof(struct xfs_imap)); ip->i_afp = NULL; memset(&ip->i_df, 0, sizeof(xfs_ifork_t)); ip->i_flags = 0; ip->i_update_core = 0; ip->i_delayed_blks = 0; memset(&ip->i_d, 0, sizeof(xfs_icdinode_t)); ip->i_size = 0; ip->i_new_size = 0; return ip; } STATIC void xfs_inode_free_callback( struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); struct xfs_inode *ip = XFS_I(inode); INIT_LIST_HEAD(&inode->i_dentry); kmem_zone_free(xfs_inode_zone, ip); } void xfs_inode_free( struct xfs_inode *ip) { switch (ip->i_d.di_mode & S_IFMT) { case S_IFREG: case S_IFDIR: case S_IFLNK: xfs_idestroy_fork(ip, XFS_DATA_FORK); break; } if (ip->i_afp) xfs_idestroy_fork(ip, XFS_ATTR_FORK); if (ip->i_itemp) { /* * Only if we are shutting down the fs will we see an * inode still in the AIL. If it is there, we should remove * it to prevent a use-after-free from occurring. 
*/ xfs_log_item_t *lip = &ip->i_itemp->ili_item; struct xfs_ail *ailp = lip->li_ailp; ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) || XFS_FORCED_SHUTDOWN(ip->i_mount)); if (lip->li_flags & XFS_LI_IN_AIL) { spin_lock(&ailp->xa_lock); if (lip->li_flags & XFS_LI_IN_AIL) xfs_trans_ail_delete(ailp, lip); else spin_unlock(&ailp->xa_lock); } xfs_inode_item_destroy(ip); ip->i_itemp = NULL; } /* asserts to verify all state is correct here */ ASSERT(atomic_read(&ip->i_iocount) == 0); ASSERT(atomic_read(&ip->i_pincount) == 0); ASSERT(!spin_is_locked(&ip->i_flags_lock)); ASSERT(completion_done(&ip->i_flush)); /* * Because we use RCU freeing we need to ensure the inode always * appears to be reclaimed with an invalid inode number when in the * free state. The ip->i_flags_lock provides the barrier against lookup * races. */ spin_lock(&ip->i_flags_lock); ip->i_flags = XFS_IRECLAIM; ip->i_ino = 0; spin_unlock(&ip->i_flags_lock); call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); } /* * Check the validity of the inode we just found it the cache */ static int xfs_iget_cache_hit( struct xfs_perag *pag, struct xfs_inode *ip, xfs_ino_t ino, int flags, int lock_flags) __releases(RCU) { struct inode *inode = VFS_I(ip); struct xfs_mount *mp = ip->i_mount; int error; /* * check for re-use of an inode within an RCU grace period due to the * radix tree nodes not being updated yet. We monitor for this by * setting the inode number to zero before freeing the inode structure. * If the inode has been reallocated and set up, then the inode number * will not match, so check for that, too. */ spin_lock(&ip->i_flags_lock); if (ip->i_ino != ino) { trace_xfs_iget_skip(ip); XFS_STATS_INC(xs_ig_frecycle); error = EAGAIN; goto out_error; } /* * If we are racing with another cache hit that is currently * instantiating this inode or currently recycling it out of * reclaimabe state, wait for the initialisation to complete * before continuing. 
* * XXX(hch): eventually we should do something equivalent to * wait_on_inode to wait for these flags to be cleared * instead of polling for it. */ if (ip->i_flags & (XFS_INEW|XFS_IRECLAIM)) { trace_xfs_iget_skip(ip); XFS_STATS_INC(xs_ig_frecycle); error = EAGAIN; goto out_error; } /* * If lookup is racing with unlink return an error immediately. */ if (ip->i_d.di_mode == 0 && !(flags & XFS_IGET_CREATE)) { error = ENOENT; goto out_error; } /* * If IRECLAIMABLE is set, we've torn down the VFS inode already. * Need to carefully get it back into useable state. */ if (ip->i_flags & XFS_IRECLAIMABLE) { trace_xfs_iget_reclaim(ip); /* * We need to set XFS_IRECLAIM to prevent xfs_reclaim_inode * from stomping over us while we recycle the inode. We can't * clear the radix tree reclaimable tag yet as it requires * pag_ici_lock to be held exclusive. */ ip->i_flags |= XFS_IRECLAIM; spin_unlock(&ip->i_flags_lock); rcu_read_unlock(); error = -inode_init_always(mp->m_super, inode); if (error) { /* * Re-initializing the inode failed, and we are in deep * trouble. Try to re-add it to the reclaim list. */ rcu_read_lock(); spin_lock(&ip->i_flags_lock); ip->i_flags &= ~(XFS_INEW | XFS_IRECLAIM); ASSERT(ip->i_flags & XFS_IRECLAIMABLE); trace_xfs_iget_reclaim_fail(ip); goto out_error; } spin_lock(&pag->pag_ici_lock); spin_lock(&ip->i_flags_lock); /* * Clear the per-lifetime state in the inode as we are now * effectively a new inode and need to return to the initial * state before reuse occurs. */ ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS; ip->i_flags |= XFS_INEW; __xfs_inode_clear_reclaim_tag(mp, pag, ip); inode->i_state = I_NEW; ASSERT(!rwsem_is_locked(&ip->i_iolock.mr_lock)); mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino); lockdep_set_class_and_name(&ip->i_iolock.mr_lock, &xfs_iolock_active, "xfs_iolock_active"); spin_unlock(&ip->i_flags_lock); spin_unlock(&pag->pag_ici_lock); } else { /* If the VFS inode is being torn down, pause and try again. 
*/ if (!igrab(inode)) { trace_xfs_iget_skip(ip); error = EAGAIN; goto out_error; } /* We've got a live one. */ spin_unlock(&ip->i_flags_lock); rcu_read_unlock(); trace_xfs_iget_hit(ip); } if (lock_flags != 0) xfs_ilock(ip, lock_flags); xfs_iflags_clear(ip, XFS_ISTALE); XFS_STATS_INC(xs_ig_found); return 0; out_error: spin_unlock(&ip->i_flags_lock); rcu_read_unlock(); return error; } static int xfs_iget_cache_miss( struct xfs_mount *mp, struct xfs_perag *pag, xfs_trans_t *tp, xfs_ino_t ino, struct xfs_inode **ipp, int flags, int lock_flags) { struct xfs_inode *ip; int error; xfs_agino_t agino = XFS_INO_TO_AGINO(mp, ino); ip = xfs_inode_alloc(mp, ino); if (!ip) return ENOMEM; error = xfs_iread(mp, tp, ip, flags); if (error) goto out_destroy; trace_xfs_iget_miss(ip); if ((ip->i_d.di_mode == 0) && !(flags & XFS_IGET_CREATE)) { error = ENOENT; goto out_destroy; } /* * Preload the radix tree so we can insert safely under the * write spinlock. Note that we cannot sleep inside the preload * region. */ if (radix_tree_preload(GFP_KERNEL)) { error = EAGAIN; goto out_destroy; } /* * Because the inode hasn't been added to the radix-tree yet it can't * be found by another thread, so we can do the non-sleeping lock here. */ if (lock_flags) { if (!xfs_ilock_nowait(ip, lock_flags)) BUG(); } /* * These values must be set before inserting the inode into the radix * tree as the moment it is inserted a concurrent lookup (allowed by the * RCU locking mechanism) can find it and that lookup must see that this * is an inode currently under construction (i.e. that XFS_INEW is set). * The ip->i_flags_lock that protects the XFS_INEW flag forms the * memory barrier that ensures this detection works correctly at lookup * time. 
*/ ip->i_udquot = ip->i_gdquot = NULL; xfs_iflags_set(ip, XFS_INEW); /* insert the new inode */ spin_lock(&pag->pag_ici_lock); error = radix_tree_insert(&pag->pag_ici_root, agino, ip); if (unlikely(error)) { WARN_ON(error != -EEXIST); XFS_STATS_INC(xs_ig_dup); error = EAGAIN; goto out_preload_end; } spin_unlock(&pag->pag_ici_lock); radix_tree_preload_end(); *ipp = ip; return 0; out_preload_end: spin_unlock(&pag->pag_ici_lock); radix_tree_preload_end(); if (lock_flags) xfs_iunlock(ip, lock_flags); out_destroy: __destroy_inode(VFS_I(ip)); xfs_inode_free(ip); return error; } /* * Look up an inode by number in the given file system. * The inode is looked up in the cache held in each AG. * If the inode is found in the cache, initialise the vfs inode * if necessary. * * If it is not in core, read it in from the file system's device, * add it to the cache and initialise the vfs inode. * * The inode is locked according to the value of the lock_flags parameter. * This flag parameter indicates how and if the inode's IO lock and inode lock * should be taken. * * mp -- the mount point structure for the current file system. It points * to the inode hash table. * tp -- a pointer to the current transaction if there is one. This is * simply passed through to the xfs_iread() call. * ino -- the number of the inode desired. This is the unique identifier * within the file system for the inode being requested. * lock_flags -- flags indicating how to lock the inode. See the comment * for xfs_ilock() for a list of valid values. 
*/ int xfs_iget( xfs_mount_t *mp, xfs_trans_t *tp, xfs_ino_t ino, uint flags, uint lock_flags, xfs_inode_t **ipp) { xfs_inode_t *ip; int error; xfs_perag_t *pag; xfs_agino_t agino; /* reject inode numbers outside existing AGs */ if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount) return EINVAL; /* get the perag structure and ensure that it's inode capable */ pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino)); agino = XFS_INO_TO_AGINO(mp, ino); again: error = 0; rcu_read_lock(); ip = radix_tree_lookup(&pag->pag_ici_root, agino); if (ip) { error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags); if (error) goto out_error_or_again; } else { rcu_read_unlock(); XFS_STATS_INC(xs_ig_missed); error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip, flags, lock_flags); if (error) goto out_error_or_again; } xfs_perag_put(pag); *ipp = ip; ASSERT(ip->i_df.if_ext_max == XFS_IFORK_DSIZE(ip) / sizeof(xfs_bmbt_rec_t)); /* * If we have a real type for an on-disk inode, we can set ops(&unlock) * now. If it's a new inode being created, xfs_ialloc will handle it. */ if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0) xfs_setup_inode(ip); return 0; out_error_or_again: if (error == EAGAIN) { delay(1); goto again; } xfs_perag_put(pag); return error; } /* * This is a wrapper routine around the xfs_ilock() routine * used to centralize some grungy code. It is used in places * that wish to lock the inode solely for reading the extents. * The reason these places can't just call xfs_ilock(SHARED) * is that the inode lock also guards to bringing in of the * extents from disk for a file in b-tree format. If the inode * is in b-tree format, then we need to lock the inode exclusively * until the extents are read in. Locking it exclusively all * the time would limit our parallelism unnecessarily, though. * What we do instead is check to see if the extents have been * read in yet, and only lock the inode exclusively if they * have not. 
* * The function returns a value which should be given to the * corresponding xfs_iunlock_map_shared(). This value is * the mode in which the lock was actually taken. */ uint xfs_ilock_map_shared( xfs_inode_t *ip) { uint lock_mode; if ((ip->i_d.di_format == XFS_DINODE_FMT_BTREE) && ((ip->i_df.if_flags & XFS_IFEXTENTS) == 0)) { lock_mode = XFS_ILOCK_EXCL; } else { lock_mode = XFS_ILOCK_SHARED; } xfs_ilock(ip, lock_mode); return lock_mode; } /* * This is simply the unlock routine to go with xfs_ilock_map_shared(). * All it does is call xfs_iunlock() with the given lock_mode. */ void xfs_iunlock_map_shared( xfs_inode_t *ip, unsigned int lock_mode) { xfs_iunlock(ip, lock_mode); } /* * The xfs inode contains 2 locks: a multi-reader lock called the * i_iolock and a multi-reader lock called the i_lock. This routine * allows either or both of the locks to be obtained. * * The 2 locks should always be ordered so that the IO lock is * obtained first in order to prevent deadlock. * * ip -- the inode being locked * lock_flags -- this parameter indicates the inode's locks * to be locked. It can be: * XFS_IOLOCK_SHARED, * XFS_IOLOCK_EXCL, * XFS_ILOCK_SHARED, * XFS_ILOCK_EXCL, * XFS_IOLOCK_SHARED | XFS_ILOCK_SHARED, * XFS_IOLOCK_SHARED | XFS_ILOCK_EXCL, * XFS_IOLOCK_EXCL | XFS_ILOCK_SHARED, * XFS_IOLOCK_EXCL | XFS_ILOCK_EXCL */ void xfs_ilock( xfs_inode_t *ip, uint lock_flags) { /* * You can't set both SHARED and EXCL for the same lock, * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, * and XFS_ILOCK_EXCL are valid values to set in lock_flags. 
*/ ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0); if (lock_flags & XFS_IOLOCK_EXCL) mrupdate_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags)); else if (lock_flags & XFS_IOLOCK_SHARED) mraccess_nested(&ip->i_iolock, XFS_IOLOCK_DEP(lock_flags)); if (lock_flags & XFS_ILOCK_EXCL) mrupdate_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); else if (lock_flags & XFS_ILOCK_SHARED) mraccess_nested(&ip->i_lock, XFS_ILOCK_DEP(lock_flags)); trace_xfs_ilock(ip, lock_flags, _RET_IP_); } /* * This is just like xfs_ilock(), except that the caller * is guaranteed not to sleep. It returns 1 if it gets * the requested locks and 0 otherwise. If the IO lock is * obtained but the inode lock cannot be, then the IO lock * is dropped before returning. * * ip -- the inode being locked * lock_flags -- this parameter indicates the inode's locks to be * to be locked. See the comment for xfs_ilock() for a list * of valid values. */ int xfs_ilock_nowait( xfs_inode_t *ip, uint lock_flags) { /* * You can't set both SHARED and EXCL for the same lock, * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, * and XFS_ILOCK_EXCL are valid values to set in lock_flags. 
*/ ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_LOCK_DEP_MASK)) == 0); if (lock_flags & XFS_IOLOCK_EXCL) { if (!mrtryupdate(&ip->i_iolock)) goto out; } else if (lock_flags & XFS_IOLOCK_SHARED) { if (!mrtryaccess(&ip->i_iolock)) goto out; } if (lock_flags & XFS_ILOCK_EXCL) { if (!mrtryupdate(&ip->i_lock)) goto out_undo_iolock; } else if (lock_flags & XFS_ILOCK_SHARED) { if (!mrtryaccess(&ip->i_lock)) goto out_undo_iolock; } trace_xfs_ilock_nowait(ip, lock_flags, _RET_IP_); return 1; out_undo_iolock: if (lock_flags & XFS_IOLOCK_EXCL) mrunlock_excl(&ip->i_iolock); else if (lock_flags & XFS_IOLOCK_SHARED) mrunlock_shared(&ip->i_iolock); out: return 0; } /* * xfs_iunlock() is used to drop the inode locks acquired with * xfs_ilock() and xfs_ilock_nowait(). The caller must pass * in the flags given to xfs_ilock() or xfs_ilock_nowait() so * that we know which locks to drop. * * ip -- the inode being unlocked * lock_flags -- this parameter indicates the inode's locks to be * to be unlocked. See the comment for xfs_ilock() for a list * of valid values for this parameter. * */ void xfs_iunlock( xfs_inode_t *ip, uint lock_flags) { /* * You can't set both SHARED and EXCL for the same lock, * and only XFS_IOLOCK_SHARED, XFS_IOLOCK_EXCL, XFS_ILOCK_SHARED, * and XFS_ILOCK_EXCL are valid values to set in lock_flags. 
*/ ASSERT((lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) != (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)); ASSERT((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) != (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_LOCK_MASK | XFS_IUNLOCK_NONOTIFY | XFS_LOCK_DEP_MASK)) == 0); ASSERT(lock_flags != 0); if (lock_flags & XFS_IOLOCK_EXCL) mrunlock_excl(&ip->i_iolock); else if (lock_flags & XFS_IOLOCK_SHARED) mrunlock_shared(&ip->i_iolock); if (lock_flags & XFS_ILOCK_EXCL) mrunlock_excl(&ip->i_lock); else if (lock_flags & XFS_ILOCK_SHARED) mrunlock_shared(&ip->i_lock); if ((lock_flags & (XFS_ILOCK_SHARED | XFS_ILOCK_EXCL)) && !(lock_flags & XFS_IUNLOCK_NONOTIFY) && ip->i_itemp) { /* * Let the AIL know that this item has been unlocked in case * it is in the AIL and anyone is waiting on it. Don't do * this if the caller has asked us not to. */ xfs_trans_unlocked_item(ip->i_itemp->ili_item.li_ailp, (xfs_log_item_t*)(ip->i_itemp)); } trace_xfs_iunlock(ip, lock_flags, _RET_IP_); } /* * give up write locks. the i/o lock cannot be held nested * if it is being demoted. */ void xfs_ilock_demote( xfs_inode_t *ip, uint lock_flags) { ASSERT(lock_flags & (XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)); ASSERT((lock_flags & ~(XFS_IOLOCK_EXCL|XFS_ILOCK_EXCL)) == 0); if (lock_flags & XFS_ILOCK_EXCL) mrdemote(&ip->i_lock); if (lock_flags & XFS_IOLOCK_EXCL) mrdemote(&ip->i_iolock); trace_xfs_ilock_demote(ip, lock_flags, _RET_IP_); } #ifdef DEBUG int xfs_isilocked( xfs_inode_t *ip, uint lock_flags) { if (lock_flags & (XFS_ILOCK_EXCL|XFS_ILOCK_SHARED)) { if (!(lock_flags & XFS_ILOCK_SHARED)) return !!ip->i_lock.mr_writer; return rwsem_is_locked(&ip->i_lock.mr_lock); } if (lock_flags & (XFS_IOLOCK_EXCL|XFS_IOLOCK_SHARED)) { if (!(lock_flags & XFS_IOLOCK_SHARED)) return !!ip->i_iolock.mr_writer; return rwsem_is_locked(&ip->i_iolock.mr_lock); } ASSERT(0); return 0; } #endif
gpl-2.0
Keith-N/android_kernel_nvidia_ardbeg
drivers/net/ethernet/atheros/alx/ethtool.c
2065
7335
/*
 * Copyright (c) 2013 Johannes Berg <johannes@sipsolutions.net>
 *
 *  This file is free software: you may copy, redistribute and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation, either version 2 of the License, or (at your
 *  option) any later version.
 *
 *  This file is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *
 * Copyright (c) 2012 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/interrupt.h>
#include <asm/byteorder.h>

#include "alx.h"
#include "reg.h"
#include "hw.h"

/*
 * Report link modes, autoneg state and current speed/duplex to ethtool.
 * Always returns 0.
 */
static int alx_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	ecmd->supported = SUPPORTED_10baseT_Half |
			  SUPPORTED_10baseT_Full |
			  SUPPORTED_100baseT_Half |
			  SUPPORTED_100baseT_Full |
			  SUPPORTED_Autoneg |
			  SUPPORTED_TP |
			  SUPPORTED_Pause;
	/* gigabit is only advertised on hardware that supports it */
	if (alx_hw_giga(hw))
		ecmd->supported |= SUPPORTED_1000baseT_Full;

	ecmd->advertising = ADVERTISED_TP;
	if (hw->adv_cfg & ADVERTISED_Autoneg)
		ecmd->advertising |= hw->adv_cfg;

	ecmd->port = PORT_TP;
	ecmd->phy_address = 0;
	if (hw->adv_cfg & ADVERTISED_Autoneg)
		ecmd->autoneg = AUTONEG_ENABLE;
	else
		ecmd->autoneg = AUTONEG_DISABLE;
	ecmd->transceiver = XCVR_INTERNAL;

	/* translate the driver's RX/TX flow-control bits into the
	 * standard Pause/Asym_Pause advertisement bits */
	if (hw->flowctrl & ALX_FC_ANEG && hw->adv_cfg & ADVERTISED_Autoneg) {
		if (hw->flowctrl & ALX_FC_RX) {
			ecmd->advertising |= ADVERTISED_Pause;

			if (!(hw->flowctrl & ALX_FC_TX))
				ecmd->advertising |= ADVERTISED_Asym_Pause;
		} else if (hw->flowctrl & ALX_FC_TX) {
			ecmd->advertising |= ADVERTISED_Asym_Pause;
		}
	}

	if (hw->link_speed != SPEED_UNKNOWN) {
		/* hw->link_speed packs the duplex value into the lowest
		 * decimal digit; split it back out here */
		ethtool_cmd_speed_set(ecmd,
				      hw->link_speed - hw->link_speed % 10);
		ecmd->duplex = hw->link_speed % 10;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}

/*
 * Apply a new link configuration from ethtool and kick the PHY.
 * With autoneg enabled, 1000baseT_Half is rejected (not supported by
 * the hardware).  With autoneg disabled, only the four fixed 10/100
 * speed/duplex combinations are accepted; the switch relies on
 * DUPLEX_HALF/DUPLEX_FULL being 0/1 so that speed + duplex is unique.
 * Returns 0 or a negative errno.
 */
static int alx_set_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	u32 adv_cfg;

	ASSERT_RTNL();

	if (ecmd->autoneg == AUTONEG_ENABLE) {
		if (ecmd->advertising & ADVERTISED_1000baseT_Half)
			return -EINVAL;
		adv_cfg = ecmd->advertising | ADVERTISED_Autoneg;
	} else {
		int speed = ethtool_cmd_speed(ecmd);

		switch (speed + ecmd->duplex) {
		case SPEED_10 + DUPLEX_HALF:
			adv_cfg = ADVERTISED_10baseT_Half;
			break;
		case SPEED_10 + DUPLEX_FULL:
			adv_cfg = ADVERTISED_10baseT_Full;
			break;
		case SPEED_100 + DUPLEX_HALF:
			adv_cfg = ADVERTISED_100baseT_Half;
			break;
		case SPEED_100 + DUPLEX_FULL:
			adv_cfg = ADVERTISED_100baseT_Full;
			break;
		default:
			return -EINVAL;
		}
	}

	hw->adv_cfg = adv_cfg;
	return alx_setup_speed_duplex(hw, adv_cfg, hw->flowctrl);
}

/* Report the current pause (flow-control) configuration to ethtool. */
static void alx_get_pauseparam(struct net_device *netdev,
			       struct ethtool_pauseparam *pause)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (hw->flowctrl & ALX_FC_ANEG &&
	    hw->adv_cfg & ADVERTISED_Autoneg)
		pause->autoneg = AUTONEG_ENABLE;
	else
		pause->autoneg = AUTONEG_DISABLE;

	if (hw->flowctrl & ALX_FC_TX)
		pause->tx_pause = 1;
	else
		pause->tx_pause = 0;

	if (hw->flowctrl & ALX_FC_RX)
		pause->rx_pause = 1;
	else
		pause->rx_pause = 0;
}

/*
 * Apply a new pause configuration.  If the change requires the PHY's
 * advertisement to be renegotiated, the whole speed/duplex setup is
 * redone; otherwise only the MAC flow-control registers are touched.
 * Returns 0 or a negative errno from alx_setup_speed_duplex().
 */
static int alx_set_pauseparam(struct net_device *netdev,
			      struct ethtool_pauseparam *pause)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;
	int err = 0;
	bool reconfig_phy = false;
	u8 fc = 0;

	if (pause->tx_pause)
		fc |= ALX_FC_TX;
	if (pause->rx_pause)
		fc |= ALX_FC_RX;
	if (pause->autoneg)
		fc |= ALX_FC_ANEG;

	ASSERT_RTNL();

	/* restart auto-neg for auto-mode */
	if (hw->adv_cfg & ADVERTISED_Autoneg) {
		/* NOTE(review): the negation here means "reconfig when the
		 * ANEG bit did NOT change", which looks inverted — confirm
		 * against the hardware programming guide / upstream driver.
		 */
		if (!((fc ^ hw->flowctrl) & ALX_FC_ANEG))
			reconfig_phy = true;
		if (fc & hw->flowctrl & ALX_FC_ANEG &&
		    (fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
			reconfig_phy = true;
	}

	if (reconfig_phy) {
		/* NOTE(review): hw->flowctrl is not updated on this path,
		 * so a successful PHY reconfig leaves the cached value
		 * stale — presumably alx_setup_speed_duplex() stores it;
		 * verify.
		 */
		err = alx_setup_speed_duplex(hw, hw->adv_cfg, fc);
		return err;
	}

	/* flow control on mac */
	if ((fc ^ hw->flowctrl) & (ALX_FC_RX | ALX_FC_TX))
		alx_cfg_mac_flowcontrol(hw, fc);

	hw->flowctrl = fc;

	return 0;
}

/* Return the driver's message-level bitmap (netif_msg_*). */
static u32 alx_get_msglevel(struct net_device *netdev)
{
	struct alx_priv *alx = netdev_priv(netdev);

	return alx->msg_enable;
}

/* Set the driver's message-level bitmap. */
static void alx_set_msglevel(struct net_device *netdev, u32 data)
{
	struct alx_priv *alx = netdev_priv(netdev);

	alx->msg_enable = data;
}

/* Report Wake-on-LAN capabilities (magic packet and PHY events only). */
static void alx_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	wol->supported = WAKE_MAGIC | WAKE_PHY;
	wol->wolopts = 0;

	if (hw->sleep_ctrl & ALX_SLEEP_WOL_MAGIC)
		wol->wolopts |= WAKE_MAGIC;
	if (hw->sleep_ctrl & ALX_SLEEP_WOL_PHY)
		wol->wolopts |= WAKE_PHY;
}

/*
 * Configure Wake-on-LAN.  Only magic-packet and PHY wake are supported;
 * any other requested wake source is rejected with -EOPNOTSUPP.
 * Also updates the PCI device's wakeup-enable state.
 */
static int alx_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct alx_priv *alx = netdev_priv(netdev);
	struct alx_hw *hw = &alx->hw;

	if (wol->wolopts & (WAKE_ARP | WAKE_MAGICSECURE |
			    WAKE_UCAST | WAKE_BCAST | WAKE_MCAST))
		return -EOPNOTSUPP;

	hw->sleep_ctrl = 0;

	if (wol->wolopts & WAKE_MAGIC)
		hw->sleep_ctrl |= ALX_SLEEP_WOL_MAGIC;
	if (wol->wolopts & WAKE_PHY)
		hw->sleep_ctrl |= ALX_SLEEP_WOL_PHY;

	device_set_wakeup_enable(&alx->hw.pdev->dev, hw->sleep_ctrl);

	return 0;
}

/* Fill in driver name and PCI bus info for `ethtool -i`. */
static void alx_get_drvinfo(struct net_device *netdev,
			    struct ethtool_drvinfo *drvinfo)
{
	struct alx_priv *alx = netdev_priv(netdev);

	strlcpy(drvinfo->driver, alx_drv_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->bus_info, pci_name(alx->hw.pdev),
		sizeof(drvinfo->bus_info));
}

/* ethtool entry points exported to the core via alx_netdev_ops. */
const struct ethtool_ops alx_ethtool_ops = {
	.get_settings	= alx_get_settings,
	.set_settings	= alx_set_settings,
	.get_pauseparam	= alx_get_pauseparam,
	.set_pauseparam	= alx_set_pauseparam,
	.get_drvinfo	= alx_get_drvinfo,
	.get_msglevel	= alx_get_msglevel,
	.set_msglevel	= alx_set_msglevel,
	.get_wol	= alx_get_wol,
	.set_wol	= alx_set_wol,
	.get_link	= ethtool_op_get_link,
};
gpl-2.0
IngenicSemiconductor/kernel-inwatch
fs/gfs2/lops.c
2065
21096
/* * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved. * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved. * * This copyrighted material is made available to anyone wishing to use, * modify, copy, or redistribute it subject to the terms and conditions * of the GNU General Public License version 2. */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/buffer_head.h> #include <linux/mempool.h> #include <linux/gfs2_ondisk.h> #include <linux/bio.h> #include <linux/fs.h> #include "gfs2.h" #include "incore.h" #include "inode.h" #include "glock.h" #include "log.h" #include "lops.h" #include "meta_io.h" #include "recovery.h" #include "rgrp.h" #include "trans.h" #include "util.h" #include "trace_gfs2.h" /** * gfs2_pin - Pin a buffer in memory * @sdp: The superblock * @bh: The buffer to be pinned * * The log lock must be held when calling this function */ void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh) { struct gfs2_bufdata *bd; BUG_ON(!current->journal_info); clear_buffer_dirty(bh); if (test_set_buffer_pinned(bh)) gfs2_assert_withdraw(sdp, 0); if (!buffer_uptodate(bh)) gfs2_io_error_bh(sdp, bh); bd = bh->b_private; /* If this buffer is in the AIL and it has already been written * to in-place disk block, remove it from the AIL. 
*/ spin_lock(&sdp->sd_ail_lock); if (bd->bd_tr) list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list); spin_unlock(&sdp->sd_ail_lock); get_bh(bh); atomic_inc(&sdp->sd_log_pinned); trace_gfs2_pin(bd, 1); } static bool buffer_is_rgrp(const struct gfs2_bufdata *bd) { return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP; } static void maybe_release_space(struct gfs2_bufdata *bd) { struct gfs2_glock *gl = bd->bd_gl; struct gfs2_sbd *sdp = gl->gl_sbd; struct gfs2_rgrpd *rgd = gl->gl_object; unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number; struct gfs2_bitmap *bi = rgd->rd_bits + index; if (bi->bi_clone == 0) return; if (sdp->sd_args.ar_discard) gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL); memcpy(bi->bi_clone + bi->bi_offset, bd->bd_bh->b_data + bi->bi_offset, bi->bi_len); clear_bit(GBF_FULL, &bi->bi_flags); rgd->rd_free_clone = rgd->rd_free; } /** * gfs2_unpin - Unpin a buffer * @sdp: the filesystem the buffer belongs to * @bh: The buffer to unpin * @ai: * @flags: The inode dirty flags * */ static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh, struct gfs2_trans *tr) { struct gfs2_bufdata *bd = bh->b_private; BUG_ON(!buffer_uptodate(bh)); BUG_ON(!buffer_pinned(bh)); lock_buffer(bh); mark_buffer_dirty(bh); clear_buffer_pinned(bh); if (buffer_is_rgrp(bd)) maybe_release_space(bd); spin_lock(&sdp->sd_ail_lock); if (bd->bd_tr) { list_del(&bd->bd_ail_st_list); brelse(bh); } else { struct gfs2_glock *gl = bd->bd_gl; list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list); atomic_inc(&gl->gl_ail_count); } bd->bd_tr = tr; list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list); spin_unlock(&sdp->sd_ail_lock); clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); trace_gfs2_pin(bd, 0); unlock_buffer(bh); atomic_dec(&sdp->sd_log_pinned); } static void gfs2_log_incr_head(struct gfs2_sbd *sdp) { BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) && (sdp->sd_log_flush_head != sdp->sd_log_head)); if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks) 
{ sdp->sd_log_flush_head = 0; sdp->sd_log_flush_wrapped = 1; } } static u64 gfs2_log_bmap(struct gfs2_sbd *sdp) { unsigned int lbn = sdp->sd_log_flush_head; struct gfs2_journal_extent *je; u64 block; list_for_each_entry(je, &sdp->sd_jdesc->extent_list, extent_list) { if (lbn >= je->lblock && lbn < je->lblock + je->blocks) { block = je->dblock + lbn - je->lblock; gfs2_log_incr_head(sdp); return block; } } return -1; } /** * gfs2_end_log_write_bh - end log write of pagecache data with buffers * @sdp: The superblock * @bvec: The bio_vec * @error: The i/o status * * This finds the relavent buffers and unlocks then and sets the * error flag according to the status of the i/o request. This is * used when the log is writing data which has an in-place version * that is pinned in the pagecache. */ static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct bio_vec *bvec, int error) { struct buffer_head *bh, *next; struct page *page = bvec->bv_page; unsigned size; bh = page_buffers(page); size = bvec->bv_len; while (bh_offset(bh) < bvec->bv_offset) bh = bh->b_this_page; do { if (error) set_buffer_write_io_error(bh); unlock_buffer(bh); next = bh->b_this_page; size -= bh->b_size; brelse(bh); bh = next; } while(bh && size); } /** * gfs2_end_log_write - end of i/o to the log * @bio: The bio * @error: Status of i/o request * * Each bio_vec contains either data from the pagecache or data * relating to the log itself. Here we iterate over the bio_vec * array, processing both kinds of data. 
* */ static void gfs2_end_log_write(struct bio *bio, int error) { struct gfs2_sbd *sdp = bio->bi_private; struct bio_vec *bvec; struct page *page; int i; if (error) { sdp->sd_log_error = error; fs_err(sdp, "Error %d writing to log\n", error); } bio_for_each_segment_all(bvec, bio, i) { page = bvec->bv_page; if (page_has_buffers(page)) gfs2_end_log_write_bh(sdp, bvec, error); else mempool_free(page, gfs2_page_pool); } bio_put(bio); if (atomic_dec_and_test(&sdp->sd_log_in_flight)) wake_up(&sdp->sd_log_flush_wait); } /** * gfs2_log_flush_bio - Submit any pending log bio * @sdp: The superblock * @rw: The rw flags * * Submit any pending part-built or full bio to the block device. If * there is no pending bio, then this is a no-op. */ void gfs2_log_flush_bio(struct gfs2_sbd *sdp, int rw) { if (sdp->sd_log_bio) { atomic_inc(&sdp->sd_log_in_flight); submit_bio(rw, sdp->sd_log_bio); sdp->sd_log_bio = NULL; } } /** * gfs2_log_alloc_bio - Allocate a new bio for log writing * @sdp: The superblock * @blkno: The next device block number we want to write to * * This should never be called when there is a cached bio in the * super block. When it returns, there will be a cached bio in the * super block which will have as many bio_vecs as the device is * happy to handle. 
* * Returns: Newly allocated bio */ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno) { struct super_block *sb = sdp->sd_vfs; unsigned nrvecs = bio_get_nr_vecs(sb->s_bdev); struct bio *bio; BUG_ON(sdp->sd_log_bio); while (1) { bio = bio_alloc(GFP_NOIO, nrvecs); if (likely(bio)) break; nrvecs = max(nrvecs/2, 1U); } bio->bi_sector = blkno * (sb->s_blocksize >> 9); bio->bi_bdev = sb->s_bdev; bio->bi_end_io = gfs2_end_log_write; bio->bi_private = sdp; sdp->sd_log_bio = bio; return bio; } /** * gfs2_log_get_bio - Get cached log bio, or allocate a new one * @sdp: The superblock * @blkno: The device block number we want to write to * * If there is a cached bio, then if the next block number is sequential * with the previous one, return it, otherwise flush the bio to the * device. If there is not a cached bio, or we just flushed it, then * allocate a new one. * * Returns: The bio to use for log writes */ static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno) { struct bio *bio = sdp->sd_log_bio; u64 nblk; if (bio) { nblk = bio_end_sector(bio); nblk >>= sdp->sd_fsb2bb_shift; if (blkno == nblk) return bio; gfs2_log_flush_bio(sdp, WRITE); } return gfs2_log_alloc_bio(sdp, blkno); } /** * gfs2_log_write - write to log * @sdp: the filesystem * @page: the page to write * @size: the size of the data to write * @offset: the offset within the page * * Try and add the page segment to the current bio. If that fails, * submit the current bio to the device and create a new one, and * then add the page segment to that. 
*/ static void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page, unsigned size, unsigned offset) { u64 blkno = gfs2_log_bmap(sdp); struct bio *bio; int ret; bio = gfs2_log_get_bio(sdp, blkno); ret = bio_add_page(bio, page, size, offset); if (ret == 0) { gfs2_log_flush_bio(sdp, WRITE); bio = gfs2_log_alloc_bio(sdp, blkno); ret = bio_add_page(bio, page, size, offset); WARN_ON(ret == 0); } } /** * gfs2_log_write_bh - write a buffer's content to the log * @sdp: The super block * @bh: The buffer pointing to the in-place location * * This writes the content of the buffer to the next available location * in the log. The buffer will be unlocked once the i/o to the log has * completed. */ static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh) { gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh)); } /** * gfs2_log_write_page - write one block stored in a page, into the log * @sdp: The superblock * @page: The struct page * * This writes the first block-sized part of the page into the log. Note * that the page must have been allocated from the gfs2_page_pool mempool * and that after this has been called, ownership has been transferred and * the page may be freed at any time. 
*/ void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page) { struct super_block *sb = sdp->sd_vfs; gfs2_log_write(sdp, page, sb->s_blocksize, 0); } static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type, u32 ld_length, u32 ld_data1) { struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO); struct gfs2_log_descriptor *ld = page_address(page); clear_page(ld); ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC); ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD); ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD); ld->ld_type = cpu_to_be32(ld_type); ld->ld_length = cpu_to_be32(ld_length); ld->ld_data1 = cpu_to_be32(ld_data1); ld->ld_data2 = 0; return page; } static void gfs2_check_magic(struct buffer_head *bh) { void *kaddr; __be32 *ptr; clear_buffer_escaped(bh); kaddr = kmap_atomic(bh->b_page); ptr = kaddr + bh_offset(bh); if (*ptr == cpu_to_be32(GFS2_MAGIC)) set_buffer_escaped(bh); kunmap_atomic(kaddr); } static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit, unsigned int total, struct list_head *blist, bool is_databuf) { struct gfs2_log_descriptor *ld; struct gfs2_bufdata *bd1 = NULL, *bd2; struct page *page; unsigned int num; unsigned n; __be64 *ptr; gfs2_log_lock(sdp); bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list); while(total) { num = total; if (total > limit) num = limit; gfs2_log_unlock(sdp); page = gfs2_get_log_desc(sdp, is_databuf ? GFS2_LOG_DESC_JDATA : GFS2_LOG_DESC_METADATA, num + 1, num); ld = page_address(page); gfs2_log_lock(sdp); ptr = (__be64 *)(ld + 1); n = 0; list_for_each_entry_continue(bd1, blist, bd_list) { *ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr); if (is_databuf) { gfs2_check_magic(bd1->bd_bh); *ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 
1 : 0); } if (++n >= num) break; } gfs2_log_unlock(sdp); gfs2_log_write_page(sdp, page); gfs2_log_lock(sdp); n = 0; list_for_each_entry_continue(bd2, blist, bd_list) { get_bh(bd2->bd_bh); gfs2_log_unlock(sdp); lock_buffer(bd2->bd_bh); if (buffer_escaped(bd2->bd_bh)) { void *kaddr; page = mempool_alloc(gfs2_page_pool, GFP_NOIO); ptr = page_address(page); kaddr = kmap_atomic(bd2->bd_bh->b_page); memcpy(ptr, kaddr + bh_offset(bd2->bd_bh), bd2->bd_bh->b_size); kunmap_atomic(kaddr); *(__be32 *)ptr = 0; clear_buffer_escaped(bd2->bd_bh); unlock_buffer(bd2->bd_bh); brelse(bd2->bd_bh); gfs2_log_write_page(sdp, page); } else { gfs2_log_write_bh(sdp, bd2->bd_bh); } gfs2_log_lock(sdp); if (++n >= num) break; } BUG_ON(total < num); total -= num; } gfs2_log_unlock(sdp); } static void buf_lo_before_commit(struct gfs2_sbd *sdp) { unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */ gfs2_before_commit(sdp, limit, sdp->sd_log_num_buf, &sdp->sd_log_le_buf, 0); } static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { struct list_head *head = &sdp->sd_log_le_buf; struct gfs2_bufdata *bd; if (tr == NULL) { gfs2_assert(sdp, list_empty(head)); return; } while (!list_empty(head)) { bd = list_entry(head->next, struct gfs2_bufdata, bd_list); list_del_init(&bd->bd_list); sdp->sd_log_num_buf--; gfs2_unpin(sdp, bd->bd_bh, tr); } gfs2_assert_warn(sdp, !sdp->sd_log_num_buf); } static void buf_lo_before_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, int pass) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (pass != 0) return; sdp->sd_found_blocks = 0; sdp->sd_replayed_blocks = 0; } static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start, struct gfs2_log_descriptor *ld, __be64 *ptr, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_glock *gl = ip->i_gl; unsigned int blks = be32_to_cpu(ld->ld_data1); struct buffer_head *bh_log, *bh_ip; u64 blkno; int error = 0; if 
(pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA) return 0; gfs2_replay_incr_blk(sdp, &start); for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) { blkno = be64_to_cpu(*ptr++); sdp->sd_found_blocks++; if (gfs2_revoke_check(sdp, blkno, start)) continue; error = gfs2_replay_read_block(jd, start, &bh_log); if (error) return error; bh_ip = gfs2_meta_new(gl, blkno); memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size); if (gfs2_meta_check(sdp, bh_ip)) error = -EIO; else mark_buffer_dirty(bh_ip); brelse(bh_log); brelse(bh_ip); if (error) break; sdp->sd_replayed_blocks++; } return error; } static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (error) { gfs2_meta_sync(ip->i_gl); return; } if (pass != 1) return; gfs2_meta_sync(ip->i_gl); fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n", jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks); } static void revoke_lo_before_commit(struct gfs2_sbd *sdp) { struct gfs2_meta_header *mh; unsigned int offset; struct list_head *head = &sdp->sd_log_le_revoke; struct gfs2_bufdata *bd; struct page *page; unsigned int length; if (!sdp->sd_log_num_revoke) return; length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64)); page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke); offset = sizeof(struct gfs2_log_descriptor); list_for_each_entry(bd, head, bd_list) { sdp->sd_log_num_revoke--; if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) { gfs2_log_write_page(sdp, page); page = mempool_alloc(gfs2_page_pool, GFP_NOIO); mh = page_address(page); clear_page(mh); mh->mh_magic = cpu_to_be32(GFS2_MAGIC); mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB); mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB); offset = sizeof(struct gfs2_meta_header); } *(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno); offset += sizeof(u64); } gfs2_assert_withdraw(sdp, 
!sdp->sd_log_num_revoke); gfs2_log_write_page(sdp, page); } static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { struct list_head *head = &sdp->sd_log_le_revoke; struct gfs2_bufdata *bd; struct gfs2_glock *gl; while (!list_empty(head)) { bd = list_entry(head->next, struct gfs2_bufdata, bd_list); list_del_init(&bd->bd_list); gl = bd->bd_gl; atomic_dec(&gl->gl_revokes); clear_bit(GLF_LFLUSH, &gl->gl_flags); kmem_cache_free(gfs2_bufdata_cachep, bd); } } static void revoke_lo_before_scan(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head, int pass) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (pass != 0) return; sdp->sd_found_revokes = 0; sdp->sd_replay_tail = head->lh_tail; } static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start, struct gfs2_log_descriptor *ld, __be64 *ptr, int pass) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); unsigned int blks = be32_to_cpu(ld->ld_length); unsigned int revokes = be32_to_cpu(ld->ld_data1); struct buffer_head *bh; unsigned int offset; u64 blkno; int first = 1; int error; if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE) return 0; offset = sizeof(struct gfs2_log_descriptor); for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) { error = gfs2_replay_read_block(jd, start, &bh); if (error) return error; if (!first) gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB); while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) { blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset)); error = gfs2_revoke_add(sdp, blkno, start); if (error < 0) { brelse(bh); return error; } else if (error) sdp->sd_found_revokes++; if (!--revokes) break; offset += sizeof(u64); } brelse(bh); offset = sizeof(struct gfs2_meta_header); first = 0; } return 0; } static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) { struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (error) { gfs2_revoke_clean(sdp); return; } if (pass != 1) return; fs_info(sdp, "jid=%u: Found %u revoke 
tags\n", jd->jd_jid, sdp->sd_found_revokes); gfs2_revoke_clean(sdp); } /** * databuf_lo_before_commit - Scan the data buffers, writing as we go * */ static void databuf_lo_before_commit(struct gfs2_sbd *sdp) { unsigned int limit = buf_limit(sdp) / 2; gfs2_before_commit(sdp, limit, sdp->sd_log_num_databuf, &sdp->sd_log_le_databuf, 1); } static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start, struct gfs2_log_descriptor *ld, __be64 *ptr, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); struct gfs2_glock *gl = ip->i_gl; unsigned int blks = be32_to_cpu(ld->ld_data1); struct buffer_head *bh_log, *bh_ip; u64 blkno; u64 esc; int error = 0; if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA) return 0; gfs2_replay_incr_blk(sdp, &start); for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) { blkno = be64_to_cpu(*ptr++); esc = be64_to_cpu(*ptr++); sdp->sd_found_blocks++; if (gfs2_revoke_check(sdp, blkno, start)) continue; error = gfs2_replay_read_block(jd, start, &bh_log); if (error) return error; bh_ip = gfs2_meta_new(gl, blkno); memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size); /* Unescape */ if (esc) { __be32 *eptr = (__be32 *)bh_ip->b_data; *eptr = cpu_to_be32(GFS2_MAGIC); } mark_buffer_dirty(bh_ip); brelse(bh_log); brelse(bh_ip); sdp->sd_replayed_blocks++; } return error; } /* FIXME: sort out accounting for log blocks etc. */ static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass) { struct gfs2_inode *ip = GFS2_I(jd->jd_inode); struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode); if (error) { gfs2_meta_sync(ip->i_gl); return; } if (pass != 1) return; /* data sync? 
*/ gfs2_meta_sync(ip->i_gl); fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n", jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks); } static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr) { struct list_head *head = &sdp->sd_log_le_databuf; struct gfs2_bufdata *bd; if (tr == NULL) { gfs2_assert(sdp, list_empty(head)); return; } while (!list_empty(head)) { bd = list_entry(head->next, struct gfs2_bufdata, bd_list); list_del_init(&bd->bd_list); sdp->sd_log_num_databuf--; gfs2_unpin(sdp, bd->bd_bh, tr); } gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf); } const struct gfs2_log_operations gfs2_buf_lops = { .lo_before_commit = buf_lo_before_commit, .lo_after_commit = buf_lo_after_commit, .lo_before_scan = buf_lo_before_scan, .lo_scan_elements = buf_lo_scan_elements, .lo_after_scan = buf_lo_after_scan, .lo_name = "buf", }; const struct gfs2_log_operations gfs2_revoke_lops = { .lo_before_commit = revoke_lo_before_commit, .lo_after_commit = revoke_lo_after_commit, .lo_before_scan = revoke_lo_before_scan, .lo_scan_elements = revoke_lo_scan_elements, .lo_after_scan = revoke_lo_after_scan, .lo_name = "revoke", }; const struct gfs2_log_operations gfs2_rg_lops = { .lo_name = "rg", }; const struct gfs2_log_operations gfs2_databuf_lops = { .lo_before_commit = databuf_lo_before_commit, .lo_after_commit = databuf_lo_after_commit, .lo_scan_elements = databuf_lo_scan_elements, .lo_after_scan = databuf_lo_after_scan, .lo_name = "databuf", }; const struct gfs2_log_operations *gfs2_log_ops[] = { &gfs2_databuf_lops, &gfs2_buf_lops, &gfs2_rg_lops, &gfs2_revoke_lops, NULL, };
gpl-2.0
Dopi/HuaweiAscendG600_kernel
arch/arm/mach-realview/realview_pba8.c
2321
8939
/* * linux/arch/arm/mach-realview/realview_pba8.c * * Copyright (C) 2008 ARM Limited * Copyright (C) 2000 Deep Blue Solutions Ltd * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/sysdev.h> #include <linux/amba/bus.h> #include <linux/amba/pl061.h> #include <linux/amba/mmci.h> #include <linux/amba/pl022.h> #include <linux/io.h> #include <asm/irq.h> #include <asm/leds.h> #include <asm/mach-types.h> #include <asm/pmu.h> #include <asm/pgtable.h> #include <asm/hardware/gic.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/time.h> #include <mach/hardware.h> #include <mach/board-pba8.h> #include <mach/irqs.h> #include "core.h" static struct map_desc realview_pba8_io_desc[] __initdata = { { .virtual = IO_ADDRESS(REALVIEW_SYS_BASE), .pfn = __phys_to_pfn(REALVIEW_SYS_BASE), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(REALVIEW_PBA8_GIC_CPU_BASE), .pfn = __phys_to_pfn(REALVIEW_PBA8_GIC_CPU_BASE), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(REALVIEW_PBA8_GIC_DIST_BASE), .pfn = __phys_to_pfn(REALVIEW_PBA8_GIC_DIST_BASE), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(REALVIEW_SCTL_BASE), .pfn = __phys_to_pfn(REALVIEW_SCTL_BASE), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = 
IO_ADDRESS(REALVIEW_PBA8_TIMER0_1_BASE), .pfn = __phys_to_pfn(REALVIEW_PBA8_TIMER0_1_BASE), .length = SZ_4K, .type = MT_DEVICE, }, { .virtual = IO_ADDRESS(REALVIEW_PBA8_TIMER2_3_BASE), .pfn = __phys_to_pfn(REALVIEW_PBA8_TIMER2_3_BASE), .length = SZ_4K, .type = MT_DEVICE, }, #ifdef CONFIG_PCI { .virtual = PCIX_UNIT_BASE, .pfn = __phys_to_pfn(REALVIEW_PBA8_PCI_BASE), .length = REALVIEW_PBA8_PCI_BASE_SIZE, .type = MT_DEVICE }, #endif #ifdef CONFIG_DEBUG_LL { .virtual = IO_ADDRESS(REALVIEW_PBA8_UART0_BASE), .pfn = __phys_to_pfn(REALVIEW_PBA8_UART0_BASE), .length = SZ_4K, .type = MT_DEVICE, }, #endif }; static void __init realview_pba8_map_io(void) { iotable_init(realview_pba8_io_desc, ARRAY_SIZE(realview_pba8_io_desc)); } static struct pl061_platform_data gpio0_plat_data = { .gpio_base = 0, .irq_base = -1, }; static struct pl061_platform_data gpio1_plat_data = { .gpio_base = 8, .irq_base = -1, }; static struct pl061_platform_data gpio2_plat_data = { .gpio_base = 16, .irq_base = -1, }; static struct pl022_ssp_controller ssp0_plat_data = { .bus_id = 0, .enable_dma = 0, .num_chipselect = 1, }; /* * RealView PBA8Core AMBA devices */ #define GPIO2_IRQ { IRQ_PBA8_GPIO2, NO_IRQ } #define GPIO3_IRQ { IRQ_PBA8_GPIO3, NO_IRQ } #define AACI_IRQ { IRQ_PBA8_AACI, NO_IRQ } #define MMCI0_IRQ { IRQ_PBA8_MMCI0A, IRQ_PBA8_MMCI0B } #define KMI0_IRQ { IRQ_PBA8_KMI0, NO_IRQ } #define KMI1_IRQ { IRQ_PBA8_KMI1, NO_IRQ } #define PBA8_SMC_IRQ { NO_IRQ, NO_IRQ } #define MPMC_IRQ { NO_IRQ, NO_IRQ } #define PBA8_CLCD_IRQ { IRQ_PBA8_CLCD, NO_IRQ } #define DMAC_IRQ { IRQ_PBA8_DMAC, NO_IRQ } #define SCTL_IRQ { NO_IRQ, NO_IRQ } #define PBA8_WATCHDOG_IRQ { IRQ_PBA8_WATCHDOG, NO_IRQ } #define PBA8_GPIO0_IRQ { IRQ_PBA8_GPIO0, NO_IRQ } #define GPIO1_IRQ { IRQ_PBA8_GPIO1, NO_IRQ } #define PBA8_RTC_IRQ { IRQ_PBA8_RTC, NO_IRQ } #define SCI_IRQ { IRQ_PBA8_SCI, NO_IRQ } #define PBA8_UART0_IRQ { IRQ_PBA8_UART0, NO_IRQ } #define PBA8_UART1_IRQ { IRQ_PBA8_UART1, NO_IRQ } #define PBA8_UART2_IRQ { IRQ_PBA8_UART2, 
NO_IRQ } #define PBA8_UART3_IRQ { IRQ_PBA8_UART3, NO_IRQ } #define PBA8_SSP_IRQ { IRQ_PBA8_SSP, NO_IRQ } /* FPGA Primecells */ AMBA_DEVICE(aaci, "fpga:aaci", AACI, NULL); AMBA_DEVICE(mmc0, "fpga:mmc0", MMCI0, &realview_mmc0_plat_data); AMBA_DEVICE(kmi0, "fpga:kmi0", KMI0, NULL); AMBA_DEVICE(kmi1, "fpga:kmi1", KMI1, NULL); AMBA_DEVICE(uart3, "fpga:uart3", PBA8_UART3, NULL); /* DevChip Primecells */ AMBA_DEVICE(smc, "dev:smc", PBA8_SMC, NULL); AMBA_DEVICE(sctl, "dev:sctl", SCTL, NULL); AMBA_DEVICE(wdog, "dev:wdog", PBA8_WATCHDOG, NULL); AMBA_DEVICE(gpio0, "dev:gpio0", PBA8_GPIO0, &gpio0_plat_data); AMBA_DEVICE(gpio1, "dev:gpio1", GPIO1, &gpio1_plat_data); AMBA_DEVICE(gpio2, "dev:gpio2", GPIO2, &gpio2_plat_data); AMBA_DEVICE(rtc, "dev:rtc", PBA8_RTC, NULL); AMBA_DEVICE(sci0, "dev:sci0", SCI, NULL); AMBA_DEVICE(uart0, "dev:uart0", PBA8_UART0, NULL); AMBA_DEVICE(uart1, "dev:uart1", PBA8_UART1, NULL); AMBA_DEVICE(uart2, "dev:uart2", PBA8_UART2, NULL); AMBA_DEVICE(ssp0, "dev:ssp0", PBA8_SSP, &ssp0_plat_data); /* Primecells on the NEC ISSP chip */ AMBA_DEVICE(clcd, "issp:clcd", PBA8_CLCD, &clcd_plat_data); AMBA_DEVICE(dmac, "issp:dmac", DMAC, NULL); static struct amba_device *amba_devs[] __initdata = { &dmac_device, &uart0_device, &uart1_device, &uart2_device, &uart3_device, &smc_device, &clcd_device, &sctl_device, &wdog_device, &gpio0_device, &gpio1_device, &gpio2_device, &rtc_device, &sci0_device, &ssp0_device, &aaci_device, &mmc0_device, &kmi0_device, &kmi1_device, }; /* * RealView PB-A8 platform devices */ static struct resource realview_pba8_flash_resource[] = { [0] = { .start = REALVIEW_PBA8_FLASH0_BASE, .end = REALVIEW_PBA8_FLASH0_BASE + REALVIEW_PBA8_FLASH0_SIZE - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = REALVIEW_PBA8_FLASH1_BASE, .end = REALVIEW_PBA8_FLASH1_BASE + REALVIEW_PBA8_FLASH1_SIZE - 1, .flags = IORESOURCE_MEM, }, }; static struct resource realview_pba8_smsc911x_resources[] = { [0] = { .start = REALVIEW_PBA8_ETH_BASE, .end = REALVIEW_PBA8_ETH_BASE + 
SZ_64K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_PBA8_ETH, .end = IRQ_PBA8_ETH, .flags = IORESOURCE_IRQ, }, }; static struct resource realview_pba8_isp1761_resources[] = { [0] = { .start = REALVIEW_PBA8_USB_BASE, .end = REALVIEW_PBA8_USB_BASE + SZ_128K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_PBA8_USB, .end = IRQ_PBA8_USB, .flags = IORESOURCE_IRQ, }, }; static struct resource pmu_resource = { .start = IRQ_PBA8_PMU, .end = IRQ_PBA8_PMU, .flags = IORESOURCE_IRQ, }; static struct platform_device pmu_device = { .name = "arm-pmu", .id = ARM_PMU_DEVICE_CPU, .num_resources = 1, .resource = &pmu_resource, }; static void __init gic_init_irq(void) { /* ARM PB-A8 on-board GIC */ gic_init(0, IRQ_PBA8_GIC_START, __io_address(REALVIEW_PBA8_GIC_DIST_BASE), __io_address(REALVIEW_PBA8_GIC_CPU_BASE)); } static void __init realview_pba8_timer_init(void) { timer0_va_base = __io_address(REALVIEW_PBA8_TIMER0_1_BASE); timer1_va_base = __io_address(REALVIEW_PBA8_TIMER0_1_BASE) + 0x20; timer2_va_base = __io_address(REALVIEW_PBA8_TIMER2_3_BASE); timer3_va_base = __io_address(REALVIEW_PBA8_TIMER2_3_BASE) + 0x20; realview_timer_init(IRQ_PBA8_TIMER0_1); } static struct sys_timer realview_pba8_timer = { .init = realview_pba8_timer_init, }; static void realview_pba8_reset(char mode) { void __iomem *reset_ctrl = __io_address(REALVIEW_SYS_RESETCTL); void __iomem *lock_ctrl = __io_address(REALVIEW_SYS_LOCK); /* * To reset, we hit the on-board reset register * in the system FPGA */ __raw_writel(REALVIEW_SYS_LOCK_VAL, lock_ctrl); __raw_writel(0x0000, reset_ctrl); __raw_writel(0x0004, reset_ctrl); } static void __init realview_pba8_init(void) { int i; realview_flash_register(realview_pba8_flash_resource, ARRAY_SIZE(realview_pba8_flash_resource)); realview_eth_register(NULL, realview_pba8_smsc911x_resources); platform_device_register(&realview_i2c_device); platform_device_register(&realview_cf_device); realview_usb_register(realview_pba8_isp1761_resources); 
platform_device_register(&pmu_device); for (i = 0; i < ARRAY_SIZE(amba_devs); i++) { struct amba_device *d = amba_devs[i]; amba_device_register(d, &iomem_resource); } #ifdef CONFIG_LEDS leds_event = realview_leds_event; #endif realview_reset = realview_pba8_reset; } MACHINE_START(REALVIEW_PBA8, "ARM-RealView PB-A8") /* Maintainer: ARM Ltd/Deep Blue Solutions Ltd */ .boot_params = PLAT_PHYS_OFFSET + 0x00000100, .fixup = realview_fixup, .map_io = realview_pba8_map_io, .init_early = realview_init_early, .init_irq = gic_init_irq, .timer = &realview_pba8_timer, .init_machine = realview_pba8_init, MACHINE_END
gpl-2.0
holyangel/HTC_M8_GPE-4.4.3
drivers/platform/x86/intel_ips.c
2833
44996
/* * Copyright (c) 2009-2010 Intel Corporation * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * * The full GNU General Public License is included in this distribution in * the file called "COPYING". * * Authors: * Jesse Barnes <jbarnes@virtuousgeek.org> */ /* * Some Intel Ibex Peak based platforms support so-called "intelligent * power sharing", which allows the CPU and GPU to cooperate to maximize * performance within a given TDP (thermal design point). This driver * performs the coordination between the CPU and GPU, monitors thermal and * power statistics in the platform, and initializes power monitoring * hardware. It also provides a few tunables to control behavior. Its * primary purpose is to safely allow CPU and GPU turbo modes to be enabled * by tracking power and thermal budget; secondarily it can boost turbo * performance by allocating more power or thermal budget to the CPU or GPU * based on available headroom and activity. * * The basic algorithm is driven by a 5s moving average of tempurature. If * thermal headroom is available, the CPU and/or GPU power clamps may be * adjusted upwards. If we hit the thermal ceiling or a thermal trigger, * we scale back the clamp. Aside from trigger events (when we're critically * close or over our TDP) we don't adjust the clamps more than once every * five seconds. 
* * The thermal device (device 31, function 6) has a set of registers that * are updated by the ME firmware. The ME should also take the clamp values * written to those registers and write them to the CPU, but we currently * bypass that functionality and write the CPU MSR directly. * * UNSUPPORTED: * - dual MCP configs * * TODO: * - handle CPU hotplug * - provide turbo enable/disable api * * Related documents: * - CDI 403777, 403778 - Auburndale EDS vol 1 & 2 * - CDI 401376 - Ibex Peak EDS * - ref 26037, 26641 - IPS BIOS spec * - ref 26489 - Nehalem BIOS writer's guide * - ref 26921 - Ibex Peak BIOS Specification */ #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/tick.h> #include <linux/timer.h> #include <drm/i915_drm.h> #include <asm/msr.h> #include <asm/processor.h> #include "intel_ips.h" #include <asm-generic/io-64-nonatomic-lo-hi.h> #define PCI_DEVICE_ID_INTEL_THERMAL_SENSOR 0x3b32 /* * Package level MSRs for monitor/control */ #define PLATFORM_INFO 0xce #define PLATFORM_TDP (1<<29) #define PLATFORM_RATIO (1<<28) #define IA32_MISC_ENABLE 0x1a0 #define IA32_MISC_TURBO_EN (1ULL<<38) #define TURBO_POWER_CURRENT_LIMIT 0x1ac #define TURBO_TDC_OVR_EN (1UL<<31) #define TURBO_TDC_MASK (0x000000007fff0000UL) #define TURBO_TDC_SHIFT (16) #define TURBO_TDP_OVR_EN (1UL<<15) #define TURBO_TDP_MASK (0x0000000000003fffUL) /* * Core/thread MSRs for monitoring */ #define IA32_PERF_CTL 0x199 #define IA32_PERF_TURBO_DIS (1ULL<<32) /* * Thermal PCI device regs */ #define THM_CFG_TBAR 0x10 #define THM_CFG_TBAR_HI 0x14 #define THM_TSIU 0x00 #define THM_TSE 0x01 #define TSE_EN 0xb8 #define THM_TSS 0x02 #define THM_TSTR 0x03 #define THM_TSTTP 0x04 #define THM_TSCO 0x08 #define THM_TSES 0x0c #define THM_TSGPEN 0x0d #define TSGPEN_HOT_LOHI (1<<1) #define 
TSGPEN_CRIT_LOHI (1<<2) #define THM_TSPC 0x0e #define THM_PPEC 0x10 #define THM_CTA 0x12 #define THM_PTA 0x14 #define PTA_SLOPE_MASK (0xff00) #define PTA_SLOPE_SHIFT 8 #define PTA_OFFSET_MASK (0x00ff) #define THM_MGTA 0x16 #define MGTA_SLOPE_MASK (0xff00) #define MGTA_SLOPE_SHIFT 8 #define MGTA_OFFSET_MASK (0x00ff) #define THM_TRC 0x1a #define TRC_CORE2_EN (1<<15) #define TRC_THM_EN (1<<12) #define TRC_C6_WAR (1<<8) #define TRC_CORE1_EN (1<<7) #define TRC_CORE_PWR (1<<6) #define TRC_PCH_EN (1<<5) #define TRC_MCH_EN (1<<4) #define TRC_DIMM4 (1<<3) #define TRC_DIMM3 (1<<2) #define TRC_DIMM2 (1<<1) #define TRC_DIMM1 (1<<0) #define THM_TES 0x20 #define THM_TEN 0x21 #define TEN_UPDATE_EN 1 #define THM_PSC 0x24 #define PSC_NTG (1<<0) /* No GFX turbo support */ #define PSC_NTPC (1<<1) /* No CPU turbo support */ #define PSC_PP_DEF (0<<2) /* Perf policy up to driver */ #define PSP_PP_PC (1<<2) /* BIOS prefers CPU perf */ #define PSP_PP_BAL (2<<2) /* BIOS wants balanced perf */ #define PSP_PP_GFX (3<<2) /* BIOS prefers GFX perf */ #define PSP_PBRT (1<<4) /* BIOS run time support */ #define THM_CTV1 0x30 #define CTV_TEMP_ERROR (1<<15) #define CTV_TEMP_MASK 0x3f #define CTV_ #define THM_CTV2 0x32 #define THM_CEC 0x34 /* undocumented power accumulator in joules */ #define THM_AE 0x3f #define THM_HTS 0x50 /* 32 bits */ #define HTS_PCPL_MASK (0x7fe00000) #define HTS_PCPL_SHIFT 21 #define HTS_GPL_MASK (0x001ff000) #define HTS_GPL_SHIFT 12 #define HTS_PP_MASK (0x00000c00) #define HTS_PP_SHIFT 10 #define HTS_PP_DEF 0 #define HTS_PP_PROC 1 #define HTS_PP_BAL 2 #define HTS_PP_GFX 3 #define HTS_PCTD_DIS (1<<9) #define HTS_GTD_DIS (1<<8) #define HTS_PTL_MASK (0x000000fe) #define HTS_PTL_SHIFT 1 #define HTS_NVV (1<<0) #define THM_HTSHI 0x54 /* 16 bits */ #define HTS2_PPL_MASK (0x03ff) #define HTS2_PRST_MASK (0x3c00) #define HTS2_PRST_SHIFT 10 #define HTS2_PRST_UNLOADED 0 #define HTS2_PRST_RUNNING 1 #define HTS2_PRST_TDISOP 2 /* turbo disabled due to power */ #define HTS2_PRST_TDISHT 3 /* 
turbo disabled due to high temp */ #define HTS2_PRST_TDISUSR 4 /* user disabled turbo */ #define HTS2_PRST_TDISPLAT 5 /* platform disabled turbo */ #define HTS2_PRST_TDISPM 6 /* power management disabled turbo */ #define HTS2_PRST_TDISERR 7 /* some kind of error disabled turbo */ #define THM_PTL 0x56 #define THM_MGTV 0x58 #define TV_MASK 0x000000000000ff00 #define TV_SHIFT 8 #define THM_PTV 0x60 #define PTV_MASK 0x00ff #define THM_MMGPC 0x64 #define THM_MPPC 0x66 #define THM_MPCPC 0x68 #define THM_TSPIEN 0x82 #define TSPIEN_AUX_LOHI (1<<0) #define TSPIEN_HOT_LOHI (1<<1) #define TSPIEN_CRIT_LOHI (1<<2) #define TSPIEN_AUX2_LOHI (1<<3) #define THM_TSLOCK 0x83 #define THM_ATR 0x84 #define THM_TOF 0x87 #define THM_STS 0x98 #define STS_PCPL_MASK (0x7fe00000) #define STS_PCPL_SHIFT 21 #define STS_GPL_MASK (0x001ff000) #define STS_GPL_SHIFT 12 #define STS_PP_MASK (0x00000c00) #define STS_PP_SHIFT 10 #define STS_PP_DEF 0 #define STS_PP_PROC 1 #define STS_PP_BAL 2 #define STS_PP_GFX 3 #define STS_PCTD_DIS (1<<9) #define STS_GTD_DIS (1<<8) #define STS_PTL_MASK (0x000000fe) #define STS_PTL_SHIFT 1 #define STS_NVV (1<<0) #define THM_SEC 0x9c #define SEC_ACK (1<<0) #define THM_TC3 0xa4 #define THM_TC1 0xa8 #define STS_PPL_MASK (0x0003ff00) #define STS_PPL_SHIFT 16 #define THM_TC2 0xac #define THM_DTV 0xb0 #define THM_ITV 0xd8 #define ITV_ME_SEQNO_MASK 0x00ff0000 /* ME should update every ~200ms */ #define ITV_ME_SEQNO_SHIFT (16) #define ITV_MCH_TEMP_MASK 0x0000ff00 #define ITV_MCH_TEMP_SHIFT (8) #define ITV_PCH_TEMP_MASK 0x000000ff #define thm_readb(off) readb(ips->regmap + (off)) #define thm_readw(off) readw(ips->regmap + (off)) #define thm_readl(off) readl(ips->regmap + (off)) #define thm_readq(off) readq(ips->regmap + (off)) #define thm_writeb(off, val) writeb((val), ips->regmap + (off)) #define thm_writew(off, val) writew((val), ips->regmap + (off)) #define thm_writel(off, val) writel((val), ips->regmap + (off)) static const int IPS_ADJUST_PERIOD = 5000; /* ms */ static bool 
late_i915_load = false; /* For initial average collection */ static const int IPS_SAMPLE_PERIOD = 200; /* ms */ static const int IPS_SAMPLE_WINDOW = 5000; /* 5s moving window of samples */ #define IPS_SAMPLE_COUNT (IPS_SAMPLE_WINDOW / IPS_SAMPLE_PERIOD) /* Per-SKU limits */ struct ips_mcp_limits { int cpu_family; int cpu_model; /* includes extended model... */ int mcp_power_limit; /* mW units */ int core_power_limit; int mch_power_limit; int core_temp_limit; /* degrees C */ int mch_temp_limit; }; /* Max temps are -10 degrees C to avoid PROCHOT# */ struct ips_mcp_limits ips_sv_limits = { .mcp_power_limit = 35000, .core_power_limit = 29000, .mch_power_limit = 20000, .core_temp_limit = 95, .mch_temp_limit = 90 }; struct ips_mcp_limits ips_lv_limits = { .mcp_power_limit = 25000, .core_power_limit = 21000, .mch_power_limit = 13000, .core_temp_limit = 95, .mch_temp_limit = 90 }; struct ips_mcp_limits ips_ulv_limits = { .mcp_power_limit = 18000, .core_power_limit = 14000, .mch_power_limit = 11000, .core_temp_limit = 95, .mch_temp_limit = 90 }; struct ips_driver { struct pci_dev *dev; void *regmap; struct task_struct *monitor; struct task_struct *adjust; struct dentry *debug_root; /* Average CPU core temps (all averages in .01 degrees C for precision) */ u16 ctv1_avg_temp; u16 ctv2_avg_temp; /* GMCH average */ u16 mch_avg_temp; /* Average for the CPU (both cores?) 
*/ u16 mcp_avg_temp; /* Average power consumption (in mW) */ u32 cpu_avg_power; u32 mch_avg_power; /* Offset values */ u16 cta_val; u16 pta_val; u16 mgta_val; /* Maximums & prefs, protected by turbo status lock */ spinlock_t turbo_status_lock; u16 mcp_temp_limit; u16 mcp_power_limit; u16 core_power_limit; u16 mch_power_limit; bool cpu_turbo_enabled; bool __cpu_turbo_on; bool gpu_turbo_enabled; bool __gpu_turbo_on; bool gpu_preferred; bool poll_turbo_status; bool second_cpu; bool turbo_toggle_allowed; struct ips_mcp_limits *limits; /* Optional MCH interfaces for if i915 is in use */ unsigned long (*read_mch_val)(void); bool (*gpu_raise)(void); bool (*gpu_lower)(void); bool (*gpu_busy)(void); bool (*gpu_turbo_disable)(void); /* For restoration at unload */ u64 orig_turbo_limit; u64 orig_turbo_ratios; }; static bool ips_gpu_turbo_enabled(struct ips_driver *ips); /** * ips_cpu_busy - is CPU busy? * @ips: IPS driver struct * * Check CPU for load to see whether we should increase its thermal budget. * * RETURNS: * True if the CPU could use more power, false otherwise. */ static bool ips_cpu_busy(struct ips_driver *ips) { if ((avenrun[0] >> FSHIFT) > 1) return true; return false; } /** * ips_cpu_raise - raise CPU power clamp * @ips: IPS driver struct * * Raise the CPU power clamp by %IPS_CPU_STEP, in accordance with TDP for * this platform. * * We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR upwards (as * long as we haven't hit the TDP limit for the SKU). 
*/ static void ips_cpu_raise(struct ips_driver *ips) { u64 turbo_override; u16 cur_tdp_limit, new_tdp_limit; if (!ips->cpu_turbo_enabled) return; rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); cur_tdp_limit = turbo_override & TURBO_TDP_MASK; new_tdp_limit = cur_tdp_limit + 8; /* 1W increase */ /* Clamp to SKU TDP limit */ if (((new_tdp_limit * 10) / 8) > ips->core_power_limit) new_tdp_limit = cur_tdp_limit; thm_writew(THM_MPCPC, (new_tdp_limit * 10) / 8); turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); turbo_override &= ~TURBO_TDP_MASK; turbo_override |= new_tdp_limit; wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); } /** * ips_cpu_lower - lower CPU power clamp * @ips: IPS driver struct * * Lower CPU power clamp b %IPS_CPU_STEP if possible. * * We do this by adjusting the TURBO_POWER_CURRENT_LIMIT MSR down, going * as low as the platform limits will allow (though we could go lower there * wouldn't be much point). */ static void ips_cpu_lower(struct ips_driver *ips) { u64 turbo_override; u16 cur_limit, new_limit; rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); cur_limit = turbo_override & TURBO_TDP_MASK; new_limit = cur_limit - 8; /* 1W decrease */ /* Clamp to SKU TDP limit */ if (new_limit < (ips->orig_turbo_limit & TURBO_TDP_MASK)) new_limit = ips->orig_turbo_limit & TURBO_TDP_MASK; thm_writew(THM_MPCPC, (new_limit * 10) / 8); turbo_override |= TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN; wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); turbo_override &= ~TURBO_TDP_MASK; turbo_override |= new_limit; wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); } /** * do_enable_cpu_turbo - internal turbo enable function * @data: unused * * Internal function for actually updating MSRs. When we enable/disable * turbo, we need to do it on each CPU; this function is the one called * by on_each_cpu() when needed. 
*/ static void do_enable_cpu_turbo(void *data) { u64 perf_ctl; rdmsrl(IA32_PERF_CTL, perf_ctl); if (perf_ctl & IA32_PERF_TURBO_DIS) { perf_ctl &= ~IA32_PERF_TURBO_DIS; wrmsrl(IA32_PERF_CTL, perf_ctl); } } /** * ips_enable_cpu_turbo - enable turbo mode on all CPUs * @ips: IPS driver struct * * Enable turbo mode by clearing the disable bit in IA32_PERF_CTL on * all logical threads. */ static void ips_enable_cpu_turbo(struct ips_driver *ips) { /* Already on, no need to mess with MSRs */ if (ips->__cpu_turbo_on) return; if (ips->turbo_toggle_allowed) on_each_cpu(do_enable_cpu_turbo, ips, 1); ips->__cpu_turbo_on = true; } /** * do_disable_cpu_turbo - internal turbo disable function * @data: unused * * Internal function for actually updating MSRs. When we enable/disable * turbo, we need to do it on each CPU; this function is the one called * by on_each_cpu() when needed. */ static void do_disable_cpu_turbo(void *data) { u64 perf_ctl; rdmsrl(IA32_PERF_CTL, perf_ctl); if (!(perf_ctl & IA32_PERF_TURBO_DIS)) { perf_ctl |= IA32_PERF_TURBO_DIS; wrmsrl(IA32_PERF_CTL, perf_ctl); } } /** * ips_disable_cpu_turbo - disable turbo mode on all CPUs * @ips: IPS driver struct * * Disable turbo mode by setting the disable bit in IA32_PERF_CTL on * all logical threads. */ static void ips_disable_cpu_turbo(struct ips_driver *ips) { /* Already off, leave it */ if (!ips->__cpu_turbo_on) return; if (ips->turbo_toggle_allowed) on_each_cpu(do_disable_cpu_turbo, ips, 1); ips->__cpu_turbo_on = false; } /** * ips_gpu_busy - is GPU busy? * @ips: IPS driver struct * * Check GPU for load to see whether we should increase its thermal budget. * We need to call into the i915 driver in this case. * * RETURNS: * True if the GPU could use more power, false otherwise. */ static bool ips_gpu_busy(struct ips_driver *ips) { if (!ips_gpu_turbo_enabled(ips)) return false; return ips->gpu_busy(); } /** * ips_gpu_raise - raise GPU power clamp * @ips: IPS driver struct * * Raise the GPU frequency/power if possible. 
We need to call into the * i915 driver in this case. */ static void ips_gpu_raise(struct ips_driver *ips) { if (!ips_gpu_turbo_enabled(ips)) return; if (!ips->gpu_raise()) ips->gpu_turbo_enabled = false; return; } /** * ips_gpu_lower - lower GPU power clamp * @ips: IPS driver struct * * Lower GPU frequency/power if possible. Need to call i915. */ static void ips_gpu_lower(struct ips_driver *ips) { if (!ips_gpu_turbo_enabled(ips)) return; if (!ips->gpu_lower()) ips->gpu_turbo_enabled = false; return; } /** * ips_enable_gpu_turbo - notify the gfx driver turbo is available * @ips: IPS driver struct * * Call into the graphics driver indicating that it can safely use * turbo mode. */ static void ips_enable_gpu_turbo(struct ips_driver *ips) { if (ips->__gpu_turbo_on) return; ips->__gpu_turbo_on = true; } /** * ips_disable_gpu_turbo - notify the gfx driver to disable turbo mode * @ips: IPS driver struct * * Request that the graphics driver disable turbo mode. */ static void ips_disable_gpu_turbo(struct ips_driver *ips) { /* Avoid calling i915 if turbo is already disabled */ if (!ips->__gpu_turbo_on) return; if (!ips->gpu_turbo_disable()) dev_err(&ips->dev->dev, "failed to disable graphis turbo\n"); else ips->__gpu_turbo_on = false; } /** * mcp_exceeded - check whether we're outside our thermal & power limits * @ips: IPS driver struct * * Check whether the MCP is over its thermal or power budget. 
*/ static bool mcp_exceeded(struct ips_driver *ips) { unsigned long flags; bool ret = false; u32 temp_limit; u32 avg_power; spin_lock_irqsave(&ips->turbo_status_lock, flags); temp_limit = ips->mcp_temp_limit * 100; if (ips->mcp_avg_temp > temp_limit) ret = true; avg_power = ips->cpu_avg_power + ips->mch_avg_power; if (avg_power > ips->mcp_power_limit) ret = true; spin_unlock_irqrestore(&ips->turbo_status_lock, flags); return ret; } /** * cpu_exceeded - check whether a CPU core is outside its limits * @ips: IPS driver struct * @cpu: CPU number to check * * Check a given CPU's average temp or power is over its limit. */ static bool cpu_exceeded(struct ips_driver *ips, int cpu) { unsigned long flags; int avg; bool ret = false; spin_lock_irqsave(&ips->turbo_status_lock, flags); avg = cpu ? ips->ctv2_avg_temp : ips->ctv1_avg_temp; if (avg > (ips->limits->core_temp_limit * 100)) ret = true; if (ips->cpu_avg_power > ips->core_power_limit * 100) ret = true; spin_unlock_irqrestore(&ips->turbo_status_lock, flags); if (ret) dev_info(&ips->dev->dev, "CPU power or thermal limit exceeded\n"); return ret; } /** * mch_exceeded - check whether the GPU is over budget * @ips: IPS driver struct * * Check the MCH temp & power against their maximums. */ static bool mch_exceeded(struct ips_driver *ips) { unsigned long flags; bool ret = false; spin_lock_irqsave(&ips->turbo_status_lock, flags); if (ips->mch_avg_temp > (ips->limits->mch_temp_limit * 100)) ret = true; if (ips->mch_avg_power > ips->mch_power_limit) ret = true; spin_unlock_irqrestore(&ips->turbo_status_lock, flags); return ret; } /** * verify_limits - verify BIOS provided limits * @ips: IPS structure * * BIOS can optionally provide non-default limits for power and temp. Check * them here and use the defaults if the BIOS values are not provided or * are otherwise unusable. 
*/ static void verify_limits(struct ips_driver *ips) { if (ips->mcp_power_limit < ips->limits->mcp_power_limit || ips->mcp_power_limit > 35000) ips->mcp_power_limit = ips->limits->mcp_power_limit; if (ips->mcp_temp_limit < ips->limits->core_temp_limit || ips->mcp_temp_limit < ips->limits->mch_temp_limit || ips->mcp_temp_limit > 150) ips->mcp_temp_limit = min(ips->limits->core_temp_limit, ips->limits->mch_temp_limit); } /** * update_turbo_limits - get various limits & settings from regs * @ips: IPS driver struct * * Update the IPS power & temp limits, along with turbo enable flags, * based on latest register contents. * * Used at init time and for runtime BIOS support, which requires polling * the regs for updates (as a result of AC->DC transition for example). * * LOCKING: * Caller must hold turbo_status_lock (outside of init) */ static void update_turbo_limits(struct ips_driver *ips) { u32 hts = thm_readl(THM_HTS); ips->cpu_turbo_enabled = !(hts & HTS_PCTD_DIS); /* * Disable turbo for now, until we can figure out why the power figures * are wrong */ ips->cpu_turbo_enabled = false; if (ips->gpu_busy) ips->gpu_turbo_enabled = !(hts & HTS_GTD_DIS); ips->core_power_limit = thm_readw(THM_MPCPC); ips->mch_power_limit = thm_readw(THM_MMGPC); ips->mcp_temp_limit = thm_readw(THM_PTL); ips->mcp_power_limit = thm_readw(THM_MPPC); verify_limits(ips); /* Ignore BIOS CPU vs GPU pref */ } /** * ips_adjust - adjust power clamp based on thermal state * @data: ips driver structure * * Wake up every 5s or so and check whether we should adjust the power clamp. * Check CPU and GPU load to determine which needs adjustment. There are * several things to consider here: * - do we need to adjust up or down? * - is CPU busy? * - is GPU busy? * - is CPU in turbo? * - is GPU in turbo? * - is CPU or GPU preferred? 
(CPU is default)
 *
 * So, given the above, we do the following:
 *   - up (TDP available)
 *     - CPU not busy, GPU not busy - nothing
 *     - CPU busy, GPU not busy - adjust CPU up
 *     - CPU not busy, GPU busy - adjust GPU up
 *     - CPU busy, GPU busy - adjust preferred unit up, taking headroom from
 *       non-preferred unit if necessary
 *   - down (at TDP limit)
 *     - adjust both CPU and GPU down if possible
 *
 *		cpu+ gpu+	cpu+gpu-	cpu-gpu+	cpu-gpu-
 * cpu < gpu <	cpu+gpu+	cpu+		gpu+		nothing
 * cpu < gpu >=	cpu+gpu-(mcp<)	cpu+gpu-(mcp<)	gpu-		gpu-
 * cpu >= gpu <	cpu-gpu+(mcp<)	cpu-		cpu-gpu+(mcp<)	cpu-
 * cpu >= gpu >=	cpu-gpu-	cpu-gpu-	cpu-gpu-	cpu-gpu-
 *
 */
static int ips_adjust(void *data)
{
	struct ips_driver *ips = data;
	unsigned long flags;

	dev_dbg(&ips->dev->dev, "starting ips-adjust thread\n");

	/*
	 * Adjust CPU and GPU clamps every 5s if needed.  Doing it more
	 * often isn't recommended due to ME interaction.
	 */
	do {
		bool cpu_busy = ips_cpu_busy(ips);
		bool gpu_busy = ips_gpu_busy(ips);

		/* Re-read limits if BIOS runtime support requires polling */
		spin_lock_irqsave(&ips->turbo_status_lock, flags);
		if (ips->poll_turbo_status)
			update_turbo_limits(ips);
		spin_unlock_irqrestore(&ips->turbo_status_lock, flags);

		/* Update turbo status if necessary */
		if (ips->cpu_turbo_enabled)
			ips_enable_cpu_turbo(ips);
		else
			ips_disable_cpu_turbo(ips);

		if (ips->gpu_turbo_enabled)
			ips_enable_gpu_turbo(ips);
		else
			ips_disable_gpu_turbo(ips);

		/* We're outside our comfort zone, crank them down */
		if (mcp_exceeded(ips)) {
			ips_cpu_lower(ips);
			ips_gpu_lower(ips);
			goto sleep;
		}

		if (!cpu_exceeded(ips, 0) && cpu_busy)
			ips_cpu_raise(ips);
		else
			ips_cpu_lower(ips);

		if (!mch_exceeded(ips) && gpu_busy)
			ips_gpu_raise(ips);
		else
			ips_gpu_lower(ips);

sleep:
		schedule_timeout_interruptible(msecs_to_jiffies(IPS_ADJUST_PERIOD));
	} while (!kthread_should_stop());

	dev_dbg(&ips->dev->dev, "ips-adjust thread stopped\n");

	return 0;
}

/*
 * Helpers for reading out temp/power values and calculating their
 * averages for the decision making and monitoring functions.
*/ static u16 calc_avg_temp(struct ips_driver *ips, u16 *array) { u64 total = 0; int i; u16 avg; for (i = 0; i < IPS_SAMPLE_COUNT; i++) total += (u64)(array[i] * 100); do_div(total, IPS_SAMPLE_COUNT); avg = (u16)total; return avg; } static u16 read_mgtv(struct ips_driver *ips) { u16 ret; u64 slope, offset; u64 val; val = thm_readq(THM_MGTV); val = (val & TV_MASK) >> TV_SHIFT; slope = offset = thm_readw(THM_MGTA); slope = (slope & MGTA_SLOPE_MASK) >> MGTA_SLOPE_SHIFT; offset = offset & MGTA_OFFSET_MASK; ret = ((val * slope + 0x40) >> 7) + offset; return 0; /* MCH temp reporting buggy */ } static u16 read_ptv(struct ips_driver *ips) { u16 val, slope, offset; slope = (ips->pta_val & PTA_SLOPE_MASK) >> PTA_SLOPE_SHIFT; offset = ips->pta_val & PTA_OFFSET_MASK; val = thm_readw(THM_PTV) & PTV_MASK; return val; } static u16 read_ctv(struct ips_driver *ips, int cpu) { int reg = cpu ? THM_CTV2 : THM_CTV1; u16 val; val = thm_readw(reg); if (!(val & CTV_TEMP_ERROR)) val = (val) >> 6; /* discard fractional component */ else val = 0; return val; } static u32 get_cpu_power(struct ips_driver *ips, u32 *last, int period) { u32 val; u32 ret; /* * CEC is in joules/65535. Take difference over time to * get watts. 
*/ val = thm_readl(THM_CEC); /* period is in ms and we want mW */ ret = (((val - *last) * 1000) / period); ret = (ret * 1000) / 65535; *last = val; return 0; } static const u16 temp_decay_factor = 2; static u16 update_average_temp(u16 avg, u16 val) { u16 ret; /* Multiply by 100 for extra precision */ ret = (val * 100 / temp_decay_factor) + (((temp_decay_factor - 1) * avg) / temp_decay_factor); return ret; } static const u16 power_decay_factor = 2; static u16 update_average_power(u32 avg, u32 val) { u32 ret; ret = (val / power_decay_factor) + (((power_decay_factor - 1) * avg) / power_decay_factor); return ret; } static u32 calc_avg_power(struct ips_driver *ips, u32 *array) { u64 total = 0; u32 avg; int i; for (i = 0; i < IPS_SAMPLE_COUNT; i++) total += array[i]; do_div(total, IPS_SAMPLE_COUNT); avg = (u32)total; return avg; } static void monitor_timeout(unsigned long arg) { wake_up_process((struct task_struct *)arg); } /** * ips_monitor - temp/power monitoring thread * @data: ips driver structure * * This is the main function for the IPS driver. It monitors power and * tempurature in the MCP and adjusts CPU and GPU power clams accordingly. * * We keep a 5s moving average of power consumption and tempurature. Using * that data, along with CPU vs GPU preference, we adjust the power clamps * up or down. 
*/ static int ips_monitor(void *data) { struct ips_driver *ips = data; struct timer_list timer; unsigned long seqno_timestamp, expire, last_msecs, last_sample_period; int i; u32 *cpu_samples, *mchp_samples, old_cpu_power; u16 *mcp_samples, *ctv1_samples, *ctv2_samples, *mch_samples; u8 cur_seqno, last_seqno; mcp_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL); ctv1_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL); ctv2_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL); mch_samples = kzalloc(sizeof(u16) * IPS_SAMPLE_COUNT, GFP_KERNEL); cpu_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL); mchp_samples = kzalloc(sizeof(u32) * IPS_SAMPLE_COUNT, GFP_KERNEL); if (!mcp_samples || !ctv1_samples || !ctv2_samples || !mch_samples || !cpu_samples || !mchp_samples) { dev_err(&ips->dev->dev, "failed to allocate sample array, ips disabled\n"); kfree(mcp_samples); kfree(ctv1_samples); kfree(ctv2_samples); kfree(mch_samples); kfree(cpu_samples); kfree(mchp_samples); return -ENOMEM; } last_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >> ITV_ME_SEQNO_SHIFT; seqno_timestamp = get_jiffies_64(); old_cpu_power = thm_readl(THM_CEC); schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); /* Collect an initial average */ for (i = 0; i < IPS_SAMPLE_COUNT; i++) { u32 mchp, cpu_power; u16 val; mcp_samples[i] = read_ptv(ips); val = read_ctv(ips, 0); ctv1_samples[i] = val; val = read_ctv(ips, 1); ctv2_samples[i] = val; val = read_mgtv(ips); mch_samples[i] = val; cpu_power = get_cpu_power(ips, &old_cpu_power, IPS_SAMPLE_PERIOD); cpu_samples[i] = cpu_power; if (ips->read_mch_val) { mchp = ips->read_mch_val(); mchp_samples[i] = mchp; } schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); if (kthread_should_stop()) break; } ips->mcp_avg_temp = calc_avg_temp(ips, mcp_samples); ips->ctv1_avg_temp = calc_avg_temp(ips, ctv1_samples); ips->ctv2_avg_temp = calc_avg_temp(ips, ctv2_samples); ips->mch_avg_temp = 
calc_avg_temp(ips, mch_samples); ips->cpu_avg_power = calc_avg_power(ips, cpu_samples); ips->mch_avg_power = calc_avg_power(ips, mchp_samples); kfree(mcp_samples); kfree(ctv1_samples); kfree(ctv2_samples); kfree(mch_samples); kfree(cpu_samples); kfree(mchp_samples); /* Start the adjustment thread now that we have data */ wake_up_process(ips->adjust); /* * Ok, now we have an initial avg. From here on out, we track the * running avg using a decaying average calculation. This allows * us to reduce the sample frequency if the CPU and GPU are idle. */ old_cpu_power = thm_readl(THM_CEC); schedule_timeout_interruptible(msecs_to_jiffies(IPS_SAMPLE_PERIOD)); last_sample_period = IPS_SAMPLE_PERIOD; setup_deferrable_timer_on_stack(&timer, monitor_timeout, (unsigned long)current); do { u32 cpu_val, mch_val; u16 val; /* MCP itself */ val = read_ptv(ips); ips->mcp_avg_temp = update_average_temp(ips->mcp_avg_temp, val); /* Processor 0 */ val = read_ctv(ips, 0); ips->ctv1_avg_temp = update_average_temp(ips->ctv1_avg_temp, val); /* Power */ cpu_val = get_cpu_power(ips, &old_cpu_power, last_sample_period); ips->cpu_avg_power = update_average_power(ips->cpu_avg_power, cpu_val); if (ips->second_cpu) { /* Processor 1 */ val = read_ctv(ips, 1); ips->ctv2_avg_temp = update_average_temp(ips->ctv2_avg_temp, val); } /* MCH */ val = read_mgtv(ips); ips->mch_avg_temp = update_average_temp(ips->mch_avg_temp, val); /* Power */ if (ips->read_mch_val) { mch_val = ips->read_mch_val(); ips->mch_avg_power = update_average_power(ips->mch_avg_power, mch_val); } /* * Make sure ME is updating thermal regs. * Note: * If it's been more than a second since the last update, * the ME is probably hung. 
*/ cur_seqno = (thm_readl(THM_ITV) & ITV_ME_SEQNO_MASK) >> ITV_ME_SEQNO_SHIFT; if (cur_seqno == last_seqno && time_after(jiffies, seqno_timestamp + HZ)) { dev_warn(&ips->dev->dev, "ME failed to update for more than 1s, likely hung\n"); } else { seqno_timestamp = get_jiffies_64(); last_seqno = cur_seqno; } last_msecs = jiffies_to_msecs(jiffies); expire = jiffies + msecs_to_jiffies(IPS_SAMPLE_PERIOD); __set_current_state(TASK_INTERRUPTIBLE); mod_timer(&timer, expire); schedule(); /* Calculate actual sample period for power averaging */ last_sample_period = jiffies_to_msecs(jiffies) - last_msecs; if (!last_sample_period) last_sample_period = 1; } while (!kthread_should_stop()); del_timer_sync(&timer); destroy_timer_on_stack(&timer); dev_dbg(&ips->dev->dev, "ips-monitor thread stopped\n"); return 0; } #if 0 #define THM_DUMPW(reg) \ { \ u16 val = thm_readw(reg); \ dev_dbg(&ips->dev->dev, #reg ": 0x%04x\n", val); \ } #define THM_DUMPL(reg) \ { \ u32 val = thm_readl(reg); \ dev_dbg(&ips->dev->dev, #reg ": 0x%08x\n", val); \ } #define THM_DUMPQ(reg) \ { \ u64 val = thm_readq(reg); \ dev_dbg(&ips->dev->dev, #reg ": 0x%016x\n", val); \ } static void dump_thermal_info(struct ips_driver *ips) { u16 ptl; ptl = thm_readw(THM_PTL); dev_dbg(&ips->dev->dev, "Processor temp limit: %d\n", ptl); THM_DUMPW(THM_CTA); THM_DUMPW(THM_TRC); THM_DUMPW(THM_CTV1); THM_DUMPL(THM_STS); THM_DUMPW(THM_PTV); THM_DUMPQ(THM_MGTV); } #endif /** * ips_irq_handler - handle temperature triggers and other IPS events * @irq: irq number * @arg: unused * * Handle temperature limit trigger events, generally by lowering the clamps. * If we're at a critical limit, we clamp back to the lowest possible value * to prevent emergency shutdown. 
*/ static irqreturn_t ips_irq_handler(int irq, void *arg) { struct ips_driver *ips = arg; u8 tses = thm_readb(THM_TSES); u8 tes = thm_readb(THM_TES); if (!tses && !tes) return IRQ_NONE; dev_info(&ips->dev->dev, "TSES: 0x%02x\n", tses); dev_info(&ips->dev->dev, "TES: 0x%02x\n", tes); /* STS update from EC? */ if (tes & 1) { u32 sts, tc1; sts = thm_readl(THM_STS); tc1 = thm_readl(THM_TC1); if (sts & STS_NVV) { spin_lock(&ips->turbo_status_lock); ips->core_power_limit = (sts & STS_PCPL_MASK) >> STS_PCPL_SHIFT; ips->mch_power_limit = (sts & STS_GPL_MASK) >> STS_GPL_SHIFT; /* ignore EC CPU vs GPU pref */ ips->cpu_turbo_enabled = !(sts & STS_PCTD_DIS); /* * Disable turbo for now, until we can figure * out why the power figures are wrong */ ips->cpu_turbo_enabled = false; if (ips->gpu_busy) ips->gpu_turbo_enabled = !(sts & STS_GTD_DIS); ips->mcp_temp_limit = (sts & STS_PTL_MASK) >> STS_PTL_SHIFT; ips->mcp_power_limit = (tc1 & STS_PPL_MASK) >> STS_PPL_SHIFT; verify_limits(ips); spin_unlock(&ips->turbo_status_lock); thm_writeb(THM_SEC, SEC_ACK); } thm_writeb(THM_TES, tes); } /* Thermal trip */ if (tses) { dev_warn(&ips->dev->dev, "thermal trip occurred, tses: 0x%04x\n", tses); thm_writeb(THM_TSES, tses); } return IRQ_HANDLED; } #ifndef CONFIG_DEBUG_FS static void ips_debugfs_init(struct ips_driver *ips) { return; } static void ips_debugfs_cleanup(struct ips_driver *ips) { return; } #else /* Expose current state and limits in debugfs if possible */ struct ips_debugfs_node { struct ips_driver *ips; char *name; int (*show)(struct seq_file *m, void *data); }; static int show_cpu_temp(struct seq_file *m, void *data) { struct ips_driver *ips = m->private; seq_printf(m, "%d.%02d\n", ips->ctv1_avg_temp / 100, ips->ctv1_avg_temp % 100); return 0; } static int show_cpu_power(struct seq_file *m, void *data) { struct ips_driver *ips = m->private; seq_printf(m, "%dmW\n", ips->cpu_avg_power); return 0; } static int show_cpu_clamp(struct seq_file *m, void *data) { u64 turbo_override; int 
tdp, tdc; rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override); tdp = (int)(turbo_override & TURBO_TDP_MASK); tdc = (int)((turbo_override & TURBO_TDC_MASK) >> TURBO_TDC_SHIFT); /* Convert to .1W/A units */ tdp = tdp * 10 / 8; tdc = tdc * 10 / 8; /* Watts Amperes */ seq_printf(m, "%d.%dW %d.%dA\n", tdp / 10, tdp % 10, tdc / 10, tdc % 10); return 0; } static int show_mch_temp(struct seq_file *m, void *data) { struct ips_driver *ips = m->private; seq_printf(m, "%d.%02d\n", ips->mch_avg_temp / 100, ips->mch_avg_temp % 100); return 0; } static int show_mch_power(struct seq_file *m, void *data) { struct ips_driver *ips = m->private; seq_printf(m, "%dmW\n", ips->mch_avg_power); return 0; } static struct ips_debugfs_node ips_debug_files[] = { { NULL, "cpu_temp", show_cpu_temp }, { NULL, "cpu_power", show_cpu_power }, { NULL, "cpu_clamp", show_cpu_clamp }, { NULL, "mch_temp", show_mch_temp }, { NULL, "mch_power", show_mch_power }, }; static int ips_debugfs_open(struct inode *inode, struct file *file) { struct ips_debugfs_node *node = inode->i_private; return single_open(file, node->show, node->ips); } static const struct file_operations ips_debugfs_ops = { .owner = THIS_MODULE, .open = ips_debugfs_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void ips_debugfs_cleanup(struct ips_driver *ips) { if (ips->debug_root) debugfs_remove_recursive(ips->debug_root); return; } static void ips_debugfs_init(struct ips_driver *ips) { int i; ips->debug_root = debugfs_create_dir("ips", NULL); if (!ips->debug_root) { dev_err(&ips->dev->dev, "failed to create debugfs entries: %ld\n", PTR_ERR(ips->debug_root)); return; } for (i = 0; i < ARRAY_SIZE(ips_debug_files); i++) { struct dentry *ent; struct ips_debugfs_node *node = &ips_debug_files[i]; node->ips = ips; ent = debugfs_create_file(node->name, S_IFREG | S_IRUGO, ips->debug_root, node, &ips_debugfs_ops); if (!ent) { dev_err(&ips->dev->dev, "failed to create debug file: %ld\n", PTR_ERR(ent)); goto 
err_cleanup; } } return; err_cleanup: ips_debugfs_cleanup(ips); return; } #endif /* CONFIG_DEBUG_FS */ /** * ips_detect_cpu - detect whether CPU supports IPS * * Walk our list and see if we're on a supported CPU. If we find one, * return the limits for it. */ static struct ips_mcp_limits *ips_detect_cpu(struct ips_driver *ips) { u64 turbo_power, misc_en; struct ips_mcp_limits *limits = NULL; u16 tdp; if (!(boot_cpu_data.x86 == 6 && boot_cpu_data.x86_model == 37)) { dev_info(&ips->dev->dev, "Non-IPS CPU detected.\n"); goto out; } rdmsrl(IA32_MISC_ENABLE, misc_en); /* * If the turbo enable bit isn't set, we shouldn't try to enable/disable * turbo manually or we'll get an illegal MSR access, even though * turbo will still be available. */ if (misc_en & IA32_MISC_TURBO_EN) ips->turbo_toggle_allowed = true; else ips->turbo_toggle_allowed = false; if (strstr(boot_cpu_data.x86_model_id, "CPU M")) limits = &ips_sv_limits; else if (strstr(boot_cpu_data.x86_model_id, "CPU L")) limits = &ips_lv_limits; else if (strstr(boot_cpu_data.x86_model_id, "CPU U")) limits = &ips_ulv_limits; else { dev_info(&ips->dev->dev, "No CPUID match found.\n"); goto out; } rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_power); tdp = turbo_power & TURBO_TDP_MASK; /* Sanity check TDP against CPU */ if (limits->core_power_limit != (tdp / 8) * 1000) { dev_info(&ips->dev->dev, "CPU TDP doesn't match expected value (found %d, expected %d)\n", tdp / 8, limits->core_power_limit / 1000); limits->core_power_limit = (tdp / 8) * 1000; } out: return limits; } /** * ips_get_i915_syms - try to get GPU control methods from i915 driver * @ips: IPS driver * * The i915 driver exports several interfaces to allow the IPS driver to * monitor and control graphics turbo mode. If we can find them, we can * enable graphics turbo, otherwise we must disable it to avoid exceeding * thermal and power limits in the MCP. 
 */
static bool ips_get_i915_syms(struct ips_driver *ips)
{
	/* Grab a module reference for each i915 hook; unwind on failure */
	ips->read_mch_val = symbol_get(i915_read_mch_val);
	if (!ips->read_mch_val)
		goto out_err;
	ips->gpu_raise = symbol_get(i915_gpu_raise);
	if (!ips->gpu_raise)
		goto out_put_mch;
	ips->gpu_lower = symbol_get(i915_gpu_lower);
	if (!ips->gpu_lower)
		goto out_put_raise;
	ips->gpu_busy = symbol_get(i915_gpu_busy);
	if (!ips->gpu_busy)
		goto out_put_lower;
	ips->gpu_turbo_disable = symbol_get(i915_gpu_turbo_disable);
	if (!ips->gpu_turbo_disable)
		goto out_put_busy;

	return true;

	/* Release the references taken so far, in reverse order */
out_put_busy:
	symbol_put(i915_gpu_busy);
out_put_lower:
	symbol_put(i915_gpu_lower);
out_put_raise:
	symbol_put(i915_gpu_raise);
out_put_mch:
	symbol_put(i915_read_mch_val);
out_err:
	return false;
}

/*
 * Report whether GPU turbo is usable.  If i915 loaded after us
 * (late_i915_load set by ips_link_to_i915_driver()), retry grabbing its
 * symbols and re-read the hardware turbo-disable bit.
 */
static bool ips_gpu_turbo_enabled(struct ips_driver *ips)
{
	if (!ips->gpu_busy && late_i915_load) {
		if (ips_get_i915_syms(ips)) {
			dev_info(&ips->dev->dev,
				 "i915 driver attached, reenabling gpu turbo\n");
			ips->gpu_turbo_enabled = !(thm_readl(THM_HTS) & HTS_GTD_DIS);
		}
	}

	return ips->gpu_turbo_enabled;
}

void
ips_link_to_i915_driver(void)
{
	/* We can't cleanly get at the various ips_driver structs from
	 * this caller (the i915 driver), so just set a flag saying
	 * that it's time to try getting the symbols again.
	 */
	late_i915_load = true;
}
EXPORT_SYMBOL_GPL(ips_link_to_i915_driver);

/* PCI ID of the PCH thermal sensor device this driver binds to */
static DEFINE_PCI_DEVICE_TABLE(ips_id_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL,
		     PCI_DEVICE_ID_INTEL_THERMAL_SENSOR), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, ips_id_table);

/*
 * Probe: verify CPU and thermal-sensor support, map registers, install
 * the IRQ handler and spawn the adjust/monitor kthreads.  Teardown on
 * any failure is via the cascading error labels at the bottom.
 */
static int ips_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	u64 platform_info;
	struct ips_driver *ips;
	u32 hts;
	int ret = 0;
	u16 htshi, trc, trc_required_mask;
	u8 tse;

	ips = kzalloc(sizeof(struct ips_driver), GFP_KERNEL);
	if (!ips)
		return -ENOMEM;

	pci_set_drvdata(dev, ips);
	ips->dev = dev;

	/* Bail early if this CPU has no IPS limit table */
	ips->limits = ips_detect_cpu(ips);
	if (!ips->limits) {
		dev_info(&dev->dev, "IPS not supported on this CPU\n");
		ret = -ENXIO;
		goto error_free;
	}

	spin_lock_init(&ips->turbo_status_lock);

	ret = pci_enable_device(dev);
	if (ret) {
		dev_err(&dev->dev, "can't enable PCI device, aborting\n");
		goto error_free;
	}

	if (!pci_resource_start(dev, 0)) {
		dev_err(&dev->dev, "TBAR not assigned, aborting\n");
		ret = -ENXIO;
		goto error_free;
	}

	ret = pci_request_regions(dev, "ips thermal sensor");
	if (ret) {
		dev_err(&dev->dev, "thermal resource busy, aborting\n");
		goto error_free;
	}

	/* Map BAR 0: all thm_read*/thm_write* helpers go through regmap */
	ips->regmap = ioremap(pci_resource_start(dev, 0),
			      pci_resource_len(dev, 0));
	if (!ips->regmap) {
		dev_err(&dev->dev, "failed to map thermal regs, aborting\n");
		ret = -EBUSY;
		goto error_release;
	}

	/* Thermal sensor must already be enabled by firmware */
	tse = thm_readb(THM_TSE);
	if (tse != TSE_EN) {
		dev_err(&dev->dev, "thermal device not enabled (0x%02x), aborting\n", tse);
		ret = -ENXIO;
		goto error_unmap;
	}

	/* Reporting for core 0, core power and MCH is mandatory */
	trc = thm_readw(THM_TRC);
	trc_required_mask = TRC_CORE1_EN | TRC_CORE_PWR | TRC_MCH_EN;
	if ((trc & trc_required_mask) != trc_required_mask) {
		dev_err(&dev->dev, "thermal reporting for required devices not enabled, aborting\n");
		ret = -ENXIO;
		goto error_unmap;
	}

	/* Second core reporting is optional */
	if (trc & TRC_CORE2_EN)
		ips->second_cpu = true;

	update_turbo_limits(ips);
	dev_dbg(&dev->dev, "max cpu power clamp: %dW\n",
		ips->mcp_power_limit / 10);
	dev_dbg(&dev->dev, "max core power clamp: %dW\n",
		ips->core_power_limit / 10);
	/* BIOS may update limits at runtime */
	if (thm_readl(THM_PSC) & PSP_PBRT)
		ips->poll_turbo_status = true;

	/* GPU turbo only if the i915 hooks are available right now */
	if (!ips_get_i915_syms(ips)) {
		dev_info(&dev->dev, "failed to get i915 symbols, graphics turbo disabled until i915 loads\n");
		ips->gpu_turbo_enabled = false;
	} else {
		dev_dbg(&dev->dev, "graphics turbo enabled\n");
		ips->gpu_turbo_enabled = true;
	}

	/*
	 * Check PLATFORM_INFO MSR to make sure this chip is
	 * turbo capable.
	 */
	rdmsrl(PLATFORM_INFO, platform_info);
	if (!(platform_info & PLATFORM_TDP)) {
		dev_err(&dev->dev, "platform indicates TDP override unavailable, aborting\n");
		ret = -ENODEV;
		goto error_unmap;
	}

	/*
	 * IRQ handler for ME interaction
	 * Note: don't use MSI here as the PCH has bugs.
	 */
	pci_disable_msi(dev);
	ret = request_irq(dev->irq, ips_irq_handler, IRQF_SHARED, "ips",
			  ips);
	if (ret) {
		dev_err(&dev->dev, "request irq failed, aborting\n");
		goto error_unmap;
	}

	/* Enable aux, hot & critical interrupts */
	thm_writeb(THM_TSPIEN, TSPIEN_AUX2_LOHI | TSPIEN_CRIT_LOHI |
		   TSPIEN_HOT_LOHI | TSPIEN_AUX_LOHI);
	thm_writeb(THM_TEN, TEN_UPDATE_EN);

	/* Collect adjustment values */
	ips->cta_val = thm_readw(THM_CTA);
	ips->pta_val = thm_readw(THM_PTA);
	ips->mgta_val = thm_readw(THM_MGTA);

	/* Save turbo limits & ratios */
	rdmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);

	ips_disable_cpu_turbo(ips);
	ips->cpu_turbo_enabled = false;

	/* Create thermal adjust thread */
	ips->adjust = kthread_create(ips_adjust, ips, "ips-adjust");
	if (IS_ERR(ips->adjust)) {
		dev_err(&dev->dev, "failed to create thermal adjust thread, aborting\n");
		ret = -ENOMEM;
		goto error_free_irq;
	}

	/*
	 * Set up the work queue and monitor thread. The monitor thread
	 * will wake up ips_adjust thread.
	 */
	ips->monitor = kthread_run(ips_monitor, ips, "ips-monitor");
	if (IS_ERR(ips->monitor)) {
		dev_err(&dev->dev, "failed to create thermal monitor thread, aborting\n");
		ret = -ENOMEM;
		goto error_thread_cleanup;
	}

	/* Publish our limits/status back to the hardware status regs */
	hts = (ips->core_power_limit << HTS_PCPL_SHIFT) |
		(ips->mcp_temp_limit << HTS_PTL_SHIFT) | HTS_NVV;
	htshi = HTS2_PRST_RUNNING << HTS2_PRST_SHIFT;

	thm_writew(THM_HTSHI, htshi);
	thm_writel(THM_HTS, hts);

	ips_debugfs_init(ips);

	dev_info(&dev->dev, "IPS driver initialized, MCP temp limit %d\n",
		 ips->mcp_temp_limit);
	return ret;

error_thread_cleanup:
	kthread_stop(ips->adjust);
error_free_irq:
	free_irq(ips->dev->irq, ips);
error_unmap:
	iounmap(ips->regmap);
error_release:
	pci_release_regions(dev);
error_free:
	kfree(ips);
	return ret;
}

/*
 * Remove: tear down probe's work in reverse — debugfs, i915 symbol
 * references, turbo MSR restoration, IRQ, kthreads, mappings.
 */
static void ips_remove(struct pci_dev *dev)
{
	struct ips_driver *ips = pci_get_drvdata(dev);
	u64 turbo_override;

	if (!ips)
		return;

	ips_debugfs_cleanup(ips);

	/* Release i915 driver */
	if (ips->read_mch_val)
		symbol_put(i915_read_mch_val);
	if (ips->gpu_raise)
		symbol_put(i915_gpu_raise);
	if (ips->gpu_lower)
		symbol_put(i915_gpu_lower);
	if (ips->gpu_busy)
		symbol_put(i915_gpu_busy);
	if (ips->gpu_turbo_disable)
		symbol_put(i915_gpu_turbo_disable);

	/* Drop our overrides, then restore the limits saved at probe */
	rdmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
	turbo_override &= ~(TURBO_TDC_OVR_EN | TURBO_TDP_OVR_EN);
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, turbo_override);
	wrmsrl(TURBO_POWER_CURRENT_LIMIT, ips->orig_turbo_limit);

	free_irq(ips->dev->irq, ips);
	if (ips->adjust)
		kthread_stop(ips->adjust);
	if (ips->monitor)
		kthread_stop(ips->monitor);
	iounmap(ips->regmap);
	pci_release_regions(dev);
	kfree(ips);
	dev_dbg(&dev->dev, "IPS driver removed\n");
}

#ifdef CONFIG_PM
/* No device state to save/restore; hooks exist so the core has them */
static int ips_suspend(struct pci_dev *dev, pm_message_t state)
{
	return 0;
}

static int ips_resume(struct pci_dev *dev)
{
	return 0;
}
#else
#define ips_suspend NULL
#define ips_resume NULL
#endif /* CONFIG_PM */

static void ips_shutdown(struct pci_dev *dev)
{
}

static struct pci_driver ips_pci_driver = {
	.name = "intel ips",
	.id_table = ips_id_table,
	.probe = ips_probe,
	.remove = ips_remove,
	.suspend = ips_suspend,
	.resume = ips_resume,
	.shutdown = ips_shutdown,
};

static int __init ips_init(void)
{
	return pci_register_driver(&ips_pci_driver);
}
module_init(ips_init);

static void ips_exit(void)
{
	pci_unregister_driver(&ips_pci_driver);
	return;
}
module_exit(ips_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Jesse Barnes <jbarnes@virtuousgeek.org>");
MODULE_DESCRIPTION("Intelligent Power Sharing Driver");
gpl-2.0
AndroidDeveloperAlliance/ZenKernel_TUNA
drivers/media/dvb/ttpci/budget-av.c
3089
45871
/*
 * budget-av.c: driver for the SAA7146 based Budget DVB cards
 *              with analog video in
 *
 * Compiled from various sources by Michael Hunold <michael@mihu.de>
 *
 * CI interface support (c) 2004 Olivier Gournet <ogournet@anevia.com> &
 *                               Andrew de Quincey <adq_dvb@lidskialf.net>
 *
 * Copyright (C) 2002 Ralph Metzler <rjkm@metzlerbros.de>
 *
 * Copyright (C) 1999-2002 Ralph  Metzler
 *                       & Marcus Metzler for convergence integrated media GmbH
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 * Or, point your browser to http://www.gnu.org/copyleft/gpl.html
 *
 *
 * the project's page is at http://www.linuxtv.org/
 */

#include "budget.h"
#include "stv0299.h"
#include "stb0899_drv.h"
#include "stb0899_reg.h"
#include "stb0899_cfg.h"
#include "tda8261.h"
#include "tda8261_cfg.h"
#include "tda1002x.h"
#include "tda1004x.h"
#include "tua6100.h"
#include "dvb-pll.h"
#include <media/saa7146_vv.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/spinlock.h>

#include "dvb_ca_en50221.h"

/* DEBI transfer configuration word used for CI (CAM) accesses */
#define DEBICICAM		0x02420000

/* CI slot state machine values; OCCUPIED is "anything but empty" */
#define SLOTSTATUS_NONE		1
#define SLOTSTATUS_PRESENT	2
#define SLOTSTATUS_RESET	4
#define SLOTSTATUS_READY	8
#define SLOTSTATUS_OCCUPIED	(SLOTSTATUS_PRESENT|SLOTSTATUS_RESET|SLOTSTATUS_READY)

DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr);

/* Per-card state, embedding the generic budget driver state */
struct budget_av {
	struct budget budget;
	struct video_device *vd;
	int cur_input;		/* currently selected analog input */
	int has_saa7113;	/* nonzero if a SAA7113 video decoder was found */
	struct tasklet_struct ciintf_irq_tasklet;
	int slot_status;	/* one of the SLOTSTATUS_* values above */
	struct dvb_ca_en50221 ca;	/* CI (EN50221) interface state */
	u8 reinitialise_demod:1;
};

static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot);

/* GPIO Connections:
 * 0 - Vcc/Reset (Reset is controlled by capacitor). Resets the frontend *AS WELL*!
* 1 - CI memory select 0=>IO memory, 1=>Attribute Memory * 2 - CI Card Enable (Active Low) * 3 - CI Card Detect */ /**************************************************************************** * INITIALIZATION ****************************************************************************/ static u8 i2c_readreg(struct i2c_adapter *i2c, u8 id, u8 reg) { u8 mm1[] = { 0x00 }; u8 mm2[] = { 0x00 }; struct i2c_msg msgs[2]; msgs[0].flags = 0; msgs[1].flags = I2C_M_RD; msgs[0].addr = msgs[1].addr = id / 2; mm1[0] = reg; msgs[0].len = 1; msgs[1].len = 1; msgs[0].buf = mm1; msgs[1].buf = mm2; i2c_transfer(i2c, msgs, 2); return mm2[0]; } static int i2c_readregs(struct i2c_adapter *i2c, u8 id, u8 reg, u8 * buf, u8 len) { u8 mm1[] = { reg }; struct i2c_msg msgs[2] = { {.addr = id / 2,.flags = 0,.buf = mm1,.len = 1}, {.addr = id / 2,.flags = I2C_M_RD,.buf = buf,.len = len} }; if (i2c_transfer(i2c, msgs, 2) != 2) return -EIO; return 0; } static int i2c_writereg(struct i2c_adapter *i2c, u8 id, u8 reg, u8 val) { u8 msg[2] = { reg, val }; struct i2c_msg msgs; msgs.flags = 0; msgs.addr = id / 2; msgs.len = 2; msgs.buf = msg; return i2c_transfer(i2c, &msgs, 1); } static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address) { struct budget_av *budget_av = (struct budget_av *) ca->data; int result; if (slot != 0) return -EINVAL; saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTHI); udelay(1); result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, address & 0xfff, 1, 0, 1); if (result == -ETIMEDOUT) { ciintf_slot_shutdown(ca, slot); printk(KERN_INFO "budget-av: cam ejected 1\n"); } return result; } static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value) { struct budget_av *budget_av = (struct budget_av *) ca->data; int result; if (slot != 0) return -EINVAL; saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTHI); udelay(1); result = ttpci_budget_debiwrite(&budget_av->budget, DEBICICAM, address & 0xfff, 
1, value, 0, 1); if (result == -ETIMEDOUT) { ciintf_slot_shutdown(ca, slot); printk(KERN_INFO "budget-av: cam ejected 2\n"); } return result; } static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address) { struct budget_av *budget_av = (struct budget_av *) ca->data; int result; if (slot != 0) return -EINVAL; saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO); udelay(1); result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, address & 3, 1, 0, 0); if (result == -ETIMEDOUT) { ciintf_slot_shutdown(ca, slot); printk(KERN_INFO "budget-av: cam ejected 3\n"); return -ETIMEDOUT; } return result; } static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value) { struct budget_av *budget_av = (struct budget_av *) ca->data; int result; if (slot != 0) return -EINVAL; saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO); udelay(1); result = ttpci_budget_debiwrite(&budget_av->budget, DEBICICAM, address & 3, 1, value, 0, 0); if (result == -ETIMEDOUT) { ciintf_slot_shutdown(ca, slot); printk(KERN_INFO "budget-av: cam ejected 5\n"); } return result; } static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot) { struct budget_av *budget_av = (struct budget_av *) ca->data; struct saa7146_dev *saa = budget_av->budget.dev; if (slot != 0) return -EINVAL; dprintk(1, "ciintf_slot_reset\n"); budget_av->slot_status = SLOTSTATUS_RESET; saa7146_setgpio(saa, 2, SAA7146_GPIO_OUTHI); /* disable card */ saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTHI); /* Vcc off */ msleep(2); saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO); /* Vcc on */ msleep(20); /* 20 ms Vcc settling time */ saa7146_setgpio(saa, 2, SAA7146_GPIO_OUTLO); /* enable card */ ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB); msleep(20); /* reinitialise the frontend if necessary */ if (budget_av->reinitialise_demod) dvb_frontend_reinitialise(budget_av->budget.dvb_frontend); return 0; } static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int 
slot) { struct budget_av *budget_av = (struct budget_av *) ca->data; struct saa7146_dev *saa = budget_av->budget.dev; if (slot != 0) return -EINVAL; dprintk(1, "ciintf_slot_shutdown\n"); ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTB); budget_av->slot_status = SLOTSTATUS_NONE; return 0; } static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot) { struct budget_av *budget_av = (struct budget_av *) ca->data; struct saa7146_dev *saa = budget_av->budget.dev; if (slot != 0) return -EINVAL; dprintk(1, "ciintf_slot_ts_enable: %d\n", budget_av->slot_status); ttpci_budget_set_video_port(saa, BUDGET_VIDEO_PORTA); return 0; } static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open) { struct budget_av *budget_av = (struct budget_av *) ca->data; struct saa7146_dev *saa = budget_av->budget.dev; int result; if (slot != 0) return -EINVAL; /* test the card detect line - needs to be done carefully * since it never goes high for some CAMs on this interface (e.g. topuptv) */ if (budget_av->slot_status == SLOTSTATUS_NONE) { saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT); udelay(1); if (saa7146_read(saa, PSR) & MASK_06) { if (budget_av->slot_status == SLOTSTATUS_NONE) { budget_av->slot_status = SLOTSTATUS_PRESENT; printk(KERN_INFO "budget-av: cam inserted A\n"); } } saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO); } /* We also try and read from IO memory to work round the above detection bug. If * there is no CAM, we will get a timeout. 
Only done if there is no cam * present, since this test actually breaks some cams :( * * if the CI interface is not open, we also do the above test since we * don't care if the cam has problems - we'll be resetting it on open() anyway */ if ((budget_av->slot_status == SLOTSTATUS_NONE) || (!open)) { saa7146_setgpio(budget_av->budget.dev, 1, SAA7146_GPIO_OUTLO); result = ttpci_budget_debiread(&budget_av->budget, DEBICICAM, 0, 1, 0, 1); if ((result >= 0) && (budget_av->slot_status == SLOTSTATUS_NONE)) { budget_av->slot_status = SLOTSTATUS_PRESENT; printk(KERN_INFO "budget-av: cam inserted B\n"); } else if (result < 0) { if (budget_av->slot_status != SLOTSTATUS_NONE) { ciintf_slot_shutdown(ca, slot); printk(KERN_INFO "budget-av: cam ejected 5\n"); return 0; } } } /* read from attribute memory in reset/ready state to know when the CAM is ready */ if (budget_av->slot_status == SLOTSTATUS_RESET) { result = ciintf_read_attribute_mem(ca, slot, 0); if (result == 0x1d) { budget_av->slot_status = SLOTSTATUS_READY; } } /* work out correct return code */ if (budget_av->slot_status != SLOTSTATUS_NONE) { if (budget_av->slot_status & SLOTSTATUS_READY) { return DVB_CA_EN50221_POLL_CAM_PRESENT | DVB_CA_EN50221_POLL_CAM_READY; } return DVB_CA_EN50221_POLL_CAM_PRESENT; } return 0; } static int ciintf_init(struct budget_av *budget_av) { struct saa7146_dev *saa = budget_av->budget.dev; int result; memset(&budget_av->ca, 0, sizeof(struct dvb_ca_en50221)); saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO); saa7146_setgpio(saa, 1, SAA7146_GPIO_OUTLO); saa7146_setgpio(saa, 2, SAA7146_GPIO_OUTLO); saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTLO); /* Enable DEBI pins */ saa7146_write(saa, MC1, MASK_27 | MASK_11); /* register CI interface */ budget_av->ca.owner = THIS_MODULE; budget_av->ca.read_attribute_mem = ciintf_read_attribute_mem; budget_av->ca.write_attribute_mem = ciintf_write_attribute_mem; budget_av->ca.read_cam_control = ciintf_read_cam_control; budget_av->ca.write_cam_control = 
ciintf_write_cam_control; budget_av->ca.slot_reset = ciintf_slot_reset; budget_av->ca.slot_shutdown = ciintf_slot_shutdown; budget_av->ca.slot_ts_enable = ciintf_slot_ts_enable; budget_av->ca.poll_slot_status = ciintf_poll_slot_status; budget_av->ca.data = budget_av; budget_av->budget.ci_present = 1; budget_av->slot_status = SLOTSTATUS_NONE; if ((result = dvb_ca_en50221_init(&budget_av->budget.dvb_adapter, &budget_av->ca, 0, 1)) != 0) { printk(KERN_ERR "budget-av: ci initialisation failed.\n"); goto error; } printk(KERN_INFO "budget-av: ci interface initialised.\n"); return 0; error: saa7146_write(saa, MC1, MASK_27); return result; } static void ciintf_deinit(struct budget_av *budget_av) { struct saa7146_dev *saa = budget_av->budget.dev; saa7146_setgpio(saa, 0, SAA7146_GPIO_INPUT); saa7146_setgpio(saa, 1, SAA7146_GPIO_INPUT); saa7146_setgpio(saa, 2, SAA7146_GPIO_INPUT); saa7146_setgpio(saa, 3, SAA7146_GPIO_INPUT); /* release the CA device */ dvb_ca_en50221_release(&budget_av->ca); /* disable DEBI pins */ saa7146_write(saa, MC1, MASK_27); } static const u8 saa7113_tab[] = { 0x01, 0x08, 0x02, 0xc0, 0x03, 0x33, 0x04, 0x00, 0x05, 0x00, 0x06, 0xeb, 0x07, 0xe0, 0x08, 0x28, 0x09, 0x00, 0x0a, 0x80, 0x0b, 0x47, 0x0c, 0x40, 0x0d, 0x00, 0x0e, 0x01, 0x0f, 0x44, 0x10, 0x08, 0x11, 0x0c, 0x12, 0x7b, 0x13, 0x00, 0x15, 0x00, 0x16, 0x00, 0x17, 0x00, 0x57, 0xff, 0x40, 0x82, 0x58, 0x00, 0x59, 0x54, 0x5a, 0x07, 0x5b, 0x83, 0x5e, 0x00, 0xff }; static int saa7113_init(struct budget_av *budget_av) { struct budget *budget = &budget_av->budget; struct saa7146_dev *saa = budget->dev; const u8 *data = saa7113_tab; saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTHI); msleep(200); if (i2c_writereg(&budget->i2c_adap, 0x4a, 0x01, 0x08) != 1) { dprintk(1, "saa7113 not found on KNC card\n"); return -ENODEV; } dprintk(1, "saa7113 detected and initializing\n"); while (*data != 0xff) { i2c_writereg(&budget->i2c_adap, 0x4a, *data, *(data + 1)); data += 2; } dprintk(1, "saa7113 status=%02x\n", 
i2c_readreg(&budget->i2c_adap, 0x4a, 0x1f)); return 0; } static int saa7113_setinput(struct budget_av *budget_av, int input) { struct budget *budget = &budget_av->budget; if (1 != budget_av->has_saa7113) return -ENODEV; if (input == 1) { i2c_writereg(&budget->i2c_adap, 0x4a, 0x02, 0xc7); i2c_writereg(&budget->i2c_adap, 0x4a, 0x09, 0x80); } else if (input == 0) { i2c_writereg(&budget->i2c_adap, 0x4a, 0x02, 0xc0); i2c_writereg(&budget->i2c_adap, 0x4a, 0x09, 0x00); } else return -EINVAL; budget_av->cur_input = input; return 0; } static int philips_su1278_ty_ci_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio) { u8 aclk = 0; u8 bclk = 0; u8 m1; aclk = 0xb5; if (srate < 2000000) bclk = 0x86; else if (srate < 5000000) bclk = 0x89; else if (srate < 15000000) bclk = 0x8f; else if (srate < 45000000) bclk = 0x95; m1 = 0x14; if (srate < 4000000) m1 = 0x10; stv0299_writereg(fe, 0x13, aclk); stv0299_writereg(fe, 0x14, bclk); stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff); stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff); stv0299_writereg(fe, 0x21, (ratio) & 0xf0); stv0299_writereg(fe, 0x0f, 0x80 | m1); return 0; } static int philips_su1278_ty_ci_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) { u32 div; u8 buf[4]; struct budget *budget = (struct budget *) fe->dvb->priv; struct i2c_msg msg = {.addr = 0x61,.flags = 0,.buf = buf,.len = sizeof(buf) }; if ((params->frequency < 950000) || (params->frequency > 2150000)) return -EINVAL; div = (params->frequency + (125 - 1)) / 125; // round correctly buf[0] = (div >> 8) & 0x7f; buf[1] = div & 0xff; buf[2] = 0x80 | ((div & 0x18000) >> 10) | 4; buf[3] = 0x20; if (params->u.qpsk.symbol_rate < 4000000) buf[3] |= 1; if (params->frequency < 1250000) buf[3] |= 0; else if (params->frequency < 1550000) buf[3] |= 0x40; else if (params->frequency < 2050000) buf[3] |= 0x80; else if (params->frequency < 2150000) buf[3] |= 0xC0; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if 
(i2c_transfer(&budget->i2c_adap, &msg, 1) != 1) return -EIO; return 0; } static u8 typhoon_cinergy1200s_inittab[] = { 0x01, 0x15, 0x02, 0x30, 0x03, 0x00, 0x04, 0x7d, /* F22FR = 0x7d, F22 = f_VCO / 128 / 0x7d = 22 kHz */ 0x05, 0x35, /* I2CT = 0, SCLT = 1, SDAT = 1 */ 0x06, 0x40, /* DAC not used, set to high impendance mode */ 0x07, 0x00, /* DAC LSB */ 0x08, 0x40, /* DiSEqC off */ 0x09, 0x00, /* FIFO */ 0x0c, 0x51, /* OP1 ctl = Normal, OP1 val = 1 (LNB Power ON) */ 0x0d, 0x82, /* DC offset compensation = ON, beta_agc1 = 2 */ 0x0e, 0x23, /* alpha_tmg = 2, beta_tmg = 3 */ 0x10, 0x3f, // AGC2 0x3d 0x11, 0x84, 0x12, 0xb9, 0x15, 0xc9, // lock detector threshold 0x16, 0x00, 0x17, 0x00, 0x18, 0x00, 0x19, 0x00, 0x1a, 0x00, 0x1f, 0x50, 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, 0x23, 0x00, 0x28, 0x00, // out imp: normal out type: parallel FEC mode:0 0x29, 0x1e, // 1/2 threshold 0x2a, 0x14, // 2/3 threshold 0x2b, 0x0f, // 3/4 threshold 0x2c, 0x09, // 5/6 threshold 0x2d, 0x05, // 7/8 threshold 0x2e, 0x01, 0x31, 0x1f, // test all FECs 0x32, 0x19, // viterbi and synchro search 0x33, 0xfc, // rs control 0x34, 0x93, // error control 0x0f, 0x92, 0xff, 0xff }; static struct stv0299_config typhoon_config = { .demod_address = 0x68, .inittab = typhoon_cinergy1200s_inittab, .mclk = 88000000UL, .invert = 0, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_1, .volt13_op0_op1 = STV0299_VOLT13_OP0, .min_delay_ms = 100, .set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate, }; static struct stv0299_config cinergy_1200s_config = { .demod_address = 0x68, .inittab = typhoon_cinergy1200s_inittab, .mclk = 88000000UL, .invert = 0, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_0, .volt13_op0_op1 = STV0299_VOLT13_OP0, .min_delay_ms = 100, .set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate, }; static struct stv0299_config cinergy_1200s_1894_0010_config = { .demod_address = 0x68, .inittab = typhoon_cinergy1200s_inittab, .mclk = 88000000UL, .invert = 1, .skip_reinit = 0, .lock_output = 
STV0299_LOCKOUTPUT_1, .volt13_op0_op1 = STV0299_VOLT13_OP0, .min_delay_ms = 100, .set_symbol_rate = philips_su1278_ty_ci_set_symbol_rate, }; static int philips_cu1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) { struct budget *budget = (struct budget *) fe->dvb->priv; u8 buf[6]; struct i2c_msg msg = {.addr = 0x60,.flags = 0,.buf = buf,.len = sizeof(buf) }; int i; #define CU1216_IF 36125000 #define TUNER_MUL 62500 u32 div = (params->frequency + CU1216_IF + TUNER_MUL / 2) / TUNER_MUL; buf[0] = (div >> 8) & 0x7f; buf[1] = div & 0xff; buf[2] = 0xce; buf[3] = (params->frequency < 150000000 ? 0x01 : params->frequency < 445000000 ? 0x02 : 0x04); buf[4] = 0xde; buf[5] = 0x20; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1) return -EIO; /* wait for the pll lock */ msg.flags = I2C_M_RD; msg.len = 1; for (i = 0; i < 20; i++) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if (i2c_transfer(&budget->i2c_adap, &msg, 1) == 1 && (buf[0] & 0x40)) break; msleep(10); } /* switch the charge pump to the lower current */ msg.flags = 0; msg.len = 2; msg.buf = &buf[2]; buf[2] &= ~0x40; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if (i2c_transfer(&budget->i2c_adap, &msg, 1) != 1) return -EIO; return 0; } static struct tda1002x_config philips_cu1216_config = { .demod_address = 0x0c, .invert = 1, }; static struct tda1002x_config philips_cu1216_config_altaddress = { .demod_address = 0x0d, .invert = 0, }; static struct tda10023_config philips_cu1216_tda10023_config = { .demod_address = 0x0c, .invert = 1, }; static int philips_tu1216_tuner_init(struct dvb_frontend *fe) { struct budget *budget = (struct budget *) fe->dvb->priv; static u8 tu1216_init[] = { 0x0b, 0xf5, 0x85, 0xab }; struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tu1216_init,.len = sizeof(tu1216_init) }; // setup PLL configuration if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if 
(i2c_transfer(&budget->i2c_adap, &tuner_msg, 1) != 1) return -EIO; msleep(1); return 0; } static int philips_tu1216_tuner_set_params(struct dvb_frontend *fe, struct dvb_frontend_parameters *params) { struct budget *budget = (struct budget *) fe->dvb->priv; u8 tuner_buf[4]; struct i2c_msg tuner_msg = {.addr = 0x60,.flags = 0,.buf = tuner_buf,.len = sizeof(tuner_buf) }; int tuner_frequency = 0; u8 band, cp, filter; // determine charge pump tuner_frequency = params->frequency + 36166000; if (tuner_frequency < 87000000) return -EINVAL; else if (tuner_frequency < 130000000) cp = 3; else if (tuner_frequency < 160000000) cp = 5; else if (tuner_frequency < 200000000) cp = 6; else if (tuner_frequency < 290000000) cp = 3; else if (tuner_frequency < 420000000) cp = 5; else if (tuner_frequency < 480000000) cp = 6; else if (tuner_frequency < 620000000) cp = 3; else if (tuner_frequency < 830000000) cp = 5; else if (tuner_frequency < 895000000) cp = 7; else return -EINVAL; // determine band if (params->frequency < 49000000) return -EINVAL; else if (params->frequency < 161000000) band = 1; else if (params->frequency < 444000000) band = 2; else if (params->frequency < 861000000) band = 4; else return -EINVAL; // setup PLL filter switch (params->u.ofdm.bandwidth) { case BANDWIDTH_6_MHZ: filter = 0; break; case BANDWIDTH_7_MHZ: filter = 0; break; case BANDWIDTH_8_MHZ: filter = 1; break; default: return -EINVAL; } // calculate divisor // ((36166000+((1000000/6)/2)) + Finput)/(1000000/6) tuner_frequency = (((params->frequency / 1000) * 6) + 217496) / 1000; // setup tuner buffer tuner_buf[0] = (tuner_frequency >> 8) & 0x7f; tuner_buf[1] = tuner_frequency & 0xff; tuner_buf[2] = 0xca; tuner_buf[3] = (cp << 5) | (filter << 3) | band; if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); if (i2c_transfer(&budget->i2c_adap, &tuner_msg, 1) != 1) return -EIO; msleep(1); return 0; } static int philips_tu1216_request_firmware(struct dvb_frontend *fe, const struct firmware **fw, char *name) { 
struct budget *budget = (struct budget *) fe->dvb->priv; return request_firmware(fw, name, &budget->dev->pci->dev); } static struct tda1004x_config philips_tu1216_config = { .demod_address = 0x8, .invert = 1, .invert_oclk = 1, .xtal_freq = TDA10046_XTAL_4M, .agc_config = TDA10046_AGC_DEFAULT, .if_freq = TDA10046_FREQ_3617, .request_firmware = philips_tu1216_request_firmware, }; static u8 philips_sd1878_inittab[] = { 0x01, 0x15, 0x02, 0x30, 0x03, 0x00, 0x04, 0x7d, 0x05, 0x35, 0x06, 0x40, 0x07, 0x00, 0x08, 0x43, 0x09, 0x02, 0x0C, 0x51, 0x0D, 0x82, 0x0E, 0x23, 0x10, 0x3f, 0x11, 0x84, 0x12, 0xb9, 0x15, 0xc9, 0x16, 0x19, 0x17, 0x8c, 0x18, 0x59, 0x19, 0xf8, 0x1a, 0xfe, 0x1c, 0x7f, 0x1d, 0x00, 0x1e, 0x00, 0x1f, 0x50, 0x20, 0x00, 0x21, 0x00, 0x22, 0x00, 0x23, 0x00, 0x28, 0x00, 0x29, 0x28, 0x2a, 0x14, 0x2b, 0x0f, 0x2c, 0x09, 0x2d, 0x09, 0x31, 0x1f, 0x32, 0x19, 0x33, 0xfc, 0x34, 0x93, 0xff, 0xff }; static int philips_sd1878_ci_set_symbol_rate(struct dvb_frontend *fe, u32 srate, u32 ratio) { u8 aclk = 0; u8 bclk = 0; u8 m1; aclk = 0xb5; if (srate < 2000000) bclk = 0x86; else if (srate < 5000000) bclk = 0x89; else if (srate < 15000000) bclk = 0x8f; else if (srate < 45000000) bclk = 0x95; m1 = 0x14; if (srate < 4000000) m1 = 0x10; stv0299_writereg(fe, 0x0e, 0x23); stv0299_writereg(fe, 0x0f, 0x94); stv0299_writereg(fe, 0x10, 0x39); stv0299_writereg(fe, 0x13, aclk); stv0299_writereg(fe, 0x14, bclk); stv0299_writereg(fe, 0x15, 0xc9); stv0299_writereg(fe, 0x1f, (ratio >> 16) & 0xff); stv0299_writereg(fe, 0x20, (ratio >> 8) & 0xff); stv0299_writereg(fe, 0x21, (ratio) & 0xf0); stv0299_writereg(fe, 0x0f, 0x80 | m1); return 0; } static struct stv0299_config philips_sd1878_config = { .demod_address = 0x68, .inittab = philips_sd1878_inittab, .mclk = 88000000UL, .invert = 0, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_1, .volt13_op0_op1 = STV0299_VOLT13_OP0, .min_delay_ms = 100, .set_symbol_rate = philips_sd1878_ci_set_symbol_rate, }; /* KNC1 DVB-S (STB0899) Inittab */ static 
const struct stb0899_s1_reg knc1_stb0899_s1_init_1[] = { { STB0899_DEV_ID , 0x81 }, { STB0899_DISCNTRL1 , 0x32 }, { STB0899_DISCNTRL2 , 0x80 }, { STB0899_DISRX_ST0 , 0x04 }, { STB0899_DISRX_ST1 , 0x00 }, { STB0899_DISPARITY , 0x00 }, { STB0899_DISFIFO , 0x00 }, { STB0899_DISSTATUS , 0x20 }, { STB0899_DISF22 , 0x8c }, { STB0899_DISF22RX , 0x9a }, { STB0899_SYSREG , 0x0b }, { STB0899_ACRPRESC , 0x11 }, { STB0899_ACRDIV1 , 0x0a }, { STB0899_ACRDIV2 , 0x05 }, { STB0899_DACR1 , 0x00 }, { STB0899_DACR2 , 0x00 }, { STB0899_OUTCFG , 0x00 }, { STB0899_MODECFG , 0x00 }, { STB0899_IRQSTATUS_3 , 0x30 }, { STB0899_IRQSTATUS_2 , 0x00 }, { STB0899_IRQSTATUS_1 , 0x00 }, { STB0899_IRQSTATUS_0 , 0x00 }, { STB0899_IRQMSK_3 , 0xf3 }, { STB0899_IRQMSK_2 , 0xfc }, { STB0899_IRQMSK_1 , 0xff }, { STB0899_IRQMSK_0 , 0xff }, { STB0899_IRQCFG , 0x00 }, { STB0899_I2CCFG , 0x88 }, { STB0899_I2CRPT , 0x58 }, /* Repeater=8, Stop=disabled */ { STB0899_IOPVALUE5 , 0x00 }, { STB0899_IOPVALUE4 , 0x20 }, { STB0899_IOPVALUE3 , 0xc9 }, { STB0899_IOPVALUE2 , 0x90 }, { STB0899_IOPVALUE1 , 0x40 }, { STB0899_IOPVALUE0 , 0x00 }, { STB0899_GPIO00CFG , 0x82 }, { STB0899_GPIO01CFG , 0x82 }, { STB0899_GPIO02CFG , 0x82 }, { STB0899_GPIO03CFG , 0x82 }, { STB0899_GPIO04CFG , 0x82 }, { STB0899_GPIO05CFG , 0x82 }, { STB0899_GPIO06CFG , 0x82 }, { STB0899_GPIO07CFG , 0x82 }, { STB0899_GPIO08CFG , 0x82 }, { STB0899_GPIO09CFG , 0x82 }, { STB0899_GPIO10CFG , 0x82 }, { STB0899_GPIO11CFG , 0x82 }, { STB0899_GPIO12CFG , 0x82 }, { STB0899_GPIO13CFG , 0x82 }, { STB0899_GPIO14CFG , 0x82 }, { STB0899_GPIO15CFG , 0x82 }, { STB0899_GPIO16CFG , 0x82 }, { STB0899_GPIO17CFG , 0x82 }, { STB0899_GPIO18CFG , 0x82 }, { STB0899_GPIO19CFG , 0x82 }, { STB0899_GPIO20CFG , 0x82 }, { STB0899_SDATCFG , 0xb8 }, { STB0899_SCLTCFG , 0xba }, { STB0899_AGCRFCFG , 0x08 }, /* 0x1c */ { STB0899_GPIO22 , 0x82 }, /* AGCBB2CFG */ { STB0899_GPIO21 , 0x91 }, /* AGCBB1CFG */ { STB0899_DIRCLKCFG , 0x82 }, { STB0899_CLKOUT27CFG , 0x7e }, { STB0899_STDBYCFG , 
0x82 }, { STB0899_CS0CFG , 0x82 }, { STB0899_CS1CFG , 0x82 }, { STB0899_DISEQCOCFG , 0x20 }, { STB0899_GPIO32CFG , 0x82 }, { STB0899_GPIO33CFG , 0x82 }, { STB0899_GPIO34CFG , 0x82 }, { STB0899_GPIO35CFG , 0x82 }, { STB0899_GPIO36CFG , 0x82 }, { STB0899_GPIO37CFG , 0x82 }, { STB0899_GPIO38CFG , 0x82 }, { STB0899_GPIO39CFG , 0x82 }, { STB0899_NCOARSE , 0x15 }, /* 0x15 = 27 Mhz Clock, F/3 = 198MHz, F/6 = 99MHz */ { STB0899_SYNTCTRL , 0x02 }, /* 0x00 = CLK from CLKI, 0x02 = CLK from XTALI */ { STB0899_FILTCTRL , 0x00 }, { STB0899_SYSCTRL , 0x00 }, { STB0899_STOPCLK1 , 0x20 }, { STB0899_STOPCLK2 , 0x00 }, { STB0899_INTBUFSTATUS , 0x00 }, { STB0899_INTBUFCTRL , 0x0a }, { 0xffff , 0xff }, }; static const struct stb0899_s1_reg knc1_stb0899_s1_init_3[] = { { STB0899_DEMOD , 0x00 }, { STB0899_RCOMPC , 0xc9 }, { STB0899_AGC1CN , 0x41 }, { STB0899_AGC1REF , 0x08 }, { STB0899_RTC , 0x7a }, { STB0899_TMGCFG , 0x4e }, { STB0899_AGC2REF , 0x33 }, { STB0899_TLSR , 0x84 }, { STB0899_CFD , 0xee }, { STB0899_ACLC , 0x87 }, { STB0899_BCLC , 0x94 }, { STB0899_EQON , 0x41 }, { STB0899_LDT , 0xdd }, { STB0899_LDT2 , 0xc9 }, { STB0899_EQUALREF , 0xb4 }, { STB0899_TMGRAMP , 0x10 }, { STB0899_TMGTHD , 0x30 }, { STB0899_IDCCOMP , 0xfb }, { STB0899_QDCCOMP , 0x03 }, { STB0899_POWERI , 0x3b }, { STB0899_POWERQ , 0x3d }, { STB0899_RCOMP , 0x81 }, { STB0899_AGCIQIN , 0x80 }, { STB0899_AGC2I1 , 0x04 }, { STB0899_AGC2I2 , 0xf5 }, { STB0899_TLIR , 0x25 }, { STB0899_RTF , 0x80 }, { STB0899_DSTATUS , 0x00 }, { STB0899_LDI , 0xca }, { STB0899_CFRM , 0xf1 }, { STB0899_CFRL , 0xf3 }, { STB0899_NIRM , 0x2a }, { STB0899_NIRL , 0x05 }, { STB0899_ISYMB , 0x17 }, { STB0899_QSYMB , 0xfa }, { STB0899_SFRH , 0x2f }, { STB0899_SFRM , 0x68 }, { STB0899_SFRL , 0x40 }, { STB0899_SFRUPH , 0x2f }, { STB0899_SFRUPM , 0x68 }, { STB0899_SFRUPL , 0x40 }, { STB0899_EQUAI1 , 0xfd }, { STB0899_EQUAQ1 , 0x04 }, { STB0899_EQUAI2 , 0x0f }, { STB0899_EQUAQ2 , 0xff }, { STB0899_EQUAI3 , 0xdf }, { STB0899_EQUAQ3 , 0xfa }, { 
STB0899_EQUAI4 , 0x37 }, { STB0899_EQUAQ4 , 0x0d }, { STB0899_EQUAI5 , 0xbd }, { STB0899_EQUAQ5 , 0xf7 }, { STB0899_DSTATUS2 , 0x00 }, { STB0899_VSTATUS , 0x00 }, { STB0899_VERROR , 0xff }, { STB0899_IQSWAP , 0x2a }, { STB0899_ECNT1M , 0x00 }, { STB0899_ECNT1L , 0x00 }, { STB0899_ECNT2M , 0x00 }, { STB0899_ECNT2L , 0x00 }, { STB0899_ECNT3M , 0x00 }, { STB0899_ECNT3L , 0x00 }, { STB0899_FECAUTO1 , 0x06 }, { STB0899_FECM , 0x01 }, { STB0899_VTH12 , 0xf0 }, { STB0899_VTH23 , 0xa0 }, { STB0899_VTH34 , 0x78 }, { STB0899_VTH56 , 0x4e }, { STB0899_VTH67 , 0x48 }, { STB0899_VTH78 , 0x38 }, { STB0899_PRVIT , 0xff }, { STB0899_VITSYNC , 0x19 }, { STB0899_RSULC , 0xb1 }, /* DVB = 0xb1, DSS = 0xa1 */ { STB0899_TSULC , 0x42 }, { STB0899_RSLLC , 0x40 }, { STB0899_TSLPL , 0x12 }, { STB0899_TSCFGH , 0x0c }, { STB0899_TSCFGM , 0x00 }, { STB0899_TSCFGL , 0x0c }, { STB0899_TSOUT , 0x4d }, /* 0x0d for CAM */ { STB0899_RSSYNCDEL , 0x00 }, { STB0899_TSINHDELH , 0x02 }, { STB0899_TSINHDELM , 0x00 }, { STB0899_TSINHDELL , 0x00 }, { STB0899_TSLLSTKM , 0x00 }, { STB0899_TSLLSTKL , 0x00 }, { STB0899_TSULSTKM , 0x00 }, { STB0899_TSULSTKL , 0xab }, { STB0899_PCKLENUL , 0x00 }, { STB0899_PCKLENLL , 0xcc }, { STB0899_RSPCKLEN , 0xcc }, { STB0899_TSSTATUS , 0x80 }, { STB0899_ERRCTRL1 , 0xb6 }, { STB0899_ERRCTRL2 , 0x96 }, { STB0899_ERRCTRL3 , 0x89 }, { STB0899_DMONMSK1 , 0x27 }, { STB0899_DMONMSK0 , 0x03 }, { STB0899_DEMAPVIT , 0x5c }, { STB0899_PLPARM , 0x1f }, { STB0899_PDELCTRL , 0x48 }, { STB0899_PDELCTRL2 , 0x00 }, { STB0899_BBHCTRL1 , 0x00 }, { STB0899_BBHCTRL2 , 0x00 }, { STB0899_HYSTTHRESH , 0x77 }, { STB0899_MATCSTM , 0x00 }, { STB0899_MATCSTL , 0x00 }, { STB0899_UPLCSTM , 0x00 }, { STB0899_UPLCSTL , 0x00 }, { STB0899_DFLCSTM , 0x00 }, { STB0899_DFLCSTL , 0x00 }, { STB0899_SYNCCST , 0x00 }, { STB0899_SYNCDCSTM , 0x00 }, { STB0899_SYNCDCSTL , 0x00 }, { STB0899_ISI_ENTRY , 0x00 }, { STB0899_ISI_BIT_EN , 0x00 }, { STB0899_MATSTRM , 0x00 }, { STB0899_MATSTRL , 0x00 }, { STB0899_UPLSTRM , 
0x00 }, { STB0899_UPLSTRL , 0x00 }, { STB0899_DFLSTRM , 0x00 }, { STB0899_DFLSTRL , 0x00 }, { STB0899_SYNCSTR , 0x00 }, { STB0899_SYNCDSTRM , 0x00 }, { STB0899_SYNCDSTRL , 0x00 }, { STB0899_CFGPDELSTATUS1 , 0x10 }, { STB0899_CFGPDELSTATUS2 , 0x00 }, { STB0899_BBFERRORM , 0x00 }, { STB0899_BBFERRORL , 0x00 }, { STB0899_UPKTERRORM , 0x00 }, { STB0899_UPKTERRORL , 0x00 }, { 0xffff , 0xff }, }; /* STB0899 demodulator config for the KNC1 and clones */ static struct stb0899_config knc1_dvbs2_config = { .init_dev = knc1_stb0899_s1_init_1, .init_s2_demod = stb0899_s2_init_2, .init_s1_demod = knc1_stb0899_s1_init_3, .init_s2_fec = stb0899_s2_init_4, .init_tst = stb0899_s1_init_5, .postproc = NULL, .demod_address = 0x68, // .ts_output_mode = STB0899_OUT_PARALLEL, /* types = SERIAL/PARALLEL */ .block_sync_mode = STB0899_SYNC_FORCED, /* DSS, SYNC_FORCED/UNSYNCED */ // .ts_pfbit_toggle = STB0899_MPEG_NORMAL, /* DirecTV, MPEG toggling seq */ .xtal_freq = 27000000, .inversion = IQ_SWAP_OFF, /* 1 */ .lo_clk = 76500000, .hi_clk = 90000000, .esno_ave = STB0899_DVBS2_ESNO_AVE, .esno_quant = STB0899_DVBS2_ESNO_QUANT, .avframes_coarse = STB0899_DVBS2_AVFRAMES_COARSE, .avframes_fine = STB0899_DVBS2_AVFRAMES_FINE, .miss_threshold = STB0899_DVBS2_MISS_THRESHOLD, .uwp_threshold_acq = STB0899_DVBS2_UWP_THRESHOLD_ACQ, .uwp_threshold_track = STB0899_DVBS2_UWP_THRESHOLD_TRACK, .uwp_threshold_sof = STB0899_DVBS2_UWP_THRESHOLD_SOF, .sof_search_timeout = STB0899_DVBS2_SOF_SEARCH_TIMEOUT, .btr_nco_bits = STB0899_DVBS2_BTR_NCO_BITS, .btr_gain_shift_offset = STB0899_DVBS2_BTR_GAIN_SHIFT_OFFSET, .crl_nco_bits = STB0899_DVBS2_CRL_NCO_BITS, .ldpc_max_iter = STB0899_DVBS2_LDPC_MAX_ITER, .tuner_get_frequency = tda8261_get_frequency, .tuner_set_frequency = tda8261_set_frequency, .tuner_set_bandwidth = NULL, .tuner_get_bandwidth = tda8261_get_bandwidth, .tuner_set_rfsiggain = NULL }; /* * SD1878/SHA tuner config * 1F, Single I/P, Horizontal mount, High Sensitivity */ static const struct tda8261_config 
sd1878c_config = { // .name = "SD1878/SHA", .addr = 0x60, .step_size = TDA8261_STEP_1000 /* kHz */ }; static u8 read_pwm(struct budget_av *budget_av) { u8 b = 0xff; u8 pwm; struct i2c_msg msg[] = { {.addr = 0x50,.flags = 0,.buf = &b,.len = 1}, {.addr = 0x50,.flags = I2C_M_RD,.buf = &pwm,.len = 1} }; if ((i2c_transfer(&budget_av->budget.i2c_adap, msg, 2) != 2) || (pwm == 0xff)) pwm = 0x48; return pwm; } #define SUBID_DVBS_KNC1 0x0010 #define SUBID_DVBS_KNC1_PLUS 0x0011 #define SUBID_DVBS_TYPHOON 0x4f56 #define SUBID_DVBS_CINERGY1200 0x1154 #define SUBID_DVBS_CYNERGY1200N 0x1155 #define SUBID_DVBS_TV_STAR 0x0014 #define SUBID_DVBS_TV_STAR_PLUS_X4 0x0015 #define SUBID_DVBS_TV_STAR_CI 0x0016 #define SUBID_DVBS2_KNC1 0x0018 #define SUBID_DVBS2_KNC1_OEM 0x0019 #define SUBID_DVBS_EASYWATCH_1 0x001a #define SUBID_DVBS_EASYWATCH_2 0x001b #define SUBID_DVBS2_EASYWATCH 0x001d #define SUBID_DVBS_EASYWATCH 0x001e #define SUBID_DVBC_EASYWATCH 0x002a #define SUBID_DVBC_EASYWATCH_MK3 0x002c #define SUBID_DVBC_KNC1 0x0020 #define SUBID_DVBC_KNC1_PLUS 0x0021 #define SUBID_DVBC_KNC1_MK3 0x0022 #define SUBID_DVBC_KNC1_PLUS_MK3 0x0023 #define SUBID_DVBC_CINERGY1200 0x1156 #define SUBID_DVBC_CINERGY1200_MK3 0x1176 #define SUBID_DVBT_EASYWATCH 0x003a #define SUBID_DVBT_KNC1_PLUS 0x0031 #define SUBID_DVBT_KNC1 0x0030 #define SUBID_DVBT_CINERGY1200 0x1157 static void frontend_init(struct budget_av *budget_av) { struct saa7146_dev * saa = budget_av->budget.dev; struct dvb_frontend * fe = NULL; /* Enable / PowerON Frontend */ saa7146_setgpio(saa, 0, SAA7146_GPIO_OUTLO); /* Wait for PowerON */ msleep(100); /* additional setup necessary for the PLUS cards */ switch (saa->pci->subsystem_device) { case SUBID_DVBS_KNC1_PLUS: case SUBID_DVBC_KNC1_PLUS: case SUBID_DVBT_KNC1_PLUS: case SUBID_DVBC_EASYWATCH: case SUBID_DVBC_KNC1_PLUS_MK3: case SUBID_DVBS2_KNC1: case SUBID_DVBS2_KNC1_OEM: case SUBID_DVBS2_EASYWATCH: saa7146_setgpio(saa, 3, SAA7146_GPIO_OUTHI); break; } switch 
(saa->pci->subsystem_device) { case SUBID_DVBS_KNC1: /* * maybe that setting is needed for other dvb-s cards as well, * but so far it has been only confirmed for this type */ budget_av->reinitialise_demod = 1; /* fall through */ case SUBID_DVBS_KNC1_PLUS: case SUBID_DVBS_EASYWATCH_1: if (saa->pci->subsystem_vendor == 0x1894) { fe = dvb_attach(stv0299_attach, &cinergy_1200s_1894_0010_config, &budget_av->budget.i2c_adap); if (fe) { dvb_attach(tua6100_attach, fe, 0x60, &budget_av->budget.i2c_adap); } } else { fe = dvb_attach(stv0299_attach, &typhoon_config, &budget_av->budget.i2c_adap); if (fe) { fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params; } } break; case SUBID_DVBS_TV_STAR: case SUBID_DVBS_TV_STAR_PLUS_X4: case SUBID_DVBS_TV_STAR_CI: case SUBID_DVBS_CYNERGY1200N: case SUBID_DVBS_EASYWATCH: case SUBID_DVBS_EASYWATCH_2: fe = dvb_attach(stv0299_attach, &philips_sd1878_config, &budget_av->budget.i2c_adap); if (fe) { dvb_attach(dvb_pll_attach, fe, 0x60, &budget_av->budget.i2c_adap, DVB_PLL_PHILIPS_SD1878_TDA8261); } break; case SUBID_DVBS_TYPHOON: fe = dvb_attach(stv0299_attach, &typhoon_config, &budget_av->budget.i2c_adap); if (fe) { fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params; } break; case SUBID_DVBS2_KNC1: case SUBID_DVBS2_KNC1_OEM: case SUBID_DVBS2_EASYWATCH: budget_av->reinitialise_demod = 1; if ((fe = dvb_attach(stb0899_attach, &knc1_dvbs2_config, &budget_av->budget.i2c_adap))) dvb_attach(tda8261_attach, fe, &sd1878c_config, &budget_av->budget.i2c_adap); break; case SUBID_DVBS_CINERGY1200: fe = dvb_attach(stv0299_attach, &cinergy_1200s_config, &budget_av->budget.i2c_adap); if (fe) { fe->ops.tuner_ops.set_params = philips_su1278_ty_ci_tuner_set_params; } break; case SUBID_DVBC_KNC1: case SUBID_DVBC_KNC1_PLUS: case SUBID_DVBC_CINERGY1200: case SUBID_DVBC_EASYWATCH: budget_av->reinitialise_demod = 1; budget_av->budget.dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240; fe = dvb_attach(tda10021_attach, 
&philips_cu1216_config, &budget_av->budget.i2c_adap, read_pwm(budget_av)); if (fe == NULL) fe = dvb_attach(tda10021_attach, &philips_cu1216_config_altaddress, &budget_av->budget.i2c_adap, read_pwm(budget_av)); if (fe) { fe->ops.tuner_ops.set_params = philips_cu1216_tuner_set_params; } break; case SUBID_DVBC_EASYWATCH_MK3: case SUBID_DVBC_CINERGY1200_MK3: case SUBID_DVBC_KNC1_MK3: case SUBID_DVBC_KNC1_PLUS_MK3: budget_av->reinitialise_demod = 1; budget_av->budget.dev->i2c_bitrate = SAA7146_I2C_BUS_BIT_RATE_240; fe = dvb_attach(tda10023_attach, &philips_cu1216_tda10023_config, &budget_av->budget.i2c_adap, read_pwm(budget_av)); if (fe) { fe->ops.tuner_ops.set_params = philips_cu1216_tuner_set_params; } break; case SUBID_DVBT_EASYWATCH: case SUBID_DVBT_KNC1: case SUBID_DVBT_KNC1_PLUS: case SUBID_DVBT_CINERGY1200: budget_av->reinitialise_demod = 1; fe = dvb_attach(tda10046_attach, &philips_tu1216_config, &budget_av->budget.i2c_adap); if (fe) { fe->ops.tuner_ops.init = philips_tu1216_tuner_init; fe->ops.tuner_ops.set_params = philips_tu1216_tuner_set_params; } break; } if (fe == NULL) { printk(KERN_ERR "budget-av: A frontend driver was not found " "for device [%04x:%04x] subsystem [%04x:%04x]\n", saa->pci->vendor, saa->pci->device, saa->pci->subsystem_vendor, saa->pci->subsystem_device); return; } budget_av->budget.dvb_frontend = fe; if (dvb_register_frontend(&budget_av->budget.dvb_adapter, budget_av->budget.dvb_frontend)) { printk(KERN_ERR "budget-av: Frontend registration failed!\n"); dvb_frontend_detach(budget_av->budget.dvb_frontend); budget_av->budget.dvb_frontend = NULL; } } static void budget_av_irq(struct saa7146_dev *dev, u32 * isr) { struct budget_av *budget_av = (struct budget_av *) dev->ext_priv; dprintk(8, "dev: %p, budget_av: %p\n", dev, budget_av); if (*isr & MASK_10) ttpci_budget_irq10_handler(dev, isr); } static int budget_av_detach(struct saa7146_dev *dev) { struct budget_av *budget_av = (struct budget_av *) dev->ext_priv; int err; dprintk(2, "dev: 
%p\n", dev); if (1 == budget_av->has_saa7113) { saa7146_setgpio(dev, 0, SAA7146_GPIO_OUTLO); msleep(200); saa7146_unregister_device(&budget_av->vd, dev); saa7146_vv_release(dev); } if (budget_av->budget.ci_present) ciintf_deinit(budget_av); if (budget_av->budget.dvb_frontend != NULL) { dvb_unregister_frontend(budget_av->budget.dvb_frontend); dvb_frontend_detach(budget_av->budget.dvb_frontend); } err = ttpci_budget_deinit(&budget_av->budget); kfree(budget_av); return err; } #define KNC1_INPUTS 2 static struct v4l2_input knc1_inputs[KNC1_INPUTS] = { { 0, "Composite", V4L2_INPUT_TYPE_TUNER, 1, 0, V4L2_STD_PAL_BG | V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD }, { 1, "S-Video", V4L2_INPUT_TYPE_CAMERA, 2, 0, V4L2_STD_PAL_BG | V4L2_STD_NTSC_M, 0, V4L2_IN_CAP_STD }, }; static int vidioc_enum_input(struct file *file, void *fh, struct v4l2_input *i) { dprintk(1, "VIDIOC_ENUMINPUT %d.\n", i->index); if (i->index >= KNC1_INPUTS) return -EINVAL; memcpy(i, &knc1_inputs[i->index], sizeof(struct v4l2_input)); return 0; } static int vidioc_g_input(struct file *file, void *fh, unsigned int *i) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct budget_av *budget_av = (struct budget_av *)dev->ext_priv; *i = budget_av->cur_input; dprintk(1, "VIDIOC_G_INPUT %d.\n", *i); return 0; } static int vidioc_s_input(struct file *file, void *fh, unsigned int input) { struct saa7146_dev *dev = ((struct saa7146_fh *)fh)->dev; struct budget_av *budget_av = (struct budget_av *)dev->ext_priv; dprintk(1, "VIDIOC_S_INPUT %d.\n", input); return saa7113_setinput(budget_av, input); } static struct saa7146_ext_vv vv_data; static int budget_av_attach(struct saa7146_dev *dev, struct saa7146_pci_extension_data *info) { struct budget_av *budget_av; u8 *mac; int err; dprintk(2, "dev: %p\n", dev); if (!(budget_av = kzalloc(sizeof(struct budget_av), GFP_KERNEL))) return -ENOMEM; budget_av->has_saa7113 = 0; budget_av->budget.ci_present = 0; dev->ext_priv = budget_av; err = 
ttpci_budget_init(&budget_av->budget, dev, info, THIS_MODULE, adapter_nr); if (err) { kfree(budget_av); return err; } /* knc1 initialization */ saa7146_write(dev, DD1_STREAM_B, 0x04000000); saa7146_write(dev, DD1_INIT, 0x07000600); saa7146_write(dev, MC2, MASK_09 | MASK_25 | MASK_10 | MASK_26); if (saa7113_init(budget_av) == 0) { budget_av->has_saa7113 = 1; if (0 != saa7146_vv_init(dev, &vv_data)) { /* fixme: proper cleanup here */ ERR(("cannot init vv subsystem.\n")); return err; } vv_data.ops.vidioc_enum_input = vidioc_enum_input; vv_data.ops.vidioc_g_input = vidioc_g_input; vv_data.ops.vidioc_s_input = vidioc_s_input; if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) { /* fixme: proper cleanup here */ ERR(("cannot register capture v4l2 device.\n")); saa7146_vv_release(dev); return err; } /* beware: this modifies dev->vv ... */ saa7146_set_hps_source_and_sync(dev, SAA7146_HPS_SOURCE_PORT_A, SAA7146_HPS_SYNC_PORT_A); saa7113_setinput(budget_av, 0); } /* fixme: find some sane values here... 
*/ saa7146_write(dev, PCI_BT_V1, 0x1c00101f); mac = budget_av->budget.dvb_adapter.proposed_mac; if (i2c_readregs(&budget_av->budget.i2c_adap, 0xa0, 0x30, mac, 6)) { printk(KERN_ERR "KNC1-%d: Could not read MAC from KNC1 card\n", budget_av->budget.dvb_adapter.num); memset(mac, 0, 6); } else { printk(KERN_INFO "KNC1-%d: MAC addr = %.2x:%.2x:%.2x:%.2x:%.2x:%.2x\n", budget_av->budget.dvb_adapter.num, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]); } budget_av->budget.dvb_adapter.priv = budget_av; frontend_init(budget_av); ciintf_init(budget_av); ttpci_budget_init_hooks(&budget_av->budget); return 0; } static struct saa7146_standard standard[] = { {.name = "PAL",.id = V4L2_STD_PAL, .v_offset = 0x17,.v_field = 288, .h_offset = 0x14,.h_pixels = 680, .v_max_out = 576,.h_max_out = 768 }, {.name = "NTSC",.id = V4L2_STD_NTSC, .v_offset = 0x16,.v_field = 240, .h_offset = 0x06,.h_pixels = 708, .v_max_out = 480,.h_max_out = 640, }, }; static struct saa7146_ext_vv vv_data = { .inputs = 2, .capabilities = 0, // perhaps later: V4L2_CAP_VBI_CAPTURE, but that need tweaking with the saa7113 .flags = 0, .stds = &standard[0], .num_stds = ARRAY_SIZE(standard), }; static struct saa7146_extension budget_extension; MAKE_BUDGET_INFO(knc1s, "KNC1 DVB-S", BUDGET_KNC1S); MAKE_BUDGET_INFO(knc1s2,"KNC1 DVB-S2", BUDGET_KNC1S2); MAKE_BUDGET_INFO(sates2,"Satelco EasyWatch DVB-S2", BUDGET_KNC1S2); MAKE_BUDGET_INFO(knc1c, "KNC1 DVB-C", BUDGET_KNC1C); MAKE_BUDGET_INFO(knc1t, "KNC1 DVB-T", BUDGET_KNC1T); MAKE_BUDGET_INFO(kncxs, "KNC TV STAR DVB-S", BUDGET_TVSTAR); MAKE_BUDGET_INFO(satewpls, "Satelco EasyWatch DVB-S light", BUDGET_TVSTAR); MAKE_BUDGET_INFO(satewpls1, "Satelco EasyWatch DVB-S light", BUDGET_KNC1S); MAKE_BUDGET_INFO(satewps, "Satelco EasyWatch DVB-S", BUDGET_KNC1S); MAKE_BUDGET_INFO(satewplc, "Satelco EasyWatch DVB-C", BUDGET_KNC1CP); MAKE_BUDGET_INFO(satewcmk3, "Satelco EasyWatch DVB-C MK3", BUDGET_KNC1C_MK3); MAKE_BUDGET_INFO(satewt, "Satelco EasyWatch DVB-T", BUDGET_KNC1T); 
MAKE_BUDGET_INFO(knc1sp, "KNC1 DVB-S Plus", BUDGET_KNC1SP); MAKE_BUDGET_INFO(knc1spx4, "KNC1 DVB-S Plus X4", BUDGET_KNC1SP); MAKE_BUDGET_INFO(knc1cp, "KNC1 DVB-C Plus", BUDGET_KNC1CP); MAKE_BUDGET_INFO(knc1cmk3, "KNC1 DVB-C MK3", BUDGET_KNC1C_MK3); MAKE_BUDGET_INFO(knc1cpmk3, "KNC1 DVB-C Plus MK3", BUDGET_KNC1CP_MK3); MAKE_BUDGET_INFO(knc1tp, "KNC1 DVB-T Plus", BUDGET_KNC1TP); MAKE_BUDGET_INFO(cin1200s, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S); MAKE_BUDGET_INFO(cin1200sn, "TerraTec Cinergy 1200 DVB-S", BUDGET_CIN1200S); MAKE_BUDGET_INFO(cin1200c, "Terratec Cinergy 1200 DVB-C", BUDGET_CIN1200C); MAKE_BUDGET_INFO(cin1200cmk3, "Terratec Cinergy 1200 DVB-C MK3", BUDGET_CIN1200C_MK3); MAKE_BUDGET_INFO(cin1200t, "Terratec Cinergy 1200 DVB-T", BUDGET_CIN1200T); static struct pci_device_id pci_tbl[] = { MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x4f56), MAKE_EXTENSION_PCI(knc1s, 0x1131, 0x0010), MAKE_EXTENSION_PCI(knc1s, 0x1894, 0x0010), MAKE_EXTENSION_PCI(knc1sp, 0x1131, 0x0011), MAKE_EXTENSION_PCI(knc1sp, 0x1894, 0x0011), MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0014), MAKE_EXTENSION_PCI(knc1spx4, 0x1894, 0x0015), MAKE_EXTENSION_PCI(kncxs, 0x1894, 0x0016), MAKE_EXTENSION_PCI(knc1s2, 0x1894, 0x0018), MAKE_EXTENSION_PCI(knc1s2, 0x1894, 0x0019), MAKE_EXTENSION_PCI(sates2, 0x1894, 0x001d), MAKE_EXTENSION_PCI(satewpls, 0x1894, 0x001e), MAKE_EXTENSION_PCI(satewpls1, 0x1894, 0x001a), MAKE_EXTENSION_PCI(satewps, 0x1894, 0x001b), MAKE_EXTENSION_PCI(satewplc, 0x1894, 0x002a), MAKE_EXTENSION_PCI(satewcmk3, 0x1894, 0x002c), MAKE_EXTENSION_PCI(satewt, 0x1894, 0x003a), MAKE_EXTENSION_PCI(knc1c, 0x1894, 0x0020), MAKE_EXTENSION_PCI(knc1cp, 0x1894, 0x0021), MAKE_EXTENSION_PCI(knc1cmk3, 0x1894, 0x0022), MAKE_EXTENSION_PCI(knc1cpmk3, 0x1894, 0x0023), MAKE_EXTENSION_PCI(knc1t, 0x1894, 0x0030), MAKE_EXTENSION_PCI(knc1tp, 0x1894, 0x0031), MAKE_EXTENSION_PCI(cin1200s, 0x153b, 0x1154), MAKE_EXTENSION_PCI(cin1200sn, 0x153b, 0x1155), MAKE_EXTENSION_PCI(cin1200c, 0x153b, 0x1156), 
MAKE_EXTENSION_PCI(cin1200cmk3, 0x153b, 0x1176), MAKE_EXTENSION_PCI(cin1200t, 0x153b, 0x1157), { .vendor = 0, } }; MODULE_DEVICE_TABLE(pci, pci_tbl); static struct saa7146_extension budget_extension = { .name = "budget_av", .flags = SAA7146_USE_I2C_IRQ, .pci_tbl = pci_tbl, .module = THIS_MODULE, .attach = budget_av_attach, .detach = budget_av_detach, .irq_mask = MASK_10, .irq_func = budget_av_irq, }; static int __init budget_av_init(void) { return saa7146_register_extension(&budget_extension); } static void __exit budget_av_exit(void) { saa7146_unregister_extension(&budget_extension); } module_init(budget_av_init); module_exit(budget_av_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ralph Metzler, Marcus Metzler, Michael Hunold, others"); MODULE_DESCRIPTION("driver for the SAA7146 based so-called " "budget PCI DVB w/ analog input and CI-module (e.g. the KNC cards)");
gpl-2.0
eoghan2t9/android_kernel_oppo_n1_test
arch/arm/mach-msm/board-halibut.c
4625
2420
/* linux/arch/arm/mach-msm/board-halibut.c
 *
 * Board support for the Halibut (QCT SURF7200A) MSM7x01A reference board:
 * registers the on-board platform devices and wires up the machine
 * init/IRQ/IO-map hooks consumed by MACHINE_START below.
 *
 * Copyright (C) 2007 Google, Inc.
 * Author: Brian Swetland <swetland@google.com>
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/io.h>
#include <linux/delay.h>

#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/flash.h>
#include <asm/setup.h>

#include <mach/irqs.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>

#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>

#include "devices.h"

/*
 * SMC91x ethernet controller: memory-mapped register window plus an
 * interrupt routed through GPIO 49.
 */
static struct resource smc91x_resources[] = {
	[0] = {
		.start = 0x9C004300,
		.end = 0x9C004400,
		.flags = IORESOURCE_MEM,
	},
	[1] = {
		.start = MSM_GPIO_TO_INT(49),
		.end = MSM_GPIO_TO_INT(49),
		.flags = IORESOURCE_IRQ,
	},
};

static struct platform_device smc91x_device = {
	.name = "smc91x",
	.id = 0,
	.num_resources = ARRAY_SIZE(smc91x_resources),
	.resource = smc91x_resources,
};

/* On-board devices registered during halibut_init(). */
static struct platform_device *devices[] __initdata = {
	&msm_device_uart3,
	&msm_device_smd,
	&msm_device_nand,
	&msm_device_hsusb,
	&msm_device_i2c,
	&smc91x_device,
};

extern struct sys_timer msm_timer;

/* Install the MSM-specific ioremap caller before any mappings are made. */
static void __init halibut_init_early(void)
{
	arch_ioremap_caller = __msm_ioremap_caller;
}

static void __init halibut_init_irq(void)
{
	msm_init_irq();
}

static void __init halibut_init(void)
{
	platform_add_devices(devices, ARRAY_SIZE(devices));
}

/* No tag/memory fixups needed on this board; hook kept as a stub. */
static void __init halibut_fixup(struct tag *tags, char **cmdline,
				 struct meminfo *mi)
{
}

static void __init halibut_map_io(void)
{
	msm_map_common_io();
	/* MSM7x01A clock tables — this board is a 7x01a SURF. */
	msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
}

MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
	.atag_offset = 0x100,
	.fixup = halibut_fixup,
	.map_io = halibut_map_io,
	.init_early = halibut_init_early,
	.init_irq = halibut_init_irq,
	.init_machine = halibut_init,
	.timer = &msm_timer,
MACHINE_END
gpl-2.0
garwynn/L900_3.8_Experiment
arch/sh/drivers/pci/fixups-snapgear.c
4625
1100
/* * arch/sh/drivers/pci/ops-snapgear.c * * Author: David McCullough <davidm@snapgear.com> * * Ported to new API by Paul Mundt <lethal@linux-sh.org> * * Highly leveraged from pci-bigsur.c, written by Dustin McIntire. * * May be copied or modified under the terms of the GNU General Public * License. See linux/COPYING for more information. * * PCI initialization for the SnapGear boards */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/sh_intc.h> #include "pci-sh4.h" int __init pcibios_map_platform_irq(const struct pci_dev *pdev, u8 slot, u8 pin) { int irq = -1; switch (slot) { case 8: /* the PCI bridge */ break; case 11: irq = evt2irq(0x300); break; /* USB */ case 12: irq = evt2irq(0x360); break; /* PCMCIA */ case 13: irq = evt2irq(0x2a0); break; /* eth0 */ case 14: irq = evt2irq(0x300); break; /* eth1 */ case 15: irq = evt2irq(0x360); break; /* safenet (unused) */ } printk("PCI: Mapping SnapGear IRQ for slot %d, pin %c to irq %d\n", slot, pin - 1 + 'A', irq); return irq; }
gpl-2.0
Sudokamikaze/XKernel-taoshan
drivers/gpu/drm/drm_lock.c
4881
10696
/** * \file drm_lock.c * IOCTLs for locking * * \author Rickard E. (Rik) Faith <faith@valinux.com> * \author Gareth Hughes <gareth@valinux.com> */ /* * Created: Tue Feb 2 08:37:54 1999 by faith@valinux.com * * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas. * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. */ #include <linux/export.h> #include "drmP.h" static int drm_notifier(void *priv); static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context); /** * Lock ioctl. * * \param inode device inode. * \param file_priv DRM file private. * \param cmd command. * \param arg user argument, pointing to a drm_lock structure. * \return zero on success or negative number on failure. * * Add the current task to the lock wait queue, and attempt to take to lock. 
 */
int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	DECLARE_WAITQUEUE(entry, current);
	struct drm_lock *lock = data;
	struct drm_master *master = file_priv->master;
	int ret = 0;

	++file_priv->lock_count;

	/* Userspace may never lock with the kernel's reserved context. */
	if (lock->context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  task_pid_nr(current), lock->context);
		return -EINVAL;
	}

	DRM_DEBUG("%d (pid %d) requests lock (0x%08x), flags = 0x%08x\n",
		  lock->context, task_pid_nr(current),
		  master->lock.hw_lock->lock, lock->flags);

	if (drm_core_check_feature(dev, DRIVER_DMA_QUEUE))
		if (lock->context < 0)
			return -EINVAL;

	/* Join the wait queue and announce ourselves as a user waiter so
	 * drm_lock_take() can set _DRM_LOCK_CONT when there is contention. */
	add_wait_queue(&master->lock.lock_queue, &entry);
	spin_lock_bh(&master->lock.spinlock);
	master->lock.user_waiters++;
	spin_unlock_bh(&master->lock.spinlock);

	for (;;) {
		__set_current_state(TASK_INTERRUPTIBLE);
		if (!master->lock.hw_lock) {
			/* Device has been unregistered */
			send_sig(SIGTERM, current, 0);
			ret = -EINTR;
			break;
		}
		if (drm_lock_take(&master->lock, lock->context)) {
			master->lock.file_priv = file_priv;
			master->lock.lock_time = jiffies;
			atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
			break;	/* Got lock */
		}

		/* Contention: drop the global mutex while asleep so the
		 * current holder can get in to unlock. */
		mutex_unlock(&drm_global_mutex);
		schedule();
		mutex_lock(&drm_global_mutex);
		if (signal_pending(current)) {
			ret = -EINTR;
			break;
		}
	}
	spin_lock_bh(&master->lock.spinlock);
	master->lock.user_waiters--;
	spin_unlock_bh(&master->lock.spinlock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&master->lock.lock_queue, &entry);

	DRM_DEBUG("%d %s\n", lock->context,
		  ret ? "interrupted" : "has lock");
	if (ret)
		return ret;

	/* don't set the block all signals on the master process for now
	 * really probably not the correct answer but lets us debug xkb
	 * xserver for now */
	if (!file_priv->is_master) {
		sigemptyset(&dev->sigmask);
		sigaddset(&dev->sigmask, SIGSTOP);
		sigaddset(&dev->sigmask, SIGTSTP);
		sigaddset(&dev->sigmask, SIGTTIN);
		sigaddset(&dev->sigmask, SIGTTOU);
		dev->sigdata.context = lock->context;
		dev->sigdata.lock = master->lock.hw_lock;
		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
	}

	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
	{
		if (dev->driver->dma_quiescent(dev)) {
			DRM_DEBUG("%d waiting for DMA quiescent\n",
				  lock->context);
			return -EBUSY;
		}
	}

	return 0;
}

/**
 * Unlock ioctl.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg user argument, pointing to a drm_lock structure.
 * \return zero on success or negative number on failure.
 *
 * Transfer and free the lock.
 */
int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_lock *lock = data;
	struct drm_master *master = file_priv->master;

	if (lock->context == DRM_KERNEL_CONTEXT) {
		DRM_ERROR("Process %d using kernel context %d\n",
			  task_pid_nr(current), lock->context);
		return -EINVAL;
	}

	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);

	if (drm_lock_free(&master->lock, lock->context)) {
		/* FIXME: Should really bail out here. */
	}

	unblock_all_signals();
	return 0;
}

/**
 * Take the heavyweight lock.
 *
 * \param lock lock pointer.
 * \param context locking context.
 * \return one if the lock is held, or zero otherwise.
 *
 * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
*/ static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; spin_lock_bh(&lock_data->spinlock); do { old = *lock; if (old & _DRM_LOCK_HELD) new = old | _DRM_LOCK_CONT; else { new = context | _DRM_LOCK_HELD | ((lock_data->user_waiters + lock_data->kernel_waiters > 1) ? _DRM_LOCK_CONT : 0); } prev = cmpxchg(lock, old, new); } while (prev != old); spin_unlock_bh(&lock_data->spinlock); if (_DRM_LOCKING_CONTEXT(old) == context) { if (old & _DRM_LOCK_HELD) { if (context != DRM_KERNEL_CONTEXT) { DRM_ERROR("%d holds heavyweight lock\n", context); } return 0; } } if ((_DRM_LOCKING_CONTEXT(new)) == context && (new & _DRM_LOCK_HELD)) { /* Have lock */ return 1; } return 0; } /** * This takes a lock forcibly and hands it to context. Should ONLY be used * inside *_unlock to give lock to kernel before calling *_dma_schedule. * * \param dev DRM device. * \param lock lock pointer. * \param context locking context. * \return always one. * * Resets the lock file pointer. * Marks the lock as held by the given context, via the \p cmpxchg instruction. */ static int drm_lock_transfer(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; lock_data->file_priv = NULL; do { old = *lock; new = context | _DRM_LOCK_HELD; prev = cmpxchg(lock, old, new); } while (prev != old); return 1; } /** * Free lock. * * \param dev DRM device. * \param lock lock. * \param context context. * * Resets the lock file pointer. * Marks the lock as not held, via the \p cmpxchg instruction. Wakes any task * waiting on the lock queue. 
*/ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context) { unsigned int old, new, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; spin_lock_bh(&lock_data->spinlock); if (lock_data->kernel_waiters != 0) { drm_lock_transfer(lock_data, 0); lock_data->idle_has_lock = 1; spin_unlock_bh(&lock_data->spinlock); return 1; } spin_unlock_bh(&lock_data->spinlock); do { old = *lock; new = _DRM_LOCKING_CONTEXT(old); prev = cmpxchg(lock, old, new); } while (prev != old); if (_DRM_LOCK_IS_HELD(old) && _DRM_LOCKING_CONTEXT(old) != context) { DRM_ERROR("%d freed heavyweight lock held by %d\n", context, _DRM_LOCKING_CONTEXT(old)); return 1; } wake_up_interruptible(&lock_data->lock_queue); return 0; } /** * If we get here, it means that the process has called DRM_IOCTL_LOCK * without calling DRM_IOCTL_UNLOCK. * * If the lock is not held, then let the signal proceed as usual. If the lock * is held, then set the contended flag and keep the signal blocked. * * \param priv pointer to a drm_sigdata structure. * \return one if the signal should be delivered normally, or zero if the * signal should be blocked. */ static int drm_notifier(void *priv) { struct drm_sigdata *s = (struct drm_sigdata *) priv; unsigned int old, new, prev; /* Allow signal delivery if lock isn't held */ if (!s->lock || !_DRM_LOCK_IS_HELD(s->lock->lock) || _DRM_LOCKING_CONTEXT(s->lock->lock) != s->context) return 1; /* Otherwise, set flag to force call to drmUnlock */ do { old = s->lock->lock; new = old | _DRM_LOCK_CONT; prev = cmpxchg(&s->lock->lock, old, new); } while (prev != old); return 0; } /** * This function returns immediately and takes the hw lock * with the kernel context if it is free, otherwise it gets the highest priority when and if * it is eventually released. * * This guarantees that the kernel will _eventually_ have the lock _unless_ it is held * by a blocked process. 
(In the latter case an explicit wait for the hardware lock would cause * a deadlock, which is why the "idlelock" was invented). * * This should be sufficient to wait for GPU idle without * having to worry about starvation. */ void drm_idlelock_take(struct drm_lock_data *lock_data) { int ret = 0; spin_lock_bh(&lock_data->spinlock); lock_data->kernel_waiters++; if (!lock_data->idle_has_lock) { spin_unlock_bh(&lock_data->spinlock); ret = drm_lock_take(lock_data, DRM_KERNEL_CONTEXT); spin_lock_bh(&lock_data->spinlock); if (ret == 1) lock_data->idle_has_lock = 1; } spin_unlock_bh(&lock_data->spinlock); } EXPORT_SYMBOL(drm_idlelock_take); void drm_idlelock_release(struct drm_lock_data *lock_data) { unsigned int old, prev; volatile unsigned int *lock = &lock_data->hw_lock->lock; spin_lock_bh(&lock_data->spinlock); if (--lock_data->kernel_waiters == 0) { if (lock_data->idle_has_lock) { do { old = *lock; prev = cmpxchg(lock, old, DRM_KERNEL_CONTEXT); } while (prev != old); wake_up_interruptible(&lock_data->lock_queue); lock_data->idle_has_lock = 0; } } spin_unlock_bh(&lock_data->spinlock); } EXPORT_SYMBOL(drm_idlelock_release); int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv) { struct drm_master *master = file_priv->master; return (file_priv->lock_count && master->lock.hw_lock && _DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) && master->lock.file_priv == file_priv); }
gpl-2.0